Diffstat
-rw-r--r-- drivers/net/dsa/Kconfig | 125
-rw-r--r-- drivers/net/dsa/Makefile | 26
-rw-r--r-- drivers/net/dsa/b53/Kconfig | 47
-rw-r--r-- drivers/net/dsa/b53/Makefile | 8
-rw-r--r-- drivers/net/dsa/b53/b53_common.c | 2774
-rw-r--r-- drivers/net/dsa/b53/b53_mdio.c | 399
-rw-r--r-- drivers/net/dsa/b53/b53_mmap.c | 368
-rw-r--r-- drivers/net/dsa/b53/b53_priv.h | 392
-rw-r--r-- drivers/net/dsa/b53/b53_regs.h | 527
-rw-r--r-- drivers/net/dsa/b53/b53_serdes.c | 249
-rw-r--r-- drivers/net/dsa/b53/b53_serdes.h | 124
-rw-r--r-- drivers/net/dsa/b53/b53_spi.c | 376
-rw-r--r-- drivers/net/dsa/b53/b53_srab.c | 698
-rw-r--r-- drivers/net/dsa/bcm_sf2.c | 1623
-rw-r--r-- drivers/net/dsa/bcm_sf2.h | 237
-rw-r--r-- drivers/net/dsa/bcm_sf2_cfp.c | 1346
-rw-r--r-- drivers/net/dsa/bcm_sf2_regs.h | 496
-rw-r--r-- drivers/net/dsa/dsa_loop.c | 420
-rw-r--r-- drivers/net/dsa/dsa_loop.h | 20
-rw-r--r-- drivers/net/dsa/dsa_loop_bdinfo.c | 35
-rw-r--r-- drivers/net/dsa/hirschmann/Kconfig | 11
-rw-r--r-- drivers/net/dsa/hirschmann/Makefile | 5
-rw-r--r-- drivers/net/dsa/hirschmann/hellcreek.c | 2123
-rw-r--r-- drivers/net/dsa/hirschmann/hellcreek.h | 321
-rw-r--r-- drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c | 481
-rw-r--r-- drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h | 58
-rw-r--r-- drivers/net/dsa/hirschmann/hellcreek_ptp.c | 453
-rw-r--r-- drivers/net/dsa/hirschmann/hellcreek_ptp.h | 76
-rw-r--r-- drivers/net/dsa/lan9303-core.c | 1416
-rw-r--r-- drivers/net/dsa/lan9303.h | 13
-rw-r--r-- drivers/net/dsa/lan9303_i2c.c | 118
-rw-r--r-- drivers/net/dsa/lan9303_mdio.c | 177
-rw-r--r-- drivers/net/dsa/lantiq_gswip.c | 2287
-rw-r--r-- drivers/net/dsa/lantiq_pce.h | 153
-rw-r--r-- drivers/net/dsa/microchip/Kconfig | 30
-rw-r--r-- drivers/net/dsa/microchip/Makefile | 9
-rw-r--r-- drivers/net/dsa/microchip/ksz8.h | 59
-rw-r--r-- drivers/net/dsa/microchip/ksz8795.c | 1420
-rw-r--r-- drivers/net/dsa/microchip/ksz8795_reg.h | 812
-rw-r--r-- drivers/net/dsa/microchip/ksz8863_smi.c | 213
-rw-r--r-- drivers/net/dsa/microchip/ksz9477.c | 1202
-rw-r--r-- drivers/net/dsa/microchip/ksz9477.h | 61
-rw-r--r-- drivers/net/dsa/microchip/ksz9477_i2c.c | 133
-rw-r--r-- drivers/net/dsa/microchip/ksz9477_reg.h | 1620
-rw-r--r-- drivers/net/dsa/microchip/ksz_common.c | 3044
-rw-r--r-- drivers/net/dsa/microchip/ksz_common.h | 632
-rw-r--r-- drivers/net/dsa/microchip/ksz_spi.c | 246
-rw-r--r-- drivers/net/dsa/microchip/lan937x.h | 23
-rw-r--r-- drivers/net/dsa/microchip/lan937x_main.c | 391
-rw-r--r-- drivers/net/dsa/microchip/lan937x_reg.h | 202
-rw-r--r-- drivers/net/dsa/mt7530.c | 3417
-rw-r--r-- drivers/net/dsa/mt7530.h | 872
-rw-r--r-- drivers/net/dsa/mv88e6060.c | 336
-rw-r--r-- drivers/net/dsa/mv88e6060.h | 119
-rw-r--r-- drivers/net/dsa/mv88e6xxx/Kconfig | 19
-rw-r--r-- drivers/net/dsa/mv88e6xxx/Makefile | 21
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.c | 7270
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.h | 807
-rw-r--r-- drivers/net/dsa/mv88e6xxx/devlink.c | 833
-rw-r--r-- drivers/net/dsa/mv88e6xxx/devlink.h | 23
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global1.c | 579
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global1.h | 364
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global1_atu.c | 496
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global1_vtu.c | 680
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global2.c | 1214
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global2.h | 381
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global2_avb.c | 239
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global2_scratch.c | 319
-rw-r--r-- drivers/net/dsa/mv88e6xxx/hwtstamp.c | 615
-rw-r--r-- drivers/net/dsa/mv88e6xxx/hwtstamp.h | 176
-rw-r--r-- drivers/net/dsa/mv88e6xxx/phy.c | 246
-rw-r--r-- drivers/net/dsa/mv88e6xxx/phy.h | 39
-rw-r--r-- drivers/net/dsa/mv88e6xxx/port.c | 1740
-rw-r--r-- drivers/net/dsa/mv88e6xxx/port.h | 456
-rw-r--r-- drivers/net/dsa/mv88e6xxx/port_hidden.c | 71
-rw-r--r-- drivers/net/dsa/mv88e6xxx/ptp.c | 514
-rw-r--r-- drivers/net/dsa/mv88e6xxx/ptp.h | 177
-rw-r--r-- drivers/net/dsa/mv88e6xxx/serdes.c | 1609
-rw-r--r-- drivers/net/dsa/mv88e6xxx/serdes.h | 248
-rw-r--r-- drivers/net/dsa/mv88e6xxx/smi.c | 190
-rw-r--r-- drivers/net/dsa/mv88e6xxx/smi.h | 55
-rw-r--r-- drivers/net/dsa/mv88e6xxx/trace.c | 6
-rw-r--r-- drivers/net/dsa/mv88e6xxx/trace.h | 96
-rw-r--r-- drivers/net/dsa/ocelot/Kconfig | 32
-rw-r--r-- drivers/net/dsa/ocelot/Makefile | 11
-rw-r--r-- drivers/net/dsa/ocelot/felix.c | 2150
-rw-r--r-- drivers/net/dsa/ocelot/felix.h | 101
-rw-r--r-- drivers/net/dsa/ocelot/felix_vsc9959.c | 2743
-rw-r--r-- drivers/net/dsa/ocelot/seville_vsc9953.c | 1119
-rw-r--r-- drivers/net/dsa/qca/Kconfig | 17
-rw-r--r-- drivers/net/dsa/qca/Makefile | 4
-rw-r--r-- drivers/net/dsa/qca/ar9331.c | 1135
-rw-r--r-- drivers/net/dsa/qca/qca8k-8xxx.c | 2090
-rw-r--r-- drivers/net/dsa/qca/qca8k-common.c | 1234
-rw-r--r-- drivers/net/dsa/qca/qca8k.h | 515
-rw-r--r-- drivers/net/dsa/realtek/Kconfig | 54
-rw-r--r-- drivers/net/dsa/realtek/Makefile | 6
-rw-r--r-- drivers/net/dsa/realtek/realtek-mdio.c | 290
-rw-r--r-- drivers/net/dsa/realtek/realtek-smi.c | 570
-rw-r--r-- drivers/net/dsa/realtek/realtek.h | 150
-rw-r--r-- drivers/net/dsa/realtek/rtl8365mb.c | 2148
-rw-r--r-- drivers/net/dsa/realtek/rtl8366-core.c | 448
-rw-r--r-- drivers/net/dsa/realtek/rtl8366rb.c | 1873
-rw-r--r-- drivers/net/dsa/rzn1_a5psw.c | 1109
-rw-r--r-- drivers/net/dsa/rzn1_a5psw.h | 260
-rw-r--r-- drivers/net/dsa/sja1105/Kconfig | 51
-rw-r--r-- drivers/net/dsa/sja1105/Makefile | 25
-rw-r--r-- drivers/net/dsa/sja1105/sja1105.h | 422
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_clocking.c | 855
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_devlink.c | 145
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_dynamic_config.c | 1413
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_dynamic_config.h | 41
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_ethtool.c | 629
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_flower.c | 542
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_main.c | 3481
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_mdio.c | 547
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_ptp.c | 974
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_ptp.h | 207
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_spi.c | 977
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_static_config.c | 1952
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_static_config.h | 548
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_tas.c | 897
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_tas.h | 104
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_vl.c | 802
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_vl.h | 74
-rw-r--r-- drivers/net/dsa/vitesse-vsc73xx-core.c | 1233
-rw-r--r-- drivers/net/dsa/vitesse-vsc73xx-platform.c | 174
-rw-r--r-- drivers/net/dsa/vitesse-vsc73xx-spi.c | 229
-rw-r--r-- drivers/net/dsa/vitesse-vsc73xx.h | 30
-rw-r--r-- drivers/net/dsa/xrs700x/Kconfig | 26
-rw-r--r-- drivers/net/dsa/xrs700x/Makefile | 4
-rw-r--r-- drivers/net/dsa/xrs700x/xrs700x.c | 827
-rw-r--r-- drivers/net/dsa/xrs700x/xrs700x.h | 43
-rw-r--r-- drivers/net/dsa/xrs700x/xrs700x_i2c.c | 161
-rw-r--r-- drivers/net/dsa/xrs700x/xrs700x_mdio.c | 180
-rw-r--r-- drivers/net/dsa/xrs700x/xrs700x_reg.h | 208
136 files changed, 88166 insertions(+), 0 deletions(-)
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
new file mode 100644
index 000000000..07507b482
--- /dev/null
+++ b/drivers/net/dsa/Kconfig
@@ -0,0 +1,125 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "Distributed Switch Architecture drivers"
+ depends on NET_DSA
+
+source "drivers/net/dsa/b53/Kconfig"
+
+config NET_DSA_BCM_SF2
+ tristate "Broadcom Starfighter 2 Ethernet switch support"
+ depends on HAS_IOMEM
+ select NET_DSA_TAG_BRCM
+ select FIXED_PHY
+ select BCM7XXX_PHY
+ select MDIO_BCM_UNIMAC
+ select B53
+ help
+ This enables support for the Broadcom Starfighter 2 Ethernet
+ switch chips.
+
+config NET_DSA_LOOP
+ tristate "DSA mock-up Ethernet switch chip support"
+ select FIXED_PHY
+ help
+ This enables support for a fake mock-up switch chip which
+ exercises the DSA APIs.
+
+source "drivers/net/dsa/hirschmann/Kconfig"
+
+config NET_DSA_LANTIQ_GSWIP
+ tristate "Lantiq / Intel GSWIP"
+ depends on HAS_IOMEM
+ select NET_DSA_TAG_GSWIP
+ help
+ This enables support for the Lantiq / Intel GSWIP 2.1 found in
+ the xrx200 / VR9 SoC.
+
+config NET_DSA_MT7530
+ tristate "MediaTek MT753x and MT7621 Ethernet switch support"
+ select NET_DSA_TAG_MTK
+ select MEDIATEK_GE_PHY
+ help
+ This enables support for the MediaTek MT7530, MT7531, and MT7621
+ Ethernet switch chips.
+
+config NET_DSA_MV88E6060
+ tristate "Marvell 88E6060 ethernet switch chip support"
+ select NET_DSA_TAG_TRAILER
+ help
+ This enables support for the Marvell 88E6060 ethernet switch
+ chip.
+
+source "drivers/net/dsa/microchip/Kconfig"
+
+source "drivers/net/dsa/mv88e6xxx/Kconfig"
+
+source "drivers/net/dsa/ocelot/Kconfig"
+
+source "drivers/net/dsa/qca/Kconfig"
+
+source "drivers/net/dsa/sja1105/Kconfig"
+
+source "drivers/net/dsa/xrs700x/Kconfig"
+
+source "drivers/net/dsa/realtek/Kconfig"
+
+config NET_DSA_RZN1_A5PSW
+ tristate "Renesas RZ/N1 A5PSW Ethernet switch support"
+ depends on OF && ARCH_RZN1
+ select NET_DSA_TAG_RZN1_A5PSW
+ select PCS_RZN1_MIIC
+ help
+ This driver supports the A5PSW switch, which is embedded in the
+ Renesas RZ/N1 SoC.
+
+config NET_DSA_SMSC_LAN9303
+ tristate
+ select NET_DSA_TAG_LAN9303
+ select REGMAP
+ help
+ This enables support for the Microchip LAN9303/LAN9354 3-port ethernet
+ switch chips.
+
+config NET_DSA_SMSC_LAN9303_I2C
+ tristate "SMSC/Microchip LAN9303 3-ports 10/100 ethernet switch in I2C managed mode"
+ depends on I2C
+ depends on VLAN_8021Q || VLAN_8021Q=n
+ select NET_DSA_SMSC_LAN9303
+ select REGMAP_I2C
+ help
+ Enable access functions if the SMSC/Microchip LAN9303 is configured
+ for I2C managed mode.
+
+config NET_DSA_SMSC_LAN9303_MDIO
+ tristate "Microchip LAN9303/LAN9354 3-ports 10/100 ethernet switch in MDIO managed mode"
+ select NET_DSA_SMSC_LAN9303
+ depends on VLAN_8021Q || VLAN_8021Q=n
+ help
+ Enable access functions if the Microchip LAN9303/LAN9354 is configured
+ for MDIO managed mode.
+
+config NET_DSA_VITESSE_VSC73XX
+ tristate
+ select FIXED_PHY
+ select VITESSE_PHY
+ select GPIOLIB
+ help
+ This enables support for the Vitesse VSC7385, VSC7388,
+ VSC7395 and VSC7398 SparX integrated ethernet switches.
+
+config NET_DSA_VITESSE_VSC73XX_SPI
+ tristate "Vitesse VSC7385/7388/7395/7398 SPI mode support"
+ depends on SPI
+ select NET_DSA_VITESSE_VSC73XX
+ help
+ This enables support for the Vitesse VSC7385, VSC7388, VSC7395
+ and VSC7398 SparX integrated ethernet switches in SPI managed mode.
+
+config NET_DSA_VITESSE_VSC73XX_PLATFORM
+ tristate "Vitesse VSC7385/7388/7395/7398 Platform mode support"
+ depends on HAS_IOMEM
+ select NET_DSA_VITESSE_VSC73XX
+ help
+ This enables support for the Vitesse VSC7385, VSC7388, VSC7395
+ and VSC7398 SparX integrated ethernet switches, connected over
+ a CPU-attached address bus, working in memory-mapped I/O mode.
+endmenu
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
new file mode 100644
index 000000000..16eb879e0
--- /dev/null
+++ b/drivers/net/dsa/Makefile
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm-sf2.o
+bcm-sf2-objs := bcm_sf2.o bcm_sf2_cfp.o
+obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o
+ifdef CONFIG_NET_DSA_LOOP
+obj-$(CONFIG_FIXED_PHY) += dsa_loop_bdinfo.o
+endif
+obj-$(CONFIG_NET_DSA_LANTIQ_GSWIP) += lantiq_gswip.o
+obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
+obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
+obj-$(CONFIG_NET_DSA_RZN1_A5PSW) += rzn1_a5psw.o
+obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o
+obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o
+obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o
+obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX) += vitesse-vsc73xx-core.o
+obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX_PLATFORM) += vitesse-vsc73xx-platform.o
+obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX_SPI) += vitesse-vsc73xx-spi.o
+obj-y += b53/
+obj-y += hirschmann/
+obj-y += microchip/
+obj-y += mv88e6xxx/
+obj-y += ocelot/
+obj-y += qca/
+obj-y += realtek/
+obj-y += sja1105/
+obj-y += xrs700x/
diff --git a/drivers/net/dsa/b53/Kconfig b/drivers/net/dsa/b53/Kconfig
new file mode 100644
index 000000000..90b525160
--- /dev/null
+++ b/drivers/net/dsa/b53/Kconfig
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig B53
+ tristate "Broadcom BCM53xx managed switch support"
+ depends on NET_DSA
+ select NET_DSA_TAG_BRCM
+ select NET_DSA_TAG_BRCM_LEGACY
+ select NET_DSA_TAG_BRCM_PREPEND
+ help
+ This driver adds support for Broadcom managed switch chips. It supports
+ BCM5325E, BCM5365, BCM539x, BCM53115 and BCM53125 as well as BCM63XX
+ integrated switches.
+
+config B53_SPI_DRIVER
+ tristate "B53 SPI connected switch driver"
+ depends on B53 && SPI
+ help
+ Select to enable support for registering switches configured through SPI.
+
+config B53_MDIO_DRIVER
+ tristate "B53 MDIO connected switch driver"
+ depends on B53
+ help
+ Select to enable support for registering switches configured through MDIO.
+
+config B53_MMAP_DRIVER
+ tristate "B53 MMAP connected switch driver"
+ depends on B53 && HAS_IOMEM
+ default BCM63XX || BMIPS_GENERIC
+ help
+ Select to enable support for memory-mapped switches like the BCM63XX
+ integrated switches.
+
+config B53_SRAB_DRIVER
+ tristate "B53 SRAB connected switch driver"
+ depends on B53 && HAS_IOMEM
+ depends on B53_SERDES || !B53_SERDES
+ default ARCH_BCM_IPROC
+ help
+ Select to enable support for memory-mapped Switch Register Access
+ Bridge (SRAB) registers, as found on the BCM53010.
+
+config B53_SERDES
+ tristate "B53 SerDes support"
+ depends on B53
+ default ARCH_BCM_NSP
+ help
+ Select to enable support for SerDes on e.g. Northstar Plus SoCs.
diff --git a/drivers/net/dsa/b53/Makefile b/drivers/net/dsa/b53/Makefile
new file mode 100644
index 000000000..b1be13023
--- /dev/null
+++ b/drivers/net/dsa/b53/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_B53) += b53_common.o
+
+obj-$(CONFIG_B53_SPI_DRIVER) += b53_spi.o
+obj-$(CONFIG_B53_MDIO_DRIVER) += b53_mdio.o
+obj-$(CONFIG_B53_MMAP_DRIVER) += b53_mmap.o
+obj-$(CONFIG_B53_SRAB_DRIVER) += b53_srab.o
+obj-$(CONFIG_B53_SERDES) += b53_serdes.o
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
new file mode 100644
index 000000000..59cdfc51c
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -0,0 +1,2774 @@
+/*
+ * B53 switch driver main logic
+ *
+ * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
+ * Copyright (C) 2016 Florian Fainelli <f.fainelli@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_data/b53.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <net/dsa.h>
+
+#include "b53_regs.h"
+#include "b53_priv.h"
+
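+/* Describes a single MIB counter: @size is the register width in bytes
+ * (4 or 8), @offset is the location within the per-port MIB page, and
+ * @name is the string reported through ethtool.
+ */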
+struct b53_mib_desc {
+ u8 size;
+ u8 offset;
+ const char *name;
+};
+
+/* BCM5365 MIB counters */
+static const struct b53_mib_desc b53_mibs_65[] = {
+ { 8, 0x00, "TxOctets" },
+ { 4, 0x08, "TxDropPkts" },
+ { 4, 0x10, "TxBroadcastPkts" },
+ { 4, 0x14, "TxMulticastPkts" },
+ { 4, 0x18, "TxUnicastPkts" },
+ { 4, 0x1c, "TxCollisions" },
+ { 4, 0x20, "TxSingleCollision" },
+ { 4, 0x24, "TxMultipleCollision" },
+ { 4, 0x28, "TxDeferredTransmit" },
+ { 4, 0x2c, "TxLateCollision" },
+ { 4, 0x30, "TxExcessiveCollision" },
+ { 4, 0x38, "TxPausePkts" },
+ { 8, 0x44, "RxOctets" },
+ { 4, 0x4c, "RxUndersizePkts" },
+ { 4, 0x50, "RxPausePkts" },
+ { 4, 0x54, "Pkts64Octets" },
+ { 4, 0x58, "Pkts65to127Octets" },
+ { 4, 0x5c, "Pkts128to255Octets" },
+ { 4, 0x60, "Pkts256to511Octets" },
+ { 4, 0x64, "Pkts512to1023Octets" },
+ { 4, 0x68, "Pkts1024to1522Octets" },
+ { 4, 0x6c, "RxOversizePkts" },
+ { 4, 0x70, "RxJabbers" },
+ { 4, 0x74, "RxAlignmentErrors" },
+ { 4, 0x78, "RxFCSErrors" },
+ { 8, 0x7c, "RxGoodOctets" },
+ { 4, 0x84, "RxDropPkts" },
+ { 4, 0x88, "RxUnicastPkts" },
+ { 4, 0x8c, "RxMulticastPkts" },
+ { 4, 0x90, "RxBroadcastPkts" },
+ { 4, 0x94, "RxSAChanges" },
+ { 4, 0x98, "RxFragments" },
+};
+
+#define B53_MIBS_65_SIZE ARRAY_SIZE(b53_mibs_65)
+
+/* BCM63xx MIB counters */
+static const struct b53_mib_desc b53_mibs_63xx[] = {
+ { 8, 0x00, "TxOctets" },
+ { 4, 0x08, "TxDropPkts" },
+ { 4, 0x0c, "TxQoSPkts" },
+ { 4, 0x10, "TxBroadcastPkts" },
+ { 4, 0x14, "TxMulticastPkts" },
+ { 4, 0x18, "TxUnicastPkts" },
+ { 4, 0x1c, "TxCollisions" },
+ { 4, 0x20, "TxSingleCollision" },
+ { 4, 0x24, "TxMultipleCollision" },
+ { 4, 0x28, "TxDeferredTransmit" },
+ { 4, 0x2c, "TxLateCollision" },
+ { 4, 0x30, "TxExcessiveCollision" },
+ { 4, 0x38, "TxPausePkts" },
+ { 8, 0x3c, "TxQoSOctets" },
+ { 8, 0x44, "RxOctets" },
+ { 4, 0x4c, "RxUndersizePkts" },
+ { 4, 0x50, "RxPausePkts" },
+ { 4, 0x54, "Pkts64Octets" },
+ { 4, 0x58, "Pkts65to127Octets" },
+ { 4, 0x5c, "Pkts128to255Octets" },
+ { 4, 0x60, "Pkts256to511Octets" },
+ { 4, 0x64, "Pkts512to1023Octets" },
+ { 4, 0x68, "Pkts1024to1522Octets" },
+ { 4, 0x6c, "RxOversizePkts" },
+ { 4, 0x70, "RxJabbers" },
+ { 4, 0x74, "RxAlignmentErrors" },
+ { 4, 0x78, "RxFCSErrors" },
+ { 8, 0x7c, "RxGoodOctets" },
+ { 4, 0x84, "RxDropPkts" },
+ { 4, 0x88, "RxUnicastPkts" },
+ { 4, 0x8c, "RxMulticastPkts" },
+ { 4, 0x90, "RxBroadcastPkts" },
+ { 4, 0x94, "RxSAChanges" },
+ { 4, 0x98, "RxFragments" },
+ { 4, 0xa0, "RxSymbolErrors" },
+ { 4, 0xa4, "RxQoSPkts" },
+ { 8, 0xa8, "RxQoSOctets" },
+ { 4, 0xb0, "Pkts1523to2047Octets" },
+ { 4, 0xb4, "Pkts2048to4095Octets" },
+ { 4, 0xb8, "Pkts4096to8191Octets" },
+ { 4, 0xbc, "Pkts8192to9728Octets" },
+ { 4, 0xc0, "RxDiscarded" },
+};
+
+#define B53_MIBS_63XX_SIZE ARRAY_SIZE(b53_mibs_63xx)
+
+/* MIB counters */
+static const struct b53_mib_desc b53_mibs[] = {
+ { 8, 0x00, "TxOctets" },
+ { 4, 0x08, "TxDropPkts" },
+ { 4, 0x10, "TxBroadcastPkts" },
+ { 4, 0x14, "TxMulticastPkts" },
+ { 4, 0x18, "TxUnicastPkts" },
+ { 4, 0x1c, "TxCollisions" },
+ { 4, 0x20, "TxSingleCollision" },
+ { 4, 0x24, "TxMultipleCollision" },
+ { 4, 0x28, "TxDeferredTransmit" },
+ { 4, 0x2c, "TxLateCollision" },
+ { 4, 0x30, "TxExcessiveCollision" },
+ { 4, 0x38, "TxPausePkts" },
+ { 8, 0x50, "RxOctets" },
+ { 4, 0x58, "RxUndersizePkts" },
+ { 4, 0x5c, "RxPausePkts" },
+ { 4, 0x60, "Pkts64Octets" },
+ { 4, 0x64, "Pkts65to127Octets" },
+ { 4, 0x68, "Pkts128to255Octets" },
+ { 4, 0x6c, "Pkts256to511Octets" },
+ { 4, 0x70, "Pkts512to1023Octets" },
+ { 4, 0x74, "Pkts1024to1522Octets" },
+ { 4, 0x78, "RxOversizePkts" },
+ { 4, 0x7c, "RxJabbers" },
+ { 4, 0x80, "RxAlignmentErrors" },
+ { 4, 0x84, "RxFCSErrors" },
+ { 8, 0x88, "RxGoodOctets" },
+ { 4, 0x90, "RxDropPkts" },
+ { 4, 0x94, "RxUnicastPkts" },
+ { 4, 0x98, "RxMulticastPkts" },
+ { 4, 0x9c, "RxBroadcastPkts" },
+ { 4, 0xa0, "RxSAChanges" },
+ { 4, 0xa4, "RxFragments" },
+ { 4, 0xa8, "RxJumboPkts" },
+ { 4, 0xac, "RxSymbolErrors" },
+ { 4, 0xc0, "RxDiscarded" },
+};
+
+#define B53_MIBS_SIZE ARRAY_SIZE(b53_mibs)
+
+static const struct b53_mib_desc b53_mibs_58xx[] = {
+ { 8, 0x00, "TxOctets" },
+ { 4, 0x08, "TxDropPkts" },
+ { 4, 0x0c, "TxQPKTQ0" },
+ { 4, 0x10, "TxBroadcastPkts" },
+ { 4, 0x14, "TxMulticastPkts" },
+ { 4, 0x18, "TxUnicastPKts" },
+ { 4, 0x1c, "TxCollisions" },
+ { 4, 0x20, "TxSingleCollision" },
+ { 4, 0x24, "TxMultipleCollision" },
+ { 4, 0x28, "TxDeferredCollision" },
+ { 4, 0x2c, "TxLateCollision" },
+ { 4, 0x30, "TxExcessiveCollision" },
+ { 4, 0x34, "TxFrameInDisc" },
+ { 4, 0x38, "TxPausePkts" },
+ { 4, 0x3c, "TxQPKTQ1" },
+ { 4, 0x40, "TxQPKTQ2" },
+ { 4, 0x44, "TxQPKTQ3" },
+ { 4, 0x48, "TxQPKTQ4" },
+ { 4, 0x4c, "TxQPKTQ5" },
+ { 8, 0x50, "RxOctets" },
+ { 4, 0x58, "RxUndersizePkts" },
+ { 4, 0x5c, "RxPausePkts" },
+ { 4, 0x60, "RxPkts64Octets" },
+ { 4, 0x64, "RxPkts65to127Octets" },
+ { 4, 0x68, "RxPkts128to255Octets" },
+ { 4, 0x6c, "RxPkts256to511Octets" },
+ { 4, 0x70, "RxPkts512to1023Octets" },
+ { 4, 0x74, "RxPkts1024toMaxPktsOctets" },
+ { 4, 0x78, "RxOversizePkts" },
+ { 4, 0x7c, "RxJabbers" },
+ { 4, 0x80, "RxAlignmentErrors" },
+ { 4, 0x84, "RxFCSErrors" },
+ { 8, 0x88, "RxGoodOctets" },
+ { 4, 0x90, "RxDropPkts" },
+ { 4, 0x94, "RxUnicastPkts" },
+ { 4, 0x98, "RxMulticastPkts" },
+ { 4, 0x9c, "RxBroadcastPkts" },
+ { 4, 0xa0, "RxSAChanges" },
+ { 4, 0xa4, "RxFragments" },
+ { 4, 0xa8, "RxJumboPkt" },
+ { 4, 0xac, "RxSymblErr" },
+ { 4, 0xb0, "InRangeErrCount" },
+ { 4, 0xb4, "OutRangeErrCount" },
+ { 4, 0xb8, "EEELpiEvent" },
+ { 4, 0xbc, "EEELpiDuration" },
+ { 4, 0xc0, "RxDiscard" },
+ { 4, 0xc8, "TxQPKTQ6" },
+ { 4, 0xcc, "TxQPKTQ7" },
+ { 4, 0xd0, "TxPkts64Octets" },
+ { 4, 0xd4, "TxPkts65to127Octets" },
+ { 4, 0xd8, "TxPkts128to255Octets" },
+ { 4, 0xdc, "TxPkts256to511Ocets" },
+ { 4, 0xe0, "TxPkts512to1023Ocets" },
+ { 4, 0xe4, "TxPkts1024toMaxPktOcets" },
+};
+
+#define B53_MIBS_58XX_SIZE ARRAY_SIZE(b53_mibs_58xx)
+
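+/* Kick off a VLAN table access (VTA) operation and poll for completion.
+ * The control register offset is chip-specific and taken from
+ * dev->vta_regs[0]; the start bit self-clears once the hardware is done.
+ */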
+static int b53_do_vlan_op(struct b53_device *dev, u8 op)
+{
+ unsigned int i;
+
+ b53_write8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], VTA_START_CMD | op);
+
+ for (i = 0; i < 10; i++) {
+ u8 vta;
+
+ b53_read8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], &vta);
+ if (!(vta & VTA_START_CMD))
+ return 0;
+
+ usleep_range(100, 200);
+ }
+
+ return -EIO;
+}
+
+static void b53_set_vlan_entry(struct b53_device *dev, u16 vid,
+ struct b53_vlan *vlan)
+{
+ if (is5325(dev)) {
+ u32 entry = 0;
+
+ if (vlan->members) {
+ entry = ((vlan->untag & VA_UNTAG_MASK_25) <<
+ VA_UNTAG_S_25) | vlan->members;
+ if (dev->core_rev >= 3)
+ entry |= VA_VALID_25_R4 | vid << VA_VID_HIGH_S;
+ else
+ entry |= VA_VALID_25;
+ }
+
+ b53_write32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, entry);
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid |
+ VTA_RW_STATE_WR | VTA_RW_OP_EN);
+ } else if (is5365(dev)) {
+ u16 entry = 0;
+
+ if (vlan->members)
+ entry = ((vlan->untag & VA_UNTAG_MASK_65) <<
+ VA_UNTAG_S_65) | vlan->members | VA_VALID_65;
+
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, entry);
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid |
+ VTA_RW_STATE_WR | VTA_RW_OP_EN);
+ } else {
+ b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid);
+ b53_write32(dev, B53_ARLIO_PAGE, dev->vta_regs[2],
+ (vlan->untag << VTE_UNTAG_S) | vlan->members);
+
+ b53_do_vlan_op(dev, VTA_CMD_WRITE);
+ }
+
+ dev_dbg(dev->ds->dev, "VID: %d, members: 0x%04x, untag: 0x%04x\n",
+ vid, vlan->members, vlan->untag);
+}
+
+static void b53_get_vlan_entry(struct b53_device *dev, u16 vid,
+ struct b53_vlan *vlan)
+{
+ if (is5325(dev)) {
+ u32 entry = 0;
+
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid |
+ VTA_RW_STATE_RD | VTA_RW_OP_EN);
+ b53_read32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, &entry);
+
+ if (dev->core_rev >= 3)
+ vlan->valid = !!(entry & VA_VALID_25_R4);
+ else
+ vlan->valid = !!(entry & VA_VALID_25);
+ vlan->members = entry & VA_MEMBER_MASK;
+ vlan->untag = (entry >> VA_UNTAG_S_25) & VA_UNTAG_MASK_25;
+
+ } else if (is5365(dev)) {
+ u16 entry = 0;
+
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid |
+ VTA_RW_STATE_RD | VTA_RW_OP_EN);
+ b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, &entry);
+
+ vlan->valid = !!(entry & VA_VALID_65);
+ vlan->members = entry & VA_MEMBER_MASK;
+ vlan->untag = (entry >> VA_UNTAG_S_65) & VA_UNTAG_MASK_65;
+ } else {
+ u32 entry = 0;
+
+ b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid);
+ b53_do_vlan_op(dev, VTA_CMD_READ);
+ b53_read32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], &entry);
+ vlan->members = entry & VTE_MEMBERS;
+ vlan->untag = (entry >> VTE_UNTAG_S) & VTE_MEMBERS;
+ vlan->valid = true;
+ }
+}
+
+static void b53_set_forwarding(struct b53_device *dev, int enable)
+{
+ u8 mgmt;
+
+ b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
+
+ if (enable)
+ mgmt |= SM_SW_FWD_EN;
+ else
+ mgmt &= ~SM_SW_FWD_EN;
+
+ b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
+
+ /* Include IMP port in dumb forwarding mode
+ */
+ b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
+ mgmt |= B53_MII_DUMB_FWDG_EN;
+ b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
+
+ /* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether
+ * frames should be flooded or not.
+ */
+ b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
+ mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN;
+ b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
+}
+
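+/* Program the global VLAN control registers. @enable turns on 802.1Q
+ * operation, @enable_filtering selects whether VID violations and VLAN
+ * table misses are dropped or forwarded. The VC3/VC4/VC5 register
+ * offsets differ between 5325/5365, 63xx and newer chips, hence the
+ * three access paths below.
+ */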
+static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
+ bool enable_filtering)
+{
+ u8 mgmt, vc0, vc1, vc4 = 0, vc5;
+
+ b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, &vc0);
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, &vc1);
+
+ if (is5325(dev) || is5365(dev)) {
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, &vc5);
+ } else if (is63xx(dev)) {
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, &vc4);
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, &vc5);
+ } else {
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, &vc4);
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5);
+ }
+
+ if (enable) {
+ vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
+ vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
+ vc4 &= ~VC4_ING_VID_CHECK_MASK;
+ if (enable_filtering) {
+ vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
+ vc5 |= VC5_DROP_VTABLE_MISS;
+ } else {
+ vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
+ vc5 &= ~VC5_DROP_VTABLE_MISS;
+ }
+
+ if (is5325(dev))
+ vc0 &= ~VC0_RESERVED_1;
+
+ if (is5325(dev) || is5365(dev))
+ vc1 |= VC1_RX_MCST_TAG_EN;
+
+ } else {
+ vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID);
+ vc1 &= ~(VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN);
+ vc4 &= ~VC4_ING_VID_CHECK_MASK;
+ vc5 &= ~VC5_DROP_VTABLE_MISS;
+
+ if (is5325(dev) || is5365(dev))
+ vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
+ else
+ vc4 |= VC4_ING_VID_VIO_TO_IMP << VC4_ING_VID_CHECK_S;
+
+ if (is5325(dev) || is5365(dev))
+ vc1 &= ~VC1_RX_MCST_TAG_EN;
+ }
+
+ if (!is5325(dev) && !is5365(dev))
+ vc5 &= ~VC5_VID_FFF_EN;
+
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, vc0);
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, vc1);
+
+ if (is5325(dev) || is5365(dev)) {
+ /* enable the high 8 bit vid check on 5325 */
+ if (is5325(dev) && enable)
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3,
+ VC3_HIGH_8BIT_EN);
+ else
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0);
+
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, vc4);
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, vc5);
+ } else if (is63xx(dev)) {
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3_63XX, 0);
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, vc4);
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, vc5);
+ } else {
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0);
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, vc4);
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, vc5);
+ }
+
+ b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
+
+ dev->vlan_enabled = enable;
+
+ dev_dbg(dev->dev, "Port %d VLAN enabled: %d, filtering: %d\n",
+ port, enable, enable_filtering);
+}
+
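+/* Enable or disable jumbo frames by programming the per-port jumbo
+ * enable mask and the maximum frame size. Not supported on 5325/5365.
+ */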
+static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100)
+{
+ u32 port_mask = 0;
+ u16 max_size = JMS_MIN_SIZE;
+
+ if (is5325(dev) || is5365(dev))
+ return -EINVAL;
+
+ if (enable) {
+ port_mask = dev->enabled_ports;
+ max_size = JMS_MAX_SIZE;
+ if (allow_10_100)
+ port_mask |= JPM_10_100_JUMBO_EN;
+ }
+
+ b53_write32(dev, B53_JUMBO_PAGE, dev->jumbo_pm_reg, port_mask);
+ return b53_write16(dev, B53_JUMBO_PAGE, dev->jumbo_size_reg, max_size);
+}
+
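+/* Flush dynamic ARL entries; @mask narrows the scope (e.g. FAST_AGE_PORT
+ * or FAST_AGE_VLAN, scoped by the respective control registers), polling
+ * until the hardware clears FAST_AGE_DONE.
+ */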
+static int b53_flush_arl(struct b53_device *dev, u8 mask)
+{
+ unsigned int i;
+
+ b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
+ FAST_AGE_DONE | FAST_AGE_DYNAMIC | mask);
+
+ for (i = 0; i < 10; i++) {
+ u8 fast_age_ctrl;
+
+ b53_read8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
+ &fast_age_ctrl);
+
+ if (!(fast_age_ctrl & FAST_AGE_DONE))
+ goto out;
+
+ msleep(1);
+ }
+
+ return -ETIMEDOUT;
+out:
+ /* Only age dynamic entries (default behavior) */
+ b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, FAST_AGE_DYNAMIC);
+ return 0;
+}
+
+static int b53_fast_age_port(struct b53_device *dev, int port)
+{
+ b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_PORT_CTRL, port);
+
+ return b53_flush_arl(dev, FAST_AGE_PORT);
+}
+
+static int b53_fast_age_vlan(struct b53_device *dev, u16 vid)
+{
+ b53_write16(dev, B53_CTRL_PAGE, B53_FAST_AGE_VID_CTRL, vid);
+
+ return b53_flush_arl(dev, FAST_AGE_VLAN);
+}
+
+void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
+{
+ struct b53_device *dev = ds->priv;
+ unsigned int i;
+ u16 pvlan;
+
+ /* Enable the IMP port to be in the same VLAN as the other ports
+ * on a per-port basis such that we only have Port i and IMP in
+ * the same VLAN.
+ */
+ b53_for_each_port(dev, i) {
+ b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &pvlan);
+ pvlan |= BIT(cpu_port);
+ b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), pvlan);
+ }
+}
+EXPORT_SYMBOL(b53_imp_vlan_setup);
+
+static void b53_port_set_ucast_flood(struct b53_device *dev, int port,
+ bool unicast)
+{
+ u16 uc;
+
+ b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc);
+ if (unicast)
+ uc |= BIT(port);
+ else
+ uc &= ~BIT(port);
+ b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc);
+}
+
+static void b53_port_set_mcast_flood(struct b53_device *dev, int port,
+ bool multicast)
+{
+ u16 mc;
+
+ b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc);
+ if (multicast)
+ mc |= BIT(port);
+ else
+ mc &= ~BIT(port);
+ b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc);
+
+ b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc);
+ if (multicast)
+ mc |= BIT(port);
+ else
+ mc &= ~BIT(port);
+ b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
+}
+
+static void b53_port_set_learning(struct b53_device *dev, int port,
+ bool learning)
+{
+ u16 reg;
+
+ b53_read16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, &reg);
+ if (learning)
+ reg &= ~BIT(port);
+ else
+ reg |= BIT(port);
+ b53_write16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, reg);
+}
+
+int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
+{
+ struct b53_device *dev = ds->priv;
+ unsigned int cpu_port;
+ int ret = 0;
+ u16 pvlan;
+
+ if (!dsa_is_user_port(ds, port))
+ return 0;
+
+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+
+ b53_port_set_ucast_flood(dev, port, true);
+ b53_port_set_mcast_flood(dev, port, true);
+ b53_port_set_learning(dev, port, false);
+
+ if (dev->ops->irq_enable)
+ ret = dev->ops->irq_enable(dev, port);
+ if (ret)
+ return ret;
+
+ /* Clear the Rx and Tx disable bits and set to no spanning tree */
+ b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), 0);
+
+ /* Set this port, and only this one, to be in the default VLAN;
+ * if the port is a member of a bridge, restore the membership it
+ * had prior to being brought down.
+ */
+ b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
+ pvlan &= ~0x1ff;
+ pvlan |= BIT(port);
+ pvlan |= dev->ports[port].vlan_ctl_mask;
+ b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
+
+ b53_imp_vlan_setup(ds, cpu_port);
+
+ /* If EEE was enabled, restore it */
+ if (dev->ports[port].eee.eee_enabled)
+ b53_eee_enable_set(ds, port, true);
+
+ return 0;
+}
+EXPORT_SYMBOL(b53_enable_port);
+
+void b53_disable_port(struct dsa_switch *ds, int port)
+{
+ struct b53_device *dev = ds->priv;
+ u8 reg;
+
+ /* Disable Tx/Rx for the port */
+ b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
+ reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE;
+ b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
+
+ if (dev->ops->irq_disable)
+ dev->ops->irq_disable(dev, port);
+}
+EXPORT_SYMBOL(b53_disable_port);
+
+void b53_brcm_hdr_setup(struct dsa_switch *ds, int port)
+{
+ struct b53_device *dev = ds->priv;
+ bool tag_en = !(dev->tag_protocol == DSA_TAG_PROTO_NONE);
+ u8 hdr_ctl, val;
+ u16 reg;
+
+ /* Resolve which bit controls the Broadcom tag */
+ switch (port) {
+ case 8:
+ val = BRCM_HDR_P8_EN;
+ break;
+ case 7:
+ val = BRCM_HDR_P7_EN;
+ break;
+ case 5:
+ val = BRCM_HDR_P5_EN;
+ break;
+ default:
+ val = 0;
+ break;
+ }
+
+ /* Enable management mode if tagging is requested */
+ b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &hdr_ctl);
+ if (tag_en)
+ hdr_ctl |= SM_SW_FWD_MODE;
+ else
+ hdr_ctl &= ~SM_SW_FWD_MODE;
+ b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, hdr_ctl);
+
+ /* Configure the appropriate IMP port */
+ b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &hdr_ctl);
+ if (port == 8)
+ hdr_ctl |= GC_FRM_MGMT_PORT_MII;
+ else if (port == 5)
+ hdr_ctl |= GC_FRM_MGMT_PORT_M;
+ b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, hdr_ctl);
+
+ /* Enable Broadcom tags for IMP port */
+ b53_read8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, &hdr_ctl);
+ if (tag_en)
+ hdr_ctl |= val;
+ else
+ hdr_ctl &= ~val;
+ b53_write8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, hdr_ctl);
+
+ /* Registers below are only accessible on newer devices */
+ if (!is58xx(dev))
+ return;
+
+ /* Enable reception of Broadcom tags for CPU TX (switch RX) to
+ * allow us to tag outgoing frames
+ */
+ b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, &reg);
+ if (tag_en)
+ reg &= ~BIT(port);
+ else
+ reg |= BIT(port);
+ b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, reg);
+
+ /* Enable transmission of Broadcom tags from the switch (CPU RX) to
+ * allow delivering frames to the per-port net_devices
+ */
+ b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, &reg);
+ if (tag_en)
+ reg &= ~BIT(port);
+ else
+ reg |= BIT(port);
+ b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, reg);
+}
+EXPORT_SYMBOL(b53_brcm_hdr_setup);
+
+static void b53_enable_cpu_port(struct b53_device *dev, int port)
+{
+ u8 port_ctrl;
+
+ /* BCM5325 CPU port is at 8 */
+ if ((is5325(dev) || is5365(dev)) && port == B53_CPU_PORT_25)
+ port = B53_CPU_PORT;
+
+ port_ctrl = PORT_CTRL_RX_BCST_EN |
+ PORT_CTRL_RX_MCST_EN |
+ PORT_CTRL_RX_UCST_EN;
+ b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl);
+
+ b53_brcm_hdr_setup(dev->ds, port);
+
+ b53_port_set_ucast_flood(dev, port, true);
+ b53_port_set_mcast_flood(dev, port, true);
+ b53_port_set_learning(dev, port, false);
+}
+
+static void b53_enable_mib(struct b53_device *dev)
+{
+ u8 gc;
+
+ b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
+ gc &= ~(GC_RESET_MIB | GC_MIB_AC_EN);
+ b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
+}
+
+static u16 b53_default_pvid(struct b53_device *dev)
+{
+ if (is5325(dev) || is5365(dev))
+ return 1;
+ else
+ return 0;
+}
+
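+/* With DSA_TAG_PROTO_NONE there is no Broadcom tag to convey the VLAN,
+ * so the CPU port must remain a tagged member of every VLAN.
+ */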
+static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port)
+{
+ struct b53_device *dev = ds->priv;
+
+ return dev->tag_protocol == DSA_TAG_PROTO_NONE && dsa_is_cpu_port(ds, port);
+}
+
+int b53_configure_vlan(struct dsa_switch *ds)
+{
+ struct b53_device *dev = ds->priv;
+ struct b53_vlan vl = { 0 };
+ struct b53_vlan *v;
+ int i, def_vid;
+ u16 vid;
+
+ def_vid = b53_default_pvid(dev);
+
+ /* clear all vlan entries */
+ if (is5325(dev) || is5365(dev)) {
+ for (i = def_vid; i < dev->num_vlans; i++)
+ b53_set_vlan_entry(dev, i, &vl);
+ } else {
+ b53_do_vlan_op(dev, VTA_CMD_CLEAR);
+ }
+
+ b53_enable_vlan(dev, -1, dev->vlan_enabled, ds->vlan_filtering);
+
+ /* Create an untagged VLAN entry for the default PVID in case
+ * CONFIG_VLAN_8021Q is disabled and there are no calls to
+ * dsa_slave_vlan_rx_add_vid() to create the default VLAN
+ * entry. Do this only when the tagging protocol is not
+ * DSA_TAG_PROTO_NONE
+ */
+ b53_for_each_port(dev, i) {
+ v = &dev->vlans[def_vid];
+ v->members |= BIT(i);
+ if (!b53_vlan_port_needs_forced_tagged(ds, i))
+ v->untag = v->members;
+ b53_write16(dev, B53_VLAN_PAGE,
+ B53_VLAN_PORT_DEF_TAG(i), def_vid);
+ }
+
+ /* Upon initial call we have not set up any VLANs, but upon
+ * system resume, we need to restore all VLAN entries.
+ */
+ for (vid = def_vid; vid < dev->num_vlans; vid++) {
+ v = &dev->vlans[vid];
+
+ if (!v->members)
+ continue;
+
+ b53_set_vlan_entry(dev, vid, v);
+ b53_fast_age_vlan(dev, vid);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(b53_configure_vlan);
+
+static void b53_switch_reset_gpio(struct b53_device *dev)
+{
+ int gpio = dev->reset_gpio;
+
+ if (gpio < 0)
+ return;
+
+ /* Reset sequence: RESET low(50ms)->high(20ms)
+ */
+ gpio_set_value(gpio, 0);
+ mdelay(50);
+
+ gpio_set_value(gpio, 1);
+ mdelay(20);
+
+ dev->current_page = 0xff;
+}
+
+static int b53_switch_reset(struct b53_device *dev)
+{
+ unsigned int timeout = 1000;
+ u8 mgmt, reg;
+
+ b53_switch_reset_gpio(dev);
+
+ if (is539x(dev)) {
+ b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x83);
+ b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00);
+ }
+
+ /* This is specific to 58xx devices here, do not use is58xx() which
+ * covers the larger Starfighter 2 family, including 7445/7278, which
+ * still use this driver as a library and need to perform the reset
+ * earlier.
+ */
+ if (dev->chip_id == BCM58XX_DEVICE_ID ||
+ dev->chip_id == BCM583XX_DEVICE_ID) {
+ b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
+ reg |= SW_RST | EN_SW_RST | EN_CH_RST;
+ b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, reg);
+
+ do {
+ b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
+ if (!(reg & SW_RST))
+ break;
+
+ usleep_range(1000, 2000);
+ } while (timeout-- > 0);
+
+ if (timeout == 0) {
+ dev_err(dev->dev,
+ "Timeout waiting for SW_RST to clear!\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
+
+ if (!(mgmt & SM_SW_FWD_EN)) {
+ mgmt &= ~SM_SW_FWD_MODE;
+ mgmt |= SM_SW_FWD_EN;
+
+ b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
+ b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
+
+ if (!(mgmt & SM_SW_FWD_EN)) {
+ dev_err(dev->dev, "Failed to enable switch!\n");
+ return -EINVAL;
+ }
+ }
+
+ b53_enable_mib(dev);
+
+ return b53_flush_arl(dev, FAST_AGE_STATIC);
+}
+
+static int b53_phy_read16(struct dsa_switch *ds, int addr, int reg)
+{
+ struct b53_device *priv = ds->priv;
+ u16 value = 0;
+ int ret;
+
+ if (priv->ops->phy_read16)
+ ret = priv->ops->phy_read16(priv, addr, reg, &value);
+ else
+ ret = b53_read16(priv, B53_PORT_MII_PAGE(addr),
+ reg * 2, &value);
+
+ return ret ? ret : value;
+}
+
+static int b53_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
+{
+ struct b53_device *priv = ds->priv;
+
+ if (priv->ops->phy_write16)
+ return priv->ops->phy_write16(priv, addr, reg, val);
+
+ return b53_write16(priv, B53_PORT_MII_PAGE(addr), reg * 2, val);
+}
+
+static int b53_reset_switch(struct b53_device *priv)
+{
+ /* reset vlans */
+ memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans);
+ memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports);
+
+ priv->serdes_lane = B53_INVALID_LANE;
+
+ return b53_switch_reset(priv);
+}
+
+static int b53_apply_config(struct b53_device *priv)
+{
+ /* disable switching */
+ b53_set_forwarding(priv, 0);
+
+ b53_configure_vlan(priv->ds);
+
+ /* enable switching */
+ b53_set_forwarding(priv, 1);
+
+ return 0;
+}
+
+static void b53_reset_mib(struct b53_device *priv)
+{
+ u8 gc;
+
+ b53_read8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
+
+ b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc | GC_RESET_MIB);
+ msleep(1);
+ b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc & ~GC_RESET_MIB);
+ msleep(1);
+}
+
+static const struct b53_mib_desc *b53_get_mib(struct b53_device *dev)
+{
+ if (is5365(dev))
+ return b53_mibs_65;
+ else if (is63xx(dev))
+ return b53_mibs_63xx;
+ else if (is58xx(dev))
+ return b53_mibs_58xx;
+ else
+ return b53_mibs;
+}
+
+static unsigned int b53_get_mib_size(struct b53_device *dev)
+{
+ if (is5365(dev))
+ return B53_MIBS_65_SIZE;
+ else if (is63xx(dev))
+ return B53_MIBS_63XX_SIZE;
+ else if (is58xx(dev))
+ return B53_MIBS_58XX_SIZE;
+ else
+ return B53_MIBS_SIZE;
+}
+
+static struct phy_device *b53_get_phy_device(struct dsa_switch *ds, int port)
+{
+ /* These ports typically do not have built-in PHYs */
+ switch (port) {
+ case B53_CPU_PORT_25:
+ case 7:
+ case B53_CPU_PORT:
+ return NULL;
+ }
+
+ return mdiobus_get_phy(ds->slave_mii_bus, port);
+}
+
+void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data)
+{
+ struct b53_device *dev = ds->priv;
+ const struct b53_mib_desc *mibs = b53_get_mib(dev);
+ unsigned int mib_size = b53_get_mib_size(dev);
+ struct phy_device *phydev;
+ unsigned int i;
+
+ if (stringset == ETH_SS_STATS) {
+ for (i = 0; i < mib_size; i++)
+ strscpy(data + i * ETH_GSTRING_LEN,
+ mibs[i].name, ETH_GSTRING_LEN);
+ } else if (stringset == ETH_SS_PHY_STATS) {
+ phydev = b53_get_phy_device(ds, port);
+ if (!phydev)
+ return;
+
+ phy_ethtool_get_strings(phydev, data);
+ }
+}
+EXPORT_SYMBOL(b53_get_strings);
+
+void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
+{
+ struct b53_device *dev = ds->priv;
+ const struct b53_mib_desc *mibs = b53_get_mib(dev);
+ unsigned int mib_size = b53_get_mib_size(dev);
+ const struct b53_mib_desc *s;
+ unsigned int i;
+ u64 val = 0;
+
+ if (is5365(dev) && port == 5)
+ port = 8;
+
+ mutex_lock(&dev->stats_mutex);
+
+ for (i = 0; i < mib_size; i++) {
+ s = &mibs[i];
+
+ if (s->size == 8) {
+ b53_read64(dev, B53_MIB_PAGE(port), s->offset, &val);
+ } else {
+ u32 val32;
+
+ b53_read32(dev, B53_MIB_PAGE(port), s->offset,
+ &val32);
+ val = val32;
+ }
+ data[i] = (u64)val;
+ }
+
+ mutex_unlock(&dev->stats_mutex);
+}
+EXPORT_SYMBOL(b53_get_ethtool_stats);
+
+void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data)
+{
+ struct phy_device *phydev;
+
+ phydev = b53_get_phy_device(ds, port);
+ if (!phydev)
+ return;
+
+ phy_ethtool_get_stats(phydev, NULL, data);
+}
+EXPORT_SYMBOL(b53_get_ethtool_phy_stats);
+
+int b53_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ struct b53_device *dev = ds->priv;
+ struct phy_device *phydev;
+
+ if (sset == ETH_SS_STATS) {
+ return b53_get_mib_size(dev);
+ } else if (sset == ETH_SS_PHY_STATS) {
+ phydev = b53_get_phy_device(ds, port);
+ if (!phydev)
+ return 0;
+
+ return phy_ethtool_get_sset_count(phydev);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(b53_get_sset_count);
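+
+/* The exported b53_* helpers above are reused by drivers layered on
+ * top of this library (e.g. bcm_sf2), which plug them straight into
+ * their own dsa_switch_ops, roughly:
+ *
+ *	static const struct dsa_switch_ops bcm_sf2_ops = {
+ *		.get_strings		= b53_get_strings,
+ *		.get_ethtool_stats	= b53_get_ethtool_stats,
+ *		.get_sset_count		= b53_get_sset_count,
+ *		...
+ *	};
+ */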
+
+enum b53_devlink_resource_id {
+ B53_DEVLINK_PARAM_ID_VLAN_TABLE,
+};
+
+static u64 b53_devlink_vlan_table_get(void *priv)
+{
+ struct b53_device *dev = priv;
+ struct b53_vlan *vl;
+ unsigned int i;
+ u64 count = 0;
+
+ for (i = 0; i < dev->num_vlans; i++) {
+ vl = &dev->vlans[i];
+ if (vl->members)
+ count++;
+ }
+
+ return count;
+}
+
+int b53_setup_devlink_resources(struct dsa_switch *ds)
+{
+ struct devlink_resource_size_params size_params;
+ struct b53_device *dev = ds->priv;
+ int err;
+
+ devlink_resource_size_params_init(&size_params, dev->num_vlans,
+ dev->num_vlans,
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ err = dsa_devlink_resource_register(ds, "VLAN", dev->num_vlans,
+ B53_DEVLINK_PARAM_ID_VLAN_TABLE,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
+ if (err)
+ goto out;
+
+ dsa_devlink_resource_occ_get_register(ds,
+ B53_DEVLINK_PARAM_ID_VLAN_TABLE,
+ b53_devlink_vlan_table_get, dev);
+
+ return 0;
+out:
+ dsa_devlink_resources_unregister(ds);
+ return err;
+}
+EXPORT_SYMBOL(b53_setup_devlink_resources);
+
+static int b53_setup(struct dsa_switch *ds)
+{
+ struct b53_device *dev = ds->priv;
+ unsigned int port;
+ int ret;
+
+ /* Request bridge PVID untagged when DSA_TAG_PROTO_NONE is set,
+ * which forces the CPU port to be tagged in all VLANs.
+ */
+ ds->untag_bridge_pvid = dev->tag_protocol == DSA_TAG_PROTO_NONE;
+
+ ret = b53_reset_switch(dev);
+ if (ret) {
+ dev_err(ds->dev, "failed to reset switch\n");
+ return ret;
+ }
+
+ b53_reset_mib(dev);
+
+ ret = b53_apply_config(dev);
+ if (ret) {
+ dev_err(ds->dev, "failed to apply configuration\n");
+ return ret;
+ }
+
+ /* Configure IMP/CPU port, disable all other ports. Enabled
+ * ports will be configured with .port_enable
+ */
+ for (port = 0; port < dev->num_ports; port++) {
+ if (dsa_is_cpu_port(ds, port))
+ b53_enable_cpu_port(dev, port);
+ else
+ b53_disable_port(ds, port);
+ }
+
+ return b53_setup_devlink_resources(ds);
+}
+
+static void b53_teardown(struct dsa_switch *ds)
+{
+ dsa_devlink_resources_unregister(ds);
+}
+
+static void b53_force_link(struct b53_device *dev, int port, int link)
+{
+ u8 reg, val, off;
+
+ /* Override the port settings */
+ if (port == dev->imp_port) {
+ off = B53_PORT_OVERRIDE_CTRL;
+ val = PORT_OVERRIDE_EN;
+ } else {
+ off = B53_GMII_PORT_OVERRIDE_CTRL(port);
+ val = GMII_PO_EN;
+ }
+
+ b53_read8(dev, B53_CTRL_PAGE, off, &reg);
+ reg |= val;
+ if (link)
+ reg |= PORT_OVERRIDE_LINK;
+ else
+ reg &= ~PORT_OVERRIDE_LINK;
+ b53_write8(dev, B53_CTRL_PAGE, off, reg);
+}
+
+static void b53_force_port_config(struct b53_device *dev, int port,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ u8 reg, val, off;
+
+ /* Override the port settings */
+ if (port == dev->imp_port) {
+ off = B53_PORT_OVERRIDE_CTRL;
+ val = PORT_OVERRIDE_EN;
+ } else {
+ off = B53_GMII_PORT_OVERRIDE_CTRL(port);
+ val = GMII_PO_EN;
+ }
+
+ b53_read8(dev, B53_CTRL_PAGE, off, &reg);
+ reg |= val;
+ if (duplex == DUPLEX_FULL)
+ reg |= PORT_OVERRIDE_FULL_DUPLEX;
+ else
+ reg &= ~PORT_OVERRIDE_FULL_DUPLEX;
+
+ switch (speed) {
+ case 2000:
+ reg |= PORT_OVERRIDE_SPEED_2000M;
+ fallthrough;
+ case SPEED_1000:
+ reg |= PORT_OVERRIDE_SPEED_1000M;
+ break;
+ case SPEED_100:
+ reg |= PORT_OVERRIDE_SPEED_100M;
+ break;
+ case SPEED_10:
+ reg |= PORT_OVERRIDE_SPEED_10M;
+ break;
+ default:
+ dev_err(dev->dev, "unknown speed: %d\n", speed);
+ return;
+ }
+
+ if (rx_pause)
+ reg |= PORT_OVERRIDE_RX_FLOW;
+ if (tx_pause)
+ reg |= PORT_OVERRIDE_TX_FLOW;
+
+ b53_write8(dev, B53_CTRL_PAGE, off, reg);
+}
+
+static void b53_adjust_link(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct b53_device *dev = ds->priv;
+ struct ethtool_eee *p = &dev->ports[port].eee;
+ u8 rgmii_ctrl = 0, reg = 0, off;
+ bool tx_pause = false;
+ bool rx_pause = false;
+
+ if (!phy_is_pseudo_fixed_link(phydev))
+ return;
+
+ /* Enable flow control on BCM5301x's CPU port */
+ if (is5301x(dev) && dsa_is_cpu_port(ds, port))
+ tx_pause = rx_pause = true;
+
+ if (phydev->pause) {
+ if (phydev->asym_pause)
+ tx_pause = true;
+ rx_pause = true;
+ }
+
+ b53_force_port_config(dev, port, phydev->speed, phydev->duplex,
+ tx_pause, rx_pause);
+ b53_force_link(dev, port, phydev->link);
+
+ if (is531x5(dev) && phy_interface_is_rgmii(phydev)) {
+ if (port == dev->imp_port)
+ off = B53_RGMII_CTRL_IMP;
+ else
+ off = B53_RGMII_CTRL_P(port);
+
+ /* Configure the port RGMII clock delay with the DLL disabled and
+ * tx_clk aligned timing (restoring the reset defaults)
+ */
+ b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
+ rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC |
+ RGMII_CTRL_TIMING_SEL);
+
+ /* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make
+ * sure that we enable the port TX clock internal delay to
+ * account for this internal delay that is inserted, otherwise
+ * the switch won't be able to receive correctly.
+ *
+ * PHY_INTERFACE_MODE_RGMII means that we are not introducing
+ * any delay on either transmission or reception, so the
+ * BCM53125 must be configured accordingly to account for the
+ * lack of delay and introduce the delays itself.
+ *
+ * The BCM53125 switch has its RX clock and TX clock control
+ * swapped, hence the reason why we modify the TX clock path in
+ * the "RGMII" case
+ */
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+ rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC;
+ rgmii_ctrl |= RGMII_CTRL_TIMING_SEL;
+ b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
+
+ dev_info(ds->dev, "Configured port %d for %s\n", port,
+ phy_modes(phydev->interface));
+ }
+
+ /* configure MII port if necessary */
+ if (is5325(dev)) {
+ b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
+ &reg);
+
+ /* reverse mii needs to be enabled */
+ if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
+ b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
+ reg | PORT_OVERRIDE_RV_MII_25);
+ b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
+ &reg);
+
+ if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
+ dev_err(ds->dev,
+ "Failed to enable reverse MII mode\n");
+ return;
+ }
+ }
+ }
+
+ /* Re-negotiate EEE if it was enabled already */
+ p->eee_enabled = b53_eee_init(ds, port, phydev);
+}
+
+void b53_port_event(struct dsa_switch *ds, int port)
+{
+ struct b53_device *dev = ds->priv;
+ bool link;
+ u16 sts;
+
+ b53_read16(dev, B53_STAT_PAGE, B53_LINK_STAT, &sts);
+ link = !!(sts & BIT(port));
+ dsa_port_phylink_mac_change(ds, port, link);
+}
+EXPORT_SYMBOL(b53_port_event);
+
+static void b53_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ struct b53_device *dev = ds->priv;
+
+ /* Internal ports need GMII for PHYLIB */
+ __set_bit(PHY_INTERFACE_MODE_GMII, config->supported_interfaces);
+
+ /* These switches appear to support MII and RevMII too, but beyond
+ * this, the code gives very few clues. FIXME: We probably need more
+ * interface modes here.
+ *
+ * According to b53_srab_mux_init(), ports 3..5 can support:
+ * SGMII, MII, GMII, RGMII or INTERNAL depending on the MUX setting.
+ * However, the interface mode read from the MUX configuration is
+ * not passed back to DSA, so phylink uses NA.
+ * DT can specify RGMII for ports 0, 1.
+ * For MDIO, port 8 can be RGMII_TXID.
+ */
+ __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_REVMII, config->supported_interfaces);
+
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100;
+
+ /* 5325/5365 are not capable of gigabit speeds, everything else is.
+ * Note: the original code also excluded Gigabit for MII, RevMII
+ * and 802.3z modes. MII and RevMII are not able to work above 100M,
+ * so will be excluded by the generic validator implementation.
+ * However, the exclusion of Gigabit for 802.3z just seems wrong.
+ */
+ if (!(is5325(dev) || is5365(dev)))
+ config->mac_capabilities |= MAC_1000;
+
+ /* Get the implementation specific capabilities */
+ if (dev->ops->phylink_get_caps)
+ dev->ops->phylink_get_caps(dev, port, config);
+
+ /* This driver does not make use of the speed, duplex, pause or the
+ * advertisement in its mac_config, so it is safe to mark this driver
+ * as non-legacy.
+ */
+ config->legacy_pre_march2020 = false;
+}
+
+static struct phylink_pcs *b53_phylink_mac_select_pcs(struct dsa_switch *ds,
+ int port,
+ phy_interface_t interface)
+{
+ struct b53_device *dev = ds->priv;
+
+ if (!dev->ops->phylink_mac_select_pcs)
+ return NULL;
+
+ return dev->ops->phylink_mac_select_pcs(dev, port, interface);
+}
+
+void b53_phylink_mac_config(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+EXPORT_SYMBOL(b53_phylink_mac_config);
+
+void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct b53_device *dev = ds->priv;
+
+ if (mode == MLO_AN_PHY)
+ return;
+
+ if (mode == MLO_AN_FIXED) {
+ b53_force_link(dev, port, false);
+ return;
+ }
+
+ if (phy_interface_mode_is_8023z(interface) &&
+ dev->ops->serdes_link_set)
+ dev->ops->serdes_link_set(dev, port, mode, interface, false);
+}
+EXPORT_SYMBOL(b53_phylink_mac_link_down);
+
+void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct b53_device *dev = ds->priv;
+
+ if (mode == MLO_AN_PHY)
+ return;
+
+ if (mode == MLO_AN_FIXED) {
+ b53_force_port_config(dev, port, speed, duplex,
+ tx_pause, rx_pause);
+ b53_force_link(dev, port, true);
+ return;
+ }
+
+ if (phy_interface_mode_is_8023z(interface) &&
+ dev->ops->serdes_link_set)
+ dev->ops->serdes_link_set(dev, port, mode, interface, true);
+}
+EXPORT_SYMBOL(b53_phylink_mac_link_up);
+
+int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
+ struct netlink_ext_ack *extack)
+{
+ struct b53_device *dev = ds->priv;
+
+ b53_enable_vlan(dev, port, dev->vlan_enabled, vlan_filtering);
+
+ return 0;
+}
+EXPORT_SYMBOL(b53_vlan_filtering);
+
+static int b53_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct b53_device *dev = ds->priv;
+
+ if ((is5325(dev) || is5365(dev)) && vlan->vid == 0)
+ return -EOPNOTSUPP;
+
+ /* Port 7 on 7278 connects to the ASP's UniMAC which is not capable of
+ * receiving VLAN tagged frames at all; we can still allow the port to
+ * be configured for egress untagged.
+ */
+ if (dev->chip_id == BCM7278_DEVICE_ID && port == 7 &&
+ !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED))
+ return -EINVAL;
+
+ if (vlan->vid >= dev->num_vlans)
+ return -ERANGE;
+
+ b53_enable_vlan(dev, port, true, ds->vlan_filtering);
+
+ return 0;
+}
+
+int b53_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ struct b53_device *dev = ds->priv;
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct b53_vlan *vl;
+ int err;
+
+ err = b53_vlan_prepare(ds, port, vlan);
+ if (err)
+ return err;
+
+ vl = &dev->vlans[vlan->vid];
+
+ b53_get_vlan_entry(dev, vlan->vid, vl);
+
+ if (vlan->vid == 0 && vlan->vid == b53_default_pvid(dev))
+ untagged = true;
+
+ vl->members |= BIT(port);
+ if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port))
+ vl->untag |= BIT(port);
+ else
+ vl->untag &= ~BIT(port);
+
+ b53_set_vlan_entry(dev, vlan->vid, vl);
+ b53_fast_age_vlan(dev, vlan->vid);
+
+ if (pvid && !dsa_is_cpu_port(ds, port)) {
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
+ vlan->vid);
+ b53_fast_age_vlan(dev, vlan->vid);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(b53_vlan_add);
+
+int b53_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct b53_device *dev = ds->priv;
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ struct b53_vlan *vl;
+ u16 pvid;
+
+ b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
+
+ vl = &dev->vlans[vlan->vid];
+
+ b53_get_vlan_entry(dev, vlan->vid, vl);
+
+ vl->members &= ~BIT(port);
+
+ if (pvid == vlan->vid)
+ pvid = b53_default_pvid(dev);
+
+ if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port))
+ vl->untag &= ~(BIT(port));
+
+ b53_set_vlan_entry(dev, vlan->vid, vl);
+ b53_fast_age_vlan(dev, vlan->vid);
+
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid);
+ b53_fast_age_vlan(dev, pvid);
+
+ return 0;
+}
+EXPORT_SYMBOL(b53_vlan_del);
+
+/* Address Resolution Logic routines. Caller must hold &dev->arl_mutex. */
+static int b53_arl_op_wait(struct b53_device *dev)
+{
+ unsigned int timeout = 10;
+ u8 reg;
+
+ do {
+ b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, &reg);
+ if (!(reg & ARLTBL_START_DONE))
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (timeout--);
+
+ dev_warn(dev->dev, "timeout waiting for ARL to finish: 0x%02x\n", reg);
+
+ return -ETIMEDOUT;
+}
+
+static int b53_arl_rw_op(struct b53_device *dev, unsigned int op)
+{
+ u8 reg;
+
+ if (op > ARLTBL_RW)
+ return -EINVAL;
+
+ b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, &reg);
+ reg |= ARLTBL_START_DONE;
+ if (op)
+ reg |= ARLTBL_RW;
+ else
+ reg &= ~ARLTBL_RW;
+ if (dev->vlan_enabled)
+ reg &= ~ARLTBL_IVL_SVL_SELECT;
+ else
+ reg |= ARLTBL_IVL_SVL_SELECT;
+ b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg);
+
+ return b53_arl_op_wait(dev);
+}
+
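+/* Look up @mac/@vid in the ARL bins previously latched by an ARL read
+ * operation. Returns 0 with *idx set to the matching bin, -ENOENT with
+ * *idx set to the first free bin if no match was found, or -ENOSPC if
+ * the bucket is full.
+ */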
+static int b53_arl_read(struct b53_device *dev, u64 mac,
+ u16 vid, struct b53_arl_entry *ent, u8 *idx)
+{
+ DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES);
+ unsigned int i;
+ int ret;
+
+ ret = b53_arl_op_wait(dev);
+ if (ret)
+ return ret;
+
+ bitmap_zero(free_bins, dev->num_arl_bins);
+
+ /* Read the bins */
+ for (i = 0; i < dev->num_arl_bins; i++) {
+ u64 mac_vid;
+ u32 fwd_entry;
+
+ b53_read64(dev, B53_ARLIO_PAGE,
+ B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid);
+ b53_read32(dev, B53_ARLIO_PAGE,
+ B53_ARLTBL_DATA_ENTRY(i), &fwd_entry);
+ b53_arl_to_entry(ent, mac_vid, fwd_entry);
+
+ if (!(fwd_entry & ARLTBL_VALID)) {
+ set_bit(i, free_bins);
+ continue;
+ }
+ if ((mac_vid & ARLTBL_MAC_MASK) != mac)
+ continue;
+ if (dev->vlan_enabled &&
+ ((mac_vid >> ARLTBL_VID_S) & ARLTBL_VID_MASK) != vid)
+ continue;
+ *idx = i;
+ return 0;
+ }
+
+ *idx = find_first_bit(free_bins, dev->num_arl_bins);
+ return *idx >= dev->num_arl_bins ? -ENOSPC : -ENOENT;
+}
+
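+/* Read-modify-write of one ARL entry. A non-zero @op performs only the
+ * lookup; @op == 0 programs the entry, inserting or updating it when
+ * @is_valid is true and clearing it (or a multicast port bit) otherwise.
+ */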
+static int b53_arl_op(struct b53_device *dev, int op, int port,
+ const unsigned char *addr, u16 vid, bool is_valid)
+{
+ struct b53_arl_entry ent;
+ u32 fwd_entry;
+ u64 mac, mac_vid = 0;
+ u8 idx = 0;
+ int ret;
+
+ /* Convert the array into a 64-bit MAC */
+ mac = ether_addr_to_u64(addr);
+
+ /* Perform a read for the given MAC and VID */
+ b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac);
+ b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid);
+
+ /* Issue a read operation for this MAC */
+ ret = b53_arl_rw_op(dev, 1);
+ if (ret)
+ return ret;
+
+ ret = b53_arl_read(dev, mac, vid, &ent, &idx);
+
+ /* If this is a read, just finish now */
+ if (op)
+ return ret;
+
+ switch (ret) {
+ case -ETIMEDOUT:
+ return ret;
+ case -ENOSPC:
+ dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n",
+ addr, vid);
+ return is_valid ? ret : 0;
+ case -ENOENT:
+ /* We could not find a matching MAC, so reset to a new entry */
+ dev_dbg(dev->dev, "{%pM,%.4d} not found, using idx: %d\n",
+ addr, vid, idx);
+ fwd_entry = 0;
+ break;
+ default:
+ dev_dbg(dev->dev, "{%pM,%.4d} found, using idx: %d\n",
+ addr, vid, idx);
+ break;
+ }
+
+ /* For a multicast address, the port field is a bitmask and the
+ * entry stays valid as long as at least one port is still active
+ */
+ if (!is_multicast_ether_addr(addr)) {
+ ent.port = port;
+ ent.is_valid = is_valid;
+ } else {
+ if (is_valid)
+ ent.port |= BIT(port);
+ else
+ ent.port &= ~BIT(port);
+
+ ent.is_valid = !!(ent.port);
+ }
+
+ ent.vid = vid;
+ ent.is_static = true;
+ ent.is_age = false;
+ memcpy(ent.mac, addr, ETH_ALEN);
+ b53_arl_from_entry(&mac_vid, &fwd_entry, &ent);
+
+ b53_write64(dev, B53_ARLIO_PAGE,
+ B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid);
+ b53_write32(dev, B53_ARLIO_PAGE,
+ B53_ARLTBL_DATA_ENTRY(idx), fwd_entry);
+
+ return b53_arl_rw_op(dev, 0);
+}
+
+int b53_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct b53_device *priv = ds->priv;
+ int ret;
+
+ /* 5325 and 5365 require some more massaging, but could
+ * be supported eventually
+ */
+ if (is5325(priv) || is5365(priv))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&priv->arl_mutex);
+ ret = b53_arl_op(priv, 0, port, addr, vid, true);
+ mutex_unlock(&priv->arl_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(b53_fdb_add);
+
+int b53_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct b53_device *priv = ds->priv;
+ int ret;
+
+ mutex_lock(&priv->arl_mutex);
+ ret = b53_arl_op(priv, 0, port, addr, vid, false);
+ mutex_unlock(&priv->arl_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(b53_fdb_del);
+
+static int b53_arl_search_wait(struct b53_device *dev)
+{
+ unsigned int timeout = 1000;
+ u8 reg;
+
+ do {
+ b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, &reg);
+ if (!(reg & ARL_SRCH_STDN))
+ return 0;
+
+ if (reg & ARL_SRCH_VLID)
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (timeout--);
+
+ return -ETIMEDOUT;
+}
+
+static void b53_arl_search_rd(struct b53_device *dev, u8 idx,
+ struct b53_arl_entry *ent)
+{
+ u64 mac_vid;
+ u32 fwd_entry;
+
+ b53_read64(dev, B53_ARLIO_PAGE,
+ B53_ARL_SRCH_RSTL_MACVID(idx), &mac_vid);
+ b53_read32(dev, B53_ARLIO_PAGE,
+ B53_ARL_SRCH_RSTL(idx), &fwd_entry);
+ b53_arl_to_entry(ent, mac_vid, fwd_entry);
+}
+
+static int b53_fdb_copy(int port, const struct b53_arl_entry *ent,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ if (!ent->is_valid)
+ return 0;
+
+ if (port != ent->port)
+ return 0;
+
+ return cb(ent->mac, ent->vid, ent->is_static, data);
+}
+
+int b53_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct b53_device *priv = ds->priv;
+ struct b53_arl_entry results[2];
+ unsigned int count = 0;
+ int ret;
+ u8 reg;
+
+ mutex_lock(&priv->arl_mutex);
+
+ /* Start search operation */
+ reg = ARL_SRCH_STDN;
+ b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg);
+
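+ /* Each search step yields up to two results; the second one only
+ * exists on chips with more than two ARL bins.
+ */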
+ do {
+ ret = b53_arl_search_wait(priv);
+ if (ret)
+ break;
+
+ b53_arl_search_rd(priv, 0, &results[0]);
+ ret = b53_fdb_copy(port, &results[0], cb, data);
+ if (ret)
+ break;
+
+ if (priv->num_arl_bins > 2) {
+ b53_arl_search_rd(priv, 1, &results[1]);
+ ret = b53_fdb_copy(port, &results[1], cb, data);
+ if (ret)
+ break;
+
+ if (!results[0].is_valid && !results[1].is_valid)
+ break;
+ }
+
+ } while (count++ < b53_max_arl_entries(priv) / 2);
+
+ mutex_unlock(&priv->arl_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(b53_fdb_dump);
+
+int b53_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct b53_device *priv = ds->priv;
+ int ret;
+
+ /* 5325 and 5365 require some more massaging, but could
+ * be supported eventually
+ */
+ if (is5325(priv) || is5365(priv))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&priv->arl_mutex);
+ ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true);
+ mutex_unlock(&priv->arl_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(b53_mdb_add);
+
+int b53_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct b53_device *priv = ds->priv;
+ int ret;
+
+ mutex_lock(&priv->arl_mutex);
+ ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, false);
+ mutex_unlock(&priv->arl_mutex);
+ if (ret)
+ dev_err(ds->dev, "failed to delete MDB entry\n");
+
+ return ret;
+}
+EXPORT_SYMBOL(b53_mdb_del);
+
+int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
+ bool *tx_fwd_offload, struct netlink_ext_ack *extack)
+{
+ struct b53_device *dev = ds->priv;
+ s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+ u16 pvlan, reg;
+ unsigned int i;
+
+ /* On 7278, port 7 which connects to the ASP should only receive
+ * traffic from matching CFP rules.
+ */
+ if (dev->chip_id == BCM7278_DEVICE_ID && port == 7)
+ return -EINVAL;
+
+ /* Make this port leave the "join all VLANs" mode since we will have
+ * proper VLAN entries from now on
+ */
+ if (is58xx(dev)) {
+ b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
+ reg &= ~BIT(port);
+ if ((reg & BIT(cpu_port)) == BIT(cpu_port))
+ reg &= ~BIT(cpu_port);
+ b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
+ }
+
+ b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
+
+ b53_for_each_port(dev, i) {
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
+ continue;
+
+ /* Add this local port to the remote port VLAN control
+ * membership and update the remote port bitmask
+ */
+ b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
+ reg |= BIT(port);
+ b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg);
+ dev->ports[i].vlan_ctl_mask = reg;
+
+ pvlan |= BIT(i);
+ }
+
+ /* Configure the local port VLAN control membership to include
+ * remote ports and update the local port bitmask
+ */
+ b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
+ dev->ports[port].vlan_ctl_mask = pvlan;
+
+ return 0;
+}
+EXPORT_SYMBOL(b53_br_join);
+
+void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
+{
+ struct b53_device *dev = ds->priv;
+ struct b53_vlan *vl = &dev->vlans[0];
+ s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+ unsigned int i;
+ u16 pvlan, reg, pvid;
+
+ b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
+
+ b53_for_each_port(dev, i) {
+ /* Don't touch the remaining ports */
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
+ continue;
+
+ b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
+ reg &= ~BIT(port);
+ b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg);
+ dev->ports[i].vlan_ctl_mask = reg;
+
+ /* Prevent self removal to preserve isolation */
+ if (port != i)
+ pvlan &= ~BIT(i);
+ }
+
+ b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
+ dev->ports[port].vlan_ctl_mask = pvlan;
+
+ pvid = b53_default_pvid(dev);
+
+ /* Make this port join all VLANs without VLAN entries */
+ if (is58xx(dev)) {
+ b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
+ reg |= BIT(port);
+ if (!(reg & BIT(cpu_port)))
+ reg |= BIT(cpu_port);
+ b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
+ } else {
+ b53_get_vlan_entry(dev, pvid, vl);
+ vl->members |= BIT(port) | BIT(cpu_port);
+ vl->untag |= BIT(port) | BIT(cpu_port);
+ b53_set_vlan_entry(dev, pvid, vl);
+ }
+}
+EXPORT_SYMBOL(b53_br_leave);
+
+void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state)
+{
+ struct b53_device *dev = ds->priv;
+ u8 hw_state;
+ u8 reg;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ hw_state = PORT_CTRL_DIS_STATE;
+ break;
+ case BR_STATE_LISTENING:
+ hw_state = PORT_CTRL_LISTEN_STATE;
+ break;
+ case BR_STATE_LEARNING:
+ hw_state = PORT_CTRL_LEARN_STATE;
+ break;
+ case BR_STATE_FORWARDING:
+ hw_state = PORT_CTRL_FWD_STATE;
+ break;
+ case BR_STATE_BLOCKING:
+ hw_state = PORT_CTRL_BLOCK_STATE;
+ break;
+ default:
+ dev_err(ds->dev, "invalid STP state: %d\n", state);
+ return;
+ }
+
+ b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
+ reg &= ~PORT_CTRL_STP_STATE_MASK;
+ reg |= hw_state;
+ b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
+}
+EXPORT_SYMBOL(b53_br_set_stp_state);
+
+void b53_br_fast_age(struct dsa_switch *ds, int port)
+{
+ struct b53_device *dev = ds->priv;
+
+ if (b53_fast_age_port(dev, port))
+ dev_err(ds->dev, "fast ageing failed\n");
+}
+EXPORT_SYMBOL(b53_br_fast_age);
+
+int b53_br_flags_pre(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_LEARNING))
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(b53_br_flags_pre);
+
+int b53_br_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ if (flags.mask & BR_FLOOD)
+ b53_port_set_ucast_flood(ds->priv, port,
+ !!(flags.val & BR_FLOOD));
+ if (flags.mask & BR_MCAST_FLOOD)
+ b53_port_set_mcast_flood(ds->priv, port,
+ !!(flags.val & BR_MCAST_FLOOD));
+ if (flags.mask & BR_LEARNING)
+ b53_port_set_learning(ds->priv, port,
+ !!(flags.val & BR_LEARNING));
+
+ return 0;
+}
+EXPORT_SYMBOL(b53_br_flags);
+
+static bool b53_possible_cpu_port(struct dsa_switch *ds, int port)
+{
+ /* Broadcom switches will accept enabling Broadcom tags on the
+ * following ports: 5, 7 and 8; any other port is not supported
+ */
+ switch (port) {
+ case B53_CPU_PORT_25:
+ case 7:
+ case B53_CPU_PORT:
+ return true;
+ }
+
+ return false;
+}
+
+static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol tag_protocol)
+{
+ bool ret = b53_possible_cpu_port(ds, port);
+
+ if (!ret) {
+ dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n",
+ port);
+ return ret;
+ }
+
+ switch (tag_protocol) {
+ case DSA_TAG_PROTO_BRCM:
+ case DSA_TAG_PROTO_BRCM_PREPEND:
+ dev_warn(ds->dev,
+ "Port %d is stacked to Broadcom tag switch\n", port);
+ ret = false;
+ break;
+ default:
+ ret = true;
+ break;
+ }
+
+ return ret;
+}
+
+enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mprot)
+{
+ struct b53_device *dev = ds->priv;
+
+ if (!b53_can_enable_brcm_tags(ds, port, mprot)) {
+ dev->tag_protocol = DSA_TAG_PROTO_NONE;
+ goto out;
+ }
+
+ /* Older models require a different 6 byte tag */
+ if (is5325(dev) || is5365(dev) || is63xx(dev)) {
+ dev->tag_protocol = DSA_TAG_PROTO_BRCM_LEGACY;
+ goto out;
+ }
+
+ /* Broadcom BCM58xx chips have a flow accelerator on Port 8
+ * which requires us to use the prepended Broadcom tag type
+ */
+ if (dev->chip_id == BCM58XX_DEVICE_ID && port == B53_CPU_PORT) {
+ dev->tag_protocol = DSA_TAG_PROTO_BRCM_PREPEND;
+ goto out;
+ }
+
+ dev->tag_protocol = DSA_TAG_PROTO_BRCM;
+out:
+ return dev->tag_protocol;
+}
+EXPORT_SYMBOL(b53_get_tag_protocol);
+
+int b53_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror, bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ struct b53_device *dev = ds->priv;
+ u16 reg, loc;
+
+ if (ingress)
+ loc = B53_IG_MIR_CTL;
+ else
+ loc = B53_EG_MIR_CTL;
+
+ b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
+ reg |= BIT(port);
+ b53_write16(dev, B53_MGMT_PAGE, loc, reg);
+
+ b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
+ reg &= ~CAP_PORT_MASK;
+ reg |= mirror->to_local_port;
+ reg |= MIRROR_EN;
+ b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);
+
+ return 0;
+}
+EXPORT_SYMBOL(b53_mirror_add);
+
+void b53_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct b53_device *dev = ds->priv;
+ bool loc_disable = false, other_loc_disable = false;
+ u16 reg, loc;
+
+ if (mirror->ingress)
+ loc = B53_IG_MIR_CTL;
+ else
+ loc = B53_EG_MIR_CTL;
+
+ /* Update the desired ingress/egress register */
+ b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
+ reg &= ~BIT(port);
+ if (!(reg & MIRROR_MASK))
+ loc_disable = true;
+ b53_write16(dev, B53_MGMT_PAGE, loc, reg);
+
+ /* Now look at the other one to know if we can disable mirroring
+ * entirely
+ */
+ if (mirror->ingress)
+ b53_read16(dev, B53_MGMT_PAGE, B53_EG_MIR_CTL, &reg);
+ else
+ b53_read16(dev, B53_MGMT_PAGE, B53_IG_MIR_CTL, &reg);
+ if (!(reg & MIRROR_MASK))
+ other_loc_disable = true;
+
+ b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
+ /* Both no longer have ports, let's disable mirroring */
+ if (loc_disable && other_loc_disable) {
+ reg &= ~MIRROR_EN;
+ reg &= ~mirror->to_local_port;
+ }
+ b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);
+}
+EXPORT_SYMBOL(b53_mirror_del);
+
+void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
+{
+ struct b53_device *dev = ds->priv;
+ u16 reg;
+
+ b53_read16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, &reg);
+ if (enable)
+ reg |= BIT(port);
+ else
+ reg &= ~BIT(port);
+ b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg);
+}
+EXPORT_SYMBOL(b53_eee_enable_set);
+
+
+/* Returns 0 if EEE was not enabled, or 1 otherwise
+ */
+int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
+{
+ int ret;
+
+ ret = phy_init_eee(phy, false);
+ if (ret)
+ return 0;
+
+ b53_eee_enable_set(ds, port, true);
+
+ return 1;
+}
+EXPORT_SYMBOL(b53_eee_init);
+
+int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
+{
+ struct b53_device *dev = ds->priv;
+ struct ethtool_eee *p = &dev->ports[port].eee;
+ u16 reg;
+
+ if (is5325(dev) || is5365(dev))
+ return -EOPNOTSUPP;
+
+ b53_read16(dev, B53_EEE_PAGE, B53_EEE_LPI_INDICATE, &reg);
+ e->eee_enabled = p->eee_enabled;
+ e->eee_active = !!(reg & BIT(port));
+
+ return 0;
+}
+EXPORT_SYMBOL(b53_get_mac_eee);
+
+int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
+{
+ struct b53_device *dev = ds->priv;
+ struct ethtool_eee *p = &dev->ports[port].eee;
+
+ if (is5325(dev) || is5365(dev))
+ return -EOPNOTSUPP;
+
+ p->eee_enabled = e->eee_enabled;
+ b53_eee_enable_set(ds, port, e->eee_enabled);
+
+ return 0;
+}
+EXPORT_SYMBOL(b53_set_mac_eee);
+
+static int b53_change_mtu(struct dsa_switch *ds, int port, int mtu)
+{
+ struct b53_device *dev = ds->priv;
+ bool enable_jumbo;
+ bool allow_10_100;
+
+ if (is5325(dev) || is5365(dev))
+ return -EOPNOTSUPP;
+
+ enable_jumbo = (mtu >= JMS_MIN_SIZE);
+ allow_10_100 = (dev->chip_id == BCM583XX_DEVICE_ID);
+
+ return b53_set_jumbo(dev, enable_jumbo, allow_10_100);
+}
+
+static int b53_get_max_mtu(struct dsa_switch *ds, int port)
+{
+ return JMS_MAX_SIZE;
+}
+
+static const struct dsa_switch_ops b53_switch_ops = {
+ .get_tag_protocol = b53_get_tag_protocol,
+ .setup = b53_setup,
+ .teardown = b53_teardown,
+ .get_strings = b53_get_strings,
+ .get_ethtool_stats = b53_get_ethtool_stats,
+ .get_sset_count = b53_get_sset_count,
+ .get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
+ .phy_read = b53_phy_read16,
+ .phy_write = b53_phy_write16,
+ .adjust_link = b53_adjust_link,
+ .phylink_get_caps = b53_phylink_get_caps,
+ .phylink_mac_select_pcs = b53_phylink_mac_select_pcs,
+ .phylink_mac_config = b53_phylink_mac_config,
+ .phylink_mac_link_down = b53_phylink_mac_link_down,
+ .phylink_mac_link_up = b53_phylink_mac_link_up,
+ .port_enable = b53_enable_port,
+ .port_disable = b53_disable_port,
+ .get_mac_eee = b53_get_mac_eee,
+ .set_mac_eee = b53_set_mac_eee,
+ .port_bridge_join = b53_br_join,
+ .port_bridge_leave = b53_br_leave,
+ .port_pre_bridge_flags = b53_br_flags_pre,
+ .port_bridge_flags = b53_br_flags,
+ .port_stp_state_set = b53_br_set_stp_state,
+ .port_fast_age = b53_br_fast_age,
+ .port_vlan_filtering = b53_vlan_filtering,
+ .port_vlan_add = b53_vlan_add,
+ .port_vlan_del = b53_vlan_del,
+ .port_fdb_dump = b53_fdb_dump,
+ .port_fdb_add = b53_fdb_add,
+ .port_fdb_del = b53_fdb_del,
+ .port_mirror_add = b53_mirror_add,
+ .port_mirror_del = b53_mirror_del,
+ .port_mdb_add = b53_mdb_add,
+ .port_mdb_del = b53_mdb_del,
+ .port_max_mtu = b53_get_max_mtu,
+ .port_change_mtu = b53_change_mtu,
+};
+
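+/* Static per-chip description used to parameterize the common code */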
+struct b53_chip_data {
+ u32 chip_id;
+ const char *dev_name;
+ u16 vlans;
+ u16 enabled_ports;
+ u8 imp_port;
+ u8 cpu_port;
+ u8 vta_regs[3];
+ u8 arl_bins;
+ u16 arl_buckets;
+ u8 duplex_reg;
+ u8 jumbo_pm_reg;
+ u8 jumbo_size_reg;
+};
+
+#define B53_VTA_REGS \
+ { B53_VT_ACCESS, B53_VT_INDEX, B53_VT_ENTRY }
+#define B53_VTA_REGS_9798 \
+ { B53_VT_ACCESS_9798, B53_VT_INDEX_9798, B53_VT_ENTRY_9798 }
+#define B53_VTA_REGS_63XX \
+ { B53_VT_ACCESS_63XX, B53_VT_INDEX_63XX, B53_VT_ENTRY_63XX }
+
+static const struct b53_chip_data b53_switch_chips[] = {
+ {
+ .chip_id = BCM5325_DEVICE_ID,
+ .dev_name = "BCM5325",
+ .vlans = 16,
+ .enabled_ports = 0x3f,
+ .arl_bins = 2,
+ .arl_buckets = 1024,
+ .imp_port = 5,
+ .duplex_reg = B53_DUPLEX_STAT_FE,
+ },
+ {
+ .chip_id = BCM5365_DEVICE_ID,
+ .dev_name = "BCM5365",
+ .vlans = 256,
+ .enabled_ports = 0x3f,
+ .arl_bins = 2,
+ .arl_buckets = 1024,
+ .imp_port = 5,
+ .duplex_reg = B53_DUPLEX_STAT_FE,
+ },
+ {
+ .chip_id = BCM5389_DEVICE_ID,
+ .dev_name = "BCM5389",
+ .vlans = 4096,
+ .enabled_ports = 0x11f,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
+ .imp_port = 8,
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM5395_DEVICE_ID,
+ .dev_name = "BCM5395",
+ .vlans = 4096,
+ .enabled_ports = 0x11f,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
+ .imp_port = 8,
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM5397_DEVICE_ID,
+ .dev_name = "BCM5397",
+ .vlans = 4096,
+ .enabled_ports = 0x11f,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
+ .imp_port = 8,
+ .vta_regs = B53_VTA_REGS_9798,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM5398_DEVICE_ID,
+ .dev_name = "BCM5398",
+ .vlans = 4096,
+ .enabled_ports = 0x17f,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
+ .imp_port = 8,
+ .vta_regs = B53_VTA_REGS_9798,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM53115_DEVICE_ID,
+ .dev_name = "BCM53115",
+ .vlans = 4096,
+ .enabled_ports = 0x11f,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
+ .vta_regs = B53_VTA_REGS,
+ .imp_port = 8,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM53125_DEVICE_ID,
+ .dev_name = "BCM53125",
+ .vlans = 4096,
+ .enabled_ports = 0x1ff,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
+ .imp_port = 8,
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM53128_DEVICE_ID,
+ .dev_name = "BCM53128",
+ .vlans = 4096,
+ .enabled_ports = 0x1ff,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
+ .imp_port = 8,
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM63XX_DEVICE_ID,
+ .dev_name = "BCM63xx",
+ .vlans = 4096,
+ .enabled_ports = 0, /* pdata must provide them */
+ .arl_bins = 4,
+ .arl_buckets = 1024,
+ .imp_port = 8,
+ .vta_regs = B53_VTA_REGS_63XX,
+ .duplex_reg = B53_DUPLEX_STAT_63XX,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX,
+ },
+ {
+ .chip_id = BCM53010_DEVICE_ID,
+ .dev_name = "BCM53010",
+ .vlans = 4096,
+ .enabled_ports = 0x1bf,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
+ .imp_port = 8,
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM53011_DEVICE_ID,
+ .dev_name = "BCM53011",
+ .vlans = 4096,
+ .enabled_ports = 0x1bf,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
+ .imp_port = 8,
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM53012_DEVICE_ID,
+ .dev_name = "BCM53012",
+ .vlans = 4096,
+ .enabled_ports = 0x1bf,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
+ .imp_port = 8,
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM53018_DEVICE_ID,
+ .dev_name = "BCM53018",
+ .vlans = 4096,
+ .enabled_ports = 0x1bf,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
+ .imp_port = 8,
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM53019_DEVICE_ID,
+ .dev_name = "BCM53019",
+ .vlans = 4096,
+ .enabled_ports = 0x1bf,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
+ .imp_port = 8,
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM58XX_DEVICE_ID,
+ .dev_name = "BCM585xx/586xx/88312",
+ .vlans = 4096,
+ .enabled_ports = 0x1ff,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
+ .imp_port = 8,
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM583XX_DEVICE_ID,
+ .dev_name = "BCM583xx/11360",
+ .vlans = 4096,
+ .enabled_ports = 0x103,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
+ .imp_port = 8,
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ /* Starfighter 2 */
+ {
+ .chip_id = BCM4908_DEVICE_ID,
+ .dev_name = "BCM4908",
+ .vlans = 4096,
+ .enabled_ports = 0x1bf,
+ .arl_bins = 4,
+ .arl_buckets = 256,
+ .imp_port = 8,
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM7445_DEVICE_ID,
+ .dev_name = "BCM7445",
+ .vlans = 4096,
+ .enabled_ports = 0x1ff,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
+ .imp_port = 8,
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM7278_DEVICE_ID,
+ .dev_name = "BCM7278",
+ .vlans = 4096,
+ .enabled_ports = 0x1ff,
+ .arl_bins = 4,
+ .arl_buckets = 256,
+ .imp_port = 8,
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+};
+
+static int b53_switch_init(struct b53_device *dev)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(b53_switch_chips); i++) {
+ const struct b53_chip_data *chip = &b53_switch_chips[i];
+
+ if (chip->chip_id == dev->chip_id) {
+ if (!dev->enabled_ports)
+ dev->enabled_ports = chip->enabled_ports;
+ dev->name = chip->dev_name;
+ dev->duplex_reg = chip->duplex_reg;
+ dev->vta_regs[0] = chip->vta_regs[0];
+ dev->vta_regs[1] = chip->vta_regs[1];
+ dev->vta_regs[2] = chip->vta_regs[2];
+ dev->jumbo_pm_reg = chip->jumbo_pm_reg;
+ dev->jumbo_size_reg = chip->jumbo_size_reg;
+ dev->imp_port = chip->imp_port;
+ dev->num_vlans = chip->vlans;
+ dev->num_arl_bins = chip->arl_bins;
+ dev->num_arl_buckets = chip->arl_buckets;
+ break;
+ }
+ }
+
+ /* check which BCM5325x version we have */
+ if (is5325(dev)) {
+ u8 vc4;
+
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);
+
+ /* check reserved bits */
+ switch (vc4 & 3) {
+ case 1:
+ /* BCM5325E */
+ break;
+ case 3:
+ /* BCM5325F - do not use port 4 */
+ dev->enabled_ports &= ~BIT(4);
+ break;
+ default:
+/* On the BCM47XX SoCs this is the supported internal switch. */
+#ifndef CONFIG_BCM47XX
+ /* BCM5325M */
+ return -EINVAL;
+#else
+ break;
+#endif
+ }
+ }
+
+ dev->num_ports = fls(dev->enabled_ports);
+
+ dev->ds->num_ports = min_t(unsigned int, dev->num_ports, DSA_MAX_PORTS);
+
+ /* Include non-standard CPU port built-in PHYs to be probed */
+ if (is539x(dev) || is531x5(dev)) {
+ for (i = 0; i < dev->num_ports; i++) {
+ if (!(dev->ds->phys_mii_mask & BIT(i)) &&
+ !b53_possible_cpu_port(dev->ds, i))
+ dev->ds->phys_mii_mask |= BIT(i);
+ }
+ }
+
+ dev->ports = devm_kcalloc(dev->dev,
+ dev->num_ports, sizeof(struct b53_port),
+ GFP_KERNEL);
+ if (!dev->ports)
+ return -ENOMEM;
+
+ dev->vlans = devm_kcalloc(dev->dev,
+ dev->num_vlans, sizeof(struct b53_vlan),
+ GFP_KERNEL);
+ if (!dev->vlans)
+ return -ENOMEM;
+
+ dev->reset_gpio = b53_switch_get_reset_gpio(dev);
+ if (dev->reset_gpio >= 0) {
+ ret = devm_gpio_request_one(dev->dev, dev->reset_gpio,
+ GPIOF_OUT_INIT_HIGH, "robo_reset");
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+struct b53_device *b53_switch_alloc(struct device *base,
+ const struct b53_io_ops *ops,
+ void *priv)
+{
+ struct dsa_switch *ds;
+ struct b53_device *dev;
+
+ ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
+ if (!ds)
+ return NULL;
+
+ ds->dev = base;
+
+ dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ ds->priv = dev;
+ dev->dev = base;
+
+ dev->ds = ds;
+ dev->priv = priv;
+ dev->ops = ops;
+ ds->ops = &b53_switch_ops;
+ dev->vlan_enabled = true;
+ /* Let DSA handle the case where multiple bridges span the same switch
+ * device and different VLAN awareness settings are requested, which
+ * would break filtering semantics for any of the other bridge
+ * devices (not supported in hardware).
+ */
+ ds->vlan_filtering_is_global = true;
+
+ mutex_init(&dev->reg_mutex);
+ mutex_init(&dev->stats_mutex);
+ mutex_init(&dev->arl_mutex);
+
+ return dev;
+}
+EXPORT_SYMBOL(b53_switch_alloc);
+
+int b53_switch_detect(struct b53_device *dev)
+{
+ u32 id32;
+ u16 tmp;
+ u8 id8;
+ int ret;
+
+ ret = b53_read8(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id8);
+ if (ret)
+ return ret;
+
+ switch (id8) {
+ case 0:
+ /* BCM5325 and BCM5365 do not have this register so reads
+ * return 0. But the read operation did succeed, so assume this
+ * is one of them.
+ *
+ * Next check if we can write to the 5325's VTA register; for
+ * 5365 it is read only.
+ */
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, 0xf);
+ b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, &tmp);
+
+ if (tmp == 0xf)
+ dev->chip_id = BCM5325_DEVICE_ID;
+ else
+ dev->chip_id = BCM5365_DEVICE_ID;
+ break;
+ case BCM5389_DEVICE_ID:
+ case BCM5395_DEVICE_ID:
+ case BCM5397_DEVICE_ID:
+ case BCM5398_DEVICE_ID:
+ dev->chip_id = id8;
+ break;
+ default:
+ ret = b53_read32(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id32);
+ if (ret)
+ return ret;
+
+ switch (id32) {
+ case BCM53115_DEVICE_ID:
+ case BCM53125_DEVICE_ID:
+ case BCM53128_DEVICE_ID:
+ case BCM53010_DEVICE_ID:
+ case BCM53011_DEVICE_ID:
+ case BCM53012_DEVICE_ID:
+ case BCM53018_DEVICE_ID:
+ case BCM53019_DEVICE_ID:
+ dev->chip_id = id32;
+ break;
+ default:
+ dev_err(dev->dev,
+ "unsupported switch detected (BCM53%02x/BCM%x)\n",
+ id8, id32);
+ return -ENODEV;
+ }
+ }
+
+ if (dev->chip_id == BCM5325_DEVICE_ID)
+ return b53_read8(dev, B53_STAT_PAGE, B53_REV_ID_25,
+ &dev->core_rev);
+ else
+ return b53_read8(dev, B53_MGMT_PAGE, B53_REV_ID,
+ &dev->core_rev);
+}
+EXPORT_SYMBOL(b53_switch_detect);
+
+int b53_switch_register(struct b53_device *dev)
+{
+ int ret;
+
+ if (dev->pdata) {
+ dev->chip_id = dev->pdata->chip_id;
+ dev->enabled_ports = dev->pdata->enabled_ports;
+ }
+
+ if (!dev->chip_id && b53_switch_detect(dev))
+ return -EINVAL;
+
+ ret = b53_switch_init(dev);
+ if (ret)
+ return ret;
+
+ dev_info(dev->dev, "found switch: %s, rev %i\n",
+ dev->name, dev->core_rev);
+
+ return dsa_register_switch(dev->ds);
+}
+EXPORT_SYMBOL(b53_switch_register);
+
+MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>");
+MODULE_DESCRIPTION("B53 switch library");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c
new file mode 100644
index 000000000..6ddc03b58
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_mdio.c
@@ -0,0 +1,399 @@
+/*
+ * B53 register access through MII registers
+ *
+ * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/phy.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/brcmphy.h>
+#include <linux/rtnetlink.h>
+#include <net/dsa.h>
+
+#include "b53_priv.h"
+
+/* MII registers */
+#define REG_MII_PAGE 0x10 /* MII Page register */
+#define REG_MII_ADDR 0x11 /* MII Address register */
+#define REG_MII_DATA0 0x18 /* MII Data register 0 */
+#define REG_MII_DATA1 0x19 /* MII Data register 1 */
+#define REG_MII_DATA2 0x1a /* MII Data register 2 */
+#define REG_MII_DATA3 0x1b /* MII Data register 3 */
+
+#define REG_MII_PAGE_ENABLE BIT(0)
+#define REG_MII_ADDR_WRITE BIT(0)
+#define REG_MII_ADDR_READ BIT(1)
+
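+/* Switch registers are reached indirectly through the Broadcom pseudo-PHY:
+ * select a page via REG_MII_PAGE, latch the register offset and operation
+ * via REG_MII_ADDR, then poll REG_MII_ADDR for completion before touching
+ * the REG_MII_DATA0..3 window.
+ */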
+static int b53_mdio_op(struct b53_device *dev, u8 page, u8 reg, u16 op)
+{
+ int i;
+ u16 v;
+ int ret;
+ struct mii_bus *bus = dev->priv;
+
+ if (dev->current_page != page) {
+ /* set page number */
+ v = (page << 8) | REG_MII_PAGE_ENABLE;
+ ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_PAGE, v);
+ if (ret)
+ return ret;
+ dev->current_page = page;
+ }
+
+ /* set register address */
+ v = (reg << 8) | op;
+ ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR, REG_MII_ADDR, v);
+ if (ret)
+ return ret;
+
+ /* check if operation completed */
+ for (i = 0; i < 5; ++i) {
+ v = mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_ADDR);
+ if (!(v & (REG_MII_ADDR_WRITE | REG_MII_ADDR_READ)))
+ break;
+ usleep_range(10, 100);
+ }
+
+ if (WARN_ON(i == 5))
+ return -EIO;
+
+ return 0;
+}
+
+static int b53_mdio_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
+{
+ struct mii_bus *bus = dev->priv;
+ int ret;
+
+ ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
+ if (ret)
+ return ret;
+
+ *val = mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA0) & 0xff;
+
+ return 0;
+}
+
+static int b53_mdio_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
+{
+ struct mii_bus *bus = dev->priv;
+ int ret;
+
+ ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
+ if (ret)
+ return ret;
+
+ *val = mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR, REG_MII_DATA0);
+
+ return 0;
+}
+
+static int b53_mdio_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
+{
+ struct mii_bus *bus = dev->priv;
+ int ret;
+
+ ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
+ if (ret)
+ return ret;
+
+ *val = mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR, REG_MII_DATA0);
+ *val |= mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA1) << 16;
+
+ return 0;
+}
+
+static int b53_mdio_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ struct mii_bus *bus = dev->priv;
+ u64 temp = 0;
+ int i;
+ int ret;
+
+ ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
+ if (ret)
+ return ret;
+
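+ /* Assemble the value from the highest 16-bit data word down */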
+ for (i = 2; i >= 0; i--) {
+ temp <<= 16;
+ temp |= mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA0 + i);
+ }
+
+ *val = temp;
+
+ return 0;
+}
+
+static int b53_mdio_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ struct mii_bus *bus = dev->priv;
+ u64 temp = 0;
+ int i;
+ int ret;
+
+ ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
+ if (ret)
+ return ret;
+
+ for (i = 3; i >= 0; i--) {
+ temp <<= 16;
+ temp |= mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA0 + i);
+ }
+
+ *val = temp;
+
+ return 0;
+}
+
+static int b53_mdio_write8(struct b53_device *dev, u8 page, u8 reg, u8 value)
+{
+ struct mii_bus *bus = dev->priv;
+ int ret;
+
+ ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA0, value);
+ if (ret)
+ return ret;
+
+ return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
+}
+
+static int b53_mdio_write16(struct b53_device *dev, u8 page, u8 reg,
+ u16 value)
+{
+ struct mii_bus *bus = dev->priv;
+ int ret;
+
+ ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA0, value);
+ if (ret)
+ return ret;
+
+ return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
+}
+
+static int b53_mdio_write32(struct b53_device *dev, u8 page, u8 reg,
+ u32 value)
+{
+ struct mii_bus *bus = dev->priv;
+ unsigned int i;
+ u32 temp = value;
+
+ for (i = 0; i < 2; i++) {
+ int ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA0 + i,
+ temp & 0xffff);
+ if (ret)
+ return ret;
+ temp >>= 16;
+ }
+
+ return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
+}
+
+static int b53_mdio_write48(struct b53_device *dev, u8 page, u8 reg,
+ u64 value)
+{
+ struct mii_bus *bus = dev->priv;
+ unsigned int i;
+ u64 temp = value;
+
+ for (i = 0; i < 3; i++) {
+ int ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA0 + i,
+ temp & 0xffff);
+ if (ret)
+ return ret;
+ temp >>= 16;
+ }
+
+ return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
+}
+
+static int b53_mdio_write64(struct b53_device *dev, u8 page, u8 reg,
+ u64 value)
+{
+ struct mii_bus *bus = dev->priv;
+ unsigned int i;
+ u64 temp = value;
+
+ for (i = 0; i < 4; i++) {
+ int ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA0 + i,
+ temp & 0xffff);
+ if (ret)
+ return ret;
+ temp >>= 16;
+ }
+
+ return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
+}
+
+static int b53_mdio_phy_read16(struct b53_device *dev, int addr, int reg,
+ u16 *value)
+{
+ struct mii_bus *bus = dev->priv;
+
+ *value = mdiobus_read_nested(bus, addr, reg);
+
+ return 0;
+}
+
+static int b53_mdio_phy_write16(struct b53_device *dev, int addr, int reg,
+ u16 value)
+{
+ struct mii_bus *bus = dev->priv;
+
+ return mdiobus_write_nested(bus, addr, reg, value);
+}
+
+static const struct b53_io_ops b53_mdio_ops = {
+ .read8 = b53_mdio_read8,
+ .read16 = b53_mdio_read16,
+ .read32 = b53_mdio_read32,
+ .read48 = b53_mdio_read48,
+ .read64 = b53_mdio_read64,
+ .write8 = b53_mdio_write8,
+ .write16 = b53_mdio_write16,
+ .write32 = b53_mdio_write32,
+ .write48 = b53_mdio_write48,
+ .write64 = b53_mdio_write64,
+ .phy_read16 = b53_mdio_phy_read16,
+ .phy_write16 = b53_mdio_phy_write16,
+};
+
+#define B53_BRCM_OUI_1 0x0143bc00
+#define B53_BRCM_OUI_2 0x03625c00
+#define B53_BRCM_OUI_3 0x00406000
+#define B53_BRCM_OUI_4 0x01410c00
+
+static int b53_mdio_probe(struct mdio_device *mdiodev)
+{
+ struct b53_device *dev;
+ u32 phy_id;
+ int ret;
+
+ /* allow the generic PHY driver to take over the non-management MDIO
+ * addresses
+ */
+ if (mdiodev->addr != BRCM_PSEUDO_PHY_ADDR && mdiodev->addr != 0) {
+ dev_err(&mdiodev->dev, "leaving address %d to PHY\n",
+ mdiodev->addr);
+ return -ENODEV;
+ }
+
+ /* read the first port's id */
+ phy_id = mdiobus_read(mdiodev->bus, 0, 2) << 16;
+ phy_id |= mdiobus_read(mdiodev->bus, 0, 3);
+
+ /* BCM5325, BCM539x (OUI_1)
+ * BCM53125, BCM53128 (OUI_2)
+ * BCM5365 (OUI_3)
+ */
+ if ((phy_id & 0xfffffc00) != B53_BRCM_OUI_1 &&
+ (phy_id & 0xfffffc00) != B53_BRCM_OUI_2 &&
+ (phy_id & 0xfffffc00) != B53_BRCM_OUI_3 &&
+ (phy_id & 0xfffffc00) != B53_BRCM_OUI_4) {
+ dev_err(&mdiodev->dev, "Unsupported device: 0x%08x\n", phy_id);
+ return -ENODEV;
+ }
+
+ /* The first probe will come from the SWITCH_MDIO controller on the
+ * 7445D0 switch, which will conflict with the 7445 integrated switch
+ * pseudo-PHY (we end up programming both). In that case, return
+ * -EPROBE_DEFER the first time we get here, and wait until we come
+ * back with the slave MDIO bus which has the correct indirection
+ * layer set up.
+ */
+ if (of_machine_is_compatible("brcm,bcm7445d0") &&
+ strcmp(mdiodev->bus->name, "sf2 slave mii"))
+ return -EPROBE_DEFER;
+
+ dev = b53_switch_alloc(&mdiodev->dev, &b53_mdio_ops, mdiodev->bus);
+ if (!dev)
+ return -ENOMEM;
+
+ /* we don't use page 0xff, so force a page set */
+ dev->current_page = 0xff;
+ dev->bus = mdiodev->bus;
+
+ dev_set_drvdata(&mdiodev->dev, dev);
+
+ ret = b53_switch_register(dev);
+ if (ret) {
+ dev_err(&mdiodev->dev, "failed to register switch: %i\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static void b53_mdio_remove(struct mdio_device *mdiodev)
+{
+ struct b53_device *dev = dev_get_drvdata(&mdiodev->dev);
+
+ if (!dev)
+ return;
+
+ b53_switch_remove(dev);
+}
+
+static void b53_mdio_shutdown(struct mdio_device *mdiodev)
+{
+ struct b53_device *dev = dev_get_drvdata(&mdiodev->dev);
+
+ if (!dev)
+ return;
+
+ b53_switch_shutdown(dev);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static const struct of_device_id b53_of_match[] = {
+ { .compatible = "brcm,bcm5325" },
+ { .compatible = "brcm,bcm53115" },
+ { .compatible = "brcm,bcm53125" },
+ { .compatible = "brcm,bcm53128" },
+ { .compatible = "brcm,bcm5365" },
+ { .compatible = "brcm,bcm5389" },
+ { .compatible = "brcm,bcm5395" },
+ { .compatible = "brcm,bcm5397" },
+ { .compatible = "brcm,bcm5398" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, b53_of_match);
+
+static struct mdio_driver b53_mdio_driver = {
+ .probe = b53_mdio_probe,
+ .remove = b53_mdio_remove,
+ .shutdown = b53_mdio_shutdown,
+ .mdiodrv.driver = {
+ .name = "bcm53xx",
+ .of_match_table = b53_of_match,
+ },
+};
+mdio_module_driver(b53_mdio_driver);
+
+MODULE_DESCRIPTION("B53 MDIO access driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
new file mode 100644
index 000000000..d9434ed94
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -0,0 +1,368 @@
+/*
+ * B53 register access through memory mapped registers
+ *
+ * Copyright (C) 2012-2013 Jonas Gorski <jogo@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/bits.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/b53.h>
+
+#include "b53_priv.h"
+
+struct b53_mmap_priv {
+ void __iomem *regs;
+};
+
+static int b53_mmap_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
+{
+ struct b53_mmap_priv *priv = dev->priv;
+ void __iomem *regs = priv->regs;
+
+ *val = readb(regs + (page << 8) + reg);
+
+ return 0;
+}
+
+static int b53_mmap_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
+{
+ struct b53_mmap_priv *priv = dev->priv;
+ void __iomem *regs = priv->regs;
+
+ if (WARN_ON(reg % 2))
+ return -EINVAL;
+
+ if (dev->pdata && dev->pdata->big_endian)
+ *val = ioread16be(regs + (page << 8) + reg);
+ else
+ *val = readw(regs + (page << 8) + reg);
+
+ return 0;
+}
+
+static int b53_mmap_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
+{
+ struct b53_mmap_priv *priv = dev->priv;
+ void __iomem *regs = priv->regs;
+
+ if (WARN_ON(reg % 4))
+ return -EINVAL;
+
+ if (dev->pdata && dev->pdata->big_endian)
+ *val = ioread32be(regs + (page << 8) + reg);
+ else
+ *val = readl(regs + (page << 8) + reg);
+
+ return 0;
+}
+
+static int b53_mmap_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ struct b53_mmap_priv *priv = dev->priv;
+ void __iomem *regs = priv->regs;
+
+ if (WARN_ON(reg % 2))
+ return -EINVAL;
+
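+ /* A 48-bit access cannot be done in one go; split it on the 32-bit
+ * boundary: 16 then 32 bits when the offset is only 2-byte aligned,
+ * 32 then 16 bits when it is 4-byte aligned.
+ */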
+ if (reg % 4) {
+ u16 lo;
+ u32 hi;
+
+ if (dev->pdata && dev->pdata->big_endian) {
+ lo = ioread16be(regs + (page << 8) + reg);
+ hi = ioread32be(regs + (page << 8) + reg + 2);
+ } else {
+ lo = readw(regs + (page << 8) + reg);
+ hi = readl(regs + (page << 8) + reg + 2);
+ }
+
+ *val = ((u64)hi << 16) | lo;
+ } else {
+ u32 lo;
+ u16 hi;
+
+ if (dev->pdata && dev->pdata->big_endian) {
+ lo = ioread32be(regs + (page << 8) + reg);
+ hi = ioread16be(regs + (page << 8) + reg + 4);
+ } else {
+ lo = readl(regs + (page << 8) + reg);
+ hi = readw(regs + (page << 8) + reg + 4);
+ }
+
+ *val = ((u64)hi << 32) | lo;
+ }
+
+ return 0;
+}
+
+static int b53_mmap_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ struct b53_mmap_priv *priv = dev->priv;
+ void __iomem *regs = priv->regs;
+ u32 hi, lo;
+
+ if (WARN_ON(reg % 4))
+ return -EINVAL;
+
+ if (dev->pdata && dev->pdata->big_endian) {
+ lo = ioread32be(regs + (page << 8) + reg);
+ hi = ioread32be(regs + (page << 8) + reg + 4);
+ } else {
+ lo = readl(regs + (page << 8) + reg);
+ hi = readl(regs + (page << 8) + reg + 4);
+ }
+
+ *val = ((u64)hi << 32) | lo;
+
+ return 0;
+}
+
+static int b53_mmap_write8(struct b53_device *dev, u8 page, u8 reg, u8 value)
+{
+ struct b53_mmap_priv *priv = dev->priv;
+ void __iomem *regs = priv->regs;
+
+ writeb(value, regs + (page << 8) + reg);
+
+ return 0;
+}
+
+static int b53_mmap_write16(struct b53_device *dev, u8 page, u8 reg,
+ u16 value)
+{
+ struct b53_mmap_priv *priv = dev->priv;
+ void __iomem *regs = priv->regs;
+
+ if (WARN_ON(reg % 2))
+ return -EINVAL;
+
+ if (dev->pdata && dev->pdata->big_endian)
+ iowrite16be(value, regs + (page << 8) + reg);
+ else
+ writew(value, regs + (page << 8) + reg);
+
+ return 0;
+}
+
+static int b53_mmap_write32(struct b53_device *dev, u8 page, u8 reg,
+ u32 value)
+{
+ struct b53_mmap_priv *priv = dev->priv;
+ void __iomem *regs = priv->regs;
+
+ if (WARN_ON(reg % 4))
+ return -EINVAL;
+
+ if (dev->pdata && dev->pdata->big_endian)
+ iowrite32be(value, regs + (page << 8) + reg);
+ else
+ writel(value, regs + (page << 8) + reg);
+
+ return 0;
+}
+
+static int b53_mmap_write48(struct b53_device *dev, u8 page, u8 reg,
+ u64 value)
+{
+ if (WARN_ON(reg % 2))
+ return -EINVAL;
+
+ if (reg % 4) {
+ u32 hi = (u32)(value >> 16);
+ u16 lo = (u16)value;
+
+ b53_mmap_write16(dev, page, reg, lo);
+ b53_mmap_write32(dev, page, reg + 2, hi);
+ } else {
+ u16 hi = (u16)(value >> 32);
+ u32 lo = (u32)value;
+
+ b53_mmap_write32(dev, page, reg, lo);
+ b53_mmap_write16(dev, page, reg + 4, hi);
+ }
+
+ return 0;
+}
+
+static int b53_mmap_write64(struct b53_device *dev, u8 page, u8 reg,
+ u64 value)
+{
+ u32 hi, lo;
+
+ hi = upper_32_bits(value);
+ lo = lower_32_bits(value);
+
+ if (WARN_ON(reg % 4))
+ return -EINVAL;
+
+ b53_mmap_write32(dev, page, reg, lo);
+ b53_mmap_write32(dev, page, reg + 4, hi);
+
+ return 0;
+}
+
+static int b53_mmap_phy_read16(struct b53_device *dev, int addr, int reg,
+ u16 *value)
+{
+ return -EIO;
+}
+
+static int b53_mmap_phy_write16(struct b53_device *dev, int addr, int reg,
+ u16 value)
+{
+ return -EIO;
+}
+
+static const struct b53_io_ops b53_mmap_ops = {
+ .read8 = b53_mmap_read8,
+ .read16 = b53_mmap_read16,
+ .read32 = b53_mmap_read32,
+ .read48 = b53_mmap_read48,
+ .read64 = b53_mmap_read64,
+ .write8 = b53_mmap_write8,
+ .write16 = b53_mmap_write16,
+ .write32 = b53_mmap_write32,
+ .write48 = b53_mmap_write48,
+ .write64 = b53_mmap_write64,
+ .phy_read16 = b53_mmap_phy_read16,
+ .phy_write16 = b53_mmap_phy_write16,
+};
+
+static int b53_mmap_probe_of(struct platform_device *pdev,
+ struct b53_platform_data **ppdata)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *of_ports, *of_port;
+ struct device *dev = &pdev->dev;
+ struct b53_platform_data *pdata;
+ void __iomem *mem;
+
+ mem = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
+
+ pdata = devm_kzalloc(dev, sizeof(struct b53_platform_data),
+ GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ pdata->regs = mem;
+ pdata->chip_id = BCM63XX_DEVICE_ID;
+ pdata->big_endian = of_property_read_bool(np, "big-endian");
+
+ of_ports = of_get_child_by_name(np, "ports");
+ if (!of_ports) {
+ dev_err(dev, "no ports child node found\n");
+ return -EINVAL;
+ }
+
+ for_each_available_child_of_node(of_ports, of_port) {
+ u32 reg;
+
+ if (of_property_read_u32(of_port, "reg", &reg))
+ continue;
+
+ if (reg < B53_N_PORTS)
+ pdata->enabled_ports |= BIT(reg);
+ }
+
+ of_node_put(of_ports);
+ *ppdata = pdata;
+
+ return 0;
+}
+
+static int b53_mmap_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct b53_platform_data *pdata = pdev->dev.platform_data;
+ struct b53_mmap_priv *priv;
+ struct b53_device *dev;
+ int ret;
+
+ if (!pdata && np) {
+ ret = b53_mmap_probe_of(pdev, &pdata);
+ if (ret) {
+ dev_err(&pdev->dev, "OF probe error\n");
+ return ret;
+ }
+ }
+
+ if (!pdata)
+ return -EINVAL;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regs = pdata->regs;
+
+ dev = b53_switch_alloc(&pdev->dev, &b53_mmap_ops, priv);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->pdata = pdata;
+
+ platform_set_drvdata(pdev, dev);
+
+ return b53_switch_register(dev);
+}
+
+static int b53_mmap_remove(struct platform_device *pdev)
+{
+ struct b53_device *dev = platform_get_drvdata(pdev);
+
+ if (dev)
+ b53_switch_remove(dev);
+
+ return 0;
+}
+
+static void b53_mmap_shutdown(struct platform_device *pdev)
+{
+ struct b53_device *dev = platform_get_drvdata(pdev);
+
+ if (dev)
+ b53_switch_shutdown(dev);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static const struct of_device_id b53_mmap_of_table[] = {
+ { .compatible = "brcm,bcm3384-switch" },
+ { .compatible = "brcm,bcm6328-switch" },
+ { .compatible = "brcm,bcm6368-switch" },
+ { .compatible = "brcm,bcm63xx-switch" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, b53_mmap_of_table);
+
+static struct platform_driver b53_mmap_driver = {
+ .probe = b53_mmap_probe,
+ .remove = b53_mmap_remove,
+ .shutdown = b53_mmap_shutdown,
+ .driver = {
+ .name = "b53-switch",
+ .of_match_table = b53_mmap_of_table,
+ },
+};
+
+module_platform_driver(b53_mmap_driver);
+MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>");
+MODULE_DESCRIPTION("B53 MMAP access driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
new file mode 100644
index 000000000..795cbffd5
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -0,0 +1,392 @@
+/*
+ * B53 common definitions
+ *
+ * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __B53_PRIV_H
+#define __B53_PRIV_H
+
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/phylink.h>
+#include <linux/etherdevice.h>
+#include <net/dsa.h>
+
+#include "b53_regs.h"
+
+struct b53_device;
+struct net_device;
+
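+/* Register access backend; each bus front-end (MDIO, SPI, memory-mapped,
+ * SRAB) supplies its own implementation of these accessors.
+ */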
+struct b53_io_ops {
+ int (*read8)(struct b53_device *dev, u8 page, u8 reg, u8 *value);
+ int (*read16)(struct b53_device *dev, u8 page, u8 reg, u16 *value);
+ int (*read32)(struct b53_device *dev, u8 page, u8 reg, u32 *value);
+ int (*read48)(struct b53_device *dev, u8 page, u8 reg, u64 *value);
+ int (*read64)(struct b53_device *dev, u8 page, u8 reg, u64 *value);
+ int (*write8)(struct b53_device *dev, u8 page, u8 reg, u8 value);
+ int (*write16)(struct b53_device *dev, u8 page, u8 reg, u16 value);
+ int (*write32)(struct b53_device *dev, u8 page, u8 reg, u32 value);
+ int (*write48)(struct b53_device *dev, u8 page, u8 reg, u64 value);
+ int (*write64)(struct b53_device *dev, u8 page, u8 reg, u64 value);
+ int (*phy_read16)(struct b53_device *dev, int addr, int reg, u16 *value);
+ int (*phy_write16)(struct b53_device *dev, int addr, int reg, u16 value);
+ int (*irq_enable)(struct b53_device *dev, int port);
+ void (*irq_disable)(struct b53_device *dev, int port);
+ void (*phylink_get_caps)(struct b53_device *dev, int port,
+ struct phylink_config *config);
+ struct phylink_pcs *(*phylink_mac_select_pcs)(struct b53_device *dev,
+ int port,
+ phy_interface_t interface);
+ u8 (*serdes_map_lane)(struct b53_device *dev, int port);
+ void (*serdes_link_set)(struct b53_device *dev, int port,
+ unsigned int mode, phy_interface_t interface,
+ bool link_up);
+};
+
+#define B53_INVALID_LANE 0xff
+
+enum {
+ BCM4908_DEVICE_ID = 0x4908,
+ BCM5325_DEVICE_ID = 0x25,
+ BCM5365_DEVICE_ID = 0x65,
+ BCM5389_DEVICE_ID = 0x89,
+ BCM5395_DEVICE_ID = 0x95,
+ BCM5397_DEVICE_ID = 0x97,
+ BCM5398_DEVICE_ID = 0x98,
+ BCM53115_DEVICE_ID = 0x53115,
+ BCM53125_DEVICE_ID = 0x53125,
+ BCM53128_DEVICE_ID = 0x53128,
+ BCM63XX_DEVICE_ID = 0x6300,
+ BCM53010_DEVICE_ID = 0x53010,
+ BCM53011_DEVICE_ID = 0x53011,
+ BCM53012_DEVICE_ID = 0x53012,
+ BCM53018_DEVICE_ID = 0x53018,
+ BCM53019_DEVICE_ID = 0x53019,
+ BCM58XX_DEVICE_ID = 0x5800,
+ BCM583XX_DEVICE_ID = 0x58300,
+ BCM7445_DEVICE_ID = 0x7445,
+ BCM7278_DEVICE_ID = 0x7278,
+};
+
+struct b53_pcs {
+ struct phylink_pcs pcs;
+ struct b53_device *dev;
+ u8 lane;
+};
+
+#define B53_N_PORTS 9
+#define B53_N_PORTS_25 6
+#define B53_N_PCS 2
+
+struct b53_port {
+ u16 vlan_ctl_mask;
+ struct ethtool_eee eee;
+};
+
+struct b53_vlan {
+ u16 members;
+ u16 untag;
+ bool valid;
+};
+
+struct b53_device {
+ struct dsa_switch *ds;
+ struct b53_platform_data *pdata;
+ const char *name;
+
+ struct mutex reg_mutex;
+ struct mutex stats_mutex;
+ struct mutex arl_mutex;
+ const struct b53_io_ops *ops;
+
+ /* chip specific data */
+ u32 chip_id;
+ u8 core_rev;
+ u8 vta_regs[3];
+ u8 duplex_reg;
+ u8 jumbo_pm_reg;
+ u8 jumbo_size_reg;
+ int reset_gpio;
+ u8 num_arl_bins;
+ u16 num_arl_buckets;
+ enum dsa_tag_protocol tag_protocol;
+
+ /* used ports mask */
+ u16 enabled_ports;
+ unsigned int imp_port;
+
+ /* connect specific data */
+ u8 current_page;
+ struct device *dev;
+ u8 serdes_lane;
+
+ /* Master MDIO bus we got probed from */
+ struct mii_bus *bus;
+
+ void *priv;
+
+ /* run time configuration */
+ bool enable_jumbo;
+
+ unsigned int num_vlans;
+ struct b53_vlan *vlans;
+ bool vlan_enabled;
+ unsigned int num_ports;
+ struct b53_port *ports;
+
+ struct b53_pcs pcs[B53_N_PCS];
+};
+
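+/* Iterate over every port set in the enabled_ports mask */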
+#define b53_for_each_port(dev, i) \
+ for (i = 0; i < B53_N_PORTS; i++) \
+ if (dev->enabled_ports & BIT(i))
+
+
+static inline int is5325(struct b53_device *dev)
+{
+ return dev->chip_id == BCM5325_DEVICE_ID;
+}
+
+static inline int is5365(struct b53_device *dev)
+{
+#ifdef CONFIG_BCM47XX
+ return dev->chip_id == BCM5365_DEVICE_ID;
+#else
+ return 0;
+#endif
+}
+
+static inline int is5397_98(struct b53_device *dev)
+{
+ return dev->chip_id == BCM5397_DEVICE_ID ||
+ dev->chip_id == BCM5398_DEVICE_ID;
+}
+
+static inline int is539x(struct b53_device *dev)
+{
+ return dev->chip_id == BCM5395_DEVICE_ID ||
+ dev->chip_id == BCM5397_DEVICE_ID ||
+ dev->chip_id == BCM5398_DEVICE_ID;
+}
+
+static inline int is531x5(struct b53_device *dev)
+{
+ return dev->chip_id == BCM53115_DEVICE_ID ||
+ dev->chip_id == BCM53125_DEVICE_ID ||
+ dev->chip_id == BCM53128_DEVICE_ID;
+}
+
+static inline int is63xx(struct b53_device *dev)
+{
+ return dev->chip_id == BCM63XX_DEVICE_ID;
+}
+
+static inline int is5301x(struct b53_device *dev)
+{
+ return dev->chip_id == BCM53010_DEVICE_ID ||
+ dev->chip_id == BCM53011_DEVICE_ID ||
+ dev->chip_id == BCM53012_DEVICE_ID ||
+ dev->chip_id == BCM53018_DEVICE_ID ||
+ dev->chip_id == BCM53019_DEVICE_ID;
+}
+
+static inline int is58xx(struct b53_device *dev)
+{
+ return dev->chip_id == BCM58XX_DEVICE_ID ||
+ dev->chip_id == BCM583XX_DEVICE_ID ||
+ dev->chip_id == BCM7445_DEVICE_ID ||
+ dev->chip_id == BCM7278_DEVICE_ID;
+}
+
+#define B53_CPU_PORT_25 5
+#define B53_CPU_PORT 8
+
+static inline unsigned int b53_max_arl_entries(struct b53_device *dev)
+{
+ return dev->num_arl_buckets * dev->num_arl_bins;
+}
+
+struct b53_device *b53_switch_alloc(struct device *base,
+ const struct b53_io_ops *ops,
+ void *priv);
+
+int b53_switch_detect(struct b53_device *dev);
+
+int b53_switch_register(struct b53_device *dev);
+
+static inline void b53_switch_remove(struct b53_device *dev)
+{
+ dsa_unregister_switch(dev->ds);
+}
+
+static inline void b53_switch_shutdown(struct b53_device *dev)
+{
+ dsa_switch_shutdown(dev->ds);
+}
+
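+/* Generate locked register accessors: e.g. b53_read8(dev, page, reg, &val)
+ * wraps dev->ops->read8() with reg_mutex held.
+ */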
+#define b53_build_op(type_op_size, val_type) \
+static inline int b53_##type_op_size(struct b53_device *dev, u8 page, \
+ u8 reg, val_type val) \
+{ \
+ int ret; \
+ \
+ mutex_lock(&dev->reg_mutex); \
+ ret = dev->ops->type_op_size(dev, page, reg, val); \
+ mutex_unlock(&dev->reg_mutex); \
+ \
+ return ret; \
+}
+
+b53_build_op(read8, u8 *);
+b53_build_op(read16, u16 *);
+b53_build_op(read32, u32 *);
+b53_build_op(read48, u64 *);
+b53_build_op(read64, u64 *);
+
+b53_build_op(write8, u8);
+b53_build_op(write16, u16);
+b53_build_op(write32, u32);
+b53_build_op(write48, u64);
+b53_build_op(write64, u64);
+
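+/* Decoded view of one ARL (address resolution / FDB) table entry */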
+struct b53_arl_entry {
+ u16 port;
+ u8 mac[ETH_ALEN];
+ u16 vid;
+ u8 is_valid:1;
+ u8 is_age:1;
+ u8 is_static:1;
+};
+
+static inline void b53_arl_to_entry(struct b53_arl_entry *ent,
+ u64 mac_vid, u32 fwd_entry)
+{
+ memset(ent, 0, sizeof(*ent));
+ ent->port = fwd_entry & ARLTBL_DATA_PORT_ID_MASK;
+ ent->is_valid = !!(fwd_entry & ARLTBL_VALID);
+ ent->is_age = !!(fwd_entry & ARLTBL_AGE);
+ ent->is_static = !!(fwd_entry & ARLTBL_STATIC);
+ u64_to_ether_addr(mac_vid, ent->mac);
+ ent->vid = mac_vid >> ARLTBL_VID_S;
+}
+
+static inline void b53_arl_from_entry(u64 *mac_vid, u32 *fwd_entry,
+ const struct b53_arl_entry *ent)
+{
+ *mac_vid = ether_addr_to_u64(ent->mac);
+ *mac_vid |= (u64)(ent->vid & ARLTBL_VID_MASK) << ARLTBL_VID_S;
+ *fwd_entry = ent->port & ARLTBL_DATA_PORT_ID_MASK;
+ if (ent->is_valid)
+ *fwd_entry |= ARLTBL_VALID;
+ if (ent->is_static)
+ *fwd_entry |= ARLTBL_STATIC;
+ if (ent->is_age)
+ *fwd_entry |= ARLTBL_AGE;
+}
+
+#ifdef CONFIG_BCM47XX
+
+#include <linux/bcm47xx_nvram.h>
+#include <bcm47xx_board.h>
+static inline int b53_switch_get_reset_gpio(struct b53_device *dev)
+{
+ enum bcm47xx_board board = bcm47xx_board_get();
+
+ switch (board) {
+ case BCM47XX_BOARD_LINKSYS_WRT300NV11:
+ case BCM47XX_BOARD_LINKSYS_WRT310NV1:
+ return 8;
+ default:
+ return bcm47xx_nvram_gpio_pin("robo_reset");
+ }
+}
+#else
+static inline int b53_switch_get_reset_gpio(struct b53_device *dev)
+{
+ return -ENOENT;
+}
+#endif
+
+/* Exported functions towards other drivers */
+void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port);
+int b53_configure_vlan(struct dsa_switch *ds);
+void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data);
+void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
+int b53_get_sset_count(struct dsa_switch *ds, int port, int sset);
+void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data);
+int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
+ bool *tx_fwd_offload, struct netlink_ext_ack *extack);
+void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge);
+void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state);
+void b53_br_fast_age(struct dsa_switch *ds, int port);
+int b53_br_flags_pre(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack);
+int b53_br_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack);
+int b53_setup_devlink_resources(struct dsa_switch *ds);
+void b53_port_event(struct dsa_switch *ds, int port);
+void b53_phylink_mac_config(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state);
+void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface);
+void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause);
+int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
+ struct netlink_ext_ack *extack);
+int b53_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack);
+int b53_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+int b53_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
+int b53_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
+int b53_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data);
+int b53_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
+int b53_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
+int b53_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror, bool ingress,
+ struct netlink_ext_ack *extack);
+enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mprot);
+void b53_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror);
+int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
+void b53_disable_port(struct dsa_switch *ds, int port);
+void b53_brcm_hdr_setup(struct dsa_switch *ds, int port);
+void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable);
+int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy);
+int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
+int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
+
+#endif
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
new file mode 100644
index 000000000..b2c539a42
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_regs.h
@@ -0,0 +1,527 @@
+/*
+ * B53 register definitions
+ *
+ * Copyright (C) 2004 Broadcom Corporation
+ * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __B53_REGS_H
+#define __B53_REGS_H
+
+/* Management Port (SMP) Page offsets */
+#define B53_CTRL_PAGE 0x00 /* Control */
+#define B53_STAT_PAGE 0x01 /* Status */
+#define B53_MGMT_PAGE 0x02 /* Management Mode */
+#define B53_MIB_AC_PAGE 0x03 /* MIB Autocast */
+#define B53_ARLCTRL_PAGE 0x04 /* ARL Control */
+#define B53_ARLIO_PAGE 0x05 /* ARL Access */
+#define B53_FRAMEBUF_PAGE 0x06 /* Management frame access */
+#define B53_MEM_ACCESS_PAGE 0x08 /* Memory access */
+
+/* PHY Registers */
+#define B53_PORT_MII_PAGE(i) (0x10 + (i)) /* Port i MII Registers */
+#define B53_IM_PORT_PAGE 0x18 /* Inverse MII Port (to EMAC) */
+#define B53_ALL_PORT_PAGE 0x19 /* All ports MII (broadcast) */
+
+/* MIB registers */
+#define B53_MIB_PAGE(i) (0x20 + (i))
+
+/* Quality of Service (QoS) Registers */
+#define B53_QOS_PAGE 0x30
+
+/* Port VLAN Page */
+#define B53_PVLAN_PAGE 0x31
+
+/* VLAN Registers */
+#define B53_VLAN_PAGE 0x34
+
+/* Jumbo Frame Registers */
+#define B53_JUMBO_PAGE 0x40
+
+/* EEE Control Registers Page */
+#define B53_EEE_PAGE 0x92
+
+/* CFP Configuration Registers Page */
+#define B53_CFP_PAGE 0xa1
+
+/*************************************************************************
+ * Control Page registers
+ *************************************************************************/
+
+/* Port Control Register (8 bit) */
+#define B53_PORT_CTRL(i) (0x00 + (i))
+#define PORT_CTRL_RX_DISABLE BIT(0)
+#define PORT_CTRL_TX_DISABLE BIT(1)
+#define PORT_CTRL_RX_BCST_EN BIT(2) /* Broadcast RX (P8 only) */
+#define PORT_CTRL_RX_MCST_EN BIT(3) /* Multicast RX (P8 only) */
+#define PORT_CTRL_RX_UCST_EN BIT(4) /* Unicast RX (P8 only) */
+#define PORT_CTRL_STP_STATE_S 5
+#define PORT_CTRL_NO_STP (0 << PORT_CTRL_STP_STATE_S)
+#define PORT_CTRL_DIS_STATE (1 << PORT_CTRL_STP_STATE_S)
+#define PORT_CTRL_BLOCK_STATE (2 << PORT_CTRL_STP_STATE_S)
+#define PORT_CTRL_LISTEN_STATE (3 << PORT_CTRL_STP_STATE_S)
+#define PORT_CTRL_LEARN_STATE (4 << PORT_CTRL_STP_STATE_S)
+#define PORT_CTRL_FWD_STATE (5 << PORT_CTRL_STP_STATE_S)
+#define PORT_CTRL_STP_STATE_MASK (0x7 << PORT_CTRL_STP_STATE_S)
+
+/* SMP Control Register (8 bit) */
+#define B53_SMP_CTRL 0x0a
+
+/* Switch Mode Control Register (8 bit) */
+#define B53_SWITCH_MODE 0x0b
+#define SM_SW_FWD_MODE BIT(0) /* 1 = Managed Mode */
+#define SM_SW_FWD_EN BIT(1) /* Forwarding Enable */
+
+/* IMP Port state override register (8 bit) */
+#define B53_PORT_OVERRIDE_CTRL 0x0e
+#define PORT_OVERRIDE_LINK BIT(0)
+#define PORT_OVERRIDE_FULL_DUPLEX BIT(1) /* 0 = Half Duplex */
+#define PORT_OVERRIDE_SPEED_S 2
+#define PORT_OVERRIDE_SPEED_10M (0 << PORT_OVERRIDE_SPEED_S)
+#define PORT_OVERRIDE_SPEED_100M (1 << PORT_OVERRIDE_SPEED_S)
+#define PORT_OVERRIDE_SPEED_1000M (2 << PORT_OVERRIDE_SPEED_S)
+#define PORT_OVERRIDE_RV_MII_25 BIT(4) /* BCM5325 only */
+#define PORT_OVERRIDE_RX_FLOW BIT(4)
+#define PORT_OVERRIDE_TX_FLOW BIT(5)
+#define PORT_OVERRIDE_SPEED_2000M BIT(6) /* BCM5301X only, requires setting 1000M */
+#define PORT_OVERRIDE_EN BIT(7) /* Use the register contents */
+
+/* Power-down mode control */
+#define B53_PD_MODE_CTRL_25 0x0f
+
+/* IP Multicast control (8 bit) */
+#define B53_IP_MULTICAST_CTRL 0x21
+#define B53_IPMC_FWD_EN BIT(1)
+#define B53_UC_FWD_EN BIT(6)
+#define B53_MC_FWD_EN BIT(7)
+
+/* Switch control (8 bit) */
+#define B53_SWITCH_CTRL 0x22
+#define B53_MII_DUMB_FWDG_EN BIT(6)
+
+/* (16 bit) */
+#define B53_UC_FLOOD_MASK 0x32
+#define B53_MC_FLOOD_MASK 0x34
+#define B53_IPMC_FLOOD_MASK 0x36
+#define B53_DIS_LEARNING 0x3c
+
+/*
+ * Override Ports 0-7 State on devices with xMII interfaces (8 bit)
+ *
+ * For port 8, still use B53_PORT_OVERRIDE_CTRL.
+ * Note that not all ports are available on every device; e.g. BCM5301X
+ * does not support overriding port 6, and BCM63xx has further limitations.
+ */
+#define B53_GMII_PORT_OVERRIDE_CTRL(i) (0x58 + (i))
+#define GMII_PO_LINK BIT(0)
+#define GMII_PO_FULL_DUPLEX BIT(1) /* 0 = Half Duplex */
+#define GMII_PO_SPEED_S 2
+#define GMII_PO_SPEED_10M (0 << GMII_PO_SPEED_S)
+#define GMII_PO_SPEED_100M (1 << GMII_PO_SPEED_S)
+#define GMII_PO_SPEED_1000M (2 << GMII_PO_SPEED_S)
+#define GMII_PO_RX_FLOW BIT(4)
+#define GMII_PO_TX_FLOW BIT(5)
+#define GMII_PO_EN BIT(6) /* Use the register contents */
+#define GMII_PO_SPEED_2000M BIT(7) /* BCM5301X only, requires setting 1000M */
+
+#define B53_RGMII_CTRL_IMP 0x60
+#define RGMII_CTRL_ENABLE_GMII BIT(7)
+#define RGMII_CTRL_TIMING_SEL BIT(2)
+#define RGMII_CTRL_DLL_RXC BIT(1)
+#define RGMII_CTRL_DLL_TXC BIT(0)
+
+#define B53_RGMII_CTRL_P(i) (B53_RGMII_CTRL_IMP + (i))
+
+/* Software reset register (8 bit) */
+#define B53_SOFTRESET 0x79
+#define SW_RST BIT(7)
+#define EN_CH_RST BIT(6)
+#define EN_SW_RST BIT(4)
+
+/* Fast Aging Control register (8 bit) */
+#define B53_FAST_AGE_CTRL 0x88
+#define FAST_AGE_STATIC BIT(0)
+#define FAST_AGE_DYNAMIC BIT(1)
+#define FAST_AGE_PORT BIT(2)
+#define FAST_AGE_VLAN BIT(3)
+#define FAST_AGE_STP BIT(4)
+#define FAST_AGE_MC BIT(5)
+#define FAST_AGE_DONE BIT(7)
+
+/* Fast Aging Port Control register (8 bit) */
+#define B53_FAST_AGE_PORT_CTRL 0x89
+
+/* Fast Aging VID Control register (16 bit) */
+#define B53_FAST_AGE_VID_CTRL 0x8a
+
+/*************************************************************************
+ * Status Page registers
+ *************************************************************************/
+
+/* Link Status Summary Register (16bit) */
+#define B53_LINK_STAT 0x00
+
+/* Link Status Change Register (16 bit) */
+#define B53_LINK_STAT_CHANGE 0x02
+
+/* Port Speed Summary Register (16 bit for FE, 32 bit for GE) */
+#define B53_SPEED_STAT 0x04
+#define SPEED_PORT_FE(reg, port) (((reg) >> (port)) & 1)
+#define SPEED_PORT_GE(reg, port) (((reg) >> 2 * (port)) & 3)
+#define SPEED_STAT_10M 0
+#define SPEED_STAT_100M 1
+#define SPEED_STAT_1000M 2
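+/*
+ * e.g. on a GE device, SPEED_PORT_GE(reg, 2) == SPEED_STAT_1000M means
+ * the two status bits for port 2 report a 1000 Mb/s link (illustrative).
+ */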
+
+/* Duplex Status Summary (16 bit) */
+#define B53_DUPLEX_STAT_FE 0x06
+#define B53_DUPLEX_STAT_GE 0x08
+#define B53_DUPLEX_STAT_63XX 0x0c
+
+/* Revision ID register for BCM5325 */
+#define B53_REV_ID_25 0x50
+
+/* Strap Value (48 bit) */
+#define B53_STRAP_VALUE 0x70
+#define SV_GMII_CTRL_115 BIT(27)
+
+/*************************************************************************
+ * Management Mode Page Registers
+ *************************************************************************/
+
+/* Global Management Config Register (8 bit) */
+#define B53_GLOBAL_CONFIG 0x00
+#define GC_RESET_MIB 0x01
+#define GC_RX_BPDU_EN 0x02
+#define GC_MIB_AC_HDR_EN 0x10
+#define GC_MIB_AC_EN 0x20
+#define GC_FRM_MGMT_PORT_M 0xC0
+#define GC_FRM_MGMT_PORT_04 0x00
+#define GC_FRM_MGMT_PORT_MII 0x80
+
+/* Broadcom Header control register (8 bit) */
+#define B53_BRCM_HDR 0x03
+#define BRCM_HDR_P8_EN BIT(0) /* Enable tagging on port 8 */
+#define BRCM_HDR_P5_EN BIT(1) /* Enable tagging on port 5 */
+#define BRCM_HDR_P7_EN BIT(2) /* Enable tagging on port 7 */
+
+/* Mirror capture control register (16 bit) */
+#define B53_MIR_CAP_CTL 0x10
+#define CAP_PORT_MASK 0xf
+#define BLK_NOT_MIR BIT(14)
+#define MIRROR_EN BIT(15)
+
+/* Ingress mirror control register (16 bit) */
+#define B53_IG_MIR_CTL 0x12
+#define MIRROR_MASK 0x1ff
+#define DIV_EN BIT(13)
+#define MIRROR_FILTER_MASK 0x3
+#define MIRROR_FILTER_SHIFT 14
+#define MIRROR_ALL 0
+#define MIRROR_DA 1
+#define MIRROR_SA 2
+
+/* Ingress mirror divider register (16 bit) */
+#define B53_IG_MIR_DIV 0x14
+#define IN_MIRROR_DIV_MASK 0x3ff
+
+/* Ingress mirror MAC address register (48 bit) */
+#define B53_IG_MIR_MAC 0x16
+
+/* Egress mirror control register (16 bit) */
+#define B53_EG_MIR_CTL 0x1C
+
+/* Egress mirror divider register (16 bit) */
+#define B53_EG_MIR_DIV 0x1E
+
+/* Egress mirror MAC address register (48 bit) */
+#define B53_EG_MIR_MAC 0x20
+
+/* Device ID register (8 or 32 bit) */
+#define B53_DEVICE_ID 0x30
+
+/* Revision ID register (8 bit) */
+#define B53_REV_ID 0x40
+
+/* Broadcom header RX control (16 bit) */
+#define B53_BRCM_HDR_RX_DIS 0x60
+
+/* Broadcom header TX control (16 bit) */
+#define B53_BRCM_HDR_TX_DIS 0x62
+
+/*************************************************************************
+ * ARL Access Page Registers
+ *************************************************************************/
+
+/* VLAN Table Access Register (8 bit) */
+#define B53_VT_ACCESS 0x80
+#define B53_VT_ACCESS_9798 0x60 /* for BCM5397/BCM5398 */
+#define B53_VT_ACCESS_63XX 0x60 /* for BCM6328/62/68 */
+#define VTA_CMD_WRITE 0
+#define VTA_CMD_READ 1
+#define VTA_CMD_CLEAR 2
+#define VTA_START_CMD BIT(7)
+
+/* VLAN Table Index Register (16 bit) */
+#define B53_VT_INDEX 0x81
+#define B53_VT_INDEX_9798 0x61
+#define B53_VT_INDEX_63XX 0x62
+
+/* VLAN Table Entry Register (32 bit) */
+#define B53_VT_ENTRY 0x83
+#define B53_VT_ENTRY_9798 0x63
+#define B53_VT_ENTRY_63XX 0x64
+#define VTE_MEMBERS 0x1ff
+#define VTE_UNTAG_S 9
+#define VTE_UNTAG (0x1ff << 9)
+
+/*************************************************************************
+ * ARL I/O Registers
+ *************************************************************************/
+
+/* ARL Table Read/Write Register (8 bit) */
+#define B53_ARLTBL_RW_CTRL 0x00
+#define ARLTBL_RW BIT(0)
+#define ARLTBL_IVL_SVL_SELECT BIT(6)
+#define ARLTBL_START_DONE BIT(7)
+
+/* MAC Address Index Register (48 bit) */
+#define B53_MAC_ADDR_IDX 0x02
+
+/* VLAN ID Index Register (16 bit) */
+#define B53_VLAN_ID_IDX 0x08
+
+/* ARL Table MAC/VID Entry N Registers (64 bit)
+ *
+ * BCM5325 and BCM5365 share most definitions below
+ */
+#define B53_ARLTBL_MAC_VID_ENTRY(n) ((0x10 * (n)) + 0x10)
+#define ARLTBL_MAC_MASK 0xffffffffffffULL
+#define ARLTBL_VID_S 48
+#define ARLTBL_VID_MASK_25 0xff
+#define ARLTBL_VID_MASK 0xfff
+#define ARLTBL_DATA_PORT_ID_S_25 48
+#define ARLTBL_DATA_PORT_ID_MASK_25 0xf
+#define ARLTBL_AGE_25 BIT(61)
+#define ARLTBL_STATIC_25 BIT(62)
+#define ARLTBL_VALID_25 BIT(63)
+
+/* ARL Table Data Entry N Registers (32 bit) */
+#define B53_ARLTBL_DATA_ENTRY(n) ((0x10 * (n)) + 0x18)
+#define ARLTBL_DATA_PORT_ID_MASK 0x1ff
+#define ARLTBL_TC(tc) ((3 & (tc)) << 11)
+#define ARLTBL_AGE BIT(14)
+#define ARLTBL_STATIC BIT(15)
+#define ARLTBL_VALID BIT(16)
+
+/* Maximum number of bin entries in the ARL for all switches */
+#define B53_ARLTBL_MAX_BIN_ENTRIES 4
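+/*
+ * The total ARL capacity of a given chip is num_arl_buckets * num_arl_bins
+ * (see b53_max_arl_entries() in b53_priv.h).
+ */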
+
+/* ARL Search Control Register (8 bit) */
+#define B53_ARL_SRCH_CTL 0x50
+#define B53_ARL_SRCH_CTL_25 0x20
+#define ARL_SRCH_VLID BIT(0)
+#define ARL_SRCH_STDN BIT(7)
+
+/* ARL Search Address Register (16 bit) */
+#define B53_ARL_SRCH_ADDR 0x51
+#define B53_ARL_SRCH_ADDR_25 0x22
+#define B53_ARL_SRCH_ADDR_65 0x24
+#define ARL_ADDR_MASK GENMASK(14, 0)
+
+/* ARL Search MAC/VID Result (64 bit) */
+#define B53_ARL_SRCH_RSTL_0_MACVID 0x60
+
+/* Single register search result on 5325 */
+#define B53_ARL_SRCH_RSTL_0_MACVID_25 0x24
+/* Single register search result on 5365 */
+#define B53_ARL_SRCH_RSTL_0_MACVID_65 0x30
+
+/* ARL Search Data Result (32 bit) */
+#define B53_ARL_SRCH_RSTL_0 0x68
+
+#define B53_ARL_SRCH_RSTL_MACVID(x) (B53_ARL_SRCH_RSTL_0_MACVID + ((x) * 0x10))
+#define B53_ARL_SRCH_RSTL(x) (B53_ARL_SRCH_RSTL_0 + ((x) * 0x10))
+
+/*************************************************************************
+ * Port VLAN Registers
+ *************************************************************************/
+
+/* Port VLAN mask (16 bit); the IMP port is always 8, also on 5325 & co */
+#define B53_PVLAN_PORT_MASK(i) ((i) * 2)
+
+/* Join all VLANs register (16 bit) */
+#define B53_JOIN_ALL_VLAN_EN 0x50
+
+/*************************************************************************
+ * 802.1Q Page Registers
+ *************************************************************************/
+
+/* Global QoS Control (8 bit) */
+#define B53_QOS_GLOBAL_CTL 0x00
+
+/* Enable 802.1Q for individual Ports (16 bit) */
+#define B53_802_1P_EN 0x04
+
+/*************************************************************************
+ * VLAN Page Registers
+ *************************************************************************/
+
+/* VLAN Control 0 (8 bit) */
+#define B53_VLAN_CTRL0 0x00
+#define VC0_8021PF_CTRL_MASK 0x3
+#define VC0_8021PF_CTRL_NONE 0x0
+#define VC0_8021PF_CTRL_CHANGE_PRI 0x1
+#define VC0_8021PF_CTRL_CHANGE_VID 0x2
+#define VC0_8021PF_CTRL_CHANGE_BOTH 0x3
+#define VC0_8021QF_CTRL_MASK 0xc
+#define VC0_8021QF_CTRL_CHANGE_PRI 0x1
+#define VC0_8021QF_CTRL_CHANGE_VID 0x2
+#define VC0_8021QF_CTRL_CHANGE_BOTH 0x3
+#define VC0_RESERVED_1 BIT(1)
+#define VC0_DROP_VID_MISS BIT(4)
+#define VC0_VID_HASH_VID BIT(5)
+#define VC0_VID_CHK_EN BIT(6) /* Use VID,DA or VID,SA */
+#define VC0_VLAN_EN BIT(7) /* 802.1Q VLAN Enabled */
+
+/* VLAN Control 1 (8 bit) */
+#define B53_VLAN_CTRL1 0x01
+#define VC1_RX_MCST_TAG_EN BIT(1)
+#define VC1_RX_MCST_FWD_EN BIT(2)
+#define VC1_RX_MCST_UNTAG_EN BIT(3)
+
+/* VLAN Control 2 (8 bit) */
+#define B53_VLAN_CTRL2 0x02
+
+/* VLAN Control 3 (8 bit on BCM5325, 16 bit otherwise) */
+#define B53_VLAN_CTRL3 0x03
+#define B53_VLAN_CTRL3_63XX 0x04
+#define VC3_MAXSIZE_1532 BIT(6) /* 5325 only */
+#define VC3_HIGH_8BIT_EN BIT(7) /* 5325 only */
+
+/* VLAN Control 4 (8 bit) */
+#define B53_VLAN_CTRL4 0x05
+#define B53_VLAN_CTRL4_25 0x04
+#define B53_VLAN_CTRL4_63XX 0x06
+#define VC4_ING_VID_CHECK_S 6
+#define VC4_ING_VID_CHECK_MASK (0x3 << VC4_ING_VID_CHECK_S)
+#define VC4_ING_VID_VIO_FWD 0 /* forward, but do not learn */
+#define VC4_ING_VID_VIO_DROP 1 /* drop VID violations */
+#define VC4_NO_ING_VID_CHK 2 /* do not check */
+#define VC4_ING_VID_VIO_TO_IMP 3 /* redirect to MII port */
+
+/* VLAN Control 5 (8 bit) */
+#define B53_VLAN_CTRL5 0x06
+#define B53_VLAN_CTRL5_25 0x05
+#define B53_VLAN_CTRL5_63XX 0x07
+#define VC5_VID_FFF_EN BIT(2)
+#define VC5_DROP_VTABLE_MISS BIT(3)
+
+/* VLAN Control 6 (8 bit) */
+#define B53_VLAN_CTRL6 0x07
+#define B53_VLAN_CTRL6_63XX 0x08
+
+/* VLAN Table Access Register (16 bit) */
+#define B53_VLAN_TABLE_ACCESS_25 0x06 /* BCM5325E/5350 */
+#define B53_VLAN_TABLE_ACCESS_65 0x08 /* BCM5365 */
+#define VTA_VID_LOW_MASK_25 0xf
+#define VTA_VID_LOW_MASK_65 0xff
+#define VTA_VID_HIGH_S_25 4
+#define VTA_VID_HIGH_S_65 8
+#define VTA_VID_HIGH_MASK_25 (0xff << VTA_VID_HIGH_S_25)
+#define VTA_VID_HIGH_MASK_65 (0xf << VTA_VID_HIGH_S_65)
+#define VTA_RW_STATE BIT(12)
+#define VTA_RW_STATE_RD 0
+#define VTA_RW_STATE_WR BIT(12)
+#define VTA_RW_OP_EN BIT(13)
+
+/* VLAN Read/Write Registers (16/32 bit) */
+#define B53_VLAN_WRITE_25 0x08
+#define B53_VLAN_WRITE_65 0x0a
+#define B53_VLAN_READ 0x0c
+#define VA_MEMBER_MASK 0x3f
+#define VA_UNTAG_S_25 6
+#define VA_UNTAG_MASK_25 0x3f
+#define VA_UNTAG_S_65 7
+#define VA_UNTAG_MASK_65 0x1f
+#define VA_VID_HIGH_S 12
+#define VA_VID_HIGH_MASK (0xffff << VA_VID_HIGH_S)
+#define VA_VALID_25 BIT(20)
+#define VA_VALID_25_R4 BIT(24)
+#define VA_VALID_65 BIT(14)
+
+/* VLAN Port Default Tag (16 bit) */
+#define B53_VLAN_PORT_DEF_TAG(i) (0x10 + 2 * (i))
+
+/*************************************************************************
+ * Jumbo Frame Page Registers
+ *************************************************************************/
+
+/* Jumbo Enable Port Mask (bit i == port i enabled) (32 bit) */
+#define B53_JUMBO_PORT_MASK 0x01
+#define B53_JUMBO_PORT_MASK_63XX 0x04
+#define JPM_10_100_JUMBO_EN BIT(24) /* GigE always enabled */
+
+/* Good Frame Max Size without 802.1Q TAG (16 bit) */
+#define B53_JUMBO_MAX_SIZE 0x05
+#define B53_JUMBO_MAX_SIZE_63XX 0x08
+#define JMS_MIN_SIZE 1518
+#define JMS_MAX_SIZE 9724
+
+/*************************************************************************
+ * EEE Configuration Page Registers
+ *************************************************************************/
+
+/* EEE Enable control register (16 bit) */
+#define B53_EEE_EN_CTRL 0x00
+
+/* EEE LPI assert status register (16 bit) */
+#define B53_EEE_LPI_ASSERT_STS 0x02
+
+/* EEE LPI indicate status register (16 bit) */
+#define B53_EEE_LPI_INDICATE 0x4
+
+/* EEE Receiving idle symbols status register (16 bit) */
+#define B53_EEE_RX_IDLE_SYM_STS 0x6
+
+/* EEE Pipeline timer register (32 bit) */
+#define B53_EEE_PIP_TIMER 0xC
+
+/* EEE Sleep timer Gig register (32 bit) */
+#define B53_EEE_SLEEP_TIMER_GIG(i) (0x10 + 4 * (i))
+
+/* EEE Sleep timer FE register (32 bit) */
+#define B53_EEE_SLEEP_TIMER_FE(i) (0x34 + 4 * (i))
+
+/* EEE Minimum LP timer Gig register (32 bit) */
+#define B53_EEE_MIN_LP_TIMER_GIG(i) (0x58 + 4 * (i))
+
+/* EEE Minimum LP timer FE register (32 bit) */
+#define B53_EEE_MIN_LP_TIMER_FE(i) (0x7c + 4 * (i))
+
+/* EEE Wake timer Gig register (16 bit) */
+#define B53_EEE_WAKE_TIMER_GIG(i) (0xa0 + 2 * (i))
+
+/* EEE Wake timer FE register (16 bit) */
+#define B53_EEE_WAKE_TIMER_FE(i) (0xb2 + 2 * (i))
+
+/*************************************************************************
+ * CFP Configuration Page Registers
+ *************************************************************************/
+
+/* CFP Control Register with ports map (8 bit) */
+#define B53_CFP_CTRL 0x00
+
+#endif /* !__B53_REGS_H */
diff --git a/drivers/net/dsa/b53/b53_serdes.c b/drivers/net/dsa/b53/b53_serdes.c
new file mode 100644
index 000000000..069021077
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_serdes.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
+/*
+ * Northstar Plus switch SerDes/SGMII PHY main logic
+ *
+ * Copyright (C) 2018 Florian Fainelli <f.fainelli@gmail.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+#include <net/dsa.h>
+
+#include "b53_priv.h"
+#include "b53_serdes.h"
+#include "b53_regs.h"
+
+static inline struct b53_pcs *pcs_to_b53_pcs(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct b53_pcs, pcs);
+}
+
+static void b53_serdes_write_blk(struct b53_device *dev, u8 offset, u16 block,
+ u16 value)
+{
+ b53_write16(dev, B53_SERDES_PAGE, B53_SERDES_BLKADDR, block);
+ b53_write16(dev, B53_SERDES_PAGE, offset, value);
+}
+
+static u16 b53_serdes_read_blk(struct b53_device *dev, u8 offset, u16 block)
+{
+ u16 value;
+
+ b53_write16(dev, B53_SERDES_PAGE, B53_SERDES_BLKADDR, block);
+ b53_read16(dev, B53_SERDES_PAGE, offset, &value);
+
+ return value;
+}
+
+static void b53_serdes_set_lane(struct b53_device *dev, u8 lane)
+{
+ if (dev->serdes_lane == lane)
+ return;
+
+ WARN_ON(lane > 1);
+
+ b53_serdes_write_blk(dev, B53_SERDES_LANE,
+ SERDES_XGXSBLK0_BLOCKADDRESS, lane);
+ dev->serdes_lane = lane;
+}
+
+static void b53_serdes_write(struct b53_device *dev, u8 lane,
+ u8 offset, u16 block, u16 value)
+{
+ b53_serdes_set_lane(dev, lane);
+ b53_serdes_write_blk(dev, offset, block, value);
+}
+
+static u16 b53_serdes_read(struct b53_device *dev, u8 lane,
+ u8 offset, u16 block)
+{
+ b53_serdes_set_lane(dev, lane);
+ return b53_serdes_read_blk(dev, offset, block);
+}
+
+static int b53_serdes_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct b53_device *dev = pcs_to_b53_pcs(pcs)->dev;
+ u8 lane = pcs_to_b53_pcs(pcs)->lane;
+ u16 reg;
+
+ reg = b53_serdes_read(dev, lane, B53_SERDES_DIGITAL_CONTROL(1),
+ SERDES_DIGITAL_BLK);
+ if (interface == PHY_INTERFACE_MODE_1000BASEX)
+ reg |= FIBER_MODE_1000X;
+ else
+ reg &= ~FIBER_MODE_1000X;
+ b53_serdes_write(dev, lane, B53_SERDES_DIGITAL_CONTROL(1),
+ SERDES_DIGITAL_BLK, reg);
+
+ return 0;
+}
+
+static void b53_serdes_an_restart(struct phylink_pcs *pcs)
+{
+ struct b53_device *dev = pcs_to_b53_pcs(pcs)->dev;
+ u8 lane = pcs_to_b53_pcs(pcs)->lane;
+ u16 reg;
+
+ reg = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_BMCR),
+ SERDES_MII_BLK);
+ reg |= BMCR_ANRESTART;
+ b53_serdes_write(dev, lane, B53_SERDES_MII_REG(MII_BMCR),
+ SERDES_MII_BLK, reg);
+}
+
+static void b53_serdes_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct b53_device *dev = pcs_to_b53_pcs(pcs)->dev;
+ u8 lane = pcs_to_b53_pcs(pcs)->lane;
+ u16 dig, bmsr;
+
+ dig = b53_serdes_read(dev, lane, B53_SERDES_DIGITAL_STATUS,
+ SERDES_DIGITAL_BLK);
+ bmsr = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_BMSR),
+ SERDES_MII_BLK);
+
+ switch ((dig >> SPEED_STATUS_SHIFT) & SPEED_STATUS_MASK) {
+ case SPEED_STATUS_10:
+ state->speed = SPEED_10;
+ break;
+ case SPEED_STATUS_100:
+ state->speed = SPEED_100;
+ break;
+ case SPEED_STATUS_1000:
+ state->speed = SPEED_1000;
+ break;
+ default:
+ case SPEED_STATUS_2500:
+ state->speed = SPEED_2500;
+ break;
+ }
+
+ state->duplex = dig & DUPLEX_STATUS ? DUPLEX_FULL : DUPLEX_HALF;
+ state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
+ state->link = !!(dig & LINK_STATUS);
+ if (dig & PAUSE_RESOLUTION_RX_SIDE)
+ state->pause |= MLO_PAUSE_RX;
+ if (dig & PAUSE_RESOLUTION_TX_SIDE)
+ state->pause |= MLO_PAUSE_TX;
+}
+
+void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int mode,
+ phy_interface_t interface, bool link_up)
+{
+ u8 lane = b53_serdes_map_lane(dev, port);
+ u16 reg;
+
+ if (lane == B53_INVALID_LANE)
+ return;
+
+ reg = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_BMCR),
+ SERDES_MII_BLK);
+ if (link_up)
+ reg &= ~BMCR_PDOWN;
+ else
+ reg |= BMCR_PDOWN;
+ b53_serdes_write(dev, lane, B53_SERDES_MII_REG(MII_BMCR),
+ SERDES_MII_BLK, reg);
+}
+EXPORT_SYMBOL(b53_serdes_link_set);
+
+static const struct phylink_pcs_ops b53_pcs_ops = {
+ .pcs_get_state = b53_serdes_get_state,
+ .pcs_config = b53_serdes_config,
+ .pcs_an_restart = b53_serdes_an_restart,
+};
+
+void b53_serdes_phylink_get_caps(struct b53_device *dev, int port,
+ struct phylink_config *config)
+{
+ u8 lane = b53_serdes_map_lane(dev, port);
+
+ if (lane == B53_INVALID_LANE)
+ return;
+
+ switch (lane) {
+ case 0:
+ /* It appears lane 0 supports 2500base-X and 1000base-X */
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ config->supported_interfaces);
+ config->mac_capabilities |= MAC_2500FD;
+ fallthrough;
+ case 1:
+ /* It appears lane 1 only supports 1000base-X and SGMII */
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+ config->mac_capabilities |= MAC_1000FD;
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL(b53_serdes_phylink_get_caps);
+
+struct phylink_pcs *b53_serdes_phylink_mac_select_pcs(struct b53_device *dev,
+ int port,
+ phy_interface_t interface)
+{
+ u8 lane = b53_serdes_map_lane(dev, port);
+
+ if (lane == B53_INVALID_LANE || lane >= B53_N_PCS ||
+ !dev->pcs[lane].dev)
+ return NULL;
+
+ if (!phy_interface_mode_is_8023z(interface) &&
+ interface != PHY_INTERFACE_MODE_SGMII)
+ return NULL;
+
+ return &dev->pcs[lane].pcs;
+}
+EXPORT_SYMBOL(b53_serdes_phylink_mac_select_pcs);
+
+int b53_serdes_init(struct b53_device *dev, int port)
+{
+ u8 lane = b53_serdes_map_lane(dev, port);
+ struct b53_pcs *pcs;
+ u16 id0, msb, lsb;
+
+ if (lane == B53_INVALID_LANE)
+ return -EINVAL;
+
+ id0 = b53_serdes_read(dev, lane, B53_SERDES_ID0, SERDES_ID0);
+ msb = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_PHYSID1),
+ SERDES_MII_BLK);
+ lsb = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_PHYSID2),
+ SERDES_MII_BLK);
+ if (id0 == 0 || id0 == 0xffff) {
+ dev_err(dev->dev, "SerDes not initialized, check settings\n");
+ return -ENODEV;
+ }
+
+ dev_info(dev->dev,
+ "SerDes lane %d, model: %d, rev %c%d (OUI: 0x%08x)\n",
+ lane, id0 & SERDES_ID0_MODEL_MASK,
+ (id0 >> SERDES_ID0_REV_LETTER_SHIFT) + 0x41,
+ (id0 >> SERDES_ID0_REV_NUM_SHIFT) & SERDES_ID0_REV_NUM_MASK,
+ (u32)msb << 16 | lsb);
+
+ pcs = &dev->pcs[lane];
+ pcs->dev = dev;
+ pcs->lane = lane;
+ pcs->pcs.ops = &b53_pcs_ops;
+
+ return 0;
+}
+EXPORT_SYMBOL(b53_serdes_init);
+
+MODULE_AUTHOR("Florian Fainelli <f.fainelli@gmail.com>");
+MODULE_DESCRIPTION("B53 Switch SerDes driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/dsa/b53/b53_serdes.h b/drivers/net/dsa/b53/b53_serdes.h
new file mode 100644
index 000000000..ef81f5da5
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_serdes.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
+/*
+ * Northstar Plus switch SerDes/SGMII PHY definitions
+ *
+ * Copyright (C) 2018 Florian Fainelli <f.fainelli@gmail.com>
+ */
+
+#include <linux/phy.h>
+#include <linux/types.h>
+
+/* Non-standard page used to access SerDes PHY registers on NorthStar Plus */
+#define B53_SERDES_PAGE 0x16
+#define B53_SERDES_BLKADDR 0x3e
+#define B53_SERDES_LANE 0x3c
+
+#define B53_SERDES_ID0 0x20
+#define SERDES_ID0_MODEL_MASK 0x3f
+#define SERDES_ID0_REV_NUM_SHIFT 11
+#define SERDES_ID0_REV_NUM_MASK 0x7
+#define SERDES_ID0_REV_LETTER_SHIFT 14
+
+#define B53_SERDES_MII_REG(x) (0x20 + (x) * 2)
+#define B53_SERDES_DIGITAL_CONTROL(x) (0x1e + (x) * 2)
+#define B53_SERDES_DIGITAL_STATUS 0x28
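+/*
+ * e.g. B53_SERDES_MII_REG(MII_BMCR) maps the standard MII BMCR register
+ * (register 0) to SerDes page offset 0x20 (illustrative).
+ */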
+
+/* SERDES_DIGITAL_CONTROL1 */
+#define FIBER_MODE_1000X BIT(0)
+#define TBI_INTERFACE BIT(1)
+#define SIGNAL_DETECT_EN BIT(2)
+#define INVERT_SIGNAL_DETECT BIT(3)
+#define AUTODET_EN BIT(4)
+#define SGMII_MASTER_MODE BIT(5)
+#define DISABLE_DLL_PWRDOWN BIT(6)
+#define CRC_CHECKER_DIS BIT(7)
+#define COMMA_DET_EN BIT(8)
+#define ZERO_COMMA_DET_EN BIT(9)
+#define REMOTE_LOOPBACK BIT(10)
+#define SEL_RX_PKTS_FOR_CNTR BIT(11)
+#define MASTER_MDIO_PHY_SEL BIT(13)
+#define DISABLE_SIGNAL_DETECT_FLT BIT(14)
+
+/* SERDES_DIGITAL_CONTROL2 */
+#define EN_PARALLEL_DET BIT(0)
+#define DIS_FALSE_LINK BIT(1)
+#define FLT_FORCE_LINK BIT(2)
+#define EN_AUTONEG_ERR_TIMER BIT(3)
+#define DIS_REMOTE_FAULT_SENSING BIT(4)
+#define FORCE_XMIT_DATA BIT(5)
+#define AUTONEG_FAST_TIMERS BIT(6)
+#define DIS_CARRIER_EXTEND BIT(7)
+#define DIS_TRRR_GENERATION BIT(8)
+#define BYPASS_PCS_RX BIT(9)
+#define BYPASS_PCS_TX BIT(10)
+#define TEST_CNTR_EN BIT(11)
+#define TX_PACKET_SEQ_TEST BIT(12)
+#define TX_IDLE_JAM_SEQ_TEST BIT(13)
+#define CLR_BER_CNTR BIT(14)
+
+/* SERDES_DIGITAL_CONTROL3 */
+#define TX_FIFO_RST BIT(0)
+#define FIFO_ELAST_TX_RX_SHIFT 1
+#define FIFO_ELAST_TX_RX_5K 0
+#define FIFO_ELAST_TX_RX_10K 1
+#define FIFO_ELAST_TX_RX_13_5K 2
+#define FIFO_ELAST_TX_RX_18_5K 3
+#define BLOCK_TXEN_MODE BIT(9)
+#define JAM_FALSE_CARRIER_MODE BIT(10)
+#define EXT_PHY_CRS_MODE BIT(11)
+#define INVERT_EXT_PHY_CRS BIT(12)
+#define DISABLE_TX_CRS BIT(13)
+
+/* SERDES_DIGITAL_STATUS */
+#define SGMII_MODE BIT(0)
+#define LINK_STATUS BIT(1)
+#define DUPLEX_STATUS BIT(2)
+#define SPEED_STATUS_SHIFT 3
+#define SPEED_STATUS_10 0
+#define SPEED_STATUS_100 1
+#define SPEED_STATUS_1000 2
+#define SPEED_STATUS_2500 3
+#define SPEED_STATUS_MASK SPEED_STATUS_2500
+#define PAUSE_RESOLUTION_TX_SIDE BIT(5)
+#define PAUSE_RESOLUTION_RX_SIDE BIT(6)
+#define LINK_STATUS_CHANGE BIT(7)
+#define EARLY_END_EXT_DET BIT(8)
+#define CARRIER_EXT_ERR_DET BIT(9)
+#define RX_ERR_DET BIT(10)
+#define TX_ERR_DET BIT(11)
+#define CRC_ERR_DET BIT(12)
+#define FALSE_CARRIER_ERR_DET BIT(13)
+#define RXFIFO_ERR_DET BIT(14)
+#define TXFIFO_ERR_DET BIT(15)
+
+/* Block offsets */
+#define SERDES_DIGITAL_BLK 0x8300
+#define SERDES_ID0 0x8310
+#define SERDES_MII_BLK 0xffe0
+#define SERDES_XGXSBLK0_BLOCKADDRESS 0xffd0
+
+struct phylink_link_state;
+
+static inline u8 b53_serdes_map_lane(struct b53_device *dev, int port)
+{
+ if (!dev->ops->serdes_map_lane)
+ return B53_INVALID_LANE;
+
+ return dev->ops->serdes_map_lane(dev, port);
+}
+
+void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int mode,
+ phy_interface_t interface, bool link_up);
+struct phylink_pcs *b53_serdes_phylink_mac_select_pcs(struct b53_device *dev,
+ int port,
+ phy_interface_t interface);
+void b53_serdes_phylink_get_caps(struct b53_device *dev, int port,
+ struct phylink_config *config);
+#if IS_ENABLED(CONFIG_B53_SERDES)
+int b53_serdes_init(struct b53_device *dev, int port);
+#else
+static inline int b53_serdes_init(struct b53_device *dev, int port)
+{
+ return -ENODEV;
+}
+#endif
diff --git a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c
new file mode 100644
index 000000000..308f15d38
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_spi.c
@@ -0,0 +1,376 @@
+/*
+ * B53 register access through SPI
+ *
+ * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <asm/unaligned.h>
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/platform_data/b53.h>
+
+#include "b53_priv.h"
+
+#define B53_SPI_DATA 0xf0
+
+#define B53_SPI_STATUS 0xfe
+#define B53_SPI_CMD_SPIF BIT(7)
+#define B53_SPI_CMD_RACK BIT(5)
+
+#define B53_SPI_CMD_READ 0x00
+#define B53_SPI_CMD_WRITE 0x01
+#define B53_SPI_CMD_NORMAL 0x60
+#define B53_SPI_CMD_FAST 0x10
+
+#define B53_SPI_PAGE_SELECT 0xff
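+
+/*
+ * Read access sequence, as implemented by the helpers below:
+ *
+ *  1. poll B53_SPI_STATUS until the SPIF bit clears
+ *  2. select the target page by writing B53_SPI_PAGE_SELECT
+ *  3. issue the read for the target offset and poll for RACK
+ *  4. fetch the latched value from B53_SPI_DATA
+ */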
+
+static inline int b53_spi_read_reg(struct spi_device *spi, u8 reg, u8 *val,
+ unsigned int len)
+{
+ u8 txbuf[2];
+
+ txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_READ;
+ txbuf[1] = reg;
+
+ return spi_write_then_read(spi, txbuf, 2, val, len);
+}
+
+static inline int b53_spi_clear_status(struct spi_device *spi)
+{
+ unsigned int i;
+ u8 rxbuf;
+ int ret;
+
+ for (i = 0; i < 10; i++) {
+ ret = b53_spi_read_reg(spi, B53_SPI_STATUS, &rxbuf, 1);
+ if (ret)
+ return ret;
+
+ if (!(rxbuf & B53_SPI_CMD_SPIF))
+ break;
+
+ mdelay(1);
+ }
+
+ if (i == 10)
+ return -EIO;
+
+ return 0;
+}
+
+static inline int b53_spi_set_page(struct spi_device *spi, u8 page)
+{
+ u8 txbuf[3];
+
+ txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
+ txbuf[1] = B53_SPI_PAGE_SELECT;
+ txbuf[2] = page;
+
+ return spi_write(spi, txbuf, sizeof(txbuf));
+}
+
+static inline int b53_prepare_reg_access(struct spi_device *spi, u8 page)
+{
+ int ret = b53_spi_clear_status(spi);
+
+ if (ret)
+ return ret;
+
+ return b53_spi_set_page(spi, page);
+}
+
+static int b53_spi_prepare_reg_read(struct spi_device *spi, u8 reg)
+{
+ u8 rxbuf;
+ int retry_count;
+ int ret;
+
+ ret = b53_spi_read_reg(spi, reg, &rxbuf, 1);
+ if (ret)
+ return ret;
+
+ for (retry_count = 0; retry_count < 10; retry_count++) {
+ ret = b53_spi_read_reg(spi, B53_SPI_STATUS, &rxbuf, 1);
+ if (ret)
+ return ret;
+
+ if (rxbuf & B53_SPI_CMD_RACK)
+ break;
+
+ mdelay(1);
+ }
+
+ if (retry_count == 10)
+ return -EIO;
+
+ return 0;
+}
+
+static int b53_spi_read(struct b53_device *dev, u8 page, u8 reg, u8 *data,
+ unsigned int len)
+{
+ struct spi_device *spi = dev->priv;
+ int ret;
+
+ ret = b53_prepare_reg_access(spi, page);
+ if (ret)
+ return ret;
+
+ ret = b53_spi_prepare_reg_read(spi, reg);
+ if (ret)
+ return ret;
+
+ return b53_spi_read_reg(spi, B53_SPI_DATA, data, len);
+}
+
+static int b53_spi_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
+{
+ return b53_spi_read(dev, page, reg, val, 1);
+}
+
+static int b53_spi_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
+{
+ __le16 value;
+ int ret;
+
+ ret = b53_spi_read(dev, page, reg, (u8 *)&value, 2);
+
+ if (!ret)
+ *val = le16_to_cpu(value);
+
+ return ret;
+}
+
+static int b53_spi_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
+{
+ __le32 value;
+ int ret;
+
+ ret = b53_spi_read(dev, page, reg, (u8 *)&value, 4);
+
+ if (!ret)
+ *val = le32_to_cpu(value);
+
+ return ret;
+}
+
+static int b53_spi_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ __le64 value = 0; /* the read fills only 6 of the 8 bytes */
+ int ret;
+
+ *val = 0;
+ ret = b53_spi_read(dev, page, reg, (u8 *)&value, 6);
+ if (!ret)
+ *val = le64_to_cpu(value);
+
+ return ret;
+}
+
+static int b53_spi_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ __le64 value;
+ int ret;
+
+ ret = b53_spi_read(dev, page, reg, (u8 *)&value, 8);
+
+ if (!ret)
+ *val = le64_to_cpu(value);
+
+ return ret;
+}
+
+static int b53_spi_write8(struct b53_device *dev, u8 page, u8 reg, u8 value)
+{
+ struct spi_device *spi = dev->priv;
+ int ret;
+ u8 txbuf[3];
+
+ ret = b53_prepare_reg_access(spi, page);
+ if (ret)
+ return ret;
+
+ txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
+ txbuf[1] = reg;
+ txbuf[2] = value;
+
+ return spi_write(spi, txbuf, sizeof(txbuf));
+}
+
+static int b53_spi_write16(struct b53_device *dev, u8 page, u8 reg, u16 value)
+{
+ struct spi_device *spi = dev->priv;
+ int ret;
+ u8 txbuf[4];
+
+ ret = b53_prepare_reg_access(spi, page);
+ if (ret)
+ return ret;
+
+ txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
+ txbuf[1] = reg;
+ put_unaligned_le16(value, &txbuf[2]);
+
+ return spi_write(spi, txbuf, sizeof(txbuf));
+}
+
+static int b53_spi_write32(struct b53_device *dev, u8 page, u8 reg, u32 value)
+{
+ struct spi_device *spi = dev->priv;
+ int ret;
+ u8 txbuf[6];
+
+ ret = b53_prepare_reg_access(spi, page);
+ if (ret)
+ return ret;
+
+ txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
+ txbuf[1] = reg;
+ put_unaligned_le32(value, &txbuf[2]);
+
+ return spi_write(spi, txbuf, sizeof(txbuf));
+}
+
+static int b53_spi_write48(struct b53_device *dev, u8 page, u8 reg, u64 value)
+{
+ struct spi_device *spi = dev->priv;
+ int ret;
+ u8 txbuf[10];
+
+ ret = b53_prepare_reg_access(spi, page);
+ if (ret)
+ return ret;
+
+ txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
+ txbuf[1] = reg;
+ put_unaligned_le64(value, &txbuf[2]);
+
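+ /* send only 2 command bytes + the 6 low-order value bytes */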
+ return spi_write(spi, txbuf, sizeof(txbuf) - 2);
+}
+
+static int b53_spi_write64(struct b53_device *dev, u8 page, u8 reg, u64 value)
+{
+ struct spi_device *spi = dev->priv;
+ int ret;
+ u8 txbuf[10];
+
+ ret = b53_prepare_reg_access(spi, page);
+ if (ret)
+ return ret;
+
+ txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
+ txbuf[1] = reg;
+ put_unaligned_le64(value, &txbuf[2]);
+
+ return spi_write(spi, txbuf, sizeof(txbuf));
+}
+
+static const struct b53_io_ops b53_spi_ops = {
+ .read8 = b53_spi_read8,
+ .read16 = b53_spi_read16,
+ .read32 = b53_spi_read32,
+ .read48 = b53_spi_read48,
+ .read64 = b53_spi_read64,
+ .write8 = b53_spi_write8,
+ .write16 = b53_spi_write16,
+ .write32 = b53_spi_write32,
+ .write48 = b53_spi_write48,
+ .write64 = b53_spi_write64,
+};
+
+static int b53_spi_probe(struct spi_device *spi)
+{
+ struct b53_device *dev;
+ int ret;
+
+ dev = b53_switch_alloc(&spi->dev, &b53_spi_ops, spi);
+ if (!dev)
+ return -ENOMEM;
+
+ if (spi->dev.platform_data)
+ dev->pdata = spi->dev.platform_data;
+
+ ret = b53_switch_register(dev);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, dev);
+
+ return 0;
+}
+
+static void b53_spi_remove(struct spi_device *spi)
+{
+ struct b53_device *dev = spi_get_drvdata(spi);
+
+ if (dev)
+ b53_switch_remove(dev);
+}
+
+static void b53_spi_shutdown(struct spi_device *spi)
+{
+ struct b53_device *dev = spi_get_drvdata(spi);
+
+ if (dev)
+ b53_switch_shutdown(dev);
+
+ spi_set_drvdata(spi, NULL);
+}
+
+static const struct of_device_id b53_spi_of_match[] = {
+ { .compatible = "brcm,bcm5325" },
+ { .compatible = "brcm,bcm5365" },
+ { .compatible = "brcm,bcm5395" },
+ { .compatible = "brcm,bcm5397" },
+ { .compatible = "brcm,bcm5398" },
+ { .compatible = "brcm,bcm53115" },
+ { .compatible = "brcm,bcm53125" },
+ { .compatible = "brcm,bcm53128" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, b53_spi_of_match);
+
+static const struct spi_device_id b53_spi_ids[] = {
+ { .name = "bcm5325" },
+ { .name = "bcm5365" },
+ { .name = "bcm5395" },
+ { .name = "bcm5397" },
+ { .name = "bcm5398" },
+ { .name = "bcm53115" },
+ { .name = "bcm53125" },
+ { .name = "bcm53128" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, b53_spi_ids);
+
+static struct spi_driver b53_spi_driver = {
+ .driver = {
+ .name = "b53-switch",
+ .of_match_table = b53_spi_of_match,
+ },
+ .probe = b53_spi_probe,
+ .remove = b53_spi_remove,
+ .shutdown = b53_spi_shutdown,
+ .id_table = b53_spi_ids,
+};
+
+module_spi_driver(b53_spi_driver);
+
+MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>");
+MODULE_DESCRIPTION("B53 SPI access driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
new file mode 100644
index 000000000..bcb440344
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -0,0 +1,698 @@
+/*
+ * B53 register access through Switch Register Access Bridge Registers
+ *
+ * Copyright (C) 2013 Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/b53.h>
+#include <linux/of.h>
+
+#include "b53_priv.h"
+#include "b53_serdes.h"
+
+/* command and status register of the SRAB */
+#define B53_SRAB_CMDSTAT 0x2c
+#define B53_SRAB_CMDSTAT_RST BIT(2)
+#define B53_SRAB_CMDSTAT_WRITE BIT(1)
+#define B53_SRAB_CMDSTAT_GORDYN BIT(0)
+#define B53_SRAB_CMDSTAT_PAGE 24
+#define B53_SRAB_CMDSTAT_REG 16
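+/*
+ * e.g. a read of page 0x00, register 0x0e is kicked off by writing
+ * (0x00 << B53_SRAB_CMDSTAT_PAGE) | (0x0e << B53_SRAB_CMDSTAT_REG) |
+ * B53_SRAB_CMDSTAT_GORDYN and has completed once GORDYN reads back as
+ * zero; see b53_srab_op() below (illustrative).
+ */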
+
+/* high order word of write data to switch register */
+#define B53_SRAB_WD_H 0x30
+
+/* low order word of write data to switch register */
+#define B53_SRAB_WD_L 0x34
+
+/* high order word of read data from switch register */
+#define B53_SRAB_RD_H 0x38
+
+/* low order word of read data from switch register */
+#define B53_SRAB_RD_L 0x3c
+
+/* control register of the SRAB */
+#define B53_SRAB_CTRLS 0x40
+#define B53_SRAB_CTRLS_HOST_INTR BIT(1)
+#define B53_SRAB_CTRLS_RCAREQ BIT(3)
+#define B53_SRAB_CTRLS_RCAGNT BIT(4)
+#define B53_SRAB_CTRLS_SW_INIT_DONE BIT(6)
+
+/* the register captures interrupt pulses from the switch */
+#define B53_SRAB_INTR 0x44
+#define B53_SRAB_INTR_P(x) BIT(x)
+#define B53_SRAB_SWITCH_PHY BIT(8)
+#define B53_SRAB_1588_SYNC BIT(9)
+#define B53_SRAB_IMP1_SLEEP_TIMER BIT(10)
+#define B53_SRAB_P7_SLEEP_TIMER BIT(11)
+#define B53_SRAB_IMP0_SLEEP_TIMER BIT(12)
+
+/* Port mux configuration registers */
+#define B53_MUX_CONFIG_P5 0x00
+#define MUX_CONFIG_SGMII 0
+#define MUX_CONFIG_MII_LITE 1
+#define MUX_CONFIG_RGMII 2
+#define MUX_CONFIG_GMII 3
+#define MUX_CONFIG_GPHY 4
+#define MUX_CONFIG_INTERNAL 5
+#define MUX_CONFIG_MASK 0x7
+#define B53_MUX_CONFIG_P4 0x04
+
+struct b53_srab_port_priv {
+ int irq;
+ bool irq_enabled;
+ struct b53_device *dev;
+ unsigned int num;
+ phy_interface_t mode;
+};
+
+struct b53_srab_priv {
+ void __iomem *regs;
+ void __iomem *mux_config;
+ struct b53_srab_port_priv port_intrs[B53_N_PORTS];
+};
+
+static int b53_srab_request_grant(struct b53_device *dev)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ u32 ctrls;
+ int i;
+
+ ctrls = readl(regs + B53_SRAB_CTRLS);
+ ctrls |= B53_SRAB_CTRLS_RCAREQ;
+ writel(ctrls, regs + B53_SRAB_CTRLS);
+
+ for (i = 0; i < 20; i++) {
+ ctrls = readl(regs + B53_SRAB_CTRLS);
+ if (ctrls & B53_SRAB_CTRLS_RCAGNT)
+ break;
+ usleep_range(10, 100);
+ }
+ if (WARN_ON(i == 20))
+ return -EIO;
+
+ return 0;
+}
+
+static void b53_srab_release_grant(struct b53_device *dev)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ u32 ctrls;
+
+ ctrls = readl(regs + B53_SRAB_CTRLS);
+ ctrls &= ~B53_SRAB_CTRLS_RCAREQ;
+ writel(ctrls, regs + B53_SRAB_CTRLS);
+}
+
+static int b53_srab_op(struct b53_device *dev, u8 page, u8 reg, u32 op)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int i;
+ u32 cmdstat;
+
+ /* set register address */
+ cmdstat = (page << B53_SRAB_CMDSTAT_PAGE) |
+ (reg << B53_SRAB_CMDSTAT_REG) |
+ B53_SRAB_CMDSTAT_GORDYN |
+ op;
+ writel(cmdstat, regs + B53_SRAB_CMDSTAT);
+
+ /* check if operation completed */
+ for (i = 0; i < 5; ++i) {
+ cmdstat = readl(regs + B53_SRAB_CMDSTAT);
+ if (!(cmdstat & B53_SRAB_CMDSTAT_GORDYN))
+ break;
+ usleep_range(10, 100);
+ }
+
+ if (WARN_ON(i == 5))
+ return -EIO;
+
+ return 0;
+}
+
+static int b53_srab_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ ret = b53_srab_op(dev, page, reg, 0);
+ if (ret)
+ goto err;
+
+ *val = readl(regs + B53_SRAB_RD_L) & 0xff;
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ ret = b53_srab_op(dev, page, reg, 0);
+ if (ret)
+ goto err;
+
+ *val = readl(regs + B53_SRAB_RD_L) & 0xffff;
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ ret = b53_srab_op(dev, page, reg, 0);
+ if (ret)
+ goto err;
+
+ *val = readl(regs + B53_SRAB_RD_L);
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ ret = b53_srab_op(dev, page, reg, 0);
+ if (ret)
+ goto err;
+
+ *val = readl(regs + B53_SRAB_RD_L);
+ *val += ((u64)readl(regs + B53_SRAB_RD_H) & 0xffff) << 32;
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ ret = b53_srab_op(dev, page, reg, 0);
+ if (ret)
+ goto err;
+
+ *val = readl(regs + B53_SRAB_RD_L);
+ *val += (u64)readl(regs + B53_SRAB_RD_H) << 32;
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_write8(struct b53_device *dev, u8 page, u8 reg, u8 value)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ writel(value, regs + B53_SRAB_WD_L);
+
+ ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_write16(struct b53_device *dev, u8 page, u8 reg,
+ u16 value)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ writel(value, regs + B53_SRAB_WD_L);
+
+ ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_write32(struct b53_device *dev, u8 page, u8 reg,
+ u32 value)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ writel(value, regs + B53_SRAB_WD_L);
+
+ ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_write48(struct b53_device *dev, u8 page, u8 reg,
+ u64 value)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ writel((u32)value, regs + B53_SRAB_WD_L);
+ writel((u16)(value >> 32), regs + B53_SRAB_WD_H);
+
+ ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_write64(struct b53_device *dev, u8 page, u8 reg,
+ u64 value)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ writel((u32)value, regs + B53_SRAB_WD_L);
+ writel((u32)(value >> 32), regs + B53_SRAB_WD_H);
+
+ ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static irqreturn_t b53_srab_port_thread(int irq, void *dev_id)
+{
+ struct b53_srab_port_priv *port = dev_id;
+ struct b53_device *dev = port->dev;
+
+ if (port->mode == PHY_INTERFACE_MODE_SGMII)
+ b53_port_event(dev->ds, port->num);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t b53_srab_port_isr(int irq, void *dev_id)
+{
+ struct b53_srab_port_priv *port = dev_id;
+ struct b53_device *dev = port->dev;
+ struct b53_srab_priv *priv = dev->priv;
+
+ /* Acknowledge the interrupt */
+ writel(BIT(port->num), priv->regs + B53_SRAB_INTR);
+
+ return IRQ_WAKE_THREAD;
+}
+
+#if IS_ENABLED(CONFIG_B53_SERDES)
+static u8 b53_srab_serdes_map_lane(struct b53_device *dev, int port)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ struct b53_srab_port_priv *p = &priv->port_intrs[port];
+
+ if (p->mode != PHY_INTERFACE_MODE_SGMII)
+ return B53_INVALID_LANE;
+
+ switch (port) {
+ case 5:
+ return 0;
+ case 4:
+ return 1;
+ default:
+ return B53_INVALID_LANE;
+ }
+}
+#endif
+
+static int b53_srab_irq_enable(struct b53_device *dev, int port)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ struct b53_srab_port_priv *p = &priv->port_intrs[port];
+ int ret = 0;
+
+ /* The interrupt is optional; if it was not specified, do not
+ * make this fatal
+ */
+ if (p->irq == -ENXIO)
+ return ret;
+
+ ret = request_threaded_irq(p->irq, b53_srab_port_isr,
+ b53_srab_port_thread, 0,
+ dev_name(dev->dev), p);
+ if (!ret)
+ p->irq_enabled = true;
+
+ return ret;
+}
+
+static void b53_srab_irq_disable(struct b53_device *dev, int port)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ struct b53_srab_port_priv *p = &priv->port_intrs[port];
+
+ if (p->irq_enabled) {
+ free_irq(p->irq, p);
+ p->irq_enabled = false;
+ }
+}
+
+static void b53_srab_phylink_get_caps(struct b53_device *dev, int port,
+ struct phylink_config *config)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ struct b53_srab_port_priv *p = &priv->port_intrs[port];
+
+ switch (p->mode) {
+ case PHY_INTERFACE_MODE_SGMII:
+#if IS_ENABLED(CONFIG_B53_SERDES)
+ /* If p->mode indicates SGMII mode, that essentially means we
+ * are using a serdes. Ask the serdes for the capabilities.
+ */
+ b53_serdes_phylink_get_caps(dev, port, config);
+#endif
+ break;
+
+ case PHY_INTERFACE_MODE_NA:
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ /* If we support RGMII, support all RGMII modes, since
+ * that dictates the PHY delay settings.
+ */
+ phy_interface_set_rgmii(config->supported_interfaces);
+ break;
+
+ default:
+ /* Some other mode (e.g. MII, GMII etc) */
+ __set_bit(p->mode, config->supported_interfaces);
+ break;
+ }
+}
+
+static const struct b53_io_ops b53_srab_ops = {
+ .read8 = b53_srab_read8,
+ .read16 = b53_srab_read16,
+ .read32 = b53_srab_read32,
+ .read48 = b53_srab_read48,
+ .read64 = b53_srab_read64,
+ .write8 = b53_srab_write8,
+ .write16 = b53_srab_write16,
+ .write32 = b53_srab_write32,
+ .write48 = b53_srab_write48,
+ .write64 = b53_srab_write64,
+ .irq_enable = b53_srab_irq_enable,
+ .irq_disable = b53_srab_irq_disable,
+ .phylink_get_caps = b53_srab_phylink_get_caps,
+#if IS_ENABLED(CONFIG_B53_SERDES)
+ .phylink_mac_select_pcs = b53_serdes_phylink_mac_select_pcs,
+ .serdes_map_lane = b53_srab_serdes_map_lane,
+ .serdes_link_set = b53_serdes_link_set,
+#endif
+};
+
+static const struct of_device_id b53_srab_of_match[] = {
+ { .compatible = "brcm,bcm53010-srab" },
+ { .compatible = "brcm,bcm53011-srab" },
+ { .compatible = "brcm,bcm53012-srab" },
+ { .compatible = "brcm,bcm53018-srab" },
+ { .compatible = "brcm,bcm53019-srab" },
+ { .compatible = "brcm,bcm5301x-srab" },
+ { .compatible = "brcm,bcm11360-srab", .data = (void *)BCM583XX_DEVICE_ID },
+ { .compatible = "brcm,bcm58522-srab", .data = (void *)BCM58XX_DEVICE_ID },
+ { .compatible = "brcm,bcm58525-srab", .data = (void *)BCM58XX_DEVICE_ID },
+ { .compatible = "brcm,bcm58535-srab", .data = (void *)BCM58XX_DEVICE_ID },
+ { .compatible = "brcm,bcm58622-srab", .data = (void *)BCM58XX_DEVICE_ID },
+ { .compatible = "brcm,bcm58623-srab", .data = (void *)BCM58XX_DEVICE_ID },
+ { .compatible = "brcm,bcm58625-srab", .data = (void *)BCM58XX_DEVICE_ID },
+ { .compatible = "brcm,bcm88312-srab", .data = (void *)BCM58XX_DEVICE_ID },
+ { .compatible = "brcm,cygnus-srab", .data = (void *)BCM583XX_DEVICE_ID },
+ { .compatible = "brcm,nsp-srab", .data = (void *)BCM58XX_DEVICE_ID },
+ { .compatible = "brcm,omega-srab", .data = (void *)BCM583XX_DEVICE_ID },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, b53_srab_of_match);
+
+static void b53_srab_intr_set(struct b53_srab_priv *priv, bool set)
+{
+ u32 reg;
+
+ reg = readl(priv->regs + B53_SRAB_CTRLS);
+ if (set)
+ reg |= B53_SRAB_CTRLS_HOST_INTR;
+ else
+ reg &= ~B53_SRAB_CTRLS_HOST_INTR;
+ writel(reg, priv->regs + B53_SRAB_CTRLS);
+}
+
+static void b53_srab_prepare_irq(struct platform_device *pdev)
+{
+ struct b53_device *dev = platform_get_drvdata(pdev);
+ struct b53_srab_priv *priv = dev->priv;
+ struct b53_srab_port_priv *port;
+ unsigned int i;
+ char *name;
+
+ /* Clear all pending interrupts */
+ writel(0xffffffff, priv->regs + B53_SRAB_INTR);
+
+ for (i = 0; i < B53_N_PORTS; i++) {
+ port = &priv->port_intrs[i];
+
+ /* There is no port 6 */
+ if (i == 6)
+ continue;
+
+ name = kasprintf(GFP_KERNEL, "link_state_p%d", i);
+ if (!name)
+ return;
+
+ port->num = i;
+ port->dev = dev;
+ port->irq = platform_get_irq_byname_optional(pdev, name);
+ kfree(name);
+ }
+
+ b53_srab_intr_set(priv, true);
+}
+
+static void b53_srab_mux_init(struct platform_device *pdev)
+{
+ struct b53_device *dev = platform_get_drvdata(pdev);
+ struct b53_srab_priv *priv = dev->priv;
+ struct b53_srab_port_priv *p;
+ unsigned int port;
+ u32 reg, off = 0;
+ int ret;
+
+ if (dev->pdata && dev->pdata->chip_id != BCM58XX_DEVICE_ID)
+ return;
+
+ priv->mux_config = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(priv->mux_config))
+ return;
+
+ /* Obtain the port mux configuration so we know which lanes
+ * actually map to SerDes lanes
+ */
+ for (port = 5; port > 3; port--, off += 4) {
+ p = &priv->port_intrs[port];
+
+ reg = readl(priv->mux_config + B53_MUX_CONFIG_P5 + off);
+ switch (reg & MUX_CONFIG_MASK) {
+ case MUX_CONFIG_SGMII:
+ p->mode = PHY_INTERFACE_MODE_SGMII;
+ ret = b53_serdes_init(dev, port);
+ if (ret)
+ continue;
+ break;
+ case MUX_CONFIG_MII_LITE:
+ p->mode = PHY_INTERFACE_MODE_MII;
+ break;
+ case MUX_CONFIG_GMII:
+ p->mode = PHY_INTERFACE_MODE_GMII;
+ break;
+ case MUX_CONFIG_RGMII:
+ p->mode = PHY_INTERFACE_MODE_RGMII;
+ break;
+ case MUX_CONFIG_INTERNAL:
+ p->mode = PHY_INTERFACE_MODE_INTERNAL;
+ break;
+ default:
+ p->mode = PHY_INTERFACE_MODE_NA;
+ break;
+ }
+
+ if (p->mode != PHY_INTERFACE_MODE_NA)
+ dev_info(&pdev->dev, "Port %d mode: %s\n",
+ port, phy_modes(p->mode));
+ }
+}
+
+static int b53_srab_probe(struct platform_device *pdev)
+{
+ struct b53_platform_data *pdata = pdev->dev.platform_data;
+ struct device_node *dn = pdev->dev.of_node;
+ const struct of_device_id *of_id = NULL;
+ struct b53_srab_priv *priv;
+ struct b53_device *dev;
+
+ if (dn)
+ of_id = of_match_node(b53_srab_of_match, dn);
+
+ if (of_id) {
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ pdata->chip_id = (u32)(unsigned long)of_id->data;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->regs))
+ return PTR_ERR(priv->regs);
+
+ dev = b53_switch_alloc(&pdev->dev, &b53_srab_ops, priv);
+ if (!dev)
+ return -ENOMEM;
+
+ if (pdata)
+ dev->pdata = pdata;
+
+ platform_set_drvdata(pdev, dev);
+
+ b53_srab_prepare_irq(pdev);
+ b53_srab_mux_init(pdev);
+
+ return b53_switch_register(dev);
+}
+
+static int b53_srab_remove(struct platform_device *pdev)
+{
+ struct b53_device *dev = platform_get_drvdata(pdev);
+
+ if (!dev)
+ return 0;
+
+ b53_srab_intr_set(dev->priv, false);
+ b53_switch_remove(dev);
+
+ return 0;
+}
+
+static void b53_srab_shutdown(struct platform_device *pdev)
+{
+ struct b53_device *dev = platform_get_drvdata(pdev);
+
+ if (!dev)
+ return;
+
+ b53_switch_shutdown(dev);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static struct platform_driver b53_srab_driver = {
+ .probe = b53_srab_probe,
+ .remove = b53_srab_remove,
+ .shutdown = b53_srab_shutdown,
+ .driver = {
+ .name = "b53-srab-switch",
+ .of_match_table = b53_srab_of_match,
+ },
+};
+
+module_platform_driver(b53_srab_driver);
+MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
+MODULE_DESCRIPTION("B53 Switch Register Access Bridge Registers (SRAB) access driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
new file mode 100644
index 000000000..cd1f240c9
--- /dev/null
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -0,0 +1,1623 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Broadcom Starfighter 2 DSA switch driver
+ *
+ * Copyright (C) 2014, Broadcom Corporation
+ */
+
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/phylink.h>
+#include <linux/mii.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <net/dsa.h>
+#include <linux/ethtool.h>
+#include <linux/if_bridge.h>
+#include <linux/brcmphy.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_data/b53.h>
+
+#include "bcm_sf2.h"
+#include "bcm_sf2_regs.h"
+#include "b53/b53_priv.h"
+#include "b53/b53_regs.h"
+
+static u16 bcm_sf2_reg_rgmii_cntrl(struct bcm_sf2_priv *priv, int port)
+{
+ switch (priv->type) {
+ case BCM4908_DEVICE_ID:
+ switch (port) {
+ case 7:
+ return REG_RGMII_11_CNTRL;
+ default:
+ break;
+ }
+ break;
+ default:
+ switch (port) {
+ case 0:
+ return REG_RGMII_0_CNTRL;
+ case 1:
+ return REG_RGMII_1_CNTRL;
+ case 2:
+ return REG_RGMII_2_CNTRL;
+ default:
+ break;
+ }
+ }
+
+ WARN_ONCE(1, "Unsupported port %d\n", port);
+
+ /* RO fallback reg */
+ return REG_SWITCH_STATUS;
+}
+
+static u16 bcm_sf2_reg_led_base(struct bcm_sf2_priv *priv, int port)
+{
+ switch (port) {
+ case 0:
+ return REG_LED_0_CNTRL;
+ case 1:
+ return REG_LED_1_CNTRL;
+ case 2:
+ return REG_LED_2_CNTRL;
+ }
+
+ switch (priv->type) {
+ case BCM4908_DEVICE_ID:
+ switch (port) {
+ case 3:
+ return REG_LED_3_CNTRL;
+ case 7:
+ return REG_LED_4_CNTRL;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ WARN_ONCE(1, "Unsupported port %d\n", port);
+
+ /* RO fallback reg */
+ return REG_SWITCH_STATUS;
+}
+
+static u32 bcm_sf2_port_override_offset(struct bcm_sf2_priv *priv, int port)
+{
+ switch (priv->type) {
+ case BCM4908_DEVICE_ID:
+ case BCM7445_DEVICE_ID:
+ return port == 8 ? CORE_STS_OVERRIDE_IMP :
+ CORE_STS_OVERRIDE_GMIIP_PORT(port);
+ case BCM7278_DEVICE_ID:
+ return port == 8 ? CORE_STS_OVERRIDE_IMP2 :
+ CORE_STS_OVERRIDE_GMIIP2_PORT(port);
+ default:
+ WARN_ONCE(1, "Unsupported device: %d\n", priv->type);
+ }
+
+ /* RO fallback register */
+ return REG_SWITCH_STATUS;
+}
+
+/* Return the number of active ports, not counting the IMP (CPU) port */
+static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ unsigned int port, count = 0;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_is_cpu_port(ds, port))
+ continue;
+ if (priv->port_sts[port].enabled)
+ count++;
+ }
+
+ return count;
+}
+
+static void bcm_sf2_recalc_clock(struct dsa_switch *ds)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ unsigned long new_rate;
+ unsigned int ports_active;
+ /* Frequencies in Hz */
+ static const unsigned long rate_table[] = {
+ 59220000,
+ 60820000,
+ 62500000,
+ 62500000,
+ };
+
+ ports_active = bcm_sf2_num_active_ports(ds);
+ if (ports_active == 0 || !priv->clk_mdiv)
+ return;
+
+ /* If we overflow our table, just use the recommended operational
+ * frequency
+ */
+ if (ports_active > ARRAY_SIZE(rate_table))
+ new_rate = 90000000;
+ else
+ new_rate = rate_table[ports_active - 1];
+ clk_set_rate(priv->clk_mdiv, new_rate);
+}
+
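+/* Worked example (informal): with two user ports enabled,
+ * bcm_sf2_recalc_clock() picks rate_table[1], i.e. 60.82 MHz; with more
+ * than four active ports it falls back to the recommended 90 MHz rate.
+ */
+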
+static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ unsigned int i;
+ u32 reg;
+
+ /* Enable the port memories */
+ reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
+ reg &= ~P_TXQ_PSM_VDD(port);
+ core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
+
+ /* Enable forwarding */
+ core_writel(priv, SW_FWDG_EN, CORE_SWMODE);
+
+ /* Enable IMP port in dumb mode */
+ reg = core_readl(priv, CORE_SWITCH_CTRL);
+ reg |= MII_DUMB_FWDG_EN;
+ core_writel(priv, reg, CORE_SWITCH_CTRL);
+
+ /* Configure Traffic Class to QoS mapping, allow each priority to map
+ * to a different queue number
+ */
+ reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
+ for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
+ reg |= i << (PRT_TO_QID_SHIFT * i);
+ core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));
+
+ b53_brcm_hdr_setup(ds, port);
+
+ if (port == 8) {
+ /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
+ reg = core_readl(priv, CORE_IMP_CTL);
+ reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
+ reg &= ~(RX_DIS | TX_DIS);
+ core_writel(priv, reg, CORE_IMP_CTL);
+ } else {
+ reg = core_readl(priv, CORE_G_PCTL_PORT(port));
+ reg &= ~(RX_DIS | TX_DIS);
+ core_writel(priv, reg, CORE_G_PCTL_PORT(port));
+ }
+
+ priv->port_sts[port].enabled = true;
+}
+
+static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ u32 reg;
+
+ reg = reg_readl(priv, REG_SPHY_CNTRL);
+ if (enable) {
+ reg |= PHY_RESET;
+ reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | IDDQ_GLOBAL_PWR | CK25_DIS);
+ reg_writel(priv, reg, REG_SPHY_CNTRL);
+ udelay(21);
+ reg = reg_readl(priv, REG_SPHY_CNTRL);
+ reg &= ~PHY_RESET;
+ } else {
+ reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
+ reg_writel(priv, reg, REG_SPHY_CNTRL);
+ mdelay(1);
+ reg |= CK25_DIS;
+ }
+ reg_writel(priv, reg, REG_SPHY_CNTRL);
+
+ /* Use PHY-driven LED signaling */
+ if (!enable) {
+ u16 led_ctrl = bcm_sf2_reg_led_base(priv, 0);
+
+ if (priv->type == BCM7278_DEVICE_ID ||
+ priv->type == BCM7445_DEVICE_ID) {
+ reg = reg_led_readl(priv, led_ctrl, 0);
+ reg |= LED_CNTRL_SPDLNK_SRC_SEL;
+ reg_led_writel(priv, reg, led_ctrl, 0);
+ }
+ }
+}
+
+static inline void bcm_sf2_port_intr_enable(struct bcm_sf2_priv *priv,
+ int port)
+{
+ unsigned int off;
+
+ switch (port) {
+ case 7:
+ off = P7_IRQ_OFF;
+ break;
+ case 0:
+ /* Port 0 interrupts are located on the first bank */
+ intrl2_0_mask_clear(priv, P_IRQ_MASK(P0_IRQ_OFF));
+ return;
+ default:
+ off = P_IRQ_OFF(port);
+ break;
+ }
+
+ intrl2_1_mask_clear(priv, P_IRQ_MASK(off));
+}
+
+static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv,
+ int port)
+{
+ unsigned int off;
+
+ switch (port) {
+ case 7:
+ off = P7_IRQ_OFF;
+ break;
+ case 0:
+ /* Port 0 interrupts are located on the first bank */
+ intrl2_0_mask_set(priv, P_IRQ_MASK(P0_IRQ_OFF));
+ intrl2_0_writel(priv, P_IRQ_MASK(P0_IRQ_OFF), INTRL2_CPU_CLEAR);
+ return;
+ default:
+ off = P_IRQ_OFF(port);
+ break;
+ }
+
+ intrl2_1_mask_set(priv, P_IRQ_MASK(off));
+ intrl2_1_writel(priv, P_IRQ_MASK(off), INTRL2_CPU_CLEAR);
+}
+
+static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ unsigned int i;
+ u32 reg;
+
+ if (!dsa_is_user_port(ds, port))
+ return 0;
+
+ priv->port_sts[port].enabled = true;
+
+ bcm_sf2_recalc_clock(ds);
+
+ /* Clear the memory power down */
+ reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
+ reg &= ~P_TXQ_PSM_VDD(port);
+ core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
+
+ /* Enable Broadcom tags for that port if requested */
+ if (priv->brcm_tag_mask & BIT(port))
+ b53_brcm_hdr_setup(ds, port);
+
+ /* Configure Traffic Class to QoS mapping, allow each priority to map
+ * to a different queue number
+ */
+ reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
+ for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
+ reg |= i << (PRT_TO_QID_SHIFT * i);
+ core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));
+
+ /* Re-enable the GPHY and re-apply workarounds */
+ if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
+ bcm_sf2_gphy_enable_set(ds, true);
+ if (phy) {
+ /* if phy_stop() has been called before, phy
+ * will be in halted state, and phy_start()
+ * will call resume.
+ *
+ * the resume path does not configure back
+ * autoneg settings, and since we hard reset
+ * the phy manually here, we need to reset the
+ * state machine also.
+ */
+ phy->state = PHY_READY;
+ phy_init_hw(phy);
+ }
+ }
+
+ /* Enable MoCA port interrupts to get notified */
+ if (port == priv->moca_port)
+ bcm_sf2_port_intr_enable(priv, port);
+
+ /* Set per-queue pause threshold to 32 */
+ core_writel(priv, 32, CORE_TXQ_THD_PAUSE_QN_PORT(port));
+
+ /* Set ACB threshold to 24 */
+ for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++) {
+ reg = acb_readl(priv, ACB_QUEUE_CFG(port *
+ SF2_NUM_EGRESS_QUEUES + i));
+ reg &= ~XOFF_THRESHOLD_MASK;
+ reg |= 24;
+ acb_writel(priv, reg, ACB_QUEUE_CFG(port *
+ SF2_NUM_EGRESS_QUEUES + i));
+ }
+
+ return b53_enable_port(ds, port, phy);
+}
+
+static void bcm_sf2_port_disable(struct dsa_switch *ds, int port)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ u32 reg;
+
+ /* Disable learning while in WoL mode */
+ if (priv->wol_ports_mask & (1 << port)) {
+ reg = core_readl(priv, CORE_DIS_LEARN);
+ reg |= BIT(port);
+ core_writel(priv, reg, CORE_DIS_LEARN);
+ return;
+ }
+
+ if (port == priv->moca_port)
+ bcm_sf2_port_intr_disable(priv, port);
+
+ if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
+ bcm_sf2_gphy_enable_set(ds, false);
+
+ b53_disable_port(ds, port);
+
+ /* Power down the port memory */
+ reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
+ reg |= P_TXQ_PSM_VDD(port);
+ core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
+
+ priv->port_sts[port].enabled = false;
+
+ bcm_sf2_recalc_clock(ds);
+}
+
+static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr,
+ int regnum, u16 val)
+{
+ int ret = 0;
+ u32 reg;
+
+ reg = reg_readl(priv, REG_SWITCH_CNTRL);
+ reg |= MDIO_MASTER_SEL;
+ reg_writel(priv, reg, REG_SWITCH_CNTRL);
+
+ /* Page << 8 | offset */
+ reg = 0x70;
+ reg <<= 2;
+ core_writel(priv, addr, reg);
+
+ /* Page << 8 | offset */
+ reg = 0x80 << 8 | regnum << 1;
+ reg <<= 2;
+
+ if (op)
+ ret = core_readl(priv, reg);
+ else
+ core_writel(priv, val, reg);
+
+ reg = reg_readl(priv, REG_SWITCH_CNTRL);
+ reg &= ~MDIO_MASTER_SEL;
+ reg_writel(priv, reg, REG_SWITCH_CNTRL);
+
+ return ret & 0xffff;
+}
+
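+/* Worked example (informal): for regnum 2, the pseudo-PHY access above
+ * computes (0x80 << 8 | 2 << 1) << 2 == 0x20010 as the SWITCH_CORE
+ * offset handed to core_readl()/core_writel().
+ */
+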
+static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct bcm_sf2_priv *priv = bus->priv;
+
+ /* Intercept reads from the Broadcom pseudo-PHY address; otherwise,
+ * send them to our master MDIO bus controller
+ */
+ if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
+ return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);
+ else
+ return mdiobus_read_nested(priv->master_mii_bus, addr, regnum);
+}
+
+static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct bcm_sf2_priv *priv = bus->priv;
+
+ /* Intercept writes to the Broadcom pseudo-PHY address; otherwise,
+ * send them to our master MDIO bus controller
+ */
+ if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
+ return bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
+ else
+ return mdiobus_write_nested(priv->master_mii_bus, addr,
+ regnum, val);
+}
+
+static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
+{
+ struct dsa_switch *ds = dev_id;
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+
+ priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
+ ~priv->irq0_mask;
+ intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
+{
+ struct dsa_switch *ds = dev_id;
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+
+ priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
+ ~priv->irq1_mask;
+ intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
+
+ if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF)) {
+ priv->port_sts[7].link = true;
+ dsa_port_phylink_mac_change(ds, 7, true);
+ }
+ if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF)) {
+ priv->port_sts[7].link = false;
+ dsa_port_phylink_mac_change(ds, 7, false);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
+{
+ unsigned int timeout = 1000;
+ u32 reg;
+ int ret;
+
+ /* The watchdog reset does not work on 7278, we need to hit the
+ * "external" reset line through the reset controller.
+ */
+ if (priv->type == BCM7278_DEVICE_ID) {
+ ret = reset_control_assert(priv->rcdev);
+ if (ret)
+ return ret;
+
+ return reset_control_deassert(priv->rcdev);
+ }
+
+ reg = core_readl(priv, CORE_WATCHDOG_CTRL);
+ reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
+ core_writel(priv, reg, CORE_WATCHDOG_CTRL);
+
+ do {
+ reg = core_readl(priv, CORE_WATCHDOG_CTRL);
+ if (!(reg & SOFTWARE_RESET))
+ break;
+
+ usleep_range(1000, 2000);
+ } while (timeout-- > 0);
+
+ if (timeout == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void bcm_sf2_crossbar_setup(struct bcm_sf2_priv *priv)
+{
+ struct device *dev = priv->dev->ds->dev;
+ int shift;
+ u32 mask;
+ u32 reg;
+ int i;
+
+ mask = BIT(priv->num_crossbar_int_ports) - 1;
+
+ reg = reg_readl(priv, REG_CROSSBAR);
+ switch (priv->type) {
+ case BCM4908_DEVICE_ID:
+ shift = CROSSBAR_BCM4908_INT_P7 * priv->num_crossbar_int_ports;
+ reg &= ~(mask << shift);
+ if (0) /* FIXME */
+ reg |= CROSSBAR_BCM4908_EXT_SERDES << shift;
+ else if (priv->int_phy_mask & BIT(7))
+ reg |= CROSSBAR_BCM4908_EXT_GPHY4 << shift;
+ else if (phy_interface_mode_is_rgmii(priv->port_sts[7].mode))
+ reg |= CROSSBAR_BCM4908_EXT_RGMII << shift;
+ else if (WARN(1, "Invalid port mode\n"))
+ return;
+ break;
+ default:
+ return;
+ }
+ reg_writel(priv, reg, REG_CROSSBAR);
+
+ reg = reg_readl(priv, REG_CROSSBAR);
+ for (i = 0; i < priv->num_crossbar_int_ports; i++) {
+ shift = i * priv->num_crossbar_int_ports;
+
+ dev_dbg(dev, "crossbar int port #%d - ext port #%d\n", i,
+ (reg >> shift) & mask);
+ }
+}
+
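+/* On BCM4908 (num_crossbar_int_ports == 2) each crossbar field is two
+ * bits wide, so mask == 0x3 above; other chips return early and leave
+ * the crossbar untouched.
+ */
+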
+static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
+{
+ intrl2_0_mask_set(priv, 0xffffffff);
+ intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+ intrl2_1_mask_set(priv, 0xffffffff);
+ intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+}
+
+static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
+ struct device_node *dn)
+{
+ struct device *dev = priv->dev->ds->dev;
+ struct bcm_sf2_port_status *port_st;
+ struct device_node *port;
+ unsigned int port_num;
+ struct property *prop;
+ int err;
+
+ priv->moca_port = -1;
+
+ for_each_available_child_of_node(dn, port) {
+ if (of_property_read_u32(port, "reg", &port_num))
+ continue;
+
+ if (port_num >= DSA_MAX_PORTS) {
+ dev_err(dev, "Invalid port number %d\n", port_num);
+ continue;
+ }
+
+ port_st = &priv->port_sts[port_num];
+
+ /* Internal PHYs get assigned a specific 'phy-mode' property
+ * value: "internal" to help flag them before MDIO probing
+ * has completed, since they might be turned off at that
+ * time
+ */
+ err = of_get_phy_mode(port, &port_st->mode);
+ if (err)
+ continue;
+
+ if (port_st->mode == PHY_INTERFACE_MODE_INTERNAL)
+ priv->int_phy_mask |= 1 << port_num;
+
+ if (port_st->mode == PHY_INTERFACE_MODE_MOCA)
+ priv->moca_port = port_num;
+
+ if (of_property_read_bool(port, "brcm,use-bcm-hdr"))
+ priv->brcm_tag_mask |= 1 << port_num;
+
+ /* Ensure that port 5 is not picked up as a DSA CPU port
+ * flavour but a regular port instead. We should be using
+ * devlink to be able to set the port flavour.
+ */
+ if (port_num == 5 && priv->type == BCM7278_DEVICE_ID) {
+ prop = of_find_property(port, "ethernet", NULL);
+ if (prop)
+ of_remove_property(port, prop);
+ }
+ }
+}
+
+static int bcm_sf2_mdio_register(struct dsa_switch *ds)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ struct device_node *dn, *child;
+ struct phy_device *phydev;
+ struct property *prop;
+ static int index;
+ int err, reg;
+
+ /* Find our integrated MDIO bus node */
+ dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
+ priv->master_mii_bus = of_mdio_find_bus(dn);
+ if (!priv->master_mii_bus) {
+ err = -EPROBE_DEFER;
+ goto err_of_node_put;
+ }
+
+ priv->master_mii_dn = dn;
+
+ priv->slave_mii_bus = mdiobus_alloc();
+ if (!priv->slave_mii_bus) {
+ err = -ENOMEM;
+ goto err_put_master_mii_bus_dev;
+ }
+
+ priv->slave_mii_bus->priv = priv;
+ priv->slave_mii_bus->name = "sf2 slave mii";
+ priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read;
+ priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write;
+ snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
+ index++);
+ priv->slave_mii_bus->dev.of_node = dn;
+
+ /* Include the pseudo-PHY address to divert reads towards our
+ * workaround. This is only required for 7445D0, since 7445E0
+ * disconnects the internal switch pseudo-PHY such that we can use the
+ * regular SWITCH_MDIO master controller instead.
+ *
+ * Here we flag the pseudo-PHY as needing special treatment; all other
+ * PHY reads/writes go to the master MDIO bus controller that comes
+ * with this switch, backed by the "mdio-unimac" driver.
+ */
+ if (of_machine_is_compatible("brcm,bcm7445d0"))
+ priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0);
+ else
+ priv->indir_phy_mask = 0;
+
+ ds->phys_mii_mask = priv->indir_phy_mask;
+ ds->slave_mii_bus = priv->slave_mii_bus;
+ priv->slave_mii_bus->parent = ds->dev->parent;
+ priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;
+
+ /* We need to make sure that of_phy_connect() will not work by
+ * removing the 'phandle' and 'linux,phandle' properties and
+ * unregistering the existing PHY device that was already registered.
+ */
+ for_each_available_child_of_node(dn, child) {
+ if (of_property_read_u32(child, "reg", &reg) ||
+ reg >= PHY_MAX_ADDR)
+ continue;
+
+ if (!(priv->indir_phy_mask & BIT(reg)))
+ continue;
+
+ prop = of_find_property(child, "phandle", NULL);
+ if (prop)
+ of_remove_property(child, prop);
+
+ prop = of_find_property(child, "linux,phandle", NULL);
+ if (prop)
+ of_remove_property(child, prop);
+
+ phydev = of_phy_find_device(child);
+ if (phydev)
+ phy_device_remove(phydev);
+ }
+
+ err = mdiobus_register(priv->slave_mii_bus);
+ if (err && dn)
+ goto err_free_slave_mii_bus;
+
+ return 0;
+
+err_free_slave_mii_bus:
+ mdiobus_free(priv->slave_mii_bus);
+err_put_master_mii_bus_dev:
+ put_device(&priv->master_mii_bus->dev);
+err_of_node_put:
+ of_node_put(dn);
+ return err;
+}
+
+static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
+{
+ mdiobus_unregister(priv->slave_mii_bus);
+ mdiobus_free(priv->slave_mii_bus);
+ put_device(&priv->master_mii_bus->dev);
+ of_node_put(priv->master_mii_dn);
+}
+
+static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+
+ /* The BCM7xxx PHY driver expects to find the integrated PHY revision
+ * in bits 15:8 and the patch level in bits 7:0 which is exactly what
+ * the REG_PHY_REVISION register layout is.
+ */
+ if (priv->int_phy_mask & BIT(port))
+ return priv->hw_params.gphy_rev;
+ else
+ return PHY_BRCM_AUTO_PWRDWN_ENABLE |
+ PHY_BRCM_DIS_TXCRXC_NOENRGY |
+ PHY_BRCM_IDDQ_SUSPEND;
+}
+
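+/* For instance, a REG_PHY_REVISION value of 0x0102 advertises GPHY
+ * revision 0x01, patch level 0x02 to the BCM7xxx PHY driver.
+ */
+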
+static void bcm_sf2_sw_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ unsigned long *interfaces = config->supported_interfaces;
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+
+ if (priv->int_phy_mask & BIT(port)) {
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL, interfaces);
+ } else if (priv->moca_port == port) {
+ __set_bit(PHY_INTERFACE_MODE_MOCA, interfaces);
+ } else {
+ __set_bit(PHY_INTERFACE_MODE_MII, interfaces);
+ __set_bit(PHY_INTERFACE_MODE_REVMII, interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII, interfaces);
+ phy_interface_set_rgmii(interfaces);
+ }
+
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
+}
+
+static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ u32 id_mode_dis = 0, port_mode;
+ u32 reg_rgmii_ctrl;
+ u32 reg;
+
+ if (port == core_readl(priv, CORE_IMP0_PRT_ID))
+ return;
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ id_mode_dis = 1;
+ fallthrough;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ port_mode = EXT_GPHY;
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ port_mode = EXT_EPHY;
+ break;
+ case PHY_INTERFACE_MODE_REVMII:
+ port_mode = EXT_REVMII;
+ break;
+ default:
+ /* Nothing required for all other PHYs: internal and MoCA */
+ return;
+ }
+
+ reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
+
+ /* Clear the id_mode_dis bit and the existing port mode; let
+ * RGMII_MODE_EN be set by mac_link_{up,down}
+ */
+ reg = reg_readl(priv, reg_rgmii_ctrl);
+ reg &= ~ID_MODE_DIS;
+ reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
+
+ reg |= port_mode;
+ if (id_mode_dis)
+ reg |= ID_MODE_DIS;
+
+ reg_writel(priv, reg, reg_rgmii_ctrl);
+}
+
+static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port,
+ phy_interface_t interface, bool link)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ u32 reg_rgmii_ctrl;
+ u32 reg;
+
+ if (!phy_interface_mode_is_rgmii(interface) &&
+ interface != PHY_INTERFACE_MODE_MII &&
+ interface != PHY_INTERFACE_MODE_REVMII)
+ return;
+
+ reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
+
+ /* If the link is down, just disable the interface to conserve power */
+ reg = reg_readl(priv, reg_rgmii_ctrl);
+ if (link)
+ reg |= RGMII_MODE_EN;
+ else
+ reg &= ~RGMII_MODE_EN;
+ reg_writel(priv, reg, reg_rgmii_ctrl);
+}
+
+static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ u32 reg, offset;
+
+ if (priv->wol_ports_mask & BIT(port))
+ return;
+
+ offset = bcm_sf2_port_override_offset(priv, port);
+ reg = core_readl(priv, offset);
+ reg &= ~LINK_STS;
+ core_writel(priv, reg, offset);
+
+ bcm_sf2_sw_mac_link_set(ds, port, interface, false);
+}
+
+static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ struct ethtool_eee *p = &priv->dev->ports[port].eee;
+ u32 reg_rgmii_ctrl = 0;
+ u32 reg, offset;
+
+ bcm_sf2_sw_mac_link_set(ds, port, interface, true);
+
+ offset = bcm_sf2_port_override_offset(priv, port);
+
+ if (phy_interface_mode_is_rgmii(interface) ||
+ interface == PHY_INTERFACE_MODE_MII ||
+ interface == PHY_INTERFACE_MODE_REVMII) {
+ reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
+ reg = reg_readl(priv, reg_rgmii_ctrl);
+ reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
+
+ if (tx_pause)
+ reg |= TX_PAUSE_EN;
+ if (rx_pause)
+ reg |= RX_PAUSE_EN;
+
+ reg_writel(priv, reg, reg_rgmii_ctrl);
+ }
+
+ reg = LINK_STS;
+ if (port == 8) {
+ if (priv->type == BCM4908_DEVICE_ID)
+ reg |= GMII_SPEED_UP_2G;
+ reg |= MII_SW_OR;
+ } else {
+ reg |= SW_OVERRIDE;
+ }
+
+ switch (speed) {
+ case SPEED_1000:
+ reg |= SPDSTS_1000 << SPEED_SHIFT;
+ break;
+ case SPEED_100:
+ reg |= SPDSTS_100 << SPEED_SHIFT;
+ break;
+ }
+
+ if (duplex == DUPLEX_FULL)
+ reg |= DUPLX_MODE;
+
+ if (tx_pause)
+ reg |= TXFLOW_CNTL;
+ if (rx_pause)
+ reg |= RXFLOW_CNTL;
+
+ core_writel(priv, reg, offset);
+
+ if (mode == MLO_AN_PHY && phydev)
+ p->eee_enabled = b53_eee_init(ds, port, phydev);
+}
+
+static void bcm_sf2_sw_fixed_state(struct dsa_switch *ds, int port,
+ struct phylink_link_state *status)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+
+ status->link = false;
+
+ /* MoCA port is special as we do not get link status from CORE_LNKSTS,
+ * which means that we need to force the link at the port override
+ * level to get the data to flow. We use the link state that the
+ * interrupt handler determined earlier.
+ *
+ * For the other ports, we just force the link status, since this is
+ * a fixed PHY device.
+ */
+ if (port == priv->moca_port) {
+ status->link = priv->port_sts[port].link;
+ /* For MoCA interfaces, also force a link down notification
+ * since some versions of the user-space daemon (mocad) use
+ * cmd->autoneg to force the link, which messes up the PHY
+ * state machine and makes it go into the PHY_FORCING state instead.
+ */
+ if (!status->link)
+ netif_carrier_off(dsa_to_port(ds, port)->slave);
+ status->duplex = DUPLEX_FULL;
+ } else {
+ status->link = true;
+ }
+}
+
+static void bcm_sf2_enable_acb(struct dsa_switch *ds)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ u32 reg;
+
+ /* Enable ACB globally */
+ reg = acb_readl(priv, ACB_CONTROL);
+ reg |= (ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
+ acb_writel(priv, reg, ACB_CONTROL);
+ reg &= ~(ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
+ reg |= ACB_EN | ACB_ALGORITHM;
+ acb_writel(priv, reg, ACB_CONTROL);
+}
+
+static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ unsigned int port;
+
+ bcm_sf2_intr_disable(priv);
+
+ /* Disable all physically present ports, including the IMP
+ * port; the other ones have already been disabled during
+ * bcm_sf2_sw_setup()
+ */
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port))
+ bcm_sf2_port_disable(ds, port);
+ }
+
+ if (!priv->wol_ports_mask)
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static int bcm_sf2_sw_resume(struct dsa_switch *ds)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ int ret;
+
+ if (!priv->wol_ports_mask)
+ clk_prepare_enable(priv->clk);
+
+ ret = bcm_sf2_sw_rst(priv);
+ if (ret) {
+ pr_err("%s: failed to software reset switch\n", __func__);
+ return ret;
+ }
+
+ bcm_sf2_crossbar_setup(priv);
+
+ ret = bcm_sf2_cfp_resume(ds);
+ if (ret)
+ return ret;
+
+ if (priv->hw_params.num_gphy == 1)
+ bcm_sf2_gphy_enable_set(ds, true);
+
+ ds->ops->setup(ds);
+
+ return 0;
+}
+
+static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
+ struct ethtool_wolinfo *wol)
+{
+ struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ struct ethtool_wolinfo pwol = { };
+
+ /* Get the parent device WoL settings */
+ if (p->ethtool_ops->get_wol)
+ p->ethtool_ops->get_wol(p, &pwol);
+
+ /* Advertise the parent device supported settings */
+ wol->supported = pwol.supported;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+
+ if (pwol.wolopts & WAKE_MAGICSECURE)
+ memcpy(&wol->sopass, pwol.sopass, sizeof(wol->sopass));
+
+ if (priv->wol_ports_mask & (1 << port))
+ wol->wolopts = pwol.wolopts;
+ else
+ wol->wolopts = 0;
+}
+
+static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
+ struct ethtool_wolinfo *wol)
+{
+ struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+ struct ethtool_wolinfo pwol = { };
+
+ if (p->ethtool_ops->get_wol)
+ p->ethtool_ops->get_wol(p, &pwol);
+ if (wol->wolopts & ~pwol.supported)
+ return -EINVAL;
+
+ if (wol->wolopts)
+ priv->wol_ports_mask |= (1 << port);
+ else
+ priv->wol_ports_mask &= ~(1 << port);
+
+ /* If we have at least one port enabled, make sure the CPU port
+ * is also enabled. If the CPU port is the last one enabled, we disable
+ * it since this configuration does not make sense.
+ */
+ if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port))
+ priv->wol_ports_mask |= (1 << cpu_port);
+ else
+ priv->wol_ports_mask &= ~(1 << cpu_port);
+
+ return p->ethtool_ops->set_wol(p, wol);
+}
+
+static int bcm_sf2_sw_setup(struct dsa_switch *ds)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ unsigned int port;
+
+ /* Enable all valid ports and disable those unused */
+ for (port = 0; port < priv->hw_params.num_ports; port++) {
+ /* IMP port receives special treatment */
+ if (dsa_is_user_port(ds, port))
+ bcm_sf2_port_setup(ds, port, NULL);
+ else if (dsa_is_cpu_port(ds, port))
+ bcm_sf2_imp_setup(ds, port);
+ else
+ bcm_sf2_port_disable(ds, port);
+ }
+
+ b53_configure_vlan(ds);
+ bcm_sf2_enable_acb(ds);
+
+ return b53_setup_devlink_resources(ds);
+}
+
+static void bcm_sf2_sw_teardown(struct dsa_switch *ds)
+{
+ dsa_devlink_resources_unregister(ds);
+}
+
+/* The SWITCH_CORE register space is managed by b53 but operates on a page +
+ * register basis so we need to translate that into an address that the
+ * bus-glue understands.
+ */
+#define SF2_PAGE_REG_MKADDR(page, reg) ((page) << 10 | (reg) << 2)
+
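+/* For example, page 0x28, reg 0x10 translates to
+ * SF2_PAGE_REG_MKADDR(0x28, 0x10) == (0x28 << 10 | 0x10 << 2) == 0xa040.
+ */
+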
+static int bcm_sf2_core_read8(struct b53_device *dev, u8 page, u8 reg,
+ u8 *val)
+{
+ struct bcm_sf2_priv *priv = dev->priv;
+
+ *val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));
+
+ return 0;
+}
+
+static int bcm_sf2_core_read16(struct b53_device *dev, u8 page, u8 reg,
+ u16 *val)
+{
+ struct bcm_sf2_priv *priv = dev->priv;
+
+ *val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));
+
+ return 0;
+}
+
+static int bcm_sf2_core_read32(struct b53_device *dev, u8 page, u8 reg,
+ u32 *val)
+{
+ struct bcm_sf2_priv *priv = dev->priv;
+
+ *val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));
+
+ return 0;
+}
+
+static int bcm_sf2_core_read64(struct b53_device *dev, u8 page, u8 reg,
+ u64 *val)
+{
+ struct bcm_sf2_priv *priv = dev->priv;
+
+ *val = core_readq(priv, SF2_PAGE_REG_MKADDR(page, reg));
+
+ return 0;
+}
+
+static int bcm_sf2_core_write8(struct b53_device *dev, u8 page, u8 reg,
+ u8 value)
+{
+ struct bcm_sf2_priv *priv = dev->priv;
+
+ core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));
+
+ return 0;
+}
+
+static int bcm_sf2_core_write16(struct b53_device *dev, u8 page, u8 reg,
+ u16 value)
+{
+ struct bcm_sf2_priv *priv = dev->priv;
+
+ core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));
+
+ return 0;
+}
+
+static int bcm_sf2_core_write32(struct b53_device *dev, u8 page, u8 reg,
+ u32 value)
+{
+ struct bcm_sf2_priv *priv = dev->priv;
+
+ core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));
+
+ return 0;
+}
+
+static int bcm_sf2_core_write64(struct b53_device *dev, u8 page, u8 reg,
+ u64 value)
+{
+ struct bcm_sf2_priv *priv = dev->priv;
+
+ core_writeq(priv, value, SF2_PAGE_REG_MKADDR(page, reg));
+
+ return 0;
+}
+
+static const struct b53_io_ops bcm_sf2_io_ops = {
+ .read8 = bcm_sf2_core_read8,
+ .read16 = bcm_sf2_core_read16,
+ .read32 = bcm_sf2_core_read32,
+ .read48 = bcm_sf2_core_read64,
+ .read64 = bcm_sf2_core_read64,
+ .write8 = bcm_sf2_core_write8,
+ .write16 = bcm_sf2_core_write16,
+ .write32 = bcm_sf2_core_write32,
+ .write48 = bcm_sf2_core_write64,
+ .write64 = bcm_sf2_core_write64,
+};
+
+static void bcm_sf2_sw_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, uint8_t *data)
+{
+ int cnt = b53_get_sset_count(ds, port, stringset);
+
+ b53_get_strings(ds, port, stringset, data);
+ bcm_sf2_cfp_get_strings(ds, port, stringset,
+ data + cnt * ETH_GSTRING_LEN);
+}
+
+static void bcm_sf2_sw_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ int cnt = b53_get_sset_count(ds, port, ETH_SS_STATS);
+
+ b53_get_ethtool_stats(ds, port, data);
+ bcm_sf2_cfp_get_ethtool_stats(ds, port, data + cnt);
+}
+
+static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds, int port,
+ int sset)
+{
+ int cnt = b53_get_sset_count(ds, port, sset);
+
+ if (cnt < 0)
+ return cnt;
+
+ cnt += bcm_sf2_cfp_get_sset_count(ds, port, sset);
+
+ return cnt;
+}
+
+static const struct dsa_switch_ops bcm_sf2_ops = {
+ .get_tag_protocol = b53_get_tag_protocol,
+ .setup = bcm_sf2_sw_setup,
+ .teardown = bcm_sf2_sw_teardown,
+ .get_strings = bcm_sf2_sw_get_strings,
+ .get_ethtool_stats = bcm_sf2_sw_get_ethtool_stats,
+ .get_sset_count = bcm_sf2_sw_get_sset_count,
+ .get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
+ .get_phy_flags = bcm_sf2_sw_get_phy_flags,
+ .phylink_get_caps = bcm_sf2_sw_get_caps,
+ .phylink_mac_config = bcm_sf2_sw_mac_config,
+ .phylink_mac_link_down = bcm_sf2_sw_mac_link_down,
+ .phylink_mac_link_up = bcm_sf2_sw_mac_link_up,
+ .phylink_fixed_state = bcm_sf2_sw_fixed_state,
+ .suspend = bcm_sf2_sw_suspend,
+ .resume = bcm_sf2_sw_resume,
+ .get_wol = bcm_sf2_sw_get_wol,
+ .set_wol = bcm_sf2_sw_set_wol,
+ .port_enable = bcm_sf2_port_setup,
+ .port_disable = bcm_sf2_port_disable,
+ .get_mac_eee = b53_get_mac_eee,
+ .set_mac_eee = b53_set_mac_eee,
+ .port_bridge_join = b53_br_join,
+ .port_bridge_leave = b53_br_leave,
+ .port_pre_bridge_flags = b53_br_flags_pre,
+ .port_bridge_flags = b53_br_flags,
+ .port_stp_state_set = b53_br_set_stp_state,
+ .port_fast_age = b53_br_fast_age,
+ .port_vlan_filtering = b53_vlan_filtering,
+ .port_vlan_add = b53_vlan_add,
+ .port_vlan_del = b53_vlan_del,
+ .port_fdb_dump = b53_fdb_dump,
+ .port_fdb_add = b53_fdb_add,
+ .port_fdb_del = b53_fdb_del,
+ .get_rxnfc = bcm_sf2_get_rxnfc,
+ .set_rxnfc = bcm_sf2_set_rxnfc,
+ .port_mirror_add = b53_mirror_add,
+ .port_mirror_del = b53_mirror_del,
+ .port_mdb_add = b53_mdb_add,
+ .port_mdb_del = b53_mdb_del,
+};
+
+struct bcm_sf2_of_data {
+ u32 type;
+ const u16 *reg_offsets;
+ unsigned int core_reg_align;
+ unsigned int num_cfp_rules;
+ unsigned int num_crossbar_int_ports;
+};
+
+static const u16 bcm_sf2_4908_reg_offsets[] = {
+ [REG_SWITCH_CNTRL] = 0x00,
+ [REG_SWITCH_STATUS] = 0x04,
+ [REG_DIR_DATA_WRITE] = 0x08,
+ [REG_DIR_DATA_READ] = 0x0c,
+ [REG_SWITCH_REVISION] = 0x10,
+ [REG_PHY_REVISION] = 0x14,
+ [REG_SPHY_CNTRL] = 0x24,
+ [REG_CROSSBAR] = 0xc8,
+ [REG_RGMII_11_CNTRL] = 0x014c,
+ [REG_LED_0_CNTRL] = 0x40,
+ [REG_LED_1_CNTRL] = 0x4c,
+ [REG_LED_2_CNTRL] = 0x58,
+ [REG_LED_3_CNTRL] = 0x64,
+ [REG_LED_4_CNTRL] = 0x88,
+ [REG_LED_5_CNTRL] = 0xa0,
+ [REG_LED_AGGREGATE_CTRL] = 0xb8,
+};
+
+static const struct bcm_sf2_of_data bcm_sf2_4908_data = {
+ .type = BCM4908_DEVICE_ID,
+ .core_reg_align = 0,
+ .reg_offsets = bcm_sf2_4908_reg_offsets,
+ .num_cfp_rules = 256,
+ .num_crossbar_int_ports = 2,
+};
+
+/* Register offsets for the SWITCH_REG_* block */
+static const u16 bcm_sf2_7445_reg_offsets[] = {
+ [REG_SWITCH_CNTRL] = 0x00,
+ [REG_SWITCH_STATUS] = 0x04,
+ [REG_DIR_DATA_WRITE] = 0x08,
+ [REG_DIR_DATA_READ] = 0x0C,
+ [REG_SWITCH_REVISION] = 0x18,
+ [REG_PHY_REVISION] = 0x1C,
+ [REG_SPHY_CNTRL] = 0x2C,
+ [REG_RGMII_0_CNTRL] = 0x34,
+ [REG_RGMII_1_CNTRL] = 0x40,
+ [REG_RGMII_2_CNTRL] = 0x4c,
+ [REG_LED_0_CNTRL] = 0x90,
+ [REG_LED_1_CNTRL] = 0x94,
+ [REG_LED_2_CNTRL] = 0x98,
+};
+
+static const struct bcm_sf2_of_data bcm_sf2_7445_data = {
+ .type = BCM7445_DEVICE_ID,
+ .core_reg_align = 0,
+ .reg_offsets = bcm_sf2_7445_reg_offsets,
+ .num_cfp_rules = 256,
+};
+
+static const u16 bcm_sf2_7278_reg_offsets[] = {
+ [REG_SWITCH_CNTRL] = 0x00,
+ [REG_SWITCH_STATUS] = 0x04,
+ [REG_DIR_DATA_WRITE] = 0x08,
+ [REG_DIR_DATA_READ] = 0x0c,
+ [REG_SWITCH_REVISION] = 0x10,
+ [REG_PHY_REVISION] = 0x14,
+ [REG_SPHY_CNTRL] = 0x24,
+ [REG_RGMII_0_CNTRL] = 0xe0,
+ [REG_RGMII_1_CNTRL] = 0xec,
+ [REG_RGMII_2_CNTRL] = 0xf8,
+ [REG_LED_0_CNTRL] = 0x40,
+ [REG_LED_1_CNTRL] = 0x4c,
+ [REG_LED_2_CNTRL] = 0x58,
+};
+
+static const struct bcm_sf2_of_data bcm_sf2_7278_data = {
+ .type = BCM7278_DEVICE_ID,
+ .core_reg_align = 1,
+ .reg_offsets = bcm_sf2_7278_reg_offsets,
+ .num_cfp_rules = 128,
+};
+
+static const struct of_device_id bcm_sf2_of_match[] = {
+ { .compatible = "brcm,bcm4908-switch",
+ .data = &bcm_sf2_4908_data
+ },
+ { .compatible = "brcm,bcm7445-switch-v4.0",
+ .data = &bcm_sf2_7445_data
+ },
+ { .compatible = "brcm,bcm7278-switch-v4.0",
+ .data = &bcm_sf2_7278_data
+ },
+ { .compatible = "brcm,bcm7278-switch-v4.8",
+ .data = &bcm_sf2_7278_data
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);
+
+static int bcm_sf2_sw_probe(struct platform_device *pdev)
+{
+ const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
+ struct device_node *dn = pdev->dev.of_node;
+ const struct of_device_id *of_id = NULL;
+ const struct bcm_sf2_of_data *data;
+ struct b53_platform_data *pdata;
+ struct dsa_switch_ops *ops;
+ struct device_node *ports;
+ struct bcm_sf2_priv *priv;
+ struct b53_device *dev;
+ struct dsa_switch *ds;
+ void __iomem **base;
+ unsigned int i;
+ u32 reg, rev;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ops = devm_kzalloc(&pdev->dev, sizeof(*ops), GFP_KERNEL);
+ if (!ops)
+ return -ENOMEM;
+
+ dev = b53_switch_alloc(&pdev->dev, &bcm_sf2_io_ops, priv);
+ if (!dev)
+ return -ENOMEM;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ of_id = of_match_node(bcm_sf2_of_match, dn);
+ if (!of_id || !of_id->data)
+ return -EINVAL;
+
+ data = of_id->data;
+
+ /* Set SWITCH_REG register offsets and SWITCH_CORE align factor */
+ priv->type = data->type;
+ priv->reg_offsets = data->reg_offsets;
+ priv->core_reg_align = data->core_reg_align;
+ priv->num_cfp_rules = data->num_cfp_rules;
+ priv->num_crossbar_int_ports = data->num_crossbar_int_ports;
+
+ priv->rcdev = devm_reset_control_get_optional_exclusive(&pdev->dev,
+ "switch");
+ if (IS_ERR(priv->rcdev))
+ return PTR_ERR(priv->rcdev);
+
+ /* Auto-detection using standard registers will not work, so
+ * provide an indication of what kind of device we are for
+ * b53_common to work with
+ */
+ pdata->chip_id = priv->type;
+ dev->pdata = pdata;
+
+ priv->dev = dev;
+ ds = dev->ds;
+ ds->ops = &bcm_sf2_ops;
+
+ /* Advertise the 8 egress queues */
+ ds->num_tx_queues = SF2_NUM_EGRESS_QUEUES;
+
+ dev_set_drvdata(&pdev->dev, priv);
+
+ spin_lock_init(&priv->indir_lock);
+ mutex_init(&priv->cfp.lock);
+ INIT_LIST_HEAD(&priv->cfp.rules_list);
+
+ /* CFP rule #0 cannot be used for specific classifications, flag it as
+ * permanently used
+ */
+ set_bit(0, priv->cfp.used);
+ set_bit(0, priv->cfp.unique);
+
+ /* Balance of_node_put() done by of_find_node_by_name() */
+ of_node_get(dn);
+ ports = of_find_node_by_name(dn, "ports");
+ if (ports) {
+ bcm_sf2_identify_ports(priv, ports);
+ of_node_put(ports);
+ }
+
+ priv->irq0 = irq_of_parse_and_map(dn, 0);
+ priv->irq1 = irq_of_parse_and_map(dn, 1);
+
+ base = &priv->core;
+ for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
+ *base = devm_platform_ioremap_resource(pdev, i);
+ if (IS_ERR(*base)) {
+ pr_err("unable to find register: %s\n", reg_names[i]);
+ return PTR_ERR(*base);
+ }
+ base++;
+ }
+
+ priv->clk = devm_clk_get_optional(&pdev->dev, "sw_switch");
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ priv->clk_mdiv = devm_clk_get_optional(&pdev->dev, "sw_switch_mdiv");
+ if (IS_ERR(priv->clk_mdiv)) {
+ ret = PTR_ERR(priv->clk_mdiv);
+ goto out_clk;
+ }
+
+ ret = clk_prepare_enable(priv->clk_mdiv);
+ if (ret)
+ goto out_clk;
+
+ ret = bcm_sf2_sw_rst(priv);
+ if (ret) {
+ pr_err("unable to software reset switch: %d\n", ret);
+ goto out_clk_mdiv;
+ }
+
+ bcm_sf2_crossbar_setup(priv);
+
+ bcm_sf2_gphy_enable_set(priv->dev->ds, true);
+
+ ret = bcm_sf2_mdio_register(ds);
+ if (ret) {
+ pr_err("failed to register MDIO bus\n");
+ goto out_clk_mdiv;
+ }
+
+ bcm_sf2_gphy_enable_set(priv->dev->ds, false);
+
+ ret = bcm_sf2_cfp_rst(priv);
+ if (ret) {
+ pr_err("failed to reset CFP\n");
+ goto out_mdio;
+ }
+
+ /* Disable all interrupts and request them */
+ bcm_sf2_intr_disable(priv);
+
+ ret = devm_request_irq(&pdev->dev, priv->irq0, bcm_sf2_switch_0_isr, 0,
+ "switch_0", ds);
+ if (ret < 0) {
+ pr_err("failed to request switch_0 IRQ\n");
+ goto out_mdio;
+ }
+
+ ret = devm_request_irq(&pdev->dev, priv->irq1, bcm_sf2_switch_1_isr, 0,
+ "switch_1", ds);
+ if (ret < 0) {
+ pr_err("failed to request switch_1 IRQ\n");
+ goto out_mdio;
+ }
+
+ /* Reset the MIB counters */
+ reg = core_readl(priv, CORE_GMNCFGCFG);
+ reg |= RST_MIB_CNT;
+ core_writel(priv, reg, CORE_GMNCFGCFG);
+ reg &= ~RST_MIB_CNT;
+ core_writel(priv, reg, CORE_GMNCFGCFG);
+
+ /* Get the maximum number of ports for this switch */
+ priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
+ if (priv->hw_params.num_ports > DSA_MAX_PORTS)
+ priv->hw_params.num_ports = DSA_MAX_PORTS;
+
+ /* Assume a single GPHY setup if we can't read that property */
+ if (of_property_read_u32(dn, "brcm,num-gphy",
+ &priv->hw_params.num_gphy))
+ priv->hw_params.num_gphy = 1;
+
+ rev = reg_readl(priv, REG_SWITCH_REVISION);
+ priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
+ SWITCH_TOP_REV_MASK;
+ priv->hw_params.core_rev = (rev & SF2_REV_MASK);
+
+ rev = reg_readl(priv, REG_PHY_REVISION);
+ priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;
+
+ ret = b53_switch_register(dev);
+ if (ret)
+ goto out_mdio;
+
+ dev_info(&pdev->dev,
+ "Starfighter 2 top: %x.%02x, core: %x.%02x, IRQs: %d, %d\n",
+ priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
+ priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
+ priv->irq0, priv->irq1);
+
+ return 0;
+
+out_mdio:
+ bcm_sf2_mdio_unregister(priv);
+out_clk_mdiv:
+ clk_disable_unprepare(priv->clk_mdiv);
+out_clk:
+ clk_disable_unprepare(priv->clk);
+ return ret;
+}
+
+static int bcm_sf2_sw_remove(struct platform_device *pdev)
+{
+ struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
+
+ if (!priv)
+ return 0;
+
+ priv->wol_ports_mask = 0;
+ /* Disable interrupts */
+ bcm_sf2_intr_disable(priv);
+ dsa_unregister_switch(priv->dev->ds);
+ bcm_sf2_cfp_exit(priv->dev->ds);
+ bcm_sf2_mdio_unregister(priv);
+ clk_disable_unprepare(priv->clk_mdiv);
+ clk_disable_unprepare(priv->clk);
+ if (priv->type == BCM7278_DEVICE_ID)
+ reset_control_assert(priv->rcdev);
+
+ return 0;
+}
+
+static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
+{
+ struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
+
+ if (!priv)
+ return;
+
+ /* For a kernel about to be kexec'd we want to keep the GPHY on for a
+ * successful MDIO bus scan to occur. If we did turn off the GPHY
+ * before (e.g: port_disable), this will also power it back on.
+ *
+ * Do not rely on kexec_in_progress, just power the PHY on.
+ */
+ if (priv->hw_params.num_gphy == 1)
+ bcm_sf2_gphy_enable_set(priv->dev->ds, true);
+
+ dsa_switch_shutdown(priv->dev->ds);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int bcm_sf2_suspend(struct device *dev)
+{
+ struct bcm_sf2_priv *priv = dev_get_drvdata(dev);
+
+ return dsa_switch_suspend(priv->dev->ds);
+}
+
+static int bcm_sf2_resume(struct device *dev)
+{
+ struct bcm_sf2_priv *priv = dev_get_drvdata(dev);
+
+ return dsa_switch_resume(priv->dev->ds);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops,
+ bcm_sf2_suspend, bcm_sf2_resume);
+
+static struct platform_driver bcm_sf2_driver = {
+ .probe = bcm_sf2_sw_probe,
+ .remove = bcm_sf2_sw_remove,
+ .shutdown = bcm_sf2_sw_shutdown,
+ .driver = {
+ .name = "brcm-sf2",
+ .of_match_table = bcm_sf2_of_match,
+ .pm = &bcm_sf2_pm_ops,
+ },
+};
+module_platform_driver(bcm_sf2_driver);
+
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:brcm-sf2");
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
new file mode 100644
index 000000000..00afc94ce
--- /dev/null
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Broadcom Starfighter2 private context
+ *
+ * Copyright (C) 2014, Broadcom Corporation
+ */
+
+#ifndef __BCM_SF2_H
+#define __BCM_SF2_H
+
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+#include <linux/reset.h>
+
+#include <net/dsa.h>
+
+#include "bcm_sf2_regs.h"
+#include "b53/b53_priv.h"
+
+struct bcm_sf2_hw_params {
+ u16 top_rev;
+ u16 core_rev;
+ u16 gphy_rev;
+ u32 num_gphy;
+ u8 num_acb_queue;
+ u8 num_rgmii;
+ u8 num_ports;
+ u8 fcb_pause_override:1;
+ u8 acb_packets_inflight:1;
+};
+
+#define BCM_SF2_REGS_NAME {\
+ "core", "reg", "intrl2_0", "intrl2_1", "fcb", "acb" \
+}
+
+#define BCM_SF2_REGS_NUM 6
+
+struct bcm_sf2_port_status {
+ phy_interface_t mode;
+ unsigned int link;
+ bool enabled;
+};
+
+struct bcm_sf2_cfp_priv {
+ /* Mutex protecting concurrent accesses to the CFP registers */
+ struct mutex lock;
+ DECLARE_BITMAP(used, CFP_NUM_RULES);
+ DECLARE_BITMAP(unique, CFP_NUM_RULES);
+ unsigned int rules_cnt;
+ struct list_head rules_list;
+};
+
+struct bcm_sf2_priv {
+ /* Base registers, keep those in order with BCM_SF2_REGS_NAME */
+ void __iomem *core;
+ void __iomem *reg;
+ void __iomem *intrl2_0;
+ void __iomem *intrl2_1;
+ void __iomem *fcb;
+ void __iomem *acb;
+
+ struct reset_control *rcdev;
+
+ /* Register offsets indirection tables */
+ u32 type;
+ const u16 *reg_offsets;
+ unsigned int core_reg_align;
+ unsigned int num_cfp_rules;
+ unsigned int num_crossbar_int_ports;
+
+ /* spinlock protecting access to the indirect registers */
+ spinlock_t indir_lock;
+
+ int irq0;
+ int irq1;
+ u32 irq0_stat;
+ u32 irq0_mask;
+ u32 irq1_stat;
+ u32 irq1_mask;
+
+ /* Backing b53_device */
+ struct b53_device *dev;
+
+ struct bcm_sf2_hw_params hw_params;
+
+ struct bcm_sf2_port_status port_sts[DSA_MAX_PORTS];
+
+ /* Mask of ports enabled for Wake-on-LAN */
+ u32 wol_ports_mask;
+
+ struct clk *clk;
+ struct clk *clk_mdiv;
+
+ /* MoCA port location */
+ int moca_port;
+
+ /* Bitmask of ports having an integrated PHY */
+ unsigned int int_phy_mask;
+
+ /* Master and slave MDIO bus controller */
+ unsigned int indir_phy_mask;
+ struct device_node *master_mii_dn;
+ struct mii_bus *slave_mii_bus;
+ struct mii_bus *master_mii_bus;
+
+ /* Bitmask of ports needing BRCM tags */
+ unsigned int brcm_tag_mask;
+
+ /* CFP rules context */
+ struct bcm_sf2_cfp_priv cfp;
+};
+
+static inline struct bcm_sf2_priv *bcm_sf2_to_priv(struct dsa_switch *ds)
+{
+ struct b53_device *dev = ds->priv;
+
+ return dev->priv;
+}
+
+static inline u32 bcm_sf2_mangle_addr(struct bcm_sf2_priv *priv, u32 off)
+{
+ return off << priv->core_reg_align;
+}
+
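+/* e.g. with core_reg_align == 1 (BCM7278), SWITCH_CORE offset 0x100 is
+ * mangled to 0x200; chips with core_reg_align == 0 use offsets as-is.
+ */
+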
+#define SF2_IO_MACRO(name) \
+static inline u32 name##_readl(struct bcm_sf2_priv *priv, u32 off) \
+{ \
+ return readl_relaxed(priv->name + off); \
+} \
+static inline void name##_writel(struct bcm_sf2_priv *priv, \
+ u32 val, u32 off) \
+{ \
+ writel_relaxed(val, priv->name + off); \
+} \
+
+/* Accesses to 64-bit registers require us to latch the hi/lo pairs
+ * using the REG_DIR_DATA_{READ,WRITE} ancillary registers. The 'indir_lock'
+ * spinlock is automatically grabbed and released to provide relative
+ * atomicity with latched reads/writes.
+ */
+#define SF2_IO64_MACRO(name) \
+static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off) \
+{ \
+ u32 indir, dir; \
+ spin_lock(&priv->indir_lock); \
+ dir = name##_readl(priv, off); \
+ indir = reg_readl(priv, REG_DIR_DATA_READ); \
+ spin_unlock(&priv->indir_lock); \
+ return (u64)indir << 32 | dir; \
+} \
+static inline void name##_writeq(struct bcm_sf2_priv *priv, u64 val, \
+ u32 off) \
+{ \
+ spin_lock(&priv->indir_lock); \
+ reg_writel(priv, upper_32_bits(val), REG_DIR_DATA_WRITE); \
+ name##_writel(priv, lower_32_bits(val), off); \
+ spin_unlock(&priv->indir_lock); \
+}
+
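+/* SF2_IO64_MACRO(core) below generates core_readq()/core_writeq(), which
+ * back the 48/64-bit b53_io_ops registered in bcm_sf2.c.
+ */
+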
+#define SWITCH_INTR_L2(which) \
+static inline void intrl2_##which##_mask_clear(struct bcm_sf2_priv *priv, \
+ u32 mask) \
+{ \
+ priv->irq##which##_mask &= ~(mask); \
+ intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
+} \
+static inline void intrl2_##which##_mask_set(struct bcm_sf2_priv *priv, \
+ u32 mask) \
+{ \
+ intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET); \
+ priv->irq##which##_mask |= (mask); \
+} \
+
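+/* SWITCH_INTR_L2(0) and SWITCH_INTR_L2(1) below generate the
+ * intrl2_{0,1}_mask_{set,clear}() helpers, which keep the cached
+ * irq{0,1}_mask in sync with the hardware interrupt mask registers.
+ */
+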
+static inline u32 core_readl(struct bcm_sf2_priv *priv, u32 off)
+{
+ u32 tmp = bcm_sf2_mangle_addr(priv, off);
+ return readl_relaxed(priv->core + tmp);
+}
+
+static inline void core_writel(struct bcm_sf2_priv *priv, u32 val, u32 off)
+{
+ u32 tmp = bcm_sf2_mangle_addr(priv, off);
+ writel_relaxed(val, priv->core + tmp);
+}
+
+static inline u32 reg_readl(struct bcm_sf2_priv *priv, u16 off)
+{
+ return readl_relaxed(priv->reg + priv->reg_offsets[off]);
+}
+
+static inline void reg_writel(struct bcm_sf2_priv *priv, u32 val, u16 off)
+{
+ writel_relaxed(val, priv->reg + priv->reg_offsets[off]);
+}
+
+SF2_IO64_MACRO(core);
+SF2_IO_MACRO(intrl2_0);
+SF2_IO_MACRO(intrl2_1);
+SF2_IO_MACRO(fcb);
+SF2_IO_MACRO(acb);
+
+SWITCH_INTR_L2(0);
+SWITCH_INTR_L2(1);
+
+static inline u32 reg_led_readl(struct bcm_sf2_priv *priv, u16 off, u16 reg)
+{
+ return readl_relaxed(priv->reg + priv->reg_offsets[off] + reg);
+}
+
+static inline void reg_led_writel(struct bcm_sf2_priv *priv, u32 val, u16 off, u16 reg)
+{
+ writel_relaxed(val, priv->reg + priv->reg_offsets[off] + reg);
+}
+
+/* RXNFC */
+int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
+ struct ethtool_rxnfc *nfc, u32 *rule_locs);
+int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
+ struct ethtool_rxnfc *nfc);
+int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv);
+void bcm_sf2_cfp_exit(struct dsa_switch *ds);
+int bcm_sf2_cfp_resume(struct dsa_switch *ds);
+void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, uint8_t *data);
+void bcm_sf2_cfp_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data);
+int bcm_sf2_cfp_get_sset_count(struct dsa_switch *ds, int port, int sset);
+
+#endif /* __BCM_SF2_H */
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
new file mode 100644
index 000000000..c4010b7bf
--- /dev/null
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -0,0 +1,1346 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Broadcom Starfighter 2 DSA switch CFP support
+ *
+ * Copyright (C) 2016, Broadcom
+ */
+
+#include <linux/list.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/in.h>
+#include <linux/netdevice.h>
+#include <net/dsa.h>
+#include <linux/bitmap.h>
+#include <net/flow_offload.h>
+#include <net/switchdev.h>
+#include <uapi/linux/if_bridge.h>
+
+#include "bcm_sf2.h"
+#include "bcm_sf2_regs.h"
+
+struct cfp_rule {
+ int port;
+ struct ethtool_rx_flow_spec fs;
+ struct list_head next;
+};
+
+struct cfp_udf_slice_layout {
+ u8 slices[UDFS_PER_SLICE];
+ u32 mask_value;
+ u32 base_offset;
+};
+
+struct cfp_udf_layout {
+ struct cfp_udf_slice_layout udfs[UDF_NUM_SLICES];
+};
+
+static const u8 zero_slice[UDFS_PER_SLICE] = { };
+
+/* UDF slices layout for a TCPv4/UDPv4 specification */
+static const struct cfp_udf_layout udf_tcpip4_layout = {
+ .udfs = {
+ [1] = {
+ .slices = {
+ /* End of L2, byte offset 12, src IP[0:15] */
+ CFG_UDF_EOL2 | 6,
+ /* End of L2, byte offset 14, src IP[16:31] */
+ CFG_UDF_EOL2 | 7,
+ /* End of L2, byte offset 16, dst IP[0:15] */
+ CFG_UDF_EOL2 | 8,
+ /* End of L2, byte offset 18, dst IP[16:31] */
+ CFG_UDF_EOL2 | 9,
+ /* End of L3, byte offset 0, src port */
+ CFG_UDF_EOL3 | 0,
+ /* End of L3, byte offset 2, dst port */
+ CFG_UDF_EOL3 | 1,
+ 0, 0, 0
+ },
+ .mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
+ .base_offset = CORE_UDF_0_A_0_8_PORT_0 + UDF_SLICE_OFFSET,
+ },
+ },
+};
+
+/* UDF slices layout for a TCPv6/UDPv6 specification */
+static const struct cfp_udf_layout udf_tcpip6_layout = {
+ .udfs = {
+ [0] = {
+ .slices = {
+ /* End of L2, byte offset 8, src IP[0:15] */
+ CFG_UDF_EOL2 | 4,
+ /* End of L2, byte offset 10, src IP[16:31] */
+ CFG_UDF_EOL2 | 5,
+ /* End of L2, byte offset 12, src IP[32:47] */
+ CFG_UDF_EOL2 | 6,
+ /* End of L2, byte offset 14, src IP[48:63] */
+ CFG_UDF_EOL2 | 7,
+ /* End of L2, byte offset 16, src IP[64:79] */
+ CFG_UDF_EOL2 | 8,
+ /* End of L2, byte offset 18, src IP[80:95] */
+ CFG_UDF_EOL2 | 9,
+ /* End of L2, byte offset 20, src IP[96:111] */
+ CFG_UDF_EOL2 | 10,
+ /* End of L2, byte offset 22, src IP[112:127] */
+ CFG_UDF_EOL2 | 11,
+ /* End of L3, byte offset 0, src port */
+ CFG_UDF_EOL3 | 0,
+ },
+ .mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
+ .base_offset = CORE_UDF_0_B_0_8_PORT_0,
+ },
+ [3] = {
+ .slices = {
+ /* End of L2, byte offset 24, dst IP[0:15] */
+ CFG_UDF_EOL2 | 12,
+ /* End of L2, byte offset 26, dst IP[16:31] */
+ CFG_UDF_EOL2 | 13,
+ /* End of L2, byte offset 28, dst IP[32:47] */
+ CFG_UDF_EOL2 | 14,
+ /* End of L2, byte offset 30, dst IP[48:63] */
+ CFG_UDF_EOL2 | 15,
+ /* End of L2, byte offset 32, dst IP[64:79] */
+ CFG_UDF_EOL2 | 16,
+ /* End of L2, byte offset 34, dst IP[80:95] */
+ CFG_UDF_EOL2 | 17,
+ /* End of L2, byte offset 36, dst IP[96:111] */
+ CFG_UDF_EOL2 | 18,
+ /* End of L2, byte offset 38, dst IP[112:127] */
+ CFG_UDF_EOL2 | 19,
+ /* End of L3, byte offset 2, dst port */
+ CFG_UDF_EOL3 | 1,
+ },
+ .mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
+ .base_offset = CORE_UDF_0_D_0_11_PORT_0,
+ },
+ },
+};
+
+static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout)
+{
+ unsigned int i, count = 0;
+
+ for (i = 0; i < UDFS_PER_SLICE; i++) {
+ if (layout[i] != 0)
+ count++;
+ }
+
+ return count;
+}
+
+static inline u32 udf_upper_bits(int num_udf)
+{
+ return GENMASK(num_udf - 1, 0) >> (UDFS_PER_SLICE - 1);
+}
+
+static inline u32 udf_lower_bits(int num_udf)
+{
+ return (u8)GENMASK(num_udf - 1, 0);
+}
+
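+/* Worked example (informal, assuming UDFS_PER_SLICE == 9): for
+ * num_udf == 9, udf_upper_bits() returns 0x1ff >> 8 == 0x1 and
+ * udf_lower_bits() returns 0xff; for num_udf == 6 they return 0x0
+ * and 0x3f respectively.
+ */
+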
+static unsigned int bcm_sf2_get_slice_number(const struct cfp_udf_layout *l,
+ unsigned int start)
+{
+ const struct cfp_udf_slice_layout *slice_layout;
+ unsigned int slice_idx;
+
+ for (slice_idx = start; slice_idx < UDF_NUM_SLICES; slice_idx++) {
+ slice_layout = &l->udfs[slice_idx];
+ if (memcmp(slice_layout->slices, zero_slice,
+ sizeof(zero_slice)))
+ break;
+ }
+
+ return slice_idx;
+}
+
+static void bcm_sf2_cfp_udf_set(struct bcm_sf2_priv *priv,
+ const struct cfp_udf_layout *layout,
+ unsigned int slice_num)
+{
+ u32 offset = layout->udfs[slice_num].base_offset;
+ unsigned int i;
+
+ for (i = 0; i < UDFS_PER_SLICE; i++)
+ core_writel(priv, layout->udfs[slice_num].slices[i],
+ offset + i * 4);
+}
+
+static int bcm_sf2_cfp_op(struct bcm_sf2_priv *priv, unsigned int op)
+{
+ unsigned int timeout = 1000;
+ u32 reg;
+
+ reg = core_readl(priv, CORE_CFP_ACC);
+ reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
+ reg |= OP_STR_DONE | op;
+ core_writel(priv, reg, CORE_CFP_ACC);
+
+ do {
+ reg = core_readl(priv, CORE_CFP_ACC);
+ if (!(reg & OP_STR_DONE))
+ break;
+
+ cpu_relax();
+ } while (timeout--);
+
+ if (!timeout)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
+ unsigned int addr)
+{
+ u32 reg;
+
+ WARN_ON(addr >= priv->num_cfp_rules);
+
+ reg = core_readl(priv, CORE_CFP_ACC);
+ reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
+ reg |= addr << XCESS_ADDR_SHIFT;
+ core_writel(priv, reg, CORE_CFP_ACC);
+}
+
+static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
+{
+ /* Entry #0 is reserved */
+ return priv->num_cfp_rules - 1;
+}
+
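+/* e.g. with num_cfp_rules == 256 (BCM7445/BCM4908), the usable rule
+ * locations are 1..255, since entry #0 stays reserved.
+ */
+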
+static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv,
+ unsigned int rule_index,
+ int src_port,
+ unsigned int port_num,
+ unsigned int queue_num,
+ bool fwd_map_change)
+{
+ int ret;
+ u32 reg;
+
+ /* Replace ARL derived destination with DST_MAP derived, define
+ * which port and queue this should be forwarded to.
+ */
+ if (fwd_map_change)
+ reg = CHANGE_FWRD_MAP_IB_REP_ARL |
+ BIT(port_num + DST_MAP_IB_SHIFT) |
+ CHANGE_TC | queue_num << NEW_TC_SHIFT;
+ else
+ reg = 0;
+
+ /* Enable looping back to the original port */
+ if (src_port == port_num)
+ reg |= LOOP_BK_EN;
+
+ core_writel(priv, reg, CORE_ACT_POL_DATA0);
+
+ /* Set classification ID that needs to be put in Broadcom tag */
+ core_writel(priv, rule_index << CHAIN_ID_SHIFT, CORE_ACT_POL_DATA1);
+
+ core_writel(priv, 0, CORE_ACT_POL_DATA2);
+
+ /* Configure policer RAM now */
+ ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | ACT_POL_RAM);
+ if (ret) {
+ pr_err("Policer entry at %d failed\n", rule_index);
+ return ret;
+ }
+
+ /* Disable the policer */
+ core_writel(priv, POLICER_MODE_DISABLE, CORE_RATE_METER0);
+
+ /* Now the rate meter */
+ ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | RATE_METER_RAM);
+ if (ret) {
+ pr_err("Meter entry at %d failed\n", rule_index);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
+ struct flow_dissector_key_ipv4_addrs *addrs,
+ struct flow_dissector_key_ports *ports,
+ const __be16 vlan_tci,
+ unsigned int slice_num, u8 num_udf,
+ bool mask)
+{
+ u32 reg, offset;
+
+ /* UDF_Valid[7:0] [31:24]
+ * S-Tag [23:8]
+ * C-Tag [7:0]
+ */
+ reg = udf_lower_bits(num_udf) << 24 | be16_to_cpu(vlan_tci) >> 8;
+ if (mask)
+ core_writel(priv, reg, CORE_CFP_MASK_PORT(5));
+ else
+ core_writel(priv, reg, CORE_CFP_DATA_PORT(5));
+
+ /* C-Tag [31:24]
+ * UDF_n_A8 [23:8]
+ * UDF_n_A7 [7:0]
+ */
+ reg = (u32)(be16_to_cpu(vlan_tci) & 0xff) << 24;
+ if (mask)
+ offset = CORE_CFP_MASK_PORT(4);
+ else
+ offset = CORE_CFP_DATA_PORT(4);
+ core_writel(priv, reg, offset);
+
+ /* UDF_n_A7 [31:24]
+ * UDF_n_A6 [23:8]
+ * UDF_n_A5 [7:0]
+ */
+ reg = be16_to_cpu(ports->dst) >> 8;
+ if (mask)
+ offset = CORE_CFP_MASK_PORT(3);
+ else
+ offset = CORE_CFP_DATA_PORT(3);
+ core_writel(priv, reg, offset);
+
+ /* UDF_n_A5 [31:24]
+ * UDF_n_A4 [23:8]
+ * UDF_n_A3 [7:0]
+ */
+ reg = (be16_to_cpu(ports->dst) & 0xff) << 24 |
+ (u32)be16_to_cpu(ports->src) << 8 |
+ (be32_to_cpu(addrs->dst) & 0x0000ff00) >> 8;
+ if (mask)
+ offset = CORE_CFP_MASK_PORT(2);
+ else
+ offset = CORE_CFP_DATA_PORT(2);
+ core_writel(priv, reg, offset);
+
+ /* UDF_n_A3 [31:24]
+ * UDF_n_A2 [23:8]
+ * UDF_n_A1 [7:0]
+ */
+ reg = (u32)(be32_to_cpu(addrs->dst) & 0xff) << 24 |
+ (u32)(be32_to_cpu(addrs->dst) >> 16) << 8 |
+ (be32_to_cpu(addrs->src) & 0x0000ff00) >> 8;
+ if (mask)
+ offset = CORE_CFP_MASK_PORT(1);
+ else
+ offset = CORE_CFP_DATA_PORT(1);
+ core_writel(priv, reg, offset);
+
+ /* UDF_n_A1 [31:24]
+ * UDF_n_A0 [23:8]
+ * Reserved [7:4]
+ * Slice ID [3:2]
+ * Slice valid [1:0]
+ */
+ reg = (u32)(be32_to_cpu(addrs->src) & 0xff) << 24 |
+ (u32)(be32_to_cpu(addrs->src) >> 16) << 8 |
+ SLICE_NUM(slice_num) | SLICE_VALID;
+ if (mask)
+ offset = CORE_CFP_MASK_PORT(0);
+ else
+ offset = CORE_CFP_DATA_PORT(0);
+ core_writel(priv, reg, offset);
+}
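+
+/* Byte-slicing example (a sketch, not an exhaustive register map): for a
+ * destination IP of 192.168.1.2 (0xc0a80102), addr[15:8] = 0x01 lands in
+ * bits [7:0] of the PORT(2) word, while addr[7:0] = 0x02 fills bits [31:24]
+ * and addr[31:16] = 0xc0a8 fills bits [23:8] of the PORT(1) word, matching
+ * the UDF_n_A3..A1 layout described above.
+ */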
+
+static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
+ unsigned int port_num,
+ unsigned int queue_num,
+ struct ethtool_rx_flow_spec *fs)
+{
+ __be16 vlan_tci = 0, vlan_m_tci = htons(0xffff);
+ struct ethtool_rx_flow_spec_input input = {};
+ const struct cfp_udf_layout *layout;
+ unsigned int slice_num, rule_index;
+ struct ethtool_rx_flow_rule *flow;
+ struct flow_match_ipv4_addrs ipv4;
+ struct flow_match_ports ports;
+ struct flow_match_ip ip;
+ u8 ip_proto, ip_frag;
+ u8 num_udf;
+ u32 reg;
+ int ret;
+
+ switch (fs->flow_type & ~FLOW_EXT) {
+ case TCP_V4_FLOW:
+ ip_proto = IPPROTO_TCP;
+ break;
+ case UDP_V4_FLOW:
+ ip_proto = IPPROTO_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);
+
+ /* Extract VLAN TCI */
+ if (fs->flow_type & FLOW_EXT) {
+ vlan_tci = fs->h_ext.vlan_tci;
+ vlan_m_tci = fs->m_ext.vlan_tci;
+ }
+
+ /* Locate the first rule available */
+ if (fs->location == RX_CLS_LOC_ANY)
+ rule_index = find_first_zero_bit(priv->cfp.used,
+ priv->num_cfp_rules);
+ else
+ rule_index = fs->location;
+
+ if (rule_index > bcm_sf2_cfp_rule_size(priv))
+ return -ENOSPC;
+
+ input.fs = fs;
+ flow = ethtool_rx_flow_rule_create(&input);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ flow_rule_match_ipv4_addrs(flow->rule, &ipv4);
+ flow_rule_match_ports(flow->rule, &ports);
+ flow_rule_match_ip(flow->rule, &ip);
+
+ layout = &udf_tcpip4_layout;
+ /* We only use one UDF slice for now */
+ slice_num = bcm_sf2_get_slice_number(layout, 0);
+ if (slice_num == UDF_NUM_SLICES) {
+ ret = -EINVAL;
+ goto out_err_flow_rule;
+ }
+
+ num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);
+
+ /* Apply the UDF layout for this filter */
+ bcm_sf2_cfp_udf_set(priv, layout, slice_num);
+
+ /* Apply to all packets received through this port */
+ core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));
+
+ /* Source port map match */
+ core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));
+
+ /* S-Tag status [31:30]
+ * C-Tag status [29:28]
+ * L2 framing [27:26]
+ * L3 framing [25:24]
+ * IP ToS [23:16]
+ * IP proto [15:08]
+ * IP Fragm [7]
+ * Non 1st frag [6]
+ * IP Authen [5]
+ * TTL range [4:3]
+ * PPPoE session [2]
+ * Reserved [1]
+ * UDF_Valid[8] [0]
+ */
+ core_writel(priv, ip.key->tos << IPTOS_SHIFT |
+ ip_proto << IPPROTO_SHIFT | ip_frag << IP_FRAG_SHIFT |
+ udf_upper_bits(num_udf),
+ CORE_CFP_DATA_PORT(6));
+
+ /* Mask with the specific layout for IPv4 packets */
+ core_writel(priv, layout->udfs[slice_num].mask_value |
+ udf_upper_bits(num_udf), CORE_CFP_MASK_PORT(6));
+
+ /* Program the match and the mask */
+ bcm_sf2_cfp_slice_ipv4(priv, ipv4.key, ports.key, vlan_tci,
+ slice_num, num_udf, false);
+ bcm_sf2_cfp_slice_ipv4(priv, ipv4.mask, ports.mask, vlan_m_tci,
+ SLICE_NUM_MASK, num_udf, true);
+
+ /* Insert into TCAM now */
+ bcm_sf2_cfp_rule_addr_set(priv, rule_index);
+
+ ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
+ if (ret) {
+ pr_err("TCAM entry at addr %d failed\n", rule_index);
+ goto out_err_flow_rule;
+ }
+
+ /* Insert into Action and policer RAMs now */
+ ret = bcm_sf2_cfp_act_pol_set(priv, rule_index, port, port_num,
+ queue_num, true);
+ if (ret)
+ goto out_err_flow_rule;
+
+ /* Turn on CFP for this rule now */
+ reg = core_readl(priv, CORE_CFP_CTL_REG);
+ reg |= BIT(port);
+ core_writel(priv, reg, CORE_CFP_CTL_REG);
+
+ /* Flag the rule as being used and return it */
+ set_bit(rule_index, priv->cfp.used);
+ set_bit(rule_index, priv->cfp.unique);
+ fs->location = rule_index;
+
+ return 0;
+
+out_err_flow_rule:
+ ethtool_rx_flow_rule_destroy(flow);
+ return ret;
+}
+
+static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
+ const __be32 *ip6_addr, const __be16 port,
+ const __be16 vlan_tci,
+ unsigned int slice_num, u32 udf_bits,
+ bool mask)
+{
+ u32 reg, tmp, val, offset;
+
+ /* UDF_Valid[7:0] [31:24]
+ * S-Tag [23:8]
+ * C-Tag [7:0]
+ */
+ reg = udf_bits << 24 | be16_to_cpu(vlan_tci) >> 8;
+ if (mask)
+ core_writel(priv, reg, CORE_CFP_MASK_PORT(5));
+ else
+ core_writel(priv, reg, CORE_CFP_DATA_PORT(5));
+
+ /* C-Tag [31:24]
+ * UDF_n_B8 [23:8] (port)
+ * UDF_n_B7 (upper) [7:0] (addr[15:8])
+ */
+ reg = be32_to_cpu(ip6_addr[3]);
+ val = (u32)be16_to_cpu(port) << 8 | ((reg >> 8) & 0xff);
+ val |= (u32)(be16_to_cpu(vlan_tci) & 0xff) << 24;
+ if (mask)
+ offset = CORE_CFP_MASK_PORT(4);
+ else
+ offset = CORE_CFP_DATA_PORT(4);
+ core_writel(priv, val, offset);
+
+ /* UDF_n_B7 (lower) [31:24] (addr[7:0])
+ * UDF_n_B6 [23:8] (addr[31:16])
+ * UDF_n_B5 (upper) [7:0] (addr[47:40])
+ */
+ tmp = be32_to_cpu(ip6_addr[2]);
+ val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
+ ((tmp >> 8) & 0xff);
+ if (mask)
+ offset = CORE_CFP_MASK_PORT(3);
+ else
+ offset = CORE_CFP_DATA_PORT(3);
+ core_writel(priv, val, offset);
+
+ /* UDF_n_B5 (lower) [31:24] (addr[39:32])
+ * UDF_n_B4 [23:8] (addr[63:48])
+ * UDF_n_B3 (upper) [7:0] (addr[79:72])
+ */
+ reg = be32_to_cpu(ip6_addr[1]);
+ val = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
+ ((reg >> 8) & 0xff);
+ if (mask)
+ offset = CORE_CFP_MASK_PORT(2);
+ else
+ offset = CORE_CFP_DATA_PORT(2);
+ core_writel(priv, val, offset);
+
+ /* UDF_n_B3 (lower) [31:24] (addr[71:64])
+ * UDF_n_B2 [23:8] (addr[95:80])
+ * UDF_n_B1 (upper) [7:0] (addr[111:104])
+ */
+ tmp = be32_to_cpu(ip6_addr[0]);
+ val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
+ ((tmp >> 8) & 0xff);
+ if (mask)
+ offset = CORE_CFP_MASK_PORT(1);
+ else
+ offset = CORE_CFP_DATA_PORT(1);
+ core_writel(priv, val, offset);
+
+ /* UDF_n_B1 (lower) [31:24] (addr[103:96])
+ * UDF_n_B0 [23:8] (addr[127:112])
+ * Reserved [7:4]
+ * Slice ID [3:2]
+ * Slice valid [1:0]
+ */
+ reg = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
+ SLICE_NUM(slice_num) | SLICE_VALID;
+ if (mask)
+ offset = CORE_CFP_MASK_PORT(0);
+ else
+ offset = CORE_CFP_DATA_PORT(0);
+ core_writel(priv, reg, offset);
+}
+
+static struct cfp_rule *bcm_sf2_cfp_rule_find(struct bcm_sf2_priv *priv,
+ int port, u32 location)
+{
+ struct cfp_rule *rule;
+
+ list_for_each_entry(rule, &priv->cfp.rules_list, next) {
+ if (rule->port == port && rule->fs.location == location)
+ return rule;
+ }
+
+ return NULL;
+}
+
+static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct cfp_rule *rule = NULL;
+ size_t fs_size = 0;
+ int ret = 1;
+
+ if (list_empty(&priv->cfp.rules_list))
+ return ret;
+
+ list_for_each_entry(rule, &priv->cfp.rules_list, next) {
+ ret = 1;
+ if (rule->port != port)
+ continue;
+
+ if (rule->fs.flow_type != fs->flow_type ||
+ rule->fs.ring_cookie != fs->ring_cookie ||
+ rule->fs.h_ext.data[0] != fs->h_ext.data[0])
+ continue;
+
+ switch (fs->flow_type & ~FLOW_EXT) {
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ fs_size = sizeof(struct ethtool_tcpip6_spec);
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ fs_size = sizeof(struct ethtool_tcpip4_spec);
+ break;
+ default:
+ continue;
+ }
+
+ ret = memcmp(&rule->fs.h_u, &fs->h_u, fs_size);
+ ret |= memcmp(&rule->fs.m_u, &fs->m_u, fs_size);
+ /* Compare VLAN TCI values as well */
+ if (rule->fs.flow_type & FLOW_EXT) {
+ ret |= rule->fs.h_ext.vlan_tci != fs->h_ext.vlan_tci;
+ ret |= rule->fs.m_ext.vlan_tci != fs->m_ext.vlan_tci;
+ }
+ if (ret == 0)
+ break;
+ }
+
+ return ret;
+}
+
+static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
+ unsigned int port_num,
+ unsigned int queue_num,
+ struct ethtool_rx_flow_spec *fs)
+{
+ __be16 vlan_tci = 0, vlan_m_tci = htons(0xffff);
+ struct ethtool_rx_flow_spec_input input = {};
+ unsigned int slice_num, rule_index[2];
+ const struct cfp_udf_layout *layout;
+ struct ethtool_rx_flow_rule *flow;
+ struct flow_match_ipv6_addrs ipv6;
+ struct flow_match_ports ports;
+ u8 ip_proto, ip_frag;
+ int ret = 0;
+ u8 num_udf;
+ u32 reg;
+
+ switch (fs->flow_type & ~FLOW_EXT) {
+ case TCP_V6_FLOW:
+ ip_proto = IPPROTO_TCP;
+ break;
+ case UDP_V6_FLOW:
+ ip_proto = IPPROTO_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);
+
+ /* Extract VLAN TCI */
+ if (fs->flow_type & FLOW_EXT) {
+ vlan_tci = fs->h_ext.vlan_tci;
+ vlan_m_tci = fs->m_ext.vlan_tci;
+ }
+
+ layout = &udf_tcpip6_layout;
+ slice_num = bcm_sf2_get_slice_number(layout, 0);
+ if (slice_num == UDF_NUM_SLICES)
+ return -EINVAL;
+
+ num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);
+
+ /* Negotiate two indexes: one for the second half, which is what we will
+ * return to user-space, and another one used to store its first half.
+ * The first half does not allow any choice of placement, so it just
+ * needs to find the next available bit. We return the second half as
+ * fs->location because that helps with the rule lookup later on: since
+ * the second half is chained from its first half, IPv6 CFP rules can
+ * easily be identified by checking whether they carry a CHAIN_ID.
+ *
+ * We also want the second half to have a lower rule_index than its
+ * first half because the HW search increments addresses.
+ */
+ if (fs->location == RX_CLS_LOC_ANY)
+ rule_index[1] = find_first_zero_bit(priv->cfp.used,
+ priv->num_cfp_rules);
+ else
+ rule_index[1] = fs->location;
+ if (rule_index[1] > bcm_sf2_cfp_rule_size(priv))
+ return -ENOSPC;
+
+ /* Flag it as used (cleared on error path) such that we can immediately
+ * obtain a second one to chain from.
+ */
+ set_bit(rule_index[1], priv->cfp.used);
+
+ rule_index[0] = find_first_zero_bit(priv->cfp.used,
+ priv->num_cfp_rules);
+ if (rule_index[0] > bcm_sf2_cfp_rule_size(priv)) {
+ ret = -ENOSPC;
+ goto out_err;
+ }
+
+ input.fs = fs;
+ flow = ethtool_rx_flow_rule_create(&input);
+ if (IS_ERR(flow)) {
+ ret = PTR_ERR(flow);
+ goto out_err;
+ }
+ flow_rule_match_ipv6_addrs(flow->rule, &ipv6);
+ flow_rule_match_ports(flow->rule, &ports);
+
+ /* Apply the UDF layout for this filter */
+ bcm_sf2_cfp_udf_set(priv, layout, slice_num);
+
+ /* Apply to all packets received through this port */
+ core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));
+
+ /* Source port map match */
+ core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));
+
+ /* S-Tag status [31:30]
+ * C-Tag status [29:28]
+ * L2 framing [27:26]
+ * L3 framing [25:24]
+ * IP ToS [23:16]
+ * IP proto [15:08]
+ * IP Fragm [7]
+ * Non 1st frag [6]
+ * IP Authen [5]
+ * TTL range [4:3]
+ * PPPoE session [2]
+ * Reserved [1]
+ * UDF_Valid[8] [0]
+ */
+ reg = 1 << L3_FRAMING_SHIFT | ip_proto << IPPROTO_SHIFT |
+ ip_frag << IP_FRAG_SHIFT | udf_upper_bits(num_udf);
+ core_writel(priv, reg, CORE_CFP_DATA_PORT(6));
+
+ /* Mask with the specific layout for IPv6 packets including
+ * UDF_Valid[8]
+ */
+ reg = layout->udfs[slice_num].mask_value | udf_upper_bits(num_udf);
+ core_writel(priv, reg, CORE_CFP_MASK_PORT(6));
+
+ /* Slice the IPv6 source address and port */
+ bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->src.in6_u.u6_addr32,
+ ports.key->src, vlan_tci, slice_num,
+ udf_lower_bits(num_udf), false);
+ bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->src.in6_u.u6_addr32,
+ ports.mask->src, vlan_m_tci, SLICE_NUM_MASK,
+ udf_lower_bits(num_udf), true);
+
+ /* Insert into TCAM now because we need to insert a second rule */
+ bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);
+
+ ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
+ if (ret) {
+ pr_err("TCAM entry at addr %d failed\n", rule_index[0]);
+ goto out_err_flow_rule;
+ }
+
+ /* Insert into Action and policer RAMs now */
+ ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port, port_num,
+ queue_num, false);
+ if (ret)
+ goto out_err_flow_rule;
+
+ /* Now deal with the second slice to chain this rule */
+ slice_num = bcm_sf2_get_slice_number(layout, slice_num + 1);
+ if (slice_num == UDF_NUM_SLICES) {
+ ret = -EINVAL;
+ goto out_err_flow_rule;
+ }
+
+ num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);
+
+ /* Apply the UDF layout for this filter */
+ bcm_sf2_cfp_udf_set(priv, layout, slice_num);
+
+ /* Chained rule, source port match is coming from the rule we are
+ * chained from.
+ */
+ core_writel(priv, 0, CORE_CFP_DATA_PORT(7));
+ core_writel(priv, 0, CORE_CFP_MASK_PORT(7));
+
+ /* CHAIN ID [31:24] chain to previous slice
+ * Reserved [23:20]
+ * UDF_Valid[11:8] [19:16]
+ * UDF_Valid[7:0] [15:8]
+ * UDF_n_D11 [7:0]
+ */
+ reg = rule_index[0] << 24 | udf_upper_bits(num_udf) << 16 |
+ udf_lower_bits(num_udf) << 8;
+ core_writel(priv, reg, CORE_CFP_DATA_PORT(6));
+
+ /* Mask all except chain ID, UDF Valid[8] and UDF Valid[7:0] */
+ reg = XCESS_ADDR_MASK << 24 | udf_upper_bits(num_udf) << 16 |
+ udf_lower_bits(num_udf) << 8;
+ core_writel(priv, reg, CORE_CFP_MASK_PORT(6));
+
+ bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->dst.in6_u.u6_addr32,
+ ports.key->dst, 0, slice_num,
+ 0, false);
+ bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->dst.in6_u.u6_addr32,
+ ports.mask->dst, 0, SLICE_NUM_MASK,
+ 0, true);
+
+ /* Insert into TCAM now */
+ bcm_sf2_cfp_rule_addr_set(priv, rule_index[1]);
+
+ ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
+ if (ret) {
+ pr_err("TCAM entry at addr %d failed\n", rule_index[1]);
+ goto out_err_flow_rule;
+ }
+
+ /* Insert into Action and policer RAMs now, set chain ID to
+ * the one we are chained to
+ */
+ ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port, port_num,
+ queue_num, true);
+ if (ret)
+ goto out_err_flow_rule;
+
+ /* Turn on CFP for this rule now */
+ reg = core_readl(priv, CORE_CFP_CTL_REG);
+ reg |= BIT(port);
+ core_writel(priv, reg, CORE_CFP_CTL_REG);
+
+ /* Flag the first half as used now that both halves are programmed,
+ * return the second half as the rule location, and flag it as unique
+ * for rule dumps.
+ */
+ set_bit(rule_index[0], priv->cfp.used);
+ set_bit(rule_index[1], priv->cfp.unique);
+ fs->location = rule_index[1];
+
+ return ret;
+
+out_err_flow_rule:
+ ethtool_rx_flow_rule_destroy(flow);
+out_err:
+ clear_bit(rule_index[1], priv->cfp.used);
+ return ret;
+}
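+
+/* To summarize the chaining layout: an IPv6 filter consumes two TCAM
+ * entries. The first half at rule_index[0] matches the source address and
+ * port, while the second half at rule_index[1] (returned as fs->location)
+ * matches the destination and is tied to its first half through CHAIN_ID.
+ */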
+
+static int bcm_sf2_cfp_rule_insert(struct dsa_switch *ds, int port,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+ __u64 ring_cookie = fs->ring_cookie;
+ struct switchdev_obj_port_vlan vlan;
+ unsigned int queue_num, port_num;
+ u16 vid;
+ int ret;
+
+ /* This rule is a Wake-on-LAN filter and we must specifically
+ * target the CPU port in order for it to work.
+ */
+ if (ring_cookie == RX_CLS_FLOW_WAKE)
+ ring_cookie = cpu_port * SF2_NUM_EGRESS_QUEUES;
+
+ /* We do not support discarding packets; check that the
+ * destination port is enabled and that we are within the
+ * number of ports supported by the switch.
+ */
+ port_num = ring_cookie / SF2_NUM_EGRESS_QUEUES;
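+
+ /* For example (illustration only): with SF2_NUM_EGRESS_QUEUES = 8, a
+ * ring_cookie of 18 selects port 2, queue 2 (18 / 8 and 18 % 8).
+ */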
+
+ if (ring_cookie == RX_CLS_FLOW_DISC ||
+ !(dsa_is_user_port(ds, port_num) ||
+ dsa_is_cpu_port(ds, port_num)) ||
+ port_num >= priv->hw_params.num_ports)
+ return -EINVAL;
+
+ /* If the rule matches a particular VLAN, make sure that we honor
+ * the matching and have it tagged or untagged on the destination port;
+ * we do this on egress with a VLAN entry. The egress tagging attribute
+ * is expected to be provided in h_ext.data[1] bit 0: a 1 means untagged,
+ * a 0 means tagged.
+ */
+ if (fs->flow_type & FLOW_EXT) {
+ /* We cannot support matching multiple VLAN IDs yet */
+ if ((be16_to_cpu(fs->m_ext.vlan_tci) & VLAN_VID_MASK) !=
+ VLAN_VID_MASK)
+ return -EINVAL;
+
+ vid = be16_to_cpu(fs->h_ext.vlan_tci) & VLAN_VID_MASK;
+ vlan.vid = vid;
+ if (be32_to_cpu(fs->h_ext.data[1]) & 1)
+ vlan.flags = BRIDGE_VLAN_INFO_UNTAGGED;
+ else
+ vlan.flags = 0;
+
+ ret = ds->ops->port_vlan_add(ds, port_num, &vlan, NULL);
+ if (ret)
+ return ret;
+ }
+
+ /* We have a small oddity where Port 6 just does not have a
+ * valid bit here (so we subtract by one).
+ */
+ queue_num = ring_cookie % SF2_NUM_EGRESS_QUEUES;
+ if (port_num >= 7)
+ port_num -= 1;
+
+ switch (fs->flow_type & ~FLOW_EXT) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ ret = bcm_sf2_cfp_ipv4_rule_set(priv, port, port_num,
+ queue_num, fs);
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ ret = bcm_sf2_cfp_ipv6_rule_set(priv, port, port_num,
+ queue_num, fs);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ struct cfp_rule *rule = NULL;
+ int ret = -EINVAL;
+
+ /* Check for unsupported extensions */
+ if (fs->flow_type & FLOW_MAC_EXT)
+ return -EINVAL;
+
+ if (fs->location != RX_CLS_LOC_ANY &&
+ fs->location > bcm_sf2_cfp_rule_size(priv))
+ return -EINVAL;
+
+ if ((fs->flow_type & FLOW_EXT) &&
+ !(ds->ops->port_vlan_add || ds->ops->port_vlan_del))
+ return -EOPNOTSUPP;
+
+ if (fs->location != RX_CLS_LOC_ANY &&
+ test_bit(fs->location, priv->cfp.used))
+ return -EBUSY;
+
+ ret = bcm_sf2_cfp_rule_cmp(priv, port, fs);
+ if (ret == 0)
+ return -EEXIST;
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ ret = bcm_sf2_cfp_rule_insert(ds, port, fs);
+ if (ret) {
+ kfree(rule);
+ return ret;
+ }
+
+ rule->port = port;
+ memcpy(&rule->fs, fs, sizeof(*fs));
+ list_add_tail(&rule->next, &priv->cfp.rules_list);
+
+ return ret;
+}
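+
+/* From user-space, rules reach this path through the standard rxnfc
+ * interface, e.g. (interface name and values are only an example):
+ *
+ *	ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.2 dst-port 80 \
+ *		action 2 loc 1
+ */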
+
+static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
+ u32 loc, u32 *next_loc)
+{
+ int ret;
+ u32 reg;
+
+ /* Indicate which rule we want to read */
+ bcm_sf2_cfp_rule_addr_set(priv, loc);
+
+ ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
+ if (ret)
+ return ret;
+
+ /* Check if this is possibly an IPv6 rule that would
+ * indicate we need to delete its companion rule
+ * as well
+ */
+ reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
+ if (next_loc)
+ *next_loc = (reg >> 24) & CHAIN_ID_MASK;
+
+ /* Clear its valid bits */
+ reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
+ reg &= ~SLICE_VALID;
+ core_writel(priv, reg, CORE_CFP_DATA_PORT(0));
+
+ /* Write back this entry into the TCAM now */
+ ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
+ if (ret)
+ return ret;
+
+ clear_bit(loc, priv->cfp.used);
+ clear_bit(loc, priv->cfp.unique);
+
+ return 0;
+}
+
+static int bcm_sf2_cfp_rule_remove(struct bcm_sf2_priv *priv, int port,
+ u32 loc)
+{
+ u32 next_loc = 0;
+ int ret;
+
+ ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
+ if (ret)
+ return ret;
+
+ /* If this was an IPv6 rule, delete its companion rule too */
+ if (next_loc)
+ ret = bcm_sf2_cfp_rule_del_one(priv, port, next_loc, NULL);
+
+ return ret;
+}
+
+static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc)
+{
+ struct cfp_rule *rule;
+ int ret;
+
+ if (loc > bcm_sf2_cfp_rule_size(priv))
+ return -EINVAL;
+
+ /* Refuse deleting unused rules, and those that are not unique, since
+ * that could leave an IPv6 rule with only one of its two chained
+ * halves in the table.
+ */
+ if (!test_bit(loc, priv->cfp.unique) || loc == 0)
+ return -EINVAL;
+
+ rule = bcm_sf2_cfp_rule_find(priv, port, loc);
+ if (!rule)
+ return -EINVAL;
+
+ ret = bcm_sf2_cfp_rule_remove(priv, port, loc);
+
+ list_del(&rule->next);
+ kfree(rule);
+
+ return ret;
+}
+
+static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
+{
+ unsigned int i;
+
+ for (i = 0; i < sizeof(flow->m_u); i++)
+ flow->m_u.hdata[i] ^= 0xff;
+
+ flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
+ flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
+ flow->m_ext.data[0] ^= cpu_to_be32(~0);
+ flow->m_ext.data[1] ^= cpu_to_be32(~0);
+}
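+
+/* Flip every mask bit: the rxnfc GET path reports masks in the opposite
+ * polarity from the one bcm_sf2_cfp_rule_set() stored them with.
+ */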
+
+static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
+ struct ethtool_rxnfc *nfc)
+{
+ struct cfp_rule *rule;
+
+ rule = bcm_sf2_cfp_rule_find(priv, port, nfc->fs.location);
+ if (!rule)
+ return -EINVAL;
+
+ memcpy(&nfc->fs, &rule->fs, sizeof(rule->fs));
+
+ bcm_sf2_invert_masks(&nfc->fs);
+
+ /* Put the TCAM size here */
+ nfc->data = bcm_sf2_cfp_rule_size(priv);
+
+ return 0;
+}
+
+/* Report all rule locations by walking the 'unique' rules bitmap; no TCAM
+ * search operation is required.
+ */
+static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
+ int port, struct ethtool_rxnfc *nfc,
+ u32 *rule_locs)
+{
+ unsigned int index = 1, rules_cnt = 0;
+
+ for_each_set_bit_from(index, priv->cfp.unique, priv->num_cfp_rules) {
+ rule_locs[rules_cnt] = index;
+ rules_cnt++;
+ }
+
+ /* Put the TCAM size here */
+ nfc->data = bcm_sf2_cfp_rule_size(priv);
+ nfc->rule_cnt = rules_cnt;
+
+ return 0;
+}
+
+int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
+ struct ethtool_rxnfc *nfc, u32 *rule_locs)
+{
+ struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ int ret = 0;
+
+ mutex_lock(&priv->cfp.lock);
+
+ switch (nfc->cmd) {
+ case ETHTOOL_GRXCLSRLCNT:
+ /* Subtract the default, unusable rule */
+ nfc->rule_cnt = bitmap_weight(priv->cfp.unique,
+ priv->num_cfp_rules) - 1;
+ /* We support specifying rule locations */
+ nfc->data |= RX_CLS_LOC_SPECIAL;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = bcm_sf2_cfp_rule_get(priv, port, nfc);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = bcm_sf2_cfp_rule_get_all(priv, port, nfc, rule_locs);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&priv->cfp.lock);
+
+ if (ret)
+ return ret;
+
+ /* Pass up the commands to the attached master network device */
+ if (p->ethtool_ops->get_rxnfc) {
+ ret = p->ethtool_ops->get_rxnfc(p, nfc, rule_locs);
+ if (ret == -EOPNOTSUPP)
+ ret = 0;
+ }
+
+ return ret;
+}
+
+int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
+ struct ethtool_rxnfc *nfc)
+{
+ struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ int ret = 0;
+
+ mutex_lock(&priv->cfp.lock);
+
+ switch (nfc->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ ret = bcm_sf2_cfp_rule_set(ds, port, &nfc->fs);
+ break;
+
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&priv->cfp.lock);
+
+ if (ret)
+ return ret;
+
+ /* Pass up the commands to the attached master network device.
+ * This can fail, so roll back the operation if we need to.
+ */
+ if (p->ethtool_ops->set_rxnfc) {
+ ret = p->ethtool_ops->set_rxnfc(p, nfc);
+ if (ret && ret != -EOPNOTSUPP) {
+ mutex_lock(&priv->cfp.lock);
+ bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
+ mutex_unlock(&priv->cfp.lock);
+ } else {
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
+
+int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
+{
+ unsigned int timeout = 1000;
+ u32 reg;
+
+ reg = core_readl(priv, CORE_CFP_ACC);
+ reg |= TCAM_RESET;
+ core_writel(priv, reg, CORE_CFP_ACC);
+
+ do {
+ reg = core_readl(priv, CORE_CFP_ACC);
+ if (!(reg & TCAM_RESET))
+ break;
+
+ cpu_relax();
+ } while (timeout--);
+
+ if (!timeout)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+void bcm_sf2_cfp_exit(struct dsa_switch *ds)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ struct cfp_rule *rule, *n;
+
+ if (list_empty(&priv->cfp.rules_list))
+ return;
+
+ list_for_each_entry_safe_reverse(rule, n, &priv->cfp.rules_list, next)
+ bcm_sf2_cfp_rule_del(priv, rule->port, rule->fs.location);
+}
+
+int bcm_sf2_cfp_resume(struct dsa_switch *ds)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ struct cfp_rule *rule;
+ int ret = 0;
+ u32 reg;
+
+ if (list_empty(&priv->cfp.rules_list))
+ return ret;
+
+ reg = core_readl(priv, CORE_CFP_CTL_REG);
+ reg &= ~CFP_EN_MAP_MASK;
+ core_writel(priv, reg, CORE_CFP_CTL_REG);
+
+ ret = bcm_sf2_cfp_rst(priv);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(rule, &priv->cfp.rules_list, next) {
+ ret = bcm_sf2_cfp_rule_remove(priv, rule->port,
+ rule->fs.location);
+ if (ret) {
+ dev_err(ds->dev, "failed to remove rule\n");
+ return ret;
+ }
+
+ ret = bcm_sf2_cfp_rule_insert(ds, rule->port, &rule->fs);
+ if (ret) {
+ dev_err(ds->dev, "failed to restore rule\n");
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static const struct bcm_sf2_cfp_stat {
+ unsigned int offset;
+ unsigned int ram_loc;
+ const char *name;
+} bcm_sf2_cfp_stats[] = {
+ {
+ .offset = CORE_STAT_GREEN_CNTR,
+ .ram_loc = GREEN_STAT_RAM,
+ .name = "Green"
+ },
+ {
+ .offset = CORE_STAT_YELLOW_CNTR,
+ .ram_loc = YELLOW_STAT_RAM,
+ .name = "Yellow"
+ },
+ {
+ .offset = CORE_STAT_RED_CNTR,
+ .ram_loc = RED_STAT_RAM,
+ .name = "Red"
+ },
+};
+
+void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, uint8_t *data)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ unsigned int s = ARRAY_SIZE(bcm_sf2_cfp_stats);
+ char buf[ETH_GSTRING_LEN];
+ unsigned int i, j, iter;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 1; i < priv->num_cfp_rules; i++) {
+ for (j = 0; j < s; j++) {
+ snprintf(buf, sizeof(buf),
+ "CFP%03d_%sCntr",
+ i, bcm_sf2_cfp_stats[j].name);
+ iter = (i - 1) * s + j;
+ strscpy(data + iter * ETH_GSTRING_LEN,
+ buf, ETH_GSTRING_LEN);
+ }
+ }
+}
+
+void bcm_sf2_cfp_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ unsigned int s = ARRAY_SIZE(bcm_sf2_cfp_stats);
+ const struct bcm_sf2_cfp_stat *stat;
+ unsigned int i, j, iter;
+ struct cfp_rule *rule;
+ int ret;
+
+ mutex_lock(&priv->cfp.lock);
+ for (i = 1; i < priv->num_cfp_rules; i++) {
+ rule = bcm_sf2_cfp_rule_find(priv, port, i);
+ if (!rule)
+ continue;
+
+ for (j = 0; j < s; j++) {
+ stat = &bcm_sf2_cfp_stats[j];
+
+ bcm_sf2_cfp_rule_addr_set(priv, i);
+ ret = bcm_sf2_cfp_op(priv, stat->ram_loc | OP_SEL_READ);
+ if (ret)
+ continue;
+
+ iter = (i - 1) * s + j;
+ data[iter] = core_readl(priv, stat->offset);
+ }
+
+ }
+ mutex_unlock(&priv->cfp.lock);
+}
+
+int bcm_sf2_cfp_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ /* 3 counters per CFP rule */
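+ /* e.g. with the default CFP_NUM_RULES of 256: (256 - 1) * 3 = 765 */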
+ return (priv->num_cfp_rules - 1) * ARRAY_SIZE(bcm_sf2_cfp_stats);
+}
diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h
new file mode 100644
index 000000000..da0dedbd6
--- /dev/null
+++ b/drivers/net/dsa/bcm_sf2_regs.h
@@ -0,0 +1,496 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Broadcom Starfighter 2 switch register defines
+ *
+ * Copyright (C) 2014, Broadcom Corporation
+ */
+#ifndef __BCM_SF2_REGS_H
+#define __BCM_SF2_REGS_H
+
+/* Register set relative to 'REG' */
+
+enum bcm_sf2_reg_offs {
+ REG_SWITCH_CNTRL = 0,
+ REG_SWITCH_STATUS,
+ REG_DIR_DATA_WRITE,
+ REG_DIR_DATA_READ,
+ REG_SWITCH_REVISION,
+ REG_PHY_REVISION,
+ REG_SPHY_CNTRL,
+ REG_CROSSBAR,
+ REG_RGMII_0_CNTRL,
+ REG_RGMII_1_CNTRL,
+ REG_RGMII_2_CNTRL,
+ REG_RGMII_11_CNTRL,
+ REG_LED_0_CNTRL,
+ REG_LED_1_CNTRL,
+ REG_LED_2_CNTRL,
+ REG_LED_3_CNTRL,
+ REG_LED_4_CNTRL,
+ REG_LED_5_CNTRL,
+ REG_LED_AGGREGATE_CTRL,
+ REG_SWITCH_REG_MAX,
+};
+
+/* Relative to REG_SWITCH_CNTRL */
+#define MDIO_MASTER_SEL (1 << 0)
+
+/* Relative to REG_SWITCH_REVISION */
+#define SF2_REV_MASK 0xffff
+#define SWITCH_TOP_REV_SHIFT 16
+#define SWITCH_TOP_REV_MASK 0xffff
+
+/* Relative to REG_PHY_REVISION */
+#define PHY_REVISION_MASK 0xffff
+
+/* Relative to REG_SPHY_CNTRL */
+#define IDDQ_BIAS (1 << 0)
+#define EXT_PWR_DOWN (1 << 1)
+#define FORCE_DLL_EN (1 << 2)
+#define IDDQ_GLOBAL_PWR (1 << 3)
+#define CK25_DIS (1 << 4)
+#define PHY_RESET (1 << 5)
+#define PHY_PHYAD_SHIFT 8
+#define PHY_PHYAD_MASK 0x1F
+
+/* Relative to REG_CROSSBAR */
+#define CROSSBAR_BCM4908_INT_P7 0
+#define CROSSBAR_BCM4908_INT_RUNNER 1
+#define CROSSBAR_BCM4908_EXT_SERDES 0
+#define CROSSBAR_BCM4908_EXT_GPHY4 1
+#define CROSSBAR_BCM4908_EXT_RGMII 2
+
+/* Relative to REG_LED_*_CNTRL (BCM7278, BCM7445) */
+#define LED_CNTRL_NO_LINK_ENCODE_SHIFT 0
+#define LED_CNTRL_M10_ENCODE_SHIFT 2
+#define LED_CNTRL_M100_ENCODE_SHIFT 4
+#define LED_CNTRL_M1000_ENCODE_SHIFT 6
+#define LED_CNTRL_SEL_NO_LINK_ENCODE_SHIFT 8
+#define LED_CNTRL_SEL_10M_ENCODE_SHIFT 10
+#define LED_CNTRL_SEL_100M_ENCODE_SHIFT 12
+#define LED_CNTRL_SEL_1000M_ENCODE_SHIFT 14
+#define LED_CNTRL_RX_DV_EN (1 << 16)
+#define LED_CNTRL_TX_EN_EN (1 << 17)
+#define LED_CNTRL_SPDLNK_LED0_ACT_SEL_SHIFT 18
+#define LED_CNTRL_SPDLNK_LED1_ACT_SEL_SHIFT 20
+#define LED_CNTRL_ACT_LED_ACT_SEL_SHIFT 22
+#define LED_CNTRL_SPDLNK_SRC_SEL (1 << 24)
+#define LED_CNTRL_SPDLNK_LED0_ACT_POL_SEL (1 << 25)
+#define LED_CNTRL_SPDLNK_LED1_ACT_POL_SEL (1 << 26)
+#define LED_CNTRL_ACT_LED_POL_SEL (1 << 27)
+#define LED_CNTRL_MASK 0x3
+
+/* Register relative to REG_LED_*_CNTRL (BCM4908) */
+#define REG_LED_CTRL 0x0
+#define LED_CTRL_RX_ACT_EN 0x00000001
+#define LED_CTRL_TX_ACT_EN 0x00000002
+#define LED_CTRL_SPDLNK_LED0_ACT_SEL 0x00000004
+#define LED_CTRL_SPDLNK_LED1_ACT_SEL 0x00000008
+#define LED_CTRL_SPDLNK_LED2_ACT_SEL 0x00000010
+#define LED_CTRL_ACT_LED_ACT_SEL 0x00000020
+#define LED_CTRL_SPDLNK_LED0_ACT_POL_SEL 0x00000040
+#define LED_CTRL_SPDLNK_LED1_ACT_POL_SEL 0x00000080
+#define LED_CTRL_SPDLNK_LED2_ACT_POL_SEL 0x00000100
+#define LED_CTRL_ACT_LED_POL_SEL 0x00000200
+#define LED_CTRL_LED_SPD_OVRD 0x00001c00
+#define LED_CTRL_LNK_STATUS_OVRD 0x00002000
+#define LED_CTRL_SPD_OVRD_EN 0x00004000
+#define LED_CTRL_LNK_OVRD_EN 0x00008000
+
+/* Register relative to REG_LED_*_CNTRL (BCM4908) */
+#define REG_LED_LINK_SPEED_ENC_SEL 0x4
+#define LED_LINK_SPEED_ENC_SEL_NO_LINK_SHIFT 0
+#define LED_LINK_SPEED_ENC_SEL_10M_SHIFT 3
+#define LED_LINK_SPEED_ENC_SEL_100M_SHIFT 6
+#define LED_LINK_SPEED_ENC_SEL_1000M_SHIFT 9
+#define LED_LINK_SPEED_ENC_SEL_2500M_SHIFT 12
+#define LED_LINK_SPEED_ENC_SEL_10G_SHIFT 15
+#define LED_LINK_SPEED_ENC_SEL_MASK 0x7
+
+/* Register relative to REG_LED_*_CNTRL (BCM4908) */
+#define REG_LED_LINK_SPEED_ENC 0x8
+#define LED_LINK_SPEED_ENC_NO_LINK_SHIFT 0
+#define LED_LINK_SPEED_ENC_M10_SHIFT 3
+#define LED_LINK_SPEED_ENC_M100_SHIFT 6
+#define LED_LINK_SPEED_ENC_M1000_SHIFT 9
+#define LED_LINK_SPEED_ENC_M2500_SHIFT 12
+#define LED_LINK_SPEED_ENC_M10G_SHIFT 15
+#define LED_LINK_SPEED_ENC_MASK 0x7
+
+/* Relative to REG_RGMII_CNTRL */
+#define RGMII_MODE_EN (1 << 0)
+#define ID_MODE_DIS (1 << 1)
+#define PORT_MODE_SHIFT 2
+#define INT_EPHY (0 << PORT_MODE_SHIFT)
+#define INT_GPHY (1 << PORT_MODE_SHIFT)
+#define EXT_EPHY (2 << PORT_MODE_SHIFT)
+#define EXT_GPHY (3 << PORT_MODE_SHIFT)
+#define EXT_REVMII (4 << PORT_MODE_SHIFT)
+#define PORT_MODE_MASK 0x7
+#define RVMII_REF_SEL (1 << 5)
+#define RX_PAUSE_EN (1 << 6)
+#define TX_PAUSE_EN (1 << 7)
+#define TX_CLK_STOP_EN (1 << 8)
+#define LPI_COUNT_SHIFT 9
+#define LPI_COUNT_MASK 0x3F
+
+/* Register set relative to 'INTRL2_0' and 'INTRL2_1' */
+#define INTRL2_CPU_STATUS 0x00
+#define INTRL2_CPU_SET 0x04
+#define INTRL2_CPU_CLEAR 0x08
+#define INTRL2_CPU_MASK_STATUS 0x0c
+#define INTRL2_CPU_MASK_SET 0x10
+#define INTRL2_CPU_MASK_CLEAR 0x14
+
+/* Shared INTRL2_0 and INTRL2_1 interrupt source macros */
+#define P_LINK_UP_IRQ(x) (1 << (0 + (x)))
+#define P_LINK_DOWN_IRQ(x) (1 << (1 + (x)))
+#define P_ENERGY_ON_IRQ(x) (1 << (2 + (x)))
+#define P_ENERGY_OFF_IRQ(x) (1 << (3 + (x)))
+#define P_GPHY_IRQ(x) (1 << (4 + (x)))
+#define P_NUM_IRQ 5
+#define P_IRQ_MASK(x) (P_LINK_UP_IRQ((x)) | \
+ P_LINK_DOWN_IRQ((x)) | \
+ P_ENERGY_ON_IRQ((x)) | \
+ P_ENERGY_OFF_IRQ((x)) | \
+ P_GPHY_IRQ((x)))
+
+/* INTRL2_0 interrupt sources */
+#define P0_IRQ_OFF 0
+#define MEM_DOUBLE_IRQ (1 << 5)
+#define EEE_LPI_IRQ (1 << 6)
+#define P5_CPU_WAKE_IRQ (1 << 7)
+#define P8_CPU_WAKE_IRQ (1 << 8)
+#define P7_CPU_WAKE_IRQ (1 << 9)
+#define IEEE1588_IRQ (1 << 10)
+#define MDIO_ERR_IRQ (1 << 11)
+#define MDIO_DONE_IRQ (1 << 12)
+#define GISB_ERR_IRQ (1 << 13)
+#define UBUS_ERR_IRQ (1 << 14)
+#define FAILOVER_ON_IRQ (1 << 15)
+#define FAILOVER_OFF_IRQ (1 << 16)
+#define TCAM_SOFT_ERR_IRQ (1 << 17)
+
+/* INTRL2_1 interrupt sources */
+#define P7_IRQ_OFF 0
+#define P_IRQ_OFF(x) ((6 - (x)) * P_NUM_IRQ)
+
+/* Register set relative to 'ACB' */
+#define ACB_CONTROL 0x00
+#define ACB_EN (1 << 0)
+#define ACB_ALGORITHM (1 << 1)
+#define ACB_FLUSH_SHIFT 2
+#define ACB_FLUSH_MASK 0x3
+
+#define ACB_QUEUE_0_CFG 0x08
+#define XOFF_THRESHOLD_MASK 0x7ff
+#define XON_EN (1 << 11)
+#define TOTAL_XOFF_THRESHOLD_SHIFT 12
+#define TOTAL_XOFF_THRESHOLD_MASK 0x7ff
+#define TOTAL_XOFF_EN (1 << 23)
+#define TOTAL_XON_EN (1 << 24)
+#define PKTLEN_SHIFT 25
+#define PKTLEN_MASK 0x3f
+#define ACB_QUEUE_CFG(x) (ACB_QUEUE_0_CFG + ((x) * 0x4))
+
+/* Register set relative to 'CORE' */
+#define CORE_G_PCTL_PORT0 0x00000
+#define CORE_G_PCTL_PORT(x) (CORE_G_PCTL_PORT0 + (x * 0x4))
+#define CORE_IMP_CTL 0x00020
+#define RX_DIS (1 << 0)
+#define TX_DIS (1 << 1)
+#define RX_BCST_EN (1 << 2)
+#define RX_MCST_EN (1 << 3)
+#define RX_UCST_EN (1 << 4)
+
+#define CORE_SWMODE 0x0002c
+#define SW_FWDG_MODE (1 << 0)
+#define SW_FWDG_EN (1 << 1)
+#define RTRY_LMT_DIS (1 << 2)
+
+#define CORE_STS_OVERRIDE_IMP 0x00038
+#define GMII_SPEED_UP_2G (1 << 6)
+#define MII_SW_OR (1 << 7)
+
+/* Alternate layout for e.g. the 7278 */
+#define CORE_STS_OVERRIDE_IMP2 0x39040
+
+#define CORE_NEW_CTRL 0x00084
+#define IP_MC (1 << 0)
+#define OUTRANGEERR_DISCARD (1 << 1)
+#define INRANGEERR_DISCARD (1 << 2)
+#define CABLE_DIAG_LEN (1 << 3)
+#define OVERRIDE_AUTO_PD_WAR (1 << 4)
+#define EN_AUTO_PD_WAR (1 << 5)
+#define UC_FWD_EN (1 << 6)
+#define MC_FWD_EN (1 << 7)
+
+#define CORE_SWITCH_CTRL 0x00088
+#define MII_DUMB_FWDG_EN (1 << 6)
+
+#define CORE_DIS_LEARN 0x000f0
+
+#define CORE_SFT_LRN_CTRL 0x000f8
+#define SW_LEARN_CNTL(x) (1 << (x))
+
+#define CORE_STS_OVERRIDE_GMIIP_PORT(x) (0x160 + (x) * 4)
+#define CORE_STS_OVERRIDE_GMIIP2_PORT(x) (0x39000 + (x) * 8)
+#define LINK_STS (1 << 0)
+#define DUPLX_MODE (1 << 1)
+#define SPEED_SHIFT 2
+#define SPEED_MASK 0x3
+#define RXFLOW_CNTL (1 << 4)
+#define TXFLOW_CNTL (1 << 5)
+#define SW_OVERRIDE (1 << 6)
+
+#define CORE_WATCHDOG_CTRL 0x001e4
+#define SOFTWARE_RESET (1 << 7)
+#define EN_CHIP_RST (1 << 6)
+#define EN_SW_RESET (1 << 4)
+
+#define CORE_FAST_AGE_CTRL 0x00220
+#define EN_FAST_AGE_STATIC (1 << 0)
+#define EN_AGE_DYNAMIC (1 << 1)
+#define EN_AGE_PORT (1 << 2)
+#define EN_AGE_VLAN (1 << 3)
+#define EN_AGE_SPT (1 << 4)
+#define EN_AGE_MCAST (1 << 5)
+#define FAST_AGE_STR_DONE (1 << 7)
+
+#define CORE_FAST_AGE_PORT 0x00224
+#define AGE_PORT_MASK 0xf
+
+#define CORE_FAST_AGE_VID 0x00228
+#define AGE_VID_MASK 0x3fff
+
+#define CORE_LNKSTS 0x00400
+#define LNK_STS_MASK 0x1ff
+
+#define CORE_SPDSTS 0x00410
+#define SPDSTS_10 0
+#define SPDSTS_100 1
+#define SPDSTS_1000 2
+#define SPDSTS_SHIFT 2
+#define SPDSTS_MASK 0x3
+
+#define CORE_DUPSTS 0x00420
+#define CORE_DUPSTS_MASK 0x1ff
+
+#define CORE_PAUSESTS 0x00428
+#define PAUSESTS_TX_PAUSE_SHIFT 9
+
+#define CORE_GMNCFGCFG 0x0800
+#define RST_MIB_CNT (1 << 0)
+#define RXBPDU_EN (1 << 1)
+
+#define CORE_IMP0_PRT_ID 0x0804
+
+#define CORE_RST_MIB_CNT_EN 0x0950
+
+#define CORE_ARLA_VTBL_RWCTRL 0x1600
+#define ARLA_VTBL_CMD_WRITE 0
+#define ARLA_VTBL_CMD_READ 1
+#define ARLA_VTBL_CMD_CLEAR 2
+#define ARLA_VTBL_STDN (1 << 7)
+
+#define CORE_ARLA_VTBL_ADDR 0x1604
+#define VTBL_ADDR_INDEX_MASK 0xfff
+
+#define CORE_ARLA_VTBL_ENTRY 0x160c
+#define FWD_MAP_MASK 0x1ff
+#define UNTAG_MAP_MASK 0x1ff
+#define UNTAG_MAP_SHIFT 9
+#define MSTP_INDEX_MASK 0x7
+#define MSTP_INDEX_SHIFT 18
+#define FWD_MODE (1 << 21)
+
+#define CORE_MEM_PSM_VDD_CTRL 0x2380
+#define P_TXQ_PSM_VDD_SHIFT 2
+#define P_TXQ_PSM_VDD_MASK 0x3
+#define P_TXQ_PSM_VDD(x) (P_TXQ_PSM_VDD_MASK << \
+ ((x) * P_TXQ_PSM_VDD_SHIFT))
+
+#define CORE_PORT_TC2_QOS_MAP_PORT(x) (0xc1c0 + ((x) * 0x10))
+#define PRT_TO_QID_MASK 0x3
+#define PRT_TO_QID_SHIFT 3
+
+#define CORE_PORT_VLAN_CTL_PORT(x) (0xc400 + ((x) * 0x8))
+#define PORT_VLAN_CTRL_MASK 0x1ff
+
+#define CORE_TXQ_THD_PAUSE_QN_PORT_0 0x2c80
+#define TXQ_PAUSE_THD_MASK 0x7ff
+#define CORE_TXQ_THD_PAUSE_QN_PORT(x) (CORE_TXQ_THD_PAUSE_QN_PORT_0 + \
+ (x) * 0x8)
+
+#define CORE_DEFAULT_1Q_TAG_P(x) (0xd040 + ((x) * 8))
+#define CFI_SHIFT 12
+#define PRI_SHIFT 13
+#define PRI_MASK 0x7
+
+#define CORE_JOIN_ALL_VLAN_EN 0xd140
+
+#define CORE_CFP_ACC 0x28000
+#define OP_STR_DONE (1 << 0)
+#define OP_SEL_SHIFT 1
+#define OP_SEL_READ (1 << OP_SEL_SHIFT)
+#define OP_SEL_WRITE (2 << OP_SEL_SHIFT)
+#define OP_SEL_SEARCH (4 << OP_SEL_SHIFT)
+#define OP_SEL_MASK (7 << OP_SEL_SHIFT)
+#define CFP_RAM_CLEAR (1 << 4)
+#define RAM_SEL_SHIFT 10
+#define TCAM_SEL (1 << RAM_SEL_SHIFT)
+#define ACT_POL_RAM (2 << RAM_SEL_SHIFT)
+#define RATE_METER_RAM (4 << RAM_SEL_SHIFT)
+#define GREEN_STAT_RAM (8 << RAM_SEL_SHIFT)
+#define YELLOW_STAT_RAM (16 << RAM_SEL_SHIFT)
+#define RED_STAT_RAM (24 << RAM_SEL_SHIFT)
+#define RAM_SEL_MASK (0x1f << RAM_SEL_SHIFT)
+#define TCAM_RESET (1 << 15)
+#define XCESS_ADDR_SHIFT 16
+#define XCESS_ADDR_MASK 0xff
+#define SEARCH_STS (1 << 27)
+#define RD_STS_SHIFT 28
+#define RD_STS_TCAM (1 << RD_STS_SHIFT)
+#define RD_STS_ACT_POL_RAM (2 << RD_STS_SHIFT)
+#define RD_STS_RATE_METER_RAM (4 << RD_STS_SHIFT)
+#define RD_STS_STAT_RAM (8 << RD_STS_SHIFT)
+
+#define CORE_CFP_RATE_METER_GLOBAL_CTL 0x28010
+
+#define CORE_CFP_DATA_PORT_0 0x28040
+#define CORE_CFP_DATA_PORT(x) (CORE_CFP_DATA_PORT_0 + \
+ (x) * 0x10)
+
+/* UDF_DATA7 */
+#define L3_FRAMING_SHIFT 24
+#define L3_FRAMING_MASK (0x3 << L3_FRAMING_SHIFT)
+#define IPTOS_SHIFT 16
+#define IPTOS_MASK 0xff
+#define IPPROTO_SHIFT 8
+#define IPPROTO_MASK (0xff << IPPROTO_SHIFT)
+#define IP_FRAG_SHIFT 7
+#define IP_FRAG (1 << IP_FRAG_SHIFT)
+
+/* UDF_DATA0 */
+#define SLICE_VALID 3
+#define SLICE_NUM_SHIFT 2
+#define SLICE_NUM(x) ((x) << SLICE_NUM_SHIFT)
+#define SLICE_NUM_MASK 0x3
+
+#define CORE_CFP_MASK_PORT_0 0x280c0
+
+#define CORE_CFP_MASK_PORT(x) (CORE_CFP_MASK_PORT_0 + \
+ (x) * 0x10)
+
+#define CORE_ACT_POL_DATA0 0x28140
+#define VLAN_BYP (1 << 0)
+#define EAP_BYP (1 << 1)
+#define STP_BYP (1 << 2)
+#define REASON_CODE_SHIFT 3
+#define REASON_CODE_MASK 0x3f
+#define LOOP_BK_EN (1 << 9)
+#define NEW_TC_SHIFT 10
+#define NEW_TC_MASK 0x7
+#define CHANGE_TC (1 << 13)
+#define DST_MAP_IB_SHIFT 14
+#define DST_MAP_IB_MASK 0x1ff
+#define CHANGE_FWRD_MAP_IB_SHIFT 24
+#define CHANGE_FWRD_MAP_IB_MASK 0x3
+#define CHANGE_FWRD_MAP_IB_NO_DEST (0 << CHANGE_FWRD_MAP_IB_SHIFT)
+#define CHANGE_FWRD_MAP_IB_REM_ARL (1 << CHANGE_FWRD_MAP_IB_SHIFT)
+#define CHANGE_FWRD_MAP_IB_REP_ARL (2 << CHANGE_FWRD_MAP_IB_SHIFT)
+#define CHANGE_FWRD_MAP_IB_ADD_DST (3 << CHANGE_FWRD_MAP_IB_SHIFT)
+#define NEW_DSCP_IB_SHIFT 26
+#define NEW_DSCP_IB_MASK 0x3f
+
+#define CORE_ACT_POL_DATA1 0x28150
+#define CHANGE_DSCP_IB (1 << 0)
+#define DST_MAP_OB_SHIFT 1
+#define DST_MAP_OB_MASK 0x3ff
+#define CHANGE_FWRD_MAP_OB_SHIT 11
+#define CHANGE_FWRD_MAP_OB_MASK 0x3
+#define NEW_DSCP_OB_SHIFT 13
+#define NEW_DSCP_OB_MASK 0x3f
+#define CHANGE_DSCP_OB (1 << 19)
+#define CHAIN_ID_SHIFT 20
+#define CHAIN_ID_MASK 0xff
+#define CHANGE_COLOR (1 << 28)
+#define NEW_COLOR_SHIFT 29
+#define NEW_COLOR_MASK 0x3
+#define NEW_COLOR_GREEN (0 << NEW_COLOR_SHIFT)
+#define NEW_COLOR_YELLOW (1 << NEW_COLOR_SHIFT)
+#define NEW_COLOR_RED (2 << NEW_COLOR_SHIFT)
+#define RED_DEFAULT (1 << 31)
+
+#define CORE_ACT_POL_DATA2 0x28160
+#define MAC_LIMIT_BYPASS (1 << 0)
+#define CHANGE_TC_O (1 << 1)
+#define NEW_TC_O_SHIFT 2
+#define NEW_TC_O_MASK 0x7
+#define SPCP_RMK_DISABLE (1 << 5)
+#define CPCP_RMK_DISABLE (1 << 6)
+#define DEI_RMK_DISABLE (1 << 7)
+
+#define CORE_RATE_METER0 0x28180
+#define COLOR_MODE (1 << 0)
+#define POLICER_ACTION (1 << 1)
+#define COUPLING_FLAG (1 << 2)
+#define POLICER_MODE_SHIFT 3
+#define POLICER_MODE_MASK 0x3
+#define POLICER_MODE_RFC2698 (0 << POLICER_MODE_SHIFT)
+#define POLICER_MODE_RFC4115 (1 << POLICER_MODE_SHIFT)
+#define POLICER_MODE_MEF (2 << POLICER_MODE_SHIFT)
+#define POLICER_MODE_DISABLE (3 << POLICER_MODE_SHIFT)
+
+#define CORE_RATE_METER1 0x28190
+#define EIR_TK_BKT_MASK 0x7fffff
+
+#define CORE_RATE_METER2 0x281a0
+#define EIR_BKT_SIZE_MASK 0xfffff
+
+#define CORE_RATE_METER3 0x281b0
+#define EIR_REF_CNT_MASK 0x7ffff
+
+#define CORE_RATE_METER4 0x281c0
+#define CIR_TK_BKT_MASK 0x7fffff
+
+#define CORE_RATE_METER5 0x281d0
+#define CIR_BKT_SIZE_MASK 0xfffff
+
+#define CORE_RATE_METER6 0x281e0
+#define CIR_REF_CNT_MASK 0x7ffff
+
+#define CORE_STAT_GREEN_CNTR 0x28200
+#define CORE_STAT_YELLOW_CNTR 0x28210
+#define CORE_STAT_RED_CNTR 0x28220
+
+#define CORE_CFP_CTL_REG 0x28400
+#define CFP_EN_MAP_MASK 0x1ff
+
+/* IPv4 slices, 3 of them */
+#define CORE_UDF_0_A_0_8_PORT_0 0x28440
+#define CFG_UDF_OFFSET_MASK 0x1f
+#define CFG_UDF_OFFSET_BASE_SHIFT 5
+#define CFG_UDF_SOF (0 << CFG_UDF_OFFSET_BASE_SHIFT)
+#define CFG_UDF_EOL2 (2 << CFG_UDF_OFFSET_BASE_SHIFT)
+#define CFG_UDF_EOL3 (3 << CFG_UDF_OFFSET_BASE_SHIFT)
+
+/* IPv6 slices */
+#define CORE_UDF_0_B_0_8_PORT_0 0x28500
+
+/* IPv6 chained slices */
+#define CORE_UDF_0_D_0_11_PORT_0 0x28680
+
+/* Number of slices for IPv4, IPv6 and non-IP */
+#define UDF_NUM_SLICES 4
+#define UDFS_PER_SLICE 9
+
+/* Spacing between different slices */
+#define UDF_SLICE_OFFSET 0x40
+
+#define CFP_NUM_RULES 256
+
+/* Number of egress queues per port */
+#define SF2_NUM_EGRESS_QUEUES 8
+
+#endif /* __BCM_SF2_REGS_H */
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
new file mode 100644
index 000000000..5b139f220
--- /dev/null
+++ b/drivers/net/dsa/dsa_loop.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Distributed Switch Architecture loopback driver
+ *
+ * Copyright (C) 2016, Florian Fainelli <f.fainelli@gmail.com>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/export.h>
+#include <linux/ethtool.h>
+#include <linux/workqueue.h>
+#include <linux/module.h>
+#include <linux/if_bridge.h>
+#include <linux/dsa/loop.h>
+#include <net/dsa.h>
+
+#include "dsa_loop.h"
+
+static struct dsa_loop_mib_entry dsa_loop_mibs[] = {
+ [DSA_LOOP_PHY_READ_OK] = { "phy_read_ok", },
+ [DSA_LOOP_PHY_READ_ERR] = { "phy_read_err", },
+ [DSA_LOOP_PHY_WRITE_OK] = { "phy_write_ok", },
+ [DSA_LOOP_PHY_WRITE_ERR] = { "phy_write_err", },
+};
+
+static struct phy_device *phydevs[PHY_MAX_ADDR];
+
+enum dsa_loop_devlink_resource_id {
+ DSA_LOOP_DEVLINK_PARAM_ID_VTU,
+};
+
+static u64 dsa_loop_devlink_vtu_get(void *priv)
+{
+ struct dsa_loop_priv *ps = priv;
+ unsigned int i, count = 0;
+ struct dsa_loop_vlan *vl;
+
+ for (i = 0; i < ARRAY_SIZE(ps->vlans); i++) {
+ vl = &ps->vlans[i];
+ if (vl->members)
+ count++;
+ }
+
+ return count;
+}
+
+static int dsa_loop_setup_devlink_resources(struct dsa_switch *ds)
+{
+ struct devlink_resource_size_params size_params;
+ struct dsa_loop_priv *ps = ds->priv;
+ int err;
+
+ devlink_resource_size_params_init(&size_params, ARRAY_SIZE(ps->vlans),
+ ARRAY_SIZE(ps->vlans),
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ err = dsa_devlink_resource_register(ds, "VTU", ARRAY_SIZE(ps->vlans),
+ DSA_LOOP_DEVLINK_PARAM_ID_VTU,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
+ if (err)
+ goto out;
+
+ dsa_devlink_resource_occ_get_register(ds,
+ DSA_LOOP_DEVLINK_PARAM_ID_VTU,
+ dsa_loop_devlink_vtu_get, ps);
+
+ return 0;
+
+out:
+ dsa_devlink_resources_unregister(ds);
+ return err;
+}
+
+static enum dsa_tag_protocol dsa_loop_get_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol mp)
+{
+ dev_dbg(ds->dev, "%s: port: %d\n", __func__, port);
+
+ return DSA_TAG_PROTO_NONE;
+}
+
+static int dsa_loop_setup(struct dsa_switch *ds)
+{
+ struct dsa_loop_priv *ps = ds->priv;
+ unsigned int i;
+
+ for (i = 0; i < ds->num_ports; i++)
+ memcpy(ps->ports[i].mib, dsa_loop_mibs,
+ sizeof(dsa_loop_mibs));
+
+ dev_dbg(ds->dev, "%s\n", __func__);
+
+ return dsa_loop_setup_devlink_resources(ds);
+}
+
+static void dsa_loop_teardown(struct dsa_switch *ds)
+{
+ dsa_devlink_resources_unregister(ds);
+}
+
+static int dsa_loop_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ if (sset != ETH_SS_STATS && sset != ETH_SS_PHY_STATS)
+ return 0;
+
+ return __DSA_LOOP_CNT_MAX;
+}
+
+static void dsa_loop_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, uint8_t *data)
+{
+ struct dsa_loop_priv *ps = ds->priv;
+ unsigned int i;
+
+ if (stringset != ETH_SS_STATS && stringset != ETH_SS_PHY_STATS)
+ return;
+
+ for (i = 0; i < __DSA_LOOP_CNT_MAX; i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ ps->ports[port].mib[i].name, ETH_GSTRING_LEN);
+}
+
+static void dsa_loop_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct dsa_loop_priv *ps = ds->priv;
+ unsigned int i;
+
+ for (i = 0; i < __DSA_LOOP_CNT_MAX; i++)
+ data[i] = ps->ports[port].mib[i].val;
+}
+
+static int dsa_loop_phy_read(struct dsa_switch *ds, int port, int regnum)
+{
+ struct dsa_loop_priv *ps = ds->priv;
+ struct mii_bus *bus = ps->bus;
+ int ret;
+
+ ret = mdiobus_read_nested(bus, ps->port_base + port, regnum);
+ if (ret < 0)
+ ps->ports[port].mib[DSA_LOOP_PHY_READ_ERR].val++;
+ else
+ ps->ports[port].mib[DSA_LOOP_PHY_READ_OK].val++;
+
+ return ret;
+}
+
+static int dsa_loop_phy_write(struct dsa_switch *ds, int port,
+ int regnum, u16 value)
+{
+ struct dsa_loop_priv *ps = ds->priv;
+ struct mii_bus *bus = ps->bus;
+ int ret;
+
+ ret = mdiobus_write_nested(bus, ps->port_base + port, regnum, value);
+ if (ret < 0)
+ ps->ports[port].mib[DSA_LOOP_PHY_WRITE_ERR].val++;
+ else
+ ps->ports[port].mib[DSA_LOOP_PHY_WRITE_OK].val++;
+
+ return ret;
+}
+
+static int dsa_loop_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
+{
+ dev_dbg(ds->dev, "%s: port: %d, bridge: %s\n",
+ __func__, port, bridge.dev->name);
+
+ return 0;
+}
+
+static void dsa_loop_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
+{
+ dev_dbg(ds->dev, "%s: port: %d, bridge: %s\n",
+ __func__, port, bridge.dev->name);
+}
+
+static void dsa_loop_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ dev_dbg(ds->dev, "%s: port: %d, state: %d\n",
+ __func__, port, state);
+}
+
+static int dsa_loop_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering,
+ struct netlink_ext_ack *extack)
+{
+ dev_dbg(ds->dev, "%s: port: %d, vlan_filtering: %d\n",
+ __func__, port, vlan_filtering);
+
+ return 0;
+}
+
+static int dsa_loop_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct dsa_loop_priv *ps = ds->priv;
+ struct mii_bus *bus = ps->bus;
+ struct dsa_loop_vlan *vl;
+
+ if (vlan->vid >= ARRAY_SIZE(ps->vlans))
+ return -ERANGE;
+
+ /* Just do a sleeping operation to make lockdep checks effective */
+ mdiobus_read(bus, ps->port_base + port, MII_BMSR);
+
+ vl = &ps->vlans[vlan->vid];
+
+ vl->members |= BIT(port);
+ if (untagged)
+ vl->untagged |= BIT(port);
+ else
+ vl->untagged &= ~BIT(port);
+
+ dev_dbg(ds->dev, "%s: port: %d vlan: %d, %stagged, pvid: %d\n",
+ __func__, port, vlan->vid, untagged ? "un" : "", pvid);
+
+ if (pvid)
+ ps->ports[port].pvid = vlan->vid;
+
+ return 0;
+}
+
+static int dsa_loop_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ struct dsa_loop_priv *ps = ds->priv;
+ u16 pvid = ps->ports[port].pvid;
+ struct mii_bus *bus = ps->bus;
+ struct dsa_loop_vlan *vl;
+
+ /* Just do a sleeping operation to make lockdep checks effective */
+ mdiobus_read(bus, ps->port_base + port, MII_BMSR);
+
+ vl = &ps->vlans[vlan->vid];
+
+ vl->members &= ~BIT(port);
+ if (untagged)
+ vl->untagged &= ~BIT(port);
+
+ if (pvid == vlan->vid)
+ pvid = 1;
+
+ dev_dbg(ds->dev, "%s: port: %d vlan: %d, %stagged, pvid: %d\n",
+ __func__, port, vlan->vid, untagged ? "un" : "", pvid);
+ ps->ports[port].pvid = pvid;
+
+ return 0;
+}
+
+static int dsa_loop_port_change_mtu(struct dsa_switch *ds, int port,
+ int new_mtu)
+{
+ struct dsa_loop_priv *priv = ds->priv;
+
+ priv->ports[port].mtu = new_mtu;
+
+ return 0;
+}
+
+static int dsa_loop_port_max_mtu(struct dsa_switch *ds, int port)
+{
+ return ETH_MAX_MTU;
+}
+
+static const struct dsa_switch_ops dsa_loop_driver = {
+ .get_tag_protocol = dsa_loop_get_protocol,
+ .setup = dsa_loop_setup,
+ .teardown = dsa_loop_teardown,
+ .get_strings = dsa_loop_get_strings,
+ .get_ethtool_stats = dsa_loop_get_ethtool_stats,
+ .get_sset_count = dsa_loop_get_sset_count,
+ .get_ethtool_phy_stats = dsa_loop_get_ethtool_stats,
+ .phy_read = dsa_loop_phy_read,
+ .phy_write = dsa_loop_phy_write,
+ .port_bridge_join = dsa_loop_port_bridge_join,
+ .port_bridge_leave = dsa_loop_port_bridge_leave,
+ .port_stp_state_set = dsa_loop_port_stp_state_set,
+ .port_vlan_filtering = dsa_loop_port_vlan_filtering,
+ .port_vlan_add = dsa_loop_port_vlan_add,
+ .port_vlan_del = dsa_loop_port_vlan_del,
+ .port_change_mtu = dsa_loop_port_change_mtu,
+ .port_max_mtu = dsa_loop_port_max_mtu,
+};
+
+static int dsa_loop_drv_probe(struct mdio_device *mdiodev)
+{
+ struct dsa_loop_pdata *pdata = mdiodev->dev.platform_data;
+ struct dsa_loop_priv *ps;
+ struct dsa_switch *ds;
+ int ret;
+
+ if (!pdata)
+ return -ENODEV;
+
+ ds = devm_kzalloc(&mdiodev->dev, sizeof(*ds), GFP_KERNEL);
+ if (!ds)
+ return -ENOMEM;
+
+ ds->dev = &mdiodev->dev;
+ ds->num_ports = DSA_LOOP_NUM_PORTS;
+
+ ps = devm_kzalloc(&mdiodev->dev, sizeof(*ps), GFP_KERNEL);
+ if (!ps)
+ return -ENOMEM;
+
+ ps->netdev = dev_get_by_name(&init_net, pdata->netdev);
+ if (!ps->netdev)
+ return -EPROBE_DEFER;
+
+ pdata->cd.netdev[DSA_LOOP_CPU_PORT] = &ps->netdev->dev;
+
+ ds->dev = &mdiodev->dev;
+ ds->ops = &dsa_loop_driver;
+ ds->priv = ps;
+ ps->bus = mdiodev->bus;
+
+ dev_set_drvdata(&mdiodev->dev, ds);
+
+ ret = dsa_register_switch(ds);
+ if (!ret)
+ dev_info(&mdiodev->dev, "%s: 0x%0x\n",
+ pdata->name, pdata->enabled_ports);
+
+ return ret;
+}
+
+static void dsa_loop_drv_remove(struct mdio_device *mdiodev)
+{
+ struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+ struct dsa_loop_priv *ps;
+
+ if (!ds)
+ return;
+
+ ps = ds->priv;
+
+ dsa_unregister_switch(ds);
+ dev_put(ps->netdev);
+}
+
+static void dsa_loop_drv_shutdown(struct mdio_device *mdiodev)
+{
+ struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+
+ if (!ds)
+ return;
+
+ dsa_switch_shutdown(ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static struct mdio_driver dsa_loop_drv = {
+ .mdiodrv.driver = {
+ .name = "dsa-loop",
+ },
+ .probe = dsa_loop_drv_probe,
+ .remove = dsa_loop_drv_remove,
+ .shutdown = dsa_loop_drv_shutdown,
+};
+
+#define NUM_FIXED_PHYS (DSA_LOOP_NUM_PORTS - 2)
+
+static void dsa_loop_phydevs_unregister(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_FIXED_PHYS; i++)
+ if (!IS_ERR(phydevs[i])) {
+ fixed_phy_unregister(phydevs[i]);
+ phy_device_free(phydevs[i]);
+ }
+}
+
+static int __init dsa_loop_init(void)
+{
+ struct fixed_phy_status status = {
+ .link = 1,
+ .speed = SPEED_100,
+ .duplex = DUPLEX_FULL,
+ };
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < NUM_FIXED_PHYS; i++)
+ phydevs[i] = fixed_phy_register(PHY_POLL, &status, NULL);
+
+ ret = mdio_driver_register(&dsa_loop_drv);
+ if (ret)
+ dsa_loop_phydevs_unregister();
+
+ return ret;
+}
+module_init(dsa_loop_init);
+
+static void __exit dsa_loop_exit(void)
+{
+ mdio_driver_unregister(&dsa_loop_drv);
+ dsa_loop_phydevs_unregister();
+}
+module_exit(dsa_loop_exit);
+
+MODULE_SOFTDEP("pre: dsa_loop_bdinfo");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Florian Fainelli");
+MODULE_DESCRIPTION("DSA loopback driver");
diff --git a/drivers/net/dsa/dsa_loop.h b/drivers/net/dsa/dsa_loop.h
new file mode 100644
index 000000000..93e5c15d0
--- /dev/null
+++ b/drivers/net/dsa/dsa_loop.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DSA_LOOP_H
+#define __DSA_LOOP_H
+
+struct dsa_chip_data;
+
+struct dsa_loop_pdata {
+ /* Must be first, such that dsa_register_switch() can access this
+ * without gory pointer manipulations
+ */
+ struct dsa_chip_data cd;
+ const char *name;
+ unsigned int enabled_ports;
+ const char *netdev;
+};
+
+#define DSA_LOOP_NUM_PORTS 6
+#define DSA_LOOP_CPU_PORT (DSA_LOOP_NUM_PORTS - 1)
+
+#endif /* __DSA_LOOP_H */
diff --git a/drivers/net/dsa/dsa_loop_bdinfo.c b/drivers/net/dsa/dsa_loop_bdinfo.c
new file mode 100644
index 000000000..237066d30
--- /dev/null
+++ b/drivers/net/dsa/dsa_loop_bdinfo.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/phy.h>
+#include <net/dsa.h>
+
+#include "dsa_loop.h"
+
+static struct dsa_loop_pdata dsa_loop_pdata = {
+ .cd = {
+ .port_names[0] = "lan1",
+ .port_names[1] = "lan2",
+ .port_names[2] = "lan3",
+ .port_names[3] = "lan4",
+ .port_names[DSA_LOOP_CPU_PORT] = "cpu",
+ },
+ .name = "DSA mockup driver",
+ .enabled_ports = 0x1f,
+ .netdev = "eth0",
+};
+
+static const struct mdio_board_info bdinfo = {
+ .bus_id = "fixed-0",
+ .modalias = "dsa-loop",
+ .mdio_addr = 31,
+ .platform_data = &dsa_loop_pdata,
+};
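+
+/* The "fixed-0" bus_id refers to the MDIO bus created by the fixed PHY
+ * layer, and the "dsa-loop" modalias is what binds this board info to the
+ * mdio_driver registered in dsa_loop.c.
+ */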
+
+static int __init dsa_loop_bdinfo_init(void)
+{
+ return mdiobus_register_board_info(&bdinfo, 1);
+}
+arch_initcall(dsa_loop_bdinfo_init)
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/hirschmann/Kconfig b/drivers/net/dsa/hirschmann/Kconfig
new file mode 100644
index 000000000..9ea2c643f
--- /dev/null
+++ b/drivers/net/dsa/hirschmann/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+config NET_DSA_HIRSCHMANN_HELLCREEK
+ tristate "Hirschmann Hellcreek TSN Switch support"
+ depends on HAS_IOMEM
+ depends on NET_DSA
+ depends on PTP_1588_CLOCK
+ depends on LEDS_CLASS
+ depends on NET_SCH_TAPRIO
+ select NET_DSA_TAG_HELLCREEK
+ help
+ This driver adds support for Hirschmann Hellcreek TSN switches.
diff --git a/drivers/net/dsa/hirschmann/Makefile b/drivers/net/dsa/hirschmann/Makefile
new file mode 100644
index 000000000..f4075c299
--- /dev/null
+++ b/drivers/net/dsa/hirschmann/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_NET_DSA_HIRSCHMANN_HELLCREEK) += hellcreek_sw.o
+hellcreek_sw-objs := hellcreek.o
+hellcreek_sw-objs += hellcreek_ptp.o
+hellcreek_sw-objs += hellcreek_hwtstamp.o
diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c
new file mode 100644
index 000000000..951f7935c
--- /dev/null
+++ b/drivers/net/dsa/hirschmann/hellcreek.c
@@ -0,0 +1,2123 @@
+// SPDX-License-Identifier: (GPL-2.0 or MIT)
+/*
+ * DSA driver for:
+ * Hirschmann Hellcreek TSN switch.
+ *
+ * Copyright (C) 2019-2021 Linutronix GmbH
+ * Author Kurt Kanzenbach <kurt@linutronix.de>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/etherdevice.h>
+#include <linux/random.h>
+#include <linux/iopoll.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <net/dsa.h>
+
+#include "hellcreek.h"
+#include "hellcreek_ptp.h"
+#include "hellcreek_hwtstamp.h"
+
+static const struct hellcreek_counter hellcreek_counter[] = {
+ { 0x00, "RxFiltered", },
+ { 0x01, "RxOctets1k", },
+ { 0x02, "RxVTAG", },
+ { 0x03, "RxL2BAD", },
+ { 0x04, "RxOverloadDrop", },
+ { 0x05, "RxUC", },
+ { 0x06, "RxMC", },
+ { 0x07, "RxBC", },
+ { 0x08, "RxRS<64", },
+ { 0x09, "RxRS64", },
+ { 0x0a, "RxRS65_127", },
+ { 0x0b, "RxRS128_255", },
+ { 0x0c, "RxRS256_511", },
+ { 0x0d, "RxRS512_1023", },
+ { 0x0e, "RxRS1024_1518", },
+ { 0x0f, "RxRS>1518", },
+ { 0x10, "TxTailDropQueue0", },
+ { 0x11, "TxTailDropQueue1", },
+ { 0x12, "TxTailDropQueue2", },
+ { 0x13, "TxTailDropQueue3", },
+ { 0x14, "TxTailDropQueue4", },
+ { 0x15, "TxTailDropQueue5", },
+ { 0x16, "TxTailDropQueue6", },
+ { 0x17, "TxTailDropQueue7", },
+ { 0x18, "RxTrafficClass0", },
+ { 0x19, "RxTrafficClass1", },
+ { 0x1a, "RxTrafficClass2", },
+ { 0x1b, "RxTrafficClass3", },
+ { 0x1c, "RxTrafficClass4", },
+ { 0x1d, "RxTrafficClass5", },
+ { 0x1e, "RxTrafficClass6", },
+ { 0x1f, "RxTrafficClass7", },
+ { 0x21, "TxOctets1k", },
+ { 0x22, "TxVTAG", },
+ { 0x23, "TxL2BAD", },
+ { 0x25, "TxUC", },
+ { 0x26, "TxMC", },
+ { 0x27, "TxBC", },
+ { 0x28, "TxTS<64", },
+ { 0x29, "TxTS64", },
+ { 0x2a, "TxTS65_127", },
+ { 0x2b, "TxTS128_255", },
+ { 0x2c, "TxTS256_511", },
+ { 0x2d, "TxTS512_1023", },
+ { 0x2e, "TxTS1024_1518", },
+ { 0x2f, "TxTS>1518", },
+ { 0x30, "TxTrafficClassOverrun0", },
+ { 0x31, "TxTrafficClassOverrun1", },
+ { 0x32, "TxTrafficClassOverrun2", },
+ { 0x33, "TxTrafficClassOverrun3", },
+ { 0x34, "TxTrafficClassOverrun4", },
+ { 0x35, "TxTrafficClassOverrun5", },
+ { 0x36, "TxTrafficClassOverrun6", },
+ { 0x37, "TxTrafficClassOverrun7", },
+ { 0x38, "TxTrafficClass0", },
+ { 0x39, "TxTrafficClass1", },
+ { 0x3a, "TxTrafficClass2", },
+ { 0x3b, "TxTrafficClass3", },
+ { 0x3c, "TxTrafficClass4", },
+ { 0x3d, "TxTrafficClass5", },
+ { 0x3e, "TxTrafficClass6", },
+ { 0x3f, "TxTrafficClass7", },
+};
+
+static u16 hellcreek_read(struct hellcreek *hellcreek, unsigned int offset)
+{
+ return readw(hellcreek->base + offset);
+}
+
+static u16 hellcreek_read_ctrl(struct hellcreek *hellcreek)
+{
+ return readw(hellcreek->base + HR_CTRL_C);
+}
+
+static u16 hellcreek_read_stat(struct hellcreek *hellcreek)
+{
+ return readw(hellcreek->base + HR_SWSTAT);
+}
+
+static void hellcreek_write(struct hellcreek *hellcreek, u16 data,
+ unsigned int offset)
+{
+ writew(data, hellcreek->base + offset);
+}
+
+static void hellcreek_select_port(struct hellcreek *hellcreek, int port)
+{
+ u16 val = port << HR_PSEL_PTWSEL_SHIFT;
+
+ hellcreek_write(hellcreek, val, HR_PSEL);
+}
+
+static void hellcreek_select_prio(struct hellcreek *hellcreek, int prio)
+{
+ u16 val = prio << HR_PSEL_PRTCWSEL_SHIFT;
+
+ hellcreek_write(hellcreek, val, HR_PSEL);
+}
+
+static void hellcreek_select_port_prio(struct hellcreek *hellcreek, int port,
+ int prio)
+{
+ u16 val = port << HR_PSEL_PTWSEL_SHIFT;
+
+ val |= prio << HR_PSEL_PRTCWSEL_SHIFT;
+
+ hellcreek_write(hellcreek, val, HR_PSEL);
+}
+
+static void hellcreek_select_counter(struct hellcreek *hellcreek, int counter)
+{
+ u16 val = counter << HR_CSEL_SHIFT;
+
+ hellcreek_write(hellcreek, val, HR_CSEL);
+
+ /* Data sheet states to wait at least 20 internal clock cycles */
+ ndelay(200);
+}
+
+static void hellcreek_select_vlan(struct hellcreek *hellcreek, int vid,
+ bool pvid)
+{
+ u16 val = 0;
+
+ /* Set pvid bit first */
+ if (pvid)
+ val |= HR_VIDCFG_PVID;
+ hellcreek_write(hellcreek, val, HR_VIDCFG);
+
+ /* Set vlan */
+ val |= vid << HR_VIDCFG_VID_SHIFT;
+ hellcreek_write(hellcreek, val, HR_VIDCFG);
+}
+
+static void hellcreek_select_tgd(struct hellcreek *hellcreek, int port)
+{
+ u16 val = port << TR_TGDSEL_TDGSEL_SHIFT;
+
+ hellcreek_write(hellcreek, val, TR_TGDSEL);
+}
+
+static int hellcreek_wait_until_ready(struct hellcreek *hellcreek)
+{
+ u16 val;
+
+ /* Wait up to 1ms, although 3 us should be enough */
+ return readx_poll_timeout(hellcreek_read_ctrl, hellcreek,
+ val, val & HR_CTRL_C_READY,
+ 3, 1000);
+}
+
+static int hellcreek_wait_until_transitioned(struct hellcreek *hellcreek)
+{
+ u16 val;
+
+ return readx_poll_timeout_atomic(hellcreek_read_ctrl, hellcreek,
+ val, !(val & HR_CTRL_C_TRANSITION),
+ 1, 1000);
+}
+
+static int hellcreek_wait_fdb_ready(struct hellcreek *hellcreek)
+{
+ u16 val;
+
+ return readx_poll_timeout_atomic(hellcreek_read_stat, hellcreek,
+ val, !(val & HR_SWSTAT_BUSY),
+ 1, 1000);
+}
+
+static int hellcreek_detect(struct hellcreek *hellcreek)
+{
+ u16 id, rel_low, rel_high, date_low, date_high, tgd_ver;
+ u8 tgd_maj, tgd_min;
+ u32 rel, date;
+
+ id = hellcreek_read(hellcreek, HR_MODID_C);
+ rel_low = hellcreek_read(hellcreek, HR_REL_L_C);
+ rel_high = hellcreek_read(hellcreek, HR_REL_H_C);
+ date_low = hellcreek_read(hellcreek, HR_BLD_L_C);
+ date_high = hellcreek_read(hellcreek, HR_BLD_H_C);
+ tgd_ver = hellcreek_read(hellcreek, TR_TGDVER);
+
+ if (id != hellcreek->pdata->module_id)
+ return -ENODEV;
+
+ rel = rel_low | (rel_high << 16);
+ date = date_low | (date_high << 16);
+ tgd_maj = (tgd_ver & TR_TGDVER_REV_MAJ_MASK) >> TR_TGDVER_REV_MAJ_SHIFT;
+ tgd_min = (tgd_ver & TR_TGDVER_REV_MIN_MASK) >> TR_TGDVER_REV_MIN_SHIFT;
+
+ dev_info(hellcreek->dev, "Module ID=%02x Release=%04x Date=%04x TGD Version=%02x.%02x\n",
+ id, rel, date, tgd_maj, tgd_min);
+
+ return 0;
+}
+
+static void hellcreek_feature_detect(struct hellcreek *hellcreek)
+{
+ u16 features;
+
+ features = hellcreek_read(hellcreek, HR_FEABITS0);
+
+ /* Only detect the size of the FDB table. The size and current
+ * utilization can be queried via devlink.
+ */
+ hellcreek->fdb_entries = ((features & HR_FEABITS0_FDBBINS_MASK) >>
+ HR_FEABITS0_FDBBINS_SHIFT) * 32;
+}
+
+static enum dsa_tag_protocol hellcreek_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol mp)
+{
+ return DSA_TAG_PROTO_HELLCREEK;
+}
+
+static int hellcreek_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port *hellcreek_port;
+ u16 val;
+
+ hellcreek_port = &hellcreek->ports[port];
+
+ dev_dbg(hellcreek->dev, "Enable port %d\n", port);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_select_port(hellcreek, port);
+ val = hellcreek_port->ptcfg;
+ val |= HR_PTCFG_ADMIN_EN;
+ hellcreek_write(hellcreek, val, HR_PTCFG);
+ hellcreek_port->ptcfg = val;
+
+ mutex_unlock(&hellcreek->reg_lock);
+
+ return 0;
+}
+
+static void hellcreek_port_disable(struct dsa_switch *ds, int port)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port *hellcreek_port;
+ u16 val;
+
+ hellcreek_port = &hellcreek->ports[port];
+
+ dev_dbg(hellcreek->dev, "Disable port %d\n", port);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_select_port(hellcreek, port);
+ val = hellcreek_port->ptcfg;
+ val &= ~HR_PTCFG_ADMIN_EN;
+ hellcreek_write(hellcreek, val, HR_PTCFG);
+ hellcreek_port->ptcfg = val;
+
+ mutex_unlock(&hellcreek->reg_lock);
+}
+
+static void hellcreek_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, uint8_t *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hellcreek_counter); ++i) {
+ const struct hellcreek_counter *counter = &hellcreek_counter[i];
+
+ strscpy(data + i * ETH_GSTRING_LEN,
+ counter->name, ETH_GSTRING_LEN);
+ }
+}
+
+static int hellcreek_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ return ARRAY_SIZE(hellcreek_counter);
+}
+
+static void hellcreek_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port *hellcreek_port;
+ int i;
+
+ hellcreek_port = &hellcreek->ports[port];
+
+ for (i = 0; i < ARRAY_SIZE(hellcreek_counter); ++i) {
+ const struct hellcreek_counter *counter = &hellcreek_counter[i];
+ u8 offset = counter->offset + port * 64;
+ u16 high, low;
+ u64 value;
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_select_counter(hellcreek, offset);
+
+ /* The registers are locked internally by selecting the
+ * counter. So low and high can be read without reading high
+ * again.
+ */
+ high = hellcreek_read(hellcreek, HR_CRDH);
+ low = hellcreek_read(hellcreek, HR_CRDL);
+ value = ((u64)high << 16) | low;
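+		/* Accumulate the 32 bit wide hardware value into the 64 bit
+		 * software counter, which is what gets reported to ethtool.
+		 */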
+
+ hellcreek_port->counter_values[i] += value;
+ data[i] = hellcreek_port->counter_values[i];
+
+ mutex_unlock(&hellcreek->reg_lock);
+ }
+}
+
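+/* Every user port gets a private VID at the upper end of the VLAN range, e.g.
+ * port 2 maps to VID 4095. These VIDs implement the port separation in the
+ * default, non-bridged configuration and are therefore not available to the
+ * user, see hellcreek_vlan_prepare().
+ */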
+static u16 hellcreek_private_vid(int port)
+{
+ return VLAN_N_VID - port + 1;
+}
+
+static int hellcreek_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ int i;
+
+ dev_dbg(hellcreek->dev, "VLAN prepare for port %d\n", port);
+
+ /* Restriction: Make sure that nobody uses the "private" VLANs. These
+ * VLANs are internally used by the driver to ensure port
+ * separation. Thus, they cannot be used by someone else.
+ */
+ for (i = 0; i < hellcreek->pdata->num_ports; ++i) {
+ const u16 restricted_vid = hellcreek_private_vid(i);
+
+ if (!dsa_is_user_port(ds, i))
+ continue;
+
+ if (vlan->vid == restricted_vid) {
+ NL_SET_ERR_MSG_MOD(extack, "VID restricted by driver");
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
+
+static void hellcreek_select_vlan_params(struct hellcreek *hellcreek, int port,
+ int *shift, int *mask)
+{
+ switch (port) {
+ case 0:
+ *shift = HR_VIDMBRCFG_P0MBR_SHIFT;
+ *mask = HR_VIDMBRCFG_P0MBR_MASK;
+ break;
+ case 1:
+ *shift = HR_VIDMBRCFG_P1MBR_SHIFT;
+ *mask = HR_VIDMBRCFG_P1MBR_MASK;
+ break;
+ case 2:
+ *shift = HR_VIDMBRCFG_P2MBR_SHIFT;
+ *mask = HR_VIDMBRCFG_P2MBR_MASK;
+ break;
+ case 3:
+ *shift = HR_VIDMBRCFG_P3MBR_SHIFT;
+ *mask = HR_VIDMBRCFG_P3MBR_MASK;
+ break;
+ default:
+ *shift = *mask = 0;
+ dev_err(hellcreek->dev, "Unknown port %d selected!\n", port);
+ }
+}
+
+static void hellcreek_apply_vlan(struct hellcreek *hellcreek, int port, u16 vid,
+ bool pvid, bool untagged)
+{
+ int shift, mask;
+ u16 val;
+
+	dev_dbg(hellcreek->dev, "Apply VLAN: port=%d vid=%u pvid=%d untagged=%d\n",
+ port, vid, pvid, untagged);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_select_port(hellcreek, port);
+ hellcreek_select_vlan(hellcreek, vid, pvid);
+
+ /* Setup port vlan membership */
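+	/* Each port has a two bit wide membership field within HR_VIDMBRCFG
+	 * which encodes: no member, untagged member or tagged member.
+	 */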
+ hellcreek_select_vlan_params(hellcreek, port, &shift, &mask);
+ val = hellcreek->vidmbrcfg[vid];
+ val &= ~mask;
+ if (untagged)
+ val |= HELLCREEK_VLAN_UNTAGGED_MEMBER << shift;
+ else
+ val |= HELLCREEK_VLAN_TAGGED_MEMBER << shift;
+
+ hellcreek_write(hellcreek, val, HR_VIDMBRCFG);
+ hellcreek->vidmbrcfg[vid] = val;
+
+ mutex_unlock(&hellcreek->reg_lock);
+}
+
+static void hellcreek_unapply_vlan(struct hellcreek *hellcreek, int port,
+ u16 vid)
+{
+ int shift, mask;
+ u16 val;
+
+ dev_dbg(hellcreek->dev, "Unapply VLAN: port=%d vid=%u\n", port, vid);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_select_vlan(hellcreek, vid, false);
+
+ /* Setup port vlan membership */
+ hellcreek_select_vlan_params(hellcreek, port, &shift, &mask);
+ val = hellcreek->vidmbrcfg[vid];
+ val &= ~mask;
+ val |= HELLCREEK_VLAN_NO_MEMBER << shift;
+
+ hellcreek_write(hellcreek, val, HR_VIDMBRCFG);
+ hellcreek->vidmbrcfg[vid] = val;
+
+ mutex_unlock(&hellcreek->reg_lock);
+}
+
+static int hellcreek_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct hellcreek *hellcreek = ds->priv;
+ int err;
+
+ err = hellcreek_vlan_prepare(ds, port, vlan, extack);
+ if (err)
+ return err;
+
+ dev_dbg(hellcreek->dev, "Add VLAN %d on port %d, %s, %s\n",
+ vlan->vid, port, untagged ? "untagged" : "tagged",
+ pvid ? "PVID" : "no PVID");
+
+ hellcreek_apply_vlan(hellcreek, port, vlan->vid, pvid, untagged);
+
+ return 0;
+}
+
+static int hellcreek_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct hellcreek *hellcreek = ds->priv;
+
+ dev_dbg(hellcreek->dev, "Remove VLAN %d on port %d\n", vlan->vid, port);
+
+ hellcreek_unapply_vlan(hellcreek, port, vlan->vid);
+
+ return 0;
+}
+
+static void hellcreek_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port *hellcreek_port;
+ const char *new_state;
+ u16 val;
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_port = &hellcreek->ports[port];
+ val = hellcreek_port->ptcfg;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ new_state = "DISABLED";
+ val |= HR_PTCFG_BLOCKED;
+ val &= ~HR_PTCFG_LEARNING_EN;
+ break;
+ case BR_STATE_BLOCKING:
+ new_state = "BLOCKING";
+ val |= HR_PTCFG_BLOCKED;
+ val &= ~HR_PTCFG_LEARNING_EN;
+ break;
+ case BR_STATE_LISTENING:
+ new_state = "LISTENING";
+ val |= HR_PTCFG_BLOCKED;
+ val &= ~HR_PTCFG_LEARNING_EN;
+ break;
+ case BR_STATE_LEARNING:
+ new_state = "LEARNING";
+ val |= HR_PTCFG_BLOCKED;
+ val |= HR_PTCFG_LEARNING_EN;
+ break;
+ case BR_STATE_FORWARDING:
+ new_state = "FORWARDING";
+ val &= ~HR_PTCFG_BLOCKED;
+ val |= HR_PTCFG_LEARNING_EN;
+ break;
+ default:
+ new_state = "UNKNOWN";
+ }
+
+ hellcreek_select_port(hellcreek, port);
+ hellcreek_write(hellcreek, val, HR_PTCFG);
+ hellcreek_port->ptcfg = val;
+
+ mutex_unlock(&hellcreek->reg_lock);
+
+ dev_dbg(hellcreek->dev, "Configured STP state for port %d: %s\n",
+ port, new_state);
+}
+
+static void hellcreek_setup_ingressflt(struct hellcreek *hellcreek, int port,
+ bool enable)
+{
+ struct hellcreek_port *hellcreek_port = &hellcreek->ports[port];
+ u16 ptcfg;
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ ptcfg = hellcreek_port->ptcfg;
+
+ if (enable)
+ ptcfg |= HR_PTCFG_INGRESSFLT;
+ else
+ ptcfg &= ~HR_PTCFG_INGRESSFLT;
+
+ hellcreek_select_port(hellcreek, port);
+ hellcreek_write(hellcreek, ptcfg, HR_PTCFG);
+ hellcreek_port->ptcfg = ptcfg;
+
+ mutex_unlock(&hellcreek->reg_lock);
+}
+
+static void hellcreek_setup_vlan_awareness(struct hellcreek *hellcreek,
+ bool enable)
+{
+ u16 swcfg;
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ swcfg = hellcreek->swcfg;
+
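+	/* Note: The hardware flag denotes VLAN *unawareness*. Therefore, it
+	 * has to be cleared to make the switch VLAN aware.
+	 */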
+	if (enable)
+		swcfg &= ~HR_SWCFG_VLAN_UNAWARE;
+	else
+		swcfg |= HR_SWCFG_VLAN_UNAWARE;
+
+ hellcreek_write(hellcreek, swcfg, HR_SWCFG);
+
+ mutex_unlock(&hellcreek->reg_lock);
+}
+
+/* Default setup for DSA: VLAN <X>: CPU and Port <X> egress untagged. */
+static void hellcreek_setup_vlan_membership(struct dsa_switch *ds, int port,
+ bool enabled)
+{
+ const u16 vid = hellcreek_private_vid(port);
+ int upstream = dsa_upstream_port(ds, port);
+ struct hellcreek *hellcreek = ds->priv;
+
+ /* Apply vid to port as egress untagged and port vlan id */
+ if (enabled)
+ hellcreek_apply_vlan(hellcreek, port, vid, true, true);
+ else
+ hellcreek_unapply_vlan(hellcreek, port, vid);
+
+ /* Apply vid to cpu port as well */
+ if (enabled)
+ hellcreek_apply_vlan(hellcreek, upstream, vid, false, true);
+ else
+ hellcreek_unapply_vlan(hellcreek, upstream, vid);
+}
+
+static void hellcreek_port_set_ucast_flood(struct hellcreek *hellcreek,
+ int port, bool enable)
+{
+ struct hellcreek_port *hellcreek_port;
+ u16 val;
+
+ hellcreek_port = &hellcreek->ports[port];
+
+ dev_dbg(hellcreek->dev, "%s unicast flooding on port %d\n",
+ enable ? "Enable" : "Disable", port);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_select_port(hellcreek, port);
+ val = hellcreek_port->ptcfg;
+ if (enable)
+ val &= ~HR_PTCFG_UUC_FLT;
+ else
+ val |= HR_PTCFG_UUC_FLT;
+ hellcreek_write(hellcreek, val, HR_PTCFG);
+ hellcreek_port->ptcfg = val;
+
+ mutex_unlock(&hellcreek->reg_lock);
+}
+
+static void hellcreek_port_set_mcast_flood(struct hellcreek *hellcreek,
+ int port, bool enable)
+{
+ struct hellcreek_port *hellcreek_port;
+ u16 val;
+
+ hellcreek_port = &hellcreek->ports[port];
+
+ dev_dbg(hellcreek->dev, "%s multicast flooding on port %d\n",
+ enable ? "Enable" : "Disable", port);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_select_port(hellcreek, port);
+ val = hellcreek_port->ptcfg;
+ if (enable)
+ val &= ~HR_PTCFG_UMC_FLT;
+ else
+ val |= HR_PTCFG_UMC_FLT;
+ hellcreek_write(hellcreek, val, HR_PTCFG);
+ hellcreek_port->ptcfg = val;
+
+ mutex_unlock(&hellcreek->reg_lock);
+}
+
+static int hellcreek_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int hellcreek_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct hellcreek *hellcreek = ds->priv;
+
+ if (flags.mask & BR_FLOOD)
+ hellcreek_port_set_ucast_flood(hellcreek, port,
+ !!(flags.val & BR_FLOOD));
+
+ if (flags.mask & BR_MCAST_FLOOD)
+ hellcreek_port_set_mcast_flood(hellcreek, port,
+ !!(flags.val & BR_MCAST_FLOOD));
+
+ return 0;
+}
+
+static int hellcreek_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
+{
+ struct hellcreek *hellcreek = ds->priv;
+
+ dev_dbg(hellcreek->dev, "Port %d joins a bridge\n", port);
+
+ /* When joining a vlan_filtering bridge, keep the switch VLAN aware */
+ if (!ds->vlan_filtering)
+ hellcreek_setup_vlan_awareness(hellcreek, false);
+
+ /* Drop private vlans */
+ hellcreek_setup_vlan_membership(ds, port, false);
+
+ return 0;
+}
+
+static void hellcreek_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
+{
+ struct hellcreek *hellcreek = ds->priv;
+
+ dev_dbg(hellcreek->dev, "Port %d leaves a bridge\n", port);
+
+ /* Enable VLAN awareness */
+ hellcreek_setup_vlan_awareness(hellcreek, true);
+
+ /* Enable private vlans */
+ hellcreek_setup_vlan_membership(ds, port, true);
+}
+
+static int __hellcreek_fdb_add(struct hellcreek *hellcreek,
+ const struct hellcreek_fdb_entry *entry)
+{
+ u16 meta = 0;
+
+	dev_dbg(hellcreek->dev,
+		"Add static FDB entry: MAC=%pM, MASK=0x%02x, OBT=%d, PASS_BLOCKED=%d, REPRIO_EN=%d, PRIO=%d\n",
+		entry->mac, entry->portmask, entry->is_obt, entry->pass_blocked,
+		entry->reprio_en, entry->reprio_tc);
+
+ /* Add mac address */
+ hellcreek_write(hellcreek, entry->mac[1] | (entry->mac[0] << 8), HR_FDBWDH);
+ hellcreek_write(hellcreek, entry->mac[3] | (entry->mac[2] << 8), HR_FDBWDM);
+ hellcreek_write(hellcreek, entry->mac[5] | (entry->mac[4] << 8), HR_FDBWDL);
+
+ /* Meta data */
+ meta |= entry->portmask << HR_FDBWRM0_PORTMASK_SHIFT;
+ if (entry->is_obt)
+ meta |= HR_FDBWRM0_OBT;
+ if (entry->pass_blocked)
+ meta |= HR_FDBWRM0_PASS_BLOCKED;
+ if (entry->reprio_en) {
+ meta |= HR_FDBWRM0_REPRIO_EN;
+ meta |= entry->reprio_tc << HR_FDBWRM0_REPRIO_TC_SHIFT;
+ }
+ hellcreek_write(hellcreek, meta, HR_FDBWRM0);
+
+ /* Commit */
+ hellcreek_write(hellcreek, 0x00, HR_FDBWRCMD);
+
+ /* Wait until done */
+ return hellcreek_wait_fdb_ready(hellcreek);
+}
+
+static int __hellcreek_fdb_del(struct hellcreek *hellcreek,
+ const struct hellcreek_fdb_entry *entry)
+{
+ dev_dbg(hellcreek->dev, "Delete FDB entry: MAC=%pM!\n", entry->mac);
+
+ /* Delete by matching idx */
+ hellcreek_write(hellcreek, entry->idx | HR_FDBWRCMD_FDBDEL, HR_FDBWRCMD);
+
+ /* Wait until done */
+ return hellcreek_wait_fdb_ready(hellcreek);
+}
+
+static void hellcreek_populate_fdb_entry(struct hellcreek *hellcreek,
+ struct hellcreek_fdb_entry *entry,
+ size_t idx)
+{
+ unsigned char addr[ETH_ALEN];
+ u16 meta, mac;
+
+ /* Read values */
+ meta = hellcreek_read(hellcreek, HR_FDBMDRD);
+ mac = hellcreek_read(hellcreek, HR_FDBRDL);
+ addr[5] = mac & 0xff;
+ addr[4] = (mac & 0xff00) >> 8;
+ mac = hellcreek_read(hellcreek, HR_FDBRDM);
+ addr[3] = mac & 0xff;
+ addr[2] = (mac & 0xff00) >> 8;
+ mac = hellcreek_read(hellcreek, HR_FDBRDH);
+ addr[1] = mac & 0xff;
+ addr[0] = (mac & 0xff00) >> 8;
+
+ /* Populate @entry */
+ memcpy(entry->mac, addr, sizeof(addr));
+ entry->idx = idx;
+ entry->portmask = (meta & HR_FDBMDRD_PORTMASK_MASK) >>
+ HR_FDBMDRD_PORTMASK_SHIFT;
+ entry->age = (meta & HR_FDBMDRD_AGE_MASK) >>
+ HR_FDBMDRD_AGE_SHIFT;
+ entry->is_obt = !!(meta & HR_FDBMDRD_OBT);
+ entry->pass_blocked = !!(meta & HR_FDBMDRD_PASS_BLOCKED);
+ entry->is_static = !!(meta & HR_FDBMDRD_STATIC);
+ entry->reprio_tc = (meta & HR_FDBMDRD_REPRIO_TC_MASK) >>
+ HR_FDBMDRD_REPRIO_TC_SHIFT;
+ entry->reprio_en = !!(meta & HR_FDBMDRD_REPRIO_EN);
+}
+
+/* Retrieve the index of an FDB entry by MAC address. Currently we search
+ * through the complete table in hardware. If that's too slow, we might have to
+ * cache the complete FDB table in software.
+ */
+static int hellcreek_fdb_get(struct hellcreek *hellcreek,
+ const unsigned char *dest,
+ struct hellcreek_fdb_entry *entry)
+{
+ size_t i;
+
+ /* Set read pointer to zero: The read of HR_FDBMAX (read-only register)
+ * should reset the internal pointer. But, that doesn't work. The vendor
+ * suggested a subsequent write as workaround. Same for HR_FDBRDH below.
+ */
+ hellcreek_read(hellcreek, HR_FDBMAX);
+ hellcreek_write(hellcreek, 0x00, HR_FDBMAX);
+
+ /* We have to read the complete table, because the switch/driver might
+ * enter new entries anywhere.
+ */
+ for (i = 0; i < hellcreek->fdb_entries; ++i) {
+ struct hellcreek_fdb_entry tmp = { 0 };
+
+ /* Read entry */
+ hellcreek_populate_fdb_entry(hellcreek, &tmp, i);
+
+ /* Force next entry */
+ hellcreek_write(hellcreek, 0x00, HR_FDBRDH);
+
+ if (memcmp(tmp.mac, dest, ETH_ALEN))
+ continue;
+
+ /* Match found */
+ memcpy(entry, &tmp, sizeof(*entry));
+
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int hellcreek_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct hellcreek_fdb_entry entry = { 0 };
+ struct hellcreek *hellcreek = ds->priv;
+ int ret;
+
+ dev_dbg(hellcreek->dev, "Add FDB entry for MAC=%pM\n", addr);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ ret = hellcreek_fdb_get(hellcreek, addr, &entry);
+ if (ret) {
+ /* Not found */
+ memcpy(entry.mac, addr, sizeof(entry.mac));
+ entry.portmask = BIT(port);
+
+ ret = __hellcreek_fdb_add(hellcreek, &entry);
+ if (ret) {
+ dev_err(hellcreek->dev, "Failed to add FDB entry!\n");
+ goto out;
+ }
+ } else {
+ /* Found */
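+		/* The entry is updated by removing it first and re-adding it
+		 * with the extended port mask.
+		 */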
+ ret = __hellcreek_fdb_del(hellcreek, &entry);
+ if (ret) {
+ dev_err(hellcreek->dev, "Failed to delete FDB entry!\n");
+ goto out;
+ }
+
+ entry.portmask |= BIT(port);
+
+ ret = __hellcreek_fdb_add(hellcreek, &entry);
+ if (ret) {
+ dev_err(hellcreek->dev, "Failed to add FDB entry!\n");
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&hellcreek->reg_lock);
+
+ return ret;
+}
+
+static int hellcreek_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct hellcreek_fdb_entry entry = { 0 };
+ struct hellcreek *hellcreek = ds->priv;
+ int ret;
+
+ dev_dbg(hellcreek->dev, "Delete FDB entry for MAC=%pM\n", addr);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ ret = hellcreek_fdb_get(hellcreek, addr, &entry);
+ if (ret) {
+ /* Not found */
+ dev_err(hellcreek->dev, "FDB entry for deletion not found!\n");
+ } else {
+ /* Found */
+ ret = __hellcreek_fdb_del(hellcreek, &entry);
+ if (ret) {
+ dev_err(hellcreek->dev, "Failed to delete FDB entry!\n");
+ goto out;
+ }
+
+ entry.portmask &= ~BIT(port);
+
+ if (entry.portmask != 0x00) {
+ ret = __hellcreek_fdb_add(hellcreek, &entry);
+ if (ret) {
+ dev_err(hellcreek->dev, "Failed to add FDB entry!\n");
+ goto out;
+ }
+ }
+ }
+
+out:
+ mutex_unlock(&hellcreek->reg_lock);
+
+ return ret;
+}
+
+static int hellcreek_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ u16 entries;
+ int ret = 0;
+ size_t i;
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ /* Set read pointer to zero: The read of HR_FDBMAX (read-only register)
+ * should reset the internal pointer. But, that doesn't work. The vendor
+ * suggested a subsequent write as workaround. Same for HR_FDBRDH below.
+ */
+ entries = hellcreek_read(hellcreek, HR_FDBMAX);
+ hellcreek_write(hellcreek, 0x00, HR_FDBMAX);
+
+ dev_dbg(hellcreek->dev, "FDB dump for port %d, entries=%d!\n", port, entries);
+
+ /* Read table */
+ for (i = 0; i < hellcreek->fdb_entries; ++i) {
+ struct hellcreek_fdb_entry entry = { 0 };
+
+ /* Read entry */
+ hellcreek_populate_fdb_entry(hellcreek, &entry, i);
+
+ /* Force next entry */
+ hellcreek_write(hellcreek, 0x00, HR_FDBRDH);
+
+ /* Check valid */
+ if (is_zero_ether_addr(entry.mac))
+ continue;
+
+ /* Check port mask */
+ if (!(entry.portmask & BIT(port)))
+ continue;
+
+ ret = cb(entry.mac, 0, entry.is_static, data);
+ if (ret)
+ break;
+ }
+
+ mutex_unlock(&hellcreek->reg_lock);
+
+ return ret;
+}
+
+static int hellcreek_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering,
+ struct netlink_ext_ack *extack)
+{
+ struct hellcreek *hellcreek = ds->priv;
+
+ dev_dbg(hellcreek->dev, "%s VLAN filtering on port %d\n",
+ vlan_filtering ? "Enable" : "Disable", port);
+
+	/* Configure port to drop packets with unknown VIDs */
+	hellcreek_setup_ingressflt(hellcreek, port, vlan_filtering);
+
+	/* Enable VLAN awareness on the switch. This is safe due to
+	 * ds->vlan_filtering_is_global.
+ */
+ hellcreek_setup_vlan_awareness(hellcreek, vlan_filtering);
+
+ return 0;
+}
+
+static int hellcreek_enable_ip_core(struct hellcreek *hellcreek)
+{
+ int ret;
+ u16 val;
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ val = hellcreek_read(hellcreek, HR_CTRL_C);
+ val |= HR_CTRL_C_ENABLE;
+ hellcreek_write(hellcreek, val, HR_CTRL_C);
+ ret = hellcreek_wait_until_transitioned(hellcreek);
+
+ mutex_unlock(&hellcreek->reg_lock);
+
+ return ret;
+}
+
+static void hellcreek_setup_cpu_and_tunnel_port(struct hellcreek *hellcreek)
+{
+ struct hellcreek_port *tunnel_port = &hellcreek->ports[TUNNEL_PORT];
+ struct hellcreek_port *cpu_port = &hellcreek->ports[CPU_PORT];
+ u16 ptcfg = 0;
+
+ ptcfg |= HR_PTCFG_LEARNING_EN | HR_PTCFG_ADMIN_EN;
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_select_port(hellcreek, CPU_PORT);
+ hellcreek_write(hellcreek, ptcfg, HR_PTCFG);
+
+ hellcreek_select_port(hellcreek, TUNNEL_PORT);
+ hellcreek_write(hellcreek, ptcfg, HR_PTCFG);
+
+ cpu_port->ptcfg = ptcfg;
+ tunnel_port->ptcfg = ptcfg;
+
+ mutex_unlock(&hellcreek->reg_lock);
+}
+
+static void hellcreek_setup_tc_identity_mapping(struct hellcreek *hellcreek)
+{
+ int i;
+
+ /* The switch has multiple egress queues per port. The queue is selected
+ * via the PCP field in the VLAN header. The switch internally deals
+ * with traffic classes instead of PCP values and this mapping is
+ * configurable.
+ *
+ * The default mapping is (PCP - TC):
+ * 7 - 7
+ * 6 - 6
+ * 5 - 5
+ * 4 - 4
+ * 3 - 3
+ * 2 - 1
+ * 1 - 0
+ * 0 - 2
+ *
+	 * As the default is not an identity mapping, configure one explicitly.
+ */
+
+ for (i = 0; i < 8; ++i) {
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_select_prio(hellcreek, i);
+ hellcreek_write(hellcreek,
+ i << HR_PRTCCFG_PCP_TC_MAP_SHIFT,
+ HR_PRTCCFG);
+
+ mutex_unlock(&hellcreek->reg_lock);
+ }
+}
+
+static int hellcreek_setup_fdb(struct hellcreek *hellcreek)
+{
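+	/* All entries below use port mask 0x03, i.e. the management ports:
+	 * the CPU port (bit 0) and the tunnel port (bit 1).
+	 */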
+ static struct hellcreek_fdb_entry l2_ptp = {
+ /* MAC: 01-1B-19-00-00-00 */
+ .mac = { 0x01, 0x1b, 0x19, 0x00, 0x00, 0x00 },
+ .portmask = 0x03, /* Management ports */
+ .age = 0,
+ .is_obt = 0,
+ .pass_blocked = 0,
+ .is_static = 1,
+ .reprio_tc = 6, /* TC: 6 as per IEEE 802.1AS */
+ .reprio_en = 1,
+ };
+ static struct hellcreek_fdb_entry udp4_ptp = {
+ /* MAC: 01-00-5E-00-01-81 */
+ .mac = { 0x01, 0x00, 0x5e, 0x00, 0x01, 0x81 },
+ .portmask = 0x03, /* Management ports */
+ .age = 0,
+ .is_obt = 0,
+ .pass_blocked = 0,
+ .is_static = 1,
+ .reprio_tc = 6,
+ .reprio_en = 1,
+ };
+ static struct hellcreek_fdb_entry udp6_ptp = {
+ /* MAC: 33-33-00-00-01-81 */
+ .mac = { 0x33, 0x33, 0x00, 0x00, 0x01, 0x81 },
+ .portmask = 0x03, /* Management ports */
+ .age = 0,
+ .is_obt = 0,
+ .pass_blocked = 0,
+ .is_static = 1,
+ .reprio_tc = 6,
+ .reprio_en = 1,
+ };
+ static struct hellcreek_fdb_entry l2_p2p = {
+ /* MAC: 01-80-C2-00-00-0E */
+ .mac = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e },
+ .portmask = 0x03, /* Management ports */
+ .age = 0,
+ .is_obt = 0,
+ .pass_blocked = 1,
+ .is_static = 1,
+ .reprio_tc = 6, /* TC: 6 as per IEEE 802.1AS */
+ .reprio_en = 1,
+ };
+ static struct hellcreek_fdb_entry udp4_p2p = {
+ /* MAC: 01-00-5E-00-00-6B */
+ .mac = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x6b },
+ .portmask = 0x03, /* Management ports */
+ .age = 0,
+ .is_obt = 0,
+ .pass_blocked = 1,
+ .is_static = 1,
+ .reprio_tc = 6,
+ .reprio_en = 1,
+ };
+ static struct hellcreek_fdb_entry udp6_p2p = {
+ /* MAC: 33-33-00-00-00-6B */
+ .mac = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x6b },
+ .portmask = 0x03, /* Management ports */
+ .age = 0,
+ .is_obt = 0,
+ .pass_blocked = 1,
+ .is_static = 1,
+ .reprio_tc = 6,
+ .reprio_en = 1,
+ };
+ static struct hellcreek_fdb_entry stp = {
+ /* MAC: 01-80-C2-00-00-00 */
+ .mac = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 },
+ .portmask = 0x03, /* Management ports */
+ .age = 0,
+ .is_obt = 0,
+ .pass_blocked = 1,
+ .is_static = 1,
+ .reprio_tc = 6,
+ .reprio_en = 1,
+ };
+ int ret;
+
+ mutex_lock(&hellcreek->reg_lock);
+ ret = __hellcreek_fdb_add(hellcreek, &l2_ptp);
+ if (ret)
+ goto out;
+ ret = __hellcreek_fdb_add(hellcreek, &udp4_ptp);
+ if (ret)
+ goto out;
+ ret = __hellcreek_fdb_add(hellcreek, &udp6_ptp);
+ if (ret)
+ goto out;
+ ret = __hellcreek_fdb_add(hellcreek, &l2_p2p);
+ if (ret)
+ goto out;
+ ret = __hellcreek_fdb_add(hellcreek, &udp4_p2p);
+ if (ret)
+ goto out;
+ ret = __hellcreek_fdb_add(hellcreek, &udp6_p2p);
+ if (ret)
+ goto out;
+ ret = __hellcreek_fdb_add(hellcreek, &stp);
+out:
+ mutex_unlock(&hellcreek->reg_lock);
+
+ return ret;
+}
+
+static int hellcreek_devlink_info_get(struct dsa_switch *ds,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ int ret;
+
+ ret = devlink_info_driver_name_put(req, "hellcreek");
+ if (ret)
+ return ret;
+
+ return devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
+ hellcreek->pdata->name);
+}
+
+static u64 hellcreek_devlink_vlan_table_get(void *priv)
+{
+ struct hellcreek *hellcreek = priv;
+ u64 count = 0;
+ int i;
+
+ mutex_lock(&hellcreek->reg_lock);
+ for (i = 0; i < VLAN_N_VID; ++i)
+ if (hellcreek->vidmbrcfg[i])
+ count++;
+ mutex_unlock(&hellcreek->reg_lock);
+
+ return count;
+}
+
+static u64 hellcreek_devlink_fdb_table_get(void *priv)
+{
+ struct hellcreek *hellcreek = priv;
+ u64 count = 0;
+
+ /* Reading this register has side effects. Synchronize against the other
+ * FDB operations.
+ */
+ mutex_lock(&hellcreek->reg_lock);
+ count = hellcreek_read(hellcreek, HR_FDBMAX);
+ mutex_unlock(&hellcreek->reg_lock);
+
+ return count;
+}
+
+static int hellcreek_setup_devlink_resources(struct dsa_switch *ds)
+{
+ struct devlink_resource_size_params size_vlan_params;
+ struct devlink_resource_size_params size_fdb_params;
+ struct hellcreek *hellcreek = ds->priv;
+ int err;
+
+ devlink_resource_size_params_init(&size_vlan_params, VLAN_N_VID,
+ VLAN_N_VID,
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ devlink_resource_size_params_init(&size_fdb_params,
+ hellcreek->fdb_entries,
+ hellcreek->fdb_entries,
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ err = dsa_devlink_resource_register(ds, "VLAN", VLAN_N_VID,
+ HELLCREEK_DEVLINK_PARAM_ID_VLAN_TABLE,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_vlan_params);
+ if (err)
+ goto out;
+
+ err = dsa_devlink_resource_register(ds, "FDB", hellcreek->fdb_entries,
+ HELLCREEK_DEVLINK_PARAM_ID_FDB_TABLE,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_fdb_params);
+ if (err)
+ goto out;
+
+ dsa_devlink_resource_occ_get_register(ds,
+ HELLCREEK_DEVLINK_PARAM_ID_VLAN_TABLE,
+ hellcreek_devlink_vlan_table_get,
+ hellcreek);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ HELLCREEK_DEVLINK_PARAM_ID_FDB_TABLE,
+ hellcreek_devlink_fdb_table_get,
+ hellcreek);
+
+ return 0;
+
+out:
+ dsa_devlink_resources_unregister(ds);
+
+ return err;
+}
+
+static int hellcreek_devlink_region_vlan_snapshot(struct devlink *dl,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct hellcreek_devlink_vlan_entry *table, *entry;
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+ struct hellcreek *hellcreek = ds->priv;
+ int i;
+
+ table = kcalloc(VLAN_N_VID, sizeof(*entry), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ entry = table;
+
+ mutex_lock(&hellcreek->reg_lock);
+ for (i = 0; i < VLAN_N_VID; ++i, ++entry) {
+ entry->member = hellcreek->vidmbrcfg[i];
+ entry->vid = i;
+ }
+ mutex_unlock(&hellcreek->reg_lock);
+
+ *data = (u8 *)table;
+
+ return 0;
+}
+
+static int hellcreek_devlink_region_fdb_snapshot(struct devlink *dl,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+ struct hellcreek_fdb_entry *table, *entry;
+ struct hellcreek *hellcreek = ds->priv;
+ size_t i;
+
+ table = kcalloc(hellcreek->fdb_entries, sizeof(*entry), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ entry = table;
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ /* Start table read */
+ hellcreek_read(hellcreek, HR_FDBMAX);
+ hellcreek_write(hellcreek, 0x00, HR_FDBMAX);
+
+ for (i = 0; i < hellcreek->fdb_entries; ++i, ++entry) {
+ /* Read current entry */
+ hellcreek_populate_fdb_entry(hellcreek, entry, i);
+
+ /* Advance read pointer */
+ hellcreek_write(hellcreek, 0x00, HR_FDBRDH);
+ }
+
+ mutex_unlock(&hellcreek->reg_lock);
+
+ *data = (u8 *)table;
+
+ return 0;
+}
+
+static struct devlink_region_ops hellcreek_region_vlan_ops = {
+ .name = "vlan",
+ .snapshot = hellcreek_devlink_region_vlan_snapshot,
+ .destructor = kfree,
+};
+
+static struct devlink_region_ops hellcreek_region_fdb_ops = {
+ .name = "fdb",
+ .snapshot = hellcreek_devlink_region_fdb_snapshot,
+ .destructor = kfree,
+};
+
+static int hellcreek_setup_devlink_regions(struct dsa_switch *ds)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct devlink_region_ops *ops;
+ struct devlink_region *region;
+ u64 size;
+ int ret;
+
+ /* VLAN table */
+ size = VLAN_N_VID * sizeof(struct hellcreek_devlink_vlan_entry);
+ ops = &hellcreek_region_vlan_ops;
+
+ region = dsa_devlink_region_create(ds, ops, 1, size);
+ if (IS_ERR(region))
+ return PTR_ERR(region);
+
+ hellcreek->vlan_region = region;
+
+ /* FDB table */
+ size = hellcreek->fdb_entries * sizeof(struct hellcreek_fdb_entry);
+ ops = &hellcreek_region_fdb_ops;
+
+ region = dsa_devlink_region_create(ds, ops, 1, size);
+ if (IS_ERR(region)) {
+ ret = PTR_ERR(region);
+ goto err_fdb;
+ }
+
+ hellcreek->fdb_region = region;
+
+ return 0;
+
+err_fdb:
+ dsa_devlink_region_destroy(hellcreek->vlan_region);
+
+ return ret;
+}
+
+static void hellcreek_teardown_devlink_regions(struct dsa_switch *ds)
+{
+ struct hellcreek *hellcreek = ds->priv;
+
+ dsa_devlink_region_destroy(hellcreek->fdb_region);
+ dsa_devlink_region_destroy(hellcreek->vlan_region);
+}
+
+static int hellcreek_setup(struct dsa_switch *ds)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ u16 swcfg = 0;
+ int ret, i;
+
+ dev_dbg(hellcreek->dev, "Set up the switch\n");
+
+ /* Let's go */
+ ret = hellcreek_enable_ip_core(hellcreek);
+ if (ret) {
+ dev_err(hellcreek->dev, "Failed to enable IP core!\n");
+ return ret;
+ }
+
+ /* Enable CPU/Tunnel ports */
+ hellcreek_setup_cpu_and_tunnel_port(hellcreek);
+
+ /* Switch config: Keep defaults, enable FDB aging and learning and tag
+ * each frame from/to cpu port for DSA tagging. Also enable the length
+ * aware shaping mode. This eliminates the need for Qbv guard bands.
+ */
+ swcfg |= HR_SWCFG_FDBAGE_EN |
+ HR_SWCFG_FDBLRN_EN |
+ HR_SWCFG_ALWAYS_OBT |
+ (HR_SWCFG_LAS_ON << HR_SWCFG_LAS_MODE_SHIFT);
+ hellcreek->swcfg = swcfg;
+ hellcreek_write(hellcreek, swcfg, HR_SWCFG);
+
+ /* Initial vlan membership to reflect port separation */
+ for (i = 0; i < ds->num_ports; ++i) {
+ if (!dsa_is_user_port(ds, i))
+ continue;
+
+ hellcreek_setup_vlan_membership(ds, i, true);
+ }
+
+ /* Configure PCP <-> TC mapping */
+ hellcreek_setup_tc_identity_mapping(hellcreek);
+
+ /* The VLAN awareness is a global switch setting. Therefore, mixed vlan
+ * filtering setups are not supported.
+ */
+ ds->vlan_filtering_is_global = true;
+ ds->needs_standalone_vlan_filtering = true;
+
+ /* Intercept _all_ PTP multicast traffic */
+ ret = hellcreek_setup_fdb(hellcreek);
+ if (ret) {
+ dev_err(hellcreek->dev,
+ "Failed to insert static PTP FDB entries\n");
+ return ret;
+ }
+
+ /* Register devlink resources with DSA */
+ ret = hellcreek_setup_devlink_resources(ds);
+ if (ret) {
+ dev_err(hellcreek->dev,
+ "Failed to setup devlink resources!\n");
+ return ret;
+ }
+
+ ret = hellcreek_setup_devlink_regions(ds);
+ if (ret) {
+ dev_err(hellcreek->dev,
+ "Failed to setup devlink regions!\n");
+ goto err_regions;
+ }
+
+ return 0;
+
+err_regions:
+ dsa_devlink_resources_unregister(ds);
+
+ return ret;
+}
+
+static void hellcreek_teardown(struct dsa_switch *ds)
+{
+ hellcreek_teardown_devlink_regions(ds);
+ dsa_devlink_resources_unregister(ds);
+}
+
+static void hellcreek_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ struct hellcreek *hellcreek = ds->priv;
+
+ __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RGMII, config->supported_interfaces);
+
+ /* Include GMII - the hardware does not support this interface
+ * mode, but it's the default interface mode for phylib, so we
+ * need it for compatibility with existing DT.
+ */
+ __set_bit(PHY_INTERFACE_MODE_GMII, config->supported_interfaces);
+
+ /* The MAC settings are a hardware configuration option and cannot be
+ * changed at run time or by strapping. Therefore the attached PHYs
+ * should be programmed to only advertise settings which are supported
+ * by the hardware.
+ */
+ if (hellcreek->pdata->is_100_mbits)
+ config->mac_capabilities = MAC_100FD;
+ else
+ config->mac_capabilities = MAC_1000FD;
+}
+
+static int
+hellcreek_port_prechangeupper(struct dsa_switch *ds, int port,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ bool used = true;
+ int ret = -EBUSY;
+ u16 vid;
+ int i;
+
+ dev_dbg(hellcreek->dev, "Pre change upper for port %d\n", port);
+
+	/* Deny VLAN devices on top of lan ports with the same VLAN ids, because
+	 * it breaks the port separation due to the private VLANs. Example:
+	 *
+	 * lan0.100 *and* lan1.100 cannot be used in parallel. However, lan0.99
+	 * and lan1.100 work.
+	 */
+
+ if (!is_vlan_dev(info->upper_dev))
+ return 0;
+
+ vid = vlan_dev_vlan_id(info->upper_dev);
+
+ /* For all ports, check bitmaps */
+ mutex_lock(&hellcreek->vlan_lock);
+ for (i = 0; i < hellcreek->pdata->num_ports; ++i) {
+ if (!dsa_is_user_port(ds, i))
+ continue;
+
+ if (port == i)
+ continue;
+
+ used = used && test_bit(vid, hellcreek->ports[i].vlan_dev_bitmap);
+ }
+
+ if (used)
+ goto out;
+
+ /* Update bitmap */
+ set_bit(vid, hellcreek->ports[port].vlan_dev_bitmap);
+
+ ret = 0;
+
+out:
+ mutex_unlock(&hellcreek->vlan_lock);
+
+ return ret;
+}
+
+static void hellcreek_setup_maxsdu(struct hellcreek *hellcreek, int port,
+ const struct tc_taprio_qopt_offload *schedule)
+{
+ int tc;
+
+ for (tc = 0; tc < 8; ++tc) {
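+		/* Convert the configured L2 payload limit into the frame size
+		 * limit used by the hardware: include the VLAN Ethernet
+		 * header, exclude the FCS.
+		 */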
+ u32 max_sdu = schedule->max_sdu[tc] + VLAN_ETH_HLEN - ETH_FCS_LEN;
+ u16 val;
+
+ if (!schedule->max_sdu[tc])
+ continue;
+
+ dev_dbg(hellcreek->dev, "Configure max-sdu %u for tc %d on port %d\n",
+ max_sdu, tc, port);
+
+ hellcreek_select_port_prio(hellcreek, port, tc);
+
+ val = (max_sdu & HR_PTPRTCCFG_MAXSDU_MASK) << HR_PTPRTCCFG_MAXSDU_SHIFT;
+
+ hellcreek_write(hellcreek, val, HR_PTPRTCCFG);
+ }
+}
+
+static void hellcreek_reset_maxsdu(struct hellcreek *hellcreek, int port)
+{
+ int tc;
+
+ for (tc = 0; tc < 8; ++tc) {
+ u16 val;
+
+ hellcreek_select_port_prio(hellcreek, port, tc);
+
+ val = (HELLCREEK_DEFAULT_MAX_SDU & HR_PTPRTCCFG_MAXSDU_MASK)
+ << HR_PTPRTCCFG_MAXSDU_SHIFT;
+
+ hellcreek_write(hellcreek, val, HR_PTPRTCCFG);
+ }
+}
+
+static void hellcreek_setup_gcl(struct hellcreek *hellcreek, int port,
+ const struct tc_taprio_qopt_offload *schedule)
+{
+ const struct tc_taprio_sched_entry *cur, *initial, *next;
+ size_t i;
+
+ cur = initial = &schedule->entries[0];
+ next = cur + 1;
+
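+	/* The hardware stores the initial gate states (programmed via
+	 * TR_GCLCMD below) plus one toggle mask per control list entry.
+	 * Therefore, write the XOR of consecutive gate masks; the last entry
+	 * toggles back to the initial state to close the cycle.
+	 */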
+ for (i = 1; i <= schedule->num_entries; ++i) {
+ u16 data;
+ u8 gates;
+
+ if (i == schedule->num_entries)
+ gates = initial->gate_mask ^
+ cur->gate_mask;
+ else
+ gates = next->gate_mask ^
+ cur->gate_mask;
+
+ data = gates;
+
+ if (i == schedule->num_entries)
+ data |= TR_GCLDAT_GCLWRLAST;
+
+ /* Gates states */
+ hellcreek_write(hellcreek, data, TR_GCLDAT);
+
+ /* Time interval */
+ hellcreek_write(hellcreek,
+ cur->interval & 0x0000ffff,
+ TR_GCLTIL);
+ hellcreek_write(hellcreek,
+ (cur->interval & 0xffff0000) >> 16,
+ TR_GCLTIH);
+
+ /* Commit entry */
+ data = ((i - 1) << TR_GCLCMD_GCLWRADR_SHIFT) |
+ (initial->gate_mask <<
+ TR_GCLCMD_INIT_GATE_STATES_SHIFT);
+ hellcreek_write(hellcreek, data, TR_GCLCMD);
+
+ cur++;
+ next++;
+ }
+}
+
+static void hellcreek_set_cycle_time(struct hellcreek *hellcreek,
+ const struct tc_taprio_qopt_offload *schedule)
+{
+ u32 cycle_time = schedule->cycle_time;
+
+ hellcreek_write(hellcreek, cycle_time & 0x0000ffff, TR_CTWRL);
+ hellcreek_write(hellcreek, (cycle_time & 0xffff0000) >> 16, TR_CTWRH);
+}
+
+static void hellcreek_switch_schedule(struct hellcreek *hellcreek,
+ ktime_t start_time)
+{
+ struct timespec64 ts = ktime_to_timespec64(start_time);
+
+ /* Start schedule at this point of time */
+ hellcreek_write(hellcreek, ts.tv_nsec & 0x0000ffff, TR_ESTWRL);
+ hellcreek_write(hellcreek, (ts.tv_nsec & 0xffff0000) >> 16, TR_ESTWRH);
+
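+	/* Only three bits of the start time's seconds can be programmed (see
+	 * TR_ESTCMD_ESTSEC_MASK). This is why a schedule can only be started a
+	 * few seconds in the future, see hellcreek_schedule_startable().
+	 */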
+ /* Arm timer, set seconds and switch schedule */
+ hellcreek_write(hellcreek, TR_ESTCMD_ESTARM | TR_ESTCMD_ESTSWCFG |
+ ((ts.tv_sec & TR_ESTCMD_ESTSEC_MASK) <<
+ TR_ESTCMD_ESTSEC_SHIFT), TR_ESTCMD);
+}
+
+static bool hellcreek_schedule_startable(struct hellcreek *hellcreek, int port)
+{
+ struct hellcreek_port *hellcreek_port = &hellcreek->ports[port];
+ s64 base_time_ns, current_ns;
+
+	/* The switch allows a schedule to be started at most eight seconds in
+	 * the future. Therefore, check against the current PTP time whether
+	 * the schedule is startable yet; a base time less than four seconds
+	 * ahead keeps a safe margin within that window.
+	 */
+
+ /* Use the "cached" time. That should be alright, as it's updated quite
+ * frequently in the PTP code.
+ */
+ mutex_lock(&hellcreek->ptp_lock);
+ current_ns = hellcreek->seconds * NSEC_PER_SEC + hellcreek->last_ts;
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ /* Calculate difference to admin base time */
+ base_time_ns = ktime_to_ns(hellcreek_port->current_schedule->base_time);
+
+ return base_time_ns - current_ns < (s64)4 * NSEC_PER_SEC;
+}
+
+static void hellcreek_start_schedule(struct hellcreek *hellcreek, int port)
+{
+ struct hellcreek_port *hellcreek_port = &hellcreek->ports[port];
+ ktime_t base_time, current_time;
+ s64 current_ns;
+ u32 cycle_time;
+
+ /* First select port */
+ hellcreek_select_tgd(hellcreek, port);
+
+ /* Forward base time into the future if needed */
+ mutex_lock(&hellcreek->ptp_lock);
+ current_ns = hellcreek->seconds * NSEC_PER_SEC + hellcreek->last_ts;
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ current_time = ns_to_ktime(current_ns);
+ base_time = hellcreek_port->current_schedule->base_time;
+ cycle_time = hellcreek_port->current_schedule->cycle_time;
+
+ if (ktime_compare(current_time, base_time) > 0) {
+ s64 n;
+
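+		/* n is the number of whole cycles that have elapsed since
+		 * base_time. Advancing by n + 1 cycles yields the next cycle
+		 * start which lies in the future.
+		 */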
+ n = div64_s64(ktime_sub_ns(current_time, base_time),
+ cycle_time);
+ base_time = ktime_add_ns(base_time, (n + 1) * cycle_time);
+ }
+
+ /* Set admin base time and switch schedule */
+ hellcreek_switch_schedule(hellcreek, base_time);
+
+ taprio_offload_free(hellcreek_port->current_schedule);
+ hellcreek_port->current_schedule = NULL;
+
+ dev_dbg(hellcreek->dev, "Armed EST timer for port %d\n",
+ hellcreek_port->port);
+}
+
+static void hellcreek_check_schedule(struct work_struct *work)
+{
+ struct delayed_work *dw = to_delayed_work(work);
+ struct hellcreek_port *hellcreek_port;
+ struct hellcreek *hellcreek;
+ bool startable;
+
+ hellcreek_port = dw_to_hellcreek_port(dw);
+ hellcreek = hellcreek_port->hellcreek;
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ /* Check starting time */
+ startable = hellcreek_schedule_startable(hellcreek,
+ hellcreek_port->port);
+ if (startable) {
+ hellcreek_start_schedule(hellcreek, hellcreek_port->port);
+ mutex_unlock(&hellcreek->reg_lock);
+ return;
+ }
+
+ mutex_unlock(&hellcreek->reg_lock);
+
+ /* Reschedule */
+ schedule_delayed_work(&hellcreek_port->schedule_work,
+ HELLCREEK_SCHEDULE_PERIOD);
+}
+
+static int hellcreek_port_set_schedule(struct dsa_switch *ds, int port,
+ struct tc_taprio_qopt_offload *taprio)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port *hellcreek_port;
+ bool startable;
+ u16 ctrl;
+
+ hellcreek_port = &hellcreek->ports[port];
+
+ dev_dbg(hellcreek->dev, "Configure traffic schedule on port %d\n",
+ port);
+
+ /* First cancel delayed work */
+ cancel_delayed_work_sync(&hellcreek_port->schedule_work);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ if (hellcreek_port->current_schedule) {
+ taprio_offload_free(hellcreek_port->current_schedule);
+ hellcreek_port->current_schedule = NULL;
+ }
+ hellcreek_port->current_schedule = taprio_offload_get(taprio);
+
+ /* Configure max sdu */
+ hellcreek_setup_maxsdu(hellcreek, port, hellcreek_port->current_schedule);
+
+	/* Select tgd */
+ hellcreek_select_tgd(hellcreek, port);
+
+ /* Enable gating and keep defaults */
+ ctrl = (0xff << TR_TGDCTRL_ADMINGATESTATES_SHIFT) | TR_TGDCTRL_GATE_EN;
+ hellcreek_write(hellcreek, ctrl, TR_TGDCTRL);
+
+ /* Cancel pending schedule */
+ hellcreek_write(hellcreek, 0x00, TR_ESTCMD);
+
+ /* Setup a new schedule */
+ hellcreek_setup_gcl(hellcreek, port, hellcreek_port->current_schedule);
+
+ /* Configure cycle time */
+ hellcreek_set_cycle_time(hellcreek, hellcreek_port->current_schedule);
+
+ /* Check starting time */
+ startable = hellcreek_schedule_startable(hellcreek, port);
+ if (startable) {
+ hellcreek_start_schedule(hellcreek, port);
+ mutex_unlock(&hellcreek->reg_lock);
+ return 0;
+ }
+
+ mutex_unlock(&hellcreek->reg_lock);
+
+ /* Schedule periodic schedule check */
+ schedule_delayed_work(&hellcreek_port->schedule_work,
+ HELLCREEK_SCHEDULE_PERIOD);
+
+ return 0;
+}
+
+static int hellcreek_port_del_schedule(struct dsa_switch *ds, int port)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port *hellcreek_port;
+
+ hellcreek_port = &hellcreek->ports[port];
+
+ dev_dbg(hellcreek->dev, "Remove traffic schedule on port %d\n", port);
+
+ /* First cancel delayed work */
+ cancel_delayed_work_sync(&hellcreek_port->schedule_work);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ if (hellcreek_port->current_schedule) {
+ taprio_offload_free(hellcreek_port->current_schedule);
+ hellcreek_port->current_schedule = NULL;
+ }
+
+ /* Reset max sdu */
+ hellcreek_reset_maxsdu(hellcreek, port);
+
+ /* Select tgd */
+ hellcreek_select_tgd(hellcreek, port);
+
+ /* Disable gating and return to regular switching flow */
+ hellcreek_write(hellcreek, 0xff << TR_TGDCTRL_ADMINGATESTATES_SHIFT,
+ TR_TGDCTRL);
+
+ mutex_unlock(&hellcreek->reg_lock);
+
+ return 0;
+}
+
+static bool hellcreek_validate_schedule(struct hellcreek *hellcreek,
+ struct tc_taprio_qopt_offload *schedule)
+{
+ size_t i;
+
+ /* Does this hellcreek version support Qbv in hardware? */
+ if (!hellcreek->pdata->qbv_support)
+ return false;
+
+ /* cycle time can only be 32bit */
+ if (schedule->cycle_time > (u32)-1)
+ return false;
+
+ /* cycle time extension is not supported */
+ if (schedule->cycle_time_extension)
+ return false;
+
+ /* Only set command is supported */
+ for (i = 0; i < schedule->num_entries; ++i)
+ if (schedule->entries[i].command != TC_TAPRIO_CMD_SET_GATES)
+ return false;
+
+ return true;
+}
+
+static int hellcreek_tc_query_caps(struct tc_query_caps_base *base)
+{
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ caps->supports_queue_max_sdu = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int hellcreek_port_setup_tc(struct dsa_switch *ds, int port,
+ enum tc_setup_type type, void *type_data)
+{
+ struct hellcreek *hellcreek = ds->priv;
+
+ switch (type) {
+ case TC_QUERY_CAPS:
+ return hellcreek_tc_query_caps(type_data);
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_qopt_offload *taprio = type_data;
+
+ if (!hellcreek_validate_schedule(hellcreek, taprio))
+ return -EOPNOTSUPP;
+
+ if (taprio->enable)
+ return hellcreek_port_set_schedule(ds, port, taprio);
+
+ return hellcreek_port_del_schedule(ds, port);
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct dsa_switch_ops hellcreek_ds_ops = {
+ .devlink_info_get = hellcreek_devlink_info_get,
+ .get_ethtool_stats = hellcreek_get_ethtool_stats,
+ .get_sset_count = hellcreek_get_sset_count,
+ .get_strings = hellcreek_get_strings,
+ .get_tag_protocol = hellcreek_get_tag_protocol,
+ .get_ts_info = hellcreek_get_ts_info,
+ .phylink_get_caps = hellcreek_phylink_get_caps,
+ .port_bridge_flags = hellcreek_bridge_flags,
+ .port_bridge_join = hellcreek_port_bridge_join,
+ .port_bridge_leave = hellcreek_port_bridge_leave,
+ .port_disable = hellcreek_port_disable,
+ .port_enable = hellcreek_port_enable,
+ .port_fdb_add = hellcreek_fdb_add,
+ .port_fdb_del = hellcreek_fdb_del,
+ .port_fdb_dump = hellcreek_fdb_dump,
+ .port_hwtstamp_set = hellcreek_port_hwtstamp_set,
+ .port_hwtstamp_get = hellcreek_port_hwtstamp_get,
+ .port_pre_bridge_flags = hellcreek_pre_bridge_flags,
+ .port_prechangeupper = hellcreek_port_prechangeupper,
+ .port_rxtstamp = hellcreek_port_rxtstamp,
+ .port_setup_tc = hellcreek_port_setup_tc,
+ .port_stp_state_set = hellcreek_port_stp_state_set,
+ .port_txtstamp = hellcreek_port_txtstamp,
+ .port_vlan_add = hellcreek_vlan_add,
+ .port_vlan_del = hellcreek_vlan_del,
+ .port_vlan_filtering = hellcreek_vlan_filtering,
+ .setup = hellcreek_setup,
+ .teardown = hellcreek_teardown,
+};
+
+static int hellcreek_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hellcreek *hellcreek;
+ struct resource *res;
+ int ret, i;
+
+ hellcreek = devm_kzalloc(dev, sizeof(*hellcreek), GFP_KERNEL);
+ if (!hellcreek)
+ return -ENOMEM;
+
+ hellcreek->vidmbrcfg = devm_kcalloc(dev, VLAN_N_VID,
+ sizeof(*hellcreek->vidmbrcfg),
+ GFP_KERNEL);
+ if (!hellcreek->vidmbrcfg)
+ return -ENOMEM;
+
+ hellcreek->pdata = of_device_get_match_data(dev);
+
+ hellcreek->ports = devm_kcalloc(dev, hellcreek->pdata->num_ports,
+ sizeof(*hellcreek->ports),
+ GFP_KERNEL);
+ if (!hellcreek->ports)
+ return -ENOMEM;
+
+ for (i = 0; i < hellcreek->pdata->num_ports; ++i) {
+ struct hellcreek_port *port = &hellcreek->ports[i];
+
+ port->counter_values =
+ devm_kcalloc(dev,
+ ARRAY_SIZE(hellcreek_counter),
+ sizeof(*port->counter_values),
+ GFP_KERNEL);
+ if (!port->counter_values)
+ return -ENOMEM;
+
+ port->vlan_dev_bitmap = devm_bitmap_zalloc(dev, VLAN_N_VID,
+ GFP_KERNEL);
+ if (!port->vlan_dev_bitmap)
+ return -ENOMEM;
+
+ port->hellcreek = hellcreek;
+ port->port = i;
+
+ INIT_DELAYED_WORK(&port->schedule_work,
+ hellcreek_check_schedule);
+ }
+
+ mutex_init(&hellcreek->reg_lock);
+ mutex_init(&hellcreek->vlan_lock);
+ mutex_init(&hellcreek->ptp_lock);
+
+ hellcreek->dev = dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tsn");
+ if (!res) {
+ dev_err(dev, "No memory region provided!\n");
+ return -ENODEV;
+ }
+
+ hellcreek->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hellcreek->base))
+ return PTR_ERR(hellcreek->base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ptp");
+ if (!res) {
+ dev_err(dev, "No PTP memory region provided!\n");
+ return -ENODEV;
+ }
+
+ hellcreek->ptp_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hellcreek->ptp_base))
+ return PTR_ERR(hellcreek->ptp_base);
+
+ ret = hellcreek_detect(hellcreek);
+ if (ret) {
+ dev_err(dev, "No (known) chip found!\n");
+ return ret;
+ }
+
+ ret = hellcreek_wait_until_ready(hellcreek);
+ if (ret) {
+ dev_err(dev, "Switch didn't become ready!\n");
+ return ret;
+ }
+
+ hellcreek_feature_detect(hellcreek);
+
+ hellcreek->ds = devm_kzalloc(dev, sizeof(*hellcreek->ds), GFP_KERNEL);
+ if (!hellcreek->ds)
+ return -ENOMEM;
+
+ hellcreek->ds->dev = dev;
+ hellcreek->ds->priv = hellcreek;
+ hellcreek->ds->ops = &hellcreek_ds_ops;
+ hellcreek->ds->num_ports = hellcreek->pdata->num_ports;
+ hellcreek->ds->num_tx_queues = HELLCREEK_NUM_EGRESS_QUEUES;
+
+ ret = dsa_register_switch(hellcreek->ds);
+ if (ret) {
+ dev_err_probe(dev, ret, "Unable to register switch\n");
+ return ret;
+ }
+
+ ret = hellcreek_ptp_setup(hellcreek);
+ if (ret) {
+ dev_err(dev, "Failed to setup PTP!\n");
+ goto err_ptp_setup;
+ }
+
+ ret = hellcreek_hwtstamp_setup(hellcreek);
+ if (ret) {
+ dev_err(dev, "Failed to setup hardware timestamping!\n");
+ goto err_tstamp_setup;
+ }
+
+ platform_set_drvdata(pdev, hellcreek);
+
+ return 0;
+
+err_tstamp_setup:
+ hellcreek_ptp_free(hellcreek);
+err_ptp_setup:
+ dsa_unregister_switch(hellcreek->ds);
+
+ return ret;
+}
+
+static int hellcreek_remove(struct platform_device *pdev)
+{
+ struct hellcreek *hellcreek = platform_get_drvdata(pdev);
+
+ if (!hellcreek)
+ return 0;
+
+ hellcreek_hwtstamp_free(hellcreek);
+ hellcreek_ptp_free(hellcreek);
+ dsa_unregister_switch(hellcreek->ds);
+
+ return 0;
+}
+
+static void hellcreek_shutdown(struct platform_device *pdev)
+{
+ struct hellcreek *hellcreek = platform_get_drvdata(pdev);
+
+ if (!hellcreek)
+ return;
+
+ dsa_switch_shutdown(hellcreek->ds);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static const struct hellcreek_platform_data de1soc_r1_pdata = {
+ .name = "r4c30",
+ .num_ports = 4,
+ .is_100_mbits = 1,
+ .qbv_support = 1,
+ .qbv_on_cpu_port = 1,
+ .qbu_support = 0,
+ .module_id = 0x4c30,
+};
+
+static const struct of_device_id hellcreek_of_match[] = {
+ {
+ .compatible = "hirschmann,hellcreek-de1soc-r1",
+ .data = &de1soc_r1_pdata,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, hellcreek_of_match);
+
+static struct platform_driver hellcreek_driver = {
+ .probe = hellcreek_probe,
+ .remove = hellcreek_remove,
+ .shutdown = hellcreek_shutdown,
+ .driver = {
+ .name = "hellcreek",
+ .of_match_table = hellcreek_of_match,
+ },
+};
+module_platform_driver(hellcreek_driver);
+
+MODULE_AUTHOR("Kurt Kanzenbach <kurt@linutronix.de>");
+MODULE_DESCRIPTION("Hirschmann Hellcreek driver");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/net/dsa/hirschmann/hellcreek.h b/drivers/net/dsa/hirschmann/hellcreek.h
new file mode 100644
index 000000000..4a678f7d6
--- /dev/null
+++ b/drivers/net/dsa/hirschmann/hellcreek.h
@@ -0,0 +1,321 @@
+/* SPDX-License-Identifier: (GPL-2.0 or MIT) */
+/*
+ * DSA driver for:
+ * Hirschmann Hellcreek TSN switch.
+ *
+ * Copyright (C) 2019-2021 Linutronix GmbH
+ * Author Kurt Kanzenbach <kurt@linutronix.de>
+ */
+
+#ifndef _HELLCREEK_H_
+#define _HELLCREEK_H_
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/leds.h>
+#include <linux/platform_data/hirschmann-hellcreek.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
+#include <net/dsa.h>
+#include <net/pkt_sched.h>
+
+/* Ports:
+ * - 0: CPU
+ * - 1: Tunnel
+ * - 2: TSN front port 1
+ * - 3: TSN front port 2
+ * - ...
+ */
+#define CPU_PORT 0
+#define TUNNEL_PORT 1
+
+#define HELLCREEK_VLAN_NO_MEMBER 0x0
+#define HELLCREEK_VLAN_UNTAGGED_MEMBER 0x1
+#define HELLCREEK_VLAN_TAGGED_MEMBER 0x3
+#define HELLCREEK_NUM_EGRESS_QUEUES 8
+#define HELLCREEK_DEFAULT_MAX_SDU 1536
+
+/* Register definitions */
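+/* All registers are 16 bit wide and are accessed with readw()/writew(). The
+ * offsets below are byte offsets, i.e. the register index times two.
+ */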
+#define HR_MODID_C (0 * 2)
+#define HR_REL_L_C (1 * 2)
+#define HR_REL_H_C (2 * 2)
+#define HR_BLD_L_C (3 * 2)
+#define HR_BLD_H_C (4 * 2)
+#define HR_CTRL_C (5 * 2)
+#define HR_CTRL_C_READY BIT(14)
+#define HR_CTRL_C_TRANSITION BIT(13)
+#define HR_CTRL_C_ENABLE BIT(0)
+
+#define HR_PSEL (0xa6 * 2)
+#define HR_PSEL_PTWSEL_SHIFT 4
+#define HR_PSEL_PTWSEL_MASK GENMASK(5, 4)
+#define HR_PSEL_PRTCWSEL_SHIFT 0
+#define HR_PSEL_PRTCWSEL_MASK GENMASK(2, 0)
+
+#define HR_PTCFG (0xa7 * 2)
+#define HR_PTCFG_MLIMIT_EN BIT(13)
+#define HR_PTCFG_UMC_FLT BIT(10)
+#define HR_PTCFG_UUC_FLT BIT(9)
+#define HR_PTCFG_UNTRUST BIT(8)
+#define HR_PTCFG_TAG_REQUIRED BIT(7)
+#define HR_PTCFG_PPRIO_SHIFT 4
+#define HR_PTCFG_PPRIO_MASK GENMASK(6, 4)
+#define HR_PTCFG_INGRESSFLT BIT(3)
+#define HR_PTCFG_BLOCKED BIT(2)
+#define HR_PTCFG_LEARNING_EN BIT(1)
+#define HR_PTCFG_ADMIN_EN BIT(0)
+
+#define HR_PRTCCFG (0xa8 * 2)
+#define HR_PRTCCFG_PCP_TC_MAP_SHIFT 0
+#define HR_PRTCCFG_PCP_TC_MAP_MASK GENMASK(2, 0)
+
+#define HR_PTPRTCCFG (0xa9 * 2)
+#define HR_PTPRTCCFG_SET_QTRACK BIT(15)
+#define HR_PTPRTCCFG_REJECT BIT(14)
+#define HR_PTPRTCCFG_MAXSDU_SHIFT 0
+#define HR_PTPRTCCFG_MAXSDU_MASK GENMASK(10, 0)
+
+#define HR_CSEL (0x8d * 2)
+#define HR_CSEL_SHIFT 0
+#define HR_CSEL_MASK GENMASK(7, 0)
+#define HR_CRDL (0x8e * 2)
+#define HR_CRDH (0x8f * 2)
+
+#define HR_SWTRC_CFG (0x90 * 2)
+#define HR_SWTRC0 (0x91 * 2)
+#define HR_SWTRC1 (0x92 * 2)
+#define HR_PFREE (0x93 * 2)
+#define HR_MFREE (0x94 * 2)
+
+#define HR_FDBAGE (0x97 * 2)
+#define HR_FDBMAX (0x98 * 2)
+#define HR_FDBRDL (0x99 * 2)
+#define HR_FDBRDM (0x9a * 2)
+#define HR_FDBRDH (0x9b * 2)
+
+#define HR_FDBMDRD (0x9c * 2)
+#define HR_FDBMDRD_PORTMASK_SHIFT 0
+#define HR_FDBMDRD_PORTMASK_MASK GENMASK(3, 0)
+#define HR_FDBMDRD_AGE_SHIFT 4
+#define HR_FDBMDRD_AGE_MASK GENMASK(7, 4)
+#define HR_FDBMDRD_OBT BIT(8)
+#define HR_FDBMDRD_PASS_BLOCKED BIT(9)
+#define HR_FDBMDRD_STATIC BIT(11)
+#define HR_FDBMDRD_REPRIO_TC_SHIFT 12
+#define HR_FDBMDRD_REPRIO_TC_MASK GENMASK(14, 12)
+#define HR_FDBMDRD_REPRIO_EN BIT(15)
+
+#define HR_FDBWDL (0x9d * 2)
+#define HR_FDBWDM (0x9e * 2)
+#define HR_FDBWDH (0x9f * 2)
+#define HR_FDBWRM0 (0xa0 * 2)
+#define HR_FDBWRM0_PORTMASK_SHIFT 0
+#define HR_FDBWRM0_PORTMASK_MASK GENMASK(3, 0)
+#define HR_FDBWRM0_OBT BIT(8)
+#define HR_FDBWRM0_PASS_BLOCKED BIT(9)
+#define HR_FDBWRM0_REPRIO_TC_SHIFT 12
+#define HR_FDBWRM0_REPRIO_TC_MASK GENMASK(14, 12)
+#define HR_FDBWRM0_REPRIO_EN BIT(15)
+#define HR_FDBWRM1 (0xa1 * 2)
+
+#define HR_FDBWRCMD (0xa2 * 2)
+#define HR_FDBWRCMD_FDBDEL BIT(9)
+
+#define HR_SWCFG (0xa3 * 2)
+#define HR_SWCFG_GM_STATEMD BIT(15)
+#define HR_SWCFG_LAS_MODE_SHIFT 12
+#define HR_SWCFG_LAS_MODE_MASK GENMASK(13, 12)
+#define HR_SWCFG_LAS_OFF (0x00)
+#define HR_SWCFG_LAS_ON (0x01)
+#define HR_SWCFG_LAS_STATIC (0x02)
+#define HR_SWCFG_CT_EN BIT(11)
+#define HR_SWCFG_VLAN_UNAWARE BIT(10)
+#define HR_SWCFG_ALWAYS_OBT BIT(9)
+#define HR_SWCFG_FDBAGE_EN BIT(5)
+#define HR_SWCFG_FDBLRN_EN BIT(4)
+
+#define HR_SWSTAT (0xa4 * 2)
+#define HR_SWSTAT_FAIL BIT(4)
+#define HR_SWSTAT_BUSY BIT(0)
+
+#define HR_SWCMD (0xa5 * 2)
+#define HW_SWCMD_FLUSH BIT(0)
+
+#define HR_VIDCFG (0xaa * 2)
+#define HR_VIDCFG_VID_SHIFT 0
+#define HR_VIDCFG_VID_MASK GENMASK(11, 0)
+#define HR_VIDCFG_PVID BIT(12)
+
+#define HR_VIDMBRCFG (0xab * 2)
+#define HR_VIDMBRCFG_P0MBR_SHIFT 0
+#define HR_VIDMBRCFG_P0MBR_MASK GENMASK(1, 0)
+#define HR_VIDMBRCFG_P1MBR_SHIFT 2
+#define HR_VIDMBRCFG_P1MBR_MASK GENMASK(3, 2)
+#define HR_VIDMBRCFG_P2MBR_SHIFT 4
+#define HR_VIDMBRCFG_P2MBR_MASK GENMASK(5, 4)
+#define HR_VIDMBRCFG_P3MBR_SHIFT 6
+#define HR_VIDMBRCFG_P3MBR_MASK GENMASK(7, 6)
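+/* Each port occupies a two bit membership field. For example, marking port 2
+ * a tagged member of the currently selected VID would use
+ * (HELLCREEK_VLAN_TAGGED_MEMBER << HR_VIDMBRCFG_P2MBR_SHIFT).
+ */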
+
+#define HR_FEABITS0 (0xac * 2)
+#define HR_FEABITS0_FDBBINS_SHIFT 4
+#define HR_FEABITS0_FDBBINS_MASK GENMASK(7, 4)
+#define HR_FEABITS0_PCNT_SHIFT 8
+#define HR_FEABITS0_PCNT_MASK GENMASK(11, 8)
+#define HR_FEABITS0_MCNT_SHIFT 12
+#define HR_FEABITS0_MCNT_MASK GENMASK(15, 12)
+
+#define TR_QTRACK (0xb1 * 2)
+#define TR_TGDVER (0xb3 * 2)
+#define TR_TGDVER_REV_MIN_MASK GENMASK(7, 0)
+#define TR_TGDVER_REV_MIN_SHIFT 0
+#define TR_TGDVER_REV_MAJ_MASK GENMASK(15, 8)
+#define TR_TGDVER_REV_MAJ_SHIFT 8
+#define TR_TGDSEL (0xb4 * 2)
+#define TR_TGDSEL_TDGSEL_MASK GENMASK(1, 0)
+#define TR_TGDSEL_TDGSEL_SHIFT 0
+#define TR_TGDCTRL (0xb5 * 2)
+#define TR_TGDCTRL_GATE_EN BIT(0)
+#define TR_TGDCTRL_CYC_SNAP BIT(4)
+#define TR_TGDCTRL_SNAP_EST BIT(5)
+#define TR_TGDCTRL_ADMINGATESTATES_MASK GENMASK(15, 8)
+#define TR_TGDCTRL_ADMINGATESTATES_SHIFT 8
+#define TR_TGDSTAT0 (0xb6 * 2)
+#define TR_TGDSTAT1 (0xb7 * 2)
+#define TR_ESTWRL (0xb8 * 2)
+#define TR_ESTWRH (0xb9 * 2)
+#define TR_ESTCMD (0xba * 2)
+#define TR_ESTCMD_ESTSEC_MASK GENMASK(2, 0)
+#define TR_ESTCMD_ESTSEC_SHIFT 0
+#define TR_ESTCMD_ESTARM BIT(4)
+#define TR_ESTCMD_ESTSWCFG BIT(5)
+#define TR_EETWRL (0xbb * 2)
+#define TR_EETWRH (0xbc * 2)
+#define TR_EETCMD (0xbd * 2)
+#define TR_EETCMD_EETSEC_MASK GENMASK(2, 0)
+#define TR_EETCMD_EETSEC_SHIFT 0
+#define TR_EETCMD_EETARM BIT(4)
+#define TR_CTWRL (0xbe * 2)
+#define TR_CTWRH (0xbf * 2)
+#define TR_LCNSL (0xc1 * 2)
+#define TR_LCNSH (0xc2 * 2)
+#define TR_LCS (0xc3 * 2)
+#define TR_GCLDAT (0xc4 * 2)
+#define TR_GCLDAT_GCLWRGATES_MASK GENMASK(7, 0)
+#define TR_GCLDAT_GCLWRGATES_SHIFT 0
+#define TR_GCLDAT_GCLWRLAST BIT(8)
+#define TR_GCLDAT_GCLOVRI BIT(9)
+#define TR_GCLTIL (0xc5 * 2)
+#define TR_GCLTIH (0xc6 * 2)
+#define TR_GCLCMD (0xc7 * 2)
+#define TR_GCLCMD_GCLWRADR_MASK GENMASK(7, 0)
+#define TR_GCLCMD_GCLWRADR_SHIFT 0
+#define TR_GCLCMD_INIT_GATE_STATES_MASK GENMASK(15, 8)
+#define TR_GCLCMD_INIT_GATE_STATES_SHIFT 8
+
+struct hellcreek_counter {
+ u8 offset;
+ const char *name;
+};
+
+struct hellcreek;
+
+/* State flags for hellcreek_port_hwtstamp::state */
+enum {
+ HELLCREEK_HWTSTAMP_ENABLED,
+ HELLCREEK_HWTSTAMP_TX_IN_PROGRESS,
+};
+
+/* A structure to hold hardware timestamping information per port */
+struct hellcreek_port_hwtstamp {
+ /* Timestamping state */
+ unsigned long state;
+
+ /* Resources for receive timestamping */
+ struct sk_buff_head rx_queue; /* For synchronization messages */
+
+ /* Resources for transmit timestamping */
+ unsigned long tx_tstamp_start;
+ struct sk_buff *tx_skb;
+
+ /* Current timestamp configuration */
+ struct hwtstamp_config tstamp_config;
+};
+
+struct hellcreek_port {
+ struct hellcreek *hellcreek;
+ unsigned long *vlan_dev_bitmap;
+ int port;
+ u16 ptcfg; /* ptcfg shadow */
+ u64 *counter_values;
+
+ /* Per-port timestamping resources */
+ struct hellcreek_port_hwtstamp port_hwtstamp;
+
+ /* Per-port Qbv schedule information */
+ struct tc_taprio_qopt_offload *current_schedule;
+ struct delayed_work schedule_work;
+};
+
+struct hellcreek_fdb_entry {
+ size_t idx;
+ unsigned char mac[ETH_ALEN];
+ u8 portmask;
+ u8 age;
+ u8 is_obt;
+ u8 pass_blocked;
+ u8 is_static;
+ u8 reprio_tc;
+ u8 reprio_en;
+};
+
+struct hellcreek {
+ const struct hellcreek_platform_data *pdata;
+ struct device *dev;
+ struct dsa_switch *ds;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ struct hellcreek_port *ports;
+ struct delayed_work overflow_work;
+ struct led_classdev led_is_gm;
+ struct led_classdev led_sync_good;
+ struct mutex reg_lock; /* Switch IP register lock */
+ struct mutex vlan_lock; /* VLAN bitmaps lock */
+ struct mutex ptp_lock; /* PTP IP register lock */
+ struct devlink_region *vlan_region;
+ struct devlink_region *fdb_region;
+ void __iomem *base;
+ void __iomem *ptp_base;
+ u16 swcfg; /* swcfg shadow */
+ u8 *vidmbrcfg; /* vidmbrcfg shadow */
+ u64 seconds; /* PTP seconds */
+ u64 last_ts; /* Used for overflow detection */
+ u16 status_out; /* ptp.status_out shadow */
+ size_t fdb_entries;
+};
+
+/* A Qbv schedule can only be started up to 8 seconds in the future. If the delta
+ * between the base time and the current ptp time is larger than 8 seconds, then
+ * use periodic work to check for the schedule to be started. The delayed work
+ * cannot be armed directly to $base_time - 8 + X, because for large deltas the
+ * PTP frequency matters.
+ */
+#define HELLCREEK_SCHEDULE_PERIOD (2 * HZ)
+#define dw_to_hellcreek_port(dw) \
+ container_of(dw, struct hellcreek_port, schedule_work)
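+
+/* Usage sketch, analogous to dw_overflow_to_hellcreek() in hellcreek_ptp.h:
+ * inside the delayed work handler,
+ *
+ *	struct delayed_work *dw = to_delayed_work(work);
+ *	struct hellcreek_port *hellcreek_port = dw_to_hellcreek_port(dw);
+ */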
+
+/* Devlink resources */
+enum hellcreek_devlink_resource_id {
+ HELLCREEK_DEVLINK_PARAM_ID_VLAN_TABLE,
+ HELLCREEK_DEVLINK_PARAM_ID_FDB_TABLE,
+};
+
+struct hellcreek_devlink_vlan_entry {
+ u16 vid;
+ u16 member;
+};
+
+#endif /* _HELLCREEK_H_ */
diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
new file mode 100644
index 000000000..ffd06cf8c
--- /dev/null
+++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
@@ -0,0 +1,481 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * DSA driver for:
+ * Hirschmann Hellcreek TSN switch.
+ *
+ * Copyright (C) 2019,2020 Hochschule Offenburg
+ * Copyright (C) 2019,2020 Linutronix GmbH
+ * Authors: Kamil Alkhouri <kamil.alkhouri@hs-offenburg.de>
+ * Kurt Kanzenbach <kurt@linutronix.de>
+ */
+
+#include <linux/ptp_classify.h>
+
+#include "hellcreek.h"
+#include "hellcreek_hwtstamp.h"
+#include "hellcreek_ptp.h"
+
+int hellcreek_get_ts_info(struct dsa_switch *ds, int port,
+ struct ethtool_ts_info *info)
+{
+ struct hellcreek *hellcreek = ds->priv;
+
+ info->phc_index = hellcreek->ptp_clock ?
+ ptp_clock_index(hellcreek->ptp_clock) : -1;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+	/* Only HWTSTAMP_TX_ON is supported; TX timestamping is always enabled */
+ info->tx_types = BIT(HWTSTAMP_TX_ON);
+
+ /* L2 & L4 PTPv2 event rx messages are timestamped */
+ info->rx_filters = BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+}
+
+/* Enabling/disabling TX and RX HW timestamping for individual PTP messages is
+ * not supported by the switch. Thus, this function only serves as a check
+ * whether the user requested what is actually available or not.
+ */
+static int hellcreek_set_hwtstamp_config(struct hellcreek *hellcreek, int port,
+ struct hwtstamp_config *config)
+{
+ struct hellcreek_port_hwtstamp *ps =
+ &hellcreek->ports[port].port_hwtstamp;
+ bool tx_tstamp_enable = false;
+ bool rx_tstamp_enable = false;
+
+ /* Interaction with the timestamp hardware is prevented here. It is
+ * enabled when this config function ends successfully
+ */
+ clear_bit_unlock(HELLCREEK_HWTSTAMP_ENABLED, &ps->state);
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_ON:
+ tx_tstamp_enable = true;
+ break;
+
+ /* TX HW timestamping can't be disabled on the switch */
+ case HWTSTAMP_TX_OFF:
+ config->tx_type = HWTSTAMP_TX_ON;
+ break;
+
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ /* RX HW timestamping can't be disabled on the switch */
+ case HWTSTAMP_FILTER_NONE:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ rx_tstamp_enable = true;
+ break;
+
+ /* RX HW timestamping can't be enabled for all messages on the switch */
+ case HWTSTAMP_FILTER_ALL:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+
+ default:
+ return -ERANGE;
+ }
+
+ if (!tx_tstamp_enable)
+ return -ERANGE;
+
+ if (!rx_tstamp_enable)
+ return -ERANGE;
+
+ /* If this point is reached, then the requested hwtstamp config is
+ * compatible with the hwtstamp offered by the switch. Therefore,
+ * enable the interaction with the HW timestamping
+ */
+ set_bit(HELLCREEK_HWTSTAMP_ENABLED, &ps->state);
+
+ return 0;
+}
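+
+/* For context: the config checked above is what userspace negotiates via the
+ * SIOCSHWTSTAMP ioctl. A minimal, hypothetical userspace sketch (interface
+ * name and socket descriptor are assumptions):
+ *
+ *	struct hwtstamp_config cfg = {
+ *		.tx_type   = HWTSTAMP_TX_ON,
+ *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
+ *	};
+ *	struct ifreq ifr = { 0 };
+ *
+ *	strncpy(ifr.ifr_name, "lan1", sizeof(ifr.ifr_name) - 1);
+ *	ifr.ifr_data = (char *)&cfg;
+ *	ioctl(sockfd, SIOCSHWTSTAMP, &ifr);
+ */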
+
+int hellcreek_port_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct ifreq *ifr)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port_hwtstamp *ps;
+ struct hwtstamp_config config;
+ int err;
+
+ ps = &hellcreek->ports[port].port_hwtstamp;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ err = hellcreek_set_hwtstamp_config(hellcreek, port, &config);
+ if (err)
+ return err;
+
+ /* Save the chosen configuration to be returned later */
+ memcpy(&ps->tstamp_config, &config, sizeof(config));
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+int hellcreek_port_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct ifreq *ifr)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port_hwtstamp *ps;
+ struct hwtstamp_config *config;
+
+ ps = &hellcreek->ports[port].port_hwtstamp;
+ config = &ps->tstamp_config;
+
+ return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+ -EFAULT : 0;
+}
+
+/* Returns a pointer to the PTP header if the caller should time stamp, or NULL
+ * if the caller should not.
+ */
+static struct ptp_header *hellcreek_should_tstamp(struct hellcreek *hellcreek,
+ int port, struct sk_buff *skb,
+ unsigned int type)
+{
+ struct hellcreek_port_hwtstamp *ps =
+ &hellcreek->ports[port].port_hwtstamp;
+ struct ptp_header *hdr;
+
+ hdr = ptp_parse_header(skb, type);
+ if (!hdr)
+ return NULL;
+
+ if (!test_bit(HELLCREEK_HWTSTAMP_ENABLED, &ps->state))
+ return NULL;
+
+ return hdr;
+}
+
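+/* The switch delivers the RX nanoseconds timestamp inline, in the 32-bit
+ * reserved field of the PTPv2 header (see the PR_SETTINGS_C_RES3TS write in
+ * hellcreek_hwtstamp_setup()). The two helpers below read that field out and
+ * clear it again before the frame is passed up the stack.
+ */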
+static u64 hellcreek_get_reserved_field(const struct ptp_header *hdr)
+{
+ return be32_to_cpu(hdr->reserved2);
+}
+
+static void hellcreek_clear_reserved_field(struct ptp_header *hdr)
+{
+ hdr->reserved2 = 0;
+}
+
+static int hellcreek_ptp_hwtstamp_available(struct hellcreek *hellcreek,
+ unsigned int ts_reg)
+{
+ u16 status;
+
+ status = hellcreek_ptp_read(hellcreek, ts_reg);
+
+ if (status & PR_TS_STATUS_TS_LOST)
+ dev_err(hellcreek->dev,
+ "Tx time stamp lost! This should never happen!\n");
+
+ /* If hwtstamp is not available, this means the previous hwtstamp was
+ * successfully read, and the one we need is not yet available
+ */
+ return (status & PR_TS_STATUS_TS_AVAIL) ? 1 : 0;
+}
+
+/* Get nanoseconds timestamp from timestamping unit */
+static u64 hellcreek_ptp_hwtstamp_read(struct hellcreek *hellcreek,
+ unsigned int ts_reg)
+{
+ u16 nsl, nsh;
+
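+	/* As in hellcreek_ptp_clock_read(): due to the internal register
+	 * locking, all five 16-bit words have to be read; only the final
+	 * high/low words carry the nanoseconds.
+	 */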
+ nsh = hellcreek_ptp_read(hellcreek, ts_reg);
+ nsh = hellcreek_ptp_read(hellcreek, ts_reg);
+ nsh = hellcreek_ptp_read(hellcreek, ts_reg);
+ nsh = hellcreek_ptp_read(hellcreek, ts_reg);
+ nsl = hellcreek_ptp_read(hellcreek, ts_reg);
+
+ return (u64)nsl | ((u64)nsh << 16);
+}
+
+static int hellcreek_txtstamp_work(struct hellcreek *hellcreek,
+ struct hellcreek_port_hwtstamp *ps, int port)
+{
+ struct skb_shared_hwtstamps shhwtstamps;
+ unsigned int status_reg, data_reg;
+ struct sk_buff *tmp_skb;
+ int ts_status;
+ u64 ns = 0;
+
+ if (!ps->tx_skb)
+ return 0;
+
+ switch (port) {
+ case 2:
+ status_reg = PR_TS_TX_P1_STATUS_C;
+ data_reg = PR_TS_TX_P1_DATA_C;
+ break;
+ case 3:
+ status_reg = PR_TS_TX_P2_STATUS_C;
+ data_reg = PR_TS_TX_P2_DATA_C;
+ break;
+ default:
+ dev_err(hellcreek->dev, "Wrong port for timestamping!\n");
+ return 0;
+ }
+
+ ts_status = hellcreek_ptp_hwtstamp_available(hellcreek, status_reg);
+
+ /* Not available yet? */
+ if (ts_status == 0) {
+ /* Check whether the operation of reading the tx timestamp has
+ * exceeded its allowed period
+ */
+ if (time_is_before_jiffies(ps->tx_tstamp_start +
+ TX_TSTAMP_TIMEOUT)) {
+ dev_err(hellcreek->dev,
+ "Timeout while waiting for Tx timestamp!\n");
+ goto free_and_clear_skb;
+ }
+
+ /* The timestamp should be available quickly, while getting it
+		 * is high priority. Restart the work.
+ */
+ return 1;
+ }
+
+ mutex_lock(&hellcreek->ptp_lock);
+ ns = hellcreek_ptp_hwtstamp_read(hellcreek, data_reg);
+ ns += hellcreek_ptp_gettime_seconds(hellcreek, ns);
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ /* Now we have the timestamp in nanoseconds, store it in the correct
+ * structure in order to send it to the user
+ */
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+
+ tmp_skb = ps->tx_skb;
+ ps->tx_skb = NULL;
+
+ /* skb_complete_tx_timestamp() frees up the client to make another
+ * timestampable transmit. We have to be ready for it by clearing the
+ * ps->tx_skb "flag" beforehand
+ */
+ clear_bit_unlock(HELLCREEK_HWTSTAMP_TX_IN_PROGRESS, &ps->state);
+
+ /* Deliver a clone of the original outgoing tx_skb with tx hwtstamp */
+ skb_complete_tx_timestamp(tmp_skb, &shhwtstamps);
+
+ return 0;
+
+free_and_clear_skb:
+ dev_kfree_skb_any(ps->tx_skb);
+ ps->tx_skb = NULL;
+ clear_bit_unlock(HELLCREEK_HWTSTAMP_TX_IN_PROGRESS, &ps->state);
+
+ return 0;
+}
+
+static void hellcreek_get_rxts(struct hellcreek *hellcreek,
+ struct hellcreek_port_hwtstamp *ps,
+ struct sk_buff *skb, struct sk_buff_head *rxq,
+ int port)
+{
+ struct skb_shared_hwtstamps *shwt;
+ struct sk_buff_head received;
+ unsigned long flags;
+
+ /* The latched timestamp belongs to one of the received frames. */
+ __skb_queue_head_init(&received);
+
+ /* Lock & disable interrupts */
+ spin_lock_irqsave(&rxq->lock, flags);
+
+	/* Add the reception queue "rxq" to the "received" queue and
+	 * reinitialize "rxq". From now on, we deal with "received" not with
+	 * "rxq".
+ */
+ skb_queue_splice_tail_init(rxq, &received);
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ for (; skb; skb = __skb_dequeue(&received)) {
+ struct ptp_header *hdr;
+ unsigned int type;
+ u64 ns;
+
+ /* Get nanoseconds from ptp packet */
+ type = SKB_PTP_TYPE(skb);
+ hdr = ptp_parse_header(skb, type);
+ ns = hellcreek_get_reserved_field(hdr);
+ hellcreek_clear_reserved_field(hdr);
+
+ /* Add seconds part */
+ mutex_lock(&hellcreek->ptp_lock);
+ ns += hellcreek_ptp_gettime_seconds(hellcreek, ns);
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ /* Save time stamp */
+ shwt = skb_hwtstamps(skb);
+ memset(shwt, 0, sizeof(*shwt));
+ shwt->hwtstamp = ns_to_ktime(ns);
+ netif_rx(skb);
+ }
+}
+
+static void hellcreek_rxtstamp_work(struct hellcreek *hellcreek,
+ struct hellcreek_port_hwtstamp *ps,
+ int port)
+{
+ struct sk_buff *skb;
+
+ skb = skb_dequeue(&ps->rx_queue);
+ if (skb)
+ hellcreek_get_rxts(hellcreek, ps, skb, &ps->rx_queue, port);
+}
+
+long hellcreek_hwtstamp_work(struct ptp_clock_info *ptp)
+{
+ struct hellcreek *hellcreek = ptp_to_hellcreek(ptp);
+ struct dsa_switch *ds = hellcreek->ds;
+ int i, restart = 0;
+
+ for (i = 0; i < ds->num_ports; i++) {
+ struct hellcreek_port_hwtstamp *ps;
+
+ if (!dsa_is_user_port(ds, i))
+ continue;
+
+ ps = &hellcreek->ports[i].port_hwtstamp;
+
+ if (test_bit(HELLCREEK_HWTSTAMP_TX_IN_PROGRESS, &ps->state))
+ restart |= hellcreek_txtstamp_work(hellcreek, ps, i);
+
+ hellcreek_rxtstamp_work(hellcreek, ps, i);
+ }
+
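+	/* do_aux_work() convention: a non-negative return value is the delay
+	 * in jiffies until the worker is scheduled again, a negative value
+	 * stops the rescheduling.
+	 */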
+ return restart ? 1 : -1;
+}
+
+void hellcreek_port_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port_hwtstamp *ps;
+ struct ptp_header *hdr;
+ struct sk_buff *clone;
+ unsigned int type;
+
+ ps = &hellcreek->ports[port].port_hwtstamp;
+
+ type = ptp_classify_raw(skb);
+ if (type == PTP_CLASS_NONE)
+ return;
+
+ /* Make sure the message is a PTP message that needs to be timestamped
+ * and the interaction with the HW timestamping is enabled. If not, stop
+ * here
+ */
+ hdr = hellcreek_should_tstamp(hellcreek, port, skb, type);
+ if (!hdr)
+ return;
+
+ clone = skb_clone_sk(skb);
+ if (!clone)
+ return;
+
+ if (test_and_set_bit_lock(HELLCREEK_HWTSTAMP_TX_IN_PROGRESS,
+ &ps->state)) {
+ kfree_skb(clone);
+ return;
+ }
+
+ ps->tx_skb = clone;
+
+	/* Store the number of ticks that have occurred since system start-up
+	 * up to this moment.
+ */
+ ps->tx_tstamp_start = jiffies;
+
+ ptp_schedule_worker(hellcreek->ptp_clock, 0);
+}
+
+bool hellcreek_port_rxtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port_hwtstamp *ps;
+ struct ptp_header *hdr;
+
+ ps = &hellcreek->ports[port].port_hwtstamp;
+
+ /* This check only fails if the user did not initialize hardware
+ * timestamping beforehand.
+ */
+ if (ps->tstamp_config.rx_filter != HWTSTAMP_FILTER_PTP_V2_EVENT)
+ return false;
+
+ /* Make sure the message is a PTP message that needs to be timestamped
+ * and the interaction with the HW timestamping is enabled. If not, stop
+ * here
+ */
+ hdr = hellcreek_should_tstamp(hellcreek, port, skb, type);
+ if (!hdr)
+ return false;
+
+ SKB_PTP_TYPE(skb) = type;
+
+ skb_queue_tail(&ps->rx_queue, skb);
+
+ ptp_schedule_worker(hellcreek->ptp_clock, 0);
+
+ return true;
+}
+
+static void hellcreek_hwtstamp_port_setup(struct hellcreek *hellcreek, int port)
+{
+ struct hellcreek_port_hwtstamp *ps =
+ &hellcreek->ports[port].port_hwtstamp;
+
+ skb_queue_head_init(&ps->rx_queue);
+}
+
+int hellcreek_hwtstamp_setup(struct hellcreek *hellcreek)
+{
+ struct dsa_switch *ds = hellcreek->ds;
+ int i;
+
+ /* Initialize timestamping ports. */
+ for (i = 0; i < ds->num_ports; ++i) {
+ if (!dsa_is_user_port(ds, i))
+ continue;
+
+ hellcreek_hwtstamp_port_setup(hellcreek, i);
+ }
+
+ /* Select the synchronized clock as the source timekeeper for the
+ * timestamps and enable inline timestamping.
+ */
+ hellcreek_ptp_write(hellcreek, PR_SETTINGS_C_TS_SRC_TK_MASK |
+ PR_SETTINGS_C_RES3TS,
+ PR_SETTINGS_C);
+
+ return 0;
+}
+
+void hellcreek_hwtstamp_free(struct hellcreek *hellcreek)
+{
+	/* Nothing to do */
+}
diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h
new file mode 100644
index 000000000..71af77efb
--- /dev/null
+++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * DSA driver for:
+ * Hirschmann Hellcreek TSN switch.
+ *
+ * Copyright (C) 2019,2020 Hochschule Offenburg
+ * Copyright (C) 2019,2020 Linutronix GmbH
+ * Authors: Kurt Kanzenbach <kurt@linutronix.de>
+ * Kamil Alkhouri <kamil.alkhouri@hs-offenburg.de>
+ */
+
+#ifndef _HELLCREEK_HWTSTAMP_H_
+#define _HELLCREEK_HWTSTAMP_H_
+
+#include <net/dsa.h>
+#include "hellcreek.h"
+
+/* Timestamp Register */
+#define PR_TS_RX_P1_STATUS_C (0x1d * 2)
+#define PR_TS_RX_P1_DATA_C (0x1e * 2)
+#define PR_TS_TX_P1_STATUS_C (0x1f * 2)
+#define PR_TS_TX_P1_DATA_C (0x20 * 2)
+#define PR_TS_RX_P2_STATUS_C (0x25 * 2)
+#define PR_TS_RX_P2_DATA_C (0x26 * 2)
+#define PR_TS_TX_P2_STATUS_C (0x27 * 2)
+#define PR_TS_TX_P2_DATA_C (0x28 * 2)
+
+#define PR_TS_STATUS_TS_AVAIL BIT(2)
+#define PR_TS_STATUS_TS_LOST BIT(3)
+
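+/* Stash the PTP classification result in skb->cb so that the RX hook and the
+ * deferred timestamp worker agree on the message type.
+ */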
+#define SKB_PTP_TYPE(__skb) (*(unsigned int *)((__skb)->cb))
+
+/* TX_TSTAMP_TIMEOUT: This limits the time spent polling for a TX
+ * timestamp. When working properly, hardware will produce a timestamp
+ * within 1 ms. Software may encounter delays, so the timeout is set
+ * accordingly.
+ */
+#define TX_TSTAMP_TIMEOUT msecs_to_jiffies(40)
+
+int hellcreek_port_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct ifreq *ifr);
+int hellcreek_port_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct ifreq *ifr);
+
+bool hellcreek_port_rxtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *clone, unsigned int type);
+void hellcreek_port_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb);
+
+int hellcreek_get_ts_info(struct dsa_switch *ds, int port,
+ struct ethtool_ts_info *info);
+
+long hellcreek_hwtstamp_work(struct ptp_clock_info *ptp);
+
+int hellcreek_hwtstamp_setup(struct hellcreek *chip);
+void hellcreek_hwtstamp_free(struct hellcreek *chip);
+
+#endif /* _HELLCREEK_HWTSTAMP_H_ */
diff --git a/drivers/net/dsa/hirschmann/hellcreek_ptp.c b/drivers/net/dsa/hirschmann/hellcreek_ptp.c
new file mode 100644
index 000000000..b28baab6d
--- /dev/null
+++ b/drivers/net/dsa/hirschmann/hellcreek_ptp.c
@@ -0,0 +1,453 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * DSA driver for:
+ * Hirschmann Hellcreek TSN switch.
+ *
+ * Copyright (C) 2019,2020 Hochschule Offenburg
+ * Copyright (C) 2019,2020 Linutronix GmbH
+ * Authors: Kamil Alkhouri <kamil.alkhouri@hs-offenburg.de>
+ * Kurt Kanzenbach <kurt@linutronix.de>
+ */
+
+#include <linux/ptp_clock_kernel.h>
+#include "hellcreek.h"
+#include "hellcreek_ptp.h"
+#include "hellcreek_hwtstamp.h"
+
+u16 hellcreek_ptp_read(struct hellcreek *hellcreek, unsigned int offset)
+{
+ return readw(hellcreek->ptp_base + offset);
+}
+
+void hellcreek_ptp_write(struct hellcreek *hellcreek, u16 data,
+ unsigned int offset)
+{
+ writew(data, hellcreek->ptp_base + offset);
+}
+
+/* Get nanoseconds from PTP clock */
+static u64 hellcreek_ptp_clock_read(struct hellcreek *hellcreek)
+{
+ u16 nsl, nsh;
+
+ /* Take a snapshot */
+ hellcreek_ptp_write(hellcreek, PR_COMMAND_C_SS, PR_COMMAND_C);
+
+	/* The time of day is saved as 96 bits. However, due to hardware
+	 * limitations the seconds are not or only partly kept in the PTP
+	 * core. Currently only three bits for the seconds are available. That's
+	 * why only the nanoseconds are used and the seconds are tracked in
+	 * software. Anyway, due to internal locking, all five registers should
+	 * be read.
+ */
+ nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
+ nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
+ nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
+ nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
+ nsl = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
+
+ return (u64)nsl | ((u64)nsh << 16);
+}
+
+static u64 __hellcreek_ptp_gettime(struct hellcreek *hellcreek)
+{
+ u64 ns;
+
+ ns = hellcreek_ptp_clock_read(hellcreek);
+ if (ns < hellcreek->last_ts)
+ hellcreek->seconds++;
+ hellcreek->last_ts = ns;
+ ns += hellcreek->seconds * NSEC_PER_SEC;
+
+ return ns;
+}
+
+/* Retrieve the seconds part, in nanoseconds, for a packet timestamped with
+ * @ns. It has to be checked whether an overflow occurred between the packet's
+ * arrival and now. If so, use the correct seconds (-1) for calculating the
+ * packet arrival time.
+ */
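+/* Example: a packet was stamped at ns = 0.9 * NSEC_PER_SEC and the clock now
+ * reads 0.1 * NSEC_PER_SEC. The wrap has already incremented 'seconds', and
+ * since last_ts <= ns the packet belongs to the previous second, hence the
+ * (seconds - 1) branch.
+ */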
+u64 hellcreek_ptp_gettime_seconds(struct hellcreek *hellcreek, u64 ns)
+{
+ u64 s;
+
+ __hellcreek_ptp_gettime(hellcreek);
+ if (hellcreek->last_ts > ns)
+ s = hellcreek->seconds * NSEC_PER_SEC;
+ else
+ s = (hellcreek->seconds - 1) * NSEC_PER_SEC;
+
+ return s;
+}
+
+static int hellcreek_ptp_gettime(struct ptp_clock_info *ptp,
+ struct timespec64 *ts)
+{
+ struct hellcreek *hellcreek = ptp_to_hellcreek(ptp);
+ u64 ns;
+
+ mutex_lock(&hellcreek->ptp_lock);
+ ns = __hellcreek_ptp_gettime(hellcreek);
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+static int hellcreek_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct hellcreek *hellcreek = ptp_to_hellcreek(ptp);
+ u16 secl, nsh, nsl;
+
+ secl = ts->tv_sec & 0xffff;
+ nsh = ((u32)ts->tv_nsec & 0xffff0000) >> 16;
+ nsl = ts->tv_nsec & 0xffff;
+
+ mutex_lock(&hellcreek->ptp_lock);
+
+ /* Update overflow data structure */
+ hellcreek->seconds = ts->tv_sec;
+ hellcreek->last_ts = ts->tv_nsec;
+
+ /* Set time in clock */
+ hellcreek_ptp_write(hellcreek, 0x00, PR_CLOCK_WRITE_C);
+ hellcreek_ptp_write(hellcreek, 0x00, PR_CLOCK_WRITE_C);
+ hellcreek_ptp_write(hellcreek, secl, PR_CLOCK_WRITE_C);
+ hellcreek_ptp_write(hellcreek, nsh, PR_CLOCK_WRITE_C);
+ hellcreek_ptp_write(hellcreek, nsl, PR_CLOCK_WRITE_C);
+
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ return 0;
+}
+
+static int hellcreek_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct hellcreek *hellcreek = ptp_to_hellcreek(ptp);
+ u16 negative = 0, addendh, addendl;
+ u32 addend;
+ u64 adj;
+
+ if (scaled_ppm < 0) {
+ negative = 1;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ /* IP-Core adjusts the nominal frequency by adding or subtracting 1 ns
+ * from the 8 ns (period of the oscillator) every time the accumulator
+ * register overflows. The value stored in the addend register is added
+ * to the accumulator register every 8 ns.
+ *
+ * addend value = (2^30 * accumulator_overflow_rate) /
+ * oscillator_frequency
+ * where:
+ *
+ * oscillator_frequency = 125 MHz
+ * accumulator_overflow_rate = 125 MHz * scaled_ppm * 2^-16 * 10^-6 * 8
+ */
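+	/* Worked example: scaled_ppm = 65536 (i.e. +1 ppm) yields
+	 * addend = (65536 << 11) / 15625 = 134217728 / 15625 ~= 8589.
+	 */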
+ adj = scaled_ppm;
+ adj <<= 11;
+ addend = (u32)div_u64(adj, 15625);
+
+ addendh = (addend & 0xffff0000) >> 16;
+ addendl = addend & 0xffff;
+
+ negative = (negative << 15) & 0x8000;
+
+ mutex_lock(&hellcreek->ptp_lock);
+
+ /* Set drift register */
+ hellcreek_ptp_write(hellcreek, negative, PR_CLOCK_DRIFT_C);
+ hellcreek_ptp_write(hellcreek, 0x00, PR_CLOCK_DRIFT_C);
+ hellcreek_ptp_write(hellcreek, 0x00, PR_CLOCK_DRIFT_C);
+ hellcreek_ptp_write(hellcreek, addendh, PR_CLOCK_DRIFT_C);
+ hellcreek_ptp_write(hellcreek, addendl, PR_CLOCK_DRIFT_C);
+
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ return 0;
+}
+
+static int hellcreek_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct hellcreek *hellcreek = ptp_to_hellcreek(ptp);
+ u16 negative = 0, counth, countl;
+ u32 count_val;
+
+	/* If the offset is larger than the IP-Core's slow offset resources,
+	 * don't consider slow adjustment. Rather, add the offset directly to
+	 * the current time.
+ */
+ if (abs(delta) > MAX_SLOW_OFFSET_ADJ) {
+ struct timespec64 now, then = ns_to_timespec64(delta);
+
+ hellcreek_ptp_gettime(ptp, &now);
+ now = timespec64_add(now, then);
+ hellcreek_ptp_settime(ptp, &now);
+
+ return 0;
+ }
+
+ if (delta < 0) {
+ negative = 1;
+ delta = -delta;
+ }
+
+ /* 'count_val' does not exceed the maximum register size (2^30) */
+ count_val = div_s64(delta, MAX_NS_PER_STEP);
+
+ counth = (count_val & 0xffff0000) >> 16;
+ countl = count_val & 0xffff;
+
+ negative = (negative << 15) & 0x8000;
+
+ mutex_lock(&hellcreek->ptp_lock);
+
+ /* Set offset write register */
+ hellcreek_ptp_write(hellcreek, negative, PR_CLOCK_OFFSET_C);
+ hellcreek_ptp_write(hellcreek, MAX_NS_PER_STEP, PR_CLOCK_OFFSET_C);
+ hellcreek_ptp_write(hellcreek, MIN_CLK_CYCLES_BETWEEN_STEPS,
+ PR_CLOCK_OFFSET_C);
+ hellcreek_ptp_write(hellcreek, countl, PR_CLOCK_OFFSET_C);
+ hellcreek_ptp_write(hellcreek, counth, PR_CLOCK_OFFSET_C);
+
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ return 0;
+}
+
+static int hellcreek_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static void hellcreek_ptp_overflow_check(struct work_struct *work)
+{
+ struct delayed_work *dw = to_delayed_work(work);
+ struct hellcreek *hellcreek;
+
+ hellcreek = dw_overflow_to_hellcreek(dw);
+
+ mutex_lock(&hellcreek->ptp_lock);
+ __hellcreek_ptp_gettime(hellcreek);
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ schedule_delayed_work(&hellcreek->overflow_work,
+ HELLCREEK_OVERFLOW_PERIOD);
+}
+
+static enum led_brightness hellcreek_get_brightness(struct hellcreek *hellcreek,
+ int led)
+{
+ return (hellcreek->status_out & led) ? 1 : 0;
+}
+
+static void hellcreek_set_brightness(struct hellcreek *hellcreek, int led,
+ enum led_brightness b)
+{
+ mutex_lock(&hellcreek->ptp_lock);
+
+ if (b)
+ hellcreek->status_out |= led;
+ else
+ hellcreek->status_out &= ~led;
+
+ hellcreek_ptp_write(hellcreek, hellcreek->status_out, STATUS_OUT);
+
+ mutex_unlock(&hellcreek->ptp_lock);
+}
+
+static void hellcreek_led_sync_good_set(struct led_classdev *ldev,
+ enum led_brightness b)
+{
+ struct hellcreek *hellcreek = led_to_hellcreek(ldev, led_sync_good);
+
+ hellcreek_set_brightness(hellcreek, STATUS_OUT_SYNC_GOOD, b);
+}
+
+static enum led_brightness hellcreek_led_sync_good_get(struct led_classdev *ldev)
+{
+ struct hellcreek *hellcreek = led_to_hellcreek(ldev, led_sync_good);
+
+ return hellcreek_get_brightness(hellcreek, STATUS_OUT_SYNC_GOOD);
+}
+
+static void hellcreek_led_is_gm_set(struct led_classdev *ldev,
+ enum led_brightness b)
+{
+ struct hellcreek *hellcreek = led_to_hellcreek(ldev, led_is_gm);
+
+ hellcreek_set_brightness(hellcreek, STATUS_OUT_IS_GM, b);
+}
+
+static enum led_brightness hellcreek_led_is_gm_get(struct led_classdev *ldev)
+{
+ struct hellcreek *hellcreek = led_to_hellcreek(ldev, led_is_gm);
+
+ return hellcreek_get_brightness(hellcreek, STATUS_OUT_IS_GM);
+}
+
+/* There are two available LEDs, internally called sync_good and is_gm. The
+ * user might want to use a different label and specify the default state.
+ * Take those properties from the device tree.
+ */
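+/* A minimal, hypothetical device tree fragment matching the code below (the
+ * first child node is taken as sync_good, the second as is_gm; see the
+ * binding documentation for the authoritative format):
+ *
+ *	leds {
+ *		led@0 {
+ *			reg = <0>;
+ *			label = "sync_good";
+ *			default-state = "on";
+ *		};
+ *		led@1 {
+ *			reg = <1>;
+ *			label = "is_gm";
+ *			default-state = "off";
+ *		};
+ *	};
+ */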
+static int hellcreek_led_setup(struct hellcreek *hellcreek)
+{
+ struct device_node *leds, *led = NULL;
+ const char *label, *state;
+ int ret = -EINVAL;
+
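+	/* of_find_node_by_name() drops a reference on the node it starts
+	 * searching from, hence the extra of_node_get() here.
+	 */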
+ of_node_get(hellcreek->dev->of_node);
+ leds = of_find_node_by_name(hellcreek->dev->of_node, "leds");
+ if (!leds) {
+ dev_err(hellcreek->dev, "No LEDs specified in device tree!\n");
+ return ret;
+ }
+
+ hellcreek->status_out = 0;
+
+ led = of_get_next_available_child(leds, led);
+ if (!led) {
+ dev_err(hellcreek->dev, "First LED not specified!\n");
+ goto out;
+ }
+
+ ret = of_property_read_string(led, "label", &label);
+ hellcreek->led_sync_good.name = ret ? "sync_good" : label;
+
+ ret = of_property_read_string(led, "default-state", &state);
+ if (!ret) {
+ if (!strcmp(state, "on"))
+ hellcreek->led_sync_good.brightness = 1;
+ else if (!strcmp(state, "off"))
+ hellcreek->led_sync_good.brightness = 0;
+ else if (!strcmp(state, "keep"))
+ hellcreek->led_sync_good.brightness =
+ hellcreek_get_brightness(hellcreek,
+ STATUS_OUT_SYNC_GOOD);
+ }
+
+ hellcreek->led_sync_good.max_brightness = 1;
+ hellcreek->led_sync_good.brightness_set = hellcreek_led_sync_good_set;
+ hellcreek->led_sync_good.brightness_get = hellcreek_led_sync_good_get;
+
+ led = of_get_next_available_child(leds, led);
+ if (!led) {
+ dev_err(hellcreek->dev, "Second LED not specified!\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = of_property_read_string(led, "label", &label);
+ hellcreek->led_is_gm.name = ret ? "is_gm" : label;
+
+ ret = of_property_read_string(led, "default-state", &state);
+ if (!ret) {
+ if (!strcmp(state, "on"))
+ hellcreek->led_is_gm.brightness = 1;
+ else if (!strcmp(state, "off"))
+ hellcreek->led_is_gm.brightness = 0;
+ else if (!strcmp(state, "keep"))
+ hellcreek->led_is_gm.brightness =
+ hellcreek_get_brightness(hellcreek,
+ STATUS_OUT_IS_GM);
+ }
+
+ hellcreek->led_is_gm.max_brightness = 1;
+ hellcreek->led_is_gm.brightness_set = hellcreek_led_is_gm_set;
+ hellcreek->led_is_gm.brightness_get = hellcreek_led_is_gm_get;
+
+ /* Set initial state */
+ if (hellcreek->led_sync_good.brightness == 1)
+ hellcreek_set_brightness(hellcreek, STATUS_OUT_SYNC_GOOD, 1);
+ if (hellcreek->led_is_gm.brightness == 1)
+ hellcreek_set_brightness(hellcreek, STATUS_OUT_IS_GM, 1);
+
+ /* Register both leds */
+ led_classdev_register(hellcreek->dev, &hellcreek->led_sync_good);
+ led_classdev_register(hellcreek->dev, &hellcreek->led_is_gm);
+
+ ret = 0;
+
+out:
+ of_node_put(leds);
+
+ return ret;
+}
+
+int hellcreek_ptp_setup(struct hellcreek *hellcreek)
+{
+ u16 status;
+ int ret;
+
+ /* Set up the overflow work */
+ INIT_DELAYED_WORK(&hellcreek->overflow_work,
+ hellcreek_ptp_overflow_check);
+
+ /* Setup PTP clock */
+ hellcreek->ptp_clock_info.owner = THIS_MODULE;
+ snprintf(hellcreek->ptp_clock_info.name,
+ sizeof(hellcreek->ptp_clock_info.name),
+		 "%s", dev_name(hellcreek->dev));
+
+ /* IP-Core can add up to 0.5 ns per 8 ns cycle, which means
+ * accumulator_overflow_rate shall not exceed 62.5 MHz (which adjusts
+ * the nominal frequency by 6.25%)
+ */
+ hellcreek->ptp_clock_info.max_adj = 62500000;
+ hellcreek->ptp_clock_info.n_alarm = 0;
+ hellcreek->ptp_clock_info.n_pins = 0;
+ hellcreek->ptp_clock_info.n_ext_ts = 0;
+ hellcreek->ptp_clock_info.n_per_out = 0;
+ hellcreek->ptp_clock_info.pps = 0;
+ hellcreek->ptp_clock_info.adjfine = hellcreek_ptp_adjfine;
+ hellcreek->ptp_clock_info.adjtime = hellcreek_ptp_adjtime;
+ hellcreek->ptp_clock_info.gettime64 = hellcreek_ptp_gettime;
+ hellcreek->ptp_clock_info.settime64 = hellcreek_ptp_settime;
+ hellcreek->ptp_clock_info.enable = hellcreek_ptp_enable;
+ hellcreek->ptp_clock_info.do_aux_work = hellcreek_hwtstamp_work;
+
+ hellcreek->ptp_clock = ptp_clock_register(&hellcreek->ptp_clock_info,
+ hellcreek->dev);
+ if (IS_ERR(hellcreek->ptp_clock))
+ return PTR_ERR(hellcreek->ptp_clock);
+
+ /* Enable the offset correction process, if no offset correction is
+ * already taking place
+ */
+ status = hellcreek_ptp_read(hellcreek, PR_CLOCK_STATUS_C);
+ if (!(status & PR_CLOCK_STATUS_C_OFS_ACT))
+ hellcreek_ptp_write(hellcreek,
+ status | PR_CLOCK_STATUS_C_ENA_OFS,
+ PR_CLOCK_STATUS_C);
+
+ /* Enable the drift correction process */
+ hellcreek_ptp_write(hellcreek, status | PR_CLOCK_STATUS_C_ENA_DRIFT,
+ PR_CLOCK_STATUS_C);
+
+ /* LED setup */
+ ret = hellcreek_led_setup(hellcreek);
+ if (ret) {
+ if (hellcreek->ptp_clock)
+ ptp_clock_unregister(hellcreek->ptp_clock);
+ return ret;
+ }
+
+ schedule_delayed_work(&hellcreek->overflow_work,
+ HELLCREEK_OVERFLOW_PERIOD);
+
+ return 0;
+}
+
+void hellcreek_ptp_free(struct hellcreek *hellcreek)
+{
+ led_classdev_unregister(&hellcreek->led_is_gm);
+ led_classdev_unregister(&hellcreek->led_sync_good);
+ cancel_delayed_work_sync(&hellcreek->overflow_work);
+ if (hellcreek->ptp_clock)
+ ptp_clock_unregister(hellcreek->ptp_clock);
+ hellcreek->ptp_clock = NULL;
+}
diff --git a/drivers/net/dsa/hirschmann/hellcreek_ptp.h b/drivers/net/dsa/hirschmann/hellcreek_ptp.h
new file mode 100644
index 000000000..0b51392c7
--- /dev/null
+++ b/drivers/net/dsa/hirschmann/hellcreek_ptp.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * DSA driver for:
+ * Hirschmann Hellcreek TSN switch.
+ *
+ * Copyright (C) 2019,2020 Hochschule Offenburg
+ * Copyright (C) 2019,2020 Linutronix GmbH
+ * Authors: Kurt Kanzenbach <kurt@linutronix.de>
+ * Kamil Alkhouri <kamil.alkhouri@hs-offenburg.de>
+ */
+
+#ifndef _HELLCREEK_PTP_H_
+#define _HELLCREEK_PTP_H_
+
+#include <linux/bitops.h>
+#include <linux/ptp_clock_kernel.h>
+
+#include "hellcreek.h"
+
+/* Every jump in time is 7 ns */
+#define MAX_NS_PER_STEP 7L
+
+/* Correct offset at every clock cycle */
+#define MIN_CLK_CYCLES_BETWEEN_STEPS 0
+
+/* Maximum available slow offset resources */
+#define MAX_SLOW_OFFSET_ADJ \
+ ((unsigned long long)((1 << 30) - 1) * MAX_NS_PER_STEP)
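+/* With 7 ns per step and a 30 bit wide counter this amounts to roughly 7.5
+ * seconds of maximum slow offset correction.
+ */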
+
+/* four times a second overflow check */
+#define HELLCREEK_OVERFLOW_PERIOD (HZ / 4)
+
+/* PTP Register */
+#define PR_SETTINGS_C (0x09 * 2)
+#define PR_SETTINGS_C_RES3TS BIT(4)
+#define PR_SETTINGS_C_TS_SRC_TK_SHIFT 8
+#define PR_SETTINGS_C_TS_SRC_TK_MASK GENMASK(9, 8)
+#define PR_COMMAND_C (0x0a * 2)
+#define PR_COMMAND_C_SS BIT(0)
+
+#define PR_CLOCK_STATUS_C (0x0c * 2)
+#define PR_CLOCK_STATUS_C_ENA_DRIFT BIT(12)
+#define PR_CLOCK_STATUS_C_OFS_ACT BIT(13)
+#define PR_CLOCK_STATUS_C_ENA_OFS BIT(14)
+
+#define PR_CLOCK_READ_C (0x0d * 2)
+#define PR_CLOCK_WRITE_C (0x0e * 2)
+#define PR_CLOCK_OFFSET_C (0x0f * 2)
+#define PR_CLOCK_DRIFT_C (0x10 * 2)
+
+#define PR_SS_FREE_DATA_C (0x12 * 2)
+#define PR_SS_SYNT_DATA_C (0x14 * 2)
+#define PR_SS_SYNC_DATA_C (0x16 * 2)
+#define PR_SS_DRAC_DATA_C (0x18 * 2)
+
+#define STATUS_OUT (0x60 * 2)
+#define STATUS_OUT_SYNC_GOOD BIT(0)
+#define STATUS_OUT_IS_GM BIT(1)
+
+int hellcreek_ptp_setup(struct hellcreek *hellcreek);
+void hellcreek_ptp_free(struct hellcreek *hellcreek);
+u16 hellcreek_ptp_read(struct hellcreek *hellcreek, unsigned int offset);
+void hellcreek_ptp_write(struct hellcreek *hellcreek, u16 data,
+ unsigned int offset);
+u64 hellcreek_ptp_gettime_seconds(struct hellcreek *hellcreek, u64 ns);
+
+#define ptp_to_hellcreek(ptp) \
+ container_of(ptp, struct hellcreek, ptp_clock_info)
+
+#define dw_overflow_to_hellcreek(dw) \
+ container_of(dw, struct hellcreek, overflow_work)
+
+#define led_to_hellcreek(ldev, led) \
+ container_of(ldev, struct hellcreek, led)
+
+#endif /* _HELLCREEK_PTP_H_ */
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
new file mode 100644
index 000000000..a08e7cbb2
--- /dev/null
+++ b/drivers/net/dsa/lan9303-core.c
@@ -0,0 +1,1416 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017 Pengutronix, Juergen Borleis <kernel@pengutronix.de>
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regmap.h>
+#include <linux/mutex.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/etherdevice.h>
+
+#include "lan9303.h"
+
+#define LAN9303_NUM_PORTS 3
+
+/* 13.2 System Control and Status Registers
+ * Multiply register number by 4 to get address offset.
+ */
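+/* Example: register number 0x14 (LAN9303_CHIP_REV) lives at byte offset
+ * 0x50.
+ */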
+#define LAN9303_CHIP_REV 0x14
+# define LAN9303_CHIP_ID 0x9303
+# define LAN9352_CHIP_ID 0x9352
+# define LAN9353_CHIP_ID 0x9353
+# define LAN9354_CHIP_ID 0x9354
+# define LAN9355_CHIP_ID 0x9355
+#define LAN9303_IRQ_CFG 0x15
+# define LAN9303_IRQ_CFG_IRQ_ENABLE BIT(8)
+# define LAN9303_IRQ_CFG_IRQ_POL BIT(4)
+# define LAN9303_IRQ_CFG_IRQ_TYPE BIT(0)
+#define LAN9303_INT_STS 0x16
+# define LAN9303_INT_STS_PHY_INT2 BIT(27)
+# define LAN9303_INT_STS_PHY_INT1 BIT(26)
+#define LAN9303_INT_EN 0x17
+# define LAN9303_INT_EN_PHY_INT2_EN BIT(27)
+# define LAN9303_INT_EN_PHY_INT1_EN BIT(26)
+#define LAN9303_BYTE_ORDER 0x19
+#define LAN9303_HW_CFG 0x1D
+# define LAN9303_HW_CFG_READY BIT(27)
+# define LAN9303_HW_CFG_AMDX_EN_PORT2 BIT(26)
+# define LAN9303_HW_CFG_AMDX_EN_PORT1 BIT(25)
+#define LAN9303_PMI_DATA 0x29
+#define LAN9303_PMI_ACCESS 0x2A
+# define LAN9303_PMI_ACCESS_PHY_ADDR(x) (((x) & 0x1f) << 11)
+# define LAN9303_PMI_ACCESS_MIIRINDA(x) (((x) & 0x1f) << 6)
+# define LAN9303_PMI_ACCESS_MII_BUSY BIT(0)
+# define LAN9303_PMI_ACCESS_MII_WRITE BIT(1)
+#define LAN9303_MANUAL_FC_1 0x68
+#define LAN9303_MANUAL_FC_2 0x69
+#define LAN9303_MANUAL_FC_0 0x6a
+#define LAN9303_SWITCH_CSR_DATA 0x6b
+#define LAN9303_SWITCH_CSR_CMD 0x6c
+#define LAN9303_SWITCH_CSR_CMD_BUSY BIT(31)
+#define LAN9303_SWITCH_CSR_CMD_RW BIT(30)
+#define LAN9303_SWITCH_CSR_CMD_LANES (BIT(19) | BIT(18) | BIT(17) | BIT(16))
+#define LAN9303_VIRT_PHY_BASE 0x70
+#define LAN9303_VIRT_SPECIAL_CTRL 0x77
+#define LAN9303_VIRT_SPECIAL_TURBO BIT(10) /* Turbo MII Enable */
+
+/* 13.4 Switch Fabric Control and Status Registers
+ * Accessed indirectly via SWITCH_CSR_CMD, SWITCH_CSR_DATA.
+ */
+#define LAN9303_SW_DEV_ID 0x0000
+#define LAN9303_SW_RESET 0x0001
+#define LAN9303_SW_RESET_RESET BIT(0)
+#define LAN9303_SW_IMR 0x0004
+#define LAN9303_SW_IPR 0x0005
+#define LAN9303_MAC_VER_ID_0 0x0400
+#define LAN9303_MAC_RX_CFG_0 0x0401
+# define LAN9303_MAC_RX_CFG_X_REJECT_MAC_TYPES BIT(1)
+# define LAN9303_MAC_RX_CFG_X_RX_ENABLE BIT(0)
+#define LAN9303_MAC_RX_UNDSZE_CNT_0 0x0410
+#define LAN9303_MAC_RX_64_CNT_0 0x0411
+#define LAN9303_MAC_RX_127_CNT_0 0x0412
+#define LAN9303_MAC_RX_255_CNT_0 0x0413
+#define LAN9303_MAC_RX_511_CNT_0 0x0414
+#define LAN9303_MAC_RX_1023_CNT_0 0x0415
+#define LAN9303_MAC_RX_MAX_CNT_0 0x0416
+#define LAN9303_MAC_RX_OVRSZE_CNT_0 0x0417
+#define LAN9303_MAC_RX_PKTOK_CNT_0 0x0418
+#define LAN9303_MAC_RX_CRCERR_CNT_0 0x0419
+#define LAN9303_MAC_RX_MULCST_CNT_0 0x041a
+#define LAN9303_MAC_RX_BRDCST_CNT_0 0x041b
+#define LAN9303_MAC_RX_PAUSE_CNT_0 0x041c
+#define LAN9303_MAC_RX_FRAG_CNT_0 0x041d
+#define LAN9303_MAC_RX_JABB_CNT_0 0x041e
+#define LAN9303_MAC_RX_ALIGN_CNT_0 0x041f
+#define LAN9303_MAC_RX_PKTLEN_CNT_0 0x0420
+#define LAN9303_MAC_RX_GOODPKTLEN_CNT_0 0x0421
+#define LAN9303_MAC_RX_SYMBL_CNT_0 0x0422
+#define LAN9303_MAC_RX_CTLFRM_CNT_0 0x0423
+
+#define LAN9303_MAC_TX_CFG_0 0x0440
+# define LAN9303_MAC_TX_CFG_X_TX_IFG_CONFIG_DEFAULT (21 << 2)
+# define LAN9303_MAC_TX_CFG_X_TX_PAD_ENABLE BIT(1)
+# define LAN9303_MAC_TX_CFG_X_TX_ENABLE BIT(0)
+#define LAN9303_MAC_TX_DEFER_CNT_0 0x0451
+#define LAN9303_MAC_TX_PAUSE_CNT_0 0x0452
+#define LAN9303_MAC_TX_PKTOK_CNT_0 0x0453
+#define LAN9303_MAC_TX_64_CNT_0 0x0454
+#define LAN9303_MAC_TX_127_CNT_0 0x0455
+#define LAN9303_MAC_TX_255_CNT_0 0x0456
+#define LAN9303_MAC_TX_511_CNT_0 0x0457
+#define LAN9303_MAC_TX_1023_CNT_0 0x0458
+#define LAN9303_MAC_TX_MAX_CNT_0 0x0459
+#define LAN9303_MAC_TX_UNDSZE_CNT_0 0x045a
+#define LAN9303_MAC_TX_PKTLEN_CNT_0 0x045c
+#define LAN9303_MAC_TX_BRDCST_CNT_0 0x045d
+#define LAN9303_MAC_TX_MULCST_CNT_0 0x045e
+#define LAN9303_MAC_TX_LATECOL_0 0x045f
+#define LAN9303_MAC_TX_EXCOL_CNT_0 0x0460
+#define LAN9303_MAC_TX_SNGLECOL_CNT_0 0x0461
+#define LAN9303_MAC_TX_MULTICOL_CNT_0 0x0462
+#define LAN9303_MAC_TX_TOTALCOL_CNT_0 0x0463
+
+#define LAN9303_MAC_VER_ID_1 0x0800
+#define LAN9303_MAC_RX_CFG_1 0x0801
+#define LAN9303_MAC_TX_CFG_1 0x0840
+#define LAN9303_MAC_VER_ID_2 0x0c00
+#define LAN9303_MAC_RX_CFG_2 0x0c01
+#define LAN9303_MAC_TX_CFG_2 0x0c40
+#define LAN9303_SWE_ALR_CMD 0x1800
+# define LAN9303_ALR_CMD_MAKE_ENTRY BIT(2)
+# define LAN9303_ALR_CMD_GET_FIRST BIT(1)
+# define LAN9303_ALR_CMD_GET_NEXT BIT(0)
+#define LAN9303_SWE_ALR_WR_DAT_0 0x1801
+#define LAN9303_SWE_ALR_WR_DAT_1 0x1802
+# define LAN9303_ALR_DAT1_VALID BIT(26)
+# define LAN9303_ALR_DAT1_END_OF_TABL BIT(25)
+# define LAN9303_ALR_DAT1_AGE_OVERRID BIT(25)
+# define LAN9303_ALR_DAT1_STATIC BIT(24)
+# define LAN9303_ALR_DAT1_PORT_BITOFFS 16
+# define LAN9303_ALR_DAT1_PORT_MASK (7 << LAN9303_ALR_DAT1_PORT_BITOFFS)
+#define LAN9303_SWE_ALR_RD_DAT_0 0x1805
+#define LAN9303_SWE_ALR_RD_DAT_1 0x1806
+#define LAN9303_SWE_ALR_CMD_STS 0x1808
+# define ALR_STS_MAKE_PEND BIT(0)
+#define LAN9303_SWE_VLAN_CMD 0x180b
+# define LAN9303_SWE_VLAN_CMD_RNW BIT(5)
+# define LAN9303_SWE_VLAN_CMD_PVIDNVLAN BIT(4)
+#define LAN9303_SWE_VLAN_WR_DATA 0x180c
+#define LAN9303_SWE_VLAN_RD_DATA 0x180e
+# define LAN9303_SWE_VLAN_MEMBER_PORT2 BIT(17)
+# define LAN9303_SWE_VLAN_UNTAG_PORT2 BIT(16)
+# define LAN9303_SWE_VLAN_MEMBER_PORT1 BIT(15)
+# define LAN9303_SWE_VLAN_UNTAG_PORT1 BIT(14)
+# define LAN9303_SWE_VLAN_MEMBER_PORT0 BIT(13)
+# define LAN9303_SWE_VLAN_UNTAG_PORT0 BIT(12)
+#define LAN9303_SWE_VLAN_CMD_STS 0x1810
+#define LAN9303_SWE_GLB_INGRESS_CFG 0x1840
+# define LAN9303_SWE_GLB_INGR_IGMP_TRAP BIT(7)
+# define LAN9303_SWE_GLB_INGR_IGMP_PORT(p) BIT(10 + (p))
+#define LAN9303_SWE_PORT_STATE 0x1843
+# define LAN9303_SWE_PORT_STATE_FORWARDING_PORT2 (0)
+# define LAN9303_SWE_PORT_STATE_LEARNING_PORT2 BIT(5)
+# define LAN9303_SWE_PORT_STATE_BLOCKING_PORT2 BIT(4)
+# define LAN9303_SWE_PORT_STATE_FORWARDING_PORT1 (0)
+# define LAN9303_SWE_PORT_STATE_LEARNING_PORT1 BIT(3)
+# define LAN9303_SWE_PORT_STATE_BLOCKING_PORT1 BIT(2)
+# define LAN9303_SWE_PORT_STATE_FORWARDING_PORT0 (0)
+# define LAN9303_SWE_PORT_STATE_LEARNING_PORT0 BIT(1)
+# define LAN9303_SWE_PORT_STATE_BLOCKING_PORT0 BIT(0)
+# define LAN9303_SWE_PORT_STATE_DISABLED_PORT0 (3)
+#define LAN9303_SWE_PORT_MIRROR 0x1846
+# define LAN9303_SWE_PORT_MIRROR_SNIFF_ALL BIT(8)
+# define LAN9303_SWE_PORT_MIRROR_SNIFFER_PORT2 BIT(7)
+# define LAN9303_SWE_PORT_MIRROR_SNIFFER_PORT1 BIT(6)
+# define LAN9303_SWE_PORT_MIRROR_SNIFFER_PORT0 BIT(5)
+# define LAN9303_SWE_PORT_MIRROR_MIRRORED_PORT2 BIT(4)
+# define LAN9303_SWE_PORT_MIRROR_MIRRORED_PORT1 BIT(3)
+# define LAN9303_SWE_PORT_MIRROR_MIRRORED_PORT0 BIT(2)
+# define LAN9303_SWE_PORT_MIRROR_ENABLE_RX_MIRRORING BIT(1)
+# define LAN9303_SWE_PORT_MIRROR_ENABLE_TX_MIRRORING BIT(0)
+# define LAN9303_SWE_PORT_MIRROR_DISABLED 0
+#define LAN9303_SWE_INGRESS_PORT_TYPE 0x1847
+#define LAN9303_SWE_INGRESS_PORT_TYPE_VLAN 3
+#define LAN9303_BM_CFG 0x1c00
+#define LAN9303_BM_EGRSS_PORT_TYPE 0x1c0c
+# define LAN9303_BM_EGRSS_PORT_TYPE_SPECIAL_TAG_PORT2 (BIT(17) | BIT(16))
+# define LAN9303_BM_EGRSS_PORT_TYPE_SPECIAL_TAG_PORT1 (BIT(9) | BIT(8))
+# define LAN9303_BM_EGRSS_PORT_TYPE_SPECIAL_TAG_PORT0 (BIT(1) | BIT(0))
+
+#define LAN9303_SWITCH_PORT_REG(port, reg0) (0x400 * (port) + (reg0))
+
+/* the built-in PHYs are of type LAN911X */
+#define MII_LAN911X_SPECIAL_MODES 0x12
+#define MII_LAN911X_SPECIAL_CONTROL_STATUS 0x1f
+
+static const struct regmap_range lan9303_valid_regs[] = {
+ regmap_reg_range(0x14, 0x17), /* misc, interrupt */
+ regmap_reg_range(0x19, 0x19), /* endian test */
+ regmap_reg_range(0x1d, 0x1d), /* hardware config */
+ regmap_reg_range(0x23, 0x24), /* general purpose timer */
+ regmap_reg_range(0x27, 0x27), /* counter */
+ regmap_reg_range(0x29, 0x2a), /* PMI index regs */
+ regmap_reg_range(0x68, 0x6a), /* flow control */
+ regmap_reg_range(0x6b, 0x6c), /* switch fabric indirect regs */
+ regmap_reg_range(0x6d, 0x6f), /* misc */
+ regmap_reg_range(0x70, 0x77), /* virtual phy */
+ regmap_reg_range(0x78, 0x7a), /* GPIO */
+ regmap_reg_range(0x7c, 0x7e), /* MAC & reset */
+ regmap_reg_range(0x80, 0xb7), /* switch fabric direct regs (wr only) */
+};
+
+static const struct regmap_range lan9303_reserved_ranges[] = {
+ regmap_reg_range(0x00, 0x13),
+ regmap_reg_range(0x18, 0x18),
+ regmap_reg_range(0x1a, 0x1c),
+ regmap_reg_range(0x1e, 0x22),
+ regmap_reg_range(0x25, 0x26),
+ regmap_reg_range(0x28, 0x28),
+ regmap_reg_range(0x2b, 0x67),
+ regmap_reg_range(0x7b, 0x7b),
+ regmap_reg_range(0x7f, 0x7f),
+ regmap_reg_range(0xb8, 0xff),
+};
+
+const struct regmap_access_table lan9303_register_set = {
+ .yes_ranges = lan9303_valid_regs,
+ .n_yes_ranges = ARRAY_SIZE(lan9303_valid_regs),
+ .no_ranges = lan9303_reserved_ranges,
+ .n_no_ranges = ARRAY_SIZE(lan9303_reserved_ranges),
+};
+EXPORT_SYMBOL(lan9303_register_set);
+
+static int lan9303_read(struct regmap *regmap, unsigned int offset, u32 *reg)
+{
+ int ret, i;
+
+	/* We can lose arbitration in the I2C case, because the device
+ * tries to detect and read an external EEPROM after reset and acts as
+ * a master on the shared I2C bus itself. This conflicts with our
+ * attempts to access the device as a slave at the same moment.
+ */
+ for (i = 0; i < 5; i++) {
+ ret = regmap_read(regmap, offset, reg);
+ if (!ret)
+ return 0;
+ if (ret != -EAGAIN)
+ break;
+ msleep(500);
+ }
+
+ return -EIO;
+}
+
+static int lan9303_read_wait(struct lan9303 *chip, int offset, u32 mask)
+{
+ int i;
+
+ for (i = 0; i < 25; i++) {
+ u32 reg;
+ int ret;
+
+ ret = lan9303_read(chip->regmap, offset, &reg);
+ if (ret) {
+ dev_err(chip->dev, "%s failed to read offset %d: %d\n",
+ __func__, offset, ret);
+ return ret;
+ }
+ if (!(reg & mask))
+ return 0;
+ usleep_range(1000, 2000);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int lan9303_virt_phy_reg_read(struct lan9303 *chip, int regnum)
+{
+ int ret;
+ u32 val;
+
+ if (regnum > MII_EXPANSION)
+ return -EINVAL;
+
+ ret = lan9303_read(chip->regmap, LAN9303_VIRT_PHY_BASE + regnum, &val);
+ if (ret)
+ return ret;
+
+ return val & 0xffff;
+}
+
+static int lan9303_virt_phy_reg_write(struct lan9303 *chip, int regnum, u16 val)
+{
+ if (regnum > MII_EXPANSION)
+ return -EINVAL;
+
+ return regmap_write(chip->regmap, LAN9303_VIRT_PHY_BASE + regnum, val);
+}
+
+static int lan9303_indirect_phy_wait_for_completion(struct lan9303 *chip)
+{
+ return lan9303_read_wait(chip, LAN9303_PMI_ACCESS,
+ LAN9303_PMI_ACCESS_MII_BUSY);
+}
+
+static int lan9303_indirect_phy_read(struct lan9303 *chip, int addr, int regnum)
+{
+ int ret;
+ u32 val;
+
+ val = LAN9303_PMI_ACCESS_PHY_ADDR(addr);
+ val |= LAN9303_PMI_ACCESS_MIIRINDA(regnum);
+
+ mutex_lock(&chip->indirect_mutex);
+
+ ret = lan9303_indirect_phy_wait_for_completion(chip);
+ if (ret)
+ goto on_error;
+
+ /* start the MII read cycle */
+ ret = regmap_write(chip->regmap, LAN9303_PMI_ACCESS, val);
+ if (ret)
+ goto on_error;
+
+ ret = lan9303_indirect_phy_wait_for_completion(chip);
+ if (ret)
+ goto on_error;
+
+ /* read the result of this operation */
+ ret = lan9303_read(chip->regmap, LAN9303_PMI_DATA, &val);
+ if (ret)
+ goto on_error;
+
+ mutex_unlock(&chip->indirect_mutex);
+
+ return val & 0xffff;
+
+on_error:
+ mutex_unlock(&chip->indirect_mutex);
+ return ret;
+}
+
+static int lan9303_indirect_phy_write(struct lan9303 *chip, int addr,
+ int regnum, u16 val)
+{
+ int ret;
+ u32 reg;
+
+ reg = LAN9303_PMI_ACCESS_PHY_ADDR(addr);
+ reg |= LAN9303_PMI_ACCESS_MIIRINDA(regnum);
+ reg |= LAN9303_PMI_ACCESS_MII_WRITE;
+
+ mutex_lock(&chip->indirect_mutex);
+
+ ret = lan9303_indirect_phy_wait_for_completion(chip);
+ if (ret)
+ goto on_error;
+
+ /* write the data first... */
+ ret = regmap_write(chip->regmap, LAN9303_PMI_DATA, val);
+ if (ret)
+ goto on_error;
+
+ /* ...then start the MII write cycle */
+ ret = regmap_write(chip->regmap, LAN9303_PMI_ACCESS, reg);
+
+on_error:
+ mutex_unlock(&chip->indirect_mutex);
+ return ret;
+}
+
+const struct lan9303_phy_ops lan9303_indirect_phy_ops = {
+ .phy_read = lan9303_indirect_phy_read,
+ .phy_write = lan9303_indirect_phy_write,
+};
+EXPORT_SYMBOL_GPL(lan9303_indirect_phy_ops);
+
+static int lan9303_switch_wait_for_completion(struct lan9303 *chip)
+{
+ return lan9303_read_wait(chip, LAN9303_SWITCH_CSR_CMD,
+ LAN9303_SWITCH_CSR_CMD_BUSY);
+}
+
+static int lan9303_write_switch_reg(struct lan9303 *chip, u16 regnum, u32 val)
+{
+ u32 reg;
+ int ret;
+
+ reg = regnum;
+ reg |= LAN9303_SWITCH_CSR_CMD_LANES;
+ reg |= LAN9303_SWITCH_CSR_CMD_BUSY;
+
+ mutex_lock(&chip->indirect_mutex);
+
+ ret = lan9303_switch_wait_for_completion(chip);
+ if (ret)
+ goto on_error;
+
+ ret = regmap_write(chip->regmap, LAN9303_SWITCH_CSR_DATA, val);
+ if (ret) {
+ dev_err(chip->dev, "Failed to write csr data reg: %d\n", ret);
+ goto on_error;
+ }
+
+ /* trigger write */
+ ret = regmap_write(chip->regmap, LAN9303_SWITCH_CSR_CMD, reg);
+ if (ret)
+ dev_err(chip->dev, "Failed to write csr command reg: %d\n",
+ ret);
+
+on_error:
+ mutex_unlock(&chip->indirect_mutex);
+ return ret;
+}
+
+static int lan9303_read_switch_reg(struct lan9303 *chip, u16 regnum, u32 *val)
+{
+ u32 reg;
+ int ret;
+
+ reg = regnum;
+ reg |= LAN9303_SWITCH_CSR_CMD_LANES;
+ reg |= LAN9303_SWITCH_CSR_CMD_RW;
+ reg |= LAN9303_SWITCH_CSR_CMD_BUSY;
+
+ mutex_lock(&chip->indirect_mutex);
+
+ ret = lan9303_switch_wait_for_completion(chip);
+ if (ret)
+ goto on_error;
+
+ /* trigger read */
+ ret = regmap_write(chip->regmap, LAN9303_SWITCH_CSR_CMD, reg);
+ if (ret) {
+ dev_err(chip->dev, "Failed to write csr command reg: %d\n",
+ ret);
+ goto on_error;
+ }
+
+ ret = lan9303_switch_wait_for_completion(chip);
+ if (ret)
+ goto on_error;
+
+ ret = lan9303_read(chip->regmap, LAN9303_SWITCH_CSR_DATA, val);
+ if (ret)
+ dev_err(chip->dev, "Failed to read csr data reg: %d\n", ret);
+on_error:
+ mutex_unlock(&chip->indirect_mutex);
+ return ret;
+}
+
+static int lan9303_write_switch_reg_mask(struct lan9303 *chip, u16 regnum,
+ u32 val, u32 mask)
+{
+ int ret;
+ u32 reg;
+
+ ret = lan9303_read_switch_reg(chip, regnum, &reg);
+ if (ret)
+ return ret;
+
+ reg = (reg & ~mask) | val;
+
+ return lan9303_write_switch_reg(chip, regnum, reg);
+}
+
+static int lan9303_write_switch_port(struct lan9303 *chip, int port,
+ u16 regnum, u32 val)
+{
+ return lan9303_write_switch_reg(
+ chip, LAN9303_SWITCH_PORT_REG(port, regnum), val);
+}
+
+static int lan9303_read_switch_port(struct lan9303 *chip, int port,
+ u16 regnum, u32 *val)
+{
+ return lan9303_read_switch_reg(
+ chip, LAN9303_SWITCH_PORT_REG(port, regnum), val);
+}
+
+static int lan9303_detect_phy_setup(struct lan9303 *chip)
+{
+ int reg;
+
+ /* Calculate chip->phy_addr_base:
+ * Depending on the 'phy_addr_sel_strap' setting, the three phys are
+ * using IDs 0-1-2 or IDs 1-2-3. We cannot read back the
+	 * 'phy_addr_sel_strap' setting directly, so we need to test which
+	 * configuration is active:
+ * Special reg 18 of phy 3 reads as 0x0000, if 'phy_addr_sel_strap' is 0
+ * and the IDs are 0-1-2, else it contains something different from
+ * 0x0000, which means 'phy_addr_sel_strap' is 1 and the IDs are 1-2-3.
+ * 0xffff is returned on MDIO read with no response.
+ */
+ reg = chip->ops->phy_read(chip, 3, MII_LAN911X_SPECIAL_MODES);
+ if (reg < 0) {
+ dev_err(chip->dev, "Failed to detect phy config: %d\n", reg);
+ return reg;
+ }
+
+ chip->phy_addr_base = reg != 0 && reg != 0xffff;
+
+ dev_dbg(chip->dev, "Phy setup '%s' detected\n",
+ chip->phy_addr_base ? "1-2-3" : "0-1-2");
+
+ return 0;
+}
+
+/* Map ALR-port bits to port bitmap, and back */
+static const int alrport_2_portmap[] = { 1, 2, 4, 0, 3, 5, 6, 7 };
+static const int portmap_2_alrport[] = { 3, 0, 1, 4, 2, 5, 6, 7 };
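+/* Example: ALR port code 2 corresponds to port bitmap 0x4 (port 2 only),
+ * code 4 to bitmap 0x3 (ports 0 and 1); portmap_2_alrport[] is the exact
+ * inverse permutation of alrport_2_portmap[].
+ */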
+
+/* Return pointer to first free ALR cache entry, return NULL if none */
+static struct lan9303_alr_cache_entry *
+lan9303_alr_cache_find_free(struct lan9303 *chip)
+{
+ int i;
+ struct lan9303_alr_cache_entry *entr = chip->alr_cache;
+
+ for (i = 0; i < LAN9303_NUM_ALR_RECORDS; i++, entr++)
+ if (entr->port_map == 0)
+ return entr;
+
+ return NULL;
+}
+
+/* Return pointer to ALR cache entry matching MAC address */
+static struct lan9303_alr_cache_entry *
+lan9303_alr_cache_find_mac(struct lan9303 *chip, const u8 *mac_addr)
+{
+ int i;
+ struct lan9303_alr_cache_entry *entr = chip->alr_cache;
+
+ BUILD_BUG_ON_MSG(sizeof(struct lan9303_alr_cache_entry) & 1,
+			 "ether_addr_equal requires u16 alignment");
+
+ for (i = 0; i < LAN9303_NUM_ALR_RECORDS; i++, entr++)
+ if (ether_addr_equal(entr->mac_addr, mac_addr))
+ return entr;
+
+ return NULL;
+}
+
+static int lan9303_csr_reg_wait(struct lan9303 *chip, int regno, u32 mask)
+{
+ int i;
+
+ for (i = 0; i < 25; i++) {
+ u32 reg;
+
+ lan9303_read_switch_reg(chip, regno, &reg);
+ if (!(reg & mask))
+ return 0;
+ usleep_range(1000, 2000);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int lan9303_alr_make_entry_raw(struct lan9303 *chip, u32 dat0, u32 dat1)
+{
+ lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_WR_DAT_0, dat0);
+ lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_WR_DAT_1, dat1);
+ lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
+ LAN9303_ALR_CMD_MAKE_ENTRY);
+ lan9303_csr_reg_wait(chip, LAN9303_SWE_ALR_CMD_STS, ALR_STS_MAKE_PEND);
+ lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0);
+
+ return 0;
+}
+
+typedef int alr_loop_cb_t(struct lan9303 *chip, u32 dat0, u32 dat1,
+ int portmap, void *ctx);
+
+static int lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
+{
+ int ret = 0, i;
+
+ mutex_lock(&chip->alr_mutex);
+ lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
+ LAN9303_ALR_CMD_GET_FIRST);
+ lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0);
+
+ for (i = 1; i < LAN9303_NUM_ALR_RECORDS; i++) {
+ u32 dat0, dat1;
+ int alrport, portmap;
+
+ lan9303_read_switch_reg(chip, LAN9303_SWE_ALR_RD_DAT_0, &dat0);
+ lan9303_read_switch_reg(chip, LAN9303_SWE_ALR_RD_DAT_1, &dat1);
+ if (dat1 & LAN9303_ALR_DAT1_END_OF_TABL)
+ break;
+
+ alrport = (dat1 & LAN9303_ALR_DAT1_PORT_MASK) >>
+ LAN9303_ALR_DAT1_PORT_BITOFFS;
+ portmap = alrport_2_portmap[alrport];
+
+ ret = cb(chip, dat0, dat1, portmap, ctx);
+ if (ret)
+ break;
+
+ lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
+ LAN9303_ALR_CMD_GET_NEXT);
+ lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0);
+ }
+ mutex_unlock(&chip->alr_mutex);
+
+ return ret;
+}
+
+static void alr_reg_to_mac(u32 dat0, u32 dat1, u8 mac[6])
+{
+ mac[0] = (dat0 >> 0) & 0xff;
+ mac[1] = (dat0 >> 8) & 0xff;
+ mac[2] = (dat0 >> 16) & 0xff;
+ mac[3] = (dat0 >> 24) & 0xff;
+ mac[4] = (dat1 >> 0) & 0xff;
+ mac[5] = (dat1 >> 8) & 0xff;
+}
+
+struct del_port_learned_ctx {
+ int port;
+};
+
+/* Clear learned (non-static) entry on given port */
+static int alr_loop_cb_del_port_learned(struct lan9303 *chip, u32 dat0,
+ u32 dat1, int portmap, void *ctx)
+{
+ struct del_port_learned_ctx *del_ctx = ctx;
+ int port = del_ctx->port;
+
+ if (((BIT(port) & portmap) == 0) || (dat1 & LAN9303_ALR_DAT1_STATIC))
+ return 0;
+
+	/* Learned entries have only one port, so we can just delete them */
+ dat1 &= ~LAN9303_ALR_DAT1_VALID; /* delete entry */
+ lan9303_alr_make_entry_raw(chip, dat0, dat1);
+
+ return 0;
+}
+
+struct port_fdb_dump_ctx {
+ int port;
+ void *data;
+ dsa_fdb_dump_cb_t *cb;
+};
+
+static int alr_loop_cb_fdb_port_dump(struct lan9303 *chip, u32 dat0,
+ u32 dat1, int portmap, void *ctx)
+{
+ struct port_fdb_dump_ctx *dump_ctx = ctx;
+ u8 mac[ETH_ALEN];
+ bool is_static;
+
+ if ((BIT(dump_ctx->port) & portmap) == 0)
+ return 0;
+
+ alr_reg_to_mac(dat0, dat1, mac);
+ is_static = !!(dat1 & LAN9303_ALR_DAT1_STATIC);
+ return dump_ctx->cb(mac, 0, is_static, dump_ctx->data);
+}
+
+/* Set a static ALR entry. Delete entry if port_map is zero */
+static void lan9303_alr_set_entry(struct lan9303 *chip, const u8 *mac,
+ u8 port_map, bool stp_override)
+{
+ u32 dat0, dat1, alr_port;
+
+ dev_dbg(chip->dev, "%s(%pM, %d)\n", __func__, mac, port_map);
+ dat1 = LAN9303_ALR_DAT1_STATIC;
+ if (port_map)
+ dat1 |= LAN9303_ALR_DAT1_VALID;
+ /* otherwise no ports: delete entry */
+ if (stp_override)
+ dat1 |= LAN9303_ALR_DAT1_AGE_OVERRID;
+
+ alr_port = portmap_2_alrport[port_map & 7];
+ dat1 &= ~LAN9303_ALR_DAT1_PORT_MASK;
+ dat1 |= alr_port << LAN9303_ALR_DAT1_PORT_BITOFFS;
+
+ dat0 = 0;
+ dat0 |= (mac[0] << 0);
+ dat0 |= (mac[1] << 8);
+ dat0 |= (mac[2] << 16);
+ dat0 |= (mac[3] << 24);
+
+ dat1 |= (mac[4] << 0);
+ dat1 |= (mac[5] << 8);
+
+ lan9303_alr_make_entry_raw(chip, dat0, dat1);
+}
+
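+/* Static ALR entries are mirrored in a software cache (chip->alr_cache) so
+ * that the per-MAC port map can be extended or shrunk without re-reading
+ * the hardware table on every change.
+ */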
+/* Add port to static ALR entry, create new static entry if needed */
+static int lan9303_alr_add_port(struct lan9303 *chip, const u8 *mac, int port,
+ bool stp_override)
+{
+ struct lan9303_alr_cache_entry *entr;
+
+ mutex_lock(&chip->alr_mutex);
+ entr = lan9303_alr_cache_find_mac(chip, mac);
+ if (!entr) { /* New entry */
+ entr = lan9303_alr_cache_find_free(chip);
+ if (!entr) {
+ mutex_unlock(&chip->alr_mutex);
+ return -ENOSPC;
+ }
+ ether_addr_copy(entr->mac_addr, mac);
+ }
+ entr->port_map |= BIT(port);
+ entr->stp_override = stp_override;
+ lan9303_alr_set_entry(chip, mac, entr->port_map, stp_override);
+ mutex_unlock(&chip->alr_mutex);
+
+ return 0;
+}
+
+/* Delete static port from ALR entry, delete entry if last port */
+static int lan9303_alr_del_port(struct lan9303 *chip, const u8 *mac, int port)
+{
+ struct lan9303_alr_cache_entry *entr;
+
+ mutex_lock(&chip->alr_mutex);
+ entr = lan9303_alr_cache_find_mac(chip, mac);
+ if (!entr)
+ goto out; /* no static entry found */
+
+ entr->port_map &= ~BIT(port);
+ if (entr->port_map == 0) /* zero means it's free again */
+ eth_zero_addr(entr->mac_addr);
+ lan9303_alr_set_entry(chip, mac, entr->port_map, entr->stp_override);
+
+out:
+ mutex_unlock(&chip->alr_mutex);
+ return 0;
+}
+
+static int lan9303_disable_processing_port(struct lan9303 *chip,
+ unsigned int port)
+{
+ int ret;
+
+ /* disable RX, but otherwise keep the register's reset default values */
+ ret = lan9303_write_switch_port(chip, port, LAN9303_MAC_RX_CFG_0,
+ LAN9303_MAC_RX_CFG_X_REJECT_MAC_TYPES);
+ if (ret)
+ return ret;
+
+ /* disable TX, but otherwise keep the register's reset default values */
+ return lan9303_write_switch_port(chip, port, LAN9303_MAC_TX_CFG_0,
+ LAN9303_MAC_TX_CFG_X_TX_IFG_CONFIG_DEFAULT |
+ LAN9303_MAC_TX_CFG_X_TX_PAD_ENABLE);
+}
+
+static int lan9303_enable_processing_port(struct lan9303 *chip,
+ unsigned int port)
+{
+ int ret;
+
+ /* enable RX and otherwise keep the register's reset default values */
+ ret = lan9303_write_switch_port(chip, port, LAN9303_MAC_RX_CFG_0,
+ LAN9303_MAC_RX_CFG_X_REJECT_MAC_TYPES |
+ LAN9303_MAC_RX_CFG_X_RX_ENABLE);
+ if (ret)
+ return ret;
+
+ /* enable TX and otherwise keep the register's reset default values */
+ return lan9303_write_switch_port(chip, port, LAN9303_MAC_TX_CFG_0,
+ LAN9303_MAC_TX_CFG_X_TX_IFG_CONFIG_DEFAULT |
+ LAN9303_MAC_TX_CFG_X_TX_PAD_ENABLE |
+ LAN9303_MAC_TX_CFG_X_TX_ENABLE);
+}
+
+/* forward special tagged packets from port 0 to port 1 *or* port 2 */
+static int lan9303_setup_tagging(struct lan9303 *chip)
+{
+ int ret;
+ u32 val;
+ /* enable defining the destination port via special VLAN tagging
+ * for port 0
+ */
+ ret = lan9303_write_switch_reg(chip, LAN9303_SWE_INGRESS_PORT_TYPE,
+ LAN9303_SWE_INGRESS_PORT_TYPE_VLAN);
+ if (ret)
+ return ret;
+
+ /* tag incoming packets at port 1 and 2 on their way to port 0 to be
+ * able to discover their source port
+ */
+ val = LAN9303_BM_EGRSS_PORT_TYPE_SPECIAL_TAG_PORT0;
+ return lan9303_write_switch_reg(chip, LAN9303_BM_EGRSS_PORT_TYPE, val);
+}
+
+/* We want a special working switch:
+ * - do not forward packets between port 1 and 2
+ * - forward everything from port 1 to port 0
+ * - forward everything from port 2 to port 0
+ */
+static int lan9303_separate_ports(struct lan9303 *chip)
+{
+ int ret;
+
+ lan9303_alr_del_port(chip, eth_stp_addr, 0);
+ ret = lan9303_write_switch_reg(chip, LAN9303_SWE_PORT_MIRROR,
+ LAN9303_SWE_PORT_MIRROR_SNIFFER_PORT0 |
+ LAN9303_SWE_PORT_MIRROR_MIRRORED_PORT1 |
+ LAN9303_SWE_PORT_MIRROR_MIRRORED_PORT2 |
+ LAN9303_SWE_PORT_MIRROR_ENABLE_RX_MIRRORING |
+ LAN9303_SWE_PORT_MIRROR_SNIFF_ALL);
+ if (ret)
+ return ret;
+
+ /* prevent port 1 and 2 from forwarding packets on their own */
+ return lan9303_write_switch_reg(chip, LAN9303_SWE_PORT_STATE,
+ LAN9303_SWE_PORT_STATE_FORWARDING_PORT0 |
+ LAN9303_SWE_PORT_STATE_BLOCKING_PORT1 |
+ LAN9303_SWE_PORT_STATE_BLOCKING_PORT2);
+}
+
+static void lan9303_bridge_ports(struct lan9303 *chip)
+{
+ /* ports bridged: remove mirroring */
+ lan9303_write_switch_reg(chip, LAN9303_SWE_PORT_MIRROR,
+ LAN9303_SWE_PORT_MIRROR_DISABLED);
+
+ lan9303_write_switch_reg(chip, LAN9303_SWE_PORT_STATE,
+ chip->swe_port_state);
+ lan9303_alr_add_port(chip, eth_stp_addr, 0, true);
+}
+
+static void lan9303_handle_reset(struct lan9303 *chip)
+{
+ if (!chip->reset_gpio)
+ return;
+
+ if (chip->reset_duration != 0)
+ msleep(chip->reset_duration);
+
+ /* release (deassert) reset and activate the device */
+ gpiod_set_value_cansleep(chip->reset_gpio, 0);
+}
+
+/* stop processing packets for all ports */
+static int lan9303_disable_processing(struct lan9303 *chip)
+{
+ int p;
+
+ for (p = 1; p < LAN9303_NUM_PORTS; p++) {
+ int ret = lan9303_disable_processing_port(chip, p);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lan9303_check_device(struct lan9303 *chip)
+{
+ int ret;
+ u32 reg;
+
+ ret = lan9303_read(chip->regmap, LAN9303_CHIP_REV, &reg);
+ if (ret) {
+ dev_err(chip->dev, "failed to read chip revision register: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (((reg >> 16) != LAN9303_CHIP_ID) &&
+ ((reg >> 16) != LAN9354_CHIP_ID)) {
+ dev_err(chip->dev, "unexpected device found: LAN%4.4X\n",
+ reg >> 16);
+ return -ENODEV;
+ }
+
+ /* The default state of the LAN9303 device is to forward packets between
+ * all ports (if not configured differently by an external EEPROM).
+ * The initial state of a DSA device must be forwarding packets only
+ * between the external and the internal ports and no forwarding
+ * between the external ports. In preparation we stop all packet
+ * handling for now, until the LAN9303 device is re-programmed accordingly.
+ */
+ ret = lan9303_disable_processing(chip);
+ if (ret)
+ dev_warn(chip->dev, "failed to disable switching %d\n", ret);
+
+ dev_info(chip->dev, "Found LAN%4.4X rev. %u\n", (reg >> 16), reg & 0xffff);
+
+ ret = lan9303_detect_phy_setup(chip);
+ if (ret) {
+ dev_err(chip->dev,
+ "failed to discover phy bootstrap setup: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* ---------------------------- DSA -----------------------------------*/
+
+static enum dsa_tag_protocol lan9303_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol mp)
+{
+ return DSA_TAG_PROTO_LAN9303;
+}
+
+static int lan9303_setup(struct dsa_switch *ds)
+{
+ struct lan9303 *chip = ds->priv;
+ int ret;
+
+ /* Make sure that port 0 is the CPU port */
+ if (!dsa_is_cpu_port(ds, 0)) {
+ dev_err(chip->dev, "port 0 is not the CPU port\n");
+ return -EINVAL;
+ }
+
+ ret = lan9303_setup_tagging(chip);
+ if (ret)
+ dev_err(chip->dev, "failed to setup port tagging %d\n", ret);
+
+ ret = lan9303_separate_ports(chip);
+ if (ret)
+ dev_err(chip->dev, "failed to separate ports %d\n", ret);
+
+ ret = lan9303_enable_processing_port(chip, 0);
+ if (ret)
+ dev_err(chip->dev, "failed to re-enable switching %d\n", ret);
+
+ /* Trap IGMP to port 0 */
+ ret = lan9303_write_switch_reg_mask(chip, LAN9303_SWE_GLB_INGRESS_CFG,
+ LAN9303_SWE_GLB_INGR_IGMP_TRAP |
+ LAN9303_SWE_GLB_INGR_IGMP_PORT(0),
+ LAN9303_SWE_GLB_INGR_IGMP_PORT(1) |
+ LAN9303_SWE_GLB_INGR_IGMP_PORT(2));
+ if (ret)
+ dev_err(chip->dev, "failed to setup IGMP trap %d\n", ret);
+
+ return 0;
+}
+
+struct lan9303_mib_desc {
+ unsigned int offset; /* offset in the first port's (MAC 0) register block */
+ const char *name;
+};
+
+static const struct lan9303_mib_desc lan9303_mib[] = {
+ { .offset = LAN9303_MAC_RX_BRDCST_CNT_0, .name = "RxBroad", },
+ { .offset = LAN9303_MAC_RX_PAUSE_CNT_0, .name = "RxPause", },
+ { .offset = LAN9303_MAC_RX_MULCST_CNT_0, .name = "RxMulti", },
+ { .offset = LAN9303_MAC_RX_PKTOK_CNT_0, .name = "RxOk", },
+ { .offset = LAN9303_MAC_RX_CRCERR_CNT_0, .name = "RxCrcErr", },
+ { .offset = LAN9303_MAC_RX_ALIGN_CNT_0, .name = "RxAlignErr", },
+ { .offset = LAN9303_MAC_RX_JABB_CNT_0, .name = "RxJabber", },
+ { .offset = LAN9303_MAC_RX_FRAG_CNT_0, .name = "RxFragment", },
+ { .offset = LAN9303_MAC_RX_64_CNT_0, .name = "Rx64Byte", },
+ { .offset = LAN9303_MAC_RX_127_CNT_0, .name = "Rx128Byte", },
+ { .offset = LAN9303_MAC_RX_255_CNT_0, .name = "Rx256Byte", },
+ { .offset = LAN9303_MAC_RX_511_CNT_0, .name = "Rx512Byte", },
+ { .offset = LAN9303_MAC_RX_1023_CNT_0, .name = "Rx1024Byte", },
+ { .offset = LAN9303_MAC_RX_MAX_CNT_0, .name = "RxMaxByte", },
+ { .offset = LAN9303_MAC_RX_PKTLEN_CNT_0, .name = "RxByteCnt", },
+ { .offset = LAN9303_MAC_RX_SYMBL_CNT_0, .name = "RxSymbolCnt", },
+ { .offset = LAN9303_MAC_RX_CTLFRM_CNT_0, .name = "RxCfs", },
+ { .offset = LAN9303_MAC_RX_OVRSZE_CNT_0, .name = "RxOverFlow", },
+ { .offset = LAN9303_MAC_TX_UNDSZE_CNT_0, .name = "TxShort", },
+ { .offset = LAN9303_MAC_TX_BRDCST_CNT_0, .name = "TxBroad", },
+ { .offset = LAN9303_MAC_TX_PAUSE_CNT_0, .name = "TxPause", },
+ { .offset = LAN9303_MAC_TX_MULCST_CNT_0, .name = "TxMulti", },
+ { .offset = LAN9303_MAC_RX_UNDSZE_CNT_0, .name = "RxShort", },
+ { .offset = LAN9303_MAC_TX_64_CNT_0, .name = "Tx64Byte", },
+ { .offset = LAN9303_MAC_TX_127_CNT_0, .name = "Tx128Byte", },
+ { .offset = LAN9303_MAC_TX_255_CNT_0, .name = "Tx256Byte", },
+ { .offset = LAN9303_MAC_TX_511_CNT_0, .name = "Tx512Byte", },
+ { .offset = LAN9303_MAC_TX_1023_CNT_0, .name = "Tx1024Byte", },
+ { .offset = LAN9303_MAC_TX_MAX_CNT_0, .name = "TxMaxByte", },
+ { .offset = LAN9303_MAC_TX_PKTLEN_CNT_0, .name = "TxByteCnt", },
+ { .offset = LAN9303_MAC_TX_PKTOK_CNT_0, .name = "TxOk", },
+ { .offset = LAN9303_MAC_TX_TOTALCOL_CNT_0, .name = "TxCollision", },
+ { .offset = LAN9303_MAC_TX_MULTICOL_CNT_0, .name = "TxMultiCol", },
+ { .offset = LAN9303_MAC_TX_SNGLECOL_CNT_0, .name = "TxSingleCol", },
+ { .offset = LAN9303_MAC_TX_EXCOL_CNT_0, .name = "TxExcCol", },
+ { .offset = LAN9303_MAC_TX_DEFER_CNT_0, .name = "TxDefer", },
+ { .offset = LAN9303_MAC_TX_LATECOL_0, .name = "TxLateCol", },
+};
+
+static void lan9303_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, uint8_t *data)
+{
+ unsigned int u;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (u = 0; u < ARRAY_SIZE(lan9303_mib); u++) {
+ strncpy(data + u * ETH_GSTRING_LEN, lan9303_mib[u].name,
+ ETH_GSTRING_LEN);
+ }
+}
+
+static void lan9303_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct lan9303 *chip = ds->priv;
+ unsigned int u;
+
+ for (u = 0; u < ARRAY_SIZE(lan9303_mib); u++) {
+ u32 reg;
+ int ret;
+
+ ret = lan9303_read_switch_port(
+ chip, port, lan9303_mib[u].offset, &reg);
+
+ if (ret) {
+ dev_warn(chip->dev, "Reading status port %d reg %u failed\n",
+ port, lan9303_mib[u].offset);
+ reg = 0;
+ }
+ data[u] = reg;
+ }
+}
+
+static int lan9303_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ return ARRAY_SIZE(lan9303_mib);
+}
+
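+/* The PHY at chip->phy_addr_base is the virtual PHY of the CPU port and is
+ * accessed through the chip registers; the two real port PHYs follow at
+ * base + 1 and base + 2.
+ */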
+static int lan9303_phy_read(struct dsa_switch *ds, int phy, int regnum)
+{
+ struct lan9303 *chip = ds->priv;
+ int phy_base = chip->phy_addr_base;
+
+ if (phy == phy_base)
+ return lan9303_virt_phy_reg_read(chip, regnum);
+ if (phy > phy_base + 2)
+ return -ENODEV;
+
+ return chip->ops->phy_read(chip, phy, regnum);
+}
+
+static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum,
+ u16 val)
+{
+ struct lan9303 *chip = ds->priv;
+ int phy_base = chip->phy_addr_base;
+
+ if (phy == phy_base)
+ return lan9303_virt_phy_reg_write(chip, regnum, val);
+ if (phy > phy_base + 2)
+ return -ENODEV;
+
+ return chip->ops->phy_write(chip, phy, regnum, val);
+}
+
+static void lan9303_adjust_link(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct lan9303 *chip = ds->priv;
+ int ctl;
+
+ if (!phy_is_pseudo_fixed_link(phydev))
+ return;
+
+ ctl = lan9303_phy_read(ds, port, MII_BMCR);
+
+ ctl &= ~BMCR_ANENABLE;
+
+ if (phydev->speed == SPEED_100)
+ ctl |= BMCR_SPEED100;
+ else if (phydev->speed == SPEED_10)
+ ctl &= ~BMCR_SPEED100;
+ else
+ dev_err(ds->dev, "unsupported speed: %d\n", phydev->speed);
+
+ if (phydev->duplex == DUPLEX_FULL)
+ ctl |= BMCR_FULLDPLX;
+ else
+ ctl &= ~BMCR_FULLDPLX;
+
+ lan9303_phy_write(ds, port, MII_BMCR, ctl);
+
+ if (port == chip->phy_addr_base) {
+ /* Virtual PHY: Remove Turbo 200Mbit mode */
+ lan9303_read(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, &ctl);
+
+ ctl &= ~LAN9303_VIRT_SPECIAL_TURBO;
+ regmap_write(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, ctl);
+ }
+}
+
+static int lan9303_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct lan9303 *chip = ds->priv;
+
+ if (!dsa_port_is_user(dp))
+ return 0;
+
+ vlan_vid_add(dsa_port_to_master(dp), htons(ETH_P_8021Q), port);
+
+ return lan9303_enable_processing_port(chip, port);
+}
+
+static void lan9303_port_disable(struct dsa_switch *ds, int port)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct lan9303 *chip = ds->priv;
+
+ if (!dsa_port_is_user(dp))
+ return;
+
+ vlan_vid_del(dsa_port_to_master(dp), htons(ETH_P_8021Q), port);
+
+ lan9303_disable_processing_port(chip, port);
+ lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN);
+}
+
+static int lan9303_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
+{
+ struct lan9303 *chip = ds->priv;
+
+ dev_dbg(chip->dev, "%s(port %d)\n", __func__, port);
+ if (dsa_port_bridge_same(dsa_to_port(ds, 1), dsa_to_port(ds, 2))) {
+ lan9303_bridge_ports(chip);
+ chip->is_bridged = true; /* unleash stp_state_set() */
+ }
+
+ return 0;
+}
+
+static void lan9303_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
+{
+ struct lan9303 *chip = ds->priv;
+
+ dev_dbg(chip->dev, "%s(port %d)\n", __func__, port);
+ if (chip->is_bridged) {
+ lan9303_separate_ports(chip);
+ chip->is_bridged = false;
+ }
+}
+
+static void lan9303_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ int portmask, portstate;
+ struct lan9303 *chip = ds->priv;
+
+ dev_dbg(chip->dev, "%s(port %d, state %d)\n",
+ __func__, port, state);
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ portstate = LAN9303_SWE_PORT_STATE_DISABLED_PORT0;
+ break;
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ portstate = LAN9303_SWE_PORT_STATE_BLOCKING_PORT0;
+ break;
+ case BR_STATE_LEARNING:
+ portstate = LAN9303_SWE_PORT_STATE_LEARNING_PORT0;
+ break;
+ case BR_STATE_FORWARDING:
+ portstate = LAN9303_SWE_PORT_STATE_FORWARDING_PORT0;
+ break;
+ default:
+ portstate = LAN9303_SWE_PORT_STATE_DISABLED_PORT0;
+ dev_err(chip->dev, "unknown stp state: port %d, state %d\n",
+ port, state);
+ }
+
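+ /* Each port owns a 2-bit state field in SWE_PORT_STATE; the *_PORT0
+ * constants are the port 0 values, shifted below into this port's field.
+ */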
+ portmask = 0x3 << (port * 2);
+ portstate <<= (port * 2);
+
+ chip->swe_port_state = (chip->swe_port_state & ~portmask) | portstate;
+
+ if (chip->is_bridged)
+ lan9303_write_switch_reg(chip, LAN9303_SWE_PORT_STATE,
+ chip->swe_port_state);
+ /* else: touching SWE_PORT_STATE would break port separation */
+}
+
+static void lan9303_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct lan9303 *chip = ds->priv;
+ struct del_port_learned_ctx del_ctx = {
+ .port = port,
+ };
+
+ dev_dbg(chip->dev, "%s(%d)\n", __func__, port);
+ lan9303_alr_loop(chip, alr_loop_cb_del_port_learned, &del_ctx);
+}
+
+static int lan9303_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct lan9303 *chip = ds->priv;
+
+ dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid);
+
+ return lan9303_alr_add_port(chip, addr, port, false);
+}
+
+static int lan9303_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct lan9303 *chip = ds->priv;
+
+ dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid);
+ lan9303_alr_del_port(chip, addr, port);
+
+ return 0;
+}
+
+static int lan9303_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct lan9303 *chip = ds->priv;
+ struct port_fdb_dump_ctx dump_ctx = {
+ .port = port,
+ .data = data,
+ .cb = cb,
+ };
+
+ dev_dbg(chip->dev, "%s(%d)\n", __func__, port);
+ return lan9303_alr_loop(chip, alr_loop_cb_fdb_port_dump, &dump_ctx);
+}
+
+static int lan9303_port_mdb_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct lan9303 *chip = ds->priv;
+
+ dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, mdb->addr,
+ mdb->vid);
+ if (mdb->vid)
+ return -EOPNOTSUPP;
+ if (lan9303_alr_cache_find_mac(chip, mdb->addr))
+ return 0;
+ if (!lan9303_alr_cache_find_free(chip))
+ return -ENOSPC;
+
+ return 0;
+}
+
+static int lan9303_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct lan9303 *chip = ds->priv;
+ int err;
+
+ err = lan9303_port_mdb_prepare(ds, port, mdb);
+ if (err)
+ return err;
+
+ dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, mdb->addr,
+ mdb->vid);
+ return lan9303_alr_add_port(chip, mdb->addr, port, false);
+}
+
+static int lan9303_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct lan9303 *chip = ds->priv;
+
+ dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, mdb->addr,
+ mdb->vid);
+ if (mdb->vid)
+ return -EOPNOTSUPP;
+ lan9303_alr_del_port(chip, mdb->addr, port);
+
+ return 0;
+}
+
+static const struct dsa_switch_ops lan9303_switch_ops = {
+ .get_tag_protocol = lan9303_get_tag_protocol,
+ .setup = lan9303_setup,
+ .get_strings = lan9303_get_strings,
+ .phy_read = lan9303_phy_read,
+ .phy_write = lan9303_phy_write,
+ .adjust_link = lan9303_adjust_link,
+ .get_ethtool_stats = lan9303_get_ethtool_stats,
+ .get_sset_count = lan9303_get_sset_count,
+ .port_enable = lan9303_port_enable,
+ .port_disable = lan9303_port_disable,
+ .port_bridge_join = lan9303_port_bridge_join,
+ .port_bridge_leave = lan9303_port_bridge_leave,
+ .port_stp_state_set = lan9303_port_stp_state_set,
+ .port_fast_age = lan9303_port_fast_age,
+ .port_fdb_add = lan9303_port_fdb_add,
+ .port_fdb_del = lan9303_port_fdb_del,
+ .port_fdb_dump = lan9303_port_fdb_dump,
+ .port_mdb_add = lan9303_port_mdb_add,
+ .port_mdb_del = lan9303_port_mdb_del,
+};
+
+static int lan9303_register_switch(struct lan9303 *chip)
+{
+ int base;
+
+ chip->ds = devm_kzalloc(chip->dev, sizeof(*chip->ds), GFP_KERNEL);
+ if (!chip->ds)
+ return -ENOMEM;
+
+ chip->ds->dev = chip->dev;
+ chip->ds->num_ports = LAN9303_NUM_PORTS;
+ chip->ds->priv = chip;
+ chip->ds->ops = &lan9303_switch_ops;
+ base = chip->phy_addr_base;
+ chip->ds->phys_mii_mask = GENMASK(LAN9303_NUM_PORTS - 1 + base, base);
+
+ return dsa_register_switch(chip->ds);
+}
+
+static int lan9303_probe_reset_gpio(struct lan9303 *chip,
+ struct device_node *np)
+{
+ chip->reset_gpio = devm_gpiod_get_optional(chip->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(chip->reset_gpio))
+ return PTR_ERR(chip->reset_gpio);
+
+ if (!chip->reset_gpio) {
+ dev_dbg(chip->dev, "No reset GPIO defined\n");
+ return 0;
+ }
+
+ chip->reset_duration = 200;
+
+ if (np) {
+ of_property_read_u32(np, "reset-duration",
+ &chip->reset_duration);
+ } else {
+ dev_dbg(chip->dev, "reset duration defaults to 200 ms\n");
+ }
+
+ /* A sane reset duration should not be longer than 1s */
+ if (chip->reset_duration > 1000)
+ chip->reset_duration = 1000;
+
+ return 0;
+}
+
+int lan9303_probe(struct lan9303 *chip, struct device_node *np)
+{
+ int ret;
+ u32 reg;
+
+ mutex_init(&chip->indirect_mutex);
+ mutex_init(&chip->alr_mutex);
+
+ ret = lan9303_probe_reset_gpio(chip, np);
+ if (ret)
+ return ret;
+
+ lan9303_handle_reset(chip);
+
+ /* First read from the device: a dummy read to ensure the MDIO
+ * access is in 32-bit sync.
+ */
+ ret = lan9303_read(chip->regmap, LAN9303_BYTE_ORDER, &reg);
+ if (ret) {
+ dev_err(chip->dev, "failed to access the device: %d\n",
+ ret);
+ if (!chip->reset_gpio) {
+ dev_dbg(chip->dev,
+ "hint: maybe failed due to missing reset GPIO\n");
+ }
+ return ret;
+ }
+
+ ret = lan9303_check_device(chip);
+ if (ret)
+ return ret;
+
+ ret = lan9303_register_switch(chip);
+ if (ret) {
+ dev_dbg(chip->dev, "Failed to register switch: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(lan9303_probe);
+
+int lan9303_remove(struct lan9303 *chip)
+{
+ int rc;
+
+ rc = lan9303_disable_processing(chip);
+ if (rc != 0)
+ dev_warn(chip->dev, "shutting down failed\n");
+
+ dsa_unregister_switch(chip->ds);
+
+ /* assert reset to the whole device to prevent it from doing anything */
+ gpiod_set_value_cansleep(chip->reset_gpio, 1);
+ gpiod_unexport(chip->reset_gpio);
+
+ return 0;
+}
+EXPORT_SYMBOL(lan9303_remove);
+
+void lan9303_shutdown(struct lan9303 *chip)
+{
+ dsa_switch_shutdown(chip->ds);
+}
+EXPORT_SYMBOL(lan9303_shutdown);
+
+MODULE_AUTHOR("Juergen Borleis <kernel@pengutronix.de>");
+MODULE_DESCRIPTION("Core driver for SMSC/Microchip LAN9303 three port ethernet switch");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/lan9303.h b/drivers/net/dsa/lan9303.h
new file mode 100644
index 000000000..c7f73efa5
--- /dev/null
+++ b/drivers/net/dsa/lan9303.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/regmap.h>
+#include <linux/device.h>
+#include <net/dsa.h>
+
+#include <linux/dsa/lan9303.h>
+
+extern const struct regmap_access_table lan9303_register_set;
+extern const struct lan9303_phy_ops lan9303_indirect_phy_ops;
+
+int lan9303_probe(struct lan9303 *chip, struct device_node *np);
+int lan9303_remove(struct lan9303 *chip);
+void lan9303_shutdown(struct lan9303 *chip);
diff --git a/drivers/net/dsa/lan9303_i2c.c b/drivers/net/dsa/lan9303_i2c.c
new file mode 100644
index 000000000..7d746cd9c
--- /dev/null
+++ b/drivers/net/dsa/lan9303_i2c.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017 Pengutronix, Juergen Borleis <kernel@pengutronix.de>
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/of.h>
+
+#include "lan9303.h"
+
+struct lan9303_i2c {
+ struct i2c_client *device;
+ struct lan9303 chip;
+};
+
+static const struct regmap_config lan9303_i2c_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 32,
+ .reg_stride = 1,
+ .can_multi_write = true,
+ .max_register = 0x0ff, /* address bits 0..1 are not used */
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+
+ .volatile_table = &lan9303_register_set,
+ .wr_table = &lan9303_register_set,
+ .rd_table = &lan9303_register_set,
+
+ .cache_type = REGCACHE_NONE,
+};
+
+static int lan9303_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct lan9303_i2c *sw_dev;
+ int ret;
+
+ sw_dev = devm_kzalloc(&client->dev, sizeof(struct lan9303_i2c),
+ GFP_KERNEL);
+ if (!sw_dev)
+ return -ENOMEM;
+
+ sw_dev->chip.regmap = devm_regmap_init_i2c(client,
+ &lan9303_i2c_regmap_config);
+ if (IS_ERR(sw_dev->chip.regmap)) {
+ ret = PTR_ERR(sw_dev->chip.regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* link forward and backward */
+ sw_dev->device = client;
+ i2c_set_clientdata(client, sw_dev);
+ sw_dev->chip.dev = &client->dev;
+
+ sw_dev->chip.ops = &lan9303_indirect_phy_ops;
+
+ ret = lan9303_probe(&sw_dev->chip, client->dev.of_node);
+ if (ret != 0)
+ return ret;
+
+ dev_info(&client->dev, "LAN9303 I2C driver loaded successfully\n");
+
+ return 0;
+}
+
+static void lan9303_i2c_remove(struct i2c_client *client)
+{
+ struct lan9303_i2c *sw_dev = i2c_get_clientdata(client);
+
+ if (!sw_dev)
+ return;
+
+ lan9303_remove(&sw_dev->chip);
+}
+
+static void lan9303_i2c_shutdown(struct i2c_client *client)
+{
+ struct lan9303_i2c *sw_dev = i2c_get_clientdata(client);
+
+ if (!sw_dev)
+ return;
+
+ lan9303_shutdown(&sw_dev->chip);
+
+ i2c_set_clientdata(client, NULL);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static const struct i2c_device_id lan9303_i2c_id[] = {
+ { "lan9303", 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, lan9303_i2c_id);
+
+static const struct of_device_id lan9303_i2c_of_match[] = {
+ { .compatible = "smsc,lan9303-i2c", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, lan9303_i2c_of_match);
+
+static struct i2c_driver lan9303_i2c_driver = {
+ .driver = {
+ .name = "LAN9303_I2C",
+ .of_match_table = of_match_ptr(lan9303_i2c_of_match),
+ },
+ .probe = lan9303_i2c_probe,
+ .remove = lan9303_i2c_remove,
+ .shutdown = lan9303_i2c_shutdown,
+ .id_table = lan9303_i2c_id,
+};
+module_i2c_driver(lan9303_i2c_driver);
+
+MODULE_AUTHOR("Juergen Borleis <kernel@pengutronix.de>");
+MODULE_DESCRIPTION("Driver for SMSC/Microchip LAN9303 three port ethernet switch in I2C managed mode");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/lan9303_mdio.c b/drivers/net/dsa/lan9303_mdio.c
new file mode 100644
index 000000000..47484f55e
--- /dev/null
+++ b/drivers/net/dsa/lan9303_mdio.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017 Pengutronix, Juergen Borleis <kernel@pengutronix.de>
+ *
+ * Partially based on a patch from
+ * Copyright (c) 2014 Stefan Roese <sr@denx.de>
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+
+#include "lan9303.h"
+
+/* Generate phy-addr and -reg from the input address */
+#define PHY_ADDR(x) ((((x) >> 6) + 0x10) & 0x1f)
+#define PHY_REG(x) (((x) >> 1) & 0x1f)
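+/* e.g. CSR byte offset 0x84: PHY_ADDR(0x84) = 0x12, PHY_REG(0x84) = 2 */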
+
+struct lan9303_mdio {
+ struct mdio_device *device;
+ struct lan9303 chip;
+};
+
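+/* Each 32-bit chip register is mapped onto two consecutive 16-bit MDIO
+ * registers, low word first; PHY_ADDR()/PHY_REG() turn the register byte
+ * offset into the (phy, reg) pair on the bus.
+ */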
+static void lan9303_mdio_real_write(struct mdio_device *mdio, int reg, u16 val)
+{
+ mdio->bus->write(mdio->bus, PHY_ADDR(reg), PHY_REG(reg), val);
+}
+
+static int lan9303_mdio_write(void *ctx, uint32_t reg, uint32_t val)
+{
+ struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx;
+
+ reg <<= 2; /* reg num to offset */
+ mutex_lock_nested(&sw_dev->device->bus->mdio_lock, MDIO_MUTEX_NESTED);
+ lan9303_mdio_real_write(sw_dev->device, reg, val & 0xffff);
+ lan9303_mdio_real_write(sw_dev->device, reg + 2, (val >> 16) & 0xffff);
+ mutex_unlock(&sw_dev->device->bus->mdio_lock);
+
+ return 0;
+}
+
+static u16 lan9303_mdio_real_read(struct mdio_device *mdio, int reg)
+{
+ return mdio->bus->read(mdio->bus, PHY_ADDR(reg), PHY_REG(reg));
+}
+
+static int lan9303_mdio_read(void *ctx, uint32_t reg, uint32_t *val)
+{
+ struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx;
+
+ reg <<= 2; /* reg num to offset */
+ mutex_lock_nested(&sw_dev->device->bus->mdio_lock, MDIO_MUTEX_NESTED);
+ *val = lan9303_mdio_real_read(sw_dev->device, reg);
+ *val |= (lan9303_mdio_real_read(sw_dev->device, reg + 2) << 16);
+ mutex_unlock(&sw_dev->device->bus->mdio_lock);
+
+ return 0;
+}
+
+static int lan9303_mdio_phy_write(struct lan9303 *chip, int phy, int reg,
+ u16 val)
+{
+ struct lan9303_mdio *sw_dev = dev_get_drvdata(chip->dev);
+
+ return mdiobus_write_nested(sw_dev->device->bus, phy, reg, val);
+}
+
+static int lan9303_mdio_phy_read(struct lan9303 *chip, int phy, int reg)
+{
+ struct lan9303_mdio *sw_dev = dev_get_drvdata(chip->dev);
+
+ return mdiobus_read_nested(sw_dev->device->bus, phy, reg);
+}
+
+static const struct lan9303_phy_ops lan9303_mdio_phy_ops = {
+ .phy_read = lan9303_mdio_phy_read,
+ .phy_write = lan9303_mdio_phy_write,
+};
+
+static const struct regmap_config lan9303_mdio_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 32,
+ .reg_stride = 1,
+ .can_multi_write = true,
+ .max_register = 0x0ff, /* address bits 0..1 are not used */
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+
+ .volatile_table = &lan9303_register_set,
+ .wr_table = &lan9303_register_set,
+ .rd_table = &lan9303_register_set,
+
+ .reg_read = lan9303_mdio_read,
+ .reg_write = lan9303_mdio_write,
+
+ .cache_type = REGCACHE_NONE,
+};
+
+static int lan9303_mdio_probe(struct mdio_device *mdiodev)
+{
+ struct lan9303_mdio *sw_dev;
+ int ret;
+
+ sw_dev = devm_kzalloc(&mdiodev->dev, sizeof(struct lan9303_mdio),
+ GFP_KERNEL);
+ if (!sw_dev)
+ return -ENOMEM;
+
+ sw_dev->chip.regmap = devm_regmap_init(&mdiodev->dev, NULL, sw_dev,
+ &lan9303_mdio_regmap_config);
+ if (IS_ERR(sw_dev->chip.regmap)) {
+ ret = PTR_ERR(sw_dev->chip.regmap);
+ dev_err(&mdiodev->dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ /* link forward and backward */
+ sw_dev->device = mdiodev;
+ dev_set_drvdata(&mdiodev->dev, sw_dev);
+ sw_dev->chip.dev = &mdiodev->dev;
+
+ sw_dev->chip.ops = &lan9303_mdio_phy_ops;
+
+ ret = lan9303_probe(&sw_dev->chip, mdiodev->dev.of_node);
+ if (ret != 0)
+ return ret;
+
+ dev_info(&mdiodev->dev, "LAN9303 MDIO driver loaded successfully\n");
+
+ return 0;
+}
+
+static void lan9303_mdio_remove(struct mdio_device *mdiodev)
+{
+ struct lan9303_mdio *sw_dev = dev_get_drvdata(&mdiodev->dev);
+
+ if (!sw_dev)
+ return;
+
+ lan9303_remove(&sw_dev->chip);
+}
+
+static void lan9303_mdio_shutdown(struct mdio_device *mdiodev)
+{
+ struct lan9303_mdio *sw_dev = dev_get_drvdata(&mdiodev->dev);
+
+ if (!sw_dev)
+ return;
+
+ lan9303_shutdown(&sw_dev->chip);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static const struct of_device_id lan9303_mdio_of_match[] = {
+ { .compatible = "smsc,lan9303-mdio" },
+ { .compatible = "microchip,lan9354-mdio" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, lan9303_mdio_of_match);
+
+static struct mdio_driver lan9303_mdio_driver = {
+ .mdiodrv.driver = {
+ .name = "LAN9303_MDIO",
+ .of_match_table = of_match_ptr(lan9303_mdio_of_match),
+ },
+ .probe = lan9303_mdio_probe,
+ .remove = lan9303_mdio_remove,
+ .shutdown = lan9303_mdio_shutdown,
+};
+mdio_module_driver(lan9303_mdio_driver);
+
+MODULE_AUTHOR("Stefan Roese <sr@denx.de>, Juergen Borleis <kernel@pengutronix.de>");
+MODULE_DESCRIPTION("Driver for SMSC/Microchip LAN9303 three port ethernet switch in MDIO managed mode");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
new file mode 100644
index 000000000..05ecaa007
--- /dev/null
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -0,0 +1,2287 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Lantiq / Intel GSWIP switch driver for VRX200, xRX300 and xRX330 SoCs
+ *
+ * Copyright (C) 2010 Lantiq Deutschland
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
+ * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * The VLAN and bridge model the GSWIP hardware uses does not directly
+ * match the model DSA uses.
+ *
+ * The hardware has 64 possible bridge table entries, each with one VLAN
+ * ID, one flow ID and a list of member ports. All entries which share
+ * the same flow ID are combined in the MAC learning table and act as
+ * one global bridge.
+ * The hardware supports VLAN filtering per bridge rather than per port,
+ * so this driver maps the DSA model onto what the hardware provides.
+ *
+ * The CPU gets all the exception frames which do not match any forwarding
+ * rule and the CPU port is also added to all bridges. This makes it possible
+ * to handle all the special cases easily in software.
+ * At initialization the driver allocates one bridge table entry for
+ * each switch port, used when the port is not part of an explicit
+ * bridge. This prevents frames from being forwarded
+ * between all LAN ports by default.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/firmware.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <net/dsa.h>
+#include <dt-bindings/mips/lantiq_rcu_gphy.h>
+
+#include "lantiq_pce.h"
+
+/* GSWIP MDIO Registers */
+#define GSWIP_MDIO_GLOB 0x00
+#define GSWIP_MDIO_GLOB_ENABLE BIT(15)
+#define GSWIP_MDIO_CTRL 0x08
+#define GSWIP_MDIO_CTRL_BUSY BIT(12)
+#define GSWIP_MDIO_CTRL_RD BIT(11)
+#define GSWIP_MDIO_CTRL_WR BIT(10)
+#define GSWIP_MDIO_CTRL_PHYAD_MASK 0x1f
+#define GSWIP_MDIO_CTRL_PHYAD_SHIFT 5
+#define GSWIP_MDIO_CTRL_REGAD_MASK 0x1f
+#define GSWIP_MDIO_READ 0x09
+#define GSWIP_MDIO_WRITE 0x0A
+#define GSWIP_MDIO_MDC_CFG0 0x0B
+#define GSWIP_MDIO_MDC_CFG1 0x0C
+#define GSWIP_MDIO_PHYp(p) (0x15 - (p))
+#define GSWIP_MDIO_PHY_LINK_MASK 0x6000
+#define GSWIP_MDIO_PHY_LINK_AUTO 0x0000
+#define GSWIP_MDIO_PHY_LINK_DOWN 0x4000
+#define GSWIP_MDIO_PHY_LINK_UP 0x2000
+#define GSWIP_MDIO_PHY_SPEED_MASK 0x1800
+#define GSWIP_MDIO_PHY_SPEED_AUTO 0x1800
+#define GSWIP_MDIO_PHY_SPEED_M10 0x0000
+#define GSWIP_MDIO_PHY_SPEED_M100 0x0800
+#define GSWIP_MDIO_PHY_SPEED_G1 0x1000
+#define GSWIP_MDIO_PHY_FDUP_MASK 0x0600
+#define GSWIP_MDIO_PHY_FDUP_AUTO 0x0000
+#define GSWIP_MDIO_PHY_FDUP_EN 0x0200
+#define GSWIP_MDIO_PHY_FDUP_DIS 0x0600
+#define GSWIP_MDIO_PHY_FCONTX_MASK 0x0180
+#define GSWIP_MDIO_PHY_FCONTX_AUTO 0x0000
+#define GSWIP_MDIO_PHY_FCONTX_EN 0x0100
+#define GSWIP_MDIO_PHY_FCONTX_DIS 0x0180
+#define GSWIP_MDIO_PHY_FCONRX_MASK 0x0060
+#define GSWIP_MDIO_PHY_FCONRX_AUTO 0x0000
+#define GSWIP_MDIO_PHY_FCONRX_EN 0x0020
+#define GSWIP_MDIO_PHY_FCONRX_DIS 0x0060
+#define GSWIP_MDIO_PHY_ADDR_MASK 0x001f
+#define GSWIP_MDIO_PHY_MASK (GSWIP_MDIO_PHY_ADDR_MASK | \
+ GSWIP_MDIO_PHY_FCONRX_MASK | \
+ GSWIP_MDIO_PHY_FCONTX_MASK | \
+ GSWIP_MDIO_PHY_LINK_MASK | \
+ GSWIP_MDIO_PHY_SPEED_MASK | \
+ GSWIP_MDIO_PHY_FDUP_MASK)
+
+/* GSWIP MII Registers */
+#define GSWIP_MII_CFGp(p) (0x2 * (p))
+#define GSWIP_MII_CFG_RESET BIT(15)
+#define GSWIP_MII_CFG_EN BIT(14)
+#define GSWIP_MII_CFG_ISOLATE BIT(13)
+#define GSWIP_MII_CFG_LDCLKDIS BIT(12)
+#define GSWIP_MII_CFG_RGMII_IBS BIT(8)
+#define GSWIP_MII_CFG_RMII_CLK BIT(7)
+#define GSWIP_MII_CFG_MODE_MIIP 0x0
+#define GSWIP_MII_CFG_MODE_MIIM 0x1
+#define GSWIP_MII_CFG_MODE_RMIIP 0x2
+#define GSWIP_MII_CFG_MODE_RMIIM 0x3
+#define GSWIP_MII_CFG_MODE_RGMII 0x4
+#define GSWIP_MII_CFG_MODE_GMII 0x9
+#define GSWIP_MII_CFG_MODE_MASK 0xf
+#define GSWIP_MII_CFG_RATE_M2P5 0x00
+#define GSWIP_MII_CFG_RATE_M25 0x10
+#define GSWIP_MII_CFG_RATE_M125 0x20
+#define GSWIP_MII_CFG_RATE_M50 0x30
+#define GSWIP_MII_CFG_RATE_AUTO 0x40
+#define GSWIP_MII_CFG_RATE_MASK 0x70
+#define GSWIP_MII_PCDU0 0x01
+#define GSWIP_MII_PCDU1 0x03
+#define GSWIP_MII_PCDU5 0x05
+#define GSWIP_MII_PCDU_TXDLY_MASK GENMASK(2, 0)
+#define GSWIP_MII_PCDU_RXDLY_MASK GENMASK(9, 7)
+
+/* GSWIP Core Registers */
+#define GSWIP_SWRES 0x000
+#define GSWIP_SWRES_R1 BIT(1) /* GSWIP Software reset */
+#define GSWIP_SWRES_R0 BIT(0) /* GSWIP Hardware reset */
+#define GSWIP_VERSION 0x013
+#define GSWIP_VERSION_REV_SHIFT 0
+#define GSWIP_VERSION_REV_MASK GENMASK(7, 0)
+#define GSWIP_VERSION_MOD_SHIFT 8
+#define GSWIP_VERSION_MOD_MASK GENMASK(15, 8)
+#define GSWIP_VERSION_2_0 0x100
+#define GSWIP_VERSION_2_1 0x021
+#define GSWIP_VERSION_2_2 0x122
+#define GSWIP_VERSION_2_2_ETC 0x022
+
+#define GSWIP_BM_RAM_VAL(x) (0x043 - (x))
+#define GSWIP_BM_RAM_ADDR 0x044
+#define GSWIP_BM_RAM_CTRL 0x045
+#define GSWIP_BM_RAM_CTRL_BAS BIT(15)
+#define GSWIP_BM_RAM_CTRL_OPMOD BIT(5)
+#define GSWIP_BM_RAM_CTRL_ADDR_MASK GENMASK(4, 0)
+#define GSWIP_BM_QUEUE_GCTRL 0x04A
+#define GSWIP_BM_QUEUE_GCTRL_GL_MOD BIT(10)
+/* buffer management Port Configuration Register */
+#define GSWIP_BM_PCFGp(p) (0x080 + ((p) * 2))
+#define GSWIP_BM_PCFG_CNTEN BIT(0) /* RMON Counter Enable */
+#define GSWIP_BM_PCFG_IGCNT BIT(1) /* Ingress Special Tag RMON count */
+/* buffer management Port Control Register */
+#define GSWIP_BM_RMON_CTRLp(p) (0x81 + ((p) * 2))
+#define GSWIP_BM_CTRL_RMON_RAM1_RES BIT(0) /* Software Reset for RMON RAM 1 */
+#define GSWIP_BM_CTRL_RMON_RAM2_RES BIT(1) /* Software Reset for RMON RAM 2 */
+
+/* PCE */
+#define GSWIP_PCE_TBL_KEY(x) (0x447 - (x))
+#define GSWIP_PCE_TBL_MASK 0x448
+#define GSWIP_PCE_TBL_VAL(x) (0x44D - (x))
+#define GSWIP_PCE_TBL_ADDR 0x44E
+#define GSWIP_PCE_TBL_CTRL 0x44F
+#define GSWIP_PCE_TBL_CTRL_BAS BIT(15)
+#define GSWIP_PCE_TBL_CTRL_TYPE BIT(13)
+#define GSWIP_PCE_TBL_CTRL_VLD BIT(12)
+#define GSWIP_PCE_TBL_CTRL_KEYFORM BIT(11)
+#define GSWIP_PCE_TBL_CTRL_GMAP_MASK GENMASK(10, 7)
+#define GSWIP_PCE_TBL_CTRL_OPMOD_MASK GENMASK(6, 5)
+#define GSWIP_PCE_TBL_CTRL_OPMOD_ADRD 0x00
+#define GSWIP_PCE_TBL_CTRL_OPMOD_ADWR 0x20
+#define GSWIP_PCE_TBL_CTRL_OPMOD_KSRD 0x40
+#define GSWIP_PCE_TBL_CTRL_OPMOD_KSWR 0x60
+#define GSWIP_PCE_TBL_CTRL_ADDR_MASK GENMASK(4, 0)
+#define GSWIP_PCE_PMAP1 0x453 /* Monitoring port map */
+#define GSWIP_PCE_PMAP2 0x454 /* Default Multicast port map */
+#define GSWIP_PCE_PMAP3 0x455 /* Default Unknown Unicast port map */
+#define GSWIP_PCE_GCTRL_0 0x456
+#define GSWIP_PCE_GCTRL_0_MTFL BIT(0) /* MAC Table Flushing */
+#define GSWIP_PCE_GCTRL_0_MC_VALID BIT(3)
+#define GSWIP_PCE_GCTRL_0_VLAN BIT(14) /* VLAN aware Switching */
+#define GSWIP_PCE_GCTRL_1 0x457
+#define GSWIP_PCE_GCTRL_1_MAC_GLOCK BIT(2) /* MAC Address table lock */
+#define GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD BIT(3) /* MAC address table lock forwarding mode */
+#define GSWIP_PCE_PCTRL_0p(p) (0x480 + ((p) * 0xA))
+#define GSWIP_PCE_PCTRL_0_TVM BIT(5) /* Transparent VLAN mode */
+#define GSWIP_PCE_PCTRL_0_VREP BIT(6) /* VLAN Replace Mode */
+#define GSWIP_PCE_PCTRL_0_INGRESS BIT(11) /* Accept special tag in ingress */
+#define GSWIP_PCE_PCTRL_0_PSTATE_LISTEN 0x0
+#define GSWIP_PCE_PCTRL_0_PSTATE_RX 0x1
+#define GSWIP_PCE_PCTRL_0_PSTATE_TX 0x2
+#define GSWIP_PCE_PCTRL_0_PSTATE_LEARNING 0x3
+#define GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING 0x7
+#define GSWIP_PCE_PCTRL_0_PSTATE_MASK GENMASK(2, 0)
+#define GSWIP_PCE_VCTRL(p) (0x485 + ((p) * 0xA))
+#define GSWIP_PCE_VCTRL_UVR BIT(0) /* Unknown VLAN Rule */
+#define GSWIP_PCE_VCTRL_VIMR BIT(3) /* VLAN Ingress Member violation rule */
+#define GSWIP_PCE_VCTRL_VEMR BIT(4) /* VLAN Egress Member violation rule */
+#define GSWIP_PCE_VCTRL_VSR BIT(5) /* VLAN Security */
+#define GSWIP_PCE_VCTRL_VID0 BIT(6) /* Priority Tagged Rule */
+#define GSWIP_PCE_DEFPVID(p) (0x486 + ((p) * 0xA))
+
+#define GSWIP_MAC_FLEN 0x8C5
+#define GSWIP_MAC_CTRL_0p(p) (0x903 + ((p) * 0xC))
+#define GSWIP_MAC_CTRL_0_PADEN BIT(8)
+#define GSWIP_MAC_CTRL_0_FCS_EN BIT(7)
+#define GSWIP_MAC_CTRL_0_FCON_MASK 0x0070
+#define GSWIP_MAC_CTRL_0_FCON_AUTO 0x0000
+#define GSWIP_MAC_CTRL_0_FCON_RX 0x0010
+#define GSWIP_MAC_CTRL_0_FCON_TX 0x0020
+#define GSWIP_MAC_CTRL_0_FCON_RXTX 0x0030
+#define GSWIP_MAC_CTRL_0_FCON_NONE 0x0040
+#define GSWIP_MAC_CTRL_0_FDUP_MASK 0x000C
+#define GSWIP_MAC_CTRL_0_FDUP_AUTO 0x0000
+#define GSWIP_MAC_CTRL_0_FDUP_EN 0x0004
+#define GSWIP_MAC_CTRL_0_FDUP_DIS 0x000C
+#define GSWIP_MAC_CTRL_0_GMII_MASK 0x0003
+#define GSWIP_MAC_CTRL_0_GMII_AUTO 0x0000
+#define GSWIP_MAC_CTRL_0_GMII_MII 0x0001
+#define GSWIP_MAC_CTRL_0_GMII_RGMII 0x0002
+#define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC))
+#define GSWIP_MAC_CTRL_2_LCHKL BIT(2) /* Frame Length Check Long Enable */
+#define GSWIP_MAC_CTRL_2_MLEN BIT(3) /* Maximum Untagged Frame Length */
+
+/* Ethernet Switch Fetch DMA Port Control Register */
+#define GSWIP_FDMA_PCTRLp(p) (0xA80 + ((p) * 0x6))
+#define GSWIP_FDMA_PCTRL_EN BIT(0) /* FDMA Port Enable */
+#define GSWIP_FDMA_PCTRL_STEN BIT(1) /* Special Tag Insertion Enable */
+#define GSWIP_FDMA_PCTRL_VLANMOD_MASK GENMASK(4, 3) /* VLAN Modification Control */
+#define GSWIP_FDMA_PCTRL_VLANMOD_SHIFT 3 /* VLAN Modification Control */
+#define GSWIP_FDMA_PCTRL_VLANMOD_DIS (0x0 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
+#define GSWIP_FDMA_PCTRL_VLANMOD_PRIO (0x1 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
+#define GSWIP_FDMA_PCTRL_VLANMOD_ID (0x2 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
+#define GSWIP_FDMA_PCTRL_VLANMOD_BOTH (0x3 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
+
+/* Ethernet Switch Store DMA Port Control Register */
+#define GSWIP_SDMA_PCTRLp(p) (0xBC0 + ((p) * 0x6))
+#define GSWIP_SDMA_PCTRL_EN BIT(0) /* SDMA Port Enable */
+#define GSWIP_SDMA_PCTRL_FCEN BIT(1) /* Flow Control Enable */
+#define GSWIP_SDMA_PCTRL_PAUFWD BIT(3) /* Pause Frame Forwarding */
+
+#define GSWIP_TABLE_ACTIVE_VLAN 0x01
+#define GSWIP_TABLE_VLAN_MAPPING 0x02
+#define GSWIP_TABLE_MAC_BRIDGE 0x0b
+#define GSWIP_TABLE_MAC_BRIDGE_STATIC 0x01 /* Static, non-aging entry */
+
+#define XRX200_GPHY_FW_ALIGN (16 * 1024)
+
+/* Maximum packet size supported by the switch. In theory this should be 10240,
+ * but long packets currently cause lock-ups with an MTU of over 2526. Medium
+ * packets are sometimes dropped (e.g. TCP over 2477, UDP over 2516-2519, ICMP
+ * over 2526), hence an MTU value of 2400 seems safe. This issue only affects
+ * packet reception. This is probably caused by the PPA engine, which is on the
+ * RX part of the device. Packet transmission works properly up to 10240.
+ */
+#define GSWIP_MAX_PACKET_LENGTH 2400
+
+struct gswip_hw_info {
+ int max_ports;
+ int cpu_port;
+ const struct dsa_switch_ops *ops;
+};
+
+struct xway_gphy_match_data {
+ char *fe_firmware_name;
+ char *ge_firmware_name;
+};
+
+struct gswip_gphy_fw {
+ struct clk *clk_gate;
+ struct reset_control *reset;
+ u32 fw_addr_offset;
+ char *fw_name;
+};
+
+struct gswip_vlan {
+ struct net_device *bridge;
+ u16 vid;
+ u8 fid;
+};
+
+struct gswip_priv {
+ __iomem void *gswip;
+ __iomem void *mdio;
+ __iomem void *mii;
+ const struct gswip_hw_info *hw_info;
+ const struct xway_gphy_match_data *gphy_fw_name_cfg;
+ struct dsa_switch *ds;
+ struct device *dev;
+ struct regmap *rcu_regmap;
+ struct gswip_vlan vlans[64];
+ int num_gphy_fw;
+ struct gswip_gphy_fw *gphy_fw;
+ u32 port_vlan_filter;
+ struct mutex pce_table_lock;
+};
+
+struct gswip_pce_table_entry {
+ u16 index; /* PCE_TBL_ADDR.ADDR */
+ u16 table; /* PCE_TBL_CTRL.ADDR */
+ u16 key[8];
+ u16 val[5];
+ u16 mask;
+ u8 gmap;
+ bool type;
+ bool valid;
+ bool key_mode;
+};
+
+struct gswip_rmon_cnt_desc {
+ unsigned int size;
+ unsigned int offset;
+ const char *name;
+};
+
+#define MIB_DESC(_size, _offset, _name) {.size = _size, .offset = _offset, .name = _name}
+
+static const struct gswip_rmon_cnt_desc gswip_rmon_cnt[] = {
+ /** Receive Packet Count (only packets that are accepted and not discarded). */
+ MIB_DESC(1, 0x1F, "RxGoodPkts"),
+ MIB_DESC(1, 0x23, "RxUnicastPkts"),
+ MIB_DESC(1, 0x22, "RxMulticastPkts"),
+ MIB_DESC(1, 0x21, "RxFCSErrorPkts"),
+ MIB_DESC(1, 0x1D, "RxUnderSizeGoodPkts"),
+ MIB_DESC(1, 0x1E, "RxUnderSizeErrorPkts"),
+ MIB_DESC(1, 0x1B, "RxOversizeGoodPkts"),
+ MIB_DESC(1, 0x1C, "RxOversizeErrorPkts"),
+ MIB_DESC(1, 0x20, "RxGoodPausePkts"),
+ MIB_DESC(1, 0x1A, "RxAlignErrorPkts"),
+ MIB_DESC(1, 0x12, "Rx64BytePkts"),
+ MIB_DESC(1, 0x13, "Rx127BytePkts"),
+ MIB_DESC(1, 0x14, "Rx255BytePkts"),
+ MIB_DESC(1, 0x15, "Rx511BytePkts"),
+ MIB_DESC(1, 0x16, "Rx1023BytePkts"),
+ /** Receive Size 1024-1522 (or more, if configured) Packet Count. */
+ MIB_DESC(1, 0x17, "RxMaxBytePkts"),
+ MIB_DESC(1, 0x18, "RxDroppedPkts"),
+ MIB_DESC(1, 0x19, "RxFilteredPkts"),
+ MIB_DESC(2, 0x24, "RxGoodBytes"),
+ MIB_DESC(2, 0x26, "RxBadBytes"),
+ MIB_DESC(1, 0x11, "TxAcmDroppedPkts"),
+ MIB_DESC(1, 0x0C, "TxGoodPkts"),
+ MIB_DESC(1, 0x06, "TxUnicastPkts"),
+ MIB_DESC(1, 0x07, "TxMulticastPkts"),
+ MIB_DESC(1, 0x00, "Tx64BytePkts"),
+ MIB_DESC(1, 0x01, "Tx127BytePkts"),
+ MIB_DESC(1, 0x02, "Tx255BytePkts"),
+ MIB_DESC(1, 0x03, "Tx511BytePkts"),
+ MIB_DESC(1, 0x04, "Tx1023BytePkts"),
+ /** Transmit Size 1024-1522 (or more, if configured) Packet Count. */
+ MIB_DESC(1, 0x05, "TxMaxBytePkts"),
+ MIB_DESC(1, 0x08, "TxSingleCollCount"),
+ MIB_DESC(1, 0x09, "TxMultCollCount"),
+ MIB_DESC(1, 0x0A, "TxLateCollCount"),
+ MIB_DESC(1, 0x0B, "TxExcessCollCount"),
+ MIB_DESC(1, 0x0D, "TxPauseCount"),
+ MIB_DESC(1, 0x10, "TxDroppedPkts"),
+ MIB_DESC(2, 0x0E, "TxGoodBytes"),
+};
+
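+/* All register offsets used below are 32-bit word indices, hence the
+ * "* 4" when forming the byte address.
+ */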
+static u32 gswip_switch_r(struct gswip_priv *priv, u32 offset)
+{
+ return __raw_readl(priv->gswip + (offset * 4));
+}
+
+static void gswip_switch_w(struct gswip_priv *priv, u32 val, u32 offset)
+{
+ __raw_writel(val, priv->gswip + (offset * 4));
+}
+
+static void gswip_switch_mask(struct gswip_priv *priv, u32 clear, u32 set,
+ u32 offset)
+{
+ u32 val = gswip_switch_r(priv, offset);
+
+ val &= ~(clear);
+ val |= set;
+ gswip_switch_w(priv, val, offset);
+}
+
+static u32 gswip_switch_r_timeout(struct gswip_priv *priv, u32 offset,
+ u32 cleared)
+{
+ u32 val;
+
+ return readx_poll_timeout(__raw_readl, priv->gswip + (offset * 4), val,
+ (val & cleared) == 0, 20, 50000);
+}
+
+static u32 gswip_mdio_r(struct gswip_priv *priv, u32 offset)
+{
+ return __raw_readl(priv->mdio + (offset * 4));
+}
+
+static void gswip_mdio_w(struct gswip_priv *priv, u32 val, u32 offset)
+{
+ __raw_writel(val, priv->mdio + (offset * 4));
+}
+
+static void gswip_mdio_mask(struct gswip_priv *priv, u32 clear, u32 set,
+ u32 offset)
+{
+ u32 val = gswip_mdio_r(priv, offset);
+
+ val &= ~(clear);
+ val |= set;
+ gswip_mdio_w(priv, val, offset);
+}
+
+static u32 gswip_mii_r(struct gswip_priv *priv, u32 offset)
+{
+ return __raw_readl(priv->mii + (offset * 4));
+}
+
+static void gswip_mii_w(struct gswip_priv *priv, u32 val, u32 offset)
+{
+ __raw_writel(val, priv->mii + (offset * 4));
+}
+
+static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set,
+ u32 offset)
+{
+ u32 val = gswip_mii_r(priv, offset);
+
+ val &= ~(clear);
+ val |= set;
+ gswip_mii_w(priv, val, offset);
+}
+
+static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set,
+ int port)
+{
+ /* There's no MII_CFG register for the CPU port */
+ if (!dsa_is_cpu_port(priv->ds, port))
+ gswip_mii_mask(priv, clear, set, GSWIP_MII_CFGp(port));
+}
+
+static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set,
+ int port)
+{
+ switch (port) {
+ case 0:
+ gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU0);
+ break;
+ case 1:
+ gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU1);
+ break;
+ case 5:
+ gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU5);
+ break;
+ }
+}
+
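+/* Wait for a pending MDIO command to complete; polls the BUSY bit for at
+ * most 100 iterations of 20-40 us, i.e. roughly 2-4 ms.
+ */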
+static int gswip_mdio_poll(struct gswip_priv *priv)
+{
+ int cnt = 100;
+
+ while (likely(cnt--)) {
+ u32 ctrl = gswip_mdio_r(priv, GSWIP_MDIO_CTRL);
+
+ if ((ctrl & GSWIP_MDIO_CTRL_BUSY) == 0)
+ return 0;
+ usleep_range(20, 40);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int gswip_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val)
+{
+ struct gswip_priv *priv = bus->priv;
+ int err;
+
+ err = gswip_mdio_poll(priv);
+ if (err) {
+ dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
+ return err;
+ }
+
+ gswip_mdio_w(priv, val, GSWIP_MDIO_WRITE);
+ gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_WR |
+ ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
+ (reg & GSWIP_MDIO_CTRL_REGAD_MASK),
+ GSWIP_MDIO_CTRL);
+
+ return 0;
+}
+
+static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg)
+{
+ struct gswip_priv *priv = bus->priv;
+ int err;
+
+ err = gswip_mdio_poll(priv);
+ if (err) {
+ dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
+ return err;
+ }
+
+ gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_RD |
+ ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
+ (reg & GSWIP_MDIO_CTRL_REGAD_MASK),
+ GSWIP_MDIO_CTRL);
+
+ err = gswip_mdio_poll(priv);
+ if (err) {
+ dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
+ return err;
+ }
+
+ return gswip_mdio_r(priv, GSWIP_MDIO_READ);
+}
+
+static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
+{
+ struct dsa_switch *ds = priv->ds;
+ int err;
+
+ ds->slave_mii_bus = mdiobus_alloc();
+ if (!ds->slave_mii_bus)
+ return -ENOMEM;
+
+ ds->slave_mii_bus->priv = priv;
+ ds->slave_mii_bus->read = gswip_mdio_rd;
+ ds->slave_mii_bus->write = gswip_mdio_wr;
+ ds->slave_mii_bus->name = "lantiq,xrx200-mdio";
+ snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "%s-mii",
+ dev_name(priv->dev));
+ ds->slave_mii_bus->parent = priv->dev;
+ ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
+
+ err = of_mdiobus_register(ds->slave_mii_bus, mdio_np);
+ if (err)
+ mdiobus_free(ds->slave_mii_bus);
+
+ return err;
+}
+
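+/* PCE table accesses follow a common pattern: wait until the BAS (busy /
+ * access start) bit is clear, program the table address and operation
+ * mode, set BAS to start the access and wait for the hardware to clear
+ * it again.
+ */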
+static int gswip_pce_table_entry_read(struct gswip_priv *priv,
+ struct gswip_pce_table_entry *tbl)
+{
+ int i;
+ int err;
+ u16 crtl;
+ u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSRD :
+ GSWIP_PCE_TBL_CTRL_OPMOD_ADRD;
+
+ mutex_lock(&priv->pce_table_lock);
+
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+ if (err) {
+ mutex_unlock(&priv->pce_table_lock);
+ return err;
+ }
+
+ gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
+ gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
+ GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
+ tbl->table | addr_mode | GSWIP_PCE_TBL_CTRL_BAS,
+ GSWIP_PCE_TBL_CTRL);
+
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+ if (err) {
+ mutex_unlock(&priv->pce_table_lock);
+ return err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
+ tbl->key[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_KEY(i));
+
+ for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
+ tbl->val[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_VAL(i));
+
+ tbl->mask = gswip_switch_r(priv, GSWIP_PCE_TBL_MASK);
+
+ crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
+
+ tbl->type = !!(crtl & GSWIP_PCE_TBL_CTRL_TYPE);
+ tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD);
+ tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7;
+
+ mutex_unlock(&priv->pce_table_lock);
+
+ return 0;
+}
+
+static int gswip_pce_table_entry_write(struct gswip_priv *priv,
+ struct gswip_pce_table_entry *tbl)
+{
+ int i;
+ int err;
+ u16 crtl;
+ u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSWR :
+ GSWIP_PCE_TBL_CTRL_OPMOD_ADWR;
+
+ mutex_lock(&priv->pce_table_lock);
+
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+ if (err) {
+ mutex_unlock(&priv->pce_table_lock);
+ return err;
+ }
+
+ gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
+ gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
+ GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
+ tbl->table | addr_mode,
+ GSWIP_PCE_TBL_CTRL);
+
+ for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
+ gswip_switch_w(priv, tbl->key[i], GSWIP_PCE_TBL_KEY(i));
+
+ for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
+ gswip_switch_w(priv, tbl->val[i], GSWIP_PCE_TBL_VAL(i));
+
+ gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
+ GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
+ tbl->table | addr_mode,
+ GSWIP_PCE_TBL_CTRL);
+
+ gswip_switch_w(priv, tbl->mask, GSWIP_PCE_TBL_MASK);
+
+ crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
+ crtl &= ~(GSWIP_PCE_TBL_CTRL_TYPE | GSWIP_PCE_TBL_CTRL_VLD |
+ GSWIP_PCE_TBL_CTRL_GMAP_MASK);
+ if (tbl->type)
+ crtl |= GSWIP_PCE_TBL_CTRL_TYPE;
+ if (tbl->valid)
+ crtl |= GSWIP_PCE_TBL_CTRL_VLD;
+ crtl |= (tbl->gmap << 7) & GSWIP_PCE_TBL_CTRL_GMAP_MASK;
+ crtl |= GSWIP_PCE_TBL_CTRL_BAS;
+ gswip_switch_w(priv, crtl, GSWIP_PCE_TBL_CTRL);
+
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+
+ mutex_unlock(&priv->pce_table_lock);
+
+ return err;
+}
+
+/* Add the LAN port into a bridge with the CPU port by
+ * default. This prevents automatic forwarding of
+ * packets between the LAN ports when no explicit
+ * bridge is configured.
+ */
+static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add)
+{
+ struct gswip_pce_table_entry vlan_active = {0,};
+ struct gswip_pce_table_entry vlan_mapping = {0,};
+ unsigned int cpu_port = priv->hw_info->cpu_port;
+ unsigned int max_ports = priv->hw_info->max_ports;
+ int err;
+
+ if (port >= max_ports) {
+ dev_err(priv->dev, "single port for %i supported\n", port);
+ return -EIO;
+ }
+
+ vlan_active.index = port + 1;
+ vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
+ vlan_active.key[0] = 0; /* vid */
+ vlan_active.val[0] = port + 1 /* fid */;
+ vlan_active.valid = add;
+ err = gswip_pce_table_entry_write(priv, &vlan_active);
+ if (err) {
+ dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
+ return err;
+ }
+
+ if (!add)
+ return 0;
+
+ vlan_mapping.index = port + 1;
+ vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
+ vlan_mapping.val[0] = 0 /* vid */;
+ vlan_mapping.val[1] = BIT(port) | BIT(cpu_port);
+ vlan_mapping.val[2] = 0;
+ err = gswip_pce_table_entry_write(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int gswip_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct gswip_priv *priv = ds->priv;
+ int err;
+
+ if (!dsa_is_user_port(ds, port))
+ return 0;
+
+ if (!dsa_is_cpu_port(ds, port)) {
+ err = gswip_add_single_port_br(priv, port, true);
+ if (err)
+ return err;
+ }
+
+ /* RMON Counter Enable for port */
+ gswip_switch_w(priv, GSWIP_BM_PCFG_CNTEN, GSWIP_BM_PCFGp(port));
+
+ /* enable port fetch/store dma & VLAN Modification */
+ gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_EN |
+ GSWIP_FDMA_PCTRL_VLANMOD_BOTH,
+ GSWIP_FDMA_PCTRLp(port));
+ gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
+ GSWIP_SDMA_PCTRLp(port));
+
+ if (!dsa_is_cpu_port(ds, port)) {
+ u32 mdio_phy = 0;
+
+ if (phydev)
+ mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;
+
+ gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy,
+ GSWIP_MDIO_PHYp(port));
+ }
+
+ return 0;
+}
+
+static void gswip_port_disable(struct dsa_switch *ds, int port)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ if (!dsa_is_user_port(ds, port))
+ return;
+
+ gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0,
+ GSWIP_FDMA_PCTRLp(port));
+ gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
+ GSWIP_SDMA_PCTRLp(port));
+}
+
+static int gswip_pce_load_microcode(struct gswip_priv *priv)
+{
+ int i;
+ int err;
+
+ gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
+ GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
+ GSWIP_PCE_TBL_CTRL_OPMOD_ADWR, GSWIP_PCE_TBL_CTRL);
+ gswip_switch_w(priv, 0, GSWIP_PCE_TBL_MASK);
+
+ for (i = 0; i < ARRAY_SIZE(gswip_pce_microcode); i++) {
+ gswip_switch_w(priv, i, GSWIP_PCE_TBL_ADDR);
+ gswip_switch_w(priv, gswip_pce_microcode[i].val_0,
+ GSWIP_PCE_TBL_VAL(0));
+ gswip_switch_w(priv, gswip_pce_microcode[i].val_1,
+ GSWIP_PCE_TBL_VAL(1));
+ gswip_switch_w(priv, gswip_pce_microcode[i].val_2,
+ GSWIP_PCE_TBL_VAL(2));
+ gswip_switch_w(priv, gswip_pce_microcode[i].val_3,
+ GSWIP_PCE_TBL_VAL(3));
+
+ /* start the table access: */
+ gswip_switch_mask(priv, 0, GSWIP_PCE_TBL_CTRL_BAS,
+ GSWIP_PCE_TBL_CTRL);
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+ if (err)
+ return err;
+ }
+
+ /* tell the switch that the microcode is loaded */
+ gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MC_VALID,
+ GSWIP_PCE_GCTRL_0);
+
+ return 0;
+}
+
+static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
+ struct gswip_priv *priv = ds->priv;
+
+ /* Do not allow changing the VLAN filtering options while in bridge */
+ if (bridge && !!(priv->port_vlan_filter & BIT(port)) != vlan_filtering) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Dynamic toggling of vlan_filtering not supported");
+ return -EIO;
+ }
+
+ if (vlan_filtering) {
+ /* Use port based VLAN tag */
+ gswip_switch_mask(priv,
+ GSWIP_PCE_VCTRL_VSR,
+ GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
+ GSWIP_PCE_VCTRL_VEMR,
+ GSWIP_PCE_VCTRL(port));
+ gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_TVM, 0,
+ GSWIP_PCE_PCTRL_0p(port));
+ } else {
+ /* Use port based VLAN tag */
+ gswip_switch_mask(priv,
+ GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
+ GSWIP_PCE_VCTRL_VEMR,
+ GSWIP_PCE_VCTRL_VSR,
+ GSWIP_PCE_VCTRL(port));
+ gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_TVM,
+ GSWIP_PCE_PCTRL_0p(port));
+ }
+
+ return 0;
+}
+
+static int gswip_setup(struct dsa_switch *ds)
+{
+ struct gswip_priv *priv = ds->priv;
+ unsigned int cpu_port = priv->hw_info->cpu_port;
+ int i;
+ int err;
+
+ gswip_switch_w(priv, GSWIP_SWRES_R0, GSWIP_SWRES);
+ usleep_range(5000, 10000);
+ gswip_switch_w(priv, 0, GSWIP_SWRES);
+
+ /* disable port fetch/store dma on all ports */
+ for (i = 0; i < priv->hw_info->max_ports; i++) {
+ gswip_port_disable(ds, i);
+ gswip_port_vlan_filtering(ds, i, false, NULL);
+ }
+
+ /* enable Switch */
+ gswip_mdio_mask(priv, 0, GSWIP_MDIO_GLOB_ENABLE, GSWIP_MDIO_GLOB);
+
+ err = gswip_pce_load_microcode(priv);
+ if (err) {
+ dev_err(priv->dev, "writing PCE microcode failed, %i", err);
+ return err;
+ }
+
+ /* Default unknown Broadcast/Multicast/Unicast port maps */
+ gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP1);
+ gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP2);
+ gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP3);
+
+ /* Deactivate MDIO PHY auto polling. Some PHYs as the AR8030 have an
+ * interoperability problem with this auto polling mechanism because
+ * their status registers think that the link is in a different state
+ * than it actually is. The AR8030, for example, has the BMSR_ESTATEN
+ * bit set as well as ESTATUS_1000_TFULL and ESTATUS_1000_XFULL. This
+ * makes the auto polling state machine assume that the link was
+ * negotiated at 1 Gbit/s. Since the PHY itself is a Fast Ethernet
+ * RMII PHY this leads
+ * to the switch port being completely dead (RX and TX are both not
+ * working).
+ * Also with various other PHY / port combinations (PHY11G GPHY, PHY22F
+ * GPHY, external RGMII PEF7071/7072) all traffic would stop. Sometimes
+ * it would work fine for a few minutes to hours and then stop; on
+ * other devices no traffic could be sent or received at all.
+ * Testing shows that when PHY auto polling is disabled these problems
+ * go away.
+ */
+ gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0);
+
+ /* Configure the MDIO Clock 2.5 MHz */
+ gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
+
+ /* Disable the xMII interface and clear its isolation bit */
+ for (i = 0; i < priv->hw_info->max_ports; i++)
+ gswip_mii_mask_cfg(priv,
+ GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE,
+ 0, i);
+
+ /* enable special tag insertion on cpu port */
+ gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
+ GSWIP_FDMA_PCTRLp(cpu_port));
+
+ /* accept special tag in ingress direction */
+ gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
+ GSWIP_PCE_PCTRL_0p(cpu_port));
+
+ gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD,
+ GSWIP_BM_QUEUE_GCTRL);
+
+ /* VLAN aware Switching */
+ gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_VLAN, GSWIP_PCE_GCTRL_0);
+
+ /* Flush MAC Table */
+ gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MTFL, GSWIP_PCE_GCTRL_0);
+
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_GCTRL_0,
+ GSWIP_PCE_GCTRL_0_MTFL);
+ if (err) {
+ dev_err(priv->dev, "MAC flushing didn't finish\n");
+ return err;
+ }
+
+ ds->mtu_enforcement_ingress = true;
+
+ gswip_port_enable(ds, cpu_port, NULL);
+
+ ds->configure_vlan_while_not_filtering = false;
+
+ return 0;
+}
+
+static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol mp)
+{
+ return DSA_TAG_PROTO_GSWIP;
+}
+
+static int gswip_vlan_active_create(struct gswip_priv *priv,
+ struct net_device *bridge,
+ int fid, u16 vid)
+{
+ struct gswip_pce_table_entry vlan_active = {0,};
+ unsigned int max_ports = priv->hw_info->max_ports;
+ int idx = -1;
+ int err;
+ int i;
+
+ /* Look for a free slot */
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (!priv->vlans[i].bridge) {
+ idx = i;
+ break;
+ }
+ }
+
+ if (idx == -1)
+ return -ENOSPC;
+
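+ /* No FID was given: use the table index as the FID, so each
+ * bridge created this way ends up in its own forwarding domain
+ * (an observation about the code below, not from the original
+ * sources).
+ */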
+ if (fid == -1)
+ fid = idx;
+
+ vlan_active.index = idx;
+ vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
+ vlan_active.key[0] = vid;
+ vlan_active.val[0] = fid;
+ vlan_active.valid = true;
+
+ err = gswip_pce_table_entry_write(priv, &vlan_active);
+ if (err) {
+ dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
+ return err;
+ }
+
+ priv->vlans[idx].bridge = bridge;
+ priv->vlans[idx].vid = vid;
+ priv->vlans[idx].fid = fid;
+
+ return idx;
+}
+
+static int gswip_vlan_active_remove(struct gswip_priv *priv, int idx)
+{
+ struct gswip_pce_table_entry vlan_active = {0,};
+ int err;
+
+ vlan_active.index = idx;
+ vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
+ vlan_active.valid = false;
+ err = gswip_pce_table_entry_write(priv, &vlan_active);
+ if (err)
+ dev_err(priv->dev, "failed to delete active VLAN: %d\n", err);
+ priv->vlans[idx].bridge = NULL;
+
+ return err;
+}
+
+static int gswip_vlan_add_unaware(struct gswip_priv *priv,
+ struct net_device *bridge, int port)
+{
+ struct gswip_pce_table_entry vlan_mapping = {0,};
+ unsigned int max_ports = priv->hw_info->max_ports;
+ unsigned int cpu_port = priv->hw_info->cpu_port;
+ bool active_vlan_created = false;
+ int idx = -1;
+ int i;
+ int err;
+
+ /* Check if there is already an entry for this bridge */
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (priv->vlans[i].bridge == bridge) {
+ idx = i;
+ break;
+ }
+ }
+
+ /* If this bridge is not programmed yet, add an Active VLAN table
+ * entry in a free slot and prepare the VLAN mapping table entry.
+ */
+ if (idx == -1) {
+ idx = gswip_vlan_active_create(priv, bridge, -1, 0);
+ if (idx < 0)
+ return idx;
+ active_vlan_created = true;
+
+ vlan_mapping.index = idx;
+ vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
+ /* VLAN ID, maps to the VLAN ID in the Active VLAN table */
+ vlan_mapping.val[0] = 0;
+ } else {
+ /* Read the existing VLAN mapping entry from the switch */
+ vlan_mapping.index = idx;
+ vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
+ err = gswip_pce_table_entry_read(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
+ err);
+ return err;
+ }
+ }
+
+ /* Update the VLAN mapping entry and write it to the switch */
+ vlan_mapping.val[1] |= BIT(cpu_port);
+ vlan_mapping.val[1] |= BIT(port);
+ err = gswip_pce_table_entry_write(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
+ /* In case an Active VLAN was created, delete it again */
+ if (active_vlan_created)
+ gswip_vlan_active_remove(priv, idx);
+ return err;
+ }
+
+ gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));
+ return 0;
+}
+
+static int gswip_vlan_add_aware(struct gswip_priv *priv,
+ struct net_device *bridge, int port,
+ u16 vid, bool untagged,
+ bool pvid)
+{
+ struct gswip_pce_table_entry vlan_mapping = {0,};
+ unsigned int max_ports = priv->hw_info->max_ports;
+ unsigned int cpu_port = priv->hw_info->cpu_port;
+ bool active_vlan_created = false;
+ int idx = -1;
+ int fid = -1;
+ int i;
+ int err;
+
+ /* Check if there is already an entry for this bridge */
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (priv->vlans[i].bridge == bridge) {
+ if (fid != -1 && fid != priv->vlans[i].fid)
+ dev_err(priv->dev, "one bridge with multiple flow ids\n");
+ fid = priv->vlans[i].fid;
+ if (priv->vlans[i].vid == vid) {
+ idx = i;
+ break;
+ }
+ }
+ }
+
+ /* If this bridge is not programmed yet, add an Active VLAN table
+ * entry in a free slot and prepare the VLAN mapping table entry.
+ */
+ if (idx == -1) {
+ idx = gswip_vlan_active_create(priv, bridge, fid, vid);
+ if (idx < 0)
+ return idx;
+ active_vlan_created = true;
+
+ vlan_mapping.index = idx;
+ vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
+ /* VLAN ID, maps to the VLAN ID in the Active VLAN table */
+ vlan_mapping.val[0] = vid;
+ } else {
+ /* Read the existing VLAN mapping entry from the switch */
+ vlan_mapping.index = idx;
+ vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
+ err = gswip_pce_table_entry_read(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
+ err);
+ return err;
+ }
+ }
+
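+ /* In the VLAN mapping entry val[1] is the VLAN port member map
+ * and val[2] the tag member map: a port with its bit cleared in
+ * val[2] egresses untagged (hence the clearing for "untagged"
+ * below). This reading is inferred from the code, not from a
+ * datasheet.
+ */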
+ vlan_mapping.val[0] = vid;
+ /* Update the VLAN mapping entry and write it to the switch */
+ vlan_mapping.val[1] |= BIT(cpu_port);
+ vlan_mapping.val[2] |= BIT(cpu_port);
+ vlan_mapping.val[1] |= BIT(port);
+ if (untagged)
+ vlan_mapping.val[2] &= ~BIT(port);
+ else
+ vlan_mapping.val[2] |= BIT(port);
+ err = gswip_pce_table_entry_write(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
+ /* In case an Active VLAN was created, delete it again */
+ if (active_vlan_created)
+ gswip_vlan_active_remove(priv, idx);
+ return err;
+ }
+
+ if (pvid)
+ gswip_switch_w(priv, idx, GSWIP_PCE_DEFPVID(port));
+
+ return 0;
+}
+
+static int gswip_vlan_remove(struct gswip_priv *priv,
+ struct net_device *bridge, int port,
+ u16 vid, bool pvid, bool vlan_aware)
+{
+ struct gswip_pce_table_entry vlan_mapping = {0,};
+ unsigned int max_ports = priv->hw_info->max_ports;
+ unsigned int cpu_port = priv->hw_info->cpu_port;
+ int idx = -1;
+ int i;
+ int err;
+
+ /* Check if there is already an entry for this bridge */
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (priv->vlans[i].bridge == bridge &&
+ (!vlan_aware || priv->vlans[i].vid == vid)) {
+ idx = i;
+ break;
+ }
+ }
+
+ if (idx == -1) {
+ dev_err(priv->dev, "bridge to leave does not exists\n");
+ return -ENOENT;
+ }
+
+ vlan_mapping.index = idx;
+ vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
+ err = gswip_pce_table_entry_read(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to read VLAN mapping: %d\n", err);
+ return err;
+ }
+
+ vlan_mapping.val[1] &= ~BIT(port);
+ vlan_mapping.val[2] &= ~BIT(port);
+ err = gswip_pce_table_entry_write(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
+ return err;
+ }
+
+ /* In case all ports are removed from the bridge, remove the VLAN */
+ if ((vlan_mapping.val[1] & ~BIT(cpu_port)) == 0) {
+ err = gswip_vlan_active_remove(priv, idx);
+ if (err) {
+ dev_err(priv->dev, "failed to write active VLAN: %d\n",
+ err);
+ return err;
+ }
+ }
+
+ /* GSWIP 2.2 (GRX300) and later program the VID directly here. */
+ if (pvid)
+ gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));
+
+ return 0;
+}
+
+static int gswip_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *br = bridge.dev;
+ struct gswip_priv *priv = ds->priv;
+ int err;
+
+ /* When the bridge uses VLAN filtering, the VLAN specific
+ * configuration is handled by the VLAN callbacks, so no bridge
+ * is configured here.
+ */
+ if (!br_vlan_enabled(br)) {
+ err = gswip_vlan_add_unaware(priv, br, port);
+ if (err)
+ return err;
+ priv->port_vlan_filter &= ~BIT(port);
+ } else {
+ priv->port_vlan_filter |= BIT(port);
+ }
+ return gswip_add_single_port_br(priv, port, false);
+}
+
+static void gswip_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
+{
+ struct net_device *br = bridge.dev;
+ struct gswip_priv *priv = ds->priv;
+
+ gswip_add_single_port_br(priv, port, true);
+
+ /* When the bridge uses VLAN filtering, the VLAN specific
+ * configuration is handled by the VLAN callbacks, so no bridge
+ * is configured here.
+ */
+ if (!br_vlan_enabled(br))
+ gswip_vlan_remove(priv, br, port, 0, true, false);
+}
+
+static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
+ struct gswip_priv *priv = ds->priv;
+ unsigned int max_ports = priv->hw_info->max_ports;
+ int pos = max_ports;
+ int i, idx = -1;
+
+ /* We only support VLAN filtering on bridges */
+ if (!dsa_is_cpu_port(ds, port) && !bridge)
+ return -EOPNOTSUPP;
+
+ /* Check if there is already an entry for this VLAN */
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (priv->vlans[i].bridge == bridge &&
+ priv->vlans[i].vid == vlan->vid) {
+ idx = i;
+ break;
+ }
+ }
+
+ /* If this VLAN is not programmed yet, we have to reserve
+ * one entry in the VLAN table. Make sure a later search
+ * starts after the reserved slot.
+ */
+ if (idx == -1) {
+ /* Look for a free slot */
+ for (; pos < ARRAY_SIZE(priv->vlans); pos++) {
+ if (!priv->vlans[pos].bridge) {
+ idx = pos;
+ pos++;
+ break;
+ }
+ }
+
+ if (idx == -1) {
+ NL_SET_ERR_MSG_MOD(extack, "No slot in VLAN table");
+ return -ENOSPC;
+ }
+ }
+
+ return 0;
+}
+
+static int gswip_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
+ struct gswip_priv *priv = ds->priv;
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ int err;
+
+ err = gswip_port_vlan_prepare(ds, port, vlan, extack);
+ if (err)
+ return err;
+
+ /* We have to receive all packets on the CPU port and should not
+ * do any VLAN filtering here. This callback is also invoked with
+ * a NULL bridge, in which case we do not know which bridge to
+ * configure.
+ */
+ if (dsa_is_cpu_port(ds, port))
+ return 0;
+
+ return gswip_vlan_add_aware(priv, bridge, port, vlan->vid,
+ untagged, pvid);
+}
+
+static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
+ struct gswip_priv *priv = ds->priv;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+
+ /* We have to receive all packets on the CPU port and should not
+ * do any VLAN filtering here. This callback is also invoked with
+ * a NULL bridge, in which case we do not know which bridge to
+ * configure.
+ */
+ if (dsa_is_cpu_port(ds, port))
+ return 0;
+
+ return gswip_vlan_remove(priv, bridge, port, vlan->vid, pvid, true);
+}
+
+static void gswip_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct gswip_priv *priv = ds->priv;
+ struct gswip_pce_table_entry mac_bridge = {0,};
+ int i;
+ int err;
+
+ for (i = 0; i < 2048; i++) {
+ mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
+ mac_bridge.index = i;
+
+ err = gswip_pce_table_entry_read(priv, &mac_bridge);
+ if (err) {
+ dev_err(priv->dev, "failed to read mac bridge: %d\n",
+ err);
+ return;
+ }
+
+ if (!mac_bridge.valid)
+ continue;
+
+ if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC)
+ continue;
+
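+ /* Dynamically learned entries carry their source port in bits
+ * 7:4 of val[0] (inferred from the matching check in
+ * gswip_port_fdb_dump()); only age out entries learned on this
+ * port.
+ */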
+ if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) != port)
+ continue;
+
+ mac_bridge.valid = false;
+ err = gswip_pce_table_entry_write(priv, &mac_bridge);
+ if (err) {
+ dev_err(priv->dev, "failed to write mac bridge: %d\n",
+ err);
+ return;
+ }
+ }
+}
+
+static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct gswip_priv *priv = ds->priv;
+ u32 stp_state;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
+ GSWIP_SDMA_PCTRLp(port));
+ return;
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LISTEN;
+ break;
+ case BR_STATE_LEARNING:
+ stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ stp_state = GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING;
+ break;
+ default:
+ dev_err(priv->dev, "invalid STP state: %d\n", state);
+ return;
+ }
+
+ gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
+ GSWIP_SDMA_PCTRLp(port));
+ gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_PSTATE_MASK, stp_state,
+ GSWIP_PCE_PCTRL_0p(port));
+}
+
+static int gswip_port_fdb(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid, bool add)
+{
+ struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
+ struct gswip_priv *priv = ds->priv;
+ struct gswip_pce_table_entry mac_bridge = {0,};
+ unsigned int max_ports = priv->hw_info->max_ports;
+ int fid = -1;
+ int i;
+ int err;
+
+ if (!bridge)
+ return -EINVAL;
+
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (priv->vlans[i].bridge == bridge) {
+ fid = priv->vlans[i].fid;
+ break;
+ }
+ }
+
+ if (fid == -1) {
+ dev_err(priv->dev, "Port not part of a bridge\n");
+ return -EINVAL;
+ }
+
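+ /* Pack the MAC address into three 16-bit key words; e.g.
+ * (illustrative) 00:11:22:33:44:55 gives key[0] = 0x4455,
+ * key[1] = 0x2233 and key[2] = 0x0011, with the FID in key[3].
+ */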
+ mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
+ mac_bridge.key_mode = true;
+ mac_bridge.key[0] = addr[5] | (addr[4] << 8);
+ mac_bridge.key[1] = addr[3] | (addr[2] << 8);
+ mac_bridge.key[2] = addr[1] | (addr[0] << 8);
+ mac_bridge.key[3] = fid;
+ mac_bridge.val[0] = add ? BIT(port) : 0; /* port map */
+ mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_STATIC;
+ mac_bridge.valid = add;
+
+ err = gswip_pce_table_entry_write(priv, &mac_bridge);
+ if (err)
+ dev_err(priv->dev, "failed to write mac bridge: %d\n", err);
+
+ return err;
+}
+
+static int gswip_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ return gswip_port_fdb(ds, port, addr, vid, true);
+}
+
+static int gswip_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ return gswip_port_fdb(ds, port, addr, vid, false);
+}
+
+static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct gswip_priv *priv = ds->priv;
+ struct gswip_pce_table_entry mac_bridge = {0,};
+ unsigned char addr[6];
+ int i;
+ int err;
+
+ for (i = 0; i < 2048; i++) {
+ mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
+ mac_bridge.index = i;
+
+ err = gswip_pce_table_entry_read(priv, &mac_bridge);
+ if (err) {
+ dev_err(priv->dev,
+ "failed to read mac bridge entry %d: %d\n",
+ i, err);
+ return err;
+ }
+
+ if (!mac_bridge.valid)
+ continue;
+
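+ /* Reassemble the MAC address from the three key words, the
+ * reverse of the packing done in gswip_port_fdb().
+ */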
+ addr[5] = mac_bridge.key[0] & 0xff;
+ addr[4] = (mac_bridge.key[0] >> 8) & 0xff;
+ addr[3] = mac_bridge.key[1] & 0xff;
+ addr[2] = (mac_bridge.key[1] >> 8) & 0xff;
+ addr[1] = mac_bridge.key[2] & 0xff;
+ addr[0] = (mac_bridge.key[2] >> 8) & 0xff;
+ if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC) {
+ if (mac_bridge.val[0] & BIT(port)) {
+ err = cb(addr, 0, true, data);
+ if (err)
+ return err;
+ }
+ } else {
+ if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port) {
+ err = cb(addr, 0, false, data);
+ if (err)
+ return err;
+ }
+ }
+ }
+ return 0;
+}
+
+static int gswip_port_max_mtu(struct dsa_switch *ds, int port)
+{
+ /* Includes 8 bytes for special header. */
+ return GSWIP_MAX_PACKET_LENGTH - VLAN_ETH_HLEN - ETH_FCS_LEN;
+}
+
+static int gswip_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct gswip_priv *priv = ds->priv;
+ int cpu_port = priv->hw_info->cpu_port;
+
+ /* The CPU port always has the maximum MTU of all user ports, so
+ * use it to set the switch frame size, including the 8 byte
+ * special header.
+ */
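+ /* For example, with the default MTU of 1500 this programs
+ * VLAN_ETH_HLEN + (1500 + 8) + ETH_FCS_LEN = 18 + 1508 + 4 =
+ * 1530 bytes into GSWIP_MAC_FLEN.
+ */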
+ if (port == cpu_port) {
+ new_mtu += 8;
+ gswip_switch_w(priv, VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN,
+ GSWIP_MAC_FLEN);
+ }
+
+ /* Enable MLEN for ports with non-standard MTUs, including the special
+ * header on the CPU port added above.
+ */
+ if (new_mtu != ETH_DATA_LEN)
+ gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
+ GSWIP_MAC_CTRL_2p(port));
+ else
+ gswip_switch_mask(priv, GSWIP_MAC_CTRL_2_MLEN, 0,
+ GSWIP_MAC_CTRL_2p(port));
+
+ return 0;
+}
+
+static void gswip_xrx200_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ switch (port) {
+ case 0:
+ case 1:
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_REVMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
+ break;
+
+ case 2:
+ case 3:
+ case 4:
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ break;
+
+ case 5:
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ break;
+ }
+
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
+}
+
+static void gswip_xrx300_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ switch (port) {
+ case 0:
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
+ break;
+
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ break;
+
+ case 5:
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
+ break;
+ }
+
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
+}
+
+static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link)
+{
+ u32 mdio_phy;
+
+ if (link)
+ mdio_phy = GSWIP_MDIO_PHY_LINK_UP;
+ else
+ mdio_phy = GSWIP_MDIO_PHY_LINK_DOWN;
+
+ gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_MASK, mdio_phy,
+ GSWIP_MDIO_PHYp(port));
+}
+
+static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed,
+ phy_interface_t interface)
+{
+ u32 mdio_phy = 0, mii_cfg = 0, mac_ctrl_0 = 0;
+
+ switch (speed) {
+ case SPEED_10:
+ mdio_phy = GSWIP_MDIO_PHY_SPEED_M10;
+
+ if (interface == PHY_INTERFACE_MODE_RMII)
+ mii_cfg = GSWIP_MII_CFG_RATE_M50;
+ else
+ mii_cfg = GSWIP_MII_CFG_RATE_M2P5;
+
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
+ break;
+
+ case SPEED_100:
+ mdio_phy = GSWIP_MDIO_PHY_SPEED_M100;
+
+ if (interface == PHY_INTERFACE_MODE_RMII)
+ mii_cfg = GSWIP_MII_CFG_RATE_M50;
+ else
+ mii_cfg = GSWIP_MII_CFG_RATE_M25;
+
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
+ break;
+
+ case SPEED_1000:
+ mdio_phy = GSWIP_MDIO_PHY_SPEED_G1;
+
+ mii_cfg = GSWIP_MII_CFG_RATE_M125;
+
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_RGMII;
+ break;
+ }
+
+ gswip_mdio_mask(priv, GSWIP_MDIO_PHY_SPEED_MASK, mdio_phy,
+ GSWIP_MDIO_PHYp(port));
+ gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_cfg, port);
+ gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_GMII_MASK, mac_ctrl_0,
+ GSWIP_MAC_CTRL_0p(port));
+}
+
+static void gswip_port_set_duplex(struct gswip_priv *priv, int port, int duplex)
+{
+ u32 mac_ctrl_0, mdio_phy;
+
+ if (duplex == DUPLEX_FULL) {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN;
+ mdio_phy = GSWIP_MDIO_PHY_FDUP_EN;
+ } else {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS;
+ mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS;
+ }
+
+ gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FDUP_MASK, mac_ctrl_0,
+ GSWIP_MAC_CTRL_0p(port));
+ gswip_mdio_mask(priv, GSWIP_MDIO_PHY_FDUP_MASK, mdio_phy,
+ GSWIP_MDIO_PHYp(port));
+}
+
+static void gswip_port_set_pause(struct gswip_priv *priv, int port,
+ bool tx_pause, bool rx_pause)
+{
+ u32 mac_ctrl_0, mdio_phy;
+
+ if (tx_pause && rx_pause) {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RXTX;
+ mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
+ GSWIP_MDIO_PHY_FCONRX_EN;
+ } else if (tx_pause) {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_TX;
+ mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
+ GSWIP_MDIO_PHY_FCONRX_DIS;
+ } else if (rx_pause) {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RX;
+ mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
+ GSWIP_MDIO_PHY_FCONRX_EN;
+ } else {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_NONE;
+ mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
+ GSWIP_MDIO_PHY_FCONRX_DIS;
+ }
+
+ gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FCON_MASK,
+ mac_ctrl_0, GSWIP_MAC_CTRL_0p(port));
+ gswip_mdio_mask(priv,
+ GSWIP_MDIO_PHY_FCONTX_MASK |
+ GSWIP_MDIO_PHY_FCONRX_MASK,
+ mdio_phy, GSWIP_MDIO_PHYp(port));
+}
+
+static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct gswip_priv *priv = ds->priv;
+ u32 miicfg = 0;
+
+ miicfg |= GSWIP_MII_CFG_LDCLKDIS;
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_INTERNAL:
+ miicfg |= GSWIP_MII_CFG_MODE_MIIM;
+ break;
+ case PHY_INTERFACE_MODE_REVMII:
+ miicfg |= GSWIP_MII_CFG_MODE_MIIP;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ miicfg |= GSWIP_MII_CFG_MODE_RGMII;
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ miicfg |= GSWIP_MII_CFG_MODE_GMII;
+ break;
+ default:
+ dev_err(ds->dev,
+ "Unsupported interface: %d\n", state->interface);
+ return;
+ }
+
+ gswip_mii_mask_cfg(priv,
+ GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK |
+ GSWIP_MII_CFG_RGMII_IBS | GSWIP_MII_CFG_LDCLKDIS,
+ miicfg, port);
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK |
+ GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK, 0, port);
+ break;
+ default:
+ break;
+ }
+}
+
+static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port);
+
+ if (!dsa_is_cpu_port(ds, port))
+ gswip_port_set_link(priv, port, false);
+}
+
+static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ if (!dsa_is_cpu_port(ds, port)) {
+ gswip_port_set_link(priv, port, true);
+ gswip_port_set_speed(priv, port, speed, interface);
+ gswip_port_set_duplex(priv, port, duplex);
+ gswip_port_set_pause(priv, port, tx_pause, rx_pause);
+ }
+
+ gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
+}
+
+static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data)
+{
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++)
+ strncpy(data + i * ETH_GSTRING_LEN, gswip_rmon_cnt[i].name,
+ ETH_GSTRING_LEN);
+}
+
+static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table,
+ u32 index)
+{
+ u32 result;
+ int err;
+
+ gswip_switch_w(priv, index, GSWIP_BM_RAM_ADDR);
+ gswip_switch_mask(priv, GSWIP_BM_RAM_CTRL_ADDR_MASK |
+ GSWIP_BM_RAM_CTRL_OPMOD,
+ table | GSWIP_BM_RAM_CTRL_BAS,
+ GSWIP_BM_RAM_CTRL);
+
+ err = gswip_switch_r_timeout(priv, GSWIP_BM_RAM_CTRL,
+ GSWIP_BM_RAM_CTRL_BAS);
+ if (err) {
+ dev_err(priv->dev, "timeout while reading table: %u, index: %u",
+ table, index);
+ return 0;
+ }
+
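+ /* The 32-bit counter value is returned in two 16-bit data words. */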
+ result = gswip_switch_r(priv, GSWIP_BM_RAM_VAL(0));
+ result |= gswip_switch_r(priv, GSWIP_BM_RAM_VAL(1)) << 16;
+
+ return result;
+}
+
+static void gswip_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct gswip_priv *priv = ds->priv;
+ const struct gswip_rmon_cnt_desc *rmon_cnt;
+ int i;
+ u64 high;
+
+ for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) {
+ rmon_cnt = &gswip_rmon_cnt[i];
+
+ data[i] = gswip_bcm_ram_entry_read(priv, port,
+ rmon_cnt->offset);
+ if (rmon_cnt->size == 2) {
+ high = gswip_bcm_ram_entry_read(priv, port,
+ rmon_cnt->offset + 1);
+ data[i] |= high << 32;
+ }
+ }
+}
+
+static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ return ARRAY_SIZE(gswip_rmon_cnt);
+}
+
+static const struct dsa_switch_ops gswip_xrx200_switch_ops = {
+ .get_tag_protocol = gswip_get_tag_protocol,
+ .setup = gswip_setup,
+ .port_enable = gswip_port_enable,
+ .port_disable = gswip_port_disable,
+ .port_bridge_join = gswip_port_bridge_join,
+ .port_bridge_leave = gswip_port_bridge_leave,
+ .port_fast_age = gswip_port_fast_age,
+ .port_vlan_filtering = gswip_port_vlan_filtering,
+ .port_vlan_add = gswip_port_vlan_add,
+ .port_vlan_del = gswip_port_vlan_del,
+ .port_stp_state_set = gswip_port_stp_state_set,
+ .port_fdb_add = gswip_port_fdb_add,
+ .port_fdb_del = gswip_port_fdb_del,
+ .port_fdb_dump = gswip_port_fdb_dump,
+ .port_change_mtu = gswip_port_change_mtu,
+ .port_max_mtu = gswip_port_max_mtu,
+ .phylink_get_caps = gswip_xrx200_phylink_get_caps,
+ .phylink_mac_config = gswip_phylink_mac_config,
+ .phylink_mac_link_down = gswip_phylink_mac_link_down,
+ .phylink_mac_link_up = gswip_phylink_mac_link_up,
+ .get_strings = gswip_get_strings,
+ .get_ethtool_stats = gswip_get_ethtool_stats,
+ .get_sset_count = gswip_get_sset_count,
+};
+
+static const struct dsa_switch_ops gswip_xrx300_switch_ops = {
+ .get_tag_protocol = gswip_get_tag_protocol,
+ .setup = gswip_setup,
+ .port_enable = gswip_port_enable,
+ .port_disable = gswip_port_disable,
+ .port_bridge_join = gswip_port_bridge_join,
+ .port_bridge_leave = gswip_port_bridge_leave,
+ .port_fast_age = gswip_port_fast_age,
+ .port_vlan_filtering = gswip_port_vlan_filtering,
+ .port_vlan_add = gswip_port_vlan_add,
+ .port_vlan_del = gswip_port_vlan_del,
+ .port_stp_state_set = gswip_port_stp_state_set,
+ .port_fdb_add = gswip_port_fdb_add,
+ .port_fdb_del = gswip_port_fdb_del,
+ .port_fdb_dump = gswip_port_fdb_dump,
+ .port_change_mtu = gswip_port_change_mtu,
+ .port_max_mtu = gswip_port_max_mtu,
+ .phylink_get_caps = gswip_xrx300_phylink_get_caps,
+ .phylink_mac_config = gswip_phylink_mac_config,
+ .phylink_mac_link_down = gswip_phylink_mac_link_down,
+ .phylink_mac_link_up = gswip_phylink_mac_link_up,
+ .get_strings = gswip_get_strings,
+ .get_ethtool_stats = gswip_get_ethtool_stats,
+ .get_sset_count = gswip_get_sset_count,
+};
+
+static const struct xway_gphy_match_data xrx200a1x_gphy_data = {
+ .fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin",
+ .ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin",
+};
+
+static const struct xway_gphy_match_data xrx200a2x_gphy_data = {
+ .fe_firmware_name = "lantiq/xrx200_phy22f_a22.bin",
+ .ge_firmware_name = "lantiq/xrx200_phy11g_a22.bin",
+};
+
+static const struct xway_gphy_match_data xrx300_gphy_data = {
+ .fe_firmware_name = "lantiq/xrx300_phy22f_a21.bin",
+ .ge_firmware_name = "lantiq/xrx300_phy11g_a21.bin",
+};
+
+static const struct of_device_id xway_gphy_match[] = {
+ { .compatible = "lantiq,xrx200-gphy-fw", .data = NULL },
+ { .compatible = "lantiq,xrx200a1x-gphy-fw", .data = &xrx200a1x_gphy_data },
+ { .compatible = "lantiq,xrx200a2x-gphy-fw", .data = &xrx200a2x_gphy_data },
+ { .compatible = "lantiq,xrx300-gphy-fw", .data = &xrx300_gphy_data },
+ { .compatible = "lantiq,xrx330-gphy-fw", .data = &xrx300_gphy_data },
+ {},
+};
+
+static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gphy_fw)
+{
+ struct device *dev = priv->dev;
+ const struct firmware *fw;
+ void *fw_addr;
+ dma_addr_t dma_addr;
+ dma_addr_t dev_addr;
+ size_t size;
+ int ret;
+
+ ret = clk_prepare_enable(gphy_fw->clk_gate);
+ if (ret)
+ return ret;
+
+ reset_control_assert(gphy_fw->reset);
+
+ /* The vendor BSP uses a 200ms delay after asserting the reset line.
+ * Without this delay some users observed that the PHY did not come
+ * up on the MDIO bus.
+ */
+ msleep(200);
+
+ ret = request_firmware(&fw, gphy_fw->fw_name, dev);
+ if (ret) {
+ dev_err(dev, "failed to load firmware: %s, error: %i\n",
+ gphy_fw->fw_name, ret);
+ return ret;
+ }
+
+ /* GPHY cores need the firmware code in a persistent and contiguous
+ * memory area with a 16 kB boundary aligned start address.
+ */
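+ /* Illustrative example (addresses are made up): with 16 kB
+ * alignment a dma_addr of 0x80001234 becomes a dev_addr of
+ * 0x80004000; over-allocating by XRX200_GPHY_FW_ALIGN bytes
+ * guarantees the aligned copy still fits in the buffer.
+ */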
+ size = fw->size + XRX200_GPHY_FW_ALIGN;
+
+ fw_addr = dmam_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
+ if (fw_addr) {
+ fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN);
+ dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN);
+ memcpy(fw_addr, fw->data, fw->size);
+ } else {
+ dev_err(dev, "failed to alloc firmware memory\n");
+ release_firmware(fw);
+ return -ENOMEM;
+ }
+
+ release_firmware(fw);
+
+ ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, dev_addr);
+ if (ret)
+ return ret;
+
+ reset_control_deassert(gphy_fw->reset);
+
+ return ret;
+}
+
+static int gswip_gphy_fw_probe(struct gswip_priv *priv,
+ struct gswip_gphy_fw *gphy_fw,
+ struct device_node *gphy_fw_np, int i)
+{
+ struct device *dev = priv->dev;
+ u32 gphy_mode;
+ int ret;
+ char gphyname[10];
+
+ snprintf(gphyname, sizeof(gphyname), "gphy%d", i);
+
+ gphy_fw->clk_gate = devm_clk_get(dev, gphyname);
+ if (IS_ERR(gphy_fw->clk_gate)) {
+ dev_err(dev, "Failed to lookup gate clock\n");
+ return PTR_ERR(gphy_fw->clk_gate);
+ }
+
+ ret = of_property_read_u32(gphy_fw_np, "reg", &gphy_fw->fw_addr_offset);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(gphy_fw_np, "lantiq,gphy-mode", &gphy_mode);
+ /* Default to GE mode */
+ if (ret)
+ gphy_mode = GPHY_MODE_GE;
+
+ switch (gphy_mode) {
+ case GPHY_MODE_FE:
+ gphy_fw->fw_name = priv->gphy_fw_name_cfg->fe_firmware_name;
+ break;
+ case GPHY_MODE_GE:
+ gphy_fw->fw_name = priv->gphy_fw_name_cfg->ge_firmware_name;
+ break;
+ default:
+ dev_err(dev, "Unknown GPHY mode %d\n", gphy_mode);
+ return -EINVAL;
+ }
+
+ gphy_fw->reset = of_reset_control_array_get_exclusive(gphy_fw_np);
+ if (IS_ERR(gphy_fw->reset))
+ return dev_err_probe(dev, PTR_ERR(gphy_fw->reset),
+ "Failed to lookup gphy reset\n");
+
+ return gswip_gphy_fw_load(priv, gphy_fw);
+}
+
+static void gswip_gphy_fw_remove(struct gswip_priv *priv,
+ struct gswip_gphy_fw *gphy_fw)
+{
+ int ret;
+
+ /* check if the device was fully probed */
+ if (!gphy_fw->fw_name)
+ return;
+
+ ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, 0);
+ if (ret)
+ dev_err(priv->dev, "can not reset GPHY FW pointer");
+
+ clk_disable_unprepare(gphy_fw->clk_gate);
+
+ reset_control_put(gphy_fw->reset);
+}
+
+static int gswip_gphy_fw_list(struct gswip_priv *priv,
+ struct device_node *gphy_fw_list_np, u32 version)
+{
+ struct device *dev = priv->dev;
+ struct device_node *gphy_fw_np;
+ const struct of_device_id *match;
+ int err;
+ int i = 0;
+
+ /* The VRX200 rev 1.1 uses the GSWIP 2.0 and needs the older
+ * GPHY firmware. The VRX200 rev 1.2 uses the GSWIP 2.1 and also
+ * needs a different GPHY firmware.
+ */
+ if (of_device_is_compatible(gphy_fw_list_np, "lantiq,xrx200-gphy-fw")) {
+ switch (version) {
+ case GSWIP_VERSION_2_0:
+ priv->gphy_fw_name_cfg = &xrx200a1x_gphy_data;
+ break;
+ case GSWIP_VERSION_2_1:
+ priv->gphy_fw_name_cfg = &xrx200a2x_gphy_data;
+ break;
+ default:
+ dev_err(dev, "unknown GSWIP version: 0x%x", version);
+ return -ENOENT;
+ }
+ }
+
+ match = of_match_node(xway_gphy_match, gphy_fw_list_np);
+ if (match && match->data)
+ priv->gphy_fw_name_cfg = match->data;
+
+ if (!priv->gphy_fw_name_cfg) {
+ dev_err(dev, "GPHY compatible type not supported");
+ return -ENOENT;
+ }
+
+ priv->num_gphy_fw = of_get_available_child_count(gphy_fw_list_np);
+ if (!priv->num_gphy_fw)
+ return -ENOENT;
+
+ priv->rcu_regmap = syscon_regmap_lookup_by_phandle(gphy_fw_list_np,
+ "lantiq,rcu");
+ if (IS_ERR(priv->rcu_regmap))
+ return PTR_ERR(priv->rcu_regmap);
+
+ priv->gphy_fw = devm_kmalloc_array(dev, priv->num_gphy_fw,
+ sizeof(*priv->gphy_fw),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!priv->gphy_fw)
+ return -ENOMEM;
+
+ for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
+ err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
+ gphy_fw_np, i);
+ if (err) {
+ of_node_put(gphy_fw_np);
+ goto remove_gphy;
+ }
+ i++;
+ }
+
+ /* The standalone PHY11G requires 300ms to be fully
+ * initialized and ready for any MDIO communication after being
+ * taken out of reset. For the SoC-internal GPHY variant there
+ * is no (known) documentation for the minimum time after a
+ * reset. Use the same value as for the standalone variant as
+ * some users have reported internal PHYs not being detected
+ * without any delay.
+ */
+ msleep(300);
+
+ return 0;
+
+remove_gphy:
+ for (i = 0; i < priv->num_gphy_fw; i++)
+ gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
+ return err;
+}
+
+static int gswip_probe(struct platform_device *pdev)
+{
+ struct gswip_priv *priv;
+ struct device_node *np, *mdio_np, *gphy_fw_np;
+ struct device *dev = &pdev->dev;
+ int err;
+ int i;
+ u32 version;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->gswip = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->gswip))
+ return PTR_ERR(priv->gswip);
+
+ priv->mdio = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(priv->mdio))
+ return PTR_ERR(priv->mdio);
+
+ priv->mii = devm_platform_ioremap_resource(pdev, 2);
+ if (IS_ERR(priv->mii))
+ return PTR_ERR(priv->mii);
+
+ priv->hw_info = of_device_get_match_data(dev);
+ if (!priv->hw_info)
+ return -EINVAL;
+
+ priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
+ if (!priv->ds)
+ return -ENOMEM;
+
+ priv->ds->dev = dev;
+ priv->ds->num_ports = priv->hw_info->max_ports;
+ priv->ds->priv = priv;
+ priv->ds->ops = priv->hw_info->ops;
+ priv->dev = dev;
+ mutex_init(&priv->pce_table_lock);
+ version = gswip_switch_r(priv, GSWIP_VERSION);
+
+ np = dev->of_node;
+ switch (version) {
+ case GSWIP_VERSION_2_0:
+ case GSWIP_VERSION_2_1:
+ if (!of_device_is_compatible(np, "lantiq,xrx200-gswip"))
+ return -EINVAL;
+ break;
+ case GSWIP_VERSION_2_2:
+ case GSWIP_VERSION_2_2_ETC:
+ if (!of_device_is_compatible(np, "lantiq,xrx300-gswip") &&
+ !of_device_is_compatible(np, "lantiq,xrx330-gswip"))
+ return -EINVAL;
+ break;
+ default:
+ dev_err(dev, "unknown GSWIP version: 0x%x", version);
+ return -ENOENT;
+ }
+
+ /* load the GPHY firmware, if a firmware node is present */
+ gphy_fw_np = of_get_compatible_child(dev->of_node, "lantiq,gphy-fw");
+ if (gphy_fw_np) {
+ err = gswip_gphy_fw_list(priv, gphy_fw_np, version);
+ of_node_put(gphy_fw_np);
+ if (err) {
+ dev_err(dev, "gphy fw probe failed\n");
+ return err;
+ }
+ }
+
+ /* bring up the mdio bus */
+ mdio_np = of_get_compatible_child(dev->of_node, "lantiq,xrx200-mdio");
+ if (mdio_np) {
+ err = gswip_mdio(priv, mdio_np);
+ if (err) {
+ dev_err(dev, "mdio probe failed\n");
+ goto put_mdio_node;
+ }
+ }
+
+ err = dsa_register_switch(priv->ds);
+ if (err) {
+ dev_err(dev, "dsa switch register failed: %i\n", err);
+ goto mdio_bus;
+ }
+ if (!dsa_is_cpu_port(priv->ds, priv->hw_info->cpu_port)) {
+ dev_err(dev, "wrong CPU port defined, HW only supports port: %i",
+ priv->hw_info->cpu_port);
+ err = -EINVAL;
+ goto disable_switch;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ dev_info(dev, "probed GSWIP version %lx mod %lx\n",
+ (version & GSWIP_VERSION_REV_MASK) >> GSWIP_VERSION_REV_SHIFT,
+ (version & GSWIP_VERSION_MOD_MASK) >> GSWIP_VERSION_MOD_SHIFT);
+ return 0;
+
+disable_switch:
+ gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
+ dsa_unregister_switch(priv->ds);
+mdio_bus:
+ if (mdio_np) {
+ mdiobus_unregister(priv->ds->slave_mii_bus);
+ mdiobus_free(priv->ds->slave_mii_bus);
+ }
+put_mdio_node:
+ of_node_put(mdio_np);
+ for (i = 0; i < priv->num_gphy_fw; i++)
+ gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
+ return err;
+}
+
+static int gswip_remove(struct platform_device *pdev)
+{
+ struct gswip_priv *priv = platform_get_drvdata(pdev);
+ int i;
+
+ if (!priv)
+ return 0;
+
+ /* disable the switch */
+ gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
+
+ dsa_unregister_switch(priv->ds);
+
+ if (priv->ds->slave_mii_bus) {
+ mdiobus_unregister(priv->ds->slave_mii_bus);
+ of_node_put(priv->ds->slave_mii_bus->dev.of_node);
+ mdiobus_free(priv->ds->slave_mii_bus);
+ }
+
+ for (i = 0; i < priv->num_gphy_fw; i++)
+ gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
+
+ return 0;
+}
+
+static void gswip_shutdown(struct platform_device *pdev)
+{
+ struct gswip_priv *priv = platform_get_drvdata(pdev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static const struct gswip_hw_info gswip_xrx200 = {
+ .max_ports = 7,
+ .cpu_port = 6,
+ .ops = &gswip_xrx200_switch_ops,
+};
+
+static const struct gswip_hw_info gswip_xrx300 = {
+ .max_ports = 7,
+ .cpu_port = 6,
+ .ops = &gswip_xrx300_switch_ops,
+};
+
+static const struct of_device_id gswip_of_match[] = {
+ { .compatible = "lantiq,xrx200-gswip", .data = &gswip_xrx200 },
+ { .compatible = "lantiq,xrx300-gswip", .data = &gswip_xrx300 },
+ { .compatible = "lantiq,xrx330-gswip", .data = &gswip_xrx300 },
+ {},
+};
+MODULE_DEVICE_TABLE(of, gswip_of_match);
+
+static struct platform_driver gswip_driver = {
+ .probe = gswip_probe,
+ .remove = gswip_remove,
+ .shutdown = gswip_shutdown,
+ .driver = {
+ .name = "gswip",
+ .of_match_table = gswip_of_match,
+ },
+};
+
+module_platform_driver(gswip_driver);
+
+MODULE_FIRMWARE("lantiq/xrx300_phy11g_a21.bin");
+MODULE_FIRMWARE("lantiq/xrx300_phy22f_a21.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy11g_a14.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy11g_a22.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy22f_a14.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy22f_a22.bin");
+MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
+MODULE_DESCRIPTION("Lantiq / Intel GSWIP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/lantiq_pce.h b/drivers/net/dsa/lantiq_pce.h
new file mode 100644
index 000000000..e2be31f36
--- /dev/null
+++ b/drivers/net/dsa/lantiq_pce.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * PCE microcode extracted from UGW 7.1.1 switch api
+ *
+ * Copyright (c) 2012, 2014, 2015 Lantiq Deutschland GmbH
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
+ * Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de>
+ */
+
+enum {
+ OUT_MAC0 = 0,
+ OUT_MAC1,
+ OUT_MAC2,
+ OUT_MAC3,
+ OUT_MAC4,
+ OUT_MAC5,
+ OUT_ETHTYP,
+ OUT_VTAG0,
+ OUT_VTAG1,
+ OUT_ITAG0,
+ OUT_ITAG1, /*10 */
+ OUT_ITAG2,
+ OUT_ITAG3,
+ OUT_IP0,
+ OUT_IP1,
+ OUT_IP2,
+ OUT_IP3,
+ OUT_SIP0,
+ OUT_SIP1,
+ OUT_SIP2,
+ OUT_SIP3, /*20*/
+ OUT_SIP4,
+ OUT_SIP5,
+ OUT_SIP6,
+ OUT_SIP7,
+ OUT_DIP0,
+ OUT_DIP1,
+ OUT_DIP2,
+ OUT_DIP3,
+ OUT_DIP4,
+ OUT_DIP5, /*30*/
+ OUT_DIP6,
+ OUT_DIP7,
+ OUT_SESID,
+ OUT_PROT,
+ OUT_APP0,
+ OUT_APP1,
+ OUT_IGMP0,
+ OUT_IGMP1,
+ OUT_IPOFF, /*39*/
+ OUT_NONE = 63,
+};
+
+/* parser's microcode length type */
+#define INSTR 0
+#define IPV6 1
+#define LENACCU 2
+
+/* parser's microcode flag type */
+enum {
+ FLAG_ITAG = 0,
+ FLAG_VLAN,
+ FLAG_SNAP,
+ FLAG_PPPOE,
+ FLAG_IPV6,
+ FLAG_IPV6FL,
+ FLAG_IPV4,
+ FLAG_IGMP,
+ FLAG_TU,
+ FLAG_HOP,
+ FLAG_NN1, /*10 */
+ FLAG_NN2,
+ FLAG_END,
+ FLAG_NO, /*13*/
+};
+
+struct gswip_pce_microcode {
+ u16 val_3;
+ u16 val_2;
+ u16 val_1;
+ u16 val_0;
+};
+
+#define MC_ENTRY(val, msk, ns, out, len, type, flags, ipv4_len) \
+ { val, msk, ((ns) << 10 | (out) << 4 | (len) >> 1),\
+ ((len) & 1) << 15 | (type) << 13 | (flags) << 9 | (ipv4_len) << 8 }
+static const struct gswip_pce_microcode gswip_pce_microcode[] = {
+ /* value mask ns fields L type flags ipv4_len */
+ MC_ENTRY(0x88c3, 0xFFFF, 1, OUT_ITAG0, 4, INSTR, FLAG_ITAG, 0),
+ MC_ENTRY(0x8100, 0xFFFF, 2, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
+ MC_ENTRY(0x88A8, 0xFFFF, 1, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
+ MC_ENTRY(0x8100, 0xFFFF, 1, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
+ MC_ENTRY(0x8864, 0xFFFF, 17, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0800, 0xFFFF, 21, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x86DD, 0xFFFF, 22, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x8863, 0xFFFF, 16, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0000, 0xF800, 10, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0000, 0x0000, 40, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0600, 0x0600, 40, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0000, 0x0000, 12, OUT_NONE, 1, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0xAAAA, 0xFFFF, 14, OUT_NONE, 1, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0300, 0xFF00, 41, OUT_NONE, 0, INSTR, FLAG_SNAP, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_DIP7, 3, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0000, 0x0000, 18, OUT_DIP7, 3, INSTR, FLAG_PPPOE, 0),
+ MC_ENTRY(0x0021, 0xFFFF, 21, OUT_NONE, 1, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0057, 0xFFFF, 22, OUT_NONE, 1, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0000, 0x0000, 40, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x4000, 0xF000, 24, OUT_IP0, 4, INSTR, FLAG_IPV4, 1),
+ MC_ENTRY(0x6000, 0xF000, 27, OUT_IP0, 3, INSTR, FLAG_IPV6, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0000, 0x0000, 25, OUT_IP3, 2, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0000, 0x0000, 26, OUT_SIP0, 4, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0000, 0x0000, 40, OUT_NONE, 0, LENACCU, FLAG_NO, 0),
+ MC_ENTRY(0x1100, 0xFF00, 39, OUT_PROT, 1, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0600, 0xFF00, 39, OUT_PROT, 1, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0000, 0xFF00, 33, OUT_IP3, 17, INSTR, FLAG_HOP, 0),
+ MC_ENTRY(0x2B00, 0xFF00, 33, OUT_IP3, 17, INSTR, FLAG_NN1, 0),
+ MC_ENTRY(0x3C00, 0xFF00, 33, OUT_IP3, 17, INSTR, FLAG_NN2, 0),
+ MC_ENTRY(0x0000, 0x0000, 39, OUT_PROT, 1, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0000, 0x00E0, 35, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0000, 0x0000, 40, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0000, 0xFF00, 33, OUT_NONE, 0, IPV6, FLAG_HOP, 0),
+ MC_ENTRY(0x2B00, 0xFF00, 33, OUT_NONE, 0, IPV6, FLAG_NN1, 0),
+ MC_ENTRY(0x3C00, 0xFF00, 33, OUT_NONE, 0, IPV6, FLAG_NN2, 0),
+ MC_ENTRY(0x0000, 0x0000, 40, OUT_PROT, 1, IPV6, FLAG_NO, 0),
+ MC_ENTRY(0x0000, 0x0000, 40, OUT_SIP0, 16, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_APP0, 4, INSTR, FLAG_IGMP, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+};
diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig
new file mode 100644
index 000000000..06b1efdb5
--- /dev/null
+++ b/drivers/net/dsa/microchip/Kconfig
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig NET_DSA_MICROCHIP_KSZ_COMMON
+ tristate "Microchip KSZ8795/KSZ9477/LAN937x series switch support"
+ depends on NET_DSA
+ select NET_DSA_TAG_KSZ
+ help
+ This driver adds support for the Microchip KSZ8795/KSZ88x3,
+ KSZ9477 and LAN937x series switch chips.
+
+config NET_DSA_MICROCHIP_KSZ9477_I2C
+ tristate "KSZ series I2C connected switch driver"
+ depends on NET_DSA_MICROCHIP_KSZ_COMMON && I2C
+ select REGMAP_I2C
+ help
+ Select to enable support for registering switches configured through I2C.
+
+config NET_DSA_MICROCHIP_KSZ_SPI
+ tristate "KSZ series SPI connected switch driver"
+ depends on NET_DSA_MICROCHIP_KSZ_COMMON && SPI
+ select REGMAP_SPI
+ help
+ Select to enable support for registering switches configured through SPI.
+
+config NET_DSA_MICROCHIP_KSZ8863_SMI
+ tristate "KSZ series SMI connected switch driver"
+ depends on NET_DSA_MICROCHIP_KSZ_COMMON
+ select MDIO_BITBANG
+ help
+ Select to enable support for registering switches configured through
+ Microchip SMI. It supports the KSZ8863 and KSZ8873 switches.
diff --git a/drivers/net/dsa/microchip/Makefile b/drivers/net/dsa/microchip/Makefile
new file mode 100644
index 000000000..28873559e
--- /dev/null
+++ b/drivers/net/dsa/microchip/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON) += ksz_switch.o
+ksz_switch-objs := ksz_common.o
+ksz_switch-objs += ksz9477.o
+ksz_switch-objs += ksz8795.o
+ksz_switch-objs += lan937x_main.o
+obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477_I2C) += ksz9477_i2c.o
+obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_SPI) += ksz_spi.o
+obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8863_SMI) += ksz8863_smi.o
diff --git a/drivers/net/dsa/microchip/ksz8.h b/drivers/net/dsa/microchip/ksz8.h
new file mode 100644
index 000000000..28137c4bf
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz8.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Microchip KSZ8XXX series register access
+ *
+ * Copyright (C) 2020 Pengutronix, Michael Grzeschik <kernel@pengutronix.de>
+ */
+
+#ifndef __KSZ8XXX_H
+#define __KSZ8XXX_H
+
+#include <linux/types.h>
+#include <net/dsa.h>
+#include "ksz_common.h"
+
+int ksz8_setup(struct dsa_switch *ds);
+u32 ksz8_get_port_addr(int port, int offset);
+void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member);
+void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port);
+void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port);
+int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
+int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
+int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
+ u8 *fid, u8 *src_port, u8 *timestamp, u16 *entries);
+void ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu);
+void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt);
+void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt);
+void ksz8_freeze_mib(struct ksz_device *dev, int port, bool freeze);
+void ksz8_port_init_cnt(struct ksz_device *dev, int port);
+int ksz8_fdb_dump(struct ksz_device *dev, int port,
+ dsa_fdb_dump_cb_t *cb, void *data);
+int ksz8_mdb_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db);
+int ksz8_mdb_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db);
+int ksz8_port_vlan_filtering(struct ksz_device *dev, int port, bool flag,
+ struct netlink_ext_ack *extack);
+int ksz8_port_vlan_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack);
+int ksz8_port_vlan_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+int ksz8_port_mirror_add(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack);
+void ksz8_port_mirror_del(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror);
+int ksz8_get_stp_reg(void);
+void ksz8_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config);
+void ksz8_config_cpu_port(struct dsa_switch *ds);
+int ksz8_enable_stp_addr(struct ksz_device *dev);
+int ksz8_reset_switch(struct ksz_device *dev);
+int ksz8_switch_detect(struct ksz_device *dev);
+int ksz8_switch_init(struct ksz_device *dev);
+void ksz8_switch_exit(struct ksz_device *dev);
+
+#endif
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
new file mode 100644
index 000000000..c63e082dc
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -0,0 +1,1420 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip KSZ8795 switch driver
+ *
+ * Copyright (C) 2017 Microchip Technology Inc.
+ * Tristram Ha <Tristram.Ha@microchip.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/gpio.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_data/microchip-ksz.h>
+#include <linux/phy.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/micrel_phy.h>
+#include <net/dsa.h>
+#include <net/switchdev.h>
+#include <linux/phylink.h>
+
+#include "ksz_common.h"
+#include "ksz8795_reg.h"
+#include "ksz8.h"
+
+static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
+{
+ regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0);
+}
+
+static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
+ bool set)
+{
+ regmap_update_bits(dev->regmap[0], PORT_CTRL_ADDR(port, offset),
+ bits, set ? bits : 0);
+}
+
+static int ksz8_ind_write8(struct ksz_device *dev, u8 table, u16 addr, u8 data)
+{
+ const u16 *regs;
+ u16 ctrl_addr;
+ int ret = 0;
+
+ regs = dev->info->regs;
+
+ mutex_lock(&dev->alu_mutex);
+
+ ctrl_addr = IND_ACC_TABLE(table) | addr;
+ ret = ksz_write8(dev, regs[REG_IND_BYTE], data);
+ if (!ret)
+ ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+
+ mutex_unlock(&dev->alu_mutex);
+
+ return ret;
+}
+
+int ksz8_reset_switch(struct ksz_device *dev)
+{
+ if (ksz_is_ksz88x3(dev)) {
+ /* reset switch */
+ ksz_cfg(dev, KSZ8863_REG_SW_RESET,
+ KSZ8863_GLOBAL_SOFTWARE_RESET | KSZ8863_PCS_RESET, true);
+ ksz_cfg(dev, KSZ8863_REG_SW_RESET,
+ KSZ8863_GLOBAL_SOFTWARE_RESET | KSZ8863_PCS_RESET, false);
+ } else {
+ /* reset switch */
+ ksz_write8(dev, REG_POWER_MANAGEMENT_1,
+ SW_SOFTWARE_POWER_DOWN << SW_POWER_MANAGEMENT_MODE_S);
+ ksz_write8(dev, REG_POWER_MANAGEMENT_1, 0);
+ }
+
+ return 0;
+}
+
+static void ksz8795_set_prio_queue(struct ksz_device *dev, int port, int queue)
+{
+ u8 hi, lo;
+
+ /* Number of queues can only be 1, 2, or 4; treat 3 as 4. */
+ switch (queue) {
+ case 4:
+ case 3:
+ queue = PORT_QUEUE_SPLIT_4;
+ break;
+ case 2:
+ queue = PORT_QUEUE_SPLIT_2;
+ break;
+ default:
+ queue = PORT_QUEUE_SPLIT_1;
+ }
+ ksz_pread8(dev, port, REG_PORT_CTRL_0, &lo);
+ ksz_pread8(dev, port, P_DROP_TAG_CTRL, &hi);
+ lo &= ~PORT_QUEUE_SPLIT_L;
+ if (queue & PORT_QUEUE_SPLIT_2)
+ lo |= PORT_QUEUE_SPLIT_L;
+ hi &= ~PORT_QUEUE_SPLIT_H;
+ if (queue & PORT_QUEUE_SPLIT_4)
+ hi |= PORT_QUEUE_SPLIT_H;
+ ksz_pwrite8(dev, port, REG_PORT_CTRL_0, lo);
+ ksz_pwrite8(dev, port, P_DROP_TAG_CTRL, hi);
+
+ /* Default is port based for egress rate limit. */
+ if (queue != PORT_QUEUE_SPLIT_1)
+ ksz_cfg(dev, REG_SW_CTRL_19, SW_OUT_RATE_LIMIT_QUEUE_BASED,
+ true);
+}
+
+void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
+{
+ const u32 *masks;
+ const u16 *regs;
+ u16 ctrl_addr;
+ u32 data;
+ u8 check;
+ int loop;
+
+ masks = dev->info->masks;
+ regs = dev->info->regs;
+
+ ctrl_addr = addr + dev->info->reg_mib_cnt * port;
+ ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);
+
+ mutex_lock(&dev->alu_mutex);
+ ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+
+ /* The valid bit is almost always already set because of the slow
+ * SPI speed.
+ */
+ for (loop = 2; loop > 0; loop--) {
+ ksz_read8(dev, regs[REG_IND_MIB_CHECK], &check);
+
+ if (check & masks[MIB_COUNTER_VALID]) {
+ ksz_read32(dev, regs[REG_IND_DATA_LO], &data);
+ if (check & masks[MIB_COUNTER_OVERFLOW])
+ *cnt += MIB_COUNTER_VALUE + 1;
+ *cnt += data & MIB_COUNTER_VALUE;
+ break;
+ }
+ }
+ mutex_unlock(&dev->alu_mutex);
+}
+
+static void ksz8795_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt)
+{
+ const u32 *masks;
+ const u16 *regs;
+ u16 ctrl_addr;
+ u32 data;
+ u8 check;
+ int loop;
+
+ masks = dev->info->masks;
+ regs = dev->info->regs;
+
+ addr -= dev->info->reg_mib_cnt;
+ ctrl_addr = (KSZ8795_MIB_TOTAL_RX_1 - KSZ8795_MIB_TOTAL_RX_0) * port;
+ ctrl_addr += addr + KSZ8795_MIB_TOTAL_RX_0;
+ ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);
+
+ mutex_lock(&dev->alu_mutex);
+ ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+
+ /* The valid bit is almost always already set because of the slow
+ * SPI speed.
+ */
+ for (loop = 2; loop > 0; loop--) {
+ ksz_read8(dev, regs[REG_IND_MIB_CHECK], &check);
+
+ if (check & masks[MIB_COUNTER_VALID]) {
+ ksz_read32(dev, regs[REG_IND_DATA_LO], &data);
+ if (addr < 2) {
+ u64 total;
+
+ total = check & MIB_TOTAL_BYTES_H;
+ total <<= 32;
+ *cnt += total;
+ *cnt += data;
+ if (check & masks[MIB_COUNTER_OVERFLOW]) {
+ total = MIB_TOTAL_BYTES_H + 1;
+ total <<= 32;
+ *cnt += total;
+ }
+ } else {
+ if (check & masks[MIB_COUNTER_OVERFLOW])
+ *cnt += MIB_PACKET_DROPPED + 1;
+ *cnt += data & MIB_PACKET_DROPPED;
+ }
+ break;
+ }
+ }
+ mutex_unlock(&dev->alu_mutex);
+}
+
+static void ksz8863_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt)
+{
+ u32 *last = (u32 *)dropped;
+ const u16 *regs;
+ u16 ctrl_addr;
+ u32 data;
+ u32 cur;
+
+ regs = dev->info->regs;
+
+ addr -= dev->info->reg_mib_cnt;
+ ctrl_addr = addr ? KSZ8863_MIB_PACKET_DROPPED_TX_0 :
+ KSZ8863_MIB_PACKET_DROPPED_RX_0;
+ ctrl_addr += port;
+ ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);
+
+ mutex_lock(&dev->alu_mutex);
+ ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+ ksz_read32(dev, regs[REG_IND_DATA_LO], &data);
+ mutex_unlock(&dev->alu_mutex);
+
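+ /* This MIB counter is not cleared on read: the last raw value is
+ * kept in the storage behind *dropped and a wraparound-safe delta
+ * is accumulated into *cnt (inferred from the code below).
+ */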
+ data &= MIB_PACKET_DROPPED;
+ cur = last[addr];
+ if (data != cur) {
+ last[addr] = data;
+ if (data < cur)
+ data += MIB_PACKET_DROPPED + 1;
+ data -= cur;
+ *cnt += data;
+ }
+}
+
+void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt)
+{
+ if (ksz_is_ksz88x3(dev))
+ ksz8863_r_mib_pkt(dev, port, addr, dropped, cnt);
+ else
+ ksz8795_r_mib_pkt(dev, port, addr, dropped, cnt);
+}
+
+void ksz8_freeze_mib(struct ksz_device *dev, int port, bool freeze)
+{
+ if (ksz_is_ksz88x3(dev))
+ return;
+
+ /* enable the port for flush/freeze function */
+ if (freeze)
+ ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), true);
+ ksz_cfg(dev, REG_SW_CTRL_6, SW_MIB_COUNTER_FREEZE, freeze);
+
+ /* disable the port after freeze is done */
+ if (!freeze)
+ ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), false);
+}
+
+void ksz8_port_init_cnt(struct ksz_device *dev, int port)
+{
+ struct ksz_port_mib *mib = &dev->ports[port].mib;
+ u64 *dropped;
+
+ if (!ksz_is_ksz88x3(dev)) {
+ /* flush all enabled port MIB counters */
+ ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), true);
+ ksz_cfg(dev, REG_SW_CTRL_6, SW_MIB_COUNTER_FLUSH, true);
+ ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), false);
+ }
+
+ mib->cnt_ptr = 0;
+
+ /* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */
+ while (mib->cnt_ptr < dev->info->reg_mib_cnt) {
+ dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr,
+ &mib->counters[mib->cnt_ptr]);
+ ++mib->cnt_ptr;
+ }
+
+ /* last one in storage */
+ dropped = &mib->counters[dev->info->mib_cnt];
+
+ /* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. */
+ while (mib->cnt_ptr < dev->info->mib_cnt) {
+ dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr,
+ dropped, &mib->counters[mib->cnt_ptr]);
+ ++mib->cnt_ptr;
+ }
+}
+
+static void ksz8_r_table(struct ksz_device *dev, int table, u16 addr, u64 *data)
+{
+ const u16 *regs;
+ u16 ctrl_addr;
+
+ regs = dev->info->regs;
+
+ ctrl_addr = IND_ACC_TABLE(table | TABLE_READ) | addr;
+
+ mutex_lock(&dev->alu_mutex);
+ ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+ ksz_read64(dev, regs[REG_IND_DATA_HI], data);
+ mutex_unlock(&dev->alu_mutex);
+}
+
+static void ksz8_w_table(struct ksz_device *dev, int table, u16 addr, u64 data)
+{
+ const u16 *regs;
+ u16 ctrl_addr;
+
+ regs = dev->info->regs;
+
+ ctrl_addr = IND_ACC_TABLE(table) | addr;
+
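+ /* Stage the 64-bit payload first; the hardware commits the table
+ * write when the control word is written afterwards.
+ */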
+ mutex_lock(&dev->alu_mutex);
+ ksz_write64(dev, regs[REG_IND_DATA_HI], data);
+ ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+ mutex_unlock(&dev->alu_mutex);
+}
+
+static int ksz8_valid_dyn_entry(struct ksz_device *dev, u8 *data)
+{
+ int timeout = 100;
+ const u32 *masks;
+ const u16 *regs;
+
+ masks = dev->info->masks;
+ regs = dev->info->regs;
+
+ do {
+ ksz_read8(dev, regs[REG_IND_DATA_CHECK], data);
+ timeout--;
+ } while ((*data & masks[DYNAMIC_MAC_TABLE_NOT_READY]) && timeout);
+
+ /* Entry is not ready for accessing. */
+ if (*data & masks[DYNAMIC_MAC_TABLE_NOT_READY]) {
+ return -EAGAIN;
+ /* Entry is ready for accessing. */
+ } else {
+ ksz_read8(dev, regs[REG_IND_DATA_8], data);
+
+ /* There is no valid entry in the table. */
+ if (*data & masks[DYNAMIC_MAC_TABLE_MAC_EMPTY])
+ return -ENXIO;
+ }
+ return 0;
+}
+
+int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
+ u8 *fid, u8 *src_port, u8 *timestamp, u16 *entries)
+{
+ u32 data_hi, data_lo;
+ const u8 *shifts;
+ const u32 *masks;
+ const u16 *regs;
+ u16 ctrl_addr;
+ u8 data;
+ int rc;
+
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
+ regs = dev->info->regs;
+
+ ctrl_addr = IND_ACC_TABLE(TABLE_DYNAMIC_MAC | TABLE_READ) | addr;
+
+ mutex_lock(&dev->alu_mutex);
+ ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+
+ rc = ksz8_valid_dyn_entry(dev, &data);
+ if (rc == -EAGAIN) {
+ if (addr == 0)
+ *entries = 0;
+ } else if (rc == -ENXIO) {
+ *entries = 0;
+ /* At least one valid entry in the table. */
+ } else {
+ u64 buf = 0;
+ int cnt;
+
+ ksz_read64(dev, regs[REG_IND_DATA_HI], &buf);
+ data_hi = (u32)(buf >> 32);
+ data_lo = (u32)buf;
+
+ /* Work out how many valid entries are in the table; the count is
+ * split across two fields.
+ */
+ cnt = data & masks[DYNAMIC_MAC_TABLE_ENTRIES_H];
+ cnt <<= shifts[DYNAMIC_MAC_ENTRIES_H];
+ cnt |= (data_hi & masks[DYNAMIC_MAC_TABLE_ENTRIES]) >>
+ shifts[DYNAMIC_MAC_ENTRIES];
+ *entries = cnt + 1;
+
+ *fid = (data_hi & masks[DYNAMIC_MAC_TABLE_FID]) >>
+ shifts[DYNAMIC_MAC_FID];
+ *src_port = (data_hi & masks[DYNAMIC_MAC_TABLE_SRC_PORT]) >>
+ shifts[DYNAMIC_MAC_SRC_PORT];
+ *timestamp = (data_hi & masks[DYNAMIC_MAC_TABLE_TIMESTAMP]) >>
+ shifts[DYNAMIC_MAC_TIMESTAMP];
+
+ mac_addr[5] = (u8)data_lo;
+ mac_addr[4] = (u8)(data_lo >> 8);
+ mac_addr[3] = (u8)(data_lo >> 16);
+ mac_addr[2] = (u8)(data_lo >> 24);
+
+ mac_addr[1] = (u8)data_hi;
+ mac_addr[0] = (u8)(data_hi >> 8);
+ rc = 0;
+ }
+ mutex_unlock(&dev->alu_mutex);
+
+ return rc;
+}
+
+static int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu, bool *valid)
+{
+ u32 data_hi, data_lo;
+ const u8 *shifts;
+ const u32 *masks;
+ u64 data;
+
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
+
+ ksz8_r_table(dev, TABLE_STATIC_MAC, addr, &data);
+ data_hi = data >> 32;
+ data_lo = (u32)data;
+
+ if (!(data_hi & (masks[STATIC_MAC_TABLE_VALID] |
+ masks[STATIC_MAC_TABLE_OVERRIDE]))) {
+ *valid = false;
+ return 0;
+ }
+
+ alu->mac[5] = (u8)data_lo;
+ alu->mac[4] = (u8)(data_lo >> 8);
+ alu->mac[3] = (u8)(data_lo >> 16);
+ alu->mac[2] = (u8)(data_lo >> 24);
+ alu->mac[1] = (u8)data_hi;
+ alu->mac[0] = (u8)(data_hi >> 8);
+ alu->port_forward =
+ (data_hi & masks[STATIC_MAC_TABLE_FWD_PORTS]) >>
+ shifts[STATIC_MAC_FWD_PORTS];
+ alu->is_override = (data_hi & masks[STATIC_MAC_TABLE_OVERRIDE]) ? 1 : 0;
+
+ /* KSZ8795 family switches have STATIC_MAC_TABLE_USE_FID and
+ * STATIC_MAC_TABLE_FID definitions off by 1 when doing read on the
+ * static MAC table compared to doing write.
+ */
+ if (ksz_is_ksz87xx(dev))
+ data_hi >>= 1;
+ alu->is_static = true;
+ alu->is_use_fid = (data_hi & masks[STATIC_MAC_TABLE_USE_FID]) ? 1 : 0;
+ alu->fid = (data_hi & masks[STATIC_MAC_TABLE_FID]) >>
+ shifts[STATIC_MAC_FID];
+
+ *valid = true;
+
+ return 0;
+}
+
+void ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu)
+{
+ u32 data_hi, data_lo;
+ const u8 *shifts;
+ const u32 *masks;
+ u64 data;
+
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
+
+ data_lo = ((u32)alu->mac[2] << 24) |
+ ((u32)alu->mac[3] << 16) |
+ ((u32)alu->mac[4] << 8) | alu->mac[5];
+ data_hi = ((u32)alu->mac[0] << 8) | alu->mac[1];
+ data_hi |= (u32)alu->port_forward << shifts[STATIC_MAC_FWD_PORTS];
+
+ if (alu->is_override)
+ data_hi |= masks[STATIC_MAC_TABLE_OVERRIDE];
+ if (alu->is_use_fid) {
+ data_hi |= masks[STATIC_MAC_TABLE_USE_FID];
+ data_hi |= (u32)alu->fid << shifts[STATIC_MAC_FID];
+ }
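+ /* The read side treats an entry as present when either the valid
+ * or the override bit is set, so clear the override bit as well
+ * when invalidating the entry.
+ */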
+ if (alu->is_static)
+ data_hi |= masks[STATIC_MAC_TABLE_VALID];
+ else
+ data_hi &= ~masks[STATIC_MAC_TABLE_OVERRIDE];
+
+ data = (u64)data_hi << 32 | data_lo;
+ ksz8_w_table(dev, TABLE_STATIC_MAC, addr, data);
+}
+
+static void ksz8_from_vlan(struct ksz_device *dev, u32 vlan, u8 *fid,
+ u8 *member, u8 *valid)
+{
+ const u8 *shifts;
+ const u32 *masks;
+
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
+
+ *fid = vlan & masks[VLAN_TABLE_FID];
+ *member = (vlan & masks[VLAN_TABLE_MEMBERSHIP]) >>
+ shifts[VLAN_TABLE_MEMBERSHIP_S];
+ *valid = !!(vlan & masks[VLAN_TABLE_VALID]);
+}
+
+static void ksz8_to_vlan(struct ksz_device *dev, u8 fid, u8 member, u8 valid,
+ u16 *vlan)
+{
+ const u8 *shifts;
+ const u32 *masks;
+
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
+
+ *vlan = fid;
+ *vlan |= (u16)member << shifts[VLAN_TABLE_MEMBERSHIP_S];
+ if (valid)
+ *vlan |= masks[VLAN_TABLE_VALID];
+}
+
+static void ksz8_r_vlan_entries(struct ksz_device *dev, u16 addr)
+{
+ const u8 *shifts;
+ u64 data;
+ int i;
+
+ shifts = dev->info->shifts;
+
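+ /* Each 64-bit VLAN table read returns four consecutive 16-bit
+ * entries, so scale the cache index accordingly.
+ */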
+ ksz8_r_table(dev, TABLE_VLAN, addr, &data);
+ addr *= 4;
+ for (i = 0; i < 4; i++) {
+ dev->vlan_cache[addr + i].table[0] = (u16)data;
+ data >>= shifts[VLAN_TABLE];
+ }
+}
+
+static void ksz8_r_vlan_table(struct ksz_device *dev, u16 vid, u16 *vlan)
+{
+ int index;
+ u16 *data;
+ u16 addr;
+ u64 buf;
+
+ data = (u16 *)&buf;
+ addr = vid / 4;
+ index = vid & 3;
+ ksz8_r_table(dev, TABLE_VLAN, addr, &buf);
+ *vlan = data[index];
+}
+
+static void ksz8_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan)
+{
+ int index;
+ u16 *data;
+ u16 addr;
+ u64 buf;
+
+ data = (u16 *)&buf;
+ addr = vid / 4;
+ index = vid & 3;
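+ /* Read-modify-write: fetch the 64-bit word holding four VLAN
+ * entries, patch the 16-bit slot for this VID and write it back.
+ */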
+ ksz8_r_table(dev, TABLE_VLAN, addr, &buf);
+ data[index] = vlan;
+ dev->vlan_cache[vid].table[0] = vlan;
+ ksz8_w_table(dev, TABLE_VLAN, addr, buf);
+}
+
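+/* Emulate a standard MII register set on top of the switch's per-port
+ * control and status registers so that phylib can manage the internal
+ * PHYs.
+ */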
+int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
+{
+ u8 restart, speed, ctrl, link;
+ int processed = true;
+ const u16 *regs;
+ u8 val1, val2;
+ u16 data = 0;
+ u8 p = phy;
+ int ret;
+
+ regs = dev->info->regs;
+
+ switch (reg) {
+ case MII_BMCR:
+ ret = ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
+ if (ret)
+ return ret;
+
+ ret = ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
+ if (ret)
+ return ret;
+
+ ret = ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
+ if (ret)
+ return ret;
+
+ if (restart & PORT_PHY_LOOPBACK)
+ data |= BMCR_LOOPBACK;
+ if (ctrl & PORT_FORCE_100_MBIT)
+ data |= BMCR_SPEED100;
+ if (ksz_is_ksz88x3(dev)) {
+ if ((ctrl & PORT_AUTO_NEG_ENABLE))
+ data |= BMCR_ANENABLE;
+ } else {
+ if (!(ctrl & PORT_AUTO_NEG_DISABLE))
+ data |= BMCR_ANENABLE;
+ }
+ if (restart & PORT_POWER_DOWN)
+ data |= BMCR_PDOWN;
+ if (restart & PORT_AUTO_NEG_RESTART)
+ data |= BMCR_ANRESTART;
+ if (ctrl & PORT_FORCE_FULL_DUPLEX)
+ data |= BMCR_FULLDPLX;
+ if (speed & PORT_HP_MDIX)
+ data |= KSZ886X_BMCR_HP_MDIX;
+ if (restart & PORT_FORCE_MDIX)
+ data |= KSZ886X_BMCR_FORCE_MDI;
+ if (restart & PORT_AUTO_MDIX_DISABLE)
+ data |= KSZ886X_BMCR_DISABLE_AUTO_MDIX;
+ if (restart & PORT_TX_DISABLE)
+ data |= KSZ886X_BMCR_DISABLE_TRANSMIT;
+ if (restart & PORT_LED_OFF)
+ data |= KSZ886X_BMCR_DISABLE_LED;
+ break;
+ case MII_BMSR:
+ ret = ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
+ if (ret)
+ return ret;
+
+ data = BMSR_100FULL |
+ BMSR_100HALF |
+ BMSR_10FULL |
+ BMSR_10HALF |
+ BMSR_ANEGCAPABLE;
+ if (link & PORT_AUTO_NEG_COMPLETE)
+ data |= BMSR_ANEGCOMPLETE;
+ if (link & PORT_STAT_LINK_GOOD)
+ data |= BMSR_LSTATUS;
+ break;
+ case MII_PHYSID1:
+ data = KSZ8795_ID_HI;
+ break;
+ case MII_PHYSID2:
+ if (ksz_is_ksz88x3(dev))
+ data = KSZ8863_ID_LO;
+ else
+ data = KSZ8795_ID_LO;
+ break;
+ case MII_ADVERTISE:
+ ret = ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
+ if (ret)
+ return ret;
+
+ data = ADVERTISE_CSMA;
+ if (ctrl & PORT_AUTO_NEG_SYM_PAUSE)
+ data |= ADVERTISE_PAUSE_CAP;
+ if (ctrl & PORT_AUTO_NEG_100BTX_FD)
+ data |= ADVERTISE_100FULL;
+ if (ctrl & PORT_AUTO_NEG_100BTX)
+ data |= ADVERTISE_100HALF;
+ if (ctrl & PORT_AUTO_NEG_10BT_FD)
+ data |= ADVERTISE_10FULL;
+ if (ctrl & PORT_AUTO_NEG_10BT)
+ data |= ADVERTISE_10HALF;
+ break;
+ case MII_LPA:
+ ret = ksz_pread8(dev, p, regs[P_REMOTE_STATUS], &link);
+ if (ret)
+ return ret;
+
+ data = LPA_SLCT;
+ if (link & PORT_REMOTE_SYM_PAUSE)
+ data |= LPA_PAUSE_CAP;
+ if (link & PORT_REMOTE_100BTX_FD)
+ data |= LPA_100FULL;
+ if (link & PORT_REMOTE_100BTX)
+ data |= LPA_100HALF;
+ if (link & PORT_REMOTE_10BT_FD)
+ data |= LPA_10FULL;
+ if (link & PORT_REMOTE_10BT)
+ data |= LPA_10HALF;
+ if (data & ~LPA_SLCT)
+ data |= LPA_LPACK;
+ break;
+ case PHY_REG_LINK_MD:
+ ret = ksz_pread8(dev, p, REG_PORT_LINK_MD_CTRL, &val1);
+ if (ret)
+ return ret;
+
+ ret = ksz_pread8(dev, p, REG_PORT_LINK_MD_RESULT, &val2);
+ if (ret)
+ return ret;
+
+ if (val1 & PORT_START_CABLE_DIAG)
+ data |= PHY_START_CABLE_DIAG;
+
+ if (val1 & PORT_CABLE_10M_SHORT)
+ data |= PHY_CABLE_10M_SHORT;
+
+ data |= FIELD_PREP(PHY_CABLE_DIAG_RESULT_M,
+ FIELD_GET(PORT_CABLE_DIAG_RESULT_M, val1));
+
+ data |= FIELD_PREP(PHY_CABLE_FAULT_COUNTER_M,
+ (FIELD_GET(PORT_CABLE_FAULT_COUNTER_H, val1) << 8) |
+ FIELD_GET(PORT_CABLE_FAULT_COUNTER_L, val2));
+ break;
+ case PHY_REG_PHY_CTRL:
+ ret = ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
+ if (ret)
+ return ret;
+
+ if (link & PORT_MDIX_STATUS)
+ data |= KSZ886X_CTRL_MDIX_STAT;
+ break;
+ default:
+ processed = false;
+ break;
+ }
+ if (processed)
+ *val = data;
+
+ return 0;
+}
+
+int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
+{
+ u8 restart, speed, ctrl, data;
+ const u16 *regs;
+ u8 p = phy;
+ int ret;
+
+ regs = dev->info->regs;
+
+ switch (reg) {
+ case MII_BMCR:
+ /* Do not support PHY reset function. */
+ if (val & BMCR_RESET)
+ break;
+ ret = ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
+ if (ret)
+ return ret;
+
+ data = speed;
+ if (val & KSZ886X_BMCR_HP_MDIX)
+ data |= PORT_HP_MDIX;
+ else
+ data &= ~PORT_HP_MDIX;
+
+ if (data != speed) {
+ ret = ksz_pwrite8(dev, p, regs[P_SPEED_STATUS], data);
+ if (ret)
+ return ret;
+ }
+
+ ret = ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
+ if (ret)
+ return ret;
+
+ data = ctrl;
+ if (ksz_is_ksz88x3(dev)) {
+ if ((val & BMCR_ANENABLE))
+ data |= PORT_AUTO_NEG_ENABLE;
+ else
+ data &= ~PORT_AUTO_NEG_ENABLE;
+ } else {
+ if (!(val & BMCR_ANENABLE))
+ data |= PORT_AUTO_NEG_DISABLE;
+ else
+ data &= ~PORT_AUTO_NEG_DISABLE;
+
+ /* Fiber port does not support auto-negotiation. */
+ if (dev->ports[p].fiber)
+ data |= PORT_AUTO_NEG_DISABLE;
+ }
+
+ if (val & BMCR_SPEED100)
+ data |= PORT_FORCE_100_MBIT;
+ else
+ data &= ~PORT_FORCE_100_MBIT;
+ if (val & BMCR_FULLDPLX)
+ data |= PORT_FORCE_FULL_DUPLEX;
+ else
+ data &= ~PORT_FORCE_FULL_DUPLEX;
+
+ if (data != ctrl) {
+ ret = ksz_pwrite8(dev, p, regs[P_FORCE_CTRL], data);
+ if (ret)
+ return ret;
+ }
+
+ ret = ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
+ if (ret)
+ return ret;
+
+ data = restart;
+ if (val & KSZ886X_BMCR_DISABLE_LED)
+ data |= PORT_LED_OFF;
+ else
+ data &= ~PORT_LED_OFF;
+ if (val & KSZ886X_BMCR_DISABLE_TRANSMIT)
+ data |= PORT_TX_DISABLE;
+ else
+ data &= ~PORT_TX_DISABLE;
+ if (val & BMCR_ANRESTART)
+ data |= PORT_AUTO_NEG_RESTART;
+ else
+ data &= ~PORT_AUTO_NEG_RESTART;
+ if (val & BMCR_PDOWN)
+ data |= PORT_POWER_DOWN;
+ else
+ data &= ~PORT_POWER_DOWN;
+ if (val & KSZ886X_BMCR_DISABLE_AUTO_MDIX)
+ data |= PORT_AUTO_MDIX_DISABLE;
+ else
+ data &= ~PORT_AUTO_MDIX_DISABLE;
+ if (val & KSZ886X_BMCR_FORCE_MDI)
+ data |= PORT_FORCE_MDIX;
+ else
+ data &= ~PORT_FORCE_MDIX;
+ if (val & BMCR_LOOPBACK)
+ data |= PORT_PHY_LOOPBACK;
+ else
+ data &= ~PORT_PHY_LOOPBACK;
+
+ if (data != restart) {
+ ret = ksz_pwrite8(dev, p, regs[P_NEG_RESTART_CTRL],
+ data);
+ if (ret)
+ return ret;
+ }
+ break;
+ case MII_ADVERTISE:
+ ret = ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
+ if (ret)
+ return ret;
+
+ data = ctrl;
+ data &= ~(PORT_AUTO_NEG_SYM_PAUSE |
+ PORT_AUTO_NEG_100BTX_FD |
+ PORT_AUTO_NEG_100BTX |
+ PORT_AUTO_NEG_10BT_FD |
+ PORT_AUTO_NEG_10BT);
+ if (val & ADVERTISE_PAUSE_CAP)
+ data |= PORT_AUTO_NEG_SYM_PAUSE;
+ if (val & ADVERTISE_100FULL)
+ data |= PORT_AUTO_NEG_100BTX_FD;
+ if (val & ADVERTISE_100HALF)
+ data |= PORT_AUTO_NEG_100BTX;
+ if (val & ADVERTISE_10FULL)
+ data |= PORT_AUTO_NEG_10BT_FD;
+ if (val & ADVERTISE_10HALF)
+ data |= PORT_AUTO_NEG_10BT;
+
+ if (data != ctrl) {
+ ret = ksz_pwrite8(dev, p, regs[P_LOCAL_CTRL], data);
+ if (ret)
+ return ret;
+ }
+ break;
+ case PHY_REG_LINK_MD:
+ if (val & PHY_START_CABLE_DIAG)
+ ksz_port_cfg(dev, p, REG_PORT_LINK_MD_CTRL, PORT_START_CABLE_DIAG, true);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member)
+{
+ u8 data;
+
+ ksz_pread8(dev, port, P_MIRROR_CTRL, &data);
+ data &= ~PORT_VLAN_MEMBERSHIP;
+ data |= (member & dev->port_mask);
+ ksz_pwrite8(dev, port, P_MIRROR_CTRL, data);
+}
+
+void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
+{
+ u8 learn[DSA_MAX_PORTS];
+ int first, index, cnt;
+ struct ksz_port *p;
+ const u16 *regs;
+
+ regs = dev->info->regs;
+
+ if ((uint)port < dev->info->port_cnt) {
+ first = port;
+ cnt = port + 1;
+ } else {
+ /* Flush all ports. */
+ first = 0;
+ cnt = dev->info->port_cnt;
+ }
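+ /* Temporarily disable learning on the affected ports so that the
+ * flushed entries are not immediately re-learned while the flush
+ * runs.
+ */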
+ for (index = first; index < cnt; index++) {
+ p = &dev->ports[index];
+ if (!p->on)
+ continue;
+ ksz_pread8(dev, index, regs[P_STP_CTRL], &learn[index]);
+ if (!(learn[index] & PORT_LEARN_DISABLE))
+ ksz_pwrite8(dev, index, regs[P_STP_CTRL],
+ learn[index] | PORT_LEARN_DISABLE);
+ }
+ ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
+ for (index = first; index < cnt; index++) {
+ p = &dev->ports[index];
+ if (!p->on)
+ continue;
+ if (!(learn[index] & PORT_LEARN_DISABLE))
+ ksz_pwrite8(dev, index, regs[P_STP_CTRL], learn[index]);
+ }
+}
+
+int ksz8_fdb_dump(struct ksz_device *dev, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ int ret = 0;
+ u16 i = 0;
+ u16 entries = 0;
+ u8 timestamp = 0;
+ u8 fid;
+ u8 src_port;
+ u8 mac[ETH_ALEN];
+
+ do {
+ ret = ksz8_r_dyn_mac_table(dev, i, mac, &fid, &src_port,
+ &timestamp, &entries);
+ if (!ret && port == src_port) {
+ ret = cb(mac, fid, false, data);
+ if (ret)
+ break;
+ }
+ i++;
+ } while (i < entries);
+ if (i >= entries)
+ ret = 0;
+
+ return ret;
+}
+
+static int ksz8_add_sta_mac(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct alu_struct alu;
+ int index, ret;
+ int empty = 0;
+
+ alu.port_forward = 0;
+ for (index = 0; index < dev->info->num_statics; index++) {
+ bool valid;
+
+ ret = ksz8_r_sta_mac_table(dev, index, &alu, &valid);
+ if (ret)
+ return ret;
+ if (!valid) {
+ /* Remember the first empty entry, stored 1-based so that 0
+ * means no empty slot was found.
+ */
+ if (!empty)
+ empty = index + 1;
+ continue;
+ }
+
+ if (!memcmp(alu.mac, addr, ETH_ALEN) && alu.fid == vid)
+ break;
+ }
+
+ /* no available entry */
+ if (index == dev->info->num_statics && !empty)
+ return -ENOSPC;
+
+ /* add entry */
+ if (index == dev->info->num_statics) {
+ index = empty - 1;
+ memset(&alu, 0, sizeof(alu));
+ memcpy(alu.mac, addr, ETH_ALEN);
+ alu.is_static = true;
+ }
+ alu.port_forward |= BIT(port);
+ if (vid) {
+ alu.is_use_fid = true;
+
+ /* Need a way to map VID to FID. */
+ alu.fid = vid;
+ }
+ ksz8_w_sta_mac_table(dev, index, &alu);
+
+ return 0;
+}
+
+static int ksz8_del_sta_mac(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct alu_struct alu;
+ int index, ret;
+
+ for (index = 0; index < dev->info->num_statics; index++) {
+ bool valid;
+
+ ret = ksz8_r_sta_mac_table(dev, index, &alu, &valid);
+ if (ret)
+ return ret;
+ if (!valid)
+ continue;
+
+ if (!memcmp(alu.mac, addr, ETH_ALEN) && alu.fid == vid)
+ break;
+ }
+
+ /* no matching entry */
+ if (index == dev->info->num_statics)
+ goto exit;
+
+ /* clear port */
+ alu.port_forward &= ~BIT(port);
+ if (!alu.port_forward)
+ alu.is_static = false;
+ ksz8_w_sta_mac_table(dev, index, &alu);
+
+exit:
+ return 0;
+}
+
+int ksz8_mdb_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
+{
+ return ksz8_add_sta_mac(dev, port, mdb->addr, mdb->vid);
+}
+
+int ksz8_mdb_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
+{
+ return ksz8_del_sta_mac(dev, port, mdb->addr, mdb->vid);
+}
+
+int ksz8_port_vlan_filtering(struct ksz_device *dev, int port, bool flag,
+ struct netlink_ext_ack *extack)
+{
+ if (ksz_is_ksz88x3(dev))
+ return -ENOTSUPP;
+
+ /* Discard packets with VID not enabled on the switch */
+ ksz_cfg(dev, S_MIRROR_CTRL, SW_VLAN_ENABLE, flag);
+
+ /* Discard packets with VID not enabled on the ingress port */
+ for (port = 0; port < dev->phy_port_cnt; ++port)
+ ksz_port_cfg(dev, port, REG_PORT_CTRL_2, PORT_INGRESS_FILTER,
+ flag);
+
+ return 0;
+}
+
+static void ksz8_port_enable_pvid(struct ksz_device *dev, int port, bool state)
+{
+ if (ksz_is_ksz88x3(dev)) {
+ ksz_cfg(dev, REG_SW_INSERT_SRC_PVID,
+ 0x03 << (4 - 2 * port), state);
+ } else {
+ ksz_pwrite8(dev, port, REG_PORT_CTRL_12, state ? 0x0f : 0x00);
+ }
+}
+
+int ksz8_port_vlan_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ struct ksz_port *p = &dev->ports[port];
+ u16 data, new_pvid = 0;
+ u8 fid, member, valid;
+
+ if (ksz_is_ksz88x3(dev))
+ return -ENOTSUPP;
+
+ /* If a VLAN is added with untagged flag different from the
+ * port's Remove Tag flag, we need to change the latter.
+ * Ignore VID 0, which is always untagged.
+ * Ignore CPU port, which will always be tagged.
+ */
+ if (untagged != p->remove_tag && vlan->vid != 0 &&
+ port != dev->cpu_port) {
+ unsigned int vid;
+
+ /* Reject attempts to add a VLAN that requires the
+ * Remove Tag flag to be changed, unless there are no
+ * other VLANs currently configured.
+ */
+ for (vid = 1; vid < dev->info->num_vlans; ++vid) {
+ /* Skip the VID we are going to add or reconfigure */
+ if (vid == vlan->vid)
+ continue;
+
+ ksz8_from_vlan(dev, dev->vlan_cache[vid].table[0],
+ &fid, &member, &valid);
+ if (valid && (member & BIT(port)))
+ return -EINVAL;
+ }
+
+ ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
+ p->remove_tag = untagged;
+ }
+
+ ksz8_r_vlan_table(dev, vlan->vid, &data);
+ ksz8_from_vlan(dev, data, &fid, &member, &valid);
+
+ /* Set up the VLAN entry for the first time. */
+ if (!valid) {
+ /* Need to find a way to map VID to FID. */
+ fid = 1;
+ valid = 1;
+ }
+ member |= BIT(port);
+
+ ksz8_to_vlan(dev, fid, member, valid, &data);
+ ksz8_w_vlan_table(dev, vlan->vid, data);
+
+ /* change PVID */
+ if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
+ new_pvid = vlan->vid;
+
+ if (new_pvid) {
+ u16 vid;
+
+ ksz_pread16(dev, port, REG_PORT_CTRL_VID, &vid);
+ vid &= ~VLAN_VID_MASK;
+ vid |= new_pvid;
+ ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, vid);
+
+ ksz8_port_enable_pvid(dev, port, true);
+ }
+
+ return 0;
+}
+
+int ksz8_port_vlan_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ u16 data, pvid;
+ u8 fid, member, valid;
+
+ if (ksz_is_ksz88x3(dev))
+ return -ENOTSUPP;
+
+ ksz_pread16(dev, port, REG_PORT_CTRL_VID, &pvid);
+ pvid = pvid & 0xFFF;
+
+ ksz8_r_vlan_table(dev, vlan->vid, &data);
+ ksz8_from_vlan(dev, data, &fid, &member, &valid);
+
+ member &= ~BIT(port);
+
+ /* Invalidate the entry if no more member. */
+ if (!member) {
+ fid = 0;
+ valid = 0;
+ }
+
+ ksz8_to_vlan(dev, fid, member, valid, &data);
+ ksz8_w_vlan_table(dev, vlan->vid, data);
+
+ if (pvid == vlan->vid)
+ ksz8_port_enable_pvid(dev, port, false);
+
+ return 0;
+}
+
+int ksz8_port_mirror_add(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
+{
+ if (ingress) {
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
+ dev->mirror_rx |= BIT(port);
+ } else {
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true);
+ dev->mirror_tx |= BIT(port);
+ }
+
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_SNIFFER, false);
+
+ /* configure mirror port */
+ if (dev->mirror_rx || dev->mirror_tx)
+ ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
+ PORT_MIRROR_SNIFFER, true);
+
+ return 0;
+}
+
+void ksz8_port_mirror_del(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ u8 data;
+
+ if (mirror->ingress) {
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false);
+ dev->mirror_rx &= ~BIT(port);
+ } else {
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false);
+ dev->mirror_tx &= ~BIT(port);
+ }
+
+ ksz_pread8(dev, port, P_MIRROR_CTRL, &data);
+
+ if (!dev->mirror_rx && !dev->mirror_tx)
+ ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
+ PORT_MIRROR_SNIFFER, false);
+}
+
+static void ksz8795_cpu_interface_select(struct ksz_device *dev, int port)
+{
+ struct ksz_port *p = &dev->ports[port];
+
+ if (!p->interface && dev->compat_interface) {
+ dev_warn(dev->dev,
+ "Using legacy switch \"phy-mode\" property, because it is missing on port %d node. "
+ "Please update your device tree.\n",
+ port);
+ p->interface = dev->compat_interface;
+ }
+}
+
+void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
+{
+ struct dsa_switch *ds = dev->ds;
+ const u32 *masks;
+ u8 member;
+
+ masks = dev->info->masks;
+
+ /* enable broadcast storm limit */
+ ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
+
+ if (!ksz_is_ksz88x3(dev))
+ ksz8795_set_prio_queue(dev, port, 4);
+
+ /* disable DiffServ priority */
+ ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_ENABLE, false);
+
+ /* replace priority */
+ ksz_port_cfg(dev, port, P_802_1P_CTRL,
+ masks[PORT_802_1P_REMAPPING], false);
+
+ /* enable 802.1p priority */
+ ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_ENABLE, true);
+
+ if (cpu_port) {
+ if (!ksz_is_ksz88x3(dev))
+ ksz8795_cpu_interface_select(dev, port);
+
+ member = dsa_user_ports(ds);
+ } else {
+ member = BIT(dsa_upstream_port(ds, port));
+ }
+
+ ksz8_cfg_port_member(dev, port, member);
+}
+
+void ksz8_config_cpu_port(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *p;
+ const u32 *masks;
+ const u16 *regs;
+ u8 remote;
+ int i;
+
+ masks = dev->info->masks;
+ regs = dev->info->regs;
+
+ /* The tail tag adds an extra byte, so a maximum-size frame would
+ * otherwise be marked as oversized; disable the legal packet size
+ * check to accept it.
+ */
+ ksz_cfg(dev, REG_SW_CTRL_2, SW_LEGAL_PACKET_DISABLE, true);
+ ksz_cfg(dev, regs[S_TAIL_TAG_CTRL], masks[SW_TAIL_TAG_ENABLE], true);
+
+ p = &dev->ports[dev->cpu_port];
+ p->on = 1;
+
+ ksz8_port_setup(dev, dev->cpu_port, true);
+
+ for (i = 0; i < dev->phy_port_cnt; i++) {
+ p = &dev->ports[i];
+
+ ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED);
+
+ /* Last port may be disabled. */
+ if (i == dev->phy_port_cnt)
+ break;
+ p->on = 1;
+ }
+ for (i = 0; i < dev->phy_port_cnt; i++) {
+ p = &dev->ports[i];
+ if (!p->on)
+ continue;
+ if (!ksz_is_ksz88x3(dev)) {
+ ksz_pread8(dev, i, regs[P_REMOTE_STATUS], &remote);
+ if (remote & KSZ8_PORT_FIBER_MODE)
+ p->fiber = 1;
+ }
+ if (p->fiber)
+ ksz_port_cfg(dev, i, regs[P_STP_CTRL],
+ PORT_FORCE_FLOW_CTRL, true);
+ else
+ ksz_port_cfg(dev, i, regs[P_STP_CTRL],
+ PORT_FORCE_FLOW_CTRL, false);
+ }
+}
+
+static int ksz8_handle_global_errata(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret = 0;
+
+ /* KSZ87xx Errata DS80000687C.
+ * Module 2: Link drops with some EEE link partners.
+ * An issue with the EEE next page exchange between the
+ * KSZ879x/KSZ877x/KSZ876x and some EEE link partners may result in
+ * the link dropping.
+ */
+ if (dev->info->ksz87xx_eee_link_erratum)
+ ret = ksz8_ind_write8(dev, TABLE_EEE, REG_IND_EEE_GLOB2_HI, 0);
+
+ return ret;
+}
+
+int ksz8_enable_stp_addr(struct ksz_device *dev)
+{
+ struct alu_struct alu;
+
+ /* Setup STP address for STP operation. */
+ memset(&alu, 0, sizeof(alu));
+ ether_addr_copy(alu.mac, eth_stp_addr);
+ alu.is_static = true;
+ alu.is_override = true;
+ alu.port_forward = dev->info->cpu_ports;
+
+ ksz8_w_sta_mac_table(dev, 0, &alu);
+
+ return 0;
+}
+
+int ksz8_setup(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ int i;
+
+ ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_FLOW_CTRL, true);
+
+ /* Enable automatic fast aging when a link change is detected. */
+ ksz_cfg(dev, S_LINK_AGING_CTRL, SW_LINK_AUTO_AGING, true);
+
+ /* Enable aggressive back off algorithm in half duplex mode. */
+ regmap_update_bits(dev->regmap[0], REG_SW_CTRL_1,
+ SW_AGGR_BACKOFF, SW_AGGR_BACKOFF);
+
+ /* Make sure unicast VLAN boundary is set as default and that
+ * packets are not dropped on excessive collisions.
+ */
+ regmap_update_bits(dev->regmap[0], REG_SW_CTRL_2,
+ UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP,
+ UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP);
+
+ ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_REPLACE_VID, false);
+
+ ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);
+
+ if (!ksz_is_ksz88x3(dev))
+ ksz_cfg(dev, REG_SW_CTRL_19, SW_INS_TAG_ENABLE, true);
+
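+ /* Populate the VLAN cache; each indirect read covers four entries. */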
+ for (i = 0; i < (dev->info->num_vlans / 4); i++)
+ ksz8_r_vlan_entries(dev, i);
+
+ return ksz8_handle_global_errata(ds);
+}
+
+void ksz8_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config)
+{
+ config->mac_capabilities = MAC_10 | MAC_100;
+
+ /* Silicon Errata Sheet (DS80000830A):
+ * "Port 1 does not respond to received flow control PAUSE frames"
+ * So, disable Pause support on "Port 1" (port == 0) for all ksz88x3
+ * switches.
+ */
+ if (!ksz_is_ksz88x3(dev) || port)
+ config->mac_capabilities |= MAC_SYM_PAUSE;
+
+ /* Asym pause is not supported on KSZ8863 and KSZ8873 */
+ if (!ksz_is_ksz88x3(dev))
+ config->mac_capabilities |= MAC_ASYM_PAUSE;
+}
+
+u32 ksz8_get_port_addr(int port, int offset)
+{
+ return PORT_CTRL_ADDR(port, offset);
+}
+
+int ksz8_switch_init(struct ksz_device *dev)
+{
+ dev->cpu_port = fls(dev->info->cpu_ports) - 1;
+ dev->phy_port_cnt = dev->info->port_cnt - 1;
+ dev->port_mask = (BIT(dev->phy_port_cnt) - 1) | dev->info->cpu_ports;
+
+ /* We rely on software untagging on the CPU port, so that we
+ * can support both tagged and untagged VLANs
+ */
+ dev->ds->untag_bridge_pvid = true;
+
+ /* VLAN filtering is partly controlled by the global VLAN
+ * Enable flag
+ */
+ dev->ds->vlan_filtering_is_global = true;
+
+ return 0;
+}
+
+void ksz8_switch_exit(struct ksz_device *dev)
+{
+ ksz8_reset_switch(dev);
+}
+
+MODULE_AUTHOR("Tristram Ha <Tristram.Ha@microchip.com>");
+MODULE_DESCRIPTION("Microchip KSZ8795 Series Switch DSA Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h
new file mode 100644
index 000000000..77487d611
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz8795_reg.h
@@ -0,0 +1,812 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Microchip KSZ8795 register definitions
+ *
+ * Copyright (c) 2017 Microchip Technology Inc.
+ * Tristram Ha <Tristram.Ha@microchip.com>
+ */
+
+#ifndef __KSZ8795_REG_H
+#define __KSZ8795_REG_H
+
+#define KS_PORT_M 0x1F
+
+#define KS_PRIO_M 0x3
+#define KS_PRIO_S 2
+
+#define SW_REVISION_M 0x0E
+#define SW_REVISION_S 1
+
+#define KSZ8863_REG_SW_RESET 0x43
+
+#define KSZ8863_GLOBAL_SOFTWARE_RESET BIT(4)
+#define KSZ8863_PCS_RESET BIT(0)
+
+#define REG_SW_CTRL_0 0x02
+
+#define SW_NEW_BACKOFF BIT(7)
+#define SW_GLOBAL_RESET BIT(6)
+#define SW_FLUSH_DYN_MAC_TABLE BIT(5)
+#define SW_FLUSH_STA_MAC_TABLE BIT(4)
+#define SW_LINK_AUTO_AGING BIT(0)
+
+#define REG_SW_CTRL_1 0x03
+
+#define SW_HUGE_PACKET BIT(6)
+#define SW_TX_FLOW_CTRL_DISABLE BIT(5)
+#define SW_RX_FLOW_CTRL_DISABLE BIT(4)
+#define SW_CHECK_LENGTH BIT(3)
+#define SW_AGING_ENABLE BIT(2)
+#define SW_FAST_AGING BIT(1)
+#define SW_AGGR_BACKOFF BIT(0)
+
+#define REG_SW_CTRL_2 0x04
+
+#define UNICAST_VLAN_BOUNDARY BIT(7)
+#define SW_BACK_PRESSURE BIT(5)
+#define FAIR_FLOW_CTRL BIT(4)
+#define NO_EXC_COLLISION_DROP BIT(3)
+#define SW_LEGAL_PACKET_DISABLE BIT(1)
+
+#define REG_SW_CTRL_3 0x05
+#define WEIGHTED_FAIR_QUEUE_ENABLE BIT(3)
+
+#define SW_VLAN_ENABLE BIT(7)
+#define SW_IGMP_SNOOP BIT(6)
+#define SW_MIRROR_RX_TX BIT(0)
+
+#define REG_SW_CTRL_4 0x06
+
+#define SW_HALF_DUPLEX_FLOW_CTRL BIT(7)
+#define SW_HALF_DUPLEX BIT(6)
+#define SW_FLOW_CTRL BIT(5)
+#define SW_10_MBIT BIT(4)
+#define SW_REPLACE_VID BIT(3)
+
+#define REG_SW_CTRL_5 0x07
+
+#define REG_SW_CTRL_6 0x08
+
+#define SW_MIB_COUNTER_FLUSH BIT(7)
+#define SW_MIB_COUNTER_FREEZE BIT(6)
+#define SW_MIB_COUNTER_CTRL_ENABLE KS_PORT_M
+
+#define REG_SW_CTRL_9 0x0B
+
+#define SPI_CLK_125_MHZ 0x80
+#define SPI_CLK_62_5_MHZ 0x40
+#define SPI_CLK_31_25_MHZ 0x00
+
+#define SW_LED_MODE_M 0x3
+#define SW_LED_MODE_S 4
+#define SW_LED_LINK_ACT_SPEED 0
+#define SW_LED_LINK_ACT 1
+#define SW_LED_LINK_ACT_DUPLEX 2
+#define SW_LED_LINK_DUPLEX 3
+
+#define REG_SW_CTRL_10 0x0C
+
+#define SW_PASS_PAUSE BIT(0)
+
+#define REG_SW_CTRL_11 0x0D
+
+#define REG_POWER_MANAGEMENT_1 0x0E
+
+#define SW_PLL_POWER_DOWN BIT(5)
+#define SW_POWER_MANAGEMENT_MODE_M 0x3
+#define SW_POWER_MANAGEMENT_MODE_S 3
+#define SW_POWER_NORMAL 0
+#define SW_ENERGY_DETECTION 1
+#define SW_SOFTWARE_POWER_DOWN 2
+
+#define REG_POWER_MANAGEMENT_2 0x0F
+
+#define REG_PORT_1_CTRL_0 0x10
+#define REG_PORT_2_CTRL_0 0x20
+#define REG_PORT_3_CTRL_0 0x30
+#define REG_PORT_4_CTRL_0 0x40
+#define REG_PORT_5_CTRL_0 0x50
+
+#define PORT_BROADCAST_STORM BIT(7)
+#define PORT_DIFFSERV_ENABLE BIT(6)
+#define PORT_802_1P_ENABLE BIT(5)
+#define PORT_BASED_PRIO_S 3
+#define PORT_BASED_PRIO_M KS_PRIO_M
+#define PORT_BASED_PRIO_0 0
+#define PORT_BASED_PRIO_1 1
+#define PORT_BASED_PRIO_2 2
+#define PORT_BASED_PRIO_3 3
+#define PORT_INSERT_TAG BIT(2)
+#define PORT_REMOVE_TAG BIT(1)
+#define PORT_QUEUE_SPLIT_L BIT(0)
+
+#define REG_PORT_1_CTRL_1 0x11
+#define REG_PORT_2_CTRL_1 0x21
+#define REG_PORT_3_CTRL_1 0x31
+#define REG_PORT_4_CTRL_1 0x41
+#define REG_PORT_5_CTRL_1 0x51
+
+#define PORT_MIRROR_SNIFFER BIT(7)
+#define PORT_MIRROR_RX BIT(6)
+#define PORT_MIRROR_TX BIT(5)
+#define PORT_VLAN_MEMBERSHIP KS_PORT_M
+
+#define REG_PORT_1_CTRL_2 0x12
+#define REG_PORT_2_CTRL_2 0x22
+#define REG_PORT_3_CTRL_2 0x32
+#define REG_PORT_4_CTRL_2 0x42
+#define REG_PORT_5_CTRL_2 0x52
+
+#define PORT_INGRESS_FILTER BIT(6)
+#define PORT_DISCARD_NON_VID BIT(5)
+#define PORT_FORCE_FLOW_CTRL BIT(4)
+#define PORT_BACK_PRESSURE BIT(3)
+
+#define REG_PORT_1_CTRL_3 0x13
+#define REG_PORT_2_CTRL_3 0x23
+#define REG_PORT_3_CTRL_3 0x33
+#define REG_PORT_4_CTRL_3 0x43
+#define REG_PORT_5_CTRL_3 0x53
+#define REG_PORT_1_CTRL_4 0x14
+#define REG_PORT_2_CTRL_4 0x24
+#define REG_PORT_3_CTRL_4 0x34
+#define REG_PORT_4_CTRL_4 0x44
+#define REG_PORT_5_CTRL_4 0x54
+
+#define PORT_DEFAULT_VID 0x0001
+
+#define REG_PORT_1_CTRL_5 0x15
+#define REG_PORT_2_CTRL_5 0x25
+#define REG_PORT_3_CTRL_5 0x35
+#define REG_PORT_4_CTRL_5 0x45
+#define REG_PORT_5_CTRL_5 0x55
+
+#define PORT_ACL_ENABLE BIT(2)
+#define PORT_AUTHEN_MODE 0x3
+#define PORT_AUTHEN_PASS 0
+#define PORT_AUTHEN_BLOCK 1
+#define PORT_AUTHEN_TRAP 2
+
+#define REG_PORT_5_CTRL_6 0x56
+
+#define PORT_MII_INTERNAL_CLOCK BIT(7)
+#define PORT_GMII_MAC_MODE BIT(2)
+
+#define REG_PORT_1_CTRL_7 0x17
+#define REG_PORT_2_CTRL_7 0x27
+#define REG_PORT_3_CTRL_7 0x37
+#define REG_PORT_4_CTRL_7 0x47
+
+#define PORT_AUTO_NEG_ASYM_PAUSE BIT(5)
+#define PORT_AUTO_NEG_SYM_PAUSE BIT(4)
+#define PORT_AUTO_NEG_100BTX_FD BIT(3)
+#define PORT_AUTO_NEG_100BTX BIT(2)
+#define PORT_AUTO_NEG_10BT_FD BIT(1)
+#define PORT_AUTO_NEG_10BT BIT(0)
+
+#define REG_PORT_1_STATUS_0 0x18
+#define REG_PORT_2_STATUS_0 0x28
+#define REG_PORT_3_STATUS_0 0x38
+#define REG_PORT_4_STATUS_0 0x48
+
+/* For KSZ8765. */
+#define PORT_REMOTE_ASYM_PAUSE BIT(5)
+#define PORT_REMOTE_SYM_PAUSE BIT(4)
+#define PORT_REMOTE_100BTX_FD BIT(3)
+#define PORT_REMOTE_100BTX BIT(2)
+#define PORT_REMOTE_10BT_FD BIT(1)
+#define PORT_REMOTE_10BT BIT(0)
+
+#define REG_PORT_1_STATUS_1 0x19
+#define REG_PORT_2_STATUS_1 0x29
+#define REG_PORT_3_STATUS_1 0x39
+#define REG_PORT_4_STATUS_1 0x49
+
+#define PORT_HP_MDIX BIT(7)
+#define PORT_REVERSED_POLARITY BIT(5)
+#define PORT_TX_FLOW_CTRL BIT(4)
+#define PORT_RX_FLOW_CTRL BIT(3)
+#define PORT_STAT_SPEED_100MBIT BIT(2)
+#define PORT_STAT_FULL_DUPLEX BIT(1)
+
+#define PORT_REMOTE_FAULT BIT(0)
+
+#define REG_PORT_1_LINK_MD_CTRL 0x1A
+#define REG_PORT_2_LINK_MD_CTRL 0x2A
+#define REG_PORT_3_LINK_MD_CTRL 0x3A
+#define REG_PORT_4_LINK_MD_CTRL 0x4A
+
+#define PORT_CABLE_10M_SHORT BIT(7)
+#define PORT_CABLE_DIAG_RESULT_M GENMASK(6, 5)
+#define PORT_CABLE_DIAG_RESULT_S 5
+#define PORT_CABLE_STAT_NORMAL 0
+#define PORT_CABLE_STAT_OPEN 1
+#define PORT_CABLE_STAT_SHORT 2
+#define PORT_CABLE_STAT_FAILED 3
+#define PORT_START_CABLE_DIAG BIT(4)
+#define PORT_FORCE_LINK BIT(3)
+#define PORT_POWER_SAVING BIT(2)
+#define PORT_PHY_REMOTE_LOOPBACK BIT(1)
+#define PORT_CABLE_FAULT_COUNTER_H 0x01
+
+#define REG_PORT_1_LINK_MD_RESULT 0x1B
+#define REG_PORT_2_LINK_MD_RESULT 0x2B
+#define REG_PORT_3_LINK_MD_RESULT 0x3B
+#define REG_PORT_4_LINK_MD_RESULT 0x4B
+
+#define PORT_CABLE_FAULT_COUNTER_L 0xFF
+#define PORT_CABLE_FAULT_COUNTER 0x1FF
+
+#define REG_PORT_1_CTRL_9 0x1C
+#define REG_PORT_2_CTRL_9 0x2C
+#define REG_PORT_3_CTRL_9 0x3C
+#define REG_PORT_4_CTRL_9 0x4C
+
+#define PORT_AUTO_NEG_ENABLE BIT(7)
+#define PORT_AUTO_NEG_DISABLE BIT(7)
+#define PORT_FORCE_100_MBIT BIT(6)
+#define PORT_FORCE_FULL_DUPLEX BIT(5)
+
+#define REG_PORT_1_CTRL_10 0x1D
+#define REG_PORT_2_CTRL_10 0x2D
+#define REG_PORT_3_CTRL_10 0x3D
+#define REG_PORT_4_CTRL_10 0x4D
+
+#define PORT_LED_OFF BIT(7)
+#define PORT_TX_DISABLE BIT(6)
+#define PORT_AUTO_NEG_RESTART BIT(5)
+#define PORT_POWER_DOWN BIT(3)
+#define PORT_AUTO_MDIX_DISABLE BIT(2)
+#define PORT_FORCE_MDIX BIT(1)
+#define PORT_MAC_LOOPBACK BIT(0)
+
+#define REG_PORT_1_STATUS_2 0x1E
+#define REG_PORT_2_STATUS_2 0x2E
+#define REG_PORT_3_STATUS_2 0x3E
+#define REG_PORT_4_STATUS_2 0x4E
+
+#define PORT_MDIX_STATUS BIT(7)
+#define PORT_AUTO_NEG_COMPLETE BIT(6)
+#define PORT_STAT_LINK_GOOD BIT(5)
+
+#define REG_PORT_1_STATUS_3 0x1F
+#define REG_PORT_2_STATUS_3 0x2F
+#define REG_PORT_3_STATUS_3 0x3F
+#define REG_PORT_4_STATUS_3 0x4F
+
+#define PORT_PHY_LOOPBACK BIT(7)
+#define PORT_PHY_ISOLATE BIT(5)
+#define PORT_PHY_SOFT_RESET BIT(4)
+#define PORT_PHY_FORCE_LINK BIT(3)
+#define PORT_PHY_MODE_M 0x7
+#define PHY_MODE_IN_AUTO_NEG 1
+#define PHY_MODE_10BT_HALF 2
+#define PHY_MODE_100BT_HALF 3
+#define PHY_MODE_10BT_FULL 5
+#define PHY_MODE_100BT_FULL 6
+#define PHY_MODE_ISOLDATE 7
+
+#define REG_PORT_CTRL_0 0x00
+#define REG_PORT_CTRL_1 0x01
+#define REG_PORT_CTRL_2 0x02
+#define REG_PORT_CTRL_VID 0x03
+
+#define REG_PORT_CTRL_5 0x05
+
+#define REG_PORT_STATUS_1 0x09
+#define REG_PORT_LINK_MD_CTRL 0x0A
+#define REG_PORT_LINK_MD_RESULT 0x0B
+#define REG_PORT_CTRL_9 0x0C
+#define REG_PORT_CTRL_10 0x0D
+#define REG_PORT_STATUS_3 0x0F
+
+#define REG_PORT_CTRL_12 0xA0
+#define REG_PORT_CTRL_13 0xA1
+#define REG_PORT_RATE_CTRL_3 0xA2
+#define REG_PORT_RATE_CTRL_2 0xA3
+#define REG_PORT_RATE_CTRL_1 0xA4
+#define REG_PORT_RATE_CTRL_0 0xA5
+#define REG_PORT_RATE_LIMIT 0xA6
+#define REG_PORT_IN_RATE_0 0xA7
+#define REG_PORT_IN_RATE_1 0xA8
+#define REG_PORT_IN_RATE_2 0xA9
+#define REG_PORT_IN_RATE_3 0xAA
+#define REG_PORT_OUT_RATE_0 0xAB
+#define REG_PORT_OUT_RATE_1 0xAC
+#define REG_PORT_OUT_RATE_2 0xAD
+#define REG_PORT_OUT_RATE_3 0xAE
+
+#define PORT_CTRL_ADDR(port, addr) \
+ ((addr) + REG_PORT_1_CTRL_0 + (port) * \
+ (REG_PORT_2_CTRL_0 - REG_PORT_1_CTRL_0))
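+/* For example, PORT_CTRL_ADDR(2, REG_PORT_CTRL_VID) expands to
+ * 0x03 + 0x10 + 2 * 0x10 = 0x33, the corresponding Port 3 register.
+ */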
+
+#define REG_SW_MAC_ADDR_0 0x68
+#define REG_SW_MAC_ADDR_1 0x69
+#define REG_SW_MAC_ADDR_2 0x6A
+#define REG_SW_MAC_ADDR_3 0x6B
+#define REG_SW_MAC_ADDR_4 0x6C
+#define REG_SW_MAC_ADDR_5 0x6D
+
+#define TABLE_EXT_SELECT_S 5
+#define TABLE_EEE_V 1
+#define TABLE_ACL_V 2
+#define TABLE_PME_V 4
+#define TABLE_LINK_MD_V 5
+#define TABLE_EEE (TABLE_EEE_V << TABLE_EXT_SELECT_S)
+#define TABLE_ACL (TABLE_ACL_V << TABLE_EXT_SELECT_S)
+#define TABLE_PME (TABLE_PME_V << TABLE_EXT_SELECT_S)
+#define TABLE_LINK_MD (TABLE_LINK_MD_V << TABLE_EXT_SELECT_S)
+#define TABLE_READ BIT(4)
+#define TABLE_SELECT_S 2
+#define TABLE_STATIC_MAC_V 0
+#define TABLE_VLAN_V 1
+#define TABLE_DYNAMIC_MAC_V 2
+#define TABLE_MIB_V 3
+#define TABLE_STATIC_MAC (TABLE_STATIC_MAC_V << TABLE_SELECT_S)
+#define TABLE_VLAN (TABLE_VLAN_V << TABLE_SELECT_S)
+#define TABLE_DYNAMIC_MAC (TABLE_DYNAMIC_MAC_V << TABLE_SELECT_S)
+#define TABLE_MIB (TABLE_MIB_V << TABLE_SELECT_S)
+
+#define REG_IND_CTRL_1 0x6F
+
+#define TABLE_ENTRY_MASK 0x03FF
+#define TABLE_EXT_ENTRY_MASK 0x0FFF
+
+#define REG_IND_DATA_5 0x73
+#define REG_IND_DATA_2 0x76
+#define REG_IND_DATA_1 0x77
+#define REG_IND_DATA_0 0x78
+
+#define REG_IND_DATA_PME_EEE_ACL 0xA0
+
+#define REG_INT_STATUS 0x7C
+#define REG_INT_ENABLE 0x7D
+
+#define INT_PME BIT(4)
+
+#define REG_ACL_INT_STATUS 0x7E
+#define REG_ACL_INT_ENABLE 0x7F
+
+#define INT_PORT_5 BIT(4)
+#define INT_PORT_4 BIT(3)
+#define INT_PORT_3 BIT(2)
+#define INT_PORT_2 BIT(1)
+#define INT_PORT_1 BIT(0)
+
+#define INT_PORT_ALL \
+ (INT_PORT_5 | INT_PORT_4 | INT_PORT_3 | INT_PORT_2 | INT_PORT_1)
+
+#define REG_SW_CTRL_12 0x80
+#define REG_SW_CTRL_13 0x81
+
+#define SWITCH_802_1P_MASK 3
+#define SWITCH_802_1P_BASE 3
+#define SWITCH_802_1P_SHIFT 2
+
+#define SW_802_1P_MAP_M KS_PRIO_M
+#define SW_802_1P_MAP_S KS_PRIO_S
+
+#define REG_SWITCH_CTRL_14 0x82
+
+#define SW_PRIO_MAPPING_M KS_PRIO_M
+#define SW_PRIO_MAPPING_S 6
+#define SW_PRIO_MAP_3_HI 0
+#define SW_PRIO_MAP_2_HI 2
+#define SW_PRIO_MAP_0_LO 3
+
+#define REG_SW_CTRL_15 0x83
+#define REG_SW_CTRL_16 0x84
+#define REG_SW_CTRL_17 0x85
+#define REG_SW_CTRL_18 0x86
+
+#define SW_SELF_ADDR_FILTER_ENABLE BIT(6)
+
+#define REG_SW_UNK_UCAST_CTRL 0x83
+#define REG_SW_UNK_MCAST_CTRL 0x84
+#define REG_SW_UNK_VID_CTRL 0x85
+#define REG_SW_UNK_IP_MCAST_CTRL 0x86
+
+#define SW_UNK_FWD_ENABLE BIT(5)
+#define SW_UNK_FWD_MAP KS_PORT_M
+
+#define REG_SW_CTRL_19 0x87
+
+#define SW_IN_RATE_LIMIT_PERIOD_M 0x3
+#define SW_IN_RATE_LIMIT_PERIOD_S 4
+#define SW_IN_RATE_LIMIT_16_MS 0
+#define SW_IN_RATE_LIMIT_64_MS 1
+#define SW_IN_RATE_LIMIT_256_MS 2
+#define SW_OUT_RATE_LIMIT_QUEUE_BASED BIT(3)
+#define SW_INS_TAG_ENABLE BIT(2)
+
+#define REG_TOS_PRIO_CTRL_0 0x90
+#define REG_TOS_PRIO_CTRL_1 0x91
+#define REG_TOS_PRIO_CTRL_2 0x92
+#define REG_TOS_PRIO_CTRL_3 0x93
+#define REG_TOS_PRIO_CTRL_4 0x94
+#define REG_TOS_PRIO_CTRL_5 0x95
+#define REG_TOS_PRIO_CTRL_6 0x96
+#define REG_TOS_PRIO_CTRL_7 0x97
+#define REG_TOS_PRIO_CTRL_8 0x98
+#define REG_TOS_PRIO_CTRL_9 0x99
+#define REG_TOS_PRIO_CTRL_10 0x9A
+#define REG_TOS_PRIO_CTRL_11 0x9B
+#define REG_TOS_PRIO_CTRL_12 0x9C
+#define REG_TOS_PRIO_CTRL_13 0x9D
+#define REG_TOS_PRIO_CTRL_14 0x9E
+#define REG_TOS_PRIO_CTRL_15 0x9F
+
+#define TOS_PRIO_M KS_PRIO_M
+#define TOS_PRIO_S KS_PRIO_S
+
+#define REG_SW_CTRL_20 0xA3
+
+#define SW_GMII_DRIVE_STRENGTH_S 4
+#define SW_DRIVE_STRENGTH_M 0x7
+#define SW_DRIVE_STRENGTH_2MA 0
+#define SW_DRIVE_STRENGTH_4MA 1
+#define SW_DRIVE_STRENGTH_8MA 2
+#define SW_DRIVE_STRENGTH_12MA 3
+#define SW_DRIVE_STRENGTH_16MA 4
+#define SW_DRIVE_STRENGTH_20MA 5
+#define SW_DRIVE_STRENGTH_24MA 6
+#define SW_DRIVE_STRENGTH_28MA 7
+#define SW_MII_DRIVE_STRENGTH_S 0
+
+#define REG_SW_CTRL_21 0xA4
+
+#define SW_IPV6_MLD_OPTION BIT(3)
+#define SW_IPV6_MLD_SNOOP BIT(2)
+
+#define REG_PORT_1_CTRL_12 0xB0
+#define REG_PORT_2_CTRL_12 0xC0
+#define REG_PORT_3_CTRL_12 0xD0
+#define REG_PORT_4_CTRL_12 0xE0
+#define REG_PORT_5_CTRL_12 0xF0
+
+#define PORT_PASS_ALL BIT(6)
+#define PORT_INS_TAG_FOR_PORT_5_S 3
+#define PORT_INS_TAG_FOR_PORT_5 BIT(3)
+#define PORT_INS_TAG_FOR_PORT_4 BIT(2)
+#define PORT_INS_TAG_FOR_PORT_3 BIT(1)
+#define PORT_INS_TAG_FOR_PORT_2 BIT(0)
+
+#define REG_PORT_1_CTRL_13 0xB1
+#define REG_PORT_2_CTRL_13 0xC1
+#define REG_PORT_3_CTRL_13 0xD1
+#define REG_PORT_4_CTRL_13 0xE1
+#define REG_PORT_5_CTRL_13 0xF1
+
+#define PORT_QUEUE_SPLIT_H BIT(1)
+#define PORT_QUEUE_SPLIT_1 0
+#define PORT_QUEUE_SPLIT_2 1
+#define PORT_QUEUE_SPLIT_4 2
+#define PORT_DROP_TAG BIT(0)
+
+#define REG_PORT_1_CTRL_14 0xB2
+#define REG_PORT_2_CTRL_14 0xC2
+#define REG_PORT_3_CTRL_14 0xD2
+#define REG_PORT_4_CTRL_14 0xE2
+#define REG_PORT_5_CTRL_14 0xF2
+#define REG_PORT_1_CTRL_15 0xB3
+#define REG_PORT_2_CTRL_15 0xC3
+#define REG_PORT_3_CTRL_15 0xD3
+#define REG_PORT_4_CTRL_15 0xE3
+#define REG_PORT_5_CTRL_15 0xF3
+#define REG_PORT_1_CTRL_16 0xB4
+#define REG_PORT_2_CTRL_16 0xC4
+#define REG_PORT_3_CTRL_16 0xD4
+#define REG_PORT_4_CTRL_16 0xE4
+#define REG_PORT_5_CTRL_16 0xF4
+#define REG_PORT_1_CTRL_17 0xB5
+#define REG_PORT_2_CTRL_17 0xC5
+#define REG_PORT_3_CTRL_17 0xD5
+#define REG_PORT_4_CTRL_17 0xE5
+#define REG_PORT_5_CTRL_17 0xF5
+
+#define REG_PORT_1_RATE_CTRL_3 0xB2
+#define REG_PORT_1_RATE_CTRL_2 0xB3
+#define REG_PORT_1_RATE_CTRL_1 0xB4
+#define REG_PORT_1_RATE_CTRL_0 0xB5
+#define REG_PORT_2_RATE_CTRL_3 0xC2
+#define REG_PORT_2_RATE_CTRL_2 0xC3
+#define REG_PORT_2_RATE_CTRL_1 0xC4
+#define REG_PORT_2_RATE_CTRL_0 0xC5
+#define REG_PORT_3_RATE_CTRL_3 0xD2
+#define REG_PORT_3_RATE_CTRL_2 0xD3
+#define REG_PORT_3_RATE_CTRL_1 0xD4
+#define REG_PORT_3_RATE_CTRL_0 0xD5
+#define REG_PORT_4_RATE_CTRL_3 0xE2
+#define REG_PORT_4_RATE_CTRL_2 0xE3
+#define REG_PORT_4_RATE_CTRL_1 0xE4
+#define REG_PORT_4_RATE_CTRL_0 0xE5
+#define REG_PORT_5_RATE_CTRL_3 0xF2
+#define REG_PORT_5_RATE_CTRL_2 0xF3
+#define REG_PORT_5_RATE_CTRL_1 0xF4
+#define REG_PORT_5_RATE_CTRL_0 0xF5
+
+#define RATE_CTRL_ENABLE BIT(7)
+#define RATE_RATIO_M (BIT(7) - 1)
+
+#define PORT_OUT_RATE_ENABLE BIT(7)
+
+#define REG_PORT_1_RATE_LIMIT 0xB6
+#define REG_PORT_2_RATE_LIMIT 0xC6
+#define REG_PORT_3_RATE_LIMIT 0xD6
+#define REG_PORT_4_RATE_LIMIT 0xE6
+#define REG_PORT_5_RATE_LIMIT 0xF6
+
+#define PORT_IN_PORT_BASED_S 6
+#define PORT_RATE_PACKET_BASED_S 5
+#define PORT_IN_FLOW_CTRL_S 4
+#define PORT_IN_LIMIT_MODE_M 0x3
+#define PORT_IN_LIMIT_MODE_S 2
+#define PORT_COUNT_IFG_S 1
+#define PORT_COUNT_PREAMBLE_S 0
+#define PORT_IN_PORT_BASED BIT(PORT_IN_PORT_BASED_S)
+#define PORT_RATE_PACKET_BASED BIT(PORT_RATE_PACKET_BASED_S)
+#define PORT_IN_FLOW_CTRL BIT(PORT_IN_FLOW_CTRL_S)
+#define PORT_IN_ALL 0
+#define PORT_IN_UNICAST 1
+#define PORT_IN_MULTICAST 2
+#define PORT_IN_BROADCAST 3
+#define PORT_COUNT_IFG BIT(PORT_COUNT_IFG_S)
+#define PORT_COUNT_PREAMBLE BIT(PORT_COUNT_PREAMBLE_S)
+
+#define REG_PORT_1_IN_RATE_0 0xB7
+#define REG_PORT_2_IN_RATE_0 0xC7
+#define REG_PORT_3_IN_RATE_0 0xD7
+#define REG_PORT_4_IN_RATE_0 0xE7
+#define REG_PORT_5_IN_RATE_0 0xF7
+#define REG_PORT_1_IN_RATE_1 0xB8
+#define REG_PORT_2_IN_RATE_1 0xC8
+#define REG_PORT_3_IN_RATE_1 0xD8
+#define REG_PORT_4_IN_RATE_1 0xE8
+#define REG_PORT_5_IN_RATE_1 0xF8
+#define REG_PORT_1_IN_RATE_2 0xB9
+#define REG_PORT_2_IN_RATE_2 0xC9
+#define REG_PORT_3_IN_RATE_2 0xD9
+#define REG_PORT_4_IN_RATE_2 0xE9
+#define REG_PORT_5_IN_RATE_2 0xF9
+#define REG_PORT_1_IN_RATE_3 0xBA
+#define REG_PORT_2_IN_RATE_3 0xCA
+#define REG_PORT_3_IN_RATE_3 0xDA
+#define REG_PORT_4_IN_RATE_3 0xEA
+#define REG_PORT_5_IN_RATE_3 0xFA
+
+#define PORT_IN_RATE_ENABLE BIT(7)
+#define PORT_RATE_LIMIT_M (BIT(7) - 1)
+
+#define REG_PORT_1_OUT_RATE_0 0xBB
+#define REG_PORT_2_OUT_RATE_0 0xCB
+#define REG_PORT_3_OUT_RATE_0 0xDB
+#define REG_PORT_4_OUT_RATE_0 0xEB
+#define REG_PORT_5_OUT_RATE_0 0xFB
+#define REG_PORT_1_OUT_RATE_1 0xBC
+#define REG_PORT_2_OUT_RATE_1 0xCC
+#define REG_PORT_3_OUT_RATE_1 0xDC
+#define REG_PORT_4_OUT_RATE_1 0xEC
+#define REG_PORT_5_OUT_RATE_1 0xFC
+#define REG_PORT_1_OUT_RATE_2 0xBD
+#define REG_PORT_2_OUT_RATE_2 0xCD
+#define REG_PORT_3_OUT_RATE_2 0xDD
+#define REG_PORT_4_OUT_RATE_2 0xED
+#define REG_PORT_5_OUT_RATE_2 0xFD
+#define REG_PORT_1_OUT_RATE_3 0xBE
+#define REG_PORT_2_OUT_RATE_3 0xCE
+#define REG_PORT_3_OUT_RATE_3 0xDE
+#define REG_PORT_4_OUT_RATE_3 0xEE
+#define REG_PORT_5_OUT_RATE_3 0xFE
+
+/* 88x3 specific */
+
+#define REG_SW_INSERT_SRC_PVID 0xC2
+
+/* PME */
+
+#define SW_PME_OUTPUT_ENABLE BIT(1)
+#define SW_PME_ACTIVE_HIGH BIT(0)
+
+#define PORT_MAGIC_PACKET_DETECT BIT(2)
+#define PORT_LINK_UP_DETECT BIT(1)
+#define PORT_ENERGY_DETECT BIT(0)
+
+/* ACL */
+
+#define ACL_FIRST_RULE_M 0xF
+
+#define ACL_MODE_M 0x3
+#define ACL_MODE_S 4
+#define ACL_MODE_DISABLE 0
+#define ACL_MODE_LAYER_2 1
+#define ACL_MODE_LAYER_3 2
+#define ACL_MODE_LAYER_4 3
+#define ACL_ENABLE_M 0x3
+#define ACL_ENABLE_S 2
+#define ACL_ENABLE_2_COUNT 0
+#define ACL_ENABLE_2_TYPE 1
+#define ACL_ENABLE_2_MAC 2
+#define ACL_ENABLE_2_BOTH 3
+#define ACL_ENABLE_3_IP 1
+#define ACL_ENABLE_3_SRC_DST_COMP 2
+#define ACL_ENABLE_4_PROTOCOL 0
+#define ACL_ENABLE_4_TCP_PORT_COMP 1
+#define ACL_ENABLE_4_UDP_PORT_COMP 2
+#define ACL_ENABLE_4_TCP_SEQN_COMP 3
+#define ACL_SRC BIT(1)
+#define ACL_EQUAL BIT(0)
+
+#define ACL_MAX_PORT 0xFFFF
+
+#define ACL_MIN_PORT 0xFFFF
+#define ACL_IP_ADDR 0xFFFFFFFF
+#define ACL_TCP_SEQNUM 0xFFFFFFFF
+
+#define ACL_RESERVED 0xF8
+#define ACL_PORT_MODE_M 0x3
+#define ACL_PORT_MODE_S 1
+#define ACL_PORT_MODE_DISABLE 0
+#define ACL_PORT_MODE_EITHER 1
+#define ACL_PORT_MODE_IN_RANGE 2
+#define ACL_PORT_MODE_OUT_OF_RANGE 3
+
+#define ACL_TCP_FLAG_ENABLE BIT(0)
+
+#define ACL_TCP_FLAG_M 0xFF
+
+#define ACL_TCP_FLAG 0xFF
+#define ACL_ETH_TYPE 0xFFFF
+#define ACL_IP_M 0xFFFFFFFF
+
+#define ACL_PRIO_MODE_M 0x3
+#define ACL_PRIO_MODE_S 6
+#define ACL_PRIO_MODE_DISABLE 0
+#define ACL_PRIO_MODE_HIGHER 1
+#define ACL_PRIO_MODE_LOWER 2
+#define ACL_PRIO_MODE_REPLACE 3
+#define ACL_PRIO_M 0x7
+#define ACL_PRIO_S 3
+#define ACL_VLAN_PRIO_REPLACE BIT(2)
+#define ACL_VLAN_PRIO_M 0x7
+#define ACL_VLAN_PRIO_HI_M 0x3
+
+#define ACL_VLAN_PRIO_LO_M 0x8
+#define ACL_VLAN_PRIO_S 7
+#define ACL_MAP_MODE_M 0x3
+#define ACL_MAP_MODE_S 5
+#define ACL_MAP_MODE_DISABLE 0
+#define ACL_MAP_MODE_OR 1
+#define ACL_MAP_MODE_AND 2
+#define ACL_MAP_MODE_REPLACE 3
+#define ACL_MAP_PORT_M 0x1F
+
+#define ACL_CNT_M (BIT(11) - 1)
+#define ACL_CNT_S 5
+#define ACL_MSEC_UNIT BIT(4)
+#define ACL_INTR_MODE BIT(3)
+
+#define REG_PORT_ACL_BYTE_EN_MSB 0x10
+
+#define ACL_BYTE_EN_MSB_M 0x3F
+
+#define REG_PORT_ACL_BYTE_EN_LSB 0x11
+
+#define ACL_ACTION_START 0xA
+#define ACL_ACTION_LEN 2
+#define ACL_INTR_CNT_START 0xB
+#define ACL_RULESET_START 0xC
+#define ACL_RULESET_LEN 2
+#define ACL_TABLE_LEN 14
+
+#define ACL_ACTION_ENABLE 0x000C
+#define ACL_MATCH_ENABLE 0x1FF0
+#define ACL_RULESET_ENABLE 0x2003
+#define ACL_BYTE_ENABLE ((ACL_BYTE_EN_MSB_M << 8) | 0xFF)
+#define ACL_MODE_ENABLE (0x10 << 8)
+
+#define REG_PORT_ACL_CTRL_0 0x12
+
+#define PORT_ACL_WRITE_DONE BIT(6)
+#define PORT_ACL_READ_DONE BIT(5)
+#define PORT_ACL_WRITE BIT(4)
+#define PORT_ACL_INDEX_M 0xF
+
+#define REG_PORT_ACL_CTRL_1 0x13
+
+#define PORT_ACL_FORCE_DLR_MISS BIT(0)
+
+#define KSZ8795_ID_HI 0x0022
+#define KSZ8795_ID_LO 0x1550
+#define KSZ8863_ID_LO 0x1430
+
+#define KSZ8795_SW_ID 0x8795
+
+#define PHY_REG_LINK_MD 0x1D
+
+#define PHY_START_CABLE_DIAG BIT(15)
+#define PHY_CABLE_DIAG_RESULT_M GENMASK(14, 13)
+#define PHY_CABLE_DIAG_RESULT 0x6000
+#define PHY_CABLE_STAT_NORMAL 0x0000
+#define PHY_CABLE_STAT_OPEN 0x2000
+#define PHY_CABLE_STAT_SHORT 0x4000
+#define PHY_CABLE_STAT_FAILED 0x6000
+#define PHY_CABLE_10M_SHORT BIT(12)
+#define PHY_CABLE_FAULT_COUNTER_M GENMASK(8, 0)
+
+#define PHY_REG_PHY_CTRL 0x1F
+
+#define PHY_MODE_M 0x7
+#define PHY_MODE_S 8
+#define PHY_STAT_REVERSED_POLARITY BIT(5)
+#define PHY_STAT_MDIX BIT(4)
+#define PHY_FORCE_LINK BIT(3)
+#define PHY_POWER_SAVING_ENABLE BIT(2)
+#define PHY_REMOTE_LOOPBACK BIT(1)
+
+/* Chip resource */
+
+#define PRIO_QUEUES 4
+
+#define KS_PRIO_IN_REG 4
+
+#define MIB_COUNTER_NUM 0x20
+
+/* Common names used by other drivers */
+
+#define P_BCAST_STORM_CTRL REG_PORT_CTRL_0
+#define P_PRIO_CTRL REG_PORT_CTRL_0
+#define P_TAG_CTRL REG_PORT_CTRL_0
+#define P_MIRROR_CTRL REG_PORT_CTRL_1
+#define P_802_1P_CTRL REG_PORT_CTRL_2
+#define P_PASS_ALL_CTRL REG_PORT_CTRL_12
+#define P_INS_SRC_PVID_CTRL REG_PORT_CTRL_12
+#define P_DROP_TAG_CTRL REG_PORT_CTRL_13
+#define P_RATE_LIMIT_CTRL REG_PORT_RATE_LIMIT
+
+#define S_UNKNOWN_DA_CTRL REG_SWITCH_CTRL_12
+#define S_FORWARD_INVALID_VID_CTRL REG_FORWARD_INVALID_VID
+
+#define S_FLUSH_TABLE_CTRL REG_SW_CTRL_0
+#define S_LINK_AGING_CTRL REG_SW_CTRL_0
+#define S_HUGE_PACKET_CTRL REG_SW_CTRL_1
+#define S_MIRROR_CTRL REG_SW_CTRL_3
+#define S_REPLACE_VID_CTRL REG_SW_CTRL_4
+#define S_PASS_PAUSE_CTRL REG_SW_CTRL_10
+#define S_802_1P_PRIO_CTRL REG_SW_CTRL_12
+#define S_TOS_PRIO_CTRL REG_TOS_PRIO_CTRL_0
+#define S_IPV6_MLD_CTRL REG_SW_CTRL_21
+
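+/* Compose the upper byte of the 16-bit indirect access control word,
+ * e.g. IND_ACC_TABLE(TABLE_MIB | TABLE_READ) for an indirect MIB read.
+ */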
+#define IND_ACC_TABLE(table) ((table) << 8)
+
+/* EEE global registers, reached through the indirect EEE table */
+#define REG_IND_EEE_GLOB2_LO 0x34
+#define REG_IND_EEE_GLOB2_HI 0x35
+
+/*
+ * MIB_COUNTER_VALUE 00-00000000-3FFFFFFF
+ * MIB_TOTAL_BYTES 00-0000000F-FFFFFFFF
+ * MIB_PACKET_DROPPED 00-00000000-0000FFFF
+ * MIB_COUNTER_VALID 00-00000020-00000000
+ * MIB_COUNTER_OVERFLOW 00-00000040-00000000
+ */
+
+#define MIB_COUNTER_VALUE 0x3FFFFFFF
+
+#define KSZ8795_MIB_TOTAL_RX_0 0x100
+#define KSZ8795_MIB_TOTAL_TX_0 0x101
+#define KSZ8795_MIB_TOTAL_RX_1 0x104
+#define KSZ8795_MIB_TOTAL_TX_1 0x105
+
+#define KSZ8863_MIB_PACKET_DROPPED_TX_0 0x100
+#define KSZ8863_MIB_PACKET_DROPPED_RX_0 0x105
+
+#define MIB_PACKET_DROPPED 0x0000FFFF
+
+#define MIB_TOTAL_BYTES_H 0x0000000F
+
+#define TAIL_TAG_OVERRIDE BIT(6)
+#define TAIL_TAG_LOOKUP BIT(7)
+
+#define FID_ENTRIES 128
+
+#endif
diff --git a/drivers/net/dsa/microchip/ksz8863_smi.c b/drivers/net/dsa/microchip/ksz8863_smi.c
new file mode 100644
index 000000000..ed77ac222
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz8863_smi.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip KSZ8863 series register access through SMI
+ *
+ * Copyright (C) 2019 Pengutronix, Michael Grzeschik <kernel@pengutronix.de>
+ */
+
+#include "ksz8.h"
+#include "ksz_common.h"
+
+/* Serial Management Interface (SMI) uses the following frame format:
+ *
+ *       preamble|start|Read/Write|  PHY   |  REG  |TA|   Data bits      | Idle
+ *               |frame| OP code  |address |address|  |                  |
+ * read | 32x1's | 01  |    00    | 1xRRR  | RRRRR |Z0| 00000000DDDDDDDD |  Z
+ * write| 32x1's | 01  |    00    | 0xRRR  | RRRRR |10| xxxxxxxxDDDDDDDD |  Z
+ *
+ */
+
+#define SMI_KSZ88XX_READ_PHY BIT(4)
+
+static int ksz8863_mdio_read(void *ctx, const void *reg_buf, size_t reg_len,
+ void *val_buf, size_t val_len)
+{
+ struct ksz_device *dev = ctx;
+ struct mdio_device *mdev;
+ u8 reg = *(u8 *)reg_buf;
+ u8 *val = val_buf;
+ int i, ret = 0;
+
+ mdev = dev->priv;
+
+ mutex_lock_nested(&mdev->bus->mdio_lock, MDIO_MUTEX_NESTED);
+ for (i = 0; i < val_len; i++) {
+ int tmp = reg + i;
+
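+ /* Split the 8-bit switch register address: the top three bits go
+ * into the PHY address field together with the read flag, the low
+ * five bits into the register field.
+ */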
+ ret = __mdiobus_read(mdev->bus, ((tmp & 0xE0) >> 5) |
+ SMI_KSZ88XX_READ_PHY, tmp);
+ if (ret < 0)
+ goto out;
+
+ val[i] = ret;
+ }
+ ret = 0;
+
+ out:
+ mutex_unlock(&mdev->bus->mdio_lock);
+
+ return ret;
+}
+
+static int ksz8863_mdio_write(void *ctx, const void *data, size_t count)
+{
+ struct ksz_device *dev = ctx;
+ struct mdio_device *mdev;
+ int i, ret = 0;
+ u32 reg;
+ u8 *val;
+
+ mdev = dev->priv;
+
+ val = (u8 *)(data + 4);
+ reg = *(u32 *)data;
+
+ mutex_lock_nested(&mdev->bus->mdio_lock, MDIO_MUTEX_NESTED);
+ for (i = 0; i < (count - 4); i++) {
+ int tmp = reg + i;
+
+ ret = __mdiobus_write(mdev->bus, ((tmp & 0xE0) >> 5),
+ tmp, val[i]);
+ if (ret < 0)
+ goto out;
+ }
+
+ out:
+ mutex_unlock(&mdev->bus->mdio_lock);
+
+ return ret;
+}
+
+static const struct regmap_bus regmap_smi[] = {
+ {
+ .read = ksz8863_mdio_read,
+ .write = ksz8863_mdio_write,
+ },
+ {
+ .read = ksz8863_mdio_read,
+ .write = ksz8863_mdio_write,
+ .val_format_endian_default = REGMAP_ENDIAN_BIG,
+ },
+ {
+ .read = ksz8863_mdio_read,
+ .write = ksz8863_mdio_write,
+ .val_format_endian_default = REGMAP_ENDIAN_BIG,
+ }
+};
+
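+/* One regmap per access width (8/16/32 bit); wider accesses are built
+ * from consecutive 8-bit SMI transfers by the bus ops above.
+ */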
+static const struct regmap_config ksz8863_regmap_config[] = {
+ {
+ .name = "#8",
+ .reg_bits = 8,
+ .pad_bits = 24,
+ .val_bits = 8,
+ .cache_type = REGCACHE_NONE,
+ .lock = ksz_regmap_lock,
+ .unlock = ksz_regmap_unlock,
+ },
+ {
+ .name = "#16",
+ .reg_bits = 8,
+ .pad_bits = 24,
+ .val_bits = 16,
+ .cache_type = REGCACHE_NONE,
+ .lock = ksz_regmap_lock,
+ .unlock = ksz_regmap_unlock,
+ },
+ {
+ .name = "#32",
+ .reg_bits = 8,
+ .pad_bits = 24,
+ .val_bits = 32,
+ .cache_type = REGCACHE_NONE,
+ .lock = ksz_regmap_lock,
+ .unlock = ksz_regmap_unlock,
+ }
+};
+
+static int ksz8863_smi_probe(struct mdio_device *mdiodev)
+{
+ struct regmap_config rc;
+ struct ksz_device *dev;
+ int ret;
+ int i;
+
+ dev = ksz_switch_alloc(&mdiodev->dev, mdiodev);
+ if (!dev)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(ksz8863_regmap_config); i++) {
+ rc = ksz8863_regmap_config[i];
+ rc.lock_arg = &dev->regmap_mutex;
+ dev->regmap[i] = devm_regmap_init(&mdiodev->dev,
+ &regmap_smi[i], dev,
+ &rc);
+ if (IS_ERR(dev->regmap[i])) {
+ ret = PTR_ERR(dev->regmap[i]);
+ dev_err(&mdiodev->dev,
+ "Failed to initialize regmap%i: %d\n",
+ ksz8863_regmap_config[i].val_bits, ret);
+ return ret;
+ }
+ }
+
+ if (mdiodev->dev.platform_data)
+ dev->pdata = mdiodev->dev.platform_data;
+
+ ret = ksz_switch_register(dev);
+
+ /* Main DSA driver may not be started yet. */
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(&mdiodev->dev, dev);
+
+ return 0;
+}
+
+static void ksz8863_smi_remove(struct mdio_device *mdiodev)
+{
+ struct ksz_device *dev = dev_get_drvdata(&mdiodev->dev);
+
+ if (dev)
+ ksz_switch_remove(dev);
+}
+
+static void ksz8863_smi_shutdown(struct mdio_device *mdiodev)
+{
+ struct ksz_device *dev = dev_get_drvdata(&mdiodev->dev);
+
+ if (dev)
+ dsa_switch_shutdown(dev->ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static const struct of_device_id ksz8863_dt_ids[] = {
+ {
+ .compatible = "microchip,ksz8863",
+ .data = &ksz_switch_chips[KSZ8830]
+ },
+ {
+ .compatible = "microchip,ksz8873",
+ .data = &ksz_switch_chips[KSZ8830]
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ksz8863_dt_ids);
+
+static struct mdio_driver ksz8863_driver = {
+ .probe = ksz8863_smi_probe,
+ .remove = ksz8863_smi_remove,
+ .shutdown = ksz8863_smi_shutdown,
+ .mdiodrv.driver = {
+ .name = "ksz8863-switch",
+ .of_match_table = ksz8863_dt_ids,
+ },
+};
+
+mdio_module_driver(ksz8863_driver);
+
+MODULE_AUTHOR("Michael Grzeschik <m.grzeschik@pengutronix.de>");
+MODULE_DESCRIPTION("Microchip KSZ8863 SMI Switch driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
new file mode 100644
index 000000000..a73697147
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -0,0 +1,1202 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip KSZ9477 switch driver main logic
+ *
+ * Copyright (C) 2017-2019 Microchip Technology Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+#include <linux/platform_data/microchip-ksz.h>
+#include <linux/phy.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <net/dsa.h>
+#include <net/switchdev.h>
+
+#include "ksz9477_reg.h"
+#include "ksz_common.h"
+#include "ksz9477.h"
+
+static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
+{
+ regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0);
+}
+
+static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
+ bool set)
+{
+ regmap_update_bits(dev->regmap[0], PORT_CTRL_ADDR(port, offset),
+ bits, set ? bits : 0);
+}
+
+static void ksz9477_cfg32(struct ksz_device *dev, u32 addr, u32 bits, bool set)
+{
+ regmap_update_bits(dev->regmap[2], addr, bits, set ? bits : 0);
+}
+
+static void ksz9477_port_cfg32(struct ksz_device *dev, int port, int offset,
+ u32 bits, bool set)
+{
+ regmap_update_bits(dev->regmap[2], PORT_CTRL_ADDR(port, offset),
+ bits, set ? bits : 0);
+}
+
+int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu)
+{
+ u16 frame_size, max_frame = 0;
+ int i;
+
+ frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+ /* Cache the per-port MTU setting */
+ dev->ports[port].max_frame = frame_size;
+
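+ /* The switch has a single global frame size limit (REG_SW_MTU__2),
+ * so program the largest frame size requested on any port.
+ */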
+ for (i = 0; i < dev->info->port_cnt; i++)
+ max_frame = max(max_frame, dev->ports[i].max_frame);
+
+ return regmap_update_bits(dev->regmap[1], REG_SW_MTU__2,
+ REG_SW_MTU_MASK, max_frame);
+}
+
+int ksz9477_max_mtu(struct ksz_device *dev, int port)
+{
+ return KSZ9477_MAX_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
+}
+
+static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev)
+{
+ unsigned int val;
+
+ return regmap_read_poll_timeout(dev->regmap[0], REG_SW_VLAN_CTRL,
+ val, !(val & VLAN_START), 10, 1000);
+}
+
+static int ksz9477_get_vlan_table(struct ksz_device *dev, u16 vid,
+ u32 *vlan_table)
+{
+ int ret;
+
+ mutex_lock(&dev->vlan_mutex);
+
+ ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
+ ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_READ | VLAN_START);
+
+ /* wait to be cleared */
+ ret = ksz9477_wait_vlan_ctrl_ready(dev);
+ if (ret) {
+ dev_dbg(dev->dev, "Failed to read vlan table\n");
+ goto exit;
+ }
+
+ ksz_read32(dev, REG_SW_VLAN_ENTRY__4, &vlan_table[0]);
+ ksz_read32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, &vlan_table[1]);
+ ksz_read32(dev, REG_SW_VLAN_ENTRY_PORTS__4, &vlan_table[2]);
+
+ ksz_write8(dev, REG_SW_VLAN_CTRL, 0);
+
+exit:
+ mutex_unlock(&dev->vlan_mutex);
+
+ return ret;
+}
+
+static int ksz9477_set_vlan_table(struct ksz_device *dev, u16 vid,
+ u32 *vlan_table)
+{
+ int ret;
+
+ mutex_lock(&dev->vlan_mutex);
+
+ ksz_write32(dev, REG_SW_VLAN_ENTRY__4, vlan_table[0]);
+ ksz_write32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, vlan_table[1]);
+ ksz_write32(dev, REG_SW_VLAN_ENTRY_PORTS__4, vlan_table[2]);
+
+ ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
+ ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_START | VLAN_WRITE);
+
+ /* wait to be cleared */
+ ret = ksz9477_wait_vlan_ctrl_ready(dev);
+ if (ret) {
+ dev_dbg(dev->dev, "Failed to write vlan table\n");
+ goto exit;
+ }
+
+ ksz_write8(dev, REG_SW_VLAN_CTRL, 0);
+
+ /* update vlan cache table */
+ dev->vlan_cache[vid].table[0] = vlan_table[0];
+ dev->vlan_cache[vid].table[1] = vlan_table[1];
+ dev->vlan_cache[vid].table[2] = vlan_table[2];
+
+exit:
+ mutex_unlock(&dev->vlan_mutex);
+
+ return ret;
+}
+
+static void ksz9477_read_table(struct ksz_device *dev, u32 *table)
+{
+ ksz_read32(dev, REG_SW_ALU_VAL_A, &table[0]);
+ ksz_read32(dev, REG_SW_ALU_VAL_B, &table[1]);
+ ksz_read32(dev, REG_SW_ALU_VAL_C, &table[2]);
+ ksz_read32(dev, REG_SW_ALU_VAL_D, &table[3]);
+}
+
+static void ksz9477_write_table(struct ksz_device *dev, u32 *table)
+{
+ ksz_write32(dev, REG_SW_ALU_VAL_A, table[0]);
+ ksz_write32(dev, REG_SW_ALU_VAL_B, table[1]);
+ ksz_write32(dev, REG_SW_ALU_VAL_C, table[2]);
+ ksz_write32(dev, REG_SW_ALU_VAL_D, table[3]);
+}
+
+static int ksz9477_wait_alu_ready(struct ksz_device *dev)
+{
+ unsigned int val;
+
+ return regmap_read_poll_timeout(dev->regmap[2], REG_SW_ALU_CTRL__4,
+ val, !(val & ALU_START), 10, 1000);
+}
+
+static int ksz9477_wait_alu_sta_ready(struct ksz_device *dev)
+{
+ unsigned int val;
+
+ return regmap_read_poll_timeout(dev->regmap[2],
+ REG_SW_ALU_STAT_CTRL__4,
+ val, !(val & ALU_STAT_START),
+ 10, 1000);
+}
+
+int ksz9477_reset_switch(struct ksz_device *dev)
+{
+ u8 data8;
+ u32 data32;
+
+ /* reset switch */
+ ksz_cfg(dev, REG_SW_OPERATION, SW_RESET, true);
+
+ /* turn off SPI DO Edge select */
+ regmap_update_bits(dev->regmap[0], REG_SW_GLOBAL_SERIAL_CTRL_0,
+ SPI_AUTO_EDGE_DETECTION, 0);
+
+ /* default configuration */
+ ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8);
+ data8 = SW_AGING_ENABLE | SW_LINK_AUTO_AGING |
+ SW_SRC_ADDR_FILTER | SW_FLUSH_STP_TABLE | SW_FLUSH_MSTP_TABLE;
+ ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);
+
+ /* disable interrupts */
+ ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
+ ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0x7F);
+ ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);
+
+ /* KSZ9893 compatible chips do not support refclk configuration */
+ if (dev->chip_id == KSZ9893_CHIP_ID ||
+ dev->chip_id == KSZ8563_CHIP_ID)
+ return 0;
+
+ data8 = SW_ENABLE_REFCLKO;
+ if (dev->synclko_disable)
+ data8 = 0;
+ else if (dev->synclko_125)
+ data8 = SW_ENABLE_REFCLKO | SW_REFCLKO_IS_125MHZ;
+ ksz_write8(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1, data8);
+
+ return 0;
+}
+
+void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
+{
+ struct ksz_port *p = &dev->ports[port];
+ unsigned int val;
+ u32 data;
+ int ret;
+
+ /* retain the flush/freeze bit */
+ data = p->freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
+ data |= MIB_COUNTER_READ;
+ data |= (addr << MIB_COUNTER_INDEX_S);
+ ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, data);
+
+ ret = regmap_read_poll_timeout(dev->regmap[2],
+ PORT_CTRL_ADDR(port, REG_PORT_MIB_CTRL_STAT__4),
+ val, !(val & MIB_COUNTER_READ), 10, 1000);
+ /* Failed to read the MIB counter; bail out. */
+ if (ret) {
+ dev_dbg(dev->dev, "Failed to get MIB\n");
+ return;
+ }
+
+ /* count resets upon read */
+ ksz_pread32(dev, port, REG_PORT_MIB_DATA, &data);
+ *cnt += data;
+}
+
+void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt)
+{
+ addr = dev->info->mib_names[addr].index;
+ ksz9477_r_mib_cnt(dev, port, addr, cnt);
+}
+
+void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
+{
+ u32 val = freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
+ struct ksz_port *p = &dev->ports[port];
+
+ /* enable/disable the port for flush/freeze function */
+ mutex_lock(&p->mib.cnt_mutex);
+ ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, val);
+
+ /* used by MIB counter reading code to know freeze is enabled */
+ p->freeze = freeze;
+ mutex_unlock(&p->mib.cnt_mutex);
+}
+
+void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
+{
+ struct ksz_port_mib *mib = &dev->ports[port].mib;
+
+ /* flush all enabled port MIB counters */
+ mutex_lock(&mib->cnt_mutex);
+ ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4,
+ MIB_COUNTER_FLUSH_FREEZE);
+ ksz_write8(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FLUSH);
+ ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, 0);
+ mutex_unlock(&mib->cnt_mutex);
+}
+
+static void ksz9477_r_phy_quirks(struct ksz_device *dev, u16 addr, u16 reg,
+ u16 *data)
+{
+ /* The KSZ8563R does not implement extended registers, but the
+ * BMSR_ESTATEN and BMSR_ERCAP bits are set anyway.
+ */
+ if (dev->chip_id == KSZ8563_CHIP_ID && reg == MII_BMSR)
+ *data &= ~(BMSR_ESTATEN | BMSR_ERCAP);
+}
+
+int ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
+{
+ u16 val = 0xffff;
+ int ret;
+
+ /* No real PHY after this. Simulate the PHY.
+ * A fixed PHY can be set up in the device tree, but this function is
+ * still called for that port during initialization.
+ * An RGMII PHY cannot be accessed at all, so a fixed PHY should be
+ * used there. Support for SGMII PHYs will be added later.
+ */
+ if (!dev->info->internal_phy[addr]) {
+ struct ksz_port *p = &dev->ports[addr];
+
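+ /* Canned register values presenting a fixed, link-up,
+ * autoneg-complete gigabit PHY to the PHY library.
+ */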
+ switch (reg) {
+ case MII_BMCR:
+ val = 0x1140;
+ break;
+ case MII_BMSR:
+ val = 0x796d;
+ break;
+ case MII_PHYSID1:
+ val = 0x0022;
+ break;
+ case MII_PHYSID2:
+ val = 0x1631;
+ break;
+ case MII_ADVERTISE:
+ val = 0x05e1;
+ break;
+ case MII_LPA:
+ val = 0xc5e1;
+ break;
+ case MII_CTRL1000:
+ val = 0x0700;
+ break;
+ case MII_STAT1000:
+ if (p->phydev.speed == SPEED_1000)
+ val = 0x3800;
+ else
+ val = 0;
+ break;
+ }
+ } else {
+ ret = ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
+ if (ret)
+ return ret;
+
+ ksz9477_r_phy_quirks(dev, addr, reg, &val);
+ }
+
+ *data = val;
+
+ return 0;
+}
+
+int ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
+{
+ /* No real PHY after this. */
+ if (!dev->info->internal_phy[addr])
+ return 0;
+
+ return ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);
+}
+
+void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member)
+{
+ ksz_pwrite32(dev, port, REG_PORT_VLAN_MEMBERSHIP__4, member);
+}
+
+void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
+{
+ const u16 *regs = dev->info->regs;
+ u8 data;
+
+ regmap_update_bits(dev->regmap[0], REG_SW_LUE_CTRL_2,
+ SW_FLUSH_OPTION_M << SW_FLUSH_OPTION_S,
+ SW_FLUSH_OPTION_DYN_MAC << SW_FLUSH_OPTION_S);
+
+ if (port < dev->info->port_cnt) {
+ /* flush individual port */
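+ /* Learning is disabled for the duration of the flush so the
+ * port does not immediately re-learn the flushed entries; the
+ * previous STP control value is restored afterwards.
+ */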
+ ksz_pread8(dev, port, regs[P_STP_CTRL], &data);
+ if (!(data & PORT_LEARN_DISABLE))
+ ksz_pwrite8(dev, port, regs[P_STP_CTRL],
+ data | PORT_LEARN_DISABLE);
+ ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
+ ksz_pwrite8(dev, port, regs[P_STP_CTRL], data);
+ } else {
+ /* flush all */
+ ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_STP_TABLE, true);
+ }
+}
+
+int ksz9477_port_vlan_filtering(struct ksz_device *dev, int port,
+ bool flag, struct netlink_ext_ack *extack)
+{
+ if (flag) {
+ ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
+ PORT_VLAN_LOOKUP_VID_0, true);
+ ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, true);
+ } else {
+ ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, false);
+ ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
+ PORT_VLAN_LOOKUP_VID_0, false);
+ }
+
+ return 0;
+}
+
+int ksz9477_port_vlan_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ u32 vlan_table[3];
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ int err;
+
+ err = ksz9477_get_vlan_table(dev, vlan->vid, vlan_table);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get vlan table");
+ return err;
+ }
+
+ vlan_table[0] = VLAN_VALID | (vlan->vid & VLAN_FID_M);
+ if (untagged)
+ vlan_table[1] |= BIT(port);
+ else
+ vlan_table[1] &= ~BIT(port);
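+ /* The CPU port always stays tagged (it is cleared from the
+ * untag map) so traffic from different VLANs remains
+ * distinguishable on the host interface.
+ */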
+ vlan_table[1] &= ~(BIT(dev->cpu_port));
+
+ vlan_table[2] |= BIT(port) | BIT(dev->cpu_port);
+
+ err = ksz9477_set_vlan_table(dev, vlan->vid, vlan_table);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set vlan table");
+ return err;
+ }
+
+ /* change PVID */
+ if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
+ ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, vlan->vid);
+
+ return 0;
+}
+
+int ksz9477_port_vlan_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ u32 vlan_table[3];
+ u16 pvid;
+
+ ksz_pread16(dev, port, REG_PORT_DEFAULT_VID, &pvid);
+ pvid = pvid & 0xFFF;
+
+ if (ksz9477_get_vlan_table(dev, vlan->vid, vlan_table)) {
+ dev_dbg(dev->dev, "Failed to get vlan table\n");
+ return -ETIMEDOUT;
+ }
+
+ vlan_table[2] &= ~BIT(port);
+
+ if (pvid == vlan->vid)
+ pvid = 1;
+
+ if (untagged)
+ vlan_table[1] &= ~BIT(port);
+
+ if (ksz9477_set_vlan_table(dev, vlan->vid, vlan_table)) {
+ dev_dbg(dev->dev, "Failed to set vlan table\n");
+ return -ETIMEDOUT;
+ }
+
+ ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, pvid);
+
+ return 0;
+}
+
+int ksz9477_fdb_add(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db)
+{
+ u32 alu_table[4];
+ u32 data;
+ int ret = 0;
+
+ mutex_lock(&dev->alu_mutex);
+
+ /* find any entry with mac & vid */
+ data = vid << ALU_FID_INDEX_S;
+ data |= ((addr[0] << 8) | addr[1]);
+ ksz_write32(dev, REG_SW_ALU_INDEX_0, data);
+
+ data = ((addr[2] << 24) | (addr[3] << 16));
+ data |= ((addr[4] << 8) | addr[5]);
+ ksz_write32(dev, REG_SW_ALU_INDEX_1, data);
+
+ /* start read operation */
+ ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);
+
+ /* wait to be finished */
+ ret = ksz9477_wait_alu_ready(dev);
+ if (ret) {
+ dev_dbg(dev->dev, "Failed to read ALU\n");
+ goto exit;
+ }
+
+ /* read ALU entry */
+ ksz9477_read_table(dev, alu_table);
+
+ /* update ALU entry */
+ alu_table[0] = ALU_V_STATIC_VALID;
+ alu_table[1] |= BIT(port);
+ if (vid)
+ alu_table[1] |= ALU_V_USE_FID;
+ alu_table[2] = (vid << ALU_V_FID_S);
+ alu_table[2] |= ((addr[0] << 8) | addr[1]);
+ alu_table[3] = ((addr[2] << 24) | (addr[3] << 16));
+ alu_table[3] |= ((addr[4] << 8) | addr[5]);
+
+ ksz9477_write_table(dev, alu_table);
+
+ ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);
+
+ /* wait to be finished */
+ ret = ksz9477_wait_alu_ready(dev);
+ if (ret)
+ dev_dbg(dev->dev, "Failed to write ALU\n");
+
+exit:
+ mutex_unlock(&dev->alu_mutex);
+
+ return ret;
+}
+
+int ksz9477_fdb_del(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db)
+{
+ u32 alu_table[4];
+ u32 data;
+ int ret = 0;
+
+ mutex_lock(&dev->alu_mutex);
+
+ /* read any entry with mac & vid */
+ data = vid << ALU_FID_INDEX_S;
+ data |= ((addr[0] << 8) | addr[1]);
+ ksz_write32(dev, REG_SW_ALU_INDEX_0, data);
+
+ data = ((addr[2] << 24) | (addr[3] << 16));
+ data |= ((addr[4] << 8) | addr[5]);
+ ksz_write32(dev, REG_SW_ALU_INDEX_1, data);
+
+ /* start read operation */
+ ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);
+
+ /* wait to be finished */
+ ret = ksz9477_wait_alu_ready(dev);
+ if (ret) {
+ dev_dbg(dev->dev, "Failed to read ALU\n");
+ goto exit;
+ }
+
+ ksz_read32(dev, REG_SW_ALU_VAL_A, &alu_table[0]);
+ if (alu_table[0] & ALU_V_STATIC_VALID) {
+ ksz_read32(dev, REG_SW_ALU_VAL_B, &alu_table[1]);
+ ksz_read32(dev, REG_SW_ALU_VAL_C, &alu_table[2]);
+ ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);
+
+ /* clear forwarding port */
+ alu_table[1] &= ~BIT(port);
+
+ /* if there is no port to forward, clear table */
+ if ((alu_table[1] & ALU_V_PORT_MAP) == 0) {
+ alu_table[0] = 0;
+ alu_table[1] = 0;
+ alu_table[2] = 0;
+ alu_table[3] = 0;
+ }
+ } else {
+ alu_table[0] = 0;
+ alu_table[1] = 0;
+ alu_table[2] = 0;
+ alu_table[3] = 0;
+ }
+
+ ksz9477_write_table(dev, alu_table);
+
+ ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);
+
+ /* wait to be finished */
+ ret = ksz9477_wait_alu_ready(dev);
+ if (ret)
+ dev_dbg(dev->dev, "Failed to write ALU\n");
+
+exit:
+ mutex_unlock(&dev->alu_mutex);
+
+ return ret;
+}
+
+static void ksz9477_convert_alu(struct alu_struct *alu, u32 *alu_table)
+{
+ alu->is_static = !!(alu_table[0] & ALU_V_STATIC_VALID);
+ alu->is_src_filter = !!(alu_table[0] & ALU_V_SRC_FILTER);
+ alu->is_dst_filter = !!(alu_table[0] & ALU_V_DST_FILTER);
+ alu->prio_age = (alu_table[0] >> ALU_V_PRIO_AGE_CNT_S) &
+ ALU_V_PRIO_AGE_CNT_M;
+ alu->mstp = alu_table[0] & ALU_V_MSTP_M;
+
+ alu->is_override = !!(alu_table[1] & ALU_V_OVERRIDE);
+ alu->is_use_fid = !!(alu_table[1] & ALU_V_USE_FID);
+ alu->port_forward = alu_table[1] & ALU_V_PORT_MAP;
+
+ alu->fid = (alu_table[2] >> ALU_V_FID_S) & ALU_V_FID_M;
+
+ alu->mac[0] = (alu_table[2] >> 8) & 0xFF;
+ alu->mac[1] = alu_table[2] & 0xFF;
+ alu->mac[2] = (alu_table[3] >> 24) & 0xFF;
+ alu->mac[3] = (alu_table[3] >> 16) & 0xFF;
+ alu->mac[4] = (alu_table[3] >> 8) & 0xFF;
+ alu->mac[5] = alu_table[3] & 0xFF;
+}
+
+int ksz9477_fdb_dump(struct ksz_device *dev, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ int ret = 0;
+ u32 ksz_data;
+ u32 alu_table[4];
+ struct alu_struct alu;
+ int timeout;
+
+ mutex_lock(&dev->alu_mutex);
+
+ /* start ALU search */
+ ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_START | ALU_SEARCH);
+
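+ /* The hardware walks the ALU by itself; each poll round either
+ * yields a valid entry (ALU_VALID) or the search completes and
+ * ALU_START self-clears.
+ */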
+ do {
+ timeout = 1000;
+ do {
+ ksz_read32(dev, REG_SW_ALU_CTRL__4, &ksz_data);
+ if ((ksz_data & ALU_VALID) || !(ksz_data & ALU_START))
+ break;
+ usleep_range(1, 10);
+ } while (timeout-- > 0);
+
+ if (!timeout) {
+ dev_dbg(dev->dev, "Failed to search ALU\n");
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
+
+ if (!(ksz_data & ALU_VALID))
+ continue;
+
+ /* read ALU table */
+ ksz9477_read_table(dev, alu_table);
+
+ ksz9477_convert_alu(&alu, alu_table);
+
+ if (alu.port_forward & BIT(port)) {
+ ret = cb(alu.mac, alu.fid, alu.is_static, data);
+ if (ret)
+ goto exit;
+ }
+ } while (ksz_data & ALU_START);
+
+exit:
+ /* stop ALU search */
+ ksz_write32(dev, REG_SW_ALU_CTRL__4, 0);
+
+ mutex_unlock(&dev->alu_mutex);
+
+ return ret;
+}
+
+int ksz9477_mdb_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
+{
+ u32 static_table[4];
+ const u8 *shifts;
+ const u32 *masks;
+ u32 data;
+ int index;
+ u32 mac_hi, mac_lo;
+ int err = 0;
+
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
+
+ mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
+ mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
+ mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
+
+ mutex_lock(&dev->alu_mutex);
+
+ for (index = 0; index < dev->info->num_statics; index++) {
+ /* find empty slot first */
+ data = (index << shifts[ALU_STAT_INDEX]) |
+ masks[ALU_STAT_READ] | ALU_STAT_START;
+ ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+
+ /* wait to be finished */
+ err = ksz9477_wait_alu_sta_ready(dev);
+ if (err) {
+ dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
+ goto exit;
+ }
+
+ /* read ALU static table */
+ ksz9477_read_table(dev, static_table);
+
+ if (static_table[0] & ALU_V_STATIC_VALID) {
+ /* check whether this entry has the same VID and MAC address */
+ if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
+ ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
+ static_table[3] == mac_lo) {
+ /* found matching one */
+ break;
+ }
+ } else {
+ /* found empty one */
+ break;
+ }
+ }
+
+ /* no available entry */
+ if (index == dev->info->num_statics) {
+ err = -ENOSPC;
+ goto exit;
+ }
+
+ /* add entry */
+ static_table[0] = ALU_V_STATIC_VALID;
+ static_table[1] |= BIT(port);
+ if (mdb->vid)
+ static_table[1] |= ALU_V_USE_FID;
+ static_table[2] = (mdb->vid << ALU_V_FID_S);
+ static_table[2] |= mac_hi;
+ static_table[3] = mac_lo;
+
+ ksz9477_write_table(dev, static_table);
+
+ data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
+ ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+
+ /* wait to be finished */
+ if (ksz9477_wait_alu_sta_ready(dev))
+ dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
+
+exit:
+ mutex_unlock(&dev->alu_mutex);
+ return err;
+}
+
+int ksz9477_mdb_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
+{
+ u32 static_table[4];
+ const u8 *shifts;
+ const u32 *masks;
+ u32 data;
+ int index;
+ int ret = 0;
+ u32 mac_hi, mac_lo;
+
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
+
+ mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
+ mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
+ mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
+
+ mutex_lock(&dev->alu_mutex);
+
+ for (index = 0; index < dev->info->num_statics; index++) {
+ /* search for the matching entry */
+ data = (index << shifts[ALU_STAT_INDEX]) |
+ masks[ALU_STAT_READ] | ALU_STAT_START;
+ ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+
+ /* wait to be finished */
+ ret = ksz9477_wait_alu_sta_ready(dev);
+ if (ret) {
+ dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
+ goto exit;
+ }
+
+ /* read ALU static table */
+ ksz9477_read_table(dev, static_table);
+
+ if (static_table[0] & ALU_V_STATIC_VALID) {
+ /* check whether this entry has the same VID and MAC address */
+ if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
+ ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
+ static_table[3] == mac_lo) {
+ /* found matching one */
+ break;
+ }
+ }
+ }
+
+ /* no available entry */
+ if (index == dev->info->num_statics)
+ goto exit;
+
+ /* clear port */
+ static_table[1] &= ~BIT(port);
+
+ if ((static_table[1] & ALU_V_PORT_MAP) == 0) {
+ /* delete entry */
+ static_table[0] = 0;
+ static_table[1] = 0;
+ static_table[2] = 0;
+ static_table[3] = 0;
+ }
+
+ ksz9477_write_table(dev, static_table);
+
+ data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
+ ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+
+ /* wait to be finished */
+ ret = ksz9477_wait_alu_sta_ready(dev);
+ if (ret)
+ dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
+
+exit:
+ mutex_unlock(&dev->alu_mutex);
+
+ return ret;
+}
+
+int ksz9477_port_mirror_add(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
+{
+ u8 data;
+ int p;
+
+ /* Limit to one sniffer port.
+ * Check whether any port is already set up for sniffing; if so,
+ * instruct the user to remove the existing entry first and exit.
+ */
+ for (p = 0; p < dev->info->port_cnt; p++) {
+ /* Skip the current sniffing port */
+ if (p == mirror->to_local_port)
+ continue;
+
+ ksz_pread8(dev, p, P_MIRROR_CTRL, &data);
+
+ if (data & PORT_MIRROR_SNIFFER) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Sniffer port is already configured, delete existing rules & retry");
+ return -EBUSY;
+ }
+ }
+
+ if (ingress)
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
+ else
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true);
+
+ /* configure mirror port */
+ ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
+ PORT_MIRROR_SNIFFER, true);
+
+ ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);
+
+ return 0;
+}
+
+void ksz9477_port_mirror_del(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ bool in_use = false;
+ u8 data;
+ int p;
+
+ if (mirror->ingress)
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false);
+ else
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false);
+
+ /* Check whether any port still refers to the sniffer port */
+ for (p = 0; p < dev->info->port_cnt; p++) {
+ ksz_pread8(dev, p, P_MIRROR_CTRL, &data);
+
+ if ((data & (PORT_MIRROR_RX | PORT_MIRROR_TX))) {
+ in_use = true;
+ break;
+ }
+ }
+
+ /* delete sniffing if there are no other mirroring rules */
+ if (!in_use)
+ ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
+ PORT_MIRROR_SNIFFER, false);
+}
+
+static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port)
+{
+ phy_interface_t interface;
+ bool gbit;
+
+ if (dev->info->internal_phy[port])
+ return PHY_INTERFACE_MODE_NA;
+
+ gbit = ksz_get_gbit(dev, port);
+
+ interface = ksz_get_xmii(dev, port, gbit);
+
+ return interface;
+}
+
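+ /* Indirect MMD access: select the MMD device and register index
+ * through the setup register, then write the value with the
+ * no-post-increment data op.
+ */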
+static void ksz9477_port_mmd_write(struct ksz_device *dev, int port,
+ u8 dev_addr, u16 reg_addr, u16 val)
+{
+ ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_SETUP,
+ MMD_SETUP(PORT_MMD_OP_INDEX, dev_addr));
+ ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_INDEX_DATA, reg_addr);
+ ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_SETUP,
+ MMD_SETUP(PORT_MMD_OP_DATA_NO_INCR, dev_addr));
+ ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_INDEX_DATA, val);
+}
+
+static void ksz9477_phy_errata_setup(struct ksz_device *dev, int port)
+{
+ /* Apply PHY settings to address errata listed in
+ * KSZ9477, KSZ9897, KSZ9896, KSZ9567, KSZ8565
+ * Silicon Errata and Data Sheet Clarification documents:
+ *
+ * Register settings are needed to improve PHY receive performance
+ */
+ ksz9477_port_mmd_write(dev, port, 0x01, 0x6f, 0xdd0b);
+ ksz9477_port_mmd_write(dev, port, 0x01, 0x8f, 0x6032);
+ ksz9477_port_mmd_write(dev, port, 0x01, 0x9d, 0x248c);
+ ksz9477_port_mmd_write(dev, port, 0x01, 0x75, 0x0060);
+ ksz9477_port_mmd_write(dev, port, 0x01, 0xd3, 0x7777);
+ ksz9477_port_mmd_write(dev, port, 0x1c, 0x06, 0x3008);
+ ksz9477_port_mmd_write(dev, port, 0x1c, 0x08, 0x2001);
+
+ /* Transmit waveform amplitude can be improved
+ * (1000BASE-T, 100BASE-TX, 10BASE-Te)
+ */
+ ksz9477_port_mmd_write(dev, port, 0x1c, 0x04, 0x00d0);
+
+ /* Energy Efficient Ethernet (EEE) feature select must
+ * be manually disabled (except on KSZ8565 which is 100Mbit)
+ */
+ if (dev->info->gbit_capable[port])
+ ksz9477_port_mmd_write(dev, port, 0x07, 0x3c, 0x0000);
+
+ /* Register settings are required to meet data sheet
+ * supply current specifications
+ */
+ ksz9477_port_mmd_write(dev, port, 0x1c, 0x13, 0x6eff);
+ ksz9477_port_mmd_write(dev, port, 0x1c, 0x14, 0xe6ff);
+ ksz9477_port_mmd_write(dev, port, 0x1c, 0x15, 0x6eff);
+ ksz9477_port_mmd_write(dev, port, 0x1c, 0x16, 0xe6ff);
+ ksz9477_port_mmd_write(dev, port, 0x1c, 0x17, 0x00ff);
+ ksz9477_port_mmd_write(dev, port, 0x1c, 0x18, 0x43ff);
+ ksz9477_port_mmd_write(dev, port, 0x1c, 0x19, 0xc3ff);
+ ksz9477_port_mmd_write(dev, port, 0x1c, 0x1a, 0x6fff);
+ ksz9477_port_mmd_write(dev, port, 0x1c, 0x1b, 0x07ff);
+ ksz9477_port_mmd_write(dev, port, 0x1c, 0x1c, 0x0fff);
+ ksz9477_port_mmd_write(dev, port, 0x1c, 0x1d, 0xe7ff);
+ ksz9477_port_mmd_write(dev, port, 0x1c, 0x1e, 0xefff);
+ ksz9477_port_mmd_write(dev, port, 0x1c, 0x20, 0xeeee);
+}
+
+void ksz9477_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config)
+{
+ config->mac_capabilities = MAC_10 | MAC_100 | MAC_ASYM_PAUSE |
+ MAC_SYM_PAUSE;
+
+ if (dev->info->gbit_capable[port])
+ config->mac_capabilities |= MAC_1000FD;
+}
+
+int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
+{
+ u32 secs = msecs / 1000;
+ u8 value;
+ u8 data;
+ int ret;
+
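+ /* The 11-bit ageing period is split across two registers: bits
+ * 7:0 go to REG_SW_LUE_CTRL_3, bits 10:8 to the SW_AGE_CNT field
+ * of REG_SW_LUE_CTRL_0.
+ */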
+ value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
+
+ ret = ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
+ if (ret < 0)
+ return ret;
+
+ data = FIELD_GET(SW_AGE_PERIOD_10_8_M, secs);
+
+ ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value);
+ if (ret < 0)
+ return ret;
+
+ value &= ~SW_AGE_CNT_M;
+ value |= FIELD_PREP(SW_AGE_CNT_M, data);
+
+ return ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
+}
+
+void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
+{
+ struct dsa_switch *ds = dev->ds;
+ u16 data16;
+ u8 member;
+
+ /* enable tail tag for host port */
+ if (cpu_port)
+ ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_TAIL_TAG_ENABLE,
+ true);
+
+ ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, false);
+
+ /* set back pressure */
+ ksz_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE, true);
+
+ /* enable broadcast storm limit */
+ ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
+
+ /* disable DiffServ priority */
+ ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_PRIO_ENABLE, false);
+
+ /* replace priority */
+ ksz_port_cfg(dev, port, REG_PORT_MRI_MAC_CTRL, PORT_USER_PRIO_CEILING,
+ false);
+ ksz9477_port_cfg32(dev, port, REG_PORT_MTI_QUEUE_CTRL_0__4,
+ MTI_PVID_REPLACE, false);
+
+ /* enable 802.1p priority */
+ ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);
+
+ if (dev->info->internal_phy[port]) {
+ /* do not force flow control */
+ ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
+ PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
+ false);
+
+ if (dev->info->phy_errata_9477)
+ ksz9477_phy_errata_setup(dev, port);
+ } else {
+ /* force flow control */
+ ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
+ PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
+ true);
+ }
+
+ if (cpu_port)
+ member = dsa_user_ports(ds);
+ else
+ member = BIT(dsa_upstream_port(ds, port));
+
+ ksz9477_cfg_port_member(dev, port, member);
+
+ /* clear pending interrupts */
+ if (dev->info->internal_phy[port])
+ ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16);
+}
+
+void ksz9477_config_cpu_port(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *p;
+ int i;
+
+ for (i = 0; i < dev->info->port_cnt; i++) {
+ if (dsa_is_cpu_port(ds, i) &&
+ (dev->info->cpu_ports & (1 << i))) {
+ phy_interface_t interface;
+ const char *prev_msg;
+ const char *prev_mode;
+
+ dev->cpu_port = i;
+ p = &dev->ports[i];
+
+ /* Read the XMII register to determine the host port
+ * interface. If it is set explicitly in the device tree,
+ * note the difference to help debugging.
+ */
+ interface = ksz9477_get_interface(dev, i);
+ if (!p->interface) {
+ if (dev->compat_interface) {
+ dev_warn(dev->dev,
+ "Using legacy switch \"phy-mode\" property, because it is missing on port %d node. "
+ "Please update your device tree.\n",
+ i);
+ p->interface = dev->compat_interface;
+ } else {
+ p->interface = interface;
+ }
+ }
+ if (interface && interface != p->interface) {
+ prev_msg = " instead of ";
+ prev_mode = phy_modes(interface);
+ } else {
+ prev_msg = "";
+ prev_mode = "";
+ }
+ dev_info(dev->dev,
+ "Port%d: using phy mode %s%s%s\n",
+ i,
+ phy_modes(p->interface),
+ prev_msg,
+ prev_mode);
+
+ /* enable cpu port */
+ ksz9477_port_setup(dev, i, true);
+ }
+ }
+
+ for (i = 0; i < dev->info->port_cnt; i++) {
+ if (i == dev->cpu_port)
+ continue;
+ ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED);
+ }
+}
+
+int ksz9477_enable_stp_addr(struct ksz_device *dev)
+{
+ const u32 *masks;
+ u32 data;
+ int ret;
+
+ masks = dev->info->masks;
+
+ /* Enable Reserved multicast table */
+ ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_RESV_MCAST_ENABLE, true);
+
+ /* Set the Override bit to forward BPDU packets to the CPU */
+ ret = ksz_write32(dev, REG_SW_ALU_VAL_B,
+ ALU_V_OVERRIDE | BIT(dev->cpu_port));
+ if (ret < 0)
+ return ret;
+
+ data = ALU_STAT_START | ALU_RESV_MCAST_ADDR | masks[ALU_STAT_WRITE];
+
+ ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+ if (ret < 0)
+ return ret;
+
+ /* wait to be finished */
+ ret = ksz9477_wait_alu_sta_ready(dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to update Reserved Multicast table\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int ksz9477_setup(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret = 0;
+
+ /* Required for port partitioning. */
+ ksz9477_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY,
+ true);
+
+ /* Does not work correctly with tail tagging. */
+ ksz_cfg(dev, REG_SW_MAC_CTRL_0, SW_CHECK_LENGTH, false);
+
+ /* Enable REG_SW_MTU__2 reg by setting SW_JUMBO_PACKET */
+ ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true);
+
+ /* Now we can configure default MTU value */
+ ret = regmap_update_bits(dev->regmap[1], REG_SW_MTU__2, REG_SW_MTU_MASK,
+ VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
+ if (ret)
+ return ret;
+
+ /* queue based egress rate limit */
+ ksz_cfg(dev, REG_SW_MAC_CTRL_5, SW_OUT_RATE_LIMIT_QUEUE_BASED, true);
+
+ /* enable global MIB counter freeze function */
+ ksz_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);
+
+ return 0;
+}
+
+u32 ksz9477_get_port_addr(int port, int offset)
+{
+ return PORT_CTRL_ADDR(port, offset);
+}
+
+int ksz9477_switch_init(struct ksz_device *dev)
+{
+ u8 data8;
+ int ret;
+
+ dev->port_mask = (1 << dev->info->port_cnt) - 1;
+
+ /* turn off SPI DO Edge select */
+ ret = ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
+ if (ret)
+ return ret;
+
+ data8 &= ~SPI_AUTO_EDGE_DETECTION;
+ ret = ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void ksz9477_switch_exit(struct ksz_device *dev)
+{
+ ksz9477_reset_switch(dev);
+}
+
+MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
+MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch DSA Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz9477.h b/drivers/net/dsa/microchip/ksz9477.h
new file mode 100644
index 000000000..00862c4cf
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz9477.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Microchip KSZ9477 series Header file
+ *
+ * Copyright (C) 2017-2022 Microchip Technology Inc.
+ */
+
+#ifndef __KSZ9477_H
+#define __KSZ9477_H
+
+#include <net/dsa.h>
+#include "ksz_common.h"
+
+int ksz9477_setup(struct dsa_switch *ds);
+u32 ksz9477_get_port_addr(int port, int offset);
+void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member);
+void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port);
+void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port);
+int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs);
+int ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data);
+int ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val);
+void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt);
+void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt);
+void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze);
+void ksz9477_port_init_cnt(struct ksz_device *dev, int port);
+int ksz9477_port_vlan_filtering(struct ksz_device *dev, int port,
+ bool flag, struct netlink_ext_ack *extack);
+int ksz9477_port_vlan_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack);
+int ksz9477_port_vlan_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+int ksz9477_port_mirror_add(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack);
+void ksz9477_port_mirror_del(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror);
+int ksz9477_get_stp_reg(void);
+void ksz9477_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config);
+int ksz9477_fdb_dump(struct ksz_device *dev, int port,
+ dsa_fdb_dump_cb_t *cb, void *data);
+int ksz9477_fdb_add(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db);
+int ksz9477_fdb_del(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db);
+int ksz9477_mdb_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db);
+int ksz9477_mdb_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db);
+int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu);
+int ksz9477_max_mtu(struct ksz_device *dev, int port);
+void ksz9477_config_cpu_port(struct dsa_switch *ds);
+int ksz9477_enable_stp_addr(struct ksz_device *dev);
+int ksz9477_reset_switch(struct ksz_device *dev);
+int ksz9477_dsa_init(struct ksz_device *dev);
+int ksz9477_switch_init(struct ksz_device *dev);
+void ksz9477_switch_exit(struct ksz_device *dev);
+
+#endif
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
new file mode 100644
index 000000000..aae1dadef
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip KSZ9477 series register access through I2C
+ *
+ * Copyright (C) 2018-2019 Microchip Technology Inc.
+ */
+
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "ksz_common.h"
+
+KSZ_REGMAP_TABLE(ksz9477, not_used, 16, 0, 0);
+
+static int ksz9477_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *i2c_id)
+{
+ struct regmap_config rc;
+ struct ksz_device *dev;
+ int i, ret;
+
+ dev = ksz_switch_alloc(&i2c->dev, i2c);
+ if (!dev)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(ksz9477_regmap_config); i++) {
+ rc = ksz9477_regmap_config[i];
+ rc.lock_arg = &dev->regmap_mutex;
+ dev->regmap[i] = devm_regmap_init_i2c(i2c, &rc);
+ if (IS_ERR(dev->regmap[i])) {
+ ret = PTR_ERR(dev->regmap[i]);
+ dev_err(&i2c->dev,
+ "Failed to initialize regmap%i: %d\n",
+ ksz9477_regmap_config[i].val_bits, ret);
+ return ret;
+ }
+ }
+
+ if (i2c->dev.platform_data)
+ dev->pdata = i2c->dev.platform_data;
+
+ ret = ksz_switch_register(dev);
+
+ /* Main DSA driver may not be started yet. */
+ if (ret)
+ return ret;
+
+ i2c_set_clientdata(i2c, dev);
+
+ return 0;
+}
+
+static void ksz9477_i2c_remove(struct i2c_client *i2c)
+{
+ struct ksz_device *dev = i2c_get_clientdata(i2c);
+
+ if (dev)
+ ksz_switch_remove(dev);
+}
+
+static void ksz9477_i2c_shutdown(struct i2c_client *i2c)
+{
+ struct ksz_device *dev = i2c_get_clientdata(i2c);
+
+ if (!dev)
+ return;
+
+ if (dev->dev_ops->reset)
+ dev->dev_ops->reset(dev);
+
+ dsa_switch_shutdown(dev->ds);
+
+ i2c_set_clientdata(i2c, NULL);
+}
+
+static const struct i2c_device_id ksz9477_i2c_id[] = {
+ { "ksz9477-switch", 0 },
+ {},
+};
+
+MODULE_DEVICE_TABLE(i2c, ksz9477_i2c_id);
+
+static const struct of_device_id ksz9477_dt_ids[] = {
+ {
+ .compatible = "microchip,ksz9477",
+ .data = &ksz_switch_chips[KSZ9477]
+ },
+ {
+ .compatible = "microchip,ksz9896",
+ .data = &ksz_switch_chips[KSZ9896]
+ },
+ {
+ .compatible = "microchip,ksz9897",
+ .data = &ksz_switch_chips[KSZ9897]
+ },
+ {
+ .compatible = "microchip,ksz9893",
+ .data = &ksz_switch_chips[KSZ9893]
+ },
+ {
+ .compatible = "microchip,ksz9563",
+ .data = &ksz_switch_chips[KSZ9893]
+ },
+ {
+ .compatible = "microchip,ksz8563",
+ .data = &ksz_switch_chips[KSZ8563]
+ },
+ {
+ .compatible = "microchip,ksz9567",
+ .data = &ksz_switch_chips[KSZ9567]
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
+
+static struct i2c_driver ksz9477_i2c_driver = {
+ .driver = {
+ .name = "ksz9477-switch",
+ .of_match_table = of_match_ptr(ksz9477_dt_ids),
+ },
+ .probe = ksz9477_i2c_probe,
+ .remove = ksz9477_i2c_remove,
+ .shutdown = ksz9477_i2c_shutdown,
+ .id_table = ksz9477_i2c_id,
+};
+
+module_i2c_driver(ksz9477_i2c_driver);
+
+MODULE_AUTHOR("Tristram Ha <Tristram.Ha@microchip.com>");
+MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch I2C access Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
new file mode 100644
index 000000000..53c68d286
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -0,0 +1,1620 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Microchip KSZ9477 register definitions
+ *
+ * Copyright (C) 2017-2018 Microchip Technology Inc.
+ */
+
+#ifndef __KSZ9477_REGS_H
+#define __KSZ9477_REGS_H
+
+#define KS_PRIO_M 0x7
+#define KS_PRIO_S 4
+
+/* 0 - Operation */
+#define REG_CHIP_ID0__1 0x0000
+
+#define REG_CHIP_ID1__1 0x0001
+
+#define FAMILY_ID 0x95
+#define FAMILY_ID_94 0x94
+#define FAMILY_ID_95 0x95
+#define FAMILY_ID_85 0x85
+#define FAMILY_ID_98 0x98
+#define FAMILY_ID_88 0x88
+
+#define REG_CHIP_ID2__1 0x0002
+
+#define CHIP_ID_66 0x66
+#define CHIP_ID_67 0x67
+#define CHIP_ID_77 0x77
+#define CHIP_ID_93 0x93
+#define CHIP_ID_96 0x96
+#define CHIP_ID_97 0x97
+
+#define REG_CHIP_ID3__1 0x0003
+
+#define SWITCH_REVISION_M 0x0F
+#define SWITCH_REVISION_S 4
+#define SWITCH_RESET 0x01
+
+#define REG_SW_PME_CTRL 0x0006
+
+#define PME_ENABLE BIT(1)
+#define PME_POLARITY BIT(0)
+
+#define REG_GLOBAL_OPTIONS 0x000F
+
+#define SW_GIGABIT_ABLE BIT(6)
+#define SW_REDUNDANCY_ABLE BIT(5)
+#define SW_AVB_ABLE BIT(4)
+#define SW_9567_RL_5_2 0xC
+#define SW_9477_SL_5_2 0xD
+
+#define SW_9896_GL_5_1 0xB
+#define SW_9896_RL_5_1 0x8
+#define SW_9896_SL_5_1 0x9
+
+#define SW_9895_GL_4_1 0x7
+#define SW_9895_RL_4_1 0x4
+#define SW_9895_SL_4_1 0x5
+
+#define SW_9896_RL_4_2 0x6
+
+#define SW_9893_RL_2_1 0x0
+#define SW_9893_SL_2_1 0x1
+#define SW_9893_GL_2_1 0x3
+
+#define SW_QW_ABLE BIT(5)
+#define SW_9893_RN_2_1 0xC
+
+#define REG_SW_INT_STATUS__4 0x0010
+#define REG_SW_INT_MASK__4 0x0014
+
+#define LUE_INT BIT(31)
+#define TRIG_TS_INT BIT(30)
+#define APB_TIMEOUT_INT BIT(29)
+
+#define SWITCH_INT_MASK (TRIG_TS_INT | APB_TIMEOUT_INT)
+
+#define REG_SW_PORT_INT_STATUS__4 0x0018
+#define REG_SW_PORT_INT_MASK__4 0x001C
+#define REG_SW_PHY_INT_STATUS 0x0020
+#define REG_SW_PHY_INT_ENABLE 0x0024
+
+/* 1 - Global */
+#define REG_SW_GLOBAL_SERIAL_CTRL_0 0x0100
+#define SW_SPARE_REG_2 BIT(7)
+#define SW_SPARE_REG_1 BIT(6)
+#define SW_SPARE_REG_0 BIT(5)
+#define SW_BIG_ENDIAN BIT(4)
+#define SPI_AUTO_EDGE_DETECTION BIT(1)
+#define SPI_CLOCK_OUT_RISING_EDGE BIT(0)
+
+#define REG_SW_GLOBAL_OUTPUT_CTRL__1 0x0103
+#define SW_ENABLE_REFCLKO BIT(1)
+#define SW_REFCLKO_IS_125MHZ BIT(0)
+
+#define REG_SW_IBA__4 0x0104
+
+#define SW_IBA_ENABLE BIT(31)
+#define SW_IBA_DA_MATCH BIT(30)
+#define SW_IBA_INIT BIT(29)
+#define SW_IBA_QID_M 0xF
+#define SW_IBA_QID_S 22
+#define SW_IBA_PORT_M 0x2F
+#define SW_IBA_PORT_S 16
+#define SW_IBA_FRAME_TPID_M 0xFFFF
+
+#define REG_SW_APB_TIMEOUT_ADDR__4 0x0108
+
+#define APB_TIMEOUT_ACKNOWLEDGE BIT(31)
+
+#define REG_SW_IBA_SYNC__1 0x010C
+
+#define REG_SW_IO_STRENGTH__1 0x010D
+#define SW_DRIVE_STRENGTH_M 0x7
+#define SW_DRIVE_STRENGTH_2MA 0
+#define SW_DRIVE_STRENGTH_4MA 1
+#define SW_DRIVE_STRENGTH_8MA 2
+#define SW_DRIVE_STRENGTH_12MA 3
+#define SW_DRIVE_STRENGTH_16MA 4
+#define SW_DRIVE_STRENGTH_20MA 5
+#define SW_DRIVE_STRENGTH_24MA 6
+#define SW_DRIVE_STRENGTH_28MA 7
+#define SW_HI_SPEED_DRIVE_STRENGTH_S 4
+#define SW_LO_SPEED_DRIVE_STRENGTH_S 0
+
+#define REG_SW_IBA_STATUS__4 0x0110
+
+#define SW_IBA_REQ BIT(31)
+#define SW_IBA_RESP BIT(30)
+#define SW_IBA_DA_MISMATCH BIT(14)
+#define SW_IBA_FMT_MISMATCH BIT(13)
+#define SW_IBA_CODE_ERROR BIT(12)
+#define SW_IBA_CMD_ERROR BIT(11)
+#define SW_IBA_CMD_LOC_M (BIT(6) - 1)
+
+#define REG_SW_IBA_STATES__4 0x0114
+
+#define SW_IBA_BUF_STATE_S 30
+#define SW_IBA_CMD_STATE_S 28
+#define SW_IBA_RESP_STATE_S 26
+#define SW_IBA_STATE_M 0x3
+#define SW_IBA_PACKET_SIZE_M 0x7F
+#define SW_IBA_PACKET_SIZE_S 16
+#define SW_IBA_FMT_ID_M 0xFFFF
+
+#define REG_SW_IBA_RESULT__4 0x0118
+
+#define SW_IBA_SIZE_S 24
+
+#define SW_IBA_RETRY_CNT_M (BIT(5) - 1)
+
+/* 2 - PHY */
+#define REG_SW_POWER_MANAGEMENT_CTRL 0x0201
+
+#define SW_PLL_POWER_DOWN BIT(5)
+#define SW_POWER_DOWN_MODE 0x3
+#define SW_ENERGY_DETECTION 1
+#define SW_SOFT_POWER_DOWN 2
+#define SW_POWER_SAVING 3
+
+/* 3 - Operation Control */
+#define REG_SW_OPERATION 0x0300
+
+#define SW_DOUBLE_TAG BIT(7)
+#define SW_RESET BIT(1)
+
+#define REG_SW_MAC_ADDR_0 0x0302
+#define REG_SW_MAC_ADDR_1 0x0303
+#define REG_SW_MAC_ADDR_2 0x0304
+#define REG_SW_MAC_ADDR_3 0x0305
+#define REG_SW_MAC_ADDR_4 0x0306
+#define REG_SW_MAC_ADDR_5 0x0307
+
+#define REG_SW_MTU__2 0x0308
+#define REG_SW_MTU_MASK GENMASK(13, 0)
+
+#define REG_SW_ISP_TPID__2 0x030A
+
+#define REG_SW_HSR_TPID__2 0x030C
+
+#define REG_AVB_STRATEGY__2 0x030E
+
+#define SW_SHAPING_CREDIT_ACCT BIT(1)
+#define SW_POLICING_CREDIT_ACCT BIT(0)
+
+#define REG_SW_LUE_CTRL_0 0x0310
+
+#define SW_VLAN_ENABLE BIT(7)
+#define SW_DROP_INVALID_VID BIT(6)
+#define SW_AGE_CNT_M GENMASK(5, 3)
+#define SW_AGE_CNT_S 3
+#define SW_AGE_PERIOD_10_8_M GENMASK(10, 8)
+#define SW_RESV_MCAST_ENABLE BIT(2)
+#define SW_HASH_OPTION_M 0x03
+#define SW_HASH_OPTION_CRC 1
+#define SW_HASH_OPTION_XOR 2
+#define SW_HASH_OPTION_DIRECT 3
+
+#define REG_SW_LUE_CTRL_1 0x0311
+
+#define UNICAST_LEARN_DISABLE BIT(7)
+#define SW_SRC_ADDR_FILTER BIT(6)
+#define SW_FLUSH_STP_TABLE BIT(5)
+#define SW_FLUSH_MSTP_TABLE BIT(4)
+#define SW_FWD_MCAST_SRC_ADDR BIT(3)
+#define SW_AGING_ENABLE BIT(2)
+#define SW_FAST_AGING BIT(1)
+#define SW_LINK_AUTO_AGING BIT(0)
+
+#define REG_SW_LUE_CTRL_2 0x0312
+
+#define SW_TRAP_DOUBLE_TAG BIT(6)
+#define SW_EGRESS_VLAN_FILTER_DYN BIT(5)
+#define SW_EGRESS_VLAN_FILTER_STA BIT(4)
+#define SW_FLUSH_OPTION_M 0x3
+#define SW_FLUSH_OPTION_S 2
+#define SW_FLUSH_OPTION_DYN_MAC 1
+#define SW_FLUSH_OPTION_STA_MAC 2
+#define SW_FLUSH_OPTION_BOTH 3
+#define SW_PRIO_M 0x3
+#define SW_PRIO_DA 0
+#define SW_PRIO_SA 1
+#define SW_PRIO_HIGHEST_DA_SA 2
+#define SW_PRIO_LOWEST_DA_SA 3
+
+#define REG_SW_LUE_CTRL_3 0x0313
+#define SW_AGE_PERIOD_7_0_M GENMASK(7, 0)
+
+#define REG_SW_LUE_INT_STATUS 0x0314
+#define REG_SW_LUE_INT_ENABLE 0x0315
+
+#define LEARN_FAIL_INT BIT(2)
+#define ALMOST_FULL_INT BIT(1)
+#define WRITE_FAIL_INT BIT(0)
+
+#define REG_SW_LUE_INDEX_0__2 0x0316
+
+#define ENTRY_INDEX_M 0x0FFF
+
+#define REG_SW_LUE_INDEX_1__2 0x0318
+
+#define FAIL_INDEX_M 0x03FF
+
+#define REG_SW_LUE_INDEX_2__2 0x031A
+
+#define REG_SW_LUE_UNK_UCAST_CTRL__4 0x0320
+
+#define SW_UNK_UCAST_ENABLE BIT(31)
+
+#define REG_SW_LUE_UNK_MCAST_CTRL__4 0x0324
+
+#define SW_UNK_MCAST_ENABLE BIT(31)
+
+#define REG_SW_LUE_UNK_VID_CTRL__4 0x0328
+
+#define SW_UNK_VID_ENABLE BIT(31)
+
+#define REG_SW_MAC_CTRL_0 0x0330
+
+#define SW_NEW_BACKOFF BIT(7)
+#define SW_CHECK_LENGTH BIT(3)
+#define SW_PAUSE_UNH_MODE BIT(1)
+#define SW_AGGR_BACKOFF BIT(0)
+
+#define REG_SW_MAC_CTRL_1 0x0331
+
+#define SW_BACK_PRESSURE BIT(5)
+#define FAIR_FLOW_CTRL BIT(4)
+#define NO_EXC_COLLISION_DROP BIT(3)
+#define SW_JUMBO_PACKET BIT(2)
+#define SW_LEGAL_PACKET_DISABLE BIT(1)
+#define SW_PASS_SHORT_FRAME BIT(0)
+
+#define REG_SW_MAC_CTRL_2 0x0332
+
+#define SW_REPLACE_VID BIT(3)
+
+#define REG_SW_MAC_CTRL_3 0x0333
+
+#define REG_SW_MAC_CTRL_4 0x0334
+
+#define SW_PASS_PAUSE BIT(3)
+
+#define REG_SW_MAC_CTRL_5 0x0335
+
+#define SW_OUT_RATE_LIMIT_QUEUE_BASED BIT(3)
+
+#define REG_SW_MAC_CTRL_6 0x0336
+
+#define SW_MIB_COUNTER_FLUSH BIT(7)
+#define SW_MIB_COUNTER_FREEZE BIT(6)
+
+#define REG_SW_MAC_802_1P_MAP_0 0x0338
+#define REG_SW_MAC_802_1P_MAP_1 0x0339
+#define REG_SW_MAC_802_1P_MAP_2 0x033A
+#define REG_SW_MAC_802_1P_MAP_3 0x033B
+
+#define SW_802_1P_MAP_M KS_PRIO_M
+#define SW_802_1P_MAP_S KS_PRIO_S
+
+#define REG_SW_MAC_ISP_CTRL 0x033C
+
+#define REG_SW_MAC_TOS_CTRL 0x033E
+
+#define SW_TOS_DSCP_REMARK BIT(1)
+#define SW_TOS_DSCP_REMAP BIT(0)
+
+#define REG_SW_MAC_TOS_PRIO_0 0x0340
+#define REG_SW_MAC_TOS_PRIO_1 0x0341
+#define REG_SW_MAC_TOS_PRIO_2 0x0342
+#define REG_SW_MAC_TOS_PRIO_3 0x0343
+#define REG_SW_MAC_TOS_PRIO_4 0x0344
+#define REG_SW_MAC_TOS_PRIO_5 0x0345
+#define REG_SW_MAC_TOS_PRIO_6 0x0346
+#define REG_SW_MAC_TOS_PRIO_7 0x0347
+#define REG_SW_MAC_TOS_PRIO_8 0x0348
+#define REG_SW_MAC_TOS_PRIO_9 0x0349
+#define REG_SW_MAC_TOS_PRIO_10 0x034A
+#define REG_SW_MAC_TOS_PRIO_11 0x034B
+#define REG_SW_MAC_TOS_PRIO_12 0x034C
+#define REG_SW_MAC_TOS_PRIO_13 0x034D
+#define REG_SW_MAC_TOS_PRIO_14 0x034E
+#define REG_SW_MAC_TOS_PRIO_15 0x034F
+#define REG_SW_MAC_TOS_PRIO_16 0x0350
+#define REG_SW_MAC_TOS_PRIO_17 0x0351
+#define REG_SW_MAC_TOS_PRIO_18 0x0352
+#define REG_SW_MAC_TOS_PRIO_19 0x0353
+#define REG_SW_MAC_TOS_PRIO_20 0x0354
+#define REG_SW_MAC_TOS_PRIO_21 0x0355
+#define REG_SW_MAC_TOS_PRIO_22 0x0356
+#define REG_SW_MAC_TOS_PRIO_23 0x0357
+#define REG_SW_MAC_TOS_PRIO_24 0x0358
+#define REG_SW_MAC_TOS_PRIO_25 0x0359
+#define REG_SW_MAC_TOS_PRIO_26 0x035A
+#define REG_SW_MAC_TOS_PRIO_27 0x035B
+#define REG_SW_MAC_TOS_PRIO_28 0x035C
+#define REG_SW_MAC_TOS_PRIO_29 0x035D
+#define REG_SW_MAC_TOS_PRIO_30 0x035E
+#define REG_SW_MAC_TOS_PRIO_31 0x035F
+
+#define REG_SW_MRI_CTRL_0 0x0370
+
+#define SW_IGMP_SNOOP BIT(6)
+#define SW_IPV6_MLD_OPTION BIT(3)
+#define SW_IPV6_MLD_SNOOP BIT(2)
+#define SW_MIRROR_RX_TX BIT(0)
+
+#define REG_SW_CLASS_D_IP_CTRL__4 0x0374
+
+#define SW_CLASS_D_IP_ENABLE BIT(31)
+
+#define REG_SW_MRI_CTRL_8 0x0378
+
+#define SW_NO_COLOR_S 6
+#define SW_RED_COLOR_S 4
+#define SW_YELLOW_COLOR_S 2
+#define SW_GREEN_COLOR_S 0
+#define SW_COLOR_M 0x3
+
+#define REG_SW_QM_CTRL__4 0x0390
+
+#define PRIO_SCHEME_SELECT_M KS_PRIO_M
+#define PRIO_SCHEME_SELECT_S 6
+#define PRIO_MAP_3_HI 0
+#define PRIO_MAP_2_HI 2
+#define PRIO_MAP_0_LO 3
+#define UNICAST_VLAN_BOUNDARY BIT(1)
+
+#define REG_SW_EEE_QM_CTRL__2 0x03C0
+
+#define REG_SW_EEE_TXQ_WAIT_TIME__2 0x03C2
+
+/* 4 - LUE */
+#define REG_SW_VLAN_ENTRY__4 0x0400
+
+#define VLAN_VALID BIT(31)
+#define VLAN_FORWARD_OPTION BIT(27)
+#define VLAN_PRIO_M KS_PRIO_M
+#define VLAN_PRIO_S 24
+#define VLAN_MSTP_M 0x7
+#define VLAN_MSTP_S 12
+#define VLAN_FID_M 0x7F
+
+#define REG_SW_VLAN_ENTRY_UNTAG__4 0x0404
+#define REG_SW_VLAN_ENTRY_PORTS__4 0x0408
+
+#define REG_SW_VLAN_ENTRY_INDEX__2 0x040C
+
+#define VLAN_INDEX_M 0x0FFF
+
+#define REG_SW_VLAN_CTRL 0x040E
+
+#define VLAN_START BIT(7)
+#define VLAN_ACTION 0x3
+#define VLAN_WRITE 1
+#define VLAN_READ 2
+#define VLAN_CLEAR 3
+
+#define REG_SW_ALU_INDEX_0 0x0410
+
+#define ALU_FID_INDEX_S 16
+#define ALU_MAC_ADDR_HI 0xFFFF
+
+#define REG_SW_ALU_INDEX_1 0x0414
+
+#define ALU_DIRECT_INDEX_M (BIT(12) - 1)
+
+#define REG_SW_ALU_CTRL__4 0x0418
+
+#define ALU_VALID_CNT_M (BIT(14) - 1)
+#define ALU_VALID_CNT_S 16
+#define ALU_START BIT(7)
+#define ALU_VALID BIT(6)
+#define ALU_DIRECT BIT(2)
+#define ALU_ACTION 0x3
+#define ALU_WRITE 1
+#define ALU_READ 2
+#define ALU_SEARCH 3
+
+#define REG_SW_ALU_STAT_CTRL__4 0x041C
+
+#define ALU_RESV_MCAST_INDEX_M (BIT(6) - 1)
+#define ALU_STAT_START BIT(7)
+#define ALU_RESV_MCAST_ADDR BIT(1)
+
+#define REG_SW_ALU_VAL_A 0x0420
+
+#define ALU_V_STATIC_VALID BIT(31)
+#define ALU_V_SRC_FILTER BIT(30)
+#define ALU_V_DST_FILTER BIT(29)
+#define ALU_V_PRIO_AGE_CNT_M (BIT(3) - 1)
+#define ALU_V_PRIO_AGE_CNT_S 26
+#define ALU_V_MSTP_M 0x7
+
+#define REG_SW_ALU_VAL_B 0x0424
+
+#define ALU_V_OVERRIDE BIT(31)
+#define ALU_V_USE_FID BIT(30)
+#define ALU_V_PORT_MAP (BIT(24) - 1)
+
+#define REG_SW_ALU_VAL_C 0x0428
+
+#define ALU_V_FID_M (BIT(16) - 1)
+#define ALU_V_FID_S 16
+#define ALU_V_MAC_ADDR_HI 0xFFFF
+
+#define REG_SW_ALU_VAL_D 0x042C
+
+#define REG_HSR_ALU_INDEX_0 0x0440
+
+#define REG_HSR_ALU_INDEX_1 0x0444
+
+#define HSR_DST_MAC_INDEX_LO_S 16
+#define HSR_SRC_MAC_INDEX_HI 0xFFFF
+
+#define REG_HSR_ALU_INDEX_2 0x0448
+
+#define HSR_INDEX_MAX BIT(9)
+#define HSR_DIRECT_INDEX_M (HSR_INDEX_MAX - 1)
+
+#define REG_HSR_ALU_INDEX_3 0x044C
+
+#define HSR_PATH_INDEX_M (BIT(4) - 1)
+
+#define REG_HSR_ALU_CTRL__4 0x0450
+
+#define HSR_VALID_CNT_M (BIT(14) - 1)
+#define HSR_VALID_CNT_S 16
+#define HSR_START BIT(7)
+#define HSR_VALID BIT(6)
+#define HSR_SEARCH_END BIT(5)
+#define HSR_DIRECT BIT(2)
+#define HSR_ACTION 0x3
+#define HSR_WRITE 1
+#define HSR_READ 2
+#define HSR_SEARCH 3
+
+#define REG_HSR_ALU_VAL_A 0x0454
+
+#define HSR_V_STATIC_VALID BIT(31)
+#define HSR_V_AGE_CNT_M (BIT(3) - 1)
+#define HSR_V_AGE_CNT_S 26
+#define HSR_V_PATH_ID_M (BIT(4) - 1)
+
+#define REG_HSR_ALU_VAL_B 0x0458
+
+#define REG_HSR_ALU_VAL_C 0x045C
+
+#define HSR_V_DST_MAC_ADDR_LO_S 16
+#define HSR_V_SRC_MAC_ADDR_HI 0xFFFF
+
+#define REG_HSR_ALU_VAL_D 0x0460
+
+#define REG_HSR_ALU_VAL_E 0x0464
+
+#define HSR_V_START_SEQ_1_S 16
+#define HSR_V_START_SEQ_2_S 0
+
+#define REG_HSR_ALU_VAL_F 0x0468
+
+#define HSR_V_EXP_SEQ_1_S 16
+#define HSR_V_EXP_SEQ_2_S 0
+
+#define REG_HSR_ALU_VAL_G 0x046C
+
+#define HSR_V_SEQ_CNT_1_S 16
+#define HSR_V_SEQ_CNT_2_S 0
+
+#define HSR_V_SEQ_M (BIT(16) - 1)
+
+/* 5 - PTP Clock */
+#define REG_PTP_CLK_CTRL 0x0500
+
+#define PTP_STEP_ADJ BIT(6)
+#define PTP_STEP_DIR BIT(5)
+#define PTP_READ_TIME BIT(4)
+#define PTP_LOAD_TIME BIT(3)
+#define PTP_CLK_ADJ_ENABLE BIT(2)
+#define PTP_CLK_ENABLE BIT(1)
+#define PTP_CLK_RESET BIT(0)
+
+#define REG_PTP_RTC_SUB_NANOSEC__2 0x0502
+
+#define PTP_RTC_SUB_NANOSEC_M 0x0007
+
+#define REG_PTP_RTC_NANOSEC 0x0504
+#define REG_PTP_RTC_NANOSEC_H 0x0504
+#define REG_PTP_RTC_NANOSEC_L 0x0506
+
+#define REG_PTP_RTC_SEC 0x0508
+#define REG_PTP_RTC_SEC_H 0x0508
+#define REG_PTP_RTC_SEC_L 0x050A
+
+#define REG_PTP_SUBNANOSEC_RATE 0x050C
+#define REG_PTP_SUBNANOSEC_RATE_H 0x050C
+
+#define PTP_RATE_DIR BIT(31)
+#define PTP_TMP_RATE_ENABLE BIT(30)
+
+#define REG_PTP_SUBNANOSEC_RATE_L 0x050E
+
+#define REG_PTP_RATE_DURATION 0x0510
+#define REG_PTP_RATE_DURATION_H 0x0510
+#define REG_PTP_RATE_DURATION_L 0x0512
+
+#define REG_PTP_MSG_CONF1 0x0514
+
+#define PTP_802_1AS BIT(7)
+#define PTP_ENABLE BIT(6)
+#define PTP_ETH_ENABLE BIT(5)
+#define PTP_IPV4_UDP_ENABLE BIT(4)
+#define PTP_IPV6_UDP_ENABLE BIT(3)
+#define PTP_TC_P2P BIT(2)
+#define PTP_MASTER BIT(1)
+#define PTP_1STEP BIT(0)
+
+#define REG_PTP_MSG_CONF2 0x0516
+
+#define PTP_UNICAST_ENABLE BIT(12)
+#define PTP_ALTERNATE_MASTER BIT(11)
+#define PTP_ALL_HIGH_PRIO BIT(10)
+#define PTP_SYNC_CHECK BIT(9)
+#define PTP_DELAY_CHECK BIT(8)
+#define PTP_PDELAY_CHECK BIT(7)
+#define PTP_DROP_SYNC_DELAY_REQ BIT(5)
+#define PTP_DOMAIN_CHECK BIT(4)
+#define PTP_UDP_CHECKSUM BIT(2)
+
+#define REG_PTP_DOMAIN_VERSION 0x0518
+#define PTP_VERSION_M 0xFF00
+#define PTP_DOMAIN_M 0x00FF
+
+#define REG_PTP_UNIT_INDEX__4 0x0520
+
+#define PTP_UNIT_M 0xF
+
+#define PTP_GPIO_INDEX_S 16
+#define PTP_TSI_INDEX_S 8
+#define PTP_TOU_INDEX_S 0
+
+#define REG_PTP_TRIG_STATUS__4 0x0524
+
+#define TRIG_ERROR_S 16
+#define TRIG_DONE_S 0
+
+#define REG_PTP_INT_STATUS__4 0x0528
+
+#define TRIG_INT_S 16
+#define TS_INT_S 0
+
+#define TRIG_UNIT_M 0x7
+#define TS_UNIT_M 0x3
+
+#define REG_PTP_CTRL_STAT__4 0x052C
+
+#define GPIO_IN BIT(7)
+#define GPIO_OUT BIT(6)
+#define TS_INT_ENABLE BIT(5)
+#define TRIG_ACTIVE BIT(4)
+#define TRIG_ENABLE BIT(3)
+#define TRIG_RESET BIT(2)
+#define TS_ENABLE BIT(1)
+#define TS_RESET BIT(0)
+
+#define GPIO_CTRL_M (GPIO_IN | GPIO_OUT)
+
+#define TRIG_CTRL_M \
+ (TRIG_ACTIVE | TRIG_ENABLE | TRIG_RESET)
+
+#define TS_CTRL_M \
+ (TS_INT_ENABLE | TS_ENABLE | TS_RESET)
+
+#define REG_TRIG_TARGET_NANOSEC 0x0530
+#define REG_TRIG_TARGET_SEC 0x0534
+
+#define REG_TRIG_CTRL__4 0x0538
+
+#define TRIG_CASCADE_ENABLE BIT(31)
+#define TRIG_CASCADE_TAIL BIT(30)
+#define TRIG_CASCADE_UPS_M 0xF
+#define TRIG_CASCADE_UPS_S 26
+#define TRIG_NOW BIT(25)
+#define TRIG_NOTIFY BIT(24)
+#define TRIG_EDGE BIT(23)
+#define TRIG_PATTERN_S 20
+#define TRIG_PATTERN_M 0x7
+#define TRIG_NEG_EDGE 0
+#define TRIG_POS_EDGE 1
+#define TRIG_NEG_PULSE 2
+#define TRIG_POS_PULSE 3
+#define TRIG_NEG_PERIOD 4
+#define TRIG_POS_PERIOD 5
+#define TRIG_REG_OUTPUT 6
+#define TRIG_GPO_S 16
+#define TRIG_GPO_M 0xF
+#define TRIG_CASCADE_ITERATE_CNT_M 0xFFFF
+
+#define REG_TRIG_CYCLE_WIDTH 0x053C
+
+#define REG_TRIG_CYCLE_CNT 0x0540
+
+#define TRIG_CYCLE_CNT_M 0xFFFF
+#define TRIG_CYCLE_CNT_S 16
+#define TRIG_BIT_PATTERN_M 0xFFFF
+
+#define REG_TRIG_ITERATE_TIME 0x0544
+
+#define REG_TRIG_PULSE_WIDTH__4 0x0548
+
+#define TRIG_PULSE_WIDTH_M 0x00FFFFFF
+
+#define REG_TS_CTRL_STAT__4 0x0550
+
+#define TS_EVENT_DETECT_M 0xF
+#define TS_EVENT_DETECT_S 17
+#define TS_EVENT_OVERFLOW BIT(16)
+#define TS_GPI_M 0xF
+#define TS_GPI_S 8
+#define TS_DETECT_RISE BIT(7)
+#define TS_DETECT_FALL BIT(6)
+#define TS_DETECT_S 6
+#define TS_CASCADE_TAIL BIT(5)
+#define TS_CASCADE_UPS_M 0xF
+#define TS_CASCADE_UPS_S 1
+#define TS_CASCADE_ENABLE BIT(0)
+
+#define DETECT_RISE (TS_DETECT_RISE >> TS_DETECT_S)
+#define DETECT_FALL (TS_DETECT_FALL >> TS_DETECT_S)
+
+#define REG_TS_EVENT_0_NANOSEC 0x0554
+#define REG_TS_EVENT_0_SEC 0x0558
+#define REG_TS_EVENT_0_SUB_NANOSEC 0x055C
+
+#define REG_TS_EVENT_1_NANOSEC 0x0560
+#define REG_TS_EVENT_1_SEC 0x0564
+#define REG_TS_EVENT_1_SUB_NANOSEC 0x0568
+
+#define REG_TS_EVENT_2_NANOSEC 0x056C
+#define REG_TS_EVENT_2_SEC 0x0570
+#define REG_TS_EVENT_2_SUB_NANOSEC 0x0574
+
+#define REG_TS_EVENT_3_NANOSEC 0x0578
+#define REG_TS_EVENT_3_SEC 0x057C
+#define REG_TS_EVENT_3_SUB_NANOSEC 0x0580
+
+#define REG_TS_EVENT_4_NANOSEC 0x0584
+#define REG_TS_EVENT_4_SEC 0x0588
+#define REG_TS_EVENT_4_SUB_NANOSEC 0x058C
+
+#define REG_TS_EVENT_5_NANOSEC 0x0590
+#define REG_TS_EVENT_5_SEC 0x0594
+#define REG_TS_EVENT_5_SUB_NANOSEC 0x0598
+
+#define REG_TS_EVENT_6_NANOSEC 0x059C
+#define REG_TS_EVENT_6_SEC 0x05A0
+#define REG_TS_EVENT_6_SUB_NANOSEC 0x05A4
+
+#define REG_TS_EVENT_7_NANOSEC 0x05A8
+#define REG_TS_EVENT_7_SEC 0x05AC
+#define REG_TS_EVENT_7_SUB_NANOSEC 0x05B0
+
+#define TS_EVENT_EDGE_M 0x1
+#define TS_EVENT_EDGE_S 30
+#define TS_EVENT_NANOSEC_M (BIT(30) - 1)
+
+#define TS_EVENT_SUB_NANOSEC_M 0x7
+
+#define TS_EVENT_SAMPLE \
+ (REG_TS_EVENT_1_NANOSEC - REG_TS_EVENT_0_NANOSEC)
+
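+/* Per-port register blocks sit at 4 KiB intervals: port N's space starts at (N + 1) * 0x1000 */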
+#define PORT_CTRL_ADDR(port, addr) ((addr) | (((port) + 1) << 12))
+
+#define REG_GLOBAL_RR_INDEX__1 0x0600
+
+/* DLR */
+#define REG_DLR_SRC_PORT__4 0x0604
+
+#define DLR_SRC_PORT_UNICAST BIT(31)
+#define DLR_SRC_PORT_M 0x3
+#define DLR_SRC_PORT_BOTH 0
+#define DLR_SRC_PORT_EACH 1
+
+#define REG_DLR_IP_ADDR__4 0x0608
+
+#define REG_DLR_CTRL__1 0x0610
+
+#define DLR_RESET_SEQ_ID BIT(3)
+#define DLR_BACKUP_AUTO_ON BIT(2)
+#define DLR_BEACON_TX_ENABLE BIT(1)
+#define DLR_ASSIST_ENABLE BIT(0)
+
+#define REG_DLR_STATE__1 0x0611
+
+#define DLR_NODE_STATE_M 0x3
+#define DLR_NODE_STATE_S 1
+#define DLR_NODE_STATE_IDLE 0
+#define DLR_NODE_STATE_FAULT 1
+#define DLR_NODE_STATE_NORMAL 2
+#define DLR_RING_STATE_FAULT 0
+#define DLR_RING_STATE_NORMAL 1
+
+#define REG_DLR_PRECEDENCE__1 0x0612
+
+#define REG_DLR_BEACON_INTERVAL__4 0x0614
+
+#define REG_DLR_BEACON_TIMEOUT__4 0x0618
+
+#define REG_DLR_TIMEOUT_WINDOW__4 0x061C
+
+#define DLR_TIMEOUT_WINDOW_M (BIT(22) - 1)
+
+#define REG_DLR_VLAN_ID__2 0x0620
+
+#define DLR_VLAN_ID_M (BIT(12) - 1)
+
+#define REG_DLR_DEST_ADDR_0 0x0622
+#define REG_DLR_DEST_ADDR_1 0x0623
+#define REG_DLR_DEST_ADDR_2 0x0624
+#define REG_DLR_DEST_ADDR_3 0x0625
+#define REG_DLR_DEST_ADDR_4 0x0626
+#define REG_DLR_DEST_ADDR_5 0x0627
+
+#define REG_DLR_PORT_MAP__4 0x0628
+
+#define REG_DLR_CLASS__1 0x062C
+
+#define DLR_FRAME_QID_M 0x3
+
+/* HSR */
+#define REG_HSR_PORT_MAP__4 0x0640
+
+#define REG_HSR_ALU_CTRL_0__1 0x0644
+
+#define HSR_DUPLICATE_DISCARD BIT(7)
+#define HSR_NODE_UNICAST BIT(6)
+#define HSR_AGE_CNT_DEFAULT_M 0x7
+#define HSR_AGE_CNT_DEFAULT_S 3
+#define HSR_LEARN_MCAST_DISABLE BIT(2)
+#define HSR_HASH_OPTION_M 0x3
+#define HSR_HASH_DISABLE 0
+#define HSR_HASH_UPPER_BITS 1
+#define HSR_HASH_LOWER_BITS 2
+#define HSR_HASH_XOR_BOTH_BITS 3
+
+#define REG_HSR_ALU_CTRL_1__1 0x0645
+
+#define HSR_LEARN_UCAST_DISABLE BIT(7)
+#define HSR_FLUSH_TABLE BIT(5)
+#define HSR_PROC_MCAST_SRC BIT(3)
+#define HSR_AGING_ENABLE BIT(2)
+
+#define REG_HSR_ALU_CTRL_2__2 0x0646
+
+#define REG_HSR_ALU_AGE_PERIOD__4 0x0648
+
+#define REG_HSR_ALU_INT_STATUS__1 0x064C
+#define REG_HSR_ALU_INT_MASK__1 0x064D
+
+#define HSR_WINDOW_OVERFLOW_INT BIT(3)
+#define HSR_LEARN_FAIL_INT BIT(2)
+#define HSR_ALMOST_FULL_INT BIT(1)
+#define HSR_WRITE_FAIL_INT BIT(0)
+
+#define REG_HSR_ALU_ENTRY_0__2 0x0650
+
+#define HSR_ENTRY_INDEX_M (BIT(10) - 1)
+#define HSR_FAIL_INDEX_M (BIT(8) - 1)
+
+#define REG_HSR_ALU_ENTRY_1__2 0x0652
+
+#define HSR_FAIL_LEARN_INDEX_M (BIT(8) - 1)
+
+#define REG_HSR_ALU_ENTRY_3__2 0x0654
+
+#define HSR_CPU_ACCESS_ENTRY_INDEX_M (BIT(8) - 1)
+
+/* 0 - Operation */
+#define REG_PORT_DEFAULT_VID 0x0000
+
+#define REG_PORT_CUSTOM_VID 0x0002
+#define REG_PORT_AVB_SR_1_VID 0x0004
+#define REG_PORT_AVB_SR_2_VID 0x0006
+
+#define REG_PORT_AVB_SR_1_TYPE 0x0008
+#define REG_PORT_AVB_SR_2_TYPE 0x000A
+
+#define REG_PORT_PME_STATUS 0x0013
+#define REG_PORT_PME_CTRL 0x0017
+
+#define PME_WOL_MAGICPKT BIT(2)
+#define PME_WOL_LINKUP BIT(1)
+#define PME_WOL_ENERGY BIT(0)
+
+#define REG_PORT_INT_STATUS 0x001B
+#define REG_PORT_INT_MASK 0x001F
+
+#define PORT_SGMII_INT BIT(3)
+#define PORT_PTP_INT BIT(2)
+#define PORT_PHY_INT BIT(1)
+#define PORT_ACL_INT BIT(0)
+
+#define PORT_INT_MASK \
+ (PORT_SGMII_INT | PORT_PTP_INT | PORT_PHY_INT | PORT_ACL_INT)
+
+#define REG_PORT_CTRL_0 0x0020
+
+#define PORT_MAC_LOOPBACK BIT(7)
+#define PORT_FORCE_TX_FLOW_CTRL BIT(4)
+#define PORT_FORCE_RX_FLOW_CTRL BIT(3)
+#define PORT_TAIL_TAG_ENABLE BIT(2)
+#define PORT_QUEUE_SPLIT_ENABLE 0x3
+
+#define REG_PORT_CTRL_1 0x0021
+
+#define PORT_SRP_ENABLE 0x3
+
+#define REG_PORT_STATUS_0 0x0030
+
+#define PORT_INTF_SPEED_M 0x3
+#define PORT_INTF_SPEED_S 3
+#define PORT_INTF_FULL_DUPLEX BIT(2)
+#define PORT_TX_FLOW_CTRL BIT(1)
+#define PORT_RX_FLOW_CTRL BIT(0)
+
+#define REG_PORT_STATUS_1 0x0034
+
+/* 1 - PHY */
+#define REG_PORT_PHY_CTRL 0x0100
+
+#define PORT_PHY_RESET BIT(15)
+#define PORT_PHY_LOOPBACK BIT(14)
+#define PORT_SPEED_100MBIT BIT(13)
+#define PORT_AUTO_NEG_ENABLE BIT(12)
+#define PORT_POWER_DOWN BIT(11)
+#define PORT_ISOLATE BIT(10)
+#define PORT_AUTO_NEG_RESTART BIT(9)
+#define PORT_FULL_DUPLEX BIT(8)
+#define PORT_COLLISION_TEST BIT(7)
+#define PORT_SPEED_1000MBIT BIT(6)
+
+#define REG_PORT_PHY_STATUS 0x0102
+
+#define PORT_100BT4_CAPABLE BIT(15)
+#define PORT_100BTX_FD_CAPABLE BIT(14)
+#define PORT_100BTX_CAPABLE BIT(13)
+#define PORT_10BT_FD_CAPABLE BIT(12)
+#define PORT_10BT_CAPABLE BIT(11)
+#define PORT_EXTENDED_STATUS BIT(8)
+#define PORT_MII_SUPPRESS_CAPABLE BIT(6)
+#define PORT_AUTO_NEG_ACKNOWLEDGE BIT(5)
+#define PORT_REMOTE_FAULT BIT(4)
+#define PORT_AUTO_NEG_CAPABLE BIT(3)
+#define PORT_LINK_STATUS BIT(2)
+#define PORT_JABBER_DETECT BIT(1)
+#define PORT_EXTENDED_CAPABILITY BIT(0)
+
+#define REG_PORT_PHY_ID_HI 0x0104
+#define REG_PORT_PHY_ID_LO 0x0106
+
+#define KSZ9477_ID_HI 0x0022
+#define KSZ9477_ID_LO 0x1622
+
+#define REG_PORT_PHY_AUTO_NEGOTIATION 0x0108
+
+#define PORT_AUTO_NEG_NEXT_PAGE BIT(15)
+#define PORT_AUTO_NEG_REMOTE_FAULT BIT(13)
+#define PORT_AUTO_NEG_ASYM_PAUSE BIT(11)
+#define PORT_AUTO_NEG_SYM_PAUSE BIT(10)
+#define PORT_AUTO_NEG_100BT4 BIT(9)
+#define PORT_AUTO_NEG_100BTX_FD BIT(8)
+#define PORT_AUTO_NEG_100BTX BIT(7)
+#define PORT_AUTO_NEG_10BT_FD BIT(6)
+#define PORT_AUTO_NEG_10BT BIT(5)
+#define PORT_AUTO_NEG_SELECTOR 0x001F
+#define PORT_AUTO_NEG_802_3 0x0001
+
+#define PORT_AUTO_NEG_PAUSE \
+ (PORT_AUTO_NEG_ASYM_PAUSE | PORT_AUTO_NEG_SYM_PAUSE)
+
+#define REG_PORT_PHY_REMOTE_CAPABILITY 0x010A
+
+#define PORT_REMOTE_NEXT_PAGE BIT(15)
+#define PORT_REMOTE_ACKNOWLEDGE BIT(14)
+#define PORT_REMOTE_REMOTE_FAULT BIT(13)
+#define PORT_REMOTE_ASYM_PAUSE BIT(11)
+#define PORT_REMOTE_SYM_PAUSE BIT(10)
+#define PORT_REMOTE_100BTX_FD BIT(8)
+#define PORT_REMOTE_100BTX BIT(7)
+#define PORT_REMOTE_10BT_FD BIT(6)
+#define PORT_REMOTE_10BT BIT(5)
+
+#define REG_PORT_PHY_1000_CTRL 0x0112
+
+#define PORT_AUTO_NEG_MANUAL BIT(12)
+#define PORT_AUTO_NEG_MASTER BIT(11)
+#define PORT_AUTO_NEG_MASTER_PREFERRED BIT(10)
+#define PORT_AUTO_NEG_1000BT_FD BIT(9)
+#define PORT_AUTO_NEG_1000BT BIT(8)
+
+#define REG_PORT_PHY_1000_STATUS 0x0114
+
+#define PORT_MASTER_FAULT BIT(15)
+#define PORT_LOCAL_MASTER BIT(14)
+#define PORT_LOCAL_RX_OK BIT(13)
+#define PORT_REMOTE_RX_OK BIT(12)
+#define PORT_REMOTE_1000BT_FD BIT(11)
+#define PORT_REMOTE_1000BT BIT(10)
+#define PORT_REMOTE_IDLE_CNT_M 0x0F
+
+#define PORT_PHY_1000_STATIC_STATUS \
+ (PORT_LOCAL_RX_OK | \
+ PORT_REMOTE_RX_OK | \
+ PORT_REMOTE_1000BT_FD | \
+ PORT_REMOTE_1000BT)
+
+#define REG_PORT_PHY_MMD_SETUP 0x011A
+
+#define PORT_MMD_OP_MODE_M 0x3
+#define PORT_MMD_OP_MODE_S 14
+#define PORT_MMD_OP_INDEX 0
+#define PORT_MMD_OP_DATA_NO_INCR 1
+#define PORT_MMD_OP_DATA_INCR_RW 2
+#define PORT_MMD_OP_DATA_INCR_W 3
+#define PORT_MMD_DEVICE_ID_M 0x1F
+
+#define MMD_SETUP(mode, dev) \
+ (((u16)(mode) << PORT_MMD_OP_MODE_S) | (dev))
+
+#define REG_PORT_PHY_MMD_INDEX_DATA 0x011C
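+
+/* MMD registers are reached indirectly: select the register with an INDEX
+ * operation, then switch to a data operation on the same setup register.
+ * Illustrative read sequence (error handling omitted):
+ *
+ *   ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_SETUP,
+ *                MMD_SETUP(PORT_MMD_OP_INDEX, devid));
+ *   ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_INDEX_DATA, reg);
+ *   ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_SETUP,
+ *                MMD_SETUP(PORT_MMD_OP_DATA_NO_INCR, devid));
+ *   ksz_pread16(dev, port, REG_PORT_PHY_MMD_INDEX_DATA, &val);
+ */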
+
+#define MMD_DEVICE_ID_DSP 1
+
+#define MMD_DSP_SQI_CHAN_A 0xAC
+#define MMD_DSP_SQI_CHAN_B 0xAD
+#define MMD_DSP_SQI_CHAN_C 0xAE
+#define MMD_DSP_SQI_CHAN_D 0xAF
+
+#define DSP_SQI_ERR_DETECTED BIT(15)
+#define DSP_SQI_AVG_ERR 0x7FFF
+
+#define MMD_DEVICE_ID_COMMON 2
+
+#define MMD_DEVICE_ID_EEE_ADV 7
+
+#define MMD_EEE_ADV 0x3C
+#define EEE_ADV_100MBIT BIT(1)
+#define EEE_ADV_1GBIT BIT(2)
+
+#define MMD_EEE_LP_ADV 0x3D
+#define MMD_EEE_MSG_CODE 0x3F
+
+#define MMD_DEVICE_ID_AFED 0x1C
+
+#define REG_PORT_PHY_EXTENDED_STATUS 0x011E
+
+#define PORT_100BTX_FD_ABLE BIT(15)
+#define PORT_100BTX_ABLE BIT(14)
+#define PORT_10BT_FD_ABLE BIT(13)
+#define PORT_10BT_ABLE BIT(12)
+
+#define REG_PORT_SGMII_ADDR__4 0x0200
+#define PORT_SGMII_AUTO_INCR BIT(23)
+#define PORT_SGMII_DEVICE_ID_M 0x1F
+#define PORT_SGMII_DEVICE_ID_S 16
+#define PORT_SGMII_ADDR_M (BIT(21) - 1)
+
+#define REG_PORT_SGMII_DATA__4 0x0204
+#define PORT_SGMII_DATA_M (BIT(16) - 1)
+
+#define MMD_DEVICE_ID_PMA 0x01
+#define MMD_DEVICE_ID_PCS 0x03
+#define MMD_DEVICE_ID_PHY_XS 0x04
+#define MMD_DEVICE_ID_DTE_XS 0x05
+#define MMD_DEVICE_ID_AN 0x07
+#define MMD_DEVICE_ID_VENDOR_CTRL 0x1E
+#define MMD_DEVICE_ID_VENDOR_MII 0x1F
+
+#define SR_MII MMD_DEVICE_ID_VENDOR_MII
+
+#define MMD_SR_MII_CTRL 0x0000
+
+#define SR_MII_RESET BIT(15)
+#define SR_MII_LOOPBACK BIT(14)
+#define SR_MII_SPEED_100MBIT BIT(13)
+#define SR_MII_AUTO_NEG_ENABLE BIT(12)
+#define SR_MII_POWER_DOWN BIT(11)
+#define SR_MII_AUTO_NEG_RESTART BIT(9)
+#define SR_MII_FULL_DUPLEX BIT(8)
+#define SR_MII_SPEED_1000MBIT BIT(6)
+
+#define MMD_SR_MII_STATUS 0x0001
+#define MMD_SR_MII_ID_1 0x0002
+#define MMD_SR_MII_ID_2 0x0003
+#define MMD_SR_MII_AUTO_NEGOTIATION 0x0004
+
+#define SR_MII_AUTO_NEG_NEXT_PAGE BIT(15)
+#define SR_MII_AUTO_NEG_REMOTE_FAULT_M 0x3
+#define SR_MII_AUTO_NEG_REMOTE_FAULT_S 12
+#define SR_MII_AUTO_NEG_NO_ERROR 0
+#define SR_MII_AUTO_NEG_OFFLINE 1
+#define SR_MII_AUTO_NEG_LINK_FAILURE 2
+#define SR_MII_AUTO_NEG_ERROR 3
+#define SR_MII_AUTO_NEG_PAUSE_M 0x3
+#define SR_MII_AUTO_NEG_PAUSE_S 7
+#define SR_MII_AUTO_NEG_NO_PAUSE 0
+#define SR_MII_AUTO_NEG_ASYM_PAUSE_TX 1
+#define SR_MII_AUTO_NEG_SYM_PAUSE 2
+#define SR_MII_AUTO_NEG_ASYM_PAUSE_RX 3
+#define SR_MII_AUTO_NEG_HALF_DUPLEX BIT(6)
+#define SR_MII_AUTO_NEG_FULL_DUPLEX BIT(5)
+
+#define MMD_SR_MII_REMOTE_CAPABILITY 0x0005
+#define MMD_SR_MII_AUTO_NEG_EXP 0x0006
+#define MMD_SR_MII_AUTO_NEG_EXT 0x000F
+
+#define MMD_SR_MII_DIGITAL_CTRL_1 0x8000
+
+#define MMD_SR_MII_AUTO_NEG_CTRL 0x8001
+
+#define SR_MII_8_BIT BIT(8)
+#define SR_MII_SGMII_LINK_UP BIT(4)
+#define SR_MII_TX_CFG_PHY_MASTER BIT(3)
+#define SR_MII_PCS_MODE_M 0x3
+#define SR_MII_PCS_MODE_S 1
+#define SR_MII_PCS_SGMII 2
+#define SR_MII_AUTO_NEG_COMPLETE_INTR BIT(0)
+
+#define MMD_SR_MII_AUTO_NEG_STATUS 0x8002
+
+#define SR_MII_STAT_LINK_UP BIT(4)
+#define SR_MII_STAT_M 0x3
+#define SR_MII_STAT_S 2
+#define SR_MII_STAT_10_MBPS 0
+#define SR_MII_STAT_100_MBPS 1
+#define SR_MII_STAT_1000_MBPS 2
+#define SR_MII_STAT_FULL_DUPLEX BIT(1)
+
+#define MMD_SR_MII_PHY_CTRL 0x80A0
+
+#define SR_MII_PHY_LANE_SEL_M 0xF
+#define SR_MII_PHY_LANE_SEL_S 8
+#define SR_MII_PHY_WRITE BIT(1)
+#define SR_MII_PHY_START_BUSY BIT(0)
+
+#define MMD_SR_MII_PHY_ADDR 0x80A1
+
+#define SR_MII_PHY_ADDR_M (BIT(16) - 1)
+
+#define MMD_SR_MII_PHY_DATA 0x80A2
+
+#define SR_MII_PHY_DATA_M (BIT(16) - 1)
+
+#define SR_MII_PHY_JTAG_CHIP_ID_HI 0x000C
+#define SR_MII_PHY_JTAG_CHIP_ID_LO 0x000D
+
+#define REG_PORT_PHY_REMOTE_LB_LED 0x0122
+
+#define PORT_REMOTE_LOOPBACK BIT(8)
+#define PORT_LED_SELECT (3 << 6)
+#define PORT_LED_CTRL (3 << 4)
+#define PORT_LED_CTRL_TEST BIT(3)
+#define PORT_10BT_PREAMBLE BIT(2)
+#define PORT_LINK_MD_10BT_ENABLE BIT(1)
+#define PORT_LINK_MD_PASS BIT(0)
+
+#define REG_PORT_PHY_LINK_MD 0x0124
+
+#define PORT_START_CABLE_DIAG BIT(15)
+#define PORT_TX_DISABLE BIT(14)
+#define PORT_CABLE_DIAG_PAIR_M 0x3
+#define PORT_CABLE_DIAG_PAIR_S 12
+#define PORT_CABLE_DIAG_SELECT_M 0x3
+#define PORT_CABLE_DIAG_SELECT_S 10
+#define PORT_CABLE_DIAG_RESULT_M 0x3
+#define PORT_CABLE_DIAG_RESULT_S 8
+#define PORT_CABLE_STAT_NORMAL 0
+#define PORT_CABLE_STAT_OPEN 1
+#define PORT_CABLE_STAT_SHORT 2
+#define PORT_CABLE_STAT_FAILED 3
+#define PORT_CABLE_FAULT_COUNTER 0x00FF
+
+#define REG_PORT_PHY_PMA_STATUS 0x0126
+
+#define PORT_1000_LINK_GOOD BIT(1)
+#define PORT_100_LINK_GOOD BIT(0)
+
+#define REG_PORT_PHY_DIGITAL_STATUS 0x0128
+
+#define PORT_LINK_DETECT BIT(14)
+#define PORT_SIGNAL_DETECT BIT(13)
+#define PORT_PHY_STAT_MDI BIT(12)
+#define PORT_PHY_STAT_MASTER BIT(11)
+
+#define REG_PORT_PHY_RXER_COUNTER 0x012A
+
+#define REG_PORT_PHY_INT_ENABLE 0x0136
+#define REG_PORT_PHY_INT_STATUS 0x0137
+
+#define JABBER_INT BIT(7)
+#define RX_ERR_INT BIT(6)
+#define PAGE_RX_INT BIT(5)
+#define PARALLEL_DETECT_FAULT_INT BIT(4)
+#define LINK_PARTNER_ACK_INT BIT(3)
+#define LINK_DOWN_INT BIT(2)
+#define REMOTE_FAULT_INT BIT(1)
+#define LINK_UP_INT BIT(0)
+
+#define REG_PORT_PHY_DIGITAL_DEBUG_1 0x0138
+
+#define PORT_REG_CLK_SPEED_25_MHZ BIT(14)
+#define PORT_PHY_FORCE_MDI BIT(7)
+#define PORT_PHY_AUTO_MDIX_DISABLE BIT(6)
+
+/* Same as PORT_PHY_LOOPBACK */
+#define PORT_PHY_PCS_LOOPBACK BIT(0)
+
+#define REG_PORT_PHY_DIGITAL_DEBUG_2 0x013A
+
+#define REG_PORT_PHY_DIGITAL_DEBUG_3 0x013C
+
+#define PORT_100BT_FIXED_LATENCY BIT(15)
+
+#define REG_PORT_PHY_PHY_CTRL 0x013E
+
+#define PORT_INT_PIN_HIGH BIT(14)
+#define PORT_ENABLE_JABBER BIT(9)
+#define PORT_STAT_SPEED_1000MBIT BIT(6)
+#define PORT_STAT_SPEED_100MBIT BIT(5)
+#define PORT_STAT_SPEED_10MBIT BIT(4)
+#define PORT_STAT_FULL_DUPLEX BIT(3)
+
+/* Same as PORT_PHY_STAT_MASTER */
+#define PORT_STAT_MASTER BIT(2)
+#define PORT_RESET BIT(1)
+#define PORT_LINK_STATUS_FAIL BIT(0)
+
+/* 3 - xMII */
+#define PORT_SGMII_SEL BIT(7)
+#define PORT_GRXC_ENABLE BIT(0)
+
+#define PORT_RMII_CLK_SEL BIT(7)
+#define PORT_MII_SEL_EDGE BIT(5)
+
+/* 4 - MAC */
+#define REG_PORT_MAC_CTRL_0 0x0400
+
+#define PORT_BROADCAST_STORM BIT(1)
+#define PORT_JUMBO_FRAME BIT(0)
+
+#define REG_PORT_MAC_CTRL_1 0x0401
+
+#define PORT_BACK_PRESSURE BIT(3)
+#define PORT_PASS_ALL BIT(0)
+
+#define REG_PORT_MAC_CTRL_2 0x0402
+
+#define PORT_100BT_EEE_DISABLE BIT(7)
+#define PORT_1000BT_EEE_DISABLE BIT(6)
+
+#define REG_PORT_MAC_IN_RATE_LIMIT 0x0403
+
+#define PORT_IN_PORT_BASED_S 6
+#define PORT_RATE_PACKET_BASED_S 5
+#define PORT_IN_FLOW_CTRL_S 4
+#define PORT_COUNT_IFG_S 1
+#define PORT_COUNT_PREAMBLE_S 0
+#define PORT_IN_PORT_BASED BIT(6)
+#define PORT_IN_PACKET_BASED BIT(5)
+#define PORT_IN_FLOW_CTRL BIT(4)
+#define PORT_IN_LIMIT_MODE_M 0x3
+#define PORT_IN_LIMIT_MODE_S 2
+#define PORT_IN_ALL 0
+#define PORT_IN_UNICAST 1
+#define PORT_IN_MULTICAST 2
+#define PORT_IN_BROADCAST 3
+#define PORT_COUNT_IFG BIT(1)
+#define PORT_COUNT_PREAMBLE BIT(0)
+
+#define REG_PORT_IN_RATE_0 0x0410
+#define REG_PORT_IN_RATE_1 0x0411
+#define REG_PORT_IN_RATE_2 0x0412
+#define REG_PORT_IN_RATE_3 0x0413
+#define REG_PORT_IN_RATE_4 0x0414
+#define REG_PORT_IN_RATE_5 0x0415
+#define REG_PORT_IN_RATE_6 0x0416
+#define REG_PORT_IN_RATE_7 0x0417
+
+#define REG_PORT_OUT_RATE_0 0x0420
+#define REG_PORT_OUT_RATE_1 0x0421
+#define REG_PORT_OUT_RATE_2 0x0422
+#define REG_PORT_OUT_RATE_3 0x0423
+
+#define PORT_RATE_LIMIT_M (BIT(7) - 1)
+
+/* 5 - MIB Counters */
+#define REG_PORT_MIB_CTRL_STAT__4 0x0500
+
+#define MIB_COUNTER_READ BIT(25)
+#define MIB_COUNTER_FLUSH_FREEZE BIT(24)
+#define MIB_COUNTER_INDEX_M (BIT(8) - 1)
+#define MIB_COUNTER_INDEX_S 16
+#define MIB_COUNTER_DATA_HI_M 0xF
+
+#define REG_PORT_MIB_DATA 0x0504
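+
+/* A counter is fetched by writing its index together with the read strobe,
+ * polling for completion and then reading the data register. Illustrative
+ * sequence (the poll step is sketched, not literal code):
+ *
+ *   ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4,
+ *                MIB_COUNTER_READ | (idx << MIB_COUNTER_INDEX_S));
+ *   ... poll until MIB_COUNTER_READ reads back as zero ...
+ *   ksz_pread32(dev, port, REG_PORT_MIB_DATA, &cnt);
+ */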
+
+/* 6 - ACL */
+#define REG_PORT_ACL_0 0x0600
+
+#define ACL_FIRST_RULE_M 0xF
+
+#define REG_PORT_ACL_1 0x0601
+
+#define ACL_MODE_M 0x3
+#define ACL_MODE_S 4
+#define ACL_MODE_DISABLE 0
+#define ACL_MODE_LAYER_2 1
+#define ACL_MODE_LAYER_3 2
+#define ACL_MODE_LAYER_4 3
+#define ACL_ENABLE_M 0x3
+#define ACL_ENABLE_S 2
+#define ACL_ENABLE_2_COUNT 0
+#define ACL_ENABLE_2_TYPE 1
+#define ACL_ENABLE_2_MAC 2
+#define ACL_ENABLE_2_BOTH 3
+#define ACL_ENABLE_3_IP 1
+#define ACL_ENABLE_3_SRC_DST_COMP 2
+#define ACL_ENABLE_4_PROTOCOL 0
+#define ACL_ENABLE_4_TCP_PORT_COMP 1
+#define ACL_ENABLE_4_UDP_PORT_COMP 2
+#define ACL_ENABLE_4_TCP_SEQN_COMP 3
+#define ACL_SRC BIT(1)
+#define ACL_EQUAL BIT(0)
+
+#define REG_PORT_ACL_2 0x0602
+#define REG_PORT_ACL_3 0x0603
+
+#define ACL_MAX_PORT 0xFFFF
+
+#define REG_PORT_ACL_4 0x0604
+#define REG_PORT_ACL_5 0x0605
+
+#define ACL_MIN_PORT 0xFFFF
+#define ACL_IP_ADDR 0xFFFFFFFF
+#define ACL_TCP_SEQNUM 0xFFFFFFFF
+
+#define REG_PORT_ACL_6 0x0606
+
+#define ACL_RESERVED 0xF8
+#define ACL_PORT_MODE_M 0x3
+#define ACL_PORT_MODE_S 1
+#define ACL_PORT_MODE_DISABLE 0
+#define ACL_PORT_MODE_EITHER 1
+#define ACL_PORT_MODE_IN_RANGE 2
+#define ACL_PORT_MODE_OUT_OF_RANGE 3
+
+#define REG_PORT_ACL_7 0x0607
+
+#define ACL_TCP_FLAG_ENABLE BIT(0)
+
+#define REG_PORT_ACL_8 0x0608
+
+#define ACL_TCP_FLAG_M 0xFF
+
+#define REG_PORT_ACL_9 0x0609
+
+#define ACL_TCP_FLAG 0xFF
+#define ACL_ETH_TYPE 0xFFFF
+#define ACL_IP_M 0xFFFFFFFF
+
+#define REG_PORT_ACL_A 0x060A
+
+#define ACL_PRIO_MODE_M 0x3
+#define ACL_PRIO_MODE_S 6
+#define ACL_PRIO_MODE_DISABLE 0
+#define ACL_PRIO_MODE_HIGHER 1
+#define ACL_PRIO_MODE_LOWER 2
+#define ACL_PRIO_MODE_REPLACE 3
+#define ACL_PRIO_M KS_PRIO_M
+#define ACL_PRIO_S 3
+#define ACL_VLAN_PRIO_REPLACE BIT(2)
+#define ACL_VLAN_PRIO_M KS_PRIO_M
+#define ACL_VLAN_PRIO_HI_M 0x3
+
+#define REG_PORT_ACL_B 0x060B
+
+#define ACL_VLAN_PRIO_LO_M 0x8
+#define ACL_VLAN_PRIO_S 7
+#define ACL_MAP_MODE_M 0x3
+#define ACL_MAP_MODE_S 5
+#define ACL_MAP_MODE_DISABLE 0
+#define ACL_MAP_MODE_OR 1
+#define ACL_MAP_MODE_AND 2
+#define ACL_MAP_MODE_REPLACE 3
+
+#define ACL_CNT_M (BIT(11) - 1)
+#define ACL_CNT_S 5
+
+#define REG_PORT_ACL_C 0x060C
+
+#define REG_PORT_ACL_D 0x060D
+#define ACL_MSEC_UNIT BIT(6)
+#define ACL_INTR_MODE BIT(5)
+#define ACL_PORT_MAP 0x7F
+
+#define REG_PORT_ACL_E 0x060E
+#define REG_PORT_ACL_F 0x060F
+
+#define REG_PORT_ACL_BYTE_EN_MSB 0x0610
+#define REG_PORT_ACL_BYTE_EN_LSB 0x0611
+
+#define ACL_ACTION_START 0xA
+#define ACL_ACTION_LEN 4
+#define ACL_INTR_CNT_START 0xD
+#define ACL_RULESET_START 0xE
+#define ACL_RULESET_LEN 2
+#define ACL_TABLE_LEN 16
+
+#define ACL_ACTION_ENABLE 0x003C
+#define ACL_MATCH_ENABLE 0x7FC3
+#define ACL_RULESET_ENABLE 0x8003
+#define ACL_BYTE_ENABLE 0xFFFF
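+
+/* Each ACL rule occupies ACL_TABLE_LEN (16) bytes, and the byte-enable
+ * registers form a 16-bit map in which bit 15 selects byte 0. The action
+ * field at bytes 10-13 (ACL_ACTION_START/ACL_ACTION_LEN) thus maps to
+ * bits 5-2, giving ACL_ACTION_ENABLE (0x003C), while the ruleset field
+ * plus byte 0 gives ACL_RULESET_ENABLE (0x8003).
+ */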
+
+#define REG_PORT_ACL_CTRL_0 0x0612
+
+#define PORT_ACL_WRITE_DONE BIT(6)
+#define PORT_ACL_READ_DONE BIT(5)
+#define PORT_ACL_WRITE BIT(4)
+#define PORT_ACL_INDEX_M 0xF
+
+#define REG_PORT_ACL_CTRL_1 0x0613
+
+/* 8 - Classification and Policing */
+#define REG_PORT_MRI_MIRROR_CTRL 0x0800
+
+#define PORT_MIRROR_RX BIT(6)
+#define PORT_MIRROR_TX BIT(5)
+#define PORT_MIRROR_SNIFFER BIT(1)
+
+#define REG_PORT_MRI_PRIO_CTRL 0x0801
+
+#define PORT_HIGHEST_PRIO BIT(7)
+#define PORT_OR_PRIO BIT(6)
+#define PORT_MAC_PRIO_ENABLE BIT(4)
+#define PORT_VLAN_PRIO_ENABLE BIT(3)
+#define PORT_802_1P_PRIO_ENABLE BIT(2)
+#define PORT_DIFFSERV_PRIO_ENABLE BIT(1)
+#define PORT_ACL_PRIO_ENABLE BIT(0)
+
+#define REG_PORT_MRI_MAC_CTRL 0x0802
+
+#define PORT_USER_PRIO_CEILING BIT(7)
+#define PORT_DROP_NON_VLAN BIT(4)
+#define PORT_DROP_TAG BIT(3)
+#define PORT_BASED_PRIO_M KS_PRIO_M
+#define PORT_BASED_PRIO_S 0
+
+#define REG_PORT_MRI_AUTHEN_CTRL 0x0803
+
+#define PORT_ACL_ENABLE BIT(2)
+#define PORT_AUTHEN_MODE 0x3
+#define PORT_AUTHEN_PASS 0
+#define PORT_AUTHEN_BLOCK 1
+#define PORT_AUTHEN_TRAP 2
+
+#define REG_PORT_MRI_INDEX__4 0x0804
+
+#define MRI_INDEX_P_M 0x7
+#define MRI_INDEX_P_S 16
+#define MRI_INDEX_Q_M 0x3
+#define MRI_INDEX_Q_S 0
+
+#define REG_PORT_MRI_TC_MAP__4 0x0808
+
+#define PORT_TC_MAP_M 0xf
+#define PORT_TC_MAP_S 4
+
+#define REG_PORT_MRI_POLICE_CTRL__4 0x080C
+
+#define POLICE_DROP_ALL BIT(10)
+#define POLICE_PACKET_TYPE_M 0x3
+#define POLICE_PACKET_TYPE_S 8
+#define POLICE_PACKET_DROPPED 0
+#define POLICE_PACKET_GREEN 1
+#define POLICE_PACKET_YELLOW 2
+#define POLICE_PACKET_RED 3
+#define PORT_BASED_POLICING BIT(7)
+#define NON_DSCP_COLOR_M 0x3
+#define NON_DSCP_COLOR_S 5
+#define COLOR_MARK_ENABLE BIT(4)
+#define COLOR_REMAP_ENABLE BIT(3)
+#define POLICE_DROP_SRP BIT(2)
+#define POLICE_COLOR_NOT_AWARE BIT(1)
+#define POLICE_ENABLE BIT(0)
+
+#define REG_PORT_POLICE_COLOR_0__4 0x0810
+#define REG_PORT_POLICE_COLOR_1__4 0x0814
+#define REG_PORT_POLICE_COLOR_2__4 0x0818
+#define REG_PORT_POLICE_COLOR_3__4 0x081C
+
+#define POLICE_COLOR_MAP_S 2
+#define POLICE_COLOR_MAP_M (BIT(POLICE_COLOR_MAP_S) - 1)
+
+#define REG_PORT_POLICE_RATE__4 0x0820
+
+#define POLICE_CIR_S 16
+#define POLICE_PIR_S 0
+
+#define REG_PORT_POLICE_BURST_SIZE__4 0x0824
+
+#define POLICE_BURST_SIZE_M 0x3FFF
+#define POLICE_CBS_S 16
+#define POLICE_PBS_S 0
+
+#define REG_PORT_WRED_PM_CTRL_0__4 0x0830
+
+#define WRED_PM_CTRL_M (BIT(11) - 1)
+
+#define WRED_PM_MAX_THRESHOLD_S 16
+#define WRED_PM_MIN_THRESHOLD_S 0
+
+#define REG_PORT_WRED_PM_CTRL_1__4 0x0834
+
+#define WRED_PM_MULTIPLIER_S 16
+#define WRED_PM_AVG_QUEUE_SIZE_S 0
+
+#define REG_PORT_WRED_QUEUE_CTRL_0__4 0x0840
+#define REG_PORT_WRED_QUEUE_CTRL_1__4 0x0844
+
+#define REG_PORT_WRED_QUEUE_PMON__4 0x0848
+
+#define WRED_RANDOM_DROP_ENABLE BIT(31)
+#define WRED_PMON_FLUSH BIT(30)
+#define WRED_DROP_GYR_DISABLE BIT(29)
+#define WRED_DROP_YR_DISABLE BIT(28)
+#define WRED_DROP_R_DISABLE BIT(27)
+#define WRED_DROP_ALL BIT(26)
+#define WRED_PMON_M (BIT(24) - 1)
+
+/* 9 - Shaping */
+
+#define REG_PORT_MTI_QUEUE_INDEX__4 0x0900
+
+#define REG_PORT_MTI_QUEUE_CTRL_0__4 0x0904
+
+#define MTI_PVID_REPLACE BIT(0)
+
+#define REG_PORT_MTI_QUEUE_CTRL_0 0x0914
+
+#define MTI_SCHEDULE_MODE_M 0x3
+#define MTI_SCHEDULE_MODE_S 6
+#define MTI_SCHEDULE_STRICT_PRIO 0
+#define MTI_SCHEDULE_WRR 2
+#define MTI_SHAPING_M 0x3
+#define MTI_SHAPING_S 4
+#define MTI_SHAPING_OFF 0
+#define MTI_SHAPING_SRP 1
+#define MTI_SHAPING_TIME_AWARE 2
+
+#define REG_PORT_MTI_QUEUE_CTRL_1 0x0915
+
+#define MTI_TX_RATIO_M (BIT(7) - 1)
+
+#define REG_PORT_MTI_QUEUE_CTRL_2__2 0x0916
+#define REG_PORT_MTI_HI_WATER_MARK 0x0916
+#define REG_PORT_MTI_QUEUE_CTRL_3__2 0x0918
+#define REG_PORT_MTI_LO_WATER_MARK 0x0918
+#define REG_PORT_MTI_QUEUE_CTRL_4__2 0x091A
+#define REG_PORT_MTI_CREDIT_INCREMENT 0x091A
+
+/* A - QM */
+
+#define REG_PORT_QM_CTRL__4 0x0A00
+
+#define PORT_QM_DROP_PRIO_M 0x3
+
+#define REG_PORT_VLAN_MEMBERSHIP__4 0x0A04
+
+#define REG_PORT_QM_QUEUE_INDEX__4 0x0A08
+
+#define PORT_QM_QUEUE_INDEX_S 24
+#define PORT_QM_BURST_SIZE_S 16
+#define PORT_QM_MIN_RESV_SPACE_M (BIT(11) - 1)
+
+#define REG_PORT_QM_WATER_MARK__4 0x0A0C
+
+#define PORT_QM_HI_WATER_MARK_S 16
+#define PORT_QM_LO_WATER_MARK_S 0
+#define PORT_QM_WATER_MARK_M (BIT(11) - 1)
+
+#define REG_PORT_QM_TX_CNT_0__4 0x0A10
+
+#define PORT_QM_TX_CNT_USED_S 0
+#define PORT_QM_TX_CNT_M (BIT(11) - 1)
+
+#define REG_PORT_QM_TX_CNT_1__4 0x0A14
+
+#define PORT_QM_TX_CNT_CALCULATED_S 16
+#define PORT_QM_TX_CNT_AVAIL_S 0
+
+/* B - LUE */
+#define REG_PORT_LUE_CTRL 0x0B00
+
+#define PORT_VLAN_LOOKUP_VID_0 BIT(7)
+#define PORT_INGRESS_FILTER BIT(6)
+#define PORT_DISCARD_NON_VID BIT(5)
+#define PORT_MAC_BASED_802_1X BIT(4)
+#define PORT_SRC_ADDR_FILTER BIT(3)
+
+#define REG_PORT_LUE_MSTP_INDEX 0x0B01
+
+#define REG_PORT_LUE_MSTP_STATE 0x0B04
+
+/* C - PTP */
+
+#define REG_PTP_PORT_RX_DELAY__2 0x0C00
+#define REG_PTP_PORT_TX_DELAY__2 0x0C02
+#define REG_PTP_PORT_ASYM_DELAY__2 0x0C04
+
+#define REG_PTP_PORT_XDELAY_TS 0x0C08
+#define REG_PTP_PORT_XDELAY_TS_H 0x0C08
+#define REG_PTP_PORT_XDELAY_TS_L 0x0C0A
+
+#define REG_PTP_PORT_SYNC_TS 0x0C0C
+#define REG_PTP_PORT_SYNC_TS_H 0x0C0C
+#define REG_PTP_PORT_SYNC_TS_L 0x0C0E
+
+#define REG_PTP_PORT_PDRESP_TS 0x0C10
+#define REG_PTP_PORT_PDRESP_TS_H 0x0C10
+#define REG_PTP_PORT_PDRESP_TS_L 0x0C12
+
+#define REG_PTP_PORT_TX_INT_STATUS__2 0x0C14
+#define REG_PTP_PORT_TX_INT_ENABLE__2 0x0C16
+
+#define PTP_PORT_SYNC_INT BIT(15)
+#define PTP_PORT_XDELAY_REQ_INT BIT(14)
+#define PTP_PORT_PDELAY_RESP_INT BIT(13)
+
+#define REG_PTP_PORT_LINK_DELAY__4 0x0C18
+
+#define PRIO_QUEUES 4
+#define RX_PRIO_QUEUES 8
+
+#define KS_PRIO_IN_REG 2
+
+#define TOTAL_PORT_NUM 7
+
+#define KSZ9477_COUNTER_NUM 0x20
+#define TOTAL_KSZ9477_COUNTER_NUM (KSZ9477_COUNTER_NUM + 2 + 2)
+
+#define SWITCH_COUNTER_NUM KSZ9477_COUNTER_NUM
+#define TOTAL_SWITCH_COUNTER_NUM TOTAL_KSZ9477_COUNTER_NUM
+
+#define P_BCAST_STORM_CTRL REG_PORT_MAC_CTRL_0
+#define P_PRIO_CTRL REG_PORT_MRI_PRIO_CTRL
+#define P_MIRROR_CTRL REG_PORT_MRI_MIRROR_CTRL
+#define P_PHY_CTRL REG_PORT_PHY_CTRL
+#define P_RATE_LIMIT_CTRL REG_PORT_MAC_IN_RATE_LIMIT
+
+#define S_LINK_AGING_CTRL REG_SW_LUE_CTRL_1
+#define S_MIRROR_CTRL REG_SW_MRI_CTRL_0
+#define S_REPLACE_VID_CTRL REG_SW_MAC_CTRL_2
+#define S_802_1P_PRIO_CTRL REG_SW_MAC_802_1P_MAP_0
+#define S_TOS_PRIO_CTRL REG_SW_MAC_TOS_PRIO_0
+#define S_FLUSH_TABLE_CTRL REG_SW_LUE_CTRL_1
+
+#define SW_FLUSH_DYN_MAC_TABLE SW_FLUSH_MSTP_TABLE
+
+#define MAX_TIMESTAMP_UNIT 2
+#define MAX_TRIG_UNIT 3
+#define MAX_TIMESTAMP_EVENT_UNIT 8
+#define MAX_GPIO 4
+
+#define PTP_TRIG_UNIT_M (BIT(MAX_TRIG_UNIT) - 1)
+#define PTP_TS_UNIT_M (BIT(MAX_TIMESTAMP_UNIT) - 1)
+
+#define KSZ9477_MAX_FRAME_SIZE 9000
+
+#endif /* KSZ9477_REGS_H */
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
new file mode 100644
index 000000000..dc9eea3c8
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -0,0 +1,3044 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip switch driver main logic
+ *
+ * Copyright (C) 2017-2019 Microchip Technology Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_data/microchip-ksz.h>
+#include <linux/phy.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_mdio.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/micrel_phy.h>
+#include <net/dsa.h>
+#include <net/switchdev.h>
+
+#include "ksz_common.h"
+#include "ksz8.h"
+#include "ksz9477.h"
+#include "lan937x.h"
+
+#define MIB_COUNTER_NUM 0x20
+
+struct ksz_stats_raw {
+ u64 rx_hi;
+ u64 rx_undersize;
+ u64 rx_fragments;
+ u64 rx_oversize;
+ u64 rx_jabbers;
+ u64 rx_symbol_err;
+ u64 rx_crc_err;
+ u64 rx_align_err;
+ u64 rx_mac_ctrl;
+ u64 rx_pause;
+ u64 rx_bcast;
+ u64 rx_mcast;
+ u64 rx_ucast;
+ u64 rx_64_or_less;
+ u64 rx_65_127;
+ u64 rx_128_255;
+ u64 rx_256_511;
+ u64 rx_512_1023;
+ u64 rx_1024_1522;
+ u64 rx_1523_2000;
+ u64 rx_2001;
+ u64 tx_hi;
+ u64 tx_late_col;
+ u64 tx_pause;
+ u64 tx_bcast;
+ u64 tx_mcast;
+ u64 tx_ucast;
+ u64 tx_deferred;
+ u64 tx_total_col;
+ u64 tx_exc_col;
+ u64 tx_single_col;
+ u64 tx_mult_col;
+ u64 rx_total;
+ u64 tx_total;
+ u64 rx_discards;
+ u64 tx_discards;
+};
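+
+/* The field order above mirrors ksz9477_mib_names below, so the per-port
+ * counter array can be overlaid on this struct when folding the hardware
+ * counters into the rtnl_link_stats64 snapshot.
+ */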
+
+static const struct ksz_mib_names ksz88xx_mib_names[] = {
+ { 0x00, "rx" },
+ { 0x01, "rx_hi" },
+ { 0x02, "rx_undersize" },
+ { 0x03, "rx_fragments" },
+ { 0x04, "rx_oversize" },
+ { 0x05, "rx_jabbers" },
+ { 0x06, "rx_symbol_err" },
+ { 0x07, "rx_crc_err" },
+ { 0x08, "rx_align_err" },
+ { 0x09, "rx_mac_ctrl" },
+ { 0x0a, "rx_pause" },
+ { 0x0b, "rx_bcast" },
+ { 0x0c, "rx_mcast" },
+ { 0x0d, "rx_ucast" },
+ { 0x0e, "rx_64_or_less" },
+ { 0x0f, "rx_65_127" },
+ { 0x10, "rx_128_255" },
+ { 0x11, "rx_256_511" },
+ { 0x12, "rx_512_1023" },
+ { 0x13, "rx_1024_1522" },
+ { 0x14, "tx" },
+ { 0x15, "tx_hi" },
+ { 0x16, "tx_late_col" },
+ { 0x17, "tx_pause" },
+ { 0x18, "tx_bcast" },
+ { 0x19, "tx_mcast" },
+ { 0x1a, "tx_ucast" },
+ { 0x1b, "tx_deferred" },
+ { 0x1c, "tx_total_col" },
+ { 0x1d, "tx_exc_col" },
+ { 0x1e, "tx_single_col" },
+ { 0x1f, "tx_mult_col" },
+ { 0x100, "rx_discards" },
+ { 0x101, "tx_discards" },
+};
+
+static const struct ksz_mib_names ksz9477_mib_names[] = {
+ { 0x00, "rx_hi" },
+ { 0x01, "rx_undersize" },
+ { 0x02, "rx_fragments" },
+ { 0x03, "rx_oversize" },
+ { 0x04, "rx_jabbers" },
+ { 0x05, "rx_symbol_err" },
+ { 0x06, "rx_crc_err" },
+ { 0x07, "rx_align_err" },
+ { 0x08, "rx_mac_ctrl" },
+ { 0x09, "rx_pause" },
+ { 0x0A, "rx_bcast" },
+ { 0x0B, "rx_mcast" },
+ { 0x0C, "rx_ucast" },
+ { 0x0D, "rx_64_or_less" },
+ { 0x0E, "rx_65_127" },
+ { 0x0F, "rx_128_255" },
+ { 0x10, "rx_256_511" },
+ { 0x11, "rx_512_1023" },
+ { 0x12, "rx_1024_1522" },
+ { 0x13, "rx_1523_2000" },
+ { 0x14, "rx_2001" },
+ { 0x15, "tx_hi" },
+ { 0x16, "tx_late_col" },
+ { 0x17, "tx_pause" },
+ { 0x18, "tx_bcast" },
+ { 0x19, "tx_mcast" },
+ { 0x1A, "tx_ucast" },
+ { 0x1B, "tx_deferred" },
+ { 0x1C, "tx_total_col" },
+ { 0x1D, "tx_exc_col" },
+ { 0x1E, "tx_single_col" },
+ { 0x1F, "tx_mult_col" },
+ { 0x80, "rx_total" },
+ { 0x81, "tx_total" },
+ { 0x82, "rx_discards" },
+ { 0x83, "tx_discards" },
+};
+
+static const struct ksz_dev_ops ksz8_dev_ops = {
+ .setup = ksz8_setup,
+ .get_port_addr = ksz8_get_port_addr,
+ .cfg_port_member = ksz8_cfg_port_member,
+ .flush_dyn_mac_table = ksz8_flush_dyn_mac_table,
+ .port_setup = ksz8_port_setup,
+ .r_phy = ksz8_r_phy,
+ .w_phy = ksz8_w_phy,
+ .r_mib_cnt = ksz8_r_mib_cnt,
+ .r_mib_pkt = ksz8_r_mib_pkt,
+ .freeze_mib = ksz8_freeze_mib,
+ .port_init_cnt = ksz8_port_init_cnt,
+ .fdb_dump = ksz8_fdb_dump,
+ .mdb_add = ksz8_mdb_add,
+ .mdb_del = ksz8_mdb_del,
+ .vlan_filtering = ksz8_port_vlan_filtering,
+ .vlan_add = ksz8_port_vlan_add,
+ .vlan_del = ksz8_port_vlan_del,
+ .mirror_add = ksz8_port_mirror_add,
+ .mirror_del = ksz8_port_mirror_del,
+ .get_caps = ksz8_get_caps,
+ .config_cpu_port = ksz8_config_cpu_port,
+ .enable_stp_addr = ksz8_enable_stp_addr,
+ .reset = ksz8_reset_switch,
+ .init = ksz8_switch_init,
+ .exit = ksz8_switch_exit,
+};
+
+static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause,
+ bool rx_pause);
+
+static const struct ksz_dev_ops ksz9477_dev_ops = {
+ .setup = ksz9477_setup,
+ .get_port_addr = ksz9477_get_port_addr,
+ .cfg_port_member = ksz9477_cfg_port_member,
+ .flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
+ .port_setup = ksz9477_port_setup,
+ .set_ageing_time = ksz9477_set_ageing_time,
+ .r_phy = ksz9477_r_phy,
+ .w_phy = ksz9477_w_phy,
+ .r_mib_cnt = ksz9477_r_mib_cnt,
+ .r_mib_pkt = ksz9477_r_mib_pkt,
+ .r_mib_stat64 = ksz_r_mib_stats64,
+ .freeze_mib = ksz9477_freeze_mib,
+ .port_init_cnt = ksz9477_port_init_cnt,
+ .vlan_filtering = ksz9477_port_vlan_filtering,
+ .vlan_add = ksz9477_port_vlan_add,
+ .vlan_del = ksz9477_port_vlan_del,
+ .mirror_add = ksz9477_port_mirror_add,
+ .mirror_del = ksz9477_port_mirror_del,
+ .get_caps = ksz9477_get_caps,
+ .fdb_dump = ksz9477_fdb_dump,
+ .fdb_add = ksz9477_fdb_add,
+ .fdb_del = ksz9477_fdb_del,
+ .mdb_add = ksz9477_mdb_add,
+ .mdb_del = ksz9477_mdb_del,
+ .change_mtu = ksz9477_change_mtu,
+ .max_mtu = ksz9477_max_mtu,
+ .phylink_mac_link_up = ksz9477_phylink_mac_link_up,
+ .config_cpu_port = ksz9477_config_cpu_port,
+ .enable_stp_addr = ksz9477_enable_stp_addr,
+ .reset = ksz9477_reset_switch,
+ .init = ksz9477_switch_init,
+ .exit = ksz9477_switch_exit,
+};
+
+static const struct ksz_dev_ops lan937x_dev_ops = {
+ .setup = lan937x_setup,
+ .teardown = lan937x_teardown,
+ .get_port_addr = ksz9477_get_port_addr,
+ .cfg_port_member = ksz9477_cfg_port_member,
+ .flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
+ .port_setup = lan937x_port_setup,
+ .set_ageing_time = lan937x_set_ageing_time,
+ .r_phy = lan937x_r_phy,
+ .w_phy = lan937x_w_phy,
+ .r_mib_cnt = ksz9477_r_mib_cnt,
+ .r_mib_pkt = ksz9477_r_mib_pkt,
+ .r_mib_stat64 = ksz_r_mib_stats64,
+ .freeze_mib = ksz9477_freeze_mib,
+ .port_init_cnt = ksz9477_port_init_cnt,
+ .vlan_filtering = ksz9477_port_vlan_filtering,
+ .vlan_add = ksz9477_port_vlan_add,
+ .vlan_del = ksz9477_port_vlan_del,
+ .mirror_add = ksz9477_port_mirror_add,
+ .mirror_del = ksz9477_port_mirror_del,
+ .get_caps = lan937x_phylink_get_caps,
+ .setup_rgmii_delay = lan937x_setup_rgmii_delay,
+ .fdb_dump = ksz9477_fdb_dump,
+ .fdb_add = ksz9477_fdb_add,
+ .fdb_del = ksz9477_fdb_del,
+ .mdb_add = ksz9477_mdb_add,
+ .mdb_del = ksz9477_mdb_del,
+ .change_mtu = lan937x_change_mtu,
+ .max_mtu = ksz9477_max_mtu,
+ .phylink_mac_link_up = ksz9477_phylink_mac_link_up,
+ .config_cpu_port = lan937x_config_cpu_port,
+ .enable_stp_addr = ksz9477_enable_stp_addr,
+ .reset = lan937x_reset_switch,
+ .init = lan937x_switch_init,
+ .exit = lan937x_switch_exit,
+};
+
+static const u16 ksz8795_regs[] = {
+ [REG_IND_CTRL_0] = 0x6E,
+ [REG_IND_DATA_8] = 0x70,
+ [REG_IND_DATA_CHECK] = 0x72,
+ [REG_IND_DATA_HI] = 0x71,
+ [REG_IND_DATA_LO] = 0x75,
+ [REG_IND_MIB_CHECK] = 0x74,
+ [REG_IND_BYTE] = 0xA0,
+ [P_FORCE_CTRL] = 0x0C,
+ [P_LINK_STATUS] = 0x0E,
+ [P_LOCAL_CTRL] = 0x07,
+ [P_NEG_RESTART_CTRL] = 0x0D,
+ [P_REMOTE_STATUS] = 0x08,
+ [P_SPEED_STATUS] = 0x09,
+ [S_TAIL_TAG_CTRL] = 0x0C,
+ [P_STP_CTRL] = 0x02,
+ [S_START_CTRL] = 0x01,
+ [S_BROADCAST_CTRL] = 0x06,
+ [S_MULTICAST_CTRL] = 0x04,
+ [P_XMII_CTRL_0] = 0x06,
+ [P_XMII_CTRL_1] = 0x06,
+};
+
+static const u32 ksz8795_masks[] = {
+ [PORT_802_1P_REMAPPING] = BIT(7),
+ [SW_TAIL_TAG_ENABLE] = BIT(1),
+ [MIB_COUNTER_OVERFLOW] = BIT(6),
+ [MIB_COUNTER_VALID] = BIT(5),
+ [VLAN_TABLE_FID] = GENMASK(6, 0),
+ [VLAN_TABLE_MEMBERSHIP] = GENMASK(11, 7),
+ [VLAN_TABLE_VALID] = BIT(12),
+ [STATIC_MAC_TABLE_VALID] = BIT(21),
+ [STATIC_MAC_TABLE_USE_FID] = BIT(23),
+ [STATIC_MAC_TABLE_FID] = GENMASK(30, 24),
+ [STATIC_MAC_TABLE_OVERRIDE] = BIT(22),
+ [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(20, 16),
+ [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(6, 0),
+ [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 29),
+ [DYNAMIC_MAC_TABLE_FID] = GENMASK(22, 16),
+ [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(26, 24),
+ [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(28, 27),
+ [P_MII_TX_FLOW_CTRL] = BIT(5),
+ [P_MII_RX_FLOW_CTRL] = BIT(5),
+};
+
+static const u8 ksz8795_xmii_ctrl0[] = {
+ [P_MII_100MBIT] = 0,
+ [P_MII_10MBIT] = 1,
+ [P_MII_FULL_DUPLEX] = 0,
+ [P_MII_HALF_DUPLEX] = 1,
+};
+
+static const u8 ksz8795_xmii_ctrl1[] = {
+ [P_RGMII_SEL] = 3,
+ [P_GMII_SEL] = 2,
+ [P_RMII_SEL] = 1,
+ [P_MII_SEL] = 0,
+ [P_GMII_1GBIT] = 1,
+ [P_GMII_NOT_1GBIT] = 0,
+};
+
+static const u8 ksz8795_shifts[] = {
+ [VLAN_TABLE_MEMBERSHIP_S] = 7,
+ [VLAN_TABLE] = 16,
+ [STATIC_MAC_FWD_PORTS] = 16,
+ [STATIC_MAC_FID] = 24,
+ [DYNAMIC_MAC_ENTRIES_H] = 3,
+ [DYNAMIC_MAC_ENTRIES] = 29,
+ [DYNAMIC_MAC_FID] = 16,
+ [DYNAMIC_MAC_TIMESTAMP] = 27,
+ [DYNAMIC_MAC_SRC_PORT] = 24,
+};
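+
+/* The masks[] and shifts[] tables are consumed together by the
+ * generation-independent helpers, e.g. (illustrative):
+ *
+ *   src_port = (data & masks[DYNAMIC_MAC_TABLE_SRC_PORT]) >>
+ *              shifts[DYNAMIC_MAC_SRC_PORT];
+ */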
+
+static const u16 ksz8863_regs[] = {
+ [REG_IND_CTRL_0] = 0x79,
+ [REG_IND_DATA_8] = 0x7B,
+ [REG_IND_DATA_CHECK] = 0x7B,
+ [REG_IND_DATA_HI] = 0x7C,
+ [REG_IND_DATA_LO] = 0x80,
+ [REG_IND_MIB_CHECK] = 0x80,
+ [P_FORCE_CTRL] = 0x0C,
+ [P_LINK_STATUS] = 0x0E,
+ [P_LOCAL_CTRL] = 0x0C,
+ [P_NEG_RESTART_CTRL] = 0x0D,
+ [P_REMOTE_STATUS] = 0x0E,
+ [P_SPEED_STATUS] = 0x0F,
+ [S_TAIL_TAG_CTRL] = 0x03,
+ [P_STP_CTRL] = 0x02,
+ [S_START_CTRL] = 0x01,
+ [S_BROADCAST_CTRL] = 0x06,
+ [S_MULTICAST_CTRL] = 0x04,
+};
+
+static const u32 ksz8863_masks[] = {
+ [PORT_802_1P_REMAPPING] = BIT(3),
+ [SW_TAIL_TAG_ENABLE] = BIT(6),
+ [MIB_COUNTER_OVERFLOW] = BIT(7),
+ [MIB_COUNTER_VALID] = BIT(6),
+ [VLAN_TABLE_FID] = GENMASK(15, 12),
+ [VLAN_TABLE_MEMBERSHIP] = GENMASK(18, 16),
+ [VLAN_TABLE_VALID] = BIT(19),
+ [STATIC_MAC_TABLE_VALID] = BIT(19),
+ [STATIC_MAC_TABLE_USE_FID] = BIT(21),
+ [STATIC_MAC_TABLE_FID] = GENMASK(25, 22),
+ [STATIC_MAC_TABLE_OVERRIDE] = BIT(20),
+ [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(18, 16),
+ [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(1, 0),
+ [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(2),
+ [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 24),
+ [DYNAMIC_MAC_TABLE_FID] = GENMASK(19, 16),
+ [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(21, 20),
+ [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(23, 22),
+};
+
+static const u8 ksz8863_shifts[] = {
+ [VLAN_TABLE_MEMBERSHIP_S] = 16,
+ [STATIC_MAC_FWD_PORTS] = 16,
+ [STATIC_MAC_FID] = 22,
+ [DYNAMIC_MAC_ENTRIES_H] = 8,
+ [DYNAMIC_MAC_ENTRIES] = 24,
+ [DYNAMIC_MAC_FID] = 16,
+ [DYNAMIC_MAC_TIMESTAMP] = 22,
+ [DYNAMIC_MAC_SRC_PORT] = 20,
+};
+
+static const u16 ksz9477_regs[] = {
+ [P_STP_CTRL] = 0x0B04,
+ [S_START_CTRL] = 0x0300,
+ [S_BROADCAST_CTRL] = 0x0332,
+ [S_MULTICAST_CTRL] = 0x0331,
+ [P_XMII_CTRL_0] = 0x0300,
+ [P_XMII_CTRL_1] = 0x0301,
+};
+
+static const u32 ksz9477_masks[] = {
+ [ALU_STAT_WRITE] = 0,
+ [ALU_STAT_READ] = 1,
+ [P_MII_TX_FLOW_CTRL] = BIT(5),
+ [P_MII_RX_FLOW_CTRL] = BIT(3),
+};
+
+static const u8 ksz9477_shifts[] = {
+ [ALU_STAT_INDEX] = 16,
+};
+
+static const u8 ksz9477_xmii_ctrl0[] = {
+ [P_MII_100MBIT] = 1,
+ [P_MII_10MBIT] = 0,
+ [P_MII_FULL_DUPLEX] = 1,
+ [P_MII_HALF_DUPLEX] = 0,
+};
+
+static const u8 ksz9477_xmii_ctrl1[] = {
+ [P_RGMII_SEL] = 0,
+ [P_RMII_SEL] = 1,
+ [P_GMII_SEL] = 2,
+ [P_MII_SEL] = 3,
+ [P_GMII_1GBIT] = 0,
+ [P_GMII_NOT_1GBIT] = 1,
+};
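+
+/* Note that these xMII encodings are inverted relative to
+ * ksz8795_xmii_ctrl1, which is why common code looks the value up per
+ * chip instead of hard-coding it, e.g. (illustrative):
+ *
+ *   data8 |= dev->info->xmii_ctrl1[P_RGMII_SEL];
+ */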
+
+static const u32 lan937x_masks[] = {
+ [ALU_STAT_WRITE] = 1,
+ [ALU_STAT_READ] = 2,
+ [P_MII_TX_FLOW_CTRL] = BIT(5),
+ [P_MII_RX_FLOW_CTRL] = BIT(3),
+};
+
+static const u8 lan937x_shifts[] = {
+ [ALU_STAT_INDEX] = 8,
+};
+
+static const struct regmap_range ksz8563_valid_regs[] = {
+ regmap_reg_range(0x0000, 0x0003),
+ regmap_reg_range(0x0006, 0x0006),
+ regmap_reg_range(0x000f, 0x001f),
+ regmap_reg_range(0x0100, 0x0100),
+ regmap_reg_range(0x0104, 0x0107),
+ regmap_reg_range(0x010d, 0x010d),
+ regmap_reg_range(0x0110, 0x0113),
+ regmap_reg_range(0x0120, 0x012b),
+ regmap_reg_range(0x0201, 0x0201),
+ regmap_reg_range(0x0210, 0x0213),
+ regmap_reg_range(0x0300, 0x0300),
+ regmap_reg_range(0x0302, 0x031b),
+ regmap_reg_range(0x0320, 0x032b),
+ regmap_reg_range(0x0330, 0x0336),
+ regmap_reg_range(0x0338, 0x033e),
+ regmap_reg_range(0x0340, 0x035f),
+ regmap_reg_range(0x0370, 0x0370),
+ regmap_reg_range(0x0378, 0x0378),
+ regmap_reg_range(0x037c, 0x037d),
+ regmap_reg_range(0x0390, 0x0393),
+ regmap_reg_range(0x0400, 0x040e),
+ regmap_reg_range(0x0410, 0x042f),
+ regmap_reg_range(0x0500, 0x0519),
+ regmap_reg_range(0x0520, 0x054b),
+ regmap_reg_range(0x0550, 0x05b3),
+
+ /* port 1 */
+ regmap_reg_range(0x1000, 0x1001),
+ regmap_reg_range(0x1004, 0x100b),
+ regmap_reg_range(0x1013, 0x1013),
+ regmap_reg_range(0x1017, 0x1017),
+ regmap_reg_range(0x101b, 0x101b),
+ regmap_reg_range(0x101f, 0x1021),
+ regmap_reg_range(0x1030, 0x1030),
+ regmap_reg_range(0x1100, 0x1111),
+ regmap_reg_range(0x111a, 0x111d),
+ regmap_reg_range(0x1122, 0x1127),
+ regmap_reg_range(0x112a, 0x112b),
+ regmap_reg_range(0x1136, 0x1139),
+ regmap_reg_range(0x113e, 0x113f),
+ regmap_reg_range(0x1400, 0x1401),
+ regmap_reg_range(0x1403, 0x1403),
+ regmap_reg_range(0x1410, 0x1417),
+ regmap_reg_range(0x1420, 0x1423),
+ regmap_reg_range(0x1500, 0x1507),
+ regmap_reg_range(0x1600, 0x1612),
+ regmap_reg_range(0x1800, 0x180f),
+ regmap_reg_range(0x1900, 0x1907),
+ regmap_reg_range(0x1914, 0x191b),
+ regmap_reg_range(0x1a00, 0x1a03),
+ regmap_reg_range(0x1a04, 0x1a08),
+ regmap_reg_range(0x1b00, 0x1b01),
+ regmap_reg_range(0x1b04, 0x1b04),
+ regmap_reg_range(0x1c00, 0x1c05),
+ regmap_reg_range(0x1c08, 0x1c1b),
+
+ /* port 2 */
+ regmap_reg_range(0x2000, 0x2001),
+ regmap_reg_range(0x2004, 0x200b),
+ regmap_reg_range(0x2013, 0x2013),
+ regmap_reg_range(0x2017, 0x2017),
+ regmap_reg_range(0x201b, 0x201b),
+ regmap_reg_range(0x201f, 0x2021),
+ regmap_reg_range(0x2030, 0x2030),
+ regmap_reg_range(0x2100, 0x2111),
+ regmap_reg_range(0x211a, 0x211d),
+ regmap_reg_range(0x2122, 0x2127),
+ regmap_reg_range(0x212a, 0x212b),
+ regmap_reg_range(0x2136, 0x2139),
+ regmap_reg_range(0x213e, 0x213f),
+ regmap_reg_range(0x2400, 0x2401),
+ regmap_reg_range(0x2403, 0x2403),
+ regmap_reg_range(0x2410, 0x2417),
+ regmap_reg_range(0x2420, 0x2423),
+ regmap_reg_range(0x2500, 0x2507),
+ regmap_reg_range(0x2600, 0x2612),
+ regmap_reg_range(0x2800, 0x280f),
+ regmap_reg_range(0x2900, 0x2907),
+ regmap_reg_range(0x2914, 0x291b),
+ regmap_reg_range(0x2a00, 0x2a03),
+ regmap_reg_range(0x2a04, 0x2a08),
+ regmap_reg_range(0x2b00, 0x2b01),
+ regmap_reg_range(0x2b04, 0x2b04),
+ regmap_reg_range(0x2c00, 0x2c05),
+ regmap_reg_range(0x2c08, 0x2c1b),
+
+ /* port 3 */
+ regmap_reg_range(0x3000, 0x3001),
+ regmap_reg_range(0x3004, 0x300b),
+ regmap_reg_range(0x3013, 0x3013),
+ regmap_reg_range(0x3017, 0x3017),
+ regmap_reg_range(0x301b, 0x301b),
+ regmap_reg_range(0x301f, 0x3021),
+ regmap_reg_range(0x3030, 0x3030),
+ regmap_reg_range(0x3300, 0x3301),
+ regmap_reg_range(0x3303, 0x3303),
+ regmap_reg_range(0x3400, 0x3401),
+ regmap_reg_range(0x3403, 0x3403),
+ regmap_reg_range(0x3410, 0x3417),
+ regmap_reg_range(0x3420, 0x3423),
+ regmap_reg_range(0x3500, 0x3507),
+ regmap_reg_range(0x3600, 0x3612),
+ regmap_reg_range(0x3800, 0x380f),
+ regmap_reg_range(0x3900, 0x3907),
+ regmap_reg_range(0x3914, 0x391b),
+ regmap_reg_range(0x3a00, 0x3a03),
+ regmap_reg_range(0x3a04, 0x3a08),
+ regmap_reg_range(0x3b00, 0x3b01),
+ regmap_reg_range(0x3b04, 0x3b04),
+ regmap_reg_range(0x3c00, 0x3c05),
+ regmap_reg_range(0x3c08, 0x3c1b),
+};
+
+static const struct regmap_access_table ksz8563_register_set = {
+ .yes_ranges = ksz8563_valid_regs,
+ .n_yes_ranges = ARRAY_SIZE(ksz8563_valid_regs),
+};
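+
+/* Access tables such as this one are wired up as .rd_table/.wr_table in
+ * ksz_switch_chips[] below, so regmap rejects reads and writes to any
+ * register outside the documented ranges.
+ */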
+
+static const struct regmap_range ksz9477_valid_regs[] = {
+ regmap_reg_range(0x0000, 0x0003),
+ regmap_reg_range(0x0006, 0x0006),
+ regmap_reg_range(0x0010, 0x001f),
+ regmap_reg_range(0x0100, 0x0100),
+ regmap_reg_range(0x0103, 0x0107),
+ regmap_reg_range(0x010d, 0x010d),
+ regmap_reg_range(0x0110, 0x0113),
+ regmap_reg_range(0x0120, 0x012b),
+ regmap_reg_range(0x0201, 0x0201),
+ regmap_reg_range(0x0210, 0x0213),
+ regmap_reg_range(0x0300, 0x0300),
+ regmap_reg_range(0x0302, 0x031b),
+ regmap_reg_range(0x0320, 0x032b),
+ regmap_reg_range(0x0330, 0x0336),
+ regmap_reg_range(0x0338, 0x033b),
+ regmap_reg_range(0x033e, 0x033e),
+ regmap_reg_range(0x0340, 0x035f),
+ regmap_reg_range(0x0370, 0x0370),
+ regmap_reg_range(0x0378, 0x0378),
+ regmap_reg_range(0x037c, 0x037d),
+ regmap_reg_range(0x0390, 0x0393),
+ regmap_reg_range(0x0400, 0x040e),
+ regmap_reg_range(0x0410, 0x042f),
+ regmap_reg_range(0x0444, 0x044b),
+ regmap_reg_range(0x0450, 0x046f),
+ regmap_reg_range(0x0500, 0x0519),
+ regmap_reg_range(0x0520, 0x054b),
+ regmap_reg_range(0x0550, 0x05b3),
+ regmap_reg_range(0x0604, 0x060b),
+ regmap_reg_range(0x0610, 0x0612),
+ regmap_reg_range(0x0614, 0x062c),
+ regmap_reg_range(0x0640, 0x0645),
+ regmap_reg_range(0x0648, 0x064d),
+
+ /* port 1 */
+ regmap_reg_range(0x1000, 0x1001),
+ regmap_reg_range(0x1013, 0x1013),
+ regmap_reg_range(0x1017, 0x1017),
+ regmap_reg_range(0x101b, 0x101b),
+ regmap_reg_range(0x101f, 0x1020),
+ regmap_reg_range(0x1030, 0x1030),
+ regmap_reg_range(0x1100, 0x1115),
+ regmap_reg_range(0x111a, 0x111f),
+ regmap_reg_range(0x1120, 0x112b),
+ regmap_reg_range(0x1134, 0x113b),
+ regmap_reg_range(0x113c, 0x113f),
+ regmap_reg_range(0x1400, 0x1401),
+ regmap_reg_range(0x1403, 0x1403),
+ regmap_reg_range(0x1410, 0x1417),
+ regmap_reg_range(0x1420, 0x1423),
+ regmap_reg_range(0x1500, 0x1507),
+ regmap_reg_range(0x1600, 0x1613),
+ regmap_reg_range(0x1800, 0x180f),
+ regmap_reg_range(0x1820, 0x1827),
+ regmap_reg_range(0x1830, 0x1837),
+ regmap_reg_range(0x1840, 0x184b),
+ regmap_reg_range(0x1900, 0x1907),
+ regmap_reg_range(0x1914, 0x191b),
+ regmap_reg_range(0x1920, 0x1920),
+ regmap_reg_range(0x1923, 0x1927),
+ regmap_reg_range(0x1a00, 0x1a03),
+ regmap_reg_range(0x1a04, 0x1a07),
+ regmap_reg_range(0x1b00, 0x1b01),
+ regmap_reg_range(0x1b04, 0x1b04),
+ regmap_reg_range(0x1c00, 0x1c05),
+ regmap_reg_range(0x1c08, 0x1c1b),
+
+ /* port 2 */
+ regmap_reg_range(0x2000, 0x2001),
+ regmap_reg_range(0x2013, 0x2013),
+ regmap_reg_range(0x2017, 0x2017),
+ regmap_reg_range(0x201b, 0x201b),
+ regmap_reg_range(0x201f, 0x2020),
+ regmap_reg_range(0x2030, 0x2030),
+ regmap_reg_range(0x2100, 0x2115),
+ regmap_reg_range(0x211a, 0x211f),
+ regmap_reg_range(0x2120, 0x212b),
+ regmap_reg_range(0x2134, 0x213b),
+ regmap_reg_range(0x213c, 0x213f),
+ regmap_reg_range(0x2400, 0x2401),
+ regmap_reg_range(0x2403, 0x2403),
+ regmap_reg_range(0x2410, 0x2417),
+ regmap_reg_range(0x2420, 0x2423),
+ regmap_reg_range(0x2500, 0x2507),
+ regmap_reg_range(0x2600, 0x2613),
+ regmap_reg_range(0x2800, 0x280f),
+ regmap_reg_range(0x2820, 0x2827),
+ regmap_reg_range(0x2830, 0x2837),
+ regmap_reg_range(0x2840, 0x284b),
+ regmap_reg_range(0x2900, 0x2907),
+ regmap_reg_range(0x2914, 0x291b),
+ regmap_reg_range(0x2920, 0x2920),
+ regmap_reg_range(0x2923, 0x2927),
+ regmap_reg_range(0x2a00, 0x2a03),
+ regmap_reg_range(0x2a04, 0x2a07),
+ regmap_reg_range(0x2b00, 0x2b01),
+ regmap_reg_range(0x2b04, 0x2b04),
+ regmap_reg_range(0x2c00, 0x2c05),
+ regmap_reg_range(0x2c08, 0x2c1b),
+
+ /* port 3 */
+ regmap_reg_range(0x3000, 0x3001),
+ regmap_reg_range(0x3013, 0x3013),
+ regmap_reg_range(0x3017, 0x3017),
+ regmap_reg_range(0x301b, 0x301b),
+ regmap_reg_range(0x301f, 0x3020),
+ regmap_reg_range(0x3030, 0x3030),
+ regmap_reg_range(0x3100, 0x3115),
+ regmap_reg_range(0x311a, 0x311f),
+ regmap_reg_range(0x3120, 0x312b),
+ regmap_reg_range(0x3134, 0x313b),
+ regmap_reg_range(0x313c, 0x313f),
+ regmap_reg_range(0x3400, 0x3401),
+ regmap_reg_range(0x3403, 0x3403),
+ regmap_reg_range(0x3410, 0x3417),
+ regmap_reg_range(0x3420, 0x3423),
+ regmap_reg_range(0x3500, 0x3507),
+ regmap_reg_range(0x3600, 0x3613),
+ regmap_reg_range(0x3800, 0x380f),
+ regmap_reg_range(0x3820, 0x3827),
+ regmap_reg_range(0x3830, 0x3837),
+ regmap_reg_range(0x3840, 0x384b),
+ regmap_reg_range(0x3900, 0x3907),
+ regmap_reg_range(0x3914, 0x391b),
+ regmap_reg_range(0x3920, 0x3920),
+ regmap_reg_range(0x3923, 0x3927),
+ regmap_reg_range(0x3a00, 0x3a03),
+ regmap_reg_range(0x3a04, 0x3a07),
+ regmap_reg_range(0x3b00, 0x3b01),
+ regmap_reg_range(0x3b04, 0x3b04),
+ regmap_reg_range(0x3c00, 0x3c05),
+ regmap_reg_range(0x3c08, 0x3c1b),
+
+ /* port 4 */
+ regmap_reg_range(0x4000, 0x4001),
+ regmap_reg_range(0x4013, 0x4013),
+ regmap_reg_range(0x4017, 0x4017),
+ regmap_reg_range(0x401b, 0x401b),
+ regmap_reg_range(0x401f, 0x4020),
+ regmap_reg_range(0x4030, 0x4030),
+ regmap_reg_range(0x4100, 0x4115),
+ regmap_reg_range(0x411a, 0x411f),
+ regmap_reg_range(0x4120, 0x412b),
+ regmap_reg_range(0x4134, 0x413b),
+ regmap_reg_range(0x413c, 0x413f),
+ regmap_reg_range(0x4400, 0x4401),
+ regmap_reg_range(0x4403, 0x4403),
+ regmap_reg_range(0x4410, 0x4417),
+ regmap_reg_range(0x4420, 0x4423),
+ regmap_reg_range(0x4500, 0x4507),
+ regmap_reg_range(0x4600, 0x4613),
+ regmap_reg_range(0x4800, 0x480f),
+ regmap_reg_range(0x4820, 0x4827),
+ regmap_reg_range(0x4830, 0x4837),
+ regmap_reg_range(0x4840, 0x484b),
+ regmap_reg_range(0x4900, 0x4907),
+ regmap_reg_range(0x4914, 0x491b),
+ regmap_reg_range(0x4920, 0x4920),
+ regmap_reg_range(0x4923, 0x4927),
+ regmap_reg_range(0x4a00, 0x4a03),
+ regmap_reg_range(0x4a04, 0x4a07),
+ regmap_reg_range(0x4b00, 0x4b01),
+ regmap_reg_range(0x4b04, 0x4b04),
+ regmap_reg_range(0x4c00, 0x4c05),
+ regmap_reg_range(0x4c08, 0x4c1b),
+
+ /* port 5 */
+ regmap_reg_range(0x5000, 0x5001),
+ regmap_reg_range(0x5013, 0x5013),
+ regmap_reg_range(0x5017, 0x5017),
+ regmap_reg_range(0x501b, 0x501b),
+ regmap_reg_range(0x501f, 0x5020),
+ regmap_reg_range(0x5030, 0x5030),
+ regmap_reg_range(0x5100, 0x5115),
+ regmap_reg_range(0x511a, 0x511f),
+ regmap_reg_range(0x5120, 0x512b),
+ regmap_reg_range(0x5134, 0x513b),
+ regmap_reg_range(0x513c, 0x513f),
+ regmap_reg_range(0x5400, 0x5401),
+ regmap_reg_range(0x5403, 0x5403),
+ regmap_reg_range(0x5410, 0x5417),
+ regmap_reg_range(0x5420, 0x5423),
+ regmap_reg_range(0x5500, 0x5507),
+ regmap_reg_range(0x5600, 0x5613),
+ regmap_reg_range(0x5800, 0x580f),
+ regmap_reg_range(0x5820, 0x5827),
+ regmap_reg_range(0x5830, 0x5837),
+ regmap_reg_range(0x5840, 0x584b),
+ regmap_reg_range(0x5900, 0x5907),
+ regmap_reg_range(0x5914, 0x591b),
+ regmap_reg_range(0x5920, 0x5920),
+ regmap_reg_range(0x5923, 0x5927),
+ regmap_reg_range(0x5a00, 0x5a03),
+ regmap_reg_range(0x5a04, 0x5a07),
+ regmap_reg_range(0x5b00, 0x5b01),
+ regmap_reg_range(0x5b04, 0x5b04),
+ regmap_reg_range(0x5c00, 0x5c05),
+ regmap_reg_range(0x5c08, 0x5c1b),
+
+ /* port 6 */
+ regmap_reg_range(0x6000, 0x6001),
+ regmap_reg_range(0x6013, 0x6013),
+ regmap_reg_range(0x6017, 0x6017),
+ regmap_reg_range(0x601b, 0x601b),
+ regmap_reg_range(0x601f, 0x6020),
+ regmap_reg_range(0x6030, 0x6030),
+ regmap_reg_range(0x6300, 0x6301),
+ regmap_reg_range(0x6400, 0x6401),
+ regmap_reg_range(0x6403, 0x6403),
+ regmap_reg_range(0x6410, 0x6417),
+ regmap_reg_range(0x6420, 0x6423),
+ regmap_reg_range(0x6500, 0x6507),
+ regmap_reg_range(0x6600, 0x6613),
+ regmap_reg_range(0x6800, 0x680f),
+ regmap_reg_range(0x6820, 0x6827),
+ regmap_reg_range(0x6830, 0x6837),
+ regmap_reg_range(0x6840, 0x684b),
+ regmap_reg_range(0x6900, 0x6907),
+ regmap_reg_range(0x6914, 0x691b),
+ regmap_reg_range(0x6920, 0x6920),
+ regmap_reg_range(0x6923, 0x6927),
+ regmap_reg_range(0x6a00, 0x6a03),
+ regmap_reg_range(0x6a04, 0x6a07),
+ regmap_reg_range(0x6b00, 0x6b01),
+ regmap_reg_range(0x6b04, 0x6b04),
+ regmap_reg_range(0x6c00, 0x6c05),
+ regmap_reg_range(0x6c08, 0x6c1b),
+
+ /* port 7 */
+ regmap_reg_range(0x7000, 0x7001),
+ regmap_reg_range(0x7013, 0x7013),
+ regmap_reg_range(0x7017, 0x7017),
+ regmap_reg_range(0x701b, 0x701b),
+ regmap_reg_range(0x701f, 0x7020),
+ regmap_reg_range(0x7030, 0x7030),
+ regmap_reg_range(0x7200, 0x7203),
+ regmap_reg_range(0x7206, 0x7207),
+ regmap_reg_range(0x7300, 0x7301),
+ regmap_reg_range(0x7400, 0x7401),
+ regmap_reg_range(0x7403, 0x7403),
+ regmap_reg_range(0x7410, 0x7417),
+ regmap_reg_range(0x7420, 0x7423),
+ regmap_reg_range(0x7500, 0x7507),
+ regmap_reg_range(0x7600, 0x7613),
+ regmap_reg_range(0x7800, 0x780f),
+ regmap_reg_range(0x7820, 0x7827),
+ regmap_reg_range(0x7830, 0x7837),
+ regmap_reg_range(0x7840, 0x784b),
+ regmap_reg_range(0x7900, 0x7907),
+ regmap_reg_range(0x7914, 0x791b),
+ regmap_reg_range(0x7920, 0x7920),
+ regmap_reg_range(0x7923, 0x7927),
+ regmap_reg_range(0x7a00, 0x7a03),
+ regmap_reg_range(0x7a04, 0x7a07),
+ regmap_reg_range(0x7b00, 0x7b01),
+ regmap_reg_range(0x7b04, 0x7b04),
+ regmap_reg_range(0x7c00, 0x7c05),
+ regmap_reg_range(0x7c08, 0x7c1b),
+};
+
+static const struct regmap_access_table ksz9477_register_set = {
+ .yes_ranges = ksz9477_valid_regs,
+ .n_yes_ranges = ARRAY_SIZE(ksz9477_valid_regs),
+};
+
+static const struct regmap_range ksz9896_valid_regs[] = {
+ regmap_reg_range(0x0000, 0x0003),
+ regmap_reg_range(0x0006, 0x0006),
+ regmap_reg_range(0x0010, 0x001f),
+ regmap_reg_range(0x0100, 0x0100),
+ regmap_reg_range(0x0103, 0x0107),
+ regmap_reg_range(0x010d, 0x010d),
+ regmap_reg_range(0x0110, 0x0113),
+ regmap_reg_range(0x0120, 0x0127),
+ regmap_reg_range(0x0201, 0x0201),
+ regmap_reg_range(0x0210, 0x0213),
+ regmap_reg_range(0x0300, 0x0300),
+ regmap_reg_range(0x0302, 0x030b),
+ regmap_reg_range(0x0310, 0x031b),
+ regmap_reg_range(0x0320, 0x032b),
+ regmap_reg_range(0x0330, 0x0336),
+ regmap_reg_range(0x0338, 0x033b),
+ regmap_reg_range(0x033e, 0x033e),
+ regmap_reg_range(0x0340, 0x035f),
+ regmap_reg_range(0x0370, 0x0370),
+ regmap_reg_range(0x0378, 0x0378),
+ regmap_reg_range(0x037c, 0x037d),
+ regmap_reg_range(0x0390, 0x0393),
+ regmap_reg_range(0x0400, 0x040e),
+ regmap_reg_range(0x0410, 0x042f),
+
+ /* port 1 */
+ regmap_reg_range(0x1000, 0x1001),
+ regmap_reg_range(0x1013, 0x1013),
+ regmap_reg_range(0x1017, 0x1017),
+ regmap_reg_range(0x101b, 0x101b),
+ regmap_reg_range(0x101f, 0x1020),
+ regmap_reg_range(0x1030, 0x1030),
+ regmap_reg_range(0x1100, 0x1115),
+ regmap_reg_range(0x111a, 0x111f),
+ regmap_reg_range(0x1122, 0x1127),
+ regmap_reg_range(0x112a, 0x112b),
+ regmap_reg_range(0x1136, 0x1139),
+ regmap_reg_range(0x113e, 0x113f),
+ regmap_reg_range(0x1400, 0x1401),
+ regmap_reg_range(0x1403, 0x1403),
+ regmap_reg_range(0x1410, 0x1417),
+ regmap_reg_range(0x1420, 0x1423),
+ regmap_reg_range(0x1500, 0x1507),
+ regmap_reg_range(0x1600, 0x1612),
+ regmap_reg_range(0x1800, 0x180f),
+ regmap_reg_range(0x1820, 0x1827),
+ regmap_reg_range(0x1830, 0x1837),
+ regmap_reg_range(0x1840, 0x184b),
+ regmap_reg_range(0x1900, 0x1907),
+ regmap_reg_range(0x1914, 0x1915),
+ regmap_reg_range(0x1a00, 0x1a03),
+ regmap_reg_range(0x1a04, 0x1a07),
+ regmap_reg_range(0x1b00, 0x1b01),
+ regmap_reg_range(0x1b04, 0x1b04),
+
+ /* port 2 */
+ regmap_reg_range(0x2000, 0x2001),
+ regmap_reg_range(0x2013, 0x2013),
+ regmap_reg_range(0x2017, 0x2017),
+ regmap_reg_range(0x201b, 0x201b),
+ regmap_reg_range(0x201f, 0x2020),
+ regmap_reg_range(0x2030, 0x2030),
+ regmap_reg_range(0x2100, 0x2115),
+ regmap_reg_range(0x211a, 0x211f),
+ regmap_reg_range(0x2122, 0x2127),
+ regmap_reg_range(0x212a, 0x212b),
+ regmap_reg_range(0x2136, 0x2139),
+ regmap_reg_range(0x213e, 0x213f),
+ regmap_reg_range(0x2400, 0x2401),
+ regmap_reg_range(0x2403, 0x2403),
+ regmap_reg_range(0x2410, 0x2417),
+ regmap_reg_range(0x2420, 0x2423),
+ regmap_reg_range(0x2500, 0x2507),
+ regmap_reg_range(0x2600, 0x2612),
+ regmap_reg_range(0x2800, 0x280f),
+ regmap_reg_range(0x2820, 0x2827),
+ regmap_reg_range(0x2830, 0x2837),
+ regmap_reg_range(0x2840, 0x284b),
+ regmap_reg_range(0x2900, 0x2907),
+ regmap_reg_range(0x2914, 0x2915),
+ regmap_reg_range(0x2a00, 0x2a03),
+ regmap_reg_range(0x2a04, 0x2a07),
+ regmap_reg_range(0x2b00, 0x2b01),
+ regmap_reg_range(0x2b04, 0x2b04),
+
+ /* port 3 */
+ regmap_reg_range(0x3000, 0x3001),
+ regmap_reg_range(0x3013, 0x3013),
+ regmap_reg_range(0x3017, 0x3017),
+ regmap_reg_range(0x301b, 0x301b),
+ regmap_reg_range(0x301f, 0x3020),
+ regmap_reg_range(0x3030, 0x3030),
+ regmap_reg_range(0x3100, 0x3115),
+ regmap_reg_range(0x311a, 0x311f),
+ regmap_reg_range(0x3122, 0x3127),
+ regmap_reg_range(0x312a, 0x312b),
+ regmap_reg_range(0x3136, 0x3139),
+ regmap_reg_range(0x313e, 0x313f),
+ regmap_reg_range(0x3400, 0x3401),
+ regmap_reg_range(0x3403, 0x3403),
+ regmap_reg_range(0x3410, 0x3417),
+ regmap_reg_range(0x3420, 0x3423),
+ regmap_reg_range(0x3500, 0x3507),
+ regmap_reg_range(0x3600, 0x3612),
+ regmap_reg_range(0x3800, 0x380f),
+ regmap_reg_range(0x3820, 0x3827),
+ regmap_reg_range(0x3830, 0x3837),
+ regmap_reg_range(0x3840, 0x384b),
+ regmap_reg_range(0x3900, 0x3907),
+ regmap_reg_range(0x3914, 0x3915),
+ regmap_reg_range(0x3a00, 0x3a03),
+ regmap_reg_range(0x3a04, 0x3a07),
+ regmap_reg_range(0x3b00, 0x3b01),
+ regmap_reg_range(0x3b04, 0x3b04),
+
+ /* port 4 */
+ regmap_reg_range(0x4000, 0x4001),
+ regmap_reg_range(0x4013, 0x4013),
+ regmap_reg_range(0x4017, 0x4017),
+ regmap_reg_range(0x401b, 0x401b),
+ regmap_reg_range(0x401f, 0x4020),
+ regmap_reg_range(0x4030, 0x4030),
+ regmap_reg_range(0x4100, 0x4115),
+ regmap_reg_range(0x411a, 0x411f),
+ regmap_reg_range(0x4122, 0x4127),
+ regmap_reg_range(0x412a, 0x412b),
+ regmap_reg_range(0x4136, 0x4139),
+ regmap_reg_range(0x413e, 0x413f),
+ regmap_reg_range(0x4400, 0x4401),
+ regmap_reg_range(0x4403, 0x4403),
+ regmap_reg_range(0x4410, 0x4417),
+ regmap_reg_range(0x4420, 0x4423),
+ regmap_reg_range(0x4500, 0x4507),
+ regmap_reg_range(0x4600, 0x4612),
+ regmap_reg_range(0x4800, 0x480f),
+ regmap_reg_range(0x4820, 0x4827),
+ regmap_reg_range(0x4830, 0x4837),
+ regmap_reg_range(0x4840, 0x484b),
+ regmap_reg_range(0x4900, 0x4907),
+ regmap_reg_range(0x4914, 0x4915),
+ regmap_reg_range(0x4a00, 0x4a03),
+ regmap_reg_range(0x4a04, 0x4a07),
+ regmap_reg_range(0x4b00, 0x4b01),
+ regmap_reg_range(0x4b04, 0x4b04),
+
+ /* port 5 */
+ regmap_reg_range(0x5000, 0x5001),
+ regmap_reg_range(0x5013, 0x5013),
+ regmap_reg_range(0x5017, 0x5017),
+ regmap_reg_range(0x501b, 0x501b),
+ regmap_reg_range(0x501f, 0x5020),
+ regmap_reg_range(0x5030, 0x5030),
+ regmap_reg_range(0x5100, 0x5115),
+ regmap_reg_range(0x511a, 0x511f),
+ regmap_reg_range(0x5122, 0x5127),
+ regmap_reg_range(0x512a, 0x512b),
+ regmap_reg_range(0x5136, 0x5139),
+ regmap_reg_range(0x513e, 0x513f),
+ regmap_reg_range(0x5400, 0x5401),
+ regmap_reg_range(0x5403, 0x5403),
+ regmap_reg_range(0x5410, 0x5417),
+ regmap_reg_range(0x5420, 0x5423),
+ regmap_reg_range(0x5500, 0x5507),
+ regmap_reg_range(0x5600, 0x5612),
+ regmap_reg_range(0x5800, 0x580f),
+ regmap_reg_range(0x5820, 0x5827),
+ regmap_reg_range(0x5830, 0x5837),
+ regmap_reg_range(0x5840, 0x584b),
+ regmap_reg_range(0x5900, 0x5907),
+ regmap_reg_range(0x5914, 0x5915),
+ regmap_reg_range(0x5a00, 0x5a03),
+ regmap_reg_range(0x5a04, 0x5a07),
+ regmap_reg_range(0x5b00, 0x5b01),
+ regmap_reg_range(0x5b04, 0x5b04),
+
+ /* port 6 */
+ regmap_reg_range(0x6000, 0x6001),
+ regmap_reg_range(0x6013, 0x6013),
+ regmap_reg_range(0x6017, 0x6017),
+ regmap_reg_range(0x601b, 0x601b),
+ regmap_reg_range(0x601f, 0x6020),
+ regmap_reg_range(0x6030, 0x6030),
+ regmap_reg_range(0x6100, 0x6115),
+ regmap_reg_range(0x611a, 0x611f),
+ regmap_reg_range(0x6122, 0x6127),
+ regmap_reg_range(0x612a, 0x612b),
+ regmap_reg_range(0x6136, 0x6139),
+ regmap_reg_range(0x613e, 0x613f),
+ regmap_reg_range(0x6300, 0x6301),
+ regmap_reg_range(0x6400, 0x6401),
+ regmap_reg_range(0x6403, 0x6403),
+ regmap_reg_range(0x6410, 0x6417),
+ regmap_reg_range(0x6420, 0x6423),
+ regmap_reg_range(0x6500, 0x6507),
+ regmap_reg_range(0x6600, 0x6612),
+ regmap_reg_range(0x6800, 0x680f),
+ regmap_reg_range(0x6820, 0x6827),
+ regmap_reg_range(0x6830, 0x6837),
+ regmap_reg_range(0x6840, 0x684b),
+ regmap_reg_range(0x6900, 0x6907),
+ regmap_reg_range(0x6914, 0x6915),
+ regmap_reg_range(0x6a00, 0x6a03),
+ regmap_reg_range(0x6a04, 0x6a07),
+ regmap_reg_range(0x6b00, 0x6b01),
+ regmap_reg_range(0x6b04, 0x6b04),
+};
+
+static const struct regmap_access_table ksz9896_register_set = {
+ .yes_ranges = ksz9896_valid_regs,
+ .n_yes_ranges = ARRAY_SIZE(ksz9896_valid_regs),
+};
+
+const struct ksz_chip_data ksz_switch_chips[] = {
+ [KSZ8563] = {
+ .chip_id = KSZ8563_CHIP_ID,
+ .dev_name = "KSZ8563",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x07, /* can be configured as cpu port */
+ .port_cnt = 3, /* total port count */
+ .ops = &ksz9477_dev_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz8795_xmii_ctrl1, /* Same as ksz8795 */
+ .supports_mii = {false, false, true},
+ .supports_rmii = {false, false, true},
+ .supports_rgmii = {false, false, true},
+ .internal_phy = {true, true, false},
+ .gbit_capable = {false, false, true},
+ .wr_table = &ksz8563_register_set,
+ .rd_table = &ksz8563_register_set,
+ },
+
+ [KSZ8795] = {
+ .chip_id = KSZ8795_CHIP_ID,
+ .dev_name = "KSZ8795",
+ .num_vlans = 4096,
+ .num_alus = 0,
+ .num_statics = 8,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 5, /* total cpu and user ports */
+ .ops = &ksz8_dev_ops,
+ .ksz87xx_eee_link_erratum = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8795_regs,
+ .masks = ksz8795_masks,
+ .shifts = ksz8795_shifts,
+ .xmii_ctrl0 = ksz8795_xmii_ctrl0,
+ .xmii_ctrl1 = ksz8795_xmii_ctrl1,
+ .supports_mii = {false, false, false, false, true},
+ .supports_rmii = {false, false, false, false, true},
+ .supports_rgmii = {false, false, false, false, true},
+ .internal_phy = {true, true, true, true, false},
+ },
+
+ [KSZ8794] = {
+ /* WARNING
+ * =======
+ * KSZ8794 is similar to KSZ8795, except that the port map
+ * contains a gap between the external and CPU ports; the
+ * port map is NOT contiguous. The per-port register map is
+ * shifted accordingly, i.e. registers at offset 0x40 are
+ * NOT used on KSZ8794 but ARE used on KSZ8795 for external
+ * port 3.
+ * external cpu
+ * KSZ8794 0,1,2 4
+ * KSZ8795 0,1,2,3 4
+ * KSZ8765 0,1,2,3 4
+ * port_cnt is configured as 5, even though it is 4
+ */
+ .chip_id = KSZ8794_CHIP_ID,
+ .dev_name = "KSZ8794",
+ .num_vlans = 4096,
+ .num_alus = 0,
+ .num_statics = 8,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 5, /* total cpu and user ports */
+ .ops = &ksz8_dev_ops,
+ .ksz87xx_eee_link_erratum = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8795_regs,
+ .masks = ksz8795_masks,
+ .shifts = ksz8795_shifts,
+ .xmii_ctrl0 = ksz8795_xmii_ctrl0,
+ .xmii_ctrl1 = ksz8795_xmii_ctrl1,
+ .supports_mii = {false, false, false, false, true},
+ .supports_rmii = {false, false, false, false, true},
+ .supports_rgmii = {false, false, false, false, true},
+ .internal_phy = {true, true, true, false, false},
+ },
+
+ [KSZ8765] = {
+ .chip_id = KSZ8765_CHIP_ID,
+ .dev_name = "KSZ8765",
+ .num_vlans = 4096,
+ .num_alus = 0,
+ .num_statics = 8,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 5, /* total cpu and user ports */
+ .ops = &ksz8_dev_ops,
+ .ksz87xx_eee_link_erratum = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8795_regs,
+ .masks = ksz8795_masks,
+ .shifts = ksz8795_shifts,
+ .xmii_ctrl0 = ksz8795_xmii_ctrl0,
+ .xmii_ctrl1 = ksz8795_xmii_ctrl1,
+ .supports_mii = {false, false, false, false, true},
+ .supports_rmii = {false, false, false, false, true},
+ .supports_rgmii = {false, false, false, false, true},
+ .internal_phy = {true, true, true, true, false},
+ },
+
+ [KSZ8830] = {
+ .chip_id = KSZ8830_CHIP_ID,
+ .dev_name = "KSZ8863/KSZ8873",
+ .num_vlans = 16,
+ .num_alus = 0,
+ .num_statics = 8,
+ .cpu_ports = 0x4, /* can be configured as cpu port */
+ .port_cnt = 3,
+ .ops = &ksz8_dev_ops,
+ .mib_names = ksz88xx_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz88xx_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8863_regs,
+ .masks = ksz8863_masks,
+ .shifts = ksz8863_shifts,
+ .supports_mii = {false, false, true},
+ .supports_rmii = {false, false, true},
+ .internal_phy = {true, true, false},
+ },
+
+ [KSZ9477] = {
+ .chip_id = KSZ9477_CHIP_ID,
+ .dev_name = "KSZ9477",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x7F, /* can be configured as cpu port */
+ .port_cnt = 7, /* total physical port count */
+ .port_nirqs = 4,
+ .ops = &ksz9477_dev_ops,
+ .phy_errata_9477 = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ false, true, false},
+ .supports_rmii = {false, false, false, false,
+ false, true, false},
+ .supports_rgmii = {false, false, false, false,
+ false, true, false},
+ .internal_phy = {true, true, true, true,
+ true, false, false},
+ .gbit_capable = {true, true, true, true, true, true, true},
+ .wr_table = &ksz9477_register_set,
+ .rd_table = &ksz9477_register_set,
+ },
+
+ [KSZ9896] = {
+ .chip_id = KSZ9896_CHIP_ID,
+ .dev_name = "KSZ9896",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x3F, /* can be configured as cpu port */
+ .port_cnt = 6, /* total physical port count */
+ .port_nirqs = 2,
+ .ops = &ksz9477_dev_ops,
+ .phy_errata_9477 = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ false, true},
+ .supports_rmii = {false, false, false, false,
+ false, true},
+ .supports_rgmii = {false, false, false, false,
+ false, true},
+ .internal_phy = {true, true, true, true,
+ true, false},
+ .gbit_capable = {true, true, true, true, true, true},
+ .wr_table = &ksz9896_register_set,
+ .rd_table = &ksz9896_register_set,
+ },
+
+ [KSZ9897] = {
+ .chip_id = KSZ9897_CHIP_ID,
+ .dev_name = "KSZ9897",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x7F, /* can be configured as cpu port */
+ .port_cnt = 7, /* total physical port count */
+ .port_nirqs = 2,
+ .ops = &ksz9477_dev_ops,
+ .phy_errata_9477 = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ false, true, true},
+ .supports_rmii = {false, false, false, false,
+ false, true, true},
+ .supports_rgmii = {false, false, false, false,
+ false, true, true},
+ .internal_phy = {true, true, true, true,
+ true, false, false},
+ .gbit_capable = {true, true, true, true, true, true, true},
+ },
+
+ [KSZ9893] = {
+ .chip_id = KSZ9893_CHIP_ID,
+ .dev_name = "KSZ9893",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x07, /* can be configured as cpu port */
+ .port_cnt = 3, /* total port count */
+ .port_nirqs = 2,
+ .ops = &ksz9477_dev_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz8795_xmii_ctrl1, /* Same as ksz8795 */
+ .supports_mii = {false, false, true},
+ .supports_rmii = {false, false, true},
+ .supports_rgmii = {false, false, true},
+ .internal_phy = {true, true, false},
+ .gbit_capable = {true, true, true},
+ },
+
+ [KSZ9567] = {
+ .chip_id = KSZ9567_CHIP_ID,
+ .dev_name = "KSZ9567",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x7F, /* can be configured as cpu port */
+ .port_cnt = 7, /* total physical port count */
+ .port_nirqs = 3,
+ .ops = &ksz9477_dev_ops,
+ .phy_errata_9477 = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ false, true, true},
+ .supports_rmii = {false, false, false, false,
+ false, true, true},
+ .supports_rgmii = {false, false, false, false,
+ false, true, true},
+ .internal_phy = {true, true, true, true,
+ true, false, false},
+ .gbit_capable = {true, true, true, true, true, true, true},
+ },
+
+ [LAN9370] = {
+ .chip_id = LAN9370_CHIP_ID,
+ .dev_name = "LAN9370",
+ .num_vlans = 4096,
+ .num_alus = 1024,
+ .num_statics = 256,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 5, /* total physical port count */
+ .port_nirqs = 6,
+ .ops = &lan937x_dev_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = lan937x_masks,
+ .shifts = lan937x_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false, true},
+ .supports_rmii = {false, false, false, false, true},
+ .supports_rgmii = {false, false, false, false, true},
+ .internal_phy = {true, true, true, true, false},
+ },
+
+ [LAN9371] = {
+ .chip_id = LAN9371_CHIP_ID,
+ .dev_name = "LAN9371",
+ .num_vlans = 4096,
+ .num_alus = 1024,
+ .num_statics = 256,
+ .cpu_ports = 0x30, /* can be configured as cpu port */
+ .port_cnt = 6, /* total physical port count */
+ .port_nirqs = 6,
+ .ops = &lan937x_dev_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = lan937x_masks,
+ .shifts = lan937x_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false, true, true},
+ .supports_rmii = {false, false, false, false, true, true},
+ .supports_rgmii = {false, false, false, false, true, true},
+ .internal_phy = {true, true, true, true, false, false},
+ },
+
+ [LAN9372] = {
+ .chip_id = LAN9372_CHIP_ID,
+ .dev_name = "LAN9372",
+ .num_vlans = 4096,
+ .num_alus = 1024,
+ .num_statics = 256,
+ .cpu_ports = 0x30, /* can be configured as cpu port */
+ .port_cnt = 8, /* total physical port count */
+ .port_nirqs = 6,
+ .ops = &lan937x_dev_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = lan937x_masks,
+ .shifts = lan937x_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ true, true, false, false},
+ .supports_rmii = {false, false, false, false,
+ true, true, false, false},
+ .supports_rgmii = {false, false, false, false,
+ true, true, false, false},
+ .internal_phy = {true, true, true, true,
+ false, false, true, true},
+ },
+
+ [LAN9373] = {
+ .chip_id = LAN9373_CHIP_ID,
+ .dev_name = "LAN9373",
+ .num_vlans = 4096,
+ .num_alus = 1024,
+ .num_statics = 256,
+ .cpu_ports = 0x38, /* can be configured as cpu port */
+ .port_cnt = 5, /* total physical port count */
+ .port_nirqs = 6,
+ .ops = &lan937x_dev_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = lan937x_masks,
+ .shifts = lan937x_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ true, true, false, false},
+ .supports_rmii = {false, false, false, false,
+ true, true, false, false},
+ .supports_rgmii = {false, false, false, false,
+ true, true, false, false},
+ .internal_phy = {true, true, true, false,
+ false, false, true, true},
+ },
+
+ [LAN9374] = {
+ .chip_id = LAN9374_CHIP_ID,
+ .dev_name = "LAN9374",
+ .num_vlans = 4096,
+ .num_alus = 1024,
+ .num_statics = 256,
+ .cpu_ports = 0x30, /* can be configured as cpu port */
+ .port_cnt = 8, /* total physical port count */
+ .port_nirqs = 6,
+ .ops = &lan937x_dev_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = lan937x_masks,
+ .shifts = lan937x_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ true, true, false, false},
+ .supports_rmii = {false, false, false, false,
+ true, true, false, false},
+ .supports_rgmii = {false, false, false, false,
+ true, true, false, false},
+ .internal_phy = {true, true, true, true,
+ false, false, true, true},
+ },
+};
+EXPORT_SYMBOL_GPL(ksz_switch_chips);
+
+static const struct ksz_chip_data *ksz_lookup_info(unsigned int prod_num)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ksz_switch_chips); i++) {
+ const struct ksz_chip_data *chip = &ksz_switch_chips[i];
+
+ if (chip->chip_id == prod_num)
+ return chip;
+ }
+
+ return NULL;
+}
+
+static int ksz_check_device_id(struct ksz_device *dev)
+{
+ const struct ksz_chip_data *dt_chip_data;
+
+ dt_chip_data = of_device_get_match_data(dev->dev);
+
+ /* Make sure the chip ID from the device tree matches the detected one */
+ if (dt_chip_data->chip_id != dev->chip_id) {
+ dev_err(dev->dev,
+ "Device tree specifies chip %s but found %s, please fix it!\n",
+ dt_chip_data->dev_name, dev->info->dev_name);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void ksz_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ struct ksz_device *dev = ds->priv;
+
+ config->legacy_pre_march2020 = false;
+
+ if (dev->info->supports_mii[port])
+ __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
+
+ if (dev->info->supports_rmii[port])
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
+
+ if (dev->info->supports_rgmii[port])
+ phy_interface_set_rgmii(config->supported_interfaces);
+
+ if (dev->info->internal_phy[port]) {
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ /* Compatibility for phylib's default interface type when the
+ * phy-mode property is absent
+ */
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ }
+
+ if (dev->dev_ops->get_caps)
+ dev->dev_ops->get_caps(dev, port, config);
+}
+
+void ksz_r_mib_stats64(struct ksz_device *dev, int port)
+{
+ struct ethtool_pause_stats *pstats;
+ struct rtnl_link_stats64 *stats;
+ struct ksz_stats_raw *raw;
+ struct ksz_port_mib *mib;
+
+ mib = &dev->ports[port].mib;
+ stats = &mib->stats64;
+ pstats = &mib->pause_stats;
+ raw = (struct ksz_stats_raw *)mib->counters;
+
+ spin_lock(&mib->stats64_lock);
+
+ stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast +
+ raw->rx_pause;
+ stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast +
+ raw->tx_pause;
+
+ /* HW counters count bytes + FCS, which is not acceptable
+ * for the rtnl_link_stats64 interface
+ */
+ stats->rx_bytes = raw->rx_total - stats->rx_packets * ETH_FCS_LEN;
+ stats->tx_bytes = raw->tx_total - stats->tx_packets * ETH_FCS_LEN;
+
+ stats->rx_length_errors = raw->rx_undersize + raw->rx_fragments +
+ raw->rx_oversize;
+
+ stats->rx_crc_errors = raw->rx_crc_err;
+ stats->rx_frame_errors = raw->rx_align_err;
+ stats->rx_dropped = raw->rx_discards;
+ stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
+ stats->rx_frame_errors + stats->rx_dropped;
+
+ stats->tx_window_errors = raw->tx_late_col;
+ stats->tx_fifo_errors = raw->tx_discards;
+ stats->tx_aborted_errors = raw->tx_exc_col;
+ stats->tx_errors = stats->tx_window_errors + stats->tx_fifo_errors +
+ stats->tx_aborted_errors;
+
+ stats->multicast = raw->rx_mcast;
+ stats->collisions = raw->tx_total_col;
+
+ pstats->tx_pause_frames = raw->tx_pause;
+ pstats->rx_pause_frames = raw->rx_pause;
+
+ spin_unlock(&mib->stats64_lock);
+}
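+
+/* Worked example for the FCS adjustment above (illustrative numbers, not
+ * taken from hardware): if a port received 100 unicast frames of 64 bytes
+ * each, the hardware byte counter includes the 4-byte FCS of every frame:
+ *
+ *	raw->rx_ucast = 100, raw->rx_total = 100 * 64 = 6400
+ *	stats->rx_bytes = 6400 - 100 * ETH_FCS_LEN = 6400 - 400 = 6000
+ *
+ * so rtnl_link_stats64 reports FCS-free byte counts.
+ */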
+
+static void ksz_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *s)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port_mib *mib;
+
+ mib = &dev->ports[port].mib;
+
+ spin_lock(&mib->stats64_lock);
+ memcpy(s, &mib->stats64, sizeof(*s));
+ spin_unlock(&mib->stats64_lock);
+}
+
+static void ksz_get_pause_stats(struct dsa_switch *ds, int port,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port_mib *mib;
+
+ mib = &dev->ports[port].mib;
+
+ spin_lock(&mib->stats64_lock);
+ memcpy(pause_stats, &mib->pause_stats, sizeof(*pause_stats));
+ spin_unlock(&mib->stats64_lock);
+}
+
+static void ksz_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, uint8_t *buf)
+{
+ struct ksz_device *dev = ds->priv;
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < dev->info->mib_cnt; i++) {
+ memcpy(buf + i * ETH_GSTRING_LEN,
+ dev->info->mib_names[i].string, ETH_GSTRING_LEN);
+ }
+}
+
+static void ksz_update_port_member(struct ksz_device *dev, int port)
+{
+ struct ksz_port *p = &dev->ports[port];
+ struct dsa_switch *ds = dev->ds;
+ u8 port_member = 0, cpu_port;
+ const struct dsa_port *dp;
+ int i, j;
+
+ if (!dsa_is_user_port(ds, port))
+ return;
+
+ dp = dsa_to_port(ds, port);
+ cpu_port = BIT(dsa_upstream_port(ds, port));
+
+ for (i = 0; i < ds->num_ports; i++) {
+ const struct dsa_port *other_dp = dsa_to_port(ds, i);
+ struct ksz_port *other_p = &dev->ports[i];
+ u8 val = 0;
+
+ if (!dsa_is_user_port(ds, i))
+ continue;
+ if (port == i)
+ continue;
+ if (!dsa_port_bridge_same(dp, other_dp))
+ continue;
+ if (other_p->stp_state != BR_STATE_FORWARDING)
+ continue;
+
+ if (p->stp_state == BR_STATE_FORWARDING) {
+ val |= BIT(port);
+ port_member |= BIT(i);
+ }
+
+ /* Retain port [i]'s relationship to ports other than [port] */
+ for (j = 0; j < ds->num_ports; j++) {
+ const struct dsa_port *third_dp;
+ struct ksz_port *third_p;
+
+ if (j == i)
+ continue;
+ if (j == port)
+ continue;
+ if (!dsa_is_user_port(ds, j))
+ continue;
+ third_p = &dev->ports[j];
+ if (third_p->stp_state != BR_STATE_FORWARDING)
+ continue;
+ third_dp = dsa_to_port(ds, j);
+ if (dsa_port_bridge_same(other_dp, third_dp))
+ val |= BIT(j);
+ }
+
+ dev->dev_ops->cfg_port_member(dev, i, val | cpu_port);
+ }
+
+ dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port);
+}
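+
+/* Illustrative membership computation (hypothetical topology, not taken
+ * from the driver): with user ports 1 and 2 in the same bridge, both in
+ * BR_STATE_FORWARDING, and the upstream CPU on port 3, updating port 1
+ * programs
+ *
+ *	port 1 member = BIT(2) | BIT(3)	(peer port 2 + CPU port)
+ *	port 2 member = BIT(1) | BIT(3)
+ *
+ * A blocked or standalone user port keeps only the CPU port in its mask.
+ */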
+
+static int ksz_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct ksz_device *dev = bus->priv;
+ u16 val;
+ int ret;
+
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ ret = dev->dev_ops->r_phy(dev, addr, regnum, &val);
+ if (ret < 0)
+ return ret;
+
+ return val;
+}
+
+static int ksz_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct ksz_device *dev = bus->priv;
+
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->w_phy(dev, addr, regnum, val);
+}
+
+static int ksz_irq_phy_setup(struct ksz_device *dev)
+{
+ struct dsa_switch *ds = dev->ds;
+ int phy;
+ int irq;
+ int ret;
+
+ for (phy = 0; phy < KSZ_MAX_NUM_PORTS; phy++) {
+ if (BIT(phy) & ds->phys_mii_mask) {
+ irq = irq_find_mapping(dev->ports[phy].pirq.domain,
+ PORT_SRC_PHY_INT);
+ if (irq < 0) {
+ ret = irq;
+ goto out;
+ }
+ ds->slave_mii_bus->irq[phy] = irq;
+ }
+ }
+ return 0;
+out:
+ while (phy--)
+ if (BIT(phy) & ds->phys_mii_mask)
+ irq_dispose_mapping(ds->slave_mii_bus->irq[phy]);
+
+ return ret;
+}
+
+static void ksz_irq_phy_free(struct ksz_device *dev)
+{
+ struct dsa_switch *ds = dev->ds;
+ int phy;
+
+ for (phy = 0; phy < KSZ_MAX_NUM_PORTS; phy++)
+ if (BIT(phy) & ds->phys_mii_mask)
+ irq_dispose_mapping(ds->slave_mii_bus->irq[phy]);
+}
+
+static int ksz_mdio_register(struct ksz_device *dev)
+{
+ struct dsa_switch *ds = dev->ds;
+ struct device_node *mdio_np;
+ struct mii_bus *bus;
+ int ret;
+
+ mdio_np = of_get_child_by_name(dev->dev->of_node, "mdio");
+ if (!mdio_np)
+ return 0;
+
+ bus = devm_mdiobus_alloc(ds->dev);
+ if (!bus) {
+ of_node_put(mdio_np);
+ return -ENOMEM;
+ }
+
+ bus->priv = dev;
+ bus->read = ksz_sw_mdio_read;
+ bus->write = ksz_sw_mdio_write;
+ bus->name = "ksz slave smi";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index);
+ bus->parent = ds->dev;
+ bus->phy_mask = ~ds->phys_mii_mask;
+
+ ds->slave_mii_bus = bus;
+
+ if (dev->irq > 0) {
+ ret = ksz_irq_phy_setup(dev);
+ if (ret) {
+ of_node_put(mdio_np);
+ return ret;
+ }
+ }
+
+ ret = devm_of_mdiobus_register(ds->dev, bus, mdio_np);
+ if (ret) {
+ dev_err(ds->dev, "unable to register MDIO bus %s\n",
+ bus->id);
+ if (dev->irq > 0)
+ ksz_irq_phy_free(dev);
+ }
+
+ of_node_put(mdio_np);
+
+ return ret;
+}
+
+static void ksz_irq_mask(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+
+ kirq->masked |= BIT(d->hwirq);
+}
+
+static void ksz_irq_unmask(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+
+ kirq->masked &= ~BIT(d->hwirq);
+}
+
+static void ksz_irq_bus_lock(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+
+ mutex_lock(&kirq->dev->lock_irq);
+}
+
+static void ksz_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+ struct ksz_device *dev = kirq->dev;
+ int ret;
+
+ ret = ksz_write32(dev, kirq->reg_mask, kirq->masked);
+ if (ret)
+ dev_err(dev->dev, "failed to change IRQ mask\n");
+
+ mutex_unlock(&dev->lock_irq);
+}
+
+static const struct irq_chip ksz_irq_chip = {
+ .name = "ksz-irq",
+ .irq_mask = ksz_irq_mask,
+ .irq_unmask = ksz_irq_unmask,
+ .irq_bus_lock = ksz_irq_bus_lock,
+ .irq_bus_sync_unlock = ksz_irq_bus_sync_unlock,
+};
+
+static int ksz_irq_domain_map(struct irq_domain *d,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, d->host_data);
+ irq_set_chip_and_handler(irq, &ksz_irq_chip, handle_level_irq);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops ksz_irq_domain_ops = {
+ .map = ksz_irq_domain_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static void ksz_irq_free(struct ksz_irq *kirq)
+{
+ int irq, virq;
+
+ free_irq(kirq->irq_num, kirq);
+
+ for (irq = 0; irq < kirq->nirqs; irq++) {
+ virq = irq_find_mapping(kirq->domain, irq);
+ irq_dispose_mapping(virq);
+ }
+
+ irq_domain_remove(kirq->domain);
+}
+
+static irqreturn_t ksz_irq_thread_fn(int irq, void *dev_id)
+{
+ struct ksz_irq *kirq = dev_id;
+ unsigned int nhandled = 0;
+ struct ksz_device *dev;
+ unsigned int sub_irq;
+ u8 data;
+ int ret;
+ u8 n;
+
+ dev = kirq->dev;
+
+ /* Read interrupt status register */
+ ret = ksz_read8(dev, kirq->reg_status, &data);
+ if (ret)
+ goto out;
+
+ for (n = 0; n < kirq->nirqs; ++n) {
+ if (data & BIT(n)) {
+ sub_irq = irq_find_mapping(kirq->domain, n);
+ handle_nested_irq(sub_irq);
+ ++nhandled;
+ }
+ }
+out:
+ return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
+}
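+
+/* Example of the demux above (hypothetical status value): if reading
+ * kirq->reg_status returns data = 0x05 (0b101), the thread looks up the
+ * virtual IRQs mapped to hwirq 0 and hwirq 2 in kirq->domain, calls
+ * handle_nested_irq() for each, and returns IRQ_HANDLED.
+ */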
+
+static int ksz_irq_common_setup(struct ksz_device *dev, struct ksz_irq *kirq)
+{
+ int ret, n;
+
+ kirq->dev = dev;
+ kirq->masked = ~0;
+
+ kirq->domain = irq_domain_add_simple(dev->dev->of_node, kirq->nirqs, 0,
+ &ksz_irq_domain_ops, kirq);
+ if (!kirq->domain)
+ return -ENOMEM;
+
+ for (n = 0; n < kirq->nirqs; n++)
+ irq_create_mapping(kirq->domain, n);
+
+ ret = request_threaded_irq(kirq->irq_num, NULL, ksz_irq_thread_fn,
+ IRQF_ONESHOT, kirq->name, kirq);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ ksz_irq_free(kirq);
+
+ return ret;
+}
+
+static int ksz_girq_setup(struct ksz_device *dev)
+{
+ struct ksz_irq *girq = &dev->girq;
+
+ girq->nirqs = dev->info->port_cnt;
+ girq->reg_mask = REG_SW_PORT_INT_MASK__1;
+ girq->reg_status = REG_SW_PORT_INT_STATUS__1;
+ snprintf(girq->name, sizeof(girq->name), "global_port_irq");
+
+ girq->irq_num = dev->irq;
+
+ return ksz_irq_common_setup(dev, girq);
+}
+
+static int ksz_pirq_setup(struct ksz_device *dev, u8 p)
+{
+ struct ksz_irq *pirq = &dev->ports[p].pirq;
+
+ pirq->nirqs = dev->info->port_nirqs;
+ pirq->reg_mask = dev->dev_ops->get_port_addr(p, REG_PORT_INT_MASK);
+ pirq->reg_status = dev->dev_ops->get_port_addr(p, REG_PORT_INT_STATUS);
+ snprintf(pirq->name, sizeof(pirq->name), "port_irq-%d", p);
+
+ pirq->irq_num = irq_find_mapping(dev->girq.domain, p);
+ if (pirq->irq_num < 0)
+ return pirq->irq_num;
+
+ return ksz_irq_common_setup(dev, pirq);
+}
+
+static int ksz_setup(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ struct dsa_port *dp;
+ struct ksz_port *p;
+ const u16 *regs;
+ int ret;
+
+ regs = dev->info->regs;
+
+ dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table),
+ dev->info->num_vlans, GFP_KERNEL);
+ if (!dev->vlan_cache)
+ return -ENOMEM;
+
+ ret = dev->dev_ops->reset(dev);
+ if (ret) {
+ dev_err(ds->dev, "failed to reset switch\n");
+ return ret;
+ }
+
+ /* Set the broadcast storm protection rate to 10% */
+ regmap_update_bits(dev->regmap[1], regs[S_BROADCAST_CTRL],
+ BROADCAST_STORM_RATE,
+ (BROADCAST_STORM_VALUE *
+ BROADCAST_STORM_PROT_RATE) / 100);
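+ /* E.g. with BROADCAST_STORM_PROT_RATE of 10 (percent), the value
+ * written is BROADCAST_STORM_VALUE * 10 / 100, i.e. 10% of the
+ * chip's full-rate reference count (an illustrative reading of the
+ * expression above, not a datasheet value).
+ */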
+
+ dev->dev_ops->config_cpu_port(ds);
+
+ dev->dev_ops->enable_stp_addr(dev);
+
+ regmap_update_bits(dev->regmap[0], regs[S_MULTICAST_CTRL],
+ MULTICAST_STORM_DISABLE, MULTICAST_STORM_DISABLE);
+
+ ksz_init_mib_timer(dev);
+
+ ds->configure_vlan_while_not_filtering = false;
+
+ if (dev->dev_ops->setup) {
+ ret = dev->dev_ops->setup(ds);
+ if (ret)
+ return ret;
+ }
+
+ /* Start with learning disabled on standalone user ports, and enabled
+ * on the CPU port. Lacking finer-grained mechanisms, learning on the
+ * CPU port avoids flooding bridge-local addresses onto the network
+ * in some cases.
+ */
+ p = &dev->ports[dev->cpu_port];
+ p->learning = true;
+
+ if (dev->irq > 0) {
+ ret = ksz_girq_setup(dev);
+ if (ret)
+ return ret;
+
+ dsa_switch_for_each_user_port(dp, dev->ds) {
+ ret = ksz_pirq_setup(dev, dp->index);
+ if (ret)
+ goto out_girq;
+ }
+ }
+
+ ret = ksz_mdio_register(dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to register the mdio");
+ goto out_pirq;
+ }
+
+ /* start switch */
+ regmap_update_bits(dev->regmap[0], regs[S_START_CTRL],
+ SW_START, SW_START);
+
+ return 0;
+
+out_pirq:
+ if (dev->irq > 0)
+ dsa_switch_for_each_user_port(dp, dev->ds)
+ ksz_irq_free(&dev->ports[dp->index].pirq);
+out_girq:
+ if (dev->irq > 0)
+ ksz_irq_free(&dev->girq);
+
+ return ret;
+}
+
+static void ksz_teardown(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ struct dsa_port *dp;
+
+ if (dev->irq > 0) {
+ dsa_switch_for_each_user_port(dp, dev->ds)
+ ksz_irq_free(&dev->ports[dp->index].pirq);
+
+ ksz_irq_free(&dev->girq);
+ }
+
+ if (dev->dev_ops->teardown)
+ dev->dev_ops->teardown(ds);
+}
+
+static void port_r_cnt(struct ksz_device *dev, int port)
+{
+ struct ksz_port_mib *mib = &dev->ports[port].mib;
+ u64 *dropped;
+
+ /* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */
+ while (mib->cnt_ptr < dev->info->reg_mib_cnt) {
+ dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr,
+ &mib->counters[mib->cnt_ptr]);
+ ++mib->cnt_ptr;
+ }
+
+ /* The last slot in the counter storage holds the dropped count */
+ dropped = &mib->counters[dev->info->mib_cnt];
+
+ /* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. */
+ while (mib->cnt_ptr < dev->info->mib_cnt) {
+ dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr,
+ dropped, &mib->counters[mib->cnt_ptr]);
+ ++mib->cnt_ptr;
+ }
+ mib->cnt_ptr = 0;
+}
+
+static void ksz_mib_read_work(struct work_struct *work)
+{
+ struct ksz_device *dev = container_of(work, struct ksz_device,
+ mib_read.work);
+ struct ksz_port_mib *mib;
+ struct ksz_port *p;
+ int i;
+
+ for (i = 0; i < dev->info->port_cnt; i++) {
+ if (dsa_is_unused_port(dev->ds, i))
+ continue;
+
+ p = &dev->ports[i];
+ mib = &p->mib;
+ mutex_lock(&mib->cnt_mutex);
+
+ /* Only read the MIB counters when the port is told to do so.
+ * Otherwise, read only the dropped counters while the link is down.
+ */
+ if (!p->read) {
+ const struct dsa_port *dp = dsa_to_port(dev->ds, i);
+
+ if (!netif_carrier_ok(dp->slave))
+ mib->cnt_ptr = dev->info->reg_mib_cnt;
+ }
+ port_r_cnt(dev, i);
+ p->read = false;
+
+ if (dev->dev_ops->r_mib_stat64)
+ dev->dev_ops->r_mib_stat64(dev, i);
+
+ mutex_unlock(&mib->cnt_mutex);
+ }
+
+ schedule_delayed_work(&dev->mib_read, dev->mib_read_interval);
+}
+
+void ksz_init_mib_timer(struct ksz_device *dev)
+{
+ int i;
+
+ INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work);
+
+ for (i = 0; i < dev->info->port_cnt; i++) {
+ struct ksz_port_mib *mib = &dev->ports[i].mib;
+
+ dev->dev_ops->port_init_cnt(dev, i);
+
+ mib->cnt_ptr = 0;
+ memset(mib->counters, 0, dev->info->mib_cnt * sizeof(u64));
+ }
+}
+
+static int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
+{
+ struct ksz_device *dev = ds->priv;
+ u16 val = 0xffff;
+ int ret;
+
+ ret = dev->dev_ops->r_phy(dev, addr, reg, &val);
+ if (ret)
+ return ret;
+
+ return val;
+}
+
+static int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret;
+
+ ret = dev->dev_ops->w_phy(dev, addr, reg, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (dev->chip_id == KSZ8830_CHIP_ID) {
+ /* Silicon Errata Sheet (DS80000830A):
+ * Port 1 does not work with LinkMD Cable-Testing.
+ * Port 1 does not respond to received PAUSE control frames.
+ */
+ if (!port)
+ return MICREL_KSZ8_P1_ERRATA;
+ }
+
+ return 0;
+}
+
+static void ksz_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *p = &dev->ports[port];
+
+ /* Read all MIB counters when the link is going down. */
+ p->read = true;
+ /* Schedule an immediate read if the MIB timer has been started */
+ if (dev->mib_read_interval)
+ schedule_delayed_work(&dev->mib_read, 0);
+}
+
+static int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ return dev->info->mib_cnt;
+}
+
+static void ksz_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *buf)
+{
+ const struct dsa_port *dp = dsa_to_port(ds, port);
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port_mib *mib;
+
+ mib = &dev->ports[port].mib;
+ mutex_lock(&mib->cnt_mutex);
+
+ /* Only read dropped counters if no link. */
+ if (!netif_carrier_ok(dp->slave))
+ mib->cnt_ptr = dev->info->reg_mib_cnt;
+ port_r_cnt(dev, port);
+ memcpy(buf, mib->counters, dev->info->mib_cnt * sizeof(u64));
+ mutex_unlock(&mib->cnt_mutex);
+}
+
+static int ksz_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
+{
+ /* port_stp_state_set() will be called afterwards to put the port in
+ * the appropriate state, so there is no need to do anything here.
+ */
+
+ return 0;
+}
+
+static void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
+{
+ /* port_stp_state_set() will be called afterwards to put the port in
+ * the forwarding state, so there is no need to do anything here.
+ */
+}
+
+static void ksz_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct ksz_device *dev = ds->priv;
+
+ dev->dev_ops->flush_dyn_mac_table(dev, port);
+}
+
+static int ksz_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->set_ageing_time)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->set_ageing_time(dev, msecs);
+}
+
+static int ksz_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->fdb_add)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->fdb_add(dev, port, addr, vid, db);
+}
+
+static int ksz_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr,
+ u16 vid, struct dsa_db db)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->fdb_del)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->fdb_del(dev, port, addr, vid, db);
+}
+
+static int ksz_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->fdb_dump)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->fdb_dump(dev, port, cb, data);
+}
+
+static int ksz_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->mdb_add)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->mdb_add(dev, port, mdb, db);
+}
+
+static int ksz_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->mdb_del)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->mdb_del(dev, port, mdb, db);
+}
+
+static int ksz_enable_port(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dsa_is_user_port(ds, port))
+ return 0;
+
+ /* setup slave port */
+ dev->dev_ops->port_setup(dev, port, false);
+
+ /* port_stp_state_set() will be called afterwards to enable the port,
+ * so there is no need to do anything here.
+ */
+
+ return 0;
+}
+
+void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *p;
+ const u16 *regs;
+ u8 data;
+
+ regs = dev->info->regs;
+
+ ksz_pread8(dev, port, regs[P_STP_CTRL], &data);
+ data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
+
+ p = &dev->ports[port];
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ data |= PORT_LEARN_DISABLE;
+ break;
+ case BR_STATE_LISTENING:
+ data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
+ break;
+ case BR_STATE_LEARNING:
+ data |= PORT_RX_ENABLE;
+ if (!p->learning)
+ data |= PORT_LEARN_DISABLE;
+ break;
+ case BR_STATE_FORWARDING:
+ data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
+ if (!p->learning)
+ data |= PORT_LEARN_DISABLE;
+ break;
+ case BR_STATE_BLOCKING:
+ data |= PORT_LEARN_DISABLE;
+ break;
+ default:
+ dev_err(ds->dev, "invalid STP state: %d\n", state);
+ return;
+ }
+
+ ksz_pwrite8(dev, port, regs[P_STP_CTRL], data);
+
+ p->stp_state = state;
+
+ ksz_update_port_member(dev, port);
+}
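+
+/* Summary of the state encoding above (derived directly from the switch
+ * cases; PORT_LEARN_DISABLE is active-low with respect to learning):
+ *
+ *	state		RX	TX	learning
+ *	DISABLED	no	no	no
+ *	LISTENING	yes	no	no
+ *	LEARNING	yes	no	p->learning
+ *	FORWARDING	yes	yes	p->learning
+ *	BLOCKING	no	no	no
+ */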
+
+static int ksz_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ if (flags.mask & ~BR_LEARNING)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ksz_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *p = &dev->ports[port];
+
+ if (flags.mask & BR_LEARNING) {
+ p->learning = !!(flags.val & BR_LEARNING);
+
+ /* Make the change take effect immediately */
+ ksz_port_stp_state_set(ds, port, p->stp_state);
+ }
+
+ return 0;
+}
+
+static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol mp)
+{
+ struct ksz_device *dev = ds->priv;
+ enum dsa_tag_protocol proto = DSA_TAG_PROTO_NONE;
+
+ if (dev->chip_id == KSZ8795_CHIP_ID ||
+ dev->chip_id == KSZ8794_CHIP_ID ||
+ dev->chip_id == KSZ8765_CHIP_ID)
+ proto = DSA_TAG_PROTO_KSZ8795;
+
+ if (dev->chip_id == KSZ8830_CHIP_ID ||
+ dev->chip_id == KSZ8563_CHIP_ID ||
+ dev->chip_id == KSZ9893_CHIP_ID)
+ proto = DSA_TAG_PROTO_KSZ9893;
+
+ if (dev->chip_id == KSZ9477_CHIP_ID ||
+ dev->chip_id == KSZ9896_CHIP_ID ||
+ dev->chip_id == KSZ9897_CHIP_ID ||
+ dev->chip_id == KSZ9567_CHIP_ID)
+ proto = DSA_TAG_PROTO_KSZ9477;
+
+ if (is_lan937x(dev))
+ proto = DSA_TAG_PROTO_LAN937X_VALUE;
+
+ return proto;
+}
+
+static int ksz_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool flag, struct netlink_ext_ack *extack)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->vlan_filtering)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->vlan_filtering(dev, port, flag, extack);
+}
+
+static int ksz_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->vlan_add)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->vlan_add(dev, port, vlan, extack);
+}
+
+static int ksz_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->vlan_del)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->vlan_del(dev, port, vlan);
+}
+
+static int ksz_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->mirror_add)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->mirror_add(dev, port, mirror, ingress, extack);
+}
+
+static void ksz_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (dev->dev_ops->mirror_del)
+ dev->dev_ops->mirror_del(dev, port, mirror);
+}
+
+static int ksz_change_mtu(struct dsa_switch *ds, int port, int mtu)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->change_mtu)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->change_mtu(dev, port, mtu);
+}
+
+static int ksz_max_mtu(struct dsa_switch *ds, int port)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->max_mtu)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->max_mtu(dev, port);
+}
+
+static void ksz_set_xmii(struct ksz_device *dev, int port,
+ phy_interface_t interface)
+{
+ const u8 *bitval = dev->info->xmii_ctrl1;
+ struct ksz_port *p = &dev->ports[port];
+ const u16 *regs = dev->info->regs;
+ u8 data8;
+
+ ksz_pread8(dev, port, regs[P_XMII_CTRL_1], &data8);
+
+ data8 &= ~(P_MII_SEL_M | P_RGMII_ID_IG_ENABLE |
+ P_RGMII_ID_EG_ENABLE);
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_MII:
+ data8 |= bitval[P_MII_SEL];
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ data8 |= bitval[P_RMII_SEL];
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ data8 |= bitval[P_GMII_SEL];
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ data8 |= bitval[P_RGMII_SEL];
+ /* On KSZ9893 and KSZ8563, disable RGMII in-band status support */
+ if (dev->chip_id == KSZ9893_CHIP_ID ||
+ dev->chip_id == KSZ8563_CHIP_ID)
+ data8 &= ~P_MII_MAC_MODE;
+ break;
+ default:
+ dev_err(dev->dev, "Unsupported interface '%s' for port %d\n",
+ phy_modes(interface), port);
+ return;
+ }
+
+ if (p->rgmii_tx_val)
+ data8 |= P_RGMII_ID_EG_ENABLE;
+
+ if (p->rgmii_rx_val)
+ data8 |= P_RGMII_ID_IG_ENABLE;
+
+ /* Write the updated value */
+ ksz_pwrite8(dev, port, regs[P_XMII_CTRL_1], data8);
+}
+
+phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit)
+{
+ const u8 *bitval = dev->info->xmii_ctrl1;
+ const u16 *regs = dev->info->regs;
+ phy_interface_t interface;
+ u8 data8;
+ u8 val;
+
+ ksz_pread8(dev, port, regs[P_XMII_CTRL_1], &data8);
+
+ val = FIELD_GET(P_MII_SEL_M, data8);
+
+ if (val == bitval[P_MII_SEL]) {
+ if (gbit)
+ interface = PHY_INTERFACE_MODE_GMII;
+ else
+ interface = PHY_INTERFACE_MODE_MII;
+ } else if (val == bitval[P_RMII_SEL]) {
+ interface = PHY_INTERFACE_MODE_RMII;
+ } else {
+ interface = PHY_INTERFACE_MODE_RGMII;
+ if (data8 & P_RGMII_ID_EG_ENABLE)
+ interface = PHY_INTERFACE_MODE_RGMII_TXID;
+ if (data8 & P_RGMII_ID_IG_ENABLE) {
+ interface = PHY_INTERFACE_MODE_RGMII_RXID;
+ if (data8 & P_RGMII_ID_EG_ENABLE)
+ interface = PHY_INTERFACE_MODE_RGMII_ID;
+ }
+ }
+
+ return interface;
+}
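+
+/* Decoding summary for ksz_get_xmii() (follows directly from the branches
+ * above): the mode select field picks MII/GMII vs RMII vs RGMII, and for
+ * RGMII the egress/ingress delay bits refine the variant:
+ *
+ *	ID_EG	ID_IG	result
+ *	0	0	PHY_INTERFACE_MODE_RGMII
+ *	1	0	PHY_INTERFACE_MODE_RGMII_TXID
+ *	0	1	PHY_INTERFACE_MODE_RGMII_RXID
+ *	1	1	PHY_INTERFACE_MODE_RGMII_ID
+ */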
+
+static void ksz_phylink_mac_config(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (ksz_is_ksz88x3(dev))
+ return;
+
+ /* Internal PHYs */
+ if (dev->info->internal_phy[port])
+ return;
+
+ if (phylink_autoneg_inband(mode)) {
+ dev_err(dev->dev, "In-band AN not supported!\n");
+ return;
+ }
+
+ ksz_set_xmii(dev, port, state->interface);
+
+ if (dev->dev_ops->phylink_mac_config)
+ dev->dev_ops->phylink_mac_config(dev, port, mode, state);
+
+ if (dev->dev_ops->setup_rgmii_delay)
+ dev->dev_ops->setup_rgmii_delay(dev, port);
+}
+
+bool ksz_get_gbit(struct ksz_device *dev, int port)
+{
+ const u8 *bitval = dev->info->xmii_ctrl1;
+ const u16 *regs = dev->info->regs;
+ bool gbit = false;
+ u8 data8;
+ bool val;
+
+ ksz_pread8(dev, port, regs[P_XMII_CTRL_1], &data8);
+
+ val = FIELD_GET(P_GMII_1GBIT_M, data8);
+
+ if (val == bitval[P_GMII_1GBIT])
+ gbit = true;
+
+ return gbit;
+}
+
+static void ksz_set_gbit(struct ksz_device *dev, int port, bool gbit)
+{
+ const u8 *bitval = dev->info->xmii_ctrl1;
+ const u16 *regs = dev->info->regs;
+ u8 data8;
+
+ ksz_pread8(dev, port, regs[P_XMII_CTRL_1], &data8);
+
+ data8 &= ~P_GMII_1GBIT_M;
+
+ if (gbit)
+ data8 |= FIELD_PREP(P_GMII_1GBIT_M, bitval[P_GMII_1GBIT]);
+ else
+ data8 |= FIELD_PREP(P_GMII_1GBIT_M, bitval[P_GMII_NOT_1GBIT]);
+
+ /* Write the updated value */
+ ksz_pwrite8(dev, port, regs[P_XMII_CTRL_1], data8);
+}
+
+static void ksz_set_100_10mbit(struct ksz_device *dev, int port, int speed)
+{
+ const u8 *bitval = dev->info->xmii_ctrl0;
+ const u16 *regs = dev->info->regs;
+ u8 data8;
+
+ ksz_pread8(dev, port, regs[P_XMII_CTRL_0], &data8);
+
+ data8 &= ~P_MII_100MBIT_M;
+
+ if (speed == SPEED_100)
+ data8 |= FIELD_PREP(P_MII_100MBIT_M, bitval[P_MII_100MBIT]);
+ else
+ data8 |= FIELD_PREP(P_MII_100MBIT_M, bitval[P_MII_10MBIT]);
+
+ /* Write the updated value */
+ ksz_pwrite8(dev, port, regs[P_XMII_CTRL_0], data8);
+}
+
+static void ksz_port_set_xmii_speed(struct ksz_device *dev, int port, int speed)
+{
+ if (speed == SPEED_1000)
+ ksz_set_gbit(dev, port, true);
+ else
+ ksz_set_gbit(dev, port, false);
+
+ if (speed == SPEED_100 || speed == SPEED_10)
+ ksz_set_100_10mbit(dev, port, speed);
+}
+
+static void ksz_duplex_flowctrl(struct ksz_device *dev, int port, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ const u8 *bitval = dev->info->xmii_ctrl0;
+ const u32 *masks = dev->info->masks;
+ const u16 *regs = dev->info->regs;
+ u8 mask;
+ u8 val;
+
+ mask = P_MII_DUPLEX_M | masks[P_MII_TX_FLOW_CTRL] |
+ masks[P_MII_RX_FLOW_CTRL];
+
+ if (duplex == DUPLEX_FULL)
+ val = FIELD_PREP(P_MII_DUPLEX_M, bitval[P_MII_FULL_DUPLEX]);
+ else
+ val = FIELD_PREP(P_MII_DUPLEX_M, bitval[P_MII_HALF_DUPLEX]);
+
+ if (tx_pause)
+ val |= masks[P_MII_TX_FLOW_CTRL];
+
+ if (rx_pause)
+ val |= masks[P_MII_RX_FLOW_CTRL];
+
+ ksz_prmw8(dev, port, regs[P_XMII_CTRL_0], mask, val);
+}
+
+static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause,
+ bool rx_pause)
+{
+ struct ksz_port *p;
+
+ p = &dev->ports[port];
+
+ /* Internal PHYs */
+ if (dev->info->internal_phy[port])
+ return;
+
+ p->phydev.speed = speed;
+
+ ksz_port_set_xmii_speed(dev, port, speed);
+
+ ksz_duplex_flowctrl(dev, port, duplex, tx_pause, rx_pause);
+}
+
+static void ksz_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (dev->dev_ops->phylink_mac_link_up)
+ dev->dev_ops->phylink_mac_link_up(dev, port, mode, interface,
+ phydev, speed, duplex,
+ tx_pause, rx_pause);
+}
+
+static int ksz_switch_detect(struct ksz_device *dev)
+{
+ u8 id1, id2, id4;
+ u16 id16;
+ u32 id32;
+ int ret;
+
+ /* read chip id */
+ ret = ksz_read16(dev, REG_CHIP_ID0, &id16);
+ if (ret)
+ return ret;
+
+ id1 = FIELD_GET(SW_FAMILY_ID_M, id16);
+ id2 = FIELD_GET(SW_CHIP_ID_M, id16);
+
+ switch (id1) {
+ case KSZ87_FAMILY_ID:
+ if (id2 == KSZ87_CHIP_ID_95) {
+ u8 val;
+
+ dev->chip_id = KSZ8795_CHIP_ID;
+
+ ksz_read8(dev, KSZ8_PORT_STATUS_0, &val);
+ if (val & KSZ8_PORT_FIBER_MODE)
+ dev->chip_id = KSZ8765_CHIP_ID;
+ } else if (id2 == KSZ87_CHIP_ID_94) {
+ dev->chip_id = KSZ8794_CHIP_ID;
+ } else {
+ return -ENODEV;
+ }
+ break;
+ case KSZ88_FAMILY_ID:
+ if (id2 == KSZ88_CHIP_ID_63)
+ dev->chip_id = KSZ8830_CHIP_ID;
+ else
+ return -ENODEV;
+ break;
+ default:
+ ret = ksz_read32(dev, REG_CHIP_ID0, &id32);
+ if (ret)
+ return ret;
+
+ dev->chip_rev = FIELD_GET(SW_REV_ID_M, id32);
+ id32 &= ~0xFF;
+
+ switch (id32) {
+ case KSZ9477_CHIP_ID:
+ case KSZ9896_CHIP_ID:
+ case KSZ9897_CHIP_ID:
+ case KSZ9567_CHIP_ID:
+ case LAN9370_CHIP_ID:
+ case LAN9371_CHIP_ID:
+ case LAN9372_CHIP_ID:
+ case LAN9373_CHIP_ID:
+ case LAN9374_CHIP_ID:
+ dev->chip_id = id32;
+ break;
+ case KSZ9893_CHIP_ID:
+ ret = ksz_read8(dev, REG_CHIP_ID4,
+ &id4);
+ if (ret)
+ return ret;
+
+ if (id4 == SKU_ID_KSZ8563)
+ dev->chip_id = KSZ8563_CHIP_ID;
+ else
+ dev->chip_id = KSZ9893_CHIP_ID;
+
+ break;
+ default:
+ dev_err(dev->dev,
+ "unsupported switch detected %x)\n", id32);
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
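+
+/* Example ID decode (hypothetical register value): for an ID word whose
+ * upper three bytes read 0x009477, masking off the low revision byte with
+ * id32 &= ~0xFF leaves id32 == 0x00947700 == KSZ9477_CHIP_ID; the revision
+ * itself was extracted beforehand via FIELD_GET(SW_REV_ID_M, id32).
+ */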
+
+static const struct dsa_switch_ops ksz_switch_ops = {
+ .get_tag_protocol = ksz_get_tag_protocol,
+ .get_phy_flags = ksz_get_phy_flags,
+ .setup = ksz_setup,
+ .teardown = ksz_teardown,
+ .phy_read = ksz_phy_read16,
+ .phy_write = ksz_phy_write16,
+ .phylink_get_caps = ksz_phylink_get_caps,
+ .phylink_mac_config = ksz_phylink_mac_config,
+ .phylink_mac_link_up = ksz_phylink_mac_link_up,
+ .phylink_mac_link_down = ksz_mac_link_down,
+ .port_enable = ksz_enable_port,
+ .set_ageing_time = ksz_set_ageing_time,
+ .get_strings = ksz_get_strings,
+ .get_ethtool_stats = ksz_get_ethtool_stats,
+ .get_sset_count = ksz_sset_count,
+ .port_bridge_join = ksz_port_bridge_join,
+ .port_bridge_leave = ksz_port_bridge_leave,
+ .port_stp_state_set = ksz_port_stp_state_set,
+ .port_pre_bridge_flags = ksz_port_pre_bridge_flags,
+ .port_bridge_flags = ksz_port_bridge_flags,
+ .port_fast_age = ksz_port_fast_age,
+ .port_vlan_filtering = ksz_port_vlan_filtering,
+ .port_vlan_add = ksz_port_vlan_add,
+ .port_vlan_del = ksz_port_vlan_del,
+ .port_fdb_dump = ksz_port_fdb_dump,
+ .port_fdb_add = ksz_port_fdb_add,
+ .port_fdb_del = ksz_port_fdb_del,
+ .port_mdb_add = ksz_port_mdb_add,
+ .port_mdb_del = ksz_port_mdb_del,
+ .port_mirror_add = ksz_port_mirror_add,
+ .port_mirror_del = ksz_port_mirror_del,
+ .get_stats64 = ksz_get_stats64,
+ .get_pause_stats = ksz_get_pause_stats,
+ .port_change_mtu = ksz_change_mtu,
+ .port_max_mtu = ksz_max_mtu,
+};
+
+struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
+{
+ struct dsa_switch *ds;
+ struct ksz_device *swdev;
+
+ ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
+ if (!ds)
+ return NULL;
+
+ ds->dev = base;
+ ds->num_ports = DSA_MAX_PORTS;
+ ds->ops = &ksz_switch_ops;
+
+ swdev = devm_kzalloc(base, sizeof(*swdev), GFP_KERNEL);
+ if (!swdev)
+ return NULL;
+
+ ds->priv = swdev;
+ swdev->dev = base;
+
+ swdev->ds = ds;
+ swdev->priv = priv;
+
+ return swdev;
+}
+EXPORT_SYMBOL(ksz_switch_alloc);
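+
+/* Typical bus-glue usage of ksz_switch_alloc() together with
+ * ksz_switch_register() (a minimal sketch only; error handling and the
+ * bus-specific regmap/IRQ setup are omitted, and "ksz_spi_probe" is an
+ * illustrative name, not a reference to a specific implementation):
+ *
+ *	static int ksz_spi_probe(struct spi_device *spi)
+ *	{
+ *		struct ksz_device *dev;
+ *
+ *		dev = ksz_switch_alloc(&spi->dev, spi);
+ *		if (!dev)
+ *			return -ENOMEM;
+ *		// ...set up dev->regmap[] and dev->irq here...
+ *		return ksz_switch_register(dev);
+ *	}
+ */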
+
+static void ksz_parse_rgmii_delay(struct ksz_device *dev, int port_num,
+ struct device_node *port_dn)
+{
+ phy_interface_t phy_mode = dev->ports[port_num].interface;
+ int rx_delay = -1, tx_delay = -1;
+
+ if (!phy_interface_mode_is_rgmii(phy_mode))
+ return;
+
+ of_property_read_u32(port_dn, "rx-internal-delay-ps", &rx_delay);
+ of_property_read_u32(port_dn, "tx-internal-delay-ps", &tx_delay);
+
+ if (rx_delay == -1 && tx_delay == -1) {
+ dev_warn(dev->dev,
+ "Port %d interpreting RGMII delay settings based on \"phy-mode\" property, "
+ "please update device tree to specify \"rx-internal-delay-ps\" and "
+ "\"tx-internal-delay-ps\"",
+ port_num);
+
+ if (phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
+ phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+ rx_delay = 2000;
+
+ if (phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
+ phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+ tx_delay = 2000;
+ }
+
+ if (rx_delay < 0)
+ rx_delay = 0;
+ if (tx_delay < 0)
+ tx_delay = 0;
+
+ dev->ports[port_num].rgmii_rx_val = rx_delay;
+ dev->ports[port_num].rgmii_tx_val = tx_delay;
+}
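+
+/* Device-tree sketch for the properties parsed above (illustrative node
+ * and values; only the two delay properties and "phy-mode" are consumed
+ * by this function):
+ *
+ *	port@5 {
+ *		reg = <5>;
+ *		phy-mode = "rgmii";
+ *		rx-internal-delay-ps = <2000>;
+ *		tx-internal-delay-ps = <2000>;
+ *	};
+ */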
+
+int ksz_switch_register(struct ksz_device *dev)
+{
+ const struct ksz_chip_data *info;
+ struct device_node *port, *ports;
+ phy_interface_t interface;
+ unsigned int port_num;
+ int ret;
+ int i;
+
+ if (dev->pdata)
+ dev->chip_id = dev->pdata->chip_id;
+
+ dev->reset_gpio = devm_gpiod_get_optional(dev->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(dev->reset_gpio))
+ return PTR_ERR(dev->reset_gpio);
+
+ if (dev->reset_gpio) {
+ gpiod_set_value_cansleep(dev->reset_gpio, 1);
+ usleep_range(10000, 12000);
+ gpiod_set_value_cansleep(dev->reset_gpio, 0);
+ msleep(100);
+ }
+
+ mutex_init(&dev->dev_mutex);
+ mutex_init(&dev->regmap_mutex);
+ mutex_init(&dev->alu_mutex);
+ mutex_init(&dev->vlan_mutex);
+
+ ret = ksz_switch_detect(dev);
+ if (ret)
+ return ret;
+
+ info = ksz_lookup_info(dev->chip_id);
+ if (!info)
+ return -ENODEV;
+
+ /* Update the compatible info with the probed one */
+ dev->info = info;
+
+ dev_info(dev->dev, "found switch: %s, rev %i\n",
+ dev->info->dev_name, dev->chip_rev);
+
+ ret = ksz_check_device_id(dev);
+ if (ret)
+ return ret;
+
+ dev->dev_ops = dev->info->ops;
+
+ ret = dev->dev_ops->init(dev);
+ if (ret)
+ return ret;
+
+ dev->ports = devm_kzalloc(dev->dev,
+ dev->info->port_cnt * sizeof(struct ksz_port),
+ GFP_KERNEL);
+ if (!dev->ports)
+ return -ENOMEM;
+
+ for (i = 0; i < dev->info->port_cnt; i++) {
+ spin_lock_init(&dev->ports[i].mib.stats64_lock);
+ mutex_init(&dev->ports[i].mib.cnt_mutex);
+ dev->ports[i].mib.counters =
+ devm_kzalloc(dev->dev,
+ sizeof(u64) * (dev->info->mib_cnt + 1),
+ GFP_KERNEL);
+ if (!dev->ports[i].mib.counters)
+ return -ENOMEM;
+
+ dev->ports[i].ksz_dev = dev;
+ dev->ports[i].num = i;
+ }
+
+ /* set the real number of ports */
+ dev->ds->num_ports = dev->info->port_cnt;
+
+ /* The host port interface will be self-detected, unless it is
+ * specifically set in the device tree.
+ */
+ for (port_num = 0; port_num < dev->info->port_cnt; ++port_num)
+ dev->ports[port_num].interface = PHY_INTERFACE_MODE_NA;
+ if (dev->dev->of_node) {
+ ret = of_get_phy_mode(dev->dev->of_node, &interface);
+ if (ret == 0)
+ dev->compat_interface = interface;
+ ports = of_get_child_by_name(dev->dev->of_node, "ethernet-ports");
+ if (!ports)
+ ports = of_get_child_by_name(dev->dev->of_node, "ports");
+ if (ports) {
+ for_each_available_child_of_node(ports, port) {
+ if (of_property_read_u32(port, "reg",
+ &port_num))
+ continue;
+ if (!(dev->port_mask & BIT(port_num))) {
+ of_node_put(port);
+ of_node_put(ports);
+ return -EINVAL;
+ }
+ of_get_phy_mode(port,
+ &dev->ports[port_num].interface);
+
+ ksz_parse_rgmii_delay(dev, port_num, port);
+ }
+ of_node_put(ports);
+ }
+ dev->synclko_125 = of_property_read_bool(dev->dev->of_node,
+ "microchip,synclko-125");
+ dev->synclko_disable = of_property_read_bool(dev->dev->of_node,
+ "microchip,synclko-disable");
+ if (dev->synclko_125 && dev->synclko_disable) {
+ dev_err(dev->dev, "inconsistent synclko settings\n");
+ return -EINVAL;
+ }
+ }
+
+ ret = dsa_register_switch(dev->ds);
+ if (ret) {
+ dev->dev_ops->exit(dev);
+ return ret;
+ }
+
+ /* Read MIB counters every 5 seconds to avoid overflow. */
+ dev->mib_read_interval = msecs_to_jiffies(5000);
+
+ /* Start the MIB timer. */
+ schedule_delayed_work(&dev->mib_read, 0);
+
+ return ret;
+}
+EXPORT_SYMBOL(ksz_switch_register);
+
+void ksz_switch_remove(struct ksz_device *dev)
+{
+ /* timer started */
+ if (dev->mib_read_interval) {
+ dev->mib_read_interval = 0;
+ cancel_delayed_work_sync(&dev->mib_read);
+ }
+
+ dev->dev_ops->exit(dev);
+ dsa_unregister_switch(dev->ds);
+
+ if (dev->reset_gpio)
+ gpiod_set_value_cansleep(dev->reset_gpio, 1);
+}
+EXPORT_SYMBOL(ksz_switch_remove);
+
+MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
+MODULE_DESCRIPTION("Microchip KSZ Series Switch DSA Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
new file mode 100644
index 000000000..d1b2db8e6
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -0,0 +1,632 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Microchip switch driver common header
+ *
+ * Copyright (C) 2017-2019 Microchip Technology Inc.
+ */
+
+#ifndef __KSZ_COMMON_H
+#define __KSZ_COMMON_H
+
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
+#include <net/dsa.h>
+#include <linux/irq.h>
+
+#define KSZ_MAX_NUM_PORTS 8
+
+struct ksz_device;
+
+struct vlan_table {
+ u32 table[3];
+};
+
+struct ksz_port_mib {
+ struct mutex cnt_mutex; /* structure access */
+ u8 cnt_ptr;
+ u64 *counters;
+ struct rtnl_link_stats64 stats64;
+ struct ethtool_pause_stats pause_stats;
+ struct spinlock stats64_lock;
+};
+
+struct ksz_mib_names {
+ int index;
+ char string[ETH_GSTRING_LEN];
+};
+
+struct ksz_chip_data {
+ u32 chip_id;
+ const char *dev_name;
+ int num_vlans;
+ int num_alus;
+ int num_statics;
+ int cpu_ports;
+ int port_cnt;
+ u8 port_nirqs;
+ const struct ksz_dev_ops *ops;
+ bool phy_errata_9477;
+ bool ksz87xx_eee_link_erratum;
+ const struct ksz_mib_names *mib_names;
+ int mib_cnt;
+ u8 reg_mib_cnt;
+ const u16 *regs;
+ const u32 *masks;
+ const u8 *shifts;
+ const u8 *xmii_ctrl0;
+ const u8 *xmii_ctrl1;
+ int stp_ctrl_reg;
+ int broadcast_ctrl_reg;
+ int multicast_ctrl_reg;
+ int start_ctrl_reg;
+ bool supports_mii[KSZ_MAX_NUM_PORTS];
+ bool supports_rmii[KSZ_MAX_NUM_PORTS];
+ bool supports_rgmii[KSZ_MAX_NUM_PORTS];
+ bool internal_phy[KSZ_MAX_NUM_PORTS];
+ bool gbit_capable[KSZ_MAX_NUM_PORTS];
+ const struct regmap_access_table *wr_table;
+ const struct regmap_access_table *rd_table;
+};
+
+struct ksz_irq {
+ u16 masked;
+ u16 reg_mask;
+ u16 reg_status;
+ struct irq_domain *domain;
+ int nirqs;
+ int irq_num;
+ char name[16];
+ struct ksz_device *dev;
+};
+
+struct ksz_port {
+ bool remove_tag; /* Remove Tag flag set, for ksz8795 only */
+ bool learning;
+ int stp_state;
+ struct phy_device phydev;
+
+ u32 on:1; /* port is not disabled by hardware */
+ u32 fiber:1; /* port is fiber */
+ u32 force:1;
+ u32 read:1; /* read MIB counters in background */
+ u32 freeze:1; /* MIB counter freeze is enabled */
+
+ struct ksz_port_mib mib;
+ phy_interface_t interface;
+ u16 max_frame;
+ u32 rgmii_tx_val;
+ u32 rgmii_rx_val;
+ struct ksz_device *ksz_dev;
+ struct ksz_irq pirq;
+ u8 num;
+};
+
+struct ksz_device {
+ struct dsa_switch *ds;
+ struct ksz_platform_data *pdata;
+ const struct ksz_chip_data *info;
+
+ struct mutex dev_mutex; /* device access */
+ struct mutex regmap_mutex; /* regmap access */
+ struct mutex alu_mutex; /* ALU access */
+ struct mutex vlan_mutex; /* vlan access */
+ const struct ksz_dev_ops *dev_ops;
+
+ struct device *dev;
+ struct regmap *regmap[3];
+
+ void *priv;
+ int irq;
+
+ struct gpio_desc *reset_gpio; /* Optional reset GPIO */
+
+ /* chip specific data */
+ u32 chip_id;
+ u8 chip_rev;
+ int cpu_port; /* port connected to CPU */
+ int phy_port_cnt;
+ phy_interface_t compat_interface;
+ bool synclko_125;
+ bool synclko_disable;
+
+ struct vlan_table *vlan_cache;
+
+ struct ksz_port *ports;
+ struct delayed_work mib_read;
+ unsigned long mib_read_interval;
+ u16 mirror_rx;
+ u16 mirror_tx;
+ u16 port_mask;
+ struct mutex lock_irq; /* IRQ Access */
+ struct ksz_irq girq;
+};
+
+/* List of supported models */
+enum ksz_model {
+ KSZ8563,
+ KSZ8795,
+ KSZ8794,
+ KSZ8765,
+ KSZ8830,
+ KSZ9477,
+ KSZ9896,
+ KSZ9897,
+ KSZ9893,
+ KSZ9567,
+ LAN9370,
+ LAN9371,
+ LAN9372,
+ LAN9373,
+ LAN9374,
+};
+
+enum ksz_chip_id {
+ KSZ8563_CHIP_ID = 0x8563,
+ KSZ8795_CHIP_ID = 0x8795,
+ KSZ8794_CHIP_ID = 0x8794,
+ KSZ8765_CHIP_ID = 0x8765,
+ KSZ8830_CHIP_ID = 0x8830,
+ KSZ9477_CHIP_ID = 0x00947700,
+ KSZ9896_CHIP_ID = 0x00989600,
+ KSZ9897_CHIP_ID = 0x00989700,
+ KSZ9893_CHIP_ID = 0x00989300,
+ KSZ9567_CHIP_ID = 0x00956700,
+ LAN9370_CHIP_ID = 0x00937000,
+ LAN9371_CHIP_ID = 0x00937100,
+ LAN9372_CHIP_ID = 0x00937200,
+ LAN9373_CHIP_ID = 0x00937300,
+ LAN9374_CHIP_ID = 0x00937400,
+};
+
+enum ksz_regs {
+ REG_IND_CTRL_0,
+ REG_IND_DATA_8,
+ REG_IND_DATA_CHECK,
+ REG_IND_DATA_HI,
+ REG_IND_DATA_LO,
+ REG_IND_MIB_CHECK,
+ REG_IND_BYTE,
+ P_FORCE_CTRL,
+ P_LINK_STATUS,
+ P_LOCAL_CTRL,
+ P_NEG_RESTART_CTRL,
+ P_REMOTE_STATUS,
+ P_SPEED_STATUS,
+ S_TAIL_TAG_CTRL,
+ P_STP_CTRL,
+ S_START_CTRL,
+ S_BROADCAST_CTRL,
+ S_MULTICAST_CTRL,
+ P_XMII_CTRL_0,
+ P_XMII_CTRL_1,
+};
+
+enum ksz_masks {
+ PORT_802_1P_REMAPPING,
+ SW_TAIL_TAG_ENABLE,
+ MIB_COUNTER_OVERFLOW,
+ MIB_COUNTER_VALID,
+ VLAN_TABLE_FID,
+ VLAN_TABLE_MEMBERSHIP,
+ VLAN_TABLE_VALID,
+ STATIC_MAC_TABLE_VALID,
+ STATIC_MAC_TABLE_USE_FID,
+ STATIC_MAC_TABLE_FID,
+ STATIC_MAC_TABLE_OVERRIDE,
+ STATIC_MAC_TABLE_FWD_PORTS,
+ DYNAMIC_MAC_TABLE_ENTRIES_H,
+ DYNAMIC_MAC_TABLE_MAC_EMPTY,
+ DYNAMIC_MAC_TABLE_NOT_READY,
+ DYNAMIC_MAC_TABLE_ENTRIES,
+ DYNAMIC_MAC_TABLE_FID,
+ DYNAMIC_MAC_TABLE_SRC_PORT,
+ DYNAMIC_MAC_TABLE_TIMESTAMP,
+ ALU_STAT_WRITE,
+ ALU_STAT_READ,
+ P_MII_TX_FLOW_CTRL,
+ P_MII_RX_FLOW_CTRL,
+};
+
+enum ksz_shifts {
+ VLAN_TABLE_MEMBERSHIP_S,
+ VLAN_TABLE,
+ STATIC_MAC_FWD_PORTS,
+ STATIC_MAC_FID,
+ DYNAMIC_MAC_ENTRIES_H,
+ DYNAMIC_MAC_ENTRIES,
+ DYNAMIC_MAC_FID,
+ DYNAMIC_MAC_TIMESTAMP,
+ DYNAMIC_MAC_SRC_PORT,
+ ALU_STAT_INDEX,
+};
+
+enum ksz_xmii_ctrl0 {
+ P_MII_100MBIT,
+ P_MII_10MBIT,
+ P_MII_FULL_DUPLEX,
+ P_MII_HALF_DUPLEX,
+};
+
+enum ksz_xmii_ctrl1 {
+ P_RGMII_SEL,
+ P_RMII_SEL,
+ P_GMII_SEL,
+ P_MII_SEL,
+ P_GMII_1GBIT,
+ P_GMII_NOT_1GBIT,
+};
+
+struct alu_struct {
+ /* entry 1 */
+ u8 is_static:1;
+ u8 is_src_filter:1;
+ u8 is_dst_filter:1;
+ u8 prio_age:3;
+ u32 _reserv_0_1:23;
+ u8 mstp:3;
+ /* entry 2 */
+ u8 is_override:1;
+ u8 is_use_fid:1;
+ u32 _reserv_1_1:23;
+ u8 port_forward:7;
+ /* entry 3 & 4*/
+ u32 _reserv_2_1:9;
+ u8 fid:7;
+ u8 mac[ETH_ALEN];
+};
+
+struct ksz_dev_ops {
+ int (*setup)(struct dsa_switch *ds);
+ void (*teardown)(struct dsa_switch *ds);
+ u32 (*get_port_addr)(int port, int offset);
+ void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member);
+ void (*flush_dyn_mac_table)(struct ksz_device *dev, int port);
+ void (*port_cleanup)(struct ksz_device *dev, int port);
+ void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port);
+ int (*set_ageing_time)(struct ksz_device *dev, unsigned int msecs);
+ int (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
+ int (*w_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
+ void (*r_mib_cnt)(struct ksz_device *dev, int port, u16 addr,
+ u64 *cnt);
+ void (*r_mib_pkt)(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt);
+ void (*r_mib_stat64)(struct ksz_device *dev, int port);
+ int (*vlan_filtering)(struct ksz_device *dev, int port,
+ bool flag, struct netlink_ext_ack *extack);
+ int (*vlan_add)(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack);
+ int (*vlan_del)(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+ int (*mirror_add)(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack);
+ void (*mirror_del)(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror);
+ int (*fdb_add)(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db);
+ int (*fdb_del)(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db);
+ int (*fdb_dump)(struct ksz_device *dev, int port,
+ dsa_fdb_dump_cb_t *cb, void *data);
+ int (*mdb_add)(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
+ int (*mdb_del)(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
+ void (*get_caps)(struct ksz_device *dev, int port,
+ struct phylink_config *config);
+ int (*change_mtu)(struct ksz_device *dev, int port, int mtu);
+ int (*max_mtu)(struct ksz_device *dev, int port);
+ void (*freeze_mib)(struct ksz_device *dev, int port, bool freeze);
+ void (*port_init_cnt)(struct ksz_device *dev, int port);
+ void (*phylink_mac_config)(struct ksz_device *dev, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state);
+ void (*phylink_mac_link_up)(struct ksz_device *dev, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause, bool rx_pause);
+ void (*setup_rgmii_delay)(struct ksz_device *dev, int port);
+ void (*config_cpu_port)(struct dsa_switch *ds);
+ int (*enable_stp_addr)(struct ksz_device *dev);
+ int (*reset)(struct ksz_device *dev);
+ int (*init)(struct ksz_device *dev);
+ void (*exit)(struct ksz_device *dev);
+};
+
+struct ksz_device *ksz_switch_alloc(struct device *base, void *priv);
+int ksz_switch_register(struct ksz_device *dev);
+void ksz_switch_remove(struct ksz_device *dev);
+
+void ksz_init_mib_timer(struct ksz_device *dev);
+void ksz_r_mib_stats64(struct ksz_device *dev, int port);
+void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state);
+bool ksz_get_gbit(struct ksz_device *dev, int port);
+phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit);
+extern const struct ksz_chip_data ksz_switch_chips[];
+
+/* Common register access functions */
+
+static inline int ksz_read8(struct ksz_device *dev, u32 reg, u8 *val)
+{
+ unsigned int value;
+ int ret = regmap_read(dev->regmap[0], reg, &value);
+
+ if (ret)
+ dev_err(dev->dev, "can't read 8bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
+ *val = value;
+ return ret;
+}
+
+static inline int ksz_read16(struct ksz_device *dev, u32 reg, u16 *val)
+{
+ unsigned int value;
+ int ret = regmap_read(dev->regmap[1], reg, &value);
+
+ if (ret)
+ dev_err(dev->dev, "can't read 16bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
+ *val = value;
+ return ret;
+}
+
+static inline int ksz_read32(struct ksz_device *dev, u32 reg, u32 *val)
+{
+ unsigned int value;
+ int ret = regmap_read(dev->regmap[2], reg, &value);
+
+ if (ret)
+ dev_err(dev->dev, "can't read 32bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
+ *val = value;
+ return ret;
+}
+
+static inline int ksz_read64(struct ksz_device *dev, u32 reg, u64 *val)
+{
+ u32 value[2];
+ int ret;
+
+ ret = regmap_bulk_read(dev->regmap[2], reg, value, 2);
+ if (ret)
+ dev_err(dev->dev, "can't read 64bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+ else
+ *val = (u64)value[0] << 32 | value[1];
+
+ return ret;
+}
+
+static inline int ksz_write8(struct ksz_device *dev, u32 reg, u8 value)
+{
+ int ret;
+
+ ret = regmap_write(dev->regmap[0], reg, value);
+ if (ret)
+ dev_err(dev->dev, "can't write 8bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
+ return ret;
+}
+
+static inline int ksz_write16(struct ksz_device *dev, u32 reg, u16 value)
+{
+ int ret;
+
+ ret = regmap_write(dev->regmap[1], reg, value);
+ if (ret)
+ dev_err(dev->dev, "can't write 16bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
+ return ret;
+}
+
+static inline int ksz_write32(struct ksz_device *dev, u32 reg, u32 value)
+{
+ int ret;
+
+ ret = regmap_write(dev->regmap[2], reg, value);
+ if (ret)
+ dev_err(dev->dev, "can't write 32bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
+ return ret;
+}
+
+static inline int ksz_write64(struct ksz_device *dev, u32 reg, u64 value)
+{
+ u32 val[2];
+
+ /* Ick! ToDo: Add 64bit R/W to regmap on 32bit systems */
+ value = swab64(value);
+ val[0] = swab32(value & 0xffffffffULL);
+ val[1] = swab32(value >> 32ULL);
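+ /* The swab dance above is equivalent to val[0] = upper 32 bits and
+ * val[1] = lower 32 bits of the value in host order; e.g.
+ * 0x0011223344556677 yields val[0] = 0x00112233 and
+ * val[1] = 0x44556677, which regmap then emits big-endian.
+ */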
+
+ return regmap_bulk_write(dev->regmap[2], reg, val, 2);
+}
+
+static inline int ksz_pread8(struct ksz_device *dev, int port, int offset,
+ u8 *data)
+{
+ return ksz_read8(dev, dev->dev_ops->get_port_addr(port, offset), data);
+}
+
+static inline int ksz_pread16(struct ksz_device *dev, int port, int offset,
+ u16 *data)
+{
+ return ksz_read16(dev, dev->dev_ops->get_port_addr(port, offset), data);
+}
+
+static inline int ksz_pread32(struct ksz_device *dev, int port, int offset,
+ u32 *data)
+{
+ return ksz_read32(dev, dev->dev_ops->get_port_addr(port, offset), data);
+}
+
+static inline int ksz_pwrite8(struct ksz_device *dev, int port, int offset,
+ u8 data)
+{
+ return ksz_write8(dev, dev->dev_ops->get_port_addr(port, offset), data);
+}
+
+static inline int ksz_pwrite16(struct ksz_device *dev, int port, int offset,
+ u16 data)
+{
+ return ksz_write16(dev, dev->dev_ops->get_port_addr(port, offset),
+ data);
+}
+
+static inline int ksz_pwrite32(struct ksz_device *dev, int port, int offset,
+ u32 data)
+{
+ return ksz_write32(dev, dev->dev_ops->get_port_addr(port, offset),
+ data);
+}
+
+static inline void ksz_prmw8(struct ksz_device *dev, int port, int offset,
+ u8 mask, u8 val)
+{
+ regmap_update_bits(dev->regmap[0],
+ dev->dev_ops->get_port_addr(port, offset),
+ mask, val);
+}
+
+static inline void ksz_regmap_lock(void *__mtx)
+{
+ struct mutex *mtx = __mtx;
+
+ mutex_lock(mtx);
+}
+
+static inline void ksz_regmap_unlock(void *__mtx)
+{
+ struct mutex *mtx = __mtx;
+
+ mutex_unlock(mtx);
+}
+
+static inline bool ksz_is_ksz87xx(struct ksz_device *dev)
+{
+ return dev->chip_id == KSZ8795_CHIP_ID ||
+ dev->chip_id == KSZ8794_CHIP_ID ||
+ dev->chip_id == KSZ8765_CHIP_ID;
+}
+
+static inline bool ksz_is_ksz88x3(struct ksz_device *dev)
+{
+ return dev->chip_id == KSZ8830_CHIP_ID;
+}
+
+static inline bool is_lan937x(struct ksz_device *dev)
+{
+ return dev->chip_id == LAN9370_CHIP_ID ||
+ dev->chip_id == LAN9371_CHIP_ID ||
+ dev->chip_id == LAN9372_CHIP_ID ||
+ dev->chip_id == LAN9373_CHIP_ID ||
+ dev->chip_id == LAN9374_CHIP_ID;
+}
+
+/* STP State Defines */
+#define PORT_TX_ENABLE BIT(2)
+#define PORT_RX_ENABLE BIT(1)
+#define PORT_LEARN_DISABLE BIT(0)
+
+/* Switch ID Defines */
+#define REG_CHIP_ID0 0x00
+
+#define SW_FAMILY_ID_M GENMASK(15, 8)
+#define KSZ87_FAMILY_ID 0x87
+#define KSZ88_FAMILY_ID 0x88
+
+#define KSZ8_PORT_STATUS_0 0x08
+#define KSZ8_PORT_FIBER_MODE BIT(7)
+
+#define SW_CHIP_ID_M GENMASK(7, 4)
+#define KSZ87_CHIP_ID_94 0x6
+#define KSZ87_CHIP_ID_95 0x9
+#define KSZ88_CHIP_ID_63 0x3
+
+#define SW_REV_ID_M GENMASK(7, 4)
+
+/* KSZ9893, KSZ9563, KSZ8563 specific register */
+#define REG_CHIP_ID4 0x0f
+#define SKU_ID_KSZ8563 0x3c
+
+/* The driver sets switch broadcast storm protection at a 10% rate. */
+#define BROADCAST_STORM_PROT_RATE 10
+
+/* 148,800 frames/sec * 67 ms measurement interval = ~9969 frames; the
+ * 10% BROADCAST_STORM_PROT_RATE above is applied as a percentage of
+ * this value when the rate is programmed.
+ */
+#define BROADCAST_STORM_VALUE 9969
+
+#define BROADCAST_STORM_RATE_HI 0x07
+#define BROADCAST_STORM_RATE_LO 0xFF
+#define BROADCAST_STORM_RATE 0x07FF
+
+#define MULTICAST_STORM_DISABLE BIT(6)
+
+#define SW_START 0x01
+
+/* xMII configuration */
+#define P_MII_DUPLEX_M BIT(6)
+#define P_MII_100MBIT_M BIT(4)
+
+#define P_GMII_1GBIT_M BIT(6)
+#define P_RGMII_ID_IG_ENABLE BIT(4)
+#define P_RGMII_ID_EG_ENABLE BIT(3)
+#define P_MII_MAC_MODE BIT(2)
+#define P_MII_SEL_M 0x3
+
+/* Interrupt */
+#define REG_SW_PORT_INT_STATUS__1 0x001B
+#define REG_SW_PORT_INT_MASK__1 0x001F
+
+#define REG_PORT_INT_STATUS 0x001B
+#define REG_PORT_INT_MASK 0x001F
+
+#define PORT_SRC_PHY_INT 1
+
+/* Regmap tables generation */
+#define KSZ_SPI_OP_RD 3
+#define KSZ_SPI_OP_WR 2
+
+#define swabnot_used(x) 0
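+/* The swp argument of KSZ_SPI_OP_FLAG_MASK below selects swab16/swab32
+ * for bus-specific byte order; buses that need no swapping pass
+ * "not_used", which resolves to the stub above (the I2C variant, for
+ * instance).
+ */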
+
+#define KSZ_SPI_OP_FLAG_MASK(opcode, swp, regbits, regpad) \
+ swab##swp((opcode) << ((regbits) + (regpad)))
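+
+/* A worked example, using the KSZ9477 SPI layout from ksz_spi.c below
+ * (32-bit swab, 24 address bits, 5 turnaround bits):
+ * KSZ_SPI_OP_FLAG_MASK(KSZ_SPI_OP_RD, 32, 24, 5) expands to
+ * swab32(3 << 29) = swab32(0x60000000) = 0x00000060. regmap consumes the
+ * flag mask LSB-byte-first, so the read opcode lands in the first (most
+ * significant) byte of the big-endian 32-bit command word.
+ */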
+
+#define KSZ_REGMAP_ENTRY(width, swp, regbits, regpad, regalign) \
+ { \
+ .name = #width, \
+ .val_bits = (width), \
+ .reg_stride = 1, \
+ .reg_bits = (regbits) + (regalign), \
+ .pad_bits = (regpad), \
+ .max_register = BIT(regbits) - 1, \
+ .cache_type = REGCACHE_NONE, \
+ .read_flag_mask = \
+ KSZ_SPI_OP_FLAG_MASK(KSZ_SPI_OP_RD, swp, \
+ regbits, regpad), \
+ .write_flag_mask = \
+ KSZ_SPI_OP_FLAG_MASK(KSZ_SPI_OP_WR, swp, \
+ regbits, regpad), \
+ .lock = ksz_regmap_lock, \
+ .unlock = ksz_regmap_unlock, \
+ .reg_format_endian = REGMAP_ENDIAN_BIG, \
+ .val_format_endian = REGMAP_ENDIAN_BIG \
+ }
+
+#define KSZ_REGMAP_TABLE(ksz, swp, regbits, regpad, regalign) \
+ static const struct regmap_config ksz##_regmap_config[] = { \
+ KSZ_REGMAP_ENTRY(8, swp, (regbits), (regpad), (regalign)), \
+ KSZ_REGMAP_ENTRY(16, swp, (regbits), (regpad), (regalign)), \
+ KSZ_REGMAP_ENTRY(32, swp, (regbits), (regpad), (regalign)), \
+ }
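+
+/* Usage sketch: ksz_spi.c below instantiates this as
+ * KSZ_REGMAP_TABLE(ksz9477, 32, KSZ9477_SPI_ADDR_SHIFT,
+ * KSZ9477_SPI_TURNAROUND_SHIFT, KSZ9477_SPI_ADDR_ALIGN), producing
+ * ksz9477_regmap_config[] with one entry each for 8/16/32-bit access.
+ */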
+
+#endif
diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c
new file mode 100644
index 000000000..1b6ab891b
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_spi.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Microchip ksz series register access through SPI
+ *
+ * Copyright (C) 2017 Microchip Technology Inc.
+ * Tristram Ha <Tristram.Ha@microchip.com>
+ */
+
+#include <asm/unaligned.h>
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "ksz_common.h"
+
+#define KSZ8795_SPI_ADDR_SHIFT 12
+#define KSZ8795_SPI_ADDR_ALIGN 3
+#define KSZ8795_SPI_TURNAROUND_SHIFT 1
+
+#define KSZ8863_SPI_ADDR_SHIFT 8
+#define KSZ8863_SPI_ADDR_ALIGN 8
+#define KSZ8863_SPI_TURNAROUND_SHIFT 0
+
+#define KSZ9477_SPI_ADDR_SHIFT 24
+#define KSZ9477_SPI_ADDR_ALIGN 3
+#define KSZ9477_SPI_TURNAROUND_SHIFT 5
+
+KSZ_REGMAP_TABLE(ksz8795, 16, KSZ8795_SPI_ADDR_SHIFT,
+ KSZ8795_SPI_TURNAROUND_SHIFT, KSZ8795_SPI_ADDR_ALIGN);
+
+KSZ_REGMAP_TABLE(ksz8863, 16, KSZ8863_SPI_ADDR_SHIFT,
+ KSZ8863_SPI_TURNAROUND_SHIFT, KSZ8863_SPI_ADDR_ALIGN);
+
+KSZ_REGMAP_TABLE(ksz9477, 32, KSZ9477_SPI_ADDR_SHIFT,
+ KSZ9477_SPI_TURNAROUND_SHIFT, KSZ9477_SPI_ADDR_ALIGN);
+
+static int ksz_spi_probe(struct spi_device *spi)
+{
+ const struct regmap_config *regmap_config;
+ const struct ksz_chip_data *chip;
+ struct device *ddev = &spi->dev;
+ struct regmap_config rc;
+ struct ksz_device *dev;
+ int i, ret = 0;
+
+ dev = ksz_switch_alloc(&spi->dev, spi);
+ if (!dev)
+ return -ENOMEM;
+
+ chip = device_get_match_data(ddev);
+ if (!chip)
+ return -EINVAL;
+
+ if (chip->chip_id == KSZ8830_CHIP_ID)
+ regmap_config = ksz8863_regmap_config;
+ else if (chip->chip_id == KSZ8795_CHIP_ID ||
+ chip->chip_id == KSZ8794_CHIP_ID ||
+ chip->chip_id == KSZ8765_CHIP_ID)
+ regmap_config = ksz8795_regmap_config;
+ else
+ regmap_config = ksz9477_regmap_config;
+
+ for (i = 0; i < ARRAY_SIZE(ksz8795_regmap_config); i++) {
+ rc = regmap_config[i];
+ rc.lock_arg = &dev->regmap_mutex;
+ rc.wr_table = chip->wr_table;
+ rc.rd_table = chip->rd_table;
+ dev->regmap[i] = devm_regmap_init_spi(spi, &rc);
+
+ if (IS_ERR(dev->regmap[i])) {
+ ret = PTR_ERR(dev->regmap[i]);
+ dev_err(&spi->dev,
+ "Failed to initialize regmap%i: %d\n",
+ regmap_config[i].val_bits, ret);
+ return ret;
+ }
+ }
+
+ if (spi->dev.platform_data)
+ dev->pdata = spi->dev.platform_data;
+
+ /* setup spi */
+ spi->mode = SPI_MODE_3;
+ ret = spi_setup(spi);
+ if (ret)
+ return ret;
+
+ dev->irq = spi->irq;
+
+ ret = ksz_switch_register(dev);
+
+ /* Main DSA driver may not be started yet. */
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, dev);
+
+ return 0;
+}
+
+static void ksz_spi_remove(struct spi_device *spi)
+{
+ struct ksz_device *dev = spi_get_drvdata(spi);
+
+ if (dev)
+ ksz_switch_remove(dev);
+}
+
+static void ksz_spi_shutdown(struct spi_device *spi)
+{
+ struct ksz_device *dev = spi_get_drvdata(spi);
+
+ if (!dev)
+ return;
+
+ if (dev->dev_ops->reset)
+ dev->dev_ops->reset(dev);
+
+ dsa_switch_shutdown(dev->ds);
+
+ spi_set_drvdata(spi, NULL);
+}
+
+static const struct of_device_id ksz_dt_ids[] = {
+ {
+ .compatible = "microchip,ksz8765",
+ .data = &ksz_switch_chips[KSZ8765]
+ },
+ {
+ .compatible = "microchip,ksz8794",
+ .data = &ksz_switch_chips[KSZ8794]
+ },
+ {
+ .compatible = "microchip,ksz8795",
+ .data = &ksz_switch_chips[KSZ8795]
+ },
+ {
+ .compatible = "microchip,ksz8863",
+ .data = &ksz_switch_chips[KSZ8830]
+ },
+ {
+ .compatible = "microchip,ksz8873",
+ .data = &ksz_switch_chips[KSZ8830]
+ },
+ {
+ .compatible = "microchip,ksz9477",
+ .data = &ksz_switch_chips[KSZ9477]
+ },
+ {
+ .compatible = "microchip,ksz9896",
+ .data = &ksz_switch_chips[KSZ9896]
+ },
+ {
+ .compatible = "microchip,ksz9897",
+ .data = &ksz_switch_chips[KSZ9897]
+ },
+ {
+ .compatible = "microchip,ksz9893",
+ .data = &ksz_switch_chips[KSZ9893]
+ },
+ {
+ .compatible = "microchip,ksz9563",
+ .data = &ksz_switch_chips[KSZ9893]
+ },
+ {
+ .compatible = "microchip,ksz8563",
+ .data = &ksz_switch_chips[KSZ8563]
+ },
+ {
+ .compatible = "microchip,ksz9567",
+ .data = &ksz_switch_chips[KSZ9567]
+ },
+ {
+ .compatible = "microchip,lan9370",
+ .data = &ksz_switch_chips[LAN9370]
+ },
+ {
+ .compatible = "microchip,lan9371",
+ .data = &ksz_switch_chips[LAN9371]
+ },
+ {
+ .compatible = "microchip,lan9372",
+ .data = &ksz_switch_chips[LAN9372]
+ },
+ {
+ .compatible = "microchip,lan9373",
+ .data = &ksz_switch_chips[LAN9373]
+ },
+ {
+ .compatible = "microchip,lan9374",
+ .data = &ksz_switch_chips[LAN9374]
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ksz_dt_ids);
+
+static const struct spi_device_id ksz_spi_ids[] = {
+ { "ksz8765" },
+ { "ksz8794" },
+ { "ksz8795" },
+ { "ksz8863" },
+ { "ksz8873" },
+ { "ksz9477" },
+ { "ksz9896" },
+ { "ksz9897" },
+ { "ksz9893" },
+ { "ksz9563" },
+ { "ksz8563" },
+ { "ksz9567" },
+ { "lan9370" },
+ { "lan9371" },
+ { "lan9372" },
+ { "lan9373" },
+ { "lan9374" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, ksz_spi_ids);
+
+static struct spi_driver ksz_spi_driver = {
+ .driver = {
+ .name = "ksz-switch",
+ .owner = THIS_MODULE,
+ .of_match_table = ksz_dt_ids,
+ },
+ .id_table = ksz_spi_ids,
+ .probe = ksz_spi_probe,
+ .remove = ksz_spi_remove,
+ .shutdown = ksz_spi_shutdown,
+};
+
+module_spi_driver(ksz_spi_driver);
+
+MODULE_ALIAS("spi:ksz9477");
+MODULE_ALIAS("spi:ksz9896");
+MODULE_ALIAS("spi:ksz9897");
+MODULE_ALIAS("spi:ksz9893");
+MODULE_ALIAS("spi:ksz9563");
+MODULE_ALIAS("spi:ksz8563");
+MODULE_ALIAS("spi:ksz9567");
+MODULE_ALIAS("spi:lan937x");
+MODULE_AUTHOR("Tristram Ha <Tristram.Ha@microchip.com>");
+MODULE_DESCRIPTION("Microchip ksz Series Switch SPI Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/lan937x.h b/drivers/net/dsa/microchip/lan937x.h
new file mode 100644
index 000000000..8e9e66d67
--- /dev/null
+++ b/drivers/net/dsa/microchip/lan937x.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Microchip lan937x dev ops headers
+ * Copyright (C) 2019-2022 Microchip Technology Inc.
+ */
+
+#ifndef __LAN937X_CFG_H
+#define __LAN937X_CFG_H
+
+int lan937x_reset_switch(struct ksz_device *dev);
+int lan937x_setup(struct dsa_switch *ds);
+void lan937x_teardown(struct dsa_switch *ds);
+void lan937x_port_setup(struct ksz_device *dev, int port, bool cpu_port);
+void lan937x_config_cpu_port(struct dsa_switch *ds);
+int lan937x_switch_init(struct ksz_device *dev);
+void lan937x_switch_exit(struct ksz_device *dev);
+int lan937x_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data);
+int lan937x_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val);
+int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu);
+void lan937x_phylink_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config);
+void lan937x_setup_rgmii_delay(struct ksz_device *dev, int port);
+int lan937x_set_ageing_time(struct ksz_device *dev, unsigned int msecs);
+#endif
diff --git a/drivers/net/dsa/microchip/lan937x_main.c b/drivers/net/dsa/microchip/lan937x_main.c
new file mode 100644
index 000000000..7e4f307a0
--- /dev/null
+++ b/drivers/net/dsa/microchip/lan937x_main.c
@@ -0,0 +1,391 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Microchip LAN937X switch driver main logic
+ * Copyright (C) 2019-2022 Microchip Technology Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+#include <linux/phy.h>
+#include <linux/of_net.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/math.h>
+#include <net/dsa.h>
+#include <net/switchdev.h>
+
+#include "lan937x_reg.h"
+#include "ksz_common.h"
+#include "lan937x.h"
+
+static int lan937x_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
+{
+ return regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0);
+}
+
+static int lan937x_port_cfg(struct ksz_device *dev, int port, int offset,
+ u8 bits, bool set)
+{
+ return regmap_update_bits(dev->regmap[0], PORT_CTRL_ADDR(port, offset),
+ bits, set ? bits : 0);
+}
+
+static int lan937x_enable_spi_indirect_access(struct ksz_device *dev)
+{
+ u16 data16;
+ int ret;
+
+ /* Enable Phy access through SPI */
+ ret = lan937x_cfg(dev, REG_GLOBAL_CTRL_0, SW_PHY_REG_BLOCK, false);
+ if (ret < 0)
+ return ret;
+
+ ret = ksz_read16(dev, REG_VPHY_SPECIAL_CTRL__2, &data16);
+ if (ret < 0)
+ return ret;
+
+ /* Allow SPI access */
+ data16 |= VPHY_SPI_INDIRECT_ENABLE;
+
+ return ksz_write16(dev, REG_VPHY_SPECIAL_CTRL__2, data16);
+}
+
+static int lan937x_vphy_ind_addr_wr(struct ksz_device *dev, int addr, int reg)
+{
+ u16 addr_base = REG_PORT_T1_PHY_CTRL_BASE;
+ u16 temp;
+
+ /* get register address based on the logical port */
+ temp = PORT_CTRL_ADDR(addr, (addr_base + (reg << 2)));
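+ /* e.g. addr = 0 and reg = 0 (BMCR) yield
+ * PORT_CTRL_ADDR(0, 0x0100) = 0x1100
+ */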
+
+ return ksz_write16(dev, REG_VPHY_IND_ADDR__2, temp);
+}
+
+static int lan937x_internal_phy_write(struct ksz_device *dev, int addr, int reg,
+ u16 val)
+{
+ unsigned int value;
+ int ret;
+
+ /* Check for internal phy port */
+ if (!dev->info->internal_phy[addr])
+ return -EOPNOTSUPP;
+
+ ret = lan937x_vphy_ind_addr_wr(dev, addr, reg);
+ if (ret < 0)
+ return ret;
+
+ /* Write the data to be written to the VPHY reg */
+ ret = ksz_write16(dev, REG_VPHY_IND_DATA__2, val);
+ if (ret < 0)
+ return ret;
+
+ /* Write the Write En and Busy bit */
+ ret = ksz_write16(dev, REG_VPHY_IND_CTRL__2,
+ (VPHY_IND_WRITE | VPHY_IND_BUSY));
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_poll_timeout(dev->regmap[1], REG_VPHY_IND_CTRL__2,
+ value, !(value & VPHY_IND_BUSY), 10,
+ 1000);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to write phy register\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lan937x_internal_phy_read(struct ksz_device *dev, int addr, int reg,
+ u16 *val)
+{
+ unsigned int value;
+ int ret;
+
+ /* Check for internal phy port, return 0xffff for non-existent phy */
+ if (!dev->info->internal_phy[addr])
+ return 0xffff;
+
+ ret = lan937x_vphy_ind_addr_wr(dev, addr, reg);
+ if (ret < 0)
+ return ret;
+
+ /* Set only the Busy bit to start a read transaction (the Write
+ * bit stays clear)
+ */
+ ret = ksz_write16(dev, REG_VPHY_IND_CTRL__2, VPHY_IND_BUSY);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_poll_timeout(dev->regmap[1], REG_VPHY_IND_CTRL__2,
+ value, !(value & VPHY_IND_BUSY), 10,
+ 1000);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to read phy register\n");
+ return ret;
+ }
+
+ /* Read the VPHY register which has the PHY data */
+ return ksz_read16(dev, REG_VPHY_IND_DATA__2, val);
+}
+
+int lan937x_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
+{
+ return lan937x_internal_phy_read(dev, addr, reg, data);
+}
+
+int lan937x_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
+{
+ return lan937x_internal_phy_write(dev, addr, reg, val);
+}
+
+int lan937x_reset_switch(struct ksz_device *dev)
+{
+ u32 data32;
+ int ret;
+
+ /* reset switch */
+ ret = lan937x_cfg(dev, REG_SW_OPERATION, SW_RESET, true);
+ if (ret < 0)
+ return ret;
+
+ /* Enable Auto Aging */
+ ret = lan937x_cfg(dev, REG_SW_LUE_CTRL_1, SW_LINK_AUTO_AGING, true);
+ if (ret < 0)
+ return ret;
+
+ /* disable interrupts */
+ ret = ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
+ if (ret < 0)
+ return ret;
+
+ ret = ksz_write32(dev, REG_SW_INT_STATUS__4, POR_READY_INT);
+ if (ret < 0)
+ return ret;
+
+ ret = ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0xFF);
+ if (ret < 0)
+ return ret;
+
+ return ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);
+}
+
+void lan937x_port_setup(struct ksz_device *dev, int port, bool cpu_port)
+{
+ const u32 *masks = dev->info->masks;
+ const u16 *regs = dev->info->regs;
+ struct dsa_switch *ds = dev->ds;
+ u8 member;
+
+ /* enable tail tagging on the host port */
+ if (cpu_port)
+ lan937x_port_cfg(dev, port, REG_PORT_CTRL_0,
+ PORT_TAIL_TAG_ENABLE, true);
+
+ /* set back pressure for half duplex */
+ lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE,
+ true);
+
+ /* enable 802.1p priority */
+ lan937x_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);
+
+ if (!dev->info->internal_phy[port])
+ lan937x_port_cfg(dev, port, regs[P_XMII_CTRL_0],
+ masks[P_MII_TX_FLOW_CTRL] |
+ masks[P_MII_RX_FLOW_CTRL],
+ true);
+
+ if (cpu_port)
+ member = dsa_user_ports(ds);
+ else
+ member = BIT(dsa_upstream_port(ds, port));
+
+ dev->dev_ops->cfg_port_member(dev, port, member);
+}
+
+void lan937x_config_cpu_port(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ struct dsa_port *dp;
+
+ dsa_switch_for_each_cpu_port(dp, ds) {
+ if (dev->info->cpu_ports & (1 << dp->index)) {
+ dev->cpu_port = dp->index;
+
+ /* enable cpu port */
+ lan937x_port_setup(dev, dp->index, true);
+ }
+ }
+
+ dsa_switch_for_each_user_port(dp, ds) {
+ ksz_port_stp_state_set(ds, dp->index, BR_STATE_DISABLED);
+ }
+}
+
+int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu)
+{
+ struct dsa_switch *ds = dev->ds;
+ int ret;
+
+ new_mtu += VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+ if (dsa_is_cpu_port(ds, port))
+ new_mtu += LAN937X_TAG_LEN;
+
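+ /* e.g. a standard 1500 byte MTU on a user port becomes
+ * 1500 + VLAN_ETH_HLEN (18) + ETH_FCS_LEN (4) = 1522 bytes,
+ * exactly FR_MIN_SIZE, so jumbo mode is switched on
+ */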
+ if (new_mtu >= FR_MIN_SIZE)
+ ret = lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_0,
+ PORT_JUMBO_PACKET, true);
+ else
+ ret = lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_0,
+ PORT_JUMBO_PACKET, false);
+ if (ret < 0) {
+ dev_err(ds->dev, "failed to enable jumbo\n");
+ return ret;
+ }
+
+ /* Write the frame size in PORT_MAX_FR_SIZE register */
+ ksz_pwrite16(dev, port, PORT_MAX_FR_SIZE, new_mtu);
+
+ return 0;
+}
+
+int lan937x_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
+{
+ u32 secs = msecs / 1000;
+ u32 value;
+ int ret;
+
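+ /* The 20-bit ageing period is split across two registers; e.g.
+ * 300 seconds (0x12C) writes 0x2C into bits 7:0 and 0x001 into
+ * bits 19:8
+ */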
+ value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
+
+ ret = ksz_write8(dev, REG_SW_AGE_PERIOD__1, value);
+ if (ret < 0)
+ return ret;
+
+ value = FIELD_GET(SW_AGE_PERIOD_19_8_M, secs);
+
+ return ksz_write16(dev, REG_SW_AGE_PERIOD__2, value);
+}
+
+static void lan937x_set_tune_adj(struct ksz_device *dev, int port,
+ u16 reg, u8 val)
+{
+ u16 data16;
+
+ ksz_pread16(dev, port, reg, &data16);
+
+ /* Update tune Adjust */
+ data16 |= FIELD_PREP(PORT_TUNE_ADJ, val);
+ ksz_pwrite16(dev, port, reg, data16);
+
+ /* write DLL reset to take effect */
+ data16 |= PORT_DLL_RESET;
+ ksz_pwrite16(dev, port, reg, data16);
+}
+
+static void lan937x_set_rgmii_tx_delay(struct ksz_device *dev, int port)
+{
+ u8 val;
+
+ /* Apply port-specific delay codes, as per the characterization
+ * results
+ */
+ val = (port == LAN937X_RGMII_1_PORT) ? RGMII_1_TX_DELAY_2NS :
+ RGMII_2_TX_DELAY_2NS;
+
+ lan937x_set_tune_adj(dev, port, REG_PORT_XMII_CTRL_5, val);
+}
+
+static void lan937x_set_rgmii_rx_delay(struct ksz_device *dev, int port)
+{
+ u8 val;
+
+ val = (port == LAN937X_RGMII_1_PORT) ? RGMII_1_RX_DELAY_2NS :
+ RGMII_2_RX_DELAY_2NS;
+
+ lan937x_set_tune_adj(dev, port, REG_PORT_XMII_CTRL_4, val);
+}
+
+void lan937x_phylink_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config)
+{
+ config->mac_capabilities = MAC_100FD;
+
+ if (dev->info->supports_rgmii[port]) {
+ /* MII/RMII/RGMII ports */
+ config->mac_capabilities |= MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_100HD | MAC_10 | MAC_1000FD;
+ }
+}
+
+void lan937x_setup_rgmii_delay(struct ksz_device *dev, int port)
+{
+ struct ksz_port *p = &dev->ports[port];
+
+ if (p->rgmii_tx_val) {
+ lan937x_set_rgmii_tx_delay(dev, port);
+ dev_info(dev->dev, "Applied rgmii tx delay for the port %d\n",
+ port);
+ }
+
+ if (p->rgmii_rx_val) {
+ lan937x_set_rgmii_rx_delay(dev, port);
+ dev_info(dev->dev, "Applied rgmii rx delay for the port %d\n",
+ port);
+ }
+}
+
+int lan937x_switch_init(struct ksz_device *dev)
+{
+ dev->port_mask = (1 << dev->info->port_cnt) - 1;
+
+ return 0;
+}
+
+int lan937x_setup(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret;
+
+ /* enable Indirect Access from SPI to the VPHY registers */
+ ret = lan937x_enable_spi_indirect_access(dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to enable spi indirect access\n");
+ return ret;
+ }
+
+ /* VLAN awareness is a global setting; mixed VLAN filtering
+ * modes are not supported.
+ */
+ ds->vlan_filtering_is_global = true;
+
+ /* Enable aggressive back off for half duplex & UNH mode */
+ lan937x_cfg(dev, REG_SW_MAC_CTRL_0,
+ (SW_PAUSE_UNH_MODE | SW_NEW_BACKOFF | SW_AGGR_BACKOFF),
+ true);
+
+ /* If NO_EXC_COLLISION_DROP bit is set, the switch will not drop
+ * packets when 16 or more collisions occur
+ */
+ lan937x_cfg(dev, REG_SW_MAC_CTRL_1, NO_EXC_COLLISION_DROP, true);
+
+ /* enable global MIB counter freeze function */
+ lan937x_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);
+
+ /* disable CLK125 & CLK25, 1: disable, 0: enable */
+ lan937x_cfg(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1,
+ (SW_CLK125_ENB | SW_CLK25_ENB), true);
+
+ return 0;
+}
+
+void lan937x_teardown(struct dsa_switch *ds)
+{
+}
+
+void lan937x_switch_exit(struct ksz_device *dev)
+{
+ lan937x_reset_switch(dev);
+}
+
+MODULE_AUTHOR("Arun Ramadoss <arun.ramadoss@microchip.com>");
+MODULE_DESCRIPTION("Microchip LAN937x Series Switch DSA Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/lan937x_reg.h b/drivers/net/dsa/microchip/lan937x_reg.h
new file mode 100644
index 000000000..5bc16a4c4
--- /dev/null
+++ b/drivers/net/dsa/microchip/lan937x_reg.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Microchip LAN937X switch register definitions
+ * Copyright (C) 2019-2021 Microchip Technology Inc.
+ */
+#ifndef __LAN937X_REG_H
+#define __LAN937X_REG_H
+
+#define PORT_CTRL_ADDR(port, addr) ((addr) | (((port) + 1) << 12))
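+/* e.g. PORT_CTRL_ADDR(0, 0x20) = 0x1020: each port's register block
+ * starts at (port + 1) * 0x1000
+ */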
+
+/* 0 - Operation */
+#define REG_GLOBAL_CTRL_0 0x0007
+
+#define SW_PHY_REG_BLOCK BIT(7)
+#define SW_FAST_MODE BIT(3)
+#define SW_FAST_MODE_OVERRIDE BIT(2)
+
+#define REG_SW_INT_STATUS__4 0x0010
+#define REG_SW_INT_MASK__4 0x0014
+
+#define LUE_INT BIT(31)
+#define TRIG_TS_INT BIT(30)
+#define APB_TIMEOUT_INT BIT(29)
+#define OVER_TEMP_INT BIT(28)
+#define HSR_INT BIT(27)
+#define PIO_INT BIT(26)
+#define POR_READY_INT BIT(25)
+
+#define SWITCH_INT_MASK \
+ (LUE_INT | TRIG_TS_INT | APB_TIMEOUT_INT | OVER_TEMP_INT | HSR_INT | \
+ PIO_INT | POR_READY_INT)
+
+#define REG_SW_PORT_INT_STATUS__4 0x0018
+#define REG_SW_PORT_INT_MASK__4 0x001C
+
+/* 1 - Global */
+#define REG_SW_GLOBAL_OUTPUT_CTRL__1 0x0103
+#define SW_CLK125_ENB BIT(1)
+#define SW_CLK25_ENB BIT(0)
+
+/* 3 - Operation Control */
+#define REG_SW_OPERATION 0x0300
+
+#define SW_DOUBLE_TAG BIT(7)
+#define SW_OVER_TEMP_ENABLE BIT(2)
+#define SW_RESET BIT(1)
+
+#define REG_SW_LUE_CTRL_0 0x0310
+
+#define SW_VLAN_ENABLE BIT(7)
+#define SW_DROP_INVALID_VID BIT(6)
+#define SW_AGE_CNT_M 0x7
+#define SW_AGE_CNT_S 3
+#define SW_RESV_MCAST_ENABLE BIT(2)
+
+#define REG_SW_LUE_CTRL_1 0x0311
+
+#define UNICAST_LEARN_DISABLE BIT(7)
+#define SW_FLUSH_STP_TABLE BIT(5)
+#define SW_FLUSH_MSTP_TABLE BIT(4)
+#define SW_SRC_ADDR_FILTER BIT(3)
+#define SW_AGING_ENABLE BIT(2)
+#define SW_FAST_AGING BIT(1)
+#define SW_LINK_AUTO_AGING BIT(0)
+
+#define REG_SW_AGE_PERIOD__1 0x0313
+#define SW_AGE_PERIOD_7_0_M GENMASK(7, 0)
+
+#define REG_SW_AGE_PERIOD__2 0x0320
+#define SW_AGE_PERIOD_19_8_M GENMASK(19, 8)
+
+#define REG_SW_MAC_CTRL_0 0x0330
+#define SW_NEW_BACKOFF BIT(7)
+#define SW_PAUSE_UNH_MODE BIT(1)
+#define SW_AGGR_BACKOFF BIT(0)
+
+#define REG_SW_MAC_CTRL_1 0x0331
+#define SW_SHORT_IFG BIT(7)
+#define MULTICAST_STORM_DISABLE BIT(6)
+#define SW_BACK_PRESSURE BIT(5)
+#define FAIR_FLOW_CTRL BIT(4)
+#define NO_EXC_COLLISION_DROP BIT(3)
+#define SW_LEGAL_PACKET_DISABLE BIT(1)
+#define SW_PASS_SHORT_FRAME BIT(0)
+
+#define REG_SW_MAC_CTRL_6 0x0336
+#define SW_MIB_COUNTER_FLUSH BIT(7)
+#define SW_MIB_COUNTER_FREEZE BIT(6)
+
+/* 4 - LUE */
+#define REG_SW_ALU_STAT_CTRL__4 0x041C
+
+#define REG_SW_ALU_VAL_B 0x0424
+#define ALU_V_OVERRIDE BIT(31)
+#define ALU_V_USE_FID BIT(30)
+#define ALU_V_PORT_MAP 0xFF
+
+/* 7 - VPhy */
+#define REG_VPHY_IND_ADDR__2 0x075C
+#define REG_VPHY_IND_DATA__2 0x0760
+
+#define REG_VPHY_IND_CTRL__2 0x0768
+
+#define VPHY_IND_WRITE BIT(1)
+#define VPHY_IND_BUSY BIT(0)
+
+#define REG_VPHY_SPECIAL_CTRL__2 0x077C
+#define VPHY_SMI_INDIRECT_ENABLE BIT(15)
+#define VPHY_SW_LOOPBACK BIT(14)
+#define VPHY_MDIO_INTERNAL_ENABLE BIT(13)
+#define VPHY_SPI_INDIRECT_ENABLE BIT(12)
+#define VPHY_PORT_MODE_M 0x3
+#define VPHY_PORT_MODE_S 8
+#define VPHY_MODE_RGMII 0
+#define VPHY_MODE_MII_PHY 1
+#define VPHY_MODE_SGMII 2
+#define VPHY_MODE_RMII_PHY 3
+#define VPHY_SW_COLLISION_TEST BIT(7)
+#define VPHY_SPEED_DUPLEX_STAT_M 0x7
+#define VPHY_SPEED_DUPLEX_STAT_S 2
+#define VPHY_SPEED_1000 BIT(4)
+#define VPHY_SPEED_100 BIT(3)
+#define VPHY_FULL_DUPLEX BIT(2)
+
+/* Port Registers */
+
+/* 0 - Operation */
+#define REG_PORT_INT_STATUS 0x001B
+#define REG_PORT_INT_MASK 0x001F
+
+#define PORT_TAS_INT BIT(5)
+#define PORT_QCI_INT BIT(4)
+#define PORT_SGMII_INT BIT(3)
+#define PORT_PTP_INT BIT(2)
+#define PORT_PHY_INT BIT(1)
+#define PORT_ACL_INT BIT(0)
+
+#define PORT_SRC_PHY_INT 1
+
+#define REG_PORT_CTRL_0 0x0020
+
+#define PORT_MAC_LOOPBACK BIT(7)
+#define PORT_MAC_REMOTE_LOOPBACK BIT(6)
+#define PORT_K2L_INSERT_ENABLE BIT(5)
+#define PORT_K2L_DEBUG_ENABLE BIT(4)
+#define PORT_TAIL_TAG_ENABLE BIT(2)
+#define PORT_QUEUE_SPLIT_ENABLE 0x3
+
+/* 1 - Phy */
+#define REG_PORT_T1_PHY_CTRL_BASE 0x0100
+
+/* 3 - xMII */
+#define PORT_SGMII_SEL BIT(7)
+#define PORT_GRXC_ENABLE BIT(0)
+
+#define PORT_MII_SEL_EDGE BIT(5)
+
+#define REG_PORT_XMII_CTRL_4 0x0304
+#define REG_PORT_XMII_CTRL_5 0x0306
+
+#define PORT_DLL_RESET BIT(15)
+#define PORT_TUNE_ADJ GENMASK(13, 7)
+
+/* 4 - MAC */
+#define REG_PORT_MAC_CTRL_0 0x0400
+#define PORT_CHECK_LENGTH BIT(2)
+#define PORT_BROADCAST_STORM BIT(1)
+#define PORT_JUMBO_PACKET BIT(0)
+
+#define REG_PORT_MAC_CTRL_1 0x0401
+#define PORT_BACK_PRESSURE BIT(3)
+#define PORT_PASS_ALL BIT(0)
+
+#define PORT_MAX_FR_SIZE 0x404
+#define FR_MIN_SIZE 1522
+
+/* 8 - Classification and Policing */
+#define REG_PORT_MRI_PRIO_CTRL 0x0801
+#define PORT_HIGHEST_PRIO BIT(7)
+#define PORT_OR_PRIO BIT(6)
+#define PORT_MAC_PRIO_ENABLE BIT(4)
+#define PORT_VLAN_PRIO_ENABLE BIT(3)
+#define PORT_802_1P_PRIO_ENABLE BIT(2)
+#define PORT_DIFFSERV_PRIO_ENABLE BIT(1)
+#define PORT_ACL_PRIO_ENABLE BIT(0)
+
+#define P_PRIO_CTRL REG_PORT_MRI_PRIO_CTRL
+
+/* The port number as per the datasheet */
+#define RGMII_2_PORT_NUM 5
+#define RGMII_1_PORT_NUM 6
+
+#define LAN937X_RGMII_2_PORT (RGMII_2_PORT_NUM - 1)
+#define LAN937X_RGMII_1_PORT (RGMII_1_PORT_NUM - 1)
+
+#define RGMII_1_TX_DELAY_2NS 2
+#define RGMII_2_TX_DELAY_2NS 0
+#define RGMII_1_RX_DELAY_2NS 0x1B
+#define RGMII_2_RX_DELAY_2NS 0x14
+
+#define LAN937X_TAG_LEN 2
+
+#endif
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
new file mode 100644
index 000000000..b988c8a40
--- /dev/null
+++ b/drivers/net/dsa/mt7530.c
@@ -0,0 +1,3417 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Mediatek MT7530 DSA Switch driver
+ * Copyright (C) 2017 Sean Wang <sean.wang@mediatek.com>
+ */
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/iopoll.h>
+#include <linux/mdio.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/phylink.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+#include <net/dsa.h>
+
+#include "mt7530.h"
+
+static struct mt753x_pcs *pcs_to_mt753x_pcs(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct mt753x_pcs, pcs);
+}
+
+/* String, offset, and register size in bytes if different from 4 bytes */
+static const struct mt7530_mib_desc mt7530_mib[] = {
+ MIB_DESC(1, 0x00, "TxDrop"),
+ MIB_DESC(1, 0x04, "TxCrcErr"),
+ MIB_DESC(1, 0x08, "TxUnicast"),
+ MIB_DESC(1, 0x0c, "TxMulticast"),
+ MIB_DESC(1, 0x10, "TxBroadcast"),
+ MIB_DESC(1, 0x14, "TxCollision"),
+ MIB_DESC(1, 0x18, "TxSingleCollision"),
+ MIB_DESC(1, 0x1c, "TxMultipleCollision"),
+ MIB_DESC(1, 0x20, "TxDeferred"),
+ MIB_DESC(1, 0x24, "TxLateCollision"),
+ MIB_DESC(1, 0x28, "TxExcessiveCollision"),
+ MIB_DESC(1, 0x2c, "TxPause"),
+ MIB_DESC(1, 0x30, "TxPktSz64"),
+ MIB_DESC(1, 0x34, "TxPktSz65To127"),
+ MIB_DESC(1, 0x38, "TxPktSz128To255"),
+ MIB_DESC(1, 0x3c, "TxPktSz256To511"),
+ MIB_DESC(1, 0x40, "TxPktSz512To1023"),
+ MIB_DESC(1, 0x44, "Tx1024ToMax"),
+ MIB_DESC(2, 0x48, "TxBytes"),
+ MIB_DESC(1, 0x60, "RxDrop"),
+ MIB_DESC(1, 0x64, "RxFiltering"),
+ MIB_DESC(1, 0x68, "RxUnicast"),
+ MIB_DESC(1, 0x6c, "RxMulticast"),
+ MIB_DESC(1, 0x70, "RxBroadcast"),
+ MIB_DESC(1, 0x74, "RxAlignErr"),
+ MIB_DESC(1, 0x78, "RxCrcErr"),
+ MIB_DESC(1, 0x7c, "RxUnderSizeErr"),
+ MIB_DESC(1, 0x80, "RxFragErr"),
+ MIB_DESC(1, 0x84, "RxOverSzErr"),
+ MIB_DESC(1, 0x88, "RxJabberErr"),
+ MIB_DESC(1, 0x8c, "RxPause"),
+ MIB_DESC(1, 0x90, "RxPktSz64"),
+ MIB_DESC(1, 0x94, "RxPktSz65To127"),
+ MIB_DESC(1, 0x98, "RxPktSz128To255"),
+ MIB_DESC(1, 0x9c, "RxPktSz256To511"),
+ MIB_DESC(1, 0xa0, "RxPktSz512To1023"),
+ MIB_DESC(1, 0xa4, "RxPktSz1024ToMax"),
+ MIB_DESC(2, 0xa8, "RxBytes"),
+ MIB_DESC(1, 0xb0, "RxCtrlDrop"),
+ MIB_DESC(1, 0xb4, "RxIngressDrop"),
+ MIB_DESC(1, 0xb8, "RxArlDrop"),
+};
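+
+/* MIB_DESC(2, ...) entries are 64-bit counters: the low 32 bits live at
+ * the listed offset and the high 32 bits at offset + 4, as combined in
+ * mt7530_get_ethtool_stats() below.
+ */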
+
+/* Since the phy_device has not yet been created and
+ * phy_{read,write}_mmd_indirect is not yet available, we provide our own
+ * core_{read,write}_mmd_indirect, with core_{clear,write,set} wrappers,
+ * to access the switch core registers.
+ */
+static int
+core_read_mmd_indirect(struct mt7530_priv *priv, int prtad, int devad)
+{
+ struct mii_bus *bus = priv->bus;
+ int value, ret;
+
+ /* Write the desired MMD Devad */
+ ret = bus->write(bus, 0, MII_MMD_CTRL, devad);
+ if (ret < 0)
+ goto err;
+
+ /* Write the desired MMD register address */
+ ret = bus->write(bus, 0, MII_MMD_DATA, prtad);
+ if (ret < 0)
+ goto err;
+
+ /* Select the Function : DATA with no post increment */
+ ret = bus->write(bus, 0, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
+ if (ret < 0)
+ goto err;
+
+ /* Read the content of the MMD's selected register */
+ value = bus->read(bus, 0, MII_MMD_DATA);
+
+ return value;
+err:
+ dev_err(&bus->dev, "failed to read mmd register\n");
+
+ return ret;
+}
+
+static int
+core_write_mmd_indirect(struct mt7530_priv *priv, int prtad,
+ int devad, u32 data)
+{
+ struct mii_bus *bus = priv->bus;
+ int ret;
+
+ /* Write the desired MMD Devad */
+ ret = bus->write(bus, 0, MII_MMD_CTRL, devad);
+ if (ret < 0)
+ goto err;
+
+ /* Write the desired MMD register address */
+ ret = bus->write(bus, 0, MII_MMD_DATA, prtad);
+ if (ret < 0)
+ goto err;
+
+ /* Select the Function : DATA with no post increment */
+ ret = bus->write(bus, 0, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
+ if (ret < 0)
+ goto err;
+
+ /* Write the data into MMD's selected register */
+ ret = bus->write(bus, 0, MII_MMD_DATA, data);
+err:
+ if (ret < 0)
+ dev_err(&bus->dev,
+ "failed to write mmd register\n");
+ return ret;
+}
+
+static void
+core_write(struct mt7530_priv *priv, u32 reg, u32 val)
+{
+ struct mii_bus *bus = priv->bus;
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ core_write_mmd_indirect(priv, reg, MDIO_MMD_VEND2, val);
+
+ mutex_unlock(&bus->mdio_lock);
+}
+
+static void
+core_rmw(struct mt7530_priv *priv, u32 reg, u32 mask, u32 set)
+{
+ struct mii_bus *bus = priv->bus;
+ u32 val;
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ val = core_read_mmd_indirect(priv, reg, MDIO_MMD_VEND2);
+ val &= ~mask;
+ val |= set;
+ core_write_mmd_indirect(priv, reg, MDIO_MMD_VEND2, val);
+
+ mutex_unlock(&bus->mdio_lock);
+}
+
+static void
+core_set(struct mt7530_priv *priv, u32 reg, u32 val)
+{
+ core_rmw(priv, reg, 0, val);
+}
+
+static void
+core_clear(struct mt7530_priv *priv, u32 reg, u32 val)
+{
+ core_rmw(priv, reg, val, 0);
+}
+
+static int
+mt7530_mii_write(struct mt7530_priv *priv, u32 reg, u32 val)
+{
+ struct mii_bus *bus = priv->bus;
+ u16 page, r, lo, hi;
+ int ret;
+
+ page = (reg >> 6) & 0x3ff;
+ r = (reg >> 2) & 0xf;
+ lo = val & 0xffff;
+ hi = val >> 16;
+
+ /* MT7530 uses 31 as the pseudo port */
+ ret = bus->write(bus, 0x1f, 0x1f, page);
+ if (ret < 0)
+ goto err;
+
+ ret = bus->write(bus, 0x1f, r, lo);
+ if (ret < 0)
+ goto err;
+
+ ret = bus->write(bus, 0x1f, 0x10, hi);
+err:
+ if (ret < 0)
+ dev_err(&bus->dev,
+ "failed to write mt7530 register\n");
+ return ret;
+}
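+
+/* A worked example of the page/register split above: reg = 0x2004 gives
+ * page = 0x80 and r = 0x1, so one 32-bit access becomes three MDIO writes
+ * via pseudo port 0x1f: the page select, the low 16 bits, then the high
+ * 16 bits.
+ */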
+
+static u32
+mt7530_mii_read(struct mt7530_priv *priv, u32 reg)
+{
+ struct mii_bus *bus = priv->bus;
+ u16 page, r, lo, hi;
+ int ret;
+
+ page = (reg >> 6) & 0x3ff;
+ r = (reg >> 2) & 0xf;
+
+ /* MT7530 uses 31 as the pseudo port */
+ ret = bus->write(bus, 0x1f, 0x1f, page);
+ if (ret < 0) {
+ dev_err(&bus->dev,
+ "failed to read mt7530 register\n");
+ return ret;
+ }
+
+ lo = bus->read(bus, 0x1f, r);
+ hi = bus->read(bus, 0x1f, 0x10);
+
+ return (hi << 16) | (lo & 0xffff);
+}
+
+static void
+mt7530_write(struct mt7530_priv *priv, u32 reg, u32 val)
+{
+ struct mii_bus *bus = priv->bus;
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ mt7530_mii_write(priv, reg, val);
+
+ mutex_unlock(&bus->mdio_lock);
+}
+
+static u32
+_mt7530_unlocked_read(struct mt7530_dummy_poll *p)
+{
+ return mt7530_mii_read(p->priv, p->reg);
+}
+
+static u32
+_mt7530_read(struct mt7530_dummy_poll *p)
+{
+ struct mii_bus *bus = p->priv->bus;
+ u32 val;
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ val = mt7530_mii_read(p->priv, p->reg);
+
+ mutex_unlock(&bus->mdio_lock);
+
+ return val;
+}
+
+static u32
+mt7530_read(struct mt7530_priv *priv, u32 reg)
+{
+ struct mt7530_dummy_poll p;
+
+ INIT_MT7530_DUMMY_POLL(&p, priv, reg);
+ return _mt7530_read(&p);
+}
+
+static void
+mt7530_rmw(struct mt7530_priv *priv, u32 reg,
+ u32 mask, u32 set)
+{
+ struct mii_bus *bus = priv->bus;
+ u32 val;
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ val = mt7530_mii_read(priv, reg);
+ val &= ~mask;
+ val |= set;
+ mt7530_mii_write(priv, reg, val);
+
+ mutex_unlock(&bus->mdio_lock);
+}
+
+static void
+mt7530_set(struct mt7530_priv *priv, u32 reg, u32 val)
+{
+ mt7530_rmw(priv, reg, 0, val);
+}
+
+static void
+mt7530_clear(struct mt7530_priv *priv, u32 reg, u32 val)
+{
+ mt7530_rmw(priv, reg, val, 0);
+}
+
+static int
+mt7530_fdb_cmd(struct mt7530_priv *priv, enum mt7530_fdb_cmd cmd, u32 *rsp)
+{
+ u32 val;
+ int ret;
+ struct mt7530_dummy_poll p;
+
+ /* Set the command operating upon the MAC address entries */
+ val = ATC_BUSY | ATC_MAT(0) | cmd;
+ mt7530_write(priv, MT7530_ATC, val);
+
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_ATC);
+ ret = readx_poll_timeout(_mt7530_read, &p, val,
+ !(val & ATC_BUSY), 20, 20000);
+ if (ret < 0) {
+ dev_err(priv->dev, "ATC command timeout\n");
+ return ret;
+ }
+
+ /* Additional sanity check for the read command: fail if the
+ * specified entry is invalid
+ */
+ val = mt7530_read(priv, MT7530_ATC);
+ if ((cmd == MT7530_FDB_READ) && (val & ATC_INVALID))
+ return -EINVAL;
+
+ if (rsp)
+ *rsp = val;
+
+ return 0;
+}
+
+static void
+mt7530_fdb_read(struct mt7530_priv *priv, struct mt7530_fdb *fdb)
+{
+ u32 reg[3];
+ int i;
+
+ /* Read from ARL table into an array */
+ for (i = 0; i < 3; i++) {
+ reg[i] = mt7530_read(priv, MT7530_TSRA1 + (i * 4));
+
+ dev_dbg(priv->dev, "%s(%d) reg[%d]=0x%x\n",
+ __func__, __LINE__, i, reg[i]);
+ }
+
+ fdb->vid = (reg[1] >> CVID) & CVID_MASK;
+ fdb->aging = (reg[2] >> AGE_TIMER) & AGE_TIMER_MASK;
+ fdb->port_mask = (reg[2] >> PORT_MAP) & PORT_MAP_MASK;
+ fdb->mac[0] = (reg[0] >> MAC_BYTE_0) & MAC_BYTE_MASK;
+ fdb->mac[1] = (reg[0] >> MAC_BYTE_1) & MAC_BYTE_MASK;
+ fdb->mac[2] = (reg[0] >> MAC_BYTE_2) & MAC_BYTE_MASK;
+ fdb->mac[3] = (reg[0] >> MAC_BYTE_3) & MAC_BYTE_MASK;
+ fdb->mac[4] = (reg[1] >> MAC_BYTE_4) & MAC_BYTE_MASK;
+ fdb->mac[5] = (reg[1] >> MAC_BYTE_5) & MAC_BYTE_MASK;
+ fdb->noarp = ((reg[2] >> ENT_STATUS) & ENT_STATUS_MASK) == STATIC_ENT;
+}
+
+static void
+mt7530_fdb_write(struct mt7530_priv *priv, u16 vid,
+ u8 port_mask, const u8 *mac,
+ u8 aging, u8 type)
+{
+ u32 reg[3] = { 0 };
+ int i;
+
+ reg[1] |= vid & CVID_MASK;
+ reg[1] |= ATA2_IVL;
+ reg[1] |= ATA2_FID(FID_BRIDGED);
+ reg[2] |= (aging & AGE_TIMER_MASK) << AGE_TIMER;
+ reg[2] |= (port_mask & PORT_MAP_MASK) << PORT_MAP;
+ /* STATIC_ENT indicates a static entry that won't be aged out,
+ * while STATIC_EMP is specified to erase an entry
+ */
+ reg[2] |= (type & ENT_STATUS_MASK) << ENT_STATUS;
+ reg[1] |= mac[5] << MAC_BYTE_5;
+ reg[1] |= mac[4] << MAC_BYTE_4;
+ reg[0] |= mac[3] << MAC_BYTE_3;
+ reg[0] |= mac[2] << MAC_BYTE_2;
+ reg[0] |= mac[1] << MAC_BYTE_1;
+ reg[0] |= mac[0] << MAC_BYTE_0;
+
+ /* Write array into the ARL table */
+ for (i = 0; i < 3; i++)
+ mt7530_write(priv, MT7530_ATA1 + (i * 4), reg[i]);
+}
+
+/* Set up switch core clock for MT7530 */
+static void mt7530_pll_setup(struct mt7530_priv *priv)
+{
+ /* Disable core clock */
+ core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
+
+ /* Disable PLL */
+ core_write(priv, CORE_GSWPLL_GRP1, 0);
+
+ /* Set core clock to 500MHz */
+ core_write(priv, CORE_GSWPLL_GRP2,
+ RG_GSWPLL_POSDIV_500M(1) |
+ RG_GSWPLL_FBKDIV_500M(25));
+
+ /* Enable PLL */
+ core_write(priv, CORE_GSWPLL_GRP1,
+ RG_GSWPLL_EN_PRE |
+ RG_GSWPLL_POSDIV_200M(2) |
+ RG_GSWPLL_FBKDIV_200M(32));
+
+ udelay(20);
+
+ /* Enable core clock */
+ core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
+}
+
+/* Setup port 6 interface mode and TRGMII TX circuit */
+static int
+mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
+{
+ struct mt7530_priv *priv = ds->priv;
+ u32 ncpo1, ssc_delta, trgint, xtal;
+
+ xtal = mt7530_read(priv, MT7530_MHWTRAP) & HWTRAP_XTAL_MASK;
+
+ if (xtal == HWTRAP_XTAL_20MHZ) {
+ dev_err(priv->dev,
+ "%s: MT7530 with a 20MHz XTAL is not supported!\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ trgint = 0;
+ break;
+ case PHY_INTERFACE_MODE_TRGMII:
+ trgint = 1;
+ if (xtal == HWTRAP_XTAL_25MHZ)
+ ssc_delta = 0x57;
+ else
+ ssc_delta = 0x87;
+ if (priv->id == ID_MT7621) {
+ /* PLL frequency: 125MHz: 1.0GBit */
+ if (xtal == HWTRAP_XTAL_40MHZ)
+ ncpo1 = 0x0640;
+ if (xtal == HWTRAP_XTAL_25MHZ)
+ ncpo1 = 0x0a00;
+ } else { /* PLL frequency: 250MHz: 2.0Gbit */
+ if (xtal == HWTRAP_XTAL_40MHZ)
+ ncpo1 = 0x0c80;
+ if (xtal == HWTRAP_XTAL_25MHZ)
+ ncpo1 = 0x1400;
+ }
+ break;
+ default:
+ dev_err(priv->dev, "xMII interface %d not supported\n",
+ interface);
+ return -EINVAL;
+ }
+
+ mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK,
+ P6_INTF_MODE(trgint));
+
+ if (trgint) {
+ /* Disable the MT7530 TRGMII clocks */
+ core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
+
+ /* Setup the MT7530 TRGMII Tx Clock */
+ core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
+ core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
+ core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta));
+ core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta));
+ core_write(priv, CORE_PLL_GROUP4,
+ RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN |
+ RG_SYSPLL_BIAS_LPF_EN);
+ core_write(priv, CORE_PLL_GROUP2,
+ RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
+ RG_SYSPLL_POSDIV(1));
+ core_write(priv, CORE_PLL_GROUP7,
+ RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) |
+ RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
+
+ /* Enable the MT7530 TRGMII clocks */
+ core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
+ }
+
+ return 0;
+}
+
+static bool mt7531_dual_sgmii_supported(struct mt7530_priv *priv)
+{
+ u32 val;
+
+ val = mt7530_read(priv, MT7531_TOP_SIG_SR);
+
+ return (val & PAD_DUAL_SGMII_EN) != 0;
+}
+
+static int
+mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
+{
+ return 0;
+}
+
+static void
+mt7531_pll_setup(struct mt7530_priv *priv)
+{
+ u32 top_sig;
+ u32 hwstrap;
+ u32 xtal;
+ u32 val;
+
+ if (mt7531_dual_sgmii_supported(priv))
+ return;
+
+ val = mt7530_read(priv, MT7531_CREV);
+ top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR);
+ hwstrap = mt7530_read(priv, MT7531_HWTRAP);
+ if ((val & CHIP_REV_M) > 0)
+ xtal = (top_sig & PAD_MCM_SMI_EN) ? HWTRAP_XTAL_FSEL_40MHZ :
+ HWTRAP_XTAL_FSEL_25MHZ;
+ else
+ xtal = hwstrap & HWTRAP_XTAL_FSEL_MASK;
+
+ /* Step 1 : Disable MT7531 COREPLL */
+ val = mt7530_read(priv, MT7531_PLLGP_EN);
+ val &= ~EN_COREPLL;
+ mt7530_write(priv, MT7531_PLLGP_EN, val);
+
+ /* Step 2: switch to XTAL output */
+ val = mt7530_read(priv, MT7531_PLLGP_EN);
+ val |= SW_CLKSW;
+ mt7530_write(priv, MT7531_PLLGP_EN, val);
+
+ val = mt7530_read(priv, MT7531_PLLGP_CR0);
+ val &= ~RG_COREPLL_EN;
+ mt7530_write(priv, MT7531_PLLGP_CR0, val);
+
+ /* Step 3: disable PLLGP and enable program PLLGP */
+ val = mt7530_read(priv, MT7531_PLLGP_EN);
+ val |= SW_PLLGP;
+ mt7530_write(priv, MT7531_PLLGP_EN, val);
+
+ /* Step 4: program COREPLL output frequency to 500MHz */
+ val = mt7530_read(priv, MT7531_PLLGP_CR0);
+ val &= ~RG_COREPLL_POSDIV_M;
+ val |= 2 << RG_COREPLL_POSDIV_S;
+ mt7530_write(priv, MT7531_PLLGP_CR0, val);
+ usleep_range(25, 35);
+
+ switch (xtal) {
+ case HWTRAP_XTAL_FSEL_25MHZ:
+ val = mt7530_read(priv, MT7531_PLLGP_CR0);
+ val &= ~RG_COREPLL_SDM_PCW_M;
+ val |= 0x140000 << RG_COREPLL_SDM_PCW_S;
+ mt7530_write(priv, MT7531_PLLGP_CR0, val);
+ break;
+ case HWTRAP_XTAL_FSEL_40MHZ:
+ val = mt7530_read(priv, MT7531_PLLGP_CR0);
+ val &= ~RG_COREPLL_SDM_PCW_M;
+ val |= 0x190000 << RG_COREPLL_SDM_PCW_S;
+ mt7530_write(priv, MT7531_PLLGP_CR0, val);
+ break;
+ }
+
+ /* Set feedback divide ratio update signal to high */
+ val = mt7530_read(priv, MT7531_PLLGP_CR0);
+ val |= RG_COREPLL_SDM_PCW_CHG;
+ mt7530_write(priv, MT7531_PLLGP_CR0, val);
+ /* Wait for at least 16 XTAL clocks */
+ usleep_range(10, 20);
+
+ /* Step 5: set feedback divide ratio update signal to low */
+ val = mt7530_read(priv, MT7531_PLLGP_CR0);
+ val &= ~RG_COREPLL_SDM_PCW_CHG;
+ mt7530_write(priv, MT7531_PLLGP_CR0, val);
+
+ /* Enable 325M clock for SGMII */
+ mt7530_write(priv, MT7531_ANA_PLLGP_CR5, 0xad0000);
+
+ /* Enable 250SSC clock for RGMII */
+ mt7530_write(priv, MT7531_ANA_PLLGP_CR2, 0x4f40000);
+
+ /* Step 6: Enable MT7531 PLL */
+ val = mt7530_read(priv, MT7531_PLLGP_CR0);
+ val |= RG_COREPLL_EN;
+ mt7530_write(priv, MT7531_PLLGP_CR0, val);
+
+ val = mt7530_read(priv, MT7531_PLLGP_EN);
+ val |= EN_COREPLL;
+ mt7530_write(priv, MT7531_PLLGP_EN, val);
+ usleep_range(25, 35);
+}
+
+static void
+mt7530_mib_reset(struct dsa_switch *ds)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_FLUSH);
+ mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_ACTIVATE);
+}
+
+static int mt7530_phy_read(struct mt7530_priv *priv, int port, int regnum)
+{
+ return mdiobus_read_nested(priv->bus, port, regnum);
+}
+
+static int mt7530_phy_write(struct mt7530_priv *priv, int port, int regnum,
+ u16 val)
+{
+ return mdiobus_write_nested(priv->bus, port, regnum, val);
+}
+
+static int
+mt7531_ind_c45_phy_read(struct mt7530_priv *priv, int port, int devad,
+ int regnum)
+{
+ struct mii_bus *bus = priv->bus;
+ struct mt7530_dummy_poll p;
+ u32 reg, val;
+ int ret;
+
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
+ !(val & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+ reg = MT7531_MDIO_CL45_ADDR | MT7531_MDIO_PHY_ADDR(port) |
+ MT7531_MDIO_DEV_ADDR(devad) | regnum;
+ mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
+ !(val & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+ reg = MT7531_MDIO_CL45_READ | MT7531_MDIO_PHY_ADDR(port) |
+ MT7531_MDIO_DEV_ADDR(devad);
+ mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
+ !(val & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+ ret = val & MT7531_MDIO_RW_DATA_MASK;
+out:
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static int
+mt7531_ind_c45_phy_write(struct mt7530_priv *priv, int port, int devad,
+ int regnum, u32 data)
+{
+ struct mii_bus *bus = priv->bus;
+ struct mt7530_dummy_poll p;
+ u32 val, reg;
+ int ret;
+
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
+ !(val & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+ reg = MT7531_MDIO_CL45_ADDR | MT7531_MDIO_PHY_ADDR(port) |
+ MT7531_MDIO_DEV_ADDR(devad) | regnum;
+ mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
+ !(val & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+ reg = MT7531_MDIO_CL45_WRITE | MT7531_MDIO_PHY_ADDR(port) |
+ MT7531_MDIO_DEV_ADDR(devad) | data;
+ mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
+ !(val & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+out:
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static int
+mt7531_ind_c22_phy_read(struct mt7530_priv *priv, int port, int regnum)
+{
+ struct mii_bus *bus = priv->bus;
+ struct mt7530_dummy_poll p;
+ int ret;
+ u32 val;
+
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
+ !(val & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+ val = MT7531_MDIO_CL22_READ | MT7531_MDIO_PHY_ADDR(port) |
+ MT7531_MDIO_REG_ADDR(regnum);
+
+ mt7530_mii_write(priv, MT7531_PHY_IAC, val | MT7531_PHY_ACS_ST);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
+ !(val & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+ ret = val & MT7531_MDIO_RW_DATA_MASK;
+out:
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static int
+mt7531_ind_c22_phy_write(struct mt7530_priv *priv, int port, int regnum,
+ u16 data)
+{
+ struct mii_bus *bus = priv->bus;
+ struct mt7530_dummy_poll p;
+ int ret;
+ u32 reg;
+
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, reg,
+ !(reg & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+ reg = MT7531_MDIO_CL22_WRITE | MT7531_MDIO_PHY_ADDR(port) |
+ MT7531_MDIO_REG_ADDR(regnum) | data;
+
+ mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, reg,
+ !(reg & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+out:
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static int
+mt7531_ind_phy_read(struct mt7530_priv *priv, int port, int regnum)
+{
+ int devad;
+ int ret;
+
+ if (regnum & MII_ADDR_C45) {
+ devad = (regnum >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+ ret = mt7531_ind_c45_phy_read(priv, port, devad,
+ regnum & MII_REGADDR_C45_MASK);
+ } else {
+ ret = mt7531_ind_c22_phy_read(priv, port, regnum);
+ }
+
+ return ret;
+}
+
+static int
+mt7531_ind_phy_write(struct mt7530_priv *priv, int port, int regnum,
+ u16 data)
+{
+ int devad;
+ int ret;
+
+ if (regnum & MII_ADDR_C45) {
+ devad = (regnum >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+ ret = mt7531_ind_c45_phy_write(priv, port, devad,
+ regnum & MII_REGADDR_C45_MASK,
+ data);
+ } else {
+ ret = mt7531_ind_c22_phy_write(priv, port, regnum, data);
+ }
+
+ return ret;
+}
+
+static int
+mt753x_phy_read(struct mii_bus *bus, int port, int regnum)
+{
+ struct mt7530_priv *priv = bus->priv;
+
+ return priv->info->phy_read(priv, port, regnum);
+}
+
+static int
+mt753x_phy_write(struct mii_bus *bus, int port, int regnum, u16 val)
+{
+ struct mt7530_priv *priv = bus->priv;
+
+ return priv->info->phy_write(priv, port, regnum, val);
+}
+
+static void
+mt7530_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data)
+{
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++)
+ strncpy(data + i * ETH_GSTRING_LEN, mt7530_mib[i].name,
+ ETH_GSTRING_LEN);
+}
+
+static void
+mt7530_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct mt7530_priv *priv = ds->priv;
+ const struct mt7530_mib_desc *mib;
+ u32 reg, i;
+ u64 hi;
+
+ for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++) {
+ mib = &mt7530_mib[i];
+ reg = MT7530_PORT_MIB_COUNTER(port) + mib->offset;
+
+ data[i] = mt7530_read(priv, reg);
+ if (mib->size == 2) {
+ hi = mt7530_read(priv, reg + 4);
+ data[i] |= hi << 32;
+ }
+ }
+}
+
+static int
+mt7530_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ return ARRAY_SIZE(mt7530_mib);
+}
+
+static int
+mt7530_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
+{
+ struct mt7530_priv *priv = ds->priv;
+ unsigned int secs = msecs / 1000;
+ unsigned int tmp_age_count;
+ unsigned int error = -1;
+ unsigned int age_count;
+ unsigned int age_unit;
+
+ /* Applied timer is (AGE_CNT + 1) * (AGE_UNIT + 1) seconds */
+ if (secs < 1 || secs > (AGE_CNT_MAX + 1) * (AGE_UNIT_MAX + 1))
+ return -ERANGE;
+
+ /* iterate through all possible age_count to find the closest pair */
+ for (tmp_age_count = 0; tmp_age_count <= AGE_CNT_MAX; ++tmp_age_count) {
+ unsigned int tmp_age_unit = secs / (tmp_age_count + 1) - 1;
+
+ if (tmp_age_unit <= AGE_UNIT_MAX) {
+ unsigned int tmp_error = secs -
+ (tmp_age_count + 1) * (tmp_age_unit + 1);
+
+ /* found a closer pair */
+ if (error > tmp_error) {
+ error = tmp_error;
+ age_count = tmp_age_count;
+ age_unit = tmp_age_unit;
+ }
+
+ /* found the exact match, so break the loop */
+ if (!error)
+ break;
+ }
+ }
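+ /* e.g. a 300 s request matches exactly on the first iteration:
+ * AGE_CNT = 0 and AGE_UNIT = 299, i.e. 1 * 300 s (assuming 299
+ * fits within AGE_UNIT_MAX)
+ */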
+
+ mt7530_write(priv, MT7530_AAC, AGE_CNT(age_count) | AGE_UNIT(age_unit));
+
+ return 0;
+}
+
+static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
+{
+ struct mt7530_priv *priv = ds->priv;
+ u8 tx_delay = 0;
+ int val;
+
+ mutex_lock(&priv->reg_mutex);
+
+ val = mt7530_read(priv, MT7530_MHWTRAP);
+
+ val |= MHWTRAP_MANUAL | MHWTRAP_P5_MAC_SEL | MHWTRAP_P5_DIS;
+ val &= ~MHWTRAP_P5_RGMII_MODE & ~MHWTRAP_PHY0_SEL;
+
+ switch (priv->p5_intf_sel) {
+ case P5_INTF_SEL_PHY_P0:
+ /* MT7530_P5_MODE_GPHY_P0: 2nd GMAC -> P5 -> P0 */
+ val |= MHWTRAP_PHY0_SEL;
+ fallthrough;
+ case P5_INTF_SEL_PHY_P4:
+ /* MT7530_P5_MODE_GPHY_P4: 2nd GMAC -> P5 -> P4 */
+ val &= ~MHWTRAP_P5_MAC_SEL & ~MHWTRAP_P5_DIS;
+
+ /* Setup the MAC by default for the cpu port */
+ mt7530_write(priv, MT7530_PMCR_P(5), 0x56300);
+ break;
+ case P5_INTF_SEL_GMAC5:
+ /* MT7530_P5_MODE_GMAC: P5 -> External phy or 2nd GMAC */
+ val &= ~MHWTRAP_P5_DIS;
+ break;
+ case P5_DISABLED:
+ interface = PHY_INTERFACE_MODE_NA;
+ break;
+ default:
+ dev_err(ds->dev, "Unsupported p5_intf_sel %d\n",
+ priv->p5_intf_sel);
+ goto unlock_exit;
+ }
+
+ /* Setup RGMII settings */
+ if (phy_interface_mode_is_rgmii(interface)) {
+ val |= MHWTRAP_P5_RGMII_MODE;
+
+ /* P5 RGMII RX Clock Control: delay setting for 1000M */
+ mt7530_write(priv, MT7530_P5RGMIIRXCR, CSR_RGMII_EDGE_ALIGN);
+
+ /* Don't set delay in DSA mode */
+ if (!dsa_is_dsa_port(priv->ds, 5) &&
+ (interface == PHY_INTERFACE_MODE_RGMII_TXID ||
+ interface == PHY_INTERFACE_MODE_RGMII_ID))
+ tx_delay = 4; /* n * 0.5 ns */
+
+ /* P5 RGMII TX Clock Control: delay x */
+ mt7530_write(priv, MT7530_P5RGMIITXCR,
+ CSR_RGMII_TXC_CFG(0x10 + tx_delay));
+
+ /* reduce P5 RGMII Tx driving, 8mA */
+ mt7530_write(priv, MT7530_IO_DRV_CR,
+ P5_IO_CLK_DRV(1) | P5_IO_DATA_DRV(1));
+ }
+
+ mt7530_write(priv, MT7530_MHWTRAP, val);
+
+ dev_dbg(ds->dev, "Setup P5, HWTRAP=0x%x, intf_sel=%s, phy-mode=%s\n",
+ val, p5_intf_modes(priv->p5_intf_sel), phy_modes(interface));
+
+ priv->p5_interface = interface;
+
+unlock_exit:
+ mutex_unlock(&priv->reg_mutex);
+}
+
+static void
+mt753x_trap_frames(struct mt7530_priv *priv)
+{
+ /* Trap BPDUs to the CPU port(s) */
+ mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK,
+ MT753X_BPDU_CPU_ONLY);
+
+ /* Trap 802.1X PAE frames to the CPU port(s) */
+ mt7530_rmw(priv, MT753X_BPC, MT753X_PAE_PORT_FW_MASK,
+ MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY));
+
+ /* Trap LLDP frames with :0E MAC DA to the CPU port(s) */
+ mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_PORT_FW_MASK,
+ MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY));
+}
+
+static int
+mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
+{
+ struct mt7530_priv *priv = ds->priv;
+ int ret;
+
+ /* Set up the maximum capability of the CPU port first */
+ if (priv->info->cpu_port_config) {
+ ret = priv->info->cpu_port_config(ds, port);
+ if (ret)
+ return ret;
+ }
+
+ /* Enable the MediaTek header mode on the CPU port */
+ mt7530_write(priv, MT7530_PVC_P(port),
+ PORT_SPEC_TAG);
+
+ /* Enable flooding on the CPU port */
+ mt7530_set(priv, MT7530_MFC, BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) |
+ UNU_FFP(BIT(port)));
+
+ /* Set CPU port number */
+ if (priv->id == ID_MT7530 || priv->id == ID_MT7621)
+ mt7530_rmw(priv, MT7530_MFC, CPU_MASK, CPU_EN | CPU_PORT(port));
+
+ /* The CPU port gets connected to all user ports of the switch. */
+ mt7530_write(priv, MT7530_PCR_P(port),
+ PCR_MATRIX(dsa_user_ports(priv->ds)));
+
+ /* Set to fallback mode for independent VLAN learning */
+ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
+ MT7530_PORT_FALLBACK_MODE);
+
+ return 0;
+}
+
+static int
+mt7530_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct mt7530_priv *priv = ds->priv;
+
+ mutex_lock(&priv->reg_mutex);
+
+ /* Allow the user port to get connected to the CPU port and also
+ * restore the port matrix if the port is a member of a bridge.
+ */
+ if (dsa_port_is_user(dp)) {
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+
+ priv->ports[port].pm |= PCR_MATRIX(BIT(cpu_dp->index));
+ }
+ priv->ports[port].enable = true;
+ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
+ priv->ports[port].pm);
+ mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
+
+ mutex_unlock(&priv->reg_mutex);
+
+ return 0;
+}
+
+static void
+mt7530_port_disable(struct dsa_switch *ds, int port)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ mutex_lock(&priv->reg_mutex);
+
+ /* Clear the port matrix in hardware; the saved copy will be
+ * restored on the next enablement of the port.
+ */
+ priv->ports[port].enable = false;
+ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
+ PCR_MATRIX_CLR);
+ mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
+
+ mutex_unlock(&priv->reg_mutex);
+}
+
+static int
+mt7530_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct mt7530_priv *priv = ds->priv;
+ struct mii_bus *bus = priv->bus;
+ int length;
+ u32 val;
+
+ /* When a new MTU is set, DSA always sets the CPU port's MTU to the
+ * largest MTU of the slave ports. Because the switch only has a global
+ * RX length register, allowing only the CPU port here is enough.
+ */
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ val = mt7530_mii_read(priv, MT7530_GMACCR);
+ val &= ~MAX_RX_PKT_LEN_MASK;
+
+ /* RX length also includes Ethernet header, MTK tag, and FCS length */
+ length = new_mtu + ETH_HLEN + MTK_HDR_LEN + ETH_FCS_LEN;
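+ /* For example, the standard 1500-byte MTU gives
+ * 1500 + 14 (ETH_HLEN) + 4 (MTK_HDR_LEN) + 4 (ETH_FCS_LEN) = 1522,
+ * which selects MAX_RX_PKT_LEN_1522 below.
+ */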
+ if (length <= 1522) {
+ val |= MAX_RX_PKT_LEN_1522;
+ } else if (length <= 1536) {
+ val |= MAX_RX_PKT_LEN_1536;
+ } else if (length <= 1552) {
+ val |= MAX_RX_PKT_LEN_1552;
+ } else {
+ val &= ~MAX_RX_JUMBO_MASK;
+ val |= MAX_RX_JUMBO(DIV_ROUND_UP(length, 1024));
+ val |= MAX_RX_PKT_LEN_JUMBO;
+ }
+
+ mt7530_mii_write(priv, MT7530_GMACCR, val);
+
+ mutex_unlock(&bus->mdio_lock);
+
+ return 0;
+}
+
+static int
+mt7530_port_max_mtu(struct dsa_switch *ds, int port)
+{
+ return MT7530_MAX_MTU;
+}
+
+static void
+mt7530_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct mt7530_priv *priv = ds->priv;
+ u32 stp_state;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ stp_state = MT7530_STP_DISABLED;
+ break;
+ case BR_STATE_BLOCKING:
+ stp_state = MT7530_STP_BLOCKING;
+ break;
+ case BR_STATE_LISTENING:
+ stp_state = MT7530_STP_LISTENING;
+ break;
+ case BR_STATE_LEARNING:
+ stp_state = MT7530_STP_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ default:
+ stp_state = MT7530_STP_FORWARDING;
+ break;
+ }
+
+ mt7530_rmw(priv, MT7530_SSP_P(port), FID_PST_MASK(FID_BRIDGED),
+ FID_PST(FID_BRIDGED, stp_state));
+}
+
+static int
+mt7530_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
+ BR_BCAST_FLOOD))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+mt7530_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ if (flags.mask & BR_LEARNING)
+ mt7530_rmw(priv, MT7530_PSC_P(port), SA_DIS,
+ flags.val & BR_LEARNING ? 0 : SA_DIS);
+
+ if (flags.mask & BR_FLOOD)
+ mt7530_rmw(priv, MT7530_MFC, UNU_FFP(BIT(port)),
+ flags.val & BR_FLOOD ? UNU_FFP(BIT(port)) : 0);
+
+ if (flags.mask & BR_MCAST_FLOOD)
+ mt7530_rmw(priv, MT7530_MFC, UNM_FFP(BIT(port)),
+ flags.val & BR_MCAST_FLOOD ? UNM_FFP(BIT(port)) : 0);
+
+ if (flags.mask & BR_BCAST_FLOOD)
+ mt7530_rmw(priv, MT7530_MFC, BC_FFP(BIT(port)),
+ flags.val & BR_BCAST_FLOOD ? BC_FFP(BIT(port)) : 0);
+
+ return 0;
+}
+
+static int
+mt7530_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge, bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+ u32 port_bitmap = BIT(cpu_dp->index);
+ struct mt7530_priv *priv = ds->priv;
+
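+ /* Illustrative example: with user ports 1 and 2 bridged behind CPU
+ * port 6, port 1 ends up with matrix BIT(6) | BIT(2) and port 2
+ * with BIT(6) | BIT(1), so frames flow only between the two user
+ * ports and the CPU port.
+ */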
+ mutex_lock(&priv->reg_mutex);
+
+ dsa_switch_for_each_user_port(other_dp, ds) {
+ int other_port = other_dp->index;
+
+ if (dp == other_dp)
+ continue;
+
+ /* Add this port to the port matrix of the other ports in the
+ * same bridge. If the port is disabled, the port matrix is kept
+ * and not written out until the port becomes enabled.
+ */
+ if (!dsa_port_offloads_bridge(other_dp, &bridge))
+ continue;
+
+ if (priv->ports[other_port].enable)
+ mt7530_set(priv, MT7530_PCR_P(other_port),
+ PCR_MATRIX(BIT(port)));
+ priv->ports[other_port].pm |= PCR_MATRIX(BIT(port));
+
+ port_bitmap |= BIT(other_port);
+ }
+
+ /* Add all other ports to this port's matrix. */
+ if (priv->ports[port].enable)
+ mt7530_rmw(priv, MT7530_PCR_P(port),
+ PCR_MATRIX_MASK, PCR_MATRIX(port_bitmap));
+ priv->ports[port].pm |= PCR_MATRIX(port_bitmap);
+
+ /* Set to fallback mode for independent VLAN learning */
+ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
+ MT7530_PORT_FALLBACK_MODE);
+
+ mutex_unlock(&priv->reg_mutex);
+
+ return 0;
+}
+
+static void
+mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
+{
+ struct mt7530_priv *priv = ds->priv;
+ bool all_user_ports_removed = true;
+ int i;
+
+ /* This is called after .port_bridge_leave when leaving a VLAN-aware
+ * bridge. Don't set standalone ports to fallback mode.
+ */
+ if (dsa_port_bridge_dev_get(dsa_to_port(ds, port)))
+ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
+ MT7530_PORT_FALLBACK_MODE);
+
+ mt7530_rmw(priv, MT7530_PVC_P(port),
+ VLAN_ATTR_MASK | PVC_EG_TAG_MASK | ACC_FRM_MASK,
+ VLAN_ATTR(MT7530_VLAN_TRANSPARENT) |
+ PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT) |
+ MT7530_VLAN_ACC_ALL);
+
+ /* Set PVID to 0 */
+ mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
+ G0_PORT_VID_DEF);
+
+ for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ if (dsa_is_user_port(ds, i) &&
+ dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) {
+ all_user_ports_removed = false;
+ break;
+ }
+ }
+
+ /* The CPU port does the same once all user ports attached to it
+ * have left VLAN filtering mode.
+ */
+ if (all_user_ports_removed) {
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+
+ mt7530_write(priv, MT7530_PCR_P(cpu_dp->index),
+ PCR_MATRIX(dsa_user_ports(priv->ds)));
+ mt7530_write(priv, MT7530_PVC_P(cpu_dp->index), PORT_SPEC_TAG
+ | PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
+ }
+}
+
+static void
+mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ /* Putting the port in security mode makes packet forwarding go
+ * through the VLAN table lookup.
+ */
+ if (dsa_is_user_port(ds, port)) {
+ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
+ MT7530_PORT_SECURITY_MODE);
+ mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
+ G0_PORT_VID(priv->ports[port].pvid));
+
+ /* Only accept tagged frames if PVID is not set */
+ if (!priv->ports[port].pvid)
+ mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
+ MT7530_VLAN_ACC_TAGGED);
+
+ /* Set the port as a user port so that the VID of incoming
+ * packets is recognized before the entry is fetched from the
+ * VLAN table.
+ */
+ mt7530_rmw(priv, MT7530_PVC_P(port),
+ VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
+ VLAN_ATTR(MT7530_VLAN_USER) |
+ PVC_EG_TAG(MT7530_VLAN_EG_DISABLED));
+ } else {
+ /* Also set CPU ports to the "user" VLAN port attribute, to
+ * allow VLAN classification, but keep the EG_TAG attribute as
+ * "consistent" (in other words, don't change its value) for
+ * packets received by the switch from the CPU, so that tagged
+ * packets are forwarded to user ports as tagged, and untagged
+ * ones as untagged.
+ */
+ mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
+ VLAN_ATTR(MT7530_VLAN_USER));
+ }
+}
+
+static void
+mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+ struct mt7530_priv *priv = ds->priv;
+
+ mutex_lock(&priv->reg_mutex);
+
+ dsa_switch_for_each_user_port(other_dp, ds) {
+ int other_port = other_dp->index;
+
+ if (dp == other_dp)
+ continue;
+
+ /* Remove this port from the port matrix of the other ports
+ * in the same bridge. If the port is disabled, the port matrix
+ * is kept and not written out until the port becomes enabled.
+ */
+ if (!dsa_port_offloads_bridge(other_dp, &bridge))
+ continue;
+
+ if (priv->ports[other_port].enable)
+ mt7530_clear(priv, MT7530_PCR_P(other_port),
+ PCR_MATRIX(BIT(port)));
+ priv->ports[other_port].pm &= ~PCR_MATRIX(BIT(port));
+ }
+
+ /* Set the CPU port to be the only member in this port's
+ * matrix.
+ */
+ if (priv->ports[port].enable)
+ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
+ PCR_MATRIX(BIT(cpu_dp->index)));
+ priv->ports[port].pm = PCR_MATRIX(BIT(cpu_dp->index));
+
+ /* When a port is removed from the bridge, it is set back to the
+ * default it had at initial boot, which is a VLAN-unaware port.
+ */
+ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
+ MT7530_PORT_MATRIX_MODE);
+
+ mutex_unlock(&priv->reg_mutex);
+}
+
+static int
+mt7530_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct mt7530_priv *priv = ds->priv;
+ int ret;
+ u8 port_mask = BIT(port);
+
+ mutex_lock(&priv->reg_mutex);
+ mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_ENT);
+ ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL);
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+static int
+mt7530_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct mt7530_priv *priv = ds->priv;
+ int ret;
+ u8 port_mask = BIT(port);
+
+ mutex_lock(&priv->reg_mutex);
+ mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_EMP);
+ ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL);
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+static int
+mt7530_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct mt7530_priv *priv = ds->priv;
+ struct mt7530_fdb _fdb = { 0 };
+ int cnt = MT7530_NUM_FDB_RECORDS;
+ int ret = 0;
+ u32 rsp = 0;
+
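+ /* Walk the address table with the ATC start/next commands; the walk
+ * stops on ATC_SRCH_END, after MT7530_NUM_FDB_RECORDS iterations, or
+ * when a command fails.
+ */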
+ mutex_lock(&priv->reg_mutex);
+
+ ret = mt7530_fdb_cmd(priv, MT7530_FDB_START, &rsp);
+ if (ret < 0)
+ goto err;
+
+ do {
+ if (rsp & ATC_SRCH_HIT) {
+ mt7530_fdb_read(priv, &_fdb);
+ if (_fdb.port_mask & BIT(port)) {
+ ret = cb(_fdb.mac, _fdb.vid, _fdb.noarp,
+ data);
+ if (ret < 0)
+ break;
+ }
+ }
+ } while (--cnt &&
+ !(rsp & ATC_SRCH_END) &&
+ !mt7530_fdb_cmd(priv, MT7530_FDB_NEXT, &rsp));
+err:
+ mutex_unlock(&priv->reg_mutex);
+
+ return 0;
+}
+
+static int
+mt7530_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct mt7530_priv *priv = ds->priv;
+ const u8 *addr = mdb->addr;
+ u16 vid = mdb->vid;
+ u8 port_mask = 0;
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+
+ mt7530_fdb_write(priv, vid, 0, addr, 0, STATIC_EMP);
+ if (!mt7530_fdb_cmd(priv, MT7530_FDB_READ, NULL))
+ port_mask = (mt7530_read(priv, MT7530_ATRD) >> PORT_MAP)
+ & PORT_MAP_MASK;
+
+ port_mask |= BIT(port);
+ mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_ENT);
+ ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL);
+
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+static int
+mt7530_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct mt7530_priv *priv = ds->priv;
+ const u8 *addr = mdb->addr;
+ u16 vid = mdb->vid;
+ u8 port_mask = 0;
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+
+ mt7530_fdb_write(priv, vid, 0, addr, 0, STATIC_EMP);
+ if (!mt7530_fdb_cmd(priv, MT7530_FDB_READ, NULL))
+ port_mask = (mt7530_read(priv, MT7530_ATRD) >> PORT_MAP)
+ & PORT_MAP_MASK;
+
+ port_mask &= ~BIT(port);
+ mt7530_fdb_write(priv, vid, port_mask, addr, -1,
+ port_mask ? STATIC_ENT : STATIC_EMP);
+ ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL);
+
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+static int
+mt7530_vlan_cmd(struct mt7530_priv *priv, enum mt7530_vlan_cmd cmd, u16 vid)
+{
+ struct mt7530_dummy_poll p;
+ u32 val;
+ int ret;
+
+ val = VTCR_BUSY | VTCR_FUNC(cmd) | vid;
+ mt7530_write(priv, MT7530_VTCR, val);
+
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_VTCR);
+ ret = readx_poll_timeout(_mt7530_read, &p, val,
+ !(val & VTCR_BUSY), 20, 20000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ return ret;
+ }
+
+ val = mt7530_read(priv, MT7530_VTCR);
+ if (val & VTCR_INVALID) {
+ dev_err(priv->dev, "read VTCR invalid\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+mt7530_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
+ struct netlink_ext_ack *extack)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+
+ if (vlan_filtering) {
+ /* The port is kept VLAN-unaware if the bridge is set up without
+ * vlan_filtering. Otherwise, the port and the corresponding CPU
+ * port need to be set up to become VLAN-aware ports.
+ */
+ mt7530_port_set_vlan_aware(ds, port);
+ mt7530_port_set_vlan_aware(ds, cpu_dp->index);
+ } else {
+ mt7530_port_set_vlan_unaware(ds, port);
+ }
+
+ return 0;
+}
+
+static void
+mt7530_hw_vlan_add(struct mt7530_priv *priv,
+ struct mt7530_hw_vlan_entry *entry)
+{
+ struct dsa_port *dp = dsa_to_port(priv->ds, entry->port);
+ u8 new_members;
+ u32 val;
+
+ new_members = entry->old_members | BIT(entry->port);
+
+ /* Validate the entry with independent learning, create the egress
+ * tag per VLAN, and join the port as one of the port members.
+ */
+ val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) | FID(FID_BRIDGED) |
+ VLAN_VALID;
+ mt7530_write(priv, MT7530_VAWD1, val);
+
+ /* Decide whether to tag outgoing packets from the port inside
+ * this VLAN.
+ * The CPU port is always treated as a tagged port so it can serve
+ * more than one VLAN, and it uses the stack egress mode so that
+ * VLAN tags are appended after the hardware special tag used as
+ * the DSA tag.
+ */
+ if (dsa_port_is_cpu(dp))
+ val = MT7530_VLAN_EGRESS_STACK;
+ else if (entry->untagged)
+ val = MT7530_VLAN_EGRESS_UNTAG;
+ else
+ val = MT7530_VLAN_EGRESS_TAG;
+ mt7530_rmw(priv, MT7530_VAWD2,
+ ETAG_CTRL_P_MASK(entry->port),
+ ETAG_CTRL_P(entry->port, val));
+}
+
+static void
+mt7530_hw_vlan_del(struct mt7530_priv *priv,
+ struct mt7530_hw_vlan_entry *entry)
+{
+ u8 new_members;
+ u32 val;
+
+ new_members = entry->old_members & ~BIT(entry->port);
+
+ val = mt7530_read(priv, MT7530_VAWD1);
+ if (!(val & VLAN_VALID)) {
+ dev_err(priv->dev,
+ "Cannot delete an invalid VLAN entry\n");
+ return;
+ }
+
+ if (new_members) {
+ val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) |
+ VLAN_VALID;
+ mt7530_write(priv, MT7530_VAWD1, val);
+ } else {
+ mt7530_write(priv, MT7530_VAWD1, 0);
+ mt7530_write(priv, MT7530_VAWD2, 0);
+ }
+}
+
+static void
+mt7530_hw_vlan_update(struct mt7530_priv *priv, u16 vid,
+ struct mt7530_hw_vlan_entry *entry,
+ mt7530_vlan_op vlan_op)
+{
+ u32 val;
+
+ /* Fetch entry */
+ mt7530_vlan_cmd(priv, MT7530_VTCR_RD_VID, vid);
+
+ val = mt7530_read(priv, MT7530_VAWD1);
+
+ entry->old_members = (val >> PORT_MEM_SHFT) & PORT_MEM_MASK;
+
+ /* Manipulate entry */
+ vlan_op(priv, entry);
+
+ /* Flush result to hardware */
+ mt7530_vlan_cmd(priv, MT7530_VTCR_WR_VID, vid);
+}
+
+static int
+mt7530_setup_vlan0(struct mt7530_priv *priv)
+{
+ u32 val;
+
+ /* Validate the entry with independent learning, keep the original
+ * ingress tag attribute.
+ */
+ val = IVL_MAC | EG_CON | PORT_MEM(MT7530_ALL_MEMBERS) | FID(FID_BRIDGED) |
+ VLAN_VALID;
+ mt7530_write(priv, MT7530_VAWD1, val);
+
+ return mt7530_vlan_cmd(priv, MT7530_VTCR_WR_VID, 0);
+}
+
+static int
+mt7530_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct mt7530_hw_vlan_entry new_entry;
+ struct mt7530_priv *priv = ds->priv;
+
+ mutex_lock(&priv->reg_mutex);
+
+ mt7530_hw_vlan_entry_init(&new_entry, port, untagged);
+ mt7530_hw_vlan_update(priv, vlan->vid, &new_entry, mt7530_hw_vlan_add);
+
+ if (pvid) {
+ priv->ports[port].pvid = vlan->vid;
+
+ /* Accept all frames if PVID is set */
+ mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
+ MT7530_VLAN_ACC_ALL);
+
+ /* Only configure PVID if VLAN filtering is enabled */
+ if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
+ mt7530_rmw(priv, MT7530_PPBV1_P(port),
+ G0_PORT_VID_MASK,
+ G0_PORT_VID(vlan->vid));
+ } else if (vlan->vid && priv->ports[port].pvid == vlan->vid) {
+ /* This VLAN is overwritten without PVID, so unset it */
+ priv->ports[port].pvid = G0_PORT_VID_DEF;
+
+ /* Only accept tagged frames if the port is VLAN-aware */
+ if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
+ mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
+ MT7530_VLAN_ACC_TAGGED);
+
+ mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
+ G0_PORT_VID_DEF);
+ }
+
+ mutex_unlock(&priv->reg_mutex);
+
+ return 0;
+}
+
+static int
+mt7530_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct mt7530_hw_vlan_entry target_entry;
+ struct mt7530_priv *priv = ds->priv;
+
+ mutex_lock(&priv->reg_mutex);
+
+ mt7530_hw_vlan_entry_init(&target_entry, port, 0);
+ mt7530_hw_vlan_update(priv, vlan->vid, &target_entry,
+ mt7530_hw_vlan_del);
+
+ /* The PVID is restored to the default whenever the PVID port is
+ * removed from the VLAN.
+ */
+ if (priv->ports[port].pvid == vlan->vid) {
+ priv->ports[port].pvid = G0_PORT_VID_DEF;
+
+ /* Only accept tagged frames if the port is VLAN-aware */
+ if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
+ mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
+ MT7530_VLAN_ACC_TAGGED);
+
+ mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
+ G0_PORT_VID_DEF);
+ }
+
+ mutex_unlock(&priv->reg_mutex);
+
+ return 0;
+}
+
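+ /* The monitor-port field sits at a different bit offset on the
+ * MT7531 than on the MT7530/MT7621, so these accessors pick the
+ * layout by chip ID.
+ */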
+static int mt753x_mirror_port_get(unsigned int id, u32 val)
+{
+ return (id == ID_MT7531) ? MT7531_MIRROR_PORT_GET(val) :
+ MIRROR_PORT(val);
+}
+
+static int mt753x_mirror_port_set(unsigned int id, u32 val)
+{
+ return (id == ID_MT7531) ? MT7531_MIRROR_PORT_SET(val) :
+ MIRROR_PORT(val);
+}
+
+static int mt753x_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
+{
+ struct mt7530_priv *priv = ds->priv;
+ int monitor_port;
+ u32 val;
+
+ /* Check for an existing entry */
+ if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
+ return -EEXIST;
+
+ val = mt7530_read(priv, MT753X_MIRROR_REG(priv->id));
+
+ /* These switches support only one monitor port */
+ monitor_port = mt753x_mirror_port_get(priv->id, val);
+ if (val & MT753X_MIRROR_EN(priv->id) &&
+ monitor_port != mirror->to_local_port)
+ return -EEXIST;
+
+ val |= MT753X_MIRROR_EN(priv->id);
+ val &= ~MT753X_MIRROR_MASK(priv->id);
+ val |= mt753x_mirror_port_set(priv->id, mirror->to_local_port);
+ mt7530_write(priv, MT753X_MIRROR_REG(priv->id), val);
+
+ val = mt7530_read(priv, MT7530_PCR_P(port));
+ if (ingress) {
+ val |= PORT_RX_MIR;
+ priv->mirror_rx |= BIT(port);
+ } else {
+ val |= PORT_TX_MIR;
+ priv->mirror_tx |= BIT(port);
+ }
+ mt7530_write(priv, MT7530_PCR_P(port), val);
+
+ return 0;
+}
+
+static void mt753x_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct mt7530_priv *priv = ds->priv;
+ u32 val;
+
+ val = mt7530_read(priv, MT7530_PCR_P(port));
+ if (mirror->ingress) {
+ val &= ~PORT_RX_MIR;
+ priv->mirror_rx &= ~BIT(port);
+ } else {
+ val &= ~PORT_TX_MIR;
+ priv->mirror_tx &= ~BIT(port);
+ }
+ mt7530_write(priv, MT7530_PCR_P(port), val);
+
+ if (!priv->mirror_rx && !priv->mirror_tx) {
+ val = mt7530_read(priv, MT753X_MIRROR_REG(priv->id));
+ val &= ~MT753X_MIRROR_EN(priv->id);
+ mt7530_write(priv, MT753X_MIRROR_REG(priv->id), val);
+ }
+}
+
+static enum dsa_tag_protocol
+mtk_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mp)
+{
+ return DSA_TAG_PROTO_MTK;
+}
+
+#ifdef CONFIG_GPIOLIB
+static inline u32
+mt7530_gpio_to_bit(unsigned int offset)
+{
+ /* Map GPIO offset to register bit
+ * [ 2: 0] port 0 LED 0..2 as GPIO 0..2
+ * [ 6: 4] port 1 LED 0..2 as GPIO 3..5
+ * [10: 8] port 2 LED 0..2 as GPIO 6..8
+ * [14:12] port 3 LED 0..2 as GPIO 9..11
+ * [18:16] port 4 LED 0..2 as GPIO 12..14
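+ *
+ * e.g. offset 4 (port 1, LED 1) maps to BIT(4 + 4 / 3) = BIT(5),
+ * which falls inside the [ 6: 4] field above.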
+ */
+ return BIT(offset + offset / 3);
+}
+
+static int
+mt7530_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct mt7530_priv *priv = gpiochip_get_data(gc);
+ u32 bit = mt7530_gpio_to_bit(offset);
+
+ return !!(mt7530_read(priv, MT7530_LED_GPIO_DATA) & bit);
+}
+
+static void
+mt7530_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ struct mt7530_priv *priv = gpiochip_get_data(gc);
+ u32 bit = mt7530_gpio_to_bit(offset);
+
+ if (value)
+ mt7530_set(priv, MT7530_LED_GPIO_DATA, bit);
+ else
+ mt7530_clear(priv, MT7530_LED_GPIO_DATA, bit);
+}
+
+static int
+mt7530_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ struct mt7530_priv *priv = gpiochip_get_data(gc);
+ u32 bit = mt7530_gpio_to_bit(offset);
+
+ return (mt7530_read(priv, MT7530_LED_GPIO_DIR) & bit) ?
+ GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
+}
+
+static int
+mt7530_gpio_direction_input(struct gpio_chip *gc, unsigned int offset)
+{
+ struct mt7530_priv *priv = gpiochip_get_data(gc);
+ u32 bit = mt7530_gpio_to_bit(offset);
+
+ mt7530_clear(priv, MT7530_LED_GPIO_OE, bit);
+ mt7530_clear(priv, MT7530_LED_GPIO_DIR, bit);
+
+ return 0;
+}
+
+static int
+mt7530_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ struct mt7530_priv *priv = gpiochip_get_data(gc);
+ u32 bit = mt7530_gpio_to_bit(offset);
+
+ mt7530_set(priv, MT7530_LED_GPIO_DIR, bit);
+
+ if (value)
+ mt7530_set(priv, MT7530_LED_GPIO_DATA, bit);
+ else
+ mt7530_clear(priv, MT7530_LED_GPIO_DATA, bit);
+
+ mt7530_set(priv, MT7530_LED_GPIO_OE, bit);
+
+ return 0;
+}
+
+static int
+mt7530_setup_gpio(struct mt7530_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct gpio_chip *gc;
+
+ gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
+ if (!gc)
+ return -ENOMEM;
+
+ mt7530_write(priv, MT7530_LED_GPIO_OE, 0);
+ mt7530_write(priv, MT7530_LED_GPIO_DIR, 0);
+ mt7530_write(priv, MT7530_LED_IO_MODE, 0);
+
+ gc->label = "mt7530";
+ gc->parent = dev;
+ gc->owner = THIS_MODULE;
+ gc->get_direction = mt7530_gpio_get_direction;
+ gc->direction_input = mt7530_gpio_direction_input;
+ gc->direction_output = mt7530_gpio_direction_output;
+ gc->get = mt7530_gpio_get;
+ gc->set = mt7530_gpio_set;
+ gc->base = -1;
+ gc->ngpio = 15;
+ gc->can_sleep = true;
+
+ return devm_gpiochip_add_data(dev, gc, priv);
+}
+#endif /* CONFIG_GPIOLIB */
+
+static irqreturn_t
+mt7530_irq_thread_fn(int irq, void *dev_id)
+{
+ struct mt7530_priv *priv = dev_id;
+ bool handled = false;
+ u32 val;
+ int p;
+
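+ /* Read the pending PHY interrupt bits, ack them by writing the
+ * value back, then dispatch a nested IRQ for every PHY whose bit
+ * is set.
+ */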
+ mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
+ val = mt7530_mii_read(priv, MT7530_SYS_INT_STS);
+ mt7530_mii_write(priv, MT7530_SYS_INT_STS, val);
+ mutex_unlock(&priv->bus->mdio_lock);
+
+ for (p = 0; p < MT7530_NUM_PHYS; p++) {
+ if (BIT(p) & val) {
+ unsigned int irq;
+
+ irq = irq_find_mapping(priv->irq_domain, p);
+ handle_nested_irq(irq);
+ handled = true;
+ }
+ }
+
+ return IRQ_RETVAL(handled);
+}
+
+static void
+mt7530_irq_mask(struct irq_data *d)
+{
+ struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
+
+ priv->irq_enable &= ~BIT(d->hwirq);
+}
+
+static void
+mt7530_irq_unmask(struct irq_data *d)
+{
+ struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
+
+ priv->irq_enable |= BIT(d->hwirq);
+}
+
+static void
+mt7530_irq_bus_lock(struct irq_data *d)
+{
+ struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
+
+ mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
+}
+
+static void
+mt7530_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
+
+ mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable);
+ mutex_unlock(&priv->bus->mdio_lock);
+}
+
+static struct irq_chip mt7530_irq_chip = {
+ .name = KBUILD_MODNAME,
+ .irq_mask = mt7530_irq_mask,
+ .irq_unmask = mt7530_irq_unmask,
+ .irq_bus_lock = mt7530_irq_bus_lock,
+ .irq_bus_sync_unlock = mt7530_irq_bus_sync_unlock,
+};
+
+static int
+mt7530_irq_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_chip_and_handler(irq, &mt7530_irq_chip, handle_simple_irq);
+ irq_set_nested_thread(irq, true);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops mt7530_irq_domain_ops = {
+ .map = mt7530_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static void
+mt7530_setup_mdio_irq(struct mt7530_priv *priv)
+{
+ struct dsa_switch *ds = priv->ds;
+ int p;
+
+ for (p = 0; p < MT7530_NUM_PHYS; p++) {
+ if (BIT(p) & ds->phys_mii_mask) {
+ unsigned int irq;
+
+ irq = irq_create_mapping(priv->irq_domain, p);
+ ds->slave_mii_bus->irq[p] = irq;
+ }
+ }
+}
+
+static int
+mt7530_setup_irq(struct mt7530_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ if (!of_property_read_bool(np, "interrupt-controller")) {
+ dev_info(dev, "no interrupt support\n");
+ return 0;
+ }
+
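+ /* A minimal, illustrative device tree fragment that opts into this
+ * interrupt support (node name and unit address are examples only):
+ *
+ * switch@1f {
+ * compatible = "mediatek,mt7530";
+ * interrupt-controller;
+ * #interrupt-cells = <1>;
+ * };
+ */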
+ priv->irq = of_irq_get(np, 0);
+ if (priv->irq <= 0) {
+ dev_err(dev, "failed to get parent IRQ: %d\n", priv->irq);
+ return priv->irq ? : -EINVAL;
+ }
+
+ priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS,
+ &mt7530_irq_domain_ops, priv);
+ if (!priv->irq_domain) {
+ dev_err(dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ /* This register must be set for MT7530 to properly fire interrupts */
+ if (priv->id != ID_MT7531)
+ mt7530_set(priv, MT7530_TOP_SIG_CTRL, TOP_SIG_CTRL_NORMAL);
+
+ ret = request_threaded_irq(priv->irq, NULL, mt7530_irq_thread_fn,
+ IRQF_ONESHOT, KBUILD_MODNAME, priv);
+ if (ret) {
+ irq_domain_remove(priv->irq_domain);
+ dev_err(dev, "failed to request IRQ: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+mt7530_free_mdio_irq(struct mt7530_priv *priv)
+{
+ int p;
+
+ for (p = 0; p < MT7530_NUM_PHYS; p++) {
+ if (BIT(p) & priv->ds->phys_mii_mask) {
+ unsigned int irq;
+
+ irq = irq_find_mapping(priv->irq_domain, p);
+ irq_dispose_mapping(irq);
+ }
+ }
+}
+
+static void
+mt7530_free_irq_common(struct mt7530_priv *priv)
+{
+ free_irq(priv->irq, priv);
+ irq_domain_remove(priv->irq_domain);
+}
+
+static void
+mt7530_free_irq(struct mt7530_priv *priv)
+{
+ mt7530_free_mdio_irq(priv);
+ mt7530_free_irq_common(priv);
+}
+
+static int
+mt7530_setup_mdio(struct mt7530_priv *priv)
+{
+ struct dsa_switch *ds = priv->ds;
+ struct device *dev = priv->dev;
+ struct mii_bus *bus;
+ static int idx;
+ int ret;
+
+ bus = devm_mdiobus_alloc(dev);
+ if (!bus)
+ return -ENOMEM;
+
+ ds->slave_mii_bus = bus;
+ bus->priv = priv;
+ bus->name = KBUILD_MODNAME "-mii";
+ snprintf(bus->id, MII_BUS_ID_SIZE, KBUILD_MODNAME "-%d", idx++);
+ bus->read = mt753x_phy_read;
+ bus->write = mt753x_phy_write;
+ bus->parent = dev;
+ bus->phy_mask = ~ds->phys_mii_mask;
+
+ if (priv->irq)
+ mt7530_setup_mdio_irq(priv);
+
+ ret = devm_mdiobus_register(dev, bus);
+ if (ret) {
+ dev_err(dev, "failed to register MDIO bus: %d\n", ret);
+ if (priv->irq)
+ mt7530_free_mdio_irq(priv);
+ }
+
+ return ret;
+}
+
+static int
+mt7530_setup(struct dsa_switch *ds)
+{
+ struct mt7530_priv *priv = ds->priv;
+ struct device_node *dn = NULL;
+ struct device_node *phy_node;
+ struct device_node *mac_np;
+ struct mt7530_dummy_poll p;
+ phy_interface_t interface;
+ struct dsa_port *cpu_dp;
+ u32 id, val;
+ int ret, i;
+
+ /* The parent node of the master netdev, which holds the common
+ * system controller, is also the container for the two GMAC nodes
+ * that represent the two netdev instances.
+ */
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ dn = cpu_dp->master->dev.of_node->parent;
+ /* It doesn't matter which CPU port is found first; their
+ * masters should share the same parent OF node.
+ */
+ break;
+ }
+
+ if (!dn) {
+ dev_err(ds->dev, "parent OF node of DSA master not found");
+ return -EINVAL;
+ }
+
+ ds->assisted_learning_on_cpu_port = true;
+ ds->mtu_enforcement_ingress = true;
+
+ if (priv->id == ID_MT7530) {
+ regulator_set_voltage(priv->core_pwr, 1000000, 1000000);
+ ret = regulator_enable(priv->core_pwr);
+ if (ret < 0) {
+ dev_err(priv->dev,
+ "Failed to enable core power: %d\n", ret);
+ return ret;
+ }
+
+ regulator_set_voltage(priv->io_pwr, 3300000, 3300000);
+ ret = regulator_enable(priv->io_pwr);
+ if (ret < 0) {
+ dev_err(priv->dev, "Failed to enable io pwr: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ /* Reset the whole chip through the GPIO pin or the memory-mapped
+ * registers, depending on the type of hardware.
+ */
+ if (priv->mcm) {
+ reset_control_assert(priv->rstc);
+ usleep_range(1000, 1100);
+ reset_control_deassert(priv->rstc);
+ } else {
+ gpiod_set_value_cansleep(priv->reset, 0);
+ usleep_range(1000, 1100);
+ gpiod_set_value_cansleep(priv->reset, 1);
+ }
+
+ /* Wait until the MT7530 is stable */
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_HWTRAP);
+ ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0,
+ 20, 1000000);
+ if (ret < 0) {
+ dev_err(priv->dev, "reset timeout\n");
+ return ret;
+ }
+
+ id = mt7530_read(priv, MT7530_CREV);
+ id >>= CHIP_NAME_SHIFT;
+ if (id != MT7530_ID) {
+ dev_err(priv->dev, "chip %x can't be supported\n", id);
+ return -ENODEV;
+ }
+
+ /* Reset the switch through internal reset */
+ mt7530_write(priv, MT7530_SYS_CTRL,
+ SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
+ SYS_CTRL_REG_RST);
+
+ mt7530_pll_setup(priv);
+
+ /* Lower Tx driving for TRGMII path */
+ for (i = 0; i < NUM_TRGMII_CTRL; i++)
+ mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
+ TD_DM_DRVP(8) | TD_DM_DRVN(8));
+
+ for (i = 0; i < NUM_TRGMII_CTRL; i++)
+ mt7530_rmw(priv, MT7530_TRGMII_RD(i),
+ RD_TAP_MASK, RD_TAP(16));
+
+ /* Enable port 6 */
+ val = mt7530_read(priv, MT7530_MHWTRAP);
+ val &= ~MHWTRAP_P6_DIS & ~MHWTRAP_PHY_ACCESS;
+ val |= MHWTRAP_MANUAL;
+ mt7530_write(priv, MT7530_MHWTRAP, val);
+
+ priv->p6_interface = PHY_INTERFACE_MODE_NA;
+
+ mt753x_trap_frames(priv);
+
+ /* Enable and reset MIB counters */
+ mt7530_mib_reset(ds);
+
+ for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ /* Disable forwarding by default on all ports */
+ mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
+ PCR_MATRIX_CLR);
+
+ /* Disable learning by default on all ports */
+ mt7530_set(priv, MT7530_PSC_P(i), SA_DIS);
+
+ if (dsa_is_cpu_port(ds, i)) {
+ ret = mt753x_cpu_port_enable(ds, i);
+ if (ret)
+ return ret;
+ } else {
+ mt7530_port_disable(ds, i);
+
+ /* Set default PVID to 0 on all user ports */
+ mt7530_rmw(priv, MT7530_PPBV1_P(i), G0_PORT_VID_MASK,
+ G0_PORT_VID_DEF);
+ }
+ /* Enable consistent egress tag */
+ mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK,
+ PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
+ }
+
+ /* Setup VLAN ID 0 for VLAN-unaware bridges */
+ ret = mt7530_setup_vlan0(priv);
+ if (ret)
+ return ret;
+
+ /* Setup port 5 */
+ priv->p5_intf_sel = P5_DISABLED;
+ interface = PHY_INTERFACE_MODE_NA;
+
+ if (!dsa_is_unused_port(ds, 5)) {
+ priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
+ ret = of_get_phy_mode(dsa_to_port(ds, 5)->dn, &interface);
+ if (ret && ret != -ENODEV)
+ return ret;
+ } else {
+ /* Scan the ethernet nodes. Look for GMAC1 and look up the PHY it uses. */
+ for_each_child_of_node(dn, mac_np) {
+ if (!of_device_is_compatible(mac_np,
+ "mediatek,eth-mac"))
+ continue;
+
+ ret = of_property_read_u32(mac_np, "reg", &id);
+ if (ret < 0 || id != 1)
+ continue;
+
+ phy_node = of_parse_phandle(mac_np, "phy-handle", 0);
+ if (!phy_node)
+ continue;
+
+ if (phy_node->parent == priv->dev->of_node->parent) {
+ ret = of_get_phy_mode(mac_np, &interface);
+ if (ret && ret != -ENODEV) {
+ of_node_put(mac_np);
+ of_node_put(phy_node);
+ return ret;
+ }
+ id = of_mdio_parse_addr(ds->dev, phy_node);
+ if (id == 0)
+ priv->p5_intf_sel = P5_INTF_SEL_PHY_P0;
+ if (id == 4)
+ priv->p5_intf_sel = P5_INTF_SEL_PHY_P4;
+ }
+ of_node_put(mac_np);
+ of_node_put(phy_node);
+ break;
+ }
+ }
+
+#ifdef CONFIG_GPIOLIB
+ if (of_property_read_bool(priv->dev->of_node, "gpio-controller")) {
+ ret = mt7530_setup_gpio(priv);
+ if (ret)
+ return ret;
+ }
+#endif /* CONFIG_GPIOLIB */
+
+ mt7530_setup_port5(ds, interface);
+
+ /* Flush the FDB table */
+ ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int
+mt7531_setup_common(struct dsa_switch *ds)
+{
+ struct mt7530_priv *priv = ds->priv;
+ struct dsa_port *cpu_dp;
+ int ret, i;
+
+ /* BPDU to CPU port */
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ mt7530_rmw(priv, MT7531_CFC, MT7531_CPU_PMAP_MASK,
+ BIT(cpu_dp->index));
+ break;
+ }
+
+ mt753x_trap_frames(priv);
+
+ /* Enable and reset MIB counters */
+ mt7530_mib_reset(ds);
+
+ /* Disable flooding on all ports */
+ mt7530_clear(priv, MT7530_MFC, BC_FFP_MASK | UNM_FFP_MASK |
+ UNU_FFP_MASK);
+
+ for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ /* Disable forwarding by default on all ports */
+ mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
+ PCR_MATRIX_CLR);
+
+ /* Disable learning by default on all ports */
+ mt7530_set(priv, MT7530_PSC_P(i), SA_DIS);
+
+ mt7530_set(priv, MT7531_DBG_CNT(i), MT7531_DIS_CLR);
+
+ if (dsa_is_cpu_port(ds, i)) {
+ ret = mt753x_cpu_port_enable(ds, i);
+ if (ret)
+ return ret;
+ } else {
+ mt7530_port_disable(ds, i);
+
+ /* Set default PVID to 0 on all user ports */
+ mt7530_rmw(priv, MT7530_PPBV1_P(i), G0_PORT_VID_MASK,
+ G0_PORT_VID_DEF);
+ }
+
+ /* Enable consistent egress tag */
+ mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK,
+ PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
+ }
+
+ /* Flush the FDB table */
+ ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int
+mt7531_setup(struct dsa_switch *ds)
+{
+ struct mt7530_priv *priv = ds->priv;
+ struct mt7530_dummy_poll p;
+ u32 val, id;
+ int ret, i;
+
+ /* Reset the whole chip through the GPIO pin or the memory-mapped
+ * registers, depending on the type of hardware.
+ */
+ if (priv->mcm) {
+ reset_control_assert(priv->rstc);
+ usleep_range(1000, 1100);
+ reset_control_deassert(priv->rstc);
+ } else {
+ gpiod_set_value_cansleep(priv->reset, 0);
+ usleep_range(1000, 1100);
+ gpiod_set_value_cansleep(priv->reset, 1);
+ }
+
+ /* Wait until the switch is stable */
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_HWTRAP);
+ ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0,
+ 20, 1000000);
+ if (ret < 0) {
+ dev_err(priv->dev, "reset timeout\n");
+ return ret;
+ }
+
+ id = mt7530_read(priv, MT7531_CREV);
+ id >>= CHIP_NAME_SHIFT;
+
+ if (id != MT7531_ID) {
+ dev_err(priv->dev, "chip %x can't be supported\n", id);
+ return -ENODEV;
+ }
+
+ /* All MACs must be forced link-down before the SW reset */
+ for (i = 0; i < MT7530_NUM_PORTS; i++)
+ mt7530_write(priv, MT7530_PMCR_P(i), MT7531_FORCE_LNK);
+
+ /* Reset the switch through internal reset */
+ mt7530_write(priv, MT7530_SYS_CTRL,
+ SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
+ SYS_CTRL_REG_RST);
+
+ mt7531_pll_setup(priv);
+
+ if (mt7531_dual_sgmii_supported(priv)) {
+ priv->p5_intf_sel = P5_INTF_SEL_GMAC5_SGMII;
+
+ /* Let ds->slave_mii_bus access the external PHY. */
+ mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO11_RG_RXD2_MASK,
+ MT7531_EXT_P_MDC_11);
+ mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO12_RG_RXD3_MASK,
+ MT7531_EXT_P_MDIO_12);
+ } else {
+ priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
+ }
+ dev_dbg(ds->dev, "P5 support %s interface\n",
+ p5_intf_modes(priv->p5_intf_sel));
+
+ mt7530_rmw(priv, MT7531_GPIO_MODE0, MT7531_GPIO0_MASK,
+ MT7531_GPIO0_INTERRUPT);
+
+ /* Let phylink decide the interface later. */
+ priv->p5_interface = PHY_INTERFACE_MODE_NA;
+ priv->p6_interface = PHY_INTERFACE_MODE_NA;
+
+ /* Enable the PHY core PLL. Since no phy_device has been created
+ * yet for phy_[read,write]_mmd_indirect to operate on, use our own
+ * mt7531_ind_c45_phy_[read,write] helpers to do the access.
+ */
+ val = mt7531_ind_c45_phy_read(priv, MT753X_CTRL_PHY_ADDR,
+ MDIO_MMD_VEND2, CORE_PLL_GROUP4);
+ val |= MT7531_PHY_PLL_BYPASS_MODE;
+ val &= ~MT7531_PHY_PLL_OFF;
+ mt7531_ind_c45_phy_write(priv, MT753X_CTRL_PHY_ADDR, MDIO_MMD_VEND2,
+ CORE_PLL_GROUP4, val);
+
+ mt7531_setup_common(ds);
+
+ /* Setup VLAN ID 0 for VLAN-unaware bridges */
+ ret = mt7530_setup_vlan0(priv);
+ if (ret)
+ return ret;
+
+ ds->assisted_learning_on_cpu_port = true;
+ ds->mtu_enforcement_ingress = true;
+
+ return 0;
+}
+
+static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ switch (port) {
+ case 0 ... 4: /* Internal phy */
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ break;
+
+ case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ break;
+
+ case 6: /* 1st cpu port */
+ __set_bit(PHY_INTERFACE_MODE_RGMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_TRGMII,
+ config->supported_interfaces);
+ break;
+ }
+}
+
+static bool mt7531_is_rgmii_port(struct mt7530_priv *priv, u32 port)
+{
+ return (port == 5) && (priv->p5_intf_sel != P5_INTF_SEL_GMAC5_SGMII);
+}
+
+static void mt7531_mac_port_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ switch (port) {
+ case 0 ... 4: /* Internal phy */
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ break;
+
+ case 5: /* 2nd cpu port supports either rgmii or sgmii/8023z */
+ if (mt7531_is_rgmii_port(priv, port)) {
+ phy_interface_set_rgmii(config->supported_interfaces);
+ break;
+ }
+ fallthrough;
+
+ case 6: /* 1st cpu port supports sgmii/8023z only */
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ config->supported_interfaces);
+
+ config->mac_capabilities |= MAC_2500FD;
+ break;
+ }
+}
+
+static int
+mt753x_pad_setup(struct dsa_switch *ds, const struct phylink_link_state *state)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ return priv->info->pad_setup(ds, state->interface);
+}
+
+static int
+mt7530_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ /* Only port 5 needs to be set up. */
+ if (port != 5)
+ return 0;
+
+ mt7530_setup_port5(priv->ds, interface);
+
+ return 0;
+}
+
+static int mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port,
+ phy_interface_t interface,
+ struct phy_device *phydev)
+{
+ u32 val;
+
+ if (!mt7531_is_rgmii_port(priv, port)) {
+ dev_err(priv->dev, "RGMII mode is not available for port %d\n",
+ port);
+ return -EINVAL;
+ }
+
+ val = mt7530_read(priv, MT7531_CLKGEN_CTRL);
+ val |= GP_CLK_EN;
+ val &= ~GP_MODE_MASK;
+ val |= GP_MODE(MT7531_GP_MODE_RGMII);
+ val &= ~CLK_SKEW_IN_MASK;
+ val |= CLK_SKEW_IN(MT7531_CLK_SKEW_NO_CHG);
+ val &= ~CLK_SKEW_OUT_MASK;
+ val |= CLK_SKEW_OUT(MT7531_CLK_SKEW_NO_CHG);
+ val |= TXCLK_NO_REVERSE | RXCLK_NO_DELAY;
+
+ /* Do not adjust the RGMII delay when a vendor PHY driver is present. */
+ if (!phydev || phy_driver_is_genphy(phydev)) {
+ val &= ~(TXCLK_NO_REVERSE | RXCLK_NO_DELAY);
+ switch (interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val |= TXCLK_NO_REVERSE;
+ val |= RXCLK_NO_DELAY;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ val |= TXCLK_NO_REVERSE;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ val |= RXCLK_NO_DELAY;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ mt7530_write(priv, MT7531_CLKGEN_CTRL, val);
+
+ return 0;
+}
+
+static void mt7531_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex)
+{
+ struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
+ int port = pcs_to_mt753x_pcs(pcs)->port;
+ unsigned int val;
+
+ /* Only adjust the speed and duplex for SGMII force mode. */
+ if (interface != PHY_INTERFACE_MODE_SGMII ||
+ phylink_autoneg_inband(mode))
+ return;
+
+ /* SGMII force mode setting */
+ val = mt7530_read(priv, MT7531_SGMII_MODE(port));
+ val &= ~MT7531_SGMII_IF_MODE_MASK;
+
+ switch (speed) {
+ case SPEED_10:
+ val |= MT7531_SGMII_FORCE_SPEED_10;
+ break;
+ case SPEED_100:
+ val |= MT7531_SGMII_FORCE_SPEED_100;
+ break;
+ case SPEED_1000:
+ val |= MT7531_SGMII_FORCE_SPEED_1000;
+ break;
+ }
+
+ /* MT7531 SGMII 1G force mode can only work in full duplex mode,
+ * regardless of whether MT7531_SGMII_FORCE_HALF_DUPLEX is set.
+ *
+ * The speed check is unnecessary as the MAC capabilities apply
+ * this restriction. --rmk
+ */
+ if ((speed == SPEED_10 || speed == SPEED_100) &&
+ duplex != DUPLEX_FULL)
+ val |= MT7531_SGMII_FORCE_HALF_DUPLEX;
+
+ mt7530_write(priv, MT7531_SGMII_MODE(port), val);
+}
+
+static bool mt753x_is_mac_port(u32 port)
+{
+ return (port == 5 || port == 6);
+}
+
+static int mt7531_sgmii_setup_mode_force(struct mt7530_priv *priv, u32 port,
+ phy_interface_t interface)
+{
+ u32 val;
+
+ if (!mt753x_is_mac_port(port))
+ return -EINVAL;
+
+ mt7530_set(priv, MT7531_QPHY_PWR_STATE_CTRL(port),
+ MT7531_SGMII_PHYA_PWD);
+
+ val = mt7530_read(priv, MT7531_PHYA_CTRL_SIGNAL3(port));
+ val &= ~MT7531_RG_TPHY_SPEED_MASK;
+ /* Set up a 2.5 times faster clock for 2.5 Gbps data speeds with
+ * 8b/10b encoding.
+ */
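+ /* (2.5 Gbps x 10 / 8 line coding = 3.125 GBd, hence
+ * MT7531_RG_TPHY_SPEED_3_125G; 1 Gbps needs 1.25 GBd.)
+ */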
+ val |= (interface == PHY_INTERFACE_MODE_2500BASEX) ?
+ MT7531_RG_TPHY_SPEED_3_125G : MT7531_RG_TPHY_SPEED_1_25G;
+ mt7530_write(priv, MT7531_PHYA_CTRL_SIGNAL3(port), val);
+
+ mt7530_clear(priv, MT7531_PCS_CONTROL_1(port), MT7531_SGMII_AN_ENABLE);
+
+ /* MT7531 SGMII 1G and 2.5G force mode can only work in full
+ * duplex mode, regardless of whether MT7531_SGMII_FORCE_HALF_DUPLEX
+ * is set.
+ */
+ mt7530_rmw(priv, MT7531_SGMII_MODE(port),
+ MT7531_SGMII_IF_MODE_MASK | MT7531_SGMII_REMOTE_FAULT_DIS,
+ MT7531_SGMII_FORCE_SPEED_1000);
+
+ mt7530_write(priv, MT7531_QPHY_PWR_STATE_CTRL(port), 0);
+
+ return 0;
+}
+
+static int mt7531_sgmii_setup_mode_an(struct mt7530_priv *priv, int port,
+ phy_interface_t interface)
+{
+ if (!mt753x_is_mac_port(port))
+ return -EINVAL;
+
+ mt7530_set(priv, MT7531_QPHY_PWR_STATE_CTRL(port),
+ MT7531_SGMII_PHYA_PWD);
+
+ mt7530_rmw(priv, MT7531_PHYA_CTRL_SIGNAL3(port),
+ MT7531_RG_TPHY_SPEED_MASK, MT7531_RG_TPHY_SPEED_1_25G);
+
+ mt7530_set(priv, MT7531_SGMII_MODE(port),
+ MT7531_SGMII_REMOTE_FAULT_DIS |
+ MT7531_SGMII_SPEED_DUPLEX_AN);
+
+ mt7530_rmw(priv, MT7531_PCS_SPEED_ABILITY(port),
+ MT7531_SGMII_TX_CONFIG_MASK, 1);
+
+ mt7530_set(priv, MT7531_PCS_CONTROL_1(port), MT7531_SGMII_AN_ENABLE);
+
+ mt7530_set(priv, MT7531_PCS_CONTROL_1(port), MT7531_SGMII_AN_RESTART);
+
+ mt7530_write(priv, MT7531_QPHY_PWR_STATE_CTRL(port), 0);
+
+ return 0;
+}
+
+static void mt7531_pcs_an_restart(struct phylink_pcs *pcs)
+{
+ struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
+ int port = pcs_to_mt753x_pcs(pcs)->port;
+ u32 val;
+
+ /* Only restart AN when AN is enabled */
+ val = mt7530_read(priv, MT7531_PCS_CONTROL_1(port));
+ if (val & MT7531_SGMII_AN_ENABLE) {
+ val |= MT7531_SGMII_AN_RESTART;
+ mt7530_write(priv, MT7531_PCS_CONTROL_1(port), val);
+ }
+}
+
+static int
+mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct mt7530_priv *priv = ds->priv;
+ struct phy_device *phydev;
+ struct dsa_port *dp;
+
+ if (!mt753x_is_mac_port(port)) {
+ dev_err(priv->dev, "port %d is not a MAC port\n", port);
+ return -EINVAL;
+ }
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ dp = dsa_to_port(ds, port);
+ phydev = dp->slave->phydev;
+ return mt7531_rgmii_setup(priv, port, interface, phydev);
+ case PHY_INTERFACE_MODE_SGMII:
+ return mt7531_sgmii_setup_mode_an(priv, port, interface);
+ case PHY_INTERFACE_MODE_NA:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return mt7531_sgmii_setup_mode_force(priv, port, interface);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+mt753x_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ return priv->info->mac_port_config(ds, port, mode, state->interface);
+}
+
+static struct phylink_pcs *
+mt753x_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
+ phy_interface_t interface)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_TRGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return &priv->pcs[port].pcs;
+
+ default:
+ return NULL;
+ }
+}
+
+static void
+mt753x_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct mt7530_priv *priv = ds->priv;
+ u32 mcr_cur, mcr_new;
+
+ switch (port) {
+ case 0 ... 4: /* Internal phy */
+ if (state->interface != PHY_INTERFACE_MODE_GMII)
+ goto unsupported;
+ break;
+ case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
+ if (priv->p5_interface == state->interface)
+ break;
+
+ if (mt753x_mac_config(ds, port, mode, state) < 0)
+ goto unsupported;
+
+ if (priv->p5_intf_sel != P5_DISABLED)
+ priv->p5_interface = state->interface;
+ break;
+ case 6: /* 1st cpu port */
+ if (priv->p6_interface == state->interface)
+ break;
+
+ mt753x_pad_setup(ds, state);
+
+ if (mt753x_mac_config(ds, port, mode, state) < 0)
+ goto unsupported;
+
+ priv->p6_interface = state->interface;
+ break;
+ default:
+unsupported:
+ dev_err(ds->dev, "%s: unsupported %s port: %i\n",
+ __func__, phy_modes(state->interface), port);
+ return;
+ }
+
+ mcr_cur = mt7530_read(priv, MT7530_PMCR_P(port));
+ mcr_new = mcr_cur;
+ mcr_new &= ~PMCR_LINK_SETTINGS_MASK;
+ mcr_new |= PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | PMCR_BACKOFF_EN |
+ PMCR_BACKPR_EN | PMCR_FORCE_MODE_ID(priv->id);
+
+ /* Are we connected to an external PHY? */
+ if (port == 5 && dsa_is_user_port(ds, 5))
+ mcr_new |= PMCR_EXT_PHY;
+
+ if (mcr_new != mcr_cur)
+ mt7530_write(priv, MT7530_PMCR_P(port), mcr_new);
+}
+
+static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
+}
+
+static void mt753x_phylink_pcs_link_up(struct phylink_pcs *pcs,
+ unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex)
+{
+ if (pcs->ops->pcs_link_up)
+ pcs->ops->pcs_link_up(pcs, mode, interface, speed, duplex);
+}
+
+static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct mt7530_priv *priv = ds->priv;
+ u32 mcr;
+
+ mcr = PMCR_RX_EN | PMCR_TX_EN | PMCR_FORCE_LNK;
+
+ /* MT753x MAC works in 1G full duplex mode for all up-clocked
+ * variants.
+ */
+ if (interface == PHY_INTERFACE_MODE_TRGMII ||
+ (phy_interface_mode_is_8023z(interface))) {
+ speed = SPEED_1000;
+ duplex = DUPLEX_FULL;
+ }
+
+ switch (speed) {
+ case SPEED_1000:
+ mcr |= PMCR_FORCE_SPEED_1000;
+ break;
+ case SPEED_100:
+ mcr |= PMCR_FORCE_SPEED_100;
+ break;
+ }
+ if (duplex == DUPLEX_FULL) {
+ mcr |= PMCR_FORCE_FDX;
+ if (tx_pause)
+ mcr |= PMCR_TX_FC_EN;
+ if (rx_pause)
+ mcr |= PMCR_RX_FC_EN;
+ }
+
+ if (mode == MLO_AN_PHY && phydev && phy_init_eee(phydev, false) >= 0) {
+ switch (speed) {
+ case SPEED_1000:
+ mcr |= PMCR_FORCE_EEE1G;
+ break;
+ case SPEED_100:
+ mcr |= PMCR_FORCE_EEE100;
+ break;
+ }
+ }
+
+ mt7530_set(priv, MT7530_PMCR_P(port), mcr);
+}
+
+static int
+mt7531_cpu_port_config(struct dsa_switch *ds, int port)
+{
+ struct mt7530_priv *priv = ds->priv;
+ phy_interface_t interface;
+ int speed;
+ int ret;
+
+ switch (port) {
+ case 5:
+ if (mt7531_is_rgmii_port(priv, port))
+ interface = PHY_INTERFACE_MODE_RGMII;
+ else
+ interface = PHY_INTERFACE_MODE_2500BASEX;
+
+ priv->p5_interface = interface;
+ break;
+ case 6:
+ interface = PHY_INTERFACE_MODE_2500BASEX;
+
+ priv->p6_interface = interface;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (interface == PHY_INTERFACE_MODE_2500BASEX)
+ speed = SPEED_2500;
+ else
+ speed = SPEED_1000;
+
+ ret = mt7531_mac_config(ds, port, MLO_AN_FIXED, interface);
+ if (ret)
+ return ret;
+ mt7530_write(priv, MT7530_PMCR_P(port),
+ PMCR_CPU_PORT_SETTING(priv->id));
+ mt753x_phylink_pcs_link_up(&priv->pcs[port].pcs, MLO_AN_FIXED,
+ interface, speed, DUPLEX_FULL);
+ mt753x_phylink_mac_link_up(ds, port, MLO_AN_FIXED, interface, NULL,
+ speed, DUPLEX_FULL, true, true);
+
+ return 0;
+}
+
+static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ /* This switch only supports full-duplex at 1Gbps */
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD;
+
+ if ((priv->id == ID_MT7531) && mt753x_is_mac_port(port))
+ config->mac_capabilities |= MAC_2500FD;
+
+ /* This driver does not make use of the speed, duplex, pause or the
+ * advertisement in its mac_config, so it is safe to mark this driver
+ * as non-legacy.
+ */
+ config->legacy_pre_march2020 = false;
+
+ priv->info->mac_port_get_caps(ds, port, config);
+}
+
+static int mt753x_pcs_validate(struct phylink_pcs *pcs,
+ unsigned long *supported,
+ const struct phylink_link_state *state)
+{
+ /* Autonegotiation is not supported in TRGMII or 802.3z modes */
+ if (state->interface == PHY_INTERFACE_MODE_TRGMII ||
+ phy_interface_mode_is_8023z(state->interface))
+ phylink_clear(supported, Autoneg);
+
+ return 0;
+}
+
+static void mt7530_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
+ int port = pcs_to_mt753x_pcs(pcs)->port;
+ u32 pmsr;
+
+ pmsr = mt7530_read(priv, MT7530_PMSR_P(port));
+
+ state->link = (pmsr & PMSR_LINK);
+ state->an_complete = state->link;
+ state->duplex = !!(pmsr & PMSR_DPX);
+
+ switch (pmsr & PMSR_SPEED_MASK) {
+ case PMSR_SPEED_10:
+ state->speed = SPEED_10;
+ break;
+ case PMSR_SPEED_100:
+ state->speed = SPEED_100;
+ break;
+ case PMSR_SPEED_1000:
+ state->speed = SPEED_1000;
+ break;
+ default:
+ state->speed = SPEED_UNKNOWN;
+ break;
+ }
+
+ state->pause &= ~(MLO_PAUSE_RX | MLO_PAUSE_TX);
+ if (pmsr & PMSR_RX_FC)
+ state->pause |= MLO_PAUSE_RX;
+ if (pmsr & PMSR_TX_FC)
+ state->pause |= MLO_PAUSE_TX;
+}
+
+static int
+mt7531_sgmii_pcs_get_state_an(struct mt7530_priv *priv, int port,
+ struct phylink_link_state *state)
+{
+ u32 status, val;
+ u16 config_reg;
+
+ status = mt7530_read(priv, MT7531_PCS_CONTROL_1(port));
+ state->link = !!(status & MT7531_SGMII_LINK_STATUS);
+ state->an_complete = !!(status & MT7531_SGMII_AN_COMPLETE);
+ if (state->interface == PHY_INTERFACE_MODE_SGMII &&
+ (status & MT7531_SGMII_AN_ENABLE)) {
+ val = mt7530_read(priv, MT7531_PCS_SPEED_ABILITY(port));
+ config_reg = val >> 16;
+
+ switch (config_reg & LPA_SGMII_SPD_MASK) {
+ case LPA_SGMII_1000:
+ state->speed = SPEED_1000;
+ break;
+ case LPA_SGMII_100:
+ state->speed = SPEED_100;
+ break;
+ case LPA_SGMII_10:
+ state->speed = SPEED_10;
+ break;
+ default:
+ dev_err(priv->dev, "invalid sgmii PHY speed\n");
+ state->link = false;
+ return -EINVAL;
+ }
+
+ if (config_reg & LPA_SGMII_FULL_DUPLEX)
+ state->duplex = DUPLEX_FULL;
+ else
+ state->duplex = DUPLEX_HALF;
+ }
+
+ return 0;
+}
+
+static void
+mt7531_sgmii_pcs_get_state_inband(struct mt7530_priv *priv, int port,
+ struct phylink_link_state *state)
+{
+ unsigned int val;
+
+ val = mt7530_read(priv, MT7531_PCS_CONTROL_1(port));
+ state->link = !!(val & MT7531_SGMII_LINK_STATUS);
+ if (!state->link)
+ return;
+
+ state->an_complete = state->link;
+
+ if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+ state->speed = SPEED_2500;
+ else
+ state->speed = SPEED_1000;
+
+ state->duplex = DUPLEX_FULL;
+ state->pause = MLO_PAUSE_NONE;
+}
+
+static void mt7531_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
+ int port = pcs_to_mt753x_pcs(pcs)->port;
+
+ if (state->interface == PHY_INTERFACE_MODE_SGMII) {
+ mt7531_sgmii_pcs_get_state_an(priv, port, state);
+ return;
+ } else if ((state->interface == PHY_INTERFACE_MODE_1000BASEX) ||
+ (state->interface == PHY_INTERFACE_MODE_2500BASEX)) {
+ mt7531_sgmii_pcs_get_state_inband(priv, port, state);
+ return;
+ }
+
+ state->link = false;
+}
+
+static int mt753x_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ return 0;
+}
+
+static void mt7530_pcs_an_restart(struct phylink_pcs *pcs)
+{
+}
+
+static const struct phylink_pcs_ops mt7530_pcs_ops = {
+ .pcs_validate = mt753x_pcs_validate,
+ .pcs_get_state = mt7530_pcs_get_state,
+ .pcs_config = mt753x_pcs_config,
+ .pcs_an_restart = mt7530_pcs_an_restart,
+};
+
+static const struct phylink_pcs_ops mt7531_pcs_ops = {
+ .pcs_validate = mt753x_pcs_validate,
+ .pcs_get_state = mt7531_pcs_get_state,
+ .pcs_config = mt753x_pcs_config,
+ .pcs_an_restart = mt7531_pcs_an_restart,
+ .pcs_link_up = mt7531_pcs_link_up,
+};
+
+static int
+mt753x_setup(struct dsa_switch *ds)
+{
+ struct mt7530_priv *priv = ds->priv;
+ int i, ret;
+
+ /* Initialise the PCS devices */
+ for (i = 0; i < priv->ds->num_ports; i++) {
+ priv->pcs[i].pcs.ops = priv->info->pcs_ops;
+ priv->pcs[i].priv = priv;
+ priv->pcs[i].port = i;
+ if (mt753x_is_mac_port(i))
+ priv->pcs[i].pcs.poll = 1;
+ }
+
+ ret = priv->info->sw_setup(ds);
+ if (ret)
+ return ret;
+
+ ret = mt7530_setup_irq(priv);
+ if (ret)
+ return ret;
+
+ ret = mt7530_setup_mdio(priv);
+ if (ret && priv->irq)
+ mt7530_free_irq_common(priv);
+
+ return ret;
+}
+
+static int mt753x_get_mac_eee(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e)
+{
+ struct mt7530_priv *priv = ds->priv;
+ u32 eeecr = mt7530_read(priv, MT7530_PMEEECR_P(port));
+
+ e->tx_lpi_enabled = !(eeecr & LPI_MODE_EN);
+ e->tx_lpi_timer = GET_LPI_THRESH(eeecr);
+
+ return 0;
+}
+
+static int mt753x_set_mac_eee(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e)
+{
+ struct mt7530_priv *priv = ds->priv;
+ u32 set, mask = LPI_THRESH_MASK | LPI_MODE_EN;
+
+ if (e->tx_lpi_timer > 0xFFF)
+ return -EINVAL;
+
+ set = SET_LPI_THRESH(e->tx_lpi_timer);
+ if (!e->tx_lpi_enabled)
+ /* Force LPI Mode without a delay */
+ set |= LPI_MODE_EN;
+ mt7530_rmw(priv, MT7530_PMEEECR_P(port), mask, set);
+
+ return 0;
+}
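+
+/* Encoding sketch (illustrative): the LPI threshold occupies bits 15:4 of
+ * PMEEECR, so the largest programmable tx_lpi_timer is 0xFFF. For example,
+ * a requested timer of 32 is packed as:
+ *
+ *	SET_LPI_THRESH(32) == (32 << 4) & LPI_THRESH_MASK == 0x200
+ *
+ * and recovered symmetrically with GET_LPI_THRESH().
+ */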
+
+static const struct dsa_switch_ops mt7530_switch_ops = {
+ .get_tag_protocol = mtk_get_tag_protocol,
+ .setup = mt753x_setup,
+ .get_strings = mt7530_get_strings,
+ .get_ethtool_stats = mt7530_get_ethtool_stats,
+ .get_sset_count = mt7530_get_sset_count,
+ .set_ageing_time = mt7530_set_ageing_time,
+ .port_enable = mt7530_port_enable,
+ .port_disable = mt7530_port_disable,
+ .port_change_mtu = mt7530_port_change_mtu,
+ .port_max_mtu = mt7530_port_max_mtu,
+ .port_stp_state_set = mt7530_stp_state_set,
+ .port_pre_bridge_flags = mt7530_port_pre_bridge_flags,
+ .port_bridge_flags = mt7530_port_bridge_flags,
+ .port_bridge_join = mt7530_port_bridge_join,
+ .port_bridge_leave = mt7530_port_bridge_leave,
+ .port_fdb_add = mt7530_port_fdb_add,
+ .port_fdb_del = mt7530_port_fdb_del,
+ .port_fdb_dump = mt7530_port_fdb_dump,
+ .port_mdb_add = mt7530_port_mdb_add,
+ .port_mdb_del = mt7530_port_mdb_del,
+ .port_vlan_filtering = mt7530_port_vlan_filtering,
+ .port_vlan_add = mt7530_port_vlan_add,
+ .port_vlan_del = mt7530_port_vlan_del,
+ .port_mirror_add = mt753x_port_mirror_add,
+ .port_mirror_del = mt753x_port_mirror_del,
+ .phylink_get_caps = mt753x_phylink_get_caps,
+ .phylink_mac_select_pcs = mt753x_phylink_mac_select_pcs,
+ .phylink_mac_config = mt753x_phylink_mac_config,
+ .phylink_mac_link_down = mt753x_phylink_mac_link_down,
+ .phylink_mac_link_up = mt753x_phylink_mac_link_up,
+ .get_mac_eee = mt753x_get_mac_eee,
+ .set_mac_eee = mt753x_set_mac_eee,
+};
+
+static const struct mt753x_info mt753x_table[] = {
+ [ID_MT7621] = {
+ .id = ID_MT7621,
+ .pcs_ops = &mt7530_pcs_ops,
+ .sw_setup = mt7530_setup,
+ .phy_read = mt7530_phy_read,
+ .phy_write = mt7530_phy_write,
+ .pad_setup = mt7530_pad_clk_setup,
+ .mac_port_get_caps = mt7530_mac_port_get_caps,
+ .mac_port_config = mt7530_mac_config,
+ },
+ [ID_MT7530] = {
+ .id = ID_MT7530,
+ .pcs_ops = &mt7530_pcs_ops,
+ .sw_setup = mt7530_setup,
+ .phy_read = mt7530_phy_read,
+ .phy_write = mt7530_phy_write,
+ .pad_setup = mt7530_pad_clk_setup,
+ .mac_port_get_caps = mt7530_mac_port_get_caps,
+ .mac_port_config = mt7530_mac_config,
+ },
+ [ID_MT7531] = {
+ .id = ID_MT7531,
+ .pcs_ops = &mt7531_pcs_ops,
+ .sw_setup = mt7531_setup,
+ .phy_read = mt7531_ind_phy_read,
+ .phy_write = mt7531_ind_phy_write,
+ .pad_setup = mt7531_pad_setup,
+ .cpu_port_config = mt7531_cpu_port_config,
+ .mac_port_get_caps = mt7531_mac_port_get_caps,
+ .mac_port_config = mt7531_mac_config,
+ },
+};
+
+static const struct of_device_id mt7530_of_match[] = {
+ { .compatible = "mediatek,mt7621", .data = &mt753x_table[ID_MT7621], },
+ { .compatible = "mediatek,mt7530", .data = &mt753x_table[ID_MT7530], },
+ { .compatible = "mediatek,mt7531", .data = &mt753x_table[ID_MT7531], },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, mt7530_of_match);
+
+static int
+mt7530_probe(struct mdio_device *mdiodev)
+{
+ struct mt7530_priv *priv;
+ struct device_node *dn;
+
+ dn = mdiodev->dev.of_node;
+
+ priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
+ if (!priv->ds)
+ return -ENOMEM;
+
+ priv->ds->dev = &mdiodev->dev;
+ priv->ds->num_ports = MT7530_NUM_PORTS;
+
+ /* Use the mediatek,mcm property to distinguish hardware types that
+ * have slightly different power-on sequences.
+ */
+ priv->mcm = of_property_read_bool(dn, "mediatek,mcm");
+ if (priv->mcm) {
+ dev_info(&mdiodev->dev, "MT7530 adapts as multi-chip module\n");
+
+ priv->rstc = devm_reset_control_get(&mdiodev->dev, "mcm");
+ if (IS_ERR(priv->rstc)) {
+ dev_err(&mdiodev->dev, "Couldn't get our reset line\n");
+ return PTR_ERR(priv->rstc);
+ }
+ }
+
+ /* Get the hardware identifier from the devicetree node.
+ * We will need it for some of the clock and regulator setup.
+ */
+ priv->info = of_device_get_match_data(&mdiodev->dev);
+ if (!priv->info)
+ return -EINVAL;
+
+ /* Sanity check that the required device operations are filled in
+ * properly.
+ */
+ if (!priv->info->sw_setup || !priv->info->pad_setup ||
+ !priv->info->phy_read || !priv->info->phy_write ||
+ !priv->info->mac_port_get_caps ||
+ !priv->info->mac_port_config)
+ return -EINVAL;
+
+ priv->id = priv->info->id;
+
+ if (priv->id == ID_MT7530) {
+ priv->core_pwr = devm_regulator_get(&mdiodev->dev, "core");
+ if (IS_ERR(priv->core_pwr))
+ return PTR_ERR(priv->core_pwr);
+
+ priv->io_pwr = devm_regulator_get(&mdiodev->dev, "io");
+ if (IS_ERR(priv->io_pwr))
+ return PTR_ERR(priv->io_pwr);
+ }
+
+ /* Not MCM indicates that the switch works as a standalone integrated
+ * circuit, so a GPIO pin is used to complete the reset. In the MCM
+ * case, the reset is instead performed through memory-mapped registers
+ * provided by syscon.
+ */
+ if (!priv->mcm) {
+ priv->reset = devm_gpiod_get_optional(&mdiodev->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(priv->reset)) {
+ dev_err(&mdiodev->dev, "Couldn't get our reset line\n");
+ return PTR_ERR(priv->reset);
+ }
+ }
+
+ priv->bus = mdiodev->bus;
+ priv->dev = &mdiodev->dev;
+ priv->ds->priv = priv;
+ priv->ds->ops = &mt7530_switch_ops;
+ mutex_init(&priv->reg_mutex);
+ dev_set_drvdata(&mdiodev->dev, priv);
+
+ return dsa_register_switch(priv->ds);
+}
+
+static void
+mt7530_remove(struct mdio_device *mdiodev)
+{
+ struct mt7530_priv *priv = dev_get_drvdata(&mdiodev->dev);
+ int ret = 0;
+
+ if (!priv)
+ return;
+
+ if (priv->id == ID_MT7530) {
+ ret = regulator_disable(priv->core_pwr);
+ if (ret < 0)
+ dev_err(priv->dev,
+ "Failed to disable core power: %d\n", ret);
+
+ ret = regulator_disable(priv->io_pwr);
+ if (ret < 0)
+ dev_err(priv->dev,
+ "Failed to disable io power: %d\n", ret);
+ }
+
+ if (priv->irq)
+ mt7530_free_irq(priv);
+
+ dsa_unregister_switch(priv->ds);
+ mutex_destroy(&priv->reg_mutex);
+}
+
+static void mt7530_shutdown(struct mdio_device *mdiodev)
+{
+ struct mt7530_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static struct mdio_driver mt7530_mdio_driver = {
+ .probe = mt7530_probe,
+ .remove = mt7530_remove,
+ .shutdown = mt7530_shutdown,
+ .mdiodrv.driver = {
+ .name = "mt7530",
+ .of_match_table = mt7530_of_match,
+ },
+};
+
+mdio_module_driver(mt7530_mdio_driver);
+
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_DESCRIPTION("Driver for Mediatek MT7530 Switch");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
new file mode 100644
index 000000000..6202b0f8c
--- /dev/null
+++ b/drivers/net/dsa/mt7530.h
@@ -0,0 +1,872 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2017 Sean Wang <sean.wang@mediatek.com>
+ */
+
+#ifndef __MT7530_H
+#define __MT7530_H
+
+#define MT7530_NUM_PORTS 7
+#define MT7530_NUM_PHYS 5
+#define MT7530_NUM_FDB_RECORDS 2048
+#define MT7530_ALL_MEMBERS 0xff
+
+#define MTK_HDR_LEN 4
+#define MT7530_MAX_MTU (15 * 1024 - ETH_HLEN - ETH_FCS_LEN - MTK_HDR_LEN)
+
+enum mt753x_id {
+ ID_MT7530 = 0,
+ ID_MT7621 = 1,
+ ID_MT7531 = 2,
+};
+
+#define NUM_TRGMII_CTRL 5
+
+#define TRGMII_BASE(x) (0x10000 + (x))
+
+/* Registers to ethsys access */
+#define ETHSYS_CLKCFG0 0x2c
+#define ETHSYS_TRGMII_CLK_SEL362_5 BIT(11)
+
+#define SYSC_REG_RSTCTRL 0x34
+#define RESET_MCM BIT(2)
+
+/* Registers for MAC forward control of unknown frames */
+#define MT7530_MFC 0x10
+#define BC_FFP(x) (((x) & 0xff) << 24)
+#define BC_FFP_MASK BC_FFP(~0)
+#define UNM_FFP(x) (((x) & 0xff) << 16)
+#define UNM_FFP_MASK UNM_FFP(~0)
+#define UNU_FFP(x) (((x) & 0xff) << 8)
+#define UNU_FFP_MASK UNU_FFP(~0)
+#define CPU_EN BIT(7)
+#define CPU_PORT(x) ((x) << 4)
+#define CPU_MASK (0xf << 4)
+#define MIRROR_EN BIT(3)
+#define MIRROR_PORT(x) ((x) & 0x7)
+#define MIRROR_MASK 0x7
+
+/* Registers for CPU forward control */
+#define MT7531_CFC 0x4
+#define MT7531_MIRROR_EN BIT(19)
+#define MT7531_MIRROR_MASK (MIRROR_MASK << 16)
+#define MT7531_MIRROR_PORT_GET(x) (((x) >> 16) & MIRROR_MASK)
+#define MT7531_MIRROR_PORT_SET(x) (((x) & MIRROR_MASK) << 16)
+#define MT7531_CPU_PMAP_MASK GENMASK(7, 0)
+
+#define MT753X_MIRROR_REG(id) (((id) == ID_MT7531) ? \
+ MT7531_CFC : MT7530_MFC)
+#define MT753X_MIRROR_EN(id) (((id) == ID_MT7531) ? \
+ MT7531_MIRROR_EN : MIRROR_EN)
+#define MT753X_MIRROR_MASK(id) (((id) == ID_MT7531) ? \
+ MT7531_MIRROR_MASK : MIRROR_MASK)
+
+/* Registers for BPDU and PAE frame control */
+#define MT753X_BPC 0x24
+#define MT753X_BPDU_PORT_FW_MASK GENMASK(2, 0)
+#define MT753X_PAE_PORT_FW_MASK GENMASK(18, 16)
+#define MT753X_PAE_PORT_FW(x) FIELD_PREP(MT753X_PAE_PORT_FW_MASK, x)
+
+/* Register for :03 and :0E MAC DA frame control */
+#define MT753X_RGAC2 0x2c
+#define MT753X_R0E_PORT_FW_MASK GENMASK(18, 16)
+#define MT753X_R0E_PORT_FW(x) FIELD_PREP(MT753X_R0E_PORT_FW_MASK, x)
+
+enum mt753x_bpdu_port_fw {
+ MT753X_BPDU_FOLLOW_MFC,
+ MT753X_BPDU_CPU_EXCLUDE = 4,
+ MT753X_BPDU_CPU_INCLUDE = 5,
+ MT753X_BPDU_CPU_ONLY = 6,
+ MT753X_BPDU_DROP = 7,
+};
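+
+/* Composition sketch (illustrative): to trap both BPDUs and PAE (802.1X)
+ * frames to the CPU only, a caller would OR the field helpers above into
+ * MT753X_BPC, e.g.:
+ *
+ *	val = FIELD_PREP(MT753X_BPDU_PORT_FW_MASK, MT753X_BPDU_CPU_ONLY) |
+ *	      MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY);
+ *
+ * which places the 3-bit MT753X_BPDU_CPU_ONLY code in bits 2:0 and 18:16.
+ */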
+
+/* Registers for address table access */
+#define MT7530_ATA1 0x74
+#define STATIC_EMP 0
+#define STATIC_ENT 3
+#define MT7530_ATA2 0x78
+#define ATA2_IVL BIT(15)
+#define ATA2_FID(x) (((x) & 0x7) << 12)
+
+/* Register for address table write data */
+#define MT7530_ATWD 0x7c
+
+/* Register for address table control */
+#define MT7530_ATC 0x80
+#define ATC_HASH(x) (((x) & 0xfff) << 16)
+#define ATC_BUSY BIT(15)
+#define ATC_SRCH_END BIT(14)
+#define ATC_SRCH_HIT BIT(13)
+#define ATC_INVALID BIT(12)
+#define ATC_MAT(x) (((x) & 0xf) << 8)
+#define ATC_MAT_MACTAB ATC_MAT(0)
+
+enum mt7530_fdb_cmd {
+ MT7530_FDB_READ = 0,
+ MT7530_FDB_WRITE = 1,
+ MT7530_FDB_FLUSH = 2,
+ MT7530_FDB_START = 4,
+ MT7530_FDB_NEXT = 5,
+};
+
+/* Registers for table search read address */
+#define MT7530_TSRA1 0x84
+#define MAC_BYTE_0 24
+#define MAC_BYTE_1 16
+#define MAC_BYTE_2 8
+#define MAC_BYTE_3 0
+#define MAC_BYTE_MASK 0xff
+
+#define MT7530_TSRA2 0x88
+#define MAC_BYTE_4 24
+#define MAC_BYTE_5 16
+#define CVID 0
+#define CVID_MASK 0xfff
+
+#define MT7530_ATRD 0x8C
+#define AGE_TIMER 24
+#define AGE_TIMER_MASK 0xff
+#define PORT_MAP 4
+#define PORT_MAP_MASK 0xff
+#define ENT_STATUS 2
+#define ENT_STATUS_MASK 0x3
+
+/* Register for vlan table control */
+#define MT7530_VTCR 0x90
+#define VTCR_BUSY BIT(31)
+#define VTCR_INVALID BIT(16)
+#define VTCR_FUNC(x) (((x) & 0xf) << 12)
+#define VTCR_VID(x) ((x) & 0xfff)
+
+enum mt7530_vlan_cmd {
+ /* Read/Write the specified VID entry from VAWD register based
+ * on VID.
+ */
+ MT7530_VTCR_RD_VID = 0,
+ MT7530_VTCR_WR_VID = 1,
+};
+
+/* Register for VLAN and ACL setup write data */
+#define MT7530_VAWD1 0x94
+#define PORT_STAG BIT(31)
+/* Independent VLAN Learning */
+#define IVL_MAC BIT(30)
+/* Egress Tag Consistent */
+#define EG_CON BIT(29)
+/* Per VLAN Egress Tag Control */
+#define VTAG_EN BIT(28)
+/* VLAN Member Control */
+#define PORT_MEM(x) (((x) & 0xff) << 16)
+/* Filter ID */
+#define FID(x) (((x) & 0x7) << 1)
+/* VLAN Entry Valid */
+#define VLAN_VALID BIT(0)
+#define PORT_MEM_SHFT 16
+#define PORT_MEM_MASK 0xff
+
+enum mt7530_fid {
+ FID_STANDALONE = 0,
+ FID_BRIDGED = 1,
+};
+
+#define MT7530_VAWD2 0x98
+/* Egress Tag Control */
+#define ETAG_CTRL_P(p, x) (((x) & 0x3) << ((p) << 1))
+#define ETAG_CTRL_P_MASK(p) ETAG_CTRL_P(p, 3)
+
+enum mt7530_vlan_egress_attr {
+ MT7530_VLAN_EGRESS_UNTAG = 0,
+ MT7530_VLAN_EGRESS_TAG = 2,
+ MT7530_VLAN_EGRESS_STACK = 3,
+};
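+
+/* Worked example (illustrative): VAWD2 packs a 2-bit egress-tag code per
+ * port, two bits per port index. Marking port 4 as egress-tagged means
+ * writing MT7530_VLAN_EGRESS_TAG (2) into bits 9:8:
+ *
+ *	ETAG_CTRL_P(4, MT7530_VLAN_EGRESS_TAG) == 2 << 8 == 0x200
+ *	ETAG_CTRL_P_MASK(4)                    == 3 << 8
+ */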
+
+/* Register for address age control */
+#define MT7530_AAC 0xa0
+/* Disable ageing */
+#define AGE_DIS BIT(20)
+/* Age count */
+#define AGE_CNT_MASK GENMASK(19, 12)
+#define AGE_CNT_MAX 0xff
+#define AGE_CNT(x) (AGE_CNT_MASK & ((x) << 12))
+/* Age unit */
+#define AGE_UNIT_MASK GENMASK(11, 0)
+#define AGE_UNIT_MAX 0xfff
+#define AGE_UNIT(x) (AGE_UNIT_MASK & (x))
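+
+/* Ageing sketch (illustrative): the effective ageing time in seconds is
+ * (AGE_CNT + 1) * (AGE_UNIT + 1), so e.g. a 300 s ageing time could be
+ * programmed as:
+ *
+ *	mt7530_write(priv, MT7530_AAC, AGE_CNT(74) | AGE_UNIT(3));
+ *
+ * since (74 + 1) * (3 + 1) == 300. (The factorisation chosen by the
+ * driver's set_ageing_time handler may differ.)
+ */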
+
+/* Register for port STP state control */
+#define MT7530_SSP_P(x) (0x2000 + ((x) * 0x100))
+#define FID_PST(fid, state) (((state) & 0x3) << ((fid) * 2))
+#define FID_PST_MASK(fid) FID_PST(fid, 0x3)
+
+enum mt7530_stp_state {
+ MT7530_STP_DISABLED = 0,
+ MT7530_STP_BLOCKING = 1,
+ MT7530_STP_LISTENING = 1,
+ MT7530_STP_LEARNING = 2,
+ MT7530_STP_FORWARDING = 3
+};
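+
+/* Example (illustrative): SSP packs a 2-bit STP state per filter ID, so
+ * putting FID_BRIDGED (1) into forwarding on port 6 would be:
+ *
+ *	mt7530_rmw(priv, MT7530_SSP_P(6), FID_PST_MASK(FID_BRIDGED),
+ *		   FID_PST(FID_BRIDGED, MT7530_STP_FORWARDING));
+ */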
+
+/* Register for port control */
+#define MT7530_PCR_P(x) (0x2004 + ((x) * 0x100))
+#define PORT_TX_MIR BIT(9)
+#define PORT_RX_MIR BIT(8)
+#define PORT_VLAN(x) ((x) & 0x3)
+
+enum mt7530_port_mode {
+ /* Port Matrix Mode: Frames are forwarded by the PCR_MATRIX members. */
+ MT7530_PORT_MATRIX_MODE = PORT_VLAN(0),
+
+ /* Fallback Mode: Forward frames received on ingress ports that are
+ * not members of the VLAN. Frames whose VID is not listed in the
+ * VLAN table are forwarded by the PCR_MATRIX members.
+ */
+ MT7530_PORT_FALLBACK_MODE = PORT_VLAN(1),
+
+ /* Security Mode: Discard any frame on an ingress membership
+ * violation or when the VID is missing from the VLAN table.
+ */
+ MT7530_PORT_SECURITY_MODE = PORT_VLAN(3),
+};
+
+#define PCR_MATRIX(x) (((x) & 0xff) << 16)
+#define PORT_PRI(x) (((x) & 0x7) << 24)
+#define EG_TAG(x) (((x) & 0x3) << 28)
+#define PCR_MATRIX_MASK PCR_MATRIX(0xff)
+#define PCR_MATRIX_CLR PCR_MATRIX(0)
+#define PCR_PORT_VLAN_MASK PORT_VLAN(3)
+
+/* Register for port security control */
+#define MT7530_PSC_P(x) (0x200c + ((x) * 0x100))
+#define SA_DIS BIT(4)
+
+/* Register for port vlan control */
+#define MT7530_PVC_P(x) (0x2010 + ((x) * 0x100))
+#define PORT_SPEC_TAG BIT(5)
+#define PVC_EG_TAG(x) (((x) & 0x7) << 8)
+#define PVC_EG_TAG_MASK PVC_EG_TAG(7)
+#define VLAN_ATTR(x) (((x) & 0x3) << 6)
+#define VLAN_ATTR_MASK VLAN_ATTR(3)
+#define ACC_FRM_MASK GENMASK(1, 0)
+
+enum mt7530_vlan_port_eg_tag {
+ MT7530_VLAN_EG_DISABLED = 0,
+ MT7530_VLAN_EG_CONSISTENT = 1,
+};
+
+enum mt7530_vlan_port_attr {
+ MT7530_VLAN_USER = 0,
+ MT7530_VLAN_TRANSPARENT = 3,
+};
+
+enum mt7530_vlan_port_acc_frm {
+ MT7530_VLAN_ACC_ALL = 0,
+ MT7530_VLAN_ACC_TAGGED = 1,
+ MT7530_VLAN_ACC_UNTAGGED = 2,
+};
+
+#define STAG_VPID(x) (((x) & 0xffff) << 16)
+
+/* Register for port port-and-protocol based vlan 1 control */
+#define MT7530_PPBV1_P(x) (0x2014 + ((x) * 0x100))
+#define G0_PORT_VID(x) (((x) & 0xfff) << 0)
+#define G0_PORT_VID_MASK G0_PORT_VID(0xfff)
+#define G0_PORT_VID_DEF G0_PORT_VID(0)
+
+/* Register for port MAC control register */
+#define MT7530_PMCR_P(x) (0x3000 + ((x) * 0x100))
+#define PMCR_IFG_XMIT(x) (((x) & 0x3) << 18)
+#define PMCR_EXT_PHY BIT(17)
+#define PMCR_MAC_MODE BIT(16)
+#define PMCR_FORCE_MODE BIT(15)
+#define PMCR_TX_EN BIT(14)
+#define PMCR_RX_EN BIT(13)
+#define PMCR_BACKOFF_EN BIT(9)
+#define PMCR_BACKPR_EN BIT(8)
+#define PMCR_FORCE_EEE1G BIT(7)
+#define PMCR_FORCE_EEE100 BIT(6)
+#define PMCR_TX_FC_EN BIT(5)
+#define PMCR_RX_FC_EN BIT(4)
+#define PMCR_FORCE_SPEED_1000 BIT(3)
+#define PMCR_FORCE_SPEED_100 BIT(2)
+#define PMCR_FORCE_FDX BIT(1)
+#define PMCR_FORCE_LNK BIT(0)
+#define PMCR_SPEED_MASK (PMCR_FORCE_SPEED_100 | \
+ PMCR_FORCE_SPEED_1000)
+#define MT7531_FORCE_LNK BIT(31)
+#define MT7531_FORCE_SPD BIT(30)
+#define MT7531_FORCE_DPX BIT(29)
+#define MT7531_FORCE_RX_FC BIT(28)
+#define MT7531_FORCE_TX_FC BIT(27)
+#define MT7531_FORCE_MODE (MT7531_FORCE_LNK | \
+ MT7531_FORCE_SPD | \
+ MT7531_FORCE_DPX | \
+ MT7531_FORCE_RX_FC | \
+ MT7531_FORCE_TX_FC)
+#define PMCR_FORCE_MODE_ID(id) (((id) == ID_MT7531) ? \
+ MT7531_FORCE_MODE : \
+ PMCR_FORCE_MODE)
+#define PMCR_LINK_SETTINGS_MASK (PMCR_TX_EN | PMCR_FORCE_SPEED_1000 | \
+ PMCR_RX_EN | PMCR_FORCE_SPEED_100 | \
+ PMCR_TX_FC_EN | PMCR_RX_FC_EN | \
+ PMCR_FORCE_FDX | PMCR_FORCE_LNK | \
+ PMCR_FORCE_EEE1G | PMCR_FORCE_EEE100)
+#define PMCR_CPU_PORT_SETTING(id) (PMCR_FORCE_MODE_ID((id)) | \
+ PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \
+ PMCR_BACKOFF_EN | PMCR_BACKPR_EN | \
+ PMCR_TX_EN | PMCR_RX_EN | \
+ PMCR_TX_FC_EN | PMCR_RX_FC_EN | \
+ PMCR_FORCE_SPEED_1000 | \
+ PMCR_FORCE_FDX | PMCR_FORCE_LNK)
+
+#define MT7530_PMEEECR_P(x) (0x3004 + (x) * 0x100)
+#define WAKEUP_TIME_1000(x) (((x) & 0xFF) << 24)
+#define WAKEUP_TIME_100(x) (((x) & 0xFF) << 16)
+#define LPI_THRESH_MASK GENMASK(15, 4)
+#define LPI_THRESH_SHT 4
+#define SET_LPI_THRESH(x) (((x) << LPI_THRESH_SHT) & LPI_THRESH_MASK)
+#define GET_LPI_THRESH(x) (((x) & LPI_THRESH_MASK) >> LPI_THRESH_SHT)
+#define LPI_MODE_EN BIT(0)
+
+#define MT7530_PMSR_P(x) (0x3008 + (x) * 0x100)
+#define PMSR_EEE1G BIT(7)
+#define PMSR_EEE100M BIT(6)
+#define PMSR_RX_FC BIT(5)
+#define PMSR_TX_FC BIT(4)
+#define PMSR_SPEED_1000 BIT(3)
+#define PMSR_SPEED_100 BIT(2)
+#define PMSR_SPEED_10 0x00
+#define PMSR_SPEED_MASK (PMSR_SPEED_100 | PMSR_SPEED_1000)
+#define PMSR_DPX BIT(1)
+#define PMSR_LINK BIT(0)
+
+/* Register for port debug count */
+#define MT7531_DBG_CNT(x) (0x3018 + (x) * 0x100)
+#define MT7531_DIS_CLR BIT(31)
+
+#define MT7530_GMACCR 0x30e0
+#define MAX_RX_JUMBO(x) ((x) << 2)
+#define MAX_RX_JUMBO_MASK GENMASK(5, 2)
+#define MAX_RX_PKT_LEN_MASK GENMASK(1, 0)
+#define MAX_RX_PKT_LEN_1522 0x0
+#define MAX_RX_PKT_LEN_1536 0x1
+#define MAX_RX_PKT_LEN_1552 0x2
+#define MAX_RX_PKT_LEN_JUMBO 0x3
+
+/* Register for MIB */
+#define MT7530_PORT_MIB_COUNTER(x) (0x4000 + (x) * 0x100)
+#define MT7530_MIB_CCR 0x4fe0
+#define CCR_MIB_ENABLE BIT(31)
+#define CCR_RX_OCT_CNT_GOOD BIT(7)
+#define CCR_RX_OCT_CNT_BAD BIT(6)
+#define CCR_TX_OCT_CNT_GOOD BIT(5)
+#define CCR_TX_OCT_CNT_BAD BIT(4)
+#define CCR_MIB_FLUSH (CCR_RX_OCT_CNT_GOOD | \
+ CCR_RX_OCT_CNT_BAD | \
+ CCR_TX_OCT_CNT_GOOD | \
+ CCR_TX_OCT_CNT_BAD)
+#define CCR_MIB_ACTIVATE (CCR_MIB_ENABLE | \
+ CCR_RX_OCT_CNT_GOOD | \
+ CCR_RX_OCT_CNT_BAD | \
+ CCR_TX_OCT_CNT_GOOD | \
+ CCR_TX_OCT_CNT_BAD)
+
+/* MT7531 SGMII register group */
+#define MT7531_SGMII_REG_BASE 0x5000
+#define MT7531_SGMII_REG(p, r) (MT7531_SGMII_REG_BASE + \
+ ((p) - 5) * 0x1000 + (r))
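+
+/* Address-map example (illustrative): the SGMII block only exists behind
+ * ports 5 and 6, each with a 0x1000 window, hence the "(p) - 5" term:
+ *
+ *	MT7531_SGMII_REG(5, 0x00) == 0x5000
+ *	MT7531_SGMII_REG(6, 0x08) == 0x6008
+ */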
+
+/* Register for SGMII PCS_CONTROL_1 */
+#define MT7531_PCS_CONTROL_1(p) MT7531_SGMII_REG(p, 0x00)
+#define MT7531_SGMII_LINK_STATUS BIT(18)
+#define MT7531_SGMII_AN_ENABLE BIT(12)
+#define MT7531_SGMII_AN_RESTART BIT(9)
+#define MT7531_SGMII_AN_COMPLETE BIT(21)
+
+/* Register for SGMII PCS_SPEED_ABILITY */
+#define MT7531_PCS_SPEED_ABILITY(p) MT7531_SGMII_REG(p, 0x08)
+#define MT7531_SGMII_TX_CONFIG_MASK GENMASK(15, 0)
+#define MT7531_SGMII_TX_CONFIG BIT(0)
+
+/* Register for SGMII_MODE */
+#define MT7531_SGMII_MODE(p) MT7531_SGMII_REG(p, 0x20)
+#define MT7531_SGMII_REMOTE_FAULT_DIS BIT(8)
+#define MT7531_SGMII_IF_MODE_MASK GENMASK(5, 1)
+#define MT7531_SGMII_FORCE_DUPLEX BIT(4)
+#define MT7531_SGMII_FORCE_SPEED_MASK GENMASK(3, 2)
+#define MT7531_SGMII_FORCE_SPEED_1000 BIT(3)
+#define MT7531_SGMII_FORCE_SPEED_100 BIT(2)
+#define MT7531_SGMII_FORCE_SPEED_10 0
+#define MT7531_SGMII_SPEED_DUPLEX_AN BIT(1)
+
+enum mt7531_sgmii_force_duplex {
+ MT7531_SGMII_FORCE_FULL_DUPLEX = 0,
+ MT7531_SGMII_FORCE_HALF_DUPLEX = 0x10,
+};
+
+/* Fields of QPHY_PWR_STATE_CTRL */
+#define MT7531_QPHY_PWR_STATE_CTRL(p) MT7531_SGMII_REG(p, 0xe8)
+#define MT7531_SGMII_PHYA_PWD BIT(4)
+
+/* Values of SGMII SPEED */
+#define MT7531_PHYA_CTRL_SIGNAL3(p) MT7531_SGMII_REG(p, 0x128)
+#define MT7531_RG_TPHY_SPEED_MASK (BIT(2) | BIT(3))
+#define MT7531_RG_TPHY_SPEED_1_25G 0x0
+#define MT7531_RG_TPHY_SPEED_3_125G BIT(2)
+
+/* Register for system reset */
+#define MT7530_SYS_CTRL 0x7000
+#define SYS_CTRL_PHY_RST BIT(2)
+#define SYS_CTRL_SW_RST BIT(1)
+#define SYS_CTRL_REG_RST BIT(0)
+
+/* Register for system interrupt */
+#define MT7530_SYS_INT_EN 0x7008
+
+/* Register for system interrupt status */
+#define MT7530_SYS_INT_STS 0x700c
+
+/* Register for PHY Indirect Access Control */
+#define MT7531_PHY_IAC 0x701C
+#define MT7531_PHY_ACS_ST BIT(31)
+#define MT7531_MDIO_REG_ADDR_MASK (0x1f << 25)
+#define MT7531_MDIO_PHY_ADDR_MASK (0x1f << 20)
+#define MT7531_MDIO_CMD_MASK (0x3 << 18)
+#define MT7531_MDIO_ST_MASK (0x3 << 16)
+#define MT7531_MDIO_RW_DATA_MASK (0xffff)
+#define MT7531_MDIO_REG_ADDR(x) (((x) & 0x1f) << 25)
+#define MT7531_MDIO_DEV_ADDR(x) (((x) & 0x1f) << 25)
+#define MT7531_MDIO_PHY_ADDR(x) (((x) & 0x1f) << 20)
+#define MT7531_MDIO_CMD(x) (((x) & 0x3) << 18)
+#define MT7531_MDIO_ST(x) (((x) & 0x3) << 16)
+
+enum mt7531_phy_iac_cmd {
+ MT7531_MDIO_ADDR = 0,
+ MT7531_MDIO_WRITE = 1,
+ MT7531_MDIO_READ = 2,
+ MT7531_MDIO_READ_CL45 = 3,
+};
+
+/* MDIO_ST: MDIO start field */
+enum mt7531_mdio_st {
+ MT7531_MDIO_ST_CL45 = 0,
+ MT7531_MDIO_ST_CL22 = 1,
+};
+
+#define MT7531_MDIO_CL22_READ (MT7531_MDIO_ST(MT7531_MDIO_ST_CL22) | \
+ MT7531_MDIO_CMD(MT7531_MDIO_READ))
+#define MT7531_MDIO_CL22_WRITE (MT7531_MDIO_ST(MT7531_MDIO_ST_CL22) | \
+ MT7531_MDIO_CMD(MT7531_MDIO_WRITE))
+#define MT7531_MDIO_CL45_ADDR (MT7531_MDIO_ST(MT7531_MDIO_ST_CL45) | \
+ MT7531_MDIO_CMD(MT7531_MDIO_ADDR))
+#define MT7531_MDIO_CL45_READ (MT7531_MDIO_ST(MT7531_MDIO_ST_CL45) | \
+ MT7531_MDIO_CMD(MT7531_MDIO_READ))
+#define MT7531_MDIO_CL45_WRITE (MT7531_MDIO_ST(MT7531_MDIO_ST_CL45) | \
+ MT7531_MDIO_CMD(MT7531_MDIO_WRITE))
+
+/* Register for RGMII clock phase */
+#define MT7531_CLKGEN_CTRL 0x7500
+#define CLK_SKEW_OUT(x) (((x) & 0x3) << 8)
+#define CLK_SKEW_OUT_MASK GENMASK(9, 8)
+#define CLK_SKEW_IN(x) (((x) & 0x3) << 6)
+#define CLK_SKEW_IN_MASK GENMASK(7, 6)
+#define RXCLK_NO_DELAY BIT(5)
+#define TXCLK_NO_REVERSE BIT(4)
+#define GP_MODE(x) (((x) & 0x3) << 1)
+#define GP_MODE_MASK GENMASK(2, 1)
+#define GP_CLK_EN BIT(0)
+
+enum mt7531_gp_mode {
+ MT7531_GP_MODE_RGMII = 0,
+ MT7531_GP_MODE_MII = 1,
+ MT7531_GP_MODE_REV_MII = 2
+};
+
+enum mt7531_clk_skew {
+ MT7531_CLK_SKEW_NO_CHG = 0,
+ MT7531_CLK_SKEW_DLY_100PPS = 1,
+ MT7531_CLK_SKEW_DLY_200PPS = 2,
+ MT7531_CLK_SKEW_REVERSE = 3,
+};
+
+/* Register for hw trap status */
+#define MT7530_HWTRAP 0x7800
+#define HWTRAP_XTAL_MASK (BIT(10) | BIT(9))
+#define HWTRAP_XTAL_25MHZ (BIT(10) | BIT(9))
+#define HWTRAP_XTAL_40MHZ (BIT(10))
+#define HWTRAP_XTAL_20MHZ (BIT(9))
+
+#define MT7531_HWTRAP 0x7800
+#define HWTRAP_XTAL_FSEL_MASK BIT(7)
+#define HWTRAP_XTAL_FSEL_25MHZ BIT(7)
+#define HWTRAP_XTAL_FSEL_40MHZ 0
+/* Unique fields of (M)HWSTRAP for MT7531 */
+#define XTAL_FSEL_S 7
+#define XTAL_FSEL_M BIT(7)
+#define PHY_EN BIT(6)
+#define CHG_STRAP BIT(8)
+
+/* Register for hw trap modification */
+#define MT7530_MHWTRAP 0x7804
+#define MHWTRAP_PHY0_SEL BIT(20)
+#define MHWTRAP_MANUAL BIT(16)
+#define MHWTRAP_P5_MAC_SEL BIT(13)
+#define MHWTRAP_P6_DIS BIT(8)
+#define MHWTRAP_P5_RGMII_MODE BIT(7)
+#define MHWTRAP_P5_DIS BIT(6)
+#define MHWTRAP_PHY_ACCESS BIT(5)
+
+/* Register for TOP signal control */
+#define MT7530_TOP_SIG_CTRL 0x7808
+#define TOP_SIG_CTRL_NORMAL (BIT(17) | BIT(16))
+
+#define MT7531_TOP_SIG_SR 0x780c
+#define PAD_DUAL_SGMII_EN BIT(1)
+#define PAD_MCM_SMI_EN BIT(0)
+
+#define MT7530_IO_DRV_CR 0x7810
+#define P5_IO_CLK_DRV(x) ((x) & 0x3)
+#define P5_IO_DATA_DRV(x) (((x) & 0x3) << 4)
+
+#define MT7531_CHIP_REV 0x781C
+
+#define MT7531_PLLGP_EN 0x7820
+#define EN_COREPLL BIT(2)
+#define SW_CLKSW BIT(1)
+#define SW_PLLGP BIT(0)
+
+#define MT7530_P6ECR 0x7830
+#define P6_INTF_MODE_MASK 0x3
+#define P6_INTF_MODE(x) ((x) & 0x3)
+
+#define MT7531_PLLGP_CR0 0x78a8
+#define RG_COREPLL_EN BIT(22)
+#define RG_COREPLL_POSDIV_S 23
+#define RG_COREPLL_POSDIV_M 0x3800000
+#define RG_COREPLL_SDM_PCW_S 1
+#define RG_COREPLL_SDM_PCW_M 0x3ffffe
+#define RG_COREPLL_SDM_PCW_CHG BIT(0)
+
+/* Registers for RGMII and SGMII PLL clock */
+#define MT7531_ANA_PLLGP_CR2 0x78b0
+#define MT7531_ANA_PLLGP_CR5 0x78bc
+
+/* Registers for TRGMII on both sides */
+#define MT7530_TRGMII_RCK_CTRL 0x7a00
+#define RX_RST BIT(31)
+#define RXC_DQSISEL BIT(30)
+#define DQSI1_TAP_MASK (0x7f << 8)
+#define DQSI0_TAP_MASK 0x7f
+#define DQSI1_TAP(x) (((x) & 0x7f) << 8)
+#define DQSI0_TAP(x) ((x) & 0x7f)
+
+#define MT7530_TRGMII_RCK_RTT 0x7a04
+#define DQS1_GATE BIT(31)
+#define DQS0_GATE BIT(30)
+
+#define MT7530_TRGMII_RD(x) (0x7a10 + (x) * 8)
+#define BSLIP_EN BIT(31)
+#define EDGE_CHK BIT(30)
+#define RD_TAP_MASK 0x7f
+#define RD_TAP(x) ((x) & 0x7f)
+
+#define MT7530_TRGMII_TXCTRL 0x7a40
+#define TRAIN_TXEN BIT(31)
+#define TXC_INV BIT(30)
+#define TX_RST BIT(28)
+
+#define MT7530_TRGMII_TD_ODT(i) (0x7a54 + 8 * (i))
+#define TD_DM_DRVP(x) ((x) & 0xf)
+#define TD_DM_DRVN(x) (((x) & 0xf) << 4)
+
+#define MT7530_TRGMII_TCK_CTRL 0x7a78
+#define TCK_TAP(x) (((x) & 0xf) << 8)
+
+#define MT7530_P5RGMIIRXCR 0x7b00
+#define CSR_RGMII_EDGE_ALIGN BIT(8)
+#define CSR_RGMII_RXC_0DEG_CFG(x) ((x) & 0xf)
+
+#define MT7530_P5RGMIITXCR 0x7b04
+#define CSR_RGMII_TXC_CFG(x) ((x) & 0x1f)
+
+/* Registers for GPIO mode */
+#define MT7531_GPIO_MODE0 0x7c0c
+#define MT7531_GPIO0_MASK GENMASK(3, 0)
+#define MT7531_GPIO0_INTERRUPT 1
+
+#define MT7531_GPIO_MODE1 0x7c10
+#define MT7531_GPIO11_RG_RXD2_MASK GENMASK(15, 12)
+#define MT7531_EXT_P_MDC_11 (2 << 12)
+#define MT7531_GPIO12_RG_RXD3_MASK GENMASK(19, 16)
+#define MT7531_EXT_P_MDIO_12 (2 << 16)
+
+/* Registers for LED GPIO control (MT7530 only)
+ * All registers follow this pattern:
+ * [ 2: 0] port 0
+ * [ 6: 4] port 1
+ * [10: 8] port 2
+ * [14:12] port 3
+ * [18:16] port 4
+ */
+
+/* LED enable, 0: Disable, 1: Enable (Default) */
+#define MT7530_LED_EN 0x7d00
+/* LED mode, 0: GPIO mode, 1: PHY mode (Default) */
+#define MT7530_LED_IO_MODE 0x7d04
+/* GPIO direction, 0: Input, 1: Output */
+#define MT7530_LED_GPIO_DIR 0x7d10
+/* GPIO output enable, 0: Disable, 1: Enable */
+#define MT7530_LED_GPIO_OE 0x7d14
+/* GPIO value, 0: Low, 1: High */
+#define MT7530_LED_GPIO_DATA 0x7d18
+
+#define MT7530_CREV 0x7ffc
+#define CHIP_NAME_SHIFT 16
+#define MT7530_ID 0x7530
+
+#define MT7531_CREV 0x781C
+#define CHIP_REV_M 0x0f
+#define MT7531_ID 0x7531
+
+/* Registers for core PLL access through MMD indirect access */
+#define CORE_PLL_GROUP2 0x401
+#define RG_SYSPLL_EN_NORMAL BIT(15)
+#define RG_SYSPLL_VODEN BIT(14)
+#define RG_SYSPLL_LF BIT(13)
+#define RG_SYSPLL_RST_DLY(x) (((x) & 0x3) << 12)
+#define RG_SYSPLL_LVROD_EN BIT(10)
+#define RG_SYSPLL_PREDIV(x) (((x) & 0x3) << 8)
+#define RG_SYSPLL_POSDIV(x) (((x) & 0x3) << 5)
+#define RG_SYSPLL_FBKSEL BIT(4)
+#define RT_SYSPLL_EN_AFE_OLT BIT(0)
+
+#define CORE_PLL_GROUP4 0x403
+#define RG_SYSPLL_DDSFBK_EN BIT(12)
+#define RG_SYSPLL_BIAS_EN BIT(11)
+#define RG_SYSPLL_BIAS_LPF_EN BIT(10)
+#define MT7531_PHY_PLL_OFF BIT(5)
+#define MT7531_PHY_PLL_BYPASS_MODE BIT(4)
+
+#define MT753X_CTRL_PHY_ADDR 0
+
+#define CORE_PLL_GROUP5 0x404
+#define RG_LCDDS_PCW_NCPO1(x) ((x) & 0xffff)
+
+#define CORE_PLL_GROUP6 0x405
+#define RG_LCDDS_PCW_NCPO0(x) ((x) & 0xffff)
+
+#define CORE_PLL_GROUP7 0x406
+#define RG_LCDDS_PWDB BIT(15)
+#define RG_LCDDS_ISO_EN BIT(13)
+#define RG_LCCDS_C(x) (((x) & 0x7) << 4)
+#define RG_LCDDS_PCW_NCPO_CHG BIT(3)
+
+#define CORE_PLL_GROUP10 0x409
+#define RG_LCDDS_SSC_DELTA(x) ((x) & 0xfff)
+
+#define CORE_PLL_GROUP11 0x40a
+#define RG_LCDDS_SSC_DELTA1(x) ((x) & 0xfff)
+
+#define CORE_GSWPLL_GRP1 0x40d
+#define RG_GSWPLL_PREDIV(x) (((x) & 0x3) << 14)
+#define RG_GSWPLL_POSDIV_200M(x) (((x) & 0x3) << 12)
+#define RG_GSWPLL_EN_PRE BIT(11)
+#define RG_GSWPLL_FBKSEL BIT(10)
+#define RG_GSWPLL_BP BIT(9)
+#define RG_GSWPLL_BR BIT(8)
+#define RG_GSWPLL_FBKDIV_200M(x) ((x) & 0xff)
+
+#define CORE_GSWPLL_GRP2 0x40e
+#define RG_GSWPLL_POSDIV_500M(x) (((x) & 0x3) << 8)
+#define RG_GSWPLL_FBKDIV_500M(x) ((x) & 0xff)
+
+#define CORE_TRGMII_GSW_CLK_CG 0x410
+#define REG_GSWCK_EN BIT(0)
+#define REG_TRGMIICK_EN BIT(1)
+
+#define MIB_DESC(_s, _o, _n) \
+ { \
+ .size = (_s), \
+ .offset = (_o), \
+ .name = (_n), \
+ }
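+
+/* Usage sketch (illustrative; the offsets here are placeholders): the
+ * driver builds its MIB table from this macro, along the lines of:
+ *
+ *	static const struct mt7530_mib_desc mt7530_mib[] = {
+ *		MIB_DESC(1, 0x00, "TxDrop"),
+ *		MIB_DESC(2, 0x48, "TxBytes"),
+ *	};
+ *
+ * where @size counts 32-bit words and @offset is relative to
+ * MT7530_PORT_MIB_COUNTER(port).
+ */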
+
+struct mt7530_mib_desc {
+ unsigned int size;
+ unsigned int offset;
+ const char *name;
+};
+
+struct mt7530_fdb {
+ u16 vid;
+ u8 port_mask;
+ u8 aging;
+ u8 mac[6];
+ bool noarp;
+};
+
+/* struct mt7530_port - This is the main data structure for holding the state
+ * of the port.
+ * @enable: Whether the port is enabled.
+ * @pm: The port matrix recording all connections with the port.
+ * @pvid: The VLAN to be considered the PVID at ingress. Any untagged
+ * frames will be assigned to this VLAN.
+ */
+struct mt7530_port {
+ bool enable;
+ u32 pm;
+ u16 pvid;
+};
+
+/* Port 5 interface select definitions */
+enum p5_interface_select {
+ P5_DISABLED = 0,
+ P5_INTF_SEL_PHY_P0,
+ P5_INTF_SEL_PHY_P4,
+ P5_INTF_SEL_GMAC5,
+ P5_INTF_SEL_GMAC5_SGMII,
+};
+
+static const char *p5_intf_modes(unsigned int p5_interface)
+{
+ switch (p5_interface) {
+ case P5_DISABLED:
+ return "DISABLED";
+ case P5_INTF_SEL_PHY_P0:
+ return "PHY P0";
+ case P5_INTF_SEL_PHY_P4:
+ return "PHY P4";
+ case P5_INTF_SEL_GMAC5:
+ return "GMAC5";
+ case P5_INTF_SEL_GMAC5_SGMII:
+ return "GMAC5_SGMII";
+ default:
+ return "unknown";
+ }
+}
+
+struct mt7530_priv;
+
+struct mt753x_pcs {
+ struct phylink_pcs pcs;
+ struct mt7530_priv *priv;
+ int port;
+};
+
+/* struct mt753x_info - This is the main data structure for holding the
+ * device-specific part of each supported switch
+ * @id: Holding the identifier of the device
+ * @pcs_ops: Holding the PCS operations of the device
+ * @sw_setup: Holding the handler for device initialization
+ * @phy_read: Holding the way of reading a PHY port
+ * @phy_write: Holding the way of writing a PHY port
+ * @pad_setup: Holding the way of setting up the bus pad for a certain
+ * MAC port
+ * @cpu_port_config: Holding the way of setting up a CPU port
+ * @mac_port_get_caps: Holding the way of getting the capabilities of a
+ * certain MAC port
+ * @mac_port_validate: Holding the way of setting an additional validate
+ * type for a certain MAC port
+ * @mac_port_config: Holding the way of setting up the PHY attribute for
+ * a certain MAC port
+ */
+struct mt753x_info {
+ enum mt753x_id id;
+
+ const struct phylink_pcs_ops *pcs_ops;
+
+ int (*sw_setup)(struct dsa_switch *ds);
+ int (*phy_read)(struct mt7530_priv *priv, int port, int regnum);
+ int (*phy_write)(struct mt7530_priv *priv, int port, int regnum, u16 val);
+ int (*pad_setup)(struct dsa_switch *ds, phy_interface_t interface);
+ int (*cpu_port_config)(struct dsa_switch *ds, int port);
+ void (*mac_port_get_caps)(struct dsa_switch *ds, int port,
+ struct phylink_config *config);
+ void (*mac_port_validate)(struct dsa_switch *ds, int port,
+ phy_interface_t interface,
+ unsigned long *supported);
+ int (*mac_port_config)(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface);
+};
+
+/* struct mt7530_priv - This is the main data structure for holding the state
+ * of the driver
+ * @dev: The device pointer
+ * @ds: The pointer to the dsa core structure
+ * @bus: The bus used for the device and built-in PHY
+ * @rstc: The pointer to reset control used by MCM
+ * @core_pwr: The power supplied into the core
+ * @io_pwr: The power supplied into the I/O
+ * @reset: The descriptor for GPIO line tied to its reset pin
+ * @mcm: Flag distinguishing a standalone IC from a multi-chip
+ * module coupling
+ * @ports: Holding the state of the ports
+ * @reg_mutex: The lock protecting register access between
+ * processes
+ * @p6_interface: Holding the current port 6 interface
+ * @p5_interface: Holding the current port 5 interface
+ * @p5_intf_sel: Holding the current port 5 interface select
+ *
+ * @irq: IRQ number of the switch
+ * @irq_domain: IRQ domain of the switch irq_chip
+ * @irq_enable: IRQ enable bits, synced to SYS_INT_EN
+ */
+struct mt7530_priv {
+ struct device *dev;
+ struct dsa_switch *ds;
+ struct mii_bus *bus;
+ struct reset_control *rstc;
+ struct regulator *core_pwr;
+ struct regulator *io_pwr;
+ struct gpio_desc *reset;
+ const struct mt753x_info *info;
+ unsigned int id;
+ bool mcm;
+ phy_interface_t p6_interface;
+ phy_interface_t p5_interface;
+ unsigned int p5_intf_sel;
+ u8 mirror_rx;
+ u8 mirror_tx;
+
+ struct mt7530_port ports[MT7530_NUM_PORTS];
+ struct mt753x_pcs pcs[MT7530_NUM_PORTS];
+ /* Protect register access between processes */
+ struct mutex reg_mutex;
+ int irq;
+ struct irq_domain *irq_domain;
+ u32 irq_enable;
+};
+
+struct mt7530_hw_vlan_entry {
+ int port;
+ u8 old_members;
+ bool untagged;
+};
+
+static inline void mt7530_hw_vlan_entry_init(struct mt7530_hw_vlan_entry *e,
+ int port, bool untagged)
+{
+ e->port = port;
+ e->untagged = untagged;
+}
+
+typedef void (*mt7530_vlan_op)(struct mt7530_priv *,
+ struct mt7530_hw_vlan_entry *);
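+
+/* Flow sketch (illustrative): VLAN updates are expressed as an entry plus
+ * an operation callback, roughly:
+ *
+ *	struct mt7530_hw_vlan_entry new_entry;
+ *
+ *	mt7530_hw_vlan_entry_init(&new_entry, port, untagged);
+ *	mt7530_hw_vlan_update(priv, vid, &new_entry, vlan_op);
+ *
+ * where mt7530_hw_vlan_update() is the driver-side dispatcher (named here
+ * only for illustration).
+ */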
+
+struct mt7530_hw_stats {
+ const char *string;
+ u16 reg;
+ u8 sizeof_stat;
+};
+
+struct mt7530_dummy_poll {
+ struct mt7530_priv *priv;
+ u32 reg;
+};
+
+static inline void INIT_MT7530_DUMMY_POLL(struct mt7530_dummy_poll *p,
+ struct mt7530_priv *priv, u32 reg)
+{
+ p->priv = priv;
+ p->reg = reg;
+}
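+
+/* Usage sketch (illustrative): the dummy-poll pair is shaped to fit
+ * readx_poll_timeout() from <linux/iopoll.h>, e.g. spinning on the
+ * address-table BUSY flag:
+ *
+ *	struct mt7530_dummy_poll p;
+ *	u32 val;
+ *
+ *	INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_ATC);
+ *	ret = readx_poll_timeout(_mt7530_read, &p, val,
+ *				 !(val & ATC_BUSY), 20, 20000);
+ *
+ * where _mt7530_read is the driver's reader taking a dummy_poll pointer.
+ */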
+
+#endif /* __MT7530_H */
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
new file mode 100644
index 000000000..fdda62d6e
--- /dev/null
+++ b/drivers/net/dsa/mv88e6060.c
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * net/dsa/mv88e6060.c - Driver for Marvell 88e6060 switch chips
+ * Copyright (c) 2008-2009 Marvell Semiconductor
+ */
+
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <net/dsa.h>
+#include "mv88e6060.h"
+
+static int reg_read(struct mv88e6060_priv *priv, int addr, int reg)
+{
+ return mdiobus_read_nested(priv->bus, priv->sw_addr + addr, reg);
+}
+
+static int reg_write(struct mv88e6060_priv *priv, int addr, int reg, u16 val)
+{
+ return mdiobus_write_nested(priv->bus, priv->sw_addr + addr, reg, val);
+}
+
+static const char *mv88e6060_get_name(struct mii_bus *bus, int sw_addr)
+{
+ int ret;
+
+ ret = mdiobus_read(bus, sw_addr + REG_PORT(0), PORT_SWITCH_ID);
+ if (ret >= 0) {
+ if (ret == PORT_SWITCH_ID_6060)
+ return "Marvell 88E6060 (A0)";
+ if (ret == PORT_SWITCH_ID_6060_R1 ||
+ ret == PORT_SWITCH_ID_6060_R2)
+ return "Marvell 88E6060 (B0)";
+ if ((ret & PORT_SWITCH_ID_6060_MASK) == PORT_SWITCH_ID_6060)
+ return "Marvell 88E6060";
+ }
+
+ return NULL;
+}
+
+static enum dsa_tag_protocol mv88e6060_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol m)
+{
+ return DSA_TAG_PROTO_TRAILER;
+}
+
+static int mv88e6060_switch_reset(struct mv88e6060_priv *priv)
+{
+ int i;
+ int ret;
+ unsigned long timeout;
+
+ /* Set all ports to the disabled state. */
+ for (i = 0; i < MV88E6060_PORTS; i++) {
+ ret = reg_read(priv, REG_PORT(i), PORT_CONTROL);
+ if (ret < 0)
+ return ret;
+ ret = reg_write(priv, REG_PORT(i), PORT_CONTROL,
+ ret & ~PORT_CONTROL_STATE_MASK);
+ if (ret)
+ return ret;
+ }
+
+ /* Wait for transmit queues to drain. */
+ usleep_range(2000, 4000);
+
+ /* Reset the switch. */
+ ret = reg_write(priv, REG_GLOBAL, GLOBAL_ATU_CONTROL,
+ GLOBAL_ATU_CONTROL_SWRESET |
+ GLOBAL_ATU_CONTROL_LEARNDIS);
+ if (ret)
+ return ret;
+
+ /* Wait up to one second for reset to complete. */
+ timeout = jiffies + 1 * HZ;
+ while (time_before(jiffies, timeout)) {
+ ret = reg_read(priv, REG_GLOBAL, GLOBAL_STATUS);
+ if (ret < 0)
+ return ret;
+
+ if (ret & GLOBAL_STATUS_INIT_READY)
+ break;
+
+ usleep_range(1000, 2000);
+ }
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int mv88e6060_setup_global(struct mv88e6060_priv *priv)
+{
+ int ret;
+
+ /* Disable discarding of frames with excessive collisions,
+ * set the maximum frame size to 1536 bytes, and mask all
+ * interrupt sources.
+ */
+ ret = reg_write(priv, REG_GLOBAL, GLOBAL_CONTROL,
+ GLOBAL_CONTROL_MAX_FRAME_1536);
+ if (ret)
+ return ret;
+
+ /* Disable automatic address learning.
+ */
+ return reg_write(priv, REG_GLOBAL, GLOBAL_ATU_CONTROL,
+ GLOBAL_ATU_CONTROL_LEARNDIS);
+}
+
+static int mv88e6060_setup_port(struct mv88e6060_priv *priv, int p)
+{
+ int addr = REG_PORT(p);
+ int ret;
+
+ if (dsa_is_unused_port(priv->ds, p))
+ return 0;
+
+ /* Do not force flow control, disable Ingress and Egress
+ * Header tagging, disable VLAN tunneling, and set the port
+ * state to Forwarding. Additionally, if this is the CPU
+ * port, enable Ingress and Egress Trailer tagging mode.
+ */
+ ret = reg_write(priv, addr, PORT_CONTROL,
+ dsa_is_cpu_port(priv->ds, p) ?
+ PORT_CONTROL_TRAILER |
+ PORT_CONTROL_INGRESS_MODE |
+ PORT_CONTROL_STATE_FORWARDING :
+ PORT_CONTROL_STATE_FORWARDING);
+ if (ret)
+ return ret;
+
+ /* Port based VLAN map: give each port its own address
+ * database, allow the CPU port to talk to each of the 'real'
+ * ports, and allow each of the 'real' ports to only talk to
+ * the CPU port.
+ */
+ ret = reg_write(priv, addr, PORT_VLAN_MAP,
+ ((p & 0xf) << PORT_VLAN_MAP_DBNUM_SHIFT) |
+ (dsa_is_cpu_port(priv->ds, p) ?
+ dsa_user_ports(priv->ds) :
+ BIT(dsa_to_port(priv->ds, p)->cpu_dp->index)));
+ if (ret)
+ return ret;
+
+ /* Port Association Vector: when learning source addresses
+ * of packets, add the address to the address database using
+ * a port bitmap that has only the bit for this port set and
+ * the other bits clear.
+ */
+ return reg_write(priv, addr, PORT_ASSOC_VECTOR, BIT(p));
+}
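+
+/* Worked example (illustrative): with the CPU on port 5 and user ports
+ * 0-4, port 2's VLAN map gets its own database number and a mask that
+ * only reaches the CPU:
+ *
+ *	(2 << PORT_VLAN_MAP_DBNUM_SHIFT) | BIT(5) == 0x2020
+ *
+ * while the CPU port's mask is dsa_user_ports(ds) == 0x1f.
+ */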
+
+static int mv88e6060_setup_addr(struct mv88e6060_priv *priv)
+{
+ u8 addr[ETH_ALEN];
+ int ret;
+ u16 val;
+
+ eth_random_addr(addr);
+
+ val = addr[0] << 8 | addr[1];
+
+ /* The multicast bit is always transmitted as a zero, so the switch uses
+ * bit 8 for "DiffAddr", where 0 means all ports transmit the same SA.
+ */
+ val &= 0xfeff;
+
+ ret = reg_write(priv, REG_GLOBAL, GLOBAL_MAC_01, val);
+ if (ret)
+ return ret;
+
+ ret = reg_write(priv, REG_GLOBAL, GLOBAL_MAC_23,
+ (addr[2] << 8) | addr[3]);
+ if (ret)
+ return ret;
+
+ return reg_write(priv, REG_GLOBAL, GLOBAL_MAC_45,
+ (addr[4] << 8) | addr[5]);
+}
+
+static int mv88e6060_setup(struct dsa_switch *ds)
+{
+ struct mv88e6060_priv *priv = ds->priv;
+ int ret;
+ int i;
+
+ priv->ds = ds;
+
+ ret = mv88e6060_switch_reset(priv);
+ if (ret < 0)
+ return ret;
+
+ /* @@@ initialise atu */
+
+ ret = mv88e6060_setup_global(priv);
+ if (ret < 0)
+ return ret;
+
+ ret = mv88e6060_setup_addr(priv);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < MV88E6060_PORTS; i++) {
+ ret = mv88e6060_setup_port(priv, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mv88e6060_port_to_phy_addr(int port)
+{
+ if (port >= 0 && port < MV88E6060_PORTS)
+ return port;
+ return -1;
+}
+
+static int mv88e6060_phy_read(struct dsa_switch *ds, int port, int regnum)
+{
+ struct mv88e6060_priv *priv = ds->priv;
+ int addr;
+
+ addr = mv88e6060_port_to_phy_addr(port);
+ if (addr == -1)
+ return 0xffff;
+
+ return reg_read(priv, addr, regnum);
+}
+
+static int
+mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
+{
+ struct mv88e6060_priv *priv = ds->priv;
+ int addr;
+
+ addr = mv88e6060_port_to_phy_addr(port);
+ if (addr == -1)
+ return 0xffff;
+
+ return reg_write(priv, addr, regnum, val);
+}
+
+static const struct dsa_switch_ops mv88e6060_switch_ops = {
+ .get_tag_protocol = mv88e6060_get_tag_protocol,
+ .setup = mv88e6060_setup,
+ .phy_read = mv88e6060_phy_read,
+ .phy_write = mv88e6060_phy_write,
+};
+
+static int mv88e6060_probe(struct mdio_device *mdiodev)
+{
+ struct device *dev = &mdiodev->dev;
+ struct mv88e6060_priv *priv;
+ struct dsa_switch *ds;
+ const char *name;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->bus = mdiodev->bus;
+ priv->sw_addr = mdiodev->addr;
+
+ name = mv88e6060_get_name(priv->bus, priv->sw_addr);
+ if (!name)
+ return -ENODEV;
+
+ dev_info(dev, "switch %s detected\n", name);
+
+ ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
+ if (!ds)
+ return -ENOMEM;
+
+ ds->dev = dev;
+ ds->num_ports = MV88E6060_PORTS;
+ ds->priv = priv;
+ ds->ops = &mv88e6060_switch_ops;
+
+ dev_set_drvdata(dev, ds);
+
+ return dsa_register_switch(ds);
+}
+
+static void mv88e6060_remove(struct mdio_device *mdiodev)
+{
+ struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+
+ if (!ds)
+ return;
+
+ dsa_unregister_switch(ds);
+}
+
+static void mv88e6060_shutdown(struct mdio_device *mdiodev)
+{
+ struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+
+ if (!ds)
+ return;
+
+ dsa_switch_shutdown(ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static const struct of_device_id mv88e6060_of_match[] = {
+ {
+ .compatible = "marvell,mv88e6060",
+ },
+ { /* sentinel */ },
+};
+
+static struct mdio_driver mv88e6060_driver = {
+ .probe = mv88e6060_probe,
+ .remove = mv88e6060_remove,
+ .shutdown = mv88e6060_shutdown,
+ .mdiodrv.driver = {
+ .name = "mv88e6060",
+ .of_match_table = mv88e6060_of_match,
+ },
+};
+
+mdio_module_driver(mv88e6060_driver);
+
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
+MODULE_DESCRIPTION("Driver for Marvell 88E6060 ethernet switch chip");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mv88e6060");
diff --git a/drivers/net/dsa/mv88e6060.h b/drivers/net/dsa/mv88e6060.h
new file mode 100644
index 000000000..6c13c2421
--- /dev/null
+++ b/drivers/net/dsa/mv88e6060.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * drivers/net/dsa/mv88e6060.h - Marvell 88e6060 switch chip support
+ * Copyright (c) 2015 Neil Armstrong
+ *
+ * Based on mv88e6xxx.h
+ * Copyright (c) 2008 Marvell Semiconductor
+ */
+
+#ifndef __MV88E6060_H
+#define __MV88E6060_H
+
+#define MV88E6060_PORTS 6
+
+#define REG_PORT(p) (0x8 + (p))
+#define PORT_STATUS 0x00
+#define PORT_STATUS_PAUSE_EN BIT(15)
+#define PORT_STATUS_MY_PAUSE BIT(14)
+#define PORT_STATUS_FC (PORT_STATUS_MY_PAUSE | PORT_STATUS_PAUSE_EN)
+#define PORT_STATUS_RESOLVED BIT(13)
+#define PORT_STATUS_LINK BIT(12)
+#define PORT_STATUS_PORTMODE BIT(11)
+#define PORT_STATUS_PHYMODE BIT(10)
+#define PORT_STATUS_DUPLEX BIT(9)
+#define PORT_STATUS_SPEED BIT(8)
+#define PORT_SWITCH_ID 0x03
+#define PORT_SWITCH_ID_6060 0x0600
+#define PORT_SWITCH_ID_6060_MASK 0xfff0
+#define PORT_SWITCH_ID_6060_R1 0x0601
+#define PORT_SWITCH_ID_6060_R2 0x0602
+#define PORT_CONTROL 0x04
+#define PORT_CONTROL_FORCE_FLOW_CTRL BIT(15)
+#define PORT_CONTROL_TRAILER BIT(14)
+#define PORT_CONTROL_HEADER BIT(11)
+#define PORT_CONTROL_INGRESS_MODE BIT(8)
+#define PORT_CONTROL_VLAN_TUNNEL BIT(7)
+#define PORT_CONTROL_STATE_MASK 0x03
+#define PORT_CONTROL_STATE_DISABLED 0x00
+#define PORT_CONTROL_STATE_BLOCKING 0x01
+#define PORT_CONTROL_STATE_LEARNING 0x02
+#define PORT_CONTROL_STATE_FORWARDING 0x03
+#define PORT_VLAN_MAP 0x06
+#define PORT_VLAN_MAP_DBNUM_SHIFT 12
+#define PORT_VLAN_MAP_TABLE_MASK 0x1f
+#define PORT_ASSOC_VECTOR 0x0b
+#define PORT_ASSOC_VECTOR_MONITOR BIT(15)
+#define PORT_ASSOC_VECTOR_PAV_MASK 0x1f
+#define PORT_RX_CNTR 0x10
+#define PORT_TX_CNTR 0x11
+
+#define REG_GLOBAL 0x0f
+#define GLOBAL_STATUS 0x00
+#define GLOBAL_STATUS_SW_MODE_MASK (0x3 << 12)
+#define GLOBAL_STATUS_SW_MODE_0 (0x0 << 12)
+#define GLOBAL_STATUS_SW_MODE_1 (0x1 << 12)
+#define GLOBAL_STATUS_SW_MODE_2 (0x2 << 12)
+#define GLOBAL_STATUS_SW_MODE_3 (0x3 << 12)
+#define GLOBAL_STATUS_INIT_READY BIT(11)
+#define GLOBAL_STATUS_ATU_FULL BIT(3)
+#define GLOBAL_STATUS_ATU_DONE BIT(2)
+#define GLOBAL_STATUS_PHY_INT BIT(1)
+#define GLOBAL_STATUS_EEINT BIT(0)
+#define GLOBAL_MAC_01 0x01
+#define GLOBAL_MAC_01_DIFF_ADDR BIT(8)
+#define GLOBAL_MAC_23 0x02
+#define GLOBAL_MAC_45 0x03
+#define GLOBAL_CONTROL 0x04
+#define GLOBAL_CONTROL_DISCARD_EXCESS BIT(13)
+#define GLOBAL_CONTROL_MAX_FRAME_1536 BIT(10)
+#define GLOBAL_CONTROL_RELOAD_EEPROM BIT(9)
+#define GLOBAL_CONTROL_CTRMODE BIT(8)
+#define GLOBAL_CONTROL_ATU_FULL_EN BIT(3)
+#define GLOBAL_CONTROL_ATU_DONE_EN BIT(2)
+#define GLOBAL_CONTROL_PHYINT_EN BIT(1)
+#define GLOBAL_CONTROL_EEPROM_DONE_EN BIT(0)
+#define GLOBAL_ATU_CONTROL 0x0a
+#define GLOBAL_ATU_CONTROL_SWRESET BIT(15)
+#define GLOBAL_ATU_CONTROL_LEARNDIS BIT(14)
+#define GLOBAL_ATU_CONTROL_ATUSIZE_256 (0x0 << 12)
+#define GLOBAL_ATU_CONTROL_ATUSIZE_512 (0x1 << 12)
+#define GLOBAL_ATU_CONTROL_ATUSIZE_1024 (0x2 << 12)
+#define GLOBAL_ATU_CONTROL_ATE_AGE_SHIFT 4
+#define GLOBAL_ATU_CONTROL_ATE_AGE_MASK (0xff << 4)
+#define GLOBAL_ATU_CONTROL_ATE_AGE_5MIN (0x13 << 4)
+#define GLOBAL_ATU_OP 0x0b
+#define GLOBAL_ATU_OP_BUSY BIT(15)
+#define GLOBAL_ATU_OP_NOP (0 << 12)
+#define GLOBAL_ATU_OP_FLUSH_ALL ((1 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_FLUSH_UNLOCKED ((2 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_LOAD_DB ((3 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_GET_NEXT_DB ((4 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_FLUSH_DB ((5 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_FLUSH_UNLOCKED_DB ((6 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_DATA 0x0c
+#define GLOBAL_ATU_DATA_PORT_VECTOR_MASK 0x3f0
+#define GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT 4
+#define GLOBAL_ATU_DATA_STATE_MASK 0x0f
+#define GLOBAL_ATU_DATA_STATE_UNUSED 0x00
+#define GLOBAL_ATU_DATA_STATE_UC_STATIC 0x0e
+#define GLOBAL_ATU_DATA_STATE_UC_LOCKED 0x0f
+#define GLOBAL_ATU_DATA_STATE_MC_STATIC 0x07
+#define GLOBAL_ATU_DATA_STATE_MC_LOCKED 0x0e
+#define GLOBAL_ATU_MAC_01 0x0d
+#define GLOBAL_ATU_MAC_23 0x0e
+#define GLOBAL_ATU_MAC_45 0x0f
+
+struct mv88e6060_priv {
+ /* MDIO bus and address on bus to use. When in single chip
+ * mode, address is 0, and the switch uses multiple addresses
+ * on the bus. When in multi-chip mode, the switch uses a
+ * single address which contains two registers used for
+ * indirect access to more registers.
+ */
+ struct mii_bus *bus;
+ int sw_addr;
+ struct dsa_switch *ds;
+};
+
+#endif
diff --git a/drivers/net/dsa/mv88e6xxx/Kconfig b/drivers/net/dsa/mv88e6xxx/Kconfig
new file mode 100644
index 000000000..e3181d547
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_DSA_MV88E6XXX
+ tristate "Marvell 88E6xxx Ethernet switch fabric support"
+ depends on NET_DSA
+ select IRQ_DOMAIN
+ select NET_DSA_TAG_EDSA
+ select NET_DSA_TAG_DSA
+ help
+ This driver adds support for most of the Marvell 88E6xxx models of
+ Ethernet switch chips, except 88E6060.
+
+config NET_DSA_MV88E6XXX_PTP
+ bool "PTP support for Marvell 88E6xxx"
+ default n
+ depends on (NET_DSA_MV88E6XXX = y && PTP_1588_CLOCK = y) || \
+ (NET_DSA_MV88E6XXX = m && PTP_1588_CLOCK)
+ help
+ Say Y to enable PTP hardware timestamping on Marvell 88E6xxx switch
+ chips that support it.
diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile
new file mode 100644
index 000000000..49bf358b9
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/Makefile
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx.o
+mv88e6xxx-objs := chip.o
+mv88e6xxx-objs += devlink.o
+mv88e6xxx-objs += global1.o
+mv88e6xxx-objs += global1_atu.o
+mv88e6xxx-objs += global1_vtu.o
+mv88e6xxx-objs += global2.o
+mv88e6xxx-objs += global2_avb.o
+mv88e6xxx-objs += global2_scratch.o
+mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_PTP) += hwtstamp.o
+mv88e6xxx-objs += phy.o
+mv88e6xxx-objs += port.o
+mv88e6xxx-objs += port_hidden.o
+mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_PTP) += ptp.o
+mv88e6xxx-objs += serdes.o
+mv88e6xxx-objs += smi.o
+mv88e6xxx-objs += trace.o
+
+# for tracing framework to find trace.h
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
new file mode 100644
index 000000000..ba906dfab
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -0,0 +1,7270 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Marvell 88e6xxx Ethernet switch single-chip support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
+ *
+ * Copyright (c) 2016-2017 Savoir-faire Linux Inc.
+ * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/dsa/mv88e6xxx.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_bridge.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/platform_data/mv88e6xxx.h>
+#include <linux/netdevice.h>
+#include <linux/gpio/consumer.h>
+#include <linux/phylink.h>
+#include <net/dsa.h>
+
+#include "chip.h"
+#include "devlink.h"
+#include "global1.h"
+#include "global2.h"
+#include "hwtstamp.h"
+#include "phy.h"
+#include "port.h"
+#include "ptp.h"
+#include "serdes.h"
+#include "smi.h"
+
+static void assert_reg_lock(struct mv88e6xxx_chip *chip)
+{
+ if (unlikely(!mutex_is_locked(&chip->reg_lock))) {
+ dev_err(chip->dev, "Switch registers lock not held!\n");
+ dump_stack();
+ }
+}
+
+int mv88e6xxx_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val)
+{
+ int err;
+
+ assert_reg_lock(chip);
+
+ err = mv88e6xxx_smi_read(chip, addr, reg, val);
+ if (err)
+ return err;
+
+ dev_dbg(chip->dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
+ addr, reg, *val);
+
+ return 0;
+}
+
+int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val)
+{
+ int err;
+
+ assert_reg_lock(chip);
+
+ err = mv88e6xxx_smi_write(chip, addr, reg, val);
+ if (err)
+ return err;
+
+ dev_dbg(chip->dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
+ addr, reg, val);
+
+ return 0;
+}
+
+int mv88e6xxx_wait_mask(struct mv88e6xxx_chip *chip, int addr, int reg,
+ u16 mask, u16 val)
+{
+ const unsigned long timeout = jiffies + msecs_to_jiffies(50);
+ u16 data;
+ int err;
+ int i;
+
+ /* There's no bus specific operation to wait for a mask. Even
+ * if the initial poll takes longer than 50ms, always do at
+ * least one more attempt.
+ */
+ for (i = 0; time_before(jiffies, timeout) || (i < 2); i++) {
+ err = mv88e6xxx_read(chip, addr, reg, &data);
+ if (err)
+ return err;
+
+ if ((data & mask) == val)
+ return 0;
+
+ if (i < 2)
+ cpu_relax();
+ else
+ usleep_range(1000, 2000);
+ }
+
+ err = mv88e6xxx_read(chip, addr, reg, &data);
+ if (err)
+ return err;
+
+ if ((data & mask) == val)
+ return 0;
+
+ dev_err(chip->dev, "Timeout while waiting for switch\n");
+ return -ETIMEDOUT;
+}
+
+int mv88e6xxx_wait_bit(struct mv88e6xxx_chip *chip, int addr, int reg,
+ int bit, int val)
+{
+ return mv88e6xxx_wait_mask(chip, addr, reg, BIT(bit),
+ val ? BIT(bit) : 0x0000);
+}
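+
+/* Usage sketch (illustrative): most callers spin on a self-clearing BUSY
+ * bit, e.g. waiting for an ATU operation (Global 1, offset 0x0b, bit 15)
+ * to complete:
+ *
+ *	err = mv88e6xxx_wait_bit(chip, chip->info->global1_addr,
+ *				 MV88E6XXX_G1_ATU_OP, 15, 0);
+ *
+ * (register name per global1.h; shown here only as an example caller.)
+ */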
+
+struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip)
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus;
+
+ mdio_bus = list_first_entry(&chip->mdios, struct mv88e6xxx_mdio_bus,
+ list);
+ if (!mdio_bus)
+ return NULL;
+
+ return mdio_bus->bus;
+}
+
+static void mv88e6xxx_g1_irq_mask(struct irq_data *d)
+{
+ struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
+ unsigned int n = d->hwirq;
+
+ chip->g1_irq.masked |= (1 << n);
+}
+
+static void mv88e6xxx_g1_irq_unmask(struct irq_data *d)
+{
+ struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
+ unsigned int n = d->hwirq;
+
+ chip->g1_irq.masked &= ~(1 << n);
+}
+
+static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip)
+{
+ unsigned int nhandled = 0;
+ unsigned int sub_irq;
+ unsigned int n;
+ u16 reg;
+ u16 ctl1;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &reg);
+ mv88e6xxx_reg_unlock(chip);
+
+ if (err)
+ goto out;
+
+ do {
+ for (n = 0; n < chip->g1_irq.nirqs; ++n) {
+ if (reg & (1 << n)) {
+ sub_irq = irq_find_mapping(chip->g1_irq.domain,
+ n);
+ handle_nested_irq(sub_irq);
+ ++nhandled;
+ }
+ }
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &ctl1);
+ if (err)
+ goto unlock;
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &reg);
+unlock:
+ mv88e6xxx_reg_unlock(chip);
+ if (err)
+ goto out;
+ ctl1 &= GENMASK(chip->g1_irq.nirqs, 0);
+ } while (reg & ctl1);
+
+out:
+ return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
+}
+
+static irqreturn_t mv88e6xxx_g1_irq_thread_fn(int irq, void *dev_id)
+{
+ struct mv88e6xxx_chip *chip = dev_id;
+
+ return mv88e6xxx_g1_irq_thread_work(chip);
+}
+
+static void mv88e6xxx_g1_irq_bus_lock(struct irq_data *d)
+{
+ struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
+
+ mv88e6xxx_reg_lock(chip);
+}
+
+static void mv88e6xxx_g1_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
+ u16 mask = GENMASK(chip->g1_irq.nirqs, 0);
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &reg);
+ if (err)
+ goto out;
+
+ reg &= ~mask;
+ reg |= (~chip->g1_irq.masked & mask);
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, reg);
+
+out:
+ mv88e6xxx_reg_unlock(chip);
+}
+
+static const struct irq_chip mv88e6xxx_g1_irq_chip = {
+ .name = "mv88e6xxx-g1",
+ .irq_mask = mv88e6xxx_g1_irq_mask,
+ .irq_unmask = mv88e6xxx_g1_irq_unmask,
+ .irq_bus_lock = mv88e6xxx_g1_irq_bus_lock,
+ .irq_bus_sync_unlock = mv88e6xxx_g1_irq_bus_sync_unlock,
+};
+
+static int mv88e6xxx_g1_irq_domain_map(struct irq_domain *d,
+ unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct mv88e6xxx_chip *chip = d->host_data;
+
+ irq_set_chip_data(irq, d->host_data);
+ irq_set_chip_and_handler(irq, &chip->g1_irq.chip, handle_level_irq);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops mv88e6xxx_g1_irq_domain_ops = {
+ .map = mv88e6xxx_g1_irq_domain_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+/* To be called with reg_lock held */
+static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip)
+{
+ int irq, virq;
+ u16 mask;
+
+ mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &mask);
+ mask &= ~GENMASK(chip->g1_irq.nirqs, 0);
+ mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask);
+
+ for (irq = 0; irq < chip->g1_irq.nirqs; irq++) {
+ virq = irq_find_mapping(chip->g1_irq.domain, irq);
+ irq_dispose_mapping(virq);
+ }
+
+ irq_domain_remove(chip->g1_irq.domain);
+}
+
+static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip)
+{
+ /*
+ * free_irq must be called without reg_lock taken because the irq
+ * handler takes this lock, too.
+ */
+ free_irq(chip->irq, chip);
+
+ mv88e6xxx_reg_lock(chip);
+ mv88e6xxx_g1_irq_free_common(chip);
+ mv88e6xxx_reg_unlock(chip);
+}
+
+static int mv88e6xxx_g1_irq_setup_common(struct mv88e6xxx_chip *chip)
+{
+ int err, irq, virq;
+ u16 reg, mask;
+
+ chip->g1_irq.nirqs = chip->info->g1_irqs;
+ chip->g1_irq.domain = irq_domain_add_simple(
+ NULL, chip->g1_irq.nirqs, 0,
+ &mv88e6xxx_g1_irq_domain_ops, chip);
+ if (!chip->g1_irq.domain)
+ return -ENOMEM;
+
+ for (irq = 0; irq < chip->g1_irq.nirqs; irq++)
+ irq_create_mapping(chip->g1_irq.domain, irq);
+
+ chip->g1_irq.chip = mv88e6xxx_g1_irq_chip;
+ chip->g1_irq.masked = ~0;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &mask);
+ if (err)
+ goto out_mapping;
+
+ mask &= ~GENMASK(chip->g1_irq.nirqs, 0);
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask);
+ if (err)
+ goto out_disable;
+
+ /* Reading the interrupt status clears (most of) them */
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &reg);
+ if (err)
+ goto out_disable;
+
+ return 0;
+
+out_disable:
+ mask &= ~GENMASK(chip->g1_irq.nirqs, 0);
+ mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask);
+
+out_mapping:
+ for (irq = 0; irq < 16; irq++) {
+ virq = irq_find_mapping(chip->g1_irq.domain, irq);
+ irq_dispose_mapping(virq);
+ }
+
+ irq_domain_remove(chip->g1_irq.domain);
+
+ return err;
+}
+
+static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip)
+{
+ static struct lock_class_key lock_key;
+ static struct lock_class_key request_key;
+ int err;
+
+ err = mv88e6xxx_g1_irq_setup_common(chip);
+ if (err)
+ return err;
+
+ /* These lock classes tell lockdep that global 1 irqs are in
+ * a different category than their parent GPIO, so it won't
+ * report false recursion.
+ */
+ irq_set_lockdep_class(chip->irq, &lock_key, &request_key);
+
+ snprintf(chip->irq_name, sizeof(chip->irq_name),
+ "mv88e6xxx-%s", dev_name(chip->dev));
+
+ mv88e6xxx_reg_unlock(chip);
+ err = request_threaded_irq(chip->irq, NULL,
+ mv88e6xxx_g1_irq_thread_fn,
+ IRQF_ONESHOT | IRQF_SHARED,
+ chip->irq_name, chip);
+ mv88e6xxx_reg_lock(chip);
+ if (err)
+ mv88e6xxx_g1_irq_free_common(chip);
+
+ return err;
+}
+
+static void mv88e6xxx_irq_poll(struct kthread_work *work)
+{
+ struct mv88e6xxx_chip *chip = container_of(work,
+ struct mv88e6xxx_chip,
+ irq_poll_work.work);
+ mv88e6xxx_g1_irq_thread_work(chip);
+
+ kthread_queue_delayed_work(chip->kworker, &chip->irq_poll_work,
+ msecs_to_jiffies(100));
+}
+
+static int mv88e6xxx_irq_poll_setup(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ err = mv88e6xxx_g1_irq_setup_common(chip);
+ if (err)
+ return err;
+
+ kthread_init_delayed_work(&chip->irq_poll_work,
+ mv88e6xxx_irq_poll);
+
+ chip->kworker = kthread_create_worker(0, "%s", dev_name(chip->dev));
+ if (IS_ERR(chip->kworker))
+ return PTR_ERR(chip->kworker);
+
+ kthread_queue_delayed_work(chip->kworker, &chip->irq_poll_work,
+ msecs_to_jiffies(100));
+
+ return 0;
+}
+
+static void mv88e6xxx_irq_poll_free(struct mv88e6xxx_chip *chip)
+{
+ kthread_cancel_delayed_work_sync(&chip->irq_poll_work);
+ kthread_destroy_worker(chip->kworker);
+
+ mv88e6xxx_reg_lock(chip);
+ mv88e6xxx_g1_irq_free_common(chip);
+ mv88e6xxx_reg_unlock(chip);
+}
+
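+/* Program the RGMII delay and CMODE for the given interface mode; either
+ * operation returning -EOPNOTSUPP simply means it is not applicable to
+ * this port.
+ */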
+static int mv88e6xxx_port_config_interface(struct mv88e6xxx_chip *chip,
+ int port, phy_interface_t interface)
+{
+ int err;
+
+ if (chip->info->ops->port_set_rgmii_delay) {
+ err = chip->info->ops->port_set_rgmii_delay(chip, port,
+ interface);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+ }
+
+ if (chip->info->ops->port_set_cmode) {
+ err = chip->info->ops->port_set_cmode(chip, port,
+ interface);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+ }
+
+ return 0;
+}
+
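+/* Reprogram the MAC's link, speed, duplex and pause settings with the
+ * link forced down, then restore the requested link state.
+ */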
+static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port,
+ int link, int speed, int duplex, int pause,
+ phy_interface_t mode)
+{
+ int err;
+
+ if (!chip->info->ops->port_set_link)
+ return 0;
+
+ /* Port's MAC control must not be changed unless the link is down */
+ err = chip->info->ops->port_set_link(chip, port, LINK_FORCED_DOWN);
+ if (err)
+ return err;
+
+ if (chip->info->ops->port_set_speed_duplex) {
+ err = chip->info->ops->port_set_speed_duplex(chip, port,
+ speed, duplex);
+ if (err && err != -EOPNOTSUPP)
+ goto restore_link;
+ }
+
+ if (chip->info->ops->port_set_pause) {
+ err = chip->info->ops->port_set_pause(chip, port, pause);
+ if (err)
+ goto restore_link;
+ }
+
+ err = mv88e6xxx_port_config_interface(chip, port, mode);
+restore_link:
+ if (chip->info->ops->port_set_link(chip, port, link))
+ dev_err(chip->dev, "p%d: failed to restore MAC's link\n", port);
+
+ return err;
+}
+
+static int mv88e6xxx_phy_is_internal(struct dsa_switch *ds, int port)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+
+ return port < chip->info->num_internal_phys;
+}
+
+static int mv88e6xxx_port_ppu_updates(struct mv88e6xxx_chip *chip, int port)
+{
+ u16 reg;
+ int err;
+
+ /* The 88e6250 family does not have the PHY detect bit. Instead,
+ * report whether the port is internal.
+ */
+ if (chip->info->family == MV88E6XXX_FAMILY_6250)
+ return port < chip->info->num_internal_phys;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
+ if (err) {
+ dev_err(chip->dev,
+ "p%d: %s: failed to read port status\n",
+ port, __func__);
+ return err;
+ }
+
+ return !!(reg & MV88E6XXX_PORT_STS_PHY_DETECT);
+}
+
+static int mv88e6xxx_serdes_pcs_get_state(struct dsa_switch *ds, int port,
+ struct phylink_link_state *state)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int lane;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ lane = mv88e6xxx_serdes_get_lane(chip, port);
+ if (lane >= 0 && chip->info->ops->serdes_pcs_get_state)
+ err = chip->info->ops->serdes_pcs_get_state(chip, port, lane,
+ state);
+ else
+ err = -EOPNOTSUPP;
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int mv88e6xxx_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertise)
+{
+ const struct mv88e6xxx_ops *ops = chip->info->ops;
+ int lane;
+
+ if (ops->serdes_pcs_config) {
+ lane = mv88e6xxx_serdes_get_lane(chip, port);
+ if (lane >= 0)
+ return ops->serdes_pcs_config(chip, port, lane, mode,
+ interface, advertise);
+ }
+
+ return 0;
+}
+
+static void mv88e6xxx_serdes_pcs_an_restart(struct dsa_switch *ds, int port)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ const struct mv88e6xxx_ops *ops;
+ int err = 0;
+ int lane;
+
+ ops = chip->info->ops;
+
+ if (ops->serdes_pcs_an_restart) {
+ mv88e6xxx_reg_lock(chip);
+ lane = mv88e6xxx_serdes_get_lane(chip, port);
+ if (lane >= 0)
+ err = ops->serdes_pcs_an_restart(chip, port, lane);
+ mv88e6xxx_reg_unlock(chip);
+
+ if (err)
+ dev_err(ds->dev, "p%d: failed to restart AN\n", port);
+ }
+}
+
+static int mv88e6xxx_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
+ unsigned int mode,
+ int speed, int duplex)
+{
+ const struct mv88e6xxx_ops *ops = chip->info->ops;
+ int lane;
+
+ if (!phylink_autoneg_inband(mode) && ops->serdes_pcs_link_up) {
+ lane = mv88e6xxx_serdes_get_lane(chip, port);
+ if (lane >= 0)
+ return ops->serdes_pcs_link_up(chip, port, lane,
+ speed, duplex);
+ }
+
+ return 0;
+}
+
+static const u8 mv88e6185_phy_interface_modes[] = {
+ [MV88E6185_PORT_STS_CMODE_GMII_FD] = PHY_INTERFACE_MODE_GMII,
+ [MV88E6185_PORT_STS_CMODE_MII_100_FD_PS] = PHY_INTERFACE_MODE_MII,
+ [MV88E6185_PORT_STS_CMODE_MII_100] = PHY_INTERFACE_MODE_MII,
+ [MV88E6185_PORT_STS_CMODE_MII_10] = PHY_INTERFACE_MODE_MII,
+ [MV88E6185_PORT_STS_CMODE_SERDES] = PHY_INTERFACE_MODE_1000BASEX,
+ [MV88E6185_PORT_STS_CMODE_1000BASE_X] = PHY_INTERFACE_MODE_1000BASEX,
+ [MV88E6185_PORT_STS_CMODE_PHY] = PHY_INTERFACE_MODE_SGMII,
+};
+
+static void mv88e6095_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
+{
+ u8 cmode = chip->ports[port].cmode;
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
+
+ if (mv88e6xxx_phy_is_internal(chip->ds, port)) {
+ __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
+ } else {
+ if (cmode < ARRAY_SIZE(mv88e6185_phy_interface_modes) &&
+ mv88e6185_phy_interface_modes[cmode])
+ __set_bit(mv88e6185_phy_interface_modes[cmode],
+ config->supported_interfaces);
+
+ config->mac_capabilities |= MAC_1000FD;
+ }
+}
+
+static void mv88e6185_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
+{
+ u8 cmode = chip->ports[port].cmode;
+
+ if (cmode < ARRAY_SIZE(mv88e6185_phy_interface_modes) &&
+ mv88e6185_phy_interface_modes[cmode])
+ __set_bit(mv88e6185_phy_interface_modes[cmode],
+ config->supported_interfaces);
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
+}
+
+static const u8 mv88e6xxx_phy_interface_modes[] = {
+ [MV88E6XXX_PORT_STS_CMODE_MII_PHY] = PHY_INTERFACE_MODE_MII,
+ [MV88E6XXX_PORT_STS_CMODE_MII] = PHY_INTERFACE_MODE_MII,
+ [MV88E6XXX_PORT_STS_CMODE_GMII] = PHY_INTERFACE_MODE_GMII,
+ [MV88E6XXX_PORT_STS_CMODE_RMII_PHY] = PHY_INTERFACE_MODE_RMII,
+ [MV88E6XXX_PORT_STS_CMODE_RMII] = PHY_INTERFACE_MODE_RMII,
+ [MV88E6XXX_PORT_STS_CMODE_100BASEX] = PHY_INTERFACE_MODE_100BASEX,
+ [MV88E6XXX_PORT_STS_CMODE_1000BASEX] = PHY_INTERFACE_MODE_1000BASEX,
+ [MV88E6XXX_PORT_STS_CMODE_SGMII] = PHY_INTERFACE_MODE_SGMII,
+ /* higher interface modes are not needed here, since ports supporting
+ * them have a programmable C_Mode, and so their supported interfaces
+ * are filled in by the corresponding .phylink_get_caps() implementation
+ * below
+ */
+};
+
+static void mv88e6xxx_translate_cmode(u8 cmode, unsigned long *supported)
+{
+ if (cmode < ARRAY_SIZE(mv88e6xxx_phy_interface_modes) &&
+ mv88e6xxx_phy_interface_modes[cmode])
+ __set_bit(mv88e6xxx_phy_interface_modes[cmode], supported);
+ else if (cmode == MV88E6XXX_PORT_STS_CMODE_RGMII)
+ phy_interface_set_rgmii(supported);
+}
+
+static void mv88e6250_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
+{
+ unsigned long *supported = config->supported_interfaces;
+
+ /* Translate the default cmode */
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
+}
+
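+/* Read port 4's serdes cmode: momentarily clear PHY_DETECT, read the
+ * cmode back, and restore the original register value. Returns 0xf when
+ * port 4 is not in auto-media mode.
+ */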
+static int mv88e6352_get_port4_serdes_cmode(struct mv88e6xxx_chip *chip)
+{
+ u16 reg, val;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, 4, MV88E6XXX_PORT_STS, &reg);
+ if (err)
+ return err;
+
+ /* If PHY_DETECT is zero, then we are not in auto-media mode */
+ if (!(reg & MV88E6XXX_PORT_STS_PHY_DETECT))
+ return 0xf;
+
+ val = reg & ~MV88E6XXX_PORT_STS_PHY_DETECT;
+ err = mv88e6xxx_port_write(chip, 4, MV88E6XXX_PORT_STS, val);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_port_read(chip, 4, MV88E6XXX_PORT_STS, &val);
+ if (err)
+ return err;
+
+ /* Restore PHY_DETECT value */
+ err = mv88e6xxx_port_write(chip, 4, MV88E6XXX_PORT_STS, reg);
+ if (err)
+ return err;
+
+ return val & MV88E6XXX_PORT_STS_CMODE_MASK;
+}
+
+static void mv88e6352_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
+{
+ unsigned long *supported = config->supported_interfaces;
+ int err, cmode;
+
+ /* Translate the default cmode */
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
+
+ /* Port 4 supports automedia if the serdes is associated with it. */
+ if (port == 4) {
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err < 0)
+ dev_err(chip->dev, "p%d: failed to read scratch\n",
+ port);
+ if (err <= 0)
+ return;
+
+ cmode = mv88e6352_get_port4_serdes_cmode(chip);
+ if (cmode < 0)
+ dev_err(chip->dev, "p%d: failed to read serdes cmode\n",
+ port);
+ else
+ mv88e6xxx_translate_cmode(cmode, supported);
+ }
+}
+
+static void mv88e6341_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
+{
+ unsigned long *supported = config->supported_interfaces;
+
+ /* Translate the default cmode */
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
+
+ /* No ethtool bits for 200Mbps */
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
+
+ /* The C_Mode field is programmable on port 5 */
+ if (port == 5) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, supported);
+
+ config->mac_capabilities |= MAC_2500FD;
+ }
+}
+
+static void mv88e6390_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
+{
+ unsigned long *supported = config->supported_interfaces;
+
+ /* Translate the default cmode */
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
+
+ /* No ethtool bits for 200Mbps */
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
+
+ /* The C_Mode field is programmable on ports 9 and 10 */
+ if (port == 9 || port == 10) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, supported);
+
+ config->mac_capabilities |= MAC_2500FD;
+ }
+}
+
+static void mv88e6390x_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
+{
+ unsigned long *supported = config->supported_interfaces;
+
+ mv88e6390_phylink_get_caps(chip, port, config);
+
+ /* For the 6x90X, ports 2-7 can be in automedia mode.
+ * (Note that the 6x90 supports neither RXAUI nor XAUI.)
+ *
+ * Port 2 can also support 1000BASE-X in automedia mode if port 9 is
+ * configured for 1000BASE-X, SGMII or 2500BASE-X.
+ * Ports 3-4 can also support 1000BASE-X in automedia mode if port 9 is
+ * configured for RXAUI, 1000BASE-X, SGMII or 2500BASE-X.
+ *
+ * Port 5 can also support 1000BASE-X in automedia mode if port 10 is
+ * configured for 1000BASE-X, SGMII or 2500BASE-X.
+ * Ports 6-7 can also support 1000BASE-X in automedia mode if port 10 is
+ * configured for RXAUI, 1000BASE-X, SGMII or 2500BASE-X.
+ *
+ * For now, be permissive (as the old code was) and allow 1000BASE-X
+ * on ports 2..7.
+ */
+ if (port >= 2 && port <= 7)
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
+
+ /* The C_Mode field can also be programmed for 10G speeds */
+ if (port == 9 || port == 10) {
+ __set_bit(PHY_INTERFACE_MODE_XAUI, supported);
+ __set_bit(PHY_INTERFACE_MODE_RXAUI, supported);
+
+ config->mac_capabilities |= MAC_10000FD;
+ }
+}
+
+static void mv88e6393x_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
+{
+ unsigned long *supported = config->supported_interfaces;
+ bool is_6191x =
+ chip->info->prod_num == MV88E6XXX_PORT_SWITCH_ID_PROD_6191X;
+
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
+
+ /* The C_Mode field can be programmed for ports 0, 9 and 10 */
+ if (port == 0 || port == 9 || port == 10) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
+
+ /* 6191X supports >1G modes only on port 10 */
+ if (!is_6191x || port == 10) {
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, supported);
+ __set_bit(PHY_INTERFACE_MODE_5GBASER, supported);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, supported);
+ /* FIXME: USXGMII is not supported yet */
+ /* __set_bit(PHY_INTERFACE_MODE_USXGMII, supported); */
+
+ config->mac_capabilities |= MAC_2500FD | MAC_5000FD |
+ MAC_10000FD;
+ }
+ }
+
+ if (port == 0) {
+ __set_bit(PHY_INTERFACE_MODE_RMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_RGMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_ID, supported);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_RXID, supported);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_TXID, supported);
+ }
+}
+
+static void mv88e6xxx_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+
+ mv88e6xxx_reg_lock(chip);
+ chip->info->ops->phylink_get_caps(chip, port, config);
+ mv88e6xxx_reg_unlock(chip);
+
+ if (mv88e6xxx_phy_is_internal(ds, port)) {
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ /* Internal ports with no phy-mode need GMII for PHYLIB */
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ }
+}
+
+static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_port *p;
+ int err = 0;
+
+ p = &chip->ports[port];
+
+ mv88e6xxx_reg_lock(chip);
+
+ if (mode != MLO_AN_PHY || !mv88e6xxx_phy_is_internal(ds, port)) {
+ /* In inband mode, the link may come up at any time while the
+ * link is not forced down. Force the link down while we
+ * reconfigure the interface mode.
+ */
+ if (mode == MLO_AN_INBAND &&
+ p->interface != state->interface &&
+ chip->info->ops->port_set_link)
+ chip->info->ops->port_set_link(chip, port,
+ LINK_FORCED_DOWN);
+
+ err = mv88e6xxx_port_config_interface(chip, port,
+ state->interface);
+ if (err && err != -EOPNOTSUPP)
+ goto err_unlock;
+
+ err = mv88e6xxx_serdes_pcs_config(chip, port, mode,
+ state->interface,
+ state->advertising);
+ /* FIXME: we should restart negotiation if something changed -
+ * which is something we get if we convert to using phylink's
+ * PCS operations.
+ */
+ if (err > 0)
+ err = 0;
+ }
+
+ /* Undo the forced down state above after completing configuration
+ * irrespective of its state on entry, which allows the link to come
+ * up in the in-band case where there is no separate SERDES. Also
+ * ensure that the link can come up if the PPU is in use and we are
+ * in PHY mode (we treat the PPU as an effective in-band mechanism).
+ */
+ if (chip->info->ops->port_set_link &&
+ ((mode == MLO_AN_INBAND && p->interface != state->interface) ||
+ (mode == MLO_AN_PHY && mv88e6xxx_port_ppu_updates(chip, port))))
+ chip->info->ops->port_set_link(chip, port, LINK_UNFORCED);
+
+ p->interface = state->interface;
+
+err_unlock:
+ mv88e6xxx_reg_unlock(chip);
+
+ if (err && err != -EOPNOTSUPP)
+ dev_err(ds->dev, "p%d: failed to configure MAC/PCS\n", port);
+}
+
+static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ const struct mv88e6xxx_ops *ops;
+ int err = 0;
+
+ ops = chip->info->ops;
+
+ mv88e6xxx_reg_lock(chip);
+ /* Force the link down if we know the port may not be automatically
+ * updated by the switch or if we are using fixed-link mode.
+ */
+ if ((!mv88e6xxx_port_ppu_updates(chip, port) ||
+ mode == MLO_AN_FIXED) && ops->port_sync_link)
+ err = ops->port_sync_link(chip, port, mode, false);
+
+ if (!err && ops->port_set_speed_duplex)
+ err = ops->port_set_speed_duplex(chip, port, SPEED_UNFORCED,
+ DUPLEX_UNFORCED);
+ mv88e6xxx_reg_unlock(chip);
+
+ if (err)
+ dev_err(chip->dev,
+ "p%d: failed to force MAC link down\n", port);
+}
+
+static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode, phy_interface_t interface,
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ const struct mv88e6xxx_ops *ops;
+ int err = 0;
+
+ ops = chip->info->ops;
+
+ mv88e6xxx_reg_lock(chip);
+ /* Configure and force the link up if we know that the port may not
+ * be automatically updated by the switch or if we are using
+ * fixed-link mode.
+ */
+ if (!mv88e6xxx_port_ppu_updates(chip, port) ||
+ mode == MLO_AN_FIXED) {
+ /* FIXME: for an automedia port, should we force the link
+ * down here - what if the link comes up due to "other" media
+ * while we're bringing the port up, how is the exclusivity
+ * handled in the Marvell hardware? E.g. port 2 on 88E6390
+ * shared between internal PHY and Serdes.
+ */
+ err = mv88e6xxx_serdes_pcs_link_up(chip, port, mode, speed,
+ duplex);
+ if (err)
+ goto error;
+
+ if (ops->port_set_speed_duplex) {
+ err = ops->port_set_speed_duplex(chip, port,
+ speed, duplex);
+ if (err && err != -EOPNOTSUPP)
+ goto error;
+ }
+
+ if (ops->port_sync_link)
+ err = ops->port_sync_link(chip, port, mode, true);
+ }
+error:
+ mv88e6xxx_reg_unlock(chip);
+
+ if (err && err != -EOPNOTSUPP)
+ dev_err(ds->dev,
+ "p%d: failed to configure MAC link up\n", port);
+}
+
+static int mv88e6xxx_stats_snapshot(struct mv88e6xxx_chip *chip, int port)
+{
+ if (!chip->info->ops->stats_snapshot)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->stats_snapshot(chip, port);
+}
+
+static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
+ { "in_good_octets", 8, 0x00, STATS_TYPE_BANK0, },
+ { "in_bad_octets", 4, 0x02, STATS_TYPE_BANK0, },
+ { "in_unicast", 4, 0x04, STATS_TYPE_BANK0, },
+ { "in_broadcasts", 4, 0x06, STATS_TYPE_BANK0, },
+ { "in_multicasts", 4, 0x07, STATS_TYPE_BANK0, },
+ { "in_pause", 4, 0x16, STATS_TYPE_BANK0, },
+ { "in_undersize", 4, 0x18, STATS_TYPE_BANK0, },
+ { "in_fragments", 4, 0x19, STATS_TYPE_BANK0, },
+ { "in_oversize", 4, 0x1a, STATS_TYPE_BANK0, },
+ { "in_jabber", 4, 0x1b, STATS_TYPE_BANK0, },
+ { "in_rx_error", 4, 0x1c, STATS_TYPE_BANK0, },
+ { "in_fcs_error", 4, 0x1d, STATS_TYPE_BANK0, },
+ { "out_octets", 8, 0x0e, STATS_TYPE_BANK0, },
+ { "out_unicast", 4, 0x10, STATS_TYPE_BANK0, },
+ { "out_broadcasts", 4, 0x13, STATS_TYPE_BANK0, },
+ { "out_multicasts", 4, 0x12, STATS_TYPE_BANK0, },
+ { "out_pause", 4, 0x15, STATS_TYPE_BANK0, },
+ { "excessive", 4, 0x11, STATS_TYPE_BANK0, },
+ { "collisions", 4, 0x1e, STATS_TYPE_BANK0, },
+ { "deferred", 4, 0x05, STATS_TYPE_BANK0, },
+ { "single", 4, 0x14, STATS_TYPE_BANK0, },
+ { "multiple", 4, 0x17, STATS_TYPE_BANK0, },
+ { "out_fcs_error", 4, 0x03, STATS_TYPE_BANK0, },
+ { "late", 4, 0x1f, STATS_TYPE_BANK0, },
+ { "hist_64bytes", 4, 0x08, STATS_TYPE_BANK0, },
+ { "hist_65_127bytes", 4, 0x09, STATS_TYPE_BANK0, },
+ { "hist_128_255bytes", 4, 0x0a, STATS_TYPE_BANK0, },
+ { "hist_256_511bytes", 4, 0x0b, STATS_TYPE_BANK0, },
+ { "hist_512_1023bytes", 4, 0x0c, STATS_TYPE_BANK0, },
+ { "hist_1024_max_bytes", 4, 0x0d, STATS_TYPE_BANK0, },
+ { "sw_in_discards", 4, 0x10, STATS_TYPE_PORT, },
+ { "sw_in_filtered", 2, 0x12, STATS_TYPE_PORT, },
+ { "sw_out_filtered", 2, 0x13, STATS_TYPE_PORT, },
+ { "in_discards", 4, 0x00, STATS_TYPE_BANK1, },
+ { "in_filtered", 4, 0x01, STATS_TYPE_BANK1, },
+ { "in_accepted", 4, 0x02, STATS_TYPE_BANK1, },
+ { "in_bad_accepted", 4, 0x03, STATS_TYPE_BANK1, },
+ { "in_good_avb_class_a", 4, 0x04, STATS_TYPE_BANK1, },
+ { "in_good_avb_class_b", 4, 0x05, STATS_TYPE_BANK1, },
+ { "in_bad_avb_class_a", 4, 0x06, STATS_TYPE_BANK1, },
+ { "in_bad_avb_class_b", 4, 0x07, STATS_TYPE_BANK1, },
+ { "tcam_counter_0", 4, 0x08, STATS_TYPE_BANK1, },
+ { "tcam_counter_1", 4, 0x09, STATS_TYPE_BANK1, },
+ { "tcam_counter_2", 4, 0x0a, STATS_TYPE_BANK1, },
+ { "tcam_counter_3", 4, 0x0b, STATS_TYPE_BANK1, },
+ { "in_da_unknown", 4, 0x0e, STATS_TYPE_BANK1, },
+ { "in_management", 4, 0x0f, STATS_TYPE_BANK1, },
+ { "out_queue_0", 4, 0x10, STATS_TYPE_BANK1, },
+ { "out_queue_1", 4, 0x11, STATS_TYPE_BANK1, },
+ { "out_queue_2", 4, 0x12, STATS_TYPE_BANK1, },
+ { "out_queue_3", 4, 0x13, STATS_TYPE_BANK1, },
+ { "out_queue_4", 4, 0x14, STATS_TYPE_BANK1, },
+ { "out_queue_5", 4, 0x15, STATS_TYPE_BANK1, },
+ { "out_queue_6", 4, 0x16, STATS_TYPE_BANK1, },
+ { "out_queue_7", 4, 0x17, STATS_TYPE_BANK1, },
+ { "out_cut_through", 4, 0x18, STATS_TYPE_BANK1, },
+ { "out_octets_a", 4, 0x1a, STATS_TYPE_BANK1, },
+ { "out_octets_b", 4, 0x1b, STATS_TYPE_BANK1, },
+ { "out_management", 4, 0x1f, STATS_TYPE_BANK1, },
+};
+
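+/* Fetch a single hardware counter. Port counters live in 16-bit per-port
+ * registers (a 4-byte counter spans two consecutive registers), while
+ * bank 0 and bank 1 counters are read through the global 1 stats unit,
+ * where an 8-byte counter occupies two consecutive stat addresses.
+ */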
+static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_hw_stat *s,
+ int port, u16 bank1_select,
+ u16 histogram)
+{
+ u32 low;
+ u32 high = 0;
+ u16 reg = 0;
+ int err;
+ u64 value;
+
+ switch (s->type) {
+ case STATS_TYPE_PORT:
+ err = mv88e6xxx_port_read(chip, port, s->reg, &reg);
+ if (err)
+ return U64_MAX;
+
+ low = reg;
+ if (s->size == 4) {
+ err = mv88e6xxx_port_read(chip, port, s->reg + 1, &reg);
+ if (err)
+ return U64_MAX;
+ low |= ((u32)reg) << 16;
+ }
+ break;
+ case STATS_TYPE_BANK1:
+ reg = bank1_select;
+ fallthrough;
+ case STATS_TYPE_BANK0:
+ reg |= s->reg | histogram;
+ mv88e6xxx_g1_stats_read(chip, reg, &low);
+ if (s->size == 8)
+ mv88e6xxx_g1_stats_read(chip, reg + 1, &high);
+ break;
+ default:
+ return U64_MAX;
+ }
+ value = (((u64)high) << 32) | low;
+ return value;
+}
+
+static int mv88e6xxx_stats_get_strings(struct mv88e6xxx_chip *chip,
+ uint8_t *data, int types)
+{
+ struct mv88e6xxx_hw_stat *stat;
+ int i, j;
+
+ for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
+ stat = &mv88e6xxx_hw_stats[i];
+ if (stat->type & types) {
+ memcpy(data + j * ETH_GSTRING_LEN, stat->string,
+ ETH_GSTRING_LEN);
+ j++;
+ }
+ }
+
+ return j;
+}
+
+static int mv88e6095_stats_get_strings(struct mv88e6xxx_chip *chip,
+ uint8_t *data)
+{
+ return mv88e6xxx_stats_get_strings(chip, data,
+ STATS_TYPE_BANK0 | STATS_TYPE_PORT);
+}
+
+static int mv88e6250_stats_get_strings(struct mv88e6xxx_chip *chip,
+ uint8_t *data)
+{
+ return mv88e6xxx_stats_get_strings(chip, data, STATS_TYPE_BANK0);
+}
+
+static int mv88e6320_stats_get_strings(struct mv88e6xxx_chip *chip,
+ uint8_t *data)
+{
+ return mv88e6xxx_stats_get_strings(chip, data,
+ STATS_TYPE_BANK0 | STATS_TYPE_BANK1);
+}
+
+static const uint8_t *mv88e6xxx_atu_vtu_stats_strings[] = {
+ "atu_member_violation",
+ "atu_miss_violation",
+ "atu_full_violation",
+ "vtu_member_violation",
+ "vtu_miss_violation",
+};
+
+static void mv88e6xxx_atu_vtu_get_strings(uint8_t *data)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(mv88e6xxx_atu_vtu_stats_strings); i++)
+ strscpy(data + i * ETH_GSTRING_LEN,
+ mv88e6xxx_atu_vtu_stats_strings[i],
+ ETH_GSTRING_LEN);
+}
+
+static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, uint8_t *data)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int count = 0;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ mv88e6xxx_reg_lock(chip);
+
+ if (chip->info->ops->stats_get_strings)
+ count = chip->info->ops->stats_get_strings(chip, data);
+
+ if (chip->info->ops->serdes_get_strings) {
+ data += count * ETH_GSTRING_LEN;
+ count = chip->info->ops->serdes_get_strings(chip, port, data);
+ }
+
+ data += count * ETH_GSTRING_LEN;
+ mv88e6xxx_atu_vtu_get_strings(data);
+
+ mv88e6xxx_reg_unlock(chip);
+}
+
+static int mv88e6xxx_stats_get_sset_count(struct mv88e6xxx_chip *chip,
+ int types)
+{
+ struct mv88e6xxx_hw_stat *stat;
+ int i, j;
+
+ for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
+ stat = &mv88e6xxx_hw_stats[i];
+ if (stat->type & types)
+ j++;
+ }
+ return j;
+}
+
+static int mv88e6095_stats_get_sset_count(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_stats_get_sset_count(chip, STATS_TYPE_BANK0 |
+ STATS_TYPE_PORT);
+}
+
+static int mv88e6250_stats_get_sset_count(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_stats_get_sset_count(chip, STATS_TYPE_BANK0);
+}
+
+static int mv88e6320_stats_get_sset_count(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_stats_get_sset_count(chip, STATS_TYPE_BANK0 |
+ STATS_TYPE_BANK1);
+}
+
+static int mv88e6xxx_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int serdes_count = 0;
+ int count = 0;
+
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ mv88e6xxx_reg_lock(chip);
+ if (chip->info->ops->stats_get_sset_count)
+ count = chip->info->ops->stats_get_sset_count(chip);
+ if (count < 0)
+ goto out;
+
+ if (chip->info->ops->serdes_get_sset_count)
+ serdes_count = chip->info->ops->serdes_get_sset_count(chip,
+ port);
+ if (serdes_count < 0) {
+ count = serdes_count;
+ goto out;
+ }
+ count += serdes_count;
+ count += ARRAY_SIZE(mv88e6xxx_atu_vtu_stats_strings);
+
+out:
+ mv88e6xxx_reg_unlock(chip);
+
+ return count;
+}
+
+static int mv88e6xxx_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data, int types,
+ u16 bank1_select, u16 histogram)
+{
+ struct mv88e6xxx_hw_stat *stat;
+ int i, j;
+
+ for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
+ stat = &mv88e6xxx_hw_stats[i];
+ if (stat->type & types) {
+ mv88e6xxx_reg_lock(chip);
+ data[j] = _mv88e6xxx_get_ethtool_stat(chip, stat, port,
+ bank1_select,
+ histogram);
+ mv88e6xxx_reg_unlock(chip);
+
+ j++;
+ }
+ }
+ return j;
+}
+
+static int mv88e6095_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data)
+{
+ return mv88e6xxx_stats_get_stats(chip, port, data,
+ STATS_TYPE_BANK0 | STATS_TYPE_PORT,
+ 0, MV88E6XXX_G1_STATS_OP_HIST_RX_TX);
+}
+
+static int mv88e6250_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data)
+{
+ return mv88e6xxx_stats_get_stats(chip, port, data, STATS_TYPE_BANK0,
+ 0, MV88E6XXX_G1_STATS_OP_HIST_RX_TX);
+}
+
+static int mv88e6320_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data)
+{
+ return mv88e6xxx_stats_get_stats(chip, port, data,
+ STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
+ MV88E6XXX_G1_STATS_OP_BANK_1_BIT_9,
+ MV88E6XXX_G1_STATS_OP_HIST_RX_TX);
+}
+
+static int mv88e6390_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data)
+{
+ return mv88e6xxx_stats_get_stats(chip, port, data,
+ STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
+ MV88E6XXX_G1_STATS_OP_BANK_1_BIT_10,
+ 0);
+}
+
+static void mv88e6xxx_atu_vtu_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data)
+{
+ *data++ = chip->ports[port].atu_member_violation;
+ *data++ = chip->ports[port].atu_miss_violation;
+ *data++ = chip->ports[port].atu_full_violation;
+ *data++ = chip->ports[port].vtu_member_violation;
+ *data++ = chip->ports[port].vtu_miss_violation;
+}
+
+static void mv88e6xxx_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data)
+{
+ int count = 0;
+
+ if (chip->info->ops->stats_get_stats)
+ count = chip->info->ops->stats_get_stats(chip, port, data);
+
+ mv88e6xxx_reg_lock(chip);
+ if (chip->info->ops->serdes_get_stats) {
+ data += count;
+ count = chip->info->ops->serdes_get_stats(chip, port, data);
+ }
+ data += count;
+ mv88e6xxx_atu_vtu_get_stats(chip, port, data);
+ mv88e6xxx_reg_unlock(chip);
+}
+
+static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int ret;
+
+ mv88e6xxx_reg_lock(chip);
+
+ ret = mv88e6xxx_stats_snapshot(chip, port);
+ mv88e6xxx_reg_unlock(chip);
+
+ if (ret < 0)
+ return;
+
+ mv88e6xxx_get_stats(chip, port, data);
+}
+
+static int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int len;
+
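+ /* 32 16-bit port registers, plus any serdes register dump */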
+ len = 32 * sizeof(u16);
+ if (chip->info->ops->serdes_get_regs_len)
+ len += chip->info->ops->serdes_get_regs_len(chip, port);
+
+ return len;
+}
+
+static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
+ struct ethtool_regs *regs, void *_p)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+ u16 reg;
+ u16 *p = _p;
+ int i;
+
+ regs->version = chip->info->prod_num;
+
+ memset(p, 0xff, 32 * sizeof(u16));
+
+ mv88e6xxx_reg_lock(chip);
+
+ for (i = 0; i < 32; i++) {
+ err = mv88e6xxx_port_read(chip, port, i, &reg);
+ if (!err)
+ p[i] = reg;
+ }
+
+ if (chip->info->ops->serdes_get_regs)
+ chip->info->ops->serdes_get_regs(chip, port, &p[i]);
+
+ mv88e6xxx_reg_unlock(chip);
+}
+
+static int mv88e6xxx_get_mac_eee(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e)
+{
+ /* Nothing to do on the port's MAC */
+ return 0;
+}
+
+static int mv88e6xxx_set_mac_eee(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e)
+{
+ /* Nothing to do on the port's MAC */
+ return 0;
+}
+
+/* Mask of the local ports allowed to receive frames from a given fabric port */
+static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port)
+{
+ struct dsa_switch *ds = chip->ds;
+ struct dsa_switch_tree *dst = ds->dst;
+ struct dsa_port *dp, *other_dp;
+ bool found = false;
+ u16 pvlan;
+
+ /* dev is a physical switch */
+ if (dev <= dst->last_switch) {
+ list_for_each_entry(dp, &dst->ports, list) {
+ if (dp->ds->index == dev && dp->index == port) {
+ /* dp might be a DSA link or a user port, so it
+ * might or might not have a bridge.
+ * Use the "found" variable for both cases.
+ */
+ found = true;
+ break;
+ }
+ }
+ /* dev is a virtual bridge */
+ } else {
+ list_for_each_entry(dp, &dst->ports, list) {
+ unsigned int bridge_num = dsa_port_bridge_num_get(dp);
+
+ if (!bridge_num)
+ continue;
+
+ if (bridge_num + dst->last_switch != dev)
+ continue;
+
+ found = true;
+ break;
+ }
+ }
+
+ /* Prevent frames from an unknown switch or virtual bridge */
+ if (!found)
+ return 0;
+
+ /* Frames from DSA links and CPU ports can egress any local port */
+ if (dp->type == DSA_PORT_TYPE_CPU || dp->type == DSA_PORT_TYPE_DSA)
+ return mv88e6xxx_port_mask(chip);
+
+ pvlan = 0;
+
+ /* Frames from standalone user ports can only egress on the
+ * upstream port.
+ */
+ if (!dsa_port_bridge_dev_get(dp))
+ return BIT(dsa_switch_upstream_port(ds));
+
+ /* Frames from bridged user ports can egress any local DSA
+ * links and CPU ports, as well as any local member of their
+ * bridge group.
+ */
+ dsa_switch_for_each_port(other_dp, ds)
+ if (other_dp->type == DSA_PORT_TYPE_CPU ||
+ other_dp->type == DSA_PORT_TYPE_DSA ||
+ dsa_port_bridge_same(dp, other_dp))
+ pvlan |= BIT(other_dp->index);
+
+ return pvlan;
+}
+
+static int mv88e6xxx_port_vlan_map(struct mv88e6xxx_chip *chip, int port)
+{
+ u16 output_ports = mv88e6xxx_port_vlan(chip, chip->ds->index, port);
+
+ /* prevent frames from going back out of the port they came in on */
+ output_ports &= ~BIT(port);
+
+ return mv88e6xxx_port_set_vlan_map(chip, port, output_ports);
+}
+
+static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_port_set_state(chip, port, state);
+ mv88e6xxx_reg_unlock(chip);
+
+ if (err)
+ dev_err(ds->dev, "p%d: failed to update state\n", port);
+}
+
+static int mv88e6xxx_pri_setup(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ if (chip->info->ops->ieee_pri_map) {
+ err = chip->info->ops->ieee_pri_map(chip);
+ if (err)
+ return err;
+ }
+
+ if (chip->info->ops->ip_pri_map) {
+ err = chip->info->ops->ip_pri_map(chip);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mv88e6xxx_devmap_setup(struct mv88e6xxx_chip *chip)
+{
+ struct dsa_switch *ds = chip->ds;
+ int target, port;
+ int err;
+
+ if (!chip->info->global2_addr)
+ return 0;
+
+ /* Initialize the routing port for each of the 32 possible target
+ * devices
+ */
+ for (target = 0; target < 32; target++) {
+ port = dsa_routing_port(ds, target);
+ if (port == ds->num_ports)
+ port = 0x1f;
+
+ err = mv88e6xxx_g2_device_mapping_write(chip, target, port);
+ if (err)
+ return err;
+ }
+
+ if (chip->info->ops->set_cascade_port) {
+ port = MV88E6XXX_CASCADE_PORT_MULTIPLE;
+ err = chip->info->ops->set_cascade_port(chip, port);
+ if (err)
+ return err;
+ }
+
+ err = mv88e6xxx_g1_set_device_number(chip, chip->ds->index);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int mv88e6xxx_trunk_setup(struct mv88e6xxx_chip *chip)
+{
+ /* Clear all trunk masks and mapping */
+ if (chip->info->global2_addr)
+ return mv88e6xxx_g2_trunk_clear(chip);
+
+ return 0;
+}
+
+static int mv88e6xxx_rmu_setup(struct mv88e6xxx_chip *chip)
+{
+ if (chip->info->ops->rmu_disable)
+ return chip->info->ops->rmu_disable(chip);
+
+ return 0;
+}
+
+static int mv88e6xxx_pot_setup(struct mv88e6xxx_chip *chip)
+{
+ if (chip->info->ops->pot_clear)
+ return chip->info->ops->pot_clear(chip);
+
+ return 0;
+}
+
+static int mv88e6xxx_rsvd2cpu_setup(struct mv88e6xxx_chip *chip)
+{
+ if (chip->info->ops->mgmt_rsvd2cpu)
+ return chip->info->ops->mgmt_rsvd2cpu(chip);
+
+ return 0;
+}
+
+static int mv88e6xxx_atu_setup(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ err = mv88e6xxx_g1_atu_flush(chip, 0, true);
+ if (err)
+ return err;
+
+ /* The chips that have a "learn2all" bit in Global1, ATU
+ * Control are precisely those whose port registers have a
+ * Message Port bit in Port Control 1 and hence implement
+ * ->port_setup_message_port.
+ */
+ if (chip->info->ops->port_setup_message_port) {
+ err = mv88e6xxx_g1_atu_set_learn2all(chip, true);
+ if (err)
+ return err;
+ }
+
+ return mv88e6xxx_g1_atu_set_age_time(chip, 300000);
+}
+
+static int mv88e6xxx_irl_setup(struct mv88e6xxx_chip *chip)
+{
+ int port;
+ int err;
+
+ if (!chip->info->ops->irl_init_all)
+ return 0;
+
+ for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
+ /* Disable ingress rate limiting by resetting all per port
+ * ingress rate limit resources to their initial state.
+ */
+ err = chip->info->ops->irl_init_all(chip, port);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mv88e6xxx_mac_setup(struct mv88e6xxx_chip *chip)
+{
+ if (chip->info->ops->set_switch_mac) {
+ u8 addr[ETH_ALEN];
+
+ eth_random_addr(addr);
+
+ return chip->info->ops->set_switch_mac(chip, addr);
+ }
+
+ return 0;
+}
+
+static int mv88e6xxx_pvt_map(struct mv88e6xxx_chip *chip, int dev, int port)
+{
+ struct dsa_switch_tree *dst = chip->ds->dst;
+ struct dsa_switch *ds;
+ struct dsa_port *dp;
+ u16 pvlan = 0;
+
+ if (!mv88e6xxx_has_pvt(chip))
+ return 0;
+
+ /* Skip the local source device, which uses in-chip port VLAN */
+ if (dev != chip->ds->index) {
+ pvlan = mv88e6xxx_port_vlan(chip, dev, port);
+
+ ds = dsa_switch_find(dst->index, dev);
+ dp = ds ? dsa_to_port(ds, port) : NULL;
+ if (dp && dp->lag) {
+ /* As the PVT is used to limit flooding of
+ * FORWARD frames, which use the LAG ID as the
+ * source port, we must translate dev/port to
+ * the special "LAG device" in the PVT, using
+ * the LAG ID (one-based) as the port number
+ * (zero-based).
+ */
+ dev = MV88E6XXX_G2_PVT_ADDR_DEV_TRUNK;
+ port = dsa_port_lag_id_get(dp) - 1;
+ }
+ }
+
+ return mv88e6xxx_g2_pvt_write(chip, dev, port, pvlan);
+}
+
+static int mv88e6xxx_pvt_setup(struct mv88e6xxx_chip *chip)
+{
+ int dev, port;
+ int err;
+
+ if (!mv88e6xxx_has_pvt(chip))
+ return 0;
+
+ /* Clear 5 Bit Port for usage with Marvell Link Street devices:
+ * use 4 bits for the Src_Port/Src_Trunk and 5 bits for the Src_Dev.
+ */
+ err = mv88e6xxx_g2_misc_4_bit_port(chip);
+ if (err)
+ return err;
+
+ for (dev = 0; dev < MV88E6XXX_MAX_PVT_SWITCHES; ++dev) {
+ for (port = 0; port < MV88E6XXX_MAX_PVT_PORTS; ++port) {
+ err = mv88e6xxx_pvt_map(chip, dev, port);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int mv88e6xxx_port_fast_age_fid(struct mv88e6xxx_chip *chip, int port,
+ u16 fid)
+{
+ if (dsa_to_port(chip->ds, port)->lag)
+ /* Hardware is incapable of fast-aging a LAG through a
+ * regular ATU move operation. Until we have something
+ * more fancy in place this is a no-op.
+ */
+ return -EOPNOTSUPP;
+
+ return mv88e6xxx_g1_atu_remove(chip, fid, port, false);
+}
+
+static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_port_fast_age_fid(chip, port, 0);
+ mv88e6xxx_reg_unlock(chip);
+
+ if (err)
+ dev_err(chip->ds->dev, "p%d: failed to flush ATU: %d\n",
+ port, err);
+}
+
+static int mv88e6xxx_vtu_setup(struct mv88e6xxx_chip *chip)
+{
+ if (!mv88e6xxx_max_vid(chip))
+ return 0;
+
+ return mv88e6xxx_g1_vtu_flush(chip);
+}
+
+static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ int err;
+
+ if (!chip->info->ops->vtu_getnext)
+ return -EOPNOTSUPP;
+
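+ /* GetNext returns the first valid entry strictly above the
+ * programmed VID, so start one below the target (or at the maximum
+ * VID, which wraps around, when looking up VID 0) and then check
+ * whether the entry found is the one requested.
+ */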
+ entry->vid = vid ? vid - 1 : mv88e6xxx_max_vid(chip);
+ entry->valid = false;
+
+ err = chip->info->ops->vtu_getnext(chip, entry);
+
+ if (entry->vid != vid)
+ entry->valid = false;
+
+ return err;
+}
+
+static int mv88e6xxx_vtu_walk(struct mv88e6xxx_chip *chip,
+ int (*cb)(struct mv88e6xxx_chip *chip,
+ const struct mv88e6xxx_vtu_entry *entry,
+ void *priv),
+ void *priv)
+{
+ struct mv88e6xxx_vtu_entry entry = {
+ .vid = mv88e6xxx_max_vid(chip),
+ .valid = false,
+ };
+ int err;
+
+ if (!chip->info->ops->vtu_getnext)
+ return -EOPNOTSUPP;
+
+ do {
+ err = chip->info->ops->vtu_getnext(chip, &entry);
+ if (err)
+ return err;
+
+ if (!entry.valid)
+ break;
+
+ err = cb(chip, &entry, priv);
+ if (err)
+ return err;
+ } while (entry.vid < mv88e6xxx_max_vid(chip));
+
+ return 0;
+}
+
+static int mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ if (!chip->info->ops->vtu_loadpurge)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->vtu_loadpurge(chip, entry);
+}
+
+static int mv88e6xxx_fid_map_vlan(struct mv88e6xxx_chip *chip,
+ const struct mv88e6xxx_vtu_entry *entry,
+ void *_fid_bitmap)
+{
+ unsigned long *fid_bitmap = _fid_bitmap;
+
+ set_bit(entry->fid, fid_bitmap);
+ return 0;
+}
+
+int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *fid_bitmap)
+{
+ bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);
+
+ /* Every FID has an associated VID, so walking the VTU
+ * will discover the full set of FIDs in use.
+ */
+ return mv88e6xxx_vtu_walk(chip, mv88e6xxx_fid_map_vlan, fid_bitmap);
+}
+
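+/* Allocate the lowest FID not referenced by any VTU entry, handing it
+ * out with a freshly flushed address database.
+ */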
+static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
+{
+ DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
+ int err;
+
+ err = mv88e6xxx_fid_map(chip, fid_bitmap);
+ if (err)
+ return err;
+
+ *fid = find_first_zero_bit(fid_bitmap, MV88E6XXX_N_FID);
+ if (unlikely(*fid >= mv88e6xxx_num_databases(chip)))
+ return -ENOSPC;
+
+ /* Clear the database */
+ return mv88e6xxx_g1_atu_flush(chip, *fid, true);
+}
+
+static int mv88e6xxx_stu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
+{
+ if (!chip->info->ops->stu_loadpurge)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->stu_loadpurge(chip, entry);
+}
+
+static int mv88e6xxx_stu_setup(struct mv88e6xxx_chip *chip)
+{
+ struct mv88e6xxx_stu_entry stu = {
+ .valid = true,
+ .sid = 0
+ };
+
+ if (!mv88e6xxx_has_stu(chip))
+ return 0;
+
+ /* Make sure that SID 0 is always valid. This is used by VTU
+ * entries that do not make use of the STU, e.g. when creating
+ * a VLAN upper on a port that is also part of a VLAN
+ * filtering bridge.
+ */
+ return mv88e6xxx_stu_loadpurge(chip, &stu);
+}
+
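+/* Find the lowest free SID. SID 0 is never handed out: it is reserved
+ * for VLANs that are not mapped to any MST.
+ */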
+static int mv88e6xxx_sid_get(struct mv88e6xxx_chip *chip, u8 *sid)
+{
+ DECLARE_BITMAP(busy, MV88E6XXX_N_SID) = { 0 };
+ struct mv88e6xxx_mst *mst;
+
+ __set_bit(0, busy);
+
+ list_for_each_entry(mst, &chip->msts, node)
+ __set_bit(mst->stu.sid, busy);
+
+ *sid = find_first_zero_bit(busy, MV88E6XXX_N_SID);
+
+ return (*sid >= mv88e6xxx_max_sid(chip)) ? -ENOSPC : 0;
+}
+
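+/* Drop a reference to the MST entry using @sid, purging it from the STU
+ * and freeing it once the last user is gone. SID 0 is permanently valid
+ * and is never released.
+ */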
+static int mv88e6xxx_mst_put(struct mv88e6xxx_chip *chip, u8 sid)
+{
+ struct mv88e6xxx_mst *mst, *tmp;
+ int err;
+
+ if (!sid)
+ return 0;
+
+ list_for_each_entry_safe(mst, tmp, &chip->msts, node) {
+ if (mst->stu.sid != sid)
+ continue;
+
+ if (!refcount_dec_and_test(&mst->refcnt))
+ return 0;
+
+ mst->stu.valid = false;
+ err = mv88e6xxx_stu_loadpurge(chip, &mst->stu);
+ if (err) {
+ refcount_set(&mst->refcnt, 1);
+ return err;
+ }
+
+ list_del(&mst->node);
+ kfree(mst);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int mv88e6xxx_mst_get(struct mv88e6xxx_chip *chip, struct net_device *br,
+ u16 msti, u8 *sid)
+{
+ struct mv88e6xxx_mst *mst;
+ int err, i;
+
+ if (!mv88e6xxx_has_stu(chip)) {
+ err = -EOPNOTSUPP;
+ goto err;
+ }
+
+ if (!msti) {
+ *sid = 0;
+ return 0;
+ }
+
+ list_for_each_entry(mst, &chip->msts, node) {
+ if (mst->br == br && mst->msti == msti) {
+ refcount_inc(&mst->refcnt);
+ *sid = mst->stu.sid;
+ return 0;
+ }
+ }
+
+ err = mv88e6xxx_sid_get(chip, sid);
+ if (err)
+ goto err;
+
+ mst = kzalloc(sizeof(*mst), GFP_KERNEL);
+ if (!mst) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ INIT_LIST_HEAD(&mst->node);
+ refcount_set(&mst->refcnt, 1);
+ mst->br = br;
+ mst->msti = msti;
+ mst->stu.valid = true;
+ mst->stu.sid = *sid;
+
+ /* The bridge starts out all ports in the disabled state. But
+ * an STU state of disabled means to go by the port-global
+ * state. So we set all user ports' initial state to blocking,
+ * to match the bridge's behavior.
+ */
+ for (i = 0; i < mv88e6xxx_num_ports(chip); i++)
+ mst->stu.state[i] = dsa_is_user_port(chip->ds, i) ?
+ MV88E6XXX_PORT_CTL0_STATE_BLOCKING :
+ MV88E6XXX_PORT_CTL0_STATE_DISABLED;
+
+ err = mv88e6xxx_stu_loadpurge(chip, &mst->stu);
+ if (err)
+ goto err_free;
+
+ list_add_tail(&mst->node, &chip->msts);
+ return 0;
+
+err_free:
+ kfree(mst);
+err:
+ return err;
+}
+
+static int mv88e6xxx_port_mst_state_set(struct dsa_switch *ds, int port,
+ const struct switchdev_mst_state *st)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_mst *mst;
+ u8 state;
+ int err;
+
+ if (!mv88e6xxx_has_stu(chip))
+ return -EOPNOTSUPP;
+
+ switch (st->state) {
+ case BR_STATE_DISABLED:
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ state = MV88E6XXX_PORT_CTL0_STATE_BLOCKING;
+ break;
+ case BR_STATE_LEARNING:
+ state = MV88E6XXX_PORT_CTL0_STATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ state = MV88E6XXX_PORT_CTL0_STATE_FORWARDING;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ list_for_each_entry(mst, &chip->msts, node) {
+ if (mst->br == dsa_port_bridge_dev_get(dp) &&
+ mst->msti == st->msti) {
+ if (mst->stu.state[port] == state)
+ return 0;
+
+ mst->stu.state[port] = state;
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_stu_loadpurge(chip, &mst->stu);
+ mv88e6xxx_reg_unlock(chip);
+ return err;
+ }
+ }
+
+ return -ENOENT;
+}
+
+static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
+ u16 vid)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_vtu_entry vlan;
+ int err;
+
+ /* DSA and CPU ports have to be members of multiple VLANs */
+ if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
+ return 0;
+
+ err = mv88e6xxx_vtu_get(chip, vid, &vlan);
+ if (err)
+ return err;
+
+ if (!vlan.valid)
+ return 0;
+
+ dsa_switch_for_each_user_port(other_dp, ds) {
+ struct net_device *other_br;
+
+ if (vlan.member[other_dp->index] ==
+ MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER)
+ continue;
+
+ if (dsa_port_bridge_same(dp, other_dp))
+ break; /* same bridge, check next VLAN */
+
+ other_br = dsa_port_bridge_dev_get(other_dp);
+ if (!other_br)
+ continue;
+
+ dev_err(ds->dev, "p%d: hw VLAN %d already used by port %d in %s\n",
+ port, vlan.vid, other_dp->index, netdev_name(other_br));
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
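+/* Derive and program the port's effective PVID: the standalone VID when
+ * the port is not bridged, the special bridged VID on a VLAN-unaware
+ * bridge, or the bridge's own PVID (dropping untagged packets when none
+ * is valid) on a VLAN-aware bridge.
+ */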
+static int mv88e6xxx_port_commit_pvid(struct mv88e6xxx_chip *chip, int port)
+{
+ struct dsa_port *dp = dsa_to_port(chip->ds, port);
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
+ struct mv88e6xxx_port *p = &chip->ports[port];
+ u16 pvid = MV88E6XXX_VID_STANDALONE;
+ bool drop_untagged = false;
+ int err;
+
+ if (br) {
+ if (br_vlan_enabled(br)) {
+ pvid = p->bridge_pvid.vid;
+ drop_untagged = !p->bridge_pvid.valid;
+ } else {
+ pvid = MV88E6XXX_VID_BRIDGED;
+ }
+ }
+
+ err = mv88e6xxx_port_set_pvid(chip, port, pvid);
+ if (err)
+ return err;
+
+ return mv88e6xxx_port_drop_untagged(chip, port, drop_untagged);
+}
+
+static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering,
+ struct netlink_ext_ack *extack)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ u16 mode = vlan_filtering ? MV88E6XXX_PORT_CTL2_8021Q_MODE_SECURE :
+ MV88E6XXX_PORT_CTL2_8021Q_MODE_DISABLED;
+ int err;
+
+ if (!mv88e6xxx_max_vid(chip))
+ return -EOPNOTSUPP;
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_port_set_8021q_mode(chip, port, mode);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_port_commit_pvid(chip, port);
+
+unlock:
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int
+mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ if (!mv88e6xxx_max_vid(chip))
+ return -EOPNOTSUPP;
+
+ /* If the requested port doesn't belong to the same bridge as the VLAN
+ * members, do not support it (yet) and fall back to software VLAN.
+ */
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_port_check_hw_vlan(ds, port, vlan->vid);
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
+ const unsigned char *addr, u16 vid,
+ u8 state)
+{
+ struct mv88e6xxx_atu_entry entry;
+ struct mv88e6xxx_vtu_entry vlan;
+ u16 fid;
+ int err;
+
+ /* Ports have two private address databases: one for when the port is
+ * standalone and one for when the port is under a bridge and the
+ * 802.1Q mode is disabled. When the port is standalone, DSA wants its
+ * address database to remain 100% empty, so we never load an ATU entry
+ * into a standalone port's database. Therefore, translate the null
+ * VLAN ID into the port's database used for VLAN-unaware bridging.
+ */
+ if (vid == 0) {
+ fid = MV88E6XXX_FID_BRIDGED;
+ } else {
+ err = mv88e6xxx_vtu_get(chip, vid, &vlan);
+ if (err)
+ return err;
+
+ /* switchdev expects -EOPNOTSUPP to honor software VLANs */
+ if (!vlan.valid)
+ return -EOPNOTSUPP;
+
+ fid = vlan.fid;
+ }
+
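+ /* The ATU GetNext operation searches upwards from the programmed
+ * MAC address, so start one address below the target to find out
+ * whether an entry for it already exists.
+ */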
+ entry.state = 0;
+ ether_addr_copy(entry.mac, addr);
+ eth_addr_dec(entry.mac);
+
+ err = mv88e6xxx_g1_atu_getnext(chip, fid, &entry);
+ if (err)
+ return err;
+
+ /* Initialize a fresh ATU entry if it isn't found */
+ if (!entry.state || !ether_addr_equal(entry.mac, addr)) {
+ memset(&entry, 0, sizeof(entry));
+ ether_addr_copy(entry.mac, addr);
+ }
+
+ /* Purge the ATU entry only if no port is using it anymore */
+ if (!state) {
+ entry.portvec &= ~BIT(port);
+ if (!entry.portvec)
+ entry.state = 0;
+ } else {
+ if (state == MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC)
+ entry.portvec = BIT(port);
+ else
+ entry.portvec |= BIT(port);
+
+ entry.state = state;
+ }
+
+ return mv88e6xxx_g1_atu_loadpurge(chip, fid, &entry);
+}
+
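+/* Apply a policy by loading a static ATU entry with a policy state for
+ * the address and then setting the port's policy action. When reverting
+ * to NORMAL, the address is dissociated first, and the per-port action
+ * is only cleared once no other rule still uses the same mapping.
+ */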
+static int mv88e6xxx_policy_apply(struct mv88e6xxx_chip *chip, int port,
+ const struct mv88e6xxx_policy *policy)
+{
+ enum mv88e6xxx_policy_mapping mapping = policy->mapping;
+ enum mv88e6xxx_policy_action action = policy->action;
+ const u8 *addr = policy->addr;
+ u16 vid = policy->vid;
+ u8 state;
+ int err;
+ int id;
+
+ if (!chip->info->ops->port_set_policy)
+ return -EOPNOTSUPP;
+
+ switch (mapping) {
+ case MV88E6XXX_POLICY_MAPPING_DA:
+ case MV88E6XXX_POLICY_MAPPING_SA:
+ if (action == MV88E6XXX_POLICY_ACTION_NORMAL)
+ state = 0; /* Dissociate the port and address */
+ else if (action == MV88E6XXX_POLICY_ACTION_DISCARD &&
+ is_multicast_ether_addr(addr))
+ state = MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC_POLICY;
+ else if (action == MV88E6XXX_POLICY_ACTION_DISCARD &&
+ is_unicast_ether_addr(addr))
+ state = MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC_POLICY;
+ else
+ return -EOPNOTSUPP;
+
+ err = mv88e6xxx_port_db_load_purge(chip, port, addr, vid,
+ state);
+ if (err)
+ return err;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* Skip the port's policy clearing if the mapping is still in use */
+ if (action == MV88E6XXX_POLICY_ACTION_NORMAL)
+ idr_for_each_entry(&chip->policies, policy, id)
+ if (policy->port == port &&
+ policy->mapping == mapping &&
+ policy->action != action)
+ return 0;
+
+ return chip->info->ops->port_set_policy(chip, port, mapping, action);
+}
+
+static int mv88e6xxx_policy_insert(struct mv88e6xxx_chip *chip, int port,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct ethhdr *mac_entry = &fs->h_u.ether_spec;
+ struct ethhdr *mac_mask = &fs->m_u.ether_spec;
+ enum mv88e6xxx_policy_mapping mapping;
+ enum mv88e6xxx_policy_action action;
+ struct mv88e6xxx_policy *policy;
+ u16 vid = 0;
+ u8 *addr;
+ int err;
+ int id;
+
+ if (fs->location != RX_CLS_LOC_ANY)
+ return -EINVAL;
+
+ if (fs->ring_cookie == RX_CLS_FLOW_DISC)
+ action = MV88E6XXX_POLICY_ACTION_DISCARD;
+ else
+ return -EOPNOTSUPP;
+
+ switch (fs->flow_type & ~FLOW_EXT) {
+ case ETHER_FLOW:
+ if (!is_zero_ether_addr(mac_mask->h_dest) &&
+ is_zero_ether_addr(mac_mask->h_source)) {
+ mapping = MV88E6XXX_POLICY_MAPPING_DA;
+ addr = mac_entry->h_dest;
+ } else if (is_zero_ether_addr(mac_mask->h_dest) &&
+ !is_zero_ether_addr(mac_mask->h_source)) {
+ mapping = MV88E6XXX_POLICY_MAPPING_SA;
+ addr = mac_entry->h_source;
+ } else {
+ /* Cannot support DA and SA mapping in the same rule */
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if ((fs->flow_type & FLOW_EXT) && fs->m_ext.vlan_tci) {
+ if (fs->m_ext.vlan_tci != htons(0xffff))
+ return -EOPNOTSUPP;
+ vid = be16_to_cpu(fs->h_ext.vlan_tci) & VLAN_VID_MASK;
+ }
+
+ idr_for_each_entry(&chip->policies, policy, id) {
+ if (policy->port == port && policy->mapping == mapping &&
+ policy->action == action && policy->vid == vid &&
+ ether_addr_equal(policy->addr, addr))
+ return -EEXIST;
+ }
+
+ policy = devm_kzalloc(chip->dev, sizeof(*policy), GFP_KERNEL);
+ if (!policy)
+ return -ENOMEM;
+
+ fs->location = 0;
+ err = idr_alloc_u32(&chip->policies, policy, &fs->location, 0xffffffff,
+ GFP_KERNEL);
+ if (err) {
+ devm_kfree(chip->dev, policy);
+ return err;
+ }
+
+ memcpy(&policy->fs, fs, sizeof(*fs));
+ ether_addr_copy(policy->addr, addr);
+ policy->mapping = mapping;
+ policy->action = action;
+ policy->port = port;
+ policy->vid = vid;
+
+ err = mv88e6xxx_policy_apply(chip, port, policy);
+ if (err) {
+ idr_remove(&chip->policies, fs->location);
+ devm_kfree(chip->dev, policy);
+ return err;
+ }
+
+ return 0;
+}
+
+static int mv88e6xxx_get_rxnfc(struct dsa_switch *ds, int port,
+ struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
+{
+ struct ethtool_rx_flow_spec *fs = &rxnfc->fs;
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_policy *policy;
+ int err;
+ int id;
+
+ mv88e6xxx_reg_lock(chip);
+
+ switch (rxnfc->cmd) {
+ case ETHTOOL_GRXCLSRLCNT:
+ rxnfc->data = 0;
+ rxnfc->data |= RX_CLS_LOC_SPECIAL;
+ rxnfc->rule_cnt = 0;
+ idr_for_each_entry(&chip->policies, policy, id)
+ if (policy->port == port)
+ rxnfc->rule_cnt++;
+ err = 0;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ err = -ENOENT;
+ policy = idr_find(&chip->policies, fs->location);
+ if (policy) {
+ memcpy(fs, &policy->fs, sizeof(*fs));
+ err = 0;
+ }
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ rxnfc->data = 0;
+ rxnfc->rule_cnt = 0;
+ idr_for_each_entry(&chip->policies, policy, id)
+ if (policy->port == port)
+ rule_locs[rxnfc->rule_cnt++] = id;
+ err = 0;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int mv88e6xxx_set_rxnfc(struct dsa_switch *ds, int port,
+ struct ethtool_rxnfc *rxnfc)
+{
+ struct ethtool_rx_flow_spec *fs = &rxnfc->fs;
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_policy *policy;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+
+ switch (rxnfc->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ err = mv88e6xxx_policy_insert(chip, port, fs);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ err = -ENOENT;
+ policy = idr_remove(&chip->policies, fs->location);
+ if (policy) {
+ policy->action = MV88E6XXX_POLICY_ACTION_NORMAL;
+ err = mv88e6xxx_policy_apply(chip, port, policy);
+ devm_kfree(chip->dev, policy);
+ }
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int mv88e6xxx_port_add_broadcast(struct mv88e6xxx_chip *chip, int port,
+ u16 vid)
+{
+ u8 state = MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC;
+ u8 broadcast[ETH_ALEN];
+
+ eth_broadcast_addr(broadcast);
+
+ return mv88e6xxx_port_db_load_purge(chip, port, broadcast, vid, state);
+}
+
+static int mv88e6xxx_broadcast_setup(struct mv88e6xxx_chip *chip, u16 vid)
+{
+ int port;
+ int err;
+
+ for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
+ struct dsa_port *dp = dsa_to_port(chip->ds, port);
+ struct net_device *brport;
+
+ if (dsa_is_unused_port(chip->ds, port))
+ continue;
+
+ brport = dsa_port_to_bridge_port(dp);
+ if (brport && !br_port_flag_is_set(brport, BR_BCAST_FLOOD))
+ /* Skip bridged user ports where broadcast
+ * flooding is disabled.
+ */
+ continue;
+
+ err = mv88e6xxx_port_add_broadcast(chip, port, vid);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+struct mv88e6xxx_port_broadcast_sync_ctx {
+ int port;
+ bool flood;
+};
+
+static int
+mv88e6xxx_port_broadcast_sync_vlan(struct mv88e6xxx_chip *chip,
+ const struct mv88e6xxx_vtu_entry *vlan,
+ void *_ctx)
+{
+ struct mv88e6xxx_port_broadcast_sync_ctx *ctx = _ctx;
+ u8 broadcast[ETH_ALEN];
+ u8 state;
+
+ if (ctx->flood)
+ state = MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC;
+ else
+ state = MV88E6XXX_G1_ATU_DATA_STATE_MC_UNUSED;
+
+ eth_broadcast_addr(broadcast);
+
+ return mv88e6xxx_port_db_load_purge(chip, ctx->port, broadcast,
+ vlan->vid, state);
+}
+
+static int mv88e6xxx_port_broadcast_sync(struct mv88e6xxx_chip *chip, int port,
+ bool flood)
+{
+ struct mv88e6xxx_port_broadcast_sync_ctx ctx = {
+ .port = port,
+ .flood = flood,
+ };
+ struct mv88e6xxx_vtu_entry vid0 = {
+ .vid = 0,
+ };
+ int err;
+
+ /* Update the port's private database... */
+ err = mv88e6xxx_port_broadcast_sync_vlan(chip, &vid0, &ctx);
+ if (err)
+ return err;
+
+ /* ...and the database for all VLANs. */
+ return mv88e6xxx_vtu_walk(chip, mv88e6xxx_port_broadcast_sync_vlan,
+ &ctx);
+}
+
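+/* Make @port a member of the VLAN for @vid, creating the VTU entry with
+ * a fresh FID (and installing the broadcast address in the new database)
+ * if the VLAN does not exist yet.
+ */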
+static int mv88e6xxx_port_vlan_join(struct mv88e6xxx_chip *chip, int port,
+ u16 vid, u8 member, bool warn)
+{
+ const u8 non_member = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER;
+ struct mv88e6xxx_vtu_entry vlan;
+ int i, err;
+
+ err = mv88e6xxx_vtu_get(chip, vid, &vlan);
+ if (err)
+ return err;
+
+ if (!vlan.valid) {
+ memset(&vlan, 0, sizeof(vlan));
+
+ if (vid == MV88E6XXX_VID_STANDALONE)
+ vlan.policy = true;
+
+ err = mv88e6xxx_atu_new(chip, &vlan.fid);
+ if (err)
+ return err;
+
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i)
+ if (i == port)
+ vlan.member[i] = member;
+ else
+ vlan.member[i] = non_member;
+
+ vlan.vid = vid;
+ vlan.valid = true;
+
+ err = mv88e6xxx_vtu_loadpurge(chip, &vlan);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_broadcast_setup(chip, vlan.vid);
+ if (err)
+ return err;
+ } else if (vlan.member[port] != member) {
+ vlan.member[port] = member;
+
+ err = mv88e6xxx_vtu_loadpurge(chip, &vlan);
+ if (err)
+ return err;
+ } else if (warn) {
+ dev_info(chip->dev, "p%d: already a member of VLAN %d\n",
+ port, vid);
+ }
+
+ return 0;
+}
+
+static int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct mv88e6xxx_port *p = &chip->ports[port];
+ bool warn;
+ u8 member;
+ int err;
+
+ if (!vlan->vid)
+ return 0;
+
+ err = mv88e6xxx_port_vlan_prepare(ds, port, vlan);
+ if (err)
+ return err;
+
+ if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
+ member = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNMODIFIED;
+ else if (untagged)
+ member = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNTAGGED;
+ else
+ member = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_TAGGED;
+
+ /* net/dsa/slave.c will call dsa_port_vlan_add() for the affected port
+ * and then the CPU port. Do not warn for duplicates for the CPU port.
+ */
+ warn = !dsa_is_cpu_port(ds, port) && !dsa_is_dsa_port(ds, port);
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_port_vlan_join(chip, port, vlan->vid, member, warn);
+ if (err) {
+ dev_err(ds->dev, "p%d: failed to add VLAN %d%c\n", port,
+ vlan->vid, untagged ? 'u' : 't');
+ goto out;
+ }
+
+ if (pvid) {
+ p->bridge_pvid.vid = vlan->vid;
+ p->bridge_pvid.valid = true;
+
+ err = mv88e6xxx_port_commit_pvid(chip, port);
+ if (err)
+ goto out;
+ } else if (vlan->vid && p->bridge_pvid.vid == vlan->vid) {
+ /* The old pvid was reinstalled as a non-pvid VLAN */
+ p->bridge_pvid.valid = false;
+
+ err = mv88e6xxx_port_commit_pvid(chip, port);
+ if (err)
+ goto out;
+ }
+
+out:
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int mv88e6xxx_port_vlan_leave(struct mv88e6xxx_chip *chip,
+ int port, u16 vid)
+{
+ struct mv88e6xxx_vtu_entry vlan;
+ int i, err;
+
+ if (!vid)
+ return 0;
+
+ err = mv88e6xxx_vtu_get(chip, vid, &vlan);
+ if (err)
+ return err;
+
+ /* If the VLAN doesn't exist in hardware or the port isn't a member,
+ * tell switchdev that this VLAN is likely handled in software.
+ */
+ if (!vlan.valid ||
+ vlan.member[port] == MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER)
+ return -EOPNOTSUPP;
+
+ vlan.member[port] = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER;
+
+ /* keep the VLAN unless all ports are excluded */
+ vlan.valid = false;
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
+ if (vlan.member[i] !=
+ MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
+ vlan.valid = true;
+ break;
+ }
+ }
+
+ err = mv88e6xxx_vtu_loadpurge(chip, &vlan);
+ if (err)
+ return err;
+
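+ /* If the VLAN was just purged from the VTU, also drop the
+ * reference it held on its SID so that the STU entry can be
+ * reclaimed once no other VLAN uses it.
+ */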
+ if (!vlan.valid) {
+ err = mv88e6xxx_mst_put(chip, vlan.sid);
+ if (err)
+ return err;
+ }
+
+ return mv88e6xxx_g1_atu_remove(chip, vlan.fid, port, false);
+}
+
+static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_port *p = &chip->ports[port];
+ int err = 0;
+ u16 pvid;
+
+ if (!mv88e6xxx_max_vid(chip))
+ return -EOPNOTSUPP;
+
+ /* The ATU removal procedure needs the FID to be mapped in the VTU,
+ * but FDB deletion runs concurrently with VLAN deletion. Flush the DSA
+ * switchdev workqueue to ensure that all FDB entries are deleted
+ * before we remove the VLAN.
+ */
+ dsa_flush_workqueue();
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_port_get_pvid(chip, port, &pvid);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_port_vlan_leave(chip, port, vlan->vid);
+ if (err)
+ goto unlock;
+
+ if (vlan->vid == pvid) {
+ p->bridge_pvid.valid = false;
+
+ err = mv88e6xxx_port_commit_pvid(chip, port);
+ if (err)
+ goto unlock;
+ }
+
+unlock:
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int mv88e6xxx_port_vlan_fast_age(struct dsa_switch *ds, int port, u16 vid)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_vtu_entry vlan;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_vtu_get(chip, vid, &vlan);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_port_fast_age_fid(chip, port, vlan.fid);
+
+unlock:
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int mv88e6xxx_vlan_msti_set(struct dsa_switch *ds,
+ struct dsa_bridge bridge,
+ const struct switchdev_vlan_msti *msti)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_vtu_entry vlan;
+ u8 old_sid, new_sid;
+ int err;
+
+ if (!mv88e6xxx_has_stu(chip))
+ return -EOPNOTSUPP;
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_vtu_get(chip, msti->vid, &vlan);
+ if (err)
+ goto unlock;
+
+ if (!vlan.valid) {
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ old_sid = vlan.sid;
+
+ err = mv88e6xxx_mst_get(chip, bridge.dev, msti->msti, &new_sid);
+ if (err)
+ goto unlock;
+
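+ /* Rebind the VLAN to the new SID before dropping the reference
+ * held on the old one; if the VTU update fails, release the
+ * reference just taken on the new SID instead.
+ */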
+ if (new_sid != old_sid) {
+ vlan.sid = new_sid;
+
+ err = mv88e6xxx_vtu_loadpurge(chip, &vlan);
+ if (err) {
+ mv88e6xxx_mst_put(chip, new_sid);
+ goto unlock;
+ }
+ }
+
+ err = mv88e6xxx_mst_put(chip, old_sid);
+
+unlock:
+ mv88e6xxx_reg_unlock(chip);
+ return err;
+}
+
+static int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_port_db_load_purge(chip, port, addr, vid,
+ MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC);
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_port_db_load_purge(chip, port, addr, vid, 0);
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip,
+ u16 fid, u16 vid, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct mv88e6xxx_atu_entry addr;
+ bool is_static;
+ int err;
+
+ addr.state = 0;
+ eth_broadcast_addr(addr.mac);
+
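+ /* Seed the walk with the broadcast address: each ATU GetNext
+ * call then returns the next entry in this FID, and the loop
+ * stops once the address wraps back around to broadcast or an
+ * unused entry (state 0) is returned.
+ */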
+ do {
+ err = mv88e6xxx_g1_atu_getnext(chip, fid, &addr);
+ if (err)
+ return err;
+
+ if (!addr.state)
+ break;
+
+ if (addr.trunk || (addr.portvec & BIT(port)) == 0)
+ continue;
+
+ if (!is_unicast_ether_addr(addr.mac))
+ continue;
+
+ is_static = (addr.state ==
+ MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC);
+ err = cb(addr.mac, vid, is_static, data);
+ if (err)
+ return err;
+ } while (!is_broadcast_ether_addr(addr.mac));
+
+ return err;
+}
+
+struct mv88e6xxx_port_db_dump_vlan_ctx {
+ int port;
+ dsa_fdb_dump_cb_t *cb;
+ void *data;
+};
+
+static int mv88e6xxx_port_db_dump_vlan(struct mv88e6xxx_chip *chip,
+ const struct mv88e6xxx_vtu_entry *entry,
+ void *_data)
+{
+ struct mv88e6xxx_port_db_dump_vlan_ctx *ctx = _data;
+
+ return mv88e6xxx_port_db_dump_fid(chip, entry->fid, entry->vid,
+ ctx->port, ctx->cb, ctx->data);
+}
+
+static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct mv88e6xxx_port_db_dump_vlan_ctx ctx = {
+ .port = port,
+ .cb = cb,
+ .data = data,
+ };
+ u16 fid;
+ int err;
+
+ /* Dump port's default Filtering Information Database (VLAN ID 0) */
+ err = mv88e6xxx_port_get_fid(chip, port, &fid);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_port_db_dump_fid(chip, fid, 0, port, cb, data);
+ if (err)
+ return err;
+
+ return mv88e6xxx_vtu_walk(chip, mv88e6xxx_port_db_dump_vlan, &ctx);
+}
+
+static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_port_db_dump(chip, port, cb, data);
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int mv88e6xxx_bridge_map(struct mv88e6xxx_chip *chip,
+ struct dsa_bridge bridge)
+{
+ struct dsa_switch *ds = chip->ds;
+ struct dsa_switch_tree *dst = ds->dst;
+ struct dsa_port *dp;
+ int err;
+
+ list_for_each_entry(dp, &dst->ports, list) {
+ if (dsa_port_offloads_bridge(dp, &bridge)) {
+ if (dp->ds == ds) {
+ /* This is a local bridge group member,
+ * remap its Port VLAN Map.
+ */
+ err = mv88e6xxx_port_vlan_map(chip, dp->index);
+ if (err)
+ return err;
+ } else {
+ /* This is an external bridge group member,
+ * remap its cross-chip Port VLAN Table entry.
+ */
+ err = mv88e6xxx_pvt_map(chip, dp->ds->index,
+ dp->index);
+ if (err)
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* Treat the software bridge as a virtual single-port switch behind the
+ * CPU and map it in the PVT. The first dst->last_switch device
+ * numbers are taken by the physical switches, so start beyond that
+ * range.
+ */
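+/* For example, with dst->last_switch == 1, the virtual bridge
+ * numbered 1 is assigned PVT device number 2 (1 + 1).
+ */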
+static int mv88e6xxx_map_virtual_bridge_to_pvt(struct dsa_switch *ds,
+ unsigned int bridge_num)
+{
+ u8 dev = bridge_num + ds->dst->last_switch;
+ struct mv88e6xxx_chip *chip = ds->priv;
+
+ return mv88e6xxx_pvt_map(chip, dev, 0);
+}
+
+static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_bridge_map(chip, bridge);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_port_set_map_da(chip, port, true);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_port_commit_pvid(chip, port);
+ if (err)
+ goto unlock;
+
+ if (mv88e6xxx_has_pvt(chip)) {
+ err = mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge.num);
+ if (err)
+ goto unlock;
+
+ *tx_fwd_offload = true;
+ }
+
+unlock:
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+
+ if (bridge.tx_fwd_offload &&
+ mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge.num))
+ dev_err(ds->dev, "failed to remap cross-chip Port VLAN\n");
+
+ if (mv88e6xxx_bridge_map(chip, bridge) ||
+ mv88e6xxx_port_vlan_map(chip, port))
+ dev_err(ds->dev, "failed to remap in-chip Port VLAN\n");
+
+ err = mv88e6xxx_port_set_map_da(chip, port, false);
+ if (err)
+ dev_err(ds->dev,
+ "port %d failed to restore map-DA: %pe\n",
+ port, ERR_PTR(err));
+
+ err = mv88e6xxx_port_commit_pvid(chip, port);
+ if (err)
+ dev_err(ds->dev,
+ "port %d failed to restore standalone pvid: %pe\n",
+ port, ERR_PTR(err));
+
+ mv88e6xxx_reg_unlock(chip);
+}
+
+static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds,
+ int tree_index, int sw_index,
+ int port, struct dsa_bridge bridge,
+ struct netlink_ext_ack *extack)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ if (tree_index != ds->dst->index)
+ return 0;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_pvt_map(chip, sw_index, port);
+ err = err ? : mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge.num);
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static void mv88e6xxx_crosschip_bridge_leave(struct dsa_switch *ds,
+ int tree_index, int sw_index,
+ int port, struct dsa_bridge bridge)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+
+ if (tree_index != ds->dst->index)
+ return;
+
+ mv88e6xxx_reg_lock(chip);
+ if (mv88e6xxx_pvt_map(chip, sw_index, port) ||
+ mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge.num))
+ dev_err(ds->dev, "failed to remap cross-chip Port VLAN\n");
+ mv88e6xxx_reg_unlock(chip);
+}
+
+static int mv88e6xxx_software_reset(struct mv88e6xxx_chip *chip)
+{
+ if (chip->info->ops->reset)
+ return chip->info->ops->reset(chip);
+
+ return 0;
+}
+
+static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
+{
+ struct gpio_desc *gpiod = chip->reset;
+
+ /* If there is a GPIO connected to the reset pin, toggle it */
+ if (gpiod) {
+ /* If the switch has just been reset and has not yet finished
+ * loading the EEPROM, the reset may interrupt the I2C transaction
+ * mid-byte, causing the first EEPROM read after the reset to come
+ * from the wrong location, so that the switch boots into the
+ * wrong mode and is left inoperable.
+ */
+ if (chip->info->ops->get_eeprom)
+ mv88e6xxx_g2_eeprom_wait(chip);
+
+ gpiod_set_value_cansleep(gpiod, 1);
+ usleep_range(10000, 20000);
+ gpiod_set_value_cansleep(gpiod, 0);
+ usleep_range(10000, 20000);
+
+ if (chip->info->ops->get_eeprom)
+ mv88e6xxx_g2_eeprom_wait(chip);
+ }
+}
+
+static int mv88e6xxx_disable_ports(struct mv88e6xxx_chip *chip)
+{
+ int i, err;
+
+ /* Set all ports to the Disabled state */
+ for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
+ err = mv88e6xxx_port_set_state(chip, i, BR_STATE_DISABLED);
+ if (err)
+ return err;
+ }
+
+ /* Wait for the transmit queues to drain, i.e. allow 2 ms for a
+ * maximum-size frame to be transmitted at 10 Mbps.
+ */
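+ /* (A 1522-byte frame is ~12 kbit, i.e. roughly 1.2 ms on the
+ * wire at 10 Mbps, so sleeping 2-4 ms leaves ample margin.)
+ */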
+ usleep_range(2000, 4000);
+
+ return 0;
+}
+
+static int mv88e6xxx_switch_reset(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ err = mv88e6xxx_disable_ports(chip);
+ if (err)
+ return err;
+
+ mv88e6xxx_hardware_reset(chip);
+
+ return mv88e6xxx_software_reset(chip);
+}
+
+static int mv88e6xxx_set_port_mode(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_frame_mode frame,
+ enum mv88e6xxx_egress_mode egress, u16 etype)
+{
+ int err;
+
+ if (!chip->info->ops->port_set_frame_mode)
+ return -EOPNOTSUPP;
+
+ err = mv88e6xxx_port_set_egress_mode(chip, port, egress);
+ if (err)
+ return err;
+
+ err = chip->info->ops->port_set_frame_mode(chip, port, frame);
+ if (err)
+ return err;
+
+ if (chip->info->ops->port_set_ether_type)
+ return chip->info->ops->port_set_ether_type(chip, port, etype);
+
+ return 0;
+}
+
+static int mv88e6xxx_set_port_mode_normal(struct mv88e6xxx_chip *chip, int port)
+{
+ return mv88e6xxx_set_port_mode(chip, port, MV88E6XXX_FRAME_MODE_NORMAL,
+ MV88E6XXX_EGRESS_MODE_UNMODIFIED,
+ MV88E6XXX_PORT_ETH_TYPE_DEFAULT);
+}
+
+static int mv88e6xxx_set_port_mode_dsa(struct mv88e6xxx_chip *chip, int port)
+{
+ return mv88e6xxx_set_port_mode(chip, port, MV88E6XXX_FRAME_MODE_DSA,
+ MV88E6XXX_EGRESS_MODE_UNMODIFIED,
+ MV88E6XXX_PORT_ETH_TYPE_DEFAULT);
+}
+
+static int mv88e6xxx_set_port_mode_edsa(struct mv88e6xxx_chip *chip, int port)
+{
+ return mv88e6xxx_set_port_mode(chip, port,
+ MV88E6XXX_FRAME_MODE_ETHERTYPE,
+ MV88E6XXX_EGRESS_MODE_ETHERTYPE,
+ ETH_P_EDSA);
+}
+
+static int mv88e6xxx_setup_port_mode(struct mv88e6xxx_chip *chip, int port)
+{
+ if (dsa_is_dsa_port(chip->ds, port))
+ return mv88e6xxx_set_port_mode_dsa(chip, port);
+
+ if (dsa_is_user_port(chip->ds, port))
+ return mv88e6xxx_set_port_mode_normal(chip, port);
+
+ /* Setup CPU port mode depending on its supported tag format */
+ if (chip->tag_protocol == DSA_TAG_PROTO_DSA)
+ return mv88e6xxx_set_port_mode_dsa(chip, port);
+
+ if (chip->tag_protocol == DSA_TAG_PROTO_EDSA)
+ return mv88e6xxx_set_port_mode_edsa(chip, port);
+
+ return -EINVAL;
+}
+
+static int mv88e6xxx_setup_message_port(struct mv88e6xxx_chip *chip, int port)
+{
+ bool message = dsa_is_dsa_port(chip->ds, port);
+
+ return mv88e6xxx_port_set_message_port(chip, port, message);
+}
+
+static int mv88e6xxx_setup_egress_floods(struct mv88e6xxx_chip *chip, int port)
+{
+ int err;
+
+ if (chip->info->ops->port_set_ucast_flood) {
+ err = chip->info->ops->port_set_ucast_flood(chip, port, true);
+ if (err)
+ return err;
+ }
+ if (chip->info->ops->port_set_mcast_flood) {
+ err = chip->info->ops->port_set_mcast_flood(chip, port, true);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static irqreturn_t mv88e6xxx_serdes_irq_thread_fn(int irq, void *dev_id)
+{
+ struct mv88e6xxx_port *mvp = dev_id;
+ struct mv88e6xxx_chip *chip = mvp->chip;
+ irqreturn_t ret = IRQ_NONE;
+ int port = mvp->port;
+ int lane;
+
+ mv88e6xxx_reg_lock(chip);
+ lane = mv88e6xxx_serdes_get_lane(chip, port);
+ if (lane >= 0)
+ ret = mv88e6xxx_serdes_irq_status(chip, port, lane);
+ mv88e6xxx_reg_unlock(chip);
+
+ return ret;
+}
+
+static int mv88e6xxx_serdes_irq_request(struct mv88e6xxx_chip *chip, int port,
+ int lane)
+{
+ struct mv88e6xxx_port *dev_id = &chip->ports[port];
+ unsigned int irq;
+ int err;
+
+ /* Nothing to request if this SERDES port has no IRQ */
+ irq = mv88e6xxx_serdes_irq_mapping(chip, port);
+ if (!irq)
+ return 0;
+
+ snprintf(dev_id->serdes_irq_name, sizeof(dev_id->serdes_irq_name),
+ "mv88e6xxx-%s-serdes-%d", dev_name(chip->dev), port);
+
+ /* Requesting the IRQ will trigger IRQ callbacks, so release the lock */
+ mv88e6xxx_reg_unlock(chip);
+ err = request_threaded_irq(irq, NULL, mv88e6xxx_serdes_irq_thread_fn,
+ IRQF_ONESHOT, dev_id->serdes_irq_name,
+ dev_id);
+ mv88e6xxx_reg_lock(chip);
+ if (err)
+ return err;
+
+ dev_id->serdes_irq = irq;
+
+ return mv88e6xxx_serdes_irq_enable(chip, port, lane);
+}
+
+static int mv88e6xxx_serdes_irq_free(struct mv88e6xxx_chip *chip, int port,
+ int lane)
+{
+ struct mv88e6xxx_port *dev_id = &chip->ports[port];
+ unsigned int irq = dev_id->serdes_irq;
+ int err;
+
+ /* Nothing to free if no IRQ has been requested */
+ if (!irq)
+ return 0;
+
+ err = mv88e6xxx_serdes_irq_disable(chip, port, lane);
+
+ /* Freeing the IRQ will trigger IRQ callbacks, so release the lock */
+ mv88e6xxx_reg_unlock(chip);
+ free_irq(irq, dev_id);
+ mv88e6xxx_reg_lock(chip);
+
+ dev_id->serdes_irq = 0;
+
+ return err;
+}
+
+static int mv88e6xxx_serdes_power(struct mv88e6xxx_chip *chip, int port,
+ bool on)
+{
+ int lane;
+ int err;
+
+ lane = mv88e6xxx_serdes_get_lane(chip, port);
+ if (lane < 0)
+ return 0;
+
+ if (on) {
+ err = mv88e6xxx_serdes_power_up(chip, port, lane);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_serdes_irq_request(chip, port, lane);
+ } else {
+ err = mv88e6xxx_serdes_irq_free(chip, port, lane);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_serdes_power_down(chip, port, lane);
+ }
+
+ return err;
+}
+
+static int mv88e6xxx_set_egress_port(struct mv88e6xxx_chip *chip,
+ enum mv88e6xxx_egress_direction direction,
+ int port)
+{
+ int err;
+
+ if (!chip->info->ops->set_egress_port)
+ return -EOPNOTSUPP;
+
+ err = chip->info->ops->set_egress_port(chip, direction, port);
+ if (err)
+ return err;
+
+ if (direction == MV88E6XXX_EGRESS_DIR_INGRESS)
+ chip->ingress_dest_port = port;
+ else
+ chip->egress_dest_port = port;
+
+ return 0;
+}
+
+static int mv88e6xxx_setup_upstream_port(struct mv88e6xxx_chip *chip, int port)
+{
+ struct dsa_switch *ds = chip->ds;
+ int upstream_port;
+ int err;
+
+ upstream_port = dsa_upstream_port(ds, port);
+ if (chip->info->ops->port_set_upstream_port) {
+ err = chip->info->ops->port_set_upstream_port(chip, port,
+ upstream_port);
+ if (err)
+ return err;
+ }
+
+ if (port == upstream_port) {
+ if (chip->info->ops->set_cpu_port) {
+ err = chip->info->ops->set_cpu_port(chip,
+ upstream_port);
+ if (err)
+ return err;
+ }
+
+ err = mv88e6xxx_set_egress_port(chip,
+ MV88E6XXX_EGRESS_DIR_INGRESS,
+ upstream_port);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+
+ err = mv88e6xxx_set_egress_port(chip,
+ MV88E6XXX_EGRESS_DIR_EGRESS,
+ upstream_port);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
+{
+ struct device_node *phy_handle = NULL;
+ struct dsa_switch *ds = chip->ds;
+ phy_interface_t mode;
+ struct dsa_port *dp;
+ int tx_amp, speed;
+ int err;
+ u16 reg;
+
+ chip->ports[port].chip = chip;
+ chip->ports[port].port = port;
+
+ dp = dsa_to_port(ds, port);
+
+ /* MAC Forcing register: don't force link, speed, duplex or flow control
+ * state to any particular values on physical ports, but force the CPU
+ * port and all DSA ports to their maximum bandwidth and full duplex.
+ */
+ if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
+ struct phylink_config pl_config = {};
+ unsigned long caps;
+
+ chip->info->ops->phylink_get_caps(chip, port, &pl_config);
+
+ caps = pl_config.mac_capabilities;
+
+ if (chip->info->ops->port_max_speed_mode)
+ mode = chip->info->ops->port_max_speed_mode(port);
+ else
+ mode = PHY_INTERFACE_MODE_NA;
+
+ if (caps & MAC_10000FD)
+ speed = SPEED_10000;
+ else if (caps & MAC_5000FD)
+ speed = SPEED_5000;
+ else if (caps & MAC_2500FD)
+ speed = SPEED_2500;
+ else if (caps & MAC_1000)
+ speed = SPEED_1000;
+ else if (caps & MAC_100)
+ speed = SPEED_100;
+ else
+ speed = SPEED_10;
+
+ err = mv88e6xxx_port_setup_mac(chip, port, LINK_FORCED_UP,
+ speed, DUPLEX_FULL,
+ PAUSE_OFF, mode);
+ } else {
+ err = mv88e6xxx_port_setup_mac(chip, port, LINK_UNFORCED,
+ SPEED_UNFORCED, DUPLEX_UNFORCED,
+ PAUSE_ON,
+ PHY_INTERFACE_MODE_NA);
+ }
+ if (err)
+ return err;
+
+ /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
+ * disable Header mode, enable IGMP/MLD snooping, disable VLAN
+ * tunneling, determine priority by looking at 802.1p and IP
+ * priority fields (IP prio has precedence), and set STP state
+ * to Forwarding.
+ *
+ * If this is the CPU link, use DSA or EDSA tagging depending
+ * on which tagging mode was configured.
+ *
+ * If this is a link to another switch, use DSA tagging mode.
+ *
+ * If this is the upstream port for this switch, enable
+ * forwarding of unknown unicasts and multicasts.
+ */
+ reg = MV88E6185_PORT_CTL0_USE_TAG | MV88E6185_PORT_CTL0_USE_IP |
+ MV88E6XXX_PORT_CTL0_STATE_FORWARDING;
+ /* Forward any IPv4 IGMP or IPv6 MLD frames received
+ * by a USER port to the CPU port to allow snooping.
+ */
+ if (dsa_is_user_port(ds, port))
+ reg |= MV88E6XXX_PORT_CTL0_IGMP_MLD_SNOOP;
+
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_setup_port_mode(chip, port);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_setup_egress_floods(chip, port);
+ if (err)
+ return err;
+
+ /* Port Control 2: don't force a good FCS, set the MTU size to
+ * 10222 bytes, disable 802.1q tags checking, don't discard
+ * tagged or untagged frames on this port, skip destination
+ * address lookup on user ports, disable ARP mirroring and don't
+ * send a copy of all transmitted/received frames on this port
+ * to the CPU.
+ */
+ err = mv88e6xxx_port_set_map_da(chip, port, !dsa_is_user_port(ds, port));
+ if (err)
+ return err;
+
+ err = mv88e6xxx_setup_upstream_port(chip, port);
+ if (err)
+ return err;
+
+ /* On chips that support it, set all downstream DSA ports'
+ * VLAN policy to TRAP. In combination with loading
+ * MV88E6XXX_VID_STANDALONE as a policy entry in the VTU, this
+ * provides a better isolation barrier between standalone
+ * ports, as the ATU is bypassed on any intermediate switches
+ * between the incoming port and the CPU.
+ */
+ if (dsa_is_downstream_port(ds, port) &&
+ chip->info->ops->port_set_policy) {
+ err = chip->info->ops->port_set_policy(chip, port,
+ MV88E6XXX_POLICY_MAPPING_VTU,
+ MV88E6XXX_POLICY_ACTION_TRAP);
+ if (err)
+ return err;
+ }
+
+ /* User ports start out in standalone mode and 802.1Q is
+ * therefore disabled. On DSA ports, all valid VIDs are always
+ * loaded in the VTU - therefore, enable 802.1Q in order to take
+ * advantage of VLAN policy on chips that support it.
+ */
+ err = mv88e6xxx_port_set_8021q_mode(chip, port,
+ dsa_is_user_port(ds, port) ?
+ MV88E6XXX_PORT_CTL2_8021Q_MODE_DISABLED :
+ MV88E6XXX_PORT_CTL2_8021Q_MODE_SECURE);
+ if (err)
+ return err;
+
+ /* Bind MV88E6XXX_VID_STANDALONE to MV88E6XXX_FID_STANDALONE by
+ * virtue of the fact that mv88e6xxx_atu_new() will pick it as
+ * the first free FID. This will be used as the private PVID for
+ * unbridged ports. Shared (DSA and CPU) ports must also be
+ * members of this VID, in order to trap all frames assigned to
+ * it to the CPU.
+ */
+ err = mv88e6xxx_port_vlan_join(chip, port, MV88E6XXX_VID_STANDALONE,
+ MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNMODIFIED,
+ false);
+ if (err)
+ return err;
+
+ /* Associate MV88E6XXX_VID_BRIDGED with MV88E6XXX_FID_BRIDGED in the
+ * ATU by virtue of the fact that mv88e6xxx_atu_new() will pick it as
+ * the first free FID after MV88E6XXX_FID_STANDALONE. This will be used
+ * as the private PVID on ports under a VLAN-unaware bridge.
+ * Shared (DSA and CPU) ports must also be members of it, to translate
+ * the VID from the DSA tag into MV88E6XXX_FID_BRIDGED, instead of
+ * relying on their port default FID.
+ */
+ err = mv88e6xxx_port_vlan_join(chip, port, MV88E6XXX_VID_BRIDGED,
+ MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNMODIFIED,
+ false);
+ if (err)
+ return err;
+
+ if (chip->info->ops->port_set_jumbo_size) {
+ err = chip->info->ops->port_set_jumbo_size(chip, port, 10218);
+ if (err)
+ return err;
+ }
+
+ /* Port Association Vector: disable automatic address learning
+ * on all user ports since they start out in standalone
+ * mode. When joining a bridge, learning will be configured to
+ * match the bridge port settings. Enable learning on all
+ * DSA/CPU ports. NOTE: FROM_CPU frames always bypass the
+ * learning process.
+ *
+ * Disable HoldAt1, IntOnAgeOut, LockedPort, IgnoreWrongData,
+ * and RefreshLocked, i.e. set up standard automatic learning.
+ */
+ if (dsa_is_user_port(ds, port))
+ reg = 0;
+ else
+ reg = 1 << port;
+
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_ASSOC_VECTOR,
+ reg);
+ if (err)
+ return err;
+
+ /* Egress rate control 2: disable egress rate control. */
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_EGRESS_RATE_CTL2,
+ 0x0000);
+ if (err)
+ return err;
+
+ if (chip->info->ops->port_pause_limit) {
+ err = chip->info->ops->port_pause_limit(chip, port, 0, 0);
+ if (err)
+ return err;
+ }
+
+ if (chip->info->ops->port_disable_learn_limit) {
+ err = chip->info->ops->port_disable_learn_limit(chip, port);
+ if (err)
+ return err;
+ }
+
+ if (chip->info->ops->port_disable_pri_override) {
+ err = chip->info->ops->port_disable_pri_override(chip, port);
+ if (err)
+ return err;
+ }
+
+ if (chip->info->ops->port_tag_remap) {
+ err = chip->info->ops->port_tag_remap(chip, port);
+ if (err)
+ return err;
+ }
+
+ if (chip->info->ops->port_egress_rate_limiting) {
+ err = chip->info->ops->port_egress_rate_limiting(chip, port);
+ if (err)
+ return err;
+ }
+
+ if (chip->info->ops->port_setup_message_port) {
+ err = chip->info->ops->port_setup_message_port(chip, port);
+ if (err)
+ return err;
+ }
+
+ if (chip->info->ops->serdes_set_tx_amplitude) {
+ if (dp)
+ phy_handle = of_parse_phandle(dp->dn, "phy-handle", 0);
+
+ if (phy_handle && !of_property_read_u32(phy_handle,
+ "tx-p2p-microvolt",
+ &tx_amp))
+ err = chip->info->ops->serdes_set_tx_amplitude(chip,
+ port, tx_amp);
+ if (phy_handle) {
+ of_node_put(phy_handle);
+ if (err)
+ return err;
+ }
+ }
+
+ /* Port-based VLAN map: give each port the same default address
+ * database, and allow bidirectional communication between the
+ * CPU and DSA port(s), and the other ports.
+ */
+ err = mv88e6xxx_port_set_fid(chip, port, MV88E6XXX_FID_STANDALONE);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_port_vlan_map(chip, port);
+ if (err)
+ return err;
+
+ /* Default VLAN ID and priority: don't set a default VLAN
+ * ID, and set the default packet priority to zero.
+ */
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_DEFAULT_VLAN, 0);
+}
+
+static int mv88e6xxx_get_max_mtu(struct dsa_switch *ds, int port)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+
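+ /* The hardware limits (10240 and 1632 bytes) are full frame
+ * sizes; convert them to an MTU by subtracting the VLAN Ethernet
+ * header (18 bytes), the EDSA tag (8 bytes) and the FCS (4 bytes).
+ */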
+ if (chip->info->ops->port_set_jumbo_size)
+ return 10240 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
+ else if (chip->info->ops->set_max_frame_size)
+ return 1632 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
+ return ETH_DATA_LEN;
+}
+
+static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int ret = 0;
+
+ /* For families where we don't know how to alter the MTU,
+ * just accept any value up to ETH_DATA_LEN.
+ */
+ if (!chip->info->ops->port_set_jumbo_size &&
+ !chip->info->ops->set_max_frame_size) {
+ if (new_mtu > ETH_DATA_LEN)
+ return -EINVAL;
+
+ return 0;
+ }
+
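+ /* CPU and DSA ports must also accommodate the 8-byte EDSA tag on
+ * top of the requested MTU.
+ */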
+ if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
+ new_mtu += EDSA_HLEN;
+
+ mv88e6xxx_reg_lock(chip);
+ if (chip->info->ops->port_set_jumbo_size)
+ ret = chip->info->ops->port_set_jumbo_size(chip, port, new_mtu);
+ else if (chip->info->ops->set_max_frame_size)
+ ret = chip->info->ops->set_max_frame_size(chip, new_mtu);
+ mv88e6xxx_reg_unlock(chip);
+
+ return ret;
+}
+
+static int mv88e6xxx_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_serdes_power(chip, port, true);
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static void mv88e6xxx_port_disable(struct dsa_switch *ds, int port)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+
+ mv88e6xxx_reg_lock(chip);
+ if (mv88e6xxx_serdes_power(chip, port, false))
+ dev_err(chip->dev, "failed to power off SERDES\n");
+ mv88e6xxx_reg_unlock(chip);
+}
+
+static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds,
+ unsigned int ageing_time)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_g1_atu_set_age_time(chip, ageing_time);
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ /* Initialize the statistics unit */
+ if (chip->info->ops->stats_set_histogram) {
+ err = chip->info->ops->stats_set_histogram(chip);
+ if (err)
+ return err;
+ }
+
+ return mv88e6xxx_g1_stats_clear(chip);
+}
+
+/* Check whether the erratum workaround has already been applied. */
+static bool mv88e6390_setup_errata_applied(struct mv88e6xxx_chip *chip)
+{
+ int port;
+ int err;
+ u16 val;
+
+ for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
+ err = mv88e6xxx_port_hidden_read(chip, 0xf, port, 0, &val);
+ if (err) {
+ dev_err(chip->dev,
+ "Error reading hidden register: %d\n", err);
+ return false;
+ }
+ if (val != 0x01c0)
+ return false;
+ }
+
+ return true;
+}
+
+/* The 6390 copper ports have an erratum whose workaround requires
+ * poking magic values into undocumented hidden registers and then
+ * performing a software reset.
+ */
+static int mv88e6390_setup_errata(struct mv88e6xxx_chip *chip)
+{
+ int port;
+ int err;
+
+ if (mv88e6390_setup_errata_applied(chip))
+ return 0;
+
+ /* Set the ports into blocking mode */
+ for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
+ err = mv88e6xxx_port_set_state(chip, port, BR_STATE_DISABLED);
+ if (err)
+ return err;
+ }
+
+ for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
+ err = mv88e6xxx_port_hidden_write(chip, 0xf, port, 0, 0x01c0);
+ if (err)
+ return err;
+ }
+
+ return mv88e6xxx_software_reset(chip);
+}
+
+static void mv88e6xxx_teardown(struct dsa_switch *ds)
+{
+ mv88e6xxx_teardown_devlink_params(ds);
+ dsa_devlink_resources_unregister(ds);
+ mv88e6xxx_teardown_devlink_regions_global(ds);
+}
+
+static int mv88e6xxx_setup(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ u8 cmode;
+ int err;
+ int i;
+
+ chip->ds = ds;
+ ds->slave_mii_bus = mv88e6xxx_default_mdio_bus(chip);
+
+ /* Since virtual bridges are mapped in the PVT, the number we support
+ * depends on the physical switch topology. We need to let DSA figure
+ * that out and therefore we cannot set this at dsa_register_switch()
+ * time.
+ */
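+ /* For instance, assuming MV88E6XXX_MAX_PVT_SWITCHES is 32, a
+ * tree consisting of a single switch (dst->last_switch == 0) can
+ * offload up to 31 virtual bridges.
+ */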
+ if (mv88e6xxx_has_pvt(chip))
+ ds->max_num_bridges = MV88E6XXX_MAX_PVT_SWITCHES -
+ ds->dst->last_switch - 1;
+
+ mv88e6xxx_reg_lock(chip);
+
+ if (chip->info->ops->setup_errata) {
+ err = chip->info->ops->setup_errata(chip);
+ if (err)
+ goto unlock;
+ }
+
+ /* Cache the cmode of each port. */
+ for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
+ if (chip->info->ops->port_get_cmode) {
+ err = chip->info->ops->port_get_cmode(chip, i, &cmode);
+ if (err)
+ goto unlock;
+
+ chip->ports[i].cmode = cmode;
+ }
+ }
+
+ err = mv88e6xxx_vtu_setup(chip);
+ if (err)
+ goto unlock;
+
+ /* Must be called after mv88e6xxx_vtu_setup (which flushes the
+ * VTU, thereby also flushing the STU).
+ */
+ err = mv88e6xxx_stu_setup(chip);
+ if (err)
+ goto unlock;
+
+ /* Setup Switch Port Registers */
+ for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
+ if (dsa_is_unused_port(ds, i))
+ continue;
+
+ /* Prevent the use of an invalid port. */
+ if (mv88e6xxx_is_invalid_port(chip, i)) {
+ dev_err(chip->dev, "port %d is invalid\n", i);
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ err = mv88e6xxx_setup_port(chip, i);
+ if (err)
+ goto unlock;
+ }
+
+ err = mv88e6xxx_irl_setup(chip);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_mac_setup(chip);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_phy_setup(chip);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_pvt_setup(chip);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_atu_setup(chip);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_broadcast_setup(chip, 0);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_pot_setup(chip);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_rmu_setup(chip);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_rsvd2cpu_setup(chip);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_trunk_setup(chip);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_devmap_setup(chip);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_pri_setup(chip);
+ if (err)
+ goto unlock;
+
+ /* Setup PTP Hardware Clock and timestamping */
+ if (chip->info->ptp_support) {
+ err = mv88e6xxx_ptp_setup(chip);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_hwtstamp_setup(chip);
+ if (err)
+ goto unlock;
+ }
+
+ err = mv88e6xxx_stats_setup(chip);
+ if (err)
+ goto unlock;
+
+unlock:
+ mv88e6xxx_reg_unlock(chip);
+
+ if (err)
+ return err;
+
+ /* These have to be called without holding the register lock, since
+ * they take the devlink lock, and we later take the locks in the
+ * reverse order when getting/setting parameters or reading
+ * resource occupancy.
+ */
+ err = mv88e6xxx_setup_devlink_resources(ds);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_setup_devlink_params(ds);
+ if (err)
+ goto out_resources;
+
+ err = mv88e6xxx_setup_devlink_regions_global(ds);
+ if (err)
+ goto out_params;
+
+ return 0;
+
+out_params:
+ mv88e6xxx_teardown_devlink_params(ds);
+out_resources:
+ dsa_devlink_resources_unregister(ds);
+
+ return err;
+}
+
+static int mv88e6xxx_port_setup(struct dsa_switch *ds, int port)
+{
+ return mv88e6xxx_setup_devlink_regions_port(ds, port);
+}
+
+static void mv88e6xxx_port_teardown(struct dsa_switch *ds, int port)
+{
+ mv88e6xxx_teardown_devlink_regions_port(ds, port);
+}
+
+/* Product IDs for switch families whose internal PHYs lack a model number */
+static const u16 family_prod_id_table[] = {
+ [MV88E6XXX_FAMILY_6341] = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
+ [MV88E6XXX_FAMILY_6390] = MV88E6XXX_PORT_SWITCH_ID_PROD_6390,
+ [MV88E6XXX_FAMILY_6393] = MV88E6XXX_PORT_SWITCH_ID_PROD_6393X,
+};
+
+static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+ struct mv88e6xxx_chip *chip = mdio_bus->chip;
+ u16 prod_id;
+ u16 val;
+ int err;
+
+ if (!chip->info->ops->phy_read)
+ return -EOPNOTSUPP;
+
+ mv88e6xxx_reg_lock(chip);
+ err = chip->info->ops->phy_read(chip, bus, phy, reg, &val);
+ mv88e6xxx_reg_unlock(chip);
+
+ /* Some internal PHYs don't have a model number. */
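+ /* Fold the switch product ID into the otherwise-empty model
+ * number bits (the 0x3f0 mask) so that PHY drivers can still
+ * identify these ports.
+ */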
+ if (reg == MII_PHYSID2 && !(val & 0x3f0) &&
+ chip->info->family < ARRAY_SIZE(family_prod_id_table)) {
+ prod_id = family_prod_id_table[chip->info->family];
+ if (prod_id)
+ val |= prod_id >> 4;
+ }
+
+ return err ? err : val;
+}
+
+static int mv88e6xxx_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+ struct mv88e6xxx_chip *chip = mdio_bus->chip;
+ int err;
+
+ if (!chip->info->ops->phy_write)
+ return -EOPNOTSUPP;
+
+ mv88e6xxx_reg_lock(chip);
+ err = chip->info->ops->phy_write(chip, bus, phy, reg, val);
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
+ struct device_node *np,
+ bool external)
+{
+ static int index;
+ struct mv88e6xxx_mdio_bus *mdio_bus;
+ struct mii_bus *bus;
+ int err;
+
+ if (external) {
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_g2_scratch_gpio_set_smi(chip, true);
+ mv88e6xxx_reg_unlock(chip);
+
+ if (err)
+ return err;
+ }
+
+ bus = mdiobus_alloc_size(sizeof(*mdio_bus));
+ if (!bus)
+ return -ENOMEM;
+
+ mdio_bus = bus->priv;
+ mdio_bus->bus = bus;
+ mdio_bus->chip = chip;
+ INIT_LIST_HEAD(&mdio_bus->list);
+ mdio_bus->external = external;
+
+ if (np) {
+ bus->name = np->full_name;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%pOF", np);
+ } else {
+ bus->name = "mv88e6xxx SMI";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "mv88e6xxx-%d", index++);
+ }
+
+ bus->read = mv88e6xxx_mdio_read;
+ bus->write = mv88e6xxx_mdio_write;
+ bus->parent = chip->dev;
+
+ if (!external) {
+ err = mv88e6xxx_g2_irq_mdio_setup(chip, bus);
+ if (err)
+ goto out;
+ }
+
+ err = of_mdiobus_register(bus, np);
+ if (err) {
+ dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err);
+ mv88e6xxx_g2_irq_mdio_free(chip, bus);
+ goto out;
+ }
+
+ if (external)
+ list_add_tail(&mdio_bus->list, &chip->mdios);
+ else
+ list_add(&mdio_bus->list, &chip->mdios);
+
+ return 0;
+
+out:
+ mdiobus_free(bus);
+ return err;
+}
+
+static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus, *p;
+ struct mii_bus *bus;
+
+ list_for_each_entry_safe(mdio_bus, p, &chip->mdios, list) {
+ bus = mdio_bus->bus;
+
+ if (!mdio_bus->external)
+ mv88e6xxx_g2_irq_mdio_free(chip, bus);
+
+ mdiobus_unregister(bus);
+ mdiobus_free(bus);
+ }
+}
+
+static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
+ struct device_node *np)
+{
+ struct device_node *child;
+ int err;
+
+ /* Always register one MDIO bus for the internal/default MDIO
+ * bus. It may be represented in the device tree, but that is
+ * optional.
+ */
+ child = of_get_child_by_name(np, "mdio");
+ err = mv88e6xxx_mdio_register(chip, child, false);
+ of_node_put(child);
+ if (err)
+ return err;
+
+ /* Walk the device tree, and see if there are any other nodes
+ * which say they are compatible with the external mdio
+ * bus.
+ */
+ for_each_available_child_of_node(np, child) {
+ if (of_device_is_compatible(
+ child, "marvell,mv88e6xxx-mdio-external")) {
+ err = mv88e6xxx_mdio_register(chip, child, true);
+ if (err) {
+ mv88e6xxx_mdios_unregister(chip);
+ of_node_put(child);
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+
+ return chip->eeprom_len;
+}
+
+static int mv88e6xxx_get_eeprom(struct dsa_switch *ds,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ if (!chip->info->ops->get_eeprom)
+ return -EOPNOTSUPP;
+
+ mv88e6xxx_reg_lock(chip);
+ err = chip->info->ops->get_eeprom(chip, eeprom, data);
+ mv88e6xxx_reg_unlock(chip);
+
+ if (err)
+ return err;
+
+ eeprom->magic = 0xc3ec4951;
+
+ return 0;
+}
+
+static int mv88e6xxx_set_eeprom(struct dsa_switch *ds,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ if (!chip->info->ops->set_eeprom)
+ return -EOPNOTSUPP;
+
+ if (eeprom->magic != 0xc3ec4951)
+ return -EINVAL;
+
+ mv88e6xxx_reg_lock(chip);
+ err = chip->info->ops->set_eeprom(chip, eeprom, data);
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static const struct mv88e6xxx_ops mv88e6085_ops = {
+ /* MV88E6XXX_FAMILY_6097 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
+ .irl_init_all = mv88e6352_g2_irl_init_all,
+ .set_switch_mac = mv88e6xxx_g1_set_switch_mac,
+ .phy_read = mv88e6185_phy_ppu_read,
+ .phy_write = mv88e6185_phy_ppu_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_limit = mv88e6097_port_pause_limit,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_get_cmode = mv88e6185_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
+ .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6095_stats_get_sset_count,
+ .stats_get_strings = mv88e6095_stats_get_strings,
+ .stats_get_stats = mv88e6095_stats_get_stats,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6097_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
+ .ppu_enable = mv88e6185_g1_ppu_enable,
+ .ppu_disable = mv88e6185_g1_ppu_disable,
+ .reset = mv88e6185_g1_reset,
+ .rmu_disable = mv88e6085_g1_rmu_disable,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
+ .set_max_frame_size = mv88e6185_g1_set_max_frame_size,
+};
+
+static const struct mv88e6xxx_ops mv88e6095_ops = {
+ /* MV88E6XXX_FAMILY_6095 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
+ .set_switch_mac = mv88e6xxx_g1_set_switch_mac,
+ .phy_read = mv88e6185_phy_ppu_read,
+ .phy_write = mv88e6185_phy_ppu_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6185_port_sync_link,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
+ .port_set_frame_mode = mv88e6085_port_set_frame_mode,
+ .port_set_ucast_flood = mv88e6185_port_set_forward_unknown,
+ .port_set_mcast_flood = mv88e6185_port_set_default_forward,
+ .port_set_upstream_port = mv88e6095_port_set_upstream_port,
+ .port_get_cmode = mv88e6185_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
+ .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6095_stats_get_sset_count,
+ .stats_get_strings = mv88e6095_stats_get_strings,
+ .stats_get_stats = mv88e6095_stats_get_stats,
+ .mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu,
+ .serdes_power = mv88e6185_serdes_power,
+ .serdes_get_lane = mv88e6185_serdes_get_lane,
+ .serdes_pcs_get_state = mv88e6185_serdes_pcs_get_state,
+ .ppu_enable = mv88e6185_g1_ppu_enable,
+ .ppu_disable = mv88e6185_g1_ppu_disable,
+ .reset = mv88e6185_g1_reset,
+ .vtu_getnext = mv88e6185_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+ .phylink_get_caps = mv88e6095_phylink_get_caps,
+ .set_max_frame_size = mv88e6185_g1_set_max_frame_size,
+};
+
+static const struct mv88e6xxx_ops mv88e6097_ops = {
+ /* MV88E6XXX_FAMILY_6097 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
+ .irl_init_all = mv88e6352_g2_irl_init_all,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6185_port_sync_link,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting,
+ .port_pause_limit = mv88e6097_port_pause_limit,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_get_cmode = mv88e6185_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
+ .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6095_stats_get_sset_count,
+ .stats_get_strings = mv88e6095_stats_get_strings,
+ .stats_get_stats = mv88e6095_stats_get_stats,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6097_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ .serdes_power = mv88e6185_serdes_power,
+ .serdes_get_lane = mv88e6185_serdes_get_lane,
+ .serdes_pcs_get_state = mv88e6185_serdes_pcs_get_state,
+ .serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
+ .serdes_irq_enable = mv88e6097_serdes_irq_enable,
+ .serdes_irq_status = mv88e6097_serdes_irq_status,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
+ .reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6085_g1_rmu_disable,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .phylink_get_caps = mv88e6095_phylink_get_caps,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .set_max_frame_size = mv88e6185_g1_set_max_frame_size,
+};
+
+static const struct mv88e6xxx_ops mv88e6123_ops = {
+ /* MV88E6XXX_FAMILY_6165 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
+ .irl_init_all = mv88e6352_g2_irl_init_all,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
+ .port_set_frame_mode = mv88e6085_port_set_frame_mode,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_get_cmode = mv88e6185_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
+ .stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6095_stats_get_sset_count,
+ .stats_get_strings = mv88e6095_stats_get_strings,
+ .stats_get_stats = mv88e6095_stats_get_stats,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6097_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
+ .reset = mv88e6352_g1_reset,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
+ .set_max_frame_size = mv88e6185_g1_set_max_frame_size,
+};
+
+static const struct mv88e6xxx_ops mv88e6131_ops = {
+ /* MV88E6XXX_FAMILY_6185 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
+ .set_switch_mac = mv88e6xxx_g1_set_switch_mac,
+ .phy_read = mv88e6185_phy_ppu_read,
+ .phy_write = mv88e6185_phy_ppu_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_ucast_flood = mv88e6185_port_set_forward_unknown,
+ .port_set_mcast_flood = mv88e6185_port_set_default_forward,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_set_upstream_port = mv88e6095_port_set_upstream_port,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_limit = mv88e6097_port_pause_limit,
+ .port_set_pause = mv88e6185_port_set_pause,
+ .port_get_cmode = mv88e6185_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
+ .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6095_stats_get_sset_count,
+ .stats_get_strings = mv88e6095_stats_get_strings,
+ .stats_get_stats = mv88e6095_stats_get_stats,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6097_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu,
+ .ppu_enable = mv88e6185_g1_ppu_enable,
+ .set_cascade_port = mv88e6185_g1_set_cascade_port,
+ .ppu_disable = mv88e6185_g1_ppu_disable,
+ .reset = mv88e6185_g1_reset,
+ .vtu_getnext = mv88e6185_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
+};
+
+static const struct mv88e6xxx_ops mv88e6141_ops = {
+ /* MV88E6XXX_FAMILY_6341 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
+ .irl_init_all = mv88e6352_g2_irl_init_all,
+ .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom8,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
+ .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+ .port_set_speed_duplex = mv88e6341_port_set_speed_duplex,
+ .port_max_speed_mode = mv88e6341_port_max_speed_mode,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_limit = mv88e6097_port_pause_limit,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_get_cmode = mv88e6352_port_get_cmode,
+ .port_set_cmode = mv88e6341_port_set_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
+ .stats_snapshot = mv88e6390_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6320_stats_get_sset_count,
+ .stats_get_strings = mv88e6320_stats_get_strings,
+ .stats_get_stats = mv88e6390_stats_get_stats,
+ .set_cpu_port = mv88e6390_g1_set_cpu_port,
+ .set_egress_port = mv88e6390_g1_set_egress_port,
+ .watchdog_ops = &mv88e6390_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
+ .reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .serdes_power = mv88e6390_serdes_power,
+ .serdes_get_lane = mv88e6341_serdes_get_lane,
+ /* Check status register pause & lpa register */
+ .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6390_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
+ .serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
+ .serdes_irq_enable = mv88e6390_serdes_irq_enable,
+ .serdes_irq_status = mv88e6390_serdes_irq_status,
+ .gpio_ops = &mv88e6352_gpio_ops,
+ .serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
+ .serdes_get_strings = mv88e6390_serdes_get_strings,
+ .serdes_get_stats = mv88e6390_serdes_get_stats,
+ .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6390_serdes_get_regs,
+ .phylink_get_caps = mv88e6341_phylink_get_caps,
+};
+
+static const struct mv88e6xxx_ops mv88e6161_ops = {
+ /* MV88E6XXX_FAMILY_6165 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
+ .irl_init_all = mv88e6352_g2_irl_init_all,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_limit = mv88e6097_port_pause_limit,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_get_cmode = mv88e6185_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
+ .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6095_stats_get_sset_count,
+ .stats_get_strings = mv88e6095_stats_get_strings,
+ .stats_get_stats = mv88e6095_stats_get_stats,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6097_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
+ .reset = mv88e6352_g1_reset,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .avb_ops = &mv88e6165_avb_ops,
+ .ptp_ops = &mv88e6165_ptp_ops,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
+ .set_max_frame_size = mv88e6185_g1_set_max_frame_size,
+};
+
+static const struct mv88e6xxx_ops mv88e6165_ops = {
+ /* MV88E6XXX_FAMILY_6165 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
+ .irl_init_all = mv88e6352_g2_irl_init_all,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6165_phy_read,
+ .phy_write = mv88e6165_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_get_cmode = mv88e6185_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
+ .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6095_stats_get_sset_count,
+ .stats_get_strings = mv88e6095_stats_get_strings,
+ .stats_get_stats = mv88e6095_stats_get_stats,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6097_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
+ .reset = mv88e6352_g1_reset,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .avb_ops = &mv88e6165_avb_ops,
+ .ptp_ops = &mv88e6165_ptp_ops,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
+};
+
+static const struct mv88e6xxx_ops mv88e6171_ops = {
+ /* MV88E6XXX_FAMILY_6351 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
+ .irl_init_all = mv88e6352_g2_irl_init_all,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
+ .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_limit = mv88e6097_port_pause_limit,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
+ .stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6095_stats_get_sset_count,
+ .stats_get_strings = mv88e6095_stats_get_strings,
+ .stats_get_stats = mv88e6095_stats_get_stats,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6097_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
+ .reset = mv88e6352_g1_reset,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
+};
+
+static const struct mv88e6xxx_ops mv88e6172_ops = {
+ /* MV88E6XXX_FAMILY_6352 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
+ .irl_init_all = mv88e6352_g2_irl_init_all,
+ .get_eeprom = mv88e6xxx_g2_get_eeprom16,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom16,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
+ .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
+ .port_set_speed_duplex = mv88e6352_port_set_speed_duplex,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_limit = mv88e6097_port_pause_limit,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
+ .stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6095_stats_get_sset_count,
+ .stats_get_strings = mv88e6095_stats_get_strings,
+ .stats_get_stats = mv88e6095_stats_get_stats,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6097_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
+ .reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6352_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .serdes_get_lane = mv88e6352_serdes_get_lane,
+ .serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6352_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6352_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6352_serdes_pcs_link_up,
+ .serdes_power = mv88e6352_serdes_power,
+ .serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6352_serdes_get_regs,
+ .gpio_ops = &mv88e6352_gpio_ops,
+ .phylink_get_caps = mv88e6352_phylink_get_caps,
+};
+
+static const struct mv88e6xxx_ops mv88e6175_ops = {
+ /* MV88E6XXX_FAMILY_6351 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
+ .irl_init_all = mv88e6352_g2_irl_init_all,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
+ .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_limit = mv88e6097_port_pause_limit,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
+ .stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6095_stats_get_sset_count,
+ .stats_get_strings = mv88e6095_stats_get_strings,
+ .stats_get_stats = mv88e6095_stats_get_stats,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6097_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
+ .reset = mv88e6352_g1_reset,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
+};
+
+static const struct mv88e6xxx_ops mv88e6176_ops = {
+ /* MV88E6XXX_FAMILY_6352 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
+ .irl_init_all = mv88e6352_g2_irl_init_all,
+ .get_eeprom = mv88e6xxx_g2_get_eeprom16,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom16,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
+ .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
+ .port_set_speed_duplex = mv88e6352_port_set_speed_duplex,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_limit = mv88e6097_port_pause_limit,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
+ .stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6095_stats_get_sset_count,
+ .stats_get_strings = mv88e6095_stats_get_strings,
+ .stats_get_stats = mv88e6095_stats_get_stats,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6097_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
+ .reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6352_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .serdes_get_lane = mv88e6352_serdes_get_lane,
+ .serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6352_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6352_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6352_serdes_pcs_link_up,
+ .serdes_power = mv88e6352_serdes_power,
+ .serdes_irq_mapping = mv88e6352_serdes_irq_mapping,
+ .serdes_irq_enable = mv88e6352_serdes_irq_enable,
+ .serdes_irq_status = mv88e6352_serdes_irq_status,
+ .serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6352_serdes_get_regs,
+ .serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude,
+ .gpio_ops = &mv88e6352_gpio_ops,
+ .phylink_get_caps = mv88e6352_phylink_get_caps,
+};
+
+static const struct mv88e6xxx_ops mv88e6185_ops = {
+ /* MV88E6XXX_FAMILY_6185 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
+ .set_switch_mac = mv88e6xxx_g1_set_switch_mac,
+ .phy_read = mv88e6185_phy_ppu_read,
+ .phy_write = mv88e6185_phy_ppu_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6185_port_sync_link,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
+ .port_set_frame_mode = mv88e6085_port_set_frame_mode,
+ .port_set_ucast_flood = mv88e6185_port_set_forward_unknown,
+ .port_set_mcast_flood = mv88e6185_port_set_default_forward,
+ .port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting,
+ .port_set_upstream_port = mv88e6095_port_set_upstream_port,
+ .port_set_pause = mv88e6185_port_set_pause,
+ .port_get_cmode = mv88e6185_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
+ .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6095_stats_get_sset_count,
+ .stats_get_strings = mv88e6095_stats_get_strings,
+ .stats_get_stats = mv88e6095_stats_get_stats,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6097_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu,
+ .serdes_power = mv88e6185_serdes_power,
+ .serdes_get_lane = mv88e6185_serdes_get_lane,
+ .serdes_pcs_get_state = mv88e6185_serdes_pcs_get_state,
+ .set_cascade_port = mv88e6185_g1_set_cascade_port,
+ .ppu_enable = mv88e6185_g1_ppu_enable,
+ .ppu_disable = mv88e6185_g1_ppu_disable,
+ .reset = mv88e6185_g1_reset,
+ .vtu_getnext = mv88e6185_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
+ .set_max_frame_size = mv88e6185_g1_set_max_frame_size,
+};
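
The 88E6185 routes PHY access through its PHY Polling Unit: .phy_read/.phy_write point at the _phy_ppu_ helpers, and .ppu_enable/.ppu_disable ops are provided, which suggests the poller must be paused around direct PHY register access and restarted afterwards. A hedged sketch of that bracketed-access pattern; every name below is an illustrative stand-in, not the kernel's code:

#include <stdio.h>

static int ppu_running = 1;

static void ppu_disable(void) { ppu_running = 0; printf("PPU stopped\n"); }
static void ppu_enable(void)  { ppu_running = 1; printf("PPU restarted\n"); }

/* Direct PHY register read; only safe while the PPU is not polling. */
static int phy_read_direct(int phy, int reg)
{
	if (ppu_running)
		return -1;		/* bus still owned by the poller */
	return (phy << 8) | reg;	/* stand-in for a real SMI read */
}

static int phy_ppu_read(int phy, int reg)
{
	int val;

	ppu_disable();			/* take the bus from the poller */
	val = phy_read_direct(phy, reg);
	ppu_enable();			/* hand it back */
	return val;
}

int main(void)
{
	printf("reg = 0x%04x\n", phy_ppu_read(0, 1));
	return 0;
}
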
+
+static const struct mv88e6xxx_ops mv88e6190_ops = {
+ /* MV88E6XXX_FAMILY_6390 */
+ .setup_errata = mv88e6390_setup_errata,
+ .irl_init_all = mv88e6390_g2_irl_init_all,
+ .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom8,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
+ .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+ .port_set_speed_duplex = mv88e6390_port_set_speed_duplex,
+ .port_max_speed_mode = mv88e6390_port_max_speed_mode,
+ .port_tag_remap = mv88e6390_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_pause_limit = mv88e6390_port_pause_limit,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_get_cmode = mv88e6352_port_get_cmode,
+ .port_set_cmode = mv88e6390_port_set_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
+ .stats_snapshot = mv88e6390_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6320_stats_get_sset_count,
+ .stats_get_strings = mv88e6320_stats_get_strings,
+ .stats_get_stats = mv88e6390_stats_get_stats,
+ .set_cpu_port = mv88e6390_g1_set_cpu_port,
+ .set_egress_port = mv88e6390_g1_set_egress_port,
+ .watchdog_ops = &mv88e6390_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
+ .reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
+ .vtu_getnext = mv88e6390_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
+ .serdes_power = mv88e6390_serdes_power,
+ .serdes_get_lane = mv88e6390_serdes_get_lane,
+ /* Check status register pause & lpa register */
+ .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6390_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
+ .serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
+ .serdes_irq_enable = mv88e6390_serdes_irq_enable,
+ .serdes_irq_status = mv88e6390_serdes_irq_status,
+ .serdes_get_strings = mv88e6390_serdes_get_strings,
+ .serdes_get_stats = mv88e6390_serdes_get_stats,
+ .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6390_serdes_get_regs,
+ .gpio_ops = &mv88e6352_gpio_ops,
+ .phylink_get_caps = mv88e6390_phylink_get_caps,
+};
+
+static const struct mv88e6xxx_ops mv88e6190x_ops = {
+ /* MV88E6XXX_FAMILY_6390 */
+ .setup_errata = mv88e6390_setup_errata,
+ .irl_init_all = mv88e6390_g2_irl_init_all,
+ .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom8,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
+ .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+ .port_set_speed_duplex = mv88e6390x_port_set_speed_duplex,
+ .port_max_speed_mode = mv88e6390x_port_max_speed_mode,
+ .port_tag_remap = mv88e6390_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_pause_limit = mv88e6390_port_pause_limit,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_get_cmode = mv88e6352_port_get_cmode,
+ .port_set_cmode = mv88e6390x_port_set_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
+ .stats_snapshot = mv88e6390_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6320_stats_get_sset_count,
+ .stats_get_strings = mv88e6320_stats_get_strings,
+ .stats_get_stats = mv88e6390_stats_get_stats,
+ .set_cpu_port = mv88e6390_g1_set_cpu_port,
+ .set_egress_port = mv88e6390_g1_set_egress_port,
+ .watchdog_ops = &mv88e6390_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
+ .reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
+ .vtu_getnext = mv88e6390_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
+ .serdes_power = mv88e6390_serdes_power,
+ .serdes_get_lane = mv88e6390x_serdes_get_lane,
+ /* Check status register pause & lpa register */
+ .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6390_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
+ .serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
+ .serdes_irq_enable = mv88e6390_serdes_irq_enable,
+ .serdes_irq_status = mv88e6390_serdes_irq_status,
+ .serdes_get_strings = mv88e6390_serdes_get_strings,
+ .serdes_get_stats = mv88e6390_serdes_get_stats,
+ .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6390_serdes_get_regs,
+ .gpio_ops = &mv88e6352_gpio_ops,
+ .phylink_get_caps = mv88e6390x_phylink_get_caps,
+};
+
+static const struct mv88e6xxx_ops mv88e6191_ops = {
+ /* MV88E6XXX_FAMILY_6390 */
+ .setup_errata = mv88e6390_setup_errata,
+ .irl_init_all = mv88e6390_g2_irl_init_all,
+ .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom8,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
+ .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+ .port_set_speed_duplex = mv88e6390_port_set_speed_duplex,
+ .port_max_speed_mode = mv88e6390_port_max_speed_mode,
+ .port_tag_remap = mv88e6390_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_pause_limit = mv88e6390_port_pause_limit,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_get_cmode = mv88e6352_port_get_cmode,
+ .port_set_cmode = mv88e6390_port_set_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
+ .stats_snapshot = mv88e6390_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6320_stats_get_sset_count,
+ .stats_get_strings = mv88e6320_stats_get_strings,
+ .stats_get_stats = mv88e6390_stats_get_stats,
+ .set_cpu_port = mv88e6390_g1_set_cpu_port,
+ .set_egress_port = mv88e6390_g1_set_egress_port,
+ .watchdog_ops = &mv88e6390_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
+ .reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
+ .vtu_getnext = mv88e6390_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
+ .serdes_power = mv88e6390_serdes_power,
+ .serdes_get_lane = mv88e6390_serdes_get_lane,
+ /* Check status register pause & lpa register */
+ .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6390_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
+ .serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
+ .serdes_irq_enable = mv88e6390_serdes_irq_enable,
+ .serdes_irq_status = mv88e6390_serdes_irq_status,
+ .serdes_get_strings = mv88e6390_serdes_get_strings,
+ .serdes_get_stats = mv88e6390_serdes_get_stats,
+ .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6390_serdes_get_regs,
+ .avb_ops = &mv88e6390_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+ .phylink_get_caps = mv88e6390_phylink_get_caps,
+};
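
mv88e6191_ops is one of the tables that carries .avb_ops and .ptp_ops, and the matching entries in the info table further below also set .ptp_support. A simplified sketch, under the assumption that PTP registration is gated on both the info flag and the presence of the op vectors (stand-in types, not the kernel's):

#include <stdio.h>
#include <stdbool.h>

struct demo_ptp_ops { int dummy; };

struct demo_info {
	bool ptp_support;
};

struct demo_chip {
	const struct demo_info *info;
	const struct demo_ptp_ops *ptp_ops;
};

static int demo_ptp_setup(struct demo_chip *chip)
{
	if (!chip->info->ptp_support || !chip->ptp_ops) {
		printf("skipping PTP clock registration\n");
		return 0;	/* quietly no PTP, not an error */
	}
	printf("registering PTP clock\n");
	return 0;
}

int main(void)
{
	struct demo_info info = { .ptp_support = true };
	struct demo_ptp_ops pops = { 0 };
	struct demo_chip chip = { .info = &info, .ptp_ops = &pops };

	return demo_ptp_setup(&chip);
}
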
+
+static const struct mv88e6xxx_ops mv88e6240_ops = {
+ /* MV88E6XXX_FAMILY_6352 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
+ .irl_init_all = mv88e6352_g2_irl_init_all,
+ .get_eeprom = mv88e6xxx_g2_get_eeprom16,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom16,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
+ .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
+ .port_set_speed_duplex = mv88e6352_port_set_speed_duplex,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_limit = mv88e6097_port_pause_limit,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
+ .stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6095_stats_get_sset_count,
+ .stats_get_strings = mv88e6095_stats_get_strings,
+ .stats_get_stats = mv88e6095_stats_get_stats,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6097_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
+ .reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6352_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .serdes_get_lane = mv88e6352_serdes_get_lane,
+ .serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6352_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6352_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6352_serdes_pcs_link_up,
+ .serdes_power = mv88e6352_serdes_power,
+ .serdes_irq_mapping = mv88e6352_serdes_irq_mapping,
+ .serdes_irq_enable = mv88e6352_serdes_irq_enable,
+ .serdes_irq_status = mv88e6352_serdes_irq_status,
+ .serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6352_serdes_get_regs,
+ .serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude,
+ .gpio_ops = &mv88e6352_gpio_ops,
+ .avb_ops = &mv88e6352_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+ .phylink_get_caps = mv88e6352_phylink_get_caps,
+};
+
+static const struct mv88e6xxx_ops mv88e6250_ops = {
+ /* MV88E6XXX_FAMILY_6250 */
+ .ieee_pri_map = mv88e6250_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
+ .irl_init_all = mv88e6352_g2_irl_init_all,
+ .get_eeprom = mv88e6xxx_g2_get_eeprom16,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom16,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
+ .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
+ .port_set_speed_duplex = mv88e6250_port_set_speed_duplex,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_limit = mv88e6097_port_pause_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6250_stats_get_sset_count,
+ .stats_get_strings = mv88e6250_stats_get_strings,
+ .stats_get_stats = mv88e6250_stats_get_stats,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6250_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
+ .reset = mv88e6250_g1_reset,
+ .vtu_getnext = mv88e6185_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+ .avb_ops = &mv88e6352_avb_ops,
+ .ptp_ops = &mv88e6250_ptp_ops,
+ .phylink_get_caps = mv88e6250_phylink_get_caps,
+};
+
+static const struct mv88e6xxx_ops mv88e6290_ops = {
+ /* MV88E6XXX_FAMILY_6390 */
+ .setup_errata = mv88e6390_setup_errata,
+ .irl_init_all = mv88e6390_g2_irl_init_all,
+ .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom8,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
+ .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+ .port_set_speed_duplex = mv88e6390_port_set_speed_duplex,
+ .port_max_speed_mode = mv88e6390_port_max_speed_mode,
+ .port_tag_remap = mv88e6390_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_pause_limit = mv88e6390_port_pause_limit,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_get_cmode = mv88e6352_port_get_cmode,
+ .port_set_cmode = mv88e6390_port_set_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
+ .stats_snapshot = mv88e6390_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6320_stats_get_sset_count,
+ .stats_get_strings = mv88e6320_stats_get_strings,
+ .stats_get_stats = mv88e6390_stats_get_stats,
+ .set_cpu_port = mv88e6390_g1_set_cpu_port,
+ .set_egress_port = mv88e6390_g1_set_egress_port,
+ .watchdog_ops = &mv88e6390_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
+ .reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
+ .vtu_getnext = mv88e6390_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
+ .serdes_power = mv88e6390_serdes_power,
+ .serdes_get_lane = mv88e6390_serdes_get_lane,
+ /* Check status register pause & lpa register */
+ .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6390_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
+ .serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
+ .serdes_irq_enable = mv88e6390_serdes_irq_enable,
+ .serdes_irq_status = mv88e6390_serdes_irq_status,
+ .serdes_get_strings = mv88e6390_serdes_get_strings,
+ .serdes_get_stats = mv88e6390_serdes_get_stats,
+ .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6390_serdes_get_regs,
+ .gpio_ops = &mv88e6352_gpio_ops,
+ .avb_ops = &mv88e6390_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+ .phylink_get_caps = mv88e6390_phylink_get_caps,
+};
+
+static const struct mv88e6xxx_ops mv88e6320_ops = {
+ /* MV88E6XXX_FAMILY_6320 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
+ .irl_init_all = mv88e6352_g2_irl_init_all,
+ .get_eeprom = mv88e6xxx_g2_get_eeprom16,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom16,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
+ .port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_limit = mv88e6097_port_pause_limit,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
+ .stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6320_stats_get_sset_count,
+ .stats_get_strings = mv88e6320_stats_get_strings,
+ .stats_get_stats = mv88e6320_stats_get_stats,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6390_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
+ .reset = mv88e6352_g1_reset,
+ .vtu_getnext = mv88e6185_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+ .gpio_ops = &mv88e6352_gpio_ops,
+ .avb_ops = &mv88e6352_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
+};
+
+static const struct mv88e6xxx_ops mv88e6321_ops = {
+ /* MV88E6XXX_FAMILY_6320 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
+ .irl_init_all = mv88e6352_g2_irl_init_all,
+ .get_eeprom = mv88e6xxx_g2_get_eeprom16,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom16,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
+ .port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_limit = mv88e6097_port_pause_limit,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
+ .stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6320_stats_get_sset_count,
+ .stats_get_strings = mv88e6320_stats_get_strings,
+ .stats_get_stats = mv88e6320_stats_get_stats,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6390_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
+ .vtu_getnext = mv88e6185_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+ .gpio_ops = &mv88e6352_gpio_ops,
+ .avb_ops = &mv88e6352_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
+};
+
+static const struct mv88e6xxx_ops mv88e6341_ops = {
+ /* MV88E6XXX_FAMILY_6341 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
+ .irl_init_all = mv88e6352_g2_irl_init_all,
+ .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom8,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
+ .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+ .port_set_speed_duplex = mv88e6341_port_set_speed_duplex,
+ .port_max_speed_mode = mv88e6341_port_max_speed_mode,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_limit = mv88e6097_port_pause_limit,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_get_cmode = mv88e6352_port_get_cmode,
+ .port_set_cmode = mv88e6341_port_set_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
+ .stats_snapshot = mv88e6390_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6320_stats_get_sset_count,
+ .stats_get_strings = mv88e6320_stats_get_strings,
+ .stats_get_stats = mv88e6390_stats_get_stats,
+ .set_cpu_port = mv88e6390_g1_set_cpu_port,
+ .set_egress_port = mv88e6390_g1_set_egress_port,
+ .watchdog_ops = &mv88e6390_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
+ .reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .serdes_power = mv88e6390_serdes_power,
+ .serdes_get_lane = mv88e6341_serdes_get_lane,
+ /* Check status register pause & lpa register */
+ .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6390_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
+ .serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
+ .serdes_irq_enable = mv88e6390_serdes_irq_enable,
+ .serdes_irq_status = mv88e6390_serdes_irq_status,
+ .gpio_ops = &mv88e6352_gpio_ops,
+ .avb_ops = &mv88e6390_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+ .serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
+ .serdes_get_strings = mv88e6390_serdes_get_strings,
+ .serdes_get_stats = mv88e6390_serdes_get_stats,
+ .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6390_serdes_get_regs,
+ .phylink_get_caps = mv88e6341_phylink_get_caps,
+};
+
+static const struct mv88e6xxx_ops mv88e6350_ops = {
+ /* MV88E6XXX_FAMILY_6351 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
+ .irl_init_all = mv88e6352_g2_irl_init_all,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
+ .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_limit = mv88e6097_port_pause_limit,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
+ .stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6095_stats_get_sset_count,
+ .stats_get_strings = mv88e6095_stats_get_strings,
+ .stats_get_stats = mv88e6095_stats_get_stats,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6097_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
+ .reset = mv88e6352_g1_reset,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
+};
+
+static const struct mv88e6xxx_ops mv88e6351_ops = {
+ /* MV88E6XXX_FAMILY_6351 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
+ .irl_init_all = mv88e6352_g2_irl_init_all,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
+ .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_limit = mv88e6097_port_pause_limit,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
+ .stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6095_stats_get_sset_count,
+ .stats_get_strings = mv88e6095_stats_get_strings,
+ .stats_get_stats = mv88e6095_stats_get_stats,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6097_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
+ .reset = mv88e6352_g1_reset,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .avb_ops = &mv88e6352_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
+};
+
+static const struct mv88e6xxx_ops mv88e6352_ops = {
+ /* MV88E6XXX_FAMILY_6352 */
+ .ieee_pri_map = mv88e6085_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
+ .irl_init_all = mv88e6352_g2_irl_init_all,
+ .get_eeprom = mv88e6xxx_g2_get_eeprom16,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom16,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
+ .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
+ .port_set_speed_duplex = mv88e6352_port_set_speed_duplex,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_limit = mv88e6097_port_pause_limit,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
+ .stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6095_stats_get_sset_count,
+ .stats_get_strings = mv88e6095_stats_get_strings,
+ .stats_get_stats = mv88e6095_stats_get_stats,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6097_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
+ .reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6352_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .serdes_get_lane = mv88e6352_serdes_get_lane,
+ .serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6352_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6352_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6352_serdes_pcs_link_up,
+ .serdes_power = mv88e6352_serdes_power,
+ .serdes_irq_mapping = mv88e6352_serdes_irq_mapping,
+ .serdes_irq_enable = mv88e6352_serdes_irq_enable,
+ .serdes_irq_status = mv88e6352_serdes_irq_status,
+ .gpio_ops = &mv88e6352_gpio_ops,
+ .avb_ops = &mv88e6352_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+ .serdes_get_sset_count = mv88e6352_serdes_get_sset_count,
+ .serdes_get_strings = mv88e6352_serdes_get_strings,
+ .serdes_get_stats = mv88e6352_serdes_get_stats,
+ .serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6352_serdes_get_regs,
+ .serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude,
+ .phylink_get_caps = mv88e6352_phylink_get_caps,
+};
+
+static const struct mv88e6xxx_ops mv88e6390_ops = {
+ /* MV88E6XXX_FAMILY_6390 */
+ .setup_errata = mv88e6390_setup_errata,
+ .irl_init_all = mv88e6390_g2_irl_init_all,
+ .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom8,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
+ .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+ .port_set_speed_duplex = mv88e6390_port_set_speed_duplex,
+ .port_max_speed_mode = mv88e6390_port_max_speed_mode,
+ .port_tag_remap = mv88e6390_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_limit = mv88e6390_port_pause_limit,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_get_cmode = mv88e6352_port_get_cmode,
+ .port_set_cmode = mv88e6390_port_set_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
+ .stats_snapshot = mv88e6390_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6320_stats_get_sset_count,
+ .stats_get_strings = mv88e6320_stats_get_strings,
+ .stats_get_stats = mv88e6390_stats_get_stats,
+ .set_cpu_port = mv88e6390_g1_set_cpu_port,
+ .set_egress_port = mv88e6390_g1_set_egress_port,
+ .watchdog_ops = &mv88e6390_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
+ .reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
+ .vtu_getnext = mv88e6390_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
+ .serdes_power = mv88e6390_serdes_power,
+ .serdes_get_lane = mv88e6390_serdes_get_lane,
+ /* Check status register pause & lpa register */
+ .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6390_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
+ .serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
+ .serdes_irq_enable = mv88e6390_serdes_irq_enable,
+ .serdes_irq_status = mv88e6390_serdes_irq_status,
+ .gpio_ops = &mv88e6352_gpio_ops,
+ .avb_ops = &mv88e6390_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+ .serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
+ .serdes_get_strings = mv88e6390_serdes_get_strings,
+ .serdes_get_stats = mv88e6390_serdes_get_stats,
+ .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6390_serdes_get_regs,
+ .phylink_get_caps = mv88e6390_phylink_get_caps,
+};
+
+static const struct mv88e6xxx_ops mv88e6390x_ops = {
+ /* MV88E6XXX_FAMILY_6390 */
+ .setup_errata = mv88e6390_setup_errata,
+ .irl_init_all = mv88e6390_g2_irl_init_all,
+ .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom8,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
+ .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+ .port_set_speed_duplex = mv88e6390x_port_set_speed_duplex,
+ .port_max_speed_mode = mv88e6390x_port_max_speed_mode,
+ .port_tag_remap = mv88e6390_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_limit = mv88e6390_port_pause_limit,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_get_cmode = mv88e6352_port_get_cmode,
+ .port_set_cmode = mv88e6390x_port_set_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
+ .stats_snapshot = mv88e6390_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6320_stats_get_sset_count,
+ .stats_get_strings = mv88e6320_stats_get_strings,
+ .stats_get_stats = mv88e6390_stats_get_stats,
+ .set_cpu_port = mv88e6390_g1_set_cpu_port,
+ .set_egress_port = mv88e6390_g1_set_egress_port,
+ .watchdog_ops = &mv88e6390_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
+ .reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
+ .vtu_getnext = mv88e6390_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
+ .serdes_power = mv88e6390_serdes_power,
+ .serdes_get_lane = mv88e6390x_serdes_get_lane,
+ .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6390_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
+ .serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
+ .serdes_irq_enable = mv88e6390_serdes_irq_enable,
+ .serdes_irq_status = mv88e6390_serdes_irq_status,
+ .serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
+ .serdes_get_strings = mv88e6390_serdes_get_strings,
+ .serdes_get_stats = mv88e6390_serdes_get_stats,
+ .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6390_serdes_get_regs,
+ .gpio_ops = &mv88e6352_gpio_ops,
+ .avb_ops = &mv88e6390_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+ .phylink_get_caps = mv88e6390x_phylink_get_caps,
+};
+
+static const struct mv88e6xxx_ops mv88e6393x_ops = {
+ /* MV88E6XXX_FAMILY_6393 */
+ .setup_errata = mv88e6393x_serdes_setup_errata,
+ .irl_init_all = mv88e6390_g2_irl_init_all,
+ .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom8,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
+ .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+ .port_set_speed_duplex = mv88e6393x_port_set_speed_duplex,
+ .port_max_speed_mode = mv88e6393x_port_max_speed_mode,
+ .port_tag_remap = mv88e6390_port_tag_remap,
+ .port_set_policy = mv88e6393x_port_set_policy,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
+ .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
+ .port_set_ether_type = mv88e6393x_port_set_ether_type,
+ .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_limit = mv88e6390_port_pause_limit,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_get_cmode = mv88e6352_port_get_cmode,
+ .port_set_cmode = mv88e6393x_port_set_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
+ .port_set_upstream_port = mv88e6393x_port_set_upstream_port,
+ .stats_snapshot = mv88e6390_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6320_stats_get_sset_count,
+ .stats_get_strings = mv88e6320_stats_get_strings,
+ .stats_get_stats = mv88e6390_stats_get_stats,
+ /* .set_cpu_port is missing because this family does not support a global
+ * CPU port, only per port CPU port which is set via
+ * .port_set_upstream_port method.
+ */
+ .set_egress_port = mv88e6393x_set_egress_port,
+ .watchdog_ops = &mv88e6393x_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6393x_port_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
+ .reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
+ .vtu_getnext = mv88e6390_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
+ .serdes_power = mv88e6393x_serdes_power,
+ .serdes_get_lane = mv88e6393x_serdes_get_lane,
+ .serdes_pcs_get_state = mv88e6393x_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6390_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
+ .serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
+ .serdes_irq_enable = mv88e6393x_serdes_irq_enable,
+ .serdes_irq_status = mv88e6393x_serdes_irq_status,
+ /* TODO: serdes stats */
+ .gpio_ops = &mv88e6352_gpio_ops,
+ .avb_ops = &mv88e6390_avb_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
+ .phylink_get_caps = mv88e6393x_phylink_get_caps,
+};
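
As the comment in mv88e6393x_ops notes, this family has no global CPU-port register and instead programs an upstream port on each port. A rough stand-in sketch of setup code choosing whichever mechanism a chip provides; this is illustrative only, and the kernel's actual sequencing may differ:

#include <stdio.h>

#define NPORTS 3

struct demo_ops {
	int (*set_cpu_port)(int port);			/* global, optional */
	int (*port_set_upstream_port)(int port, int upstream);
};

static int set_upstream(int port, int upstream)
{
	printf("port %d: upstream -> %d\n", port, upstream);
	return 0;
}

/* Point every user port at the CPU using whichever op the chip offers. */
static void demo_setup_cpu_path(const struct demo_ops *ops, int cpu_port)
{
	int port;

	if (ops->set_cpu_port) {
		ops->set_cpu_port(cpu_port);
		return;
	}
	for (port = 0; port < NPORTS; port++)
		if (ops->port_set_upstream_port)
			ops->port_set_upstream_port(port, cpu_port);
}

int main(void)
{
	const struct demo_ops ops6393x = {
		.port_set_upstream_port = set_upstream,
	};

	demo_setup_cpu_path(&ops6393x, 0);
	return 0;
}
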
+
+static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+ [MV88E6085] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6085,
+ .family = MV88E6XXX_FAMILY_6097,
+ .name = "Marvell 88E6085",
+ .num_databases = 4096,
+ .num_macs = 8192,
+ .num_ports = 10,
+ .num_internal_phys = 5,
+ .max_vid = 4095,
+ .max_sid = 63,
+ .port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .global2_addr = 0x1c,
+ .age_time_coeff = 15000,
+ .g1_irqs = 8,
+ .g2_irqs = 10,
+ .atu_move_port_mask = 0xf,
+ .pvt = true,
+ .multi_chip = true,
+ .ops = &mv88e6085_ops,
+ },
+
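
mv88e6xxx_table is indexed by model and keyed for lookup by .prod_num, the product-number field of the switch ID register read at probe time; the matching entry supplies the chip's name, limits, and ops table. A minimal sketch of such a lookup, with stand-in types and illustrative values rather than the real register encodings:

#include <stdio.h>
#include <stddef.h>

struct demo_info {
	unsigned int prod_num;
	const char *name;
};

static const struct demo_info demo_table[] = {
	{ .prod_num = 0x04a, .name = "Marvell 88E6085" },	/* values illustrative */
	{ .prod_num = 0x095, .name = "Marvell 88E6095/88E6095F" },
};

static const struct demo_info *demo_lookup(unsigned int prod_num)
{
	size_t i;

	for (i = 0; i < sizeof(demo_table) / sizeof(demo_table[0]); i++)
		if (demo_table[i].prod_num == prod_num)
			return &demo_table[i];
	return NULL;	/* unknown switch: probing fails */
}

int main(void)
{
	const struct demo_info *info = demo_lookup(0x095);

	printf("%s\n", info ? info->name : "unknown");
	return 0;
}
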
+ [MV88E6095] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6095,
+ .family = MV88E6XXX_FAMILY_6095,
+ .name = "Marvell 88E6095/88E6095F",
+ .num_databases = 256,
+ .num_macs = 8192,
+ .num_ports = 11,
+ .num_internal_phys = 0,
+ .max_vid = 4095,
+ .port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .global2_addr = 0x1c,
+ .age_time_coeff = 15000,
+ .g1_irqs = 8,
+ .atu_move_port_mask = 0xf,
+ .multi_chip = true,
+ .ops = &mv88e6095_ops,
+ },
+
+ [MV88E6097] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6097,
+ .family = MV88E6XXX_FAMILY_6097,
+ .name = "Marvell 88E6097/88E6097F",
+ .num_databases = 4096,
+ .num_macs = 8192,
+ .num_ports = 11,
+ .num_internal_phys = 8,
+ .max_vid = 4095,
+ .max_sid = 63,
+ .port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .global2_addr = 0x1c,
+ .age_time_coeff = 15000,
+ .g1_irqs = 8,
+ .g2_irqs = 10,
+ .atu_move_port_mask = 0xf,
+ .pvt = true,
+ .multi_chip = true,
+ .edsa_support = MV88E6XXX_EDSA_SUPPORTED,
+ .ops = &mv88e6097_ops,
+ },
+
+ [MV88E6123] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6123,
+ .family = MV88E6XXX_FAMILY_6165,
+ .name = "Marvell 88E6123",
+ .num_databases = 4096,
+ .num_macs = 1024,
+ .num_ports = 3,
+ .num_internal_phys = 5,
+ .max_vid = 4095,
+ .max_sid = 63,
+ .port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .global2_addr = 0x1c,
+ .age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .g2_irqs = 10,
+ .atu_move_port_mask = 0xf,
+ .pvt = true,
+ .multi_chip = true,
+ .edsa_support = MV88E6XXX_EDSA_SUPPORTED,
+ .ops = &mv88e6123_ops,
+ },
+
+ [MV88E6131] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6131,
+ .family = MV88E6XXX_FAMILY_6185,
+ .name = "Marvell 88E6131",
+ .num_databases = 256,
+ .num_macs = 8192,
+ .num_ports = 8,
+ .num_internal_phys = 0,
+ .max_vid = 4095,
+ .port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .global2_addr = 0x1c,
+ .age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .atu_move_port_mask = 0xf,
+ .multi_chip = true,
+ .ops = &mv88e6131_ops,
+ },
+
+ [MV88E6141] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6141,
+ .family = MV88E6XXX_FAMILY_6341,
+ .name = "Marvell 88E6141",
+ .num_databases = 4096,
+ .num_macs = 2048,
+ .num_ports = 6,
+ .num_internal_phys = 5,
+ .num_gpio = 11,
+ .max_vid = 4095,
+ .max_sid = 63,
+ .port_base_addr = 0x10,
+ .phy_base_addr = 0x10,
+ .global1_addr = 0x1b,
+ .global2_addr = 0x1c,
+ .age_time_coeff = 3750,
+ .atu_move_port_mask = 0x1f,
+ .g1_irqs = 9,
+ .g2_irqs = 10,
+ .pvt = true,
+ .multi_chip = true,
+ .edsa_support = MV88E6XXX_EDSA_SUPPORTED,
+ .ops = &mv88e6141_ops,
+ },
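
.age_time_coeff differs per family (15000 ms on the older chips, 3750 ms here), and is plausibly the granularity of the ATU age-time register: a requested ageing time is divided by the coefficient and range-checked before being written. A hedged arithmetic sketch, assuming an 8-bit register field and nearest-value rounding (the kernel's exact rounding may differ):

#include <stdio.h>

static int demo_set_ageing_time(unsigned int msecs, unsigned int coeff)
{
	/* Round to the nearest representable multiple of the coefficient. */
	unsigned int val = (msecs + coeff / 2) / coeff;

	if (val < 1 || val > 0xff)	/* assumed register width */
		return -1;
	printf("%u ms -> register value %u (%u ms effective)\n",
	       msecs, val, val * coeff);
	return 0;
}

int main(void)
{
	demo_set_ageing_time(300000, 15000);	/* classic 5 min, 15 s units */
	demo_set_ageing_time(300000, 3750);	/* finer-grained families */
	return 0;
}
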
+
+ [MV88E6161] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6161,
+ .family = MV88E6XXX_FAMILY_6165,
+ .name = "Marvell 88E6161",
+ .num_databases = 4096,
+ .num_macs = 1024,
+ .num_ports = 6,
+ .num_internal_phys = 5,
+ .max_vid = 4095,
+ .max_sid = 63,
+ .port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .global2_addr = 0x1c,
+ .age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .g2_irqs = 10,
+ .atu_move_port_mask = 0xf,
+ .pvt = true,
+ .multi_chip = true,
+ .edsa_support = MV88E6XXX_EDSA_SUPPORTED,
+ .ptp_support = true,
+ .ops = &mv88e6161_ops,
+ },
+
+ [MV88E6165] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6165,
+ .family = MV88E6XXX_FAMILY_6165,
+ .name = "Marvell 88E6165",
+ .num_databases = 4096,
+ .num_macs = 8192,
+ .num_ports = 6,
+ .num_internal_phys = 0,
+ .max_vid = 4095,
+ .max_sid = 63,
+ .port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .global2_addr = 0x1c,
+ .age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .g2_irqs = 10,
+ .atu_move_port_mask = 0xf,
+ .pvt = true,
+ .multi_chip = true,
+ .ptp_support = true,
+ .ops = &mv88e6165_ops,
+ },
+
+ [MV88E6171] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6171,
+ .family = MV88E6XXX_FAMILY_6351,
+ .name = "Marvell 88E6171",
+ .num_databases = 4096,
+ .num_macs = 8192,
+ .num_ports = 7,
+ .num_internal_phys = 5,
+ .max_vid = 4095,
+ .max_sid = 63,
+ .port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .global2_addr = 0x1c,
+ .age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .g2_irqs = 10,
+ .atu_move_port_mask = 0xf,
+ .pvt = true,
+ .multi_chip = true,
+ .edsa_support = MV88E6XXX_EDSA_SUPPORTED,
+ .ops = &mv88e6171_ops,
+ },
+
+ [MV88E6172] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6172,
+ .family = MV88E6XXX_FAMILY_6352,
+ .name = "Marvell 88E6172",
+ .num_databases = 4096,
+ .num_macs = 8192,
+ .num_ports = 7,
+ .num_internal_phys = 5,
+ .num_gpio = 15,
+ .max_vid = 4095,
+ .max_sid = 63,
+ .port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .global2_addr = 0x1c,
+ .age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .g2_irqs = 10,
+ .atu_move_port_mask = 0xf,
+ .pvt = true,
+ .multi_chip = true,
+ .edsa_support = MV88E6XXX_EDSA_SUPPORTED,
+ .ops = &mv88e6172_ops,
+ },
+
+ [MV88E6175] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6175,
+ .family = MV88E6XXX_FAMILY_6351,
+ .name = "Marvell 88E6175",
+ .num_databases = 4096,
+ .num_macs = 8192,
+ .num_ports = 7,
+ .num_internal_phys = 5,
+ .max_vid = 4095,
+ .max_sid = 63,
+ .port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .global2_addr = 0x1c,
+ .age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .g2_irqs = 10,
+ .atu_move_port_mask = 0xf,
+ .pvt = true,
+ .multi_chip = true,
+ .edsa_support = MV88E6XXX_EDSA_SUPPORTED,
+ .ops = &mv88e6175_ops,
+ },
+
+ [MV88E6176] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6176,
+ .family = MV88E6XXX_FAMILY_6352,
+ .name = "Marvell 88E6176",
+ .num_databases = 4096,
+ .num_macs = 8192,
+ .num_ports = 7,
+ .num_internal_phys = 5,
+ .num_gpio = 15,
+ .max_vid = 4095,
+ .max_sid = 63,
+ .port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .global2_addr = 0x1c,
+ .age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .g2_irqs = 10,
+ .atu_move_port_mask = 0xf,
+ .pvt = true,
+ .multi_chip = true,
+ .edsa_support = MV88E6XXX_EDSA_SUPPORTED,
+ .ops = &mv88e6176_ops,
+ },
+
+ [MV88E6185] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6185,
+ .family = MV88E6XXX_FAMILY_6185,
+ .name = "Marvell 88E6185",
+ .num_databases = 256,
+ .num_macs = 8192,
+ .num_ports = 10,
+ .num_internal_phys = 0,
+ .max_vid = 4095,
+ .port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .global2_addr = 0x1c,
+ .age_time_coeff = 15000,
+ .g1_irqs = 8,
+ .atu_move_port_mask = 0xf,
+ .multi_chip = true,
+ .edsa_support = MV88E6XXX_EDSA_SUPPORTED,
+ .ops = &mv88e6185_ops,
+ },
+
+ [MV88E6190] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6190,
+ .family = MV88E6XXX_FAMILY_6390,
+ .name = "Marvell 88E6190",
+ .num_databases = 4096,
+ .num_macs = 16384,
+ .num_ports = 11, /* 10 + Z80 */
+ .num_internal_phys = 9,
+ .num_gpio = 16,
+ .max_vid = 8191,
+ .max_sid = 63,
+ .port_base_addr = 0x0,
+ .phy_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .global2_addr = 0x1c,
+ .age_time_coeff = 3750,
+ .g1_irqs = 9,
+ .g2_irqs = 14,
+ .pvt = true,
+ .multi_chip = true,
+ .atu_move_port_mask = 0x1f,
+ .ops = &mv88e6190_ops,
+ },
+
+ [MV88E6190X] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6190X,
+ .family = MV88E6XXX_FAMILY_6390,
+ .name = "Marvell 88E6190X",
+ .num_databases = 4096,
+ .num_macs = 16384,
+ .num_ports = 11, /* 10 + Z80 */
+ .num_internal_phys = 9,
+ .num_gpio = 16,
+ .max_vid = 8191,
+ .max_sid = 63,
+ .port_base_addr = 0x0,
+ .phy_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .global2_addr = 0x1c,
+ .age_time_coeff = 3750,
+ .g1_irqs = 9,
+ .g2_irqs = 14,
+ .atu_move_port_mask = 0x1f,
+ .pvt = true,
+ .multi_chip = true,
+ .ops = &mv88e6190x_ops,
+ },
+
+ [MV88E6191] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6191,
+ .family = MV88E6XXX_FAMILY_6390,
+ .name = "Marvell 88E6191",
+ .num_databases = 4096,
+ .num_macs = 16384,
+ .num_ports = 11, /* 10 + Z80 */
+ .num_internal_phys = 9,
+ .max_vid = 8191,
+ .max_sid = 63,
+ .port_base_addr = 0x0,
+ .phy_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .global2_addr = 0x1c,
+ .age_time_coeff = 3750,
+ .g1_irqs = 9,
+ .g2_irqs = 14,
+ .atu_move_port_mask = 0x1f,
+ .pvt = true,
+ .multi_chip = true,
+ .ptp_support = true,
+ .ops = &mv88e6191_ops,
+ },
+
+ [MV88E6191X] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6191X,
+ .family = MV88E6XXX_FAMILY_6393,
+ .name = "Marvell 88E6191X",
+ .num_databases = 4096,
+ .num_ports = 11, /* 10 + Z80 */
+ .num_internal_phys = 9,
+ .max_vid = 8191,
+ .max_sid = 63,
+ .port_base_addr = 0x0,
+ .phy_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .global2_addr = 0x1c,
+ .age_time_coeff = 3750,
+ .g1_irqs = 10,
+ .g2_irqs = 14,
+ .atu_move_port_mask = 0x1f,
+ .pvt = true,
+ .multi_chip = true,
+ .ptp_support = true,
+ .ops = &mv88e6393x_ops,
+ },
+
+ [MV88E6193X] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6193X,
+ .family = MV88E6XXX_FAMILY_6393,
+ .name = "Marvell 88E6193X",
+ .num_databases = 4096,
+ .num_ports = 11, /* 10 + Z80 */
+ .num_internal_phys = 9,
+ .max_vid = 8191,
+ .max_sid = 63,
+ .port_base_addr = 0x0,
+ .phy_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .global2_addr = 0x1c,
+ .age_time_coeff = 3750,
+ .g1_irqs = 10,
+ .g2_irqs = 14,
+ .atu_move_port_mask = 0x1f,
+ .pvt = true,
+ .multi_chip = true,
+ .ptp_support = true,
+ .ops = &mv88e6393x_ops,
+ },
+
+ [MV88E6220] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6220,
+ .family = MV88E6XXX_FAMILY_6250,
+ .name = "Marvell 88E6220",
+ .num_databases = 64,
+
+ /* Ports 2-4 are not routed to pins
+ * => usable ports 0, 1, 5, 6
+ */
+ .num_ports = 7,
+ .num_internal_phys = 2,
+ .invalid_port_mask = BIT(2) | BIT(3) | BIT(4),
+ .max_vid = 4095,
+ .port_base_addr = 0x08,
+ .phy_base_addr = 0x00,
+ .global1_addr = 0x0f,
+ .global2_addr = 0x07,
+ .age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .g2_irqs = 10,
+ .atu_move_port_mask = 0xf,
+ .dual_chip = true,
+ .ptp_support = true,
+ .ops = &mv88e6250_ops,
+ },
+
+ [MV88E6240] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6240,
+ .family = MV88E6XXX_FAMILY_6352,
+ .name = "Marvell 88E6240",
+ .num_databases = 4096,
+ .num_macs = 8192,
+ .num_ports = 7,
+ .num_internal_phys = 5,
+ .num_gpio = 15,
+ .max_vid = 4095,
+ .max_sid = 63,
+ .port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .global2_addr = 0x1c,
+ .age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .g2_irqs = 10,
+ .atu_move_port_mask = 0xf,
+ .pvt = true,
+ .multi_chip = true,
+ .edsa_support = MV88E6XXX_EDSA_SUPPORTED,
+ .ptp_support = true,
+ .ops = &mv88e6240_ops,
+ },
+
+ [MV88E6250] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6250,
+ .family = MV88E6XXX_FAMILY_6250,
+ .name = "Marvell 88E6250",
+ .num_databases = 64,
+ .num_ports = 7,
+ .num_internal_phys = 5,
+ .max_vid = 4095,
+ .port_base_addr = 0x08,
+ .phy_base_addr = 0x00,
+ .global1_addr = 0x0f,
+ .global2_addr = 0x07,
+ .age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .g2_irqs = 10,
+ .atu_move_port_mask = 0xf,
+ .dual_chip = true,
+ .ptp_support = true,
+ .ops = &mv88e6250_ops,
+ },
+
+ [MV88E6290] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6290,
+ .family = MV88E6XXX_FAMILY_6390,
+ .name = "Marvell 88E6290",
+ .num_databases = 4096,
+ .num_ports = 11, /* 10 + Z80 */
+ .num_internal_phys = 9,
+ .num_gpio = 16,
+ .max_vid = 8191,
+ .max_sid = 63,
+ .port_base_addr = 0x0,
+ .phy_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .global2_addr = 0x1c,
+ .age_time_coeff = 3750,
+ .g1_irqs = 9,
+ .g2_irqs = 14,
+ .atu_move_port_mask = 0x1f,
+ .pvt = true,
+ .multi_chip = true,
+ .ptp_support = true,
+ .ops = &mv88e6290_ops,
+ },
+
+ [MV88E6320] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6320,
+ .family = MV88E6XXX_FAMILY_6320,
+ .name = "Marvell 88E6320",
+ .num_databases = 4096,
+ .num_macs = 8192,
+ .num_ports = 7,
+ .num_internal_phys = 5,
+ .num_gpio = 15,
+ .max_vid = 4095,
+ .port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .global2_addr = 0x1c,
+ .age_time_coeff = 15000,
+ .g1_irqs = 8,
+ .g2_irqs = 10,
+ .atu_move_port_mask = 0xf,
+ .pvt = true,
+ .multi_chip = true,
+ .edsa_support = MV88E6XXX_EDSA_SUPPORTED,
+ .ptp_support = true,
+ .ops = &mv88e6320_ops,
+ },
+
+ [MV88E6321] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6321,
+ .family = MV88E6XXX_FAMILY_6320,
+ .name = "Marvell 88E6321",
+ .num_databases = 4096,
+ .num_macs = 8192,
+ .num_ports = 7,
+ .num_internal_phys = 5,
+ .num_gpio = 15,
+ .max_vid = 4095,
+ .port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .global2_addr = 0x1c,
+ .age_time_coeff = 15000,
+ .g1_irqs = 8,
+ .g2_irqs = 10,
+ .atu_move_port_mask = 0xf,
+ .multi_chip = true,
+ .edsa_support = MV88E6XXX_EDSA_SUPPORTED,
+ .ptp_support = true,
+ .ops = &mv88e6321_ops,
+ },
+
+ [MV88E6341] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
+ .family = MV88E6XXX_FAMILY_6341,
+ .name = "Marvell 88E6341",
+ .num_databases = 4096,
+ .num_macs = 2048,
+ .num_internal_phys = 5,
+ .num_ports = 6,
+ .num_gpio = 11,
+ .max_vid = 4095,
+ .max_sid = 63,
+ .port_base_addr = 0x10,
+ .phy_base_addr = 0x10,
+ .global1_addr = 0x1b,
+ .global2_addr = 0x1c,
+ .age_time_coeff = 3750,
+ .atu_move_port_mask = 0x1f,
+ .g1_irqs = 9,
+ .g2_irqs = 10,
+ .pvt = true,
+ .multi_chip = true,
+ .edsa_support = MV88E6XXX_EDSA_SUPPORTED,
+ .ptp_support = true,
+ .ops = &mv88e6341_ops,
+ },
+
+ [MV88E6350] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6350,
+ .family = MV88E6XXX_FAMILY_6351,
+ .name = "Marvell 88E6350",
+ .num_databases = 4096,
+ .num_macs = 8192,
+ .num_ports = 7,
+ .num_internal_phys = 5,
+ .max_vid = 4095,
+ .max_sid = 63,
+ .port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .global2_addr = 0x1c,
+ .age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .g2_irqs = 10,
+ .atu_move_port_mask = 0xf,
+ .pvt = true,
+ .multi_chip = true,
+ .edsa_support = MV88E6XXX_EDSA_SUPPORTED,
+ .ops = &mv88e6350_ops,
+ },
+
+ [MV88E6351] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6351,
+ .family = MV88E6XXX_FAMILY_6351,
+ .name = "Marvell 88E6351",
+ .num_databases = 4096,
+ .num_macs = 8192,
+ .num_ports = 7,
+ .num_internal_phys = 5,
+ .max_vid = 4095,
+ .max_sid = 63,
+ .port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .global2_addr = 0x1c,
+ .age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .g2_irqs = 10,
+ .atu_move_port_mask = 0xf,
+ .pvt = true,
+ .multi_chip = true,
+ .edsa_support = MV88E6XXX_EDSA_SUPPORTED,
+ .ops = &mv88e6351_ops,
+ },
+
+ [MV88E6352] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6352,
+ .family = MV88E6XXX_FAMILY_6352,
+ .name = "Marvell 88E6352",
+ .num_databases = 4096,
+ .num_macs = 8192,
+ .num_ports = 7,
+ .num_internal_phys = 5,
+ .num_gpio = 15,
+ .max_vid = 4095,
+ .max_sid = 63,
+ .port_base_addr = 0x10,
+ .phy_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .global2_addr = 0x1c,
+ .age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .g2_irqs = 10,
+ .atu_move_port_mask = 0xf,
+ .pvt = true,
+ .multi_chip = true,
+ .edsa_support = MV88E6XXX_EDSA_SUPPORTED,
+ .ptp_support = true,
+ .ops = &mv88e6352_ops,
+ },
+ [MV88E6390] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6390,
+ .family = MV88E6XXX_FAMILY_6390,
+ .name = "Marvell 88E6390",
+ .num_databases = 4096,
+ .num_macs = 16384,
+ .num_ports = 11, /* 10 + Z80 */
+ .num_internal_phys = 9,
+ .num_gpio = 16,
+ .max_vid = 8191,
+ .max_sid = 63,
+ .port_base_addr = 0x0,
+ .phy_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .global2_addr = 0x1c,
+ .age_time_coeff = 3750,
+ .g1_irqs = 9,
+ .g2_irqs = 14,
+ .atu_move_port_mask = 0x1f,
+ .pvt = true,
+ .multi_chip = true,
+ .edsa_support = MV88E6XXX_EDSA_UNDOCUMENTED,
+ .ptp_support = true,
+ .ops = &mv88e6390_ops,
+ },
+ [MV88E6390X] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6390X,
+ .family = MV88E6XXX_FAMILY_6390,
+ .name = "Marvell 88E6390X",
+ .num_databases = 4096,
+ .num_macs = 16384,
+ .num_ports = 11, /* 10 + Z80 */
+ .num_internal_phys = 9,
+ .num_gpio = 16,
+ .max_vid = 8191,
+ .max_sid = 63,
+ .port_base_addr = 0x0,
+ .phy_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .global2_addr = 0x1c,
+ .age_time_coeff = 3750,
+ .g1_irqs = 9,
+ .g2_irqs = 14,
+ .atu_move_port_mask = 0x1f,
+ .pvt = true,
+ .multi_chip = true,
+ .edsa_support = MV88E6XXX_EDSA_UNDOCUMENTED,
+ .ptp_support = true,
+ .ops = &mv88e6390x_ops,
+ },
+
+ [MV88E6393X] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6393X,
+ .family = MV88E6XXX_FAMILY_6393,
+ .name = "Marvell 88E6393X",
+ .num_databases = 4096,
+ .num_ports = 11, /* 10 + Z80 */
+ .num_internal_phys = 9,
+ .max_vid = 8191,
+ .max_sid = 63,
+ .port_base_addr = 0x0,
+ .phy_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .global2_addr = 0x1c,
+ .age_time_coeff = 3750,
+ .g1_irqs = 10,
+ .g2_irqs = 14,
+ .atu_move_port_mask = 0x1f,
+ .pvt = true,
+ .multi_chip = true,
+ .ptp_support = true,
+ .ops = &mv88e6393x_ops,
+ },
+};
+
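+/* A linear scan is fine here: the table is small and the lookup only
+ * happens at probe time.
+ */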
+static const struct mv88e6xxx_info *mv88e6xxx_lookup_info(unsigned int prod_num)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mv88e6xxx_table); ++i)
+ if (mv88e6xxx_table[i].prod_num == prod_num)
+ return &mv88e6xxx_table[i];
+
+ return NULL;
+}
+
+static int mv88e6xxx_detect(struct mv88e6xxx_chip *chip)
+{
+ const struct mv88e6xxx_info *info;
+ unsigned int prod_num, rev;
+ u16 id;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_port_read(chip, 0, MV88E6XXX_PORT_SWITCH_ID, &id);
+ mv88e6xxx_reg_unlock(chip);
+ if (err)
+ return err;
+
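+	/* The 16-bit switch ID packs the product number in the upper 12
+	 * bits and the silicon revision in the low 4 bits.
+	 */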
+ prod_num = id & MV88E6XXX_PORT_SWITCH_ID_PROD_MASK;
+ rev = id & MV88E6XXX_PORT_SWITCH_ID_REV_MASK;
+
+ info = mv88e6xxx_lookup_info(prod_num);
+ if (!info)
+ return -ENODEV;
+
+ /* Update the compatible info with the probed one */
+ chip->info = info;
+
+ dev_info(chip->dev, "switch 0x%x detected: %s, revision %u\n",
+ chip->info->prod_num, chip->info->name, rev);
+
+ return 0;
+}
+
+static int mv88e6xxx_single_chip_detect(struct mv88e6xxx_chip *chip,
+ struct mdio_device *mdiodev)
+{
+ int err;
+
+ /* dual_chip takes precedence over single/multi-chip modes */
+ if (chip->info->dual_chip)
+ return -EINVAL;
+
+	/* An MDIO address of 16 is the first port address of a switch
+	 * (e.g. mv88e6*41) in single chip addressing mode, so the device
+	 * may be configured that way. Set up SMI access for single chip
+	 * addressing mode and attempt to detect the switch model; if this
+	 * fails, the device is not configured in single chip addressing
+	 * mode.
+	 */
+ if (mdiodev->addr != 16)
+ return -EINVAL;
+
+ err = mv88e6xxx_smi_init(chip, mdiodev->bus, 0);
+ if (err)
+ return err;
+
+ return mv88e6xxx_detect(chip);
+}
+
+static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev)
+{
+ struct mv88e6xxx_chip *chip;
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return NULL;
+
+ chip->dev = dev;
+
+ mutex_init(&chip->reg_lock);
+ INIT_LIST_HEAD(&chip->mdios);
+ idr_init(&chip->policies);
+ INIT_LIST_HEAD(&chip->msts);
+
+ return chip;
+}
+
+static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol m)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+
+ return chip->tag_protocol;
+}
+
+static int mv88e6xxx_change_tag_protocol(struct dsa_switch *ds,
+ enum dsa_tag_protocol proto)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ enum dsa_tag_protocol old_protocol;
+ struct dsa_port *cpu_dp;
+ int err;
+
+ switch (proto) {
+ case DSA_TAG_PROTO_EDSA:
+ switch (chip->info->edsa_support) {
+ case MV88E6XXX_EDSA_UNSUPPORTED:
+ return -EPROTONOSUPPORT;
+ case MV88E6XXX_EDSA_UNDOCUMENTED:
+ dev_warn(chip->dev, "Relying on undocumented EDSA tagging behavior\n");
+ fallthrough;
+ case MV88E6XXX_EDSA_SUPPORTED:
+ break;
+ }
+ break;
+ case DSA_TAG_PROTO_DSA:
+ break;
+ default:
+ return -EPROTONOSUPPORT;
+ }
+
+ old_protocol = chip->tag_protocol;
+ chip->tag_protocol = proto;
+
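+	/* Reprogram every CPU port for the new protocol; on failure, roll
+	 * the ports already changed back to the old protocol.
+	 */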
+ mv88e6xxx_reg_lock(chip);
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ err = mv88e6xxx_setup_port_mode(chip, cpu_dp->index);
+ if (err) {
+ mv88e6xxx_reg_unlock(chip);
+ goto unwind;
+ }
+ }
+ mv88e6xxx_reg_unlock(chip);
+
+ return 0;
+
+unwind:
+ chip->tag_protocol = old_protocol;
+
+ mv88e6xxx_reg_lock(chip);
+ dsa_switch_for_each_cpu_port_continue_reverse(cpu_dp, ds)
+ mv88e6xxx_setup_port_mode(chip, cpu_dp->index);
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid,
+ MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC);
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid, 0);
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int mv88e6xxx_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ enum mv88e6xxx_egress_direction direction = ingress ?
+ MV88E6XXX_EGRESS_DIR_INGRESS :
+ MV88E6XXX_EGRESS_DIR_EGRESS;
+ struct mv88e6xxx_chip *chip = ds->priv;
+ bool other_mirrors = false;
+ int i;
+ int err;
+
+ mutex_lock(&chip->reg_lock);
+ if ((ingress ? chip->ingress_dest_port : chip->egress_dest_port) !=
+ mirror->to_local_port) {
+ for (i = 0; i < mv88e6xxx_num_ports(chip); i++)
+ other_mirrors |= ingress ?
+ chip->ports[i].mirror_ingress :
+ chip->ports[i].mirror_egress;
+
+ /* Can't change egress port when other mirror is active */
+ if (other_mirrors) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = mv88e6xxx_set_egress_port(chip, direction,
+ mirror->to_local_port);
+ if (err)
+ goto out;
+ }
+
+ err = mv88e6xxx_port_set_mirror(chip, port, direction, true);
+out:
+ mutex_unlock(&chip->reg_lock);
+
+ return err;
+}
+
+static void mv88e6xxx_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ enum mv88e6xxx_egress_direction direction = mirror->ingress ?
+ MV88E6XXX_EGRESS_DIR_INGRESS :
+ MV88E6XXX_EGRESS_DIR_EGRESS;
+ struct mv88e6xxx_chip *chip = ds->priv;
+ bool other_mirrors = false;
+ int i;
+
+ mutex_lock(&chip->reg_lock);
+ if (mv88e6xxx_port_set_mirror(chip, port, direction, false))
+ dev_err(ds->dev, "p%d: failed to disable mirroring\n", port);
+
+ for (i = 0; i < mv88e6xxx_num_ports(chip); i++)
+ other_mirrors |= mirror->ingress ?
+ chip->ports[i].mirror_ingress :
+ chip->ports[i].mirror_egress;
+
+ /* Reset egress port when no other mirror is active */
+ if (!other_mirrors) {
+ if (mv88e6xxx_set_egress_port(chip, direction,
+ dsa_upstream_port(ds, port)))
+ dev_err(ds->dev, "failed to set egress port\n");
+ }
+
+ mutex_unlock(&chip->reg_lock);
+}
+
+static int mv88e6xxx_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ const struct mv88e6xxx_ops *ops;
+
+ if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
+ BR_BCAST_FLOOD | BR_PORT_LOCKED))
+ return -EINVAL;
+
+ ops = chip->info->ops;
+
+ if ((flags.mask & BR_FLOOD) && !ops->port_set_ucast_flood)
+ return -EINVAL;
+
+ if ((flags.mask & BR_MCAST_FLOOD) && !ops->port_set_mcast_flood)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int mv88e6xxx_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err = -EOPNOTSUPP;
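+	/* err is returned as-is if the mask selects none of the flags
+	 * handled below.
+	 */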
+
+ mv88e6xxx_reg_lock(chip);
+
+ if (flags.mask & BR_LEARNING) {
+ bool learning = !!(flags.val & BR_LEARNING);
+ u16 pav = learning ? (1 << port) : 0;
+
+ err = mv88e6xxx_port_set_assoc_vector(chip, port, pav);
+ if (err)
+ goto out;
+ }
+
+ if (flags.mask & BR_FLOOD) {
+ bool unicast = !!(flags.val & BR_FLOOD);
+
+ err = chip->info->ops->port_set_ucast_flood(chip, port,
+ unicast);
+ if (err)
+ goto out;
+ }
+
+ if (flags.mask & BR_MCAST_FLOOD) {
+ bool multicast = !!(flags.val & BR_MCAST_FLOOD);
+
+ err = chip->info->ops->port_set_mcast_flood(chip, port,
+ multicast);
+ if (err)
+ goto out;
+ }
+
+ if (flags.mask & BR_BCAST_FLOOD) {
+ bool broadcast = !!(flags.val & BR_BCAST_FLOOD);
+
+ err = mv88e6xxx_port_broadcast_sync(chip, port, broadcast);
+ if (err)
+ goto out;
+ }
+
+ if (flags.mask & BR_PORT_LOCKED) {
+ bool locked = !!(flags.val & BR_PORT_LOCKED);
+
+ err = mv88e6xxx_port_set_lock(chip, port, locked);
+ if (err)
+ goto out;
+ }
+out:
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static bool mv88e6xxx_lag_can_offload(struct dsa_switch *ds,
+ struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct dsa_port *dp;
+ int members = 0;
+
+ if (!mv88e6xxx_has_lag(chip)) {
+ NL_SET_ERR_MSG_MOD(extack, "Chip does not support LAG offload");
+ return false;
+ }
+
+ if (!lag.id)
+ return false;
+
+ dsa_lag_foreach_port(dp, ds->dst, &lag)
+ /* Includes the port joining the LAG */
+ members++;
+
+ if (members > 8) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload more than 8 LAG ports");
+ return false;
+ }
+
+ /* We could potentially relax this to include active
+ * backup in the future.
+ */
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload LAG using hash TX type");
+ return false;
+ }
+
+ /* Ideally we would also validate that the hash type matches
+ * the hardware. Alas, this is always set to unknown on team
+ * interfaces.
+ */
+ return true;
+}
+
+static int mv88e6xxx_lag_sync_map(struct dsa_switch *ds, struct dsa_lag lag)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct dsa_port *dp;
+ u16 map = 0;
+ int id;
+
+ /* DSA LAG IDs are one-based, hardware is zero-based */
+ id = lag.id - 1;
+
+ /* Build the map of all ports to distribute flows destined for
+ * this LAG. This can be either a local user port, or a DSA
+ * port if the LAG port is on a remote chip.
+ */
+ dsa_lag_foreach_port(dp, ds->dst, &lag)
+ map |= BIT(dsa_towards_port(ds, dp->ds->index, dp->index));
+
+ return mv88e6xxx_g2_trunk_mapping_write(chip, id, map);
+}
+
+static const u8 mv88e6xxx_lag_mask_table[8][8] = {
+ /* Row number corresponds to the number of active members in a
+ * LAG. Each column states which of the eight hash buckets are
+	 * mapped to that column's port in the LAG.
+ *
+ * Example: In a LAG with three active ports, the second port
+ * ([2][1]) would be selected for traffic mapped to buckets
+ * 3,4,5 (0x38).
+ */
+ { 0xff, 0, 0, 0, 0, 0, 0, 0 },
+ { 0x0f, 0xf0, 0, 0, 0, 0, 0, 0 },
+ { 0x07, 0x38, 0xc0, 0, 0, 0, 0, 0 },
+ { 0x03, 0x0c, 0x30, 0xc0, 0, 0, 0, 0 },
+ { 0x03, 0x0c, 0x30, 0x40, 0x80, 0, 0, 0 },
+ { 0x03, 0x0c, 0x10, 0x20, 0x40, 0x80, 0, 0 },
+ { 0x03, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0 },
+ { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 },
+};
+
+static void mv88e6xxx_lag_set_port_mask(u16 *mask, int port,
+ int num_tx, int nth)
+{
+ u8 active = 0;
+ int i;
+
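+	/* The mask table only covers up to eight members; clamp num_tx so
+	 * the lookup below stays in bounds.
+	 */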
+ num_tx = num_tx <= 8 ? num_tx : 8;
+ if (nth < num_tx)
+ active = mv88e6xxx_lag_mask_table[num_tx - 1][nth];
+
+ for (i = 0; i < 8; i++) {
+ if (BIT(i) & active)
+ mask[i] |= BIT(port);
+ }
+}
+
+static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ unsigned int id, num_tx;
+ struct dsa_port *dp;
+ struct dsa_lag *lag;
+ int i, err, nth;
+ u16 mask[8];
+ u16 ivec;
+
+ /* Assume no port is a member of any LAG. */
+ ivec = BIT(mv88e6xxx_num_ports(chip)) - 1;
+
+ /* Disable all masks for ports that _are_ members of a LAG. */
+ dsa_switch_for_each_port(dp, ds) {
+ if (!dp->lag)
+ continue;
+
+ ivec &= ~BIT(dp->index);
+ }
+
+ for (i = 0; i < 8; i++)
+ mask[i] = ivec;
+
+ /* Enable the correct subset of masks for all LAG ports that
+ * are in the Tx set.
+ */
+ dsa_lags_foreach_id(id, ds->dst) {
+ lag = dsa_lag_by_id(ds->dst, id);
+ if (!lag)
+ continue;
+
+ num_tx = 0;
+ dsa_lag_foreach_port(dp, ds->dst, lag) {
+ if (dp->lag_tx_enabled)
+ num_tx++;
+ }
+
+ if (!num_tx)
+ continue;
+
+ nth = 0;
+ dsa_lag_foreach_port(dp, ds->dst, lag) {
+ if (!dp->lag_tx_enabled)
+ continue;
+
+ if (dp->ds == ds)
+ mv88e6xxx_lag_set_port_mask(mask, dp->index,
+ num_tx, nth);
+
+ nth++;
+ }
+ }
+
+ for (i = 0; i < 8; i++) {
+ err = mv88e6xxx_g2_trunk_mask_write(chip, i, true, mask[i]);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mv88e6xxx_lag_sync_masks_map(struct dsa_switch *ds,
+ struct dsa_lag lag)
+{
+ int err;
+
+ err = mv88e6xxx_lag_sync_masks(ds);
+
+ if (!err)
+ err = mv88e6xxx_lag_sync_map(ds, lag);
+
+ return err;
+}
+
+static int mv88e6xxx_port_lag_change(struct dsa_switch *ds, int port)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_lag_sync_masks(ds);
+ mv88e6xxx_reg_unlock(chip);
+ return err;
+}
+
+static int mv88e6xxx_port_lag_join(struct dsa_switch *ds, int port,
+ struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err, id;
+
+ if (!mv88e6xxx_lag_can_offload(ds, lag, info, extack))
+ return -EOPNOTSUPP;
+
+ /* DSA LAG IDs are one-based */
+ id = lag.id - 1;
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_port_set_trunk(chip, port, true, id);
+ if (err)
+ goto err_unlock;
+
+ err = mv88e6xxx_lag_sync_masks_map(ds, lag);
+ if (err)
+ goto err_clear_trunk;
+
+ mv88e6xxx_reg_unlock(chip);
+ return 0;
+
+err_clear_trunk:
+ mv88e6xxx_port_set_trunk(chip, port, false, 0);
+err_unlock:
+ mv88e6xxx_reg_unlock(chip);
+ return err;
+}
+
+static int mv88e6xxx_port_lag_leave(struct dsa_switch *ds, int port,
+ struct dsa_lag lag)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err_sync, err_trunk;
+
+ mv88e6xxx_reg_lock(chip);
+ err_sync = mv88e6xxx_lag_sync_masks_map(ds, lag);
+ err_trunk = mv88e6xxx_port_set_trunk(chip, port, false, 0);
+ mv88e6xxx_reg_unlock(chip);
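+	/* Report whichever operation failed first; the "?:" keeps err_sync
+	 * when it is non-zero.
+	 */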
+ return err_sync ? : err_trunk;
+}
+
+static int mv88e6xxx_crosschip_lag_change(struct dsa_switch *ds, int sw_index,
+ int port)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_lag_sync_masks(ds);
+ mv88e6xxx_reg_unlock(chip);
+ return err;
+}
+
+static int mv88e6xxx_crosschip_lag_join(struct dsa_switch *ds, int sw_index,
+ int port, struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ if (!mv88e6xxx_lag_can_offload(ds, lag, info, extack))
+ return -EOPNOTSUPP;
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_lag_sync_masks_map(ds, lag);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_pvt_map(chip, sw_index, port);
+
+unlock:
+ mv88e6xxx_reg_unlock(chip);
+ return err;
+}
+
+static int mv88e6xxx_crosschip_lag_leave(struct dsa_switch *ds, int sw_index,
+ int port, struct dsa_lag lag)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err_sync, err_pvt;
+
+ mv88e6xxx_reg_lock(chip);
+ err_sync = mv88e6xxx_lag_sync_masks_map(ds, lag);
+ err_pvt = mv88e6xxx_pvt_map(chip, sw_index, port);
+ mv88e6xxx_reg_unlock(chip);
+ return err_sync ? : err_pvt;
+}
+
+static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
+ .get_tag_protocol = mv88e6xxx_get_tag_protocol,
+ .change_tag_protocol = mv88e6xxx_change_tag_protocol,
+ .setup = mv88e6xxx_setup,
+ .teardown = mv88e6xxx_teardown,
+ .port_setup = mv88e6xxx_port_setup,
+ .port_teardown = mv88e6xxx_port_teardown,
+ .phylink_get_caps = mv88e6xxx_get_caps,
+ .phylink_mac_link_state = mv88e6xxx_serdes_pcs_get_state,
+ .phylink_mac_config = mv88e6xxx_mac_config,
+ .phylink_mac_an_restart = mv88e6xxx_serdes_pcs_an_restart,
+ .phylink_mac_link_down = mv88e6xxx_mac_link_down,
+ .phylink_mac_link_up = mv88e6xxx_mac_link_up,
+ .get_strings = mv88e6xxx_get_strings,
+ .get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
+ .get_sset_count = mv88e6xxx_get_sset_count,
+ .port_enable = mv88e6xxx_port_enable,
+ .port_disable = mv88e6xxx_port_disable,
+ .port_max_mtu = mv88e6xxx_get_max_mtu,
+ .port_change_mtu = mv88e6xxx_change_mtu,
+ .get_mac_eee = mv88e6xxx_get_mac_eee,
+ .set_mac_eee = mv88e6xxx_set_mac_eee,
+ .get_eeprom_len = mv88e6xxx_get_eeprom_len,
+ .get_eeprom = mv88e6xxx_get_eeprom,
+ .set_eeprom = mv88e6xxx_set_eeprom,
+ .get_regs_len = mv88e6xxx_get_regs_len,
+ .get_regs = mv88e6xxx_get_regs,
+ .get_rxnfc = mv88e6xxx_get_rxnfc,
+ .set_rxnfc = mv88e6xxx_set_rxnfc,
+ .set_ageing_time = mv88e6xxx_set_ageing_time,
+ .port_bridge_join = mv88e6xxx_port_bridge_join,
+ .port_bridge_leave = mv88e6xxx_port_bridge_leave,
+ .port_pre_bridge_flags = mv88e6xxx_port_pre_bridge_flags,
+ .port_bridge_flags = mv88e6xxx_port_bridge_flags,
+ .port_stp_state_set = mv88e6xxx_port_stp_state_set,
+ .port_mst_state_set = mv88e6xxx_port_mst_state_set,
+ .port_fast_age = mv88e6xxx_port_fast_age,
+ .port_vlan_fast_age = mv88e6xxx_port_vlan_fast_age,
+ .port_vlan_filtering = mv88e6xxx_port_vlan_filtering,
+ .port_vlan_add = mv88e6xxx_port_vlan_add,
+ .port_vlan_del = mv88e6xxx_port_vlan_del,
+ .vlan_msti_set = mv88e6xxx_vlan_msti_set,
+ .port_fdb_add = mv88e6xxx_port_fdb_add,
+ .port_fdb_del = mv88e6xxx_port_fdb_del,
+ .port_fdb_dump = mv88e6xxx_port_fdb_dump,
+ .port_mdb_add = mv88e6xxx_port_mdb_add,
+ .port_mdb_del = mv88e6xxx_port_mdb_del,
+ .port_mirror_add = mv88e6xxx_port_mirror_add,
+ .port_mirror_del = mv88e6xxx_port_mirror_del,
+ .crosschip_bridge_join = mv88e6xxx_crosschip_bridge_join,
+ .crosschip_bridge_leave = mv88e6xxx_crosschip_bridge_leave,
+ .port_hwtstamp_set = mv88e6xxx_port_hwtstamp_set,
+ .port_hwtstamp_get = mv88e6xxx_port_hwtstamp_get,
+ .port_txtstamp = mv88e6xxx_port_txtstamp,
+ .port_rxtstamp = mv88e6xxx_port_rxtstamp,
+ .get_ts_info = mv88e6xxx_get_ts_info,
+ .devlink_param_get = mv88e6xxx_devlink_param_get,
+ .devlink_param_set = mv88e6xxx_devlink_param_set,
+ .devlink_info_get = mv88e6xxx_devlink_info_get,
+ .port_lag_change = mv88e6xxx_port_lag_change,
+ .port_lag_join = mv88e6xxx_port_lag_join,
+ .port_lag_leave = mv88e6xxx_port_lag_leave,
+ .crosschip_lag_change = mv88e6xxx_crosschip_lag_change,
+ .crosschip_lag_join = mv88e6xxx_crosschip_lag_join,
+ .crosschip_lag_leave = mv88e6xxx_crosschip_lag_leave,
+};
+
+static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
+{
+ struct device *dev = chip->dev;
+ struct dsa_switch *ds;
+
+ ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
+ if (!ds)
+ return -ENOMEM;
+
+ ds->dev = dev;
+ ds->num_ports = mv88e6xxx_num_ports(chip);
+ ds->priv = chip;
+ ds->ops = &mv88e6xxx_switch_ops;
+ ds->ageing_time_min = chip->info->age_time_coeff;
+ ds->ageing_time_max = chip->info->age_time_coeff * U8_MAX;
+
+ /* Some chips support up to 32, but that requires enabling the
+ * 5-bit port mode, which we do not support. 640k^W16 ought to
+ * be enough for anyone.
+ */
+ ds->num_lag_ids = mv88e6xxx_has_lag(chip) ? 16 : 0;
+
+ dev_set_drvdata(dev, ds);
+
+ return dsa_register_switch(ds);
+}
+
+static void mv88e6xxx_unregister_switch(struct mv88e6xxx_chip *chip)
+{
+ dsa_unregister_switch(chip->ds);
+}
+
+static const void *pdata_device_get_match_data(struct device *dev)
+{
+ const struct of_device_id *matches = dev->driver->of_match_table;
+ const struct dsa_mv88e6xxx_pdata *pdata = dev->platform_data;
+
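+	/* Walk the OF match table up to its empty sentinel entry, matching
+	 * on the compatible string supplied via platform data.
+	 */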
+ for (; matches->name[0] || matches->type[0] || matches->compatible[0];
+ matches++) {
+ if (!strcmp(pdata->compatible, matches->compatible))
+ return matches->data;
+ }
+ return NULL;
+}
+
+/* There is no suspend to RAM support at the DSA level yet: the switch
+ * configuration would be lost after a power cycle, so prevent the device from
+ * being suspended.
+ */
+static int __maybe_unused mv88e6xxx_suspend(struct device *dev)
+{
+ return -EOPNOTSUPP;
+}
+
+static int __maybe_unused mv88e6xxx_resume(struct device *dev)
+{
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(mv88e6xxx_pm_ops, mv88e6xxx_suspend, mv88e6xxx_resume);
+
+static int mv88e6xxx_probe(struct mdio_device *mdiodev)
+{
+ struct dsa_mv88e6xxx_pdata *pdata = mdiodev->dev.platform_data;
+ const struct mv88e6xxx_info *compat_info = NULL;
+ struct device *dev = &mdiodev->dev;
+ struct device_node *np = dev->of_node;
+ struct mv88e6xxx_chip *chip;
+ int port;
+ int err;
+
+ if (!np && !pdata)
+ return -EINVAL;
+
+ if (np)
+ compat_info = of_device_get_match_data(dev);
+
+ if (pdata) {
+ compat_info = pdata_device_get_match_data(dev);
+
+ if (!pdata->netdev)
+ return -EINVAL;
+
+ for (port = 0; port < DSA_MAX_PORTS; port++) {
+ if (!(pdata->enabled_ports & (1 << port)))
+ continue;
+ if (strcmp(pdata->cd.port_names[port], "cpu"))
+ continue;
+ pdata->cd.netdev[port] = &pdata->netdev->dev;
+ break;
+ }
+ }
+
+ if (!compat_info)
+ return -EINVAL;
+
+ chip = mv88e6xxx_alloc_chip(dev);
+ if (!chip) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ chip->info = compat_info;
+
+ chip->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(chip->reset)) {
+ err = PTR_ERR(chip->reset);
+ goto out;
+ }
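+	/* If a reset line exists, the GPIO request above left it deasserted;
+	 * give the switch time to settle before the first SMI access.
+	 */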
+ if (chip->reset)
+ usleep_range(10000, 20000);
+
+ /* Detect if the device is configured in single chip addressing mode,
+ * otherwise continue with address specific smi init/detection.
+ */
+ err = mv88e6xxx_single_chip_detect(chip, mdiodev);
+ if (err) {
+ err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr);
+ if (err)
+ goto out;
+
+ err = mv88e6xxx_detect(chip);
+ if (err)
+ goto out;
+ }
+
+ if (chip->info->edsa_support == MV88E6XXX_EDSA_SUPPORTED)
+ chip->tag_protocol = DSA_TAG_PROTO_EDSA;
+ else
+ chip->tag_protocol = DSA_TAG_PROTO_DSA;
+
+ mv88e6xxx_phy_init(chip);
+
+ if (chip->info->ops->get_eeprom) {
+ if (np)
+ of_property_read_u32(np, "eeprom-length",
+ &chip->eeprom_len);
+ else
+ chip->eeprom_len = pdata->eeprom_len;
+ }
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_switch_reset(chip);
+ mv88e6xxx_reg_unlock(chip);
+ if (err)
+ goto out;
+
+ if (np) {
+ chip->irq = of_irq_get(np, 0);
+ if (chip->irq == -EPROBE_DEFER) {
+ err = chip->irq;
+ goto out;
+ }
+ }
+
+ if (pdata)
+ chip->irq = pdata->irq;
+
+ /* Has to be performed before the MDIO bus is created, because
+ * the PHYs will link their interrupts to these interrupt
+	 * controllers.
+ */
+ mv88e6xxx_reg_lock(chip);
+ if (chip->irq > 0)
+ err = mv88e6xxx_g1_irq_setup(chip);
+ else
+ err = mv88e6xxx_irq_poll_setup(chip);
+ mv88e6xxx_reg_unlock(chip);
+
+ if (err)
+ goto out;
+
+ if (chip->info->g2_irqs > 0) {
+ err = mv88e6xxx_g2_irq_setup(chip);
+ if (err)
+ goto out_g1_irq;
+ }
+
+ err = mv88e6xxx_g1_atu_prob_irq_setup(chip);
+ if (err)
+ goto out_g2_irq;
+
+ err = mv88e6xxx_g1_vtu_prob_irq_setup(chip);
+ if (err)
+ goto out_g1_atu_prob_irq;
+
+ err = mv88e6xxx_mdios_register(chip, np);
+ if (err)
+ goto out_g1_vtu_prob_irq;
+
+ err = mv88e6xxx_register_switch(chip);
+ if (err)
+ goto out_mdio;
+
+ return 0;
+
+out_mdio:
+ mv88e6xxx_mdios_unregister(chip);
+out_g1_vtu_prob_irq:
+ mv88e6xxx_g1_vtu_prob_irq_free(chip);
+out_g1_atu_prob_irq:
+ mv88e6xxx_g1_atu_prob_irq_free(chip);
+out_g2_irq:
+ if (chip->info->g2_irqs > 0)
+ mv88e6xxx_g2_irq_free(chip);
+out_g1_irq:
+ if (chip->irq > 0)
+ mv88e6xxx_g1_irq_free(chip);
+ else
+ mv88e6xxx_irq_poll_free(chip);
+out:
+ if (pdata)
+ dev_put(pdata->netdev);
+
+ return err;
+}
+
+static void mv88e6xxx_remove(struct mdio_device *mdiodev)
+{
+ struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+ struct mv88e6xxx_chip *chip;
+
+ if (!ds)
+ return;
+
+ chip = ds->priv;
+
+ if (chip->info->ptp_support) {
+ mv88e6xxx_hwtstamp_free(chip);
+ mv88e6xxx_ptp_free(chip);
+ }
+
+ mv88e6xxx_phy_destroy(chip);
+ mv88e6xxx_unregister_switch(chip);
+ mv88e6xxx_mdios_unregister(chip);
+
+ mv88e6xxx_g1_vtu_prob_irq_free(chip);
+ mv88e6xxx_g1_atu_prob_irq_free(chip);
+
+ if (chip->info->g2_irqs > 0)
+ mv88e6xxx_g2_irq_free(chip);
+
+ if (chip->irq > 0)
+ mv88e6xxx_g1_irq_free(chip);
+ else
+ mv88e6xxx_irq_poll_free(chip);
+}
+
+static void mv88e6xxx_shutdown(struct mdio_device *mdiodev)
+{
+ struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+
+ if (!ds)
+ return;
+
+ dsa_switch_shutdown(ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static const struct of_device_id mv88e6xxx_of_match[] = {
+ {
+ .compatible = "marvell,mv88e6085",
+ .data = &mv88e6xxx_table[MV88E6085],
+ },
+ {
+ .compatible = "marvell,mv88e6190",
+ .data = &mv88e6xxx_table[MV88E6190],
+ },
+ {
+ .compatible = "marvell,mv88e6250",
+ .data = &mv88e6xxx_table[MV88E6250],
+ },
+ { /* sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, mv88e6xxx_of_match);
+
+static struct mdio_driver mv88e6xxx_driver = {
+ .probe = mv88e6xxx_probe,
+ .remove = mv88e6xxx_remove,
+ .shutdown = mv88e6xxx_shutdown,
+ .mdiodrv.driver = {
+ .name = "mv88e6085",
+ .of_match_table = mv88e6xxx_of_match,
+ .pm = &mv88e6xxx_pm_ops,
+ },
+};
+
+mdio_module_driver(mv88e6xxx_driver);
+
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
+MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
new file mode 100644
index 000000000..e693154cf
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -0,0 +1,807 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Marvell 88E6xxx Ethernet switch single-chip definition
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ */
+
+#ifndef _MV88E6XXX_CHIP_H
+#define _MV88E6XXX_CHIP_H
+
+#include <linux/idr.h>
+#include <linux/if_vlan.h>
+#include <linux/irq.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kthread.h>
+#include <linux/phy.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
+#include <net/dsa.h>
+
+#define EDSA_HLEN 8
+#define MV88E6XXX_N_FID 4096
+#define MV88E6XXX_N_SID 64
+
+#define MV88E6XXX_FID_STANDALONE 0
+#define MV88E6XXX_FID_BRIDGED 1
+
+/* PVT limits for 4-bit port and 5-bit switch */
+#define MV88E6XXX_MAX_PVT_SWITCHES 32
+#define MV88E6XXX_MAX_PVT_PORTS 16
+#define MV88E6XXX_MAX_PVT_ENTRIES \
+ (MV88E6XXX_MAX_PVT_SWITCHES * MV88E6XXX_MAX_PVT_PORTS)
+
+#define MV88E6XXX_MAX_GPIO 16
+
+enum mv88e6xxx_egress_mode {
+ MV88E6XXX_EGRESS_MODE_UNMODIFIED,
+ MV88E6XXX_EGRESS_MODE_UNTAGGED,
+ MV88E6XXX_EGRESS_MODE_TAGGED,
+ MV88E6XXX_EGRESS_MODE_ETHERTYPE,
+};
+
+enum mv88e6xxx_egress_direction {
+ MV88E6XXX_EGRESS_DIR_INGRESS,
+ MV88E6XXX_EGRESS_DIR_EGRESS,
+};
+
+enum mv88e6xxx_frame_mode {
+ MV88E6XXX_FRAME_MODE_NORMAL,
+ MV88E6XXX_FRAME_MODE_DSA,
+ MV88E6XXX_FRAME_MODE_PROVIDER,
+ MV88E6XXX_FRAME_MODE_ETHERTYPE,
+};
+
+/* List of supported models */
+enum mv88e6xxx_model {
+ MV88E6085,
+ MV88E6095,
+ MV88E6097,
+ MV88E6123,
+ MV88E6131,
+ MV88E6141,
+ MV88E6161,
+ MV88E6165,
+ MV88E6171,
+ MV88E6172,
+ MV88E6175,
+ MV88E6176,
+ MV88E6185,
+ MV88E6190,
+ MV88E6190X,
+ MV88E6191,
+ MV88E6191X,
+ MV88E6193X,
+ MV88E6220,
+ MV88E6240,
+ MV88E6250,
+ MV88E6290,
+ MV88E6320,
+ MV88E6321,
+ MV88E6341,
+ MV88E6350,
+ MV88E6351,
+ MV88E6352,
+ MV88E6390,
+ MV88E6390X,
+ MV88E6393X,
+};
+
+enum mv88e6xxx_family {
+ MV88E6XXX_FAMILY_NONE,
+ MV88E6XXX_FAMILY_6065, /* 6031 6035 6061 6065 */
+ MV88E6XXX_FAMILY_6095, /* 6092 6095 */
+ MV88E6XXX_FAMILY_6097, /* 6046 6085 6096 6097 */
+ MV88E6XXX_FAMILY_6165, /* 6123 6161 6165 */
+ MV88E6XXX_FAMILY_6185, /* 6108 6121 6122 6131 6152 6155 6182 6185 */
+ MV88E6XXX_FAMILY_6250, /* 6220 6250 */
+ MV88E6XXX_FAMILY_6320, /* 6320 6321 */
+ MV88E6XXX_FAMILY_6341, /* 6141 6341 */
+ MV88E6XXX_FAMILY_6351, /* 6171 6175 6350 6351 */
+ MV88E6XXX_FAMILY_6352, /* 6172 6176 6240 6352 */
+ MV88E6XXX_FAMILY_6390, /* 6190 6190X 6191 6290 6390 6390X */
+ MV88E6XXX_FAMILY_6393, /* 6191X 6193X 6393X */
+};
+
+/**
+ * enum mv88e6xxx_edsa_support - Ethertype DSA tag support level
+ * @MV88E6XXX_EDSA_UNSUPPORTED: Device has no support for EDSA tags
+ * @MV88E6XXX_EDSA_UNDOCUMENTED: Documentation indicates that
+ * egressing FORWARD frames with an EDSA
+ * tag is reserved for future use, but
+ * empirical data shows that this mode
+ * is supported.
+ * @MV88E6XXX_EDSA_SUPPORTED: EDSA tags are fully supported.
+ */
+enum mv88e6xxx_edsa_support {
+ MV88E6XXX_EDSA_UNSUPPORTED = 0,
+ MV88E6XXX_EDSA_UNDOCUMENTED,
+ MV88E6XXX_EDSA_SUPPORTED,
+};
+
+struct mv88e6xxx_ops;
+
+struct mv88e6xxx_info {
+ enum mv88e6xxx_family family;
+ u16 prod_num;
+ const char *name;
+ unsigned int num_databases;
+ unsigned int num_macs;
+ unsigned int num_ports;
+ unsigned int num_internal_phys;
+ unsigned int num_gpio;
+ unsigned int max_vid;
+ unsigned int max_sid;
+ unsigned int port_base_addr;
+ unsigned int phy_base_addr;
+ unsigned int global1_addr;
+ unsigned int global2_addr;
+ unsigned int age_time_coeff;
+ unsigned int g1_irqs;
+ unsigned int g2_irqs;
+ bool pvt;
+
+	/* Mark certain ports as invalid. This is required, for example, for
+	 * the MV88E6220 (which is in general a MV88E6250 with 7 ports, but
+	 * with ports 2-4 not routed to pins).
+	 */
+ unsigned int invalid_port_mask;
+ /* Multi-chip Addressing Mode.
+	 * Some chips respond to only 2 registers at their own SMI device
+	 * address when that address is non-zero, and use indirect access to
+	 * internal registers.
+ */
+ bool multi_chip;
+ /* Dual-chip Addressing Mode
+ * Some chips respond to only half of the 32 SMI addresses,
+ * allowing two to coexist on the same SMI interface.
+ */
+ bool dual_chip;
+
+ enum mv88e6xxx_edsa_support edsa_support;
+
+ /* Mask for FromPort and ToPort value of PortVec used in ATU Move
+ * operation. 0 means that the ATU Move operation is not supported.
+ */
+ u8 atu_move_port_mask;
+ const struct mv88e6xxx_ops *ops;
+
+ /* Supports PTP */
+ bool ptp_support;
+};
+
+struct mv88e6xxx_atu_entry {
+ u8 state;
+ bool trunk;
+ u16 portvec;
+ u8 mac[ETH_ALEN];
+};
+
+struct mv88e6xxx_vtu_entry {
+ u16 vid;
+ u16 fid;
+ u8 sid;
+ bool valid;
+ bool policy;
+ u8 member[DSA_MAX_PORTS];
+ u8 state[DSA_MAX_PORTS]; /* Older silicon has no STU */
+};
+
+struct mv88e6xxx_stu_entry {
+ u8 sid;
+ bool valid;
+ u8 state[DSA_MAX_PORTS];
+};
+
+struct mv88e6xxx_bus_ops;
+struct mv88e6xxx_irq_ops;
+struct mv88e6xxx_gpio_ops;
+struct mv88e6xxx_avb_ops;
+struct mv88e6xxx_ptp_ops;
+
+struct mv88e6xxx_irq {
+ u16 masked;
+ struct irq_chip chip;
+ struct irq_domain *domain;
+ int nirqs;
+};
+
+/* state flags for mv88e6xxx_port_hwtstamp::state */
+enum {
+ MV88E6XXX_HWTSTAMP_ENABLED,
+ MV88E6XXX_HWTSTAMP_TX_IN_PROGRESS,
+};
+
+struct mv88e6xxx_port_hwtstamp {
+ /* Port index */
+ int port_id;
+
+ /* Timestamping state */
+ unsigned long state;
+
+ /* Resources for receive timestamping */
+ struct sk_buff_head rx_queue;
+ struct sk_buff_head rx_queue2;
+
+ /* Resources for transmit timestamping */
+ unsigned long tx_tstamp_start;
+ struct sk_buff *tx_skb;
+ u16 tx_seq_id;
+
+ /* Current timestamp configuration */
+ struct hwtstamp_config tstamp_config;
+};
+
+enum mv88e6xxx_policy_mapping {
+ MV88E6XXX_POLICY_MAPPING_DA,
+ MV88E6XXX_POLICY_MAPPING_SA,
+ MV88E6XXX_POLICY_MAPPING_VTU,
+ MV88E6XXX_POLICY_MAPPING_ETYPE,
+ MV88E6XXX_POLICY_MAPPING_PPPOE,
+ MV88E6XXX_POLICY_MAPPING_VBAS,
+ MV88E6XXX_POLICY_MAPPING_OPT82,
+ MV88E6XXX_POLICY_MAPPING_UDP,
+};
+
+enum mv88e6xxx_policy_action {
+ MV88E6XXX_POLICY_ACTION_NORMAL,
+ MV88E6XXX_POLICY_ACTION_MIRROR,
+ MV88E6XXX_POLICY_ACTION_TRAP,
+ MV88E6XXX_POLICY_ACTION_DISCARD,
+};
+
+struct mv88e6xxx_policy {
+ enum mv88e6xxx_policy_mapping mapping;
+ enum mv88e6xxx_policy_action action;
+ struct ethtool_rx_flow_spec fs;
+ u8 addr[ETH_ALEN];
+ int port;
+ u16 vid;
+};
+
+struct mv88e6xxx_vlan {
+ u16 vid;
+ bool valid;
+};
+
+struct mv88e6xxx_port {
+ struct mv88e6xxx_chip *chip;
+ int port;
+ struct mv88e6xxx_vlan bridge_pvid;
+ u64 serdes_stats[2];
+ u64 atu_member_violation;
+ u64 atu_miss_violation;
+ u64 atu_full_violation;
+ u64 vtu_member_violation;
+ u64 vtu_miss_violation;
+ phy_interface_t interface;
+ u8 cmode;
+ bool mirror_ingress;
+ bool mirror_egress;
+ unsigned int serdes_irq;
+ char serdes_irq_name[64];
+ struct devlink_region *region;
+};
+
+enum mv88e6xxx_region_id {
+ MV88E6XXX_REGION_GLOBAL1 = 0,
+ MV88E6XXX_REGION_GLOBAL2,
+ MV88E6XXX_REGION_ATU,
+ MV88E6XXX_REGION_VTU,
+ MV88E6XXX_REGION_STU,
+ MV88E6XXX_REGION_PVT,
+
+ _MV88E6XXX_REGION_MAX,
+};
+
+struct mv88e6xxx_region_priv {
+ enum mv88e6xxx_region_id id;
+};
+
+struct mv88e6xxx_mst {
+ struct list_head node;
+
+ refcount_t refcnt;
+ struct net_device *br;
+ u16 msti;
+
+ struct mv88e6xxx_stu_entry stu;
+};
+
+struct mv88e6xxx_chip {
+ const struct mv88e6xxx_info *info;
+
+ /* Currently configured tagging protocol */
+ enum dsa_tag_protocol tag_protocol;
+
+ /* The dsa_switch this private structure is related to */
+ struct dsa_switch *ds;
+
+ /* The device this structure is associated to */
+ struct device *dev;
+
+ /* This mutex protects the access to the switch registers */
+ struct mutex reg_lock;
+
+ /* The MII bus and the address on the bus that is used to
+	 * communicate with the switch
+ */
+ const struct mv88e6xxx_bus_ops *smi_ops;
+ struct mii_bus *bus;
+ int sw_addr;
+
+ /* Handles automatic disabling and re-enabling of the PHY
+ * polling unit.
+ */
+ const struct mv88e6xxx_bus_ops *phy_ops;
+ struct mutex ppu_mutex;
+ int ppu_disabled;
+ struct work_struct ppu_work;
+ struct timer_list ppu_timer;
+
+ /* This mutex serialises access to the statistics unit.
+ * Hold this mutex over snapshot + dump sequences.
+ */
+ struct mutex stats_mutex;
+
+ /* A switch may have a GPIO line tied to its reset pin. Parse
+ * this from the device tree, and use it before performing
+ * switch soft reset.
+ */
+ struct gpio_desc *reset;
+
+ /* set to size of eeprom if supported by the switch */
+ u32 eeprom_len;
+
+ /* List of mdio busses */
+ struct list_head mdios;
+
+ /* Policy Control List IDs and rules */
+ struct idr policies;
+
+ /* There can be two interrupt controllers, which are chained
+ * off a GPIO as interrupt source
+ */
+ struct mv88e6xxx_irq g1_irq;
+ struct mv88e6xxx_irq g2_irq;
+ int irq;
+ char irq_name[64];
+ int device_irq;
+ char device_irq_name[64];
+ int watchdog_irq;
+ char watchdog_irq_name[64];
+
+ int atu_prob_irq;
+ char atu_prob_irq_name[64];
+ int vtu_prob_irq;
+ char vtu_prob_irq_name[64];
+ struct kthread_worker *kworker;
+ struct kthread_delayed_work irq_poll_work;
+
+ /* GPIO resources */
+ u8 gpio_data[2];
+
+ /* This cyclecounter abstracts the switch PTP time.
+ * reg_lock must be held for any operation that read()s.
+ */
+ struct cyclecounter tstamp_cc;
+ struct timecounter tstamp_tc;
+ struct delayed_work overflow_work;
+
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ struct delayed_work tai_event_work;
+ struct ptp_pin_desc pin_config[MV88E6XXX_MAX_GPIO];
+ u16 trig_config;
+ u16 evcap_config;
+ u16 enable_count;
+
+ /* Current ingress and egress monitor ports */
+ int egress_dest_port;
+ int ingress_dest_port;
+
+ /* Per-port timestamping resources. */
+ struct mv88e6xxx_port_hwtstamp port_hwtstamp[DSA_MAX_PORTS];
+
+ /* Array of port structures. */
+ struct mv88e6xxx_port ports[DSA_MAX_PORTS];
+
+ /* devlink regions */
+ struct devlink_region *regions[_MV88E6XXX_REGION_MAX];
+
+ /* Bridge MST to SID mappings */
+ struct list_head msts;
+};
+
+struct mv88e6xxx_bus_ops {
+ int (*read)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val);
+ int (*write)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
+ int (*init)(struct mv88e6xxx_chip *chip);
+};
+
+struct mv88e6xxx_mdio_bus {
+ struct mii_bus *bus;
+ struct mv88e6xxx_chip *chip;
+ struct list_head list;
+ bool external;
+};
+
+struct mv88e6xxx_ops {
+ /* Switch Setup Errata, called early in the switch setup to
+ * allow any errata actions to be performed
+ */
+ int (*setup_errata)(struct mv88e6xxx_chip *chip);
+
+ int (*ieee_pri_map)(struct mv88e6xxx_chip *chip);
+ int (*ip_pri_map)(struct mv88e6xxx_chip *chip);
+
+ /* Ingress Rate Limit unit (IRL) operations */
+ int (*irl_init_all)(struct mv88e6xxx_chip *chip, int port);
+
+ int (*get_eeprom)(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom, u8 *data);
+ int (*set_eeprom)(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom, u8 *data);
+
+ int (*set_switch_mac)(struct mv88e6xxx_chip *chip, u8 *addr);
+
+ int (*phy_read)(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 *val);
+ int (*phy_write)(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 val);
+
+ /* Priority Override Table operations */
+ int (*pot_clear)(struct mv88e6xxx_chip *chip);
+
+ /* PHY Polling Unit (PPU) operations */
+ int (*ppu_enable)(struct mv88e6xxx_chip *chip);
+ int (*ppu_disable)(struct mv88e6xxx_chip *chip);
+
+ /* Switch Software Reset */
+ int (*reset)(struct mv88e6xxx_chip *chip);
+
+ /* RGMII Receive/Transmit Timing Control
+ * Add delay on PHY_INTERFACE_MODE_RGMII_*ID, no delay otherwise.
+ */
+ int (*port_set_rgmii_delay)(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode);
+
+#define LINK_FORCED_DOWN 0
+#define LINK_FORCED_UP 1
+#define LINK_UNFORCED -2
+
+ /* Port's MAC link state
+ * Use LINK_FORCED_UP or LINK_FORCED_DOWN to force link up or down,
+ * or LINK_UNFORCED for normal link detection.
+ */
+ int (*port_set_link)(struct mv88e6xxx_chip *chip, int port, int link);
+
+ /* Synchronise the port link state with that of the SERDES
+ */
+ int (*port_sync_link)(struct mv88e6xxx_chip *chip, int port, unsigned int mode, bool isup);
+
+#define PAUSE_ON 1
+#define PAUSE_OFF 0
+
+ /* Enable/disable sending Pause */
+ int (*port_set_pause)(struct mv88e6xxx_chip *chip, int port,
+ int pause);
+
+#define SPEED_UNFORCED -2
+#define DUPLEX_UNFORCED -2
+
+ /* Port's MAC speed (in Mbps) and MAC duplex mode
+ *
+ * Depending on the chip, 10, 100, 200, 1000, 2500, 10000 are valid.
+ * Use SPEED_UNFORCED for normal detection.
+ *
+ * Use DUPLEX_HALF or DUPLEX_FULL to force half or full duplex,
+ * or DUPLEX_UNFORCED for normal duplex detection.
+ */
+ int (*port_set_speed_duplex)(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex);
+
+ /* What interface mode should be used for maximum speed? */
+ phy_interface_t (*port_max_speed_mode)(int port);
+
+ int (*port_tag_remap)(struct mv88e6xxx_chip *chip, int port);
+
+ int (*port_set_policy)(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_policy_mapping mapping,
+ enum mv88e6xxx_policy_action action);
+
+ int (*port_set_frame_mode)(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_frame_mode mode);
+ int (*port_set_ucast_flood)(struct mv88e6xxx_chip *chip, int port,
+ bool unicast);
+ int (*port_set_mcast_flood)(struct mv88e6xxx_chip *chip, int port,
+ bool multicast);
+ int (*port_set_ether_type)(struct mv88e6xxx_chip *chip, int port,
+ u16 etype);
+ int (*port_set_jumbo_size)(struct mv88e6xxx_chip *chip, int port,
+ size_t size);
+
+ int (*port_egress_rate_limiting)(struct mv88e6xxx_chip *chip, int port);
+ int (*port_pause_limit)(struct mv88e6xxx_chip *chip, int port, u8 in,
+ u8 out);
+ int (*port_disable_learn_limit)(struct mv88e6xxx_chip *chip, int port);
+ int (*port_disable_pri_override)(struct mv88e6xxx_chip *chip, int port);
+ int (*port_setup_message_port)(struct mv88e6xxx_chip *chip, int port);
+
+	/* CMODE controls what PHY mode the MAC will use, e.g. SGMII, RGMII, etc.
+ * Some chips allow this to be configured on specific ports.
+ */
+ int (*port_set_cmode)(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode);
+ int (*port_get_cmode)(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
+
+ /* Some devices have a per port register indicating what is
+ * the upstream port this port should forward to.
+ */
+ int (*port_set_upstream_port)(struct mv88e6xxx_chip *chip, int port,
+ int upstream_port);
+
+ /* Snapshot the statistics for a port. The statistics can then
+	 * be read back at leisure but still with a consistent view.
+ */
+ int (*stats_snapshot)(struct mv88e6xxx_chip *chip, int port);
+
+ /* Set the histogram mode for statistics, when the control registers
+ * are separated out of the STATS_OP register.
+ */
+ int (*stats_set_histogram)(struct mv88e6xxx_chip *chip);
+
+ /* Return the number of strings describing statistics */
+ int (*stats_get_sset_count)(struct mv88e6xxx_chip *chip);
+ int (*stats_get_strings)(struct mv88e6xxx_chip *chip, uint8_t *data);
+ int (*stats_get_stats)(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data);
+ int (*set_cpu_port)(struct mv88e6xxx_chip *chip, int port);
+ int (*set_egress_port)(struct mv88e6xxx_chip *chip,
+ enum mv88e6xxx_egress_direction direction,
+ int port);
+
+#define MV88E6XXX_CASCADE_PORT_NONE 0xe
+#define MV88E6XXX_CASCADE_PORT_MULTIPLE 0xf
+
+ int (*set_cascade_port)(struct mv88e6xxx_chip *chip, int port);
+
+ const struct mv88e6xxx_irq_ops *watchdog_ops;
+
+ int (*mgmt_rsvd2cpu)(struct mv88e6xxx_chip *chip);
+
+ /* Power on/off a SERDES interface */
+ int (*serdes_power)(struct mv88e6xxx_chip *chip, int port, int lane,
+ bool up);
+
+ /* SERDES lane mapping */
+ int (*serdes_get_lane)(struct mv88e6xxx_chip *chip, int port);
+
+ int (*serdes_pcs_get_state)(struct mv88e6xxx_chip *chip, int port,
+ int lane, struct phylink_link_state *state);
+ int (*serdes_pcs_config)(struct mv88e6xxx_chip *chip, int port,
+ int lane, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertise);
+ int (*serdes_pcs_an_restart)(struct mv88e6xxx_chip *chip, int port,
+ int lane);
+ int (*serdes_pcs_link_up)(struct mv88e6xxx_chip *chip, int port,
+ int lane, int speed, int duplex);
+
+ /* SERDES interrupt handling */
+ unsigned int (*serdes_irq_mapping)(struct mv88e6xxx_chip *chip,
+ int port);
+ int (*serdes_irq_enable)(struct mv88e6xxx_chip *chip, int port, int lane,
+ bool enable);
+ irqreturn_t (*serdes_irq_status)(struct mv88e6xxx_chip *chip, int port,
+ int lane);
+
+ /* Statistics from the SERDES interface */
+ int (*serdes_get_sset_count)(struct mv88e6xxx_chip *chip, int port);
+ int (*serdes_get_strings)(struct mv88e6xxx_chip *chip, int port,
+ uint8_t *data);
+ int (*serdes_get_stats)(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data);
+
+ /* SERDES registers for ethtool */
+ int (*serdes_get_regs_len)(struct mv88e6xxx_chip *chip, int port);
+ void (*serdes_get_regs)(struct mv88e6xxx_chip *chip, int port,
+ void *_p);
+
+ /* SERDES SGMII/Fiber Output Amplitude */
+ int (*serdes_set_tx_amplitude)(struct mv88e6xxx_chip *chip, int port,
+ int val);
+
+ /* Address Translation Unit operations */
+ int (*atu_get_hash)(struct mv88e6xxx_chip *chip, u8 *hash);
+ int (*atu_set_hash)(struct mv88e6xxx_chip *chip, u8 hash);
+
+ /* VLAN Translation Unit operations */
+ int (*vtu_getnext)(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry);
+ int (*vtu_loadpurge)(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry);
+
+ /* Spanning Tree Unit operations */
+ int (*stu_getnext)(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+ int (*stu_loadpurge)(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+
+ /* GPIO operations */
+ const struct mv88e6xxx_gpio_ops *gpio_ops;
+
+ /* Interface to the AVB/PTP registers */
+ const struct mv88e6xxx_avb_ops *avb_ops;
+
+ /* Remote Management Unit operations */
+ int (*rmu_disable)(struct mv88e6xxx_chip *chip);
+
+ /* Precision Time Protocol operations */
+ const struct mv88e6xxx_ptp_ops *ptp_ops;
+
+ /* Phylink */
+ void (*phylink_get_caps)(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config);
+
+ /* Max Frame Size */
+ int (*set_max_frame_size)(struct mv88e6xxx_chip *chip, int mtu);
+};
+
+struct mv88e6xxx_irq_ops {
+ /* Action to be performed when the interrupt happens */
+ int (*irq_action)(struct mv88e6xxx_chip *chip, int irq);
+ /* Setup the hardware to generate the interrupt */
+ int (*irq_setup)(struct mv88e6xxx_chip *chip);
+ /* Reset the hardware to stop generating the interrupt */
+ void (*irq_free)(struct mv88e6xxx_chip *chip);
+};
+
+struct mv88e6xxx_gpio_ops {
+ /* Get/set data on GPIO pin */
+ int (*get_data)(struct mv88e6xxx_chip *chip, unsigned int pin);
+ int (*set_data)(struct mv88e6xxx_chip *chip, unsigned int pin,
+ int value);
+
+ /* get/set GPIO direction */
+ int (*get_dir)(struct mv88e6xxx_chip *chip, unsigned int pin);
+ int (*set_dir)(struct mv88e6xxx_chip *chip, unsigned int pin,
+ bool input);
+
+ /* get/set GPIO pin control */
+ int (*get_pctl)(struct mv88e6xxx_chip *chip, unsigned int pin,
+ int *func);
+ int (*set_pctl)(struct mv88e6xxx_chip *chip, unsigned int pin,
+ int func);
+};
+
+struct mv88e6xxx_avb_ops {
+ /* Access port-scoped Precision Time Protocol registers */
+ int (*port_ptp_read)(struct mv88e6xxx_chip *chip, int port, int addr,
+ u16 *data, int len);
+ int (*port_ptp_write)(struct mv88e6xxx_chip *chip, int port, int addr,
+ u16 data);
+
+ /* Access global Precision Time Protocol registers */
+ int (*ptp_read)(struct mv88e6xxx_chip *chip, int addr, u16 *data,
+ int len);
+ int (*ptp_write)(struct mv88e6xxx_chip *chip, int addr, u16 data);
+
+ /* Access global Time Application Interface registers */
+ int (*tai_read)(struct mv88e6xxx_chip *chip, int addr, u16 *data,
+ int len);
+ int (*tai_write)(struct mv88e6xxx_chip *chip, int addr, u16 data);
+};
+
+struct mv88e6xxx_ptp_ops {
+ u64 (*clock_read)(const struct cyclecounter *cc);
+ int (*ptp_enable)(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on);
+ int (*ptp_verify)(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan);
+ void (*event_work)(struct work_struct *ugly);
+ int (*port_enable)(struct mv88e6xxx_chip *chip, int port);
+ int (*port_disable)(struct mv88e6xxx_chip *chip, int port);
+ int (*global_enable)(struct mv88e6xxx_chip *chip);
+ int (*global_disable)(struct mv88e6xxx_chip *chip);
+ int n_ext_ts;
+ int arr0_sts_reg;
+ int arr1_sts_reg;
+ int dep_sts_reg;
+ u32 rx_filters;
+ u32 cc_shift;
+ u32 cc_mult;
+ u32 cc_mult_num;
+ u32 cc_mult_dem;
+};
+
+#define STATS_TYPE_PORT BIT(0)
+#define STATS_TYPE_BANK0 BIT(1)
+#define STATS_TYPE_BANK1 BIT(2)
+
+struct mv88e6xxx_hw_stat {
+ char string[ETH_GSTRING_LEN];
+ size_t size;
+ int reg;
+ int type;
+};
+
+static inline bool mv88e6xxx_has_stu(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->max_sid > 0 &&
+ chip->info->ops->stu_loadpurge &&
+ chip->info->ops->stu_getnext;
+}
+
+static inline bool mv88e6xxx_has_pvt(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->pvt;
+}
+
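+/* LAG support relies on the Global 2 trunk registers, so offload is only
+ * possible on chips that have a Global 2 block.
+ */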
+static inline bool mv88e6xxx_has_lag(struct mv88e6xxx_chip *chip)
+{
+ return !!chip->info->global2_addr;
+}
+
+static inline unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->num_databases;
+}
+
+static inline unsigned int mv88e6xxx_num_macs(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->num_macs;
+}
+
+static inline unsigned int mv88e6xxx_num_ports(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->num_ports;
+}
+
+static inline unsigned int mv88e6xxx_max_vid(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->max_vid;
+}
+
+static inline unsigned int mv88e6xxx_max_sid(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->max_sid;
+}
+
+static inline u16 mv88e6xxx_port_mask(struct mv88e6xxx_chip *chip)
+{
+ return GENMASK((s32)mv88e6xxx_num_ports(chip) - 1, 0);
+}
+
+static inline unsigned int mv88e6xxx_num_gpio(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->num_gpio;
+}
+
+static inline bool mv88e6xxx_is_invalid_port(struct mv88e6xxx_chip *chip, int port)
+{
+ return (chip->info->invalid_port_mask & BIT(port)) != 0;
+}
+
+int mv88e6xxx_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val);
+int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
+int mv88e6xxx_wait_mask(struct mv88e6xxx_chip *chip, int addr, int reg,
+ u16 mask, u16 val);
+int mv88e6xxx_wait_bit(struct mv88e6xxx_chip *chip, int addr, int reg,
+ int bit, int val);
+struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip);
+
+static inline void mv88e6xxx_reg_lock(struct mv88e6xxx_chip *chip)
+{
+ mutex_lock(&chip->reg_lock);
+}
+
+static inline void mv88e6xxx_reg_unlock(struct mv88e6xxx_chip *chip)
+{
+ mutex_unlock(&chip->reg_lock);
+}
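+
+/* Typical locking idiom (an illustrative sketch, mirroring callers such
+ * as the devlink snapshot code):
+ *
+ *	mv88e6xxx_reg_lock(chip);
+ *	err = mv88e6xxx_read(chip, addr, reg, &val);
+ *	mv88e6xxx_reg_unlock(chip);
+ */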
+
+int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *bitmap);
+
+#endif /* _MV88E6XXX_CHIP_H */
diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c
new file mode 100644
index 000000000..1266eabee
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/devlink.c
@@ -0,0 +1,833 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <net/dsa.h>
+
+#include "chip.h"
+#include "devlink.h"
+#include "global1.h"
+#include "global2.h"
+#include "port.h"
+
+static int mv88e6xxx_atu_get_hash(struct mv88e6xxx_chip *chip, u8 *hash)
+{
+ if (chip->info->ops->atu_get_hash)
+ return chip->info->ops->atu_get_hash(chip, hash);
+
+ return -EOPNOTSUPP;
+}
+
+static int mv88e6xxx_atu_set_hash(struct mv88e6xxx_chip *chip, u8 hash)
+{
+ if (chip->info->ops->atu_set_hash)
+ return chip->info->ops->atu_set_hash(chip, hash);
+
+ return -EOPNOTSUPP;
+}
+
+enum mv88e6xxx_devlink_param_id {
+ MV88E6XXX_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH,
+};
+
+int mv88e6xxx_devlink_param_get(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+
+ switch (id) {
+ case MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH:
+ err = mv88e6xxx_atu_get_hash(chip, &ctx->val.vu8);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+int mv88e6xxx_devlink_param_set(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+
+ switch (id) {
+ case MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH:
+ err = mv88e6xxx_atu_set_hash(chip, ctx->val.vu8);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static const struct devlink_param mv88e6xxx_devlink_params[] = {
+ DSA_DEVLINK_PARAM_DRIVER(MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH,
+ "ATU_hash", DEVLINK_PARAM_TYPE_U8,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME)),
+};
+
+int mv88e6xxx_setup_devlink_params(struct dsa_switch *ds)
+{
+ return dsa_devlink_params_register(ds, mv88e6xxx_devlink_params,
+ ARRAY_SIZE(mv88e6xxx_devlink_params));
+}
+
+void mv88e6xxx_teardown_devlink_params(struct dsa_switch *ds)
+{
+ dsa_devlink_params_unregister(ds, mv88e6xxx_devlink_params,
+ ARRAY_SIZE(mv88e6xxx_devlink_params));
+}
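+
+/* For reference, the ATU_hash parameter is then driven from userspace
+ * via the devlink CLI; the device handle below is illustrative:
+ *
+ *	devlink dev param set mdio_bus/switch0 name ATU_hash \
+ *		value 1 cmode runtime
+ */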
+
+enum mv88e6xxx_devlink_resource_id {
+ MV88E6XXX_RESOURCE_ID_ATU,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
+};
+
+static u64 mv88e6xxx_devlink_atu_bin_get(struct mv88e6xxx_chip *chip,
+ u16 bin)
+{
+ u16 occupancy = 0;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_g2_atu_stats_set(chip, MV88E6XXX_G2_ATU_STATS_MODE_ALL,
+ bin);
+ if (err) {
+ dev_err(chip->dev, "failed to set ATU stats kind/bin\n");
+ goto unlock;
+ }
+
+ err = mv88e6xxx_g1_atu_get_next(chip, 0);
+ if (err) {
+ dev_err(chip->dev, "failed to perform ATU get next\n");
+ goto unlock;
+ }
+
+ err = mv88e6xxx_g2_atu_stats_get(chip, &occupancy);
+ if (err) {
+ dev_err(chip->dev, "failed to get ATU stats\n");
+ goto unlock;
+ }
+
+ occupancy &= MV88E6XXX_G2_ATU_STATS_MASK;
+
+unlock:
+ mv88e6xxx_reg_unlock(chip);
+
+ return occupancy;
+}
+
+static u64 mv88e6xxx_devlink_atu_bin_0_get(void *priv)
+{
+ struct mv88e6xxx_chip *chip = priv;
+
+ return mv88e6xxx_devlink_atu_bin_get(chip,
+ MV88E6XXX_G2_ATU_STATS_BIN_0);
+}
+
+static u64 mv88e6xxx_devlink_atu_bin_1_get(void *priv)
+{
+ struct mv88e6xxx_chip *chip = priv;
+
+ return mv88e6xxx_devlink_atu_bin_get(chip,
+ MV88E6XXX_G2_ATU_STATS_BIN_1);
+}
+
+static u64 mv88e6xxx_devlink_atu_bin_2_get(void *priv)
+{
+ struct mv88e6xxx_chip *chip = priv;
+
+ return mv88e6xxx_devlink_atu_bin_get(chip,
+ MV88E6XXX_G2_ATU_STATS_BIN_2);
+}
+
+static u64 mv88e6xxx_devlink_atu_bin_3_get(void *priv)
+{
+ struct mv88e6xxx_chip *chip = priv;
+
+ return mv88e6xxx_devlink_atu_bin_get(chip,
+ MV88E6XXX_G2_ATU_STATS_BIN_3);
+}
+
+static u64 mv88e6xxx_devlink_atu_get(void *priv)
+{
+ return mv88e6xxx_devlink_atu_bin_0_get(priv) +
+ mv88e6xxx_devlink_atu_bin_1_get(priv) +
+ mv88e6xxx_devlink_atu_bin_2_get(priv) +
+ mv88e6xxx_devlink_atu_bin_3_get(priv);
+}
+
+int mv88e6xxx_setup_devlink_resources(struct dsa_switch *ds)
+{
+ struct devlink_resource_size_params size_params;
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ devlink_resource_size_params_init(&size_params,
+ mv88e6xxx_num_macs(chip),
+ mv88e6xxx_num_macs(chip),
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ err = dsa_devlink_resource_register(ds, "ATU",
+ mv88e6xxx_num_macs(chip),
+ MV88E6XXX_RESOURCE_ID_ATU,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
+ if (err)
+ goto out;
+
+ devlink_resource_size_params_init(&size_params,
+ mv88e6xxx_num_macs(chip) / 4,
+ mv88e6xxx_num_macs(chip) / 4,
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ err = dsa_devlink_resource_register(ds, "ATU_bin_0",
+ mv88e6xxx_num_macs(chip) / 4,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ &size_params);
+ if (err)
+ goto out;
+
+ err = dsa_devlink_resource_register(ds, "ATU_bin_1",
+ mv88e6xxx_num_macs(chip) / 4,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ &size_params);
+ if (err)
+ goto out;
+
+ err = dsa_devlink_resource_register(ds, "ATU_bin_2",
+ mv88e6xxx_num_macs(chip) / 4,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ &size_params);
+ if (err)
+ goto out;
+
+ err = dsa_devlink_resource_register(ds, "ATU_bin_3",
+ mv88e6xxx_num_macs(chip) / 4,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ &size_params);
+ if (err)
+ goto out;
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ mv88e6xxx_devlink_atu_get,
+ chip);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
+ mv88e6xxx_devlink_atu_bin_0_get,
+ chip);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
+ mv88e6xxx_devlink_atu_bin_1_get,
+ chip);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
+ mv88e6xxx_devlink_atu_bin_2_get,
+ chip);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
+ mv88e6xxx_devlink_atu_bin_3_get,
+ chip);
+
+ return 0;
+
+out:
+ dsa_devlink_resources_unregister(ds);
+ return err;
+}
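+
+/* Once registered, the resource tree and its occupancy counters can be
+ * inspected from userspace (device handle illustrative):
+ *
+ *	devlink resource show mdio_bus/switch0
+ */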
+
+static int mv88e6xxx_region_global_snapshot(struct devlink *dl,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct mv88e6xxx_region_priv *region_priv = ops->priv;
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+ struct mv88e6xxx_chip *chip = ds->priv;
+ u16 *registers;
+ int i, err;
+
+ registers = kmalloc_array(32, sizeof(u16), GFP_KERNEL);
+ if (!registers)
+ return -ENOMEM;
+
+ mv88e6xxx_reg_lock(chip);
+ for (i = 0; i < 32; i++) {
+ switch (region_priv->id) {
+ case MV88E6XXX_REGION_GLOBAL1:
+ err = mv88e6xxx_g1_read(chip, i, &registers[i]);
+ break;
+ case MV88E6XXX_REGION_GLOBAL2:
+ err = mv88e6xxx_g2_read(chip, i, &registers[i]);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ if (err) {
+ kfree(registers);
+ goto out;
+ }
+ }
+ *data = (u8 *)registers;
+out:
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+/* The ATU entry format varies between mv88e6xxx chipset generations. Define
+ * a generic format that covers all current and, hopefully, all future
+ * mv88e6xxx generations.
+ */
+
+struct mv88e6xxx_devlink_atu_entry {
+ /* The FID is scattered over multiple registers. */
+ u16 fid;
+ u16 atu_op;
+ u16 atu_data;
+ u16 atu_01;
+ u16 atu_23;
+ u16 atu_45;
+};
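+
+/* A minimal consumer-side decoding sketch, assuming the same byte order
+ * as mv88e6xxx_g1_atu_mac_read() in global1_atu.c (MAC byte 0 in the
+ * high byte of atu_01):
+ *
+ *	mac[0] = entry->atu_01 >> 8;  mac[1] = entry->atu_01 & 0xff;
+ *	mac[2] = entry->atu_23 >> 8;  mac[3] = entry->atu_23 & 0xff;
+ *	mac[4] = entry->atu_45 >> 8;  mac[5] = entry->atu_45 & 0xff;
+ */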
+
+static int mv88e6xxx_region_atu_snapshot_fid(struct mv88e6xxx_chip *chip,
+ int fid,
+ struct mv88e6xxx_devlink_atu_entry *table,
+ int *count)
+{
+ u16 atu_op, atu_data, atu_01, atu_23, atu_45;
+ struct mv88e6xxx_atu_entry addr;
+ int err;
+
+ addr.state = 0;
+ eth_broadcast_addr(addr.mac);
+
+ do {
+ err = mv88e6xxx_g1_atu_getnext(chip, fid, &addr);
+ if (err)
+ return err;
+
+ if (!addr.state)
+ break;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_OP, &atu_op);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_DATA, &atu_data);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_MAC01, &atu_01);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_MAC23, &atu_23);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_MAC45, &atu_45);
+ if (err)
+ return err;
+
+ table[*count].fid = fid;
+ table[*count].atu_op = atu_op;
+ table[*count].atu_data = atu_data;
+ table[*count].atu_01 = atu_01;
+ table[*count].atu_23 = atu_23;
+ table[*count].atu_45 = atu_45;
+ (*count)++;
+ } while (!is_broadcast_ether_addr(addr.mac));
+
+ return 0;
+}
+
+static int mv88e6xxx_region_atu_snapshot(struct devlink *dl,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+ DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
+ struct mv88e6xxx_devlink_atu_entry *table;
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int fid = -1, count, err;
+
+ table = kcalloc(mv88e6xxx_num_databases(chip),
+ sizeof(struct mv88e6xxx_devlink_atu_entry),
+ GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ count = 0;
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_fid_map(chip, fid_bitmap);
+ if (err) {
+ kfree(table);
+ goto out;
+ }
+
+ while (1) {
+ fid = find_next_bit(fid_bitmap, MV88E6XXX_N_FID, fid + 1);
+ if (fid == MV88E6XXX_N_FID)
+ break;
+
+ err = mv88e6xxx_region_atu_snapshot_fid(chip, fid, table,
+ &count);
+ if (err) {
+ kfree(table);
+ goto out;
+ }
+ }
+ *data = (u8 *)table;
+out:
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+/**
+ * struct mv88e6xxx_devlink_vtu_entry - Devlink VTU entry
+ * @fid: Global1/2: FID and VLAN policy.
+ * @sid: Global1/3: SID, unknown filters and learning.
+ * @op: Global1/5: FID (old chipsets).
+ * @vid: Global1/6: VID, valid, and page.
+ * @data: Global1/7-9: Membership data and priority override.
+ * @resvd: Reserved. Also happens to align the size to 16B.
+ *
+ * The VTU entry format varies between chipset generations, the
+ * descriptions above represent the superset of all possible
+ * information, not all fields are valid on all devices. Since this is
+ * a low-level debug interface, copy all data verbatim and defer
+ * parsing to the consumer.
+ */
+struct mv88e6xxx_devlink_vtu_entry {
+ u16 fid;
+ u16 sid;
+ u16 op;
+ u16 vid;
+ u16 data[3];
+ u16 resvd;
+};
+
+static int mv88e6xxx_region_vtu_snapshot(struct devlink *dl,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct mv88e6xxx_devlink_vtu_entry *table, *entry;
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_vtu_entry vlan;
+ int err;
+
+ table = kcalloc(mv88e6xxx_max_vid(chip) + 1,
+ sizeof(struct mv88e6xxx_devlink_vtu_entry),
+ GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ entry = table;
+ vlan.vid = mv88e6xxx_max_vid(chip);
+ vlan.valid = false;
+
+ mv88e6xxx_reg_lock(chip);
+
+ do {
+ err = mv88e6xxx_g1_vtu_getnext(chip, &vlan);
+ if (err)
+ break;
+
+ if (!vlan.valid)
+ break;
+
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6352_G1_VTU_FID,
+ &entry->fid);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6352_G1_VTU_SID,
+ &entry->sid);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_OP,
+ &entry->op);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_VID,
+ &entry->vid);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA1,
+ &entry->data[0]);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA2,
+ &entry->data[1]);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA3,
+ &entry->data[2]);
+ if (err)
+ break;
+
+ entry++;
+ } while (vlan.vid < mv88e6xxx_max_vid(chip));
+
+ mv88e6xxx_reg_unlock(chip);
+
+ if (err) {
+ kfree(table);
+ return err;
+ }
+
+ *data = (u8 *)table;
+ return 0;
+}
+
+/**
+ * struct mv88e6xxx_devlink_stu_entry - Devlink STU entry
+ * @sid: Global1/3: SID, unknown filters and learning.
+ * @vid: Global1/6: Valid bit.
+ * @data: Global1/7-9: Membership data and priority override.
+ * @resvd: Reserved. In case we forgot something.
+ *
+ * The STU entry format varies between chipset generations. Peridot
+ * and Amethyst pack the STU data into Global1/7-8. Older silicon
+ * spreads the information across all three VTU data registers -
+ * inheriting the layout of even older hardware that had no STU at
+ * all. Since this is a low-level debug interface, copy all data
+ * verbatim and defer parsing to the consumer.
+ */
+struct mv88e6xxx_devlink_stu_entry {
+ u16 sid;
+ u16 vid;
+ u16 data[3];
+ u16 resvd;
+};
+
+static int mv88e6xxx_region_stu_snapshot(struct devlink *dl,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct mv88e6xxx_devlink_stu_entry *table, *entry;
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_stu_entry stu;
+ int err;
+
+ table = kcalloc(mv88e6xxx_max_sid(chip) + 1,
+ sizeof(struct mv88e6xxx_devlink_stu_entry),
+ GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ entry = table;
+ stu.sid = mv88e6xxx_max_sid(chip);
+ stu.valid = false;
+
+ mv88e6xxx_reg_lock(chip);
+
+ do {
+ err = mv88e6xxx_g1_stu_getnext(chip, &stu);
+ if (err)
+ break;
+
+ if (!stu.valid)
+ break;
+
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6352_G1_VTU_SID,
+ &entry->sid);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_VID,
+ &entry->vid);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA1,
+ &entry->data[0]);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA2,
+ &entry->data[1]);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA3,
+ &entry->data[2]);
+ if (err)
+ break;
+
+ entry++;
+ } while (stu.sid < mv88e6xxx_max_sid(chip));
+
+ mv88e6xxx_reg_unlock(chip);
+
+ if (err) {
+ kfree(table);
+ return err;
+ }
+
+ *data = (u8 *)table;
+ return 0;
+}
+
+static int mv88e6xxx_region_pvt_snapshot(struct devlink *dl,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int dev, port, err;
+ u16 *pvt, *cur;
+
+ pvt = kcalloc(MV88E6XXX_MAX_PVT_ENTRIES, sizeof(*pvt), GFP_KERNEL);
+ if (!pvt)
+ return -ENOMEM;
+
+ mv88e6xxx_reg_lock(chip);
+
+ cur = pvt;
+ for (dev = 0; dev < MV88E6XXX_MAX_PVT_SWITCHES; dev++) {
+ for (port = 0; port < MV88E6XXX_MAX_PVT_PORTS; port++) {
+ err = mv88e6xxx_g2_pvt_read(chip, dev, port, cur);
+ if (err)
+ goto out;
+
+ cur++;
+ }
+ }
+
+out:
+ mv88e6xxx_reg_unlock(chip);
+
+ if (err) {
+ kfree(pvt);
+ return err;
+ }
+
+ *data = (u8 *)pvt;
+ return 0;
+}
+
+static int mv88e6xxx_region_port_snapshot(struct devlink_port *devlink_port,
+ const struct devlink_port_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct dsa_switch *ds = dsa_devlink_port_to_ds(devlink_port);
+ int port = dsa_devlink_port_to_port(devlink_port);
+ struct mv88e6xxx_chip *chip = ds->priv;
+ u16 *registers;
+ int i, err;
+
+ registers = kmalloc_array(32, sizeof(u16), GFP_KERNEL);
+ if (!registers)
+ return -ENOMEM;
+
+ mv88e6xxx_reg_lock(chip);
+ for (i = 0; i < 32; i++) {
+ err = mv88e6xxx_port_read(chip, port, i, &registers[i]);
+ if (err) {
+ kfree(registers);
+ goto out;
+ }
+ }
+ *data = (u8 *)registers;
+out:
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static struct mv88e6xxx_region_priv mv88e6xxx_region_global1_priv = {
+ .id = MV88E6XXX_REGION_GLOBAL1,
+};
+
+static struct devlink_region_ops mv88e6xxx_region_global1_ops = {
+ .name = "global1",
+ .snapshot = mv88e6xxx_region_global_snapshot,
+ .destructor = kfree,
+ .priv = &mv88e6xxx_region_global1_priv,
+};
+
+static struct mv88e6xxx_region_priv mv88e6xxx_region_global2_priv = {
+ .id = MV88E6XXX_REGION_GLOBAL2,
+};
+
+static struct devlink_region_ops mv88e6xxx_region_global2_ops = {
+ .name = "global2",
+ .snapshot = mv88e6xxx_region_global_snapshot,
+ .destructor = kfree,
+ .priv = &mv88e6xxx_region_global2_priv,
+};
+
+static struct devlink_region_ops mv88e6xxx_region_atu_ops = {
+ .name = "atu",
+ .snapshot = mv88e6xxx_region_atu_snapshot,
+ .destructor = kfree,
+};
+
+static struct devlink_region_ops mv88e6xxx_region_vtu_ops = {
+ .name = "vtu",
+ .snapshot = mv88e6xxx_region_vtu_snapshot,
+ .destructor = kfree,
+};
+
+static struct devlink_region_ops mv88e6xxx_region_stu_ops = {
+ .name = "stu",
+ .snapshot = mv88e6xxx_region_stu_snapshot,
+ .destructor = kfree,
+};
+
+static struct devlink_region_ops mv88e6xxx_region_pvt_ops = {
+ .name = "pvt",
+ .snapshot = mv88e6xxx_region_pvt_snapshot,
+ .destructor = kfree,
+};
+
+static const struct devlink_port_region_ops mv88e6xxx_region_port_ops = {
+ .name = "port",
+ .snapshot = mv88e6xxx_region_port_snapshot,
+ .destructor = kfree,
+};
+
+struct mv88e6xxx_region {
+ struct devlink_region_ops *ops;
+ u64 size;
+
+ bool (*cond)(struct mv88e6xxx_chip *chip);
+};
+
+static struct mv88e6xxx_region mv88e6xxx_regions[] = {
+ [MV88E6XXX_REGION_GLOBAL1] = {
+ .ops = &mv88e6xxx_region_global1_ops,
+ .size = 32 * sizeof(u16),
+ },
+ [MV88E6XXX_REGION_GLOBAL2] = {
+ .ops = &mv88e6xxx_region_global2_ops,
+ .size = 32 * sizeof(u16),
+ },
+ [MV88E6XXX_REGION_ATU] = {
+ .ops = &mv88e6xxx_region_atu_ops,
+ /* calculated at runtime */
+ },
+ [MV88E6XXX_REGION_VTU] = {
+ .ops = &mv88e6xxx_region_vtu_ops,
+ /* calculated at runtime */
+ },
+ [MV88E6XXX_REGION_STU] = {
+ .ops = &mv88e6xxx_region_stu_ops,
+ .cond = mv88e6xxx_has_stu,
+ /* calculated at runtime */
+ },
+ [MV88E6XXX_REGION_PVT] = {
+ .ops = &mv88e6xxx_region_pvt_ops,
+ .size = MV88E6XXX_MAX_PVT_ENTRIES * sizeof(u16),
+ .cond = mv88e6xxx_has_pvt,
+ },
+};
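+
+/* Snapshots of these regions are taken on demand from userspace, e.g.
+ * (device handle illustrative):
+ *
+ *	devlink region new mdio_bus/switch0/atu snapshot 1
+ *	devlink region dump mdio_bus/switch0/atu snapshot 1
+ */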
+
+void mv88e6xxx_teardown_devlink_regions_global(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mv88e6xxx_regions); i++)
+ dsa_devlink_region_destroy(chip->regions[i]);
+}
+
+void mv88e6xxx_teardown_devlink_regions_port(struct dsa_switch *ds, int port)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+
+ dsa_devlink_region_destroy(chip->ports[port].region);
+}
+
+int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds, int port)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct devlink_region *region;
+
+ region = dsa_devlink_port_region_create(ds,
+ port,
+ &mv88e6xxx_region_port_ops, 1,
+ 32 * sizeof(u16));
+ if (IS_ERR(region))
+ return PTR_ERR(region);
+
+ chip->ports[port].region = region;
+
+ return 0;
+}
+
+int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds)
+{
+ bool (*cond)(struct mv88e6xxx_chip *chip);
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct devlink_region_ops *ops;
+ struct devlink_region *region;
+ u64 size;
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(mv88e6xxx_regions); i++) {
+ ops = mv88e6xxx_regions[i].ops;
+ size = mv88e6xxx_regions[i].size;
+ cond = mv88e6xxx_regions[i].cond;
+
+ if (cond && !cond(chip))
+ continue;
+
+ switch (i) {
+ case MV88E6XXX_REGION_ATU:
+ size = mv88e6xxx_num_databases(chip) *
+ sizeof(struct mv88e6xxx_devlink_atu_entry);
+ break;
+ case MV88E6XXX_REGION_VTU:
+ size = (mv88e6xxx_max_vid(chip) + 1) *
+ sizeof(struct mv88e6xxx_devlink_vtu_entry);
+ break;
+ case MV88E6XXX_REGION_STU:
+ size = (mv88e6xxx_max_sid(chip) + 1) *
+ sizeof(struct mv88e6xxx_devlink_stu_entry);
+ break;
+ }
+
+ region = dsa_devlink_region_create(ds, ops, 1, size);
+ if (IS_ERR(region))
+ goto out;
+ chip->regions[i] = region;
+ }
+ return 0;
+
+out:
+ for (j = 0; j < i; j++)
+ dsa_devlink_region_destroy(chip->regions[j]);
+
+ return PTR_ERR(region);
+}
+
+int mv88e6xxx_devlink_info_get(struct dsa_switch *ds,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ err = devlink_info_driver_name_put(req, "mv88e6xxx");
+ if (err)
+ return err;
+
+ return devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
+ chip->info->name);
+}
diff --git a/drivers/net/dsa/mv88e6xxx/devlink.h b/drivers/net/dsa/mv88e6xxx/devlink.h
new file mode 100644
index 000000000..65ce6a685
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/devlink.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+/* Marvell 88E6xxx Switch devlink support. */
+
+#ifndef _MV88E6XXX_DEVLINK_H
+#define _MV88E6XXX_DEVLINK_H
+
+int mv88e6xxx_setup_devlink_params(struct dsa_switch *ds);
+void mv88e6xxx_teardown_devlink_params(struct dsa_switch *ds);
+int mv88e6xxx_setup_devlink_resources(struct dsa_switch *ds);
+int mv88e6xxx_devlink_param_get(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+int mv88e6xxx_devlink_param_set(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds);
+void mv88e6xxx_teardown_devlink_regions_global(struct dsa_switch *ds);
+int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds, int port);
+void mv88e6xxx_teardown_devlink_regions_port(struct dsa_switch *ds, int port);
+
+int mv88e6xxx_devlink_info_get(struct dsa_switch *ds,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack);
+#endif /* _MV88E6XXX_DEVLINK_H */
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
new file mode 100644
index 000000000..964928285
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -0,0 +1,579 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Marvell 88E6xxx Switch Global (1) Registers support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2016-2017 Savoir-faire Linux Inc.
+ * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ */
+
+#include <linux/bitfield.h>
+
+#include "chip.h"
+#include "global1.h"
+
+int mv88e6xxx_g1_read(struct mv88e6xxx_chip *chip, int reg, u16 *val)
+{
+ int addr = chip->info->global1_addr;
+
+ return mv88e6xxx_read(chip, addr, reg, val);
+}
+
+int mv88e6xxx_g1_write(struct mv88e6xxx_chip *chip, int reg, u16 val)
+{
+ int addr = chip->info->global1_addr;
+
+ return mv88e6xxx_write(chip, addr, reg, val);
+}
+
+int mv88e6xxx_g1_wait_bit(struct mv88e6xxx_chip *chip, int reg, int bit,
+ int val)
+{
+ return mv88e6xxx_wait_bit(chip, chip->info->global1_addr, reg,
+ bit, val);
+}
+
+int mv88e6xxx_g1_wait_mask(struct mv88e6xxx_chip *chip, int reg,
+ u16 mask, u16 val)
+{
+ return mv88e6xxx_wait_mask(chip, chip->info->global1_addr, reg,
+ mask, val);
+}
+
+/* Offset 0x00: Switch Global Status Register */
+
+static int mv88e6185_g1_wait_ppu_disabled(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_g1_wait_mask(chip, MV88E6XXX_G1_STS,
+ MV88E6185_G1_STS_PPU_STATE_MASK,
+ MV88E6185_G1_STS_PPU_STATE_DISABLED);
+}
+
+static int mv88e6185_g1_wait_ppu_polling(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_g1_wait_mask(chip, MV88E6XXX_G1_STS,
+ MV88E6185_G1_STS_PPU_STATE_MASK,
+ MV88E6185_G1_STS_PPU_STATE_POLLING);
+}
+
+static int mv88e6352_g1_wait_ppu_polling(struct mv88e6xxx_chip *chip)
+{
+ int bit = __bf_shf(MV88E6352_G1_STS_PPU_STATE);
+
+ return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
+}
+
+static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip)
+{
+ int bit = __bf_shf(MV88E6XXX_G1_STS_INIT_READY);
+
+ /* Wait up to 1 second for the switch to be ready. The InitReady bit 11
+ * is set once all units inside the device (ATU, VTU, etc.) have
+ * finished their initialization and are ready to accept frames.
+ */
+ return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
+}
+
+/* Offset 0x01: Switch MAC Address Register Bytes 0 & 1
+ * Offset 0x02: Switch MAC Address Register Bytes 2 & 3
+ * Offset 0x03: Switch MAC Address Register Bytes 4 & 5
+ */
+int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
+{
+ u16 reg;
+ int err;
+
+ reg = (addr[0] << 8) | addr[1];
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_MAC_01, reg);
+ if (err)
+ return err;
+
+ reg = (addr[2] << 8) | addr[3];
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_MAC_23, reg);
+ if (err)
+ return err;
+
+ reg = (addr[4] << 8) | addr[5];
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_MAC_45, reg);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/* Offset 0x04: Switch Global Control Register */
+
+int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip)
+{
+ u16 val;
+ int err;
+
+ /* Set the SWReset bit 15 along with the PPUEn bit 14 to also restart
+ * the PPU, including re-doing PHY detection and initialization.
+ */
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val);
+ if (err)
+ return err;
+
+ val |= MV88E6XXX_G1_CTL1_SW_RESET;
+ val |= MV88E6XXX_G1_CTL1_PPU_ENABLE;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, val);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_wait_init_ready(chip);
+ if (err)
+ return err;
+
+ return mv88e6185_g1_wait_ppu_polling(chip);
+}
+
+int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip)
+{
+ u16 val;
+ int err;
+
+ /* Set the SWReset bit 15 */
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val);
+ if (err)
+ return err;
+
+ val |= MV88E6XXX_G1_CTL1_SW_RESET;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, val);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g1_wait_init_ready(chip);
+}
+
+int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ err = mv88e6250_g1_reset(chip);
+ if (err)
+ return err;
+
+ return mv88e6352_g1_wait_ppu_polling(chip);
+}
+
+int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val);
+ if (err)
+ return err;
+
+ val |= MV88E6XXX_G1_CTL1_PPU_ENABLE;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, val);
+ if (err)
+ return err;
+
+ return mv88e6185_g1_wait_ppu_polling(chip);
+}
+
+int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val);
+ if (err)
+ return err;
+
+ val &= ~MV88E6XXX_G1_CTL1_PPU_ENABLE;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, val);
+ if (err)
+ return err;
+
+ return mv88e6185_g1_wait_ppu_disabled(chip);
+}
+
+int mv88e6185_g1_set_max_frame_size(struct mv88e6xxx_chip *chip, int mtu)
+{
+ u16 val;
+ int err;
+
+ mtu += ETH_HLEN + ETH_FCS_LEN;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val);
+ if (err)
+ return err;
+
+ val &= ~MV88E6185_G1_CTL1_MAX_FRAME_1632;
+
+ if (mtu > 1518)
+ val |= MV88E6185_G1_CTL1_MAX_FRAME_1632;
+
+ return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, val);
+}
+
+/* Offset 0x10: IP-PRI Mapping Register 0
+ * Offset 0x11: IP-PRI Mapping Register 1
+ * Offset 0x12: IP-PRI Mapping Register 2
+ * Offset 0x13: IP-PRI Mapping Register 3
+ * Offset 0x14: IP-PRI Mapping Register 4
+ * Offset 0x15: IP-PRI Mapping Register 5
+ * Offset 0x16: IP-PRI Mapping Register 6
+ * Offset 0x17: IP-PRI Mapping Register 7
+ */
+
+int mv88e6085_g1_ip_pri_map(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ /* Reset the IP TOS/DiffServ/Traffic priorities to defaults */
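+ /* Each 16-bit register is understood to pack eight 2-bit priority
+ * codes: 0x0000 maps its range to priority 0, 0x5555 to 1, 0xaaaa
+ * to 2 and 0xffff to 3.
+ */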
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_0, 0x0000);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_1, 0x0000);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_2, 0x5555);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_3, 0x5555);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_4, 0xaaaa);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_5, 0xaaaa);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_6, 0xffff);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_7, 0xffff);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/* Offset 0x18: IEEE-PRI Register */
+
+int mv88e6085_g1_ieee_pri_map(struct mv88e6xxx_chip *chip)
+{
+ /* Reset the IEEE Tag priorities to defaults */
+ return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IEEE_PRI, 0xfa41);
+}
+
+int mv88e6250_g1_ieee_pri_map(struct mv88e6xxx_chip *chip)
+{
+ /* Reset the IEEE Tag priorities to defaults */
+ return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IEEE_PRI, 0xfa50);
+}
+
+/* Offset 0x1a: Monitor Control */
+/* Offset 0x1a: Monitor & MGMT Control on some devices */
+
+int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip,
+ enum mv88e6xxx_egress_direction direction,
+ int port)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6185_G1_MONITOR_CTL, &reg);
+ if (err)
+ return err;
+
+ switch (direction) {
+ case MV88E6XXX_EGRESS_DIR_INGRESS:
+ reg &= ~MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK;
+ reg |= port <<
+ __bf_shf(MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK);
+ break;
+ case MV88E6XXX_EGRESS_DIR_EGRESS:
+ reg &= ~MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK;
+ reg |= port <<
+ __bf_shf(MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return mv88e6xxx_g1_write(chip, MV88E6185_G1_MONITOR_CTL, reg);
+}
+
+/* Older generations also call this the ARP destination. It has been
+ * generalized on more modern devices such that more than just ARP frames
+ * can egress it.
+ */
+int mv88e6095_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6185_G1_MONITOR_CTL, &reg);
+ if (err)
+ return err;
+
+ reg &= ~MV88E6185_G1_MONITOR_CTL_ARP_DEST_MASK;
+ reg |= port << __bf_shf(MV88E6185_G1_MONITOR_CTL_ARP_DEST_MASK);
+
+ return mv88e6xxx_g1_write(chip, MV88E6185_G1_MONITOR_CTL, reg);
+}
+
+static int mv88e6390_g1_monitor_write(struct mv88e6xxx_chip *chip,
+ u16 pointer, u8 data)
+{
+ u16 reg;
+
+ reg = MV88E6390_G1_MONITOR_MGMT_CTL_UPDATE | pointer | data;
+
+ return mv88e6xxx_g1_write(chip, MV88E6390_G1_MONITOR_MGMT_CTL, reg);
+}
+
+int mv88e6390_g1_set_egress_port(struct mv88e6xxx_chip *chip,
+ enum mv88e6xxx_egress_direction direction,
+ int port)
+{
+ u16 ptr;
+
+ switch (direction) {
+ case MV88E6XXX_EGRESS_DIR_INGRESS:
+ ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST;
+ break;
+ case MV88E6XXX_EGRESS_DIR_EGRESS:
+ ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return mv88e6390_g1_monitor_write(chip, ptr, port);
+}
+
+int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port)
+{
+ u16 ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST;
+
+ /* Use the default high priority for management frames sent to
+ * the CPU.
+ */
+ port |= MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI;
+
+ return mv88e6390_g1_monitor_write(chip, ptr, port);
+}
+
+int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip)
+{
+ u16 ptr;
+ int err;
+
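+ /* Each bit of the 0xff data written below is understood to enable
+ * trapping of one address within the corresponding 8-address block,
+ * so 0xff traps all eight.
+ */
+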
+ /* 01:80:c2:00:00:00-01:80:c2:00:00:07 are Management */
+ ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C200000XLO;
+ err = mv88e6390_g1_monitor_write(chip, ptr, 0xff);
+ if (err)
+ return err;
+
+ /* 01:80:c2:00:00:08-01:80:c2:00:00:0f are Management */
+ ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C200000XHI;
+ err = mv88e6390_g1_monitor_write(chip, ptr, 0xff);
+ if (err)
+ return err;
+
+ /* 01:80:c2:00:00:20-01:80:c2:00:00:27 are Management */
+ ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C200002XLO;
+ err = mv88e6390_g1_monitor_write(chip, ptr, 0xff);
+ if (err)
+ return err;
+
+ /* 01:80:c2:00:00:28-01:80:c2:00:00:2f are Management */
+ ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C200002XHI;
+ err = mv88e6390_g1_monitor_write(chip, ptr, 0xff);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/* Offset 0x1c: Global Control 2 */
+
+static int mv88e6xxx_g1_ctl2_mask(struct mv88e6xxx_chip *chip, u16 mask,
+ u16 val)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL2, &reg);
+ if (err)
+ return err;
+
+ reg &= ~mask;
+ reg |= val & mask;
+
+ return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL2, reg);
+}
+
+int mv88e6185_g1_set_cascade_port(struct mv88e6xxx_chip *chip, int port)
+{
+ const u16 mask = MV88E6185_G1_CTL2_CASCADE_PORT_MASK;
+
+ return mv88e6xxx_g1_ctl2_mask(chip, mask, port << __bf_shf(mask));
+}
+
+int mv88e6085_g1_rmu_disable(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_g1_ctl2_mask(chip, MV88E6085_G1_CTL2_P10RM |
+ MV88E6085_G1_CTL2_RM_ENABLE, 0);
+}
+
+int mv88e6352_g1_rmu_disable(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_g1_ctl2_mask(chip, MV88E6352_G1_CTL2_RMU_MODE_MASK,
+ MV88E6352_G1_CTL2_RMU_MODE_DISABLED);
+}
+
+int mv88e6390_g1_rmu_disable(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_g1_ctl2_mask(chip, MV88E6390_G1_CTL2_RMU_MODE_MASK,
+ MV88E6390_G1_CTL2_RMU_MODE_DISABLED);
+}
+
+int mv88e6390_g1_stats_set_histogram(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_g1_ctl2_mask(chip, MV88E6390_G1_CTL2_HIST_MODE_MASK,
+ MV88E6390_G1_CTL2_HIST_MODE_RX |
+ MV88E6390_G1_CTL2_HIST_MODE_TX);
+}
+
+int mv88e6xxx_g1_set_device_number(struct mv88e6xxx_chip *chip, int index)
+{
+ return mv88e6xxx_g1_ctl2_mask(chip,
+ MV88E6XXX_G1_CTL2_DEVICE_NUMBER_MASK,
+ index);
+}
+
+/* Offset 0x1d: Statistics Operation 2 */
+
+static int mv88e6xxx_g1_stats_wait(struct mv88e6xxx_chip *chip)
+{
+ int bit = __bf_shf(MV88E6XXX_G1_STATS_OP_BUSY);
+
+ return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STATS_OP, bit, 0);
+}
+
+int mv88e6095_g1_stats_set_histogram(struct mv88e6xxx_chip *chip)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STATS_OP, &val);
+ if (err)
+ return err;
+
+ val |= MV88E6XXX_G1_STATS_OP_HIST_RX_TX;
+
+ return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP, val);
+}
+
+int mv88e6xxx_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port)
+{
+ int err;
+
+ /* Snapshot the hardware statistics counters for this port. */
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP,
+ MV88E6XXX_G1_STATS_OP_BUSY |
+ MV88E6XXX_G1_STATS_OP_CAPTURE_PORT |
+ MV88E6XXX_G1_STATS_OP_HIST_RX_TX | port);
+ if (err)
+ return err;
+
+ /* Wait for the snapshotting to complete. */
+ return mv88e6xxx_g1_stats_wait(chip);
+}
+
+int mv88e6320_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port)
+{
+ port = (port + 1) << 5;
+
+ return mv88e6xxx_g1_stats_snapshot(chip, port);
+}
+
+int mv88e6390_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port)
+{
+ int err;
+
+ port = (port + 1) << 5;
+
+ /* Snapshot the hardware statistics counters for this port. */
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP,
+ MV88E6XXX_G1_STATS_OP_BUSY |
+ MV88E6XXX_G1_STATS_OP_CAPTURE_PORT | port);
+ if (err)
+ return err;
+
+ /* Wait for the snapshotting to complete. */
+ return mv88e6xxx_g1_stats_wait(chip);
+}
+
+void mv88e6xxx_g1_stats_read(struct mv88e6xxx_chip *chip, int stat, u32 *val)
+{
+ u32 value;
+ u16 reg;
+ int err;
+
+ *val = 0;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP,
+ MV88E6XXX_G1_STATS_OP_BUSY |
+ MV88E6XXX_G1_STATS_OP_READ_CAPTURED | stat);
+ if (err)
+ return;
+
+ err = mv88e6xxx_g1_stats_wait(chip);
+ if (err)
+ return;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STATS_COUNTER_32, &reg);
+ if (err)
+ return;
+
+ value = reg << 16;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STATS_COUNTER_01, &reg);
+ if (err)
+ return;
+
+ *val = value | reg;
+}
+
+int mv88e6xxx_g1_stats_clear(struct mv88e6xxx_chip *chip)
+{
+ int err;
+ u16 val;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STATS_OP, &val);
+ if (err)
+ return err;
+
+ /* Keep the histogram mode bits */
+ val &= MV88E6XXX_G1_STATS_OP_HIST_RX_TX;
+ val |= MV88E6XXX_G1_STATS_OP_BUSY | MV88E6XXX_G1_STATS_OP_FLUSH_ALL;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP, val);
+ if (err)
+ return err;
+
+ /* Wait for the flush to complete. */
+ return mv88e6xxx_g1_stats_wait(chip);
+}
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
new file mode 100644
index 000000000..04b57a21f
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -0,0 +1,364 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Marvell 88E6xxx Switch Global (1) Registers support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2016-2017 Savoir-faire Linux Inc.
+ * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ */
+
+#ifndef _MV88E6XXX_GLOBAL1_H
+#define _MV88E6XXX_GLOBAL1_H
+
+#include "chip.h"
+
+/* Offset 0x00: Switch Global Status Register */
+#define MV88E6XXX_G1_STS 0x00
+#define MV88E6352_G1_STS_PPU_STATE 0x8000
+#define MV88E6185_G1_STS_PPU_STATE_MASK 0xc000
+#define MV88E6185_G1_STS_PPU_STATE_DISABLED_RST 0x0000
+#define MV88E6185_G1_STS_PPU_STATE_INITIALIZING 0x4000
+#define MV88E6185_G1_STS_PPU_STATE_DISABLED 0x8000
+#define MV88E6185_G1_STS_PPU_STATE_POLLING 0xc000
+#define MV88E6XXX_G1_STS_INIT_READY 0x0800
+#define MV88E6393X_G1_STS_IRQ_DEVICE_2 9
+#define MV88E6XXX_G1_STS_IRQ_AVB 8
+#define MV88E6XXX_G1_STS_IRQ_DEVICE 7
+#define MV88E6XXX_G1_STS_IRQ_STATS 6
+#define MV88E6XXX_G1_STS_IRQ_VTU_PROB 5
+#define MV88E6XXX_G1_STS_IRQ_VTU_DONE 4
+#define MV88E6XXX_G1_STS_IRQ_ATU_PROB 3
+#define MV88E6XXX_G1_STS_IRQ_ATU_DONE 2
+#define MV88E6XXX_G1_STS_IRQ_TCAM_DONE 1
+#define MV88E6XXX_G1_STS_IRQ_EEPROM_DONE 0
+
+/* Offset 0x01: Switch MAC Address Register Bytes 0 & 1
+ * Offset 0x02: Switch MAC Address Register Bytes 2 & 3
+ * Offset 0x03: Switch MAC Address Register Bytes 4 & 5
+ */
+#define MV88E6XXX_G1_MAC_01 0x01
+#define MV88E6XXX_G1_MAC_23 0x02
+#define MV88E6XXX_G1_MAC_45 0x03
+
+/* Offset 0x01: ATU FID Register */
+#define MV88E6352_G1_ATU_FID 0x01
+
+/* Offset 0x02: VTU FID Register */
+#define MV88E6352_G1_VTU_FID 0x02
+#define MV88E6352_G1_VTU_FID_VID_POLICY 0x1000
+#define MV88E6352_G1_VTU_FID_MASK 0x0fff
+
+/* Offset 0x03: VTU SID Register */
+#define MV88E6352_G1_VTU_SID 0x03
+#define MV88E6352_G1_VTU_SID_MASK 0x3f
+
+/* Offset 0x04: Switch Global Control Register */
+#define MV88E6XXX_G1_CTL1 0x04
+#define MV88E6XXX_G1_CTL1_SW_RESET 0x8000
+#define MV88E6XXX_G1_CTL1_PPU_ENABLE 0x4000
+#define MV88E6352_G1_CTL1_DISCARD_EXCESS 0x2000
+#define MV88E6185_G1_CTL1_SCHED_PRIO 0x0800
+#define MV88E6185_G1_CTL1_MAX_FRAME_1632 0x0400
+#define MV88E6185_G1_CTL1_RELOAD_EEPROM 0x0200
+#define MV88E6393X_G1_CTL1_DEVICE2_EN 0x0200
+#define MV88E6XXX_G1_CTL1_DEVICE_EN 0x0080
+#define MV88E6XXX_G1_CTL1_STATS_DONE_EN 0x0040
+#define MV88E6XXX_G1_CTL1_VTU_PROBLEM_EN 0x0020
+#define MV88E6XXX_G1_CTL1_VTU_DONE_EN 0x0010
+#define MV88E6XXX_G1_CTL1_ATU_PROBLEM_EN 0x0008
+#define MV88E6XXX_G1_CTL1_ATU_DONE_EN 0x0004
+#define MV88E6XXX_G1_CTL1_TCAM_EN 0x0002
+#define MV88E6XXX_G1_CTL1_EEPROM_DONE_EN 0x0001
+
+/* Offset 0x05: VTU Operation Register */
+#define MV88E6XXX_G1_VTU_OP 0x05
+#define MV88E6XXX_G1_VTU_OP_BUSY 0x8000
+#define MV88E6XXX_G1_VTU_OP_MASK 0x7000
+#define MV88E6XXX_G1_VTU_OP_FLUSH_ALL 0x1000
+#define MV88E6XXX_G1_VTU_OP_NOOP 0x2000
+#define MV88E6XXX_G1_VTU_OP_VTU_LOAD_PURGE 0x3000
+#define MV88E6XXX_G1_VTU_OP_VTU_GET_NEXT 0x4000
+#define MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE 0x5000
+#define MV88E6XXX_G1_VTU_OP_STU_GET_NEXT 0x6000
+#define MV88E6XXX_G1_VTU_OP_GET_CLR_VIOLATION 0x7000
+#define MV88E6XXX_G1_VTU_OP_MEMBER_VIOLATION BIT(6)
+#define MV88E6XXX_G1_VTU_OP_MISS_VIOLATION BIT(5)
+#define MV88E6XXX_G1_VTU_OP_SPID_MASK 0xf
+
+/* Offset 0x06: VTU VID Register */
+#define MV88E6XXX_G1_VTU_VID 0x06
+#define MV88E6XXX_G1_VTU_VID_MASK 0x0fff
+#define MV88E6390_G1_VTU_VID_PAGE 0x2000
+#define MV88E6XXX_G1_VTU_VID_VALID 0x1000
+
+/* Offset 0x07: VTU/STU Data Register 1
+ * Offset 0x08: VTU/STU Data Register 2
+ * Offset 0x09: VTU/STU Data Register 3
+ */
+#define MV88E6XXX_G1_VTU_DATA1 0x07
+#define MV88E6XXX_G1_VTU_DATA2 0x08
+#define MV88E6XXX_G1_VTU_DATA3 0x09
+#define MV88E6XXX_G1_VTU_STU_DATA_MASK 0x0003
+#define MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNMODIFIED 0x0000
+#define MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNTAGGED 0x0001
+#define MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_TAGGED 0x0002
+#define MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER 0x0003
+#define MV88E6XXX_G1_STU_DATA_PORT_STATE_DISABLED 0x0000
+#define MV88E6XXX_G1_STU_DATA_PORT_STATE_BLOCKING 0x0001
+#define MV88E6XXX_G1_STU_DATA_PORT_STATE_LEARNING 0x0002
+#define MV88E6XXX_G1_STU_DATA_PORT_STATE_FORWARDING 0x0003
+
+/* Offset 0x0A: ATU Control Register */
+#define MV88E6XXX_G1_ATU_CTL 0x0a
+#define MV88E6XXX_G1_ATU_CTL_LEARN2ALL 0x0008
+#define MV88E6161_G1_ATU_CTL_HASH_MASK 0x0003
+
+/* Offset 0x0B: ATU Operation Register */
+#define MV88E6XXX_G1_ATU_OP 0x0b
+#define MV88E6XXX_G1_ATU_OP_BUSY 0x8000
+#define MV88E6XXX_G1_ATU_OP_MASK 0x7000
+#define MV88E6XXX_G1_ATU_OP_NOOP 0x0000
+#define MV88E6XXX_G1_ATU_OP_FLUSH_MOVE_ALL 0x1000
+#define MV88E6XXX_G1_ATU_OP_FLUSH_MOVE_NON_STATIC 0x2000
+#define MV88E6XXX_G1_ATU_OP_LOAD_DB 0x3000
+#define MV88E6XXX_G1_ATU_OP_GET_NEXT_DB 0x4000
+#define MV88E6XXX_G1_ATU_OP_FLUSH_MOVE_ALL_DB 0x5000
+#define MV88E6XXX_G1_ATU_OP_FLUSH_MOVE_NON_STATIC_DB 0x6000
+#define MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION 0x7000
+#define MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION BIT(7)
+#define MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION BIT(6)
+#define MV88E6XXX_G1_ATU_OP_MISS_VIOLATION BIT(5)
+#define MV88E6XXX_G1_ATU_OP_FULL_VIOLATION BIT(4)
+
+/* Offset 0x0C: ATU Data Register */
+#define MV88E6XXX_G1_ATU_DATA 0x0c
+#define MV88E6XXX_G1_ATU_DATA_TRUNK 0x8000
+#define MV88E6XXX_G1_ATU_DATA_TRUNK_ID_MASK 0x00f0
+#define MV88E6XXX_G1_ATU_DATA_PORT_VECTOR_MASK 0x3ff0
+#define MV88E6XXX_G1_ATU_DATA_STATE_MASK 0x000f
+#define MV88E6XXX_G1_ATU_DATA_STATE_UC_UNUSED 0x0000
+#define MV88E6XXX_G1_ATU_DATA_STATE_UC_AGE_1_OLDEST 0x0001
+#define MV88E6XXX_G1_ATU_DATA_STATE_UC_AGE_2 0x0002
+#define MV88E6XXX_G1_ATU_DATA_STATE_UC_AGE_3 0x0003
+#define MV88E6XXX_G1_ATU_DATA_STATE_UC_AGE_4 0x0004
+#define MV88E6XXX_G1_ATU_DATA_STATE_UC_AGE_5 0x0005
+#define MV88E6XXX_G1_ATU_DATA_STATE_UC_AGE_6 0x0006
+#define MV88E6XXX_G1_ATU_DATA_STATE_UC_AGE_7_NEWEST 0x0007
+#define MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC_POLICY 0x0008
+#define MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC_POLICY_PO 0x0009
+#define MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC_AVB_NRL 0x000a
+#define MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC_AVB_NRL_PO 0x000b
+#define MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC_DA_MGMT 0x000c
+#define MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC_DA_MGMT_PO 0x000d
+#define MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC 0x000e
+#define MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC_PO 0x000f
+#define MV88E6XXX_G1_ATU_DATA_STATE_MC_UNUSED 0x0000
+#define MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC_POLICY 0x0004
+#define MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC_AVB_NRL 0x0005
+#define MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC_DA_MGMT 0x0006
+#define MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC 0x0007
+#define MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC_POLICY_PO 0x000c
+#define MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC_AVB_NRL_PO 0x000d
+#define MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC_DA_MGMT_PO 0x000e
+#define MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC_PO 0x000f
+
+/* Offset 0x0D: ATU MAC Address Register Bytes 0 & 1
+ * Offset 0x0E: ATU MAC Address Register Bytes 2 & 3
+ * Offset 0x0F: ATU MAC Address Register Bytes 4 & 5
+ */
+#define MV88E6XXX_G1_ATU_MAC01 0x0d
+#define MV88E6XXX_G1_ATU_MAC23 0x0e
+#define MV88E6XXX_G1_ATU_MAC45 0x0f
+
+/* Offset 0x10: IP-PRI Mapping Register 0
+ * Offset 0x11: IP-PRI Mapping Register 1
+ * Offset 0x12: IP-PRI Mapping Register 2
+ * Offset 0x13: IP-PRI Mapping Register 3
+ * Offset 0x14: IP-PRI Mapping Register 4
+ * Offset 0x15: IP-PRI Mapping Register 5
+ * Offset 0x16: IP-PRI Mapping Register 6
+ * Offset 0x17: IP-PRI Mapping Register 7
+ */
+#define MV88E6XXX_G1_IP_PRI_0 0x10
+#define MV88E6XXX_G1_IP_PRI_1 0x11
+#define MV88E6XXX_G1_IP_PRI_2 0x12
+#define MV88E6XXX_G1_IP_PRI_3 0x13
+#define MV88E6XXX_G1_IP_PRI_4 0x14
+#define MV88E6XXX_G1_IP_PRI_5 0x15
+#define MV88E6XXX_G1_IP_PRI_6 0x16
+#define MV88E6XXX_G1_IP_PRI_7 0x17
+
+/* Offset 0x18: IEEE-PRI Register */
+#define MV88E6XXX_G1_IEEE_PRI 0x18
+
+/* Offset 0x19: Core Tag Type */
+#define MV88E6185_G1_CORE_TAG_TYPE 0x19
+
+/* Offset 0x1A: Monitor Control */
+#define MV88E6185_G1_MONITOR_CTL 0x1a
+#define MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK 0xf000
+#define MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK 0x0f00
+#define MV88E6185_G1_MONITOR_CTL_ARP_DEST_MASK 0x00f0
+#define MV88E6352_G1_MONITOR_CTL_CPU_DEST_MASK 0x00f0
+#define MV88E6352_G1_MONITOR_CTL_MIRROR_DEST_MASK 0x000f
+
+/* Offset 0x1A: Monitor & MGMT Control Register */
+#define MV88E6390_G1_MONITOR_MGMT_CTL 0x1a
+#define MV88E6390_G1_MONITOR_MGMT_CTL_UPDATE 0x8000
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_MASK 0x3f00
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C200000XLO 0x0000
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C200000XHI 0x0100
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C200002XLO 0x0200
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_0180C200002XHI 0x0300
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST 0x2000
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST 0x2100
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST 0x3000
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI 0x00e0
+#define MV88E6390_G1_MONITOR_MGMT_CTL_DATA_MASK 0x00ff
+
+/* Offset 0x1C: Global Control 2 */
+#define MV88E6XXX_G1_CTL2 0x1c
+#define MV88E6185_G1_CTL2_CASCADE_PORT_MASK 0xf000
+#define MV88E6185_G1_CTL2_CASCADE_PORT_NONE 0xe000
+#define MV88E6185_G1_CTL2_CASCADE_PORT_MULTI 0xf000
+#define MV88E6352_G1_CTL2_HEADER_TYPE_MASK 0xc000
+#define MV88E6352_G1_CTL2_HEADER_TYPE_ORIG 0x0000
+#define MV88E6352_G1_CTL2_HEADER_TYPE_MGMT 0x4000
+#define MV88E6390_G1_CTL2_HEADER_TYPE_LAG 0x8000
+#define MV88E6352_G1_CTL2_RMU_MODE_MASK 0x3000
+#define MV88E6352_G1_CTL2_RMU_MODE_DISABLED 0x0000
+#define MV88E6352_G1_CTL2_RMU_MODE_PORT_4 0x1000
+#define MV88E6352_G1_CTL2_RMU_MODE_PORT_5 0x2000
+#define MV88E6352_G1_CTL2_RMU_MODE_PORT_6 0x3000
+#define MV88E6085_G1_CTL2_DA_CHECK 0x4000
+#define MV88E6085_G1_CTL2_P10RM 0x2000
+#define MV88E6085_G1_CTL2_RM_ENABLE 0x1000
+#define MV88E6352_G1_CTL2_DA_CHECK 0x0800
+#define MV88E6390_G1_CTL2_RMU_MODE_MASK 0x0700
+#define MV88E6390_G1_CTL2_RMU_MODE_PORT_0 0x0000
+#define MV88E6390_G1_CTL2_RMU_MODE_PORT_1 0x0100
+#define MV88E6390_G1_CTL2_RMU_MODE_PORT_9 0x0200
+#define MV88E6390_G1_CTL2_RMU_MODE_PORT_10 0x0300
+#define MV88E6390_G1_CTL2_RMU_MODE_ALL_DSA 0x0600
+#define MV88E6390_G1_CTL2_RMU_MODE_DISABLED 0x0700
+#define MV88E6390_G1_CTL2_HIST_MODE_MASK 0x00c0
+#define MV88E6390_G1_CTL2_HIST_MODE_RX 0x0040
+#define MV88E6390_G1_CTL2_HIST_MODE_TX 0x0080
+#define MV88E6352_G1_CTL2_CTR_MODE_MASK 0x0060
+#define MV88E6390_G1_CTL2_CTR_MODE 0x0020
+#define MV88E6XXX_G1_CTL2_DEVICE_NUMBER_MASK 0x001f
+
+/* Offset 0x1D: Stats Operation Register */
+#define MV88E6XXX_G1_STATS_OP 0x1d
+#define MV88E6XXX_G1_STATS_OP_BUSY 0x8000
+#define MV88E6XXX_G1_STATS_OP_NOP 0x0000
+#define MV88E6XXX_G1_STATS_OP_FLUSH_ALL 0x1000
+#define MV88E6XXX_G1_STATS_OP_FLUSH_PORT 0x2000
+#define MV88E6XXX_G1_STATS_OP_READ_CAPTURED 0x4000
+#define MV88E6XXX_G1_STATS_OP_CAPTURE_PORT 0x5000
+#define MV88E6XXX_G1_STATS_OP_HIST_RX 0x0400
+#define MV88E6XXX_G1_STATS_OP_HIST_TX 0x0800
+#define MV88E6XXX_G1_STATS_OP_HIST_RX_TX 0x0c00
+#define MV88E6XXX_G1_STATS_OP_BANK_1_BIT_9 0x0200
+#define MV88E6XXX_G1_STATS_OP_BANK_1_BIT_10 0x0400
+
+/* Offset 0x1E: Stats Counter Register Bytes 3 & 2
+ * Offset 0x1F: Stats Counter Register Bytes 1 & 0
+ */
+#define MV88E6XXX_G1_STATS_COUNTER_32 0x1e
+#define MV88E6XXX_G1_STATS_COUNTER_01 0x1f
+
+int mv88e6xxx_g1_read(struct mv88e6xxx_chip *chip, int reg, u16 *val);
+int mv88e6xxx_g1_write(struct mv88e6xxx_chip *chip, int reg, u16 val);
+int mv88e6xxx_g1_wait_bit(struct mv88e6xxx_chip *chip, int reg, int bit,
+ int val);
+int mv88e6xxx_g1_wait_mask(struct mv88e6xxx_chip *chip, int reg,
+ u16 mask, u16 val);
+
+int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
+
+int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip);
+int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip);
+int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip);
+
+int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip);
+int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip);
+
+int mv88e6185_g1_set_max_frame_size(struct mv88e6xxx_chip *chip, int mtu);
+
+int mv88e6xxx_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port);
+int mv88e6320_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port);
+int mv88e6390_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port);
+int mv88e6095_g1_stats_set_histogram(struct mv88e6xxx_chip *chip);
+int mv88e6390_g1_stats_set_histogram(struct mv88e6xxx_chip *chip);
+void mv88e6xxx_g1_stats_read(struct mv88e6xxx_chip *chip, int stat, u32 *val);
+int mv88e6xxx_g1_stats_clear(struct mv88e6xxx_chip *chip);
+int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip,
+ enum mv88e6xxx_egress_direction direction,
+ int port);
+int mv88e6390_g1_set_egress_port(struct mv88e6xxx_chip *chip,
+ enum mv88e6xxx_egress_direction direction,
+ int port);
+int mv88e6095_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port);
+int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port);
+int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip);
+
+int mv88e6085_g1_ip_pri_map(struct mv88e6xxx_chip *chip);
+
+int mv88e6085_g1_ieee_pri_map(struct mv88e6xxx_chip *chip);
+int mv88e6250_g1_ieee_pri_map(struct mv88e6xxx_chip *chip);
+
+int mv88e6185_g1_set_cascade_port(struct mv88e6xxx_chip *chip, int port);
+
+int mv88e6085_g1_rmu_disable(struct mv88e6xxx_chip *chip);
+int mv88e6352_g1_rmu_disable(struct mv88e6xxx_chip *chip);
+int mv88e6390_g1_rmu_disable(struct mv88e6xxx_chip *chip);
+
+int mv88e6xxx_g1_set_device_number(struct mv88e6xxx_chip *chip, int index);
+
+int mv88e6xxx_g1_atu_set_learn2all(struct mv88e6xxx_chip *chip, bool learn2all);
+int mv88e6xxx_g1_atu_set_age_time(struct mv88e6xxx_chip *chip,
+ unsigned int msecs);
+int mv88e6xxx_g1_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid,
+ struct mv88e6xxx_atu_entry *entry);
+int mv88e6xxx_g1_atu_loadpurge(struct mv88e6xxx_chip *chip, u16 fid,
+ struct mv88e6xxx_atu_entry *entry);
+int mv88e6xxx_g1_atu_flush(struct mv88e6xxx_chip *chip, u16 fid, bool all);
+int mv88e6xxx_g1_atu_remove(struct mv88e6xxx_chip *chip, u16 fid, int port,
+ bool all);
+int mv88e6xxx_g1_atu_prob_irq_setup(struct mv88e6xxx_chip *chip);
+void mv88e6xxx_g1_atu_prob_irq_free(struct mv88e6xxx_chip *chip);
+int mv88e6165_g1_atu_get_hash(struct mv88e6xxx_chip *chip, u8 *hash);
+int mv88e6165_g1_atu_set_hash(struct mv88e6xxx_chip *chip, u8 hash);
+
+int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry);
+int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry);
+int mv88e6185_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry);
+int mv88e6352_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry);
+int mv88e6352_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry);
+int mv88e6390_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry);
+int mv88e6390_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry);
+int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip);
+int mv88e6xxx_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+int mv88e6352_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+int mv88e6352_g1_stu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+int mv88e6390_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+int mv88e6390_g1_stu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+int mv88e6xxx_g1_vtu_prob_irq_setup(struct mv88e6xxx_chip *chip);
+void mv88e6xxx_g1_vtu_prob_irq_free(struct mv88e6xxx_chip *chip);
+int mv88e6xxx_g1_atu_get_next(struct mv88e6xxx_chip *chip, u16 fid);
+
+#endif /* _MV88E6XXX_GLOBAL1_H */
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
new file mode 100644
index 000000000..7c513a037
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -0,0 +1,496 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Marvell 88E6xxx Address Translation Unit (ATU) support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ * Copyright (c) 2017 Savoir-faire Linux, Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+
+#include "chip.h"
+#include "global1.h"
+#include "trace.h"
+
+/* Offset 0x01: ATU FID Register */
+
+static int mv88e6xxx_g1_atu_fid_write(struct mv88e6xxx_chip *chip, u16 fid)
+{
+ return mv88e6xxx_g1_write(chip, MV88E6352_G1_ATU_FID, fid & 0xfff);
+}
+
+/* Offset 0x0A: ATU Control Register */
+
+int mv88e6xxx_g1_atu_set_learn2all(struct mv88e6xxx_chip *chip, bool learn2all)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_CTL, &val);
+ if (err)
+ return err;
+
+ if (learn2all)
+ val |= MV88E6XXX_G1_ATU_CTL_LEARN2ALL;
+ else
+ val &= ~MV88E6XXX_G1_ATU_CTL_LEARN2ALL;
+
+ return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_ATU_CTL, val);
+}
+
+int mv88e6xxx_g1_atu_set_age_time(struct mv88e6xxx_chip *chip,
+ unsigned int msecs)
+{
+ const unsigned int coeff = chip->info->age_time_coeff;
+ const unsigned int min = 0x01 * coeff;
+ const unsigned int max = 0xff * coeff;
+ u8 age_time;
+ u16 val;
+ int err;
+
+ if (msecs < min || msecs > max)
+ return -ERANGE;
+
+ /* Round to nearest multiple of coeff */
+ age_time = (msecs + coeff / 2) / coeff;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_CTL, &val);
+ if (err)
+ return err;
+
+ /* AgeTime is 11:4 bits */
+ val &= ~0xff0;
+ val |= age_time << 4;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_ATU_CTL, val);
+ if (err)
+ return err;
+
+ dev_dbg(chip->dev, "AgeTime set to 0x%02x (%d ms)\n", age_time,
+ age_time * coeff);
+
+ return 0;
+}
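+
+/* Worked example (assuming age_time_coeff == 15000, as on many chips):
+ * a requested age time of 300000 ms rounds to (300000 + 7500) / 15000 = 20,
+ * so 0x14 is programmed and the effective age time is exactly 300000 ms.
+ */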
+
+int mv88e6165_g1_atu_get_hash(struct mv88e6xxx_chip *chip, u8 *hash)
+{
+ int err;
+ u16 val;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_CTL, &val);
+ if (err)
+ return err;
+
+ *hash = val & MV88E6161_G1_ATU_CTL_HASH_MASK;
+
+ return 0;
+}
+
+int mv88e6165_g1_atu_set_hash(struct mv88e6xxx_chip *chip, u8 hash)
+{
+ int err;
+ u16 val;
+
+ if (hash & ~MV88E6161_G1_ATU_CTL_HASH_MASK)
+ return -EINVAL;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_CTL, &val);
+ if (err)
+ return err;
+
+ val &= ~MV88E6161_G1_ATU_CTL_HASH_MASK;
+ val |= hash;
+
+ return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_ATU_CTL, val);
+}
+
+/* Offset 0x0B: ATU Operation Register */
+
+static int mv88e6xxx_g1_atu_op_wait(struct mv88e6xxx_chip *chip)
+{
+ int bit = __bf_shf(MV88E6XXX_G1_ATU_OP_BUSY);
+
+ return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_ATU_OP, bit, 0);
+}
+
+static int mv88e6xxx_g1_read_atu_violation(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_ATU_OP,
+ MV88E6XXX_G1_ATU_OP_BUSY |
+ MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g1_atu_op_wait(chip);
+}
+
+static int mv88e6xxx_g1_atu_op(struct mv88e6xxx_chip *chip, u16 fid, u16 op)
+{
+ u16 val;
+ int err;
+
+ /* The FID bits are spread across several registers; where they live
+ * depends on how many databases the chip generation supports.
+ */
+ if (mv88e6xxx_num_databases(chip) > 256) {
+ err = mv88e6xxx_g1_atu_fid_write(chip, fid);
+ if (err)
+ return err;
+ } else {
+ if (mv88e6xxx_num_databases(chip) > 64) {
+ /* ATU DBNum[7:4] are located in ATU Control 15:12 */
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_CTL,
+ &val);
+ if (err)
+ return err;
+
+ val = (val & 0x0fff) | ((fid << 8) & 0xf000);
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_ATU_CTL,
+ val);
+ if (err)
+ return err;
+ } else if (mv88e6xxx_num_databases(chip) > 16) {
+ /* ATU DBNum[5:4] are located in ATU Operation 9:8 */
+ op |= (fid & 0x30) << 4;
+ }
+
+ /* ATU DBNum[3:0] are located in ATU Operation 3:0 */
+ op |= fid & 0xf;
+ }
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_ATU_OP,
+ MV88E6XXX_G1_ATU_OP_BUSY | op);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g1_atu_op_wait(chip);
+}
+
+int mv88e6xxx_g1_atu_get_next(struct mv88e6xxx_chip *chip, u16 fid)
+{
+ return mv88e6xxx_g1_atu_op(chip, fid, MV88E6XXX_G1_ATU_OP_GET_NEXT_DB);
+}
+
+static int mv88e6xxx_g1_atu_fid_read(struct mv88e6xxx_chip *chip, u16 *fid)
+{
+ u16 val = 0, upper = 0, op = 0;
+ int err;
+
+ if (mv88e6xxx_num_databases(chip) > 256) {
+ err = mv88e6xxx_g1_read(chip, MV88E6352_G1_ATU_FID, &val);
+ if (err)
+ return err;
+
+ val &= 0xfff;
+ } else {
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_OP, &op);
+ if (err)
+ return err;
+ if (mv88e6xxx_num_databases(chip) > 64) {
+ /* ATU DBNum[7:4] are located in ATU Control 15:12 */
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_CTL,
+ &upper);
+ if (err)
+ return err;
+
+ upper = (upper >> 8) & 0x00f0;
+ } else if (mv88e6xxx_num_databases(chip) > 16) {
+ /* ATU DBNum[5:4] are located in ATU Operation 9:8 */
+ upper = (op >> 4) & 0x30;
+ }
+
+ /* ATU DBNum[3:0] are located in ATU Operation 3:0 */
+ val = (op & 0xf) | upper;
+ }
+ *fid = val;
+
+ return err;
+}
+
+/* Offset 0x0C: ATU Data Register */
+
+static int mv88e6xxx_g1_atu_data_read(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_atu_entry *entry)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_DATA, &val);
+ if (err)
+ return err;
+
+ entry->state = val & 0xf;
+ if (entry->state) {
+ entry->trunk = !!(val & MV88E6XXX_G1_ATU_DATA_TRUNK);
+ entry->portvec = (val >> 4) & mv88e6xxx_port_mask(chip);
+ }
+
+ return 0;
+}
+
+static int mv88e6xxx_g1_atu_data_write(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_atu_entry *entry)
+{
+ u16 data = entry->state & 0xf;
+
+ if (entry->state) {
+ if (entry->trunk)
+ data |= MV88E6XXX_G1_ATU_DATA_TRUNK;
+
+ data |= (entry->portvec & mv88e6xxx_port_mask(chip)) << 4;
+ }
+
+ return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_ATU_DATA, data);
+}
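+
+/* Worked example (illustrative only): a static unicast entry (state
+ * 0xe) reachable through ports 0 and 2 packs into
+ * data = 0xe | ((BIT(0) | BIT(2)) << 4) = 0x005e, with the trunk bit
+ * clear.
+ */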
+
+/* Offset 0x0D: ATU MAC Address Register Bytes 0 & 1
+ * Offset 0x0E: ATU MAC Address Register Bytes 2 & 3
+ * Offset 0x0F: ATU MAC Address Register Bytes 4 & 5
+ */
+
+static int mv88e6xxx_g1_atu_mac_read(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_atu_entry *entry)
+{
+ u16 val;
+ int i, err;
+
+ for (i = 0; i < 3; i++) {
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_MAC01 + i, &val);
+ if (err)
+ return err;
+
+ entry->mac[i * 2] = val >> 8;
+ entry->mac[i * 2 + 1] = val & 0xff;
+ }
+
+ return 0;
+}
+
+static int mv88e6xxx_g1_atu_mac_write(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_atu_entry *entry)
+{
+ u16 val;
+ int i, err;
+
+ for (i = 0; i < 3; i++) {
+ val = (entry->mac[i * 2] << 8) | entry->mac[i * 2 + 1];
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_ATU_MAC01 + i, val);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
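+
+/* Worked example (illustrative only): 01:23:45:67:89:ab is written
+ * big-endian, two bytes per register: MAC01 = 0x0123, MAC23 = 0x4567,
+ * MAC45 = 0x89ab.
+ */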
+
+/* Address Translation Unit operations */
+
+int mv88e6xxx_g1_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid,
+ struct mv88e6xxx_atu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_atu_op_wait(chip);
+ if (err)
+ return err;
+
+ /* Write the MAC address to iterate from, but only once per walk */
+ if (!entry->state) {
+ err = mv88e6xxx_g1_atu_mac_write(chip, entry);
+ if (err)
+ return err;
+ }
+
+ err = mv88e6xxx_g1_atu_op(chip, fid, MV88E6XXX_G1_ATU_OP_GET_NEXT_DB);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_atu_data_read(chip, entry);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g1_atu_mac_read(chip, entry);
+}
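+
+/* Usage sketch (hypothetical caller, not part of this file): a full
+ * walk of one FID relies on the Null EntryState convention above,
+ * starting from the broadcast address so GetNext wraps to the lowest
+ * entry (assumes the etherdevice.h helpers):
+ *
+ *	struct mv88e6xxx_atu_entry entry;
+ *
+ *	entry.state = 0;
+ *	eth_broadcast_addr(entry.mac);
+ *	do {
+ *		err = mv88e6xxx_g1_atu_getnext(chip, fid, &entry);
+ *		if (err)
+ *			return err;
+ *		if (!entry.state)
+ *			break;
+ *		// ... consume entry ...
+ *	} while (!is_broadcast_ether_addr(entry.mac));
+ */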
+
+int mv88e6xxx_g1_atu_loadpurge(struct mv88e6xxx_chip *chip, u16 fid,
+ struct mv88e6xxx_atu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_atu_op_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_atu_mac_write(chip, entry);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_atu_data_write(chip, entry);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g1_atu_op(chip, fid, MV88E6XXX_G1_ATU_OP_LOAD_DB);
+}
+
+static int mv88e6xxx_g1_atu_flushmove(struct mv88e6xxx_chip *chip, u16 fid,
+ struct mv88e6xxx_atu_entry *entry,
+ bool all)
+{
+ u16 op;
+ int err;
+
+ err = mv88e6xxx_g1_atu_op_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_atu_data_write(chip, entry);
+ if (err)
+ return err;
+
+ /* Flush/Move all or non-static entries from all or a given database */
+ if (all && fid)
+ op = MV88E6XXX_G1_ATU_OP_FLUSH_MOVE_ALL_DB;
+ else if (fid)
+ op = MV88E6XXX_G1_ATU_OP_FLUSH_MOVE_NON_STATIC_DB;
+ else if (all)
+ op = MV88E6XXX_G1_ATU_OP_FLUSH_MOVE_ALL;
+ else
+ op = MV88E6XXX_G1_ATU_OP_FLUSH_MOVE_NON_STATIC;
+
+ return mv88e6xxx_g1_atu_op(chip, fid, op);
+}
+
+int mv88e6xxx_g1_atu_flush(struct mv88e6xxx_chip *chip, u16 fid, bool all)
+{
+ struct mv88e6xxx_atu_entry entry = {
+ .state = 0, /* Null EntryState means Flush */
+ };
+
+ return mv88e6xxx_g1_atu_flushmove(chip, fid, &entry, all);
+}
+
+static int mv88e6xxx_g1_atu_move(struct mv88e6xxx_chip *chip, u16 fid,
+ int from_port, int to_port, bool all)
+{
+ struct mv88e6xxx_atu_entry entry = { 0 };
+ unsigned long mask;
+ int shift;
+
+ if (!chip->info->atu_move_port_mask)
+ return -EOPNOTSUPP;
+
+ mask = chip->info->atu_move_port_mask;
+ shift = bitmap_weight(&mask, 16);
+
+ entry.state = 0xf; /* Full EntryState means Move */
+ entry.portvec = from_port & mask;
+ entry.portvec |= (to_port & mask) << shift;
+
+ return mv88e6xxx_g1_atu_flushmove(chip, fid, &entry, all);
+}
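+
+/* Worked example (illustrative only): with atu_move_port_mask = 0x1f,
+ * shift = 5, so moving from port 2 to port 3 encodes
+ * entry.portvec = 2 | (3 << 5) = 0x62. A destination of 0x1f (the full
+ * mask, as used by mv88e6xxx_g1_atu_remove below) tells the hardware
+ * to discard the entries instead.
+ */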
+
+int mv88e6xxx_g1_atu_remove(struct mv88e6xxx_chip *chip, u16 fid, int port,
+ bool all)
+{
+ int from_port = port;
+ int to_port = chip->info->atu_move_port_mask;
+
+ return mv88e6xxx_g1_atu_move(chip, fid, from_port, to_port, all);
+}
+
+static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
+{
+ struct mv88e6xxx_chip *chip = dev_id;
+ struct mv88e6xxx_atu_entry entry;
+ int err, spid;
+ u16 val, fid;
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_g1_read_atu_violation(chip);
+ if (err)
+ goto out;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_OP, &val);
+ if (err)
+ goto out;
+
+ err = mv88e6xxx_g1_atu_fid_read(chip, &fid);
+ if (err)
+ goto out;
+
+ err = mv88e6xxx_g1_atu_data_read(chip, &entry);
+ if (err)
+ goto out;
+
+ err = mv88e6xxx_g1_atu_mac_read(chip, &entry);
+ if (err)
+ goto out;
+
+ /* On a violation, the EntryState bits hold the source port ID */
+ spid = entry.state;
+
+ if (val & MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION) {
+ dev_err_ratelimited(chip->dev,
+ "ATU age out violation for %pM\n",
+ entry.mac);
+ }
+
+ if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) {
+ trace_mv88e6xxx_atu_member_violation(chip->dev, spid,
+ entry.portvec, entry.mac,
+ fid);
+ chip->ports[spid].atu_member_violation++;
+ }
+
+ if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) {
+ trace_mv88e6xxx_atu_miss_violation(chip->dev, spid,
+ entry.portvec, entry.mac,
+ fid);
+ chip->ports[spid].atu_miss_violation++;
+ }
+
+ if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) {
+ trace_mv88e6xxx_atu_full_violation(chip->dev, spid,
+ entry.portvec, entry.mac,
+ fid);
+ chip->ports[spid].atu_full_violation++;
+ }
+ mv88e6xxx_reg_unlock(chip);
+
+ return IRQ_HANDLED;
+
+out:
+ mv88e6xxx_reg_unlock(chip);
+
+ dev_err(chip->dev, "ATU problem: error %d while handling interrupt\n",
+ err);
+ return IRQ_HANDLED;
+}
+
+int mv88e6xxx_g1_atu_prob_irq_setup(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ chip->atu_prob_irq = irq_find_mapping(chip->g1_irq.domain,
+ MV88E6XXX_G1_STS_IRQ_ATU_PROB);
+ if (chip->atu_prob_irq < 0)
+ return chip->atu_prob_irq;
+
+ snprintf(chip->atu_prob_irq_name, sizeof(chip->atu_prob_irq_name),
+ "mv88e6xxx-%s-g1-atu-prob", dev_name(chip->dev));
+
+ err = request_threaded_irq(chip->atu_prob_irq, NULL,
+ mv88e6xxx_g1_atu_prob_irq_thread_fn,
+ IRQF_ONESHOT, chip->atu_prob_irq_name,
+ chip);
+ if (err)
+ irq_dispose_mapping(chip->atu_prob_irq);
+
+ return err;
+}
+
+void mv88e6xxx_g1_atu_prob_irq_free(struct mv88e6xxx_chip *chip)
+{
+ free_irq(chip->atu_prob_irq, chip);
+ irq_dispose_mapping(chip->atu_prob_irq);
+}
diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
new file mode 100644
index 000000000..bcfb4a812
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
@@ -0,0 +1,680 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Marvell 88E6xxx VLAN [Spanning Tree] Translation Unit (VTU [STU]) support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ * Copyright (c) 2015 CMC Electronics, Inc.
+ * Copyright (c) 2017 Savoir-faire Linux, Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+
+#include "chip.h"
+#include "global1.h"
+#include "trace.h"
+
+/* Offset 0x02: VTU FID Register */
+
+static int mv88e6xxx_g1_vtu_fid_read(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6352_G1_VTU_FID, &val);
+ if (err)
+ return err;
+
+ entry->fid = val & MV88E6352_G1_VTU_FID_MASK;
+ entry->policy = !!(val & MV88E6352_G1_VTU_FID_VID_POLICY);
+ return 0;
+}
+
+static int mv88e6xxx_g1_vtu_fid_write(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ u16 val = entry->fid & MV88E6352_G1_VTU_FID_MASK;
+
+ if (entry->policy)
+ val |= MV88E6352_G1_VTU_FID_VID_POLICY;
+
+ return mv88e6xxx_g1_write(chip, MV88E6352_G1_VTU_FID, val);
+}
+
+/* Offset 0x03: VTU SID Register */
+
+static int mv88e6xxx_g1_vtu_sid_read(struct mv88e6xxx_chip *chip, u8 *sid)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6352_G1_VTU_SID, &val);
+ if (err)
+ return err;
+
+ *sid = val & MV88E6352_G1_VTU_SID_MASK;
+
+ return 0;
+}
+
+static int mv88e6xxx_g1_vtu_sid_write(struct mv88e6xxx_chip *chip, u8 sid)
+{
+ u16 val = sid & MV88E6352_G1_VTU_SID_MASK;
+
+ return mv88e6xxx_g1_write(chip, MV88E6352_G1_VTU_SID, val);
+}
+
+/* Offset 0x05: VTU Operation Register */
+
+static int mv88e6xxx_g1_vtu_op_wait(struct mv88e6xxx_chip *chip)
+{
+ int bit = __bf_shf(MV88E6XXX_G1_VTU_OP_BUSY);
+
+ return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_VTU_OP, bit, 0);
+}
+
+static int mv88e6xxx_g1_vtu_op(struct mv88e6xxx_chip *chip, u16 op)
+{
+ int err;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_VTU_OP,
+ MV88E6XXX_G1_VTU_OP_BUSY | op);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g1_vtu_op_wait(chip);
+}
+
+/* Offset 0x06: VTU VID Register */
+
+static int mv88e6xxx_g1_vtu_vid_read(struct mv88e6xxx_chip *chip,
+ bool *valid, u16 *vid)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_VID, &val);
+ if (err)
+ return err;
+
+ if (vid) {
+ *vid = val & 0xfff;
+
+ if (val & MV88E6390_G1_VTU_VID_PAGE)
+ *vid |= 0x1000;
+ }
+
+ if (valid)
+ *valid = !!(val & MV88E6XXX_G1_VTU_VID_VALID);
+
+ return 0;
+}
+
+static int mv88e6xxx_g1_vtu_vid_write(struct mv88e6xxx_chip *chip,
+ bool valid, u16 vid)
+{
+ u16 val = vid & 0xfff;
+
+ if (vid & 0x1000)
+ val |= MV88E6390_G1_VTU_VID_PAGE;
+
+ if (valid)
+ val |= MV88E6XXX_G1_VTU_VID_VALID;
+
+ return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_VTU_VID, val);
+}
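+
+/* Worked example (illustrative only): on chips with 13-bit VIDs,
+ * vid = 0x1234 is written as val = 0x234 with the
+ * MV88E6390_G1_VTU_VID_PAGE bit set, while vid = 0x0234 leaves the
+ * page bit clear.
+ */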
+
+/* Offset 0x07: VTU/STU Data Register 1
+ * Offset 0x08: VTU/STU Data Register 2
+ * Offset 0x09: VTU/STU Data Register 3
+ */
+static int mv88e6185_g1_vtu_stu_data_read(struct mv88e6xxx_chip *chip,
+ u16 *regs)
+{
+ int i;
+
+ /* Read all 3 VTU/STU Data registers */
+ for (i = 0; i < 3; ++i) {
+ u16 *reg = &regs[i];
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA1 + i, reg);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip,
+ u8 *member, u8 *state)
+{
+ u16 regs[3];
+ int err;
+ int i;
+
+ err = mv88e6185_g1_vtu_stu_data_read(chip, regs);
+ if (err)
+ return err;
+
+ /* Extract MemberTag data */
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
+ unsigned int member_offset = (i % 4) * 4;
+ unsigned int state_offset = member_offset + 2;
+
+ if (member)
+ member[i] = (regs[i / 4] >> member_offset) & 0x3;
+
+ if (state)
+ state[i] = (regs[i / 4] >> state_offset) & 0x3;
+ }
+
+ return 0;
+}
+
+static int mv88e6185_g1_vtu_data_write(struct mv88e6xxx_chip *chip,
+ u8 *member, u8 *state)
+{
+ u16 regs[3] = { 0 };
+ int i;
+
+ /* Insert MemberTag and PortState data */
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
+ unsigned int member_offset = (i % 4) * 4;
+ unsigned int state_offset = member_offset + 2;
+
+ if (member)
+ regs[i / 4] |= (member[i] & 0x3) << member_offset;
+
+ if (state)
+ regs[i / 4] |= (state[i] & 0x3) << state_offset;
+ }
+
+ /* Write all 3 VTU/STU Data registers */
+ for (i = 0; i < 3; ++i) {
+ u16 reg = regs[i];
+ int err;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_VTU_DATA1 + i, reg);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
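+
+/* Worked example (illustrative only): each port occupies a 4-bit
+ * field, four ports per register, so port 5 lands in regs[1] with
+ * MemberTag in bits 5:4 and PortState in bits 7:6
+ * (member_offset = (5 % 4) * 4 = 4, state_offset = 6).
+ */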
+
+static int mv88e6390_g1_vtu_data_read(struct mv88e6xxx_chip *chip, u8 *data)
+{
+ u16 regs[2];
+ int i;
+
+ /* Read the 2 VTU/STU Data registers */
+ for (i = 0; i < 2; ++i) {
+ u16 *reg = &regs[i];
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA1 + i, reg);
+ if (err)
+ return err;
+ }
+
+ /* Extract data */
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
+ unsigned int offset = (i % 8) * 2;
+
+ data[i] = (regs[i / 8] >> offset) & 0x3;
+ }
+
+ return 0;
+}
+
+static int mv88e6390_g1_vtu_data_write(struct mv88e6xxx_chip *chip, u8 *data)
+{
+ u16 regs[2] = { 0 };
+ int i;
+
+ /* Insert data */
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
+ unsigned int offset = (i % 8) * 2;
+
+ regs[i / 8] |= (data[i] & 0x3) << offset;
+ }
+
+ /* Write the 2 VTU/STU Data registers */
+ for (i = 0; i < 2; ++i) {
+ u16 reg = regs[i];
+ int err;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_VTU_DATA1 + i, reg);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
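+
+/* Worked example (illustrative only): here each port occupies a 2-bit
+ * field, eight ports per register, so port 9 maps to regs[1] bits 3:2
+ * (offset = (9 % 8) * 2 = 2).
+ */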
+
+/* VLAN Translation Unit Operations */
+
+int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_vtu_op_wait(chip);
+ if (err)
+ return err;
+
+ /* To get the next higher active VID, the VTU GetNext operation can be
+ * started again without rewriting the VID register, since it still
+ * holds the last VID read.
+ *
+ * To save a few hardware accesses and abstract this from the caller,
+ * write the VID only once, when the entry is given as invalid.
+ */
+ if (!entry->valid) {
+ err = mv88e6xxx_g1_vtu_vid_write(chip, false, entry->vid);
+ if (err)
+ return err;
+ }
+
+ err = mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_VTU_GET_NEXT);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g1_vtu_vid_read(chip, &entry->valid, &entry->vid);
+}
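+
+/* Usage sketch (hypothetical caller, not part of this file): a VTU
+ * walk uses the invalid-entry convention above, starting from the
+ * highest VID so GetNext wraps around to the lowest active one
+ * (mv88e6xxx_max_vid() is an assumed helper):
+ *
+ *	struct mv88e6xxx_vtu_entry vlan;
+ *
+ *	vlan.vid = mv88e6xxx_max_vid(chip);
+ *	vlan.valid = false;
+ *	do {
+ *		err = mv88e6xxx_g1_vtu_getnext(chip, &vlan);
+ *		if (err)
+ *			return err;
+ *		if (!vlan.valid)
+ *			break;
+ *		// ... consume vlan ...
+ *	} while (vlan.vid < mv88e6xxx_max_vid(chip));
+ */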
+
+int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_vtu_getnext(chip, entry);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ err = mv88e6185_g1_vtu_data_read(chip, entry->member, entry->state);
+ if (err)
+ return err;
+
+ /* VTU DBNum[3:0] are located in VTU Operation 3:0
+ * VTU DBNum[7:4] ([5:4] for 6250) are located in VTU Operation 11:8 (9:8)
+ */
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_OP, &val);
+ if (err)
+ return err;
+
+ entry->fid = val & 0x000f;
+ entry->fid |= (val & 0x0f00) >> 4;
+ entry->fid &= mv88e6xxx_num_databases(chip) - 1;
+ }
+
+ return 0;
+}
+
+int mv88e6352_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ int err;
+
+ /* Fetch VLAN MemberTag data from the VTU */
+ err = mv88e6xxx_g1_vtu_getnext(chip, entry);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ err = mv88e6185_g1_vtu_data_read(chip, entry->member, NULL);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_fid_read(chip, entry);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_sid_read(chip, &entry->sid);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int mv88e6390_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ int err;
+
+ /* Fetch VLAN MemberTag data from the VTU */
+ err = mv88e6xxx_g1_vtu_getnext(chip, entry);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ err = mv88e6390_g1_vtu_data_read(chip, entry->member);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_fid_read(chip, entry);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_sid_read(chip, &entry->sid);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int mv88e6185_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ u16 op = MV88E6XXX_G1_VTU_OP_VTU_LOAD_PURGE;
+ int err;
+
+ err = mv88e6xxx_g1_vtu_op_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, entry->vid);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ err = mv88e6185_g1_vtu_data_write(chip, entry->member, entry->state);
+ if (err)
+ return err;
+
+ /* VTU DBNum[3:0] are located in VTU Operation 3:0
+ * VTU DBNum[7:4] are located in VTU Operation 11:8
+ *
+ * For the 6250/6220, the latter are really [5:4] and
+ * 9:8, but in those cases bits 7:6 of entry->fid are
+ * 0 since they have num_databases = 64.
+ */
+ op |= entry->fid & 0x000f;
+ op |= (entry->fid & 0x00f0) << 4;
+ }
+
+ return mv88e6xxx_g1_vtu_op(chip, op);
+}
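+
+/* Worked example (illustrative only): with entry->fid = 0xa5 on a
+ * 256-database chip, the op word carries DBNum[3:0] = 0x5 in bits 3:0
+ * and DBNum[7:4] = 0xa in bits 11:8, since (0xa0 << 4) = 0xa00.
+ */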
+
+int mv88e6352_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_vtu_op_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, entry->vid);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ /* Write MemberTag data */
+ err = mv88e6185_g1_vtu_data_write(chip, entry->member, NULL);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_fid_write(chip, entry);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
+ if (err)
+ return err;
+ }
+
+ /* Load/Purge VTU entry */
+ return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_VTU_LOAD_PURGE);
+}
+
+int mv88e6390_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_vtu_op_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, entry->vid);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ /* Write MemberTag data */
+ err = mv88e6390_g1_vtu_data_write(chip, entry->member);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_fid_write(chip, entry);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
+ if (err)
+ return err;
+ }
+
+ /* Load/Purge VTU entry */
+ return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_VTU_LOAD_PURGE);
+}
+
+int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ err = mv88e6xxx_g1_vtu_op_wait(chip);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_FLUSH_ALL);
+}
+
+/* Spanning Tree Unit Operations */
+
+int mv88e6xxx_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_vtu_op_wait(chip);
+ if (err)
+ return err;
+
+ /* To get the next higher active SID, the STU GetNext operation can be
+ * started again without rewriting the SID register, since it still
+ * holds the last SID read.
+ *
+ * To save a few hardware accesses and abstract this from the caller,
+ * write the SID only once, when the entry is given as invalid.
+ */
+ if (!entry->valid) {
+ err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
+ if (err)
+ return err;
+ }
+
+ err = mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_STU_GET_NEXT);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_vid_read(chip, &entry->valid, NULL);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ err = mv88e6xxx_g1_vtu_sid_read(chip, &entry->sid);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int mv88e6352_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_stu_getnext(chip, entry);
+ if (err)
+ return err;
+
+ if (!entry->valid)
+ return 0;
+
+ return mv88e6185_g1_vtu_data_read(chip, NULL, entry->state);
+}
+
+int mv88e6390_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_stu_getnext(chip, entry);
+ if (err)
+ return err;
+
+ if (!entry->valid)
+ return 0;
+
+ return mv88e6390_g1_vtu_data_read(chip, entry->state);
+}
+
+int mv88e6352_g1_stu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_vtu_op_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, 0);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ err = mv88e6185_g1_vtu_data_write(chip, NULL, entry->state);
+ if (err)
+ return err;
+ }
+
+ /* Load/Purge STU entry */
+ return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE);
+}
+
+int mv88e6390_g1_stu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_vtu_op_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, 0);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ err = mv88e6390_g1_vtu_data_write(chip, entry->state);
+ if (err)
+ return err;
+ }
+
+ /* Load/Purge STU entry */
+ return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE);
+}
+
+/* VTU Violation Management */
+
+static irqreturn_t mv88e6xxx_g1_vtu_prob_irq_thread_fn(int irq, void *dev_id)
+{
+ struct mv88e6xxx_chip *chip = dev_id;
+ u16 val, vid;
+ int spid;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_GET_CLR_VIOLATION);
+ if (err)
+ goto out;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_OP, &val);
+ if (err)
+ goto out;
+
+ err = mv88e6xxx_g1_vtu_vid_read(chip, NULL, &vid);
+ if (err)
+ goto out;
+
+ spid = val & MV88E6XXX_G1_VTU_OP_SPID_MASK;
+
+ if (val & MV88E6XXX_G1_VTU_OP_MEMBER_VIOLATION) {
+ trace_mv88e6xxx_vtu_member_violation(chip->dev, spid, vid);
+ chip->ports[spid].vtu_member_violation++;
+ }
+
+ if (val & MV88E6XXX_G1_VTU_OP_MISS_VIOLATION) {
+ trace_mv88e6xxx_vtu_miss_violation(chip->dev, spid, vid);
+ chip->ports[spid].vtu_miss_violation++;
+ }
+
+ mv88e6xxx_reg_unlock(chip);
+
+ return IRQ_HANDLED;
+
+out:
+ mv88e6xxx_reg_unlock(chip);
+
+ dev_err(chip->dev, "VTU problem: error %d while handling interrupt\n",
+ err);
+
+ return IRQ_HANDLED;
+}
+
+int mv88e6xxx_g1_vtu_prob_irq_setup(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ chip->vtu_prob_irq = irq_find_mapping(chip->g1_irq.domain,
+ MV88E6XXX_G1_STS_IRQ_VTU_PROB);
+ if (chip->vtu_prob_irq < 0)
+ return chip->vtu_prob_irq;
+
+ snprintf(chip->vtu_prob_irq_name, sizeof(chip->vtu_prob_irq_name),
+ "mv88e6xxx-%s-g1-vtu-prob", dev_name(chip->dev));
+
+ err = request_threaded_irq(chip->vtu_prob_irq, NULL,
+ mv88e6xxx_g1_vtu_prob_irq_thread_fn,
+ IRQF_ONESHOT, chip->vtu_prob_irq_name,
+ chip);
+ if (err)
+ irq_dispose_mapping(chip->vtu_prob_irq);
+
+ return err;
+}
+
+void mv88e6xxx_g1_vtu_prob_irq_free(struct mv88e6xxx_chip *chip)
+{
+ free_irq(chip->vtu_prob_irq, chip);
+ irq_dispose_mapping(chip->vtu_prob_irq);
+}
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
new file mode 100644
index 000000000..ac302a935
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -0,0 +1,1214 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Marvell 88E6xxx Switch Global 2 Registers support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2016-2017 Savoir-faire Linux Inc.
+ * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+
+#include "chip.h"
+#include "global1.h" /* for MV88E6XXX_G1_STS_IRQ_DEVICE */
+#include "global2.h"
+
+int mv88e6xxx_g2_read(struct mv88e6xxx_chip *chip, int reg, u16 *val)
+{
+ return mv88e6xxx_read(chip, chip->info->global2_addr, reg, val);
+}
+
+int mv88e6xxx_g2_write(struct mv88e6xxx_chip *chip, int reg, u16 val)
+{
+ return mv88e6xxx_write(chip, chip->info->global2_addr, reg, val);
+}
+
+int mv88e6xxx_g2_wait_bit(struct mv88e6xxx_chip *chip, int reg,
+ int bit, int val)
+{
+ return mv88e6xxx_wait_bit(chip, chip->info->global2_addr, reg,
+ bit, val);
+}
+
+/* Offset 0x00: Interrupt Source Register */
+
+static int mv88e6xxx_g2_int_source(struct mv88e6xxx_chip *chip, u16 *src)
+{
+ /* Read (and clear most of) the Interrupt Source bits */
+ return mv88e6xxx_g2_read(chip, MV88E6XXX_G2_INT_SRC, src);
+}
+
+/* Offset 0x01: Interrupt Mask Register */
+
+static int mv88e6xxx_g2_int_mask(struct mv88e6xxx_chip *chip, u16 mask)
+{
+ return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_INT_MASK, mask);
+}
+
+/* Offset 0x02: Management Enable 2x */
+
+static int mv88e6xxx_g2_mgmt_enable_2x(struct mv88e6xxx_chip *chip, u16 en2x)
+{
+ return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_MGMT_EN_2X, en2x);
+}
+
+/* Offset 0x03: Management Enable 0x */
+
+static int mv88e6xxx_g2_mgmt_enable_0x(struct mv88e6xxx_chip *chip, u16 en0x)
+{
+ return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_MGMT_EN_0X, en0x);
+}
+
+/* Offset 0x05: Switch Management Register */
+
+static int mv88e6xxx_g2_switch_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip,
+ bool enable)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g2_read(chip, MV88E6XXX_G2_SWITCH_MGMT, &val);
+ if (err)
+ return err;
+
+ if (enable)
+ val |= MV88E6XXX_G2_SWITCH_MGMT_RSVD2CPU;
+ else
+ val &= ~MV88E6XXX_G2_SWITCH_MGMT_RSVD2CPU;
+
+ return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SWITCH_MGMT, val);
+}
+
+int mv88e6185_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ /* Consider the frames with reserved multicast destination
+ * addresses matching 01:80:c2:00:00:0x as MGMT.
+ */
+ err = mv88e6xxx_g2_mgmt_enable_0x(chip, 0xffff);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_switch_mgmt_rsvd2cpu(chip, true);
+}
+
+int mv88e6352_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ /* Consider the frames with reserved multicast destination
+ * addresses matching 01:80:c2:00:00:2x as MGMT.
+ */
+ err = mv88e6xxx_g2_mgmt_enable_2x(chip, 0xffff);
+ if (err)
+ return err;
+
+ return mv88e6185_g2_mgmt_rsvd2cpu(chip);
+}
+
+/* Offset 0x06: Device Mapping Table register */
+
+int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
+ int port)
+{
+ u16 val = (target << 8) | (port & 0x1f);
+ /* Modern chips use 5 bits to define a device mapping port,
+ * but bit 4 is reserved on older chips, so it is safe to use.
+ */
+
+ return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_DEVICE_MAPPING,
+ MV88E6XXX_G2_DEVICE_MAPPING_UPDATE | val);
+}
+
+/* Offset 0x07: Trunk Mask Table register */
+
+int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
+ bool hash, u16 mask)
+{
+ u16 val = (num << 12) | (mask & mv88e6xxx_port_mask(chip));
+
+ if (hash)
+ val |= MV88E6XXX_G2_TRUNK_MASK_HASH;
+
+ return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_TRUNK_MASK,
+ MV88E6XXX_G2_TRUNK_MASK_UPDATE | val);
+}
+
+/* Offset 0x08: Trunk Mapping Table register */
+
+int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id,
+ u16 map)
+{
+ const u16 port_mask = BIT(mv88e6xxx_num_ports(chip)) - 1;
+ u16 val = (id << 11) | (map & port_mask);
+
+ return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_TRUNK_MAPPING,
+ MV88E6XXX_G2_TRUNK_MAPPING_UPDATE | val);
+}
+
+int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip)
+{
+ const u16 port_mask = BIT(mv88e6xxx_num_ports(chip)) - 1;
+ int i, err;
+
+ /* Clear all eight possible Trunk Mask vectors */
+ for (i = 0; i < 8; ++i) {
+ err = mv88e6xxx_g2_trunk_mask_write(chip, i, false, port_mask);
+ if (err)
+ return err;
+ }
+
+ /* Clear all sixteen possible Trunk ID routing vectors */
+ for (i = 0; i < 16; ++i) {
+ err = mv88e6xxx_g2_trunk_mapping_write(chip, i, 0);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* Offset 0x09: Ingress Rate Command register
+ * Offset 0x0A: Ingress Rate Data register
+ */
+
+static int mv88e6xxx_g2_irl_wait(struct mv88e6xxx_chip *chip)
+{
+ int bit = __bf_shf(MV88E6XXX_G2_IRL_CMD_BUSY);
+
+ return mv88e6xxx_g2_wait_bit(chip, MV88E6XXX_G2_IRL_CMD, bit, 0);
+}
+
+static int mv88e6xxx_g2_irl_op(struct mv88e6xxx_chip *chip, u16 op, int port,
+ int res, int reg)
+{
+ int err;
+
+ err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_IRL_CMD,
+ MV88E6XXX_G2_IRL_CMD_BUSY | op | (port << 8) |
+ (res << 5) | reg);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_irl_wait(chip);
+}
+
+int mv88e6352_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port)
+{
+ return mv88e6xxx_g2_irl_op(chip, MV88E6352_G2_IRL_CMD_OP_INIT_ALL, port,
+ 0, 0);
+}
+
+int mv88e6390_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port)
+{
+ return mv88e6xxx_g2_irl_op(chip, MV88E6390_G2_IRL_CMD_OP_INIT_ALL, port,
+ 0, 0);
+}
+
+/* Offset 0x0B: Cross-chip Port VLAN (Addr) Register
+ * Offset 0x0C: Cross-chip Port VLAN Data Register
+ */
+
+static int mv88e6xxx_g2_pvt_op_wait(struct mv88e6xxx_chip *chip)
+{
+ int bit = __bf_shf(MV88E6XXX_G2_PVT_ADDR_BUSY);
+
+ return mv88e6xxx_g2_wait_bit(chip, MV88E6XXX_G2_PVT_ADDR, bit, 0);
+}
+
+static int mv88e6xxx_g2_pvt_op(struct mv88e6xxx_chip *chip, int src_dev,
+ int src_port, u16 op)
+{
+ int err;
+
+ /* 9-bit Cross-chip PVT pointer: with MV88E6XXX_G2_MISC_5_BIT_PORT
+ * cleared, source device is 5-bit, source port is 4-bit.
+ */
+ op |= MV88E6XXX_G2_PVT_ADDR_BUSY;
+ op |= (src_dev & 0x1f) << 4;
+ op |= (src_port & 0xf);
+
+ err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_PVT_ADDR, op);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_pvt_op_wait(chip);
+}
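+
+/* Worked example (illustrative only): in 4-bit port mode the 9-bit PVT
+ * pointer for src_dev = 3, src_port = 5 is (3 << 4) | 5 = 0x035,
+ * written along with the BUSY bit and the op code.
+ */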
+
+int mv88e6xxx_g2_pvt_read(struct mv88e6xxx_chip *chip, int src_dev,
+ int src_port, u16 *data)
+{
+ int err;
+
+ err = mv88e6xxx_g2_pvt_op_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_pvt_op(chip, src_dev, src_port,
+ MV88E6XXX_G2_PVT_ADDR_OP_READ);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_read(chip, MV88E6XXX_G2_PVT_DATA, data);
+}
+
+int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, int src_dev,
+ int src_port, u16 data)
+{
+ int err;
+
+ err = mv88e6xxx_g2_pvt_op_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_PVT_DATA, data);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_pvt_op(chip, src_dev, src_port,
+ MV88E6XXX_G2_PVT_ADDR_OP_WRITE_PVLAN);
+}
+
+/* Offset 0x0D: Switch MAC/WoL/WoF register */
+
+static int mv88e6xxx_g2_switch_mac_write(struct mv88e6xxx_chip *chip,
+ unsigned int pointer, u8 data)
+{
+ u16 val = (pointer << 8) | data;
+
+ return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SWITCH_MAC,
+ MV88E6XXX_G2_SWITCH_MAC_UPDATE | val);
+}
+
+int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
+{
+ int i, err;
+
+ for (i = 0; i < 6; i++) {
+ err = mv88e6xxx_g2_switch_mac_write(chip, i, addr[i]);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+/* Offset 0x0E: ATU Statistics */
+
+int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip, u16 kind, u16 bin)
+{
+ return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_ATU_STATS,
+ kind | bin);
+}
+
+int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip *chip, u16 *stats)
+{
+ return mv88e6xxx_g2_read(chip, MV88E6XXX_G2_ATU_STATS, stats);
+}
+
+/* Offset 0x0F: Priority Override Table */
+
+static int mv88e6xxx_g2_pot_write(struct mv88e6xxx_chip *chip, int pointer,
+ u8 data)
+{
+ u16 val = (pointer << 8) | (data & 0x7);
+
+ return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_PRIO_OVERRIDE,
+ MV88E6XXX_G2_PRIO_OVERRIDE_UPDATE | val);
+}
+
+int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip)
+{
+ int i, err;
+
+ /* Clear all sixteen possible Priority Override entries */
+ for (i = 0; i < 16; i++) {
+ err = mv88e6xxx_g2_pot_write(chip, i, 0);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+/* Offset 0x14: EEPROM Command
+ * Offset 0x15: EEPROM Data (for 16-bit data access)
+ * Offset 0x15: EEPROM Addr (for 8-bit data access)
+ */
+
+int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
+{
+ int bit = __bf_shf(MV88E6XXX_G2_EEPROM_CMD_BUSY);
+ int err;
+
+ err = mv88e6xxx_g2_wait_bit(chip, MV88E6XXX_G2_EEPROM_CMD, bit, 0);
+ if (err)
+ return err;
+
+ bit = __bf_shf(MV88E6XXX_G2_EEPROM_CMD_RUNNING);
+
+ return mv88e6xxx_g2_wait_bit(chip, MV88E6XXX_G2_EEPROM_CMD, bit, 0);
+}
+
+static int mv88e6xxx_g2_eeprom_cmd(struct mv88e6xxx_chip *chip, u16 cmd)
+{
+ int err;
+
+ err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_EEPROM_CMD,
+ MV88E6XXX_G2_EEPROM_CMD_BUSY | cmd);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_eeprom_wait(chip);
+}
+
+static int mv88e6xxx_g2_eeprom_read8(struct mv88e6xxx_chip *chip,
+ u16 addr, u8 *data)
+{
+ u16 cmd = MV88E6XXX_G2_EEPROM_CMD_OP_READ;
+ int err;
+
+ err = mv88e6xxx_g2_eeprom_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_write(chip, MV88E6390_G2_EEPROM_ADDR, addr);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_eeprom_cmd(chip, cmd);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_read(chip, MV88E6XXX_G2_EEPROM_CMD, &cmd);
+ if (err)
+ return err;
+
+ *data = cmd & 0xff;
+
+ return 0;
+}
+
+static int mv88e6xxx_g2_eeprom_write8(struct mv88e6xxx_chip *chip,
+ u16 addr, u8 data)
+{
+ u16 cmd = MV88E6XXX_G2_EEPROM_CMD_OP_WRITE |
+ MV88E6XXX_G2_EEPROM_CMD_WRITE_EN;
+ int err;
+
+ err = mv88e6xxx_g2_eeprom_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_write(chip, MV88E6390_G2_EEPROM_ADDR, addr);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_eeprom_cmd(chip, cmd | data);
+}
+
+static int mv88e6xxx_g2_eeprom_read16(struct mv88e6xxx_chip *chip,
+ u8 addr, u16 *data)
+{
+ u16 cmd = MV88E6XXX_G2_EEPROM_CMD_OP_READ | addr;
+ int err;
+
+ err = mv88e6xxx_g2_eeprom_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_eeprom_cmd(chip, cmd);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_read(chip, MV88E6352_G2_EEPROM_DATA, data);
+}
+
+static int mv88e6xxx_g2_eeprom_write16(struct mv88e6xxx_chip *chip,
+ u8 addr, u16 data)
+{
+ u16 cmd = MV88E6XXX_G2_EEPROM_CMD_OP_WRITE | addr;
+ int err;
+
+ err = mv88e6xxx_g2_eeprom_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_write(chip, MV88E6352_G2_EEPROM_DATA, data);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_eeprom_cmd(chip, cmd);
+}
+
+int mv88e6xxx_g2_get_eeprom8(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ unsigned int offset = eeprom->offset;
+ unsigned int len = eeprom->len;
+ int err;
+
+ eeprom->len = 0;
+
+ while (len) {
+ err = mv88e6xxx_g2_eeprom_read8(chip, offset, data);
+ if (err)
+ return err;
+
+ eeprom->len++;
+ offset++;
+ data++;
+ len--;
+ }
+
+ return 0;
+}
+
+int mv88e6xxx_g2_set_eeprom8(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ unsigned int offset = eeprom->offset;
+ unsigned int len = eeprom->len;
+ int err;
+
+ eeprom->len = 0;
+
+ while (len) {
+ err = mv88e6xxx_g2_eeprom_write8(chip, offset, *data);
+ if (err)
+ return err;
+
+ eeprom->len++;
+ offset++;
+ data++;
+ len--;
+ }
+
+ return 0;
+}
+
+int mv88e6xxx_g2_get_eeprom16(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ unsigned int offset = eeprom->offset;
+ unsigned int len = eeprom->len;
+ u16 val;
+ int err;
+
+ eeprom->len = 0;
+
+ if (offset & 1) {
+ err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val);
+ if (err)
+ return err;
+
+ *data++ = (val >> 8) & 0xff;
+
+ offset++;
+ len--;
+ eeprom->len++;
+ }
+
+ while (len >= 2) {
+ err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val);
+ if (err)
+ return err;
+
+ *data++ = val & 0xff;
+ *data++ = (val >> 8) & 0xff;
+
+ offset += 2;
+ len -= 2;
+ eeprom->len += 2;
+ }
+
+ if (len) {
+ err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val);
+ if (err)
+ return err;
+
+ *data++ = val & 0xff;
+
+ offset++;
+ len--;
+ eeprom->len++;
+ }
+
+ return 0;
+}
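+
+/* Worked example (illustrative only): a request with offset = 1 and
+ * len = 2 reads word 0 and returns its high byte, then reads word 1
+ * and returns its low byte, since the EEPROM is addressed as 16-bit
+ * little-endian words.
+ */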
+
+int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ unsigned int offset = eeprom->offset;
+ unsigned int len = eeprom->len;
+ u16 val;
+ int err;
+
+ /* Ensure the RO WriteEn bit is set */
+ err = mv88e6xxx_g2_read(chip, MV88E6XXX_G2_EEPROM_CMD, &val);
+ if (err)
+ return err;
+
+ if (!(val & MV88E6XXX_G2_EEPROM_CMD_WRITE_EN))
+ return -EROFS;
+
+ eeprom->len = 0;
+
+ if (offset & 1) {
+ err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val);
+ if (err)
+ return err;
+
+ val = (*data++ << 8) | (val & 0xff);
+
+ err = mv88e6xxx_g2_eeprom_write16(chip, offset >> 1, val);
+ if (err)
+ return err;
+
+ offset++;
+ len--;
+ eeprom->len++;
+ }
+
+ while (len >= 2) {
+ val = *data++;
+ val |= *data++ << 8;
+
+ err = mv88e6xxx_g2_eeprom_write16(chip, offset >> 1, val);
+ if (err)
+ return err;
+
+ offset += 2;
+ len -= 2;
+ eeprom->len += 2;
+ }
+
+ if (len) {
+ err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val);
+ if (err)
+ return err;
+
+ val = (val & 0xff00) | *data++;
+
+ err = mv88e6xxx_g2_eeprom_write16(chip, offset >> 1, val);
+ if (err)
+ return err;
+
+ offset++;
+ len--;
+ eeprom->len++;
+ }
+
+ return 0;
+}
+
+/* Offset 0x18: SMI PHY Command Register
+ * Offset 0x19: SMI PHY Data Register
+ */
+
+static int mv88e6xxx_g2_smi_phy_wait(struct mv88e6xxx_chip *chip)
+{
+ int bit = __bf_shf(MV88E6XXX_G2_SMI_PHY_CMD_BUSY);
+
+ return mv88e6xxx_g2_wait_bit(chip, MV88E6XXX_G2_SMI_PHY_CMD, bit, 0);
+}
+
+static int mv88e6xxx_g2_smi_phy_cmd(struct mv88e6xxx_chip *chip, u16 cmd)
+{
+ int err;
+
+ err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SMI_PHY_CMD,
+ MV88E6XXX_G2_SMI_PHY_CMD_BUSY | cmd);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_smi_phy_wait(chip);
+}
+
+static int mv88e6xxx_g2_smi_phy_access(struct mv88e6xxx_chip *chip,
+ bool external, bool c45, u16 op, int dev,
+ int reg)
+{
+ u16 cmd = op;
+
+ if (external)
+ cmd |= MV88E6390_G2_SMI_PHY_CMD_FUNC_EXTERNAL;
+ else
+ cmd |= MV88E6390_G2_SMI_PHY_CMD_FUNC_INTERNAL; /* empty mask */
+
+ if (c45)
+ cmd |= MV88E6XXX_G2_SMI_PHY_CMD_MODE_45; /* empty mask */
+ else
+ cmd |= MV88E6XXX_G2_SMI_PHY_CMD_MODE_22;
+
+ dev <<= __bf_shf(MV88E6XXX_G2_SMI_PHY_CMD_DEV_ADDR_MASK);
+ cmd |= dev & MV88E6XXX_G2_SMI_PHY_CMD_DEV_ADDR_MASK;
+ cmd |= reg & MV88E6XXX_G2_SMI_PHY_CMD_REG_ADDR_MASK;
+
+ return mv88e6xxx_g2_smi_phy_cmd(chip, cmd);
+}
+
+static int mv88e6xxx_g2_smi_phy_access_c22(struct mv88e6xxx_chip *chip,
+ bool external, u16 op, int dev,
+ int reg)
+{
+ return mv88e6xxx_g2_smi_phy_access(chip, external, false, op, dev, reg);
+}
+
+/* IEEE 802.3 Clause 22 Read Data Register */
+static int mv88e6xxx_g2_smi_phy_read_data_c22(struct mv88e6xxx_chip *chip,
+ bool external, int dev, int reg,
+ u16 *data)
+{
+ u16 op = MV88E6XXX_G2_SMI_PHY_CMD_OP_22_READ_DATA;
+ int err;
+
+ err = mv88e6xxx_g2_smi_phy_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_smi_phy_access_c22(chip, external, op, dev, reg);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_read(chip, MV88E6XXX_G2_SMI_PHY_DATA, data);
+}
+
+/* IEEE 802.3 Clause 22 Write Data Register */
+static int mv88e6xxx_g2_smi_phy_write_data_c22(struct mv88e6xxx_chip *chip,
+ bool external, int dev, int reg,
+ u16 data)
+{
+ u16 op = MV88E6XXX_G2_SMI_PHY_CMD_OP_22_WRITE_DATA;
+ int err;
+
+ err = mv88e6xxx_g2_smi_phy_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SMI_PHY_DATA, data);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_smi_phy_access_c22(chip, external, op, dev, reg);
+}
+
+static int mv88e6xxx_g2_smi_phy_access_c45(struct mv88e6xxx_chip *chip,
+ bool external, u16 op, int port,
+ int dev)
+{
+ return mv88e6xxx_g2_smi_phy_access(chip, external, true, op, port, dev);
+}
+
+/* IEEE 802.3 Clause 45 Write Address Register */
+static int mv88e6xxx_g2_smi_phy_write_addr_c45(struct mv88e6xxx_chip *chip,
+ bool external, int port, int dev,
+ int addr)
+{
+ u16 op = MV88E6XXX_G2_SMI_PHY_CMD_OP_45_WRITE_ADDR;
+ int err;
+
+ err = mv88e6xxx_g2_smi_phy_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SMI_PHY_DATA, addr);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_smi_phy_access_c45(chip, external, op, port, dev);
+}
+
+/* IEEE 802.3 Clause 45 Read Data Register */
+static int mv88e6xxx_g2_smi_phy_read_data_c45(struct mv88e6xxx_chip *chip,
+ bool external, int port, int dev,
+ u16 *data)
+{
+ u16 op = MV88E6XXX_G2_SMI_PHY_CMD_OP_45_READ_DATA;
+ int err;
+
+ err = mv88e6xxx_g2_smi_phy_access_c45(chip, external, op, port, dev);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_read(chip, MV88E6XXX_G2_SMI_PHY_DATA, data);
+}
+
+static int mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip,
+ bool external, int port, int reg,
+ u16 *data)
+{
+ int dev = (reg >> 16) & 0x1f;
+ int addr = reg & 0xffff;
+ int err;
+
+ err = mv88e6xxx_g2_smi_phy_write_addr_c45(chip, external, port, dev,
+ addr);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_smi_phy_read_data_c45(chip, external, port, dev,
+ data);
+}
+
+/* IEEE 802.3 Clause 45 Write Data Register */
+static int mv88e6xxx_g2_smi_phy_write_data_c45(struct mv88e6xxx_chip *chip,
+ bool external, int port, int dev,
+ u16 data)
+{
+ u16 op = MV88E6XXX_G2_SMI_PHY_CMD_OP_45_WRITE_DATA;
+ int err;
+
+ err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SMI_PHY_DATA, data);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_smi_phy_access_c45(chip, external, op, port, dev);
+}
+
+static int mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip,
+ bool external, int port, int reg,
+ u16 data)
+{
+ int dev = (reg >> 16) & 0x1f;
+ int addr = reg & 0xffff;
+ int err;
+
+ err = mv88e6xxx_g2_smi_phy_write_addr_c45(chip, external, port, dev,
+ addr);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_smi_phy_write_data_c45(chip, external, port, dev,
+ data);
+}
+
+int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
+ int addr, int reg, u16 *val)
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+ bool external = mdio_bus->external;
+
+ if (reg & MII_ADDR_C45)
+ return mv88e6xxx_g2_smi_phy_read_c45(chip, external, addr, reg,
+ val);
+
+ return mv88e6xxx_g2_smi_phy_read_data_c22(chip, external, addr, reg,
+ val);
+}
+
+int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
+ int addr, int reg, u16 val)
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+ bool external = mdio_bus->external;
+
+ if (reg & MII_ADDR_C45)
+ return mv88e6xxx_g2_smi_phy_write_c45(chip, external, addr, reg,
+ val);
+
+ return mv88e6xxx_g2_smi_phy_write_data_c22(chip, external, addr, reg,
+ val);
+}
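+
+/* Worked example (illustrative only): a Clause 45 access packs the MMD
+ * and register into one value, reg = MII_ADDR_C45 | (dev << 16) | addr,
+ * e.g. MMD 1 (PMA/PMD) register 0: the address is latched first with a
+ * WRITE_ADDR cycle, then the data is moved in a second cycle.
+ */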
+
+/* Offset 0x1B: Watchdog Control */
+static int mv88e6097_watchdog_action(struct mv88e6xxx_chip *chip, int irq)
+{
+ u16 reg;
+
+ mv88e6xxx_g2_read(chip, MV88E6352_G2_WDOG_CTL, &reg);
+
+ dev_info(chip->dev, "Watchdog event: 0x%04x", reg);
+
+ return IRQ_HANDLED;
+}
+
+static void mv88e6097_watchdog_free(struct mv88e6xxx_chip *chip)
+{
+ u16 reg;
+
+ mv88e6xxx_g2_read(chip, MV88E6352_G2_WDOG_CTL, &reg);
+
+ reg &= ~(MV88E6352_G2_WDOG_CTL_EGRESS_ENABLE |
+ MV88E6352_G2_WDOG_CTL_QC_ENABLE);
+
+ mv88e6xxx_g2_write(chip, MV88E6352_G2_WDOG_CTL, reg);
+}
+
+static int mv88e6097_watchdog_setup(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_g2_write(chip, MV88E6352_G2_WDOG_CTL,
+ MV88E6352_G2_WDOG_CTL_EGRESS_ENABLE |
+ MV88E6352_G2_WDOG_CTL_QC_ENABLE |
+ MV88E6352_G2_WDOG_CTL_SWRESET);
+}
+
+const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops = {
+ .irq_action = mv88e6097_watchdog_action,
+ .irq_setup = mv88e6097_watchdog_setup,
+ .irq_free = mv88e6097_watchdog_free,
+};
+
+static void mv88e6250_watchdog_free(struct mv88e6xxx_chip *chip)
+{
+ u16 reg;
+
+ mv88e6xxx_g2_read(chip, MV88E6250_G2_WDOG_CTL, &reg);
+
+ reg &= ~(MV88E6250_G2_WDOG_CTL_EGRESS_ENABLE |
+ MV88E6250_G2_WDOG_CTL_QC_ENABLE);
+
+ mv88e6xxx_g2_write(chip, MV88E6250_G2_WDOG_CTL, reg);
+}
+
+static int mv88e6250_watchdog_setup(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_g2_write(chip, MV88E6250_G2_WDOG_CTL,
+ MV88E6250_G2_WDOG_CTL_EGRESS_ENABLE |
+ MV88E6250_G2_WDOG_CTL_QC_ENABLE |
+ MV88E6250_G2_WDOG_CTL_SWRESET);
+}
+
+const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops = {
+ .irq_action = mv88e6097_watchdog_action,
+ .irq_setup = mv88e6250_watchdog_setup,
+ .irq_free = mv88e6250_watchdog_free,
+};
+
+static int mv88e6390_watchdog_setup(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_g2_write(chip, MV88E6390_G2_WDOG_CTL,
+ MV88E6390_G2_WDOG_CTL_UPDATE |
+ MV88E6390_G2_WDOG_CTL_PTR_INT_ENABLE |
+ MV88E6390_G2_WDOG_CTL_CUT_THROUGH |
+ MV88E6390_G2_WDOG_CTL_QUEUE_CONTROLLER |
+ MV88E6390_G2_WDOG_CTL_EGRESS |
+ MV88E6390_G2_WDOG_CTL_FORCE_IRQ);
+}
+
+static int mv88e6390_watchdog_action(struct mv88e6xxx_chip *chip, int irq)
+{
+ u16 reg;
+
+ mv88e6xxx_g2_write(chip, MV88E6390_G2_WDOG_CTL,
+ MV88E6390_G2_WDOG_CTL_PTR_EVENT);
+ mv88e6xxx_g2_read(chip, MV88E6390_G2_WDOG_CTL, &reg);
+
+ dev_info(chip->dev, "Watchdog event: 0x%04x",
+ reg & MV88E6390_G2_WDOG_CTL_DATA_MASK);
+
+ mv88e6xxx_g2_write(chip, MV88E6390_G2_WDOG_CTL,
+ MV88E6390_G2_WDOG_CTL_PTR_HISTORY);
+ mv88e6xxx_g2_read(chip, MV88E6390_G2_WDOG_CTL, &reg);
+
+ dev_info(chip->dev, "Watchdog history: 0x%04x",
+ reg & MV88E6390_G2_WDOG_CTL_DATA_MASK);
+
+ /* Trigger a software reset to try to recover the switch */
+ if (chip->info->ops->reset)
+ chip->info->ops->reset(chip);
+
+ mv88e6390_watchdog_setup(chip);
+
+ return IRQ_HANDLED;
+}
+
+static void mv88e6390_watchdog_free(struct mv88e6xxx_chip *chip)
+{
+ mv88e6xxx_g2_write(chip, MV88E6390_G2_WDOG_CTL,
+ MV88E6390_G2_WDOG_CTL_UPDATE |
+ MV88E6390_G2_WDOG_CTL_PTR_INT_ENABLE);
+}
+
+const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = {
+ .irq_action = mv88e6390_watchdog_action,
+ .irq_setup = mv88e6390_watchdog_setup,
+ .irq_free = mv88e6390_watchdog_free,
+};
+
+static int mv88e6393x_watchdog_action(struct mv88e6xxx_chip *chip, int irq)
+{
+ mv88e6390_watchdog_action(chip, irq);
+
+ /* Fix for clearing the force WD event bit.
+ * Unreleased erratum on mv88e6393x.
+ */
+ mv88e6xxx_g2_write(chip, MV88E6390_G2_WDOG_CTL,
+ MV88E6390_G2_WDOG_CTL_UPDATE |
+ MV88E6390_G2_WDOG_CTL_PTR_EVENT);
+
+ return IRQ_HANDLED;
+}
+
+const struct mv88e6xxx_irq_ops mv88e6393x_watchdog_ops = {
+ .irq_action = mv88e6393x_watchdog_action,
+ .irq_setup = mv88e6390_watchdog_setup,
+ .irq_free = mv88e6390_watchdog_free,
+};
+
+static irqreturn_t mv88e6xxx_g2_watchdog_thread_fn(int irq, void *dev_id)
+{
+ struct mv88e6xxx_chip *chip = dev_id;
+ irqreturn_t ret = IRQ_NONE;
+
+ mv88e6xxx_reg_lock(chip);
+ if (chip->info->ops->watchdog_ops->irq_action)
+ ret = chip->info->ops->watchdog_ops->irq_action(chip, irq);
+ mv88e6xxx_reg_unlock(chip);
+
+ return ret;
+}
+
+static void mv88e6xxx_g2_watchdog_free(struct mv88e6xxx_chip *chip)
+{
+ mv88e6xxx_reg_lock(chip);
+ if (chip->info->ops->watchdog_ops->irq_free)
+ chip->info->ops->watchdog_ops->irq_free(chip);
+ mv88e6xxx_reg_unlock(chip);
+
+ free_irq(chip->watchdog_irq, chip);
+ irq_dispose_mapping(chip->watchdog_irq);
+}
+
+static int mv88e6xxx_g2_watchdog_setup(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ chip->watchdog_irq = irq_find_mapping(chip->g2_irq.domain,
+ MV88E6XXX_G2_INT_SOURCE_WATCHDOG);
+ if (chip->watchdog_irq < 0)
+ return chip->watchdog_irq;
+
+ snprintf(chip->watchdog_irq_name, sizeof(chip->watchdog_irq_name),
+ "mv88e6xxx-%s-watchdog", dev_name(chip->dev));
+
+ err = request_threaded_irq(chip->watchdog_irq, NULL,
+ mv88e6xxx_g2_watchdog_thread_fn,
+ IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
+ chip->watchdog_irq_name, chip);
+ if (err)
+ return err;
+
+ mv88e6xxx_reg_lock(chip);
+ if (chip->info->ops->watchdog_ops->irq_setup)
+ err = chip->info->ops->watchdog_ops->irq_setup(chip);
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+/* Offset 0x1D: Misc Register */
+
+static int mv88e6xxx_g2_misc_5_bit_port(struct mv88e6xxx_chip *chip,
+ bool port_5_bit)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g2_read(chip, MV88E6XXX_G2_MISC, &val);
+ if (err)
+ return err;
+
+ if (port_5_bit)
+ val |= MV88E6XXX_G2_MISC_5_BIT_PORT;
+ else
+ val &= ~MV88E6XXX_G2_MISC_5_BIT_PORT;
+
+ return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_MISC, val);
+}
+
+int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_g2_misc_5_bit_port(chip, false);
+}
+
+static void mv88e6xxx_g2_irq_mask(struct irq_data *d)
+{
+ struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
+ unsigned int n = d->hwirq;
+
+ chip->g2_irq.masked |= (1 << n);
+}
+
+static void mv88e6xxx_g2_irq_unmask(struct irq_data *d)
+{
+ struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
+ unsigned int n = d->hwirq;
+
+ chip->g2_irq.masked &= ~(1 << n);
+}
+
+static irqreturn_t mv88e6xxx_g2_irq_thread_fn(int irq, void *dev_id)
+{
+ struct mv88e6xxx_chip *chip = dev_id;
+ unsigned int nhandled = 0;
+ unsigned int sub_irq;
+ unsigned int n;
+ int err;
+ u16 reg;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_g2_int_source(chip, &reg);
+ mv88e6xxx_reg_unlock(chip);
+ if (err)
+ goto out;
+
+ for (n = 0; n < 16; ++n) {
+ if (reg & (1 << n)) {
+ sub_irq = irq_find_mapping(chip->g2_irq.domain, n);
+ handle_nested_irq(sub_irq);
+ ++nhandled;
+ }
+ }
+out:
+ return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
+}
+
+static void mv88e6xxx_g2_irq_bus_lock(struct irq_data *d)
+{
+ struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
+
+ mv88e6xxx_reg_lock(chip);
+}
+
+static void mv88e6xxx_g2_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
+ int err;
+
+ err = mv88e6xxx_g2_int_mask(chip, ~chip->g2_irq.masked);
+ if (err)
+ dev_err(chip->dev, "failed to mask interrupts\n");
+
+ mv88e6xxx_reg_unlock(chip);
+}
+
+static const struct irq_chip mv88e6xxx_g2_irq_chip = {
+ .name = "mv88e6xxx-g2",
+ .irq_mask = mv88e6xxx_g2_irq_mask,
+ .irq_unmask = mv88e6xxx_g2_irq_unmask,
+ .irq_bus_lock = mv88e6xxx_g2_irq_bus_lock,
+ .irq_bus_sync_unlock = mv88e6xxx_g2_irq_bus_sync_unlock,
+};
+
+static int mv88e6xxx_g2_irq_domain_map(struct irq_domain *d,
+ unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct mv88e6xxx_chip *chip = d->host_data;
+
+ irq_set_chip_data(irq, d->host_data);
+ irq_set_chip_and_handler(irq, &chip->g2_irq.chip, handle_level_irq);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops mv88e6xxx_g2_irq_domain_ops = {
+ .map = mv88e6xxx_g2_irq_domain_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip)
+{
+ int irq, virq;
+
+ mv88e6xxx_g2_watchdog_free(chip);
+
+ free_irq(chip->device_irq, chip);
+ irq_dispose_mapping(chip->device_irq);
+
+ for (irq = 0; irq < 16; irq++) {
+ virq = irq_find_mapping(chip->g2_irq.domain, irq);
+ irq_dispose_mapping(virq);
+ }
+
+ irq_domain_remove(chip->g2_irq.domain);
+}
+
+int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
+{
+ int err, irq, virq;
+
+ chip->g2_irq.masked = ~0;
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_g2_int_mask(chip, ~chip->g2_irq.masked);
+ mv88e6xxx_reg_unlock(chip);
+ if (err)
+ return err;
+
+ chip->g2_irq.domain = irq_domain_add_simple(
+ chip->dev->of_node, 16, 0, &mv88e6xxx_g2_irq_domain_ops, chip);
+ if (!chip->g2_irq.domain)
+ return -ENOMEM;
+
+ for (irq = 0; irq < 16; irq++)
+ irq_create_mapping(chip->g2_irq.domain, irq);
+
+ chip->g2_irq.chip = mv88e6xxx_g2_irq_chip;
+
+ chip->device_irq = irq_find_mapping(chip->g1_irq.domain,
+ MV88E6XXX_G1_STS_IRQ_DEVICE);
+ if (chip->device_irq < 0) {
+ err = chip->device_irq;
+ goto out;
+ }
+
+ snprintf(chip->device_irq_name, sizeof(chip->device_irq_name),
+ "mv88e6xxx-%s-g2", dev_name(chip->dev));
+
+ err = request_threaded_irq(chip->device_irq, NULL,
+ mv88e6xxx_g2_irq_thread_fn,
+ IRQF_ONESHOT, chip->device_irq_name, chip);
+ if (err)
+ goto out;
+
+ return mv88e6xxx_g2_watchdog_setup(chip);
+
+out:
+ for (irq = 0; irq < 16; irq++) {
+ virq = irq_find_mapping(chip->g2_irq.domain, irq);
+ irq_dispose_mapping(virq);
+ }
+
+ irq_domain_remove(chip->g2_irq.domain);
+
+ return err;
+}
+
+int mv88e6xxx_g2_irq_mdio_setup(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus)
+{
+ int phy, irq, err, err_phy;
+
+ for (phy = 0; phy < chip->info->num_internal_phys; phy++) {
+ irq = irq_find_mapping(chip->g2_irq.domain, phy);
+ if (irq < 0) {
+ err = irq;
+ goto out;
+ }
+ bus->irq[chip->info->phy_base_addr + phy] = irq;
+ }
+ return 0;
+out:
+ err_phy = phy;
+
+ for (phy = 0; phy < err_phy; phy++)
+ irq_dispose_mapping(bus->irq[phy]);
+
+ return err;
+}
+
+void mv88e6xxx_g2_irq_mdio_free(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus)
+{
+ int phy;
+
+ for (phy = 0; phy < chip->info->num_internal_phys; phy++)
+ irq_dispose_mapping(bus->irq[phy]);
+}
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
new file mode 100644
index 000000000..751a6c988
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -0,0 +1,381 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Marvell 88E6xxx Switch Global 2 Registers support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2016-2017 Savoir-faire Linux Inc.
+ * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ */
+
+#ifndef _MV88E6XXX_GLOBAL2_H
+#define _MV88E6XXX_GLOBAL2_H
+
+#include "chip.h"
+
+/* Offset 0x00: Interrupt Source Register */
+#define MV88E6XXX_G2_INT_SRC 0x00
+#define MV88E6XXX_G2_INT_SRC_WDOG 0x8000
+#define MV88E6XXX_G2_INT_SRC_JAM_LIMIT 0x4000
+#define MV88E6XXX_G2_INT_SRC_DUPLEX_MISMATCH 0x2000
+#define MV88E6XXX_G2_INT_SRC_WAKE_EVENT 0x1000
+#define MV88E6352_G2_INT_SRC_SERDES 0x0800
+#define MV88E6352_G2_INT_SRC_PHY 0x001f
+#define MV88E6390_G2_INT_SRC_PHY 0x07fe
+
+#define MV88E6XXX_G2_INT_SOURCE_WATCHDOG 15
+
+/* Offset 0x01: Interrupt Mask Register */
+#define MV88E6XXX_G2_INT_MASK 0x01
+#define MV88E6XXX_G2_INT_MASK_WDOG 0x8000
+#define MV88E6XXX_G2_INT_MASK_JAM_LIMIT 0x4000
+#define MV88E6XXX_G2_INT_MASK_DUPLEX_MISMATCH 0x2000
+#define MV88E6XXX_G2_INT_MASK_WAKE_EVENT 0x1000
+#define MV88E6352_G2_INT_MASK_SERDES 0x0800
+#define MV88E6352_G2_INT_MASK_PHY 0x001f
+#define MV88E6390_G2_INT_MASK_PHY 0x07fe
+
+/* Offset 0x02: MGMT Enable Register 2x */
+#define MV88E6XXX_G2_MGMT_EN_2X 0x02
+
+/* Offset 0x02: MAC LINK change IRQ Register for MV88E6393X */
+#define MV88E6393X_G2_MACLINK_INT_SRC 0x02
+
+/* Offset 0x03: MGMT Enable Register 0x */
+#define MV88E6XXX_G2_MGMT_EN_0X 0x03
+
+/* Offset 0x03: MAC LINK change IRQ Mask Register for MV88E6393X */
+#define MV88E6393X_G2_MACLINK_INT_MASK 0x03
+
+/* Offset 0x04: Flow Control Delay Register */
+#define MV88E6XXX_G2_FLOW_CTL 0x04
+
+/* Offset 0x05: Switch Management Register */
+#define MV88E6XXX_G2_SWITCH_MGMT 0x05
+#define MV88E6XXX_G2_SWITCH_MGMT_USE_DOUBLE_TAG_DATA 0x8000
+#define MV88E6XXX_G2_SWITCH_MGMT_PREVENT_LOOPS 0x4000
+#define MV88E6XXX_G2_SWITCH_MGMT_FLOW_CTL_MSG 0x2000
+#define MV88E6XXX_G2_SWITCH_MGMT_FORCE_FLOW_CTL_PRI 0x0080
+#define MV88E6XXX_G2_SWITCH_MGMT_RSVD2CPU 0x0008
+
+#define MV88E6393X_G2_EGRESS_MONITOR_DEST 0x05
+
+/* Offset 0x06: Device Mapping Table Register */
+#define MV88E6XXX_G2_DEVICE_MAPPING 0x06
+#define MV88E6XXX_G2_DEVICE_MAPPING_UPDATE 0x8000
+#define MV88E6XXX_G2_DEVICE_MAPPING_DEV_MASK 0x1f00
+#define MV88E6352_G2_DEVICE_MAPPING_PORT_MASK 0x000f
+#define MV88E6390_G2_DEVICE_MAPPING_PORT_MASK 0x001f
+
+/* Offset 0x07: Trunk Mask Table Register */
+#define MV88E6XXX_G2_TRUNK_MASK 0x07
+#define MV88E6XXX_G2_TRUNK_MASK_UPDATE 0x8000
+#define MV88E6XXX_G2_TRUNK_MASK_NUM_MASK 0x7000
+#define MV88E6XXX_G2_TRUNK_MASK_HASH 0x0800
+
+/* Offset 0x08: Trunk Mapping Table Register */
+#define MV88E6XXX_G2_TRUNK_MAPPING 0x08
+#define MV88E6XXX_G2_TRUNK_MAPPING_UPDATE 0x8000
+#define MV88E6XXX_G2_TRUNK_MAPPING_ID_MASK 0x7800
+
+/* Offset 0x09: Ingress Rate Command Register */
+#define MV88E6XXX_G2_IRL_CMD 0x09
+#define MV88E6XXX_G2_IRL_CMD_BUSY 0x8000
+#define MV88E6352_G2_IRL_CMD_OP_MASK 0x7000
+#define MV88E6352_G2_IRL_CMD_OP_NOOP 0x0000
+#define MV88E6352_G2_IRL_CMD_OP_INIT_ALL 0x1000
+#define MV88E6352_G2_IRL_CMD_OP_INIT_RES 0x2000
+#define MV88E6352_G2_IRL_CMD_OP_WRITE_REG 0x3000
+#define MV88E6352_G2_IRL_CMD_OP_READ_REG 0x4000
+#define MV88E6390_G2_IRL_CMD_OP_MASK 0x6000
+#define MV88E6390_G2_IRL_CMD_OP_READ_REG 0x0000
+#define MV88E6390_G2_IRL_CMD_OP_INIT_ALL 0x2000
+#define MV88E6390_G2_IRL_CMD_OP_INIT_RES 0x4000
+#define MV88E6390_G2_IRL_CMD_OP_WRITE_REG 0x6000
+#define MV88E6352_G2_IRL_CMD_PORT_MASK 0x0f00
+#define MV88E6390_G2_IRL_CMD_PORT_MASK 0x1f00
+#define MV88E6XXX_G2_IRL_CMD_RES_MASK 0x00e0
+#define MV88E6XXX_G2_IRL_CMD_REG_MASK 0x000f
+
+/* Offset 0x0A: Ingress Rate Data Register */
+#define MV88E6XXX_G2_IRL_DATA 0x0a
+#define MV88E6XXX_G2_IRL_DATA_MASK 0xffff
+
+/* Offset 0x0B: Cross-chip Port VLAN Register */
+#define MV88E6XXX_G2_PVT_ADDR 0x0b
+#define MV88E6XXX_G2_PVT_ADDR_BUSY 0x8000
+#define MV88E6XXX_G2_PVT_ADDR_OP_MASK 0x7000
+#define MV88E6XXX_G2_PVT_ADDR_OP_INIT_ONES 0x1000
+#define MV88E6XXX_G2_PVT_ADDR_OP_WRITE_PVLAN 0x3000
+#define MV88E6XXX_G2_PVT_ADDR_OP_READ 0x4000
+#define MV88E6XXX_G2_PVT_ADDR_PTR_MASK 0x01ff
+#define MV88E6XXX_G2_PVT_ADDR_DEV_TRUNK 0x1f
+
+/* Offset 0x0C: Cross-chip Port VLAN Data Register */
+#define MV88E6XXX_G2_PVT_DATA 0x0c
+#define MV88E6XXX_G2_PVT_DATA_MASK 0x7f
+
+/* Offset 0x0D: Switch MAC/WoL/WoF Register */
+#define MV88E6XXX_G2_SWITCH_MAC 0x0d
+#define MV88E6XXX_G2_SWITCH_MAC_UPDATE 0x8000
+#define MV88E6XXX_G2_SWITCH_MAC_PTR_MASK 0x1f00
+#define MV88E6XXX_G2_SWITCH_MAC_DATA_MASK 0x00ff
+
+/* Offset 0x0E: ATU Stats Register */
+#define MV88E6XXX_G2_ATU_STATS 0x0e
+#define MV88E6XXX_G2_ATU_STATS_BIN_0 (0x0 << 14)
+#define MV88E6XXX_G2_ATU_STATS_BIN_1 (0x1 << 14)
+#define MV88E6XXX_G2_ATU_STATS_BIN_2 (0x2 << 14)
+#define MV88E6XXX_G2_ATU_STATS_BIN_3 (0x3 << 14)
+#define MV88E6XXX_G2_ATU_STATS_MODE_ALL (0x0 << 12)
+#define MV88E6XXX_G2_ATU_STATS_MODE_ALL_DYNAMIC (0x1 << 12)
+#define MV88E6XXX_G2_ATU_STATS_MODE_FID_ALL (0x2 << 12)
+#define MV88E6XXX_G2_ATU_STATS_MODE_FID_ALL_DYNAMIC (0x3 << 12)
+#define MV88E6XXX_G2_ATU_STATS_MASK 0x0fff
+
+/* Offset 0x0F: Priority Override Table */
+#define MV88E6XXX_G2_PRIO_OVERRIDE 0x0f
+#define MV88E6XXX_G2_PRIO_OVERRIDE_UPDATE 0x8000
+#define MV88E6XXX_G2_PRIO_OVERRIDE_FPRISET 0x1000
+#define MV88E6XXX_G2_PRIO_OVERRIDE_PTR_MASK 0x0f00
+#define MV88E6352_G2_PRIO_OVERRIDE_QPRIAVBEN 0x0080
+#define MV88E6352_G2_PRIO_OVERRIDE_DATAAVB_MASK 0x0030
+#define MV88E6XXX_G2_PRIO_OVERRIDE_QFPRIEN 0x0008
+#define MV88E6XXX_G2_PRIO_OVERRIDE_DATA_MASK 0x0007
+
+/* Offset 0x14: EEPROM Command */
+#define MV88E6XXX_G2_EEPROM_CMD 0x14
+#define MV88E6XXX_G2_EEPROM_CMD_BUSY 0x8000
+#define MV88E6XXX_G2_EEPROM_CMD_OP_MASK 0x7000
+#define MV88E6XXX_G2_EEPROM_CMD_OP_WRITE 0x3000
+#define MV88E6XXX_G2_EEPROM_CMD_OP_READ 0x4000
+#define MV88E6XXX_G2_EEPROM_CMD_OP_LOAD 0x6000
+#define MV88E6XXX_G2_EEPROM_CMD_RUNNING 0x0800
+#define MV88E6XXX_G2_EEPROM_CMD_WRITE_EN 0x0400
+#define MV88E6352_G2_EEPROM_CMD_ADDR_MASK 0x00ff
+#define MV88E6390_G2_EEPROM_CMD_DATA_MASK 0x00ff
+
+/* Offset 0x15: EEPROM Data */
+#define MV88E6352_G2_EEPROM_DATA 0x15
+#define MV88E6352_G2_EEPROM_DATA_MASK 0xffff
+
+/* Offset 0x15: EEPROM Addr */
+#define MV88E6390_G2_EEPROM_ADDR 0x15
+#define MV88E6390_G2_EEPROM_ADDR_MASK 0xffff
+
+/* Offset 0x16: AVB Command Register */
+#define MV88E6352_G2_AVB_CMD 0x16
+#define MV88E6352_G2_AVB_CMD_BUSY 0x8000
+#define MV88E6352_G2_AVB_CMD_OP_READ 0x4000
+#define MV88E6352_G2_AVB_CMD_OP_READ_INCR 0x6000
+#define MV88E6352_G2_AVB_CMD_OP_WRITE 0x3000
+#define MV88E6390_G2_AVB_CMD_OP_READ 0x0000
+#define MV88E6390_G2_AVB_CMD_OP_READ_INCR 0x4000
+#define MV88E6390_G2_AVB_CMD_OP_WRITE 0x6000
+#define MV88E6352_G2_AVB_CMD_PORT_MASK 0x0f00
+#define MV88E6352_G2_AVB_CMD_PORT_TAIGLOBAL 0xe
+#define MV88E6165_G2_AVB_CMD_PORT_PTPGLOBAL 0xf
+#define MV88E6352_G2_AVB_CMD_PORT_PTPGLOBAL 0xf
+#define MV88E6390_G2_AVB_CMD_PORT_MASK 0x1f00
+#define MV88E6390_G2_AVB_CMD_PORT_TAIGLOBAL 0x1e
+#define MV88E6390_G2_AVB_CMD_PORT_PTPGLOBAL 0x1f
+#define MV88E6352_G2_AVB_CMD_BLOCK_PTP 0
+#define MV88E6352_G2_AVB_CMD_BLOCK_AVB 1
+#define MV88E6352_G2_AVB_CMD_BLOCK_QAV 2
+#define MV88E6352_G2_AVB_CMD_BLOCK_QVB 3
+#define MV88E6352_G2_AVB_CMD_BLOCK_MASK 0x00e0
+#define MV88E6352_G2_AVB_CMD_ADDR_MASK 0x001f
+
+/* Offset 0x17: AVB Data Register */
+#define MV88E6352_G2_AVB_DATA 0x17
+
+/* Offset 0x18: SMI PHY Command Register */
+#define MV88E6XXX_G2_SMI_PHY_CMD 0x18
+#define MV88E6XXX_G2_SMI_PHY_CMD_BUSY 0x8000
+#define MV88E6390_G2_SMI_PHY_CMD_FUNC_MASK 0x6000
+#define MV88E6390_G2_SMI_PHY_CMD_FUNC_INTERNAL 0x0000
+#define MV88E6390_G2_SMI_PHY_CMD_FUNC_EXTERNAL 0x2000
+#define MV88E6390_G2_SMI_PHY_CMD_FUNC_SETUP 0x4000
+#define MV88E6XXX_G2_SMI_PHY_CMD_MODE_MASK 0x1000
+#define MV88E6XXX_G2_SMI_PHY_CMD_MODE_45 0x0000
+#define MV88E6XXX_G2_SMI_PHY_CMD_MODE_22 0x1000
+#define MV88E6XXX_G2_SMI_PHY_CMD_OP_MASK 0x0c00
+#define MV88E6XXX_G2_SMI_PHY_CMD_OP_22_WRITE_DATA 0x0400
+#define MV88E6XXX_G2_SMI_PHY_CMD_OP_22_READ_DATA 0x0800
+#define MV88E6XXX_G2_SMI_PHY_CMD_OP_45_WRITE_ADDR 0x0000
+#define MV88E6XXX_G2_SMI_PHY_CMD_OP_45_WRITE_DATA 0x0400
+#define MV88E6XXX_G2_SMI_PHY_CMD_OP_45_READ_DATA_INC 0x0800
+#define MV88E6XXX_G2_SMI_PHY_CMD_OP_45_READ_DATA 0x0c00
+#define MV88E6XXX_G2_SMI_PHY_CMD_DEV_ADDR_MASK 0x03e0
+#define MV88E6XXX_G2_SMI_PHY_CMD_REG_ADDR_MASK 0x001f
+#define MV88E6XXX_G2_SMI_PHY_CMD_SETUP_PTR_MASK 0x03ff
+
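+/* Illustrative sketch, not part of the driver: a Clause 22 read through
+ * the SMI PHY command register combines the fields above; for C22 the
+ * DEV_ADDR field carries the PHY address and REG_ADDR the register
+ * number. The helper name is hypothetical.
+ */
+static inline u16 mv88e6xxx_g2_smi_phy_cmd_c22_read_example(int addr, int reg)
+{
+ return MV88E6XXX_G2_SMI_PHY_CMD_BUSY |
+ MV88E6XXX_G2_SMI_PHY_CMD_MODE_22 |
+ MV88E6XXX_G2_SMI_PHY_CMD_OP_22_READ_DATA |
+ ((addr << 5) & MV88E6XXX_G2_SMI_PHY_CMD_DEV_ADDR_MASK) |
+ (reg & MV88E6XXX_G2_SMI_PHY_CMD_REG_ADDR_MASK);
+}
+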
+/* Offset 0x19: SMI PHY Data Register */
+#define MV88E6XXX_G2_SMI_PHY_DATA 0x19
+
+/* Offset 0x1A: Scratch and Misc. Register */
+#define MV88E6XXX_G2_SCRATCH_MISC_MISC 0x1a
+#define MV88E6XXX_G2_SCRATCH_MISC_UPDATE 0x8000
+#define MV88E6XXX_G2_SCRATCH_MISC_PTR_MASK 0x7f00
+#define MV88E6XXX_G2_SCRATCH_MISC_DATA_MASK 0x00ff
+
+/* Offset 0x1B: Watch Dog Control Register */
+#define MV88E6250_G2_WDOG_CTL 0x1b
+#define MV88E6250_G2_WDOG_CTL_QC_HISTORY 0x0100
+#define MV88E6250_G2_WDOG_CTL_QC_EVENT 0x0080
+#define MV88E6250_G2_WDOG_CTL_QC_ENABLE 0x0040
+#define MV88E6250_G2_WDOG_CTL_EGRESS_HISTORY 0x0020
+#define MV88E6250_G2_WDOG_CTL_EGRESS_EVENT 0x0010
+#define MV88E6250_G2_WDOG_CTL_EGRESS_ENABLE 0x0008
+#define MV88E6250_G2_WDOG_CTL_FORCE_IRQ 0x0004
+#define MV88E6250_G2_WDOG_CTL_HISTORY 0x0002
+#define MV88E6250_G2_WDOG_CTL_SWRESET 0x0001
+
+/* Offset 0x1B: Watch Dog Control Register */
+#define MV88E6352_G2_WDOG_CTL 0x1b
+#define MV88E6352_G2_WDOG_CTL_EGRESS_EVENT 0x0080
+#define MV88E6352_G2_WDOG_CTL_RMU_TIMEOUT 0x0040
+#define MV88E6352_G2_WDOG_CTL_QC_ENABLE 0x0020
+#define MV88E6352_G2_WDOG_CTL_EGRESS_HISTORY 0x0010
+#define MV88E6352_G2_WDOG_CTL_EGRESS_ENABLE 0x0008
+#define MV88E6352_G2_WDOG_CTL_FORCE_IRQ 0x0004
+#define MV88E6352_G2_WDOG_CTL_HISTORY 0x0002
+#define MV88E6352_G2_WDOG_CTL_SWRESET 0x0001
+
+/* Offset 0x1B: Watch Dog Control Register */
+#define MV88E6390_G2_WDOG_CTL 0x1b
+#define MV88E6390_G2_WDOG_CTL_UPDATE 0x8000
+#define MV88E6390_G2_WDOG_CTL_PTR_MASK 0x7f00
+#define MV88E6390_G2_WDOG_CTL_PTR_INT_SOURCE 0x0000
+#define MV88E6390_G2_WDOG_CTL_PTR_INT_STS 0x1000
+#define MV88E6390_G2_WDOG_CTL_PTR_INT_ENABLE 0x1100
+#define MV88E6390_G2_WDOG_CTL_PTR_EVENT 0x1200
+#define MV88E6390_G2_WDOG_CTL_PTR_HISTORY 0x1300
+#define MV88E6390_G2_WDOG_CTL_DATA_MASK 0x00ff
+#define MV88E6390_G2_WDOG_CTL_CUT_THROUGH 0x0008
+#define MV88E6390_G2_WDOG_CTL_QUEUE_CONTROLLER 0x0004
+#define MV88E6390_G2_WDOG_CTL_EGRESS 0x0002
+#define MV88E6390_G2_WDOG_CTL_FORCE_IRQ 0x0001
+
+/* Offset 0x1C: QoS Weights Register */
+#define MV88E6XXX_G2_QOS_WEIGHTS 0x1c
+#define MV88E6XXX_G2_QOS_WEIGHTS_UPDATE 0x8000
+#define MV88E6352_G2_QOS_WEIGHTS_PTR_MASK 0x3f00
+#define MV88E6390_G2_QOS_WEIGHTS_PTR_MASK 0x7f00
+#define MV88E6XXX_G2_QOS_WEIGHTS_DATA_MASK 0x00ff
+
+/* Offset 0x1D: Misc Register */
+#define MV88E6XXX_G2_MISC 0x1d
+#define MV88E6XXX_G2_MISC_5_BIT_PORT 0x4000
+#define MV88E6352_G2_NOEGR_POLICY 0x2000
+#define MV88E6390_G2_LAG_ID_4 0x2000
+
+/* Scratch/Misc registers accessed through MV88E6XXX_G2_SCRATCH_MISC */
+/* Offset 0x02: Misc Configuration */
+#define MV88E6352_G2_SCRATCH_MISC_CFG 0x02
+#define MV88E6352_G2_SCRATCH_MISC_CFG_NORMALSMI 0x80
+/* Offset 0x60-0x61: GPIO Configuration */
+#define MV88E6352_G2_SCRATCH_GPIO_CFG0 0x60
+#define MV88E6352_G2_SCRATCH_GPIO_CFG1 0x61
+/* Offset 0x62-0x63: GPIO Direction */
+#define MV88E6352_G2_SCRATCH_GPIO_DIR0 0x62
+#define MV88E6352_G2_SCRATCH_GPIO_DIR1 0x63
+#define MV88E6352_G2_SCRATCH_GPIO_DIR_OUT 0
+#define MV88E6352_G2_SCRATCH_GPIO_DIR_IN 1
+/* Offset 0x64-0x65: GPIO Data */
+#define MV88E6352_G2_SCRATCH_GPIO_DATA0 0x64
+#define MV88E6352_G2_SCRATCH_GPIO_DATA1 0x65
+/* Offset 0x68-0x6F: GPIO Pin Control */
+#define MV88E6352_G2_SCRATCH_GPIO_PCTL0 0x68
+#define MV88E6352_G2_SCRATCH_GPIO_PCTL1 0x69
+#define MV88E6352_G2_SCRATCH_GPIO_PCTL2 0x6A
+#define MV88E6352_G2_SCRATCH_GPIO_PCTL3 0x6B
+#define MV88E6352_G2_SCRATCH_GPIO_PCTL4 0x6C
+#define MV88E6352_G2_SCRATCH_GPIO_PCTL5 0x6D
+#define MV88E6352_G2_SCRATCH_GPIO_PCTL6 0x6E
+#define MV88E6352_G2_SCRATCH_GPIO_PCTL7 0x6F
+#define MV88E6352_G2_SCRATCH_CONFIG_DATA0 0x70
+#define MV88E6352_G2_SCRATCH_CONFIG_DATA1 0x71
+#define MV88E6352_G2_SCRATCH_CONFIG_DATA1_NO_CPU BIT(2)
+#define MV88E6352_G2_SCRATCH_CONFIG_DATA2 0x72
+#define MV88E6352_G2_SCRATCH_CONFIG_DATA2_P0_MODE_MASK 0xf
+#define MV88E6352_G2_SCRATCH_CONFIG_DATA3 0x73
+#define MV88E6352_G2_SCRATCH_CONFIG_DATA3_S_SEL BIT(1)
+
+#define MV88E6352_G2_SCRATCH_GPIO_PCTL_GPIO 0
+#define MV88E6352_G2_SCRATCH_GPIO_PCTL_TRIG 1
+#define MV88E6352_G2_SCRATCH_GPIO_PCTL_EVREQ 2
+
+int mv88e6xxx_g2_read(struct mv88e6xxx_chip *chip, int reg, u16 *val);
+int mv88e6xxx_g2_write(struct mv88e6xxx_chip *chip, int reg, u16 val);
+int mv88e6xxx_g2_wait_bit(struct mv88e6xxx_chip *chip, int reg,
+ int bit, int val);
+
+int mv88e6352_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port);
+int mv88e6390_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port);
+
+int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 *val);
+int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 val);
+int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
+
+int mv88e6xxx_g2_get_eeprom8(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom, u8 *data);
+int mv88e6xxx_g2_set_eeprom8(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom, u8 *data);
+
+int mv88e6xxx_g2_get_eeprom16(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom, u8 *data);
+int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom, u8 *data);
+
+int mv88e6xxx_g2_pvt_read(struct mv88e6xxx_chip *chip, int src_dev,
+ int src_port, u16 *data);
+int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, int src_dev,
+ int src_port, u16 data);
+int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip);
+
+int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip);
+void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip);
+
+int mv88e6xxx_g2_irq_mdio_setup(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus);
+void mv88e6xxx_g2_irq_mdio_free(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus);
+
+int mv88e6185_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip);
+int mv88e6352_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip);
+
+int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip);
+
+int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
+ bool hash, u16 mask);
+int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id,
+ u16 map);
+int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip);
+
+int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
+ int port);
+int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip);
+
+extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops;
+extern const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops;
+extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops;
+extern const struct mv88e6xxx_irq_ops mv88e6393x_watchdog_ops;
+
+extern const struct mv88e6xxx_avb_ops mv88e6165_avb_ops;
+extern const struct mv88e6xxx_avb_ops mv88e6352_avb_ops;
+extern const struct mv88e6xxx_avb_ops mv88e6390_avb_ops;
+
+extern const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops;
+
+int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
+ bool external);
+int mv88e6352_g2_scratch_port_has_serdes(struct mv88e6xxx_chip *chip, int port);
+int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip, u16 kind, u16 bin);
+int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip *chip, u16 *stats);
+
+#endif /* _MV88E6XXX_GLOBAL2_H */
diff --git a/drivers/net/dsa/mv88e6xxx/global2_avb.c b/drivers/net/dsa/mv88e6xxx/global2_avb.c
new file mode 100644
index 000000000..657783e04
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/global2_avb.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Marvell 88E6xxx Switch Global 2 Registers support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2016-2017 Savoir-faire Linux Inc.
+ * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ *
+ * Copyright (c) 2017 National Instruments
+ * Brandon Streiff <brandon.streiff@ni.com>
+ */
+
+#include <linux/bitfield.h>
+
+#include "global2.h"
+
+/* Offset 0x16: AVB Command Register
+ * Offset 0x17: AVB Data Register
+ *
+ * There are two different versions of this register interface:
+ * "6352": 3-bit "op" field, 4-bit "port" field.
+ * "6390": 2-bit "op" field, 5-bit "port" field.
+ *
+ * The "op" codes are different between the two, as well as the special
+ * port fields for global PTP and TAI configuration.
+ */
+
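+/* Illustrative sketch, not part of the driver: how a 6352-style command
+ * word is packed from the fields above (op in bits 14:12, port in 11:8,
+ * block in 7:5, register address in 4:0). On 6390 the op field sits in
+ * bits 14:13 and the port field widens to 12:8. The helper name is
+ * hypothetical.
+ */
+static inline u16 mv88e6352_g2_avb_cmd_example(int port, int block, int addr)
+{
+ return MV88E6352_G2_AVB_CMD_OP_READ | (port << 8) | (block << 5) |
+ (addr & MV88E6352_G2_AVB_CMD_ADDR_MASK);
+}
+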
+static int mv88e6xxx_g2_avb_wait(struct mv88e6xxx_chip *chip)
+{
+ int bit = __bf_shf(MV88E6352_G2_AVB_CMD_BUSY);
+
+ return mv88e6xxx_g2_wait_bit(chip, MV88E6352_G2_AVB_CMD, bit, 0);
+}
+
+/* mv88e6xxx_g2_avb_read -- Read one or multiple 16-bit words.
+ * The hardware supports snapshotting up to four contiguous registers.
+ */
+static int mv88e6xxx_g2_avb_read(struct mv88e6xxx_chip *chip, u16 readop,
+ u16 *data, int len)
+{
+ int err;
+ int i;
+
+ err = mv88e6xxx_g2_avb_wait(chip);
+ if (err)
+ return err;
+
+ /* Hardware can only snapshot four words. */
+ if (len > 4)
+ return -E2BIG;
+
+ err = mv88e6xxx_g2_write(chip, MV88E6352_G2_AVB_CMD,
+ MV88E6352_G2_AVB_CMD_BUSY | readop);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_avb_wait(chip);
+ if (err)
+ return err;
+
+ for (i = 0; i < len; ++i) {
+ err = mv88e6xxx_g2_read(chip, MV88E6352_G2_AVB_DATA,
+ &data[i]);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* mv88e6xxx_g2_avb_write -- Write one 16-bit word. */
+static int mv88e6xxx_g2_avb_write(struct mv88e6xxx_chip *chip, u16 writeop,
+ u16 data)
+{
+ int err;
+
+ err = mv88e6xxx_g2_avb_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_write(chip, MV88E6352_G2_AVB_DATA, data);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_write(chip, MV88E6352_G2_AVB_CMD,
+ MV88E6352_G2_AVB_CMD_BUSY | writeop);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_avb_wait(chip);
+}
+
+static int mv88e6352_g2_avb_port_ptp_read(struct mv88e6xxx_chip *chip,
+ int port, int addr, u16 *data,
+ int len)
+{
+ u16 readop = (len == 1 ? MV88E6352_G2_AVB_CMD_OP_READ :
+ MV88E6352_G2_AVB_CMD_OP_READ_INCR) |
+ (port << 8) | (MV88E6352_G2_AVB_CMD_BLOCK_PTP << 5) |
+ addr;
+
+ return mv88e6xxx_g2_avb_read(chip, readop, data, len);
+}
+
+static int mv88e6352_g2_avb_port_ptp_write(struct mv88e6xxx_chip *chip,
+ int port, int addr, u16 data)
+{
+ u16 writeop = MV88E6352_G2_AVB_CMD_OP_WRITE | (port << 8) |
+ (MV88E6352_G2_AVB_CMD_BLOCK_PTP << 5) | addr;
+
+ return mv88e6xxx_g2_avb_write(chip, writeop, data);
+}
+
+static int mv88e6352_g2_avb_ptp_read(struct mv88e6xxx_chip *chip, int addr,
+ u16 *data, int len)
+{
+ return mv88e6352_g2_avb_port_ptp_read(chip,
+ MV88E6352_G2_AVB_CMD_PORT_PTPGLOBAL,
+ addr, data, len);
+}
+
+static int mv88e6352_g2_avb_ptp_write(struct mv88e6xxx_chip *chip, int addr,
+ u16 data)
+{
+ return mv88e6352_g2_avb_port_ptp_write(chip,
+ MV88E6352_G2_AVB_CMD_PORT_PTPGLOBAL,
+ addr, data);
+}
+
+static int mv88e6352_g2_avb_tai_read(struct mv88e6xxx_chip *chip, int addr,
+ u16 *data, int len)
+{
+ return mv88e6352_g2_avb_port_ptp_read(chip,
+ MV88E6352_G2_AVB_CMD_PORT_TAIGLOBAL,
+ addr, data, len);
+}
+
+static int mv88e6352_g2_avb_tai_write(struct mv88e6xxx_chip *chip, int addr,
+ u16 data)
+{
+ return mv88e6352_g2_avb_port_ptp_write(chip,
+ MV88E6352_G2_AVB_CMD_PORT_TAIGLOBAL,
+ addr, data);
+}
+
+const struct mv88e6xxx_avb_ops mv88e6352_avb_ops = {
+ .port_ptp_read = mv88e6352_g2_avb_port_ptp_read,
+ .port_ptp_write = mv88e6352_g2_avb_port_ptp_write,
+ .ptp_read = mv88e6352_g2_avb_ptp_read,
+ .ptp_write = mv88e6352_g2_avb_ptp_write,
+ .tai_read = mv88e6352_g2_avb_tai_read,
+ .tai_write = mv88e6352_g2_avb_tai_write,
+};
+
+static int mv88e6165_g2_avb_tai_read(struct mv88e6xxx_chip *chip, int addr,
+ u16 *data, int len)
+{
+ return mv88e6352_g2_avb_port_ptp_read(chip,
+ MV88E6165_G2_AVB_CMD_PORT_PTPGLOBAL,
+ addr, data, len);
+}
+
+static int mv88e6165_g2_avb_tai_write(struct mv88e6xxx_chip *chip, int addr,
+ u16 data)
+{
+ return mv88e6352_g2_avb_port_ptp_write(chip,
+ MV88E6165_G2_AVB_CMD_PORT_PTPGLOBAL,
+ addr, data);
+}
+
+const struct mv88e6xxx_avb_ops mv88e6165_avb_ops = {
+ .port_ptp_read = mv88e6352_g2_avb_port_ptp_read,
+ .port_ptp_write = mv88e6352_g2_avb_port_ptp_write,
+ .ptp_read = mv88e6352_g2_avb_ptp_read,
+ .ptp_write = mv88e6352_g2_avb_ptp_write,
+ .tai_read = mv88e6165_g2_avb_tai_read,
+ .tai_write = mv88e6165_g2_avb_tai_write,
+};
+
+static int mv88e6390_g2_avb_port_ptp_read(struct mv88e6xxx_chip *chip,
+ int port, int addr, u16 *data,
+ int len)
+{
+ u16 readop = (len == 1 ? MV88E6390_G2_AVB_CMD_OP_READ :
+ MV88E6390_G2_AVB_CMD_OP_READ_INCR) |
+ (port << 8) | (MV88E6352_G2_AVB_CMD_BLOCK_PTP << 5) |
+ addr;
+
+ return mv88e6xxx_g2_avb_read(chip, readop, data, len);
+}
+
+static int mv88e6390_g2_avb_port_ptp_write(struct mv88e6xxx_chip *chip,
+ int port, int addr, u16 data)
+{
+ u16 writeop = MV88E6390_G2_AVB_CMD_OP_WRITE | (port << 8) |
+ (MV88E6352_G2_AVB_CMD_BLOCK_PTP << 5) | addr;
+
+ return mv88e6xxx_g2_avb_write(chip, writeop, data);
+}
+
+static int mv88e6390_g2_avb_ptp_read(struct mv88e6xxx_chip *chip, int addr,
+ u16 *data, int len)
+{
+ return mv88e6390_g2_avb_port_ptp_read(chip,
+ MV88E6390_G2_AVB_CMD_PORT_PTPGLOBAL,
+ addr, data, len);
+}
+
+static int mv88e6390_g2_avb_ptp_write(struct mv88e6xxx_chip *chip, int addr,
+ u16 data)
+{
+ return mv88e6390_g2_avb_port_ptp_write(chip,
+ MV88E6390_G2_AVB_CMD_PORT_PTPGLOBAL,
+ addr, data);
+}
+
+static int mv88e6390_g2_avb_tai_read(struct mv88e6xxx_chip *chip, int addr,
+ u16 *data, int len)
+{
+ return mv88e6390_g2_avb_port_ptp_read(chip,
+ MV88E6390_G2_AVB_CMD_PORT_TAIGLOBAL,
+ addr, data, len);
+}
+
+static int mv88e6390_g2_avb_tai_write(struct mv88e6xxx_chip *chip, int addr,
+ u16 data)
+{
+ return mv88e6390_g2_avb_port_ptp_write(chip,
+ MV88E6390_G2_AVB_CMD_PORT_TAIGLOBAL,
+ addr, data);
+}
+
+const struct mv88e6xxx_avb_ops mv88e6390_avb_ops = {
+ .port_ptp_read = mv88e6390_g2_avb_port_ptp_read,
+ .port_ptp_write = mv88e6390_g2_avb_port_ptp_write,
+ .ptp_read = mv88e6390_g2_avb_ptp_read,
+ .ptp_write = mv88e6390_g2_avb_ptp_write,
+ .tai_read = mv88e6390_g2_avb_tai_read,
+ .tai_write = mv88e6390_g2_avb_tai_write,
+};
diff --git a/drivers/net/dsa/mv88e6xxx/global2_scratch.c b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
new file mode 100644
index 000000000..a9d6e4032
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Marvell 88E6xxx Switch Global 2 Scratch & Misc Registers support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2017 National Instruments
+ * Brandon Streiff <brandon.streiff@ni.com>
+ */
+
+#include "chip.h"
+#include "global2.h"
+
+/* Offset 0x1A: Scratch and Misc. Register */
+static int mv88e6xxx_g2_scratch_read(struct mv88e6xxx_chip *chip, int reg,
+ u8 *data)
+{
+ u16 value;
+ int err;
+
+ err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SCRATCH_MISC_MISC,
+ reg << 8);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_read(chip, MV88E6XXX_G2_SCRATCH_MISC_MISC, &value);
+ if (err)
+ return err;
+
+ *data = (value & MV88E6XXX_G2_SCRATCH_MISC_DATA_MASK);
+
+ return 0;
+}
+
+static int mv88e6xxx_g2_scratch_write(struct mv88e6xxx_chip *chip, int reg,
+ u8 data)
+{
+ u16 value = (reg << 8) | data;
+
+ return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SCRATCH_MISC_MISC,
+ MV88E6XXX_G2_SCRATCH_MISC_UPDATE | value);
+}
+
+/**
+ * mv88e6xxx_g2_scratch_get_bit - get a bit
+ * @chip: chip private data
+ * @base_reg: base of scratch bits
+ * @offset: index of bit within the register
+ * @set: is bit set?
+ */
+static int mv88e6xxx_g2_scratch_get_bit(struct mv88e6xxx_chip *chip,
+ int base_reg, unsigned int offset,
+ int *set)
+{
+ int reg = base_reg + (offset / 8);
+ u8 mask = (1 << (offset & 0x7));
+ u8 val;
+ int err;
+
+ err = mv88e6xxx_g2_scratch_read(chip, reg, &val);
+ if (err)
+ return err;
+
+ *set = !!(mask & val);
+
+ return 0;
+}
+
+/**
+ * mv88e6xxx_g2_scratch_set_bit - set (or clear) a bit
+ * @chip: chip private data
+ * @base_reg: base of scratch bits
+ * @offset: index of bit within the register
+ * @set: should this bit be set?
+ *
+ * Helper function for dealing with the direction and data registers.
+ */
+static int mv88e6xxx_g2_scratch_set_bit(struct mv88e6xxx_chip *chip,
+ int base_reg, unsigned int offset,
+ int set)
+{
+ int reg = base_reg + (offset / 8);
+ u8 mask = (1 << (offset & 0x7));
+ u8 val;
+ int err;
+
+ err = mv88e6xxx_g2_scratch_read(chip, reg, &val);
+ if (err)
+ return err;
+
+ if (set)
+ val |= mask;
+ else
+ val &= ~mask;
+
+ return mv88e6xxx_g2_scratch_write(chip, reg, val);
+}
+
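+/* Illustrative sketch, not part of the driver: the two helpers above
+ * treat consecutive 8-bit scratch registers as one long bit array, so a
+ * GPIO pin index maps onto a (register, bit) pair as shown here. The
+ * helper name is hypothetical.
+ */
+static inline void mv88e6xxx_g2_scratch_bit_addr_example(unsigned int pin,
+ int *reg_offset, u8 *mask)
+{
+ *reg_offset = pin / 8; /* e.g. pin 11 -> second register (DIR1/DATA1) */
+ *mask = 1 << (pin & 0x7); /* e.g. pin 11 -> bit 3 of that register */
+}
+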
+/**
+ * mv88e6352_g2_scratch_gpio_get_data - get data on gpio pin
+ * @chip: chip private data
+ * @pin: gpio index
+ *
+ * Return: 0 for low, 1 for high, negative error
+ */
+static int mv88e6352_g2_scratch_gpio_get_data(struct mv88e6xxx_chip *chip,
+ unsigned int pin)
+{
+ int val = 0;
+ int err;
+
+ err = mv88e6xxx_g2_scratch_get_bit(chip,
+ MV88E6352_G2_SCRATCH_GPIO_DATA0,
+ pin, &val);
+ if (err)
+ return err;
+
+ return val;
+}
+
+/**
+ * mv88e6352_g2_scratch_gpio_set_data - set data on gpio pin
+ * @chip: chip private data
+ * @pin: gpio index
+ * @value: value to set
+ */
+static int mv88e6352_g2_scratch_gpio_set_data(struct mv88e6xxx_chip *chip,
+ unsigned int pin, int value)
+{
+ u8 mask = (1 << (pin & 0x7));
+ int offset = (pin / 8);
+ int reg;
+
+ reg = MV88E6352_G2_SCRATCH_GPIO_DATA0 + offset;
+
+ if (value)
+ chip->gpio_data[offset] |= mask;
+ else
+ chip->gpio_data[offset] &= ~mask;
+
+ return mv88e6xxx_g2_scratch_write(chip, reg, chip->gpio_data[offset]);
+}
+
+/**
+ * mv88e6352_g2_scratch_gpio_get_dir - get direction of gpio pin
+ * @chip: chip private data
+ * @pin: gpio index
+ *
+ * Return: 0 for output, 1 for input (same as GPIOF_DIR_XXX).
+ */
+static int mv88e6352_g2_scratch_gpio_get_dir(struct mv88e6xxx_chip *chip,
+ unsigned int pin)
+{
+ int val = 0;
+ int err;
+
+ err = mv88e6xxx_g2_scratch_get_bit(chip,
+ MV88E6352_G2_SCRATCH_GPIO_DIR0,
+ pin, &val);
+ if (err)
+ return err;
+
+ return val;
+}
+
+/**
+ * mv88e6352_g2_scratch_gpio_set_dir - set direction of gpio pin
+ * @chip: chip private data
+ * @pin: gpio index
+ * @input: should the gpio be an input, or an output?
+ */
+static int mv88e6352_g2_scratch_gpio_set_dir(struct mv88e6xxx_chip *chip,
+ unsigned int pin, bool input)
+{
+ int value = (input ? MV88E6352_G2_SCRATCH_GPIO_DIR_IN :
+ MV88E6352_G2_SCRATCH_GPIO_DIR_OUT);
+
+ return mv88e6xxx_g2_scratch_set_bit(chip,
+ MV88E6352_G2_SCRATCH_GPIO_DIR0,
+ pin, value);
+}
+
+/**
+ * mv88e6352_g2_scratch_gpio_get_pctl - get pin control setting
+ * @chip: chip private data
+ * @pin: gpio index
+ * @func: function number
+ *
+ * Note that the function numbers themselves may vary by chipset.
+ */
+static int mv88e6352_g2_scratch_gpio_get_pctl(struct mv88e6xxx_chip *chip,
+ unsigned int pin, int *func)
+{
+ int reg = MV88E6352_G2_SCRATCH_GPIO_PCTL0 + (pin / 2);
+ int offset = (pin & 0x1) ? 4 : 0;
+ u8 mask = (0x7 << offset);
+ int err;
+ u8 val;
+
+ err = mv88e6xxx_g2_scratch_read(chip, reg, &val);
+ if (err)
+ return err;
+
+ *func = (val & mask) >> offset;
+
+ return 0;
+}
+
+/**
+ * mv88e6352_g2_scratch_gpio_set_pctl - set pin control setting
+ * @chip: chip private data
+ * @pin: gpio index
+ * @func: function number
+ */
+static int mv88e6352_g2_scratch_gpio_set_pctl(struct mv88e6xxx_chip *chip,
+ unsigned int pin, int func)
+{
+ int reg = MV88E6352_G2_SCRATCH_GPIO_PCTL0 + (pin / 2);
+ int offset = (pin & 0x1) ? 4 : 0;
+ u8 mask = (0x7 << offset);
+ int err;
+ u8 val;
+
+ err = mv88e6xxx_g2_scratch_read(chip, reg, &val);
+ if (err)
+ return err;
+
+ val = (val & ~mask) | ((func << offset) & mask);
+
+ return mv88e6xxx_g2_scratch_write(chip, reg, val);
+}
+
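+/* Illustrative sketch, not part of the driver: each pin control register
+ * holds two pins, the even pin in the low nibble and the odd pin in the
+ * high nibble, with three function-select bits per pin. The helper name
+ * is hypothetical.
+ */
+static inline u8 mv88e6352_g2_scratch_pctl_pack_example(u8 old,
+ unsigned int pin, int func)
+{
+ int offset = (pin & 0x1) ? 4 : 0;
+ u8 mask = 0x7 << offset;
+
+ /* e.g. pin 3, func MV88E6352_G2_SCRATCH_GPIO_PCTL_EVREQ: bits 6:4 = 2 */
+ return (old & ~mask) | ((func << offset) & mask);
+}
+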
+const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops = {
+ .get_data = mv88e6352_g2_scratch_gpio_get_data,
+ .set_data = mv88e6352_g2_scratch_gpio_set_data,
+ .get_dir = mv88e6352_g2_scratch_gpio_get_dir,
+ .set_dir = mv88e6352_g2_scratch_gpio_set_dir,
+ .get_pctl = mv88e6352_g2_scratch_gpio_get_pctl,
+ .set_pctl = mv88e6352_g2_scratch_gpio_set_pctl,
+};
+
+/**
+ * mv88e6xxx_g2_scratch_gpio_set_smi - set gpio muxing for external smi
+ * @chip: chip private data
+ * @external: set mux for external smi, or free for gpio usage
+ *
+ * Some mv88e6xxx models have GPIO pins that may be configured as
+ * an external SMI interface, or they may be made free for other
+ * GPIO uses.
+ */
+int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
+ bool external)
+{
+ int misc_cfg = MV88E6352_G2_SCRATCH_MISC_CFG;
+ int config_data1 = MV88E6352_G2_SCRATCH_CONFIG_DATA1;
+ int config_data2 = MV88E6352_G2_SCRATCH_CONFIG_DATA2;
+ bool no_cpu;
+ u8 p0_mode;
+ int err;
+ u8 val;
+
+ err = mv88e6xxx_g2_scratch_read(chip, config_data2, &val);
+ if (err)
+ return err;
+
+ p0_mode = val & MV88E6352_G2_SCRATCH_CONFIG_DATA2_P0_MODE_MASK;
+
+ if (p0_mode == 0x01 || p0_mode == 0x02)
+ return -EBUSY;
+
+ err = mv88e6xxx_g2_scratch_read(chip, config_data1, &val);
+ if (err)
+ return err;
+
+ no_cpu = !!(val & MV88E6352_G2_SCRATCH_CONFIG_DATA1_NO_CPU);
+
+ err = mv88e6xxx_g2_scratch_read(chip, misc_cfg, &val);
+ if (err)
+ return err;
+
+ /* NO_CPU being 0 inverts the meaning of the bit */
+ if (!no_cpu)
+ external = !external;
+
+ if (external)
+ val |= MV88E6352_G2_SCRATCH_MISC_CFG_NORMALSMI;
+ else
+ val &= ~MV88E6352_G2_SCRATCH_MISC_CFG_NORMALSMI;
+
+ return mv88e6xxx_g2_scratch_write(chip, misc_cfg, val);
+}
+
+/**
+ * mv88e6352_g2_scratch_port_has_serdes - indicate if a port can have a serdes
+ * @chip: chip private data
+ * @port: port number to check for serdes
+ *
+ * Indicates whether the port may have a serdes attached according to the
+ * pin strapping. Returns negative error number, 0 if the port is not
+ * configured to have a serdes, and 1 if the port is configured to have a
+ * serdes attached.
+ */
+int mv88e6352_g2_scratch_port_has_serdes(struct mv88e6xxx_chip *chip, int port)
+{
+ u8 config3, p;
+ int err;
+
+ err = mv88e6xxx_g2_scratch_read(chip, MV88E6352_G2_SCRATCH_CONFIG_DATA3,
+ &config3);
+ if (err)
+ return err;
+
+ if (config3 & MV88E6352_G2_SCRATCH_CONFIG_DATA3_S_SEL)
+ p = 5;
+ else
+ p = 4;
+
+ return port == p;
+}
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
new file mode 100644
index 000000000..331b4ca08
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
@@ -0,0 +1,615 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Marvell 88E6xxx Switch hardware timestamping support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2017 National Instruments
+ * Erik Hons <erik.hons@ni.com>
+ * Brandon Streiff <brandon.streiff@ni.com>
+ * Dane Wagner <dane.wagner@ni.com>
+ */
+
+#include "chip.h"
+#include "global2.h"
+#include "hwtstamp.h"
+#include "ptp.h"
+#include <linux/ptp_classify.h>
+
+#define SKB_PTP_TYPE(__skb) (*(unsigned int *)((__skb)->cb))
+
+static int mv88e6xxx_port_ptp_read(struct mv88e6xxx_chip *chip, int port,
+ int addr, u16 *data, int len)
+{
+ if (!chip->info->ops->avb_ops->port_ptp_read)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->avb_ops->port_ptp_read(chip, port, addr,
+ data, len);
+}
+
+static int mv88e6xxx_port_ptp_write(struct mv88e6xxx_chip *chip, int port,
+ int addr, u16 data)
+{
+ if (!chip->info->ops->avb_ops->port_ptp_write)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->avb_ops->port_ptp_write(chip, port, addr,
+ data);
+}
+
+static int mv88e6xxx_ptp_write(struct mv88e6xxx_chip *chip, int addr,
+ u16 data)
+{
+ if (!chip->info->ops->avb_ops->ptp_write)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->avb_ops->ptp_write(chip, addr, data);
+}
+
+static int mv88e6xxx_ptp_read(struct mv88e6xxx_chip *chip, int addr,
+ u16 *data)
+{
+ if (!chip->info->ops->avb_ops->ptp_read)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->avb_ops->ptp_read(chip, addr, data, 1);
+}
+
+/* TX_TSTAMP_TIMEOUT: This limits the time spent polling for a TX
+ * timestamp. When working properly, hardware will produce a timestamp
+ * within 1ms. Software may encounter delays due to MDIO contention, so
+ * the timeout is set accordingly.
+ */
+#define TX_TSTAMP_TIMEOUT msecs_to_jiffies(40)
+
+int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
+ struct ethtool_ts_info *info)
+{
+ const struct mv88e6xxx_ptp_ops *ptp_ops;
+ struct mv88e6xxx_chip *chip;
+
+ chip = ds->priv;
+ ptp_ops = chip->info->ops->ptp_ops;
+
+ if (!chip->info->ptp_support)
+ return -EOPNOTSUPP;
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = ptp_clock_index(chip->ptp_clock);
+ info->tx_types =
+ (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+ info->rx_filters = ptp_ops->rx_filters;
+
+ return 0;
+}
+
+static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port,
+ struct hwtstamp_config *config)
+{
+ const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
+ struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
+ bool tstamp_enable = false;
+
+ /* Prevent the TX/RX paths from trying to interact with the
+ * timestamp hardware while we reconfigure it.
+ */
+ clear_bit_unlock(MV88E6XXX_HWTSTAMP_ENABLED, &ps->state);
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ tstamp_enable = false;
+ break;
+ case HWTSTAMP_TX_ON:
+ tstamp_enable = true;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ /* The switch supports timestamping both L2 and L4; one cannot be
+ * disabled independently of the other.
+ */
+
+ if (!(BIT(config->rx_filter) & ptp_ops->rx_filters)) {
+ dev_dbg(chip->dev, "Unsupported rx_filter %d\n",
+ config->rx_filter);
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ tstamp_enable = false;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ default:
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
+ return -ERANGE;
+ }
+
+ mv88e6xxx_reg_lock(chip);
+ if (tstamp_enable) {
+ chip->enable_count += 1;
+ if (chip->enable_count == 1 && ptp_ops->global_enable)
+ ptp_ops->global_enable(chip);
+ if (ptp_ops->port_enable)
+ ptp_ops->port_enable(chip, port);
+ } else {
+ if (ptp_ops->port_disable)
+ ptp_ops->port_disable(chip, port);
+ chip->enable_count -= 1;
+ if (chip->enable_count == 0 && ptp_ops->global_disable)
+ ptp_ops->global_disable(chip);
+ }
+ mv88e6xxx_reg_unlock(chip);
+
+ /* Once hardware has been configured, enable timestamp checks
+ * in the RX/TX paths.
+ */
+ if (tstamp_enable)
+ set_bit(MV88E6XXX_HWTSTAMP_ENABLED, &ps->state);
+
+ return 0;
+}
+
+int mv88e6xxx_port_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct ifreq *ifr)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
+ struct hwtstamp_config config;
+ int err;
+
+ if (!chip->info->ptp_support)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ err = mv88e6xxx_set_hwtstamp_config(chip, port, &config);
+ if (err)
+ return err;
+
+ /* Save the chosen configuration to be returned later. */
+ memcpy(&ps->tstamp_config, &config, sizeof(config));
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+int mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct ifreq *ifr)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
+ struct hwtstamp_config *config = &ps->tstamp_config;
+
+ if (!chip->info->ptp_support)
+ return -EOPNOTSUPP;
+
+ return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+ -EFAULT : 0;
+}
+
+/* Returns a pointer to the PTP header if the caller should time stamp,
+ * or NULL if the caller should not.
+ */
+static struct ptp_header *mv88e6xxx_should_tstamp(struct mv88e6xxx_chip *chip,
+ int port, struct sk_buff *skb,
+ unsigned int type)
+{
+ struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
+ struct ptp_header *hdr;
+
+ if (!chip->info->ptp_support)
+ return NULL;
+
+ hdr = ptp_parse_header(skb, type);
+ if (!hdr)
+ return NULL;
+
+ if (!test_bit(MV88E6XXX_HWTSTAMP_ENABLED, &ps->state))
+ return NULL;
+
+ return hdr;
+}
+
+static int mv88e6xxx_ts_valid(u16 status)
+{
+ if (!(status & MV88E6XXX_PTP_TS_VALID))
+ return 0;
+ if (status & MV88E6XXX_PTP_TS_STATUS_MASK)
+ return 0;
+ return 1;
+}
+
+static int seq_match(struct sk_buff *skb, u16 ts_seqid)
+{
+ unsigned int type = SKB_PTP_TYPE(skb);
+ struct ptp_header *hdr;
+
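+ /* The queueing path only accepts skbs whose PTP header parsed
+ * successfully (see mv88e6xxx_should_tstamp()), so hdr cannot
+ * be NULL here.
+ */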
+ hdr = ptp_parse_header(skb, type);
+
+ return ts_seqid == ntohs(hdr->sequence_id);
+}
+
+static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_port_hwtstamp *ps,
+ struct sk_buff *skb, u16 reg,
+ struct sk_buff_head *rxq)
+{
+ u16 buf[4] = { 0 }, status, seq_id;
+ struct skb_shared_hwtstamps *shwt;
+ struct sk_buff_head received;
+ u64 ns, timelo, timehi;
+ unsigned long flags;
+ int err;
+
+ /* The latched timestamp belongs to one of the received frames. */
+ __skb_queue_head_init(&received);
+ spin_lock_irqsave(&rxq->lock, flags);
+ skb_queue_splice_tail_init(rxq, &received);
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_port_ptp_read(chip, ps->port_id,
+ reg, buf, ARRAY_SIZE(buf));
+ mv88e6xxx_reg_unlock(chip);
+ if (err)
+ pr_err("failed to get the receive time stamp\n");
+
+ status = buf[0];
+ timelo = buf[1];
+ timehi = buf[2];
+ seq_id = buf[3];
+
+ if (status & MV88E6XXX_PTP_TS_VALID) {
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_port_ptp_write(chip, ps->port_id, reg, 0);
+ mv88e6xxx_reg_unlock(chip);
+ if (err)
+ pr_err("failed to clear the receive status\n");
+ }
+ /* Since the device can only handle one time stamp at a time,
+ * we purge any extra frames from the queue.
+ */
+ for ( ; skb; skb = __skb_dequeue(&received)) {
+ if (mv88e6xxx_ts_valid(status) && seq_match(skb, seq_id)) {
+ ns = timehi << 16 | timelo;
+
+ mv88e6xxx_reg_lock(chip);
+ ns = timecounter_cyc2time(&chip->tstamp_tc, ns);
+ mv88e6xxx_reg_unlock(chip);
+ shwt = skb_hwtstamps(skb);
+ memset(shwt, 0, sizeof(*shwt));
+ shwt->hwtstamp = ns_to_ktime(ns);
+ status &= ~MV88E6XXX_PTP_TS_VALID;
+ }
+ netif_rx(skb);
+ }
+}
+
+static void mv88e6xxx_rxtstamp_work(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_port_hwtstamp *ps)
+{
+ const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
+ struct sk_buff *skb;
+
+ skb = skb_dequeue(&ps->rx_queue);
+
+ if (skb)
+ mv88e6xxx_get_rxts(chip, ps, skb, ptp_ops->arr0_sts_reg,
+ &ps->rx_queue);
+
+ skb = skb_dequeue(&ps->rx_queue2);
+ if (skb)
+ mv88e6xxx_get_rxts(chip, ps, skb, ptp_ops->arr1_sts_reg,
+ &ps->rx_queue2);
+}
+
+static int is_pdelay_resp(const struct ptp_header *hdr)
+{
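+ /* PTP messageType 3 is Pdelay_Resp (IEEE 1588) */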
+ return (hdr->tsmt & 0xf) == 3;
+}
+
+bool mv88e6xxx_port_rxtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type)
+{
+ struct mv88e6xxx_port_hwtstamp *ps;
+ struct mv88e6xxx_chip *chip;
+ struct ptp_header *hdr;
+
+ chip = ds->priv;
+ ps = &chip->port_hwtstamp[port];
+
+ if (ps->tstamp_config.rx_filter != HWTSTAMP_FILTER_PTP_V2_EVENT)
+ return false;
+
+ hdr = mv88e6xxx_should_tstamp(chip, port, skb, type);
+ if (!hdr)
+ return false;
+
+ SKB_PTP_TYPE(skb) = type;
+
+ if (is_pdelay_resp(hdr))
+ skb_queue_tail(&ps->rx_queue2, skb);
+ else
+ skb_queue_tail(&ps->rx_queue, skb);
+
+ ptp_schedule_worker(chip->ptp_clock, 0);
+
+ return true;
+}
+
+static int mv88e6xxx_txtstamp_work(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_port_hwtstamp *ps)
+{
+ const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
+ struct skb_shared_hwtstamps shhwtstamps;
+ u16 departure_block[4], status;
+ struct sk_buff *tmp_skb;
+ u32 time_raw;
+ int err;
+ u64 ns;
+
+ if (!ps->tx_skb)
+ return 0;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_port_ptp_read(chip, ps->port_id,
+ ptp_ops->dep_sts_reg,
+ departure_block,
+ ARRAY_SIZE(departure_block));
+ mv88e6xxx_reg_unlock(chip);
+
+ if (err)
+ goto free_and_clear_skb;
+
+ if (!(departure_block[0] & MV88E6XXX_PTP_TS_VALID)) {
+ if (time_is_before_jiffies(ps->tx_tstamp_start +
+ TX_TSTAMP_TIMEOUT)) {
+ dev_warn(chip->dev, "p%d: clearing tx timestamp hang\n",
+ ps->port_id);
+ goto free_and_clear_skb;
+ }
+ /* The timestamp should be available quickly, while getting it
+ * is high priority and time bounded by TX_TSTAMP_TIMEOUT. A
+ * poll is warranted so restart the work.
+ */
+ return 1;
+ }
+
+ /* We have the timestamp; go ahead and clear valid now */
+ mv88e6xxx_reg_lock(chip);
+ mv88e6xxx_port_ptp_write(chip, ps->port_id, ptp_ops->dep_sts_reg, 0);
+ mv88e6xxx_reg_unlock(chip);
+
+ status = departure_block[0] & MV88E6XXX_PTP_TS_STATUS_MASK;
+ if (status != MV88E6XXX_PTP_TS_STATUS_NORMAL) {
+ dev_warn(chip->dev, "p%d: tx timestamp overrun\n", ps->port_id);
+ goto free_and_clear_skb;
+ }
+
+ if (departure_block[3] != ps->tx_seq_id) {
+ dev_warn(chip->dev, "p%d: unexpected seq. id\n", ps->port_id);
+ goto free_and_clear_skb;
+ }
+
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ time_raw = ((u32)departure_block[2] << 16) | departure_block[1];
+ mv88e6xxx_reg_lock(chip);
+ ns = timecounter_cyc2time(&chip->tstamp_tc, time_raw);
+ mv88e6xxx_reg_unlock(chip);
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+
+ dev_dbg(chip->dev,
+ "p%d: txtstamp %llx status 0x%04x skb ID 0x%04x hw ID 0x%04x\n",
+ ps->port_id, ktime_to_ns(shhwtstamps.hwtstamp),
+ departure_block[0], ps->tx_seq_id, departure_block[3]);
+
+ /* skb_complete_tx_timestamp() will free up the client to make
+ * another timestamp-able transmit. We have to be ready for it
+ * -- by clearing the ps->tx_skb "flag" -- beforehand.
+ */
+
+ tmp_skb = ps->tx_skb;
+ ps->tx_skb = NULL;
+ clear_bit_unlock(MV88E6XXX_HWTSTAMP_TX_IN_PROGRESS, &ps->state);
+ skb_complete_tx_timestamp(tmp_skb, &shhwtstamps);
+
+ return 0;
+
+free_and_clear_skb:
+ dev_kfree_skb_any(ps->tx_skb);
+ ps->tx_skb = NULL;
+ clear_bit_unlock(MV88E6XXX_HWTSTAMP_TX_IN_PROGRESS, &ps->state);
+
+ return 0;
+}
+
+long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp)
+{
+ struct mv88e6xxx_chip *chip = ptp_to_chip(ptp);
+ struct dsa_switch *ds = chip->ds;
+ struct mv88e6xxx_port_hwtstamp *ps;
+ int i, restart = 0;
+
+ for (i = 0; i < ds->num_ports; i++) {
+ if (!dsa_is_user_port(ds, i))
+ continue;
+
+ ps = &chip->port_hwtstamp[i];
+ if (test_bit(MV88E6XXX_HWTSTAMP_TX_IN_PROGRESS, &ps->state))
+ restart |= mv88e6xxx_txtstamp_work(chip, ps);
+
+ mv88e6xxx_rxtstamp_work(chip, ps);
+ }
+
+ return restart ? 1 : -1;
+}
+
+void mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
+ struct ptp_header *hdr;
+ struct sk_buff *clone;
+ unsigned int type;
+
+ type = ptp_classify_raw(skb);
+ if (type == PTP_CLASS_NONE)
+ return;
+
+ hdr = mv88e6xxx_should_tstamp(chip, port, skb, type);
+ if (!hdr)
+ return;
+
+ clone = skb_clone_sk(skb);
+ if (!clone)
+ return;
+
+ if (test_and_set_bit_lock(MV88E6XXX_HWTSTAMP_TX_IN_PROGRESS,
+ &ps->state)) {
+ kfree_skb(clone);
+ return;
+ }
+
+ ps->tx_skb = clone;
+ ps->tx_tstamp_start = jiffies;
+ ps->tx_seq_id = be16_to_cpu(hdr->sequence_id);
+
+ ptp_schedule_worker(chip->ptp_clock, 0);
+}
+
+int mv88e6165_global_disable(struct mv88e6xxx_chip *chip)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_ptp_read(chip, MV88E6165_PTP_CFG, &val);
+ if (err)
+ return err;
+ val |= MV88E6165_PTP_CFG_DISABLE_PTP;
+
+ return mv88e6xxx_ptp_write(chip, MV88E6165_PTP_CFG, val);
+}
+
+int mv88e6165_global_enable(struct mv88e6xxx_chip *chip)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_ptp_read(chip, MV88E6165_PTP_CFG, &val);
+ if (err)
+ return err;
+
+ val &= ~(MV88E6165_PTP_CFG_DISABLE_PTP | MV88E6165_PTP_CFG_TSPEC_MASK);
+
+ return mv88e6xxx_ptp_write(chip, MV88E6165_PTP_CFG, val);
+}
+
+int mv88e6352_hwtstamp_port_disable(struct mv88e6xxx_chip *chip, int port)
+{
+ return mv88e6xxx_port_ptp_write(chip, port, MV88E6XXX_PORT_PTP_CFG0,
+ MV88E6XXX_PORT_PTP_CFG0_DISABLE_PTP);
+}
+
+int mv88e6352_hwtstamp_port_enable(struct mv88e6xxx_chip *chip, int port)
+{
+ return mv88e6xxx_port_ptp_write(chip, port, MV88E6XXX_PORT_PTP_CFG0,
+ MV88E6XXX_PORT_PTP_CFG0_DISABLE_TSPEC_MATCH);
+}
+
+static int mv88e6xxx_hwtstamp_port_setup(struct mv88e6xxx_chip *chip, int port)
+{
+ const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
+ struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
+
+ ps->port_id = port;
+
+ skb_queue_head_init(&ps->rx_queue);
+ skb_queue_head_init(&ps->rx_queue2);
+
+ if (ptp_ops->port_disable)
+ return ptp_ops->port_disable(chip, port);
+
+ return 0;
+}
+
+int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip)
+{
+ const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
+ int err;
+ int i;
+
+ /* Disable timestamping on all ports. */
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
+ err = mv88e6xxx_hwtstamp_port_setup(chip, i);
+ if (err)
+ return err;
+ }
+
+ /* Disable PTP globally */
+ if (ptp_ops->global_disable) {
+ err = ptp_ops->global_disable(chip);
+ if (err)
+ return err;
+ }
+
+ /* Set the ethertype of L2 PTP messages */
+ err = mv88e6xxx_ptp_write(chip, MV88E6XXX_PTP_GC_ETYPE, ETH_P_1588);
+ if (err)
+ return err;
+
+ /* MV88E6XXX_PTP_MSG_TYPE is a mask of PTP message types to
+ * timestamp. This affects all ports that have timestamping enabled,
+ * but the timestamp config is per-port; thus we configure all events
+ * here and only support the HWTSTAMP_FILTER_*_EVENT filter types.
+ */
+ err = mv88e6xxx_ptp_write(chip, MV88E6XXX_PTP_MSGTYPE,
+ MV88E6XXX_PTP_MSGTYPE_ALL_EVENT);
+ if (err)
+ return err;
+
+ /* Use ARRIVAL1 for peer delay response messages. */
+ err = mv88e6xxx_ptp_write(chip, MV88E6XXX_PTP_TS_ARRIVAL_PTR,
+ MV88E6XXX_PTP_MSGTYPE_PDLAY_RES);
+ if (err)
+ return err;
+
+ /* 88E6341 devices default to timestamping at the PHY, but this has
+ * a hardware issue that results in unreliable timestamps. Force
+ * these devices to timestamp at the MAC.
+ */
+ if (chip->info->family == MV88E6XXX_FAMILY_6341) {
+ u16 val = MV88E6341_PTP_CFG_UPDATE |
+ MV88E6341_PTP_CFG_MODE_IDX |
+ MV88E6341_PTP_CFG_MODE_TS_AT_MAC;
+ err = mv88e6xxx_ptp_write(chip, MV88E6341_PTP_CFG, val);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+void mv88e6xxx_hwtstamp_free(struct mv88e6xxx_chip *chip)
+{
+}
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.h b/drivers/net/dsa/mv88e6xxx/hwtstamp.h
new file mode 100644
index 000000000..cf7fb6d66
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Marvell 88E6xxx Switch hardware timestamping support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2017 National Instruments
+ * Erik Hons <erik.hons@ni.com>
+ * Brandon Streiff <brandon.streiff@ni.com>
+ * Dane Wagner <dane.wagner@ni.com>
+ */
+
+#ifndef _MV88E6XXX_HWTSTAMP_H
+#define _MV88E6XXX_HWTSTAMP_H
+
+#include "chip.h"
+
+/* Global 6352 PTP registers */
+/* Offset 0x00: PTP EtherType */
+#define MV88E6XXX_PTP_ETHERTYPE 0x00
+
+/* Offset 0x01: Message Type Timestamp Enables */
+#define MV88E6XXX_PTP_MSGTYPE 0x01
+#define MV88E6XXX_PTP_MSGTYPE_SYNC 0x0001
+#define MV88E6XXX_PTP_MSGTYPE_DELAY_REQ 0x0002
+#define MV88E6XXX_PTP_MSGTYPE_PDLAY_REQ 0x0004
+#define MV88E6XXX_PTP_MSGTYPE_PDLAY_RES 0x0008
+#define MV88E6XXX_PTP_MSGTYPE_ALL_EVENT 0x000f
+
+/* Offset 0x02: Timestamp Arrival Capture Pointers */
+#define MV88E6XXX_PTP_TS_ARRIVAL_PTR 0x02
+
+/* Offset 0x05: PTP Global Configuration */
+#define MV88E6165_PTP_CFG 0x05
+#define MV88E6165_PTP_CFG_TSPEC_MASK 0xf000
+#define MV88E6165_PTP_CFG_DISABLE_TS_OVERWRITE BIT(1)
+#define MV88E6165_PTP_CFG_DISABLE_PTP BIT(0)
+
+/* Offset 0x07: PTP Global Configuration */
+#define MV88E6341_PTP_CFG 0x07
+#define MV88E6341_PTP_CFG_UPDATE 0x8000
+#define MV88E6341_PTP_CFG_IDX_MASK 0x7f00
+#define MV88E6341_PTP_CFG_DATA_MASK 0x00ff
+#define MV88E6341_PTP_CFG_MODE_IDX 0x0
+#define MV88E6341_PTP_CFG_MODE_TS_AT_PHY 0x00
+#define MV88E6341_PTP_CFG_MODE_TS_AT_MAC 0x80
+
+/* Offset 0x08: PTP Interrupt Status */
+#define MV88E6XXX_PTP_IRQ_STATUS 0x08
+
+/* Per-Port 6352 PTP Registers */
+/* Offset 0x00: PTP Configuration 0 */
+#define MV88E6XXX_PORT_PTP_CFG0 0x00
+#define MV88E6XXX_PORT_PTP_CFG0_TSPEC_SHIFT 12
+#define MV88E6XXX_PORT_PTP_CFG0_TSPEC_MASK 0xf000
+#define MV88E6XXX_PORT_PTP_CFG0_TSPEC_1588 0x0000
+#define MV88E6XXX_PORT_PTP_CFG0_TSPEC_8021AS 0x1000
+#define MV88E6XXX_PORT_PTP_CFG0_DISABLE_TSPEC_MATCH 0x0800
+#define MV88E6XXX_PORT_PTP_CFG0_DISABLE_OVERWRITE 0x0002
+#define MV88E6XXX_PORT_PTP_CFG0_DISABLE_PTP 0x0001
+
+/* Offset 0x01: PTP Configuration 1 */
+#define MV88E6XXX_PORT_PTP_CFG1 0x01
+
+/* Offset 0x02: PTP Configuration 2 */
+#define MV88E6XXX_PORT_PTP_CFG2 0x02
+#define MV88E6XXX_PORT_PTP_CFG2_EMBED_ARRIVAL 0x1000
+#define MV88E6XXX_PORT_PTP_CFG2_DEP_IRQ_EN 0x0002
+#define MV88E6XXX_PORT_PTP_CFG2_ARR_IRQ_EN 0x0001
+
+/* Offset 0x03: PTP LED Configuration */
+#define MV88E6XXX_PORT_PTP_LED_CFG 0x03
+
+/* Offset 0x08: PTP Arrival 0 Status */
+#define MV88E6XXX_PORT_PTP_ARR0_STS 0x08
+
+/* Offset 0x09/0x0A: PTP Arrival 0 Time */
+#define MV88E6XXX_PORT_PTP_ARR0_TIME_LO 0x09
+#define MV88E6XXX_PORT_PTP_ARR0_TIME_HI 0x0a
+
+/* Offset 0x0B: PTP Arrival 0 Sequence ID */
+#define MV88E6XXX_PORT_PTP_ARR0_SEQID 0x0b
+
+/* Offset 0x0C: PTP Arrival 1 Status */
+#define MV88E6XXX_PORT_PTP_ARR1_STS 0x0c
+
+/* Offset 0x0D/0x0E: PTP Arrival 1 Time */
+#define MV88E6XXX_PORT_PTP_ARR1_TIME_LO 0x0d
+#define MV88E6XXX_PORT_PTP_ARR1_TIME_HI 0x0e
+
+/* Offset 0x0F: PTP Arrival 1 Sequence ID */
+#define MV88E6XXX_PORT_PTP_ARR1_SEQID 0x0f
+
+/* Offset 0x10: PTP Departure Status */
+#define MV88E6XXX_PORT_PTP_DEP_STS 0x10
+
+/* Offset 0x11/0x12: PTP Departure Time */
+#define MV88E6XXX_PORT_PTP_DEP_TIME_LO 0x11
+#define MV88E6XXX_PORT_PTP_DEP_TIME_HI 0x12
+
+/* Offset 0x13: PTP Departure Sequence ID */
+#define MV88E6XXX_PORT_PTP_DEP_SEQID 0x13
+
+/* Status fields for arrival and departure timestamp status registers */
+#define MV88E6XXX_PTP_TS_STATUS_MASK 0x0006
+#define MV88E6XXX_PTP_TS_STATUS_NORMAL 0x0000
+#define MV88E6XXX_PTP_TS_STATUS_OVERWITTEN 0x0002
+#define MV88E6XXX_PTP_TS_STATUS_DISCARDED 0x0004
+#define MV88E6XXX_PTP_TS_VALID 0x0001
+
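+/* Illustrative sketch, not part of the driver: a latched timestamp is
+ * usable when the valid bit is set and the status field reports a
+ * normal capture, mirroring the checks done in hwtstamp.c. The helper
+ * name is hypothetical.
+ */
+static inline bool mv88e6xxx_ptp_ts_usable_example(u16 status)
+{
+ return (status & MV88E6XXX_PTP_TS_VALID) &&
+ (status & MV88E6XXX_PTP_TS_STATUS_MASK) ==
+ MV88E6XXX_PTP_TS_STATUS_NORMAL;
+}
+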
+#ifdef CONFIG_NET_DSA_MV88E6XXX_PTP
+
+int mv88e6xxx_port_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct ifreq *ifr);
+int mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct ifreq *ifr);
+
+bool mv88e6xxx_port_rxtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *clone, unsigned int type);
+void mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb);
+
+int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
+ struct ethtool_ts_info *info);
+
+int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip);
+void mv88e6xxx_hwtstamp_free(struct mv88e6xxx_chip *chip);
+int mv88e6352_hwtstamp_port_enable(struct mv88e6xxx_chip *chip, int port);
+int mv88e6352_hwtstamp_port_disable(struct mv88e6xxx_chip *chip, int port);
+int mv88e6165_global_enable(struct mv88e6xxx_chip *chip);
+int mv88e6165_global_disable(struct mv88e6xxx_chip *chip);
+
+#else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */
+
+static inline int mv88e6xxx_port_hwtstamp_set(struct dsa_switch *ds,
+ int port, struct ifreq *ifr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds,
+ int port, struct ifreq *ifr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool mv88e6xxx_port_rxtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *clone,
+ unsigned int type)
+{
+ return false;
+}
+
+static inline void mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb)
+{
+}
+
+static inline int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
+ struct ethtool_ts_info *info)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip)
+{
+ return 0;
+}
+
+static inline void mv88e6xxx_hwtstamp_free(struct mv88e6xxx_chip *chip)
+{
+}
+
+#endif /* CONFIG_NET_DSA_MV88E6XXX_PTP */
+
+#endif /* _MV88E6XXX_HWTSTAMP_H */
diff --git a/drivers/net/dsa/mv88e6xxx/phy.c b/drivers/net/dsa/mv88e6xxx/phy.c
new file mode 100644
index 000000000..252b5b3a3
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/phy.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Marvell 88e6xxx Ethernet switch PHY and PPU support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2017 Andrew Lunn <andrew@lunn.ch>
+ */
+
+#include <linux/mdio.h>
+#include <linux/module.h>
+
+#include "chip.h"
+#include "phy.h"
+
+int mv88e6165_phy_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
+ int addr, int reg, u16 *val)
+{
+ return mv88e6xxx_read(chip, addr, reg, val);
+}
+
+int mv88e6165_phy_write(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
+ int addr, int reg, u16 val)
+{
+ return mv88e6xxx_write(chip, addr, reg, val);
+}
+
+int mv88e6xxx_phy_read(struct mv88e6xxx_chip *chip, int phy, int reg, u16 *val)
+{
+ int addr = phy; /* PHY device addresses start at 0x0 */
+ struct mii_bus *bus;
+
+ bus = mv88e6xxx_default_mdio_bus(chip);
+ if (!bus)
+ return -EOPNOTSUPP;
+
+ if (!chip->info->ops->phy_read)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->phy_read(chip, bus, addr, reg, val);
+}
+
+int mv88e6xxx_phy_write(struct mv88e6xxx_chip *chip, int phy, int reg, u16 val)
+{
+ int addr = phy; /* PHY device addresses start at 0x0 */
+ struct mii_bus *bus;
+
+ bus = mv88e6xxx_default_mdio_bus(chip);
+ if (!bus)
+ return -EOPNOTSUPP;
+
+ if (!chip->info->ops->phy_write)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->phy_write(chip, bus, addr, reg, val);
+}
+
+static int mv88e6xxx_phy_page_get(struct mv88e6xxx_chip *chip, int phy, u8 page)
+{
+ return mv88e6xxx_phy_write(chip, phy, MV88E6XXX_PHY_PAGE, page);
+}
+
+static void mv88e6xxx_phy_page_put(struct mv88e6xxx_chip *chip, int phy)
+{
+ int err;
+
+ /* Restore PHY page Copper 0x0 for access via the registered
+ * MDIO bus
+ */
+ err = mv88e6xxx_phy_write(chip, phy, MV88E6XXX_PHY_PAGE,
+ MV88E6XXX_PHY_PAGE_COPPER);
+ if (unlikely(err)) {
+ dev_err(chip->dev,
+ "failed to restore PHY %d page Copper (%d)\n",
+ phy, err);
+ }
+}
+
+int mv88e6xxx_phy_page_read(struct mv88e6xxx_chip *chip, int phy,
+ u8 page, int reg, u16 *val)
+{
+ int err;
+
+ /* There is no paging for register 22 */
+ if (reg == MV88E6XXX_PHY_PAGE)
+ return -EINVAL;
+
+ err = mv88e6xxx_phy_page_get(chip, phy, page);
+ if (!err) {
+ err = mv88e6xxx_phy_read(chip, phy, reg, val);
+ mv88e6xxx_phy_page_put(chip, phy);
+ }
+
+ return err;
+}
+
+int mv88e6xxx_phy_page_write(struct mv88e6xxx_chip *chip, int phy,
+ u8 page, int reg, u16 val)
+{
+ int err;
+
+ /* There is no paging for register 22 */
+ if (reg == MV88E6XXX_PHY_PAGE)
+ return -EINVAL;
+
+ err = mv88e6xxx_phy_page_get(chip, phy, page);
+ if (!err) {
+ err = mv88e6xxx_phy_write(chip, phy, MV88E6XXX_PHY_PAGE, page);
+ if (!err)
+ err = mv88e6xxx_phy_write(chip, phy, reg, val);
+
+ mv88e6xxx_phy_page_put(chip, phy);
+ }
+
+ return err;
+}
+
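+/* Illustrative sketch, not part of the driver: a paged access with the
+ * helpers above selects the page via register 22, performs the access,
+ * then restores the copper page. Treating page 1 and register 0 as the
+ * target here is an assumption for illustration; the actual page map is
+ * chip specific, and the helper name is hypothetical.
+ */
+static inline int mv88e6xxx_phy_page_read_example(struct mv88e6xxx_chip *chip,
+ int phy, u16 *val)
+{
+ /* Select page 1, read register 0, restore MV88E6XXX_PHY_PAGE_COPPER */
+ return mv88e6xxx_phy_page_read(chip, phy, 1, 0, val);
+}
+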
+static int mv88e6xxx_phy_ppu_disable(struct mv88e6xxx_chip *chip)
+{
+ if (!chip->info->ops->ppu_disable)
+ return 0;
+
+ return chip->info->ops->ppu_disable(chip);
+}
+
+static int mv88e6xxx_phy_ppu_enable(struct mv88e6xxx_chip *chip)
+{
+ if (!chip->info->ops->ppu_enable)
+ return 0;
+
+ return chip->info->ops->ppu_enable(chip);
+}
+
+static void mv88e6xxx_phy_ppu_reenable_work(struct work_struct *ugly)
+{
+ struct mv88e6xxx_chip *chip;
+
+ chip = container_of(ugly, struct mv88e6xxx_chip, ppu_work);
+
+ mv88e6xxx_reg_lock(chip);
+
+ if (mutex_trylock(&chip->ppu_mutex)) {
+ if (mv88e6xxx_phy_ppu_enable(chip) == 0)
+ chip->ppu_disabled = 0;
+ mutex_unlock(&chip->ppu_mutex);
+ }
+
+ mv88e6xxx_reg_unlock(chip);
+}
+
+static void mv88e6xxx_phy_ppu_reenable_timer(struct timer_list *t)
+{
+ struct mv88e6xxx_chip *chip = from_timer(chip, t, ppu_timer);
+
+ schedule_work(&chip->ppu_work);
+}
+
+static int mv88e6xxx_phy_ppu_access_get(struct mv88e6xxx_chip *chip)
+{
+ int ret;
+
+ mutex_lock(&chip->ppu_mutex);
+
+ /* If the PHY polling unit is enabled, disable it so that
+ * we can access the PHY registers. If it was already
+ * disabled, cancel the timer that is going to re-enable
+ * it.
+ */
+ if (!chip->ppu_disabled) {
+ ret = mv88e6xxx_phy_ppu_disable(chip);
+ if (ret < 0) {
+ mutex_unlock(&chip->ppu_mutex);
+ return ret;
+ }
+ chip->ppu_disabled = 1;
+ } else {
+ del_timer(&chip->ppu_timer);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static void mv88e6xxx_phy_ppu_access_put(struct mv88e6xxx_chip *chip)
+{
+ /* Schedule a timer to re-enable the PHY polling unit. */
+ mod_timer(&chip->ppu_timer, jiffies + msecs_to_jiffies(10));
+ mutex_unlock(&chip->ppu_mutex);
+}
+
+static void mv88e6xxx_phy_ppu_state_init(struct mv88e6xxx_chip *chip)
+{
+ mutex_init(&chip->ppu_mutex);
+ INIT_WORK(&chip->ppu_work, mv88e6xxx_phy_ppu_reenable_work);
+ timer_setup(&chip->ppu_timer, mv88e6xxx_phy_ppu_reenable_timer, 0);
+}
+
+static void mv88e6xxx_phy_ppu_state_destroy(struct mv88e6xxx_chip *chip)
+{
+ del_timer_sync(&chip->ppu_timer);
+}
+
+int mv88e6185_phy_ppu_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
+ int addr, int reg, u16 *val)
+{
+ int err;
+
+ err = mv88e6xxx_phy_ppu_access_get(chip);
+ if (!err) {
+ err = mv88e6xxx_read(chip, addr, reg, val);
+ mv88e6xxx_phy_ppu_access_put(chip);
+ }
+
+ return err;
+}
+
+int mv88e6185_phy_ppu_write(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
+ int addr, int reg, u16 val)
+{
+ int err;
+
+ err = mv88e6xxx_phy_ppu_access_get(chip);
+ if (!err) {
+ err = mv88e6xxx_write(chip, addr, reg, val);
+ mv88e6xxx_phy_ppu_access_put(chip);
+ }
+
+ return err;
+}
+
+void mv88e6xxx_phy_init(struct mv88e6xxx_chip *chip)
+{
+ if (chip->info->ops->ppu_enable && chip->info->ops->ppu_disable)
+ mv88e6xxx_phy_ppu_state_init(chip);
+}
+
+void mv88e6xxx_phy_destroy(struct mv88e6xxx_chip *chip)
+{
+ if (chip->info->ops->ppu_enable && chip->info->ops->ppu_disable)
+ mv88e6xxx_phy_ppu_state_destroy(chip);
+}
+
+int mv88e6xxx_phy_setup(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6xxx_phy_ppu_enable(chip);
+}
diff --git a/drivers/net/dsa/mv88e6xxx/phy.h b/drivers/net/dsa/mv88e6xxx/phy.h
new file mode 100644
index 000000000..05ea0d546
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/phy.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Marvell 88E6xxx PHY access
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2017 Andrew Lunn <andrew@lunn.ch>
+ */
+
+#ifndef _MV88E6XXX_PHY_H
+#define _MV88E6XXX_PHY_H
+
+#define MV88E6XXX_PHY_PAGE 0x16
+#define MV88E6XXX_PHY_PAGE_COPPER 0x00
+
+/* PHY Registers accesses implementations */
+int mv88e6165_phy_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
+ int addr, int reg, u16 *val);
+int mv88e6165_phy_write(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
+ int addr, int reg, u16 val);
+int mv88e6185_phy_ppu_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
+ int addr, int reg, u16 *val);
+int mv88e6185_phy_ppu_write(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
+ int addr, int reg, u16 val);
+
+/* Generic PHY operations */
+int mv88e6xxx_phy_read(struct mv88e6xxx_chip *chip, int phy,
+ int reg, u16 *val);
+int mv88e6xxx_phy_write(struct mv88e6xxx_chip *chip, int phy,
+ int reg, u16 val);
+int mv88e6xxx_phy_page_read(struct mv88e6xxx_chip *chip, int phy,
+ u8 page, int reg, u16 *val);
+int mv88e6xxx_phy_page_write(struct mv88e6xxx_chip *chip, int phy,
+ u8 page, int reg, u16 val);
+void mv88e6xxx_phy_init(struct mv88e6xxx_chip *chip);
+void mv88e6xxx_phy_destroy(struct mv88e6xxx_chip *chip);
+int mv88e6xxx_phy_setup(struct mv88e6xxx_chip *chip);
+
+#endif /*_MV88E6XXX_PHY_H */
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
new file mode 100644
index 000000000..f79cf716c
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -0,0 +1,1740 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Marvell 88E6xxx Switch Port Registers support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2016-2017 Savoir-faire Linux Inc.
+ * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/if_bridge.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+
+#include "chip.h"
+#include "global2.h"
+#include "port.h"
+#include "serdes.h"
+
+int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg,
+ u16 *val)
+{
+ int addr = chip->info->port_base_addr + port;
+
+ return mv88e6xxx_read(chip, addr, reg, val);
+}
+
+int mv88e6xxx_port_wait_bit(struct mv88e6xxx_chip *chip, int port, int reg,
+ int bit, int val)
+{
+ int addr = chip->info->port_base_addr + port;
+
+ return mv88e6xxx_wait_bit(chip, addr, reg, bit, val);
+}
+
+int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg,
+ u16 val)
+{
+ int addr = chip->info->port_base_addr + port;
+
+ return mv88e6xxx_write(chip, addr, reg, val);
+}
+
+/* Offset 0x00: MAC (or PCS or Physical) Status Register
+ *
+ * For most devices, this is read only. However the 6185 has the MyPause
+ * bit read/write.
+ */
+int mv88e6185_port_set_pause(struct mv88e6xxx_chip *chip, int port,
+ int pause)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
+ if (err)
+ return err;
+
+ if (pause)
+ reg |= MV88E6XXX_PORT_STS_MY_PAUSE;
+ else
+ reg &= ~MV88E6XXX_PORT_STS_MY_PAUSE;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_STS, reg);
+}
+
+/* Offset 0x01: MAC (or PCS or Physical) Control Register
+ *
+ * Link, Duplex and Flow Control have one force bit, one value bit.
+ *
+ * For the port's MAC speed, the ForceSpd (or SpdValue) bits 1:0 program the
+ * value. Alternative values require the 200BASE (or AltSpeed) bit 12 to be
+ * set. Newer chips additionally need the ForcedSpd bit 13 set for the value
+ * to take effect.
+ */
+
+static int mv88e6xxx_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL, &reg);
+ if (err)
+ return err;
+
+ reg &= ~(MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_RXCLK |
+ MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_TXCLK);
+
+ switch (mode) {
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ reg |= MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_RXCLK;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ reg |= MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_TXCLK;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ reg |= MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_RXCLK |
+ MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_TXCLK;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ break;
+ default:
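+		/* not an RGMII mode; leave the register untouched */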
+ return 0;
+ }
+
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg);
+ if (err)
+ return err;
+
+ dev_dbg(chip->dev, "p%d: delay RXCLK %s, TXCLK %s\n", port,
+ reg & MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_RXCLK ? "yes" : "no",
+ reg & MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_TXCLK ? "yes" : "no");
+
+ return 0;
+}
+
+int mv88e6352_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode)
+{
+ if (port < 5)
+ return -EOPNOTSUPP;
+
+ return mv88e6xxx_port_set_rgmii_delay(chip, port, mode);
+}
+
+int mv88e6390_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode)
+{
+ if (port != 0)
+ return -EOPNOTSUPP;
+
+ return mv88e6xxx_port_set_rgmii_delay(chip, port, mode);
+}
+
+int mv88e6320_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode)
+{
+ if (port != 2 && port != 5 && port != 6)
+ return -EOPNOTSUPP;
+
+ return mv88e6xxx_port_set_rgmii_delay(chip, port, mode);
+}
+
+int mv88e6xxx_port_set_link(struct mv88e6xxx_chip *chip, int port, int link)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL, &reg);
+ if (err)
+ return err;
+
+ reg &= ~(MV88E6XXX_PORT_MAC_CTL_FORCE_LINK |
+ MV88E6XXX_PORT_MAC_CTL_LINK_UP);
+
+ switch (link) {
+ case LINK_FORCED_DOWN:
+ reg |= MV88E6XXX_PORT_MAC_CTL_FORCE_LINK;
+ break;
+ case LINK_FORCED_UP:
+ reg |= MV88E6XXX_PORT_MAC_CTL_FORCE_LINK |
+ MV88E6XXX_PORT_MAC_CTL_LINK_UP;
+ break;
+ case LINK_UNFORCED:
+ /* normal link detection */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg);
+ if (err)
+ return err;
+
+ dev_dbg(chip->dev, "p%d: %s link %s\n", port,
+ reg & MV88E6XXX_PORT_MAC_CTL_FORCE_LINK ? "Force" : "Unforce",
+ reg & MV88E6XXX_PORT_MAC_CTL_LINK_UP ? "up" : "down");
+
+ return 0;
+}
+
+int mv88e6xxx_port_sync_link(struct mv88e6xxx_chip *chip, int port, unsigned int mode, bool isup)
+{
+ const struct mv88e6xxx_ops *ops = chip->info->ops;
+ int err = 0;
+ int link;
+
+ if (isup)
+ link = LINK_FORCED_UP;
+ else
+ link = LINK_FORCED_DOWN;
+
+ if (ops->port_set_link)
+ err = ops->port_set_link(chip, port, link);
+
+ return err;
+}
+
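+/* On MLO_AN_INBAND the 6185 leaves the link unforced so the MAC tracks
+ * the in-band negotiated state; otherwise the link is forced up or down
+ * as requested.
+ */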
+int mv88e6185_port_sync_link(struct mv88e6xxx_chip *chip, int port, unsigned int mode, bool isup)
+{
+ const struct mv88e6xxx_ops *ops = chip->info->ops;
+ int err = 0;
+ int link;
+
+ if (mode == MLO_AN_INBAND)
+ link = LINK_UNFORCED;
+ else if (isup)
+ link = LINK_FORCED_UP;
+ else
+ link = LINK_FORCED_DOWN;
+
+ if (ops->port_set_link)
+ err = ops->port_set_link(chip, port, link);
+
+ return err;
+}
+
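+/* alt_bit: the chip encodes 200 Mbps (and 2500 Mbps) by setting the
+ * AltSpeed bit on top of the base 100 (resp. 10000) speed value.
+ * force_bit: the chip needs the ForceSpd bit set for a forced speed to
+ * take effect. For example, a forced 2500 Mbps with both flags is
+ * SPEED_10000 (0x0003) | ALTSPEED (0x1000) | FORCE_SPEED (0x2000).
+ */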
+static int mv88e6xxx_port_set_speed_duplex(struct mv88e6xxx_chip *chip,
+ int port, int speed, bool alt_bit,
+ bool force_bit, int duplex)
+{
+ u16 reg, ctrl;
+ int err;
+
+ switch (speed) {
+ case 10:
+ ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_10;
+ break;
+ case 100:
+ ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_100;
+ break;
+ case 200:
+ if (alt_bit)
+ ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_100 |
+ MV88E6390_PORT_MAC_CTL_ALTSPEED;
+ else
+ ctrl = MV88E6065_PORT_MAC_CTL_SPEED_200;
+ break;
+ case 1000:
+ ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_1000;
+ break;
+ case 2500:
+ if (alt_bit)
+ ctrl = MV88E6390_PORT_MAC_CTL_SPEED_10000 |
+ MV88E6390_PORT_MAC_CTL_ALTSPEED;
+ else
+ ctrl = MV88E6390_PORT_MAC_CTL_SPEED_10000;
+ break;
+ case 10000:
+		/* 10000 uses the all-bits-set encoding, same as unforced */
+ case SPEED_UNFORCED:
+ ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_UNFORCED;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ switch (duplex) {
+ case DUPLEX_HALF:
+ ctrl |= MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX;
+ break;
+ case DUPLEX_FULL:
+ ctrl |= MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX |
+ MV88E6XXX_PORT_MAC_CTL_DUPLEX_FULL;
+ break;
+ case DUPLEX_UNFORCED:
+ /* normal duplex detection */
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL, &reg);
+ if (err)
+ return err;
+
+ reg &= ~(MV88E6XXX_PORT_MAC_CTL_SPEED_MASK |
+ MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX |
+ MV88E6XXX_PORT_MAC_CTL_DUPLEX_FULL);
+
+ if (alt_bit)
+ reg &= ~MV88E6390_PORT_MAC_CTL_ALTSPEED;
+ if (force_bit) {
+ reg &= ~MV88E6390_PORT_MAC_CTL_FORCE_SPEED;
+ if (speed != SPEED_UNFORCED)
+ ctrl |= MV88E6390_PORT_MAC_CTL_FORCE_SPEED;
+ }
+ reg |= ctrl;
+
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg);
+ if (err)
+ return err;
+
+ if (speed != SPEED_UNFORCED)
+ dev_dbg(chip->dev, "p%d: Speed set to %d Mbps\n", port, speed);
+ else
+ dev_dbg(chip->dev, "p%d: Speed unforced\n", port);
+ dev_dbg(chip->dev, "p%d: %s %s duplex\n", port,
+ reg & MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX ? "Force" : "Unforce",
+ reg & MV88E6XXX_PORT_MAC_CTL_DUPLEX_FULL ? "full" : "half");
+
+ return 0;
+}
+
+/* Support 10, 100, 1000 Mbps (e.g. 88E6185 family) */
+int mv88e6185_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex)
+{
+ if (speed == 200 || speed > 1000)
+ return -EOPNOTSUPP;
+
+ return mv88e6xxx_port_set_speed_duplex(chip, port, speed, false, false,
+ duplex);
+}
+
+/* Support 10, 100 Mbps (e.g. 88E6250 family) */
+int mv88e6250_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex)
+{
+ if (speed > 100)
+ return -EOPNOTSUPP;
+
+ return mv88e6xxx_port_set_speed_duplex(chip, port, speed, false, false,
+ duplex);
+}
+
+/* Support 10, 100, 200, 1000, 2500 Mbps (e.g. 88E6341) */
+int mv88e6341_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex)
+{
+ if (speed > 2500)
+ return -EOPNOTSUPP;
+
+ if (speed == 200 && port != 0)
+ return -EOPNOTSUPP;
+
+ if (speed == 2500 && port < 5)
+ return -EOPNOTSUPP;
+
+ return mv88e6xxx_port_set_speed_duplex(chip, port, speed, !port, true,
+ duplex);
+}
+
+phy_interface_t mv88e6341_port_max_speed_mode(int port)
+{
+ if (port == 5)
+ return PHY_INTERFACE_MODE_2500BASEX;
+
+ return PHY_INTERFACE_MODE_NA;
+}
+
+/* Support 10, 100, 200, 1000 Mbps (e.g. 88E6352 family) */
+int mv88e6352_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex)
+{
+ if (speed > 1000)
+ return -EOPNOTSUPP;
+
+ if (speed == 200 && port < 5)
+ return -EOPNOTSUPP;
+
+ return mv88e6xxx_port_set_speed_duplex(chip, port, speed, true, false,
+ duplex);
+}
+
+/* Support 10, 100, 200, 1000, 2500 Mbps (e.g. 88E6390) */
+int mv88e6390_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex)
+{
+ if (speed > 2500)
+ return -EOPNOTSUPP;
+
+ if (speed == 200 && port != 0)
+ return -EOPNOTSUPP;
+
+ if (speed == 2500 && port < 9)
+ return -EOPNOTSUPP;
+
+ return mv88e6xxx_port_set_speed_duplex(chip, port, speed, true, true,
+ duplex);
+}
+
+phy_interface_t mv88e6390_port_max_speed_mode(int port)
+{
+ if (port == 9 || port == 10)
+ return PHY_INTERFACE_MODE_2500BASEX;
+
+ return PHY_INTERFACE_MODE_NA;
+}
+
+/* Support 10, 100, 200, 1000, 2500, 10000 Mbps (e.g. 88E6190X) */
+int mv88e6390x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex)
+{
+ if (speed == 200 && port != 0)
+ return -EOPNOTSUPP;
+
+ if (speed >= 2500 && port < 9)
+ return -EOPNOTSUPP;
+
+ return mv88e6xxx_port_set_speed_duplex(chip, port, speed, true, true,
+ duplex);
+}
+
+phy_interface_t mv88e6390x_port_max_speed_mode(int port)
+{
+ if (port == 9 || port == 10)
+ return PHY_INTERFACE_MODE_XAUI;
+
+ return PHY_INTERFACE_MODE_NA;
+}
+
+/* Support 10, 100, 200, 1000, 2500, 5000, 10000 Mbps (e.g. 88E6393X)
+ * mv88e6xxx_port_set_speed_duplex() can't be used here, as the register
+ * values for the 2500 and 5000 Mbps speeds conflict.
+ */
+int mv88e6393x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex)
+{
+ u16 reg, ctrl;
+ int err;
+
+ if (speed == 200 && port != 0)
+ return -EOPNOTSUPP;
+
+ if (speed >= 2500 && port > 0 && port < 9)
+ return -EOPNOTSUPP;
+
+ switch (speed) {
+ case 10:
+ ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_10;
+ break;
+ case 100:
+ ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_100;
+ break;
+ case 200:
+ ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_100 |
+ MV88E6390_PORT_MAC_CTL_ALTSPEED;
+ break;
+ case 1000:
+ ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_1000;
+ break;
+ case 2500:
+ ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_1000 |
+ MV88E6390_PORT_MAC_CTL_ALTSPEED;
+ break;
+ case 5000:
+ ctrl = MV88E6390_PORT_MAC_CTL_SPEED_10000 |
+ MV88E6390_PORT_MAC_CTL_ALTSPEED;
+ break;
+ case 10000:
+ case SPEED_UNFORCED:
+ ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_UNFORCED;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ switch (duplex) {
+ case DUPLEX_HALF:
+ ctrl |= MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX;
+ break;
+ case DUPLEX_FULL:
+ ctrl |= MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX |
+ MV88E6XXX_PORT_MAC_CTL_DUPLEX_FULL;
+ break;
+ case DUPLEX_UNFORCED:
+ /* normal duplex detection */
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL, &reg);
+ if (err)
+ return err;
+
+ reg &= ~(MV88E6XXX_PORT_MAC_CTL_SPEED_MASK |
+ MV88E6390_PORT_MAC_CTL_ALTSPEED |
+ MV88E6390_PORT_MAC_CTL_FORCE_SPEED);
+
+ if (speed != SPEED_UNFORCED)
+ reg |= MV88E6390_PORT_MAC_CTL_FORCE_SPEED;
+
+ reg |= ctrl;
+
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg);
+ if (err)
+ return err;
+
+ if (speed != SPEED_UNFORCED)
+ dev_dbg(chip->dev, "p%d: Speed set to %d Mbps\n", port, speed);
+ else
+ dev_dbg(chip->dev, "p%d: Speed unforced\n", port);
+ dev_dbg(chip->dev, "p%d: %s %s duplex\n", port,
+ reg & MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX ? "Force" : "Unforce",
+ reg & MV88E6XXX_PORT_MAC_CTL_DUPLEX_FULL ? "full" : "half");
+
+ return 0;
+}
+
+phy_interface_t mv88e6393x_port_max_speed_mode(int port)
+{
+ if (port == 0 || port == 9 || port == 10)
+ return PHY_INTERFACE_MODE_10GBASER;
+
+ return PHY_INTERFACE_MODE_NA;
+}
+
+static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode, bool force)
+{
+ u16 cmode;
+ int lane;
+ u16 reg;
+ int err;
+
+	/* Default to a slow mode, freeing up SERDES interfaces for
+	 * other ports which might use them for SFPs.
+	 */
+ if (mode == PHY_INTERFACE_MODE_NA)
+ mode = PHY_INTERFACE_MODE_1000BASEX;
+
+ switch (mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ cmode = MV88E6XXX_PORT_STS_CMODE_RMII;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ cmode = MV88E6XXX_PORT_STS_CMODE_RGMII;
+ break;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ cmode = MV88E6XXX_PORT_STS_CMODE_1000BASEX;
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ cmode = MV88E6XXX_PORT_STS_CMODE_SGMII;
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ cmode = MV88E6XXX_PORT_STS_CMODE_2500BASEX;
+ break;
+ case PHY_INTERFACE_MODE_5GBASER:
+ cmode = MV88E6393X_PORT_STS_CMODE_5GBASER;
+ break;
+ case PHY_INTERFACE_MODE_XGMII:
+ case PHY_INTERFACE_MODE_XAUI:
+ cmode = MV88E6XXX_PORT_STS_CMODE_XAUI;
+ break;
+ case PHY_INTERFACE_MODE_RXAUI:
+ cmode = MV88E6XXX_PORT_STS_CMODE_RXAUI;
+ break;
+ case PHY_INTERFACE_MODE_10GBASER:
+ cmode = MV88E6393X_PORT_STS_CMODE_10GBASER;
+ break;
+ default:
+ cmode = 0;
+ }
+
+ /* cmode doesn't change, nothing to do for us unless forced */
+ if (cmode == chip->ports[port].cmode && !force)
+ return 0;
+
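+	/* Power down (and mask the interrupt of) any SERDES lane currently
+	 * assigned to this port before switching modes.
+	 */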
+ lane = mv88e6xxx_serdes_get_lane(chip, port);
+ if (lane >= 0) {
+ if (chip->ports[port].serdes_irq) {
+ err = mv88e6xxx_serdes_irq_disable(chip, port, lane);
+ if (err)
+ return err;
+ }
+
+ err = mv88e6xxx_serdes_power_down(chip, port, lane);
+ if (err)
+ return err;
+ }
+
+ chip->ports[port].cmode = 0;
+
+ if (cmode) {
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
+ if (err)
+ return err;
+
+ reg &= ~MV88E6XXX_PORT_STS_CMODE_MASK;
+ reg |= cmode;
+
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_STS, reg);
+ if (err)
+ return err;
+
+ chip->ports[port].cmode = cmode;
+
+ lane = mv88e6xxx_serdes_get_lane(chip, port);
+ if (lane == -ENODEV)
+ return 0;
+ if (lane < 0)
+ return lane;
+
+ err = mv88e6xxx_serdes_power_up(chip, port, lane);
+ if (err)
+ return err;
+
+ if (chip->ports[port].serdes_irq) {
+ err = mv88e6xxx_serdes_irq_enable(chip, port, lane);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode)
+{
+ if (port != 9 && port != 10)
+ return -EOPNOTSUPP;
+
+ return mv88e6xxx_port_set_cmode(chip, port, mode, false);
+}
+
+int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode)
+{
+ if (port != 9 && port != 10)
+ return -EOPNOTSUPP;
+
+ switch (mode) {
+ case PHY_INTERFACE_MODE_NA:
+ return 0;
+ case PHY_INTERFACE_MODE_XGMII:
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ return -EINVAL;
+ default:
+ break;
+ }
+
+ return mv88e6xxx_port_set_cmode(chip, port, mode, false);
+}
+
+int mv88e6393x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode)
+{
+ int err;
+ u16 reg;
+
+ if (port != 0 && port != 9 && port != 10)
+ return -EOPNOTSUPP;
+
+ if (port == 9 || port == 10) {
+ switch (mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ return -EINVAL;
+ default:
+ break;
+ }
+ }
+
+ /* mv88e6393x errata 4.5: EEE should be disabled on SERDES ports */
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL, &reg);
+ if (err)
+ return err;
+
+ reg &= ~MV88E6XXX_PORT_MAC_CTL_EEE;
+ reg |= MV88E6XXX_PORT_MAC_CTL_FORCE_EEE;
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg);
+ if (err)
+ return err;
+
+ return mv88e6xxx_port_set_cmode(chip, port, mode, false);
+}
+
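+/* Make the cmode of port 5 writable by setting the force-cmode and
+ * SGMII autoneg bits through the undocumented "reserved 1A" registers.
+ */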
+static int mv88e6341_port_set_cmode_writable(struct mv88e6xxx_chip *chip,
+ int port)
+{
+ int err, addr;
+ u16 reg, bits;
+
+ if (port != 5)
+ return -EOPNOTSUPP;
+
+ addr = chip->info->port_base_addr + port;
+
+ err = mv88e6xxx_port_hidden_read(chip, 0x7, addr, 0, &reg);
+ if (err)
+ return err;
+
+ bits = MV88E6341_PORT_RESERVED_1A_FORCE_CMODE |
+ MV88E6341_PORT_RESERVED_1A_SGMII_AN;
+
+ if ((reg & bits) == bits)
+ return 0;
+
+ reg |= bits;
+ return mv88e6xxx_port_hidden_write(chip, 0x7, addr, 0, reg);
+}
+
+int mv88e6341_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode)
+{
+ int err;
+
+ if (port != 5)
+ return -EOPNOTSUPP;
+
+ switch (mode) {
+ case PHY_INTERFACE_MODE_NA:
+ return 0;
+ case PHY_INTERFACE_MODE_XGMII:
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ return -EINVAL;
+ default:
+ break;
+ }
+
+ err = mv88e6341_port_set_cmode_writable(chip, port);
+ if (err)
+ return err;
+
+ return mv88e6xxx_port_set_cmode(chip, port, mode, true);
+}
+
+int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
+{
+ int err;
+ u16 reg;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
+ if (err)
+ return err;
+
+ *cmode = reg & MV88E6185_PORT_STS_CMODE_MASK;
+
+ return 0;
+}
+
+int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
+{
+ int err;
+ u16 reg;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
+ if (err)
+ return err;
+
+ *cmode = reg & MV88E6XXX_PORT_STS_CMODE_MASK;
+
+ return 0;
+}
+
+/* Offset 0x02: Jamming Control
+ *
+ * Do not limit how long this port may be paused by the remote end, nor
+ * how long this port may pause the remote end.
+ */
+int mv88e6097_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in,
+ u8 out)
+{
+ return mv88e6xxx_port_write(chip, port, MV88E6097_PORT_JAM_CTL,
+ out << 8 | in);
+}
+
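+/* The 6390 flow control register is indirect: each write latches one
+ * pointer/data pair, so the in and out limits take two UPDATE writes.
+ */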
+int mv88e6390_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in,
+ u8 out)
+{
+ int err;
+
+ err = mv88e6xxx_port_write(chip, port, MV88E6390_PORT_FLOW_CTL,
+ MV88E6390_PORT_FLOW_CTL_UPDATE |
+ MV88E6390_PORT_FLOW_CTL_LIMIT_IN | in);
+ if (err)
+ return err;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6390_PORT_FLOW_CTL,
+ MV88E6390_PORT_FLOW_CTL_UPDATE |
+ MV88E6390_PORT_FLOW_CTL_LIMIT_OUT | out);
+}
+
+/* Offset 0x04: Port Control Register */
+
+static const char * const mv88e6xxx_port_state_names[] = {
+ [MV88E6XXX_PORT_CTL0_STATE_DISABLED] = "Disabled",
+ [MV88E6XXX_PORT_CTL0_STATE_BLOCKING] = "Blocking/Listening",
+ [MV88E6XXX_PORT_CTL0_STATE_LEARNING] = "Learning",
+ [MV88E6XXX_PORT_CTL0_STATE_FORWARDING] = "Forwarding",
+};
+
+int mv88e6xxx_port_set_state(struct mv88e6xxx_chip *chip, int port, u8 state)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL0, &reg);
+ if (err)
+ return err;
+
+ reg &= ~MV88E6XXX_PORT_CTL0_STATE_MASK;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ state = MV88E6XXX_PORT_CTL0_STATE_DISABLED;
+ break;
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ state = MV88E6XXX_PORT_CTL0_STATE_BLOCKING;
+ break;
+ case BR_STATE_LEARNING:
+ state = MV88E6XXX_PORT_CTL0_STATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ state = MV88E6XXX_PORT_CTL0_STATE_FORWARDING;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ reg |= state;
+
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
+ if (err)
+ return err;
+
+ dev_dbg(chip->dev, "p%d: PortState set to %s\n", port,
+ mv88e6xxx_port_state_names[state]);
+
+ return 0;
+}
+
+int mv88e6xxx_port_set_egress_mode(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_egress_mode mode)
+{
+ int err;
+ u16 reg;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL0, &reg);
+ if (err)
+ return err;
+
+ reg &= ~MV88E6XXX_PORT_CTL0_EGRESS_MODE_MASK;
+
+ switch (mode) {
+ case MV88E6XXX_EGRESS_MODE_UNMODIFIED:
+ reg |= MV88E6XXX_PORT_CTL0_EGRESS_MODE_UNMODIFIED;
+ break;
+ case MV88E6XXX_EGRESS_MODE_UNTAGGED:
+ reg |= MV88E6XXX_PORT_CTL0_EGRESS_MODE_UNTAGGED;
+ break;
+ case MV88E6XXX_EGRESS_MODE_TAGGED:
+ reg |= MV88E6XXX_PORT_CTL0_EGRESS_MODE_TAGGED;
+ break;
+ case MV88E6XXX_EGRESS_MODE_ETHERTYPE:
+ reg |= MV88E6XXX_PORT_CTL0_EGRESS_MODE_ETHER_TYPE_DSA;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
+}
+
+int mv88e6085_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_frame_mode mode)
+{
+ int err;
+ u16 reg;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL0, &reg);
+ if (err)
+ return err;
+
+ reg &= ~MV88E6XXX_PORT_CTL0_FRAME_MODE_MASK;
+
+ switch (mode) {
+ case MV88E6XXX_FRAME_MODE_NORMAL:
+ reg |= MV88E6XXX_PORT_CTL0_FRAME_MODE_NORMAL;
+ break;
+ case MV88E6XXX_FRAME_MODE_DSA:
+ reg |= MV88E6XXX_PORT_CTL0_FRAME_MODE_DSA;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
+}
+
+int mv88e6351_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_frame_mode mode)
+{
+ int err;
+ u16 reg;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL0, &reg);
+ if (err)
+ return err;
+
+ reg &= ~MV88E6XXX_PORT_CTL0_FRAME_MODE_MASK;
+
+ switch (mode) {
+ case MV88E6XXX_FRAME_MODE_NORMAL:
+ reg |= MV88E6XXX_PORT_CTL0_FRAME_MODE_NORMAL;
+ break;
+ case MV88E6XXX_FRAME_MODE_DSA:
+ reg |= MV88E6XXX_PORT_CTL0_FRAME_MODE_DSA;
+ break;
+ case MV88E6XXX_FRAME_MODE_PROVIDER:
+ reg |= MV88E6XXX_PORT_CTL0_FRAME_MODE_PROVIDER;
+ break;
+ case MV88E6XXX_FRAME_MODE_ETHERTYPE:
+ reg |= MV88E6XXX_PORT_CTL0_FRAME_MODE_ETHER_TYPE_DSA;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
+}
+
+int mv88e6185_port_set_forward_unknown(struct mv88e6xxx_chip *chip,
+ int port, bool unicast)
+{
+ int err;
+ u16 reg;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL0, &reg);
+ if (err)
+ return err;
+
+ if (unicast)
+ reg |= MV88E6185_PORT_CTL0_FORWARD_UNKNOWN;
+ else
+ reg &= ~MV88E6185_PORT_CTL0_FORWARD_UNKNOWN;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
+}
+
+int mv88e6352_port_set_ucast_flood(struct mv88e6xxx_chip *chip, int port,
+ bool unicast)
+{
+ int err;
+ u16 reg;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL0, &reg);
+ if (err)
+ return err;
+
+ if (unicast)
+ reg |= MV88E6352_PORT_CTL0_EGRESS_FLOODS_UC;
+ else
+ reg &= ~MV88E6352_PORT_CTL0_EGRESS_FLOODS_UC;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
+}
+
+int mv88e6352_port_set_mcast_flood(struct mv88e6xxx_chip *chip, int port,
+ bool multicast)
+{
+ int err;
+ u16 reg;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL0, &reg);
+ if (err)
+ return err;
+
+ if (multicast)
+ reg |= MV88E6352_PORT_CTL0_EGRESS_FLOODS_MC;
+ else
+ reg &= ~MV88E6352_PORT_CTL0_EGRESS_FLOODS_MC;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
+}
+
+/* Offset 0x05: Port Control 1 */
+
+int mv88e6xxx_port_set_message_port(struct mv88e6xxx_chip *chip, int port,
+ bool message_port)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL1, &val);
+ if (err)
+ return err;
+
+ if (message_port)
+ val |= MV88E6XXX_PORT_CTL1_MESSAGE_PORT;
+ else
+ val &= ~MV88E6XXX_PORT_CTL1_MESSAGE_PORT;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL1, val);
+}
+
+int mv88e6xxx_port_set_trunk(struct mv88e6xxx_chip *chip, int port,
+ bool trunk, u8 id)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL1, &val);
+ if (err)
+ return err;
+
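+	/* Always clear the old trunk ID; a new one is programmed only
+	 * when the port joins a trunk.
+	 */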
+ val &= ~MV88E6XXX_PORT_CTL1_TRUNK_ID_MASK;
+
+ if (trunk)
+ val |= MV88E6XXX_PORT_CTL1_TRUNK_PORT |
+ (id << MV88E6XXX_PORT_CTL1_TRUNK_ID_SHIFT);
+ else
+ val &= ~MV88E6XXX_PORT_CTL1_TRUNK_PORT;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL1, val);
+}
+
+/* Offset 0x06: Port Based VLAN Map */
+
+int mv88e6xxx_port_set_vlan_map(struct mv88e6xxx_chip *chip, int port, u16 map)
+{
+ const u16 mask = mv88e6xxx_port_mask(chip);
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_BASE_VLAN, &reg);
+ if (err)
+ return err;
+
+ reg &= ~mask;
+ reg |= map & mask;
+
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_BASE_VLAN, reg);
+ if (err)
+ return err;
+
+ dev_dbg(chip->dev, "p%d: VLANTable set to %.3x\n", port, map);
+
+ return 0;
+}
+
+int mv88e6xxx_port_get_fid(struct mv88e6xxx_chip *chip, int port, u16 *fid)
+{
+ const u16 upper_mask = (mv88e6xxx_num_databases(chip) - 1) >> 4;
+ u16 reg;
+ int err;
+
+ /* Port's default FID lower 4 bits are located in reg 0x06, offset 12 */
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_BASE_VLAN, &reg);
+ if (err)
+ return err;
+
+ *fid = (reg & 0xf000) >> 12;
+
+ /* Port's default FID upper bits are located in reg 0x05, offset 0 */
+ if (upper_mask) {
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL1,
+ &reg);
+ if (err)
+ return err;
+
+ *fid |= (reg & upper_mask) << 4;
+ }
+
+ return 0;
+}
+
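+/* Worked example: with 4096 databases, FID 0x123 is stored as 0x3 in
+ * the Port Based VLAN register bits 15:12 and as 0x12 in Port Control 1
+ * bits 7:0.
+ */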
+int mv88e6xxx_port_set_fid(struct mv88e6xxx_chip *chip, int port, u16 fid)
+{
+ const u16 upper_mask = (mv88e6xxx_num_databases(chip) - 1) >> 4;
+ u16 reg;
+ int err;
+
+ if (fid >= mv88e6xxx_num_databases(chip))
+ return -EINVAL;
+
+ /* Port's default FID lower 4 bits are located in reg 0x06, offset 12 */
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_BASE_VLAN, &reg);
+ if (err)
+ return err;
+
+ reg &= 0x0fff;
+ reg |= (fid & 0x000f) << 12;
+
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_BASE_VLAN, reg);
+ if (err)
+ return err;
+
+ /* Port's default FID upper bits are located in reg 0x05, offset 0 */
+ if (upper_mask) {
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL1,
+ &reg);
+ if (err)
+ return err;
+
+ reg &= ~upper_mask;
+ reg |= (fid >> 4) & upper_mask;
+
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL1,
+ reg);
+ if (err)
+ return err;
+ }
+
+ dev_dbg(chip->dev, "p%d: FID set to %u\n", port, fid);
+
+ return 0;
+}
+
+/* Offset 0x07: Default Port VLAN ID & Priority */
+
+int mv88e6xxx_port_get_pvid(struct mv88e6xxx_chip *chip, int port, u16 *pvid)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_DEFAULT_VLAN,
+ &reg);
+ if (err)
+ return err;
+
+ *pvid = reg & MV88E6XXX_PORT_DEFAULT_VLAN_MASK;
+
+ return 0;
+}
+
+int mv88e6xxx_port_set_pvid(struct mv88e6xxx_chip *chip, int port, u16 pvid)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_DEFAULT_VLAN,
+ &reg);
+ if (err)
+ return err;
+
+ reg &= ~MV88E6XXX_PORT_DEFAULT_VLAN_MASK;
+ reg |= pvid & MV88E6XXX_PORT_DEFAULT_VLAN_MASK;
+
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_DEFAULT_VLAN,
+ reg);
+ if (err)
+ return err;
+
+ dev_dbg(chip->dev, "p%d: DefaultVID set to %u\n", port, pvid);
+
+ return 0;
+}
+
+/* Offset 0x08: Port Control 2 Register */
+
+static const char * const mv88e6xxx_port_8021q_mode_names[] = {
+ [MV88E6XXX_PORT_CTL2_8021Q_MODE_DISABLED] = "Disabled",
+ [MV88E6XXX_PORT_CTL2_8021Q_MODE_FALLBACK] = "Fallback",
+ [MV88E6XXX_PORT_CTL2_8021Q_MODE_CHECK] = "Check",
+ [MV88E6XXX_PORT_CTL2_8021Q_MODE_SECURE] = "Secure",
+};
+
+int mv88e6185_port_set_default_forward(struct mv88e6xxx_chip *chip,
+ int port, bool multicast)
+{
+ int err;
+ u16 reg;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &reg);
+ if (err)
+ return err;
+
+ if (multicast)
+ reg |= MV88E6XXX_PORT_CTL2_DEFAULT_FORWARD;
+ else
+ reg &= ~MV88E6XXX_PORT_CTL2_DEFAULT_FORWARD;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, reg);
+}
+
+int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
+ int upstream_port)
+{
+ int err;
+ u16 reg;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &reg);
+ if (err)
+ return err;
+
+ reg &= ~MV88E6095_PORT_CTL2_CPU_PORT_MASK;
+ reg |= upstream_port;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, reg);
+}
+
+int mv88e6xxx_port_set_mirror(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_egress_direction direction,
+ bool mirror)
+{
+ bool *mirror_port;
+ u16 reg;
+ u16 bit;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &reg);
+ if (err)
+ return err;
+
+ switch (direction) {
+ case MV88E6XXX_EGRESS_DIR_INGRESS:
+ bit = MV88E6XXX_PORT_CTL2_INGRESS_MONITOR;
+ mirror_port = &chip->ports[port].mirror_ingress;
+ break;
+ case MV88E6XXX_EGRESS_DIR_EGRESS:
+ bit = MV88E6XXX_PORT_CTL2_EGRESS_MONITOR;
+ mirror_port = &chip->ports[port].mirror_egress;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ reg &= ~bit;
+ if (mirror)
+ reg |= bit;
+
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, reg);
+ if (!err)
+ *mirror_port = mirror;
+
+ return err;
+}
+
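+/* Locking a port enables DropOnLock source address filtering and sets
+ * the Locked Port bit in the port association vector.
+ */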
+int mv88e6xxx_port_set_lock(struct mv88e6xxx_chip *chip, int port,
+ bool locked)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL0, &reg);
+ if (err)
+ return err;
+
+ reg &= ~MV88E6XXX_PORT_CTL0_SA_FILT_MASK;
+ if (locked)
+ reg |= MV88E6XXX_PORT_CTL0_SA_FILT_DROP_ON_LOCK;
+
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_ASSOC_VECTOR, &reg);
+ if (err)
+ return err;
+
+ reg &= ~MV88E6XXX_PORT_ASSOC_VECTOR_LOCKED_PORT;
+ if (locked)
+ reg |= MV88E6XXX_PORT_ASSOC_VECTOR_LOCKED_PORT;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_ASSOC_VECTOR, reg);
+}
+
+int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port,
+ u16 mode)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &reg);
+ if (err)
+ return err;
+
+ reg &= ~MV88E6XXX_PORT_CTL2_8021Q_MODE_MASK;
+ reg |= mode & MV88E6XXX_PORT_CTL2_8021Q_MODE_MASK;
+
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, reg);
+ if (err)
+ return err;
+
+ dev_dbg(chip->dev, "p%d: 802.1QMode set to %s\n", port,
+ mv88e6xxx_port_8021q_mode_names[mode]);
+
+ return 0;
+}
+
+int mv88e6xxx_port_drop_untagged(struct mv88e6xxx_chip *chip, int port,
+ bool drop_untagged)
+{
+ u16 old, new;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &old);
+ if (err)
+ return err;
+
+ if (drop_untagged)
+ new = old | MV88E6XXX_PORT_CTL2_DISCARD_UNTAGGED;
+ else
+ new = old & ~MV88E6XXX_PORT_CTL2_DISCARD_UNTAGGED;
+
+ if (new == old)
+ return 0;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, new);
+}
+
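+/* MapDA selects whether egress is determined by the destination address
+ * lookup (set) or by the port-based VLAN map alone (clear).
+ */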
+int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port, bool map)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &reg);
+ if (err)
+ return err;
+
+ if (map)
+ reg |= MV88E6XXX_PORT_CTL2_MAP_DA;
+ else
+ reg &= ~MV88E6XXX_PORT_CTL2_MAP_DA;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, reg);
+}
+
+int mv88e6165_port_set_jumbo_size(struct mv88e6xxx_chip *chip, int port,
+ size_t size)
+{
+ u16 reg;
+ int err;
+
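+	/* account for the VLAN Ethernet header and FCS */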
+ size += VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &reg);
+ if (err)
+ return err;
+
+ reg &= ~MV88E6XXX_PORT_CTL2_JUMBO_MODE_MASK;
+
+ if (size <= 1522)
+ reg |= MV88E6XXX_PORT_CTL2_JUMBO_MODE_1522;
+ else if (size <= 2048)
+ reg |= MV88E6XXX_PORT_CTL2_JUMBO_MODE_2048;
+ else if (size <= 10240)
+ reg |= MV88E6XXX_PORT_CTL2_JUMBO_MODE_10240;
+ else
+ return -ERANGE;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, reg);
+}
+
+/* Offset 0x09: Port Rate Control */
+
+int mv88e6095_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port)
+{
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_EGRESS_RATE_CTL1,
+ 0x0000);
+}
+
+int mv88e6097_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port)
+{
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_EGRESS_RATE_CTL1,
+ 0x0001);
+}
+
+/* Offset 0x0B: Port Association Vector */
+
+int mv88e6xxx_port_set_assoc_vector(struct mv88e6xxx_chip *chip, int port,
+ u16 pav)
+{
+ u16 reg, mask;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_ASSOC_VECTOR,
+ &reg);
+ if (err)
+ return err;
+
+ mask = mv88e6xxx_port_mask(chip);
+ reg &= ~mask;
+ reg |= pav & mask;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_ASSOC_VECTOR,
+ reg);
+}
+
+/* Offset 0x0C: Port ATU Control */
+
+int mv88e6xxx_port_disable_learn_limit(struct mv88e6xxx_chip *chip, int port)
+{
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_ATU_CTL, 0);
+}
+
+/* Offset 0x0D: (Priority) Override Register */
+
+int mv88e6xxx_port_disable_pri_override(struct mv88e6xxx_chip *chip, int port)
+{
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_PRI_OVERRIDE, 0);
+}
+
+/* Offset 0x0E: Policy & MGMT Control Register for FAMILY 6191X 6193X 6393X */
+
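+/* This register is indirect: a read writes the pointer first and then
+ * reads back the 8-bit data; a write sets the UPDATE bit together with
+ * the pointer and data in a single access.
+ */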
+static int mv88e6393x_port_policy_read(struct mv88e6xxx_chip *chip, int port,
+ u16 pointer, u8 *data)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_write(chip, port, MV88E6393X_PORT_POLICY_MGMT_CTL,
+ pointer);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6393X_PORT_POLICY_MGMT_CTL,
+ &reg);
+ if (err)
+ return err;
+
+ *data = reg;
+
+ return 0;
+}
+
+static int mv88e6393x_port_policy_write(struct mv88e6xxx_chip *chip, int port,
+ u16 pointer, u8 data)
+{
+ u16 reg;
+
+ reg = MV88E6393X_PORT_POLICY_MGMT_CTL_UPDATE | pointer | data;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6393X_PORT_POLICY_MGMT_CTL,
+ reg);
+}
+
+static int mv88e6393x_port_policy_write_all(struct mv88e6xxx_chip *chip,
+ u16 pointer, u8 data)
+{
+ int err, port;
+
+ for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
+ if (dsa_is_unused_port(chip->ds, port))
+ continue;
+
+ err = mv88e6393x_port_policy_write(chip, port, pointer, data);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int mv88e6393x_set_egress_port(struct mv88e6xxx_chip *chip,
+ enum mv88e6xxx_egress_direction direction,
+ int port)
+{
+ u16 ptr;
+ int err;
+
+ switch (direction) {
+ case MV88E6XXX_EGRESS_DIR_INGRESS:
+ ptr = MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_INGRESS_DEST;
+ err = mv88e6393x_port_policy_write_all(chip, ptr, port);
+ if (err)
+ return err;
+ break;
+ case MV88E6XXX_EGRESS_DIR_EGRESS:
+ ptr = MV88E6393X_G2_EGRESS_MONITOR_DEST;
+ err = mv88e6xxx_g2_write(chip, ptr, port);
+ if (err)
+ return err;
+ break;
+ }
+
+ return 0;
+}
+
+int mv88e6393x_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
+ int upstream_port)
+{
+ u16 ptr = MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_CPU_DEST;
+ u8 data = MV88E6393X_PORT_POLICY_MGMT_CTL_CPU_DEST_MGMTPRI |
+ upstream_port;
+
+ return mv88e6393x_port_policy_write(chip, port, ptr, data);
+}
+
+int mv88e6393x_port_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip)
+{
+ u16 ptr;
+ int err;
+
+	/* Treat frames with the reserved multicast destination addresses
+	 * 01:80:c2:00:00:00 and 01:80:c2:00:00:02 as MGMT.
+	 */
+ ptr = MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000000XLO;
+ err = mv88e6393x_port_policy_write_all(chip, ptr, 0xff);
+ if (err)
+ return err;
+
+ ptr = MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000000XHI;
+ err = mv88e6393x_port_policy_write_all(chip, ptr, 0xff);
+ if (err)
+ return err;
+
+ ptr = MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000002XLO;
+ err = mv88e6393x_port_policy_write_all(chip, ptr, 0xff);
+ if (err)
+ return err;
+
+ ptr = MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000002XHI;
+ err = mv88e6393x_port_policy_write_all(chip, ptr, 0xff);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/* Offset 0x10 & 0x11: Extended Port Control (EPC) */
+
+static int mv88e6393x_port_epc_wait_ready(struct mv88e6xxx_chip *chip, int port)
+{
+ int bit = __bf_shf(MV88E6393X_PORT_EPC_CMD_BUSY);
+
+ return mv88e6xxx_port_wait_bit(chip, port, MV88E6393X_PORT_EPC_CMD, bit, 0);
+}
+
+/* Port Ether type for 6393X family */
+
+int mv88e6393x_port_set_ether_type(struct mv88e6xxx_chip *chip, int port,
+ u16 etype)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6393x_port_epc_wait_ready(chip, port);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_port_write(chip, port, MV88E6393X_PORT_EPC_DATA, etype);
+ if (err)
+ return err;
+
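+	/* kick off the indirect write; BUSY self-clears on completion */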
+ val = MV88E6393X_PORT_EPC_CMD_BUSY |
+ MV88E6393X_PORT_EPC_CMD_WRITE |
+ MV88E6393X_PORT_EPC_INDEX_PORT_ETYPE;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6393X_PORT_EPC_CMD, val);
+}
+
+/* Offset 0x0f: Port Ether type */
+
+int mv88e6351_port_set_ether_type(struct mv88e6xxx_chip *chip, int port,
+ u16 etype)
+{
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_ETH_TYPE, etype);
+}
+
+/* Offset 0x18: Port IEEE Priority Remapping Registers [0-3]
+ * Offset 0x19: Port IEEE Priority Remapping Registers [4-7]
+ */
+
+int mv88e6095_port_tag_remap(struct mv88e6xxx_chip *chip, int port)
+{
+ int err;
+
+ /* Use a direct priority mapping for all IEEE tagged frames */
+ err = mv88e6xxx_port_write(chip, port,
+ MV88E6095_PORT_IEEE_PRIO_REMAP_0123,
+ 0x3210);
+ if (err)
+ return err;
+
+ return mv88e6xxx_port_write(chip, port,
+ MV88E6095_PORT_IEEE_PRIO_REMAP_4567,
+ 0x7654);
+}
+
+static int mv88e6xxx_port_ieeepmt_write(struct mv88e6xxx_chip *chip,
+ int port, u16 table, u8 ptr, u16 data)
+{
+ u16 reg;
+
+ reg = MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_UPDATE | table |
+ (ptr << __bf_shf(MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_PTR_MASK)) |
+ (data & MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_DATA_MASK);
+
+ return mv88e6xxx_port_write(chip, port,
+ MV88E6390_PORT_IEEE_PRIO_MAP_TABLE, reg);
+}
+
+int mv88e6390_port_tag_remap(struct mv88e6xxx_chip *chip, int port)
+{
+ int err, i;
+ u16 table;
+
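+	/* Use a direct priority mapping for all IEEE tagged frames, via
+	 * the indirect priority mapping table.
+	 */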
+ for (i = 0; i <= 7; i++) {
+ table = MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_INGRESS_PCP;
+ err = mv88e6xxx_port_ieeepmt_write(chip, port, table, i,
+ (i | i << 4));
+ if (err)
+ return err;
+
+ table = MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_EGRESS_GREEN_PCP;
+ err = mv88e6xxx_port_ieeepmt_write(chip, port, table, i, i);
+ if (err)
+ return err;
+
+ table = MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_EGRESS_YELLOW_PCP;
+ err = mv88e6xxx_port_ieeepmt_write(chip, port, table, i, i);
+ if (err)
+ return err;
+
+ table = MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_EGRESS_AVB_PCP;
+ err = mv88e6xxx_port_ieeepmt_write(chip, port, table, i, i);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* Offset 0x0E: Policy Control Register */
+
+static int
+mv88e6xxx_port_policy_mapping_get_pos(enum mv88e6xxx_policy_mapping mapping,
+ enum mv88e6xxx_policy_action action,
+ u16 *mask, u16 *val, int *shift)
+{
+ switch (mapping) {
+ case MV88E6XXX_POLICY_MAPPING_DA:
+ *shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_DA_MASK);
+ *mask = MV88E6XXX_PORT_POLICY_CTL_DA_MASK;
+ break;
+ case MV88E6XXX_POLICY_MAPPING_SA:
+ *shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_SA_MASK);
+ *mask = MV88E6XXX_PORT_POLICY_CTL_SA_MASK;
+ break;
+ case MV88E6XXX_POLICY_MAPPING_VTU:
+ *shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_VTU_MASK);
+ *mask = MV88E6XXX_PORT_POLICY_CTL_VTU_MASK;
+ break;
+ case MV88E6XXX_POLICY_MAPPING_ETYPE:
+ *shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_ETYPE_MASK);
+ *mask = MV88E6XXX_PORT_POLICY_CTL_ETYPE_MASK;
+ break;
+ case MV88E6XXX_POLICY_MAPPING_PPPOE:
+ *shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_PPPOE_MASK);
+ *mask = MV88E6XXX_PORT_POLICY_CTL_PPPOE_MASK;
+ break;
+ case MV88E6XXX_POLICY_MAPPING_VBAS:
+ *shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_VBAS_MASK);
+ *mask = MV88E6XXX_PORT_POLICY_CTL_VBAS_MASK;
+ break;
+ case MV88E6XXX_POLICY_MAPPING_OPT82:
+ *shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_OPT82_MASK);
+ *mask = MV88E6XXX_PORT_POLICY_CTL_OPT82_MASK;
+ break;
+ case MV88E6XXX_POLICY_MAPPING_UDP:
+ *shift = __bf_shf(MV88E6XXX_PORT_POLICY_CTL_UDP_MASK);
+ *mask = MV88E6XXX_PORT_POLICY_CTL_UDP_MASK;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ switch (action) {
+ case MV88E6XXX_POLICY_ACTION_NORMAL:
+ *val = MV88E6XXX_PORT_POLICY_CTL_NORMAL;
+ break;
+ case MV88E6XXX_POLICY_ACTION_MIRROR:
+ *val = MV88E6XXX_PORT_POLICY_CTL_MIRROR;
+ break;
+ case MV88E6XXX_POLICY_ACTION_TRAP:
+ *val = MV88E6XXX_PORT_POLICY_CTL_TRAP;
+ break;
+ case MV88E6XXX_POLICY_ACTION_DISCARD:
+ *val = MV88E6XXX_PORT_POLICY_CTL_DISCARD;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int mv88e6352_port_set_policy(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_policy_mapping mapping,
+ enum mv88e6xxx_policy_action action)
+{
+ u16 reg, mask, val;
+ int shift;
+ int err;
+
+ err = mv88e6xxx_port_policy_mapping_get_pos(mapping, action, &mask,
+ &val, &shift);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_POLICY_CTL, &reg);
+ if (err)
+ return err;
+
+ reg &= ~mask;
+ reg |= (val << shift) & mask;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_POLICY_CTL, reg);
+}
+
+int mv88e6393x_port_set_policy(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_policy_mapping mapping,
+ enum mv88e6xxx_policy_action action)
+{
+ u16 mask, val;
+ int shift;
+ int err;
+ u16 ptr;
+ u8 reg;
+
+ err = mv88e6xxx_port_policy_mapping_get_pos(mapping, action, &mask,
+ &val, &shift);
+ if (err)
+ return err;
+
+	/* On the 6393x, the 16-bit Port Policy CTL register of older chips
+	 * is replaced by the Port Policy MGMT CTL register, which can access
+	 * more data, but only indirectly. The original 16-bit value is split
+	 * across two 8-bit registers.
+	 */
+ ptr = shift / 8;
+ shift %= 8;
+ mask >>= ptr * 8;
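+	/* e.g. the DA field (mask 0xc000, shift 14) maps to pointer 1,
+	 * mask 0xc0, shift 6
+	 */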
+
+ err = mv88e6393x_port_policy_read(chip, port, ptr, &reg);
+ if (err)
+ return err;
+
+ reg &= ~mask;
+ reg |= (val << shift) & mask;
+
+ return mv88e6393x_port_policy_write(chip, port, ptr, reg);
+}
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
new file mode 100644
index 000000000..d19b6303b
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -0,0 +1,456 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Marvell 88E6xxx Switch Port Registers support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2016-2017 Savoir-faire Linux Inc.
+ * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ */
+
+#ifndef _MV88E6XXX_PORT_H
+#define _MV88E6XXX_PORT_H
+
+#include "chip.h"
+
+/* Offset 0x00: Port Status Register */
+#define MV88E6XXX_PORT_STS 0x00
+#define MV88E6XXX_PORT_STS_PAUSE_EN 0x8000
+#define MV88E6XXX_PORT_STS_MY_PAUSE 0x4000
+#define MV88E6XXX_PORT_STS_HD_FLOW 0x2000
+#define MV88E6XXX_PORT_STS_PHY_DETECT 0x1000
+#define MV88E6250_PORT_STS_LINK 0x1000
+#define MV88E6250_PORT_STS_PORTMODE_MASK 0x0f00
+#define MV88E6250_PORT_STS_PORTMODE_PHY_10_HALF 0x0800
+#define MV88E6250_PORT_STS_PORTMODE_PHY_100_HALF 0x0900
+#define MV88E6250_PORT_STS_PORTMODE_PHY_10_FULL 0x0a00
+#define MV88E6250_PORT_STS_PORTMODE_PHY_100_FULL 0x0b00
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_HALF 0x0c00
+#define MV88E6250_PORT_STS_PORTMODE_MII_100_HALF 0x0d00
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_FULL 0x0e00
+#define MV88E6250_PORT_STS_PORTMODE_MII_100_FULL 0x0f00
+#define MV88E6XXX_PORT_STS_LINK 0x0800
+#define MV88E6XXX_PORT_STS_DUPLEX 0x0400
+#define MV88E6XXX_PORT_STS_SPEED_MASK 0x0300
+#define MV88E6XXX_PORT_STS_SPEED_10 0x0000
+#define MV88E6XXX_PORT_STS_SPEED_100 0x0100
+#define MV88E6XXX_PORT_STS_SPEED_1000 0x0200
+#define MV88E6XXX_PORT_STS_SPEED_10000 0x0300
+#define MV88E6352_PORT_STS_EEE 0x0040
+#define MV88E6165_PORT_STS_AM_DIS 0x0040
+#define MV88E6185_PORT_STS_MGMII 0x0040
+#define MV88E6XXX_PORT_STS_TX_PAUSED 0x0020
+#define MV88E6XXX_PORT_STS_FLOW_CTL 0x0010
+#define MV88E6XXX_PORT_STS_CMODE_MASK 0x000f
+#define MV88E6XXX_PORT_STS_CMODE_MII_PHY 0x0001
+#define MV88E6XXX_PORT_STS_CMODE_MII 0x0002
+#define MV88E6XXX_PORT_STS_CMODE_GMII 0x0003
+#define MV88E6XXX_PORT_STS_CMODE_RMII_PHY 0x0004
+#define MV88E6XXX_PORT_STS_CMODE_RMII 0x0005
+#define MV88E6XXX_PORT_STS_CMODE_RGMII 0x0007
+#define MV88E6XXX_PORT_STS_CMODE_100BASEX 0x0008
+#define MV88E6XXX_PORT_STS_CMODE_1000BASEX 0x0009
+#define MV88E6XXX_PORT_STS_CMODE_SGMII 0x000a
+#define MV88E6XXX_PORT_STS_CMODE_2500BASEX 0x000b
+#define MV88E6XXX_PORT_STS_CMODE_XAUI 0x000c
+#define MV88E6XXX_PORT_STS_CMODE_RXAUI 0x000d
+#define MV88E6393X_PORT_STS_CMODE_5GBASER 0x000c
+#define MV88E6393X_PORT_STS_CMODE_10GBASER 0x000d
+#define MV88E6393X_PORT_STS_CMODE_USXGMII 0x000e
+#define MV88E6185_PORT_STS_CDUPLEX 0x0008
+#define MV88E6185_PORT_STS_CMODE_MASK 0x0007
+#define MV88E6185_PORT_STS_CMODE_GMII_FD 0x0000
+#define MV88E6185_PORT_STS_CMODE_MII_100_FD_PS 0x0001
+#define MV88E6185_PORT_STS_CMODE_MII_100 0x0002
+#define MV88E6185_PORT_STS_CMODE_MII_10 0x0003
+#define MV88E6185_PORT_STS_CMODE_SERDES 0x0004
+#define MV88E6185_PORT_STS_CMODE_1000BASE_X 0x0005
+#define MV88E6185_PORT_STS_CMODE_PHY 0x0006
+#define MV88E6185_PORT_STS_CMODE_DISABLED 0x0007
+
+/* Offset 0x01: MAC (or PCS or Physical) Control Register */
+#define MV88E6XXX_PORT_MAC_CTL 0x01
+#define MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_RXCLK 0x8000
+#define MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_TXCLK 0x4000
+#define MV88E6185_PORT_MAC_CTL_SYNC_OK 0x4000
+#define MV88E6390_PORT_MAC_CTL_FORCE_SPEED 0x2000
+#define MV88E6390_PORT_MAC_CTL_ALTSPEED 0x1000
+#define MV88E6352_PORT_MAC_CTL_200BASE 0x1000
+#define MV88E6XXX_PORT_MAC_CTL_EEE 0x0200
+#define MV88E6XXX_PORT_MAC_CTL_FORCE_EEE 0x0100
+#define MV88E6185_PORT_MAC_CTL_AN_EN 0x0400
+#define MV88E6185_PORT_MAC_CTL_AN_RESTART 0x0200
+#define MV88E6185_PORT_MAC_CTL_AN_DONE 0x0100
+#define MV88E6XXX_PORT_MAC_CTL_FC 0x0080
+#define MV88E6XXX_PORT_MAC_CTL_FORCE_FC 0x0040
+#define MV88E6XXX_PORT_MAC_CTL_LINK_UP 0x0020
+#define MV88E6XXX_PORT_MAC_CTL_FORCE_LINK 0x0010
+#define MV88E6XXX_PORT_MAC_CTL_DUPLEX_FULL 0x0008
+#define MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX 0x0004
+#define MV88E6XXX_PORT_MAC_CTL_SPEED_MASK 0x0003
+#define MV88E6XXX_PORT_MAC_CTL_SPEED_10 0x0000
+#define MV88E6XXX_PORT_MAC_CTL_SPEED_100 0x0001
+#define MV88E6065_PORT_MAC_CTL_SPEED_200 0x0002
+#define MV88E6XXX_PORT_MAC_CTL_SPEED_1000 0x0002
+#define MV88E6390_PORT_MAC_CTL_SPEED_10000 0x0003
+#define MV88E6XXX_PORT_MAC_CTL_SPEED_UNFORCED 0x0003
+
+/* Offset 0x02: Jamming Control Register */
+#define MV88E6097_PORT_JAM_CTL 0x02
+#define MV88E6097_PORT_JAM_CTL_LIMIT_OUT_MASK 0xff00
+#define MV88E6097_PORT_JAM_CTL_LIMIT_IN_MASK 0x00ff
+
+/* Offset 0x02: Flow Control Register */
+#define MV88E6390_PORT_FLOW_CTL 0x02
+#define MV88E6390_PORT_FLOW_CTL_UPDATE 0x8000
+#define MV88E6390_PORT_FLOW_CTL_PTR_MASK 0x7f00
+#define MV88E6390_PORT_FLOW_CTL_LIMIT_IN 0x0000
+#define MV88E6390_PORT_FLOW_CTL_LIMIT_OUT 0x0100
+#define MV88E6390_PORT_FLOW_CTL_DATA_MASK 0x00ff
+
+/* Offset 0x03: Switch Identifier Register */
+#define MV88E6XXX_PORT_SWITCH_ID 0x03
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_MASK 0xfff0
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6085 0x04a0
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6095 0x0950
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6097 0x0990
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6190X 0x0a00
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6390X 0x0a10
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6131 0x1060
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6320 0x1150
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6123 0x1210
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6161 0x1610
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6165 0x1650
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6171 0x1710
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6172 0x1720
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6175 0x1750
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6176 0x1760
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6190 0x1900
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6191 0x1910
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6191X 0x1920
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6193X 0x1930
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6185 0x1a70
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6220 0x2200
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6240 0x2400
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6250 0x2500
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6290 0x2900
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6321 0x3100
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6141 0x3400
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6341 0x3410
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6352 0x3520
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6350 0x3710
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6351 0x3750
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6390 0x3900
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6393X 0x3930
+#define MV88E6XXX_PORT_SWITCH_ID_REV_MASK 0x000f
+
+/* Offset 0x04: Port Control Register */
+#define MV88E6XXX_PORT_CTL0 0x04
+#define MV88E6XXX_PORT_CTL0_USE_CORE_TAG 0x8000
+#define MV88E6XXX_PORT_CTL0_SA_FILT_MASK 0xc000
+#define MV88E6XXX_PORT_CTL0_SA_FILT_DISABLED 0x0000
+#define MV88E6XXX_PORT_CTL0_SA_FILT_DROP_ON_LOCK 0x4000
+#define MV88E6XXX_PORT_CTL0_SA_FILT_DROP_ON_UNLOCK 0x8000
+#define MV88E6XXX_PORT_CTL0_SA_FILT_DROP_ON_CPU 0xc000
+#define MV88E6XXX_PORT_CTL0_EGRESS_MODE_MASK 0x3000
+#define MV88E6XXX_PORT_CTL0_EGRESS_MODE_UNMODIFIED 0x0000
+#define MV88E6XXX_PORT_CTL0_EGRESS_MODE_UNTAGGED 0x1000
+#define MV88E6XXX_PORT_CTL0_EGRESS_MODE_TAGGED 0x2000
+#define MV88E6XXX_PORT_CTL0_EGRESS_MODE_ETHER_TYPE_DSA 0x3000
+#define MV88E6XXX_PORT_CTL0_HEADER 0x0800
+#define MV88E6XXX_PORT_CTL0_IGMP_MLD_SNOOP 0x0400
+#define MV88E6XXX_PORT_CTL0_DOUBLE_TAG 0x0200
+#define MV88E6XXX_PORT_CTL0_FRAME_MODE_MASK 0x0300
+#define MV88E6XXX_PORT_CTL0_FRAME_MODE_NORMAL 0x0000
+#define MV88E6XXX_PORT_CTL0_FRAME_MODE_DSA 0x0100
+#define MV88E6XXX_PORT_CTL0_FRAME_MODE_PROVIDER 0x0200
+#define MV88E6XXX_PORT_CTL0_FRAME_MODE_ETHER_TYPE_DSA 0x0300
+#define MV88E6XXX_PORT_CTL0_DSA_TAG 0x0100
+#define MV88E6XXX_PORT_CTL0_VLAN_TUNNEL 0x0080
+#define MV88E6XXX_PORT_CTL0_TAG_IF_BOTH 0x0040
+#define MV88E6185_PORT_CTL0_USE_IP 0x0020
+#define MV88E6185_PORT_CTL0_USE_TAG 0x0010
+#define MV88E6185_PORT_CTL0_FORWARD_UNKNOWN 0x0004
+#define MV88E6352_PORT_CTL0_EGRESS_FLOODS_UC 0x0004
+#define MV88E6352_PORT_CTL0_EGRESS_FLOODS_MC 0x0008
+#define MV88E6XXX_PORT_CTL0_STATE_MASK 0x0003
+#define MV88E6XXX_PORT_CTL0_STATE_DISABLED 0x0000
+#define MV88E6XXX_PORT_CTL0_STATE_BLOCKING 0x0001
+#define MV88E6XXX_PORT_CTL0_STATE_LEARNING 0x0002
+#define MV88E6XXX_PORT_CTL0_STATE_FORWARDING 0x0003
+
+/* Offset 0x05: Port Control 1 */
+#define MV88E6XXX_PORT_CTL1 0x05
+#define MV88E6XXX_PORT_CTL1_MESSAGE_PORT 0x8000
+#define MV88E6XXX_PORT_CTL1_TRUNK_PORT 0x4000
+#define MV88E6XXX_PORT_CTL1_TRUNK_ID_MASK 0x0f00
+#define MV88E6XXX_PORT_CTL1_TRUNK_ID_SHIFT 8
+#define MV88E6XXX_PORT_CTL1_FID_11_4_MASK 0x00ff
+
+/* Offset 0x06: Port Based VLAN Map */
+#define MV88E6XXX_PORT_BASE_VLAN 0x06
+#define MV88E6XXX_PORT_BASE_VLAN_FID_3_0_MASK 0xf000
+
+/* Offset 0x07: Default Port VLAN ID & Priority */
+#define MV88E6XXX_PORT_DEFAULT_VLAN 0x07
+#define MV88E6XXX_PORT_DEFAULT_VLAN_MASK 0x0fff
+
+/* Offset 0x08: Port Control 2 Register */
+#define MV88E6XXX_PORT_CTL2 0x08
+#define MV88E6XXX_PORT_CTL2_IGNORE_FCS 0x8000
+#define MV88E6XXX_PORT_CTL2_VTU_PRI_OVERRIDE 0x4000
+#define MV88E6XXX_PORT_CTL2_SA_PRIO_OVERRIDE 0x2000
+#define MV88E6XXX_PORT_CTL2_DA_PRIO_OVERRIDE 0x1000
+#define MV88E6XXX_PORT_CTL2_JUMBO_MODE_MASK 0x3000
+#define MV88E6XXX_PORT_CTL2_JUMBO_MODE_1522 0x0000
+#define MV88E6XXX_PORT_CTL2_JUMBO_MODE_2048 0x1000
+#define MV88E6XXX_PORT_CTL2_JUMBO_MODE_10240 0x2000
+#define MV88E6XXX_PORT_CTL2_8021Q_MODE_MASK 0x0c00
+#define MV88E6XXX_PORT_CTL2_8021Q_MODE_DISABLED 0x0000
+#define MV88E6XXX_PORT_CTL2_8021Q_MODE_FALLBACK 0x0400
+#define MV88E6XXX_PORT_CTL2_8021Q_MODE_CHECK 0x0800
+#define MV88E6XXX_PORT_CTL2_8021Q_MODE_SECURE 0x0c00
+#define MV88E6XXX_PORT_CTL2_DISCARD_TAGGED 0x0200
+#define MV88E6XXX_PORT_CTL2_DISCARD_UNTAGGED 0x0100
+#define MV88E6XXX_PORT_CTL2_MAP_DA 0x0080
+#define MV88E6XXX_PORT_CTL2_DEFAULT_FORWARD 0x0040
+#define MV88E6XXX_PORT_CTL2_EGRESS_MONITOR 0x0020
+#define MV88E6XXX_PORT_CTL2_INGRESS_MONITOR 0x0010
+#define MV88E6095_PORT_CTL2_CPU_PORT_MASK 0x000f
+
+/* Offset 0x09: Egress Rate Control */
+#define MV88E6XXX_PORT_EGRESS_RATE_CTL1 0x09
+
+/* Offset 0x0A: Egress Rate Control 2 */
+#define MV88E6XXX_PORT_EGRESS_RATE_CTL2 0x0a
+
+/* Offset 0x0B: Port Association Vector */
+#define MV88E6XXX_PORT_ASSOC_VECTOR 0x0b
+#define MV88E6XXX_PORT_ASSOC_VECTOR_HOLD_AT_1 0x8000
+#define MV88E6XXX_PORT_ASSOC_VECTOR_INT_AGE_OUT 0x4000
+#define MV88E6XXX_PORT_ASSOC_VECTOR_LOCKED_PORT 0x2000
+#define MV88E6XXX_PORT_ASSOC_VECTOR_IGNORE_WRONG 0x1000
+#define MV88E6XXX_PORT_ASSOC_VECTOR_REFRESH_LOCKED 0x0800
+
+/* Offset 0x0C: Port ATU Control */
+#define MV88E6XXX_PORT_ATU_CTL 0x0c
+
+/* Offset 0x0D: Priority Override Register */
+#define MV88E6XXX_PORT_PRI_OVERRIDE 0x0d
+
+/* Offset 0x0E: Policy Control Register */
+#define MV88E6XXX_PORT_POLICY_CTL 0x0e
+#define MV88E6XXX_PORT_POLICY_CTL_DA_MASK 0xc000
+#define MV88E6XXX_PORT_POLICY_CTL_SA_MASK 0x3000
+#define MV88E6XXX_PORT_POLICY_CTL_VTU_MASK 0x0c00
+#define MV88E6XXX_PORT_POLICY_CTL_ETYPE_MASK 0x0300
+#define MV88E6XXX_PORT_POLICY_CTL_PPPOE_MASK 0x00c0
+#define MV88E6XXX_PORT_POLICY_CTL_VBAS_MASK 0x0030
+#define MV88E6XXX_PORT_POLICY_CTL_OPT82_MASK 0x000c
+#define MV88E6XXX_PORT_POLICY_CTL_UDP_MASK 0x0003
+#define MV88E6XXX_PORT_POLICY_CTL_NORMAL 0x0000
+#define MV88E6XXX_PORT_POLICY_CTL_MIRROR 0x0001
+#define MV88E6XXX_PORT_POLICY_CTL_TRAP 0x0002
+#define MV88E6XXX_PORT_POLICY_CTL_DISCARD 0x0003
+
+/* Offset 0x0E: Policy & MGMT Control Register (FAMILY_6393X) */
+#define MV88E6393X_PORT_POLICY_MGMT_CTL 0x0e
+#define MV88E6393X_PORT_POLICY_MGMT_CTL_UPDATE 0x8000
+#define MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_MASK 0x3f00
+#define MV88E6393X_PORT_POLICY_MGMT_CTL_DATA_MASK 0x00ff
+#define MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000000XLO 0x2000
+#define MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000000XHI 0x2100
+#define MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000002XLO 0x2400
+#define MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_01C280000002XHI 0x2500
+#define MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_INGRESS_DEST 0x3000
+#define MV88E6393X_PORT_POLICY_MGMT_CTL_PTR_CPU_DEST 0x3800
+#define MV88E6393X_PORT_POLICY_MGMT_CTL_CPU_DEST_MGMTPRI 0x00e0
+
+/* Offset 0x0F: Port Special Ether Type */
+#define MV88E6XXX_PORT_ETH_TYPE 0x0f
+#define MV88E6XXX_PORT_ETH_TYPE_DEFAULT 0x9100
+
+/* Offset 0x10: InDiscards Low Counter */
+#define MV88E6XXX_PORT_IN_DISCARD_LO 0x10
+
+/* Offset 0x10: Extended Port Control Command */
+#define MV88E6393X_PORT_EPC_CMD 0x10
+#define MV88E6393X_PORT_EPC_CMD_BUSY 0x8000
+#define MV88E6393X_PORT_EPC_CMD_WRITE 0x3000
+#define MV88E6393X_PORT_EPC_INDEX_PORT_ETYPE 0x02
+
+/* Offset 0x11: Extended Port Control Data */
+#define MV88E6393X_PORT_EPC_DATA 0x11
+
+/* Offset 0x11: InDiscards High Counter */
+#define MV88E6XXX_PORT_IN_DISCARD_HI 0x11
+
+/* Offset 0x12: InFiltered Counter */
+#define MV88E6XXX_PORT_IN_FILTERED 0x12
+
+/* Offset 0x13: OutFiltered Counter */
+#define MV88E6XXX_PORT_OUT_FILTERED 0x13
+
+/* Offset 0x18: IEEE Priority Mapping Table */
+#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE 0x18
+#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_UPDATE 0x8000
+#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_MASK 0x7000
+#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_INGRESS_PCP 0x0000
+#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_EGRESS_GREEN_PCP 0x1000
+#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_EGRESS_YELLOW_PCP 0x2000
+#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_EGRESS_AVB_PCP 0x3000
+#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_EGRESS_GREEN_DSCP 0x5000
+#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_EGRESS_YELLOW_DSCP 0x6000
+#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_EGRESS_AVB_DSCP 0x7000
+#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_PTR_MASK 0x0e00
+#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_DATA_MASK 0x01ff
+
+/* Offset 0x18: Port IEEE Priority Remapping Registers (0-3) */
+#define MV88E6095_PORT_IEEE_PRIO_REMAP_0123 0x18
+
+/* Offset 0x19: Port IEEE Priority Remapping Registers (4-7) */
+#define MV88E6095_PORT_IEEE_PRIO_REMAP_4567 0x19
+
+/* Offset 0x1a: Magic undocumented errata register */
+#define MV88E6XXX_PORT_RESERVED_1A 0x1a
+#define MV88E6XXX_PORT_RESERVED_1A_BUSY 0x8000
+#define MV88E6XXX_PORT_RESERVED_1A_WRITE 0x4000
+#define MV88E6XXX_PORT_RESERVED_1A_READ 0x0000
+#define MV88E6XXX_PORT_RESERVED_1A_PORT_SHIFT 5
+#define MV88E6XXX_PORT_RESERVED_1A_BLOCK_SHIFT 10
+#define MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT 0x04
+#define MV88E6XXX_PORT_RESERVED_1A_DATA_PORT 0x05
+#define MV88E6341_PORT_RESERVED_1A_FORCE_CMODE 0x8000
+#define MV88E6341_PORT_RESERVED_1A_SGMII_AN 0x2000
+
+int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg,
+ u16 *val);
+int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg,
+ u16 val);
+int mv88e6xxx_port_wait_bit(struct mv88e6xxx_chip *chip, int port, int reg,
+ int bit, int val);
+
+int mv88e6185_port_set_pause(struct mv88e6xxx_chip *chip, int port,
+ int pause);
+int mv88e6320_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode);
+int mv88e6352_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode);
+int mv88e6390_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode);
+
+int mv88e6xxx_port_set_link(struct mv88e6xxx_chip *chip, int port, int link);
+
+int mv88e6xxx_port_sync_link(struct mv88e6xxx_chip *chip, int port, unsigned int mode, bool isup);
+int mv88e6185_port_sync_link(struct mv88e6xxx_chip *chip, int port, unsigned int mode, bool isup);
+
+int mv88e6185_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex);
+int mv88e6250_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex);
+int mv88e6341_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex);
+int mv88e6352_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex);
+int mv88e6390_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex);
+int mv88e6390x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex);
+int mv88e6393x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex);
+
+phy_interface_t mv88e6341_port_max_speed_mode(int port);
+phy_interface_t mv88e6390_port_max_speed_mode(int port);
+phy_interface_t mv88e6390x_port_max_speed_mode(int port);
+phy_interface_t mv88e6393x_port_max_speed_mode(int port);
+
+int mv88e6xxx_port_set_state(struct mv88e6xxx_chip *chip, int port, u8 state);
+
+int mv88e6xxx_port_set_vlan_map(struct mv88e6xxx_chip *chip, int port, u16 map);
+
+int mv88e6xxx_port_get_fid(struct mv88e6xxx_chip *chip, int port, u16 *fid);
+int mv88e6xxx_port_set_fid(struct mv88e6xxx_chip *chip, int port, u16 fid);
+
+int mv88e6xxx_port_get_pvid(struct mv88e6xxx_chip *chip, int port, u16 *pvid);
+int mv88e6xxx_port_set_pvid(struct mv88e6xxx_chip *chip, int port, u16 pvid);
+
+int mv88e6xxx_port_set_lock(struct mv88e6xxx_chip *chip, int port,
+ bool locked);
+
+int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port,
+ u16 mode);
+int mv88e6095_port_tag_remap(struct mv88e6xxx_chip *chip, int port);
+int mv88e6390_port_tag_remap(struct mv88e6xxx_chip *chip, int port);
+int mv88e6xxx_port_set_egress_mode(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_egress_mode mode);
+int mv88e6085_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_frame_mode mode);
+int mv88e6351_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_frame_mode mode);
+int mv88e6185_port_set_forward_unknown(struct mv88e6xxx_chip *chip,
+ int port, bool unicast);
+int mv88e6185_port_set_default_forward(struct mv88e6xxx_chip *chip,
+ int port, bool multicast);
+int mv88e6352_port_set_ucast_flood(struct mv88e6xxx_chip *chip, int port,
+ bool unicast);
+int mv88e6352_port_set_mcast_flood(struct mv88e6xxx_chip *chip, int port,
+ bool multicast);
+int mv88e6352_port_set_policy(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_policy_mapping mapping,
+ enum mv88e6xxx_policy_action action);
+int mv88e6393x_port_set_policy(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_policy_mapping mapping,
+ enum mv88e6xxx_policy_action action);
+int mv88e6351_port_set_ether_type(struct mv88e6xxx_chip *chip, int port,
+ u16 etype);
+int mv88e6393x_set_egress_port(struct mv88e6xxx_chip *chip,
+ enum mv88e6xxx_egress_direction direction,
+ int port);
+int mv88e6393x_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
+ int upstream_port);
+int mv88e6393x_port_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip);
+int mv88e6393x_port_set_ether_type(struct mv88e6xxx_chip *chip, int port,
+ u16 etype);
+int mv88e6xxx_port_set_message_port(struct mv88e6xxx_chip *chip, int port,
+ bool message_port);
+int mv88e6xxx_port_set_trunk(struct mv88e6xxx_chip *chip, int port,
+ bool trunk, u8 id);
+int mv88e6165_port_set_jumbo_size(struct mv88e6xxx_chip *chip, int port,
+ size_t size);
+int mv88e6095_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port);
+int mv88e6097_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port);
+int mv88e6xxx_port_set_assoc_vector(struct mv88e6xxx_chip *chip, int port,
+ u16 pav);
+int mv88e6097_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in,
+ u8 out);
+int mv88e6390_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in,
+ u8 out);
+int mv88e6341_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode);
+int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode);
+int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode);
+int mv88e6393x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode);
+int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
+int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
+int mv88e6xxx_port_drop_untagged(struct mv88e6xxx_chip *chip, int port,
+ bool drop_untagged);
+int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port, bool map);
+int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
+ int upstream_port);
+int mv88e6xxx_port_set_mirror(struct mv88e6xxx_chip *chip, int port,
+ enum mv88e6xxx_egress_direction direction,
+ bool mirror);
+
+int mv88e6xxx_port_disable_learn_limit(struct mv88e6xxx_chip *chip, int port);
+int mv88e6xxx_port_disable_pri_override(struct mv88e6xxx_chip *chip, int port);
+
+int mv88e6xxx_port_hidden_write(struct mv88e6xxx_chip *chip, int block,
+ int port, int reg, u16 val);
+int mv88e6xxx_port_hidden_wait(struct mv88e6xxx_chip *chip);
+int mv88e6xxx_port_hidden_read(struct mv88e6xxx_chip *chip, int block, int port,
+ int reg, u16 *val);
+
+#endif /* _MV88E6XXX_PORT_H */
diff --git a/drivers/net/dsa/mv88e6xxx/port_hidden.c b/drivers/net/dsa/mv88e6xxx/port_hidden.c
new file mode 100644
index 000000000..7a9f9ff6d
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/port_hidden.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Marvell 88E6xxx Switch Hidden Registers support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2019 Andrew Lunn <andrew@lunn.ch>
+ */
+
+#include <linux/bitfield.h>
+
+#include "chip.h"
+#include "port.h"
+
+/* The mv88e6390 and mv88e6341 have some hidden registers used for debug and
+ * development. The errata workarounds also make use of them.
+ */
+int mv88e6xxx_port_hidden_write(struct mv88e6xxx_chip *chip, int block,
+ int port, int reg, u16 val)
+{
+ u16 ctrl;
+ int err;
+
+ err = mv88e6xxx_port_write(chip, MV88E6XXX_PORT_RESERVED_1A_DATA_PORT,
+ MV88E6XXX_PORT_RESERVED_1A, val);
+ if (err)
+ return err;
+
+ ctrl = MV88E6XXX_PORT_RESERVED_1A_BUSY |
+ MV88E6XXX_PORT_RESERVED_1A_WRITE |
+ block << MV88E6XXX_PORT_RESERVED_1A_BLOCK_SHIFT |
+ port << MV88E6XXX_PORT_RESERVED_1A_PORT_SHIFT |
+ reg;
+
+ return mv88e6xxx_port_write(chip, MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT,
+ MV88E6XXX_PORT_RESERVED_1A, ctrl);
+}
+
+int mv88e6xxx_port_hidden_wait(struct mv88e6xxx_chip *chip)
+{
+ int bit = __bf_shf(MV88E6XXX_PORT_RESERVED_1A_BUSY);
+
+ return mv88e6xxx_port_wait_bit(chip,
+ MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT,
+ MV88E6XXX_PORT_RESERVED_1A, bit, 0);
+}
+
+int mv88e6xxx_port_hidden_read(struct mv88e6xxx_chip *chip, int block, int port,
+ int reg, u16 *val)
+{
+ u16 ctrl;
+ int err;
+
+ ctrl = MV88E6XXX_PORT_RESERVED_1A_BUSY |
+ MV88E6XXX_PORT_RESERVED_1A_READ |
+ block << MV88E6XXX_PORT_RESERVED_1A_BLOCK_SHIFT |
+ port << MV88E6XXX_PORT_RESERVED_1A_PORT_SHIFT |
+ reg;
+
+ err = mv88e6xxx_port_write(chip, MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT,
+ MV88E6XXX_PORT_RESERVED_1A, ctrl);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_port_hidden_wait(chip);
+ if (err)
+ return err;
+
+ return mv88e6xxx_port_read(chip, MV88E6XXX_PORT_RESERVED_1A_DATA_PORT,
+ MV88E6XXX_PORT_RESERVED_1A, val);
+}
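+
+/* Minimal usage sketch (block, reg and val are placeholders here; the real
+ * values come from the chip errata):
+ *
+ *	err = mv88e6xxx_port_hidden_write(chip, block, port, reg, val);
+ *	if (!err)
+ *		err = mv88e6xxx_port_hidden_wait(chip);
+ */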
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c
new file mode 100644
index 000000000..d838c174d
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/ptp.c
@@ -0,0 +1,514 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Marvell 88E6xxx Switch PTP support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2017 National Instruments
+ * Erik Hons <erik.hons@ni.com>
+ * Brandon Streiff <brandon.streiff@ni.com>
+ * Dane Wagner <dane.wagner@ni.com>
+ */
+
+#include "chip.h"
+#include "global2.h"
+#include "hwtstamp.h"
+#include "ptp.h"
+
+#define MV88E6XXX_MAX_ADJ_PPB 1000000
+
+/* Family MV88E6250:
+ * Raw timestamps are in units of 10-ns clock periods.
+ *
+ * clkadj = scaled_ppm * 10*2^28 / (10^6 * 2^16)
+ * simplifies to
+ * clkadj = scaled_ppm * 2^7 / 5^5
+ */
+#define MV88E6250_CC_SHIFT 28
+#define MV88E6250_CC_MULT (10 << MV88E6250_CC_SHIFT)
+#define MV88E6250_CC_MULT_NUM (1 << 7)
+#define MV88E6250_CC_MULT_DEM 3125ULL
+
+/* Other families:
+ * Raw timestamps are in units of 8-ns clock periods.
+ *
+ * clkadj = scaled_ppm * 8*2^28 / (10^6 * 2^16)
+ * simplifies to
+ * clkadj = scaled_ppm * 2^9 / 5^6
+ */
+#define MV88E6XXX_CC_SHIFT 28
+#define MV88E6XXX_CC_MULT (8 << MV88E6XXX_CC_SHIFT)
+#define MV88E6XXX_CC_MULT_NUM (1 << 9)
+#define MV88E6XXX_CC_MULT_DEM 15625ULL
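+
+/* Sanity check: scaled_ppm = 65536 (one ppm) gives 65536 * 2^9 / 5^6 ~= 2147,
+ * which is one ppm of MV88E6XXX_CC_MULT (8 << 28 = 2147483648); likewise the
+ * 6250 case gives 65536 * 2^7 / 5^5 ~= 2684, one ppm of (10 << 28).
+ */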
+
+#define TAI_EVENT_WORK_INTERVAL msecs_to_jiffies(100)
+
+#define cc_to_chip(cc) container_of(cc, struct mv88e6xxx_chip, tstamp_cc)
+#define dw_overflow_to_chip(dw) container_of(dw, struct mv88e6xxx_chip, \
+ overflow_work)
+#define dw_tai_event_to_chip(dw) container_of(dw, struct mv88e6xxx_chip, \
+ tai_event_work)
+
+static int mv88e6xxx_tai_read(struct mv88e6xxx_chip *chip, int addr,
+ u16 *data, int len)
+{
+ if (!chip->info->ops->avb_ops->tai_read)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->avb_ops->tai_read(chip, addr, data, len);
+}
+
+static int mv88e6xxx_tai_write(struct mv88e6xxx_chip *chip, int addr, u16 data)
+{
+ if (!chip->info->ops->avb_ops->tai_write)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->avb_ops->tai_write(chip, addr, data);
+}
+
+/* TODO: places where this is called should be using pinctrl */
+static int mv88e6352_set_gpio_func(struct mv88e6xxx_chip *chip, int pin,
+ int func, int input)
+{
+ int err;
+
+ if (!chip->info->ops->gpio_ops)
+ return -EOPNOTSUPP;
+
+ err = chip->info->ops->gpio_ops->set_dir(chip, pin, input);
+ if (err)
+ return err;
+
+ return chip->info->ops->gpio_ops->set_pctl(chip, pin, func);
+}
+
+static u64 mv88e6352_ptp_clock_read(const struct cyclecounter *cc)
+{
+ struct mv88e6xxx_chip *chip = cc_to_chip(cc);
+ u16 phc_time[2];
+ int err;
+
+ err = mv88e6xxx_tai_read(chip, MV88E6XXX_TAI_TIME_LO, phc_time,
+ ARRAY_SIZE(phc_time));
+ if (err)
+ return 0;
+ else
+ return ((u32)phc_time[1] << 16) | phc_time[0];
+}
+
+static u64 mv88e6165_ptp_clock_read(const struct cyclecounter *cc)
+{
+ struct mv88e6xxx_chip *chip = cc_to_chip(cc);
+ u16 phc_time[2];
+ int err;
+
+ err = mv88e6xxx_tai_read(chip, MV88E6XXX_PTP_GC_TIME_LO, phc_time,
+ ARRAY_SIZE(phc_time));
+ if (err)
+ return 0;
+ else
+ return ((u32)phc_time[1] << 16) | phc_time[0];
+}
+
+/* mv88e6352_config_eventcap - configure TAI event capture
+ * @event: PTP_CLOCK_PPS (internal) or PTP_CLOCK_EXTTS (external)
+ * @rising: zero for falling-edge trigger, else rising-edge trigger
+ *
+ * This will also reset the capture sequence counter.
+ */
+static int mv88e6352_config_eventcap(struct mv88e6xxx_chip *chip, int event,
+ int rising)
+{
+ u16 global_config;
+ u16 cap_config;
+ int err;
+
+ chip->evcap_config = MV88E6XXX_TAI_CFG_CAP_OVERWRITE |
+ MV88E6XXX_TAI_CFG_CAP_CTR_START;
+ if (!rising)
+ chip->evcap_config |= MV88E6XXX_TAI_CFG_EVREQ_FALLING;
+
+ global_config = (chip->evcap_config | chip->trig_config);
+ err = mv88e6xxx_tai_write(chip, MV88E6XXX_TAI_CFG, global_config);
+ if (err)
+ return err;
+
+ if (event == PTP_CLOCK_PPS) {
+ cap_config = MV88E6XXX_TAI_EVENT_STATUS_CAP_TRIG;
+ } else if (event == PTP_CLOCK_EXTTS) {
+ /* if STATUS_CAP_TRIG is unset we capture PTP_EVREQ events */
+ cap_config = 0;
+ } else {
+ return -EINVAL;
+ }
+
+ /* Write the capture config; this also clears the capture counter */
+ err = mv88e6xxx_tai_write(chip, MV88E6XXX_TAI_EVENT_STATUS,
+ cap_config);
+
+ return err;
+}
+
+static void mv88e6352_tai_event_work(struct work_struct *ugly)
+{
+ struct delayed_work *dw = to_delayed_work(ugly);
+ struct mv88e6xxx_chip *chip = dw_tai_event_to_chip(dw);
+ struct ptp_clock_event ev;
+ u16 status[4];
+ u32 raw_ts;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_tai_read(chip, MV88E6XXX_TAI_EVENT_STATUS,
+ status, ARRAY_SIZE(status));
+ mv88e6xxx_reg_unlock(chip);
+
+ if (err) {
+ dev_err(chip->dev, "failed to read TAI status register\n");
+ return;
+ }
+ if (status[0] & MV88E6XXX_TAI_EVENT_STATUS_ERROR) {
+ dev_warn(chip->dev, "missed event capture\n");
+ return;
+ }
+ if (!(status[0] & MV88E6XXX_TAI_EVENT_STATUS_VALID))
+ goto out;
+
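+ /* status[1] and status[2] hold the captured event time, low and high
+ * word (TAI offsets 0x0a/0x0b).
+ */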
+ raw_ts = ((u32)status[2] << 16) | status[1];
+
+ /* Clear the valid bit so the next timestamp can come in */
+ status[0] &= ~MV88E6XXX_TAI_EVENT_STATUS_VALID;
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_tai_write(chip, MV88E6XXX_TAI_EVENT_STATUS, status[0]);
+ mv88e6xxx_reg_unlock(chip);
+
+ /* This is an external timestamp */
+ ev.type = PTP_CLOCK_EXTTS;
+
+ /* We only have one timestamping channel. */
+ ev.index = 0;
+ mv88e6xxx_reg_lock(chip);
+ ev.timestamp = timecounter_cyc2time(&chip->tstamp_tc, raw_ts);
+ mv88e6xxx_reg_unlock(chip);
+
+ ptp_clock_event(chip->ptp_clock, &ev);
+out:
+ schedule_delayed_work(&chip->tai_event_work, TAI_EVENT_WORK_INTERVAL);
+}
+
+static int mv88e6xxx_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct mv88e6xxx_chip *chip = ptp_to_chip(ptp);
+ const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
+ int neg_adj = 0;
+ u32 diff, mult;
+ u64 adj;
+
+ if (scaled_ppm < 0) {
+ neg_adj = 1;
+ scaled_ppm = -scaled_ppm;
+ }
+
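+ /* diff below is the clkadj term derived at the top of this file:
+ * scaled_ppm * cc_mult_num / cc_mult_dem.
+ */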
+ mult = ptp_ops->cc_mult;
+ adj = ptp_ops->cc_mult_num;
+ adj *= scaled_ppm;
+ diff = div_u64(adj, ptp_ops->cc_mult_dem);
+
+ mv88e6xxx_reg_lock(chip);
+
+ timecounter_read(&chip->tstamp_tc);
+ chip->tstamp_cc.mult = neg_adj ? mult - diff : mult + diff;
+
+ mv88e6xxx_reg_unlock(chip);
+
+ return 0;
+}
+
+static int mv88e6xxx_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct mv88e6xxx_chip *chip = ptp_to_chip(ptp);
+
+ mv88e6xxx_reg_lock(chip);
+ timecounter_adjtime(&chip->tstamp_tc, delta);
+ mv88e6xxx_reg_unlock(chip);
+
+ return 0;
+}
+
+static int mv88e6xxx_ptp_gettime(struct ptp_clock_info *ptp,
+ struct timespec64 *ts)
+{
+ struct mv88e6xxx_chip *chip = ptp_to_chip(ptp);
+ u64 ns;
+
+ mv88e6xxx_reg_lock(chip);
+ ns = timecounter_read(&chip->tstamp_tc);
+ mv88e6xxx_reg_unlock(chip);
+
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+static int mv88e6xxx_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct mv88e6xxx_chip *chip = ptp_to_chip(ptp);
+ u64 ns;
+
+ ns = timespec64_to_ns(ts);
+
+ mv88e6xxx_reg_lock(chip);
+ timecounter_init(&chip->tstamp_tc, &chip->tstamp_cc, ns);
+ mv88e6xxx_reg_unlock(chip);
+
+ return 0;
+}
+
+static int mv88e6352_ptp_enable_extts(struct mv88e6xxx_chip *chip,
+ struct ptp_clock_request *rq, int on)
+{
+ int rising = (rq->extts.flags & PTP_RISING_EDGE);
+ int func;
+ int pin;
+ int err;
+
+ /* Reject requests with unsupported flags */
+ if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
+ /* Reject requests to enable time stamping on both edges. */
+ if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
+ (rq->extts.flags & PTP_ENABLE_FEATURE) &&
+ (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
+ return -EOPNOTSUPP;
+
+ pin = ptp_find_pin(chip->ptp_clock, PTP_PF_EXTTS, rq->extts.index);
+
+ if (pin < 0)
+ return -EBUSY;
+
+ mv88e6xxx_reg_lock(chip);
+
+ if (on) {
+ func = MV88E6352_G2_SCRATCH_GPIO_PCTL_EVREQ;
+
+ err = mv88e6352_set_gpio_func(chip, pin, func, true);
+ if (err)
+ goto out;
+
+ schedule_delayed_work(&chip->tai_event_work,
+ TAI_EVENT_WORK_INTERVAL);
+
+ err = mv88e6352_config_eventcap(chip, PTP_CLOCK_EXTTS, rising);
+ } else {
+ func = MV88E6352_G2_SCRATCH_GPIO_PCTL_GPIO;
+
+ err = mv88e6352_set_gpio_func(chip, pin, func, true);
+
+ cancel_delayed_work_sync(&chip->tai_event_work);
+ }
+
+out:
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int mv88e6352_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct mv88e6xxx_chip *chip = ptp_to_chip(ptp);
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ return mv88e6352_ptp_enable_extts(chip, rq, on);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mv88e6352_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_EXTTS:
+ break;
+ case PTP_PF_PEROUT:
+ case PTP_PF_PHYSYNC:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {
+ .clock_read = mv88e6165_ptp_clock_read,
+ .global_enable = mv88e6165_global_enable,
+ .global_disable = mv88e6165_global_disable,
+ .arr0_sts_reg = MV88E6165_PORT_PTP_ARR0_STS,
+ .arr1_sts_reg = MV88E6165_PORT_PTP_ARR1_STS,
+ .dep_sts_reg = MV88E6165_PORT_PTP_DEP_STS,
+ .rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
+ .cc_shift = MV88E6XXX_CC_SHIFT,
+ .cc_mult = MV88E6XXX_CC_MULT,
+ .cc_mult_num = MV88E6XXX_CC_MULT_NUM,
+ .cc_mult_dem = MV88E6XXX_CC_MULT_DEM,
+};
+
+const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = {
+ .clock_read = mv88e6352_ptp_clock_read,
+ .ptp_enable = mv88e6352_ptp_enable,
+ .ptp_verify = mv88e6352_ptp_verify,
+ .event_work = mv88e6352_tai_event_work,
+ .port_enable = mv88e6352_hwtstamp_port_enable,
+ .port_disable = mv88e6352_hwtstamp_port_disable,
+ .n_ext_ts = 1,
+ .arr0_sts_reg = MV88E6XXX_PORT_PTP_ARR0_STS,
+ .arr1_sts_reg = MV88E6XXX_PORT_PTP_ARR1_STS,
+ .dep_sts_reg = MV88E6XXX_PORT_PTP_DEP_STS,
+ .rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
+ .cc_shift = MV88E6250_CC_SHIFT,
+ .cc_mult = MV88E6250_CC_MULT,
+ .cc_mult_num = MV88E6250_CC_MULT_NUM,
+ .cc_mult_dem = MV88E6250_CC_MULT_DEM,
+};
+
+const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {
+ .clock_read = mv88e6352_ptp_clock_read,
+ .ptp_enable = mv88e6352_ptp_enable,
+ .ptp_verify = mv88e6352_ptp_verify,
+ .event_work = mv88e6352_tai_event_work,
+ .port_enable = mv88e6352_hwtstamp_port_enable,
+ .port_disable = mv88e6352_hwtstamp_port_disable,
+ .n_ext_ts = 1,
+ .arr0_sts_reg = MV88E6XXX_PORT_PTP_ARR0_STS,
+ .arr1_sts_reg = MV88E6XXX_PORT_PTP_ARR1_STS,
+ .dep_sts_reg = MV88E6XXX_PORT_PTP_DEP_STS,
+ .rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
+ .cc_shift = MV88E6XXX_CC_SHIFT,
+ .cc_mult = MV88E6XXX_CC_MULT,
+ .cc_mult_num = MV88E6XXX_CC_MULT_NUM,
+ .cc_mult_dem = MV88E6XXX_CC_MULT_DEM,
+};
+
+static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc)
+{
+ struct mv88e6xxx_chip *chip = cc_to_chip(cc);
+
+ if (chip->info->ops->ptp_ops->clock_read)
+ return chip->info->ops->ptp_ops->clock_read(cc);
+
+ return 0;
+}
+
+/* With a 125MHz input clock, the 32-bit timestamp counter overflows in ~34.3
+ * seconds; this task forces periodic reads so that we don't miss any.
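+ * (2^32 cycles at 8 ns per cycle is ~34.36 s, so polling every 16 s
+ * guarantees at least two reads per wrap.)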
+ */
+#define MV88E6XXX_TAI_OVERFLOW_PERIOD (HZ * 16)
+static void mv88e6xxx_ptp_overflow_check(struct work_struct *work)
+{
+ struct delayed_work *dw = to_delayed_work(work);
+ struct mv88e6xxx_chip *chip = dw_overflow_to_chip(dw);
+ struct timespec64 ts;
+
+ mv88e6xxx_ptp_gettime(&chip->ptp_clock_info, &ts);
+
+ schedule_delayed_work(&chip->overflow_work,
+ MV88E6XXX_TAI_OVERFLOW_PERIOD);
+}
+
+int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
+{
+ const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
+ int i;
+
+ /* Set up the cycle counter */
+ memset(&chip->tstamp_cc, 0, sizeof(chip->tstamp_cc));
+ chip->tstamp_cc.read = mv88e6xxx_ptp_clock_read;
+ chip->tstamp_cc.mask = CYCLECOUNTER_MASK(32);
+ chip->tstamp_cc.mult = ptp_ops->cc_mult;
+ chip->tstamp_cc.shift = ptp_ops->cc_shift;
+
+ timecounter_init(&chip->tstamp_tc, &chip->tstamp_cc,
+ ktime_to_ns(ktime_get_real()));
+
+ INIT_DELAYED_WORK(&chip->overflow_work, mv88e6xxx_ptp_overflow_check);
+ if (ptp_ops->event_work)
+ INIT_DELAYED_WORK(&chip->tai_event_work, ptp_ops->event_work);
+
+ chip->ptp_clock_info.owner = THIS_MODULE;
+ snprintf(chip->ptp_clock_info.name, sizeof(chip->ptp_clock_info.name),
+ "%s", dev_name(chip->dev));
+
+ chip->ptp_clock_info.n_ext_ts = ptp_ops->n_ext_ts;
+ chip->ptp_clock_info.n_per_out = 0;
+ chip->ptp_clock_info.n_pins = mv88e6xxx_num_gpio(chip);
+ chip->ptp_clock_info.pps = 0;
+
+ for (i = 0; i < chip->ptp_clock_info.n_pins; ++i) {
+ struct ptp_pin_desc *ppd = &chip->pin_config[i];
+
+ snprintf(ppd->name, sizeof(ppd->name), "mv88e6xxx_gpio%d", i);
+ ppd->index = i;
+ ppd->func = PTP_PF_NONE;
+ }
+ chip->ptp_clock_info.pin_config = chip->pin_config;
+
+ chip->ptp_clock_info.max_adj = MV88E6XXX_MAX_ADJ_PPB;
+ chip->ptp_clock_info.adjfine = mv88e6xxx_ptp_adjfine;
+ chip->ptp_clock_info.adjtime = mv88e6xxx_ptp_adjtime;
+ chip->ptp_clock_info.gettime64 = mv88e6xxx_ptp_gettime;
+ chip->ptp_clock_info.settime64 = mv88e6xxx_ptp_settime;
+ chip->ptp_clock_info.enable = ptp_ops->ptp_enable;
+ chip->ptp_clock_info.verify = ptp_ops->ptp_verify;
+ chip->ptp_clock_info.do_aux_work = mv88e6xxx_hwtstamp_work;
+
+ chip->ptp_clock = ptp_clock_register(&chip->ptp_clock_info, chip->dev);
+ if (IS_ERR(chip->ptp_clock))
+ return PTR_ERR(chip->ptp_clock);
+
+ schedule_delayed_work(&chip->overflow_work,
+ MV88E6XXX_TAI_OVERFLOW_PERIOD);
+
+ return 0;
+}
+
+void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip)
+{
+ if (chip->ptp_clock) {
+ cancel_delayed_work_sync(&chip->overflow_work);
+ if (chip->info->ops->ptp_ops->event_work)
+ cancel_delayed_work_sync(&chip->tai_event_work);
+
+ ptp_clock_unregister(chip->ptp_clock);
+ chip->ptp_clock = NULL;
+ }
+}
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.h b/drivers/net/dsa/mv88e6xxx/ptp.h
new file mode 100644
index 000000000..269d5d16a
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/ptp.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Marvell 88E6xxx Switch PTP support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2017 National Instruments
+ * Erik Hons <erik.hons@ni.com>
+ * Brandon Streiff <brandon.streiff@ni.com>
+ * Dane Wagner <dane.wagner@ni.com>
+ */
+
+#ifndef _MV88E6XXX_PTP_H
+#define _MV88E6XXX_PTP_H
+
+#include "chip.h"
+
+/* Offset 0x00: TAI Global Config */
+#define MV88E6XXX_TAI_CFG 0x00
+#define MV88E6XXX_TAI_CFG_CAP_OVERWRITE 0x8000
+#define MV88E6XXX_TAI_CFG_CAP_CTR_START 0x4000
+#define MV88E6XXX_TAI_CFG_EVREQ_FALLING 0x2000
+#define MV88E6XXX_TAI_CFG_TRIG_ACTIVE_LO 0x1000
+#define MV88E6XXX_TAI_CFG_IRL_ENABLE 0x0400
+#define MV88E6XXX_TAI_CFG_TRIG_IRQ_EN 0x0200
+#define MV88E6XXX_TAI_CFG_EVREQ_IRQ_EN 0x0100
+#define MV88E6XXX_TAI_CFG_TRIG_LOCK 0x0080
+#define MV88E6XXX_TAI_CFG_BLOCK_UPDATE 0x0008
+#define MV88E6XXX_TAI_CFG_MULTI_PTP 0x0004
+#define MV88E6XXX_TAI_CFG_TRIG_MODE_ONESHOT 0x0002
+#define MV88E6XXX_TAI_CFG_TRIG_ENABLE 0x0001
+
+/* Offset 0x01: Timestamp Clock Period (ps) */
+#define MV88E6XXX_TAI_CLOCK_PERIOD 0x01
+
+/* Offset 0x02/0x03: Trigger Generation Amount */
+#define MV88E6XXX_TAI_TRIG_GEN_AMOUNT_LO 0x02
+#define MV88E6XXX_TAI_TRIG_GEN_AMOUNT_HI 0x03
+
+/* Offset 0x04: Clock Compensation */
+#define MV88E6XXX_TAI_TRIG_CLOCK_COMP 0x04
+
+/* Offset 0x05: Trigger Configuration */
+#define MV88E6XXX_TAI_TRIG_CFG 0x05
+
+/* Offset 0x06: Ingress Rate Limiter Clock Generation Amount */
+#define MV88E6XXX_TAI_IRL_AMOUNT 0x06
+
+/* Offset 0x07: Ingress Rate Limiter Compensation */
+#define MV88E6XXX_TAI_IRL_COMP 0x07
+
+/* Offset 0x08: Ingress Rate Limiter Compensation (in ps) */
+#define MV88E6XXX_TAI_IRL_COMP_PS 0x08
+
+/* Offset 0x09: Event Status */
+#define MV88E6XXX_TAI_EVENT_STATUS 0x09
+#define MV88E6XXX_TAI_EVENT_STATUS_CAP_TRIG 0x4000
+#define MV88E6XXX_TAI_EVENT_STATUS_ERROR 0x0200
+#define MV88E6XXX_TAI_EVENT_STATUS_VALID 0x0100
+#define MV88E6XXX_TAI_EVENT_STATUS_CTR_MASK 0x00ff
+
+/* Offset 0x0A/0x0B: Event Time */
+#define MV88E6XXX_TAI_EVENT_TIME_LO 0x0a
+#define MV88E6XXX_TAI_EVENT_TIME_HI 0x0b
+
+/* Offset 0x0E/0x0F: PTP Global Time */
+#define MV88E6XXX_TAI_TIME_LO 0x0e
+#define MV88E6XXX_TAI_TIME_HI 0x0f
+
+/* Offset 0x10/0x11: Trig Generation Time */
+#define MV88E6XXX_TAI_TRIG_TIME_LO 0x10
+#define MV88E6XXX_TAI_TRIG_TIME_HI 0x11
+
+/* Offset 0x12: Lock Status */
+#define MV88E6XXX_TAI_LOCK_STATUS 0x12
+
+/* 6165 Global Control Registers */
+/* Offset 0x00: Ether Type */
+#define MV88E6XXX_PTP_GC_ETYPE 0x00
+
+/* Offset 0x01: Message ID */
+#define MV88E6XXX_PTP_GC_MESSAGE_ID 0x01
+
+/* Offset 0x02: Time Stamp Arrive Time */
+#define MV88E6XXX_PTP_GC_TS_ARR_PTR 0x02
+
+/* Offset 0x03: Port Arrival Interrupt Enable */
+#define MV88E6XXX_PTP_GC_PORT_ARR_INT_EN 0x03
+
+/* Offset 0x04: Port Departure Interrupt Enable */
+#define MV88E6XXX_PTP_GC_PORT_DEP_INT_EN 0x04
+
+/* Offset 0x05: Configuration */
+#define MV88E6XXX_PTP_GC_CONFIG 0x05
+#define MV88E6XXX_PTP_GC_CONFIG_DIS_OVERWRITE BIT(1)
+#define MV88E6XXX_PTP_GC_CONFIG_DIS_TS BIT(0)
+
+/* Offset 0x08: Interrupt Status */
+#define MV88E6XXX_PTP_GC_INT_STATUS 0x08
+
+/* Offset 0x09/0x0A: Global Time */
+#define MV88E6XXX_PTP_GC_TIME_LO 0x09
+#define MV88E6XXX_PTP_GC_TIME_HI 0x0A
+
+/* 6165 Per Port Registers */
+/* Offset 0: Arrival Time 0 Status */
+#define MV88E6165_PORT_PTP_ARR0_STS 0x00
+
+/* Offset 0x01/0x02: PTP Arrival 0 Time */
+#define MV88E6165_PORT_PTP_ARR0_TIME_LO 0x01
+#define MV88E6165_PORT_PTP_ARR0_TIME_HI 0x02
+
+/* Offset 0x03: PTP Arrival 0 Sequence ID */
+#define MV88E6165_PORT_PTP_ARR0_SEQID 0x03
+
+/* Offset 0x04: PTP Arrival 1 Status */
+#define MV88E6165_PORT_PTP_ARR1_STS 0x04
+
+/* Offset 0x05/0x06: PTP Arrival 1 Time */
+#define MV88E6165_PORT_PTP_ARR1_TIME_LO 0x05
+#define MV88E6165_PORT_PTP_ARR1_TIME_HI 0x06
+
+/* Offset 0x07: PTP Arrival 1 Sequence ID */
+#define MV88E6165_PORT_PTP_ARR1_SEQID 0x07
+
+/* Offset 0x08: PTP Departure Status */
+#define MV88E6165_PORT_PTP_DEP_STS 0x08
+
+/* Offset 0x09/0x0a: PTP Departure Time */
+#define MV88E6165_PORT_PTP_DEP_TIME_LO 0x09
+#define MV88E6165_PORT_PTP_DEP_TIME_HI 0x0a
+
+/* Offset 0x0b: PTP Departure Sequence ID */
+#define MV88E6165_PORT_PTP_DEP_SEQID 0x0b
+
+/* Offset 0x0d: Port Status */
+#define MV88E6164_PORT_STATUS 0x0d
+
+#ifdef CONFIG_NET_DSA_MV88E6XXX_PTP
+
+long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp);
+int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip);
+void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip);
+
+#define ptp_to_chip(ptp) container_of(ptp, struct mv88e6xxx_chip, \
+ ptp_clock_info)
+
+extern const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops;
+extern const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops;
+extern const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops;
+
+#else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */
+
+static inline long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp)
+{
+ return -1;
+}
+
+static inline int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
+{
+ return 0;
+}
+
+static inline void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip)
+{
+}
+
+static const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {};
+static const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = {};
+static const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {};
+
+#endif /* CONFIG_NET_DSA_MV88E6XXX_PTP */
+
+#endif /* _MV88E6XXX_PTP_H */
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
new file mode 100644
index 000000000..d94150d8f
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -0,0 +1,1609 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Marvell 88E6xxx SERDES manipulation, via SMI bus
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2017 Andrew Lunn <andrew@lunn.ch>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/mii.h>
+
+#include "chip.h"
+#include "global2.h"
+#include "phy.h"
+#include "port.h"
+#include "serdes.h"
+
+static int mv88e6352_serdes_read(struct mv88e6xxx_chip *chip, int reg,
+ u16 *val)
+{
+ return mv88e6xxx_phy_page_read(chip, MV88E6352_ADDR_SERDES,
+ MV88E6352_SERDES_PAGE_FIBER,
+ reg, val);
+}
+
+static int mv88e6352_serdes_write(struct mv88e6xxx_chip *chip, int reg,
+ u16 val)
+{
+ return mv88e6xxx_phy_page_write(chip, MV88E6352_ADDR_SERDES,
+ MV88E6352_SERDES_PAGE_FIBER,
+ reg, val);
+}
+
+static int mv88e6390_serdes_read(struct mv88e6xxx_chip *chip,
+ int lane, int device, int reg, u16 *val)
+{
+ int reg_c45 = MII_ADDR_C45 | device << 16 | reg;
+
+ return mv88e6xxx_phy_read(chip, lane, reg_c45, val);
+}
+
+static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip,
+ int lane, int device, int reg, u16 val)
+{
+ int reg_c45 = MII_ADDR_C45 | device << 16 | reg;
+
+ return mv88e6xxx_phy_write(chip, lane, reg_c45, val);
+}
+
+static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
+ u16 bmsr, u16 lpa, u16 status,
+ struct phylink_link_state *state)
+{
+ state->link = false;
+
+ /* If the BMSR reports that the link had failed, report this to
+ * phylink.
+ */
+ if (!(bmsr & BMSR_LSTATUS))
+ return 0;
+
+ state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK);
+ state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
+
+ if (status & MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID) {
+ /* The Speed and Duplex Resolved register is 1 if AN is enabled
+ * and complete, or if AN is disabled. So with AN disabled we
+ * still get here on link up.
+ */
+ state->duplex = status &
+ MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ?
+ DUPLEX_FULL : DUPLEX_HALF;
+
+ if (status & MV88E6390_SGMII_PHY_STATUS_TX_PAUSE)
+ state->pause |= MLO_PAUSE_TX;
+ if (status & MV88E6390_SGMII_PHY_STATUS_RX_PAUSE)
+ state->pause |= MLO_PAUSE_RX;
+
+ switch (status & MV88E6390_SGMII_PHY_STATUS_SPEED_MASK) {
+ case MV88E6390_SGMII_PHY_STATUS_SPEED_1000:
+ if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+ state->speed = SPEED_2500;
+ else
+ state->speed = SPEED_1000;
+ break;
+ case MV88E6390_SGMII_PHY_STATUS_SPEED_100:
+ state->speed = SPEED_100;
+ break;
+ case MV88E6390_SGMII_PHY_STATUS_SPEED_10:
+ state->speed = SPEED_10;
+ break;
+ default:
+ dev_err(chip->dev, "invalid PHY speed\n");
+ return -EINVAL;
+ }
+ } else if (state->link &&
+ state->interface != PHY_INTERFACE_MODE_SGMII) {
+ /* If the Speed and Duplex Resolved register is 0 and the link is
+ * up, it means that AN was enabled, but the link partner had it
+ * disabled, so the PHY invoked the Auto-Negotiation Bypass feature
+ * and linked anyway.
+ */
+ state->duplex = DUPLEX_FULL;
+ if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+ state->speed = SPEED_2500;
+ else
+ state->speed = SPEED_1000;
+ } else {
+ state->link = false;
+ }
+
+ if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+ mii_lpa_mod_linkmode_x(state->lp_advertising, lpa,
+ ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
+ else if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
+ mii_lpa_mod_linkmode_x(state->lp_advertising, lpa,
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
+
+ return 0;
+}
+
+int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
+ bool up)
+{
+ u16 val, new_val;
+ int err;
+
+ err = mv88e6352_serdes_read(chip, MII_BMCR, &val);
+ if (err)
+ return err;
+
+ if (up)
+ new_val = val & ~BMCR_PDOWN;
+ else
+ new_val = val | BMCR_PDOWN;
+
+ if (val != new_val)
+ err = mv88e6352_serdes_write(chip, MII_BMCR, new_val);
+
+ return err;
+}
+
+int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
+ int lane, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertise)
+{
+ u16 adv, bmcr, val;
+ bool changed;
+ int err;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ adv = 0x0001;
+ break;
+
+ case PHY_INTERFACE_MODE_1000BASEX:
+ adv = linkmode_adv_to_mii_adv_x(advertise,
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
+ break;
+
+ default:
+ return 0;
+ }
+
+ err = mv88e6352_serdes_read(chip, MII_ADVERTISE, &val);
+ if (err)
+ return err;
+
+ changed = val != adv;
+ if (changed) {
+ err = mv88e6352_serdes_write(chip, MII_ADVERTISE, adv);
+ if (err)
+ return err;
+ }
+
+ err = mv88e6352_serdes_read(chip, MII_BMCR, &val);
+ if (err)
+ return err;
+
+ if (phylink_autoneg_inband(mode))
+ bmcr = val | BMCR_ANENABLE;
+ else
+ bmcr = val & ~BMCR_ANENABLE;
+
+ if (bmcr == val)
+ return changed;
+
+ return mv88e6352_serdes_write(chip, MII_BMCR, bmcr);
+}
+
+int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
+ int lane, struct phylink_link_state *state)
+{
+ u16 bmsr, lpa, status;
+ int err;
+
+ err = mv88e6352_serdes_read(chip, MII_BMSR, &bmsr);
+ if (err) {
+ dev_err(chip->dev, "can't read Serdes PHY BMSR: %d\n", err);
+ return err;
+ }
+
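+ /* Register 0x11 on the fiber page is the PHY specific status
+ * register; it is assumed to share the MV88E6390_SGMII_PHY_STATUS
+ * layout, which the common helper parses below.
+ */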
+ err = mv88e6352_serdes_read(chip, 0x11, &status);
+ if (err) {
+ dev_err(chip->dev, "can't read Serdes PHY status: %d\n", err);
+ return err;
+ }
+
+ err = mv88e6352_serdes_read(chip, MII_LPA, &lpa);
+ if (err) {
+ dev_err(chip->dev, "can't read Serdes PHY LPA: %d\n", err);
+ return err;
+ }
+
+ return mv88e6xxx_serdes_pcs_get_state(chip, bmsr, lpa, status, state);
+}
+
+int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
+ int lane)
+{
+ u16 bmcr;
+ int err;
+
+ err = mv88e6352_serdes_read(chip, MII_BMCR, &bmcr);
+ if (err)
+ return err;
+
+ return mv88e6352_serdes_write(chip, MII_BMCR, bmcr | BMCR_ANRESTART);
+}
+
+int mv88e6352_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
+ int lane, int speed, int duplex)
+{
+ u16 val, bmcr;
+ int err;
+
+ err = mv88e6352_serdes_read(chip, MII_BMCR, &val);
+ if (err)
+ return err;
+
+ bmcr = val & ~(BMCR_SPEED100 | BMCR_FULLDPLX | BMCR_SPEED1000);
+ switch (speed) {
+ case SPEED_1000:
+ bmcr |= BMCR_SPEED1000;
+ break;
+ case SPEED_100:
+ bmcr |= BMCR_SPEED100;
+ break;
+ case SPEED_10:
+ break;
+ }
+
+ if (duplex == DUPLEX_FULL)
+ bmcr |= BMCR_FULLDPLX;
+
+ if (bmcr == val)
+ return 0;
+
+ return mv88e6352_serdes_write(chip, MII_BMCR, bmcr);
+}
+
+int mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
+{
+ u8 cmode = chip->ports[port].cmode;
+ int lane = -ENODEV;
+
+ if ((cmode == MV88E6XXX_PORT_STS_CMODE_100BASEX) ||
+ (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASEX) ||
+ (cmode == MV88E6XXX_PORT_STS_CMODE_SGMII))
+ lane = 0xff; /* Unused */
+
+ return lane;
+}
+
+struct mv88e6352_serdes_hw_stat {
+ char string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int reg;
+};
+
+static struct mv88e6352_serdes_hw_stat mv88e6352_serdes_hw_stats[] = {
+ { "serdes_fibre_rx_error", 16, 21 },
+ { "serdes_PRBS_error", 32, 24 },
+};
+
+int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port)
+{
+ int err;
+
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err <= 0)
+ return err;
+
+ return ARRAY_SIZE(mv88e6352_serdes_hw_stats);
+}
+
+int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip,
+ int port, uint8_t *data)
+{
+ struct mv88e6352_serdes_hw_stat *stat;
+ int err, i;
+
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err <= 0)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(mv88e6352_serdes_hw_stats); i++) {
+ stat = &mv88e6352_serdes_hw_stats[i];
+ memcpy(data + i * ETH_GSTRING_LEN, stat->string,
+ ETH_GSTRING_LEN);
+ }
+ return ARRAY_SIZE(mv88e6352_serdes_hw_stats);
+}
+
+static uint64_t mv88e6352_serdes_get_stat(struct mv88e6xxx_chip *chip,
+ struct mv88e6352_serdes_hw_stat *stat)
+{
+ u64 val = 0;
+ u16 reg;
+ int err;
+
+ err = mv88e6352_serdes_read(chip, stat->reg, &reg);
+ if (err) {
+ dev_err(chip->dev, "failed to read statistic\n");
+ return 0;
+ }
+
+ val = reg;
+
+ if (stat->sizeof_stat == 32) {
+ err = mv88e6352_serdes_read(chip, stat->reg + 1, &reg);
+ if (err) {
+ dev_err(chip->dev, "failed to read statistic\n");
+ return 0;
+ }
+ val = val << 16 | reg;
+ }
+
+ return val;
+}
+
+int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data)
+{
+ struct mv88e6xxx_port *mv88e6xxx_port = &chip->ports[port];
+ struct mv88e6352_serdes_hw_stat *stat;
+ int i, err;
+ u64 value;
+
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err <= 0)
+ return err;
+
+ BUILD_BUG_ON(ARRAY_SIZE(mv88e6352_serdes_hw_stats) >
+ ARRAY_SIZE(mv88e6xxx_port->serdes_stats));
+
+ for (i = 0; i < ARRAY_SIZE(mv88e6352_serdes_hw_stats); i++) {
+ stat = &mv88e6352_serdes_hw_stats[i];
+ value = mv88e6352_serdes_get_stat(chip, stat);
+ mv88e6xxx_port->serdes_stats[i] += value;
+ data[i] = mv88e6xxx_port->serdes_stats[i];
+ }
+
+ return ARRAY_SIZE(mv88e6352_serdes_hw_stats);
+}
+
+static void mv88e6352_serdes_irq_link(struct mv88e6xxx_chip *chip, int port)
+{
+ u16 bmsr;
+ int err;
+
+ /* If the link has dropped, we want to know about it. */
+ err = mv88e6352_serdes_read(chip, MII_BMSR, &bmsr);
+ if (err) {
+ dev_err(chip->dev, "can't read Serdes BMSR: %d\n", err);
+ return;
+ }
+
+ dsa_port_phylink_mac_change(chip->ds, port, !!(bmsr & BMSR_LSTATUS));
+}
+
+irqreturn_t mv88e6352_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
+ int lane)
+{
+ irqreturn_t ret = IRQ_NONE;
+ u16 status;
+ int err;
+
+ err = mv88e6352_serdes_read(chip, MV88E6352_SERDES_INT_STATUS, &status);
+ if (err)
+ return ret;
+
+ if (status & MV88E6352_SERDES_INT_LINK_CHANGE) {
+ ret = IRQ_HANDLED;
+ mv88e6352_serdes_irq_link(chip, port);
+ }
+
+ return ret;
+}
+
+int mv88e6352_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane,
+ bool enable)
+{
+ u16 val = 0;
+
+ if (enable)
+ val |= MV88E6352_SERDES_INT_LINK_CHANGE;
+
+ return mv88e6352_serdes_write(chip, MV88E6352_SERDES_INT_ENABLE, val);
+}
+
+unsigned int mv88e6352_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port)
+{
+ return irq_find_mapping(chip->g2_irq.domain, MV88E6352_SERDES_IRQ);
+}
+
+int mv88e6352_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port)
+{
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ mv88e6xxx_reg_unlock(chip);
+ if (err <= 0)
+ return err;
+
+ return 32 * sizeof(u16);
+}
+
+void mv88e6352_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p)
+{
+ u16 *p = _p;
+ u16 reg;
+ int err;
+ int i;
+
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err <= 0)
+ return;
+
+ for (i = 0; i < 32; i++) {
+ err = mv88e6352_serdes_read(chip, i, &reg);
+ if (!err)
+ p[i] = reg;
+ }
+}
+
+int mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
+{
+ u8 cmode = chip->ports[port].cmode;
+ int lane = -ENODEV;
+
+ switch (port) {
+ case 5:
+ if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
+ cmode == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
+ lane = MV88E6341_PORT5_LANE;
+ break;
+ }
+
+ return lane;
+}
+
+int mv88e6185_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
+ bool up)
+{
+ /* The serdes power can't be controlled on this switch chip but we need
+ * to supply this function to avoid returning -EOPNOTSUPP in
+ * mv88e6xxx_serdes_power_up/mv88e6xxx_serdes_power_down
+ */
+ return 0;
+}
+
+int mv88e6185_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
+{
+ /* There are no configurable serdes lanes on this switch chip but we
+ * need to return a non-negative lane number so that callers of
+ * mv88e6xxx_serdes_get_lane() know this is a serdes port.
+ */
+ switch (chip->ports[port].cmode) {
+ case MV88E6185_PORT_STS_CMODE_SERDES:
+ case MV88E6185_PORT_STS_CMODE_1000BASE_X:
+ return 0;
+ default:
+ return -ENODEV;
+ }
+}
+
+int mv88e6185_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
+ int lane, struct phylink_link_state *state)
+{
+ int err;
+ u16 status;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &status);
+ if (err)
+ return err;
+
+ state->link = !!(status & MV88E6XXX_PORT_STS_LINK);
+
+ if (state->link) {
+ state->duplex = status & MV88E6XXX_PORT_STS_DUPLEX ? DUPLEX_FULL : DUPLEX_HALF;
+
+ switch (status & MV88E6XXX_PORT_STS_SPEED_MASK) {
+ case MV88E6XXX_PORT_STS_SPEED_1000:
+ state->speed = SPEED_1000;
+ break;
+ case MV88E6XXX_PORT_STS_SPEED_100:
+ state->speed = SPEED_100;
+ break;
+ case MV88E6XXX_PORT_STS_SPEED_10:
+ state->speed = SPEED_10;
+ break;
+ default:
+ dev_err(chip->dev, "invalid PHY speed\n");
+ return -EINVAL;
+ }
+ } else {
+ state->duplex = DUPLEX_UNKNOWN;
+ state->speed = SPEED_UNKNOWN;
+ }
+
+ return 0;
+}
+
+int mv88e6097_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane,
+ bool enable)
+{
+ u8 cmode = chip->ports[port].cmode;
+
+ /* The serdes interrupts are enabled in the G2_INT_MASK register. We
+ * need to return 0 to avoid returning -EOPNOTSUPP in
+ * mv88e6xxx_serdes_irq_enable/mv88e6xxx_serdes_irq_disable
+ */
+ switch (cmode) {
+ case MV88E6185_PORT_STS_CMODE_SERDES:
+ case MV88E6185_PORT_STS_CMODE_1000BASE_X:
+ return 0;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static void mv88e6097_serdes_irq_link(struct mv88e6xxx_chip *chip, int port)
+{
+ u16 status;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &status);
+ if (err) {
+ dev_err(chip->dev, "can't read port status: %d\n", err);
+ return;
+ }
+
+ dsa_port_phylink_mac_change(chip->ds, port, !!(status & MV88E6XXX_PORT_STS_LINK));
+}
+
+irqreturn_t mv88e6097_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
+ int lane)
+{
+ u8 cmode = chip->ports[port].cmode;
+
+ switch (cmode) {
+ case MV88E6185_PORT_STS_CMODE_SERDES:
+ case MV88E6185_PORT_STS_CMODE_1000BASE_X:
+ mv88e6097_serdes_irq_link(chip, port);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+int mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
+{
+ u8 cmode = chip->ports[port].cmode;
+ int lane = -ENODEV;
+
+ switch (port) {
+ case 9:
+ if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
+ cmode == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
+ lane = MV88E6390_PORT9_LANE0;
+ break;
+ case 10:
+ if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
+ cmode == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
+ lane = MV88E6390_PORT10_LANE0;
+ break;
+ }
+
+ return lane;
+}
+
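+/* On the 6390X, ports 9 and 10 each own four SERDES lanes. As the mapping
+ * below encodes, when port 9 (or 10) runs a mode that leaves some of its
+ * lanes idle, ports 2-4 (or 5-7) may borrow them for 1000BASE-X.
+ */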
+int mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
+{
+ u8 cmode_port = chip->ports[port].cmode;
+ u8 cmode_port10 = chip->ports[10].cmode;
+ u8 cmode_port9 = chip->ports[9].cmode;
+ int lane = -ENODEV;
+
+ switch (port) {
+ case 2:
+ if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
+ if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASEX)
+ lane = MV88E6390_PORT9_LANE1;
+ break;
+ case 3:
+ if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
+ if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASEX)
+ lane = MV88E6390_PORT9_LANE2;
+ break;
+ case 4:
+ if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
+ if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASEX)
+ lane = MV88E6390_PORT9_LANE3;
+ break;
+ case 5:
+ if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
+ if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASEX)
+ lane = MV88E6390_PORT10_LANE1;
+ break;
+ case 6:
+ if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
+ if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASEX)
+ lane = MV88E6390_PORT10_LANE2;
+ break;
+ case 7:
+ if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
+ if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASEX)
+ lane = MV88E6390_PORT10_LANE3;
+ break;
+ case 9:
+ if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_XAUI ||
+ cmode_port9 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
+ lane = MV88E6390_PORT9_LANE0;
+ break;
+ case 10:
+ if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_XAUI ||
+ cmode_port10 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
+ lane = MV88E6390_PORT10_LANE0;
+ break;
+ }
+
+ return lane;
+}
+
+/* Only ports 0, 9 and 10 have SERDES lanes. Return the SERDES lane address
+ * a port is using, or a negative error if it has none.
+ */
+int mv88e6393x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
+{
+ u8 cmode = chip->ports[port].cmode;
+ int lane = -ENODEV;
+
+ if (port != 0 && port != 9 && port != 10)
+ return -EOPNOTSUPP;
+
+ if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
+ cmode == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
+ cmode == MV88E6393X_PORT_STS_CMODE_5GBASER ||
+ cmode == MV88E6393X_PORT_STS_CMODE_10GBASER)
+ lane = port;
+
+ return lane;
+}
+
+/* Set power up/down for 10GBASE-R and 10GBASE-X4/X2 */
+static int mv88e6390_serdes_power_10g(struct mv88e6xxx_chip *chip, int lane,
+ bool up)
+{
+ u16 val, new_val;
+ int err;
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_10G_CTRL1, &val);
+
+ if (err)
+ return err;
+
+ if (up)
+ new_val = val & ~(MDIO_CTRL1_RESET |
+ MDIO_PCS_CTRL1_LOOPBACK |
+ MDIO_CTRL1_LPOWER);
+ else
+ new_val = val | MDIO_CTRL1_LPOWER;
+
+ if (val != new_val)
+ err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_10G_CTRL1, new_val);
+
+ return err;
+}
+
+/* Set power up/down for SGMII and 1000Base-X */
+static int mv88e6390_serdes_power_sgmii(struct mv88e6xxx_chip *chip, int lane,
+ bool up)
+{
+ u16 val, new_val;
+ int err;
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_BMCR, &val);
+ if (err)
+ return err;
+
+ if (up)
+ new_val = val & ~(BMCR_RESET | BMCR_LOOPBACK | BMCR_PDOWN);
+ else
+ new_val = val | BMCR_PDOWN;
+
+ if (val != new_val)
+ err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_BMCR, new_val);
+
+ return err;
+}
+
+struct mv88e6390_serdes_hw_stat {
+ char string[ETH_GSTRING_LEN];
+ int reg;
+};
+
+static struct mv88e6390_serdes_hw_stat mv88e6390_serdes_hw_stats[] = {
+ { "serdes_rx_pkts", 0xf021 },
+ { "serdes_rx_bytes", 0xf024 },
+ { "serdes_rx_pkts_error", 0xf027 },
+};
+
+int mv88e6390_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port)
+{
+ if (mv88e6xxx_serdes_get_lane(chip, port) < 0)
+ return 0;
+
+ return ARRAY_SIZE(mv88e6390_serdes_hw_stats);
+}
+
+int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip,
+ int port, uint8_t *data)
+{
+ struct mv88e6390_serdes_hw_stat *stat;
+ int i;
+
+ if (mv88e6xxx_serdes_get_lane(chip, port) < 0)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(mv88e6390_serdes_hw_stats); i++) {
+ stat = &mv88e6390_serdes_hw_stats[i];
+ memcpy(data + i * ETH_GSTRING_LEN, stat->string,
+ ETH_GSTRING_LEN);
+ }
+ return ARRAY_SIZE(mv88e6390_serdes_hw_stats);
+}
+
+static uint64_t mv88e6390_serdes_get_stat(struct mv88e6xxx_chip *chip, int lane,
+ struct mv88e6390_serdes_hw_stat *stat)
+{
+ u16 reg[3];
+ int err, i;
+
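+ /* Each counter is spread across three consecutive 16-bit registers,
+ * giving 48 bits in total.
+ */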
+ for (i = 0; i < 3; i++) {
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ stat->reg + i, &reg[i]);
+ if (err) {
+ dev_err(chip->dev, "failed to read statistic\n");
+ return 0;
+ }
+ }
+
+ return reg[0] | ((u64)reg[1] << 16) | ((u64)reg[2] << 32);
+}
+
+int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data)
+{
+ struct mv88e6390_serdes_hw_stat *stat;
+ int lane;
+ int i;
+
+ lane = mv88e6xxx_serdes_get_lane(chip, port);
+ if (lane < 0)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(mv88e6390_serdes_hw_stats); i++) {
+ stat = &mv88e6390_serdes_hw_stats[i];
+ data[i] = mv88e6390_serdes_get_stat(chip, lane, stat);
+ }
+
+ return ARRAY_SIZE(mv88e6390_serdes_hw_stats);
+}
+
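+/* Turn on the packet checker (the ENABLE_PC bit of the packet generation
+ * control register); this is done whenever a lane is powered up.
+ */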
+static int mv88e6390_serdes_enable_checker(struct mv88e6xxx_chip *chip, int lane)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_PG_CONTROL, &reg);
+ if (err)
+ return err;
+
+ reg |= MV88E6390_PG_CONTROL_ENABLE_PC;
+ return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_PG_CONTROL, reg);
+}
+
+int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
+ bool up)
+{
+ u8 cmode = chip->ports[port].cmode;
+ int err;
+
+ switch (cmode) {
+ case MV88E6XXX_PORT_STS_CMODE_SGMII:
+ case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
+ case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
+ err = mv88e6390_serdes_power_sgmii(chip, lane, up);
+ break;
+ case MV88E6XXX_PORT_STS_CMODE_XAUI:
+ case MV88E6XXX_PORT_STS_CMODE_RXAUI:
+ err = mv88e6390_serdes_power_10g(chip, lane, up);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ if (!err && up)
+ err = mv88e6390_serdes_enable_checker(chip, lane);
+
+ return err;
+}
+
+int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
+ int lane, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertise)
+{
+ u16 val, bmcr, adv;
+ bool changed;
+ int err;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ adv = 0x0001;
+ break;
+
+ case PHY_INTERFACE_MODE_1000BASEX:
+ adv = linkmode_adv_to_mii_adv_x(advertise,
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
+ break;
+
+ case PHY_INTERFACE_MODE_2500BASEX:
+ adv = linkmode_adv_to_mii_adv_x(advertise,
+ ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
+ break;
+
+ default:
+ return 0;
+ }
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_ADVERTISE, &val);
+ if (err)
+ return err;
+
+ changed = val != adv;
+ if (changed) {
+ err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_ADVERTISE, adv);
+ if (err)
+ return err;
+ }
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_BMCR, &val);
+ if (err)
+ return err;
+
+ if (phylink_autoneg_inband(mode))
+ bmcr = val | BMCR_ANENABLE;
+ else
+ bmcr = val & ~BMCR_ANENABLE;
+
+ /* Setting ANENABLE triggers a restart of negotiation, so only
+ * rewrite BMCR if it actually changed.
+ */
+ if (bmcr == val)
+ return changed;
+
+ return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_BMCR, bmcr);
+}
+
+static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
+ int port, int lane, struct phylink_link_state *state)
+{
+ u16 bmsr, lpa, status;
+ int err;
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_BMSR, &bmsr);
+ if (err) {
+ dev_err(chip->dev, "can't read Serdes PHY BMSR: %d\n", err);
+ return err;
+ }
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_PHY_STATUS, &status);
+ if (err) {
+ dev_err(chip->dev, "can't read Serdes PHY status: %d\n", err);
+ return err;
+ }
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_LPA, &lpa);
+ if (err) {
+ dev_err(chip->dev, "can't read Serdes PHY LPA: %d\n", err);
+ return err;
+ }
+
+ return mv88e6xxx_serdes_pcs_get_state(chip, bmsr, lpa, status, state);
+}
+
+static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip,
+ int port, int lane, struct phylink_link_state *state)
+{
+ u16 status;
+ int err;
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_10G_STAT1, &status);
+ if (err)
+ return err;
+
+ state->link = !!(status & MDIO_STAT1_LSTATUS);
+ if (state->link) {
+ state->speed = SPEED_10000;
+ state->duplex = DUPLEX_FULL;
+ }
+
+ return 0;
+}
+
+static int mv88e6393x_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip,
+ int port, int lane,
+ struct phylink_link_state *state)
+{
+ u16 status;
+ int err;
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_10G_STAT1, &status);
+ if (err)
+ return err;
+
+ state->link = !!(status & MDIO_STAT1_LSTATUS);
+ if (state->link) {
+ if (state->interface == PHY_INTERFACE_MODE_5GBASER)
+ state->speed = SPEED_5000;
+ else
+ state->speed = SPEED_10000;
+ state->duplex = DUPLEX_FULL;
+ }
+
+ return 0;
+}
+
+int mv88e6390_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
+ int lane, struct phylink_link_state *state)
+{
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return mv88e6390_serdes_pcs_get_state_sgmii(chip, port, lane,
+ state);
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ return mv88e6390_serdes_pcs_get_state_10g(chip, port, lane,
+ state);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int mv88e6393x_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
+ int lane, struct phylink_link_state *state)
+{
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return mv88e6390_serdes_pcs_get_state_sgmii(chip, port, lane,
+ state);
+ case PHY_INTERFACE_MODE_5GBASER:
+ case PHY_INTERFACE_MODE_10GBASER:
+ return mv88e6393x_serdes_pcs_get_state_10g(chip, port, lane,
+ state);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int mv88e6390_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
+ int lane)
+{
+ u16 bmcr;
+ int err;
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_BMCR, &bmcr);
+ if (err)
+ return err;
+
+ return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_BMCR,
+ bmcr | BMCR_ANRESTART);
+}
+
+int mv88e6390_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
+ int lane, int speed, int duplex)
+{
+ u16 val, bmcr;
+ int err;
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_BMCR, &val);
+ if (err)
+ return err;
+
+ bmcr = val & ~(BMCR_SPEED100 | BMCR_FULLDPLX | BMCR_SPEED1000);
+ switch (speed) {
+ case SPEED_2500:
+ case SPEED_1000:
+ bmcr |= BMCR_SPEED1000;
+ break;
+ case SPEED_100:
+ bmcr |= BMCR_SPEED100;
+ break;
+ case SPEED_10:
+ break;
+ }
+
+ if (duplex == DUPLEX_FULL)
+ bmcr |= BMCR_FULLDPLX;
+
+ if (bmcr == val)
+ return 0;
+
+ return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_BMCR, bmcr);
+}
+
+static void mv88e6390_serdes_irq_link_sgmii(struct mv88e6xxx_chip *chip,
+ int port, int lane)
+{
+ u16 bmsr;
+ int err;
+
+ /* If the link has dropped, we want to know about it. */
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_BMSR, &bmsr);
+ if (err) {
+ dev_err(chip->dev, "can't read Serdes BMSR: %d\n", err);
+ return;
+ }
+
+ dsa_port_phylink_mac_change(chip->ds, port, !!(bmsr & BMSR_LSTATUS));
+}
+
+static void mv88e6393x_serdes_irq_link_10g(struct mv88e6xxx_chip *chip,
+ int port, u8 lane)
+{
+ u16 status;
+ int err;
+
+ /* If the link has dropped, we want to know about it. */
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_10G_STAT1, &status);
+ if (err) {
+ dev_err(chip->dev, "can't read Serdes STAT1: %d\n", err);
+ return;
+ }
+
+ dsa_port_phylink_mac_change(chip->ds, port, !!(status & MDIO_STAT1_LSTATUS));
+}
+
+static int mv88e6390_serdes_irq_enable_sgmii(struct mv88e6xxx_chip *chip,
+ int lane, bool enable)
+{
+ u16 val = 0;
+
+ if (enable)
+ val |= MV88E6390_SGMII_INT_LINK_DOWN |
+ MV88E6390_SGMII_INT_LINK_UP;
+
+ return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_INT_ENABLE, val);
+}
+
+int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane,
+ bool enable)
+{
+ u8 cmode = chip->ports[port].cmode;
+
+ switch (cmode) {
+ case MV88E6XXX_PORT_STS_CMODE_SGMII:
+ case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
+ case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
+ return mv88e6390_serdes_irq_enable_sgmii(chip, lane, enable);
+ }
+
+ return 0;
+}
+
+static int mv88e6390_serdes_irq_status_sgmii(struct mv88e6xxx_chip *chip,
+ int lane, u16 *status)
+{
+ int err;
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_INT_STATUS, status);
+
+ return err;
+}
+
+static int mv88e6393x_serdes_irq_enable_10g(struct mv88e6xxx_chip *chip,
+ u8 lane, bool enable)
+{
+ u16 val = 0;
+
+ if (enable)
+ val |= MV88E6393X_10G_INT_LINK_CHANGE;
+
+ return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6393X_10G_INT_ENABLE, val);
+}
+
+int mv88e6393x_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port,
+ int lane, bool enable)
+{
+ u8 cmode = chip->ports[port].cmode;
+
+ switch (cmode) {
+ case MV88E6XXX_PORT_STS_CMODE_SGMII:
+ case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
+ case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
+ return mv88e6390_serdes_irq_enable_sgmii(chip, lane, enable);
+ case MV88E6393X_PORT_STS_CMODE_5GBASER:
+ case MV88E6393X_PORT_STS_CMODE_10GBASER:
+ return mv88e6393x_serdes_irq_enable_10g(chip, lane, enable);
+ }
+
+ return 0;
+}
+
+static int mv88e6393x_serdes_irq_status_10g(struct mv88e6xxx_chip *chip,
+ u8 lane, u16 *status)
+{
+ int err;
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6393X_10G_INT_STATUS, status);
+
+ return err;
+}
+
+irqreturn_t mv88e6393x_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
+ int lane)
+{
+ u8 cmode = chip->ports[port].cmode;
+ irqreturn_t ret = IRQ_NONE;
+ u16 status;
+ int err;
+
+ switch (cmode) {
+ case MV88E6XXX_PORT_STS_CMODE_SGMII:
+ case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
+ case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
+ err = mv88e6390_serdes_irq_status_sgmii(chip, lane, &status);
+ if (err)
+ return ret;
+ if (status & (MV88E6390_SGMII_INT_LINK_DOWN |
+ MV88E6390_SGMII_INT_LINK_UP)) {
+ ret = IRQ_HANDLED;
+ mv88e6390_serdes_irq_link_sgmii(chip, port, lane);
+ }
+ break;
+ case MV88E6393X_PORT_STS_CMODE_5GBASER:
+ case MV88E6393X_PORT_STS_CMODE_10GBASER:
+ err = mv88e6393x_serdes_irq_status_10g(chip, lane, &status);
+ if (err)
+			return ret;
+ if (status & MV88E6393X_10G_INT_LINK_CHANGE) {
+ ret = IRQ_HANDLED;
+ mv88e6393x_serdes_irq_link_10g(chip, port, lane);
+ }
+ break;
+ }
+
+ return ret;
+}
+
+irqreturn_t mv88e6390_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
+ int lane)
+{
+ u8 cmode = chip->ports[port].cmode;
+ irqreturn_t ret = IRQ_NONE;
+ u16 status;
+ int err;
+
+ switch (cmode) {
+ case MV88E6XXX_PORT_STS_CMODE_SGMII:
+ case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
+ case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
+ err = mv88e6390_serdes_irq_status_sgmii(chip, lane, &status);
+ if (err)
+ return ret;
+ if (status & (MV88E6390_SGMII_INT_LINK_DOWN |
+ MV88E6390_SGMII_INT_LINK_UP)) {
+ ret = IRQ_HANDLED;
+ mv88e6390_serdes_irq_link_sgmii(chip, port, lane);
+ }
+ }
+
+ return ret;
+}
+
+unsigned int mv88e6390_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port)
+{
+ return irq_find_mapping(chip->g2_irq.domain, port);
+}
+
+static const u16 mv88e6390_serdes_regs[] = {
+ /* SERDES common registers */
+ 0xf00a, 0xf00b, 0xf00c,
+ 0xf010, 0xf011, 0xf012, 0xf013,
+ 0xf016, 0xf017, 0xf018,
+ 0xf01b, 0xf01c, 0xf01d, 0xf01e, 0xf01f,
+ 0xf020, 0xf021, 0xf022, 0xf023, 0xf024, 0xf025, 0xf026, 0xf027,
+ 0xf028, 0xf029,
+ 0xf030, 0xf031, 0xf032, 0xf033, 0xf034, 0xf035, 0xf036, 0xf037,
+ 0xf038, 0xf039,
+ /* SGMII */
+ 0x2000, 0x2001, 0x2002, 0x2003, 0x2004, 0x2005, 0x2006, 0x2007,
+ 0x2008,
+ 0x200f,
+ 0xa000, 0xa001, 0xa002, 0xa003,
+ /* 10Gbase-X */
+ 0x1000, 0x1001, 0x1002, 0x1003, 0x1004, 0x1005, 0x1006, 0x1007,
+ 0x1008,
+ 0x100e, 0x100f,
+ 0x1018, 0x1019,
+ 0x9000, 0x9001, 0x9002, 0x9003, 0x9004,
+ 0x9006,
+ 0x9010, 0x9011, 0x9012, 0x9013, 0x9014, 0x9015, 0x9016,
+ /* 10Gbase-R */
+ 0x1020, 0x1021, 0x1022, 0x1023, 0x1024, 0x1025, 0x1026, 0x1027,
+ 0x1028, 0x1029, 0x102a, 0x102b,
+};
+
+int mv88e6390_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port)
+{
+ if (mv88e6xxx_serdes_get_lane(chip, port) < 0)
+ return 0;
+
+ return ARRAY_SIZE(mv88e6390_serdes_regs) * sizeof(u16);
+}
+
+void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p)
+{
+ u16 *p = _p;
+ int lane;
+ u16 reg;
+ int err;
+ int i;
+
+ lane = mv88e6xxx_serdes_get_lane(chip, port);
+ if (lane < 0)
+ return;
+
+	for (i = 0; i < ARRAY_SIZE(mv88e6390_serdes_regs); i++) {
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ mv88e6390_serdes_regs[i], &reg);
+ if (!err)
+ p[i] = reg;
+ }
+}
+
+static const int mv88e6352_serdes_p2p_to_reg[] = {
+ /* Index of value in microvolts corresponds to the register value */
+ 14000, 112000, 210000, 308000, 406000, 504000, 602000, 700000,
+};
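+
+/* Worked example, for illustration: a requested amplitude of 210000 uV
+ * matches index 2 of the table above, so the function below programs
+ * register value 2; any value not listed is rejected with -EINVAL.
+ */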
+
+int mv88e6352_serdes_set_tx_amplitude(struct mv88e6xxx_chip *chip, int port,
+ int val)
+{
+ bool found = false;
+ u16 ctrl, reg;
+ int err;
+ int i;
+
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err <= 0)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(mv88e6352_serdes_p2p_to_reg); ++i) {
+ if (mv88e6352_serdes_p2p_to_reg[i] == val) {
+ reg = i;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return -EINVAL;
+
+ err = mv88e6352_serdes_read(chip, MV88E6352_SERDES_SPEC_CTRL2, &ctrl);
+ if (err)
+ return err;
+
+ ctrl &= ~MV88E6352_SERDES_OUT_AMP_MASK;
+ ctrl |= reg;
+
+ return mv88e6352_serdes_write(chip, MV88E6352_SERDES_SPEC_CTRL2, ctrl);
+}
+
+static int mv88e6393x_serdes_power_lane(struct mv88e6xxx_chip *chip, int lane,
+ bool on)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6393X_SERDES_CTRL1, &reg);
+ if (err)
+ return err;
+
+ if (on)
+ reg &= ~(MV88E6393X_SERDES_CTRL1_TX_PDOWN |
+ MV88E6393X_SERDES_CTRL1_RX_PDOWN);
+ else
+ reg |= MV88E6393X_SERDES_CTRL1_TX_PDOWN |
+ MV88E6393X_SERDES_CTRL1_RX_PDOWN;
+
+ return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6393X_SERDES_CTRL1, reg);
+}
+
+static int mv88e6393x_serdes_erratum_4_6(struct mv88e6xxx_chip *chip, int lane)
+{
+ u16 reg;
+ int err;
+
+ /* mv88e6393x family errata 4.6:
+	 * The PwrDn bit on the SERDES cannot be cleared if the device is
+	 * configured in CPU_MGD mode or if P0_mode is configured for [x]MII.
+ * Workaround: Set SERDES register 4.F002 bit 5=0 and bit 15=1.
+ *
+ * It seems that after this workaround the SERDES is automatically
+ * powered up (the bit is cleared), so power it down.
+ */
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6393X_SERDES_POC, &reg);
+ if (err)
+ return err;
+
+ reg &= ~MV88E6393X_SERDES_POC_PDOWN;
+ reg |= MV88E6393X_SERDES_POC_RESET;
+
+ err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6393X_SERDES_POC, reg);
+ if (err)
+ return err;
+
+ err = mv88e6390_serdes_power_sgmii(chip, lane, false);
+ if (err)
+ return err;
+
+ return mv88e6393x_serdes_power_lane(chip, lane, false);
+}
+
+int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ err = mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT0_LANE);
+ if (err)
+ return err;
+
+ err = mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT9_LANE);
+ if (err)
+ return err;
+
+ return mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT10_LANE);
+}
+
+static int mv88e6393x_serdes_erratum_4_8(struct mv88e6xxx_chip *chip, int lane)
+{
+ u16 reg, pcs;
+ int err;
+
+ /* mv88e6393x family errata 4.8:
+	 * When a SERDES port is operating in 1000BASE-X or SGMII mode, the
+	 * link may not come up after a hardware or software reset of the
+	 * SERDES core. The workaround is to write SERDES register 4.F074
+	 * bit 14=1 in only those modes, and 0 in all other modes.
+ */
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6393X_SERDES_POC, &pcs);
+ if (err)
+ return err;
+
+ pcs &= MV88E6393X_SERDES_POC_PCS_MASK;
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6393X_ERRATA_4_8_REG, &reg);
+ if (err)
+ return err;
+
+ if (pcs == MV88E6393X_SERDES_POC_PCS_1000BASEX ||
+ pcs == MV88E6393X_SERDES_POC_PCS_SGMII_PHY ||
+ pcs == MV88E6393X_SERDES_POC_PCS_SGMII_MAC)
+ reg |= MV88E6393X_ERRATA_4_8_BIT;
+ else
+ reg &= ~MV88E6393X_ERRATA_4_8_BIT;
+
+ return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6393X_ERRATA_4_8_REG, reg);
+}
+
+static int mv88e6393x_serdes_erratum_5_2(struct mv88e6xxx_chip *chip, int lane,
+ u8 cmode)
+{
+ static const struct {
+ u16 dev, reg, val, mask;
+ } fixes[] = {
+ { MDIO_MMD_VEND1, 0x8093, 0xcb5a, 0xffff },
+ { MDIO_MMD_VEND1, 0x8171, 0x7088, 0xffff },
+ { MDIO_MMD_VEND1, 0x80c9, 0x311a, 0xffff },
+ { MDIO_MMD_VEND1, 0x80a2, 0x8000, 0xff7f },
+ { MDIO_MMD_VEND1, 0x80a9, 0x0000, 0xfff0 },
+ { MDIO_MMD_VEND1, 0x80a3, 0x0000, 0xf8ff },
+ { MDIO_MMD_PHYXS, MV88E6393X_SERDES_POC,
+ MV88E6393X_SERDES_POC_RESET, MV88E6393X_SERDES_POC_RESET },
+ };
+ int err, i;
+ u16 reg;
+
+ /* mv88e6393x family errata 5.2:
+	 * For optimal signal integrity, the following sequence should be
+	 * applied to a SERDES operating in 10G mode. These registers apply
+	 * only to 10G operation and have no effect at other speeds.
+ */
+ if (cmode != MV88E6393X_PORT_STS_CMODE_10GBASER)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(fixes); ++i) {
+ err = mv88e6390_serdes_read(chip, lane, fixes[i].dev,
+ fixes[i].reg, &reg);
+ if (err)
+ return err;
+
+ reg &= ~fixes[i].mask;
+ reg |= fixes[i].val;
+
+ err = mv88e6390_serdes_write(chip, lane, fixes[i].dev,
+ fixes[i].reg, reg);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mv88e6393x_serdes_fix_2500basex_an(struct mv88e6xxx_chip *chip,
+ int lane, u8 cmode, bool on)
+{
+ u16 reg;
+ int err;
+
+ if (cmode != MV88E6XXX_PORT_STS_CMODE_2500BASEX)
+ return 0;
+
+	/* Inband AN is broken on Amethyst in 2500base-x mode when set by the
+	 * standard mechanism (via cmode).
+	 * We can get around this by configuring the PCS mode to 1000base-x
+	 * and then writing value 0x58 to register 1e.8000. (This must be done
+	 * while the SerDes receiver and transmitter are disabled, which is
+	 * the case when this function is called.)
+	 * It seems that when we configure 2500base-x mode this way (by
+	 * changing the PCS mode to 1000base-x and the frequency to 3.125 GHz
+	 * from 1.25 GHz) and then configure sgmii or 1000base-x, the device
+	 * thinks that it already has SerDes at 1.25 GHz and does not change
+	 * the 1e.8000 register, leaving SerDes at 3.125 GHz.
+	 * To avoid this, change the PCS mode back to 2500base-x when
+	 * disabling SerDes from 2500base-x mode.
+	 */
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6393X_SERDES_POC, &reg);
+ if (err)
+ return err;
+
+ reg &= ~(MV88E6393X_SERDES_POC_PCS_MASK | MV88E6393X_SERDES_POC_AN);
+ if (on)
+ reg |= MV88E6393X_SERDES_POC_PCS_1000BASEX |
+ MV88E6393X_SERDES_POC_AN;
+ else
+ reg |= MV88E6393X_SERDES_POC_PCS_2500BASEX;
+ reg |= MV88E6393X_SERDES_POC_RESET;
+
+ err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6393X_SERDES_POC, reg);
+ if (err)
+ return err;
+
+ err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_VEND1, 0x8000, 0x58);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int mv88e6393x_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
+ bool on)
+{
+ u8 cmode = chip->ports[port].cmode;
+ int err;
+
+ if (port != 0 && port != 9 && port != 10)
+ return -EOPNOTSUPP;
+
+ if (on) {
+ err = mv88e6393x_serdes_erratum_4_8(chip, lane);
+ if (err)
+ return err;
+
+ err = mv88e6393x_serdes_erratum_5_2(chip, lane, cmode);
+ if (err)
+ return err;
+
+ err = mv88e6393x_serdes_fix_2500basex_an(chip, lane, cmode,
+ true);
+ if (err)
+ return err;
+
+ err = mv88e6393x_serdes_power_lane(chip, lane, true);
+ if (err)
+ return err;
+ }
+
+ switch (cmode) {
+ case MV88E6XXX_PORT_STS_CMODE_SGMII:
+ case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
+ case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
+ err = mv88e6390_serdes_power_sgmii(chip, lane, on);
+ break;
+ case MV88E6393X_PORT_STS_CMODE_5GBASER:
+ case MV88E6393X_PORT_STS_CMODE_10GBASER:
+ err = mv88e6390_serdes_power_10g(chip, lane, on);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ if (err)
+ return err;
+
+ if (!on) {
+ err = mv88e6393x_serdes_power_lane(chip, lane, false);
+ if (err)
+ return err;
+
+ err = mv88e6393x_serdes_fix_2500basex_an(chip, lane, cmode,
+ false);
+ }
+
+ return err;
+}
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
new file mode 100644
index 000000000..29bb4e91e
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -0,0 +1,248 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Marvell 88E6xxx SERDES manipulation, via SMI bus
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
+ */
+
+#ifndef _MV88E6XXX_SERDES_H
+#define _MV88E6XXX_SERDES_H
+
+#include "chip.h"
+
+#define MV88E6352_ADDR_SERDES 0x0f
+#define MV88E6352_SERDES_PAGE_FIBER 0x01
+#define MV88E6352_SERDES_IRQ 0x0b
+#define MV88E6352_SERDES_INT_ENABLE 0x12
+#define MV88E6352_SERDES_INT_SPEED_CHANGE BIT(14)
+#define MV88E6352_SERDES_INT_DUPLEX_CHANGE BIT(13)
+#define MV88E6352_SERDES_INT_PAGE_RX BIT(12)
+#define MV88E6352_SERDES_INT_AN_COMPLETE BIT(11)
+#define MV88E6352_SERDES_INT_LINK_CHANGE BIT(10)
+#define MV88E6352_SERDES_INT_SYMBOL_ERROR BIT(9)
+#define MV88E6352_SERDES_INT_FALSE_CARRIER BIT(8)
+#define MV88E6352_SERDES_INT_FIFO_OVER_UNDER BIT(7)
+#define MV88E6352_SERDES_INT_FIBRE_ENERGY BIT(4)
+#define MV88E6352_SERDES_INT_STATUS 0x13
+
+#define MV88E6352_SERDES_SPEC_CTRL2 0x1a
+#define MV88E6352_SERDES_OUT_AMP_MASK 0x0007
+
+#define MV88E6341_PORT5_LANE 0x15
+
+#define MV88E6390_PORT9_LANE0 0x09
+#define MV88E6390_PORT9_LANE1 0x12
+#define MV88E6390_PORT9_LANE2 0x13
+#define MV88E6390_PORT9_LANE3 0x14
+#define MV88E6390_PORT10_LANE0 0x0a
+#define MV88E6390_PORT10_LANE1 0x15
+#define MV88E6390_PORT10_LANE2 0x16
+#define MV88E6390_PORT10_LANE3 0x17
+
+/* 10GBASE-R and 10GBASE-X4/X2 */
+#define MV88E6390_10G_CTRL1 (0x1000 + MDIO_CTRL1)
+#define MV88E6390_10G_STAT1 (0x1000 + MDIO_STAT1)
+#define MV88E6393X_10G_INT_ENABLE 0x9000
+#define MV88E6393X_10G_INT_LINK_CHANGE BIT(2)
+#define MV88E6393X_10G_INT_STATUS 0x9001
+
+/* 1000BASE-X and SGMII */
+#define MV88E6390_SGMII_BMCR (0x2000 + MII_BMCR)
+#define MV88E6390_SGMII_BMSR (0x2000 + MII_BMSR)
+#define MV88E6390_SGMII_ADVERTISE (0x2000 + MII_ADVERTISE)
+#define MV88E6390_SGMII_LPA (0x2000 + MII_LPA)
+#define MV88E6390_SGMII_INT_ENABLE 0xa001
+#define MV88E6390_SGMII_INT_SPEED_CHANGE BIT(14)
+#define MV88E6390_SGMII_INT_DUPLEX_CHANGE BIT(13)
+#define MV88E6390_SGMII_INT_PAGE_RX BIT(12)
+#define MV88E6390_SGMII_INT_AN_COMPLETE BIT(11)
+#define MV88E6390_SGMII_INT_LINK_DOWN BIT(10)
+#define MV88E6390_SGMII_INT_LINK_UP BIT(9)
+#define MV88E6390_SGMII_INT_SYMBOL_ERROR BIT(8)
+#define MV88E6390_SGMII_INT_FALSE_CARRIER BIT(7)
+#define MV88E6390_SGMII_INT_STATUS 0xa002
+#define MV88E6390_SGMII_PHY_STATUS 0xa003
+#define MV88E6390_SGMII_PHY_STATUS_SPEED_MASK GENMASK(15, 14)
+#define MV88E6390_SGMII_PHY_STATUS_SPEED_1000 0x8000
+#define MV88E6390_SGMII_PHY_STATUS_SPEED_100 0x4000
+#define MV88E6390_SGMII_PHY_STATUS_SPEED_10 0x0000
+#define MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL BIT(13)
+#define MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID BIT(11)
+#define MV88E6390_SGMII_PHY_STATUS_LINK BIT(10)
+#define MV88E6390_SGMII_PHY_STATUS_TX_PAUSE BIT(3)
+#define MV88E6390_SGMII_PHY_STATUS_RX_PAUSE BIT(2)
+
+/* Packet generator pad packet checker */
+#define MV88E6390_PG_CONTROL 0xf010
+#define MV88E6390_PG_CONTROL_ENABLE_PC BIT(0)
+
+#define MV88E6393X_PORT0_LANE 0x00
+#define MV88E6393X_PORT9_LANE 0x09
+#define MV88E6393X_PORT10_LANE 0x0a
+
+/* Port Operational Configuration */
+#define MV88E6393X_SERDES_POC 0xf002
+#define MV88E6393X_SERDES_POC_PCS_1000BASEX 0x0000
+#define MV88E6393X_SERDES_POC_PCS_2500BASEX 0x0001
+#define MV88E6393X_SERDES_POC_PCS_SGMII_PHY 0x0002
+#define MV88E6393X_SERDES_POC_PCS_SGMII_MAC 0x0003
+#define MV88E6393X_SERDES_POC_PCS_5GBASER 0x0004
+#define MV88E6393X_SERDES_POC_PCS_10GBASER 0x0005
+#define MV88E6393X_SERDES_POC_PCS_USXGMII_PHY 0x0006
+#define MV88E6393X_SERDES_POC_PCS_USXGMII_MAC 0x0007
+#define MV88E6393X_SERDES_POC_PCS_MASK 0x0007
+#define MV88E6393X_SERDES_POC_RESET BIT(15)
+#define MV88E6393X_SERDES_POC_PDOWN BIT(5)
+#define MV88E6393X_SERDES_POC_AN BIT(3)
+#define MV88E6393X_SERDES_CTRL1 0xf003
+#define MV88E6393X_SERDES_CTRL1_TX_PDOWN BIT(9)
+#define MV88E6393X_SERDES_CTRL1_RX_PDOWN BIT(8)
+
+#define MV88E6393X_ERRATA_4_8_REG		0xf074
+#define MV88E6393X_ERRATA_4_8_BIT BIT(14)
+
+int mv88e6185_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
+int mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
+int mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
+int mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
+int mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
+int mv88e6393x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
+int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
+ int lane, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertise);
+int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
+ int lane, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertise);
+int mv88e6185_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
+ int lane, struct phylink_link_state *state);
+int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
+ int lane, struct phylink_link_state *state);
+int mv88e6390_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
+ int lane, struct phylink_link_state *state);
+int mv88e6393x_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
+ int lane, struct phylink_link_state *state);
+int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
+ int lane);
+int mv88e6390_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
+ int lane);
+int mv88e6352_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
+ int lane, int speed, int duplex);
+int mv88e6390_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
+ int lane, int speed, int duplex);
+unsigned int mv88e6352_serdes_irq_mapping(struct mv88e6xxx_chip *chip,
+ int port);
+unsigned int mv88e6390_serdes_irq_mapping(struct mv88e6xxx_chip *chip,
+ int port);
+int mv88e6185_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
+ bool up);
+int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
+ bool on);
+int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
+ bool on);
+int mv88e6393x_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
+ bool on);
+int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip);
+int mv88e6097_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane,
+ bool enable);
+int mv88e6352_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane,
+ bool enable);
+int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane,
+ bool enable);
+int mv88e6393x_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port,
+ int lane, bool enable);
+irqreturn_t mv88e6097_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
+ int lane);
+irqreturn_t mv88e6352_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
+ int lane);
+irqreturn_t mv88e6390_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
+ int lane);
+irqreturn_t mv88e6393x_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
+ int lane);
+int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port);
+int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip,
+ int port, uint8_t *data);
+int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data);
+int mv88e6390_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port);
+int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip,
+ int port, uint8_t *data);
+int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data);
+
+int mv88e6352_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port);
+void mv88e6352_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p);
+int mv88e6390_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port);
+void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p);
+
+int mv88e6352_serdes_set_tx_amplitude(struct mv88e6xxx_chip *chip, int port,
+ int val);
+
+/* Return the (first) SERDES lane address a port is using, or -errno if none. */
+static inline int mv88e6xxx_serdes_get_lane(struct mv88e6xxx_chip *chip,
+ int port)
+{
+ if (!chip->info->ops->serdes_get_lane)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->serdes_get_lane(chip, port);
+}
+
+static inline int mv88e6xxx_serdes_power_up(struct mv88e6xxx_chip *chip,
+ int port, int lane)
+{
+ if (!chip->info->ops->serdes_power)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->serdes_power(chip, port, lane, true);
+}
+
+static inline int mv88e6xxx_serdes_power_down(struct mv88e6xxx_chip *chip,
+ int port, int lane)
+{
+ if (!chip->info->ops->serdes_power)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->serdes_power(chip, port, lane, false);
+}
+
+static inline unsigned int
+mv88e6xxx_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port)
+{
+ if (!chip->info->ops->serdes_irq_mapping)
+ return 0;
+
+ return chip->info->ops->serdes_irq_mapping(chip, port);
+}
+
+static inline int mv88e6xxx_serdes_irq_enable(struct mv88e6xxx_chip *chip,
+ int port, int lane)
+{
+ if (!chip->info->ops->serdes_irq_enable)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->serdes_irq_enable(chip, port, lane, true);
+}
+
+static inline int mv88e6xxx_serdes_irq_disable(struct mv88e6xxx_chip *chip,
+ int port, int lane)
+{
+ if (!chip->info->ops->serdes_irq_enable)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->serdes_irq_enable(chip, port, lane, false);
+}
+
+static inline irqreturn_t
+mv88e6xxx_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, int lane)
+{
+ if (!chip->info->ops->serdes_irq_status)
+ return IRQ_NONE;
+
+ return chip->info->ops->serdes_irq_status(chip, port, lane);
+}
+
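+/* A minimal usage sketch, not part of the driver: it shows how a caller
+ * might chain the ops-based wrappers above when bringing up the SERDES of
+ * one port. Error handling is simplified; the real callers live in chip.c.
+ */
+#if 0
+static int example_serdes_bringup(struct mv88e6xxx_chip *chip, int port)
+{
+	int lane, err;
+
+	/* Resolve the (first) lane; ports without a SERDES get -errno */
+	lane = mv88e6xxx_serdes_get_lane(chip, port);
+	if (lane < 0)
+		return lane;
+
+	err = mv88e6xxx_serdes_power_up(chip, port, lane);
+	if (err)
+		return err;
+
+	/* Enable link-change interrupts on that lane */
+	return mv88e6xxx_serdes_irq_enable(chip, port, lane);
+}
+#endif
+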
+#endif
diff --git a/drivers/net/dsa/mv88e6xxx/smi.c b/drivers/net/dsa/mv88e6xxx/smi.c
new file mode 100644
index 000000000..a990271b7
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/smi.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Marvell 88E6xxx System Management Interface (SMI) support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2019 Vivien Didelot <vivien.didelot@gmail.com>
+ */
+
+#include "chip.h"
+#include "smi.h"
+
+/* The switch ADDR[4:1] configuration pins define the chip SMI device address
+ * (ADDR[0] is always zero, thus only even SMI addresses can be strapped).
+ *
+ * When ADDR is all zero, the chip uses Single-chip Addressing Mode, assuming it
+ * is the only device connected to the SMI master. In this mode it responds to
+ * all 32 possible SMI addresses, and thus maps the internal devices directly.
+ *
+ * When ADDR is non-zero, the chip uses Multi-chip Addressing Mode, allowing
+ * multiple devices to share the SMI interface. In this mode it responds to only
+ * 2 registers, used to indirectly access the internal SMI devices.
+ *
+ * Some chips use a different scheme: only the ADDR4 pin is used for
+ * configuration, and the device responds to 16 of the 32 SMI
+ * addresses, allowing two such devices to coexist on the same SMI interface.
+ */
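+
+/* A concrete example, for illustration: a chip strapped to ADDR=0x10 runs
+ * in Multi-chip Addressing Mode and answers only at SMI address 0x10,
+ * where register 0x00 is the SMI Command register and register 0x01 the
+ * SMI Data register; every internal device is then reached indirectly
+ * through that pair, as implemented by the indirect ops below.
+ */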
+
+static int mv88e6xxx_smi_direct_read(struct mv88e6xxx_chip *chip,
+ int dev, int reg, u16 *data)
+{
+ int ret;
+
+ ret = mdiobus_read_nested(chip->bus, dev, reg);
+ if (ret < 0)
+ return ret;
+
+ *data = ret & 0xffff;
+
+ return 0;
+}
+
+static int mv88e6xxx_smi_direct_write(struct mv88e6xxx_chip *chip,
+ int dev, int reg, u16 data)
+{
+ int ret;
+
+ ret = mdiobus_write_nested(chip->bus, dev, reg, data);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int mv88e6xxx_smi_direct_wait(struct mv88e6xxx_chip *chip,
+ int dev, int reg, int bit, int val)
+{
+ const unsigned long timeout = jiffies + msecs_to_jiffies(50);
+ u16 data;
+ int err;
+ int i;
+
+ /* Even if the initial poll takes longer than 50ms, always do
+ * at least one more attempt.
+ */
+ for (i = 0; time_before(jiffies, timeout) || (i < 2); i++) {
+ err = mv88e6xxx_smi_direct_read(chip, dev, reg, &data);
+ if (err)
+ return err;
+
+ if (!!(data & BIT(bit)) == !!val)
+ return 0;
+
+ if (i < 2)
+ cpu_relax();
+ else
+ usleep_range(1000, 2000);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_direct_ops = {
+ .read = mv88e6xxx_smi_direct_read,
+ .write = mv88e6xxx_smi_direct_write,
+};
+
+static int mv88e6xxx_smi_dual_direct_read(struct mv88e6xxx_chip *chip,
+ int dev, int reg, u16 *data)
+{
+ return mv88e6xxx_smi_direct_read(chip, chip->sw_addr + dev, reg, data);
+}
+
+static int mv88e6xxx_smi_dual_direct_write(struct mv88e6xxx_chip *chip,
+ int dev, int reg, u16 data)
+{
+ return mv88e6xxx_smi_direct_write(chip, chip->sw_addr + dev, reg, data);
+}
+
+static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_dual_direct_ops = {
+ .read = mv88e6xxx_smi_dual_direct_read,
+ .write = mv88e6xxx_smi_dual_direct_write,
+};
+
+/* Offset 0x00: SMI Command Register
+ * Offset 0x01: SMI Data Register
+ */
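+
+/* The indirect ops below poll bit 15 of the SMI Command register, that is
+ * MV88E6XXX_SMI_CMD_BUSY (0x8000), waiting for the chip to clear it after
+ * each issued command (and once on entry, in the init op).
+ */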
+
+static int mv88e6xxx_smi_indirect_read(struct mv88e6xxx_chip *chip,
+ int dev, int reg, u16 *data)
+{
+ int err;
+
+ err = mv88e6xxx_smi_direct_write(chip, chip->sw_addr,
+ MV88E6XXX_SMI_CMD,
+ MV88E6XXX_SMI_CMD_BUSY |
+ MV88E6XXX_SMI_CMD_MODE_22 |
+ MV88E6XXX_SMI_CMD_OP_22_READ |
+ (dev << 5) | reg);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_smi_direct_wait(chip, chip->sw_addr,
+ MV88E6XXX_SMI_CMD, 15, 0);
+ if (err)
+ return err;
+
+ return mv88e6xxx_smi_direct_read(chip, chip->sw_addr,
+ MV88E6XXX_SMI_DATA, data);
+}
+
+static int mv88e6xxx_smi_indirect_write(struct mv88e6xxx_chip *chip,
+ int dev, int reg, u16 data)
+{
+ int err;
+
+ err = mv88e6xxx_smi_direct_write(chip, chip->sw_addr,
+ MV88E6XXX_SMI_DATA, data);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_smi_direct_write(chip, chip->sw_addr,
+ MV88E6XXX_SMI_CMD,
+ MV88E6XXX_SMI_CMD_BUSY |
+ MV88E6XXX_SMI_CMD_MODE_22 |
+ MV88E6XXX_SMI_CMD_OP_22_WRITE |
+ (dev << 5) | reg);
+ if (err)
+ return err;
+
+ return mv88e6xxx_smi_direct_wait(chip, chip->sw_addr,
+ MV88E6XXX_SMI_CMD, 15, 0);
+}
+
+static int mv88e6xxx_smi_indirect_init(struct mv88e6xxx_chip *chip)
+{
+ /* Ensure that the chip starts out in the ready state. As both
+ * reads and writes always ensure this on return, they can
+ * safely depend on the chip not being busy on entry.
+ */
+ return mv88e6xxx_smi_direct_wait(chip, chip->sw_addr,
+ MV88E6XXX_SMI_CMD, 15, 0);
+}
+
+static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_indirect_ops = {
+ .read = mv88e6xxx_smi_indirect_read,
+ .write = mv88e6xxx_smi_indirect_write,
+ .init = mv88e6xxx_smi_indirect_init,
+};
+
+int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus, int sw_addr)
+{
+ if (chip->info->dual_chip)
+ chip->smi_ops = &mv88e6xxx_smi_dual_direct_ops;
+ else if (sw_addr == 0)
+ chip->smi_ops = &mv88e6xxx_smi_direct_ops;
+ else if (chip->info->multi_chip)
+ chip->smi_ops = &mv88e6xxx_smi_indirect_ops;
+ else
+ return -EINVAL;
+
+ chip->bus = bus;
+ chip->sw_addr = sw_addr;
+
+ if (chip->smi_ops->init)
+ return chip->smi_ops->init(chip);
+
+ return 0;
+}
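+
+/* A minimal usage sketch, not part of the driver: once mv88e6xxx_smi_init()
+ * has selected the bus ops, reads work the same way in every addressing
+ * mode. The device/register pair 0x10/0x03 (port 0 Switch ID on many
+ * chips) is illustrative only.
+ */
+#if 0
+static int example_read_switch_id(struct mv88e6xxx_chip *chip,
+				  struct mii_bus *bus, int sw_addr, u16 *id)
+{
+	int err;
+
+	err = mv88e6xxx_smi_init(chip, bus, sw_addr);
+	if (err)
+		return err;
+
+	return mv88e6xxx_smi_read(chip, 0x10, 0x03, id);
+}
+#endif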
diff --git a/drivers/net/dsa/mv88e6xxx/smi.h b/drivers/net/dsa/mv88e6xxx/smi.h
new file mode 100644
index 000000000..c6c71d575
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/smi.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Marvell 88E6xxx System Management Interface (SMI) support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2019 Vivien Didelot <vivien.didelot@gmail.com>
+ */
+
+#ifndef _MV88E6XXX_SMI_H
+#define _MV88E6XXX_SMI_H
+
+#include "chip.h"
+
+/* Offset 0x00: SMI Command Register */
+#define MV88E6XXX_SMI_CMD 0x00
+#define MV88E6XXX_SMI_CMD_BUSY 0x8000
+#define MV88E6XXX_SMI_CMD_MODE_MASK 0x1000
+#define MV88E6XXX_SMI_CMD_MODE_45 0x0000
+#define MV88E6XXX_SMI_CMD_MODE_22 0x1000
+#define MV88E6XXX_SMI_CMD_OP_MASK 0x0c00
+#define MV88E6XXX_SMI_CMD_OP_22_WRITE 0x0400
+#define MV88E6XXX_SMI_CMD_OP_22_READ 0x0800
+#define MV88E6XXX_SMI_CMD_OP_45_WRITE_ADDR 0x0000
+#define MV88E6XXX_SMI_CMD_OP_45_WRITE_DATA 0x0400
+#define MV88E6XXX_SMI_CMD_OP_45_READ_DATA 0x0800
+#define MV88E6XXX_SMI_CMD_OP_45_READ_DATA_INC 0x0c00
+#define MV88E6XXX_SMI_CMD_DEV_ADDR_MASK 0x003e
+#define MV88E6XXX_SMI_CMD_REG_ADDR_MASK 0x001f
+
+/* Offset 0x01: SMI Data Register */
+#define MV88E6XXX_SMI_DATA 0x01
+
+int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus, int sw_addr);
+
+static inline int mv88e6xxx_smi_read(struct mv88e6xxx_chip *chip,
+ int dev, int reg, u16 *data)
+{
+ if (chip->smi_ops && chip->smi_ops->read)
+ return chip->smi_ops->read(chip, dev, reg, data);
+
+ return -EOPNOTSUPP;
+}
+
+static inline int mv88e6xxx_smi_write(struct mv88e6xxx_chip *chip,
+ int dev, int reg, u16 data)
+{
+ if (chip->smi_ops && chip->smi_ops->write)
+ return chip->smi_ops->write(chip, dev, reg, data);
+
+ return -EOPNOTSUPP;
+}
+
+#endif /* _MV88E6XXX_SMI_H */
diff --git a/drivers/net/dsa/mv88e6xxx/trace.c b/drivers/net/dsa/mv88e6xxx/trace.c
new file mode 100644
index 000000000..7833cb50c
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/trace.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright 2022 NXP
+ */
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/net/dsa/mv88e6xxx/trace.h b/drivers/net/dsa/mv88e6xxx/trace.h
new file mode 100644
index 000000000..f59ca0476
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/trace.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright 2022 NXP
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mv88e6xxx
+
+#if !defined(_MV88E6XXX_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _MV88E6XXX_TRACE_H
+
+#include <linux/device.h>
+#include <linux/if_ether.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(mv88e6xxx_atu_violation,
+
+ TP_PROTO(const struct device *dev, int spid, u16 portvec,
+ const unsigned char *addr, u16 fid),
+
+ TP_ARGS(dev, spid, portvec, addr, fid),
+
+ TP_STRUCT__entry(
+ __string(name, dev_name(dev))
+ __field(int, spid)
+ __field(u16, portvec)
+ __array(unsigned char, addr, ETH_ALEN)
+ __field(u16, fid)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, dev_name(dev));
+ __entry->spid = spid;
+ __entry->portvec = portvec;
+ memcpy(__entry->addr, addr, ETH_ALEN);
+ __entry->fid = fid;
+ ),
+
+ TP_printk("dev %s spid %d portvec 0x%x addr %pM fid %u",
+ __get_str(name), __entry->spid, __entry->portvec,
+ __entry->addr, __entry->fid)
+);
+
+DEFINE_EVENT(mv88e6xxx_atu_violation, mv88e6xxx_atu_member_violation,
+ TP_PROTO(const struct device *dev, int spid, u16 portvec,
+ const unsigned char *addr, u16 fid),
+ TP_ARGS(dev, spid, portvec, addr, fid));
+
+DEFINE_EVENT(mv88e6xxx_atu_violation, mv88e6xxx_atu_miss_violation,
+ TP_PROTO(const struct device *dev, int spid, u16 portvec,
+ const unsigned char *addr, u16 fid),
+ TP_ARGS(dev, spid, portvec, addr, fid));
+
+DEFINE_EVENT(mv88e6xxx_atu_violation, mv88e6xxx_atu_full_violation,
+ TP_PROTO(const struct device *dev, int spid, u16 portvec,
+ const unsigned char *addr, u16 fid),
+ TP_ARGS(dev, spid, portvec, addr, fid));
+
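+/* Each DEFINE_EVENT above generates a trace_<name>() helper; a call site
+ * (the ATU violation interrupt handler, for instance) would look roughly
+ * like this, with all arguments coming from the decoded violation:
+ *
+ * trace_mv88e6xxx_atu_member_violation(chip->dev, spid, portvec, addr, fid);
+ */
+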
+DECLARE_EVENT_CLASS(mv88e6xxx_vtu_violation,
+
+ TP_PROTO(const struct device *dev, int spid, u16 vid),
+
+ TP_ARGS(dev, spid, vid),
+
+ TP_STRUCT__entry(
+ __string(name, dev_name(dev))
+ __field(int, spid)
+ __field(u16, vid)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, dev_name(dev));
+ __entry->spid = spid;
+ __entry->vid = vid;
+ ),
+
+ TP_printk("dev %s spid %d vid %u",
+ __get_str(name), __entry->spid, __entry->vid)
+);
+
+DEFINE_EVENT(mv88e6xxx_vtu_violation, mv88e6xxx_vtu_member_violation,
+ TP_PROTO(const struct device *dev, int spid, u16 vid),
+ TP_ARGS(dev, spid, vid));
+
+DEFINE_EVENT(mv88e6xxx_vtu_violation, mv88e6xxx_vtu_miss_violation,
+ TP_PROTO(const struct device *dev, int spid, u16 vid),
+ TP_ARGS(dev, spid, vid));
+
+#endif /* _MV88E6XXX_TRACE_H */
+
+/* We don't want to use include/trace/events */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig
new file mode 100644
index 000000000..08db9cf76
--- /dev/null
+++ b/drivers/net/dsa/ocelot/Kconfig
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_DSA_MSCC_FELIX
+ tristate "Ocelot / Felix Ethernet switch support"
+ depends on NET_DSA && PCI
+ depends on NET_VENDOR_MICROSEMI
+ depends on NET_VENDOR_FREESCALE
+ depends on HAS_IOMEM
+ depends on PTP_1588_CLOCK_OPTIONAL
+ depends on NET_SCH_TAPRIO || NET_SCH_TAPRIO=n
+ select MSCC_OCELOT_SWITCH_LIB
+ select NET_DSA_TAG_OCELOT_8021Q
+ select NET_DSA_TAG_OCELOT
+ select FSL_ENETC_MDIO
+ select PCS_LYNX
+ help
+ This driver supports the VSC9959 (Felix) switch, which is embedded as
+ a PCIe function of the NXP LS1028A ENETC RCiEP.
+
+config NET_DSA_MSCC_SEVILLE
+ tristate "Ocelot / Seville Ethernet switch support"
+ depends on NET_DSA
+ depends on NET_VENDOR_MICROSEMI
+ depends on HAS_IOMEM
+ depends on PTP_1588_CLOCK_OPTIONAL
+ select MDIO_MSCC_MIIM
+ select MSCC_OCELOT_SWITCH_LIB
+ select NET_DSA_TAG_OCELOT_8021Q
+ select NET_DSA_TAG_OCELOT
+ select PCS_LYNX
+ help
+ This driver supports the VSC9953 (Seville) switch, which is embedded
+ as a platform device on the NXP T1040 SoC.
diff --git a/drivers/net/dsa/ocelot/Makefile b/drivers/net/dsa/ocelot/Makefile
new file mode 100644
index 000000000..f6dd131e7
--- /dev/null
+++ b/drivers/net/dsa/ocelot/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_NET_DSA_MSCC_FELIX) += mscc_felix.o
+obj-$(CONFIG_NET_DSA_MSCC_SEVILLE) += mscc_seville.o
+
+mscc_felix-objs := \
+ felix.o \
+ felix_vsc9959.o
+
+mscc_seville-objs := \
+ felix.o \
+ seville_vsc9953.o
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
new file mode 100644
index 000000000..2d2c6f941
--- /dev/null
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -0,0 +1,2150 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2019-2021 NXP
+ *
+ * This is an umbrella module for all network switches that are
+ * register-compatible with Ocelot and that perform I/O to their host CPU
+ * through an NPI (Node Processor Interface) Ethernet port.
+ */
+#include <uapi/linux/if_bridge.h>
+#include <soc/mscc/ocelot_vcap.h>
+#include <soc/mscc/ocelot_qsys.h>
+#include <soc/mscc/ocelot_sys.h>
+#include <soc/mscc/ocelot_dev.h>
+#include <soc/mscc/ocelot_ana.h>
+#include <soc/mscc/ocelot_ptp.h>
+#include <soc/mscc/ocelot.h>
+#include <linux/dsa/8021q.h>
+#include <linux/dsa/ocelot.h>
+#include <linux/platform_device.h>
+#include <linux/ptp_classify.h>
+#include <linux/module.h>
+#include <linux/of_net.h>
+#include <linux/pci.h>
+#include <linux/of.h>
+#include <net/pkt_sched.h>
+#include <net/dsa.h>
+#include "felix.h"
+
+/* Translate the DSA database API into the ocelot switch library API,
+ * which uses VID 0 for all ports that aren't part of a bridge,
+ * and expects the bridge_dev to be NULL in that case.
+ */
+static struct net_device *felix_classify_db(struct dsa_db db)
+{
+ switch (db.type) {
+ case DSA_DB_PORT:
+ case DSA_DB_LAG:
+ return NULL;
+ case DSA_DB_BRIDGE:
+ return db.bridge.dev;
+ default:
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+}
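+
+/* For example, an FDB entry for a port under br0 arrives with a
+ * DSA_DB_BRIDGE database and is installed with bridge_dev = br0, while
+ * entries for standalone ports or LAGs land in the VID-0 private database
+ * (bridge_dev == NULL).
+ */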
+
+static int felix_cpu_port_for_master(struct dsa_switch *ds,
+ struct net_device *master)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct dsa_port *cpu_dp;
+ int lag;
+
+ if (netif_is_lag_master(master)) {
+ mutex_lock(&ocelot->fwd_domain_lock);
+ lag = ocelot_bond_get_id(ocelot, master);
+ mutex_unlock(&ocelot->fwd_domain_lock);
+
+ return lag;
+ }
+
+ cpu_dp = master->dsa_ptr;
+ return cpu_dp->index;
+}
+
+/* Set up VCAP ES0 rules for pushing a tag_8021q VLAN towards the CPU such that
+ * the tagger can perform RX source port identification.
+ */
+static int felix_tag_8021q_vlan_add_rx(struct dsa_switch *ds, int port,
+ int upstream, u16 vid)
+{
+ struct ocelot_vcap_filter *outer_tagging_rule;
+ struct ocelot *ocelot = ds->priv;
+ unsigned long cookie;
+ int key_length, err;
+
+ key_length = ocelot->vcap[VCAP_ES0].keys[VCAP_ES0_IGR_PORT].length;
+
+ outer_tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter),
+ GFP_KERNEL);
+ if (!outer_tagging_rule)
+ return -ENOMEM;
+
+ cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream);
+
+ outer_tagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
+ outer_tagging_rule->prio = 1;
+ outer_tagging_rule->id.cookie = cookie;
+ outer_tagging_rule->id.tc_offload = false;
+ outer_tagging_rule->block_id = VCAP_ES0;
+ outer_tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ outer_tagging_rule->lookup = 0;
+ outer_tagging_rule->ingress_port.value = port;
+ outer_tagging_rule->ingress_port.mask = GENMASK(key_length - 1, 0);
+ outer_tagging_rule->egress_port.value = upstream;
+ outer_tagging_rule->egress_port.mask = GENMASK(key_length - 1, 0);
+ outer_tagging_rule->action.push_outer_tag = OCELOT_ES0_TAG;
+ outer_tagging_rule->action.tag_a_tpid_sel = OCELOT_TAG_TPID_SEL_8021AD;
+ outer_tagging_rule->action.tag_a_vid_sel = 1;
+ outer_tagging_rule->action.vid_a_val = vid;
+
+ err = ocelot_vcap_filter_add(ocelot, outer_tagging_rule, NULL);
+ if (err)
+ kfree(outer_tagging_rule);
+
+ return err;
+}
+
+static int felix_tag_8021q_vlan_del_rx(struct dsa_switch *ds, int port,
+ int upstream, u16 vid)
+{
+ struct ocelot_vcap_filter *outer_tagging_rule;
+ struct ocelot_vcap_block *block_vcap_es0;
+ struct ocelot *ocelot = ds->priv;
+ unsigned long cookie;
+
+ block_vcap_es0 = &ocelot->block[VCAP_ES0];
+ cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream);
+
+ outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0,
+ cookie, false);
+ if (!outer_tagging_rule)
+ return -ENOENT;
+
+ return ocelot_vcap_filter_del(ocelot, outer_tagging_rule);
+}
+
+/* Set up VCAP IS1 rules for stripping the tag_8021q VLAN on TX and VCAP IS2
+ * rules for steering those tagged packets towards the correct destination port.
+ */
+static int felix_tag_8021q_vlan_add_tx(struct dsa_switch *ds, int port,
+ u16 vid)
+{
+ struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
+ unsigned long cpu_ports = dsa_cpu_ports(ds);
+ struct ocelot *ocelot = ds->priv;
+ unsigned long cookie;
+ int err;
+
+ untagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
+ if (!untagging_rule)
+ return -ENOMEM;
+
+ redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
+ if (!redirect_rule) {
+ kfree(untagging_rule);
+ return -ENOMEM;
+ }
+
+ cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port);
+
+ untagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
+ untagging_rule->ingress_port_mask = cpu_ports;
+ untagging_rule->vlan.vid.value = vid;
+ untagging_rule->vlan.vid.mask = VLAN_VID_MASK;
+ untagging_rule->prio = 1;
+ untagging_rule->id.cookie = cookie;
+ untagging_rule->id.tc_offload = false;
+ untagging_rule->block_id = VCAP_IS1;
+ untagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ untagging_rule->lookup = 0;
+ untagging_rule->action.vlan_pop_cnt_ena = true;
+ untagging_rule->action.vlan_pop_cnt = 1;
+ untagging_rule->action.pag_override_mask = 0xff;
+ untagging_rule->action.pag_val = port;
+
+ err = ocelot_vcap_filter_add(ocelot, untagging_rule, NULL);
+ if (err) {
+ kfree(untagging_rule);
+ kfree(redirect_rule);
+ return err;
+ }
+
+ cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port);
+
+ redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
+ redirect_rule->ingress_port_mask = cpu_ports;
+ redirect_rule->pag = port;
+ redirect_rule->prio = 1;
+ redirect_rule->id.cookie = cookie;
+ redirect_rule->id.tc_offload = false;
+ redirect_rule->block_id = VCAP_IS2;
+ redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ redirect_rule->lookup = 0;
+ redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
+ redirect_rule->action.port_mask = BIT(port);
+
+ err = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL);
+ if (err) {
+ ocelot_vcap_filter_del(ocelot, untagging_rule);
+ kfree(redirect_rule);
+ return err;
+ }
+
+ return 0;
+}
+
+static int felix_tag_8021q_vlan_del_tx(struct dsa_switch *ds, int port, u16 vid)
+{
+ struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
+ struct ocelot_vcap_block *block_vcap_is1;
+ struct ocelot_vcap_block *block_vcap_is2;
+ struct ocelot *ocelot = ds->priv;
+ unsigned long cookie;
+ int err;
+
+ block_vcap_is1 = &ocelot->block[VCAP_IS1];
+ block_vcap_is2 = &ocelot->block[VCAP_IS2];
+
+ cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port);
+ untagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
+ cookie, false);
+ if (!untagging_rule)
+ return -ENOENT;
+
+ err = ocelot_vcap_filter_del(ocelot, untagging_rule);
+ if (err)
+ return err;
+
+ cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port);
+ redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
+ cookie, false);
+ if (!redirect_rule)
+ return -ENOENT;
+
+ return ocelot_vcap_filter_del(ocelot, redirect_rule);
+}
+
+static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
+ u16 flags)
+{
+ struct dsa_port *cpu_dp;
+ int err;
+
+ /* tag_8021q.c assumes we are implementing this via port VLAN
+ * membership, which we aren't. So we don't need to add any VCAP filter
+ * for the CPU port.
+ */
+ if (!dsa_is_user_port(ds, port))
+ return 0;
+
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ err = felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid);
+ if (err)
+ return err;
+ }
+
+ err = felix_tag_8021q_vlan_add_tx(ds, port, vid);
+ if (err)
+ goto add_tx_failed;
+
+ return 0;
+
+add_tx_failed:
+ dsa_switch_for_each_cpu_port(cpu_dp, ds)
+ felix_tag_8021q_vlan_del_rx(ds, port, cpu_dp->index, vid);
+
+ return err;
+}
+
+static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
+{
+ struct dsa_port *cpu_dp;
+ int err;
+
+ if (!dsa_is_user_port(ds, port))
+ return 0;
+
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ err = felix_tag_8021q_vlan_del_rx(ds, port, cpu_dp->index, vid);
+ if (err)
+ return err;
+ }
+
+ err = felix_tag_8021q_vlan_del_tx(ds, port, vid);
+ if (err)
+ goto del_tx_failed;
+
+ return 0;
+
+del_tx_failed:
+ dsa_switch_for_each_cpu_port(cpu_dp, ds)
+ felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid);
+
+ return err;
+}
+
+static int felix_trap_get_cpu_port(struct dsa_switch *ds,
+ const struct ocelot_vcap_filter *trap)
+{
+ struct dsa_port *dp;
+ int first_port;
+
+ if (WARN_ON(!trap->ingress_port_mask))
+ return -1;
+
+ first_port = __ffs(trap->ingress_port_mask);
+ dp = dsa_to_port(ds, first_port);
+
+ return dp->cpu_dp->index;
+}
+
+/* On switches with no extraction IRQ wired, trapped packets need to be
+ * replicated over Ethernet as well, otherwise we'd get no notification of
+ * their arrival when using the ocelot-8021q tagging protocol.
+ */
+static int felix_update_trapping_destinations(struct dsa_switch *ds,
+ bool using_tag_8021q)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct ocelot_vcap_block *block_vcap_is2;
+ struct ocelot_vcap_filter *trap;
+ enum ocelot_mask_mode mask_mode;
+ unsigned long port_mask;
+ bool cpu_copy_ena;
+ int err;
+
+ if (!felix->info->quirk_no_xtr_irq)
+ return 0;
+
+ /* We are sure that "cpu" was found, otherwise
+ * dsa_tree_setup_default_cpu() would have failed earlier.
+ */
+ block_vcap_is2 = &ocelot->block[VCAP_IS2];
+
+ /* Make sure all traps are set up for that destination */
+ list_for_each_entry(trap, &block_vcap_is2->rules, list) {
+ if (!trap->is_trap)
+ continue;
+
+ /* Figure out the current trapping destination */
+ if (using_tag_8021q) {
+ /* Redirect to the tag_8021q CPU port. If timestamps
+ * are necessary, also copy trapped packets to the CPU
+ * port module.
+ */
+ mask_mode = OCELOT_MASK_MODE_REDIRECT;
+ port_mask = BIT(felix_trap_get_cpu_port(ds, trap));
+ cpu_copy_ena = !!trap->take_ts;
+ } else {
+ /* Trap packets only to the CPU port module, which is
+ * redirected to the NPI port (the DSA CPU port)
+ */
+ mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
+ port_mask = 0;
+ cpu_copy_ena = true;
+ }
+
+ if (trap->action.mask_mode == mask_mode &&
+ trap->action.port_mask == port_mask &&
+ trap->action.cpu_copy_ena == cpu_copy_ena)
+ continue;
+
+ trap->action.mask_mode = mask_mode;
+ trap->action.port_mask = port_mask;
+ trap->action.cpu_copy_ena = cpu_copy_ena;
+
+ err = ocelot_vcap_filter_replace(ocelot, trap);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* The CPU port module is connected to the Node Processor Interface (NPI). This
+ * is the mode through which frames can be injected from and extracted to an
+ * external CPU, over Ethernet. In NXP SoCs, the "external CPU" is the ARM CPU
+ * running Linux, and this forms a DSA setup together with the enetc or fman
+ * DSA master.
+ */
+static void felix_npi_port_init(struct ocelot *ocelot, int port)
+{
+ ocelot->npi = port;
+
+ ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
+ QSYS_EXT_CPU_CFG_EXT_CPU_PORT(port),
+ QSYS_EXT_CPU_CFG);
+
+ /* NPI port Injection/Extraction configuration */
+ ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
+ ocelot->npi_xtr_prefix);
+ ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
+ ocelot->npi_inj_prefix);
+
+ /* Disable transmission of pause frames */
+ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
+}
+
+static void felix_npi_port_deinit(struct ocelot *ocelot, int port)
+{
+ /* Restore hardware defaults */
+ int unused_port = ocelot->num_phys_ports + 2;
+
+ ocelot->npi = -1;
+
+ ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPU_PORT(unused_port),
+ QSYS_EXT_CPU_CFG);
+
+ ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
+ OCELOT_TAG_PREFIX_DISABLED);
+ ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
+ OCELOT_TAG_PREFIX_DISABLED);
+
+ /* Enable transmission of pause frames */
+ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1);
+}
+
+static int felix_tag_npi_setup(struct dsa_switch *ds)
+{
+ struct dsa_port *dp, *first_cpu_dp = NULL;
+ struct ocelot *ocelot = ds->priv;
+
+ dsa_switch_for_each_user_port(dp, ds) {
+ if (first_cpu_dp && dp->cpu_dp != first_cpu_dp) {
+ dev_err(ds->dev, "Multiple NPI ports not supported\n");
+ return -EINVAL;
+ }
+
+ first_cpu_dp = dp->cpu_dp;
+ }
+
+ if (!first_cpu_dp)
+ return -EINVAL;
+
+ felix_npi_port_init(ocelot, first_cpu_dp->index);
+
+ return 0;
+}
+
+static void felix_tag_npi_teardown(struct dsa_switch *ds)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ felix_npi_port_deinit(ocelot, ocelot->npi);
+}
+
+static unsigned long felix_tag_npi_get_host_fwd_mask(struct dsa_switch *ds)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return BIT(ocelot->num_phys_ports);
+}
+
+static int felix_tag_npi_change_master(struct dsa_switch *ds, int port,
+ struct net_device *master,
+ struct netlink_ext_ack *extack)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
+ struct ocelot *ocelot = ds->priv;
+
+ if (netif_is_lag_master(master)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "LAG DSA master only supported using ocelot-8021q");
+ return -EOPNOTSUPP;
+ }
+
+ /* Changing the NPI port breaks user ports still assigned to the old
+ * one, so only allow it while they're down, and don't allow them to
+ * come back up until they're all changed to the new one.
+ */
+ dsa_switch_for_each_user_port(other_dp, ds) {
+ struct net_device *slave = other_dp->slave;
+
+ if (other_dp != dp && (slave->flags & IFF_UP) &&
+ dsa_port_to_master(other_dp) != master) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot change while old master still has users");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ felix_npi_port_deinit(ocelot, ocelot->npi);
+ felix_npi_port_init(ocelot, felix_cpu_port_for_master(ds, master));
+
+ return 0;
+}
+
+/* As an alternative to the NPI functionality, the same hardware MAC that is
+ * connected internally to the enetc or fman DSA master can be configured to
+ * use the software-defined tag_8021q frame format. As far as the hardware is
+ * concerned, it thinks it is a "dumb switch" - the queues of the CPU port
+ * module are now disconnected from it, but can still be accessed through
+ * register-based MMIO.
+ */
+static const struct felix_tag_proto_ops felix_tag_npi_proto_ops = {
+ .setup = felix_tag_npi_setup,
+ .teardown = felix_tag_npi_teardown,
+ .get_host_fwd_mask = felix_tag_npi_get_host_fwd_mask,
+ .change_master = felix_tag_npi_change_master,
+};
+
+static int felix_tag_8021q_setup(struct dsa_switch *ds)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct dsa_port *dp;
+ int err;
+
+ err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD));
+ if (err)
+ return err;
+
+ dsa_switch_for_each_cpu_port(dp, ds)
+ ocelot_port_setup_dsa_8021q_cpu(ocelot, dp->index);
+
+ dsa_switch_for_each_user_port(dp, ds)
+ ocelot_port_assign_dsa_8021q_cpu(ocelot, dp->index,
+ dp->cpu_dp->index);
+
+ dsa_switch_for_each_available_port(dp, ds)
+ /* This overwrites ocelot_init():
+ * Do not forward BPDU frames to the CPU port module,
+ * for 2 reasons:
+ * - When these packets are injected from the tag_8021q
+ * CPU port, we want them to go out, not loop back
+ * into the system.
+ * - STP traffic ingressing on a user port should go to
+ * the tag_8021q CPU port, not to the hardware CPU
+ * port module.
+ */
+ ocelot_write_gix(ocelot,
+ ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0),
+ ANA_PORT_CPU_FWD_BPDU_CFG, dp->index);
+
+ /* The ownership of the CPU port module's queues might have just been
+ * transferred to the tag_8021q tagger from the NPI-based tagger.
+ * So there might still be all sorts of crap in the queues. On the
+ * other hand, the MMIO-based matching of PTP frames is very brittle,
+ * so we need to be careful that there are no extra frames to be
+ * dequeued over MMIO, since we would never know to discard them.
+ */
+ ocelot_drain_cpu_queue(ocelot, 0);
+
+ return 0;
+}
+
+static void felix_tag_8021q_teardown(struct dsa_switch *ds)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct dsa_port *dp;
+
+ dsa_switch_for_each_available_port(dp, ds)
+ /* Restore the logic from ocelot_init:
+ * do not forward BPDU frames to the front ports.
+ */
+ ocelot_write_gix(ocelot,
+ ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
+ ANA_PORT_CPU_FWD_BPDU_CFG,
+ dp->index);
+
+ dsa_switch_for_each_user_port(dp, ds)
+ ocelot_port_unassign_dsa_8021q_cpu(ocelot, dp->index);
+
+ dsa_switch_for_each_cpu_port(dp, ds)
+ ocelot_port_teardown_dsa_8021q_cpu(ocelot, dp->index);
+
+ dsa_tag_8021q_unregister(ds);
+}
+
+static unsigned long felix_tag_8021q_get_host_fwd_mask(struct dsa_switch *ds)
+{
+ return dsa_cpu_ports(ds);
+}
+
+static int felix_tag_8021q_change_master(struct dsa_switch *ds, int port,
+ struct net_device *master,
+ struct netlink_ext_ack *extack)
+{
+ int cpu = felix_cpu_port_for_master(ds, master);
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_unassign_dsa_8021q_cpu(ocelot, port);
+ ocelot_port_assign_dsa_8021q_cpu(ocelot, port, cpu);
+
+ return felix_update_trapping_destinations(ds, true);
+}
+
+static const struct felix_tag_proto_ops felix_tag_8021q_proto_ops = {
+ .setup = felix_tag_8021q_setup,
+ .teardown = felix_tag_8021q_teardown,
+ .get_host_fwd_mask = felix_tag_8021q_get_host_fwd_mask,
+ .change_master = felix_tag_8021q_change_master,
+};
+
+static void felix_set_host_flood(struct dsa_switch *ds, unsigned long mask,
+ bool uc, bool mc, bool bc)
+{
+ struct ocelot *ocelot = ds->priv;
+ unsigned long val;
+
+ val = uc ? mask : 0;
+ ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_UC);
+
+ val = mc ? mask : 0;
+ ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MC);
+ ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV4);
+ ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV6);
+
+ val = bc ? mask : 0;
+ ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_BC);
+}
+
+static void
+felix_migrate_host_flood(struct dsa_switch *ds,
+ const struct felix_tag_proto_ops *proto_ops,
+ const struct felix_tag_proto_ops *old_proto_ops)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ unsigned long mask;
+
+ if (old_proto_ops) {
+ mask = old_proto_ops->get_host_fwd_mask(ds);
+ felix_set_host_flood(ds, mask, false, false, false);
+ }
+
+ mask = proto_ops->get_host_fwd_mask(ds);
+ felix_set_host_flood(ds, mask, !!felix->host_flood_uc_mask,
+ !!felix->host_flood_mc_mask, true);
+}
+
+static int felix_migrate_mdbs(struct dsa_switch *ds,
+ const struct felix_tag_proto_ops *proto_ops,
+ const struct felix_tag_proto_ops *old_proto_ops)
+{
+ struct ocelot *ocelot = ds->priv;
+ unsigned long from, to;
+
+ if (!old_proto_ops)
+ return 0;
+
+ from = old_proto_ops->get_host_fwd_mask(ds);
+ to = proto_ops->get_host_fwd_mask(ds);
+
+ return ocelot_migrate_mdbs(ocelot, from, to);
+}
+
+/* Configure the shared hardware resources for a transition between
+ * @old_proto_ops and @proto_ops.
+ * Manual migration is needed because as far as DSA is concerned, no change of
+ * the CPU port is taking place here, just of the tagging protocol.
+ */
+static int
+felix_tag_proto_setup_shared(struct dsa_switch *ds,
+ const struct felix_tag_proto_ops *proto_ops,
+ const struct felix_tag_proto_ops *old_proto_ops)
+{
+ bool using_tag_8021q = (proto_ops == &felix_tag_8021q_proto_ops);
+ int err;
+
+ err = felix_migrate_mdbs(ds, proto_ops, old_proto_ops);
+ if (err)
+ return err;
+
+ felix_update_trapping_destinations(ds, using_tag_8021q);
+
+ felix_migrate_host_flood(ds, proto_ops, old_proto_ops);
+
+ return 0;
+}
+
+/* This always leaves the switch in a consistent state, because although the
+ * tag_8021q setup can fail, the NPI setup can't. So either the change is made,
+ * or the restoration is guaranteed to work.
+ */
+static int felix_change_tag_protocol(struct dsa_switch *ds,
+ enum dsa_tag_protocol proto)
+{
+ const struct felix_tag_proto_ops *old_proto_ops, *proto_ops;
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ int err;
+
+ switch (proto) {
+ case DSA_TAG_PROTO_SEVILLE:
+ case DSA_TAG_PROTO_OCELOT:
+ proto_ops = &felix_tag_npi_proto_ops;
+ break;
+ case DSA_TAG_PROTO_OCELOT_8021Q:
+ proto_ops = &felix_tag_8021q_proto_ops;
+ break;
+ default:
+ return -EPROTONOSUPPORT;
+ }
+
+ old_proto_ops = felix->tag_proto_ops;
+
+ if (proto_ops == old_proto_ops)
+ return 0;
+
+ err = proto_ops->setup(ds);
+ if (err)
+ goto setup_failed;
+
+ err = felix_tag_proto_setup_shared(ds, proto_ops, old_proto_ops);
+ if (err)
+ goto setup_shared_failed;
+
+ if (old_proto_ops)
+ old_proto_ops->teardown(ds);
+
+ felix->tag_proto_ops = proto_ops;
+ felix->tag_proto = proto;
+
+ return 0;
+
+setup_shared_failed:
+ proto_ops->teardown(ds);
+setup_failed:
+ return err;
+}
+
+static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol mp)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ return felix->tag_proto;
+}
+
+static void felix_port_set_host_flood(struct dsa_switch *ds, int port,
+ bool uc, bool mc)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ unsigned long mask;
+
+ if (uc)
+ felix->host_flood_uc_mask |= BIT(port);
+ else
+ felix->host_flood_uc_mask &= ~BIT(port);
+
+ if (mc)
+ felix->host_flood_mc_mask |= BIT(port);
+ else
+ felix->host_flood_mc_mask &= ~BIT(port);
+
+ mask = felix->tag_proto_ops->get_host_fwd_mask(ds);
+ felix_set_host_flood(ds, mask, !!felix->host_flood_uc_mask,
+ !!felix->host_flood_mc_mask, true);
+}
+
+static int felix_port_change_master(struct dsa_switch *ds, int port,
+ struct net_device *master,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ return felix->tag_proto_ops->change_master(ds, port, master, extack);
+}
+
+static int felix_set_ageing_time(struct dsa_switch *ds,
+ unsigned int ageing_time)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_set_ageing_time(ocelot, ageing_time);
+
+ return 0;
+}
+
+static void felix_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct ocelot *ocelot = ds->priv;
+ int err;
+
+ err = ocelot_mact_flush(ocelot, port);
+ if (err)
+ dev_err(ds->dev, "Flushing MAC table on port %d returned %pe\n",
+ port, ERR_PTR(err));
+}
+
+static int felix_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_fdb_dump(ocelot, port, cb, data);
+}
+
+static int felix_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct ocelot *ocelot = ds->priv;
+
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ if (dsa_port_is_cpu(dp) && !bridge_dev &&
+ dsa_fdb_present_in_other_db(ds, port, addr, vid, db))
+ return 0;
+
+ if (dsa_port_is_cpu(dp))
+ port = PGID_CPU;
+
+ return ocelot_fdb_add(ocelot, port, addr, vid, bridge_dev);
+}
+
+static int felix_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct ocelot *ocelot = ds->priv;
+
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ if (dsa_port_is_cpu(dp) && !bridge_dev &&
+ dsa_fdb_present_in_other_db(ds, port, addr, vid, db))
+ return 0;
+
+ if (dsa_port_is_cpu(dp))
+ port = PGID_CPU;
+
+ return ocelot_fdb_del(ocelot, port, addr, vid, bridge_dev);
+}
+
+static int felix_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag lag,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct ocelot *ocelot = ds->priv;
+
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ return ocelot_lag_fdb_add(ocelot, lag.dev, addr, vid, bridge_dev);
+}
+
+static int felix_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag lag,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct ocelot *ocelot = ds->priv;
+
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ return ocelot_lag_fdb_del(ocelot, lag.dev, addr, vid, bridge_dev);
+}
+
+static int felix_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct ocelot *ocelot = ds->priv;
+
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
+ dsa_mdb_present_in_other_db(ds, port, mdb, db))
+ return 0;
+
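+	/* The ocelot library models the CPU port module as the port index
+	 * right after the physical ports, so translate the NPI port into it.
+	 */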
+ if (port == ocelot->npi)
+ port = ocelot->num_phys_ports;
+
+ return ocelot_port_mdb_add(ocelot, port, mdb, bridge_dev);
+}
+
+static int felix_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct ocelot *ocelot = ds->priv;
+
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
+ dsa_mdb_present_in_other_db(ds, port, mdb, db))
+ return 0;
+
+ if (port == ocelot->npi)
+ port = ocelot->num_phys_ports;
+
+ return ocelot_port_mdb_del(ocelot, port, mdb, bridge_dev);
+}
+
+static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct ocelot *ocelot = ds->priv;
+
+	ocelot_bridge_stp_state_set(ocelot, port, state);
+}
+
+static int felix_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags val,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_pre_bridge_flags(ocelot, port, val);
+}
+
+static int felix_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags val,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ if (port == ocelot->npi)
+ port = ocelot->num_phys_ports;
+
+ ocelot_port_bridge_flags(ocelot, port, val);
+
+ return 0;
+}
+
+static int felix_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge, bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_bridge_join(ocelot, port, bridge.dev, bridge.num,
+ extack);
+}
+
+static void felix_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_bridge_leave(ocelot, port, bridge.dev);
+}
+
+static int felix_lag_join(struct dsa_switch *ds, int port,
+ struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+ int err;
+
+ err = ocelot_port_lag_join(ocelot, port, lag.dev, info, extack);
+ if (err)
+ return err;
+
+ /* Update the logical LAG port that serves as tag_8021q CPU port */
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
+
+ return felix_port_change_master(ds, port, lag.dev, extack);
+}
+
+static int felix_lag_leave(struct dsa_switch *ds, int port,
+ struct dsa_lag lag)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_lag_leave(ocelot, port, lag.dev);
+
+ /* Update the logical LAG port that serves as tag_8021q CPU port */
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
+
+ return felix_port_change_master(ds, port, lag.dev, NULL);
+}
+
+static int felix_lag_change(struct dsa_switch *ds, int port)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_lag_change(ocelot, port, dp->lag_tx_enabled);
+
+ return 0;
+}
+
+static int felix_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+ u16 flags = vlan->flags;
+
+	/* Ocelot switches copy frames as-is to the CPU, so the flags
+	 * (egress-untagged or not, pvid or not) make no difference. This
+ * behavior is already better than what DSA just tries to approximate
+ * when it installs the VLAN with the same flags on the CPU port.
+ * Just accept any configuration, and don't let ocelot deny installing
+ * multiple native VLANs on the NPI port, because the switch doesn't
+ * look at the port tag settings towards the NPI interface anyway.
+ */
+ if (port == ocelot->npi)
+ return 0;
+
+ return ocelot_vlan_prepare(ocelot, port, vlan->vid,
+ flags & BRIDGE_VLAN_INFO_PVID,
+ flags & BRIDGE_VLAN_INFO_UNTAGGED,
+ extack);
+}
+
+static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_vlan_filtering(ocelot, port, enabled, extack);
+}
+
+static int felix_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+ u16 flags = vlan->flags;
+ int err;
+
+ err = felix_vlan_prepare(ds, port, vlan, extack);
+ if (err)
+ return err;
+
+ return ocelot_vlan_add(ocelot, port, vlan->vid,
+ flags & BRIDGE_VLAN_INFO_PVID,
+ flags & BRIDGE_VLAN_INFO_UNTAGGED);
+}
+
+static int felix_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_vlan_del(ocelot, port, vlan->vid);
+}
+
+static void felix_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ /* This driver does not make use of the speed, duplex, pause or the
+ * advertisement in its mac_config, so it is safe to mark this driver
+ * as non-legacy.
+ */
+ config->legacy_pre_march2020 = false;
+
+ __set_bit(ocelot->ports[port]->phy_mode,
+ config->supported_interfaces);
+}
+
+static void felix_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ if (felix->info->phylink_validate)
+ felix->info->phylink_validate(ocelot, port, supported, state);
+}
+
+static struct phylink_pcs *felix_phylink_mac_select_pcs(struct dsa_switch *ds,
+ int port,
+ phy_interface_t iface)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct phylink_pcs *pcs = NULL;
+
+ if (felix->pcs && felix->pcs[port])
+ pcs = felix->pcs[port];
+
+ return pcs;
+}
+
+static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int link_an_mode,
+ phy_interface_t interface)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_phylink_mac_link_down(ocelot, port, link_an_mode, interface,
+ FELIX_MAC_QUIRKS);
+}
+
+static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int link_an_mode,
+ phy_interface_t interface,
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode,
+ interface, speed, duplex, tx_pause, rx_pause,
+ FELIX_MAC_QUIRKS);
+
+ if (felix->info->port_sched_speed_set)
+ felix->info->port_sched_speed_set(ocelot, port, speed);
+}
+
+static int felix_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct ocelot *ocelot = ds->priv;
+
+ if (!dsa_port_is_user(dp))
+ return 0;
+
+ if (ocelot->npi >= 0) {
+ struct net_device *master = dsa_port_to_master(dp);
+
+ if (felix_cpu_port_for_master(ds, master) != ocelot->npi) {
+ dev_err(ds->dev, "Multiple masters are not allowed\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
+{
+ int i;
+
+ ocelot_rmw_gix(ocelot,
+ ANA_PORT_QOS_CFG_QOS_PCP_ENA,
+ ANA_PORT_QOS_CFG_QOS_PCP_ENA,
+ ANA_PORT_QOS_CFG,
+ port);
+
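+	/* Install the default 1:1 mapping: for each of the 16 {DEI, PCP}
+	 * combinations, the QoS class follows the PCP value and the drop
+	 * precedence follows the DEI bit.
+	 */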
+ for (i = 0; i < OCELOT_NUM_TC * 2; i++) {
+ ocelot_rmw_ix(ocelot,
+ (ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL & i) |
+ ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL(i),
+ ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL |
+ ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL_M,
+ ANA_PORT_PCP_DEI_MAP,
+ port, i);
+ }
+}
+
+static void felix_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_stats64(ocelot, port, stats);
+}
+
+static void felix_get_pause_stats(struct dsa_switch *ds, int port,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_pause_stats(ocelot, port, pause_stats);
+}
+
+static void felix_get_rmon_stats(struct dsa_switch *ds, int port,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_rmon_stats(ocelot, port, rmon_stats, ranges);
+}
+
+static void felix_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_eth_ctrl_stats(ocelot, port, ctrl_stats);
+}
+
+static void felix_get_eth_mac_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_eth_mac_stats(ocelot, port, mac_stats);
+}
+
+static void felix_get_eth_phy_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_eth_phy_stats(ocelot, port, phy_stats);
+}
+
+static void felix_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, u8 *data)
+{
+ struct ocelot *ocelot = ds->priv;
+
+	ocelot_get_strings(ocelot, port, stringset, data);
+}
+
+static void felix_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_get_ethtool_stats(ocelot, port, data);
+}
+
+static int felix_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_get_sset_count(ocelot, port, sset);
+}
+
+static int felix_get_ts_info(struct dsa_switch *ds, int port,
+ struct ethtool_ts_info *info)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_get_ts_info(ocelot, port, info);
+}
+
+static const u32 felix_phy_match_table[PHY_INTERFACE_MODE_MAX] = {
+ [PHY_INTERFACE_MODE_INTERNAL] = OCELOT_PORT_MODE_INTERNAL,
+ [PHY_INTERFACE_MODE_SGMII] = OCELOT_PORT_MODE_SGMII,
+ [PHY_INTERFACE_MODE_QSGMII] = OCELOT_PORT_MODE_QSGMII,
+ [PHY_INTERFACE_MODE_USXGMII] = OCELOT_PORT_MODE_USXGMII,
+ [PHY_INTERFACE_MODE_1000BASEX] = OCELOT_PORT_MODE_1000BASEX,
+ [PHY_INTERFACE_MODE_2500BASEX] = OCELOT_PORT_MODE_2500BASEX,
+};
+
+static int felix_validate_phy_mode(struct felix *felix, int port,
+ phy_interface_t phy_mode)
+{
+ u32 modes = felix->info->port_modes[port];
+
+ if (felix_phy_match_table[phy_mode] & modes)
+ return 0;
+ return -EOPNOTSUPP;
+}
+
+static int felix_parse_ports_node(struct felix *felix,
+ struct device_node *ports_node,
+ phy_interface_t *port_phy_modes)
+{
+ struct device *dev = felix->ocelot.dev;
+ struct device_node *child;
+
+ for_each_available_child_of_node(ports_node, child) {
+ phy_interface_t phy_mode;
+ u32 port;
+ int err;
+
+ /* Get switch port number from DT */
+ if (of_property_read_u32(child, "reg", &port) < 0) {
+ dev_err(dev, "Port number not defined in device tree "
+ "(property \"reg\")\n");
+ of_node_put(child);
+ return -ENODEV;
+ }
+
+ /* Get PHY mode from DT */
+ err = of_get_phy_mode(child, &phy_mode);
+ if (err) {
+ dev_err(dev, "Failed to read phy-mode or "
+ "phy-interface-type property for port %d\n",
+ port);
+ of_node_put(child);
+ return -ENODEV;
+ }
+
+ err = felix_validate_phy_mode(felix, port, phy_mode);
+ if (err < 0) {
+ dev_err(dev, "Unsupported PHY mode %s on port %d\n",
+ phy_modes(phy_mode), port);
+ of_node_put(child);
+ return err;
+ }
+
+ port_phy_modes[port] = phy_mode;
+ }
+
+ return 0;
+}
+
+static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes)
+{
+ struct device *dev = felix->ocelot.dev;
+ struct device_node *switch_node;
+ struct device_node *ports_node;
+ int err;
+
+ switch_node = dev->of_node;
+
+ ports_node = of_get_child_by_name(switch_node, "ports");
+ if (!ports_node)
+ ports_node = of_get_child_by_name(switch_node, "ethernet-ports");
+ if (!ports_node) {
+ dev_err(dev, "Incorrect bindings: absent \"ports\" or \"ethernet-ports\" node\n");
+ return -ENODEV;
+ }
+
+ err = felix_parse_ports_node(felix, ports_node, port_phy_modes);
+ of_node_put(ports_node);
+
+ return err;
+}
+
+static struct regmap *felix_request_regmap_by_name(struct felix *felix,
+ const char *resource_name)
+{
+ struct ocelot *ocelot = &felix->ocelot;
+ struct resource res;
+ int i;
+
+ for (i = 0; i < felix->info->num_resources; i++) {
+ if (strcmp(resource_name, felix->info->resources[i].name))
+ continue;
+
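+		/* The resource offsets are relative to the switch's base
+		 * address within the parent device (a PCI BAR or MMIO
+		 * window), so translate them into absolute addresses
+		 * before mapping.
+		 */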
+ memcpy(&res, &felix->info->resources[i], sizeof(res));
+ res.start += felix->switch_base;
+ res.end += felix->switch_base;
+
+ return ocelot_regmap_init(ocelot, &res);
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+static struct regmap *felix_request_regmap(struct felix *felix,
+ enum ocelot_target target)
+{
+ const char *resource_name = felix->info->resource_names[target];
+
+ /* If the driver didn't provide a resource name for the target,
+ * the resource is optional.
+ */
+ if (!resource_name)
+ return NULL;
+
+ return felix_request_regmap_by_name(felix, resource_name);
+}
+
+static struct regmap *felix_request_port_regmap(struct felix *felix, int port)
+{
+ char resource_name[32];
+
+ sprintf(resource_name, "port%d", port);
+
+ return felix_request_regmap_by_name(felix, resource_name);
+}
+
+static int felix_init_structs(struct felix *felix, int num_phys_ports)
+{
+ struct ocelot *ocelot = &felix->ocelot;
+ phy_interface_t *port_phy_modes;
+ struct regmap *target;
+ int port, i, err;
+
+ ocelot->num_phys_ports = num_phys_ports;
+ ocelot->ports = devm_kcalloc(ocelot->dev, num_phys_ports,
+ sizeof(struct ocelot_port *), GFP_KERNEL);
+ if (!ocelot->ports)
+ return -ENOMEM;
+
+ ocelot->map = felix->info->map;
+ ocelot->stats_layout = felix->info->stats_layout;
+ ocelot->num_mact_rows = felix->info->num_mact_rows;
+ ocelot->vcap = felix->info->vcap;
+ ocelot->vcap_pol.base = felix->info->vcap_pol_base;
+ ocelot->vcap_pol.max = felix->info->vcap_pol_max;
+ ocelot->vcap_pol.base2 = felix->info->vcap_pol_base2;
+ ocelot->vcap_pol.max2 = felix->info->vcap_pol_max2;
+ ocelot->ops = felix->info->ops;
+ ocelot->npi_inj_prefix = OCELOT_TAG_PREFIX_SHORT;
+ ocelot->npi_xtr_prefix = OCELOT_TAG_PREFIX_SHORT;
+ ocelot->devlink = felix->ds->devlink;
+
+ port_phy_modes = kcalloc(num_phys_ports, sizeof(phy_interface_t),
+ GFP_KERNEL);
+ if (!port_phy_modes)
+ return -ENOMEM;
+
+ err = felix_parse_dt(felix, port_phy_modes);
+ if (err) {
+ kfree(port_phy_modes);
+ return err;
+ }
+
+ for (i = 0; i < TARGET_MAX; i++) {
+ target = felix_request_regmap(felix, i);
+ if (IS_ERR(target)) {
+ dev_err(ocelot->dev,
+ "Failed to map device memory space: %pe\n",
+ target);
+ kfree(port_phy_modes);
+ return PTR_ERR(target);
+ }
+
+ ocelot->targets[i] = target;
+ }
+
+ err = ocelot_regfields_init(ocelot, felix->info->regfields);
+ if (err) {
+ dev_err(ocelot->dev, "failed to init reg fields map\n");
+ kfree(port_phy_modes);
+ return err;
+ }
+
+ for (port = 0; port < num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port;
+
+ ocelot_port = devm_kzalloc(ocelot->dev,
+ sizeof(struct ocelot_port),
+ GFP_KERNEL);
+ if (!ocelot_port) {
+ dev_err(ocelot->dev,
+ "failed to allocate port memory\n");
+ kfree(port_phy_modes);
+ return -ENOMEM;
+ }
+
+ target = felix_request_port_regmap(felix, port);
+ if (IS_ERR(target)) {
+ dev_err(ocelot->dev,
+ "Failed to map memory space for port %d: %pe\n",
+ port, target);
+ kfree(port_phy_modes);
+ return PTR_ERR(target);
+ }
+
+ ocelot_port->phy_mode = port_phy_modes[port];
+ ocelot_port->ocelot = ocelot;
+ ocelot_port->target = target;
+ ocelot_port->index = port;
+ ocelot->ports[port] = ocelot_port;
+ }
+
+ kfree(port_phy_modes);
+
+ if (felix->info->mdio_bus_alloc) {
+ err = felix->info->mdio_bus_alloc(ocelot);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
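+/* Called for an skb that could not be injected: unlink its TX timestamp
+ * clone from the port's tx_skbs queue, so the PTP code does not wait for a
+ * timestamp that will never be delivered.
+ */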
+static void ocelot_port_purge_txtstamp_skb(struct ocelot *ocelot, int port,
+ struct sk_buff *skb)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
+ struct sk_buff *skb_match = NULL, *skb_tmp;
+ unsigned long flags;
+
+ if (!clone)
+ return;
+
+ spin_lock_irqsave(&ocelot_port->tx_skbs.lock, flags);
+
+ skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) {
+ if (skb != clone)
+ continue;
+ __skb_unlink(skb, &ocelot_port->tx_skbs);
+ skb_match = skb;
+ break;
+ }
+
+ spin_unlock_irqrestore(&ocelot_port->tx_skbs.lock, flags);
+
+ WARN_ONCE(!skb_match,
+ "Could not find skb clone in TX timestamping list\n");
+}
+
+#define work_to_xmit_work(w) \
+ container_of((w), struct felix_deferred_xmit_work, work)
+
+static void felix_port_deferred_xmit(struct kthread_work *work)
+{
+ struct felix_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
+ struct dsa_switch *ds = xmit_work->dp->ds;
+ struct sk_buff *skb = xmit_work->skb;
+ u32 rew_op = ocelot_ptp_rew_op(skb);
+ struct ocelot *ocelot = ds->priv;
+ int port = xmit_work->dp->index;
+ int retries = 10;
+
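+	/* Busy-wait for an injection group to become available, and drop
+	 * the frame if none frees up after 10 attempts.
+	 */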
+ do {
+ if (ocelot_can_inject(ocelot, 0))
+ break;
+
+ cpu_relax();
+ } while (--retries);
+
+ if (!retries) {
+ dev_err(ocelot->dev, "port %d failed to inject skb\n",
+ port);
+ ocelot_port_purge_txtstamp_skb(ocelot, port, skb);
+ kfree_skb(skb);
+ return;
+ }
+
+ ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
+
+ consume_skb(skb);
+ kfree(xmit_work);
+}
+
+static int felix_connect_tag_protocol(struct dsa_switch *ds,
+ enum dsa_tag_protocol proto)
+{
+ struct ocelot_8021q_tagger_data *tagger_data;
+
+ switch (proto) {
+ case DSA_TAG_PROTO_OCELOT_8021Q:
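+		/* Hook our deferred xmit handler into the tagger, which uses
+		 * it to inject PTP frames through the CPU port module
+		 * registers rather than over the DSA master.
+		 */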
+ tagger_data = ocelot_8021q_tagger_data(ds);
+ tagger_data->xmit_work_fn = felix_port_deferred_xmit;
+ return 0;
+ case DSA_TAG_PROTO_OCELOT:
+ case DSA_TAG_PROTO_SEVILLE:
+ return 0;
+ default:
+ return -EPROTONOSUPPORT;
+ }
+}
+
+/* Hardware initialization done here so that we can allocate structures with
+ * devm without fear of dsa_register_switch returning -EPROBE_DEFER and causing
+ * us to allocate structures twice (leak memory) and map PCI memory twice
+ * (which will not work).
+ */
+static int felix_setup(struct dsa_switch *ds)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct dsa_port *dp;
+ int err;
+
+ err = felix_init_structs(felix, ds->num_ports);
+ if (err)
+ return err;
+
+ err = ocelot_init(ocelot);
+ if (err)
+ goto out_mdiobus_free;
+
+ if (ocelot->ptp) {
+ err = ocelot_init_timestamp(ocelot, felix->info->ptp_caps);
+ if (err) {
+ dev_err(ocelot->dev,
+ "Timestamp initialization failed\n");
+ ocelot->ptp = 0;
+ }
+ }
+
+ dsa_switch_for_each_available_port(dp, ds) {
+ ocelot_init_port(ocelot, dp->index);
+
+ /* Set the default QoS Classification based on PCP and DEI
+ * bits of vlan tag.
+ */
+ felix_port_qos_map_init(ocelot, dp->index);
+ }
+
+ err = ocelot_devlink_sb_register(ocelot);
+ if (err)
+ goto out_deinit_ports;
+
+	/* The initial tag protocol is NPI, which won't fail during initial
+	 * setup, so there's no real point in checking for errors.
+	 */
+ felix_change_tag_protocol(ds, felix->tag_proto);
+
+ ds->mtu_enforcement_ingress = true;
+ ds->assisted_learning_on_cpu_port = true;
+ ds->fdb_isolation = true;
+ ds->max_num_bridges = ds->num_ports;
+
+ return 0;
+
+out_deinit_ports:
+ dsa_switch_for_each_available_port(dp, ds)
+ ocelot_deinit_port(ocelot, dp->index);
+
+ ocelot_deinit_timestamp(ocelot);
+ ocelot_deinit(ocelot);
+
+out_mdiobus_free:
+ if (felix->info->mdio_bus_free)
+ felix->info->mdio_bus_free(ocelot);
+
+ return err;
+}
+
+static void felix_teardown(struct dsa_switch *ds)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct dsa_port *dp;
+
+ rtnl_lock();
+ if (felix->tag_proto_ops)
+ felix->tag_proto_ops->teardown(ds);
+ rtnl_unlock();
+
+ dsa_switch_for_each_available_port(dp, ds)
+ ocelot_deinit_port(ocelot, dp->index);
+
+ ocelot_devlink_sb_unregister(ocelot);
+ ocelot_deinit_timestamp(ocelot);
+ ocelot_deinit(ocelot);
+
+ if (felix->info->mdio_bus_free)
+ felix->info->mdio_bus_free(ocelot);
+}
+
+static int felix_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct ifreq *ifr)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_hwstamp_get(ocelot, port, ifr);
+}
+
+static int felix_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct ifreq *ifr)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ bool using_tag_8021q;
+ int err;
+
+ err = ocelot_hwstamp_set(ocelot, port, ifr);
+ if (err)
+ return err;
+
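+	/* The PTP traps and their destination (CPU port module vs tag_8021q
+	 * CPU port) depend on the timestamping configuration, so refresh
+	 * them whenever it changes.
+	 */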
+ using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q;
+
+ return felix_update_trapping_destinations(ds, using_tag_8021q);
+}
+
+static bool felix_check_xtr_pkt(struct ocelot *ocelot)
+{
+ struct felix *felix = ocelot_to_felix(ocelot);
+ int err = 0, grp = 0;
+
+ if (felix->tag_proto != DSA_TAG_PROTO_OCELOT_8021Q)
+ return false;
+
+ if (!felix->info->quirk_no_xtr_irq)
+ return false;
+
+ while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
+ struct sk_buff *skb;
+ unsigned int type;
+
+ err = ocelot_xtr_poll_frame(ocelot, grp, &skb);
+ if (err)
+ goto out;
+
+		/* We trap all PTP frames to the CPU port module, but
+ * felix_rxtstamp() only gets called for event frames.
+ * So we need to avoid sending duplicate general
+ * message frames by running a second BPF classifier
+ * here and dropping those.
+ */
+ __skb_push(skb, ETH_HLEN);
+
+ type = ptp_classify_raw(skb);
+
+ __skb_pull(skb, ETH_HLEN);
+
+ if (type == PTP_CLASS_NONE) {
+ kfree_skb(skb);
+ continue;
+ }
+
+ netif_rx(skb);
+ }
+
+out:
+ if (err < 0) {
+ dev_err_ratelimited(ocelot->dev,
+ "Error during packet extraction: %pe\n",
+ ERR_PTR(err));
+ ocelot_drain_cpu_queue(ocelot, 0);
+ }
+
+ return true;
+}
+
+static bool felix_rxtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type)
+{
+ u32 tstamp_lo = OCELOT_SKB_CB(skb)->tstamp_lo;
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct ocelot *ocelot = ds->priv;
+ struct timespec64 ts;
+ u32 tstamp_hi;
+ u64 tstamp;
+
+ switch (type & PTP_CLASS_PMASK) {
+ case PTP_CLASS_L2:
+ if (!(ocelot->ports[port]->trap_proto & OCELOT_PROTO_PTP_L2))
+ return false;
+ break;
+ case PTP_CLASS_IPV4:
+ case PTP_CLASS_IPV6:
+ if (!(ocelot->ports[port]->trap_proto & OCELOT_PROTO_PTP_L4))
+ return false;
+ break;
+ }
+
+ /* If the "no XTR IRQ" workaround is in use, tell DSA to defer this skb
+ * for RX timestamping. Then free it, and poll for its copy through
+ * MMIO in the CPU port module, and inject that into the stack from
+ * ocelot_xtr_poll().
+ */
+ if (felix_check_xtr_pkt(ocelot)) {
+ kfree_skb(skb);
+ return true;
+ }
+
+ ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
+ tstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+
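+	/* The hardware delivers only the low 32 bits of the RX timestamp.
+	 * Reconstruct the full 64-bit value from the current PTP time,
+	 * accounting for a possible wraparound of the low word in between.
+	 */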
+ tstamp_hi = tstamp >> 32;
+ if ((tstamp & 0xffffffff) < tstamp_lo)
+ tstamp_hi--;
+
+ tstamp = ((u64)tstamp_hi << 32) | tstamp_lo;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamps->hwtstamp = tstamp;
+ return false;
+}
+
+static void felix_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct sk_buff *clone = NULL;
+
+ if (!ocelot->ptp)
+ return;
+
+ if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone)) {
+ dev_err_ratelimited(ds->dev,
+ "port %d delivering skb without TX timestamp\n",
+ port);
+ return;
+ }
+
+ if (clone)
+ OCELOT_SKB_CB(skb)->clone = clone;
+}
+
+static int felix_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ ocelot_port_set_maxlen(ocelot, port, new_mtu);
+
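+	/* The tc-taprio guard band durations are derived from the maximum
+	 * frame size, so recompute them when the MTU changes while a
+	 * schedule is active.
+	 */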
+ mutex_lock(&ocelot->tas_lock);
+
+ if (ocelot_port->taprio && felix->info->tas_guard_bands_update)
+ felix->info->tas_guard_bands_update(ocelot, port);
+
+ mutex_unlock(&ocelot->tas_lock);
+
+ return 0;
+}
+
+static int felix_get_max_mtu(struct dsa_switch *ds, int port)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_get_max_mtu(ocelot, port);
+}
+
+static int felix_cls_flower_add(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ bool using_tag_8021q;
+ int err;
+
+ err = ocelot_cls_flower_replace(ocelot, port, cls, ingress);
+ if (err)
+ return err;
+
+ using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q;
+
+ return felix_update_trapping_destinations(ds, using_tag_8021q);
+}
+
+static int felix_cls_flower_del(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_cls_flower_destroy(ocelot, port, cls, ingress);
+}
+
+static int felix_cls_flower_stats(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_cls_flower_stats(ocelot, port, cls, ingress);
+}
+
+static int felix_port_policer_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_policer_tc_entry *policer)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct ocelot_policer pol = {
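+		/* tc expresses the rate in bytes/s, the ocelot policer in kbps */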
+ .rate = div_u64(policer->rate_bytes_per_sec, 1000) * 8,
+ .burst = policer->burst,
+ };
+
+ return ocelot_port_policer_add(ocelot, port, &pol);
+}
+
+static void felix_port_policer_del(struct dsa_switch *ds, int port)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_policer_del(ocelot, port);
+}
+
+static int felix_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_mirror_add(ocelot, port, mirror->to_local_port,
+ ingress, extack);
+}
+
+static void felix_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_mirror_del(ocelot, port, mirror->ingress);
+}
+
+static int felix_port_setup_tc(struct dsa_switch *ds, int port,
+ enum tc_setup_type type,
+ void *type_data)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ if (felix->info->port_setup_tc)
+ return felix->info->port_setup_tc(ds, port, type, type_data);
+ else
+ return -EOPNOTSUPP;
+}
+
+static int felix_sb_pool_get(struct dsa_switch *ds, unsigned int sb_index,
+ u16 pool_index,
+ struct devlink_sb_pool_info *pool_info)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_pool_get(ocelot, sb_index, pool_index, pool_info);
+}
+
+static int felix_sb_pool_set(struct dsa_switch *ds, unsigned int sb_index,
+ u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_pool_set(ocelot, sb_index, pool_index, size,
+ threshold_type, extack);
+}
+
+static int felix_sb_port_pool_get(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_port_pool_get(ocelot, port, sb_index, pool_index,
+ p_threshold);
+}
+
+static int felix_sb_port_pool_set(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold, struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_port_pool_set(ocelot, port, sb_index, pool_index,
+ threshold, extack);
+}
+
+static int felix_sb_tc_pool_bind_get(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_tc_pool_bind_get(ocelot, port, sb_index, tc_index,
+ pool_type, p_pool_index,
+ p_threshold);
+}
+
+static int felix_sb_tc_pool_bind_set(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_tc_pool_bind_set(ocelot, port, sb_index, tc_index,
+ pool_type, pool_index, threshold,
+ extack);
+}
+
+static int felix_sb_occ_snapshot(struct dsa_switch *ds,
+ unsigned int sb_index)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_occ_snapshot(ocelot, sb_index);
+}
+
+static int felix_sb_occ_max_clear(struct dsa_switch *ds,
+ unsigned int sb_index)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_occ_max_clear(ocelot, sb_index);
+}
+
+static int felix_sb_occ_port_pool_get(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_cur, u32 *p_max)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_occ_port_pool_get(ocelot, port, sb_index, pool_index,
+ p_cur, p_max);
+}
+
+static int felix_sb_occ_tc_port_bind_get(struct dsa_switch *ds, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_sb_occ_tc_port_bind_get(ocelot, port, sb_index, tc_index,
+ pool_type, p_cur, p_max);
+}
+
+static int felix_mrp_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_mrp *mrp)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_mrp_add(ocelot, port, mrp);
+}
+
+static int felix_mrp_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_mrp *mrp)
+{
+ struct ocelot *ocelot = ds->priv;
+
+	return ocelot_mrp_del(ocelot, port, mrp);
+}
+
+static int
+felix_mrp_add_ring_role(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_ring_role_mrp *mrp)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_mrp_add_ring_role(ocelot, port, mrp);
+}
+
+static int
+felix_mrp_del_ring_role(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_ring_role_mrp *mrp)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_mrp_del_ring_role(ocelot, port, mrp);
+}
+
+static int felix_port_get_default_prio(struct dsa_switch *ds, int port)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_get_default_prio(ocelot, port);
+}
+
+static int felix_port_set_default_prio(struct dsa_switch *ds, int port,
+ u8 prio)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_set_default_prio(ocelot, port, prio);
+}
+
+static int felix_port_get_dscp_prio(struct dsa_switch *ds, int port, u8 dscp)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_get_dscp_prio(ocelot, port, dscp);
+}
+
+static int felix_port_add_dscp_prio(struct dsa_switch *ds, int port, u8 dscp,
+ u8 prio)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_add_dscp_prio(ocelot, port, dscp, prio);
+}
+
+static int felix_port_del_dscp_prio(struct dsa_switch *ds, int port, u8 dscp,
+ u8 prio)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_del_dscp_prio(ocelot, port, dscp, prio);
+}
+
+const struct dsa_switch_ops felix_switch_ops = {
+ .get_tag_protocol = felix_get_tag_protocol,
+ .change_tag_protocol = felix_change_tag_protocol,
+ .connect_tag_protocol = felix_connect_tag_protocol,
+ .setup = felix_setup,
+ .teardown = felix_teardown,
+ .set_ageing_time = felix_set_ageing_time,
+ .get_stats64 = felix_get_stats64,
+ .get_pause_stats = felix_get_pause_stats,
+ .get_rmon_stats = felix_get_rmon_stats,
+ .get_eth_ctrl_stats = felix_get_eth_ctrl_stats,
+ .get_eth_mac_stats = felix_get_eth_mac_stats,
+ .get_eth_phy_stats = felix_get_eth_phy_stats,
+ .get_strings = felix_get_strings,
+ .get_ethtool_stats = felix_get_ethtool_stats,
+ .get_sset_count = felix_get_sset_count,
+ .get_ts_info = felix_get_ts_info,
+ .phylink_get_caps = felix_phylink_get_caps,
+ .phylink_validate = felix_phylink_validate,
+ .phylink_mac_select_pcs = felix_phylink_mac_select_pcs,
+ .phylink_mac_link_down = felix_phylink_mac_link_down,
+ .phylink_mac_link_up = felix_phylink_mac_link_up,
+ .port_enable = felix_port_enable,
+ .port_fast_age = felix_port_fast_age,
+ .port_fdb_dump = felix_fdb_dump,
+ .port_fdb_add = felix_fdb_add,
+ .port_fdb_del = felix_fdb_del,
+ .lag_fdb_add = felix_lag_fdb_add,
+ .lag_fdb_del = felix_lag_fdb_del,
+ .port_mdb_add = felix_mdb_add,
+ .port_mdb_del = felix_mdb_del,
+ .port_pre_bridge_flags = felix_pre_bridge_flags,
+ .port_bridge_flags = felix_bridge_flags,
+ .port_bridge_join = felix_bridge_join,
+ .port_bridge_leave = felix_bridge_leave,
+ .port_lag_join = felix_lag_join,
+ .port_lag_leave = felix_lag_leave,
+ .port_lag_change = felix_lag_change,
+ .port_stp_state_set = felix_bridge_stp_state_set,
+ .port_vlan_filtering = felix_vlan_filtering,
+ .port_vlan_add = felix_vlan_add,
+ .port_vlan_del = felix_vlan_del,
+ .port_hwtstamp_get = felix_hwtstamp_get,
+ .port_hwtstamp_set = felix_hwtstamp_set,
+ .port_rxtstamp = felix_rxtstamp,
+ .port_txtstamp = felix_txtstamp,
+ .port_change_mtu = felix_change_mtu,
+ .port_max_mtu = felix_get_max_mtu,
+ .port_policer_add = felix_port_policer_add,
+ .port_policer_del = felix_port_policer_del,
+ .port_mirror_add = felix_port_mirror_add,
+ .port_mirror_del = felix_port_mirror_del,
+ .cls_flower_add = felix_cls_flower_add,
+ .cls_flower_del = felix_cls_flower_del,
+ .cls_flower_stats = felix_cls_flower_stats,
+ .port_setup_tc = felix_port_setup_tc,
+ .devlink_sb_pool_get = felix_sb_pool_get,
+ .devlink_sb_pool_set = felix_sb_pool_set,
+ .devlink_sb_port_pool_get = felix_sb_port_pool_get,
+ .devlink_sb_port_pool_set = felix_sb_port_pool_set,
+ .devlink_sb_tc_pool_bind_get = felix_sb_tc_pool_bind_get,
+ .devlink_sb_tc_pool_bind_set = felix_sb_tc_pool_bind_set,
+ .devlink_sb_occ_snapshot = felix_sb_occ_snapshot,
+ .devlink_sb_occ_max_clear = felix_sb_occ_max_clear,
+ .devlink_sb_occ_port_pool_get = felix_sb_occ_port_pool_get,
+ .devlink_sb_occ_tc_port_bind_get= felix_sb_occ_tc_port_bind_get,
+ .port_mrp_add = felix_mrp_add,
+ .port_mrp_del = felix_mrp_del,
+ .port_mrp_add_ring_role = felix_mrp_add_ring_role,
+ .port_mrp_del_ring_role = felix_mrp_del_ring_role,
+ .tag_8021q_vlan_add = felix_tag_8021q_vlan_add,
+ .tag_8021q_vlan_del = felix_tag_8021q_vlan_del,
+ .port_get_default_prio = felix_port_get_default_prio,
+ .port_set_default_prio = felix_port_set_default_prio,
+ .port_get_dscp_prio = felix_port_get_dscp_prio,
+ .port_add_dscp_prio = felix_port_add_dscp_prio,
+ .port_del_dscp_prio = felix_port_del_dscp_prio,
+ .port_set_host_flood = felix_port_set_host_flood,
+ .port_change_master = felix_port_change_master,
+};
+
+struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
+{
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct dsa_switch *ds = felix->ds;
+
+ if (!dsa_is_user_port(ds, port))
+ return NULL;
+
+ return dsa_to_port(ds, port)->slave;
+}
+
+int felix_netdev_to_port(struct net_device *dev)
+{
+ struct dsa_port *dp;
+
+ dp = dsa_port_from_netdev(dev);
+ if (IS_ERR(dp))
+ return -EINVAL;
+
+ return dp->index;
+}
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
new file mode 100644
index 000000000..c9c29999c
--- /dev/null
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2019 NXP
+ */
+#ifndef _MSCC_FELIX_H
+#define _MSCC_FELIX_H
+
+#define ocelot_to_felix(o) container_of((o), struct felix, ocelot)
+#define FELIX_MAC_QUIRKS OCELOT_QUIRK_PCS_PERFORMS_RATE_ADAPTATION
+
+#define OCELOT_PORT_MODE_INTERNAL BIT(0)
+#define OCELOT_PORT_MODE_SGMII BIT(1)
+#define OCELOT_PORT_MODE_QSGMII BIT(2)
+#define OCELOT_PORT_MODE_2500BASEX BIT(3)
+#define OCELOT_PORT_MODE_USXGMII BIT(4)
+#define OCELOT_PORT_MODE_1000BASEX BIT(5)
+
+/* Platform-specific information */
+struct felix_info {
+ /* Hardcoded resources provided by the hardware instantiation. */
+ const struct resource *resources;
+ size_t num_resources;
+ /* Names of the mandatory resources that will be requested during
+ * probe. Must have TARGET_MAX elements, since it is indexed by target.
+ */
+ const char *const *resource_names;
+ const struct reg_field *regfields;
+ const u32 *const *map;
+ const struct ocelot_ops *ops;
+ const u32 *port_modes;
+ int num_mact_rows;
+ const struct ocelot_stat_layout *stats_layout;
+ int num_ports;
+ int num_tx_queues;
+ struct vcap_props *vcap;
+ u16 vcap_pol_base;
+ u16 vcap_pol_max;
+ u16 vcap_pol_base2;
+ u16 vcap_pol_max2;
+ const struct ptp_clock_info *ptp_caps;
+
+ /* Some Ocelot switches are integrated into the SoC without the
+ * extraction IRQ line connected to the ARM GIC. By enabling this
+ * workaround, the few packets that are delivered to the CPU port
+ * module (currently only PTP) are copied not only to the hardware CPU
+ * port module, but also to the 802.1Q Ethernet CPU port, and polling
+ * the extraction registers is triggered once the DSA tagger sees a PTP
+ * frame. The Ethernet frame is only used as a notification: it is
+ * dropped, and the original frame is extracted over MMIO and annotated
+ * with the RX timestamp.
+ */
+ bool quirk_no_xtr_irq;
+
+ int (*mdio_bus_alloc)(struct ocelot *ocelot);
+ void (*mdio_bus_free)(struct ocelot *ocelot);
+ void (*phylink_validate)(struct ocelot *ocelot, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state);
+ int (*port_setup_tc)(struct dsa_switch *ds, int port,
+ enum tc_setup_type type, void *type_data);
+ void (*tas_guard_bands_update)(struct ocelot *ocelot, int port);
+ void (*port_sched_speed_set)(struct ocelot *ocelot, int port,
+ u32 speed);
+};
+
+/* Methods for initializing the hardware resources specific to a tagging
+ * protocol (like the NPI port, for "ocelot" or "seville", or the VCAP TCAMs,
+ * for "ocelot-8021q").
+ * It is important that the resources configured here do not have side effects
+ * for the other tagging protocols. If a resource does have such side effects,
+ * its configuration needs to go to felix_tag_proto_setup_shared().
+ */
+struct felix_tag_proto_ops {
+ int (*setup)(struct dsa_switch *ds);
+ void (*teardown)(struct dsa_switch *ds);
+ unsigned long (*get_host_fwd_mask)(struct dsa_switch *ds);
+ int (*change_master)(struct dsa_switch *ds, int port,
+ struct net_device *master,
+ struct netlink_ext_ack *extack);
+};
+
+extern const struct dsa_switch_ops felix_switch_ops;
+
+/* DSA glue / front-end for struct ocelot */
+struct felix {
+ struct dsa_switch *ds;
+ const struct felix_info *info;
+ struct ocelot ocelot;
+ struct mii_bus *imdio;
+ struct phylink_pcs **pcs;
+ resource_size_t switch_base;
+ enum dsa_tag_protocol tag_proto;
+ const struct felix_tag_proto_ops *tag_proto_ops;
+ struct kthread_worker *xmit_worker;
+ unsigned long host_flood_uc_mask;
+ unsigned long host_flood_mc_mask;
+};
+
+struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port);
+int felix_netdev_to_port(struct net_device *dev);
+
+#endif
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
new file mode 100644
index 000000000..018648219
--- /dev/null
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -0,0 +1,2743 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Copyright 2017 Microsemi Corporation
+ * Copyright 2018-2019 NXP
+ */
+#include <linux/fsl/enetc_mdio.h>
+#include <soc/mscc/ocelot_qsys.h>
+#include <soc/mscc/ocelot_vcap.h>
+#include <soc/mscc/ocelot_ana.h>
+#include <soc/mscc/ocelot_ptp.h>
+#include <soc/mscc/ocelot_sys.h>
+#include <net/tc_act/tc_gate.h>
+#include <soc/mscc/ocelot.h>
+#include <linux/dsa/ocelot.h>
+#include <linux/pcs-lynx.h>
+#include <net/pkt_sched.h>
+#include <linux/iopoll.h>
+#include <linux/mdio.h>
+#include <linux/pci.h>
+#include <linux/time.h>
+#include "felix.h"
+
+#define VSC9959_NUM_PORTS 6
+
+#define VSC9959_TAS_GCL_ENTRY_MAX 63
+#define VSC9959_TAS_MIN_GATE_LEN_NS 33
+#define VSC9959_VCAP_POLICER_BASE 63
+#define VSC9959_VCAP_POLICER_MAX 383
+#define VSC9959_SWITCH_PCI_BAR 4
+#define VSC9959_IMDIO_PCI_BAR 0
+
+#define VSC9959_PORT_MODE_SERDES (OCELOT_PORT_MODE_SGMII | \
+ OCELOT_PORT_MODE_QSGMII | \
+ OCELOT_PORT_MODE_1000BASEX | \
+ OCELOT_PORT_MODE_2500BASEX | \
+ OCELOT_PORT_MODE_USXGMII)
+
+static const u32 vsc9959_port_modes[VSC9959_NUM_PORTS] = {
+ VSC9959_PORT_MODE_SERDES,
+ VSC9959_PORT_MODE_SERDES,
+ VSC9959_PORT_MODE_SERDES,
+ VSC9959_PORT_MODE_SERDES,
+ OCELOT_PORT_MODE_INTERNAL,
+ OCELOT_PORT_MODE_INTERNAL,
+};
+
+static const u32 vsc9959_ana_regmap[] = {
+ REG(ANA_ADVLEARN, 0x0089a0),
+ REG(ANA_VLANMASK, 0x0089a4),
+ REG_RESERVED(ANA_PORT_B_DOMAIN),
+ REG(ANA_ANAGEFIL, 0x0089ac),
+ REG(ANA_ANEVENTS, 0x0089b0),
+ REG(ANA_STORMLIMIT_BURST, 0x0089b4),
+ REG(ANA_STORMLIMIT_CFG, 0x0089b8),
+ REG(ANA_ISOLATED_PORTS, 0x0089c8),
+ REG(ANA_COMMUNITY_PORTS, 0x0089cc),
+ REG(ANA_AUTOAGE, 0x0089d0),
+ REG(ANA_MACTOPTIONS, 0x0089d4),
+ REG(ANA_LEARNDISC, 0x0089d8),
+ REG(ANA_AGENCTRL, 0x0089dc),
+ REG(ANA_MIRRORPORTS, 0x0089e0),
+ REG(ANA_EMIRRORPORTS, 0x0089e4),
+ REG(ANA_FLOODING, 0x0089e8),
+ REG(ANA_FLOODING_IPMC, 0x008a08),
+ REG(ANA_SFLOW_CFG, 0x008a0c),
+ REG(ANA_PORT_MODE, 0x008a28),
+ REG(ANA_CUT_THRU_CFG, 0x008a48),
+ REG(ANA_PGID_PGID, 0x008400),
+ REG(ANA_TABLES_ANMOVED, 0x007f1c),
+ REG(ANA_TABLES_MACHDATA, 0x007f20),
+ REG(ANA_TABLES_MACLDATA, 0x007f24),
+ REG(ANA_TABLES_STREAMDATA, 0x007f28),
+ REG(ANA_TABLES_MACACCESS, 0x007f2c),
+ REG(ANA_TABLES_MACTINDX, 0x007f30),
+ REG(ANA_TABLES_VLANACCESS, 0x007f34),
+ REG(ANA_TABLES_VLANTIDX, 0x007f38),
+ REG(ANA_TABLES_ISDXACCESS, 0x007f3c),
+ REG(ANA_TABLES_ISDXTIDX, 0x007f40),
+ REG(ANA_TABLES_ENTRYLIM, 0x007f00),
+ REG(ANA_TABLES_PTP_ID_HIGH, 0x007f44),
+ REG(ANA_TABLES_PTP_ID_LOW, 0x007f48),
+ REG(ANA_TABLES_STREAMACCESS, 0x007f4c),
+ REG(ANA_TABLES_STREAMTIDX, 0x007f50),
+ REG(ANA_TABLES_SEQ_HISTORY, 0x007f54),
+ REG(ANA_TABLES_SEQ_MASK, 0x007f58),
+ REG(ANA_TABLES_SFID_MASK, 0x007f5c),
+ REG(ANA_TABLES_SFIDACCESS, 0x007f60),
+ REG(ANA_TABLES_SFIDTIDX, 0x007f64),
+ REG(ANA_MSTI_STATE, 0x008600),
+ REG(ANA_OAM_UPM_LM_CNT, 0x008000),
+ REG(ANA_SG_ACCESS_CTRL, 0x008a64),
+ REG(ANA_SG_CONFIG_REG_1, 0x007fb0),
+ REG(ANA_SG_CONFIG_REG_2, 0x007fb4),
+ REG(ANA_SG_CONFIG_REG_3, 0x007fb8),
+ REG(ANA_SG_CONFIG_REG_4, 0x007fbc),
+ REG(ANA_SG_CONFIG_REG_5, 0x007fc0),
+ REG(ANA_SG_GCL_GS_CONFIG, 0x007f80),
+ REG(ANA_SG_GCL_TI_CONFIG, 0x007f90),
+ REG(ANA_SG_STATUS_REG_1, 0x008980),
+ REG(ANA_SG_STATUS_REG_2, 0x008984),
+ REG(ANA_SG_STATUS_REG_3, 0x008988),
+ REG(ANA_PORT_VLAN_CFG, 0x007800),
+ REG(ANA_PORT_DROP_CFG, 0x007804),
+ REG(ANA_PORT_QOS_CFG, 0x007808),
+ REG(ANA_PORT_VCAP_CFG, 0x00780c),
+ REG(ANA_PORT_VCAP_S1_KEY_CFG, 0x007810),
+ REG(ANA_PORT_VCAP_S2_CFG, 0x00781c),
+ REG(ANA_PORT_PCP_DEI_MAP, 0x007820),
+ REG(ANA_PORT_CPU_FWD_CFG, 0x007860),
+ REG(ANA_PORT_CPU_FWD_BPDU_CFG, 0x007864),
+ REG(ANA_PORT_CPU_FWD_GARP_CFG, 0x007868),
+ REG(ANA_PORT_CPU_FWD_CCM_CFG, 0x00786c),
+ REG(ANA_PORT_PORT_CFG, 0x007870),
+ REG(ANA_PORT_POL_CFG, 0x007874),
+ REG(ANA_PORT_PTP_CFG, 0x007878),
+ REG(ANA_PORT_PTP_DLY1_CFG, 0x00787c),
+ REG(ANA_PORT_PTP_DLY2_CFG, 0x007880),
+ REG(ANA_PORT_SFID_CFG, 0x007884),
+ REG(ANA_PFC_PFC_CFG, 0x008800),
+ REG_RESERVED(ANA_PFC_PFC_TIMER),
+ REG_RESERVED(ANA_IPT_OAM_MEP_CFG),
+ REG_RESERVED(ANA_IPT_IPT),
+ REG_RESERVED(ANA_PPT_PPT),
+ REG_RESERVED(ANA_FID_MAP_FID_MAP),
+ REG(ANA_AGGR_CFG, 0x008a68),
+ REG(ANA_CPUQ_CFG, 0x008a6c),
+ REG_RESERVED(ANA_CPUQ_CFG2),
+ REG(ANA_CPUQ_8021_CFG, 0x008a74),
+ REG(ANA_DSCP_CFG, 0x008ab4),
+ REG(ANA_DSCP_REWR_CFG, 0x008bb4),
+ REG(ANA_VCAP_RNG_TYPE_CFG, 0x008bf4),
+ REG(ANA_VCAP_RNG_VAL_CFG, 0x008c14),
+ REG_RESERVED(ANA_VRAP_CFG),
+ REG_RESERVED(ANA_VRAP_HDR_DATA),
+ REG_RESERVED(ANA_VRAP_HDR_MASK),
+ REG(ANA_DISCARD_CFG, 0x008c40),
+ REG(ANA_FID_CFG, 0x008c44),
+ REG(ANA_POL_PIR_CFG, 0x004000),
+ REG(ANA_POL_CIR_CFG, 0x004004),
+ REG(ANA_POL_MODE_CFG, 0x004008),
+ REG(ANA_POL_PIR_STATE, 0x00400c),
+ REG(ANA_POL_CIR_STATE, 0x004010),
+ REG_RESERVED(ANA_POL_STATE),
+ REG(ANA_POL_FLOWC, 0x008c48),
+ REG(ANA_POL_HYST, 0x008cb4),
+ REG_RESERVED(ANA_POL_MISC_CFG),
+};
+
+static const u32 vsc9959_qs_regmap[] = {
+ REG(QS_XTR_GRP_CFG, 0x000000),
+ REG(QS_XTR_RD, 0x000008),
+ REG(QS_XTR_FRM_PRUNING, 0x000010),
+ REG(QS_XTR_FLUSH, 0x000018),
+ REG(QS_XTR_DATA_PRESENT, 0x00001c),
+ REG(QS_XTR_CFG, 0x000020),
+ REG(QS_INJ_GRP_CFG, 0x000024),
+ REG(QS_INJ_WR, 0x00002c),
+ REG(QS_INJ_CTRL, 0x000034),
+ REG(QS_INJ_STATUS, 0x00003c),
+ REG(QS_INJ_ERR, 0x000040),
+ REG_RESERVED(QS_INH_DBG),
+};
+
+static const u32 vsc9959_vcap_regmap[] = {
+ /* VCAP_CORE_CFG */
+ REG(VCAP_CORE_UPDATE_CTRL, 0x000000),
+ REG(VCAP_CORE_MV_CFG, 0x000004),
+ /* VCAP_CORE_CACHE */
+ REG(VCAP_CACHE_ENTRY_DAT, 0x000008),
+ REG(VCAP_CACHE_MASK_DAT, 0x000108),
+ REG(VCAP_CACHE_ACTION_DAT, 0x000208),
+ REG(VCAP_CACHE_CNT_DAT, 0x000308),
+ REG(VCAP_CACHE_TG_DAT, 0x000388),
+ /* VCAP_CONST */
+ REG(VCAP_CONST_VCAP_VER, 0x000398),
+ REG(VCAP_CONST_ENTRY_WIDTH, 0x00039c),
+ REG(VCAP_CONST_ENTRY_CNT, 0x0003a0),
+ REG(VCAP_CONST_ENTRY_SWCNT, 0x0003a4),
+ REG(VCAP_CONST_ENTRY_TG_WIDTH, 0x0003a8),
+ REG(VCAP_CONST_ACTION_DEF_CNT, 0x0003ac),
+ REG(VCAP_CONST_ACTION_WIDTH, 0x0003b0),
+ REG(VCAP_CONST_CNT_WIDTH, 0x0003b4),
+ REG(VCAP_CONST_CORE_CNT, 0x0003b8),
+ REG(VCAP_CONST_IF_CNT, 0x0003bc),
+};
+
+static const u32 vsc9959_qsys_regmap[] = {
+ REG(QSYS_PORT_MODE, 0x00f460),
+ REG(QSYS_SWITCH_PORT_MODE, 0x00f480),
+ REG(QSYS_STAT_CNT_CFG, 0x00f49c),
+ REG(QSYS_EEE_CFG, 0x00f4a0),
+ REG(QSYS_EEE_THRES, 0x00f4b8),
+ REG(QSYS_IGR_NO_SHARING, 0x00f4bc),
+ REG(QSYS_EGR_NO_SHARING, 0x00f4c0),
+ REG(QSYS_SW_STATUS, 0x00f4c4),
+ REG(QSYS_EXT_CPU_CFG, 0x00f4e0),
+ REG_RESERVED(QSYS_PAD_CFG),
+ REG(QSYS_CPU_GROUP_MAP, 0x00f4e8),
+ REG_RESERVED(QSYS_QMAP),
+ REG_RESERVED(QSYS_ISDX_SGRP),
+ REG_RESERVED(QSYS_TIMED_FRAME_ENTRY),
+ REG(QSYS_TFRM_MISC, 0x00f50c),
+ REG(QSYS_TFRM_PORT_DLY, 0x00f510),
+ REG(QSYS_TFRM_TIMER_CFG_1, 0x00f514),
+ REG(QSYS_TFRM_TIMER_CFG_2, 0x00f518),
+ REG(QSYS_TFRM_TIMER_CFG_3, 0x00f51c),
+ REG(QSYS_TFRM_TIMER_CFG_4, 0x00f520),
+ REG(QSYS_TFRM_TIMER_CFG_5, 0x00f524),
+ REG(QSYS_TFRM_TIMER_CFG_6, 0x00f528),
+ REG(QSYS_TFRM_TIMER_CFG_7, 0x00f52c),
+ REG(QSYS_TFRM_TIMER_CFG_8, 0x00f530),
+ REG(QSYS_RED_PROFILE, 0x00f534),
+ REG(QSYS_RES_QOS_MODE, 0x00f574),
+ REG(QSYS_RES_CFG, 0x00c000),
+ REG(QSYS_RES_STAT, 0x00c004),
+ REG(QSYS_EGR_DROP_MODE, 0x00f578),
+ REG(QSYS_EQ_CTRL, 0x00f57c),
+ REG_RESERVED(QSYS_EVENTS_CORE),
+ REG(QSYS_QMAXSDU_CFG_0, 0x00f584),
+ REG(QSYS_QMAXSDU_CFG_1, 0x00f5a0),
+ REG(QSYS_QMAXSDU_CFG_2, 0x00f5bc),
+ REG(QSYS_QMAXSDU_CFG_3, 0x00f5d8),
+ REG(QSYS_QMAXSDU_CFG_4, 0x00f5f4),
+ REG(QSYS_QMAXSDU_CFG_5, 0x00f610),
+ REG(QSYS_QMAXSDU_CFG_6, 0x00f62c),
+ REG(QSYS_QMAXSDU_CFG_7, 0x00f648),
+ REG(QSYS_PREEMPTION_CFG, 0x00f664),
+ REG(QSYS_CIR_CFG, 0x000000),
+ REG(QSYS_EIR_CFG, 0x000004),
+ REG(QSYS_SE_CFG, 0x000008),
+ REG(QSYS_SE_DWRR_CFG, 0x00000c),
+ REG_RESERVED(QSYS_SE_CONNECT),
+ REG(QSYS_SE_DLB_SENSE, 0x000040),
+ REG(QSYS_CIR_STATE, 0x000044),
+ REG(QSYS_EIR_STATE, 0x000048),
+ REG_RESERVED(QSYS_SE_STATE),
+ REG(QSYS_HSCH_MISC_CFG, 0x00f67c),
+ REG(QSYS_TAG_CONFIG, 0x00f680),
+ REG(QSYS_TAS_PARAM_CFG_CTRL, 0x00f698),
+ REG(QSYS_PORT_MAX_SDU, 0x00f69c),
+ REG(QSYS_PARAM_CFG_REG_1, 0x00f440),
+ REG(QSYS_PARAM_CFG_REG_2, 0x00f444),
+ REG(QSYS_PARAM_CFG_REG_3, 0x00f448),
+ REG(QSYS_PARAM_CFG_REG_4, 0x00f44c),
+ REG(QSYS_PARAM_CFG_REG_5, 0x00f450),
+ REG(QSYS_GCL_CFG_REG_1, 0x00f454),
+ REG(QSYS_GCL_CFG_REG_2, 0x00f458),
+ REG(QSYS_PARAM_STATUS_REG_1, 0x00f400),
+ REG(QSYS_PARAM_STATUS_REG_2, 0x00f404),
+ REG(QSYS_PARAM_STATUS_REG_3, 0x00f408),
+ REG(QSYS_PARAM_STATUS_REG_4, 0x00f40c),
+ REG(QSYS_PARAM_STATUS_REG_5, 0x00f410),
+ REG(QSYS_PARAM_STATUS_REG_6, 0x00f414),
+ REG(QSYS_PARAM_STATUS_REG_7, 0x00f418),
+ REG(QSYS_PARAM_STATUS_REG_8, 0x00f41c),
+ REG(QSYS_PARAM_STATUS_REG_9, 0x00f420),
+ REG(QSYS_GCL_STATUS_REG_1, 0x00f424),
+ REG(QSYS_GCL_STATUS_REG_2, 0x00f428),
+};
+
+static const u32 vsc9959_rew_regmap[] = {
+ REG(REW_PORT_VLAN_CFG, 0x000000),
+ REG(REW_TAG_CFG, 0x000004),
+ REG(REW_PORT_CFG, 0x000008),
+ REG(REW_DSCP_CFG, 0x00000c),
+ REG(REW_PCP_DEI_QOS_MAP_CFG, 0x000010),
+ REG(REW_PTP_CFG, 0x000050),
+ REG(REW_PTP_DLY1_CFG, 0x000054),
+ REG(REW_RED_TAG_CFG, 0x000058),
+ REG(REW_DSCP_REMAP_DP1_CFG, 0x000410),
+ REG(REW_DSCP_REMAP_CFG, 0x000510),
+ REG_RESERVED(REW_STAT_CFG),
+ REG_RESERVED(REW_REW_STICKY),
+ REG_RESERVED(REW_PPT),
+};
+
+static const u32 vsc9959_sys_regmap[] = {
+ REG(SYS_COUNT_RX_OCTETS, 0x000000),
+ REG(SYS_COUNT_RX_UNICAST, 0x000004),
+ REG(SYS_COUNT_RX_MULTICAST, 0x000008),
+ REG(SYS_COUNT_RX_BROADCAST, 0x00000c),
+ REG(SYS_COUNT_RX_SHORTS, 0x000010),
+ REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
+ REG(SYS_COUNT_RX_JABBERS, 0x000018),
+ REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c),
+ REG(SYS_COUNT_RX_SYM_ERRS, 0x000020),
+ REG(SYS_COUNT_RX_64, 0x000024),
+ REG(SYS_COUNT_RX_65_127, 0x000028),
+ REG(SYS_COUNT_RX_128_255, 0x00002c),
+ REG(SYS_COUNT_RX_256_511, 0x000030),
+ REG(SYS_COUNT_RX_512_1023, 0x000034),
+ REG(SYS_COUNT_RX_1024_1526, 0x000038),
+ REG(SYS_COUNT_RX_1527_MAX, 0x00003c),
+ REG(SYS_COUNT_RX_PAUSE, 0x000040),
+ REG(SYS_COUNT_RX_CONTROL, 0x000044),
+ REG(SYS_COUNT_RX_LONGS, 0x000048),
+ REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c),
+ REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050),
+ REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054),
+ REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058),
+ REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c),
+ REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060),
+ REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064),
+ REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068),
+ REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090),
+ REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094),
+ REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098),
+ REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0),
+ REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4),
+ REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8),
+ REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac),
+ REG(SYS_COUNT_TX_OCTETS, 0x000200),
+ REG(SYS_COUNT_TX_UNICAST, 0x000204),
+ REG(SYS_COUNT_TX_MULTICAST, 0x000208),
+ REG(SYS_COUNT_TX_BROADCAST, 0x00020c),
+ REG(SYS_COUNT_TX_COLLISION, 0x000210),
+ REG(SYS_COUNT_TX_DROPS, 0x000214),
+ REG(SYS_COUNT_TX_PAUSE, 0x000218),
+ REG(SYS_COUNT_TX_64, 0x00021c),
+ REG(SYS_COUNT_TX_65_127, 0x000220),
+ REG(SYS_COUNT_TX_128_255, 0x000224),
+ REG(SYS_COUNT_TX_256_511, 0x000228),
+ REG(SYS_COUNT_TX_512_1023, 0x00022c),
+ REG(SYS_COUNT_TX_1024_1526, 0x000230),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000234),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000238),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00023c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000240),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000244),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000248),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00024c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000250),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000254),
+ REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000258),
+ REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00025c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000260),
+ REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000264),
+ REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000268),
+ REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00026c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000270),
+ REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000274),
+ REG(SYS_COUNT_TX_AGED, 0x000278),
+ REG(SYS_COUNT_DROP_LOCAL, 0x000400),
+ REG(SYS_COUNT_DROP_TAIL, 0x000404),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000408),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00040c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000410),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000414),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000418),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00041c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000420),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000424),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000428),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00042c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000430),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000434),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000438),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00043c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000440),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000444),
+ REG(SYS_COUNT_SF_MATCHING_FRAMES, 0x000800),
+ REG(SYS_COUNT_SF_NOT_PASSING_FRAMES, 0x000804),
+ REG(SYS_COUNT_SF_NOT_PASSING_SDU, 0x000808),
+ REG(SYS_COUNT_SF_RED_FRAMES, 0x00080c),
+ REG(SYS_RESET_CFG, 0x000e00),
+ REG(SYS_SR_ETYPE_CFG, 0x000e04),
+ REG(SYS_VLAN_ETYPE_CFG, 0x000e08),
+ REG(SYS_PORT_MODE, 0x000e0c),
+ REG(SYS_FRONT_PORT_MODE, 0x000e2c),
+ REG(SYS_FRM_AGING, 0x000e44),
+ REG(SYS_STAT_CFG, 0x000e48),
+ REG(SYS_SW_STATUS, 0x000e4c),
+ REG_RESERVED(SYS_MISC_CFG),
+ REG(SYS_REW_MAC_HIGH_CFG, 0x000e6c),
+ REG(SYS_REW_MAC_LOW_CFG, 0x000e84),
+ REG(SYS_TIMESTAMP_OFFSET, 0x000e9c),
+ REG(SYS_PAUSE_CFG, 0x000ea0),
+ REG(SYS_PAUSE_TOT_CFG, 0x000ebc),
+ REG(SYS_ATOP, 0x000ec0),
+ REG(SYS_ATOP_TOT_CFG, 0x000edc),
+ REG(SYS_MAC_FC_CFG, 0x000ee0),
+ REG(SYS_MMGT, 0x000ef8),
+ REG_RESERVED(SYS_MMGT_FAST),
+ REG_RESERVED(SYS_EVENTS_DIF),
+ REG_RESERVED(SYS_EVENTS_CORE),
+ REG(SYS_PTP_STATUS, 0x000f14),
+ REG(SYS_PTP_TXSTAMP, 0x000f18),
+ REG(SYS_PTP_NXT, 0x000f1c),
+ REG(SYS_PTP_CFG, 0x000f20),
+ REG(SYS_RAM_INIT, 0x000f24),
+ REG_RESERVED(SYS_CM_ADDR),
+ REG_RESERVED(SYS_CM_DATA_WR),
+ REG_RESERVED(SYS_CM_DATA_RD),
+ REG_RESERVED(SYS_CM_OP),
+ REG_RESERVED(SYS_CM_DATA),
+};
+
+static const u32 vsc9959_ptp_regmap[] = {
+ REG(PTP_PIN_CFG, 0x000000),
+ REG(PTP_PIN_TOD_SEC_MSB, 0x000004),
+ REG(PTP_PIN_TOD_SEC_LSB, 0x000008),
+ REG(PTP_PIN_TOD_NSEC, 0x00000c),
+ REG(PTP_PIN_WF_HIGH_PERIOD, 0x000014),
+ REG(PTP_PIN_WF_LOW_PERIOD, 0x000018),
+ REG(PTP_CFG_MISC, 0x0000a0),
+ REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4),
+ REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8),
+};
+
+static const u32 vsc9959_gcb_regmap[] = {
+ REG(GCB_SOFT_RST, 0x000004),
+};
+
+static const u32 vsc9959_dev_gmii_regmap[] = {
+ REG(DEV_CLOCK_CFG, 0x0),
+ REG(DEV_PORT_MISC, 0x4),
+ REG(DEV_EVENTS, 0x8),
+ REG(DEV_EEE_CFG, 0xc),
+ REG(DEV_RX_PATH_DELAY, 0x10),
+ REG(DEV_TX_PATH_DELAY, 0x14),
+ REG(DEV_PTP_PREDICT_CFG, 0x18),
+ REG(DEV_MAC_ENA_CFG, 0x1c),
+ REG(DEV_MAC_MODE_CFG, 0x20),
+ REG(DEV_MAC_MAXLEN_CFG, 0x24),
+ REG(DEV_MAC_TAGS_CFG, 0x28),
+ REG(DEV_MAC_ADV_CHK_CFG, 0x2c),
+ REG(DEV_MAC_IFG_CFG, 0x30),
+ REG(DEV_MAC_HDX_CFG, 0x34),
+ REG(DEV_MAC_DBG_CFG, 0x38),
+ REG(DEV_MAC_FC_MAC_LOW_CFG, 0x3c),
+ REG(DEV_MAC_FC_MAC_HIGH_CFG, 0x40),
+ REG(DEV_MAC_STICKY, 0x44),
+ REG_RESERVED(PCS1G_CFG),
+ REG_RESERVED(PCS1G_MODE_CFG),
+ REG_RESERVED(PCS1G_SD_CFG),
+ REG_RESERVED(PCS1G_ANEG_CFG),
+ REG_RESERVED(PCS1G_ANEG_NP_CFG),
+ REG_RESERVED(PCS1G_LB_CFG),
+ REG_RESERVED(PCS1G_DBG_CFG),
+ REG_RESERVED(PCS1G_CDET_CFG),
+ REG_RESERVED(PCS1G_ANEG_STATUS),
+ REG_RESERVED(PCS1G_ANEG_NP_STATUS),
+ REG_RESERVED(PCS1G_LINK_STATUS),
+ REG_RESERVED(PCS1G_LINK_DOWN_CNT),
+ REG_RESERVED(PCS1G_STICKY),
+ REG_RESERVED(PCS1G_DEBUG_STATUS),
+ REG_RESERVED(PCS1G_LPI_CFG),
+ REG_RESERVED(PCS1G_LPI_WAKE_ERROR_CNT),
+ REG_RESERVED(PCS1G_LPI_STATUS),
+ REG_RESERVED(PCS1G_TSTPAT_MODE_CFG),
+ REG_RESERVED(PCS1G_TSTPAT_STATUS),
+ REG_RESERVED(DEV_PCS_FX100_CFG),
+ REG_RESERVED(DEV_PCS_FX100_STATUS),
+};
+
+static const u32 *vsc9959_regmap[TARGET_MAX] = {
+ [ANA] = vsc9959_ana_regmap,
+ [QS] = vsc9959_qs_regmap,
+ [QSYS] = vsc9959_qsys_regmap,
+ [REW] = vsc9959_rew_regmap,
+ [SYS] = vsc9959_sys_regmap,
+ [S0] = vsc9959_vcap_regmap,
+ [S1] = vsc9959_vcap_regmap,
+ [S2] = vsc9959_vcap_regmap,
+ [PTP] = vsc9959_ptp_regmap,
+ [GCB] = vsc9959_gcb_regmap,
+ [DEV_GMII] = vsc9959_dev_gmii_regmap,
+};
+
+/* Addresses are relative to the PCI device's base address */
+static const struct resource vsc9959_resources[] = {
+ DEFINE_RES_MEM_NAMED(0x0010000, 0x0010000, "sys"),
+ DEFINE_RES_MEM_NAMED(0x0030000, 0x0010000, "rew"),
+ DEFINE_RES_MEM_NAMED(0x0040000, 0x0000400, "s0"),
+ DEFINE_RES_MEM_NAMED(0x0050000, 0x0000400, "s1"),
+ DEFINE_RES_MEM_NAMED(0x0060000, 0x0000400, "s2"),
+ DEFINE_RES_MEM_NAMED(0x0070000, 0x0000200, "devcpu_gcb"),
+ DEFINE_RES_MEM_NAMED(0x0080000, 0x0000100, "qs"),
+ DEFINE_RES_MEM_NAMED(0x0090000, 0x00000cc, "ptp"),
+ DEFINE_RES_MEM_NAMED(0x0100000, 0x0010000, "port0"),
+ DEFINE_RES_MEM_NAMED(0x0110000, 0x0010000, "port1"),
+ DEFINE_RES_MEM_NAMED(0x0120000, 0x0010000, "port2"),
+ DEFINE_RES_MEM_NAMED(0x0130000, 0x0010000, "port3"),
+ DEFINE_RES_MEM_NAMED(0x0140000, 0x0010000, "port4"),
+ DEFINE_RES_MEM_NAMED(0x0150000, 0x0010000, "port5"),
+ DEFINE_RES_MEM_NAMED(0x0200000, 0x0020000, "qsys"),
+ DEFINE_RES_MEM_NAMED(0x0280000, 0x0010000, "ana"),
+};
+
+static const char * const vsc9959_resource_names[TARGET_MAX] = {
+ [SYS] = "sys",
+ [REW] = "rew",
+ [S0] = "s0",
+ [S1] = "s1",
+ [S2] = "s2",
+ [GCB] = "devcpu_gcb",
+ [QS] = "qs",
+ [PTP] = "ptp",
+ [QSYS] = "qsys",
+ [ANA] = "ana",
+};
+
+/* Internal MDIO bus of port MAC 0, through which the SerDes lanes that act
+ * as SGMII/QSGMII MAC PCSes can be reached.
+ */
+static const struct resource vsc9959_imdio_res =
+ DEFINE_RES_MEM_NAMED(0x8030, 0x10, "imdio");
+
+static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = {
+ [ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 6, 6),
+ [ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 5),
+ [ANA_ANEVENTS_FLOOD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 30, 30),
+ [ANA_ANEVENTS_AUTOAGE] = REG_FIELD(ANA_ANEVENTS, 26, 26),
+ [ANA_ANEVENTS_STORM_DROP] = REG_FIELD(ANA_ANEVENTS, 24, 24),
+ [ANA_ANEVENTS_LEARN_DROP] = REG_FIELD(ANA_ANEVENTS, 23, 23),
+ [ANA_ANEVENTS_AGED_ENTRY] = REG_FIELD(ANA_ANEVENTS, 22, 22),
+ [ANA_ANEVENTS_CPU_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 21, 21),
+ [ANA_ANEVENTS_AUTO_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 20, 20),
+ [ANA_ANEVENTS_LEARN_REMOVE] = REG_FIELD(ANA_ANEVENTS, 19, 19),
+ [ANA_ANEVENTS_AUTO_LEARNED] = REG_FIELD(ANA_ANEVENTS, 18, 18),
+ [ANA_ANEVENTS_AUTO_MOVED] = REG_FIELD(ANA_ANEVENTS, 17, 17),
+ [ANA_ANEVENTS_CLASSIFIED_DROP] = REG_FIELD(ANA_ANEVENTS, 15, 15),
+ [ANA_ANEVENTS_CLASSIFIED_COPY] = REG_FIELD(ANA_ANEVENTS, 14, 14),
+ [ANA_ANEVENTS_VLAN_DISCARD] = REG_FIELD(ANA_ANEVENTS, 13, 13),
+ [ANA_ANEVENTS_FWD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 12, 12),
+ [ANA_ANEVENTS_MULTICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 11, 11),
+ [ANA_ANEVENTS_UNICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 10, 10),
+ [ANA_ANEVENTS_DEST_KNOWN] = REG_FIELD(ANA_ANEVENTS, 9, 9),
+ [ANA_ANEVENTS_BUCKET3_MATCH] = REG_FIELD(ANA_ANEVENTS, 8, 8),
+ [ANA_ANEVENTS_BUCKET2_MATCH] = REG_FIELD(ANA_ANEVENTS, 7, 7),
+ [ANA_ANEVENTS_BUCKET1_MATCH] = REG_FIELD(ANA_ANEVENTS, 6, 6),
+ [ANA_ANEVENTS_BUCKET0_MATCH] = REG_FIELD(ANA_ANEVENTS, 5, 5),
+ [ANA_ANEVENTS_CPU_OPERATION] = REG_FIELD(ANA_ANEVENTS, 4, 4),
+ [ANA_ANEVENTS_DMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 3, 3),
+ [ANA_ANEVENTS_SMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 2, 2),
+ [ANA_ANEVENTS_SEQ_GEN_ERR_0] = REG_FIELD(ANA_ANEVENTS, 1, 1),
+ [ANA_ANEVENTS_SEQ_GEN_ERR_1] = REG_FIELD(ANA_ANEVENTS, 0, 0),
+ [ANA_TABLES_MACACCESS_B_DOM] = REG_FIELD(ANA_TABLES_MACACCESS, 16, 16),
+ [ANA_TABLES_MACTINDX_BUCKET] = REG_FIELD(ANA_TABLES_MACTINDX, 11, 12),
+ [ANA_TABLES_MACTINDX_M_INDEX] = REG_FIELD(ANA_TABLES_MACTINDX, 0, 10),
+ [SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 0, 0),
+ [GCB_SOFT_RST_SWC_RST] = REG_FIELD(GCB_SOFT_RST, 0, 0),
+ /* Replicated per number of ports (7), register size 4 per port */
+ [QSYS_SWITCH_PORT_MODE_PORT_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 14, 14, 7, 4),
+ [QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 11, 13, 7, 4),
+ [QSYS_SWITCH_PORT_MODE_YEL_RSRVD] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 10, 10, 7, 4),
+ [QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 9, 9, 7, 4),
+ [QSYS_SWITCH_PORT_MODE_TX_PFC_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 1, 8, 7, 4),
+ [QSYS_SWITCH_PORT_MODE_TX_PFC_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 0, 0, 7, 4),
+ [SYS_PORT_MODE_DATA_WO_TS] = REG_FIELD_ID(SYS_PORT_MODE, 5, 6, 7, 4),
+ [SYS_PORT_MODE_INCL_INJ_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 3, 4, 7, 4),
+ [SYS_PORT_MODE_INCL_XTR_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 1, 2, 7, 4),
+ [SYS_PORT_MODE_INCL_HDR_ERR] = REG_FIELD_ID(SYS_PORT_MODE, 0, 0, 7, 4),
+ [SYS_PAUSE_CFG_PAUSE_START] = REG_FIELD_ID(SYS_PAUSE_CFG, 10, 18, 7, 4),
+ [SYS_PAUSE_CFG_PAUSE_STOP] = REG_FIELD_ID(SYS_PAUSE_CFG, 1, 9, 7, 4),
+ [SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 7, 4),
+};
+
+static const struct ocelot_stat_layout vsc9959_stats_layout[OCELOT_NUM_STATS] = {
+ OCELOT_COMMON_STATS,
+};
+
+static const struct vcap_field vsc9959_vcap_es0_keys[] = {
+ [VCAP_ES0_EGR_PORT] = { 0, 3},
+ [VCAP_ES0_IGR_PORT] = { 3, 3},
+ [VCAP_ES0_RSV] = { 6, 2},
+ [VCAP_ES0_L2_MC] = { 8, 1},
+ [VCAP_ES0_L2_BC] = { 9, 1},
+ [VCAP_ES0_VID] = { 10, 12},
+ [VCAP_ES0_DP] = { 22, 1},
+ [VCAP_ES0_PCP] = { 23, 3},
+};
+
+static const struct vcap_field vsc9959_vcap_es0_actions[] = {
+ [VCAP_ES0_ACT_PUSH_OUTER_TAG] = { 0, 2},
+ [VCAP_ES0_ACT_PUSH_INNER_TAG] = { 2, 1},
+ [VCAP_ES0_ACT_TAG_A_TPID_SEL] = { 3, 2},
+ [VCAP_ES0_ACT_TAG_A_VID_SEL] = { 5, 1},
+ [VCAP_ES0_ACT_TAG_A_PCP_SEL] = { 6, 2},
+ [VCAP_ES0_ACT_TAG_A_DEI_SEL] = { 8, 2},
+ [VCAP_ES0_ACT_TAG_B_TPID_SEL] = { 10, 2},
+ [VCAP_ES0_ACT_TAG_B_VID_SEL] = { 12, 1},
+ [VCAP_ES0_ACT_TAG_B_PCP_SEL] = { 13, 2},
+ [VCAP_ES0_ACT_TAG_B_DEI_SEL] = { 15, 2},
+ [VCAP_ES0_ACT_VID_A_VAL] = { 17, 12},
+ [VCAP_ES0_ACT_PCP_A_VAL] = { 29, 3},
+ [VCAP_ES0_ACT_DEI_A_VAL] = { 32, 1},
+ [VCAP_ES0_ACT_VID_B_VAL] = { 33, 12},
+ [VCAP_ES0_ACT_PCP_B_VAL] = { 45, 3},
+ [VCAP_ES0_ACT_DEI_B_VAL] = { 48, 1},
+ [VCAP_ES0_ACT_RSV] = { 49, 23},
+ [VCAP_ES0_ACT_HIT_STICKY] = { 72, 1},
+};
+
+static const struct vcap_field vsc9959_vcap_is1_keys[] = {
+ [VCAP_IS1_HK_TYPE] = { 0, 1},
+ [VCAP_IS1_HK_LOOKUP] = { 1, 2},
+ [VCAP_IS1_HK_IGR_PORT_MASK] = { 3, 7},
+ [VCAP_IS1_HK_RSV] = { 10, 9},
+ [VCAP_IS1_HK_OAM_Y1731] = { 19, 1},
+ [VCAP_IS1_HK_L2_MC] = { 20, 1},
+ [VCAP_IS1_HK_L2_BC] = { 21, 1},
+ [VCAP_IS1_HK_IP_MC] = { 22, 1},
+ [VCAP_IS1_HK_VLAN_TAGGED] = { 23, 1},
+ [VCAP_IS1_HK_VLAN_DBL_TAGGED] = { 24, 1},
+ [VCAP_IS1_HK_TPID] = { 25, 1},
+ [VCAP_IS1_HK_VID] = { 26, 12},
+ [VCAP_IS1_HK_DEI] = { 38, 1},
+ [VCAP_IS1_HK_PCP] = { 39, 3},
+ /* Specific Fields for IS1 Half Key S1_NORMAL */
+ [VCAP_IS1_HK_L2_SMAC] = { 42, 48},
+ [VCAP_IS1_HK_ETYPE_LEN] = { 90, 1},
+ [VCAP_IS1_HK_ETYPE] = { 91, 16},
+ [VCAP_IS1_HK_IP_SNAP] = {107, 1},
+ [VCAP_IS1_HK_IP4] = {108, 1},
+ /* Layer-3 Information */
+ [VCAP_IS1_HK_L3_FRAGMENT] = {109, 1},
+ [VCAP_IS1_HK_L3_FRAG_OFS_GT0] = {110, 1},
+ [VCAP_IS1_HK_L3_OPTIONS] = {111, 1},
+ [VCAP_IS1_HK_L3_DSCP] = {112, 6},
+ [VCAP_IS1_HK_L3_IP4_SIP] = {118, 32},
+ /* Layer-4 Information */
+ [VCAP_IS1_HK_TCP_UDP] = {150, 1},
+ [VCAP_IS1_HK_TCP] = {151, 1},
+ [VCAP_IS1_HK_L4_SPORT] = {152, 16},
+ [VCAP_IS1_HK_L4_RNG] = {168, 8},
+ /* Specific Fields for IS1 Half Key S1_5TUPLE_IP4 */
+ [VCAP_IS1_HK_IP4_INNER_TPID] = { 42, 1},
+ [VCAP_IS1_HK_IP4_INNER_VID] = { 43, 12},
+ [VCAP_IS1_HK_IP4_INNER_DEI] = { 55, 1},
+ [VCAP_IS1_HK_IP4_INNER_PCP] = { 56, 3},
+ [VCAP_IS1_HK_IP4_IP4] = { 59, 1},
+ [VCAP_IS1_HK_IP4_L3_FRAGMENT] = { 60, 1},
+ [VCAP_IS1_HK_IP4_L3_FRAG_OFS_GT0] = { 61, 1},
+ [VCAP_IS1_HK_IP4_L3_OPTIONS] = { 62, 1},
+ [VCAP_IS1_HK_IP4_L3_DSCP] = { 63, 6},
+ [VCAP_IS1_HK_IP4_L3_IP4_DIP] = { 69, 32},
+ [VCAP_IS1_HK_IP4_L3_IP4_SIP] = {101, 32},
+ [VCAP_IS1_HK_IP4_L3_PROTO] = {133, 8},
+ [VCAP_IS1_HK_IP4_TCP_UDP] = {141, 1},
+ [VCAP_IS1_HK_IP4_TCP] = {142, 1},
+ [VCAP_IS1_HK_IP4_L4_RNG] = {143, 8},
+ [VCAP_IS1_HK_IP4_IP_PAYLOAD_S1_5TUPLE] = {151, 32},
+};
+
+static const struct vcap_field vsc9959_vcap_is1_actions[] = {
+ [VCAP_IS1_ACT_DSCP_ENA] = { 0, 1},
+ [VCAP_IS1_ACT_DSCP_VAL] = { 1, 6},
+ [VCAP_IS1_ACT_QOS_ENA] = { 7, 1},
+ [VCAP_IS1_ACT_QOS_VAL] = { 8, 3},
+ [VCAP_IS1_ACT_DP_ENA] = { 11, 1},
+ [VCAP_IS1_ACT_DP_VAL] = { 12, 1},
+ [VCAP_IS1_ACT_PAG_OVERRIDE_MASK] = { 13, 8},
+ [VCAP_IS1_ACT_PAG_VAL] = { 21, 8},
+ [VCAP_IS1_ACT_RSV] = { 29, 9},
+ /* The fields below are incorrectly shifted by 2 in the manual */
+ [VCAP_IS1_ACT_VID_REPLACE_ENA] = { 38, 1},
+ [VCAP_IS1_ACT_VID_ADD_VAL] = { 39, 12},
+ [VCAP_IS1_ACT_FID_SEL] = { 51, 2},
+ [VCAP_IS1_ACT_FID_VAL] = { 53, 13},
+ [VCAP_IS1_ACT_PCP_DEI_ENA] = { 66, 1},
+ [VCAP_IS1_ACT_PCP_VAL] = { 67, 3},
+ [VCAP_IS1_ACT_DEI_VAL] = { 70, 1},
+ [VCAP_IS1_ACT_VLAN_POP_CNT_ENA] = { 71, 1},
+ [VCAP_IS1_ACT_VLAN_POP_CNT] = { 72, 2},
+ [VCAP_IS1_ACT_CUSTOM_ACE_TYPE_ENA] = { 74, 4},
+ [VCAP_IS1_ACT_HIT_STICKY] = { 78, 1},
+};
+
+static const struct vcap_field vsc9959_vcap_is2_keys[] = {
+ /* Common: 41 bits */
+ [VCAP_IS2_TYPE] = { 0, 4},
+ [VCAP_IS2_HK_FIRST] = { 4, 1},
+ [VCAP_IS2_HK_PAG] = { 5, 8},
+ [VCAP_IS2_HK_IGR_PORT_MASK] = { 13, 7},
+ [VCAP_IS2_HK_RSV2] = { 20, 1},
+ [VCAP_IS2_HK_HOST_MATCH] = { 21, 1},
+ [VCAP_IS2_HK_L2_MC] = { 22, 1},
+ [VCAP_IS2_HK_L2_BC] = { 23, 1},
+ [VCAP_IS2_HK_VLAN_TAGGED] = { 24, 1},
+ [VCAP_IS2_HK_VID] = { 25, 12},
+ [VCAP_IS2_HK_DEI] = { 37, 1},
+ [VCAP_IS2_HK_PCP] = { 38, 3},
+ /* MAC_ETYPE / MAC_LLC / MAC_SNAP / OAM common */
+ [VCAP_IS2_HK_L2_DMAC] = { 41, 48},
+ [VCAP_IS2_HK_L2_SMAC] = { 89, 48},
+ /* MAC_ETYPE (TYPE=000) */
+ [VCAP_IS2_HK_MAC_ETYPE_ETYPE] = {137, 16},
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0] = {153, 16},
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1] = {169, 8},
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2] = {177, 3},
+ /* MAC_LLC (TYPE=001) */
+ [VCAP_IS2_HK_MAC_LLC_L2_LLC] = {137, 40},
+ /* MAC_SNAP (TYPE=010) */
+ [VCAP_IS2_HK_MAC_SNAP_L2_SNAP] = {137, 40},
+ /* MAC_ARP (TYPE=011) */
+ [VCAP_IS2_HK_MAC_ARP_SMAC] = { 41, 48},
+ [VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK] = { 89, 1},
+ [VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK] = { 90, 1},
+ [VCAP_IS2_HK_MAC_ARP_LEN_OK] = { 91, 1},
+ [VCAP_IS2_HK_MAC_ARP_TARGET_MATCH] = { 92, 1},
+ [VCAP_IS2_HK_MAC_ARP_SENDER_MATCH] = { 93, 1},
+ [VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN] = { 94, 1},
+ [VCAP_IS2_HK_MAC_ARP_OPCODE] = { 95, 2},
+ [VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP] = { 97, 32},
+ [VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP] = {129, 32},
+ [VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP] = {161, 1},
+ /* IP4_TCP_UDP / IP4_OTHER common */
+ [VCAP_IS2_HK_IP4] = { 41, 1},
+ [VCAP_IS2_HK_L3_FRAGMENT] = { 42, 1},
+ [VCAP_IS2_HK_L3_FRAG_OFS_GT0] = { 43, 1},
+ [VCAP_IS2_HK_L3_OPTIONS] = { 44, 1},
+ [VCAP_IS2_HK_IP4_L3_TTL_GT0] = { 45, 1},
+ [VCAP_IS2_HK_L3_TOS] = { 46, 8},
+ [VCAP_IS2_HK_L3_IP4_DIP] = { 54, 32},
+ [VCAP_IS2_HK_L3_IP4_SIP] = { 86, 32},
+ [VCAP_IS2_HK_DIP_EQ_SIP] = {118, 1},
+ /* IP4_TCP_UDP (TYPE=100) */
+ [VCAP_IS2_HK_TCP] = {119, 1},
+ [VCAP_IS2_HK_L4_DPORT] = {120, 16},
+ [VCAP_IS2_HK_L4_SPORT] = {136, 16},
+ [VCAP_IS2_HK_L4_RNG] = {152, 8},
+ [VCAP_IS2_HK_L4_SPORT_EQ_DPORT] = {160, 1},
+ [VCAP_IS2_HK_L4_SEQUENCE_EQ0] = {161, 1},
+ [VCAP_IS2_HK_L4_FIN] = {162, 1},
+ [VCAP_IS2_HK_L4_SYN] = {163, 1},
+ [VCAP_IS2_HK_L4_RST] = {164, 1},
+ [VCAP_IS2_HK_L4_PSH] = {165, 1},
+ [VCAP_IS2_HK_L4_ACK] = {166, 1},
+ [VCAP_IS2_HK_L4_URG] = {167, 1},
+ [VCAP_IS2_HK_L4_1588_DOM] = {168, 8},
+ [VCAP_IS2_HK_L4_1588_VER] = {176, 4},
+ /* IP4_OTHER (TYPE=101) */
+ [VCAP_IS2_HK_IP4_L3_PROTO] = {119, 8},
+ [VCAP_IS2_HK_L3_PAYLOAD] = {127, 56},
+ /* IP6_STD (TYPE=110) */
+ [VCAP_IS2_HK_IP6_L3_TTL_GT0] = { 41, 1},
+ [VCAP_IS2_HK_L3_IP6_SIP] = { 42, 128},
+ [VCAP_IS2_HK_IP6_L3_PROTO] = {170, 8},
+ /* OAM (TYPE=111) */
+ [VCAP_IS2_HK_OAM_MEL_FLAGS] = {137, 7},
+ [VCAP_IS2_HK_OAM_VER] = {144, 5},
+ [VCAP_IS2_HK_OAM_OPCODE] = {149, 8},
+ [VCAP_IS2_HK_OAM_FLAGS] = {157, 8},
+ [VCAP_IS2_HK_OAM_MEPID] = {165, 16},
+ [VCAP_IS2_HK_OAM_CCM_CNTS_EQ0] = {181, 1},
+ [VCAP_IS2_HK_OAM_IS_Y1731] = {182, 1},
+};
+
+static const struct vcap_field vsc9959_vcap_is2_actions[] = {
+ [VCAP_IS2_ACT_HIT_ME_ONCE] = { 0, 1},
+ [VCAP_IS2_ACT_CPU_COPY_ENA] = { 1, 1},
+ [VCAP_IS2_ACT_CPU_QU_NUM] = { 2, 3},
+ [VCAP_IS2_ACT_MASK_MODE] = { 5, 2},
+ [VCAP_IS2_ACT_MIRROR_ENA] = { 7, 1},
+ [VCAP_IS2_ACT_LRN_DIS] = { 8, 1},
+ [VCAP_IS2_ACT_POLICE_ENA] = { 9, 1},
+ [VCAP_IS2_ACT_POLICE_IDX] = { 10, 9},
+ [VCAP_IS2_ACT_POLICE_VCAP_ONLY] = { 19, 1},
+ [VCAP_IS2_ACT_PORT_MASK] = { 20, 6},
+ [VCAP_IS2_ACT_REW_OP] = { 26, 9},
+ [VCAP_IS2_ACT_SMAC_REPLACE_ENA] = { 35, 1},
+ [VCAP_IS2_ACT_RSV] = { 36, 2},
+ [VCAP_IS2_ACT_ACL_ID] = { 38, 6},
+ [VCAP_IS2_ACT_HIT_CNT] = { 44, 32},
+};
+
+static struct vcap_props vsc9959_vcap_props[] = {
+ [VCAP_ES0] = {
+ .action_type_width = 0,
+ .action_table = {
+ [ES0_ACTION_TYPE_NORMAL] = {
+ .width = 72, /* HIT_STICKY not included */
+ .count = 1,
+ },
+ },
+ .target = S0,
+ .keys = vsc9959_vcap_es0_keys,
+ .actions = vsc9959_vcap_es0_actions,
+ },
+ [VCAP_IS1] = {
+ .action_type_width = 0,
+ .action_table = {
+ [IS1_ACTION_TYPE_NORMAL] = {
+ .width = 78, /* HIT_STICKY not included */
+ .count = 4,
+ },
+ },
+ .target = S1,
+ .keys = vsc9959_vcap_is1_keys,
+ .actions = vsc9959_vcap_is1_actions,
+ },
+ [VCAP_IS2] = {
+ .action_type_width = 1,
+ .action_table = {
+ [IS2_ACTION_TYPE_NORMAL] = {
+ .width = 44,
+ .count = 2
+ },
+ [IS2_ACTION_TYPE_SMAC_SIP] = {
+ .width = 6,
+ .count = 4
+ },
+ },
+ .target = S2,
+ .keys = vsc9959_vcap_is2_keys,
+ .actions = vsc9959_vcap_is2_actions,
+ },
+};
+
+static const struct ptp_clock_info vsc9959_ptp_caps = {
+ .owner = THIS_MODULE,
+ .name = "felix ptp",
+ .max_adj = 0x7fffffff,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = OCELOT_PTP_PINS_NUM,
+ .n_pins = OCELOT_PTP_PINS_NUM,
+ .pps = 0,
+ .gettime64 = ocelot_ptp_gettime64,
+ .settime64 = ocelot_ptp_settime64,
+ .adjtime = ocelot_ptp_adjtime,
+ .adjfine = ocelot_ptp_adjfine,
+ .verify = ocelot_ptp_verify,
+ .enable = ocelot_ptp_enable,
+};
+
+#define VSC9959_INIT_TIMEOUT 50000
+#define VSC9959_GCB_RST_SLEEP 100
+#define VSC9959_SYS_RAMINIT_SLEEP 80
+
+static int vsc9959_gcb_soft_rst_status(struct ocelot *ocelot)
+{
+ int val;
+
+ ocelot_field_read(ocelot, GCB_SOFT_RST_SWC_RST, &val);
+
+ return val;
+}
+
+static int vsc9959_sys_ram_init_status(struct ocelot *ocelot)
+{
+ return ocelot_read(ocelot, SYS_RAM_INIT);
+}
+
+/* CORE_ENA is in SYS:SYSTEM:RESET_CFG
+ * RAM_INIT is in SYS:RAM_CTRL:RAM_INIT
+ */
+static int vsc9959_reset(struct ocelot *ocelot)
+{
+ int val, err;
+
+ /* soft-reset the switch core */
+ ocelot_field_write(ocelot, GCB_SOFT_RST_SWC_RST, 1);
+
+ err = readx_poll_timeout(vsc9959_gcb_soft_rst_status, ocelot, val, !val,
+ VSC9959_GCB_RST_SLEEP, VSC9959_INIT_TIMEOUT);
+ if (err) {
+ dev_err(ocelot->dev, "timeout: switch core reset\n");
+ return err;
+ }
+
+ /* Initialize the switch memories; takes ~40 us */
+ ocelot_write(ocelot, SYS_RAM_INIT_RAM_INIT, SYS_RAM_INIT);
+ err = readx_poll_timeout(vsc9959_sys_ram_init_status, ocelot, val, !val,
+ VSC9959_SYS_RAMINIT_SLEEP,
+ VSC9959_INIT_TIMEOUT);
+ if (err) {
+ dev_err(ocelot->dev, "timeout: switch sram init\n");
+ return err;
+ }
+
+ /* enable switch core */
+ ocelot_field_write(ocelot, SYS_RESET_CFG_CORE_ENA, 1);
+
+ return 0;
+}
+
+static void vsc9959_phylink_validate(struct ocelot *ocelot, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Autoneg);
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 1000baseT_Half);
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+
+ if (state->interface == PHY_INTERFACE_MODE_INTERNAL ||
+ state->interface == PHY_INTERFACE_MODE_2500BASEX ||
+ state->interface == PHY_INTERFACE_MODE_USXGMII) {
+ phylink_set(mask, 2500baseT_Full);
+ phylink_set(mask, 2500baseX_Full);
+ }
+
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
+}
+
+/* Watermark encode
+ * Bit 8: Unit; 0:1, 1:16
+ * Bit 7-0: Value to be multiplied with unit
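+ * e.g. 600 encodes as BIT(8) | (600 / 16) = 0x125, which decodes back
+ * to 37 * 16 = 592 due to the 16-byte granularity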
+ */
+static u16 vsc9959_wm_enc(u16 value)
+{
+ WARN_ON(value >= 16 * BIT(8));
+
+ if (value >= BIT(8))
+ return BIT(8) | (value / 16);
+
+ return value;
+}
+
+static u16 vsc9959_wm_dec(u16 wm)
+{
+ WARN_ON(wm & ~GENMASK(8, 0));
+
+ if (wm & BIT(8))
+ return (wm & GENMASK(7, 0)) * 16;
+
+ return wm;
+}
+
+static void vsc9959_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
+{
+ *inuse = (val & GENMASK(23, 12)) >> 12;
+ *maxuse = val & GENMASK(11, 0);
+}
+
+static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
+{
+ struct pci_dev *pdev = to_pci_dev(ocelot->dev);
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct enetc_mdio_priv *mdio_priv;
+ struct device *dev = ocelot->dev;
+ resource_size_t imdio_base;
+ void __iomem *imdio_regs;
+ struct resource res;
+ struct enetc_hw *hw;
+ struct mii_bus *bus;
+ int port;
+ int rc;
+
+ felix->pcs = devm_kcalloc(dev, felix->info->num_ports,
+ sizeof(struct phylink_pcs *),
+ GFP_KERNEL);
+ if (!felix->pcs) {
+ dev_err(dev, "failed to allocate array for PCS PHYs\n");
+ return -ENOMEM;
+ }
+
+ imdio_base = pci_resource_start(pdev, VSC9959_IMDIO_PCI_BAR);
+
+ memcpy(&res, &vsc9959_imdio_res, sizeof(res));
+ res.start += imdio_base;
+ res.end += imdio_base;
+
+ imdio_regs = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(imdio_regs))
+ return PTR_ERR(imdio_regs);
+
+ hw = enetc_hw_alloc(dev, imdio_regs);
+ if (IS_ERR(hw)) {
+ dev_err(dev, "failed to allocate ENETC HW structure\n");
+ return PTR_ERR(hw);
+ }
+
+ bus = mdiobus_alloc_size(sizeof(*mdio_priv));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "VSC9959 internal MDIO bus";
+ bus->read = enetc_mdio_read;
+ bus->write = enetc_mdio_write;
+ bus->parent = dev;
+ mdio_priv = bus->priv;
+ mdio_priv->hw = hw;
+ /* This gets added to imdio_regs, which already maps addresses
+ * starting with the proper offset.
+ */
+ mdio_priv->mdio_base = 0;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-imdio", dev_name(dev));
+
+ /* Needed in order to initialize the bus mutex lock */
+ rc = mdiobus_register(bus);
+ if (rc < 0) {
+ dev_err(dev, "failed to register MDIO bus\n");
+ mdiobus_free(bus);
+ return rc;
+ }
+
+ felix->imdio = bus;
+
+ for (port = 0; port < felix->info->num_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct phylink_pcs *phylink_pcs;
+ struct mdio_device *mdio_device;
+
+ if (dsa_is_unused_port(felix->ds, port))
+ continue;
+
+ if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_INTERNAL)
+ continue;
+
+ mdio_device = mdio_device_create(felix->imdio, port);
+ if (IS_ERR(mdio_device))
+ continue;
+
+ phylink_pcs = lynx_pcs_create(mdio_device);
+ if (!phylink_pcs) {
+ mdio_device_free(mdio_device);
+ continue;
+ }
+
+ felix->pcs[port] = phylink_pcs;
+
+ dev_info(dev, "Found PCS at internal MDIO address %d\n", port);
+ }
+
+ return 0;
+}
+
+static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
+{
+ struct felix *felix = ocelot_to_felix(ocelot);
+ int port;
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct phylink_pcs *phylink_pcs = felix->pcs[port];
+ struct mdio_device *mdio_device;
+
+ if (!phylink_pcs)
+ continue;
+
+ mdio_device = lynx_get_mdio_device(phylink_pcs);
+ mdio_device_free(mdio_device);
+ lynx_pcs_destroy(phylink_pcs);
+ }
+ mdiobus_unregister(felix->imdio);
+ mdiobus_free(felix->imdio);
+}
+
+/* The switch considers any frame (regardless of size) as eligible for
+ * transmission if the traffic class gate is open for at least 33 ns.
+ * Overruns are prevented by cropping an interval at the end of the gate time
+ * slot during which egress scheduling is blocked, but we still need to keep
+ * 33 ns available for one packet to be transmitted, otherwise the port's
+ * traffic class will hang.
+ * This function returns the size of a gate interval that remains available for
+ * setting the guard band, after reserving the space for one egress frame.
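+ * e.g. a gate that is open for 1000 ns leaves (1000 - 33) * 1000 =
+ * 967000 ps available for the guard band.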
+ */
+static u64 vsc9959_tas_remaining_gate_len_ps(u64 gate_len_ns)
+{
+ /* Gate always open */
+ if (gate_len_ns == U64_MAX)
+ return U64_MAX;
+
+ if (gate_len_ns < VSC9959_TAS_MIN_GATE_LEN_NS)
+ return 0;
+
+ return (gate_len_ns - VSC9959_TAS_MIN_GATE_LEN_NS) * PSEC_PER_NSEC;
+}
+
+/* Extract shortest continuous gate open intervals in ns for each traffic class
+ * of a cyclic tc-taprio schedule. If a gate is always open, the duration is
+ * considered U64_MAX. If the gate is always closed, it is considered 0.
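+ * e.g. a two-entry schedule of {gate_mask 0x3, 300 ns} followed by
+ * {gate_mask 0x1, 500 ns} yields min_gate_len[0] = U64_MAX (TC0 never
+ * closes across cycle boundaries) and min_gate_len[1] = 300.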
+ */
+static void vsc9959_tas_min_gate_lengths(struct tc_taprio_qopt_offload *taprio,
+ u64 min_gate_len[OCELOT_NUM_TC])
+{
+ struct tc_taprio_sched_entry *entry;
+ u64 gate_len[OCELOT_NUM_TC];
+ u8 gates_ever_opened = 0;
+ int tc, i, n;
+
+ /* Initialize arrays */
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++) {
+ min_gate_len[tc] = U64_MAX;
+ gate_len[tc] = 0;
+ }
+
+ /* If we don't have taprio, consider all gates as permanently open */
+ if (!taprio)
+ return;
+
+ n = taprio->num_entries;
+
+ /* Walk through the gate list twice to determine the length
+ * of consecutively open gates for a traffic class, including
+ * open gates that wrap around. We are just interested in the
+ * minimum window size, and this doesn't change what the
+ * minimum is (if the gate never closes, min_gate_len will
+ * remain U64_MAX).
+ */
+ for (i = 0; i < 2 * n; i++) {
+ entry = &taprio->entries[i % n];
+
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++) {
+ if (entry->gate_mask & BIT(tc)) {
+ gate_len[tc] += entry->interval;
+ gates_ever_opened |= BIT(tc);
+ } else {
+ /* Gate closes now, record a potential new
+ * minimum and reinitialize length
+ */
+ if (min_gate_len[tc] > gate_len[tc] &&
+ gate_len[tc])
+ min_gate_len[tc] = gate_len[tc];
+ gate_len[tc] = 0;
+ }
+ }
+ }
+
+ /* min_gate_len[tc] actually tracks minimum *open* gate time, so for
+ * permanently closed gates, min_gate_len[tc] will still be U64_MAX.
+ * Therefore they are currently indistinguishable from permanently
+ * open gates. Overwrite the gate len with 0 when we know they're
+ * actually permanently closed, i.e. after the loop above.
+ */
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++)
+ if (!(gates_ever_opened & BIT(tc)))
+ min_gate_len[tc] = 0;
+}
+
+/* ocelot_write_rix is a macro that concatenates QSYS_QMAXSDU_CFG_* with _RSZ,
+ * so we need to spell out the register access to each traffic class in helper
+ * functions, to simplify callers
+ */
+static void vsc9959_port_qmaxsdu_set(struct ocelot *ocelot, int port, int tc,
+ u32 max_sdu)
+{
+ switch (tc) {
+ case 0:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_0,
+ port);
+ break;
+ case 1:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_1,
+ port);
+ break;
+ case 2:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_2,
+ port);
+ break;
+ case 3:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_3,
+ port);
+ break;
+ case 4:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_4,
+ port);
+ break;
+ case 5:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_5,
+ port);
+ break;
+ case 6:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_6,
+ port);
+ break;
+ case 7:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_7,
+ port);
+ break;
+ }
+}
+
+static u32 vsc9959_port_qmaxsdu_get(struct ocelot *ocelot, int port, int tc)
+{
+ switch (tc) {
+ case 0: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_0, port);
+ case 1: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_1, port);
+ case 2: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_2, port);
+ case 3: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_3, port);
+ case 4: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_4, port);
+ case 5: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_5, port);
+ case 6: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_6, port);
+ case 7: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_7, port);
+ default:
+ return 0;
+ }
+}
+
+static u32 vsc9959_tas_tc_max_sdu(struct tc_taprio_qopt_offload *taprio, int tc)
+{
+ if (!taprio || !taprio->max_sdu[tc])
+ return 0;
+
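+ /* e.g. a requested max SDU of 1500 programs a limit of
+ * 1500 + 14 + 2 * 4 + 4 = 1526 octets on the wire
+ */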
+ return taprio->max_sdu[tc] + ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN;
+}
+
+/* Update QSYS_PORT_MAX_SDU to make sure the static guard bands added by the
+ * switch (see the ALWAYS_GUARD_BAND_SCH_Q comment) are correct at all MTU
+ * values (the default value is 1518). Also, for traffic class windows smaller
+ * than one MTU sized frame, update QSYS_QMAXSDU_CFG to enable oversized frame
+ * dropping, such that these won't hang the port, as they will never be sent.
+ */
+static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct tc_taprio_qopt_offload *taprio;
+ u64 min_gate_len[OCELOT_NUM_TC];
+ int speed, picos_per_byte;
+ u64 needed_bit_time_ps;
+ u32 val, maxlen;
+ u8 tas_speed;
+ int tc;
+
+ lockdep_assert_held(&ocelot->tas_lock);
+
+ taprio = ocelot_port->taprio;
+
+ val = ocelot_read_rix(ocelot, QSYS_TAG_CONFIG, port);
+ tas_speed = QSYS_TAG_CONFIG_LINK_SPEED_X(val);
+
+ switch (tas_speed) {
+ case OCELOT_SPEED_10:
+ speed = SPEED_10;
+ break;
+ case OCELOT_SPEED_100:
+ speed = SPEED_100;
+ break;
+ case OCELOT_SPEED_1000:
+ speed = SPEED_1000;
+ break;
+ case OCELOT_SPEED_2500:
+ speed = SPEED_2500;
+ break;
+ default:
+ return;
+ }
+
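+ /* e.g. (1000000 * 8) / 1000 = 8000 ps per byte at 1000 Mbps */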
+ picos_per_byte = (USEC_PER_SEC * 8) / speed;
+
+ val = ocelot_port_readl(ocelot_port, DEV_MAC_MAXLEN_CFG);
+ /* MAXLEN_CFG automatically accounts for VLAN headers, so the register
+ * value does not include them; we need to include them manually in the
+ * bit time calculation, plus the preamble and SFD.
+ */
+ maxlen = val + 2 * VLAN_HLEN;
+ /* Consider the standard Ethernet overhead of 8 octets preamble+SFD,
+ * 4 octets FCS, 12 octets IFG.
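+ * e.g. a MAXLEN_CFG of 1526 gives maxlen = 1534, requiring
+ * (1534 + 24) * 8000 ps = ~12.5 us of bit time at 1000 Mbps.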
+ */
+ needed_bit_time_ps = (u64)(maxlen + 24) * picos_per_byte;
+
+ dev_dbg(ocelot->dev,
+ "port %d: max frame size %d needs %llu ps at speed %d\n",
+ port, maxlen, needed_bit_time_ps, speed);
+
+ vsc9959_tas_min_gate_lengths(taprio, min_gate_len);
+
+ mutex_lock(&ocelot->fwd_domain_lock);
+
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++) {
+ u32 requested_max_sdu = vsc9959_tas_tc_max_sdu(taprio, tc);
+ u64 remaining_gate_len_ps;
+ u32 max_sdu;
+
+ remaining_gate_len_ps =
+ vsc9959_tas_remaining_gate_len_ps(min_gate_len[tc]);
+
+ if (remaining_gate_len_ps > needed_bit_time_ps) {
+ /* Setting QMAXSDU_CFG to 0 disables oversized frame
+ * dropping.
+ */
+ max_sdu = requested_max_sdu;
+ dev_dbg(ocelot->dev,
+ "port %d tc %d min gate len %llu"
+ ", sending all frames\n",
+ port, tc, min_gate_len[tc]);
+ } else {
+ /* If traffic class doesn't support a full MTU sized
+ * frame, make sure to enable oversize frame dropping
+ * for frames larger than the smallest that would fit.
+ *
+ * However, the exact same register, QSYS_QMAXSDU_CFG_*,
+ * controls not only oversized frame dropping, but also
+ * per-tc static guard band lengths, so it reduces the
+ * useful gate interval length. Therefore, be careful
+ * to calculate a guard band (and therefore max_sdu)
+ * that still leaves 33 ns available in the time slot.
+ */
+ max_sdu = div_u64(remaining_gate_len_ps, picos_per_byte);
+ /* A TC gate may be completely closed, which is a
+ * special case where all packets are oversized.
+ * Any limit smaller than 64 octets accomplishes this.
+ */
+ if (!max_sdu)
+ max_sdu = 1;
+ /* Take the L1 overhead into account, but don't allow
+ * max_sdu to go negative or to 0. We use 20 rather than
+ * 24 here because QSYS_QMAXSDU_CFG_* already counts the
+ * 4 FCS octets as part of the packet size.
+ */
+ if (max_sdu > 20)
+ max_sdu -= 20;
+
+ if (requested_max_sdu && requested_max_sdu < max_sdu)
+ max_sdu = requested_max_sdu;
+
+ dev_info(ocelot->dev,
+ "port %d tc %d min gate length %llu"
+ " ns not enough for max frame size %d at %d"
+ " Mbps, dropping frames over %d"
+ " octets including FCS\n",
+ port, tc, min_gate_len[tc], maxlen, speed,
+ max_sdu);
+ }
+
+ vsc9959_port_qmaxsdu_set(ocelot, port, tc, max_sdu);
+ }
+
+ ocelot_write_rix(ocelot, maxlen, QSYS_PORT_MAX_SDU, port);
+
+ ocelot->ops->cut_through_fwd(ocelot);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
+}
+
+static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
+ u32 speed)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ u8 tas_speed;
+
+ switch (speed) {
+ case SPEED_10:
+ tas_speed = OCELOT_SPEED_10;
+ break;
+ case SPEED_100:
+ tas_speed = OCELOT_SPEED_100;
+ break;
+ case SPEED_1000:
+ tas_speed = OCELOT_SPEED_1000;
+ break;
+ case SPEED_2500:
+ tas_speed = OCELOT_SPEED_2500;
+ break;
+ default:
+ tas_speed = OCELOT_SPEED_1000;
+ break;
+ }
+
+ mutex_lock(&ocelot->tas_lock);
+
+ ocelot_rmw_rix(ocelot,
+ QSYS_TAG_CONFIG_LINK_SPEED(tas_speed),
+ QSYS_TAG_CONFIG_LINK_SPEED_M,
+ QSYS_TAG_CONFIG, port);
+
+ if (ocelot_port->taprio)
+ vsc9959_tas_guard_bands_update(ocelot, port);
+
+ mutex_unlock(&ocelot->tas_lock);
+}
+
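+/* Advance a base time that lies in the past by a whole number of cycles,
+ * so that the time programmed into the hardware is the start of the next
+ * cycle in the future, e.g. a base time of 1 s with a 200 ms cycle at
+ * current time 1.5 s becomes 1.6 s.
+ */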
+static void vsc9959_new_base_time(struct ocelot *ocelot, ktime_t base_time,
+ u64 cycle_time,
+ struct timespec64 *new_base_ts)
+{
+ struct timespec64 ts;
+ ktime_t new_base_time;
+ ktime_t current_time;
+
+ ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
+ current_time = timespec64_to_ktime(ts);
+ new_base_time = base_time;
+
+ if (base_time < current_time) {
+ u64 nr_of_cycles = current_time - base_time;
+
+ do_div(nr_of_cycles, cycle_time);
+ new_base_time += cycle_time * (nr_of_cycles + 1);
+ }
+
+ *new_base_ts = ktime_to_timespec64(new_base_time);
+}
+
+static u32 vsc9959_tas_read_cfg_status(struct ocelot *ocelot)
+{
+ return ocelot_read(ocelot, QSYS_TAS_PARAM_CFG_CTRL);
+}
+
+static void vsc9959_tas_gcl_set(struct ocelot *ocelot, const u32 gcl_ix,
+ struct tc_taprio_sched_entry *entry)
+{
+ ocelot_write(ocelot,
+ QSYS_GCL_CFG_REG_1_GCL_ENTRY_NUM(gcl_ix) |
+ QSYS_GCL_CFG_REG_1_GATE_STATE(entry->gate_mask),
+ QSYS_GCL_CFG_REG_1);
+ ocelot_write(ocelot, entry->interval, QSYS_GCL_CFG_REG_2);
+}
+
+static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
+ struct tc_taprio_qopt_offload *taprio)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct timespec64 base_ts;
+ int ret, i;
+ u32 val;
+
+ mutex_lock(&ocelot->tas_lock);
+
+ if (!taprio->enable) {
+ ocelot_rmw_rix(ocelot, 0, QSYS_TAG_CONFIG_ENABLE,
+ QSYS_TAG_CONFIG, port);
+
+ taprio_offload_free(ocelot_port->taprio);
+ ocelot_port->taprio = NULL;
+
+ vsc9959_tas_guard_bands_update(ocelot, port);
+
+ mutex_unlock(&ocelot->tas_lock);
+ return 0;
+ }
+
+ if (taprio->cycle_time > NSEC_PER_SEC ||
+ taprio->cycle_time_extension >= NSEC_PER_SEC) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (taprio->num_entries > VSC9959_TAS_GCL_ENTRY_MAX) {
+ ret = -ERANGE;
+ goto err;
+ }
+
+ /* Enable guard band. The switch will schedule frames without taking
+ * their length into account. Thus we'll always need to enable the
+ * guard band which reserves the time of a maximum sized frame at the
+ * end of the time window.
+ *
+ * Although the ALWAYS_GUARD_BAND_SCH_Q bit is global for all ports, we
+ * need to set PORT_NUM, because subsequent writes to PARAM_CFG_REG_n
+ * operate on the port number.
+ */
+ ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(port) |
+ QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q,
+ QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M |
+ QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q,
+ QSYS_TAS_PARAM_CFG_CTRL);
+
+ /* Hardware erratum: the admin config cannot be overwritten while a
+ * previous config is still pending; the TAS module would need to be
+ * reset first.
+ */
+ val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_8);
+ if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ ocelot_rmw_rix(ocelot,
+ QSYS_TAG_CONFIG_ENABLE |
+ QSYS_TAG_CONFIG_INIT_GATE_STATE(0xFF) |
+ QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES(0xFF),
+ QSYS_TAG_CONFIG_ENABLE |
+ QSYS_TAG_CONFIG_INIT_GATE_STATE_M |
+ QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES_M,
+ QSYS_TAG_CONFIG, port);
+
+ vsc9959_new_base_time(ocelot, taprio->base_time,
+ taprio->cycle_time, &base_ts);
+ ocelot_write(ocelot, base_ts.tv_nsec, QSYS_PARAM_CFG_REG_1);
+ ocelot_write(ocelot, lower_32_bits(base_ts.tv_sec), QSYS_PARAM_CFG_REG_2);
+ val = upper_32_bits(base_ts.tv_sec);
+ ocelot_write(ocelot,
+ QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB(val) |
+ QSYS_PARAM_CFG_REG_3_LIST_LENGTH(taprio->num_entries),
+ QSYS_PARAM_CFG_REG_3);
+ ocelot_write(ocelot, taprio->cycle_time, QSYS_PARAM_CFG_REG_4);
+ ocelot_write(ocelot, taprio->cycle_time_extension, QSYS_PARAM_CFG_REG_5);
+
+ for (i = 0; i < taprio->num_entries; i++)
+ vsc9959_tas_gcl_set(ocelot, i, &taprio->entries[i]);
+
+ ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
+ QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
+ QSYS_TAS_PARAM_CFG_CTRL);
+
+ ret = readx_poll_timeout(vsc9959_tas_read_cfg_status, ocelot, val,
+ !(val & QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE),
+ 10, 100000);
+ if (ret)
+ goto err;
+
+ ocelot_port->taprio = taprio_offload_get(taprio);
+ vsc9959_tas_guard_bands_update(ocelot, port);
+
+err:
+ mutex_unlock(&ocelot->tas_lock);
+
+ return ret;
+}
+
+static void vsc9959_tas_clock_adjust(struct ocelot *ocelot)
+{
+ struct tc_taprio_qopt_offload *taprio;
+ struct ocelot_port *ocelot_port;
+ struct timespec64 base_ts;
+ int port;
+ u32 val;
+
+ mutex_lock(&ocelot->tas_lock);
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ ocelot_port = ocelot->ports[port];
+ taprio = ocelot_port->taprio;
+ if (!taprio)
+ continue;
+
+ ocelot_rmw(ocelot,
+ QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(port),
+ QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M,
+ QSYS_TAS_PARAM_CFG_CTRL);
+
+ /* Disable time-aware shaper */
+ ocelot_rmw_rix(ocelot, 0, QSYS_TAG_CONFIG_ENABLE,
+ QSYS_TAG_CONFIG, port);
+
+ vsc9959_new_base_time(ocelot, taprio->base_time,
+ taprio->cycle_time, &base_ts);
+
+ ocelot_write(ocelot, base_ts.tv_nsec, QSYS_PARAM_CFG_REG_1);
+ ocelot_write(ocelot, lower_32_bits(base_ts.tv_sec),
+ QSYS_PARAM_CFG_REG_2);
+ val = upper_32_bits(base_ts.tv_sec);
+ ocelot_rmw(ocelot,
+ QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB(val),
+ QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB_M,
+ QSYS_PARAM_CFG_REG_3);
+
+ ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
+ QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
+ QSYS_TAS_PARAM_CFG_CTRL);
+
+ /* Re-enable time-aware shaper */
+ ocelot_rmw_rix(ocelot, QSYS_TAG_CONFIG_ENABLE,
+ QSYS_TAG_CONFIG_ENABLE,
+ QSYS_TAG_CONFIG, port);
+ }
+ mutex_unlock(&ocelot->tas_lock);
+}
+
+static int vsc9959_qos_port_cbs_set(struct dsa_switch *ds, int port,
+ struct tc_cbs_qopt_offload *cbs_qopt)
+{
+ struct ocelot *ocelot = ds->priv;
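+ /* Each port has 8 scheduler elements, one per traffic class */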
+ int port_ix = port * 8 + cbs_qopt->queue;
+ u32 rate, burst;
+
+ if (cbs_qopt->queue >= ds->num_tx_queues)
+ return -EINVAL;
+
+ if (!cbs_qopt->enable) {
+ ocelot_write_gix(ocelot, QSYS_CIR_CFG_CIR_RATE(0) |
+ QSYS_CIR_CFG_CIR_BURST(0),
+ QSYS_CIR_CFG, port_ix);
+
+ ocelot_rmw_gix(ocelot, 0, QSYS_SE_CFG_SE_AVB_ENA,
+ QSYS_SE_CFG, port_ix);
+
+ return 0;
+ }
+
+ /* Rate unit is 100 kbps */
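+ /* e.g. an idleslope of 20000 programs CIR_RATE = 200 */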
+ rate = DIV_ROUND_UP(cbs_qopt->idleslope, 100);
+ /* Avoid using zero rate */
+ rate = clamp_t(u32, rate, 1, GENMASK(14, 0));
+ /* Burst unit is 4kB */
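+ /* e.g. a hicredit of 9000 bytes programs CIR_BURST = 3 */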
+ burst = DIV_ROUND_UP(cbs_qopt->hicredit, 4096);
+ /* Avoid using zero burst size */
+ burst = clamp_t(u32, burst, 1, GENMASK(5, 0));
+ ocelot_write_gix(ocelot,
+ QSYS_CIR_CFG_CIR_RATE(rate) |
+ QSYS_CIR_CFG_CIR_BURST(burst),
+ QSYS_CIR_CFG,
+ port_ix);
+
+ ocelot_rmw_gix(ocelot,
+ QSYS_SE_CFG_SE_FRM_MODE(0) |
+ QSYS_SE_CFG_SE_AVB_ENA,
+ QSYS_SE_CFG_SE_AVB_ENA |
+ QSYS_SE_CFG_SE_FRM_MODE_M,
+ QSYS_SE_CFG,
+ port_ix);
+
+ return 0;
+}
+
+static int vsc9959_qos_query_caps(struct tc_query_caps_base *base)
+{
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ caps->supports_queue_max_sdu = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int vsc9959_port_setup_tc(struct dsa_switch *ds, int port,
+ enum tc_setup_type type,
+ void *type_data)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ switch (type) {
+ case TC_QUERY_CAPS:
+ return vsc9959_qos_query_caps(type_data);
+ case TC_SETUP_QDISC_TAPRIO:
+ return vsc9959_qos_port_tas_set(ocelot, port, type_data);
+ case TC_SETUP_QDISC_CBS:
+ return vsc9959_qos_port_cbs_set(ds, port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+#define VSC9959_PSFP_SFID_MAX 175
+#define VSC9959_PSFP_GATE_ID_MAX 183
+#define VSC9959_PSFP_POLICER_BASE 63
+#define VSC9959_PSFP_POLICER_MAX 383
+#define VSC9959_PSFP_GATE_LIST_NUM 4
+#define VSC9959_PSFP_GATE_CYCLETIME_MIN 5000
+
+struct felix_stream {
+ struct list_head list;
+ unsigned long id;
+ bool dummy;
+ int ports;
+ int port;
+ u8 dmac[ETH_ALEN];
+ u16 vid;
+ s8 prio;
+ u8 sfid_valid;
+ u8 ssid_valid;
+ u32 sfid;
+ u32 ssid;
+};
+
+struct felix_stream_filter_counters {
+ u64 match;
+ u64 not_pass_gate;
+ u64 not_pass_sdu;
+ u64 red;
+};
+
+struct felix_stream_filter {
+ struct felix_stream_filter_counters stats;
+ struct list_head list;
+ refcount_t refcount;
+ u32 index;
+ u8 enable;
+ int portmask;
+ u8 sg_valid;
+ u32 sgid;
+ u8 fm_valid;
+ u32 fmid;
+ u8 prio_valid;
+ u8 prio;
+ u32 maxsdu;
+};
+
+struct felix_stream_gate {
+ u32 index;
+ u8 enable;
+ u8 ipv_valid;
+ u8 init_ipv;
+ u64 basetime;
+ u64 cycletime;
+ u64 cycletime_ext;
+ u32 num_entries;
+ struct action_gate_entry entries[];
+};
+
+struct felix_stream_gate_entry {
+ struct list_head list;
+ refcount_t refcount;
+ u32 index;
+};
+
+static int vsc9959_stream_identify(struct flow_cls_offload *f,
+ struct felix_stream *stream)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct flow_dissector *dissector = rule->match.dissector;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS)))
+ return -EOPNOTSUPP;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+ ether_addr_copy(stream->dmac, match.key->dst);
+ if (!is_zero_ether_addr(match.mask->src))
+ return -EOPNOTSUPP;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+ if (match.mask->vlan_priority)
+ stream->prio = match.key->vlan_priority;
+ else
+ stream->prio = -1;
+
+ if (!match.mask->vlan_id)
+ return -EOPNOTSUPP;
+ stream->vid = match.key->vlan_id;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ stream->id = f->cookie;
+
+ return 0;
+}
+
+static int vsc9959_mact_stream_set(struct ocelot *ocelot,
+ struct felix_stream *stream,
+ struct netlink_ext_ack *extack)
+{
+ enum macaccess_entry_type type;
+ int ret, sfid, ssid;
+ u32 vid, dst_idx;
+ u8 mac[ETH_ALEN];
+
+ ether_addr_copy(mac, stream->dmac);
+ vid = stream->vid;
+
+ /* Stream identification does not support adding a stream whose MAC
+ * entry has not yet been learned into the MAC table.
+ */
+ ret = ocelot_mact_lookup(ocelot, &dst_idx, mac, vid, &type);
+ if (ret) {
+ if (extack)
+ NL_SET_ERR_MSG_MOD(extack, "Stream is not learned in MAC table");
+ return -EOPNOTSUPP;
+ }
+
+ if ((stream->sfid_valid || stream->ssid_valid) &&
+ type == ENTRYTYPE_NORMAL)
+ type = ENTRYTYPE_LOCKED;
+
+ sfid = stream->sfid_valid ? stream->sfid : -1;
+ ssid = stream->ssid_valid ? stream->ssid : -1;
+
+ ret = ocelot_mact_learn_streamdata(ocelot, dst_idx, mac, vid, type,
+ sfid, ssid);
+
+ return ret;
+}
+
+static struct felix_stream *
+vsc9959_stream_table_lookup(struct list_head *stream_list,
+ struct felix_stream *stream)
+{
+ struct felix_stream *tmp;
+
+ list_for_each_entry(tmp, stream_list, list)
+ if (ether_addr_equal(tmp->dmac, stream->dmac) &&
+ tmp->vid == stream->vid)
+ return tmp;
+
+ return NULL;
+}
+
+static int vsc9959_stream_table_add(struct ocelot *ocelot,
+ struct list_head *stream_list,
+ struct felix_stream *stream,
+ struct netlink_ext_ack *extack)
+{
+ struct felix_stream *stream_entry;
+ int ret;
+
+ stream_entry = kmemdup(stream, sizeof(*stream_entry), GFP_KERNEL);
+ if (!stream_entry)
+ return -ENOMEM;
+
+ if (!stream->dummy) {
+ ret = vsc9959_mact_stream_set(ocelot, stream_entry, extack);
+ if (ret) {
+ kfree(stream_entry);
+ return ret;
+ }
+ }
+
+ list_add_tail(&stream_entry->list, stream_list);
+
+ return 0;
+}
+
+static struct felix_stream *
+vsc9959_stream_table_get(struct list_head *stream_list, unsigned long id)
+{
+ struct felix_stream *tmp;
+
+ list_for_each_entry(tmp, stream_list, list)
+ if (tmp->id == id)
+ return tmp;
+
+ return NULL;
+}
+
+static void vsc9959_stream_table_del(struct ocelot *ocelot,
+ struct felix_stream *stream)
+{
+ if (!stream->dummy)
+ vsc9959_mact_stream_set(ocelot, stream, NULL);
+
+ list_del(&stream->list);
+ kfree(stream);
+}
+
+static u32 vsc9959_sfi_access_status(struct ocelot *ocelot)
+{
+ return ocelot_read(ocelot, ANA_TABLES_SFIDACCESS);
+}
+
+static int vsc9959_psfp_sfi_set(struct ocelot *ocelot,
+ struct felix_stream_filter *sfi)
+{
+ u32 val;
+
+ if (sfi->index > VSC9959_PSFP_SFID_MAX)
+ return -EINVAL;
+
+ if (!sfi->enable) {
+ ocelot_write(ocelot, ANA_TABLES_SFIDTIDX_SFID_INDEX(sfi->index),
+ ANA_TABLES_SFIDTIDX);
+
+ val = ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(SFIDACCESS_CMD_WRITE);
+ ocelot_write(ocelot, val, ANA_TABLES_SFIDACCESS);
+
+ return readx_poll_timeout(vsc9959_sfi_access_status, ocelot, val,
+ (!ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(val)),
+ 10, 100000);
+ }
+
+ if (sfi->sgid > VSC9959_PSFP_GATE_ID_MAX ||
+ sfi->fmid > VSC9959_PSFP_POLICER_MAX)
+ return -EINVAL;
+
+ ocelot_write(ocelot,
+ (sfi->sg_valid ? ANA_TABLES_SFIDTIDX_SGID_VALID : 0) |
+ ANA_TABLES_SFIDTIDX_SGID(sfi->sgid) |
+ (sfi->fm_valid ? ANA_TABLES_SFIDTIDX_POL_ENA : 0) |
+ ANA_TABLES_SFIDTIDX_POL_IDX(sfi->fmid) |
+ ANA_TABLES_SFIDTIDX_SFID_INDEX(sfi->index),
+ ANA_TABLES_SFIDTIDX);
+
+ ocelot_write(ocelot,
+ (sfi->prio_valid ? ANA_TABLES_SFIDACCESS_IGR_PRIO_MATCH_ENA : 0) |
+ ANA_TABLES_SFIDACCESS_IGR_PRIO(sfi->prio) |
+ ANA_TABLES_SFIDACCESS_MAX_SDU_LEN(sfi->maxsdu) |
+ ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(SFIDACCESS_CMD_WRITE),
+ ANA_TABLES_SFIDACCESS);
+
+ return readx_poll_timeout(vsc9959_sfi_access_status, ocelot, val,
+ (!ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(val)),
+ 10, 100000);
+}
+
+static int vsc9959_psfp_sfidmask_set(struct ocelot *ocelot, u32 sfid, int ports)
+{
+ u32 val;
+
+ ocelot_rmw(ocelot,
+ ANA_TABLES_SFIDTIDX_SFID_INDEX(sfid),
+ ANA_TABLES_SFIDTIDX_SFID_INDEX_M,
+ ANA_TABLES_SFIDTIDX);
+
+ ocelot_write(ocelot,
+ ANA_TABLES_SFID_MASK_IGR_PORT_MASK(ports) |
+ ANA_TABLES_SFID_MASK_IGR_SRCPORT_MATCH_ENA,
+ ANA_TABLES_SFID_MASK);
+
+ ocelot_rmw(ocelot,
+ ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(SFIDACCESS_CMD_WRITE),
+ ANA_TABLES_SFIDACCESS_SFID_TBL_CMD_M,
+ ANA_TABLES_SFIDACCESS);
+
+ return readx_poll_timeout(vsc9959_sfi_access_status, ocelot, val,
+ (!ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(val)),
+ 10, 100000);
+}
+
+static int vsc9959_psfp_sfi_list_add(struct ocelot *ocelot,
+ struct felix_stream_filter *sfi,
+ struct list_head *pos)
+{
+ struct felix_stream_filter *sfi_entry;
+ int ret;
+
+ sfi_entry = kmemdup(sfi, sizeof(*sfi_entry), GFP_KERNEL);
+ if (!sfi_entry)
+ return -ENOMEM;
+
+ refcount_set(&sfi_entry->refcount, 1);
+
+ ret = vsc9959_psfp_sfi_set(ocelot, sfi_entry);
+ if (ret) {
+ kfree(sfi_entry);
+ return ret;
+ }
+
+ vsc9959_psfp_sfidmask_set(ocelot, sfi->index, sfi->portmask);
+
+ list_add(&sfi_entry->list, pos);
+
+ return 0;
+}
+
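+/* Reuse an existing stream filter with identical parameters if one exists,
+ * otherwise insert a new one at the first unused index, keeping the list
+ * sorted by index.
+ */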
+static int vsc9959_psfp_sfi_table_add(struct ocelot *ocelot,
+ struct felix_stream_filter *sfi)
+{
+ struct list_head *pos, *q, *last;
+ struct felix_stream_filter *tmp;
+ struct ocelot_psfp_list *psfp;
+ u32 insert = 0;
+
+ psfp = &ocelot->psfp;
+ last = &psfp->sfi_list;
+
+ list_for_each_safe(pos, q, &psfp->sfi_list) {
+ tmp = list_entry(pos, struct felix_stream_filter, list);
+ if (sfi->sg_valid == tmp->sg_valid &&
+ sfi->fm_valid == tmp->fm_valid &&
+ sfi->portmask == tmp->portmask &&
+ tmp->sgid == sfi->sgid &&
+ tmp->fmid == sfi->fmid) {
+ sfi->index = tmp->index;
+ refcount_inc(&tmp->refcount);
+ return 0;
+ }
+ /* Find the first unused index, keeping the list sorted. */
+ if (tmp->index == insert) {
+ last = pos;
+ insert++;
+ }
+ }
+ sfi->index = insert;
+
+ return vsc9959_psfp_sfi_list_add(ocelot, sfi, last);
+}
+
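+/* Add two stream filters back to back, at the first pair of consecutive
+ * unused indices, keeping the list sorted by index.
+ */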
+static int vsc9959_psfp_sfi_table_add2(struct ocelot *ocelot,
+ struct felix_stream_filter *sfi,
+ struct felix_stream_filter *sfi2)
+{
+ struct felix_stream_filter *tmp;
+ struct list_head *pos, *q, *last;
+ struct ocelot_psfp_list *psfp;
+ u32 insert = 0;
+ int ret;
+
+ psfp = &ocelot->psfp;
+ last = &psfp->sfi_list;
+
+ list_for_each_safe(pos, q, &psfp->sfi_list) {
+ tmp = list_entry(pos, struct felix_stream_filter, list);
+ /* Find the first two consecutive unused indices, keeping the list sorted. */
+ if (tmp->index >= insert + 2)
+ break;
+
+ insert = tmp->index + 1;
+ last = pos;
+ }
+ sfi->index = insert;
+
+ ret = vsc9959_psfp_sfi_list_add(ocelot, sfi, last);
+ if (ret)
+ return ret;
+
+ sfi2->index = insert + 1;
+
+ return vsc9959_psfp_sfi_list_add(ocelot, sfi2, last->next);
+}
+
+static struct felix_stream_filter *
+vsc9959_psfp_sfi_table_get(struct list_head *sfi_list, u32 index)
+{
+ struct felix_stream_filter *tmp;
+
+ list_for_each_entry(tmp, sfi_list, list)
+ if (tmp->index == index)
+ return tmp;
+
+ return NULL;
+}
+
+static void vsc9959_psfp_sfi_table_del(struct ocelot *ocelot, u32 index)
+{
+ struct felix_stream_filter *tmp, *n;
+ struct ocelot_psfp_list *psfp;
+
+ psfp = &ocelot->psfp;
+
+ list_for_each_entry_safe(tmp, n, &psfp->sfi_list, list)
+ if (tmp->index == index) {
+ if (refcount_dec_and_test(&tmp->refcount)) {
+ tmp->enable = 0;
+ vsc9959_psfp_sfi_set(ocelot, tmp);
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+ break;
+ }
+}
+
+static void vsc9959_psfp_parse_gate(const struct flow_action_entry *entry,
+ struct felix_stream_gate *sgi)
+{
+ sgi->index = entry->hw_index;
+ sgi->ipv_valid = (entry->gate.prio < 0) ? 0 : 1;
+ sgi->init_ipv = (sgi->ipv_valid) ? entry->gate.prio : 0;
+ sgi->basetime = entry->gate.basetime;
+ sgi->cycletime = entry->gate.cycletime;
+ sgi->num_entries = entry->gate.num_entries;
+ sgi->enable = 1;
+
+ memcpy(sgi->entries, entry->gate.entries,
+ entry->gate.num_entries * sizeof(struct action_gate_entry));
+}
+
+static u32 vsc9959_sgi_cfg_status(struct ocelot *ocelot)
+{
+ return ocelot_read(ocelot, ANA_SG_ACCESS_CTRL);
+}
+
+static int vsc9959_psfp_sgi_set(struct ocelot *ocelot,
+ struct felix_stream_gate *sgi)
+{
+ struct action_gate_entry *e;
+ struct timespec64 base_ts;
+ u32 interval_sum = 0;
+ u32 val;
+ int i;
+
+ if (sgi->index > VSC9959_PSFP_GATE_ID_MAX)
+ return -EINVAL;
+
+ ocelot_write(ocelot, ANA_SG_ACCESS_CTRL_SGID(sgi->index),
+ ANA_SG_ACCESS_CTRL);
+
+ if (!sgi->enable) {
+ ocelot_rmw(ocelot, ANA_SG_CONFIG_REG_3_INIT_GATE_STATE,
+ ANA_SG_CONFIG_REG_3_INIT_GATE_STATE |
+ ANA_SG_CONFIG_REG_3_GATE_ENABLE,
+ ANA_SG_CONFIG_REG_3);
+
+ return 0;
+ }
+
+ if (sgi->cycletime < VSC9959_PSFP_GATE_CYCLETIME_MIN ||
+ sgi->cycletime > NSEC_PER_SEC)
+ return -EINVAL;
+
+ if (sgi->num_entries > VSC9959_PSFP_GATE_LIST_NUM)
+ return -EINVAL;
+
+ vsc9959_new_base_time(ocelot, sgi->basetime, sgi->cycletime, &base_ts);
+ ocelot_write(ocelot, base_ts.tv_nsec, ANA_SG_CONFIG_REG_1);
+ val = lower_32_bits(base_ts.tv_sec);
+ ocelot_write(ocelot, val, ANA_SG_CONFIG_REG_2);
+
+ val = upper_32_bits(base_ts.tv_sec);
+ ocelot_write(ocelot,
+ (sgi->ipv_valid ? ANA_SG_CONFIG_REG_3_IPV_VALID : 0) |
+ ANA_SG_CONFIG_REG_3_INIT_IPV(sgi->init_ipv) |
+ ANA_SG_CONFIG_REG_3_GATE_ENABLE |
+ ANA_SG_CONFIG_REG_3_LIST_LENGTH(sgi->num_entries) |
+ ANA_SG_CONFIG_REG_3_INIT_GATE_STATE |
+ ANA_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB(val),
+ ANA_SG_CONFIG_REG_3);
+
+ ocelot_write(ocelot, sgi->cycletime, ANA_SG_CONFIG_REG_4);
+
+ e = sgi->entries;
+ for (i = 0; i < sgi->num_entries; i++) {
+ u32 ips = (e[i].ipv < 0) ? 0 : (e[i].ipv + 8);
+
+ ocelot_write_rix(ocelot, ANA_SG_GCL_GS_CONFIG_IPS(ips) |
+ (e[i].gate_state ?
+ ANA_SG_GCL_GS_CONFIG_GATE_STATE : 0),
+ ANA_SG_GCL_GS_CONFIG, i);
+
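+ /* The time interval registers hold the cumulative offset at which
+ * each gate entry ends, hence the running sum.
+ */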
+ interval_sum += e[i].interval;
+ ocelot_write_rix(ocelot, interval_sum, ANA_SG_GCL_TI_CONFIG, i);
+ }
+
+ ocelot_rmw(ocelot, ANA_SG_ACCESS_CTRL_CONFIG_CHANGE,
+ ANA_SG_ACCESS_CTRL_CONFIG_CHANGE,
+ ANA_SG_ACCESS_CTRL);
+
+ return readx_poll_timeout(vsc9959_sgi_cfg_status, ocelot, val,
+ (!(ANA_SG_ACCESS_CTRL_CONFIG_CHANGE & val)),
+ 10, 100000);
+}
+
+static int vsc9959_psfp_sgi_table_add(struct ocelot *ocelot,
+ struct felix_stream_gate *sgi)
+{
+ struct felix_stream_gate_entry *tmp;
+ struct ocelot_psfp_list *psfp;
+ int ret;
+
+ psfp = &ocelot->psfp;
+
+ list_for_each_entry(tmp, &psfp->sgi_list, list)
+ if (tmp->index == sgi->index) {
+ refcount_inc(&tmp->refcount);
+ return 0;
+ }
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ ret = vsc9959_psfp_sgi_set(ocelot, sgi);
+ if (ret) {
+ kfree(tmp);
+ return ret;
+ }
+
+ tmp->index = sgi->index;
+ refcount_set(&tmp->refcount, 1);
+ list_add_tail(&tmp->list, &psfp->sgi_list);
+
+ return 0;
+}
+
+static void vsc9959_psfp_sgi_table_del(struct ocelot *ocelot,
+ u32 index)
+{
+ struct felix_stream_gate_entry *tmp, *n;
+ struct felix_stream_gate sgi = {0};
+ struct ocelot_psfp_list *psfp;
+
+ psfp = &ocelot->psfp;
+
+ list_for_each_entry_safe(tmp, n, &psfp->sgi_list, list)
+ if (tmp->index == index) {
+ if (refcount_dec_and_test(&tmp->refcount)) {
+ sgi.index = index;
+ sgi.enable = 0;
+ vsc9959_psfp_sgi_set(ocelot, &sgi);
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+ break;
+ }
+}
+
+static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct felix_stream_filter old_sfi, *sfi_entry;
+ struct felix_stream_filter sfi = {0};
+ const struct flow_action_entry *a;
+ struct felix_stream *stream_entry;
+ struct felix_stream stream = {0};
+ struct felix_stream_gate *sgi;
+ struct ocelot_psfp_list *psfp;
+ struct ocelot_policer pol;
+ int ret, i, size;
+ u64 rate, burst;
+ u32 index;
+
+ psfp = &ocelot->psfp;
+
+ ret = vsc9959_stream_identify(f, &stream);
+ if (ret) {
+ NL_SET_ERR_MSG_MOD(extack, "Only can match on VID, PCP, and dest MAC");
+ return ret;
+ }
+
+ mutex_lock(&psfp->lock);
+
+ flow_action_for_each(i, a, &f->rule->action) {
+ switch (a->id) {
+ case FLOW_ACTION_GATE:
+ size = struct_size(sgi, entries, a->gate.num_entries);
+ sgi = kzalloc(size, GFP_KERNEL);
+ if (!sgi) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ vsc9959_psfp_parse_gate(a, sgi);
+ ret = vsc9959_psfp_sgi_table_add(ocelot, sgi);
+ if (ret) {
+ kfree(sgi);
+ goto err;
+ }
+ sfi.sg_valid = 1;
+ sfi.sgid = sgi->index;
+ kfree(sgi);
+ break;
+ case FLOW_ACTION_POLICE:
+ index = a->hw_index + VSC9959_PSFP_POLICER_BASE;
+ if (index > VSC9959_PSFP_POLICER_MAX) {
+ ret = -EINVAL;
+ goto err;
+ }
+
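+ /* Convert to the units expected by struct ocelot_policer: rate
+ * from bytes/s to kbit/s, and burst from a duration in ns at
+ * that rate to a byte count.
+ */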
+ rate = a->police.rate_bytes_ps;
+ burst = rate * PSCHED_NS2TICKS(a->police.burst);
+ pol = (struct ocelot_policer) {
+ .burst = div_u64(burst, PSCHED_TICKS_PER_SEC),
+ .rate = div_u64(rate, 1000) * 8,
+ };
+ ret = ocelot_vcap_policer_add(ocelot, index, &pol);
+ if (ret)
+ goto err;
+
+ sfi.fm_valid = 1;
+ sfi.fmid = index;
+ sfi.maxsdu = a->police.mtu;
+ break;
+ default:
+ mutex_unlock(&psfp->lock);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ stream.ports = BIT(port);
+ stream.port = port;
+
+ sfi.portmask = stream.ports;
+ sfi.prio_valid = (stream.prio < 0 ? 0 : 1);
+ sfi.prio = (sfi.prio_valid ? stream.prio : 0);
+ sfi.enable = 1;
+
+ /* Check if stream is set. */
+ stream_entry = vsc9959_stream_table_lookup(&psfp->stream_list, &stream);
+ if (stream_entry) {
+ if (stream_entry->ports & BIT(port)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "The stream is added on this port");
+ ret = -EEXIST;
+ goto err;
+ }
+
+ if (stream_entry->ports != BIT(stream_entry->port)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "The stream is added on two ports");
+ ret = -EEXIST;
+ goto err;
+ }
+
+ stream_entry->ports |= BIT(port);
+ stream.ports = stream_entry->ports;
+
+ sfi_entry = vsc9959_psfp_sfi_table_get(&psfp->sfi_list,
+ stream_entry->sfid);
+ memcpy(&old_sfi, sfi_entry, sizeof(old_sfi));
+
+ vsc9959_psfp_sfi_table_del(ocelot, stream_entry->sfid);
+
+ old_sfi.portmask = stream_entry->ports;
+ sfi.portmask = stream.ports;
+
+ if (stream_entry->port > port) {
+ ret = vsc9959_psfp_sfi_table_add2(ocelot, &sfi,
+ &old_sfi);
+ stream_entry->dummy = true;
+ } else {
+ ret = vsc9959_psfp_sfi_table_add2(ocelot, &old_sfi,
+ &sfi);
+ stream.dummy = true;
+ }
+ if (ret)
+ goto err;
+
+ stream_entry->sfid = old_sfi.index;
+ } else {
+ ret = vsc9959_psfp_sfi_table_add(ocelot, &sfi);
+ if (ret)
+ goto err;
+ }
+
+ stream.sfid = sfi.index;
+ stream.sfid_valid = 1;
+ ret = vsc9959_stream_table_add(ocelot, &psfp->stream_list,
+ &stream, extack);
+ if (ret) {
+ vsc9959_psfp_sfi_table_del(ocelot, stream.sfid);
+ goto err;
+ }
+
+ mutex_unlock(&psfp->lock);
+
+ return 0;
+
+err:
+ if (sfi.sg_valid)
+ vsc9959_psfp_sgi_table_del(ocelot, sfi.sgid);
+
+ if (sfi.fm_valid)
+ ocelot_vcap_policer_del(ocelot, sfi.fmid);
+
+ mutex_unlock(&psfp->lock);
+
+ return ret;
+}
+
+static int vsc9959_psfp_filter_del(struct ocelot *ocelot,
+ struct flow_cls_offload *f)
+{
+ struct felix_stream *stream, tmp, *stream_entry;
+ struct ocelot_psfp_list *psfp = &ocelot->psfp;
+ struct felix_stream_filter *sfi;
+
+ mutex_lock(&psfp->lock);
+
+ stream = vsc9959_stream_table_get(&psfp->stream_list, f->cookie);
+ if (!stream) {
+ mutex_unlock(&psfp->lock);
+ return -ENOENT;
+ }
+
+ sfi = vsc9959_psfp_sfi_table_get(&psfp->sfi_list, stream->sfid);
+ if (!sfi) {
+ mutex_unlock(&psfp->lock);
+ return -ENOENT;
+ }
+
+ if (sfi->sg_valid)
+ vsc9959_psfp_sgi_table_del(ocelot, sfi->sgid);
+
+ if (sfi->fm_valid)
+ ocelot_vcap_policer_del(ocelot, sfi->fmid);
+
+ vsc9959_psfp_sfi_table_del(ocelot, stream->sfid);
+
+ memcpy(&tmp, stream, sizeof(tmp));
+
+ stream->sfid_valid = 0;
+ vsc9959_stream_table_del(ocelot, stream);
+
+ stream_entry = vsc9959_stream_table_lookup(&psfp->stream_list, &tmp);
+ if (stream_entry) {
+ stream_entry->ports = BIT(stream_entry->port);
+ if (stream_entry->dummy) {
+ stream_entry->dummy = false;
+ vsc9959_mact_stream_set(ocelot, stream_entry, NULL);
+ }
+ vsc9959_psfp_sfidmask_set(ocelot, stream_entry->sfid,
+ stream_entry->ports);
+ }
+
+ mutex_unlock(&psfp->lock);
+
+ return 0;
+}
+
+static void vsc9959_update_sfid_stats(struct ocelot *ocelot,
+ struct felix_stream_filter *sfi)
+{
+ struct felix_stream_filter_counters *s = &sfi->stats;
+ u32 match, not_pass_gate, not_pass_sdu, red;
+ u32 sfid = sfi->index;
+
+ lockdep_assert_held(&ocelot->stat_view_lock);
+
+ ocelot_rmw(ocelot, SYS_STAT_CFG_STAT_VIEW(sfid),
+ SYS_STAT_CFG_STAT_VIEW_M,
+ SYS_STAT_CFG);
+
+ match = ocelot_read(ocelot, SYS_COUNT_SF_MATCHING_FRAMES);
+ not_pass_gate = ocelot_read(ocelot, SYS_COUNT_SF_NOT_PASSING_FRAMES);
+ not_pass_sdu = ocelot_read(ocelot, SYS_COUNT_SF_NOT_PASSING_SDU);
+ red = ocelot_read(ocelot, SYS_COUNT_SF_RED_FRAMES);
+
+ /* Clear the PSFP counters. */
+ ocelot_write(ocelot,
+ SYS_STAT_CFG_STAT_VIEW(sfid) |
+ SYS_STAT_CFG_STAT_CLEAR_SHOT(0x10),
+ SYS_STAT_CFG);
+
+ s->match += match;
+ s->not_pass_gate += not_pass_gate;
+ s->not_pass_sdu += not_pass_sdu;
+ s->red += red;
+}
+
+/* Caller must hold &ocelot->stat_view_lock */
+static void vsc9959_update_stats(struct ocelot *ocelot)
+{
+ struct ocelot_psfp_list *psfp = &ocelot->psfp;
+ struct felix_stream_filter *sfi;
+
+ mutex_lock(&psfp->lock);
+
+ list_for_each_entry(sfi, &psfp->sfi_list, list)
+ vsc9959_update_sfid_stats(ocelot, sfi);
+
+ mutex_unlock(&psfp->lock);
+}
+
+static int vsc9959_psfp_stats_get(struct ocelot *ocelot,
+ struct flow_cls_offload *f,
+ struct flow_stats *stats)
+{
+ struct ocelot_psfp_list *psfp = &ocelot->psfp;
+ struct felix_stream_filter_counters *s;
+ struct felix_stream_filter *sfi;
+ struct felix_stream *stream;
+
+ stream = vsc9959_stream_table_get(&psfp->stream_list, f->cookie);
+ if (!stream)
+ return -ENOENT;
+
+ sfi = vsc9959_psfp_sfi_table_get(&psfp->sfi_list, stream->sfid);
+ if (!sfi)
+ return -EINVAL;
+
+ mutex_lock(&ocelot->stat_view_lock);
+
+ vsc9959_update_sfid_stats(ocelot, sfi);
+
+ s = &sfi->stats;
+ stats->pkts = s->match;
+ stats->drops = s->not_pass_gate + s->not_pass_sdu + s->red;
+
+ memset(s, 0, sizeof(*s));
+
+ mutex_unlock(&ocelot->stat_view_lock);
+
+ return 0;
+}
+
+static void vsc9959_psfp_init(struct ocelot *ocelot)
+{
+ struct ocelot_psfp_list *psfp = &ocelot->psfp;
+
+ INIT_LIST_HEAD(&psfp->stream_list);
+ INIT_LIST_HEAD(&psfp->sfi_list);
+ INIT_LIST_HEAD(&psfp->sgi_list);
+ mutex_init(&psfp->lock);
+}
+
+/* When using cut-through forwarding and the egress port runs at a higher data
+ * rate than the ingress port, the packet currently under transmission would
+ * suffer an underrun since it would be transmitted faster than it is received.
+ * The Felix switch implementation of cut-through forwarding does not check in
+ * hardware whether this condition is satisfied or not, so we must restrict the
+ * list of ports that have cut-through forwarding enabled on egress to only be
+ * the ports operating at the lowest link speed within their respective
+ * forwarding domain.
+ */
+static void vsc9959_cut_through_fwd(struct ocelot *ocelot)
+{
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct dsa_switch *ds = felix->ds;
+ int tc, port, other_port;
+
+ lockdep_assert_held(&ocelot->fwd_domain_lock);
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ int min_speed = ocelot_port->speed;
+ unsigned long mask = 0;
+ u32 tmp, val = 0;
+
+ /* Disable cut-through on ports that are down */
+ if (ocelot_port->speed <= 0)
+ goto set;
+
+ if (dsa_is_cpu_port(ds, port)) {
+ /* Ocelot switches forward from the NPI port towards
+ * any port, regardless of it being in the NPI port's
+ * forwarding domain or not.
+ */
+ mask = dsa_user_ports(ds);
+ } else {
+ mask = ocelot_get_bridge_fwd_mask(ocelot, port);
+ mask &= ~BIT(port);
+ if (ocelot->npi >= 0)
+ mask |= BIT(ocelot->npi);
+ else
+ mask |= ocelot_port_assigned_dsa_8021q_cpu_mask(ocelot,
+ port);
+ }
+
+ /* Calculate the minimum link speed, among the ports that are
+ * up, of this source port's forwarding domain.
+ */
+ for_each_set_bit(other_port, &mask, ocelot->num_phys_ports) {
+ struct ocelot_port *other_ocelot_port;
+
+ other_ocelot_port = ocelot->ports[other_port];
+ if (other_ocelot_port->speed <= 0)
+ continue;
+
+ if (min_speed > other_ocelot_port->speed)
+ min_speed = other_ocelot_port->speed;
+ }
+
+ /* Enable cut-through forwarding for all traffic classes that
+ * don't have oversized dropping enabled, since this check is
+ * bypassed in cut-through mode.
+ */
+ if (ocelot_port->speed == min_speed) {
+ val = GENMASK(7, 0);
+
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++)
+ if (vsc9959_port_qmaxsdu_get(ocelot, port, tc))
+ val &= ~BIT(tc);
+ }
+
+set:
+ tmp = ocelot_read_rix(ocelot, ANA_CUT_THRU_CFG, port);
+ if (tmp == val)
+ continue;
+
+ dev_dbg(ocelot->dev,
+ "port %d fwd mask 0x%lx speed %d min_speed %d, %s cut-through forwarding on TC mask 0x%x\n",
+ port, mask, ocelot_port->speed, min_speed,
+ val ? "enabling" : "disabling", val);
+
+ ocelot_write_rix(ocelot, val, ANA_CUT_THRU_CFG, port);
+ }
+}
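+
+/* Worked example (illustrative speeds): in a forwarding domain with ports
+ * running at 2500, 1000 and 100 Mbps, min_speed is 100, so only the 100 Mbps
+ * port keeps cut-through enabled on egress; every other port in its domain
+ * delivers data at least as fast as it transmits. The faster ports must
+ * store-and-forward, otherwise they could transmit a frame faster than the
+ * slower ingress port supplies its data.
+ */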
+
+static const struct ocelot_ops vsc9959_ops = {
+ .reset = vsc9959_reset,
+ .wm_enc = vsc9959_wm_enc,
+ .wm_dec = vsc9959_wm_dec,
+ .wm_stat = vsc9959_wm_stat,
+ .port_to_netdev = felix_port_to_netdev,
+ .netdev_to_port = felix_netdev_to_port,
+ .psfp_init = vsc9959_psfp_init,
+ .psfp_filter_add = vsc9959_psfp_filter_add,
+ .psfp_filter_del = vsc9959_psfp_filter_del,
+ .psfp_stats_get = vsc9959_psfp_stats_get,
+ .cut_through_fwd = vsc9959_cut_through_fwd,
+ .tas_clock_adjust = vsc9959_tas_clock_adjust,
+ .update_stats = vsc9959_update_stats,
+};
+
+static const struct felix_info felix_info_vsc9959 = {
+ .resources = vsc9959_resources,
+ .num_resources = ARRAY_SIZE(vsc9959_resources),
+ .resource_names = vsc9959_resource_names,
+ .regfields = vsc9959_regfields,
+ .map = vsc9959_regmap,
+ .ops = &vsc9959_ops,
+ .stats_layout = vsc9959_stats_layout,
+ .vcap = vsc9959_vcap_props,
+ .vcap_pol_base = VSC9959_VCAP_POLICER_BASE,
+ .vcap_pol_max = VSC9959_VCAP_POLICER_MAX,
+ .vcap_pol_base2 = 0,
+ .vcap_pol_max2 = 0,
+ .num_mact_rows = 2048,
+ .num_ports = VSC9959_NUM_PORTS,
+ .num_tx_queues = OCELOT_NUM_TC,
+ .quirk_no_xtr_irq = true,
+ .ptp_caps = &vsc9959_ptp_caps,
+ .mdio_bus_alloc = vsc9959_mdio_bus_alloc,
+ .mdio_bus_free = vsc9959_mdio_bus_free,
+ .phylink_validate = vsc9959_phylink_validate,
+ .port_modes = vsc9959_port_modes,
+ .port_setup_tc = vsc9959_port_setup_tc,
+ .port_sched_speed_set = vsc9959_sched_speed_set,
+ .tas_guard_bands_update = vsc9959_tas_guard_bands_update,
+};
+
+static irqreturn_t felix_irq_handler(int irq, void *data)
+{
+ struct ocelot *ocelot = (struct ocelot *)data;
+
+	/* The INTB interrupt is used for both the PTP TX timestamp interrupt
+	 * and the preemption status change interrupt on each port.
+	 *
+	 * - Retrieve any pending TX timestamps
+	 * - TODO: handle preemption. Without handling it, the driver may get
+	 *   an interrupt storm.
+	 */
+
+ ocelot_get_txtstamp(ocelot);
+
+ return IRQ_HANDLED;
+}
+
+static int felix_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct dsa_switch *ds;
+ struct ocelot *ocelot;
+ struct felix *felix;
+ int err;
+
+ if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) {
+ dev_info(&pdev->dev, "device is disabled, skipping\n");
+ return -ENODEV;
+ }
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "device enable failed\n");
+ goto err_pci_enable;
+ }
+
+ felix = kzalloc(sizeof(struct felix), GFP_KERNEL);
+ if (!felix) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "Failed to allocate driver memory\n");
+ goto err_alloc_felix;
+ }
+
+ pci_set_drvdata(pdev, felix);
+ ocelot = &felix->ocelot;
+ ocelot->dev = &pdev->dev;
+ ocelot->num_flooding_pgids = OCELOT_NUM_TC;
+ felix->info = &felix_info_vsc9959;
+ felix->switch_base = pci_resource_start(pdev, VSC9959_SWITCH_PCI_BAR);
+
+ pci_set_master(pdev);
+
+ err = devm_request_threaded_irq(&pdev->dev, pdev->irq, NULL,
+ &felix_irq_handler, IRQF_ONESHOT,
+ "felix-intb", ocelot);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to request irq\n");
+ goto err_alloc_irq;
+ }
+
+ ocelot->ptp = 1;
+
+ ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL);
+ if (!ds) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "Failed to allocate DSA switch\n");
+ goto err_alloc_ds;
+ }
+
+ ds->dev = &pdev->dev;
+ ds->num_ports = felix->info->num_ports;
+ ds->num_tx_queues = felix->info->num_tx_queues;
+ ds->ops = &felix_switch_ops;
+ ds->priv = ocelot;
+ felix->ds = ds;
+ felix->tag_proto = DSA_TAG_PROTO_OCELOT;
+
+ err = dsa_register_switch(ds);
+ if (err) {
+ dev_err_probe(&pdev->dev, err, "Failed to register DSA switch\n");
+ goto err_register_ds;
+ }
+
+ return 0;
+
+err_register_ds:
+ kfree(ds);
+err_alloc_ds:
+err_alloc_irq:
+ kfree(felix);
+err_alloc_felix:
+ pci_disable_device(pdev);
+err_pci_enable:
+ return err;
+}
+
+static void felix_pci_remove(struct pci_dev *pdev)
+{
+ struct felix *felix = pci_get_drvdata(pdev);
+
+ if (!felix)
+ return;
+
+ dsa_unregister_switch(felix->ds);
+
+ kfree(felix->ds);
+ kfree(felix);
+
+ pci_disable_device(pdev);
+}
+
+static void felix_pci_shutdown(struct pci_dev *pdev)
+{
+ struct felix *felix = pci_get_drvdata(pdev);
+
+ if (!felix)
+ return;
+
+ dsa_switch_shutdown(felix->ds);
+
+ pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_device_id felix_ids[] = {
+ {
+ /* NXP LS1028A */
+ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0xEEF0),
+ },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, felix_ids);
+
+static struct pci_driver felix_vsc9959_pci_driver = {
+ .name = "mscc_felix",
+ .id_table = felix_ids,
+ .probe = felix_pci_probe,
+ .remove = felix_pci_remove,
+ .shutdown = felix_pci_shutdown,
+};
+module_pci_driver(felix_vsc9959_pci_driver);
+
+MODULE_DESCRIPTION("Felix Switch driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
new file mode 100644
index 000000000..c2863d6d8
--- /dev/null
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -0,0 +1,1119 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Distributed Switch Architecture VSC9953 driver
+ * Copyright (C) 2020, Maxim Kochetkov <fido_max@inbox.ru>
+ */
+#include <linux/types.h>
+#include <soc/mscc/ocelot_vcap.h>
+#include <soc/mscc/ocelot_sys.h>
+#include <soc/mscc/ocelot.h>
+#include <linux/mdio/mdio-mscc-miim.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/pcs-lynx.h>
+#include <linux/dsa/ocelot.h>
+#include <linux/iopoll.h>
+#include "felix.h"
+
+#define VSC9953_NUM_PORTS 10
+
+#define VSC9953_VCAP_POLICER_BASE 11
+#define VSC9953_VCAP_POLICER_MAX 31
+#define VSC9953_VCAP_POLICER_BASE2 120
+#define VSC9953_VCAP_POLICER_MAX2 161
+
+#define VSC9953_PORT_MODE_SERDES (OCELOT_PORT_MODE_1000BASEX | \
+ OCELOT_PORT_MODE_SGMII | \
+ OCELOT_PORT_MODE_QSGMII)
+
+static const u32 vsc9953_port_modes[VSC9953_NUM_PORTS] = {
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ OCELOT_PORT_MODE_INTERNAL,
+ OCELOT_PORT_MODE_INTERNAL,
+};
+
+static const u32 vsc9953_ana_regmap[] = {
+ REG(ANA_ADVLEARN, 0x00b500),
+ REG(ANA_VLANMASK, 0x00b504),
+ REG_RESERVED(ANA_PORT_B_DOMAIN),
+ REG(ANA_ANAGEFIL, 0x00b50c),
+ REG(ANA_ANEVENTS, 0x00b510),
+ REG(ANA_STORMLIMIT_BURST, 0x00b514),
+ REG(ANA_STORMLIMIT_CFG, 0x00b518),
+ REG(ANA_ISOLATED_PORTS, 0x00b528),
+ REG(ANA_COMMUNITY_PORTS, 0x00b52c),
+ REG(ANA_AUTOAGE, 0x00b530),
+ REG(ANA_MACTOPTIONS, 0x00b534),
+ REG(ANA_LEARNDISC, 0x00b538),
+ REG(ANA_AGENCTRL, 0x00b53c),
+ REG(ANA_MIRRORPORTS, 0x00b540),
+ REG(ANA_EMIRRORPORTS, 0x00b544),
+ REG(ANA_FLOODING, 0x00b548),
+ REG(ANA_FLOODING_IPMC, 0x00b54c),
+ REG(ANA_SFLOW_CFG, 0x00b550),
+ REG(ANA_PORT_MODE, 0x00b57c),
+ REG_RESERVED(ANA_CUT_THRU_CFG),
+ REG(ANA_PGID_PGID, 0x00b600),
+ REG(ANA_TABLES_ANMOVED, 0x00b4ac),
+ REG(ANA_TABLES_MACHDATA, 0x00b4b0),
+ REG(ANA_TABLES_MACLDATA, 0x00b4b4),
+ REG_RESERVED(ANA_TABLES_STREAMDATA),
+ REG(ANA_TABLES_MACACCESS, 0x00b4b8),
+ REG(ANA_TABLES_MACTINDX, 0x00b4bc),
+ REG(ANA_TABLES_VLANACCESS, 0x00b4c0),
+ REG(ANA_TABLES_VLANTIDX, 0x00b4c4),
+ REG_RESERVED(ANA_TABLES_ISDXACCESS),
+ REG_RESERVED(ANA_TABLES_ISDXTIDX),
+ REG(ANA_TABLES_ENTRYLIM, 0x00b480),
+ REG_RESERVED(ANA_TABLES_PTP_ID_HIGH),
+ REG_RESERVED(ANA_TABLES_PTP_ID_LOW),
+ REG_RESERVED(ANA_TABLES_STREAMACCESS),
+ REG_RESERVED(ANA_TABLES_STREAMTIDX),
+ REG_RESERVED(ANA_TABLES_SEQ_HISTORY),
+ REG_RESERVED(ANA_TABLES_SEQ_MASK),
+ REG_RESERVED(ANA_TABLES_SFID_MASK),
+ REG_RESERVED(ANA_TABLES_SFIDACCESS),
+ REG_RESERVED(ANA_TABLES_SFIDTIDX),
+ REG_RESERVED(ANA_MSTI_STATE),
+ REG_RESERVED(ANA_OAM_UPM_LM_CNT),
+ REG_RESERVED(ANA_SG_ACCESS_CTRL),
+ REG_RESERVED(ANA_SG_CONFIG_REG_1),
+ REG_RESERVED(ANA_SG_CONFIG_REG_2),
+ REG_RESERVED(ANA_SG_CONFIG_REG_3),
+ REG_RESERVED(ANA_SG_CONFIG_REG_4),
+ REG_RESERVED(ANA_SG_CONFIG_REG_5),
+ REG_RESERVED(ANA_SG_GCL_GS_CONFIG),
+ REG_RESERVED(ANA_SG_GCL_TI_CONFIG),
+ REG_RESERVED(ANA_SG_STATUS_REG_1),
+ REG_RESERVED(ANA_SG_STATUS_REG_2),
+ REG_RESERVED(ANA_SG_STATUS_REG_3),
+ REG(ANA_PORT_VLAN_CFG, 0x000000),
+ REG(ANA_PORT_DROP_CFG, 0x000004),
+ REG(ANA_PORT_QOS_CFG, 0x000008),
+ REG(ANA_PORT_VCAP_CFG, 0x00000c),
+ REG(ANA_PORT_VCAP_S1_KEY_CFG, 0x000010),
+ REG(ANA_PORT_VCAP_S2_CFG, 0x00001c),
+ REG(ANA_PORT_PCP_DEI_MAP, 0x000020),
+ REG(ANA_PORT_CPU_FWD_CFG, 0x000060),
+ REG(ANA_PORT_CPU_FWD_BPDU_CFG, 0x000064),
+ REG(ANA_PORT_CPU_FWD_GARP_CFG, 0x000068),
+ REG(ANA_PORT_CPU_FWD_CCM_CFG, 0x00006c),
+ REG(ANA_PORT_PORT_CFG, 0x000070),
+ REG(ANA_PORT_POL_CFG, 0x000074),
+ REG_RESERVED(ANA_PORT_PTP_CFG),
+ REG_RESERVED(ANA_PORT_PTP_DLY1_CFG),
+ REG_RESERVED(ANA_PORT_PTP_DLY2_CFG),
+ REG_RESERVED(ANA_PORT_SFID_CFG),
+ REG(ANA_PFC_PFC_CFG, 0x00c000),
+ REG_RESERVED(ANA_PFC_PFC_TIMER),
+ REG_RESERVED(ANA_IPT_OAM_MEP_CFG),
+ REG_RESERVED(ANA_IPT_IPT),
+ REG_RESERVED(ANA_PPT_PPT),
+ REG_RESERVED(ANA_FID_MAP_FID_MAP),
+ REG(ANA_AGGR_CFG, 0x00c600),
+ REG(ANA_CPUQ_CFG, 0x00c604),
+ REG_RESERVED(ANA_CPUQ_CFG2),
+ REG(ANA_CPUQ_8021_CFG, 0x00c60c),
+ REG(ANA_DSCP_CFG, 0x00c64c),
+ REG(ANA_DSCP_REWR_CFG, 0x00c74c),
+ REG(ANA_VCAP_RNG_TYPE_CFG, 0x00c78c),
+ REG(ANA_VCAP_RNG_VAL_CFG, 0x00c7ac),
+ REG_RESERVED(ANA_VRAP_CFG),
+ REG_RESERVED(ANA_VRAP_HDR_DATA),
+ REG_RESERVED(ANA_VRAP_HDR_MASK),
+ REG(ANA_DISCARD_CFG, 0x00c7d8),
+ REG(ANA_FID_CFG, 0x00c7dc),
+ REG(ANA_POL_PIR_CFG, 0x00a000),
+ REG(ANA_POL_CIR_CFG, 0x00a004),
+ REG(ANA_POL_MODE_CFG, 0x00a008),
+ REG(ANA_POL_PIR_STATE, 0x00a00c),
+ REG(ANA_POL_CIR_STATE, 0x00a010),
+ REG_RESERVED(ANA_POL_STATE),
+ REG(ANA_POL_FLOWC, 0x00c280),
+ REG(ANA_POL_HYST, 0x00c2ec),
+ REG_RESERVED(ANA_POL_MISC_CFG),
+};
+
+static const u32 vsc9953_qs_regmap[] = {
+ REG(QS_XTR_GRP_CFG, 0x000000),
+ REG(QS_XTR_RD, 0x000008),
+ REG(QS_XTR_FRM_PRUNING, 0x000010),
+ REG(QS_XTR_FLUSH, 0x000018),
+ REG(QS_XTR_DATA_PRESENT, 0x00001c),
+ REG(QS_XTR_CFG, 0x000020),
+ REG(QS_INJ_GRP_CFG, 0x000024),
+ REG(QS_INJ_WR, 0x00002c),
+ REG(QS_INJ_CTRL, 0x000034),
+ REG(QS_INJ_STATUS, 0x00003c),
+ REG(QS_INJ_ERR, 0x000040),
+ REG_RESERVED(QS_INH_DBG),
+};
+
+static const u32 vsc9953_vcap_regmap[] = {
+ /* VCAP_CORE_CFG */
+ REG(VCAP_CORE_UPDATE_CTRL, 0x000000),
+ REG(VCAP_CORE_MV_CFG, 0x000004),
+ /* VCAP_CORE_CACHE */
+ REG(VCAP_CACHE_ENTRY_DAT, 0x000008),
+ REG(VCAP_CACHE_MASK_DAT, 0x000108),
+ REG(VCAP_CACHE_ACTION_DAT, 0x000208),
+ REG(VCAP_CACHE_CNT_DAT, 0x000308),
+ REG(VCAP_CACHE_TG_DAT, 0x000388),
+ /* VCAP_CONST */
+ REG(VCAP_CONST_VCAP_VER, 0x000398),
+ REG(VCAP_CONST_ENTRY_WIDTH, 0x00039c),
+ REG(VCAP_CONST_ENTRY_CNT, 0x0003a0),
+ REG(VCAP_CONST_ENTRY_SWCNT, 0x0003a4),
+ REG(VCAP_CONST_ENTRY_TG_WIDTH, 0x0003a8),
+ REG(VCAP_CONST_ACTION_DEF_CNT, 0x0003ac),
+ REG(VCAP_CONST_ACTION_WIDTH, 0x0003b0),
+ REG(VCAP_CONST_CNT_WIDTH, 0x0003b4),
+ REG_RESERVED(VCAP_CONST_CORE_CNT),
+ REG_RESERVED(VCAP_CONST_IF_CNT),
+};
+
+static const u32 vsc9953_qsys_regmap[] = {
+ REG(QSYS_PORT_MODE, 0x003600),
+ REG(QSYS_SWITCH_PORT_MODE, 0x003630),
+ REG(QSYS_STAT_CNT_CFG, 0x00365c),
+ REG(QSYS_EEE_CFG, 0x003660),
+ REG(QSYS_EEE_THRES, 0x003688),
+ REG(QSYS_IGR_NO_SHARING, 0x00368c),
+ REG(QSYS_EGR_NO_SHARING, 0x003690),
+ REG(QSYS_SW_STATUS, 0x003694),
+ REG(QSYS_EXT_CPU_CFG, 0x0036c0),
+ REG_RESERVED(QSYS_PAD_CFG),
+ REG(QSYS_CPU_GROUP_MAP, 0x0036c8),
+ REG_RESERVED(QSYS_QMAP),
+ REG_RESERVED(QSYS_ISDX_SGRP),
+ REG_RESERVED(QSYS_TIMED_FRAME_ENTRY),
+ REG_RESERVED(QSYS_TFRM_MISC),
+ REG_RESERVED(QSYS_TFRM_PORT_DLY),
+ REG_RESERVED(QSYS_TFRM_TIMER_CFG_1),
+ REG_RESERVED(QSYS_TFRM_TIMER_CFG_2),
+ REG_RESERVED(QSYS_TFRM_TIMER_CFG_3),
+ REG_RESERVED(QSYS_TFRM_TIMER_CFG_4),
+ REG_RESERVED(QSYS_TFRM_TIMER_CFG_5),
+ REG_RESERVED(QSYS_TFRM_TIMER_CFG_6),
+ REG_RESERVED(QSYS_TFRM_TIMER_CFG_7),
+ REG_RESERVED(QSYS_TFRM_TIMER_CFG_8),
+ REG(QSYS_RED_PROFILE, 0x003724),
+ REG(QSYS_RES_QOS_MODE, 0x003764),
+ REG(QSYS_RES_CFG, 0x004000),
+ REG(QSYS_RES_STAT, 0x004004),
+ REG(QSYS_EGR_DROP_MODE, 0x003768),
+ REG(QSYS_EQ_CTRL, 0x00376c),
+ REG_RESERVED(QSYS_EVENTS_CORE),
+ REG_RESERVED(QSYS_QMAXSDU_CFG_0),
+ REG_RESERVED(QSYS_QMAXSDU_CFG_1),
+ REG_RESERVED(QSYS_QMAXSDU_CFG_2),
+ REG_RESERVED(QSYS_QMAXSDU_CFG_3),
+ REG_RESERVED(QSYS_QMAXSDU_CFG_4),
+ REG_RESERVED(QSYS_QMAXSDU_CFG_5),
+ REG_RESERVED(QSYS_QMAXSDU_CFG_6),
+ REG_RESERVED(QSYS_QMAXSDU_CFG_7),
+ REG_RESERVED(QSYS_PREEMPTION_CFG),
+ REG(QSYS_CIR_CFG, 0x000000),
+ REG_RESERVED(QSYS_EIR_CFG),
+ REG(QSYS_SE_CFG, 0x000008),
+ REG(QSYS_SE_DWRR_CFG, 0x00000c),
+ REG_RESERVED(QSYS_SE_CONNECT),
+ REG_RESERVED(QSYS_SE_DLB_SENSE),
+ REG(QSYS_CIR_STATE, 0x000044),
+ REG_RESERVED(QSYS_EIR_STATE),
+ REG_RESERVED(QSYS_SE_STATE),
+ REG(QSYS_HSCH_MISC_CFG, 0x003774),
+ REG_RESERVED(QSYS_TAG_CONFIG),
+ REG_RESERVED(QSYS_TAS_PARAM_CFG_CTRL),
+ REG_RESERVED(QSYS_PORT_MAX_SDU),
+ REG_RESERVED(QSYS_PARAM_CFG_REG_1),
+ REG_RESERVED(QSYS_PARAM_CFG_REG_2),
+ REG_RESERVED(QSYS_PARAM_CFG_REG_3),
+ REG_RESERVED(QSYS_PARAM_CFG_REG_4),
+ REG_RESERVED(QSYS_PARAM_CFG_REG_5),
+ REG_RESERVED(QSYS_GCL_CFG_REG_1),
+ REG_RESERVED(QSYS_GCL_CFG_REG_2),
+ REG_RESERVED(QSYS_PARAM_STATUS_REG_1),
+ REG_RESERVED(QSYS_PARAM_STATUS_REG_2),
+ REG_RESERVED(QSYS_PARAM_STATUS_REG_3),
+ REG_RESERVED(QSYS_PARAM_STATUS_REG_4),
+ REG_RESERVED(QSYS_PARAM_STATUS_REG_5),
+ REG_RESERVED(QSYS_PARAM_STATUS_REG_6),
+ REG_RESERVED(QSYS_PARAM_STATUS_REG_7),
+ REG_RESERVED(QSYS_PARAM_STATUS_REG_8),
+ REG_RESERVED(QSYS_PARAM_STATUS_REG_9),
+ REG_RESERVED(QSYS_GCL_STATUS_REG_1),
+ REG_RESERVED(QSYS_GCL_STATUS_REG_2),
+};
+
+static const u32 vsc9953_rew_regmap[] = {
+ REG(REW_PORT_VLAN_CFG, 0x000000),
+ REG(REW_TAG_CFG, 0x000004),
+ REG(REW_PORT_CFG, 0x000008),
+ REG(REW_DSCP_CFG, 0x00000c),
+ REG(REW_PCP_DEI_QOS_MAP_CFG, 0x000010),
+ REG_RESERVED(REW_PTP_CFG),
+ REG_RESERVED(REW_PTP_DLY1_CFG),
+ REG_RESERVED(REW_RED_TAG_CFG),
+ REG(REW_DSCP_REMAP_DP1_CFG, 0x000610),
+ REG(REW_DSCP_REMAP_CFG, 0x000710),
+ REG_RESERVED(REW_STAT_CFG),
+ REG_RESERVED(REW_REW_STICKY),
+ REG_RESERVED(REW_PPT),
+};
+
+static const u32 vsc9953_sys_regmap[] = {
+ REG(SYS_COUNT_RX_OCTETS, 0x000000),
+ REG(SYS_COUNT_RX_UNICAST, 0x000004),
+ REG(SYS_COUNT_RX_MULTICAST, 0x000008),
+ REG(SYS_COUNT_RX_BROADCAST, 0x00000c),
+ REG(SYS_COUNT_RX_SHORTS, 0x000010),
+ REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
+ REG(SYS_COUNT_RX_JABBERS, 0x000018),
+ REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c),
+ REG(SYS_COUNT_RX_SYM_ERRS, 0x000020),
+ REG(SYS_COUNT_RX_64, 0x000024),
+ REG(SYS_COUNT_RX_65_127, 0x000028),
+ REG(SYS_COUNT_RX_128_255, 0x00002c),
+ REG(SYS_COUNT_RX_256_511, 0x000030),
+ REG(SYS_COUNT_RX_512_1023, 0x000034),
+ REG(SYS_COUNT_RX_1024_1526, 0x000038),
+ REG(SYS_COUNT_RX_1527_MAX, 0x00003c),
+ REG(SYS_COUNT_RX_PAUSE, 0x000040),
+ REG(SYS_COUNT_RX_CONTROL, 0x000044),
+ REG(SYS_COUNT_RX_LONGS, 0x000048),
+ REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c),
+ REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050),
+ REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054),
+ REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058),
+ REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c),
+ REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060),
+ REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064),
+ REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068),
+ REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090),
+ REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094),
+ REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098),
+ REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0),
+ REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4),
+ REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8),
+ REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac),
+ REG(SYS_COUNT_TX_OCTETS, 0x000100),
+ REG(SYS_COUNT_TX_UNICAST, 0x000104),
+ REG(SYS_COUNT_TX_MULTICAST, 0x000108),
+ REG(SYS_COUNT_TX_BROADCAST, 0x00010c),
+ REG(SYS_COUNT_TX_COLLISION, 0x000110),
+ REG(SYS_COUNT_TX_DROPS, 0x000114),
+ REG(SYS_COUNT_TX_PAUSE, 0x000118),
+ REG(SYS_COUNT_TX_64, 0x00011c),
+ REG(SYS_COUNT_TX_65_127, 0x000120),
+ REG(SYS_COUNT_TX_128_255, 0x000124),
+ REG(SYS_COUNT_TX_256_511, 0x000128),
+ REG(SYS_COUNT_TX_512_1023, 0x00012c),
+ REG(SYS_COUNT_TX_1024_1526, 0x000130),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000134),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000138),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00013c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000140),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000144),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000148),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00014c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000150),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000154),
+ REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000158),
+ REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00015c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000160),
+ REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000164),
+ REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000168),
+ REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00016c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000170),
+ REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000174),
+ REG(SYS_COUNT_TX_AGED, 0x000178),
+ REG(SYS_COUNT_DROP_LOCAL, 0x000200),
+ REG(SYS_COUNT_DROP_TAIL, 0x000204),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000208),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00020c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000210),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000214),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000218),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00021c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000220),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000224),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000228),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00022c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000230),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000234),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000238),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00023c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000240),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000244),
+ REG(SYS_RESET_CFG, 0x000318),
+ REG_RESERVED(SYS_SR_ETYPE_CFG),
+ REG(SYS_VLAN_ETYPE_CFG, 0x000320),
+ REG(SYS_PORT_MODE, 0x000324),
+ REG(SYS_FRONT_PORT_MODE, 0x000354),
+ REG(SYS_FRM_AGING, 0x00037c),
+ REG(SYS_STAT_CFG, 0x000380),
+ REG_RESERVED(SYS_SW_STATUS),
+ REG_RESERVED(SYS_MISC_CFG),
+ REG_RESERVED(SYS_REW_MAC_HIGH_CFG),
+ REG_RESERVED(SYS_REW_MAC_LOW_CFG),
+ REG_RESERVED(SYS_TIMESTAMP_OFFSET),
+ REG(SYS_PAUSE_CFG, 0x00044c),
+ REG(SYS_PAUSE_TOT_CFG, 0x000478),
+ REG(SYS_ATOP, 0x00047c),
+ REG(SYS_ATOP_TOT_CFG, 0x0004a8),
+ REG(SYS_MAC_FC_CFG, 0x0004ac),
+ REG(SYS_MMGT, 0x0004d4),
+ REG_RESERVED(SYS_MMGT_FAST),
+ REG_RESERVED(SYS_EVENTS_DIF),
+ REG_RESERVED(SYS_EVENTS_CORE),
+ REG_RESERVED(SYS_PTP_STATUS),
+ REG_RESERVED(SYS_PTP_TXSTAMP),
+ REG_RESERVED(SYS_PTP_NXT),
+ REG_RESERVED(SYS_PTP_CFG),
+ REG_RESERVED(SYS_RAM_INIT),
+ REG_RESERVED(SYS_CM_ADDR),
+ REG_RESERVED(SYS_CM_DATA_WR),
+ REG_RESERVED(SYS_CM_DATA_RD),
+ REG_RESERVED(SYS_CM_OP),
+ REG_RESERVED(SYS_CM_DATA),
+};
+
+static const u32 vsc9953_gcb_regmap[] = {
+ REG(GCB_SOFT_RST, 0x000008),
+ REG(GCB_MIIM_MII_STATUS, 0x0000ac),
+ REG(GCB_MIIM_MII_CMD, 0x0000b4),
+ REG(GCB_MIIM_MII_DATA, 0x0000b8),
+};
+
+static const u32 vsc9953_dev_gmii_regmap[] = {
+ REG(DEV_CLOCK_CFG, 0x0),
+ REG(DEV_PORT_MISC, 0x4),
+ REG_RESERVED(DEV_EVENTS),
+ REG(DEV_EEE_CFG, 0xc),
+ REG_RESERVED(DEV_RX_PATH_DELAY),
+ REG_RESERVED(DEV_TX_PATH_DELAY),
+ REG_RESERVED(DEV_PTP_PREDICT_CFG),
+ REG(DEV_MAC_ENA_CFG, 0x10),
+ REG(DEV_MAC_MODE_CFG, 0x14),
+ REG(DEV_MAC_MAXLEN_CFG, 0x18),
+ REG(DEV_MAC_TAGS_CFG, 0x1c),
+ REG(DEV_MAC_ADV_CHK_CFG, 0x20),
+ REG(DEV_MAC_IFG_CFG, 0x24),
+ REG(DEV_MAC_HDX_CFG, 0x28),
+ REG_RESERVED(DEV_MAC_DBG_CFG),
+ REG(DEV_MAC_FC_MAC_LOW_CFG, 0x30),
+ REG(DEV_MAC_FC_MAC_HIGH_CFG, 0x34),
+ REG(DEV_MAC_STICKY, 0x38),
+ REG_RESERVED(PCS1G_CFG),
+ REG_RESERVED(PCS1G_MODE_CFG),
+ REG_RESERVED(PCS1G_SD_CFG),
+ REG_RESERVED(PCS1G_ANEG_CFG),
+ REG_RESERVED(PCS1G_ANEG_NP_CFG),
+ REG_RESERVED(PCS1G_LB_CFG),
+ REG_RESERVED(PCS1G_DBG_CFG),
+ REG_RESERVED(PCS1G_CDET_CFG),
+ REG_RESERVED(PCS1G_ANEG_STATUS),
+ REG_RESERVED(PCS1G_ANEG_NP_STATUS),
+ REG_RESERVED(PCS1G_LINK_STATUS),
+ REG_RESERVED(PCS1G_LINK_DOWN_CNT),
+ REG_RESERVED(PCS1G_STICKY),
+ REG_RESERVED(PCS1G_DEBUG_STATUS),
+ REG_RESERVED(PCS1G_LPI_CFG),
+ REG_RESERVED(PCS1G_LPI_WAKE_ERROR_CNT),
+ REG_RESERVED(PCS1G_LPI_STATUS),
+ REG_RESERVED(PCS1G_TSTPAT_MODE_CFG),
+ REG_RESERVED(PCS1G_TSTPAT_STATUS),
+ REG_RESERVED(DEV_PCS_FX100_CFG),
+ REG_RESERVED(DEV_PCS_FX100_STATUS),
+};
+
+static const u32 *vsc9953_regmap[TARGET_MAX] = {
+ [ANA] = vsc9953_ana_regmap,
+ [QS] = vsc9953_qs_regmap,
+ [QSYS] = vsc9953_qsys_regmap,
+ [REW] = vsc9953_rew_regmap,
+ [SYS] = vsc9953_sys_regmap,
+ [S0] = vsc9953_vcap_regmap,
+ [S1] = vsc9953_vcap_regmap,
+ [S2] = vsc9953_vcap_regmap,
+ [GCB] = vsc9953_gcb_regmap,
+ [DEV_GMII] = vsc9953_dev_gmii_regmap,
+};
+
+/* Addresses are relative to the device's base address */
+static const struct resource vsc9953_resources[] = {
+ DEFINE_RES_MEM_NAMED(0x0010000, 0x0010000, "sys"),
+ DEFINE_RES_MEM_NAMED(0x0030000, 0x0010000, "rew"),
+ DEFINE_RES_MEM_NAMED(0x0040000, 0x0000400, "s0"),
+ DEFINE_RES_MEM_NAMED(0x0050000, 0x0000400, "s1"),
+ DEFINE_RES_MEM_NAMED(0x0060000, 0x0000400, "s2"),
+ DEFINE_RES_MEM_NAMED(0x0070000, 0x0000200, "devcpu_gcb"),
+ DEFINE_RES_MEM_NAMED(0x0080000, 0x0000100, "qs"),
+ DEFINE_RES_MEM_NAMED(0x0090000, 0x00000cc, "ptp"),
+ DEFINE_RES_MEM_NAMED(0x0100000, 0x0010000, "port0"),
+ DEFINE_RES_MEM_NAMED(0x0110000, 0x0010000, "port1"),
+ DEFINE_RES_MEM_NAMED(0x0120000, 0x0010000, "port2"),
+ DEFINE_RES_MEM_NAMED(0x0130000, 0x0010000, "port3"),
+ DEFINE_RES_MEM_NAMED(0x0140000, 0x0010000, "port4"),
+ DEFINE_RES_MEM_NAMED(0x0150000, 0x0010000, "port5"),
+ DEFINE_RES_MEM_NAMED(0x0160000, 0x0010000, "port6"),
+ DEFINE_RES_MEM_NAMED(0x0170000, 0x0010000, "port7"),
+ DEFINE_RES_MEM_NAMED(0x0180000, 0x0010000, "port8"),
+ DEFINE_RES_MEM_NAMED(0x0190000, 0x0010000, "port9"),
+ DEFINE_RES_MEM_NAMED(0x0200000, 0x0020000, "qsys"),
+ DEFINE_RES_MEM_NAMED(0x0280000, 0x0010000, "ana"),
+};
+
+static const char * const vsc9953_resource_names[TARGET_MAX] = {
+ [SYS] = "sys",
+ [REW] = "rew",
+ [S0] = "s0",
+ [S1] = "s1",
+ [S2] = "s2",
+ [GCB] = "devcpu_gcb",
+ [QS] = "qs",
+ [PTP] = "ptp",
+ [QSYS] = "qsys",
+ [ANA] = "ana",
+};
+
+static const struct reg_field vsc9953_regfields[REGFIELD_MAX] = {
+ [ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 10, 10),
+ [ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 9),
+ [ANA_ANEVENTS_AUTOAGE] = REG_FIELD(ANA_ANEVENTS, 24, 24),
+ [ANA_ANEVENTS_STORM_DROP] = REG_FIELD(ANA_ANEVENTS, 22, 22),
+ [ANA_ANEVENTS_LEARN_DROP] = REG_FIELD(ANA_ANEVENTS, 21, 21),
+ [ANA_ANEVENTS_AGED_ENTRY] = REG_FIELD(ANA_ANEVENTS, 20, 20),
+ [ANA_ANEVENTS_CPU_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 19, 19),
+ [ANA_ANEVENTS_AUTO_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 18, 18),
+ [ANA_ANEVENTS_LEARN_REMOVE] = REG_FIELD(ANA_ANEVENTS, 17, 17),
+ [ANA_ANEVENTS_AUTO_LEARNED] = REG_FIELD(ANA_ANEVENTS, 16, 16),
+ [ANA_ANEVENTS_AUTO_MOVED] = REG_FIELD(ANA_ANEVENTS, 15, 15),
+ [ANA_ANEVENTS_CLASSIFIED_DROP] = REG_FIELD(ANA_ANEVENTS, 13, 13),
+ [ANA_ANEVENTS_CLASSIFIED_COPY] = REG_FIELD(ANA_ANEVENTS, 12, 12),
+ [ANA_ANEVENTS_VLAN_DISCARD] = REG_FIELD(ANA_ANEVENTS, 11, 11),
+ [ANA_ANEVENTS_FWD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 10, 10),
+ [ANA_ANEVENTS_MULTICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 9, 9),
+ [ANA_ANEVENTS_UNICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 8, 8),
+ [ANA_ANEVENTS_DEST_KNOWN] = REG_FIELD(ANA_ANEVENTS, 7, 7),
+ [ANA_ANEVENTS_BUCKET3_MATCH] = REG_FIELD(ANA_ANEVENTS, 6, 6),
+ [ANA_ANEVENTS_BUCKET2_MATCH] = REG_FIELD(ANA_ANEVENTS, 5, 5),
+ [ANA_ANEVENTS_BUCKET1_MATCH] = REG_FIELD(ANA_ANEVENTS, 4, 4),
+ [ANA_ANEVENTS_BUCKET0_MATCH] = REG_FIELD(ANA_ANEVENTS, 3, 3),
+ [ANA_ANEVENTS_CPU_OPERATION] = REG_FIELD(ANA_ANEVENTS, 2, 2),
+ [ANA_ANEVENTS_DMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 1, 1),
+ [ANA_ANEVENTS_SMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 0, 0),
+ [ANA_TABLES_MACACCESS_B_DOM] = REG_FIELD(ANA_TABLES_MACACCESS, 16, 16),
+ [ANA_TABLES_MACTINDX_BUCKET] = REG_FIELD(ANA_TABLES_MACTINDX, 11, 12),
+ [ANA_TABLES_MACTINDX_M_INDEX] = REG_FIELD(ANA_TABLES_MACTINDX, 0, 10),
+ [SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 7, 7),
+ [SYS_RESET_CFG_MEM_ENA] = REG_FIELD(SYS_RESET_CFG, 6, 6),
+ [SYS_RESET_CFG_MEM_INIT] = REG_FIELD(SYS_RESET_CFG, 5, 5),
+ [GCB_SOFT_RST_SWC_RST] = REG_FIELD(GCB_SOFT_RST, 0, 0),
+ [GCB_MIIM_MII_STATUS_PENDING] = REG_FIELD(GCB_MIIM_MII_STATUS, 2, 2),
+ [GCB_MIIM_MII_STATUS_BUSY] = REG_FIELD(GCB_MIIM_MII_STATUS, 3, 3),
+ /* Replicated per number of ports (11), register size 4 per port */
+ [QSYS_SWITCH_PORT_MODE_PORT_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 13, 13, 11, 4),
+ [QSYS_SWITCH_PORT_MODE_YEL_RSRVD] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 10, 10, 11, 4),
+ [QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 9, 9, 11, 4),
+ [QSYS_SWITCH_PORT_MODE_TX_PFC_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 1, 8, 11, 4),
+ [QSYS_SWITCH_PORT_MODE_TX_PFC_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 0, 0, 11, 4),
+ [SYS_PORT_MODE_INCL_INJ_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 4, 5, 11, 4),
+ [SYS_PORT_MODE_INCL_XTR_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 2, 3, 11, 4),
+ [SYS_PORT_MODE_INCL_HDR_ERR] = REG_FIELD_ID(SYS_PORT_MODE, 0, 0, 11, 4),
+ [SYS_PAUSE_CFG_PAUSE_START] = REG_FIELD_ID(SYS_PAUSE_CFG, 11, 20, 11, 4),
+ [SYS_PAUSE_CFG_PAUSE_STOP] = REG_FIELD_ID(SYS_PAUSE_CFG, 1, 10, 11, 4),
+ [SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 11, 4),
+};
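+
+/* A sketch of how the replicated fields above are consumed: for a field
+ * defined with REG_FIELD_ID(reg, lsb, msb, size, stride), an access to
+ * instance N through ocelot_field_{read,write} targets reg + N * stride.
+ * E.g. port 3's PORT_ENA bit lives at QSYS_SWITCH_PORT_MODE + 0xc, bit 13.
+ */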
+
+static const struct ocelot_stat_layout vsc9953_stats_layout[OCELOT_NUM_STATS] = {
+ OCELOT_COMMON_STATS,
+};
+
+static const struct vcap_field vsc9953_vcap_es0_keys[] = {
+ [VCAP_ES0_EGR_PORT] = { 0, 4},
+ [VCAP_ES0_IGR_PORT] = { 4, 4},
+ [VCAP_ES0_RSV] = { 8, 2},
+ [VCAP_ES0_L2_MC] = { 10, 1},
+ [VCAP_ES0_L2_BC] = { 11, 1},
+ [VCAP_ES0_VID] = { 12, 12},
+ [VCAP_ES0_DP] = { 24, 1},
+ [VCAP_ES0_PCP] = { 25, 3},
+};
+
+static const struct vcap_field vsc9953_vcap_es0_actions[] = {
+ [VCAP_ES0_ACT_PUSH_OUTER_TAG] = { 0, 2},
+ [VCAP_ES0_ACT_PUSH_INNER_TAG] = { 2, 1},
+ [VCAP_ES0_ACT_TAG_A_TPID_SEL] = { 3, 2},
+ [VCAP_ES0_ACT_TAG_A_VID_SEL] = { 5, 1},
+ [VCAP_ES0_ACT_TAG_A_PCP_SEL] = { 6, 2},
+ [VCAP_ES0_ACT_TAG_A_DEI_SEL] = { 8, 2},
+ [VCAP_ES0_ACT_TAG_B_TPID_SEL] = { 10, 2},
+ [VCAP_ES0_ACT_TAG_B_VID_SEL] = { 12, 1},
+ [VCAP_ES0_ACT_TAG_B_PCP_SEL] = { 13, 2},
+ [VCAP_ES0_ACT_TAG_B_DEI_SEL] = { 15, 2},
+ [VCAP_ES0_ACT_VID_A_VAL] = { 17, 12},
+ [VCAP_ES0_ACT_PCP_A_VAL] = { 29, 3},
+ [VCAP_ES0_ACT_DEI_A_VAL] = { 32, 1},
+ [VCAP_ES0_ACT_VID_B_VAL] = { 33, 12},
+ [VCAP_ES0_ACT_PCP_B_VAL] = { 45, 3},
+ [VCAP_ES0_ACT_DEI_B_VAL] = { 48, 1},
+ [VCAP_ES0_ACT_RSV] = { 49, 24},
+ [VCAP_ES0_ACT_HIT_STICKY] = { 73, 1},
+};
+
+static const struct vcap_field vsc9953_vcap_is1_keys[] = {
+ [VCAP_IS1_HK_TYPE] = { 0, 1},
+ [VCAP_IS1_HK_LOOKUP] = { 1, 2},
+ [VCAP_IS1_HK_IGR_PORT_MASK] = { 3, 11},
+ [VCAP_IS1_HK_RSV] = { 14, 10},
+ /* VCAP_IS1_HK_OAM_Y1731 not supported */
+ [VCAP_IS1_HK_L2_MC] = { 24, 1},
+ [VCAP_IS1_HK_L2_BC] = { 25, 1},
+ [VCAP_IS1_HK_IP_MC] = { 26, 1},
+ [VCAP_IS1_HK_VLAN_TAGGED] = { 27, 1},
+ [VCAP_IS1_HK_VLAN_DBL_TAGGED] = { 28, 1},
+ [VCAP_IS1_HK_TPID] = { 29, 1},
+ [VCAP_IS1_HK_VID] = { 30, 12},
+ [VCAP_IS1_HK_DEI] = { 42, 1},
+ [VCAP_IS1_HK_PCP] = { 43, 3},
+ /* Specific Fields for IS1 Half Key S1_NORMAL */
+ [VCAP_IS1_HK_L2_SMAC] = { 46, 48},
+ [VCAP_IS1_HK_ETYPE_LEN] = { 94, 1},
+ [VCAP_IS1_HK_ETYPE] = { 95, 16},
+ [VCAP_IS1_HK_IP_SNAP] = {111, 1},
+ [VCAP_IS1_HK_IP4] = {112, 1},
+ /* Layer-3 Information */
+ [VCAP_IS1_HK_L3_FRAGMENT] = {113, 1},
+ [VCAP_IS1_HK_L3_FRAG_OFS_GT0] = {114, 1},
+ [VCAP_IS1_HK_L3_OPTIONS] = {115, 1},
+ [VCAP_IS1_HK_L3_DSCP] = {116, 6},
+ [VCAP_IS1_HK_L3_IP4_SIP] = {122, 32},
+ /* Layer-4 Information */
+ [VCAP_IS1_HK_TCP_UDP] = {154, 1},
+ [VCAP_IS1_HK_TCP] = {155, 1},
+ [VCAP_IS1_HK_L4_SPORT] = {156, 16},
+ [VCAP_IS1_HK_L4_RNG] = {172, 8},
+ /* Specific Fields for IS1 Half Key S1_5TUPLE_IP4 */
+ [VCAP_IS1_HK_IP4_INNER_TPID] = { 46, 1},
+ [VCAP_IS1_HK_IP4_INNER_VID] = { 47, 12},
+ [VCAP_IS1_HK_IP4_INNER_DEI] = { 59, 1},
+ [VCAP_IS1_HK_IP4_INNER_PCP] = { 60, 3},
+ [VCAP_IS1_HK_IP4_IP4] = { 63, 1},
+ [VCAP_IS1_HK_IP4_L3_FRAGMENT] = { 64, 1},
+ [VCAP_IS1_HK_IP4_L3_FRAG_OFS_GT0] = { 65, 1},
+ [VCAP_IS1_HK_IP4_L3_OPTIONS] = { 66, 1},
+ [VCAP_IS1_HK_IP4_L3_DSCP] = { 67, 6},
+ [VCAP_IS1_HK_IP4_L3_IP4_DIP] = { 73, 32},
+ [VCAP_IS1_HK_IP4_L3_IP4_SIP] = {105, 32},
+ [VCAP_IS1_HK_IP4_L3_PROTO] = {137, 8},
+ [VCAP_IS1_HK_IP4_TCP_UDP] = {145, 1},
+ [VCAP_IS1_HK_IP4_TCP] = {146, 1},
+ [VCAP_IS1_HK_IP4_L4_RNG] = {147, 8},
+ [VCAP_IS1_HK_IP4_IP_PAYLOAD_S1_5TUPLE] = {155, 32},
+};
+
+static const struct vcap_field vsc9953_vcap_is1_actions[] = {
+ [VCAP_IS1_ACT_DSCP_ENA] = { 0, 1},
+ [VCAP_IS1_ACT_DSCP_VAL] = { 1, 6},
+ [VCAP_IS1_ACT_QOS_ENA] = { 7, 1},
+ [VCAP_IS1_ACT_QOS_VAL] = { 8, 3},
+ [VCAP_IS1_ACT_DP_ENA] = { 11, 1},
+ [VCAP_IS1_ACT_DP_VAL] = { 12, 1},
+ [VCAP_IS1_ACT_PAG_OVERRIDE_MASK] = { 13, 8},
+ [VCAP_IS1_ACT_PAG_VAL] = { 21, 8},
+ [VCAP_IS1_ACT_RSV] = { 29, 11},
+ [VCAP_IS1_ACT_VID_REPLACE_ENA] = { 40, 1},
+ [VCAP_IS1_ACT_VID_ADD_VAL] = { 41, 12},
+ [VCAP_IS1_ACT_FID_SEL] = { 53, 2},
+ [VCAP_IS1_ACT_FID_VAL] = { 55, 13},
+ [VCAP_IS1_ACT_PCP_DEI_ENA] = { 68, 1},
+ [VCAP_IS1_ACT_PCP_VAL] = { 69, 3},
+ [VCAP_IS1_ACT_DEI_VAL] = { 72, 1},
+ [VCAP_IS1_ACT_VLAN_POP_CNT_ENA] = { 73, 1},
+ [VCAP_IS1_ACT_VLAN_POP_CNT] = { 74, 2},
+ [VCAP_IS1_ACT_CUSTOM_ACE_TYPE_ENA] = { 76, 4},
+ [VCAP_IS1_ACT_HIT_STICKY] = { 80, 1},
+};
+
+static struct vcap_field vsc9953_vcap_is2_keys[] = {
+ /* Common: 41 bits */
+ [VCAP_IS2_TYPE] = { 0, 4},
+ [VCAP_IS2_HK_FIRST] = { 4, 1},
+ [VCAP_IS2_HK_PAG] = { 5, 8},
+ [VCAP_IS2_HK_IGR_PORT_MASK] = { 13, 11},
+ [VCAP_IS2_HK_RSV2] = { 24, 1},
+ [VCAP_IS2_HK_HOST_MATCH] = { 25, 1},
+ [VCAP_IS2_HK_L2_MC] = { 26, 1},
+ [VCAP_IS2_HK_L2_BC] = { 27, 1},
+ [VCAP_IS2_HK_VLAN_TAGGED] = { 28, 1},
+ [VCAP_IS2_HK_VID] = { 29, 12},
+ [VCAP_IS2_HK_DEI] = { 41, 1},
+ [VCAP_IS2_HK_PCP] = { 42, 3},
+ /* MAC_ETYPE / MAC_LLC / MAC_SNAP / OAM common */
+ [VCAP_IS2_HK_L2_DMAC] = { 45, 48},
+ [VCAP_IS2_HK_L2_SMAC] = { 93, 48},
+ /* MAC_ETYPE (TYPE=000) */
+ [VCAP_IS2_HK_MAC_ETYPE_ETYPE] = {141, 16},
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0] = {157, 16},
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1] = {173, 8},
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2] = {181, 3},
+ /* MAC_LLC (TYPE=001) */
+ [VCAP_IS2_HK_MAC_LLC_L2_LLC] = {141, 40},
+ /* MAC_SNAP (TYPE=010) */
+ [VCAP_IS2_HK_MAC_SNAP_L2_SNAP] = {141, 40},
+ /* MAC_ARP (TYPE=011) */
+ [VCAP_IS2_HK_MAC_ARP_SMAC] = { 45, 48},
+ [VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK] = { 93, 1},
+ [VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK] = { 94, 1},
+ [VCAP_IS2_HK_MAC_ARP_LEN_OK] = { 95, 1},
+ [VCAP_IS2_HK_MAC_ARP_TARGET_MATCH] = { 96, 1},
+ [VCAP_IS2_HK_MAC_ARP_SENDER_MATCH] = { 97, 1},
+ [VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN] = { 98, 1},
+ [VCAP_IS2_HK_MAC_ARP_OPCODE] = { 99, 2},
+ [VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP] = {101, 32},
+ [VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP] = {133, 32},
+ [VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP] = {165, 1},
+ /* IP4_TCP_UDP / IP4_OTHER common */
+ [VCAP_IS2_HK_IP4] = { 45, 1},
+ [VCAP_IS2_HK_L3_FRAGMENT] = { 46, 1},
+ [VCAP_IS2_HK_L3_FRAG_OFS_GT0] = { 47, 1},
+ [VCAP_IS2_HK_L3_OPTIONS] = { 48, 1},
+ [VCAP_IS2_HK_IP4_L3_TTL_GT0] = { 49, 1},
+ [VCAP_IS2_HK_L3_TOS] = { 50, 8},
+ [VCAP_IS2_HK_L3_IP4_DIP] = { 58, 32},
+ [VCAP_IS2_HK_L3_IP4_SIP] = { 90, 32},
+ [VCAP_IS2_HK_DIP_EQ_SIP] = {122, 1},
+ /* IP4_TCP_UDP (TYPE=100) */
+ [VCAP_IS2_HK_TCP] = {123, 1},
+ [VCAP_IS2_HK_L4_DPORT] = {124, 16},
+ [VCAP_IS2_HK_L4_SPORT] = {140, 16},
+ [VCAP_IS2_HK_L4_RNG] = {156, 8},
+ [VCAP_IS2_HK_L4_SPORT_EQ_DPORT] = {164, 1},
+ [VCAP_IS2_HK_L4_SEQUENCE_EQ0] = {165, 1},
+ [VCAP_IS2_HK_L4_FIN] = {166, 1},
+ [VCAP_IS2_HK_L4_SYN] = {167, 1},
+ [VCAP_IS2_HK_L4_RST] = {168, 1},
+ [VCAP_IS2_HK_L4_PSH] = {169, 1},
+ [VCAP_IS2_HK_L4_ACK] = {170, 1},
+ [VCAP_IS2_HK_L4_URG] = {171, 1},
+ /* IP4_OTHER (TYPE=101) */
+ [VCAP_IS2_HK_IP4_L3_PROTO] = {123, 8},
+ [VCAP_IS2_HK_L3_PAYLOAD] = {131, 56},
+ /* IP6_STD (TYPE=110) */
+ [VCAP_IS2_HK_IP6_L3_TTL_GT0] = { 45, 1},
+ [VCAP_IS2_HK_L3_IP6_SIP] = { 46, 128},
+ [VCAP_IS2_HK_IP6_L3_PROTO] = {174, 8},
+};
+
+static struct vcap_field vsc9953_vcap_is2_actions[] = {
+ [VCAP_IS2_ACT_HIT_ME_ONCE] = { 0, 1},
+ [VCAP_IS2_ACT_CPU_COPY_ENA] = { 1, 1},
+ [VCAP_IS2_ACT_CPU_QU_NUM] = { 2, 3},
+ [VCAP_IS2_ACT_MASK_MODE] = { 5, 2},
+ [VCAP_IS2_ACT_MIRROR_ENA] = { 7, 1},
+ [VCAP_IS2_ACT_LRN_DIS] = { 8, 1},
+ [VCAP_IS2_ACT_POLICE_ENA] = { 9, 1},
+ [VCAP_IS2_ACT_POLICE_IDX] = { 10, 8},
+ [VCAP_IS2_ACT_POLICE_VCAP_ONLY] = { 21, 1},
+ [VCAP_IS2_ACT_PORT_MASK] = { 22, 10},
+ [VCAP_IS2_ACT_ACL_ID] = { 44, 6},
+ [VCAP_IS2_ACT_HIT_CNT] = { 50, 32},
+};
+
+static struct vcap_props vsc9953_vcap_props[] = {
+ [VCAP_ES0] = {
+ .action_type_width = 0,
+ .action_table = {
+ [ES0_ACTION_TYPE_NORMAL] = {
+ .width = 73, /* HIT_STICKY not included */
+ .count = 1,
+ },
+ },
+ .target = S0,
+ .keys = vsc9953_vcap_es0_keys,
+ .actions = vsc9953_vcap_es0_actions,
+ },
+ [VCAP_IS1] = {
+ .action_type_width = 0,
+ .action_table = {
+ [IS1_ACTION_TYPE_NORMAL] = {
+ .width = 80, /* HIT_STICKY not included */
+ .count = 4,
+ },
+ },
+ .target = S1,
+ .keys = vsc9953_vcap_is1_keys,
+ .actions = vsc9953_vcap_is1_actions,
+ },
+ [VCAP_IS2] = {
+ .action_type_width = 1,
+ .action_table = {
+ [IS2_ACTION_TYPE_NORMAL] = {
+ .width = 50, /* HIT_CNT not included */
+ .count = 2
+ },
+ [IS2_ACTION_TYPE_SMAC_SIP] = {
+ .width = 6,
+ .count = 4
+ },
+ },
+ .target = S2,
+ .keys = vsc9953_vcap_is2_keys,
+ .actions = vsc9953_vcap_is2_actions,
+ },
+};
+
+#define VSC9953_INIT_TIMEOUT 50000
+#define VSC9953_GCB_RST_SLEEP 100
+#define VSC9953_SYS_RAMINIT_SLEEP 80
+
+static int vsc9953_gcb_soft_rst_status(struct ocelot *ocelot)
+{
+ int val;
+
+ ocelot_field_read(ocelot, GCB_SOFT_RST_SWC_RST, &val);
+
+ return val;
+}
+
+static int vsc9953_sys_ram_init_status(struct ocelot *ocelot)
+{
+ int val;
+
+ ocelot_field_read(ocelot, SYS_RESET_CFG_MEM_INIT, &val);
+
+ return val;
+}
+
+/* CORE_ENA is in SYS:SYSTEM:RESET_CFG
+ * MEM_INIT is in SYS:SYSTEM:RESET_CFG
+ * MEM_ENA is in SYS:SYSTEM:RESET_CFG
+ */
+static int vsc9953_reset(struct ocelot *ocelot)
+{
+ int val, err;
+
+ /* soft-reset the switch core */
+ ocelot_field_write(ocelot, GCB_SOFT_RST_SWC_RST, 1);
+
+ err = readx_poll_timeout(vsc9953_gcb_soft_rst_status, ocelot, val, !val,
+ VSC9953_GCB_RST_SLEEP, VSC9953_INIT_TIMEOUT);
+ if (err) {
+ dev_err(ocelot->dev, "timeout: switch core reset\n");
+ return err;
+ }
+
+ /* initialize switch mem ~40us */
+ ocelot_field_write(ocelot, SYS_RESET_CFG_MEM_ENA, 1);
+ ocelot_field_write(ocelot, SYS_RESET_CFG_MEM_INIT, 1);
+
+ err = readx_poll_timeout(vsc9953_sys_ram_init_status, ocelot, val, !val,
+ VSC9953_SYS_RAMINIT_SLEEP,
+ VSC9953_INIT_TIMEOUT);
+ if (err) {
+ dev_err(ocelot->dev, "timeout: switch sram init\n");
+ return err;
+ }
+
+ /* enable switch core */
+ ocelot_field_write(ocelot, SYS_RESET_CFG_CORE_ENA, 1);
+
+ return 0;
+}
+
+static void vsc9953_phylink_validate(struct ocelot *ocelot, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Autoneg);
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+
+ if (state->interface == PHY_INTERFACE_MODE_INTERNAL) {
+ phylink_set(mask, 2500baseT_Full);
+ phylink_set(mask, 2500baseX_Full);
+ }
+
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
+}
+
+/* Watermark encoding:
+ * Bit 9: unit; 0: x1, 1: x16
+ * Bits 8-0: value to be multiplied by the unit
+ */
+static u16 vsc9953_wm_enc(u16 value)
+{
+ WARN_ON(value >= 16 * BIT(9));
+
+ if (value >= BIT(9))
+ return BIT(9) | (value / 16);
+
+ return value;
+}
+
+static u16 vsc9953_wm_dec(u16 wm)
+{
+ WARN_ON(wm & ~GENMASK(9, 0));
+
+ if (wm & BIT(9))
+ return (wm & GENMASK(8, 0)) * 16;
+
+ return wm;
+}
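+
+/* Encoding example (derived from the comment above): enc(600) =
+ * BIT(9) | (600 / 16) = BIT(9) | 37, and dec(BIT(9) | 37) = 37 * 16 = 592,
+ * i.e. watermarks of 512 and above are quantized down to a multiple of 16.
+ */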
+
+static void vsc9953_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
+{
+ *inuse = (val & GENMASK(25, 13)) >> 13;
+ *maxuse = val & GENMASK(12, 0);
+}
+
+static const struct ocelot_ops vsc9953_ops = {
+ .reset = vsc9953_reset,
+ .wm_enc = vsc9953_wm_enc,
+ .wm_dec = vsc9953_wm_dec,
+ .wm_stat = vsc9953_wm_stat,
+ .port_to_netdev = felix_port_to_netdev,
+ .netdev_to_port = felix_netdev_to_port,
+};
+
+static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
+{
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct device *dev = ocelot->dev;
+ struct mii_bus *bus;
+ int port;
+ int rc;
+
+ felix->pcs = devm_kcalloc(dev, felix->info->num_ports,
+ sizeof(struct phylink_pcs *),
+ GFP_KERNEL);
+ if (!felix->pcs) {
+ dev_err(dev, "failed to allocate array for PCS PHYs\n");
+ return -ENOMEM;
+ }
+
+ rc = mscc_miim_setup(dev, &bus, "VSC9953 internal MDIO bus",
+ ocelot->targets[GCB],
+ ocelot->map[GCB][GCB_MIIM_MII_STATUS & REG_MASK],
+ true);
+ if (rc) {
+ dev_err(dev, "failed to setup MDIO bus\n");
+ return rc;
+ }
+
+ /* Needed in order to initialize the bus mutex lock */
+ rc = devm_of_mdiobus_register(dev, bus, NULL);
+ if (rc < 0) {
+ dev_err(dev, "failed to register MDIO bus\n");
+ return rc;
+ }
+
+ felix->imdio = bus;
+
+ for (port = 0; port < felix->info->num_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct phylink_pcs *phylink_pcs;
+ struct mdio_device *mdio_device;
+ int addr = port + 4;
+
+ if (dsa_is_unused_port(felix->ds, port))
+ continue;
+
+ if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_INTERNAL)
+ continue;
+
+ mdio_device = mdio_device_create(felix->imdio, addr);
+ if (IS_ERR(mdio_device))
+ continue;
+
+ phylink_pcs = lynx_pcs_create(mdio_device);
+ if (!phylink_pcs) {
+ mdio_device_free(mdio_device);
+ continue;
+ }
+
+ felix->pcs[port] = phylink_pcs;
+
+ dev_info(dev, "Found PCS at internal MDIO address %d\n", addr);
+ }
+
+ return 0;
+}
+
+static void vsc9953_mdio_bus_free(struct ocelot *ocelot)
+{
+ struct felix *felix = ocelot_to_felix(ocelot);
+ int port;
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct phylink_pcs *phylink_pcs = felix->pcs[port];
+ struct mdio_device *mdio_device;
+
+ if (!phylink_pcs)
+ continue;
+
+ mdio_device = lynx_get_mdio_device(phylink_pcs);
+ mdio_device_free(mdio_device);
+ lynx_pcs_destroy(phylink_pcs);
+ }
+
+ /* mdiobus_unregister and mdiobus_free handled by devres */
+}
+
+static const struct felix_info seville_info_vsc9953 = {
+ .resources = vsc9953_resources,
+ .num_resources = ARRAY_SIZE(vsc9953_resources),
+ .resource_names = vsc9953_resource_names,
+ .regfields = vsc9953_regfields,
+ .map = vsc9953_regmap,
+ .ops = &vsc9953_ops,
+ .stats_layout = vsc9953_stats_layout,
+ .vcap = vsc9953_vcap_props,
+ .vcap_pol_base = VSC9953_VCAP_POLICER_BASE,
+ .vcap_pol_max = VSC9953_VCAP_POLICER_MAX,
+ .vcap_pol_base2 = VSC9953_VCAP_POLICER_BASE2,
+ .vcap_pol_max2 = VSC9953_VCAP_POLICER_MAX2,
+ .num_mact_rows = 2048,
+ .num_ports = VSC9953_NUM_PORTS,
+ .num_tx_queues = OCELOT_NUM_TC,
+ .mdio_bus_alloc = vsc9953_mdio_bus_alloc,
+ .mdio_bus_free = vsc9953_mdio_bus_free,
+ .phylink_validate = vsc9953_phylink_validate,
+ .port_modes = vsc9953_port_modes,
+};
+
+static int seville_probe(struct platform_device *pdev)
+{
+ struct dsa_switch *ds;
+ struct ocelot *ocelot;
+ struct resource *res;
+ struct felix *felix;
+ int err;
+
+ felix = kzalloc(sizeof(struct felix), GFP_KERNEL);
+ if (!felix) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "Failed to allocate driver memory\n");
+ goto err_alloc_felix;
+ }
+
+ platform_set_drvdata(pdev, felix);
+
+ ocelot = &felix->ocelot;
+ ocelot->dev = &pdev->dev;
+ ocelot->num_flooding_pgids = 1;
+ felix->info = &seville_info_vsc9953;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ err = -EINVAL;
+ dev_err(&pdev->dev, "Invalid resource\n");
+ goto err_alloc_felix;
+ }
+ felix->switch_base = res->start;
+
+ ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL);
+ if (!ds) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "Failed to allocate DSA switch\n");
+ goto err_alloc_ds;
+ }
+
+ ds->dev = &pdev->dev;
+ ds->num_ports = felix->info->num_ports;
+ ds->ops = &felix_switch_ops;
+ ds->priv = ocelot;
+ felix->ds = ds;
+ felix->tag_proto = DSA_TAG_PROTO_SEVILLE;
+
+ err = dsa_register_switch(ds);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err);
+ goto err_register_ds;
+ }
+
+ return 0;
+
+err_register_ds:
+ kfree(ds);
+err_alloc_ds:
+err_alloc_felix:
+ kfree(felix);
+ return err;
+}
+
+static int seville_remove(struct platform_device *pdev)
+{
+ struct felix *felix = platform_get_drvdata(pdev);
+
+ if (!felix)
+ return 0;
+
+ dsa_unregister_switch(felix->ds);
+
+ kfree(felix->ds);
+ kfree(felix);
+
+ return 0;
+}
+
+static void seville_shutdown(struct platform_device *pdev)
+{
+ struct felix *felix = platform_get_drvdata(pdev);
+
+ if (!felix)
+ return;
+
+ dsa_switch_shutdown(felix->ds);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static const struct of_device_id seville_of_match[] = {
+ { .compatible = "mscc,vsc9953-switch" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, seville_of_match);
+
+static struct platform_driver seville_vsc9953_driver = {
+ .probe = seville_probe,
+ .remove = seville_remove,
+ .shutdown = seville_shutdown,
+ .driver = {
+ .name = "mscc_seville",
+ .of_match_table = of_match_ptr(seville_of_match),
+ },
+};
+module_platform_driver(seville_vsc9953_driver);
+
+MODULE_DESCRIPTION("Seville Switch driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/qca/Kconfig b/drivers/net/dsa/qca/Kconfig
new file mode 100644
index 000000000..ba3397473
--- /dev/null
+++ b/drivers/net/dsa/qca/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_DSA_AR9331
+ tristate "Qualcomm Atheros AR9331 Ethernet switch support"
+ depends on NET_DSA
+ select NET_DSA_TAG_AR9331
+ select REGMAP
+ help
+ This enables support for the Qualcomm Atheros AR9331 built-in Ethernet
+ switch.
+
+config NET_DSA_QCA8K
+ tristate "Qualcomm Atheros QCA8K Ethernet switch family support"
+ select NET_DSA_TAG_QCA
+ select REGMAP
+ help
+ This enables support for the Qualcomm Atheros QCA8K Ethernet
+ switch chips.
diff --git a/drivers/net/dsa/qca/Makefile b/drivers/net/dsa/qca/Makefile
new file mode 100644
index 000000000..701f1d199
--- /dev/null
+++ b/drivers/net/dsa/qca/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_NET_DSA_AR9331) += ar9331.o
+obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
+qca8k-y += qca8k-common.o qca8k-8xxx.o
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
new file mode 100644
index 000000000..e7b98b864
--- /dev/null
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -0,0 +1,1135 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2019 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+/*
+ * +----------------------+
+ * GMAC1----RGMII----|--MAC0 |
+ * \---MDIO1----|--REGs |----MDIO3----\
+ * | | | +------+
+ * | | +--| |
+ * | MAC1-|----RMII--M-----| PHY0 |-o P0
+ * | | | | +------+
+ * | | | +--| |
+ * | MAC2-|----RMII--------| PHY1 |-o P1
+ * | | | | +------+
+ * | | | +--| |
+ * | MAC3-|----RMII--------| PHY2 |-o P2
+ * | | | | +------+
+ * | | | +--| |
+ * | MAC4-|----RMII--------| PHY3 |-o P3
+ * | | | | +------+
+ * | | | +--| |
+ * | MAC5-|--+-RMII--M-----|-PHY4-|-o P4
+ * | | | | +------+
+ * +----------------------+ | \--CFG_SW_PHY_SWAP
+ * GMAC0---------------RMII--------------------/ \-CFG_SW_PHY_ADDR_SWAP
+ * \---MDIO0--NC
+ *
+ * GMAC0 and MAC5 are connected together and use same PHY. Depending on
+ * configuration it can be PHY4 (default) or PHY0. Only GMAC0 or MAC5 can be
+ * used at same time. If GMAC0 is used (default) then MAC5 should be disabled.
+ *
+ * CFG_SW_PHY_SWAP - swap connections of PHY0 and PHY4. If this bit is not set
+ * PHY4 is connected to GMAC0/MAC5 bundle and PHY0 is connected to MAC1. If this
+ * bit is set, PHY4 is connected to MAC1 and PHY0 is connected to GMAC0/MAC5
+ * bundle.
+ *
+ * CFG_SW_PHY_ADDR_SWAP - swap addresses of PHY0 and PHY4
+ *
+ * CFG_SW_PHY_SWAP and CFG_SW_PHY_ADDR_SWAP are part of SoC specific register
+ * set and not related to switch internal registers.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <net/dsa.h>
+
+#define AR9331_SW_NAME "ar9331_switch"
+#define AR9331_SW_PORTS 6
+
+/* dummy reg to change page */
+#define AR9331_SW_REG_PAGE 0x40000
+
+/* Global Interrupt */
+#define AR9331_SW_REG_GINT 0x10
+#define AR9331_SW_REG_GINT_MASK 0x14
+#define AR9331_SW_GINT_PHY_INT BIT(2)
+
+#define AR9331_SW_REG_FLOOD_MASK 0x2c
+#define AR9331_SW_FLOOD_MASK_BROAD_TO_CPU BIT(26)
+
+#define AR9331_SW_REG_GLOBAL_CTRL 0x30
+#define AR9331_SW_GLOBAL_CTRL_MFS_M GENMASK(13, 0)
+
+#define AR9331_SW_REG_MDIO_CTRL 0x98
+#define AR9331_SW_MDIO_CTRL_BUSY BIT(31)
+#define AR9331_SW_MDIO_CTRL_MASTER_EN BIT(30)
+#define AR9331_SW_MDIO_CTRL_CMD_READ BIT(27)
+#define AR9331_SW_MDIO_CTRL_PHY_ADDR_M GENMASK(25, 21)
+#define AR9331_SW_MDIO_CTRL_REG_ADDR_M GENMASK(20, 16)
+#define AR9331_SW_MDIO_CTRL_DATA_M GENMASK(16, 0)
+
+#define AR9331_SW_REG_PORT_STATUS(_port) (0x100 + (_port) * 0x100)
+
+/* FLOW_LINK_EN - enable MAC flow control configuration via auto-negotiation
+ * with the PHY. If not set, the MAC flow control can be configured by
+ * software.
+ */
+#define AR9331_SW_PORT_STATUS_FLOW_LINK_EN BIT(12)
+
+/* LINK_EN - If set, the MAC is configured from the PHY link status.
+ * If not set, the MAC should be configured by software.
+ */
+#define AR9331_SW_PORT_STATUS_LINK_EN BIT(9)
+#define AR9331_SW_PORT_STATUS_DUPLEX_MODE BIT(6)
+#define AR9331_SW_PORT_STATUS_RX_FLOW_EN BIT(5)
+#define AR9331_SW_PORT_STATUS_TX_FLOW_EN BIT(4)
+#define AR9331_SW_PORT_STATUS_RXMAC BIT(3)
+#define AR9331_SW_PORT_STATUS_TXMAC BIT(2)
+#define AR9331_SW_PORT_STATUS_SPEED_M GENMASK(1, 0)
+#define AR9331_SW_PORT_STATUS_SPEED_1000 2
+#define AR9331_SW_PORT_STATUS_SPEED_100 1
+#define AR9331_SW_PORT_STATUS_SPEED_10 0
+
+#define AR9331_SW_PORT_STATUS_MAC_MASK \
+ (AR9331_SW_PORT_STATUS_TXMAC | AR9331_SW_PORT_STATUS_RXMAC)
+
+#define AR9331_SW_PORT_STATUS_LINK_MASK \
+ (AR9331_SW_PORT_STATUS_DUPLEX_MODE | \
+ AR9331_SW_PORT_STATUS_RX_FLOW_EN | AR9331_SW_PORT_STATUS_TX_FLOW_EN | \
+ AR9331_SW_PORT_STATUS_SPEED_M)
+
+#define AR9331_SW_REG_PORT_CTRL(_port) (0x104 + (_port) * 0x100)
+#define AR9331_SW_PORT_CTRL_HEAD_EN BIT(11)
+#define AR9331_SW_PORT_CTRL_PORT_STATE GENMASK(2, 0)
+#define AR9331_SW_PORT_CTRL_PORT_STATE_DISABLED 0
+#define AR9331_SW_PORT_CTRL_PORT_STATE_BLOCKING 1
+#define AR9331_SW_PORT_CTRL_PORT_STATE_LISTENING 2
+#define AR9331_SW_PORT_CTRL_PORT_STATE_LEARNING 3
+#define AR9331_SW_PORT_CTRL_PORT_STATE_FORWARD 4
+
+#define AR9331_SW_REG_PORT_VLAN(_port) (0x108 + (_port) * 0x100)
+#define AR9331_SW_PORT_VLAN_8021Q_MODE GENMASK(31, 30)
+#define AR9331_SW_8021Q_MODE_SECURE 3
+#define AR9331_SW_8021Q_MODE_CHECK 2
+#define AR9331_SW_8021Q_MODE_FALLBACK 1
+#define AR9331_SW_8021Q_MODE_NONE 0
+#define AR9331_SW_PORT_VLAN_PORT_VID_MEMBER GENMASK(25, 16)
+
+/* MIB registers */
+#define AR9331_MIB_COUNTER(x) (0x20000 + ((x) * 0x100))
+
+/* PHY bypass mode
+ * ------------------------------------------------------------------------
+ * Bit: | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |10 |11 |12 |13 |14 |15 |
+ *
+ * real | start | OP | PhyAddr | Reg Addr | TA |
+ * atheros| start | OP | 2'b00 |PhyAdd[2:0]| Reg Addr[4:0] | TA |
+ *
+ *
+ * Bit: |16 |17 |18 |19 |20 |21 |22 |23 |24 |25 |26 |27 |28 |29 |30 |31 |
+ * real | Data |
+ * atheros| Data |
+ *
+ * ------------------------------------------------------------------------
+ * Page address mode
+ * ------------------------------------------------------------------------
+ * Bit: | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |10 |11 |12 |13 |14 |15 |
+ * real | start | OP | PhyAddr | Reg Addr | TA |
+ * atheros| start | OP | 2'b11 | 8'b0 | TA |
+ *
+ * Bit: |16 |17 |18 |19 |20 |21 |22 |23 |24 |25 |26 |27 |28 |29 |30 |31 |
+ * real | Data |
+ * atheros| | Page [9:0] |
+ */
+/* In Page Address mode, bits [18:9] of the 32 bit register address should be
+ * written to bits [9:0] of the MDIO data register.
+ */
+#define AR9331_SW_ADDR_PAGE GENMASK(18, 9)
+
+/* ------------------------------------------------------------------------
+ * Normal register access mode
+ * ------------------------------------------------------------------------
+ * Bit: | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |10 |11 |12 |13 |14 |15 |
+ * real | start | OP | PhyAddr | Reg Addr | TA |
+ * atheros| start | OP | 2'b10 | low_addr[7:0] | TA |
+ *
+ * Bit: |16 |17 |18 |19 |20 |21 |22 |23 |24 |25 |26 |27 |28 |29 |30 |31 |
+ * real | Data |
+ * atheros| Data |
+ * ------------------------------------------------------------------------
+ */
+#define AR9331_SW_LOW_ADDR_PHY GENMASK(8, 6)
+#define AR9331_SW_LOW_ADDR_REG GENMASK(5, 1)
+
+#define AR9331_SW_MDIO_PHY_MODE_M GENMASK(4, 3)
+#define AR9331_SW_MDIO_PHY_MODE_PAGE 3
+#define AR9331_SW_MDIO_PHY_MODE_REG 2
+#define AR9331_SW_MDIO_PHY_MODE_BYPASS 0
+#define AR9331_SW_MDIO_PHY_ADDR_M GENMASK(2, 0)
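+
+/* Worked example (illustrative): for switch register 0x104
+ * (AR9331_SW_REG_PORT_CTRL(0)), the page is
+ * FIELD_GET(AR9331_SW_ADDR_PAGE, 0x104) = 0, the low PHY address bits are
+ * FIELD_GET(AR9331_SW_LOW_ADDR_PHY, 0x104) = 4, and the MDIO register number
+ * is FIELD_GET(AR9331_SW_LOW_ADDR_REG, 0x104) = 2.
+ */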
+
+/* Empirically determined values */
+#define AR9331_SW_MDIO_POLL_SLEEP_US 1
+#define AR9331_SW_MDIO_POLL_TIMEOUT_US 20
+
+/* The interval should be small enough to avoid overflow of 32 bit MIBs.
+ *
+ * FIXME: until we can read MIBs directly from the stats64 call (i.e. sleep
+ * there), we have to poll stats more frequently than actually needed. For
+ * overflow protection alone, a 100 second interval would normally have been
+ * OK.
+ */
+#define STATS_INTERVAL_JIFFIES (3 * HZ)
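+
+/* Rough overflow math (assuming a 100 Mbit/s line rate): a 32 bit byte
+ * counter wraps after 2^32 / 12.5 MB/s, roughly 343 seconds, so a 100 second
+ * poll interval would indeed leave enough margin.
+ */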
+
+struct ar9331_sw_stats_raw {
+ u32 rxbroad; /* 0x00 */
+ u32 rxpause; /* 0x04 */
+ u32 rxmulti; /* 0x08 */
+ u32 rxfcserr; /* 0x0c */
+ u32 rxalignerr; /* 0x10 */
+ u32 rxrunt; /* 0x14 */
+ u32 rxfragment; /* 0x18 */
+ u32 rx64byte; /* 0x1c */
+ u32 rx128byte; /* 0x20 */
+ u32 rx256byte; /* 0x24 */
+ u32 rx512byte; /* 0x28 */
+ u32 rx1024byte; /* 0x2c */
+ u32 rx1518byte; /* 0x30 */
+ u32 rxmaxbyte; /* 0x34 */
+ u32 rxtoolong; /* 0x38 */
+ u32 rxgoodbyte; /* 0x3c */
+ u32 rxgoodbyte_hi;
+ u32 rxbadbyte; /* 0x44 */
+ u32 rxbadbyte_hi;
+ u32 rxoverflow; /* 0x4c */
+ u32 filtered; /* 0x50 */
+ u32 txbroad; /* 0x54 */
+ u32 txpause; /* 0x58 */
+ u32 txmulti; /* 0x5c */
+ u32 txunderrun; /* 0x60 */
+ u32 tx64byte; /* 0x64 */
+ u32 tx128byte; /* 0x68 */
+ u32 tx256byte; /* 0x6c */
+ u32 tx512byte; /* 0x70 */
+ u32 tx1024byte; /* 0x74 */
+ u32 tx1518byte; /* 0x78 */
+ u32 txmaxbyte; /* 0x7c */
+ u32 txoversize; /* 0x80 */
+ u32 txbyte; /* 0x84 */
+ u32 txbyte_hi;
+ u32 txcollision; /* 0x8c */
+ u32 txabortcol; /* 0x90 */
+ u32 txmulticol; /* 0x94 */
+ u32 txsinglecol; /* 0x98 */
+ u32 txexcdefer; /* 0x9c */
+ u32 txdefer; /* 0xa0 */
+ u32 txlatecol; /* 0xa4 */
+};
+
+struct ar9331_sw_port {
+ int idx;
+ struct delayed_work mib_read;
+ struct rtnl_link_stats64 stats;
+ struct ethtool_pause_stats pause_stats;
+	spinlock_t stats_lock;
+};
+
+struct ar9331_sw_priv {
+ struct device *dev;
+ struct dsa_switch ds;
+ struct dsa_switch_ops ops;
+ struct irq_domain *irqdomain;
+ u32 irq_mask;
+ struct mutex lock_irq;
+ struct mii_bus *mbus; /* mdio master */
+ struct mii_bus *sbus; /* mdio slave */
+ struct regmap *regmap;
+ struct reset_control *sw_reset;
+ struct ar9331_sw_port port[AR9331_SW_PORTS];
+};
+
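+/* Recover the ar9331_sw_priv from one of its embedded port structures: step
+ * back from port[idx] to port[0], then subtract the offset of the port[]
+ * member (effectively container_of() on an array element).
+ */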
+static struct ar9331_sw_priv *ar9331_sw_port_to_priv(struct ar9331_sw_port *port)
+{
+ struct ar9331_sw_port *p = port - port->idx;
+
+ return (struct ar9331_sw_priv *)((void *)p -
+ offsetof(struct ar9331_sw_priv, port));
+}
+
+/* Warning: a switch reset will reset the last AR9331_SW_MDIO_PHY_MODE_PAGE
+ * request. If some kind of optimization is used, the request should be
+ * repeated.
+ */
+static int ar9331_sw_reset(struct ar9331_sw_priv *priv)
+{
+ int ret;
+
+ ret = reset_control_assert(priv->sw_reset);
+ if (ret)
+ goto error;
+
+	/* The AR9331 documentation does not provide any information about the
+	 * proper reset sequence. The AR8136 documentation (the closest switch
+	 * to the AR9331) says the reset duration should be greater than 10ms,
+	 * so let's use this value for now.
+	 */
+ usleep_range(10000, 15000);
+ ret = reset_control_deassert(priv->sw_reset);
+ if (ret)
+ goto error;
+	/* There is no information on how long we should wait after reset.
+	 * The AR8136 has an EEPROM and an interrupt for EEPROM load status;
+	 * the AR9331 has no EEPROM support.
+	 * For now, do not wait. If a post-reset delay turns out to be needed,
+	 * it can be added later.
+	 */
+
+ return 0;
+error:
+ dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
+ return ret;
+}
+
+static int ar9331_sw_mbus_write(struct mii_bus *mbus, int port, int regnum,
+ u16 data)
+{
+ struct ar9331_sw_priv *priv = mbus->priv;
+ struct regmap *regmap = priv->regmap;
+ u32 val;
+ int ret;
+
+ ret = regmap_write(regmap, AR9331_SW_REG_MDIO_CTRL,
+ AR9331_SW_MDIO_CTRL_BUSY |
+ AR9331_SW_MDIO_CTRL_MASTER_EN |
+ FIELD_PREP(AR9331_SW_MDIO_CTRL_PHY_ADDR_M, port) |
+ FIELD_PREP(AR9331_SW_MDIO_CTRL_REG_ADDR_M, regnum) |
+ FIELD_PREP(AR9331_SW_MDIO_CTRL_DATA_M, data));
+ if (ret)
+ goto error;
+
+ ret = regmap_read_poll_timeout(regmap, AR9331_SW_REG_MDIO_CTRL, val,
+ !(val & AR9331_SW_MDIO_CTRL_BUSY),
+ AR9331_SW_MDIO_POLL_SLEEP_US,
+ AR9331_SW_MDIO_POLL_TIMEOUT_US);
+ if (ret)
+ goto error;
+
+ return 0;
+error:
+ dev_err_ratelimited(priv->dev, "PHY write error: %i\n", ret);
+ return ret;
+}
+
+static int ar9331_sw_mbus_read(struct mii_bus *mbus, int port, int regnum)
+{
+ struct ar9331_sw_priv *priv = mbus->priv;
+ struct regmap *regmap = priv->regmap;
+ u32 val;
+ int ret;
+
+ ret = regmap_write(regmap, AR9331_SW_REG_MDIO_CTRL,
+ AR9331_SW_MDIO_CTRL_BUSY |
+ AR9331_SW_MDIO_CTRL_MASTER_EN |
+ AR9331_SW_MDIO_CTRL_CMD_READ |
+ FIELD_PREP(AR9331_SW_MDIO_CTRL_PHY_ADDR_M, port) |
+ FIELD_PREP(AR9331_SW_MDIO_CTRL_REG_ADDR_M, regnum));
+ if (ret)
+ goto error;
+
+ ret = regmap_read_poll_timeout(regmap, AR9331_SW_REG_MDIO_CTRL, val,
+ !(val & AR9331_SW_MDIO_CTRL_BUSY),
+ AR9331_SW_MDIO_POLL_SLEEP_US,
+ AR9331_SW_MDIO_POLL_TIMEOUT_US);
+ if (ret)
+ goto error;
+
+ ret = regmap_read(regmap, AR9331_SW_REG_MDIO_CTRL, &val);
+ if (ret)
+ goto error;
+
+ return FIELD_GET(AR9331_SW_MDIO_CTRL_DATA_M, val);
+
+error:
+ dev_err_ratelimited(priv->dev, "PHY read error: %i\n", ret);
+ return ret;
+}
+
+static int ar9331_sw_mbus_init(struct ar9331_sw_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct mii_bus *mbus;
+ struct device_node *np, *mnp;
+ int ret;
+
+ np = dev->of_node;
+
+ mbus = devm_mdiobus_alloc(dev);
+ if (!mbus)
+ return -ENOMEM;
+
+ mbus->name = np->full_name;
+ snprintf(mbus->id, MII_BUS_ID_SIZE, "%pOF", np);
+
+ mbus->read = ar9331_sw_mbus_read;
+ mbus->write = ar9331_sw_mbus_write;
+ mbus->priv = priv;
+ mbus->parent = dev;
+
+ mnp = of_get_child_by_name(np, "mdio");
+ if (!mnp)
+ return -ENODEV;
+
+ ret = devm_of_mdiobus_register(dev, mbus, mnp);
+ of_node_put(mnp);
+ if (ret)
+ return ret;
+
+ priv->mbus = mbus;
+
+ return 0;
+}
+
+static int ar9331_sw_setup_port(struct dsa_switch *ds, int port)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct regmap *regmap = priv->regmap;
+ u32 port_mask, port_ctrl, val;
+ int ret;
+
+ /* Generate default port settings */
+ port_ctrl = FIELD_PREP(AR9331_SW_PORT_CTRL_PORT_STATE,
+ AR9331_SW_PORT_CTRL_PORT_STATE_FORWARD);
+
+ if (dsa_is_cpu_port(ds, port)) {
+		/* The CPU port should be allowed to communicate with all user
+		 * ports.
+		 */
+		port_mask = dsa_user_ports(ds);
+		/* Enable the Atheros header on the CPU port. This allows us
+		 * to communicate with each port separately.
+		 */
+		port_ctrl |= AR9331_SW_PORT_CTRL_HEAD_EN;
+	} else if (dsa_is_user_port(ds, port)) {
+		/* User ports should communicate only with the CPU port */
+ port_mask = BIT(dsa_upstream_port(ds, port));
+ } else {
+ /* Other ports do not need to communicate at all */
+ port_mask = 0;
+ }
+
+ val = FIELD_PREP(AR9331_SW_PORT_VLAN_8021Q_MODE,
+ AR9331_SW_8021Q_MODE_NONE) |
+ FIELD_PREP(AR9331_SW_PORT_VLAN_PORT_VID_MEMBER, port_mask);
+
+ ret = regmap_write(regmap, AR9331_SW_REG_PORT_VLAN(port), val);
+ if (ret)
+ goto error;
+
+ ret = regmap_write(regmap, AR9331_SW_REG_PORT_CTRL(port), port_ctrl);
+ if (ret)
+ goto error;
+
+ return 0;
+error:
+ dev_err(priv->dev, "%s: error: %i\n", __func__, ret);
+
+ return ret;
+}
+
+static int ar9331_sw_setup(struct dsa_switch *ds)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct regmap *regmap = priv->regmap;
+ int ret, i;
+
+ ret = ar9331_sw_reset(priv);
+ if (ret)
+ return ret;
+
+	/* The reset sets proper defaults: the CPU port (Port 0) is enabled
+	 * and configured, while all other ports (1 - 5) are disabled.
+	 */
+ ret = ar9331_sw_mbus_init(priv);
+ if (ret)
+ return ret;
+
+ /* Do not drop broadcast frames */
+ ret = regmap_write_bits(regmap, AR9331_SW_REG_FLOOD_MASK,
+ AR9331_SW_FLOOD_MASK_BROAD_TO_CPU,
+ AR9331_SW_FLOOD_MASK_BROAD_TO_CPU);
+ if (ret)
+ goto error;
+
+ /* Set max frame size to the maximum supported value */
+ ret = regmap_write_bits(regmap, AR9331_SW_REG_GLOBAL_CTRL,
+ AR9331_SW_GLOBAL_CTRL_MFS_M,
+ AR9331_SW_GLOBAL_CTRL_MFS_M);
+ if (ret)
+ goto error;
+
+ for (i = 0; i < ds->num_ports; i++) {
+ ret = ar9331_sw_setup_port(ds, i);
+ if (ret)
+ goto error;
+ }
+
+ ds->configure_vlan_while_not_filtering = false;
+
+ return 0;
+error:
+ dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
+ return ret;
+}
+
+static void ar9331_sw_port_disable(struct dsa_switch *ds, int port)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct regmap *regmap = priv->regmap;
+ int ret;
+
+ ret = regmap_write(regmap, AR9331_SW_REG_PORT_STATUS(port), 0);
+ if (ret)
+ dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
+}
+
+static enum dsa_tag_protocol ar9331_sw_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol m)
+{
+ return DSA_TAG_PROTO_AR9331;
+}
+
+static void ar9331_sw_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100;
+
+ switch (port) {
+ case 0:
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ config->mac_capabilities |= MAC_1000;
+ break;
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ break;
+ }
+}
+
+static void ar9331_sw_phylink_mac_config(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct regmap *regmap = priv->regmap;
+ int ret;
+
+ ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(port),
+ AR9331_SW_PORT_STATUS_LINK_EN |
+ AR9331_SW_PORT_STATUS_FLOW_LINK_EN, 0);
+ if (ret)
+ dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
+}
+
+static void ar9331_sw_phylink_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct ar9331_sw_port *p = &priv->port[port];
+ struct regmap *regmap = priv->regmap;
+ int ret;
+
+ ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(port),
+ AR9331_SW_PORT_STATUS_MAC_MASK, 0);
+ if (ret)
+ dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
+
+ cancel_delayed_work_sync(&p->mib_read);
+}
+
+static void ar9331_sw_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct ar9331_sw_port *p = &priv->port[port];
+ struct regmap *regmap = priv->regmap;
+ u32 val;
+ int ret;
+
+ schedule_delayed_work(&p->mib_read, 0);
+
+ val = AR9331_SW_PORT_STATUS_MAC_MASK;
+ switch (speed) {
+ case SPEED_1000:
+ val |= AR9331_SW_PORT_STATUS_SPEED_1000;
+ break;
+ case SPEED_100:
+ val |= AR9331_SW_PORT_STATUS_SPEED_100;
+ break;
+ case SPEED_10:
+ val |= AR9331_SW_PORT_STATUS_SPEED_10;
+ break;
+ default:
+ return;
+ }
+
+ if (duplex)
+ val |= AR9331_SW_PORT_STATUS_DUPLEX_MODE;
+
+ if (tx_pause)
+ val |= AR9331_SW_PORT_STATUS_TX_FLOW_EN;
+
+ if (rx_pause)
+ val |= AR9331_SW_PORT_STATUS_RX_FLOW_EN;
+
+ ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(port),
+ AR9331_SW_PORT_STATUS_MAC_MASK |
+ AR9331_SW_PORT_STATUS_LINK_MASK,
+ val);
+ if (ret)
+ dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
+}
+
+static void ar9331_read_stats(struct ar9331_sw_port *port)
+{
+ struct ar9331_sw_priv *priv = ar9331_sw_port_to_priv(port);
+ struct ethtool_pause_stats *pstats = &port->pause_stats;
+ struct rtnl_link_stats64 *stats = &port->stats;
+ struct ar9331_sw_stats_raw raw;
+ int ret;
+
+	/* Do the slowest part first, to avoid needless locking for a long time */
+ ret = regmap_bulk_read(priv->regmap, AR9331_MIB_COUNTER(port->idx),
+ &raw, sizeof(raw) / sizeof(u32));
+ if (ret) {
+ dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
+ return;
+ }
+ /* All MIB counters are cleared automatically on read */
+
+ spin_lock(&port->stats_lock);
+
+ stats->rx_bytes += raw.rxgoodbyte;
+ stats->tx_bytes += raw.txbyte;
+
+ stats->rx_packets += raw.rx64byte + raw.rx128byte + raw.rx256byte +
+ raw.rx512byte + raw.rx1024byte + raw.rx1518byte + raw.rxmaxbyte;
+ stats->tx_packets += raw.tx64byte + raw.tx128byte + raw.tx256byte +
+ raw.tx512byte + raw.tx1024byte + raw.tx1518byte + raw.txmaxbyte;
+
+ stats->rx_length_errors += raw.rxrunt + raw.rxfragment + raw.rxtoolong;
+ stats->rx_crc_errors += raw.rxfcserr;
+ stats->rx_frame_errors += raw.rxalignerr;
+ stats->rx_missed_errors += raw.rxoverflow;
+ stats->rx_dropped += raw.filtered;
+ stats->rx_errors += raw.rxfcserr + raw.rxalignerr + raw.rxrunt +
+ raw.rxfragment + raw.rxoverflow + raw.rxtoolong;
+
+ stats->tx_window_errors += raw.txlatecol;
+ stats->tx_fifo_errors += raw.txunderrun;
+ stats->tx_aborted_errors += raw.txabortcol;
+ stats->tx_errors += raw.txoversize + raw.txabortcol + raw.txunderrun +
+ raw.txlatecol;
+
+ stats->multicast += raw.rxmulti;
+ stats->collisions += raw.txcollision;
+
+ pstats->tx_pause_frames += raw.txpause;
+ pstats->rx_pause_frames += raw.rxpause;
+
+ spin_unlock(&port->stats_lock);
+}
+
+static void ar9331_do_stats_poll(struct work_struct *work)
+{
+ struct ar9331_sw_port *port = container_of(work, struct ar9331_sw_port,
+ mib_read.work);
+
+ ar9331_read_stats(port);
+
+ schedule_delayed_work(&port->mib_read, STATS_INTERVAL_JIFFIES);
+}
+
+static void ar9331_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *s)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct ar9331_sw_port *p = &priv->port[port];
+
+ spin_lock(&p->stats_lock);
+ memcpy(s, &p->stats, sizeof(*s));
+ spin_unlock(&p->stats_lock);
+}
+
+static void ar9331_get_pause_stats(struct dsa_switch *ds, int port,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct ar9331_sw_port *p = &priv->port[port];
+
+ spin_lock(&p->stats_lock);
+ memcpy(pause_stats, &p->pause_stats, sizeof(*pause_stats));
+ spin_unlock(&p->stats_lock);
+}
+
+static const struct dsa_switch_ops ar9331_sw_ops = {
+ .get_tag_protocol = ar9331_sw_get_tag_protocol,
+ .setup = ar9331_sw_setup,
+ .port_disable = ar9331_sw_port_disable,
+ .phylink_get_caps = ar9331_sw_phylink_get_caps,
+ .phylink_mac_config = ar9331_sw_phylink_mac_config,
+ .phylink_mac_link_down = ar9331_sw_phylink_mac_link_down,
+ .phylink_mac_link_up = ar9331_sw_phylink_mac_link_up,
+ .get_stats64 = ar9331_get_stats64,
+ .get_pause_stats = ar9331_get_pause_stats,
+};
+
+static irqreturn_t ar9331_sw_irq(int irq, void *data)
+{
+ struct ar9331_sw_priv *priv = data;
+ struct regmap *regmap = priv->regmap;
+ u32 stat;
+ int ret;
+
+ ret = regmap_read(regmap, AR9331_SW_REG_GINT, &stat);
+ if (ret) {
+ dev_err(priv->dev, "can't read interrupt status\n");
+ return IRQ_NONE;
+ }
+
+ if (!stat)
+ return IRQ_NONE;
+
+ if (stat & AR9331_SW_GINT_PHY_INT) {
+ int child_irq;
+
+ child_irq = irq_find_mapping(priv->irqdomain, 0);
+ handle_nested_irq(child_irq);
+ }
+
+ ret = regmap_write(regmap, AR9331_SW_REG_GINT, stat);
+ if (ret) {
+ dev_err(priv->dev, "can't write interrupt status\n");
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
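+/* The mask/unmask callbacks below only update a cached mask: they are called
+ * with the irq descriptor lock held and may not sleep, while touching the
+ * switch register requires a (sleeping) MDIO transfer. This is the standard
+ * pattern for irq chips behind slow buses: the cached value is flushed to
+ * hardware in the irq_bus_sync_unlock callback, which runs in sleepable
+ * context.
+ */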
+static void ar9331_sw_mask_irq(struct irq_data *d)
+{
+ struct ar9331_sw_priv *priv = irq_data_get_irq_chip_data(d);
+
+ priv->irq_mask = 0;
+}
+
+static void ar9331_sw_unmask_irq(struct irq_data *d)
+{
+ struct ar9331_sw_priv *priv = irq_data_get_irq_chip_data(d);
+
+ priv->irq_mask = AR9331_SW_GINT_PHY_INT;
+}
+
+static void ar9331_sw_irq_bus_lock(struct irq_data *d)
+{
+ struct ar9331_sw_priv *priv = irq_data_get_irq_chip_data(d);
+
+ mutex_lock(&priv->lock_irq);
+}
+
+static void ar9331_sw_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct ar9331_sw_priv *priv = irq_data_get_irq_chip_data(d);
+ struct regmap *regmap = priv->regmap;
+ int ret;
+
+ ret = regmap_update_bits(regmap, AR9331_SW_REG_GINT_MASK,
+ AR9331_SW_GINT_PHY_INT, priv->irq_mask);
+ if (ret)
+ dev_err(priv->dev, "failed to change IRQ mask\n");
+
+ mutex_unlock(&priv->lock_irq);
+}
+
+static struct irq_chip ar9331_sw_irq_chip = {
+ .name = AR9331_SW_NAME,
+ .irq_mask = ar9331_sw_mask_irq,
+ .irq_unmask = ar9331_sw_unmask_irq,
+ .irq_bus_lock = ar9331_sw_irq_bus_lock,
+ .irq_bus_sync_unlock = ar9331_sw_irq_bus_sync_unlock,
+};
+
+static int ar9331_sw_irq_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_chip_and_handler(irq, &ar9331_sw_irq_chip, handle_simple_irq);
+ irq_set_nested_thread(irq, 1);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static void ar9331_sw_irq_unmap(struct irq_domain *d, unsigned int irq)
+{
+ irq_set_nested_thread(irq, 0);
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
+}
+
+static const struct irq_domain_ops ar9331_sw_irqdomain_ops = {
+ .map = ar9331_sw_irq_map,
+ .unmap = ar9331_sw_irq_unmap,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int ar9331_sw_irq_init(struct ar9331_sw_priv *priv)
+{
+ struct device_node *np = priv->dev->of_node;
+ struct device *dev = priv->dev;
+ int ret, irq;
+
+ irq = of_irq_get(np, 0);
+ if (irq <= 0) {
+ dev_err(dev, "failed to get parent IRQ\n");
+ return irq ? irq : -EINVAL;
+ }
+
+ mutex_init(&priv->lock_irq);
+ ret = devm_request_threaded_irq(dev, irq, NULL, ar9331_sw_irq,
+ IRQF_ONESHOT, AR9331_SW_NAME, priv);
+ if (ret) {
+ dev_err(dev, "unable to request irq: %d\n", ret);
+ return ret;
+ }
+
+ priv->irqdomain = irq_domain_add_linear(np, 1, &ar9331_sw_irqdomain_ops,
+ priv);
+ if (!priv->irqdomain) {
+ dev_err(dev, "failed to create IRQ domain\n");
+ return -EINVAL;
+ }
+
+ irq_set_parent(irq_create_mapping(priv->irqdomain, 0), irq);
+
+ return 0;
+}
+
+static int __ar9331_mdio_write(struct mii_bus *sbus, u8 mode, u16 reg, u16 val)
+{
+ u8 r, p;
+
+ p = FIELD_PREP(AR9331_SW_MDIO_PHY_MODE_M, mode) |
+ FIELD_GET(AR9331_SW_LOW_ADDR_PHY, reg);
+ r = FIELD_GET(AR9331_SW_LOW_ADDR_REG, reg);
+
+ return __mdiobus_write(sbus, p, r, val);
+}
+
+static int __ar9331_mdio_read(struct mii_bus *sbus, u16 reg)
+{
+ u8 r, p;
+
+ p = FIELD_PREP(AR9331_SW_MDIO_PHY_MODE_M, AR9331_SW_MDIO_PHY_MODE_REG) |
+ FIELD_GET(AR9331_SW_LOW_ADDR_PHY, reg);
+ r = FIELD_GET(AR9331_SW_LOW_ADDR_REG, reg);
+
+ return __mdiobus_read(sbus, p, r);
+}
+
+static int ar9331_mdio_read(void *ctx, const void *reg_buf, size_t reg_len,
+ void *val_buf, size_t val_len)
+{
+ struct ar9331_sw_priv *priv = ctx;
+ struct mii_bus *sbus = priv->sbus;
+ u32 reg = *(u32 *)reg_buf;
+ int ret;
+
+ if (reg == AR9331_SW_REG_PAGE) {
+		/* We cannot read the page selector register from hardware and
+		 * we cache its value in regmap. Return all bits set here, so
+		 * that regmap will always write the page on first use.
+		 */
+ *(u32 *)val_buf = GENMASK(9, 0);
+ return 0;
+ }
+
+ mutex_lock_nested(&sbus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = __ar9331_mdio_read(sbus, reg);
+ if (ret < 0)
+ goto error;
+
+ *(u32 *)val_buf = ret;
+ ret = __ar9331_mdio_read(sbus, reg + 2);
+ if (ret < 0)
+ goto error;
+
+ *(u32 *)val_buf |= ret << 16;
+
+ mutex_unlock(&sbus->mdio_lock);
+
+ return 0;
+error:
+ mutex_unlock(&sbus->mdio_lock);
+ dev_err_ratelimited(&sbus->dev, "Bus error. Failed to read register.\n");
+
+ return ret;
+}
+
+static int ar9331_mdio_write(void *ctx, u32 reg, u32 val)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ctx;
+ struct mii_bus *sbus = priv->sbus;
+ int ret;
+
+ mutex_lock_nested(&sbus->mdio_lock, MDIO_MUTEX_NESTED);
+ if (reg == AR9331_SW_REG_PAGE) {
+ ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_PAGE,
+ 0, val);
+ if (ret < 0)
+ goto error;
+
+ mutex_unlock(&sbus->mdio_lock);
+
+ return 0;
+ }
+
+	/* On this switch we work with 32-bit registers on top of a 16-bit
+	 * bus. Some registers (for example, forwarding database access) have
+	 * the trigger bit in the first 16-bit half of the request, while the
+	 * result and the configuration of the request live in the second
+	 * half. To make this work properly, the second half of the transfer
+	 * must be written before the first one.
+	 */
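+	/* For example (illustrative values): writing 0xaabbccdd issues the
+	 * 0xaabb half to reg + 2 first, and the 0xccdd half to reg afterwards.
+	 */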
+ ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg + 2,
+ val >> 16);
+ if (ret < 0)
+ goto error;
+
+ ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_REG, reg, val);
+ if (ret < 0)
+ goto error;
+
+ mutex_unlock(&sbus->mdio_lock);
+
+ return 0;
+
+error:
+ mutex_unlock(&sbus->mdio_lock);
+ dev_err_ratelimited(&sbus->dev, "Bus error. Failed to write register.\n");
+
+ return ret;
+}
+
+static int ar9331_sw_bus_write(void *context, const void *data, size_t count)
+{
+ u32 reg = *(u32 *)data;
+ u32 val = *((u32 *)data + 1);
+
+ return ar9331_mdio_write(context, reg, val);
+}
+
+static const struct regmap_range ar9331_valid_regs[] = {
+ regmap_reg_range(0x0, 0x0),
+ regmap_reg_range(0x10, 0x14),
+ regmap_reg_range(0x20, 0x24),
+ regmap_reg_range(0x2c, 0x30),
+ regmap_reg_range(0x40, 0x44),
+ regmap_reg_range(0x50, 0x78),
+ regmap_reg_range(0x80, 0x98),
+
+ regmap_reg_range(0x100, 0x120),
+ regmap_reg_range(0x200, 0x220),
+ regmap_reg_range(0x300, 0x320),
+ regmap_reg_range(0x400, 0x420),
+ regmap_reg_range(0x500, 0x520),
+ regmap_reg_range(0x600, 0x620),
+
+ regmap_reg_range(0x20000, 0x200a4),
+ regmap_reg_range(0x20100, 0x201a4),
+ regmap_reg_range(0x20200, 0x202a4),
+ regmap_reg_range(0x20300, 0x203a4),
+ regmap_reg_range(0x20400, 0x204a4),
+ regmap_reg_range(0x20500, 0x205a4),
+
+ /* dummy page selector reg */
+ regmap_reg_range(AR9331_SW_REG_PAGE, AR9331_SW_REG_PAGE),
+};
+
+static const struct regmap_range ar9331_nonvolatile_regs[] = {
+ regmap_reg_range(AR9331_SW_REG_PAGE, AR9331_SW_REG_PAGE),
+};
+
+static const struct regmap_range_cfg ar9331_regmap_range[] = {
+ {
+ .selector_reg = AR9331_SW_REG_PAGE,
+ .selector_mask = GENMASK(9, 0),
+ .selector_shift = 0,
+
+ .window_start = 0,
+ .window_len = 512,
+
+ .range_min = 0,
+ .range_max = AR9331_SW_REG_PAGE - 4,
+ },
+};
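+
+/* A worked example of the paging above (illustrative): with a window of 512
+ * register addresses, regmap maps register 0x20000 to page 0x20000 / 512 =
+ * 0x100 with offset 0, and register 0x20100 to the same page 0x100 with
+ * offset 0x100. The page number is written to the AR9331_SW_REG_PAGE
+ * selector before the windowed access is performed.
+ */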
+
+static const struct regmap_access_table ar9331_register_set = {
+ .yes_ranges = ar9331_valid_regs,
+ .n_yes_ranges = ARRAY_SIZE(ar9331_valid_regs),
+};
+
+static const struct regmap_access_table ar9331_volatile_set = {
+ .no_ranges = ar9331_nonvolatile_regs,
+ .n_no_ranges = ARRAY_SIZE(ar9331_nonvolatile_regs),
+};
+
+static const struct regmap_config ar9331_mdio_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = AR9331_SW_REG_PAGE,
+
+ .ranges = ar9331_regmap_range,
+ .num_ranges = ARRAY_SIZE(ar9331_regmap_range),
+
+ .volatile_table = &ar9331_volatile_set,
+ .wr_table = &ar9331_register_set,
+ .rd_table = &ar9331_register_set,
+
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static struct regmap_bus ar9331_sw_bus = {
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .read = ar9331_mdio_read,
+ .write = ar9331_sw_bus_write,
+ .max_raw_read = 4,
+ .max_raw_write = 4,
+};
+
+static int ar9331_sw_probe(struct mdio_device *mdiodev)
+{
+ struct ar9331_sw_priv *priv;
+ struct dsa_switch *ds;
+ int ret, i;
+
+ priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regmap = devm_regmap_init(&mdiodev->dev, &ar9331_sw_bus, priv,
+ &ar9331_mdio_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ ret = PTR_ERR(priv->regmap);
+ dev_err(&mdiodev->dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ priv->sw_reset = devm_reset_control_get(&mdiodev->dev, "switch");
+ if (IS_ERR(priv->sw_reset)) {
+ dev_err(&mdiodev->dev, "missing switch reset\n");
+ return PTR_ERR(priv->sw_reset);
+ }
+
+ priv->sbus = mdiodev->bus;
+ priv->dev = &mdiodev->dev;
+
+ ret = ar9331_sw_irq_init(priv);
+ if (ret)
+ return ret;
+
+ ds = &priv->ds;
+ ds->dev = &mdiodev->dev;
+ ds->num_ports = AR9331_SW_PORTS;
+ ds->priv = priv;
+ priv->ops = ar9331_sw_ops;
+ ds->ops = &priv->ops;
+ dev_set_drvdata(&mdiodev->dev, priv);
+
+ for (i = 0; i < ARRAY_SIZE(priv->port); i++) {
+ struct ar9331_sw_port *port = &priv->port[i];
+
+ port->idx = i;
+ spin_lock_init(&port->stats_lock);
+ INIT_DELAYED_WORK(&port->mib_read, ar9331_do_stats_poll);
+ }
+
+ ret = dsa_register_switch(ds);
+ if (ret)
+ goto err_remove_irq;
+
+ return 0;
+
+err_remove_irq:
+ irq_domain_remove(priv->irqdomain);
+
+ return ret;
+}
+
+static void ar9331_sw_remove(struct mdio_device *mdiodev)
+{
+ struct ar9331_sw_priv *priv = dev_get_drvdata(&mdiodev->dev);
+ unsigned int i;
+
+ if (!priv)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(priv->port); i++) {
+ struct ar9331_sw_port *port = &priv->port[i];
+
+ cancel_delayed_work_sync(&port->mib_read);
+ }
+
+ irq_domain_remove(priv->irqdomain);
+ dsa_unregister_switch(&priv->ds);
+
+ reset_control_assert(priv->sw_reset);
+}
+
+static void ar9331_sw_shutdown(struct mdio_device *mdiodev)
+{
+ struct ar9331_sw_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(&priv->ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static const struct of_device_id ar9331_sw_of_match[] = {
+ { .compatible = "qca,ar9331-switch" },
+ { },
+};
+
+static struct mdio_driver ar9331_sw_mdio_driver = {
+ .probe = ar9331_sw_probe,
+ .remove = ar9331_sw_remove,
+ .shutdown = ar9331_sw_shutdown,
+ .mdiodrv.driver = {
+ .name = AR9331_SW_NAME,
+ .of_match_table = ar9331_sw_of_match,
+ },
+};
+
+mdio_module_driver(ar9331_sw_mdio_driver);
+
+MODULE_AUTHOR("Oleksij Rempel <kernel@pengutronix.de>");
+MODULE_DESCRIPTION("Driver for Atheros AR9331 switch");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
new file mode 100644
index 000000000..1e94ba103
--- /dev/null
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -0,0 +1,2090 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+#include <linux/bitfield.h>
+#include <linux/regmap.h>
+#include <net/dsa.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/mdio.h>
+#include <linux/phylink.h>
+#include <linux/gpio/consumer.h>
+#include <linux/etherdevice.h>
+#include <linux/dsa/tag_qca.h>
+
+#include "qca8k.h"
+
+static void
+qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
+{
+ regaddr >>= 1;
+ *r1 = regaddr & 0x1e;
+
+ regaddr >>= 5;
+ *r2 = regaddr & 0x7;
+
+ regaddr >>= 3;
+ *page = regaddr & 0x3ff;
+}
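+
+/* A worked example (illustrative): regaddr 0x16ac yields r1 = 0x16, r2 = 0x2
+ * and page = 0xb, so after selecting page 0xb the 32-bit value is accessed
+ * via MII address 0x10 | r2 = 0x12 at registers r1 and r1 + 1.
+ */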
+
+static int
+qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
+{
+ int ret;
+
+ ret = bus->read(bus, phy_id, regnum);
+ if (ret >= 0) {
+ *val = ret;
+ ret = bus->read(bus, phy_id, regnum + 1);
+ *val |= ret << 16;
+ }
+
+ if (ret < 0) {
+ dev_err_ratelimited(&bus->dev,
+ "failed to read qca8k 32bit register\n");
+ *val = 0;
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+qca8k_mii_write32(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
+{
+ u16 lo, hi;
+ int ret;
+
+ lo = val & 0xffff;
+ hi = (u16)(val >> 16);
+
+ ret = bus->write(bus, phy_id, regnum, lo);
+ if (ret >= 0)
+ ret = bus->write(bus, phy_id, regnum + 1, hi);
+ if (ret < 0)
+ dev_err_ratelimited(&bus->dev,
+ "failed to write qca8k 32bit register\n");
+}
+
+static int
+qca8k_set_page(struct qca8k_priv *priv, u16 page)
+{
+ u16 *cached_page = &priv->mdio_cache.page;
+ struct mii_bus *bus = priv->bus;
+ int ret;
+
+ if (page == *cached_page)
+ return 0;
+
+ ret = bus->write(bus, 0x18, 0, page);
+ if (ret < 0) {
+ dev_err_ratelimited(&bus->dev,
+ "failed to set qca8k page\n");
+ return ret;
+ }
+
+ *cached_page = page;
+ usleep_range(1000, 2000);
+ return 0;
+}
+
+static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
+{
+ struct qca8k_mgmt_eth_data *mgmt_eth_data;
+ struct qca8k_priv *priv = ds->priv;
+ struct qca_mgmt_ethhdr *mgmt_ethhdr;
+ u32 command;
+ u8 len, cmd;
+ int i;
+
+ mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb);
+ mgmt_eth_data = &priv->mgmt_eth_data;
+
+ command = get_unaligned_le32(&mgmt_ethhdr->command);
+ cmd = FIELD_GET(QCA_HDR_MGMT_CMD, command);
+
+ len = FIELD_GET(QCA_HDR_MGMT_LENGTH, command);
+	/* Special case for a len of 15, as this is the max value for len and
+	 * it needs to be incremented before converting it from words to
+	 * dwords.
+	 */
+ if (len == 15)
+ len++;
+
+	/* We can ignore odd values; we always round them up in the alloc function. */
+ len *= sizeof(u16);
+
+	/* Make sure the seq matches the requested packet */
+ if (get_unaligned_le32(&mgmt_ethhdr->seq) == mgmt_eth_data->seq)
+ mgmt_eth_data->ack = true;
+
+ if (cmd == MDIO_READ) {
+ u32 *val = mgmt_eth_data->data;
+
+ *val = get_unaligned_le32(&mgmt_ethhdr->mdio_data);
+
+		/* Get the rest of the 12 bytes of data.
+		 * The read/write function will extract the requested data.
+		 */
+ if (len > QCA_HDR_MGMT_DATA1_LEN) {
+ __le32 *data2 = (__le32 *)skb->data;
+ int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN,
+ len - QCA_HDR_MGMT_DATA1_LEN);
+
+ val++;
+
+ for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) {
+ *val = get_unaligned_le32(data2);
+ val++;
+ data2++;
+ }
+ }
+ }
+
+ complete(&mgmt_eth_data->rw_done);
+}
+
+static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
+ int priority, unsigned int len)
+{
+ struct qca_mgmt_ethhdr *mgmt_ethhdr;
+ unsigned int real_len;
+ struct sk_buff *skb;
+ __le32 *data2;
+ u32 command;
+ u16 hdr;
+ int i;
+
+ skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN);
+ if (!skb)
+ return NULL;
+
+	/* The hdr mgmt length value is in steps of word size.
+	 * As an example, to process 4 bytes of data the correct length to
+	 * set is 2. To process 8 bytes set 4, 12 bytes 6, 16 bytes 8...
+	 *
+	 * Odd values will always return the next size on the ack packet.
+	 * (a length of 3 (6 bytes) will always return 8 bytes of data)
+	 *
+	 * This means that a value of 15 (0xf) actually means reading/writing
+	 * 32 bytes of data.
+	 *
+	 * To correctly calculate the length we divide the requested len by
+	 * word size and round up.
+	 * In the ack function we can skip the odd check, as we already
+	 * handle that case here.
+	 */
+ real_len = DIV_ROUND_UP(len, sizeof(u16));
+
+	/* If the resulting len is odd, round up once more to the next size
+	 * (a length of 3 is increased to 4, as the switch will always return
+	 * 8 bytes).
+	 */
+ if (real_len % sizeof(u16) != 0)
+ real_len++;
+
+	/* The max length value is 0xf (15), but the switch will always return the next size (32 bytes) */
+ if (real_len == 16)
+ real_len--;
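+
+	/* Worked examples (illustrative): a requested len of 6 bytes gives
+	 * DIV_ROUND_UP(6, 2) = 3, rounded up to 4 (the switch would return
+	 * 8 bytes anyway); a requested len of 32 bytes gives 16, capped to
+	 * the maximum encodable value of 15 (0xf).
+	 */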
+
+ skb_reset_mac_header(skb);
+ skb_set_network_header(skb, skb->len);
+
+ mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN);
+
+ hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION);
+ hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority);
+ hdr |= QCA_HDR_XMIT_FROM_CPU;
+ hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0));
+ hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG);
+
+ command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
+ command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
+ command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
+ command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
+ QCA_HDR_MGMT_CHECK_CODE_VAL);
+
+ put_unaligned_le32(command, &mgmt_ethhdr->command);
+
+ if (cmd == MDIO_WRITE)
+ put_unaligned_le32(*val, &mgmt_ethhdr->mdio_data);
+
+ mgmt_ethhdr->hdr = htons(hdr);
+
+ data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN);
+ if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN) {
+ int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN,
+ len - QCA_HDR_MGMT_DATA1_LEN);
+
+ val++;
+
+ for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) {
+ put_unaligned_le32(*val, data2);
+ data2++;
+ val++;
+ }
+ }
+
+ return skb;
+}
+
+static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
+{
+ struct qca_mgmt_ethhdr *mgmt_ethhdr;
+ u32 seq;
+
+ seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
+ mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data;
+ put_unaligned_le32(seq, &mgmt_ethhdr->seq);
+}
+
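+/* Overview of the Ethernet management flow implemented by the helpers below:
+ * the caller allocates a request skb, takes the mgmt mutex, bumps and stamps
+ * the sequence number, queues the skb on the DSA master and waits on the
+ * rw_done completion. The qca tagger hands the switch's reply to
+ * qca8k_rw_reg_ack_handler() above, which matches the sequence number,
+ * copies out any read data and completes the waiter.
+ */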
+static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
+{
+ struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
+ struct sk_buff *skb;
+ bool ack;
+ int ret;
+
+ skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL,
+ QCA8K_ETHERNET_MDIO_PRIORITY, len);
+ if (!skb)
+ return -ENOMEM;
+
+ mutex_lock(&mgmt_eth_data->mutex);
+
+	/* Check if mgmt_master is operational */
+ if (!priv->mgmt_master) {
+ kfree_skb(skb);
+ mutex_unlock(&mgmt_eth_data->mutex);
+ return -EINVAL;
+ }
+
+ skb->dev = priv->mgmt_master;
+
+ reinit_completion(&mgmt_eth_data->rw_done);
+
+ /* Increment seq_num and set it in the mdio pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
+
+ dev_queue_xmit(skb);
+
+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
+
+ *val = mgmt_eth_data->data[0];
+ if (len > QCA_HDR_MGMT_DATA1_LEN)
+ memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN);
+
+ ack = mgmt_eth_data->ack;
+
+ mutex_unlock(&mgmt_eth_data->mutex);
+
+ if (ret <= 0)
+ return -ETIMEDOUT;
+
+ if (!ack)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
+{
+ struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
+ struct sk_buff *skb;
+ bool ack;
+ int ret;
+
+ skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val,
+ QCA8K_ETHERNET_MDIO_PRIORITY, len);
+ if (!skb)
+ return -ENOMEM;
+
+ mutex_lock(&mgmt_eth_data->mutex);
+
+	/* Check if mgmt_master is operational */
+ if (!priv->mgmt_master) {
+ kfree_skb(skb);
+ mutex_unlock(&mgmt_eth_data->mutex);
+ return -EINVAL;
+ }
+
+ skb->dev = priv->mgmt_master;
+
+ reinit_completion(&mgmt_eth_data->rw_done);
+
+ /* Increment seq_num and set it in the mdio pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
+
+ dev_queue_xmit(skb);
+
+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
+
+ ack = mgmt_eth_data->ack;
+
+ mutex_unlock(&mgmt_eth_data->mutex);
+
+ if (ret <= 0)
+ return -ETIMEDOUT;
+
+ if (!ack)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
+{
+ u32 val = 0;
+ int ret;
+
+ ret = qca8k_read_eth(priv, reg, &val, sizeof(val));
+ if (ret)
+ return ret;
+
+ val &= ~mask;
+ val |= write_val;
+
+ return qca8k_write_eth(priv, reg, &val, sizeof(val));
+}
+
+static int
+qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
+ struct mii_bus *bus = priv->bus;
+ u16 r1, r2, page;
+ int ret;
+
+ if (!qca8k_read_eth(priv, reg, val, sizeof(*val)))
+ return 0;
+
+ qca8k_split_addr(reg, &r1, &r2, &page);
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = qca8k_set_page(priv, page);
+ if (ret < 0)
+ goto exit;
+
+ ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val);
+
+exit:
+ mutex_unlock(&bus->mdio_lock);
+ return ret;
+}
+
+static int
+qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
+ struct mii_bus *bus = priv->bus;
+ u16 r1, r2, page;
+ int ret;
+
+ if (!qca8k_write_eth(priv, reg, &val, sizeof(val)))
+ return 0;
+
+ qca8k_split_addr(reg, &r1, &r2, &page);
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = qca8k_set_page(priv, page);
+ if (ret < 0)
+ goto exit;
+
+ qca8k_mii_write32(bus, 0x10 | r2, r1, val);
+
+exit:
+ mutex_unlock(&bus->mdio_lock);
+ return ret;
+}
+
+static int
+qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
+ struct mii_bus *bus = priv->bus;
+ u16 r1, r2, page;
+ u32 val;
+ int ret;
+
+ if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val))
+ return 0;
+
+ qca8k_split_addr(reg, &r1, &r2, &page);
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = qca8k_set_page(priv, page);
+ if (ret < 0)
+ goto exit;
+
+ ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
+ if (ret < 0)
+ goto exit;
+
+ val &= ~mask;
+ val |= write_val;
+ qca8k_mii_write32(bus, 0x10 | r2, r1, val);
+
+exit:
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static struct regmap_config qca8k_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x16ac, /* end MIB - Port6 range */
+ .reg_read = qca8k_regmap_read,
+ .reg_write = qca8k_regmap_write,
+ .reg_update_bits = qca8k_regmap_update_bits,
+ .rd_table = &qca8k_readable_table,
+ .disable_locking = true, /* Locking is handled by qca8k read/write */
+ .cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */
+};
+
+static int
+qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
+ struct sk_buff *read_skb, u32 *val)
+{
+ struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
+ bool ack;
+ int ret;
+
+ if (!skb)
+ return -ENOMEM;
+
+ reinit_completion(&mgmt_eth_data->rw_done);
+
+ /* Increment seq_num and set it in the copy pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
+
+ dev_queue_xmit(skb);
+
+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ QCA8K_ETHERNET_TIMEOUT);
+
+ ack = mgmt_eth_data->ack;
+
+ if (ret <= 0)
+ return -ETIMEDOUT;
+
+ if (!ack)
+ return -EINVAL;
+
+ *val = mgmt_eth_data->data[0];
+
+ return 0;
+}
+
+static int
+qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
+ int regnum, u16 data)
+{
+ struct sk_buff *write_skb, *clear_skb, *read_skb;
+ struct qca8k_mgmt_eth_data *mgmt_eth_data;
+ u32 write_val, clear_val = 0, val;
+ struct net_device *mgmt_master;
+ int ret, ret1;
+ bool ack;
+
+ if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
+ return -EINVAL;
+
+ mgmt_eth_data = &priv->mgmt_eth_data;
+
+ write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
+ QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
+ QCA8K_MDIO_MASTER_REG_ADDR(regnum);
+
+ if (read) {
+ write_val |= QCA8K_MDIO_MASTER_READ;
+ } else {
+ write_val |= QCA8K_MDIO_MASTER_WRITE;
+ write_val |= QCA8K_MDIO_MASTER_DATA(data);
+ }
+
+ /* Prealloc all the needed skb before the lock */
+ write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val,
+ QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val));
+ if (!write_skb)
+ return -ENOMEM;
+
+ clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val,
+ QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
+ if (!clear_skb) {
+ ret = -ENOMEM;
+ goto err_clear_skb;
+ }
+
+ read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val,
+ QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
+ if (!read_skb) {
+ ret = -ENOMEM;
+ goto err_read_skb;
+ }
+
+ /* It seems that accessing the switch's internal PHYs via management
+ * packets still uses the MDIO bus within the switch internally, and
+ * these accesses can conflict with external MDIO accesses to other
+ * devices on the MDIO bus.
+ * We therefore need to lock the MDIO bus onto which the switch is
+ * connected.
+ */
+ mutex_lock(&priv->bus->mdio_lock);
+
+ /* Actually start the request:
+ * 1. Send mdio master packet
+ * 2. Busy Wait for mdio master command
+ * 3. Get the data if we are reading
+ * 4. Reset the mdio master (even with error)
+ */
+ mutex_lock(&mgmt_eth_data->mutex);
+
+ /* Check if mgmt_master is operational */
+ mgmt_master = priv->mgmt_master;
+ if (!mgmt_master) {
+ mutex_unlock(&mgmt_eth_data->mutex);
+ mutex_unlock(&priv->bus->mdio_lock);
+ ret = -EINVAL;
+ goto err_mgmt_master;
+ }
+
+ read_skb->dev = mgmt_master;
+ clear_skb->dev = mgmt_master;
+ write_skb->dev = mgmt_master;
+
+ reinit_completion(&mgmt_eth_data->rw_done);
+
+ /* Increment seq_num and set it in the write pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
+
+ dev_queue_xmit(write_skb);
+
+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ QCA8K_ETHERNET_TIMEOUT);
+
+ ack = mgmt_eth_data->ack;
+
+ if (ret <= 0) {
+ ret = -ETIMEDOUT;
+ kfree_skb(read_skb);
+ goto exit;
+ }
+
+ if (!ack) {
+ ret = -EINVAL;
+ kfree_skb(read_skb);
+ goto exit;
+ }
+
+ ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
+ !(val & QCA8K_MDIO_MASTER_BUSY), 0,
+ QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
+ mgmt_eth_data, read_skb, &val);
+
+ if (ret < 0 && ret1 < 0) {
+ ret = ret1;
+ goto exit;
+ }
+
+ if (read) {
+ reinit_completion(&mgmt_eth_data->rw_done);
+
+ /* Increment seq_num and set it in the read pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
+
+ dev_queue_xmit(read_skb);
+
+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ QCA8K_ETHERNET_TIMEOUT);
+
+ ack = mgmt_eth_data->ack;
+
+ if (ret <= 0) {
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
+
+ if (!ack) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
+ } else {
+ kfree_skb(read_skb);
+ }
+exit:
+ reinit_completion(&mgmt_eth_data->rw_done);
+
+ /* Increment seq_num and set it in the clear pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
+
+ dev_queue_xmit(clear_skb);
+
+ wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ QCA8K_ETHERNET_TIMEOUT);
+
+ mutex_unlock(&mgmt_eth_data->mutex);
+ mutex_unlock(&priv->bus->mdio_lock);
+
+ return ret;
+
+ /* Error handling before lock */
+err_mgmt_master:
+ kfree_skb(read_skb);
+err_read_skb:
+ kfree_skb(clear_skb);
+err_clear_skb:
+ kfree_skb(write_skb);
+
+ return ret;
+}
+
+static u32
+qca8k_port_to_phy(int port)
+{
+ /* From Andrew Lunn:
+ * Port 0 has no internal phy.
+ * Port 1 has an internal PHY at MDIO address 0.
+ * Port 2 has an internal PHY at MDIO address 1.
+ * ...
+ * Port 5 has an internal PHY at MDIO address 4.
+ * Port 6 has no internal PHY.
+ */
+
+ return port - 1;
+}
+
+static int
+qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
+{
+ u16 r1, r2, page;
+ u32 val;
+ int ret, ret1;
+
+ qca8k_split_addr(reg, &r1, &r2, &page);
+
+ ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0,
+ QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
+ bus, 0x10 | r2, r1, &val);
+
+	/* Check if qca8k_read has failed for a different reason
+	 * before returning -ETIMEDOUT
+	 */
+ if (ret < 0 && ret1 < 0)
+ return ret1;
+
+ return ret;
+}
+
+static int
+qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
+{
+ struct mii_bus *bus = priv->bus;
+ u16 r1, r2, page;
+ u32 val;
+ int ret;
+
+ if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
+ return -EINVAL;
+
+ val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
+ QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
+ QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
+ QCA8K_MDIO_MASTER_DATA(data);
+
+ qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = qca8k_set_page(priv, page);
+ if (ret)
+ goto exit;
+
+ qca8k_mii_write32(bus, 0x10 | r2, r1, val);
+
+ ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
+ QCA8K_MDIO_MASTER_BUSY);
+
+exit:
+	/* Even if the busy_wait times out, try to clear MASTER_EN */
+ qca8k_mii_write32(bus, 0x10 | r2, r1, 0);
+
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static int
+qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
+{
+ struct mii_bus *bus = priv->bus;
+ u16 r1, r2, page;
+ u32 val;
+ int ret;
+
+ if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
+ return -EINVAL;
+
+ val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
+ QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
+ QCA8K_MDIO_MASTER_REG_ADDR(regnum);
+
+ qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = qca8k_set_page(priv, page);
+ if (ret)
+ goto exit;
+
+ qca8k_mii_write32(bus, 0x10 | r2, r1, val);
+
+ ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
+ QCA8K_MDIO_MASTER_BUSY);
+ if (ret)
+ goto exit;
+
+ ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
+
+exit:
+	/* Even if the busy_wait times out, try to clear MASTER_EN */
+ qca8k_mii_write32(bus, 0x10 | r2, r1, 0);
+
+ mutex_unlock(&bus->mdio_lock);
+
+ if (ret >= 0)
+ ret = val & QCA8K_MDIO_MASTER_DATA_MASK;
+
+ return ret;
+}
+
+static int
+qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
+{
+ struct qca8k_priv *priv = slave_bus->priv;
+ int ret;
+
+	/* Use Ethernet MDIO when available, fall back to the legacy one on error */
+ ret = qca8k_phy_eth_command(priv, false, phy, regnum, data);
+ if (!ret)
+ return 0;
+
+ return qca8k_mdio_write(priv, phy, regnum, data);
+}
+
+static int
+qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
+{
+ struct qca8k_priv *priv = slave_bus->priv;
+ int ret;
+
+	/* Use Ethernet MDIO when available, fall back to the legacy one on error */
+ ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0);
+ if (ret >= 0)
+ return ret;
+
+ ret = qca8k_mdio_read(priv, phy, regnum);
+
+ if (ret < 0)
+ return 0xffff;
+
+ return ret;
+}
+
+static int
+qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data)
+{
+ port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
+
+ return qca8k_internal_mdio_write(slave_bus, port, regnum, data);
+}
+
+static int
+qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum)
+{
+ port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
+
+ return qca8k_internal_mdio_read(slave_bus, port, regnum);
+}
+
+static int
+qca8k_mdio_register(struct qca8k_priv *priv)
+{
+ struct dsa_switch *ds = priv->ds;
+ struct device_node *mdio;
+ struct mii_bus *bus;
+
+ bus = devm_mdiobus_alloc(ds->dev);
+ if (!bus)
+ return -ENOMEM;
+
+ bus->priv = (void *)priv;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
+ ds->dst->index, ds->index);
+ bus->parent = ds->dev;
+ bus->phy_mask = ~ds->phys_mii_mask;
+ ds->slave_mii_bus = bus;
+
+	/* Check if the devicetree declares the port:phy mapping */
+ mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
+ if (of_device_is_available(mdio)) {
+ bus->name = "qca8k slave mii";
+ bus->read = qca8k_internal_mdio_read;
+ bus->write = qca8k_internal_mdio_write;
+ return devm_of_mdiobus_register(priv->dev, bus, mdio);
+ }
+
+	/* If a mapping can't be found, the legacy mapping is used via the
+	 * qca8k_port_to_phy function.
+	 */
+ bus->name = "qca8k-legacy slave mii";
+ bus->read = qca8k_legacy_mdio_read;
+ bus->write = qca8k_legacy_mdio_write;
+ return devm_mdiobus_register(priv->dev, bus);
+}
+
+static int
+qca8k_setup_mdio_bus(struct qca8k_priv *priv)
+{
+ u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
+ struct device_node *ports, *port;
+ phy_interface_t mode;
+ int err;
+
+ ports = of_get_child_by_name(priv->dev->of_node, "ports");
+ if (!ports)
+ ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports");
+
+ if (!ports)
+ return -EINVAL;
+
+ for_each_available_child_of_node(ports, port) {
+ err = of_property_read_u32(port, "reg", &reg);
+ if (err) {
+ of_node_put(port);
+ of_node_put(ports);
+ return err;
+ }
+
+ if (!dsa_is_user_port(priv->ds, reg))
+ continue;
+
+ of_get_phy_mode(port, &mode);
+
+ if (of_property_read_bool(port, "phy-handle") &&
+ mode != PHY_INTERFACE_MODE_INTERNAL)
+ external_mdio_mask |= BIT(reg);
+ else
+ internal_mdio_mask |= BIT(reg);
+ }
+
+ of_node_put(ports);
+ if (!external_mdio_mask && !internal_mdio_mask) {
+ dev_err(priv->dev, "no PHYs are defined.\n");
+ return -EINVAL;
+ }
+
+	/* The QCA8K_MDIO_MASTER_EN bit, which grants access to PHYs through
+	 * the MDIO_MASTER register, also _disconnects_ the external MDC
+	 * passthrough to the internal PHYs. It's not possible to use both
+	 * configurations at the same time!
+	 *
+	 * Because this came up during the review process:
+	 * If the external mdio-bus driver were capable of magically disabling
+	 * the QCA8K_MDIO_MASTER_EN bit and of mutex/spin-locking out the
+	 * qca8k's accessors for the time being, it would be possible to pull
+	 * this off.
+	 */
+ if (!!external_mdio_mask && !!internal_mdio_mask) {
+ dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
+ return -EINVAL;
+ }
+
+ if (external_mdio_mask) {
+		/* Make sure to disable the internal mdio bus in case
+		 * a dt-overlay and driver reload have changed the configuration
+		 */
+
+ return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
+ QCA8K_MDIO_MASTER_EN);
+ }
+
+ return qca8k_mdio_register(priv);
+}
+
+static int
+qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
+{
+ u32 mask = 0;
+ int ret = 0;
+
+	/* SoC specific settings for ipq8064.
+	 * If more devices require this, consider adding
+	 * a dedicated binding.
+	 */
+ if (of_machine_is_compatible("qcom,ipq8064"))
+ mask |= QCA8K_MAC_PWR_RGMII0_1_8V;
+
+ /* SoC specific settings for ipq8065 */
+ if (of_machine_is_compatible("qcom,ipq8065"))
+ mask |= QCA8K_MAC_PWR_RGMII1_1_8V;
+
+ if (mask) {
+ ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
+ QCA8K_MAC_PWR_RGMII0_1_8V |
+ QCA8K_MAC_PWR_RGMII1_1_8V,
+ mask);
+ }
+
+ return ret;
+}
+
+static int qca8k_find_cpu_port(struct dsa_switch *ds)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+	/* Find the connected CPU port. Valid ports are 0 or 6 */
+ if (dsa_is_cpu_port(ds, 0))
+ return 0;
+
+ dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");
+
+ if (dsa_is_cpu_port(ds, 6))
+ return 6;
+
+ return -EINVAL;
+}
+
+static int
+qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
+{
+ const struct qca8k_match_data *data = priv->info;
+ struct device_node *node = priv->dev->of_node;
+ u32 val = 0;
+ int ret;
+
+	/* The QCA8327 requires the correct package mode to be set.
+	 * Its bigger brother, the QCA8328, has the 172-pin layout.
+	 * This should be applied by default, but we set it just to make sure.
+	 */
+ if (priv->switch_id == QCA8K_ID_QCA8327) {
+ /* Set the correct package of 148 pin for QCA8327 */
+ if (data->reduced_package)
+ val |= QCA8327_PWS_PACKAGE148_EN;
+
+ ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
+ val);
+ if (ret)
+ return ret;
+ }
+
+ if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
+ val |= QCA8K_PWS_POWER_ON_SEL;
+
+ if (of_property_read_bool(node, "qca,led-open-drain")) {
+ if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
+			dev_err(priv->dev, "qca,led-open-drain requires qca,ignore-power-on-sel to be set.");
+ return -EINVAL;
+ }
+
+ val |= QCA8K_PWS_LED_OPEN_EN_CSR;
+ }
+
+ return qca8k_rmw(priv, QCA8K_REG_PWS,
+ QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
+ val);
+}
+
+static int
+qca8k_parse_port_config(struct qca8k_priv *priv)
+{
+ int port, cpu_port_index = -1, ret;
+ struct device_node *port_dn;
+ phy_interface_t mode;
+ struct dsa_port *dp;
+ u32 delay;
+
+	/* We have 2 CPU ports. Check them both. */
+ for (port = 0; port < QCA8K_NUM_PORTS; port++) {
+		/* Skip all non-CPU ports */
+ if (port != 0 && port != 6)
+ continue;
+
+ dp = dsa_to_port(priv->ds, port);
+ port_dn = dp->dn;
+ cpu_port_index++;
+
+ if (!of_device_is_available(port_dn))
+ continue;
+
+ ret = of_get_phy_mode(port_dn, &mode);
+ if (ret)
+ continue;
+
+ switch (mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_SGMII:
+ delay = 0;
+
+ if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
+ /* Switch regs accept value in ns, convert ps to ns */
+ delay = delay / 1000;
+ else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
+ mode == PHY_INTERFACE_MODE_RGMII_TXID)
+ delay = 1;
+
+ if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
+ dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
+ delay = 3;
+ }
+
+ priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;
+
+ delay = 0;
+
+ if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
+ /* Switch regs accept value in ns, convert ps to ns */
+ delay = delay / 1000;
+ else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
+ mode == PHY_INTERFACE_MODE_RGMII_RXID)
+ delay = 2;
+
+ if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
+ dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
+ delay = 3;
+ }
+
+ priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;
+
+ /* Skip sgmii parsing for rgmii* mode */
+ if (mode == PHY_INTERFACE_MODE_RGMII ||
+ mode == PHY_INTERFACE_MODE_RGMII_ID ||
+ mode == PHY_INTERFACE_MODE_RGMII_TXID ||
+ mode == PHY_INTERFACE_MODE_RGMII_RXID)
+ break;
+
+ if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
+ priv->ports_config.sgmii_tx_clk_falling_edge = true;
+
+ if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
+ priv->ports_config.sgmii_rx_clk_falling_edge = true;
+
+ if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
+ priv->ports_config.sgmii_enable_pll = true;
+
+ if (priv->switch_id == QCA8K_ID_QCA8327) {
+ dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
+ priv->ports_config.sgmii_enable_pll = false;
+ }
+
+ if (priv->switch_revision < 2)
+ dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more.");
+ }
+
+ break;
+ default:
+ continue;
+ }
+ }
+
+ return 0;
+}
+
+static void
+qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
+ u32 reg)
+{
+ u32 delay, val = 0;
+ int ret;
+
+	/* Delay can be declared in 3 different ways:
+	 * the phy mode set to rgmii plus the standard internal-delay
+	 * binding, or the rgmii-id, or the rgmii-tx/rx phy modes.
+	 * The parse logic sets a delay different from 0 only when one
+	 * of these 3 ways is used. In all other cases delay is not
+	 * enabled. With ID or TX/RXID the delay is enabled and set
+	 * to the default, recommended value.
+	 */
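+	/* Illustrative examples: phy-mode = "rgmii-id" with no explicit
+	 * delay properties ends up here as tx delay 1 (1 ns) and rx delay
+	 * 2 (2 ns); an explicit tx-internal-delay-ps = <2000> would have
+	 * been parsed as a tx delay of 2 ns.
+	 */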
+ if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) {
+ delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];
+
+ val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) |
+ QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;
+ }
+
+ if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) {
+ delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];
+
+ val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) |
+ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;
+ }
+
+ /* Set RGMII delay based on the selected values */
+ ret = qca8k_rmw(priv, reg,
+ QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
+ QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
+ QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
+ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
+ val);
+ if (ret)
+ dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
+ cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
+}
+
+static struct phylink_pcs *
+qca8k_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
+ phy_interface_t interface)
+{
+ struct qca8k_priv *priv = ds->priv;
+ struct phylink_pcs *pcs = NULL;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ switch (port) {
+ case 0:
+ pcs = &priv->pcs_port_0.pcs;
+ break;
+
+ case 6:
+ pcs = &priv->pcs_port_6.pcs;
+ break;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return pcs;
+}
+
+static void
+qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int cpu_port_index;
+ u32 reg;
+
+ switch (port) {
+ case 0: /* 1st CPU port */
+ if (state->interface != PHY_INTERFACE_MODE_RGMII &&
+ state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
+ state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
+ state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
+ state->interface != PHY_INTERFACE_MODE_SGMII)
+ return;
+
+ reg = QCA8K_REG_PORT0_PAD_CTRL;
+ cpu_port_index = QCA8K_CPU_PORT0;
+ break;
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ /* Internal PHY, nothing to do */
+ return;
+ case 6: /* 2nd CPU port / external PHY */
+ if (state->interface != PHY_INTERFACE_MODE_RGMII &&
+ state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
+ state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
+ state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
+ state->interface != PHY_INTERFACE_MODE_SGMII &&
+ state->interface != PHY_INTERFACE_MODE_1000BASEX)
+ return;
+
+ reg = QCA8K_REG_PORT6_PAD_CTRL;
+ cpu_port_index = QCA8K_CPU_PORT6;
+ break;
+ default:
+ dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
+ return;
+ }
+
+ if (port != 6 && phylink_autoneg_inband(mode)) {
+ dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
+ __func__);
+ return;
+ }
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
+
+ /* Configure rgmii delay */
+ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
+
+		/* The QCA8337 requires the rgmii rx delay to be set for all
+		 * ports. This is enabled through PORT5_PAD_CTRL for all
+		 * ports, rather than through individual port registers.
+		 */
+ if (priv->switch_id == QCA8K_ID_QCA8337)
+ qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
+ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ /* Enable SGMII on the port */
+ qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
+ break;
+ default:
+ dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
+ phy_modes(state->interface), port);
+ return;
+ }
+}
+
+static void qca8k_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ switch (port) {
+ case 0: /* 1st CPU port */
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+ break;
+
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ /* Internal PHY */
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ break;
+
+ case 6: /* 2nd CPU port / external PHY */
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ config->supported_interfaces);
+ break;
+ }
+
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD;
+
+ config->legacy_pre_march2020 = false;
+}
+
+static void
+qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ qca8k_port_set_status(priv, port, 0);
+}
+
+static void
+qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
+ phy_interface_t interface, struct phy_device *phydev,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
+{
+ struct qca8k_priv *priv = ds->priv;
+ u32 reg;
+
+ if (phylink_autoneg_inband(mode)) {
+ reg = QCA8K_PORT_STATUS_LINK_AUTO;
+ } else {
+ switch (speed) {
+ case SPEED_10:
+ reg = QCA8K_PORT_STATUS_SPEED_10;
+ break;
+ case SPEED_100:
+ reg = QCA8K_PORT_STATUS_SPEED_100;
+ break;
+ case SPEED_1000:
+ reg = QCA8K_PORT_STATUS_SPEED_1000;
+ break;
+ default:
+ reg = QCA8K_PORT_STATUS_LINK_AUTO;
+ break;
+ }
+
+ if (duplex == DUPLEX_FULL)
+ reg |= QCA8K_PORT_STATUS_DUPLEX;
+
+ if (rx_pause || dsa_is_cpu_port(ds, port))
+ reg |= QCA8K_PORT_STATUS_RXFLOW;
+
+ if (tx_pause || dsa_is_cpu_port(ds, port))
+ reg |= QCA8K_PORT_STATUS_TXFLOW;
+ }
+
+ reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
+
+ qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
+}
+
+static struct qca8k_pcs *pcs_to_qca8k_pcs(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct qca8k_pcs, pcs);
+}
+
+static void qca8k_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
+ int port = pcs_to_qca8k_pcs(pcs)->port;
+ u32 reg;
+ int ret;
+
+ ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
+ if (ret < 0) {
+ state->link = false;
+ return;
+ }
+
+ state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
+ state->an_complete = state->link;
+ state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
+ DUPLEX_HALF;
+
+ switch (reg & QCA8K_PORT_STATUS_SPEED) {
+ case QCA8K_PORT_STATUS_SPEED_10:
+ state->speed = SPEED_10;
+ break;
+ case QCA8K_PORT_STATUS_SPEED_100:
+ state->speed = SPEED_100;
+ break;
+ case QCA8K_PORT_STATUS_SPEED_1000:
+ state->speed = SPEED_1000;
+ break;
+ default:
+ state->speed = SPEED_UNKNOWN;
+ break;
+ }
+
+ if (reg & QCA8K_PORT_STATUS_RXFLOW)
+ state->pause |= MLO_PAUSE_RX;
+ if (reg & QCA8K_PORT_STATUS_TXFLOW)
+ state->pause |= MLO_PAUSE_TX;
+}
+
+static int qca8k_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
+ int cpu_port_index, ret, port;
+ u32 reg, val;
+
+ port = pcs_to_qca8k_pcs(pcs)->port;
+ switch (port) {
+ case 0:
+ reg = QCA8K_REG_PORT0_PAD_CTRL;
+ cpu_port_index = QCA8K_CPU_PORT0;
+ break;
+
+ case 6:
+ reg = QCA8K_REG_PORT6_PAD_CTRL;
+ cpu_port_index = QCA8K_CPU_PORT6;
+ break;
+
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ /* Enable/disable SerDes auto-negotiation as necessary */
+ ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
+ if (ret)
+ return ret;
+ if (phylink_autoneg_inband(mode))
+ val &= ~QCA8K_PWS_SERDES_AEN_DIS;
+ else
+ val |= QCA8K_PWS_SERDES_AEN_DIS;
+ qca8k_write(priv, QCA8K_REG_PWS, val);
+
+ /* Configure the SGMII parameters */
+ ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
+ if (ret)
+ return ret;
+
+ val |= QCA8K_SGMII_EN_SD;
+
+ if (priv->ports_config.sgmii_enable_pll)
+ val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
+ QCA8K_SGMII_EN_TX;
+
+ if (dsa_is_cpu_port(priv->ds, port)) {
+ /* CPU port, we're talking to the CPU MAC, be a PHY */
+ val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
+ val |= QCA8K_SGMII_MODE_CTRL_PHY;
+ } else if (interface == PHY_INTERFACE_MODE_SGMII) {
+ val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
+ val |= QCA8K_SGMII_MODE_CTRL_MAC;
+ } else if (interface == PHY_INTERFACE_MODE_1000BASEX) {
+ val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
+ val |= QCA8K_SGMII_MODE_CTRL_BASEX;
+ }
+
+ qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
+
+	/* The original code reports port instability unless a delay is also
+	 * set for SGMII. Apply the advised values here, or take them from DT.
+	 */
+ if (interface == PHY_INTERFACE_MODE_SGMII)
+ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
+	/* For qca8327/qca8328/qca8334/qca8338 the SGMII block is unique, and
+	 * the falling edge is configured by writing the PORT0 PAD reg.
+	 */
+ if (priv->switch_id == QCA8K_ID_QCA8327 ||
+ priv->switch_id == QCA8K_ID_QCA8337)
+ reg = QCA8K_REG_PORT0_PAD_CTRL;
+
+ val = 0;
+
+ /* SGMII Clock phase configuration */
+ if (priv->ports_config.sgmii_rx_clk_falling_edge)
+ val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
+
+ if (priv->ports_config.sgmii_tx_clk_falling_edge)
+ val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
+
+ if (val)
+ ret = qca8k_rmw(priv, reg,
+ QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
+ QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
+ val);
+
+ return 0;
+}
+
+static void qca8k_pcs_an_restart(struct phylink_pcs *pcs)
+{
+}
+
+static const struct phylink_pcs_ops qca8k_pcs_ops = {
+ .pcs_get_state = qca8k_pcs_get_state,
+ .pcs_config = qca8k_pcs_config,
+ .pcs_an_restart = qca8k_pcs_an_restart,
+};
+
+static void qca8k_setup_pcs(struct qca8k_priv *priv, struct qca8k_pcs *qpcs,
+ int port)
+{
+ qpcs->pcs.ops = &qca8k_pcs_ops;
+
+ /* We don't have interrupts for link changes, so we need to poll */
+ qpcs->pcs.poll = true;
+ qpcs->priv = priv;
+ qpcs->port = port;
+}
+
+static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb)
+{
+ struct qca8k_mib_eth_data *mib_eth_data;
+ struct qca8k_priv *priv = ds->priv;
+ const struct qca8k_mib_desc *mib;
+ struct mib_ethhdr *mib_ethhdr;
+ __le32 *data2;
+ u8 port;
+ int i;
+
+ mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
+ mib_eth_data = &priv->mib_eth_data;
+
+	/* The switch autocasts a MIB packet for every port. Ignore packets
+	 * from other ports and parse only the requested one.
+	 */
+ port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr));
+ if (port != mib_eth_data->req_port)
+ goto exit;
+
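+	/* The remaining counters follow the Ethernet header directly in the
+	 * packet payload.
+	 */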
+ data2 = (__le32 *)skb->data;
+
+ for (i = 0; i < priv->info->mib_count; i++) {
+ mib = &ar8327_mib[i];
+
+		/* The first 3 MIB counters are carried in the packet header */
+ if (i < 3) {
+ mib_eth_data->data[i] = get_unaligned_le32(mib_ethhdr->data + i);
+ continue;
+ }
+
+		/* Some MIB counters are 64 bits wide */
+ if (mib->size == 2)
+ mib_eth_data->data[i] = get_unaligned_le64((__le64 *)data2);
+ else
+ mib_eth_data->data[i] = get_unaligned_le32(data2);
+
+ data2 += mib->size;
+ }
+
+exit:
+	/* Complete once every port's MIB packet has been handled */
+ if (refcount_dec_and_test(&mib_eth_data->port_parsed))
+ complete(&mib_eth_data->rw_done);
+}
+
+static int
+qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct qca8k_mib_eth_data *mib_eth_data;
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
+
+ mib_eth_data = &priv->mib_eth_data;
+
+ mutex_lock(&mib_eth_data->mutex);
+
+ reinit_completion(&mib_eth_data->rw_done);
+
+ mib_eth_data->req_port = dp->index;
+ mib_eth_data->data = data;
+ refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);
+
+ mutex_lock(&priv->reg_mutex);
+
+ /* Send mib autocast request */
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
+ QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
+ FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) |
+ QCA8K_MIB_BUSY);
+
+ mutex_unlock(&priv->reg_mutex);
+
+ if (ret)
+ goto exit;
+
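+	/* wait_for_completion_timeout() returns the remaining jiffies on
+	 * success and 0 on timeout, so a positive return value here means
+	 * the full MIB dump was received in time.
+	 */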
+	ret = wait_for_completion_timeout(&mib_eth_data->rw_done,
+					  msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
+
+exit:
+ mutex_unlock(&mib_eth_data->mutex);
+
+ return ret;
+}
+
+static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+	/* Communicate the switch revision to the internal PHY driver.
+	 * Based on the switch revision, different values need to be set
+	 * in the dbg and mmd registers of the PHY.
+	 * The first 2 bits are used to pass the switch revision to the
+	 * PHY driver.
+	 */
+ if (port > 0 && port < 6)
+ return priv->switch_revision;
+
+ return 0;
+}
+
+static enum dsa_tag_protocol
+qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mp)
+{
+ return DSA_TAG_PROTO_QCA;
+}
+
+static void
+qca8k_master_change(struct dsa_switch *ds, const struct net_device *master,
+ bool operational)
+{
+ struct dsa_port *dp = master->dsa_ptr;
+ struct qca8k_priv *priv = ds->priv;
+
+ /* Ethernet MIB/MDIO is only supported for CPU port 0 */
+ if (dp->index != 0)
+ return;
+
+ mutex_lock(&priv->mgmt_eth_data.mutex);
+ mutex_lock(&priv->mib_eth_data.mutex);
+
+ priv->mgmt_master = operational ? (struct net_device *)master : NULL;
+
+ mutex_unlock(&priv->mib_eth_data.mutex);
+ mutex_unlock(&priv->mgmt_eth_data.mutex);
+}
+
+static int qca8k_connect_tag_protocol(struct dsa_switch *ds,
+ enum dsa_tag_protocol proto)
+{
+ struct qca_tagger_data *tagger_data;
+
+ switch (proto) {
+ case DSA_TAG_PROTO_QCA:
+ tagger_data = ds->tagger_data;
+
+ tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler;
+ tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler;
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+qca8k_setup(struct dsa_switch *ds)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ int cpu_port, ret, i;
+ u32 mask;
+
+ cpu_port = qca8k_find_cpu_port(ds);
+ if (cpu_port < 0) {
+		dev_err(priv->dev, "No CPU port configured on either port 0 or port 6");
+ return cpu_port;
+ }
+
+	/* Parse the CPU port config, used later in phylink mac_config */
+ ret = qca8k_parse_port_config(priv);
+ if (ret)
+ return ret;
+
+ ret = qca8k_setup_mdio_bus(priv);
+ if (ret)
+ return ret;
+
+ ret = qca8k_setup_of_pws_reg(priv);
+ if (ret)
+ return ret;
+
+ ret = qca8k_setup_mac_pwr_sel(priv);
+ if (ret)
+ return ret;
+
+ qca8k_setup_pcs(priv, &priv->pcs_port_0, 0);
+ qca8k_setup_pcs(priv, &priv->pcs_port_6, 6);
+
+ /* Make sure MAC06 is disabled */
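+	/* The exchange feature swaps the connections of the MAC0 and MAC6
+	 * pads; it is not handled by this driver, so it is assumed safest
+	 * to keep it disabled.
+	 */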
+ ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
+ QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
+ if (ret) {
+ dev_err(priv->dev, "failed disabling MAC06 exchange");
+ return ret;
+ }
+
+ /* Enable CPU Port */
+ ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
+ QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
+ if (ret) {
+ dev_err(priv->dev, "failed enabling CPU port");
+ return ret;
+ }
+
+ /* Enable MIB counters */
+ ret = qca8k_mib_init(priv);
+ if (ret)
+ dev_warn(priv->dev, "mib init failed");
+
+ /* Initial setup of all ports */
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ /* Disable forwarding by default on all ports */
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_MEMBER, 0);
+ if (ret)
+ return ret;
+
+ /* Enable QCA header mode on all cpu ports */
+ if (dsa_is_cpu_port(ds, i)) {
+ ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
+ FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
+ FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
+ if (ret) {
+ dev_err(priv->dev, "failed enabling QCA header mode");
+ return ret;
+ }
+ }
+
+ /* Disable MAC by default on all user ports */
+ if (dsa_is_user_port(ds, i))
+ qca8k_port_set_status(priv, i, 0);
+ }
+
+	/* Forward all unknown frames to the CPU port for Linux processing.
+	 * Note that in a multi-CPU config only one port should be set as
+	 * the destination for IGMP, unknown unicast, multicast and
+	 * broadcast packets.
+	 */
+ ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
+ if (ret)
+ return ret;
+
+ /* Setup connection between CPU port & user ports
+ * Configure specific switch configuration for ports
+ */
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ /* CPU port gets connected to all user ports of the switch */
+ if (dsa_is_cpu_port(ds, i)) {
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
+ if (ret)
+ return ret;
+ }
+
+ /* Individual user ports get connected to CPU port only */
+ if (dsa_is_user_port(ds, i)) {
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_MEMBER,
+ BIT(cpu_port));
+ if (ret)
+ return ret;
+
+ /* Enable ARP Auto-learning by default */
+ ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_LEARN);
+ if (ret)
+ return ret;
+
+			/* For port-based VLANs to work, we need to set the
+			 * default egress VID
+			 */
+ ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
+ QCA8K_EGREES_VLAN_PORT_MASK(i),
+ QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF));
+ if (ret)
+ return ret;
+
+ ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
+ QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
+ QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
+ if (ret)
+ return ret;
+ }
+
+		/* Port 5 of the qca8337 has some problems under flood
+		 * conditions. The original legacy driver had specific buffer
+		 * and priority settings for the different ports, suggested by
+		 * the QCA switch team. Add these missing settings to improve
+		 * switch stability under load. This problem is limited to the
+		 * qca8337; other qca8k switches are not affected.
+		 */
+ if (priv->switch_id == QCA8K_ID_QCA8337) {
+ switch (i) {
+			/* The 2 CPU ports and port 5 require different
+			 * priorities than the other ports.
+			 */
+ case 0:
+ case 5:
+ case 6:
+ mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
+ QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
+ break;
+ default:
+ mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
+ QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
+ }
+ qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask);
+
+ mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
+ QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
+ QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
+ QCA8K_PORT_HOL_CTRL1_WRED_EN;
+ qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
+ QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
+ QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
+ QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
+ QCA8K_PORT_HOL_CTRL1_WRED_EN,
+ mask);
+ }
+ }
+
+	/* Special GLOBAL_FC_THRESH values are needed for the ar8327 switch */
+ if (priv->switch_id == QCA8K_ID_QCA8327) {
+ mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
+ QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
+ qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
+ QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
+ QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
+ mask);
+ }
+
+	/* Set up the port MTUs to match the power-on defaults */
+	ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
+	if (ret)
+		dev_warn(priv->dev, "failed setting default max frame size");
+
+ /* Flush the FDB table */
+ qca8k_fdb_flush(priv);
+
+	/* Set the min and max ageing values supported */
+ ds->ageing_time_min = 7000;
+ ds->ageing_time_max = 458745000;
+
+ /* Set max number of LAGs supported */
+ ds->num_lag_ids = QCA8K_NUM_LAGS;
+
+ return 0;
+}
+
+static const struct dsa_switch_ops qca8k_switch_ops = {
+ .get_tag_protocol = qca8k_get_tag_protocol,
+ .setup = qca8k_setup,
+ .get_strings = qca8k_get_strings,
+ .get_ethtool_stats = qca8k_get_ethtool_stats,
+ .get_sset_count = qca8k_get_sset_count,
+ .set_ageing_time = qca8k_set_ageing_time,
+ .get_mac_eee = qca8k_get_mac_eee,
+ .set_mac_eee = qca8k_set_mac_eee,
+ .port_enable = qca8k_port_enable,
+ .port_disable = qca8k_port_disable,
+ .port_change_mtu = qca8k_port_change_mtu,
+ .port_max_mtu = qca8k_port_max_mtu,
+ .port_stp_state_set = qca8k_port_stp_state_set,
+ .port_bridge_join = qca8k_port_bridge_join,
+ .port_bridge_leave = qca8k_port_bridge_leave,
+ .port_fast_age = qca8k_port_fast_age,
+ .port_fdb_add = qca8k_port_fdb_add,
+ .port_fdb_del = qca8k_port_fdb_del,
+ .port_fdb_dump = qca8k_port_fdb_dump,
+ .port_mdb_add = qca8k_port_mdb_add,
+ .port_mdb_del = qca8k_port_mdb_del,
+ .port_mirror_add = qca8k_port_mirror_add,
+ .port_mirror_del = qca8k_port_mirror_del,
+ .port_vlan_filtering = qca8k_port_vlan_filtering,
+ .port_vlan_add = qca8k_port_vlan_add,
+ .port_vlan_del = qca8k_port_vlan_del,
+ .phylink_get_caps = qca8k_phylink_get_caps,
+ .phylink_mac_select_pcs = qca8k_phylink_mac_select_pcs,
+ .phylink_mac_config = qca8k_phylink_mac_config,
+ .phylink_mac_link_down = qca8k_phylink_mac_link_down,
+ .phylink_mac_link_up = qca8k_phylink_mac_link_up,
+ .get_phy_flags = qca8k_get_phy_flags,
+ .port_lag_join = qca8k_port_lag_join,
+ .port_lag_leave = qca8k_port_lag_leave,
+ .master_state_change = qca8k_master_change,
+ .connect_tag_protocol = qca8k_connect_tag_protocol,
+};
+
+static int
+qca8k_sw_probe(struct mdio_device *mdiodev)
+{
+ struct qca8k_priv *priv;
+ int ret;
+
+	/* Allocate the private data struct so that we can probe the switch's
+	 * ID register
+	 */
+ priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->bus = mdiodev->bus;
+ priv->dev = &mdiodev->dev;
+ priv->info = of_device_get_match_data(priv->dev);
+
+ priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
+ GPIOD_ASIS);
+ if (IS_ERR(priv->reset_gpio))
+ return PTR_ERR(priv->reset_gpio);
+
+ if (priv->reset_gpio) {
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ /* The active low duration must be greater than 10 ms
+ * and checkpatch.pl wants 20 ms.
+ */
+ msleep(20);
+ gpiod_set_value_cansleep(priv->reset_gpio, 0);
+ }
+
+ /* Start by setting up the register mapping */
+ priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv,
+ &qca8k_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ dev_err(priv->dev, "regmap initialization failed");
+ return PTR_ERR(priv->regmap);
+ }
+
+ priv->mdio_cache.page = 0xffff;
+
+ /* Check the detected switch id */
+ ret = qca8k_read_switch_id(priv);
+ if (ret)
+ return ret;
+
+ priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
+ if (!priv->ds)
+ return -ENOMEM;
+
+ mutex_init(&priv->mgmt_eth_data.mutex);
+ init_completion(&priv->mgmt_eth_data.rw_done);
+
+ mutex_init(&priv->mib_eth_data.mutex);
+ init_completion(&priv->mib_eth_data.rw_done);
+
+ priv->ds->dev = &mdiodev->dev;
+ priv->ds->num_ports = QCA8K_NUM_PORTS;
+ priv->ds->priv = priv;
+ priv->ds->ops = &qca8k_switch_ops;
+ mutex_init(&priv->reg_mutex);
+ dev_set_drvdata(&mdiodev->dev, priv);
+
+ return dsa_register_switch(priv->ds);
+}
+
+static void
+qca8k_sw_remove(struct mdio_device *mdiodev)
+{
+ struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
+ int i;
+
+ if (!priv)
+ return;
+
+ for (i = 0; i < QCA8K_NUM_PORTS; i++)
+ qca8k_port_set_status(priv, i, 0);
+
+ dsa_unregister_switch(priv->ds);
+}
+
+static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
+{
+ struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void
+qca8k_set_pm(struct qca8k_priv *priv, int enable)
+{
+ int port;
+
+ for (port = 0; port < QCA8K_NUM_PORTS; port++) {
+ /* Do not enable on resume if the port was
+ * disabled before.
+ */
+ if (!(priv->port_enabled_map & BIT(port)))
+ continue;
+
+ qca8k_port_set_status(priv, port, enable);
+ }
+}
+
+static int qca8k_suspend(struct device *dev)
+{
+ struct qca8k_priv *priv = dev_get_drvdata(dev);
+
+ qca8k_set_pm(priv, 0);
+
+ return dsa_switch_suspend(priv->ds);
+}
+
+static int qca8k_resume(struct device *dev)
+{
+ struct qca8k_priv *priv = dev_get_drvdata(dev);
+
+ qca8k_set_pm(priv, 1);
+
+ return dsa_switch_resume(priv->ds);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
+ qca8k_suspend, qca8k_resume);
+
+static const struct qca8k_info_ops qca8xxx_ops = {
+ .autocast_mib = qca8k_get_ethtool_stats_eth,
+ .read_eth = qca8k_read_eth,
+ .write_eth = qca8k_write_eth,
+};
+
+static const struct qca8k_match_data qca8327 = {
+ .id = QCA8K_ID_QCA8327,
+ .reduced_package = true,
+ .mib_count = QCA8K_QCA832X_MIB_COUNT,
+ .ops = &qca8xxx_ops,
+};
+
+static const struct qca8k_match_data qca8328 = {
+ .id = QCA8K_ID_QCA8327,
+ .mib_count = QCA8K_QCA832X_MIB_COUNT,
+ .ops = &qca8xxx_ops,
+};
+
+static const struct qca8k_match_data qca833x = {
+ .id = QCA8K_ID_QCA8337,
+ .mib_count = QCA8K_QCA833X_MIB_COUNT,
+ .ops = &qca8xxx_ops,
+};
+
+static const struct of_device_id qca8k_of_match[] = {
+ { .compatible = "qca,qca8327", .data = &qca8327 },
+ { .compatible = "qca,qca8328", .data = &qca8328 },
+ { .compatible = "qca,qca8334", .data = &qca833x },
+ { .compatible = "qca,qca8337", .data = &qca833x },
+ { /* sentinel */ },
+};
+
+static struct mdio_driver qca8kmdio_driver = {
+ .probe = qca8k_sw_probe,
+ .remove = qca8k_sw_remove,
+ .shutdown = qca8k_sw_shutdown,
+ .mdiodrv.driver = {
+ .name = "qca8k",
+ .of_match_table = qca8k_of_match,
+ .pm = &qca8k_pm_ops,
+ },
+};
+
+mdio_module_driver(qca8kmdio_driver);
+
+MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
+MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qca8k");
diff --git a/drivers/net/dsa/qca/qca8k-common.c b/drivers/net/dsa/qca/qca8k-common.c
new file mode 100644
index 000000000..7c3dd71a8
--- /dev/null
+++ b/drivers/net/dsa/qca/qca8k-common.c
@@ -0,0 +1,1234 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/netdevice.h>
+#include <net/dsa.h>
+#include <linux/if_bridge.h>
+
+#include "qca8k.h"
+
+#define MIB_DESC(_s, _o, _n) \
+ { \
+ .size = (_s), \
+ .offset = (_o), \
+ .name = (_n), \
+ }
+
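+/* The size field counts 32-bit words: 1 for normal counters, 2 for the
+ * 64-bit byte counters (read as low word then high word).
+ */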
+const struct qca8k_mib_desc ar8327_mib[] = {
+ MIB_DESC(1, 0x00, "RxBroad"),
+ MIB_DESC(1, 0x04, "RxPause"),
+ MIB_DESC(1, 0x08, "RxMulti"),
+ MIB_DESC(1, 0x0c, "RxFcsErr"),
+ MIB_DESC(1, 0x10, "RxAlignErr"),
+ MIB_DESC(1, 0x14, "RxRunt"),
+ MIB_DESC(1, 0x18, "RxFragment"),
+ MIB_DESC(1, 0x1c, "Rx64Byte"),
+ MIB_DESC(1, 0x20, "Rx128Byte"),
+ MIB_DESC(1, 0x24, "Rx256Byte"),
+ MIB_DESC(1, 0x28, "Rx512Byte"),
+ MIB_DESC(1, 0x2c, "Rx1024Byte"),
+ MIB_DESC(1, 0x30, "Rx1518Byte"),
+ MIB_DESC(1, 0x34, "RxMaxByte"),
+ MIB_DESC(1, 0x38, "RxTooLong"),
+ MIB_DESC(2, 0x3c, "RxGoodByte"),
+ MIB_DESC(2, 0x44, "RxBadByte"),
+ MIB_DESC(1, 0x4c, "RxOverFlow"),
+ MIB_DESC(1, 0x50, "Filtered"),
+ MIB_DESC(1, 0x54, "TxBroad"),
+ MIB_DESC(1, 0x58, "TxPause"),
+ MIB_DESC(1, 0x5c, "TxMulti"),
+ MIB_DESC(1, 0x60, "TxUnderRun"),
+ MIB_DESC(1, 0x64, "Tx64Byte"),
+ MIB_DESC(1, 0x68, "Tx128Byte"),
+ MIB_DESC(1, 0x6c, "Tx256Byte"),
+ MIB_DESC(1, 0x70, "Tx512Byte"),
+ MIB_DESC(1, 0x74, "Tx1024Byte"),
+ MIB_DESC(1, 0x78, "Tx1518Byte"),
+ MIB_DESC(1, 0x7c, "TxMaxByte"),
+ MIB_DESC(1, 0x80, "TxOverSize"),
+ MIB_DESC(2, 0x84, "TxByte"),
+ MIB_DESC(1, 0x8c, "TxCollision"),
+ MIB_DESC(1, 0x90, "TxAbortCol"),
+ MIB_DESC(1, 0x94, "TxMultiCol"),
+ MIB_DESC(1, 0x98, "TxSingleCol"),
+ MIB_DESC(1, 0x9c, "TxExcDefer"),
+ MIB_DESC(1, 0xa0, "TxDefer"),
+ MIB_DESC(1, 0xa4, "TxLateCol"),
+ MIB_DESC(1, 0xa8, "RXUnicast"),
+ MIB_DESC(1, 0xac, "TXUnicast"),
+};
+
+int qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
+{
+ return regmap_read(priv->regmap, reg, val);
+}
+
+int qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
+{
+ return regmap_write(priv->regmap, reg, val);
+}
+
+int qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
+{
+ return regmap_update_bits(priv->regmap, reg, mask, write_val);
+}
+
+static const struct regmap_range qca8k_readable_ranges[] = {
+ regmap_reg_range(0x0000, 0x00e4), /* Global control */
+ regmap_reg_range(0x0100, 0x0168), /* EEE control */
+ regmap_reg_range(0x0200, 0x0270), /* Parser control */
+ regmap_reg_range(0x0400, 0x0454), /* ACL */
+ regmap_reg_range(0x0600, 0x0718), /* Lookup */
+ regmap_reg_range(0x0800, 0x0b70), /* QM */
+ regmap_reg_range(0x0c00, 0x0c80), /* PKT */
+ regmap_reg_range(0x0e00, 0x0e98), /* L3 */
+ regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
+ regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
+ regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
+ regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
+ regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
+ regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
+ regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
+};
+
+const struct regmap_access_table qca8k_readable_table = {
+ .yes_ranges = qca8k_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
+};
+
+/* TODO: remove these extra ops when we can support regmap bulk read/write */
+static int qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
+{
+ int i, count = len / sizeof(u32), ret;
+
+ if (priv->mgmt_master && priv->info->ops->read_eth &&
+ !priv->info->ops->read_eth(priv, reg, val, len))
+ return 0;
+
+ for (i = 0; i < count; i++) {
+ ret = regmap_read(priv->regmap, reg + (i * 4), val + i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* TODO: remove these extra ops when we can support regmap bulk read/write */
+static int qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
+{
+ int i, count = len / sizeof(u32), ret;
+ u32 tmp;
+
+ if (priv->mgmt_master && priv->info->ops->write_eth &&
+ !priv->info->ops->write_eth(priv, reg, val, len))
+ return 0;
+
+ for (i = 0; i < count; i++) {
+ tmp = val[i];
+
+ ret = regmap_write(priv->regmap, reg + (i * 4), tmp);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
+{
+ u32 val;
+
+ return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
+ QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
+}
+
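+/* An FDB record spans the three 32-bit ATU_DATA registers; the bit ranges
+ * noted below refer to the concatenated 96-bit entry.
+ */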
+static int qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
+{
+ u32 reg[3];
+ int ret;
+
+ /* load the ARL table into an array */
+ ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
+ if (ret)
+ return ret;
+
+ /* vid - 83:72 */
+ fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
+ /* aging - 67:64 */
+ fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
+ /* portmask - 54:48 */
+ fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
+ /* mac - 47:0 */
+ fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
+ fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
+ fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
+ fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
+ fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
+ fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);
+
+ return 0;
+}
+
+static void qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask,
+ const u8 *mac, u8 aging)
+{
+ u32 reg[3] = { 0 };
+
+ /* vid - 83:72 */
+ reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
+ /* aging - 67:64 */
+ reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
+ /* portmask - 54:48 */
+ reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
+ /* mac - 47:0 */
+ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
+ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
+
+ /* load the array into the ARL table */
+ qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
+}
+
+static int qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd,
+ int port)
+{
+ u32 reg;
+ int ret;
+
+ /* Set the command and FDB index */
+ reg = QCA8K_ATU_FUNC_BUSY;
+ reg |= cmd;
+ if (port >= 0) {
+ reg |= QCA8K_ATU_FUNC_PORT_EN;
+ reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
+ }
+
+ /* Write the function register triggering the table access */
+ ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
+ if (ret)
+ return ret;
+
+ /* wait for completion */
+ ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
+ if (ret)
+ return ret;
+
+ /* Check for table full violation when adding an entry */
+ if (cmd == QCA8K_FDB_LOAD) {
+ ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, &reg);
+ if (ret < 0)
+ return ret;
+ if (reg & QCA8K_ATU_FUNC_FULL)
+			return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb,
+ int port)
+{
+ int ret;
+
+ qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
+ if (ret < 0)
+ return ret;
+
+ return qca8k_fdb_read(priv, fdb);
+}
+
+static int qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac,
+ u16 port_mask, u16 vid, u8 aging)
+{
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_write(priv, vid, port_mask, mac, aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+static int qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac,
+ u16 port_mask, u16 vid)
+{
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_write(priv, vid, port_mask, mac, 0);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+void qca8k_fdb_flush(struct qca8k_priv *priv)
+{
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
+ mutex_unlock(&priv->reg_mutex);
+}
+
+static int qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
+ const u8 *mac, u16 vid, u8 aging)
+{
+ struct qca8k_fdb fdb = { 0 };
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+
+ qca8k_fdb_write(priv, vid, 0, mac, 0);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
+ if (ret < 0)
+ goto exit;
+
+ ret = qca8k_fdb_read(priv, &fdb);
+ if (ret < 0)
+ goto exit;
+
+	/* Rule exists. Delete it first */
+ if (fdb.aging) {
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
+ if (ret)
+ goto exit;
+ } else {
+ fdb.aging = aging;
+ }
+
+ /* Add port to fdb portmask */
+ fdb.port_mask |= port_mask;
+
+ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
+
+exit:
+ mutex_unlock(&priv->reg_mutex);
+ return ret;
+}
+
+static int qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
+ const u8 *mac, u16 vid)
+{
+ struct qca8k_fdb fdb = { 0 };
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+
+ qca8k_fdb_write(priv, vid, 0, mac, 0);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
+ if (ret < 0)
+ goto exit;
+
+ ret = qca8k_fdb_read(priv, &fdb);
+ if (ret < 0)
+ goto exit;
+
+	/* Rule doesn't exist. Nothing to delete */
+ if (!fdb.aging) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
+ if (ret)
+ goto exit;
+
+	/* This port is the only one in the rule. Don't re-insert */
+ if (fdb.port_mask == port_mask)
+ goto exit;
+
+ /* Remove port from port mask */
+ fdb.port_mask &= ~port_mask;
+
+ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
+
+exit:
+ mutex_unlock(&priv->reg_mutex);
+ return ret;
+}
+
+static int qca8k_vlan_access(struct qca8k_priv *priv,
+ enum qca8k_vlan_cmd cmd, u16 vid)
+{
+ u32 reg;
+ int ret;
+
+ /* Set the command and VLAN index */
+ reg = QCA8K_VTU_FUNC1_BUSY;
+ reg |= cmd;
+ reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);
+
+ /* Write the function register triggering the table access */
+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
+ if (ret)
+ return ret;
+
+ /* wait for completion */
+ ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
+ if (ret)
+ return ret;
+
+ /* Check for table full violation when adding an entry */
+ if (cmd == QCA8K_VLAN_LOAD) {
+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, &reg);
+ if (ret < 0)
+ return ret;
+ if (reg & QCA8K_VTU_FUNC1_FULL)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid,
+ bool untagged)
+{
+ u32 reg;
+ int ret;
+
+ /* We do the right thing with VLAN 0 and treat it as untagged while
+ * preserving the tag on egress.
+ */
+ if (vid == 0)
+ return 0;
+
+ mutex_lock(&priv->reg_mutex);
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
+ if (ret < 0)
+ goto out;
+
+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
+ if (ret < 0)
+ goto out;
+ reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
+ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
+ if (untagged)
+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
+ else
+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);
+
+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
+ if (ret)
+ goto out;
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
+
+out:
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+static int qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
+{
+ u32 reg, mask;
+ int ret, i;
+ bool del;
+
+ mutex_lock(&priv->reg_mutex);
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
+ if (ret < 0)
+ goto out;
+
+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
+ if (ret < 0)
+ goto out;
+ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);
+
+ /* Check if we're the last member to be removed */
+ del = true;
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
+
+ if ((reg & mask) != mask) {
+ del = false;
+ break;
+ }
+ }
+
+ if (del) {
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
+ } else {
+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
+ if (ret)
+ goto out;
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
+ }
+
+out:
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+int qca8k_mib_init(struct qca8k_priv *priv)
+{
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
+ QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
+ FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) |
+ QCA8K_MIB_BUSY);
+ if (ret)
+ goto exit;
+
+ ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
+ if (ret)
+ goto exit;
+
+ ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
+ if (ret)
+ goto exit;
+
+ ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
+
+exit:
+ mutex_unlock(&priv->reg_mutex);
+ return ret;
+}
+
+void qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
+{
+ u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
+
+ /* Port 0 and 6 have no internal PHY */
+ if (port > 0 && port < 6)
+ mask |= QCA8K_PORT_STATUS_LINK_AUTO;
+
+ if (enable)
+ regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
+ else
+ regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
+}
+
+void qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < priv->info->mib_count; i++)
+ strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
+ ETH_GSTRING_LEN);
+}
+
+void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct qca8k_priv *priv = ds->priv;
+ const struct qca8k_mib_desc *mib;
+ u32 reg, i, val;
+ u32 hi = 0;
+ int ret;
+
+ if (priv->mgmt_master && priv->info->ops->autocast_mib &&
+ priv->info->ops->autocast_mib(ds, port, data) > 0)
+ return;
+
+ for (i = 0; i < priv->info->mib_count; i++) {
+ mib = &ar8327_mib[i];
+ reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
+
+ ret = qca8k_read(priv, reg, &val);
+ if (ret < 0)
+ continue;
+
+ if (mib->size == 2) {
+ ret = qca8k_read(priv, reg + 4, &hi);
+ if (ret < 0)
+ continue;
+ }
+
+ data[i] = val;
+ if (mib->size == 2)
+ data[i] |= (u64)hi << 32;
+ }
+}
+
+int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ return priv->info->mib_count;
+}
+
+int qca8k_set_mac_eee(struct dsa_switch *ds, int port,
+ struct ethtool_eee *eee)
+{
+ u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
+ struct qca8k_priv *priv = ds->priv;
+ u32 reg;
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+ ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, &reg);
+ if (ret < 0)
+ goto exit;
+
+ if (eee->eee_enabled)
+ reg |= lpi_en;
+ else
+ reg &= ~lpi_en;
+ ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
+
+exit:
+ mutex_unlock(&priv->reg_mutex);
+ return ret;
+}
+
+int qca8k_get_mac_eee(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e)
+{
+ /* Nothing to do on the port's MAC */
+ return 0;
+}
+
+void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct qca8k_priv *priv = ds->priv;
+ u32 stp_state;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
+ break;
+ case BR_STATE_BLOCKING:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
+ break;
+ case BR_STATE_LISTENING:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
+ break;
+ case BR_STATE_LEARNING:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ default:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
+ break;
+ }
+
+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
+}
+
+int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int port_mask, cpu_port;
+ int i, ret;
+
+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+ port_mask = BIT(cpu_port);
+
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ continue;
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
+ continue;
+ /* Add this port to the portvlan mask of the other ports
+ * in the bridge
+ */
+ ret = regmap_set_bits(priv->regmap,
+ QCA8K_PORT_LOOKUP_CTRL(i),
+ BIT(port));
+ if (ret)
+ return ret;
+ if (i != port)
+ port_mask |= BIT(i);
+ }
+
+	/* Add all other ports to this port's portvlan mask */
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_MEMBER, port_mask);
+
+ return ret;
+}
+
+void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int cpu_port, i;
+
+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ continue;
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
+ continue;
+		/* Remove this port from the portvlan mask of the other
+		 * ports in the bridge
+		 */
+ regmap_clear_bits(priv->regmap,
+ QCA8K_PORT_LOOKUP_CTRL(i),
+ BIT(port));
+ }
+
+ /* Set the cpu port to be the only one in the portvlan mask of
+ * this port
+ */
+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
+}
+
+void qca8k_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
+ mutex_unlock(&priv->reg_mutex);
+}
+
+int qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
+{
+ struct qca8k_priv *priv = ds->priv;
+ unsigned int secs = msecs / 1000;
+ u32 val;
+
+	/* The AGE_TIME reg is set in 7 s steps */
+ val = secs / 7;
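+	/* e.g. the bridge default of 300 s becomes an AGE_TIME of 42 */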
+
+	/* Round a result of 0 up to 1 so that learning is NOT
+	 * disabled
+	 */
+ if (!val)
+ val = 1;
+
+ return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL,
+ QCA8K_ATU_AGE_TIME_MASK,
+ QCA8K_ATU_AGE_TIME(val));
+}
+
+int qca8k_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ qca8k_port_set_status(priv, port, 1);
+ priv->port_enabled_map |= BIT(port);
+
+ if (dsa_is_user_port(ds, port))
+ phy_support_asym_pause(phy);
+
+ return 0;
+}
+
+void qca8k_port_disable(struct dsa_switch *ds, int port)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ qca8k_port_set_status(priv, port, 0);
+ priv->port_enabled_map &= ~BIT(port);
+}
+
+int qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
+
+	/* We only have a global MTU setting.
+	 * DSA always sets the CPU port's MTU to the largest MTU of the user
+	 * ports.
+	 * Setting the MTU just for the CPU port is therefore sufficient to
+	 * correctly set a value for every port.
+	 */
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
+
+	/* To change MAX_FRAME_SIZE the CPU ports must be off or the
+	 * switch panics.
+	 * Turn off both CPU ports before applying the new value to prevent
+	 * this.
+	 */
+ if (priv->port_enabled_map & BIT(0))
+ qca8k_port_set_status(priv, 0, 0);
+
+ if (priv->port_enabled_map & BIT(6))
+ qca8k_port_set_status(priv, 6, 0);
+
+ /* Include L2 header / FCS length */
+ ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu +
+ ETH_HLEN + ETH_FCS_LEN);
+
+ if (priv->port_enabled_map & BIT(0))
+ qca8k_port_set_status(priv, 0, 1);
+
+ if (priv->port_enabled_map & BIT(6))
+ qca8k_port_set_status(priv, 6, 1);
+
+ return ret;
+}
+
+int qca8k_port_max_mtu(struct dsa_switch *ds, int port)
+{
+ return QCA8K_MAX_MTU;
+}
+
+int qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
+ u16 port_mask, u16 vid)
+{
+ /* Set the vid to the port vlan id if no vid is set */
+ if (!vid)
+ vid = QCA8K_PORT_VID_DEF;
+
+ return qca8k_fdb_add(priv, addr, port_mask, vid,
+ QCA8K_ATU_STATUS_STATIC);
+}
+
+int qca8k_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ u16 port_mask = BIT(port);
+
+ return qca8k_port_fdb_insert(priv, addr, port_mask, vid);
+}
+
+int qca8k_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ u16 port_mask = BIT(port);
+
+ if (!vid)
+ vid = QCA8K_PORT_VID_DEF;
+
+ return qca8k_fdb_del(priv, addr, port_mask, vid);
+}
+
+int qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ struct qca8k_fdb _fdb = { 0 };
+ int cnt = QCA8K_NUM_FDB_RECORDS;
+ bool is_static;
+ int ret = 0;
+
+ mutex_lock(&priv->reg_mutex);
+ while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
+ if (!_fdb.aging)
+ break;
+ is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
+ ret = cb(_fdb.mac, _fdb.vid, is_static, data);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&priv->reg_mutex);
+
+ return 0;
+}
+
+int qca8k_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct qca8k_priv *priv = ds->priv;
+ const u8 *addr = mdb->addr;
+ u16 vid = mdb->vid;
+
+ if (!vid)
+ vid = QCA8K_PORT_VID_DEF;
+
+ return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid,
+ QCA8K_ATU_STATUS_STATIC);
+}
+
+int qca8k_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct qca8k_priv *priv = ds->priv;
+ const u8 *addr = mdb->addr;
+ u16 vid = mdb->vid;
+
+ if (!vid)
+ vid = QCA8K_PORT_VID_DEF;
+
+ return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
+}
+
+int qca8k_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int monitor_port, ret;
+ u32 reg, val;
+
+ /* Check for existent entry */
+ if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
+ return -EEXIST;
+
+ ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
+ if (ret)
+ return ret;
+
+	/* QCA83xx can have only one port set to mirror mode.
+	 * Check that the correct port is requested and return an error
+	 * otherwise. When no mirror port is set, the value is 0xF.
+	 */
+ monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
+ if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
+ return -EEXIST;
+
+ /* Set the monitor port */
+ val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
+ mirror->to_local_port);
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
+ QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
+ if (ret)
+ return ret;
+
+ if (ingress) {
+ reg = QCA8K_PORT_LOOKUP_CTRL(port);
+ val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
+ } else {
+ reg = QCA8K_REG_PORT_HOL_CTRL1(port);
+ val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
+ }
+
+ ret = regmap_update_bits(priv->regmap, reg, val, val);
+ if (ret)
+ return ret;
+
+ /* Track mirror port for tx and rx to decide when the
+ * mirror port has to be disabled.
+ */
+ if (ingress)
+ priv->mirror_rx |= BIT(port);
+ else
+ priv->mirror_tx |= BIT(port);
+
+ return 0;
+}
+
+void qca8k_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct qca8k_priv *priv = ds->priv;
+ u32 reg, val;
+ int ret;
+
+ if (mirror->ingress) {
+ reg = QCA8K_PORT_LOOKUP_CTRL(port);
+ val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
+ } else {
+ reg = QCA8K_REG_PORT_HOL_CTRL1(port);
+ val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
+ }
+
+ ret = regmap_clear_bits(priv->regmap, reg, val);
+ if (ret)
+ goto err;
+
+ if (mirror->ingress)
+ priv->mirror_rx &= ~BIT(port);
+ else
+ priv->mirror_tx &= ~BIT(port);
+
+	/* No port is sending packets to the mirror port anymore. Disable it */
+ if (!priv->mirror_rx && !priv->mirror_tx) {
+ val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF);
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
+ QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
+ if (ret)
+ goto err;
+	}
+
+	return;
+
+err:
+	dev_err(priv->dev, "Failed to del mirror port from %d", port);
+}
+
+int qca8k_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering,
+ struct netlink_ext_ack *extack)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
+
+ if (vlan_filtering) {
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
+ QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
+ } else {
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
+ QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
+ }
+
+ return ret;
+}
+
+int qca8k_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
+
+ ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
+ if (ret) {
+ dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
+ return ret;
+ }
+
+ if (pvid) {
+ ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
+ QCA8K_EGREES_VLAN_PORT_MASK(port),
+ QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
+ if (ret)
+ return ret;
+
+ ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
+ QCA8K_PORT_VLAN_CVID(vlan->vid) |
+ QCA8K_PORT_VLAN_SVID(vlan->vid));
+ }
+
+ return ret;
+}
+
+int qca8k_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
+
+ ret = qca8k_vlan_del(priv, port, vlan->vid);
+ if (ret)
+ dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
+
+ return ret;
+}
+
+static bool qca8k_lag_can_offload(struct dsa_switch *ds,
+ struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
+{
+ struct dsa_port *dp;
+ int members = 0;
+
+ if (!lag.id)
+ return false;
+
+ dsa_lag_foreach_port(dp, ds->dst, &lag)
+ /* Includes the port joining the LAG */
+ members++;
+
+ if (members > QCA8K_NUM_PORTS_FOR_LAG) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload more than 4 LAG ports");
+ return false;
+ }
+
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload LAG using hash TX type");
+ return false;
+ }
+
+ if (info->hash_type != NETDEV_LAG_HASH_L2 &&
+ info->hash_type != NETDEV_LAG_HASH_L23) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload L2 or L2+L3 TX hash");
+ return false;
+ }
+
+ return true;
+}
+
+static int qca8k_lag_setup_hash(struct dsa_switch *ds,
+ struct dsa_lag lag,
+ struct netdev_lag_upper_info *info)
+{
+ struct net_device *lag_dev = lag.dev;
+ struct qca8k_priv *priv = ds->priv;
+ bool unique_lag = true;
+ unsigned int i;
+ u32 hash = 0;
+
+ switch (info->hash_type) {
+ case NETDEV_LAG_HASH_L23:
+ hash |= QCA8K_TRUNK_HASH_SIP_EN;
+ hash |= QCA8K_TRUNK_HASH_DIP_EN;
+ fallthrough;
+ case NETDEV_LAG_HASH_L2:
+ hash |= QCA8K_TRUNK_HASH_SA_EN;
+ hash |= QCA8K_TRUNK_HASH_DA_EN;
+ break;
+ default: /* We should NEVER reach this */
+ return -EOPNOTSUPP;
+ }
+
+ /* Check if we are the unique configured LAG */
+ dsa_lags_foreach_id(i, ds->dst)
+ if (i != lag.id && dsa_lag_by_id(ds->dst, i)) {
+ unique_lag = false;
+ break;
+ }
+
+	/* The hash mode is global. Make sure the same hash mode is set
+	 * for all 4 possible LAGs.
+	 * If we are the only configured LAG we can set whatever hash
+	 * mode we want.
+	 * To change the hash mode, all LAGs must be removed and recreated
+	 * with the new mode.
+	 */
+ if (unique_lag) {
+ priv->lag_hash_mode = hash;
+ } else if (priv->lag_hash_mode != hash) {
+ netdev_err(lag_dev, "Error: Mismatched Hash Mode across different lag is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
+ QCA8K_TRUNK_HASH_MASK, hash);
+}
+
+static int qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
+ struct dsa_lag lag, bool delete)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int ret, id, i;
+ u32 val;
+
+ /* DSA LAG IDs are one-based, hardware is zero-based */
+ id = lag.id - 1;
+
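+	/* Trunk membership is kept in two places: GOL_TRUNK_CTRL0 holds a
+	 * member bitmap plus an enable bit per trunk, while the GOL_TRUNK_CTRL
+	 * registers hold 4 member slots per trunk carrying the actual port
+	 * numbers.
+	 */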
+ /* Read current port member */
+ ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
+ if (ret)
+ return ret;
+
+ /* Shift val to the correct trunk */
+ val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
+ val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
+ if (delete)
+ val &= ~BIT(port);
+ else
+ val |= BIT(port);
+
+	/* Update the port members. Disable the trunk if the portmap
+	 * becomes empty.
+	 */
+	ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
+				 QCA8K_REG_GOL_TRUNK_MEMBER(id) |
+				 QCA8K_REG_GOL_TRUNK_EN(id),
+				 !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
+				 val << QCA8K_REG_GOL_TRUNK_SHIFT(id));
+	if (ret)
+		return ret;
+
+	/* Search for an empty member when adding, or for the port when deleting */
+ for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
+ ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
+ if (ret)
+ return ret;
+
+ val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
+ val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;
+
+ if (delete) {
+			/* If the port is flagged as disabled, assume this
+			 * member is empty
+			 */
+ if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
+ continue;
+
+ val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
+ if (val != port)
+ continue;
+ } else {
+			/* If the port is flagged as enabled, assume this
+			 * member is already set
+			 */
+ if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
+ continue;
+ }
+
+ /* We have found the member to add/remove */
+ break;
+ }
+
+ /* Set port in the correct port mask or disable port if in delete mode */
+ return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
+ QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
+ QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
+ !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
+ port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));
+}
+
+int qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
+{
+ int ret;
+
+ if (!qca8k_lag_can_offload(ds, lag, info, extack))
+ return -EOPNOTSUPP;
+
+ ret = qca8k_lag_setup_hash(ds, lag, info);
+ if (ret)
+ return ret;
+
+ return qca8k_lag_refresh_portmap(ds, port, lag, false);
+}
+
+int qca8k_port_lag_leave(struct dsa_switch *ds, int port,
+ struct dsa_lag lag)
+{
+ return qca8k_lag_refresh_portmap(ds, port, lag, true);
+}
+
+int qca8k_read_switch_id(struct qca8k_priv *priv)
+{
+ u32 val;
+ u8 id;
+ int ret;
+
+ if (!priv->info)
+ return -ENODEV;
+
+ ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
+ if (ret < 0)
+ return -ENODEV;
+
+ id = QCA8K_MASK_CTRL_DEVICE_ID(val);
+ if (id != priv->info->id) {
+ dev_err(priv->dev,
+ "Switch id detected %x but expected %x",
+ id, priv->info->id);
+ return -ENODEV;
+ }
+
+ priv->switch_id = id;
+
+ /* Save revision to communicate to the internal PHY driver */
+ priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);
+
+ return 0;
+}
diff --git a/drivers/net/dsa/qca/qca8k.h b/drivers/net/dsa/qca/qca8k.h
new file mode 100644
index 000000000..03514f7a2
--- /dev/null
+++ b/drivers/net/dsa/qca/qca8k.h
@@ -0,0 +1,515 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __QCA8K_H
+#define __QCA8K_H
+
+#include <linux/delay.h>
+#include <linux/regmap.h>
+#include <linux/gpio.h>
+#include <linux/dsa/tag_qca.h>
+
+#define QCA8K_ETHERNET_MDIO_PRIORITY 7
+#define QCA8K_ETHERNET_PHY_PRIORITY 6
+#define QCA8K_ETHERNET_TIMEOUT 5
+
+#define QCA8K_NUM_PORTS 7
+#define QCA8K_NUM_CPU_PORTS 2
+#define QCA8K_MAX_MTU 9000
+#define QCA8K_NUM_LAGS 4
+#define QCA8K_NUM_PORTS_FOR_LAG 4
+
+#define PHY_ID_QCA8327 0x004dd034
+#define QCA8K_ID_QCA8327 0x12
+#define PHY_ID_QCA8337 0x004dd036
+#define QCA8K_ID_QCA8337 0x13
+
+#define QCA8K_QCA832X_MIB_COUNT 39
+#define QCA8K_QCA833X_MIB_COUNT 41
+
+#define QCA8K_BUSY_WAIT_TIMEOUT 2000
+
+#define QCA8K_NUM_FDB_RECORDS 2048
+
+#define QCA8K_PORT_VID_DEF 1
+
+/* Global control registers */
+#define QCA8K_REG_MASK_CTRL 0x000
+#define QCA8K_MASK_CTRL_REV_ID_MASK GENMASK(7, 0)
+#define QCA8K_MASK_CTRL_REV_ID(x) FIELD_GET(QCA8K_MASK_CTRL_REV_ID_MASK, x)
+#define QCA8K_MASK_CTRL_DEVICE_ID_MASK GENMASK(15, 8)
+#define QCA8K_MASK_CTRL_DEVICE_ID(x) FIELD_GET(QCA8K_MASK_CTRL_DEVICE_ID_MASK, x)
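+/* e.g. a QCA8337 rev 2 would read back as 0x1302: DEVICE_ID 0x13,
+ * REV_ID 0x02 (the revision value here is only illustrative)
+ */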
+#define QCA8K_REG_PORT0_PAD_CTRL 0x004
+#define QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN BIT(31)
+#define QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE BIT(19)
+#define QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE BIT(18)
+#define QCA8K_REG_PORT5_PAD_CTRL 0x008
+#define QCA8K_REG_PORT6_PAD_CTRL 0x00c
+#define QCA8K_PORT_PAD_RGMII_EN BIT(26)
+#define QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK GENMASK(23, 22)
+#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) FIELD_PREP(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, x)
+#define QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK GENMASK(21, 20)
+#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) FIELD_PREP(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, x)
+#define QCA8K_PORT_PAD_RGMII_TX_DELAY_EN BIT(25)
+#define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24)
+#define QCA8K_PORT_PAD_SGMII_EN BIT(7)
+#define QCA8K_REG_PWS 0x010
+#define QCA8K_PWS_POWER_ON_SEL BIT(31)
+/* This reg is only valid for QCA832x and toggles the package type from
+ * 176 pins (the default) to the 148-pin package used on the QCA8327.
+ */
+#define QCA8327_PWS_PACKAGE148_EN BIT(30)
+#define QCA8K_PWS_LED_OPEN_EN_CSR BIT(24)
+#define QCA8K_PWS_SERDES_AEN_DIS BIT(7)
+#define QCA8K_REG_MODULE_EN 0x030
+#define QCA8K_MODULE_EN_MIB BIT(0)
+#define QCA8K_REG_MIB 0x034
+#define QCA8K_MIB_FUNC GENMASK(26, 24)
+#define QCA8K_MIB_CPU_KEEP BIT(20)
+#define QCA8K_MIB_BUSY BIT(17)
+#define QCA8K_MDIO_MASTER_CTRL 0x3c
+#define QCA8K_MDIO_MASTER_BUSY BIT(31)
+#define QCA8K_MDIO_MASTER_EN BIT(30)
+#define QCA8K_MDIO_MASTER_READ BIT(27)
+#define QCA8K_MDIO_MASTER_WRITE 0
+#define QCA8K_MDIO_MASTER_SUP_PRE BIT(26)
+#define QCA8K_MDIO_MASTER_PHY_ADDR_MASK GENMASK(25, 21)
+#define QCA8K_MDIO_MASTER_PHY_ADDR(x) FIELD_PREP(QCA8K_MDIO_MASTER_PHY_ADDR_MASK, x)
+#define QCA8K_MDIO_MASTER_REG_ADDR_MASK GENMASK(20, 16)
+#define QCA8K_MDIO_MASTER_REG_ADDR(x) FIELD_PREP(QCA8K_MDIO_MASTER_REG_ADDR_MASK, x)
+#define QCA8K_MDIO_MASTER_DATA_MASK GENMASK(15, 0)
+#define QCA8K_MDIO_MASTER_DATA(x) FIELD_PREP(QCA8K_MDIO_MASTER_DATA_MASK, x)
+#define QCA8K_MDIO_MASTER_MAX_PORTS 5
+#define QCA8K_MDIO_MASTER_MAX_REG 32
+#define QCA8K_GOL_MAC_ADDR0 0x60
+#define QCA8K_GOL_MAC_ADDR1 0x64
+#define QCA8K_MAX_FRAME_SIZE 0x78
+#define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4)
+#define QCA8K_PORT_STATUS_SPEED GENMASK(1, 0)
+#define QCA8K_PORT_STATUS_SPEED_10 0
+#define QCA8K_PORT_STATUS_SPEED_100 0x1
+#define QCA8K_PORT_STATUS_SPEED_1000 0x2
+#define QCA8K_PORT_STATUS_TXMAC BIT(2)
+#define QCA8K_PORT_STATUS_RXMAC BIT(3)
+#define QCA8K_PORT_STATUS_TXFLOW BIT(4)
+#define QCA8K_PORT_STATUS_RXFLOW BIT(5)
+#define QCA8K_PORT_STATUS_DUPLEX BIT(6)
+#define QCA8K_PORT_STATUS_LINK_UP BIT(8)
+#define QCA8K_PORT_STATUS_LINK_AUTO BIT(9)
+#define QCA8K_PORT_STATUS_LINK_PAUSE BIT(10)
+#define QCA8K_PORT_STATUS_FLOW_AUTO BIT(12)
+#define QCA8K_REG_PORT_HDR_CTRL(_i) (0x9c + (_i * 4))
+#define QCA8K_PORT_HDR_CTRL_RX_MASK GENMASK(3, 2)
+#define QCA8K_PORT_HDR_CTRL_TX_MASK GENMASK(1, 0)
+#define QCA8K_PORT_HDR_CTRL_ALL 2
+#define QCA8K_PORT_HDR_CTRL_MGMT 1
+#define QCA8K_PORT_HDR_CTRL_NONE 0
+#define QCA8K_REG_SGMII_CTRL 0x0e0
+#define QCA8K_SGMII_EN_PLL BIT(1)
+#define QCA8K_SGMII_EN_RX BIT(2)
+#define QCA8K_SGMII_EN_TX BIT(3)
+#define QCA8K_SGMII_EN_SD BIT(4)
+#define QCA8K_SGMII_CLK125M_DELAY BIT(7)
+#define QCA8K_SGMII_MODE_CTRL_MASK GENMASK(23, 22)
+#define QCA8K_SGMII_MODE_CTRL(x) FIELD_PREP(QCA8K_SGMII_MODE_CTRL_MASK, x)
+#define QCA8K_SGMII_MODE_CTRL_BASEX QCA8K_SGMII_MODE_CTRL(0x0)
+#define QCA8K_SGMII_MODE_CTRL_PHY QCA8K_SGMII_MODE_CTRL(0x1)
+#define QCA8K_SGMII_MODE_CTRL_MAC QCA8K_SGMII_MODE_CTRL(0x2)
+
+/* MAC_PWR_SEL registers */
+#define QCA8K_REG_MAC_PWR_SEL 0x0e4
+#define QCA8K_MAC_PWR_RGMII1_1_8V BIT(18)
+#define QCA8K_MAC_PWR_RGMII0_1_8V BIT(19)
+
+/* EEE control registers */
+#define QCA8K_REG_EEE_CTRL 0x100
+#define QCA8K_REG_EEE_CTRL_LPI_EN(_i) (((_i) + 1) * 2)
+
+/* TRUNK_HASH_EN registers */
+#define QCA8K_TRUNK_HASH_EN_CTRL 0x270
+#define QCA8K_TRUNK_HASH_SIP_EN BIT(3)
+#define QCA8K_TRUNK_HASH_DIP_EN BIT(2)
+#define QCA8K_TRUNK_HASH_SA_EN BIT(1)
+#define QCA8K_TRUNK_HASH_DA_EN BIT(0)
+#define QCA8K_TRUNK_HASH_MASK GENMASK(3, 0)
+
+/* ACL registers */
+#define QCA8K_REG_PORT_VLAN_CTRL0(_i) (0x420 + (_i * 8))
+#define QCA8K_PORT_VLAN_CVID_MASK GENMASK(27, 16)
+#define QCA8K_PORT_VLAN_CVID(x) FIELD_PREP(QCA8K_PORT_VLAN_CVID_MASK, x)
+#define QCA8K_PORT_VLAN_SVID_MASK GENMASK(11, 0)
+#define QCA8K_PORT_VLAN_SVID(x) FIELD_PREP(QCA8K_PORT_VLAN_SVID_MASK, x)
+#define QCA8K_REG_PORT_VLAN_CTRL1(_i) (0x424 + (_i * 8))
+#define QCA8K_REG_IPV4_PRI_BASE_ADDR 0x470
+#define QCA8K_REG_IPV4_PRI_ADDR_MASK 0x474
+
+/* Lookup registers */
+#define QCA8K_REG_ATU_DATA0 0x600
+#define QCA8K_ATU_ADDR2_MASK GENMASK(31, 24)
+#define QCA8K_ATU_ADDR3_MASK GENMASK(23, 16)
+#define QCA8K_ATU_ADDR4_MASK GENMASK(15, 8)
+#define QCA8K_ATU_ADDR5_MASK GENMASK(7, 0)
+#define QCA8K_REG_ATU_DATA1 0x604
+#define QCA8K_ATU_PORT_MASK GENMASK(22, 16)
+#define QCA8K_ATU_ADDR0_MASK GENMASK(15, 8)
+#define QCA8K_ATU_ADDR1_MASK GENMASK(7, 0)
+#define QCA8K_REG_ATU_DATA2 0x608
+#define QCA8K_ATU_VID_MASK GENMASK(19, 8)
+#define QCA8K_ATU_STATUS_MASK GENMASK(3, 0)
+#define QCA8K_ATU_STATUS_STATIC 0xf
+#define QCA8K_REG_ATU_FUNC 0x60c
+#define QCA8K_ATU_FUNC_BUSY BIT(31)
+#define QCA8K_ATU_FUNC_PORT_EN BIT(14)
+#define QCA8K_ATU_FUNC_MULTI_EN BIT(13)
+#define QCA8K_ATU_FUNC_FULL BIT(12)
+#define QCA8K_ATU_FUNC_PORT_MASK GENMASK(11, 8)
+#define QCA8K_REG_VTU_FUNC0 0x610
+#define QCA8K_VTU_FUNC0_VALID BIT(20)
+#define QCA8K_VTU_FUNC0_IVL_EN BIT(19)
+/* QCA8K_VTU_FUNC0_EG_MODE_MASK GENMASK(17, 4)
+ * It contains the egress mode for each port: bits [5:4] for port 0,
+ * [7:6] for port 1 ... [17:16] for port 6. Use the per-port defines
+ * to handle this.
+ */
+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i) (4 + (_i) * 2)
+#define QCA8K_VTU_FUNC0_EG_MODE_MASK GENMASK(1, 0)
+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(_i) (GENMASK(1, 0) << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
+#define QCA8K_VTU_FUNC0_EG_MODE_UNMOD FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x0)
+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_UNMOD(_i) (QCA8K_VTU_FUNC0_EG_MODE_UNMOD << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
+#define QCA8K_VTU_FUNC0_EG_MODE_UNTAG FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x1)
+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(_i) (QCA8K_VTU_FUNC0_EG_MODE_UNTAG << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
+#define QCA8K_VTU_FUNC0_EG_MODE_TAG FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x2)
+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(_i) (QCA8K_VTU_FUNC0_EG_MODE_TAG << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
+#define QCA8K_VTU_FUNC0_EG_MODE_NOT FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x3)
+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(_i) (QCA8K_VTU_FUNC0_EG_MODE_NOT << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
+#define QCA8K_REG_VTU_FUNC1 0x614
+#define QCA8K_VTU_FUNC1_BUSY BIT(31)
+#define QCA8K_VTU_FUNC1_VID_MASK GENMASK(27, 16)
+#define QCA8K_VTU_FUNC1_FULL BIT(4)
+#define QCA8K_REG_ATU_CTRL 0x618
+#define QCA8K_ATU_AGE_TIME_MASK GENMASK(15, 0)
+#define QCA8K_ATU_AGE_TIME(x) FIELD_PREP(QCA8K_ATU_AGE_TIME_MASK, (x))
+#define QCA8K_REG_GLOBAL_FW_CTRL0 0x620
+#define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10)
+#define QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM GENMASK(7, 4)
+#define QCA8K_REG_GLOBAL_FW_CTRL1 0x624
+#define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK GENMASK(30, 24)
+#define QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK GENMASK(22, 16)
+#define QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK GENMASK(14, 8)
+#define QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK GENMASK(6, 0)
+#define QCA8K_PORT_LOOKUP_CTRL(_i) (0x660 + (_i) * 0xc)
+#define QCA8K_PORT_LOOKUP_MEMBER GENMASK(6, 0)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_MASK GENMASK(9, 8)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE(x) FIELD_PREP(QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, x)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_NONE QCA8K_PORT_LOOKUP_VLAN_MODE(0x0)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_FALLBACK QCA8K_PORT_LOOKUP_VLAN_MODE(0x1)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_CHECK QCA8K_PORT_LOOKUP_VLAN_MODE(0x2)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE QCA8K_PORT_LOOKUP_VLAN_MODE(0x3)
+#define QCA8K_PORT_LOOKUP_STATE_MASK GENMASK(18, 16)
+#define QCA8K_PORT_LOOKUP_STATE(x) FIELD_PREP(QCA8K_PORT_LOOKUP_STATE_MASK, x)
+#define QCA8K_PORT_LOOKUP_STATE_DISABLED QCA8K_PORT_LOOKUP_STATE(0x0)
+#define QCA8K_PORT_LOOKUP_STATE_BLOCKING QCA8K_PORT_LOOKUP_STATE(0x1)
+#define QCA8K_PORT_LOOKUP_STATE_LISTENING QCA8K_PORT_LOOKUP_STATE(0x2)
+#define QCA8K_PORT_LOOKUP_STATE_LEARNING QCA8K_PORT_LOOKUP_STATE(0x3)
+#define QCA8K_PORT_LOOKUP_STATE_FORWARD QCA8K_PORT_LOOKUP_STATE(0x4)
+#define QCA8K_PORT_LOOKUP_LEARN BIT(20)
+#define QCA8K_PORT_LOOKUP_ING_MIRROR_EN BIT(25)
+
+#define QCA8K_REG_GOL_TRUNK_CTRL0 0x700
+/* Up to 4 trunks, one byte per trunk:
+ * bits [6:0] are the port member bitmap,
+ * bit 7 enables the trunk
+ */
+#define QCA8K_REG_GOL_TRUNK_SHIFT(_i) ((_i) * 8)
+#define QCA8K_REG_GOL_TRUNK_EN_MASK BIT(7)
+#define QCA8K_REG_GOL_TRUNK_EN(_i) (QCA8K_REG_GOL_TRUNK_EN_MASK << QCA8K_REG_GOL_TRUNK_SHIFT(_i))
+#define QCA8K_REG_GOL_TRUNK_MEMBER_MASK GENMASK(6, 0)
+#define QCA8K_REG_GOL_TRUNK_MEMBER(_i) (QCA8K_REG_GOL_TRUNK_MEMBER_MASK << QCA8K_REG_GOL_TRUNK_SHIFT(_i))
+/* 0x704 for TRUNK 0-1 --- 0x708 for TRUNK 2-3 */
+#define QCA8K_REG_GOL_TRUNK_CTRL(_i) (0x704 + (((_i) / 2) * 4))
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK GENMASK(3, 0)
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK BIT(3)
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK GENMASK(2, 0)
+#define QCA8K_REG_GOL_TRUNK_ID_SHIFT(_i) (((_i) / 2) * 16)
+#define QCA8K_REG_GOL_MEM_ID_SHIFT(_i) ((_i) * 4)
+/* Complex shift: FIRST shift for port THEN shift for trunk */
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j) (QCA8K_REG_GOL_MEM_ID_SHIFT(_j) + QCA8K_REG_GOL_TRUNK_ID_SHIFT(_i))
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(_i, _j) (QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j))
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(_i, _j) (QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j))
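+/* Worked example (illustrative): for trunk _i = 3 and member slot _j = 1
+ * the field lives in QCA8K_REG_GOL_TRUNK_CTRL(3) = 0x708, shifted by
+ * QCA8K_REG_GOL_MEM_ID_SHIFT(1) + QCA8K_REG_GOL_TRUNK_ID_SHIFT(3) =
+ * 4 + 16 = 20 bits, so the member enable bit is BIT(3) << 20 = BIT(23).
+ */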
+
+#define QCA8K_REG_GLOBAL_FC_THRESH 0x800
+#define QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK GENMASK(24, 16)
+#define QCA8K_GLOBAL_FC_GOL_XON_THRES(x) FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK, x)
+#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK GENMASK(8, 0)
+#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES(x) FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK, x)
+
+#define QCA8K_REG_PORT_HOL_CTRL0(_i) (0x970 + (_i) * 0x8)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF_MASK GENMASK(3, 0)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI0(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF_MASK, x)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF_MASK GENMASK(7, 4)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI1(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF_MASK, x)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF_MASK GENMASK(11, 8)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI2(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF_MASK, x)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF_MASK GENMASK(15, 12)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI3(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF_MASK, x)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF_MASK GENMASK(19, 16)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI4(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF_MASK, x)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF_MASK GENMASK(23, 20)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI5(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF_MASK, x)
+#define QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF_MASK GENMASK(29, 24)
+#define QCA8K_PORT_HOL_CTRL0_EG_PORT(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF_MASK, x)
+
+#define QCA8K_REG_PORT_HOL_CTRL1(_i) (0x974 + (_i) * 0x8)
+#define QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK GENMASK(3, 0)
+#define QCA8K_PORT_HOL_CTRL1_ING(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK, x)
+#define QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN BIT(6)
+#define QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN BIT(7)
+#define QCA8K_PORT_HOL_CTRL1_WRED_EN BIT(8)
+#define QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN BIT(16)
+
+/* Pkt edit registers */
+#define QCA8K_EGREES_VLAN_PORT_SHIFT(_i) (16 * ((_i) % 2))
+#define QCA8K_EGREES_VLAN_PORT_MASK(_i) (GENMASK(11, 0) << QCA8K_EGREES_VLAN_PORT_SHIFT(_i))
+#define QCA8K_EGREES_VLAN_PORT(_i, x) ((x) << QCA8K_EGREES_VLAN_PORT_SHIFT(_i))
+#define QCA8K_EGRESS_VLAN(x) (0x0c70 + (4 * ((x) / 2)))
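+/* Worked example (illustrative): egress VIDs are packed two ports per
+ * 32-bit register. Port 5 maps to QCA8K_EGRESS_VLAN(5) = 0x0c78 with its
+ * 12-bit VID in bits [27:16], while port 4 shares the same register with
+ * its VID in bits [11:0].
+ */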
+
+/* L3 registers */
+#define QCA8K_HROUTER_CONTROL 0xe00
+#define QCA8K_HROUTER_CONTROL_GLB_LOCKTIME_M GENMASK(17, 16)
+#define QCA8K_HROUTER_CONTROL_GLB_LOCKTIME_S 16
+#define QCA8K_HROUTER_CONTROL_ARP_AGE_MODE 1
+#define QCA8K_HROUTER_PBASED_CONTROL1 0xe08
+#define QCA8K_HROUTER_PBASED_CONTROL2 0xe0c
+#define QCA8K_HNAT_CONTROL 0xe38
+
+/* MIB registers */
+#define QCA8K_PORT_MIB_COUNTER(_i) (0x1000 + (_i) * 0x100)
+
+/* QCA specific MII registers */
+#define MII_ATH_MMD_ADDR 0x0d
+#define MII_ATH_MMD_DATA 0x0e
+
+enum {
+ QCA8K_PORT_SPEED_10M = 0,
+ QCA8K_PORT_SPEED_100M = 1,
+ QCA8K_PORT_SPEED_1000M = 2,
+ QCA8K_PORT_SPEED_ERR = 3,
+};
+
+enum qca8k_fdb_cmd {
+ QCA8K_FDB_FLUSH = 1,
+ QCA8K_FDB_LOAD = 2,
+ QCA8K_FDB_PURGE = 3,
+ QCA8K_FDB_FLUSH_PORT = 5,
+ QCA8K_FDB_NEXT = 6,
+ QCA8K_FDB_SEARCH = 7,
+};
+
+enum qca8k_vlan_cmd {
+ QCA8K_VLAN_FLUSH = 1,
+ QCA8K_VLAN_LOAD = 2,
+ QCA8K_VLAN_PURGE = 3,
+ QCA8K_VLAN_REMOVE_PORT = 4,
+ QCA8K_VLAN_NEXT = 5,
+ QCA8K_VLAN_READ = 6,
+};
+
+enum qca8k_mid_cmd {
+ QCA8K_MIB_FLUSH = 1,
+ QCA8K_MIB_FLUSH_PORT = 2,
+ QCA8K_MIB_CAST = 3,
+};
+
+struct qca8k_priv;
+
+struct qca8k_info_ops {
+ int (*autocast_mib)(struct dsa_switch *ds, int port, u64 *data);
+ /* TODO: remove these extra ops when we can support regmap bulk read/write */
+ int (*read_eth)(struct qca8k_priv *priv, u32 reg, u32 *val, int len);
+ int (*write_eth)(struct qca8k_priv *priv, u32 reg, u32 *val, int len);
+};
+
+struct qca8k_match_data {
+ u8 id;
+ bool reduced_package;
+ u8 mib_count;
+ const struct qca8k_info_ops *ops;
+};
+
+enum {
+ QCA8K_CPU_PORT0,
+ QCA8K_CPU_PORT6,
+};
+
+struct qca8k_mgmt_eth_data {
+ struct completion rw_done;
+ struct mutex mutex; /* Enforce one mdio read/write at a time */
+ bool ack;
+ u32 seq;
+ u32 data[4];
+};
+
+struct qca8k_mib_eth_data {
+ struct completion rw_done;
+ struct mutex mutex; /* Process one command at a time */
+ refcount_t port_parsed; /* Counter to track parsed ports */
+ u8 req_port;
+ u64 *data; /* pointer to ethtool data */
+};
+
+struct qca8k_ports_config {
+ bool sgmii_rx_clk_falling_edge;
+ bool sgmii_tx_clk_falling_edge;
+ bool sgmii_enable_pll;
+ u8 rgmii_rx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */
+ u8 rgmii_tx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */
+};
+
+struct qca8k_mdio_cache {
+/* The 32bit switch registers are accessed indirectly. To achieve this we need
+ * to set the page of the register. Track the last page that was set to reduce
+ * mdio writes.
+ */
+ u16 page;
+};
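+/* Sketch of the intended use (assumed, simplified): derive the page from
+ * the register address and only issue the page-switch MDIO write when it
+ * differs from the cached one. qca8k_set_page() is a hypothetical stand-in
+ * for the real page-switch helper.
+ *
+ *    page = (reg >> 9) & 0x3ff;
+ *    if (page != priv->mdio_cache.page) {
+ *            qca8k_set_page(priv, page);
+ *            priv->mdio_cache.page = page;
+ *    }
+ */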
+
+struct qca8k_pcs {
+ struct phylink_pcs pcs;
+ struct qca8k_priv *priv;
+ int port;
+};
+
+struct qca8k_priv {
+ u8 switch_id;
+ u8 switch_revision;
+ u8 mirror_rx;
+ u8 mirror_tx;
+ u8 lag_hash_mode;
+ /* Each bit corresponds to a port. This switch can support a max of 7 ports.
+ * Bit set (1): port enabled. Bit clear (0): port disabled.
+ */
+ u8 port_enabled_map;
+ struct qca8k_ports_config ports_config;
+ struct regmap *regmap;
+ struct mii_bus *bus;
+ struct dsa_switch *ds;
+ struct mutex reg_mutex;
+ struct device *dev;
+ struct gpio_desc *reset_gpio;
+ struct net_device *mgmt_master; /* Track if mdio/mib Ethernet is available */
+ struct qca8k_mgmt_eth_data mgmt_eth_data;
+ struct qca8k_mib_eth_data mib_eth_data;
+ struct qca8k_mdio_cache mdio_cache;
+ struct qca8k_pcs pcs_port_0;
+ struct qca8k_pcs pcs_port_6;
+ const struct qca8k_match_data *info;
+};
+
+struct qca8k_mib_desc {
+ unsigned int size;
+ unsigned int offset;
+ const char *name;
+};
+
+struct qca8k_fdb {
+ u16 vid;
+ u8 port_mask;
+ u8 aging;
+ u8 mac[6];
+};
+
+/* Common setup function */
+extern const struct qca8k_mib_desc ar8327_mib[];
+extern const struct regmap_access_table qca8k_readable_table;
+int qca8k_mib_init(struct qca8k_priv *priv);
+void qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable);
+int qca8k_read_switch_id(struct qca8k_priv *priv);
+
+/* Common read/write/rmw function */
+int qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val);
+int qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val);
+int qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val);
+
+/* Common ops function */
+void qca8k_fdb_flush(struct qca8k_priv *priv);
+
+/* Common ethtool stats function */
+void qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data);
+void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data);
+int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset);
+
+/* Common eee function */
+int qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee);
+int qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
+
+/* Common bridge function */
+void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state);
+int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack);
+void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge);
+
+/* Common port enable/disable function */
+int qca8k_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy);
+void qca8k_port_disable(struct dsa_switch *ds, int port);
+
+/* Common MTU function */
+int qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu);
+int qca8k_port_max_mtu(struct dsa_switch *ds, int port);
+
+/* Common fast age function */
+void qca8k_port_fast_age(struct dsa_switch *ds, int port);
+int qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs);
+
+/* Common FDB function */
+int qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
+ u16 port_mask, u16 vid);
+int qca8k_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
+int qca8k_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
+int qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data);
+
+/* Common MDB function */
+int qca8k_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
+int qca8k_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
+
+/* Common port mirror function */
+int qca8k_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack);
+void qca8k_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror);
+
+/* Common port VLAN function */
+int qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
+ struct netlink_ext_ack *extack);
+int qca8k_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack);
+int qca8k_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+
+/* Common port LAG function */
+int qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack);
+int qca8k_port_lag_leave(struct dsa_switch *ds, int port,
+ struct dsa_lag lag);
+
+#endif /* __QCA8K_H */
diff --git a/drivers/net/dsa/realtek/Kconfig b/drivers/net/dsa/realtek/Kconfig
new file mode 100644
index 000000000..060165a85
--- /dev/null
+++ b/drivers/net/dsa/realtek/Kconfig
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig NET_DSA_REALTEK
+ tristate "Realtek Ethernet switch family support"
+ depends on NET_DSA
+ select FIXED_PHY
+ select IRQ_DOMAIN
+ select REALTEK_PHY
+ select REGMAP
+ help
+ Select to enable support for Realtek Ethernet switch chips.
+
+ Note that at least one interface driver must be enabled for the
+ subdrivers to be loaded. Moreover, an interface driver cannot achieve
+ anything without at least one subdriver enabled.
+
+if NET_DSA_REALTEK
+
+config NET_DSA_REALTEK_MDIO
+ tristate "Realtek MDIO interface driver"
+ depends on OF
+ depends on NET_DSA_REALTEK_RTL8365MB || NET_DSA_REALTEK_RTL8366RB
+ depends on NET_DSA_REALTEK_RTL8365MB || !NET_DSA_REALTEK_RTL8365MB
+ depends on NET_DSA_REALTEK_RTL8366RB || !NET_DSA_REALTEK_RTL8366RB
+ help
+ Select to enable support for registering switches configured
+ through MDIO.
+
+config NET_DSA_REALTEK_SMI
+ tristate "Realtek SMI interface driver"
+ depends on OF
+ depends on NET_DSA_REALTEK_RTL8365MB || NET_DSA_REALTEK_RTL8366RB
+ depends on NET_DSA_REALTEK_RTL8365MB || !NET_DSA_REALTEK_RTL8365MB
+ depends on NET_DSA_REALTEK_RTL8366RB || !NET_DSA_REALTEK_RTL8366RB
+ help
+ Select to enable support for registering switches connected
+ through SMI.
+
+config NET_DSA_REALTEK_RTL8365MB
+ tristate "Realtek RTL8365MB switch subdriver"
+ imply NET_DSA_REALTEK_SMI
+ imply NET_DSA_REALTEK_MDIO
+ select NET_DSA_TAG_RTL8_4
+ help
+ Select to enable support for Realtek RTL8365MB-VC and RTL8367S.
+
+config NET_DSA_REALTEK_RTL8366RB
+ tristate "Realtek RTL8366RB switch subdriver"
+ imply NET_DSA_REALTEK_SMI
+ imply NET_DSA_REALTEK_MDIO
+ select NET_DSA_TAG_RTL4_A
+ help
+ Select to enable support for Realtek RTL8366RB.
+
+endif
diff --git a/drivers/net/dsa/realtek/Makefile b/drivers/net/dsa/realtek/Makefile
new file mode 100644
index 000000000..0aab57252
--- /dev/null
+++ b/drivers/net/dsa/realtek/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_NET_DSA_REALTEK_MDIO) += realtek-mdio.o
+obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek-smi.o
+obj-$(CONFIG_NET_DSA_REALTEK_RTL8366RB) += rtl8366.o
+rtl8366-objs := rtl8366-core.o rtl8366rb.o
+obj-$(CONFIG_NET_DSA_REALTEK_RTL8365MB) += rtl8365mb.o
diff --git a/drivers/net/dsa/realtek/realtek-mdio.c b/drivers/net/dsa/realtek/realtek-mdio.c
new file mode 100644
index 000000000..5a8fe707c
--- /dev/null
+++ b/drivers/net/dsa/realtek/realtek-mdio.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Realtek MDIO interface driver
+ *
+ * ASICs we intend to support with this driver:
+ *
+ * RTL8366 - The original version, apparently
+ * RTL8369 - Similar enough to have the same datasheet as RTL8366
+ * RTL8366RB - Probably reads out "RTL8366 revision B", has a quite
+ * different register layout from the other two
+ * RTL8366S - Is this "RTL8366 super"?
+ * RTL8367 - Has an OpenWRT driver as well
+ * RTL8368S - Seems to be an alternative name for RTL8366RB
+ * RTL8370 - Also uses SMI
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com>
+ * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv>
+ * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com>
+ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ */
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/overflow.h>
+#include <linux/regmap.h>
+
+#include "realtek.h"
+
+/* Read/write via mdiobus */
+#define REALTEK_MDIO_CTRL0_REG 31
+#define REALTEK_MDIO_START_REG 29
+#define REALTEK_MDIO_CTRL1_REG 21
+#define REALTEK_MDIO_ADDRESS_REG 23
+#define REALTEK_MDIO_DATA_WRITE_REG 24
+#define REALTEK_MDIO_DATA_READ_REG 25
+
+#define REALTEK_MDIO_START_OP 0xFFFF
+#define REALTEK_MDIO_ADDR_OP 0x000E
+#define REALTEK_MDIO_READ_OP 0x0001
+#define REALTEK_MDIO_WRITE_OP 0x0003
+
+static int realtek_mdio_write(void *ctx, u32 reg, u32 val)
+{
+ struct realtek_priv *priv = ctx;
+ struct mii_bus *bus = priv->bus;
+ int ret;
+
+ mutex_lock(&bus->mdio_lock);
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_CTRL0_REG, REALTEK_MDIO_ADDR_OP);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_ADDRESS_REG, reg);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_DATA_WRITE_REG, val);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_CTRL1_REG, REALTEK_MDIO_WRITE_OP);
+
+out_unlock:
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static int realtek_mdio_read(void *ctx, u32 reg, u32 *val)
+{
+ struct realtek_priv *priv = ctx;
+ struct mii_bus *bus = priv->bus;
+ int ret;
+
+ mutex_lock(&bus->mdio_lock);
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_CTRL0_REG, REALTEK_MDIO_ADDR_OP);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_ADDRESS_REG, reg);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_CTRL1_REG, REALTEK_MDIO_READ_OP);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->read(bus, priv->mdio_addr, REALTEK_MDIO_DATA_READ_REG);
+ if (ret >= 0) {
+ *val = ret;
+ ret = 0;
+ }
+
+out_unlock:
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static void realtek_mdio_lock(void *ctx)
+{
+ struct realtek_priv *priv = ctx;
+
+ mutex_lock(&priv->map_lock);
+}
+
+static void realtek_mdio_unlock(void *ctx)
+{
+ struct realtek_priv *priv = ctx;
+
+ mutex_unlock(&priv->map_lock);
+}
+
+static const struct regmap_config realtek_mdio_regmap_config = {
+ .reg_bits = 10, /* A4..A0 R4..R0 */
+ .val_bits = 16,
+ .reg_stride = 1,
+ /* PHY regs are at 0x8000 */
+ .max_register = 0xffff,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .reg_read = realtek_mdio_read,
+ .reg_write = realtek_mdio_write,
+ .cache_type = REGCACHE_NONE,
+ .lock = realtek_mdio_lock,
+ .unlock = realtek_mdio_unlock,
+};
+
+static const struct regmap_config realtek_mdio_nolock_regmap_config = {
+ .reg_bits = 10, /* A4..A0 R4..R0 */
+ .val_bits = 16,
+ .reg_stride = 1,
+ /* PHY regs are at 0x8000 */
+ .max_register = 0xffff,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .reg_read = realtek_mdio_read,
+ .reg_write = realtek_mdio_write,
+ .cache_type = REGCACHE_NONE,
+ .disable_locking = true,
+};
+
+static int realtek_mdio_probe(struct mdio_device *mdiodev)
+{
+ struct realtek_priv *priv;
+ struct device *dev = &mdiodev->dev;
+ const struct realtek_variant *var;
+ struct regmap_config rc;
+ struct device_node *np;
+ int ret;
+
+ var = of_device_get_match_data(dev);
+ if (!var)
+ return -EINVAL;
+
+ priv = devm_kzalloc(&mdiodev->dev,
+ size_add(sizeof(*priv), var->chip_data_sz),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_init(&priv->map_lock);
+
+ rc = realtek_mdio_regmap_config;
+ rc.lock_arg = priv;
+ priv->map = devm_regmap_init(dev, NULL, priv, &rc);
+ if (IS_ERR(priv->map)) {
+ ret = PTR_ERR(priv->map);
+ dev_err(dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ rc = realtek_mdio_nolock_regmap_config;
+ priv->map_nolock = devm_regmap_init(dev, NULL, priv, &rc);
+ if (IS_ERR(priv->map_nolock)) {
+ ret = PTR_ERR(priv->map_nolock);
+ dev_err(dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ priv->mdio_addr = mdiodev->addr;
+ priv->bus = mdiodev->bus;
+ priv->dev = &mdiodev->dev;
+ priv->chip_data = (void *)priv + sizeof(*priv);
+
+ priv->clk_delay = var->clk_delay;
+ priv->cmd_read = var->cmd_read;
+ priv->cmd_write = var->cmd_write;
+ priv->ops = var->ops;
+
+ priv->write_reg_noack = realtek_mdio_write;
+
+ np = dev->of_node;
+
+ dev_set_drvdata(dev, priv);
+
+ /* TODO: if power is software controlled, set up any regulators here */
+ priv->leds_disabled = of_property_read_bool(np, "realtek,disable-leds");
+
+ priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->reset)) {
+ dev_err(dev, "failed to get RESET GPIO\n");
+ return PTR_ERR(priv->reset);
+ }
+
+ if (priv->reset) {
+ gpiod_set_value(priv->reset, 1);
+ dev_dbg(dev, "asserted RESET\n");
+ msleep(REALTEK_HW_STOP_DELAY);
+ gpiod_set_value(priv->reset, 0);
+ msleep(REALTEK_HW_START_DELAY);
+ dev_dbg(dev, "deasserted RESET\n");
+ }
+
+ ret = priv->ops->detect(priv);
+ if (ret) {
+ dev_err(dev, "unable to detect switch\n");
+ return ret;
+ }
+
+ priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
+ if (!priv->ds)
+ return -ENOMEM;
+
+ priv->ds->dev = dev;
+ priv->ds->num_ports = priv->num_ports;
+ priv->ds->priv = priv;
+ priv->ds->ops = var->ds_ops_mdio;
+
+ ret = dsa_register_switch(priv->ds);
+ if (ret) {
+ dev_err(priv->dev, "unable to register switch ret = %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void realtek_mdio_remove(struct mdio_device *mdiodev)
+{
+ struct realtek_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ dsa_unregister_switch(priv->ds);
+
+ /* leave the device reset asserted */
+ if (priv->reset)
+ gpiod_set_value(priv->reset, 1);
+}
+
+static void realtek_mdio_shutdown(struct mdio_device *mdiodev)
+{
+ struct realtek_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static const struct of_device_id realtek_mdio_of_match[] = {
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB)
+ { .compatible = "realtek,rtl8366rb", .data = &rtl8366rb_variant, },
+#endif
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB)
+ { .compatible = "realtek,rtl8365mb", .data = &rtl8365mb_variant, },
+#endif
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, realtek_mdio_of_match);
+
+static struct mdio_driver realtek_mdio_driver = {
+ .mdiodrv.driver = {
+ .name = "realtek-mdio",
+ .of_match_table = of_match_ptr(realtek_mdio_of_match),
+ },
+ .probe = realtek_mdio_probe,
+ .remove = realtek_mdio_remove,
+ .shutdown = realtek_mdio_shutdown,
+};
+
+mdio_module_driver(realtek_mdio_driver);
+
+MODULE_AUTHOR("Luiz Angelo Daros de Luca <luizluca@gmail.com>");
+MODULE_DESCRIPTION("Driver for Realtek ethernet switch connected via MDIO interface");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/realtek/realtek-smi.c b/drivers/net/dsa/realtek/realtek-smi.c
new file mode 100644
index 000000000..1b447d96b
--- /dev/null
+++ b/drivers/net/dsa/realtek/realtek-smi.c
@@ -0,0 +1,570 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Realtek Simple Management Interface (SMI) driver
+ * It can be discussed how "simple" this interface is.
+ *
+ * The SMI protocol piggy-backs on the MDIO MDC and MDIO signal levels,
+ * but the protocol is not MDIO at all. Instead it is a Realtek
+ * peculiarity that needs to bit-bang the lines in a special way to
+ * communicate with the switch.
+ *
+ * ASICs we intend to support with this driver:
+ *
+ * RTL8366 - The original version, apparently
+ * RTL8369 - Similar enough to have the same datasheet as RTL8366
+ * RTL8366RB - Probably reads out "RTL8366 revision B", has a quite
+ * different register layout from the other two
+ * RTL8366S - Is this "RTL8366 super"?
+ * RTL8367 - Has an OpenWRT driver as well
+ * RTL8368S - Seems to be an alternative name for RTL8366RB
+ * RTL8370 - Also uses SMI
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com>
+ * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv>
+ * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com>
+ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/bitops.h>
+#include <linux/if_bridge.h>
+
+#include "realtek.h"
+
+#define REALTEK_SMI_ACK_RETRY_COUNT 5
+
+static inline void realtek_smi_clk_delay(struct realtek_priv *priv)
+{
+ ndelay(priv->clk_delay);
+}
+
+static void realtek_smi_start(struct realtek_priv *priv)
+{
+ /* Set GPIO pins to output mode, with initial state:
+ * SCK = 0, SDA = 1
+ */
+ gpiod_direction_output(priv->mdc, 0);
+ gpiod_direction_output(priv->mdio, 1);
+ realtek_smi_clk_delay(priv);
+
+ /* CLK 1: 0 -> 1, 1 -> 0 */
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 0);
+ realtek_smi_clk_delay(priv);
+
+ /* CLK 2: */
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdio, 0);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 0);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdio, 1);
+}
+
+static void realtek_smi_stop(struct realtek_priv *priv)
+{
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdio, 0);
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdio, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 0);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 1);
+
+ /* Add an extra clock pulse */
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 0);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 1);
+
+ /* Set GPIO pins to input mode */
+ gpiod_direction_input(priv->mdio);
+ gpiod_direction_input(priv->mdc);
+}
+
+static void realtek_smi_write_bits(struct realtek_priv *priv, u32 data, u32 len)
+{
+ for (; len > 0; len--) {
+ realtek_smi_clk_delay(priv);
+
+ /* Prepare data */
+ gpiod_set_value(priv->mdio, !!(data & (1 << (len - 1))));
+ realtek_smi_clk_delay(priv);
+
+ /* Clocking */
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 0);
+ }
+}
+
+static void realtek_smi_read_bits(struct realtek_priv *priv, u32 len, u32 *data)
+{
+ gpiod_direction_input(priv->mdio);
+
+ for (*data = 0; len > 0; len--) {
+ u32 u;
+
+ realtek_smi_clk_delay(priv);
+
+ /* Clocking */
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ u = !!gpiod_get_value(priv->mdio);
+ gpiod_set_value(priv->mdc, 0);
+
+ *data |= (u << (len - 1));
+ }
+
+ gpiod_direction_output(priv->mdio, 0);
+}
+
+static int realtek_smi_wait_for_ack(struct realtek_priv *priv)
+{
+ int retry_cnt;
+
+ retry_cnt = 0;
+ do {
+ u32 ack;
+
+ realtek_smi_read_bits(priv, 1, &ack);
+ if (ack == 0)
+ break;
+
+ if (++retry_cnt > REALTEK_SMI_ACK_RETRY_COUNT) {
+ dev_err(priv->dev, "ACK timeout\n");
+ return -ETIMEDOUT;
+ }
+ } while (1);
+
+ return 0;
+}
+
+static int realtek_smi_write_byte(struct realtek_priv *priv, u8 data)
+{
+ realtek_smi_write_bits(priv, data, 8);
+ return realtek_smi_wait_for_ack(priv);
+}
+
+static int realtek_smi_write_byte_noack(struct realtek_priv *priv, u8 data)
+{
+ realtek_smi_write_bits(priv, data, 8);
+ return 0;
+}
+
+static int realtek_smi_read_byte0(struct realtek_priv *priv, u8 *data)
+{
+ u32 t;
+
+ /* Read data */
+ realtek_smi_read_bits(priv, 8, &t);
+ *data = (t & 0xff);
+
+ /* Send an ACK */
+ realtek_smi_write_bits(priv, 0x00, 1);
+
+ return 0;
+}
+
+static int realtek_smi_read_byte1(struct realtek_priv *priv, u8 *data)
+{
+ u32 t;
+
+ /* Read data */
+ realtek_smi_read_bits(priv, 8, &t);
+ *data = (t & 0xff);
+
+ /* Send an ACK */
+ realtek_smi_write_bits(priv, 0x01, 1);
+
+ return 0;
+}
+
+static int realtek_smi_read_reg(struct realtek_priv *priv, u32 addr, u32 *data)
+{
+ unsigned long flags;
+ u8 lo = 0;
+ u8 hi = 0;
+ int ret;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ realtek_smi_start(priv);
+
+ /* Send READ command */
+ ret = realtek_smi_write_byte(priv, priv->cmd_read);
+ if (ret)
+ goto out;
+
+ /* Set ADDR[7:0] */
+ ret = realtek_smi_write_byte(priv, addr & 0xff);
+ if (ret)
+ goto out;
+
+ /* Set ADDR[15:8] */
+ ret = realtek_smi_write_byte(priv, addr >> 8);
+ if (ret)
+ goto out;
+
+ /* Read DATA[7:0] */
+ realtek_smi_read_byte0(priv, &lo);
+ /* Read DATA[15:8] */
+ realtek_smi_read_byte1(priv, &hi);
+
+ *data = ((u32)lo) | (((u32)hi) << 8);
+
+ ret = 0;
+
+ out:
+ realtek_smi_stop(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return ret;
+}
+
+static int realtek_smi_write_reg(struct realtek_priv *priv,
+ u32 addr, u32 data, bool ack)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ realtek_smi_start(priv);
+
+ /* Send WRITE command */
+ ret = realtek_smi_write_byte(priv, priv->cmd_write);
+ if (ret)
+ goto out;
+
+ /* Set ADDR[7:0] */
+ ret = realtek_smi_write_byte(priv, addr & 0xff);
+ if (ret)
+ goto out;
+
+ /* Set ADDR[15:8] */
+ ret = realtek_smi_write_byte(priv, addr >> 8);
+ if (ret)
+ goto out;
+
+ /* Write DATA[7:0] */
+ ret = realtek_smi_write_byte(priv, data & 0xff);
+ if (ret)
+ goto out;
+
+ /* Write DATA[15:8] */
+ if (ack)
+ ret = realtek_smi_write_byte(priv, data >> 8);
+ else
+ ret = realtek_smi_write_byte_noack(priv, data >> 8);
+ if (ret)
+ goto out;
+
+ ret = 0;
+
+ out:
+ realtek_smi_stop(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return ret;
+}
+
+/* There is a single case where we need to use this accessor and that
+ * is when issuing a soft reset. Since the device resets as soon as we write
+ * that bit, no ACK will come back, for natural reasons.
+ */
+static int realtek_smi_write_reg_noack(void *ctx, u32 reg, u32 val)
+{
+ return realtek_smi_write_reg(ctx, reg, val, false);
+}
+
+/* Regmap accessors */
+
+static int realtek_smi_write(void *ctx, u32 reg, u32 val)
+{
+ struct realtek_priv *priv = ctx;
+
+ return realtek_smi_write_reg(priv, reg, val, true);
+}
+
+static int realtek_smi_read(void *ctx, u32 reg, u32 *val)
+{
+ struct realtek_priv *priv = ctx;
+
+ return realtek_smi_read_reg(priv, reg, val);
+}
+
+static void realtek_smi_lock(void *ctx)
+{
+ struct realtek_priv *priv = ctx;
+
+ mutex_lock(&priv->map_lock);
+}
+
+static void realtek_smi_unlock(void *ctx)
+{
+ struct realtek_priv *priv = ctx;
+
+ mutex_unlock(&priv->map_lock);
+}
+
+static const struct regmap_config realtek_smi_regmap_config = {
+ .reg_bits = 10, /* A4..A0 R4..R0 */
+ .val_bits = 16,
+ .reg_stride = 1,
+ /* PHY regs are at 0x8000 */
+ .max_register = 0xffff,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .reg_read = realtek_smi_read,
+ .reg_write = realtek_smi_write,
+ .cache_type = REGCACHE_NONE,
+ .lock = realtek_smi_lock,
+ .unlock = realtek_smi_unlock,
+};
+
+static const struct regmap_config realtek_smi_nolock_regmap_config = {
+ .reg_bits = 10, /* A4..A0 R4..R0 */
+ .val_bits = 16,
+ .reg_stride = 1,
+ /* PHY regs are at 0x8000 */
+ .max_register = 0xffff,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .reg_read = realtek_smi_read,
+ .reg_write = realtek_smi_write,
+ .cache_type = REGCACHE_NONE,
+ .disable_locking = true,
+};
+
+static int realtek_smi_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct realtek_priv *priv = bus->priv;
+
+ return priv->ops->phy_read(priv, addr, regnum);
+}
+
+static int realtek_smi_mdio_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct realtek_priv *priv = bus->priv;
+
+ return priv->ops->phy_write(priv, addr, regnum, val);
+}
+
+static int realtek_smi_setup_mdio(struct dsa_switch *ds)
+{
+ struct realtek_priv *priv = ds->priv;
+ struct device_node *mdio_np;
+ int ret;
+
+ mdio_np = of_get_compatible_child(priv->dev->of_node, "realtek,smi-mdio");
+ if (!mdio_np) {
+ dev_err(priv->dev, "no MDIO bus node\n");
+ return -ENODEV;
+ }
+
+ priv->slave_mii_bus = devm_mdiobus_alloc(priv->dev);
+ if (!priv->slave_mii_bus) {
+ ret = -ENOMEM;
+ goto err_put_node;
+ }
+ priv->slave_mii_bus->priv = priv;
+ priv->slave_mii_bus->name = "SMI slave MII";
+ priv->slave_mii_bus->read = realtek_smi_mdio_read;
+ priv->slave_mii_bus->write = realtek_smi_mdio_write;
+ snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "SMI-%d",
+ ds->index);
+ priv->slave_mii_bus->dev.of_node = mdio_np;
+ priv->slave_mii_bus->parent = priv->dev;
+ ds->slave_mii_bus = priv->slave_mii_bus;
+
+ ret = devm_of_mdiobus_register(priv->dev, priv->slave_mii_bus, mdio_np);
+ if (ret) {
+ dev_err(priv->dev, "unable to register MDIO bus %s\n",
+ priv->slave_mii_bus->id);
+ goto err_put_node;
+ }
+
+ return 0;
+
+err_put_node:
+ of_node_put(mdio_np);
+
+ return ret;
+}
+
+static int realtek_smi_probe(struct platform_device *pdev)
+{
+ const struct realtek_variant *var;
+ struct device *dev = &pdev->dev;
+ struct realtek_priv *priv;
+ struct regmap_config rc;
+ struct device_node *np;
+ int ret;
+
+ var = of_device_get_match_data(dev);
+ np = dev->of_node;
+
+ priv = devm_kzalloc(dev, sizeof(*priv) + var->chip_data_sz, GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ priv->chip_data = (void *)priv + sizeof(*priv);
+
+ mutex_init(&priv->map_lock);
+
+ rc = realtek_smi_regmap_config;
+ rc.lock_arg = priv;
+ priv->map = devm_regmap_init(dev, NULL, priv, &rc);
+ if (IS_ERR(priv->map)) {
+ ret = PTR_ERR(priv->map);
+ dev_err(dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ rc = realtek_smi_nolock_regmap_config;
+ priv->map_nolock = devm_regmap_init(dev, NULL, priv, &rc);
+ if (IS_ERR(priv->map_nolock)) {
+ ret = PTR_ERR(priv->map_nolock);
+ dev_err(dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Link forward and backward */
+ priv->dev = dev;
+ priv->clk_delay = var->clk_delay;
+ priv->cmd_read = var->cmd_read;
+ priv->cmd_write = var->cmd_write;
+ priv->ops = var->ops;
+
+ priv->setup_interface = realtek_smi_setup_mdio;
+ priv->write_reg_noack = realtek_smi_write_reg_noack;
+
+ dev_set_drvdata(dev, priv);
+ spin_lock_init(&priv->lock);
+
+ /* TODO: if power is software controlled, set up any regulators here */
+
+ priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->reset)) {
+ dev_err(dev, "failed to get RESET GPIO\n");
+ return PTR_ERR(priv->reset);
+ }
+ if (priv->reset) {
+ gpiod_set_value(priv->reset, 1);
+ dev_dbg(dev, "asserted RESET\n");
+ msleep(REALTEK_HW_STOP_DELAY);
+ gpiod_set_value(priv->reset, 0);
+ msleep(REALTEK_HW_START_DELAY);
+ dev_dbg(dev, "deasserted RESET\n");
+ }
+
+ /* Fetch MDIO pins */
+ priv->mdc = devm_gpiod_get_optional(dev, "mdc", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->mdc))
+ return PTR_ERR(priv->mdc);
+ priv->mdio = devm_gpiod_get_optional(dev, "mdio", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->mdio))
+ return PTR_ERR(priv->mdio);
+
+ priv->leds_disabled = of_property_read_bool(np, "realtek,disable-leds");
+
+ ret = priv->ops->detect(priv);
+ if (ret) {
+ dev_err(dev, "unable to detect switch\n");
+ return ret;
+ }
+
+ priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
+ if (!priv->ds)
+ return -ENOMEM;
+
+ priv->ds->dev = dev;
+ priv->ds->num_ports = priv->num_ports;
+ priv->ds->priv = priv;
+
+ priv->ds->ops = var->ds_ops_smi;
+ ret = dsa_register_switch(priv->ds);
+ if (ret) {
+ dev_err_probe(dev, ret, "unable to register switch\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int realtek_smi_remove(struct platform_device *pdev)
+{
+ struct realtek_priv *priv = platform_get_drvdata(pdev);
+
+ if (!priv)
+ return 0;
+
+ dsa_unregister_switch(priv->ds);
+ if (priv->slave_mii_bus)
+ of_node_put(priv->slave_mii_bus->dev.of_node);
+
+ /* leave the device reset asserted */
+ if (priv->reset)
+ gpiod_set_value(priv->reset, 1);
+
+ return 0;
+}
+
+static void realtek_smi_shutdown(struct platform_device *pdev)
+{
+ struct realtek_priv *priv = platform_get_drvdata(pdev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static const struct of_device_id realtek_smi_of_match[] = {
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB)
+ {
+ .compatible = "realtek,rtl8366rb",
+ .data = &rtl8366rb_variant,
+ },
+#endif
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB)
+ {
+ .compatible = "realtek,rtl8365mb",
+ .data = &rtl8365mb_variant,
+ },
+#endif
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, realtek_smi_of_match);
+
+static struct platform_driver realtek_smi_driver = {
+ .driver = {
+ .name = "realtek-smi",
+ .of_match_table = of_match_ptr(realtek_smi_of_match),
+ },
+ .probe = realtek_smi_probe,
+ .remove = realtek_smi_remove,
+ .shutdown = realtek_smi_shutdown,
+};
+module_platform_driver(realtek_smi_driver);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Driver for Realtek ethernet switch connected via SMI interface");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/realtek/realtek.h b/drivers/net/dsa/realtek/realtek.h
new file mode 100644
index 000000000..4fa7c6ba8
--- /dev/null
+++ b/drivers/net/dsa/realtek/realtek.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Realtek SMI interface driver defines
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ */
+
+#ifndef _REALTEK_H
+#define _REALTEK_H
+
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/gpio/consumer.h>
+#include <net/dsa.h>
+
+#define REALTEK_HW_STOP_DELAY 25 /* msecs */
+#define REALTEK_HW_START_DELAY 100 /* msecs */
+
+struct realtek_ops;
+struct dentry;
+struct inode;
+struct file;
+
+struct rtl8366_mib_counter {
+ unsigned int base;
+ unsigned int offset;
+ unsigned int length;
+ const char *name;
+};
+
+/*
+ * struct rtl8366_vlan_mc - Virtual LAN member configuration
+ */
+struct rtl8366_vlan_mc {
+ u16 vid;
+ u16 untag;
+ u16 member;
+ u8 fid;
+ u8 priority;
+};
+
+struct rtl8366_vlan_4k {
+ u16 vid;
+ u16 untag;
+ u16 member;
+ u8 fid;
+};
+
+struct realtek_priv {
+ struct device *dev;
+ struct gpio_desc *reset;
+ struct gpio_desc *mdc;
+ struct gpio_desc *mdio;
+ struct regmap *map;
+ struct regmap *map_nolock;
+ struct mutex map_lock;
+ struct mii_bus *slave_mii_bus;
+ struct mii_bus *bus;
+ int mdio_addr;
+
+ unsigned int clk_delay;
+ u8 cmd_read;
+ u8 cmd_write;
+ spinlock_t lock; /* Locks around command writes */
+ struct dsa_switch *ds;
+ struct irq_domain *irqdomain;
+ bool leds_disabled;
+
+ unsigned int cpu_port;
+ unsigned int num_ports;
+ unsigned int num_vlan_mc;
+ unsigned int num_mib_counters;
+ struct rtl8366_mib_counter *mib_counters;
+
+ const struct realtek_ops *ops;
+ int (*setup_interface)(struct dsa_switch *ds);
+ int (*write_reg_noack)(void *ctx, u32 addr, u32 data);
+
+ int vlan_enabled;
+ int vlan4k_enabled;
+
+ char buf[4096];
+ void *chip_data; /* Per-chip extra variant data */
+};
+
+/*
+ * struct realtek_ops - vtable for the per-SMI-chiptype operations
+ * @detect: detects the chiptype
+ */
+struct realtek_ops {
+ int (*detect)(struct realtek_priv *priv);
+ int (*reset_chip)(struct realtek_priv *priv);
+ int (*setup)(struct realtek_priv *priv);
+ void (*cleanup)(struct realtek_priv *priv);
+ int (*get_mib_counter)(struct realtek_priv *priv,
+ int port,
+ struct rtl8366_mib_counter *mib,
+ u64 *mibvalue);
+ int (*get_vlan_mc)(struct realtek_priv *priv, u32 index,
+ struct rtl8366_vlan_mc *vlanmc);
+ int (*set_vlan_mc)(struct realtek_priv *priv, u32 index,
+ const struct rtl8366_vlan_mc *vlanmc);
+ int (*get_vlan_4k)(struct realtek_priv *priv, u32 vid,
+ struct rtl8366_vlan_4k *vlan4k);
+ int (*set_vlan_4k)(struct realtek_priv *priv,
+ const struct rtl8366_vlan_4k *vlan4k);
+ int (*get_mc_index)(struct realtek_priv *priv, int port, int *val);
+ int (*set_mc_index)(struct realtek_priv *priv, int port, int index);
+ bool (*is_vlan_valid)(struct realtek_priv *priv, unsigned int vlan);
+ int (*enable_vlan)(struct realtek_priv *priv, bool enable);
+ int (*enable_vlan4k)(struct realtek_priv *priv, bool enable);
+ int (*enable_port)(struct realtek_priv *priv, int port, bool enable);
+ int (*phy_read)(struct realtek_priv *priv, int phy, int regnum);
+ int (*phy_write)(struct realtek_priv *priv, int phy, int regnum,
+ u16 val);
+};
+
+struct realtek_variant {
+ const struct dsa_switch_ops *ds_ops_smi;
+ const struct dsa_switch_ops *ds_ops_mdio;
+ const struct realtek_ops *ops;
+ unsigned int clk_delay;
+ u8 cmd_read;
+ u8 cmd_write;
+ size_t chip_data_sz;
+};
+
+/* RTL8366 library helpers */
+int rtl8366_mc_is_used(struct realtek_priv *priv, int mc_index, int *used);
+int rtl8366_set_vlan(struct realtek_priv *priv, int vid, u32 member,
+ u32 untag, u32 fid);
+int rtl8366_set_pvid(struct realtek_priv *priv, unsigned int port,
+ unsigned int vid);
+int rtl8366_enable_vlan4k(struct realtek_priv *priv, bool enable);
+int rtl8366_enable_vlan(struct realtek_priv *priv, bool enable);
+int rtl8366_reset_vlan(struct realtek_priv *priv);
+int rtl8366_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack);
+int rtl8366_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data);
+int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset);
+void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
+
+extern const struct realtek_variant rtl8366rb_variant;
+extern const struct realtek_variant rtl8365mb_variant;
+
+#endif /* _REALTEK_H */
diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c
new file mode 100644
index 000000000..da31d8b83
--- /dev/null
+++ b/drivers/net/dsa/realtek/rtl8365mb.c
@@ -0,0 +1,2148 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Realtek SMI subdriver for the Realtek RTL8365MB-VC ethernet switch.
+ *
+ * Copyright (C) 2021 Alvin Å ipraga <alsi@bang-olufsen.dk>
+ * Copyright (C) 2021 Michael Rasmussen <mir@bang-olufsen.dk>
+ *
+ * The RTL8365MB-VC is a 4+1 port 10/100/1000M switch controller. It includes 4
+ * integrated PHYs for the user facing ports, and an extension interface which
+ * can be connected to the CPU - or another PHY - via either MII, RMII, or
+ * RGMII. The switch is configured via the Realtek Simple Management Interface
+ * (SMI), which uses the MDIO/MDC lines.
+ *
+ * Below is a simplified block diagram of the chip and its relevant interfaces.
+ *
+ * .-----------------------------------.
+ * | |
+ * UTP <---------------> Giga PHY <-> PCS <-> P0 GMAC |
+ * UTP <---------------> Giga PHY <-> PCS <-> P1 GMAC |
+ * UTP <---------------> Giga PHY <-> PCS <-> P2 GMAC |
+ * UTP <---------------> Giga PHY <-> PCS <-> P3 GMAC |
+ * | |
+ * CPU/PHY <-MII/RMII/RGMII---> Extension <---> Extension |
+ * | interface 1 GMAC 1 |
+ * | |
+ * SMI driver/ <-MDC/SCL---> Management ~~~~~~~~~~~~~~ |
+ * EEPROM <-MDIO/SDA--> interface ~REALTEK ~~~~~ |
+ * | ~RTL8365MB ~~~ |
+ * | ~GXXXC TAIWAN~ |
+ * GPIO <--------------> Reset ~~~~~~~~~~~~~~ |
+ * | |
+ * Interrupt <----------> Link UP/DOWN events |
+ * controller | |
+ * '-----------------------------------'
+ *
+ * The driver uses DSA to integrate the 4 user and 1 extension ports into the
+ * kernel. Netdevices are created for the user ports, as are PHY devices for
+ * their integrated PHYs. The device tree firmware should also specify the link
+ * partner of the extension port - either via a fixed-link or other phy-handle.
+ * See the device tree bindings for more detailed information. Note that the
+ * driver has only been tested with a fixed-link, but in principle it should not
+ * matter.
+ *
+ * NOTE: Currently, only the RGMII interface is implemented in this driver.
+ *
+ * The interrupt line is asserted on link UP/DOWN events. The driver creates a
+ * custom irqchip to handle this interrupt and demultiplex the events by reading
+ * the status registers via SMI. Interrupts are then propagated to the relevant
+ * PHY device.
+ *
+ * The EEPROM contains initial register values which the chip will read over I2C
+ * upon hardware reset. It is also possible to omit the EEPROM. In both cases,
+ * the driver will manually reprogram some registers using jam tables to reach
+ * an initial state defined by the vendor driver.
+ *
+ * This Linux driver is written based on an OS-agnostic vendor driver from
+ * Realtek. The reference GPL-licensed sources can be found in the OpenWrt
+ * source tree under the name rtl8367c. The vendor driver claims to support a
+ * number of similar switch controllers from Realtek, but the only hardware we
+ * have is the RTL8365MB-VC. Moreover, there does not seem to be any chip under
+ * the name RTL8367C. Although one wishes that the 'C' stood for some kind of
+ * common hardware revision, there exist examples of chips with the suffix -VC
+ * which are explicitly not supported by the rtl8367c driver and which instead
+ * require the rtl8367d vendor driver. With all this uncertainty, the driver has
+ * been modestly named rtl8365mb. Future implementors may wish to rename things
+ * accordingly.
+ *
+ * In the same family of chips, some carry up to 8 user ports and up to 2
+ * extension ports. Where possible this driver tries to make things generic, but
+ * more work must be done to support these configurations. According to
+ * documentation from Realtek, the family should include the following chips:
+ *
+ * - RTL8363NB
+ * - RTL8363NB-VB
+ * - RTL8363SC
+ * - RTL8363SC-VB
+ * - RTL8364NB
+ * - RTL8364NB-VB
+ * - RTL8365MB-VC
+ * - RTL8366SC
+ * - RTL8367RB-VB
+ * - RTL8367SB
+ * - RTL8367S
+ * - RTL8370MB
+ * - RTL8310SR
+ *
+ * Some of the register logic for these additional chips has been skipped over
+ * while implementing this driver. It is therefore not possible to assume that
+ * things will work out-of-the-box for other chips, and a careful review of the
+ * vendor driver may be needed to expand support. The RTL8365MB-VC seems to be
+ * one of the simpler chips.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/mutex.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+#include <linux/if_bridge.h>
+
+#include "realtek.h"
+
+/* Family-specific data and limits */
+#define RTL8365MB_PHYADDRMAX 7
+#define RTL8365MB_NUM_PHYREGS 32
+#define RTL8365MB_PHYREGMAX (RTL8365MB_NUM_PHYREGS - 1)
+#define RTL8365MB_MAX_NUM_PORTS 11
+#define RTL8365MB_MAX_NUM_EXTINTS 3
+#define RTL8365MB_LEARN_LIMIT_MAX 2112
+
+/* Chip identification registers */
+#define RTL8365MB_CHIP_ID_REG 0x1300
+
+#define RTL8365MB_CHIP_VER_REG 0x1301
+
+#define RTL8365MB_MAGIC_REG 0x13C2
+#define RTL8365MB_MAGIC_VALUE 0x0249
+
+/* Chip reset register */
+#define RTL8365MB_CHIP_RESET_REG 0x1322
+#define RTL8365MB_CHIP_RESET_SW_MASK 0x0002
+#define RTL8365MB_CHIP_RESET_HW_MASK 0x0001
+
+/* Interrupt polarity register */
+#define RTL8365MB_INTR_POLARITY_REG 0x1100
+#define RTL8365MB_INTR_POLARITY_MASK 0x0001
+#define RTL8365MB_INTR_POLARITY_HIGH 0
+#define RTL8365MB_INTR_POLARITY_LOW 1
+
+/* Interrupt control/status register - enable/check specific interrupt types */
+#define RTL8365MB_INTR_CTRL_REG 0x1101
+#define RTL8365MB_INTR_STATUS_REG 0x1102
+#define RTL8365MB_INTR_SLIENT_START_2_MASK 0x1000
+#define RTL8365MB_INTR_SLIENT_START_MASK 0x0800
+#define RTL8365MB_INTR_ACL_ACTION_MASK 0x0200
+#define RTL8365MB_INTR_CABLE_DIAG_FIN_MASK 0x0100
+#define RTL8365MB_INTR_INTERRUPT_8051_MASK 0x0080
+#define RTL8365MB_INTR_LOOP_DETECTION_MASK 0x0040
+#define RTL8365MB_INTR_GREEN_TIMER_MASK 0x0020
+#define RTL8365MB_INTR_SPECIAL_CONGEST_MASK 0x0010
+#define RTL8365MB_INTR_SPEED_CHANGE_MASK 0x0008
+#define RTL8365MB_INTR_LEARN_OVER_MASK 0x0004
+#define RTL8365MB_INTR_METER_EXCEEDED_MASK 0x0002
+#define RTL8365MB_INTR_LINK_CHANGE_MASK 0x0001
+#define RTL8365MB_INTR_ALL_MASK \
+ (RTL8365MB_INTR_SLIENT_START_2_MASK | \
+ RTL8365MB_INTR_SLIENT_START_MASK | \
+ RTL8365MB_INTR_ACL_ACTION_MASK | \
+ RTL8365MB_INTR_CABLE_DIAG_FIN_MASK | \
+ RTL8365MB_INTR_INTERRUPT_8051_MASK | \
+ RTL8365MB_INTR_LOOP_DETECTION_MASK | \
+ RTL8365MB_INTR_GREEN_TIMER_MASK | \
+ RTL8365MB_INTR_SPECIAL_CONGEST_MASK | \
+ RTL8365MB_INTR_SPEED_CHANGE_MASK | \
+ RTL8365MB_INTR_LEARN_OVER_MASK | \
+ RTL8365MB_INTR_METER_EXCEEDED_MASK | \
+ RTL8365MB_INTR_LINK_CHANGE_MASK)
+
+/* Per-port interrupt type status registers */
+#define RTL8365MB_PORT_LINKDOWN_IND_REG 0x1106
+#define RTL8365MB_PORT_LINKDOWN_IND_MASK 0x07FF
+
+#define RTL8365MB_PORT_LINKUP_IND_REG 0x1107
+#define RTL8365MB_PORT_LINKUP_IND_MASK 0x07FF
+
+/* PHY indirect access registers */
+#define RTL8365MB_INDIRECT_ACCESS_CTRL_REG 0x1F00
+#define RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK 0x0002
+#define RTL8365MB_INDIRECT_ACCESS_CTRL_RW_READ 0
+#define RTL8365MB_INDIRECT_ACCESS_CTRL_RW_WRITE 1
+#define RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK 0x0001
+#define RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE 1
+#define RTL8365MB_INDIRECT_ACCESS_STATUS_REG 0x1F01
+#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG 0x1F02
+#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_5_1_MASK GENMASK(4, 0)
+#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK GENMASK(7, 5)
+#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK GENMASK(11, 8)
+#define RTL8365MB_PHY_BASE 0x2000
+#define RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG 0x1F03
+#define RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG 0x1F04
+
+/* PHY OCP address prefix register */
+#define RTL8365MB_GPHY_OCP_MSB_0_REG 0x1D15
+#define RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK 0x0FC0
+#define RTL8365MB_PHY_OCP_ADDR_PREFIX_MASK 0xFC00
+
+/* The PHY OCP addresses of PHY registers 0~31 start here */
+#define RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE 0xA400
+
+/* External interface port mode values - used in DIGITAL_INTERFACE_SELECT */
+#define RTL8365MB_EXT_PORT_MODE_DISABLE 0
+#define RTL8365MB_EXT_PORT_MODE_RGMII 1
+#define RTL8365MB_EXT_PORT_MODE_MII_MAC 2
+#define RTL8365MB_EXT_PORT_MODE_MII_PHY 3
+#define RTL8365MB_EXT_PORT_MODE_TMII_MAC 4
+#define RTL8365MB_EXT_PORT_MODE_TMII_PHY 5
+#define RTL8365MB_EXT_PORT_MODE_GMII 6
+#define RTL8365MB_EXT_PORT_MODE_RMII_MAC 7
+#define RTL8365MB_EXT_PORT_MODE_RMII_PHY 8
+#define RTL8365MB_EXT_PORT_MODE_SGMII 9
+#define RTL8365MB_EXT_PORT_MODE_HSGMII 10
+#define RTL8365MB_EXT_PORT_MODE_1000X_100FX 11
+#define RTL8365MB_EXT_PORT_MODE_1000X 12
+#define RTL8365MB_EXT_PORT_MODE_100FX 13
+
+/* External interface mode configuration registers 0~1 */
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 0x1305 /* EXT1 */
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 0x13C3 /* EXT2 */
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(_extint) \
+ ((_extint) == 1 ? RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 : \
+ (_extint) == 2 ? RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 : \
+ 0x0)
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(_extint) \
+ (0xF << (((_extint) % 2) * 4))
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(_extint) \
+ (((_extint) % 2) * 4)
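+/* Worked example (illustrative): extension interface 1 is configured via
+ * SELECT_REG0 (0x1305) with the mode field in bits [7:4], while interface 2
+ * uses SELECT_REG1 (0x13C3) with the mode field in bits [3:0].
+ */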
+
+/* External interface RGMII TX/RX delay configuration registers 0~2 */
+#define RTL8365MB_EXT_RGMXF_REG0 0x1306 /* EXT0 */
+#define RTL8365MB_EXT_RGMXF_REG1 0x1307 /* EXT1 */
+#define RTL8365MB_EXT_RGMXF_REG2 0x13C5 /* EXT2 */
+#define RTL8365MB_EXT_RGMXF_REG(_extint) \
+ ((_extint) == 0 ? RTL8365MB_EXT_RGMXF_REG0 : \
+ (_extint) == 1 ? RTL8365MB_EXT_RGMXF_REG1 : \
+ (_extint) == 2 ? RTL8365MB_EXT_RGMXF_REG2 : \
+ 0x0)
+#define RTL8365MB_EXT_RGMXF_RXDELAY_MASK 0x0007
+#define RTL8365MB_EXT_RGMXF_TXDELAY_MASK 0x0008
+
+/* External interface port speed values - used in DIGITAL_INTERFACE_FORCE */
+#define RTL8365MB_PORT_SPEED_10M 0
+#define RTL8365MB_PORT_SPEED_100M 1
+#define RTL8365MB_PORT_SPEED_1000M 2
+
+/* External interface force configuration registers 0~2 */
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 0x1310 /* EXT0 */
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG1 0x1311 /* EXT1 */
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG2 0x13C4 /* EXT2 */
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(_extint) \
+ ((_extint) == 0 ? RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 : \
+ (_extint) == 1 ? RTL8365MB_DIGITAL_INTERFACE_FORCE_REG1 : \
+ (_extint) == 2 ? RTL8365MB_DIGITAL_INTERFACE_FORCE_REG2 : \
+ 0x0)
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_EN_MASK 0x1000
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_NWAY_MASK 0x0080
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_TXPAUSE_MASK 0x0040
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_RXPAUSE_MASK 0x0020
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_LINK_MASK 0x0010
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_DUPLEX_MASK 0x0004
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_SPEED_MASK 0x0003
+
+/* CPU port mask register - controls which ports are treated as CPU ports */
+#define RTL8365MB_CPU_PORT_MASK_REG 0x1219
+#define RTL8365MB_CPU_PORT_MASK_MASK 0x07FF
+
+/* CPU control register */
+#define RTL8365MB_CPU_CTRL_REG 0x121A
+#define RTL8365MB_CPU_CTRL_TRAP_PORT_EXT_MASK 0x0400
+#define RTL8365MB_CPU_CTRL_TAG_FORMAT_MASK 0x0200
+#define RTL8365MB_CPU_CTRL_RXBYTECOUNT_MASK 0x0080
+#define RTL8365MB_CPU_CTRL_TAG_POSITION_MASK 0x0040
+#define RTL8365MB_CPU_CTRL_TRAP_PORT_MASK 0x0038
+#define RTL8365MB_CPU_CTRL_INSERTMODE_MASK 0x0006
+#define RTL8365MB_CPU_CTRL_EN_MASK 0x0001
+
+/* Maximum packet length register */
+#define RTL8365MB_CFG0_MAX_LEN_REG 0x088C
+#define RTL8365MB_CFG0_MAX_LEN_MASK 0x3FFF
+
+/* Port learning limit registers */
+#define RTL8365MB_LUT_PORT_LEARN_LIMIT_BASE 0x0A20
+#define RTL8365MB_LUT_PORT_LEARN_LIMIT_REG(_physport) \
+ (RTL8365MB_LUT_PORT_LEARN_LIMIT_BASE + (_physport))
+
+/* Port isolation (forwarding mask) registers */
+#define RTL8365MB_PORT_ISOLATION_REG_BASE 0x08A2
+#define RTL8365MB_PORT_ISOLATION_REG(_physport) \
+ (RTL8365MB_PORT_ISOLATION_REG_BASE + (_physport))
+#define RTL8365MB_PORT_ISOLATION_MASK 0x07FF
+
+/* MSTP port state registers - indexed by tree instance */
+#define RTL8365MB_MSTI_CTRL_BASE 0x0A00
+#define RTL8365MB_MSTI_CTRL_REG(_msti, _physport) \
+ (RTL8365MB_MSTI_CTRL_BASE + ((_msti) << 1) + ((_physport) >> 3))
+#define RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET(_physport) ((_physport) << 1)
+#define RTL8365MB_MSTI_CTRL_PORT_STATE_MASK(_physport) \
+ (0x3 << RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET((_physport)))
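+/* Worked example (illustrative): for tree instance 0 and physical port 5
+ * the state field sits in RTL8365MB_MSTI_CTRL_REG(0, 5) = 0x0A00, at bits
+ * [11:10] (offset 5 << 1 = 10, mask 0x3 << 10).
+ */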
+
+/* MIB counter value registers */
+#define RTL8365MB_MIB_COUNTER_BASE 0x1000
+#define RTL8365MB_MIB_COUNTER_REG(_x) (RTL8365MB_MIB_COUNTER_BASE + (_x))
+
+/* MIB counter address register */
+#define RTL8365MB_MIB_ADDRESS_REG 0x1004
+#define RTL8365MB_MIB_ADDRESS_PORT_OFFSET 0x007C
+#define RTL8365MB_MIB_ADDRESS(_p, _x) \
+ (((RTL8365MB_MIB_ADDRESS_PORT_OFFSET) * (_p) + (_x)) >> 2)
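+/* Worked example (illustrative): ifInUcastPkts sits at byte offset 16 in
+ * each port's counter block, so for port 1 the address is
+ * RTL8365MB_MIB_ADDRESS(1, 16) = (0x7C * 1 + 16) >> 2 = 0x23.
+ */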
+
+#define RTL8365MB_MIB_CTRL0_REG 0x1005
+#define RTL8365MB_MIB_CTRL0_RESET_MASK 0x0002
+#define RTL8365MB_MIB_CTRL0_BUSY_MASK 0x0001
+
+/* The DSA callback .get_stats64 runs in atomic context, so we are not allowed
+ * to block. On the other hand, accessing MIB counters absolutely requires us to
+ * block. The solution is thus to schedule work which polls the MIB counters
+ * asynchronously and updates some private data, which the callback can then
+ * fetch atomically. Three seconds should be a good enough polling interval.
+ */
+#define RTL8365MB_STATS_INTERVAL_JIFFIES (3 * HZ)
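+/* Sketch of the assumed polling shape (simplified; mib_work and
+ * rtl8365mb_stats_update() are illustrative names, not necessarily the
+ * driver's own):
+ *
+ *    static void rtl8365mb_stats_poll(struct work_struct *work)
+ *    {
+ *            struct rtl8365mb_port *p =
+ *                    container_of(to_delayed_work(work),
+ *                                 struct rtl8365mb_port, mib_work);
+ *
+ *            rtl8365mb_stats_update(p);
+ *            schedule_delayed_work(&p->mib_work,
+ *                                  RTL8365MB_STATS_INTERVAL_JIFFIES);
+ *    }
+ */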
+
+enum rtl8365mb_mib_counter_index {
+ RTL8365MB_MIB_ifInOctets,
+ RTL8365MB_MIB_dot3StatsFCSErrors,
+ RTL8365MB_MIB_dot3StatsSymbolErrors,
+ RTL8365MB_MIB_dot3InPauseFrames,
+ RTL8365MB_MIB_dot3ControlInUnknownOpcodes,
+ RTL8365MB_MIB_etherStatsFragments,
+ RTL8365MB_MIB_etherStatsJabbers,
+ RTL8365MB_MIB_ifInUcastPkts,
+ RTL8365MB_MIB_etherStatsDropEvents,
+ RTL8365MB_MIB_ifInMulticastPkts,
+ RTL8365MB_MIB_ifInBroadcastPkts,
+ RTL8365MB_MIB_inMldChecksumError,
+ RTL8365MB_MIB_inIgmpChecksumError,
+ RTL8365MB_MIB_inMldSpecificQuery,
+ RTL8365MB_MIB_inMldGeneralQuery,
+ RTL8365MB_MIB_inIgmpSpecificQuery,
+ RTL8365MB_MIB_inIgmpGeneralQuery,
+ RTL8365MB_MIB_inMldLeaves,
+ RTL8365MB_MIB_inIgmpLeaves,
+ RTL8365MB_MIB_etherStatsOctets,
+ RTL8365MB_MIB_etherStatsUnderSizePkts,
+ RTL8365MB_MIB_etherOversizeStats,
+ RTL8365MB_MIB_etherStatsPkts64Octets,
+ RTL8365MB_MIB_etherStatsPkts65to127Octets,
+ RTL8365MB_MIB_etherStatsPkts128to255Octets,
+ RTL8365MB_MIB_etherStatsPkts256to511Octets,
+ RTL8365MB_MIB_etherStatsPkts512to1023Octets,
+ RTL8365MB_MIB_etherStatsPkts1024to1518Octets,
+ RTL8365MB_MIB_ifOutOctets,
+ RTL8365MB_MIB_dot3StatsSingleCollisionFrames,
+ RTL8365MB_MIB_dot3StatsMultipleCollisionFrames,
+ RTL8365MB_MIB_dot3StatsDeferredTransmissions,
+ RTL8365MB_MIB_dot3StatsLateCollisions,
+ RTL8365MB_MIB_etherStatsCollisions,
+ RTL8365MB_MIB_dot3StatsExcessiveCollisions,
+ RTL8365MB_MIB_dot3OutPauseFrames,
+ RTL8365MB_MIB_ifOutDiscards,
+ RTL8365MB_MIB_dot1dTpPortInDiscards,
+ RTL8365MB_MIB_ifOutUcastPkts,
+ RTL8365MB_MIB_ifOutMulticastPkts,
+ RTL8365MB_MIB_ifOutBroadcastPkts,
+ RTL8365MB_MIB_outOampduPkts,
+ RTL8365MB_MIB_inOampduPkts,
+ RTL8365MB_MIB_inIgmpJoinsSuccess,
+ RTL8365MB_MIB_inIgmpJoinsFail,
+ RTL8365MB_MIB_inMldJoinsSuccess,
+ RTL8365MB_MIB_inMldJoinsFail,
+ RTL8365MB_MIB_inReportSuppressionDrop,
+ RTL8365MB_MIB_inLeaveSuppressionDrop,
+ RTL8365MB_MIB_outIgmpReports,
+ RTL8365MB_MIB_outIgmpLeaves,
+ RTL8365MB_MIB_outIgmpGeneralQuery,
+ RTL8365MB_MIB_outIgmpSpecificQuery,
+ RTL8365MB_MIB_outMldReports,
+ RTL8365MB_MIB_outMldLeaves,
+ RTL8365MB_MIB_outMldGeneralQuery,
+ RTL8365MB_MIB_outMldSpecificQuery,
+ RTL8365MB_MIB_inKnownMulticastPkts,
+ RTL8365MB_MIB_END,
+};
+
+struct rtl8365mb_mib_counter {
+ u32 offset;
+ u32 length;
+ const char *name;
+};
+
+#define RTL8365MB_MAKE_MIB_COUNTER(_offset, _length, _name) \
+ [RTL8365MB_MIB_ ## _name] = { _offset, _length, #_name }
+
+static struct rtl8365mb_mib_counter rtl8365mb_mib_counters[] = {
+ RTL8365MB_MAKE_MIB_COUNTER(0, 4, ifInOctets),
+ RTL8365MB_MAKE_MIB_COUNTER(4, 2, dot3StatsFCSErrors),
+ RTL8365MB_MAKE_MIB_COUNTER(6, 2, dot3StatsSymbolErrors),
+ RTL8365MB_MAKE_MIB_COUNTER(8, 2, dot3InPauseFrames),
+ RTL8365MB_MAKE_MIB_COUNTER(10, 2, dot3ControlInUnknownOpcodes),
+ RTL8365MB_MAKE_MIB_COUNTER(12, 2, etherStatsFragments),
+ RTL8365MB_MAKE_MIB_COUNTER(14, 2, etherStatsJabbers),
+ RTL8365MB_MAKE_MIB_COUNTER(16, 2, ifInUcastPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(18, 2, etherStatsDropEvents),
+ RTL8365MB_MAKE_MIB_COUNTER(20, 2, ifInMulticastPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(22, 2, ifInBroadcastPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(24, 2, inMldChecksumError),
+ RTL8365MB_MAKE_MIB_COUNTER(26, 2, inIgmpChecksumError),
+ RTL8365MB_MAKE_MIB_COUNTER(28, 2, inMldSpecificQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(30, 2, inMldGeneralQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(32, 2, inIgmpSpecificQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(34, 2, inIgmpGeneralQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(36, 2, inMldLeaves),
+ RTL8365MB_MAKE_MIB_COUNTER(38, 2, inIgmpLeaves),
+ RTL8365MB_MAKE_MIB_COUNTER(40, 4, etherStatsOctets),
+ RTL8365MB_MAKE_MIB_COUNTER(44, 2, etherStatsUnderSizePkts),
+ RTL8365MB_MAKE_MIB_COUNTER(46, 2, etherOversizeStats),
+ RTL8365MB_MAKE_MIB_COUNTER(48, 2, etherStatsPkts64Octets),
+ RTL8365MB_MAKE_MIB_COUNTER(50, 2, etherStatsPkts65to127Octets),
+ RTL8365MB_MAKE_MIB_COUNTER(52, 2, etherStatsPkts128to255Octets),
+ RTL8365MB_MAKE_MIB_COUNTER(54, 2, etherStatsPkts256to511Octets),
+ RTL8365MB_MAKE_MIB_COUNTER(56, 2, etherStatsPkts512to1023Octets),
+ RTL8365MB_MAKE_MIB_COUNTER(58, 2, etherStatsPkts1024to1518Octets),
+ RTL8365MB_MAKE_MIB_COUNTER(60, 4, ifOutOctets),
+ RTL8365MB_MAKE_MIB_COUNTER(64, 2, dot3StatsSingleCollisionFrames),
+ RTL8365MB_MAKE_MIB_COUNTER(66, 2, dot3StatsMultipleCollisionFrames),
+ RTL8365MB_MAKE_MIB_COUNTER(68, 2, dot3StatsDeferredTransmissions),
+ RTL8365MB_MAKE_MIB_COUNTER(70, 2, dot3StatsLateCollisions),
+ RTL8365MB_MAKE_MIB_COUNTER(72, 2, etherStatsCollisions),
+ RTL8365MB_MAKE_MIB_COUNTER(74, 2, dot3StatsExcessiveCollisions),
+ RTL8365MB_MAKE_MIB_COUNTER(76, 2, dot3OutPauseFrames),
+ RTL8365MB_MAKE_MIB_COUNTER(78, 2, ifOutDiscards),
+ RTL8365MB_MAKE_MIB_COUNTER(80, 2, dot1dTpPortInDiscards),
+ RTL8365MB_MAKE_MIB_COUNTER(82, 2, ifOutUcastPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(84, 2, ifOutMulticastPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(86, 2, ifOutBroadcastPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(88, 2, outOampduPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(90, 2, inOampduPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(92, 4, inIgmpJoinsSuccess),
+ RTL8365MB_MAKE_MIB_COUNTER(96, 2, inIgmpJoinsFail),
+ RTL8365MB_MAKE_MIB_COUNTER(98, 2, inMldJoinsSuccess),
+ RTL8365MB_MAKE_MIB_COUNTER(100, 2, inMldJoinsFail),
+ RTL8365MB_MAKE_MIB_COUNTER(102, 2, inReportSuppressionDrop),
+ RTL8365MB_MAKE_MIB_COUNTER(104, 2, inLeaveSuppressionDrop),
+ RTL8365MB_MAKE_MIB_COUNTER(106, 2, outIgmpReports),
+ RTL8365MB_MAKE_MIB_COUNTER(108, 2, outIgmpLeaves),
+ RTL8365MB_MAKE_MIB_COUNTER(110, 2, outIgmpGeneralQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(112, 2, outIgmpSpecificQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(114, 2, outMldReports),
+ RTL8365MB_MAKE_MIB_COUNTER(116, 2, outMldLeaves),
+ RTL8365MB_MAKE_MIB_COUNTER(118, 2, outMldGeneralQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(120, 2, outMldSpecificQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(122, 2, inKnownMulticastPkts),
+};
+
+static_assert(ARRAY_SIZE(rtl8365mb_mib_counters) == RTL8365MB_MIB_END);
+
+struct rtl8365mb_jam_tbl_entry {
+ u16 reg;
+ u16 val;
+};
+
+/* Initialization "jam tables": opaque sequences of register writes, lifted
+ * verbatim from the vendor driver sources and applied during switch init.
+ */
+static const struct rtl8365mb_jam_tbl_entry rtl8365mb_init_jam_8365mb_vc[] = {
+ { 0x13EB, 0x15BB }, { 0x1303, 0x06D6 }, { 0x1304, 0x0700 },
+ { 0x13E2, 0x003F }, { 0x13F9, 0x0090 }, { 0x121E, 0x03CA },
+ { 0x1233, 0x0352 }, { 0x1237, 0x00A0 }, { 0x123A, 0x0030 },
+ { 0x1239, 0x0084 }, { 0x0301, 0x1000 }, { 0x1349, 0x001F },
+ { 0x18E0, 0x4004 }, { 0x122B, 0x241C }, { 0x1305, 0xC000 },
+ { 0x13F0, 0x0000 },
+};
+
+static const struct rtl8365mb_jam_tbl_entry rtl8365mb_init_jam_common[] = {
+ { 0x1200, 0x7FCB }, { 0x0884, 0x0003 }, { 0x06EB, 0x0001 },
+ { 0x03FA, 0x0007 }, { 0x08C8, 0x00C0 }, { 0x0A30, 0x020E },
+ { 0x0800, 0x0000 }, { 0x0802, 0x0000 }, { 0x09DA, 0x0013 },
+ { 0x1D32, 0x0002 },
+};
+
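+/* Treated as a bitmask rather than a plain enumeration, so that a single
+ * external interface can advertise several supported modes at once.
+ */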
+enum rtl8365mb_phy_interface_mode {
+ RTL8365MB_PHY_INTERFACE_MODE_INVAL = 0,
+ RTL8365MB_PHY_INTERFACE_MODE_INTERNAL = BIT(0),
+ RTL8365MB_PHY_INTERFACE_MODE_MII = BIT(1),
+ RTL8365MB_PHY_INTERFACE_MODE_TMII = BIT(2),
+ RTL8365MB_PHY_INTERFACE_MODE_RMII = BIT(3),
+ RTL8365MB_PHY_INTERFACE_MODE_RGMII = BIT(4),
+ RTL8365MB_PHY_INTERFACE_MODE_SGMII = BIT(5),
+ RTL8365MB_PHY_INTERFACE_MODE_HSGMII = BIT(6),
+};
+
+/**
+ * struct rtl8365mb_extint - external interface info
+ * @port: the port with an external interface
+ * @id: the external interface ID, which is either 0, 1, or 2
+ * @supported_interfaces: a bitmask of supported PHY interface modes
+ *
+ * Represents a mapping: port -> { id, supported_interfaces }. To be embedded
+ * in &struct rtl8365mb_chip_info for every port with an external interface.
+ */
+struct rtl8365mb_extint {
+ int port;
+ int id;
+ unsigned int supported_interfaces;
+};
+
+/**
+ * struct rtl8365mb_chip_info - static chip-specific info
+ * @name: human-readable chip name
+ * @chip_id: chip identifier
+ * @chip_ver: chip silicon revision
+ * @extints: available external interfaces
+ * @jam_table: chip-specific initialization jam table
+ * @jam_size: size of the chip's jam table
+ *
+ * These data are specific to a given chip in the family of switches supported
+ * by this driver. When adding support for another chip in the family, a new
+ * chip info should be added to the rtl8365mb_chip_infos array.
+ */
+struct rtl8365mb_chip_info {
+ const char *name;
+ u32 chip_id;
+ u32 chip_ver;
+ const struct rtl8365mb_extint extints[RTL8365MB_MAX_NUM_EXTINTS];
+ const struct rtl8365mb_jam_tbl_entry *jam_table;
+ size_t jam_size;
+};
+
+/* Chip info for each supported switch in the family */
+#define PHY_INTF(_mode) (RTL8365MB_PHY_INTERFACE_MODE_ ## _mode)
+static const struct rtl8365mb_chip_info rtl8365mb_chip_infos[] = {
+ {
+ .name = "RTL8365MB-VC",
+ .chip_id = 0x6367,
+ .chip_ver = 0x0040,
+ .extints = {
+ { 6, 1, PHY_INTF(MII) | PHY_INTF(TMII) |
+ PHY_INTF(RMII) | PHY_INTF(RGMII) },
+ },
+ .jam_table = rtl8365mb_init_jam_8365mb_vc,
+ .jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc),
+ },
+ {
+ .name = "RTL8367S",
+ .chip_id = 0x6367,
+ .chip_ver = 0x00A0,
+ .extints = {
+ { 6, 1, PHY_INTF(SGMII) | PHY_INTF(HSGMII) },
+ { 7, 2, PHY_INTF(MII) | PHY_INTF(TMII) |
+ PHY_INTF(RMII) | PHY_INTF(RGMII) },
+ },
+ .jam_table = rtl8365mb_init_jam_8365mb_vc,
+ .jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc),
+ },
+ {
+ .name = "RTL8367RB-VB",
+ .chip_id = 0x6367,
+ .chip_ver = 0x0020,
+ .extints = {
+ { 6, 1, PHY_INTF(MII) | PHY_INTF(TMII) |
+ PHY_INTF(RMII) | PHY_INTF(RGMII) },
+ { 7, 2, PHY_INTF(MII) | PHY_INTF(TMII) |
+ PHY_INTF(RMII) | PHY_INTF(RGMII) },
+ },
+ .jam_table = rtl8365mb_init_jam_8365mb_vc,
+ .jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc),
+ },
+};
+
+enum rtl8365mb_stp_state {
+ RTL8365MB_STP_STATE_DISABLED = 0,
+ RTL8365MB_STP_STATE_BLOCKING = 1,
+ RTL8365MB_STP_STATE_LEARNING = 2,
+ RTL8365MB_STP_STATE_FORWARDING = 3,
+};
+
+enum rtl8365mb_cpu_insert {
+ RTL8365MB_CPU_INSERT_TO_ALL = 0,
+ RTL8365MB_CPU_INSERT_TO_TRAPPING = 1,
+ RTL8365MB_CPU_INSERT_TO_NONE = 2,
+};
+
+enum rtl8365mb_cpu_position {
+ RTL8365MB_CPU_POS_AFTER_SA = 0,
+ RTL8365MB_CPU_POS_BEFORE_CRC = 1,
+};
+
+enum rtl8365mb_cpu_format {
+ RTL8365MB_CPU_FORMAT_8BYTES = 0,
+ RTL8365MB_CPU_FORMAT_4BYTES = 1,
+};
+
+enum rtl8365mb_cpu_rxlen {
+ RTL8365MB_CPU_RXLEN_72BYTES = 0,
+ RTL8365MB_CPU_RXLEN_64BYTES = 1,
+};
+
+/**
+ * struct rtl8365mb_cpu - CPU port configuration
+ * @enable: enable/disable hardware insertion of CPU tag in switch->CPU frames
+ * @mask: port mask of ports that should parse CPU tags
+ * @trap_port: forward trapped frames to this port
+ * @insert: CPU tag insertion mode in switch->CPU frames
+ * @position: position of CPU tag in frame
+ * @rx_length: minimum CPU RX length
+ * @format: CPU tag format
+ *
+ * Represents the CPU tagging and CPU port configuration of the switch. These
+ * settings are configurable at runtime.
+ */
+struct rtl8365mb_cpu {
+ bool enable;
+ u32 mask;
+ u32 trap_port;
+ enum rtl8365mb_cpu_insert insert;
+ enum rtl8365mb_cpu_position position;
+ enum rtl8365mb_cpu_rxlen rx_length;
+ enum rtl8365mb_cpu_format format;
+};
+
+/**
+ * struct rtl8365mb_port - private per-port data
+ * @priv: pointer to parent realtek_priv data
+ * @index: DSA port index, same as dsa_port::index
+ * @stats: link statistics populated by rtl8365mb_stats_poll, ready for atomic
+ * access via rtl8365mb_get_stats64
+ * @stats_lock: protect the stats structure during read/update
+ * @mib_work: delayed work for polling MIB counters
+ */
+struct rtl8365mb_port {
+ struct realtek_priv *priv;
+ unsigned int index;
+ struct rtnl_link_stats64 stats;
+ spinlock_t stats_lock;
+ struct delayed_work mib_work;
+};
+
+/**
+ * struct rtl8365mb - driver private data
+ * @priv: pointer to parent realtek_priv data
+ * @irq: registered IRQ or zero
+ * @chip_info: chip-specific info about the attached switch
+ * @cpu: CPU tagging and CPU port configuration for this chip
+ * @mib_lock: prevent concurrent reads of MIB counters
+ * @ports: per-port data
+ *
+ * Private data for this driver.
+ */
+struct rtl8365mb {
+ struct realtek_priv *priv;
+ int irq;
+ const struct rtl8365mb_chip_info *chip_info;
+ struct rtl8365mb_cpu cpu;
+ struct mutex mib_lock;
+ struct rtl8365mb_port ports[RTL8365MB_MAX_NUM_PORTS];
+};
+
+static int rtl8365mb_phy_poll_busy(struct realtek_priv *priv)
+{
+ u32 val;
+
+ return regmap_read_poll_timeout(priv->map_nolock,
+ RTL8365MB_INDIRECT_ACCESS_STATUS_REG,
+ val, !val, 10, 100);
+}
+
+static int rtl8365mb_phy_ocp_prepare(struct realtek_priv *priv, int phy,
+ u32 ocp_addr)
+{
+ u32 val;
+ int ret;
+
+ /* Set OCP prefix */
+ val = FIELD_GET(RTL8365MB_PHY_OCP_ADDR_PREFIX_MASK, ocp_addr);
+ ret = regmap_update_bits(
+ priv->map_nolock, RTL8365MB_GPHY_OCP_MSB_0_REG,
+ RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK,
+ FIELD_PREP(RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK, val));
+ if (ret)
+ return ret;
+
+ /* Set PHY register address */
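+ /* Bits 9:1 of the OCP address are split across the two OCPADR fields
+ * below; the upper bits were written as the prefix above. Bit 0 is
+ * dropped, presumably because OCP register addresses are 2-byte aligned
+ * (callers compute them as regnum * 2).
+ */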
+ val = RTL8365MB_PHY_BASE;
+ val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK, phy);
+ val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_5_1_MASK,
+ ocp_addr >> 1);
+ val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK,
+ ocp_addr >> 6);
+ ret = regmap_write(priv->map_nolock,
+ RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtl8365mb_phy_ocp_read(struct realtek_priv *priv, int phy,
+ u32 ocp_addr, u16 *data)
+{
+ u32 val;
+ int ret;
+
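+ /* Hold the regmap lock across the whole busy-poll/prepare/execute
+ * sequence, using the nolock regmap variant, so that the indirect
+ * access is not interleaved with other register accesses.
+ */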
+ mutex_lock(&priv->map_lock);
+
+ ret = rtl8365mb_phy_poll_busy(priv);
+ if (ret)
+ goto out;
+
+ ret = rtl8365mb_phy_ocp_prepare(priv, phy, ocp_addr);
+ if (ret)
+ goto out;
+
+ /* Execute read operation */
+ val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK,
+ RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) |
+ FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK,
+ RTL8365MB_INDIRECT_ACCESS_CTRL_RW_READ);
+ ret = regmap_write(priv->map_nolock, RTL8365MB_INDIRECT_ACCESS_CTRL_REG,
+ val);
+ if (ret)
+ goto out;
+
+ ret = rtl8365mb_phy_poll_busy(priv);
+ if (ret)
+ goto out;
+
+ /* Get PHY register data */
+ ret = regmap_read(priv->map_nolock,
+ RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG, &val);
+ if (ret)
+ goto out;
+
+ *data = val & 0xFFFF;
+
+out:
+ mutex_unlock(&priv->map_lock);
+
+ return ret;
+}
+
+static int rtl8365mb_phy_ocp_write(struct realtek_priv *priv, int phy,
+ u32 ocp_addr, u16 data)
+{
+ u32 val;
+ int ret;
+
+ mutex_lock(&priv->map_lock);
+
+ ret = rtl8365mb_phy_poll_busy(priv);
+ if (ret)
+ goto out;
+
+ ret = rtl8365mb_phy_ocp_prepare(priv, phy, ocp_addr);
+ if (ret)
+ goto out;
+
+ /* Set PHY register data */
+ ret = regmap_write(priv->map_nolock,
+ RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG, data);
+ if (ret)
+ goto out;
+
+ /* Execute write operation */
+ val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK,
+ RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) |
+ FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK,
+ RTL8365MB_INDIRECT_ACCESS_CTRL_RW_WRITE);
+ ret = regmap_write(priv->map_nolock, RTL8365MB_INDIRECT_ACCESS_CTRL_REG,
+ val);
+ if (ret)
+ goto out;
+
+ ret = rtl8365mb_phy_poll_busy(priv);
+ if (ret)
+ goto out;
+
+out:
+ mutex_unlock(&priv->map_lock);
+
+ return ret;
+}
+
+static int rtl8365mb_phy_read(struct realtek_priv *priv, int phy, int regnum)
+{
+ u32 ocp_addr;
+ u16 val;
+ int ret;
+
+ if (phy > RTL8365MB_PHYADDRMAX)
+ return -EINVAL;
+
+ if (regnum > RTL8365MB_PHYREGMAX)
+ return -EINVAL;
+
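+ /* Standard MII registers are mapped into the PHY's OCP address space
+ * at two bytes per register, starting at the PHYREG base.
+ */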
+ ocp_addr = RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE + regnum * 2;
+
+ ret = rtl8365mb_phy_ocp_read(priv, phy, ocp_addr, &val);
+ if (ret) {
+ dev_err(priv->dev,
+ "failed to read PHY%d reg %02x @ %04x, ret %d\n", phy,
+ regnum, ocp_addr, ret);
+ return ret;
+ }
+
+ dev_dbg(priv->dev, "read PHY%d register 0x%02x @ %04x, val <- %04x\n",
+ phy, regnum, ocp_addr, val);
+
+ return val;
+}
+
+static int rtl8365mb_phy_write(struct realtek_priv *priv, int phy, int regnum,
+ u16 val)
+{
+ u32 ocp_addr;
+ int ret;
+
+ if (phy > RTL8365MB_PHYADDRMAX)
+ return -EINVAL;
+
+ if (regnum > RTL8365MB_PHYREGMAX)
+ return -EINVAL;
+
+ ocp_addr = RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE + regnum * 2;
+
+ ret = rtl8365mb_phy_ocp_write(priv, phy, ocp_addr, val);
+ if (ret) {
+ dev_err(priv->dev,
+ "failed to write PHY%d reg %02x @ %04x, ret %d\n", phy,
+ regnum, ocp_addr, ret);
+ return ret;
+ }
+
+ dev_dbg(priv->dev, "write PHY%d register 0x%02x @ %04x, val -> %04x\n",
+ phy, regnum, ocp_addr, val);
+
+ return 0;
+}
+
+static int rtl8365mb_dsa_phy_read(struct dsa_switch *ds, int phy, int regnum)
+{
+ return rtl8365mb_phy_read(ds->priv, phy, regnum);
+}
+
+static int rtl8365mb_dsa_phy_write(struct dsa_switch *ds, int phy, int regnum,
+ u16 val)
+{
+ return rtl8365mb_phy_write(ds->priv, phy, regnum, val);
+}
+
+static const struct rtl8365mb_extint *
+rtl8365mb_get_port_extint(struct realtek_priv *priv, int port)
+{
+ struct rtl8365mb *mb = priv->chip_data;
+ int i;
+
+ for (i = 0; i < RTL8365MB_MAX_NUM_EXTINTS; i++) {
+ const struct rtl8365mb_extint *extint =
+ &mb->chip_info->extints[i];
+
+ if (!extint->supported_interfaces)
+ continue;
+
+ if (extint->port == port)
+ return extint;
+ }
+
+ return NULL;
+}
+
+static enum dsa_tag_protocol
+rtl8365mb_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mp)
+{
+ struct realtek_priv *priv = ds->priv;
+ struct rtl8365mb_cpu *cpu;
+ struct rtl8365mb *mb;
+
+ mb = priv->chip_data;
+ cpu = &mb->cpu;
+
+ if (cpu->position == RTL8365MB_CPU_POS_BEFORE_CRC)
+ return DSA_TAG_PROTO_RTL8_4T;
+
+ return DSA_TAG_PROTO_RTL8_4;
+}
+
+static int rtl8365mb_ext_config_rgmii(struct realtek_priv *priv, int port,
+ phy_interface_t interface)
+{
+ const struct rtl8365mb_extint *extint =
+ rtl8365mb_get_port_extint(priv, port);
+ struct device_node *dn;
+ struct dsa_port *dp;
+ int tx_delay = 0;
+ int rx_delay = 0;
+ u32 val;
+ int ret;
+
+ if (!extint)
+ return -ENODEV;
+
+ dp = dsa_to_port(priv->ds, port);
+ dn = dp->dn;
+
+ /* Set the RGMII TX/RX delay
+ *
+ * The Realtek vendor driver indicates the following possible
+ * configuration settings:
+ *
+ * TX delay:
+ * 0 = no delay, 1 = 2 ns delay
+ * RX delay:
+ * 0 = no delay, 7 = maximum delay
+ * Each step is approximately 0.3 ns, so the maximum delay is about
+ * 2.1 ns.
+ *
+ * The vendor driver also states that this must be configured *before*
+ * forcing the external interface into a particular mode, which is done
+ * in the rtl8365mb_phylink_mac_link_{up,down} functions.
+ *
+ * Only configure an RGMII TX (resp. RX) delay if the
+ * tx-internal-delay-ps (resp. rx-internal-delay-ps) OF property is
+ * specified. We ignore the detail of the RGMII interface mode
+ * (RGMII_{RXID, TXID, etc.}), as this is considered to be a PHY-only
+ * property.
+ */
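+ /* For example, a port OF node with tx-internal-delay-ps = <2000> and
+ * rx-internal-delay-ps = <900> yields a 2 ns TX delay and a 0.9 ns
+ * (3-step) RX delay.
+ */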
+ if (!of_property_read_u32(dn, "tx-internal-delay-ps", &val)) {
+ val = val / 1000; /* convert to ns */
+
+ if (val == 0 || val == 2)
+ tx_delay = val / 2;
+ else
+ dev_warn(priv->dev,
+ "RGMII TX delay must be 0 or 2 ns\n");
+ }
+
+ if (!of_property_read_u32(dn, "rx-internal-delay-ps", &val)) {
+ val = DIV_ROUND_CLOSEST(val, 300); /* convert to 0.3 ns step */
+
+ if (val <= 7)
+ rx_delay = val;
+ else
+ dev_warn(priv->dev,
+ "RGMII RX delay must be 0 to 2.1 ns\n");
+ }
+
+ ret = regmap_update_bits(
+ priv->map, RTL8365MB_EXT_RGMXF_REG(extint->id),
+ RTL8365MB_EXT_RGMXF_TXDELAY_MASK |
+ RTL8365MB_EXT_RGMXF_RXDELAY_MASK,
+ FIELD_PREP(RTL8365MB_EXT_RGMXF_TXDELAY_MASK, tx_delay) |
+ FIELD_PREP(RTL8365MB_EXT_RGMXF_RXDELAY_MASK, rx_delay));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(
+ priv->map, RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(extint->id),
+ RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(extint->id),
+ RTL8365MB_EXT_PORT_MODE_RGMII
+ << RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(
+ extint->id));
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtl8365mb_ext_config_forcemode(struct realtek_priv *priv, int port,
+ bool link, int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ const struct rtl8365mb_extint *extint =
+ rtl8365mb_get_port_extint(priv, port);
+ u32 r_tx_pause;
+ u32 r_rx_pause;
+ u32 r_duplex;
+ u32 r_speed;
+ u32 r_link;
+ int val;
+ int ret;
+
+ if (!extint)
+ return -ENODEV;
+
+ if (link) {
+ /* Force the link up with the desired configuration */
+ r_link = 1;
+ r_rx_pause = rx_pause ? 1 : 0;
+ r_tx_pause = tx_pause ? 1 : 0;
+
+ if (speed == SPEED_1000) {
+ r_speed = RTL8365MB_PORT_SPEED_1000M;
+ } else if (speed == SPEED_100) {
+ r_speed = RTL8365MB_PORT_SPEED_100M;
+ } else if (speed == SPEED_10) {
+ r_speed = RTL8365MB_PORT_SPEED_10M;
+ } else {
+ dev_err(priv->dev, "unsupported port speed %s\n",
+ phy_speed_to_str(speed));
+ return -EINVAL;
+ }
+
+ if (duplex == DUPLEX_FULL) {
+ r_duplex = 1;
+ } else if (duplex == DUPLEX_HALF) {
+ r_duplex = 0;
+ } else {
+ dev_err(priv->dev, "unsupported duplex %s\n",
+ phy_duplex_to_str(duplex));
+ return -EINVAL;
+ }
+ } else {
+ /* Force the link down and reset any programmed configuration */
+ r_link = 0;
+ r_tx_pause = 0;
+ r_rx_pause = 0;
+ r_speed = 0;
+ r_duplex = 0;
+ }
+
+ val = FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_EN_MASK, 1) |
+ FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_TXPAUSE_MASK,
+ r_tx_pause) |
+ FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_RXPAUSE_MASK,
+ r_rx_pause) |
+ FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_LINK_MASK, r_link) |
+ FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_DUPLEX_MASK,
+ r_duplex) |
+ FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_SPEED_MASK, r_speed);
+ ret = regmap_write(priv->map,
+ RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(extint->id),
+ val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void rtl8365mb_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ const struct rtl8365mb_extint *extint =
+ rtl8365mb_get_port_extint(ds->priv, port);
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD;
+
+ if (!extint) {
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+
+ /* GMII is the default interface mode for phylib, so
+ * we have to support it for ports with integrated PHY.
+ */
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ return;
+ }
+
+ /* Populate according to the modes supported by _this driver_,
+ * not necessarily the modes supported by the hardware, some of
+ * which remain unimplemented.
+ */
+
+ if (extint->supported_interfaces & RTL8365MB_PHY_INTERFACE_MODE_RGMII)
+ phy_interface_set_rgmii(config->supported_interfaces);
+}
+
+static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct realtek_priv *priv = ds->priv;
+ int ret;
+
+ if (mode != MLO_AN_PHY && mode != MLO_AN_FIXED) {
+ dev_err(priv->dev,
+ "port %d supports only conventional PHY or fixed-link\n",
+ port);
+ return;
+ }
+
+ if (phy_interface_mode_is_rgmii(state->interface)) {
+ ret = rtl8365mb_ext_config_rgmii(priv, port, state->interface);
+ if (ret)
+ dev_err(priv->dev,
+ "failed to configure RGMII mode on port %d: %d\n",
+ port, ret);
+ return;
+ }
+
+ /* TODO: Implement MII and RMII modes, which the RTL8365MB-VC also
+ * supports
+ */
+}
+
+static void rtl8365mb_phylink_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct realtek_priv *priv = ds->priv;
+ struct rtl8365mb_port *p;
+ struct rtl8365mb *mb;
+ int ret;
+
+ mb = priv->chip_data;
+ p = &mb->ports[port];
+ cancel_delayed_work_sync(&p->mib_work);
+
+ if (phy_interface_mode_is_rgmii(interface)) {
+ ret = rtl8365mb_ext_config_forcemode(priv, port, false, 0, 0,
+ false, false);
+ if (ret)
+ dev_err(priv->dev,
+ "failed to reset forced mode on port %d: %d\n",
+ port, ret);
+
+ return;
+ }
+}
+
+static void rtl8365mb_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause,
+ bool rx_pause)
+{
+ struct realtek_priv *priv = ds->priv;
+ struct rtl8365mb_port *p;
+ struct rtl8365mb *mb;
+ int ret;
+
+ mb = priv->chip_data;
+ p = &mb->ports[port];
+ schedule_delayed_work(&p->mib_work, 0);
+
+ if (phy_interface_mode_is_rgmii(interface)) {
+ ret = rtl8365mb_ext_config_forcemode(priv, port, true, speed,
+ duplex, tx_pause,
+ rx_pause);
+ if (ret)
+ dev_err(priv->dev,
+ "failed to force mode on port %d: %d\n", port,
+ ret);
+
+ return;
+ }
+}
+
+static void rtl8365mb_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct realtek_priv *priv = ds->priv;
+ enum rtl8365mb_stp_state val;
+ int msti = 0;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ val = RTL8365MB_STP_STATE_DISABLED;
+ break;
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ val = RTL8365MB_STP_STATE_BLOCKING;
+ break;
+ case BR_STATE_LEARNING:
+ val = RTL8365MB_STP_STATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ val = RTL8365MB_STP_STATE_FORWARDING;
+ break;
+ default:
+ dev_err(priv->dev, "invalid STP state: %u\n", state);
+ return;
+ }
+
+ regmap_update_bits(priv->map, RTL8365MB_MSTI_CTRL_REG(msti, port),
+ RTL8365MB_MSTI_CTRL_PORT_STATE_MASK(port),
+ val << RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET(port));
+}
+
+static int rtl8365mb_port_set_learning(struct realtek_priv *priv, int port,
+ bool enable)
+{
+ /* Enable/disable learning by limiting the number of L2 addresses the
+ * port can learn. Realtek documentation states that a limit of zero
+ * disables learning. When enabling learning, set it to the chip's
+ * maximum.
+ */
+ return regmap_write(priv->map, RTL8365MB_LUT_PORT_LEARN_LIMIT_REG(port),
+ enable ? RTL8365MB_LEARN_LIMIT_MAX : 0);
+}
+
+static int rtl8365mb_port_set_isolation(struct realtek_priv *priv, int port,
+ u32 mask)
+{
+ return regmap_write(priv->map, RTL8365MB_PORT_ISOLATION_REG(port), mask);
+}
+
+static int rtl8365mb_mib_counter_read(struct realtek_priv *priv, int port,
+ u32 offset, u32 length, u64 *mibvalue)
+{
+ u64 tmpvalue = 0;
+ u32 val;
+ int ret;
+ int i;
+
+ /* The MIB address is an SRAM address. We request a particular address
+ * and then poll the control register before reading the value from the
+ * counter registers.
+ */
+ ret = regmap_write(priv->map, RTL8365MB_MIB_ADDRESS_REG,
+ RTL8365MB_MIB_ADDRESS(port, offset));
+ if (ret)
+ return ret;
+
+ /* Poll for completion */
+ ret = regmap_read_poll_timeout(priv->map, RTL8365MB_MIB_CTRL0_REG, val,
+ !(val & RTL8365MB_MIB_CTRL0_BUSY_MASK),
+ 10, 100);
+ if (ret)
+ return ret;
+
+ /* Presumably this indicates a MIB counter read failure */
+ if (val & RTL8365MB_MIB_CTRL0_RESET_MASK)
+ return -EIO;
+
+ /* There are four MIB counter registers, each holding a 16-bit word of a
+ * MIB counter. Depending on the offset, we should read from the upper
+ * two or lower two registers. In case the MIB counter is 4 words, we
+ * read from all four registers.
+ */
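+ /* For example, a two-word counter at offset 4 is read from counter
+ * registers 1..0, while one at offset 6 is read from registers 3..2.
+ */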
+ if (length == 4)
+ offset = 3;
+ else
+ offset = (offset + 1) % 4;
+
+ /* Read the MIB counter 16 bits at a time */
+ for (i = 0; i < length; i++) {
+ ret = regmap_read(priv->map,
+ RTL8365MB_MIB_COUNTER_REG(offset - i), &val);
+ if (ret)
+ return ret;
+
+ tmpvalue = (tmpvalue << 16) | (val & 0xFFFF);
+ }
+
+ /* Only commit the result if no error occurred */
+ *mibvalue = tmpvalue;
+
+ return 0;
+}
+
+static void rtl8365mb_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
+{
+ struct realtek_priv *priv = ds->priv;
+ struct rtl8365mb *mb;
+ int ret;
+ int i;
+
+ mb = priv->chip_data;
+
+ mutex_lock(&mb->mib_lock);
+ for (i = 0; i < RTL8365MB_MIB_END; i++) {
+ struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i];
+
+ ret = rtl8365mb_mib_counter_read(priv, port, mib->offset,
+ mib->length, &data[i]);
+ if (ret) {
+ dev_err(priv->dev,
+ "failed to read port %d counters: %d\n", port,
+ ret);
+ break;
+ }
+ }
+ mutex_unlock(&mb->mib_lock);
+}
+
+static void rtl8365mb_get_strings(struct dsa_switch *ds, int port, u32 stringset, u8 *data)
+{
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < RTL8365MB_MIB_END; i++) {
+ struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i];
+
+ strncpy(data + i * ETH_GSTRING_LEN, mib->name, ETH_GSTRING_LEN);
+ }
+}
+
+static int rtl8365mb_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ return RTL8365MB_MIB_END;
+}
+
+static void rtl8365mb_get_phy_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct realtek_priv *priv = ds->priv;
+ struct rtl8365mb_mib_counter *mib;
+ struct rtl8365mb *mb;
+
+ mb = priv->chip_data;
+ mib = &rtl8365mb_mib_counters[RTL8365MB_MIB_dot3StatsSymbolErrors];
+
+ mutex_lock(&mb->mib_lock);
+ rtl8365mb_mib_counter_read(priv, port, mib->offset, mib->length,
+ &phy_stats->SymbolErrorDuringCarrier);
+ mutex_unlock(&mb->mib_lock);
+}
+
+static void rtl8365mb_get_mac_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
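+ /* Non-zero designated initializers mark the counters to fetch; each
+ * marked slot is overwritten below with the value read from hardware.
+ */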
+ u64 cnt[RTL8365MB_MIB_END] = {
+ [RTL8365MB_MIB_ifOutOctets] = 1,
+ [RTL8365MB_MIB_ifOutUcastPkts] = 1,
+ [RTL8365MB_MIB_ifOutMulticastPkts] = 1,
+ [RTL8365MB_MIB_ifOutBroadcastPkts] = 1,
+ [RTL8365MB_MIB_dot3OutPauseFrames] = 1,
+ [RTL8365MB_MIB_ifOutDiscards] = 1,
+ [RTL8365MB_MIB_ifInOctets] = 1,
+ [RTL8365MB_MIB_ifInUcastPkts] = 1,
+ [RTL8365MB_MIB_ifInMulticastPkts] = 1,
+ [RTL8365MB_MIB_ifInBroadcastPkts] = 1,
+ [RTL8365MB_MIB_dot3InPauseFrames] = 1,
+ [RTL8365MB_MIB_dot3StatsSingleCollisionFrames] = 1,
+ [RTL8365MB_MIB_dot3StatsMultipleCollisionFrames] = 1,
+ [RTL8365MB_MIB_dot3StatsFCSErrors] = 1,
+ [RTL8365MB_MIB_dot3StatsDeferredTransmissions] = 1,
+ [RTL8365MB_MIB_dot3StatsLateCollisions] = 1,
+ [RTL8365MB_MIB_dot3StatsExcessiveCollisions] = 1,
+ };
+ struct realtek_priv *priv = ds->priv;
+ struct rtl8365mb *mb;
+ int ret;
+ int i;
+
+ mb = priv->chip_data;
+
+ mutex_lock(&mb->mib_lock);
+ for (i = 0; i < RTL8365MB_MIB_END; i++) {
+ struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i];
+
+ /* Only fetch required MIB counters (marked = 1 above) */
+ if (!cnt[i])
+ continue;
+
+ ret = rtl8365mb_mib_counter_read(priv, port, mib->offset,
+ mib->length, &cnt[i]);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&mb->mib_lock);
+
+ /* The RTL8365MB-VC exposes MIB objects, which we have to translate into
+ * IEEE 802.3 Managed Objects. This is not always completely faithful,
+ * but we try our best. See RFC 3635 for a detailed treatment of the
+ * subject.
+ */
+
+ mac_stats->FramesTransmittedOK = cnt[RTL8365MB_MIB_ifOutUcastPkts] +
+ cnt[RTL8365MB_MIB_ifOutMulticastPkts] +
+ cnt[RTL8365MB_MIB_ifOutBroadcastPkts] +
+ cnt[RTL8365MB_MIB_dot3OutPauseFrames] -
+ cnt[RTL8365MB_MIB_ifOutDiscards];
+ mac_stats->SingleCollisionFrames =
+ cnt[RTL8365MB_MIB_dot3StatsSingleCollisionFrames];
+ mac_stats->MultipleCollisionFrames =
+ cnt[RTL8365MB_MIB_dot3StatsMultipleCollisionFrames];
+ mac_stats->FramesReceivedOK = cnt[RTL8365MB_MIB_ifInUcastPkts] +
+ cnt[RTL8365MB_MIB_ifInMulticastPkts] +
+ cnt[RTL8365MB_MIB_ifInBroadcastPkts] +
+ cnt[RTL8365MB_MIB_dot3InPauseFrames];
+ mac_stats->FrameCheckSequenceErrors =
+ cnt[RTL8365MB_MIB_dot3StatsFCSErrors];
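+ /* aOctets{Transmitted,Received}OK count only data and padding octets,
+ * so subtract the 14-byte MAC header and 4-byte FCS (18 bytes) per
+ * frame from the if{Out,In}Octets counters.
+ */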
+ mac_stats->OctetsTransmittedOK = cnt[RTL8365MB_MIB_ifOutOctets] -
+ 18 * mac_stats->FramesTransmittedOK;
+ mac_stats->FramesWithDeferredXmissions =
+ cnt[RTL8365MB_MIB_dot3StatsDeferredTransmissions];
+ mac_stats->LateCollisions = cnt[RTL8365MB_MIB_dot3StatsLateCollisions];
+ mac_stats->FramesAbortedDueToXSColls =
+ cnt[RTL8365MB_MIB_dot3StatsExcessiveCollisions];
+ mac_stats->OctetsReceivedOK = cnt[RTL8365MB_MIB_ifInOctets] -
+ 18 * mac_stats->FramesReceivedOK;
+ mac_stats->MulticastFramesXmittedOK =
+ cnt[RTL8365MB_MIB_ifOutMulticastPkts];
+ mac_stats->BroadcastFramesXmittedOK =
+ cnt[RTL8365MB_MIB_ifOutBroadcastPkts];
+ mac_stats->MulticastFramesReceivedOK =
+ cnt[RTL8365MB_MIB_ifInMulticastPkts];
+ mac_stats->BroadcastFramesReceivedOK =
+ cnt[RTL8365MB_MIB_ifInBroadcastPkts];
+}
+
+static void rtl8365mb_get_ctrl_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct realtek_priv *priv = ds->priv;
+ struct rtl8365mb_mib_counter *mib;
+ struct rtl8365mb *mb;
+
+ mb = priv->chip_data;
+ mib = &rtl8365mb_mib_counters[RTL8365MB_MIB_dot3ControlInUnknownOpcodes];
+
+ mutex_lock(&mb->mib_lock);
+ rtl8365mb_mib_counter_read(priv, port, mib->offset, mib->length,
+ &ctrl_stats->UnsupportedOpcodesReceived);
+ mutex_unlock(&mb->mib_lock);
+}
+
+static void rtl8365mb_stats_update(struct realtek_priv *priv, int port)
+{
+ u64 cnt[RTL8365MB_MIB_END] = {
+ [RTL8365MB_MIB_ifOutOctets] = 1,
+ [RTL8365MB_MIB_ifOutUcastPkts] = 1,
+ [RTL8365MB_MIB_ifOutMulticastPkts] = 1,
+ [RTL8365MB_MIB_ifOutBroadcastPkts] = 1,
+ [RTL8365MB_MIB_ifOutDiscards] = 1,
+ [RTL8365MB_MIB_ifInOctets] = 1,
+ [RTL8365MB_MIB_ifInUcastPkts] = 1,
+ [RTL8365MB_MIB_ifInMulticastPkts] = 1,
+ [RTL8365MB_MIB_ifInBroadcastPkts] = 1,
+ [RTL8365MB_MIB_etherStatsDropEvents] = 1,
+ [RTL8365MB_MIB_etherStatsCollisions] = 1,
+ [RTL8365MB_MIB_etherStatsFragments] = 1,
+ [RTL8365MB_MIB_etherStatsJabbers] = 1,
+ [RTL8365MB_MIB_dot3StatsFCSErrors] = 1,
+ [RTL8365MB_MIB_dot3StatsLateCollisions] = 1,
+ };
+ struct rtl8365mb *mb = priv->chip_data;
+ struct rtnl_link_stats64 *stats;
+ int ret;
+ int i;
+
+ stats = &mb->ports[port].stats;
+
+ mutex_lock(&mb->mib_lock);
+ for (i = 0; i < RTL8365MB_MIB_END; i++) {
+ struct rtl8365mb_mib_counter *c = &rtl8365mb_mib_counters[i];
+
+ /* Only fetch required MIB counters (marked = 1 above) */
+ if (!cnt[i])
+ continue;
+
+ ret = rtl8365mb_mib_counter_read(priv, port, c->offset,
+ c->length, &cnt[i]);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&mb->mib_lock);
+
+ /* Don't update statistics if there was an error reading the counters */
+ if (ret)
+ return;
+
+ spin_lock(&mb->ports[port].stats_lock);
+
+ stats->rx_packets = cnt[RTL8365MB_MIB_ifInUcastPkts] +
+ cnt[RTL8365MB_MIB_ifInMulticastPkts] +
+ cnt[RTL8365MB_MIB_ifInBroadcastPkts] -
+ cnt[RTL8365MB_MIB_ifOutDiscards];
+
+ stats->tx_packets = cnt[RTL8365MB_MIB_ifOutUcastPkts] +
+ cnt[RTL8365MB_MIB_ifOutMulticastPkts] +
+ cnt[RTL8365MB_MIB_ifOutBroadcastPkts];
+
+ /* if{In,Out}Octets includes FCS - remove it */
+ stats->rx_bytes = cnt[RTL8365MB_MIB_ifInOctets] - 4 * stats->rx_packets;
+ stats->tx_bytes =
+ cnt[RTL8365MB_MIB_ifOutOctets] - 4 * stats->tx_packets;
+
+ stats->rx_dropped = cnt[RTL8365MB_MIB_etherStatsDropEvents];
+ stats->tx_dropped = cnt[RTL8365MB_MIB_ifOutDiscards];
+
+ stats->multicast = cnt[RTL8365MB_MIB_ifInMulticastPkts];
+ stats->collisions = cnt[RTL8365MB_MIB_etherStatsCollisions];
+
+ stats->rx_length_errors = cnt[RTL8365MB_MIB_etherStatsFragments] +
+ cnt[RTL8365MB_MIB_etherStatsJabbers];
+ stats->rx_crc_errors = cnt[RTL8365MB_MIB_dot3StatsFCSErrors];
+ stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors;
+
+ stats->tx_aborted_errors = cnt[RTL8365MB_MIB_ifOutDiscards];
+ stats->tx_window_errors = cnt[RTL8365MB_MIB_dot3StatsLateCollisions];
+ stats->tx_errors = stats->tx_aborted_errors + stats->tx_window_errors;
+
+ spin_unlock(&mb->ports[port].stats_lock);
+}
+
+static void rtl8365mb_stats_poll(struct work_struct *work)
+{
+ struct rtl8365mb_port *p = container_of(to_delayed_work(work),
+ struct rtl8365mb_port,
+ mib_work);
+ struct realtek_priv *priv = p->priv;
+
+ rtl8365mb_stats_update(priv, p->index);
+
+ schedule_delayed_work(&p->mib_work, RTL8365MB_STATS_INTERVAL_JIFFIES);
+}
+
+static void rtl8365mb_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *s)
+{
+ struct realtek_priv *priv = ds->priv;
+ struct rtl8365mb_port *p;
+ struct rtl8365mb *mb;
+
+ mb = priv->chip_data;
+ p = &mb->ports[port];
+
+ spin_lock(&p->stats_lock);
+ memcpy(s, &p->stats, sizeof(*s));
+ spin_unlock(&p->stats_lock);
+}
+
+static void rtl8365mb_stats_setup(struct realtek_priv *priv)
+{
+ struct rtl8365mb *mb = priv->chip_data;
+ int i;
+
+ /* Per-chip global mutex to protect MIB counter access, since doing
+ * so requires accessing a series of registers in a particular order.
+ */
+ mutex_init(&mb->mib_lock);
+
+ for (i = 0; i < priv->num_ports; i++) {
+ struct rtl8365mb_port *p = &mb->ports[i];
+
+ if (dsa_is_unused_port(priv->ds, i))
+ continue;
+
+ /* Per-port spinlock to protect the stats64 data */
+ spin_lock_init(&p->stats_lock);
+
+ /* This work polls the MIB counters and keeps the stats64 data
+ * up-to-date.
+ */
+ INIT_DELAYED_WORK(&p->mib_work, rtl8365mb_stats_poll);
+ }
+}
+
+static void rtl8365mb_stats_teardown(struct realtek_priv *priv)
+{
+ struct rtl8365mb *mb = priv->chip_data;
+ int i;
+
+ for (i = 0; i < priv->num_ports; i++) {
+ struct rtl8365mb_port *p = &mb->ports[i];
+
+ if (dsa_is_unused_port(priv->ds, i))
+ continue;
+
+ cancel_delayed_work_sync(&p->mib_work);
+ }
+}
+
+static int rtl8365mb_get_and_clear_status_reg(struct realtek_priv *priv, u32 reg,
+ u32 *val)
+{
+ int ret;
+
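+ /* The status registers appear to be write-one-to-clear: write back the
+ * bits we just read in order to acknowledge them.
+ */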
+ ret = regmap_read(priv->map, reg, val);
+ if (ret)
+ return ret;
+
+ return regmap_write(priv->map, reg, *val);
+}
+
+static irqreturn_t rtl8365mb_irq(int irq, void *data)
+{
+ struct realtek_priv *priv = data;
+ unsigned long line_changes = 0;
+ u32 stat;
+ int line;
+ int ret;
+
+ ret = rtl8365mb_get_and_clear_status_reg(priv, RTL8365MB_INTR_STATUS_REG,
+ &stat);
+ if (ret)
+ goto out_error;
+
+ if (stat & RTL8365MB_INTR_LINK_CHANGE_MASK) {
+ u32 linkdown_ind;
+ u32 linkup_ind;
+ u32 val;
+
+ ret = rtl8365mb_get_and_clear_status_reg(
+ priv, RTL8365MB_PORT_LINKUP_IND_REG, &val);
+ if (ret)
+ goto out_error;
+
+ linkup_ind = FIELD_GET(RTL8365MB_PORT_LINKUP_IND_MASK, val);
+
+ ret = rtl8365mb_get_and_clear_status_reg(
+ priv, RTL8365MB_PORT_LINKDOWN_IND_REG, &val);
+ if (ret)
+ goto out_error;
+
+ linkdown_ind = FIELD_GET(RTL8365MB_PORT_LINKDOWN_IND_MASK, val);
+
+ line_changes = linkup_ind | linkdown_ind;
+ }
+
+ if (!line_changes)
+ goto out_none;
+
+ for_each_set_bit(line, &line_changes, priv->num_ports) {
+ int child_irq = irq_find_mapping(priv->irqdomain, line);
+
+ handle_nested_irq(child_irq);
+ }
+
+ return IRQ_HANDLED;
+
+out_error:
+ dev_err(priv->dev, "failed to read interrupt status: %d\n", ret);
+
+out_none:
+ return IRQ_NONE;
+}
+
+static struct irq_chip rtl8365mb_irq_chip = {
+ .name = "rtl8365mb",
+ /* The hardware doesn't support masking IRQs on a per-port basis */
+};
+
+static int rtl8365mb_irq_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_chip_and_handler(irq, &rtl8365mb_irq_chip, handle_simple_irq);
+ irq_set_nested_thread(irq, 1);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static void rtl8365mb_irq_unmap(struct irq_domain *d, unsigned int irq)
+{
+ irq_set_nested_thread(irq, 0);
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
+}
+
+static const struct irq_domain_ops rtl8365mb_irqdomain_ops = {
+ .map = rtl8365mb_irq_map,
+ .unmap = rtl8365mb_irq_unmap,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int rtl8365mb_set_irq_enable(struct realtek_priv *priv, bool enable)
+{
+ return regmap_update_bits(priv->map, RTL8365MB_INTR_CTRL_REG,
+ RTL8365MB_INTR_LINK_CHANGE_MASK,
+ FIELD_PREP(RTL8365MB_INTR_LINK_CHANGE_MASK,
+ enable ? 1 : 0));
+}
+
+static int rtl8365mb_irq_enable(struct realtek_priv *priv)
+{
+ return rtl8365mb_set_irq_enable(priv, true);
+}
+
+static int rtl8365mb_irq_disable(struct realtek_priv *priv)
+{
+ return rtl8365mb_set_irq_enable(priv, false);
+}
+
+static int rtl8365mb_irq_setup(struct realtek_priv *priv)
+{
+ struct rtl8365mb *mb = priv->chip_data;
+ struct device_node *intc;
+ u32 irq_trig;
+ int virq;
+ int irq;
+ u32 val;
+ int ret;
+ int i;
+
+ intc = of_get_child_by_name(priv->dev->of_node, "interrupt-controller");
+ if (!intc) {
+ dev_err(priv->dev, "missing child interrupt-controller node\n");
+ return -EINVAL;
+ }
+
+ /* rtl8365mb IRQs cascade off this one */
+ irq = of_irq_get(intc, 0);
+ if (irq <= 0) {
+ if (irq != -EPROBE_DEFER)
+ dev_err(priv->dev, "failed to get parent irq: %d\n",
+ irq);
+ ret = irq ? irq : -EINVAL;
+ goto out_put_node;
+ }
+
+ priv->irqdomain = irq_domain_add_linear(intc, priv->num_ports,
+ &rtl8365mb_irqdomain_ops, priv);
+ if (!priv->irqdomain) {
+ dev_err(priv->dev, "failed to add irq domain\n");
+ ret = -ENOMEM;
+ goto out_put_node;
+ }
+
+ for (i = 0; i < priv->num_ports; i++) {
+ virq = irq_create_mapping(priv->irqdomain, i);
+ if (!virq) {
+ dev_err(priv->dev,
+ "failed to create irq domain mapping\n");
+ ret = -EINVAL;
+ goto out_remove_irqdomain;
+ }
+
+ irq_set_parent(virq, irq);
+ }
+
+ /* Configure chip interrupt signal polarity */
+ irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
+ switch (irq_trig) {
+ case IRQF_TRIGGER_RISING:
+ case IRQF_TRIGGER_HIGH:
+ val = RTL8365MB_INTR_POLARITY_HIGH;
+ break;
+ case IRQF_TRIGGER_FALLING:
+ case IRQF_TRIGGER_LOW:
+ val = RTL8365MB_INTR_POLARITY_LOW;
+ break;
+ default:
+ dev_err(priv->dev, "unsupported irq trigger type %u\n",
+ irq_trig);
+ ret = -EINVAL;
+ goto out_remove_irqdomain;
+ }
+
+ ret = regmap_update_bits(priv->map, RTL8365MB_INTR_POLARITY_REG,
+ RTL8365MB_INTR_POLARITY_MASK,
+ FIELD_PREP(RTL8365MB_INTR_POLARITY_MASK, val));
+ if (ret)
+ goto out_remove_irqdomain;
+
+ /* Disable the interrupt in case the chip has it enabled on reset */
+ ret = rtl8365mb_irq_disable(priv);
+ if (ret)
+ goto out_remove_irqdomain;
+
+ /* Clear the interrupt status register */
+ ret = regmap_write(priv->map, RTL8365MB_INTR_STATUS_REG,
+ RTL8365MB_INTR_ALL_MASK);
+ if (ret)
+ goto out_remove_irqdomain;
+
+ ret = request_threaded_irq(irq, NULL, rtl8365mb_irq, IRQF_ONESHOT,
+ "rtl8365mb", priv);
+ if (ret) {
+ dev_err(priv->dev, "failed to request irq: %d\n", ret);
+ goto out_remove_irqdomain;
+ }
+
+ /* Store the irq so that we know to free it during teardown */
+ mb->irq = irq;
+
+ ret = rtl8365mb_irq_enable(priv);
+ if (ret)
+ goto out_free_irq;
+
+ of_node_put(intc);
+
+ return 0;
+
+out_free_irq:
+ free_irq(mb->irq, priv);
+ mb->irq = 0;
+
+out_remove_irqdomain:
+ for (i = 0; i < priv->num_ports; i++) {
+ virq = irq_find_mapping(priv->irqdomain, i);
+ irq_dispose_mapping(virq);
+ }
+
+ irq_domain_remove(priv->irqdomain);
+ priv->irqdomain = NULL;
+
+out_put_node:
+ of_node_put(intc);
+
+ return ret;
+}
+
+static void rtl8365mb_irq_teardown(struct realtek_priv *priv)
+{
+ struct rtl8365mb *mb = priv->chip_data;
+ int virq;
+ int i;
+
+ if (mb->irq) {
+ free_irq(mb->irq, priv);
+ mb->irq = 0;
+ }
+
+ if (priv->irqdomain) {
+ for (i = 0; i < priv->num_ports; i++) {
+ virq = irq_find_mapping(priv->irqdomain, i);
+ irq_dispose_mapping(virq);
+ }
+
+ irq_domain_remove(priv->irqdomain);
+ priv->irqdomain = NULL;
+ }
+}
+
+static int rtl8365mb_cpu_config(struct realtek_priv *priv)
+{
+ struct rtl8365mb *mb = priv->chip_data;
+ struct rtl8365mb_cpu *cpu = &mb->cpu;
+ u32 val;
+ int ret;
+
+ ret = regmap_update_bits(priv->map, RTL8365MB_CPU_PORT_MASK_REG,
+ RTL8365MB_CPU_PORT_MASK_MASK,
+ FIELD_PREP(RTL8365MB_CPU_PORT_MASK_MASK,
+ cpu->mask));
+ if (ret)
+ return ret;
+
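+ /* The 4-bit trap port index is split across two fields: the low three
+ * bits go into TRAP_PORT and the top bit into TRAP_PORT_EXT.
+ */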
+ val = FIELD_PREP(RTL8365MB_CPU_CTRL_EN_MASK, cpu->enable ? 1 : 0) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_INSERTMODE_MASK, cpu->insert) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_TAG_POSITION_MASK, cpu->position) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_RXBYTECOUNT_MASK, cpu->rx_length) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_TAG_FORMAT_MASK, cpu->format) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_MASK, cpu->trap_port & 0x7) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_EXT_MASK,
+ cpu->trap_port >> 3 & 0x1);
+ ret = regmap_write(priv->map, RTL8365MB_CPU_CTRL_REG, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtl8365mb_change_tag_protocol(struct dsa_switch *ds,
+ enum dsa_tag_protocol proto)
+{
+ struct realtek_priv *priv = ds->priv;
+ struct rtl8365mb_cpu *cpu;
+ struct rtl8365mb *mb;
+
+ mb = priv->chip_data;
+ cpu = &mb->cpu;
+
+ switch (proto) {
+ case DSA_TAG_PROTO_RTL8_4:
+ cpu->format = RTL8365MB_CPU_FORMAT_8BYTES;
+ cpu->position = RTL8365MB_CPU_POS_AFTER_SA;
+ break;
+ case DSA_TAG_PROTO_RTL8_4T:
+ cpu->format = RTL8365MB_CPU_FORMAT_8BYTES;
+ cpu->position = RTL8365MB_CPU_POS_BEFORE_CRC;
+ break;
+ /* The switch also supports a 4-byte tag format, similar to rtl4a but
+ * with the same 0x04 8-bit version field and probably 8-bit source and
+ * destination port fields. There is no public documentation for it, so
+ * it is not supported and probably never will be.
+ */
+ default:
+ return -EPROTONOSUPPORT;
+ }
+
+ return rtl8365mb_cpu_config(priv);
+}
+
+static int rtl8365mb_switch_init(struct realtek_priv *priv)
+{
+ struct rtl8365mb *mb = priv->chip_data;
+ const struct rtl8365mb_chip_info *ci;
+ int ret;
+ int i;
+
+ ci = mb->chip_info;
+
+ /* Do any chip-specific init jam before getting to the common stuff */
+ if (ci->jam_table) {
+ for (i = 0; i < ci->jam_size; i++) {
+ ret = regmap_write(priv->map, ci->jam_table[i].reg,
+ ci->jam_table[i].val);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* Common init jam */
+ for (i = 0; i < ARRAY_SIZE(rtl8365mb_init_jam_common); i++) {
+ ret = regmap_write(priv->map, rtl8365mb_init_jam_common[i].reg,
+ rtl8365mb_init_jam_common[i].val);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rtl8365mb_reset_chip(struct realtek_priv *priv)
+{
+ u32 val;
+
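+ /* Use the no-ack write variant, since the chip presumably resets
+ * before it can acknowledge the write.
+ */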
+ priv->write_reg_noack(priv, RTL8365MB_CHIP_RESET_REG,
+ FIELD_PREP(RTL8365MB_CHIP_RESET_HW_MASK, 1));
+
+ /* Realtek documentation says the chip needs 1 second to reset. Sleep
+ * for 100 ms before accessing any registers to prevent ACK timeouts.
+ */
+ msleep(100);
+ return regmap_read_poll_timeout(priv->map, RTL8365MB_CHIP_RESET_REG, val,
+ !(val & RTL8365MB_CHIP_RESET_HW_MASK),
+ 20000, 1e6);
+}
+
+static int rtl8365mb_setup(struct dsa_switch *ds)
+{
+ struct realtek_priv *priv = ds->priv;
+ struct rtl8365mb_cpu *cpu;
+ struct dsa_port *cpu_dp;
+ struct rtl8365mb *mb;
+ int ret;
+ int i;
+
+ mb = priv->chip_data;
+ cpu = &mb->cpu;
+
+ ret = rtl8365mb_reset_chip(priv);
+ if (ret) {
+ dev_err(priv->dev, "failed to reset chip: %d\n", ret);
+ goto out_error;
+ }
+
+ /* Configure switch to vendor-defined initial state */
+ ret = rtl8365mb_switch_init(priv);
+ if (ret) {
+ dev_err(priv->dev, "failed to initialize switch: %d\n", ret);
+ goto out_error;
+ }
+
+ /* Set up cascading IRQs */
+ ret = rtl8365mb_irq_setup(priv);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ else if (ret)
+ dev_info(priv->dev, "no interrupt support\n");
+
+ /* Configure CPU tagging */
+ dsa_switch_for_each_cpu_port(cpu_dp, priv->ds) {
+ cpu->mask |= BIT(cpu_dp->index);
+
+ if (cpu->trap_port == RTL8365MB_MAX_NUM_PORTS)
+ cpu->trap_port = cpu_dp->index;
+ }
+ cpu->enable = cpu->mask > 0;
+ ret = rtl8365mb_cpu_config(priv);
+ if (ret)
+ goto out_teardown_irq;
+
+ /* Configure ports */
+ for (i = 0; i < priv->num_ports; i++) {
+ struct rtl8365mb_port *p = &mb->ports[i];
+
+ if (dsa_is_unused_port(priv->ds, i))
+ continue;
+
+ /* Forward only to the CPU */
+ ret = rtl8365mb_port_set_isolation(priv, i, cpu->mask);
+ if (ret)
+ goto out_teardown_irq;
+
+ /* Disable learning */
+ ret = rtl8365mb_port_set_learning(priv, i, false);
+ if (ret)
+ goto out_teardown_irq;
+
+ /* Set the initial STP state of all ports to DISABLED, otherwise
+ * ports will still forward frames to the CPU despite being
+ * administratively down by default.
+ */
+ rtl8365mb_port_stp_state_set(priv->ds, i, BR_STATE_DISABLED);
+
+ /* Set up per-port private data */
+ p->priv = priv;
+ p->index = i;
+ }
+
+ /* Set maximum packet length to 1536 bytes */
+ ret = regmap_update_bits(priv->map, RTL8365MB_CFG0_MAX_LEN_REG,
+ RTL8365MB_CFG0_MAX_LEN_MASK,
+ FIELD_PREP(RTL8365MB_CFG0_MAX_LEN_MASK, 1536));
+ if (ret)
+ goto out_teardown_irq;
+
+ if (priv->setup_interface) {
+ ret = priv->setup_interface(ds);
+ if (ret) {
+ dev_err(priv->dev, "could not set up MDIO bus\n");
+ goto out_teardown_irq;
+ }
+ }
+
+ /* Start statistics counter polling */
+ rtl8365mb_stats_setup(priv);
+
+ return 0;
+
+out_teardown_irq:
+ rtl8365mb_irq_teardown(priv);
+
+out_error:
+ return ret;
+}
+
+static void rtl8365mb_teardown(struct dsa_switch *ds)
+{
+ struct realtek_priv *priv = ds->priv;
+
+ rtl8365mb_stats_teardown(priv);
+ rtl8365mb_irq_teardown(priv);
+}
+
+static int rtl8365mb_get_chip_id_and_ver(struct regmap *map, u32 *id, u32 *ver)
+{
+ int ret;
+
+ /* For some reason we have to write a magic value to an arbitrary
+ * register whenever accessing the chip ID/version registers.
+ */
+ ret = regmap_write(map, RTL8365MB_MAGIC_REG, RTL8365MB_MAGIC_VALUE);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(map, RTL8365MB_CHIP_ID_REG, id);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(map, RTL8365MB_CHIP_VER_REG, ver);
+ if (ret)
+ return ret;
+
+ /* Reset magic register */
+ ret = regmap_write(map, RTL8365MB_MAGIC_REG, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtl8365mb_detect(struct realtek_priv *priv)
+{
+ struct rtl8365mb *mb = priv->chip_data;
+ u32 chip_id;
+ u32 chip_ver;
+ int ret;
+ int i;
+
+ ret = rtl8365mb_get_chip_id_and_ver(priv->map, &chip_id, &chip_ver);
+ if (ret) {
+ dev_err(priv->dev, "failed to read chip id and version: %d\n",
+ ret);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rtl8365mb_chip_infos); i++) {
+ const struct rtl8365mb_chip_info *ci = &rtl8365mb_chip_infos[i];
+
+ if (ci->chip_id == chip_id && ci->chip_ver == chip_ver) {
+ mb->chip_info = ci;
+ break;
+ }
+ }
+
+ if (!mb->chip_info) {
+ dev_err(priv->dev,
+ "unrecognized switch (id=0x%04x, ver=0x%04x)", chip_id,
+ chip_ver);
+ return -ENODEV;
+ }
+
+ dev_info(priv->dev, "found an %s switch\n", mb->chip_info->name);
+
+ priv->num_ports = RTL8365MB_MAX_NUM_PORTS;
+ mb->priv = priv;
+ mb->cpu.trap_port = RTL8365MB_MAX_NUM_PORTS;
+ mb->cpu.insert = RTL8365MB_CPU_INSERT_TO_ALL;
+ mb->cpu.position = RTL8365MB_CPU_POS_AFTER_SA;
+ mb->cpu.rx_length = RTL8365MB_CPU_RXLEN_64BYTES;
+ mb->cpu.format = RTL8365MB_CPU_FORMAT_8BYTES;
+
+ return 0;
+}
+
+static const struct dsa_switch_ops rtl8365mb_switch_ops_smi = {
+ .get_tag_protocol = rtl8365mb_get_tag_protocol,
+ .change_tag_protocol = rtl8365mb_change_tag_protocol,
+ .setup = rtl8365mb_setup,
+ .teardown = rtl8365mb_teardown,
+ .phylink_get_caps = rtl8365mb_phylink_get_caps,
+ .phylink_mac_config = rtl8365mb_phylink_mac_config,
+ .phylink_mac_link_down = rtl8365mb_phylink_mac_link_down,
+ .phylink_mac_link_up = rtl8365mb_phylink_mac_link_up,
+ .port_stp_state_set = rtl8365mb_port_stp_state_set,
+ .get_strings = rtl8365mb_get_strings,
+ .get_ethtool_stats = rtl8365mb_get_ethtool_stats,
+ .get_sset_count = rtl8365mb_get_sset_count,
+ .get_eth_phy_stats = rtl8365mb_get_phy_stats,
+ .get_eth_mac_stats = rtl8365mb_get_mac_stats,
+ .get_eth_ctrl_stats = rtl8365mb_get_ctrl_stats,
+ .get_stats64 = rtl8365mb_get_stats64,
+};
+
+static const struct dsa_switch_ops rtl8365mb_switch_ops_mdio = {
+ .get_tag_protocol = rtl8365mb_get_tag_protocol,
+ .change_tag_protocol = rtl8365mb_change_tag_protocol,
+ .setup = rtl8365mb_setup,
+ .teardown = rtl8365mb_teardown,
+ .phylink_get_caps = rtl8365mb_phylink_get_caps,
+ .phylink_mac_config = rtl8365mb_phylink_mac_config,
+ .phylink_mac_link_down = rtl8365mb_phylink_mac_link_down,
+ .phylink_mac_link_up = rtl8365mb_phylink_mac_link_up,
+ .phy_read = rtl8365mb_dsa_phy_read,
+ .phy_write = rtl8365mb_dsa_phy_write,
+ .port_stp_state_set = rtl8365mb_port_stp_state_set,
+ .get_strings = rtl8365mb_get_strings,
+ .get_ethtool_stats = rtl8365mb_get_ethtool_stats,
+ .get_sset_count = rtl8365mb_get_sset_count,
+ .get_eth_phy_stats = rtl8365mb_get_phy_stats,
+ .get_eth_mac_stats = rtl8365mb_get_mac_stats,
+ .get_eth_ctrl_stats = rtl8365mb_get_ctrl_stats,
+ .get_stats64 = rtl8365mb_get_stats64,
+};
+
+static const struct realtek_ops rtl8365mb_ops = {
+ .detect = rtl8365mb_detect,
+ .phy_read = rtl8365mb_phy_read,
+ .phy_write = rtl8365mb_phy_write,
+};
+
+const struct realtek_variant rtl8365mb_variant = {
+ .ds_ops_smi = &rtl8365mb_switch_ops_smi,
+ .ds_ops_mdio = &rtl8365mb_switch_ops_mdio,
+ .ops = &rtl8365mb_ops,
+ .clk_delay = 10,
+ .cmd_read = 0xb9,
+ .cmd_write = 0xb8,
+ .chip_data_sz = sizeof(struct rtl8365mb),
+};
+EXPORT_SYMBOL_GPL(rtl8365mb_variant);
+
+MODULE_AUTHOR("Alvin Å ipraga <alsi@bang-olufsen.dk>");
+MODULE_DESCRIPTION("Driver for RTL8365MB-VC ethernet switch");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/realtek/rtl8366-core.c b/drivers/net/dsa/realtek/rtl8366-core.c
new file mode 100644
index 000000000..dc5f75be3
--- /dev/null
+++ b/drivers/net/dsa/realtek/rtl8366-core.c
@@ -0,0 +1,448 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Realtek SMI library helpers for the RTL8366x variants
+ * RTL8366RB and RTL8366S
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com>
+ * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv>
+ * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com>
+ */
+#include <linux/if_bridge.h>
+#include <net/dsa.h>
+
+#include "realtek.h"
+
+int rtl8366_mc_is_used(struct realtek_priv *priv, int mc_index, int *used)
+{
+ int ret;
+ int i;
+
+ *used = 0;
+ for (i = 0; i < priv->num_ports; i++) {
+ int index = 0;
+
+ ret = priv->ops->get_mc_index(priv, i, &index);
+ if (ret)
+ return ret;
+
+ if (mc_index == index) {
+ *used = 1;
+ break;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_mc_is_used);
+
+/**
+ * rtl8366_obtain_mc() - retrieve or allocate a VLAN member configuration
+ * @priv: the Realtek SMI device instance
+ * @vid: the VLAN ID to look up or allocate
+ * @vlanmc: filled in with a valid member config if successful
+ *
+ * Return: the index of the member config, or a negative error number
+ */
+static int rtl8366_obtain_mc(struct realtek_priv *priv, int vid,
+ struct rtl8366_vlan_mc *vlanmc)
+{
+ struct rtl8366_vlan_4k vlan4k;
+ int ret;
+ int i;
+
+ /* Try to find an existing member config entry for this VID */
+ for (i = 0; i < priv->num_vlan_mc; i++) {
+ ret = priv->ops->get_vlan_mc(priv, i, vlanmc);
+ if (ret) {
+ dev_err(priv->dev, "error searching for VLAN MC %d for VID %d\n",
+ i, vid);
+ return ret;
+ }
+
+ if (vid == vlanmc->vid)
+ return i;
+ }
+
+ /* We have no MC entry for this VID, try to find an empty one */
+ for (i = 0; i < priv->num_vlan_mc; i++) {
+ ret = priv->ops->get_vlan_mc(priv, i, vlanmc);
+ if (ret) {
+ dev_err(priv->dev, "error searching for VLAN MC %d for VID %d\n",
+ i, vid);
+ return ret;
+ }
+
+ if (vlanmc->vid == 0 && vlanmc->member == 0) {
+ /* Update the entry from the 4K table */
+ ret = priv->ops->get_vlan_4k(priv, vid, &vlan4k);
+ if (ret) {
+ dev_err(priv->dev, "error looking for 4K VLAN MC %d for VID %d\n",
+ i, vid);
+ return ret;
+ }
+
+ vlanmc->vid = vid;
+ vlanmc->member = vlan4k.member;
+ vlanmc->untag = vlan4k.untag;
+ vlanmc->fid = vlan4k.fid;
+ ret = priv->ops->set_vlan_mc(priv, i, vlanmc);
+ if (ret) {
+ dev_err(priv->dev, "unable to set/update VLAN MC %d for VID %d\n",
+ i, vid);
+ return ret;
+ }
+
+ dev_dbg(priv->dev, "created new MC at index %d for VID %d\n",
+ i, vid);
+ return i;
+ }
+ }
+
+ /* MC table is full, try to find an unused entry and replace it */
+ for (i = 0; i < priv->num_vlan_mc; i++) {
+ int used;
+
+ ret = rtl8366_mc_is_used(priv, i, &used);
+ if (ret)
+ return ret;
+
+ if (!used) {
+ /* Update the entry from the 4K table */
+ ret = priv->ops->get_vlan_4k(priv, vid, &vlan4k);
+ if (ret)
+ return ret;
+
+ vlanmc->vid = vid;
+ vlanmc->member = vlan4k.member;
+ vlanmc->untag = vlan4k.untag;
+ vlanmc->fid = vlan4k.fid;
+ ret = priv->ops->set_vlan_mc(priv, i, vlanmc);
+ if (ret) {
+ dev_err(priv->dev, "unable to set/update VLAN MC %d for VID %d\n",
+ i, vid);
+ return ret;
+ }
+ dev_dbg(priv->dev, "recycled MC at index %i for VID %d\n",
+ i, vid);
+ return i;
+ }
+ }
+
+ dev_err(priv->dev, "all VLAN member configurations are in use\n");
+ return -ENOSPC;
+}
+
+int rtl8366_set_vlan(struct realtek_priv *priv, int vid, u32 member,
+ u32 untag, u32 fid)
+{
+ struct rtl8366_vlan_mc vlanmc;
+ struct rtl8366_vlan_4k vlan4k;
+ int mc;
+ int ret;
+
+ if (!priv->ops->is_vlan_valid(priv, vid))
+ return -EINVAL;
+
+ dev_dbg(priv->dev,
+ "setting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
+ vid, member, untag);
+
+ /* Update the 4K table */
+ ret = priv->ops->get_vlan_4k(priv, vid, &vlan4k);
+ if (ret)
+ return ret;
+
+ vlan4k.member |= member;
+ vlan4k.untag |= untag;
+ vlan4k.fid = fid;
+ ret = priv->ops->set_vlan_4k(priv, &vlan4k);
+ if (ret)
+ return ret;
+
+ dev_dbg(priv->dev,
+ "resulting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
+ vid, vlan4k.member, vlan4k.untag);
+
+ /* Find or allocate a member config for this VID */
+ ret = rtl8366_obtain_mc(priv, vid, &vlanmc);
+ if (ret < 0)
+ return ret;
+ mc = ret;
+
+ /* Update the MC entry */
+ vlanmc.member |= member;
+ vlanmc.untag |= untag;
+ vlanmc.fid = fid;
+
+ /* Commit updates to the MC entry */
+ ret = priv->ops->set_vlan_mc(priv, mc, &vlanmc);
+ if (ret)
+ dev_err(priv->dev, "failed to commit changes to VLAN MC index %d for VID %d\n",
+ mc, vid);
+ else
+ dev_dbg(priv->dev,
+ "resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n",
+ vid, vlanmc.member, vlanmc.untag);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rtl8366_set_vlan);
+
+int rtl8366_set_pvid(struct realtek_priv *priv, unsigned int port,
+ unsigned int vid)
+{
+ struct rtl8366_vlan_mc vlanmc;
+ int mc;
+ int ret;
+
+ if (!priv->ops->is_vlan_valid(priv, vid))
+ return -EINVAL;
+
+ /* Find or allocate a member config for this VID */
+ ret = rtl8366_obtain_mc(priv, vid, &vlanmc);
+ if (ret < 0)
+ return ret;
+ mc = ret;
+
+ ret = priv->ops->set_mc_index(priv, port, mc);
+ if (ret) {
+ dev_err(priv->dev, "set PVID: failed to set MC index %d for port %d\n",
+ mc, port);
+ return ret;
+ }
+
+ dev_dbg(priv->dev, "set PVID: the PVID for port %d set to %d using existing MC index %d\n",
+ port, vid, mc);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_set_pvid);
+
+int rtl8366_enable_vlan4k(struct realtek_priv *priv, bool enable)
+{
+ int ret;
+
+ /* To enable 4k VLAN, ordinary VLAN must be enabled first,
+ * but if we disable 4k VLAN it is fine to leave ordinary
+ * VLAN enabled.
+ */
+ if (enable) {
+ /* Make sure VLAN is ON */
+ ret = priv->ops->enable_vlan(priv, true);
+ if (ret)
+ return ret;
+
+ priv->vlan_enabled = true;
+ }
+
+ ret = priv->ops->enable_vlan4k(priv, enable);
+ if (ret)
+ return ret;
+
+ priv->vlan4k_enabled = enable;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_enable_vlan4k);
+
+int rtl8366_enable_vlan(struct realtek_priv *priv, bool enable)
+{
+ int ret;
+
+ ret = priv->ops->enable_vlan(priv, enable);
+ if (ret)
+ return ret;
+
+ priv->vlan_enabled = enable;
+
+ /* If we turn VLAN off, make sure that we turn off
+ * 4k VLAN as well, if that happened to be on.
+ */
+ if (!enable) {
+ priv->vlan4k_enabled = false;
+ ret = priv->ops->enable_vlan4k(priv, false);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rtl8366_enable_vlan);
+
+int rtl8366_reset_vlan(struct realtek_priv *priv)
+{
+ struct rtl8366_vlan_mc vlanmc;
+ int ret;
+ int i;
+
+ rtl8366_enable_vlan(priv, false);
+ rtl8366_enable_vlan4k(priv, false);
+
+ /* Clear the 16 VLAN member configurations */
+ vlanmc.vid = 0;
+ vlanmc.priority = 0;
+ vlanmc.member = 0;
+ vlanmc.untag = 0;
+ vlanmc.fid = 0;
+ for (i = 0; i < priv->num_vlan_mc; i++) {
+ ret = priv->ops->set_vlan_mc(priv, i, &vlanmc);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_reset_vlan);
+
+int rtl8366_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ bool untagged = !!(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+ bool pvid = !!(vlan->flags & BRIDGE_VLAN_INFO_PVID);
+ struct realtek_priv *priv = ds->priv;
+ u32 member = 0;
+ u32 untag = 0;
+ int ret;
+
+ if (!priv->ops->is_vlan_valid(priv, vlan->vid)) {
+ NL_SET_ERR_MSG_MOD(extack, "VLAN ID not valid");
+ return -EINVAL;
+ }
+
+ /* Enable VLAN in the hardware
+ * FIXME: what's with this 4k business?
+ * Just rtl8366_enable_vlan() seems inconclusive.
+ */
+ ret = rtl8366_enable_vlan4k(priv, true);
+ if (ret) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to enable VLAN 4K");
+ return ret;
+ }
+
+ dev_dbg(priv->dev, "add VLAN %d on port %d, %s, %s\n",
+ vlan->vid, port, untagged ? "untagged" : "tagged",
+ pvid ? "PVID" : "no PVID");
+
+ member |= BIT(port);
+
+ if (untagged)
+ untag |= BIT(port);
+
+ ret = rtl8366_set_vlan(priv, vlan->vid, member, untag, 0);
+ if (ret) {
+ dev_err(priv->dev, "failed to set up VLAN %04x", vlan->vid);
+ return ret;
+ }
+
+ if (!pvid)
+ return 0;
+
+ ret = rtl8366_set_pvid(priv, port, vlan->vid);
+ if (ret) {
+ dev_err(priv->dev, "failed to set PVID on port %d to VLAN %04x",
+ port, vlan->vid);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_vlan_add);
+
+int rtl8366_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct realtek_priv *priv = ds->priv;
+ int ret, i;
+
+ dev_dbg(priv->dev, "del VLAN %d on port %d\n", vlan->vid, port);
+
+ for (i = 0; i < priv->num_vlan_mc; i++) {
+ struct rtl8366_vlan_mc vlanmc;
+
+ ret = priv->ops->get_vlan_mc(priv, i, &vlanmc);
+ if (ret)
+ return ret;
+
+ if (vlan->vid == vlanmc.vid) {
+ /* Remove this port from the VLAN */
+ vlanmc.member &= ~BIT(port);
+ vlanmc.untag &= ~BIT(port);
+			/* If no ports are members of this VLAN
+			 * anymore, clear the whole member config
+			 * so it can be reused.
+			 */
+ if (!vlanmc.member) {
+ vlanmc.vid = 0;
+ vlanmc.priority = 0;
+ vlanmc.fid = 0;
+ }
+ ret = priv->ops->set_vlan_mc(priv, i, &vlanmc);
+ if (ret) {
+ dev_err(priv->dev,
+ "failed to remove VLAN %04x\n",
+ vlan->vid);
+ return ret;
+ }
+ break;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_vlan_del);
+
+void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data)
+{
+ struct realtek_priv *priv = ds->priv;
+ struct rtl8366_mib_counter *mib;
+ int i;
+
+ if (port >= priv->num_ports)
+ return;
+
+ for (i = 0; i < priv->num_mib_counters; i++) {
+ mib = &priv->mib_counters[i];
+ strncpy(data + i * ETH_GSTRING_LEN,
+ mib->name, ETH_GSTRING_LEN);
+ }
+}
+EXPORT_SYMBOL_GPL(rtl8366_get_strings);
+
+int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ struct realtek_priv *priv = ds->priv;
+
+ /* We only support SS_STATS */
+ if (sset != ETH_SS_STATS)
+ return 0;
+ if (port >= priv->num_ports)
+ return -EINVAL;
+
+ return priv->num_mib_counters;
+}
+EXPORT_SYMBOL_GPL(rtl8366_get_sset_count);
+
+void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
+{
+ struct realtek_priv *priv = ds->priv;
+ int i;
+ int ret;
+
+ if (port >= priv->num_ports)
+ return;
+
+ for (i = 0; i < priv->num_mib_counters; i++) {
+ struct rtl8366_mib_counter *mib;
+ u64 mibvalue = 0;
+
+ mib = &priv->mib_counters[i];
+ ret = priv->ops->get_mib_counter(priv, port, mib, &mibvalue);
+ if (ret) {
+ dev_err(priv->dev, "error reading MIB counter %s\n",
+ mib->name);
+ }
+ data[i] = mibvalue;
+ }
+}
+EXPORT_SYMBOL_GPL(rtl8366_get_ethtool_stats);
diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
new file mode 100644
index 000000000..25f88022b
--- /dev/null
+++ b/drivers/net/dsa/realtek/rtl8366rb.c
@@ -0,0 +1,1873 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Realtek SMI subdriver for the Realtek RTL8366RB ethernet switch
+ *
+ * This is a sparsely documented chip; the only viable documentation seems
+ * to be a patched-up code drop from the vendor that appears in various
+ * GPL source trees.
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com>
+ * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv>
+ * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+
+#include "realtek.h"
+
+#define RTL8366RB_PORT_NUM_CPU 5
+#define RTL8366RB_NUM_PORTS 6
+#define RTL8366RB_PHY_NO_MAX 4
+#define RTL8366RB_PHY_ADDR_MAX 31
+
+/* Switch Global Configuration register */
+#define RTL8366RB_SGCR 0x0000
+#define RTL8366RB_SGCR_EN_BC_STORM_CTRL BIT(0)
+#define RTL8366RB_SGCR_MAX_LENGTH(a) ((a) << 4)
+#define RTL8366RB_SGCR_MAX_LENGTH_MASK RTL8366RB_SGCR_MAX_LENGTH(0x3)
+#define RTL8366RB_SGCR_MAX_LENGTH_1522 RTL8366RB_SGCR_MAX_LENGTH(0x0)
+#define RTL8366RB_SGCR_MAX_LENGTH_1536 RTL8366RB_SGCR_MAX_LENGTH(0x1)
+#define RTL8366RB_SGCR_MAX_LENGTH_1552 RTL8366RB_SGCR_MAX_LENGTH(0x2)
+#define RTL8366RB_SGCR_MAX_LENGTH_16000 RTL8366RB_SGCR_MAX_LENGTH(0x3)
+#define RTL8366RB_SGCR_EN_VLAN BIT(13)
+#define RTL8366RB_SGCR_EN_VLAN_4KTB BIT(14)
+
+/* Port Enable Control register */
+#define RTL8366RB_PECR 0x0001
+
+/* Switch per-port learning disablement register */
+#define RTL8366RB_PORT_LEARNDIS_CTRL 0x0002
+
+/* Security control, actually aging register */
+#define RTL8366RB_SECURITY_CTRL 0x0003
+
+#define RTL8366RB_SSCR2 0x0004
+#define RTL8366RB_SSCR2_DROP_UNKNOWN_DA BIT(0)
+
+/* Port Mode Control registers */
+#define RTL8366RB_PMC0 0x0005
+#define RTL8366RB_PMC0_SPI BIT(0)
+#define RTL8366RB_PMC0_EN_AUTOLOAD BIT(1)
+#define RTL8366RB_PMC0_PROBE BIT(2)
+#define RTL8366RB_PMC0_DIS_BISR BIT(3)
+#define RTL8366RB_PMC0_ADCTEST BIT(4)
+#define RTL8366RB_PMC0_SRAM_DIAG BIT(5)
+#define RTL8366RB_PMC0_EN_SCAN BIT(6)
+#define RTL8366RB_PMC0_P4_IOMODE_SHIFT 7
+#define RTL8366RB_PMC0_P4_IOMODE_MASK GENMASK(9, 7)
+#define RTL8366RB_PMC0_P5_IOMODE_SHIFT 10
+#define RTL8366RB_PMC0_P5_IOMODE_MASK GENMASK(12, 10)
+#define RTL8366RB_PMC0_SDSMODE_SHIFT 13
+#define RTL8366RB_PMC0_SDSMODE_MASK GENMASK(15, 13)
+#define RTL8366RB_PMC1 0x0006
+
+/* Port Mirror Control Register */
+#define RTL8366RB_PMCR 0x0007
+#define RTL8366RB_PMCR_SOURCE_PORT(a) (a)
+#define RTL8366RB_PMCR_SOURCE_PORT_MASK 0x000f
+#define RTL8366RB_PMCR_MONITOR_PORT(a) ((a) << 4)
+#define RTL8366RB_PMCR_MONITOR_PORT_MASK 0x00f0
+#define RTL8366RB_PMCR_MIRROR_RX BIT(8)
+#define RTL8366RB_PMCR_MIRROR_TX BIT(9)
+#define RTL8366RB_PMCR_MIRROR_SPC BIT(10)
+#define RTL8366RB_PMCR_MIRROR_ISO BIT(11)
+
+/* bits 0..7 = port 0, bits 8..15 = port 1 */
+#define RTL8366RB_PAACR0 0x0010
+/* bits 0..7 = port 2, bits 8..15 = port 3 */
+#define RTL8366RB_PAACR1 0x0011
+/* bits 0..7 = port 4, bits 8..15 = port 5 */
+#define RTL8366RB_PAACR2 0x0012
+#define RTL8366RB_PAACR_SPEED_10M 0
+#define RTL8366RB_PAACR_SPEED_100M 1
+#define RTL8366RB_PAACR_SPEED_1000M 2
+#define RTL8366RB_PAACR_FULL_DUPLEX BIT(2)
+#define RTL8366RB_PAACR_LINK_UP BIT(4)
+#define RTL8366RB_PAACR_TX_PAUSE BIT(5)
+#define RTL8366RB_PAACR_RX_PAUSE BIT(6)
+#define RTL8366RB_PAACR_AN BIT(7)
+
+#define RTL8366RB_PAACR_CPU_PORT (RTL8366RB_PAACR_SPEED_1000M | \
+ RTL8366RB_PAACR_FULL_DUPLEX | \
+ RTL8366RB_PAACR_LINK_UP | \
+ RTL8366RB_PAACR_TX_PAUSE | \
+ RTL8366RB_PAACR_RX_PAUSE)
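+/* Port 5, the CPU port, occupies the upper byte of PAACR2:
+ * rtl8366rb_mac_link_up() writes RTL8366RB_PAACR_CPU_PORT << 8 to
+ * force the CPU port up at 1Gbit full duplex with TX/RX pause.
+ */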
+
+/* bits 0..7 = port 0, bits 8..15 = port 1 */
+#define RTL8366RB_PSTAT0 0x0014
+/* bits 0..7 = port 2, bits 8..15 = port 3 */
+#define RTL8366RB_PSTAT1 0x0015
+/* bits 0..7 = port 4, bits 8..15 = port 5 */
+#define RTL8366RB_PSTAT2 0x0016
+
+#define RTL8366RB_POWER_SAVING_REG 0x0021
+
+/* Spanning tree status (STP) control, two bits per port per FID */
+#define RTL8366RB_STP_STATE_BASE 0x0050 /* 0x0050..0x0057 */
+#define RTL8366RB_STP_STATE_DISABLED 0x0
+#define RTL8366RB_STP_STATE_BLOCKING 0x1
+#define RTL8366RB_STP_STATE_LEARNING 0x2
+#define RTL8366RB_STP_STATE_FORWARDING 0x3
+#define RTL8366RB_STP_MASK GENMASK(1, 0)
+#define RTL8366RB_STP_STATE(port, state) \
+ ((state) << ((port) * 2))
+#define RTL8366RB_STP_STATE_MASK(port) \
+ RTL8366RB_STP_STATE((port), RTL8366RB_STP_MASK)
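+/* Example: to set port 2 to forwarding in one FID register, do
+ * regmap_update_bits(map, RTL8366RB_STP_STATE_BASE + fid,
+ *		      RTL8366RB_STP_STATE_MASK(2),
+ *		      RTL8366RB_STP_STATE(2, RTL8366RB_STP_STATE_FORWARDING));
+ * i.e. mask and value 0x0030; see rtl8366rb_port_stp_state_set()
+ */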
+
+/* CPU port control reg */
+#define RTL8368RB_CPU_CTRL_REG 0x0061
+#define RTL8368RB_CPU_PORTS_MSK 0x00FF
+/* Disables inserting custom tag length/type 0x8899 */
+#define RTL8368RB_CPU_NO_TAG BIT(15)
+
+#define RTL8366RB_SMAR0 0x0070 /* bits 0..15 */
+#define RTL8366RB_SMAR1 0x0071 /* bits 16..31 */
+#define RTL8366RB_SMAR2 0x0072 /* bits 32..47 */
+
+#define RTL8366RB_RESET_CTRL_REG 0x0100
+#define RTL8366RB_CHIP_CTRL_RESET_HW BIT(0)
+#define RTL8366RB_CHIP_CTRL_RESET_SW BIT(1)
+
+#define RTL8366RB_CHIP_ID_REG 0x0509
+#define RTL8366RB_CHIP_ID_8366 0x5937
+#define RTL8366RB_CHIP_VERSION_CTRL_REG 0x050A
+#define RTL8366RB_CHIP_VERSION_MASK 0xf
+
+/* PHY registers control */
+#define RTL8366RB_PHY_ACCESS_CTRL_REG 0x8000
+#define RTL8366RB_PHY_CTRL_READ BIT(0)
+#define RTL8366RB_PHY_CTRL_WRITE 0
+#define RTL8366RB_PHY_ACCESS_BUSY_REG 0x8001
+#define RTL8366RB_PHY_INT_BUSY BIT(0)
+#define RTL8366RB_PHY_EXT_BUSY BIT(4)
+#define RTL8366RB_PHY_ACCESS_DATA_REG 0x8002
+#define RTL8366RB_PHY_EXT_CTRL_REG 0x8010
+#define RTL8366RB_PHY_EXT_WRDATA_REG 0x8011
+#define RTL8366RB_PHY_EXT_RDDATA_REG 0x8012
+
+#define RTL8366RB_PHY_REG_MASK 0x1f
+#define RTL8366RB_PHY_PAGE_OFFSET 5
+#define RTL8366RB_PHY_PAGE_MASK (0xf << 5)
+#define RTL8366RB_PHY_NO_OFFSET 9
+#define RTL8366RB_PHY_NO_MASK (0x1f << 9)
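+/* rtl8366rb_phy_read() and rtl8366rb_phy_write() below compose the
+ * access address as
+ * 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum,
+ * i.e. the PHY select bit is apparently one-hot inside the PHY
+ * number field.
+ */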
+
+/* VLAN Ingress Control Register 1, two fields of one bit per port.
+ * bits 0..5 make the switch drop ingress frames without a VID,
+ * such as untagged or priority-tagged frames, for the respective
+ * port.
+ * bits 6..11 make the switch drop ingress frames carrying a C-tag
+ * with VID != 0 for the respective port.
+ */
+#define RTL8366RB_VLAN_INGRESS_CTRL1_REG 0x037E
+#define RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port) (BIT((port)) | BIT((port) + 6))
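+/* e.g. RTL8366RB_VLAN_INGRESS_CTRL1_DROP(2) = BIT(2) | BIT(8) = 0x0104,
+ * dropping both untagged and nonzero-VID C-tagged frames on port 2
+ */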
+
+/* VLAN Ingress Control Register 2, one bit per port.
+ * bits 0..5 make the switch drop all ingress frames with a VLAN
+ * classification that does not include the port in its member set.
+ */
+#define RTL8366RB_VLAN_INGRESS_CTRL2_REG 0x037f
+
+/* LED control registers */
+#define RTL8366RB_LED_BLINKRATE_REG 0x0430
+#define RTL8366RB_LED_BLINKRATE_MASK 0x0007
+#define RTL8366RB_LED_BLINKRATE_28MS 0x0000
+#define RTL8366RB_LED_BLINKRATE_56MS 0x0001
+#define RTL8366RB_LED_BLINKRATE_84MS 0x0002
+#define RTL8366RB_LED_BLINKRATE_111MS 0x0003
+#define RTL8366RB_LED_BLINKRATE_222MS 0x0004
+#define RTL8366RB_LED_BLINKRATE_446MS 0x0005
+
+#define RTL8366RB_LED_CTRL_REG 0x0431
+#define RTL8366RB_LED_OFF 0x0
+#define RTL8366RB_LED_DUP_COL 0x1
+#define RTL8366RB_LED_LINK_ACT 0x2
+#define RTL8366RB_LED_SPD1000 0x3
+#define RTL8366RB_LED_SPD100 0x4
+#define RTL8366RB_LED_SPD10 0x5
+#define RTL8366RB_LED_SPD1000_ACT 0x6
+#define RTL8366RB_LED_SPD100_ACT 0x7
+#define RTL8366RB_LED_SPD10_ACT 0x8
+#define RTL8366RB_LED_SPD100_10_ACT 0x9
+#define RTL8366RB_LED_FIBER 0xa
+#define RTL8366RB_LED_AN_FAULT 0xb
+#define RTL8366RB_LED_LINK_RX 0xc
+#define RTL8366RB_LED_LINK_TX 0xd
+#define RTL8366RB_LED_MASTER 0xe
+#define RTL8366RB_LED_FORCE 0xf
+#define RTL8366RB_LED_0_1_CTRL_REG 0x0432
+#define RTL8366RB_LED_1_OFFSET 6
+#define RTL8366RB_LED_2_3_CTRL_REG 0x0433
+#define RTL8366RB_LED_3_OFFSET 6
+
+#define RTL8366RB_MIB_COUNT 33
+#define RTL8366RB_GLOBAL_MIB_COUNT 1
+#define RTL8366RB_MIB_COUNTER_PORT_OFFSET 0x0050
+#define RTL8366RB_MIB_COUNTER_BASE 0x1000
+#define RTL8366RB_MIB_CTRL_REG 0x13F0
+#define RTL8366RB_MIB_CTRL_USER_MASK 0x0FFC
+#define RTL8366RB_MIB_CTRL_BUSY_MASK BIT(0)
+#define RTL8366RB_MIB_CTRL_RESET_MASK BIT(1)
+#define RTL8366RB_MIB_CTRL_PORT_RESET(_p) BIT(2 + (_p))
+#define RTL8366RB_MIB_CTRL_GLOBAL_RESET BIT(11)
+
+#define RTL8366RB_PORT_VLAN_CTRL_BASE 0x0063
+#define RTL8366RB_PORT_VLAN_CTRL_REG(_p) \
+ (RTL8366RB_PORT_VLAN_CTRL_BASE + (_p) / 4)
+#define RTL8366RB_PORT_VLAN_CTRL_MASK 0xf
+#define RTL8366RB_PORT_VLAN_CTRL_SHIFT(_p) (4 * ((_p) % 4))
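+/* Four ports are packed per 16-bit register, 4 bits each: e.g. port 5
+ * uses bits 7..4 of register 0x0064 (RTL8366RB_PORT_VLAN_CTRL_BASE + 1)
+ */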
+
+#define RTL8366RB_VLAN_TABLE_READ_BASE 0x018C
+#define RTL8366RB_VLAN_TABLE_WRITE_BASE 0x0185
+
+#define RTL8366RB_TABLE_ACCESS_CTRL_REG 0x0180
+#define RTL8366RB_TABLE_VLAN_READ_CTRL 0x0E01
+#define RTL8366RB_TABLE_VLAN_WRITE_CTRL 0x0F01
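+/* VLAN 4K table access sequence (see rtl8366rb_get_vlan_4k() and
+ * rtl8366rb_set_vlan_4k()): write the VID (and, for writes, the
+ * member/untag/FID words) to the write base registers, then kick the
+ * access control register with the read or write control word; reads
+ * come back in the three registers at the read base.
+ */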
+
+#define RTL8366RB_VLAN_MC_BASE(_x) (0x0020 + (_x) * 3)
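+/* Each member config occupies three consecutive 16-bit registers:
+ * e.g. member config 2 lives at 0x0026..0x0028
+ */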
+
+#define RTL8366RB_PORT_LINK_STATUS_BASE 0x0014
+#define RTL8366RB_PORT_STATUS_SPEED_MASK 0x0003
+#define RTL8366RB_PORT_STATUS_DUPLEX_MASK 0x0004
+#define RTL8366RB_PORT_STATUS_LINK_MASK 0x0010
+#define RTL8366RB_PORT_STATUS_TXPAUSE_MASK 0x0020
+#define RTL8366RB_PORT_STATUS_RXPAUSE_MASK 0x0040
+#define RTL8366RB_PORT_STATUS_AN_MASK 0x0080
+
+#define RTL8366RB_NUM_VLANS 16
+#define RTL8366RB_NUM_LEDGROUPS 4
+#define RTL8366RB_NUM_VIDS 4096
+#define RTL8366RB_PRIORITYMAX 7
+#define RTL8366RB_NUM_FIDS 8
+#define RTL8366RB_FIDMAX 7
+
+#define RTL8366RB_PORT_1 BIT(0) /* In userspace port 0 */
+#define RTL8366RB_PORT_2 BIT(1) /* In userspace port 1 */
+#define RTL8366RB_PORT_3 BIT(2) /* In userspace port 2 */
+#define RTL8366RB_PORT_4 BIT(3) /* In userspace port 3 */
+#define RTL8366RB_PORT_5 BIT(4) /* In userspace port 4 */
+
+#define RTL8366RB_PORT_CPU BIT(5) /* CPU port */
+
+#define RTL8366RB_PORT_ALL (RTL8366RB_PORT_1 | \
+ RTL8366RB_PORT_2 | \
+ RTL8366RB_PORT_3 | \
+ RTL8366RB_PORT_4 | \
+ RTL8366RB_PORT_5 | \
+ RTL8366RB_PORT_CPU)
+
+#define RTL8366RB_PORT_ALL_BUT_CPU (RTL8366RB_PORT_1 | \
+ RTL8366RB_PORT_2 | \
+ RTL8366RB_PORT_3 | \
+ RTL8366RB_PORT_4 | \
+ RTL8366RB_PORT_5)
+
+#define RTL8366RB_PORT_ALL_EXTERNAL (RTL8366RB_PORT_1 | \
+ RTL8366RB_PORT_2 | \
+ RTL8366RB_PORT_3 | \
+ RTL8366RB_PORT_4)
+
+#define RTL8366RB_PORT_ALL_INTERNAL RTL8366RB_PORT_CPU
+
+/* First configuration word per member config, VID and prio */
+#define RTL8366RB_VLAN_VID_MASK 0xfff
+#define RTL8366RB_VLAN_PRIORITY_SHIFT 12
+#define RTL8366RB_VLAN_PRIORITY_MASK 0x7
+/* Second configuration word per member config, member and untagged */
+#define RTL8366RB_VLAN_UNTAG_SHIFT 8
+#define RTL8366RB_VLAN_UNTAG_MASK 0xff
+#define RTL8366RB_VLAN_MEMBER_MASK 0xff
+/* Third config word per member config, STAG currently unused */
+#define RTL8366RB_VLAN_STAG_MBR_MASK 0xff
+#define RTL8366RB_VLAN_STAG_MBR_SHIFT 8
+#define RTL8366RB_VLAN_STAG_IDX_MASK 0x7
+#define RTL8366RB_VLAN_STAG_IDX_SHIFT 5
+#define RTL8366RB_VLAN_FID_MASK 0x7
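+/* A member config is thus packed as (see rtl8366rb_set_vlan_mc()):
+ * word 0 = VID | prio << 12, word 1 = member | untag << 8,
+ * word 2 = FID. For example VID 100 with ports 0 and 5 as members,
+ * port 0 untagged and FID 0 encodes as 0x0064, 0x0121, 0x0000.
+ */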
+
+/* Port ingress bandwidth control */
+#define RTL8366RB_IB_BASE 0x0200
+#define RTL8366RB_IB_REG(pnum) (RTL8366RB_IB_BASE + (pnum))
+#define RTL8366RB_IB_BDTH_MASK 0x3fff
+#define RTL8366RB_IB_PREIFG BIT(14)
+
+/* Port egress bandwidth control */
+#define RTL8366RB_EB_BASE 0x02d1
+#define RTL8366RB_EB_REG(pnum) (RTL8366RB_EB_BASE + (pnum))
+#define RTL8366RB_EB_BDTH_MASK 0x3fff
+#define RTL8366RB_EB_PREIFG_REG 0x02f8
+#define RTL8366RB_EB_PREIFG BIT(9)
+
+#define RTL8366RB_BDTH_SW_MAX 1048512 /* 1048576? */
+#define RTL8366RB_BDTH_UNIT 64
+#define RTL8366RB_BDTH_REG_DEFAULT 16383
+
+/* QOS */
+#define RTL8366RB_QOS BIT(15)
+/* Include/Exclude Preamble and IFG (20 bytes). 0:Exclude, 1:Include. */
+#define RTL8366RB_QOS_DEFAULT_PREIFG 1
+
+/* Interrupt handling */
+#define RTL8366RB_INTERRUPT_CONTROL_REG 0x0440
+#define RTL8366RB_INTERRUPT_POLARITY BIT(0)
+#define RTL8366RB_P4_RGMII_LED BIT(2)
+#define RTL8366RB_INTERRUPT_MASK_REG 0x0441
+#define RTL8366RB_INTERRUPT_LINK_CHGALL GENMASK(11, 0)
+#define RTL8366RB_INTERRUPT_ACLEXCEED BIT(8)
+#define RTL8366RB_INTERRUPT_STORMEXCEED BIT(9)
+#define RTL8366RB_INTERRUPT_P4_FIBER BIT(12)
+#define RTL8366RB_INTERRUPT_P4_UTP BIT(13)
+#define RTL8366RB_INTERRUPT_VALID (RTL8366RB_INTERRUPT_LINK_CHGALL | \
+ RTL8366RB_INTERRUPT_ACLEXCEED | \
+ RTL8366RB_INTERRUPT_STORMEXCEED | \
+ RTL8366RB_INTERRUPT_P4_FIBER | \
+ RTL8366RB_INTERRUPT_P4_UTP)
+#define RTL8366RB_INTERRUPT_STATUS_REG 0x0442
+#define RTL8366RB_NUM_INTERRUPT 14 /* 0..13 */
+
+/* Port isolation registers */
+#define RTL8366RB_PORT_ISO_BASE 0x0F08
+#define RTL8366RB_PORT_ISO(pnum) (RTL8366RB_PORT_ISO_BASE + (pnum))
+#define RTL8366RB_PORT_ISO_EN BIT(0)
+#define RTL8366RB_PORT_ISO_PORTS_MASK GENMASK(7, 1)
+#define RTL8366RB_PORT_ISO_PORTS(pmask) ((pmask) << 1)
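+/* e.g. isolating port 0 so it can only reach the CPU port (5):
+ * regmap_write(map, RTL8366RB_PORT_ISO(0),
+ *		RTL8366RB_PORT_ISO_PORTS(BIT(5)) | RTL8366RB_PORT_ISO_EN);
+ * i.e. value 0x0041, as done in rtl8366rb_setup()
+ */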
+
+/* bits 0..5 enable force when cleared */
+#define RTL8366RB_MAC_FORCE_CTRL_REG 0x0F11
+
+#define RTL8366RB_OAM_PARSER_REG 0x0F14
+#define RTL8366RB_OAM_MULTIPLEXER_REG 0x0F15
+
+#define RTL8366RB_GREEN_FEATURE_REG 0x0F51
+#define RTL8366RB_GREEN_FEATURE_MSK 0x0007
+#define RTL8366RB_GREEN_FEATURE_TX BIT(0)
+#define RTL8366RB_GREEN_FEATURE_RX BIT(2)
+
+/**
+ * struct rtl8366rb - RTL8366RB-specific data
+ * @max_mtu: per-port max MTU setting
+ * @pvid_enabled: if PVID is set for respective port
+ */
+struct rtl8366rb {
+ unsigned int max_mtu[RTL8366RB_NUM_PORTS];
+ bool pvid_enabled[RTL8366RB_NUM_PORTS];
+};
+
+static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = {
+ { 0, 0, 4, "IfInOctets" },
+ { 0, 4, 4, "EtherStatsOctets" },
+ { 0, 8, 2, "EtherStatsUnderSizePkts" },
+ { 0, 10, 2, "EtherFragments" },
+ { 0, 12, 2, "EtherStatsPkts64Octets" },
+ { 0, 14, 2, "EtherStatsPkts65to127Octets" },
+ { 0, 16, 2, "EtherStatsPkts128to255Octets" },
+ { 0, 18, 2, "EtherStatsPkts256to511Octets" },
+ { 0, 20, 2, "EtherStatsPkts512to1023Octets" },
+ { 0, 22, 2, "EtherStatsPkts1024to1518Octets" },
+ { 0, 24, 2, "EtherOversizeStats" },
+ { 0, 26, 2, "EtherStatsJabbers" },
+ { 0, 28, 2, "IfInUcastPkts" },
+ { 0, 30, 2, "EtherStatsMulticastPkts" },
+ { 0, 32, 2, "EtherStatsBroadcastPkts" },
+ { 0, 34, 2, "EtherStatsDropEvents" },
+ { 0, 36, 2, "Dot3StatsFCSErrors" },
+ { 0, 38, 2, "Dot3StatsSymbolErrors" },
+ { 0, 40, 2, "Dot3InPauseFrames" },
+ { 0, 42, 2, "Dot3ControlInUnknownOpcodes" },
+ { 0, 44, 4, "IfOutOctets" },
+ { 0, 48, 2, "Dot3StatsSingleCollisionFrames" },
+ { 0, 50, 2, "Dot3StatMultipleCollisionFrames" },
+ { 0, 52, 2, "Dot3sDeferredTransmissions" },
+ { 0, 54, 2, "Dot3StatsLateCollisions" },
+ { 0, 56, 2, "EtherStatsCollisions" },
+ { 0, 58, 2, "Dot3StatsExcessiveCollisions" },
+ { 0, 60, 2, "Dot3OutPauseFrames" },
+ { 0, 62, 2, "Dot1dBasePortDelayExceededDiscards" },
+ { 0, 64, 2, "Dot1dTpPortInDiscards" },
+ { 0, 66, 2, "IfOutUcastPkts" },
+ { 0, 68, 2, "IfOutMulticastPkts" },
+ { 0, 70, 2, "IfOutBroadcastPkts" },
+};
+
+static int rtl8366rb_get_mib_counter(struct realtek_priv *priv,
+ int port,
+ struct rtl8366_mib_counter *mib,
+ u64 *mibvalue)
+{
+ u32 addr, val;
+ int ret;
+ int i;
+
+ addr = RTL8366RB_MIB_COUNTER_BASE +
+ RTL8366RB_MIB_COUNTER_PORT_OFFSET * (port) +
+ mib->offset;
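+	/* e.g. port 2, IfInOctets (offset 0): addr = 0x1000 + 2 * 0x50 = 0x10A0 */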
+
+	/* Write the counter address first; the ASIC will then prepare
+	 * the 64-bit counter to be retrieved.
+	 */
+ ret = regmap_write(priv->map, addr, 0); /* Write whatever */
+ if (ret)
+ return ret;
+
+ /* Read MIB control register */
+ ret = regmap_read(priv->map, RTL8366RB_MIB_CTRL_REG, &val);
+ if (ret)
+ return -EIO;
+
+ if (val & RTL8366RB_MIB_CTRL_BUSY_MASK)
+ return -EBUSY;
+
+ if (val & RTL8366RB_MIB_CTRL_RESET_MASK)
+ return -EIO;
+
+	/* Read each individual MIB 16 bits at a time */
+ *mibvalue = 0;
+ for (i = mib->length; i > 0; i--) {
+ ret = regmap_read(priv->map, addr + (i - 1), &val);
+ if (ret)
+ return ret;
+ *mibvalue = (*mibvalue << 16) | (val & 0xFFFF);
+ }
+ return 0;
+}
+
+static u32 rtl8366rb_get_irqmask(struct irq_data *d)
+{
+ int line = irqd_to_hwirq(d);
+ u32 val;
+
+ /* For line interrupts we combine link down in bits
+ * 6..11 with link up in bits 0..5 into one interrupt.
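+	 * For example hwirq 3 (port 3 link change) yields
+	 * BIT(3) | BIT(9) = 0x0208.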
+ */
+ if (line < 12)
+ val = BIT(line) | BIT(line + 6);
+ else
+ val = BIT(line);
+ return val;
+}
+
+static void rtl8366rb_mask_irq(struct irq_data *d)
+{
+ struct realtek_priv *priv = irq_data_get_irq_chip_data(d);
+ int ret;
+
+ ret = regmap_update_bits(priv->map, RTL8366RB_INTERRUPT_MASK_REG,
+ rtl8366rb_get_irqmask(d), 0);
+ if (ret)
+ dev_err(priv->dev, "could not mask IRQ\n");
+}
+
+static void rtl8366rb_unmask_irq(struct irq_data *d)
+{
+ struct realtek_priv *priv = irq_data_get_irq_chip_data(d);
+ int ret;
+
+ ret = regmap_update_bits(priv->map, RTL8366RB_INTERRUPT_MASK_REG,
+ rtl8366rb_get_irqmask(d),
+ rtl8366rb_get_irqmask(d));
+ if (ret)
+ dev_err(priv->dev, "could not unmask IRQ\n");
+}
+
+static irqreturn_t rtl8366rb_irq(int irq, void *data)
+{
+ struct realtek_priv *priv = data;
+ u32 stat;
+ int ret;
+
+ /* This clears the IRQ status register */
+ ret = regmap_read(priv->map, RTL8366RB_INTERRUPT_STATUS_REG,
+ &stat);
+ if (ret) {
+ dev_err(priv->dev, "can't read interrupt status\n");
+ return IRQ_NONE;
+ }
+ stat &= RTL8366RB_INTERRUPT_VALID;
+ if (!stat)
+ return IRQ_NONE;
+ while (stat) {
+ int line = __ffs(stat);
+ int child_irq;
+
+ stat &= ~BIT(line);
+ /* For line interrupts we combine link down in bits
+ * 6..11 with link up in bits 0..5 into one interrupt.
+ */
+ if (line < 12 && line > 5)
+ line -= 5;
+ child_irq = irq_find_mapping(priv->irqdomain, line);
+ handle_nested_irq(child_irq);
+ }
+ return IRQ_HANDLED;
+}
+
+static struct irq_chip rtl8366rb_irq_chip = {
+ .name = "RTL8366RB",
+ .irq_mask = rtl8366rb_mask_irq,
+ .irq_unmask = rtl8366rb_unmask_irq,
+};
+
+static int rtl8366rb_irq_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_chip_and_handler(irq, &rtl8366rb_irq_chip, handle_simple_irq);
+ irq_set_nested_thread(irq, 1);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static void rtl8366rb_irq_unmap(struct irq_domain *d, unsigned int irq)
+{
+ irq_set_nested_thread(irq, 0);
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
+}
+
+static const struct irq_domain_ops rtl8366rb_irqdomain_ops = {
+ .map = rtl8366rb_irq_map,
+ .unmap = rtl8366rb_irq_unmap,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int rtl8366rb_setup_cascaded_irq(struct realtek_priv *priv)
+{
+ struct device_node *intc;
+ unsigned long irq_trig;
+ int irq;
+ int ret;
+ u32 val;
+ int i;
+
+ intc = of_get_child_by_name(priv->dev->of_node, "interrupt-controller");
+ if (!intc) {
+ dev_err(priv->dev, "missing child interrupt-controller node\n");
+ return -EINVAL;
+ }
+	/* RTL8366RB IRQs cascade off this one */
+ irq = of_irq_get(intc, 0);
+ if (irq <= 0) {
+ dev_err(priv->dev, "failed to get parent IRQ\n");
+ ret = irq ? irq : -EINVAL;
+ goto out_put_node;
+ }
+
+ /* This clears the IRQ status register */
+ ret = regmap_read(priv->map, RTL8366RB_INTERRUPT_STATUS_REG,
+ &val);
+ if (ret) {
+ dev_err(priv->dev, "can't read interrupt status\n");
+ goto out_put_node;
+ }
+
+ /* Fetch IRQ edge information from the descriptor */
+ irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
+ switch (irq_trig) {
+ case IRQF_TRIGGER_RISING:
+ case IRQF_TRIGGER_HIGH:
+ dev_info(priv->dev, "active high/rising IRQ\n");
+ val = 0;
+ break;
+	case IRQF_TRIGGER_FALLING:
+	case IRQF_TRIGGER_LOW:
+		dev_info(priv->dev, "active low/falling IRQ\n");
+		val = RTL8366RB_INTERRUPT_POLARITY;
+		break;
+	default:
+		/* Don't leave the polarity setting uninitialized */
+		dev_info(priv->dev, "unknown IRQ trigger, assuming active high\n");
+		val = 0;
+		break;
+	}
+ ret = regmap_update_bits(priv->map, RTL8366RB_INTERRUPT_CONTROL_REG,
+ RTL8366RB_INTERRUPT_POLARITY,
+ val);
+ if (ret) {
+ dev_err(priv->dev, "could not configure IRQ polarity\n");
+ goto out_put_node;
+ }
+
+ ret = devm_request_threaded_irq(priv->dev, irq, NULL,
+ rtl8366rb_irq, IRQF_ONESHOT,
+ "RTL8366RB", priv);
+ if (ret) {
+ dev_err(priv->dev, "unable to request irq: %d\n", ret);
+ goto out_put_node;
+ }
+ priv->irqdomain = irq_domain_add_linear(intc,
+ RTL8366RB_NUM_INTERRUPT,
+ &rtl8366rb_irqdomain_ops,
+ priv);
+ if (!priv->irqdomain) {
+ dev_err(priv->dev, "failed to create IRQ domain\n");
+ ret = -EINVAL;
+ goto out_put_node;
+ }
+ for (i = 0; i < priv->num_ports; i++)
+ irq_set_parent(irq_create_mapping(priv->irqdomain, i), irq);
+
+out_put_node:
+ of_node_put(intc);
+ return ret;
+}
+
+static int rtl8366rb_set_addr(struct realtek_priv *priv)
+{
+ u8 addr[ETH_ALEN];
+ u16 val;
+ int ret;
+
+ eth_random_addr(addr);
+
+ dev_info(priv->dev, "set MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+ val = addr[0] << 8 | addr[1];
+ ret = regmap_write(priv->map, RTL8366RB_SMAR0, val);
+ if (ret)
+ return ret;
+ val = addr[2] << 8 | addr[3];
+ ret = regmap_write(priv->map, RTL8366RB_SMAR1, val);
+ if (ret)
+ return ret;
+ val = addr[4] << 8 | addr[5];
+ ret = regmap_write(priv->map, RTL8366RB_SMAR2, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/* Found in a vendor driver */
+
+/* Struct for handling the jam tables' entries */
+struct rtl8366rb_jam_tbl_entry {
+ u16 reg;
+ u16 val;
+};
+
+/* For the "version 0" early silicon, appears in most source releases */
+static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_ver_0[] = {
+ {0x000B, 0x0001}, {0x03A6, 0x0100}, {0x03A7, 0x0001}, {0x02D1, 0x3FFF},
+ {0x02D2, 0x3FFF}, {0x02D3, 0x3FFF}, {0x02D4, 0x3FFF}, {0x02D5, 0x3FFF},
+ {0x02D6, 0x3FFF}, {0x02D7, 0x3FFF}, {0x02D8, 0x3FFF}, {0x022B, 0x0688},
+ {0x022C, 0x0FAC}, {0x03D0, 0x4688}, {0x03D1, 0x01F5}, {0x0000, 0x0830},
+ {0x02F9, 0x0200}, {0x02F7, 0x7FFF}, {0x02F8, 0x03FF}, {0x0080, 0x03E8},
+ {0x0081, 0x00CE}, {0x0082, 0x00DA}, {0x0083, 0x0230}, {0xBE0F, 0x2000},
+ {0x0231, 0x422A}, {0x0232, 0x422A}, {0x0233, 0x422A}, {0x0234, 0x422A},
+ {0x0235, 0x422A}, {0x0236, 0x422A}, {0x0237, 0x422A}, {0x0238, 0x422A},
+ {0x0239, 0x422A}, {0x023A, 0x422A}, {0x023B, 0x422A}, {0x023C, 0x422A},
+ {0x023D, 0x422A}, {0x023E, 0x422A}, {0x023F, 0x422A}, {0x0240, 0x422A},
+ {0x0241, 0x422A}, {0x0242, 0x422A}, {0x0243, 0x422A}, {0x0244, 0x422A},
+ {0x0245, 0x422A}, {0x0246, 0x422A}, {0x0247, 0x422A}, {0x0248, 0x422A},
+ {0x0249, 0x0146}, {0x024A, 0x0146}, {0x024B, 0x0146}, {0xBE03, 0xC961},
+ {0x024D, 0x0146}, {0x024E, 0x0146}, {0x024F, 0x0146}, {0x0250, 0x0146},
+ {0xBE64, 0x0226}, {0x0252, 0x0146}, {0x0253, 0x0146}, {0x024C, 0x0146},
+ {0x0251, 0x0146}, {0x0254, 0x0146}, {0xBE62, 0x3FD0}, {0x0084, 0x0320},
+ {0x0255, 0x0146}, {0x0256, 0x0146}, {0x0257, 0x0146}, {0x0258, 0x0146},
+ {0x0259, 0x0146}, {0x025A, 0x0146}, {0x025B, 0x0146}, {0x025C, 0x0146},
+ {0x025D, 0x0146}, {0x025E, 0x0146}, {0x025F, 0x0146}, {0x0260, 0x0146},
+ {0x0261, 0xA23F}, {0x0262, 0x0294}, {0x0263, 0xA23F}, {0x0264, 0x0294},
+ {0x0265, 0xA23F}, {0x0266, 0x0294}, {0x0267, 0xA23F}, {0x0268, 0x0294},
+ {0x0269, 0xA23F}, {0x026A, 0x0294}, {0x026B, 0xA23F}, {0x026C, 0x0294},
+ {0x026D, 0xA23F}, {0x026E, 0x0294}, {0x026F, 0xA23F}, {0x0270, 0x0294},
+ {0x02F5, 0x0048}, {0xBE09, 0x0E00}, {0xBE1E, 0x0FA0}, {0xBE14, 0x8448},
+ {0xBE15, 0x1007}, {0xBE4A, 0xA284}, {0xC454, 0x3F0B}, {0xC474, 0x3F0B},
+ {0xBE48, 0x3672}, {0xBE4B, 0x17A7}, {0xBE4C, 0x0B15}, {0xBE52, 0x0EDD},
+ {0xBE49, 0x8C00}, {0xBE5B, 0x785C}, {0xBE5C, 0x785C}, {0xBE5D, 0x785C},
+ {0xBE61, 0x368A}, {0xBE63, 0x9B84}, {0xC456, 0xCC13}, {0xC476, 0xCC13},
+ {0xBE65, 0x307D}, {0xBE6D, 0x0005}, {0xBE6E, 0xE120}, {0xBE2E, 0x7BAF},
+};
+
+/* This v1 init sequence is from the Belkin F5D8235 U-Boot release */
+static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_ver_1[] = {
+ {0x0000, 0x0830}, {0x0001, 0x8000}, {0x0400, 0x8130}, {0xBE78, 0x3C3C},
+ {0x0431, 0x5432}, {0xBE37, 0x0CE4}, {0x02FA, 0xFFDF}, {0x02FB, 0xFFE0},
+ {0xC44C, 0x1585}, {0xC44C, 0x1185}, {0xC44C, 0x1585}, {0xC46C, 0x1585},
+ {0xC46C, 0x1185}, {0xC46C, 0x1585}, {0xC451, 0x2135}, {0xC471, 0x2135},
+ {0xBE10, 0x8140}, {0xBE15, 0x0007}, {0xBE6E, 0xE120}, {0xBE69, 0xD20F},
+ {0xBE6B, 0x0320}, {0xBE24, 0xB000}, {0xBE23, 0xFF51}, {0xBE22, 0xDF20},
+ {0xBE21, 0x0140}, {0xBE20, 0x00BB}, {0xBE24, 0xB800}, {0xBE24, 0x0000},
+ {0xBE24, 0x7000}, {0xBE23, 0xFF51}, {0xBE22, 0xDF60}, {0xBE21, 0x0140},
+ {0xBE20, 0x0077}, {0xBE24, 0x7800}, {0xBE24, 0x0000}, {0xBE2E, 0x7B7A},
+ {0xBE36, 0x0CE4}, {0x02F5, 0x0048}, {0xBE77, 0x2940}, {0x000A, 0x83E0},
+ {0xBE79, 0x3C3C}, {0xBE00, 0x1340},
+};
+
+/* This v2 init sequence is from the Belkin F5D8235 U-Boot release */
+static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_ver_2[] = {
+ {0x0450, 0x0000}, {0x0400, 0x8130}, {0x000A, 0x83ED}, {0x0431, 0x5432},
+ {0xC44F, 0x6250}, {0xC46F, 0x6250}, {0xC456, 0x0C14}, {0xC476, 0x0C14},
+ {0xC44C, 0x1C85}, {0xC44C, 0x1885}, {0xC44C, 0x1C85}, {0xC46C, 0x1C85},
+ {0xC46C, 0x1885}, {0xC46C, 0x1C85}, {0xC44C, 0x0885}, {0xC44C, 0x0881},
+ {0xC44C, 0x0885}, {0xC46C, 0x0885}, {0xC46C, 0x0881}, {0xC46C, 0x0885},
+ {0xBE2E, 0x7BA7}, {0xBE36, 0x1000}, {0xBE37, 0x1000}, {0x8000, 0x0001},
+ {0xBE69, 0xD50F}, {0x8000, 0x0000}, {0xBE69, 0xD50F}, {0xBE6E, 0x0320},
+ {0xBE77, 0x2940}, {0xBE78, 0x3C3C}, {0xBE79, 0x3C3C}, {0xBE6E, 0xE120},
+ {0x8000, 0x0001}, {0xBE15, 0x1007}, {0x8000, 0x0000}, {0xBE15, 0x1007},
+ {0xBE14, 0x0448}, {0xBE1E, 0x00A0}, {0xBE10, 0x8160}, {0xBE10, 0x8140},
+ {0xBE00, 0x1340}, {0x0F51, 0x0010},
+};
+
+/* Appears in a DD-WRT code dump */
+static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_ver_3[] = {
+ {0x0000, 0x0830}, {0x0400, 0x8130}, {0x000A, 0x83ED}, {0x0431, 0x5432},
+ {0x0F51, 0x0017}, {0x02F5, 0x0048}, {0x02FA, 0xFFDF}, {0x02FB, 0xFFE0},
+ {0xC456, 0x0C14}, {0xC476, 0x0C14}, {0xC454, 0x3F8B}, {0xC474, 0x3F8B},
+ {0xC450, 0x2071}, {0xC470, 0x2071}, {0xC451, 0x226B}, {0xC471, 0x226B},
+ {0xC452, 0xA293}, {0xC472, 0xA293}, {0xC44C, 0x1585}, {0xC44C, 0x1185},
+ {0xC44C, 0x1585}, {0xC46C, 0x1585}, {0xC46C, 0x1185}, {0xC46C, 0x1585},
+ {0xC44C, 0x0185}, {0xC44C, 0x0181}, {0xC44C, 0x0185}, {0xC46C, 0x0185},
+ {0xC46C, 0x0181}, {0xC46C, 0x0185}, {0xBE24, 0xB000}, {0xBE23, 0xFF51},
+ {0xBE22, 0xDF20}, {0xBE21, 0x0140}, {0xBE20, 0x00BB}, {0xBE24, 0xB800},
+ {0xBE24, 0x0000}, {0xBE24, 0x7000}, {0xBE23, 0xFF51}, {0xBE22, 0xDF60},
+ {0xBE21, 0x0140}, {0xBE20, 0x0077}, {0xBE24, 0x7800}, {0xBE24, 0x0000},
+ {0xBE2E, 0x7BA7}, {0xBE36, 0x1000}, {0xBE37, 0x1000}, {0x8000, 0x0001},
+ {0xBE69, 0xD50F}, {0x8000, 0x0000}, {0xBE69, 0xD50F}, {0xBE6B, 0x0320},
+ {0xBE77, 0x2800}, {0xBE78, 0x3C3C}, {0xBE79, 0x3C3C}, {0xBE6E, 0xE120},
+ {0x8000, 0x0001}, {0xBE10, 0x8140}, {0x8000, 0x0000}, {0xBE10, 0x8140},
+ {0xBE15, 0x1007}, {0xBE14, 0x0448}, {0xBE1E, 0x00A0}, {0xBE10, 0x8160},
+ {0xBE10, 0x8140}, {0xBE00, 0x1340}, {0x0450, 0x0000}, {0x0401, 0x0000},
+};
+
+/* Belkin F5D8235 v1, "belkin,f5d8235-v1" */
+static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_f5d8235[] = {
+ {0x0242, 0x02BF}, {0x0245, 0x02BF}, {0x0248, 0x02BF}, {0x024B, 0x02BF},
+ {0x024E, 0x02BF}, {0x0251, 0x02BF}, {0x0254, 0x0A3F}, {0x0256, 0x0A3F},
+ {0x0258, 0x0A3F}, {0x025A, 0x0A3F}, {0x025C, 0x0A3F}, {0x025E, 0x0A3F},
+ {0x0263, 0x007C}, {0x0100, 0x0004}, {0xBE5B, 0x3500}, {0x800E, 0x200F},
+ {0xBE1D, 0x0F00}, {0x8001, 0x5011}, {0x800A, 0xA2F4}, {0x800B, 0x17A3},
+ {0xBE4B, 0x17A3}, {0xBE41, 0x5011}, {0xBE17, 0x2100}, {0x8000, 0x8304},
+ {0xBE40, 0x8304}, {0xBE4A, 0xA2F4}, {0x800C, 0xA8D5}, {0x8014, 0x5500},
+ {0x8015, 0x0004}, {0xBE4C, 0xA8D5}, {0xBE59, 0x0008}, {0xBE09, 0x0E00},
+ {0xBE36, 0x1036}, {0xBE37, 0x1036}, {0x800D, 0x00FF}, {0xBE4D, 0x00FF},
+};
+
+/* DGN3500, "netgear,dgn3500", "netgear,dgn3500b" */
+static const struct rtl8366rb_jam_tbl_entry rtl8366rb_init_jam_dgn3500[] = {
+ {0x0000, 0x0830}, {0x0400, 0x8130}, {0x000A, 0x83ED}, {0x0F51, 0x0017},
+ {0x02F5, 0x0048}, {0x02FA, 0xFFDF}, {0x02FB, 0xFFE0}, {0x0450, 0x0000},
+ {0x0401, 0x0000}, {0x0431, 0x0960},
+};
+
+/* This jam table activates "green ethernet", a low-power mode which is
+ * claimed to detect the cable length and use no more power than
+ * necessary; the ports should enter power-saving mode 10 seconds after
+ * a cable is disconnected. The table seems to always be the same.
+ */
+static const struct rtl8366rb_jam_tbl_entry rtl8366rb_green_jam[] = {
+ {0xBE78, 0x323C}, {0xBE77, 0x5000}, {0xBE2E, 0x7BA7},
+ {0xBE59, 0x3459}, {0xBE5A, 0x745A}, {0xBE5B, 0x785C},
+ {0xBE5C, 0x785C}, {0xBE6E, 0xE120}, {0xBE79, 0x323C},
+};
+
+/* Function that jams the tables in the proper registers */
+static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table,
+ int jam_size, struct realtek_priv *priv,
+ bool write_dbg)
+{
+ u32 val;
+ int ret;
+ int i;
+
+ for (i = 0; i < jam_size; i++) {
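+		/* Registers matching 0xBExx appear to be internal PHY
+		 * registers: if the PHY access engine is idle, arm a
+		 * write command before jamming the value.
+		 */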
+ if ((jam_table[i].reg & 0xBE00) == 0xBE00) {
+ ret = regmap_read(priv->map,
+ RTL8366RB_PHY_ACCESS_BUSY_REG,
+ &val);
+ if (ret)
+ return ret;
+ if (!(val & RTL8366RB_PHY_INT_BUSY)) {
+ ret = regmap_write(priv->map,
+ RTL8366RB_PHY_ACCESS_CTRL_REG,
+ RTL8366RB_PHY_CTRL_WRITE);
+ if (ret)
+ return ret;
+ }
+ }
+ if (write_dbg)
+ dev_dbg(priv->dev, "jam %04x into register %04x\n",
+ jam_table[i].val,
+ jam_table[i].reg);
+ ret = regmap_write(priv->map,
+ jam_table[i].reg,
+ jam_table[i].val);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static int rtl8366rb_setup(struct dsa_switch *ds)
+{
+ struct realtek_priv *priv = ds->priv;
+ const struct rtl8366rb_jam_tbl_entry *jam_table;
+ struct rtl8366rb *rb;
+ u32 chip_ver = 0;
+ u32 chip_id = 0;
+ int jam_size;
+ u32 val;
+ int ret;
+ int i;
+
+ rb = priv->chip_data;
+
+ ret = regmap_read(priv->map, RTL8366RB_CHIP_ID_REG, &chip_id);
+ if (ret) {
+ dev_err(priv->dev, "unable to read chip id\n");
+ return ret;
+ }
+
+ switch (chip_id) {
+ case RTL8366RB_CHIP_ID_8366:
+ break;
+ default:
+ dev_err(priv->dev, "unknown chip id (%04x)\n", chip_id);
+ return -ENODEV;
+ }
+
+ ret = regmap_read(priv->map, RTL8366RB_CHIP_VERSION_CTRL_REG,
+ &chip_ver);
+ if (ret) {
+ dev_err(priv->dev, "unable to read chip version\n");
+ return ret;
+ }
+
+ dev_info(priv->dev, "RTL%04x ver %u chip found\n",
+ chip_id, chip_ver & RTL8366RB_CHIP_VERSION_MASK);
+
+ /* Do the init dance using the right jam table */
+ switch (chip_ver) {
+ case 0:
+ jam_table = rtl8366rb_init_jam_ver_0;
+ jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_0);
+ break;
+ case 1:
+ jam_table = rtl8366rb_init_jam_ver_1;
+ jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_1);
+ break;
+ case 2:
+ jam_table = rtl8366rb_init_jam_ver_2;
+ jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_2);
+ break;
+ default:
+ jam_table = rtl8366rb_init_jam_ver_3;
+ jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_3);
+ break;
+ }
+
+ /* Special jam tables for special routers
+ * TODO: are these necessary? Maintainers, please test
+ * without them, using just the off-the-shelf tables.
+ */
+ if (of_machine_is_compatible("belkin,f5d8235-v1")) {
+ jam_table = rtl8366rb_init_jam_f5d8235;
+ jam_size = ARRAY_SIZE(rtl8366rb_init_jam_f5d8235);
+ }
+ if (of_machine_is_compatible("netgear,dgn3500") ||
+ of_machine_is_compatible("netgear,dgn3500b")) {
+ jam_table = rtl8366rb_init_jam_dgn3500;
+ jam_size = ARRAY_SIZE(rtl8366rb_init_jam_dgn3500);
+ }
+
+ ret = rtl8366rb_jam_table(jam_table, jam_size, priv, true);
+ if (ret)
+ return ret;
+
+	/* Isolate all user ports so each can only send packets to itself and the CPU port */
+ for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) {
+ ret = regmap_write(priv->map, RTL8366RB_PORT_ISO(i),
+ RTL8366RB_PORT_ISO_PORTS(BIT(RTL8366RB_PORT_NUM_CPU)) |
+ RTL8366RB_PORT_ISO_EN);
+ if (ret)
+ return ret;
+ }
+ /* CPU port can send packets to all ports */
+ ret = regmap_write(priv->map, RTL8366RB_PORT_ISO(RTL8366RB_PORT_NUM_CPU),
+ RTL8366RB_PORT_ISO_PORTS(dsa_user_ports(ds)) |
+ RTL8366RB_PORT_ISO_EN);
+ if (ret)
+ return ret;
+
+ /* Set up the "green ethernet" feature */
+ ret = rtl8366rb_jam_table(rtl8366rb_green_jam,
+ ARRAY_SIZE(rtl8366rb_green_jam), priv, false);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(priv->map,
+ RTL8366RB_GREEN_FEATURE_REG,
+ (chip_ver == 1) ? 0x0007 : 0x0003);
+ if (ret)
+ return ret;
+
+ /* Vendor driver sets 0x240 in registers 0xc and 0xd (undocumented) */
+ ret = regmap_write(priv->map, 0x0c, 0x240);
+ if (ret)
+ return ret;
+ ret = regmap_write(priv->map, 0x0d, 0x240);
+ if (ret)
+ return ret;
+
+ /* Set some random MAC address */
+ ret = rtl8366rb_set_addr(priv);
+ if (ret)
+ return ret;
+
+	/* Enable CPU port with custom DSA tag 0x8899.
+	 *
+	 * If you set RTL8368RB_CPU_NO_TAG (bit 15) in this register,
+	 * the custom tag is turned off.
+	 */
+ ret = regmap_update_bits(priv->map, RTL8368RB_CPU_CTRL_REG,
+ 0xFFFF,
+ BIT(priv->cpu_port));
+ if (ret)
+ return ret;
+
+ /* Make sure we default-enable the fixed CPU port */
+ ret = regmap_update_bits(priv->map, RTL8366RB_PECR,
+ BIT(priv->cpu_port),
+ 0);
+ if (ret)
+ return ret;
+
+ /* Set maximum packet length to 1536 bytes */
+ ret = regmap_update_bits(priv->map, RTL8366RB_SGCR,
+ RTL8366RB_SGCR_MAX_LENGTH_MASK,
+ RTL8366RB_SGCR_MAX_LENGTH_1536);
+ if (ret)
+ return ret;
+ for (i = 0; i < RTL8366RB_NUM_PORTS; i++)
+ /* layer 2 size, see rtl8366rb_change_mtu() */
+ rb->max_mtu[i] = 1532;
+
+ /* Disable learning for all ports */
+ ret = regmap_write(priv->map, RTL8366RB_PORT_LEARNDIS_CTRL,
+ RTL8366RB_PORT_ALL);
+ if (ret)
+ return ret;
+
+ /* Enable auto ageing for all ports */
+ ret = regmap_write(priv->map, RTL8366RB_SECURITY_CTRL, 0);
+ if (ret)
+ return ret;
+
+	/* Port 4 setup: this enables Port 4, usually the WAN port.
+	 * The common PHY IO mode is apparently mode 0, which is not
+	 * what the port is initialized to. There is no explanation of
+	 * the IO modes in the Realtek source code; if your WAN port is
+	 * connected to something exotic such as fiber, this might be
+	 * worth experimenting with.
+	 */
+ ret = regmap_update_bits(priv->map, RTL8366RB_PMC0,
+ RTL8366RB_PMC0_P4_IOMODE_MASK,
+ 0 << RTL8366RB_PMC0_P4_IOMODE_SHIFT);
+ if (ret)
+ return ret;
+
+ /* Accept all packets by default, we enable filtering on-demand */
+ ret = regmap_write(priv->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
+ 0);
+ if (ret)
+ return ret;
+ ret = regmap_write(priv->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
+ 0);
+ if (ret)
+ return ret;
+
+ /* Don't drop packets whose DA has not been learned */
+ ret = regmap_update_bits(priv->map, RTL8366RB_SSCR2,
+ RTL8366RB_SSCR2_DROP_UNKNOWN_DA, 0);
+ if (ret)
+ return ret;
+
+ /* Set blinking, TODO: make this configurable */
+ ret = regmap_update_bits(priv->map, RTL8366RB_LED_BLINKRATE_REG,
+ RTL8366RB_LED_BLINKRATE_MASK,
+ RTL8366RB_LED_BLINKRATE_56MS);
+ if (ret)
+ return ret;
+
+	/* Set up LED activity:
+	 * Each port has 4 LEDs; we configure all ports to the same
+	 * behaviour (no individual config) but can set up each LED
+	 * separately.
+	 */
+ if (priv->leds_disabled) {
+ /* Turn everything off */
+ regmap_update_bits(priv->map,
+ RTL8366RB_LED_0_1_CTRL_REG,
+ 0x0FFF, 0);
+ regmap_update_bits(priv->map,
+ RTL8366RB_LED_2_3_CTRL_REG,
+ 0x0FFF, 0);
+ regmap_update_bits(priv->map,
+ RTL8366RB_INTERRUPT_CONTROL_REG,
+ RTL8366RB_P4_RGMII_LED,
+ 0);
+ val = RTL8366RB_LED_OFF;
+ } else {
+ /* TODO: make this configurable per LED */
+ val = RTL8366RB_LED_FORCE;
+ }
+ for (i = 0; i < 4; i++) {
+ ret = regmap_update_bits(priv->map,
+ RTL8366RB_LED_CTRL_REG,
+ 0xf << (i * 4),
+ val << (i * 4));
+ if (ret)
+ return ret;
+ }
+
+ ret = rtl8366_reset_vlan(priv);
+ if (ret)
+ return ret;
+
+ ret = rtl8366rb_setup_cascaded_irq(priv);
+ if (ret)
+ dev_info(priv->dev, "no interrupt support\n");
+
+ if (priv->setup_interface) {
+ ret = priv->setup_interface(ds);
+ if (ret) {
+ dev_err(priv->dev, "could not set up MDIO bus\n");
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+static enum dsa_tag_protocol rtl8366_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol mp)
+{
+	/* This switch uses the 4-byte protocol A Realtek DSA tag */
+ return DSA_TAG_PROTO_RTL4_A;
+}
+
+static void
+rtl8366rb_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
+ phy_interface_t interface, struct phy_device *phydev,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
+{
+ struct realtek_priv *priv = ds->priv;
+ int ret;
+
+ if (port != priv->cpu_port)
+ return;
+
+ dev_dbg(priv->dev, "MAC link up on CPU port (%d)\n", port);
+
+ /* Force the fixed CPU port into 1Gbit mode, no autonegotiation */
+ ret = regmap_update_bits(priv->map, RTL8366RB_MAC_FORCE_CTRL_REG,
+ BIT(port), BIT(port));
+ if (ret) {
+ dev_err(priv->dev, "failed to force 1Gbit on CPU port\n");
+ return;
+ }
+
+ ret = regmap_update_bits(priv->map, RTL8366RB_PAACR2,
+ 0xFF00U,
+ RTL8366RB_PAACR_CPU_PORT << 8);
+ if (ret) {
+ dev_err(priv->dev, "failed to set PAACR on CPU port\n");
+ return;
+ }
+
+ /* Enable the CPU port */
+ ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port),
+ 0);
+ if (ret) {
+ dev_err(priv->dev, "failed to enable the CPU port\n");
+ return;
+ }
+}
+
+static void
+rtl8366rb_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct realtek_priv *priv = ds->priv;
+ int ret;
+
+ if (port != priv->cpu_port)
+ return;
+
+ dev_dbg(priv->dev, "MAC link down on CPU port (%d)\n", port);
+
+ /* Disable the CPU port */
+ ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port),
+ BIT(port));
+ if (ret) {
+ dev_err(priv->dev, "failed to disable the CPU port\n");
+ return;
+ }
+}
+
+static void rtl8366rb_set_port_led(struct realtek_priv *priv,
+				    int port, bool enable)
+{
+ u16 val = enable ? 0x3f : 0;
+ int ret;
+
+ if (priv->leds_disabled)
+ return;
+
+ switch (port) {
+ case 0:
+ ret = regmap_update_bits(priv->map,
+ RTL8366RB_LED_0_1_CTRL_REG,
+ 0x3F, val);
+ break;
+ case 1:
+ ret = regmap_update_bits(priv->map,
+ RTL8366RB_LED_0_1_CTRL_REG,
+ 0x3F << RTL8366RB_LED_1_OFFSET,
+ val << RTL8366RB_LED_1_OFFSET);
+ break;
+ case 2:
+ ret = regmap_update_bits(priv->map,
+ RTL8366RB_LED_2_3_CTRL_REG,
+ 0x3F, val);
+ break;
+ case 3:
+ ret = regmap_update_bits(priv->map,
+ RTL8366RB_LED_2_3_CTRL_REG,
+ 0x3F << RTL8366RB_LED_3_OFFSET,
+ val << RTL8366RB_LED_3_OFFSET);
+ break;
+ case 4:
+ ret = regmap_update_bits(priv->map,
+ RTL8366RB_INTERRUPT_CONTROL_REG,
+ RTL8366RB_P4_RGMII_LED,
+ enable ? RTL8366RB_P4_RGMII_LED : 0);
+ break;
+ default:
+ dev_err(priv->dev, "no LED for port %d\n", port);
+ return;
+ }
+ if (ret)
+ dev_err(priv->dev, "error updating LED on port %d\n", port);
+}
+
+static int
+rtl8366rb_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct realtek_priv *priv = ds->priv;
+ int ret;
+
+ dev_dbg(priv->dev, "enable port %d\n", port);
+ ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port),
+ 0);
+ if (ret)
+ return ret;
+
+	rtl8366rb_set_port_led(priv, port, true);
+ return 0;
+}
+
+static void
+rtl8366rb_port_disable(struct dsa_switch *ds, int port)
+{
+ struct realtek_priv *priv = ds->priv;
+ int ret;
+
+ dev_dbg(priv->dev, "disable port %d\n", port);
+ ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port),
+ BIT(port));
+ if (ret)
+ return;
+
+	rtl8366rb_set_port_led(priv, port, false);
+}
+
+static int
+rtl8366rb_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
+{
+ struct realtek_priv *priv = ds->priv;
+ unsigned int port_bitmap = 0;
+ int ret, i;
+
+ /* Loop over all other ports than the current one */
+ for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) {
+ /* Current port handled last */
+ if (i == port)
+ continue;
+ /* Not on this bridge */
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
+ continue;
+ /* Join this port to each other port on the bridge */
+ ret = regmap_update_bits(priv->map, RTL8366RB_PORT_ISO(i),
+ RTL8366RB_PORT_ISO_PORTS(BIT(port)),
+ RTL8366RB_PORT_ISO_PORTS(BIT(port)));
+ if (ret)
+ dev_err(priv->dev, "failed to join port %d\n", port);
+
+ port_bitmap |= BIT(i);
+ }
+
+ /* Set the bits for the ports we can access */
+ return regmap_update_bits(priv->map, RTL8366RB_PORT_ISO(port),
+ RTL8366RB_PORT_ISO_PORTS(port_bitmap),
+ RTL8366RB_PORT_ISO_PORTS(port_bitmap));
+}
+
+static void
+rtl8366rb_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
+{
+ struct realtek_priv *priv = ds->priv;
+ unsigned int port_bitmap = 0;
+ int ret, i;
+
+ /* Loop over all other ports than this one */
+ for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) {
+ /* Current port handled last */
+ if (i == port)
+ continue;
+ /* Not on this bridge */
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
+ continue;
+ /* Remove this port from any other port on the bridge */
+ ret = regmap_update_bits(priv->map, RTL8366RB_PORT_ISO(i),
+ RTL8366RB_PORT_ISO_PORTS(BIT(port)), 0);
+ if (ret)
+ dev_err(priv->dev, "failed to leave port %d\n", port);
+
+ port_bitmap |= BIT(i);
+ }
+
+	/* Clear the bits for the ports we cannot access, leave ourselves */
+ regmap_update_bits(priv->map, RTL8366RB_PORT_ISO(port),
+ RTL8366RB_PORT_ISO_PORTS(port_bitmap), 0);
+}
+
+/**
+ * rtl8366rb_drop_untagged() - make the switch drop untagged and C-tagged frames
+ * @priv: SMI state container
+ * @port: the port to drop untagged and C-tagged frames on
+ * @drop: whether to drop or pass untagged and C-tagged frames
+ *
+ * Return: zero for success, a negative number on error.
+ */
+static int rtl8366rb_drop_untagged(struct realtek_priv *priv, int port, bool drop)
+{
+ return regmap_update_bits(priv->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
+ RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port),
+ drop ? RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port) : 0);
+}
+
+static int rtl8366rb_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering,
+ struct netlink_ext_ack *extack)
+{
+ struct realtek_priv *priv = ds->priv;
+ struct rtl8366rb *rb;
+ int ret;
+
+ rb = priv->chip_data;
+
+ dev_dbg(priv->dev, "port %d: %s VLAN filtering\n", port,
+ vlan_filtering ? "enable" : "disable");
+
+ /* If the port is not in the member set, the frame will be dropped */
+ ret = regmap_update_bits(priv->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
+ BIT(port), vlan_filtering ? BIT(port) : 0);
+ if (ret)
+ return ret;
+
+ /* If VLAN filtering is enabled and PVID is also enabled, we must
+ * not drop any untagged or C-tagged frames. If we turn off VLAN
+ * filtering on a port, we need to accept any frames.
+ */
+ if (vlan_filtering)
+ ret = rtl8366rb_drop_untagged(priv, port, !rb->pvid_enabled[port]);
+ else
+ ret = rtl8366rb_drop_untagged(priv, port, false);
+
+ return ret;
+}
+
+static int
+rtl8366rb_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ /* We support enabling/disabling learning */
+ if (flags.mask & ~(BR_LEARNING))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+rtl8366rb_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct realtek_priv *priv = ds->priv;
+ int ret;
+
+ if (flags.mask & BR_LEARNING) {
+ ret = regmap_update_bits(priv->map, RTL8366RB_PORT_LEARNDIS_CTRL,
+ BIT(port),
+ (flags.val & BR_LEARNING) ? 0 : BIT(port));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+rtl8366rb_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct realtek_priv *priv = ds->priv;
+ u32 val;
+ int i;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ val = RTL8366RB_STP_STATE_DISABLED;
+ break;
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ val = RTL8366RB_STP_STATE_BLOCKING;
+ break;
+ case BR_STATE_LEARNING:
+ val = RTL8366RB_STP_STATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ val = RTL8366RB_STP_STATE_FORWARDING;
+ break;
+ default:
+ dev_err(priv->dev, "unknown bridge state requested\n");
+ return;
+ }
+
+ /* Set the same status for the port on all the FIDs */
+ for (i = 0; i < RTL8366RB_NUM_FIDS; i++) {
+ regmap_update_bits(priv->map, RTL8366RB_STP_STATE_BASE + i,
+ RTL8366RB_STP_STATE_MASK(port),
+ RTL8366RB_STP_STATE(port, val));
+ }
+}
+
+static void
+rtl8366rb_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct realtek_priv *priv = ds->priv;
+
+ /* This will age out any learned L2 entries */
+ regmap_update_bits(priv->map, RTL8366RB_SECURITY_CTRL,
+ BIT(port), BIT(port));
+ /* Restore the normal state of things */
+ regmap_update_bits(priv->map, RTL8366RB_SECURITY_CTRL,
+ BIT(port), 0);
+}
+
+static int rtl8366rb_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct realtek_priv *priv = ds->priv;
+ struct rtl8366rb *rb;
+ unsigned int max_mtu;
+ u32 len;
+ int i;
+
+ /* Cache the per-port MTU setting */
+ rb = priv->chip_data;
+ rb->max_mtu[port] = new_mtu;
+
+	/* Cap the frame length for the entire switch at the largest
+	 * MTU set on any one port: the biggest MTU configured anywhere
+	 * becomes the MTU for the whole switch.
+	 *
+	 * The first setting, 1522 bytes, is a max IP packet of 1500
+	 * bytes, plus the ethernet header and FCS (1518 bytes), plus
+	 * the 4-byte CPU tag. This function should consider the
+	 * parameter an SDU, so the MTU passed for this setting is
+	 * 1518 bytes. The same logic of subtracting the 4-byte DSA
+	 * tag applies to the other settings.
+	 */
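+	/* Resulting mapping from the roofed MTU to the frame length
+	 * setting: <= 1518 -> 1522, <= 1532 -> 1536, <= 1548 -> 1552,
+	 * anything bigger -> 16000.
+	 */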
+ max_mtu = 1518;
+ for (i = 0; i < RTL8366RB_NUM_PORTS; i++) {
+ if (rb->max_mtu[i] > max_mtu)
+ max_mtu = rb->max_mtu[i];
+ }
+ if (max_mtu <= 1518)
+ len = RTL8366RB_SGCR_MAX_LENGTH_1522;
+ else if (max_mtu > 1518 && max_mtu <= 1532)
+ len = RTL8366RB_SGCR_MAX_LENGTH_1536;
+ else if (max_mtu > 1532 && max_mtu <= 1548)
+ len = RTL8366RB_SGCR_MAX_LENGTH_1552;
+ else
+ len = RTL8366RB_SGCR_MAX_LENGTH_16000;
+
+ return regmap_update_bits(priv->map, RTL8366RB_SGCR,
+ RTL8366RB_SGCR_MAX_LENGTH_MASK,
+ len);
+}
+
+static int rtl8366rb_max_mtu(struct dsa_switch *ds, int port)
+{
+	/* The max frame length is 16000 bytes, so after subtracting
+	 * the 4-byte CPU tag the max MTU presented to the system is
+	 * 15996 bytes.
+	 */
+ return 15996;
+}
+
+static int rtl8366rb_get_vlan_4k(struct realtek_priv *priv, u32 vid,
+ struct rtl8366_vlan_4k *vlan4k)
+{
+ u32 data[3];
+ int ret;
+ int i;
+
+	memset(vlan4k, 0, sizeof(struct rtl8366_vlan_4k));
+
+ if (vid >= RTL8366RB_NUM_VIDS)
+ return -EINVAL;
+
+ /* write VID */
+ ret = regmap_write(priv->map, RTL8366RB_VLAN_TABLE_WRITE_BASE,
+ vid & RTL8366RB_VLAN_VID_MASK);
+ if (ret)
+ return ret;
+
+ /* write table access control word */
+ ret = regmap_write(priv->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
+ RTL8366RB_TABLE_VLAN_READ_CTRL);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < 3; i++) {
+ ret = regmap_read(priv->map,
+ RTL8366RB_VLAN_TABLE_READ_BASE + i,
+ &data[i]);
+ if (ret)
+ return ret;
+ }
+
+ vlan4k->vid = vid;
+ vlan4k->untag = (data[1] >> RTL8366RB_VLAN_UNTAG_SHIFT) &
+ RTL8366RB_VLAN_UNTAG_MASK;
+ vlan4k->member = data[1] & RTL8366RB_VLAN_MEMBER_MASK;
+ vlan4k->fid = data[2] & RTL8366RB_VLAN_FID_MASK;
+
+ return 0;
+}
+
+static int rtl8366rb_set_vlan_4k(struct realtek_priv *priv,
+ const struct rtl8366_vlan_4k *vlan4k)
+{
+ u32 data[3];
+ int ret;
+ int i;
+
+ if (vlan4k->vid >= RTL8366RB_NUM_VIDS ||
+ vlan4k->member > RTL8366RB_VLAN_MEMBER_MASK ||
+ vlan4k->untag > RTL8366RB_VLAN_UNTAG_MASK ||
+ vlan4k->fid > RTL8366RB_FIDMAX)
+ return -EINVAL;
+
+ data[0] = vlan4k->vid & RTL8366RB_VLAN_VID_MASK;
+ data[1] = (vlan4k->member & RTL8366RB_VLAN_MEMBER_MASK) |
+ ((vlan4k->untag & RTL8366RB_VLAN_UNTAG_MASK) <<
+ RTL8366RB_VLAN_UNTAG_SHIFT);
+ data[2] = vlan4k->fid & RTL8366RB_VLAN_FID_MASK;
+
+ for (i = 0; i < 3; i++) {
+ ret = regmap_write(priv->map,
+ RTL8366RB_VLAN_TABLE_WRITE_BASE + i,
+ data[i]);
+ if (ret)
+ return ret;
+ }
+
+ /* write table access control word */
+ ret = regmap_write(priv->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
+ RTL8366RB_TABLE_VLAN_WRITE_CTRL);
+
+ return ret;
+}
+
+static int rtl8366rb_get_vlan_mc(struct realtek_priv *priv, u32 index,
+ struct rtl8366_vlan_mc *vlanmc)
+{
+ u32 data[3];
+ int ret;
+ int i;
+
+	memset(vlanmc, 0, sizeof(struct rtl8366_vlan_mc));
+
+ if (index >= RTL8366RB_NUM_VLANS)
+ return -EINVAL;
+
+ for (i = 0; i < 3; i++) {
+ ret = regmap_read(priv->map,
+ RTL8366RB_VLAN_MC_BASE(index) + i,
+ &data[i]);
+ if (ret)
+ return ret;
+ }
+
+ vlanmc->vid = data[0] & RTL8366RB_VLAN_VID_MASK;
+ vlanmc->priority = (data[0] >> RTL8366RB_VLAN_PRIORITY_SHIFT) &
+ RTL8366RB_VLAN_PRIORITY_MASK;
+ vlanmc->untag = (data[1] >> RTL8366RB_VLAN_UNTAG_SHIFT) &
+ RTL8366RB_VLAN_UNTAG_MASK;
+ vlanmc->member = data[1] & RTL8366RB_VLAN_MEMBER_MASK;
+ vlanmc->fid = data[2] & RTL8366RB_VLAN_FID_MASK;
+
+ return 0;
+}
+
+static int rtl8366rb_set_vlan_mc(struct realtek_priv *priv, u32 index,
+ const struct rtl8366_vlan_mc *vlanmc)
+{
+ u32 data[3];
+ int ret;
+ int i;
+
+ if (index >= RTL8366RB_NUM_VLANS ||
+ vlanmc->vid >= RTL8366RB_NUM_VIDS ||
+ vlanmc->priority > RTL8366RB_PRIORITYMAX ||
+ vlanmc->member > RTL8366RB_VLAN_MEMBER_MASK ||
+ vlanmc->untag > RTL8366RB_VLAN_UNTAG_MASK ||
+ vlanmc->fid > RTL8366RB_FIDMAX)
+ return -EINVAL;
+
+ data[0] = (vlanmc->vid & RTL8366RB_VLAN_VID_MASK) |
+ ((vlanmc->priority & RTL8366RB_VLAN_PRIORITY_MASK) <<
+ RTL8366RB_VLAN_PRIORITY_SHIFT);
+ data[1] = (vlanmc->member & RTL8366RB_VLAN_MEMBER_MASK) |
+ ((vlanmc->untag & RTL8366RB_VLAN_UNTAG_MASK) <<
+ RTL8366RB_VLAN_UNTAG_SHIFT);
+ data[2] = vlanmc->fid & RTL8366RB_VLAN_FID_MASK;
+
+ for (i = 0; i < 3; i++) {
+ ret = regmap_write(priv->map,
+ RTL8366RB_VLAN_MC_BASE(index) + i,
+ data[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rtl8366rb_get_mc_index(struct realtek_priv *priv, int port, int *val)
+{
+ u32 data;
+ int ret;
+
+ if (port >= priv->num_ports)
+ return -EINVAL;
+
+ ret = regmap_read(priv->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
+ &data);
+ if (ret)
+ return ret;
+
+ *val = (data >> RTL8366RB_PORT_VLAN_CTRL_SHIFT(port)) &
+ RTL8366RB_PORT_VLAN_CTRL_MASK;
+
+ return 0;
+}
+
+static int rtl8366rb_set_mc_index(struct realtek_priv *priv, int port, int index)
+{
+ struct rtl8366rb *rb;
+ bool pvid_enabled;
+ int ret;
+
+ rb = priv->chip_data;
+ pvid_enabled = !!index;
+
+ if (port >= priv->num_ports || index >= RTL8366RB_NUM_VLANS)
+ return -EINVAL;
+
+ ret = regmap_update_bits(priv->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
+ RTL8366RB_PORT_VLAN_CTRL_MASK <<
+ RTL8366RB_PORT_VLAN_CTRL_SHIFT(port),
+ (index & RTL8366RB_PORT_VLAN_CTRL_MASK) <<
+ RTL8366RB_PORT_VLAN_CTRL_SHIFT(port));
+ if (ret)
+ return ret;
+
+ rb->pvid_enabled[port] = pvid_enabled;
+
+ /* If VLAN filtering is enabled and PVID is also enabled, we must
+ * not drop any untagged or C-tagged frames. Make sure to update the
+ * filtering setting.
+ */
+ if (dsa_port_is_vlan_filtering(dsa_to_port(priv->ds, port)))
+ ret = rtl8366rb_drop_untagged(priv, port, !pvid_enabled);
+
+ return ret;
+}
+
+static bool rtl8366rb_is_vlan_valid(struct realtek_priv *priv, unsigned int vlan)
+{
+ unsigned int max = RTL8366RB_NUM_VLANS - 1;
+
+ if (priv->vlan4k_enabled)
+ max = RTL8366RB_NUM_VIDS - 1;
+
+ if (vlan > max)
+ return false;
+
+ return true;
+}
+
+static int rtl8366rb_enable_vlan(struct realtek_priv *priv, bool enable)
+{
+ dev_dbg(priv->dev, "%s VLAN\n", enable ? "enable" : "disable");
+ return regmap_update_bits(priv->map,
+ RTL8366RB_SGCR, RTL8366RB_SGCR_EN_VLAN,
+ enable ? RTL8366RB_SGCR_EN_VLAN : 0);
+}
+
+static int rtl8366rb_enable_vlan4k(struct realtek_priv *priv, bool enable)
+{
+ dev_dbg(priv->dev, "%s VLAN 4k\n", enable ? "enable" : "disable");
+ return regmap_update_bits(priv->map, RTL8366RB_SGCR,
+ RTL8366RB_SGCR_EN_VLAN_4KTB,
+ enable ? RTL8366RB_SGCR_EN_VLAN_4KTB : 0);
+}
+
+static int rtl8366rb_phy_read(struct realtek_priv *priv, int phy, int regnum)
+{
+ u32 val;
+ u32 reg;
+ int ret;
+
+ if (phy > RTL8366RB_PHY_NO_MAX)
+ return -EINVAL;
+
+ mutex_lock(&priv->map_lock);
+
+ ret = regmap_write(priv->map_nolock, RTL8366RB_PHY_ACCESS_CTRL_REG,
+ RTL8366RB_PHY_CTRL_READ);
+ if (ret)
+ goto out;
+
+ reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum;
+
+ ret = regmap_write(priv->map_nolock, reg, 0);
+ if (ret) {
+ dev_err(priv->dev,
+ "failed to write PHY%d reg %04x @ %04x, ret %d\n",
+ phy, regnum, reg, ret);
+ goto out;
+ }
+
+ ret = regmap_read(priv->map_nolock, RTL8366RB_PHY_ACCESS_DATA_REG,
+ &val);
+ if (ret)
+ goto out;
+
+ ret = val;
+
+ dev_dbg(priv->dev, "read PHY%d register 0x%04x @ %08x, val <- %04x\n",
+ phy, regnum, reg, val);
+
+out:
+ mutex_unlock(&priv->map_lock);
+
+ return ret;
+}
+
+static int rtl8366rb_phy_write(struct realtek_priv *priv, int phy, int regnum,
+ u16 val)
+{
+ u32 reg;
+ int ret;
+
+ if (phy > RTL8366RB_PHY_NO_MAX)
+ return -EINVAL;
+
+ mutex_lock(&priv->map_lock);
+
+ ret = regmap_write(priv->map_nolock, RTL8366RB_PHY_ACCESS_CTRL_REG,
+ RTL8366RB_PHY_CTRL_WRITE);
+ if (ret)
+ goto out;
+
+ reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum;
+
+ dev_dbg(priv->dev, "write PHY%d register 0x%04x @ %04x, val -> %04x\n",
+ phy, regnum, reg, val);
+
+ ret = regmap_write(priv->map_nolock, reg, val);
+
+out:
+ mutex_unlock(&priv->map_lock);
+
+ return ret;
+}
+
+static int rtl8366rb_dsa_phy_read(struct dsa_switch *ds, int phy, int regnum)
+{
+ return rtl8366rb_phy_read(ds->priv, phy, regnum);
+}
+
+static int rtl8366rb_dsa_phy_write(struct dsa_switch *ds, int phy, int regnum,
+ u16 val)
+{
+ return rtl8366rb_phy_write(ds->priv, phy, regnum, val);
+}
+
+static int rtl8366rb_reset_chip(struct realtek_priv *priv)
+{
+ int timeout = 10;
+ u32 val;
+ int ret;
+
+ priv->write_reg_noack(priv, RTL8366RB_RESET_CTRL_REG,
+ RTL8366RB_CHIP_CTRL_RESET_HW);
+ do {
+ usleep_range(20000, 25000);
+ ret = regmap_read(priv->map, RTL8366RB_RESET_CTRL_REG, &val);
+ if (ret)
+ return ret;
+
+ if (!(val & RTL8366RB_CHIP_CTRL_RESET_HW))
+ break;
+ } while (--timeout);
+
+ if (!timeout) {
+ dev_err(priv->dev, "timeout waiting for the switch to reset\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int rtl8366rb_detect(struct realtek_priv *priv)
+{
+ struct device *dev = priv->dev;
+ int ret;
+ u32 val;
+
+ /* Detect device */
+ ret = regmap_read(priv->map, 0x5c, &val);
+ if (ret) {
+ dev_err(dev, "can't get chip ID (%d)\n", ret);
+ return ret;
+ }
+
+ switch (val) {
+ case 0x6027:
+ dev_info(dev, "found an RTL8366S switch\n");
+ dev_err(dev, "this switch is not yet supported, submit patches!\n");
+ return -ENODEV;
+ case 0x5937:
+ dev_info(dev, "found an RTL8366RB switch\n");
+ priv->cpu_port = RTL8366RB_PORT_NUM_CPU;
+ priv->num_ports = RTL8366RB_NUM_PORTS;
+ priv->num_vlan_mc = RTL8366RB_NUM_VLANS;
+ priv->mib_counters = rtl8366rb_mib_counters;
+ priv->num_mib_counters = ARRAY_SIZE(rtl8366rb_mib_counters);
+ break;
+ default:
+ dev_info(dev, "found an Unknown Realtek switch (id=0x%04x)\n",
+ val);
+ break;
+ }
+
+ ret = rtl8366rb_reset_chip(priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct dsa_switch_ops rtl8366rb_switch_ops_smi = {
+ .get_tag_protocol = rtl8366_get_tag_protocol,
+ .setup = rtl8366rb_setup,
+ .phylink_mac_link_up = rtl8366rb_mac_link_up,
+ .phylink_mac_link_down = rtl8366rb_mac_link_down,
+ .get_strings = rtl8366_get_strings,
+ .get_ethtool_stats = rtl8366_get_ethtool_stats,
+ .get_sset_count = rtl8366_get_sset_count,
+ .port_bridge_join = rtl8366rb_port_bridge_join,
+ .port_bridge_leave = rtl8366rb_port_bridge_leave,
+ .port_vlan_filtering = rtl8366rb_vlan_filtering,
+ .port_vlan_add = rtl8366_vlan_add,
+ .port_vlan_del = rtl8366_vlan_del,
+ .port_enable = rtl8366rb_port_enable,
+ .port_disable = rtl8366rb_port_disable,
+ .port_pre_bridge_flags = rtl8366rb_port_pre_bridge_flags,
+ .port_bridge_flags = rtl8366rb_port_bridge_flags,
+ .port_stp_state_set = rtl8366rb_port_stp_state_set,
+ .port_fast_age = rtl8366rb_port_fast_age,
+ .port_change_mtu = rtl8366rb_change_mtu,
+ .port_max_mtu = rtl8366rb_max_mtu,
+};
+
+static const struct dsa_switch_ops rtl8366rb_switch_ops_mdio = {
+ .get_tag_protocol = rtl8366_get_tag_protocol,
+ .setup = rtl8366rb_setup,
+ .phy_read = rtl8366rb_dsa_phy_read,
+ .phy_write = rtl8366rb_dsa_phy_write,
+ .phylink_mac_link_up = rtl8366rb_mac_link_up,
+ .phylink_mac_link_down = rtl8366rb_mac_link_down,
+ .get_strings = rtl8366_get_strings,
+ .get_ethtool_stats = rtl8366_get_ethtool_stats,
+ .get_sset_count = rtl8366_get_sset_count,
+ .port_bridge_join = rtl8366rb_port_bridge_join,
+ .port_bridge_leave = rtl8366rb_port_bridge_leave,
+ .port_vlan_filtering = rtl8366rb_vlan_filtering,
+ .port_vlan_add = rtl8366_vlan_add,
+ .port_vlan_del = rtl8366_vlan_del,
+ .port_enable = rtl8366rb_port_enable,
+ .port_disable = rtl8366rb_port_disable,
+ .port_pre_bridge_flags = rtl8366rb_port_pre_bridge_flags,
+ .port_bridge_flags = rtl8366rb_port_bridge_flags,
+ .port_stp_state_set = rtl8366rb_port_stp_state_set,
+ .port_fast_age = rtl8366rb_port_fast_age,
+ .port_change_mtu = rtl8366rb_change_mtu,
+ .port_max_mtu = rtl8366rb_max_mtu,
+};
+
+static const struct realtek_ops rtl8366rb_ops = {
+ .detect = rtl8366rb_detect,
+ .get_vlan_mc = rtl8366rb_get_vlan_mc,
+ .set_vlan_mc = rtl8366rb_set_vlan_mc,
+ .get_vlan_4k = rtl8366rb_get_vlan_4k,
+ .set_vlan_4k = rtl8366rb_set_vlan_4k,
+ .get_mc_index = rtl8366rb_get_mc_index,
+ .set_mc_index = rtl8366rb_set_mc_index,
+ .get_mib_counter = rtl8366rb_get_mib_counter,
+ .is_vlan_valid = rtl8366rb_is_vlan_valid,
+ .enable_vlan = rtl8366rb_enable_vlan,
+ .enable_vlan4k = rtl8366rb_enable_vlan4k,
+ .phy_read = rtl8366rb_phy_read,
+ .phy_write = rtl8366rb_phy_write,
+};
+
+const struct realtek_variant rtl8366rb_variant = {
+ .ds_ops_smi = &rtl8366rb_switch_ops_smi,
+ .ds_ops_mdio = &rtl8366rb_switch_ops_mdio,
+ .ops = &rtl8366rb_ops,
+ .clk_delay = 10,
+ .cmd_read = 0xa9,
+ .cmd_write = 0xa8,
+ .chip_data_sz = sizeof(struct rtl8366rb),
+};
+EXPORT_SYMBOL_GPL(rtl8366rb_variant);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Driver for RTL8366RB ethernet switch");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c
new file mode 100644
index 000000000..790e177e2
--- /dev/null
+++ b/drivers/net/dsa/rzn1_a5psw.c
@@ -0,0 +1,1109 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Schneider-Electric
+ *
+ * Clément Léger <clement.leger@bootlin.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/if_ether.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <net/dsa.h>
+
+#include "rzn1_a5psw.h"
+
+struct a5psw_stats {
+ u16 offset;
+ const char name[ETH_GSTRING_LEN];
+};
+
+#define STAT_DESC(_offset) { \
+ .offset = A5PSW_##_offset, \
+ .name = __stringify(_offset), \
+}
+
+static const struct a5psw_stats a5psw_stats[] = {
+ STAT_DESC(aFramesTransmittedOK),
+ STAT_DESC(aFramesReceivedOK),
+ STAT_DESC(aFrameCheckSequenceErrors),
+ STAT_DESC(aAlignmentErrors),
+ STAT_DESC(aOctetsTransmittedOK),
+ STAT_DESC(aOctetsReceivedOK),
+ STAT_DESC(aTxPAUSEMACCtrlFrames),
+ STAT_DESC(aRxPAUSEMACCtrlFrames),
+ STAT_DESC(ifInErrors),
+ STAT_DESC(ifOutErrors),
+ STAT_DESC(ifInUcastPkts),
+ STAT_DESC(ifInMulticastPkts),
+ STAT_DESC(ifInBroadcastPkts),
+ STAT_DESC(ifOutDiscards),
+ STAT_DESC(ifOutUcastPkts),
+ STAT_DESC(ifOutMulticastPkts),
+ STAT_DESC(ifOutBroadcastPkts),
+ STAT_DESC(etherStatsDropEvents),
+ STAT_DESC(etherStatsOctets),
+ STAT_DESC(etherStatsPkts),
+ STAT_DESC(etherStatsUndersizePkts),
+ STAT_DESC(etherStatsOversizePkts),
+ STAT_DESC(etherStatsPkts64Octets),
+ STAT_DESC(etherStatsPkts65to127Octets),
+ STAT_DESC(etherStatsPkts128to255Octets),
+ STAT_DESC(etherStatsPkts256to511Octets),
+ STAT_DESC(etherStatsPkts1024to1518Octets),
+ STAT_DESC(etherStatsPkts1519toXOctets),
+ STAT_DESC(etherStatsJabbers),
+ STAT_DESC(etherStatsFragments),
+ STAT_DESC(VLANReceived),
+ STAT_DESC(VLANTransmitted),
+ STAT_DESC(aDeferred),
+ STAT_DESC(aMultipleCollisions),
+ STAT_DESC(aSingleCollisions),
+ STAT_DESC(aLateCollisions),
+ STAT_DESC(aExcessiveCollisions),
+ STAT_DESC(aCarrierSenseErrors),
+};
+
+static void a5psw_reg_writel(struct a5psw *a5psw, int offset, u32 value)
+{
+ writel(value, a5psw->base + offset);
+}
+
+static u32 a5psw_reg_readl(struct a5psw *a5psw, int offset)
+{
+ return readl(a5psw->base + offset);
+}
+
+static void a5psw_reg_rmw(struct a5psw *a5psw, int offset, u32 mask, u32 val)
+{
+ u32 reg;
+
+ spin_lock(&a5psw->reg_lock);
+
+ reg = a5psw_reg_readl(a5psw, offset);
+ reg &= ~mask;
+ reg |= val;
+ a5psw_reg_writel(a5psw, offset, reg);
+
+ spin_unlock(&a5psw->reg_lock);
+}
+
+static enum dsa_tag_protocol a5psw_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol mp)
+{
+ return DSA_TAG_PROTO_RZN1_A5PSW;
+}
+
+static void a5psw_port_pattern_set(struct a5psw *a5psw, int port, int pattern,
+ bool enable)
+{
+ u32 rx_match = 0;
+
+ if (enable)
+ rx_match |= A5PSW_RXMATCH_CONFIG_PATTERN(pattern);
+
+ a5psw_reg_rmw(a5psw, A5PSW_RXMATCH_CONFIG(port),
+ A5PSW_RXMATCH_CONFIG_PATTERN(pattern), rx_match);
+}
+
+static void a5psw_port_mgmtfwd_set(struct a5psw *a5psw, int port, bool enable)
+{
+ /* Enable "management forward" pattern matching, this will forward
+ * packets from this port only towards the management port and thus
+ * isolate the port.
+ */
+ a5psw_port_pattern_set(a5psw, port, A5PSW_PATTERN_MGMTFWD, enable);
+}
+
+static void a5psw_port_tx_enable(struct a5psw *a5psw, int port, bool enable)
+{
+ u32 mask = A5PSW_PORT_ENA_TX(port);
+ u32 reg = enable ? mask : 0;
+
+ /* Even though port TX is disabled through the TXENA bit in the
+ * PORT_ENA register, the port can still send BPDUs. This depends on the
+ * tag configuration added when sending packets from the CPU port to the
+ * switch port. Indeed, when using forced forwarding without filtering,
+ * even disabled ports can send tagged packets. This makes it possible
+ * to implement STP support when ports are in a state where forwarding
+ * traffic should be stopped but BPDUs should still be sent.
+ */
+ a5psw_reg_rmw(a5psw, A5PSW_PORT_ENA, mask, reg);
+}
+
+static void a5psw_port_enable_set(struct a5psw *a5psw, int port, bool enable)
+{
+ u32 port_ena = 0;
+
+ if (enable)
+ port_ena |= A5PSW_PORT_ENA_TX_RX(port);
+
+ a5psw_reg_rmw(a5psw, A5PSW_PORT_ENA, A5PSW_PORT_ENA_TX_RX(port),
+ port_ena);
+}
+
+static int a5psw_lk_execute_ctrl(struct a5psw *a5psw, u32 *ctrl)
+{
+ int ret;
+
+ a5psw_reg_writel(a5psw, A5PSW_LK_ADDR_CTRL, *ctrl);
+
+ ret = readl_poll_timeout(a5psw->base + A5PSW_LK_ADDR_CTRL, *ctrl,
+ !(*ctrl & A5PSW_LK_ADDR_CTRL_BUSY),
+ A5PSW_LK_BUSY_USEC_POLL, A5PSW_CTRL_TIMEOUT);
+ if (ret)
+ dev_err(a5psw->dev, "LK_CTRL timeout waiting for BUSY bit\n");
+
+ return ret;
+}
+
+static void a5psw_port_fdb_flush(struct a5psw *a5psw, int port)
+{
+ u32 ctrl = A5PSW_LK_ADDR_CTRL_DELETE_PORT | BIT(port);
+
+ mutex_lock(&a5psw->lk_lock);
+ a5psw_lk_execute_ctrl(a5psw, &ctrl);
+ mutex_unlock(&a5psw->lk_lock);
+}
+
+static void a5psw_port_authorize_set(struct a5psw *a5psw, int port,
+ bool authorize)
+{
+ u32 reg = a5psw_reg_readl(a5psw, A5PSW_AUTH_PORT(port));
+
+ if (authorize)
+ reg |= A5PSW_AUTH_PORT_AUTHORIZED;
+ else
+ reg &= ~A5PSW_AUTH_PORT_AUTHORIZED;
+
+ a5psw_reg_writel(a5psw, A5PSW_AUTH_PORT(port), reg);
+}
+
+static void a5psw_port_disable(struct dsa_switch *ds, int port)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ a5psw_port_authorize_set(a5psw, port, false);
+ a5psw_port_enable_set(a5psw, port, false);
+}
+
+static int a5psw_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ a5psw_port_authorize_set(a5psw, port, true);
+ a5psw_port_enable_set(a5psw, port, true);
+
+ return 0;
+}
+
+static int a5psw_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ new_mtu += ETH_HLEN + A5PSW_EXTRA_MTU_LEN + ETH_FCS_LEN;
+ a5psw_reg_writel(a5psw, A5PSW_FRM_LENGTH(port), new_mtu);
+
+ return 0;
+}
+
+static int a5psw_port_max_mtu(struct dsa_switch *ds, int port)
+{
+ return A5PSW_MAX_MTU;
+}
+
+static void a5psw_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ unsigned long *intf = config->supported_interfaces;
+
+ config->mac_capabilities = MAC_1000FD;
+
+ if (dsa_is_cpu_port(ds, port)) {
+ /* GMII is used internally and GMAC2 is connected to the switch
+ * using 1000Mbps Full-Duplex mode only (cf ethernet manual)
+ */
+ __set_bit(PHY_INTERFACE_MODE_GMII, intf);
+ } else {
+ config->mac_capabilities |= MAC_100 | MAC_10;
+ phy_interface_set_rgmii(intf);
+ __set_bit(PHY_INTERFACE_MODE_RMII, intf);
+ __set_bit(PHY_INTERFACE_MODE_MII, intf);
+ }
+}
+
+static struct phylink_pcs *
+a5psw_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct a5psw *a5psw = ds->priv;
+
+ if (!dsa_port_is_cpu(dp) && a5psw->pcs[port])
+ return a5psw->pcs[port];
+
+ return NULL;
+}
+
+static void a5psw_phylink_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct a5psw *a5psw = ds->priv;
+ u32 cmd_cfg;
+
+ cmd_cfg = a5psw_reg_readl(a5psw, A5PSW_CMD_CFG(port));
+ cmd_cfg &= ~(A5PSW_CMD_CFG_RX_ENA | A5PSW_CMD_CFG_TX_ENA);
+ a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port), cmd_cfg);
+}
+
+static void a5psw_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ u32 cmd_cfg = A5PSW_CMD_CFG_RX_ENA | A5PSW_CMD_CFG_TX_ENA |
+ A5PSW_CMD_CFG_TX_CRC_APPEND;
+ struct a5psw *a5psw = ds->priv;
+
+ if (speed == SPEED_1000)
+ cmd_cfg |= A5PSW_CMD_CFG_ETH_SPEED;
+
+ if (duplex == DUPLEX_HALF)
+ cmd_cfg |= A5PSW_CMD_CFG_HD_ENA;
+
+ cmd_cfg |= A5PSW_CMD_CFG_CNTL_FRM_ENA;
+
+ if (!rx_pause)
+ cmd_cfg &= ~A5PSW_CMD_CFG_PAUSE_IGNORE;
+
+ a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port), cmd_cfg);
+}
+
+static int a5psw_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
+{
+ struct a5psw *a5psw = ds->priv;
+ unsigned long rate;
+ u64 max, tmp;
+ u32 agetime;
+
+ rate = clk_get_rate(a5psw->clk);
+ max = div64_ul(((u64)A5PSW_LK_AGETIME_MASK * A5PSW_TABLE_ENTRIES * 1024),
+ rate) * 1000;
+ if (msecs > max)
+ return -EINVAL;
+
+ tmp = div_u64(rate, MSEC_PER_SEC);
+ agetime = div_u64(msecs * tmp, 1024 * A5PSW_TABLE_ENTRIES);
+
+ a5psw_reg_writel(a5psw, A5PSW_LK_AGETIME, agetime);
+
+ return 0;
+}
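+
+/* A worked example for the conversion above, assuming a hypothetical
+ * 200 MHz switch clock: one A5PSW_LK_AGETIME tick spans
+ * 1024 * A5PSW_TABLE_ENTRIES clock cycles (~41.9 ms), so a requested
+ * ageing time of 300000 ms programs 300000 * 200000 / 8388608 ~= 7152.
+ */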
+
+static void a5psw_port_learning_set(struct a5psw *a5psw, int port, bool learn)
+{
+ u32 mask = A5PSW_INPUT_LEARN_DIS(port);
+ u32 reg = !learn ? mask : 0;
+
+ a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
+}
+
+static void a5psw_port_rx_block_set(struct a5psw *a5psw, int port, bool block)
+{
+ u32 mask = A5PSW_INPUT_LEARN_BLOCK(port);
+ u32 reg = block ? mask : 0;
+
+ a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
+}
+
+static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port,
+ bool set)
+{
+ u8 offsets[] = {A5PSW_UCAST_DEF_MASK, A5PSW_BCAST_DEF_MASK,
+ A5PSW_MCAST_DEF_MASK};
+ int i;
+
+ if (set)
+ a5psw->bridged_ports |= BIT(port);
+ else
+ a5psw->bridged_ports &= ~BIT(port);
+
+ for (i = 0; i < ARRAY_SIZE(offsets); i++)
+ a5psw_reg_writel(a5psw, offsets[i], a5psw->bridged_ports);
+}
+
+static void a5psw_port_set_standalone(struct a5psw *a5psw, int port,
+ bool standalone)
+{
+ a5psw_port_learning_set(a5psw, port, !standalone);
+ a5psw_flooding_set_resolution(a5psw, port, !standalone);
+ a5psw_port_mgmtfwd_set(a5psw, port, standalone);
+}
+
+static int a5psw_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ /* We only support 1 bridge device */
+ if (a5psw->br_dev && bridge.dev != a5psw->br_dev) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Forwarding offload supported for a single bridge");
+ return -EOPNOTSUPP;
+ }
+
+ a5psw->br_dev = bridge.dev;
+ a5psw_port_set_standalone(a5psw, port, false);
+
+ return 0;
+}
+
+static void a5psw_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ a5psw_port_set_standalone(a5psw, port, true);
+
+ /* No more ports bridged */
+ if (a5psw->bridged_ports == BIT(A5PSW_CPU_PORT))
+ a5psw->br_dev = NULL;
+}
+
+static void a5psw_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ bool learning_enabled, rx_enabled, tx_enabled;
+ struct a5psw *a5psw = ds->priv;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ rx_enabled = false;
+ tx_enabled = false;
+ learning_enabled = false;
+ break;
+ case BR_STATE_LEARNING:
+ rx_enabled = false;
+ tx_enabled = false;
+ learning_enabled = true;
+ break;
+ case BR_STATE_FORWARDING:
+ rx_enabled = true;
+ tx_enabled = true;
+ learning_enabled = true;
+ break;
+ default:
+ dev_err(ds->dev, "invalid STP state: %d\n", state);
+ return;
+ }
+
+ a5psw_port_learning_set(a5psw, port, learning_enabled);
+ a5psw_port_rx_block_set(a5psw, port, !rx_enabled);
+ a5psw_port_tx_enable(a5psw, port, tx_enabled);
+}
+
+static void a5psw_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ a5psw_port_fdb_flush(a5psw, port);
+}
+
+static int a5psw_lk_execute_lookup(struct a5psw *a5psw, union lk_data *lk_data,
+ u16 *entry)
+{
+ u32 ctrl;
+ int ret;
+
+ a5psw_reg_writel(a5psw, A5PSW_LK_DATA_LO, lk_data->lo);
+ a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data->hi);
+
+ ctrl = A5PSW_LK_ADDR_CTRL_LOOKUP;
+ ret = a5psw_lk_execute_ctrl(a5psw, &ctrl);
+ if (ret)
+ return ret;
+
+ *entry = ctrl & A5PSW_LK_ADDR_CTRL_ADDRESS;
+
+ return 0;
+}
+
+static int a5psw_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct a5psw *a5psw = ds->priv;
+ union lk_data lk_data = {0};
+ bool inc_learncount = false;
+ int ret = 0;
+ u16 entry;
+ u32 reg;
+
+ ether_addr_copy(lk_data.entry.mac, addr);
+ lk_data.entry.port_mask = BIT(port);
+
+ mutex_lock(&a5psw->lk_lock);
+
+ /* Set the value to be written in the lookup table */
+ ret = a5psw_lk_execute_lookup(a5psw, &lk_data, &entry);
+ if (ret)
+ goto lk_unlock;
+
+ lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
+ if (!lk_data.entry.valid) {
+ inc_learncount = true;
+ /* port_mask set to 0x1f when entry is not valid, clear it */
+ lk_data.entry.port_mask = 0;
+ lk_data.entry.prio = 0;
+ }
+
+ lk_data.entry.port_mask |= BIT(port);
+ lk_data.entry.is_static = 1;
+ lk_data.entry.valid = 1;
+
+ a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data.hi);
+
+ reg = A5PSW_LK_ADDR_CTRL_WRITE | entry;
+ ret = a5psw_lk_execute_ctrl(a5psw, &reg);
+ if (ret)
+ goto lk_unlock;
+
+ if (inc_learncount) {
+ reg = A5PSW_LK_LEARNCOUNT_MODE_INC;
+ a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
+ }
+
+lk_unlock:
+ mutex_unlock(&a5psw->lk_lock);
+
+ return ret;
+}
+
+static int a5psw_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct a5psw *a5psw = ds->priv;
+ union lk_data lk_data = {0};
+ bool clear = false;
+ u16 entry;
+ u32 reg;
+ int ret;
+
+ ether_addr_copy(lk_data.entry.mac, addr);
+
+ mutex_lock(&a5psw->lk_lock);
+
+ ret = a5psw_lk_execute_lookup(a5psw, &lk_data, &entry);
+ if (ret)
+ goto lk_unlock;
+
+ lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
+
+ /* The hardware does not associate any VID with FDB entries, so if
+ * two entries were added for the same MAC but different VIDs,
+ * deleting the first one would also delete the second one. Since
+ * there is unfortunately nothing we can do about that, do not
+ * return an error.
+ */
+ if (!lk_data.entry.valid)
+ goto lk_unlock;
+
+ lk_data.entry.port_mask &= ~BIT(port);
+ /* If there is no more port in the mask, clear the entry */
+ if (lk_data.entry.port_mask == 0)
+ clear = true;
+
+ a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data.hi);
+
+ reg = entry;
+ if (clear)
+ reg |= A5PSW_LK_ADDR_CTRL_CLEAR;
+ else
+ reg |= A5PSW_LK_ADDR_CTRL_WRITE;
+
+ ret = a5psw_lk_execute_ctrl(a5psw, &reg);
+ if (ret)
+ goto lk_unlock;
+
+ /* Decrement LEARNCOUNT */
+ if (clear) {
+ reg = A5PSW_LK_LEARNCOUNT_MODE_DEC;
+ a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
+ }
+
+lk_unlock:
+ mutex_unlock(&a5psw->lk_lock);
+
+ return ret;
+}
+
+static int a5psw_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct a5psw *a5psw = ds->priv;
+ union lk_data lk_data;
+ int i = 0, ret = 0;
+ u32 reg;
+
+ mutex_lock(&a5psw->lk_lock);
+
+ for (i = 0; i < A5PSW_TABLE_ENTRIES; i++) {
+ reg = A5PSW_LK_ADDR_CTRL_READ | A5PSW_LK_ADDR_CTRL_WAIT | i;
+
+ ret = a5psw_lk_execute_ctrl(a5psw, &reg);
+ if (ret)
+ goto out_unlock;
+
+ lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
+ /* If entry is not valid or does not contain the port, skip */
+ if (!lk_data.entry.valid ||
+ !(lk_data.entry.port_mask & BIT(port)))
+ continue;
+
+ lk_data.lo = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_LO);
+
+ ret = cb(lk_data.entry.mac, 0, lk_data.entry.is_static, data);
+ if (ret)
+ goto out_unlock;
+ }
+
+out_unlock:
+ mutex_unlock(&a5psw->lk_lock);
+
+ return ret;
+}
+
+static u64 a5psw_read_stat(struct a5psw *a5psw, u32 offset, int port)
+{
+ u32 reg_lo, reg_hi;
+
+ reg_lo = a5psw_reg_readl(a5psw, offset + A5PSW_PORT_OFFSET(port));
+ /* A5PSW_STATS_HIWORD is latched on stat read */
+ reg_hi = a5psw_reg_readl(a5psw, A5PSW_STATS_HIWORD);
+
+ return ((u64)reg_hi << 32) | reg_lo;
+}
+
+static void a5psw_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data)
+{
+ unsigned int u;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (u = 0; u < ARRAY_SIZE(a5psw_stats); u++) {
+ memcpy(data + u * ETH_GSTRING_LEN, a5psw_stats[u].name,
+ ETH_GSTRING_LEN);
+ }
+}
+
+static void a5psw_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct a5psw *a5psw = ds->priv;
+ unsigned int u;
+
+ for (u = 0; u < ARRAY_SIZE(a5psw_stats); u++)
+ data[u] = a5psw_read_stat(a5psw, a5psw_stats[u].offset, port);
+}
+
+static int a5psw_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ return ARRAY_SIZE(a5psw_stats);
+}
+
+static void a5psw_get_eth_mac_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct a5psw *a5psw = ds->priv;
+
+#define RD(name) a5psw_read_stat(a5psw, A5PSW_##name, port)
+ mac_stats->FramesTransmittedOK = RD(aFramesTransmittedOK);
+ mac_stats->SingleCollisionFrames = RD(aSingleCollisions);
+ mac_stats->MultipleCollisionFrames = RD(aMultipleCollisions);
+ mac_stats->FramesReceivedOK = RD(aFramesReceivedOK);
+ mac_stats->FrameCheckSequenceErrors = RD(aFrameCheckSequenceErrors);
+ mac_stats->AlignmentErrors = RD(aAlignmentErrors);
+ mac_stats->OctetsTransmittedOK = RD(aOctetsTransmittedOK);
+ mac_stats->FramesWithDeferredXmissions = RD(aDeferred);
+ mac_stats->LateCollisions = RD(aLateCollisions);
+ mac_stats->FramesAbortedDueToXSColls = RD(aExcessiveCollisions);
+ mac_stats->FramesLostDueToIntMACXmitError = RD(ifOutErrors);
+ mac_stats->CarrierSenseErrors = RD(aCarrierSenseErrors);
+ mac_stats->OctetsReceivedOK = RD(aOctetsReceivedOK);
+ mac_stats->FramesLostDueToIntMACRcvError = RD(ifInErrors);
+ mac_stats->MulticastFramesXmittedOK = RD(ifOutMulticastPkts);
+ mac_stats->BroadcastFramesXmittedOK = RD(ifOutBroadcastPkts);
+ mac_stats->FramesWithExcessiveDeferral = RD(aDeferred);
+ mac_stats->MulticastFramesReceivedOK = RD(ifInMulticastPkts);
+ mac_stats->BroadcastFramesReceivedOK = RD(ifInBroadcastPkts);
+#undef RD
+}
+
+static const struct ethtool_rmon_hist_range a5psw_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, A5PSW_MAX_MTU },
+ {}
+};
+
+static void a5psw_get_rmon_stats(struct dsa_switch *ds, int port,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct a5psw *a5psw = ds->priv;
+
+#define RD(name) a5psw_read_stat(a5psw, A5PSW_##name, port)
+ rmon_stats->undersize_pkts = RD(etherStatsUndersizePkts);
+ rmon_stats->oversize_pkts = RD(etherStatsOversizePkts);
+ rmon_stats->fragments = RD(etherStatsFragments);
+ rmon_stats->jabbers = RD(etherStatsJabbers);
+ rmon_stats->hist[0] = RD(etherStatsPkts64Octets);
+ rmon_stats->hist[1] = RD(etherStatsPkts65to127Octets);
+ rmon_stats->hist[2] = RD(etherStatsPkts128to255Octets);
+ rmon_stats->hist[3] = RD(etherStatsPkts256to511Octets);
+ rmon_stats->hist[4] = RD(etherStatsPkts512to1023Octets);
+ rmon_stats->hist[5] = RD(etherStatsPkts1024to1518Octets);
+ rmon_stats->hist[6] = RD(etherStatsPkts1519toXOctets);
+#undef RD
+
+ *ranges = a5psw_rmon_ranges;
+}
+
+static void a5psw_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct a5psw *a5psw = ds->priv;
+ u64 stat;
+
+ stat = a5psw_read_stat(a5psw, A5PSW_aTxPAUSEMACCtrlFrames, port);
+ ctrl_stats->MACControlFramesTransmitted = stat;
+ stat = a5psw_read_stat(a5psw, A5PSW_aRxPAUSEMACCtrlFrames, port);
+ ctrl_stats->MACControlFramesReceived = stat;
+}
+
+static int a5psw_setup(struct dsa_switch *ds)
+{
+ struct a5psw *a5psw = ds->priv;
+ int port, vlan, ret;
+ struct dsa_port *dp;
+ u32 reg;
+
+ /* Validate that there is only 1 CPU port with index A5PSW_CPU_PORT */
+ dsa_switch_for_each_cpu_port(dp, ds) {
+ if (dp->index != A5PSW_CPU_PORT) {
+ dev_err(a5psw->dev, "Invalid CPU port\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Configure management port */
+ reg = A5PSW_CPU_PORT | A5PSW_MGMT_CFG_ENABLE;
+ a5psw_reg_writel(a5psw, A5PSW_MGMT_CFG, reg);
+
+ /* Set pattern 0 to forward all frames to the mgmt port */
+ a5psw_reg_writel(a5psw, A5PSW_PATTERN_CTRL(A5PSW_PATTERN_MGMTFWD),
+ A5PSW_PATTERN_CTRL_MGMTFWD);
+
+ /* Enable port tagging */
+ reg = FIELD_PREP(A5PSW_MGMT_TAG_CFG_TAGFIELD, ETH_P_DSA_A5PSW);
+ reg |= A5PSW_MGMT_TAG_CFG_ENABLE | A5PSW_MGMT_TAG_CFG_ALL_FRAMES;
+ a5psw_reg_writel(a5psw, A5PSW_MGMT_TAG_CFG, reg);
+
+ /* Enable normal switch operation */
+ reg = A5PSW_LK_ADDR_CTRL_BLOCKING | A5PSW_LK_ADDR_CTRL_LEARNING |
+ A5PSW_LK_ADDR_CTRL_AGEING | A5PSW_LK_ADDR_CTRL_ALLOW_MIGR |
+ A5PSW_LK_ADDR_CTRL_CLEAR_TABLE;
+ a5psw_reg_writel(a5psw, A5PSW_LK_CTRL, reg);
+
+ ret = readl_poll_timeout(a5psw->base + A5PSW_LK_CTRL, reg,
+ !(reg & A5PSW_LK_ADDR_CTRL_CLEAR_TABLE),
+ A5PSW_LK_BUSY_USEC_POLL, A5PSW_CTRL_TIMEOUT);
+ if (ret) {
+ dev_err(a5psw->dev, "Failed to clear lookup table\n");
+ return ret;
+ }
+
+ /* Reset learn count to 0 */
+ reg = A5PSW_LK_LEARNCOUNT_MODE_SET;
+ a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
+
+ /* Clear VLAN resource table */
+ reg = A5PSW_VLAN_RES_WR_PORTMASK | A5PSW_VLAN_RES_WR_TAGMASK;
+ for (vlan = 0; vlan < A5PSW_VLAN_COUNT; vlan++)
+ a5psw_reg_writel(a5psw, A5PSW_VLAN_RES(vlan), reg);
+
+ /* Reset all ports */
+ dsa_switch_for_each_port(dp, ds) {
+ port = dp->index;
+
+ /* Reset the port */
+ a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port),
+ A5PSW_CMD_CFG_SW_RESET);
+
+ /* Enable only CPU port */
+ a5psw_port_enable_set(a5psw, port, dsa_port_is_cpu(dp));
+
+ if (dsa_port_is_unused(dp))
+ continue;
+
+ /* Enable egress flooding and learning for CPU port */
+ if (dsa_port_is_cpu(dp)) {
+ a5psw_flooding_set_resolution(a5psw, port, true);
+ a5psw_port_learning_set(a5psw, port, true);
+ }
+
+ /* Enable standalone mode for user ports */
+ if (dsa_port_is_user(dp))
+ a5psw_port_set_standalone(a5psw, port, true);
+ }
+
+ return 0;
+}
+
+static const struct dsa_switch_ops a5psw_switch_ops = {
+ .get_tag_protocol = a5psw_get_tag_protocol,
+ .setup = a5psw_setup,
+ .port_disable = a5psw_port_disable,
+ .port_enable = a5psw_port_enable,
+ .phylink_get_caps = a5psw_phylink_get_caps,
+ .phylink_mac_select_pcs = a5psw_phylink_mac_select_pcs,
+ .phylink_mac_link_down = a5psw_phylink_mac_link_down,
+ .phylink_mac_link_up = a5psw_phylink_mac_link_up,
+ .port_change_mtu = a5psw_port_change_mtu,
+ .port_max_mtu = a5psw_port_max_mtu,
+ .get_sset_count = a5psw_get_sset_count,
+ .get_strings = a5psw_get_strings,
+ .get_ethtool_stats = a5psw_get_ethtool_stats,
+ .get_eth_mac_stats = a5psw_get_eth_mac_stats,
+ .get_eth_ctrl_stats = a5psw_get_eth_ctrl_stats,
+ .get_rmon_stats = a5psw_get_rmon_stats,
+ .set_ageing_time = a5psw_set_ageing_time,
+ .port_bridge_join = a5psw_port_bridge_join,
+ .port_bridge_leave = a5psw_port_bridge_leave,
+ .port_stp_state_set = a5psw_port_stp_state_set,
+ .port_fast_age = a5psw_port_fast_age,
+ .port_fdb_add = a5psw_port_fdb_add,
+ .port_fdb_del = a5psw_port_fdb_del,
+ .port_fdb_dump = a5psw_port_fdb_dump,
+};
+
+static int a5psw_mdio_wait_busy(struct a5psw *a5psw)
+{
+ u32 status;
+ int err;
+
+ err = readl_poll_timeout(a5psw->base + A5PSW_MDIO_CFG_STATUS, status,
+ !(status & A5PSW_MDIO_CFG_STATUS_BUSY), 10,
+ 1000 * USEC_PER_MSEC);
+ if (err)
+ dev_err(a5psw->dev, "MDIO command timeout\n");
+
+ return err;
+}
+
+static int a5psw_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
+{
+ struct a5psw *a5psw = bus->priv;
+ u32 cmd, status;
+ int ret;
+
+ if (phy_reg & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ cmd = A5PSW_MDIO_COMMAND_READ;
+ cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_REG_ADDR, phy_reg);
+ cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_PHY_ADDR, phy_id);
+
+ a5psw_reg_writel(a5psw, A5PSW_MDIO_COMMAND, cmd);
+
+ ret = a5psw_mdio_wait_busy(a5psw);
+ if (ret)
+ return ret;
+
+ ret = a5psw_reg_readl(a5psw, A5PSW_MDIO_DATA) & A5PSW_MDIO_DATA_MASK;
+
+ status = a5psw_reg_readl(a5psw, A5PSW_MDIO_CFG_STATUS);
+ if (status & A5PSW_MDIO_CFG_STATUS_READERR)
+ return -EIO;
+
+ return ret;
+}
+
+static int a5psw_mdio_write(struct mii_bus *bus, int phy_id, int phy_reg,
+ u16 phy_data)
+{
+ struct a5psw *a5psw = bus->priv;
+ u32 cmd;
+
+ if (phy_reg & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ cmd = FIELD_PREP(A5PSW_MDIO_COMMAND_REG_ADDR, phy_reg);
+ cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_PHY_ADDR, phy_id);
+
+ a5psw_reg_writel(a5psw, A5PSW_MDIO_COMMAND, cmd);
+ a5psw_reg_writel(a5psw, A5PSW_MDIO_DATA, phy_data);
+
+ return a5psw_mdio_wait_busy(a5psw);
+}
+
+static int a5psw_mdio_config(struct a5psw *a5psw, u32 mdio_freq)
+{
+ unsigned long rate;
+ unsigned long div;
+ u32 cfgstatus;
+
+ rate = clk_get_rate(a5psw->hclk);
+ div = ((rate / mdio_freq) / 2);
+ if (div > FIELD_MAX(A5PSW_MDIO_CFG_STATUS_CLKDIV) ||
+ div < A5PSW_MDIO_CLK_DIV_MIN) {
+ dev_err(a5psw->dev, "MDIO clock div %ld out of range\n", div);
+ return -ERANGE;
+ }
+
+ cfgstatus = FIELD_PREP(A5PSW_MDIO_CFG_STATUS_CLKDIV, div);
+
+ a5psw_reg_writel(a5psw, A5PSW_MDIO_CFG_STATUS, cfgstatus);
+
+ return 0;
+}
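+
+/* For example, with a hypothetical 125 MHz hclk and the default 2.5 MHz
+ * MDIO frequency (A5PSW_MDIO_DEF_FREQ), the divider computed above is
+ * 125000000 / 2500000 / 2 = 25, inside the accepted range of
+ * A5PSW_MDIO_CLK_DIV_MIN (5) to FIELD_MAX(A5PSW_MDIO_CFG_STATUS_CLKDIV)
+ * (511).
+ */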
+
+static int a5psw_probe_mdio(struct a5psw *a5psw, struct device_node *node)
+{
+ struct device *dev = a5psw->dev;
+ struct mii_bus *bus;
+ u32 mdio_freq;
+ int ret;
+
+ if (of_property_read_u32(node, "clock-frequency", &mdio_freq))
+ mdio_freq = A5PSW_MDIO_DEF_FREQ;
+
+ ret = a5psw_mdio_config(a5psw, mdio_freq);
+ if (ret)
+ return ret;
+
+ bus = devm_mdiobus_alloc(dev);
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "a5psw_mdio";
+ bus->read = a5psw_mdio_read;
+ bus->write = a5psw_mdio_write;
+ bus->priv = a5psw;
+ bus->parent = dev;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+
+ a5psw->mii_bus = bus;
+
+ return devm_of_mdiobus_register(dev, bus, node);
+}
+
+static void a5psw_pcs_free(struct a5psw *a5psw)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(a5psw->pcs); i++) {
+ if (a5psw->pcs[i])
+ miic_destroy(a5psw->pcs[i]);
+ }
+}
+
+static int a5psw_pcs_get(struct a5psw *a5psw)
+{
+ struct device_node *ports, *port, *pcs_node;
+ struct phylink_pcs *pcs;
+ int ret;
+ u32 reg;
+
+ ports = of_get_child_by_name(a5psw->dev->of_node, "ethernet-ports");
+ if (!ports)
+ return -EINVAL;
+
+ for_each_available_child_of_node(ports, port) {
+ pcs_node = of_parse_phandle(port, "pcs-handle", 0);
+ if (!pcs_node)
+ continue;
+
+ if (of_property_read_u32(port, "reg", &reg)) {
+ ret = -EINVAL;
+ goto free_pcs;
+ }
+
+ if (reg >= ARRAY_SIZE(a5psw->pcs)) {
+ ret = -ENODEV;
+ goto free_pcs;
+ }
+
+ pcs = miic_create(a5psw->dev, pcs_node);
+ if (IS_ERR(pcs)) {
+ dev_err(a5psw->dev, "Failed to create PCS for port %d\n",
+ reg);
+ ret = PTR_ERR(pcs);
+ goto free_pcs;
+ }
+
+ a5psw->pcs[reg] = pcs;
+ of_node_put(pcs_node);
+ }
+ of_node_put(ports);
+
+ return 0;
+
+free_pcs:
+ of_node_put(pcs_node);
+ of_node_put(port);
+ of_node_put(ports);
+ a5psw_pcs_free(a5psw);
+
+ return ret;
+}
+
+static int a5psw_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *mdio;
+ struct dsa_switch *ds;
+ struct a5psw *a5psw;
+ int ret;
+
+ a5psw = devm_kzalloc(dev, sizeof(*a5psw), GFP_KERNEL);
+ if (!a5psw)
+ return -ENOMEM;
+
+ a5psw->dev = dev;
+ mutex_init(&a5psw->lk_lock);
+ spin_lock_init(&a5psw->reg_lock);
+ a5psw->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(a5psw->base))
+ return PTR_ERR(a5psw->base);
+
+ ret = a5psw_pcs_get(a5psw);
+ if (ret)
+ return ret;
+
+ a5psw->hclk = devm_clk_get(dev, "hclk");
+ if (IS_ERR(a5psw->hclk)) {
+ dev_err(dev, "failed get hclk clock\n");
+ ret = PTR_ERR(a5psw->hclk);
+ goto free_pcs;
+ }
+
+ a5psw->clk = devm_clk_get(dev, "clk");
+ if (IS_ERR(a5psw->clk)) {
+ dev_err(dev, "failed get clk_switch clock\n");
+ ret = PTR_ERR(a5psw->clk);
+ goto free_pcs;
+ }
+
+ ret = clk_prepare_enable(a5psw->clk);
+ if (ret)
+ goto free_pcs;
+
+ ret = clk_prepare_enable(a5psw->hclk);
+ if (ret)
+ goto clk_disable;
+
+ mdio = of_get_child_by_name(dev->of_node, "mdio");
+ if (of_device_is_available(mdio)) {
+ ret = a5psw_probe_mdio(a5psw, mdio);
+ if (ret) {
+ of_node_put(mdio);
+ dev_err(dev, "Failed to register MDIO: %d\n", ret);
+ goto hclk_disable;
+ }
+ }
+
+ of_node_put(mdio);
+
+ ds = &a5psw->ds;
+ ds->dev = dev;
+ ds->num_ports = A5PSW_PORTS_NUM;
+ ds->ops = &a5psw_switch_ops;
+ ds->priv = a5psw;
+
+ platform_set_drvdata(pdev, a5psw);
+
+ ret = dsa_register_switch(ds);
+ if (ret) {
+ dev_err(dev, "Failed to register DSA switch: %d\n", ret);
+ goto hclk_disable;
+ }
+
+ return 0;
+
+hclk_disable:
+ clk_disable_unprepare(a5psw->hclk);
+clk_disable:
+ clk_disable_unprepare(a5psw->clk);
+free_pcs:
+ a5psw_pcs_free(a5psw);
+
+ return ret;
+}
+
+static int a5psw_remove(struct platform_device *pdev)
+{
+ struct a5psw *a5psw = platform_get_drvdata(pdev);
+
+ if (!a5psw)
+ return 0;
+
+ dsa_unregister_switch(&a5psw->ds);
+ a5psw_pcs_free(a5psw);
+ clk_disable_unprepare(a5psw->hclk);
+ clk_disable_unprepare(a5psw->clk);
+
+ return 0;
+}
+
+static void a5psw_shutdown(struct platform_device *pdev)
+{
+ struct a5psw *a5psw = platform_get_drvdata(pdev);
+
+ if (!a5psw)
+ return;
+
+ dsa_switch_shutdown(&a5psw->ds);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static const struct of_device_id a5psw_of_mtable[] = {
+ { .compatible = "renesas,rzn1-a5psw", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, a5psw_of_mtable);
+
+static struct platform_driver a5psw_driver = {
+ .driver = {
+ .name = "rzn1_a5psw",
+ .of_match_table = of_match_ptr(a5psw_of_mtable),
+ },
+ .probe = a5psw_probe,
+ .remove = a5psw_remove,
+ .shutdown = a5psw_shutdown,
+};
+module_platform_driver(a5psw_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Renesas RZ/N1 Advanced 5-port Switch driver");
+MODULE_AUTHOR("Clément Léger <clement.leger@bootlin.com>");
diff --git a/drivers/net/dsa/rzn1_a5psw.h b/drivers/net/dsa/rzn1_a5psw.h
new file mode 100644
index 000000000..b869192ee
--- /dev/null
+++ b/drivers/net/dsa/rzn1_a5psw.h
@@ -0,0 +1,260 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 Schneider Electric
+ *
+ * Clément Léger <clement.leger@bootlin.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/platform_device.h>
+#include <linux/pcs-rzn1-miic.h>
+#include <net/dsa.h>
+
+#define A5PSW_REVISION 0x0
+#define A5PSW_PORT_OFFSET(port) (0x400 * (port))
+
+#define A5PSW_PORT_ENA 0x8
+#define A5PSW_PORT_ENA_TX(port) BIT(port)
+#define A5PSW_PORT_ENA_RX_SHIFT 16
+#define A5PSW_PORT_ENA_TX_RX(port) (BIT((port) + A5PSW_PORT_ENA_RX_SHIFT) | \
+ BIT(port))
+#define A5PSW_UCAST_DEF_MASK 0xC
+
+#define A5PSW_VLAN_VERIFY 0x10
+#define A5PSW_VLAN_VERI_SHIFT 0
+#define A5PSW_VLAN_DISC_SHIFT 16
+
+#define A5PSW_BCAST_DEF_MASK 0x14
+#define A5PSW_MCAST_DEF_MASK 0x18
+
+#define A5PSW_INPUT_LEARN 0x1C
+#define A5PSW_INPUT_LEARN_DIS(p) BIT((p) + 16)
+#define A5PSW_INPUT_LEARN_BLOCK(p) BIT(p)
+
+#define A5PSW_MGMT_CFG 0x20
+#define A5PSW_MGMT_CFG_ENABLE BIT(6)
+
+#define A5PSW_MODE_CFG 0x24
+#define A5PSW_MODE_STATS_RESET BIT(31)
+
+#define A5PSW_VLAN_IN_MODE 0x28
+#define A5PSW_VLAN_IN_MODE_PORT_SHIFT(port) ((port) * 2)
+#define A5PSW_VLAN_IN_MODE_PORT(port) (GENMASK(1, 0) << \
+ A5PSW_VLAN_IN_MODE_PORT_SHIFT(port))
+#define A5PSW_VLAN_IN_MODE_SINGLE_PASSTHROUGH 0x0
+#define A5PSW_VLAN_IN_MODE_SINGLE_REPLACE 0x1
+#define A5PSW_VLAN_IN_MODE_TAG_ALWAYS 0x2
+
+#define A5PSW_VLAN_OUT_MODE 0x2C
+#define A5PSW_VLAN_OUT_MODE_PORT(port) (GENMASK(1, 0) << ((port) * 2))
+#define A5PSW_VLAN_OUT_MODE_DIS 0x0
+#define A5PSW_VLAN_OUT_MODE_STRIP 0x1
+#define A5PSW_VLAN_OUT_MODE_TAG_THROUGH 0x2
+#define A5PSW_VLAN_OUT_MODE_TRANSPARENT 0x3
+
+#define A5PSW_VLAN_IN_MODE_ENA 0x30
+#define A5PSW_VLAN_TAG_ID 0x34
+
+#define A5PSW_SYSTEM_TAGINFO(port) (0x200 + A5PSW_PORT_OFFSET(port))
+
+#define A5PSW_AUTH_PORT(port) (0x240 + 4 * (port))
+#define A5PSW_AUTH_PORT_AUTHORIZED BIT(0)
+
+#define A5PSW_VLAN_RES(entry) (0x280 + 4 * (entry))
+#define A5PSW_VLAN_RES_WR_PORTMASK BIT(30)
+#define A5PSW_VLAN_RES_WR_TAGMASK BIT(29)
+#define A5PSW_VLAN_RES_RD_TAGMASK BIT(28)
+#define A5PSW_VLAN_RES_ID GENMASK(16, 5)
+#define A5PSW_VLAN_RES_PORTMASK GENMASK(4, 0)
+
+#define A5PSW_RXMATCH_CONFIG(port) (0x3e80 + 4 * (port))
+#define A5PSW_RXMATCH_CONFIG_PATTERN(p) BIT(p)
+
+#define A5PSW_PATTERN_CTRL(p) (0x3eb0 + 4 * (p))
+#define A5PSW_PATTERN_CTRL_MGMTFWD BIT(1)
+
+#define A5PSW_LK_CTRL 0x400
+#define A5PSW_LK_ADDR_CTRL_BLOCKING BIT(0)
+#define A5PSW_LK_ADDR_CTRL_LEARNING BIT(1)
+#define A5PSW_LK_ADDR_CTRL_AGEING BIT(2)
+#define A5PSW_LK_ADDR_CTRL_ALLOW_MIGR BIT(3)
+#define A5PSW_LK_ADDR_CTRL_CLEAR_TABLE BIT(6)
+
+#define A5PSW_LK_ADDR_CTRL 0x408
+#define A5PSW_LK_ADDR_CTRL_BUSY BIT(31)
+#define A5PSW_LK_ADDR_CTRL_DELETE_PORT BIT(30)
+#define A5PSW_LK_ADDR_CTRL_CLEAR BIT(29)
+#define A5PSW_LK_ADDR_CTRL_LOOKUP BIT(28)
+#define A5PSW_LK_ADDR_CTRL_WAIT BIT(27)
+#define A5PSW_LK_ADDR_CTRL_READ BIT(26)
+#define A5PSW_LK_ADDR_CTRL_WRITE BIT(25)
+#define A5PSW_LK_ADDR_CTRL_ADDRESS GENMASK(12, 0)
+
+#define A5PSW_LK_DATA_LO 0x40C
+#define A5PSW_LK_DATA_HI 0x410
+#define A5PSW_LK_DATA_HI_VALID BIT(16)
+#define A5PSW_LK_DATA_HI_PORT BIT(16)
+
+#define A5PSW_LK_LEARNCOUNT 0x418
+#define A5PSW_LK_LEARNCOUNT_COUNT GENMASK(13, 0)
+#define A5PSW_LK_LEARNCOUNT_MODE GENMASK(31, 30)
+#define A5PSW_LK_LEARNCOUNT_MODE_SET 0x0
+#define A5PSW_LK_LEARNCOUNT_MODE_INC 0x1
+#define A5PSW_LK_LEARNCOUNT_MODE_DEC 0x2
+
+#define A5PSW_MGMT_TAG_CFG 0x480
+#define A5PSW_MGMT_TAG_CFG_TAGFIELD GENMASK(31, 16)
+#define A5PSW_MGMT_TAG_CFG_ALL_FRAMES BIT(1)
+#define A5PSW_MGMT_TAG_CFG_ENABLE BIT(0)
+
+#define A5PSW_LK_AGETIME 0x41C
+#define A5PSW_LK_AGETIME_MASK GENMASK(23, 0)
+
+#define A5PSW_MDIO_CFG_STATUS 0x700
+#define A5PSW_MDIO_CFG_STATUS_CLKDIV GENMASK(15, 7)
+#define A5PSW_MDIO_CFG_STATUS_READERR BIT(1)
+#define A5PSW_MDIO_CFG_STATUS_BUSY BIT(0)
+
+#define A5PSW_MDIO_COMMAND 0x704
+/* Bit is named TRAININIT in the datasheet and should be set when reading */
+#define A5PSW_MDIO_COMMAND_READ BIT(15)
+#define A5PSW_MDIO_COMMAND_PHY_ADDR GENMASK(9, 5)
+#define A5PSW_MDIO_COMMAND_REG_ADDR GENMASK(4, 0)
+
+#define A5PSW_MDIO_DATA 0x708
+#define A5PSW_MDIO_DATA_MASK GENMASK(15, 0)
+
+#define A5PSW_CMD_CFG(port) (0x808 + A5PSW_PORT_OFFSET(port))
+#define A5PSW_CMD_CFG_CNTL_FRM_ENA BIT(23)
+#define A5PSW_CMD_CFG_SW_RESET BIT(13)
+#define A5PSW_CMD_CFG_TX_CRC_APPEND BIT(11)
+#define A5PSW_CMD_CFG_HD_ENA BIT(10)
+#define A5PSW_CMD_CFG_PAUSE_IGNORE BIT(8)
+#define A5PSW_CMD_CFG_CRC_FWD BIT(6)
+#define A5PSW_CMD_CFG_ETH_SPEED BIT(3)
+#define A5PSW_CMD_CFG_RX_ENA BIT(1)
+#define A5PSW_CMD_CFG_TX_ENA BIT(0)
+
+#define A5PSW_FRM_LENGTH(port) (0x814 + A5PSW_PORT_OFFSET(port))
+#define A5PSW_FRM_LENGTH_MASK GENMASK(13, 0)
+
+#define A5PSW_STATUS(port) (0x840 + A5PSW_PORT_OFFSET(port))
+
+#define A5PSW_STATS_HIWORD 0x900
+
+/* Stats */
+#define A5PSW_aFramesTransmittedOK 0x868
+#define A5PSW_aFramesReceivedOK 0x86C
+#define A5PSW_aFrameCheckSequenceErrors 0x870
+#define A5PSW_aAlignmentErrors 0x874
+#define A5PSW_aOctetsTransmittedOK 0x878
+#define A5PSW_aOctetsReceivedOK 0x87C
+#define A5PSW_aTxPAUSEMACCtrlFrames 0x880
+#define A5PSW_aRxPAUSEMACCtrlFrames 0x884
+/* If */
+#define A5PSW_ifInErrors 0x888
+#define A5PSW_ifOutErrors 0x88C
+#define A5PSW_ifInUcastPkts 0x890
+#define A5PSW_ifInMulticastPkts 0x894
+#define A5PSW_ifInBroadcastPkts 0x898
+#define A5PSW_ifOutDiscards 0x89C
+#define A5PSW_ifOutUcastPkts 0x8A0
+#define A5PSW_ifOutMulticastPkts 0x8A4
+#define A5PSW_ifOutBroadcastPkts 0x8A8
+/* Ether */
+#define A5PSW_etherStatsDropEvents 0x8AC
+#define A5PSW_etherStatsOctets 0x8B0
+#define A5PSW_etherStatsPkts 0x8B4
+#define A5PSW_etherStatsUndersizePkts 0x8B8
+#define A5PSW_etherStatsOversizePkts 0x8BC
+#define A5PSW_etherStatsPkts64Octets 0x8C0
+#define A5PSW_etherStatsPkts65to127Octets 0x8C4
+#define A5PSW_etherStatsPkts128to255Octets 0x8C8
+#define A5PSW_etherStatsPkts256to511Octets 0x8CC
+#define A5PSW_etherStatsPkts512to1023Octets 0x8D0
+#define A5PSW_etherStatsPkts1024to1518Octets 0x8D4
+#define A5PSW_etherStatsPkts1519toXOctets 0x8D8
+#define A5PSW_etherStatsJabbers 0x8DC
+#define A5PSW_etherStatsFragments 0x8E0
+
+#define A5PSW_VLANReceived 0x8E8
+#define A5PSW_VLANTransmitted 0x8EC
+
+#define A5PSW_aDeferred 0x910
+#define A5PSW_aMultipleCollisions 0x914
+#define A5PSW_aSingleCollisions 0x918
+#define A5PSW_aLateCollisions 0x91C
+#define A5PSW_aExcessiveCollisions 0x920
+#define A5PSW_aCarrierSenseErrors 0x924
+
+#define A5PSW_VLAN_TAG(prio, id) (((prio) << 12) | (id))
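+/* e.g. A5PSW_VLAN_TAG(5, 100) = 0x5064: PCP in bits 15-12, VID below */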
+#define A5PSW_PORTS_NUM 5
+#define A5PSW_CPU_PORT (A5PSW_PORTS_NUM - 1)
+#define A5PSW_MDIO_DEF_FREQ 2500000
+#define A5PSW_MDIO_TIMEOUT 100
+#define A5PSW_JUMBO_LEN (10 * SZ_1K)
+#define A5PSW_MDIO_CLK_DIV_MIN 5
+#define A5PSW_TAG_LEN 8
+#define A5PSW_VLAN_COUNT 32
+
+/* Ensure enough space for 2 VLAN tags */
+#define A5PSW_EXTRA_MTU_LEN (A5PSW_TAG_LEN + 8)
+#define A5PSW_MAX_MTU (A5PSW_JUMBO_LEN - A5PSW_EXTRA_MTU_LEN)
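+/* With the 8-byte switch tag plus two 4-byte VLAN tags reserved above,
+ * the 10 KiB jumbo buffer leaves a maximum MTU of 10240 - 16 = 10224;
+ * a5psw_port_change_mtu() further adds ETH_HLEN and ETH_FCS_LEN when
+ * programming A5PSW_FRM_LENGTH.
+ */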
+
+#define A5PSW_PATTERN_MGMTFWD 0
+
+#define A5PSW_LK_BUSY_USEC_POLL 10
+#define A5PSW_CTRL_TIMEOUT 1000
+#define A5PSW_TABLE_ENTRIES 8192
+
+struct fdb_entry {
+ u8 mac[ETH_ALEN];
+ u16 valid:1;
+ u16 is_static:1;
+ u16 prio:3;
+ u16 port_mask:5;
+ u16 reserved:6;
+} __packed;
+
+union lk_data {
+ struct {
+ u32 lo;
+ u32 hi;
+ };
+ struct fdb_entry entry;
+};
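+
+/* This union lets one 8-byte FDB entry travel through the two 32-bit
+ * A5PSW_LK_DATA_LO/HI registers: 'lo' holds the first four MAC octets,
+ * 'hi' the last two octets plus the flag bitfield, whose valid bit
+ * lines up with A5PSW_LK_DATA_HI_VALID (BIT(16)).
+ */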
+
+/**
+ * struct a5psw - switch struct
+ * @base: Base address of the switch
+ * @hclk: hclk_switch clock
+ * @clk: clk_switch clock
+ * @dev: Device associated to the switch
+ * @mii_bus: MDIO bus struct
+ * @pcs: Array of PCS connected to the switch ports (not for the CPU)
+ * @ds: DSA switch struct
+ * @lk_lock: Lock for the lookup table
+ * @reg_lock: Lock for register read-modify-write operation
+ * @bridged_ports: Mask of ports that are bridged and should be flooded
+ * @br_dev: Bridge net device
+ */
+struct a5psw {
+ void __iomem *base;
+ struct clk *hclk;
+ struct clk *clk;
+ struct device *dev;
+ struct mii_bus *mii_bus;
+ struct phylink_pcs *pcs[A5PSW_PORTS_NUM - 1];
+ struct dsa_switch ds;
+ struct mutex lk_lock;
+ spinlock_t reg_lock;
+ u32 bridged_ports;
+ struct net_device *br_dev;
+};
diff --git a/drivers/net/dsa/sja1105/Kconfig b/drivers/net/dsa/sja1105/Kconfig
new file mode 100644
index 000000000..1291bba3f
--- /dev/null
+++ b/drivers/net/dsa/sja1105/Kconfig
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_DSA_SJA1105
+ tristate "NXP SJA1105 Ethernet switch family support"
+ depends on NET_DSA && SPI
+ depends on PTP_1588_CLOCK_OPTIONAL
+ select NET_DSA_TAG_SJA1105
+ select PCS_XPCS
+ select PACKING
+ select CRC32
+ help
+ This is the driver for the NXP SJA1105 (5-port) and SJA1110 (10-port)
+ automotive Ethernet switch family. These are managed over an SPI
+ interface. Probing is handled based on OF bindings and so is the
+ linkage to PHYLINK. The driver supports the following revisions:
+ - SJA1105E (Gen. 1, No TT-Ethernet)
+ - SJA1105T (Gen. 1, TT-Ethernet)
+ - SJA1105P (Gen. 2, No SGMII, No TT-Ethernet)
+ - SJA1105Q (Gen. 2, No SGMII, TT-Ethernet)
+ - SJA1105R (Gen. 2, SGMII, No TT-Ethernet)
+ - SJA1105S (Gen. 2, SGMII, TT-Ethernet)
+ - SJA1110A (Gen. 3, SGMII, TT-Ethernet, 100base-TX PHY, 10 ports)
+ - SJA1110B (Gen. 3, SGMII, TT-Ethernet, 100base-TX PHY, 9 ports)
+ - SJA1110C (Gen. 3, SGMII, TT-Ethernet, 100base-TX PHY, 7 ports)
+ - SJA1110D (Gen. 3, SGMII, TT-Ethernet, no 100base-TX PHY, 7 ports)
+
+config NET_DSA_SJA1105_PTP
+ bool "Support for the PTP clock on the NXP SJA1105 Ethernet switch"
+ depends on NET_DSA_SJA1105
+ depends on PTP_1588_CLOCK
+ help
+ This enables support for timestamping and PTP clock manipulations in
+ the SJA1105 DSA driver.
+
+config NET_DSA_SJA1105_TAS
+ bool "Support for the Time-Aware Scheduler on NXP SJA1105"
+ depends on NET_DSA_SJA1105 && NET_SCH_TAPRIO
+ depends on NET_SCH_TAPRIO=y || NET_DSA_SJA1105=m
+ depends on NET_DSA_SJA1105_PTP
+ help
+ This enables support for the TTEthernet-based egress scheduling
+ engine in the SJA1105 DSA driver, which is controlled using a
+ hardware offload of the tc-taprio qdisc.
+
+config NET_DSA_SJA1105_VL
+ bool "Support for Virtual Links on NXP SJA1105"
+ depends on NET_DSA_SJA1105_TAS
+ help
+ This enables support for flow classification using capable devices
+ (SJA1105T, SJA1105Q, SJA1105S). The following actions are supported:
+ - redirect, trap, drop
+ - time-based ingress policing, via the tc-gate action
diff --git a/drivers/net/dsa/sja1105/Makefile b/drivers/net/dsa/sja1105/Makefile
new file mode 100644
index 000000000..40d69e6c0
--- /dev/null
+++ b/drivers/net/dsa/sja1105/Makefile
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_NET_DSA_SJA1105) += sja1105.o
+
+sja1105-objs := \
+ sja1105_spi.o \
+ sja1105_main.o \
+ sja1105_mdio.o \
+ sja1105_flower.o \
+ sja1105_ethtool.o \
+ sja1105_devlink.o \
+ sja1105_clocking.o \
+ sja1105_static_config.o \
+ sja1105_dynamic_config.o \
+
+ifdef CONFIG_NET_DSA_SJA1105_PTP
+sja1105-objs += sja1105_ptp.o
+endif
+
+ifdef CONFIG_NET_DSA_SJA1105_TAS
+sja1105-objs += sja1105_tas.o
+endif
+
+ifdef CONFIG_NET_DSA_SJA1105_VL
+sja1105-objs += sja1105_vl.o
+endif
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
new file mode 100644
index 000000000..06e0ebfe6
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -0,0 +1,422 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
+ * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#ifndef _SJA1105_H
+#define _SJA1105_H
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
+#include <linux/dsa/sja1105.h>
+#include <linux/dsa/8021q.h>
+#include <net/dsa.h>
+#include <linux/mutex.h>
+#include "sja1105_static_config.h"
+
+#define SJA1105ET_FDB_BIN_SIZE 4
+/* The hardware value is in multiples of 10 ms.
+ * The passed parameter is in multiples of 1 ms.
+ */
+#define SJA1105_AGEING_TIME_MS(ms) ((ms) / 10)
+#define SJA1105_NUM_L2_POLICERS SJA1110_MAX_L2_POLICING_COUNT
+
+/* Calculated assuming 1Gbps, where the clock has 125 MHz (8 ns period)
+ * To avoid floating point operations, we'll multiply the degrees by 10
+ * to get a "phase" and get 1 decimal point precision.
+ */
+#define SJA1105_RGMII_DELAY_PS_TO_PHASE(ps) \
+ (((ps) * 360) / 800)
+#define SJA1105_RGMII_DELAY_PHASE_TO_PS(phase) \
+ ((800 * (phase)) / 360)
+#define SJA1105_RGMII_DELAY_PHASE_TO_HW(phase) \
+ (((phase) - 738) / 9)
+#define SJA1105_RGMII_DELAY_PS_TO_HW(ps) \
+ SJA1105_RGMII_DELAY_PHASE_TO_HW(SJA1105_RGMII_DELAY_PS_TO_PHASE(ps))
+
+/* Valid range in degrees is a value between 73.8 and 101.7
+ * in 0.9 degree increments
+ */
+#define SJA1105_RGMII_DELAY_MIN_PS \
+ SJA1105_RGMII_DELAY_PHASE_TO_PS(738)
+#define SJA1105_RGMII_DELAY_MAX_PS \
+ SJA1105_RGMII_DELAY_PHASE_TO_PS(1017)
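+
+/* Worked example for the helpers above: a 2.0 ns (2000 ps) delay maps
+ * to a phase of 2000 * 360 / 800 = 900 (90.0 degrees) and a hardware
+ * value of (900 - 738) / 9 = 18; the programmable window is therefore
+ * 1640 ps (73.8 degrees) to 2260 ps (101.7 degrees).
+ */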
+
+typedef enum {
+ SPI_READ = 0,
+ SPI_WRITE = 1,
+} sja1105_spi_rw_mode_t;
+
+#include "sja1105_tas.h"
+#include "sja1105_ptp.h"
+
+enum sja1105_stats_area {
+ MAC,
+ HL1,
+ HL2,
+ ETHER,
+ __MAX_SJA1105_STATS_AREA,
+};
+
+/* Keeps the different addresses between E/T and P/Q/R/S */
+struct sja1105_regs {
+ u64 device_id;
+ u64 prod_id;
+ u64 status;
+ u64 port_control;
+ u64 rgu;
+ u64 vl_status;
+ u64 config;
+ u64 rmii_pll1;
+ u64 ptppinst;
+ u64 ptppindur;
+ u64 ptp_control;
+ u64 ptpclkval;
+ u64 ptpclkrate;
+ u64 ptpclkcorp;
+ u64 ptpsyncts;
+ u64 ptpschtm;
+ u64 ptpegr_ts[SJA1105_MAX_NUM_PORTS];
+ u64 pad_mii_tx[SJA1105_MAX_NUM_PORTS];
+ u64 pad_mii_rx[SJA1105_MAX_NUM_PORTS];
+ u64 pad_mii_id[SJA1105_MAX_NUM_PORTS];
+ u64 cgu_idiv[SJA1105_MAX_NUM_PORTS];
+ u64 mii_tx_clk[SJA1105_MAX_NUM_PORTS];
+ u64 mii_rx_clk[SJA1105_MAX_NUM_PORTS];
+ u64 mii_ext_tx_clk[SJA1105_MAX_NUM_PORTS];
+ u64 mii_ext_rx_clk[SJA1105_MAX_NUM_PORTS];
+ u64 rgmii_tx_clk[SJA1105_MAX_NUM_PORTS];
+ u64 rmii_ref_clk[SJA1105_MAX_NUM_PORTS];
+ u64 rmii_ext_tx_clk[SJA1105_MAX_NUM_PORTS];
+ u64 stats[__MAX_SJA1105_STATS_AREA][SJA1105_MAX_NUM_PORTS];
+ u64 mdio_100base_tx;
+ u64 mdio_100base_t1;
+ u64 pcs_base[SJA1105_MAX_NUM_PORTS];
+};
+
+struct sja1105_mdio_private {
+ struct sja1105_private *priv;
+};
+
+enum {
+ SJA1105_SPEED_AUTO,
+ SJA1105_SPEED_10MBPS,
+ SJA1105_SPEED_100MBPS,
+ SJA1105_SPEED_1000MBPS,
+ SJA1105_SPEED_2500MBPS,
+ SJA1105_SPEED_MAX,
+};
+
+enum sja1105_internal_phy_t {
+ SJA1105_NO_PHY = 0,
+ SJA1105_PHY_BASE_TX,
+ SJA1105_PHY_BASE_T1,
+};
+
+struct sja1105_info {
+ u64 device_id;
+ /* Needed for distinction between P and R, and between Q and S
+ * (since the parts with/without SGMII share the same
+ * switch core and device_id)
+ */
+ u64 part_no;
+ /* E/T and P/Q/R/S have partial timestamps of different sizes.
+ * They must be reconstructed on both families anyway to get the full
+ * 64-bit values back.
+ */
+ int ptp_ts_bits;
+ /* Also SPI commands are of different sizes to retrieve
+ * the egress timestamps.
+ */
+ int ptpegr_ts_bytes;
+ int num_cbs_shapers;
+ int max_frame_mem;
+ int num_ports;
+ bool multiple_cascade_ports;
+ /* Every {port, TXQ} has its own CBS shaper */
+ bool fixed_cbs_mapping;
+ enum dsa_tag_protocol tag_proto;
+ const struct sja1105_dynamic_table_ops *dyn_ops;
+ const struct sja1105_table_ops *static_ops;
+ const struct sja1105_regs *regs;
+ bool can_limit_mcast_flood;
+ int (*reset_cmd)(struct dsa_switch *ds);
+ int (*setup_rgmii_delay)(const void *ctx, int port);
+ /* Prototypes from include/net/dsa.h */
+ int (*fdb_add_cmd)(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+ int (*fdb_del_cmd)(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+ void (*ptp_cmd_packing)(u8 *buf, struct sja1105_ptp_cmd *cmd,
+ enum packing_op op);
+ bool (*rxtstamp)(struct dsa_switch *ds, int port, struct sk_buff *skb);
+ void (*txtstamp)(struct dsa_switch *ds, int port, struct sk_buff *skb);
+ int (*clocking_setup)(struct sja1105_private *priv);
+ int (*pcs_mdio_read)(struct mii_bus *bus, int phy, int reg);
+ int (*pcs_mdio_write)(struct mii_bus *bus, int phy, int reg, u16 val);
+ int (*disable_microcontroller)(struct sja1105_private *priv);
+ const char *name;
+ bool supports_mii[SJA1105_MAX_NUM_PORTS];
+ bool supports_rmii[SJA1105_MAX_NUM_PORTS];
+ bool supports_rgmii[SJA1105_MAX_NUM_PORTS];
+ bool supports_sgmii[SJA1105_MAX_NUM_PORTS];
+ bool supports_2500basex[SJA1105_MAX_NUM_PORTS];
+ enum sja1105_internal_phy_t internal_phy[SJA1105_MAX_NUM_PORTS];
+ const u64 port_speed[SJA1105_SPEED_MAX];
+};
+
+enum sja1105_key_type {
+ SJA1105_KEY_BCAST,
+ SJA1105_KEY_TC,
+ SJA1105_KEY_VLAN_UNAWARE_VL,
+ SJA1105_KEY_VLAN_AWARE_VL,
+};
+
+struct sja1105_key {
+ enum sja1105_key_type type;
+
+ union {
+ /* SJA1105_KEY_TC */
+ struct {
+ int pcp;
+ } tc;
+
+ /* SJA1105_KEY_VLAN_UNAWARE_VL */
+ /* SJA1105_KEY_VLAN_AWARE_VL */
+ struct {
+ u64 dmac;
+ u16 vid;
+ u16 pcp;
+ } vl;
+ };
+};
+
+enum sja1105_rule_type {
+ SJA1105_RULE_BCAST_POLICER,
+ SJA1105_RULE_TC_POLICER,
+ SJA1105_RULE_VL,
+};
+
+enum sja1105_vl_type {
+ SJA1105_VL_NONCRITICAL,
+ SJA1105_VL_RATE_CONSTRAINED,
+ SJA1105_VL_TIME_TRIGGERED,
+};
+
+struct sja1105_rule {
+ struct list_head list;
+ unsigned long cookie;
+ unsigned long port_mask;
+ struct sja1105_key key;
+ enum sja1105_rule_type type;
+
+ /* Action */
+ union {
+ /* SJA1105_RULE_BCAST_POLICER */
+ struct {
+ int sharindx;
+ } bcast_pol;
+
+ /* SJA1105_RULE_TC_POLICER */
+ struct {
+ int sharindx;
+ } tc_pol;
+
+ /* SJA1105_RULE_VL */
+ struct {
+ enum sja1105_vl_type type;
+ unsigned long destports;
+ int sharindx;
+ int maxlen;
+ int ipv;
+ u64 base_time;
+ u64 cycle_time;
+ int num_entries;
+ struct action_gate_entry *entries;
+ struct flow_stats stats;
+ } vl;
+ };
+};
+
+struct sja1105_flow_block {
+ struct list_head rules;
+ bool l2_policer_used[SJA1105_NUM_L2_POLICERS];
+ int num_virtual_links;
+};
+
+struct sja1105_private {
+ struct sja1105_static_config static_config;
+ int rgmii_rx_delay_ps[SJA1105_MAX_NUM_PORTS];
+ int rgmii_tx_delay_ps[SJA1105_MAX_NUM_PORTS];
+ phy_interface_t phy_mode[SJA1105_MAX_NUM_PORTS];
+ bool fixed_link[SJA1105_MAX_NUM_PORTS];
+ unsigned long ucast_egress_floods;
+ unsigned long bcast_egress_floods;
+ unsigned long hwts_tx_en;
+ unsigned long hwts_rx_en;
+ const struct sja1105_info *info;
+ size_t max_xfer_len;
+ struct spi_device *spidev;
+ struct dsa_switch *ds;
+ u16 bridge_pvid[SJA1105_MAX_NUM_PORTS];
+ u16 tag_8021q_pvid[SJA1105_MAX_NUM_PORTS];
+ struct sja1105_flow_block flow_block;
+ /* Serializes transmission of management frames so that
+ * the switch doesn't confuse them with one another.
+ */
+ struct mutex mgmt_lock;
+ /* Serializes accesses to the FDB */
+ struct mutex fdb_lock;
+ /* PTP two-step TX timestamp ID, and its serialization lock */
+ spinlock_t ts_id_lock;
+ u8 ts_id;
+ /* Serializes access to the dynamic config interface */
+ struct mutex dynamic_config_lock;
+ struct devlink_region **regions;
+ struct sja1105_cbs_entry *cbs;
+ struct mii_bus *mdio_base_t1;
+ struct mii_bus *mdio_base_tx;
+ struct mii_bus *mdio_pcs;
+ struct dw_xpcs *xpcs[SJA1105_MAX_NUM_PORTS];
+ struct sja1105_ptp_data ptp_data;
+ struct sja1105_tas_data tas_data;
+};
+
+#include "sja1105_dynamic_config.h"
+
+struct sja1105_spi_message {
+ u64 access;
+ u64 read_count;
+ u64 address;
+};
+
+/* From sja1105_main.c */
+enum sja1105_reset_reason {
+ SJA1105_VLAN_FILTERING = 0,
+ SJA1105_AGEING_TIME,
+ SJA1105_SCHEDULING,
+ SJA1105_BEST_EFFORT_POLICING,
+ SJA1105_VIRTUAL_LINKS,
+};
+
+int sja1105_static_config_reload(struct sja1105_private *priv,
+ enum sja1105_reset_reason reason);
+int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
+ struct netlink_ext_ack *extack);
+void sja1105_frame_memory_partitioning(struct sja1105_private *priv);
+
+/* From sja1105_mdio.c */
+int sja1105_mdiobus_register(struct dsa_switch *ds);
+void sja1105_mdiobus_unregister(struct dsa_switch *ds);
+int sja1105_pcs_mdio_read(struct mii_bus *bus, int phy, int reg);
+int sja1105_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val);
+int sja1110_pcs_mdio_read(struct mii_bus *bus, int phy, int reg);
+int sja1110_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val);
+
+/* From sja1105_devlink.c */
+int sja1105_devlink_setup(struct dsa_switch *ds);
+void sja1105_devlink_teardown(struct dsa_switch *ds);
+int sja1105_devlink_info_get(struct dsa_switch *ds,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack);
+
+/* From sja1105_spi.c */
+int sja1105_xfer_buf(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr,
+ u8 *buf, size_t len);
+int sja1105_xfer_u32(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u32 *value,
+ struct ptp_system_timestamp *ptp_sts);
+int sja1105_xfer_u64(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u64 *value,
+ struct ptp_system_timestamp *ptp_sts);
+int static_config_buf_prepare_for_upload(struct sja1105_private *priv,
+ void *config_buf, int buf_len);
+int sja1105_static_config_upload(struct sja1105_private *priv);
+int sja1105_inhibit_tx(const struct sja1105_private *priv,
+ unsigned long port_bitmap, bool tx_inhibited);
+
+extern const struct sja1105_info sja1105e_info;
+extern const struct sja1105_info sja1105t_info;
+extern const struct sja1105_info sja1105p_info;
+extern const struct sja1105_info sja1105q_info;
+extern const struct sja1105_info sja1105r_info;
+extern const struct sja1105_info sja1105s_info;
+extern const struct sja1105_info sja1110a_info;
+extern const struct sja1105_info sja1110b_info;
+extern const struct sja1105_info sja1110c_info;
+extern const struct sja1105_info sja1110d_info;
+
+/* From sja1105_clocking.c */
+
+typedef enum {
+ XMII_MAC = 0,
+ XMII_PHY = 1,
+} sja1105_mii_role_t;
+
+typedef enum {
+ XMII_MODE_MII = 0,
+ XMII_MODE_RMII = 1,
+ XMII_MODE_RGMII = 2,
+ XMII_MODE_SGMII = 3,
+} sja1105_phy_interface_t;
+
+int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port);
+int sja1110_setup_rgmii_delay(const void *ctx, int port);
+int sja1105_clocking_setup_port(struct sja1105_private *priv, int port);
+int sja1105_clocking_setup(struct sja1105_private *priv);
+int sja1110_disable_microcontroller(struct sja1105_private *priv);
+
+/* From sja1105_ethtool.c */
+void sja1105_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data);
+void sja1105_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, u8 *data);
+int sja1105_get_sset_count(struct dsa_switch *ds, int port, int sset);
+
+/* From sja1105_dynamic_config.c */
+int sja1105_dynamic_config_read(struct sja1105_private *priv,
+ enum sja1105_blk_idx blk_idx,
+ int index, void *entry);
+int sja1105_dynamic_config_write(struct sja1105_private *priv,
+ enum sja1105_blk_idx blk_idx,
+ int index, void *entry, bool keep);
+
+enum sja1105_iotag {
+ SJA1105_C_TAG = 0, /* Inner VLAN header */
+ SJA1105_S_TAG = 1, /* Outer VLAN header */
+};
+
+enum sja1110_vlan_type {
+ SJA1110_VLAN_INVALID = 0,
+ SJA1110_VLAN_C_TAG = 1, /* Single inner VLAN tag */
+ SJA1110_VLAN_S_TAG = 2, /* Single outer VLAN tag */
+ SJA1110_VLAN_D_TAG = 3, /* Double tagged, use outer tag for lookup */
+};
+
+enum sja1110_shaper_type {
+ SJA1110_LEAKY_BUCKET_SHAPER = 0,
+ SJA1110_CBS_SHAPER = 1,
+};
+
+u8 sja1105et_fdb_hash(struct sja1105_private *priv, const u8 *addr, u16 vid);
+int sja1105et_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+int sja1105et_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid);
+
+/* From sja1105_flower.c */
+int sja1105_cls_flower_del(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress);
+int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress);
+int sja1105_cls_flower_stats(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress);
+void sja1105_flower_setup(struct dsa_switch *ds);
+void sja1105_flower_teardown(struct dsa_switch *ds);
+struct sja1105_rule *sja1105_rule_find(struct sja1105_private *priv,
+ unsigned long cookie);
+
+#endif
diff --git a/drivers/net/dsa/sja1105/sja1105_clocking.c b/drivers/net/dsa/sja1105/sja1105_clocking.c
new file mode 100644
index 000000000..e3699f76f
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_clocking.c
@@ -0,0 +1,855 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* Copyright 2016-2018 NXP
+ * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include <linux/packing.h>
+#include "sja1105.h"
+
+#define SJA1105_SIZE_CGU_CMD 4
+#define SJA1110_BASE_MCSS_CLK SJA1110_CGU_ADDR(0x70)
+#define SJA1110_BASE_TIMER_CLK SJA1110_CGU_ADDR(0x74)
+
+/* Common structure for CFG_PAD_MIIx_RX and CFG_PAD_MIIx_TX */
+struct sja1105_cfg_pad_mii {
+ u64 d32_os;
+ u64 d32_ih;
+ u64 d32_ipud;
+ u64 d10_ih;
+ u64 d10_os;
+ u64 d10_ipud;
+ u64 ctrl_os;
+ u64 ctrl_ih;
+ u64 ctrl_ipud;
+ u64 clk_os;
+ u64 clk_ih;
+ u64 clk_ipud;
+};
+
+struct sja1105_cfg_pad_mii_id {
+ u64 rxc_stable_ovr;
+ u64 rxc_delay;
+ u64 rxc_bypass;
+ u64 rxc_pd;
+ u64 txc_stable_ovr;
+ u64 txc_delay;
+ u64 txc_bypass;
+ u64 txc_pd;
+};
+
+/* UM10944 Table 82.
+ * IDIV_0_C to IDIV_4_C control registers
+ * (addr. 10000Bh to 10000Fh)
+ */
+struct sja1105_cgu_idiv {
+ u64 clksrc;
+ u64 autoblock;
+ u64 idiv;
+ u64 pd;
+};
+
+/* PLL_1_C control register
+ *
+ * SJA1105 E/T: UM10944 Table 81 (address 10000Ah)
+ * SJA1105 P/Q/R/S: UM11040 Table 116 (address 10000Ah)
+ */
+struct sja1105_cgu_pll_ctrl {
+ u64 pllclksrc;
+ u64 msel;
+ u64 autoblock;
+ u64 psel;
+ u64 direct;
+ u64 fbsel;
+ u64 bypass;
+ u64 pd;
+};
+
+struct sja1110_cgu_outclk {
+ u64 clksrc;
+ u64 autoblock;
+ u64 pd;
+};
+
+enum {
+ CLKSRC_MII0_TX_CLK = 0x00,
+ CLKSRC_MII0_RX_CLK = 0x01,
+ CLKSRC_MII1_TX_CLK = 0x02,
+ CLKSRC_MII1_RX_CLK = 0x03,
+ CLKSRC_MII2_TX_CLK = 0x04,
+ CLKSRC_MII2_RX_CLK = 0x05,
+ CLKSRC_MII3_TX_CLK = 0x06,
+ CLKSRC_MII3_RX_CLK = 0x07,
+ CLKSRC_MII4_TX_CLK = 0x08,
+ CLKSRC_MII4_RX_CLK = 0x09,
+ CLKSRC_PLL0 = 0x0B,
+ CLKSRC_PLL1 = 0x0E,
+ CLKSRC_IDIV0 = 0x11,
+ CLKSRC_IDIV1 = 0x12,
+ CLKSRC_IDIV2 = 0x13,
+ CLKSRC_IDIV3 = 0x14,
+ CLKSRC_IDIV4 = 0x15,
+};
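+
+/* Raw encodings for the CLKSRC field of the CGU control registers; the
+ * per-port clk_sources[] arrays throughout this file pick one of these
+ * based on the port number.
+ */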
+
+/* UM10944 Table 83.
+ * MIIx clock control registers 1 to 30
+ * (addresses 100013h to 100035h)
+ */
+struct sja1105_cgu_mii_ctrl {
+ u64 clksrc;
+ u64 autoblock;
+ u64 pd;
+};
+
+static void sja1105_cgu_idiv_packing(void *buf, struct sja1105_cgu_idiv *idiv,
+ enum packing_op op)
+{
+ const int size = 4;
+
+ sja1105_packing(buf, &idiv->clksrc, 28, 24, size, op);
+ sja1105_packing(buf, &idiv->autoblock, 11, 11, size, op);
+ sja1105_packing(buf, &idiv->idiv, 5, 2, size, op);
+ sja1105_packing(buf, &idiv->pd, 0, 0, size, op);
+}
+
+static int sja1105_cgu_idiv_config(struct sja1105_private *priv, int port,
+ bool enabled, int factor)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct device *dev = priv->ds->dev;
+ struct sja1105_cgu_idiv idiv;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+
+ if (regs->cgu_idiv[port] == SJA1105_RSV_ADDR)
+ return 0;
+
+ if (enabled && factor != 1 && factor != 10) {
+ dev_err(dev, "idiv factor must be 1 or 10\n");
+ return -ERANGE;
+ }
+
+ /* Payload for packed_buf */
+ idiv.clksrc = 0x0A; /* 25MHz */
+ idiv.autoblock = 1; /* Block clk automatically */
+ idiv.idiv = factor - 1; /* Divide by 1 or 10 */
+ idiv.pd = enabled ? 0 : 1; /* Power down? */
+ sja1105_cgu_idiv_packing(packed_buf, &idiv, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->cgu_idiv[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static void
+sja1105_cgu_mii_control_packing(void *buf, struct sja1105_cgu_mii_ctrl *cmd,
+ enum packing_op op)
+{
+ const int size = 4;
+
+ sja1105_packing(buf, &cmd->clksrc, 28, 24, size, op);
+ sja1105_packing(buf, &cmd->autoblock, 11, 11, size, op);
+ sja1105_packing(buf, &cmd->pd, 0, 0, size, op);
+}
+
+static int sja1105_cgu_mii_tx_clk_config(struct sja1105_private *priv,
+ int port, sja1105_mii_role_t role)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl mii_tx_clk;
+ const int mac_clk_sources[] = {
+ CLKSRC_MII0_TX_CLK,
+ CLKSRC_MII1_TX_CLK,
+ CLKSRC_MII2_TX_CLK,
+ CLKSRC_MII3_TX_CLK,
+ CLKSRC_MII4_TX_CLK,
+ };
+ const int phy_clk_sources[] = {
+ CLKSRC_IDIV0,
+ CLKSRC_IDIV1,
+ CLKSRC_IDIV2,
+ CLKSRC_IDIV3,
+ CLKSRC_IDIV4,
+ };
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ int clksrc;
+
+ if (regs->mii_tx_clk[port] == SJA1105_RSV_ADDR)
+ return 0;
+
+ if (role == XMII_MAC)
+ clksrc = mac_clk_sources[port];
+ else
+ clksrc = phy_clk_sources[port];
+
+ /* Payload for packed_buf */
+ mii_tx_clk.clksrc = clksrc;
+ mii_tx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
+ mii_tx_clk.pd = 0; /* Power Down off => enabled */
+ sja1105_cgu_mii_control_packing(packed_buf, &mii_tx_clk, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_tx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int
+sja1105_cgu_mii_rx_clk_config(struct sja1105_private *priv, int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl mii_rx_clk;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ const int clk_sources[] = {
+ CLKSRC_MII0_RX_CLK,
+ CLKSRC_MII1_RX_CLK,
+ CLKSRC_MII2_RX_CLK,
+ CLKSRC_MII3_RX_CLK,
+ CLKSRC_MII4_RX_CLK,
+ };
+
+ if (regs->mii_rx_clk[port] == SJA1105_RSV_ADDR)
+ return 0;
+
+ /* Payload for packed_buf */
+ mii_rx_clk.clksrc = clk_sources[port];
+ mii_rx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
+ mii_rx_clk.pd = 0; /* Power Down off => enabled */
+ sja1105_cgu_mii_control_packing(packed_buf, &mii_rx_clk, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_rx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int
+sja1105_cgu_mii_ext_tx_clk_config(struct sja1105_private *priv, int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl mii_ext_tx_clk;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ const int clk_sources[] = {
+ CLKSRC_IDIV0,
+ CLKSRC_IDIV1,
+ CLKSRC_IDIV2,
+ CLKSRC_IDIV3,
+ CLKSRC_IDIV4,
+ };
+
+ if (regs->mii_ext_tx_clk[port] == SJA1105_RSV_ADDR)
+ return 0;
+
+ /* Payload for packed_buf */
+ mii_ext_tx_clk.clksrc = clk_sources[port];
+ mii_ext_tx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
+ mii_ext_tx_clk.pd = 0; /* Power Down off => enabled */
+ sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_tx_clk, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_ext_tx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int
+sja1105_cgu_mii_ext_rx_clk_config(struct sja1105_private *priv, int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl mii_ext_rx_clk;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ const int clk_sources[] = {
+ CLKSRC_IDIV0,
+ CLKSRC_IDIV1,
+ CLKSRC_IDIV2,
+ CLKSRC_IDIV3,
+ CLKSRC_IDIV4,
+ };
+
+ if (regs->mii_ext_rx_clk[port] == SJA1105_RSV_ADDR)
+ return 0;
+
+ /* Payload for packed_buf */
+ mii_ext_rx_clk.clksrc = clk_sources[port];
+ mii_ext_rx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
+ mii_ext_rx_clk.pd = 0; /* Power Down off => enabled */
+ sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_rx_clk, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_ext_rx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int sja1105_mii_clocking_setup(struct sja1105_private *priv, int port,
+ sja1105_mii_role_t role)
+{
+ struct device *dev = priv->ds->dev;
+ int rc;
+
+ dev_dbg(dev, "Configuring MII-%s clocking\n",
+ (role == XMII_MAC) ? "MAC" : "PHY");
+ /* If role is MAC, disable IDIV
+ * If role is PHY, enable IDIV and configure for 1/1 divider
+ */
+ rc = sja1105_cgu_idiv_config(priv, port, (role == XMII_PHY), 1);
+ if (rc < 0)
+ return rc;
+
+ /* Configure CLKSRC of MII_TX_CLK_n
+ * * If role is MAC, select TX_CLK_n
+ * * If role is PHY, select IDIV_n
+ */
+ rc = sja1105_cgu_mii_tx_clk_config(priv, port, role);
+ if (rc < 0)
+ return rc;
+
+ /* Configure CLKSRC of MII_RX_CLK_n
+ * Select RX_CLK_n
+ */
+ rc = sja1105_cgu_mii_rx_clk_config(priv, port);
+ if (rc < 0)
+ return rc;
+
+ if (role == XMII_PHY) {
+ /* Per MII spec, the PHY (which is us) drives the TX_CLK pin */
+
+ /* Configure CLKSRC of EXT_TX_CLK_n
+ * Select IDIV_n
+ */
+ rc = sja1105_cgu_mii_ext_tx_clk_config(priv, port);
+ if (rc < 0)
+ return rc;
+
+ /* Configure CLKSRC of EXT_RX_CLK_n
+ * Select IDIV_n
+ */
+ rc = sja1105_cgu_mii_ext_rx_clk_config(priv, port);
+ if (rc < 0)
+ return rc;
+ }
+ return 0;
+}
+
+static void
+sja1105_cgu_pll_control_packing(void *buf, struct sja1105_cgu_pll_ctrl *cmd,
+ enum packing_op op)
+{
+ const int size = 4;
+
+ sja1105_packing(buf, &cmd->pllclksrc, 28, 24, size, op);
+ sja1105_packing(buf, &cmd->msel, 23, 16, size, op);
+ sja1105_packing(buf, &cmd->autoblock, 11, 11, size, op);
+ sja1105_packing(buf, &cmd->psel, 9, 8, size, op);
+ sja1105_packing(buf, &cmd->direct, 7, 7, size, op);
+ sja1105_packing(buf, &cmd->fbsel, 6, 6, size, op);
+ sja1105_packing(buf, &cmd->bypass, 1, 1, size, op);
+ sja1105_packing(buf, &cmd->pd, 0, 0, size, op);
+}
+
+static int sja1105_cgu_rgmii_tx_clk_config(struct sja1105_private *priv,
+ int port, u64 speed)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl txc;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ int clksrc;
+
+ if (regs->rgmii_tx_clk[port] == SJA1105_RSV_ADDR)
+ return 0;
+
+ if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS]) {
+ clksrc = CLKSRC_PLL0;
+ } else {
+ int clk_sources[] = {CLKSRC_IDIV0, CLKSRC_IDIV1, CLKSRC_IDIV2,
+ CLKSRC_IDIV3, CLKSRC_IDIV4};
+ clksrc = clk_sources[port];
+ }
+
+ /* RGMII: 125MHz for 1000, 25MHz for 100, 2.5MHz for 10 */
+ txc.clksrc = clksrc;
+ /* Autoblock clk while changing clksrc */
+ txc.autoblock = 1;
+ /* Power Down off => enabled */
+ txc.pd = 0;
+ sja1105_cgu_mii_control_packing(packed_buf, &txc, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgmii_tx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+/* AGU */
+static void
+sja1105_cfg_pad_mii_packing(void *buf, struct sja1105_cfg_pad_mii *cmd,
+ enum packing_op op)
+{
+ const int size = 4;
+
+ sja1105_packing(buf, &cmd->d32_os, 28, 27, size, op);
+ sja1105_packing(buf, &cmd->d32_ih, 26, 26, size, op);
+ sja1105_packing(buf, &cmd->d32_ipud, 25, 24, size, op);
+ sja1105_packing(buf, &cmd->d10_os, 20, 19, size, op);
+ sja1105_packing(buf, &cmd->d10_ih, 18, 18, size, op);
+ sja1105_packing(buf, &cmd->d10_ipud, 17, 16, size, op);
+ sja1105_packing(buf, &cmd->ctrl_os, 12, 11, size, op);
+ sja1105_packing(buf, &cmd->ctrl_ih, 10, 10, size, op);
+ sja1105_packing(buf, &cmd->ctrl_ipud, 9, 8, size, op);
+ sja1105_packing(buf, &cmd->clk_os, 4, 3, size, op);
+ sja1105_packing(buf, &cmd->clk_ih, 2, 2, size, op);
+ sja1105_packing(buf, &cmd->clk_ipud, 1, 0, size, op);
+}
+
+static int sja1105_rgmii_cfg_pad_tx_config(struct sja1105_private *priv,
+ int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cfg_pad_mii pad_mii_tx = {0};
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+
+ if (regs->pad_mii_tx[port] == SJA1105_RSV_ADDR)
+ return 0;
+
+ /* Payload */
+ pad_mii_tx.d32_os = 3; /* TXD[3:2] output stage: */
+ /* high noise/high speed */
+ pad_mii_tx.d10_os = 3; /* TXD[1:0] output stage: */
+ /* high noise/high speed */
+ pad_mii_tx.d32_ipud = 2; /* TXD[3:2] input stage: */
+ /* plain input (default) */
+ pad_mii_tx.d10_ipud = 2; /* TXD[1:0] input stage: */
+ /* plain input (default) */
+ pad_mii_tx.ctrl_os = 3; /* TX_CTL / TX_ER output stage */
+ pad_mii_tx.ctrl_ipud = 2; /* TX_CTL / TX_ER input stage (default) */
+ pad_mii_tx.clk_os = 3; /* TX_CLK output stage */
+ pad_mii_tx.clk_ih = 0; /* TX_CLK input hysteresis (default) */
+ pad_mii_tx.clk_ipud = 2; /* TX_CLK input stage (default) */
+ sja1105_cfg_pad_mii_packing(packed_buf, &pad_mii_tx, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_tx[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int sja1105_cfg_pad_rx_config(struct sja1105_private *priv, int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cfg_pad_mii pad_mii_rx = {0};
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+
+ if (regs->pad_mii_rx[port] == SJA1105_RSV_ADDR)
+ return 0;
+
+ /* Payload */
+ pad_mii_rx.d32_ih = 0; /* RXD[3:2] input stage hysteresis: */
+ /* non-Schmitt (default) */
+ pad_mii_rx.d32_ipud = 2; /* RXD[3:2] input weak pull-up/down */
+ /* plain input (default) */
+ pad_mii_rx.d10_ih = 0; /* RXD[1:0] input stage hysteresis: */
+ /* non-Schmitt (default) */
+ pad_mii_rx.d10_ipud = 2; /* RXD[1:0] input weak pull-up/down */
+ /* plain input (default) */
+ pad_mii_rx.ctrl_ih = 0; /* RX_DV/CRS_DV/RX_CTL and RX_ER */
+ /* input stage hysteresis: */
+ /* non-Schmitt (default) */
+ pad_mii_rx.ctrl_ipud = 3; /* RX_DV/CRS_DV/RX_CTL and RX_ER */
+ /* input stage weak pull-up/down: */
+ /* pull-down */
+ pad_mii_rx.clk_os = 2; /* RX_CLK/RXC output stage: */
+ /* medium noise/fast speed (default) */
+ pad_mii_rx.clk_ih = 0; /* RX_CLK/RXC input hysteresis: */
+ /* non-Schmitt (default) */
+ pad_mii_rx.clk_ipud = 2; /* RX_CLK/RXC input pull-up/down: */
+ /* plain input (default) */
+ sja1105_cfg_pad_mii_packing(packed_buf, &pad_mii_rx, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_rx[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static void
+sja1105_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105_SIZE_CGU_CMD;
+
+ sja1105_packing(buf, &cmd->rxc_stable_ovr, 15, 15, size, op);
+ sja1105_packing(buf, &cmd->rxc_delay, 14, 10, size, op);
+ sja1105_packing(buf, &cmd->rxc_bypass, 9, 9, size, op);
+ sja1105_packing(buf, &cmd->rxc_pd, 8, 8, size, op);
+ sja1105_packing(buf, &cmd->txc_stable_ovr, 7, 7, size, op);
+ sja1105_packing(buf, &cmd->txc_delay, 6, 2, size, op);
+ sja1105_packing(buf, &cmd->txc_bypass, 1, 1, size, op);
+ sja1105_packing(buf, &cmd->txc_pd, 0, 0, size, op);
+}
+
+static void
+sja1110_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105_SIZE_CGU_CMD;
+ u64 range = 4;
+
+ /* Fields RXC_RANGE and TXC_RANGE select the input frequency range:
+ * 0 = 2.5MHz
+ * 1 = 25MHz
+ * 2 = 50MHz
+ * 3 = 125MHz
+ * 4 = Automatically determined by port speed.
+ * There's no point in defining a structure different from the one for
+ * SJA1105, so just hardcode the frequency range to automatic, as
+ * before.
+ */
+ sja1105_packing(buf, &cmd->rxc_stable_ovr, 26, 26, size, op);
+ sja1105_packing(buf, &cmd->rxc_delay, 25, 21, size, op);
+ sja1105_packing(buf, &range, 20, 18, size, op);
+ sja1105_packing(buf, &cmd->rxc_bypass, 17, 17, size, op);
+ sja1105_packing(buf, &cmd->rxc_pd, 16, 16, size, op);
+ sja1105_packing(buf, &cmd->txc_stable_ovr, 10, 10, size, op);
+ sja1105_packing(buf, &cmd->txc_delay, 9, 5, size, op);
+ sja1105_packing(buf, &range, 4, 2, size, op);
+ sja1105_packing(buf, &cmd->txc_bypass, 1, 1, size, op);
+ sja1105_packing(buf, &cmd->txc_pd, 0, 0, size, op);
+}
+
+/* The RGMII delay setup procedure is 2-step and gets called upon each
+ * .phylink_mac_config. Both the 2-step nature and the invocation on every
+ * call are deliberate.
+ * The reason is that the RX Tunable Delay Line of the SJA1105 MAC has issues
+ * with recovering from a frequency change of the link partner's RGMII clock.
+ * The easiest way to recover from this is to temporarily power down the TDL,
+ * as it will re-lock at the new frequency afterwards.
+ */
+int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port)
+{
+ const struct sja1105_private *priv = ctx;
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cfg_pad_mii_id pad_mii_id = {0};
+ int rx_delay = priv->rgmii_rx_delay_ps[port];
+ int tx_delay = priv->rgmii_tx_delay_ps[port];
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ int rc;
+
+ if (rx_delay)
+ pad_mii_id.rxc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(rx_delay);
+ if (tx_delay)
+ pad_mii_id.txc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(tx_delay);
+
+ /* Stage 1: Turn the RGMII delay lines off. */
+ pad_mii_id.rxc_bypass = 1;
+ pad_mii_id.rxc_pd = 1;
+ pad_mii_id.txc_bypass = 1;
+ pad_mii_id.txc_pd = 1;
+ sja1105_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);
+
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+ if (rc < 0)
+ return rc;
+
+ /* Stage 2: Turn the RGMII delay lines on. */
+ if (rx_delay) {
+ pad_mii_id.rxc_bypass = 0;
+ pad_mii_id.rxc_pd = 0;
+ }
+ if (tx_delay) {
+ pad_mii_id.txc_bypass = 0;
+ pad_mii_id.txc_pd = 0;
+ }
+ sja1105_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+int sja1110_setup_rgmii_delay(const void *ctx, int port)
+{
+ const struct sja1105_private *priv = ctx;
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cfg_pad_mii_id pad_mii_id = {0};
+ int rx_delay = priv->rgmii_rx_delay_ps[port];
+ int tx_delay = priv->rgmii_tx_delay_ps[port];
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+
+ pad_mii_id.rxc_pd = 1;
+ pad_mii_id.txc_pd = 1;
+
+ if (rx_delay) {
+ pad_mii_id.rxc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(rx_delay);
+ /* The "BYPASS" bit in SJA1110 is actually a "don't bypass" */
+ pad_mii_id.rxc_bypass = 1;
+ pad_mii_id.rxc_pd = 0;
+ }
+
+ if (tx_delay) {
+ pad_mii_id.txc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(tx_delay);
+ pad_mii_id.txc_bypass = 1;
+ pad_mii_id.txc_pd = 0;
+ }
+
+ sja1110_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
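+
+/* Note that neither setup_rgmii_delay() variant is called directly: both
+ * are reached through priv->info->setup_rgmii_delay at the end of
+ * sja1105_rgmii_clocking_setup() below, which skips the step entirely when
+ * that hook is NULL.
+ */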
+
+static int sja1105_rgmii_clocking_setup(struct sja1105_private *priv, int port,
+ sja1105_mii_role_t role)
+{
+ struct device *dev = priv->ds->dev;
+ struct sja1105_mac_config_entry *mac;
+ u64 speed;
+ int rc;
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+ speed = mac[port].speed;
+
+ dev_dbg(dev, "Configuring port %d RGMII at speed %lldMbps\n",
+ port, speed);
+
+ if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS]) {
+ /* 1000Mbps, IDIV disabled (125 MHz) */
+ rc = sja1105_cgu_idiv_config(priv, port, false, 1);
+ } else if (speed == priv->info->port_speed[SJA1105_SPEED_100MBPS]) {
+ /* 100Mbps, IDIV enabled, divide by 1 (25 MHz) */
+ rc = sja1105_cgu_idiv_config(priv, port, true, 1);
+ } else if (speed == priv->info->port_speed[SJA1105_SPEED_10MBPS]) {
+ /* 10Mbps, IDIV enabled, divide by 10 (2.5 MHz) */
+ rc = sja1105_cgu_idiv_config(priv, port, true, 10);
+ } else if (speed == priv->info->port_speed[SJA1105_SPEED_AUTO]) {
+ /* Skip CGU configuration if there is no speed available
+ * (e.g. link is not established yet)
+ */
+ dev_dbg(dev, "Speed not available, skipping CGU config\n");
+ return 0;
+ } else {
+ rc = -EINVAL;
+ }
+
+ if (rc < 0) {
+ dev_err(dev, "Failed to configure idiv\n");
+ return rc;
+ }
+ rc = sja1105_cgu_rgmii_tx_clk_config(priv, port, speed);
+ if (rc < 0) {
+ dev_err(dev, "Failed to configure RGMII Tx clock\n");
+ return rc;
+ }
+ rc = sja1105_rgmii_cfg_pad_tx_config(priv, port);
+ if (rc < 0) {
+ dev_err(dev, "Failed to configure Tx pad registers\n");
+ return rc;
+ }
+
+ if (!priv->info->setup_rgmii_delay)
+ return 0;
+
+ return priv->info->setup_rgmii_delay(priv, port);
+}
+
+static int sja1105_cgu_rmii_ref_clk_config(struct sja1105_private *priv,
+ int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl ref_clk;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ const int clk_sources[] = {
+ CLKSRC_MII0_TX_CLK,
+ CLKSRC_MII1_TX_CLK,
+ CLKSRC_MII2_TX_CLK,
+ CLKSRC_MII3_TX_CLK,
+ CLKSRC_MII4_TX_CLK,
+ };
+
+ if (regs->rmii_ref_clk[port] == SJA1105_RSV_ADDR)
+ return 0;
+
+ /* Payload for packed_buf */
+ ref_clk.clksrc = clk_sources[port];
+ ref_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
+ ref_clk.pd = 0; /* Power Down off => enabled */
+ sja1105_cgu_mii_control_packing(packed_buf, &ref_clk, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_ref_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int
+sja1105_cgu_rmii_ext_tx_clk_config(struct sja1105_private *priv, int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl ext_tx_clk;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+
+ if (regs->rmii_ext_tx_clk[port] == SJA1105_RSV_ADDR)
+ return 0;
+
+ /* Payload for packed_buf */
+ ext_tx_clk.clksrc = CLKSRC_PLL1;
+ ext_tx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
+ ext_tx_clk.pd = 0; /* Power Down off => enabled */
+ sja1105_cgu_mii_control_packing(packed_buf, &ext_tx_clk, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_ext_tx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int sja1105_cgu_rmii_pll_config(struct sja1105_private *priv)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ struct sja1105_cgu_pll_ctrl pll = {0};
+ struct device *dev = priv->ds->dev;
+ int rc;
+
+ if (regs->rmii_pll1 == SJA1105_RSV_ADDR)
+ return 0;
+
+ /* PLL1 must be enabled and output 50 MHz.
+ * This is done by first writing 0x0A010941 to
+ * the PLL_1_C register and then deasserting
+ * power down (PD) with 0x0A010940.
+ */
+
+ /* Step 1: PLL1 setup for 50 MHz */
+ pll.pllclksrc = 0xA;
+ pll.msel = 0x1;
+ pll.autoblock = 0x1;
+ pll.psel = 0x1;
+ pll.direct = 0x0;
+ pll.fbsel = 0x1;
+ pll.bypass = 0x0;
+ pll.pd = 0x1;
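+ /* Packed per sja1105_cgu_pll_control_packing(), these fields are
+ * exactly the 0x0A010941 from the comment above: pllclksrc=0xA at
+ * bits 28:24, msel=0x1 at 23:16, autoblock at bit 11, psel=0x1 at
+ * 9:8, fbsel at bit 6 and pd at bit 0. Clearing PD in step 2 below
+ * yields 0x0A010940.
+ */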
+
+ sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK);
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_pll1, packed_buf,
+ SJA1105_SIZE_CGU_CMD);
+ if (rc < 0) {
+ dev_err(dev, "failed to configure PLL1 for 50MHz\n");
+ return rc;
+ }
+
+ /* Step 2: Enable PLL1 */
+ pll.pd = 0x0;
+
+ sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK);
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_pll1, packed_buf,
+ SJA1105_SIZE_CGU_CMD);
+ if (rc < 0) {
+ dev_err(dev, "failed to enable PLL1\n");
+ return rc;
+ }
+ return rc;
+}
+
+static int sja1105_rmii_clocking_setup(struct sja1105_private *priv, int port,
+ sja1105_mii_role_t role)
+{
+ struct device *dev = priv->ds->dev;
+ int rc;
+
+ dev_dbg(dev, "Configuring RMII-%s clocking\n",
+ (role == XMII_MAC) ? "MAC" : "PHY");
+ /* AH1601.pdf chapter 2.5.1. Sources */
+ if (role == XMII_MAC) {
+ /* Configure and enable PLL1 for 50 MHz output */
+ rc = sja1105_cgu_rmii_pll_config(priv);
+ if (rc < 0)
+ return rc;
+ }
+ /* Disable IDIV for this port */
+ rc = sja1105_cgu_idiv_config(priv, port, false, 1);
+ if (rc < 0)
+ return rc;
+ /* Source to sink mappings */
+ rc = sja1105_cgu_rmii_ref_clk_config(priv, port);
+ if (rc < 0)
+ return rc;
+ if (role == XMII_MAC) {
+ rc = sja1105_cgu_rmii_ext_tx_clk_config(priv, port);
+ if (rc < 0)
+ return rc;
+ }
+ return 0;
+}
+
+int sja1105_clocking_setup_port(struct sja1105_private *priv, int port)
+{
+ struct sja1105_xmii_params_entry *mii;
+ struct device *dev = priv->ds->dev;
+ sja1105_phy_interface_t phy_mode;
+ sja1105_mii_role_t role;
+ int rc;
+
+ mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
+
+ /* RGMII etc */
+ phy_mode = mii->xmii_mode[port];
+ /* MAC or PHY, for applicable types (not RGMII) */
+ role = mii->phy_mac[port];
+
+ switch (phy_mode) {
+ case XMII_MODE_MII:
+ rc = sja1105_mii_clocking_setup(priv, port, role);
+ break;
+ case XMII_MODE_RMII:
+ rc = sja1105_rmii_clocking_setup(priv, port, role);
+ break;
+ case XMII_MODE_RGMII:
+ rc = sja1105_rgmii_clocking_setup(priv, port, role);
+ break;
+ case XMII_MODE_SGMII:
+ /* Nothing to do in the CGU for SGMII */
+ rc = 0;
+ break;
+ default:
+ dev_err(dev, "Invalid interface mode specified: %d\n",
+ phy_mode);
+ return -EINVAL;
+ }
+ if (rc) {
+ dev_err(dev, "Clocking setup for port %d failed: %d\n",
+ port, rc);
+ return rc;
+ }
+
+ /* Internally pull down the RX_DV/CRS_DV/RX_CTL and RX_ER inputs */
+ return sja1105_cfg_pad_rx_config(priv, port);
+}
+
+int sja1105_clocking_setup(struct sja1105_private *priv)
+{
+ struct dsa_switch *ds = priv->ds;
+ int port, rc;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ rc = sja1105_clocking_setup_port(priv, port);
+ if (rc < 0)
+ return rc;
+ }
+ return 0;
+}
+
+static void
+sja1110_cgu_outclk_packing(void *buf, struct sja1110_cgu_outclk *outclk,
+ enum packing_op op)
+{
+ const int size = 4;
+
+ sja1105_packing(buf, &outclk->clksrc, 27, 24, size, op);
+ sja1105_packing(buf, &outclk->autoblock, 11, 11, size, op);
+ sja1105_packing(buf, &outclk->pd, 0, 0, size, op);
+}
+
+int sja1110_disable_microcontroller(struct sja1105_private *priv)
+{
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ struct sja1110_cgu_outclk outclk_6_c = {
+ .clksrc = 0x3,
+ .pd = true,
+ };
+ struct sja1110_cgu_outclk outclk_7_c = {
+ .clksrc = 0x5,
+ .pd = true,
+ };
+ int rc;
+
+ /* Power down the BASE_TIMER_CLK to disable the watchdog timer */
+ sja1110_cgu_outclk_packing(packed_buf, &outclk_7_c, PACK);
+
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, SJA1110_BASE_TIMER_CLK,
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+ if (rc)
+ return rc;
+
+ /* Power down the BASE_MCSS_CLOCK to gate the microcontroller off */
+ sja1110_cgu_outclk_packing(packed_buf, &outclk_6_c, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, SJA1110_BASE_MCSS_CLK,
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_devlink.c b/drivers/net/dsa/sja1105/sja1105_devlink.c
new file mode 100644
index 000000000..bdbbff2a7
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_devlink.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ * Copyright 2020 NXP
+ */
+#include "sja1105.h"
+
+/* Since devlink regions have a fixed size and the static config has a variable
+ * size, we need to calculate the maximum possible static config size by
+ * creating a dummy config with all table entries populated to the max, and
+ * getting its packed length. This is done dynamically, as opposed to simply
+ * hardcoding a number, because not all static config tables are currently
+ * implemented, and computing it avoids a possible code desynchronization.
+ */
+static size_t sja1105_static_config_get_max_size(struct sja1105_private *priv)
+{
+ struct sja1105_static_config config;
+ enum sja1105_blk_idx blk_idx;
+ int rc;
+
+ rc = sja1105_static_config_init(&config,
+ priv->info->static_ops,
+ priv->info->device_id);
+ if (rc)
+ return 0;
+
+ for (blk_idx = 0; blk_idx < BLK_IDX_MAX; blk_idx++) {
+ struct sja1105_table *table = &config.tables[blk_idx];
+
+ table->entry_count = table->ops->max_entry_count;
+ }
+
+ return sja1105_static_config_get_length(&config);
+}
+
+static int
+sja1105_region_static_config_snapshot(struct devlink *dl,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+ struct sja1105_private *priv = ds->priv;
+ size_t max_len, len;
+
+ len = sja1105_static_config_get_length(&priv->static_config);
+ max_len = sja1105_static_config_get_max_size(priv);
+
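+ /* The region was registered with the maximum possible size (see
+ * sja1105_setup_devlink_regions() below), so allocate that much,
+ * even though only the first 'len' bytes hold the current config.
+ */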
+ *data = kcalloc(max_len, sizeof(u8), GFP_KERNEL);
+ if (!*data)
+ return -ENOMEM;
+
+ return static_config_buf_prepare_for_upload(priv, *data, len);
+}
+
+static struct devlink_region_ops sja1105_region_static_config_ops = {
+ .name = "static-config",
+ .snapshot = sja1105_region_static_config_snapshot,
+ .destructor = kfree,
+};
+
+enum sja1105_region_id {
+ SJA1105_REGION_STATIC_CONFIG = 0,
+};
+
+struct sja1105_region {
+ const struct devlink_region_ops *ops;
+ size_t (*get_size)(struct sja1105_private *priv);
+};
+
+static struct sja1105_region sja1105_regions[] = {
+ [SJA1105_REGION_STATIC_CONFIG] = {
+ .ops = &sja1105_region_static_config_ops,
+ .get_size = sja1105_static_config_get_max_size,
+ },
+};
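+
+/* Adding a new devlink region only requires an ID, a struct
+ * devlink_region_ops and an entry in the array above; the setup and
+ * teardown loops below iterate over ARRAY_SIZE(sja1105_regions).
+ */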
+
+static int sja1105_setup_devlink_regions(struct dsa_switch *ds)
+{
+ int i, num_regions = ARRAY_SIZE(sja1105_regions);
+ struct sja1105_private *priv = ds->priv;
+ const struct devlink_region_ops *ops;
+ struct devlink_region *region;
+ u64 size;
+
+ priv->regions = kcalloc(num_regions, sizeof(struct devlink_region *),
+ GFP_KERNEL);
+ if (!priv->regions)
+ return -ENOMEM;
+
+ for (i = 0; i < num_regions; i++) {
+ size = sja1105_regions[i].get_size(priv);
+ ops = sja1105_regions[i].ops;
+
+ region = dsa_devlink_region_create(ds, ops, 1, size);
+ if (IS_ERR(region)) {
+ while (--i >= 0)
+ dsa_devlink_region_destroy(priv->regions[i]);
+
+ kfree(priv->regions);
+ return PTR_ERR(region);
+ }
+
+ priv->regions[i] = region;
+ }
+
+ return 0;
+}
+
+static void sja1105_teardown_devlink_regions(struct dsa_switch *ds)
+{
+ int i, num_regions = ARRAY_SIZE(sja1105_regions);
+ struct sja1105_private *priv = ds->priv;
+
+ for (i = 0; i < num_regions; i++)
+ dsa_devlink_region_destroy(priv->regions[i]);
+
+ kfree(priv->regions);
+}
+
+int sja1105_devlink_info_get(struct dsa_switch *ds,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ rc = devlink_info_driver_name_put(req, "sja1105");
+ if (rc)
+ return rc;
+
+ rc = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
+ priv->info->name);
+ return rc;
+}
+
+int sja1105_devlink_setup(struct dsa_switch *ds)
+{
+ return sja1105_setup_devlink_regions(ds);
+}
+
+void sja1105_devlink_teardown(struct dsa_switch *ds)
+{
+ sja1105_teardown_devlink_regions(ds);
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
new file mode 100644
index 000000000..984c0e604
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@@ -0,0 +1,1413 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include "sja1105.h"
+
+/* In the dynamic configuration interface, the switch exposes a register-like
+ * view of some of the static configuration tables.
+ * Many times the field organization of the dynamic tables is abbreviated (not
+ * all fields are dynamically reconfigurable) and different from the static
+ * ones, but the key reason for having it is that we can spare a switch reset
+ * for settings that can be changed dynamically.
+ *
+ * This file creates a per-switch-family abstraction called
+ * struct sja1105_dynamic_table_ops and two operations that work with it:
+ * - sja1105_dynamic_config_write
+ * - sja1105_dynamic_config_read
+ *
+ * Compared to the struct sja1105_table_ops from sja1105_static_config.c,
+ * the dynamic accessors work with a compound buffer:
+ *
+ * packed_buf
+ *
+ * |
+ * V
+ * +-----------------------------------------+------------------+
+ * | ENTRY BUFFER | COMMAND BUFFER |
+ * +-----------------------------------------+------------------+
+ *
+ * <----------------------- packed_size ------------------------>
+ *
+ * The ENTRY BUFFER may or may not have the same layout, or size, as its static
+ * configuration table entry counterpart. When it does, the same packing
+ * function is reused (bar exceptional cases - see
+ * sja1105pqrs_dyn_l2_lookup_entry_packing).
+ *
+ * The reason for the COMMAND BUFFER being at the end is to be able to send
+ * a dynamic write command through a single SPI burst. By the time the switch
+ * reacts to the command, the ENTRY BUFFER is already populated with the data
+ * sent by the core.
+ *
+ * The COMMAND BUFFER is always SJA1105_SIZE_DYN_CMD bytes (one 32-bit word) in
+ * size.
+ *
+ * Sometimes the ENTRY BUFFER does not really exist (when the number of fields
+ * that can be reconfigured is small); in that case, the switch repurposes
+ * some of the unused bits of the 32-bit COMMAND BUFFER to hold ENTRY data.
+ *
+ * The key members of struct sja1105_dynamic_table_ops are:
+ * - .entry_packing: A function that deals with packing an ENTRY structure
+ * into an SPI buffer, or retrieving an ENTRY structure
+ * from one.
+ * The @packed_buf pointer it is given always points to
+ * the ENTRY portion of the buffer.
+ * - .cmd_packing: A function that deals with packing/unpacking the COMMAND
+ * structure to/from the SPI buffer.
+ * It is given the same @packed_buf pointer as .entry_packing,
+ * so most of the time, the @packed_buf points *behind* the
+ * COMMAND offset inside the buffer.
+ * To access the COMMAND portion of the buffer, the function
+ * knows its correct offset.
+ * Giving both functions the same pointer is handy because in
+ * extreme cases (see sja1105pqrs_dyn_l2_lookup_entry_packing)
+ * the .entry_packing is able to jump to the COMMAND portion,
+ * or vice-versa (sja1105pqrs_l2_lookup_cmd_packing).
+ * - .access: A bitmap of:
+ * OP_READ: Set if the hardware manual marks the ENTRY portion of the
+ * dynamic configuration table buffer as R (readable) after
+ * an SPI read command (the switch will populate the buffer).
+ * OP_WRITE: Set if the manual marks the ENTRY portion of the dynamic
+ * table buffer as W (writable) after an SPI write command
+ * (the switch will read the fields provided in the buffer).
+ * OP_DEL: Set if the manual says the VALIDENT bit is supported in the
+ * COMMAND portion of this dynamic config buffer (i.e. the
+ * specified entry can be invalidated through an SPI write
+ * command).
+ * OP_SEARCH: Set if the manual says that the index of an entry can
+ * be retrieved in the COMMAND portion of the buffer based
+ * on its ENTRY portion, as a result of an SPI write command.
+ * Only the TCAM-based FDB table on SJA1105 P/Q/R/S supports
+ * this.
+ * OP_VALID_ANYWAY: Reading some tables through the dynamic config
+ * interface is possible even if the VALIDENT bit is not
+ * set in the writeback. So don't error out in that case.
+ * - .max_entry_count: The number of entries, counting from zero, that can be
+ * reconfigured through the dynamic interface. If a static
+ * table can be reconfigured at all dynamically, this
+ * number always matches the maximum number of supported
+ * static entries.
+ * - .packed_size: The length in bytes of the compound ENTRY + COMMAND BUFFER.
+ * Note that sometimes the compound buffer may contain holes in
+ * it (see sja1105_vlan_lookup_cmd_packing). The @packed_buf is
+ * contiguous however, so @packed_size includes any unused
+ * bytes.
+ * - .addr: The base SPI address at which the buffer must be written to the
+ * switch's memory. When looking at the hardware manual, this must
+ * always match the lowest documented address for the ENTRY, and not
+ * that of the COMMAND, since the other 32-bit words will follow along
+ * at the correct addresses.
+ */
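+
+/* A concrete instance of the layout above: the E/T VLAN lookup compound
+ * buffer (see sja1105_vlan_lookup_cmd_packing()) is two 32-bit ENTRY words,
+ * one unused word, then the COMMAND word - hence the extra "+ 4" in
+ * SJA1105_SIZE_VLAN_LOOKUP_DYN_CMD below.
+ */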
+
+#define SJA1105_SIZE_DYN_CMD 4
+
+#define SJA1105ET_SIZE_VL_LOOKUP_DYN_CMD \
+ SJA1105_SIZE_DYN_CMD
+
+#define SJA1105PQRS_SIZE_VL_LOOKUP_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_VL_LOOKUP_ENTRY)
+
+#define SJA1110_SIZE_VL_POLICING_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_VL_POLICING_ENTRY)
+
+#define SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY \
+ SJA1105_SIZE_DYN_CMD
+
+#define SJA1105ET_SIZE_L2_LOOKUP_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105ET_SIZE_L2_LOOKUP_ENTRY)
+
+#define SJA1105PQRS_SIZE_L2_LOOKUP_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY)
+
+#define SJA1110_SIZE_L2_LOOKUP_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1110_SIZE_L2_LOOKUP_ENTRY)
+
+#define SJA1105_SIZE_VLAN_LOOKUP_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + 4 + SJA1105_SIZE_VLAN_LOOKUP_ENTRY)
+
+#define SJA1110_SIZE_VLAN_LOOKUP_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1110_SIZE_VLAN_LOOKUP_ENTRY)
+
+#define SJA1105_SIZE_L2_FORWARDING_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_L2_FORWARDING_ENTRY)
+
+#define SJA1105ET_SIZE_MAC_CONFIG_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY)
+
+#define SJA1105PQRS_SIZE_MAC_CONFIG_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY)
+
+#define SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD \
+ SJA1105_SIZE_DYN_CMD
+
+#define SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY)
+
+#define SJA1110_SIZE_L2_LOOKUP_PARAMS_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1110_SIZE_L2_LOOKUP_PARAMS_ENTRY)
+
+#define SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD \
+ SJA1105_SIZE_DYN_CMD
+
+#define SJA1105PQRS_SIZE_GENERAL_PARAMS_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY)
+
+#define SJA1110_SIZE_GENERAL_PARAMS_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1110_SIZE_GENERAL_PARAMS_ENTRY)
+
+#define SJA1105PQRS_SIZE_AVB_PARAMS_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY)
+
+#define SJA1105_SIZE_RETAGGING_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_RETAGGING_ENTRY)
+
+#define SJA1105ET_SIZE_CBS_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105ET_SIZE_CBS_ENTRY)
+
+#define SJA1105PQRS_SIZE_CBS_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_CBS_ENTRY)
+
+#define SJA1110_SIZE_XMII_PARAMS_DYN_CMD \
+ SJA1110_SIZE_XMII_PARAMS_ENTRY
+
+#define SJA1110_SIZE_L2_POLICING_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_L2_POLICING_ENTRY)
+
+#define SJA1110_SIZE_L2_FORWARDING_PARAMS_DYN_CMD \
+ SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY
+
+#define SJA1105_MAX_DYN_CMD_SIZE \
+ SJA1110_SIZE_GENERAL_PARAMS_DYN_CMD
+
+struct sja1105_dyn_cmd {
+ bool search;
+ u64 valid;
+ u64 rdwrset;
+ u64 errors;
+ u64 valident;
+ u64 index;
+};
+
+enum sja1105_hostcmd {
+ SJA1105_HOSTCMD_SEARCH = 1,
+ SJA1105_HOSTCMD_READ = 2,
+ SJA1105_HOSTCMD_WRITE = 3,
+ SJA1105_HOSTCMD_INVALIDATE = 4,
+};
+
+/* Command and entry overlap */
+static void
+sja1105et_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(buf, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(buf, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(buf, &cmd->rdwrset, 29, 29, size, op);
+ sja1105_packing(buf, &cmd->index, 9, 0, size, op);
+}
+
+/* Command and entry are separate */
+static void
+sja1105pqrs_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_VL_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 9, 0, size, op);
+}
+
+static void
+sja1110_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 11, 0, size, op);
+}
+
+static size_t sja1105et_vl_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_lookup_entry *entry = entry_ptr;
+ const int size = SJA1105ET_SIZE_VL_LOOKUP_DYN_CMD;
+
+ sja1105_packing(buf, &entry->egrmirr, 21, 17, size, op);
+ sja1105_packing(buf, &entry->ingrmirr, 16, 16, size, op);
+ return size;
+}
+
+static void
+sja1110_vl_policing_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_VL_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->index, 11, 0, size, op);
+}
+
+static void
+sja1105pqrs_common_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op, int entry_size)
+{
+ const int size = SJA1105_SIZE_DYN_CMD;
+ u8 *p = buf + entry_size;
+ u64 hostcmd;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->valident, 27, 27, size, op);
+
+ /* VALIDENT is supposed to indicate "keep or not", but in SJA1105 E/T,
+ * using it to delete a management route was unsupported. UM10944
+ * said about it:
+ *
+ * In case of a write access with the MGMTROUTE flag set,
+ * the flag will be ignored. It will always be found cleared
+ * for read accesses with the MGMTROUTE flag set.
+ *
+ * SJA1105 P/Q/R/S keeps the same behavior w.r.t. VALIDENT, but there
+ * is now another flag called HOSTCMD which does more stuff (quoting
+ * from UM11040):
+ *
+ * A write request is accepted only when HOSTCMD is set to write host
+ * or invalid. A read request is accepted only when HOSTCMD is set to
+ * search host or read host.
+ *
+ * So it is possible to translate a RDWRSET/VALIDENT combination into
+ * HOSTCMD so that we keep the dynamic command API in place, and
+ * at the same time achieve compatibility with the management route
+ * command structure.
+ */
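+ /* The mapping implemented below, as a table:
+ *
+ * RDWRSET SEARCH VALIDENT -> HOSTCMD
+ * read 1 any SJA1105_HOSTCMD_SEARCH
+ * read 0 any SJA1105_HOSTCMD_READ
+ * write any 1 SJA1105_HOSTCMD_WRITE
+ * write any 0 SJA1105_HOSTCMD_INVALIDATE
+ */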
+ if (cmd->rdwrset == SPI_READ) {
+ if (cmd->search)
+ hostcmd = SJA1105_HOSTCMD_SEARCH;
+ else
+ hostcmd = SJA1105_HOSTCMD_READ;
+ } else {
+ /* SPI_WRITE */
+ if (cmd->valident)
+ hostcmd = SJA1105_HOSTCMD_WRITE;
+ else
+ hostcmd = SJA1105_HOSTCMD_INVALIDATE;
+ }
+ sja1105_packing(p, &hostcmd, 25, 23, size, op);
+}
+
+static void
+sja1105pqrs_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ int entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+
+ sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, entry_size);
+
+ /* Hack - The hardware takes the 'index' field within
+ * struct sja1105_l2_lookup_entry as the index on which this command
+ * will operate. However, it will ignore everything else, so 'index'
+ * is logically part of the command but physically part of the entry.
+ * Populate the 'index' entry field from within the command callback,
+ * such that our API doesn't need to ask for a full-blown entry
+ * structure when e.g. a delete is requested.
+ */
+ sja1105_packing(buf, &cmd->index, 15, 6, entry_size, op);
+}
+
+static void
+sja1110_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ int entry_size = SJA1110_SIZE_L2_LOOKUP_ENTRY;
+
+ sja1105pqrs_common_l2_lookup_cmd_packing(buf, cmd, op, entry_size);
+
+ sja1105_packing(buf, &cmd->index, 10, 1, entry_size, op);
+}
+
+/* The switch interface is quirky enough here to make our command/entry
+ * abstraction crumble apart.
+ *
+ * On P/Q/R/S, the switch tries to say whether an FDB entry
+ * is statically programmed or dynamically learned via a flag called LOCKEDS.
+ * The hardware manual says about this field:
+ *
+ * On write will specify the format of ENTRY.
+ * On read the flag will be found cleared at times the VALID flag is found
+ * set. The flag will also be found cleared in response to a read having the
+ * MGMTROUTE flag set. In response to a read with the MGMTROUTE flag
+ * cleared, the flag will be set if the most recent access operated on an entry
+ * that was either loaded by configuration or through dynamic reconfiguration
+ * (as opposed to automatically learned entries).
+ *
+ * The trouble with this flag is that it's part of the *command* to access the
+ * dynamic interface, and not part of the *entry* retrieved from it.
+ * Otherwise said, for a sja1105_dynamic_config_read, LOCKEDS is supposed to be
+ * an output from the switch into the command buffer, and for a
+ * sja1105_dynamic_config_write, the switch treats LOCKEDS as an input
+ * (hence we can write either static, or automatically learned entries, from
+ * the core).
+ * But the manual contradicts itself in the last phrase where it says that on
+ * read, LOCKEDS will be set to 1 for all FDB entries written through the
+ * dynamic interface (therefore, the value of LOCKEDS from the
+ * sja1105_dynamic_config_write is not really used for anything, it'll store a
+ * 1 anyway).
+ * This means you can't really write an FDB entry with LOCKEDS=0 (automatically
+ * learned) into the switch, which kind of makes sense.
+ * As for reading through the dynamic interface, it doesn't make too much sense
+ * to put LOCKEDS into the command, since the switch will inevitably have to
+ * ignore it (otherwise a command would be like "read the FDB entry 123, but
+ * only if it's dynamically learned" <- well how am I supposed to know?) and
+ * just use it as an output buffer for its findings. But guess what... that's
+ * what the entry buffer is for!
+ * Unfortunately, what really breaks this abstraction is the fact that it
+ * wasn't designed having the fact in mind that the switch can output
+ * entry-related data as writeback through the command buffer.
+ * However, whether an FDB entry is statically or dynamically learned *is* part
+ * of the entry and not the command data, no matter what the switch thinks.
+ * In order to do that, we'll need to wrap around the
+ * sja1105pqrs_l2_lookup_entry_packing from sja1105_static_config.c, and take
+ * a peek outside of the caller-supplied @buf (the entry buffer), to reach the
+ * command buffer.
+ */
+static size_t
+sja1105pqrs_dyn_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_lookup_entry *entry = entry_ptr;
+ u8 *cmd = buf + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(cmd, &entry->lockeds, 28, 28, size, op);
+
+ return sja1105pqrs_l2_lookup_entry_packing(buf, entry_ptr, op);
+}
+
+static size_t sja1110_dyn_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_lookup_entry *entry = entry_ptr;
+ u8 *cmd = buf + SJA1110_SIZE_L2_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(cmd, &entry->lockeds, 28, 28, size, op);
+
+ return sja1110_l2_lookup_entry_packing(buf, entry_ptr, op);
+}
+
+static void
+sja1105et_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->valident, 27, 27, size, op);
+ /* Hack - see comments above. */
+ sja1105_packing(buf, &cmd->index, 29, 20,
+ SJA1105ET_SIZE_L2_LOOKUP_ENTRY, op);
+}
+
+static size_t sja1105et_dyn_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_lookup_entry *entry = entry_ptr;
+ u8 *cmd = buf + SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(cmd, &entry->lockeds, 28, 28, size, op);
+
+ return sja1105et_l2_lookup_entry_packing(buf, entry_ptr, op);
+}
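+
+/* The pattern is identical for all three families above: splice LOCKEDS out
+ * of bit 28 of the command word, then delegate the rest of the entry to the
+ * static config packing helper.
+ */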
+
+static void
+sja1105et_mgmt_route_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
+ u64 mgmtroute = 1;
+
+ sja1105et_l2_lookup_cmd_packing(buf, cmd, op);
+ if (op == PACK)
+ sja1105_pack(p, &mgmtroute, 26, 26, SJA1105_SIZE_DYN_CMD);
+}
+
+static size_t sja1105et_mgmt_route_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_mgmt_entry *entry = entry_ptr;
+ const size_t size = SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
+
+ /* UM10944: To specify if a PTP egress timestamp shall be captured on
+ * each port upon transmission of the frame, the LSB of VLANID in the
+ * ENTRY field provided by the host must be set.
+ * Bit 1 of VLANID then specifies the register where the timestamp for
+ * this port is stored in.
+ */
+ sja1105_packing(buf, &entry->tsreg, 85, 85, size, op);
+ sja1105_packing(buf, &entry->takets, 84, 84, size, op);
+ sja1105_packing(buf, &entry->macaddr, 83, 36, size, op);
+ sja1105_packing(buf, &entry->destports, 35, 31, size, op);
+ sja1105_packing(buf, &entry->enfport, 30, 30, size, op);
+ return size;
+}
+
+static void
+sja1105pqrs_mgmt_route_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+ u64 mgmtroute = 1;
+
+ sja1105pqrs_l2_lookup_cmd_packing(buf, cmd, op);
+ if (op == PACK)
+ sja1105_pack(p, &mgmtroute, 26, 26, SJA1105_SIZE_DYN_CMD);
+}
+
+static size_t sja1105pqrs_mgmt_route_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+ struct sja1105_mgmt_entry *entry = entry_ptr;
+
+ /* In P/Q/R/S, enfport got renamed to mgmtvalid, but its purpose
+ * is the same (driver uses it to confirm that frame was sent).
+ * So just keep the name from E/T.
+ */
+ sja1105_packing(buf, &entry->tsreg, 71, 71, size, op);
+ sja1105_packing(buf, &entry->takets, 70, 70, size, op);
+ sja1105_packing(buf, &entry->macaddr, 69, 22, size, op);
+ sja1105_packing(buf, &entry->destports, 21, 17, size, op);
+ sja1105_packing(buf, &entry->enfport, 16, 16, size, op);
+ return size;
+}
+
+/* In E/T, the entry is at addresses 0x27-0x28. There is a 4-byte gap at 0x29,
+ * and the command is at 0x2a. Similarly, in P/Q/R/S there is a 1-register gap
+ * between the entry (0x2d, 0x2e) and the command (0x30).
+ */
+static void
+sja1105_vlan_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_VLAN_LOOKUP_ENTRY + 4;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->valident, 27, 27, size, op);
+ /* Hack - see comments above, applied to the 'vlanid' field of
+ * struct sja1105_vlan_lookup_entry.
+ */
+ sja1105_packing(buf, &cmd->index, 38, 27,
+ SJA1105_SIZE_VLAN_LOOKUP_ENTRY, op);
+}
+
+/* In SJA1110 there is no gap between the command and the data, yay... */
+static void
+sja1110_vlan_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1110_SIZE_VLAN_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+ u64 type_entry = 0;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ /* Hack: treat 'vlanid' field of struct sja1105_vlan_lookup_entry as
+ * cmd->index.
+ */
+ sja1105_packing(buf, &cmd->index, 38, 27,
+ SJA1110_SIZE_VLAN_LOOKUP_ENTRY, op);
+
+ /* But the VALIDENT bit has disappeared; now we are supposed to
+ * invalidate an entry through the TYPE_ENTRY field of the entry.
+ * This is a hack that maps the VALIDENT bit onto the TYPE_ENTRY
+ * field being non-zero.
+ */
+ if (op == PACK && !cmd->valident) {
+ sja1105_packing(buf, &type_entry, 40, 39,
+ SJA1110_SIZE_VLAN_LOOKUP_ENTRY, PACK);
+ } else if (op == UNPACK) {
+ sja1105_packing(buf, &type_entry, 40, 39,
+ SJA1110_SIZE_VLAN_LOOKUP_ENTRY, UNPACK);
+ cmd->valident = !!type_entry;
+ }
+}
+
+static void
+sja1105_l2_forwarding_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_L2_FORWARDING_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 4, 0, size, op);
+}
+
+static void
+sja1110_l2_forwarding_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_L2_FORWARDING_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 4, 0, size, op);
+}
+
+static void
+sja1105et_mac_config_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105_SIZE_DYN_CMD;
+ /* Yup, user manual definitions are reversed */
+ u8 *reg1 = buf + 4;
+
+ sja1105_packing(reg1, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(reg1, &cmd->index, 26, 24, size, op);
+}
+
+static size_t sja1105et_mac_config_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const int size = SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY;
+ struct sja1105_mac_config_entry *entry = entry_ptr;
+ /* Yup, user manual definitions are reversed */
+ u8 *reg1 = buf + 4;
+ u8 *reg2 = buf;
+
+ sja1105_packing(reg1, &entry->speed, 30, 29, size, op);
+ sja1105_packing(reg1, &entry->drpdtag, 23, 23, size, op);
+ sja1105_packing(reg1, &entry->drpuntag, 22, 22, size, op);
+ sja1105_packing(reg1, &entry->retag, 21, 21, size, op);
+ sja1105_packing(reg1, &entry->dyn_learn, 20, 20, size, op);
+ sja1105_packing(reg1, &entry->egress, 19, 19, size, op);
+ sja1105_packing(reg1, &entry->ingress, 18, 18, size, op);
+ sja1105_packing(reg1, &entry->ing_mirr, 17, 17, size, op);
+ sja1105_packing(reg1, &entry->egr_mirr, 16, 16, size, op);
+ sja1105_packing(reg1, &entry->vlanprio, 14, 12, size, op);
+ sja1105_packing(reg1, &entry->vlanid, 11, 0, size, op);
+ sja1105_packing(reg2, &entry->tp_delin, 31, 16, size, op);
+ sja1105_packing(reg2, &entry->tp_delout, 15, 0, size, op);
+ /* MAC configuration table entries which can't be reconfigured:
+ * top, base, enabled, ifg, maxage, drpnona664
+ */
+ /* Bogus return value, not used anywhere */
+ return 0;
+}
+
+static void
+sja1105pqrs_mac_config_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY;
+ u8 *p = buf + SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 2, 0, size, op);
+}
+
+static void
+sja1110_mac_config_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 3, 0, size, op);
+}
+
+static void
+sja1105et_l2_lookup_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ sja1105_packing(buf, &cmd->valid, 31, 31,
+ SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD, op);
+}
+
+static size_t
+sja1105et_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_lookup_params_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->poly, 7, 0,
+ SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD, op);
+ /* Bogus return value, not used anywhere */
+ return 0;
+}
+
+static void
+sja1105pqrs_l2_lookup_params_cmd_packing(void *buf,
+ struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+}
+
+static void
+sja1110_l2_lookup_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1110_SIZE_L2_LOOKUP_PARAMS_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+}
+
+static void
+sja1105et_general_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD;
+
+ sja1105_packing(buf, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(buf, &cmd->errors, 30, 30, size, op);
+}
+
+static size_t
+sja1105et_general_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_general_params_entry *entry = entry_ptr;
+ const int size = SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD;
+
+ sja1105_packing(buf, &entry->mirr_port, 2, 0, size, op);
+ /* Bogus return value, not used anywhere */
+ return 0;
+}
+
+static void
+sja1105pqrs_general_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 28, 28, size, op);
+}
+
+static void
+sja1110_general_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1110_SIZE_GENERAL_PARAMS_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+}
+
+static void
+sja1105pqrs_avb_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
+}
+
+static void
+sja1105_retagging_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_RETAGGING_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(p, &cmd->valident, 29, 29, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 28, 28, size, op);
+ sja1105_packing(p, &cmd->index, 5, 0, size, op);
+}
+
+static void
+sja1110_retagging_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_RETAGGING_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->valident, 28, 28, size, op);
+ sja1105_packing(p, &cmd->index, 4, 0, size, op);
+}
+
+static void sja1105et_cbs_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105ET_SIZE_CBS_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->index, 19, 16, size, op);
+}
+
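+/* The ET CBS entry is laid out as four 32-bit words; idle_slope sits in
+ * the first word of the buffer and credit_lo in the last, hence the
+ * reverse word order of the packing calls below.
+ */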
+static size_t sja1105et_cbs_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105ET_SIZE_CBS_ENTRY;
+ struct sja1105_cbs_entry *entry = entry_ptr;
+ u8 *cmd = buf + size;
+ u32 *p = buf;
+
+ sja1105_packing(cmd, &entry->port, 5, 3, SJA1105_SIZE_DYN_CMD, op);
+ sja1105_packing(cmd, &entry->prio, 2, 0, SJA1105_SIZE_DYN_CMD, op);
+ sja1105_packing(p + 3, &entry->credit_lo, 31, 0, size, op);
+ sja1105_packing(p + 2, &entry->credit_hi, 31, 0, size, op);
+ sja1105_packing(p + 1, &entry->send_slope, 31, 0, size, op);
+ sja1105_packing(p + 0, &entry->idle_slope, 31, 0, size, op);
+ return size;
+}
+
+static void sja1105pqrs_cbs_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_CBS_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 3, 0, size, op);
+}
+
+static void sja1110_cbs_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_CBS_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 7, 0, size, op);
+}
+
+static size_t sja1105pqrs_cbs_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_CBS_ENTRY;
+ struct sja1105_cbs_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->port, 159, 157, size, op);
+ sja1105_packing(buf, &entry->prio, 156, 154, size, op);
+ sja1105_packing(buf, &entry->credit_lo, 153, 122, size, op);
+ sja1105_packing(buf, &entry->credit_hi, 121, 90, size, op);
+ sja1105_packing(buf, &entry->send_slope, 89, 58, size, op);
+ sja1105_packing(buf, &entry->idle_slope, 57, 26, size, op);
+ return size;
+}
+
+static size_t sja1110_cbs_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_CBS_ENTRY;
+ struct sja1105_cbs_entry *entry = entry_ptr;
+ u64 entry_type = SJA1110_CBS_SHAPER;
+
+ sja1105_packing(buf, &entry_type, 159, 159, size, op);
+ sja1105_packing(buf, &entry->credit_lo, 151, 120, size, op);
+ sja1105_packing(buf, &entry->credit_hi, 119, 88, size, op);
+ sja1105_packing(buf, &entry->send_slope, 87, 56, size, op);
+ sja1105_packing(buf, &entry->idle_slope, 55, 24, size, op);
+ return size;
+}
+
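+/* The SJA1110 xMII params and L2 forwarding params regions have no
+ * command word; with this no-op callback, the polled VALID bit stays
+ * zero and the entry is read back on the first attempt.
+ */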
+static void sja1110_dummy_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+}
+
+static void
+sja1110_l2_policing_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_L2_POLICING_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 6, 0, size, op);
+}
+
+#define OP_READ BIT(0)
+#define OP_WRITE BIT(1)
+#define OP_DEL BIT(2)
+#define OP_SEARCH BIT(3)
+#define OP_VALID_ANYWAY BIT(4)
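+
+/* Per-block capabilities of the dynamic interface:
+ * OP_READ/OP_WRITE: the block can be read and/or written dynamically;
+ * OP_DEL: entries can be deleted (a write with VALIDENT unset);
+ * OP_SEARCH: the block supports searching by entry key (negative index);
+ * OP_VALID_ANYWAY: reads are accepted even when VALIDENT is unset.
+ */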
+
+/* SJA1105E/T: First generation */
+const struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
+ [BLK_IDX_VL_LOOKUP] = {
+ .entry_packing = sja1105et_vl_lookup_entry_packing,
+ .cmd_packing = sja1105et_vl_lookup_cmd_packing,
+ .access = OP_WRITE,
+ .max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
+ .packed_size = SJA1105ET_SIZE_VL_LOOKUP_DYN_CMD,
+ .addr = 0x35,
+ },
+ [BLK_IDX_L2_LOOKUP] = {
+ .entry_packing = sja1105et_dyn_l2_lookup_entry_packing,
+ .cmd_packing = sja1105et_l2_lookup_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_DEL),
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ .packed_size = SJA1105ET_SIZE_L2_LOOKUP_DYN_CMD,
+ .addr = 0x20,
+ },
+ [BLK_IDX_MGMT_ROUTE] = {
+ .entry_packing = sja1105et_mgmt_route_entry_packing,
+ .cmd_packing = sja1105et_mgmt_route_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .max_entry_count = SJA1105_NUM_PORTS,
+ .packed_size = SJA1105ET_SIZE_L2_LOOKUP_DYN_CMD,
+ .addr = 0x20,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .entry_packing = sja1105_vlan_lookup_entry_packing,
+ .cmd_packing = sja1105_vlan_lookup_cmd_packing,
+ .access = (OP_WRITE | OP_DEL),
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ .packed_size = SJA1105_SIZE_VLAN_LOOKUP_DYN_CMD,
+ .addr = 0x27,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .entry_packing = sja1105_l2_forwarding_entry_packing,
+ .cmd_packing = sja1105_l2_forwarding_cmd_packing,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105_SIZE_L2_FORWARDING_DYN_CMD,
+ .addr = 0x24,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .entry_packing = sja1105et_mac_config_entry_packing,
+ .cmd_packing = sja1105et_mac_config_cmd_packing,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105ET_SIZE_MAC_CONFIG_DYN_CMD,
+ .addr = 0x36,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .entry_packing = sja1105et_l2_lookup_params_entry_packing,
+ .cmd_packing = sja1105et_l2_lookup_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD,
+ .addr = 0x38,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .entry_packing = sja1105et_general_params_entry_packing,
+ .cmd_packing = sja1105et_general_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD,
+ .addr = 0x34,
+ },
+ [BLK_IDX_RETAGGING] = {
+ .entry_packing = sja1105_retagging_entry_packing,
+ .cmd_packing = sja1105_retagging_cmd_packing,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ .access = (OP_WRITE | OP_DEL),
+ .packed_size = SJA1105_SIZE_RETAGGING_DYN_CMD,
+ .addr = 0x31,
+ },
+ [BLK_IDX_CBS] = {
+ .entry_packing = sja1105et_cbs_entry_packing,
+ .cmd_packing = sja1105et_cbs_cmd_packing,
+ .max_entry_count = SJA1105ET_MAX_CBS_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105ET_SIZE_CBS_DYN_CMD,
+ .addr = 0x2c,
+ },
+};
+
+/* SJA1105P/Q/R/S: Second generation */
+const struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
+ [BLK_IDX_VL_LOOKUP] = {
+ .entry_packing = sja1105_vl_lookup_entry_packing,
+ .cmd_packing = sja1105pqrs_vl_lookup_cmd_packing,
+ .access = (OP_READ | OP_WRITE),
+ .max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
+ .packed_size = SJA1105PQRS_SIZE_VL_LOOKUP_DYN_CMD,
+ .addr = 0x47,
+ },
+ [BLK_IDX_L2_LOOKUP] = {
+ .entry_packing = sja1105pqrs_dyn_l2_lookup_entry_packing,
+ .cmd_packing = sja1105pqrs_l2_lookup_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_DEL | OP_SEARCH),
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ .packed_size = SJA1105PQRS_SIZE_L2_LOOKUP_DYN_CMD,
+ .addr = 0x24,
+ },
+ [BLK_IDX_MGMT_ROUTE] = {
+ .entry_packing = sja1105pqrs_mgmt_route_entry_packing,
+ .cmd_packing = sja1105pqrs_mgmt_route_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_DEL | OP_SEARCH | OP_VALID_ANYWAY),
+ .max_entry_count = SJA1105_NUM_PORTS,
+ .packed_size = SJA1105PQRS_SIZE_L2_LOOKUP_DYN_CMD,
+ .addr = 0x24,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .entry_packing = sja1105_vlan_lookup_entry_packing,
+ .cmd_packing = sja1105_vlan_lookup_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_DEL),
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ .packed_size = SJA1105_SIZE_VLAN_LOOKUP_DYN_CMD,
+ .addr = 0x2D,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .entry_packing = sja1105_l2_forwarding_entry_packing,
+ .cmd_packing = sja1105_l2_forwarding_cmd_packing,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105_SIZE_L2_FORWARDING_DYN_CMD,
+ .addr = 0x2A,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .entry_packing = sja1105pqrs_mac_config_entry_packing,
+ .cmd_packing = sja1105pqrs_mac_config_cmd_packing,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ .access = (OP_READ | OP_WRITE),
+ .packed_size = SJA1105PQRS_SIZE_MAC_CONFIG_DYN_CMD,
+ .addr = 0x4B,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .entry_packing = sja1105pqrs_l2_lookup_params_entry_packing,
+ .cmd_packing = sja1105pqrs_l2_lookup_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ .access = (OP_READ | OP_WRITE),
+ .packed_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_DYN_CMD,
+ .addr = 0x54,
+ },
+ [BLK_IDX_AVB_PARAMS] = {
+ .entry_packing = sja1105pqrs_avb_params_entry_packing,
+ .cmd_packing = sja1105pqrs_avb_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ .access = (OP_READ | OP_WRITE),
+ .packed_size = SJA1105PQRS_SIZE_AVB_PARAMS_DYN_CMD,
+ .addr = 0x8003,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .entry_packing = sja1105pqrs_general_params_entry_packing,
+ .cmd_packing = sja1105pqrs_general_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ .access = (OP_READ | OP_WRITE),
+ .packed_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_DYN_CMD,
+ .addr = 0x3B,
+ },
+ [BLK_IDX_RETAGGING] = {
+ .entry_packing = sja1105_retagging_entry_packing,
+ .cmd_packing = sja1105_retagging_cmd_packing,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_DEL),
+ .packed_size = SJA1105_SIZE_RETAGGING_DYN_CMD,
+ .addr = 0x38,
+ },
+ [BLK_IDX_CBS] = {
+ .entry_packing = sja1105pqrs_cbs_entry_packing,
+ .cmd_packing = sja1105pqrs_cbs_cmd_packing,
+ .max_entry_count = SJA1105PQRS_MAX_CBS_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105PQRS_SIZE_CBS_DYN_CMD,
+ .addr = 0x32,
+ },
+};
+
+/* SJA1110: Third generation */
+const struct sja1105_dynamic_table_ops sja1110_dyn_ops[BLK_IDX_MAX_DYN] = {
+ [BLK_IDX_VL_LOOKUP] = {
+ .entry_packing = sja1110_vl_lookup_entry_packing,
+ .cmd_packing = sja1110_vl_lookup_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .max_entry_count = SJA1110_MAX_VL_LOOKUP_COUNT,
+ .packed_size = SJA1105PQRS_SIZE_VL_LOOKUP_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x124),
+ },
+ [BLK_IDX_VL_POLICING] = {
+ .entry_packing = sja1110_vl_policing_entry_packing,
+ .cmd_packing = sja1110_vl_policing_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .max_entry_count = SJA1110_MAX_VL_POLICING_COUNT,
+ .packed_size = SJA1110_SIZE_VL_POLICING_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x310),
+ },
+ [BLK_IDX_L2_LOOKUP] = {
+ .entry_packing = sja1110_dyn_l2_lookup_entry_packing,
+ .cmd_packing = sja1110_l2_lookup_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_DEL | OP_SEARCH),
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ .packed_size = SJA1110_SIZE_L2_LOOKUP_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x8c),
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .entry_packing = sja1110_vlan_lookup_entry_packing,
+ .cmd_packing = sja1110_vlan_lookup_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_DEL),
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ .packed_size = SJA1110_SIZE_VLAN_LOOKUP_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0xb4),
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .entry_packing = sja1110_l2_forwarding_entry_packing,
+ .cmd_packing = sja1110_l2_forwarding_cmd_packing,
+ .max_entry_count = SJA1110_MAX_L2_FORWARDING_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .packed_size = SJA1105_SIZE_L2_FORWARDING_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0xa8),
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .entry_packing = sja1110_mac_config_entry_packing,
+ .cmd_packing = sja1110_mac_config_cmd_packing,
+ .max_entry_count = SJA1110_MAX_MAC_CONFIG_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .packed_size = SJA1105PQRS_SIZE_MAC_CONFIG_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x134),
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .entry_packing = sja1110_l2_lookup_params_entry_packing,
+ .cmd_packing = sja1110_l2_lookup_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .packed_size = SJA1110_SIZE_L2_LOOKUP_PARAMS_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x158),
+ },
+ [BLK_IDX_AVB_PARAMS] = {
+ .entry_packing = sja1105pqrs_avb_params_entry_packing,
+ .cmd_packing = sja1105pqrs_avb_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .packed_size = SJA1105PQRS_SIZE_AVB_PARAMS_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x2000C),
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .entry_packing = sja1110_general_params_entry_packing,
+ .cmd_packing = sja1110_general_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .packed_size = SJA1110_SIZE_GENERAL_PARAMS_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0xe8),
+ },
+ [BLK_IDX_RETAGGING] = {
+ .entry_packing = sja1110_retagging_entry_packing,
+ .cmd_packing = sja1110_retagging_cmd_packing,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_DEL),
+ .packed_size = SJA1105_SIZE_RETAGGING_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0xdc),
+ },
+ [BLK_IDX_CBS] = {
+ .entry_packing = sja1110_cbs_entry_packing,
+ .cmd_packing = sja1110_cbs_cmd_packing,
+ .max_entry_count = SJA1110_MAX_CBS_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .packed_size = SJA1105PQRS_SIZE_CBS_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0xc4),
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .entry_packing = sja1110_xmii_params_entry_packing,
+ .cmd_packing = sja1110_dummy_cmd_packing,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ .access = (OP_READ | OP_VALID_ANYWAY),
+ .packed_size = SJA1110_SIZE_XMII_PARAMS_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x3c),
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .entry_packing = sja1110_l2_policing_entry_packing,
+ .cmd_packing = sja1110_l2_policing_cmd_packing,
+ .max_entry_count = SJA1110_MAX_L2_POLICING_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .packed_size = SJA1110_SIZE_L2_POLICING_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x2fc),
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .entry_packing = sja1110_l2_forwarding_params_entry_packing,
+ .cmd_packing = sja1110_dummy_cmd_packing,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ .access = (OP_READ | OP_VALID_ANYWAY),
+ .packed_size = SJA1110_SIZE_L2_FORWARDING_PARAMS_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x20000),
+ },
+};
+
+#define SJA1105_DYNAMIC_CONFIG_SLEEP_US 10
+#define SJA1105_DYNAMIC_CONFIG_TIMEOUT_US 100000
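+
+/* The VALID bit is polled every 10 us, for at most 100 ms, before the
+ * dynamic config operation is declared timed out.
+ */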
+
+static int
+sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
+ const struct sja1105_dynamic_table_ops *ops,
+ void *entry, bool check_valident,
+ bool check_errors)
+{
+ u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {};
+ struct sja1105_dyn_cmd cmd = {};
+ int rc;
+
+ /* Read back the whole entry + command structure. */
+ rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf,
+ ops->packed_size);
+ if (rc)
+ return rc;
+
+ /* Unpack the command structure, and return it to the caller in case it
+ * needs to perform further checks on it (VALIDENT).
+ */
+ ops->cmd_packing(packed_buf, &cmd, UNPACK);
+
+ /* Hardware hasn't cleared VALID => still working on it */
+ if (cmd.valid)
+ return -EAGAIN;
+
+ if (check_valident && !cmd.valident && !(ops->access & OP_VALID_ANYWAY))
+ return -ENOENT;
+
+ if (check_errors && cmd.errors)
+ return -EINVAL;
+
+ /* Don't dereference a possibly NULL pointer - maybe the caller
+ * only wanted to see whether the entry existed or not.
+ */
+ if (entry)
+ ops->entry_packing(packed_buf, entry, UNPACK);
+
+ return 0;
+}
+
+/* Poll the dynamic config entry's control area until the hardware has
+ * cleared the VALID bit, which means we have confirmation that it has
+ * finished processing the command.
+ */
+static int
+sja1105_dynamic_config_wait_complete(struct sja1105_private *priv,
+ const struct sja1105_dynamic_table_ops *ops,
+ void *entry, bool check_valident,
+ bool check_errors)
+{
+ int err, rc;
+
+ err = read_poll_timeout(sja1105_dynamic_config_poll_valid,
+ rc, rc != -EAGAIN,
+ SJA1105_DYNAMIC_CONFIG_SLEEP_US,
+ SJA1105_DYNAMIC_CONFIG_TIMEOUT_US,
+ false, priv, ops, entry, check_valident,
+ check_errors);
+ return err < 0 ? err : rc;
+}
+
+/* Provides read access to the settings through the dynamic interface
+ * of the switch.
+ * @blk_idx is used as key to select from the sja1105_dynamic_table_ops.
+ * The hardware limits which configuration blocks can be read through
+ * the dynamic interface.
+ * @index is used to retrieve a particular table entry. If negative
+ * (and if the @blk_idx supports the search operation), a search is
+ * performed using the @entry parameter as key.
+ * @entry is type-cast to an unpacked structure that holds a table
+ * entry of the type specified in @blk_idx.
+ * Usually an output argument. If @index is negative, then this
+ * argument is used as input/output: it should be pre-populated
+ * with the element to search for. Entries which support the
+ * search operation will have an "index" field (not the @index
+ * argument to this function) and that is where the found index
+ * will be returned (or left unmodified - thus negative - if not
+ * found).
+ */
+int sja1105_dynamic_config_read(struct sja1105_private *priv,
+ enum sja1105_blk_idx blk_idx,
+ int index, void *entry)
+{
+ const struct sja1105_dynamic_table_ops *ops;
+ struct sja1105_dyn_cmd cmd = {0};
+ /* SPI payload buffer */
+ u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {0};
+ int rc;
+
+ if (blk_idx >= BLK_IDX_MAX_DYN)
+ return -ERANGE;
+
+ ops = &priv->info->dyn_ops[blk_idx];
+
+ if (index >= 0 && index >= ops->max_entry_count)
+ return -ERANGE;
+ if (index < 0 && !(ops->access & OP_SEARCH))
+ return -EOPNOTSUPP;
+ if (!(ops->access & OP_READ))
+ return -EOPNOTSUPP;
+ if (ops->packed_size > SJA1105_MAX_DYN_CMD_SIZE)
+ return -ERANGE;
+ if (!ops->cmd_packing)
+ return -EOPNOTSUPP;
+ if (!ops->entry_packing)
+ return -EOPNOTSUPP;
+
+ cmd.valid = true; /* Trigger action on table entry */
+ cmd.rdwrset = SPI_READ; /* Action is read */
+ if (index < 0) {
+ /* Avoid copying a signed negative number to a u64 */
+ cmd.index = 0;
+ cmd.search = true;
+ } else {
+ cmd.index = index;
+ cmd.search = false;
+ }
+ cmd.valident = true;
+ ops->cmd_packing(packed_buf, &cmd, PACK);
+
+ if (cmd.search)
+ ops->entry_packing(packed_buf, entry, PACK);
+
+ /* Send SPI write operation: read config table entry */
+ mutex_lock(&priv->dynamic_config_lock);
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
+ ops->packed_size);
+ if (rc < 0)
+ goto out;
+
+ rc = sja1105_dynamic_config_wait_complete(priv, ops, entry, true, false);
+out:
+ mutex_unlock(&priv->dynamic_config_lock);
+
+ return rc;
+}
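+
+/* Example usage of sja1105_dynamic_config_read() (a sketch, assuming the
+ * L2 lookup entry field names used elsewhere in this driver): searching
+ * the FDB by key is done with a negative index and a pre-populated entry:
+ *
+ * struct sja1105_l2_lookup_entry l2_lookup = {0};
+ *
+ * l2_lookup.macaddr = ether_addr_to_u64(addr);
+ * l2_lookup.vlanid = vid;
+ * rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ *                                  SJA1105_SEARCH, &l2_lookup);
+ *
+ * On success, l2_lookup.index holds the index of the matching FDB entry.
+ */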
+
+int sja1105_dynamic_config_write(struct sja1105_private *priv,
+ enum sja1105_blk_idx blk_idx,
+ int index, void *entry, bool keep)
+{
+ const struct sja1105_dynamic_table_ops *ops;
+ struct sja1105_dyn_cmd cmd = {0};
+ /* SPI payload buffer */
+ u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {0};
+ int rc;
+
+ if (blk_idx >= BLK_IDX_MAX_DYN)
+ return -ERANGE;
+
+ ops = &priv->info->dyn_ops[blk_idx];
+
+ if (index >= ops->max_entry_count)
+ return -ERANGE;
+ if (index < 0)
+ return -ERANGE;
+ if (!(ops->access & OP_WRITE))
+ return -EOPNOTSUPP;
+ if (!keep && !(ops->access & OP_DEL))
+ return -EOPNOTSUPP;
+ if (ops->packed_size > SJA1105_MAX_DYN_CMD_SIZE)
+ return -ERANGE;
+
+ cmd.valident = keep; /* If false, deletes entry */
+ cmd.valid = true; /* Trigger action on table entry */
+ cmd.rdwrset = SPI_WRITE; /* Action is write */
+ cmd.index = index;
+
+ if (!ops->cmd_packing)
+ return -EOPNOTSUPP;
+ ops->cmd_packing(packed_buf, &cmd, PACK);
+
+ if (!ops->entry_packing)
+ return -EOPNOTSUPP;
+ /* Don't dereference a potentially NULL pointer if deleting a
+ * table entry is all that was requested. For entries whose
+ * 'index' field is physically part of the entry structure and
+ * is needed here, we deal with that in the cmd_packing callback.
+ */
+ if (keep)
+ ops->entry_packing(packed_buf, entry, PACK);
+
+ /* Send SPI write operation: write config table entry */
+ mutex_lock(&priv->dynamic_config_lock);
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
+ ops->packed_size);
+ if (rc < 0)
+ goto out;
+
+ rc = sja1105_dynamic_config_wait_complete(priv, ops, NULL, false, true);
+out:
+ mutex_unlock(&priv->dynamic_config_lock);
+
+ return rc;
+}
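+
+/* Note that in sja1105_dynamic_config_write(), @keep doubles as the
+ * delete knob: writing an entry with keep=false packs VALIDENT as zero,
+ * which removes the entry from the hardware table (for blocks that
+ * advertise OP_DEL).
+ */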
+
+static u8 sja1105_crc8_add(u8 crc, u8 byte, u8 poly)
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ if ((crc ^ byte) & (1 << 7)) {
+ crc <<= 1;
+ crc ^= poly;
+ } else {
+ crc <<= 1;
+ }
+ byte <<= 1;
+ }
+ return crc;
+}
+
+/* CRC8 algorithm with non-reversed input, non-reversed output,
+ * no input xor and no output xor. Code customized for receiving
+ * the SJA1105 E/T FDB keys (vlanid, macaddr) as input. The CRC
+ * polynomial is also received as an argument, in the Koopman
+ * notation in which the switch hardware stores it.
+ */
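+/* For instance, a Koopman POLY of 0x97 converts to
+ * (u8)(1 + (0x97 << 1)) = 0x2f, i.e. the polynomial
+ * x^8 + x^5 + x^3 + x^2 + x + 1.
+ */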
+u8 sja1105et_fdb_hash(struct sja1105_private *priv, const u8 *addr, u16 vid)
+{
+ struct sja1105_l2_lookup_params_entry *l2_lookup_params =
+ priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS].entries;
+ u64 input, poly_koopman = l2_lookup_params->poly;
+ /* Convert polynomial from Koopman to 'normal' notation */
+ u8 poly = (u8)(1 + (poly_koopman << 1));
+ u8 crc = 0; /* seed */
+ int i;
+
+ input = ((u64)vid << 48) | ether_addr_to_u64(addr);
+
+ /* Mask the eight bytes starting from MSB one at a time */
+ for (i = 56; i >= 0; i -= 8) {
+ u8 byte = (input & (0xffull << i)) >> i;
+
+ crc = sja1105_crc8_add(crc, byte, poly);
+ }
+ return crc;
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.h b/drivers/net/dsa/sja1105/sja1105_dynamic_config.h
new file mode 100644
index 000000000..a1472f80a
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#ifndef _SJA1105_DYNAMIC_CONFIG_H
+#define _SJA1105_DYNAMIC_CONFIG_H
+
+#include "sja1105.h"
+#include <linux/packing.h>
+
+/* Special index that can be used for sja1105_dynamic_config_read */
+#define SJA1105_SEARCH -1
+
+struct sja1105_dyn_cmd;
+
+struct sja1105_dynamic_table_ops {
+ /* This returns size_t just to keep same prototype as the
+ * static config ops, of which we are reusing some functions.
+ */
+ size_t (*entry_packing)(void *buf, void *entry_ptr, enum packing_op op);
+ void (*cmd_packing)(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op);
+ size_t max_entry_count;
+ size_t packed_size;
+ u64 addr;
+ u8 access;
+};
+
+struct sja1105_mgmt_entry {
+ u64 tsreg;
+ u64 takets;
+ u64 macaddr;
+ u64 destports;
+ u64 enfport;
+ u64 index;
+};
+
+extern const struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN];
+extern const struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN];
+extern const struct sja1105_dynamic_table_ops sja1110_dyn_ops[BLK_IDX_MAX_DYN];
+
+#endif
diff --git a/drivers/net/dsa/sja1105/sja1105_ethtool.c b/drivers/net/dsa/sja1105/sja1105_ethtool.c
new file mode 100644
index 000000000..decc6c931
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_ethtool.c
@@ -0,0 +1,629 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include "sja1105.h"
+
+enum sja1105_counter_index {
+ __SJA1105_COUNTER_UNUSED,
+ /* MAC */
+ N_RUNT,
+ N_SOFERR,
+ N_ALIGNERR,
+ N_MIIERR,
+ TYPEERR,
+ SIZEERR,
+ TCTIMEOUT,
+ PRIORERR,
+ NOMASTER,
+ MEMOV,
+ MEMERR,
+ INVTYP,
+ INTCYOV,
+ DOMERR,
+ PCFBAGDROP,
+ SPCPRIOR,
+ AGEPRIOR,
+ PORTDROP,
+ LENDROP,
+ BAGDROP,
+ POLICEERR,
+ DRPNONA664ERR,
+ SPCERR,
+ AGEDRP,
+ /* HL1 */
+ N_N664ERR,
+ N_VLANERR,
+ N_UNRELEASED,
+ N_SIZEERR,
+ N_CRCERR,
+ N_VLNOTFOUND,
+ N_CTPOLERR,
+ N_POLERR,
+ N_RXFRM,
+ N_RXBYTE,
+ N_TXFRM,
+ N_TXBYTE,
+ /* HL2 */
+ N_QFULL,
+ N_PART_DROP,
+ N_EGR_DISABLED,
+ N_NOT_REACH,
+ __MAX_SJA1105ET_PORT_COUNTER,
+ /* P/Q/R/S only */
+ /* ETHER */
+ N_DROPS_NOLEARN = __MAX_SJA1105ET_PORT_COUNTER,
+ N_DROPS_NOROUTE,
+ N_DROPS_ILL_DTAG,
+ N_DROPS_DTAG,
+ N_DROPS_SOTAG,
+ N_DROPS_SITAG,
+ N_DROPS_UTAG,
+ N_TX_BYTES_1024_2047,
+ N_TX_BYTES_512_1023,
+ N_TX_BYTES_256_511,
+ N_TX_BYTES_128_255,
+ N_TX_BYTES_65_127,
+ N_TX_BYTES_64,
+ N_TX_MCAST,
+ N_TX_BCAST,
+ N_RX_BYTES_1024_2047,
+ N_RX_BYTES_512_1023,
+ N_RX_BYTES_256_511,
+ N_RX_BYTES_128_255,
+ N_RX_BYTES_65_127,
+ N_RX_BYTES_64,
+ N_RX_MCAST,
+ N_RX_BCAST,
+ __MAX_SJA1105PQRS_PORT_COUNTER,
+};
+
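+/* @start and @end are the MSB and LSB bit positions of the counter field
+ * within the word read from @offset past the per-port stats base of
+ * @area; @is_64bit selects an 8-byte SPI read instead of a 4-byte one.
+ */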
+struct sja1105_port_counter {
+ enum sja1105_stats_area area;
+ const char name[ETH_GSTRING_LEN];
+ int offset;
+ int start;
+ int end;
+ bool is_64bit;
+};
+
+static const struct sja1105_port_counter sja1105_port_counters[] = {
+ /* MAC-Level Diagnostic Counters */
+ [N_RUNT] = {
+ .area = MAC,
+ .name = "n_runt",
+ .offset = 0,
+ .start = 31,
+ .end = 24,
+ },
+ [N_SOFERR] = {
+ .area = MAC,
+ .name = "n_soferr",
+ .offset = 0x0,
+ .start = 23,
+ .end = 16,
+ },
+ [N_ALIGNERR] = {
+ .area = MAC,
+ .name = "n_alignerr",
+ .offset = 0x0,
+ .start = 15,
+ .end = 8,
+ },
+ [N_MIIERR] = {
+ .area = MAC,
+ .name = "n_miierr",
+ .offset = 0x0,
+ .start = 7,
+ .end = 0,
+ },
+ /* MAC-Level Diagnostic Flags */
+ [TYPEERR] = {
+ .area = MAC,
+ .name = "typeerr",
+ .offset = 0x1,
+ .start = 27,
+ .end = 27,
+ },
+ [SIZEERR] = {
+ .area = MAC,
+ .name = "sizeerr",
+ .offset = 0x1,
+ .start = 26,
+ .end = 26,
+ },
+ [TCTIMEOUT] = {
+ .area = MAC,
+ .name = "tctimeout",
+ .offset = 0x1,
+ .start = 25,
+ .end = 25,
+ },
+ [PRIORERR] = {
+ .area = MAC,
+ .name = "priorerr",
+ .offset = 0x1,
+ .start = 24,
+ .end = 24,
+ },
+ [NOMASTER] = {
+ .area = MAC,
+ .name = "nomaster",
+ .offset = 0x1,
+ .start = 23,
+ .end = 23,
+ },
+ [MEMOV] = {
+ .area = MAC,
+ .name = "memov",
+ .offset = 0x1,
+ .start = 22,
+ .end = 22,
+ },
+ [MEMERR] = {
+ .area = MAC,
+ .name = "memerr",
+ .offset = 0x1,
+ .start = 21,
+ .end = 21,
+ },
+ [INVTYP] = {
+ .area = MAC,
+ .name = "invtyp",
+ .offset = 0x1,
+ .start = 19,
+ .end = 19,
+ },
+ [INTCYOV] = {
+ .area = MAC,
+ .name = "intcyov",
+ .offset = 0x1,
+ .start = 18,
+ .end = 18,
+ },
+ [DOMERR] = {
+ .area = MAC,
+ .name = "domerr",
+ .offset = 0x1,
+ .start = 17,
+ .end = 17,
+ },
+ [PCFBAGDROP] = {
+ .area = MAC,
+ .name = "pcfbagdrop",
+ .offset = 0x1,
+ .start = 16,
+ .end = 16,
+ },
+ [SPCPRIOR] = {
+ .area = MAC,
+ .name = "spcprior",
+ .offset = 0x1,
+ .start = 15,
+ .end = 12,
+ },
+ [AGEPRIOR] = {
+ .area = MAC,
+ .name = "ageprior",
+ .offset = 0x1,
+ .start = 11,
+ .end = 8,
+ },
+ [PORTDROP] = {
+ .area = MAC,
+ .name = "portdrop",
+ .offset = 0x1,
+ .start = 6,
+ .end = 6,
+ },
+ [LENDROP] = {
+ .area = MAC,
+ .name = "lendrop",
+ .offset = 0x1,
+ .start = 5,
+ .end = 5,
+ },
+ [BAGDROP] = {
+ .area = MAC,
+ .name = "bagdrop",
+ .offset = 0x1,
+ .start = 4,
+ .end = 4,
+ },
+ [POLICEERR] = {
+ .area = MAC,
+ .name = "policeerr",
+ .offset = 0x1,
+ .start = 3,
+ .end = 3,
+ },
+ [DRPNONA664ERR] = {
+ .area = MAC,
+ .name = "drpnona664err",
+ .offset = 0x1,
+ .start = 2,
+ .end = 2,
+ },
+ [SPCERR] = {
+ .area = MAC,
+ .name = "spcerr",
+ .offset = 0x1,
+ .start = 1,
+ .end = 1,
+ },
+ [AGEDRP] = {
+ .area = MAC,
+ .name = "agedrp",
+ .offset = 0x1,
+ .start = 0,
+ .end = 0,
+ },
+ /* High-Level Diagnostic Counters */
+ [N_N664ERR] = {
+ .area = HL1,
+ .name = "n_n664err",
+ .offset = 0xF,
+ .start = 31,
+ .end = 0,
+ },
+ [N_VLANERR] = {
+ .area = HL1,
+ .name = "n_vlanerr",
+ .offset = 0xE,
+ .start = 31,
+ .end = 0,
+ },
+ [N_UNRELEASED] = {
+ .area = HL1,
+ .name = "n_unreleased",
+ .offset = 0xD,
+ .start = 31,
+ .end = 0,
+ },
+ [N_SIZEERR] = {
+ .area = HL1,
+ .name = "n_sizeerr",
+ .offset = 0xC,
+ .start = 31,
+ .end = 0,
+ },
+ [N_CRCERR] = {
+ .area = HL1,
+ .name = "n_crcerr",
+ .offset = 0xB,
+ .start = 31,
+ .end = 0,
+ },
+ [N_VLNOTFOUND] = {
+ .area = HL1,
+ .name = "n_vlnotfound",
+ .offset = 0xA,
+ .start = 31,
+ .end = 0,
+ },
+ [N_CTPOLERR] = {
+ .area = HL1,
+ .name = "n_ctpolerr",
+ .offset = 0x9,
+ .start = 31,
+ .end = 0,
+ },
+ [N_POLERR] = {
+ .area = HL1,
+ .name = "n_polerr",
+ .offset = 0x8,
+ .start = 31,
+ .end = 0,
+ },
+ [N_RXFRM] = {
+ .area = HL1,
+ .name = "n_rxfrm",
+ .offset = 0x6,
+ .start = 31,
+ .end = 0,
+ .is_64bit = true,
+ },
+ [N_RXBYTE] = {
+ .area = HL1,
+ .name = "n_rxbyte",
+ .offset = 0x4,
+ .start = 31,
+ .end = 0,
+ .is_64bit = true,
+ },
+ [N_TXFRM] = {
+ .area = HL1,
+ .name = "n_txfrm",
+ .offset = 0x2,
+ .start = 31,
+ .end = 0,
+ .is_64bit = true,
+ },
+ [N_TXBYTE] = {
+ .area = HL1,
+ .name = "n_txbyte",
+ .offset = 0x0,
+ .start = 31,
+ .end = 0,
+ .is_64bit = true,
+ },
+ [N_QFULL] = {
+ .area = HL2,
+ .name = "n_qfull",
+ .offset = 0x3,
+ .start = 31,
+ .end = 0,
+ },
+ [N_PART_DROP] = {
+ .area = HL2,
+ .name = "n_part_drop",
+ .offset = 0x2,
+ .start = 31,
+ .end = 0,
+ },
+ [N_EGR_DISABLED] = {
+ .area = HL2,
+ .name = "n_egr_disabled",
+ .offset = 0x1,
+ .start = 31,
+ .end = 0,
+ },
+ [N_NOT_REACH] = {
+ .area = HL2,
+ .name = "n_not_reach",
+ .offset = 0x0,
+ .start = 31,
+ .end = 0,
+ },
+ /* Ether Stats */
+ [N_DROPS_NOLEARN] = {
+ .area = ETHER,
+ .name = "n_drops_nolearn",
+ .offset = 0x16,
+ .start = 31,
+ .end = 0,
+ },
+ [N_DROPS_NOROUTE] = {
+ .area = ETHER,
+ .name = "n_drops_noroute",
+ .offset = 0x15,
+ .start = 31,
+ .end = 0,
+ },
+ [N_DROPS_ILL_DTAG] = {
+ .area = ETHER,
+ .name = "n_drops_ill_dtag",
+ .offset = 0x14,
+ .start = 31,
+ .end = 0,
+ },
+ [N_DROPS_DTAG] = {
+ .area = ETHER,
+ .name = "n_drops_dtag",
+ .offset = 0x13,
+ .start = 31,
+ .end = 0,
+ },
+ [N_DROPS_SOTAG] = {
+ .area = ETHER,
+ .name = "n_drops_sotag",
+ .offset = 0x12,
+ .start = 31,
+ .end = 0,
+ },
+ [N_DROPS_SITAG] = {
+ .area = ETHER,
+ .name = "n_drops_sitag",
+ .offset = 0x11,
+ .start = 31,
+ .end = 0,
+ },
+ [N_DROPS_UTAG] = {
+ .area = ETHER,
+ .name = "n_drops_utag",
+ .offset = 0x10,
+ .start = 31,
+ .end = 0,
+ },
+ [N_TX_BYTES_1024_2047] = {
+ .area = ETHER,
+ .name = "n_tx_bytes_1024_2047",
+ .offset = 0x0F,
+ .start = 31,
+ .end = 0,
+ },
+ [N_TX_BYTES_512_1023] = {
+ .area = ETHER,
+ .name = "n_tx_bytes_512_1023",
+ .offset = 0x0E,
+ .start = 31,
+ .end = 0,
+ },
+ [N_TX_BYTES_256_511] = {
+ .area = ETHER,
+ .name = "n_tx_bytes_256_511",
+ .offset = 0x0D,
+ .start = 31,
+ .end = 0,
+ },
+ [N_TX_BYTES_128_255] = {
+ .area = ETHER,
+ .name = "n_tx_bytes_128_255",
+ .offset = 0x0C,
+ .start = 31,
+ .end = 0,
+ },
+ [N_TX_BYTES_65_127] = {
+ .area = ETHER,
+ .name = "n_tx_bytes_65_127",
+ .offset = 0x0B,
+ .start = 31,
+ .end = 0,
+ },
+ [N_TX_BYTES_64] = {
+ .area = ETHER,
+ .name = "n_tx_bytes_64",
+ .offset = 0x0A,
+ .start = 31,
+ .end = 0,
+ },
+ [N_TX_MCAST] = {
+ .area = ETHER,
+ .name = "n_tx_mcast",
+ .offset = 0x09,
+ .start = 31,
+ .end = 0,
+ },
+ [N_TX_BCAST] = {
+ .area = ETHER,
+ .name = "n_tx_bcast",
+ .offset = 0x08,
+ .start = 31,
+ .end = 0,
+ },
+ [N_RX_BYTES_1024_2047] = {
+ .area = ETHER,
+ .name = "n_rx_bytes_1024_2047",
+ .offset = 0x07,
+ .start = 31,
+ .end = 0,
+ },
+ [N_RX_BYTES_512_1023] = {
+ .area = ETHER,
+ .name = "n_rx_bytes_512_1023",
+ .offset = 0x06,
+ .start = 31,
+ .end = 0,
+ },
+ [N_RX_BYTES_256_511] = {
+ .area = ETHER,
+ .name = "n_rx_bytes_256_511",
+ .offset = 0x05,
+ .start = 31,
+ .end = 0,
+ },
+ [N_RX_BYTES_128_255] = {
+ .area = ETHER,
+ .name = "n_rx_bytes_128_255",
+ .offset = 0x04,
+ .start = 31,
+ .end = 0,
+ },
+ [N_RX_BYTES_65_127] = {
+ .area = ETHER,
+ .name = "n_rx_bytes_65_127",
+ .offset = 0x03,
+ .start = 31,
+ .end = 0,
+ },
+ [N_RX_BYTES_64] = {
+ .area = ETHER,
+ .name = "n_rx_bytes_64",
+ .offset = 0x02,
+ .start = 31,
+ .end = 0,
+ },
+ [N_RX_MCAST] = {
+ .area = ETHER,
+ .name = "n_rx_mcast",
+ .offset = 0x01,
+ .start = 31,
+ .end = 0,
+ },
+ [N_RX_BCAST] = {
+ .area = ETHER,
+ .name = "n_rx_bcast",
+ .offset = 0x00,
+ .start = 31,
+ .end = 0,
+ },
+};
+
+static int sja1105_port_counter_read(struct sja1105_private *priv, int port,
+ enum sja1105_counter_index idx, u64 *ctr)
+{
+ const struct sja1105_port_counter *c = &sja1105_port_counters[idx];
+ size_t size = c->is_64bit ? 8 : 4;
+ u8 buf[8] = {0};
+ u64 regs;
+ int rc;
+
+ regs = priv->info->regs->stats[c->area][port];
+
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs + c->offset, buf, size);
+ if (rc)
+ return rc;
+
+ sja1105_unpack(buf, ctr, c->start, c->end, size);
+
+ return 0;
+}
+
+void sja1105_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
+{
+ struct sja1105_private *priv = ds->priv;
+ enum sja1105_counter_index max_ctr, i;
+ int rc, k = 0;
+
+ if (priv->info->device_id == SJA1105E_DEVICE_ID ||
+ priv->info->device_id == SJA1105T_DEVICE_ID)
+ max_ctr = __MAX_SJA1105ET_PORT_COUNTER;
+ else
+ max_ctr = __MAX_SJA1105PQRS_PORT_COUNTER;
+
+ for (i = 0; i < max_ctr; i++) {
+ if (!strlen(sja1105_port_counters[i].name))
+ continue;
+
+ rc = sja1105_port_counter_read(priv, port, i, &data[k++]);
+ if (rc) {
+ dev_err(ds->dev,
+ "Failed to read port %d counters: %d\n",
+ port, rc);
+ break;
+ }
+ }
+}
+
+void sja1105_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, u8 *data)
+{
+ struct sja1105_private *priv = ds->priv;
+ enum sja1105_counter_index max_ctr, i;
+ char *p = data;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ if (priv->info->device_id == SJA1105E_DEVICE_ID ||
+ priv->info->device_id == SJA1105T_DEVICE_ID)
+ max_ctr = __MAX_SJA1105ET_PORT_COUNTER;
+ else
+ max_ctr = __MAX_SJA1105PQRS_PORT_COUNTER;
+
+ for (i = 0; i < max_ctr; i++) {
+ if (!strlen(sja1105_port_counters[i].name))
+ continue;
+
+ strscpy(p, sja1105_port_counters[i].name, ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+}
+
+int sja1105_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ struct sja1105_private *priv = ds->priv;
+ enum sja1105_counter_index max_ctr, i;
+ int sset_count = 0;
+
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ if (priv->info->device_id == SJA1105E_DEVICE_ID ||
+ priv->info->device_id == SJA1105T_DEVICE_ID)
+ max_ctr = __MAX_SJA1105ET_PORT_COUNTER;
+ else
+ max_ctr = __MAX_SJA1105PQRS_PORT_COUNTER;
+
+ for (i = 0; i < max_ctr; i++) {
+ if (!strlen(sja1105_port_counters[i].name))
+ continue;
+
+ sset_count++;
+ }
+
+ return sset_count;
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_flower.c b/drivers/net/dsa/sja1105/sja1105_flower.c
new file mode 100644
index 000000000..fad5afe38
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_flower.c
@@ -0,0 +1,542 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2020 NXP
+ */
+#include "sja1105.h"
+#include "sja1105_vl.h"
+
+struct sja1105_rule *sja1105_rule_find(struct sja1105_private *priv,
+ unsigned long cookie)
+{
+ struct sja1105_rule *rule;
+
+ list_for_each_entry(rule, &priv->flow_block.rules, list)
+ if (rule->cookie == cookie)
+ return rule;
+
+ return NULL;
+}
+
+static int sja1105_find_free_l2_policer(struct sja1105_private *priv)
+{
+ int i;
+
+ for (i = 0; i < SJA1105_NUM_L2_POLICERS; i++)
+ if (!priv->flow_block.l2_policer_used[i])
+ return i;
+
+ return -1;
+}
+
+static int sja1105_setup_bcast_policer(struct sja1105_private *priv,
+ struct netlink_ext_ack *extack,
+ unsigned long cookie, int port,
+ u64 rate_bytes_per_sec,
+ u32 burst)
+{
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+ struct sja1105_l2_policing_entry *policing;
+ struct dsa_switch *ds = priv->ds;
+ bool new_rule = false;
+ unsigned long p;
+ int rc;
+
+ if (!rule) {
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ rule->cookie = cookie;
+ rule->type = SJA1105_RULE_BCAST_POLICER;
+ rule->bcast_pol.sharindx = sja1105_find_free_l2_policer(priv);
+ rule->key.type = SJA1105_KEY_BCAST;
+ new_rule = true;
+ }
+
+ if (rule->bcast_pol.sharindx == -1) {
+ NL_SET_ERR_MSG_MOD(extack, "No more L2 policers free");
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
+
+ if (policing[(ds->num_ports * SJA1105_NUM_TC) + port].sharindx != port) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port already has a broadcast policer");
+ rc = -EEXIST;
+ goto out;
+ }
+
+ rule->port_mask |= BIT(port);
+
+ /* Make the broadcast policers of all ports attached to this block
+ * point to the newly allocated policer
+ */
+ for_each_set_bit(p, &rule->port_mask, SJA1105_MAX_NUM_PORTS) {
+ int bcast = (ds->num_ports * SJA1105_NUM_TC) + p;
+
+ policing[bcast].sharindx = rule->bcast_pol.sharindx;
+ }
+
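+ /* The hardware RATE field appears to be in units of 15.625 kbps:
+ * bytes/s * 512 / 10^6 == (bits/s) / 15625.
+ */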
+ policing[rule->bcast_pol.sharindx].rate = div_u64(rate_bytes_per_sec *
+ 512, 1000000);
+ policing[rule->bcast_pol.sharindx].smax = burst;
+
+ /* TODO: support per-flow MTU */
+ policing[rule->bcast_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN +
+ ETH_FCS_LEN;
+
+ rc = sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
+
+out:
+ if (rc == 0 && new_rule) {
+ priv->flow_block.l2_policer_used[rule->bcast_pol.sharindx] = true;
+ list_add(&rule->list, &priv->flow_block.rules);
+ } else if (new_rule) {
+ kfree(rule);
+ }
+
+ return rc;
+}
+
+static int sja1105_setup_tc_policer(struct sja1105_private *priv,
+ struct netlink_ext_ack *extack,
+ unsigned long cookie, int port, int tc,
+ u64 rate_bytes_per_sec,
+ u32 burst)
+{
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+ struct sja1105_l2_policing_entry *policing;
+ bool new_rule = false;
+ unsigned long p;
+ int rc;
+
+ if (!rule) {
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ rule->cookie = cookie;
+ rule->type = SJA1105_RULE_TC_POLICER;
+ rule->tc_pol.sharindx = sja1105_find_free_l2_policer(priv);
+ rule->key.type = SJA1105_KEY_TC;
+ rule->key.tc.pcp = tc;
+ new_rule = true;
+ }
+
+ if (rule->tc_pol.sharindx == -1) {
+ NL_SET_ERR_MSG_MOD(extack, "No more L2 policers free");
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
+
+ if (policing[(port * SJA1105_NUM_TC) + tc].sharindx != port) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port-TC pair already has an L2 policer");
+ rc = -EEXIST;
+ goto out;
+ }
+
+ rule->port_mask |= BIT(port);
+
+ /* Make the policers for traffic class @tc of all ports attached to
+ * this block point to the newly allocated policer
+ */
+ for_each_set_bit(p, &rule->port_mask, SJA1105_MAX_NUM_PORTS) {
+ int index = (p * SJA1105_NUM_TC) + tc;
+
+ policing[index].sharindx = rule->tc_pol.sharindx;
+ }
+
+ policing[rule->tc_pol.sharindx].rate = div_u64(rate_bytes_per_sec *
+ 512, 1000000);
+ policing[rule->tc_pol.sharindx].smax = burst;
+
+ /* TODO: support per-flow MTU */
+ policing[rule->tc_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN +
+ ETH_FCS_LEN;
+
+ rc = sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
+
+out:
+ if (rc == 0 && new_rule) {
+ priv->flow_block.l2_policer_used[rule->tc_pol.sharindx] = true;
+ list_add(&rule->list, &priv->flow_block.rules);
+ } else if (new_rule) {
+ kfree(rule);
+ }
+
+ return rc;
+}
+
+static int sja1105_flower_policer(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack,
+ unsigned long cookie,
+ struct sja1105_key *key,
+ u64 rate_bytes_per_sec,
+ u32 burst)
+{
+ switch (key->type) {
+ case SJA1105_KEY_BCAST:
+ return sja1105_setup_bcast_policer(priv, extack, cookie, port,
+ rate_bytes_per_sec, burst);
+ case SJA1105_KEY_TC:
+ return sja1105_setup_tc_policer(priv, extack, cookie, port,
+ key->tc.pcp, rate_bytes_per_sec,
+ burst);
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unknown keys for policing");
+ return -EOPNOTSUPP;
+ }
+}
+
+static int sja1105_flower_parse_key(struct sja1105_private *priv,
+ struct netlink_ext_ack *extack,
+ struct flow_cls_offload *cls,
+ struct sja1105_key *key)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
+ bool is_bcast_dmac = false;
+ u64 dmac = U64_MAX;
+ u16 vid = U16_MAX;
+ u16 pcp = U16_MAX;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported keys used");
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+ if (match.key->n_proto) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on protocol not supported");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ u8 bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ u8 null[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+
+ if (!ether_addr_equal_masked(match.key->src, null,
+ match.mask->src)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on source MAC not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ether_addr_equal(match.mask->dst, bcast)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Masked matching on MAC not supported");
+ return -EOPNOTSUPP;
+ }
+
+ dmac = ether_addr_to_u64(match.key->dst);
+ is_bcast_dmac = ether_addr_equal(match.key->dst, bcast);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+
+ if (match.mask->vlan_id &&
+ match.mask->vlan_id != VLAN_VID_MASK) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Masked matching on VID is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (match.mask->vlan_priority &&
+ match.mask->vlan_priority != 0x7) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Masked matching on PCP is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (match.mask->vlan_id)
+ vid = match.key->vlan_id;
+ if (match.mask->vlan_priority)
+ pcp = match.key->vlan_priority;
+ }
+
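+ /* Distill the matched fields into one of the key types understood
+ * by this driver.
+ */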
+ if (is_bcast_dmac && vid == U16_MAX && pcp == U16_MAX) {
+ key->type = SJA1105_KEY_BCAST;
+ return 0;
+ }
+ if (dmac == U64_MAX && vid == U16_MAX && pcp != U16_MAX) {
+ key->type = SJA1105_KEY_TC;
+ key->tc.pcp = pcp;
+ return 0;
+ }
+ if (dmac != U64_MAX && vid != U16_MAX && pcp != U16_MAX) {
+ key->type = SJA1105_KEY_VLAN_AWARE_VL;
+ key->vl.dmac = dmac;
+ key->vl.vid = vid;
+ key->vl.pcp = pcp;
+ return 0;
+ }
+ if (dmac != U64_MAX) {
+ key->type = SJA1105_KEY_VLAN_UNAWARE_VL;
+ key->vl.dmac = dmac;
+ return 0;
+ }
+
+ NL_SET_ERR_MSG_MOD(extack, "Not matching on any known key");
+ return -EOPNOTSUPP;
+}
+
+static int sja1105_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct sja1105_private *priv = ds->priv;
+ const struct flow_action_entry *act;
+ unsigned long cookie = cls->cookie;
+ bool routing_rule = false;
+ struct sja1105_key key;
+ bool gate_rule = false;
+ bool vl_rule = false;
+ int rc, i;
+
+ rc = sja1105_flower_parse_key(priv, extack, cls, &key);
+ if (rc)
+ return rc;
+
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_POLICE:
+ rc = sja1105_policer_validate(&rule->action, act, extack);
+ if (rc)
+ goto out;
+
+ rc = sja1105_flower_policer(priv, port, extack, cookie,
+ &key,
+ act->police.rate_bytes_ps,
+ act->police.burst);
+ if (rc)
+ goto out;
+ break;
+ case FLOW_ACTION_TRAP: {
+ int cpu = dsa_upstream_port(ds, port);
+
+ routing_rule = true;
+ vl_rule = true;
+
+ rc = sja1105_vl_redirect(priv, port, extack, cookie,
+ &key, BIT(cpu), true);
+ if (rc)
+ goto out;
+ break;
+ }
+ case FLOW_ACTION_REDIRECT: {
+ struct dsa_port *to_dp;
+
+ to_dp = dsa_port_from_netdev(act->dev);
+ if (IS_ERR(to_dp)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Destination not a switch port");
+ return -EOPNOTSUPP;
+ }
+
+ routing_rule = true;
+ vl_rule = true;
+
+ rc = sja1105_vl_redirect(priv, port, extack, cookie,
+ &key, BIT(to_dp->index), true);
+ if (rc)
+ goto out;
+ break;
+ }
+ case FLOW_ACTION_DROP:
+ vl_rule = true;
+
+ rc = sja1105_vl_redirect(priv, port, extack, cookie,
+ &key, 0, false);
+ if (rc)
+ goto out;
+ break;
+ case FLOW_ACTION_GATE:
+ gate_rule = true;
+ vl_rule = true;
+
+ rc = sja1105_vl_gate(priv, port, extack, cookie,
+ &key, act->hw_index,
+ act->gate.prio,
+ act->gate.basetime,
+ act->gate.cycletime,
+ act->gate.cycletimeext,
+ act->gate.num_entries,
+ act->gate.entries);
+ if (rc)
+ goto out;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Action not supported");
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+ }
+
+ if (vl_rule && !rc) {
+ /* Delay scheduling configuration until DESTPORTS has been
+ * populated by all other actions.
+ */
+ if (gate_rule) {
+ if (!routing_rule) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload gate action together with redirect or trap");
+ return -EOPNOTSUPP;
+ }
+ rc = sja1105_init_scheduling(priv);
+ if (rc)
+ goto out;
+ }
+
+ rc = sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
+ }
+
+out:
+ return rc;
+}
+
+int sja1105_cls_flower_del(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cls->cookie);
+ struct sja1105_l2_policing_entry *policing;
+ int old_sharindx;
+
+ if (!rule)
+ return 0;
+
+ if (rule->type == SJA1105_RULE_VL)
+ return sja1105_vl_delete(priv, port, rule, cls->common.extack);
+
+ policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
+
+ if (rule->type == SJA1105_RULE_BCAST_POLICER) {
+ int bcast = (ds->num_ports * SJA1105_NUM_TC) + port;
+
+ old_sharindx = policing[bcast].sharindx;
+ policing[bcast].sharindx = port;
+ } else if (rule->type == SJA1105_RULE_TC_POLICER) {
+ int index = (port * SJA1105_NUM_TC) + rule->key.tc.pcp;
+
+ old_sharindx = policing[index].sharindx;
+ policing[index].sharindx = port;
+ } else {
+ return -EINVAL;
+ }
+
+ rule->port_mask &= ~BIT(port);
+ if (!rule->port_mask) {
+ priv->flow_block.l2_policer_used[old_sharindx] = false;
+ list_del(&rule->list);
+ kfree(rule);
+ }
+
+ return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
+}
+
+int sja1105_cls_flower_stats(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cls->cookie);
+ int rc;
+
+ if (!rule)
+ return 0;
+
+ if (rule->type != SJA1105_RULE_VL)
+ return 0;
+
+ rc = sja1105_vl_stats(priv, port, rule, &cls->stats,
+ cls->common.extack);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+void sja1105_flower_setup(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ int port;
+
+ INIT_LIST_HEAD(&priv->flow_block.rules);
+
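+ /* The first ds->num_ports policers are the per-port default
+ * policers (sharindx == port), so they are permanently reserved.
+ */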
+ for (port = 0; port < ds->num_ports; port++)
+ priv->flow_block.l2_policer_used[port] = true;
+}
+
+void sja1105_flower_teardown(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_rule *rule;
+ struct list_head *pos, *n;
+
+ list_for_each_safe(pos, n, &priv->flow_block.rules) {
+ rule = list_entry(pos, struct sja1105_rule, list);
+ list_del(&rule->list);
+ kfree(rule);
+ }
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
new file mode 100644
index 000000000..f1f1368e8
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -0,0 +1,3481 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
+ * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/spi/spi.h>
+#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
+#include <linux/phylink.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/of_device.h>
+#include <linux/pcs/pcs-xpcs.h>
+#include <linux/netdev_features.h>
+#include <linux/netdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/if_ether.h>
+#include <linux/dsa/8021q.h>
+#include "sja1105.h"
+#include "sja1105_tas.h"
+
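+/* Only the multicast bit of the 48-bit DMAC is set; used together with
+ * an identical mask, this single FDB entry matches every multicast
+ * address.
+ */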
+#define SJA1105_UNKNOWN_MULTICAST 0x010000000000ull
+
+/* Configure the optional reset pin and bring up the switch */
+static int sja1105_hw_reset(struct device *dev, unsigned int pulse_len,
+ unsigned int startup_delay)
+{
+ struct gpio_desc *gpio;
+
+ gpio = gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ if (!gpio)
+ return 0;
+
+ gpiod_set_value_cansleep(gpio, 1);
+ /* Wait for minimum reset pulse length */
+ msleep(pulse_len);
+ gpiod_set_value_cansleep(gpio, 0);
+ /* Wait until chip is ready after reset */
+ msleep(startup_delay);
+
+ gpiod_put(gpio);
+
+ return 0;
+}
+
+static void
+sja1105_port_allow_traffic(struct sja1105_l2_forwarding_entry *l2_fwd,
+ int from, int to, bool allow)
+{
+ if (allow)
+ l2_fwd[from].reach_port |= BIT(to);
+ else
+ l2_fwd[from].reach_port &= ~BIT(to);
+}
+
+static bool sja1105_can_forward(struct sja1105_l2_forwarding_entry *l2_fwd,
+ int from, int to)
+{
+ return !!(l2_fwd[from].reach_port & BIT(to));
+}
+
+static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
+{
+ struct sja1105_vlan_lookup_entry *vlan;
+ int count, i;
+
+ vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
+ count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count;
+
+ for (i = 0; i < count; i++)
+ if (vlan[i].vlanid == vid)
+ return i;
+
+ /* Return an invalid entry index if not found */
+ return -1;
+}
+
+static int sja1105_drop_untagged(struct dsa_switch *ds, int port, bool drop)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_mac_config_entry *mac;
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ if (mac[port].drpuntag == drop)
+ return 0;
+
+ mac[port].drpuntag = drop;
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
+ &mac[port], true);
+}
+
+static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
+{
+ struct sja1105_mac_config_entry *mac;
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ if (mac[port].vlanid == pvid)
+ return 0;
+
+ mac[port].vlanid = pvid;
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
+ &mac[port], true);
+}
+
+static int sja1105_commit_pvid(struct dsa_switch *ds, int port)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_vlan_lookup_entry *vlan;
+ bool drop_untagged = false;
+ int match, rc;
+ u16 pvid;
+
+ if (br && br_vlan_enabled(br))
+ pvid = priv->bridge_pvid[port];
+ else
+ pvid = priv->tag_8021q_pvid[port];
+
+ rc = sja1105_pvid_apply(priv, port, pvid);
+ if (rc)
+ return rc;
+
+ /* Only force dropping of untagged packets when the port is under a
+ * VLAN-aware bridge. When the tag_8021q pvid is used, we are
+ * deliberately removing the RX VLAN from the port's VMEMB_PORT list,
+ * to prevent DSA tag spoofing from the link partner. Untagged packets
+ * are the only ones that should be received with tag_8021q, so
+ * definitely don't drop them.
+ */
+ if (pvid == priv->bridge_pvid[port]) {
+ vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
+
+ match = sja1105_is_vlan_configured(priv, pvid);
+
+ if (match < 0 || !(vlan[match].vmemb_port & BIT(port)))
+ drop_untagged = true;
+ }
+
+ if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
+ drop_untagged = true;
+
+ return sja1105_drop_untagged(ds, port, drop_untagged);
+}
+
+static int sja1105_init_mac_settings(struct sja1105_private *priv)
+{
+ struct sja1105_mac_config_entry default_mac = {
+ /* Enable all 8 priority queues on egress.
+ * Every queue i holds top[i] - base[i] frames.
+ * Sum of top[i] - base[i] is 511 (max hardware limit).
+ */
+ .top = {0x3F, 0x7F, 0xBF, 0xFF, 0x13F, 0x17F, 0x1BF, 0x1FF},
+ .base = {0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0},
+ .enabled = {true, true, true, true, true, true, true, true},
+ /* Keep standard IFG of 12 bytes on egress. */
+ .ifg = 0,
+ /* Always put the MAC speed in automatic mode, where it can be
+ * adjusted at runtime by PHYLINK.
+ */
+ .speed = priv->info->port_speed[SJA1105_SPEED_AUTO],
+ /* No static correction for 1-step 1588 events */
+ .tp_delin = 0,
+ .tp_delout = 0,
+ /* Disable aging for critical TTEthernet traffic */
+ .maxage = 0xFF,
+ /* Internal VLAN (pvid) to apply to untagged ingress */
+ .vlanprio = 0,
+ .vlanid = 1,
+ .ing_mirr = false,
+ .egr_mirr = false,
+ /* Don't drop traffic with other EtherType than ETH_P_IP */
+ .drpnona664 = false,
+ /* Don't drop double-tagged traffic */
+ .drpdtag = false,
+ /* Don't drop untagged traffic */
+ .drpuntag = false,
+ /* Don't retag 802.1p (VID 0) traffic with the pvid */
+ .retag = false,
+ /* Disable learning and I/O on user ports by default -
+ * STP will enable it.
+ */
+ .dyn_learn = false,
+ .egress = false,
+ .ingress = false,
+ };
+ struct sja1105_mac_config_entry *mac;
+ struct dsa_switch *ds = priv->ds;
+ struct sja1105_table *table;
+ struct dsa_port *dp;
+
+ table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG];
+
+ /* Discard previous MAC Configuration Table */
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(table->ops->max_entry_count,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = table->ops->max_entry_count;
+
+ mac = table->entries;
+
+ list_for_each_entry(dp, &ds->dst->ports, list) {
+ if (dp->ds != ds)
+ continue;
+
+ mac[dp->index] = default_mac;
+
+ /* Let sja1105_bridge_stp_state_set() keep address learning
+ * enabled for the DSA ports. CPU ports use software-assisted
+ * learning to ensure that only FDB entries belonging to the
+ * bridge are learned, and that they are learned towards all
+ * CPU ports in a cross-chip topology if multiple CPU ports
+ * exist.
+ */
+ if (dsa_port_is_dsa(dp))
+ dp->learning = true;
+
+ /* Disallow untagged packets from being received on the
+ * CPU and DSA ports.
+ */
+ if (dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))
+ mac[dp->index].drpuntag = true;
+ }
+
+ return 0;
+}
+
+static int sja1105_init_mii_settings(struct sja1105_private *priv)
+{
+ struct device *dev = &priv->spidev->dev;
+ struct sja1105_xmii_params_entry *mii;
+ struct dsa_switch *ds = priv->ds;
+ struct sja1105_table *table;
+ int i;
+
+ table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS];
+
+ /* Discard previous xMII Mode Parameters Table */
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(table->ops->max_entry_count,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ /* Override table based on PHYLINK DT bindings */
+ table->entry_count = table->ops->max_entry_count;
+
+ mii = table->entries;
+
+ for (i = 0; i < ds->num_ports; i++) {
+ sja1105_mii_role_t role = XMII_MAC;
+
+ if (dsa_is_unused_port(priv->ds, i))
+ continue;
+
+ switch (priv->phy_mode[i]) {
+ case PHY_INTERFACE_MODE_INTERNAL:
+ if (priv->info->internal_phy[i] == SJA1105_NO_PHY)
+ goto unsupported;
+
+ mii->xmii_mode[i] = XMII_MODE_MII;
+ if (priv->info->internal_phy[i] == SJA1105_PHY_BASE_TX)
+ mii->special[i] = true;
+
+ break;
+ case PHY_INTERFACE_MODE_REVMII:
+ role = XMII_PHY;
+ fallthrough;
+ case PHY_INTERFACE_MODE_MII:
+ if (!priv->info->supports_mii[i])
+ goto unsupported;
+
+ mii->xmii_mode[i] = XMII_MODE_MII;
+ break;
+ case PHY_INTERFACE_MODE_REVRMII:
+ role = XMII_PHY;
+ fallthrough;
+ case PHY_INTERFACE_MODE_RMII:
+ if (!priv->info->supports_rmii[i])
+ goto unsupported;
+
+ mii->xmii_mode[i] = XMII_MODE_RMII;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ if (!priv->info->supports_rgmii[i])
+ goto unsupported;
+
+ mii->xmii_mode[i] = XMII_MODE_RGMII;
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ if (!priv->info->supports_sgmii[i])
+ goto unsupported;
+
+ mii->xmii_mode[i] = XMII_MODE_SGMII;
+ mii->special[i] = true;
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ if (!priv->info->supports_2500basex[i])
+ goto unsupported;
+
+ mii->xmii_mode[i] = XMII_MODE_SGMII;
+ mii->special[i] = true;
+ break;
+unsupported:
+ default:
+ dev_err(dev, "Unsupported PHY mode %s on port %d!\n",
+ phy_modes(priv->phy_mode[i]), i);
+ return -EINVAL;
+ }
+
+ mii->phy_mac[i] = role;
+ }
+ return 0;
+}
+
+static int sja1105_init_static_fdb(struct sja1105_private *priv)
+{
+ struct sja1105_l2_lookup_entry *l2_lookup;
+ struct sja1105_table *table;
+ int port;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
+
+ /* We only populate the FDB table through dynamic L2 Address Lookup
+ * entries, except for a special entry at the end which is a catch-all
+ * for unknown multicast and will be used to control flooding domain.
+ */
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ if (!priv->info->can_limit_mcast_flood)
+ return 0;
+
+ table->entries = kcalloc(1, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = 1;
+ l2_lookup = table->entries;
+
+ /* All L2 multicast addresses have an odd first octet */
+ l2_lookup[0].macaddr = SJA1105_UNKNOWN_MULTICAST;
+ l2_lookup[0].mask_macaddr = SJA1105_UNKNOWN_MULTICAST;
+ l2_lookup[0].lockeds = true;
+ l2_lookup[0].index = SJA1105_MAX_L2_LOOKUP_COUNT - 1;
+
+ /* Flood multicast to every port by default */
+ for (port = 0; port < priv->ds->num_ports; port++)
+ if (!dsa_is_unused_port(priv->ds, port))
+ l2_lookup[0].destports |= BIT(port);
+
+ return 0;
+}
+
+static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
+{
+ struct sja1105_l2_lookup_params_entry default_l2_lookup_params = {
+ /* Learned FDB entries are forgotten after 300 seconds */
+ .maxage = SJA1105_AGEING_TIME_MS(300000),
+ /* All entries within a FDB bin are available for learning */
+ .dyn_tbsz = SJA1105ET_FDB_BIN_SIZE,
+ /* And the P/Q/R/S equivalent setting: */
+ .start_dynspc = 0,
+ /* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */
+ .poly = 0x97,
+ /* Always use Independent VLAN Learning (IVL) */
+ .shared_learn = false,
+ /* Don't discard management traffic based on ENFPORT -
+ * we don't perform SMAC port enforcement anyway, so
+ * what we are setting here doesn't matter.
+ */
+ .no_enf_hostprt = false,
+ /* Don't learn SMAC for mac_fltres1 and mac_fltres0.
+ * Maybe correlate with no_linklocal_learn from bridge driver?
+ */
+ .no_mgmt_learn = true,
+ /* P/Q/R/S only */
+ .use_static = true,
+ /* Dynamically learned FDB entries can overwrite other (older)
+ * dynamic FDB entries
+ */
+ .owr_dyn = true,
+ .drpnolearn = true,
+ };
+ struct dsa_switch *ds = priv->ds;
+ int port, num_used_ports = 0;
+ struct sja1105_table *table;
+ u64 max_fdb_entries;
+
+ for (port = 0; port < ds->num_ports; port++)
+ if (!dsa_is_unused_port(ds, port))
+ num_used_ports++;
+
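+ /* Divide the FDB space evenly between the ports in use. As a
+ * worked example (port count assumed for illustration): with the
+ * 1024-entry FDB and 5 ports in use, each port may own at most
+ * 1024 / 5 = 204 entries.
+ */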
+ max_fdb_entries = SJA1105_MAX_L2_LOOKUP_COUNT / num_used_ports;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_is_unused_port(ds, port))
+ continue;
+
+ default_l2_lookup_params.maxaddrp[port] = max_fdb_entries;
+ }
+
+ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
+
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(table->ops->max_entry_count,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = table->ops->max_entry_count;
+
+ /* This table only has a single entry */
+ ((struct sja1105_l2_lookup_params_entry *)table->entries)[0] =
+ default_l2_lookup_params;
+
+ return 0;
+}
+
+/* Set up a default VLAN for untagged traffic injected from the CPU
+ * using management routes (e.g. STP, PTP) as opposed to tag_8021q.
+ * All DT-defined ports are members of this VLAN, and there are no
+ * restrictions on forwarding (since the CPU selects the destination).
+ * Frames from this VLAN will always be transmitted as untagged, and
+ * neither the bridge nor the 8021q module can create this VLAN ID.
+ */
+static int sja1105_init_static_vlan(struct sja1105_private *priv)
+{
+ struct sja1105_table *table;
+ struct sja1105_vlan_lookup_entry pvid = {
+ .type_entry = SJA1110_VLAN_D_TAG,
+ .ving_mirr = 0,
+ .vegr_mirr = 0,
+ .vmemb_port = 0,
+ .vlan_bc = 0,
+ .tag_port = 0,
+ .vlanid = SJA1105_DEFAULT_VLAN,
+ };
+ struct dsa_switch *ds = priv->ds;
+ int port;
+
+ table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
+
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kzalloc(table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = 1;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_is_unused_port(ds, port))
+ continue;
+
+ pvid.vmemb_port |= BIT(port);
+ pvid.vlan_bc |= BIT(port);
+ pvid.tag_port &= ~BIT(port);
+
+ if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
+ priv->tag_8021q_pvid[port] = SJA1105_DEFAULT_VLAN;
+ priv->bridge_pvid[port] = SJA1105_DEFAULT_VLAN;
+ }
+ }
+
+ ((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
+ return 0;
+}
+
+static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
+{
+ struct sja1105_l2_forwarding_entry *l2fwd;
+ struct dsa_switch *ds = priv->ds;
+ struct dsa_switch_tree *dst;
+ struct sja1105_table *table;
+ struct dsa_link *dl;
+ int port, tc;
+ int from, to;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING];
+
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(table->ops->max_entry_count,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = table->ops->max_entry_count;
+
+ l2fwd = table->entries;
+
+ /* First 5 entries in the L2 Forwarding Table define the forwarding
+ * rules and the VLAN PCP to ingress queue mapping.
+ * Set up the ingress queue mapping first.
+ */
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_is_unused_port(ds, port))
+ continue;
+
+ for (tc = 0; tc < SJA1105_NUM_TC; tc++)
+ l2fwd[port].vlan_pmap[tc] = tc;
+ }
+
+ /* Then manage the forwarding domain for user ports. These can forward
+ * only to the always-on domain (CPU port and DSA links)
+ */
+ for (from = 0; from < ds->num_ports; from++) {
+ if (!dsa_is_user_port(ds, from))
+ continue;
+
+ for (to = 0; to < ds->num_ports; to++) {
+ if (!dsa_is_cpu_port(ds, to) &&
+ !dsa_is_dsa_port(ds, to))
+ continue;
+
+ l2fwd[from].bc_domain |= BIT(to);
+ l2fwd[from].fl_domain |= BIT(to);
+
+ sja1105_port_allow_traffic(l2fwd, from, to, true);
+ }
+ }
+
+ /* Then manage the forwarding domain for DSA links and CPU ports (the
+ * always-on domain). These can send packets to any enabled port except
+ * themselves.
+ */
+ for (from = 0; from < ds->num_ports; from++) {
+ if (!dsa_is_cpu_port(ds, from) && !dsa_is_dsa_port(ds, from))
+ continue;
+
+ for (to = 0; to < ds->num_ports; to++) {
+ if (dsa_is_unused_port(ds, to))
+ continue;
+
+ if (from == to)
+ continue;
+
+ l2fwd[from].bc_domain |= BIT(to);
+ l2fwd[from].fl_domain |= BIT(to);
+
+ sja1105_port_allow_traffic(l2fwd, from, to, true);
+ }
+ }
+
+ /* In odd topologies ("H" connections where there is a DSA link to
+ * another switch which also has its own CPU port), TX packets can loop
+ * back into the system (they are flooded from CPU port 1 to the DSA
+ * link, and from there to CPU port 2). Prevent this from happening by
+ * cutting RX from DSA links towards our CPU port, if the remote switch
+ * has its own CPU port and therefore doesn't need ours for network
+ * stack termination.
+ */
+ dst = ds->dst;
+
+ list_for_each_entry(dl, &dst->rtable, list) {
+ if (dl->dp->ds != ds || dl->link_dp->cpu_dp == dl->dp->cpu_dp)
+ continue;
+
+ from = dl->dp->index;
+ to = dsa_upstream_port(ds, from);
+
+ dev_warn(ds->dev,
+ "H topology detected, cutting RX from DSA link %d to CPU port %d to prevent TX packet loops\n",
+ from, to);
+
+ sja1105_port_allow_traffic(l2fwd, from, to, false);
+
+ l2fwd[from].bc_domain &= ~BIT(to);
+ l2fwd[from].fl_domain &= ~BIT(to);
+ }
+
+ /* Finally, manage the egress flooding domain. All ports start up with
+ * flooding enabled, including the CPU port and DSA links.
+ */
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_is_unused_port(ds, port))
+ continue;
+
+ priv->ucast_egress_floods |= BIT(port);
+ priv->bcast_egress_floods |= BIT(port);
+ }
+
+ /* Next 8 entries define VLAN PCP mapping from ingress to egress.
+ * Create a one-to-one mapping.
+ */
+ for (tc = 0; tc < SJA1105_NUM_TC; tc++) {
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_is_unused_port(ds, port))
+ continue;
+
+ l2fwd[ds->num_ports + tc].vlan_pmap[port] = tc;
+ }
+
+ l2fwd[ds->num_ports + tc].type_egrpcp2outputq = true;
+ }
+
+ return 0;
+}
+
+static int sja1110_init_pcp_remapping(struct sja1105_private *priv)
+{
+ struct sja1110_pcp_remapping_entry *pcp_remap;
+ struct dsa_switch *ds = priv->ds;
+ struct sja1105_table *table;
+ int port, tc;
+
+ table = &priv->static_config.tables[BLK_IDX_PCP_REMAPPING];
+
+ /* Nothing to do for SJA1105 */
+ if (!table->ops->max_entry_count)
+ return 0;
+
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(table->ops->max_entry_count,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = table->ops->max_entry_count;
+
+ pcp_remap = table->entries;
+
+ /* Repeat the configuration done for vlan_pmap */
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_is_unused_port(ds, port))
+ continue;
+
+ for (tc = 0; tc < SJA1105_NUM_TC; tc++)
+ pcp_remap[port].egrpcp[tc] = tc;
+ }
+
+ return 0;
+}
+
+static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv)
+{
+ struct sja1105_l2_forwarding_params_entry *l2fwd_params;
+ struct sja1105_table *table;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
+
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(table->ops->max_entry_count,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = table->ops->max_entry_count;
+
+ /* This table only has a single entry */
+ l2fwd_params = table->entries;
+
+ /* Disallow dynamic reconfiguration of vlan_pmap */
+ l2fwd_params->max_dynp = 0;
+ /* Use a single memory partition for all ingress queues */
+ l2fwd_params->part_spc[0] = priv->info->max_frame_mem;
+
+ return 0;
+}
+
+void sja1105_frame_memory_partitioning(struct sja1105_private *priv)
+{
+ struct sja1105_l2_forwarding_params_entry *l2_fwd_params;
+ struct sja1105_vl_forwarding_params_entry *vl_fwd_params;
+ struct sja1105_table *table;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
+ l2_fwd_params = table->entries;
+ l2_fwd_params->part_spc[0] = SJA1105_MAX_FRAME_MEMORY;
+
+ /* If we have any critical-traffic virtual links, we need to reserve
+ * some frame buffer memory for them. At the moment, hardcode the value
+ * at 100 blocks of 128 bytes of memory each. This leaves 829 blocks
+ * remaining for best-effort traffic. TODO: figure out a more flexible
+ * way to perform the frame buffer partitioning.
+ */
+ if (!priv->static_config.tables[BLK_IDX_VL_FORWARDING].entry_count)
+ return;
+
+ table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
+ vl_fwd_params = table->entries;
+
+ l2_fwd_params->part_spc[0] -= SJA1105_VL_FRAME_MEMORY;
+ vl_fwd_params->partspc[0] = SJA1105_VL_FRAME_MEMORY;
+}
+
+/* SJA1110 TDMACONFIGIDX values:
+ *
+ * | 100 Mbps ports | 1Gbps ports | 2.5Gbps ports | Disabled ports
+ * -----+----------------+---------------+---------------+---------------
+ * 0 | 0, [5:10] | [1:2] | [3:4] | retag
+ * 1 |0, [5:10], retag| [1:2] | [3:4] | -
+ * 2 | 0, [5:10] | [1:3], retag | 4 | -
+ * 3 | 0, [5:10] |[1:2], 4, retag| 3 | -
+ * 4 | 0, 2, [5:10] | 1, retag | [3:4] | -
+ * 5 | 0, 1, [5:10] | 2, retag | [3:4] | -
+ * 14 | 0, [5:10] | [1:4], retag | - | -
+ * 15 | [5:10] | [0:4], retag | - | -
+ */
+static void sja1110_select_tdmaconfigidx(struct sja1105_private *priv)
+{
+ struct sja1105_general_params_entry *general_params;
+ struct sja1105_table *table;
+ bool port_1_is_base_tx;
+ bool port_3_is_2500;
+ bool port_4_is_2500;
+ u64 tdmaconfigidx;
+
+ if (priv->info->device_id != SJA1110_DEVICE_ID)
+ return;
+
+ table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
+ general_params = table->entries;
+
+ /* All the settings below are "as opposed to SGMII", which is the
+ * other pinmuxing option.
+ */
+ port_1_is_base_tx = priv->phy_mode[1] == PHY_INTERFACE_MODE_INTERNAL;
+ port_3_is_2500 = priv->phy_mode[3] == PHY_INTERFACE_MODE_2500BASEX;
+ port_4_is_2500 = priv->phy_mode[4] == PHY_INTERFACE_MODE_2500BASEX;
+
+ if (port_1_is_base_tx)
+ /* Retagging port will operate at 1 Gbps */
+ tdmaconfigidx = 5;
+ else if (port_3_is_2500 && port_4_is_2500)
+ /* Retagging port will operate at 100 Mbps */
+ tdmaconfigidx = 1;
+ else if (port_3_is_2500)
+ /* Retagging port will operate at 1 Gbps */
+ tdmaconfigidx = 3;
+ else if (port_4_is_2500)
+ /* Retagging port will operate at 1 Gbps */
+ tdmaconfigidx = 2;
+ else
+ /* Retagging port will operate at 1 Gbps */
+ tdmaconfigidx = 14;
+
+ general_params->tdmaconfigidx = tdmaconfigidx;
+}
+
+static int sja1105_init_topology(struct sja1105_private *priv,
+ struct sja1105_general_params_entry *general_params)
+{
+ struct dsa_switch *ds = priv->ds;
+ int port;
+
+ /* The host port is the destination for traffic matching mac_fltres1
+ * and mac_fltres0 on all ports except itself. Default to an invalid
+ * value.
+ */
+ general_params->host_port = ds->num_ports;
+
+ /* Link-local traffic received on casc_port will be forwarded
+ * to host_port without embedding the source port and device ID
+ * info in the destination MAC address, and no RX timestamps will be
+ * taken either (presumably because it is a cascaded port and a
+ * downstream SJA switch already did that).
+ * To disable the feature, we need to do different things depending on
+ * switch generation. On SJA1105 we need to set an invalid port, while
+ * on SJA1110, which supports multiple cascaded ports, this field is a
+ * bitmask and must be left zero.
+ */
+ if (!priv->info->multiple_cascade_ports)
+ general_params->casc_port = ds->num_ports;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ bool is_upstream = dsa_is_upstream_port(ds, port);
+ bool is_dsa_link = dsa_is_dsa_port(ds, port);
+
+ /* Upstream ports can be dedicated CPU ports or
+ * upstream-facing DSA links
+ */
+ if (is_upstream) {
+ if (general_params->host_port == ds->num_ports) {
+ general_params->host_port = port;
+ } else {
+ dev_err(ds->dev,
+ "Port %llu is already a host port, configuring %d as one too is not supported\n",
+ general_params->host_port, port);
+ return -EINVAL;
+ }
+ }
+
+ /* Cascade ports are downstream-facing DSA links */
+ if (is_dsa_link && !is_upstream) {
+ if (priv->info->multiple_cascade_ports) {
+ general_params->casc_port |= BIT(port);
+ } else if (general_params->casc_port == ds->num_ports) {
+ general_params->casc_port = port;
+ } else {
+ dev_err(ds->dev,
+ "Port %llu is already a cascade port, configuring %d as one too is not supported\n",
+ general_params->casc_port, port);
+ return -EINVAL;
+ }
+ }
+ }
+
+ if (general_params->host_port == ds->num_ports) {
+ dev_err(ds->dev, "No host port configured\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sja1105_init_general_params(struct sja1105_private *priv)
+{
+ struct sja1105_general_params_entry default_general_params = {
+ /* Allow dynamic changing of the mirror port */
+ .mirr_ptacu = true,
+ .switchid = priv->ds->index,
+ /* Priority queue for link-local management frames
+ * (both ingress to and egress from CPU - PTP, STP etc)
+ */
+ .hostprio = 7,
+ .mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A,
+ .mac_flt1 = SJA1105_LINKLOCAL_FILTER_A_MASK,
+ .incl_srcpt1 = true,
+ .send_meta1 = true,
+ .mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B,
+ .mac_flt0 = SJA1105_LINKLOCAL_FILTER_B_MASK,
+ .incl_srcpt0 = true,
+ .send_meta0 = true,
+ /* Default to an invalid value */
+ .mirr_port = priv->ds->num_ports,
+ /* No TTEthernet */
+ .vllupformat = SJA1105_VL_FORMAT_PSFP,
+ .vlmarker = 0,
+ .vlmask = 0,
+ /* Only update correctionField for 1-step PTP (L2 transport) */
+ .ignore2stf = 0,
+ /* Forcefully disable VLAN filtering by telling
+ * the switch that VLAN has a different EtherType.
+ */
+ .tpid = ETH_P_SJA1105,
+ .tpid2 = ETH_P_SJA1105,
+ /* Enable the TTEthernet engine on SJA1110 */
+ .tte_en = true,
+ /* Set up the EtherType for control packets on SJA1110 */
+ .header_type = ETH_P_SJA1110,
+ };
+ struct sja1105_general_params_entry *general_params;
+ struct sja1105_table *table;
+ int rc;
+
+ rc = sja1105_init_topology(priv, &default_general_params);
+ if (rc)
+ return rc;
+
+ table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
+
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(table->ops->max_entry_count,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = table->ops->max_entry_count;
+
+ general_params = table->entries;
+
+ /* This table only has a single entry */
+ general_params[0] = default_general_params;
+
+ sja1110_select_tdmaconfigidx(priv);
+
+ return 0;
+}
+
+static int sja1105_init_avb_params(struct sja1105_private *priv)
+{
+ struct sja1105_avb_params_entry *avb;
+ struct sja1105_table *table;
+
+ table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS];
+
+ /* Discard previous AVB Parameters Table */
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(table->ops->max_entry_count,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = table->ops->max_entry_count;
+
+ avb = table->entries;
+
+ /* Configure the MAC addresses for meta frames */
+ avb->destmeta = SJA1105_META_DMAC;
+ avb->srcmeta = SJA1105_META_SMAC;
+ /* On P/Q/R/S, configure the direction of the PTP_CLK pin as input by
+ * default. This is because there might be boards with a hardware
+ * layout where enabling the pin as output might cause an electrical
+ * clash. On E/T the pin is always an output, which the board designers
+ * probably already knew, so even if there are going to be electrical
+ * issues, there's nothing we can do.
+ */
+ avb->cas_master = false;
+
+ return 0;
+}
+
+/* The L2 policing table is 2-stage. The table is looked up for each frame
+ * according to the ingress port, whether it was broadcast or not, and the
+ * classified traffic class (given by VLAN PCP). This portion of the lookup is
+ * fixed, and gives access to the SHARINDX, an indirection register pointing
+ * within the policing table itself, which is used to resolve the policer that
+ * will be used for this frame.
+ *
+ * Stage 1 Stage 2
+ * +------------+--------+ +---------------------------------+
+ * |Port 0 TC 0 |SHARINDX| | Policer 0: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * |Port 0 TC 1 |SHARINDX| | Policer 1: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * ... | Policer 2: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * |Port 0 TC 7 |SHARINDX| | Policer 3: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * |Port 1 TC 0 |SHARINDX| | Policer 4: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * ... | Policer 5: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * |Port 1 TC 7 |SHARINDX| | Policer 6: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * ... | Policer 7: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * |Port 4 TC 7 |SHARINDX| ...
+ * +------------+--------+
+ * |Port 0 BCAST|SHARINDX| ...
+ * +------------+--------+
+ * |Port 1 BCAST|SHARINDX| ...
+ * +------------+--------+
+ * ... ...
+ * +------------+--------+ +---------------------------------+
+ * |Port 4 BCAST|SHARINDX| | Policer 44: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ *
+ * In this driver, we shall use policers 0-4 as statically allocated port
+ * (matchall) policers. So we need to make the SHARINDX for all lookups
+ * corresponding to this ingress port (8 VLAN PCP lookups and 1 broadcast
+ * lookup) equal.
+ * The remaining policers (40) shall be dynamically allocated for flower
+ * policers, where the key is either vlan_prio or dst_mac ff:ff:ff:ff:ff:ff.
+ */
+#define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000)
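+/* Reading of the macro above (an inference from the packing math, not from
+ * a datasheet): the hardware rate field counts units of 1/64 Mbps
+ * (15.625 kbps), so a 1000 Mbps policer packs to
+ * SJA1105_RATE_MBPS(1000) = 64000.
+ */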
+
+static int sja1105_init_l2_policing(struct sja1105_private *priv)
+{
+ struct sja1105_l2_policing_entry *policing;
+ struct dsa_switch *ds = priv->ds;
+ struct sja1105_table *table;
+ int port, tc;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_POLICING];
+
+ /* Discard previous L2 Policing Table */
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(table->ops->max_entry_count,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = table->ops->max_entry_count;
+
+ policing = table->entries;
+
+ /* Setup shared indices for the matchall policers */
+ for (port = 0; port < ds->num_ports; port++) {
+ int mcast = (ds->num_ports * (SJA1105_NUM_TC + 1)) + port;
+ int bcast = (ds->num_ports * SJA1105_NUM_TC) + port;
+
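+ /* Worked example, assuming the 5-port SJA1105 and its 45-entry
+ * policing table: bcast lookups sit at indices 40..44, while the
+ * would-be mcast lookups at 45..49 fall past the end of the
+ * table, which is what the bounds check below filters out.
+ */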
+ for (tc = 0; tc < SJA1105_NUM_TC; tc++)
+ policing[port * SJA1105_NUM_TC + tc].sharindx = port;
+
+ policing[bcast].sharindx = port;
+ /* Only SJA1110 has multicast policers */
+ if (mcast < table->ops->max_entry_count)
+ policing[mcast].sharindx = port;
+ }
+
+ /* Setup the matchall policer parameters */
+ for (port = 0; port < ds->num_ports; port++) {
+ int mtu = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
+
+ if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
+ mtu += VLAN_HLEN;
+
+ policing[port].smax = 65535; /* Burst size in bytes */
+ policing[port].rate = SJA1105_RATE_MBPS(1000);
+ policing[port].maxlen = mtu;
+ policing[port].partition = 0;
+ }
+
+ return 0;
+}
+
+static int sja1105_static_config_load(struct sja1105_private *priv)
+{
+ int rc;
+
+ sja1105_static_config_free(&priv->static_config);
+ rc = sja1105_static_config_init(&priv->static_config,
+ priv->info->static_ops,
+ priv->info->device_id);
+ if (rc)
+ return rc;
+
+ /* Build static configuration */
+ rc = sja1105_init_mac_settings(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_mii_settings(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_static_fdb(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_static_vlan(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_l2_lookup_params(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_l2_forwarding(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_l2_forwarding_params(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_l2_policing(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_general_params(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_avb_params(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1110_init_pcp_remapping(priv);
+ if (rc < 0)
+ return rc;
+
+ /* Send initial configuration to hardware via SPI */
+ return sja1105_static_config_upload(priv);
+}
+
+/* This is the "new way" for a MAC driver to configure its RGMII delay lines,
+ * based on the explicit "rx-internal-delay-ps" and "tx-internal-delay-ps"
+ * properties. It has the advantage of working with fixed links and with PHYs
+ * that apply RGMII delays too, and the MAC driver need not perform any
+ * special checks.
+ *
+ * Previously we were acting upon the "phy-mode" property when we were
+ * operating in fixed-link, basically acting as a PHY, but with a reversed
+ * interpretation: PHY_INTERFACE_MODE_RGMII_TXID means that the MAC should
+ * behave as if it is connected to a PHY which has applied RGMII delays in the
+ * TX direction. So if anything, RX delays should have been added by the MAC,
+ * but we were adding TX delays.
+ *
+ * If the "{rx,tx}-internal-delay-ps" properties are not specified, we fall
+ * back to the legacy behavior and apply delays on fixed-link ports based on
+ * the reverse interpretation of the phy-mode. This is a deviation from the
+ * expected default behavior which is to simply apply no delays. To achieve
+ * that behavior with the new bindings, it is mandatory to specify
+ * "{rx,tx}-internal-delay-ps" with a value of 0.
+ */
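+/* A hypothetical DT snippet for the new bindings (the 2000 ps values mirror
+ * what the legacy fallback below applies):
+ *
+ * port@2 {
+ * reg = <2>;
+ * phy-mode = "rgmii-id";
+ * rx-internal-delay-ps = <2000>;
+ * tx-internal-delay-ps = <2000>;
+ * };
+ */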
+static int sja1105_parse_rgmii_delays(struct sja1105_private *priv, int port,
+ struct device_node *port_dn)
+{
+ phy_interface_t phy_mode = priv->phy_mode[port];
+ struct device *dev = &priv->spidev->dev;
+ int rx_delay = -1, tx_delay = -1;
+
+ if (!phy_interface_mode_is_rgmii(phy_mode))
+ return 0;
+
+ of_property_read_u32(port_dn, "rx-internal-delay-ps", &rx_delay);
+ of_property_read_u32(port_dn, "tx-internal-delay-ps", &tx_delay);
+
+ if (rx_delay == -1 && tx_delay == -1 && priv->fixed_link[port]) {
+ dev_warn(dev,
+ "Port %d interpreting RGMII delay settings based on \"phy-mode\" property, "
+ "please update device tree to specify \"rx-internal-delay-ps\" and "
+ "\"tx-internal-delay-ps\"",
+ port);
+
+ if (phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
+ phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+ rx_delay = 2000;
+
+ if (phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
+ phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+ tx_delay = 2000;
+ }
+
+ if (rx_delay < 0)
+ rx_delay = 0;
+ if (tx_delay < 0)
+ tx_delay = 0;
+
+ if ((rx_delay || tx_delay) && !priv->info->setup_rgmii_delay) {
+ dev_err(dev, "Chip cannot apply RGMII delays\n");
+ return -EINVAL;
+ }
+
+ if ((rx_delay && rx_delay < SJA1105_RGMII_DELAY_MIN_PS) ||
+ (tx_delay && tx_delay < SJA1105_RGMII_DELAY_MIN_PS) ||
+ (rx_delay > SJA1105_RGMII_DELAY_MAX_PS) ||
+ (tx_delay > SJA1105_RGMII_DELAY_MAX_PS)) {
+ dev_err(dev,
+ "port %d RGMII delay values out of range, must be between %d and %d ps\n",
+ port, SJA1105_RGMII_DELAY_MIN_PS, SJA1105_RGMII_DELAY_MAX_PS);
+ return -ERANGE;
+ }
+
+ priv->rgmii_rx_delay_ps[port] = rx_delay;
+ priv->rgmii_tx_delay_ps[port] = tx_delay;
+
+ return 0;
+}
+
+static int sja1105_parse_ports_node(struct sja1105_private *priv,
+ struct device_node *ports_node)
+{
+ struct device *dev = &priv->spidev->dev;
+ struct device_node *child;
+
+ for_each_available_child_of_node(ports_node, child) {
+ struct device_node *phy_node;
+ phy_interface_t phy_mode;
+ u32 index;
+ int err;
+
+ /* Get switch port number from DT */
+ if (of_property_read_u32(child, "reg", &index) < 0) {
+ dev_err(dev, "Port number not defined in device tree "
+ "(property \"reg\")\n");
+ of_node_put(child);
+ return -ENODEV;
+ }
+
+ /* Get PHY mode from DT */
+ err = of_get_phy_mode(child, &phy_mode);
+ if (err) {
+ dev_err(dev, "Failed to read phy-mode or "
+ "phy-interface-type property for port %d\n",
+ index);
+ of_node_put(child);
+ return -ENODEV;
+ }
+
+ phy_node = of_parse_phandle(child, "phy-handle", 0);
+ if (!phy_node) {
+ if (!of_phy_is_fixed_link(child)) {
+ dev_err(dev, "phy-handle or fixed-link "
+ "properties missing!\n");
+ of_node_put(child);
+ return -ENODEV;
+ }
+ /* phy-handle is missing, but fixed-link isn't.
+ * So it's a fixed link. Default to PHY role.
+ */
+ priv->fixed_link[index] = true;
+ } else {
+ of_node_put(phy_node);
+ }
+
+ priv->phy_mode[index] = phy_mode;
+
+ err = sja1105_parse_rgmii_delays(priv, index, child);
+ if (err) {
+ of_node_put(child);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int sja1105_parse_dt(struct sja1105_private *priv)
+{
+ struct device *dev = &priv->spidev->dev;
+ struct device_node *switch_node = dev->of_node;
+ struct device_node *ports_node;
+ int rc;
+
+ ports_node = of_get_child_by_name(switch_node, "ports");
+ if (!ports_node)
+ ports_node = of_get_child_by_name(switch_node, "ethernet-ports");
+ if (!ports_node) {
+ dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
+ return -ENODEV;
+ }
+
+ rc = sja1105_parse_ports_node(priv, ports_node);
+ of_node_put(ports_node);
+
+ return rc;
+}
+
+/* Convert link speed from SJA1105 to ethtool encoding */
+static int sja1105_port_speed_to_ethtool(struct sja1105_private *priv,
+ u64 speed)
+{
+ if (speed == priv->info->port_speed[SJA1105_SPEED_10MBPS])
+ return SPEED_10;
+ if (speed == priv->info->port_speed[SJA1105_SPEED_100MBPS])
+ return SPEED_100;
+ if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS])
+ return SPEED_1000;
+ if (speed == priv->info->port_speed[SJA1105_SPEED_2500MBPS])
+ return SPEED_2500;
+ return SPEED_UNKNOWN;
+}
+
+/* Set link speed in the MAC configuration for a specific port. */
+static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
+ int speed_mbps)
+{
+ struct sja1105_mac_config_entry *mac;
+ struct device *dev = priv->ds->dev;
+ u64 speed;
+ int rc;
+
+ /* On P/Q/R/S, one can read from the device via the MAC reconfiguration
+ * tables. On E/T, MAC reconfig tables are not readable, only writable.
+ * We have to *know* what the MAC looks like. For the sake of keeping
+ * the code common, we'll use the static configuration tables as a
+ * reasonable approximation for both E/T and P/Q/R/S.
+ */
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ switch (speed_mbps) {
+ case SPEED_UNKNOWN:
+ /* PHYLINK called sja1105_mac_config() to inform us about
+ * the state->interface, but AN has not completed and the
+ * speed is not yet valid. UM10944.pdf says that setting
+ * SJA1105_SPEED_AUTO at runtime disables the port, so that is
+ * ok for power consumption in case AN will never complete -
+ * otherwise PHYLINK should come back with a new update.
+ */
+ speed = priv->info->port_speed[SJA1105_SPEED_AUTO];
+ break;
+ case SPEED_10:
+ speed = priv->info->port_speed[SJA1105_SPEED_10MBPS];
+ break;
+ case SPEED_100:
+ speed = priv->info->port_speed[SJA1105_SPEED_100MBPS];
+ break;
+ case SPEED_1000:
+ speed = priv->info->port_speed[SJA1105_SPEED_1000MBPS];
+ break;
+ case SPEED_2500:
+ speed = priv->info->port_speed[SJA1105_SPEED_2500MBPS];
+ break;
+ default:
+ dev_err(dev, "Invalid speed %iMbps\n", speed_mbps);
+ return -EINVAL;
+ }
+
+ /* Overwrite SJA1105_SPEED_AUTO from the static MAC configuration
+ * table, since this will be used for the clocking setup, and we no
+ * longer need to store it in the static config (we already told the
+ * hardware we want auto during the upload phase).
+ * Actually for the SGMII port, the MAC is fixed at 1 Gbps and
+ * we need to configure the PCS only (if even that).
+ */
+ if (priv->phy_mode[port] == PHY_INTERFACE_MODE_SGMII)
+ mac[port].speed = priv->info->port_speed[SJA1105_SPEED_1000MBPS];
+ else if (priv->phy_mode[port] == PHY_INTERFACE_MODE_2500BASEX)
+ mac[port].speed = priv->info->port_speed[SJA1105_SPEED_2500MBPS];
+ else
+ mac[port].speed = speed;
+
+ /* Write to the dynamic reconfiguration tables */
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
+ &mac[port], true);
+ if (rc < 0) {
+ dev_err(dev, "Failed to write MAC config: %d\n", rc);
+ return rc;
+ }
+
+ /* Reconfigure the PLLs for the RGMII interfaces (required 125 MHz at
+ * gigabit, 25 MHz at 100 Mbps and 2.5 MHz at 10 Mbps). For MII and
+ * RMII no change of the clock setup is required. Actually, changing
+ * the clock setup does interrupt the clock signal for a certain time
+ * which causes trouble for all PHYs relying on this signal.
+ */
+ if (!phy_interface_mode_is_rgmii(priv->phy_mode[port]))
+ return 0;
+
+ return sja1105_clocking_setup_port(priv, port);
+}
+
+static struct phylink_pcs *
+sja1105_mac_select_pcs(struct dsa_switch *ds, int port, phy_interface_t iface)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct dw_xpcs *xpcs = priv->xpcs[port];
+
+ if (xpcs)
+ return &xpcs->pcs;
+
+ return NULL;
+}
+
+static void sja1105_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ sja1105_inhibit_tx(ds->priv, BIT(port), true);
+}
+
+static void sja1105_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct sja1105_private *priv = ds->priv;
+
+ sja1105_adjust_port_config(priv, port, speed);
+
+ sja1105_inhibit_tx(priv, BIT(port), false);
+}
+
+static void sja1105_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_xmii_params_entry *mii;
+ phy_interface_t phy_mode;
+
+ /* This driver does not make use of the speed, duplex, pause or the
+ * advertisement in its mac_config, so it is safe to mark this driver
+ * as non-legacy.
+ */
+ config->legacy_pre_march2020 = false;
+
+ phy_mode = priv->phy_mode[port];
+ if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
+ phy_mode == PHY_INTERFACE_MODE_2500BASEX) {
+ /* Changing the PHY mode on SERDES ports is possible and makes
+ * sense, because that is done through the XPCS. We allow
+ * changes between SGMII and 2500base-X.
+ */
+ if (priv->info->supports_sgmii[port])
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+
+ if (priv->info->supports_2500basex[port])
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ config->supported_interfaces);
+ } else {
+ /* The SJA1105 MAC programming model is through the static
+ * config (the xMII Mode table cannot be dynamically
+ * reconfigured), and we have to program that early.
+ */
+ __set_bit(phy_mode, config->supported_interfaces);
+ }
+
+ /* The MAC does not support pause frames, and also doesn't
+ * support half-duplex traffic modes.
+ */
+ config->mac_capabilities = MAC_10FD | MAC_100FD;
+
+ mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
+ if (mii->xmii_mode[port] == XMII_MODE_RGMII ||
+ mii->xmii_mode[port] == XMII_MODE_SGMII)
+ config->mac_capabilities |= MAC_1000FD;
+
+ if (priv->info->supports_2500basex[port])
+ config->mac_capabilities |= MAC_2500FD;
+}
+
+static int
+sja1105_find_static_fdb_entry(struct sja1105_private *priv, int port,
+ const struct sja1105_l2_lookup_entry *requested)
+{
+ struct sja1105_l2_lookup_entry *l2_lookup;
+ struct sja1105_table *table;
+ int i;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
+ l2_lookup = table->entries;
+
+ for (i = 0; i < table->entry_count; i++)
+ if (l2_lookup[i].macaddr == requested->macaddr &&
+ l2_lookup[i].vlanid == requested->vlanid &&
+ l2_lookup[i].destports & BIT(port))
+ return i;
+
+ return -1;
+}
+
+/* We want FDB entries added statically through the bridge command to persist
+ * across switch resets, which are a common thing during normal SJA1105
+ * operation. So we have to back them up in the static configuration tables
+ * and hence apply them on next static config upload... yay!
+ */
+static int
+sja1105_static_fdb_change(struct sja1105_private *priv, int port,
+ const struct sja1105_l2_lookup_entry *requested,
+ bool keep)
+{
+ struct sja1105_l2_lookup_entry *l2_lookup;
+ struct sja1105_table *table;
+ int rc, match;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
+
+ match = sja1105_find_static_fdb_entry(priv, port, requested);
+ if (match < 0) {
+ /* Can't delete a missing entry. */
+ if (!keep)
+ return 0;
+
+ /* No match => new entry */
+ rc = sja1105_table_resize(table, table->entry_count + 1);
+ if (rc)
+ return rc;
+
+ match = table->entry_count - 1;
+ }
+
+ /* Assign pointer after the resize (it may be new memory) */
+ l2_lookup = table->entries;
+
+ /* We have a match.
+ * If the job was to add this FDB entry, it's already done (mostly
+ * anyway, since the port forwarding mask may have changed, case in
+ * which we update it).
+ * Otherwise we have to delete it.
+ */
+ if (keep) {
+ l2_lookup[match] = *requested;
+ return 0;
+ }
+
+ /* To remove, the strategy is to overwrite the element with
+ * the last one, and then reduce the array size by 1
+ */
+ l2_lookup[match] = l2_lookup[table->entry_count - 1];
+ return sja1105_table_resize(table, table->entry_count - 1);
+}
+
+/* First-generation switches have a 4-way set associative TCAM that
+ * holds the FDB entries. An FDB index spans from 0 to 1023 and is comprised of
+ * a "bin" (grouping of 4 entries) and a "way" (an entry within a bin).
+ * For the placement of a newly learnt FDB entry, the switch selects the bin
+ * based on a hash function, and the way within that bin incrementally.
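+ * As an example of the index math: with the 4-way bins described above,
+ * bin 5, way 2 maps to FDB index 5 * 4 + 2 = 22.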
+ */
+static int sja1105et_fdb_index(int bin, int way)
+{
+ return bin * SJA1105ET_FDB_BIN_SIZE + way;
+}
+
+static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
+ const u8 *addr, u16 vid,
+ struct sja1105_l2_lookup_entry *match,
+ int *last_unused)
+{
+ int way;
+
+ for (way = 0; way < SJA1105ET_FDB_BIN_SIZE; way++) {
+ struct sja1105_l2_lookup_entry l2_lookup = {0};
+ int index = sja1105et_fdb_index(bin, way);
+
+ /* Skip unused entries, optionally recording the last
+ * unused way seen into *last_unused
+ */
+ if (sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ index, &l2_lookup)) {
+ if (last_unused)
+ *last_unused = way;
+ continue;
+ }
+
+ if (l2_lookup.macaddr == ether_addr_to_u64(addr) &&
+ l2_lookup.vlanid == vid) {
+ if (match)
+ *match = l2_lookup;
+ return way;
+ }
+ }
+ /* Return an invalid entry index if not found */
+ return -1;
+}
+
+int sja1105et_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct sja1105_l2_lookup_entry l2_lookup = {0}, tmp;
+ struct sja1105_private *priv = ds->priv;
+ struct device *dev = ds->dev;
+ int last_unused = -1;
+ int start, end, i;
+ int bin, way, rc;
+
+ bin = sja1105et_fdb_hash(priv, addr, vid);
+
+ way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
+ &l2_lookup, &last_unused);
+ if (way >= 0) {
+ /* We have an FDB entry. Is our port in the destination
+ * mask? If yes, we need to do nothing. If not, we need
+ * to rewrite the entry by adding this port to it.
+ */
+ if ((l2_lookup.destports & BIT(port)) && l2_lookup.lockeds)
+ return 0;
+ l2_lookup.destports |= BIT(port);
+ } else {
+ /* We don't have an FDB entry. We construct a new one and
+ * try to find a place for it within the FDB table.
+ */
+ l2_lookup.macaddr = ether_addr_to_u64(addr);
+ l2_lookup.destports = BIT(port);
+ l2_lookup.vlanid = vid;
+
+ if (last_unused >= 0) {
+ way = last_unused;
+ } else {
+ u8 rand_way;
+
+ /* Bin is full, need to evict somebody.
+ * Choose victim at random. If you get these messages
+ * often, you may need to consider changing the
+ * distribution function:
+ * static_config[BLK_IDX_L2_LOOKUP_PARAMS].entries->poly
+ */
+ get_random_bytes(&rand_way, sizeof(rand_way));
+ way = rand_way % SJA1105ET_FDB_BIN_SIZE;
+ dev_warn(dev, "Warning, FDB bin %d full while adding entry for %pM. Evicting entry %u.\n",
+ bin, addr, way);
+ /* Evict the victim; its index can only be computed
+ * now that "way" holds a valid value
+ */
+ sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ sja1105et_fdb_index(bin, way),
+ NULL, false);
+ }
+ }
+ }
+ l2_lookup.lockeds = true;
+ l2_lookup.index = sja1105et_fdb_index(bin, way);
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ l2_lookup.index, &l2_lookup,
+ true);
+ if (rc < 0)
+ return rc;
+
+ /* Invalidate a dynamically learned entry if that exists */
+ start = sja1105et_fdb_index(bin, 0);
+ end = sja1105et_fdb_index(bin, way);
+
+ for (i = start; i < end; i++) {
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ i, &tmp);
+ if (rc == -ENOENT)
+ continue;
+ if (rc)
+ return rc;
+
+ if (tmp.macaddr != ether_addr_to_u64(addr) || tmp.vlanid != vid)
+ continue;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ i, NULL, false);
+ if (rc)
+ return rc;
+
+ break;
+ }
+
+ return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
+}
+
+int sja1105et_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct sja1105_l2_lookup_entry l2_lookup = {0};
+ struct sja1105_private *priv = ds->priv;
+ int index, bin, way, rc;
+ bool keep;
+
+ bin = sja1105et_fdb_hash(priv, addr, vid);
+ way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
+ &l2_lookup, NULL);
+ if (way < 0)
+ return 0;
+ index = sja1105et_fdb_index(bin, way);
+
+ /* We have an FDB entry. Is our port in the destination mask? If yes,
+ * we need to remove it. If the resulting port mask becomes empty, we
+ * need to completely evict the FDB entry.
+ * Otherwise we just write it back.
+ */
+ l2_lookup.destports &= ~BIT(port);
+
+ if (l2_lookup.destports)
+ keep = true;
+ else
+ keep = false;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ index, &l2_lookup, keep);
+ if (rc < 0)
+ return rc;
+
+ return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
+}
+
+int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct sja1105_l2_lookup_entry l2_lookup = {0}, tmp;
+ struct sja1105_private *priv = ds->priv;
+ int rc, i;
+
+ /* Search for an existing entry in the FDB table */
+ l2_lookup.macaddr = ether_addr_to_u64(addr);
+ l2_lookup.vlanid = vid;
+ l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
+ l2_lookup.mask_vlanid = VLAN_VID_MASK;
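+ /* (the all-ones masks effectively turn the search into an exact
+ * match on both MAC address and VID)
+ */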
+ l2_lookup.destports = BIT(port);
+
+ tmp = l2_lookup;
+
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ SJA1105_SEARCH, &tmp);
+ if (rc == 0 && tmp.index != SJA1105_MAX_L2_LOOKUP_COUNT - 1) {
+ /* Found a static entry and this port is already in the entry's
+ * port mask => job done
+ */
+ if ((tmp.destports & BIT(port)) && tmp.lockeds)
+ return 0;
+
+ l2_lookup = tmp;
+
+ /* l2_lookup.index is populated by the switch in case it
+ * found something.
+ */
+ l2_lookup.destports |= BIT(port);
+ goto skip_finding_an_index;
+ }
+
+ /* Not found, so try to find an unused spot in the FDB.
+ * This is slightly inefficient because the strategy is knock-knock at
+ * every possible position from 0 to 1023.
+ */
+ for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ i, NULL);
+ if (rc < 0)
+ break;
+ }
+ if (i == SJA1105_MAX_L2_LOOKUP_COUNT) {
+ dev_err(ds->dev, "FDB is full, cannot add entry.\n");
+ return -EINVAL;
+ }
+ l2_lookup.index = i;
+
+skip_finding_an_index:
+ l2_lookup.lockeds = true;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ l2_lookup.index, &l2_lookup,
+ true);
+ if (rc < 0)
+ return rc;
+
+ /* The switch learns dynamic entries and looks up the FDB left to
+ * right. It is possible that our addition was concurrent with the
+ * dynamic learning of the same address, so now that the static entry
+ * has been installed, we are certain that address learning for this
+ * particular address has been turned off, so the dynamic entry either
+ * is in the FDB at an index smaller than the static one, or isn't (it
+ * can also be at a larger index, but in that case it is inactive
+ * because the static FDB entry will match first, and the dynamic one
+ * will eventually age out). Search for a dynamically learned address
+ * prior to our static one and invalidate it.
+ */
+ tmp = l2_lookup;
+
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ SJA1105_SEARCH, &tmp);
+ if (rc < 0) {
+ dev_err(ds->dev,
+ "port %d failed to read back entry for %pM vid %d: %pe\n",
+ port, addr, vid, ERR_PTR(rc));
+ return rc;
+ }
+
+ if (tmp.index < l2_lookup.index) {
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ tmp.index, NULL, false);
+ if (rc < 0)
+ return rc;
+ }
+
+ return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
+}
+
+int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct sja1105_l2_lookup_entry l2_lookup = {0};
+ struct sja1105_private *priv = ds->priv;
+ bool keep;
+ int rc;
+
+ l2_lookup.macaddr = ether_addr_to_u64(addr);
+ l2_lookup.vlanid = vid;
+ l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
+ l2_lookup.mask_vlanid = VLAN_VID_MASK;
+ l2_lookup.destports = BIT(port);
+
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ SJA1105_SEARCH, &l2_lookup);
+ if (rc < 0)
+ return 0;
+
+ l2_lookup.destports &= ~BIT(port);
+
+ /* Decide whether we remove just this port from the FDB entry,
+ * or if we remove it completely.
+ */
+ if (l2_lookup.destports)
+ keep = true;
+ else
+ keep = false;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ l2_lookup.index, &l2_lookup, keep);
+ if (rc < 0)
+ return rc;
+
+ return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
+}
+
+static int sja1105_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ if (!vid) {
+ switch (db.type) {
+ case DSA_DB_PORT:
+ vid = dsa_tag_8021q_standalone_vid(db.dp);
+ break;
+ case DSA_DB_BRIDGE:
+ vid = dsa_tag_8021q_bridge_vid(db.bridge.num);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ mutex_lock(&priv->fdb_lock);
+ rc = priv->info->fdb_add_cmd(ds, port, addr, vid);
+ mutex_unlock(&priv->fdb_lock);
+
+ return rc;
+}
+
+static int __sja1105_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct sja1105_private *priv = ds->priv;
+
+ if (!vid) {
+ switch (db.type) {
+ case DSA_DB_PORT:
+ vid = dsa_tag_8021q_standalone_vid(db.dp);
+ break;
+ case DSA_DB_BRIDGE:
+ vid = dsa_tag_8021q_bridge_vid(db.bridge.num);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return priv->info->fdb_del_cmd(ds, port, addr, vid);
+}
+
+static int sja1105_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ mutex_lock(&priv->fdb_lock);
+ rc = __sja1105_fdb_del(ds, port, addr, vid, db);
+ mutex_unlock(&priv->fdb_lock);
+
+ return rc;
+}
+
+static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct device *dev = ds->dev;
+ int i;
+
+ for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
+ struct sja1105_l2_lookup_entry l2_lookup = {0};
+ u8 macaddr[ETH_ALEN];
+ int rc;
+
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ i, &l2_lookup);
+ /* No fdb entry at i, not an issue */
+ if (rc == -ENOENT)
+ continue;
+ if (rc) {
+ dev_err(dev, "Failed to dump FDB: %d\n", rc);
+ return rc;
+ }
+
+ /* FDB dump callback is per port. This means we have to
+ * disregard a valid entry if it's not for this port, even if
+ * only to revisit it later. This is inefficient because the
+ * 1024-sized FDB table needs to be traversed 4 times through
+ * SPI during a 'bridge fdb show' command.
+ */
+ if (!(l2_lookup.destports & BIT(port)))
+ continue;
+
+ u64_to_ether_addr(l2_lookup.macaddr, macaddr);
+
+ /* Hardware FDB is shared for fdb and mdb, "bridge fdb show"
+ * only wants to see unicast
+ */
+ if (is_multicast_ether_addr(macaddr))
+ continue;
+
+ /* We need to hide the dsa_8021q VLANs from the user. */
+ if (vid_is_dsa_8021q(l2_lookup.vlanid))
+ l2_lookup.vlanid = 0;
+ rc = cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+static void sja1105_fast_age(struct dsa_switch *ds, int port)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct sja1105_private *priv = ds->priv;
+ struct dsa_db db = {
+ .type = DSA_DB_BRIDGE,
+ .bridge = {
+ .dev = dsa_port_bridge_dev_get(dp),
+ .num = dsa_port_bridge_num_get(dp),
+ },
+ };
+ int i;
+
+ mutex_lock(&priv->fdb_lock);
+
+ for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
+ struct sja1105_l2_lookup_entry l2_lookup = {0};
+ u8 macaddr[ETH_ALEN];
+ int rc;
+
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ i, &l2_lookup);
+ /* No fdb entry at i, not an issue */
+ if (rc == -ENOENT)
+ continue;
+ if (rc) {
+ dev_err(ds->dev, "Failed to read FDB: %pe\n",
+ ERR_PTR(rc));
+ break;
+ }
+
+ if (!(l2_lookup.destports & BIT(port)))
+ continue;
+
+ /* Don't delete static FDB entries */
+ if (l2_lookup.lockeds)
+ continue;
+
+ u64_to_ether_addr(l2_lookup.macaddr, macaddr);
+
+ rc = __sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db);
+ if (rc) {
+ dev_err(ds->dev,
+ "Failed to delete FDB entry %pM vid %lld: %pe\n",
+ macaddr, l2_lookup.vlanid, ERR_PTR(rc));
+ break;
+ }
+ }
+
+ mutex_unlock(&priv->fdb_lock);
+}
+
+static int sja1105_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ return sja1105_fdb_add(ds, port, mdb->addr, mdb->vid, db);
+}
+
+static int sja1105_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid, db);
+}
+
+/* Common function for unicast and broadcast flood configuration.
+ * Flooding is configured between each {ingress, egress} port pair, and since
+ * the bridge's semantics are those of "egress flooding", it means we must
+ * enable flooding towards this port from all ingress ports that are in the
+ * same forwarding domain.
+ */
+static int sja1105_manage_flood_domains(struct sja1105_private *priv)
+{
+ struct sja1105_l2_forwarding_entry *l2_fwd;
+ struct dsa_switch *ds = priv->ds;
+ int from, to, rc;
+
+ l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;
+
+ for (from = 0; from < ds->num_ports; from++) {
+ u64 fl_domain = 0, bc_domain = 0;
+
+ for (to = 0; to < priv->ds->num_ports; to++) {
+ if (!sja1105_can_forward(l2_fwd, from, to))
+ continue;
+
+ if (priv->ucast_egress_floods & BIT(to))
+ fl_domain |= BIT(to);
+ if (priv->bcast_egress_floods & BIT(to))
+ bc_domain |= BIT(to);
+ }
+
+ /* Nothing changed, nothing to do */
+ if (l2_fwd[from].fl_domain == fl_domain &&
+ l2_fwd[from].bc_domain == bc_domain)
+ continue;
+
+ l2_fwd[from].fl_domain = fl_domain;
+ l2_fwd[from].bc_domain = bc_domain;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
+ from, &l2_fwd[from], true);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int sja1105_bridge_member(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge, bool member)
+{
+ struct sja1105_l2_forwarding_entry *l2_fwd;
+ struct sja1105_private *priv = ds->priv;
+ int i, rc;
+
+ l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;
+
+ for (i = 0; i < ds->num_ports; i++) {
+ /* Add this port to the forwarding matrix of the
+ * other ports in the same bridge, and vice versa.
+ */
+ if (!dsa_is_user_port(ds, i))
+ continue;
+ /* For the ports already under the bridge, only one thing needs
+ * to be done, and that is to add this port to their
+ * reachability domain. So we can perform the SPI write for
+ * them immediately. However, for this port itself (the one
+ * that is new to the bridge), we need to add all other ports
+ * to its reachability domain. So we do that incrementally in
+ * this loop, and perform the SPI write only at the end, once
+ * the domain contains all other bridge ports.
+ */
+ if (i == port)
+ continue;
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
+ continue;
+ sja1105_port_allow_traffic(l2_fwd, i, port, member);
+ sja1105_port_allow_traffic(l2_fwd, port, i, member);
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
+ i, &l2_fwd[i], true);
+ if (rc < 0)
+ return rc;
+ }
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
+ port, &l2_fwd[port], true);
+ if (rc)
+ return rc;
+
+ rc = sja1105_commit_pvid(ds, port);
+ if (rc)
+ return rc;
+
+ return sja1105_manage_flood_domains(priv);
+}
+
+static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_mac_config_entry *mac;
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ case BR_STATE_BLOCKING:
+ /* From UM10944 description of DRPDTAG (why put this there?):
+ * "Management traffic flows to the port regardless of the state
+ * of the INGRESS flag". So BPDUs are still be allowed to pass.
+ * At the moment no difference between DISABLED and BLOCKING.
+ */
+ mac[port].ingress = false;
+ mac[port].egress = false;
+ mac[port].dyn_learn = false;
+ break;
+ case BR_STATE_LISTENING:
+ mac[port].ingress = true;
+ mac[port].egress = false;
+ mac[port].dyn_learn = false;
+ break;
+ case BR_STATE_LEARNING:
+ mac[port].ingress = true;
+ mac[port].egress = false;
+ mac[port].dyn_learn = dp->learning;
+ break;
+ case BR_STATE_FORWARDING:
+ mac[port].ingress = true;
+ mac[port].egress = true;
+ mac[port].dyn_learn = dp->learning;
+ break;
+ default:
+ dev_err(ds->dev, "invalid STP state: %d\n", state);
+ return;
+ }
+
+ sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
+ &mac[port], true);
+}
+
+static int sja1105_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
+{
+ int rc;
+
+ rc = sja1105_bridge_member(ds, port, bridge, true);
+ if (rc)
+ return rc;
+
+ rc = dsa_tag_8021q_bridge_join(ds, port, bridge);
+ if (rc) {
+ sja1105_bridge_member(ds, port, bridge, false);
+ return rc;
+ }
+
+ *tx_fwd_offload = true;
+
+ return 0;
+}
+
+static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
+{
+ dsa_tag_8021q_bridge_leave(ds, port, bridge);
+ sja1105_bridge_member(ds, port, bridge, false);
+}
+
+#define BYTES_PER_KBIT (1000LL / 8)
+/* Port 0 (the uC port) does not have CBS shapers */
+#define SJA1110_FIXED_CBS(port, prio) ((((port) - 1) * SJA1105_NUM_TC) + (prio))
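+/* e.g. with the 8 traffic classes of this switch, port 1 prio 0 maps to
+ * shaper 0 and port 2 prio 7 to shaper (2 - 1) * 8 + 7 = 15
+ */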
+
+static int sja1105_find_cbs_shaper(struct sja1105_private *priv,
+ int port, int prio)
+{
+ int i;
+
+ if (priv->info->fixed_cbs_mapping) {
+ i = SJA1110_FIXED_CBS(port, prio);
+ if (i >= 0 && i < priv->info->num_cbs_shapers)
+ return i;
+
+ return -1;
+ }
+
+ for (i = 0; i < priv->info->num_cbs_shapers; i++)
+ if (priv->cbs[i].port == port && priv->cbs[i].prio == prio)
+ return i;
+
+ return -1;
+}
+
+static int sja1105_find_unused_cbs_shaper(struct sja1105_private *priv)
+{
+ int i;
+
+ if (priv->info->fixed_cbs_mapping)
+ return -1;
+
+ for (i = 0; i < priv->info->num_cbs_shapers; i++)
+ if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope)
+ return i;
+
+ return -1;
+}
+
+static int sja1105_delete_cbs_shaper(struct sja1105_private *priv, int port,
+ int prio)
+{
+ int i;
+
+ for (i = 0; i < priv->info->num_cbs_shapers; i++) {
+ struct sja1105_cbs_entry *cbs = &priv->cbs[i];
+
+ if (cbs->port == port && cbs->prio == prio) {
+ memset(cbs, 0, sizeof(*cbs));
+ return sja1105_dynamic_config_write(priv, BLK_IDX_CBS,
+ i, cbs, true);
+ }
+ }
+
+ return 0;
+}
+
+static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port,
+ struct tc_cbs_qopt_offload *offload)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_cbs_entry *cbs;
+ s64 port_transmit_rate_kbps;
+ int index;
+
+ if (!offload->enable)
+ return sja1105_delete_cbs_shaper(priv, port, offload->queue);
+
+ /* The user may be replacing an existing shaper */
+ index = sja1105_find_cbs_shaper(priv, port, offload->queue);
+ if (index < 0) {
+ /* That isn't the case - see if we can allocate a new one */
+ index = sja1105_find_unused_cbs_shaper(priv);
+ if (index < 0)
+ return -ENOSPC;
+ }
+
+ cbs = &priv->cbs[index];
+ cbs->port = port;
+ cbs->prio = offload->queue;
+ /* locredit and sendslope are negative by definition. In hardware,
+ * positive values must be provided, and the negative sign is implicit.
+ */
+ cbs->credit_hi = offload->hicredit;
+ cbs->credit_lo = abs(offload->locredit);
+ /* User space is in kbits/sec, while the hardware works in bytes/sec times
+ * link speed. Since the given offload->sendslope is good only for the
+ * current link speed anyway, and user space is likely to reprogram it
+ * when that changes, don't even bother to track the port's link speed,
+ * but deduce the port transmit rate from idleslope - sendslope.
+ */
+ port_transmit_rate_kbps = offload->idleslope - offload->sendslope;
+ cbs->idle_slope = div_s64(offload->idleslope * BYTES_PER_KBIT,
+ port_transmit_rate_kbps);
+ cbs->send_slope = div_s64(abs(offload->sendslope * BYTES_PER_KBIT),
+ port_transmit_rate_kbps);
+ /* Convert the negative values from 64-bit 2's complement
+ * to 32-bit 2's complement (for the case of 0x80000000 whose
+ * negative is still negative).
+ */
+ cbs->credit_lo &= GENMASK_ULL(31, 0);
+ cbs->send_slope &= GENMASK_ULL(31, 0);
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_CBS, index, cbs,
+ true);
+}
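+
+/* Worked example for the slope conversion above (illustrative values):
+ * with idleslope = 20000 kbit/s and sendslope = -80000 kbit/s on a
+ * 100 Mbit/s link, port_transmit_rate_kbps = 20000 - (-80000) = 100000.
+ * With BYTES_PER_KBIT = 125, the hardware values come out as
+ * idle_slope = 20000 * 125 / 100000 = 25 and
+ * send_slope = |-80000 * 125| / 100000 = 100.
+ */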
+
+static int sja1105_reload_cbs(struct sja1105_private *priv)
+{
+ int rc = 0, i;
+
+ /* The credit based shapers are only allocated if
+ * CONFIG_NET_SCH_CBS is enabled.
+ */
+ if (!priv->cbs)
+ return 0;
+
+ for (i = 0; i < priv->info->num_cbs_shapers; i++) {
+ struct sja1105_cbs_entry *cbs = &priv->cbs[i];
+
+ if (!cbs->idle_slope && !cbs->send_slope)
+ continue;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_CBS, i, cbs,
+ true);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
+static const char * const sja1105_reset_reasons[] = {
+ [SJA1105_VLAN_FILTERING] = "VLAN filtering",
+ [SJA1105_AGEING_TIME] = "Ageing time",
+ [SJA1105_SCHEDULING] = "Time-aware scheduling",
+ [SJA1105_BEST_EFFORT_POLICING] = "Best-effort policing",
+ [SJA1105_VIRTUAL_LINKS] = "Virtual links",
+};
+
+/* For situations where we need to change a setting at runtime that is only
+ * available through the static configuration, resetting the switch in order
+ * to upload the new static config is unavoidable. Back up the settings we
+ * modify at runtime (currently only MAC) and restore them after uploading,
+ * such that this operation is relatively seamless.
+ */
+int sja1105_static_config_reload(struct sja1105_private *priv,
+ enum sja1105_reset_reason reason)
+{
+ struct ptp_system_timestamp ptp_sts_before;
+ struct ptp_system_timestamp ptp_sts_after;
+ int speed_mbps[SJA1105_MAX_NUM_PORTS];
+ u16 bmcr[SJA1105_MAX_NUM_PORTS] = {0};
+ struct sja1105_mac_config_entry *mac;
+ struct dsa_switch *ds = priv->ds;
+ s64 t1, t2, t3, t4;
+ s64 t12, t34;
+ int rc, i;
+ s64 now;
+
+ mutex_lock(&priv->fdb_lock);
+ mutex_lock(&priv->mgmt_lock);
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ /* Back up the dynamic link speed changed by sja1105_adjust_port_config
+ * in order to temporarily restore it to SJA1105_SPEED_AUTO - which the
+ * switch wants to see in the static config in order to allow us to
+ * change it through the dynamic interface later.
+ */
+ for (i = 0; i < ds->num_ports; i++) {
+ speed_mbps[i] = sja1105_port_speed_to_ethtool(priv,
+ mac[i].speed);
+ mac[i].speed = priv->info->port_speed[SJA1105_SPEED_AUTO];
+
+ if (priv->xpcs[i])
+ bmcr[i] = mdiobus_c45_read(priv->mdio_pcs, i,
+ MDIO_MMD_VEND2, MDIO_CTRL1);
+ }
+
+ /* No PTP operations can run right now */
+ mutex_lock(&priv->ptp_data.lock);
+
+ rc = __sja1105_ptp_gettimex(ds, &now, &ptp_sts_before);
+ if (rc < 0) {
+ mutex_unlock(&priv->ptp_data.lock);
+ goto out;
+ }
+
+ /* Reset switch and send updated static configuration */
+ rc = sja1105_static_config_upload(priv);
+ if (rc < 0) {
+ mutex_unlock(&priv->ptp_data.lock);
+ goto out;
+ }
+
+ rc = __sja1105_ptp_settime(ds, 0, &ptp_sts_after);
+ if (rc < 0) {
+ mutex_unlock(&priv->ptp_data.lock);
+ goto out;
+ }
+
+ t1 = timespec64_to_ns(&ptp_sts_before.pre_ts);
+ t2 = timespec64_to_ns(&ptp_sts_before.post_ts);
+ t3 = timespec64_to_ns(&ptp_sts_after.pre_ts);
+ t4 = timespec64_to_ns(&ptp_sts_after.post_ts);
+ /* Mid point, corresponds to pre-reset PTPCLKVAL */
+ t12 = t1 + (t2 - t1) / 2;
+ /* Mid point, corresponds to post-reset PTPCLKVAL, aka 0 */
+ t34 = t3 + (t4 - t3) / 2;
+ /* Advance PTPCLKVAL by the time it took since its readout */
+ now += (t34 - t12);
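+ /* Worked example (illustrative numbers): if t1 = 1000 and t2 = 1040,
+ * then t12 = 1020; if t3 = 5000 and t4 = 5080, then t34 = 5040, and
+ * the PHC is advanced by t34 - t12 = 4020 ns on top of its pre-reset
+ * readout.
+ */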
+
+ __sja1105_ptp_adjtime(ds, now);
+
+ mutex_unlock(&priv->ptp_data.lock);
+
+ dev_info(priv->ds->dev,
+ "Reset switch and programmed static config. Reason: %s\n",
+ sja1105_reset_reasons[reason]);
+
+ /* Configure the CGU (PLLs) for MII and RMII PHYs.
+ * For these interfaces there is no dynamic configuration
+ * needed, since the PLLs have the same settings at all speeds.
+ */
+ if (priv->info->clocking_setup) {
+ rc = priv->info->clocking_setup(priv);
+ if (rc < 0)
+ goto out;
+ }
+
+ for (i = 0; i < ds->num_ports; i++) {
+ struct dw_xpcs *xpcs = priv->xpcs[i];
+ unsigned int mode;
+
+ rc = sja1105_adjust_port_config(priv, i, speed_mbps[i]);
+ if (rc < 0)
+ goto out;
+
+ if (!xpcs)
+ continue;
+
+ if (bmcr[i] & BMCR_ANENABLE)
+ mode = MLO_AN_INBAND;
+ else if (priv->fixed_link[i])
+ mode = MLO_AN_FIXED;
+ else
+ mode = MLO_AN_PHY;
+
+ rc = xpcs_do_config(xpcs, priv->phy_mode[i], mode, NULL);
+ if (rc < 0)
+ goto out;
+
+ if (!phylink_autoneg_inband(mode)) {
+ int speed = SPEED_UNKNOWN;
+
+ if (priv->phy_mode[i] == PHY_INTERFACE_MODE_2500BASEX)
+ speed = SPEED_2500;
+ else if (bmcr[i] & BMCR_SPEED1000)
+ speed = SPEED_1000;
+ else if (bmcr[i] & BMCR_SPEED100)
+ speed = SPEED_100;
+ else
+ speed = SPEED_10;
+
+ xpcs_link_up(&xpcs->pcs, mode, priv->phy_mode[i],
+ speed, DUPLEX_FULL);
+ }
+ }
+
+ rc = sja1105_reload_cbs(priv);
+ if (rc < 0)
+ goto out;
+out:
+ mutex_unlock(&priv->mgmt_lock);
+ mutex_unlock(&priv->fdb_lock);
+
+ return rc;
+}
+
+static enum dsa_tag_protocol
+sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mp)
+{
+ struct sja1105_private *priv = ds->priv;
+
+ return priv->info->tag_proto;
+}
+
+/* The TPID setting belongs to the General Parameters table,
+ * which can only be partially reconfigured at runtime (and not the TPID).
+ * So a switch reset is required.
+ */
+int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_general_params_entry *general_params;
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_table *table;
+ struct sja1105_rule *rule;
+ u16 tpid, tpid2;
+ int rc;
+
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ if (rule->type == SJA1105_RULE_VL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot change VLAN filtering with active VL rules");
+ return -EBUSY;
+ }
+ }
+
+ if (enabled) {
+ /* Enable VLAN filtering. */
+ tpid = ETH_P_8021Q;
+ tpid2 = ETH_P_8021AD;
+ } else {
+ /* Disable VLAN filtering. */
+ tpid = ETH_P_SJA1105;
+ tpid2 = ETH_P_SJA1105;
+ }
+
+ table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
+ general_params = table->entries;
+ /* EtherType used to identify inner tagged (C-tag) VLAN traffic */
+ general_params->tpid = tpid;
+ /* EtherType used to identify outer tagged (S-tag) VLAN traffic */
+ general_params->tpid2 = tpid2;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_is_unused_port(ds, port))
+ continue;
+
+ rc = sja1105_commit_pvid(ds, port);
+ if (rc)
+ return rc;
+ }
+
+ rc = sja1105_static_config_reload(priv, SJA1105_VLAN_FILTERING);
+ if (rc)
+ NL_SET_ERR_MSG_MOD(extack, "Failed to change VLAN Ethertype");
+
+ return rc;
+}
+
+static int sja1105_vlan_add(struct sja1105_private *priv, int port, u16 vid,
+ u16 flags, bool allowed_ingress)
+{
+ struct sja1105_vlan_lookup_entry *vlan;
+ struct sja1105_table *table;
+ int match, rc;
+
+ table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
+
+ match = sja1105_is_vlan_configured(priv, vid);
+ if (match < 0) {
+ rc = sja1105_table_resize(table, table->entry_count + 1);
+ if (rc)
+ return rc;
+ match = table->entry_count - 1;
+ }
+
+ /* Assign pointer after the resize (it's new memory) */
+ vlan = table->entries;
+
+ vlan[match].type_entry = SJA1110_VLAN_D_TAG;
+ vlan[match].vlanid = vid;
+ vlan[match].vlan_bc |= BIT(port);
+
+ if (allowed_ingress)
+ vlan[match].vmemb_port |= BIT(port);
+ else
+ vlan[match].vmemb_port &= ~BIT(port);
+
+ if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
+ vlan[match].tag_port &= ~BIT(port);
+ else
+ vlan[match].tag_port |= BIT(port);
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_VLAN_LOOKUP, vid,
+ &vlan[match], true);
+}
+
+static int sja1105_vlan_del(struct sja1105_private *priv, int port, u16 vid)
+{
+ struct sja1105_vlan_lookup_entry *vlan;
+ struct sja1105_table *table;
+ bool keep = true;
+ int match, rc;
+
+ table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
+
+ match = sja1105_is_vlan_configured(priv, vid);
+ /* Can't delete a missing entry. */
+ if (match < 0)
+ return 0;
+
+ /* Refresh the pointer; the table may have been reallocated by a
+ * previous resize.
+ */
+ vlan = table->entries;
+
+ vlan[match].vlanid = vid;
+ vlan[match].vlan_bc &= ~BIT(port);
+ vlan[match].vmemb_port &= ~BIT(port);
+ /* Also unset tag_port, just so we don't have a confusing bitmap
+ * (no practical purpose).
+ */
+ vlan[match].tag_port &= ~BIT(port);
+
+ /* If there's no port left as member of this VLAN,
+ * it's time for it to go.
+ */
+ if (!vlan[match].vmemb_port)
+ keep = false;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_VLAN_LOOKUP, vid,
+ &vlan[match], keep);
+ if (rc < 0)
+ return rc;
+
+ if (!keep)
+ return sja1105_table_delete_entry(table, match);
+
+ return 0;
+}
+
+static int sja1105_bridge_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_private *priv = ds->priv;
+ u16 flags = vlan->flags;
+ int rc;
+
+ /* Be sure to deny alterations to the configuration done by tag_8021q.
+ */
+ if (vid_is_dsa_8021q(vlan->vid)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Range 3072-4095 reserved for dsa_8021q operation");
+ return -EBUSY;
+ }
+
+ /* Always install bridge VLANs as egress-tagged on CPU and DSA ports */
+ if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
+ flags = 0;
+
+ rc = sja1105_vlan_add(priv, port, vlan->vid, flags, true);
+ if (rc)
+ return rc;
+
+ if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
+ priv->bridge_pvid[port] = vlan->vid;
+
+ return sja1105_commit_pvid(ds, port);
+}
+
+static int sja1105_bridge_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ rc = sja1105_vlan_del(priv, port, vlan->vid);
+ if (rc)
+ return rc;
+
+ /* In case the pvid was deleted, make sure that untagged packets will
+ * be dropped.
+ */
+ return sja1105_commit_pvid(ds, port);
+}
+
+static int sja1105_dsa_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
+ u16 flags)
+{
+ struct sja1105_private *priv = ds->priv;
+ bool allowed_ingress = true;
+ int rc;
+
+ /* Prevent attackers from trying to inject a DSA tag from
+ * the outside world.
+ */
+ if (dsa_is_user_port(ds, port))
+ allowed_ingress = false;
+
+ rc = sja1105_vlan_add(priv, port, vid, flags, allowed_ingress);
+ if (rc)
+ return rc;
+
+ if (flags & BRIDGE_VLAN_INFO_PVID)
+ priv->tag_8021q_pvid[port] = vid;
+
+ return sja1105_commit_pvid(ds, port);
+}
+
+static int sja1105_dsa_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
+{
+ struct sja1105_private *priv = ds->priv;
+
+ return sja1105_vlan_del(priv, port, vid);
+}
+
+static int sja1105_prechangeupper(struct dsa_switch *ds, int port,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct netlink_ext_ack *extack = info->info.extack;
+ struct net_device *upper = info->upper_dev;
+ struct dsa_switch_tree *dst = ds->dst;
+ struct dsa_port *dp;
+
+ if (is_vlan_dev(upper)) {
+ NL_SET_ERR_MSG_MOD(extack, "8021q uppers are not supported");
+ return -EBUSY;
+ }
+
+ if (netif_is_bridge_master(upper)) {
+ list_for_each_entry(dp, &dst->ports, list) {
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
+
+ if (br && br != upper && br_vlan_enabled(br)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one VLAN-aware bridge is supported");
+ return -EBUSY;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
+ struct sk_buff *skb, bool takets)
+{
+ struct sja1105_mgmt_entry mgmt_route = {0};
+ struct sja1105_private *priv = ds->priv;
+ struct ethhdr *hdr;
+ int timeout = 10;
+ int rc;
+
+ hdr = eth_hdr(skb);
+
+ mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest);
+ mgmt_route.destports = BIT(port);
+ mgmt_route.enfport = 1;
+ mgmt_route.tsreg = 0;
+ mgmt_route.takets = takets;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
+ slot, &mgmt_route, true);
+ if (rc < 0) {
+ kfree_skb(skb);
+ return rc;
+ }
+
+ /* Transfer skb to the host port. */
+ dsa_enqueue_skb(skb, dsa_to_port(ds, port)->slave);
+
+ /* Wait until the switch has processed the frame */
+ do {
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_MGMT_ROUTE,
+ slot, &mgmt_route);
+ if (rc < 0) {
+ dev_err_ratelimited(priv->ds->dev,
+ "failed to poll for mgmt route\n");
+ continue;
+ }
+
+ /* UM10944: The ENFPORT flag of the respective entry is
+ * cleared when a match is found. The host can use this
+ * flag as an acknowledgment.
+ */
+ cpu_relax();
+ } while (mgmt_route.enfport && --timeout);
+
+ if (!timeout) {
+ /* Clean up the management route so that a follow-up
+ * frame may not match on it by mistake.
+ * This is supported in hardware only on P/Q/R/S - on E/T it is
+ * a no-op whose -EOPNOTSUPP we silently discard.
+ */
+ sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
+ slot, &mgmt_route, false);
+ dev_err_ratelimited(priv->ds->dev, "xmit timed out\n");
+ }
+
+ return NETDEV_TX_OK;
+}
+
+#define work_to_xmit_work(w) \
+ container_of((w), struct sja1105_deferred_xmit_work, work)
+
+/* Deferred work is unfortunately necessary because setting up the management
+ * route cannot be done from atomic context (the SPI transfer takes a
+ * sleepable lock on the bus).
+ */
+static void sja1105_port_deferred_xmit(struct kthread_work *work)
+{
+ struct sja1105_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
+ struct sk_buff *clone, *skb = xmit_work->skb;
+ struct dsa_switch *ds = xmit_work->dp->ds;
+ struct sja1105_private *priv = ds->priv;
+ int port = xmit_work->dp->index;
+
+ clone = SJA1105_SKB_CB(skb)->clone;
+
+ mutex_lock(&priv->mgmt_lock);
+
+ sja1105_mgmt_xmit(ds, port, 0, skb, !!clone);
+
+ /* The clone, if there, was made by dsa_skb_tx_timestamp */
+ if (clone)
+ sja1105_ptp_txtstamp_skb(ds, port, clone);
+
+ mutex_unlock(&priv->mgmt_lock);
+
+ kfree(xmit_work);
+}
+
+static int sja1105_connect_tag_protocol(struct dsa_switch *ds,
+ enum dsa_tag_protocol proto)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tagger_data *tagger_data;
+
+ if (proto != priv->info->tag_proto)
+ return -EPROTONOSUPPORT;
+
+ tagger_data = sja1105_tagger_data(ds);
+ tagger_data->xmit_work_fn = sja1105_port_deferred_xmit;
+ tagger_data->meta_tstamp_handler = sja1110_process_meta_tstamp;
+
+ return 0;
+}
+
+/* The MAXAGE setting belongs to the L2 Forwarding Parameters table,
+ * which cannot be reconfigured at runtime. So a switch reset is required.
+ */
+static int sja1105_set_ageing_time(struct dsa_switch *ds,
+ unsigned int ageing_time)
+{
+ struct sja1105_l2_lookup_params_entry *l2_lookup_params;
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_table *table;
+ unsigned int maxage;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
+ l2_lookup_params = table->entries;
+
+ maxage = SJA1105_AGEING_TIME_MS(ageing_time);
+
+ if (l2_lookup_params->maxage == maxage)
+ return 0;
+
+ l2_lookup_params->maxage = maxage;
+
+ return sja1105_static_config_reload(priv, SJA1105_AGEING_TIME);
+}
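+
+/* Worked example (illustrative, and assuming SJA1105_AGEING_TIME_MS()
+ * converts to the hardware's 10 ms units): an ageing time of 300000 ms
+ * (5 minutes) becomes MAXAGE = 30000.
+ */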
+
+static int sja1105_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct sja1105_l2_policing_entry *policing;
+ struct sja1105_private *priv = ds->priv;
+
+ new_mtu += VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+ if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
+ new_mtu += VLAN_HLEN;
+
+ policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
+
+ if (policing[port].maxlen == new_mtu)
+ return 0;
+
+ policing[port].maxlen = new_mtu;
+
+ return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
+}
+
+static int sja1105_get_max_mtu(struct dsa_switch *ds, int port)
+{
+ return 2043 - VLAN_ETH_HLEN - ETH_FCS_LEN;
+}
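+
+/* Worked example for the two helpers above (illustrative): a user MTU of
+ * 1500 becomes MAXLEN = 1500 + VLAN_ETH_HLEN (18) + ETH_FCS_LEN (4) = 1522
+ * on user ports, plus VLAN_HLEN (4) = 1526 on CPU/DSA ports, to make room
+ * for the tag_8021q VLAN. Conversely, the 2043-byte frame length limit
+ * yields a maximum MTU of 2043 - 18 - 4 = 2021.
+ */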
+
+static int sja1105_port_setup_tc(struct dsa_switch *ds, int port,
+ enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_TAPRIO:
+ return sja1105_setup_tc_taprio(ds, port, type_data);
+ case TC_SETUP_QDISC_CBS:
+ return sja1105_setup_tc_cbs(ds, port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* We have a single mirror (@to) port, but can configure ingress and egress
+ * mirroring on all other (@from) ports.
+ * We need to allow mirroring rules only as long as the @to port is always the
+ * same, and we need to unset the @to port from mirr_port only when there is no
+ * mirroring rule that references it.
+ */
+static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
+ bool ingress, bool enabled)
+{
+ struct sja1105_general_params_entry *general_params;
+ struct sja1105_mac_config_entry *mac;
+ struct dsa_switch *ds = priv->ds;
+ struct sja1105_table *table;
+ bool already_enabled;
+ u64 new_mirr_port;
+ int rc;
+
+ table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
+ general_params = table->entries;
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ already_enabled = (general_params->mirr_port != ds->num_ports);
+ if (already_enabled && enabled && general_params->mirr_port != to) {
+ dev_err(priv->ds->dev,
+ "Delete mirroring rules towards port %llu first\n",
+ general_params->mirr_port);
+ return -EBUSY;
+ }
+
+ new_mirr_port = to;
+ if (!enabled) {
+ bool keep = false;
+ int port;
+
+ /* Anybody still referencing mirr_port? */
+ for (port = 0; port < ds->num_ports; port++) {
+ if (mac[port].ing_mirr || mac[port].egr_mirr) {
+ keep = true;
+ break;
+ }
+ }
+ /* Unset already_enabled for next time */
+ if (!keep)
+ new_mirr_port = ds->num_ports;
+ }
+ if (new_mirr_port != general_params->mirr_port) {
+ general_params->mirr_port = new_mirr_port;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_GENERAL_PARAMS,
+ 0, general_params, true);
+ if (rc < 0)
+ return rc;
+ }
+
+ if (ingress)
+ mac[from].ing_mirr = enabled;
+ else
+ mac[from].egr_mirr = enabled;
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, from,
+ &mac[from], true);
+}
+
+static int sja1105_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
+{
+ return sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
+ ingress, true);
+}
+
+static void sja1105_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
+ mirror->ingress, false);
+}
+
+static int sja1105_port_policer_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_policer_tc_entry *policer)
+{
+ struct sja1105_l2_policing_entry *policing;
+ struct sja1105_private *priv = ds->priv;
+
+ policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
+
+ /* In hardware, every 8 microseconds the credit level is incremented by
+ * the value of RATE bytes divided by 64, up to a maximum of SMAX
+ * bytes.
+ */
+ policing[port].rate = div_u64(512 * policer->rate_bytes_per_sec,
+ 1000000);
+ policing[port].smax = policer->burst;
+
+ return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
+}
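+
+/* Worked example (illustrative): policing to 100 Mbit/s means
+ * rate_bytes_per_sec = 12500000, so RATE = 512 * 12500000 / 1000000 = 6400.
+ * Cross-checking with the 8 us credit interval: 6400 / 64 = 100 bytes per
+ * interval, times 125000 intervals/s = 12500000 bytes/s.
+ */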
+
+static void sja1105_port_policer_del(struct dsa_switch *ds, int port)
+{
+ struct sja1105_l2_policing_entry *policing;
+ struct sja1105_private *priv = ds->priv;
+
+ policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
+
+ policing[port].rate = SJA1105_RATE_MBPS(1000);
+ policing[port].smax = 65535;
+
+ sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
+}
+
+static int sja1105_port_set_learning(struct sja1105_private *priv, int port,
+ bool enabled)
+{
+ struct sja1105_mac_config_entry *mac;
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ mac[port].dyn_learn = enabled;
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
+ &mac[port], true);
+}
+
+static int sja1105_port_ucast_bcast_flood(struct sja1105_private *priv, int to,
+ struct switchdev_brport_flags flags)
+{
+ if (flags.mask & BR_FLOOD) {
+ if (flags.val & BR_FLOOD)
+ priv->ucast_egress_floods |= BIT(to);
+ else
+ priv->ucast_egress_floods &= ~BIT(to);
+ }
+
+ if (flags.mask & BR_BCAST_FLOOD) {
+ if (flags.val & BR_BCAST_FLOOD)
+ priv->bcast_egress_floods |= BIT(to);
+ else
+ priv->bcast_egress_floods &= ~BIT(to);
+ }
+
+ return sja1105_manage_flood_domains(priv);
+}
+
+static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_l2_lookup_entry *l2_lookup;
+ struct sja1105_table *table;
+ int match, rc;
+
+ mutex_lock(&priv->fdb_lock);
+
+ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
+ l2_lookup = table->entries;
+
+ for (match = 0; match < table->entry_count; match++)
+ if (l2_lookup[match].macaddr == SJA1105_UNKNOWN_MULTICAST &&
+ l2_lookup[match].mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
+ break;
+
+ if (match == table->entry_count) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Could not find FDB entry for unknown multicast");
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ if (flags.val & BR_MCAST_FLOOD)
+ l2_lookup[match].destports |= BIT(to);
+ else
+ l2_lookup[match].destports &= ~BIT(to);
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ l2_lookup[match].index,
+ &l2_lookup[match], true);
+out:
+ mutex_unlock(&priv->fdb_lock);
+
+ return rc;
+}
+
+static int sja1105_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_private *priv = ds->priv;
+
+ if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
+ BR_BCAST_FLOOD))
+ return -EINVAL;
+
+ if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD) &&
+ !priv->info->can_limit_mcast_flood) {
+ bool multicast = !!(flags.val & BR_MCAST_FLOOD);
+ bool unicast = !!(flags.val & BR_FLOOD);
+
+ if (unicast != multicast) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "This chip cannot configure multicast flooding independently of unicast");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int sja1105_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ if (flags.mask & BR_LEARNING) {
+ bool learn_ena = !!(flags.val & BR_LEARNING);
+
+ rc = sja1105_port_set_learning(priv, port, learn_ena);
+ if (rc)
+ return rc;
+ }
+
+ if (flags.mask & (BR_FLOOD | BR_BCAST_FLOOD)) {
+ rc = sja1105_port_ucast_bcast_flood(priv, port, flags);
+ if (rc)
+ return rc;
+ }
+
+ /* For chips that can't offload BR_MCAST_FLOOD independently, there
+ * is nothing to do here, we ensured the configuration is in sync by
+ * offloading BR_FLOOD.
+ */
+ if (flags.mask & BR_MCAST_FLOOD && priv->info->can_limit_mcast_flood) {
+ rc = sja1105_port_mcast_flood(priv, port, flags,
+ extack);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+/* The programming model for the SJA1105 switch is "all-at-once" via static
+ * configuration tables. Some of these can be dynamically modified at runtime,
+ * but not the xMII mode parameters table.
+ * Furthermore, some PHYs may not have crystals for generating their clocks
+ * (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's
+ * ref_clk pin. So port clocking needs to be initialized early, before
+ * connecting to PHYs is attempted, otherwise they won't respond through MDIO.
+ * Setting the correct PHY link speed does not matter yet.
+ * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY
+ * bindings are not yet parsed by the DSA core. We need to parse them
+ * early so that we can populate the xMII mode parameters table.
+ */
+static int sja1105_setup(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ if (priv->info->disable_microcontroller) {
+ rc = priv->info->disable_microcontroller(priv);
+ if (rc < 0) {
+ dev_err(ds->dev,
+ "Failed to disable microcontroller: %pe\n",
+ ERR_PTR(rc));
+ return rc;
+ }
+ }
+
+ /* Create and send configuration down to device */
+ rc = sja1105_static_config_load(priv);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to load static config: %d\n", rc);
+ return rc;
+ }
+
+ /* Configure the CGU (PHY link modes and speeds) */
+ if (priv->info->clocking_setup) {
+ rc = priv->info->clocking_setup(priv);
+ if (rc < 0) {
+ dev_err(ds->dev,
+ "Failed to configure MII clocking: %pe\n",
+ ERR_PTR(rc));
+ goto out_static_config_free;
+ }
+ }
+
+ sja1105_tas_setup(ds);
+ sja1105_flower_setup(ds);
+
+ rc = sja1105_ptp_clock_register(ds);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc);
+ goto out_flower_teardown;
+ }
+
+ rc = sja1105_mdiobus_register(ds);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to register MDIO bus: %pe\n",
+ ERR_PTR(rc));
+ goto out_ptp_clock_unregister;
+ }
+
+ rc = sja1105_devlink_setup(ds);
+ if (rc < 0)
+ goto out_mdiobus_unregister;
+
+ rtnl_lock();
+ rc = dsa_tag_8021q_register(ds, htons(ETH_P_8021Q));
+ rtnl_unlock();
+ if (rc)
+ goto out_devlink_teardown;
+
+ /* On SJA1105, VLAN filtering per se is always enabled in hardware.
+ * The only thing we can do to disable it is lie about what the 802.1Q
+ * EtherType is.
+ * So it will still try to apply VLAN filtering, but all ingress
+ * traffic (except frames received with EtherType of ETH_P_SJA1105)
+ * will be internally tagged with a distorted VLAN header where the
+ * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid.
+ */
+ ds->vlan_filtering_is_global = true;
+ ds->untag_bridge_pvid = true;
+ ds->fdb_isolation = true;
+ /* tag_8021q has 3 bits for the VBID, and the value 0 is reserved */
+ ds->max_num_bridges = 7;
+
+ /* Advertise the 8 egress queues */
+ ds->num_tx_queues = SJA1105_NUM_TC;
+
+ ds->mtu_enforcement_ingress = true;
+ ds->assisted_learning_on_cpu_port = true;
+
+ return 0;
+
+out_devlink_teardown:
+ sja1105_devlink_teardown(ds);
+out_mdiobus_unregister:
+ sja1105_mdiobus_unregister(ds);
+out_ptp_clock_unregister:
+ sja1105_ptp_clock_unregister(ds);
+out_flower_teardown:
+ sja1105_flower_teardown(ds);
+ sja1105_tas_teardown(ds);
+out_static_config_free:
+ sja1105_static_config_free(&priv->static_config);
+
+ return rc;
+}
+
+static void sja1105_teardown(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+
+ rtnl_lock();
+ dsa_tag_8021q_unregister(ds);
+ rtnl_unlock();
+
+ sja1105_devlink_teardown(ds);
+ sja1105_mdiobus_unregister(ds);
+ sja1105_ptp_clock_unregister(ds);
+ sja1105_flower_teardown(ds);
+ sja1105_tas_teardown(ds);
+ sja1105_static_config_free(&priv->static_config);
+}
+
+static const struct dsa_switch_ops sja1105_switch_ops = {
+ .get_tag_protocol = sja1105_get_tag_protocol,
+ .connect_tag_protocol = sja1105_connect_tag_protocol,
+ .setup = sja1105_setup,
+ .teardown = sja1105_teardown,
+ .set_ageing_time = sja1105_set_ageing_time,
+ .port_change_mtu = sja1105_change_mtu,
+ .port_max_mtu = sja1105_get_max_mtu,
+ .phylink_get_caps = sja1105_phylink_get_caps,
+ .phylink_mac_select_pcs = sja1105_mac_select_pcs,
+ .phylink_mac_link_up = sja1105_mac_link_up,
+ .phylink_mac_link_down = sja1105_mac_link_down,
+ .get_strings = sja1105_get_strings,
+ .get_ethtool_stats = sja1105_get_ethtool_stats,
+ .get_sset_count = sja1105_get_sset_count,
+ .get_ts_info = sja1105_get_ts_info,
+ .port_fdb_dump = sja1105_fdb_dump,
+ .port_fdb_add = sja1105_fdb_add,
+ .port_fdb_del = sja1105_fdb_del,
+ .port_fast_age = sja1105_fast_age,
+ .port_bridge_join = sja1105_bridge_join,
+ .port_bridge_leave = sja1105_bridge_leave,
+ .port_pre_bridge_flags = sja1105_port_pre_bridge_flags,
+ .port_bridge_flags = sja1105_port_bridge_flags,
+ .port_stp_state_set = sja1105_bridge_stp_state_set,
+ .port_vlan_filtering = sja1105_vlan_filtering,
+ .port_vlan_add = sja1105_bridge_vlan_add,
+ .port_vlan_del = sja1105_bridge_vlan_del,
+ .port_mdb_add = sja1105_mdb_add,
+ .port_mdb_del = sja1105_mdb_del,
+ .port_hwtstamp_get = sja1105_hwtstamp_get,
+ .port_hwtstamp_set = sja1105_hwtstamp_set,
+ .port_rxtstamp = sja1105_port_rxtstamp,
+ .port_txtstamp = sja1105_port_txtstamp,
+ .port_setup_tc = sja1105_port_setup_tc,
+ .port_mirror_add = sja1105_mirror_add,
+ .port_mirror_del = sja1105_mirror_del,
+ .port_policer_add = sja1105_port_policer_add,
+ .port_policer_del = sja1105_port_policer_del,
+ .cls_flower_add = sja1105_cls_flower_add,
+ .cls_flower_del = sja1105_cls_flower_del,
+ .cls_flower_stats = sja1105_cls_flower_stats,
+ .devlink_info_get = sja1105_devlink_info_get,
+ .tag_8021q_vlan_add = sja1105_dsa_8021q_vlan_add,
+ .tag_8021q_vlan_del = sja1105_dsa_8021q_vlan_del,
+ .port_prechangeupper = sja1105_prechangeupper,
+};
+
+static const struct of_device_id sja1105_dt_ids[];
+
+static int sja1105_check_device_id(struct sja1105_private *priv)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0};
+ struct device *dev = &priv->spidev->dev;
+ const struct of_device_id *match;
+ u32 device_id;
+ u64 part_no;
+ int rc;
+
+ rc = sja1105_xfer_u32(priv, SPI_READ, regs->device_id, &device_id,
+ NULL);
+ if (rc < 0)
+ return rc;
+
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->prod_id, prod_id,
+ SJA1105_SIZE_DEVICE_ID);
+ if (rc < 0)
+ return rc;
+
+ sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID);
+
+ for (match = sja1105_dt_ids; match->compatible[0]; match++) {
+ const struct sja1105_info *info = match->data;
+
+ /* Does the probed chip appear in our match table at all? */
+ if (info->device_id != device_id || info->part_no != part_no)
+ continue;
+
+ /* But is it what's in the device tree? */
+ if (priv->info->device_id != device_id ||
+ priv->info->part_no != part_no) {
+ dev_warn(dev, "Device tree specifies chip %s but found %s, please fix it!\n",
+ priv->info->name, info->name);
+ /* It isn't. No problem, pick that up. */
+ priv->info = info;
+ }
+
+ return 0;
+ }
+
+ dev_err(dev, "Unexpected {device ID, part number}: 0x%x 0x%llx\n",
+ device_id, part_no);
+
+ return -ENODEV;
+}
+
+static int sja1105_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct sja1105_private *priv;
+ size_t max_xfer, max_msg;
+ struct dsa_switch *ds;
+ int rc;
+
+ if (!dev->of_node) {
+ dev_err(dev, "No DTS bindings for SJA1105 driver\n");
+ return -EINVAL;
+ }
+
+ rc = sja1105_hw_reset(dev, 1, 1);
+ if (rc)
+ return rc;
+
+ priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Populate our driver private structure (priv) based on
+ * the device tree node that was probed (spi)
+ */
+ priv->spidev = spi;
+ spi_set_drvdata(spi, priv);
+
+ /* Configure the SPI bus */
+ spi->bits_per_word = 8;
+ rc = spi_setup(spi);
+ if (rc < 0) {
+ dev_err(dev, "Could not init SPI\n");
+ return rc;
+ }
+
+ /* In sja1105_xfer, we send spi_messages composed of two spi_transfers:
+ * a small one for the message header and another one for the current
+ * chunk of the packed buffer.
+ * Check that the restrictions imposed by the SPI controller are
+ * respected: the chunk buffer is smaller than the max transfer size,
+ * and the total length of the chunk plus its message header is smaller
+ * than the max message size.
+ * We do that during probe time since the maximum transfer size is a
+ * runtime invariant.
+ */
+ max_xfer = spi_max_transfer_size(spi);
+ max_msg = spi_max_message_size(spi);
+
+ /* We need to send at least one 64-bit word of SPI payload per message
+ * in order to be able to make useful progress.
+ */
+ if (max_msg < SJA1105_SIZE_SPI_MSG_HEADER + 8) {
+ dev_err(dev, "SPI master cannot send large enough buffers, aborting\n");
+ return -EINVAL;
+ }
+
+ priv->max_xfer_len = SJA1105_SIZE_SPI_MSG_MAXLEN;
+ if (priv->max_xfer_len > max_xfer)
+ priv->max_xfer_len = max_xfer;
+ if (priv->max_xfer_len > max_msg - SJA1105_SIZE_SPI_MSG_HEADER)
+ priv->max_xfer_len = max_msg - SJA1105_SIZE_SPI_MSG_HEADER;
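+ /* Illustrative example, assuming SJA1105_SIZE_SPI_MSG_HEADER == 4:
+ * a controller with max_msg = 128 bytes clamps max_xfer_len to
+ * 128 - 4 = 124 bytes per chunk.
+ */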
+
+ priv->info = of_device_get_match_data(dev);
+
+ /* Detect hardware device */
+ rc = sja1105_check_device_id(priv);
+ if (rc < 0) {
+ dev_err(dev, "Device ID check failed: %d\n", rc);
+ return rc;
+ }
+
+ dev_info(dev, "Probed switch chip: %s\n", priv->info->name);
+
+ ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
+ if (!ds)
+ return -ENOMEM;
+
+ ds->dev = dev;
+ ds->num_ports = priv->info->num_ports;
+ ds->ops = &sja1105_switch_ops;
+ ds->priv = priv;
+ priv->ds = ds;
+
+ mutex_init(&priv->ptp_data.lock);
+ mutex_init(&priv->dynamic_config_lock);
+ mutex_init(&priv->mgmt_lock);
+ mutex_init(&priv->fdb_lock);
+ spin_lock_init(&priv->ts_id_lock);
+
+ rc = sja1105_parse_dt(priv);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to parse DT: %d\n", rc);
+ return rc;
+ }
+
+ if (IS_ENABLED(CONFIG_NET_SCH_CBS)) {
+ priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers,
+ sizeof(struct sja1105_cbs_entry),
+ GFP_KERNEL);
+ if (!priv->cbs)
+ return -ENOMEM;
+ }
+
+ return dsa_register_switch(priv->ds);
+}
+
+static void sja1105_remove(struct spi_device *spi)
+{
+ struct sja1105_private *priv = spi_get_drvdata(spi);
+
+ if (!priv)
+ return;
+
+ dsa_unregister_switch(priv->ds);
+}
+
+static void sja1105_shutdown(struct spi_device *spi)
+{
+ struct sja1105_private *priv = spi_get_drvdata(spi);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ spi_set_drvdata(spi, NULL);
+}
+
+static const struct of_device_id sja1105_dt_ids[] = {
+ { .compatible = "nxp,sja1105e", .data = &sja1105e_info },
+ { .compatible = "nxp,sja1105t", .data = &sja1105t_info },
+ { .compatible = "nxp,sja1105p", .data = &sja1105p_info },
+ { .compatible = "nxp,sja1105q", .data = &sja1105q_info },
+ { .compatible = "nxp,sja1105r", .data = &sja1105r_info },
+ { .compatible = "nxp,sja1105s", .data = &sja1105s_info },
+ { .compatible = "nxp,sja1110a", .data = &sja1110a_info },
+ { .compatible = "nxp,sja1110b", .data = &sja1110b_info },
+ { .compatible = "nxp,sja1110c", .data = &sja1110c_info },
+ { .compatible = "nxp,sja1110d", .data = &sja1110d_info },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, sja1105_dt_ids);
+
+static const struct spi_device_id sja1105_spi_ids[] = {
+ { "sja1105e" },
+ { "sja1105t" },
+ { "sja1105p" },
+ { "sja1105q" },
+ { "sja1105r" },
+ { "sja1105s" },
+ { "sja1110a" },
+ { "sja1110b" },
+ { "sja1110c" },
+ { "sja1110d" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, sja1105_spi_ids);
+
+static struct spi_driver sja1105_driver = {
+ .driver = {
+ .name = "sja1105",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(sja1105_dt_ids),
+ },
+ .id_table = sja1105_spi_ids,
+ .probe = sja1105_probe,
+ .remove = sja1105_remove,
+ .shutdown = sja1105_shutdown,
+};
+
+module_spi_driver(sja1105_driver);
+
+MODULE_AUTHOR("Vladimir Oltean <olteanv@gmail.com>");
+MODULE_AUTHOR("Georg Waibel <georg.waibel@sensor-technik.de>");
+MODULE_DESCRIPTION("SJA1105 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/sja1105/sja1105_mdio.c b/drivers/net/dsa/sja1105/sja1105_mdio.c
new file mode 100644
index 000000000..4059fcc8c
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_mdio.c
@@ -0,0 +1,547 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2021 NXP
+ */
+#include <linux/pcs/pcs-xpcs.h>
+#include <linux/of_mdio.h>
+#include "sja1105.h"
+
+#define SJA1110_PCS_BANK_REG SJA1110_SPI_ADDR(0x3fc)
+
+int sja1105_pcs_mdio_read(struct mii_bus *bus, int phy, int reg)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ u64 addr;
+ u32 tmp;
+ u16 mmd;
+ int rc;
+
+ if (!(reg & MII_ADDR_C45))
+ return -EINVAL;
+
+ mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+ addr = (mmd << 16) | (reg & GENMASK(15, 0));
+
+ if (mmd != MDIO_MMD_VEND1 && mmd != MDIO_MMD_VEND2)
+ return 0xffff;
+
+ if (mmd == MDIO_MMD_VEND2 && (reg & GENMASK(15, 0)) == MII_PHYSID1)
+ return NXP_SJA1105_XPCS_ID >> 16;
+ if (mmd == MDIO_MMD_VEND2 && (reg & GENMASK(15, 0)) == MII_PHYSID2)
+ return NXP_SJA1105_XPCS_ID & GENMASK(15, 0);
+
+ rc = sja1105_xfer_u32(priv, SPI_READ, addr, &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ return tmp & 0xffff;
+}
+
+int sja1105_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ u64 addr;
+ u32 tmp;
+ u16 mmd;
+
+ if (!(reg & MII_ADDR_C45))
+ return -EINVAL;
+
+ mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+ addr = (mmd << 16) | (reg & GENMASK(15, 0));
+ tmp = val;
+
+ if (mmd != MDIO_MMD_VEND1 && mmd != MDIO_MMD_VEND2)
+ return -EINVAL;
+
+ return sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
+}
+
+int sja1110_pcs_mdio_read(struct mii_bus *bus, int phy, int reg)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ int offset, bank;
+ u64 addr;
+ u32 tmp;
+ u16 mmd;
+ int rc;
+
+ if (!(reg & MII_ADDR_C45))
+ return -EINVAL;
+
+ if (regs->pcs_base[phy] == SJA1105_RSV_ADDR)
+ return -ENODEV;
+
+ mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+ addr = (mmd << 16) | (reg & GENMASK(15, 0));
+
+ if (mmd == MDIO_MMD_VEND2 && (reg & GENMASK(15, 0)) == MII_PHYSID1)
+ return NXP_SJA1110_XPCS_ID >> 16;
+ if (mmd == MDIO_MMD_VEND2 && (reg & GENMASK(15, 0)) == MII_PHYSID2)
+ return NXP_SJA1110_XPCS_ID & GENMASK(15, 0);
+
+ bank = addr >> 8;
+ offset = addr & GENMASK(7, 0);
+
+ /* This addressing scheme reserves register 0xff for the bank address
+ * register, so that offset can never be addressed.
+ */
+ if (WARN_ON(offset == 0xff))
+ return -ENODEV;
+
+ tmp = bank;
+
+ rc = sja1105_xfer_u32(priv, SPI_WRITE,
+ regs->pcs_base[phy] + SJA1110_PCS_BANK_REG,
+ &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ rc = sja1105_xfer_u32(priv, SPI_READ, regs->pcs_base[phy] + offset,
+ &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ return tmp & 0xffff;
+}
+
+int sja1110_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ int offset, bank;
+ u64 addr;
+ u32 tmp;
+ u16 mmd;
+ int rc;
+
+ if (!(reg & MII_ADDR_C45))
+ return -EINVAL;
+
+ if (regs->pcs_base[phy] == SJA1105_RSV_ADDR)
+ return -ENODEV;
+
+ mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+ addr = (mmd << 16) | (reg & GENMASK(15, 0));
+
+ bank = addr >> 8;
+ offset = addr & GENMASK(7, 0);
+
+ /* This addressing scheme reserves register 0xff for the bank address
+ * register, so that offset can never be addressed.
+ */
+ if (WARN_ON(offset == 0xff))
+ return -ENODEV;
+
+ tmp = bank;
+
+ rc = sja1105_xfer_u32(priv, SPI_WRITE,
+ regs->pcs_base[phy] + SJA1110_PCS_BANK_REG,
+ &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ tmp = val;
+
+ return sja1105_xfer_u32(priv, SPI_WRITE, regs->pcs_base[phy] + offset,
+ &tmp, NULL);
+}
+
+enum sja1105_mdio_opcode {
+ SJA1105_C45_ADDR = 0,
+ SJA1105_C22 = 1,
+ SJA1105_C45_DATA = 2,
+ SJA1105_C45_DATA_AUTOINC = 3,
+};
+
+static u64 sja1105_base_t1_encode_addr(struct sja1105_private *priv,
+ int phy, enum sja1105_mdio_opcode op,
+ int xad)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+
+ return regs->mdio_100base_t1 | (phy << 7) | (op << 5) | (xad << 0);
+}
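+
+/* Worked example (illustrative): a Clause 22 access to register 0x11 of
+ * PHY 4 encodes to regs->mdio_100base_t1 | (4 << 7) | (SJA1105_C22 << 5)
+ * | 0x11 = regs->mdio_100base_t1 | 0x231.
+ */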
+
+static int sja1105_base_t1_mdio_read(struct mii_bus *bus, int phy, int reg)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ u64 addr;
+ u32 tmp;
+ int rc;
+
+ if (reg & MII_ADDR_C45) {
+ u16 mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_ADDR,
+ mmd);
+
+ tmp = reg & MII_REGADDR_C45_MASK;
+
+ rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_DATA,
+ mmd);
+
+ rc = sja1105_xfer_u32(priv, SPI_READ, addr, &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ return tmp & 0xffff;
+ }
+
+ /* Clause 22 read */
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C22, reg & 0x1f);
+
+ rc = sja1105_xfer_u32(priv, SPI_READ, addr, &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ return tmp & 0xffff;
+}
+
+static int sja1105_base_t1_mdio_write(struct mii_bus *bus, int phy, int reg,
+ u16 val)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ u64 addr;
+ u32 tmp;
+ int rc;
+
+ if (reg & MII_ADDR_C45) {
+ u16 mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_ADDR,
+ mmd);
+
+ tmp = reg & MII_REGADDR_C45_MASK;
+
+ rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_DATA,
+ mmd);
+
+ tmp = val & 0xffff;
+
+ rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+ }
+
+ /* Clause 22 write */
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C22, reg & 0x1f);
+
+ tmp = val & 0xffff;
+
+ return sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
+}
+
+static int sja1105_base_tx_mdio_read(struct mii_bus *bus, int phy, int reg)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 tmp;
+ int rc;
+
+ if (reg & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ rc = sja1105_xfer_u32(priv, SPI_READ, regs->mdio_100base_tx + reg,
+ &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ return tmp & 0xffff;
+}
+
+static int sja1105_base_tx_mdio_write(struct mii_bus *bus, int phy, int reg,
+ u16 val)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 tmp = val;
+
+ if (reg & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ return sja1105_xfer_u32(priv, SPI_WRITE, regs->mdio_100base_tx + reg,
+ &tmp, NULL);
+}
+
+static int sja1105_mdiobus_base_tx_register(struct sja1105_private *priv,
+ struct device_node *mdio_node)
+{
+ struct sja1105_mdio_private *mdio_priv;
+ struct device_node *np;
+ struct mii_bus *bus;
+ int rc = 0;
+
+ np = of_get_compatible_child(mdio_node, "nxp,sja1110-base-tx-mdio");
+ if (!np)
+ return 0;
+
+ if (!of_device_is_available(np))
+ goto out_put_np;
+
+ bus = mdiobus_alloc_size(sizeof(*mdio_priv));
+ if (!bus) {
+ rc = -ENOMEM;
+ goto out_put_np;
+ }
+
+ bus->name = "SJA1110 100base-TX MDIO bus";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-base-tx",
+ dev_name(priv->ds->dev));
+ bus->read = sja1105_base_tx_mdio_read;
+ bus->write = sja1105_base_tx_mdio_write;
+ bus->parent = priv->ds->dev;
+ mdio_priv = bus->priv;
+ mdio_priv->priv = priv;
+
+ rc = of_mdiobus_register(bus, np);
+ if (rc) {
+ mdiobus_free(bus);
+ goto out_put_np;
+ }
+
+ priv->mdio_base_tx = bus;
+
+out_put_np:
+ of_node_put(np);
+
+ return rc;
+}
+
+static void sja1105_mdiobus_base_tx_unregister(struct sja1105_private *priv)
+{
+ if (!priv->mdio_base_tx)
+ return;
+
+ mdiobus_unregister(priv->mdio_base_tx);
+ mdiobus_free(priv->mdio_base_tx);
+ priv->mdio_base_tx = NULL;
+}
+
+static int sja1105_mdiobus_base_t1_register(struct sja1105_private *priv,
+ struct device_node *mdio_node)
+{
+ struct sja1105_mdio_private *mdio_priv;
+ struct device_node *np;
+ struct mii_bus *bus;
+ int rc = 0;
+
+ np = of_get_compatible_child(mdio_node, "nxp,sja1110-base-t1-mdio");
+ if (!np)
+ return 0;
+
+ if (!of_device_is_available(np))
+ goto out_put_np;
+
+ bus = mdiobus_alloc_size(sizeof(*mdio_priv));
+ if (!bus) {
+ rc = -ENOMEM;
+ goto out_put_np;
+ }
+
+ bus->name = "SJA1110 100base-T1 MDIO bus";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-base-t1",
+ dev_name(priv->ds->dev));
+ bus->read = sja1105_base_t1_mdio_read;
+ bus->write = sja1105_base_t1_mdio_write;
+ bus->parent = priv->ds->dev;
+ mdio_priv = bus->priv;
+ mdio_priv->priv = priv;
+
+ rc = of_mdiobus_register(bus, np);
+ if (rc) {
+ mdiobus_free(bus);
+ goto out_put_np;
+ }
+
+ priv->mdio_base_t1 = bus;
+
+out_put_np:
+ of_node_put(np);
+
+ return rc;
+}
+
+static void sja1105_mdiobus_base_t1_unregister(struct sja1105_private *priv)
+{
+ if (!priv->mdio_base_t1)
+ return;
+
+ mdiobus_unregister(priv->mdio_base_t1);
+ mdiobus_free(priv->mdio_base_t1);
+ priv->mdio_base_t1 = NULL;
+}
+
+static int sja1105_mdiobus_pcs_register(struct sja1105_private *priv)
+{
+ struct sja1105_mdio_private *mdio_priv;
+ struct dsa_switch *ds = priv->ds;
+ struct mii_bus *bus;
+ int rc = 0;
+ int port;
+
+ if (!priv->info->pcs_mdio_read || !priv->info->pcs_mdio_write)
+ return 0;
+
+ bus = mdiobus_alloc_size(sizeof(*mdio_priv));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "SJA1105 PCS MDIO bus";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-pcs",
+ dev_name(ds->dev));
+ bus->read = priv->info->pcs_mdio_read;
+ bus->write = priv->info->pcs_mdio_write;
+ bus->parent = ds->dev;
+ /* There is no PHY on this MDIO bus => mask out all PHY addresses
+ * from auto probing.
+ */
+ bus->phy_mask = ~0;
+ mdio_priv = bus->priv;
+ mdio_priv->priv = priv;
+
+ rc = mdiobus_register(bus);
+ if (rc) {
+ mdiobus_free(bus);
+ return rc;
+ }
+
+ for (port = 0; port < ds->num_ports; port++) {
+ struct mdio_device *mdiodev;
+ struct dw_xpcs *xpcs;
+
+ if (dsa_is_unused_port(ds, port))
+ continue;
+
+ if (priv->phy_mode[port] != PHY_INTERFACE_MODE_SGMII &&
+ priv->phy_mode[port] != PHY_INTERFACE_MODE_2500BASEX)
+ continue;
+
+ mdiodev = mdio_device_create(bus, port);
+ if (IS_ERR(mdiodev)) {
+ rc = PTR_ERR(mdiodev);
+ goto out_pcs_free;
+ }
+
+ xpcs = xpcs_create(mdiodev, priv->phy_mode[port]);
+ if (IS_ERR(xpcs)) {
+ rc = PTR_ERR(xpcs);
+ goto out_pcs_free;
+ }
+
+ priv->xpcs[port] = xpcs;
+ }
+
+ priv->mdio_pcs = bus;
+
+ return 0;
+
+out_pcs_free:
+ for (port = 0; port < ds->num_ports; port++) {
+ if (!priv->xpcs[port])
+ continue;
+
+ mdio_device_free(priv->xpcs[port]->mdiodev);
+ xpcs_destroy(priv->xpcs[port]);
+ priv->xpcs[port] = NULL;
+ }
+
+ mdiobus_unregister(bus);
+ mdiobus_free(bus);
+
+ return rc;
+}
+
+static void sja1105_mdiobus_pcs_unregister(struct sja1105_private *priv)
+{
+ struct dsa_switch *ds = priv->ds;
+ int port;
+
+ if (!priv->mdio_pcs)
+ return;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ if (!priv->xpcs[port])
+ continue;
+
+ mdio_device_free(priv->xpcs[port]->mdiodev);
+ xpcs_destroy(priv->xpcs[port]);
+ priv->xpcs[port] = NULL;
+ }
+
+ mdiobus_unregister(priv->mdio_pcs);
+ mdiobus_free(priv->mdio_pcs);
+ priv->mdio_pcs = NULL;
+}
+
+int sja1105_mdiobus_register(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct device_node *switch_node = ds->dev->of_node;
+ struct device_node *mdio_node;
+ int rc;
+
+ rc = sja1105_mdiobus_pcs_register(priv);
+ if (rc)
+ return rc;
+
+ mdio_node = of_get_child_by_name(switch_node, "mdios");
+ if (!mdio_node)
+ return 0;
+
+ if (!of_device_is_available(mdio_node))
+ goto out_put_mdio_node;
+
+ if (regs->mdio_100base_tx != SJA1105_RSV_ADDR) {
+ rc = sja1105_mdiobus_base_tx_register(priv, mdio_node);
+ if (rc)
+ goto err_put_mdio_node;
+ }
+
+ if (regs->mdio_100base_t1 != SJA1105_RSV_ADDR) {
+ rc = sja1105_mdiobus_base_t1_register(priv, mdio_node);
+ if (rc)
+ goto err_free_base_tx_mdiobus;
+ }
+
+out_put_mdio_node:
+ of_node_put(mdio_node);
+
+ return 0;
+
+err_free_base_tx_mdiobus:
+ sja1105_mdiobus_base_tx_unregister(priv);
+err_put_mdio_node:
+ of_node_put(mdio_node);
+ sja1105_mdiobus_pcs_unregister(priv);
+
+ return rc;
+}
+
+void sja1105_mdiobus_unregister(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+
+ sja1105_mdiobus_base_t1_unregister(priv);
+ sja1105_mdiobus_base_tx_unregister(priv);
+ sja1105_mdiobus_pcs_unregister(priv);
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
new file mode 100644
index 000000000..a7d41e781
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -0,0 +1,974 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include <linux/spi/spi.h>
+#include "sja1105.h"
+
+/* The adjfine API clamps ppb between [-32,768,000, 32,768,000], and
+ * therefore scaled_ppm between [-2,147,483,648, 2,147,483,647].
+ * Set the maximum supported ppb to a round value smaller than the maximum.
+ *
+ * Expressed as a ratio, this is a +/- 0.032x adjustment of the
+ * free-running counter (0.968x to 1.032x).
+ */
+#define SJA1105_MAX_ADJ_PPB 32000000
+#define SJA1105_SIZE_PTP_CMD 4
+
+/* PTPSYNCTS has no interrupt or update mechanism, because the intended
+ * hardware use case is for the timestamp to be collected synchronously,
+ * immediately after the CAS_MASTER SJA1105 switch has performed a CASSYNC
+ * one-shot toggle (no return to level) on the PTP_CLK pin. When used as a
+ * generic extts source, the PTPSYNCTS register needs polling and a comparison
+ * with the old value. The polling interval is configured as the Nyquist rate
+ * of a signal with 50% duty cycle and 1Hz frequency, which is sadly all that
+ * this hardware can do (but may be enough for some setups). Anything of higher
+ * frequency than 1 Hz will be lost, since there is no timestamp FIFO.
+ */
+#define SJA1105_EXTTS_INTERVAL (HZ / 6)
+
+/* This range is actually +/- SJA1105_MAX_ADJ_PPB
+ * divided by 1000 (ppb -> ppm) and with a 16-bit
+ * "fractional" part (actually fixed point).
+ * |
+ * v
+ * Convert scaled_ppm from the +/- ((10^6) << 16) range
+ * into the +/- (1 << 31) range.
+ *
+ * This forgoes a "ppb" numeric representation (up to NSEC_PER_SEC)
+ * and defines the scaling factor between scaled_ppm and the actual
+ * frequency adjustments of the PHC.
+ *
+ * ptpclkrate = scaled_ppm * 2^31 / (10^6 * 2^16)
+ * simplifies to
+ * ptpclkrate = scaled_ppm * 2^9 / 5^6
+ */
+#define SJA1105_CC_MULT_NUM (1 << 9)
+#define SJA1105_CC_MULT_DEM 15625
+#define SJA1105_CC_MULT 0x80000000
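+
+/* Worked example (illustrative): a +1 ppm request arrives as
+ * scaled_ppm = 1 << 16 = 65536, so the rate offset is
+ * 65536 * SJA1105_CC_MULT_NUM / SJA1105_CC_MULT_DEM
+ * = 65536 * 512 / 15625 = 2147, which relative to
+ * SJA1105_CC_MULT = 2^31 is 2147 / 2147483648 ~= 1e-6, i.e. 1 ppm.
+ */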
+
+enum sja1105_ptp_clk_mode {
+ PTP_ADD_MODE = 1,
+ PTP_SET_MODE = 0,
+};
+
+#define extts_to_data(t) \
+ container_of((t), struct sja1105_ptp_data, extts_timer)
+#define ptp_caps_to_data(d) \
+ container_of((d), struct sja1105_ptp_data, caps)
+#define ptp_data_to_sja1105(d) \
+ container_of((d), struct sja1105_private, ptp_data)
+
+int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct hwtstamp_config config;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ priv->hwts_tx_en &= ~BIT(port);
+ break;
+ case HWTSTAMP_TX_ON:
+ priv->hwts_tx_en |= BIT(port);
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ priv->hwts_rx_en &= ~BIT(port);
+ break;
+ default:
+ priv->hwts_rx_en |= BIT(port);
+ break;
+ }
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+ return 0;
+}
+
+int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct hwtstamp_config config;
+
+ config.flags = 0;
+ if (priv->hwts_tx_en & BIT(port))
+ config.tx_type = HWTSTAMP_TX_ON;
+ else
+ config.tx_type = HWTSTAMP_TX_OFF;
+ if (priv->hwts_rx_en & BIT(port))
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ else
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+int sja1105_get_ts_info(struct dsa_switch *ds, int port,
+ struct ethtool_ts_info *info)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+
+ /* Called during cleanup */
+ if (!ptp_data->clock)
+ return -ENODEV;
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT);
+ info->phc_index = ptp_clock_index(ptp_data->clock);
+ return 0;
+}
+
+void sja1105et_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105_SIZE_PTP_CMD;
+ /* No need to keep this as part of the structure */
+ u64 valid = 1;
+
+ sja1105_packing(buf, &valid, 31, 31, size, op);
+ sja1105_packing(buf, &cmd->ptpstrtsch, 30, 30, size, op);
+ sja1105_packing(buf, &cmd->ptpstopsch, 29, 29, size, op);
+ sja1105_packing(buf, &cmd->startptpcp, 28, 28, size, op);
+ sja1105_packing(buf, &cmd->stopptpcp, 27, 27, size, op);
+ sja1105_packing(buf, &cmd->resptp, 2, 2, size, op);
+ sja1105_packing(buf, &cmd->corrclk4ts, 1, 1, size, op);
+ sja1105_packing(buf, &cmd->ptpclkadd, 0, 0, size, op);
+}
+
+void sja1105pqrs_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105_SIZE_PTP_CMD;
+ /* No need to keep this as part of the structure */
+ u64 valid = 1;
+
+ sja1105_packing(buf, &valid, 31, 31, size, op);
+ sja1105_packing(buf, &cmd->ptpstrtsch, 30, 30, size, op);
+ sja1105_packing(buf, &cmd->ptpstopsch, 29, 29, size, op);
+ sja1105_packing(buf, &cmd->startptpcp, 28, 28, size, op);
+ sja1105_packing(buf, &cmd->stopptpcp, 27, 27, size, op);
+ sja1105_packing(buf, &cmd->resptp, 3, 3, size, op);
+ sja1105_packing(buf, &cmd->corrclk4ts, 2, 2, size, op);
+ sja1105_packing(buf, &cmd->ptpclkadd, 0, 0, size, op);
+}
+
+int sja1105_ptp_commit(struct dsa_switch *ds, struct sja1105_ptp_cmd *cmd,
+ sja1105_spi_rw_mode_t rw)
+{
+ const struct sja1105_private *priv = ds->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 buf[SJA1105_SIZE_PTP_CMD] = {0};
+ int rc;
+
+ if (rw == SPI_WRITE)
+ priv->info->ptp_cmd_packing(buf, cmd, PACK);
+
+ rc = sja1105_xfer_buf(priv, rw, regs->ptp_control, buf,
+ SJA1105_SIZE_PTP_CMD);
+
+ if (rw == SPI_READ)
+ priv->info->ptp_cmd_packing(buf, cmd, UNPACK);
+
+ return rc;
+}
+
+/* The switch returns partial timestamps (24 bits for SJA1105 E/T, which wrap
+ * around in 0.135 seconds, and 32 bits for P/Q/R/S, wrapping around in 34.35
+ * seconds).
+ *
+ * This receives the RX or TX MAC timestamps, provided by hardware as
+ * the lower bits of the cycle counter, sampled at the time the timestamp was
+ * collected.
+ *
+ * To reconstruct into a full 64-bit-wide timestamp, the cycle counter is
+ * read and the high-order bits are filled in.
+ *
+ * Must be called within one wraparound period of the partial timestamp since
+ * it was generated by the MAC.
+ */
+static u64 sja1105_tstamp_reconstruct(struct dsa_switch *ds, u64 now,
+ u64 ts_partial)
+{
+ struct sja1105_private *priv = ds->priv;
+ u64 partial_tstamp_mask = CYCLECOUNTER_MASK(priv->info->ptp_ts_bits);
+ u64 ts_reconstructed;
+
+ ts_reconstructed = (now & ~partial_tstamp_mask) | ts_partial;
+
+ /* Check lower bits of current cycle counter against the timestamp.
+ * If the current cycle counter is lower than the partial timestamp,
+ * then wraparound surely occurred and must be accounted for.
+ */
+ if ((now & partial_tstamp_mask) <= ts_partial)
+ ts_reconstructed -= (partial_tstamp_mask + 1);
+
+ return ts_reconstructed;
+}
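+
+/* Worked example (illustrative, with ptp_ts_bits = 24, so
+ * partial_tstamp_mask = 0xffffff): if now = 0x05000010 and
+ * ts_partial = 0xfffff0, then (now & mask) = 0x10 <= 0xfffff0, so a
+ * wraparound occurred and the result is
+ * (0x05000000 | 0xfffff0) - 0x1000000 = 0x04fffff0, just before @now.
+ */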
+
+/* Reads the SPI interface for an egress timestamp generated by the switch
+ * for frames sent using management routes.
+ *
+ * SJA1105 E/T layout of the 4-byte SPI payload:
+ *
+ * 31 23 15 7 0
+ * | | | | |
+ * +-----+-----+-----+ ^
+ * ^ |
+ * | |
+ * 24-bit timestamp Update bit
+ *
+ *
+ * SJA1105 P/Q/R/S layout of the 8-byte SPI payload:
+ *
+ * 31 23 15 7 0 63 55 47 39 32
+ * | | | | | | | | | |
+ * ^ +-----+-----+-----+-----+
+ * | ^
+ * | |
+ * Update bit 32-bit timestamp
+ *
+ * Notice that the update bit is in the same place.
+ * To have common code for E/T and P/Q/R/S for reading the timestamp,
+ * we need to juggle with the offset and the bit indices.
+ */
+static int sja1105_ptpegr_ts_poll(struct dsa_switch *ds, int port, u64 *ts)
+{
+ struct sja1105_private *priv = ds->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ int tstamp_bit_start, tstamp_bit_end;
+ int timeout = 10;
+ u8 packed_buf[8];
+ u64 update;
+ int rc;
+
+ do {
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->ptpegr_ts[port],
+ packed_buf, priv->info->ptpegr_ts_bytes);
+ if (rc < 0)
+ return rc;
+
+ sja1105_unpack(packed_buf, &update, 0, 0,
+ priv->info->ptpegr_ts_bytes);
+ if (update)
+ break;
+
+ usleep_range(10, 50);
+ } while (--timeout);
+
+ if (!timeout)
+ return -ETIMEDOUT;
+
+ /* Point the end bit to the second 32-bit word on P/Q/R/S,
+ * no-op on E/T.
+ */
+ tstamp_bit_end = (priv->info->ptpegr_ts_bytes - 4) * 8;
+ /* Shift the 24-bit timestamp on E/T to be collected from 31:8.
+ * No-op on P/Q/R/S.
+ */
+ tstamp_bit_end += 32 - priv->info->ptp_ts_bits;
+ tstamp_bit_start = tstamp_bit_end + priv->info->ptp_ts_bits - 1;
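+
+ /* Worked check of the arithmetic above, using the per-family values
+ * from struct sja1105_info: on E/T, ptpegr_ts_bytes = 4 and
+ * ptp_ts_bits = 24, so tstamp_bit_end = 0 + (32 - 24) = 8 and
+ * tstamp_bit_start = 31, i.e. the 31:8 field from the diagram. On
+ * P/Q/R/S, ptpegr_ts_bytes = 8 and ptp_ts_bits = 32, giving 63:32.
+ */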
+
+ *ts = 0;
+
+ sja1105_unpack(packed_buf, ts, tstamp_bit_start, tstamp_bit_end,
+ priv->info->ptpegr_ts_bytes);
+
+ return 0;
+}
+
+/* Caller must hold ptp_data->lock */
+static int sja1105_ptpclkval_read(struct sja1105_private *priv, u64 *ticks,
+ struct ptp_system_timestamp *ptp_sts)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+
+ return sja1105_xfer_u64(priv, SPI_READ, regs->ptpclkval, ticks,
+ ptp_sts);
+}
+
+/* Caller must hold ptp_data->lock */
+static int sja1105_ptpclkval_write(struct sja1105_private *priv, u64 ticks,
+ struct ptp_system_timestamp *ptp_sts)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+
+ return sja1105_xfer_u64(priv, SPI_WRITE, regs->ptpclkval, &ticks,
+ ptp_sts);
+}
+
+static void sja1105_extts_poll(struct sja1105_private *priv)
+{
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct ptp_clock_event event;
+ u64 ptpsyncts = 0;
+ int rc;
+
+ rc = sja1105_xfer_u64(priv, SPI_READ, regs->ptpsyncts, &ptpsyncts,
+ NULL);
+ if (rc < 0)
+ dev_err_ratelimited(priv->ds->dev,
+ "Failed to read PTPSYNCTS: %d\n", rc);
+
+ if (ptpsyncts && ptp_data->ptpsyncts != ptpsyncts) {
+ event.index = 0;
+ event.type = PTP_CLOCK_EXTTS;
+ event.timestamp = ns_to_ktime(sja1105_ticks_to_ns(ptpsyncts));
+ ptp_clock_event(ptp_data->clock, &event);
+
+ ptp_data->ptpsyncts = ptpsyncts;
+ }
+}
+
+static long sja1105_rxtstamp_work(struct ptp_clock_info *ptp)
+{
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+ struct dsa_switch *ds = priv->ds;
+ struct sk_buff *skb;
+
+ mutex_lock(&ptp_data->lock);
+
+ while ((skb = skb_dequeue(&ptp_data->skb_rxtstamp_queue)) != NULL) {
+ struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb);
+ u64 ticks, ts;
+ int rc;
+
+ rc = sja1105_ptpclkval_read(priv, &ticks, NULL);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to read PTP clock: %d\n", rc);
+ kfree_skb(skb);
+ continue;
+ }
+
+ *shwt = (struct skb_shared_hwtstamps) {0};
+
+ ts = SJA1105_SKB_CB(skb)->tstamp;
+ ts = sja1105_tstamp_reconstruct(ds, ticks, ts);
+
+ shwt->hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(ts));
+ netif_rx(skb);
+ }
+
+ if (ptp_data->extts_enabled)
+ sja1105_extts_poll(priv);
+
+ mutex_unlock(&ptp_data->lock);
+
+ /* Don't restart */
+ return -1;
+}
+
+bool sja1105_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+
+ if (!(priv->hwts_rx_en & BIT(port)))
+ return false;
+
+ /* We need to read the full PTP clock to reconstruct the Rx
+ * timestamp. For that we need a sleepable context.
+ */
+ skb_queue_tail(&ptp_data->skb_rxtstamp_queue, skb);
+ ptp_schedule_worker(ptp_data->clock, 0);
+ return true;
+}
+
+bool sja1110_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb);
+ u64 ts = SJA1105_SKB_CB(skb)->tstamp;
+
+ *shwt = (struct skb_shared_hwtstamps) {0};
+
+ shwt->hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(ts));
+
+ /* Don't defer */
+ return false;
+}
+
+/* Called from dsa_skb_defer_rx_timestamp */
+bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type)
+{
+ struct sja1105_private *priv = ds->priv;
+
+ return priv->info->rxtstamp(ds, port, skb);
+}
+
+void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id,
+ enum sja1110_meta_tstamp dir, u64 tstamp)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
+ struct skb_shared_hwtstamps shwt = {0};
+
+ /* We don't care about RX timestamps on the CPU port */
+ if (dir == SJA1110_META_TSTAMP_RX)
+ return;
+
+ spin_lock(&ptp_data->skb_txtstamp_queue.lock);
+
+ skb_queue_walk_safe(&ptp_data->skb_txtstamp_queue, skb, skb_tmp) {
+ if (SJA1105_SKB_CB(skb)->ts_id != ts_id)
+ continue;
+
+ __skb_unlink(skb, &ptp_data->skb_txtstamp_queue);
+ skb_match = skb;
+
+ break;
+ }
+
+ spin_unlock(&ptp_data->skb_txtstamp_queue.lock);
+
+ if (WARN_ON(!skb_match))
+ return;
+
+ shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(tstamp));
+ skb_complete_tx_timestamp(skb_match, &shwt);
+}
+
+/* In addition to cloning the skb which is done by the common
+ * sja1105_port_txtstamp, we need to generate a timestamp ID and save the
+ * packet to the TX timestamping queue.
+ */
+void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
+{
+ struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ u8 ts_id;
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ spin_lock(&priv->ts_id_lock);
+
+ ts_id = priv->ts_id;
+ /* Deal automatically with 8-bit wraparound */
+ priv->ts_id++;
+
+ SJA1105_SKB_CB(clone)->ts_id = ts_id;
+
+ spin_unlock(&priv->ts_id_lock);
+
+ skb_queue_tail(&ptp_data->skb_txtstamp_queue, clone);
+}
+
+/* Called from dsa_skb_tx_timestamp. This callback is just to clone
+ * the skb and have it available in SJA1105_SKB_CB in the .port_deferred_xmit
+ * callback, where we will timestamp it synchronously.
+ */
+void sja1105_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sk_buff *clone;
+
+ if (!(priv->hwts_tx_en & BIT(port)))
+ return;
+
+ clone = skb_clone_sk(skb);
+ if (!clone)
+ return;
+
+ SJA1105_SKB_CB(skb)->clone = clone;
+
+ if (priv->info->txtstamp)
+ priv->info->txtstamp(ds, port, skb);
+}
+
+static int sja1105_ptp_reset(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ struct sja1105_ptp_cmd cmd = ptp_data->cmd;
+ int rc;
+
+ mutex_lock(&ptp_data->lock);
+
+ cmd.resptp = 1;
+
+ dev_dbg(ds->dev, "Resetting PTP clock\n");
+ rc = sja1105_ptp_commit(ds, &cmd, SPI_WRITE);
+
+ sja1105_tas_clockstep(priv->ds);
+
+ mutex_unlock(&ptp_data->lock);
+
+ return rc;
+}
+
+/* Caller must hold ptp_data->lock */
+int __sja1105_ptp_gettimex(struct dsa_switch *ds, u64 *ns,
+ struct ptp_system_timestamp *ptp_sts)
+{
+ struct sja1105_private *priv = ds->priv;
+ u64 ticks;
+ int rc;
+
+ rc = sja1105_ptpclkval_read(priv, &ticks, ptp_sts);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to read PTP clock: %d\n", rc);
+ return rc;
+ }
+
+ *ns = sja1105_ticks_to_ns(ticks);
+
+ return 0;
+}
+
+static int sja1105_ptp_gettimex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *ptp_sts)
+{
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+ u64 now = 0;
+ int rc;
+
+ mutex_lock(&ptp_data->lock);
+
+ rc = __sja1105_ptp_gettimex(priv->ds, &now, ptp_sts);
+ *ts = ns_to_timespec64(now);
+
+ mutex_unlock(&ptp_data->lock);
+
+ return rc;
+}
+
+/* Caller must hold ptp_data->lock */
+static int sja1105_ptp_mode_set(struct sja1105_private *priv,
+ enum sja1105_ptp_clk_mode mode)
+{
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+
+ if (ptp_data->cmd.ptpclkadd == mode)
+ return 0;
+
+ ptp_data->cmd.ptpclkadd = mode;
+
+ return sja1105_ptp_commit(priv->ds, &ptp_data->cmd, SPI_WRITE);
+}
+
+/* Write to PTPCLKVAL while PTPCLKADD is 0 */
+int __sja1105_ptp_settime(struct dsa_switch *ds, u64 ns,
+ struct ptp_system_timestamp *ptp_sts)
+{
+ struct sja1105_private *priv = ds->priv;
+ u64 ticks = ns_to_sja1105_ticks(ns);
+ int rc;
+
+ rc = sja1105_ptp_mode_set(priv, PTP_SET_MODE);
+ if (rc < 0) {
+ dev_err(priv->ds->dev, "Failed to put PTPCLK in set mode\n");
+ return rc;
+ }
+
+ rc = sja1105_ptpclkval_write(priv, ticks, ptp_sts);
+
+ sja1105_tas_clockstep(priv->ds);
+
+ return rc;
+}
+
+static int sja1105_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+ u64 ns = timespec64_to_ns(ts);
+ int rc;
+
+ mutex_lock(&ptp_data->lock);
+
+ rc = __sja1105_ptp_settime(priv->ds, ns, NULL);
+
+ mutex_unlock(&ptp_data->lock);
+
+ return rc;
+}
+
+static int sja1105_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 clkrate32;
+ s64 clkrate;
+ int rc;
+
+ clkrate = (s64)scaled_ppm * SJA1105_CC_MULT_NUM;
+ clkrate = div_s64(clkrate, SJA1105_CC_MULT_DEM);
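+ /* scaled_ppm is ppm with a 16-bit fractional part, so the two lines
+ * above convert it to a fraction of 2^31. A sketch of the math,
+ * assuming SJA1105_CC_MULT_NUM / SJA1105_CC_MULT_DEM is 512 / 15625:
+ * +1 ppm (scaled_ppm = 65536) maps to 65536 * 512 / 15625 ~= 2147,
+ * which is 2^31 / 10^6 as expected.
+ */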
+
+ /* Take a +/- value and re-center it around 2^31. */
+ clkrate = SJA1105_CC_MULT + clkrate;
+ WARN_ON(abs(clkrate) >= GENMASK_ULL(31, 0));
+ clkrate32 = clkrate;
+
+ mutex_lock(&ptp_data->lock);
+
+ rc = sja1105_xfer_u32(priv, SPI_WRITE, regs->ptpclkrate, &clkrate32,
+ NULL);
+
+ sja1105_tas_adjfreq(priv->ds);
+
+ mutex_unlock(&ptp_data->lock);
+
+ return rc;
+}
+
+/* Write to PTPCLKVAL while PTPCLKADD is 1 */
+int __sja1105_ptp_adjtime(struct dsa_switch *ds, s64 delta)
+{
+ struct sja1105_private *priv = ds->priv;
+ s64 ticks = ns_to_sja1105_ticks(delta);
+ int rc;
+
+ rc = sja1105_ptp_mode_set(priv, PTP_ADD_MODE);
+ if (rc < 0) {
+ dev_err(priv->ds->dev, "Failed to put PTPCLK in add mode\n");
+ return rc;
+ }
+
+ rc = sja1105_ptpclkval_write(priv, ticks, NULL);
+
+ sja1105_tas_clockstep(priv->ds);
+
+ return rc;
+}
+
+static int sja1105_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+ int rc;
+
+ mutex_lock(&ptp_data->lock);
+
+ rc = __sja1105_ptp_adjtime(priv->ds, delta);
+
+ mutex_unlock(&ptp_data->lock);
+
+ return rc;
+}
+
+static void sja1105_ptp_extts_setup_timer(struct sja1105_ptp_data *ptp_data)
+{
+ unsigned long expires = ((jiffies / SJA1105_EXTTS_INTERVAL) + 1) *
+ SJA1105_EXTTS_INTERVAL;
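+ /* Rounds jiffies up to the next whole multiple of
+ * SJA1105_EXTTS_INTERVAL, so successive polls land on a fixed grid.
+ */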
+
+ mod_timer(&ptp_data->extts_timer, expires);
+}
+
+static void sja1105_ptp_extts_timer(struct timer_list *t)
+{
+ struct sja1105_ptp_data *ptp_data = extts_to_data(t);
+
+ ptp_schedule_worker(ptp_data->clock, 0);
+
+ sja1105_ptp_extts_setup_timer(ptp_data);
+}
+
+static int sja1105_change_ptp_clk_pin_func(struct sja1105_private *priv,
+ enum ptp_pin_function func)
+{
+ struct sja1105_avb_params_entry *avb;
+ enum ptp_pin_function old_func;
+
+ avb = priv->static_config.tables[BLK_IDX_AVB_PARAMS].entries;
+
+ if (priv->info->device_id == SJA1105E_DEVICE_ID ||
+ priv->info->device_id == SJA1105T_DEVICE_ID ||
+ avb->cas_master)
+ old_func = PTP_PF_PEROUT;
+ else
+ old_func = PTP_PF_EXTTS;
+
+ if (func == old_func)
+ return 0;
+
+ avb->cas_master = (func == PTP_PF_PEROUT);
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_AVB_PARAMS, 0, avb,
+ true);
+}
+
+/* The PTP_CLK pin may be configured to toggle with a 50% duty cycle and a
+ * frequency f:
+ *
+ *            NSEC_PER_SEC
+ * f = ----------------------
+ *     (PTPPINDUR * 8 ns) * 2
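+ *
+ * For example, to get f = 1 Hz (using the 8 ns tick from sja1105_ptp.h):
+ * PTPPINDUR = NSEC_PER_SEC / (2 * 1 Hz * 8 ns) = 62500000 ticks, i.e.
+ * each half-period lasts 0.5 seconds.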
+ */
+static int sja1105_per_out_enable(struct sja1105_private *priv,
+ struct ptp_perout_request *perout,
+ bool on)
+{
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_ptp_cmd cmd = ptp_data->cmd;
+ int rc;
+
+ /* We only support one channel */
+ if (perout->index != 0)
+ return -EOPNOTSUPP;
+
+ /* Reject requests with unsupported flags */
+ if (perout->flags)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&ptp_data->lock);
+
+ rc = sja1105_change_ptp_clk_pin_func(priv, PTP_PF_PEROUT);
+ if (rc)
+ goto out;
+
+ if (on) {
+ struct timespec64 pin_duration_ts = {
+ .tv_sec = perout->period.sec,
+ .tv_nsec = perout->period.nsec,
+ };
+ struct timespec64 pin_start_ts = {
+ .tv_sec = perout->start.sec,
+ .tv_nsec = perout->start.nsec,
+ };
+ u64 pin_duration = timespec64_to_ns(&pin_duration_ts);
+ u64 pin_start = timespec64_to_ns(&pin_start_ts);
+ u32 pin_duration32;
+ u64 now;
+
+ /* ptppindur: 32 bit register which holds the interval between
+ * 2 edges on PTP_CLK. So check for truncation which happens
+ * at periods larger than around 68.7 seconds.
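+ * (U32_MAX ticks * 8 ns per tick ~= 34.36 s per half-period, hence the
+ * ~68.7 s cap on the full period.)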
+ */
+ pin_duration = ns_to_sja1105_ticks(pin_duration / 2);
+ if (pin_duration > U32_MAX) {
+ rc = -ERANGE;
+ goto out;
+ }
+ pin_duration32 = pin_duration;
+
+ /* ptppins: 64 bit register which needs to hold a PTP time
+ * larger than the current time, otherwise the startptpcp
+ * command won't do anything. So advance the current time
+ * by a number of periods in a way that won't alter the
+ * phase offset.
+ */
+ rc = __sja1105_ptp_gettimex(priv->ds, &now, NULL);
+ if (rc < 0)
+ goto out;
+
+ pin_start = future_base_time(pin_start, pin_duration,
+ now + 1ull * NSEC_PER_SEC);
+ pin_start = ns_to_sja1105_ticks(pin_start);
+
+ rc = sja1105_xfer_u64(priv, SPI_WRITE, regs->ptppinst,
+ &pin_start, NULL);
+ if (rc < 0)
+ goto out;
+
+ rc = sja1105_xfer_u32(priv, SPI_WRITE, regs->ptppindur,
+ &pin_duration32, NULL);
+ if (rc < 0)
+ goto out;
+ }
+
+ if (on)
+ cmd.startptpcp = true;
+ else
+ cmd.stopptpcp = true;
+
+ rc = sja1105_ptp_commit(priv->ds, &cmd, SPI_WRITE);
+
+out:
+ mutex_unlock(&ptp_data->lock);
+
+ return rc;
+}
+
+static int sja1105_extts_enable(struct sja1105_private *priv,
+ struct ptp_extts_request *extts,
+ bool on)
+{
+ int rc;
+
+ /* We only support one channel */
+ if (extts->index != 0)
+ return -EOPNOTSUPP;
+
+ /* Reject requests with unsupported flags */
+ if (extts->flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
+ /* We can only enable time stamping on both edges, sadly. */
+ if ((extts->flags & PTP_STRICT_FLAGS) &&
+ (extts->flags & PTP_ENABLE_FEATURE) &&
+ (extts->flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES)
+ return -EOPNOTSUPP;
+
+ rc = sja1105_change_ptp_clk_pin_func(priv, PTP_PF_EXTTS);
+ if (rc)
+ return rc;
+
+ priv->ptp_data.extts_enabled = on;
+
+ if (on)
+ sja1105_ptp_extts_setup_timer(&priv->ptp_data);
+ else
+ del_timer_sync(&priv->ptp_data.extts_timer);
+
+ return 0;
+}
+
+static int sja1105_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *req, int on)
+{
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+ int rc = -EOPNOTSUPP;
+
+ if (req->type == PTP_CLK_REQ_PEROUT)
+ rc = sja1105_per_out_enable(priv, &req->perout, on);
+ else if (req->type == PTP_CLK_REQ_EXTTS)
+ rc = sja1105_extts_enable(priv, &req->extts, on);
+
+ return rc;
+}
+
+static int sja1105_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+
+ if (chan != 0 || pin != 0)
+ return -1;
+
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_PEROUT:
+ break;
+ case PTP_PF_EXTTS:
+ if (priv->info->device_id == SJA1105E_DEVICE_ID ||
+ priv->info->device_id == SJA1105T_DEVICE_ID)
+ return -1;
+ break;
+ default:
+ return -1;
+ }
+ return 0;
+}
+
+static struct ptp_pin_desc sja1105_ptp_pin = {
+ .name = "ptp_clk",
+ .index = 0,
+ .func = PTP_PF_NONE,
+};
+
+int sja1105_ptp_clock_register(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+
+ ptp_data->caps = (struct ptp_clock_info) {
+ .owner = THIS_MODULE,
+ .name = "SJA1105 PHC",
+ .adjfine = sja1105_ptp_adjfine,
+ .adjtime = sja1105_ptp_adjtime,
+ .gettimex64 = sja1105_ptp_gettimex,
+ .settime64 = sja1105_ptp_settime,
+ .enable = sja1105_ptp_enable,
+ .verify = sja1105_ptp_verify_pin,
+ .do_aux_work = sja1105_rxtstamp_work,
+ .max_adj = SJA1105_MAX_ADJ_PPB,
+ .pin_config = &sja1105_ptp_pin,
+ .n_pins = 1,
+ .n_ext_ts = 1,
+ .n_per_out = 1,
+ };
+
+ /* Only used on SJA1105 */
+ skb_queue_head_init(&ptp_data->skb_rxtstamp_queue);
+ /* Only used on SJA1110 */
+ skb_queue_head_init(&ptp_data->skb_txtstamp_queue);
+
+ ptp_data->clock = ptp_clock_register(&ptp_data->caps, ds->dev);
+ if (IS_ERR_OR_NULL(ptp_data->clock))
+ return PTR_ERR(ptp_data->clock);
+
+ ptp_data->cmd.corrclk4ts = true;
+ ptp_data->cmd.ptpclkadd = PTP_SET_MODE;
+
+ timer_setup(&ptp_data->extts_timer, sja1105_ptp_extts_timer, 0);
+
+ return sja1105_ptp_reset(ds);
+}
+
+void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+
+ if (IS_ERR_OR_NULL(ptp_data->clock))
+ return;
+
+ del_timer_sync(&ptp_data->extts_timer);
+ ptp_cancel_worker_sync(ptp_data->clock);
+ skb_queue_purge(&ptp_data->skb_txtstamp_queue);
+ skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
+ ptp_clock_unregister(ptp_data->clock);
+ ptp_data->clock = NULL;
+}
+
+void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int port,
+ struct sk_buff *skb)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ struct skb_shared_hwtstamps shwt = {0};
+ u64 ticks, ts;
+ int rc;
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ mutex_lock(&ptp_data->lock);
+
+ rc = sja1105_ptpegr_ts_poll(ds, port, &ts);
+ if (rc < 0) {
+ dev_err(ds->dev, "timed out polling for tstamp\n");
+ kfree_skb(skb);
+ goto out;
+ }
+
+ rc = sja1105_ptpclkval_read(priv, &ticks, NULL);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to read PTP clock: %d\n", rc);
+ kfree_skb(skb);
+ goto out;
+ }
+
+ ts = sja1105_tstamp_reconstruct(ds, ticks, ts);
+
+ shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(ts));
+ skb_complete_tx_timestamp(skb, &shwt);
+
+out:
+ mutex_unlock(&ptp_data->lock);
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h
new file mode 100644
index 000000000..416461ee9
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#ifndef _SJA1105_PTP_H
+#define _SJA1105_PTP_H
+
+#include <linux/timer.h>
+
+#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP)
+
+/* Timestamps are in units of 8 ns clock ticks (equivalent to
+ * a fixed 125 MHz clock).
+ */
+#define SJA1105_TICK_NS 8
+
+static inline s64 ns_to_sja1105_ticks(s64 ns)
+{
+ return ns / SJA1105_TICK_NS;
+}
+
+static inline s64 sja1105_ticks_to_ns(s64 ticks)
+{
+ return ticks * SJA1105_TICK_NS;
+}
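+
+/* Quick sanity check for the helpers above (illustrative only): one second
+ * is NSEC_PER_SEC / 8 = 125000000 ticks, consistent with the 125 MHz clock
+ * mentioned in the comment for SJA1105_TICK_NS.
+ */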
+
+/* Calculate the first base_time in the future that satisfies this
+ * relationship:
+ *
+ * future_base_time = base_time + N x cycle_time >= now, or
+ *
+ *      now - base_time
+ * N >= ---------------
+ *        cycle_time
+ *
+ * Because N is an integer, the ceiling value of the above "a / b" ratio
+ * is in fact precisely the floor value of "(a + b - 1) / b", which is
+ * easier to calculate only having integer division tools.
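+ *
+ * A numeric example (made-up values): base_time = 1000, cycle_time = 300,
+ * now = 2000. Then a = 1000, n = (1000 + 299) / 300 = 4 under integer
+ * division, and the result is 1000 + 4 * 300 = 2200, the first point on
+ * the base_time + N x cycle_time grid that is >= now.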
+ */
+static inline s64 future_base_time(s64 base_time, s64 cycle_time, s64 now)
+{
+ s64 a, b, n;
+
+ if (base_time >= now)
+ return base_time;
+
+ a = now - base_time;
+ b = cycle_time;
+ n = div_s64(a + b - 1, b);
+
+ return base_time + n * cycle_time;
+}
+
+/* This is not a preprocessor macro because the "ns" argument may or may not be
+ * s64 at caller side. This ensures it is properly type-cast before div_s64.
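+ *
+ * The "delta" unit is 200 ns, per the divisor below: e.g. a 1 ms interval
+ * corresponds to 1000000 / 200 = 5000 delta units.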
+ */
+static inline s64 ns_to_sja1105_delta(s64 ns)
+{
+ return div_s64(ns, 200);
+}
+
+static inline s64 sja1105_delta_to_ns(s64 delta)
+{
+ return delta * 200;
+}
+
+struct sja1105_ptp_cmd {
+ u64 startptpcp; /* start toggling PTP_CLK pin */
+ u64 stopptpcp; /* stop toggling PTP_CLK pin */
+ u64 ptpstrtsch; /* start schedule */
+ u64 ptpstopsch; /* stop schedule */
+ u64 resptp; /* reset */
+ u64 corrclk4ts; /* use the corrected clock for timestamps */
+ u64 ptpclkadd; /* enum sja1105_ptp_clk_mode */
+};
+
+struct sja1105_ptp_data {
+ struct timer_list extts_timer;
+ /* Used only on SJA1105 to reconstruct partial timestamps */
+ struct sk_buff_head skb_rxtstamp_queue;
+ /* Used on SJA1110 where meta frames are generated only for
+ * 2-step TX timestamps
+ */
+ struct sk_buff_head skb_txtstamp_queue;
+ struct ptp_clock_info caps;
+ struct ptp_clock *clock;
+ struct sja1105_ptp_cmd cmd;
+ /* Serializes all operations on the PTP hardware clock */
+ struct mutex lock;
+ bool extts_enabled;
+ u64 ptpsyncts;
+};
+
+int sja1105_ptp_clock_register(struct dsa_switch *ds);
+
+void sja1105_ptp_clock_unregister(struct dsa_switch *ds);
+
+void sja1105et_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
+ enum packing_op op);
+
+void sja1105pqrs_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
+ enum packing_op op);
+
+int sja1105_get_ts_info(struct dsa_switch *ds, int port,
+ struct ethtool_ts_info *ts);
+
+void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot,
+ struct sk_buff *clone);
+
+bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type);
+
+void sja1105_port_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb);
+
+int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr);
+
+int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr);
+
+int __sja1105_ptp_gettimex(struct dsa_switch *ds, u64 *ns,
+ struct ptp_system_timestamp *sts);
+
+int __sja1105_ptp_settime(struct dsa_switch *ds, u64 ns,
+ struct ptp_system_timestamp *ptp_sts);
+
+int __sja1105_ptp_adjtime(struct dsa_switch *ds, s64 delta);
+
+int sja1105_ptp_commit(struct dsa_switch *ds, struct sja1105_ptp_cmd *cmd,
+ sja1105_spi_rw_mode_t rw);
+
+bool sja1105_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
+bool sja1110_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
+void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
+
+void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id,
+ enum sja1110_meta_tstamp dir, u64 tstamp);
+
+#else
+
+struct sja1105_ptp_cmd;
+
+/* Structures cannot be empty in C. Bah!
+ * Keep the mutex as the only element, which is a bit more difficult to
+ * refactor out of sja1105_main.c anyway.
+ */
+struct sja1105_ptp_data {
+ struct mutex lock;
+};
+
+static inline int sja1105_ptp_clock_register(struct dsa_switch *ds)
+{
+ return 0;
+}
+
+static inline void sja1105_ptp_clock_unregister(struct dsa_switch *ds) { }
+
+static inline void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot,
+ struct sk_buff *clone)
+{
+}
+
+static inline int __sja1105_ptp_gettimex(struct dsa_switch *ds, u64 *ns,
+ struct ptp_system_timestamp *sts)
+{
+ return 0;
+}
+
+static inline int __sja1105_ptp_settime(struct dsa_switch *ds, u64 ns,
+ struct ptp_system_timestamp *ptp_sts)
+{
+ return 0;
+}
+
+static inline int __sja1105_ptp_adjtime(struct dsa_switch *ds, s64 delta)
+{
+ return 0;
+}
+
+static inline int sja1105_ptp_commit(struct dsa_switch *ds,
+ struct sja1105_ptp_cmd *cmd,
+ sja1105_spi_rw_mode_t rw)
+{
+ return 0;
+}
+
+#define sja1105et_ptp_cmd_packing NULL
+
+#define sja1105pqrs_ptp_cmd_packing NULL
+
+#define sja1105_get_ts_info NULL
+
+#define sja1105_port_rxtstamp NULL
+
+#define sja1105_port_txtstamp NULL
+
+#define sja1105_hwtstamp_get NULL
+
+#define sja1105_hwtstamp_set NULL
+
+#define sja1105_rxtstamp NULL
+#define sja1110_rxtstamp NULL
+#define sja1110_txtstamp NULL
+
+#define sja1110_process_meta_tstamp NULL
+
+#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) */
+
+#endif /* _SJA1105_PTP_H */
diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
new file mode 100644
index 000000000..e6b61aef4
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_spi.c
@@ -0,0 +1,977 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* Copyright 2016-2018 NXP
+ * Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
+ * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include <linux/spi/spi.h>
+#include <linux/packing.h>
+#include "sja1105.h"
+
+struct sja1105_chunk {
+ u8 *buf;
+ size_t len;
+ u64 reg_addr;
+};
+
+static void
+sja1105_spi_message_pack(void *buf, const struct sja1105_spi_message *msg)
+{
+ const int size = SJA1105_SIZE_SPI_MSG_HEADER;
+
+ memset(buf, 0, size);
+
+ sja1105_pack(buf, &msg->access, 31, 31, size);
+ sja1105_pack(buf, &msg->read_count, 30, 25, size);
+ sja1105_pack(buf, &msg->address, 24, 4, size);
+}
+
+/* If @rw is:
+ * - SPI_WRITE: creates and sends an SPI write message at absolute
+ * address reg_addr, taking @len bytes from *buf
+ * - SPI_READ: creates and sends an SPI read message from absolute
+ * address reg_addr, writing @len bytes into *buf
+ */
+static int sja1105_xfer(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u8 *buf,
+ size_t len, struct ptp_system_timestamp *ptp_sts)
+{
+ u8 hdr_buf[SJA1105_SIZE_SPI_MSG_HEADER] = {0};
+ struct spi_device *spi = priv->spidev;
+ struct spi_transfer xfers[2] = {0};
+ struct spi_transfer *chunk_xfer;
+ struct spi_transfer *hdr_xfer;
+ struct sja1105_chunk chunk;
+ int num_chunks;
+ int rc, i = 0;
+
+ num_chunks = DIV_ROUND_UP(len, priv->max_xfer_len);
+
+ chunk.reg_addr = reg_addr;
+ chunk.buf = buf;
+ chunk.len = min_t(size_t, len, priv->max_xfer_len);
+
+ hdr_xfer = &xfers[0];
+ chunk_xfer = &xfers[1];
+
+ for (i = 0; i < num_chunks; i++) {
+ struct spi_transfer *ptp_sts_xfer;
+ struct sja1105_spi_message msg;
+
+ /* Populate the transfer's header buffer */
+ msg.address = chunk.reg_addr;
+ msg.access = rw;
+ if (rw == SPI_READ)
+ msg.read_count = chunk.len / 4;
+ else
+ /* Ignored */
+ msg.read_count = 0;
+ sja1105_spi_message_pack(hdr_buf, &msg);
+ hdr_xfer->tx_buf = hdr_buf;
+ hdr_xfer->len = SJA1105_SIZE_SPI_MSG_HEADER;
+
+ /* Populate the transfer's data buffer */
+ if (rw == SPI_READ)
+ chunk_xfer->rx_buf = chunk.buf;
+ else
+ chunk_xfer->tx_buf = chunk.buf;
+ chunk_xfer->len = chunk.len;
+
+ /* Request timestamping for the transfer. Instead of letting
+ * callers specify which byte they want to timestamp, we can
+ * make certain assumptions:
+ * - A read operation will request a software timestamp when
+ * what's being read is the PTP time. That is snapshotted by
+ * the switch hardware at the end of the command portion
+ * (hdr_xfer).
+ * - A write operation will request a software timestamp on
+ * actions that modify the PTP time. Taking clock stepping as
+ * an example, the switch writes the PTP time at the end of
+ * the data portion (chunk_xfer).
+ */
+ if (rw == SPI_READ)
+ ptp_sts_xfer = hdr_xfer;
+ else
+ ptp_sts_xfer = chunk_xfer;
+ ptp_sts_xfer->ptp_sts_word_pre = ptp_sts_xfer->len - 1;
+ ptp_sts_xfer->ptp_sts_word_post = ptp_sts_xfer->len - 1;
+ ptp_sts_xfer->ptp_sts = ptp_sts;
+
+ /* Calculate next chunk */
+ chunk.buf += chunk.len;
+ chunk.reg_addr += chunk.len / 4;
+ chunk.len = min_t(size_t, (ptrdiff_t)(buf + len - chunk.buf),
+ priv->max_xfer_len);
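+ /* E.g. (a sketch assuming a 256-byte max_xfer_len): a 260-byte
+ * transfer becomes a 256-byte chunk followed by a 4-byte chunk,
+ * with reg_addr advancing by 256 / 4 = 64 word addresses, since
+ * each register address covers a 4-byte word.
+ */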
+
+ rc = spi_sync_transfer(spi, xfers, 2);
+ if (rc < 0) {
+ dev_err(&spi->dev, "SPI transfer failed: %d\n", rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+int sja1105_xfer_buf(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr,
+ u8 *buf, size_t len)
+{
+ return sja1105_xfer(priv, rw, reg_addr, buf, len, NULL);
+}
+
+/* If @rw is:
+ * - SPI_WRITE: creates and sends an SPI write message at absolute
+ * address reg_addr
+ * - SPI_READ: creates and sends an SPI read message from absolute
+ * address reg_addr
+ *
+ * The u64 *value is unpacked, meaning that it's stored in the native
+ * CPU endianness and directly usable by software running on the core.
+ */
+int sja1105_xfer_u64(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u64 *value,
+ struct ptp_system_timestamp *ptp_sts)
+{
+ u8 packed_buf[8];
+ int rc;
+
+ if (rw == SPI_WRITE)
+ sja1105_pack(packed_buf, value, 63, 0, 8);
+
+ rc = sja1105_xfer(priv, rw, reg_addr, packed_buf, 8, ptp_sts);
+
+ if (rw == SPI_READ)
+ sja1105_unpack(packed_buf, value, 63, 0, 8);
+
+ return rc;
+}
+
+/* Same as above, but transfers only a 4 byte word */
+int sja1105_xfer_u32(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u32 *value,
+ struct ptp_system_timestamp *ptp_sts)
+{
+ u8 packed_buf[4];
+ u64 tmp;
+ int rc;
+
+ if (rw == SPI_WRITE) {
+ /* The packing API only supports u64 as CPU word size,
+ * so we need to convert.
+ */
+ tmp = *value;
+ sja1105_pack(packed_buf, &tmp, 31, 0, 4);
+ }
+
+ rc = sja1105_xfer(priv, rw, reg_addr, packed_buf, 4, ptp_sts);
+
+ if (rw == SPI_READ) {
+ sja1105_unpack(packed_buf, &tmp, 31, 0, 4);
+ *value = tmp;
+ }
+
+ return rc;
+}
+
+static int sja1105et_reset_cmd(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 cold_reset = BIT(3);
+
+ /* Cold reset */
+ return sja1105_xfer_u32(priv, SPI_WRITE, regs->rgu, &cold_reset, NULL);
+}
+
+static int sja1105pqrs_reset_cmd(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 cold_reset = BIT(2);
+
+ /* Cold reset */
+ return sja1105_xfer_u32(priv, SPI_WRITE, regs->rgu, &cold_reset, NULL);
+}
+
+static int sja1110_reset_cmd(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 switch_reset = BIT(20);
+
+ /* Only reset the switch core.
+ * A full cold reset would re-enable the BASE_MCSS_CLOCK PLL which
+ * would turn on the microcontroller, potentially letting it execute
+ * code which could interfere with our configuration.
+ */
+ return sja1105_xfer_u32(priv, SPI_WRITE, regs->rgu, &switch_reset, NULL);
+}
+
+int sja1105_inhibit_tx(const struct sja1105_private *priv,
+ unsigned long port_bitmap, bool tx_inhibited)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 inhibit_cmd;
+ int rc;
+
+ rc = sja1105_xfer_u32(priv, SPI_READ, regs->port_control,
+ &inhibit_cmd, NULL);
+ if (rc < 0)
+ return rc;
+
+ if (tx_inhibited)
+ inhibit_cmd |= port_bitmap;
+ else
+ inhibit_cmd &= ~port_bitmap;
+
+ return sja1105_xfer_u32(priv, SPI_WRITE, regs->port_control,
+ &inhibit_cmd, NULL);
+}
+
+struct sja1105_status {
+ u64 configs;
+ u64 crcchkl;
+ u64 ids;
+ u64 crcchkg;
+};
+
+/* This is not reading the entire General Status area, which is also
+ * divergent between E/T and P/Q/R/S, but only the relevant bits for
+ * ensuring that the static config upload procedure was successful.
+ */
+static void sja1105_status_unpack(void *buf, struct sja1105_status *status)
+{
+ /* So that addition translates to 4 bytes */
+ u32 *p = buf;
+
+ /* device_id is missing from the buffer, but we don't
+ * want to diverge from the manual definition of the
+ * register addresses, so we'll back off one step with
+ * the register pointer, and never access p[0].
+ */
+ p--;
+ sja1105_unpack(p + 0x1, &status->configs, 31, 31, 4);
+ sja1105_unpack(p + 0x1, &status->crcchkl, 30, 30, 4);
+ sja1105_unpack(p + 0x1, &status->ids, 29, 29, 4);
+ sja1105_unpack(p + 0x1, &status->crcchkg, 28, 28, 4);
+}
+
+static int sja1105_status_get(struct sja1105_private *priv,
+ struct sja1105_status *status)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 packed_buf[4];
+ int rc;
+
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->status, packed_buf, 4);
+ if (rc < 0)
+ return rc;
+
+ sja1105_status_unpack(packed_buf, status);
+
+ return 0;
+}
+
+/* Not const because unpacking priv->static_config into buffers and preparing
+ * for upload requires the recalculation of table CRCs and updating the
+ * structures with these.
+ */
+int static_config_buf_prepare_for_upload(struct sja1105_private *priv,
+ void *config_buf, int buf_len)
+{
+ struct sja1105_static_config *config = &priv->static_config;
+ struct sja1105_table_header final_header;
+ sja1105_config_valid_t valid;
+ char *final_header_ptr;
+ int crc_len;
+
+ valid = sja1105_static_config_check_valid(config,
+ priv->info->max_frame_mem);
+ if (valid != SJA1105_CONFIG_OK) {
+ dev_err(&priv->spidev->dev,
+ sja1105_static_config_error_msg[valid]);
+ return -EINVAL;
+ }
+
+ /* Write Device ID and config tables to config_buf */
+ sja1105_static_config_pack(config_buf, config);
+ /* Recalculate CRC of the last header (right now 0xDEADBEEF).
+ * Don't include the CRC field itself.
+ */
+ crc_len = buf_len - 4;
+ /* Read the whole table header */
+ final_header_ptr = config_buf + buf_len - SJA1105_SIZE_TABLE_HEADER;
+ sja1105_table_header_packing(final_header_ptr, &final_header, UNPACK);
+ /* Modify */
+ final_header.crc = sja1105_crc32(config_buf, crc_len);
+ /* Rewrite */
+ sja1105_table_header_packing(final_header_ptr, &final_header, PACK);
+
+ return 0;
+}
+
+#define RETRIES 10
+
+int sja1105_static_config_upload(struct sja1105_private *priv)
+{
+ struct sja1105_static_config *config = &priv->static_config;
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct device *dev = &priv->spidev->dev;
+ struct dsa_switch *ds = priv->ds;
+ struct sja1105_status status;
+ int rc, retries = RETRIES;
+ u8 *config_buf;
+ int buf_len;
+
+ buf_len = sja1105_static_config_get_length(config);
+ config_buf = kcalloc(buf_len, sizeof(char), GFP_KERNEL);
+ if (!config_buf)
+ return -ENOMEM;
+
+ rc = static_config_buf_prepare_for_upload(priv, config_buf, buf_len);
+ if (rc < 0) {
+ dev_err(dev, "Invalid config, cannot upload\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ /* Prevent PHY jabbering during switch reset by inhibiting
+ * Tx on all ports and waiting for current packet to drain.
+ * Otherwise, the PHY will see an unterminated Ethernet packet.
+ */
+ rc = sja1105_inhibit_tx(priv, GENMASK_ULL(ds->num_ports - 1, 0), true);
+ if (rc < 0) {
+ dev_err(dev, "Failed to inhibit Tx on ports\n");
+ rc = -ENXIO;
+ goto out;
+ }
+ /* Wait for an eventual egress packet to finish transmission
+ * (reach IFG). It is guaranteed that a second one will not
+ * follow, and that switch cold reset is thus safe.
+ */
+ usleep_range(500, 1000);
+ do {
+ /* Put the SJA1105 in programming mode */
+ rc = priv->info->reset_cmd(priv->ds);
+ if (rc < 0) {
+ dev_err(dev, "Failed to reset switch, retrying...\n");
+ continue;
+ }
+ /* Wait for the switch to come out of reset */
+ usleep_range(1000, 5000);
+ /* Upload the static config to the device */
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->config,
+ config_buf, buf_len);
+ if (rc < 0) {
+ dev_err(dev, "Failed to upload config, retrying...\n");
+ continue;
+ }
+ /* Check that SJA1105 responded well to the config upload */
+ rc = sja1105_status_get(priv, &status);
+ if (rc < 0)
+ continue;
+
+ if (status.ids == 1) {
+ dev_err(dev, "Mismatch between hardware and static config "
+ "device id. Wrote 0x%llx, wants 0x%llx\n",
+ config->device_id, priv->info->device_id);
+ continue;
+ }
+ if (status.crcchkl == 1) {
+ dev_err(dev, "Switch reported invalid local CRC on "
+ "the uploaded config, retrying...\n");
+ continue;
+ }
+ if (status.crcchkg == 1) {
+ dev_err(dev, "Switch reported invalid global CRC on "
+ "the uploaded config, retrying...\n");
+ continue;
+ }
+ if (status.configs == 0) {
+ dev_err(dev, "Switch reported that configuration is "
+ "invalid, retrying...\n");
+ continue;
+ }
+ /* Success! */
+ break;
+ } while (--retries);
+
+ if (!retries) {
+ rc = -EIO;
+ dev_err(dev, "Failed to upload config to device, giving up\n");
+ goto out;
+ } else if (retries != RETRIES) {
+ dev_info(dev, "Succeeded after %d tried\n", RETRIES - retries);
+ }
+
+out:
+ kfree(config_buf);
+ return rc;
+}
+
+static const struct sja1105_regs sja1105et_regs = {
+ .device_id = 0x0,
+ .prod_id = 0x100BC3,
+ .status = 0x1,
+ .port_control = 0x11,
+ .vl_status = 0x10000,
+ .config = 0x020000,
+ .rgu = 0x100440,
+ /* UM10944.pdf, Table 86, ACU Register overview */
+ .pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
+ .pad_mii_rx = {0x100801, 0x100803, 0x100805, 0x100807, 0x100809},
+ .rmii_pll1 = 0x10000A,
+ .cgu_idiv = {0x10000B, 0x10000C, 0x10000D, 0x10000E, 0x10000F},
+ .stats[MAC] = {0x200, 0x202, 0x204, 0x206, 0x208},
+ .stats[HL1] = {0x400, 0x410, 0x420, 0x430, 0x440},
+ .stats[HL2] = {0x600, 0x610, 0x620, 0x630, 0x640},
+ /* UM10944.pdf, Table 78, CGU Register overview */
+ .mii_tx_clk = {0x100013, 0x10001A, 0x100021, 0x100028, 0x10002F},
+ .mii_rx_clk = {0x100014, 0x10001B, 0x100022, 0x100029, 0x100030},
+ .mii_ext_tx_clk = {0x100018, 0x10001F, 0x100026, 0x10002D, 0x100034},
+ .mii_ext_rx_clk = {0x100019, 0x100020, 0x100027, 0x10002E, 0x100035},
+ .rgmii_tx_clk = {0x100016, 0x10001D, 0x100024, 0x10002B, 0x100032},
+ .rmii_ref_clk = {0x100015, 0x10001C, 0x100023, 0x10002A, 0x100031},
+ .rmii_ext_tx_clk = {0x100018, 0x10001F, 0x100026, 0x10002D, 0x100034},
+ .ptpegr_ts = {0xC0, 0xC2, 0xC4, 0xC6, 0xC8},
+ .ptpschtm = 0x12, /* Spans 0x12 to 0x13 */
+ .ptppinst = 0x14,
+ .ptppindur = 0x16,
+ .ptp_control = 0x17,
+ .ptpclkval = 0x18, /* Spans 0x18 to 0x19 */
+ .ptpclkrate = 0x1A,
+ .ptpclkcorp = 0x1D,
+ .mdio_100base_tx = SJA1105_RSV_ADDR,
+ .mdio_100base_t1 = SJA1105_RSV_ADDR,
+};
+
+static const struct sja1105_regs sja1105pqrs_regs = {
+ .device_id = 0x0,
+ .prod_id = 0x100BC3,
+ .status = 0x1,
+ .port_control = 0x12,
+ .vl_status = 0x10000,
+ .config = 0x020000,
+ .rgu = 0x100440,
+ /* UM10944.pdf, Table 86, ACU Register overview */
+ .pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
+ .pad_mii_rx = {0x100801, 0x100803, 0x100805, 0x100807, 0x100809},
+ .pad_mii_id = {0x100810, 0x100811, 0x100812, 0x100813, 0x100814},
+ .rmii_pll1 = 0x10000A,
+ .cgu_idiv = {0x10000B, 0x10000C, 0x10000D, 0x10000E, 0x10000F},
+ .stats[MAC] = {0x200, 0x202, 0x204, 0x206, 0x208},
+ .stats[HL1] = {0x400, 0x410, 0x420, 0x430, 0x440},
+ .stats[HL2] = {0x600, 0x610, 0x620, 0x630, 0x640},
+ .stats[ETHER] = {0x1400, 0x1418, 0x1430, 0x1448, 0x1460},
+ /* UM11040.pdf, Table 114 */
+ .mii_tx_clk = {0x100013, 0x100019, 0x10001F, 0x100025, 0x10002B},
+ .mii_rx_clk = {0x100014, 0x10001A, 0x100020, 0x100026, 0x10002C},
+ .mii_ext_tx_clk = {0x100017, 0x10001D, 0x100023, 0x100029, 0x10002F},
+ .mii_ext_rx_clk = {0x100018, 0x10001E, 0x100024, 0x10002A, 0x100030},
+ .rgmii_tx_clk = {0x100016, 0x10001C, 0x100022, 0x100028, 0x10002E},
+ .rmii_ref_clk = {0x100015, 0x10001B, 0x100021, 0x100027, 0x10002D},
+ .rmii_ext_tx_clk = {0x100017, 0x10001D, 0x100023, 0x100029, 0x10002F},
+ .ptpegr_ts = {0xC0, 0xC4, 0xC8, 0xCC, 0xD0},
+ .ptpschtm = 0x13, /* Spans 0x13 to 0x14 */
+ .ptppinst = 0x15,
+ .ptppindur = 0x17,
+ .ptp_control = 0x18,
+ .ptpclkval = 0x19,
+ .ptpclkrate = 0x1B,
+ .ptpclkcorp = 0x1E,
+ .ptpsyncts = 0x1F,
+ .mdio_100base_tx = SJA1105_RSV_ADDR,
+ .mdio_100base_t1 = SJA1105_RSV_ADDR,
+};
+
+static const struct sja1105_regs sja1110_regs = {
+ .device_id = SJA1110_SPI_ADDR(0x0),
+ .prod_id = SJA1110_ACU_ADDR(0xf00),
+ .status = SJA1110_SPI_ADDR(0x4),
+ .port_control = SJA1110_SPI_ADDR(0x50), /* actually INHIB_TX */
+ .vl_status = 0x10000,
+ .config = 0x020000,
+ .rgu = SJA1110_RGU_ADDR(0x100), /* Reset Control Register 0 */
+ /* Ports 2 and 3 are capable of xMII, but there isn't anything to
+ * configure in the CGU/ACU for them.
+ */
+ .pad_mii_tx = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR},
+ .pad_mii_rx = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR},
+ .pad_mii_id = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1110_ACU_ADDR(0x18), SJA1110_ACU_ADDR(0x28),
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR},
+ .rmii_pll1 = SJA1105_RSV_ADDR,
+ .cgu_idiv = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
+ .stats[MAC] = {0x200, 0x202, 0x204, 0x206, 0x208, 0x20a,
+ 0x20c, 0x20e, 0x210, 0x212, 0x214},
+ .stats[HL1] = {0x400, 0x410, 0x420, 0x430, 0x440, 0x450,
+ 0x460, 0x470, 0x480, 0x490, 0x4a0},
+ .stats[HL2] = {0x600, 0x610, 0x620, 0x630, 0x640, 0x650,
+ 0x660, 0x670, 0x680, 0x690, 0x6a0},
+ .stats[ETHER] = {0x1400, 0x1418, 0x1430, 0x1448, 0x1460, 0x1478,
+ 0x1490, 0x14a8, 0x14c0, 0x14d8, 0x14f0},
+ .mii_tx_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
+ .mii_rx_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
+ .mii_ext_tx_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
+ .mii_ext_rx_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
+ .rgmii_tx_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
+ .rmii_ref_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
+ .rmii_ext_tx_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR},
+ .ptpschtm = SJA1110_SPI_ADDR(0x54),
+ .ptppinst = SJA1110_SPI_ADDR(0x5c),
+ .ptppindur = SJA1110_SPI_ADDR(0x64),
+ .ptp_control = SJA1110_SPI_ADDR(0x68),
+ .ptpclkval = SJA1110_SPI_ADDR(0x6c),
+ .ptpclkrate = SJA1110_SPI_ADDR(0x74),
+ .ptpclkcorp = SJA1110_SPI_ADDR(0x80),
+ .ptpsyncts = SJA1110_SPI_ADDR(0x84),
+ .mdio_100base_tx = 0x1c2400,
+ .mdio_100base_t1 = 0x1c1000,
+ .pcs_base = {SJA1105_RSV_ADDR, 0x1c1400, 0x1c1800, 0x1c1c00, 0x1c2000,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
+};
+
+const struct sja1105_info sja1105e_info = {
+ .device_id = SJA1105E_DEVICE_ID,
+ .part_no = SJA1105ET_PART_NO,
+ .static_ops = sja1105e_table_ops,
+ .dyn_ops = sja1105et_dyn_ops,
+ .tag_proto = DSA_TAG_PROTO_SJA1105,
+ .can_limit_mcast_flood = false,
+ .ptp_ts_bits = 24,
+ .ptpegr_ts_bytes = 4,
+ .max_frame_mem = SJA1105_MAX_FRAME_MEMORY,
+ .num_ports = SJA1105_NUM_PORTS,
+ .num_cbs_shapers = SJA1105ET_MAX_CBS_COUNT,
+ .reset_cmd = sja1105et_reset_cmd,
+ .fdb_add_cmd = sja1105et_fdb_add,
+ .fdb_del_cmd = sja1105et_fdb_del,
+ .ptp_cmd_packing = sja1105et_ptp_cmd_packing,
+ .rxtstamp = sja1105_rxtstamp,
+ .clocking_setup = sja1105_clocking_setup,
+ .regs = &sja1105et_regs,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 3,
+ [SJA1105_SPEED_100MBPS] = 2,
+ [SJA1105_SPEED_1000MBPS] = 1,
+ [SJA1105_SPEED_2500MBPS] = 0, /* Not supported */
+ },
+ .supports_mii = {true, true, true, true, true},
+ .supports_rmii = {true, true, true, true, true},
+ .supports_rgmii = {true, true, true, true, true},
+ .name = "SJA1105E",
+};
+
+const struct sja1105_info sja1105t_info = {
+ .device_id = SJA1105T_DEVICE_ID,
+ .part_no = SJA1105ET_PART_NO,
+ .static_ops = sja1105t_table_ops,
+ .dyn_ops = sja1105et_dyn_ops,
+ .tag_proto = DSA_TAG_PROTO_SJA1105,
+ .can_limit_mcast_flood = false,
+ .ptp_ts_bits = 24,
+ .ptpegr_ts_bytes = 4,
+ .max_frame_mem = SJA1105_MAX_FRAME_MEMORY,
+ .num_ports = SJA1105_NUM_PORTS,
+ .num_cbs_shapers = SJA1105ET_MAX_CBS_COUNT,
+ .reset_cmd = sja1105et_reset_cmd,
+ .fdb_add_cmd = sja1105et_fdb_add,
+ .fdb_del_cmd = sja1105et_fdb_del,
+ .ptp_cmd_packing = sja1105et_ptp_cmd_packing,
+ .rxtstamp = sja1105_rxtstamp,
+ .clocking_setup = sja1105_clocking_setup,
+ .regs = &sja1105et_regs,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 3,
+ [SJA1105_SPEED_100MBPS] = 2,
+ [SJA1105_SPEED_1000MBPS] = 1,
+ [SJA1105_SPEED_2500MBPS] = 0, /* Not supported */
+ },
+ .supports_mii = {true, true, true, true, true},
+ .supports_rmii = {true, true, true, true, true},
+ .supports_rgmii = {true, true, true, true, true},
+ .name = "SJA1105T",
+};
+
+const struct sja1105_info sja1105p_info = {
+ .device_id = SJA1105PR_DEVICE_ID,
+ .part_no = SJA1105P_PART_NO,
+ .static_ops = sja1105p_table_ops,
+ .dyn_ops = sja1105pqrs_dyn_ops,
+ .tag_proto = DSA_TAG_PROTO_SJA1105,
+ .can_limit_mcast_flood = true,
+ .ptp_ts_bits = 32,
+ .ptpegr_ts_bytes = 8,
+ .max_frame_mem = SJA1105_MAX_FRAME_MEMORY,
+ .num_ports = SJA1105_NUM_PORTS,
+ .num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
+ .setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
+ .reset_cmd = sja1105pqrs_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .rxtstamp = sja1105_rxtstamp,
+ .clocking_setup = sja1105_clocking_setup,
+ .regs = &sja1105pqrs_regs,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 3,
+ [SJA1105_SPEED_100MBPS] = 2,
+ [SJA1105_SPEED_1000MBPS] = 1,
+ [SJA1105_SPEED_2500MBPS] = 0, /* Not supported */
+ },
+ .supports_mii = {true, true, true, true, true},
+ .supports_rmii = {true, true, true, true, true},
+ .supports_rgmii = {true, true, true, true, true},
+ .name = "SJA1105P",
+};
+
+const struct sja1105_info sja1105q_info = {
+ .device_id = SJA1105QS_DEVICE_ID,
+ .part_no = SJA1105Q_PART_NO,
+ .static_ops = sja1105q_table_ops,
+ .dyn_ops = sja1105pqrs_dyn_ops,
+ .tag_proto = DSA_TAG_PROTO_SJA1105,
+ .can_limit_mcast_flood = true,
+ .ptp_ts_bits = 32,
+ .ptpegr_ts_bytes = 8,
+ .max_frame_mem = SJA1105_MAX_FRAME_MEMORY,
+ .num_ports = SJA1105_NUM_PORTS,
+ .num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
+ .setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
+ .reset_cmd = sja1105pqrs_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .rxtstamp = sja1105_rxtstamp,
+ .clocking_setup = sja1105_clocking_setup,
+ .regs = &sja1105pqrs_regs,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 3,
+ [SJA1105_SPEED_100MBPS] = 2,
+ [SJA1105_SPEED_1000MBPS] = 1,
+ [SJA1105_SPEED_2500MBPS] = 0, /* Not supported */
+ },
+ .supports_mii = {true, true, true, true, true},
+ .supports_rmii = {true, true, true, true, true},
+ .supports_rgmii = {true, true, true, true, true},
+ .name = "SJA1105Q",
+};
+
+const struct sja1105_info sja1105r_info = {
+ .device_id = SJA1105PR_DEVICE_ID,
+ .part_no = SJA1105R_PART_NO,
+ .static_ops = sja1105r_table_ops,
+ .dyn_ops = sja1105pqrs_dyn_ops,
+ .tag_proto = DSA_TAG_PROTO_SJA1105,
+ .can_limit_mcast_flood = true,
+ .ptp_ts_bits = 32,
+ .ptpegr_ts_bytes = 8,
+ .max_frame_mem = SJA1105_MAX_FRAME_MEMORY,
+ .num_ports = SJA1105_NUM_PORTS,
+ .num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
+ .setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
+ .reset_cmd = sja1105pqrs_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .rxtstamp = sja1105_rxtstamp,
+ .clocking_setup = sja1105_clocking_setup,
+ .pcs_mdio_read = sja1105_pcs_mdio_read,
+ .pcs_mdio_write = sja1105_pcs_mdio_write,
+ .regs = &sja1105pqrs_regs,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 3,
+ [SJA1105_SPEED_100MBPS] = 2,
+ [SJA1105_SPEED_1000MBPS] = 1,
+ [SJA1105_SPEED_2500MBPS] = 0, /* Not supported */
+ },
+ .supports_mii = {true, true, true, true, true},
+ .supports_rmii = {true, true, true, true, true},
+ .supports_rgmii = {true, true, true, true, true},
+ .supports_sgmii = {false, false, false, false, true},
+ .name = "SJA1105R",
+};
+
+const struct sja1105_info sja1105s_info = {
+ .device_id = SJA1105QS_DEVICE_ID,
+ .part_no = SJA1105S_PART_NO,
+ .static_ops = sja1105s_table_ops,
+ .dyn_ops = sja1105pqrs_dyn_ops,
+ .regs = &sja1105pqrs_regs,
+ .tag_proto = DSA_TAG_PROTO_SJA1105,
+ .can_limit_mcast_flood = true,
+ .ptp_ts_bits = 32,
+ .ptpegr_ts_bytes = 8,
+ .max_frame_mem = SJA1105_MAX_FRAME_MEMORY,
+ .num_ports = SJA1105_NUM_PORTS,
+ .num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
+ .setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
+ .reset_cmd = sja1105pqrs_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .rxtstamp = sja1105_rxtstamp,
+ .clocking_setup = sja1105_clocking_setup,
+ .pcs_mdio_read = sja1105_pcs_mdio_read,
+ .pcs_mdio_write = sja1105_pcs_mdio_write,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 3,
+ [SJA1105_SPEED_100MBPS] = 2,
+ [SJA1105_SPEED_1000MBPS] = 1,
+ [SJA1105_SPEED_2500MBPS] = 0, /* Not supported */
+ },
+ .supports_mii = {true, true, true, true, true},
+ .supports_rmii = {true, true, true, true, true},
+ .supports_rgmii = {true, true, true, true, true},
+ .supports_sgmii = {false, false, false, false, true},
+ .name = "SJA1105S",
+};
+
+const struct sja1105_info sja1110a_info = {
+ .device_id = SJA1110_DEVICE_ID,
+ .part_no = SJA1110A_PART_NO,
+ .static_ops = sja1110_table_ops,
+ .dyn_ops = sja1110_dyn_ops,
+ .regs = &sja1110_regs,
+ .tag_proto = DSA_TAG_PROTO_SJA1110,
+ .can_limit_mcast_flood = true,
+ .multiple_cascade_ports = true,
+ .fixed_cbs_mapping = true,
+ .ptp_ts_bits = 32,
+ .ptpegr_ts_bytes = 8,
+ .max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
+ .num_ports = SJA1110_NUM_PORTS,
+ .num_cbs_shapers = SJA1110_MAX_CBS_COUNT,
+ .setup_rgmii_delay = sja1110_setup_rgmii_delay,
+ .reset_cmd = sja1110_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .rxtstamp = sja1110_rxtstamp,
+ .txtstamp = sja1110_txtstamp,
+ .disable_microcontroller = sja1110_disable_microcontroller,
+ .pcs_mdio_read = sja1110_pcs_mdio_read,
+ .pcs_mdio_write = sja1110_pcs_mdio_write,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 4,
+ [SJA1105_SPEED_100MBPS] = 3,
+ [SJA1105_SPEED_1000MBPS] = 2,
+ [SJA1105_SPEED_2500MBPS] = 1,
+ },
+ .supports_mii = {true, true, true, true, false,
+ true, true, true, true, true, true},
+ .supports_rmii = {false, false, true, true, false,
+ false, false, false, false, false, false},
+ .supports_rgmii = {false, false, true, true, false,
+ false, false, false, false, false, false},
+ .supports_sgmii = {false, true, true, true, true,
+ false, false, false, false, false, false},
+ .supports_2500basex = {false, false, false, true, true,
+ false, false, false, false, false, false},
+ .internal_phy = {SJA1105_NO_PHY, SJA1105_PHY_BASE_TX,
+ SJA1105_NO_PHY, SJA1105_NO_PHY,
+ SJA1105_NO_PHY, SJA1105_PHY_BASE_T1,
+ SJA1105_PHY_BASE_T1, SJA1105_PHY_BASE_T1,
+ SJA1105_PHY_BASE_T1, SJA1105_PHY_BASE_T1,
+ SJA1105_PHY_BASE_T1},
+ .name = "SJA1110A",
+};
+
+const struct sja1105_info sja1110b_info = {
+ .device_id = SJA1110_DEVICE_ID,
+ .part_no = SJA1110B_PART_NO,
+ .static_ops = sja1110_table_ops,
+ .dyn_ops = sja1110_dyn_ops,
+ .regs = &sja1110_regs,
+ .tag_proto = DSA_TAG_PROTO_SJA1110,
+ .can_limit_mcast_flood = true,
+ .multiple_cascade_ports = true,
+ .fixed_cbs_mapping = true,
+ .ptp_ts_bits = 32,
+ .ptpegr_ts_bytes = 8,
+ .max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
+ .num_ports = SJA1110_NUM_PORTS,
+ .num_cbs_shapers = SJA1110_MAX_CBS_COUNT,
+ .setup_rgmii_delay = sja1110_setup_rgmii_delay,
+ .reset_cmd = sja1110_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .rxtstamp = sja1110_rxtstamp,
+ .txtstamp = sja1110_txtstamp,
+ .disable_microcontroller = sja1110_disable_microcontroller,
+ .pcs_mdio_read = sja1110_pcs_mdio_read,
+ .pcs_mdio_write = sja1110_pcs_mdio_write,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 4,
+ [SJA1105_SPEED_100MBPS] = 3,
+ [SJA1105_SPEED_1000MBPS] = 2,
+ [SJA1105_SPEED_2500MBPS] = 1,
+ },
+ .supports_mii = {true, true, true, true, false,
+ true, true, true, true, true, false},
+ .supports_rmii = {false, false, true, true, false,
+ false, false, false, false, false, false},
+ .supports_rgmii = {false, false, true, true, false,
+ false, false, false, false, false, false},
+ .supports_sgmii = {false, false, false, true, true,
+ false, false, false, false, false, false},
+ .supports_2500basex = {false, false, false, true, true,
+ false, false, false, false, false, false},
+ .internal_phy = {SJA1105_NO_PHY, SJA1105_PHY_BASE_TX,
+ SJA1105_NO_PHY, SJA1105_NO_PHY,
+ SJA1105_NO_PHY, SJA1105_PHY_BASE_T1,
+ SJA1105_PHY_BASE_T1, SJA1105_PHY_BASE_T1,
+ SJA1105_PHY_BASE_T1, SJA1105_PHY_BASE_T1,
+ SJA1105_NO_PHY},
+ .name = "SJA1110B",
+};
+
+const struct sja1105_info sja1110c_info = {
+ .device_id = SJA1110_DEVICE_ID,
+ .part_no = SJA1110C_PART_NO,
+ .static_ops = sja1110_table_ops,
+ .dyn_ops = sja1110_dyn_ops,
+ .regs = &sja1110_regs,
+ .tag_proto = DSA_TAG_PROTO_SJA1110,
+ .can_limit_mcast_flood = true,
+ .multiple_cascade_ports = true,
+ .fixed_cbs_mapping = true,
+ .ptp_ts_bits = 32,
+ .ptpegr_ts_bytes = 8,
+ .max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
+ .num_ports = SJA1110_NUM_PORTS,
+ .num_cbs_shapers = SJA1110_MAX_CBS_COUNT,
+ .setup_rgmii_delay = sja1110_setup_rgmii_delay,
+ .reset_cmd = sja1110_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .rxtstamp = sja1110_rxtstamp,
+ .txtstamp = sja1110_txtstamp,
+ .disable_microcontroller = sja1110_disable_microcontroller,
+ .pcs_mdio_read = sja1110_pcs_mdio_read,
+ .pcs_mdio_write = sja1110_pcs_mdio_write,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 4,
+ [SJA1105_SPEED_100MBPS] = 3,
+ [SJA1105_SPEED_1000MBPS] = 2,
+ [SJA1105_SPEED_2500MBPS] = 1,
+ },
+ .supports_mii = {true, true, true, true, false,
+ true, true, true, false, false, false},
+ .supports_rmii = {false, false, true, true, false,
+ false, false, false, false, false, false},
+ .supports_rgmii = {false, false, true, true, false,
+ false, false, false, false, false, false},
+ .supports_sgmii = {false, false, false, false, true,
+ false, false, false, false, false, false},
+ .supports_2500basex = {false, false, false, false, true,
+ false, false, false, false, false, false},
+ .internal_phy = {SJA1105_NO_PHY, SJA1105_PHY_BASE_TX,
+ SJA1105_NO_PHY, SJA1105_NO_PHY,
+ SJA1105_NO_PHY, SJA1105_PHY_BASE_T1,
+ SJA1105_PHY_BASE_T1, SJA1105_PHY_BASE_T1,
+ SJA1105_NO_PHY, SJA1105_NO_PHY,
+ SJA1105_NO_PHY},
+ .name = "SJA1110C",
+};
+
+const struct sja1105_info sja1110d_info = {
+ .device_id = SJA1110_DEVICE_ID,
+ .part_no = SJA1110D_PART_NO,
+ .static_ops = sja1110_table_ops,
+ .dyn_ops = sja1110_dyn_ops,
+ .regs = &sja1110_regs,
+ .tag_proto = DSA_TAG_PROTO_SJA1110,
+ .can_limit_mcast_flood = true,
+ .multiple_cascade_ports = true,
+ .fixed_cbs_mapping = true,
+ .ptp_ts_bits = 32,
+ .ptpegr_ts_bytes = 8,
+ .max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
+ .num_ports = SJA1110_NUM_PORTS,
+ .num_cbs_shapers = SJA1110_MAX_CBS_COUNT,
+ .setup_rgmii_delay = sja1110_setup_rgmii_delay,
+ .reset_cmd = sja1110_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .rxtstamp = sja1110_rxtstamp,
+ .txtstamp = sja1110_txtstamp,
+ .disable_microcontroller = sja1110_disable_microcontroller,
+ .pcs_mdio_read = sja1110_pcs_mdio_read,
+ .pcs_mdio_write = sja1110_pcs_mdio_write,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 4,
+ [SJA1105_SPEED_100MBPS] = 3,
+ [SJA1105_SPEED_1000MBPS] = 2,
+ [SJA1105_SPEED_2500MBPS] = 1,
+ },
+ .supports_mii = {true, false, true, false, false,
+ true, true, true, false, false, false},
+ .supports_rmii = {false, false, true, false, false,
+ false, false, false, false, false, false},
+ .supports_rgmii = {false, false, true, false, false,
+ false, false, false, false, false, false},
+ .supports_sgmii = {false, true, true, true, true,
+ false, false, false, false, false, false},
+ .supports_2500basex = {false, false, false, true, true,
+ false, false, false, false, false, false},
+ .internal_phy = {SJA1105_NO_PHY, SJA1105_NO_PHY,
+ SJA1105_NO_PHY, SJA1105_NO_PHY,
+ SJA1105_NO_PHY, SJA1105_PHY_BASE_T1,
+ SJA1105_PHY_BASE_T1, SJA1105_PHY_BASE_T1,
+ SJA1105_NO_PHY, SJA1105_NO_PHY,
+ SJA1105_NO_PHY},
+ .name = "SJA1110D",
+};
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c
new file mode 100644
index 000000000..baba204ad
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.c
@@ -0,0 +1,1952 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* Copyright 2016-2018 NXP
+ * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include "sja1105_static_config.h"
+#include <linux/crc32.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+
+/* Convenience wrappers over the generic packing functions. These take into
+ * account the SJA1105 memory layout quirks and provide some level of
+ * programmer protection against incorrect API use. The errors are not expected
+ * to occur at runtime, therefore printing and swallowing them here is
+ * appropriate instead of cluttering up higher-level code.
+ */
+void sja1105_pack(void *buf, const u64 *val, int start, int end, size_t len)
+{
+ int rc = packing(buf, (u64 *)val, start, end, len,
+ PACK, QUIRK_LSW32_IS_FIRST);
+
+ if (likely(!rc))
+ return;
+
+ if (rc == -EINVAL) {
+ pr_err("Start bit (%d) expected to be larger than end (%d)\n",
+ start, end);
+ } else if (rc == -ERANGE) {
+ if ((start - end + 1) > 64)
+ pr_err("Field %d-%d too large for 64 bits!\n",
+ start, end);
+ else
+ pr_err("Cannot store %llx inside bits %d-%d (would truncate)\n",
+ *val, start, end);
+ }
+ dump_stack();
+}
+
+void sja1105_unpack(const void *buf, u64 *val, int start, int end, size_t len)
+{
+ int rc = packing((void *)buf, val, start, end, len,
+ UNPACK, QUIRK_LSW32_IS_FIRST);
+
+ if (likely(!rc))
+ return;
+
+ if (rc == -EINVAL)
+ pr_err("Start bit (%d) expected to be larger than end (%d)\n",
+ start, end);
+ else if (rc == -ERANGE)
+ pr_err("Field %d-%d too large for 64 bits!\n",
+ start, end);
+ dump_stack();
+}
+
+void sja1105_packing(void *buf, u64 *val, int start, int end,
+ size_t len, enum packing_op op)
+{
+ int rc = packing(buf, val, start, end, len, op, QUIRK_LSW32_IS_FIRST);
+
+ if (likely(!rc))
+ return;
+
+ if (rc == -EINVAL) {
+ pr_err("Start bit (%d) expected to be larger than end (%d)\n",
+ start, end);
+ } else if (rc == -ERANGE) {
+ if ((start - end + 1) > 64)
+ pr_err("Field %d-%d too large for 64 bits!\n",
+ start, end);
+ else
+ pr_err("Cannot store %llx inside bits %d-%d (would truncate)\n",
+ *val, start, end);
+ }
+ dump_stack();
+}
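+
+/* A minimal round-trip sketch of how the wrappers above are meant to be
+ * used (buffer size and bit offsets are purely illustrative, not taken
+ * from any real configuration table):
+ *
+ *	u8 buf[8] = {0};
+ *	u64 in = 0x5, out = 0;
+ *
+ *	sja1105_pack(buf, &in, 34, 30, 8);
+ *	sja1105_unpack(buf, &out, 34, 30, 8);
+ *	WARN_ON(out != in);
+ *
+ * Both directions apply QUIRK_LSW32_IS_FIRST, i.e. the least significant
+ * 32-bit word of a multi-word field sits first in the buffer.
+ */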
+
+/* Little-endian Ethernet CRC32 of data packed as big-endian u32 words */
+u32 sja1105_crc32(const void *buf, size_t len)
+{
+ unsigned int i;
+ u64 word;
+ u32 crc;
+
+ /* seed */
+ crc = ~0;
+ for (i = 0; i < len; i += 4) {
+ sja1105_unpack(buf + i, &word, 31, 0, 4);
+ crc = crc32_le(crc, (u8 *)&word, 4);
+ }
+ return ~crc;
+}
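+
+/* Worked example for the above: buffer bytes 11 22 33 44 unpack (as
+ * big-endian) into the u64 word 0x11223344, and crc32_le() then consumes
+ * that word's in-memory bytes, which on a little-endian CPU are
+ * 44 33 22 11. This is what produces the byte-swapped CRC described in
+ * the comment above.
+ */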
+
+static size_t sja1105et_avb_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105ET_SIZE_AVB_PARAMS_ENTRY;
+ struct sja1105_avb_params_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->destmeta, 95, 48, size, op);
+ sja1105_packing(buf, &entry->srcmeta, 47, 0, size, op);
+ return size;
+}
+
+size_t sja1105pqrs_avb_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY;
+ struct sja1105_avb_params_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->cas_master, 126, 126, size, op);
+ sja1105_packing(buf, &entry->destmeta, 125, 78, size, op);
+ sja1105_packing(buf, &entry->srcmeta, 77, 30, size, op);
+ return size;
+}
+
+static size_t sja1105et_general_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY;
+ struct sja1105_general_params_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->vllupformat, 319, 319, size, op);
+ sja1105_packing(buf, &entry->mirr_ptacu, 318, 318, size, op);
+ sja1105_packing(buf, &entry->switchid, 317, 315, size, op);
+ sja1105_packing(buf, &entry->hostprio, 314, 312, size, op);
+ sja1105_packing(buf, &entry->mac_fltres1, 311, 264, size, op);
+ sja1105_packing(buf, &entry->mac_fltres0, 263, 216, size, op);
+ sja1105_packing(buf, &entry->mac_flt1, 215, 168, size, op);
+ sja1105_packing(buf, &entry->mac_flt0, 167, 120, size, op);
+ sja1105_packing(buf, &entry->incl_srcpt1, 119, 119, size, op);
+ sja1105_packing(buf, &entry->incl_srcpt0, 118, 118, size, op);
+ sja1105_packing(buf, &entry->send_meta1, 117, 117, size, op);
+ sja1105_packing(buf, &entry->send_meta0, 116, 116, size, op);
+ sja1105_packing(buf, &entry->casc_port, 115, 113, size, op);
+ sja1105_packing(buf, &entry->host_port, 112, 110, size, op);
+ sja1105_packing(buf, &entry->mirr_port, 109, 107, size, op);
+ sja1105_packing(buf, &entry->vlmarker, 106, 75, size, op);
+ sja1105_packing(buf, &entry->vlmask, 74, 43, size, op);
+ sja1105_packing(buf, &entry->tpid, 42, 27, size, op);
+ sja1105_packing(buf, &entry->ignore2stf, 26, 26, size, op);
+ sja1105_packing(buf, &entry->tpid2, 25, 10, size, op);
+ return size;
+}
+
+/* TPID and TPID2 are intentionally reversed so that semantic
+ * compatibility with E/T is kept.
+ */
+size_t sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY;
+ struct sja1105_general_params_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->vllupformat, 351, 351, size, op);
+ sja1105_packing(buf, &entry->mirr_ptacu, 350, 350, size, op);
+ sja1105_packing(buf, &entry->switchid, 349, 347, size, op);
+ sja1105_packing(buf, &entry->hostprio, 346, 344, size, op);
+ sja1105_packing(buf, &entry->mac_fltres1, 343, 296, size, op);
+ sja1105_packing(buf, &entry->mac_fltres0, 295, 248, size, op);
+ sja1105_packing(buf, &entry->mac_flt1, 247, 200, size, op);
+ sja1105_packing(buf, &entry->mac_flt0, 199, 152, size, op);
+ sja1105_packing(buf, &entry->incl_srcpt1, 151, 151, size, op);
+ sja1105_packing(buf, &entry->incl_srcpt0, 150, 150, size, op);
+ sja1105_packing(buf, &entry->send_meta1, 149, 149, size, op);
+ sja1105_packing(buf, &entry->send_meta0, 148, 148, size, op);
+ sja1105_packing(buf, &entry->casc_port, 147, 145, size, op);
+ sja1105_packing(buf, &entry->host_port, 144, 142, size, op);
+ sja1105_packing(buf, &entry->mirr_port, 141, 139, size, op);
+ sja1105_packing(buf, &entry->vlmarker, 138, 107, size, op);
+ sja1105_packing(buf, &entry->vlmask, 106, 75, size, op);
+ sja1105_packing(buf, &entry->tpid2, 74, 59, size, op);
+ sja1105_packing(buf, &entry->ignore2stf, 58, 58, size, op);
+ sja1105_packing(buf, &entry->tpid, 57, 42, size, op);
+ sja1105_packing(buf, &entry->queue_ts, 41, 41, size, op);
+ sja1105_packing(buf, &entry->egrmirrvid, 40, 29, size, op);
+ sja1105_packing(buf, &entry->egrmirrpcp, 28, 26, size, op);
+ sja1105_packing(buf, &entry->egrmirrdei, 25, 25, size, op);
+ sja1105_packing(buf, &entry->replay_port, 24, 22, size, op);
+ return size;
+}
+
+size_t sja1110_general_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_general_params_entry *entry = entry_ptr;
+ const size_t size = SJA1110_SIZE_GENERAL_PARAMS_ENTRY;
+
+ sja1105_packing(buf, &entry->vllupformat, 447, 447, size, op);
+ sja1105_packing(buf, &entry->mirr_ptacu, 446, 446, size, op);
+ sja1105_packing(buf, &entry->switchid, 445, 442, size, op);
+ sja1105_packing(buf, &entry->hostprio, 441, 439, size, op);
+ sja1105_packing(buf, &entry->mac_fltres1, 438, 391, size, op);
+ sja1105_packing(buf, &entry->mac_fltres0, 390, 343, size, op);
+ sja1105_packing(buf, &entry->mac_flt1, 342, 295, size, op);
+ sja1105_packing(buf, &entry->mac_flt0, 294, 247, size, op);
+ sja1105_packing(buf, &entry->incl_srcpt1, 246, 246, size, op);
+ sja1105_packing(buf, &entry->incl_srcpt0, 245, 245, size, op);
+ sja1105_packing(buf, &entry->send_meta1, 244, 244, size, op);
+ sja1105_packing(buf, &entry->send_meta0, 243, 243, size, op);
+ sja1105_packing(buf, &entry->casc_port, 242, 232, size, op);
+ sja1105_packing(buf, &entry->host_port, 231, 228, size, op);
+ sja1105_packing(buf, &entry->mirr_port, 227, 224, size, op);
+ sja1105_packing(buf, &entry->vlmarker, 223, 192, size, op);
+ sja1105_packing(buf, &entry->vlmask, 191, 160, size, op);
+ sja1105_packing(buf, &entry->tpid2, 159, 144, size, op);
+ sja1105_packing(buf, &entry->ignore2stf, 143, 143, size, op);
+ sja1105_packing(buf, &entry->tpid, 142, 127, size, op);
+ sja1105_packing(buf, &entry->queue_ts, 126, 126, size, op);
+ sja1105_packing(buf, &entry->egrmirrvid, 125, 114, size, op);
+ sja1105_packing(buf, &entry->egrmirrpcp, 113, 111, size, op);
+ sja1105_packing(buf, &entry->egrmirrdei, 110, 110, size, op);
+ sja1105_packing(buf, &entry->replay_port, 109, 106, size, op);
+ sja1105_packing(buf, &entry->tdmaconfigidx, 70, 67, size, op);
+ sja1105_packing(buf, &entry->header_type, 64, 49, size, op);
+ sja1105_packing(buf, &entry->tte_en, 16, 16, size, op);
+ return size;
+}
+
+static size_t
+sja1105_l2_forwarding_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY;
+ struct sja1105_l2_forwarding_params_entry *entry = entry_ptr;
+ int offset, i;
+
+ sja1105_packing(buf, &entry->max_dynp, 95, 93, size, op);
+ for (i = 0, offset = 13; i < 8; i++, offset += 10)
+ sja1105_packing(buf, &entry->part_spc[i],
+ offset + 9, offset + 0, size, op);
+ return size;
+}
+
+size_t sja1110_l2_forwarding_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_forwarding_params_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY;
+ int offset, i;
+
+ sja1105_packing(buf, &entry->max_dynp, 95, 93, size, op);
+ for (i = 0, offset = 5; i < 8; i++, offset += 11)
+ sja1105_packing(buf, &entry->part_spc[i],
+ offset + 10, offset + 0, size, op);
+ return size;
+}
+
+size_t sja1105_l2_forwarding_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_L2_FORWARDING_ENTRY;
+ struct sja1105_l2_forwarding_entry *entry = entry_ptr;
+ int offset, i;
+
+ sja1105_packing(buf, &entry->bc_domain, 63, 59, size, op);
+ sja1105_packing(buf, &entry->reach_port, 58, 54, size, op);
+ sja1105_packing(buf, &entry->fl_domain, 53, 49, size, op);
+ for (i = 0, offset = 25; i < 8; i++, offset += 3)
+ sja1105_packing(buf, &entry->vlan_pmap[i],
+ offset + 2, offset + 0, size, op);
+ return size;
+}
+
+size_t sja1110_l2_forwarding_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_forwarding_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_L2_FORWARDING_ENTRY;
+ int offset, i;
+
+ if (entry->type_egrpcp2outputq) {
+ for (i = 0, offset = 31; i < SJA1110_NUM_PORTS;
+ i++, offset += 3) {
+ sja1105_packing(buf, &entry->vlan_pmap[i],
+ offset + 2, offset + 0, size, op);
+ }
+ } else {
+ sja1105_packing(buf, &entry->bc_domain, 63, 53, size, op);
+ sja1105_packing(buf, &entry->reach_port, 52, 42, size, op);
+ sja1105_packing(buf, &entry->fl_domain, 41, 31, size, op);
+ }
+ return size;
+}
+
+static size_t
+sja1105et_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY;
+ struct sja1105_l2_lookup_params_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->maxage, 31, 17, size, op);
+ sja1105_packing(buf, &entry->dyn_tbsz, 16, 14, size, op);
+ sja1105_packing(buf, &entry->poly, 13, 6, size, op);
+ sja1105_packing(buf, &entry->shared_learn, 5, 5, size, op);
+ sja1105_packing(buf, &entry->no_enf_hostprt, 4, 4, size, op);
+ sja1105_packing(buf, &entry->no_mgmt_learn, 3, 3, size, op);
+ return size;
+}
+
+size_t sja1105pqrs_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY;
+ struct sja1105_l2_lookup_params_entry *entry = entry_ptr;
+ int offset, i;
+
+ for (i = 0, offset = 58; i < 5; i++, offset += 11)
+ sja1105_packing(buf, &entry->maxaddrp[i],
+ offset + 10, offset + 0, size, op);
+ sja1105_packing(buf, &entry->maxage, 57, 43, size, op);
+ sja1105_packing(buf, &entry->start_dynspc, 42, 33, size, op);
+ sja1105_packing(buf, &entry->drpnolearn, 32, 28, size, op);
+ sja1105_packing(buf, &entry->shared_learn, 27, 27, size, op);
+ sja1105_packing(buf, &entry->no_enf_hostprt, 26, 26, size, op);
+ sja1105_packing(buf, &entry->no_mgmt_learn, 25, 25, size, op);
+ sja1105_packing(buf, &entry->use_static, 24, 24, size, op);
+ sja1105_packing(buf, &entry->owr_dyn, 23, 23, size, op);
+ sja1105_packing(buf, &entry->learn_once, 22, 22, size, op);
+ return size;
+}
+
+size_t sja1110_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_lookup_params_entry *entry = entry_ptr;
+ const size_t size = SJA1110_SIZE_L2_LOOKUP_PARAMS_ENTRY;
+ int offset, i;
+
+ for (i = 0, offset = 70; i < SJA1110_NUM_PORTS; i++, offset += 11)
+ sja1105_packing(buf, &entry->maxaddrp[i],
+ offset + 10, offset + 0, size, op);
+ sja1105_packing(buf, &entry->maxage, 69, 55, size, op);
+ sja1105_packing(buf, &entry->start_dynspc, 54, 45, size, op);
+ sja1105_packing(buf, &entry->drpnolearn, 44, 34, size, op);
+ sja1105_packing(buf, &entry->shared_learn, 33, 33, size, op);
+ sja1105_packing(buf, &entry->no_enf_hostprt, 32, 32, size, op);
+ sja1105_packing(buf, &entry->no_mgmt_learn, 31, 31, size, op);
+ sja1105_packing(buf, &entry->use_static, 30, 30, size, op);
+ sja1105_packing(buf, &entry->owr_dyn, 29, 29, size, op);
+ sja1105_packing(buf, &entry->learn_once, 28, 28, size, op);
+ return size;
+}
+
+size_t sja1105et_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
+ struct sja1105_l2_lookup_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->vlanid, 95, 84, size, op);
+ sja1105_packing(buf, &entry->macaddr, 83, 36, size, op);
+ sja1105_packing(buf, &entry->destports, 35, 31, size, op);
+ sja1105_packing(buf, &entry->enfport, 30, 30, size, op);
+ sja1105_packing(buf, &entry->index, 29, 20, size, op);
+ return size;
+}
+
+size_t sja1105pqrs_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+ struct sja1105_l2_lookup_entry *entry = entry_ptr;
+
+ if (entry->lockeds) {
+ sja1105_packing(buf, &entry->tsreg, 159, 159, size, op);
+ sja1105_packing(buf, &entry->mirrvlan, 158, 147, size, op);
+ sja1105_packing(buf, &entry->takets, 146, 146, size, op);
+ sja1105_packing(buf, &entry->mirr, 145, 145, size, op);
+ sja1105_packing(buf, &entry->retag, 144, 144, size, op);
+ } else {
+ sja1105_packing(buf, &entry->touched, 159, 159, size, op);
+ sja1105_packing(buf, &entry->age, 158, 144, size, op);
+ }
+ sja1105_packing(buf, &entry->mask_iotag, 143, 143, size, op);
+ sja1105_packing(buf, &entry->mask_vlanid, 142, 131, size, op);
+ sja1105_packing(buf, &entry->mask_macaddr, 130, 83, size, op);
+ sja1105_packing(buf, &entry->iotag, 82, 82, size, op);
+ sja1105_packing(buf, &entry->vlanid, 81, 70, size, op);
+ sja1105_packing(buf, &entry->macaddr, 69, 22, size, op);
+ sja1105_packing(buf, &entry->destports, 21, 17, size, op);
+ sja1105_packing(buf, &entry->enfport, 16, 16, size, op);
+ sja1105_packing(buf, &entry->index, 15, 6, size, op);
+ return size;
+}
+
+size_t sja1110_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1110_SIZE_L2_LOOKUP_ENTRY;
+ struct sja1105_l2_lookup_entry *entry = entry_ptr;
+
+ if (entry->lockeds) {
+ sja1105_packing(buf, &entry->trap, 168, 168, size, op);
+ sja1105_packing(buf, &entry->mirrvlan, 167, 156, size, op);
+ sja1105_packing(buf, &entry->takets, 155, 155, size, op);
+ sja1105_packing(buf, &entry->mirr, 154, 154, size, op);
+ sja1105_packing(buf, &entry->retag, 153, 153, size, op);
+ } else {
+ sja1105_packing(buf, &entry->touched, 168, 168, size, op);
+ sja1105_packing(buf, &entry->age, 167, 153, size, op);
+ }
+ sja1105_packing(buf, &entry->mask_iotag, 152, 152, size, op);
+ sja1105_packing(buf, &entry->mask_vlanid, 151, 140, size, op);
+ sja1105_packing(buf, &entry->mask_macaddr, 139, 92, size, op);
+ sja1105_packing(buf, &entry->mask_srcport, 91, 88, size, op);
+ sja1105_packing(buf, &entry->iotag, 87, 87, size, op);
+ sja1105_packing(buf, &entry->vlanid, 86, 75, size, op);
+ sja1105_packing(buf, &entry->macaddr, 74, 27, size, op);
+ sja1105_packing(buf, &entry->srcport, 26, 23, size, op);
+ sja1105_packing(buf, &entry->destports, 22, 12, size, op);
+ sja1105_packing(buf, &entry->enfport, 11, 11, size, op);
+ sja1105_packing(buf, &entry->index, 10, 1, size, op);
+ return size;
+}
+
+static size_t sja1105_l2_policing_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_L2_POLICING_ENTRY;
+ struct sja1105_l2_policing_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->sharindx, 63, 58, size, op);
+ sja1105_packing(buf, &entry->smax, 57, 42, size, op);
+ sja1105_packing(buf, &entry->rate, 41, 26, size, op);
+ sja1105_packing(buf, &entry->maxlen, 25, 15, size, op);
+ sja1105_packing(buf, &entry->partition, 14, 12, size, op);
+ return size;
+}
+
+size_t sja1110_l2_policing_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_policing_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_L2_POLICING_ENTRY;
+
+ sja1105_packing(buf, &entry->sharindx, 63, 57, size, op);
+ sja1105_packing(buf, &entry->smax, 56, 39, size, op);
+ sja1105_packing(buf, &entry->rate, 38, 21, size, op);
+ sja1105_packing(buf, &entry->maxlen, 20, 10, size, op);
+ sja1105_packing(buf, &entry->partition, 9, 7, size, op);
+ return size;
+}
+
+static size_t sja1105et_mac_config_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105ET_SIZE_MAC_CONFIG_ENTRY;
+ struct sja1105_mac_config_entry *entry = entry_ptr;
+ int offset, i;
+
+ for (i = 0, offset = 72; i < 8; i++, offset += 19) {
+ sja1105_packing(buf, &entry->enabled[i],
+ offset + 0, offset + 0, size, op);
+ sja1105_packing(buf, &entry->base[i],
+ offset + 9, offset + 1, size, op);
+ sja1105_packing(buf, &entry->top[i],
+ offset + 18, offset + 10, size, op);
+ }
+ sja1105_packing(buf, &entry->ifg, 71, 67, size, op);
+ sja1105_packing(buf, &entry->speed, 66, 65, size, op);
+ sja1105_packing(buf, &entry->tp_delin, 64, 49, size, op);
+ sja1105_packing(buf, &entry->tp_delout, 48, 33, size, op);
+ sja1105_packing(buf, &entry->maxage, 32, 25, size, op);
+ sja1105_packing(buf, &entry->vlanprio, 24, 22, size, op);
+ sja1105_packing(buf, &entry->vlanid, 21, 10, size, op);
+ sja1105_packing(buf, &entry->ing_mirr, 9, 9, size, op);
+ sja1105_packing(buf, &entry->egr_mirr, 8, 8, size, op);
+ sja1105_packing(buf, &entry->drpnona664, 7, 7, size, op);
+ sja1105_packing(buf, &entry->drpdtag, 6, 6, size, op);
+ sja1105_packing(buf, &entry->drpuntag, 5, 5, size, op);
+ sja1105_packing(buf, &entry->retag, 4, 4, size, op);
+ sja1105_packing(buf, &entry->dyn_learn, 3, 3, size, op);
+ sja1105_packing(buf, &entry->egress, 2, 2, size, op);
+ sja1105_packing(buf, &entry->ingress, 1, 1, size, op);
+ return size;
+}
+
+size_t sja1105pqrs_mac_config_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY;
+ struct sja1105_mac_config_entry *entry = entry_ptr;
+ int offset, i;
+
+ for (i = 0, offset = 104; i < 8; i++, offset += 19) {
+ sja1105_packing(buf, &entry->enabled[i],
+ offset + 0, offset + 0, size, op);
+ sja1105_packing(buf, &entry->base[i],
+ offset + 9, offset + 1, size, op);
+ sja1105_packing(buf, &entry->top[i],
+ offset + 18, offset + 10, size, op);
+ }
+ sja1105_packing(buf, &entry->ifg, 103, 99, size, op);
+ sja1105_packing(buf, &entry->speed, 98, 97, size, op);
+ sja1105_packing(buf, &entry->tp_delin, 96, 81, size, op);
+ sja1105_packing(buf, &entry->tp_delout, 80, 65, size, op);
+ sja1105_packing(buf, &entry->maxage, 64, 57, size, op);
+ sja1105_packing(buf, &entry->vlanprio, 56, 54, size, op);
+ sja1105_packing(buf, &entry->vlanid, 53, 42, size, op);
+ sja1105_packing(buf, &entry->ing_mirr, 41, 41, size, op);
+ sja1105_packing(buf, &entry->egr_mirr, 40, 40, size, op);
+ sja1105_packing(buf, &entry->drpnona664, 39, 39, size, op);
+ sja1105_packing(buf, &entry->drpdtag, 38, 38, size, op);
+ sja1105_packing(buf, &entry->drpuntag, 35, 35, size, op);
+ sja1105_packing(buf, &entry->retag, 34, 34, size, op);
+ sja1105_packing(buf, &entry->dyn_learn, 33, 33, size, op);
+ sja1105_packing(buf, &entry->egress, 32, 32, size, op);
+ sja1105_packing(buf, &entry->ingress, 31, 31, size, op);
+ return size;
+}
+
+size_t sja1110_mac_config_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY;
+ struct sja1105_mac_config_entry *entry = entry_ptr;
+ int offset, i;
+
+ for (i = 0, offset = 104; i < 8; i++, offset += 19) {
+ sja1105_packing(buf, &entry->enabled[i],
+ offset + 0, offset + 0, size, op);
+ sja1105_packing(buf, &entry->base[i],
+ offset + 9, offset + 1, size, op);
+ sja1105_packing(buf, &entry->top[i],
+ offset + 18, offset + 10, size, op);
+ }
+ sja1105_packing(buf, &entry->speed, 98, 96, size, op);
+ sja1105_packing(buf, &entry->tp_delin, 95, 80, size, op);
+ sja1105_packing(buf, &entry->tp_delout, 79, 64, size, op);
+ sja1105_packing(buf, &entry->maxage, 63, 56, size, op);
+ sja1105_packing(buf, &entry->vlanprio, 55, 53, size, op);
+ sja1105_packing(buf, &entry->vlanid, 52, 41, size, op);
+ sja1105_packing(buf, &entry->ing_mirr, 40, 40, size, op);
+ sja1105_packing(buf, &entry->egr_mirr, 39, 39, size, op);
+ sja1105_packing(buf, &entry->drpnona664, 38, 38, size, op);
+ sja1105_packing(buf, &entry->drpdtag, 37, 37, size, op);
+ sja1105_packing(buf, &entry->drpuntag, 34, 34, size, op);
+ sja1105_packing(buf, &entry->retag, 33, 33, size, op);
+ sja1105_packing(buf, &entry->dyn_learn, 32, 32, size, op);
+ sja1105_packing(buf, &entry->egress, 31, 31, size, op);
+ sja1105_packing(buf, &entry->ingress, 30, 30, size, op);
+ sja1105_packing(buf, &entry->ifg, 10, 5, size, op);
+ return size;
+}
+
+static size_t
+sja1105_schedule_entry_points_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_schedule_entry_points_params_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY;
+
+ sja1105_packing(buf, &entry->clksrc, 31, 30, size, op);
+ sja1105_packing(buf, &entry->actsubsch, 29, 27, size, op);
+ return size;
+}
+
+static size_t
+sja1105_schedule_entry_points_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_schedule_entry_points_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY;
+
+ sja1105_packing(buf, &entry->subschindx, 31, 29, size, op);
+ sja1105_packing(buf, &entry->delta, 28, 11, size, op);
+ sja1105_packing(buf, &entry->address, 10, 1, size, op);
+ return size;
+}
+
+static size_t
+sja1110_schedule_entry_points_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_schedule_entry_points_entry *entry = entry_ptr;
+ const size_t size = SJA1110_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY;
+
+ sja1105_packing(buf, &entry->subschindx, 63, 61, size, op);
+ sja1105_packing(buf, &entry->delta, 60, 43, size, op);
+ sja1105_packing(buf, &entry->address, 42, 31, size, op);
+ return size;
+}
+
+static size_t sja1105_schedule_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY;
+ struct sja1105_schedule_params_entry *entry = entry_ptr;
+ int offset, i;
+
+ for (i = 0, offset = 16; i < 8; i++, offset += 10)
+ sja1105_packing(buf, &entry->subscheind[i],
+ offset + 9, offset + 0, size, op);
+ return size;
+}
+
+static size_t sja1110_schedule_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_schedule_params_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY;
+ int offset, i;
+
+ for (i = 0, offset = 0; i < 8; i++, offset += 12)
+ sja1105_packing(buf, &entry->subscheind[i],
+ offset + 11, offset + 0, size, op);
+ return size;
+}
+
+static size_t sja1105_schedule_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_SCHEDULE_ENTRY;
+ struct sja1105_schedule_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->winstindex, 63, 54, size, op);
+ sja1105_packing(buf, &entry->winend, 53, 53, size, op);
+ sja1105_packing(buf, &entry->winst, 52, 52, size, op);
+ sja1105_packing(buf, &entry->destports, 51, 47, size, op);
+ sja1105_packing(buf, &entry->setvalid, 46, 46, size, op);
+ sja1105_packing(buf, &entry->txen, 45, 45, size, op);
+ sja1105_packing(buf, &entry->resmedia_en, 44, 44, size, op);
+ sja1105_packing(buf, &entry->resmedia, 43, 36, size, op);
+ sja1105_packing(buf, &entry->vlindex, 35, 26, size, op);
+ sja1105_packing(buf, &entry->delta, 25, 8, size, op);
+ return size;
+}
+
+static size_t sja1110_schedule_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1110_SIZE_SCHEDULE_ENTRY;
+ struct sja1105_schedule_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->winstindex, 95, 84, size, op);
+ sja1105_packing(buf, &entry->winend, 83, 83, size, op);
+ sja1105_packing(buf, &entry->winst, 82, 82, size, op);
+ sja1105_packing(buf, &entry->destports, 81, 71, size, op);
+ sja1105_packing(buf, &entry->setvalid, 70, 70, size, op);
+ sja1105_packing(buf, &entry->txen, 69, 69, size, op);
+ sja1105_packing(buf, &entry->resmedia_en, 68, 68, size, op);
+ sja1105_packing(buf, &entry->resmedia, 67, 60, size, op);
+ sja1105_packing(buf, &entry->vlindex, 59, 48, size, op);
+ sja1105_packing(buf, &entry->delta, 47, 30, size, op);
+ return size;
+}
+
+static size_t
+sja1105_vl_forwarding_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_forwarding_params_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY;
+ int offset, i;
+
+ for (i = 0, offset = 16; i < 8; i++, offset += 10)
+ sja1105_packing(buf, &entry->partspc[i],
+ offset + 9, offset + 0, size, op);
+ sja1105_packing(buf, &entry->debugen, 15, 15, size, op);
+ return size;
+}
+
+static size_t
+sja1110_vl_forwarding_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_forwarding_params_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY;
+ int offset, i;
+
+ for (i = 0, offset = 8; i < 8; i++, offset += 11)
+ sja1105_packing(buf, &entry->partspc[i],
+ offset + 10, offset + 0, size, op);
+ sja1105_packing(buf, &entry->debugen, 7, 7, size, op);
+ return size;
+}
+
+static size_t sja1105_vl_forwarding_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_forwarding_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_FORWARDING_ENTRY;
+
+ sja1105_packing(buf, &entry->type, 31, 31, size, op);
+ sja1105_packing(buf, &entry->priority, 30, 28, size, op);
+ sja1105_packing(buf, &entry->partition, 27, 25, size, op);
+ sja1105_packing(buf, &entry->destports, 24, 20, size, op);
+ return size;
+}
+
+static size_t sja1110_vl_forwarding_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_forwarding_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_FORWARDING_ENTRY;
+
+ sja1105_packing(buf, &entry->type, 31, 31, size, op);
+ sja1105_packing(buf, &entry->priority, 30, 28, size, op);
+ sja1105_packing(buf, &entry->partition, 27, 25, size, op);
+ sja1105_packing(buf, &entry->destports, 24, 14, size, op);
+ return size;
+}
+
+size_t sja1105_vl_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_lookup_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_LOOKUP_ENTRY;
+
+ if (entry->format == SJA1105_VL_FORMAT_PSFP) {
+ /* Interpreting vllupformat as 0 */
+ sja1105_packing(buf, &entry->destports,
+ 95, 91, size, op);
+ sja1105_packing(buf, &entry->iscritical,
+ 90, 90, size, op);
+ sja1105_packing(buf, &entry->macaddr,
+ 89, 42, size, op);
+ sja1105_packing(buf, &entry->vlanid,
+ 41, 30, size, op);
+ sja1105_packing(buf, &entry->port,
+ 29, 27, size, op);
+ sja1105_packing(buf, &entry->vlanprior,
+ 26, 24, size, op);
+ } else {
+ /* Interpreting vllupformat as 1 */
+ sja1105_packing(buf, &entry->egrmirr,
+ 95, 91, size, op);
+ sja1105_packing(buf, &entry->ingrmirr,
+ 90, 90, size, op);
+ sja1105_packing(buf, &entry->vlid,
+ 57, 42, size, op);
+ sja1105_packing(buf, &entry->port,
+ 29, 27, size, op);
+ }
+ return size;
+}
+
+size_t sja1110_vl_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_lookup_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_LOOKUP_ENTRY;
+
+ if (entry->format == SJA1105_VL_FORMAT_PSFP) {
+ /* Interpreting vllupformat as 0 */
+ sja1105_packing(buf, &entry->destports,
+ 94, 84, size, op);
+ sja1105_packing(buf, &entry->iscritical,
+ 83, 83, size, op);
+ sja1105_packing(buf, &entry->macaddr,
+ 82, 35, size, op);
+ sja1105_packing(buf, &entry->vlanid,
+ 34, 23, size, op);
+ sja1105_packing(buf, &entry->port,
+ 22, 19, size, op);
+ sja1105_packing(buf, &entry->vlanprior,
+ 18, 16, size, op);
+ } else {
+ /* Interpreting vllupformat as 1 */
+ sja1105_packing(buf, &entry->egrmirr,
+ 94, 84, size, op);
+ sja1105_packing(buf, &entry->ingrmirr,
+ 83, 83, size, op);
+ sja1105_packing(buf, &entry->vlid,
+ 50, 35, size, op);
+ sja1105_packing(buf, &entry->port,
+ 22, 19, size, op);
+ }
+ return size;
+}
+
+static size_t sja1105_vl_policing_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_policing_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_POLICING_ENTRY;
+
+ sja1105_packing(buf, &entry->type, 63, 63, size, op);
+ sja1105_packing(buf, &entry->maxlen, 62, 52, size, op);
+ sja1105_packing(buf, &entry->sharindx, 51, 42, size, op);
+ if (entry->type == 0) {
+ sja1105_packing(buf, &entry->bag, 41, 28, size, op);
+ sja1105_packing(buf, &entry->jitter, 27, 18, size, op);
+ }
+ return size;
+}
+
+size_t sja1110_vl_policing_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_policing_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_POLICING_ENTRY;
+
+ sja1105_packing(buf, &entry->type, 63, 63, size, op);
+ sja1105_packing(buf, &entry->maxlen, 62, 52, size, op);
+ sja1105_packing(buf, &entry->sharindx, 51, 40, size, op);
+ if (entry->type == 0) {
+ sja1105_packing(buf, &entry->bag, 41, 28, size, op);
+ sja1105_packing(buf, &entry->jitter, 27, 18, size, op);
+ }
+ return size;
+}
+
+size_t sja1105_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY;
+ struct sja1105_vlan_lookup_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->ving_mirr, 63, 59, size, op);
+ sja1105_packing(buf, &entry->vegr_mirr, 58, 54, size, op);
+ sja1105_packing(buf, &entry->vmemb_port, 53, 49, size, op);
+ sja1105_packing(buf, &entry->vlan_bc, 48, 44, size, op);
+ sja1105_packing(buf, &entry->tag_port, 43, 39, size, op);
+ sja1105_packing(buf, &entry->vlanid, 38, 27, size, op);
+ return size;
+}
+
+size_t sja1110_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vlan_lookup_entry *entry = entry_ptr;
+ const size_t size = SJA1110_SIZE_VLAN_LOOKUP_ENTRY;
+
+ sja1105_packing(buf, &entry->ving_mirr, 95, 85, size, op);
+ sja1105_packing(buf, &entry->vegr_mirr, 84, 74, size, op);
+ sja1105_packing(buf, &entry->vmemb_port, 73, 63, size, op);
+ sja1105_packing(buf, &entry->vlan_bc, 62, 52, size, op);
+ sja1105_packing(buf, &entry->tag_port, 51, 41, size, op);
+ sja1105_packing(buf, &entry->type_entry, 40, 39, size, op);
+ sja1105_packing(buf, &entry->vlanid, 38, 27, size, op);
+ return size;
+}
+
+static size_t sja1105_xmii_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_XMII_PARAMS_ENTRY;
+ struct sja1105_xmii_params_entry *entry = entry_ptr;
+ int offset, i;
+
+ for (i = 0, offset = 17; i < 5; i++, offset += 3) {
+ sja1105_packing(buf, &entry->xmii_mode[i],
+ offset + 1, offset + 0, size, op);
+ sja1105_packing(buf, &entry->phy_mac[i],
+ offset + 2, offset + 2, size, op);
+ }
+ return size;
+}
+
+size_t sja1110_xmii_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1110_SIZE_XMII_PARAMS_ENTRY;
+ struct sja1105_xmii_params_entry *entry = entry_ptr;
+ int offset, i;
+
+ for (i = 0, offset = 20; i < SJA1110_NUM_PORTS; i++, offset += 4) {
+ sja1105_packing(buf, &entry->xmii_mode[i],
+ offset + 1, offset + 0, size, op);
+ sja1105_packing(buf, &entry->phy_mac[i],
+ offset + 2, offset + 2, size, op);
+ sja1105_packing(buf, &entry->special[i],
+ offset + 3, offset + 3, size, op);
+ }
+ return size;
+}
+
+size_t sja1105_retagging_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_retagging_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_RETAGGING_ENTRY;
+
+ sja1105_packing(buf, &entry->egr_port, 63, 59, size, op);
+ sja1105_packing(buf, &entry->ing_port, 58, 54, size, op);
+ sja1105_packing(buf, &entry->vlan_ing, 53, 42, size, op);
+ sja1105_packing(buf, &entry->vlan_egr, 41, 30, size, op);
+ sja1105_packing(buf, &entry->do_not_learn, 29, 29, size, op);
+ sja1105_packing(buf, &entry->use_dest_ports, 28, 28, size, op);
+ sja1105_packing(buf, &entry->destports, 27, 23, size, op);
+ return size;
+}
+
+size_t sja1110_retagging_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_retagging_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_RETAGGING_ENTRY;
+
+ sja1105_packing(buf, &entry->egr_port, 63, 53, size, op);
+ sja1105_packing(buf, &entry->ing_port, 52, 42, size, op);
+ sja1105_packing(buf, &entry->vlan_ing, 41, 30, size, op);
+ sja1105_packing(buf, &entry->vlan_egr, 29, 18, size, op);
+ sja1105_packing(buf, &entry->do_not_learn, 17, 17, size, op);
+ sja1105_packing(buf, &entry->use_dest_ports, 16, 16, size, op);
+ sja1105_packing(buf, &entry->destports, 15, 5, size, op);
+ return size;
+}
+
+static size_t sja1110_pcp_remapping_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1110_pcp_remapping_entry *entry = entry_ptr;
+ const size_t size = SJA1110_SIZE_PCP_REMAPPING_ENTRY;
+ int offset, i;
+
+ for (i = 0, offset = 8; i < SJA1105_NUM_TC; i++, offset += 3)
+ sja1105_packing(buf, &entry->egrpcp[i],
+ offset + 2, offset + 0, size, op);
+
+ return size;
+}
+
+size_t sja1105_table_header_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_TABLE_HEADER;
+ struct sja1105_table_header *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->block_id, 31, 24, size, op);
+ sja1105_packing(buf, &entry->len, 55, 32, size, op);
+ sja1105_packing(buf, &entry->crc, 95, 64, size, op);
+ return size;
+}
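+
+/* For reference, the 96-bit header layout packed above is:
+ *
+ *	 95        64 63     56 55        32 31       24 23      0
+ *	+------------+---------+------------+-----------+---------+
+ *	|    CRC     | unused  |    LEN     | BLOCK_ID  | unused  |
+ *	+------------+---------+------------+-----------+---------+
+ */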
+
+/* WARNING: the *hdr pointer is really non-const, because this function
+ * modifies the header's CRC as part of a 2-stage packing operation
+ */
+void
+sja1105_table_header_pack_with_crc(void *buf, struct sja1105_table_header *hdr)
+{
+ /* First pack the table as-is, then calculate the CRC, and
+ * finally put the proper CRC into the packed buffer
+ */
+ memset(buf, 0, SJA1105_SIZE_TABLE_HEADER);
+ sja1105_table_header_packing(buf, hdr, PACK);
+ hdr->crc = sja1105_crc32(buf, SJA1105_SIZE_TABLE_HEADER - 4);
+ sja1105_pack(buf + SJA1105_SIZE_TABLE_HEADER - 4, &hdr->crc, 31, 0, 4);
+}
+
+static void sja1105_table_write_crc(u8 *table_start, u8 *crc_ptr)
+{
+ u64 computed_crc;
+ int len_bytes;
+
+ len_bytes = (uintptr_t)(crc_ptr - table_start);
+ computed_crc = sja1105_crc32(table_start, len_bytes);
+ sja1105_pack(crc_ptr, &computed_crc, 31, 0, 4);
+}
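+
+/* sja1105_static_config_pack() below passes a table_start which points
+ * just past the packed table header, so the CRC computed here covers the
+ * packed entries only; the header's own CRC is filled in separately by
+ * sja1105_table_header_pack_with_crc().
+ */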
+
+/* The block IDs that the switches support are unfortunately sparse, so keep a
+ * mapping table to "block indices" and translate back and forth so that we
+ * don't waste memory in struct sja1105_static_config.
+ * Also, since the block ID comes from essentially untrusted input (unpacking
+ * the static config from userspace), it has to be sanitized (range-checked)
+ * before blindly indexing kernel memory with the blk_idx.
+ */
+static u64 blk_id_map[BLK_IDX_MAX] = {
+ [BLK_IDX_SCHEDULE] = BLKID_SCHEDULE,
+ [BLK_IDX_SCHEDULE_ENTRY_POINTS] = BLKID_SCHEDULE_ENTRY_POINTS,
+ [BLK_IDX_VL_LOOKUP] = BLKID_VL_LOOKUP,
+ [BLK_IDX_VL_POLICING] = BLKID_VL_POLICING,
+ [BLK_IDX_VL_FORWARDING] = BLKID_VL_FORWARDING,
+ [BLK_IDX_L2_LOOKUP] = BLKID_L2_LOOKUP,
+ [BLK_IDX_L2_POLICING] = BLKID_L2_POLICING,
+ [BLK_IDX_VLAN_LOOKUP] = BLKID_VLAN_LOOKUP,
+ [BLK_IDX_L2_FORWARDING] = BLKID_L2_FORWARDING,
+ [BLK_IDX_MAC_CONFIG] = BLKID_MAC_CONFIG,
+ [BLK_IDX_SCHEDULE_PARAMS] = BLKID_SCHEDULE_PARAMS,
+ [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = BLKID_SCHEDULE_ENTRY_POINTS_PARAMS,
+ [BLK_IDX_VL_FORWARDING_PARAMS] = BLKID_VL_FORWARDING_PARAMS,
+ [BLK_IDX_L2_LOOKUP_PARAMS] = BLKID_L2_LOOKUP_PARAMS,
+ [BLK_IDX_L2_FORWARDING_PARAMS] = BLKID_L2_FORWARDING_PARAMS,
+ [BLK_IDX_AVB_PARAMS] = BLKID_AVB_PARAMS,
+ [BLK_IDX_GENERAL_PARAMS] = BLKID_GENERAL_PARAMS,
+ [BLK_IDX_RETAGGING] = BLKID_RETAGGING,
+ [BLK_IDX_XMII_PARAMS] = BLKID_XMII_PARAMS,
+ [BLK_IDX_PCP_REMAPPING] = BLKID_PCP_REMAPPING,
+};
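+
+/* A minimal sketch of the sanitizing reverse lookup implied by the
+ * comment above; the helper name is illustrative, but the driver's
+ * unpacking path would perform an equivalent range-checked search:
+ *
+ *	static int sja1105_find_blk_idx(u64 block_id)
+ *	{
+ *		enum sja1105_blk_idx i;
+ *
+ *		for (i = 0; i < BLK_IDX_MAX; i++)
+ *			if (blk_id_map[i] == block_id)
+ *				return i;
+ *		return -EINVAL;
+ *	}
+ */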
+
+const char *sja1105_static_config_error_msg[] = {
+ [SJA1105_CONFIG_OK] = "",
+ [SJA1105_TTETHERNET_NOT_SUPPORTED] =
+ "schedule-table present, but TTEthernet is "
+ "only supported on T and Q/S",
+ [SJA1105_INCORRECT_TTETHERNET_CONFIGURATION] =
+ "schedule-table present, but one of "
+ "schedule-entry-points-table, schedule-parameters-table or "
+ "schedule-entry-points-parameters table is empty",
+ [SJA1105_INCORRECT_VIRTUAL_LINK_CONFIGURATION] =
+ "vl-lookup-table present, but one of vl-policing-table, "
+ "vl-forwarding-table or vl-forwarding-parameters-table is empty",
+ [SJA1105_MISSING_L2_POLICING_TABLE] =
+ "l2-policing-table needs to have at least one entry",
+ [SJA1105_MISSING_L2_FORWARDING_TABLE] =
+ "l2-forwarding-table is either missing or incomplete",
+ [SJA1105_MISSING_L2_FORWARDING_PARAMS_TABLE] =
+ "l2-forwarding-parameters-table is missing",
+ [SJA1105_MISSING_GENERAL_PARAMS_TABLE] =
+ "general-parameters-table is missing",
+ [SJA1105_MISSING_VLAN_TABLE] =
+ "vlan-lookup-table needs to have at least the default untagged VLAN",
+ [SJA1105_MISSING_XMII_TABLE] =
+ "xmii-table is missing",
+ [SJA1105_MISSING_MAC_TABLE] =
+ "mac-configuration-table needs to contain an entry for each port",
+ [SJA1105_OVERCOMMITTED_FRAME_MEMORY] =
+ "Not allowed to overcommit frame memory. L2 memory partitions "
+ "and VL memory partitions share the same space. The sum of all "
+ "16 memory partitions is not allowed to be larger than 929 "
+ "128-byte blocks (or 910 with retagging). Please adjust "
+ "l2-forwarding-parameters-table.part_spc and/or "
+ "vl-forwarding-parameters-table.partspc.",
+};
+
+static sja1105_config_valid_t
+static_config_check_memory_size(const struct sja1105_table *tables, int max_mem)
+{
+ const struct sja1105_l2_forwarding_params_entry *l2_fwd_params;
+ const struct sja1105_vl_forwarding_params_entry *vl_fwd_params;
+ int i, mem = 0;
+
+ l2_fwd_params = tables[BLK_IDX_L2_FORWARDING_PARAMS].entries;
+
+ for (i = 0; i < 8; i++)
+ mem += l2_fwd_params->part_spc[i];
+
+ if (tables[BLK_IDX_VL_FORWARDING_PARAMS].entry_count) {
+ vl_fwd_params = tables[BLK_IDX_VL_FORWARDING_PARAMS].entries;
+ for (i = 0; i < 8; i++)
+ mem += vl_fwd_params->partspc[i];
+ }
+
+ if (tables[BLK_IDX_RETAGGING].entry_count)
+ max_mem -= SJA1105_FRAME_MEMORY_RETAGGING_OVERHEAD;
+
+ if (mem > max_mem)
+ return SJA1105_OVERCOMMITTED_FRAME_MEMORY;
+
+ return SJA1105_CONFIG_OK;
+}
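+
+/* E.g. with the SJA1105 numbers quoted in the error message above:
+ * max_mem is 929 blocks of 128 bytes, and the retagging overhead of
+ * 929 - 910 = 19 blocks is subtracted whenever the retagging table is
+ * in use.
+ */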
+
+sja1105_config_valid_t
+sja1105_static_config_check_valid(const struct sja1105_static_config *config,
+ int max_mem)
+{
+ const struct sja1105_table *tables = config->tables;
+#define IS_FULL(blk_idx) \
+ (tables[blk_idx].entry_count == tables[blk_idx].ops->max_entry_count)
+
+ if (tables[BLK_IDX_SCHEDULE].entry_count) {
+ if (!tables[BLK_IDX_SCHEDULE].ops->max_entry_count)
+ return SJA1105_TTETHERNET_NOT_SUPPORTED;
+
+ if (tables[BLK_IDX_SCHEDULE_ENTRY_POINTS].entry_count == 0)
+ return SJA1105_INCORRECT_TTETHERNET_CONFIGURATION;
+
+ if (!IS_FULL(BLK_IDX_SCHEDULE_PARAMS))
+ return SJA1105_INCORRECT_TTETHERNET_CONFIGURATION;
+
+ if (!IS_FULL(BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS))
+ return SJA1105_INCORRECT_TTETHERNET_CONFIGURATION;
+ }
+ if (tables[BLK_IDX_VL_LOOKUP].entry_count) {
+ struct sja1105_vl_lookup_entry *vl_lookup;
+ bool has_critical_links = false;
+ int i;
+
+ vl_lookup = tables[BLK_IDX_VL_LOOKUP].entries;
+
+ for (i = 0; i < tables[BLK_IDX_VL_LOOKUP].entry_count; i++) {
+ if (vl_lookup[i].iscritical) {
+ has_critical_links = true;
+ break;
+ }
+ }
+
+ if (tables[BLK_IDX_VL_POLICING].entry_count == 0 &&
+ has_critical_links)
+ return SJA1105_INCORRECT_VIRTUAL_LINK_CONFIGURATION;
+
+ if (tables[BLK_IDX_VL_FORWARDING].entry_count == 0 &&
+ has_critical_links)
+ return SJA1105_INCORRECT_VIRTUAL_LINK_CONFIGURATION;
+
+ if (tables[BLK_IDX_VL_FORWARDING_PARAMS].entry_count == 0 &&
+ has_critical_links)
+ return SJA1105_INCORRECT_VIRTUAL_LINK_CONFIGURATION;
+ }
+
+ if (tables[BLK_IDX_L2_POLICING].entry_count == 0)
+ return SJA1105_MISSING_L2_POLICING_TABLE;
+
+ if (tables[BLK_IDX_VLAN_LOOKUP].entry_count == 0)
+ return SJA1105_MISSING_VLAN_TABLE;
+
+ if (!IS_FULL(BLK_IDX_L2_FORWARDING))
+ return SJA1105_MISSING_L2_FORWARDING_TABLE;
+
+ if (!IS_FULL(BLK_IDX_MAC_CONFIG))
+ return SJA1105_MISSING_MAC_TABLE;
+
+ if (!IS_FULL(BLK_IDX_L2_FORWARDING_PARAMS))
+ return SJA1105_MISSING_L2_FORWARDING_PARAMS_TABLE;
+
+ if (!IS_FULL(BLK_IDX_GENERAL_PARAMS))
+ return SJA1105_MISSING_GENERAL_PARAMS_TABLE;
+
+ if (!IS_FULL(BLK_IDX_XMII_PARAMS))
+ return SJA1105_MISSING_XMII_TABLE;
+
+ return static_config_check_memory_size(tables, max_mem);
+#undef IS_FULL
+}
+
+void
+sja1105_static_config_pack(void *buf, struct sja1105_static_config *config)
+{
+ struct sja1105_table_header header = {0};
+ enum sja1105_blk_idx i;
+ char *p = buf;
+ int j;
+
+ sja1105_pack(p, &config->device_id, 31, 0, 4);
+ p += SJA1105_SIZE_DEVICE_ID;
+
+ for (i = 0; i < BLK_IDX_MAX; i++) {
+ const struct sja1105_table *table;
+ char *table_start;
+
+ table = &config->tables[i];
+ if (!table->entry_count)
+ continue;
+
+ header.block_id = blk_id_map[i];
+ header.len = table->entry_count *
+ table->ops->packed_entry_size / 4;
+ sja1105_table_header_pack_with_crc(p, &header);
+ p += SJA1105_SIZE_TABLE_HEADER;
+ table_start = p;
+ for (j = 0; j < table->entry_count; j++) {
+ u8 *entry_ptr = table->entries;
+
+ entry_ptr += j * table->ops->unpacked_entry_size;
+ memset(p, 0, table->ops->packed_entry_size);
+ table->ops->packing(p, entry_ptr, PACK);
+ p += table->ops->packed_entry_size;
+ }
+ sja1105_table_write_crc(table_start, p);
+ p += 4;
+ }
+ /* Final header:
+ * Block ID does not matter
+ * Length of 0 marks that header is final
+ * CRC will be replaced on-the-fly on "config upload"
+ */
+ header.block_id = 0;
+ header.len = 0;
+ header.crc = 0xDEADBEEF;
+ memset(p, 0, SJA1105_SIZE_TABLE_HEADER);
+ sja1105_table_header_packing(p, &header, PACK);
+}
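+
+/* The packed stream produced above therefore has the following layout,
+ * with one header/entries/CRC group per non-empty table:
+ *
+ *	DEVICE_ID (4 bytes)
+ *	TABLE_HEADER (12 bytes) | packed entries | data CRC (4 bytes)
+ *	...
+ *	final TABLE_HEADER (12 bytes, len = 0, no data CRC)
+ */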
+
+size_t
+sja1105_static_config_get_length(const struct sja1105_static_config *config)
+{
+ unsigned int sum;
+ unsigned int header_count;
+ enum sja1105_blk_idx i;
+
+ /* Ending header */
+ header_count = 1;
+ sum = SJA1105_SIZE_DEVICE_ID;
+
+ /* Tables (headers and entries) */
+ for (i = 0; i < BLK_IDX_MAX; i++) {
+ const struct sja1105_table *table;
+
+ table = &config->tables[i];
+ if (table->entry_count)
+ header_count++;
+
+ sum += table->ops->packed_entry_size * table->entry_count;
+ }
+ /* Headers have an additional CRC at the end */
+ sum += header_count * (SJA1105_SIZE_TABLE_HEADER + 4);
+ /* Last header does not have an extra CRC because there is no data */
+ sum -= 4;
+
+ return sum;
+}
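+
+/* Worked example with illustrative sizes: a config containing a single
+ * non-empty table of N entries, each packing to S bytes, totals
+ *
+ *	4 + 2 * (12 + 4) - 4 + N * S = 32 + N * S
+ *
+ * bytes: the device ID, one table header plus data CRC, the packed
+ * entries, and the final header (which carries no data CRC, hence the
+ * subtracted 4).
+ */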
+
+/* Compatibility matrices */
+
+/* SJA1105E: First generation, no TTEthernet */
+const struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1105et_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1105ET_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1105_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1105_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1105_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1105et_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105ET_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1105et_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1105_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_AVB_PARAMS] = {
+ .packing = sja1105et_avb_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
+ .packed_entry_size = SJA1105ET_SIZE_AVB_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1105et_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1105_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1105_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+};
+
+/* SJA1105T: First generation, TTEthernet */
+const struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_SCHEDULE] = {
+ .packing = sja1105_schedule_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_ENTRY_POINTS] = {
+ .packing = sja1105_schedule_entry_points_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT,
+ },
+ [BLK_IDX_VL_LOOKUP] = {
+ .packing = sja1105_vl_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
+ },
+ [BLK_IDX_VL_POLICING] = {
+ .packing = sja1105_vl_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_POLICING_COUNT,
+ },
+ [BLK_IDX_VL_FORWARDING] = {
+ .packing = sja1105_vl_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1105et_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1105ET_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1105_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1105_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1105_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1105et_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105ET_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_PARAMS] = {
+ .packing = sja1105_schedule_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_params_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {
+ .packing = sja1105_schedule_entry_points_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_params_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
+ },
+ [BLK_IDX_VL_FORWARDING_PARAMS] = {
+ .packing = sja1105_vl_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1105et_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1105_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_AVB_PARAMS] = {
+ .packing = sja1105et_avb_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
+ .packed_entry_size = SJA1105ET_SIZE_AVB_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1105et_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1105_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1105_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+};
+
+/* SJA1105P: Second generation, no TTEthernet, no SGMII */
+const struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1105pqrs_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1105_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1105_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1105_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1105pqrs_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1105pqrs_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1105_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_AVB_PARAMS] = {
+ .packing = sja1105pqrs_avb_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1105pqrs_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1105_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1105_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+};
+
+/* SJA1105Q: Second generation, TTEthernet, no SGMII */
+const struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_SCHEDULE] = {
+ .packing = sja1105_schedule_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_ENTRY_POINTS] = {
+ .packing = sja1105_schedule_entry_points_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT,
+ },
+ [BLK_IDX_VL_LOOKUP] = {
+ .packing = sja1105_vl_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
+ },
+ [BLK_IDX_VL_POLICING] = {
+ .packing = sja1105_vl_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_POLICING_COUNT,
+ },
+ [BLK_IDX_VL_FORWARDING] = {
+ .packing = sja1105_vl_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1105pqrs_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1105_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1105_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1105_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1105pqrs_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_PARAMS] = {
+ .packing = sja1105_schedule_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_params_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {
+ .packing = sja1105_schedule_entry_points_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_params_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
+ },
+ [BLK_IDX_VL_FORWARDING_PARAMS] = {
+ .packing = sja1105_vl_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1105pqrs_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1105_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_AVB_PARAMS] = {
+ .packing = sja1105pqrs_avb_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1105pqrs_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1105_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1105_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+};
+
+/* SJA1105R: Second generation, no TTEthernet, SGMII */
+const struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1105pqrs_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1105_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1105_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1105_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1105pqrs_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1105pqrs_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1105_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_AVB_PARAMS] = {
+ .packing = sja1105pqrs_avb_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1105pqrs_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1105_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1105_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+};
+
+/* SJA1105S: Second generation, TTEthernet, SGMII */
+const struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_SCHEDULE] = {
+ .packing = sja1105_schedule_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_ENTRY_POINTS] = {
+ .packing = sja1105_schedule_entry_points_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT,
+ },
+ [BLK_IDX_VL_LOOKUP] = {
+ .packing = sja1105_vl_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
+ },
+ [BLK_IDX_VL_POLICING] = {
+ .packing = sja1105_vl_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_POLICING_COUNT,
+ },
+ [BLK_IDX_VL_FORWARDING] = {
+ .packing = sja1105_vl_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1105pqrs_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1105_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1105_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1105_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1105pqrs_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_PARAMS] = {
+ .packing = sja1105_schedule_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_params_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {
+ .packing = sja1105_schedule_entry_points_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_params_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
+ },
+ [BLK_IDX_VL_FORWARDING_PARAMS] = {
+ .packing = sja1105_vl_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1105pqrs_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1105_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_AVB_PARAMS] = {
+ .packing = sja1105pqrs_avb_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1105pqrs_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1105_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1105_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+};
+
+/* SJA1110A: Third generation */
+const struct sja1105_table_ops sja1110_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_SCHEDULE] = {
+ .packing = sja1110_schedule_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry),
+ .packed_entry_size = SJA1110_SIZE_SCHEDULE_ENTRY,
+ .max_entry_count = SJA1110_MAX_SCHEDULE_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_ENTRY_POINTS] = {
+ .packing = sja1110_schedule_entry_points_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_entry),
+ .packed_entry_size = SJA1110_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT,
+ },
+ [BLK_IDX_VL_LOOKUP] = {
+ .packing = sja1110_vl_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_LOOKUP_ENTRY,
+ .max_entry_count = SJA1110_MAX_VL_LOOKUP_COUNT,
+ },
+ [BLK_IDX_VL_POLICING] = {
+ .packing = sja1110_vl_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_POLICING_ENTRY,
+ .max_entry_count = SJA1110_MAX_VL_POLICING_COUNT,
+ },
+ [BLK_IDX_VL_FORWARDING] = {
+ .packing = sja1110_vl_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_ENTRY,
+ .max_entry_count = SJA1110_MAX_VL_FORWARDING_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1110_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1110_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1110_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1110_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1110_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1110_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1110_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1110_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1110_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1110_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_PARAMS] = {
+ .packing = sja1110_schedule_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_params_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {
+ .packing = sja1105_schedule_entry_points_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_params_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
+ },
+ [BLK_IDX_VL_FORWARDING_PARAMS] = {
+ .packing = sja1110_vl_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1110_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1110_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1110_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_AVB_PARAMS] = {
+ .packing = sja1105pqrs_avb_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1110_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1110_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1110_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1110_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1110_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+ [BLK_IDX_PCP_REMAPPING] = {
+ .packing = sja1110_pcp_remapping_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1110_pcp_remapping_entry),
+ .packed_entry_size = SJA1110_SIZE_PCP_REMAPPING_ENTRY,
+ .max_entry_count = SJA1110_MAX_PCP_REMAPPING_COUNT,
+ },
+};
+
+int sja1105_static_config_init(struct sja1105_static_config *config,
+ const struct sja1105_table_ops *static_ops,
+ u64 device_id)
+{
+ enum sja1105_blk_idx i;
+
+ *config = (struct sja1105_static_config) {0};
+
+ /* Transfer static_ops array from priv into per-table ops
+ * for handier access
+ */
+ for (i = 0; i < BLK_IDX_MAX; i++)
+ config->tables[i].ops = &static_ops[i];
+
+ config->device_id = device_id;
+ return 0;
+}
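+
+/* A minimal sketch (hypothetical probe-time caller, not part of this file)
+ * of pairing one of the table ops arrays above with a static config:
+ */
+#if 0
+static int example_config_setup(void)
+{
+	struct sja1105_static_config config;
+	int rc;
+
+	rc = sja1105_static_config_init(&config, sja1105r_table_ops,
+					SJA1105PR_DEVICE_ID);
+	if (rc)
+		return rc;
+
+	/* ... resize and populate config.tables[] here ... */
+
+	sja1105_static_config_free(&config);
+	return 0;
+}
+#endif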
+
+void sja1105_static_config_free(struct sja1105_static_config *config)
+{
+ enum sja1105_blk_idx i;
+
+ for (i = 0; i < BLK_IDX_MAX; i++) {
+ if (config->tables[i].entry_count) {
+ kfree(config->tables[i].entries);
+ config->tables[i].entry_count = 0;
+ }
+ }
+}
+
+int sja1105_table_delete_entry(struct sja1105_table *table, int i)
+{
+ size_t entry_size = table->ops->unpacked_entry_size;
+ u8 *entries = table->entries;
+
+	if (i >= table->entry_count)
+		return -ERANGE;
+
+	memmove(entries + i * entry_size, entries + (i + 1) * entry_size,
+		(table->entry_count - i - 1) * entry_size);
+
+ table->entry_count--;
+
+ return 0;
+}
+
+/* No pointers to table->entries should be kept when this is called. */
+int sja1105_table_resize(struct sja1105_table *table, size_t new_count)
+{
+ size_t entry_size = table->ops->unpacked_entry_size;
+ void *new_entries, *old_entries = table->entries;
+
+ if (new_count > table->ops->max_entry_count)
+ return -ERANGE;
+
+ new_entries = kcalloc(new_count, entry_size, GFP_KERNEL);
+ if (!new_entries)
+ return -ENOMEM;
+
+ memcpy(new_entries, old_entries, min(new_count, table->entry_count) *
+ entry_size);
+
+ table->entries = new_entries;
+ table->entry_count = new_count;
+ kfree(old_entries);
+ return 0;
+}
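+
+/* A minimal usage sketch (hypothetical helper, not part of the driver) of
+ * the sja1105_table_resize() pattern: grow the table by one entry, then
+ * re-derive the entries pointer, since the resize reallocates the backing
+ * array.
+ */
+#if 0
+static int example_append_retagging(struct sja1105_table *table)
+{
+	struct sja1105_retagging_entry *retagging;
+	int rc;
+
+	rc = sja1105_table_resize(table, table->entry_count + 1);
+	if (rc)
+		return rc;
+
+	/* table->entries was reallocated; any old pointer is stale now */
+	retagging = table->entries;
+	retagging[table->entry_count - 1].do_not_learn = true;
+
+	return 0;
+}
+#endif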
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.h b/drivers/net/dsa/sja1105/sja1105_static_config.h
new file mode 100644
index 000000000..6a372d5f2
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.h
@@ -0,0 +1,548 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright 2016-2018 NXP
+ * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#ifndef _SJA1105_STATIC_CONFIG_H
+#define _SJA1105_STATIC_CONFIG_H
+
+#include <linux/packing.h>
+#include <linux/types.h>
+#include <asm/types.h>
+
+#define SJA1105_NUM_PORTS 5
+#define SJA1110_NUM_PORTS 11
+#define SJA1105_MAX_NUM_PORTS SJA1110_NUM_PORTS
+#define SJA1105_NUM_TC 8
+
+#define SJA1105_SIZE_SPI_MSG_HEADER 4
+#define SJA1105_SIZE_SPI_MSG_MAXLEN (64 * 4)
+#define SJA1105_SIZE_DEVICE_ID 4
+#define SJA1105_SIZE_TABLE_HEADER 12
+#define SJA1105_SIZE_SCHEDULE_ENTRY 8
+#define SJA1110_SIZE_SCHEDULE_ENTRY 12
+#define SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY 4
+#define SJA1110_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY 8
+#define SJA1105_SIZE_VL_LOOKUP_ENTRY 12
+#define SJA1105_SIZE_VL_POLICING_ENTRY 8
+#define SJA1105_SIZE_VL_FORWARDING_ENTRY 4
+#define SJA1105_SIZE_L2_POLICING_ENTRY 8
+#define SJA1105_SIZE_VLAN_LOOKUP_ENTRY 8
+#define SJA1110_SIZE_VLAN_LOOKUP_ENTRY 12
+#define SJA1105_SIZE_L2_FORWARDING_ENTRY 8
+#define SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY 12
+#define SJA1105_SIZE_RETAGGING_ENTRY 8
+#define SJA1105_SIZE_XMII_PARAMS_ENTRY 4
+#define SJA1110_SIZE_XMII_PARAMS_ENTRY 8
+#define SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY 12
+#define SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY 4
+#define SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY 12
+#define SJA1105ET_SIZE_L2_LOOKUP_ENTRY 12
+#define SJA1105ET_SIZE_MAC_CONFIG_ENTRY 28
+#define SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY 4
+#define SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY 40
+#define SJA1105ET_SIZE_AVB_PARAMS_ENTRY 12
+#define SJA1105ET_SIZE_CBS_ENTRY 16
+#define SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY 20
+#define SJA1110_SIZE_L2_LOOKUP_ENTRY 24
+#define SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY 32
+#define SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY 16
+#define SJA1110_SIZE_L2_LOOKUP_PARAMS_ENTRY 28
+#define SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY 44
+#define SJA1110_SIZE_GENERAL_PARAMS_ENTRY 56
+#define SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY 16
+#define SJA1105PQRS_SIZE_CBS_ENTRY 20
+#define SJA1110_SIZE_PCP_REMAPPING_ENTRY 4
+
+/* UM10944.pdf Page 11, Table 2. Configuration Blocks */
+enum {
+ BLKID_SCHEDULE = 0x00,
+ BLKID_SCHEDULE_ENTRY_POINTS = 0x01,
+ BLKID_VL_LOOKUP = 0x02,
+ BLKID_VL_POLICING = 0x03,
+ BLKID_VL_FORWARDING = 0x04,
+ BLKID_L2_LOOKUP = 0x05,
+ BLKID_L2_POLICING = 0x06,
+ BLKID_VLAN_LOOKUP = 0x07,
+ BLKID_L2_FORWARDING = 0x08,
+ BLKID_MAC_CONFIG = 0x09,
+ BLKID_SCHEDULE_PARAMS = 0x0A,
+ BLKID_SCHEDULE_ENTRY_POINTS_PARAMS = 0x0B,
+ BLKID_VL_FORWARDING_PARAMS = 0x0C,
+ BLKID_L2_LOOKUP_PARAMS = 0x0D,
+ BLKID_L2_FORWARDING_PARAMS = 0x0E,
+ BLKID_AVB_PARAMS = 0x10,
+ BLKID_GENERAL_PARAMS = 0x11,
+ BLKID_RETAGGING = 0x12,
+ BLKID_CBS = 0x13,
+ BLKID_PCP_REMAPPING = 0x1C,
+ BLKID_XMII_PARAMS = 0x4E,
+};
+
+enum sja1105_blk_idx {
+ BLK_IDX_SCHEDULE = 0,
+ BLK_IDX_SCHEDULE_ENTRY_POINTS,
+ BLK_IDX_VL_LOOKUP,
+ BLK_IDX_VL_POLICING,
+ BLK_IDX_VL_FORWARDING,
+ BLK_IDX_L2_LOOKUP,
+ BLK_IDX_L2_POLICING,
+ BLK_IDX_VLAN_LOOKUP,
+ BLK_IDX_L2_FORWARDING,
+ BLK_IDX_MAC_CONFIG,
+ BLK_IDX_SCHEDULE_PARAMS,
+ BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS,
+ BLK_IDX_VL_FORWARDING_PARAMS,
+ BLK_IDX_L2_LOOKUP_PARAMS,
+ BLK_IDX_L2_FORWARDING_PARAMS,
+ BLK_IDX_AVB_PARAMS,
+ BLK_IDX_GENERAL_PARAMS,
+ BLK_IDX_RETAGGING,
+ BLK_IDX_CBS,
+ BLK_IDX_XMII_PARAMS,
+ BLK_IDX_PCP_REMAPPING,
+ BLK_IDX_MAX,
+ /* Fake block indices that are only valid for dynamic access */
+ BLK_IDX_MGMT_ROUTE,
+ BLK_IDX_MAX_DYN,
+ BLK_IDX_INVAL = -1,
+};
+
+#define SJA1105_MAX_SCHEDULE_COUNT 1024
+#define SJA1110_MAX_SCHEDULE_COUNT 4096
+#define SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT 2048
+#define SJA1105_MAX_VL_LOOKUP_COUNT 1024
+#define SJA1110_MAX_VL_LOOKUP_COUNT 4096
+#define SJA1105_MAX_VL_POLICING_COUNT 1024
+#define SJA1110_MAX_VL_POLICING_COUNT 4096
+#define SJA1105_MAX_VL_FORWARDING_COUNT 1024
+#define SJA1110_MAX_VL_FORWARDING_COUNT 4096
+#define SJA1105_MAX_L2_LOOKUP_COUNT 1024
+#define SJA1105_MAX_L2_POLICING_COUNT 45
+#define SJA1110_MAX_L2_POLICING_COUNT 110
+#define SJA1105_MAX_VLAN_LOOKUP_COUNT 4096
+#define SJA1105_MAX_L2_FORWARDING_COUNT 13
+#define SJA1110_MAX_L2_FORWARDING_COUNT 19
+#define SJA1105_MAX_MAC_CONFIG_COUNT 5
+#define SJA1110_MAX_MAC_CONFIG_COUNT 11
+#define SJA1105_MAX_SCHEDULE_PARAMS_COUNT 1
+#define SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT 1
+#define SJA1105_MAX_VL_FORWARDING_PARAMS_COUNT 1
+#define SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT 1
+#define SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT 1
+#define SJA1105_MAX_GENERAL_PARAMS_COUNT 1
+#define SJA1105_MAX_RETAGGING_COUNT 32
+#define SJA1105_MAX_XMII_PARAMS_COUNT 1
+#define SJA1105_MAX_AVB_PARAMS_COUNT 1
+#define SJA1105ET_MAX_CBS_COUNT 10
+#define SJA1105PQRS_MAX_CBS_COUNT 16
+#define SJA1110_MAX_CBS_COUNT 80
+#define SJA1110_MAX_PCP_REMAPPING_COUNT 11
+
+#define SJA1105_MAX_FRAME_MEMORY 929
+#define SJA1110_MAX_FRAME_MEMORY 1820
+#define SJA1105_FRAME_MEMORY_RETAGGING_OVERHEAD 19
+#define SJA1105_VL_FRAME_MEMORY 100
+
+#define SJA1105E_DEVICE_ID 0x9C00000Cull
+#define SJA1105T_DEVICE_ID 0x9E00030Eull
+#define SJA1105PR_DEVICE_ID 0xAF00030Eull
+#define SJA1105QS_DEVICE_ID 0xAE00030Eull
+#define SJA1110_DEVICE_ID 0xB700030Full
+
+#define SJA1105ET_PART_NO 0x9A83
+#define SJA1105P_PART_NO 0x9A84
+#define SJA1105Q_PART_NO 0x9A85
+#define SJA1105R_PART_NO 0x9A86
+#define SJA1105S_PART_NO 0x9A87
+#define SJA1110A_PART_NO 0x1110
+#define SJA1110B_PART_NO 0x1111
+#define SJA1110C_PART_NO 0x1112
+#define SJA1110D_PART_NO 0x1113
+
+#define SJA1110_ACU 0x1c4400
+#define SJA1110_RGU 0x1c6000
+#define SJA1110_CGU 0x1c6400
+
+#define SJA1110_SPI_ADDR(x) ((x) / 4)
+#define SJA1110_ACU_ADDR(x) (SJA1110_ACU + SJA1110_SPI_ADDR(x))
+#define SJA1110_CGU_ADDR(x) (SJA1110_CGU + SJA1110_SPI_ADDR(x))
+#define SJA1110_RGU_ADDR(x) (SJA1110_RGU + SJA1110_SPI_ADDR(x))
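+/* For example (illustrative arithmetic only): these register spaces are
+ * word-addressed, so SJA1110_CGU_ADDR(0x10) == 0x1c6400 + 0x10 / 4 ==
+ * 0x1c6404.
+ */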
+
+#define SJA1105_RSV_ADDR 0xffffffffffffffffull
+
+struct sja1105_schedule_entry {
+ u64 winstindex;
+ u64 winend;
+ u64 winst;
+ u64 destports;
+ u64 setvalid;
+ u64 txen;
+ u64 resmedia_en;
+ u64 resmedia;
+ u64 vlindex;
+ u64 delta;
+};
+
+struct sja1105_schedule_params_entry {
+ u64 subscheind[8];
+};
+
+struct sja1105_general_params_entry {
+ u64 vllupformat;
+ u64 mirr_ptacu;
+ u64 switchid;
+ u64 hostprio;
+ u64 mac_fltres1;
+ u64 mac_fltres0;
+ u64 mac_flt1;
+ u64 mac_flt0;
+ u64 incl_srcpt1;
+ u64 incl_srcpt0;
+ u64 send_meta1;
+ u64 send_meta0;
+ u64 casc_port;
+ u64 host_port;
+ u64 mirr_port;
+ u64 vlmarker;
+ u64 vlmask;
+ u64 tpid;
+ u64 ignore2stf;
+ u64 tpid2;
+ /* P/Q/R/S only */
+ u64 queue_ts;
+ u64 egrmirrvid;
+ u64 egrmirrpcp;
+ u64 egrmirrdei;
+ u64 replay_port;
+ /* SJA1110 only */
+ u64 tte_en;
+ u64 tdmaconfigidx;
+ u64 header_type;
+};
+
+struct sja1105_schedule_entry_points_entry {
+ u64 subschindx;
+ u64 delta;
+ u64 address;
+};
+
+struct sja1105_schedule_entry_points_params_entry {
+ u64 clksrc;
+ u64 actsubsch;
+};
+
+struct sja1105_vlan_lookup_entry {
+ u64 ving_mirr;
+ u64 vegr_mirr;
+ u64 vmemb_port;
+ u64 vlan_bc;
+ u64 tag_port;
+ u64 vlanid;
+ u64 type_entry; /* SJA1110 only */
+};
+
+struct sja1105_l2_lookup_entry {
+ u64 vlanid;
+ u64 macaddr;
+ u64 destports;
+ u64 enfport;
+ u64 index;
+ /* P/Q/R/S only */
+ u64 mask_iotag;
+ u64 mask_vlanid;
+ u64 mask_macaddr;
+ u64 mask_srcport;
+ u64 iotag;
+ u64 srcport;
+ u64 lockeds;
+ union {
+ /* LOCKEDS=1: Static FDB entries */
+ struct {
+ /* TSREG is deprecated in SJA1110, TRAP is supported only
+ * in SJA1110.
+ */
+ u64 trap;
+ u64 tsreg;
+ u64 mirrvlan;
+ u64 takets;
+ u64 mirr;
+ u64 retag;
+ };
+ /* LOCKEDS=0: Dynamically learned FDB entries */
+ struct {
+ u64 touched;
+ u64 age;
+ };
+ };
+};
+
+struct sja1105_l2_lookup_params_entry {
+ u64 maxaddrp[SJA1105_MAX_NUM_PORTS]; /* P/Q/R/S only */
+ u64 start_dynspc; /* P/Q/R/S only */
+ u64 drpnolearn; /* P/Q/R/S only */
+ u64 use_static; /* P/Q/R/S only */
+ u64 owr_dyn; /* P/Q/R/S only */
+ u64 learn_once; /* P/Q/R/S only */
+ u64 maxage; /* Shared */
+ u64 dyn_tbsz; /* E/T only */
+ u64 poly; /* E/T only */
+ u64 shared_learn; /* Shared */
+ u64 no_enf_hostprt; /* Shared */
+ u64 no_mgmt_learn; /* Shared */
+};
+
+struct sja1105_l2_forwarding_entry {
+ u64 bc_domain;
+ u64 reach_port;
+ u64 fl_domain;
+ /* This is actually max(SJA1105_NUM_TC, SJA1105_MAX_NUM_PORTS) */
+ u64 vlan_pmap[SJA1105_MAX_NUM_PORTS];
+ bool type_egrpcp2outputq;
+};
+
+struct sja1105_l2_forwarding_params_entry {
+ u64 max_dynp;
+ u64 part_spc[8];
+};
+
+struct sja1105_l2_policing_entry {
+ u64 sharindx;
+ u64 smax;
+ u64 rate;
+ u64 maxlen;
+ u64 partition;
+};
+
+struct sja1105_avb_params_entry {
+ u64 cas_master;
+ u64 destmeta;
+ u64 srcmeta;
+};
+
+struct sja1105_mac_config_entry {
+ u64 top[8];
+ u64 base[8];
+ u64 enabled[8];
+ u64 ifg;
+ u64 speed;
+ u64 tp_delin;
+ u64 tp_delout;
+ u64 maxage;
+ u64 vlanprio;
+ u64 vlanid;
+ u64 ing_mirr;
+ u64 egr_mirr;
+ u64 drpnona664;
+ u64 drpdtag;
+ u64 drpuntag;
+ u64 retag;
+ u64 dyn_learn;
+ u64 egress;
+ u64 ingress;
+};
+
+struct sja1105_retagging_entry {
+ u64 egr_port;
+ u64 ing_port;
+ u64 vlan_ing;
+ u64 vlan_egr;
+ u64 do_not_learn;
+ u64 use_dest_ports;
+ u64 destports;
+};
+
+struct sja1105_cbs_entry {
+ u64 port; /* Not used for SJA1110 */
+ u64 prio; /* Not used for SJA1110 */
+ u64 credit_hi;
+ u64 credit_lo;
+ u64 send_slope;
+ u64 idle_slope;
+};
+
+struct sja1105_xmii_params_entry {
+ u64 phy_mac[SJA1105_MAX_NUM_PORTS];
+ u64 xmii_mode[SJA1105_MAX_NUM_PORTS];
+	/* The SJA1110 insists on being a snowflake, and requires SGMII,
+ * 2500base-x and internal MII ports connected to the 100base-TX PHY to
+ * set this bit. We set it unconditionally from the high-level logic,
+ * and only sja1110_xmii_params_entry_packing writes it to the static
+ * config. I have no better name for it than "special".
+ */
+ u64 special[SJA1105_MAX_NUM_PORTS];
+};
+
+struct sja1110_pcp_remapping_entry {
+ u64 egrpcp[SJA1105_NUM_TC];
+};
+
+enum {
+ SJA1105_VL_FORMAT_PSFP = 0,
+ SJA1105_VL_FORMAT_ARINC664 = 1,
+};
+
+struct sja1105_vl_lookup_entry {
+ u64 format;
+ u64 port;
+ union {
+ /* SJA1105_VL_FORMAT_PSFP */
+ struct {
+ u64 destports;
+ u64 iscritical;
+ u64 macaddr;
+ u64 vlanid;
+ u64 vlanprior;
+ };
+ /* SJA1105_VL_FORMAT_ARINC664 */
+ struct {
+ u64 egrmirr;
+ u64 ingrmirr;
+ u64 vlid;
+ };
+ };
+ /* Not part of hardware structure */
+ unsigned long flow_cookie;
+};
+
+struct sja1105_vl_policing_entry {
+ u64 type;
+ u64 maxlen;
+ u64 sharindx;
+ u64 bag;
+ u64 jitter;
+};
+
+struct sja1105_vl_forwarding_entry {
+ u64 type;
+ u64 priority;
+ u64 partition;
+ u64 destports;
+};
+
+struct sja1105_vl_forwarding_params_entry {
+ u64 partspc[8];
+ u64 debugen;
+};
+
+struct sja1105_table_header {
+ u64 block_id;
+ u64 len;
+ u64 crc;
+};
+
+struct sja1105_table_ops {
+ size_t (*packing)(void *buf, void *entry_ptr, enum packing_op op);
+ size_t unpacked_entry_size;
+ size_t packed_entry_size;
+ size_t max_entry_count;
+};
+
+struct sja1105_table {
+ const struct sja1105_table_ops *ops;
+ size_t entry_count;
+ void *entries;
+};
+
+struct sja1105_static_config {
+ u64 device_id;
+ struct sja1105_table tables[BLK_IDX_MAX];
+};
+
+extern const struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX];
+extern const struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX];
+extern const struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX];
+extern const struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX];
+extern const struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX];
+extern const struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX];
+extern const struct sja1105_table_ops sja1110_table_ops[BLK_IDX_MAX];
+
+size_t sja1105_table_header_packing(void *buf, void *hdr, enum packing_op op);
+void
+sja1105_table_header_pack_with_crc(void *buf, struct sja1105_table_header *hdr);
+size_t
+sja1105_static_config_get_length(const struct sja1105_static_config *config);
+
+typedef enum {
+ SJA1105_CONFIG_OK = 0,
+ SJA1105_TTETHERNET_NOT_SUPPORTED,
+ SJA1105_INCORRECT_TTETHERNET_CONFIGURATION,
+ SJA1105_INCORRECT_VIRTUAL_LINK_CONFIGURATION,
+ SJA1105_MISSING_L2_POLICING_TABLE,
+ SJA1105_MISSING_L2_FORWARDING_TABLE,
+ SJA1105_MISSING_L2_FORWARDING_PARAMS_TABLE,
+ SJA1105_MISSING_GENERAL_PARAMS_TABLE,
+ SJA1105_MISSING_VLAN_TABLE,
+ SJA1105_MISSING_XMII_TABLE,
+ SJA1105_MISSING_MAC_TABLE,
+ SJA1105_OVERCOMMITTED_FRAME_MEMORY,
+} sja1105_config_valid_t;
+
+extern const char *sja1105_static_config_error_msg[];
+
+sja1105_config_valid_t
+sja1105_static_config_check_valid(const struct sja1105_static_config *config,
+ int max_mem);
+void
+sja1105_static_config_pack(void *buf, struct sja1105_static_config *config);
+int sja1105_static_config_init(struct sja1105_static_config *config,
+ const struct sja1105_table_ops *static_ops,
+ u64 device_id);
+void sja1105_static_config_free(struct sja1105_static_config *config);
+
+int sja1105_table_delete_entry(struct sja1105_table *table, int i);
+int sja1105_table_resize(struct sja1105_table *table, size_t new_count);
+
+u32 sja1105_crc32(const void *buf, size_t len);
+
+void sja1105_pack(void *buf, const u64 *val, int start, int end, size_t len);
+void sja1105_unpack(const void *buf, u64 *val, int start, int end, size_t len);
+void sja1105_packing(void *buf, u64 *val, int start, int end,
+ size_t len, enum packing_op op);
+
+/* Common implementations for the static and dynamic configs */
+size_t sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1110_general_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105pqrs_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1110_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105_l2_forwarding_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1110_l2_forwarding_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105pqrs_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105et_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1110_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1110_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105_retagging_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1110_retagging_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105pqrs_mac_config_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1110_mac_config_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105pqrs_avb_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105_vl_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1110_vl_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1110_vl_policing_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1110_xmii_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1110_l2_policing_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1110_l2_forwarding_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+
+#endif
diff --git a/drivers/net/dsa/sja1105/sja1105_tas.c b/drivers/net/dsa/sja1105/sja1105_tas.c
new file mode 100644
index 000000000..e6153848a
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_tas.c
@@ -0,0 +1,897 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include "sja1105.h"
+
+#define SJA1105_TAS_CLKSRC_DISABLED 0
+#define SJA1105_TAS_CLKSRC_STANDALONE 1
+#define SJA1105_TAS_CLKSRC_AS6802 2
+#define SJA1105_TAS_CLKSRC_PTP 3
+#define SJA1105_GATE_MASK GENMASK_ULL(SJA1105_NUM_TC - 1, 0)
+
+#define work_to_sja1105_tas(d) \
+ container_of((d), struct sja1105_tas_data, tas_work)
+#define tas_to_sja1105(d) \
+ container_of((d), struct sja1105_private, tas_data)
+
+static int sja1105_tas_set_runtime_params(struct sja1105_private *priv)
+{
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ struct sja1105_gating_config *gating_cfg = &tas_data->gating_cfg;
+ struct dsa_switch *ds = priv->ds;
+ s64 earliest_base_time = S64_MAX;
+ s64 latest_base_time = 0;
+ s64 its_cycle_time = 0;
+ s64 max_cycle_time = 0;
+ int port;
+
+ tas_data->enabled = false;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ const struct tc_taprio_qopt_offload *offload;
+
+ offload = tas_data->offload[port];
+ if (!offload)
+ continue;
+
+ tas_data->enabled = true;
+
+ if (max_cycle_time < offload->cycle_time)
+ max_cycle_time = offload->cycle_time;
+ if (latest_base_time < offload->base_time)
+ latest_base_time = offload->base_time;
+ if (earliest_base_time > offload->base_time) {
+ earliest_base_time = offload->base_time;
+ its_cycle_time = offload->cycle_time;
+ }
+ }
+
+ if (!list_empty(&gating_cfg->entries)) {
+ tas_data->enabled = true;
+
+ if (max_cycle_time < gating_cfg->cycle_time)
+ max_cycle_time = gating_cfg->cycle_time;
+ if (latest_base_time < gating_cfg->base_time)
+ latest_base_time = gating_cfg->base_time;
+ if (earliest_base_time > gating_cfg->base_time) {
+ earliest_base_time = gating_cfg->base_time;
+ its_cycle_time = gating_cfg->cycle_time;
+ }
+ }
+
+ if (!tas_data->enabled)
+ return 0;
+
+ /* Roll the earliest base time over until it is in a comparable
+ * time base with the latest, then compare their deltas.
+ * We want to enforce that all ports' base times are within
+	 * SJA1105_TAS_MAX_DELTA deltas (of 200 ns each) of one another.
+ */
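+	/* Illustrative numbers (not taken from hardware): with base times
+	 * of 10us and 250us and a 100us cycle, 10us first rolls forward to
+	 * 310us and is then walked back to 210us, leaving a 40us spread to
+	 * check against the limit below.
+	 */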
+ earliest_base_time = future_base_time(earliest_base_time,
+ its_cycle_time,
+ latest_base_time);
+ while (earliest_base_time > latest_base_time)
+ earliest_base_time -= its_cycle_time;
+ if (latest_base_time - earliest_base_time >
+ sja1105_delta_to_ns(SJA1105_TAS_MAX_DELTA)) {
+ dev_err(ds->dev,
+ "Base times too far apart: min %llu max %llu\n",
+ earliest_base_time, latest_base_time);
+ return -ERANGE;
+ }
+
+ tas_data->earliest_base_time = earliest_base_time;
+ tas_data->max_cycle_time = max_cycle_time;
+
+ dev_dbg(ds->dev, "earliest base time %lld ns\n", earliest_base_time);
+ dev_dbg(ds->dev, "latest base time %lld ns\n", latest_base_time);
+ dev_dbg(ds->dev, "longest cycle time %lld ns\n", max_cycle_time);
+
+ return 0;
+}
+
+/* Lo and behold: the egress scheduler from hell.
+ *
+ * At the hardware level, the Time-Aware Shaper holds a global linear array of
+ * all schedule entries for all ports. These are the Gate Control List (GCL)
+ * entries, let's call them "timeslots" for short. This linear array of
+ * timeslots is held in BLK_IDX_SCHEDULE.
+ *
+ * Then there are a maximum of 8 "execution threads" inside the switch, which
+ * iterate cyclically through the "schedule". Each "cycle" has an entry point
+ * and an exit point, both being timeslot indices in the schedule table. The
+ * hardware calls each cycle a "subschedule".
+ *
+ * Subschedule (cycle) i starts when
+ * ptpclkval >= ptpschtm + BLK_IDX_SCHEDULE_ENTRY_POINTS[i].delta.
+ *
+ * The hardware scheduler iterates BLK_IDX_SCHEDULE with a k ranging from
+ * k = BLK_IDX_SCHEDULE_ENTRY_POINTS[i].address to
+ * k = BLK_IDX_SCHEDULE_PARAMS.subscheind[i]
+ *
+ * For each schedule entry (timeslot) k, the engine executes the gate control
+ * list entry for the duration of BLK_IDX_SCHEDULE[k].delta.
+ *
+ * +---------+
+ * | | BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS
+ * +---------+
+ * |
+ * +-----------------+
+ * | .actsubsch
+ * BLK_IDX_SCHEDULE_ENTRY_POINTS v
+ * +-------+-------+
+ * |cycle 0|cycle 1|
+ * +-------+-------+
+ * | | | |
+ * +----------------+ | | +-------------------------------------+
+ * | .subschindx | | .subschindx |
+ * | | +---------------+ |
+ * | .address | .address | |
+ * | | | |
+ * | | | |
+ * | BLK_IDX_SCHEDULE v v |
+ * | +-------+-------+-------+-------+-------+------+ |
+ * | |entry 0|entry 1|entry 2|entry 3|entry 4|entry5| |
+ * | +-------+-------+-------+-------+-------+------+ |
+ * | ^ ^ ^ ^ |
+ * | | | | | |
+ * | +-------------------------+ | | | |
+ * | | +-------------------------------+ | | |
+ * | | | +-------------------+ | |
+ * | | | | | |
+ * | +---------------------------------------------------------------+ |
+ * | |subscheind[0]<=subscheind[1]<=subscheind[2]<=...<=subscheind[7]| |
+ * | +---------------------------------------------------------------+ |
+ * | ^ ^ BLK_IDX_SCHEDULE_PARAMS |
+ * | | | |
+ * +--------+ +-------------------------------------------+
+ *
+ * In the above picture there are two subschedules (cycles):
+ *
+ * - cycle 0: iterates the schedule table from 0 to 2 (and back)
+ * - cycle 1: iterates the schedule table from 3 to 5 (and back)
+ *
+ * All other possible execution threads must be marked as unused by making
+ * their "subschedule end index" (subscheind) equal to the last valid
+ * subschedule's end index (in this case 5).
+ */
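+
+/* To make the picture above concrete (values read off the drawing, not off
+ * hardware): cycle 0 spans schedule entries 0..2 and cycle 1 spans entries
+ * 3..5, so the schedule parameters would be packed as
+ *
+ *	subscheind[8] = { 2, 5, 5, 5, 5, 5, 5, 5 }
+ *
+ * with execution threads 2..7 parked on the last valid end index.
+ */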
+int sja1105_init_scheduling(struct sja1105_private *priv)
+{
+ struct sja1105_schedule_entry_points_entry *schedule_entry_points;
+ struct sja1105_schedule_entry_points_params_entry
+ *schedule_entry_points_params;
+ struct sja1105_schedule_params_entry *schedule_params;
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ struct sja1105_gating_config *gating_cfg = &tas_data->gating_cfg;
+ struct sja1105_schedule_entry *schedule;
+ struct dsa_switch *ds = priv->ds;
+ struct sja1105_table *table;
+ int schedule_start_idx;
+ s64 entry_point_delta;
+ int schedule_end_idx;
+ int num_entries = 0;
+ int num_cycles = 0;
+ int cycle = 0;
+ int i, k = 0;
+ int port, rc;
+
+ rc = sja1105_tas_set_runtime_params(priv);
+ if (rc < 0)
+ return rc;
+
+ /* Discard previous Schedule Table */
+ table = &priv->static_config.tables[BLK_IDX_SCHEDULE];
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Discard previous Schedule Entry Points Parameters Table */
+ table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS];
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Discard previous Schedule Parameters Table */
+ table = &priv->static_config.tables[BLK_IDX_SCHEDULE_PARAMS];
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Discard previous Schedule Entry Points Table */
+ table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS];
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Figure out the dimensioning of the problem */
+ for (port = 0; port < ds->num_ports; port++) {
+ if (tas_data->offload[port]) {
+ num_entries += tas_data->offload[port]->num_entries;
+ num_cycles++;
+ }
+ }
+
+ if (!list_empty(&gating_cfg->entries)) {
+ num_entries += gating_cfg->num_entries;
+ num_cycles++;
+ }
+
+ /* Nothing to do */
+ if (!num_cycles)
+ return 0;
+
+ /* Pre-allocate space in the static config tables */
+
+ /* Schedule Table */
+ table = &priv->static_config.tables[BLK_IDX_SCHEDULE];
+ table->entries = kcalloc(num_entries, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+ table->entry_count = num_entries;
+ schedule = table->entries;
+
+ /* Schedule Points Parameters Table */
+ table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS];
+ table->entries = kcalloc(SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ /* Previously allocated memory will be freed automatically in
+ * sja1105_static_config_free. This is true for all early
+ * returns below.
+ */
+ return -ENOMEM;
+ table->entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT;
+ schedule_entry_points_params = table->entries;
+
+ /* Schedule Parameters Table */
+ table = &priv->static_config.tables[BLK_IDX_SCHEDULE_PARAMS];
+ table->entries = kcalloc(SJA1105_MAX_SCHEDULE_PARAMS_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+ table->entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT;
+ schedule_params = table->entries;
+
+ /* Schedule Entry Points Table */
+ table = &priv->static_config.tables[BLK_IDX_SCHEDULE_ENTRY_POINTS];
+ table->entries = kcalloc(num_cycles, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+ table->entry_count = num_cycles;
+ schedule_entry_points = table->entries;
+
+ /* Finally start populating the static config tables */
+ schedule_entry_points_params->clksrc = SJA1105_TAS_CLKSRC_PTP;
+ schedule_entry_points_params->actsubsch = num_cycles - 1;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ const struct tc_taprio_qopt_offload *offload;
+ /* Relative base time */
+ s64 rbt;
+
+ offload = tas_data->offload[port];
+ if (!offload)
+ continue;
+
+ schedule_start_idx = k;
+ schedule_end_idx = k + offload->num_entries - 1;
+ /* This is the base time expressed as a number of TAS ticks
+ * relative to PTPSCHTM, which we'll (perhaps improperly) call
+ * the operational base time.
+ */
+ rbt = future_base_time(offload->base_time,
+ offload->cycle_time,
+ tas_data->earliest_base_time);
+ rbt -= tas_data->earliest_base_time;
+ /* UM10944.pdf 4.2.2. Schedule Entry Points table says that
+ * delta cannot be zero, which is shitty. Advance all relative
+ * base times by 1 TAS delta, so that even the earliest base
+ * time becomes 1 in relative terms. Then start the operational
+ * base time (PTPSCHTM) one TAS delta earlier than planned.
+ */
+ entry_point_delta = ns_to_sja1105_delta(rbt) + 1;
+
+ schedule_entry_points[cycle].subschindx = cycle;
+ schedule_entry_points[cycle].delta = entry_point_delta;
+ schedule_entry_points[cycle].address = schedule_start_idx;
+
+ /* The subschedule end indices need to be
+ * monotonically increasing.
+ */
+ for (i = cycle; i < 8; i++)
+ schedule_params->subscheind[i] = schedule_end_idx;
+
+ for (i = 0; i < offload->num_entries; i++, k++) {
+ s64 delta_ns = offload->entries[i].interval;
+
+ schedule[k].delta = ns_to_sja1105_delta(delta_ns);
+ schedule[k].destports = BIT(port);
+ schedule[k].resmedia_en = true;
+ schedule[k].resmedia = SJA1105_GATE_MASK &
+ ~offload->entries[i].gate_mask;
+ }
+ cycle++;
+ }
+
+ if (!list_empty(&gating_cfg->entries)) {
+ struct sja1105_gate_entry *e;
+
+ /* Relative base time */
+ s64 rbt;
+
+ schedule_start_idx = k;
+ schedule_end_idx = k + gating_cfg->num_entries - 1;
+ rbt = future_base_time(gating_cfg->base_time,
+ gating_cfg->cycle_time,
+ tas_data->earliest_base_time);
+ rbt -= tas_data->earliest_base_time;
+ entry_point_delta = ns_to_sja1105_delta(rbt) + 1;
+
+ schedule_entry_points[cycle].subschindx = cycle;
+ schedule_entry_points[cycle].delta = entry_point_delta;
+ schedule_entry_points[cycle].address = schedule_start_idx;
+
+ for (i = cycle; i < 8; i++)
+ schedule_params->subscheind[i] = schedule_end_idx;
+
+ list_for_each_entry(e, &gating_cfg->entries, list) {
+ schedule[k].delta = ns_to_sja1105_delta(e->interval);
+ schedule[k].destports = e->rule->vl.destports;
+ schedule[k].setvalid = true;
+ schedule[k].txen = true;
+ schedule[k].vlindex = e->rule->vl.sharindx;
+ schedule[k].winstindex = e->rule->vl.sharindx;
+ if (e->gate_state) /* Gate open */
+ schedule[k].winst = true;
+ else /* Gate closed */
+ schedule[k].winend = true;
+ k++;
+ }
+ }
+
+ return 0;
+}
+
+/* Suppose there are two port subschedules, each executing an arbitrary number
+ * of gate open/close events cyclically.
+ * No two of those gate events may ever occur at the exact same time, otherwise
+ * the switch is known to act in exotically strange ways.
+ * However the hardware doesn't bother performing these integrity checks.
+ * So here we are with the task of validating whether the new @admin offload
+ * has any conflict with the already established TAS configuration in
+ * tas_data->offload. We already know the other ports are in harmony with one
+ * another, otherwise we wouldn't have saved them.
+ * Each gate event executes periodically, with a period of @cycle_time and a
+ * phase given by its cycle's @base_time plus its offset within the cycle
+ * (which in turn is given by the length of the events prior to it).
+ * There are two aspects to possible collisions:
+ * - Collisions within one cycle's (actually the longest cycle's) time frame.
+ * For that, we need to compare the cartesian product of each possible
+ * occurrence of each event within one cycle time.
+ * - Collisions in the future. Events may not collide within one cycle time,
+ * but if two port schedules don't have the same periodicity (aka the cycle
+ * times aren't multiples of one another), they surely will some time in the
+ * future (actually they will collide an infinite amount of times).
+ */
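+/* An illustrative case (numbers invented for the example): cycle times of
+ * 200us and 300us fail the divisibility check below and are rejected
+ * outright, since their occurrences would eventually coincide; 200us and
+ * 400us pass it, and only then is the cartesian product of entry offsets
+ * within one max cycle time compared pairwise.
+ */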
+static bool
+sja1105_tas_check_conflicts(struct sja1105_private *priv, int port,
+ const struct tc_taprio_qopt_offload *admin)
+{
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ const struct tc_taprio_qopt_offload *offload;
+ s64 max_cycle_time, min_cycle_time;
+ s64 delta1, delta2;
+ s64 rbt1, rbt2;
+ s64 stop_time;
+ s64 t1, t2;
+ int i, j;
+ s32 rem;
+
+ offload = tas_data->offload[port];
+ if (!offload)
+ return false;
+
+ /* Check if the two cycle times are multiples of one another.
+ * If they aren't, then they will surely collide.
+ */
+ max_cycle_time = max(offload->cycle_time, admin->cycle_time);
+ min_cycle_time = min(offload->cycle_time, admin->cycle_time);
+ div_s64_rem(max_cycle_time, min_cycle_time, &rem);
+ if (rem)
+ return true;
+
+ /* Calculate the "reduced" base time of each of the two cycles
+	 * (transposed back as close to 0 as possible) by taking the
+	 * remainder of the division by the cycle time.
+ */
+ div_s64_rem(offload->base_time, offload->cycle_time, &rem);
+ rbt1 = rem;
+
+ div_s64_rem(admin->base_time, admin->cycle_time, &rem);
+ rbt2 = rem;
+
+ stop_time = max_cycle_time + max(rbt1, rbt2);
+
+ /* delta1 is the relative base time of each GCL entry within
+ * the established ports' TAS config.
+ */
+ for (i = 0, delta1 = 0;
+ i < offload->num_entries;
+ delta1 += offload->entries[i].interval, i++) {
+ /* delta2 is the relative base time of each GCL entry
+ * within the newly added TAS config.
+ */
+ for (j = 0, delta2 = 0;
+ j < admin->num_entries;
+ delta2 += admin->entries[j].interval, j++) {
+ /* t1 follows all possible occurrences of the
+ * established ports' GCL entry i within the
+ * first cycle time.
+ */
+ for (t1 = rbt1 + delta1;
+ t1 <= stop_time;
+ t1 += offload->cycle_time) {
+ /* t2 follows all possible occurrences
+ * of the newly added GCL entry j
+ * within the first cycle time.
+ */
+ for (t2 = rbt2 + delta2;
+ t2 <= stop_time;
+ t2 += admin->cycle_time) {
+ if (t1 == t2) {
+ dev_warn(priv->ds->dev,
+ "GCL entry %d collides with entry %d of port %d\n",
+ j, i, port);
+ return true;
+ }
+ }
+ }
+ }
+ }
+
+ return false;
+}
+
+/* Check the tc-taprio configuration on @port for conflicts with the tc-gate
+ * global subschedule. If @port is -1, check it against all ports.
+ * To reuse the sja1105_tas_check_conflicts logic without refactoring it,
+ * convert the gating configuration to a dummy tc-taprio offload structure.
+ */
+bool sja1105_gating_check_conflicts(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
+ size_t num_entries = gating_cfg->num_entries;
+ struct tc_taprio_qopt_offload *dummy;
+ struct dsa_switch *ds = priv->ds;
+ struct sja1105_gate_entry *e;
+ bool conflict;
+ int i = 0;
+
+ if (list_empty(&gating_cfg->entries))
+ return false;
+
+ dummy = kzalloc(struct_size(dummy, entries, num_entries), GFP_KERNEL);
+ if (!dummy) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory");
+ return true;
+ }
+
+ dummy->num_entries = num_entries;
+ dummy->base_time = gating_cfg->base_time;
+ dummy->cycle_time = gating_cfg->cycle_time;
+
+ list_for_each_entry(e, &gating_cfg->entries, list)
+ dummy->entries[i++].interval = e->interval;
+
+ if (port != -1) {
+ conflict = sja1105_tas_check_conflicts(priv, port, dummy);
+ } else {
+ for (port = 0; port < ds->num_ports; port++) {
+ conflict = sja1105_tas_check_conflicts(priv, port,
+ dummy);
+ if (conflict)
+ break;
+ }
+ }
+
+ kfree(dummy);
+
+ return conflict;
+}
+
+int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
+ struct tc_taprio_qopt_offload *admin)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ int other_port, rc, i;
+
+ /* Can't change an already configured port (must delete qdisc first).
+ * Can't delete the qdisc from an unconfigured port.
+ */
+ if (!!tas_data->offload[port] == admin->enable)
+ return -EINVAL;
+
+ if (!admin->enable) {
+ taprio_offload_free(tas_data->offload[port]);
+ tas_data->offload[port] = NULL;
+
+ rc = sja1105_init_scheduling(priv);
+ if (rc < 0)
+ return rc;
+
+ return sja1105_static_config_reload(priv, SJA1105_SCHEDULING);
+ }
+
+ /* The cycle time extension is the amount of time the last cycle from
+ * the old OPER needs to be extended in order to phase-align with the
+ * base time of the ADMIN when that becomes the new OPER.
+	 * But of course our switch needs to be reset to switch over between
+ * the ADMIN and the OPER configs - so much for a seamless transition.
+	 * So don't add insult to injury and just say we don't support cycle
+ * time extension.
+ */
+ if (admin->cycle_time_extension)
+ return -ENOTSUPP;
+
+ for (i = 0; i < admin->num_entries; i++) {
+ s64 delta_ns = admin->entries[i].interval;
+ s64 delta_cycles = ns_to_sja1105_delta(delta_ns);
+ bool too_long, too_short;
+
+ too_long = (delta_cycles >= SJA1105_TAS_MAX_DELTA);
+ too_short = (delta_cycles == 0);
+ if (too_long || too_short) {
+ dev_err(priv->ds->dev,
+ "Interval %llu too %s for GCL entry %d\n",
+ delta_ns, too_long ? "long" : "short", i);
+ return -ERANGE;
+ }
+ }
+
+ for (other_port = 0; other_port < ds->num_ports; other_port++) {
+ if (other_port == port)
+ continue;
+
+ if (sja1105_tas_check_conflicts(priv, other_port, admin))
+ return -ERANGE;
+ }
+
+ if (sja1105_gating_check_conflicts(priv, port, NULL)) {
+ dev_err(ds->dev, "Conflict with tc-gate schedule\n");
+ return -ERANGE;
+ }
+
+ tas_data->offload[port] = taprio_offload_get(admin);
+
+ rc = sja1105_init_scheduling(priv);
+ if (rc < 0)
+ return rc;
+
+ return sja1105_static_config_reload(priv, SJA1105_SCHEDULING);
+}
+
+static int sja1105_tas_check_running(struct sja1105_private *priv)
+{
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ struct dsa_switch *ds = priv->ds;
+ struct sja1105_ptp_cmd cmd = {0};
+ int rc;
+
+ rc = sja1105_ptp_commit(ds, &cmd, SPI_READ);
+ if (rc < 0)
+ return rc;
+
+ if (cmd.ptpstrtsch == 1)
+ /* Schedule successfully started */
+ tas_data->state = SJA1105_TAS_STATE_RUNNING;
+ else if (cmd.ptpstopsch == 1)
+ /* Schedule is stopped */
+ tas_data->state = SJA1105_TAS_STATE_DISABLED;
+ else
+ /* Schedule is probably not configured with PTP clock source */
+ rc = -EINVAL;
+
+ return rc;
+}
+
+/* Write to PTPCLKCORP */
+static int sja1105_tas_adjust_drift(struct sja1105_private *priv,
+ u64 correction)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 ptpclkcorp = ns_to_sja1105_ticks(correction);
+
+ return sja1105_xfer_u32(priv, SPI_WRITE, regs->ptpclkcorp,
+ &ptpclkcorp, NULL);
+}
+
+/* Write to PTPSCHTM */
+static int sja1105_tas_set_base_time(struct sja1105_private *priv,
+ u64 base_time)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u64 ptpschtm = ns_to_sja1105_ticks(base_time);
+
+ return sja1105_xfer_u64(priv, SPI_WRITE, regs->ptpschtm,
+ &ptpschtm, NULL);
+}
+
+static int sja1105_tas_start(struct sja1105_private *priv)
+{
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ struct sja1105_ptp_cmd *cmd = &priv->ptp_data.cmd;
+ struct dsa_switch *ds = priv->ds;
+ int rc;
+
+ dev_dbg(ds->dev, "Starting the TAS\n");
+
+ if (tas_data->state == SJA1105_TAS_STATE_ENABLED_NOT_RUNNING ||
+ tas_data->state == SJA1105_TAS_STATE_RUNNING) {
+ dev_err(ds->dev, "TAS already started\n");
+ return -EINVAL;
+ }
+
+ cmd->ptpstrtsch = 1;
+ cmd->ptpstopsch = 0;
+
+ rc = sja1105_ptp_commit(ds, cmd, SPI_WRITE);
+ if (rc < 0)
+ return rc;
+
+ tas_data->state = SJA1105_TAS_STATE_ENABLED_NOT_RUNNING;
+
+ return 0;
+}
+
+static int sja1105_tas_stop(struct sja1105_private *priv)
+{
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+ struct sja1105_ptp_cmd *cmd = &priv->ptp_data.cmd;
+ struct dsa_switch *ds = priv->ds;
+ int rc;
+
+ dev_dbg(ds->dev, "Stopping the TAS\n");
+
+ if (tas_data->state == SJA1105_TAS_STATE_DISABLED) {
+ dev_err(ds->dev, "TAS already disabled\n");
+ return -EINVAL;
+ }
+
+ cmd->ptpstopsch = 1;
+ cmd->ptpstrtsch = 0;
+
+ rc = sja1105_ptp_commit(ds, cmd, SPI_WRITE);
+ if (rc < 0)
+ return rc;
+
+ tas_data->state = SJA1105_TAS_STATE_DISABLED;
+
+ return 0;
+}
+
+/* The schedule engine and the PTP clock are driven by the same oscillator, and
+ * they run in parallel. But whilst the PTP clock can keep an absolute
+ * time-of-day, the schedule engine is only running in 'ticks' (25 ticks make
+ * up a delta, which is 200ns), and wrapping around at the end of each cycle.
+ * The schedule engine is started when the PTP clock reaches the PTPSCHTM time
+ * (in PTP domain).
+ * Because the PTP clock can be rate-corrected (accelerated or slowed down) by
+ * a software servo, and the schedule engine clock runs in parallel to the PTP
+ * clock, there is logic internal to the switch that periodically keeps the
+ * schedule engine from drifting away. The frequency with which this internal
+ * syntonization happens is the PTP clock correction period (PTPCLKCORP). It is
+ * a value also in the PTP clock domain, and is also rate-corrected.
+ * To be precise, during a correction period, there is logic to determine by
+ * how many scheduler clock ticks the PTP clock has drifted. At the end of each
+ * correction period/beginning of new one, the length of a delta is shrunk or
+ * expanded with an integer number of ticks, compared with the typical 25.
+ * So a delta lasts for 200ns (or 25 ticks) only on average.
+ * Sometimes it is longer, sometimes it is shorter. The internal syntonization
+ * logic can adjust by at most 5 ticks in every 20 ticks.
+ *
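+ * As a worked example (illustrative numbers, derived only from the
+ * figures above): one tick is 8 ns (200 ns / 25), and 5 ticks per 20
+ * ticks means at most 40 ns of correction per 160 ns of schedule time.
+ * So if the PTP clock drifted by 100 ticks (800 ns) during a correction
+ * period, the schedule engine needs at least 400 ticks (3.2 us) of
+ * modulated deltas at the start of the next period to phase-align again.
+ *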
+ * The first implication is that you should choose your schedule correction
+ * period to be an integer multiple of the schedule length. Preferably one.
+ * In case there are schedules of multiple ports active, then the correction
+ * period needs to be a multiple of them all. Given the restriction that the
+ * cycle times have to be multiples of one another anyway, this means the
+ * correction period can simply be the largest cycle time, hence the current
+ * choice. This way, the updates are always synchronous to the transmission
+ * cycle, and therefore predictable.
+ *
+ * The second implication is that at the beginning of a correction period, the
+ * first few deltas will be modulated in time, until the schedule engine is
+ * properly phase-aligned with the PTP clock. For this reason, you should place
+ * your best-effort traffic at the beginning of a cycle, and your
+ * time-triggered traffic afterwards.
+ *
+ * The third implication is that once the schedule engine is started, it can
+ * only adjust for so much drift within a correction period. In the servo you
+ * can only change the PTPCLKRATE, but not step the clock (PTPCLKADD). If you
+ * want to do the latter, you need to stop and restart the schedule engine,
+ * which is what the state machine handles.
+ */
+static void sja1105_tas_state_machine(struct work_struct *work)
+{
+ struct sja1105_tas_data *tas_data = work_to_sja1105_tas(work);
+ struct sja1105_private *priv = tas_to_sja1105(tas_data);
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ struct timespec64 base_time_ts, now_ts;
+ struct dsa_switch *ds = priv->ds;
+ struct timespec64 diff;
+ s64 base_time, now;
+ int rc = 0;
+
+ mutex_lock(&ptp_data->lock);
+
+ switch (tas_data->state) {
+ case SJA1105_TAS_STATE_DISABLED:
+ /* Can't do anything at all if clock is still being stepped */
+ if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ)
+ break;
+
+ rc = sja1105_tas_adjust_drift(priv, tas_data->max_cycle_time);
+ if (rc < 0)
+ break;
+
+ rc = __sja1105_ptp_gettimex(ds, &now, NULL);
+ if (rc < 0)
+ break;
+
+ /* Plan to start the earliest schedule first. The others
+ * will be started in hardware, by way of their respective
+ * entry point deltas.
+ * Try our best to avoid fringe cases (race condition between
+ * ptpschtm and ptpstrtsch) by pushing the oper_base_time at
+ * least one second in the future from now. This is not ideal,
+ * but this only needs to buy us time until the
+ * sja1105_tas_start command below gets executed.
+ */
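+ /* Illustrative numbers (made up): with earliest_base_time = 1.0 s,
+ * max_cycle_time = 200 ms and now = 5.05 s, the first cycle start
+ * at least one second away is 1.0 s + 26 * 200 ms = 6.2 s. One
+ * delta (200 ns) is then subtracted below, presumably so that
+ * PTPSCHTM strictly precedes the intended cycle start.
+ */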
+ base_time = future_base_time(tas_data->earliest_base_time,
+ tas_data->max_cycle_time,
+ now + 1ull * NSEC_PER_SEC);
+ base_time -= sja1105_delta_to_ns(1);
+
+ rc = sja1105_tas_set_base_time(priv, base_time);
+ if (rc < 0)
+ break;
+
+ tas_data->oper_base_time = base_time;
+
+ rc = sja1105_tas_start(priv);
+ if (rc < 0)
+ break;
+
+ base_time_ts = ns_to_timespec64(base_time);
+ now_ts = ns_to_timespec64(now);
+
+ dev_dbg(ds->dev, "OPER base time %lld.%09ld (now %lld.%09ld)\n",
+ base_time_ts.tv_sec, base_time_ts.tv_nsec,
+ now_ts.tv_sec, now_ts.tv_nsec);
+
+ break;
+
+ case SJA1105_TAS_STATE_ENABLED_NOT_RUNNING:
+ if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ) {
+ /* Clock was stepped... bad news for TAS */
+ sja1105_tas_stop(priv);
+ break;
+ }
+
+ /* Check if TAS has actually started, by comparing the
+ * scheduled start time with the SJA1105 PTP clock
+ */
+ rc = __sja1105_ptp_gettimex(ds, &now, NULL);
+ if (rc < 0)
+ break;
+
+ if (now < tas_data->oper_base_time) {
+ /* TAS has not started yet */
+ diff = ns_to_timespec64(tas_data->oper_base_time - now);
+ dev_dbg(ds->dev, "time to start: [%lld.%09ld]",
+ diff.tv_sec, diff.tv_nsec);
+ break;
+ }
+
+ /* Time elapsed, what happened? */
+ rc = sja1105_tas_check_running(priv);
+ if (rc < 0)
+ break;
+
+ if (tas_data->state != SJA1105_TAS_STATE_RUNNING)
+ /* TAS should have started by now */
+ dev_err(ds->dev,
+ "TAS not started despite time elapsed\n");
+
+ break;
+
+ case SJA1105_TAS_STATE_RUNNING:
+ /* Clock was stepped... bad news for TAS */
+ if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ) {
+ sja1105_tas_stop(priv);
+ break;
+ }
+
+ rc = sja1105_tas_check_running(priv);
+ if (rc < 0)
+ break;
+
+ if (tas_data->state != SJA1105_TAS_STATE_RUNNING)
+ dev_err(ds->dev, "TAS surprisingly stopped\n");
+
+ break;
+
+ default:
+ if (net_ratelimit())
+ dev_err(ds->dev, "TAS in an invalid state (incorrect use of API)!\n");
+ }
+
+ if (rc && net_ratelimit())
+ dev_err(ds->dev, "An operation returned %d\n", rc);
+
+ mutex_unlock(&ptp_data->lock);
+}
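+
+/* Transition summary for the state machine above (derived from the code,
+ * for orientation only):
+ *
+ * DISABLED -> ENABLED_NOT_RUNNING: on a rate adjustment, after
+ *     PTPCLKCORP and PTPSCHTM are programmed and ptpstrtsch is issued.
+ * ENABLED_NOT_RUNNING -> RUNNING: once the hardware reports
+ *     ptpstrtsch = 1 (the base time was reached).
+ * ENABLED_NOT_RUNNING or RUNNING -> DISABLED: whenever the clock is
+ *     stepped (via sja1105_tas_stop()), or the hardware reports
+ *     ptpstopsch = 1.
+ */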
+
+void sja1105_tas_clockstep(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+
+ if (!tas_data->enabled)
+ return;
+
+ tas_data->last_op = SJA1105_PTP_CLOCKSTEP;
+ schedule_work(&tas_data->tas_work);
+}
+
+void sja1105_tas_adjfreq(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+
+ if (!tas_data->enabled)
+ return;
+
+ /* No reason to schedule the workqueue, nothing changed */
+ if (tas_data->state == SJA1105_TAS_STATE_RUNNING)
+ return;
+
+ tas_data->last_op = SJA1105_PTP_ADJUSTFREQ;
+ schedule_work(&tas_data->tas_work);
+}
+
+void sja1105_tas_setup(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tas_data *tas_data = &priv->tas_data;
+
+ INIT_WORK(&tas_data->tas_work, sja1105_tas_state_machine);
+ tas_data->state = SJA1105_TAS_STATE_DISABLED;
+ tas_data->last_op = SJA1105_PTP_NONE;
+
+ INIT_LIST_HEAD(&tas_data->gating_cfg.entries);
+}
+
+void sja1105_tas_teardown(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct tc_taprio_qopt_offload *offload;
+ int port;
+
+ cancel_work_sync(&priv->tas_data.tas_work);
+
+ for (port = 0; port < ds->num_ports; port++) {
+ offload = priv->tas_data.offload[port];
+ if (!offload)
+ continue;
+
+ taprio_offload_free(offload);
+ }
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_tas.h b/drivers/net/dsa/sja1105/sja1105_tas.h
new file mode 100644
index 000000000..c05bd07e8
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_tas.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#ifndef _SJA1105_TAS_H
+#define _SJA1105_TAS_H
+
+#include <net/pkt_sched.h>
+
+#define SJA1105_TAS_MAX_DELTA BIT(18)
+
+struct sja1105_private;
+
+#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_TAS)
+
+enum sja1105_tas_state {
+ SJA1105_TAS_STATE_DISABLED,
+ SJA1105_TAS_STATE_ENABLED_NOT_RUNNING,
+ SJA1105_TAS_STATE_RUNNING,
+};
+
+enum sja1105_ptp_op {
+ SJA1105_PTP_NONE,
+ SJA1105_PTP_CLOCKSTEP,
+ SJA1105_PTP_ADJUSTFREQ,
+};
+
+struct sja1105_gate_entry {
+ struct list_head list;
+ struct sja1105_rule *rule;
+ s64 interval;
+ u8 gate_state;
+};
+
+struct sja1105_gating_config {
+ u64 cycle_time;
+ s64 base_time;
+ int num_entries;
+ struct list_head entries;
+};
+
+struct sja1105_tas_data {
+ struct tc_taprio_qopt_offload *offload[SJA1105_MAX_NUM_PORTS];
+ struct sja1105_gating_config gating_cfg;
+ enum sja1105_tas_state state;
+ enum sja1105_ptp_op last_op;
+ struct work_struct tas_work;
+ s64 earliest_base_time;
+ s64 oper_base_time;
+ u64 max_cycle_time;
+ bool enabled;
+};
+
+int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
+ struct tc_taprio_qopt_offload *admin);
+
+void sja1105_tas_setup(struct dsa_switch *ds);
+
+void sja1105_tas_teardown(struct dsa_switch *ds);
+
+void sja1105_tas_clockstep(struct dsa_switch *ds);
+
+void sja1105_tas_adjfreq(struct dsa_switch *ds);
+
+bool sja1105_gating_check_conflicts(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack);
+
+int sja1105_init_scheduling(struct sja1105_private *priv);
+
+#else
+
+/* C doesn't allow empty structures, bah! */
+struct sja1105_tas_data {
+ u8 dummy;
+};
+
+static inline int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
+ struct tc_taprio_qopt_offload *admin)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void sja1105_tas_setup(struct dsa_switch *ds) { }
+
+static inline void sja1105_tas_teardown(struct dsa_switch *ds) { }
+
+static inline void sja1105_tas_clockstep(struct dsa_switch *ds) { }
+
+static inline void sja1105_tas_adjfreq(struct dsa_switch *ds) { }
+
+static inline bool
+sja1105_gating_check_conflicts(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack)
+{
+ return true;
+}
+
+static inline int sja1105_init_scheduling(struct sja1105_private *priv)
+{
+ return 0;
+}
+
+#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_TAS) */
+
+#endif /* _SJA1105_TAS_H */
diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c
new file mode 100644
index 000000000..b7e95d60a
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_vl.c
@@ -0,0 +1,802 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2020 NXP
+ */
+#include <net/tc_act/tc_gate.h>
+#include <linux/dsa/8021q.h>
+#include "sja1105_vl.h"
+
+#define SJA1105_SIZE_VL_STATUS 8
+
+/* Insert into the global gate list, sorted by gate action time. */
+static int sja1105_insert_gate_entry(struct sja1105_gating_config *gating_cfg,
+ struct sja1105_rule *rule,
+ u8 gate_state, s64 entry_time,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_gate_entry *e;
+ int rc;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->rule = rule;
+ e->gate_state = gate_state;
+ e->interval = entry_time;
+
+ if (list_empty(&gating_cfg->entries)) {
+ list_add(&e->list, &gating_cfg->entries);
+ } else {
+ struct sja1105_gate_entry *p;
+
+ list_for_each_entry(p, &gating_cfg->entries, list) {
+ if (p->interval == e->interval) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Gate conflict");
+ rc = -EBUSY;
+ goto err;
+ }
+
+ if (e->interval < p->interval)
+ break;
+ }
+ list_add(&e->list, p->list.prev);
+ }
+
+ gating_cfg->num_entries++;
+
+ return 0;
+err:
+ kfree(e);
+ return rc;
+}
+
+/* The gate entries contain absolute times in their e->interval field. Convert
+ * that to proper intervals (i.e. "0, 5, 10, 15" to "5, 5, 5, 5").
+ */
+static void
+sja1105_gating_cfg_time_to_interval(struct sja1105_gating_config *gating_cfg,
+ u64 cycle_time)
+{
+ struct sja1105_gate_entry *last_e;
+ struct sja1105_gate_entry *e;
+ struct list_head *prev;
+
+ list_for_each_entry(e, &gating_cfg->entries, list) {
+ struct sja1105_gate_entry *p;
+
+ prev = e->list.prev;
+
+ if (prev == &gating_cfg->entries)
+ continue;
+
+ p = list_entry(prev, struct sja1105_gate_entry, list);
+ p->interval = e->interval - p->interval;
+ }
+ last_e = list_last_entry(&gating_cfg->entries,
+ struct sja1105_gate_entry, list);
+ last_e->interval = cycle_time - last_e->interval;
+}
+
+static void sja1105_free_gating_config(struct sja1105_gating_config *gating_cfg)
+{
+ struct sja1105_gate_entry *e, *n;
+
+ list_for_each_entry_safe(e, n, &gating_cfg->entries, list) {
+ list_del(&e->list);
+ kfree(e);
+ }
+}
+
+static int sja1105_compose_gating_subschedule(struct sja1105_private *priv,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
+ struct sja1105_rule *rule;
+ s64 max_cycle_time = 0;
+ s64 its_base_time = 0;
+ int i, rc = 0;
+
+ sja1105_free_gating_config(gating_cfg);
+
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ if (rule->type != SJA1105_RULE_VL)
+ continue;
+ if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
+ continue;
+
+ if (max_cycle_time < rule->vl.cycle_time) {
+ max_cycle_time = rule->vl.cycle_time;
+ its_base_time = rule->vl.base_time;
+ }
+ }
+
+ if (!max_cycle_time)
+ return 0;
+
+ dev_dbg(priv->ds->dev, "max_cycle_time %lld its_base_time %lld\n",
+ max_cycle_time, its_base_time);
+
+ gating_cfg->base_time = its_base_time;
+ gating_cfg->cycle_time = max_cycle_time;
+ gating_cfg->num_entries = 0;
+
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ s64 time;
+ s64 rbt;
+
+ if (rule->type != SJA1105_RULE_VL)
+ continue;
+ if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
+ continue;
+
+ /* Calculate the difference between this gating schedule's
+ * base time, and the base time of the gating schedule with the
+ * longest cycle time. We call it the relative base time (rbt).
+ */
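+ /* Illustrative numbers (made up): a rule with base_time = 0.3 s
+ * and cycle_time = 0.2 s, against its_base_time = 1.0 s, yields a
+ * future base time of 0.3 s + 4 * 0.2 s = 1.1 s, hence rbt = 0.1 s.
+ * The while loop below then replicates the rule's windows at 0.1 s,
+ * 0.3 s and 0.5 s within a 0.6 s max_cycle_time.
+ */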
+ rbt = future_base_time(rule->vl.base_time, rule->vl.cycle_time,
+ its_base_time);
+ rbt -= its_base_time;
+
+ time = rbt;
+
+ for (i = 0; i < rule->vl.num_entries; i++) {
+ u8 gate_state = rule->vl.entries[i].gate_state;
+ s64 entry_time = time;
+
+ while (entry_time < max_cycle_time) {
+ rc = sja1105_insert_gate_entry(gating_cfg, rule,
+ gate_state,
+ entry_time,
+ extack);
+ if (rc)
+ goto err;
+
+ entry_time += rule->vl.cycle_time;
+ }
+ time += rule->vl.entries[i].interval;
+ }
+ }
+
+ sja1105_gating_cfg_time_to_interval(gating_cfg, max_cycle_time);
+
+ return 0;
+err:
+ sja1105_free_gating_config(gating_cfg);
+ return rc;
+}
+
+/* The switch flow classification core implements TTEthernet, which 'thinks' in
+ * terms of Virtual Links (VL), a concept borrowed from ARINC 664 part 7.
+ * However it also has one other operating mode (VLLUPFORMAT=0) where it acts
+ * somewhat closer to a pre-standard implementation of IEEE 802.1Qci
+ * (Per-Stream Filtering and Policing), which is what the driver is going to be
+ * implementing.
+ *
+ * VL Lookup
+ * Key = {DMAC && VLANID +---------+ Key = { (DMAC[47:16] & VLMASK ==
+ * && VLAN PCP | | VLMARKER)
+ * && INGRESS PORT} +---------+ (both fixed)
+ * (exact match, | && DMAC[15:0] == VLID
+ * all specified in rule) | (specified in rule)
+ * v && INGRESS PORT }
+ * ------------
+ * 0 (PSFP) / \ 1 (ARINC664)
+ * +-----------/ VLLUPFORMAT \----------+
+ * | \ (fixed) / |
+ * | \ / |
+ * 0 (forwarding) v ------------ |
+ * ------------ |
+ * / \ 1 (QoS classification) |
+ * +---/ ISCRITICAL \-----------+ |
+ * | \ (per rule) / | |
+ * | \ / VLID taken from VLID taken from
+ * v ------------ index of rule contents of rule
+ * select that matched that matched
+ * DESTPORTS | |
+ * | +---------+--------+
+ * | |
+ * | v
+ * | VL Forwarding
+ * | (indexed by VLID)
+ * | +---------+
+ * | +--------------| |
+ * | | select TYPE +---------+
+ * | v
+ * | 0 (rate ------------ 1 (time
+ * | constrained) / \ triggered)
+ * | +------/ TYPE \------------+
+ * | | \ (per VLID) / |
+ * | v \ / v
+ * | VL Policing ------------ VL Policing
+ * | (indexed by VLID) (indexed by VLID)
+ * | +---------+ +---------+
+ * | | TYPE=0 | | TYPE=1 |
+ * | +---------+ +---------+
+ * | select SHARINDX select SHARINDX to
+ * | to rate-limit re-enter VL Forwarding
+ * | groups of VL's with new VLID for egress
+ * | to same quota |
+ * | | |
+ * | select MAXLEN -> exceed => drop select MAXLEN -> exceed => drop
+ * | | |
+ * | v v
+ * | VL Forwarding VL Forwarding
+ * | (indexed by SHARINDX) (indexed by SHARINDX)
+ * | +---------+ +---------+
+ * | | TYPE=0 | | TYPE=1 |
+ * | +---------+ +---------+
+ * | select PRIORITY, select PRIORITY,
+ * | PARTITION, DESTPORTS PARTITION, DESTPORTS
+ * | | |
+ * | v v
+ * | VL Policing VL Policing
+ * | (indexed by SHARINDX) (indexed by SHARINDX)
+ * | +---------+ +---------+
+ * | | TYPE=0 | | TYPE=1 |
+ * | +---------+ +---------+
+ * | | |
+ * | v |
+ * | select BAG, -> exceed => drop |
+ * | JITTER v
+ * | | ----------------------------------------------
+ * | | / Reception Window is open for this VL \
+ * | | / (the Schedule Table executes an entry i \
+ * | | / M <= i < N, for which these conditions hold): \ no
+ * | | +----/ \-+
+ * | | |yes \ WINST[M] == 1 && WINSTINDEX[M] == VLID / |
+ * | | | \ WINEND[N] == 1 && WINSTINDEX[N] == VLID / |
+ * | | | \ / |
+ * | | | \ (the VL window has opened and not yet closed)/ |
+ * | | | ---------------------------------------------- |
+ * | | v v
+ * | | dispatch to DESTPORTS when the Schedule Table drop
+ * | | executes an entry i with TXEN == 1 && VLINDEX == i
+ * v v
+ * dispatch immediately to DESTPORTS
+ *
+ * The per-port classification key is always composed of {DMAC, VID, PCP} and
+ * is non-maskable. This 'looks like' the NULL stream identification function
+ * from IEEE 802.1CB clause 6, except for the extra VLAN PCP. When the switch
+ * ports operate as VLAN-unaware, we do allow the user to not specify the VLAN
+ * ID and PCP, and then the port-based defaults will be used.
+ *
+ * In TTEthernet, routing is something that needs to be done manually for each
+ * Virtual Link. So the flow action must always include one of:
+ * a. 'redirect', 'trap' or 'drop': select the egress port list
+ * Additionally, the following actions may be applied on a Virtual Link,
+ * turning it into 'critical' traffic:
+ * b. 'police': turn it into a rate-constrained VL, with bandwidth limitation
+ * given by the maximum frame length, bandwidth allocation gap (BAG) and
+ * maximum jitter.
+ * c. 'gate': turn it into a time-triggered VL, which can only be received
+ * and forwarded according to a given schedule.
+ */
+
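+/* Illustrative user space usage (not part of this driver; device names
+ * and numbers are made up). A VL is installed through tc-flower, e.g. a
+ * plain redirect (case a. above):
+ *
+ *   tc qdisc add dev swp2 clsact
+ *   tc filter add dev swp2 ingress flower skip_sw \
+ *           dst_mac 42:be:24:9b:76:20 \
+ *           action mirred egress redirect dev swp3
+ *
+ * and a time-triggered VL (case c. above), whose OPEN/CLOSE intervals
+ * are validated against the 200 ns delta granularity in
+ * sja1105_vl_gate() below:
+ *
+ *   tc filter add dev swp2 ingress flower skip_sw \
+ *           dst_mac 42:be:24:9b:76:20 \
+ *           action gate index 1 base-time 0 \
+ *           sched-entry OPEN 1200 -1 -1 \
+ *           sched-entry CLOSE 403200 -1 -1
+ */
+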
+static bool sja1105_vl_key_lower(struct sja1105_vl_lookup_entry *a,
+ struct sja1105_vl_lookup_entry *b)
+{
+ if (a->macaddr < b->macaddr)
+ return true;
+ if (a->macaddr > b->macaddr)
+ return false;
+ if (a->vlanid < b->vlanid)
+ return true;
+ if (a->vlanid > b->vlanid)
+ return false;
+ if (a->port < b->port)
+ return true;
+ if (a->port > b->port)
+ return false;
+ if (a->vlanprior < b->vlanprior)
+ return true;
+ if (a->vlanprior > b->vlanprior)
+ return false;
+ /* Keys are equal */
+ return false;
+}
+
+/* FIXME: this should change when the bridge upper of the port changes. */
+static u16 sja1105_port_get_tag_8021q_vid(struct dsa_port *dp)
+{
+ unsigned long bridge_num;
+
+ if (!dp->bridge)
+ return dsa_tag_8021q_standalone_vid(dp);
+
+ bridge_num = dsa_port_bridge_num_get(dp);
+
+ return dsa_tag_8021q_bridge_vid(bridge_num);
+}
+
+static int sja1105_init_virtual_links(struct sja1105_private *priv,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_vl_policing_entry *vl_policing;
+ struct sja1105_vl_forwarding_entry *vl_fwd;
+ struct sja1105_vl_lookup_entry *vl_lookup;
+ bool have_critical_virtual_links = false;
+ struct sja1105_table *table;
+ struct sja1105_rule *rule;
+ int num_virtual_links = 0;
+ int max_sharindx = 0;
+ int i, j, k;
+
+ /* Figure out the dimensioning of the problem */
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ if (rule->type != SJA1105_RULE_VL)
+ continue;
+ /* Each VL lookup entry matches on a single ingress port */
+ num_virtual_links += hweight_long(rule->port_mask);
+
+ if (rule->vl.type != SJA1105_VL_NONCRITICAL)
+ have_critical_virtual_links = true;
+ if (max_sharindx < rule->vl.sharindx)
+ max_sharindx = rule->vl.sharindx;
+ }
+
+ if (num_virtual_links > SJA1105_MAX_VL_LOOKUP_COUNT) {
+ NL_SET_ERR_MSG_MOD(extack, "Not enough VL entries available");
+ return -ENOSPC;
+ }
+
+ if (max_sharindx + 1 > SJA1105_MAX_VL_LOOKUP_COUNT) {
+ NL_SET_ERR_MSG_MOD(extack, "Policer index out of range");
+ return -ENOSPC;
+ }
+
+ max_sharindx = max_t(int, num_virtual_links, max_sharindx) + 1;
+
+ /* Discard previous VL Lookup Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Discard previous VL Policing Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_POLICING];
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Discard previous VL Forwarding Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING];
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Discard previous VL Forwarding Parameters Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Nothing to do */
+ if (!num_virtual_links)
+ return 0;
+
+ /* Pre-allocate space in the static config tables */
+
+ /* VL Lookup Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
+ table->entries = kcalloc(num_virtual_links,
+ table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+ table->entry_count = num_virtual_links;
+ vl_lookup = table->entries;
+
+ k = 0;
+
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ unsigned long port;
+
+ if (rule->type != SJA1105_RULE_VL)
+ continue;
+
+ for_each_set_bit(port, &rule->port_mask, SJA1105_MAX_NUM_PORTS) {
+ vl_lookup[k].format = SJA1105_VL_FORMAT_PSFP;
+ vl_lookup[k].port = port;
+ vl_lookup[k].macaddr = rule->key.vl.dmac;
+ if (rule->key.type == SJA1105_KEY_VLAN_AWARE_VL) {
+ vl_lookup[k].vlanid = rule->key.vl.vid;
+ vl_lookup[k].vlanprior = rule->key.vl.pcp;
+ } else {
+ /* FIXME */
+ struct dsa_port *dp = dsa_to_port(priv->ds, port);
+ u16 vid = sja1105_port_get_tag_8021q_vid(dp);
+
+ vl_lookup[k].vlanid = vid;
+ vl_lookup[k].vlanprior = 0;
+ }
+ /* For critical VLs, the DESTPORTS mask is taken from
+ * the VL Forwarding Table, so no point in putting it
+ * in the VL Lookup Table
+ */
+ if (rule->vl.type == SJA1105_VL_NONCRITICAL)
+ vl_lookup[k].destports = rule->vl.destports;
+ else
+ vl_lookup[k].iscritical = true;
+ vl_lookup[k].flow_cookie = rule->cookie;
+ k++;
+ }
+ }
+
+ /* UM10944.pdf chapter 4.2.3 VL Lookup table:
+ * "the entries in the VL Lookup table must be sorted in ascending
+ * order (i.e. the smallest value must be loaded first) according to
+ * the following sort order: MACADDR, VLANID, PORT, VLANPRIOR."
+ */
+ for (i = 0; i < num_virtual_links; i++) {
+ struct sja1105_vl_lookup_entry *a = &vl_lookup[i];
+
+ for (j = i + 1; j < num_virtual_links; j++) {
+ struct sja1105_vl_lookup_entry *b = &vl_lookup[j];
+
+ if (sja1105_vl_key_lower(b, a)) {
+ struct sja1105_vl_lookup_entry tmp = *a;
+
+ *a = *b;
+ *b = tmp;
+ }
+ }
+ }
+
+ if (!have_critical_virtual_links)
+ return 0;
+
+ /* VL Policing Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_POLICING];
+ table->entries = kcalloc(max_sharindx, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+ table->entry_count = max_sharindx;
+ vl_policing = table->entries;
+
+ /* VL Forwarding Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING];
+ table->entries = kcalloc(max_sharindx, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+ table->entry_count = max_sharindx;
+ vl_fwd = table->entries;
+
+ /* VL Forwarding Parameters Table */
+ table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
+ table->entries = kcalloc(1, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+ table->entry_count = 1;
+
+ for (i = 0; i < num_virtual_links; i++) {
+ unsigned long cookie = vl_lookup[i].flow_cookie;
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+
+ if (rule->vl.type == SJA1105_VL_NONCRITICAL)
+ continue;
+ if (rule->vl.type == SJA1105_VL_TIME_TRIGGERED) {
+ int sharindx = rule->vl.sharindx;
+
+ vl_policing[i].type = 1;
+ vl_policing[i].sharindx = sharindx;
+ vl_policing[i].maxlen = rule->vl.maxlen;
+ vl_policing[sharindx].type = 1;
+
+ vl_fwd[i].type = 1;
+ vl_fwd[sharindx].type = 1;
+ vl_fwd[sharindx].priority = rule->vl.ipv;
+ vl_fwd[sharindx].partition = 0;
+ vl_fwd[sharindx].destports = rule->vl.destports;
+ }
+ }
+
+ sja1105_frame_memory_partitioning(priv);
+
+ return 0;
+}
+
+int sja1105_vl_redirect(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack, unsigned long cookie,
+ struct sja1105_key *key, unsigned long destports,
+ bool append)
+{
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+ struct dsa_port *dp = dsa_to_port(priv->ds, port);
+ bool vlan_aware = dsa_port_is_vlan_filtering(dp);
+ int rc;
+
+ if (!vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only redirect based on DMAC");
+ return -EOPNOTSUPP;
+ } else if (vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only redirect based on {DMAC, VID, PCP}");
+ return -EOPNOTSUPP;
+ }
+
+ if (!rule) {
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ rule->cookie = cookie;
+ rule->type = SJA1105_RULE_VL;
+ rule->key = *key;
+ list_add(&rule->list, &priv->flow_block.rules);
+ }
+
+ rule->port_mask |= BIT(port);
+ if (append)
+ rule->vl.destports |= destports;
+ else
+ rule->vl.destports = destports;
+
+ rc = sja1105_init_virtual_links(priv, extack);
+ if (rc) {
+ rule->port_mask &= ~BIT(port);
+ if (!rule->port_mask) {
+ list_del(&rule->list);
+ kfree(rule);
+ }
+ }
+
+ return rc;
+}
+
+int sja1105_vl_delete(struct sja1105_private *priv, int port,
+ struct sja1105_rule *rule, struct netlink_ext_ack *extack)
+{
+ int rc;
+
+ rule->port_mask &= ~BIT(port);
+ if (!rule->port_mask) {
+ list_del(&rule->list);
+ kfree(rule);
+ }
+
+ rc = sja1105_compose_gating_subschedule(priv, extack);
+ if (rc)
+ return rc;
+
+ rc = sja1105_init_virtual_links(priv, extack);
+ if (rc)
+ return rc;
+
+ rc = sja1105_init_scheduling(priv);
+ if (rc < 0)
+ return rc;
+
+ return sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
+}
+
+int sja1105_vl_gate(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack, unsigned long cookie,
+ struct sja1105_key *key, u32 index, s32 prio,
+ u64 base_time, u64 cycle_time, u64 cycle_time_ext,
+ u32 num_entries, struct action_gate_entry *entries)
+{
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+ struct dsa_port *dp = dsa_to_port(priv->ds, port);
+ bool vlan_aware = dsa_port_is_vlan_filtering(dp);
+ int ipv = -1;
+ int i, rc;
+ s32 rem;
+
+ if (cycle_time_ext) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cycle time extension not supported");
+ return -EOPNOTSUPP;
+ }
+
+ div_s64_rem(base_time, sja1105_delta_to_ns(1), &rem);
+ if (rem) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Base time must be multiple of 200 ns");
+ return -ERANGE;
+ }
+
+ div_s64_rem(cycle_time, sja1105_delta_to_ns(1), &rem);
+ if (rem) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cycle time must be multiple of 200 ns");
+ return -ERANGE;
+ }
+
+ if (!vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only gate based on DMAC");
+ return -EOPNOTSUPP;
+ } else if (vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only gate based on {DMAC, VID, PCP}");
+ return -EOPNOTSUPP;
+ }
+
+ if (!rule) {
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ list_add(&rule->list, &priv->flow_block.rules);
+ rule->cookie = cookie;
+ rule->type = SJA1105_RULE_VL;
+ rule->key = *key;
+ rule->vl.type = SJA1105_VL_TIME_TRIGGERED;
+ rule->vl.sharindx = index;
+ rule->vl.base_time = base_time;
+ rule->vl.cycle_time = cycle_time;
+ rule->vl.num_entries = num_entries;
+ rule->vl.entries = kcalloc(num_entries,
+ sizeof(struct action_gate_entry),
+ GFP_KERNEL);
+ if (!rule->vl.entries) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < num_entries; i++) {
+ div_s64_rem(entries[i].interval,
+ sja1105_delta_to_ns(1), &rem);
+ if (rem) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Interval must be multiple of 200 ns");
+ rc = -ERANGE;
+ goto out;
+ }
+
+ if (!entries[i].interval) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Interval cannot be zero");
+ rc = -ERANGE;
+ goto out;
+ }
+
+ if (ns_to_sja1105_delta(entries[i].interval) >
+ SJA1105_TAS_MAX_DELTA) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Maximum interval is 52 ms");
+ rc = -ERANGE;
+ goto out;
+ }
+
+ if (entries[i].maxoctets != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload IntervalOctetMax");
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (ipv == -1) {
+ ipv = entries[i].ipv;
+ } else if (ipv != entries[i].ipv) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only support a single IPV per VL");
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+
+ rule->vl.entries[i] = entries[i];
+ }
+
+ if (ipv == -1) {
+ if (key->type == SJA1105_KEY_VLAN_AWARE_VL)
+ ipv = key->vl.pcp;
+ else
+ ipv = 0;
+ }
+
+ /* TODO: support per-flow MTU */
+ rule->vl.maxlen = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
+ rule->vl.ipv = ipv;
+ }
+
+ rule->port_mask |= BIT(port);
+
+ rc = sja1105_compose_gating_subschedule(priv, extack);
+ if (rc)
+ goto out;
+
+ rc = sja1105_init_virtual_links(priv, extack);
+ if (rc)
+ goto out;
+
+ if (sja1105_gating_check_conflicts(priv, -1, extack)) {
+ NL_SET_ERR_MSG_MOD(extack, "Conflict with tc-taprio schedule");
+ rc = -ERANGE;
+ goto out;
+ }
+
+out:
+ if (rc) {
+ rule->port_mask &= ~BIT(port);
+ if (!rule->port_mask) {
+ list_del(&rule->list);
+ kfree(rule->vl.entries);
+ kfree(rule);
+ }
+ }
+
+ return rc;
+}
+
+static int sja1105_find_vlid(struct sja1105_private *priv, int port,
+ struct sja1105_key *key)
+{
+ struct sja1105_vl_lookup_entry *vl_lookup;
+ struct sja1105_table *table;
+ int i;
+
+ if (WARN_ON(key->type != SJA1105_KEY_VLAN_AWARE_VL &&
+ key->type != SJA1105_KEY_VLAN_UNAWARE_VL))
+ return -1;
+
+ table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
+ vl_lookup = table->entries;
+
+ for (i = 0; i < table->entry_count; i++) {
+ if (key->type == SJA1105_KEY_VLAN_AWARE_VL) {
+ if (vl_lookup[i].port == port &&
+ vl_lookup[i].macaddr == key->vl.dmac &&
+ vl_lookup[i].vlanid == key->vl.vid &&
+ vl_lookup[i].vlanprior == key->vl.pcp)
+ return i;
+ } else {
+ if (vl_lookup[i].port == port &&
+ vl_lookup[i].macaddr == key->vl.dmac)
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+int sja1105_vl_stats(struct sja1105_private *priv, int port,
+ struct sja1105_rule *rule, struct flow_stats *stats,
+ struct netlink_ext_ack *extack)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 buf[SJA1105_SIZE_VL_STATUS] = {0};
+ u64 unreleased;
+ u64 timingerr;
+ u64 lengtherr;
+ int vlid, rc;
+ u64 pkts;
+
+ if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
+ return 0;
+
+ vlid = sja1105_find_vlid(priv, port, &rule->key);
+ if (vlid < 0)
+ return 0;
+
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->vl_status + 2 * vlid, buf,
+ SJA1105_SIZE_VL_STATUS);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "SPI access failed");
+ return rc;
+ }
+
+ sja1105_unpack(buf, &timingerr, 31, 16, SJA1105_SIZE_VL_STATUS);
+ sja1105_unpack(buf, &unreleased, 15, 0, SJA1105_SIZE_VL_STATUS);
+ sja1105_unpack(buf, &lengtherr, 47, 32, SJA1105_SIZE_VL_STATUS);
+
+ pkts = timingerr + unreleased + lengtherr;
+
+ flow_stats_update(stats, 0, pkts - rule->vl.stats.pkts, 0,
+ jiffies - rule->vl.stats.lastused,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+
+ rule->vl.stats.pkts = pkts;
+ rule->vl.stats.lastused = jiffies;
+
+ return 0;
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_vl.h b/drivers/net/dsa/sja1105/sja1105_vl.h
new file mode 100644
index 000000000..51fba0dce
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_vl.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2020 NXP
+ */
+#ifndef _SJA1105_VL_H
+#define _SJA1105_VL_H
+
+#include "sja1105.h"
+
+#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_VL)
+
+int sja1105_vl_redirect(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack, unsigned long cookie,
+ struct sja1105_key *key, unsigned long destports,
+ bool append);
+
+int sja1105_vl_delete(struct sja1105_private *priv, int port,
+ struct sja1105_rule *rule,
+ struct netlink_ext_ack *extack);
+
+int sja1105_vl_gate(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack, unsigned long cookie,
+ struct sja1105_key *key, u32 index, s32 prio,
+ u64 base_time, u64 cycle_time, u64 cycle_time_ext,
+ u32 num_entries, struct action_gate_entry *entries);
+
+int sja1105_vl_stats(struct sja1105_private *priv, int port,
+ struct sja1105_rule *rule, struct flow_stats *stats,
+ struct netlink_ext_ack *extack);
+
+#else
+
+static inline int sja1105_vl_redirect(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack,
+ unsigned long cookie,
+ struct sja1105_key *key,
+ unsigned long destports,
+ bool append)
+{
+ NL_SET_ERR_MSG_MOD(extack, "Virtual Links not compiled in");
+ return -EOPNOTSUPP;
+}
+
+static inline int sja1105_vl_delete(struct sja1105_private *priv,
+ int port, struct sja1105_rule *rule,
+ struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG_MOD(extack, "Virtual Links not compiled in");
+ return -EOPNOTSUPP;
+}
+
+static inline int sja1105_vl_gate(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack,
+ unsigned long cookie,
+ struct sja1105_key *key, u32 index, s32 prio,
+ u64 base_time, u64 cycle_time,
+ u64 cycle_time_ext, u32 num_entries,
+ struct action_gate_entry *entries)
+{
+ NL_SET_ERR_MSG_MOD(extack, "Virtual Links not compiled in");
+ return -EOPNOTSUPP;
+}
+
+static inline int sja1105_vl_stats(struct sja1105_private *priv, int port,
+ struct sja1105_rule *rule,
+ struct flow_stats *stats,
+ struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG_MOD(extack, "Virtual Links not compiled in");
+ return -EOPNOTSUPP;
+}
+
+#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_VL) */
+
+#endif /* _SJA1105_VL_H */
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
new file mode 100644
index 000000000..3efd55669
--- /dev/null
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -0,0 +1,1233 @@
+// SPDX-License-Identifier: GPL-2.0
+/* DSA driver for:
+ * Vitesse VSC7385 SparX-G5 5+1-port Integrated Gigabit Ethernet Switch
+ * Vitesse VSC7388 SparX-G8 8-port Integrated Gigabit Ethernet Switch
+ * Vitesse VSC7395 SparX-G5e 5+1-port Integrated Gigabit Ethernet Switch
+ * Vitesse VSC7398 SparX-G8e 8-port Integrated Gigabit Ethernet Switch
+ *
+ * These switches have a built-in 8051 CPU and can download and execute a
+ * firmware in this CPU. They can also be configured to use an external CPU
+ * handling the switch in a memory-mapped manner by connecting to that external
+ * CPU's memory bus.
+ *
+ * Copyright (C) 2018 Linus Walleij <linus.walleij@linaro.org>
+ * Includes portions of code from the firmware uploader by:
+ * Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/bitops.h>
+#include <linux/if_bridge.h>
+#include <linux/etherdevice.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+#include <linux/random.h>
+#include <net/dsa.h>
+
+#include "vitesse-vsc73xx.h"
+
+#define VSC73XX_BLOCK_MAC 0x1 /* Subblocks 0-4, 6 (CPU port) */
+#define VSC73XX_BLOCK_ANALYZER 0x2 /* Only subblock 0 */
+#define VSC73XX_BLOCK_MII 0x3 /* Subblocks 0 and 1 */
+#define VSC73XX_BLOCK_MEMINIT 0x3 /* Only subblock 2 */
+#define VSC73XX_BLOCK_CAPTURE 0x4 /* Only subblock 2 */
+#define VSC73XX_BLOCK_ARBITER 0x5 /* Only subblock 0 */
+#define VSC73XX_BLOCK_SYSTEM 0x7 /* Only subblock 0 */
+
+#define CPU_PORT 6 /* CPU port */
+
+/* MAC Block registers */
+#define VSC73XX_MAC_CFG 0x00
+#define VSC73XX_MACHDXGAP 0x02
+#define VSC73XX_FCCONF 0x04
+#define VSC73XX_FCMACHI 0x08
+#define VSC73XX_FCMACLO 0x0c
+#define VSC73XX_MAXLEN 0x10
+#define VSC73XX_ADVPORTM 0x19
+#define VSC73XX_TXUPDCFG 0x24
+#define VSC73XX_TXQ_SELECT_CFG 0x28
+#define VSC73XX_RXOCT 0x50
+#define VSC73XX_TXOCT 0x51
+#define VSC73XX_C_RX0 0x52
+#define VSC73XX_C_RX1 0x53
+#define VSC73XX_C_RX2 0x54
+#define VSC73XX_C_TX0 0x55
+#define VSC73XX_C_TX1 0x56
+#define VSC73XX_C_TX2 0x57
+#define VSC73XX_C_CFG 0x58
+#define VSC73XX_CAT_DROP 0x6e
+#define VSC73XX_CAT_PR_MISC_L2 0x6f
+#define VSC73XX_CAT_PR_USR_PRIO 0x75
+#define VSC73XX_Q_MISC_CONF 0xdf
+
+/* MAC_CFG register bits */
+#define VSC73XX_MAC_CFG_WEXC_DIS BIT(31)
+#define VSC73XX_MAC_CFG_PORT_RST BIT(29)
+#define VSC73XX_MAC_CFG_TX_EN BIT(28)
+#define VSC73XX_MAC_CFG_SEED_LOAD BIT(27)
+#define VSC73XX_MAC_CFG_SEED_MASK GENMASK(26, 19)
+#define VSC73XX_MAC_CFG_SEED_OFFSET 19
+#define VSC73XX_MAC_CFG_FDX BIT(18)
+#define VSC73XX_MAC_CFG_GIGA_MODE BIT(17)
+#define VSC73XX_MAC_CFG_RX_EN BIT(16)
+#define VSC73XX_MAC_CFG_VLAN_DBLAWR BIT(15)
+#define VSC73XX_MAC_CFG_VLAN_AWR BIT(14)
+#define VSC73XX_MAC_CFG_100_BASE_T BIT(13) /* Not in manual */
+#define VSC73XX_MAC_CFG_TX_IPG_MASK GENMASK(10, 6)
+#define VSC73XX_MAC_CFG_TX_IPG_OFFSET 6
+#define VSC73XX_MAC_CFG_TX_IPG_1000M (6 << VSC73XX_MAC_CFG_TX_IPG_OFFSET)
+#define VSC73XX_MAC_CFG_TX_IPG_100_10M (17 << VSC73XX_MAC_CFG_TX_IPG_OFFSET)
+#define VSC73XX_MAC_CFG_MAC_RX_RST BIT(5)
+#define VSC73XX_MAC_CFG_MAC_TX_RST BIT(4)
+#define VSC73XX_MAC_CFG_CLK_SEL_MASK GENMASK(2, 0)
+#define VSC73XX_MAC_CFG_CLK_SEL_OFFSET 0
+#define VSC73XX_MAC_CFG_CLK_SEL_1000M 1
+#define VSC73XX_MAC_CFG_CLK_SEL_100M 2
+#define VSC73XX_MAC_CFG_CLK_SEL_10M 3
+#define VSC73XX_MAC_CFG_CLK_SEL_EXT 4
+
+#define VSC73XX_MAC_CFG_1000M_F_PHY (VSC73XX_MAC_CFG_FDX | \
+ VSC73XX_MAC_CFG_GIGA_MODE | \
+ VSC73XX_MAC_CFG_TX_IPG_1000M | \
+ VSC73XX_MAC_CFG_CLK_SEL_EXT)
+#define VSC73XX_MAC_CFG_100_10M_F_PHY (VSC73XX_MAC_CFG_FDX | \
+ VSC73XX_MAC_CFG_TX_IPG_100_10M | \
+ VSC73XX_MAC_CFG_CLK_SEL_EXT)
+#define VSC73XX_MAC_CFG_100_10M_H_PHY (VSC73XX_MAC_CFG_TX_IPG_100_10M | \
+ VSC73XX_MAC_CFG_CLK_SEL_EXT)
+#define VSC73XX_MAC_CFG_1000M_F_RGMII (VSC73XX_MAC_CFG_FDX | \
+ VSC73XX_MAC_CFG_GIGA_MODE | \
+ VSC73XX_MAC_CFG_TX_IPG_1000M | \
+ VSC73XX_MAC_CFG_CLK_SEL_1000M)
+#define VSC73XX_MAC_CFG_RESET (VSC73XX_MAC_CFG_PORT_RST | \
+ VSC73XX_MAC_CFG_MAC_RX_RST | \
+ VSC73XX_MAC_CFG_MAC_TX_RST)
+
+/* Flow control register bits */
+#define VSC73XX_FCCONF_ZERO_PAUSE_EN BIT(17)
+#define VSC73XX_FCCONF_FLOW_CTRL_OBEY BIT(16)
+#define VSC73XX_FCCONF_PAUSE_VAL_MASK GENMASK(15, 0)
+
+/* ADVPORTM advanced port setup register bits */
+#define VSC73XX_ADVPORTM_IFG_PPM BIT(7)
+#define VSC73XX_ADVPORTM_EXC_COL_CONT BIT(6)
+#define VSC73XX_ADVPORTM_EXT_PORT BIT(5)
+#define VSC73XX_ADVPORTM_INV_GTX BIT(4)
+#define VSC73XX_ADVPORTM_ENA_GTX BIT(3)
+#define VSC73XX_ADVPORTM_DDR_MODE BIT(2)
+#define VSC73XX_ADVPORTM_IO_LOOPBACK BIT(1)
+#define VSC73XX_ADVPORTM_HOST_LOOPBACK BIT(0)
+
+/* CAT_DROP categorizer frame dropping register bits */
+#define VSC73XX_CAT_DROP_DROP_MC_SMAC_ENA BIT(6)
+#define VSC73XX_CAT_DROP_FWD_CTRL_ENA BIT(4)
+#define VSC73XX_CAT_DROP_FWD_PAUSE_ENA BIT(3)
+#define VSC73XX_CAT_DROP_UNTAGGED_ENA BIT(2)
+#define VSC73XX_CAT_DROP_TAGGED_ENA BIT(1)
+#define VSC73XX_CAT_DROP_NULL_MAC_ENA BIT(0)
+
+#define VSC73XX_Q_MISC_CONF_EXTENT_MEM BIT(31)
+#define VSC73XX_Q_MISC_CONF_EARLY_TX_MASK GENMASK(4, 1)
+#define VSC73XX_Q_MISC_CONF_EARLY_TX_512 (1 << 1)
+#define VSC73XX_Q_MISC_CONF_MAC_PAUSE_MODE BIT(0)
+
+/* Frame analyzer block 2 registers */
+#define VSC73XX_STORMLIMIT 0x02
+#define VSC73XX_ADVLEARN 0x03
+#define VSC73XX_IFLODMSK 0x04
+#define VSC73XX_VLANMASK 0x05
+#define VSC73XX_MACHDATA 0x06
+#define VSC73XX_MACLDATA 0x07
+#define VSC73XX_ANMOVED 0x08
+#define VSC73XX_ANAGEFIL 0x09
+#define VSC73XX_ANEVENTS 0x0a
+#define VSC73XX_ANCNTMASK 0x0b
+#define VSC73XX_ANCNTVAL 0x0c
+#define VSC73XX_LEARNMASK 0x0d
+#define VSC73XX_UFLODMASK 0x0e
+#define VSC73XX_MFLODMASK 0x0f
+#define VSC73XX_RECVMASK 0x10
+#define VSC73XX_AGGRCTRL 0x20
+#define VSC73XX_AGGRMSKS 0x30 /* Until 0x3f */
+#define VSC73XX_DSTMASKS 0x40 /* Until 0x7f */
+#define VSC73XX_SRCMASKS 0x80 /* Until 0x87 */
+#define VSC73XX_CAPENAB 0xa0
+#define VSC73XX_MACACCESS 0xb0
+#define VSC73XX_IPMCACCESS 0xb1
+#define VSC73XX_MACTINDX 0xc0
+#define VSC73XX_VLANACCESS 0xd0
+#define VSC73XX_VLANTIDX 0xe0
+#define VSC73XX_AGENCTRL 0xf0
+#define VSC73XX_CAPRST 0xff
+
+#define VSC73XX_MACACCESS_CPU_COPY BIT(14)
+#define VSC73XX_MACACCESS_FWD_KILL BIT(13)
+#define VSC73XX_MACACCESS_IGNORE_VLAN BIT(12)
+#define VSC73XX_MACACCESS_AGED_FLAG BIT(11)
+#define VSC73XX_MACACCESS_VALID BIT(10)
+#define VSC73XX_MACACCESS_LOCKED BIT(9)
+#define VSC73XX_MACACCESS_DEST_IDX_MASK GENMASK(8, 3)
+#define VSC73XX_MACACCESS_CMD_MASK GENMASK(2, 0)
+#define VSC73XX_MACACCESS_CMD_IDLE 0
+#define VSC73XX_MACACCESS_CMD_LEARN 1
+#define VSC73XX_MACACCESS_CMD_FORGET 2
+#define VSC73XX_MACACCESS_CMD_AGE_TABLE 3
+#define VSC73XX_MACACCESS_CMD_FLUSH_TABLE 4
+#define VSC73XX_MACACCESS_CMD_CLEAR_TABLE 5
+#define VSC73XX_MACACCESS_CMD_READ_ENTRY 6
+#define VSC73XX_MACACCESS_CMD_WRITE_ENTRY 7
+
+#define VSC73XX_VLANACCESS_LEARN_DISABLED BIT(30)
+#define VSC73XX_VLANACCESS_VLAN_MIRROR BIT(29)
+#define VSC73XX_VLANACCESS_VLAN_SRC_CHECK BIT(28)
+#define VSC73XX_VLANACCESS_VLAN_PORT_MASK GENMASK(9, 2)
+#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK GENMASK(2, 0)
+#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_IDLE 0
+#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_READ_ENTRY 1
+#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_WRITE_ENTRY 2
+#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_CLEAR_TABLE 3
+
+/* MII block 3 registers */
+#define VSC73XX_MII_STAT 0x0
+#define VSC73XX_MII_CMD 0x1
+#define VSC73XX_MII_DATA 0x2
+
+/* Arbiter block 5 registers */
+#define VSC73XX_ARBEMPTY 0x0c
+#define VSC73XX_ARBDISC 0x0e
+#define VSC73XX_SBACKWDROP 0x12
+#define VSC73XX_DBACKWDROP 0x13
+#define VSC73XX_ARBBURSTPROB 0x15
+
+/* System block 7 registers */
+#define VSC73XX_ICPU_SIPAD 0x01
+#define VSC73XX_GMIIDELAY 0x05
+#define VSC73XX_ICPU_CTRL 0x10
+#define VSC73XX_ICPU_ADDR 0x11
+#define VSC73XX_ICPU_SRAM 0x12
+#define VSC73XX_HWSEM 0x13
+#define VSC73XX_GLORESET 0x14
+#define VSC73XX_ICPU_MBOX_VAL 0x15
+#define VSC73XX_ICPU_MBOX_SET 0x16
+#define VSC73XX_ICPU_MBOX_CLR 0x17
+#define VSC73XX_CHIPID 0x18
+#define VSC73XX_GPIO 0x34
+
+#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_NONE 0
+#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_1_4_NS 1
+#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_1_7_NS 2
+#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS 3
+
+#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_NONE (0 << 4)
+#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_1_4_NS (1 << 4)
+#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_1_7_NS (2 << 4)
+#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_2_0_NS (3 << 4)
+
+#define VSC73XX_ICPU_CTRL_WATCHDOG_RST BIT(31)
+#define VSC73XX_ICPU_CTRL_CLK_DIV_MASK GENMASK(12, 8)
+#define VSC73XX_ICPU_CTRL_SRST_HOLD BIT(7)
+#define VSC73XX_ICPU_CTRL_ICPU_PI_EN BIT(6)
+#define VSC73XX_ICPU_CTRL_BOOT_EN BIT(3)
+#define VSC73XX_ICPU_CTRL_EXT_ACC_EN BIT(2)
+#define VSC73XX_ICPU_CTRL_CLK_EN BIT(1)
+#define VSC73XX_ICPU_CTRL_SRST BIT(0)
+
+#define VSC73XX_CHIPID_ID_SHIFT 12
+#define VSC73XX_CHIPID_ID_MASK 0xffff
+#define VSC73XX_CHIPID_REV_SHIFT 28
+#define VSC73XX_CHIPID_REV_MASK 0xf
+#define VSC73XX_CHIPID_ID_7385 0x7385
+#define VSC73XX_CHIPID_ID_7388 0x7388
+#define VSC73XX_CHIPID_ID_7395 0x7395
+#define VSC73XX_CHIPID_ID_7398 0x7398
+
+#define VSC73XX_GLORESET_STROBE BIT(4)
+#define VSC73XX_GLORESET_ICPU_LOCK BIT(3)
+#define VSC73XX_GLORESET_MEM_LOCK BIT(2)
+#define VSC73XX_GLORESET_PHY_RESET BIT(1)
+#define VSC73XX_GLORESET_MASTER_RESET BIT(0)
+
+#define VSC7385_CLOCK_DELAY ((3 << 4) | 3)
+#define VSC7385_CLOCK_DELAY_MASK ((3 << 4) | 3)
+
+#define VSC73XX_ICPU_CTRL_STOP (VSC73XX_ICPU_CTRL_SRST_HOLD | \
+ VSC73XX_ICPU_CTRL_BOOT_EN | \
+ VSC73XX_ICPU_CTRL_EXT_ACC_EN)
+
+#define VSC73XX_ICPU_CTRL_START (VSC73XX_ICPU_CTRL_CLK_DIV | \
+ VSC73XX_ICPU_CTRL_BOOT_EN | \
+ VSC73XX_ICPU_CTRL_CLK_EN | \
+ VSC73XX_ICPU_CTRL_SRST)
+
+#define IS_7385(a) ((a)->chipid == VSC73XX_CHIPID_ID_7385)
+#define IS_7388(a) ((a)->chipid == VSC73XX_CHIPID_ID_7388)
+#define IS_7395(a) ((a)->chipid == VSC73XX_CHIPID_ID_7395)
+#define IS_7398(a) ((a)->chipid == VSC73XX_CHIPID_ID_7398)
+#define IS_739X(a) (IS_7395(a) || IS_7398(a))
+
+struct vsc73xx_counter {
+ u8 counter;
+ const char *name;
+};
+
+/* Counters are named according to the MIB standards where applicable.
+ * Some counters are custom, non-standard. The standard counters are
+ * named in accordance with RFC2819, RFC2021 and IEEE Std 802.3-2002 Annex
+ * 30A Counters.
+ */
+static const struct vsc73xx_counter vsc73xx_rx_counters[] = {
+ { 0, "RxEtherStatsPkts" },
+ { 1, "RxBroadcast+MulticastPkts" }, /* non-standard counter */
+ { 2, "RxTotalErrorPackets" }, /* non-standard counter */
+ { 3, "RxEtherStatsBroadcastPkts" },
+ { 4, "RxEtherStatsMulticastPkts" },
+ { 5, "RxEtherStatsPkts64Octets" },
+ { 6, "RxEtherStatsPkts65to127Octets" },
+ { 7, "RxEtherStatsPkts128to255Octets" },
+ { 8, "RxEtherStatsPkts256to511Octets" },
+ { 9, "RxEtherStatsPkts512to1023Octets" },
+ { 10, "RxEtherStatsPkts1024to1518Octets" },
+ { 11, "RxJumboFrames" }, /* non-standard counter */
+ { 12, "RxaPauseMACControlFramesTransmitted" },
+ { 13, "RxFIFODrops" }, /* non-standard counter */
+ { 14, "RxBackwardDrops" }, /* non-standard counter */
+ { 15, "RxClassifierDrops" }, /* non-standard counter */
+ { 16, "RxEtherStatsCRCAlignErrors" },
+ { 17, "RxEtherStatsUndersizePkts" },
+ { 18, "RxEtherStatsOversizePkts" },
+ { 19, "RxEtherStatsFragments" },
+ { 20, "RxEtherStatsJabbers" },
+ { 21, "RxaMACControlFramesReceived" },
+ /* 22-24 are undefined */
+ { 25, "RxaFramesReceivedOK" },
+ { 26, "RxQoSClass0" }, /* non-standard counter */
+ { 27, "RxQoSClass1" }, /* non-standard counter */
+ { 28, "RxQoSClass2" }, /* non-standard counter */
+ { 29, "RxQoSClass3" }, /* non-standard counter */
+};
+
+static const struct vsc73xx_counter vsc73xx_tx_counters[] = {
+ { 0, "TxEtherStatsPkts" },
+ { 1, "TxBroadcast+MulticastPkts" }, /* non-standard counter */
+ { 2, "TxTotalErrorPackets" }, /* non-standard counter */
+ { 3, "TxEtherStatsBroadcastPkts" },
+ { 4, "TxEtherStatsMulticastPkts" },
+ { 5, "TxEtherStatsPkts64Octets" },
+ { 6, "TxEtherStatsPkts65to127Octets" },
+ { 7, "TxEtherStatsPkts128to255Octets" },
+ { 8, "TxEtherStatsPkts256to511Octets" },
+ { 9, "TxEtherStatsPkts512to1023Octets" },
+ { 10, "TxEtherStatsPkts1024to1518Octets" },
+ { 11, "TxJumboFrames" }, /* non-standard counter */
+ { 12, "TxaPauseMACControlFramesTransmitted" },
+ { 13, "TxFIFODrops" }, /* non-standard counter */
+ { 14, "TxDrops" }, /* non-standard counter */
+ { 15, "TxEtherStatsCollisions" },
+ { 16, "TxEtherStatsCRCAlignErrors" },
+ { 17, "TxEtherStatsUndersizePkts" },
+ { 18, "TxEtherStatsOversizePkts" },
+ { 19, "TxEtherStatsFragments" },
+ { 20, "TxEtherStatsJabbers" },
+ /* 21-24 are undefined */
+ { 25, "TxaFramesReceivedOK" },
+ { 26, "TxQoSClass0" }, /* non-standard counter */
+ { 27, "TxQoSClass1" }, /* non-standard counter */
+ { 28, "TxQoSClass2" }, /* non-standard counter */
+ { 29, "TxQoSClass3" }, /* non-standard counter */
+};
+
+int vsc73xx_is_addr_valid(u8 block, u8 subblock)
+{
+ switch (block) {
+ case VSC73XX_BLOCK_MAC:
+ switch (subblock) {
+ case 0 ... 4:
+ case 6:
+ return 1;
+ }
+ break;
+
+ case VSC73XX_BLOCK_ANALYZER:
+ case VSC73XX_BLOCK_SYSTEM:
+ switch (subblock) {
+ case 0:
+ return 1;
+ }
+ break;
+
+ case VSC73XX_BLOCK_MII:
+ case VSC73XX_BLOCK_CAPTURE:
+ case VSC73XX_BLOCK_ARBITER:
+ switch (subblock) {
+ case 0 ... 1:
+ return 1;
+ }
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(vsc73xx_is_addr_valid);
+
+static int vsc73xx_read(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
+ u32 *val)
+{
+ return vsc->ops->read(vsc, block, subblock, reg, val);
+}
+
+static int vsc73xx_write(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
+ u32 val)
+{
+ return vsc->ops->write(vsc, block, subblock, reg, val);
+}
+
+static int vsc73xx_update_bits(struct vsc73xx *vsc, u8 block, u8 subblock,
+ u8 reg, u32 mask, u32 val)
+{
+ u32 tmp, orig;
+ int ret;
+
+ /* Same read-modify-write algorithm as e.g. regmap */
+ ret = vsc73xx_read(vsc, block, subblock, reg, &orig);
+ if (ret)
+ return ret;
+ tmp = orig & ~mask;
+ tmp |= val & mask;
+ return vsc73xx_write(vsc, block, subblock, reg, tmp);
+}
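+
+/* For example (mirroring how this helper is used later in this file),
+ * clearing just the RX enable bit of a port's MAC configuration register:
+ *
+ *   vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG,
+ *                       VSC73XX_MAC_CFG_RX_EN, 0);
+ */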
+
+static int vsc73xx_detect(struct vsc73xx *vsc)
+{
+ bool icpu_si_boot_en;
+ bool icpu_pi_en;
+ u32 val;
+ u32 rev;
+ int ret;
+ u32 id;
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+ VSC73XX_ICPU_MBOX_VAL, &val);
+ if (ret) {
+ dev_err(vsc->dev, "unable to read mailbox (%d)\n", ret);
+ return ret;
+ }
+
+ if (val == 0xffffffff) {
+ dev_info(vsc->dev, "chip seems dead.\n");
+ return -EAGAIN;
+ }
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+ VSC73XX_CHIPID, &val);
+ if (ret) {
+ dev_err(vsc->dev, "unable to read chip id (%d)\n", ret);
+ return ret;
+ }
+
+ id = (val >> VSC73XX_CHIPID_ID_SHIFT) &
+ VSC73XX_CHIPID_ID_MASK;
+ switch (id) {
+ case VSC73XX_CHIPID_ID_7385:
+ case VSC73XX_CHIPID_ID_7388:
+ case VSC73XX_CHIPID_ID_7395:
+ case VSC73XX_CHIPID_ID_7398:
+ break;
+ default:
+ dev_err(vsc->dev, "unsupported chip, id=%04x\n", id);
+ return -ENODEV;
+ }
+
+ vsc->chipid = id;
+ rev = (val >> VSC73XX_CHIPID_REV_SHIFT) &
+ VSC73XX_CHIPID_REV_MASK;
+ dev_info(vsc->dev, "VSC%04X (rev: %d) switch found\n", id, rev);
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+ VSC73XX_ICPU_CTRL, &val);
+ if (ret) {
+ dev_err(vsc->dev, "unable to read iCPU control\n");
+ return ret;
+ }
+
+ /* The iCPU can always be used but can boot in different ways.
+ * If it is initially disabled and has no external memory,
+ * we are in control and can do whatever we like, else we
+ * are probably in trouble (we need some way to communicate
+ * with the running firmware) so we bail out for now.
+ */
+ icpu_pi_en = !!(val & VSC73XX_ICPU_CTRL_ICPU_PI_EN);
+ icpu_si_boot_en = !!(val & VSC73XX_ICPU_CTRL_BOOT_EN);
+ if (icpu_si_boot_en && icpu_pi_en) {
+ dev_err(vsc->dev,
+ "iCPU enabled boots from SI, has external memory\n");
+ dev_err(vsc->dev, "no idea how to deal with this\n");
+ return -ENODEV;
+ }
+ if (icpu_si_boot_en && !icpu_pi_en) {
+ dev_err(vsc->dev,
+ "iCPU enabled boots from PI/SI, no external memory\n");
+ return -EAGAIN;
+ }
+ if (!icpu_si_boot_en && icpu_pi_en) {
+ dev_err(vsc->dev,
+ "iCPU enabled, boots from PI external memory\n");
+ dev_err(vsc->dev, "no idea how to deal with this\n");
+ return -ENODEV;
+ }
+ /* !icpu_si_boot_en && !cpu_pi_en */
+ dev_info(vsc->dev, "iCPU disabled, no external memory\n");
+
+ return 0;
+}
+
+static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum)
+{
+ struct vsc73xx *vsc = ds->priv;
+ u32 cmd;
+ u32 val;
+ int ret;
+
+ /* Setting bit 26 means "read" */
+ cmd = BIT(26) | (phy << 21) | (regnum << 16);
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
+ if (ret)
+ return ret;
+ msleep(2);
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MII, 0, 2, &val);
+ if (ret)
+ return ret;
+ if (val & BIT(16)) {
+ dev_err(vsc->dev, "reading reg %02x from phy%d failed\n",
+ regnum, phy);
+ return -EIO;
+ }
+ val &= 0xFFFFU;
+
+ dev_dbg(vsc->dev, "read reg %02x from phy%d = %04x\n",
+ regnum, phy, val);
+
+ return val;
+}
+
+static int vsc73xx_phy_write(struct dsa_switch *ds, int phy, int regnum,
+ u16 val)
+{
+ struct vsc73xx *vsc = ds->priv;
+ u32 cmd;
+ int ret;
+
+ /* It was found through tedious experiments that this router
+ * chip really hates to have its PHYs reset. They
+ * never recover if that happens: autonegotiation stops
+ * working after a reset. Just filter out this command.
+ * (Resetting the whole chip is OK.)
+ */
+ if (regnum == 0 && (val & BIT(15))) {
+ dev_info(vsc->dev, "reset PHY - disallowed\n");
+ return 0;
+ }
+
+ cmd = (phy << 21) | (regnum << 16);
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
+ if (ret)
+ return ret;
+
+ dev_dbg(vsc->dev, "write %04x to reg %02x in phy%d\n",
+ val, regnum, phy);
+ return 0;
+}
+
+static enum dsa_tag_protocol vsc73xx_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol mp)
+{
+ /* The switch internally uses an 8 byte header with length,
+ * source port, tag, LPA and priority. This is supposedly
+ * only accessible when operating the switch using the internal
+ * CPU or with an external CPU mapping the device in, but not
+ * when operating the switch over SPI and putting frames in/out
+ * on port 6 (the CPU port). So far we must assume that we
+ * cannot access the tag. (See "Internal frame header" section
+ * 3.9.1 in the manual.)
+ */
+ return DSA_TAG_PROTO_NONE;
+}
+
+static int vsc73xx_setup(struct dsa_switch *ds)
+{
+ struct vsc73xx *vsc = ds->priv;
+ int i;
+
+ dev_info(vsc->dev, "set up the switch\n");
+
+ /* Issue RESET */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GLORESET,
+ VSC73XX_GLORESET_MASTER_RESET);
+ usleep_range(125, 200);
+
+ /* Initialize memory: set up RAM banks 0..15, except banks 6 and 7.
+ * This sequence appears in the
+ * VSC7385 SparX-G5 datasheet section 6.6.1
+ * VSC7395 SparX-G5e datasheet section 6.6.1
+ * "initialization sequence".
+ * No explanation is given for the 0x1010400 magic number.
+ */
+ for (i = 0; i <= 15; i++) {
+ if (i != 6 && i != 7) {
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MEMINIT,
+ 2,
+ 0, 0x1010400 + i);
+ mdelay(1);
+ }
+ }
+ mdelay(30);
+
+ /* Clear MAC table */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_MACACCESS,
+ VSC73XX_MACACCESS_CMD_CLEAR_TABLE);
+
+ /* Clear VLAN table */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_VLANACCESS,
+ VSC73XX_VLANACCESS_VLAN_TBL_CMD_CLEAR_TABLE);
+
+ msleep(40);
+
+ /* Use 20KiB buffers on all ports on VSC7395.
+ * The VSC7385 has 16KiB buffers and that is the
+ * default if we don't set this up explicitly.
+ * Port "31" is "all ports".
+ */
+ if (IS_739X(vsc))
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, 0x1f,
+ VSC73XX_Q_MISC_CONF,
+ VSC73XX_Q_MISC_CONF_EXTENT_MEM);
+
+ /* Put all ports into reset until enabled */
+ for (i = 0; i < 7; i++) {
+ if (i == 5)
+ continue;
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, i,
+ VSC73XX_MAC_CFG, VSC73XX_MAC_CFG_RESET);
+ }
+
+ /* MII delay, set both GTX and RX delay to 2 ns */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GMIIDELAY,
+ VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS |
+ VSC73XX_GMIIDELAY_GMII0_RXDELAY_2_0_NS);
+ /* Enable reception of frames on all ports */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_RECVMASK,
+ 0x5f);
+ /* IP multicast flood mask (table 144) */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_IFLODMSK,
+ 0xff);
+
+ mdelay(50);
+
+ /* Release reset from the internal PHYs */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GLORESET,
+ VSC73XX_GLORESET_PHY_RESET);
+
+ udelay(4);
+
+ return 0;
+}
+
+static void vsc73xx_init_port(struct vsc73xx *vsc, int port)
+{
+ u32 val;
+
+ /* MAC configure, first reset the port and then write defaults */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+ port,
+ VSC73XX_MAC_CFG,
+ VSC73XX_MAC_CFG_RESET);
+
+ /* Bring up the port in 1Gbit mode by default; this will be
+ * augmented after auto-negotiation on the PHY-facing
+ * ports.
+ */
+ if (port == CPU_PORT)
+ val = VSC73XX_MAC_CFG_1000M_F_RGMII;
+ else
+ val = VSC73XX_MAC_CFG_1000M_F_PHY;
+
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+ port,
+ VSC73XX_MAC_CFG,
+ val |
+ VSC73XX_MAC_CFG_TX_EN |
+ VSC73XX_MAC_CFG_RX_EN);
+
+ /* Flow control for the CPU port:
+ * Use a zero delay pause frame when pause condition is left
+ * Obey pause control frames
+ */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+ port,
+ VSC73XX_FCCONF,
+ VSC73XX_FCCONF_ZERO_PAUSE_EN |
+ VSC73XX_FCCONF_FLOW_CTRL_OBEY);
+
+ /* Issue pause control frames on PHY facing ports.
+ * Allow early initiation of MAC transmission if the amount
+ * of egress data is below 512 bytes on CPU port.
+ * FIXME: enable 20KiB buffers?
+ */
+ if (port == CPU_PORT)
+ val = VSC73XX_Q_MISC_CONF_EARLY_TX_512;
+ else
+ val = VSC73XX_Q_MISC_CONF_MAC_PAUSE_MODE;
+ val |= VSC73XX_Q_MISC_CONF_EXTENT_MEM;
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+ port,
+ VSC73XX_Q_MISC_CONF,
+ val);
+
+ /* Flow control MAC: a MAC address used in flow control frames */
+ val = (vsc->addr[5] << 16) | (vsc->addr[4] << 8) | (vsc->addr[3]);
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+ port,
+ VSC73XX_FCMACHI,
+ val);
+ val = (vsc->addr[2] << 16) | (vsc->addr[1] << 8) | (vsc->addr[0]);
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+ port,
+ VSC73XX_FCMACLO,
+ val);
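+ /* Illustrative example (hypothetical MAC 02:11:22:33:44:55):
+ * FCMACHI is programmed with 0x554433 and FCMACLO with 0x221102,
+ * i.e. the address is stored byte-reversed across the two
+ * registers.
+ */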
+
+ /* Tell the categorizer to forward pause frames, not control
+ * frames. Do not drop anything.
+ */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+ port,
+ VSC73XX_CAT_DROP,
+ VSC73XX_CAT_DROP_FWD_PAUSE_ENA);
+
+ /* Clear all counters */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+ port, VSC73XX_C_RX0, 0);
+}
+
+static void vsc73xx_adjust_enable_port(struct vsc73xx *vsc,
+ int port, struct phy_device *phydev,
+ u32 initval)
+{
+ u32 val = initval;
+ u8 seed;
+
+ /* Reset this port; FIXME: break this out into a subroutine */
+ val |= VSC73XX_MAC_CFG_RESET;
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, val);
+
+ /* Seed the port's randomizer with a random value */
+ get_random_bytes(&seed, 1);
+ val |= seed << VSC73XX_MAC_CFG_SEED_OFFSET;
+ val |= VSC73XX_MAC_CFG_SEED_LOAD;
+ val |= VSC73XX_MAC_CFG_WEXC_DIS;
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, val);
+
+ /* Flow control for the PHY-facing ports:
+ * Use a zero delay pause frame when the pause condition ends.
+ * Obey pause control frames.
+ * When generating pause frames, use 0xff as the pause value.
+ */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_FCCONF,
+ VSC73XX_FCCONF_ZERO_PAUSE_EN |
+ VSC73XX_FCCONF_FLOW_CTRL_OBEY |
+ 0xff);
+
+ /* Disallow backward dropping of frames from this port */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_SBACKWDROP, BIT(port), 0);
+
+ /* Enable TX, RX, deassert reset, stop loading seed */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_MAC_CFG,
+ VSC73XX_MAC_CFG_RESET | VSC73XX_MAC_CFG_SEED_LOAD |
+ VSC73XX_MAC_CFG_TX_EN | VSC73XX_MAC_CFG_RX_EN,
+ VSC73XX_MAC_CFG_TX_EN | VSC73XX_MAC_CFG_RX_EN);
+}
+
+static void vsc73xx_adjust_link(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct vsc73xx *vsc = ds->priv;
+ u32 val;
+
+ /* Special handling of the CPU-facing port */
+ if (port == CPU_PORT) {
+ /* Other ports are already initialized but not this one */
+ vsc73xx_init_port(vsc, CPU_PORT);
+ /* Select the external port for this interface (EXT_PORT)
+ * Enable the GMII GTX external clock
+ * Use double data rate (DDR mode)
+ */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+ CPU_PORT,
+ VSC73XX_ADVPORTM,
+ VSC73XX_ADVPORTM_EXT_PORT |
+ VSC73XX_ADVPORTM_ENA_GTX |
+ VSC73XX_ADVPORTM_DDR_MODE);
+ }
+
+ /* This is the MAC configuration that always needs to happen
+ * after a PHY or the CPU port comes up or goes down.
+ */
+ if (!phydev->link) {
+ int maxloop = 10;
+
+ dev_dbg(vsc->dev, "port %d: went down\n",
+ port);
+
+ /* Disable RX on this port */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_MAC_CFG,
+ VSC73XX_MAC_CFG_RX_EN, 0);
+
+ /* Discard packets */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_ARBDISC, BIT(port), BIT(port));
+
+ /* Wait until queue is empty */
+ vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_ARBEMPTY, &val);
+ while (!(val & BIT(port))) {
+ msleep(1);
+ vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_ARBEMPTY, &val);
+ if (--maxloop == 0) {
+ dev_err(vsc->dev,
+ "timeout waiting for block arbiter\n");
+ /* Continue anyway */
+ break;
+ }
+ }
+
+ /* Put this port into reset */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG,
+ VSC73XX_MAC_CFG_RESET);
+
+ /* Accept packets again */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_ARBDISC, BIT(port), 0);
+
+ /* Allow backward dropping of frames from this port */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_SBACKWDROP, BIT(port), BIT(port));
+
+ /* Receive mask (disable forwarding) */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_RECVMASK, BIT(port), 0);
+
+ return;
+ }
+
+ /* Figure out what speed was negotiated */
+ if (phydev->speed == SPEED_1000) {
+ dev_dbg(vsc->dev, "port %d: 1000 Mbit mode full duplex\n",
+ port);
+
+ /* Set up default for internal port or external RGMII */
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+ val = VSC73XX_MAC_CFG_1000M_F_RGMII;
+ else
+ val = VSC73XX_MAC_CFG_1000M_F_PHY;
+ vsc73xx_adjust_enable_port(vsc, port, phydev, val);
+ } else if (phydev->speed == SPEED_100) {
+ if (phydev->duplex == DUPLEX_FULL) {
+ val = VSC73XX_MAC_CFG_100_10M_F_PHY;
+ dev_dbg(vsc->dev,
+ "port %d: 100 Mbit full duplex mode\n",
+ port);
+ } else {
+ val = VSC73XX_MAC_CFG_100_10M_H_PHY;
+ dev_dbg(vsc->dev,
+ "port %d: 100 Mbit half duplex mode\n",
+ port);
+ }
+ vsc73xx_adjust_enable_port(vsc, port, phydev, val);
+ } else if (phydev->speed == SPEED_10) {
+ if (phydev->duplex == DUPLEX_FULL) {
+ val = VSC73XX_MAC_CFG_100_10M_F_PHY;
+ dev_dbg(vsc->dev,
+ "port %d: 10 Mbit full duplex mode\n",
+ port);
+ } else {
+ val = VSC73XX_MAC_CFG_100_10M_H_PHY;
+ dev_dbg(vsc->dev,
+ "port %d: 10 Mbit half duplex mode\n",
+ port);
+ }
+ vsc73xx_adjust_enable_port(vsc, port, phydev, val);
+ } else {
+ dev_err(vsc->dev,
+ "could not adjust link: unknown speed\n");
+ }
+
+ /* Enable port (forwarding) in the receive mask */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_RECVMASK, BIT(port), BIT(port));
+}
+
+static int vsc73xx_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct vsc73xx *vsc = ds->priv;
+
+ dev_info(vsc->dev, "enable port %d\n", port);
+ vsc73xx_init_port(vsc, port);
+
+ return 0;
+}
+
+static void vsc73xx_port_disable(struct dsa_switch *ds, int port)
+{
+ struct vsc73xx *vsc = ds->priv;
+
+ /* Just put the port into reset */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_MAC_CFG, VSC73XX_MAC_CFG_RESET);
+}
+
+static const struct vsc73xx_counter *
+vsc73xx_find_counter(struct vsc73xx *vsc,
+ u8 counter,
+ bool tx)
+{
+ const struct vsc73xx_counter *cnts;
+ int num_cnts;
+ int i;
+
+ if (tx) {
+ cnts = vsc73xx_tx_counters;
+ num_cnts = ARRAY_SIZE(vsc73xx_tx_counters);
+ } else {
+ cnts = vsc73xx_rx_counters;
+ num_cnts = ARRAY_SIZE(vsc73xx_rx_counters);
+ }
+
+ for (i = 0; i < num_cnts; i++) {
+ const struct vsc73xx_counter *cnt;
+
+ cnt = &cnts[i];
+ if (cnt->counter == counter)
+ return cnt;
+ }
+
+ return NULL;
+}
+
+static void vsc73xx_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data)
+{
+ const struct vsc73xx_counter *cnt;
+ struct vsc73xx *vsc = ds->priv;
+ u8 indices[6];
+ int i, j;
+ u32 val;
+ int ret;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_C_CFG, &val);
+ if (ret)
+ return;
+
+ indices[0] = (val & 0x1f); /* RX counter 0 */
+ indices[1] = ((val >> 5) & 0x1f); /* RX counter 1 */
+ indices[2] = ((val >> 10) & 0x1f); /* RX counter 2 */
+ indices[3] = ((val >> 16) & 0x1f); /* TX counter 0 */
+ indices[4] = ((val >> 21) & 0x1f); /* TX counter 1 */
+ indices[5] = ((val >> 26) & 0x1f); /* TX counter 2 */
+
+ /* The first counter is the RX octets counter */
+ j = 0;
+ strncpy(data + j * ETH_GSTRING_LEN,
+ "RxEtherStatsOctets", ETH_GSTRING_LEN);
+ j++;
+
+ /* Each port supports recording 3 RX counters and 3 TX counters,
+ * figure out what counters we use in this set-up and return the
+ * names of them. The hardware default counters will be number of
+ * packets on RX/TX, combined broadcast+multicast packets RX/TX and
+ * total error packets RX/TX.
+ */
+ for (i = 0; i < 3; i++) {
+ cnt = vsc73xx_find_counter(vsc, indices[i], false);
+ if (cnt)
+ strncpy(data + j * ETH_GSTRING_LEN,
+ cnt->name, ETH_GSTRING_LEN);
+ j++;
+ }
+
+ /* TX stats begins with the number of TX octets */
+ strncpy(data + j * ETH_GSTRING_LEN,
+ "TxEtherStatsOctets", ETH_GSTRING_LEN);
+ j++;
+
+ for (i = 3; i < 6; i++) {
+ cnt = vsc73xx_find_counter(vsc, indices[i], true);
+ if (cnt)
+ strncpy(data + j * ETH_GSTRING_LEN,
+ cnt->name, ETH_GSTRING_LEN);
+ j++;
+ }
+}
+
+static int vsc73xx_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ /* We only support SS_STATS */
+ if (sset != ETH_SS_STATS)
+ return 0;
+ /* RX and TX octets, then 3 RX counters and 3 TX counters */
+ return 8;
+}
+
+static void vsc73xx_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct vsc73xx *vsc = ds->priv;
+ u8 regs[] = {
+ VSC73XX_RXOCT,
+ VSC73XX_C_RX0,
+ VSC73XX_C_RX1,
+ VSC73XX_C_RX2,
+ VSC73XX_TXOCT,
+ VSC73XX_C_TX0,
+ VSC73XX_C_TX1,
+ VSC73XX_C_TX2,
+ };
+ u32 val;
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MAC, port,
+ regs[i], &val);
+ if (ret) {
+ dev_err(vsc->dev, "error reading counter %d\n", i);
+ return;
+ }
+ data[i] = val;
+ }
+}
+
+static int vsc73xx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct vsc73xx *vsc = ds->priv;
+
+ return vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_MAXLEN, new_mtu + ETH_HLEN + ETH_FCS_LEN);
+}
+
+/* According to the application note "VSC7398 Jumbo Frames", setting
+ * the frame size up to 9.6 KB does not affect the performance on
+ * standard frames. The application note makes it clear that
+ * "9.6 kilobytes" == 9600 bytes.
+ */
+static int vsc73xx_get_max_mtu(struct dsa_switch *ds, int port)
+{
+ return 9600 - ETH_HLEN - ETH_FCS_LEN;
+}
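+
+/* Worked example: an MTU of 1500 makes vsc73xx_change_mtu() program
+ * VSC73XX_MAXLEN with 1500 + 14 + 4 = 1518 bytes, and the maximum
+ * MTU reported above is 9600 - 14 - 4 = 9582 bytes.
+ */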
+
+static const struct dsa_switch_ops vsc73xx_ds_ops = {
+ .get_tag_protocol = vsc73xx_get_tag_protocol,
+ .setup = vsc73xx_setup,
+ .phy_read = vsc73xx_phy_read,
+ .phy_write = vsc73xx_phy_write,
+ .adjust_link = vsc73xx_adjust_link,
+ .get_strings = vsc73xx_get_strings,
+ .get_ethtool_stats = vsc73xx_get_ethtool_stats,
+ .get_sset_count = vsc73xx_get_sset_count,
+ .port_enable = vsc73xx_port_enable,
+ .port_disable = vsc73xx_port_disable,
+ .port_change_mtu = vsc73xx_change_mtu,
+ .port_max_mtu = vsc73xx_get_max_mtu,
+};
+
+static int vsc73xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct vsc73xx *vsc = gpiochip_get_data(chip);
+ u32 val;
+ int ret;
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+ VSC73XX_GPIO, &val);
+ if (ret)
+ return ret;
+
+ return !!(val & BIT(offset));
+}
+
+static void vsc73xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int val)
+{
+ struct vsc73xx *vsc = gpiochip_get_data(chip);
+ u32 tmp = val ? BIT(offset) : 0;
+
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+ VSC73XX_GPIO, BIT(offset), tmp);
+}
+
+static int vsc73xx_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int val)
+{
+ struct vsc73xx *vsc = gpiochip_get_data(chip);
+ u32 tmp = val ? BIT(offset) : 0;
+
+ return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+ VSC73XX_GPIO, BIT(offset + 4) | BIT(offset),
+ BIT(offset + 4) | tmp);
+}
+
+static int vsc73xx_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct vsc73xx *vsc = gpiochip_get_data(chip);
+
+ return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+ VSC73XX_GPIO, BIT(offset + 4),
+ 0);
+}
+
+static int vsc73xx_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct vsc73xx *vsc = gpiochip_get_data(chip);
+ u32 val;
+ int ret;
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+ VSC73XX_GPIO, &val);
+ if (ret)
+ return ret;
+
+ return !(val & BIT(offset + 4));
+}
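+
+/* Note (inferred from the accessors above, not from the datasheet):
+ * in VSC73XX_GPIO, bits 3:0 hold the pin values and bits 7:4 the
+ * per-pin direction, where a set direction bit means output.
+ */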
+
+static int vsc73xx_gpio_probe(struct vsc73xx *vsc)
+{
+ int ret;
+
+ vsc->gc.label = devm_kasprintf(vsc->dev, GFP_KERNEL, "VSC%04x",
+ vsc->chipid);
+ if (!vsc->gc.label)
+ return -ENOMEM;
+ vsc->gc.ngpio = 4;
+ vsc->gc.owner = THIS_MODULE;
+ vsc->gc.parent = vsc->dev;
+ vsc->gc.base = -1;
+ vsc->gc.get = vsc73xx_gpio_get;
+ vsc->gc.set = vsc73xx_gpio_set;
+ vsc->gc.direction_input = vsc73xx_gpio_direction_input;
+ vsc->gc.direction_output = vsc73xx_gpio_direction_output;
+ vsc->gc.get_direction = vsc73xx_gpio_get_direction;
+ vsc->gc.can_sleep = true;
+ ret = devm_gpiochip_add_data(vsc->dev, &vsc->gc, vsc);
+ if (ret) {
+ dev_err(vsc->dev, "unable to register GPIO chip\n");
+ return ret;
+ }
+ return 0;
+}
+
+int vsc73xx_probe(struct vsc73xx *vsc)
+{
+ struct device *dev = vsc->dev;
+ int ret;
+
+ /* Release reset, if any */
+ vsc->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(vsc->reset)) {
+ dev_err(dev, "failed to get RESET GPIO\n");
+ return PTR_ERR(vsc->reset);
+ }
+ if (vsc->reset)
+ /* Wait 20ms according to datasheet table 245 */
+ msleep(20);
+
+ ret = vsc73xx_detect(vsc);
+ if (ret == -EAGAIN) {
+ dev_err(vsc->dev,
+ "Chip seems to be out of control. Assert reset and try again.\n");
+ gpiod_set_value_cansleep(vsc->reset, 1);
+ /* Reset pulse should be 20ns minimum, according to datasheet
+ * table 245, so 10us should be fine
+ */
+ usleep_range(10, 100);
+ gpiod_set_value_cansleep(vsc->reset, 0);
+ /* Wait 20ms according to datasheet table 245 */
+ msleep(20);
+ ret = vsc73xx_detect(vsc);
+ }
+ if (ret) {
+ dev_err(dev, "no chip found (%d)\n", ret);
+ return -ENODEV;
+ }
+
+ eth_random_addr(vsc->addr);
+ dev_info(vsc->dev,
+ "MAC for control frames: %02X:%02X:%02X:%02X:%02X:%02X\n",
+ vsc->addr[0], vsc->addr[1], vsc->addr[2],
+ vsc->addr[3], vsc->addr[4], vsc->addr[5]);
+
+ /* The VSC7395 switch chips have 5+1 ports which means 5
+ * ordinary ports and a sixth CPU port facing the processor
+ * with an RGMII interface. These ports are numbered 0..4
+ * and 6, so they leave a "hole" in the port map for port 5,
+ * which is invalid.
+ *
+ * The VSC7398 has 8 ports, port 7 is again the CPU port.
+ *
+ * We allocate 8 ports and avoid access to the nonexistent
+ * ports.
+ */
+ vsc->ds = devm_kzalloc(dev, sizeof(*vsc->ds), GFP_KERNEL);
+ if (!vsc->ds)
+ return -ENOMEM;
+
+ vsc->ds->dev = dev;
+ vsc->ds->num_ports = 8;
+ vsc->ds->priv = vsc;
+
+ vsc->ds->ops = &vsc73xx_ds_ops;
+ ret = dsa_register_switch(vsc->ds);
+ if (ret) {
+ dev_err(dev, "unable to register switch (%d)\n", ret);
+ return ret;
+ }
+
+ ret = vsc73xx_gpio_probe(vsc);
+ if (ret) {
+ dsa_unregister_switch(vsc->ds);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(vsc73xx_probe);
+
+void vsc73xx_remove(struct vsc73xx *vsc)
+{
+ dsa_unregister_switch(vsc->ds);
+ gpiod_set_value(vsc->reset, 1);
+}
+EXPORT_SYMBOL(vsc73xx_remove);
+
+void vsc73xx_shutdown(struct vsc73xx *vsc)
+{
+ dsa_switch_shutdown(vsc->ds);
+}
+EXPORT_SYMBOL(vsc73xx_shutdown);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Vitesse VSC7385/7388/7395/7398 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/vitesse-vsc73xx-platform.c b/drivers/net/dsa/vitesse-vsc73xx-platform.c
new file mode 100644
index 000000000..bd4206e8f
--- /dev/null
+++ b/drivers/net/dsa/vitesse-vsc73xx-platform.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0
+/* DSA driver for:
+ * Vitesse VSC7385 SparX-G5 5+1-port Integrated Gigabit Ethernet Switch
+ * Vitesse VSC7388 SparX-G8 8-port Integrated Gigabit Ethernet Switch
+ * Vitesse VSC7395 SparX-G5e 5+1-port Integrated Gigabit Ethernet Switch
+ * Vitesse VSC7398 SparX-G8e 8-port Integrated Gigabit Ethernet Switch
+ *
+ * This driver takes control of the switch chip connected over a
+ * CPU-attached address bus and configures it to route packets
+ * around when connected to a CPU port.
+ *
+ * Copyright (C) 2019 Pawel Dembicki <paweldembicki@gmail.com>
+ * Based on vitesse-vsc-spi.c by:
+ * Copyright (C) 2018 Linus Walleij <linus.walleij@linaro.org>
+ * Includes portions of code from the firmware uploader by:
+ * Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "vitesse-vsc73xx.h"
+
+#define VSC73XX_CMD_PLATFORM_BLOCK_SHIFT 14
+#define VSC73XX_CMD_PLATFORM_BLOCK_MASK 0x7
+#define VSC73XX_CMD_PLATFORM_SUBBLOCK_SHIFT 10
+#define VSC73XX_CMD_PLATFORM_SUBBLOCK_MASK 0xf
+#define VSC73XX_CMD_PLATFORM_REGISTER_SHIFT 2
+
+/*
+ * struct vsc73xx_platform - VSC73xx Platform state container
+ */
+struct vsc73xx_platform {
+ struct platform_device *pdev;
+ void __iomem *base_addr;
+ struct vsc73xx vsc;
+};
+
+static const struct vsc73xx_ops vsc73xx_platform_ops;
+
+static u32 vsc73xx_make_addr(u8 block, u8 subblock, u8 reg)
+{
+ u32 ret;
+
+ ret = (block & VSC73XX_CMD_PLATFORM_BLOCK_MASK)
+ << VSC73XX_CMD_PLATFORM_BLOCK_SHIFT;
+ ret |= (subblock & VSC73XX_CMD_PLATFORM_SUBBLOCK_MASK)
+ << VSC73XX_CMD_PLATFORM_SUBBLOCK_SHIFT;
+ ret |= reg << VSC73XX_CMD_PLATFORM_REGISTER_SHIFT;
+
+ return ret;
+}
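+
+/* Worked example (illustrative values): block 1, subblock 2, reg 0x10
+ * compose to (1 << 14) | (2 << 10) | (0x10 << 2) = 0x4840, the byte
+ * offset into the memory-mapped register window.
+ */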
+
+static int vsc73xx_platform_read(struct vsc73xx *vsc, u8 block, u8 subblock,
+ u8 reg, u32 *val)
+{
+ struct vsc73xx_platform *vsc_platform = vsc->priv;
+ u32 offset;
+
+ if (!vsc73xx_is_addr_valid(block, subblock))
+ return -EINVAL;
+
+ offset = vsc73xx_make_addr(block, subblock, reg);
+ /* By default the VSC73xx runs in big-endian mode.
+ * (See "Register Addressing", section 5.5.3 in the VSC7385 manual.)
+ */
+ *val = ioread32be(vsc_platform->base_addr + offset);
+
+ return 0;
+}
+
+static int vsc73xx_platform_write(struct vsc73xx *vsc, u8 block, u8 subblock,
+ u8 reg, u32 val)
+{
+ struct vsc73xx_platform *vsc_platform = vsc->priv;
+ u32 offset;
+
+ if (!vsc73xx_is_addr_valid(block, subblock))
+ return -EINVAL;
+
+ offset = vsc73xx_make_addr(block, subblock, reg);
+ iowrite32be(val, vsc_platform->base_addr + offset);
+
+ return 0;
+}
+
+static int vsc73xx_platform_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct vsc73xx_platform *vsc_platform;
+
+ vsc_platform = devm_kzalloc(dev, sizeof(*vsc_platform), GFP_KERNEL);
+ if (!vsc_platform)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, vsc_platform);
+ vsc_platform->pdev = pdev;
+ vsc_platform->vsc.dev = dev;
+ vsc_platform->vsc.priv = vsc_platform;
+ vsc_platform->vsc.ops = &vsc73xx_platform_ops;
+
+ /* obtain I/O memory space */
+ vsc_platform->base_addr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(vsc_platform->base_addr)) {
+ dev_err(&pdev->dev, "cannot request I/O memory space\n");
+ return -ENXIO;
+ }
+
+ return vsc73xx_probe(&vsc_platform->vsc);
+}
+
+static int vsc73xx_platform_remove(struct platform_device *pdev)
+{
+ struct vsc73xx_platform *vsc_platform = platform_get_drvdata(pdev);
+
+ if (!vsc_platform)
+ return 0;
+
+ vsc73xx_remove(&vsc_platform->vsc);
+
+ return 0;
+}
+
+static void vsc73xx_platform_shutdown(struct platform_device *pdev)
+{
+ struct vsc73xx_platform *vsc_platform = platform_get_drvdata(pdev);
+
+ if (!vsc_platform)
+ return;
+
+ vsc73xx_shutdown(&vsc_platform->vsc);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static const struct vsc73xx_ops vsc73xx_platform_ops = {
+ .read = vsc73xx_platform_read,
+ .write = vsc73xx_platform_write,
+};
+
+static const struct of_device_id vsc73xx_of_match[] = {
+ {
+ .compatible = "vitesse,vsc7385",
+ },
+ {
+ .compatible = "vitesse,vsc7388",
+ },
+ {
+ .compatible = "vitesse,vsc7395",
+ },
+ {
+ .compatible = "vitesse,vsc7398",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, vsc73xx_of_match);
+
+static struct platform_driver vsc73xx_platform_driver = {
+ .probe = vsc73xx_platform_probe,
+ .remove = vsc73xx_platform_remove,
+ .shutdown = vsc73xx_platform_shutdown,
+ .driver = {
+ .name = "vsc73xx-platform",
+ .of_match_table = vsc73xx_of_match,
+ },
+};
+module_platform_driver(vsc73xx_platform_driver);
+
+MODULE_AUTHOR("Pawel Dembicki <paweldembicki@gmail.com>");
+MODULE_DESCRIPTION("Vitesse VSC7385/7388/7395/7398 Platform driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/vitesse-vsc73xx-spi.c b/drivers/net/dsa/vitesse-vsc73xx-spi.c
new file mode 100644
index 000000000..85b9a0f51
--- /dev/null
+++ b/drivers/net/dsa/vitesse-vsc73xx-spi.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+/* DSA driver for:
+ * Vitesse VSC7385 SparX-G5 5+1-port Integrated Gigabit Ethernet Switch
+ * Vitesse VSC7388 SparX-G8 8-port Integrated Gigabit Ethernet Switch
+ * Vitesse VSC7395 SparX-G5e 5+1-port Integrated Gigabit Ethernet Switch
+ * Vitesse VSC7398 SparX-G8e 8-port Integrated Gigabit Ethernet Switch
+ *
+ * This driver takes control of the switch chip over SPI and
+ * configures it to route packets around when connected to a CPU port.
+ *
+ * Copyright (C) 2018 Linus Walleij <linus.walleij@linaro.org>
+ * Includes portions of code from the firmware uploader by:
+ * Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/spi/spi.h>
+
+#include "vitesse-vsc73xx.h"
+
+#define VSC73XX_CMD_SPI_MODE_READ 0
+#define VSC73XX_CMD_SPI_MODE_WRITE 1
+#define VSC73XX_CMD_SPI_MODE_SHIFT 4
+#define VSC73XX_CMD_SPI_BLOCK_SHIFT 5
+#define VSC73XX_CMD_SPI_BLOCK_MASK 0x7
+#define VSC73XX_CMD_SPI_SUBBLOCK_MASK 0xf
+
+/*
+ * struct vsc73xx_spi - VSC73xx SPI state container
+ */
+struct vsc73xx_spi {
+ struct spi_device *spi;
+ struct mutex lock; /* Protects SPI traffic */
+ struct vsc73xx vsc;
+};
+
+static const struct vsc73xx_ops vsc73xx_spi_ops;
+
+static u8 vsc73xx_make_addr(u8 mode, u8 block, u8 subblock)
+{
+ u8 ret;
+
+ ret =
+ (block & VSC73XX_CMD_SPI_BLOCK_MASK) << VSC73XX_CMD_SPI_BLOCK_SHIFT;
+ ret |= (mode & 1) << VSC73XX_CMD_SPI_MODE_SHIFT;
+ ret |= subblock & VSC73XX_CMD_SPI_SUBBLOCK_MASK;
+
+ return ret;
+}
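+
+/* Worked example (illustrative values): a read (mode 0) of block 3,
+ * subblock 5 yields the command byte (3 << 5) | (0 << 4) | 5 = 0x65;
+ * the same access as a write (mode 1) yields 0x75.
+ */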
+
+static int vsc73xx_spi_read(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
+ u32 *val)
+{
+ struct vsc73xx_spi *vsc_spi = vsc->priv;
+ struct spi_transfer t[2];
+ struct spi_message m;
+ u8 cmd[4];
+ u8 buf[4];
+ int ret;
+
+ if (!vsc73xx_is_addr_valid(block, subblock))
+ return -EINVAL;
+
+ spi_message_init(&m);
+
+ memset(&t, 0, sizeof(t));
+
+ t[0].tx_buf = cmd;
+ t[0].len = sizeof(cmd);
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].rx_buf = buf;
+ t[1].len = sizeof(buf);
+ spi_message_add_tail(&t[1], &m);
+
+ cmd[0] = vsc73xx_make_addr(VSC73XX_CMD_SPI_MODE_READ, block, subblock);
+ cmd[1] = reg;
+ cmd[2] = 0;
+ cmd[3] = 0;
+
+ mutex_lock(&vsc_spi->lock);
+ ret = spi_sync(vsc_spi->spi, &m);
+ mutex_unlock(&vsc_spi->lock);
+
+ if (ret)
+ return ret;
+
+ *val = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
+
+ return 0;
+}
+
+static int vsc73xx_spi_write(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
+ u32 val)
+{
+ struct vsc73xx_spi *vsc_spi = vsc->priv;
+ struct spi_transfer t[2];
+ struct spi_message m;
+ u8 cmd[2];
+ u8 buf[4];
+ int ret;
+
+ if (!vsc73xx_is_addr_valid(block, subblock))
+ return -EINVAL;
+
+ spi_message_init(&m);
+
+ memset(&t, 0, sizeof(t));
+
+ t[0].tx_buf = cmd;
+ t[0].len = sizeof(cmd);
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].tx_buf = buf;
+ t[1].len = sizeof(buf);
+ spi_message_add_tail(&t[1], &m);
+
+ cmd[0] = vsc73xx_make_addr(VSC73XX_CMD_SPI_MODE_WRITE, block, subblock);
+ cmd[1] = reg;
+
+ buf[0] = (val >> 24) & 0xff;
+ buf[1] = (val >> 16) & 0xff;
+ buf[2] = (val >> 8) & 0xff;
+ buf[3] = val & 0xff;
+
+ mutex_lock(&vsc_spi->lock);
+ ret = spi_sync(vsc_spi->spi, &m);
+ mutex_unlock(&vsc_spi->lock);
+
+ return ret;
+}
+
+static int vsc73xx_spi_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct vsc73xx_spi *vsc_spi;
+ int ret;
+
+ vsc_spi = devm_kzalloc(dev, sizeof(*vsc_spi), GFP_KERNEL);
+ if (!vsc_spi)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, vsc_spi);
+ vsc_spi->spi = spi_dev_get(spi);
+ vsc_spi->vsc.dev = dev;
+ vsc_spi->vsc.priv = vsc_spi;
+ vsc_spi->vsc.ops = &vsc73xx_spi_ops;
+ mutex_init(&vsc_spi->lock);
+
+ spi->mode = SPI_MODE_0;
+ spi->bits_per_word = 8;
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(dev, "spi setup failed.\n");
+ return ret;
+ }
+
+ return vsc73xx_probe(&vsc_spi->vsc);
+}
+
+static void vsc73xx_spi_remove(struct spi_device *spi)
+{
+ struct vsc73xx_spi *vsc_spi = spi_get_drvdata(spi);
+
+ if (!vsc_spi)
+ return;
+
+ vsc73xx_remove(&vsc_spi->vsc);
+}
+
+static void vsc73xx_spi_shutdown(struct spi_device *spi)
+{
+ struct vsc73xx_spi *vsc_spi = spi_get_drvdata(spi);
+
+ if (!vsc_spi)
+ return;
+
+ vsc73xx_shutdown(&vsc_spi->vsc);
+
+ spi_set_drvdata(spi, NULL);
+}
+
+static const struct vsc73xx_ops vsc73xx_spi_ops = {
+ .read = vsc73xx_spi_read,
+ .write = vsc73xx_spi_write,
+};
+
+static const struct of_device_id vsc73xx_of_match[] = {
+ {
+ .compatible = "vitesse,vsc7385",
+ },
+ {
+ .compatible = "vitesse,vsc7388",
+ },
+ {
+ .compatible = "vitesse,vsc7395",
+ },
+ {
+ .compatible = "vitesse,vsc7398",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, vsc73xx_of_match);
+
+static const struct spi_device_id vsc73xx_spi_ids[] = {
+ { "vsc7385" },
+ { "vsc7388" },
+ { "vsc7395" },
+ { "vsc7398" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, vsc73xx_spi_ids);
+
+static struct spi_driver vsc73xx_spi_driver = {
+ .probe = vsc73xx_spi_probe,
+ .remove = vsc73xx_spi_remove,
+ .shutdown = vsc73xx_spi_shutdown,
+ .id_table = vsc73xx_spi_ids,
+ .driver = {
+ .name = "vsc73xx-spi",
+ .of_match_table = vsc73xx_of_match,
+ },
+};
+module_spi_driver(vsc73xx_spi_driver);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Vitesse VSC7385/7388/7395/7398 SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/vitesse-vsc73xx.h b/drivers/net/dsa/vitesse-vsc73xx.h
new file mode 100644
index 000000000..30b1f0a36
--- /dev/null
+++ b/drivers/net/dsa/vitesse-vsc73xx.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+#include <linux/gpio/driver.h>
+
+/**
+ * struct vsc73xx - VSC73xx state container
+ */
+struct vsc73xx {
+ struct device *dev;
+ struct gpio_desc *reset;
+ struct dsa_switch *ds;
+ struct gpio_chip gc;
+ u16 chipid;
+ u8 addr[ETH_ALEN];
+ const struct vsc73xx_ops *ops;
+ void *priv;
+};
+
+struct vsc73xx_ops {
+ int (*read)(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
+ u32 *val);
+ int (*write)(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
+ u32 val);
+};
+
+int vsc73xx_is_addr_valid(u8 block, u8 subblock);
+int vsc73xx_probe(struct vsc73xx *vsc);
+void vsc73xx_remove(struct vsc73xx *vsc);
+void vsc73xx_shutdown(struct vsc73xx *vsc);
diff --git a/drivers/net/dsa/xrs700x/Kconfig b/drivers/net/dsa/xrs700x/Kconfig
new file mode 100644
index 000000000..d10a4dce1
--- /dev/null
+++ b/drivers/net/dsa/xrs700x/Kconfig
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_DSA_XRS700X
+ tristate
+ depends on NET_DSA
+ select NET_DSA_TAG_XRS700X
+ select REGMAP
+ help
+ This enables support for Arrow SpeedChips XRS7003/7004 gigabit
+ Ethernet switches.
+
+config NET_DSA_XRS700X_I2C
+ tristate "Arrow XRS7000X series switch in I2C mode"
+ depends on NET_DSA && I2C
+ select NET_DSA_XRS700X
+ select REGMAP_I2C
+ help
+ Enable I2C support for Arrow SpeedChips XRS7003/7004 gigabit Ethernet
+ switches.
+
+config NET_DSA_XRS700X_MDIO
+ tristate "Arrow XRS7000X series switch in MDIO mode"
+ depends on NET_DSA
+ select NET_DSA_XRS700X
+ help
+ Enable MDIO support for Arrow SpeedChips XRS7003/7004 gigabit Ethernet
+ switches.
diff --git a/drivers/net/dsa/xrs700x/Makefile b/drivers/net/dsa/xrs700x/Makefile
new file mode 100644
index 000000000..51a3a7d92
--- /dev/null
+++ b/drivers/net/dsa/xrs700x/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_NET_DSA_XRS700X) += xrs700x.o
+obj-$(CONFIG_NET_DSA_XRS700X_I2C) += xrs700x_i2c.o
+obj-$(CONFIG_NET_DSA_XRS700X_MDIO) += xrs700x_mdio.o
diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c
new file mode 100644
index 000000000..fa622639d
--- /dev/null
+++ b/drivers/net/dsa/xrs700x/xrs700x.c
@@ -0,0 +1,827 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 NovaTech LLC
+ * George McCollister <george.mccollister@gmail.com>
+ */
+
+#include <net/dsa.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/of_device.h>
+#include <linux/netdev_features.h>
+#include <linux/if_hsr.h>
+#include "xrs700x.h"
+#include "xrs700x_reg.h"
+
+#define XRS700X_MIB_INTERVAL msecs_to_jiffies(3000)
+
+#define XRS7000X_SUPPORTED_HSR_FEATURES \
+ (NETIF_F_HW_HSR_TAG_INS | NETIF_F_HW_HSR_TAG_RM | \
+ NETIF_F_HW_HSR_FWD | NETIF_F_HW_HSR_DUP)
+
+#define XRS7003E_ID 0x100
+#define XRS7003F_ID 0x101
+#define XRS7004E_ID 0x200
+#define XRS7004F_ID 0x201
+
+const struct xrs700x_info xrs7003e_info = {XRS7003E_ID, "XRS7003E", 3};
+EXPORT_SYMBOL(xrs7003e_info);
+
+const struct xrs700x_info xrs7003f_info = {XRS7003F_ID, "XRS7003F", 3};
+EXPORT_SYMBOL(xrs7003f_info);
+
+const struct xrs700x_info xrs7004e_info = {XRS7004E_ID, "XRS7004E", 4};
+EXPORT_SYMBOL(xrs7004e_info);
+
+const struct xrs700x_info xrs7004f_info = {XRS7004F_ID, "XRS7004F", 4};
+EXPORT_SYMBOL(xrs7004f_info);
+
+struct xrs700x_regfield {
+ struct reg_field rf;
+ struct regmap_field **rmf;
+};
+
+struct xrs700x_mib {
+ unsigned int offset;
+ const char *name;
+ int stats64_offset;
+};
+
+#define XRS700X_MIB_ETHTOOL_ONLY(o, n) {o, n, -1}
+#define XRS700X_MIB(o, n, m) {o, n, offsetof(struct rtnl_link_stats64, m)}
+
+static const struct xrs700x_mib xrs700x_mibs[] = {
+ XRS700X_MIB(XRS_RX_GOOD_OCTETS_L, "rx_good_octets", rx_bytes),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_BAD_OCTETS_L, "rx_bad_octets"),
+ XRS700X_MIB(XRS_RX_UNICAST_L, "rx_unicast", rx_packets),
+ XRS700X_MIB(XRS_RX_BROADCAST_L, "rx_broadcast", rx_packets),
+ XRS700X_MIB(XRS_RX_MULTICAST_L, "rx_multicast", multicast),
+ XRS700X_MIB(XRS_RX_UNDERSIZE_L, "rx_undersize", rx_length_errors),
+ XRS700X_MIB(XRS_RX_FRAGMENTS_L, "rx_fragments", rx_length_errors),
+ XRS700X_MIB(XRS_RX_OVERSIZE_L, "rx_oversize", rx_length_errors),
+ XRS700X_MIB(XRS_RX_JABBER_L, "rx_jabber", rx_length_errors),
+ XRS700X_MIB(XRS_RX_ERR_L, "rx_err", rx_errors),
+ XRS700X_MIB(XRS_RX_CRC_L, "rx_crc", rx_crc_errors),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_64_L, "rx_64"),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_65_127_L, "rx_65_127"),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_128_255_L, "rx_128_255"),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_256_511_L, "rx_256_511"),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_512_1023_L, "rx_512_1023"),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_1024_1536_L, "rx_1024_1536"),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_HSR_PRP_L, "rx_hsr_prp"),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_WRONGLAN_L, "rx_wronglan"),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_DUPLICATE_L, "rx_duplicate"),
+ XRS700X_MIB(XRS_TX_OCTETS_L, "tx_octets", tx_bytes),
+ XRS700X_MIB(XRS_TX_UNICAST_L, "tx_unicast", tx_packets),
+ XRS700X_MIB(XRS_TX_BROADCAST_L, "tx_broadcast", tx_packets),
+ XRS700X_MIB(XRS_TX_MULTICAST_L, "tx_multicast", tx_packets),
+ XRS700X_MIB_ETHTOOL_ONLY(XRS_TX_HSR_PRP_L, "tx_hsr_prp"),
+ XRS700X_MIB(XRS_PRIQ_DROP_L, "priq_drop", tx_dropped),
+ XRS700X_MIB(XRS_EARLY_DROP_L, "early_drop", tx_dropped),
+};
+
+static const u8 eth_hsrsup_addr[ETH_ALEN] = {
+ 0x01, 0x15, 0x4e, 0x00, 0x01, 0x00};
+
+static void xrs700x_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, u8 *data)
+{
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(xrs700x_mibs); i++) {
+ strscpy(data, xrs700x_mibs[i].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+}
+
+static int xrs700x_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ return ARRAY_SIZE(xrs700x_mibs);
+}
+
+static void xrs700x_read_port_counters(struct xrs700x *priv, int port)
+{
+ struct xrs700x_port *p = &priv->ports[port];
+ struct rtnl_link_stats64 stats;
+ unsigned long flags;
+ int i;
+
+ memset(&stats, 0, sizeof(stats));
+
+ mutex_lock(&p->mib_mutex);
+
+ /* Capture counter values */
+ regmap_write(priv->regmap, XRS_CNT_CTRL(port), 1);
+
+ for (i = 0; i < ARRAY_SIZE(xrs700x_mibs); i++) {
+ unsigned int high = 0, low = 0, reg;
+
+ reg = xrs700x_mibs[i].offset + XRS_PORT_OFFSET * port;
+ regmap_read(priv->regmap, reg, &low);
+ regmap_read(priv->regmap, reg + 2, &high);
+
+ p->mib_data[i] += (high << 16) | low;
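+ /* Assumption, not stated in the code: the capture above
+ * latches and clears the hardware counters, so each
+ * (high << 16) | low readout is a delta accumulated
+ * into the 64-bit mib_data value.
+ */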
+
+ if (xrs700x_mibs[i].stats64_offset >= 0) {
+ u8 *s = (u8 *)&stats + xrs700x_mibs[i].stats64_offset;
+ *(u64 *)s += p->mib_data[i];
+ }
+ }
+
+ /* multicast must be added to rx_packets (which already includes
+ * unicast and broadcast)
+ */
+ stats.rx_packets += stats.multicast;
+
+ flags = u64_stats_update_begin_irqsave(&p->syncp);
+ p->stats64 = stats;
+ u64_stats_update_end_irqrestore(&p->syncp, flags);
+
+ mutex_unlock(&p->mib_mutex);
+}
+
+static void xrs700x_mib_work(struct work_struct *work)
+{
+ struct xrs700x *priv = container_of(work, struct xrs700x,
+ mib_work.work);
+ int i;
+
+ for (i = 0; i < priv->ds->num_ports; i++)
+ xrs700x_read_port_counters(priv, i);
+
+ schedule_delayed_work(&priv->mib_work, XRS700X_MIB_INTERVAL);
+}
+
+static void xrs700x_get_ethtool_stats(struct dsa_switch *ds, int port,
+ u64 *data)
+{
+ struct xrs700x *priv = ds->priv;
+ struct xrs700x_port *p = &priv->ports[port];
+
+ xrs700x_read_port_counters(priv, port);
+
+ mutex_lock(&p->mib_mutex);
+ memcpy(data, p->mib_data, sizeof(*data) * ARRAY_SIZE(xrs700x_mibs));
+ mutex_unlock(&p->mib_mutex);
+}
+
+static void xrs700x_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *s)
+{
+ struct xrs700x *priv = ds->priv;
+ struct xrs700x_port *p = &priv->ports[port];
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&p->syncp);
+ *s = p->stats64;
+ } while (u64_stats_fetch_retry(&p->syncp, start));
+}
+
+static int xrs700x_setup_regmap_range(struct xrs700x *priv)
+{
+ struct xrs700x_regfield regfields[] = {
+ {
+ .rf = REG_FIELD_ID(XRS_PORT_STATE(0), 0, 1,
+ priv->ds->num_ports,
+ XRS_PORT_OFFSET),
+ .rmf = &priv->ps_forward
+ },
+ {
+ .rf = REG_FIELD_ID(XRS_PORT_STATE(0), 2, 3,
+ priv->ds->num_ports,
+ XRS_PORT_OFFSET),
+ .rmf = &priv->ps_management
+ },
+ {
+ .rf = REG_FIELD_ID(XRS_PORT_STATE(0), 4, 9,
+ priv->ds->num_ports,
+ XRS_PORT_OFFSET),
+ .rmf = &priv->ps_sel_speed
+ },
+ {
+ .rf = REG_FIELD_ID(XRS_PORT_STATE(0), 10, 11,
+ priv->ds->num_ports,
+ XRS_PORT_OFFSET),
+ .rmf = &priv->ps_cur_speed
+ }
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(regfields); i++) {
+ *regfields[i].rmf = devm_regmap_field_alloc(priv->dev,
+ priv->regmap,
+ regfields[i].rf);
+ if (IS_ERR(*regfields[i].rmf))
+ return PTR_ERR(*regfields[i].rmf);
+ }
+
+ return 0;
+}
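+
+/* Usage sketch: the reg fields above slice XRS_PORT_STATE(port), with
+ * XRS_PORT_OFFSET as the per-port stride. For instance,
+ * regmap_fields_write(priv->ps_forward, port, XRS_PORT_FORWARDING)
+ * updates only bits 1:0 of that port's state register.
+ */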
+
+static enum dsa_tag_protocol xrs700x_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol m)
+{
+ return DSA_TAG_PROTO_XRS700X;
+}
+
+static int xrs700x_reset(struct dsa_switch *ds)
+{
+ struct xrs700x *priv = ds->priv;
+ unsigned int val;
+ int ret;
+
+ ret = regmap_write(priv->regmap, XRS_GENERAL, XRS_GENERAL_RESET);
+ if (ret)
+ goto error;
+
+ ret = regmap_read_poll_timeout(priv->regmap, XRS_GENERAL,
+ val, !(val & XRS_GENERAL_RESET),
+ 10, 1000);
+error:
+ if (ret) {
+ dev_err_ratelimited(priv->dev, "error resetting switch: %d\n",
+ ret);
+ }
+
+ return ret;
+}
+
+static void xrs700x_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct xrs700x *priv = ds->priv;
+ unsigned int bpdus = 1;
+ unsigned int val;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ bpdus = 0;
+ fallthrough;
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ val = XRS_PORT_DISABLED;
+ break;
+ case BR_STATE_LEARNING:
+ val = XRS_PORT_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ val = XRS_PORT_FORWARDING;
+ break;
+ default:
+ dev_err(ds->dev, "invalid STP state: %d\n", state);
+ return;
+ }
+
+ regmap_fields_write(priv->ps_forward, port, val);
+
+ /* Enable/disable inbound policy added by xrs700x_port_add_bpdu_ipf()
+ * which allows BPDU forwarding to the CPU port when the front facing
+ * port is in disabled/learning state.
+ */
+ regmap_update_bits(priv->regmap, XRS_ETH_ADDR_CFG(port, 0), 1, bpdus);
+
+ dev_dbg_ratelimited(priv->dev, "%s - port: %d, state: %u, val: 0x%x\n",
+ __func__, port, state, val);
+}
+
+/* Add an inbound policy filter which matches the BPDU destination MAC
+ * and forwards to the CPU port. Leave the policy disabled, it will be
+ * enabled as needed.
+ */
+static int xrs700x_port_add_bpdu_ipf(struct dsa_switch *ds, int port)
+{
+ struct xrs700x *priv = ds->priv;
+ unsigned int val = 0;
+ int i = 0;
+ int ret;
+
+ /* Compare all 48 bits of the destination MAC address. */
+ ret = regmap_write(priv->regmap, XRS_ETH_ADDR_CFG(port, 0), 48 << 2);
+ if (ret)
+ return ret;
+
+ /* match BPDU destination 01:80:c2:00:00:00 */
+ for (i = 0; i < sizeof(eth_stp_addr); i += 2) {
+ ret = regmap_write(priv->regmap, XRS_ETH_ADDR_0(port, 0) + i,
+ eth_stp_addr[i] |
+ (eth_stp_addr[i + 1] << 8));
+ if (ret)
+ return ret;
+ }
+
+ /* Mirror BPDU to CPU port */
+ for (i = 0; i < ds->num_ports; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ val |= BIT(i);
+ }
+
+ ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_MIRROR(port, 0), val);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_ALLOW(port, 0), 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/* Add an inbound policy filter which matches the HSR/PRP supervision MAC
+ * range and forwards to the CPU port without discarding duplicates.
+ * This is required to correctly populate the HSR/PRP node_table.
+ * Leave the policy disabled, it will be enabled as needed.
+ */
+static int xrs700x_port_add_hsrsup_ipf(struct dsa_switch *ds, int port,
+ int fwdport)
+{
+ struct xrs700x *priv = ds->priv;
+ unsigned int val = 0;
+ int i = 0;
+ int ret;
+
+ /* Compare 40 bits of the destination MAC address. */
+ ret = regmap_write(priv->regmap, XRS_ETH_ADDR_CFG(port, 1), 40 << 2);
+ if (ret)
+ return ret;
+
+ /* match HSR/PRP supervision destination 01:15:4e:00:01:XX */
+ for (i = 0; i < sizeof(eth_hsrsup_addr); i += 2) {
+ ret = regmap_write(priv->regmap, XRS_ETH_ADDR_0(port, 1) + i,
+ eth_hsrsup_addr[i] |
+ (eth_hsrsup_addr[i + 1] << 8));
+ if (ret)
+ return ret;
+ }
+
+ /* Mirror HSR/PRP supervision to CPU port */
+ for (i = 0; i < ds->num_ports; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ val |= BIT(i);
+ }
+
+ ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_MIRROR(port, 1), val);
+ if (ret)
+ return ret;
+
+ if (fwdport >= 0)
+ val |= BIT(fwdport);
+
+ /* Allow must be set to prevent duplicate discard */
+ ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_ALLOW(port, 1), val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int xrs700x_port_setup(struct dsa_switch *ds, int port)
+{
+ bool cpu_port = dsa_is_cpu_port(ds, port);
+ struct xrs700x *priv = ds->priv;
+ unsigned int val = 0;
+ int ret, i;
+
+ xrs700x_port_stp_state_set(ds, port, BR_STATE_DISABLED);
+
+ /* Disable forwarding to non-CPU ports */
+ for (i = 0; i < ds->num_ports; i++) {
+ if (!dsa_is_cpu_port(ds, i))
+ val |= BIT(i);
+ }
+
+ /* 1 = Disable forwarding to the port */
+ ret = regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port), val);
+ if (ret)
+ return ret;
+
+ val = cpu_port ? XRS_PORT_MODE_MANAGEMENT : XRS_PORT_MODE_NORMAL;
+ ret = regmap_fields_write(priv->ps_management, port, val);
+ if (ret)
+ return ret;
+
+ if (!cpu_port) {
+ ret = xrs700x_port_add_bpdu_ipf(ds, port);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xrs700x_setup(struct dsa_switch *ds)
+{
+ struct xrs700x *priv = ds->priv;
+ int ret, i;
+
+ ret = xrs700x_reset(ds);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ds->num_ports; i++) {
+ ret = xrs700x_port_setup(ds, i);
+ if (ret)
+ return ret;
+ }
+
+ schedule_delayed_work(&priv->mib_work, XRS700X_MIB_INTERVAL);
+
+ return 0;
+}
+
+static void xrs700x_teardown(struct dsa_switch *ds)
+{
+ struct xrs700x *priv = ds->priv;
+
+ cancel_delayed_work_sync(&priv->mib_work);
+}
+
+static void xrs700x_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ switch (port) {
+ case 0:
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
+ config->mac_capabilities = MAC_10FD | MAC_100FD;
+ break;
+
+ case 1:
+ case 2:
+ case 3:
+ phy_interface_set_rgmii(config->supported_interfaces);
+ config->mac_capabilities = MAC_10FD | MAC_100FD | MAC_1000FD;
+ break;
+
+ default:
+ dev_err(ds->dev, "Unsupported port: %i\n", port);
+ break;
+ }
+}
+
+static void xrs700x_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode, phy_interface_t interface,
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct xrs700x *priv = ds->priv;
+ unsigned int val;
+
+ switch (speed) {
+ case SPEED_1000:
+ val = XRS_PORT_SPEED_1000;
+ break;
+ case SPEED_100:
+ val = XRS_PORT_SPEED_100;
+ break;
+ case SPEED_10:
+ val = XRS_PORT_SPEED_10;
+ break;
+ default:
+ return;
+ }
+
+ regmap_fields_write(priv->ps_sel_speed, port, val);
+
+ dev_dbg_ratelimited(priv->dev, "%s: port: %d mode: %u speed: %u\n",
+ __func__, port, mode, speed);
+}
+
+static int xrs700x_bridge_common(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge, bool join)
+{
+ unsigned int i, cpu_mask = 0, mask = 0;
+ struct xrs700x *priv = ds->priv;
+ int ret;
+
+ for (i = 0; i < ds->num_ports; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ continue;
+
+ cpu_mask |= BIT(i);
+
+ if (dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
+ continue;
+
+ mask |= BIT(i);
+ }
+
+ for (i = 0; i < ds->num_ports; i++) {
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
+ continue;
+
+ /* 1 = Disable forwarding to the port */
+ ret = regmap_write(priv->regmap, XRS_PORT_FWD_MASK(i), mask);
+ if (ret)
+ return ret;
+ }
+
+ if (!join) {
+ ret = regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port),
+ cpu_mask);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xrs700x_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge, bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
+{
+ return xrs700x_bridge_common(ds, port, bridge, true);
+}
+
+static void xrs700x_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
+{
+ xrs700x_bridge_common(ds, port, bridge, false);
+}
+
+static int xrs700x_hsr_join(struct dsa_switch *ds, int port,
+ struct net_device *hsr)
+{
+ unsigned int val = XRS_HSR_CFG_HSR_PRP;
+ struct dsa_port *partner = NULL, *dp;
+ struct xrs700x *priv = ds->priv;
+ struct net_device *slave;
+ int ret, i, hsr_pair[2];
+ enum hsr_version ver;
+ bool fwd = false;
+
+ ret = hsr_get_version(hsr, &ver);
+ if (ret)
+ return ret;
+
+ /* Only ports 1 and 2 can be HSR/PRP redundant ports. */
+ if (port != 1 && port != 2)
+ return -EOPNOTSUPP;
+
+ if (ver == HSR_V1)
+ val |= XRS_HSR_CFG_HSR;
+ else if (ver == PRP_V1)
+ val |= XRS_HSR_CFG_PRP;
+ else
+ return -EOPNOTSUPP;
+
+ dsa_hsr_foreach_port(dp, ds, hsr) {
+ if (dp->index != port) {
+ partner = dp;
+ break;
+ }
+ }
+
+ /* We can't enable redundancy on the switch until both
+ * redundant ports have signed up.
+ */
+ if (!partner)
+ return 0;
+
+ regmap_fields_write(priv->ps_forward, partner->index,
+ XRS_PORT_DISABLED);
+ regmap_fields_write(priv->ps_forward, port, XRS_PORT_DISABLED);
+
+ regmap_write(priv->regmap, XRS_HSR_CFG(partner->index),
+ val | XRS_HSR_CFG_LANID_A);
+ regmap_write(priv->regmap, XRS_HSR_CFG(port),
+ val | XRS_HSR_CFG_LANID_B);
+
+ /* Clear bits for both redundant ports (HSR only) and the CPU port to
+ * enable forwarding.
+ */
+ val = GENMASK(ds->num_ports - 1, 0);
+ if (ver == HSR_V1) {
+ val &= ~BIT(partner->index);
+ val &= ~BIT(port);
+ fwd = true;
+ }
+ val &= ~BIT(dsa_upstream_port(ds, port));
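+ /* Illustrative example (assuming the CPU port is port 0 on an
+ * XRS7004): with ports 1 and 2 as the HSR ring pair, val goes
+ * 0xf -> 0x9 -> 0x8, leaving forwarding disabled only towards
+ * port 3.
+ */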
+ regmap_write(priv->regmap, XRS_PORT_FWD_MASK(partner->index), val);
+ regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port), val);
+
+ regmap_fields_write(priv->ps_forward, partner->index,
+ XRS_PORT_FORWARDING);
+ regmap_fields_write(priv->ps_forward, port, XRS_PORT_FORWARDING);
+
+ /* Enable inbound policy which allows HSR/PRP supervision forwarding
+ * to the CPU port without discarding duplicates. Continue to
+ * forward to redundant ports when in HSR mode while discarding
+ * duplicates.
+ */
+ ret = xrs700x_port_add_hsrsup_ipf(ds, partner->index, fwd ? port : -1);
+ if (ret)
+ return ret;
+
+ ret = xrs700x_port_add_hsrsup_ipf(ds, port, fwd ? partner->index : -1);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(priv->regmap,
+ XRS_ETH_ADDR_CFG(partner->index, 1), 1, 1);
+ regmap_update_bits(priv->regmap, XRS_ETH_ADDR_CFG(port, 1), 1, 1);
+
+ hsr_pair[0] = port;
+ hsr_pair[1] = partner->index;
+ for (i = 0; i < ARRAY_SIZE(hsr_pair); i++) {
+ slave = dsa_to_port(ds, hsr_pair[i])->slave;
+ slave->features |= XRS7000X_SUPPORTED_HSR_FEATURES;
+ }
+
+ return 0;
+}
+
+static int xrs700x_hsr_leave(struct dsa_switch *ds, int port,
+ struct net_device *hsr)
+{
+ struct dsa_port *partner = NULL, *dp;
+ struct xrs700x *priv = ds->priv;
+ struct net_device *slave;
+ int i, hsr_pair[2];
+ unsigned int val;
+
+ dsa_hsr_foreach_port(dp, ds, hsr) {
+ if (dp->index != port) {
+ partner = dp;
+ break;
+ }
+ }
+
+ if (!partner)
+ return 0;
+
+ regmap_fields_write(priv->ps_forward, partner->index,
+ XRS_PORT_DISABLED);
+ regmap_fields_write(priv->ps_forward, port, XRS_PORT_DISABLED);
+
+ regmap_write(priv->regmap, XRS_HSR_CFG(partner->index), 0);
+ regmap_write(priv->regmap, XRS_HSR_CFG(port), 0);
+
+ /* Clear bit for the CPU port to enable forwarding. */
+ val = GENMASK(ds->num_ports - 1, 0);
+ val &= ~BIT(dsa_upstream_port(ds, port));
+ regmap_write(priv->regmap, XRS_PORT_FWD_MASK(partner->index), val);
+ regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port), val);
+
+ regmap_fields_write(priv->ps_forward, partner->index,
+ XRS_PORT_FORWARDING);
+ regmap_fields_write(priv->ps_forward, port, XRS_PORT_FORWARDING);
+
+ /* Disable inbound policy added by xrs700x_port_add_hsrsup_ipf()
+ * which allows HSR/PRP supervision forwarding to the CPU port without
+ * discarding duplicates.
+ */
+ regmap_update_bits(priv->regmap,
+ XRS_ETH_ADDR_CFG(partner->index, 1), 1, 0);
+ regmap_update_bits(priv->regmap, XRS_ETH_ADDR_CFG(port, 1), 1, 0);
+
+ hsr_pair[0] = port;
+ hsr_pair[1] = partner->index;
+ for (i = 0; i < ARRAY_SIZE(hsr_pair); i++) {
+ slave = dsa_to_port(ds, hsr_pair[i])->slave;
+ slave->features &= ~XRS7000X_SUPPORTED_HSR_FEATURES;
+ }
+
+ return 0;
+}
+
+static const struct dsa_switch_ops xrs700x_ops = {
+ .get_tag_protocol = xrs700x_get_tag_protocol,
+ .setup = xrs700x_setup,
+ .teardown = xrs700x_teardown,
+ .port_stp_state_set = xrs700x_port_stp_state_set,
+ .phylink_get_caps = xrs700x_phylink_get_caps,
+ .phylink_mac_link_up = xrs700x_mac_link_up,
+ .get_strings = xrs700x_get_strings,
+ .get_sset_count = xrs700x_get_sset_count,
+ .get_ethtool_stats = xrs700x_get_ethtool_stats,
+ .get_stats64 = xrs700x_get_stats64,
+ .port_bridge_join = xrs700x_bridge_join,
+ .port_bridge_leave = xrs700x_bridge_leave,
+ .port_hsr_join = xrs700x_hsr_join,
+ .port_hsr_leave = xrs700x_hsr_leave,
+};
+
+static int xrs700x_detect(struct xrs700x *priv)
+{
+ const struct xrs700x_info *info;
+ unsigned int id;
+ int ret;
+
+ ret = regmap_read(priv->regmap, XRS_DEV_ID0, &id);
+ if (ret) {
+ dev_err(priv->dev, "error %d while reading switch id.\n",
+ ret);
+ return ret;
+ }
+
+ info = of_device_get_match_data(priv->dev);
+ if (!info)
+ return -EINVAL;
+
+ if (info->id == id) {
+ priv->ds->num_ports = info->num_ports;
+ dev_info(priv->dev, "%s detected.\n", info->name);
+ return 0;
+ }
+
+ dev_err(priv->dev, "expected switch id 0x%x but found 0x%x.\n",
+ info->id, id);
+
+ return -ENODEV;
+}
+
+struct xrs700x *xrs700x_switch_alloc(struct device *base, void *devpriv)
+{
+ struct dsa_switch *ds;
+ struct xrs700x *priv;
+
+ ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
+ if (!ds)
+ return NULL;
+
+ ds->dev = base;
+
+ priv = devm_kzalloc(base, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return NULL;
+
+ INIT_DELAYED_WORK(&priv->mib_work, xrs700x_mib_work);
+
+ ds->ops = &xrs700x_ops;
+ ds->priv = priv;
+ priv->dev = base;
+
+ priv->ds = ds;
+ priv->priv = devpriv;
+
+ return priv;
+}
+EXPORT_SYMBOL(xrs700x_switch_alloc);
+
+static int xrs700x_alloc_port_mib(struct xrs700x *priv, int port)
+{
+ struct xrs700x_port *p = &priv->ports[port];
+
+ p->mib_data = devm_kcalloc(priv->dev, ARRAY_SIZE(xrs700x_mibs),
+ sizeof(*p->mib_data), GFP_KERNEL);
+ if (!p->mib_data)
+ return -ENOMEM;
+
+ mutex_init(&p->mib_mutex);
+ u64_stats_init(&p->syncp);
+
+ return 0;
+}
+
+int xrs700x_switch_register(struct xrs700x *priv)
+{
+ int ret;
+ int i;
+
+ ret = xrs700x_detect(priv);
+ if (ret)
+ return ret;
+
+ ret = xrs700x_setup_regmap_range(priv);
+ if (ret)
+ return ret;
+
+ priv->ports = devm_kcalloc(priv->dev, priv->ds->num_ports,
+ sizeof(*priv->ports), GFP_KERNEL);
+ if (!priv->ports)
+ return -ENOMEM;
+
+ for (i = 0; i < priv->ds->num_ports; i++) {
+ ret = xrs700x_alloc_port_mib(priv, i);
+ if (ret)
+ return ret;
+ }
+
+ return dsa_register_switch(priv->ds);
+}
+EXPORT_SYMBOL(xrs700x_switch_register);
+
+void xrs700x_switch_remove(struct xrs700x *priv)
+{
+ dsa_unregister_switch(priv->ds);
+}
+EXPORT_SYMBOL(xrs700x_switch_remove);
+
+void xrs700x_switch_shutdown(struct xrs700x *priv)
+{
+ dsa_switch_shutdown(priv->ds);
+}
+EXPORT_SYMBOL(xrs700x_switch_shutdown);
+
+MODULE_AUTHOR("George McCollister <george.mccollister@gmail.com>");
+MODULE_DESCRIPTION("Arrow SpeedChips XRS700x DSA driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/xrs700x/xrs700x.h b/drivers/net/dsa/xrs700x/xrs700x.h
new file mode 100644
index 000000000..4d5825747
--- /dev/null
+++ b/drivers/net/dsa/xrs700x/xrs700x.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/workqueue.h>
+#include <linux/u64_stats_sync.h>
+#include <uapi/linux/if_link.h>
+
+struct xrs700x_info {
+ unsigned int id;
+ const char *name;
+ size_t num_ports;
+};
+
+extern const struct xrs700x_info xrs7003e_info;
+extern const struct xrs700x_info xrs7003f_info;
+extern const struct xrs700x_info xrs7004e_info;
+extern const struct xrs700x_info xrs7004f_info;
+
+struct xrs700x_port {
+ struct mutex mib_mutex; /* protects mib_data */
+ u64 *mib_data;
+ struct rtnl_link_stats64 stats64;
+ struct u64_stats_sync syncp;
+};
+
+struct xrs700x {
+ struct dsa_switch *ds;
+ struct device *dev;
+ void *priv;
+ struct regmap *regmap;
+ struct regmap_field *ps_forward;
+ struct regmap_field *ps_management;
+ struct regmap_field *ps_sel_speed;
+ struct regmap_field *ps_cur_speed;
+ struct delayed_work mib_work;
+ struct xrs700x_port *ports;
+};
+
+struct xrs700x *xrs700x_switch_alloc(struct device *base, void *devpriv);
+int xrs700x_switch_register(struct xrs700x *priv);
+void xrs700x_switch_remove(struct xrs700x *priv);
+void xrs700x_switch_shutdown(struct xrs700x *priv);
diff --git a/drivers/net/dsa/xrs700x/xrs700x_i2c.c b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
new file mode 100644
index 000000000..54065cded
--- /dev/null
+++ b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 NovaTech LLC
+ * George McCollister <george.mccollister@gmail.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include "xrs700x.h"
+#include "xrs700x_reg.h"
+
+struct xrs700x_i2c_cmd {
+ __be32 reg;
+ __be16 val;
+} __packed;
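+
+/* Wire format sketch (derived from the accessors below): a transfer is
+ * a 32-bit big-endian register address, with bit 0 set for reads,
+ * optionally followed by a 16-bit big-endian value. Reading register
+ * 0x210000 thus sends the bytes 00 21 00 01 and receives two bytes.
+ */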
+
+static int xrs700x_i2c_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ struct xrs700x_i2c_cmd cmd;
+ int ret;
+
+ cmd.reg = cpu_to_be32(reg | 1);
+
+ ret = i2c_master_send(i2c, (char *)&cmd.reg, sizeof(cmd.reg));
+ if (ret < 0) {
+ dev_err(dev, "xrs i2c_master_send returned %d\n", ret);
+ return ret;
+ }
+
+ ret = i2c_master_recv(i2c, (char *)&cmd.val, sizeof(cmd.val));
+ if (ret < 0) {
+ dev_err(dev, "xrs i2c_master_recv returned %d\n", ret);
+ return ret;
+ }
+
+ *val = be16_to_cpu(cmd.val);
+ return 0;
+}
+
+static int xrs700x_i2c_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ struct xrs700x_i2c_cmd cmd;
+ int ret;
+
+ cmd.reg = cpu_to_be32(reg);
+ cmd.val = cpu_to_be16(val);
+
+ ret = i2c_master_send(i2c, (char *)&cmd, sizeof(cmd));
+ if (ret < 0) {
+ dev_err(dev, "xrs i2c_master_send returned %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct regmap_config xrs700x_i2c_regmap_config = {
+ .val_bits = 16,
+ .reg_stride = 2,
+ .reg_bits = 32,
+ .pad_bits = 0,
+ .write_flag_mask = 0,
+ .read_flag_mask = 0,
+ .reg_read = xrs700x_i2c_reg_read,
+ .reg_write = xrs700x_i2c_reg_write,
+ .max_register = 0,
+ .cache_type = REGCACHE_NONE,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG
+};
+
+static int xrs700x_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *i2c_id)
+{
+ struct xrs700x *priv;
+ int ret;
+
+ priv = xrs700x_switch_alloc(&i2c->dev, i2c);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regmap = devm_regmap_init(&i2c->dev, NULL, &i2c->dev,
+ &xrs700x_i2c_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ ret = PTR_ERR(priv->regmap);
+ dev_err(&i2c->dev, "Failed to initialize regmap: %d\n", ret);
+ return ret;
+ }
+
+ i2c_set_clientdata(i2c, priv);
+
+ ret = xrs700x_switch_register(priv);
+
+ /* Main DSA driver may not be started yet. */
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void xrs700x_i2c_remove(struct i2c_client *i2c)
+{
+ struct xrs700x *priv = i2c_get_clientdata(i2c);
+
+ if (!priv)
+ return;
+
+ xrs700x_switch_remove(priv);
+}
+
+static void xrs700x_i2c_shutdown(struct i2c_client *i2c)
+{
+ struct xrs700x *priv = i2c_get_clientdata(i2c);
+
+ if (!priv)
+ return;
+
+ xrs700x_switch_shutdown(priv);
+
+ i2c_set_clientdata(i2c, NULL);
+}
+
+static const struct i2c_device_id xrs700x_i2c_id[] = {
+ { "xrs700x-switch", 0 },
+ {},
+};
+
+MODULE_DEVICE_TABLE(i2c, xrs700x_i2c_id);
+
+static const struct of_device_id __maybe_unused xrs700x_i2c_dt_ids[] = {
+ { .compatible = "arrow,xrs7003e", .data = &xrs7003e_info },
+ { .compatible = "arrow,xrs7003f", .data = &xrs7003f_info },
+ { .compatible = "arrow,xrs7004e", .data = &xrs7004e_info },
+ { .compatible = "arrow,xrs7004f", .data = &xrs7004f_info },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xrs700x_i2c_dt_ids);
+
+static struct i2c_driver xrs700x_i2c_driver = {
+ .driver = {
+ .name = "xrs700x-i2c",
+ .of_match_table = of_match_ptr(xrs700x_i2c_dt_ids),
+ },
+ .probe = xrs700x_i2c_probe,
+ .remove = xrs700x_i2c_remove,
+ .shutdown = xrs700x_i2c_shutdown,
+ .id_table = xrs700x_i2c_id,
+};
+
+module_i2c_driver(xrs700x_i2c_driver);
+
+MODULE_AUTHOR("George McCollister <george.mccollister@gmail.com>");
+MODULE_DESCRIPTION("Arrow SpeedChips XRS700x DSA I2C driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/xrs700x/xrs700x_mdio.c b/drivers/net/dsa/xrs700x/xrs700x_mdio.c
new file mode 100644
index 000000000..5f7d344b5
--- /dev/null
+++ b/drivers/net/dsa/xrs700x/xrs700x_mdio.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 NovaTech LLC
+ * George McCollister <george.mccollister@gmail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/if_vlan.h>
+#include <linux/of.h>
+#include "xrs700x.h"
+#include "xrs700x_reg.h"
+
+#define XRS_MDIO_IBA0 0x10
+#define XRS_MDIO_IBA1 0x11
+#define XRS_MDIO_IBD 0x14
+
+#define XRS_IB_READ 0x0
+#define XRS_IB_WRITE 0x1
+
+static int xrs700x_mdio_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct mdio_device *mdiodev = context;
+ struct device *dev = &mdiodev->dev;
+ u16 uval;
+ int ret;
+
+ uval = (u16)FIELD_GET(GENMASK(31, 16), reg);
+
+ ret = mdiodev_write(mdiodev, XRS_MDIO_IBA1, uval);
+ if (ret < 0) {
+ dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
+ return ret;
+ }
+
+ uval = (u16)((reg & GENMASK(15, 1)) | XRS_IB_READ);
+
+ ret = mdiodev_write(mdiodev, XRS_MDIO_IBA0, uval);
+ if (ret < 0) {
+ dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
+ return ret;
+ }
+
+ ret = mdiodev_read(mdiodev, XRS_MDIO_IBD);
+ if (ret < 0) {
+ dev_err(dev, "xrs mdiobus_read returned %d\n", ret);
+ return ret;
+ }
+
+ *val = (unsigned int)ret;
+
+ return 0;
+}
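+
+/* Worked example (illustrative): reading switch register 0x210000
+ * writes 0x0021 to XRS_MDIO_IBA1, then 0x0000 | XRS_IB_READ to
+ * XRS_MDIO_IBA0, and finally fetches the 16-bit value from
+ * XRS_MDIO_IBD.
+ */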
+
+static int xrs700x_mdio_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct mdio_device *mdiodev = context;
+ struct device *dev = &mdiodev->dev;
+ u16 uval;
+ int ret;
+
+ ret = mdiodev_write(mdiodev, XRS_MDIO_IBD, (u16)val);
+ if (ret < 0) {
+ dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
+ return ret;
+ }
+
+ uval = (u16)FIELD_GET(GENMASK(31, 16), reg);
+
+ ret = mdiodev_write(mdiodev, XRS_MDIO_IBA1, uval);
+ if (ret < 0) {
+ dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
+ return ret;
+ }
+
+ uval = (u16)((reg & GENMASK(15, 1)) | XRS_IB_WRITE);
+
+ ret = mdiodev_write(mdiodev, XRS_MDIO_IBA0, uval);
+ if (ret < 0) {
+ dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct regmap_config xrs700x_mdio_regmap_config = {
+ .val_bits = 16,
+ .reg_stride = 2,
+ .reg_bits = 32,
+ .pad_bits = 0,
+ .write_flag_mask = 0,
+ .read_flag_mask = 0,
+ .reg_read = xrs700x_mdio_reg_read,
+ .reg_write = xrs700x_mdio_reg_write,
+ .max_register = XRS_VLAN(VLAN_N_VID - 1),
+ .cache_type = REGCACHE_NONE,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG
+};
+
+static int xrs700x_mdio_probe(struct mdio_device *mdiodev)
+{
+ struct xrs700x *priv;
+ int ret;
+
+ priv = xrs700x_switch_alloc(&mdiodev->dev, mdiodev);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, mdiodev,
+ &xrs700x_mdio_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ ret = PTR_ERR(priv->regmap);
+ dev_err(&mdiodev->dev, "Failed to initialize regmap: %d\n", ret);
+ return ret;
+ }
+
+ dev_set_drvdata(&mdiodev->dev, priv);
+
+ /* Main DSA driver may not be started yet. */
+ ret = xrs700x_switch_register(priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void xrs700x_mdio_remove(struct mdio_device *mdiodev)
+{
+ struct xrs700x *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ xrs700x_switch_remove(priv);
+}
+
+static void xrs700x_mdio_shutdown(struct mdio_device *mdiodev)
+{
+ struct xrs700x *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ xrs700x_switch_shutdown(priv);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static const struct of_device_id __maybe_unused xrs700x_mdio_dt_ids[] = {
+ { .compatible = "arrow,xrs7003e", .data = &xrs7003e_info },
+ { .compatible = "arrow,xrs7003f", .data = &xrs7003f_info },
+ { .compatible = "arrow,xrs7004e", .data = &xrs7004e_info },
+ { .compatible = "arrow,xrs7004f", .data = &xrs7004f_info },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xrs700x_mdio_dt_ids);
+
+static struct mdio_driver xrs700x_mdio_driver = {
+ .mdiodrv.driver = {
+ .name = "xrs700x-mdio",
+ .of_match_table = of_match_ptr(xrs700x_mdio_dt_ids),
+ },
+ .probe = xrs700x_mdio_probe,
+ .remove = xrs700x_mdio_remove,
+ .shutdown = xrs700x_mdio_shutdown,
+};
+
+mdio_module_driver(xrs700x_mdio_driver);
+
+MODULE_AUTHOR("George McCollister <george.mccollister@gmail.com>");
+MODULE_DESCRIPTION("Arrow SpeedChips XRS700x DSA MDIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/xrs700x/xrs700x_reg.h b/drivers/net/dsa/xrs700x/xrs700x_reg.h
new file mode 100644
index 000000000..470d00e07
--- /dev/null
+++ b/drivers/net/dsa/xrs700x/xrs700x_reg.h
@@ -0,0 +1,208 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Register Base Addresses */
+#define XRS_DEVICE_ID_BASE 0x0
+#define XRS_GPIO_BASE 0x10000
+#define XRS_PORT_OFFSET 0x10000
+#define XRS_PORT_BASE(x) (0x200000 + XRS_PORT_OFFSET * (x))
+#define XRS_RTC_BASE 0x280000
+#define XRS_TS_OFFSET 0x8000
+#define XRS_TS_BASE(x) (0x290000 + XRS_TS_OFFSET * (x))
+#define XRS_SWITCH_CONF_BASE 0x300000
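+
+/*
+ * Address map sketch, derived from the bases above: per-port blocks are
+ * spaced XRS_PORT_OFFSET (0x10000) apart, so XRS_PORT_BASE(0) = 0x200000
+ * and XRS_PORT_BASE(1) = 0x210000; time stamper instances are likewise
+ * spaced XRS_TS_OFFSET (0x8000) apart starting at 0x290000.
+ */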
+
+/* Device Identification Registers */
+#define XRS_DEV_ID0 (XRS_DEVICE_ID_BASE + 0)
+#define XRS_DEV_ID1 (XRS_DEVICE_ID_BASE + 2)
+#define XRS_INT_ID0 (XRS_DEVICE_ID_BASE + 4)
+#define XRS_INT_ID1 (XRS_DEVICE_ID_BASE + 6)
+#define XRS_REV_ID (XRS_DEVICE_ID_BASE + 8)
+
+/* GPIO Registers */
+#define XRS_CONFIG0 (XRS_GPIO_BASE + 0x1000)
+#define XRS_INPUT_STATUS0 (XRS_GPIO_BASE + 0x1002)
+#define XRS_CONFIG1 (XRS_GPIO_BASE + 0x1004)
+#define XRS_INPUT_STATUS1 (XRS_GPIO_BASE + 0x1006)
+#define XRS_CONFIG2 (XRS_GPIO_BASE + 0x1008)
+#define XRS_INPUT_STATUS2 (XRS_GPIO_BASE + 0x100a)
+
+/* Port Configuration Registers */
+#define XRS_PORT_GEN_BASE(x) (XRS_PORT_BASE(x) + 0x0)
+#define XRS_PORT_HSR_BASE(x) (XRS_PORT_BASE(x) + 0x2000)
+#define XRS_PORT_PTP_BASE(x) (XRS_PORT_BASE(x) + 0x4000)
+#define XRS_PORT_CNT_BASE(x) (XRS_PORT_BASE(x) + 0x6000)
+#define XRS_PORT_IPO_BASE(x) (XRS_PORT_BASE(x) + 0x8000)
+
+/* Port Configuration Registers - General and State */
+#define XRS_PORT_STATE(x) (XRS_PORT_GEN_BASE(x) + 0x0)
+#define XRS_PORT_FORWARDING 0
+#define XRS_PORT_LEARNING 1
+#define XRS_PORT_DISABLED 2
+#define XRS_PORT_MODE_NORMAL 0
+#define XRS_PORT_MODE_MANAGEMENT 1
+#define XRS_PORT_SPEED_1000 0x12
+#define XRS_PORT_SPEED_100 0x20
+#define XRS_PORT_SPEED_10 0x30
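+
+/*
+ * The FORWARDING/LEARNING/DISABLED, MODE_* and SPEED_* values above are,
+ * by all appearances, values for fields of XRS_PORT_STATE(x); the exact
+ * bit positions are defined by the common driver code.
+ */
+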
+#define XRS_PORT_VLAN(x) (XRS_PORT_GEN_BASE(x) + 0x10)
+#define XRS_PORT_VLAN0_MAPPING(x) (XRS_PORT_GEN_BASE(x) + 0x12)
+#define XRS_PORT_FWD_MASK(x) (XRS_PORT_GEN_BASE(x) + 0x14)
+#define XRS_PORT_VLAN_PRIO(x) (XRS_PORT_GEN_BASE(x) + 0x16)
+
+/* Port Configuration Registers - HSR/PRP */
+#define XRS_HSR_CFG(x) (XRS_PORT_HSR_BASE(x) + 0x0)
+#define XRS_HSR_CFG_HSR_PRP BIT(0)
+#define XRS_HSR_CFG_HSR 0
+#define XRS_HSR_CFG_PRP BIT(8)
+#define XRS_HSR_CFG_LANID_A 0
+#define XRS_HSR_CFG_LANID_B BIT(10)
+
+/* Port Configuration Registers - PTP */
+#define XRS_PTP_RX_SYNC_DELAY_NS_LO(x) (XRS_PORT_PTP_BASE(x) + 0x2)
+#define XRS_PTP_RX_SYNC_DELAY_NS_HI(x) (XRS_PORT_PTP_BASE(x) + 0x4)
+#define XRS_PTP_RX_EVENT_DELAY_NS(x) (XRS_PORT_PTP_BASE(x) + 0xa)
+#define XRS_PTP_TX_EVENT_DELAY_NS(x) (XRS_PORT_PTP_BASE(x) + 0x12)
+
+/* Port Configuration Registers - Counter */
+#define XRS_CNT_CTRL(x) (XRS_PORT_CNT_BASE(x) + 0x0)
+#define XRS_RX_GOOD_OCTETS_L (XRS_PORT_CNT_BASE(0) + 0x200)
+#define XRS_RX_GOOD_OCTETS_H (XRS_PORT_CNT_BASE(0) + 0x202)
+#define XRS_RX_BAD_OCTETS_L (XRS_PORT_CNT_BASE(0) + 0x204)
+#define XRS_RX_BAD_OCTETS_H (XRS_PORT_CNT_BASE(0) + 0x206)
+#define XRS_RX_UNICAST_L (XRS_PORT_CNT_BASE(0) + 0x208)
+#define XRS_RX_UNICAST_H (XRS_PORT_CNT_BASE(0) + 0x20a)
+#define XRS_RX_BROADCAST_L (XRS_PORT_CNT_BASE(0) + 0x20c)
+#define XRS_RX_BROADCAST_H (XRS_PORT_CNT_BASE(0) + 0x20e)
+#define XRS_RX_MULTICAST_L (XRS_PORT_CNT_BASE(0) + 0x210)
+#define XRS_RX_MULTICAST_H (XRS_PORT_CNT_BASE(0) + 0x212)
+#define XRS_RX_UNDERSIZE_L (XRS_PORT_CNT_BASE(0) + 0x214)
+#define XRS_RX_UNDERSIZE_H (XRS_PORT_CNT_BASE(0) + 0x216)
+#define XRS_RX_FRAGMENTS_L (XRS_PORT_CNT_BASE(0) + 0x218)
+#define XRS_RX_FRAGMENTS_H (XRS_PORT_CNT_BASE(0) + 0x21a)
+#define XRS_RX_OVERSIZE_L (XRS_PORT_CNT_BASE(0) + 0x21c)
+#define XRS_RX_OVERSIZE_H (XRS_PORT_CNT_BASE(0) + 0x21e)
+#define XRS_RX_JABBER_L (XRS_PORT_CNT_BASE(0) + 0x220)
+#define XRS_RX_JABBER_H (XRS_PORT_CNT_BASE(0) + 0x222)
+#define XRS_RX_ERR_L (XRS_PORT_CNT_BASE(0) + 0x224)
+#define XRS_RX_ERR_H (XRS_PORT_CNT_BASE(0) + 0x226)
+#define XRS_RX_CRC_L (XRS_PORT_CNT_BASE(0) + 0x228)
+#define XRS_RX_CRC_H (XRS_PORT_CNT_BASE(0) + 0x22a)
+#define XRS_RX_64_L (XRS_PORT_CNT_BASE(0) + 0x22c)
+#define XRS_RX_64_H (XRS_PORT_CNT_BASE(0) + 0x22e)
+#define XRS_RX_65_127_L (XRS_PORT_CNT_BASE(0) + 0x230)
+#define XRS_RX_65_127_H (XRS_PORT_CNT_BASE(0) + 0x232)
+#define XRS_RX_128_255_L (XRS_PORT_CNT_BASE(0) + 0x234)
+#define XRS_RX_128_255_H (XRS_PORT_CNT_BASE(0) + 0x236)
+#define XRS_RX_256_511_L (XRS_PORT_CNT_BASE(0) + 0x238)
+#define XRS_RX_256_511_H (XRS_PORT_CNT_BASE(0) + 0x23a)
+#define XRS_RX_512_1023_L (XRS_PORT_CNT_BASE(0) + 0x23c)
+#define XRS_RX_512_1023_H (XRS_PORT_CNT_BASE(0) + 0x23e)
+#define XRS_RX_1024_1536_L (XRS_PORT_CNT_BASE(0) + 0x240)
+#define XRS_RX_1024_1536_H (XRS_PORT_CNT_BASE(0) + 0x242)
+#define XRS_RX_HSR_PRP_L (XRS_PORT_CNT_BASE(0) + 0x244)
+#define XRS_RX_HSR_PRP_H (XRS_PORT_CNT_BASE(0) + 0x246)
+#define XRS_RX_WRONGLAN_L (XRS_PORT_CNT_BASE(0) + 0x248)
+#define XRS_RX_WRONGLAN_H (XRS_PORT_CNT_BASE(0) + 0x24a)
+#define XRS_RX_DUPLICATE_L (XRS_PORT_CNT_BASE(0) + 0x24c)
+#define XRS_RX_DUPLICATE_H (XRS_PORT_CNT_BASE(0) + 0x24e)
+#define XRS_TX_OCTETS_L (XRS_PORT_CNT_BASE(0) + 0x280)
+#define XRS_TX_OCTETS_H (XRS_PORT_CNT_BASE(0) + 0x282)
+#define XRS_TX_UNICAST_L (XRS_PORT_CNT_BASE(0) + 0x284)
+#define XRS_TX_UNICAST_H (XRS_PORT_CNT_BASE(0) + 0x286)
+#define XRS_TX_BROADCAST_L (XRS_PORT_CNT_BASE(0) + 0x288)
+#define XRS_TX_BROADCAST_H (XRS_PORT_CNT_BASE(0) + 0x28a)
+#define XRS_TX_MULTICAST_L (XRS_PORT_CNT_BASE(0) + 0x28c)
+#define XRS_TX_MULTICAST_H (XRS_PORT_CNT_BASE(0) + 0x28e)
+#define XRS_TX_HSR_PRP_L (XRS_PORT_CNT_BASE(0) + 0x290)
+#define XRS_TX_HSR_PRP_H (XRS_PORT_CNT_BASE(0) + 0x292)
+#define XRS_PRIQ_DROP_L (XRS_PORT_CNT_BASE(0) + 0x2c0)
+#define XRS_PRIQ_DROP_H (XRS_PORT_CNT_BASE(0) + 0x2c2)
+#define XRS_EARLY_DROP_L (XRS_PORT_CNT_BASE(0) + 0x2c4)
+#define XRS_EARLY_DROP_H (XRS_PORT_CNT_BASE(0) + 0x2c6)
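+
+/*
+ * Counters are 32-bit values split across adjacent _L/_H 16-bit register
+ * pairs. The offsets above are relative to port 0's counter block;
+ * per-port addresses are presumably formed by adding XRS_PORT_OFFSET *
+ * port, mirroring XRS_PORT_CNT_BASE(x).
+ */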
+
+/* Port Configuration Registers - Inbound Policy 0 - 15 */
+#define XRS_ETH_ADDR_CFG(x, p) (XRS_PORT_IPO_BASE(x) + \
+ (p) * 0x20 + 0x0)
+#define XRS_ETH_ADDR_FWD_ALLOW(x, p) (XRS_PORT_IPO_BASE(x) + \
+ (p) * 0x20 + 0x2)
+#define XRS_ETH_ADDR_FWD_MIRROR(x, p) (XRS_PORT_IPO_BASE(x) + \
+ (p) * 0x20 + 0x4)
+#define XRS_ETH_ADDR_0(x, p) (XRS_PORT_IPO_BASE(x) + \
+ (p) * 0x20 + 0x8)
+#define XRS_ETH_ADDR_1(x, p) (XRS_PORT_IPO_BASE(x) + \
+ (p) * 0x20 + 0xa)
+#define XRS_ETH_ADDR_2(x, p) (XRS_PORT_IPO_BASE(x) + \
+ (p) * 0x20 + 0xc)
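+
+/*
+ * Layout implied by the macros above: each port has 16 inbound policy
+ * entries (0 - 15) of 0x20 bytes each, holding a config word, forward
+ * allow/mirror masks, and a MAC address split across three 16-bit
+ * registers. Entry p of port x therefore starts at
+ * XRS_PORT_IPO_BASE(x) + (p) * 0x20.
+ */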
+
+/* RTC Registers */
+#define XRS_CUR_NSEC0 (XRS_RTC_BASE + 0x1004)
+#define XRS_CUR_NSEC1 (XRS_RTC_BASE + 0x1006)
+#define XRS_CUR_SEC0 (XRS_RTC_BASE + 0x1008)
+#define XRS_CUR_SEC1 (XRS_RTC_BASE + 0x100a)
+#define XRS_CUR_SEC2 (XRS_RTC_BASE + 0x100c)
+#define XRS_TIME_CC0 (XRS_RTC_BASE + 0x1010)
+#define XRS_TIME_CC1 (XRS_RTC_BASE + 0x1012)
+#define XRS_TIME_CC2 (XRS_RTC_BASE + 0x1014)
+#define XRS_STEP_SIZE0 (XRS_RTC_BASE + 0x1020)
+#define XRS_STEP_SIZE1 (XRS_RTC_BASE + 0x1022)
+#define XRS_STEP_SIZE2 (XRS_RTC_BASE + 0x1024)
+#define XRS_ADJUST_NSEC0 (XRS_RTC_BASE + 0x1034)
+#define XRS_ADJUST_NSEC1 (XRS_RTC_BASE + 0x1036)
+#define XRS_ADJUST_SEC0 (XRS_RTC_BASE + 0x1038)
+#define XRS_ADJUST_SEC1 (XRS_RTC_BASE + 0x103a)
+#define XRS_ADJUST_SEC2 (XRS_RTC_BASE + 0x103c)
+#define XRS_TIME_CMD (XRS_RTC_BASE + 0x1040)
+
+/* Time Stamper Registers */
+#define XRS_TS_CTRL(x) (XRS_TS_BASE(x) + 0x1000)
+#define XRS_TS_INT_MASK(x) (XRS_TS_BASE(x) + 0x1008)
+#define XRS_TS_INT_STATUS(x) (XRS_TS_BASE(x) + 0x1010)
+#define XRS_TS_NSEC0(x) (XRS_TS_BASE(x) + 0x1104)
+#define XRS_TS_NSEC1(x) (XRS_TS_BASE(x) + 0x1106)
+#define XRS_TS_SEC0(x) (XRS_TS_BASE(x) + 0x1108)
+#define XRS_TS_SEC1(x) (XRS_TS_BASE(x) + 0x110a)
+#define XRS_TS_SEC2(x) (XRS_TS_BASE(x) + 0x110c)
+#define XRS_PNCT0(x) (XRS_TS_BASE(x) + 0x1110)
+#define XRS_PNCT1(x) (XRS_TS_BASE(x) + 0x1112)
+
+/* Switch Configuration Registers */
+#define XRS_SWITCH_GEN_BASE (XRS_SWITCH_CONF_BASE + 0x0)
+#define XRS_SWITCH_TS_BASE (XRS_SWITCH_CONF_BASE + 0x2000)
+#define XRS_SWITCH_VLAN_BASE (XRS_SWITCH_CONF_BASE + 0x4000)
+
+/* Switch Configuration Registers - General */
+#define XRS_GENERAL (XRS_SWITCH_GEN_BASE + 0x10)
+#define XRS_GENERAL_TIME_TRAILER BIT(9)
+#define XRS_GENERAL_MOD_SYNC BIT(10)
+#define XRS_GENERAL_CUT_THRU BIT(13)
+#define XRS_GENERAL_CLR_MAC_TBL BIT(14)
+#define XRS_GENERAL_RESET BIT(15)
+#define XRS_MT_CLEAR_MASK (XRS_SWITCH_GEN_BASE + 0x12)
+#define XRS_ADDRESS_AGING (XRS_SWITCH_GEN_BASE + 0x20)
+#define XRS_TS_CTRL_TX (XRS_SWITCH_GEN_BASE + 0x28)
+#define XRS_TS_CTRL_RX (XRS_SWITCH_GEN_BASE + 0x2a)
+#define XRS_INT_MASK (XRS_SWITCH_GEN_BASE + 0x2c)
+#define XRS_INT_STATUS (XRS_SWITCH_GEN_BASE + 0x2e)
+#define XRS_MAC_TABLE0 (XRS_SWITCH_GEN_BASE + 0x200)
+#define XRS_MAC_TABLE1 (XRS_SWITCH_GEN_BASE + 0x202)
+#define XRS_MAC_TABLE2 (XRS_SWITCH_GEN_BASE + 0x204)
+#define XRS_MAC_TABLE3 (XRS_SWITCH_GEN_BASE + 0x206)
+
+/* Switch Configuration Registers - Frame Timestamp */
+#define XRS_TX_TS_NS_LO(t) (XRS_SWITCH_TS_BASE + 0x80 * (t) + 0x0)
+#define XRS_TX_TS_NS_HI(t) (XRS_SWITCH_TS_BASE + 0x80 * (t) + 0x2)
+#define XRS_TX_TS_S_LO(t) (XRS_SWITCH_TS_BASE + 0x80 * (t) + 0x4)
+#define XRS_TX_TS_S_HI(t) (XRS_SWITCH_TS_BASE + 0x80 * (t) + 0x6)
+#define XRS_TX_TS_HDR(t, h) (XRS_SWITCH_TS_BASE + 0x80 * (t) + \
+ 0x2 * (h) + 0xe)
+#define XRS_RX_TS_NS_LO(t) (XRS_SWITCH_TS_BASE + 0x80 * (t) + \
+ 0x200)
+#define XRS_RX_TS_NS_HI(t) (XRS_SWITCH_TS_BASE + 0x80 * (t) + \
+ 0x202)
+#define XRS_RX_TS_S_LO(t) (XRS_SWITCH_TS_BASE + 0x80 * (t) + \
+ 0x204)
+#define XRS_RX_TS_S_HI(t) (XRS_SWITCH_TS_BASE + 0x80 * (t) + \
+ 0x206)
+#define XRS_RX_TS_HDR(t, h) (XRS_SWITCH_TS_BASE + 0x80 * (t) + \
+ 0x2 * (h) + 0xe)
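+
+/*
+ * Each timestamp slot t occupies 0x80 bytes and records a seconds/
+ * nanoseconds pair plus captured frame header words; the RX slots mirror
+ * the TX layout at a 0x200 offset.
+ */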
+
+/* Switch Configuration Registers - VLAN */
+#define XRS_VLAN(v) (XRS_SWITCH_VLAN_BASE + 0x2 * (v))
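+
+/*
+ * One 16-bit entry per VLAN ID (0 to VLAN_N_VID - 1); XRS_VLAN(VLAN_N_VID
+ * - 1) is also the highest address the regmap configs advertise via
+ * .max_register.
+ */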