Diffstat (drivers/net/ethernet/mscc):
 -rw-r--r--  drivers/net/ethernet/mscc/Kconfig           |   34
 -rw-r--r--  drivers/net/ethernet/mscc/Makefile          |   13
 -rw-r--r--  drivers/net/ethernet/mscc/ocelot.c          | 1648
 -rw-r--r--  drivers/net/ethernet/mscc/ocelot.h          |  118
 -rw-r--r--  drivers/net/ethernet/mscc/ocelot_flower.c   |  773
 -rw-r--r--  drivers/net/ethernet/mscc/ocelot_io.c       |  151
 -rw-r--r--  drivers/net/ethernet/mscc/ocelot_net.c      | 1087
 -rw-r--r--  drivers/net/ethernet/mscc/ocelot_police.c   |  209
 -rw-r--r--  drivers/net/ethernet/mscc/ocelot_police.h   |   36
 -rw-r--r--  drivers/net/ethernet/mscc/ocelot_ptp.c      |  351
 -rw-r--r--  drivers/net/ethernet/mscc/ocelot_qs.h       |   78
 -rw-r--r--  drivers/net/ethernet/mscc/ocelot_rew.h      |   81
 -rw-r--r--  drivers/net/ethernet/mscc/ocelot_vcap.c     | 1357
 -rw-r--r--  drivers/net/ethernet/mscc/ocelot_vcap.h     |  316
 -rw-r--r--  drivers/net/ethernet/mscc/ocelot_vsc7514.c  | 1325
 15 files changed, 7577 insertions, 0 deletions
diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig
new file mode 100644
index 000000000..ee7bb7e24
--- /dev/null
+++ b/drivers/net/ethernet/mscc/Kconfig
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: (GPL-2.0 OR MIT)
+config NET_VENDOR_MICROSEMI
+ bool "Microsemi devices"
+ default y
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Microsemi devices.
+
+if NET_VENDOR_MICROSEMI
+
+# Users should depend on NET_SWITCHDEV, HAS_IOMEM
+config MSCC_OCELOT_SWITCH_LIB
+ select REGMAP_MMIO
+ select PHYLIB
+ tristate
+ help
+ This is a hardware support library for Ocelot network switches. It is
+ used by switchdev as well as by DSA drivers.
+
+config MSCC_OCELOT_SWITCH
+ tristate "Ocelot switch driver"
+ depends on NET_SWITCHDEV
+ depends on HAS_IOMEM
+ depends on OF_NET
+ select MSCC_OCELOT_SWITCH_LIB
+ select GENERIC_PHY
+ help
+ This driver supports the Ocelot network switch device as present on
+ the Ocelot SoCs (VSC7514).
+
+endif # NET_VENDOR_MICROSEMI
diff --git a/drivers/net/ethernet/mscc/Makefile b/drivers/net/ethernet/mscc/Makefile
new file mode 100644
index 000000000..58f94c3d8
--- /dev/null
+++ b/drivers/net/ethernet/mscc/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: (GPL-2.0 OR MIT)
+obj-$(CONFIG_MSCC_OCELOT_SWITCH_LIB) += mscc_ocelot_switch_lib.o
+mscc_ocelot_switch_lib-y := \
+ ocelot.o \
+ ocelot_io.o \
+ ocelot_police.o \
+ ocelot_vcap.o \
+ ocelot_flower.o \
+ ocelot_ptp.o
+obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot.o
+mscc_ocelot-y := \
+ ocelot_vsc7514.o \
+ ocelot_net.o
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
new file mode 100644
index 000000000..a55861ea4
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -0,0 +1,1648 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+#include <linux/if_bridge.h>
+#include <soc/mscc/ocelot_vcap.h>
+#include "ocelot.h"
+#include "ocelot_vcap.h"
+
+#define TABLE_UPDATE_SLEEP_US 10
+#define TABLE_UPDATE_TIMEOUT_US 100000
+
+struct ocelot_mact_entry {
+ u8 mac[ETH_ALEN];
+ u16 vid;
+ enum macaccess_entry_type type;
+};
+
+static inline u32 ocelot_mact_read_macaccess(struct ocelot *ocelot)
+{
+ return ocelot_read(ocelot, ANA_TABLES_MACACCESS);
+}
+
+static inline int ocelot_mact_wait_for_completion(struct ocelot *ocelot)
+{
+ u32 val;
+
+ return readx_poll_timeout(ocelot_mact_read_macaccess,
+ ocelot, val,
+ (val & ANA_TABLES_MACACCESS_MAC_TABLE_CMD_M) ==
+ MACACCESS_CMD_IDLE,
+ TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
+}
+
+static void ocelot_mact_select(struct ocelot *ocelot,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid)
+{
+ u32 macl = 0, mach = 0;
+
+ /* Set the MAC address to handle and the associated VLAN, in a format
+ * understood by the hardware.
+ */
+ mach |= vid << 16;
+ mach |= mac[0] << 8;
+ mach |= mac[1] << 0;
+ macl |= mac[2] << 24;
+ macl |= mac[3] << 16;
+ macl |= mac[4] << 8;
+ macl |= mac[5] << 0;
+
+ ocelot_write(ocelot, macl, ANA_TABLES_MACLDATA);
+ ocelot_write(ocelot, mach, ANA_TABLES_MACHDATA);
+}
+
+int ocelot_mact_learn(struct ocelot *ocelot, int port,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid, enum macaccess_entry_type type)
+{
+ u32 cmd = ANA_TABLES_MACACCESS_VALID |
+ ANA_TABLES_MACACCESS_DEST_IDX(port) |
+ ANA_TABLES_MACACCESS_ENTRYTYPE(type) |
+ ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN);
+ unsigned int mc_ports;
+
+ /* Set MAC_CPU_COPY if the CPU port is used by a multicast entry */
+ if (type == ENTRYTYPE_MACv4)
+ mc_ports = (mac[1] << 8) | mac[2];
+ else if (type == ENTRYTYPE_MACv6)
+ mc_ports = (mac[0] << 8) | mac[1];
+ else
+ mc_ports = 0;
+
+ if (mc_ports & BIT(ocelot->num_phys_ports))
+ cmd |= ANA_TABLES_MACACCESS_MAC_CPU_COPY;
+
+ ocelot_mact_select(ocelot, mac, vid);
+
+ /* Issue a write command */
+ ocelot_write(ocelot, cmd, ANA_TABLES_MACACCESS);
+
+ return ocelot_mact_wait_for_completion(ocelot);
+}
+EXPORT_SYMBOL(ocelot_mact_learn);
+
+int ocelot_mact_forget(struct ocelot *ocelot,
+ const unsigned char mac[ETH_ALEN], unsigned int vid)
+{
+ ocelot_mact_select(ocelot, mac, vid);
+
+ /* Issue a forget command */
+ ocelot_write(ocelot,
+ ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_FORGET),
+ ANA_TABLES_MACACCESS);
+
+ return ocelot_mact_wait_for_completion(ocelot);
+}
+EXPORT_SYMBOL(ocelot_mact_forget);
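For reference, a minimal caller-side sketch (not part of this patch) of how these two exported MAC table helpers pair up; the ocelot pointer and the port index are assumed to come from the embedding switchdev/DSA driver, and the MAC/VID values are invented:

/* Illustrative sketch only: install a static (non-ageing) entry for a
 * host MAC on VLAN 1 behind physical port 2, then remove it again.
 */
static int example_static_mac(struct ocelot *ocelot)
{
	const unsigned char mac[ETH_ALEN] = {
		0x02, 0x00, 0x00, 0x00, 0x00, 0x01
	};
	int err;

	err = ocelot_mact_learn(ocelot, 2, mac, 1, ENTRYTYPE_LOCKED);
	if (err)
		return err;

	/* ... later, when the address goes away ... */
	return ocelot_mact_forget(ocelot, mac, 1);
}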
+
+static void ocelot_mact_init(struct ocelot *ocelot)
+{
+ /* Configure the attributes of the learning mode entries:
+ * - Do not copy the frame to the CPU extraction queues.
+ * - Use the VLAN and MAC for the DMAC lookup.
+ */
+ ocelot_rmw(ocelot, 0,
+ ANA_AGENCTRL_LEARN_CPU_COPY | ANA_AGENCTRL_IGNORE_DMAC_FLAGS
+ | ANA_AGENCTRL_LEARN_FWD_KILL
+ | ANA_AGENCTRL_LEARN_IGNORE_VLAN,
+ ANA_AGENCTRL);
+
+ /* Clear the MAC table */
+ ocelot_write(ocelot, MACACCESS_CMD_INIT, ANA_TABLES_MACACCESS);
+}
+
+static void ocelot_vcap_enable(struct ocelot *ocelot, int port)
+{
+ ocelot_write_gix(ocelot, ANA_PORT_VCAP_S2_CFG_S2_ENA |
+ ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG(0xa),
+ ANA_PORT_VCAP_S2_CFG, port);
+
+ ocelot_write_gix(ocelot, ANA_PORT_VCAP_CFG_S1_ENA,
+ ANA_PORT_VCAP_CFG, port);
+
+ ocelot_rmw_gix(ocelot, REW_PORT_CFG_ES0_EN,
+ REW_PORT_CFG_ES0_EN,
+ REW_PORT_CFG, port);
+}
+
+static inline u32 ocelot_vlant_read_vlanaccess(struct ocelot *ocelot)
+{
+ return ocelot_read(ocelot, ANA_TABLES_VLANACCESS);
+}
+
+static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot)
+{
+ u32 val;
+
+ return readx_poll_timeout(ocelot_vlant_read_vlanaccess,
+ ocelot,
+ val,
+ (val & ANA_TABLES_VLANACCESS_VLAN_TBL_CMD_M) ==
+ ANA_TABLES_VLANACCESS_CMD_IDLE,
+ TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
+}
+
+static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask)
+{
+ /* Select the VID to configure */
+ ocelot_write(ocelot, ANA_TABLES_VLANTIDX_V_INDEX(vid),
+ ANA_TABLES_VLANTIDX);
+ /* Set the vlan port members mask and issue a write command */
+ ocelot_write(ocelot, ANA_TABLES_VLANACCESS_VLAN_PORT_MASK(mask) |
+ ANA_TABLES_VLANACCESS_CMD_WRITE,
+ ANA_TABLES_VLANACCESS);
+
+ return ocelot_vlant_wait_for_completion(ocelot);
+}
+
+static int ocelot_port_set_native_vlan(struct ocelot *ocelot, int port,
+ u16 vid)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ u32 val = 0;
+
+ if (ocelot_port->vid != vid) {
+ /* Always permit deleting the native VLAN (vid = 0) */
+ if (ocelot_port->vid && vid) {
+ dev_err(ocelot->dev,
+ "Port already has a native VLAN: %d\n",
+ ocelot_port->vid);
+ return -EBUSY;
+ }
+ ocelot_port->vid = vid;
+ }
+
+ ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_VID(vid),
+ REW_PORT_VLAN_CFG_PORT_VID_M,
+ REW_PORT_VLAN_CFG, port);
+
+ if (ocelot_port->vlan_aware && !ocelot_port->vid)
+ /* If port is vlan-aware and tagged, drop untagged and priority
+ * tagged frames.
+ */
+ val = ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA |
+ ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
+ ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;
+ ocelot_rmw_gix(ocelot, val,
+ ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA |
+ ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
+ ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA,
+ ANA_PORT_DROP_CFG, port);
+
+ if (ocelot_port->vlan_aware) {
+ if (ocelot_port->vid)
+ /* Tag all frames except when VID == DEFAULT_VLAN */
+ val = REW_TAG_CFG_TAG_CFG(1);
+ else
+ /* Tag all frames */
+ val = REW_TAG_CFG_TAG_CFG(3);
+ } else {
+ /* Port tagging disabled. */
+ val = REW_TAG_CFG_TAG_CFG(0);
+ }
+ ocelot_rmw_gix(ocelot, val,
+ REW_TAG_CFG_TAG_CFG_M,
+ REW_TAG_CFG, port);
+
+ return 0;
+}
+
+int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
+ bool vlan_aware, struct switchdev_trans *trans)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ u32 val;
+
+ if (switchdev_trans_ph_prepare(trans)) {
+ struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1];
+ struct ocelot_vcap_filter *filter;
+
+ list_for_each_entry(filter, &block->rules, list) {
+ if (filter->ingress_port_mask & BIT(port) &&
+ filter->action.vid_replace_ena) {
+ dev_err(ocelot->dev,
+ "Cannot change VLAN state with vlan modify rules active\n");
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+ }
+
+ ocelot_port->vlan_aware = vlan_aware;
+
+ if (vlan_aware)
+ val = ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+ ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1);
+ else
+ val = 0;
+ ocelot_rmw_gix(ocelot, val,
+ ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+ ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M,
+ ANA_PORT_VLAN_CFG, port);
+
+ ocelot_port_set_native_vlan(ocelot, port, ocelot_port->vid);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_port_vlan_filtering);
+
+/* Default VLAN to classify untagged frames to (may be zero) */
+static void ocelot_port_set_pvid(struct ocelot *ocelot, int port, u16 pvid)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ ocelot_rmw_gix(ocelot,
+ ANA_PORT_VLAN_CFG_VLAN_VID(pvid),
+ ANA_PORT_VLAN_CFG_VLAN_VID_M,
+ ANA_PORT_VLAN_CFG, port);
+
+ ocelot_port->pvid = pvid;
+}
+
+int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
+ bool untagged)
+{
+ int ret;
+
+ /* Make the port a member of the VLAN */
+ ocelot->vlan_mask[vid] |= BIT(port);
+ ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]);
+ if (ret)
+ return ret;
+
+ /* Default ingress vlan classification */
+ if (pvid)
+ ocelot_port_set_pvid(ocelot, port, vid);
+
+ /* Untagged egress VLAN classification */
+ if (untagged) {
+ ret = ocelot_port_set_native_vlan(ocelot, port, vid);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_vlan_add);
+
+int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ int ret;
+
+ /* Stop the port from being a member of the vlan */
+ ocelot->vlan_mask[vid] &= ~BIT(port);
+ ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]);
+ if (ret)
+ return ret;
+
+ /* Ingress */
+ if (ocelot_port->pvid == vid)
+ ocelot_port_set_pvid(ocelot, port, 0);
+
+ /* Egress */
+ if (ocelot_port->vid == vid)
+ ocelot_port_set_native_vlan(ocelot, port, 0);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_vlan_del);
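For illustration only (the port number and VID are invented, not taken from the patch), a sketch of how a bridge VLAN typically maps onto these two exported helpers:

/* Illustrative sketch: make port 1 a member of VLAN 100, using it both
 * as the PVID (untagged ingress is classified to VID 100) and as the
 * native VLAN (egress on VID 100 leaves the port untagged).
 */
static int example_vlan(struct ocelot *ocelot)
{
	int err;

	err = ocelot_vlan_add(ocelot, 1, 100, true, true);
	if (err)
		return err;

	/* ... and on VLAN removal ... */
	return ocelot_vlan_del(ocelot, 1, 100);
}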
+
+static void ocelot_vlan_init(struct ocelot *ocelot)
+{
+ u16 port, vid;
+
+ /* Clear VLAN table, by default all ports are members of all VLANs */
+ ocelot_write(ocelot, ANA_TABLES_VLANACCESS_CMD_INIT,
+ ANA_TABLES_VLANACCESS);
+ ocelot_vlant_wait_for_completion(ocelot);
+
+ /* Configure the port VLAN memberships */
+ for (vid = 1; vid < VLAN_N_VID; vid++) {
+ ocelot->vlan_mask[vid] = 0;
+ ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]);
+ }
+
+ /* Because VLAN filtering is enabled, we need VID 0 to get untagged
+ * traffic. It is added automatically if the 8021q module is loaded, but
+ * we can't rely on that since the module may not be loaded.
+ */
+ ocelot->vlan_mask[0] = GENMASK(ocelot->num_phys_ports - 1, 0);
+ ocelot_vlant_set_mask(ocelot, 0, ocelot->vlan_mask[0]);
+
+ /* Set vlan ingress filter mask to all ports but the CPU port by
+ * default.
+ */
+ ocelot_write(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0),
+ ANA_VLANMASK);
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ ocelot_write_gix(ocelot, 0, REW_PORT_VLAN_CFG, port);
+ ocelot_write_gix(ocelot, 0, REW_TAG_CFG, port);
+ }
+}
+
+static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port)
+{
+ return ocelot_read_rix(ocelot, QSYS_SW_STATUS, port);
+}
+
+int ocelot_port_flush(struct ocelot *ocelot, int port)
+{
+ unsigned int pause_ena;
+ int err, val;
+
+ /* Disable dequeuing from the egress queues */
+ ocelot_rmw_rix(ocelot, QSYS_PORT_MODE_DEQUEUE_DIS,
+ QSYS_PORT_MODE_DEQUEUE_DIS,
+ QSYS_PORT_MODE, port);
+
+ /* Disable flow control */
+ ocelot_fields_read(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, &pause_ena);
+ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
+
+ /* Disable priority flow control */
+ ocelot_fields_write(ocelot, port,
+ QSYS_SWITCH_PORT_MODE_TX_PFC_ENA, 0);
+
+ /* Wait at least the time it takes to receive a frame of maximum length
+ * at the port.
+ * Worst-case delays for 10 kilobyte jumbo frames are:
+ * 8 ms on a 10M port
+ * 800 μs on a 100M port
+ * 80 μs on a 1G port
+ * 32 μs on a 2.5G port
+ */
+ usleep_range(8000, 10000);
+
+ /* Disable half duplex backpressure. */
+ ocelot_rmw_rix(ocelot, 0, SYS_FRONT_PORT_MODE_HDX_MODE,
+ SYS_FRONT_PORT_MODE, port);
+
+ /* Flush the queues associated with the port. */
+ ocelot_rmw_gix(ocelot, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG_FLUSH_ENA,
+ REW_PORT_CFG, port);
+
+ /* Enable dequeuing from the egress queues. */
+ ocelot_rmw_rix(ocelot, 0, QSYS_PORT_MODE_DEQUEUE_DIS, QSYS_PORT_MODE,
+ port);
+
+ /* Wait until flushing is complete. */
+ err = read_poll_timeout(ocelot_read_eq_avail, val, !val,
+ 100, 2000000, false, ocelot, port);
+
+ /* Clear flushing again. */
+ ocelot_rmw_gix(ocelot, 0, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG, port);
+
+ /* Re-enable flow control */
+ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, pause_ena);
+
+ return err;
+}
+EXPORT_SYMBOL(ocelot_port_flush);
+
+void ocelot_adjust_link(struct ocelot *ocelot, int port,
+ struct phy_device *phydev)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ int speed, mode = 0;
+
+ switch (phydev->speed) {
+ case SPEED_10:
+ speed = OCELOT_SPEED_10;
+ break;
+ case SPEED_100:
+ speed = OCELOT_SPEED_100;
+ break;
+ case SPEED_1000:
+ speed = OCELOT_SPEED_1000;
+ mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA;
+ break;
+ case SPEED_2500:
+ speed = OCELOT_SPEED_2500;
+ mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA;
+ break;
+ default:
+ dev_err(ocelot->dev, "Unsupported PHY speed on port %d: %d\n",
+ port, phydev->speed);
+ return;
+ }
+
+ phy_print_status(phydev);
+
+ if (!phydev->link)
+ return;
+
+ /* Only full duplex supported for now */
+ ocelot_port_writel(ocelot_port, DEV_MAC_MODE_CFG_FDX_ENA |
+ mode, DEV_MAC_MODE_CFG);
+
+ /* Disable HDX fast control */
+ ocelot_port_writel(ocelot_port, DEV_PORT_MISC_HDX_FAST_DIS,
+ DEV_PORT_MISC);
+
+ /* SGMII only for now */
+ ocelot_port_writel(ocelot_port, PCS1G_MODE_CFG_SGMII_MODE_ENA,
+ PCS1G_MODE_CFG);
+ ocelot_port_writel(ocelot_port, PCS1G_SD_CFG_SD_SEL, PCS1G_SD_CFG);
+
+ /* Enable PCS */
+ ocelot_port_writel(ocelot_port, PCS1G_CFG_PCS_ENA, PCS1G_CFG);
+
+ /* No aneg on SGMII */
+ ocelot_port_writel(ocelot_port, 0, PCS1G_ANEG_CFG);
+
+ /* No loopback */
+ ocelot_port_writel(ocelot_port, 0, PCS1G_LB_CFG);
+
+ /* Enable MAC module */
+ ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA |
+ DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG);
+
+ /* Take the MAC, port, PHY (internal) and PCS (SGMII/SerDes) clocks out
+ * of reset.
+ */
+ ocelot_port_writel(ocelot_port, DEV_CLOCK_CFG_LINK_SPEED(speed),
+ DEV_CLOCK_CFG);
+
+ /* No PFC */
+ ocelot_write_gix(ocelot, ANA_PFC_PFC_CFG_FC_LINK_SPEED(speed),
+ ANA_PFC_PFC_CFG, port);
+
+ /* Core: Enable port for frame transfer */
+ ocelot_fields_write(ocelot, port,
+ QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
+
+ /* Flow control */
+ ocelot_write_rix(ocelot, SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) |
+ SYS_MAC_FC_CFG_RX_FC_ENA | SYS_MAC_FC_CFG_TX_FC_ENA |
+ SYS_MAC_FC_CFG_ZERO_PAUSE_ENA |
+ SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) |
+ SYS_MAC_FC_CFG_FC_LINK_SPEED(speed),
+ SYS_MAC_FC_CFG, port);
+ ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port);
+}
+EXPORT_SYMBOL(ocelot_adjust_link);
+
+void ocelot_port_enable(struct ocelot *ocelot, int port,
+ struct phy_device *phy)
+{
+ /* Enable receiving frames on the port, and activate auto-learning of
+ * MAC addresses.
+ */
+ ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_LEARNAUTO |
+ ANA_PORT_PORT_CFG_RECV_ENA |
+ ANA_PORT_PORT_CFG_PORTID_VAL(port),
+ ANA_PORT_PORT_CFG, port);
+}
+EXPORT_SYMBOL(ocelot_port_enable);
+
+void ocelot_port_disable(struct ocelot *ocelot, int port)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ ocelot_port_writel(ocelot_port, 0, DEV_MAC_ENA_CFG);
+ ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0);
+}
+EXPORT_SYMBOL(ocelot_port_disable);
+
+void ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
+ struct sk_buff *clone)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ spin_lock(&ocelot_port->ts_id_lock);
+
+ skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
+ /* Store timestamp ID in cb[0] of sk_buff */
+ clone->cb[0] = ocelot_port->ts_id;
+ ocelot_port->ts_id = (ocelot_port->ts_id + 1) % 4;
+ skb_queue_tail(&ocelot_port->tx_skbs, clone);
+
+ spin_unlock(&ocelot_port->ts_id_lock);
+}
+EXPORT_SYMBOL(ocelot_port_add_txtstamp_skb);
+
+static void ocelot_get_hwtimestamp(struct ocelot *ocelot,
+ struct timespec64 *ts)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ /* Read current PTP time to get seconds */
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_SAVE);
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+ ts->tv_sec = ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
+
+ /* Read packet HW timestamp from FIFO */
+ val = ocelot_read(ocelot, SYS_PTP_TXSTAMP);
+ ts->tv_nsec = SYS_PTP_TXSTAMP_PTP_TXSTAMP(val);
+
+ /* Sec has incremented since the ts was registered */
+ if ((ts->tv_sec & 0x1) != !!(val & SYS_PTP_TXSTAMP_PTP_TXSTAMP_SEC))
+ ts->tv_sec--;
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+}
+
+void ocelot_get_txtstamp(struct ocelot *ocelot)
+{
+ int budget = OCELOT_PTP_QUEUE_SZ;
+
+ while (budget--) {
+ struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct ocelot_port *port;
+ struct timespec64 ts;
+ unsigned long flags;
+ u32 val, id, txport;
+
+ val = ocelot_read(ocelot, SYS_PTP_STATUS);
+
+ /* Check if a timestamp can be retrieved */
+ if (!(val & SYS_PTP_STATUS_PTP_MESS_VLD))
+ break;
+
+ WARN_ON(val & SYS_PTP_STATUS_PTP_OVFL);
+
+ /* Retrieve the ts ID and Tx port */
+ id = SYS_PTP_STATUS_PTP_MESS_ID_X(val);
+ txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val);
+
+ /* Retrieve its associated skb */
+ port = ocelot->ports[txport];
+
+ spin_lock_irqsave(&port->tx_skbs.lock, flags);
+
+ skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
+ if (skb->cb[0] != id)
+ continue;
+ __skb_unlink(skb, &port->tx_skbs);
+ skb_match = skb;
+ break;
+ }
+
+ spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
+
+ if (WARN_ON(!skb_match))
+ continue;
+
+ /* Get the h/w timestamp */
+ ocelot_get_hwtimestamp(ocelot, &ts);
+
+ /* Set the timestamp into the skb */
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+ skb_complete_tx_timestamp(skb_match, &shhwtstamps);
+
+ /* Next ts */
+ ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
+ }
+}
+EXPORT_SYMBOL(ocelot_get_txtstamp);
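A hedged sketch of the two-step TX timestamping flow built on these two helpers, loosely modelled on the ocelot_net.c transmit path (skb_clone_sk() and the surrounding xmit context are assumptions taken from the rest of the series, not from this hunk):

/* Illustrative sketch only: on transmit, remember a clone of the PTP
 * frame so the hardware timestamp can be attached to it later.
 */
static void example_txtstamp_request(struct ocelot *ocelot, int port,
				     struct sk_buff *skb)
{
	struct ocelot_port *ocelot_port = ocelot->ports[port];

	if (ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
		struct sk_buff *clone = skb_clone_sk(skb);

		if (clone)
			ocelot_port_add_txtstamp_skb(ocelot, port, clone);
	}
}

/* The PTP-ready interrupt handler then simply drains the timestamp FIFO,
 * matching queued clones by timestamp ID and completing them:
 *
 *	ocelot_get_txtstamp(ocelot);
 */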
+
+int ocelot_fdb_add(struct ocelot *ocelot, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ int pgid = port;
+
+ if (port == ocelot->npi)
+ pgid = PGID_CPU;
+
+ if (!vid) {
+ if (!ocelot_port->vlan_aware)
+ /* If the bridge is not VLAN aware and no VID was
+ * provided, set it to pvid to ensure the MAC entry
+ * matches incoming untagged packets
+ */
+ vid = ocelot_port->pvid;
+ else
+ /* If the bridge is VLAN aware, a VID must be provided;
+ * otherwise the learnt entry wouldn't match any frame.
+ */
+ return -EINVAL;
+ }
+
+ return ocelot_mact_learn(ocelot, pgid, addr, vid, ENTRYTYPE_LOCKED);
+}
+EXPORT_SYMBOL(ocelot_fdb_add);
+
+int ocelot_fdb_del(struct ocelot *ocelot, int port,
+ const unsigned char *addr, u16 vid)
+{
+ return ocelot_mact_forget(ocelot, addr, vid);
+}
+EXPORT_SYMBOL(ocelot_fdb_del);
+
+int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid,
+ bool is_static, void *data)
+{
+ struct ocelot_dump_ctx *dump = data;
+ u32 portid = NETLINK_CB(dump->cb->skb).portid;
+ u32 seq = dump->cb->nlh->nlmsg_seq;
+ struct nlmsghdr *nlh;
+ struct ndmsg *ndm;
+
+ if (dump->idx < dump->cb->args[2])
+ goto skip;
+
+ nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
+ sizeof(*ndm), NLM_F_MULTI);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ ndm = nlmsg_data(nlh);
+ ndm->ndm_family = AF_BRIDGE;
+ ndm->ndm_pad1 = 0;
+ ndm->ndm_pad2 = 0;
+ ndm->ndm_flags = NTF_SELF;
+ ndm->ndm_type = 0;
+ ndm->ndm_ifindex = dump->dev->ifindex;
+ ndm->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
+
+ if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
+ goto nla_put_failure;
+
+ if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
+ goto nla_put_failure;
+
+ nlmsg_end(dump->skb, nlh);
+
+skip:
+ dump->idx++;
+ return 0;
+
+nla_put_failure:
+ nlmsg_cancel(dump->skb, nlh);
+ return -EMSGSIZE;
+}
+EXPORT_SYMBOL(ocelot_port_fdb_do_dump);
+
+static int ocelot_mact_read(struct ocelot *ocelot, int port, int row, int col,
+ struct ocelot_mact_entry *entry)
+{
+ u32 val, dst, macl, mach;
+ char mac[ETH_ALEN];
+
+ /* Set row and column to read from */
+ ocelot_field_write(ocelot, ANA_TABLES_MACTINDX_M_INDEX, row);
+ ocelot_field_write(ocelot, ANA_TABLES_MACTINDX_BUCKET, col);
+
+ /* Issue a read command */
+ ocelot_write(ocelot,
+ ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_READ),
+ ANA_TABLES_MACACCESS);
+
+ if (ocelot_mact_wait_for_completion(ocelot))
+ return -ETIMEDOUT;
+
+ /* Read the entry flags */
+ val = ocelot_read(ocelot, ANA_TABLES_MACACCESS);
+ if (!(val & ANA_TABLES_MACACCESS_VALID))
+ return -EINVAL;
+
+ /* If the entry read has another port configured as its destination,
+ * do not report it.
+ */
+ dst = (val & ANA_TABLES_MACACCESS_DEST_IDX_M) >> 3;
+ if (dst != port)
+ return -EINVAL;
+
+ /* Get the entry's MAC address and VLAN id */
+ macl = ocelot_read(ocelot, ANA_TABLES_MACLDATA);
+ mach = ocelot_read(ocelot, ANA_TABLES_MACHDATA);
+
+ mac[0] = (mach >> 8) & 0xff;
+ mac[1] = (mach >> 0) & 0xff;
+ mac[2] = (macl >> 24) & 0xff;
+ mac[3] = (macl >> 16) & 0xff;
+ mac[4] = (macl >> 8) & 0xff;
+ mac[5] = (macl >> 0) & 0xff;
+
+ entry->vid = (mach >> 16) & 0xfff;
+ ether_addr_copy(entry->mac, mac);
+
+ return 0;
+}
+
+int ocelot_fdb_dump(struct ocelot *ocelot, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ int i, j;
+
+ /* Loop through all the mac tables entries. */
+ for (i = 0; i < ocelot->num_mact_rows; i++) {
+ for (j = 0; j < 4; j++) {
+ struct ocelot_mact_entry entry;
+ bool is_static;
+ int ret;
+
+ ret = ocelot_mact_read(ocelot, port, i, j, &entry);
+ /* If the entry is invalid (wrong port, invalid...),
+ * skip it.
+ */
+ if (ret == -EINVAL)
+ continue;
+ else if (ret)
+ return ret;
+
+ is_static = (entry.type == ENTRYTYPE_LOCKED);
+
+ ret = cb(entry.mac, entry.vid, is_static, data);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_fdb_dump);
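For reference, a minimal sketch of a dsa_fdb_dump_cb_t callback that could be passed to ocelot_fdb_dump(); the signature is the same one ocelot_port_fdb_do_dump() implements above, and this invented example merely counts the entries reported for a port:

/* Illustrative sketch only. */
static int example_count_fdb_entry(const unsigned char *addr, u16 vid,
				   bool is_static, void *data)
{
	unsigned int *count = data;

	(*count)++;
	return 0;
}

/* Possible usage, with error handling omitted:
 *
 *	unsigned int n = 0;
 *
 *	ocelot_fdb_dump(ocelot, port, example_count_fdb_entry, &n);
 */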
+
+int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr)
+{
+ return copy_to_user(ifr->ifr_data, &ocelot->hwtstamp_config,
+ sizeof(ocelot->hwtstamp_config)) ? -EFAULT : 0;
+}
+EXPORT_SYMBOL(ocelot_hwstamp_get);
+
+int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct hwtstamp_config cfg;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (cfg.flags)
+ return -EINVAL;
+
+ /* Tx type sanity check */
+ switch (cfg.tx_type) {
+ case HWTSTAMP_TX_ON:
+ ocelot_port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ /* IFH_REW_OP_ONE_STEP_PTP only updates the correction field;
+ * we also need the origin time updated, hence IFH_REW_OP_ORIGIN_PTP.
+ */
+ ocelot_port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP;
+ break;
+ case HWTSTAMP_TX_OFF:
+ ocelot_port->ptp_cmd = 0;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ mutex_lock(&ocelot->ptp_lock);
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ default:
+ mutex_unlock(&ocelot->ptp_lock);
+ return -ERANGE;
+ }
+
+ /* Commit back the result & save it */
+ memcpy(&ocelot->hwtstamp_config, &cfg, sizeof(cfg));
+ mutex_unlock(&ocelot->ptp_lock);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+EXPORT_SYMBOL(ocelot_hwstamp_set);
+
+void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data)
+{
+ int i;
+
+ if (sset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ocelot->num_stats; i++)
+ memcpy(data + i * ETH_GSTRING_LEN, ocelot->stats_layout[i].name,
+ ETH_GSTRING_LEN);
+}
+EXPORT_SYMBOL(ocelot_get_strings);
+
+/* Caller must hold &ocelot->stats_lock */
+static void ocelot_update_stats(struct ocelot *ocelot)
+{
+ int i, j;
+
+ for (i = 0; i < ocelot->num_phys_ports; i++) {
+ /* Configure the port to read the stats from */
+ ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(i), SYS_STAT_CFG);
+
+ for (j = 0; j < ocelot->num_stats; j++) {
+ u32 val;
+ unsigned int idx = i * ocelot->num_stats + j;
+
+ val = ocelot_read_rix(ocelot, SYS_COUNT_RX_OCTETS,
+ ocelot->stats_layout[j].offset);
+
+ if (val < (ocelot->stats[idx] & U32_MAX))
+ ocelot->stats[idx] += (u64)1 << 32;
+
+ ocelot->stats[idx] = (ocelot->stats[idx] &
+ ~(u64)U32_MAX) + val;
+ }
+ }
+}
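The wraparound handling above extends the hardware's free-running 32-bit counters into monotonic 64-bit software counters. A standalone sketch of the same technique, for illustration only:

/* Illustrative sketch: fold a fresh 32-bit hardware reading into a 64-bit
 * software accumulator, detecting at most one wrap per poll interval.
 */
static void example_extend_counter(u64 *sw_counter, u32 hw_val)
{
	/* The hardware counter wrapped since the last poll */
	if (hw_val < (*sw_counter & U32_MAX))
		*sw_counter += (u64)1 << 32;

	/* Keep the accumulated wrap count, replace the low 32 bits */
	*sw_counter = (*sw_counter & ~(u64)U32_MAX) + hw_val;
}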
+
+static void ocelot_check_stats_work(struct work_struct *work)
+{
+ struct delayed_work *del_work = to_delayed_work(work);
+ struct ocelot *ocelot = container_of(del_work, struct ocelot,
+ stats_work);
+
+ mutex_lock(&ocelot->stats_lock);
+ ocelot_update_stats(ocelot);
+ mutex_unlock(&ocelot->stats_lock);
+
+ queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
+ OCELOT_STATS_CHECK_DELAY);
+}
+
+void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data)
+{
+ int i;
+
+ mutex_lock(&ocelot->stats_lock);
+
+ /* check and update now */
+ ocelot_update_stats(ocelot);
+
+ /* Copy all counters */
+ for (i = 0; i < ocelot->num_stats; i++)
+ *data++ = ocelot->stats[port * ocelot->num_stats + i];
+
+ mutex_unlock(&ocelot->stats_lock);
+}
+EXPORT_SYMBOL(ocelot_get_ethtool_stats);
+
+int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ return ocelot->num_stats;
+}
+EXPORT_SYMBOL(ocelot_get_sset_count);
+
+int ocelot_get_ts_info(struct ocelot *ocelot, int port,
+ struct ethtool_ts_info *info)
+{
+ info->phc_index = ocelot->ptp_clock ?
+ ptp_clock_index(ocelot->ptp_clock) : -1;
+ if (info->phc_index == -1) {
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ return 0;
+ }
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
+ BIT(HWTSTAMP_TX_ONESTEP_SYNC);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_get_ts_info);
+
+void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
+{
+ u32 port_cfg;
+ int p, i;
+
+ if (!(BIT(port) & ocelot->bridge_mask))
+ return;
+
+ port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, port);
+
+ switch (state) {
+ case BR_STATE_FORWARDING:
+ ocelot->bridge_fwd_mask |= BIT(port);
+ fallthrough;
+ case BR_STATE_LEARNING:
+ port_cfg |= ANA_PORT_PORT_CFG_LEARN_ENA;
+ break;
+
+ default:
+ port_cfg &= ~ANA_PORT_PORT_CFG_LEARN_ENA;
+ ocelot->bridge_fwd_mask &= ~BIT(port);
+ break;
+ }
+
+ ocelot_write_gix(ocelot, port_cfg, ANA_PORT_PORT_CFG, port);
+
+ /* Apply FWD mask. The loop is needed to add/remove the current port as
+ * a source for the other ports.
+ */
+ for (p = 0; p < ocelot->num_phys_ports; p++) {
+ if (ocelot->bridge_fwd_mask & BIT(p)) {
+ unsigned long mask = ocelot->bridge_fwd_mask & ~BIT(p);
+
+ for (i = 0; i < ocelot->num_phys_ports; i++) {
+ unsigned long bond_mask = ocelot->lags[i];
+
+ if (!bond_mask)
+ continue;
+
+ if (bond_mask & BIT(p)) {
+ mask &= ~bond_mask;
+ break;
+ }
+ }
+
+ ocelot_write_rix(ocelot, mask,
+ ANA_PGID_PGID, PGID_SRC + p);
+ } else {
+ ocelot_write_rix(ocelot, 0,
+ ANA_PGID_PGID, PGID_SRC + p);
+ }
+ }
+}
+EXPORT_SYMBOL(ocelot_bridge_stp_state_set);
+
+void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs)
+{
+ unsigned int age_period = ANA_AUTOAGE_AGE_PERIOD(msecs / 2000);
+
+ /* Setting AGE_PERIOD to zero effectively disables automatic aging,
+ * which is clearly not our intention. So avoid that.
+ */
+ if (!age_period)
+ age_period = 1;
+
+ ocelot_rmw(ocelot, age_period, ANA_AUTOAGE_AGE_PERIOD_M, ANA_AUTOAGE);
+}
+EXPORT_SYMBOL(ocelot_set_ageing_time);
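A quick numeric illustration of the conversion (the value is invented): the bridge default ageing time of 300 seconds arrives here as msecs == 300000, giving AGE_PERIOD = 300000 / 2000 = 150; since entries are aged after 2 * AGE_PERIOD, they expire after roughly 300 seconds, as intended.

/* Illustrative call only, not part of the patch:
 *
 *	ocelot_set_ageing_time(ocelot, 300 * 1000);
 */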
+
+static struct ocelot_multicast *ocelot_multicast_get(struct ocelot *ocelot,
+ const unsigned char *addr,
+ u16 vid)
+{
+ struct ocelot_multicast *mc;
+
+ list_for_each_entry(mc, &ocelot->multicast, list) {
+ if (ether_addr_equal(mc->addr, addr) && mc->vid == vid)
+ return mc;
+ }
+
+ return NULL;
+}
+
+static enum macaccess_entry_type ocelot_classify_mdb(const unsigned char *addr)
+{
+ if (addr[0] == 0x01 && addr[1] == 0x00 && addr[2] == 0x5e)
+ return ENTRYTYPE_MACv4;
+ if (addr[0] == 0x33 && addr[1] == 0x33)
+ return ENTRYTYPE_MACv6;
+ return ENTRYTYPE_NORMAL;
+}
+
+static int ocelot_mdb_get_pgid(struct ocelot *ocelot,
+ enum macaccess_entry_type entry_type)
+{
+ int pgid;
+
+ /* According to VSC7514 datasheet 3.9.1.5 IPv4 Multicast Entries and
+ * 3.9.1.6 IPv6 Multicast Entries, "Instead of a lookup in the
+ * destination mask table (PGID), the destination set is programmed as
+ * part of the entry MAC address.", and the DEST_IDX is set to 0.
+ */
+ if (entry_type == ENTRYTYPE_MACv4 ||
+ entry_type == ENTRYTYPE_MACv6)
+ return 0;
+
+ for_each_nonreserved_multicast_dest_pgid(ocelot, pgid) {
+ struct ocelot_multicast *mc;
+ bool used = false;
+
+ list_for_each_entry(mc, &ocelot->multicast, list) {
+ if (mc->pgid == pgid) {
+ used = true;
+ break;
+ }
+ }
+
+ if (!used)
+ return pgid;
+ }
+
+ return -1;
+}
+
+static void ocelot_encode_ports_to_mdb(unsigned char *addr,
+ struct ocelot_multicast *mc,
+ enum macaccess_entry_type entry_type)
+{
+ memcpy(addr, mc->addr, ETH_ALEN);
+
+ if (entry_type == ENTRYTYPE_MACv4) {
+ addr[0] = 0;
+ addr[1] = mc->ports >> 8;
+ addr[2] = mc->ports & 0xff;
+ } else if (entry_type == ENTRYTYPE_MACv6) {
+ addr[0] = mc->ports >> 8;
+ addr[1] = mc->ports & 0xff;
+ }
+}
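A worked example of this encoding (addresses and port set invented): for the IPv4 group MAC 01:00:5e:01:02:03 subscribed to by ports 0 and 2, the entry written to the MAC table becomes 00:00:05:01:02:03, i.e. the destination set 0x0005 lives in the first three bytes and no PGID is consumed (compare ocelot_mdb_get_pgid() above).

/* Illustrative sketch of the call, not part of the driver: */
static void example_mdb_encoding(void)
{
	struct ocelot_multicast mc = {
		.addr = { 0x01, 0x00, 0x5e, 0x01, 0x02, 0x03 },
		.ports = BIT(0) | BIT(2),	/* 0x0005 */
	};
	unsigned char addr[ETH_ALEN];

	ocelot_encode_ports_to_mdb(addr, &mc, ENTRYTYPE_MACv4);
	/* addr now holds 00:00:05:01:02:03 */
}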
+
+int ocelot_port_mdb_add(struct ocelot *ocelot, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ enum macaccess_entry_type entry_type;
+ unsigned char addr[ETH_ALEN];
+ struct ocelot_multicast *mc;
+ u16 vid = mdb->vid;
+ bool new = false;
+
+ if (port == ocelot->npi)
+ port = ocelot->num_phys_ports;
+
+ if (!vid)
+ vid = ocelot_port->pvid;
+
+ entry_type = ocelot_classify_mdb(mdb->addr);
+
+ mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
+ if (!mc) {
+ int pgid = ocelot_mdb_get_pgid(ocelot, entry_type);
+
+ if (pgid < 0) {
+ dev_err(ocelot->dev,
+ "No more PGIDs available for mdb %pM vid %d\n",
+ mdb->addr, vid);
+ return -ENOSPC;
+ }
+
+ mc = devm_kzalloc(ocelot->dev, sizeof(*mc), GFP_KERNEL);
+ if (!mc)
+ return -ENOMEM;
+
+ memcpy(mc->addr, mdb->addr, ETH_ALEN);
+ mc->vid = vid;
+ mc->pgid = pgid;
+
+ list_add_tail(&mc->list, &ocelot->multicast);
+ new = true;
+ }
+
+ if (!new) {
+ ocelot_encode_ports_to_mdb(addr, mc, entry_type);
+ ocelot_mact_forget(ocelot, addr, vid);
+ }
+
+ mc->ports |= BIT(port);
+ ocelot_encode_ports_to_mdb(addr, mc, entry_type);
+
+ return ocelot_mact_learn(ocelot, mc->pgid, addr, vid, entry_type);
+}
+EXPORT_SYMBOL(ocelot_port_mdb_add);
+
+int ocelot_port_mdb_del(struct ocelot *ocelot, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ enum macaccess_entry_type entry_type;
+ unsigned char addr[ETH_ALEN];
+ struct ocelot_multicast *mc;
+ u16 vid = mdb->vid;
+
+ if (port == ocelot->npi)
+ port = ocelot->num_phys_ports;
+
+ if (!vid)
+ vid = ocelot_port->pvid;
+
+ mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
+ if (!mc)
+ return -ENOENT;
+
+ entry_type = ocelot_classify_mdb(mdb->addr);
+
+ ocelot_encode_ports_to_mdb(addr, mc, entry_type);
+ ocelot_mact_forget(ocelot, addr, vid);
+
+ mc->ports &= ~BIT(port);
+ if (!mc->ports) {
+ list_del(&mc->list);
+ devm_kfree(ocelot->dev, mc);
+ return 0;
+ }
+
+ ocelot_encode_ports_to_mdb(addr, mc, entry_type);
+
+ return ocelot_mact_learn(ocelot, mc->pgid, addr, vid, entry_type);
+}
+EXPORT_SYMBOL(ocelot_port_mdb_del);
+
+int ocelot_port_bridge_join(struct ocelot *ocelot, int port,
+ struct net_device *bridge)
+{
+ if (!ocelot->bridge_mask) {
+ ocelot->hw_bridge_dev = bridge;
+ } else {
+ if (ocelot->hw_bridge_dev != bridge)
+ /* This is adding the port to a second bridge, which is
+ * unsupported.
+ */
+ return -ENODEV;
+ }
+
+ ocelot->bridge_mask |= BIT(port);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_port_bridge_join);
+
+int ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
+ struct net_device *bridge)
+{
+ struct switchdev_trans trans;
+ int ret;
+
+ ocelot->bridge_mask &= ~BIT(port);
+
+ if (!ocelot->bridge_mask)
+ ocelot->hw_bridge_dev = NULL;
+
+ trans.ph_prepare = true;
+ ret = ocelot_port_vlan_filtering(ocelot, port, false, &trans);
+ if (ret)
+ return ret;
+
+ trans.ph_prepare = false;
+ ret = ocelot_port_vlan_filtering(ocelot, port, false, &trans);
+ if (ret)
+ return ret;
+
+ ocelot_port_set_pvid(ocelot, port, 0);
+ return ocelot_port_set_native_vlan(ocelot, port, 0);
+}
+EXPORT_SYMBOL(ocelot_port_bridge_leave);
+
+static void ocelot_set_aggr_pgids(struct ocelot *ocelot)
+{
+ int i, port, lag;
+
+ /* Reset destination and aggregation PGIDS */
+ for_each_unicast_dest_pgid(ocelot, port)
+ ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port);
+
+ for_each_aggr_pgid(ocelot, i)
+ ocelot_write_rix(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0),
+ ANA_PGID_PGID, i);
+
+ /* Now, set PGIDs for each LAG */
+ for (lag = 0; lag < ocelot->num_phys_ports; lag++) {
+ unsigned long bond_mask;
+ int aggr_count = 0;
+ u8 aggr_idx[16];
+
+ bond_mask = ocelot->lags[lag];
+ if (!bond_mask)
+ continue;
+
+ for_each_set_bit(port, &bond_mask, ocelot->num_phys_ports) {
+ /* Destination mask */
+ ocelot_write_rix(ocelot, bond_mask,
+ ANA_PGID_PGID, port);
+ aggr_idx[aggr_count] = port;
+ aggr_count++;
+ }
+
+ for_each_aggr_pgid(ocelot, i) {
+ u32 ac;
+
+ ac = ocelot_read_rix(ocelot, ANA_PGID_PGID, i);
+ ac &= ~bond_mask;
+ ac |= BIT(aggr_idx[i % aggr_count]);
+ ocelot_write_rix(ocelot, ac, ANA_PGID_PGID, i);
+ }
+ }
+}
+
+static void ocelot_setup_lag(struct ocelot *ocelot, int lag)
+{
+ unsigned long bond_mask = ocelot->lags[lag];
+ unsigned int p;
+
+ for_each_set_bit(p, &bond_mask, ocelot->num_phys_ports) {
+ u32 port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, p);
+
+ port_cfg &= ~ANA_PORT_PORT_CFG_PORTID_VAL_M;
+
+ /* Use the LAG index as the logical port ID for port p */
+ ocelot_write_gix(ocelot, port_cfg |
+ ANA_PORT_PORT_CFG_PORTID_VAL(lag),
+ ANA_PORT_PORT_CFG, p);
+ }
+}
+
+int ocelot_port_lag_join(struct ocelot *ocelot, int port,
+ struct net_device *bond)
+{
+ struct net_device *ndev;
+ u32 bond_mask = 0;
+ int lag, lp;
+
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(bond, ndev) {
+ struct ocelot_port_private *priv = netdev_priv(ndev);
+
+ bond_mask |= BIT(priv->chip_port);
+ }
+ rcu_read_unlock();
+
+ lp = __ffs(bond_mask);
+
+ /* If the new port is the lowest one, use it as the logical port from
+ * now on
+ */
+ if (port == lp) {
+ lag = port;
+ ocelot->lags[port] = bond_mask;
+ bond_mask &= ~BIT(port);
+ if (bond_mask) {
+ lp = __ffs(bond_mask);
+ ocelot->lags[lp] = 0;
+ }
+ } else {
+ lag = lp;
+ ocelot->lags[lp] |= BIT(port);
+ }
+
+ ocelot_setup_lag(ocelot, lag);
+ ocelot_set_aggr_pgids(ocelot);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_port_lag_join);
+
+void ocelot_port_lag_leave(struct ocelot *ocelot, int port,
+ struct net_device *bond)
+{
+ u32 port_cfg;
+ int i;
+
+ /* Remove port from any lag */
+ for (i = 0; i < ocelot->num_phys_ports; i++)
+ ocelot->lags[i] &= ~BIT(port);
+
+ /* if it was the logical port of the lag, move the lag config to the
+ * next port
+ */
+ if (ocelot->lags[port]) {
+ int n = __ffs(ocelot->lags[port]);
+
+ ocelot->lags[n] = ocelot->lags[port];
+ ocelot->lags[port] = 0;
+
+ ocelot_setup_lag(ocelot, n);
+ }
+
+ port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, port);
+ port_cfg &= ~ANA_PORT_PORT_CFG_PORTID_VAL_M;
+ ocelot_write_gix(ocelot, port_cfg | ANA_PORT_PORT_CFG_PORTID_VAL(port),
+ ANA_PORT_PORT_CFG, port);
+
+ ocelot_set_aggr_pgids(ocelot);
+}
+EXPORT_SYMBOL(ocelot_port_lag_leave);
+
+/* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu.
+ * The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG.
+ * In the special case that it's the NPI port that we're configuring, the
+ * length of the tag and optional prefix needs to be accounted for privately,
+ * in order to be able to sustain communication at the requested @sdu.
+ */
+void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ int maxlen = sdu + ETH_HLEN + ETH_FCS_LEN;
+ int pause_start, pause_stop;
+ int atop, atop_tot;
+
+ if (port == ocelot->npi) {
+ maxlen += OCELOT_TAG_LEN;
+
+ if (ocelot->inj_prefix == OCELOT_TAG_PREFIX_SHORT)
+ maxlen += OCELOT_SHORT_PREFIX_LEN;
+ else if (ocelot->inj_prefix == OCELOT_TAG_PREFIX_LONG)
+ maxlen += OCELOT_LONG_PREFIX_LEN;
+ }
+
+ ocelot_port_writel(ocelot_port, maxlen, DEV_MAC_MAXLEN_CFG);
+
+ /* Set Pause watermark hysteresis */
+ pause_start = 6 * maxlen / OCELOT_BUFFER_CELL_SZ;
+ pause_stop = 4 * maxlen / OCELOT_BUFFER_CELL_SZ;
+ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_START,
+ pause_start);
+ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_STOP,
+ pause_stop);
+
+ /* Tail dropping watermarks */
+ atop_tot = (ocelot->shared_queue_sz - 9 * maxlen) /
+ OCELOT_BUFFER_CELL_SZ;
+ atop = (9 * maxlen) / OCELOT_BUFFER_CELL_SZ;
+ ocelot_write_rix(ocelot, ocelot->ops->wm_enc(atop), SYS_ATOP, port);
+ ocelot_write(ocelot, ocelot->ops->wm_enc(atop_tot), SYS_ATOP_TOT_CFG);
+}
+EXPORT_SYMBOL(ocelot_port_set_maxlen);
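For illustration, plugging the default ocelot_init_port() call (sdu == ETH_DATA_LEN, i.e. 1500) into the arithmetic above for a non-NPI port:

/*
 * Worked example, not part of the patch:
 *   maxlen      = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518 bytes
 *   pause_start = 6 * 1518 / 60                          = 151 cells
 *   pause_stop  = 4 * 1518 / 60                          = 101 cells
 *   atop        = 9 * 1518 / 60                          = 227 cells
 * atop_tot additionally depends on the SoC's shared queue size, so it is
 * left out here.
 */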
+
+int ocelot_get_max_mtu(struct ocelot *ocelot, int port)
+{
+ int max_mtu = 65535 - ETH_HLEN - ETH_FCS_LEN;
+
+ if (port == ocelot->npi) {
+ max_mtu -= OCELOT_TAG_LEN;
+
+ if (ocelot->inj_prefix == OCELOT_TAG_PREFIX_SHORT)
+ max_mtu -= OCELOT_SHORT_PREFIX_LEN;
+ else if (ocelot->inj_prefix == OCELOT_TAG_PREFIX_LONG)
+ max_mtu -= OCELOT_LONG_PREFIX_LEN;
+ }
+
+ return max_mtu;
+}
+EXPORT_SYMBOL(ocelot_get_max_mtu);
+
+void ocelot_init_port(struct ocelot *ocelot, int port)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ skb_queue_head_init(&ocelot_port->tx_skbs);
+ spin_lock_init(&ocelot_port->ts_id_lock);
+
+ /* Basic L2 initialization */
+
+ /* Set MAC IFG Gaps
+ * FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 0
+ * !FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 5
+ */
+ ocelot_port_writel(ocelot_port, DEV_MAC_IFG_CFG_TX_IFG(5),
+ DEV_MAC_IFG_CFG);
+
+ /* Load seed (0) and set MAC HDX late collision */
+ ocelot_port_writel(ocelot_port, DEV_MAC_HDX_CFG_LATE_COL_POS(67) |
+ DEV_MAC_HDX_CFG_SEED_LOAD,
+ DEV_MAC_HDX_CFG);
+ mdelay(1);
+ ocelot_port_writel(ocelot_port, DEV_MAC_HDX_CFG_LATE_COL_POS(67),
+ DEV_MAC_HDX_CFG);
+
+ /* Set Max Length and maximum tags allowed */
+ ocelot_port_set_maxlen(ocelot, port, ETH_DATA_LEN);
+ ocelot_port_writel(ocelot_port, DEV_MAC_TAGS_CFG_TAG_ID(ETH_P_8021AD) |
+ DEV_MAC_TAGS_CFG_VLAN_AWR_ENA |
+ DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA |
+ DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA,
+ DEV_MAC_TAGS_CFG);
+
+ /* Set SMAC of Pause frame (00:00:00:00:00:00) */
+ ocelot_port_writel(ocelot_port, 0, DEV_MAC_FC_MAC_HIGH_CFG);
+ ocelot_port_writel(ocelot_port, 0, DEV_MAC_FC_MAC_LOW_CFG);
+
+ /* Enable transmission of pause frames */
+ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1);
+
+ /* Drop frames with multicast source address */
+ ocelot_rmw_gix(ocelot, ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA,
+ ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA,
+ ANA_PORT_DROP_CFG, port);
+
+ /* Set default VLAN and tag type to 8021Q. */
+ ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_TPID(ETH_P_8021Q),
+ REW_PORT_VLAN_CFG_PORT_TPID_M,
+ REW_PORT_VLAN_CFG, port);
+
+ /* Enable vcap lookups */
+ ocelot_vcap_enable(ocelot, port);
+}
+EXPORT_SYMBOL(ocelot_init_port);
+
+/* Configure and enable the CPU port module, which is a set of queues
+ * accessible through register MMIO, frame DMA or Ethernet (in case
+ * NPI mode is used).
+ */
+static void ocelot_cpu_port_init(struct ocelot *ocelot)
+{
+ int cpu = ocelot->num_phys_ports;
+
+ /* The unicast destination PGID for the CPU port module is unused */
+ ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, cpu);
+ /* Instead set up a multicast destination PGID for traffic copied to
+ * the CPU. Whitelisted MAC addresses like the port netdevice MAC
+ * addresses will be copied to the CPU via this PGID.
+ */
+ ocelot_write_rix(ocelot, BIT(cpu), ANA_PGID_PGID, PGID_CPU);
+ ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_RECV_ENA |
+ ANA_PORT_PORT_CFG_PORTID_VAL(cpu),
+ ANA_PORT_PORT_CFG, cpu);
+
+ /* Enable CPU port module */
+ ocelot_fields_write(ocelot, cpu, QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
+ /* CPU port Injection/Extraction configuration */
+ ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_XTR_HDR,
+ ocelot->xtr_prefix);
+ ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_INJ_HDR,
+ ocelot->inj_prefix);
+
+ /* Configure the CPU port to be VLAN aware */
+ ocelot_write_gix(ocelot, ANA_PORT_VLAN_CFG_VLAN_VID(0) |
+ ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+ ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
+ ANA_PORT_VLAN_CFG, cpu);
+}
+
+int ocelot_init(struct ocelot *ocelot)
+{
+ char queue_name[32];
+ int i, ret;
+ u32 port;
+
+ if (ocelot->ops->reset) {
+ ret = ocelot->ops->reset(ocelot);
+ if (ret) {
+ dev_err(ocelot->dev, "Switch reset failed\n");
+ return ret;
+ }
+ }
+
+ ocelot->lags = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports,
+ sizeof(u32), GFP_KERNEL);
+ if (!ocelot->lags)
+ return -ENOMEM;
+
+ ocelot->stats = devm_kcalloc(ocelot->dev,
+ ocelot->num_phys_ports * ocelot->num_stats,
+ sizeof(u64), GFP_KERNEL);
+ if (!ocelot->stats)
+ return -ENOMEM;
+
+ mutex_init(&ocelot->stats_lock);
+ mutex_init(&ocelot->ptp_lock);
+ spin_lock_init(&ocelot->ptp_clock_lock);
+ snprintf(queue_name, sizeof(queue_name), "%s-stats",
+ dev_name(ocelot->dev));
+ ocelot->stats_queue = create_singlethread_workqueue(queue_name);
+ if (!ocelot->stats_queue)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ocelot->multicast);
+ ocelot_mact_init(ocelot);
+ ocelot_vlan_init(ocelot);
+ ocelot_vcap_init(ocelot);
+ ocelot_cpu_port_init(ocelot);
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ /* Clear all counters (5 groups) */
+ ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port) |
+ SYS_STAT_CFG_STAT_CLEAR_SHOT(0x7f),
+ SYS_STAT_CFG);
+ }
+
+ /* Only use S-Tag */
+ ocelot_write(ocelot, ETH_P_8021AD, SYS_VLAN_ETYPE_CFG);
+
+ /* Aggregation mode */
+ ocelot_write(ocelot, ANA_AGGR_CFG_AC_SMAC_ENA |
+ ANA_AGGR_CFG_AC_DMAC_ENA |
+ ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA |
+ ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA, ANA_AGGR_CFG);
+
+ /* Set MAC age time to default value. The entry is aged after
+ * 2*AGE_PERIOD
+ */
+ ocelot_write(ocelot,
+ ANA_AUTOAGE_AGE_PERIOD(BR_DEFAULT_AGEING_TIME / 2 / HZ),
+ ANA_AUTOAGE);
+
+ /* Disable learning for frames discarded by VLAN ingress filtering */
+ regmap_field_write(ocelot->regfields[ANA_ADVLEARN_VLAN_CHK], 1);
+
+ /* Setup frame ageing - fixed value "2 sec" - in 6.5 us units */
+ ocelot_write(ocelot, SYS_FRM_AGING_AGE_TX_ENA |
+ SYS_FRM_AGING_MAX_AGE(307692), SYS_FRM_AGING);
+
+ /* Setup flooding PGIDs */
+ for (i = 0; i < ocelot->num_flooding_pgids; i++)
+ ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
+ ANA_FLOODING_FLD_BROADCAST(PGID_MC) |
+ ANA_FLOODING_FLD_UNICAST(PGID_UC),
+ ANA_FLOODING, i);
+ ocelot_write(ocelot, ANA_FLOODING_IPMC_FLD_MC6_DATA(PGID_MCIPV6) |
+ ANA_FLOODING_IPMC_FLD_MC6_CTRL(PGID_MC) |
+ ANA_FLOODING_IPMC_FLD_MC4_DATA(PGID_MCIPV4) |
+ ANA_FLOODING_IPMC_FLD_MC4_CTRL(PGID_MC),
+ ANA_FLOODING_IPMC);
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ /* Transmit the frame to the local port. */
+ ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port);
+ /* Do not forward BPDU frames to the front ports. */
+ ocelot_write_gix(ocelot,
+ ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
+ ANA_PORT_CPU_FWD_BPDU_CFG,
+ port);
+ /* Ensure bridging is disabled */
+ ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_SRC + port);
+ }
+
+ /* Allow broadcast MAC frames. */
+ for_each_nonreserved_multicast_dest_pgid(ocelot, i) {
+ u32 val = ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports - 1, 0));
+
+ ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i);
+ }
+ ocelot_write_rix(ocelot,
+ ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)),
+ ANA_PGID_PGID, PGID_MC);
+ ocelot_write_rix(ocelot,
+ ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)),
+ ANA_PGID_PGID, PGID_MCIPV4);
+ ocelot_write_rix(ocelot,
+ ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)),
+ ANA_PGID_PGID, PGID_MCIPV6);
+
+ /* Allow manual injection via the DEVCPU_QS registers, and byte swap
+ * these registers' data to fix endianness.
+ */
+ ocelot_write_rix(ocelot, QS_INJ_GRP_CFG_BYTE_SWAP |
+ QS_INJ_GRP_CFG_MODE(1), QS_INJ_GRP_CFG, 0);
+ ocelot_write_rix(ocelot, QS_XTR_GRP_CFG_BYTE_SWAP |
+ QS_XTR_GRP_CFG_MODE(1), QS_XTR_GRP_CFG, 0);
+ ocelot_write(ocelot, ANA_CPUQ_CFG_CPUQ_MIRROR(2) |
+ ANA_CPUQ_CFG_CPUQ_LRN(2) |
+ ANA_CPUQ_CFG_CPUQ_MAC_COPY(2) |
+ ANA_CPUQ_CFG_CPUQ_SRC_COPY(2) |
+ ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE(2) |
+ ANA_CPUQ_CFG_CPUQ_ALLBRIDGE(6) |
+ ANA_CPUQ_CFG_CPUQ_IPMC_CTRL(6) |
+ ANA_CPUQ_CFG_CPUQ_IGMP(6) |
+ ANA_CPUQ_CFG_CPUQ_MLD(6), ANA_CPUQ_CFG);
+ for (i = 0; i < 16; i++)
+ ocelot_write_rix(ocelot, ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL(6) |
+ ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6),
+ ANA_CPUQ_8021_CFG, i);
+
+ INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work);
+ queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
+ OCELOT_STATS_CHECK_DELAY);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_init);
+
+void ocelot_deinit(struct ocelot *ocelot)
+{
+ cancel_delayed_work(&ocelot->stats_work);
+ destroy_workqueue(ocelot->stats_queue);
+ mutex_destroy(&ocelot->stats_lock);
+}
+EXPORT_SYMBOL(ocelot_deinit);
+
+void ocelot_deinit_port(struct ocelot *ocelot, int port)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ skb_queue_purge(&ocelot_port->tx_skbs);
+}
+EXPORT_SYMBOL(ocelot_deinit_port);
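To show how the exported init/deinit entry points nest, a hedged bring-up/tear-down sketch follows; the real callers (ocelot_vsc7514.c in this series, plus the DSA front end) also set up regmaps, interrupts and netdevs, all omitted here, and the sketch assumes every entry of ocelot->ports[] has already been allocated:

/* Illustrative ordering of the library calls, nothing more. */
static int example_bringup(struct ocelot *ocelot)
{
	int port, err;

	err = ocelot_init(ocelot);
	if (err)
		return err;

	for (port = 0; port < ocelot->num_phys_ports; port++)
		ocelot_init_port(ocelot, port);

	return 0;
}

static void example_teardown(struct ocelot *ocelot)
{
	int port;

	for (port = 0; port < ocelot->num_phys_ports; port++)
		ocelot_deinit_port(ocelot, port);

	ocelot_deinit(ocelot);
}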
+
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
new file mode 100644
index 000000000..abb407dff
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_H_
+#define _MSCC_OCELOT_H_
+
+#include <linux/bitops.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <soc/mscc/ocelot_qsys.h>
+#include <soc/mscc/ocelot_sys.h>
+#include <soc/mscc/ocelot_dev.h>
+#include <soc/mscc/ocelot_ana.h>
+#include <soc/mscc/ocelot_ptp.h>
+#include <soc/mscc/ocelot.h>
+#include "ocelot_rew.h"
+#include "ocelot_qs.h"
+
+#define OCELOT_BUFFER_CELL_SZ 60
+
+#define OCELOT_STATS_CHECK_DELAY (2 * HZ)
+
+#define OCELOT_PTP_QUEUE_SZ 128
+
+struct frame_info {
+ u32 len;
+ u16 port;
+ u16 vid;
+ u8 tag_type;
+ u16 rew_op;
+ u32 timestamp; /* rew_val */
+};
+
+struct ocelot_multicast {
+ struct list_head list;
+ unsigned char addr[ETH_ALEN];
+ u16 vid;
+ u16 ports;
+ int pgid;
+};
+
+struct ocelot_port_tc {
+ bool block_shared;
+ unsigned long offload_cnt;
+
+ unsigned long police_id;
+};
+
+struct ocelot_port_private {
+ struct ocelot_port port;
+ struct net_device *dev;
+ struct phy_device *phy;
+ u8 chip_port;
+
+ struct phy *serdes;
+
+ struct ocelot_port_tc tc;
+};
+
+struct ocelot_dump_ctx {
+ struct net_device *dev;
+ struct sk_buff *skb;
+ struct netlink_callback *cb;
+ int idx;
+};
+
+/* MAC table entry types.
+ * ENTRYTYPE_NORMAL is subject to aging.
+ * ENTRYTYPE_LOCKED is not subject to aging.
+ * ENTRYTYPE_MACv4 is not subject to aging. For IPv4 multicast.
+ * ENTRYTYPE_MACv6 is not subject to aging. For IPv6 multicast.
+ */
+enum macaccess_entry_type {
+ ENTRYTYPE_NORMAL = 0,
+ ENTRYTYPE_LOCKED,
+ ENTRYTYPE_MACv4,
+ ENTRYTYPE_MACv6,
+};
+
+int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid,
+ bool is_static, void *data);
+int ocelot_mact_learn(struct ocelot *ocelot, int port,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid, enum macaccess_entry_type type);
+int ocelot_mact_forget(struct ocelot *ocelot,
+ const unsigned char mac[ETH_ALEN], unsigned int vid);
+int ocelot_port_lag_join(struct ocelot *ocelot, int port,
+ struct net_device *bond);
+void ocelot_port_lag_leave(struct ocelot *ocelot, int port,
+ struct net_device *bond);
+struct net_device *ocelot_port_to_netdev(struct ocelot *ocelot, int port);
+int ocelot_netdev_to_port(struct net_device *dev);
+
+u32 ocelot_port_readl(struct ocelot_port *port, u32 reg);
+void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg);
+
+int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
+ struct phy_device *phy);
+
+void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu,
+ enum ocelot_tag_prefix injection,
+ enum ocelot_tag_prefix extraction);
+
+extern struct notifier_block ocelot_netdevice_nb;
+extern struct notifier_block ocelot_switchdev_nb;
+extern struct notifier_block ocelot_switchdev_blocking_nb;
+
+#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
new file mode 100644
index 000000000..e56e540ce
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -0,0 +1,773 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Microsemi Ocelot Switch driver
+ * Copyright (c) 2019 Microsemi Corporation
+ */
+
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gact.h>
+#include <soc/mscc/ocelot_vcap.h>
+#include "ocelot_vcap.h"
+
+/* Arbitrarily chosen constants for encoding the VCAP block and lookup number
+ * into the chain number. This is UAPI.
+ */
+#define VCAP_BLOCK 10000
+#define VCAP_LOOKUP 1000
+#define VCAP_IS1_NUM_LOOKUPS 3
+#define VCAP_IS2_NUM_LOOKUPS 2
+#define VCAP_IS2_NUM_PAG 256
+#define VCAP_IS1_CHAIN(lookup) \
+ (1 * VCAP_BLOCK + (lookup) * VCAP_LOOKUP)
+#define VCAP_IS2_CHAIN(lookup, pag) \
+ (2 * VCAP_BLOCK + (lookup) * VCAP_LOOKUP + (pag))
+
+static int ocelot_chain_to_block(int chain, bool ingress)
+{
+ int lookup, pag;
+
+ if (!ingress) {
+ if (chain == 0)
+ return VCAP_ES0;
+ return -EOPNOTSUPP;
+ }
+
+ /* Backwards compatibility with older, single-chain tc-flower
+ * offload support in Ocelot
+ */
+ if (chain == 0)
+ return VCAP_IS2;
+
+ for (lookup = 0; lookup < VCAP_IS1_NUM_LOOKUPS; lookup++)
+ if (chain == VCAP_IS1_CHAIN(lookup))
+ return VCAP_IS1;
+
+ for (lookup = 0; lookup < VCAP_IS2_NUM_LOOKUPS; lookup++)
+ for (pag = 0; pag < VCAP_IS2_NUM_PAG; pag++)
+ if (chain == VCAP_IS2_CHAIN(lookup, pag))
+ return VCAP_IS2;
+
+ return -EOPNOTSUPP;
+}
+
+/* Caller must ensure this is a valid IS1 or IS2 chain first,
+ * by calling ocelot_chain_to_block.
+ */
+static int ocelot_chain_to_lookup(int chain)
+{
+ /* Backwards compatibility with older, single-chain tc-flower
+ * offload support in Ocelot
+ */
+ if (chain == 0)
+ return 0;
+
+ return (chain / VCAP_LOOKUP) % 10;
+}
+
+/* Caller must ensure this is a valid IS2 chain first,
+ * by calling ocelot_chain_to_block.
+ */
+static int ocelot_chain_to_pag(int chain)
+{
+ int lookup;
+
+ /* Backwards compatibility with older, single-chain tc-flower
+ * offload support in Ocelot
+ */
+ if (chain == 0)
+ return 0;
+
+ lookup = ocelot_chain_to_lookup(chain);
+
+ /* calculate PAG value as chain index relative to the first PAG */
+ return chain - VCAP_IS2_CHAIN(lookup, 0);
+}
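A worked example of the chain numbering and of the two helpers above (chain values chosen arbitrarily):

/*
 * Illustrative only, not taken from the patch:
 *   chain 12000 = 1 * VCAP_BLOCK + 2 * VCAP_LOOKUP      -> IS1, lookup 2
 *   chain 21005 = 2 * VCAP_BLOCK + 1 * VCAP_LOOKUP + 5  -> IS2, lookup 1, PAG 5
 * Round-tripping 21005 through the helpers:
 *   ocelot_chain_to_lookup(21005) == (21005 / 1000) % 10          == 1
 *   ocelot_chain_to_pag(21005)    == 21005 - VCAP_IS2_CHAIN(1, 0) == 5
 */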
+
+static bool ocelot_is_goto_target_valid(int goto_target, int chain,
+ bool ingress)
+{
+ int pag;
+
+ /* Can't offload GOTO in VCAP ES0 */
+ if (!ingress)
+ return (goto_target < 0);
+
+ /* Non-optional GOTOs */
+ if (chain == 0)
+ /* VCAP IS1 can be skipped, either partially or completely */
+ return (goto_target == VCAP_IS1_CHAIN(0) ||
+ goto_target == VCAP_IS1_CHAIN(1) ||
+ goto_target == VCAP_IS1_CHAIN(2) ||
+ goto_target == VCAP_IS2_CHAIN(0, 0) ||
+ goto_target == VCAP_IS2_CHAIN(1, 0));
+
+ if (chain == VCAP_IS1_CHAIN(0))
+ return (goto_target == VCAP_IS1_CHAIN(1));
+
+ if (chain == VCAP_IS1_CHAIN(1))
+ return (goto_target == VCAP_IS1_CHAIN(2));
+
+ /* Lookup 2 of VCAP IS1 can really support non-optional GOTOs,
+ * using a Policy Association Group (PAG) value, which is an 8-bit
+ * value encoding a VCAP IS2 target chain.
+ */
+ if (chain == VCAP_IS1_CHAIN(2)) {
+ for (pag = 0; pag < VCAP_IS2_NUM_PAG; pag++)
+ if (goto_target == VCAP_IS2_CHAIN(0, pag))
+ return true;
+
+ return false;
+ }
+
+ /* Non-optional GOTO from VCAP IS2 lookup 0 to lookup 1.
+ * We cannot change the PAG at this point.
+ */
+ for (pag = 0; pag < VCAP_IS2_NUM_PAG; pag++)
+ if (chain == VCAP_IS2_CHAIN(0, pag))
+ return (goto_target == VCAP_IS2_CHAIN(1, pag));
+
+ /* VCAP IS2 lookup 1 cannot jump anywhere */
+ return false;
+}
+
+static struct ocelot_vcap_filter *
+ocelot_find_vcap_filter_that_points_at(struct ocelot *ocelot, int chain)
+{
+ struct ocelot_vcap_filter *filter;
+ struct ocelot_vcap_block *block;
+ int block_id;
+
+ block_id = ocelot_chain_to_block(chain, true);
+ if (block_id < 0)
+ return NULL;
+
+ if (block_id == VCAP_IS2) {
+ block = &ocelot->block[VCAP_IS1];
+
+ list_for_each_entry(filter, &block->rules, list)
+ if (filter->type == OCELOT_VCAP_FILTER_PAG &&
+ filter->goto_target == chain)
+ return filter;
+ }
+
+ list_for_each_entry(filter, &ocelot->dummy_rules, list)
+ if (filter->goto_target == chain)
+ return filter;
+
+ return NULL;
+}
+
+static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
+ bool ingress, struct flow_cls_offload *f,
+ struct ocelot_vcap_filter *filter)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct netlink_ext_ack *extack = f->common.extack;
+ bool allow_missing_goto_target = false;
+ const struct flow_action_entry *a;
+ enum ocelot_tag_tpid_sel tpid;
+ int i, chain, egress_port;
+ u64 rate;
+
+ if (!flow_action_basic_hw_stats_check(&f->rule->action,
+ f->common.extack))
+ return -EOPNOTSUPP;
+
+ chain = f->common.chain_index;
+ filter->block_id = ocelot_chain_to_block(chain, ingress);
+ if (filter->block_id < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload to this chain");
+ return -EOPNOTSUPP;
+ }
+ if (filter->block_id == VCAP_IS1 || filter->block_id == VCAP_IS2)
+ filter->lookup = ocelot_chain_to_lookup(chain);
+ if (filter->block_id == VCAP_IS2)
+ filter->pag = ocelot_chain_to_pag(chain);
+
+ filter->goto_target = -1;
+ filter->type = OCELOT_VCAP_FILTER_DUMMY;
+
+ flow_action_for_each(i, a, &f->rule->action) {
+ switch (a->id) {
+ case FLOW_ACTION_DROP:
+ if (filter->block_id != VCAP_IS2) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Drop action can only be offloaded to VCAP IS2");
+ return -EOPNOTSUPP;
+ }
+ if (filter->goto_target != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Last action must be GOTO");
+ return -EOPNOTSUPP;
+ }
+ filter->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
+ filter->action.port_mask = 0;
+ filter->action.police_ena = true;
+ filter->action.pol_ix = OCELOT_POLICER_DISCARD;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ break;
+ case FLOW_ACTION_TRAP:
+ if (filter->block_id != VCAP_IS2 ||
+ filter->lookup != 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Trap action can only be offloaded to VCAP IS2 lookup 0");
+ return -EOPNOTSUPP;
+ }
+ if (filter->goto_target != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Last action must be GOTO");
+ return -EOPNOTSUPP;
+ }
+ filter->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
+ filter->action.port_mask = 0;
+ filter->action.cpu_copy_ena = true;
+ filter->action.cpu_qu_num = 0;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ break;
+ case FLOW_ACTION_POLICE:
+ if (filter->block_id != VCAP_IS2 ||
+ filter->lookup != 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Police action can only be offloaded to VCAP IS2 lookup 0");
+ return -EOPNOTSUPP;
+ }
+ if (filter->goto_target != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Last action must be GOTO");
+ return -EOPNOTSUPP;
+ }
+ filter->action.police_ena = true;
+ rate = a->police.rate_bytes_ps;
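+			/* tc expresses the rate in bytes per second; the
+			 * hardware policer is programmed in kbps
+			 */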
+ filter->action.pol.rate = div_u64(rate, 1000) * 8;
+ filter->action.pol.burst = a->police.burst;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ break;
+ case FLOW_ACTION_REDIRECT:
+ if (filter->block_id != VCAP_IS2) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Redirect action can only be offloaded to VCAP IS2");
+ return -EOPNOTSUPP;
+ }
+ if (filter->goto_target != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Last action must be GOTO");
+ return -EOPNOTSUPP;
+ }
+ egress_port = ocelot->ops->netdev_to_port(a->dev);
+ if (egress_port < 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Destination not an ocelot port");
+ return -EOPNOTSUPP;
+ }
+ filter->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
+ filter->action.port_mask = BIT(egress_port);
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ break;
+ case FLOW_ACTION_VLAN_POP:
+ if (filter->block_id != VCAP_IS1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VLAN pop action can only be offloaded to VCAP IS1");
+ return -EOPNOTSUPP;
+ }
+ if (filter->goto_target != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Last action must be GOTO");
+ return -EOPNOTSUPP;
+ }
+ filter->action.vlan_pop_cnt_ena = true;
+ filter->action.vlan_pop_cnt++;
+ if (filter->action.vlan_pop_cnt > 2) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot pop more than 2 VLAN headers");
+ return -EOPNOTSUPP;
+ }
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ break;
+ case FLOW_ACTION_VLAN_MANGLE:
+ if (filter->block_id != VCAP_IS1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VLAN modify action can only be offloaded to VCAP IS1");
+ return -EOPNOTSUPP;
+ }
+ if (filter->goto_target != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Last action must be GOTO");
+ return -EOPNOTSUPP;
+ }
+ if (!ocelot_port->vlan_aware) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only modify VLAN under VLAN aware bridge");
+ return -EOPNOTSUPP;
+ }
+ filter->action.vid_replace_ena = true;
+ filter->action.pcp_dei_ena = true;
+ filter->action.vid = a->vlan.vid;
+ filter->action.pcp = a->vlan.prio;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ break;
+ case FLOW_ACTION_PRIORITY:
+ if (filter->block_id != VCAP_IS1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Priority action can only be offloaded to VCAP IS1");
+ return -EOPNOTSUPP;
+ }
+ if (filter->goto_target != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Last action must be GOTO");
+ return -EOPNOTSUPP;
+ }
+ filter->action.qos_ena = true;
+ filter->action.qos_val = a->priority;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ break;
+ case FLOW_ACTION_GOTO:
+ filter->goto_target = a->chain_index;
+
+ if (filter->block_id == VCAP_IS1 && filter->lookup == 2) {
+ int pag = ocelot_chain_to_pag(filter->goto_target);
+
+ filter->action.pag_override_mask = 0xff;
+ filter->action.pag_val = pag;
+ filter->type = OCELOT_VCAP_FILTER_PAG;
+ }
+ break;
+ case FLOW_ACTION_VLAN_PUSH:
+ if (filter->block_id != VCAP_ES0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VLAN push action can only be offloaded to VCAP ES0");
+ return -EOPNOTSUPP;
+ }
+ switch (ntohs(a->vlan.proto)) {
+ case ETH_P_8021Q:
+ tpid = OCELOT_TAG_TPID_SEL_8021Q;
+ break;
+ case ETH_P_8021AD:
+ tpid = OCELOT_TAG_TPID_SEL_8021AD;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot push custom TPID");
+ return -EOPNOTSUPP;
+ }
+ filter->action.tag_a_tpid_sel = tpid;
+ filter->action.push_outer_tag = OCELOT_ES0_TAG;
+ filter->action.tag_a_vid_sel = 1;
+ filter->action.vid_a_val = a->vlan.vid;
+ filter->action.pcp_a_val = a->vlan.prio;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload action");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (filter->goto_target == -1) {
+ if ((filter->block_id == VCAP_IS2 && filter->lookup == 1) ||
+ chain == 0) {
+ allow_missing_goto_target = true;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Missing GOTO action");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (!ocelot_is_goto_target_valid(filter->goto_target, chain, ingress) &&
+ !allow_missing_goto_target) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload this GOTO target");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int ocelot_flower_parse_indev(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f,
+ struct ocelot_vcap_filter *filter)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ const struct vcap_props *vcap = &ocelot->vcap[VCAP_ES0];
+ int key_length = vcap->keys[VCAP_ES0_IGR_PORT].length;
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct net_device *dev, *indev;
+ struct flow_match_meta match;
+ int ingress_port;
+
+ flow_rule_match_meta(rule, &match);
+
+ if (!match.mask->ingress_ifindex)
+ return 0;
+
+ if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
+ return -EOPNOTSUPP;
+ }
+
+ dev = ocelot->ops->port_to_netdev(ocelot, port);
+ if (!dev)
+ return -EINVAL;
+
+ indev = __dev_get_by_index(dev_net(dev), match.key->ingress_ifindex);
+ if (!indev) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't find the ingress port to match on");
+ return -ENOENT;
+ }
+
+ ingress_port = ocelot->ops->netdev_to_port(indev);
+ if (ingress_port < 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload an ocelot ingress port");
+ return -EOPNOTSUPP;
+ }
+ if (ingress_port == port) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Ingress port is equal to the egress port");
+ return -EINVAL;
+ }
+
+ filter->ingress_port.value = ingress_port;
+ filter->ingress_port.mask = GENMASK(key_length - 1, 0);
+
+ return 0;
+}
+
+static int
+ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
+ struct flow_cls_offload *f,
+ struct ocelot_vcap_filter *filter)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct flow_dissector *dissector = rule->match.dissector;
+ struct netlink_ext_ack *extack = f->common.extack;
+ u16 proto = ntohs(f->common.protocol);
+ bool match_protocol = true;
+ int ret;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_META) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
+ return -EOPNOTSUPP;
+ }
+
+ /* For VCAP ES0 (egress rewriter) we can match on the ingress port */
+ if (!ingress) {
+ ret = ocelot_flower_parse_indev(ocelot, port, f, filter);
+ if (ret)
+ return ret;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(rule, &match);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+ filter->key_type = OCELOT_VCAP_KEY_ANY;
+ filter->vlan.vid.value = match.key->vlan_id;
+ filter->vlan.vid.mask = match.mask->vlan_id;
+ filter->vlan.pcp.value[0] = match.key->vlan_priority;
+ filter->vlan.pcp.mask[0] = match.mask->vlan_priority;
+ match_protocol = false;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ if (filter->block_id == VCAP_ES0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VCAP ES0 cannot match on MAC address");
+ return -EOPNOTSUPP;
+ }
+
+		/* The hardware supports MAC matches only with the MAC_ETYPE
+		 * key, so if other matches (ports, TCP flags, etc.) are
+		 * present, just bail out.
+		 */
+ if ((dissector->used_keys &
+ (BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL))) !=
+ (BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL)))
+ return -EOPNOTSUPP;
+
+ flow_rule_match_eth_addrs(rule, &match);
+
+ if (filter->block_id == VCAP_IS1 &&
+ !is_zero_ether_addr(match.mask->dst)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Key type S1_NORMAL cannot match on destination MAC");
+ return -EOPNOTSUPP;
+ }
+
+ filter->key_type = OCELOT_VCAP_KEY_ETYPE;
+ ether_addr_copy(filter->key.etype.dmac.value,
+ match.key->dst);
+ ether_addr_copy(filter->key.etype.smac.value,
+ match.key->src);
+ ether_addr_copy(filter->key.etype.dmac.mask,
+ match.mask->dst);
+ ether_addr_copy(filter->key.etype.smac.mask,
+ match.mask->src);
+ goto finished_key_parsing;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+ if (ntohs(match.key->n_proto) == ETH_P_IP) {
+ if (filter->block_id == VCAP_ES0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VCAP ES0 cannot match on IP protocol");
+ return -EOPNOTSUPP;
+ }
+
+ filter->key_type = OCELOT_VCAP_KEY_IPV4;
+ filter->key.ipv4.proto.value[0] =
+ match.key->ip_proto;
+ filter->key.ipv4.proto.mask[0] =
+ match.mask->ip_proto;
+ match_protocol = false;
+ }
+ if (ntohs(match.key->n_proto) == ETH_P_IPV6) {
+ if (filter->block_id == VCAP_ES0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VCAP ES0 cannot match on IP protocol");
+ return -EOPNOTSUPP;
+ }
+
+ filter->key_type = OCELOT_VCAP_KEY_IPV6;
+ filter->key.ipv6.proto.value[0] =
+ match.key->ip_proto;
+ filter->key.ipv6.proto.mask[0] =
+ match.mask->ip_proto;
+ match_protocol = false;
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) &&
+ proto == ETH_P_IP) {
+ struct flow_match_ipv4_addrs match;
+ u8 *tmp;
+
+ if (filter->block_id == VCAP_ES0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VCAP ES0 cannot match on IP address");
+ return -EOPNOTSUPP;
+ }
+
+ flow_rule_match_ipv4_addrs(rule, &match);
+
+ if (filter->block_id == VCAP_IS1 && *(u32 *)&match.mask->dst) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Key type S1_NORMAL cannot match on destination IP");
+ return -EOPNOTSUPP;
+ }
+
+ tmp = &filter->key.ipv4.sip.value.addr[0];
+ memcpy(tmp, &match.key->src, 4);
+
+ tmp = &filter->key.ipv4.sip.mask.addr[0];
+ memcpy(tmp, &match.mask->src, 4);
+
+ tmp = &filter->key.ipv4.dip.value.addr[0];
+ memcpy(tmp, &match.key->dst, 4);
+
+ tmp = &filter->key.ipv4.dip.mask.addr[0];
+ memcpy(tmp, &match.mask->dst, 4);
+ match_protocol = false;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) &&
+ proto == ETH_P_IPV6) {
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ if (filter->block_id == VCAP_ES0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VCAP ES0 cannot match on L4 ports");
+ return -EOPNOTSUPP;
+ }
+
+ flow_rule_match_ports(rule, &match);
+ filter->key.ipv4.sport.value = ntohs(match.key->src);
+ filter->key.ipv4.sport.mask = ntohs(match.mask->src);
+ filter->key.ipv4.dport.value = ntohs(match.key->dst);
+ filter->key.ipv4.dport.mask = ntohs(match.mask->dst);
+ match_protocol = false;
+ }
+
+finished_key_parsing:
+ if (match_protocol && proto != ETH_P_ALL) {
+ if (filter->block_id == VCAP_ES0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VCAP ES0 cannot match on L2 proto");
+ return -EOPNOTSUPP;
+ }
+
+ /* TODO: support SNAP, LLC etc */
+ if (proto < ETH_P_802_3_MIN)
+ return -EOPNOTSUPP;
+ filter->key_type = OCELOT_VCAP_KEY_ETYPE;
+ *(__be16 *)filter->key.etype.etype.value = htons(proto);
+ *(__be16 *)filter->key.etype.etype.mask = htons(0xffff);
+ }
+ /* else, a filter of type OCELOT_VCAP_KEY_ANY is implicitly added */
+
+ return 0;
+}
+
+static int ocelot_flower_parse(struct ocelot *ocelot, int port, bool ingress,
+ struct flow_cls_offload *f,
+ struct ocelot_vcap_filter *filter)
+{
+ int ret;
+
+ filter->prio = f->common.prio;
+ filter->id = f->cookie;
+
+ ret = ocelot_flower_parse_action(ocelot, port, ingress, f, filter);
+ if (ret)
+ return ret;
+
+ return ocelot_flower_parse_key(ocelot, port, ingress, f, filter);
+}
+
+static struct ocelot_vcap_filter
+*ocelot_vcap_filter_create(struct ocelot *ocelot, int port, bool ingress,
+ struct flow_cls_offload *f)
+{
+ struct ocelot_vcap_filter *filter;
+
+ filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+ if (!filter)
+ return NULL;
+
+ if (ingress) {
+ filter->ingress_port_mask = BIT(port);
+ } else {
+ const struct vcap_props *vcap = &ocelot->vcap[VCAP_ES0];
+ int key_length = vcap->keys[VCAP_ES0_EGR_PORT].length;
+
+ filter->egress_port.value = port;
+ filter->egress_port.mask = GENMASK(key_length - 1, 0);
+ }
+
+ return filter;
+}
+
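+/* Filters that carry only the mandatory GOTO actions of the chain skeleton
+ * are kept on a software-only list (ocelot->dummy_rules) and are never
+ * written to the TCAM; they only serve to validate that later rules are
+ * installed on a chain that some GOTO actually points to.
+ */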
+static int ocelot_vcap_dummy_filter_add(struct ocelot *ocelot,
+ struct ocelot_vcap_filter *filter)
+{
+ list_add(&filter->list, &ocelot->dummy_rules);
+
+ return 0;
+}
+
+static int ocelot_vcap_dummy_filter_del(struct ocelot *ocelot,
+ struct ocelot_vcap_filter *filter)
+{
+ list_del(&filter->list);
+ kfree(filter);
+
+ return 0;
+}
+
+int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f, bool ingress)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct ocelot_vcap_filter *filter;
+ int chain = f->common.chain_index;
+ int ret;
+
+ if (chain && !ocelot_find_vcap_filter_that_points_at(ocelot, chain)) {
+ NL_SET_ERR_MSG_MOD(extack, "No default GOTO action points to this chain");
+ return -EOPNOTSUPP;
+ }
+
+ filter = ocelot_vcap_filter_create(ocelot, port, ingress, f);
+ if (!filter)
+ return -ENOMEM;
+
+ ret = ocelot_flower_parse(ocelot, port, ingress, f, filter);
+ if (ret) {
+ kfree(filter);
+ return ret;
+ }
+
+ /* The non-optional GOTOs for the TCAM skeleton don't need
+ * to be actually offloaded.
+ */
+ if (filter->type == OCELOT_VCAP_FILTER_DUMMY)
+ return ocelot_vcap_dummy_filter_add(ocelot, filter);
+
+ return ocelot_vcap_filter_add(ocelot, filter, f->common.extack);
+}
+EXPORT_SYMBOL_GPL(ocelot_cls_flower_replace);
+
+int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f, bool ingress)
+{
+ struct ocelot_vcap_filter *filter;
+ struct ocelot_vcap_block *block;
+ int block_id;
+
+ block_id = ocelot_chain_to_block(f->common.chain_index, ingress);
+ if (block_id < 0)
+ return 0;
+
+ block = &ocelot->block[block_id];
+
+ filter = ocelot_vcap_block_find_filter_by_id(block, f->cookie);
+ if (!filter)
+ return 0;
+
+ if (filter->type == OCELOT_VCAP_FILTER_DUMMY)
+ return ocelot_vcap_dummy_filter_del(ocelot, filter);
+
+ return ocelot_vcap_filter_del(ocelot, filter);
+}
+EXPORT_SYMBOL_GPL(ocelot_cls_flower_destroy);
+
+int ocelot_cls_flower_stats(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f, bool ingress)
+{
+ struct ocelot_vcap_filter *filter;
+ struct ocelot_vcap_block *block;
+ int block_id, ret;
+
+ block_id = ocelot_chain_to_block(f->common.chain_index, ingress);
+ if (block_id < 0)
+ return 0;
+
+ block = &ocelot->block[block_id];
+
+ filter = ocelot_vcap_block_find_filter_by_id(block, f->cookie);
+ if (!filter || filter->type == OCELOT_VCAP_FILTER_DUMMY)
+ return 0;
+
+ ret = ocelot_vcap_filter_stats_update(ocelot, filter);
+ if (ret)
+ return ret;
+
+ flow_stats_update(&f->stats, 0x0, filter->stats.pkts, 0, 0x0,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ocelot_cls_flower_stats);
diff --git a/drivers/net/ethernet/mscc/ocelot_io.c b/drivers/net/ethernet/mscc/ocelot_io.c
new file mode 100644
index 000000000..7390fa398
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_io.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include "ocelot.h"
+
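+/* A register constant encodes both the target block (upper bits,
+ * reg >> TARGET_OFFSET) and the per-target register index (lower bits,
+ * reg & REG_MASK); the index is translated through the SoC-specific
+ * address map in ocelot->map[] before being passed to the regmap.
+ */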
+u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset)
+{
+ u16 target = reg >> TARGET_OFFSET;
+ u32 val;
+
+ WARN_ON(!target);
+
+ regmap_read(ocelot->targets[target],
+ ocelot->map[target][reg & REG_MASK] + offset, &val);
+ return val;
+}
+EXPORT_SYMBOL_GPL(__ocelot_read_ix);
+
+void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset)
+{
+ u16 target = reg >> TARGET_OFFSET;
+
+ WARN_ON(!target);
+
+ regmap_write(ocelot->targets[target],
+ ocelot->map[target][reg & REG_MASK] + offset, val);
+}
+EXPORT_SYMBOL_GPL(__ocelot_write_ix);
+
+void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
+ u32 offset)
+{
+ u16 target = reg >> TARGET_OFFSET;
+
+ WARN_ON(!target);
+
+ regmap_update_bits(ocelot->targets[target],
+ ocelot->map[target][reg & REG_MASK] + offset,
+ mask, val);
+}
+EXPORT_SYMBOL_GPL(__ocelot_rmw_ix);
+
+u32 ocelot_port_readl(struct ocelot_port *port, u32 reg)
+{
+ struct ocelot *ocelot = port->ocelot;
+ u16 target = reg >> TARGET_OFFSET;
+ u32 val;
+
+ WARN_ON(!target);
+
+ regmap_read(port->target, ocelot->map[target][reg & REG_MASK], &val);
+ return val;
+}
+EXPORT_SYMBOL_GPL(ocelot_port_readl);
+
+void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg)
+{
+ struct ocelot *ocelot = port->ocelot;
+ u16 target = reg >> TARGET_OFFSET;
+
+ WARN_ON(!target);
+
+ regmap_write(port->target, ocelot->map[target][reg & REG_MASK], val);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_writel);
+
+void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg)
+{
+ u32 cur = ocelot_port_readl(port, reg);
+
+ ocelot_port_writel(port, (cur & (~mask)) | val, reg);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_rmwl);
+
+u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target,
+ u32 reg, u32 offset)
+{
+ u32 val;
+
+ regmap_read(ocelot->targets[target],
+ ocelot->map[target][reg] + offset, &val);
+ return val;
+}
+
+void __ocelot_target_write_ix(struct ocelot *ocelot, enum ocelot_target target,
+ u32 val, u32 reg, u32 offset)
+{
+ regmap_write(ocelot->targets[target],
+ ocelot->map[target][reg] + offset, val);
+}
+
+int ocelot_regfields_init(struct ocelot *ocelot,
+ const struct reg_field *const regfields)
+{
+ unsigned int i;
+ u16 target;
+
+ for (i = 0; i < REGFIELD_MAX; i++) {
+ struct reg_field regfield = {};
+ u32 reg = regfields[i].reg;
+
+ if (!reg)
+ continue;
+
+ target = regfields[i].reg >> TARGET_OFFSET;
+
+ regfield.reg = ocelot->map[target][reg & REG_MASK];
+ regfield.lsb = regfields[i].lsb;
+ regfield.msb = regfields[i].msb;
+ regfield.id_size = regfields[i].id_size;
+ regfield.id_offset = regfields[i].id_offset;
+
+ ocelot->regfields[i] =
+ devm_regmap_field_alloc(ocelot->dev,
+ ocelot->targets[target],
+ regfield);
+
+ if (IS_ERR(ocelot->regfields[i]))
+ return PTR_ERR(ocelot->regfields[i]);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ocelot_regfields_init);
+
+static struct regmap_config ocelot_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+struct regmap *ocelot_regmap_init(struct ocelot *ocelot, struct resource *res)
+{
+ void __iomem *regs;
+
+ regs = devm_ioremap_resource(ocelot->dev, res);
+ if (IS_ERR(regs))
+ return ERR_CAST(regs);
+
+ ocelot_regmap_config.name = res->name;
+
+ return devm_regmap_init_mmio(ocelot->dev, regs, &ocelot_regmap_config);
+}
+EXPORT_SYMBOL_GPL(ocelot_regmap_init);
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
new file mode 100644
index 000000000..d60cd4326
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -0,0 +1,1087 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017, 2019 Microsemi Corporation
+ */
+
+#include <linux/if_bridge.h>
+#include "ocelot.h"
+#include "ocelot_vcap.h"
+
+int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv,
+ struct flow_cls_offload *f,
+ bool ingress)
+{
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ if (!ingress)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case FLOW_CLS_REPLACE:
+ return ocelot_cls_flower_replace(ocelot, port, f, ingress);
+ case FLOW_CLS_DESTROY:
+ return ocelot_cls_flower_destroy(ocelot, port, f, ingress);
+ case FLOW_CLS_STATS:
+ return ocelot_cls_flower_stats(ocelot, port, f, ingress);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
+ struct tc_cls_matchall_offload *f,
+ bool ingress)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct ocelot *ocelot = priv->port.ocelot;
+ struct ocelot_policer pol = { 0 };
+ struct flow_action_entry *action;
+ int port = priv->chip_port;
+ int err;
+
+ if (!ingress) {
+ NL_SET_ERR_MSG_MOD(extack, "Only ingress is supported");
+ return -EOPNOTSUPP;
+ }
+
+ switch (f->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ if (!flow_offload_has_one_action(&f->rule->action)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one action is supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (priv->tc.block_shared) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Rate limit is not supported on shared blocks");
+ return -EOPNOTSUPP;
+ }
+
+ action = &f->rule->action.entries[0];
+
+ if (action->id != FLOW_ACTION_POLICE) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
+ return -EOPNOTSUPP;
+ }
+
+ if (priv->tc.police_id && priv->tc.police_id != f->cookie) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one policer per port is supported");
+ return -EEXIST;
+ }
+
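+		/* Convert the tc rate from bytes per second to the kbps
+		 * expected by the port policer
+		 */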
+ pol.rate = (u32)div_u64(action->police.rate_bytes_ps, 1000) * 8;
+ pol.burst = action->police.burst;
+
+ err = ocelot_port_policer_add(ocelot, port, &pol);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Could not add policer");
+ return err;
+ }
+
+ priv->tc.police_id = f->cookie;
+ priv->tc.offload_cnt++;
+ return 0;
+ case TC_CLSMATCHALL_DESTROY:
+ if (priv->tc.police_id != f->cookie)
+ return -ENOENT;
+
+ err = ocelot_port_policer_del(ocelot, port);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Could not delete policer");
+ return err;
+ }
+ priv->tc.police_id = 0;
+ priv->tc.offload_cnt--;
+ return 0;
+ case TC_CLSMATCHALL_STATS:
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int ocelot_setup_tc_block_cb(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv, bool ingress)
+{
+ struct ocelot_port_private *priv = cb_priv;
+
+ if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSMATCHALL:
+ return ocelot_setup_tc_cls_matchall(priv, type_data, ingress);
+ case TC_SETUP_CLSFLOWER:
+ return ocelot_setup_tc_cls_flower(priv, type_data, ingress);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int ocelot_setup_tc_block_cb_ig(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv)
+{
+ return ocelot_setup_tc_block_cb(type, type_data,
+ cb_priv, true);
+}
+
+static int ocelot_setup_tc_block_cb_eg(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv)
+{
+ return ocelot_setup_tc_block_cb(type, type_data,
+ cb_priv, false);
+}
+
+static LIST_HEAD(ocelot_block_cb_list);
+
+static int ocelot_setup_tc_block(struct ocelot_port_private *priv,
+ struct flow_block_offload *f)
+{
+ struct flow_block_cb *block_cb;
+ flow_setup_cb_t *cb;
+
+ if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
+ cb = ocelot_setup_tc_block_cb_ig;
+ priv->tc.block_shared = f->block_shared;
+ } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
+ cb = ocelot_setup_tc_block_cb_eg;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ f->driver_block_list = &ocelot_block_cb_list;
+
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ if (flow_block_cb_is_busy(cb, priv, &ocelot_block_cb_list))
+ return -EBUSY;
+
+ block_cb = flow_block_cb_alloc(cb, priv, priv, NULL);
+ if (IS_ERR(block_cb))
+ return PTR_ERR(block_cb);
+
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, f->driver_block_list);
+ return 0;
+ case FLOW_BLOCK_UNBIND:
+ block_cb = flow_block_cb_lookup(f->block, cb, priv);
+ if (!block_cb)
+ return -ENOENT;
+
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int ocelot_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return ocelot_setup_tc_block(priv, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static void ocelot_port_adjust_link(struct net_device *dev)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ ocelot_adjust_link(ocelot, port, dev->phydev);
+}
+
+static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid,
+ bool untagged)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int port = priv->chip_port;
+ int ret;
+
+ ret = ocelot_vlan_add(ocelot, port, vid, pvid, untagged);
+ if (ret)
+ return ret;
+
+	/* Add the port MAC address with the right VLAN information */
+ ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, vid,
+ ENTRYTYPE_LOCKED);
+
+ return 0;
+}
+
+static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+ int ret;
+
+ /* 8021q removes VID 0 on module unload for all interfaces
+ * with VLAN filtering feature. We need to keep it to receive
+ * untagged traffic.
+ */
+ if (vid == 0)
+ return 0;
+
+ ret = ocelot_vlan_del(ocelot, port, vid);
+ if (ret)
+ return ret;
+
+	/* Delete the port MAC address with the right VLAN information */
+ ocelot_mact_forget(ocelot, dev->dev_addr, vid);
+
+ return 0;
+}
+
+static int ocelot_port_open(struct net_device *dev)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int port = priv->chip_port;
+ int err;
+
+ if (priv->serdes) {
+ err = phy_set_mode_ext(priv->serdes, PHY_MODE_ETHERNET,
+ ocelot_port->phy_mode);
+ if (err) {
+ netdev_err(dev, "Could not set mode of SerDes\n");
+ return err;
+ }
+ }
+
+ err = phy_connect_direct(dev, priv->phy, &ocelot_port_adjust_link,
+ ocelot_port->phy_mode);
+ if (err) {
+ netdev_err(dev, "Could not attach to PHY\n");
+ return err;
+ }
+
+ dev->phydev = priv->phy;
+
+ phy_attached_info(priv->phy);
+ phy_start(priv->phy);
+
+ ocelot_port_enable(ocelot, port, priv->phy);
+
+ return 0;
+}
+
+static int ocelot_port_stop(struct net_device *dev)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ phy_disconnect(priv->phy);
+
+ dev->phydev = NULL;
+
+ ocelot_port_disable(ocelot, port);
+
+ return 0;
+}
+
+/* Generate the IFH for frame injection
+ *
+ * The IFH is a 128-bit value
+ * bit 127: bypass the analyzer processing
+ * bit 56-67: destination mask
+ * bit 28-29: pop_cnt: 3 disables all rewriting of the frame
+ * bit 20-27: cpu extraction queue mask
+ * bit 16: tag type 0: C-tag, 1: S-tag
+ * bit 0-11: VID
+ */
+static int ocelot_gen_ifh(u32 *ifh, struct frame_info *info)
+{
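+	/* Pack the IFH into four 32-bit words; ifh[0] carries the most
+	 * significant bits and is pushed into the injection FIFO first
+	 * (big-endian, see ocelot_port_xmit()).
+	 */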
+ ifh[0] = IFH_INJ_BYPASS | ((0x1ff & info->rew_op) << 21);
+ ifh[1] = (0xf00 & info->port) >> 8;
+ ifh[2] = (0xff & info->port) << 24;
+ ifh[3] = (info->tag_type << 16) | info->vid;
+
+ return 0;
+}
+
+static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ u32 val, ifh[OCELOT_TAG_LEN / 4];
+ struct frame_info info = {};
+ u8 grp = 0; /* Send everything on CPU group 0 */
+ unsigned int i, count, last;
+ int port = priv->chip_port;
+
+ val = ocelot_read(ocelot, QS_INJ_STATUS);
+ if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp))) ||
+ (val & QS_INJ_STATUS_WMARK_REACHED(BIT(grp))))
+ return NETDEV_TX_BUSY;
+
+ ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
+ QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp);
+
+ info.port = BIT(port);
+ info.tag_type = IFH_TAG_TYPE_C;
+ info.vid = skb_vlan_tag_get(skb);
+
+ /* Check if timestamping is needed */
+ if (ocelot->ptp && (shinfo->tx_flags & SKBTX_HW_TSTAMP)) {
+ info.rew_op = ocelot_port->ptp_cmd;
+
+ if (ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
+ struct sk_buff *clone;
+
+ clone = skb_clone_sk(skb);
+ if (!clone) {
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ ocelot_port_add_txtstamp_skb(ocelot, port, clone);
+
+ info.rew_op |= clone->cb[0] << 3;
+ }
+ }
+
+ ocelot_gen_ifh(ifh, &info);
+
+ for (i = 0; i < OCELOT_TAG_LEN / 4; i++)
+ ocelot_write_rix(ocelot, (__force u32)cpu_to_be32(ifh[i]),
+ QS_INJ_WR, grp);
+
+ count = (skb->len + 3) / 4;
+ last = skb->len % 4;
+ for (i = 0; i < count; i++)
+ ocelot_write_rix(ocelot, ((u32 *)skb->data)[i], QS_INJ_WR, grp);
+
+ /* Add padding */
+ while (i < (OCELOT_BUFFER_CELL_SZ / 4)) {
+ ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp);
+ i++;
+ }
+
+ /* Indicate EOF and valid bytes in last word */
+ ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
+ QS_INJ_CTRL_VLD_BYTES(skb->len < OCELOT_BUFFER_CELL_SZ ? 0 : last) |
+ QS_INJ_CTRL_EOF,
+ QS_INJ_CTRL, grp);
+
+ /* Add dummy CRC */
+ ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp);
+ skb_tx_timestamp(skb);
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
+ kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static int ocelot_mc_unsync(struct net_device *dev, const unsigned char *addr)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+
+ return ocelot_mact_forget(ocelot, addr, ocelot_port->pvid);
+}
+
+static int ocelot_mc_sync(struct net_device *dev, const unsigned char *addr)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+
+ return ocelot_mact_learn(ocelot, PGID_CPU, addr, ocelot_port->pvid,
+ ENTRYTYPE_LOCKED);
+}
+
+static void ocelot_set_rx_mode(struct net_device *dev)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ u32 val;
+ int i;
+
+ /* This doesn't handle promiscuous mode because the bridge core is
+ * setting IFF_PROMISC on all slave interfaces and all frames would be
+ * forwarded to the CPU port.
+ */
+ val = GENMASK(ocelot->num_phys_ports - 1, 0);
+ for_each_nonreserved_multicast_dest_pgid(ocelot, i)
+ ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i);
+
+ __dev_mc_sync(dev, ocelot_mc_sync, ocelot_mc_unsync);
+}
+
+static int ocelot_port_get_phys_port_name(struct net_device *dev,
+ char *buf, size_t len)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ int port = priv->chip_port;
+ int ret;
+
+ ret = snprintf(buf, len, "p%d", port);
+ if (ret >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ocelot_port_set_mac_address(struct net_device *dev, void *p)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ const struct sockaddr *addr = p;
+
+ /* Learn the new net device MAC address in the mac table. */
+ ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data, ocelot_port->pvid,
+ ENTRYTYPE_LOCKED);
+ /* Then forget the previous one. */
+ ocelot_mact_forget(ocelot, dev->dev_addr, ocelot_port->pvid);
+
+ ether_addr_copy(dev->dev_addr, addr->sa_data);
+ return 0;
+}
+
+static void ocelot_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ /* Configure the port to read the stats from */
+ ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port),
+ SYS_STAT_CFG);
+
+ /* Get Rx stats */
+ stats->rx_bytes = ocelot_read(ocelot, SYS_COUNT_RX_OCTETS);
+ stats->rx_packets = ocelot_read(ocelot, SYS_COUNT_RX_SHORTS) +
+ ocelot_read(ocelot, SYS_COUNT_RX_FRAGMENTS) +
+ ocelot_read(ocelot, SYS_COUNT_RX_JABBERS) +
+ ocelot_read(ocelot, SYS_COUNT_RX_LONGS) +
+ ocelot_read(ocelot, SYS_COUNT_RX_64) +
+ ocelot_read(ocelot, SYS_COUNT_RX_65_127) +
+ ocelot_read(ocelot, SYS_COUNT_RX_128_255) +
+ ocelot_read(ocelot, SYS_COUNT_RX_256_1023) +
+ ocelot_read(ocelot, SYS_COUNT_RX_1024_1526) +
+ ocelot_read(ocelot, SYS_COUNT_RX_1527_MAX);
+ stats->multicast = ocelot_read(ocelot, SYS_COUNT_RX_MULTICAST);
+ stats->rx_dropped = dev->stats.rx_dropped;
+
+ /* Get Tx stats */
+ stats->tx_bytes = ocelot_read(ocelot, SYS_COUNT_TX_OCTETS);
+ stats->tx_packets = ocelot_read(ocelot, SYS_COUNT_TX_64) +
+ ocelot_read(ocelot, SYS_COUNT_TX_65_127) +
+ ocelot_read(ocelot, SYS_COUNT_TX_128_511) +
+ ocelot_read(ocelot, SYS_COUNT_TX_512_1023) +
+ ocelot_read(ocelot, SYS_COUNT_TX_1024_1526) +
+ ocelot_read(ocelot, SYS_COUNT_TX_1527_MAX);
+ stats->tx_dropped = ocelot_read(ocelot, SYS_COUNT_TX_DROPS) +
+ ocelot_read(ocelot, SYS_COUNT_TX_AGING);
+ stats->collisions = ocelot_read(ocelot, SYS_COUNT_TX_COLLISION);
+}
+
+static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr,
+ u16 vid, u16 flags,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ return ocelot_fdb_add(ocelot, port, addr, vid);
+}
+
+static int ocelot_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 vid)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ return ocelot_fdb_del(ocelot, port, addr, vid);
+}
+
+static int ocelot_port_fdb_dump(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct net_device *dev,
+ struct net_device *filter_dev, int *idx)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ struct ocelot_dump_ctx dump = {
+ .dev = dev,
+ .skb = skb,
+ .cb = cb,
+ .idx = *idx,
+ };
+ int port = priv->chip_port;
+ int ret;
+
+ ret = ocelot_fdb_dump(ocelot, port, ocelot_port_fdb_do_dump, &dump);
+
+ *idx = dump.idx;
+
+ return ret;
+}
+
+static int ocelot_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
+ u16 vid)
+{
+ return ocelot_vlan_vid_add(dev, vid, false, false);
+}
+
+static int ocelot_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
+ u16 vid)
+{
+ return ocelot_vlan_vid_del(dev, vid);
+}
+
+static void ocelot_vlan_mode(struct ocelot *ocelot, int port,
+ netdev_features_t features)
+{
+ u32 val;
+
+ /* Filtering */
+ val = ocelot_read(ocelot, ANA_VLANMASK);
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ val |= BIT(port);
+ else
+ val &= ~BIT(port);
+ ocelot_write(ocelot, val, ANA_VLANMASK);
+}
+
+static int ocelot_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = dev->features ^ features;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ if ((dev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) &&
+ priv->tc.offload_cnt) {
+ netdev_err(dev,
+ "Cannot disable HW TC offload while offloads active\n");
+ return -EBUSY;
+ }
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER)
+ ocelot_vlan_mode(ocelot, port, features);
+
+ return 0;
+}
+
+static int ocelot_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+
+ ppid->id_len = sizeof(ocelot->base_mac);
+ memcpy(&ppid->id, &ocelot->base_mac, ppid->id_len);
+
+ return 0;
+}
+
+static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ /* If the attached PHY device isn't capable of timestamping operations,
+ * use our own (when possible).
+ */
+ if (!phy_has_hwtstamp(dev->phydev) && ocelot->ptp) {
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return ocelot_hwstamp_set(ocelot, port, ifr);
+ case SIOCGHWTSTAMP:
+ return ocelot_hwstamp_get(ocelot, port, ifr);
+ }
+ }
+
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
+}
+
+static const struct net_device_ops ocelot_port_netdev_ops = {
+ .ndo_open = ocelot_port_open,
+ .ndo_stop = ocelot_port_stop,
+ .ndo_start_xmit = ocelot_port_xmit,
+ .ndo_set_rx_mode = ocelot_set_rx_mode,
+ .ndo_get_phys_port_name = ocelot_port_get_phys_port_name,
+ .ndo_set_mac_address = ocelot_port_set_mac_address,
+ .ndo_get_stats64 = ocelot_get_stats64,
+ .ndo_fdb_add = ocelot_port_fdb_add,
+ .ndo_fdb_del = ocelot_port_fdb_del,
+ .ndo_fdb_dump = ocelot_port_fdb_dump,
+ .ndo_vlan_rx_add_vid = ocelot_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = ocelot_vlan_rx_kill_vid,
+ .ndo_set_features = ocelot_set_features,
+ .ndo_get_port_parent_id = ocelot_get_port_parent_id,
+ .ndo_setup_tc = ocelot_setup_tc,
+ .ndo_do_ioctl = ocelot_ioctl,
+};
+
+struct net_device *ocelot_port_to_netdev(struct ocelot *ocelot, int port)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct ocelot_port_private *priv;
+
+ if (!ocelot_port)
+ return NULL;
+
+ priv = container_of(ocelot_port, struct ocelot_port_private, port);
+
+ return priv->dev;
+}
+
+/* Checks if the net_device instance given to us originates from our driver */
+static bool ocelot_netdevice_dev_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &ocelot_port_netdev_ops;
+}
+
+int ocelot_netdev_to_port(struct net_device *dev)
+{
+ struct ocelot_port_private *priv;
+
+ if (!dev || !ocelot_netdevice_dev_check(dev))
+ return -EINVAL;
+
+ priv = netdev_priv(dev);
+
+ return priv->chip_port;
+}
+
+static void ocelot_port_get_strings(struct net_device *netdev, u32 sset,
+ u8 *data)
+{
+ struct ocelot_port_private *priv = netdev_priv(netdev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ ocelot_get_strings(ocelot, port, sset, data);
+}
+
+static void ocelot_port_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ ocelot_get_ethtool_stats(ocelot, port, data);
+}
+
+static int ocelot_port_get_sset_count(struct net_device *dev, int sset)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ return ocelot_get_sset_count(ocelot, port, sset);
+}
+
+static int ocelot_port_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ if (!ocelot->ptp)
+ return ethtool_op_get_ts_info(dev, info);
+
+ return ocelot_get_ts_info(ocelot, port, info);
+}
+
+static const struct ethtool_ops ocelot_ethtool_ops = {
+ .get_strings = ocelot_port_get_strings,
+ .get_ethtool_stats = ocelot_port_get_ethtool_stats,
+ .get_sset_count = ocelot_port_get_sset_count,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_ts_info = ocelot_port_get_ts_info,
+};
+
+static void ocelot_port_attr_stp_state_set(struct ocelot *ocelot, int port,
+ struct switchdev_trans *trans,
+ u8 state)
+{
+ if (switchdev_trans_ph_prepare(trans))
+ return;
+
+ ocelot_bridge_stp_state_set(ocelot, port, state);
+}
+
+static void ocelot_port_attr_ageing_set(struct ocelot *ocelot, int port,
+ unsigned long ageing_clock_t)
+{
+ unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
+ u32 ageing_time = jiffies_to_msecs(ageing_jiffies);
+
+ ocelot_set_ageing_time(ocelot, ageing_time);
+}
+
+static void ocelot_port_attr_mc_set(struct ocelot *ocelot, int port, bool mc)
+{
+ u32 cpu_fwd_mcast = ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA |
+ ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA |
+ ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA;
+ u32 val = 0;
+
+ if (mc)
+ val = cpu_fwd_mcast;
+
+ ocelot_rmw_gix(ocelot, val, cpu_fwd_mcast,
+ ANA_PORT_CPU_FWD_CFG, port);
+}
+
+static int ocelot_port_attr_set(struct net_device *dev,
+ const struct switchdev_attr *attr,
+ struct switchdev_trans *trans)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+ int err = 0;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ ocelot_port_attr_stp_state_set(ocelot, port, trans,
+ attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ ocelot_port_attr_ageing_set(ocelot, port, attr->u.ageing_time);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ ocelot_port_vlan_filtering(ocelot, port,
+ attr->u.vlan_filtering, trans);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
+ ocelot_port_attr_mc_set(ocelot, port, !attr->u.mc_disabled);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int ocelot_port_obj_add_vlan(struct net_device *dev,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ int ret;
+ u16 vid;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ ret = ocelot_vlan_vid_add(dev, vid,
+ vlan->flags & BRIDGE_VLAN_INFO_PVID,
+ vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ocelot_port_vlan_del_vlan(struct net_device *dev,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ int ret;
+ u16 vid;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ ret = ocelot_vlan_vid_del(dev, vid);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ocelot_port_obj_add_mdb(struct net_device *dev,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct switchdev_trans *trans)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int port = priv->chip_port;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ return ocelot_port_mdb_add(ocelot, port, mdb);
+}
+
+static int ocelot_port_obj_del_mdb(struct net_device *dev,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int port = priv->chip_port;
+
+ return ocelot_port_mdb_del(ocelot, port, mdb);
+}
+
+static int ocelot_port_obj_add(struct net_device *dev,
+ const struct switchdev_obj *obj,
+ struct switchdev_trans *trans,
+ struct netlink_ext_ack *extack)
+{
+ int ret = 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ ret = ocelot_port_obj_add_vlan(dev,
+ SWITCHDEV_OBJ_PORT_VLAN(obj),
+ trans);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ ret = ocelot_port_obj_add_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj),
+ trans);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int ocelot_port_obj_del(struct net_device *dev,
+ const struct switchdev_obj *obj)
+{
+ int ret = 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ ret = ocelot_port_vlan_del_vlan(dev,
+ SWITCHDEV_OBJ_PORT_VLAN(obj));
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ ret = ocelot_port_obj_del_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj));
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int ocelot_netdevice_port_event(struct net_device *dev,
+ unsigned long event,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int port = priv->chip_port;
+ int err = 0;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking) {
+ err = ocelot_port_bridge_join(ocelot, port,
+ info->upper_dev);
+ } else {
+ err = ocelot_port_bridge_leave(ocelot, port,
+ info->upper_dev);
+ }
+ }
+ if (netif_is_lag_master(info->upper_dev)) {
+ if (info->linking)
+ err = ocelot_port_lag_join(ocelot, port,
+ info->upper_dev);
+ else
+ ocelot_port_lag_leave(ocelot, port,
+ info->upper_dev);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static int ocelot_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ int ret = 0;
+
+ if (event == NETDEV_PRECHANGEUPPER &&
+ ocelot_netdevice_dev_check(dev) &&
+ netif_is_lag_master(info->upper_dev)) {
+ struct netdev_lag_upper_info *lag_upper_info = info->upper_info;
+ struct netlink_ext_ack *extack;
+
+ if (lag_upper_info &&
+ lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ extack = netdev_notifier_info_to_extack(&info->info);
+ NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
+
+ ret = -EINVAL;
+ goto notify;
+ }
+ }
+
+ if (netif_is_lag_master(dev)) {
+ struct net_device *slave;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(dev, slave, iter) {
+ ret = ocelot_netdevice_port_event(slave, event, info);
+ if (ret)
+ goto notify;
+ }
+ } else {
+ ret = ocelot_netdevice_port_event(dev, event, info);
+ }
+
+notify:
+ return notifier_from_errno(ret);
+}
+
+struct notifier_block ocelot_netdevice_nb __read_mostly = {
+ .notifier_call = ocelot_netdevice_event,
+};
+
+static int ocelot_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ ocelot_netdevice_dev_check,
+ ocelot_port_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ return NOTIFY_DONE;
+}
+
+struct notifier_block ocelot_switchdev_nb __read_mostly = {
+ .notifier_call = ocelot_switchdev_event,
+};
+
+static int ocelot_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ /* Blocking events. */
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ ocelot_netdevice_dev_check,
+ ocelot_port_obj_add);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ ocelot_netdevice_dev_check,
+ ocelot_port_obj_del);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ ocelot_netdevice_dev_check,
+ ocelot_port_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ return NOTIFY_DONE;
+}
+
+struct notifier_block ocelot_switchdev_blocking_nb __read_mostly = {
+ .notifier_call = ocelot_switchdev_blocking_event,
+};
+
+int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
+ struct phy_device *phy)
+{
+ struct ocelot_port_private *priv;
+ struct ocelot_port *ocelot_port;
+ struct net_device *dev;
+ int err;
+
+ dev = alloc_etherdev(sizeof(struct ocelot_port_private));
+ if (!dev)
+ return -ENOMEM;
+ SET_NETDEV_DEV(dev, ocelot->dev);
+ priv = netdev_priv(dev);
+ priv->dev = dev;
+ priv->phy = phy;
+ priv->chip_port = port;
+ ocelot_port = &priv->port;
+ ocelot_port->ocelot = ocelot;
+ ocelot_port->target = target;
+ ocelot->ports[port] = ocelot_port;
+
+ dev->netdev_ops = &ocelot_port_netdev_ops;
+ dev->ethtool_ops = &ocelot_ethtool_ops;
+
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXFCS |
+ NETIF_F_HW_TC;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
+
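+	/* Each port derives its MAC address from the switch base MAC by
+	 * adding the port index to the last byte (no carry into the higher
+	 * bytes).
+	 */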
+ memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN);
+ dev->dev_addr[ETH_ALEN - 1] += port;
+ ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, ocelot_port->pvid,
+ ENTRYTYPE_LOCKED);
+
+ ocelot_init_port(ocelot, port);
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(ocelot->dev, "register_netdev failed\n");
+ free_netdev(dev);
+ }
+
+ return err;
+}
diff --git a/drivers/net/ethernet/mscc/ocelot_police.c b/drivers/net/ethernet/mscc/ocelot_police.c
new file mode 100644
index 000000000..6f5068c10
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_police.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2019 Microsemi Corporation
+ */
+
+#include <soc/mscc/ocelot.h>
+#include "ocelot_police.h"
+
+/* Types for ANA:POL[0-192]:POL_MODE_CFG.FRM_MODE */
+#define POL_MODE_LINERATE 0 /* Incl IPG. Unit: 33 1/3 kbps, 4096 bytes */
+#define POL_MODE_DATARATE 1 /* Excl IPG. Unit: 33 1/3 kbps, 4096 bytes */
+#define POL_MODE_FRMRATE_HI 2 /* Unit: 33 1/3 fps, 32.8 frames */
+#define POL_MODE_FRMRATE_LO 3 /* Unit: 1/3 fps, 0.3 frames */
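+/* For example, in line/data rate mode a request of pir = 1000 kbps is
+ * programmed as DIV_ROUND_UP(1000, 100) * 3 = 30 units of 33 1/3 kbps,
+ * and a burst of 10000 bytes as DIV_ROUND_UP(10000, 4096) = 3 units of
+ * 4096 bytes (see qos_policer_conf_set() below).
+ */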
+
+/* Policer indexes */
+#define POL_IX_PORT 0 /* 0-11 : Port policers */
+#define POL_IX_QUEUE 32 /* 32-127 : Queue policers */
+
+/* Default policer order */
+#define POL_ORDER 0x1d3 /* Ocelot policer order: Serial (QoS -> Port -> VCAP) */
+
+int qos_policer_conf_set(struct ocelot *ocelot, int port, u32 pol_ix,
+ struct qos_policer_conf *conf)
+{
+ u32 cf = 0, cir_ena = 0, frm_mode = POL_MODE_LINERATE;
+ u32 cir = 0, cbs = 0, pir = 0, pbs = 0;
+	bool cir_discard = false, pir_discard = false;
+ u32 pbs_max = 0, cbs_max = 0;
+ u8 ipg = 20;
+ u32 value;
+
+ pir = conf->pir;
+ pbs = conf->pbs;
+
+ switch (conf->mode) {
+ case MSCC_QOS_RATE_MODE_LINE:
+ case MSCC_QOS_RATE_MODE_DATA:
+ if (conf->mode == MSCC_QOS_RATE_MODE_LINE) {
+ frm_mode = POL_MODE_LINERATE;
+ ipg = min_t(u8, GENMASK(4, 0), conf->ipg);
+ } else {
+ frm_mode = POL_MODE_DATARATE;
+ }
+ if (conf->dlb) {
+ cir_ena = 1;
+ cir = conf->cir;
+ cbs = conf->cbs;
+ if (cir == 0 && cbs == 0) {
+ /* Discard cir frames */
+ cir_discard = 1;
+ } else {
+ cir = DIV_ROUND_UP(cir, 100);
+ cir *= 3; /* 33 1/3 kbps */
+ cbs = DIV_ROUND_UP(cbs, 4096);
+ cbs = (cbs ? cbs : 1); /* No zero burst size */
+ cbs_max = 60; /* Limit burst size */
+ cf = conf->cf;
+ if (cf)
+ pir += conf->cir;
+ }
+ }
+ if (pir == 0 && pbs == 0) {
+ /* Discard PIR frames */
+ pir_discard = 1;
+ } else {
+ pir = DIV_ROUND_UP(pir, 100);
+ pir *= 3; /* 33 1/3 kbps */
+ pbs = DIV_ROUND_UP(pbs, 4096);
+ pbs = (pbs ? pbs : 1); /* No zero burst size */
+ pbs_max = 60; /* Limit burst size */
+ }
+ break;
+ case MSCC_QOS_RATE_MODE_FRAME:
+ if (pir >= 100) {
+ frm_mode = POL_MODE_FRMRATE_HI;
+ pir = DIV_ROUND_UP(pir, 100);
+ pir *= 3; /* 33 1/3 fps */
+ pbs = (pbs * 10) / 328; /* 32.8 frames */
+ pbs = (pbs ? pbs : 1); /* No zero burst size */
+ pbs_max = GENMASK(6, 0); /* Limit burst size */
+ } else {
+ frm_mode = POL_MODE_FRMRATE_LO;
+ if (pir == 0 && pbs == 0) {
+ /* Discard all frames */
+ pir_discard = 1;
+ cir_discard = 1;
+ } else {
+ pir *= 3; /* 1/3 fps */
+ pbs = (pbs * 10) / 3; /* 0.3 frames */
+ pbs = (pbs ? pbs : 1); /* No zero burst size */
+ pbs_max = 61; /* Limit burst size */
+ }
+ }
+ break;
+ default: /* MSCC_QOS_RATE_MODE_DISABLED */
+ /* Disable policer using maximum rate and zero burst */
+ pir = GENMASK(15, 0);
+ pbs = 0;
+ break;
+ }
+
+ /* Check limits */
+ if (pir > GENMASK(15, 0)) {
+ dev_err(ocelot->dev, "Invalid pir for port %d: %u (max %lu)\n",
+ port, pir, GENMASK(15, 0));
+ return -EINVAL;
+ }
+
+ if (cir > GENMASK(15, 0)) {
+ dev_err(ocelot->dev, "Invalid cir for port %d: %u (max %lu)\n",
+ port, cir, GENMASK(15, 0));
+ return -EINVAL;
+ }
+
+ if (pbs > pbs_max) {
+ dev_err(ocelot->dev, "Invalid pbs for port %d: %u (max %u)\n",
+ port, pbs, pbs_max);
+ return -EINVAL;
+ }
+
+ if (cbs > cbs_max) {
+ dev_err(ocelot->dev, "Invalid cbs for port %d: %u (max %u)\n",
+ port, cbs, cbs_max);
+ return -EINVAL;
+ }
+
+ value = (ANA_POL_MODE_CFG_IPG_SIZE(ipg) |
+ ANA_POL_MODE_CFG_FRM_MODE(frm_mode) |
+ (cf ? ANA_POL_MODE_CFG_DLB_COUPLED : 0) |
+ (cir_ena ? ANA_POL_MODE_CFG_CIR_ENA : 0) |
+ ANA_POL_MODE_CFG_OVERSHOOT_ENA);
+
+ ocelot_write_gix(ocelot, value, ANA_POL_MODE_CFG, pol_ix);
+
+ ocelot_write_gix(ocelot,
+ ANA_POL_PIR_CFG_PIR_RATE(pir) |
+ ANA_POL_PIR_CFG_PIR_BURST(pbs),
+ ANA_POL_PIR_CFG, pol_ix);
+
+ ocelot_write_gix(ocelot,
+ (pir_discard ? GENMASK(22, 0) : 0),
+ ANA_POL_PIR_STATE, pol_ix);
+
+ ocelot_write_gix(ocelot,
+ ANA_POL_CIR_CFG_CIR_RATE(cir) |
+ ANA_POL_CIR_CFG_CIR_BURST(cbs),
+ ANA_POL_CIR_CFG, pol_ix);
+
+ ocelot_write_gix(ocelot,
+ (cir_discard ? GENMASK(22, 0) : 0),
+ ANA_POL_CIR_STATE, pol_ix);
+
+ return 0;
+}
+
+int ocelot_port_policer_add(struct ocelot *ocelot, int port,
+ struct ocelot_policer *pol)
+{
+ struct qos_policer_conf pp = { 0 };
+ int err;
+
+ if (!pol)
+ return -EINVAL;
+
+ pp.mode = MSCC_QOS_RATE_MODE_DATA;
+ pp.pir = pol->rate;
+ pp.pbs = pol->burst;
+
+ dev_dbg(ocelot->dev, "%s: port %u pir %u kbps, pbs %u bytes\n",
+ __func__, port, pp.pir, pp.pbs);
+
+ err = qos_policer_conf_set(ocelot, port, POL_IX_PORT + port, &pp);
+ if (err)
+ return err;
+
+ ocelot_rmw_gix(ocelot,
+ ANA_PORT_POL_CFG_PORT_POL_ENA |
+ ANA_PORT_POL_CFG_POL_ORDER(POL_ORDER),
+ ANA_PORT_POL_CFG_PORT_POL_ENA |
+ ANA_PORT_POL_CFG_POL_ORDER_M,
+ ANA_PORT_POL_CFG, port);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_port_policer_add);
+
+int ocelot_port_policer_del(struct ocelot *ocelot, int port)
+{
+ struct qos_policer_conf pp = { 0 };
+ int err;
+
+ dev_dbg(ocelot->dev, "%s: port %u\n", __func__, port);
+
+ pp.mode = MSCC_QOS_RATE_MODE_DISABLED;
+
+ err = qos_policer_conf_set(ocelot, port, POL_IX_PORT + port, &pp);
+ if (err)
+ return err;
+
+ ocelot_rmw_gix(ocelot,
+ ANA_PORT_POL_CFG_POL_ORDER(POL_ORDER),
+ ANA_PORT_POL_CFG_PORT_POL_ENA |
+ ANA_PORT_POL_CFG_POL_ORDER_M,
+ ANA_PORT_POL_CFG, port);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_port_policer_del);
diff --git a/drivers/net/ethernet/mscc/ocelot_police.h b/drivers/net/ethernet/mscc/ocelot_police.h
new file mode 100644
index 000000000..7adb05f71
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_police.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/* Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2019 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_POLICE_H_
+#define _MSCC_OCELOT_POLICE_H_
+
+#include "ocelot.h"
+
+enum mscc_qos_rate_mode {
+ MSCC_QOS_RATE_MODE_DISABLED, /* Policer/shaper disabled */
+ MSCC_QOS_RATE_MODE_LINE, /* Measure line rate in kbps incl. IPG */
+ MSCC_QOS_RATE_MODE_DATA, /* Measures data rate in kbps excl. IPG */
+ MSCC_QOS_RATE_MODE_FRAME, /* Measures frame rate in fps */
+ __MSCC_QOS_RATE_MODE_END,
+ NUM_MSCC_QOS_RATE_MODE = __MSCC_QOS_RATE_MODE_END,
+ MSCC_QOS_RATE_MODE_MAX = __MSCC_QOS_RATE_MODE_END - 1,
+};
+
+struct qos_policer_conf {
+ enum mscc_qos_rate_mode mode;
+	bool dlb; /* Enable DLB (dual leaky bucket) mode */
+ bool cf; /* Coupling flag (ignored in SLB mode) */
+ u32 cir; /* CIR in kbps/fps (ignored in SLB mode) */
+ u32 cbs; /* CBS in bytes/frames (ignored in SLB mode) */
+ u32 pir; /* PIR in kbps/fps */
+ u32 pbs; /* PBS in bytes/frames */
+ u8 ipg; /* Size of IPG when MSCC_QOS_RATE_MODE_LINE is chosen */
+};
+
+int qos_policer_conf_set(struct ocelot *ocelot, int port, u32 pol_ix,
+ struct qos_policer_conf *conf);
+
+#endif /* _MSCC_OCELOT_POLICE_H_ */
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
new file mode 100644
index 000000000..a33ab315c
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
@@ -0,0 +1,351 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Microsemi Ocelot PTP clock driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ * Copyright 2020 NXP
+ */
+#include <soc/mscc/ocelot_ptp.h>
+#include <soc/mscc/ocelot_sys.h>
+#include <soc/mscc/ocelot.h>
+
+int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
+ unsigned long flags;
+ time64_t s;
+ u32 val;
+ s64 ns;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
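+	/* Issue a SAVE action on the TOD access pin so that the current
+	 * PTP time is latched into the TOD registers, then read it back.
+	 */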
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_SAVE);
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ s = ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_MSB, TOD_ACC_PIN) & 0xffff;
+ s <<= 32;
+ s += ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
+ ns = ocelot_read_rix(ocelot, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+
+ /* Deal with negative values: a nanosecond count in the range
+ * 0x3ffffff0..0x3fffffff encodes -16..-1 ns, so fold it back into the
+ * previous second.
+ */
+ if (ns >= 0x3ffffff0 && ns <= 0x3fffffff) {
+ s--;
+ ns &= 0xf;
+ ns += 999999984;
+ }
+
+ set_normalized_timespec64(ts, s, ns);
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_ptp_gettime64);
+
+int ocelot_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_IDLE);
+
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ ocelot_write_rix(ocelot, lower_32_bits(ts->tv_sec), PTP_PIN_TOD_SEC_LSB,
+ TOD_ACC_PIN);
+ ocelot_write_rix(ocelot, upper_32_bits(ts->tv_sec), PTP_PIN_TOD_SEC_MSB,
+ TOD_ACC_PIN);
+ ocelot_write_rix(ocelot, ts->tv_nsec, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_LOAD);
+
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_ptp_settime64);
+
+int ocelot_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) {
+ struct ocelot *ocelot = container_of(ptp, struct ocelot,
+ ptp_info);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK |
+ PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_IDLE);
+
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ ocelot_write_rix(ocelot, 0, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
+ ocelot_write_rix(ocelot, 0, PTP_PIN_TOD_SEC_MSB, TOD_ACC_PIN);
+ ocelot_write_rix(ocelot, delta, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK |
+ PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_DELTA);
+
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ } else {
+ /* Fall back to ocelot_ptp_settime64(), which is not exact. */
+ struct timespec64 ts;
+ u64 now;
+
+ ocelot_ptp_gettime64(ptp, &ts);
+
+ now = ktime_to_ns(timespec64_to_ktime(ts));
+ ts = ns_to_timespec64(now + delta);
+
+ ocelot_ptp_settime64(ptp, &ts);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_ptp_adjtime);
+
+int ocelot_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
+ u32 unit = 0, direction = 0;
+ unsigned long flags;
+ u64 adj = 0;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ if (!scaled_ppm)
+ goto disable_adj;
+
+ if (scaled_ppm < 0) {
+ direction = PTP_CFG_CLK_ADJ_CFG_DIR;
+ scaled_ppm = -scaled_ppm;
+ }
+
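+ /* scaled_ppm is the requested frequency offset in ppm with a 16 bit
+ * fractional part. The divisions below yield the interval, in ps (or
+ * ns after the later rescale), after which a clock running at that
+ * offset drifts by 1 ns.
+ */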
+ adj = PSEC_PER_SEC << 16;
+ do_div(adj, scaled_ppm);
+ do_div(adj, 1000);
+
+ /* If the adjustment value is too large, use ns instead */
+ if (adj >= (1L << 30)) {
+ unit = PTP_CFG_CLK_ADJ_FREQ_NS;
+ do_div(adj, 1000);
+ }
+
+ /* Still too big */
+ if (adj >= (1L << 30))
+ goto disable_adj;
+
+ ocelot_write(ocelot, unit | adj, PTP_CLK_CFG_ADJ_FREQ);
+ ocelot_write(ocelot, PTP_CFG_CLK_ADJ_CFG_ENA | direction,
+ PTP_CLK_CFG_ADJ_CFG);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ return 0;
+
+disable_adj:
+ ocelot_write(ocelot, 0, PTP_CLK_CFG_ADJ_CFG);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_ptp_adjfine);
+
+int ocelot_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_PEROUT:
+ break;
+ case PTP_PF_EXTTS:
+ case PTP_PF_PHYSYNC:
+ return -1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_ptp_verify);
+
+int ocelot_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
+ struct timespec64 ts_phase, ts_period;
+ enum ocelot_ptp_pins ptp_pin;
+ unsigned long flags;
+ bool pps = false;
+ int pin = -1;
+ s64 wf_high;
+ s64 wf_low;
+ u32 val;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE |
+ PTP_PEROUT_PHASE))
+ return -EOPNOTSUPP;
+
+ pin = ptp_find_pin(ocelot->ptp_clock, PTP_PF_PEROUT,
+ rq->perout.index);
+ if (pin == 0)
+ ptp_pin = PTP_PIN_0;
+ else if (pin == 1)
+ ptp_pin = PTP_PIN_1;
+ else if (pin == 2)
+ ptp_pin = PTP_PIN_2;
+ else if (pin == 3)
+ ptp_pin = PTP_PIN_3;
+ else
+ return -EBUSY;
+
+ ts_period.tv_sec = rq->perout.period.sec;
+ ts_period.tv_nsec = rq->perout.period.nsec;
+
+ if (ts_period.tv_sec == 1 && ts_period.tv_nsec == 0)
+ pps = true;
+
+ /* Handle turning off */
+ if (!on) {
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+ val = PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_IDLE);
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, ptp_pin);
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ break;
+ }
+
+ if (rq->perout.flags & PTP_PEROUT_PHASE) {
+ ts_phase.tv_sec = rq->perout.phase.sec;
+ ts_phase.tv_nsec = rq->perout.phase.nsec;
+ } else {
+ /* Compatibility: derive the phase from the legacy start time */
+ ts_phase.tv_sec = rq->perout.start.sec;
+ ts_phase.tv_nsec = rq->perout.start.nsec;
+ }
+ if (ts_phase.tv_sec || (ts_phase.tv_nsec && !pps)) {
+ dev_warn(ocelot->dev,
+ "Absolute start time not supported!\n");
+ dev_warn(ocelot->dev,
+ "Accept nsec for PPS phase adjustment, otherwise start time should be 0 0.\n");
+ return -EINVAL;
+ }
+
+ /* Calculate waveform high and low times */
+ if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) {
+ struct timespec64 ts_on;
+
+ ts_on.tv_sec = rq->perout.on.sec;
+ ts_on.tv_nsec = rq->perout.on.nsec;
+
+ wf_high = timespec64_to_ns(&ts_on);
+ } else {
+ if (pps) {
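+ /* No duty cycle requested: default to a
+ * 1000 ns high time for the PPS output.
+ */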
+ wf_high = 1000;
+ } else {
+ wf_high = timespec64_to_ns(&ts_period);
+ wf_high = div_s64(wf_high, 2);
+ }
+ }
+
+ wf_low = timespec64_to_ns(&ts_period);
+ wf_low -= wf_high;
+
+ /* Handle PPS request */
+ if (pps) {
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+ ocelot_write_rix(ocelot, ts_phase.tv_nsec,
+ PTP_PIN_WF_LOW_PERIOD, ptp_pin);
+ ocelot_write_rix(ocelot, wf_high,
+ PTP_PIN_WF_HIGH_PERIOD, ptp_pin);
+ val = PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_CLOCK);
+ val |= PTP_PIN_CFG_SYNC;
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, ptp_pin);
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ break;
+ }
+
+ /* Handle periodic clock */
+ if (wf_high > 0x3fffffff || wf_high <= 0x6)
+ return -EINVAL;
+ if (wf_low > 0x3fffffff || wf_low <= 0x6)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+ ocelot_write_rix(ocelot, wf_low, PTP_PIN_WF_LOW_PERIOD,
+ ptp_pin);
+ ocelot_write_rix(ocelot, wf_high, PTP_PIN_WF_HIGH_PERIOD,
+ ptp_pin);
+ val = PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_CLOCK);
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, ptp_pin);
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_ptp_enable);
+
+int ocelot_init_timestamp(struct ocelot *ocelot,
+ const struct ptp_clock_info *info)
+{
+ struct ptp_clock *ptp_clock;
+ int i;
+
+ ocelot->ptp_info = *info;
+
+ for (i = 0; i < OCELOT_PTP_PINS_NUM; i++) {
+ struct ptp_pin_desc *p = &ocelot->ptp_pins[i];
+
+ snprintf(p->name, sizeof(p->name), "switch_1588_dat%d", i);
+ p->index = i;
+ p->func = PTP_PF_NONE;
+ }
+
+ ocelot->ptp_info.pin_config = &ocelot->ptp_pins[0];
+
+ ptp_clock = ptp_clock_register(&ocelot->ptp_info, ocelot->dev);
+ if (IS_ERR(ptp_clock))
+ return PTR_ERR(ptp_clock);
+ /* Check if PHC support is missing at the configuration level */
+ if (!ptp_clock)
+ return 0;
+
+ ocelot->ptp_clock = ptp_clock;
+
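+ /* Use a 30 bit timestamp width, which is enough to represent the
+ * nanoseconds part of one full second.
+ */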
+ ocelot_write(ocelot, SYS_PTP_CFG_PTP_STAMP_WID(30), SYS_PTP_CFG);
+ ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_LOW);
+ ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_HIGH);
+
+ ocelot_write(ocelot, PTP_CFG_MISC_PTP_EN, PTP_CFG_MISC);
+
+ /* There is no device reconfiguration; PTP Rx timestamping is always
+ * enabled.
+ */
+ ocelot->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_init_timestamp);
+
+int ocelot_deinit_timestamp(struct ocelot *ocelot)
+{
+ if (ocelot->ptp_clock)
+ ptp_clock_unregister(ocelot->ptp_clock);
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_deinit_timestamp);
diff --git a/drivers/net/ethernet/mscc/ocelot_qs.h b/drivers/net/ethernet/mscc/ocelot_qs.h
new file mode 100644
index 000000000..d18ae726c
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_qs.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_QS_H_
+#define _MSCC_OCELOT_QS_H_
+
+/* TODO handle BE */
+#define XTR_EOF_0 0x00000080U
+#define XTR_EOF_1 0x01000080U
+#define XTR_EOF_2 0x02000080U
+#define XTR_EOF_3 0x03000080U
+#define XTR_PRUNED 0x04000080U
+#define XTR_ABORT 0x05000080U
+#define XTR_ESCAPE 0x06000080U
+#define XTR_NOT_READY 0x07000080U
+#define XTR_VALID_BYTES(x) (4 - (((x) >> 24) & 3))
+
+#define QS_XTR_GRP_CFG_RSZ 0x4
+
+#define QS_XTR_GRP_CFG_MODE(x) (((x) << 2) & GENMASK(3, 2))
+#define QS_XTR_GRP_CFG_MODE_M GENMASK(3, 2)
+#define QS_XTR_GRP_CFG_MODE_X(x) (((x) & GENMASK(3, 2)) >> 2)
+#define QS_XTR_GRP_CFG_STATUS_WORD_POS BIT(1)
+#define QS_XTR_GRP_CFG_BYTE_SWAP BIT(0)
+
+#define QS_XTR_RD_RSZ 0x4
+
+#define QS_XTR_FRM_PRUNING_RSZ 0x4
+
+#define QS_XTR_CFG_DP_WM(x) (((x) << 5) & GENMASK(7, 5))
+#define QS_XTR_CFG_DP_WM_M GENMASK(7, 5)
+#define QS_XTR_CFG_DP_WM_X(x) (((x) & GENMASK(7, 5)) >> 5)
+#define QS_XTR_CFG_SCH_WM(x) (((x) << 2) & GENMASK(4, 2))
+#define QS_XTR_CFG_SCH_WM_M GENMASK(4, 2)
+#define QS_XTR_CFG_SCH_WM_X(x) (((x) & GENMASK(4, 2)) >> 2)
+#define QS_XTR_CFG_OFLW_ERR_STICKY(x) ((x) & GENMASK(1, 0))
+#define QS_XTR_CFG_OFLW_ERR_STICKY_M GENMASK(1, 0)
+
+#define QS_INJ_GRP_CFG_RSZ 0x4
+
+#define QS_INJ_GRP_CFG_MODE(x) (((x) << 2) & GENMASK(3, 2))
+#define QS_INJ_GRP_CFG_MODE_M GENMASK(3, 2)
+#define QS_INJ_GRP_CFG_MODE_X(x) (((x) & GENMASK(3, 2)) >> 2)
+#define QS_INJ_GRP_CFG_BYTE_SWAP BIT(0)
+
+#define QS_INJ_WR_RSZ 0x4
+
+#define QS_INJ_CTRL_RSZ 0x4
+
+#define QS_INJ_CTRL_GAP_SIZE(x) (((x) << 21) & GENMASK(24, 21))
+#define QS_INJ_CTRL_GAP_SIZE_M GENMASK(24, 21)
+#define QS_INJ_CTRL_GAP_SIZE_X(x) (((x) & GENMASK(24, 21)) >> 21)
+#define QS_INJ_CTRL_ABORT BIT(20)
+#define QS_INJ_CTRL_EOF BIT(19)
+#define QS_INJ_CTRL_SOF BIT(18)
+#define QS_INJ_CTRL_VLD_BYTES(x) (((x) << 16) & GENMASK(17, 16))
+#define QS_INJ_CTRL_VLD_BYTES_M GENMASK(17, 16)
+#define QS_INJ_CTRL_VLD_BYTES_X(x) (((x) & GENMASK(17, 16)) >> 16)
+
+#define QS_INJ_STATUS_WMARK_REACHED(x) (((x) << 4) & GENMASK(5, 4))
+#define QS_INJ_STATUS_WMARK_REACHED_M GENMASK(5, 4)
+#define QS_INJ_STATUS_WMARK_REACHED_X(x) (((x) & GENMASK(5, 4)) >> 4)
+#define QS_INJ_STATUS_FIFO_RDY(x) (((x) << 2) & GENMASK(3, 2))
+#define QS_INJ_STATUS_FIFO_RDY_M GENMASK(3, 2)
+#define QS_INJ_STATUS_FIFO_RDY_X(x) (((x) & GENMASK(3, 2)) >> 2)
+#define QS_INJ_STATUS_INJ_IN_PROGRESS(x) ((x) & GENMASK(1, 0))
+#define QS_INJ_STATUS_INJ_IN_PROGRESS_M GENMASK(1, 0)
+
+#define QS_INJ_ERR_RSZ 0x4
+
+#define QS_INJ_ERR_ABORT_ERR_STICKY BIT(1)
+#define QS_INJ_ERR_WR_ERR_STICKY BIT(0)
+
+#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_rew.h b/drivers/net/ethernet/mscc/ocelot_rew.h
new file mode 100644
index 000000000..210914b7e
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_rew.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_REW_H_
+#define _MSCC_OCELOT_REW_H_
+
+#define REW_PORT_VLAN_CFG_GSZ 0x80
+
+#define REW_PORT_VLAN_CFG_PORT_TPID(x) (((x) << 16) & GENMASK(31, 16))
+#define REW_PORT_VLAN_CFG_PORT_TPID_M GENMASK(31, 16)
+#define REW_PORT_VLAN_CFG_PORT_TPID_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define REW_PORT_VLAN_CFG_PORT_DEI BIT(15)
+#define REW_PORT_VLAN_CFG_PORT_PCP(x) (((x) << 12) & GENMASK(14, 12))
+#define REW_PORT_VLAN_CFG_PORT_PCP_M GENMASK(14, 12)
+#define REW_PORT_VLAN_CFG_PORT_PCP_X(x) (((x) & GENMASK(14, 12)) >> 12)
+#define REW_PORT_VLAN_CFG_PORT_VID(x) ((x) & GENMASK(11, 0))
+#define REW_PORT_VLAN_CFG_PORT_VID_M GENMASK(11, 0)
+
+#define REW_TAG_CFG_GSZ 0x80
+
+#define REW_TAG_CFG_TAG_CFG(x) (((x) << 7) & GENMASK(8, 7))
+#define REW_TAG_CFG_TAG_CFG_M GENMASK(8, 7)
+#define REW_TAG_CFG_TAG_CFG_X(x) (((x) & GENMASK(8, 7)) >> 7)
+#define REW_TAG_CFG_TAG_TPID_CFG(x) (((x) << 5) & GENMASK(6, 5))
+#define REW_TAG_CFG_TAG_TPID_CFG_M GENMASK(6, 5)
+#define REW_TAG_CFG_TAG_TPID_CFG_X(x) (((x) & GENMASK(6, 5)) >> 5)
+#define REW_TAG_CFG_TAG_VID_CFG BIT(4)
+#define REW_TAG_CFG_TAG_PCP_CFG(x) (((x) << 2) & GENMASK(3, 2))
+#define REW_TAG_CFG_TAG_PCP_CFG_M GENMASK(3, 2)
+#define REW_TAG_CFG_TAG_PCP_CFG_X(x) (((x) & GENMASK(3, 2)) >> 2)
+#define REW_TAG_CFG_TAG_DEI_CFG(x) ((x) & GENMASK(1, 0))
+#define REW_TAG_CFG_TAG_DEI_CFG_M GENMASK(1, 0)
+
+#define REW_PORT_CFG_GSZ 0x80
+
+#define REW_PORT_CFG_ES0_EN BIT(5)
+#define REW_PORT_CFG_FCS_UPDATE_NONCPU_CFG(x) (((x) << 3) & GENMASK(4, 3))
+#define REW_PORT_CFG_FCS_UPDATE_NONCPU_CFG_M GENMASK(4, 3)
+#define REW_PORT_CFG_FCS_UPDATE_NONCPU_CFG_X(x) (((x) & GENMASK(4, 3)) >> 3)
+#define REW_PORT_CFG_FCS_UPDATE_CPU_ENA BIT(2)
+#define REW_PORT_CFG_FLUSH_ENA BIT(1)
+#define REW_PORT_CFG_AGE_DIS BIT(0)
+
+#define REW_DSCP_CFG_GSZ 0x80
+
+#define REW_PCP_DEI_QOS_MAP_CFG_GSZ 0x80
+#define REW_PCP_DEI_QOS_MAP_CFG_RSZ 0x4
+
+#define REW_PCP_DEI_QOS_MAP_CFG_DEI_QOS_VAL BIT(3)
+#define REW_PCP_DEI_QOS_MAP_CFG_PCP_QOS_VAL(x) ((x) & GENMASK(2, 0))
+#define REW_PCP_DEI_QOS_MAP_CFG_PCP_QOS_VAL_M GENMASK(2, 0)
+
+#define REW_PTP_CFG_GSZ 0x80
+
+#define REW_PTP_CFG_PTP_BACKPLANE_MODE BIT(7)
+#define REW_PTP_CFG_GP_CFG_UNUSED(x) (((x) << 3) & GENMASK(6, 3))
+#define REW_PTP_CFG_GP_CFG_UNUSED_M GENMASK(6, 3)
+#define REW_PTP_CFG_GP_CFG_UNUSED_X(x) (((x) & GENMASK(6, 3)) >> 3)
+#define REW_PTP_CFG_PTP_1STEP_DIS BIT(2)
+#define REW_PTP_CFG_PTP_2STEP_DIS BIT(1)
+#define REW_PTP_CFG_PTP_UDP_KEEP BIT(0)
+
+#define REW_PTP_DLY1_CFG_GSZ 0x80
+
+#define REW_RED_TAG_CFG_GSZ 0x80
+
+#define REW_RED_TAG_CFG_RED_TAG_CFG BIT(0)
+
+#define REW_DSCP_REMAP_DP1_CFG_RSZ 0x4
+
+#define REW_DSCP_REMAP_CFG_RSZ 0x4
+
+#define REW_REW_STICKY_ES0_TAGB_PUSH_FAILED BIT(0)
+
+#define REW_PPT_RSZ 0x4
+
+#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c
new file mode 100644
index 000000000..118572590
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.c
@@ -0,0 +1,1357 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Microsemi Ocelot Switch driver
+ * Copyright (c) 2019 Microsemi Corporation
+ */
+
+#include <linux/iopoll.h>
+#include <linux/proc_fs.h>
+
+#include <soc/mscc/ocelot_vcap.h>
+#include "ocelot_police.h"
+#include "ocelot_vcap.h"
+
+#define ENTRY_WIDTH 32
+
+enum vcap_sel {
+ VCAP_SEL_ENTRY = 0x1,
+ VCAP_SEL_ACTION = 0x2,
+ VCAP_SEL_COUNTER = 0x4,
+ VCAP_SEL_ALL = 0x7,
+};
+
+enum vcap_cmd {
+ VCAP_CMD_WRITE = 0, /* Copy from Cache to TCAM */
+ VCAP_CMD_READ = 1, /* Copy from TCAM to Cache */
+ VCAP_CMD_MOVE_UP = 2, /* Move <count> up */
+ VCAP_CMD_MOVE_DOWN = 3, /* Move <count> down */
+ VCAP_CMD_INITIALIZE = 4, /* Write all (from cache) */
+};
+
+#define VCAP_ENTRY_WIDTH 12 /* Max entry width (32bit words) */
+#define VCAP_COUNTER_WIDTH 4 /* Max counter width (32bit words) */
+
+struct vcap_data {
+ u32 entry[VCAP_ENTRY_WIDTH]; /* ENTRY_DAT */
+ u32 mask[VCAP_ENTRY_WIDTH]; /* MASK_DAT */
+ u32 action[VCAP_ENTRY_WIDTH]; /* ACTION_DAT */
+ u32 counter[VCAP_COUNTER_WIDTH]; /* CNT_DAT */
+ u32 tg; /* TG_DAT */
+ u32 type; /* Action type */
+ u32 tg_sw; /* Current type-group */
+ u32 cnt; /* Current counter */
+ u32 key_offset; /* Current entry offset */
+ u32 action_offset; /* Current action offset */
+ u32 counter_offset; /* Current counter offset */
+ u32 tg_value; /* Current type-group value */
+ u32 tg_mask; /* Current type-group mask */
+};
+
+static u32 vcap_read_update_ctrl(struct ocelot *ocelot,
+ const struct vcap_props *vcap)
+{
+ return ocelot_target_read(ocelot, vcap->target, VCAP_CORE_UPDATE_CTRL);
+}
+
+static void vcap_cmd(struct ocelot *ocelot, const struct vcap_props *vcap,
+ u16 ix, int cmd, int sel)
+{
+ u32 value = (VCAP_CORE_UPDATE_CTRL_UPDATE_CMD(cmd) |
+ VCAP_CORE_UPDATE_CTRL_UPDATE_ADDR(ix) |
+ VCAP_CORE_UPDATE_CTRL_UPDATE_SHOT);
+
+ if ((sel & VCAP_SEL_ENTRY) && ix >= vcap->entry_count)
+ return;
+
+ if (!(sel & VCAP_SEL_ENTRY))
+ value |= VCAP_CORE_UPDATE_CTRL_UPDATE_ENTRY_DIS;
+
+ if (!(sel & VCAP_SEL_ACTION))
+ value |= VCAP_CORE_UPDATE_CTRL_UPDATE_ACTION_DIS;
+
+ if (!(sel & VCAP_SEL_COUNTER))
+ value |= VCAP_CORE_UPDATE_CTRL_UPDATE_CNT_DIS;
+
+ ocelot_target_write(ocelot, vcap->target, value, VCAP_CORE_UPDATE_CTRL);
+
+ read_poll_timeout(vcap_read_update_ctrl, value,
+ (value & VCAP_CORE_UPDATE_CTRL_UPDATE_SHOT) == 0,
+ 10, 100000, false, ocelot, vcap);
+}
+
+/* Convert from 0-based row to VCAP entry row and run command */
+static void vcap_row_cmd(struct ocelot *ocelot, const struct vcap_props *vcap,
+ u32 row, int cmd, int sel)
+{
+ vcap_cmd(ocelot, vcap, vcap->entry_count - row - 1, cmd, sel);
+}
+
+static void vcap_entry2cache(struct ocelot *ocelot,
+ const struct vcap_props *vcap,
+ struct vcap_data *data)
+{
+ u32 entry_words, i;
+
+ entry_words = DIV_ROUND_UP(vcap->entry_width, ENTRY_WIDTH);
+
+ for (i = 0; i < entry_words; i++) {
+ ocelot_target_write_rix(ocelot, vcap->target, data->entry[i],
+ VCAP_CACHE_ENTRY_DAT, i);
+ ocelot_target_write_rix(ocelot, vcap->target, ~data->mask[i],
+ VCAP_CACHE_MASK_DAT, i);
+ }
+ ocelot_target_write(ocelot, vcap->target, data->tg, VCAP_CACHE_TG_DAT);
+}
+
+static void vcap_cache2entry(struct ocelot *ocelot,
+ const struct vcap_props *vcap,
+ struct vcap_data *data)
+{
+ u32 entry_words, i;
+
+ entry_words = DIV_ROUND_UP(vcap->entry_width, ENTRY_WIDTH);
+
+ for (i = 0; i < entry_words; i++) {
+ data->entry[i] = ocelot_target_read_rix(ocelot, vcap->target,
+ VCAP_CACHE_ENTRY_DAT, i);
+ /* Invert mask */
+ data->mask[i] = ~ocelot_target_read_rix(ocelot, vcap->target,
+ VCAP_CACHE_MASK_DAT, i);
+ }
+ data->tg = ocelot_target_read(ocelot, vcap->target, VCAP_CACHE_TG_DAT);
+}
+
+static void vcap_action2cache(struct ocelot *ocelot,
+ const struct vcap_props *vcap,
+ struct vcap_data *data)
+{
+ u32 action_words, mask;
+ int i, width;
+
+ /* Encode action type */
+ width = vcap->action_type_width;
+ if (width) {
+ mask = GENMASK(width, 0);
+ data->action[0] = ((data->action[0] & ~mask) | data->type);
+ }
+
+ action_words = DIV_ROUND_UP(vcap->action_width, ENTRY_WIDTH);
+
+ for (i = 0; i < action_words; i++)
+ ocelot_target_write_rix(ocelot, vcap->target, data->action[i],
+ VCAP_CACHE_ACTION_DAT, i);
+
+ for (i = 0; i < vcap->counter_words; i++)
+ ocelot_target_write_rix(ocelot, vcap->target, data->counter[i],
+ VCAP_CACHE_CNT_DAT, i);
+}
+
+static void vcap_cache2action(struct ocelot *ocelot,
+ const struct vcap_props *vcap,
+ struct vcap_data *data)
+{
+ u32 action_words;
+ int i, width;
+
+ action_words = DIV_ROUND_UP(vcap->action_width, ENTRY_WIDTH);
+
+ for (i = 0; i < action_words; i++)
+ data->action[i] = ocelot_target_read_rix(ocelot, vcap->target,
+ VCAP_CACHE_ACTION_DAT,
+ i);
+
+ for (i = 0; i < vcap->counter_words; i++)
+ data->counter[i] = ocelot_target_read_rix(ocelot, vcap->target,
+ VCAP_CACHE_CNT_DAT,
+ i);
+
+ /* Extract action type */
+ width = vcap->action_type_width;
+ data->type = (width ? (data->action[0] & GENMASK(width, 0)) : 0);
+}
+
+/* Calculate the key, action and counter bit offsets of entry @ix within its
+ * TCAM row, based on the current type-group (full, half or quarter key).
+ */
+static void vcap_data_offset_get(const struct vcap_props *vcap,
+ struct vcap_data *data, int ix)
+{
+ int num_subwords_per_entry, num_subwords_per_action;
+ int i, col, offset, num_entries_per_row, base;
+ u32 width = vcap->tg_width;
+
+ switch (data->tg_sw) {
+ case VCAP_TG_FULL:
+ num_entries_per_row = 1;
+ break;
+ case VCAP_TG_HALF:
+ num_entries_per_row = 2;
+ break;
+ case VCAP_TG_QUARTER:
+ num_entries_per_row = 4;
+ break;
+ default:
+ return;
+ }
+
+ col = (ix % num_entries_per_row);
+ num_subwords_per_entry = (vcap->sw_count / num_entries_per_row);
+ base = (vcap->sw_count - col * num_subwords_per_entry -
+ num_subwords_per_entry);
+ data->tg_value = 0;
+ data->tg_mask = 0;
+ for (i = 0; i < num_subwords_per_entry; i++) {
+ offset = ((base + i) * width);
+ data->tg_value |= (data->tg_sw << offset);
+ data->tg_mask |= GENMASK(offset + width - 1, offset);
+ }
+
+ /* Calculate key/action/counter offsets */
+ col = (num_entries_per_row - col - 1);
+ data->key_offset = (base * vcap->entry_width) / vcap->sw_count;
+ data->counter_offset = (num_subwords_per_entry * col *
+ vcap->counter_width);
+ i = data->type;
+ width = vcap->action_table[i].width;
+ num_subwords_per_action = vcap->action_table[i].count;
+ data->action_offset = ((num_subwords_per_action * col * width) /
+ num_entries_per_row);
+ data->action_offset += vcap->action_type_width;
+}
+
+static void vcap_data_set(u32 *data, u32 offset, u32 len, u32 value)
+{
+ u32 i, v, m;
+
+ for (i = 0; i < len; i++, offset++) {
+ v = data[offset / ENTRY_WIDTH];
+ m = (1 << (offset % ENTRY_WIDTH));
+ if (value & (1 << i))
+ v |= m;
+ else
+ v &= ~m;
+ data[offset / ENTRY_WIDTH] = v;
+ }
+}
+
+static u32 vcap_data_get(u32 *data, u32 offset, u32 len)
+{
+ u32 i, v, m, value = 0;
+
+ for (i = 0; i < len; i++, offset++) {
+ v = data[offset / ENTRY_WIDTH];
+ m = (1 << (offset % ENTRY_WIDTH));
+ if (v & m)
+ value |= (1 << i);
+ }
+ return value;
+}
+
+static void vcap_key_field_set(struct vcap_data *data, u32 offset, u32 width,
+ u32 value, u32 mask)
+{
+ vcap_data_set(data->entry, offset + data->key_offset, width, value);
+ vcap_data_set(data->mask, offset + data->key_offset, width, mask);
+}
+
+static void vcap_key_set(const struct vcap_props *vcap, struct vcap_data *data,
+ int field, u32 value, u32 mask)
+{
+ u32 offset = vcap->keys[field].offset;
+ u32 length = vcap->keys[field].length;
+
+ vcap_key_field_set(data, offset, length, value, mask);
+}
+
+static void vcap_key_bytes_set(const struct vcap_props *vcap,
+ struct vcap_data *data, int field,
+ u8 *val, u8 *msk)
+{
+ u32 offset = vcap->keys[field].offset;
+ u32 count = vcap->keys[field].length;
+ u32 i, j, n = 0, value = 0, mask = 0;
+
+ WARN_ON(count % 8);
+
+ /* Data wider than 32 bits are split up in chunks of maximum 32 bits.
+ * The 32 LSB of the data are written to the 32 MSB of the TCAM.
+ */
+ offset += count;
+ count /= 8;
+
+ for (i = 0; i < count; i++) {
+ j = (count - i - 1);
+ value += (val[j] << n);
+ mask += (msk[j] << n);
+ n += 8;
+ if (n == ENTRY_WIDTH || (i + 1) == count) {
+ offset -= n;
+ vcap_key_field_set(data, offset, n, value, mask);
+ n = 0;
+ value = 0;
+ mask = 0;
+ }
+ }
+}
+
+static void vcap_key_l4_port_set(const struct vcap_props *vcap,
+ struct vcap_data *data, int field,
+ struct ocelot_vcap_udp_tcp *port)
+{
+ u32 offset = vcap->keys[field].offset;
+ u32 length = vcap->keys[field].length;
+
+ WARN_ON(length != 16);
+
+ vcap_key_field_set(data, offset, length, port->value, port->mask);
+}
+
+static void vcap_key_bit_set(const struct vcap_props *vcap,
+ struct vcap_data *data, int field,
+ enum ocelot_vcap_bit val)
+{
+ u32 value = (val == OCELOT_VCAP_BIT_1 ? 1 : 0);
+ u32 msk = (val == OCELOT_VCAP_BIT_ANY ? 0 : 1);
+ u32 offset = vcap->keys[field].offset;
+ u32 length = vcap->keys[field].length;
+
+ WARN_ON(length != 1);
+
+ vcap_key_field_set(data, offset, length, value, msk);
+}
+
+static void vcap_action_set(const struct vcap_props *vcap,
+ struct vcap_data *data, int field, u32 value)
+{
+ int offset = vcap->actions[field].offset;
+ int length = vcap->actions[field].length;
+
+ vcap_data_set(data->action, offset + data->action_offset, length,
+ value);
+}
+
+static void is2_action_set(struct ocelot *ocelot, struct vcap_data *data,
+ struct ocelot_vcap_filter *filter)
+{
+ const struct vcap_props *vcap = &ocelot->vcap[VCAP_IS2];
+ struct ocelot_vcap_action *a = &filter->action;
+
+ vcap_action_set(vcap, data, VCAP_IS2_ACT_MASK_MODE, a->mask_mode);
+ vcap_action_set(vcap, data, VCAP_IS2_ACT_PORT_MASK, a->port_mask);
+ vcap_action_set(vcap, data, VCAP_IS2_ACT_POLICE_ENA, a->police_ena);
+ vcap_action_set(vcap, data, VCAP_IS2_ACT_POLICE_IDX, a->pol_ix);
+ vcap_action_set(vcap, data, VCAP_IS2_ACT_CPU_QU_NUM, a->cpu_qu_num);
+ vcap_action_set(vcap, data, VCAP_IS2_ACT_CPU_COPY_ENA, a->cpu_copy_ena);
+}
+
+static void is2_entry_set(struct ocelot *ocelot, int ix,
+ struct ocelot_vcap_filter *filter)
+{
+ const struct vcap_props *vcap = &ocelot->vcap[VCAP_IS2];
+ struct ocelot_vcap_key_vlan *tag = &filter->vlan;
+ u32 val, msk, type, type_mask = 0xf, i, count;
+ struct ocelot_vcap_u64 payload;
+ struct vcap_data data;
+ int row = (ix / 2);
+
+ memset(&payload, 0, sizeof(payload));
+ memset(&data, 0, sizeof(data));
+
+ /* Read row */
+ vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_READ, VCAP_SEL_ALL);
+ vcap_cache2entry(ocelot, vcap, &data);
+ vcap_cache2action(ocelot, vcap, &data);
+
+ data.tg_sw = VCAP_TG_HALF;
+ vcap_data_offset_get(vcap, &data, ix);
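+ /* A filter with prio 0 (such as the zeroed-out filter written on
+ * deletion) keeps its type-group bits cleared, which leaves this
+ * half-key slot disabled.
+ */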
+ data.tg = (data.tg & ~data.tg_mask);
+ if (filter->prio != 0)
+ data.tg |= data.tg_value;
+
+ data.type = IS2_ACTION_TYPE_NORMAL;
+
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_PAG, filter->pag, 0xff);
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_FIRST,
+ (filter->lookup == 0) ? OCELOT_VCAP_BIT_1 :
+ OCELOT_VCAP_BIT_0);
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_IGR_PORT_MASK, 0,
+ ~filter->ingress_port_mask);
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_HOST_MATCH,
+ OCELOT_VCAP_BIT_ANY);
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L2_MC, filter->dmac_mc);
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L2_BC, filter->dmac_bc);
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_VLAN_TAGGED, tag->tagged);
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_VID,
+ tag->vid.value, tag->vid.mask);
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_PCP,
+ tag->pcp.value[0], tag->pcp.mask[0]);
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_DEI, tag->dei);
+
+ switch (filter->key_type) {
+ case OCELOT_VCAP_KEY_ETYPE: {
+ struct ocelot_vcap_key_etype *etype = &filter->key.etype;
+
+ type = IS2_TYPE_ETYPE;
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L2_DMAC,
+ etype->dmac.value, etype->dmac.mask);
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L2_SMAC,
+ etype->smac.value, etype->smac.mask);
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_MAC_ETYPE_ETYPE,
+ etype->etype.value, etype->etype.mask);
+ /* Clear unused bits */
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0,
+ 0, 0);
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1,
+ 0, 0);
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2,
+ 0, 0);
+ vcap_key_bytes_set(vcap, &data,
+ VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0,
+ etype->data.value, etype->data.mask);
+ break;
+ }
+ case OCELOT_VCAP_KEY_LLC: {
+ struct ocelot_vcap_key_llc *llc = &filter->key.llc;
+
+ type = IS2_TYPE_LLC;
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L2_DMAC,
+ llc->dmac.value, llc->dmac.mask);
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L2_SMAC,
+ llc->smac.value, llc->smac.mask);
+ for (i = 0; i < 4; i++) {
+ payload.value[i] = llc->llc.value[i];
+ payload.mask[i] = llc->llc.mask[i];
+ }
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_MAC_LLC_L2_LLC,
+ payload.value, payload.mask);
+ break;
+ }
+ case OCELOT_VCAP_KEY_SNAP: {
+ struct ocelot_vcap_key_snap *snap = &filter->key.snap;
+
+ type = IS2_TYPE_SNAP;
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L2_DMAC,
+ snap->dmac.value, snap->dmac.mask);
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L2_SMAC,
+ snap->smac.value, snap->smac.mask);
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_MAC_SNAP_L2_SNAP,
+ filter->key.snap.snap.value,
+ filter->key.snap.snap.mask);
+ break;
+ }
+ case OCELOT_VCAP_KEY_ARP: {
+ struct ocelot_vcap_key_arp *arp = &filter->key.arp;
+
+ type = IS2_TYPE_ARP;
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_MAC_ARP_SMAC,
+ arp->smac.value, arp->smac.mask);
+ vcap_key_bit_set(vcap, &data,
+ VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK,
+ arp->ethernet);
+ vcap_key_bit_set(vcap, &data,
+ VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK,
+ arp->ip);
+ vcap_key_bit_set(vcap, &data,
+ VCAP_IS2_HK_MAC_ARP_LEN_OK,
+ arp->length);
+ vcap_key_bit_set(vcap, &data,
+ VCAP_IS2_HK_MAC_ARP_TARGET_MATCH,
+ arp->dmac_match);
+ vcap_key_bit_set(vcap, &data,
+ VCAP_IS2_HK_MAC_ARP_SENDER_MATCH,
+ arp->smac_match);
+ vcap_key_bit_set(vcap, &data,
+ VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN,
+ arp->unknown);
+
+ /* OPCODE is inverse, bit 0 is reply flag, bit 1 is RARP flag */
+ val = ((arp->req == OCELOT_VCAP_BIT_0 ? 1 : 0) |
+ (arp->arp == OCELOT_VCAP_BIT_0 ? 2 : 0));
+ msk = ((arp->req == OCELOT_VCAP_BIT_ANY ? 0 : 1) |
+ (arp->arp == OCELOT_VCAP_BIT_ANY ? 0 : 2));
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_MAC_ARP_OPCODE,
+ val, msk);
+ vcap_key_bytes_set(vcap, &data,
+ VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP,
+ arp->dip.value.addr, arp->dip.mask.addr);
+ vcap_key_bytes_set(vcap, &data,
+ VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP,
+ arp->sip.value.addr, arp->sip.mask.addr);
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP,
+ 0, 0);
+ break;
+ }
+ case OCELOT_VCAP_KEY_IPV4:
+ case OCELOT_VCAP_KEY_IPV6: {
+ enum ocelot_vcap_bit sip_eq_dip, sport_eq_dport, seq_zero, tcp;
+ enum ocelot_vcap_bit ttl, fragment, options, tcp_ack, tcp_urg;
+ enum ocelot_vcap_bit tcp_fin, tcp_syn, tcp_rst, tcp_psh;
+ struct ocelot_vcap_key_ipv4 *ipv4 = NULL;
+ struct ocelot_vcap_key_ipv6 *ipv6 = NULL;
+ struct ocelot_vcap_udp_tcp *sport, *dport;
+ struct ocelot_vcap_ipv4 sip, dip;
+ struct ocelot_vcap_u8 proto, ds;
+ struct ocelot_vcap_u48 *ip_data;
+
+ if (filter->key_type == OCELOT_VCAP_KEY_IPV4) {
+ ipv4 = &filter->key.ipv4;
+ ttl = ipv4->ttl;
+ fragment = ipv4->fragment;
+ options = ipv4->options;
+ proto = ipv4->proto;
+ ds = ipv4->ds;
+ ip_data = &ipv4->data;
+ sip = ipv4->sip;
+ dip = ipv4->dip;
+ sport = &ipv4->sport;
+ dport = &ipv4->dport;
+ tcp_fin = ipv4->tcp_fin;
+ tcp_syn = ipv4->tcp_syn;
+ tcp_rst = ipv4->tcp_rst;
+ tcp_psh = ipv4->tcp_psh;
+ tcp_ack = ipv4->tcp_ack;
+ tcp_urg = ipv4->tcp_urg;
+ sip_eq_dip = ipv4->sip_eq_dip;
+ sport_eq_dport = ipv4->sport_eq_dport;
+ seq_zero = ipv4->seq_zero;
+ } else {
+ ipv6 = &filter->key.ipv6;
+ ttl = ipv6->ttl;
+ fragment = OCELOT_VCAP_BIT_ANY;
+ options = OCELOT_VCAP_BIT_ANY;
+ proto = ipv6->proto;
+ ds = ipv6->ds;
+ ip_data = &ipv6->data;
+ for (i = 0; i < 8; i++) {
+ val = ipv6->sip.value[i + 8];
+ msk = ipv6->sip.mask[i + 8];
+ if (i < 4) {
+ dip.value.addr[i] = val;
+ dip.mask.addr[i] = msk;
+ } else {
+ sip.value.addr[i - 4] = val;
+ sip.mask.addr[i - 4] = msk;
+ }
+ }
+ sport = &ipv6->sport;
+ dport = &ipv6->dport;
+ tcp_fin = ipv6->tcp_fin;
+ tcp_syn = ipv6->tcp_syn;
+ tcp_rst = ipv6->tcp_rst;
+ tcp_psh = ipv6->tcp_psh;
+ tcp_ack = ipv6->tcp_ack;
+ tcp_urg = ipv6->tcp_urg;
+ sip_eq_dip = ipv6->sip_eq_dip;
+ sport_eq_dport = ipv6->sport_eq_dport;
+ seq_zero = ipv6->seq_zero;
+ }
+
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_IP4,
+ ipv4 ? OCELOT_VCAP_BIT_1 : OCELOT_VCAP_BIT_0);
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L3_FRAGMENT,
+ fragment);
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_L3_FRAG_OFS_GT0, 0, 0);
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L3_OPTIONS,
+ options);
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_IP4_L3_TTL_GT0,
+ ttl);
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L3_TOS,
+ ds.value, ds.mask);
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L3_IP4_DIP,
+ dip.value.addr, dip.mask.addr);
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L3_IP4_SIP,
+ sip.value.addr, sip.mask.addr);
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_DIP_EQ_SIP,
+ sip_eq_dip);
+ val = proto.value[0];
+ msk = proto.mask[0];
+ type = IS2_TYPE_IP_UDP_TCP;
+ if (msk == 0xff && (val == 6 || val == 17)) {
+ /* UDP/TCP protocol match */
+ tcp = (val == 6 ?
+ OCELOT_VCAP_BIT_1 : OCELOT_VCAP_BIT_0);
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_TCP, tcp);
+ vcap_key_l4_port_set(vcap, &data,
+ VCAP_IS2_HK_L4_DPORT, dport);
+ vcap_key_l4_port_set(vcap, &data,
+ VCAP_IS2_HK_L4_SPORT, sport);
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_L4_RNG, 0, 0);
+ vcap_key_bit_set(vcap, &data,
+ VCAP_IS2_HK_L4_SPORT_EQ_DPORT,
+ sport_eq_dport);
+ vcap_key_bit_set(vcap, &data,
+ VCAP_IS2_HK_L4_SEQUENCE_EQ0,
+ seq_zero);
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L4_FIN,
+ tcp_fin);
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L4_SYN,
+ tcp_syn);
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L4_RST,
+ tcp_rst);
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L4_PSH,
+ tcp_psh);
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L4_ACK,
+ tcp_ack);
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L4_URG,
+ tcp_urg);
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_L4_1588_DOM,
+ 0, 0);
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_L4_1588_VER,
+ 0, 0);
+ } else {
+ if (msk == 0) {
+ /* Any IP protocol match */
+ type_mask = IS2_TYPE_MASK_IP_ANY;
+ } else {
+ /* Non-UDP/TCP protocol match */
+ type = IS2_TYPE_IP_OTHER;
+ for (i = 0; i < 6; i++) {
+ payload.value[i] = ip_data->value[i];
+ payload.mask[i] = ip_data->mask[i];
+ }
+ }
+ vcap_key_bytes_set(vcap, &data,
+ VCAP_IS2_HK_IP4_L3_PROTO,
+ proto.value, proto.mask);
+ vcap_key_bytes_set(vcap, &data,
+ VCAP_IS2_HK_L3_PAYLOAD,
+ payload.value, payload.mask);
+ }
+ break;
+ }
+ case OCELOT_VCAP_KEY_ANY:
+ default:
+ type = 0;
+ type_mask = 0;
+ count = vcap->entry_width / 2;
+ /* Iterate over the non-common part of the key and
+ * clear entry data
+ */
+ for (i = vcap->keys[VCAP_IS2_HK_L2_DMAC].offset;
+ i < count; i += ENTRY_WIDTH) {
+ vcap_key_field_set(&data, i, min(32u, count - i), 0, 0);
+ }
+ break;
+ }
+
+ vcap_key_set(vcap, &data, VCAP_IS2_TYPE, type, type_mask);
+ is2_action_set(ocelot, &data, filter);
+ vcap_data_set(data.counter, data.counter_offset,
+ vcap->counter_width, filter->stats.pkts);
+
+ /* Write row */
+ vcap_entry2cache(ocelot, vcap, &data);
+ vcap_action2cache(ocelot, vcap, &data);
+ vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_WRITE, VCAP_SEL_ALL);
+}
+
+static void is1_action_set(struct ocelot *ocelot, struct vcap_data *data,
+ const struct ocelot_vcap_filter *filter)
+{
+ const struct vcap_props *vcap = &ocelot->vcap[VCAP_IS1];
+ const struct ocelot_vcap_action *a = &filter->action;
+
+ vcap_action_set(vcap, data, VCAP_IS1_ACT_VID_REPLACE_ENA,
+ a->vid_replace_ena);
+ vcap_action_set(vcap, data, VCAP_IS1_ACT_VID_ADD_VAL, a->vid);
+ vcap_action_set(vcap, data, VCAP_IS1_ACT_VLAN_POP_CNT_ENA,
+ a->vlan_pop_cnt_ena);
+ vcap_action_set(vcap, data, VCAP_IS1_ACT_VLAN_POP_CNT,
+ a->vlan_pop_cnt);
+ vcap_action_set(vcap, data, VCAP_IS1_ACT_PCP_DEI_ENA, a->pcp_dei_ena);
+ vcap_action_set(vcap, data, VCAP_IS1_ACT_PCP_VAL, a->pcp);
+ vcap_action_set(vcap, data, VCAP_IS1_ACT_DEI_VAL, a->dei);
+ vcap_action_set(vcap, data, VCAP_IS1_ACT_QOS_ENA, a->qos_ena);
+ vcap_action_set(vcap, data, VCAP_IS1_ACT_QOS_VAL, a->qos_val);
+ vcap_action_set(vcap, data, VCAP_IS1_ACT_PAG_OVERRIDE_MASK,
+ a->pag_override_mask);
+ vcap_action_set(vcap, data, VCAP_IS1_ACT_PAG_VAL, a->pag_val);
+}
+
+static void is1_entry_set(struct ocelot *ocelot, int ix,
+ struct ocelot_vcap_filter *filter)
+{
+ const struct vcap_props *vcap = &ocelot->vcap[VCAP_IS1];
+ struct ocelot_vcap_key_vlan *tag = &filter->vlan;
+ struct ocelot_vcap_u64 payload;
+ struct vcap_data data;
+ int row = ix / 2;
+ u32 type;
+
+ memset(&payload, 0, sizeof(payload));
+ memset(&data, 0, sizeof(data));
+
+ /* Read row */
+ vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_READ, VCAP_SEL_ALL);
+ vcap_cache2entry(ocelot, vcap, &data);
+ vcap_cache2action(ocelot, vcap, &data);
+
+ data.tg_sw = VCAP_TG_HALF;
+ data.type = IS1_ACTION_TYPE_NORMAL;
+ vcap_data_offset_get(vcap, &data, ix);
+ data.tg = (data.tg & ~data.tg_mask);
+ if (filter->prio != 0)
+ data.tg |= data.tg_value;
+
+ vcap_key_set(vcap, &data, VCAP_IS1_HK_LOOKUP, filter->lookup, 0x3);
+ vcap_key_set(vcap, &data, VCAP_IS1_HK_IGR_PORT_MASK, 0,
+ ~filter->ingress_port_mask);
+ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_L2_MC, filter->dmac_mc);
+ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_L2_BC, filter->dmac_bc);
+ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_VLAN_TAGGED, tag->tagged);
+ vcap_key_set(vcap, &data, VCAP_IS1_HK_VID,
+ tag->vid.value, tag->vid.mask);
+ vcap_key_set(vcap, &data, VCAP_IS1_HK_PCP,
+ tag->pcp.value[0], tag->pcp.mask[0]);
+ type = IS1_TYPE_S1_NORMAL;
+
+ switch (filter->key_type) {
+ case OCELOT_VCAP_KEY_ETYPE: {
+ struct ocelot_vcap_key_etype *etype = &filter->key.etype;
+
+ vcap_key_bytes_set(vcap, &data, VCAP_IS1_HK_L2_SMAC,
+ etype->smac.value, etype->smac.mask);
+ vcap_key_bytes_set(vcap, &data, VCAP_IS1_HK_ETYPE,
+ etype->etype.value, etype->etype.mask);
+ break;
+ }
+ case OCELOT_VCAP_KEY_IPV4: {
+ struct ocelot_vcap_key_ipv4 *ipv4 = &filter->key.ipv4;
+ struct ocelot_vcap_udp_tcp *sport = &ipv4->sport;
+ struct ocelot_vcap_udp_tcp *dport = &ipv4->dport;
+ enum ocelot_vcap_bit tcp_udp = OCELOT_VCAP_BIT_0;
+ struct ocelot_vcap_u8 proto = ipv4->proto;
+ struct ocelot_vcap_ipv4 sip = ipv4->sip;
+ u32 val, msk;
+
+ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_IP_SNAP,
+ OCELOT_VCAP_BIT_1);
+ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_IP4,
+ OCELOT_VCAP_BIT_1);
+ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_ETYPE_LEN,
+ OCELOT_VCAP_BIT_1);
+ vcap_key_bytes_set(vcap, &data, VCAP_IS1_HK_L3_IP4_SIP,
+ sip.value.addr, sip.mask.addr);
+
+ val = proto.value[0];
+ msk = proto.mask[0];
+
+ if ((val == NEXTHDR_TCP || val == NEXTHDR_UDP) && msk == 0xff)
+ tcp_udp = OCELOT_VCAP_BIT_1;
+ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_TCP_UDP, tcp_udp);
+
+ if (tcp_udp) {
+ enum ocelot_vcap_bit tcp = OCELOT_VCAP_BIT_0;
+
+ if (val == NEXTHDR_TCP)
+ tcp = OCELOT_VCAP_BIT_1;
+
+ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_TCP, tcp);
+ vcap_key_l4_port_set(vcap, &data, VCAP_IS1_HK_L4_SPORT,
+ sport);
+ /* Overloaded field */
+ vcap_key_l4_port_set(vcap, &data, VCAP_IS1_HK_ETYPE,
+ dport);
+ } else {
+ /* IPv4 "other" frame */
+ struct ocelot_vcap_u16 etype = {0};
+
+ /* Overloaded field */
+ etype.value[0] = proto.value[0];
+ etype.mask[0] = proto.mask[0];
+
+ vcap_key_bytes_set(vcap, &data, VCAP_IS1_HK_ETYPE,
+ etype.value, etype.mask);
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_TYPE,
+ type ? OCELOT_VCAP_BIT_1 : OCELOT_VCAP_BIT_0);
+
+ is1_action_set(ocelot, &data, filter);
+ vcap_data_set(data.counter, data.counter_offset,
+ vcap->counter_width, filter->stats.pkts);
+
+ /* Write row */
+ vcap_entry2cache(ocelot, vcap, &data);
+ vcap_action2cache(ocelot, vcap, &data);
+ vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_WRITE, VCAP_SEL_ALL);
+}
+
+static void es0_action_set(struct ocelot *ocelot, struct vcap_data *data,
+ const struct ocelot_vcap_filter *filter)
+{
+ const struct vcap_props *vcap = &ocelot->vcap[VCAP_ES0];
+ const struct ocelot_vcap_action *a = &filter->action;
+
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_PUSH_OUTER_TAG,
+ a->push_outer_tag);
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_PUSH_INNER_TAG,
+ a->push_inner_tag);
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_TAG_A_TPID_SEL,
+ a->tag_a_tpid_sel);
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_TAG_A_VID_SEL,
+ a->tag_a_vid_sel);
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_TAG_A_PCP_SEL,
+ a->tag_a_pcp_sel);
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_VID_A_VAL, a->vid_a_val);
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_PCP_A_VAL, a->pcp_a_val);
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_TAG_B_TPID_SEL,
+ a->tag_b_tpid_sel);
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_TAG_B_VID_SEL,
+ a->tag_b_vid_sel);
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_TAG_B_PCP_SEL,
+ a->tag_b_pcp_sel);
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_VID_B_VAL, a->vid_b_val);
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_PCP_B_VAL, a->pcp_b_val);
+}
+
+static void es0_entry_set(struct ocelot *ocelot, int ix,
+ struct ocelot_vcap_filter *filter)
+{
+ const struct vcap_props *vcap = &ocelot->vcap[VCAP_ES0];
+ struct ocelot_vcap_key_vlan *tag = &filter->vlan;
+ struct ocelot_vcap_u64 payload;
+ struct vcap_data data;
+ int row = ix;
+
+ memset(&payload, 0, sizeof(payload));
+ memset(&data, 0, sizeof(data));
+
+ /* Read row */
+ vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_READ, VCAP_SEL_ALL);
+ vcap_cache2entry(ocelot, vcap, &data);
+ vcap_cache2action(ocelot, vcap, &data);
+
+ data.tg_sw = VCAP_TG_FULL;
+ data.type = ES0_ACTION_TYPE_NORMAL;
+ vcap_data_offset_get(vcap, &data, ix);
+ data.tg = (data.tg & ~data.tg_mask);
+ if (filter->prio != 0)
+ data.tg |= data.tg_value;
+
+ vcap_key_set(vcap, &data, VCAP_ES0_IGR_PORT, filter->ingress_port.value,
+ filter->ingress_port.mask);
+ vcap_key_set(vcap, &data, VCAP_ES0_EGR_PORT, filter->egress_port.value,
+ filter->egress_port.mask);
+ vcap_key_bit_set(vcap, &data, VCAP_ES0_L2_MC, filter->dmac_mc);
+ vcap_key_bit_set(vcap, &data, VCAP_ES0_L2_BC, filter->dmac_bc);
+ vcap_key_set(vcap, &data, VCAP_ES0_VID,
+ tag->vid.value, tag->vid.mask);
+ vcap_key_set(vcap, &data, VCAP_ES0_PCP,
+ tag->pcp.value[0], tag->pcp.mask[0]);
+
+ es0_action_set(ocelot, &data, filter);
+ vcap_data_set(data.counter, data.counter_offset,
+ vcap->counter_width, filter->stats.pkts);
+
+ /* Write row */
+ vcap_entry2cache(ocelot, vcap, &data);
+ vcap_action2cache(ocelot, vcap, &data);
+ vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_WRITE, VCAP_SEL_ALL);
+}
+
+static void vcap_entry_get(struct ocelot *ocelot, int ix,
+ struct ocelot_vcap_filter *filter)
+{
+ const struct vcap_props *vcap = &ocelot->vcap[filter->block_id];
+ struct vcap_data data;
+ int row, count;
+ u32 cnt;
+
+ if (filter->block_id == VCAP_ES0)
+ data.tg_sw = VCAP_TG_FULL;
+ else
+ data.tg_sw = VCAP_TG_HALF;
+
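+ /* Number of entries per TCAM row: 1 for full keys, 2 for half keys,
+ * 4 for quarter keys.
+ */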
+ count = (1 << (data.tg_sw - 1));
+ row = (ix / count);
+ vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_READ, VCAP_SEL_COUNTER);
+ vcap_cache2action(ocelot, vcap, &data);
+ vcap_data_offset_get(vcap, &data, ix);
+ cnt = vcap_data_get(data.counter, data.counter_offset,
+ vcap->counter_width);
+
+ filter->stats.pkts = cnt;
+}
+
+static void vcap_entry_set(struct ocelot *ocelot, int ix,
+ struct ocelot_vcap_filter *filter)
+{
+ if (filter->block_id == VCAP_IS1)
+ return is1_entry_set(ocelot, ix, filter);
+ if (filter->block_id == VCAP_IS2)
+ return is2_entry_set(ocelot, ix, filter);
+ if (filter->block_id == VCAP_ES0)
+ return es0_entry_set(ocelot, ix, filter);
+}
+
+static int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix,
+ struct ocelot_policer *pol)
+{
+ struct qos_policer_conf pp = { 0 };
+
+ if (!pol)
+ return -EINVAL;
+
+ pp.mode = MSCC_QOS_RATE_MODE_DATA;
+ pp.pir = pol->rate;
+ pp.pbs = pol->burst;
+
+ return qos_policer_conf_set(ocelot, 0, pol_ix, &pp);
+}
+
+static void ocelot_vcap_policer_del(struct ocelot *ocelot,
+ struct ocelot_vcap_block *block,
+ u32 pol_ix)
+{
+ struct ocelot_vcap_filter *filter;
+ struct qos_policer_conf pp = {0};
+ int index = -1;
+
+ if (pol_ix < block->pol_lpr)
+ return;
+
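+ /* Policers are allocated downwards from pol_lpr, so freeing one
+ * leaves a hole: shift every policer allocated below it up by one
+ * index and reprogram the IS2 filters that reference them.
+ */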
+ list_for_each_entry(filter, &block->rules, list) {
+ index++;
+ if (filter->block_id == VCAP_IS2 &&
+ filter->action.police_ena &&
+ filter->action.pol_ix < pol_ix) {
+ filter->action.pol_ix += 1;
+ ocelot_vcap_policer_add(ocelot, filter->action.pol_ix,
+ &filter->action.pol);
+ is2_entry_set(ocelot, index, filter);
+ }
+ }
+
+ pp.mode = MSCC_QOS_RATE_MODE_DISABLED;
+ qos_policer_conf_set(ocelot, 0, pol_ix, &pp);
+
+ block->pol_lpr++;
+}
+
+static void ocelot_vcap_filter_add_to_block(struct ocelot *ocelot,
+ struct ocelot_vcap_block *block,
+ struct ocelot_vcap_filter *filter)
+{
+ struct ocelot_vcap_filter *tmp;
+ struct list_head *pos, *n;
+
+ if (filter->block_id == VCAP_IS2 && filter->action.police_ena) {
+ block->pol_lpr--;
+ filter->action.pol_ix = block->pol_lpr;
+ ocelot_vcap_policer_add(ocelot, filter->action.pol_ix,
+ &filter->action.pol);
+ }
+
+ block->count++;
+
+ if (list_empty(&block->rules)) {
+ list_add(&filter->list, &block->rules);
+ return;
+ }
+
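+ /* Insert sorted by ascending prio; a filter's position in this list
+ * is also the hardware entry index it will be written to.
+ */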
+ list_for_each_safe(pos, n, &block->rules) {
+ tmp = list_entry(pos, struct ocelot_vcap_filter, list);
+ if (filter->prio < tmp->prio)
+ break;
+ }
+ list_add(&filter->list, pos->prev);
+}
+
+static int ocelot_vcap_block_get_filter_index(struct ocelot_vcap_block *block,
+ struct ocelot_vcap_filter *filter)
+{
+ struct ocelot_vcap_filter *tmp;
+ int index = 0;
+
+ list_for_each_entry(tmp, &block->rules, list) {
+ if (filter->id == tmp->id)
+ return index;
+ index++;
+ }
+
+ return -ENOENT;
+}
+
+static struct ocelot_vcap_filter*
+ocelot_vcap_block_find_filter_by_index(struct ocelot_vcap_block *block,
+ int index)
+{
+ struct ocelot_vcap_filter *tmp;
+ int i = 0;
+
+ list_for_each_entry(tmp, &block->rules, list) {
+ if (i == index)
+ return tmp;
+ ++i;
+ }
+
+ return NULL;
+}
+
+struct ocelot_vcap_filter *
+ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, int id)
+{
+ struct ocelot_vcap_filter *filter;
+
+ list_for_each_entry(filter, &block->rules, list)
+ if (filter->id == id)
+ return filter;
+
+ return NULL;
+}
+
+/* If @on=false, then SNAP, ARP, IP and OAM frames will not match on keys based
+ * on destination and source MAC addresses, but only on higher-level protocol
+ * information. The only frame types to match on keys containing MAC addresses
+ * in this case are non-SNAP, non-ARP, non-IP and non-OAM frames.
+ *
+ * If @on=true, then the above frame types (SNAP, ARP, IP and OAM) will match
+ * on MAC_ETYPE keys such as destination and source MAC on this ingress port.
+ * However the setting has the side effect of making these frames not match
+ * on any _other_ keys than MAC_ETYPE ones.
+ */
+static void ocelot_match_all_as_mac_etype(struct ocelot *ocelot, int port,
+ int lookup, bool on)
+{
+ u32 val = 0;
+
+ if (on)
+ val = ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS(BIT(lookup)) |
+ ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS(BIT(lookup)) |
+ ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS(BIT(lookup)) |
+ ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS(BIT(lookup)) |
+ ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS(BIT(lookup));
+
+ ocelot_rmw_gix(ocelot, val,
+ ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS(BIT(lookup)) |
+ ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS(BIT(lookup)) |
+ ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS(BIT(lookup)) |
+ ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS(BIT(lookup)) |
+ ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS(BIT(lookup)),
+ ANA_PORT_VCAP_S2_CFG, port);
+}
+
+static bool
+ocelot_vcap_is_problematic_mac_etype(struct ocelot_vcap_filter *filter)
+{
+ u16 proto, mask;
+
+ if (filter->key_type != OCELOT_VCAP_KEY_ETYPE)
+ return false;
+
+ proto = ntohs(*(__be16 *)filter->key.etype.etype.value);
+ mask = ntohs(*(__be16 *)filter->key.etype.etype.mask);
+
+ /* ETH_P_ALL match, so all protocols below are included */
+ if (mask == 0)
+ return true;
+ if (proto == ETH_P_ARP)
+ return true;
+ if (proto == ETH_P_IP)
+ return true;
+ if (proto == ETH_P_IPV6)
+ return true;
+
+ return false;
+}
+
+static bool
+ocelot_vcap_is_problematic_non_mac_etype(struct ocelot_vcap_filter *filter)
+{
+ if (filter->key_type == OCELOT_VCAP_KEY_SNAP)
+ return true;
+ if (filter->key_type == OCELOT_VCAP_KEY_ARP)
+ return true;
+ if (filter->key_type == OCELOT_VCAP_KEY_IPV4)
+ return true;
+ if (filter->key_type == OCELOT_VCAP_KEY_IPV6)
+ return true;
+ return false;
+}
+
+static bool
+ocelot_exclusive_mac_etype_filter_rules(struct ocelot *ocelot,
+ struct ocelot_vcap_filter *filter)
+{
+ struct ocelot_vcap_block *block = &ocelot->block[filter->block_id];
+ struct ocelot_vcap_filter *tmp;
+ unsigned long port;
+ int i;
+
+ /* We only have the S2_IP_TCPUDP_DIS set of knobs for VCAP IS2 */
+ if (filter->block_id != VCAP_IS2)
+ return true;
+
+ if (ocelot_vcap_is_problematic_mac_etype(filter)) {
+ /* Search for any non-MAC_ETYPE rules on the port */
+ for (i = 0; i < block->count; i++) {
+ tmp = ocelot_vcap_block_find_filter_by_index(block, i);
+ if (tmp->ingress_port_mask & filter->ingress_port_mask &&
+ tmp->lookup == filter->lookup &&
+ ocelot_vcap_is_problematic_non_mac_etype(tmp))
+ return false;
+ }
+
+ for_each_set_bit(port, &filter->ingress_port_mask,
+ ocelot->num_phys_ports)
+ ocelot_match_all_as_mac_etype(ocelot, port,
+ filter->lookup, true);
+ } else if (ocelot_vcap_is_problematic_non_mac_etype(filter)) {
+ /* Search for any MAC_ETYPE rules on the port */
+ for (i = 0; i < block->count; i++) {
+ tmp = ocelot_vcap_block_find_filter_by_index(block, i);
+ if (tmp->ingress_port_mask & filter->ingress_port_mask &&
+ tmp->lookup == filter->lookup &&
+ ocelot_vcap_is_problematic_mac_etype(tmp))
+ return false;
+ }
+
+ for_each_set_bit(port, &filter->ingress_port_mask,
+ ocelot->num_phys_ports)
+ ocelot_match_all_as_mac_etype(ocelot, port,
+ filter->lookup, false);
+ }
+
+ return true;
+}
+
+int ocelot_vcap_filter_add(struct ocelot *ocelot,
+ struct ocelot_vcap_filter *filter,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot_vcap_block *block = &ocelot->block[filter->block_id];
+ int i, index;
+
+ if (!ocelot_exclusive_mac_etype_filter_rules(ocelot, filter)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot mix MAC_ETYPE with non-MAC_ETYPE rules, use the other IS2 lookup");
+ return -EBUSY;
+ }
+
+ /* Add filter to the linked list */
+ ocelot_vcap_filter_add_to_block(ocelot, block, filter);
+
+ /* Get the index of the inserted filter */
+ index = ocelot_vcap_block_get_filter_index(block, filter);
+ if (index < 0)
+ return index;
+
+ /* Move down the rules to make place for the new filter */
+ for (i = block->count - 1; i > index; i--) {
+ struct ocelot_vcap_filter *tmp;
+
+ tmp = ocelot_vcap_block_find_filter_by_index(block, i);
+ /* Read back the filter's counters before moving it */
+ vcap_entry_get(ocelot, i - 1, tmp);
+ vcap_entry_set(ocelot, i, tmp);
+ }
+
+ /* Now insert the new filter */
+ vcap_entry_set(ocelot, index, filter);
+ return 0;
+}
+
+static void ocelot_vcap_block_remove_filter(struct ocelot *ocelot,
+ struct ocelot_vcap_block *block,
+ struct ocelot_vcap_filter *filter)
+{
+ struct ocelot_vcap_filter *tmp;
+ struct list_head *pos, *q;
+
+ list_for_each_safe(pos, q, &block->rules) {
+ tmp = list_entry(pos, struct ocelot_vcap_filter, list);
+ if (tmp->id == filter->id) {
+ if (tmp->block_id == VCAP_IS2 &&
+ tmp->action.police_ena)
+ ocelot_vcap_policer_del(ocelot, block,
+ tmp->action.pol_ix);
+
+ list_del(pos);
+ kfree(tmp);
+ }
+ }
+
+ block->count--;
+}
+
+int ocelot_vcap_filter_del(struct ocelot *ocelot,
+ struct ocelot_vcap_filter *filter)
+{
+ struct ocelot_vcap_block *block = &ocelot->block[filter->block_id];
+ struct ocelot_vcap_filter del_filter;
+ int i, index;
+
+ /* Need to inherit the block_id so that vcap_entry_set() knows which
+ * VCAP block the (now empty) entry must be written to.
+ */
+ memset(&del_filter, 0, sizeof(del_filter));
+ del_filter.block_id = filter->block_id;
+
+ /* Get the index of the filter */
+ index = ocelot_vcap_block_get_filter_index(block, filter);
+ if (index < 0)
+ return index;
+
+ /* Delete filter */
+ ocelot_vcap_block_remove_filter(ocelot, block, filter);
+
+ /* Move up all the filters above the deleted one */
+ for (i = index; i < block->count; i++) {
+ struct ocelot_vcap_filter *tmp;
+
+ tmp = ocelot_vcap_block_find_filter_by_index(block, i);
+ /* Read back the filter's counters before moving it */
+ vcap_entry_get(ocelot, i + 1, tmp);
+ vcap_entry_set(ocelot, i, tmp);
+ }
+
+ /* Now delete the last filter, because it is duplicated */
+ vcap_entry_set(ocelot, block->count, &del_filter);
+
+ return 0;
+}
+
+int ocelot_vcap_filter_stats_update(struct ocelot *ocelot,
+ struct ocelot_vcap_filter *filter)
+{
+ struct ocelot_vcap_block *block = &ocelot->block[filter->block_id];
+ struct ocelot_vcap_filter tmp;
+ int index;
+
+ index = ocelot_vcap_block_get_filter_index(block, filter);
+ if (index < 0)
+ return index;
+
+ vcap_entry_get(ocelot, index, filter);
+
+ /* After we get the result we need to clear the counters */
+ tmp = *filter;
+ tmp.stats.pkts = 0;
+ vcap_entry_set(ocelot, index, &tmp);
+
+ return 0;
+}
+
+static void ocelot_vcap_init_one(struct ocelot *ocelot,
+ const struct vcap_props *vcap)
+{
+ struct vcap_data data;
+
+ memset(&data, 0, sizeof(data));
+
+ vcap_entry2cache(ocelot, vcap, &data);
+ ocelot_target_write(ocelot, vcap->target, vcap->entry_count,
+ VCAP_CORE_MV_CFG);
+ vcap_cmd(ocelot, vcap, 0, VCAP_CMD_INITIALIZE, VCAP_SEL_ENTRY);
+
+ vcap_action2cache(ocelot, vcap, &data);
+ ocelot_target_write(ocelot, vcap->target, vcap->action_count,
+ VCAP_CORE_MV_CFG);
+ vcap_cmd(ocelot, vcap, 0, VCAP_CMD_INITIALIZE,
+ VCAP_SEL_ACTION | VCAP_SEL_COUNTER);
+}
+
+static void ocelot_vcap_detect_constants(struct ocelot *ocelot,
+ struct vcap_props *vcap)
+{
+ int counter_memory_width;
+ int num_default_actions;
+ int version;
+
+ version = ocelot_target_read(ocelot, vcap->target,
+ VCAP_CONST_VCAP_VER);
+ /* Only version 0 VCAP supported for now */
+ if (WARN_ON(version != 0))
+ return;
+
+ /* Width in bits of type-group field */
+ vcap->tg_width = ocelot_target_read(ocelot, vcap->target,
+ VCAP_CONST_ENTRY_TG_WIDTH);
+ /* Number of subwords per TCAM row */
+ vcap->sw_count = ocelot_target_read(ocelot, vcap->target,
+ VCAP_CONST_ENTRY_SWCNT);
+ /* Number of rows in TCAM. There can be this many full keys, or double
+ * this number half keys, or 4 times this number quarter keys.
+ */
+ vcap->entry_count = ocelot_target_read(ocelot, vcap->target,
+ VCAP_CONST_ENTRY_CNT);
+ /* Assuming there are 4 subwords per TCAM row, their layout in the
+ * actual TCAM (not in the cache) would be:
+ *
+ * | SW 3 | TG 3 | SW 2 | TG 2 | SW 1 | TG 1 | SW 0 | TG 0 |
+ *
+ * (where SW=subword and TG=Type-Group).
+ *
+ * What VCAP_CONST_ENTRY_WIDTH is giving us is the width of one full TCAM
+ * row. But when software accesses the TCAM through the cache
+ * registers, the Type-Group values are written through another set of
+ * registers VCAP_TG_DAT, and therefore, it appears as though the 4
+ * subwords are contiguous in the cache memory.
+ * Important mention: regardless of the number of key entries per row
+ * (and therefore of key size: 1 full key or 2 half keys or 4 quarter
+ * keys), software always has to configure 4 Type-Group values. For
+ * example, in the case of 1 full key, the driver needs to set all 4
+ * Type-Group to be full key.
+ *
+ * For this reason, we need to fix up the value that the hardware is
+ * giving us. We don't actually care about the width of the entry in
+ * the TCAM. What we care about is the width of the entry in the cache
+ * registers, which is how we get to interact with it. And since the
+ * VCAP_ENTRY_DAT cache registers access only the subwords and not the
+ * Type-Groups, this means we need to subtract the width of the
+ * Type-Groups when packing and unpacking key entry data in a TCAM row.
+ */
+ vcap->entry_width = ocelot_target_read(ocelot, vcap->target,
+ VCAP_CONST_ENTRY_WIDTH);
+ vcap->entry_width -= vcap->tg_width * vcap->sw_count;
+ num_default_actions = ocelot_target_read(ocelot, vcap->target,
+ VCAP_CONST_ACTION_DEF_CNT);
+ vcap->action_count = vcap->entry_count + num_default_actions;
+ vcap->action_width = ocelot_target_read(ocelot, vcap->target,
+ VCAP_CONST_ACTION_WIDTH);
+ /* The width of the counter memory: this is the complete width of all
+ * counter fields associated with one full-word entry. There is one
+ * counter per entry sub-word (see VCAP_CONST_ENTRY_SWCNT for the
+ * number of subwords).
+ */
+ vcap->counter_words = vcap->sw_count;
+ counter_memory_width = ocelot_target_read(ocelot, vcap->target,
+ VCAP_CONST_CNT_WIDTH);
+ vcap->counter_width = counter_memory_width / vcap->counter_words;
+}
+
+int ocelot_vcap_init(struct ocelot *ocelot)
+{
+ int i;
+
+ /* Create a policer that will drop the frames destined to the CPU.
+ * This policer is used as an action in the ACL rules that drop
+ * frames.
+ */
+ ocelot_write_gix(ocelot, 0x299, ANA_POL_MODE_CFG,
+ OCELOT_POLICER_DISCARD);
+ ocelot_write_gix(ocelot, 0x1, ANA_POL_PIR_CFG,
+ OCELOT_POLICER_DISCARD);
+ ocelot_write_gix(ocelot, 0x3fffff, ANA_POL_PIR_STATE,
+ OCELOT_POLICER_DISCARD);
+ ocelot_write_gix(ocelot, 0x0, ANA_POL_CIR_CFG,
+ OCELOT_POLICER_DISCARD);
+ ocelot_write_gix(ocelot, 0x3fffff, ANA_POL_CIR_STATE,
+ OCELOT_POLICER_DISCARD);
+
+ for (i = 0; i < OCELOT_NUM_VCAP_BLOCKS; i++) {
+ struct ocelot_vcap_block *block = &ocelot->block[i];
+ struct vcap_props *vcap = &ocelot->vcap[i];
+
+ INIT_LIST_HEAD(&block->rules);
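+ /* VCAP policers are allocated downwards, starting right below the
+ * discard policer.
+ */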
+ block->pol_lpr = OCELOT_POLICER_DISCARD - 1;
+
+ ocelot_vcap_detect_constants(ocelot, vcap);
+ ocelot_vcap_init_one(ocelot, vcap);
+ }
+
+ INIT_LIST_HEAD(&ocelot->dummy_rules);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.h b/drivers/net/ethernet/mscc/ocelot_vcap.h
new file mode 100644
index 000000000..82fd10581
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.h
@@ -0,0 +1,316 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/* Microsemi Ocelot Switch driver
+ * Copyright (c) 2019 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_VCAP_H_
+#define _MSCC_OCELOT_VCAP_H_
+
+#include "ocelot.h"
+#include "ocelot_police.h"
+#include <net/sch_generic.h>
+#include <net/pkt_cls.h>
+
+#define OCELOT_POLICER_DISCARD 0x17f
+
+struct ocelot_ipv4 {
+ u8 addr[4];
+};
+
+enum ocelot_vcap_bit {
+ OCELOT_VCAP_BIT_ANY,
+ OCELOT_VCAP_BIT_0,
+ OCELOT_VCAP_BIT_1
+};
+
+struct ocelot_vcap_u8 {
+ u8 value[1];
+ u8 mask[1];
+};
+
+struct ocelot_vcap_u16 {
+ u8 value[2];
+ u8 mask[2];
+};
+
+struct ocelot_vcap_u24 {
+ u8 value[3];
+ u8 mask[3];
+};
+
+struct ocelot_vcap_u32 {
+ u8 value[4];
+ u8 mask[4];
+};
+
+struct ocelot_vcap_u40 {
+ u8 value[5];
+ u8 mask[5];
+};
+
+struct ocelot_vcap_u48 {
+ u8 value[6];
+ u8 mask[6];
+};
+
+struct ocelot_vcap_u64 {
+ u8 value[8];
+ u8 mask[8];
+};
+
+struct ocelot_vcap_u128 {
+ u8 value[16];
+ u8 mask[16];
+};
+
+struct ocelot_vcap_vid {
+ u16 value;
+ u16 mask;
+};
+
+struct ocelot_vcap_ipv4 {
+ struct ocelot_ipv4 value;
+ struct ocelot_ipv4 mask;
+};
+
+struct ocelot_vcap_udp_tcp {
+ u16 value;
+ u16 mask;
+};
+
+struct ocelot_vcap_port {
+ u8 value;
+ u8 mask;
+};
+
+enum ocelot_vcap_key_type {
+ OCELOT_VCAP_KEY_ANY,
+ OCELOT_VCAP_KEY_ETYPE,
+ OCELOT_VCAP_KEY_LLC,
+ OCELOT_VCAP_KEY_SNAP,
+ OCELOT_VCAP_KEY_ARP,
+ OCELOT_VCAP_KEY_IPV4,
+ OCELOT_VCAP_KEY_IPV6
+};
+
+struct ocelot_vcap_key_vlan {
+ struct ocelot_vcap_vid vid; /* VLAN ID (12 bit) */
+ struct ocelot_vcap_u8 pcp; /* PCP (3 bit) */
+ enum ocelot_vcap_bit dei; /* DEI */
+ enum ocelot_vcap_bit tagged; /* Tagged/untagged frame */
+};
+
+struct ocelot_vcap_key_etype {
+ struct ocelot_vcap_u48 dmac;
+ struct ocelot_vcap_u48 smac;
+ struct ocelot_vcap_u16 etype;
+ struct ocelot_vcap_u16 data; /* MAC data */
+};
+
+struct ocelot_vcap_key_llc {
+ struct ocelot_vcap_u48 dmac;
+ struct ocelot_vcap_u48 smac;
+
+ /* LLC header: DSAP at byte 0, SSAP at byte 1, Control at byte 2 */
+ struct ocelot_vcap_u32 llc;
+};
+
+struct ocelot_vcap_key_snap {
+ struct ocelot_vcap_u48 dmac;
+ struct ocelot_vcap_u48 smac;
+
+ /* SNAP header: Organization Code at byte 0, Type at byte 3 */
+ struct ocelot_vcap_u40 snap;
+};
+
+struct ocelot_vcap_key_arp {
+ struct ocelot_vcap_u48 smac;
+ enum ocelot_vcap_bit arp; /* Opcode ARP/RARP */
+ enum ocelot_vcap_bit req; /* Opcode request/reply */
+ enum ocelot_vcap_bit unknown; /* Opcode unknown */
+ enum ocelot_vcap_bit smac_match; /* Sender MAC matches SMAC */
+ enum ocelot_vcap_bit dmac_match; /* Target MAC matches DMAC */
+
+ /* Protocol addr. length 4, hardware length 6 */
+ enum ocelot_vcap_bit length;
+
+ enum ocelot_vcap_bit ip; /* Protocol address type IP */
+ enum ocelot_vcap_bit ethernet; /* Hardware address type Ethernet */
+ struct ocelot_vcap_ipv4 sip; /* Sender IP address */
+ struct ocelot_vcap_ipv4 dip; /* Target IP address */
+};
+
+struct ocelot_vcap_key_ipv4 {
+ enum ocelot_vcap_bit ttl; /* TTL zero */
+ enum ocelot_vcap_bit fragment; /* Fragment */
+ enum ocelot_vcap_bit options; /* Header options */
+ struct ocelot_vcap_u8 ds;
+ struct ocelot_vcap_u8 proto; /* Protocol */
+ struct ocelot_vcap_ipv4 sip; /* Source IP address */
+ struct ocelot_vcap_ipv4 dip; /* Destination IP address */
+ struct ocelot_vcap_u48 data; /* Not UDP/TCP: IP data */
+ struct ocelot_vcap_udp_tcp sport; /* UDP/TCP: Source port */
+ struct ocelot_vcap_udp_tcp dport; /* UDP/TCP: Destination port */
+ enum ocelot_vcap_bit tcp_fin;
+ enum ocelot_vcap_bit tcp_syn;
+ enum ocelot_vcap_bit tcp_rst;
+ enum ocelot_vcap_bit tcp_psh;
+ enum ocelot_vcap_bit tcp_ack;
+ enum ocelot_vcap_bit tcp_urg;
+ enum ocelot_vcap_bit sip_eq_dip; /* SIP equals DIP */
+ enum ocelot_vcap_bit sport_eq_dport; /* SPORT equals DPORT */
+ enum ocelot_vcap_bit seq_zero; /* TCP sequence number is zero */
+};
+
+struct ocelot_vcap_key_ipv6 {
+ struct ocelot_vcap_u8 proto; /* IPv6 protocol */
+ struct ocelot_vcap_u128 sip; /* IPv6 source (byte 0-7 ignored) */
+ struct ocelot_vcap_u128 dip; /* IPv6 destination (byte 0-7 ignored) */
+ enum ocelot_vcap_bit ttl; /* TTL zero */
+ struct ocelot_vcap_u8 ds;
+ struct ocelot_vcap_u48 data; /* Not UDP/TCP: IP data */
+ struct ocelot_vcap_udp_tcp sport;
+ struct ocelot_vcap_udp_tcp dport;
+ enum ocelot_vcap_bit tcp_fin;
+ enum ocelot_vcap_bit tcp_syn;
+ enum ocelot_vcap_bit tcp_rst;
+ enum ocelot_vcap_bit tcp_psh;
+ enum ocelot_vcap_bit tcp_ack;
+ enum ocelot_vcap_bit tcp_urg;
+ enum ocelot_vcap_bit sip_eq_dip; /* SIP equals DIP */
+ enum ocelot_vcap_bit sport_eq_dport; /* SPORT equals DPORT */
+ enum ocelot_vcap_bit seq_zero; /* TCP sequence number is zero */
+};
+
+enum ocelot_mask_mode {
+ OCELOT_MASK_MODE_NONE,
+ OCELOT_MASK_MODE_PERMIT_DENY,
+ OCELOT_MASK_MODE_POLICY,
+ OCELOT_MASK_MODE_REDIRECT,
+};
+
+enum ocelot_es0_tag {
+ OCELOT_NO_ES0_TAG,
+ OCELOT_ES0_TAG,
+ OCELOT_FORCE_PORT_TAG,
+ OCELOT_FORCE_UNTAG,
+};
+
+enum ocelot_tag_tpid_sel {
+ OCELOT_TAG_TPID_SEL_8021Q,
+ OCELOT_TAG_TPID_SEL_8021AD,
+};
+
+struct ocelot_vcap_action {
+ union {
+ /* VCAP ES0 */
+ struct {
+ enum ocelot_es0_tag push_outer_tag;
+ enum ocelot_es0_tag push_inner_tag;
+ enum ocelot_tag_tpid_sel tag_a_tpid_sel;
+ int tag_a_vid_sel;
+ int tag_a_pcp_sel;
+ u16 vid_a_val;
+ u8 pcp_a_val;
+ u8 dei_a_val;
+ enum ocelot_tag_tpid_sel tag_b_tpid_sel;
+ int tag_b_vid_sel;
+ int tag_b_pcp_sel;
+ u16 vid_b_val;
+ u8 pcp_b_val;
+ u8 dei_b_val;
+ };
+
+ /* VCAP IS1 */
+ struct {
+ bool vid_replace_ena;
+ u16 vid;
+ bool vlan_pop_cnt_ena;
+ int vlan_pop_cnt;
+ bool pcp_dei_ena;
+ u8 pcp;
+ u8 dei;
+ bool qos_ena;
+ u8 qos_val;
+ u8 pag_override_mask;
+ u8 pag_val;
+ };
+
+ /* VCAP IS2 */
+ struct {
+ bool cpu_copy_ena;
+ u8 cpu_qu_num;
+ enum ocelot_mask_mode mask_mode;
+ unsigned long port_mask;
+ bool police_ena;
+ struct ocelot_policer pol;
+ u32 pol_ix;
+ };
+ };
+};
+
+struct ocelot_vcap_stats {
+ u64 bytes;
+ u64 pkts;
+ u64 used;
+};
+
+enum ocelot_vcap_filter_type {
+ OCELOT_VCAP_FILTER_DUMMY,
+ OCELOT_VCAP_FILTER_PAG,
+ OCELOT_VCAP_FILTER_OFFLOAD,
+};
+
+struct ocelot_vcap_filter {
+ struct list_head list;
+
+ enum ocelot_vcap_filter_type type;
+ int block_id;
+ int goto_target;
+ int lookup;
+ u8 pag;
+ u16 prio;
+ u32 id;
+
+ struct ocelot_vcap_action action;
+ struct ocelot_vcap_stats stats;
+ /* For VCAP IS1 and IS2 */
+ unsigned long ingress_port_mask;
+ /* For VCAP ES0 */
+ struct ocelot_vcap_port ingress_port;
+ struct ocelot_vcap_port egress_port;
+
+ enum ocelot_vcap_bit dmac_mc;
+ enum ocelot_vcap_bit dmac_bc;
+ struct ocelot_vcap_key_vlan vlan;
+
+ enum ocelot_vcap_key_type key_type;
+ union {
+ /* OCELOT_VCAP_KEY_ANY: No specific fields */
+ struct ocelot_vcap_key_etype etype;
+ struct ocelot_vcap_key_llc llc;
+ struct ocelot_vcap_key_snap snap;
+ struct ocelot_vcap_key_arp arp;
+ struct ocelot_vcap_key_ipv4 ipv4;
+ struct ocelot_vcap_key_ipv6 ipv6;
+ } key;
+};
+
+int ocelot_vcap_filter_add(struct ocelot *ocelot,
+ struct ocelot_vcap_filter *rule,
+ struct netlink_ext_ack *extack);
+int ocelot_vcap_filter_del(struct ocelot *ocelot,
+ struct ocelot_vcap_filter *rule);
+int ocelot_vcap_filter_stats_update(struct ocelot *ocelot,
+ struct ocelot_vcap_filter *rule);
+struct ocelot_vcap_filter *
+ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, int id);
+
+void ocelot_detect_vcap_constants(struct ocelot *ocelot);
+int ocelot_vcap_init(struct ocelot *ocelot);
+
+int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv,
+ struct flow_cls_offload *f,
+ bool ingress);
+
+#endif /* _MSCC_OCELOT_VCAP_H_ */
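As a usage illustration of the structures and prototypes declared above, the sketch below shows roughly how a caller could describe a rule that traps ARP requests received on port 0 to the CPU and install it with ocelot_vcap_filter_add(). This is not code from the patch: the priority, id and queue values are arbitrary, VCAP_IS2 is assumed to come from <soc/mscc/ocelot_vcap.h>, and error handling plus freeing of the filter are elided.

/* Hedged usage sketch (not part of this driver). */
#include <linux/slab.h>
#include <soc/mscc/ocelot_vcap.h>
#include "ocelot_vcap.h"

static int example_trap_arp_to_cpu(struct ocelot *ocelot)
{
	struct ocelot_vcap_filter *filter;

	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return -ENOMEM;

	filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
	filter->block_id = VCAP_IS2;			/* assumed enum value */
	filter->ingress_port_mask = BIT(0);		/* match port 0 only */
	filter->prio = 1;
	filter->id = 1;
	filter->key_type = OCELOT_VCAP_KEY_ARP;
	filter->key.arp.req = OCELOT_VCAP_BIT_1;	/* ARP requests */

	/* Copy to CPU queue 0 and forward to no front port. */
	filter->action.cpu_copy_ena = true;
	filter->action.cpu_qu_num = 0;
	filter->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
	filter->action.port_mask = 0;

	return ocelot_vcap_filter_add(ocelot, filter, NULL);
}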
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
new file mode 100644
index 000000000..9cf2bc5f4
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -0,0 +1,1325 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_net.h>
+#include <linux/netdevice.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/mfd/syscon.h>
+#include <linux/skbuff.h>
+#include <net/switchdev.h>
+
+#include <soc/mscc/ocelot_vcap.h>
+#include <soc/mscc/ocelot_hsio.h>
+#include "ocelot.h"
+
+#define IFH_EXTRACT_BITFIELD64(x, o, w) (((x) >> (o)) & GENMASK_ULL((w) - 1, 0))
+
+static const u32 ocelot_ana_regmap[] = {
+ REG(ANA_ADVLEARN, 0x009000),
+ REG(ANA_VLANMASK, 0x009004),
+ REG(ANA_PORT_B_DOMAIN, 0x009008),
+ REG(ANA_ANAGEFIL, 0x00900c),
+ REG(ANA_ANEVENTS, 0x009010),
+ REG(ANA_STORMLIMIT_BURST, 0x009014),
+ REG(ANA_STORMLIMIT_CFG, 0x009018),
+ REG(ANA_ISOLATED_PORTS, 0x009028),
+ REG(ANA_COMMUNITY_PORTS, 0x00902c),
+ REG(ANA_AUTOAGE, 0x009030),
+ REG(ANA_MACTOPTIONS, 0x009034),
+ REG(ANA_LEARNDISC, 0x009038),
+ REG(ANA_AGENCTRL, 0x00903c),
+ REG(ANA_MIRRORPORTS, 0x009040),
+ REG(ANA_EMIRRORPORTS, 0x009044),
+ REG(ANA_FLOODING, 0x009048),
+ REG(ANA_FLOODING_IPMC, 0x00904c),
+ REG(ANA_SFLOW_CFG, 0x009050),
+ REG(ANA_PORT_MODE, 0x009080),
+ REG(ANA_PGID_PGID, 0x008c00),
+ REG(ANA_TABLES_ANMOVED, 0x008b30),
+ REG(ANA_TABLES_MACHDATA, 0x008b34),
+ REG(ANA_TABLES_MACLDATA, 0x008b38),
+ REG(ANA_TABLES_MACACCESS, 0x008b3c),
+ REG(ANA_TABLES_MACTINDX, 0x008b40),
+ REG(ANA_TABLES_VLANACCESS, 0x008b44),
+ REG(ANA_TABLES_VLANTIDX, 0x008b48),
+ REG(ANA_TABLES_ISDXACCESS, 0x008b4c),
+ REG(ANA_TABLES_ISDXTIDX, 0x008b50),
+ REG(ANA_TABLES_ENTRYLIM, 0x008b00),
+ REG(ANA_TABLES_PTP_ID_HIGH, 0x008b54),
+ REG(ANA_TABLES_PTP_ID_LOW, 0x008b58),
+ REG(ANA_MSTI_STATE, 0x008e00),
+ REG(ANA_PORT_VLAN_CFG, 0x007000),
+ REG(ANA_PORT_DROP_CFG, 0x007004),
+ REG(ANA_PORT_QOS_CFG, 0x007008),
+ REG(ANA_PORT_VCAP_CFG, 0x00700c),
+ REG(ANA_PORT_VCAP_S1_KEY_CFG, 0x007010),
+ REG(ANA_PORT_VCAP_S2_CFG, 0x00701c),
+ REG(ANA_PORT_PCP_DEI_MAP, 0x007020),
+ REG(ANA_PORT_CPU_FWD_CFG, 0x007060),
+ REG(ANA_PORT_CPU_FWD_BPDU_CFG, 0x007064),
+ REG(ANA_PORT_CPU_FWD_GARP_CFG, 0x007068),
+ REG(ANA_PORT_CPU_FWD_CCM_CFG, 0x00706c),
+ REG(ANA_PORT_PORT_CFG, 0x007070),
+ REG(ANA_PORT_POL_CFG, 0x007074),
+ REG(ANA_PORT_PTP_CFG, 0x007078),
+ REG(ANA_PORT_PTP_DLY1_CFG, 0x00707c),
+ REG(ANA_OAM_UPM_LM_CNT, 0x007c00),
+ REG(ANA_PORT_PTP_DLY2_CFG, 0x007080),
+ REG(ANA_PFC_PFC_CFG, 0x008800),
+ REG(ANA_PFC_PFC_TIMER, 0x008804),
+ REG(ANA_IPT_OAM_MEP_CFG, 0x008000),
+ REG(ANA_IPT_IPT, 0x008004),
+ REG(ANA_PPT_PPT, 0x008ac0),
+ REG(ANA_FID_MAP_FID_MAP, 0x000000),
+ REG(ANA_AGGR_CFG, 0x0090b4),
+ REG(ANA_CPUQ_CFG, 0x0090b8),
+ REG(ANA_CPUQ_CFG2, 0x0090bc),
+ REG(ANA_CPUQ_8021_CFG, 0x0090c0),
+ REG(ANA_DSCP_CFG, 0x009100),
+ REG(ANA_DSCP_REWR_CFG, 0x009200),
+ REG(ANA_VCAP_RNG_TYPE_CFG, 0x009240),
+ REG(ANA_VCAP_RNG_VAL_CFG, 0x009260),
+ REG(ANA_VRAP_CFG, 0x009280),
+ REG(ANA_VRAP_HDR_DATA, 0x009284),
+ REG(ANA_VRAP_HDR_MASK, 0x009288),
+ REG(ANA_DISCARD_CFG, 0x00928c),
+ REG(ANA_FID_CFG, 0x009290),
+ REG(ANA_POL_PIR_CFG, 0x004000),
+ REG(ANA_POL_CIR_CFG, 0x004004),
+ REG(ANA_POL_MODE_CFG, 0x004008),
+ REG(ANA_POL_PIR_STATE, 0x00400c),
+ REG(ANA_POL_CIR_STATE, 0x004010),
+ REG(ANA_POL_STATE, 0x004014),
+ REG(ANA_POL_FLOWC, 0x008b80),
+ REG(ANA_POL_HYST, 0x008bec),
+ REG(ANA_POL_MISC_CFG, 0x008bf0),
+};
+
+static const u32 ocelot_qs_regmap[] = {
+ REG(QS_XTR_GRP_CFG, 0x000000),
+ REG(QS_XTR_RD, 0x000008),
+ REG(QS_XTR_FRM_PRUNING, 0x000010),
+ REG(QS_XTR_FLUSH, 0x000018),
+ REG(QS_XTR_DATA_PRESENT, 0x00001c),
+ REG(QS_XTR_CFG, 0x000020),
+ REG(QS_INJ_GRP_CFG, 0x000024),
+ REG(QS_INJ_WR, 0x00002c),
+ REG(QS_INJ_CTRL, 0x000034),
+ REG(QS_INJ_STATUS, 0x00003c),
+ REG(QS_INJ_ERR, 0x000040),
+ REG(QS_INH_DBG, 0x000048),
+};
+
+static const u32 ocelot_qsys_regmap[] = {
+ REG(QSYS_PORT_MODE, 0x011200),
+ REG(QSYS_SWITCH_PORT_MODE, 0x011234),
+ REG(QSYS_STAT_CNT_CFG, 0x011264),
+ REG(QSYS_EEE_CFG, 0x011268),
+ REG(QSYS_EEE_THRES, 0x011294),
+ REG(QSYS_IGR_NO_SHARING, 0x011298),
+ REG(QSYS_EGR_NO_SHARING, 0x01129c),
+ REG(QSYS_SW_STATUS, 0x0112a0),
+ REG(QSYS_EXT_CPU_CFG, 0x0112d0),
+ REG(QSYS_PAD_CFG, 0x0112d4),
+ REG(QSYS_CPU_GROUP_MAP, 0x0112d8),
+ REG(QSYS_QMAP, 0x0112dc),
+ REG(QSYS_ISDX_SGRP, 0x011400),
+ REG(QSYS_TIMED_FRAME_ENTRY, 0x014000),
+ REG(QSYS_TFRM_MISC, 0x011310),
+ REG(QSYS_TFRM_PORT_DLY, 0x011314),
+ REG(QSYS_TFRM_TIMER_CFG_1, 0x011318),
+ REG(QSYS_TFRM_TIMER_CFG_2, 0x01131c),
+ REG(QSYS_TFRM_TIMER_CFG_3, 0x011320),
+ REG(QSYS_TFRM_TIMER_CFG_4, 0x011324),
+ REG(QSYS_TFRM_TIMER_CFG_5, 0x011328),
+ REG(QSYS_TFRM_TIMER_CFG_6, 0x01132c),
+ REG(QSYS_TFRM_TIMER_CFG_7, 0x011330),
+ REG(QSYS_TFRM_TIMER_CFG_8, 0x011334),
+ REG(QSYS_RED_PROFILE, 0x011338),
+ REG(QSYS_RES_QOS_MODE, 0x011378),
+ REG(QSYS_RES_CFG, 0x012000),
+ REG(QSYS_RES_STAT, 0x012004),
+ REG(QSYS_EGR_DROP_MODE, 0x01137c),
+ REG(QSYS_EQ_CTRL, 0x011380),
+ REG(QSYS_EVENTS_CORE, 0x011384),
+ REG(QSYS_CIR_CFG, 0x000000),
+ REG(QSYS_EIR_CFG, 0x000004),
+ REG(QSYS_SE_CFG, 0x000008),
+ REG(QSYS_SE_DWRR_CFG, 0x00000c),
+ REG(QSYS_SE_CONNECT, 0x00003c),
+ REG(QSYS_SE_DLB_SENSE, 0x000040),
+ REG(QSYS_CIR_STATE, 0x000044),
+ REG(QSYS_EIR_STATE, 0x000048),
+ REG(QSYS_SE_STATE, 0x00004c),
+ REG(QSYS_HSCH_MISC_CFG, 0x011388),
+};
+
+static const u32 ocelot_rew_regmap[] = {
+ REG(REW_PORT_VLAN_CFG, 0x000000),
+ REG(REW_TAG_CFG, 0x000004),
+ REG(REW_PORT_CFG, 0x000008),
+ REG(REW_DSCP_CFG, 0x00000c),
+ REG(REW_PCP_DEI_QOS_MAP_CFG, 0x000010),
+ REG(REW_PTP_CFG, 0x000050),
+ REG(REW_PTP_DLY1_CFG, 0x000054),
+ REG(REW_DSCP_REMAP_DP1_CFG, 0x000690),
+ REG(REW_DSCP_REMAP_CFG, 0x000790),
+ REG(REW_STAT_CFG, 0x000890),
+ REG(REW_PPT, 0x000680),
+};
+
+static const u32 ocelot_sys_regmap[] = {
+ REG(SYS_COUNT_RX_OCTETS, 0x000000),
+ REG(SYS_COUNT_RX_UNICAST, 0x000004),
+ REG(SYS_COUNT_RX_MULTICAST, 0x000008),
+ REG(SYS_COUNT_RX_BROADCAST, 0x00000c),
+ REG(SYS_COUNT_RX_SHORTS, 0x000010),
+ REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
+ REG(SYS_COUNT_RX_JABBERS, 0x000018),
+ REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c),
+ REG(SYS_COUNT_RX_SYM_ERRS, 0x000020),
+ REG(SYS_COUNT_RX_64, 0x000024),
+ REG(SYS_COUNT_RX_65_127, 0x000028),
+ REG(SYS_COUNT_RX_128_255, 0x00002c),
+ REG(SYS_COUNT_RX_256_1023, 0x000030),
+ REG(SYS_COUNT_RX_1024_1526, 0x000034),
+ REG(SYS_COUNT_RX_1527_MAX, 0x000038),
+ REG(SYS_COUNT_RX_PAUSE, 0x00003c),
+ REG(SYS_COUNT_RX_CONTROL, 0x000040),
+ REG(SYS_COUNT_RX_LONGS, 0x000044),
+ REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x000048),
+ REG(SYS_COUNT_TX_OCTETS, 0x000100),
+ REG(SYS_COUNT_TX_UNICAST, 0x000104),
+ REG(SYS_COUNT_TX_MULTICAST, 0x000108),
+ REG(SYS_COUNT_TX_BROADCAST, 0x00010c),
+ REG(SYS_COUNT_TX_COLLISION, 0x000110),
+ REG(SYS_COUNT_TX_DROPS, 0x000114),
+ REG(SYS_COUNT_TX_PAUSE, 0x000118),
+ REG(SYS_COUNT_TX_64, 0x00011c),
+ REG(SYS_COUNT_TX_65_127, 0x000120),
+ REG(SYS_COUNT_TX_128_511, 0x000124),
+ REG(SYS_COUNT_TX_512_1023, 0x000128),
+ REG(SYS_COUNT_TX_1024_1526, 0x00012c),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000130),
+ REG(SYS_COUNT_TX_AGING, 0x000170),
+ REG(SYS_RESET_CFG, 0x000508),
+ REG(SYS_CMID, 0x00050c),
+ REG(SYS_VLAN_ETYPE_CFG, 0x000510),
+ REG(SYS_PORT_MODE, 0x000514),
+ REG(SYS_FRONT_PORT_MODE, 0x000548),
+ REG(SYS_FRM_AGING, 0x000574),
+ REG(SYS_STAT_CFG, 0x000578),
+ REG(SYS_SW_STATUS, 0x00057c),
+ REG(SYS_MISC_CFG, 0x0005ac),
+ REG(SYS_REW_MAC_HIGH_CFG, 0x0005b0),
+ REG(SYS_REW_MAC_LOW_CFG, 0x0005dc),
+ REG(SYS_CM_ADDR, 0x000500),
+ REG(SYS_CM_DATA, 0x000504),
+ REG(SYS_PAUSE_CFG, 0x000608),
+ REG(SYS_PAUSE_TOT_CFG, 0x000638),
+ REG(SYS_ATOP, 0x00063c),
+ REG(SYS_ATOP_TOT_CFG, 0x00066c),
+ REG(SYS_MAC_FC_CFG, 0x000670),
+ REG(SYS_MMGT, 0x00069c),
+ REG(SYS_MMGT_FAST, 0x0006a0),
+ REG(SYS_EVENTS_DIF, 0x0006a4),
+ REG(SYS_EVENTS_CORE, 0x0006b4),
+ REG(SYS_CNT, 0x000000),
+ REG(SYS_PTP_STATUS, 0x0006b8),
+ REG(SYS_PTP_TXSTAMP, 0x0006bc),
+ REG(SYS_PTP_NXT, 0x0006c0),
+ REG(SYS_PTP_CFG, 0x0006c4),
+};
+
+static const u32 ocelot_vcap_regmap[] = {
+ /* VCAP_CORE_CFG */
+ REG(VCAP_CORE_UPDATE_CTRL, 0x000000),
+ REG(VCAP_CORE_MV_CFG, 0x000004),
+ /* VCAP_CORE_CACHE */
+ REG(VCAP_CACHE_ENTRY_DAT, 0x000008),
+ REG(VCAP_CACHE_MASK_DAT, 0x000108),
+ REG(VCAP_CACHE_ACTION_DAT, 0x000208),
+ REG(VCAP_CACHE_CNT_DAT, 0x000308),
+ REG(VCAP_CACHE_TG_DAT, 0x000388),
+ /* VCAP_CONST */
+ REG(VCAP_CONST_VCAP_VER, 0x000398),
+ REG(VCAP_CONST_ENTRY_WIDTH, 0x00039c),
+ REG(VCAP_CONST_ENTRY_CNT, 0x0003a0),
+ REG(VCAP_CONST_ENTRY_SWCNT, 0x0003a4),
+ REG(VCAP_CONST_ENTRY_TG_WIDTH, 0x0003a8),
+ REG(VCAP_CONST_ACTION_DEF_CNT, 0x0003ac),
+ REG(VCAP_CONST_ACTION_WIDTH, 0x0003b0),
+ REG(VCAP_CONST_CNT_WIDTH, 0x0003b4),
+ REG(VCAP_CONST_CORE_CNT, 0x0003b8),
+ REG(VCAP_CONST_IF_CNT, 0x0003bc),
+};
+
+static const u32 ocelot_ptp_regmap[] = {
+ REG(PTP_PIN_CFG, 0x000000),
+ REG(PTP_PIN_TOD_SEC_MSB, 0x000004),
+ REG(PTP_PIN_TOD_SEC_LSB, 0x000008),
+ REG(PTP_PIN_TOD_NSEC, 0x00000c),
+ REG(PTP_PIN_WF_HIGH_PERIOD, 0x000014),
+ REG(PTP_PIN_WF_LOW_PERIOD, 0x000018),
+ REG(PTP_CFG_MISC, 0x0000a0),
+ REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4),
+ REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8),
+};
+
+static const u32 ocelot_dev_gmii_regmap[] = {
+ REG(DEV_CLOCK_CFG, 0x0),
+ REG(DEV_PORT_MISC, 0x4),
+ REG(DEV_EVENTS, 0x8),
+ REG(DEV_EEE_CFG, 0xc),
+ REG(DEV_RX_PATH_DELAY, 0x10),
+ REG(DEV_TX_PATH_DELAY, 0x14),
+ REG(DEV_PTP_PREDICT_CFG, 0x18),
+ REG(DEV_MAC_ENA_CFG, 0x1c),
+ REG(DEV_MAC_MODE_CFG, 0x20),
+ REG(DEV_MAC_MAXLEN_CFG, 0x24),
+ REG(DEV_MAC_TAGS_CFG, 0x28),
+ REG(DEV_MAC_ADV_CHK_CFG, 0x2c),
+ REG(DEV_MAC_IFG_CFG, 0x30),
+ REG(DEV_MAC_HDX_CFG, 0x34),
+ REG(DEV_MAC_DBG_CFG, 0x38),
+ REG(DEV_MAC_FC_MAC_LOW_CFG, 0x3c),
+ REG(DEV_MAC_FC_MAC_HIGH_CFG, 0x40),
+ REG(DEV_MAC_STICKY, 0x44),
+ REG(PCS1G_CFG, 0x48),
+ REG(PCS1G_MODE_CFG, 0x4c),
+ REG(PCS1G_SD_CFG, 0x50),
+ REG(PCS1G_ANEG_CFG, 0x54),
+ REG(PCS1G_ANEG_NP_CFG, 0x58),
+ REG(PCS1G_LB_CFG, 0x5c),
+ REG(PCS1G_DBG_CFG, 0x60),
+ REG(PCS1G_CDET_CFG, 0x64),
+ REG(PCS1G_ANEG_STATUS, 0x68),
+ REG(PCS1G_ANEG_NP_STATUS, 0x6c),
+ REG(PCS1G_LINK_STATUS, 0x70),
+ REG(PCS1G_LINK_DOWN_CNT, 0x74),
+ REG(PCS1G_STICKY, 0x78),
+ REG(PCS1G_DEBUG_STATUS, 0x7c),
+ REG(PCS1G_LPI_CFG, 0x80),
+ REG(PCS1G_LPI_WAKE_ERROR_CNT, 0x84),
+ REG(PCS1G_LPI_STATUS, 0x88),
+ REG(PCS1G_TSTPAT_MODE_CFG, 0x8c),
+ REG(PCS1G_TSTPAT_STATUS, 0x90),
+ REG(DEV_PCS_FX100_CFG, 0x94),
+ REG(DEV_PCS_FX100_STATUS, 0x98),
+};
+
+static const u32 *ocelot_regmap[TARGET_MAX] = {
+ [ANA] = ocelot_ana_regmap,
+ [QS] = ocelot_qs_regmap,
+ [QSYS] = ocelot_qsys_regmap,
+ [REW] = ocelot_rew_regmap,
+ [SYS] = ocelot_sys_regmap,
+ [S0] = ocelot_vcap_regmap,
+ [S1] = ocelot_vcap_regmap,
+ [S2] = ocelot_vcap_regmap,
+ [PTP] = ocelot_ptp_regmap,
+ [DEV_GMII] = ocelot_dev_gmii_regmap,
+};
+
+static const struct reg_field ocelot_regfields[REGFIELD_MAX] = {
+ [ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 11, 11),
+ [ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 10),
+ [ANA_ANEVENTS_MSTI_DROP] = REG_FIELD(ANA_ANEVENTS, 27, 27),
+ [ANA_ANEVENTS_ACLKILL] = REG_FIELD(ANA_ANEVENTS, 26, 26),
+ [ANA_ANEVENTS_ACLUSED] = REG_FIELD(ANA_ANEVENTS, 25, 25),
+ [ANA_ANEVENTS_AUTOAGE] = REG_FIELD(ANA_ANEVENTS, 24, 24),
+ [ANA_ANEVENTS_VS2TTL1] = REG_FIELD(ANA_ANEVENTS, 23, 23),
+ [ANA_ANEVENTS_STORM_DROP] = REG_FIELD(ANA_ANEVENTS, 22, 22),
+ [ANA_ANEVENTS_LEARN_DROP] = REG_FIELD(ANA_ANEVENTS, 21, 21),
+ [ANA_ANEVENTS_AGED_ENTRY] = REG_FIELD(ANA_ANEVENTS, 20, 20),
+ [ANA_ANEVENTS_CPU_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 19, 19),
+ [ANA_ANEVENTS_AUTO_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 18, 18),
+ [ANA_ANEVENTS_LEARN_REMOVE] = REG_FIELD(ANA_ANEVENTS, 17, 17),
+ [ANA_ANEVENTS_AUTO_LEARNED] = REG_FIELD(ANA_ANEVENTS, 16, 16),
+ [ANA_ANEVENTS_AUTO_MOVED] = REG_FIELD(ANA_ANEVENTS, 15, 15),
+ [ANA_ANEVENTS_DROPPED] = REG_FIELD(ANA_ANEVENTS, 14, 14),
+ [ANA_ANEVENTS_CLASSIFIED_DROP] = REG_FIELD(ANA_ANEVENTS, 13, 13),
+ [ANA_ANEVENTS_CLASSIFIED_COPY] = REG_FIELD(ANA_ANEVENTS, 12, 12),
+ [ANA_ANEVENTS_VLAN_DISCARD] = REG_FIELD(ANA_ANEVENTS, 11, 11),
+ [ANA_ANEVENTS_FWD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 10, 10),
+ [ANA_ANEVENTS_MULTICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 9, 9),
+ [ANA_ANEVENTS_UNICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 8, 8),
+ [ANA_ANEVENTS_DEST_KNOWN] = REG_FIELD(ANA_ANEVENTS, 7, 7),
+ [ANA_ANEVENTS_BUCKET3_MATCH] = REG_FIELD(ANA_ANEVENTS, 6, 6),
+ [ANA_ANEVENTS_BUCKET2_MATCH] = REG_FIELD(ANA_ANEVENTS, 5, 5),
+ [ANA_ANEVENTS_BUCKET1_MATCH] = REG_FIELD(ANA_ANEVENTS, 4, 4),
+ [ANA_ANEVENTS_BUCKET0_MATCH] = REG_FIELD(ANA_ANEVENTS, 3, 3),
+ [ANA_ANEVENTS_CPU_OPERATION] = REG_FIELD(ANA_ANEVENTS, 2, 2),
+ [ANA_ANEVENTS_DMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 1, 1),
+ [ANA_ANEVENTS_SMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 0, 0),
+ [ANA_TABLES_MACACCESS_B_DOM] = REG_FIELD(ANA_TABLES_MACACCESS, 18, 18),
+ [ANA_TABLES_MACTINDX_BUCKET] = REG_FIELD(ANA_TABLES_MACTINDX, 10, 11),
+ [ANA_TABLES_MACTINDX_M_INDEX] = REG_FIELD(ANA_TABLES_MACTINDX, 0, 9),
+ [QSYS_TIMED_FRAME_ENTRY_TFRM_VLD] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 20, 20),
+ [QSYS_TIMED_FRAME_ENTRY_TFRM_FP] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 8, 19),
+ [QSYS_TIMED_FRAME_ENTRY_TFRM_PORTNO] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 4, 7),
+ [QSYS_TIMED_FRAME_ENTRY_TFRM_TM_SEL] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 1, 3),
+ [QSYS_TIMED_FRAME_ENTRY_TFRM_TM_T] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 0, 0),
+ [SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 2, 2),
+ [SYS_RESET_CFG_MEM_ENA] = REG_FIELD(SYS_RESET_CFG, 1, 1),
+ [SYS_RESET_CFG_MEM_INIT] = REG_FIELD(SYS_RESET_CFG, 0, 0),
+ /* Replicated per number of ports (12), register size 4 per port */
+ [QSYS_SWITCH_PORT_MODE_PORT_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 14, 14, 12, 4),
+ [QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 11, 13, 12, 4),
+ [QSYS_SWITCH_PORT_MODE_YEL_RSRVD] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 10, 10, 12, 4),
+ [QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 9, 9, 12, 4),
+ [QSYS_SWITCH_PORT_MODE_TX_PFC_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 1, 8, 12, 4),
+ [QSYS_SWITCH_PORT_MODE_TX_PFC_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 0, 0, 12, 4),
+ [SYS_PORT_MODE_DATA_WO_TS] = REG_FIELD_ID(SYS_PORT_MODE, 5, 6, 12, 4),
+ [SYS_PORT_MODE_INCL_INJ_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 3, 4, 12, 4),
+ [SYS_PORT_MODE_INCL_XTR_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 1, 2, 12, 4),
+ [SYS_PORT_MODE_INCL_HDR_ERR] = REG_FIELD_ID(SYS_PORT_MODE, 0, 0, 12, 4),
+ [SYS_PAUSE_CFG_PAUSE_START] = REG_FIELD_ID(SYS_PAUSE_CFG, 10, 18, 12, 4),
+ [SYS_PAUSE_CFG_PAUSE_STOP] = REG_FIELD_ID(SYS_PAUSE_CFG, 1, 9, 12, 4),
+ [SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 12, 4),
+};
+
+static const struct ocelot_stat_layout ocelot_stats_layout[] = {
+ { .name = "rx_octets", .offset = 0x00, },
+ { .name = "rx_unicast", .offset = 0x01, },
+ { .name = "rx_multicast", .offset = 0x02, },
+ { .name = "rx_broadcast", .offset = 0x03, },
+ { .name = "rx_shorts", .offset = 0x04, },
+ { .name = "rx_fragments", .offset = 0x05, },
+ { .name = "rx_jabbers", .offset = 0x06, },
+ { .name = "rx_crc_align_errs", .offset = 0x07, },
+ { .name = "rx_sym_errs", .offset = 0x08, },
+ { .name = "rx_frames_below_65_octets", .offset = 0x09, },
+ { .name = "rx_frames_65_to_127_octets", .offset = 0x0A, },
+ { .name = "rx_frames_128_to_255_octets", .offset = 0x0B, },
+ { .name = "rx_frames_256_to_511_octets", .offset = 0x0C, },
+ { .name = "rx_frames_512_to_1023_octets", .offset = 0x0D, },
+ { .name = "rx_frames_1024_to_1526_octets", .offset = 0x0E, },
+ { .name = "rx_frames_over_1526_octets", .offset = 0x0F, },
+ { .name = "rx_pause", .offset = 0x10, },
+ { .name = "rx_control", .offset = 0x11, },
+ { .name = "rx_longs", .offset = 0x12, },
+ { .name = "rx_classified_drops", .offset = 0x13, },
+ { .name = "rx_red_prio_0", .offset = 0x14, },
+ { .name = "rx_red_prio_1", .offset = 0x15, },
+ { .name = "rx_red_prio_2", .offset = 0x16, },
+ { .name = "rx_red_prio_3", .offset = 0x17, },
+ { .name = "rx_red_prio_4", .offset = 0x18, },
+ { .name = "rx_red_prio_5", .offset = 0x19, },
+ { .name = "rx_red_prio_6", .offset = 0x1A, },
+ { .name = "rx_red_prio_7", .offset = 0x1B, },
+ { .name = "rx_yellow_prio_0", .offset = 0x1C, },
+ { .name = "rx_yellow_prio_1", .offset = 0x1D, },
+ { .name = "rx_yellow_prio_2", .offset = 0x1E, },
+ { .name = "rx_yellow_prio_3", .offset = 0x1F, },
+ { .name = "rx_yellow_prio_4", .offset = 0x20, },
+ { .name = "rx_yellow_prio_5", .offset = 0x21, },
+ { .name = "rx_yellow_prio_6", .offset = 0x22, },
+ { .name = "rx_yellow_prio_7", .offset = 0x23, },
+ { .name = "rx_green_prio_0", .offset = 0x24, },
+ { .name = "rx_green_prio_1", .offset = 0x25, },
+ { .name = "rx_green_prio_2", .offset = 0x26, },
+ { .name = "rx_green_prio_3", .offset = 0x27, },
+ { .name = "rx_green_prio_4", .offset = 0x28, },
+ { .name = "rx_green_prio_5", .offset = 0x29, },
+ { .name = "rx_green_prio_6", .offset = 0x2A, },
+ { .name = "rx_green_prio_7", .offset = 0x2B, },
+ { .name = "tx_octets", .offset = 0x40, },
+ { .name = "tx_unicast", .offset = 0x41, },
+ { .name = "tx_multicast", .offset = 0x42, },
+ { .name = "tx_broadcast", .offset = 0x43, },
+ { .name = "tx_collision", .offset = 0x44, },
+ { .name = "tx_drops", .offset = 0x45, },
+ { .name = "tx_pause", .offset = 0x46, },
+ { .name = "tx_frames_below_65_octets", .offset = 0x47, },
+ { .name = "tx_frames_65_to_127_octets", .offset = 0x48, },
+ { .name = "tx_frames_128_255_octets", .offset = 0x49, },
+ { .name = "tx_frames_256_511_octets", .offset = 0x4A, },
+ { .name = "tx_frames_512_1023_octets", .offset = 0x4B, },
+ { .name = "tx_frames_1024_1526_octets", .offset = 0x4C, },
+ { .name = "tx_frames_over_1526_octets", .offset = 0x4D, },
+ { .name = "tx_yellow_prio_0", .offset = 0x4E, },
+ { .name = "tx_yellow_prio_1", .offset = 0x4F, },
+ { .name = "tx_yellow_prio_2", .offset = 0x50, },
+ { .name = "tx_yellow_prio_3", .offset = 0x51, },
+ { .name = "tx_yellow_prio_4", .offset = 0x52, },
+ { .name = "tx_yellow_prio_5", .offset = 0x53, },
+ { .name = "tx_yellow_prio_6", .offset = 0x54, },
+ { .name = "tx_yellow_prio_7", .offset = 0x55, },
+ { .name = "tx_green_prio_0", .offset = 0x56, },
+ { .name = "tx_green_prio_1", .offset = 0x57, },
+ { .name = "tx_green_prio_2", .offset = 0x58, },
+ { .name = "tx_green_prio_3", .offset = 0x59, },
+ { .name = "tx_green_prio_4", .offset = 0x5A, },
+ { .name = "tx_green_prio_5", .offset = 0x5B, },
+ { .name = "tx_green_prio_6", .offset = 0x5C, },
+ { .name = "tx_green_prio_7", .offset = 0x5D, },
+ { .name = "tx_aged", .offset = 0x5E, },
+ { .name = "drop_local", .offset = 0x80, },
+ { .name = "drop_tail", .offset = 0x81, },
+ { .name = "drop_yellow_prio_0", .offset = 0x82, },
+ { .name = "drop_yellow_prio_1", .offset = 0x83, },
+ { .name = "drop_yellow_prio_2", .offset = 0x84, },
+ { .name = "drop_yellow_prio_3", .offset = 0x85, },
+ { .name = "drop_yellow_prio_4", .offset = 0x86, },
+ { .name = "drop_yellow_prio_5", .offset = 0x87, },
+ { .name = "drop_yellow_prio_6", .offset = 0x88, },
+ { .name = "drop_yellow_prio_7", .offset = 0x89, },
+ { .name = "drop_green_prio_0", .offset = 0x8A, },
+ { .name = "drop_green_prio_1", .offset = 0x8B, },
+ { .name = "drop_green_prio_2", .offset = 0x8C, },
+ { .name = "drop_green_prio_3", .offset = 0x8D, },
+ { .name = "drop_green_prio_4", .offset = 0x8E, },
+ { .name = "drop_green_prio_5", .offset = 0x8F, },
+ { .name = "drop_green_prio_6", .offset = 0x90, },
+ { .name = "drop_green_prio_7", .offset = 0x91, },
+};
+
+static void ocelot_pll5_init(struct ocelot *ocelot)
+{
+ /* Configure PLL5. This will need a proper CCF driver.
+ * The values come from the VTSS API for Ocelot.
+ */
+ regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG4,
+ HSIO_PLL5G_CFG4_IB_CTRL(0x7600) |
+ HSIO_PLL5G_CFG4_IB_BIAS_CTRL(0x8));
+ regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG0,
+ HSIO_PLL5G_CFG0_CORE_CLK_DIV(0x11) |
+ HSIO_PLL5G_CFG0_CPU_CLK_DIV(2) |
+ HSIO_PLL5G_CFG0_ENA_BIAS |
+ HSIO_PLL5G_CFG0_ENA_VCO_BUF |
+ HSIO_PLL5G_CFG0_ENA_CP1 |
+ HSIO_PLL5G_CFG0_SELCPI(2) |
+ HSIO_PLL5G_CFG0_LOOP_BW_RES(0xe) |
+ HSIO_PLL5G_CFG0_SELBGV820(4) |
+ HSIO_PLL5G_CFG0_DIV4 |
+ HSIO_PLL5G_CFG0_ENA_CLKTREE |
+ HSIO_PLL5G_CFG0_ENA_LANE);
+ regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG2,
+ HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET |
+ HSIO_PLL5G_CFG2_EN_RESET_OVERRUN |
+ HSIO_PLL5G_CFG2_GAIN_TEST(0x8) |
+ HSIO_PLL5G_CFG2_ENA_AMPCTRL |
+ HSIO_PLL5G_CFG2_PWD_AMPCTRL_N |
+ HSIO_PLL5G_CFG2_AMPC_SEL(0x10));
+}
+
+static int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops)
+{
+ int ret;
+
+ ocelot->map = ocelot_regmap;
+ ocelot->stats_layout = ocelot_stats_layout;
+ ocelot->num_stats = ARRAY_SIZE(ocelot_stats_layout);
+ ocelot->shared_queue_sz = 224 * 1024;
+ ocelot->num_mact_rows = 1024;
+ ocelot->ops = ops;
+
+ ret = ocelot_regfields_init(ocelot, ocelot_regfields);
+ if (ret)
+ return ret;
+
+ ocelot_pll5_init(ocelot);
+
+ eth_random_addr(ocelot->base_mac);
+ ocelot->base_mac[5] &= 0xf0;
+
+ return 0;
+}
+
+static int ocelot_parse_ifh(u32 *_ifh, struct frame_info *info)
+{
+ u8 llen, wlen;
+ u64 ifh[2];
+
+ ifh[0] = be64_to_cpu(((__force __be64 *)_ifh)[0]);
+ ifh[1] = be64_to_cpu(((__force __be64 *)_ifh)[1]);
+
+ wlen = IFH_EXTRACT_BITFIELD64(ifh[0], 7, 8);
+ llen = IFH_EXTRACT_BITFIELD64(ifh[0], 15, 6);
+
+ info->len = OCELOT_BUFFER_CELL_SZ * wlen + llen - 80;
+
+ info->timestamp = IFH_EXTRACT_BITFIELD64(ifh[0], 21, 32);
+
+ info->port = IFH_EXTRACT_BITFIELD64(ifh[1], 43, 4);
+
+ info->tag_type = IFH_EXTRACT_BITFIELD64(ifh[1], 16, 1);
+ info->vid = IFH_EXTRACT_BITFIELD64(ifh[1], 0, 12);
+
+ return 0;
+}
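For clarity, here is a quick standalone illustration of the bitfield extraction used above. The IFH word is made up for the example (real words come from the extraction FIFO); only the port field at offset 43 (4 bits) and the VID at offset 0 (12 bits) are populated.

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel macros used above. */
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define IFH_EXTRACT_BITFIELD64(x, o, w) \
	(((x) >> (o)) & GENMASK_ULL((w) - 1, 0))

int main(void)
{
	/* Made-up second IFH word: source port 5, VID 100. */
	uint64_t ifh1 = (5ULL << 43) | 100ULL;

	printf("port: %llu\n",
	       (unsigned long long)IFH_EXTRACT_BITFIELD64(ifh1, 43, 4));
	printf("vid:  %llu\n",
	       (unsigned long long)IFH_EXTRACT_BITFIELD64(ifh1, 0, 12));
	return 0;
}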
+
+static int ocelot_rx_frame_word(struct ocelot *ocelot, u8 grp, bool ifh,
+ u32 *rval)
+{
+ u32 val;
+ u32 bytes_valid;
+
+ val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+ if (val == XTR_NOT_READY) {
+ if (ifh)
+ return -EIO;
+
+ do {
+ val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+ } while (val == XTR_NOT_READY);
+ }
+
+ switch (val) {
+ case XTR_ABORT:
+ return -EIO;
+ case XTR_EOF_0:
+ case XTR_EOF_1:
+ case XTR_EOF_2:
+ case XTR_EOF_3:
+ case XTR_PRUNED:
+ bytes_valid = XTR_VALID_BYTES(val);
+ val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+ if (val == XTR_ESCAPE)
+ *rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+ else
+ *rval = val;
+
+ return bytes_valid;
+ case XTR_ESCAPE:
+ *rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+
+ return 4;
+ default:
+ *rval = val;
+
+ return 4;
+ }
+}
+
+static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
+{
+ struct ocelot *ocelot = arg;
+ int i = 0, grp = 0;
+ int err = 0;
+
+ if (!(ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)))
+ return IRQ_NONE;
+
+ do {
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct ocelot_port_private *priv;
+ struct ocelot_port *ocelot_port;
+ u64 tod_in_ns, full_ts_in_ns;
+ struct frame_info info = {};
+ struct net_device *dev;
+ u32 ifh[4], val, *buf;
+ struct timespec64 ts;
+ int sz, len, buf_len;
+ struct sk_buff *skb;
+
+ for (i = 0; i < OCELOT_TAG_LEN / 4; i++) {
+ err = ocelot_rx_frame_word(ocelot, grp, true, &ifh[i]);
+ if (err != 4)
+ break;
+ }
+
+ if (err != 4)
+ break;
+
+ /* At this point the IFH was read correctly, so it is safe to
+ * presume that there is no error. err needs to be reset;
+ * otherwise a frame could arrive in the CPU queue between the
+ * while condition and the later error check, and in that case
+ * the new frame would just be flushed and never processed.
+ */
+ err = 0;
+
+ ocelot_parse_ifh(ifh, &info);
+
+ ocelot_port = ocelot->ports[info.port];
+ priv = container_of(ocelot_port, struct ocelot_port_private,
+ port);
+ dev = priv->dev;
+
+ skb = netdev_alloc_skb(dev, info.len);
+
+ if (unlikely(!skb)) {
+ netdev_err(dev, "Unable to allocate sk_buff\n");
+ err = -ENOMEM;
+ break;
+ }
+ buf_len = info.len - ETH_FCS_LEN;
+ buf = (u32 *)skb_put(skb, buf_len);
+
+ len = 0;
+ do {
+ sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
+ *buf++ = val;
+ len += sz;
+ } while (len < buf_len);
+
+ /* Read the FCS */
+ sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
+ /* Update the statistics if part of the FCS was read before */
+ len -= ETH_FCS_LEN - sz;
+
+ if (unlikely(dev->features & NETIF_F_RXFCS)) {
+ buf = (u32 *)skb_put(skb, ETH_FCS_LEN);
+ *buf = val;
+ }
+
+ if (sz < 0) {
+ err = sz;
+ break;
+ }
+
+ if (ocelot->ptp) {
+ ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
+
+ tod_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);
+ if ((tod_in_ns & 0xffffffff) < info.timestamp)
+ full_ts_in_ns = (((tod_in_ns >> 32) - 1) << 32) |
+ info.timestamp;
+ else
+ full_ts_in_ns = (tod_in_ns & GENMASK_ULL(63, 32)) |
+ info.timestamp;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamps->hwtstamp = full_ts_in_ns;
+ }
+
+ /* Everything we see on an interface that is in the HW bridge
+ * has already been forwarded.
+ */
+ if (ocelot->bridge_mask & BIT(info.port))
+ skb->offload_fwd_mark = 1;
+
+ skb->protocol = eth_type_trans(skb, dev);
+ if (!skb_defer_rx_timestamp(skb))
+ netif_rx(skb);
+ dev->stats.rx_bytes += len;
+ dev->stats.rx_packets++;
+ } while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp));
+
+ if (err)
+ while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp))
+ ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+
+ return IRQ_HANDLED;
+}
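The RX timestamp handling above only receives the low 32 bits of the nanosecond counter in the extraction header, so it reconstructs a full 64-bit time by borrowing the upper 32 bits from the current PTP time, stepping back one wrap if the low word has rolled over since the frame was stamped. A minimal standalone sketch of that reconstruction follows; the values are made up, not hardware data.

#include <stdint.h>
#include <stdio.h>

/* Rebuild a full 64-bit timestamp from the 32-bit value captured in the
 * extraction header and the current 64-bit time-of-day, mirroring the
 * logic in the RX path above.
 */
static uint64_t rebuild_ts(uint64_t tod_in_ns, uint32_t hw_ts)
{
	if ((uint32_t)tod_in_ns < hw_ts)
		/* The low word already wrapped past the captured value:
		 * the frame was stamped in the previous 2^32 ns window.
		 */
		return (((tod_in_ns >> 32) - 1) << 32) | hw_ts;

	return (tod_in_ns & 0xffffffff00000000ULL) | hw_ts;
}

int main(void)
{
	/* Frame stamped just before a low-word wrap, read just after it. */
	uint64_t tod = (7ULL << 32) | 0x00000010;
	uint32_t hw_ts = 0xfffffff0;

	printf("full ts: 0x%llx\n",
	       (unsigned long long)rebuild_ts(tod, hw_ts));
	return 0;
}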
+
+static irqreturn_t ocelot_ptp_rdy_irq_handler(int irq, void *arg)
+{
+ struct ocelot *ocelot = arg;
+
+ ocelot_get_txtstamp(ocelot);
+
+ return IRQ_HANDLED;
+}
+
+static const struct of_device_id mscc_ocelot_match[] = {
+ { .compatible = "mscc,vsc7514-switch" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mscc_ocelot_match);
+
+static int ocelot_reset(struct ocelot *ocelot)
+{
+ int retries = 100;
+ u32 val;
+
+ regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1);
+ regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
+
+ do {
+ msleep(1);
+ regmap_field_read(ocelot->regfields[SYS_RESET_CFG_MEM_INIT],
+ &val);
+ } while (val && --retries);
+
+ if (!retries)
+ return -ETIMEDOUT;
+
+ regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
+ regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1);
+
+ return 0;
+}
+
+/* Watermark encode
+ * Bit 8: Unit; 0:1, 1:16
+ * Bit 7-0: Value to be multiplied with unit
+ */
+static u16 ocelot_wm_enc(u16 value)
+{
+ WARN_ON(value >= 16 * BIT(8));
+
+ if (value >= BIT(8))
+ return BIT(8) | (value / 16);
+
+ return value;
+}
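Two worked examples of the watermark encoding above may help: values below 256 are stored as-is with unit 1, while larger values set bit 8 and store value / 16 (losing up to 15 units of precision). The decode helper below is written purely for illustration; it is not part of the driver.

#include <stdint.h>
#include <stdio.h>

/* Inverse of the encoding above: bit 8 selects the unit (0: 1, 1: 16),
 * bits 7-0 hold the value to be multiplied by that unit.
 */
static uint16_t example_wm_dec(uint16_t enc)
{
	uint16_t unit = (enc & 0x100) ? 16 : 1;

	return (enc & 0xff) * unit;
}

int main(void)
{
	/* 100 encodes as 100; 1000 encodes as 0x100 | (1000 / 16) = 0x13e,
	 * which decodes back to 62 * 16 = 992.
	 */
	printf("%u\n", example_wm_dec(100));	/* 100 */
	printf("%u\n", example_wm_dec(0x13e));	/* 992 */
	return 0;
}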
+
+static const struct ocelot_ops ocelot_ops = {
+ .reset = ocelot_reset,
+ .wm_enc = ocelot_wm_enc,
+ .port_to_netdev = ocelot_port_to_netdev,
+ .netdev_to_port = ocelot_netdev_to_port,
+};
+
+static const struct vcap_field vsc7514_vcap_es0_keys[] = {
+ [VCAP_ES0_EGR_PORT] = { 0, 4},
+ [VCAP_ES0_IGR_PORT] = { 4, 4},
+ [VCAP_ES0_RSV] = { 8, 2},
+ [VCAP_ES0_L2_MC] = { 10, 1},
+ [VCAP_ES0_L2_BC] = { 11, 1},
+ [VCAP_ES0_VID] = { 12, 12},
+ [VCAP_ES0_DP] = { 24, 1},
+ [VCAP_ES0_PCP] = { 25, 3},
+};
+
+static const struct vcap_field vsc7514_vcap_es0_actions[] = {
+ [VCAP_ES0_ACT_PUSH_OUTER_TAG] = { 0, 2},
+ [VCAP_ES0_ACT_PUSH_INNER_TAG] = { 2, 1},
+ [VCAP_ES0_ACT_TAG_A_TPID_SEL] = { 3, 2},
+ [VCAP_ES0_ACT_TAG_A_VID_SEL] = { 5, 1},
+ [VCAP_ES0_ACT_TAG_A_PCP_SEL] = { 6, 2},
+ [VCAP_ES0_ACT_TAG_A_DEI_SEL] = { 8, 2},
+ [VCAP_ES0_ACT_TAG_B_TPID_SEL] = { 10, 2},
+ [VCAP_ES0_ACT_TAG_B_VID_SEL] = { 12, 1},
+ [VCAP_ES0_ACT_TAG_B_PCP_SEL] = { 13, 2},
+ [VCAP_ES0_ACT_TAG_B_DEI_SEL] = { 15, 2},
+ [VCAP_ES0_ACT_VID_A_VAL] = { 17, 12},
+ [VCAP_ES0_ACT_PCP_A_VAL] = { 29, 3},
+ [VCAP_ES0_ACT_DEI_A_VAL] = { 32, 1},
+ [VCAP_ES0_ACT_VID_B_VAL] = { 33, 12},
+ [VCAP_ES0_ACT_PCP_B_VAL] = { 45, 3},
+ [VCAP_ES0_ACT_DEI_B_VAL] = { 48, 1},
+ [VCAP_ES0_ACT_RSV] = { 49, 24},
+ [VCAP_ES0_ACT_HIT_STICKY] = { 73, 1},
+};
+
+static const struct vcap_field vsc7514_vcap_is1_keys[] = {
+ [VCAP_IS1_HK_TYPE] = { 0, 1},
+ [VCAP_IS1_HK_LOOKUP] = { 1, 2},
+ [VCAP_IS1_HK_IGR_PORT_MASK] = { 3, 12},
+ [VCAP_IS1_HK_RSV] = { 15, 9},
+ [VCAP_IS1_HK_OAM_Y1731] = { 24, 1},
+ [VCAP_IS1_HK_L2_MC] = { 25, 1},
+ [VCAP_IS1_HK_L2_BC] = { 26, 1},
+ [VCAP_IS1_HK_IP_MC] = { 27, 1},
+ [VCAP_IS1_HK_VLAN_TAGGED] = { 28, 1},
+ [VCAP_IS1_HK_VLAN_DBL_TAGGED] = { 29, 1},
+ [VCAP_IS1_HK_TPID] = { 30, 1},
+ [VCAP_IS1_HK_VID] = { 31, 12},
+ [VCAP_IS1_HK_DEI] = { 43, 1},
+ [VCAP_IS1_HK_PCP] = { 44, 3},
+ /* Specific Fields for IS1 Half Key S1_NORMAL */
+ [VCAP_IS1_HK_L2_SMAC] = { 47, 48},
+ [VCAP_IS1_HK_ETYPE_LEN] = { 95, 1},
+ [VCAP_IS1_HK_ETYPE] = { 96, 16},
+ [VCAP_IS1_HK_IP_SNAP] = {112, 1},
+ [VCAP_IS1_HK_IP4] = {113, 1},
+ /* Layer-3 Information */
+ [VCAP_IS1_HK_L3_FRAGMENT] = {114, 1},
+ [VCAP_IS1_HK_L3_FRAG_OFS_GT0] = {115, 1},
+ [VCAP_IS1_HK_L3_OPTIONS] = {116, 1},
+ [VCAP_IS1_HK_L3_DSCP] = {117, 6},
+ [VCAP_IS1_HK_L3_IP4_SIP] = {123, 32},
+ /* Layer-4 Information */
+ [VCAP_IS1_HK_TCP_UDP] = {155, 1},
+ [VCAP_IS1_HK_TCP] = {156, 1},
+ [VCAP_IS1_HK_L4_SPORT] = {157, 16},
+ [VCAP_IS1_HK_L4_RNG] = {173, 8},
+ /* Specific Fields for IS1 Half Key S1_5TUPLE_IP4 */
+ [VCAP_IS1_HK_IP4_INNER_TPID] = { 47, 1},
+ [VCAP_IS1_HK_IP4_INNER_VID] = { 48, 12},
+ [VCAP_IS1_HK_IP4_INNER_DEI] = { 60, 1},
+ [VCAP_IS1_HK_IP4_INNER_PCP] = { 61, 3},
+ [VCAP_IS1_HK_IP4_IP4] = { 64, 1},
+ [VCAP_IS1_HK_IP4_L3_FRAGMENT] = { 65, 1},
+ [VCAP_IS1_HK_IP4_L3_FRAG_OFS_GT0] = { 66, 1},
+ [VCAP_IS1_HK_IP4_L3_OPTIONS] = { 67, 1},
+ [VCAP_IS1_HK_IP4_L3_DSCP] = { 68, 6},
+ [VCAP_IS1_HK_IP4_L3_IP4_DIP] = { 74, 32},
+ [VCAP_IS1_HK_IP4_L3_IP4_SIP] = {106, 32},
+ [VCAP_IS1_HK_IP4_L3_PROTO] = {138, 8},
+ [VCAP_IS1_HK_IP4_TCP_UDP] = {146, 1},
+ [VCAP_IS1_HK_IP4_TCP] = {147, 1},
+ [VCAP_IS1_HK_IP4_L4_RNG] = {148, 8},
+ [VCAP_IS1_HK_IP4_IP_PAYLOAD_S1_5TUPLE] = {156, 32},
+};
+
+static const struct vcap_field vsc7514_vcap_is1_actions[] = {
+ [VCAP_IS1_ACT_DSCP_ENA] = { 0, 1},
+ [VCAP_IS1_ACT_DSCP_VAL] = { 1, 6},
+ [VCAP_IS1_ACT_QOS_ENA] = { 7, 1},
+ [VCAP_IS1_ACT_QOS_VAL] = { 8, 3},
+ [VCAP_IS1_ACT_DP_ENA] = { 11, 1},
+ [VCAP_IS1_ACT_DP_VAL] = { 12, 1},
+ [VCAP_IS1_ACT_PAG_OVERRIDE_MASK] = { 13, 8},
+ [VCAP_IS1_ACT_PAG_VAL] = { 21, 8},
+ [VCAP_IS1_ACT_RSV] = { 29, 9},
+ /* The fields below are incorrectly shifted by 2 in the manual */
+ [VCAP_IS1_ACT_VID_REPLACE_ENA] = { 38, 1},
+ [VCAP_IS1_ACT_VID_ADD_VAL] = { 39, 12},
+ [VCAP_IS1_ACT_FID_SEL] = { 51, 2},
+ [VCAP_IS1_ACT_FID_VAL] = { 53, 13},
+ [VCAP_IS1_ACT_PCP_DEI_ENA] = { 66, 1},
+ [VCAP_IS1_ACT_PCP_VAL] = { 67, 3},
+ [VCAP_IS1_ACT_DEI_VAL] = { 70, 1},
+ [VCAP_IS1_ACT_VLAN_POP_CNT_ENA] = { 71, 1},
+ [VCAP_IS1_ACT_VLAN_POP_CNT] = { 72, 2},
+ [VCAP_IS1_ACT_CUSTOM_ACE_TYPE_ENA] = { 74, 4},
+ [VCAP_IS1_ACT_HIT_STICKY] = { 78, 1},
+};
+
+static const struct vcap_field vsc7514_vcap_is2_keys[] = {
+ /* Common: 46 bits */
+ [VCAP_IS2_TYPE] = { 0, 4},
+ [VCAP_IS2_HK_FIRST] = { 4, 1},
+ [VCAP_IS2_HK_PAG] = { 5, 8},
+ [VCAP_IS2_HK_IGR_PORT_MASK] = { 13, 12},
+ [VCAP_IS2_HK_RSV2] = { 25, 1},
+ [VCAP_IS2_HK_HOST_MATCH] = { 26, 1},
+ [VCAP_IS2_HK_L2_MC] = { 27, 1},
+ [VCAP_IS2_HK_L2_BC] = { 28, 1},
+ [VCAP_IS2_HK_VLAN_TAGGED] = { 29, 1},
+ [VCAP_IS2_HK_VID] = { 30, 12},
+ [VCAP_IS2_HK_DEI] = { 42, 1},
+ [VCAP_IS2_HK_PCP] = { 43, 3},
+ /* MAC_ETYPE / MAC_LLC / MAC_SNAP / OAM common */
+ [VCAP_IS2_HK_L2_DMAC] = { 46, 48},
+ [VCAP_IS2_HK_L2_SMAC] = { 94, 48},
+ /* MAC_ETYPE (TYPE=000) */
+ [VCAP_IS2_HK_MAC_ETYPE_ETYPE] = {142, 16},
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0] = {158, 16},
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1] = {174, 8},
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2] = {182, 3},
+ /* MAC_LLC (TYPE=001) */
+ [VCAP_IS2_HK_MAC_LLC_L2_LLC] = {142, 40},
+ /* MAC_SNAP (TYPE=010) */
+ [VCAP_IS2_HK_MAC_SNAP_L2_SNAP] = {142, 40},
+ /* MAC_ARP (TYPE=011) */
+ [VCAP_IS2_HK_MAC_ARP_SMAC] = { 46, 48},
+ [VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK] = { 94, 1},
+ [VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK] = { 95, 1},
+ [VCAP_IS2_HK_MAC_ARP_LEN_OK] = { 96, 1},
+ [VCAP_IS2_HK_MAC_ARP_TARGET_MATCH] = { 97, 1},
+ [VCAP_IS2_HK_MAC_ARP_SENDER_MATCH] = { 98, 1},
+ [VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN] = { 99, 1},
+ [VCAP_IS2_HK_MAC_ARP_OPCODE] = {100, 2},
+ [VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP] = {102, 32},
+ [VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP] = {134, 32},
+ [VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP] = {166, 1},
+ /* IP4_TCP_UDP / IP4_OTHER common */
+ [VCAP_IS2_HK_IP4] = { 46, 1},
+ [VCAP_IS2_HK_L3_FRAGMENT] = { 47, 1},
+ [VCAP_IS2_HK_L3_FRAG_OFS_GT0] = { 48, 1},
+ [VCAP_IS2_HK_L3_OPTIONS] = { 49, 1},
+ [VCAP_IS2_HK_IP4_L3_TTL_GT0] = { 50, 1},
+ [VCAP_IS2_HK_L3_TOS] = { 51, 8},
+ [VCAP_IS2_HK_L3_IP4_DIP] = { 59, 32},
+ [VCAP_IS2_HK_L3_IP4_SIP] = { 91, 32},
+ [VCAP_IS2_HK_DIP_EQ_SIP] = {123, 1},
+ /* IP4_TCP_UDP (TYPE=100) */
+ [VCAP_IS2_HK_TCP] = {124, 1},
+ [VCAP_IS2_HK_L4_DPORT] = {125, 16},
+ [VCAP_IS2_HK_L4_SPORT] = {141, 16},
+ [VCAP_IS2_HK_L4_RNG] = {157, 8},
+ [VCAP_IS2_HK_L4_SPORT_EQ_DPORT] = {165, 1},
+ [VCAP_IS2_HK_L4_SEQUENCE_EQ0] = {166, 1},
+ [VCAP_IS2_HK_L4_FIN] = {167, 1},
+ [VCAP_IS2_HK_L4_SYN] = {168, 1},
+ [VCAP_IS2_HK_L4_RST] = {169, 1},
+ [VCAP_IS2_HK_L4_PSH] = {170, 1},
+ [VCAP_IS2_HK_L4_ACK] = {171, 1},
+ [VCAP_IS2_HK_L4_URG] = {172, 1},
+ [VCAP_IS2_HK_L4_1588_DOM] = {173, 8},
+ [VCAP_IS2_HK_L4_1588_VER] = {181, 4},
+ /* IP4_OTHER (TYPE=101) */
+ [VCAP_IS2_HK_IP4_L3_PROTO] = {124, 8},
+ [VCAP_IS2_HK_L3_PAYLOAD] = {132, 56},
+ /* IP6_STD (TYPE=110) */
+ [VCAP_IS2_HK_IP6_L3_TTL_GT0] = { 46, 1},
+ [VCAP_IS2_HK_L3_IP6_SIP] = { 47, 128},
+ [VCAP_IS2_HK_IP6_L3_PROTO] = {175, 8},
+ /* OAM (TYPE=111) */
+ [VCAP_IS2_HK_OAM_MEL_FLAGS] = {142, 7},
+ [VCAP_IS2_HK_OAM_VER] = {149, 5},
+ [VCAP_IS2_HK_OAM_OPCODE] = {154, 8},
+ [VCAP_IS2_HK_OAM_FLAGS] = {162, 8},
+ [VCAP_IS2_HK_OAM_MEPID] = {170, 16},
+ [VCAP_IS2_HK_OAM_CCM_CNTS_EQ0] = {186, 1},
+ [VCAP_IS2_HK_OAM_IS_Y1731] = {187, 1},
+};
+
+static const struct vcap_field vsc7514_vcap_is2_actions[] = {
+ [VCAP_IS2_ACT_HIT_ME_ONCE] = { 0, 1},
+ [VCAP_IS2_ACT_CPU_COPY_ENA] = { 1, 1},
+ [VCAP_IS2_ACT_CPU_QU_NUM] = { 2, 3},
+ [VCAP_IS2_ACT_MASK_MODE] = { 5, 2},
+ [VCAP_IS2_ACT_MIRROR_ENA] = { 7, 1},
+ [VCAP_IS2_ACT_LRN_DIS] = { 8, 1},
+ [VCAP_IS2_ACT_POLICE_ENA] = { 9, 1},
+ [VCAP_IS2_ACT_POLICE_IDX] = { 10, 9},
+ [VCAP_IS2_ACT_POLICE_VCAP_ONLY] = { 19, 1},
+ [VCAP_IS2_ACT_PORT_MASK] = { 20, 11},
+ [VCAP_IS2_ACT_REW_OP] = { 31, 9},
+ [VCAP_IS2_ACT_SMAC_REPLACE_ENA] = { 40, 1},
+ [VCAP_IS2_ACT_RSV] = { 41, 2},
+ [VCAP_IS2_ACT_ACL_ID] = { 43, 6},
+ [VCAP_IS2_ACT_HIT_CNT] = { 49, 32},
+};
+
+static struct vcap_props vsc7514_vcap_props[] = {
+ [VCAP_ES0] = {
+ .action_type_width = 0,
+ .action_table = {
+ [ES0_ACTION_TYPE_NORMAL] = {
+ .width = 73, /* HIT_STICKY not included */
+ .count = 1,
+ },
+ },
+ .target = S0,
+ .keys = vsc7514_vcap_es0_keys,
+ .actions = vsc7514_vcap_es0_actions,
+ },
+ [VCAP_IS1] = {
+ .action_type_width = 0,
+ .action_table = {
+ [IS1_ACTION_TYPE_NORMAL] = {
+ .width = 78, /* HIT_STICKY not included */
+ .count = 4,
+ },
+ },
+ .target = S1,
+ .keys = vsc7514_vcap_is1_keys,
+ .actions = vsc7514_vcap_is1_actions,
+ },
+ [VCAP_IS2] = {
+ .action_type_width = 1,
+ .action_table = {
+ [IS2_ACTION_TYPE_NORMAL] = {
+ .width = 49,
+ .count = 2
+ },
+ [IS2_ACTION_TYPE_SMAC_SIP] = {
+ .width = 6,
+ .count = 4
+ },
+ },
+ .target = S2,
+ .keys = vsc7514_vcap_is2_keys,
+ .actions = vsc7514_vcap_is2_actions,
+ },
+};
+
+static struct ptp_clock_info ocelot_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "ocelot ptp",
+ .max_adj = 0x7fffffff,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = OCELOT_PTP_PINS_NUM,
+ .n_pins = OCELOT_PTP_PINS_NUM,
+ .pps = 0,
+ .gettime64 = ocelot_ptp_gettime64,
+ .settime64 = ocelot_ptp_settime64,
+ .adjtime = ocelot_ptp_adjtime,
+ .adjfine = ocelot_ptp_adjfine,
+ .verify = ocelot_ptp_verify,
+ .enable = ocelot_ptp_enable,
+};
+
+static void mscc_ocelot_release_ports(struct ocelot *ocelot)
+{
+ int port;
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port_private *priv;
+ struct ocelot_port *ocelot_port;
+
+ ocelot_port = ocelot->ports[port];
+ if (!ocelot_port)
+ continue;
+
+ ocelot_deinit_port(ocelot, port);
+
+ priv = container_of(ocelot_port, struct ocelot_port_private,
+ port);
+
+ unregister_netdev(priv->dev);
+ free_netdev(priv->dev);
+ }
+}
+
+static int mscc_ocelot_init_ports(struct platform_device *pdev,
+ struct device_node *ports)
+{
+ struct ocelot *ocelot = platform_get_drvdata(pdev);
+ struct device_node *portnp;
+ int err;
+
+ ocelot->ports = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports,
+ sizeof(struct ocelot_port *), GFP_KERNEL);
+ if (!ocelot->ports)
+ return -ENOMEM;
+
+ for_each_available_child_of_node(ports, portnp) {
+ struct ocelot_port_private *priv;
+ struct ocelot_port *ocelot_port;
+ struct device_node *phy_node;
+ phy_interface_t phy_mode;
+ struct phy_device *phy;
+ struct regmap *target;
+ struct resource *res;
+ struct phy *serdes;
+ char res_name[8];
+ u32 port;
+
+ if (of_property_read_u32(portnp, "reg", &port))
+ continue;
+
+ snprintf(res_name, sizeof(res_name), "port%d", port);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ res_name);
+ target = ocelot_regmap_init(ocelot, res);
+ if (IS_ERR(target))
+ continue;
+
+ phy_node = of_parse_phandle(portnp, "phy-handle", 0);
+ if (!phy_node)
+ continue;
+
+ phy = of_phy_find_device(phy_node);
+ of_node_put(phy_node);
+ if (!phy)
+ continue;
+
+ err = ocelot_probe_port(ocelot, port, target, phy);
+ if (err) {
+ of_node_put(portnp);
+ return err;
+ }
+
+ ocelot_port = ocelot->ports[port];
+ priv = container_of(ocelot_port, struct ocelot_port_private,
+ port);
+
+ of_get_phy_mode(portnp, &phy_mode);
+
+ ocelot_port->phy_mode = phy_mode;
+
+ switch (ocelot_port->phy_mode) {
+ case PHY_INTERFACE_MODE_NA:
+ continue;
+ case PHY_INTERFACE_MODE_SGMII:
+ break;
+ case PHY_INTERFACE_MODE_QSGMII:
+ /* Ensure clock signals and speed are set on all
+ * QSGMII links.
+ */
+ ocelot_port_writel(ocelot_port,
+ DEV_CLOCK_CFG_LINK_SPEED
+ (OCELOT_SPEED_1000),
+ DEV_CLOCK_CFG);
+ break;
+ default:
+ dev_err(ocelot->dev,
+ "invalid phy mode for port%d, (Q)SGMII only\n",
+ port);
+ of_node_put(portnp);
+ return -EINVAL;
+ }
+
+ serdes = devm_of_phy_get(ocelot->dev, portnp, NULL);
+ if (IS_ERR(serdes)) {
+ err = PTR_ERR(serdes);
+ if (err == -EPROBE_DEFER)
+ dev_dbg(ocelot->dev, "deferring probe\n");
+ else
+ dev_err(ocelot->dev,
+ "missing SerDes phys for port%d\n",
+ port);
+
+ of_node_put(portnp);
+ return err;
+ }
+
+ priv->serdes = serdes;
+ }
+
+ return 0;
+}
+
+static int mscc_ocelot_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int err, irq_xtr, irq_ptp_rdy;
+ struct device_node *ports;
+ struct ocelot *ocelot;
+ struct regmap *hsio;
+ unsigned int i;
+
+ struct {
+ enum ocelot_target id;
+ char *name;
+ u8 optional:1;
+ } io_target[] = {
+ { SYS, "sys" },
+ { REW, "rew" },
+ { QSYS, "qsys" },
+ { ANA, "ana" },
+ { QS, "qs" },
+ { S0, "s0" },
+ { S1, "s1" },
+ { S2, "s2" },
+ { PTP, "ptp", 1 },
+ };
+
+ if (!np && !pdev->dev.platform_data)
+ return -ENODEV;
+
+ ocelot = devm_kzalloc(&pdev->dev, sizeof(*ocelot), GFP_KERNEL);
+ if (!ocelot)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ocelot);
+ ocelot->dev = &pdev->dev;
+
+ for (i = 0; i < ARRAY_SIZE(io_target); i++) {
+ struct regmap *target;
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ io_target[i].name);
+
+ target = ocelot_regmap_init(ocelot, res);
+ if (IS_ERR(target)) {
+ if (io_target[i].optional) {
+ ocelot->targets[io_target[i].id] = NULL;
+ continue;
+ }
+ return PTR_ERR(target);
+ }
+
+ ocelot->targets[io_target[i].id] = target;
+ }
+
+ hsio = syscon_regmap_lookup_by_compatible("mscc,ocelot-hsio");
+ if (IS_ERR(hsio)) {
+ dev_err(&pdev->dev, "missing hsio syscon\n");
+ return PTR_ERR(hsio);
+ }
+
+ ocelot->targets[HSIO] = hsio;
+
+ err = ocelot_chip_init(ocelot, &ocelot_ops);
+ if (err)
+ return err;
+
+ irq_xtr = platform_get_irq_byname(pdev, "xtr");
+ if (irq_xtr < 0)
+ return -ENODEV;
+
+ err = devm_request_threaded_irq(&pdev->dev, irq_xtr, NULL,
+ ocelot_xtr_irq_handler, IRQF_ONESHOT,
+ "frame extraction", ocelot);
+ if (err)
+ return err;
+
+ irq_ptp_rdy = platform_get_irq_byname(pdev, "ptp_rdy");
+ if (irq_ptp_rdy > 0 && ocelot->targets[PTP]) {
+ err = devm_request_threaded_irq(&pdev->dev, irq_ptp_rdy, NULL,
+ ocelot_ptp_rdy_irq_handler,
+ IRQF_ONESHOT, "ptp ready",
+ ocelot);
+ if (err)
+ return err;
+
+ /* Both the PTP interrupt and the PTP bank are available */
+ ocelot->ptp = 1;
+ }
+
+ ports = of_get_child_by_name(np, "ethernet-ports");
+ if (!ports) {
+ dev_err(ocelot->dev, "no ethernet-ports child node found\n");
+ return -ENODEV;
+ }
+
+ ocelot->num_phys_ports = of_get_child_count(ports);
+ ocelot->num_flooding_pgids = 1;
+
+ ocelot->vcap = vsc7514_vcap_props;
+ ocelot->inj_prefix = OCELOT_TAG_PREFIX_NONE;
+ ocelot->xtr_prefix = OCELOT_TAG_PREFIX_NONE;
+ ocelot->npi = -1;
+
+ err = ocelot_init(ocelot);
+ if (err)
+ goto out_put_ports;
+
+ err = mscc_ocelot_init_ports(pdev, ports);
+ if (err)
+ goto out_ocelot_deinit;
+
+ if (ocelot->ptp) {
+ err = ocelot_init_timestamp(ocelot, &ocelot_ptp_clock_info);
+ if (err) {
+ dev_err(ocelot->dev,
+ "Timestamp initialization failed\n");
+ ocelot->ptp = 0;
+ }
+ }
+
+ register_netdevice_notifier(&ocelot_netdevice_nb);
+ register_switchdev_notifier(&ocelot_switchdev_nb);
+ register_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
+
+ of_node_put(ports);
+
+ dev_info(&pdev->dev, "Ocelot switch probed\n");
+
+ return 0;
+
+out_ocelot_deinit:
+ ocelot_deinit(ocelot);
+out_put_ports:
+ of_node_put(ports);
+ return err;
+}
+
+static int mscc_ocelot_remove(struct platform_device *pdev)
+{
+ struct ocelot *ocelot = platform_get_drvdata(pdev);
+
+ ocelot_deinit_timestamp(ocelot);
+ mscc_ocelot_release_ports(ocelot);
+ ocelot_deinit(ocelot);
+ unregister_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
+ unregister_switchdev_notifier(&ocelot_switchdev_nb);
+ unregister_netdevice_notifier(&ocelot_netdevice_nb);
+
+ return 0;
+}
+
+static struct platform_driver mscc_ocelot_driver = {
+ .probe = mscc_ocelot_probe,
+ .remove = mscc_ocelot_remove,
+ .driver = {
+ .name = "ocelot-switch",
+ .of_match_table = mscc_ocelot_match,
+ },
+};
+
+module_platform_driver(mscc_ocelot_driver);
+
+MODULE_DESCRIPTION("Microsemi Ocelot switch driver");
+MODULE_AUTHOR("Alexandre Belloni <alexandre.belloni@bootlin.com>");
+MODULE_LICENSE("Dual MIT/GPL");