author		Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-11 08:27:49 +0000
committer	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-11 08:27:49 +0000
commit		ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree		b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/net/ethernet/mscc
parent		Initial commit. (diff)
Adding upstream version 6.6.15.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/mscc')
22 files changed, 13697 insertions, 0 deletions
diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig new file mode 100644 index 0000000000..81e605691b --- /dev/null +++ b/drivers/net/ethernet/mscc/Kconfig @@ -0,0 +1,39 @@ +# SPDX-License-Identifier: (GPL-2.0 OR MIT) +config NET_VENDOR_MICROSEMI + bool "Microsemi devices" + default y + help + If you have a network (Ethernet) card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Microsemi devices. + +if NET_VENDOR_MICROSEMI + +# Users should depend on NET_SWITCHDEV, HAS_IOMEM, BRIDGE +config MSCC_OCELOT_SWITCH_LIB + depends on PTP_1588_CLOCK_OPTIONAL + select NET_DEVLINK + select REGMAP_MMIO + select PACKING + select PHYLINK + tristate + help + This is a hardware support library for Ocelot network switches. It is + used by switchdev as well as by DSA drivers. + +config MSCC_OCELOT_SWITCH + tristate "Ocelot switch driver" + depends on PTP_1588_CLOCK_OPTIONAL + depends on BRIDGE || BRIDGE=n + depends on NET_SWITCHDEV + depends on HAS_IOMEM + depends on OF + select MSCC_OCELOT_SWITCH_LIB + select GENERIC_PHY + help + This driver supports the Ocelot network switch device as present on + the Ocelot SoCs (VSC7514). + +endif # NET_VENDOR_MICROSEMI diff --git a/drivers/net/ethernet/mscc/Makefile b/drivers/net/ethernet/mscc/Makefile new file mode 100644 index 0000000000..16987b72df --- /dev/null +++ b/drivers/net/ethernet/mscc/Makefile @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: (GPL-2.0 OR MIT) +obj-$(CONFIG_MSCC_OCELOT_SWITCH_LIB) += mscc_ocelot_switch_lib.o +mscc_ocelot_switch_lib-y := \ + ocelot.o \ + ocelot_devlink.o \ + ocelot_flower.o \ + ocelot_io.o \ + ocelot_mm.o \ + ocelot_police.o \ + ocelot_ptp.o \ + ocelot_stats.o \ + ocelot_vcap.o \ + vsc7514_regs.o +mscc_ocelot_switch_lib-$(CONFIG_BRIDGE_MRP) += ocelot_mrp.o +obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot.o +mscc_ocelot-y := \ + ocelot_fdma.o \ + ocelot_net.o \ + ocelot_vsc7514.o diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c new file mode 100644 index 0000000000..56ccbd4c37 --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -0,0 +1,3081 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + */ +#include <linux/dsa/ocelot.h> +#include <linux/if_bridge.h> +#include <linux/iopoll.h> +#include <linux/phy/phy.h> +#include <net/pkt_sched.h> +#include <soc/mscc/ocelot_hsio.h> +#include <soc/mscc/ocelot_vcap.h> +#include "ocelot.h" +#include "ocelot_vcap.h" + +#define TABLE_UPDATE_SLEEP_US 10 +#define TABLE_UPDATE_TIMEOUT_US 100000 +#define MEM_INIT_SLEEP_US 1000 +#define MEM_INIT_TIMEOUT_US 100000 + +#define OCELOT_RSV_VLAN_RANGE_START 4000 + +struct ocelot_mact_entry { + u8 mac[ETH_ALEN]; + u16 vid; + enum macaccess_entry_type type; +}; + +/* Caller must hold &ocelot->mact_lock */ +static inline u32 ocelot_mact_read_macaccess(struct ocelot *ocelot) +{ + return ocelot_read(ocelot, ANA_TABLES_MACACCESS); +} + +/* Caller must hold &ocelot->mact_lock */ +static inline int ocelot_mact_wait_for_completion(struct ocelot *ocelot) +{ + u32 val; + + return readx_poll_timeout(ocelot_mact_read_macaccess, + ocelot, val, + (val & ANA_TABLES_MACACCESS_MAC_TABLE_CMD_M) == + MACACCESS_CMD_IDLE, + TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US); +} + +/* Caller must hold &ocelot->mact_lock */ +static void ocelot_mact_select(struct ocelot *ocelot, + const 
unsigned char mac[ETH_ALEN], + unsigned int vid) +{ + u32 macl = 0, mach = 0; + + /* Set the MAC address to handle and the vlan associated in a format + * understood by the hardware. + */ + mach |= vid << 16; + mach |= mac[0] << 8; + mach |= mac[1] << 0; + macl |= mac[2] << 24; + macl |= mac[3] << 16; + macl |= mac[4] << 8; + macl |= mac[5] << 0; + + ocelot_write(ocelot, macl, ANA_TABLES_MACLDATA); + ocelot_write(ocelot, mach, ANA_TABLES_MACHDATA); + +} + +static int __ocelot_mact_learn(struct ocelot *ocelot, int port, + const unsigned char mac[ETH_ALEN], + unsigned int vid, enum macaccess_entry_type type) +{ + u32 cmd = ANA_TABLES_MACACCESS_VALID | + ANA_TABLES_MACACCESS_DEST_IDX(port) | + ANA_TABLES_MACACCESS_ENTRYTYPE(type) | + ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN); + unsigned int mc_ports; + int err; + + /* Set MAC_CPU_COPY if the CPU port is used by a multicast entry */ + if (type == ENTRYTYPE_MACv4) + mc_ports = (mac[1] << 8) | mac[2]; + else if (type == ENTRYTYPE_MACv6) + mc_ports = (mac[0] << 8) | mac[1]; + else + mc_ports = 0; + + if (mc_ports & BIT(ocelot->num_phys_ports)) + cmd |= ANA_TABLES_MACACCESS_MAC_CPU_COPY; + + ocelot_mact_select(ocelot, mac, vid); + + /* Issue a write command */ + ocelot_write(ocelot, cmd, ANA_TABLES_MACACCESS); + + err = ocelot_mact_wait_for_completion(ocelot); + + return err; +} + +int ocelot_mact_learn(struct ocelot *ocelot, int port, + const unsigned char mac[ETH_ALEN], + unsigned int vid, enum macaccess_entry_type type) +{ + int ret; + + mutex_lock(&ocelot->mact_lock); + ret = __ocelot_mact_learn(ocelot, port, mac, vid, type); + mutex_unlock(&ocelot->mact_lock); + + return ret; +} +EXPORT_SYMBOL(ocelot_mact_learn); + +int ocelot_mact_forget(struct ocelot *ocelot, + const unsigned char mac[ETH_ALEN], unsigned int vid) +{ + int err; + + mutex_lock(&ocelot->mact_lock); + + ocelot_mact_select(ocelot, mac, vid); + + /* Issue a forget command */ + ocelot_write(ocelot, + ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_FORGET), + ANA_TABLES_MACACCESS); + + err = ocelot_mact_wait_for_completion(ocelot); + + mutex_unlock(&ocelot->mact_lock); + + return err; +} +EXPORT_SYMBOL(ocelot_mact_forget); + +int ocelot_mact_lookup(struct ocelot *ocelot, int *dst_idx, + const unsigned char mac[ETH_ALEN], + unsigned int vid, enum macaccess_entry_type *type) +{ + int val; + + mutex_lock(&ocelot->mact_lock); + + ocelot_mact_select(ocelot, mac, vid); + + /* Issue a read command with MACACCESS_VALID=1. */ + ocelot_write(ocelot, ANA_TABLES_MACACCESS_VALID | + ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_READ), + ANA_TABLES_MACACCESS); + + if (ocelot_mact_wait_for_completion(ocelot)) { + mutex_unlock(&ocelot->mact_lock); + return -ETIMEDOUT; + } + + /* Read back the entry flags */ + val = ocelot_read(ocelot, ANA_TABLES_MACACCESS); + + mutex_unlock(&ocelot->mact_lock); + + if (!(val & ANA_TABLES_MACACCESS_VALID)) + return -ENOENT; + + *dst_idx = ANA_TABLES_MACACCESS_DEST_IDX_X(val); + *type = ANA_TABLES_MACACCESS_ENTRYTYPE_X(val); + + return 0; +} +EXPORT_SYMBOL(ocelot_mact_lookup); + +int ocelot_mact_learn_streamdata(struct ocelot *ocelot, int dst_idx, + const unsigned char mac[ETH_ALEN], + unsigned int vid, + enum macaccess_entry_type type, + int sfid, int ssid) +{ + int ret; + + mutex_lock(&ocelot->mact_lock); + + ocelot_write(ocelot, + (sfid < 0 ? 0 : ANA_TABLES_STREAMDATA_SFID_VALID) | + ANA_TABLES_STREAMDATA_SFID(sfid) | + (ssid < 0 ? 
0 : ANA_TABLES_STREAMDATA_SSID_VALID) | + ANA_TABLES_STREAMDATA_SSID(ssid), + ANA_TABLES_STREAMDATA); + + ret = __ocelot_mact_learn(ocelot, dst_idx, mac, vid, type); + + mutex_unlock(&ocelot->mact_lock); + + return ret; +} +EXPORT_SYMBOL(ocelot_mact_learn_streamdata); + +static void ocelot_mact_init(struct ocelot *ocelot) +{ + /* Configure the learning mode entries attributes: + * - Do not copy the frame to the CPU extraction queues. + * - Use the vlan and mac_copy for dmac lookup. + */ + ocelot_rmw(ocelot, 0, + ANA_AGENCTRL_LEARN_CPU_COPY | ANA_AGENCTRL_IGNORE_DMAC_FLAGS + | ANA_AGENCTRL_LEARN_FWD_KILL + | ANA_AGENCTRL_LEARN_IGNORE_VLAN, + ANA_AGENCTRL); + + /* Clear the MAC table. We are not concurrent with anyone, so + * holding &ocelot->mact_lock is pointless. + */ + ocelot_write(ocelot, MACACCESS_CMD_INIT, ANA_TABLES_MACACCESS); +} + +void ocelot_pll5_init(struct ocelot *ocelot) +{ + /* Configure PLL5. This will need a proper CCF driver. + * The values are coming from the VTSS API for Ocelot. + */ + regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG4, + HSIO_PLL5G_CFG4_IB_CTRL(0x7600) | + HSIO_PLL5G_CFG4_IB_BIAS_CTRL(0x8)); + regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG0, + HSIO_PLL5G_CFG0_CORE_CLK_DIV(0x11) | + HSIO_PLL5G_CFG0_CPU_CLK_DIV(2) | + HSIO_PLL5G_CFG0_ENA_BIAS | + HSIO_PLL5G_CFG0_ENA_VCO_BUF | + HSIO_PLL5G_CFG0_ENA_CP1 | + HSIO_PLL5G_CFG0_SELCPI(2) | + HSIO_PLL5G_CFG0_LOOP_BW_RES(0xe) | + HSIO_PLL5G_CFG0_SELBGV820(4) | + HSIO_PLL5G_CFG0_DIV4 | + HSIO_PLL5G_CFG0_ENA_CLKTREE | + HSIO_PLL5G_CFG0_ENA_LANE); + regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG2, + HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET | + HSIO_PLL5G_CFG2_EN_RESET_OVERRUN | + HSIO_PLL5G_CFG2_GAIN_TEST(0x8) | + HSIO_PLL5G_CFG2_ENA_AMPCTRL | + HSIO_PLL5G_CFG2_PWD_AMPCTRL_N | + HSIO_PLL5G_CFG2_AMPC_SEL(0x10)); +} +EXPORT_SYMBOL(ocelot_pll5_init); + +static void ocelot_vcap_enable(struct ocelot *ocelot, int port) +{ + ocelot_write_gix(ocelot, ANA_PORT_VCAP_S2_CFG_S2_ENA | + ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG(0xa), + ANA_PORT_VCAP_S2_CFG, port); + + ocelot_write_gix(ocelot, ANA_PORT_VCAP_CFG_S1_ENA, + ANA_PORT_VCAP_CFG, port); + + ocelot_rmw_gix(ocelot, REW_PORT_CFG_ES0_EN, + REW_PORT_CFG_ES0_EN, + REW_PORT_CFG, port); +} + +static int ocelot_single_vlan_aware_bridge(struct ocelot *ocelot, + struct netlink_ext_ack *extack) +{ + struct net_device *bridge = NULL; + int port; + + for (port = 0; port < ocelot->num_phys_ports; port++) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + if (!ocelot_port || !ocelot_port->bridge || + !br_vlan_enabled(ocelot_port->bridge)) + continue; + + if (!bridge) { + bridge = ocelot_port->bridge; + continue; + } + + if (bridge == ocelot_port->bridge) + continue; + + NL_SET_ERR_MSG_MOD(extack, + "Only one VLAN-aware bridge is supported"); + return -EBUSY; + } + + return 0; +} + +static inline u32 ocelot_vlant_read_vlanaccess(struct ocelot *ocelot) +{ + return ocelot_read(ocelot, ANA_TABLES_VLANACCESS); +} + +static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot) +{ + u32 val; + + return readx_poll_timeout(ocelot_vlant_read_vlanaccess, + ocelot, + val, + (val & ANA_TABLES_VLANACCESS_VLAN_TBL_CMD_M) == + ANA_TABLES_VLANACCESS_CMD_IDLE, + TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US); +} + +static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask) +{ + /* Select the VID to configure */ + ocelot_write(ocelot, ANA_TABLES_VLANTIDX_V_INDEX(vid), + ANA_TABLES_VLANTIDX); + /* Set the vlan port members mask and issue a write command */ +
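+ /* Illustrative example (not from the original source): making ports 0 and 2 members of VID 100 corresponds to ocelot_vlant_set_mask(ocelot, 100, BIT(0) | BIT(2)), i.e. mask 0x5 written at VLAN table index 100. */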
ocelot_write(ocelot, ANA_TABLES_VLANACCESS_VLAN_PORT_MASK(mask) | + ANA_TABLES_VLANACCESS_CMD_WRITE, + ANA_TABLES_VLANACCESS); + + return ocelot_vlant_wait_for_completion(ocelot); +} + +static int ocelot_port_num_untagged_vlans(struct ocelot *ocelot, int port) +{ + struct ocelot_bridge_vlan *vlan; + int num_untagged = 0; + + list_for_each_entry(vlan, &ocelot->vlans, list) { + if (!(vlan->portmask & BIT(port))) + continue; + + /* Ignore the VLAN added by ocelot_add_vlan_unaware_pvid(), + * because this is never active in hardware at the same time as + * the bridge VLANs, which only matter in VLAN-aware mode. + */ + if (vlan->vid >= OCELOT_RSV_VLAN_RANGE_START) + continue; + + if (vlan->untagged & BIT(port)) + num_untagged++; + } + + return num_untagged; +} + +static int ocelot_port_num_tagged_vlans(struct ocelot *ocelot, int port) +{ + struct ocelot_bridge_vlan *vlan; + int num_tagged = 0; + + list_for_each_entry(vlan, &ocelot->vlans, list) { + if (!(vlan->portmask & BIT(port))) + continue; + + if (!(vlan->untagged & BIT(port))) + num_tagged++; + } + + return num_tagged; +} + +/* We use native VLAN when we have to mix egress-tagged VLANs with exactly + * _one_ egress-untagged VLAN (_the_ native VLAN) + */ +static bool ocelot_port_uses_native_vlan(struct ocelot *ocelot, int port) +{ + return ocelot_port_num_tagged_vlans(ocelot, port) && + ocelot_port_num_untagged_vlans(ocelot, port) == 1; +} + +static struct ocelot_bridge_vlan * +ocelot_port_find_native_vlan(struct ocelot *ocelot, int port) +{ + struct ocelot_bridge_vlan *vlan; + + list_for_each_entry(vlan, &ocelot->vlans, list) + if (vlan->portmask & BIT(port) && vlan->untagged & BIT(port)) + return vlan; + + return NULL; +} + +/* Keep in sync REW_TAG_CFG_TAG_CFG and, if applicable, + * REW_PORT_VLAN_CFG_PORT_VID, with the bridge VLAN table and VLAN awareness + * state of the port. + */ +static void ocelot_port_manage_port_tag(struct ocelot *ocelot, int port) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + enum ocelot_port_tag_config tag_cfg; + bool uses_native_vlan = false; + + if (ocelot_port->vlan_aware) { + uses_native_vlan = ocelot_port_uses_native_vlan(ocelot, port); + + if (uses_native_vlan) + tag_cfg = OCELOT_PORT_TAG_NATIVE; + else if (ocelot_port_num_untagged_vlans(ocelot, port)) + tag_cfg = OCELOT_PORT_TAG_DISABLED; + else + tag_cfg = OCELOT_PORT_TAG_TRUNK; + } else { + tag_cfg = OCELOT_PORT_TAG_DISABLED; + } + + ocelot_rmw_gix(ocelot, REW_TAG_CFG_TAG_CFG(tag_cfg), + REW_TAG_CFG_TAG_CFG_M, + REW_TAG_CFG, port); + + if (uses_native_vlan) { + struct ocelot_bridge_vlan *native_vlan; + + /* Not having a native VLAN is impossible, because + * ocelot_port_num_untagged_vlans has returned 1. + * So there is no use in checking for NULL here. 
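+ * (Illustrative example: a port carrying VID 10 egress-tagged and VID 20 egress-untagged uses OCELOT_PORT_TAG_NATIVE, and 20 is the native VID written below into REW_PORT_VLAN_CFG_PORT_VID.)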
+ */ + native_vlan = ocelot_port_find_native_vlan(ocelot, port); + + ocelot_rmw_gix(ocelot, + REW_PORT_VLAN_CFG_PORT_VID(native_vlan->vid), + REW_PORT_VLAN_CFG_PORT_VID_M, + REW_PORT_VLAN_CFG, port); + } +} + +int ocelot_bridge_num_find(struct ocelot *ocelot, + const struct net_device *bridge) +{ + int port; + + for (port = 0; port < ocelot->num_phys_ports; port++) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + if (ocelot_port && ocelot_port->bridge == bridge) + return ocelot_port->bridge_num; + } + + return -1; +} +EXPORT_SYMBOL_GPL(ocelot_bridge_num_find); + +static u16 ocelot_vlan_unaware_pvid(struct ocelot *ocelot, + const struct net_device *bridge) +{ + int bridge_num; + + /* Standalone ports use VID 0 */ + if (!bridge) + return 0; + + bridge_num = ocelot_bridge_num_find(ocelot, bridge); + if (WARN_ON(bridge_num < 0)) + return 0; + + /* VLAN-unaware bridges use a reserved VID going from 4095 downwards */ + return VLAN_N_VID - bridge_num - 1; +} + +/* Default vlan to classify for untagged frames (may be zero) */ +static void ocelot_port_set_pvid(struct ocelot *ocelot, int port, + const struct ocelot_bridge_vlan *pvid_vlan) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + u16 pvid = ocelot_vlan_unaware_pvid(ocelot, ocelot_port->bridge); + u32 val = 0; + + ocelot_port->pvid_vlan = pvid_vlan; + + if (ocelot_port->vlan_aware && pvid_vlan) + pvid = pvid_vlan->vid; + + ocelot_rmw_gix(ocelot, + ANA_PORT_VLAN_CFG_VLAN_VID(pvid), + ANA_PORT_VLAN_CFG_VLAN_VID_M, + ANA_PORT_VLAN_CFG, port); + + /* If there's no pvid, we should drop not only untagged traffic (which + * happens automatically), but also 802.1p traffic which gets + * classified to VLAN 0, but that is always in our RX filter, so it + * would get accepted were it not for this setting. + */ + if (!pvid_vlan && ocelot_port->vlan_aware) + val = ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA | + ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA; + + ocelot_rmw_gix(ocelot, val, + ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA | + ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA, + ANA_PORT_DROP_CFG, port); +} + +static struct ocelot_bridge_vlan *ocelot_bridge_vlan_find(struct ocelot *ocelot, + u16 vid) +{ + struct ocelot_bridge_vlan *vlan; + + list_for_each_entry(vlan, &ocelot->vlans, list) + if (vlan->vid == vid) + return vlan; + + return NULL; +} + +static int ocelot_vlan_member_add(struct ocelot *ocelot, int port, u16 vid, + bool untagged) +{ + struct ocelot_bridge_vlan *vlan = ocelot_bridge_vlan_find(ocelot, vid); + unsigned long portmask; + int err; + + if (vlan) { + portmask = vlan->portmask | BIT(port); + + err = ocelot_vlant_set_mask(ocelot, vid, portmask); + if (err) + return err; + + vlan->portmask = portmask; + /* Bridge VLANs can be overwritten with a different + * egress-tagging setting, so make sure to override an untagged + * with a tagged VID if that's going on.
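+ * (e.g. 'bridge vlan add dev swp0 vid 100 untagged' followed by 'bridge vlan add dev swp0 vid 100' must leave VID 100 egress-tagged; swp0 is a hypothetical port name.)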
+ */ + if (untagged) + vlan->untagged |= BIT(port); + else + vlan->untagged &= ~BIT(port); + + return 0; + } + + vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); + if (!vlan) + return -ENOMEM; + + portmask = BIT(port); + + err = ocelot_vlant_set_mask(ocelot, vid, portmask); + if (err) { + kfree(vlan); + return err; + } + + vlan->vid = vid; + vlan->portmask = portmask; + if (untagged) + vlan->untagged = BIT(port); + INIT_LIST_HEAD(&vlan->list); + list_add_tail(&vlan->list, &ocelot->vlans); + + return 0; +} + +static int ocelot_vlan_member_del(struct ocelot *ocelot, int port, u16 vid) +{ + struct ocelot_bridge_vlan *vlan = ocelot_bridge_vlan_find(ocelot, vid); + unsigned long portmask; + int err; + + if (!vlan) + return 0; + + portmask = vlan->portmask & ~BIT(port); + + err = ocelot_vlant_set_mask(ocelot, vid, portmask); + if (err) + return err; + + vlan->portmask = portmask; + if (vlan->portmask) + return 0; + + list_del(&vlan->list); + kfree(vlan); + + return 0; +} + +static int ocelot_add_vlan_unaware_pvid(struct ocelot *ocelot, int port, + const struct net_device *bridge) +{ + u16 vid = ocelot_vlan_unaware_pvid(ocelot, bridge); + + return ocelot_vlan_member_add(ocelot, port, vid, true); +} + +static int ocelot_del_vlan_unaware_pvid(struct ocelot *ocelot, int port, + const struct net_device *bridge) +{ + u16 vid = ocelot_vlan_unaware_pvid(ocelot, bridge); + + return ocelot_vlan_member_del(ocelot, port, vid); +} + +int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, + bool vlan_aware, struct netlink_ext_ack *extack) +{ + struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1]; + struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct ocelot_vcap_filter *filter; + int err = 0; + u32 val; + + list_for_each_entry(filter, &block->rules, list) { + if (filter->ingress_port_mask & BIT(port) && + filter->action.vid_replace_ena) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot change VLAN state with vlan modify rules active"); + return -EBUSY; + } + } + + err = ocelot_single_vlan_aware_bridge(ocelot, extack); + if (err) + return err; + + if (vlan_aware) + err = ocelot_del_vlan_unaware_pvid(ocelot, port, + ocelot_port->bridge); + else if (ocelot_port->bridge) + err = ocelot_add_vlan_unaware_pvid(ocelot, port, + ocelot_port->bridge); + if (err) + return err; + + ocelot_port->vlan_aware = vlan_aware; + + if (vlan_aware) + val = ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA | + ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1); + else + val = 0; + ocelot_rmw_gix(ocelot, val, + ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA | + ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M, + ANA_PORT_VLAN_CFG, port); + + ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan); + ocelot_port_manage_port_tag(ocelot, port); + + return 0; +} +EXPORT_SYMBOL(ocelot_port_vlan_filtering); + +int ocelot_vlan_prepare(struct ocelot *ocelot, int port, u16 vid, bool pvid, + bool untagged, struct netlink_ext_ack *extack) +{ + if (untagged) { + /* We are adding an egress-untagged VLAN */ + if (ocelot_port_uses_native_vlan(ocelot, port)) { + NL_SET_ERR_MSG_MOD(extack, + "Port with egress-tagged VLANs cannot have more than one egress-untagged (native) VLAN"); + return -EBUSY; + } + } else { + /* We are adding an egress-tagged VLAN */ + if (ocelot_port_num_untagged_vlans(ocelot, port) > 1) { + NL_SET_ERR_MSG_MOD(extack, + "Port with more than one egress-untagged VLAN cannot have egress-tagged VLANs"); + return -EBUSY; + } + } + + if (vid > OCELOT_RSV_VLAN_RANGE_START) { + NL_SET_ERR_MSG_MOD(extack, + "VLAN range 4000-4095 reserved for VLAN-unaware bridging"); + return -EBUSY; + } +
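+ /* Illustration of the checks above (hypothetical setup): with VID 10 egress-tagged and VID 20 egress-untagged present, adding VID 30 egress-untagged is rejected; with VIDs 20 and 30 both egress-untagged, adding VID 10 egress-tagged is rejected. */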
+ return 0; +} +EXPORT_SYMBOL(ocelot_vlan_prepare); + +int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid, + bool untagged) +{ + int err; + + /* Ignore VID 0 added to our RX filter by the 8021q module, since + * that collides with OCELOT_STANDALONE_PVID and changes it from + * egress-untagged to egress-tagged. + */ + if (!vid) + return 0; + + err = ocelot_vlan_member_add(ocelot, port, vid, untagged); + if (err) + return err; + + /* Default ingress vlan classification */ + if (pvid) + ocelot_port_set_pvid(ocelot, port, + ocelot_bridge_vlan_find(ocelot, vid)); + + /* Untagged egress vlan classification */ + ocelot_port_manage_port_tag(ocelot, port); + + return 0; +} +EXPORT_SYMBOL(ocelot_vlan_add); + +int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + bool del_pvid = false; + int err; + + if (!vid) + return 0; + + if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid) + del_pvid = true; + + err = ocelot_vlan_member_del(ocelot, port, vid); + if (err) + return err; + + /* Ingress */ + if (del_pvid) + ocelot_port_set_pvid(ocelot, port, NULL); + + /* Egress */ + ocelot_port_manage_port_tag(ocelot, port); + + return 0; +} +EXPORT_SYMBOL(ocelot_vlan_del); + +static void ocelot_vlan_init(struct ocelot *ocelot) +{ + unsigned long all_ports = GENMASK(ocelot->num_phys_ports - 1, 0); + u16 port, vid; + + /* Clear VLAN table, by default all ports are members of all VLANs */ + ocelot_write(ocelot, ANA_TABLES_VLANACCESS_CMD_INIT, + ANA_TABLES_VLANACCESS); + ocelot_vlant_wait_for_completion(ocelot); + + /* Configure the port VLAN memberships */ + for (vid = 1; vid < VLAN_N_VID; vid++) + ocelot_vlant_set_mask(ocelot, vid, 0); + + /* We need VID 0 to get traffic on standalone ports. + * It is added automatically if the 8021q module is loaded, but we + * can't rely on that since it might not be. + */ + ocelot_vlant_set_mask(ocelot, OCELOT_STANDALONE_PVID, all_ports); + + /* Set vlan ingress filter mask to all ports but the CPU port by + * default. + */ + ocelot_write(ocelot, all_ports, ANA_VLANMASK); + + for (port = 0; port < ocelot->num_phys_ports; port++) { + ocelot_write_gix(ocelot, 0, REW_PORT_VLAN_CFG, port); + ocelot_write_gix(ocelot, 0, REW_TAG_CFG, port); + } +} + +static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port) +{ + return ocelot_read_rix(ocelot, QSYS_SW_STATUS, port); +} + +static int ocelot_port_flush(struct ocelot *ocelot, int port) +{ + unsigned int pause_ena; + int err, val; + + /* Disable dequeuing from the egress queues */ + ocelot_rmw_rix(ocelot, QSYS_PORT_MODE_DEQUEUE_DIS, + QSYS_PORT_MODE_DEQUEUE_DIS, + QSYS_PORT_MODE, port); + + /* Disable flow control */ + ocelot_fields_read(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, &pause_ena); + ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0); + + /* Disable priority flow control */ + ocelot_fields_write(ocelot, port, + QSYS_SWITCH_PORT_MODE_TX_PFC_ENA, 0); + + /* Wait at least the time it takes to receive a frame of maximum length + * at the port. + * Worst-case delays for 10 kilobyte jumbo frames are: + * 8 ms on a 10M port + * 800 μs on a 100M port + * 80 μs on a 1G port + * 32 μs on a 2.5G port + */ + usleep_range(8000, 10000); + + /* Disable half duplex backpressure. */ + ocelot_rmw_rix(ocelot, 0, SYS_FRONT_PORT_MODE_HDX_MODE, + SYS_FRONT_PORT_MODE, port); + + /* Flush the queues associated with the port.
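+ * The sequence below is: set REW_PORT_CFG_FLUSH_ENA, re-enable dequeuing, poll QSYS_SW_STATUS until the egress queues report empty, then clear the flush flag and restore flow control.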
*/ + ocelot_rmw_gix(ocelot, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG_FLUSH_ENA, + REW_PORT_CFG, port); + + /* Enable dequeuing from the egress queues. */ + ocelot_rmw_rix(ocelot, 0, QSYS_PORT_MODE_DEQUEUE_DIS, QSYS_PORT_MODE, + port); + + /* Wait until flushing is complete. */ + err = read_poll_timeout(ocelot_read_eq_avail, val, !val, + 100, 2000000, false, ocelot, port); + + /* Clear flushing again. */ + ocelot_rmw_gix(ocelot, 0, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG, port); + + /* Re-enable flow control */ + ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, pause_ena); + + return err; +} + +int ocelot_port_configure_serdes(struct ocelot *ocelot, int port, + struct device_node *portnp) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct device *dev = ocelot->dev; + int err; + + /* Ensure clock signals and speed are set on all QSGMII links */ + if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_QSGMII) + ocelot_port_rmwl(ocelot_port, 0, + DEV_CLOCK_CFG_MAC_TX_RST | + DEV_CLOCK_CFG_MAC_RX_RST, + DEV_CLOCK_CFG); + + if (ocelot_port->phy_mode != PHY_INTERFACE_MODE_INTERNAL) { + struct phy *serdes = of_phy_get(portnp, NULL); + + if (IS_ERR(serdes)) { + err = PTR_ERR(serdes); + dev_err_probe(dev, err, + "missing SerDes phys for port %d\n", + port); + return err; + } + + err = phy_set_mode_ext(serdes, PHY_MODE_ETHERNET, + ocelot_port->phy_mode); + of_phy_put(serdes); + if (err) { + dev_err(dev, "Could not set SerDes mode on port %d: %pe\n", + port, ERR_PTR(err)); + return err; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(ocelot_port_configure_serdes); + +void ocelot_phylink_mac_config(struct ocelot *ocelot, int port, + unsigned int link_an_mode, + const struct phylink_link_state *state) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + /* Disable HDX fast control */ + ocelot_port_writel(ocelot_port, DEV_PORT_MISC_HDX_FAST_DIS, + DEV_PORT_MISC); + + /* SGMII only for now */ + ocelot_port_writel(ocelot_port, PCS1G_MODE_CFG_SGMII_MODE_ENA, + PCS1G_MODE_CFG); + ocelot_port_writel(ocelot_port, PCS1G_SD_CFG_SD_SEL, PCS1G_SD_CFG); + + /* Enable PCS */ + ocelot_port_writel(ocelot_port, PCS1G_CFG_PCS_ENA, PCS1G_CFG); + + /* No aneg on SGMII */ + ocelot_port_writel(ocelot_port, 0, PCS1G_ANEG_CFG); + + /* No loopback */ + ocelot_port_writel(ocelot_port, 0, PCS1G_LB_CFG); +} +EXPORT_SYMBOL_GPL(ocelot_phylink_mac_config); + +void ocelot_phylink_mac_link_down(struct ocelot *ocelot, int port, + unsigned int link_an_mode, + phy_interface_t interface, + unsigned long quirks) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + int err; + + ocelot_port->speed = SPEED_UNKNOWN; + + ocelot_port_rmwl(ocelot_port, 0, DEV_MAC_ENA_CFG_RX_ENA, + DEV_MAC_ENA_CFG); + + if (ocelot->ops->cut_through_fwd) { + mutex_lock(&ocelot->fwd_domain_lock); + ocelot->ops->cut_through_fwd(ocelot); + mutex_unlock(&ocelot->fwd_domain_lock); + } + + ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0); + + err = ocelot_port_flush(ocelot, port); + if (err) + dev_err(ocelot->dev, "failed to flush port %d: %d\n", + port, err); + + /* Put the port in reset.
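+ * This step is skipped for QSGMII ports carrying the OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP quirk, since the ports sharing a QSGMII lane must be kept out of reset.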
*/ + if (interface != PHY_INTERFACE_MODE_QSGMII || + !(quirks & OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP)) + ocelot_port_rmwl(ocelot_port, + DEV_CLOCK_CFG_MAC_TX_RST | + DEV_CLOCK_CFG_MAC_RX_RST, + DEV_CLOCK_CFG_MAC_TX_RST | + DEV_CLOCK_CFG_MAC_RX_RST, + DEV_CLOCK_CFG); +} +EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_down); + +void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port, + struct phy_device *phydev, + unsigned int link_an_mode, + phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause, + unsigned long quirks) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + int mac_speed, mode = 0; + u32 mac_fc_cfg; + + ocelot_port->speed = speed; + + /* The MAC might be integrated in systems where the MAC speed is fixed + * and it's the PCS who is performing the rate adaptation, so we have + * to write "1000Mbps" into the LINK_SPEED field of DEV_CLOCK_CFG + * (which is also its default value). + */ + if ((quirks & OCELOT_QUIRK_PCS_PERFORMS_RATE_ADAPTATION) || + speed == SPEED_1000) { + mac_speed = OCELOT_SPEED_1000; + mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA; + } else if (speed == SPEED_2500) { + mac_speed = OCELOT_SPEED_2500; + mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA; + } else if (speed == SPEED_100) { + mac_speed = OCELOT_SPEED_100; + } else { + mac_speed = OCELOT_SPEED_10; + } + + if (duplex == DUPLEX_FULL) + mode |= DEV_MAC_MODE_CFG_FDX_ENA; + + ocelot_port_writel(ocelot_port, mode, DEV_MAC_MODE_CFG); + + /* Take port out of reset by clearing the MAC_TX_RST, MAC_RX_RST and + * PORT_RST bits in DEV_CLOCK_CFG. + */ + ocelot_port_writel(ocelot_port, DEV_CLOCK_CFG_LINK_SPEED(mac_speed), + DEV_CLOCK_CFG); + + switch (speed) { + case SPEED_10: + mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(OCELOT_SPEED_10); + break; + case SPEED_100: + mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(OCELOT_SPEED_100); + break; + case SPEED_1000: + case SPEED_2500: + mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(OCELOT_SPEED_1000); + break; + default: + dev_err(ocelot->dev, "Unsupported speed on port %d: %d\n", + port, speed); + return; + } + + if (rx_pause) + mac_fc_cfg |= SYS_MAC_FC_CFG_RX_FC_ENA; + + if (tx_pause) + mac_fc_cfg |= SYS_MAC_FC_CFG_TX_FC_ENA | + SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) | + SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) | + SYS_MAC_FC_CFG_ZERO_PAUSE_ENA; + + /* Flow control. Link speed is only used here to evaluate the time + * specification in incoming pause frames. + */ + ocelot_write_rix(ocelot, mac_fc_cfg, SYS_MAC_FC_CFG, port); + + ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port); + + /* Don't attempt to send PAUSE frames on the NPI port, it's broken */ + if (port != ocelot->npi) + ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, + tx_pause); + + /* Undo the effects of ocelot_phylink_mac_link_down: + * enable MAC module + */ + ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA | + DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG); + + /* If the port supports cut-through forwarding, update the masks before + * enabling forwarding on the port. + */ + if (ocelot->ops->cut_through_fwd) { + mutex_lock(&ocelot->fwd_domain_lock); + /* Workaround for hardware bug - FP doesn't work + * at all link speeds for all PHY modes. The function + * below also calls ocelot->ops->cut_through_fwd(), + * so we don't need to do it twice. 
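+ * (ocelot_port_update_active_preemptible_tcs() presumably re-evaluates which traffic classes may stay preemptible at the new link speed and PHY mode before the masks are applied.)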
+ */ + ocelot_port_update_active_preemptible_tcs(ocelot, port); + mutex_unlock(&ocelot->fwd_domain_lock); + } + + /* Core: Enable port for frame transfer */ + ocelot_fields_write(ocelot, port, + QSYS_SWITCH_PORT_MODE_PORT_ENA, 1); +} +EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_up); + +static int ocelot_rx_frame_word(struct ocelot *ocelot, u8 grp, bool ifh, + u32 *rval) +{ + u32 bytes_valid, val; + + val = ocelot_read_rix(ocelot, QS_XTR_RD, grp); + if (val == XTR_NOT_READY) { + if (ifh) + return -EIO; + + do { + val = ocelot_read_rix(ocelot, QS_XTR_RD, grp); + } while (val == XTR_NOT_READY); + } + + switch (val) { + case XTR_ABORT: + return -EIO; + case XTR_EOF_0: + case XTR_EOF_1: + case XTR_EOF_2: + case XTR_EOF_3: + case XTR_PRUNED: + bytes_valid = XTR_VALID_BYTES(val); + val = ocelot_read_rix(ocelot, QS_XTR_RD, grp); + if (val == XTR_ESCAPE) + *rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp); + else + *rval = val; + + return bytes_valid; + case XTR_ESCAPE: + *rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp); + + return 4; + default: + *rval = val; + + return 4; + } +} + +static int ocelot_xtr_poll_xfh(struct ocelot *ocelot, int grp, u32 *xfh) +{ + int i, err = 0; + + for (i = 0; i < OCELOT_TAG_LEN / 4; i++) { + err = ocelot_rx_frame_word(ocelot, grp, true, &xfh[i]); + if (err != 4) + return (err < 0) ? err : -EIO; + } + + return 0; +} + +void ocelot_ptp_rx_timestamp(struct ocelot *ocelot, struct sk_buff *skb, + u64 timestamp) +{ + struct skb_shared_hwtstamps *shhwtstamps; + u64 tod_in_ns, full_ts_in_ns; + struct timespec64 ts; + + ocelot_ptp_gettime64(&ocelot->ptp_info, &ts); + + tod_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec); + if ((tod_in_ns & 0xffffffff) < timestamp) + full_ts_in_ns = (((tod_in_ns >> 32) - 1) << 32) | + timestamp; + else + full_ts_in_ns = (tod_in_ns & GENMASK_ULL(63, 32)) | + timestamp; + + shhwtstamps = skb_hwtstamps(skb); + memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); + shhwtstamps->hwtstamp = full_ts_in_ns; +} +EXPORT_SYMBOL(ocelot_ptp_rx_timestamp); + +int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb) +{ + u64 timestamp, src_port, len; + u32 xfh[OCELOT_TAG_LEN / 4]; + struct net_device *dev; + struct sk_buff *skb; + int sz, buf_len; + u32 val, *buf; + int err; + + err = ocelot_xtr_poll_xfh(ocelot, grp, xfh); + if (err) + return err; + + ocelot_xfh_get_src_port(xfh, &src_port); + ocelot_xfh_get_len(xfh, &len); + ocelot_xfh_get_rew_val(xfh, ×tamp); + + if (WARN_ON(src_port >= ocelot->num_phys_ports)) + return -EINVAL; + + dev = ocelot->ops->port_to_netdev(ocelot, src_port); + if (!dev) + return -EINVAL; + + skb = netdev_alloc_skb(dev, len); + if (unlikely(!skb)) { + netdev_err(dev, "Unable to allocate sk_buff\n"); + return -ENOMEM; + } + + buf_len = len - ETH_FCS_LEN; + buf = (u32 *)skb_put(skb, buf_len); + + len = 0; + do { + sz = ocelot_rx_frame_word(ocelot, grp, false, &val); + if (sz < 0) { + err = sz; + goto out_free_skb; + } + *buf++ = val; + len += sz; + } while (len < buf_len); + + /* Read the FCS */ + sz = ocelot_rx_frame_word(ocelot, grp, false, &val); + if (sz < 0) { + err = sz; + goto out_free_skb; + } + + /* Update the statistics if part of the FCS was read before */ + len -= ETH_FCS_LEN - sz; + + if (unlikely(dev->features & NETIF_F_RXFCS)) { + buf = (u32 *)skb_put(skb, ETH_FCS_LEN); + *buf = val; + } + + if (ocelot->ptp) + ocelot_ptp_rx_timestamp(ocelot, skb, timestamp); + + /* Everything we see on an interface that is in the HW bridge + * has already been forwarded. 
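+ * Setting skb->offload_fwd_mark tells the software bridge that this frame does not need to be forwarded again.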
+ */ + if (ocelot->ports[src_port]->bridge) + skb->offload_fwd_mark = 1; + + skb->protocol = eth_type_trans(skb, dev); + + *nskb = skb; + + return 0; + +out_free_skb: + kfree_skb(skb); + return err; +} +EXPORT_SYMBOL(ocelot_xtr_poll_frame); + +bool ocelot_can_inject(struct ocelot *ocelot, int grp) +{ + u32 val = ocelot_read(ocelot, QS_INJ_STATUS); + + if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp)))) + return false; + if (val & QS_INJ_STATUS_WMARK_REACHED(BIT(grp))) + return false; + + return true; +} +EXPORT_SYMBOL(ocelot_can_inject); + +void ocelot_ifh_port_set(void *ifh, int port, u32 rew_op, u32 vlan_tag) +{ + ocelot_ifh_set_bypass(ifh, 1); + ocelot_ifh_set_dest(ifh, BIT_ULL(port)); + ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C); + if (vlan_tag) + ocelot_ifh_set_vlan_tci(ifh, vlan_tag); + if (rew_op) + ocelot_ifh_set_rew_op(ifh, rew_op); +} +EXPORT_SYMBOL(ocelot_ifh_port_set); + +void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp, + u32 rew_op, struct sk_buff *skb) +{ + u32 ifh[OCELOT_TAG_LEN / 4] = {0}; + unsigned int i, count, last; + + ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) | + QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp); + + ocelot_ifh_port_set(ifh, port, rew_op, skb_vlan_tag_get(skb)); + + for (i = 0; i < OCELOT_TAG_LEN / 4; i++) + ocelot_write_rix(ocelot, ifh[i], QS_INJ_WR, grp); + + count = DIV_ROUND_UP(skb->len, 4); + last = skb->len % 4; + for (i = 0; i < count; i++) + ocelot_write_rix(ocelot, ((u32 *)skb->data)[i], QS_INJ_WR, grp); + + /* Add padding */ + while (i < (OCELOT_BUFFER_CELL_SZ / 4)) { + ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp); + i++; + } + + /* Indicate EOF and valid bytes in last word */ + ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) | + QS_INJ_CTRL_VLD_BYTES(skb->len < OCELOT_BUFFER_CELL_SZ ? 0 : last) | + QS_INJ_CTRL_EOF, + QS_INJ_CTRL, grp); + + /* Add dummy CRC */ + ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp); + skb_tx_timestamp(skb); + + skb->dev->stats.tx_packets++; + skb->dev->stats.tx_bytes += skb->len; +} +EXPORT_SYMBOL(ocelot_port_inject_frame); + +void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp) +{ + while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) + ocelot_read_rix(ocelot, QS_XTR_RD, grp); +} +EXPORT_SYMBOL(ocelot_drain_cpu_queue); + +int ocelot_fdb_add(struct ocelot *ocelot, int port, const unsigned char *addr, + u16 vid, const struct net_device *bridge) +{ + if (!vid) + vid = ocelot_vlan_unaware_pvid(ocelot, bridge); + + return ocelot_mact_learn(ocelot, port, addr, vid, ENTRYTYPE_LOCKED); +} +EXPORT_SYMBOL(ocelot_fdb_add); + +int ocelot_fdb_del(struct ocelot *ocelot, int port, const unsigned char *addr, + u16 vid, const struct net_device *bridge) +{ + if (!vid) + vid = ocelot_vlan_unaware_pvid(ocelot, bridge); + + return ocelot_mact_forget(ocelot, addr, vid); +} +EXPORT_SYMBOL(ocelot_fdb_del); + +/* Caller must hold &ocelot->mact_lock */ +static int ocelot_mact_read(struct ocelot *ocelot, int port, int row, int col, + struct ocelot_mact_entry *entry) +{ + u32 val, dst, macl, mach; + char mac[ETH_ALEN]; + + /* Set row and column to read from */ + ocelot_field_write(ocelot, ANA_TABLES_MACTINDX_M_INDEX, row); + ocelot_field_write(ocelot, ANA_TABLES_MACTINDX_BUCKET, col); + + /* Issue a read command */ + ocelot_write(ocelot, + ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_READ), + ANA_TABLES_MACACCESS); + + if (ocelot_mact_wait_for_completion(ocelot)) + return -ETIMEDOUT; + + /* Read the entry flags */ + val = ocelot_read(ocelot, ANA_TABLES_MACACCESS); + if (!(val & ANA_TABLES_MACACCESS_VALID)) + return 
-EINVAL; + + /* If the entry read has another port configured as its destination, + * do not report it. + */ + dst = (val & ANA_TABLES_MACACCESS_DEST_IDX_M) >> 3; + if (dst != port) + return -EINVAL; + + /* Get the entry's MAC address and VLAN id */ + macl = ocelot_read(ocelot, ANA_TABLES_MACLDATA); + mach = ocelot_read(ocelot, ANA_TABLES_MACHDATA); + + mac[0] = (mach >> 8) & 0xff; + mac[1] = (mach >> 0) & 0xff; + mac[2] = (macl >> 24) & 0xff; + mac[3] = (macl >> 16) & 0xff; + mac[4] = (macl >> 8) & 0xff; + mac[5] = (macl >> 0) & 0xff; + + entry->vid = (mach >> 16) & 0xfff; + ether_addr_copy(entry->mac, mac); + + return 0; +} + +int ocelot_mact_flush(struct ocelot *ocelot, int port) +{ + int err; + + mutex_lock(&ocelot->mact_lock); + + /* Program ageing filter for a single port */ + ocelot_write(ocelot, ANA_ANAGEFIL_PID_EN | ANA_ANAGEFIL_PID_VAL(port), + ANA_ANAGEFIL); + + /* Flushing dynamic FDB entries requires two successive age scans */ + ocelot_write(ocelot, + ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_AGE), + ANA_TABLES_MACACCESS); + + err = ocelot_mact_wait_for_completion(ocelot); + if (err) { + mutex_unlock(&ocelot->mact_lock); + return err; + } + + /* And second... */ + ocelot_write(ocelot, + ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_AGE), + ANA_TABLES_MACACCESS); + + err = ocelot_mact_wait_for_completion(ocelot); + + /* Restore ageing filter */ + ocelot_write(ocelot, 0, ANA_ANAGEFIL); + + mutex_unlock(&ocelot->mact_lock); + + return err; +} +EXPORT_SYMBOL_GPL(ocelot_mact_flush); + +int ocelot_fdb_dump(struct ocelot *ocelot, int port, + dsa_fdb_dump_cb_t *cb, void *data) +{ + int err = 0; + int i, j; + + /* We could take the lock just around ocelot_mact_read, but doing so + * thousands of times in a row seems rather pointless and inefficient. + */ + mutex_lock(&ocelot->mact_lock); + + /* Loop through all the mac tables entries. */ + for (i = 0; i < ocelot->num_mact_rows; i++) { + for (j = 0; j < 4; j++) { + struct ocelot_mact_entry entry; + bool is_static; + + err = ocelot_mact_read(ocelot, port, i, j, &entry); + /* If the entry is invalid (wrong port, invalid...), + * skip it. + */ + if (err == -EINVAL) + continue; + else if (err) + break; + + is_static = (entry.type == ENTRYTYPE_LOCKED); + + /* Hide the reserved VLANs used for + * VLAN-unaware bridging. 
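+ * They are reported with VID 0 instead, as if learned outside any VLAN.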
+ */ + if (entry.vid > OCELOT_RSV_VLAN_RANGE_START) + entry.vid = 0; + + err = cb(entry.mac, entry.vid, is_static, data); + if (err) + break; + } + } + + mutex_unlock(&ocelot->mact_lock); + + return err; +} +EXPORT_SYMBOL(ocelot_fdb_dump); + +int ocelot_trap_add(struct ocelot *ocelot, int port, + unsigned long cookie, bool take_ts, + void (*populate)(struct ocelot_vcap_filter *f)) +{ + struct ocelot_vcap_block *block_vcap_is2; + struct ocelot_vcap_filter *trap; + bool new = false; + int err; + + block_vcap_is2 = &ocelot->block[VCAP_IS2]; + + trap = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, cookie, + false); + if (!trap) { + trap = kzalloc(sizeof(*trap), GFP_KERNEL); + if (!trap) + return -ENOMEM; + + populate(trap); + trap->prio = 1; + trap->id.cookie = cookie; + trap->id.tc_offload = false; + trap->block_id = VCAP_IS2; + trap->type = OCELOT_VCAP_FILTER_OFFLOAD; + trap->lookup = 0; + trap->action.cpu_copy_ena = true; + trap->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY; + trap->action.port_mask = 0; + trap->take_ts = take_ts; + trap->is_trap = true; + new = true; + } + + trap->ingress_port_mask |= BIT(port); + + if (new) + err = ocelot_vcap_filter_add(ocelot, trap, NULL); + else + err = ocelot_vcap_filter_replace(ocelot, trap); + if (err) { + trap->ingress_port_mask &= ~BIT(port); + if (!trap->ingress_port_mask) + kfree(trap); + return err; + } + + return 0; +} + +int ocelot_trap_del(struct ocelot *ocelot, int port, unsigned long cookie) +{ + struct ocelot_vcap_block *block_vcap_is2; + struct ocelot_vcap_filter *trap; + + block_vcap_is2 = &ocelot->block[VCAP_IS2]; + + trap = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, cookie, + false); + if (!trap) + return 0; + + trap->ingress_port_mask &= ~BIT(port); + if (!trap->ingress_port_mask) + return ocelot_vcap_filter_del(ocelot, trap); + + return ocelot_vcap_filter_replace(ocelot, trap); +} + +static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond) +{ + u32 mask = 0; + int port; + + lockdep_assert_held(&ocelot->fwd_domain_lock); + + for (port = 0; port < ocelot->num_phys_ports; port++) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + if (!ocelot_port) + continue; + + if (ocelot_port->bond == bond) + mask |= BIT(port); + } + + return mask; +} + +/* The logical port number of a LAG is equal to the lowest numbered physical + * port ID present in that LAG. It may change if that port ever leaves the LAG. + */ +int ocelot_bond_get_id(struct ocelot *ocelot, struct net_device *bond) +{ + int bond_mask = ocelot_get_bond_mask(ocelot, bond); + + if (!bond_mask) + return -ENOENT; + + return __ffs(bond_mask); +} +EXPORT_SYMBOL_GPL(ocelot_bond_get_id); + +/* Returns the mask of user ports assigned to this DSA tag_8021q CPU port. + * Note that when CPU ports are in a LAG, the user ports are assigned to the + * 'primary' CPU port, the one whose physical port number gives the logical + * port number of the LAG. + * + * We leave PGID_SRC poorly configured for the 'secondary' CPU port in the LAG + * (to which no user port is assigned), but it appears that forwarding from + * this secondary CPU port looks at the PGID_SRC associated with the logical + * port ID that it's assigned to, which *is* configured properly. 
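+ * (Hypothetical example: CPU ports 4 and 5 bonded, logical port 4. All user ports are assigned to port 4, and forwarding from port 5 consults PGID_SRC[4].)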
+ */ +static u32 ocelot_dsa_8021q_cpu_assigned_ports(struct ocelot *ocelot, + struct ocelot_port *cpu) +{ + u32 mask = 0; + int port; + + for (port = 0; port < ocelot->num_phys_ports; port++) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + if (!ocelot_port) + continue; + + if (ocelot_port->dsa_8021q_cpu == cpu) + mask |= BIT(port); + } + + if (cpu->bond) + mask &= ~ocelot_get_bond_mask(ocelot, cpu->bond); + + return mask; +} + +/* Returns the DSA tag_8021q CPU port that the given port is assigned to, + * or the bit mask of CPU ports if said CPU port is in a LAG. + */ +u32 ocelot_port_assigned_dsa_8021q_cpu_mask(struct ocelot *ocelot, int port) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct ocelot_port *cpu_port = ocelot_port->dsa_8021q_cpu; + + if (!cpu_port) + return 0; + + if (cpu_port->bond) + return ocelot_get_bond_mask(ocelot, cpu_port->bond); + + return BIT(cpu_port->index); +} +EXPORT_SYMBOL_GPL(ocelot_port_assigned_dsa_8021q_cpu_mask); + +u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port) +{ + struct ocelot_port *ocelot_port = ocelot->ports[src_port]; + const struct net_device *bridge; + u32 mask = 0; + int port; + + if (!ocelot_port || ocelot_port->stp_state != BR_STATE_FORWARDING) + return 0; + + bridge = ocelot_port->bridge; + if (!bridge) + return 0; + + for (port = 0; port < ocelot->num_phys_ports; port++) { + ocelot_port = ocelot->ports[port]; + + if (!ocelot_port) + continue; + + if (ocelot_port->stp_state == BR_STATE_FORWARDING && + ocelot_port->bridge == bridge) + mask |= BIT(port); + } + + return mask; +} +EXPORT_SYMBOL_GPL(ocelot_get_bridge_fwd_mask); + +static void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining) +{ + int port; + + lockdep_assert_held(&ocelot->fwd_domain_lock); + + /* If cut-through forwarding is supported, update the masks before a + * port joins the forwarding domain, to avoid potential underruns if it + * has the highest speed from the new domain. + */ + if (joining && ocelot->ops->cut_through_fwd) + ocelot->ops->cut_through_fwd(ocelot); + + /* Apply FWD mask. The loop is needed to add/remove the current port as + * a source for the other ports. + */ + for (port = 0; port < ocelot->num_phys_ports; port++) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; + unsigned long mask; + + if (!ocelot_port) { + /* Unused ports can't send anywhere */ + mask = 0; + } else if (ocelot_port->is_dsa_8021q_cpu) { + /* The DSA tag_8021q CPU ports need to be able to + * forward packets to all ports assigned to them. + */ + mask = ocelot_dsa_8021q_cpu_assigned_ports(ocelot, + ocelot_port); + } else if (ocelot_port->bridge) { + struct net_device *bond = ocelot_port->bond; + + mask = ocelot_get_bridge_fwd_mask(ocelot, port); + mask &= ~BIT(port); + + mask |= ocelot_port_assigned_dsa_8021q_cpu_mask(ocelot, + port); + + if (bond) + mask &= ~ocelot_get_bond_mask(ocelot, bond); + } else { + /* Standalone ports forward only to DSA tag_8021q CPU + * ports (if those exist), or to the hardware CPU port + * module otherwise. + */ + mask = ocelot_port_assigned_dsa_8021q_cpu_mask(ocelot, + port); + } + + ocelot_write_rix(ocelot, mask, ANA_PGID_PGID, PGID_SRC + port); + } + + /* If cut-through forwarding is supported and a port is leaving, there + * is a chance that cut-through was disabled on the other ports due to + * the port which is leaving (it has a higher link speed). 
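+ * Cut-through is presumably only safe while the ingress port is not faster than the egress port, hence the dependency on the fastest port in the domain.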
We need to + * update the cut-through masks of the remaining ports no earlier than + * after the port has left, to prevent underruns from happening between + * the cut-through update and the forwarding domain update. + */ + if (!joining && ocelot->ops->cut_through_fwd) + ocelot->ops->cut_through_fwd(ocelot); +} + +/* Update PGID_CPU which is the destination port mask used for whitelisting + * unicast addresses filtered towards the host. In the normal and NPI modes, + * this points to the analyzer entry for the CPU port module, while in DSA + * tag_8021q mode, it is a bit mask of all active CPU ports. + * PGID_SRC will take care of forwarding a packet from one user port to + * no more than a single CPU port. + */ +static void ocelot_update_pgid_cpu(struct ocelot *ocelot) +{ + int pgid_cpu = 0; + int port; + + for (port = 0; port < ocelot->num_phys_ports; port++) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + if (!ocelot_port || !ocelot_port->is_dsa_8021q_cpu) + continue; + + pgid_cpu |= BIT(port); + } + + if (!pgid_cpu) + pgid_cpu = BIT(ocelot->num_phys_ports); + + ocelot_write_rix(ocelot, pgid_cpu, ANA_PGID_PGID, PGID_CPU); +} + +void ocelot_port_setup_dsa_8021q_cpu(struct ocelot *ocelot, int cpu) +{ + struct ocelot_port *cpu_port = ocelot->ports[cpu]; + u16 vid; + + mutex_lock(&ocelot->fwd_domain_lock); + + cpu_port->is_dsa_8021q_cpu = true; + + for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++) + ocelot_vlan_member_add(ocelot, cpu, vid, true); + + ocelot_update_pgid_cpu(ocelot); + + mutex_unlock(&ocelot->fwd_domain_lock); +} +EXPORT_SYMBOL_GPL(ocelot_port_setup_dsa_8021q_cpu); + +void ocelot_port_teardown_dsa_8021q_cpu(struct ocelot *ocelot, int cpu) +{ + struct ocelot_port *cpu_port = ocelot->ports[cpu]; + u16 vid; + + mutex_lock(&ocelot->fwd_domain_lock); + + cpu_port->is_dsa_8021q_cpu = false; + + for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++) + ocelot_vlan_member_del(ocelot, cpu_port->index, vid); + + ocelot_update_pgid_cpu(ocelot); + + mutex_unlock(&ocelot->fwd_domain_lock); +} +EXPORT_SYMBOL_GPL(ocelot_port_teardown_dsa_8021q_cpu); + +void ocelot_port_assign_dsa_8021q_cpu(struct ocelot *ocelot, int port, + int cpu) +{ + struct ocelot_port *cpu_port = ocelot->ports[cpu]; + + mutex_lock(&ocelot->fwd_domain_lock); + + ocelot->ports[port]->dsa_8021q_cpu = cpu_port; + ocelot_apply_bridge_fwd_mask(ocelot, true); + + mutex_unlock(&ocelot->fwd_domain_lock); +} +EXPORT_SYMBOL_GPL(ocelot_port_assign_dsa_8021q_cpu); + +void ocelot_port_unassign_dsa_8021q_cpu(struct ocelot *ocelot, int port) +{ + mutex_lock(&ocelot->fwd_domain_lock); + + ocelot->ports[port]->dsa_8021q_cpu = NULL; + ocelot_apply_bridge_fwd_mask(ocelot, true); + + mutex_unlock(&ocelot->fwd_domain_lock); +} +EXPORT_SYMBOL_GPL(ocelot_port_unassign_dsa_8021q_cpu); + +void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + u32 learn_ena = 0; + + mutex_lock(&ocelot->fwd_domain_lock); + + ocelot_port->stp_state = state; + + if ((state == BR_STATE_LEARNING || state == BR_STATE_FORWARDING) && + ocelot_port->learn_ena) + learn_ena = ANA_PORT_PORT_CFG_LEARN_ENA; + + ocelot_rmw_gix(ocelot, learn_ena, ANA_PORT_PORT_CFG_LEARN_ENA, + ANA_PORT_PORT_CFG, port); + + ocelot_apply_bridge_fwd_mask(ocelot, state == BR_STATE_FORWARDING); + + mutex_unlock(&ocelot->fwd_domain_lock); +} +EXPORT_SYMBOL(ocelot_bridge_stp_state_set); + +void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs) +{ + unsigned int 
age_period = ANA_AUTOAGE_AGE_PERIOD(msecs / 2000); + + /* Setting AGE_PERIOD to zero effectively disables automatic aging, + * which is clearly not what our intention is. So avoid that. + */ + if (!age_period) + age_period = 1; + + ocelot_rmw(ocelot, age_period, ANA_AUTOAGE_AGE_PERIOD_M, ANA_AUTOAGE); +} +EXPORT_SYMBOL(ocelot_set_ageing_time); + +static struct ocelot_multicast *ocelot_multicast_get(struct ocelot *ocelot, + const unsigned char *addr, + u16 vid) +{ + struct ocelot_multicast *mc; + + list_for_each_entry(mc, &ocelot->multicast, list) { + if (ether_addr_equal(mc->addr, addr) && mc->vid == vid) + return mc; + } + + return NULL; +} + +static enum macaccess_entry_type ocelot_classify_mdb(const unsigned char *addr) +{ + if (addr[0] == 0x01 && addr[1] == 0x00 && addr[2] == 0x5e) + return ENTRYTYPE_MACv4; + if (addr[0] == 0x33 && addr[1] == 0x33) + return ENTRYTYPE_MACv6; + return ENTRYTYPE_LOCKED; +} + +static struct ocelot_pgid *ocelot_pgid_alloc(struct ocelot *ocelot, int index, + unsigned long ports) +{ + struct ocelot_pgid *pgid; + + pgid = kzalloc(sizeof(*pgid), GFP_KERNEL); + if (!pgid) + return ERR_PTR(-ENOMEM); + + pgid->ports = ports; + pgid->index = index; + refcount_set(&pgid->refcount, 1); + list_add_tail(&pgid->list, &ocelot->pgids); + + return pgid; +} + +static void ocelot_pgid_free(struct ocelot *ocelot, struct ocelot_pgid *pgid) +{ + if (!refcount_dec_and_test(&pgid->refcount)) + return; + + list_del(&pgid->list); + kfree(pgid); +} + +static struct ocelot_pgid *ocelot_mdb_get_pgid(struct ocelot *ocelot, + const struct ocelot_multicast *mc) +{ + struct ocelot_pgid *pgid; + int index; + + /* According to VSC7514 datasheet 3.9.1.5 IPv4 Multicast Entries and + * 3.9.1.6 IPv6 Multicast Entries, "Instead of a lookup in the + * destination mask table (PGID), the destination set is programmed as + * part of the entry MAC address.", and the DEST_IDX is set to 0. 
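+ * (See ocelot_encode_ports_to_mdb() below: for ENTRYTYPE_MACv4 the port mask overwrites bytes 0-2 of the stored MAC address, for ENTRYTYPE_MACv6 bytes 0-1.)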
+ */ + if (mc->entry_type == ENTRYTYPE_MACv4 || + mc->entry_type == ENTRYTYPE_MACv6) + return ocelot_pgid_alloc(ocelot, 0, mc->ports); + + list_for_each_entry(pgid, &ocelot->pgids, list) { + /* When searching for a nonreserved multicast PGID, ignore the + * dummy PGID of zero that we have for MACv4/MACv6 entries + */ + if (pgid->index && pgid->ports == mc->ports) { + refcount_inc(&pgid->refcount); + return pgid; + } + } + + /* Search for a free index in the nonreserved multicast PGID area */ + for_each_nonreserved_multicast_dest_pgid(ocelot, index) { + bool used = false; + + list_for_each_entry(pgid, &ocelot->pgids, list) { + if (pgid->index == index) { + used = true; + break; + } + } + + if (!used) + return ocelot_pgid_alloc(ocelot, index, mc->ports); + } + + return ERR_PTR(-ENOSPC); +} + +static void ocelot_encode_ports_to_mdb(unsigned char *addr, + struct ocelot_multicast *mc) +{ + ether_addr_copy(addr, mc->addr); + + if (mc->entry_type == ENTRYTYPE_MACv4) { + addr[0] = 0; + addr[1] = mc->ports >> 8; + addr[2] = mc->ports & 0xff; + } else if (mc->entry_type == ENTRYTYPE_MACv6) { + addr[0] = mc->ports >> 8; + addr[1] = mc->ports & 0xff; + } +} + +int ocelot_port_mdb_add(struct ocelot *ocelot, int port, + const struct switchdev_obj_port_mdb *mdb, + const struct net_device *bridge) +{ + unsigned char addr[ETH_ALEN]; + struct ocelot_multicast *mc; + struct ocelot_pgid *pgid; + u16 vid = mdb->vid; + + if (!vid) + vid = ocelot_vlan_unaware_pvid(ocelot, bridge); + + mc = ocelot_multicast_get(ocelot, mdb->addr, vid); + if (!mc) { + /* New entry */ + mc = devm_kzalloc(ocelot->dev, sizeof(*mc), GFP_KERNEL); + if (!mc) + return -ENOMEM; + + mc->entry_type = ocelot_classify_mdb(mdb->addr); + ether_addr_copy(mc->addr, mdb->addr); + mc->vid = vid; + + list_add_tail(&mc->list, &ocelot->multicast); + } else { + /* Existing entry. Clean up the current port mask from + * hardware now, because we'll be modifying it. 
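+ * The order matters: drop the PGID reference and forget the old MACT entry (whose MAC address may encode the old port mask) before re-learning with the updated mask below.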
+ */ + ocelot_pgid_free(ocelot, mc->pgid); + ocelot_encode_ports_to_mdb(addr, mc); + ocelot_mact_forget(ocelot, addr, vid); + } + + mc->ports |= BIT(port); + + pgid = ocelot_mdb_get_pgid(ocelot, mc); + if (IS_ERR(pgid)) { + dev_err(ocelot->dev, + "Cannot allocate PGID for mdb %pM vid %d\n", + mc->addr, mc->vid); + devm_kfree(ocelot->dev, mc); + return PTR_ERR(pgid); + } + mc->pgid = pgid; + + ocelot_encode_ports_to_mdb(addr, mc); + + if (mc->entry_type != ENTRYTYPE_MACv4 && + mc->entry_type != ENTRYTYPE_MACv6) + ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID, + pgid->index); + + return ocelot_mact_learn(ocelot, pgid->index, addr, vid, + mc->entry_type); +} +EXPORT_SYMBOL(ocelot_port_mdb_add); + +int ocelot_port_mdb_del(struct ocelot *ocelot, int port, + const struct switchdev_obj_port_mdb *mdb, + const struct net_device *bridge) +{ + unsigned char addr[ETH_ALEN]; + struct ocelot_multicast *mc; + struct ocelot_pgid *pgid; + u16 vid = mdb->vid; + + if (!vid) + vid = ocelot_vlan_unaware_pvid(ocelot, bridge); + + mc = ocelot_multicast_get(ocelot, mdb->addr, vid); + if (!mc) + return -ENOENT; + + ocelot_encode_ports_to_mdb(addr, mc); + ocelot_mact_forget(ocelot, addr, vid); + + ocelot_pgid_free(ocelot, mc->pgid); + mc->ports &= ~BIT(port); + if (!mc->ports) { + list_del(&mc->list); + devm_kfree(ocelot->dev, mc); + return 0; + } + + /* We have a PGID with fewer ports now */ + pgid = ocelot_mdb_get_pgid(ocelot, mc); + if (IS_ERR(pgid)) + return PTR_ERR(pgid); + mc->pgid = pgid; + + ocelot_encode_ports_to_mdb(addr, mc); + + if (mc->entry_type != ENTRYTYPE_MACv4 && + mc->entry_type != ENTRYTYPE_MACv6) + ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID, + pgid->index); + + return ocelot_mact_learn(ocelot, pgid->index, addr, vid, + mc->entry_type); +} +EXPORT_SYMBOL(ocelot_port_mdb_del); + +int ocelot_port_bridge_join(struct ocelot *ocelot, int port, + struct net_device *bridge, int bridge_num, + struct netlink_ext_ack *extack) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + int err; + + err = ocelot_single_vlan_aware_bridge(ocelot, extack); + if (err) + return err; + + mutex_lock(&ocelot->fwd_domain_lock); + + ocelot_port->bridge = bridge; + ocelot_port->bridge_num = bridge_num; + + ocelot_apply_bridge_fwd_mask(ocelot, true); + + mutex_unlock(&ocelot->fwd_domain_lock); + + if (br_vlan_enabled(bridge)) + return 0; + + return ocelot_add_vlan_unaware_pvid(ocelot, port, bridge); +} +EXPORT_SYMBOL(ocelot_port_bridge_join); + +void ocelot_port_bridge_leave(struct ocelot *ocelot, int port, + struct net_device *bridge) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + mutex_lock(&ocelot->fwd_domain_lock); + + if (!br_vlan_enabled(bridge)) + ocelot_del_vlan_unaware_pvid(ocelot, port, bridge); + + ocelot_port->bridge = NULL; + ocelot_port->bridge_num = -1; + + ocelot_port_set_pvid(ocelot, port, NULL); + ocelot_port_manage_port_tag(ocelot, port); + ocelot_apply_bridge_fwd_mask(ocelot, false); + + mutex_unlock(&ocelot->fwd_domain_lock); +} +EXPORT_SYMBOL(ocelot_port_bridge_leave); + +static void ocelot_set_aggr_pgids(struct ocelot *ocelot) +{ + unsigned long visited = GENMASK(ocelot->num_phys_ports - 1, 0); + int i, port, lag; + + /* Reset destination and aggregation PGIDS */ + for_each_unicast_dest_pgid(ocelot, port) + ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port); + + for_each_aggr_pgid(ocelot, i) + ocelot_write_rix(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0), + ANA_PGID_PGID, i); + + /* The visited ports bitmask holds the list of ports offloading any + * 
bonding interface. Initially we mark all these ports as unvisited, + * then every time we visit a port in this bitmask, we know that it is + * the lowest numbered port, i.e. the one whose logical ID == physical + * port ID == LAG ID. So we mark as visited all further ports in the + * bitmask that are offloading the same bonding interface. This way, + * we set up the aggregation PGIDs only once per bonding interface. + */ + for (port = 0; port < ocelot->num_phys_ports; port++) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + if (!ocelot_port || !ocelot_port->bond) + continue; + + visited &= ~BIT(port); + } + + /* Now, set PGIDs for each active LAG */ + for (lag = 0; lag < ocelot->num_phys_ports; lag++) { + struct net_device *bond = ocelot->ports[lag]->bond; + int num_active_ports = 0; + unsigned long bond_mask; + u8 aggr_idx[16]; + + if (!bond || (visited & BIT(lag))) + continue; + + bond_mask = ocelot_get_bond_mask(ocelot, bond); + + for_each_set_bit(port, &bond_mask, ocelot->num_phys_ports) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + // Destination mask + ocelot_write_rix(ocelot, bond_mask, + ANA_PGID_PGID, port); + + if (ocelot_port->lag_tx_active) + aggr_idx[num_active_ports++] = port; + } + + for_each_aggr_pgid(ocelot, i) { + u32 ac; + + ac = ocelot_read_rix(ocelot, ANA_PGID_PGID, i); + ac &= ~bond_mask; + /* Don't do division by zero if there was no active + * port. Just make all aggregation codes zero. + */ + if (num_active_ports) + ac |= BIT(aggr_idx[i % num_active_ports]); + ocelot_write_rix(ocelot, ac, ANA_PGID_PGID, i); + } + + /* Mark all ports in the same LAG as visited to avoid applying + * the same config again. + */ + for (port = lag; port < ocelot->num_phys_ports; port++) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + if (!ocelot_port) + continue; + + if (ocelot_port->bond == bond) + visited |= BIT(port); + } + } +} + +/* When offloading a bonding interface, the switch ports configured under the + * same bond must have the same logical port ID, equal to the physical port ID + * of the lowest numbered physical port in that bond. Otherwise, in standalone/ + * bridged mode, each port has a logical port ID equal to its physical port ID. + */ +static void ocelot_setup_logical_port_ids(struct ocelot *ocelot) +{ + int port; + + for (port = 0; port < ocelot->num_phys_ports; port++) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct net_device *bond; + + if (!ocelot_port) + continue; + + bond = ocelot_port->bond; + if (bond) { + int lag = ocelot_bond_get_id(ocelot, bond); + + ocelot_rmw_gix(ocelot, + ANA_PORT_PORT_CFG_PORTID_VAL(lag), + ANA_PORT_PORT_CFG_PORTID_VAL_M, + ANA_PORT_PORT_CFG, port); + } else { + ocelot_rmw_gix(ocelot, + ANA_PORT_PORT_CFG_PORTID_VAL(port), + ANA_PORT_PORT_CFG_PORTID_VAL_M, + ANA_PORT_PORT_CFG, port); + } + } +} + +static int ocelot_migrate_mc(struct ocelot *ocelot, struct ocelot_multicast *mc, + unsigned long from_mask, unsigned long to_mask) +{ + unsigned char addr[ETH_ALEN]; + struct ocelot_pgid *pgid; + u16 vid = mc->vid; + + dev_dbg(ocelot->dev, + "Migrating multicast %pM vid %d from port mask 0x%lx to 0x%lx\n", + mc->addr, mc->vid, from_mask, to_mask); + + /* First clean up the current port mask from hardware, because + * we'll be modifying it. 
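+	 * As a worked example: with from_mask 0x4 (port 2) and to_mask 0x10
+	 * (port 4), a previous mc->ports of 0x5 becomes (0x5 & ~0x4) | 0x10 =
+	 * 0x11, i.e. ports 0 and 4.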
+ */ + ocelot_pgid_free(ocelot, mc->pgid); + ocelot_encode_ports_to_mdb(addr, mc); + ocelot_mact_forget(ocelot, addr, vid); + + mc->ports &= ~from_mask; + mc->ports |= to_mask; + + pgid = ocelot_mdb_get_pgid(ocelot, mc); + if (IS_ERR(pgid)) { + dev_err(ocelot->dev, + "Cannot allocate PGID for mdb %pM vid %d\n", + mc->addr, mc->vid); + devm_kfree(ocelot->dev, mc); + return PTR_ERR(pgid); + } + mc->pgid = pgid; + + ocelot_encode_ports_to_mdb(addr, mc); + + if (mc->entry_type != ENTRYTYPE_MACv4 && + mc->entry_type != ENTRYTYPE_MACv6) + ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID, + pgid->index); + + return ocelot_mact_learn(ocelot, pgid->index, addr, vid, + mc->entry_type); +} + +int ocelot_migrate_mdbs(struct ocelot *ocelot, unsigned long from_mask, + unsigned long to_mask) +{ + struct ocelot_multicast *mc; + int err; + + list_for_each_entry(mc, &ocelot->multicast, list) { + if (!(mc->ports & from_mask)) + continue; + + err = ocelot_migrate_mc(ocelot, mc, from_mask, to_mask); + if (err) + return err; + } + + return 0; +} +EXPORT_SYMBOL_GPL(ocelot_migrate_mdbs); + +/* Documentation for PORTID_VAL says: + * Logical port number for front port. If port is not a member of a LLAG, + * then PORTID must be set to the physical port number. + * If port is a member of a LLAG, then PORTID must be set to the common + * PORTID_VAL used for all member ports of the LLAG. + * The value must not exceed the number of physical ports on the device. + * + * This means we have little choice but to migrate FDB entries pointing towards + * a logical port when that changes. + */ +static void ocelot_migrate_lag_fdbs(struct ocelot *ocelot, + struct net_device *bond, + int lag) +{ + struct ocelot_lag_fdb *fdb; + int err; + + lockdep_assert_held(&ocelot->fwd_domain_lock); + + list_for_each_entry(fdb, &ocelot->lag_fdbs, list) { + if (fdb->bond != bond) + continue; + + err = ocelot_mact_forget(ocelot, fdb->addr, fdb->vid); + if (err) { + dev_err(ocelot->dev, + "failed to delete LAG %s FDB %pM vid %d: %pe\n", + bond->name, fdb->addr, fdb->vid, ERR_PTR(err)); + } + + err = ocelot_mact_learn(ocelot, lag, fdb->addr, fdb->vid, + ENTRYTYPE_LOCKED); + if (err) { + dev_err(ocelot->dev, + "failed to migrate LAG %s FDB %pM vid %d: %pe\n", + bond->name, fdb->addr, fdb->vid, ERR_PTR(err)); + } + } +} + +int ocelot_port_lag_join(struct ocelot *ocelot, int port, + struct net_device *bond, + struct netdev_lag_upper_info *info, + struct netlink_ext_ack *extack) +{ + if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { + NL_SET_ERR_MSG_MOD(extack, + "Can only offload LAG using hash TX type"); + return -EOPNOTSUPP; + } + + mutex_lock(&ocelot->fwd_domain_lock); + + ocelot->ports[port]->bond = bond; + + ocelot_setup_logical_port_ids(ocelot); + ocelot_apply_bridge_fwd_mask(ocelot, true); + ocelot_set_aggr_pgids(ocelot); + + mutex_unlock(&ocelot->fwd_domain_lock); + + return 0; +} +EXPORT_SYMBOL(ocelot_port_lag_join); + +void ocelot_port_lag_leave(struct ocelot *ocelot, int port, + struct net_device *bond) +{ + int old_lag_id, new_lag_id; + + mutex_lock(&ocelot->fwd_domain_lock); + + old_lag_id = ocelot_bond_get_id(ocelot, bond); + + ocelot->ports[port]->bond = NULL; + + ocelot_setup_logical_port_ids(ocelot); + ocelot_apply_bridge_fwd_mask(ocelot, false); + ocelot_set_aggr_pgids(ocelot); + + new_lag_id = ocelot_bond_get_id(ocelot, bond); + + if (new_lag_id >= 0 && old_lag_id != new_lag_id) + ocelot_migrate_lag_fdbs(ocelot, bond, new_lag_id); + + mutex_unlock(&ocelot->fwd_domain_lock); +} +EXPORT_SYMBOL(ocelot_port_lag_leave); + +void 
ocelot_port_lag_change(struct ocelot *ocelot, int port, bool lag_tx_active) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + mutex_lock(&ocelot->fwd_domain_lock); + + ocelot_port->lag_tx_active = lag_tx_active; + + /* Rebalance the LAGs */ + ocelot_set_aggr_pgids(ocelot); + + mutex_unlock(&ocelot->fwd_domain_lock); +} +EXPORT_SYMBOL(ocelot_port_lag_change); + +int ocelot_lag_fdb_add(struct ocelot *ocelot, struct net_device *bond, + const unsigned char *addr, u16 vid, + const struct net_device *bridge) +{ + struct ocelot_lag_fdb *fdb; + int lag, err; + + fdb = kzalloc(sizeof(*fdb), GFP_KERNEL); + if (!fdb) + return -ENOMEM; + + mutex_lock(&ocelot->fwd_domain_lock); + + if (!vid) + vid = ocelot_vlan_unaware_pvid(ocelot, bridge); + + ether_addr_copy(fdb->addr, addr); + fdb->vid = vid; + fdb->bond = bond; + + lag = ocelot_bond_get_id(ocelot, bond); + + err = ocelot_mact_learn(ocelot, lag, addr, vid, ENTRYTYPE_LOCKED); + if (err) { + mutex_unlock(&ocelot->fwd_domain_lock); + kfree(fdb); + return err; + } + + list_add_tail(&fdb->list, &ocelot->lag_fdbs); + mutex_unlock(&ocelot->fwd_domain_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(ocelot_lag_fdb_add); + +int ocelot_lag_fdb_del(struct ocelot *ocelot, struct net_device *bond, + const unsigned char *addr, u16 vid, + const struct net_device *bridge) +{ + struct ocelot_lag_fdb *fdb, *tmp; + + mutex_lock(&ocelot->fwd_domain_lock); + + if (!vid) + vid = ocelot_vlan_unaware_pvid(ocelot, bridge); + + list_for_each_entry_safe(fdb, tmp, &ocelot->lag_fdbs, list) { + if (!ether_addr_equal(fdb->addr, addr) || fdb->vid != vid || + fdb->bond != bond) + continue; + + ocelot_mact_forget(ocelot, addr, vid); + list_del(&fdb->list); + mutex_unlock(&ocelot->fwd_domain_lock); + kfree(fdb); + + return 0; + } + + mutex_unlock(&ocelot->fwd_domain_lock); + + return -ENOENT; +} +EXPORT_SYMBOL_GPL(ocelot_lag_fdb_del); + +/* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu. + * The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG. + * In the special case that it's the NPI port that we're configuring, the + * length of the tag and optional prefix needs to be accounted for privately, + * in order to be able to sustain communication at the requested @sdu. 
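+ * As a worked example (assuming OCELOT_TAG_LEN = 16 and
+ * OCELOT_LONG_PREFIX_LEN = 16, per linux/dsa/ocelot.h): an NPI port using
+ * the long injection prefix with sdu = 1500 is programmed with
+ * maxlen = 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 16 + 16 = 1550.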
+ */ +void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + int maxlen = sdu + ETH_HLEN + ETH_FCS_LEN; + int pause_start, pause_stop; + int atop, atop_tot; + + if (port == ocelot->npi) { + maxlen += OCELOT_TAG_LEN; + + if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_SHORT) + maxlen += OCELOT_SHORT_PREFIX_LEN; + else if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_LONG) + maxlen += OCELOT_LONG_PREFIX_LEN; + } + + ocelot_port_writel(ocelot_port, maxlen, DEV_MAC_MAXLEN_CFG); + + /* Set Pause watermark hysteresis */ + pause_start = 6 * maxlen / OCELOT_BUFFER_CELL_SZ; + pause_stop = 4 * maxlen / OCELOT_BUFFER_CELL_SZ; + ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_START, + pause_start); + ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_STOP, + pause_stop); + + /* Tail dropping watermarks */ + atop_tot = (ocelot->packet_buffer_size - 9 * maxlen) / + OCELOT_BUFFER_CELL_SZ; + atop = (9 * maxlen) / OCELOT_BUFFER_CELL_SZ; + ocelot_write_rix(ocelot, ocelot->ops->wm_enc(atop), SYS_ATOP, port); + ocelot_write(ocelot, ocelot->ops->wm_enc(atop_tot), SYS_ATOP_TOT_CFG); +} +EXPORT_SYMBOL(ocelot_port_set_maxlen); + +int ocelot_get_max_mtu(struct ocelot *ocelot, int port) +{ + int max_mtu = 65535 - ETH_HLEN - ETH_FCS_LEN; + + if (port == ocelot->npi) { + max_mtu -= OCELOT_TAG_LEN; + + if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_SHORT) + max_mtu -= OCELOT_SHORT_PREFIX_LEN; + else if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_LONG) + max_mtu -= OCELOT_LONG_PREFIX_LEN; + } + + return max_mtu; +} +EXPORT_SYMBOL(ocelot_get_max_mtu); + +static void ocelot_port_set_learning(struct ocelot *ocelot, int port, + bool enabled) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + u32 val = 0; + + if (enabled) + val = ANA_PORT_PORT_CFG_LEARN_ENA; + + ocelot_rmw_gix(ocelot, val, ANA_PORT_PORT_CFG_LEARN_ENA, + ANA_PORT_PORT_CFG, port); + + ocelot_port->learn_ena = enabled; +} + +static void ocelot_port_set_ucast_flood(struct ocelot *ocelot, int port, + bool enabled) +{ + u32 val = 0; + + if (enabled) + val = BIT(port); + + ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_UC); +} + +static void ocelot_port_set_mcast_flood(struct ocelot *ocelot, int port, + bool enabled) +{ + u32 val = 0; + + if (enabled) + val = BIT(port); + + ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MC); + ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV4); + ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV6); +} + +static void ocelot_port_set_bcast_flood(struct ocelot *ocelot, int port, + bool enabled) +{ + u32 val = 0; + + if (enabled) + val = BIT(port); + + ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_BC); +} + +int ocelot_port_pre_bridge_flags(struct ocelot *ocelot, int port, + struct switchdev_brport_flags flags) +{ + if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | + BR_BCAST_FLOOD)) + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL(ocelot_port_pre_bridge_flags); + +void ocelot_port_bridge_flags(struct ocelot *ocelot, int port, + struct switchdev_brport_flags flags) +{ + if (flags.mask & BR_LEARNING) + ocelot_port_set_learning(ocelot, port, + !!(flags.val & BR_LEARNING)); + + if (flags.mask & BR_FLOOD) + ocelot_port_set_ucast_flood(ocelot, port, + !!(flags.val & BR_FLOOD)); + + if (flags.mask & BR_MCAST_FLOOD) + ocelot_port_set_mcast_flood(ocelot, port, + !!(flags.val & BR_MCAST_FLOOD)); + + if (flags.mask & BR_BCAST_FLOOD) + 
ocelot_port_set_bcast_flood(ocelot, port, + !!(flags.val & BR_BCAST_FLOOD)); +} +EXPORT_SYMBOL(ocelot_port_bridge_flags); + +int ocelot_port_get_default_prio(struct ocelot *ocelot, int port) +{ + int val = ocelot_read_gix(ocelot, ANA_PORT_QOS_CFG, port); + + return ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_X(val); +} +EXPORT_SYMBOL_GPL(ocelot_port_get_default_prio); + +int ocelot_port_set_default_prio(struct ocelot *ocelot, int port, u8 prio) +{ + if (prio >= OCELOT_NUM_TC) + return -ERANGE; + + ocelot_rmw_gix(ocelot, + ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL(prio), + ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_M, + ANA_PORT_QOS_CFG, + port); + + return 0; +} +EXPORT_SYMBOL_GPL(ocelot_port_set_default_prio); + +int ocelot_port_get_dscp_prio(struct ocelot *ocelot, int port, u8 dscp) +{ + int qos_cfg = ocelot_read_gix(ocelot, ANA_PORT_QOS_CFG, port); + int dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, dscp); + + /* Return error if DSCP prioritization isn't enabled */ + if (!(qos_cfg & ANA_PORT_QOS_CFG_QOS_DSCP_ENA)) + return -EOPNOTSUPP; + + if (qos_cfg & ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA) { + dscp = ANA_DSCP_CFG_DSCP_TRANSLATE_VAL_X(dscp_cfg); + /* Re-read ANA_DSCP_CFG for the translated DSCP */ + dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, dscp); + } + + /* If the DSCP value is not trusted, the QoS classification falls back + * to VLAN PCP or port-based default. + */ + if (!(dscp_cfg & ANA_DSCP_CFG_DSCP_TRUST_ENA)) + return -EOPNOTSUPP; + + return ANA_DSCP_CFG_QOS_DSCP_VAL_X(dscp_cfg); +} +EXPORT_SYMBOL_GPL(ocelot_port_get_dscp_prio); + +int ocelot_port_add_dscp_prio(struct ocelot *ocelot, int port, u8 dscp, u8 prio) +{ + int mask, val; + + if (prio >= OCELOT_NUM_TC) + return -ERANGE; + + /* There is at least one app table priority (this one), so we need to + * make sure DSCP prioritization is enabled on the port. + * Also make sure DSCP translation is disabled + * (dcbnl doesn't support it). + */ + mask = ANA_PORT_QOS_CFG_QOS_DSCP_ENA | + ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA; + + ocelot_rmw_gix(ocelot, ANA_PORT_QOS_CFG_QOS_DSCP_ENA, mask, + ANA_PORT_QOS_CFG, port); + + /* Trust this DSCP value and map it to the given QoS class */ + val = ANA_DSCP_CFG_DSCP_TRUST_ENA | ANA_DSCP_CFG_QOS_DSCP_VAL(prio); + + ocelot_write_rix(ocelot, val, ANA_DSCP_CFG, dscp); + + return 0; +} +EXPORT_SYMBOL_GPL(ocelot_port_add_dscp_prio); + +int ocelot_port_del_dscp_prio(struct ocelot *ocelot, int port, u8 dscp, u8 prio) +{ + int dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, dscp); + int mask, i; + + /* During a "dcb app replace" command, the new app table entry will be + * added first, then the old one will be deleted. But the hardware only + * supports one QoS class per DSCP value (duh), so if we blindly delete + * the app table entry for this DSCP value, we end up deleting the + * entry with the new priority. Avoid that by checking whether user + * space wants to delete the priority which is currently configured, or + * something else which is no longer current. + */ + if (ANA_DSCP_CFG_QOS_DSCP_VAL_X(dscp_cfg) != prio) + return 0; + + /* Untrust this DSCP value */ + ocelot_write_rix(ocelot, 0, ANA_DSCP_CFG, dscp); + + for (i = 0; i < 64; i++) { + int dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, i); + + /* There are still app table entries on the port, so we need to + * keep DSCP enabled, nothing to do. + */ + if (dscp_cfg & ANA_DSCP_CFG_DSCP_TRUST_ENA) + return 0; + } + + /* Disable DSCP QoS classification if there isn't any trusted + * DSCP value left. 
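+	 * (From user space these app table entries are driven through dcbnl;
+	 * with the iproute2 "dcb" tool the flow would look something like,
+	 * interface name hypothetical: "dcb app add dev swp0 dscp-prio 46:5"
+	 * followed later by "dcb app del dev swp0 dscp-prio 46:5".)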
+ */ + mask = ANA_PORT_QOS_CFG_QOS_DSCP_ENA | + ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA; + + ocelot_rmw_gix(ocelot, 0, mask, ANA_PORT_QOS_CFG, port); + + return 0; +} +EXPORT_SYMBOL_GPL(ocelot_port_del_dscp_prio); + +struct ocelot_mirror *ocelot_mirror_get(struct ocelot *ocelot, int to, + struct netlink_ext_ack *extack) +{ + struct ocelot_mirror *m = ocelot->mirror; + + if (m) { + if (m->to != to) { + NL_SET_ERR_MSG_MOD(extack, + "Mirroring already configured towards different egress port"); + return ERR_PTR(-EBUSY); + } + + refcount_inc(&m->refcount); + return m; + } + + m = kzalloc(sizeof(*m), GFP_KERNEL); + if (!m) + return ERR_PTR(-ENOMEM); + + m->to = to; + refcount_set(&m->refcount, 1); + ocelot->mirror = m; + + /* Program the mirror port to hardware */ + ocelot_write(ocelot, BIT(to), ANA_MIRRORPORTS); + + return m; +} + +void ocelot_mirror_put(struct ocelot *ocelot) +{ + struct ocelot_mirror *m = ocelot->mirror; + + if (!refcount_dec_and_test(&m->refcount)) + return; + + ocelot_write(ocelot, 0, ANA_MIRRORPORTS); + ocelot->mirror = NULL; + kfree(m); +} + +int ocelot_port_mirror_add(struct ocelot *ocelot, int from, int to, + bool ingress, struct netlink_ext_ack *extack) +{ + struct ocelot_mirror *m = ocelot_mirror_get(ocelot, to, extack); + + if (IS_ERR(m)) + return PTR_ERR(m); + + if (ingress) { + ocelot_rmw_gix(ocelot, ANA_PORT_PORT_CFG_SRC_MIRROR_ENA, + ANA_PORT_PORT_CFG_SRC_MIRROR_ENA, + ANA_PORT_PORT_CFG, from); + } else { + ocelot_rmw(ocelot, BIT(from), BIT(from), + ANA_EMIRRORPORTS); + } + + return 0; +} +EXPORT_SYMBOL_GPL(ocelot_port_mirror_add); + +void ocelot_port_mirror_del(struct ocelot *ocelot, int from, bool ingress) +{ + if (ingress) { + ocelot_rmw_gix(ocelot, 0, ANA_PORT_PORT_CFG_SRC_MIRROR_ENA, + ANA_PORT_PORT_CFG, from); + } else { + ocelot_rmw(ocelot, 0, BIT(from), ANA_EMIRRORPORTS); + } + + ocelot_mirror_put(ocelot); +} +EXPORT_SYMBOL_GPL(ocelot_port_mirror_del); + +static void ocelot_port_reset_mqprio(struct ocelot *ocelot, int port) +{ + struct net_device *dev = ocelot->ops->port_to_netdev(ocelot, port); + + netdev_reset_tc(dev); + ocelot_port_change_fp(ocelot, port, 0); +} + +int ocelot_port_mqprio(struct ocelot *ocelot, int port, + struct tc_mqprio_qopt_offload *mqprio) +{ + struct net_device *dev = ocelot->ops->port_to_netdev(ocelot, port); + struct netlink_ext_ack *extack = mqprio->extack; + struct tc_mqprio_qopt *qopt = &mqprio->qopt; + int num_tc = qopt->num_tc; + int tc, err; + + if (!num_tc) { + ocelot_port_reset_mqprio(ocelot, port); + return 0; + } + + err = netdev_set_num_tc(dev, num_tc); + if (err) + return err; + + for (tc = 0; tc < num_tc; tc++) { + if (qopt->count[tc] != 1) { + NL_SET_ERR_MSG_MOD(extack, + "Only one TXQ per TC supported"); + return -EINVAL; + } + + err = netdev_set_tc_queue(dev, tc, 1, qopt->offset[tc]); + if (err) + goto err_reset_tc; + } + + err = netif_set_real_num_tx_queues(dev, num_tc); + if (err) + goto err_reset_tc; + + ocelot_port_change_fp(ocelot, port, mqprio->preemptible_tcs); + + return 0; + +err_reset_tc: + ocelot_port_reset_mqprio(ocelot, port); + return err; +} +EXPORT_SYMBOL_GPL(ocelot_port_mqprio); + +void ocelot_init_port(struct ocelot *ocelot, int port) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + skb_queue_head_init(&ocelot_port->tx_skbs); + + /* Basic L2 initialization */ + + /* Set MAC IFG Gaps + * FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 0 + * !FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 5 + */ + ocelot_port_writel(ocelot_port, DEV_MAC_IFG_CFG_TX_IFG(5), + DEV_MAC_IFG_CFG); + + /* Load seed (0) and set 
MAC HDX late collision */ + ocelot_port_writel(ocelot_port, DEV_MAC_HDX_CFG_LATE_COL_POS(67) | + DEV_MAC_HDX_CFG_SEED_LOAD, + DEV_MAC_HDX_CFG); + mdelay(1); + ocelot_port_writel(ocelot_port, DEV_MAC_HDX_CFG_LATE_COL_POS(67), + DEV_MAC_HDX_CFG); + + /* Set Max Length and maximum tags allowed */ + ocelot_port_set_maxlen(ocelot, port, ETH_DATA_LEN); + ocelot_port_writel(ocelot_port, DEV_MAC_TAGS_CFG_TAG_ID(ETH_P_8021AD) | + DEV_MAC_TAGS_CFG_VLAN_AWR_ENA | + DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA | + DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, + DEV_MAC_TAGS_CFG); + + /* Set SMAC of Pause frame (00:00:00:00:00:00) */ + ocelot_port_writel(ocelot_port, 0, DEV_MAC_FC_MAC_HIGH_CFG); + ocelot_port_writel(ocelot_port, 0, DEV_MAC_FC_MAC_LOW_CFG); + + /* Enable transmission of pause frames */ + ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1); + + /* Drop frames with multicast source address */ + ocelot_rmw_gix(ocelot, ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA, + ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA, + ANA_PORT_DROP_CFG, port); + + /* Set default VLAN and tag type to 8021Q. */ + ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_TPID(ETH_P_8021Q), + REW_PORT_VLAN_CFG_PORT_TPID_M, + REW_PORT_VLAN_CFG, port); + + /* Disable source address learning for standalone mode */ + ocelot_port_set_learning(ocelot, port, false); + + /* Set the port's initial logical port ID value, enable receiving + * frames on it, and configure the MAC address learning type to + * automatic. + */ + ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_LEARNAUTO | + ANA_PORT_PORT_CFG_RECV_ENA | + ANA_PORT_PORT_CFG_PORTID_VAL(port), + ANA_PORT_PORT_CFG, port); + + /* Enable vcap lookups */ + ocelot_vcap_enable(ocelot, port); +} +EXPORT_SYMBOL(ocelot_init_port); + +/* Configure and enable the CPU port module, which is a set of queues + * accessible through register MMIO, frame DMA or Ethernet (in case + * NPI mode is used). + */ +static void ocelot_cpu_port_init(struct ocelot *ocelot) +{ + int cpu = ocelot->num_phys_ports; + + /* The unicast destination PGID for the CPU port module is unused */ + ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, cpu); + /* Instead set up a multicast destination PGID for traffic copied to + * the CPU. Whitelisted MAC addresses like the port netdevice MAC + * addresses will be copied to the CPU via this PGID. + */ + ocelot_write_rix(ocelot, BIT(cpu), ANA_PGID_PGID, PGID_CPU); + ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_RECV_ENA | + ANA_PORT_PORT_CFG_PORTID_VAL(cpu), + ANA_PORT_PORT_CFG, cpu); + + /* Enable CPU port module */ + ocelot_fields_write(ocelot, cpu, QSYS_SWITCH_PORT_MODE_PORT_ENA, 1); + /* CPU port Injection/Extraction configuration */ + ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_XTR_HDR, + OCELOT_TAG_PREFIX_NONE); + ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_INJ_HDR, + OCELOT_TAG_PREFIX_NONE); + + /* Configure the CPU port to be VLAN aware */ + ocelot_write_gix(ocelot, + ANA_PORT_VLAN_CFG_VLAN_VID(OCELOT_STANDALONE_PVID) | + ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA | + ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1), + ANA_PORT_VLAN_CFG, cpu); +} + +static void ocelot_detect_features(struct ocelot *ocelot) +{ + int mmgt, eq_ctrl; + + /* For Ocelot, Felix, Seville, Serval etc, SYS:MMGT:MMGT:FREECNT holds + * the number of 240-byte free memory words (aka 4-cell chunks) and not + * 192 bytes as the documentation incorrectly says. 
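+	 * E.g. a FREECNT readout of 4096 words corresponds to 4096 * 240 =
+	 * 983040 bytes (960 KiB) of usable packet buffer.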
+ */ + mmgt = ocelot_read(ocelot, SYS_MMGT); + ocelot->packet_buffer_size = 240 * SYS_MMGT_FREECNT(mmgt); + + eq_ctrl = ocelot_read(ocelot, QSYS_EQ_CTRL); + ocelot->num_frame_refs = QSYS_MMGT_EQ_CTRL_FP_FREE_CNT(eq_ctrl); +} + +static int ocelot_mem_init_status(struct ocelot *ocelot) +{ + unsigned int val; + int err; + + err = regmap_field_read(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], + &val); + + return err ?: val; +} + +int ocelot_reset(struct ocelot *ocelot) +{ + int err; + u32 val; + + err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1); + if (err) + return err; + + err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1); + if (err) + return err; + + /* MEM_INIT is a self-clearing bit. Wait for it to be cleared (should be + * 100us) before enabling the switch core. + */ + err = readx_poll_timeout(ocelot_mem_init_status, ocelot, val, !val, + MEM_INIT_SLEEP_US, MEM_INIT_TIMEOUT_US); + if (err) + return err; + + err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1); + if (err) + return err; + + return regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1); +} +EXPORT_SYMBOL(ocelot_reset); + +int ocelot_init(struct ocelot *ocelot) +{ + int i, ret; + u32 port; + + if (ocelot->ops->reset) { + ret = ocelot->ops->reset(ocelot); + if (ret) { + dev_err(ocelot->dev, "Switch reset failed\n"); + return ret; + } + } + + mutex_init(&ocelot->mact_lock); + mutex_init(&ocelot->fwd_domain_lock); + spin_lock_init(&ocelot->ptp_clock_lock); + spin_lock_init(&ocelot->ts_id_lock); + + ocelot->owq = alloc_ordered_workqueue("ocelot-owq", 0); + if (!ocelot->owq) + return -ENOMEM; + + ret = ocelot_stats_init(ocelot); + if (ret) + goto err_stats_init; + + INIT_LIST_HEAD(&ocelot->multicast); + INIT_LIST_HEAD(&ocelot->pgids); + INIT_LIST_HEAD(&ocelot->vlans); + INIT_LIST_HEAD(&ocelot->lag_fdbs); + ocelot_detect_features(ocelot); + ocelot_mact_init(ocelot); + ocelot_vlan_init(ocelot); + ocelot_vcap_init(ocelot); + ocelot_cpu_port_init(ocelot); + + if (ocelot->ops->psfp_init) + ocelot->ops->psfp_init(ocelot); + + if (ocelot->mm_supported) { + ret = ocelot_mm_init(ocelot); + if (ret) + goto err_mm_init; + } + + for (port = 0; port < ocelot->num_phys_ports; port++) { + /* Clear all counters (5 groups) */ + ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port) | + SYS_STAT_CFG_STAT_CLEAR_SHOT(0x7f), + SYS_STAT_CFG); + } + + /* Only use S-Tag */ + ocelot_write(ocelot, ETH_P_8021AD, SYS_VLAN_ETYPE_CFG); + + /* Aggregation mode */ + ocelot_write(ocelot, ANA_AGGR_CFG_AC_SMAC_ENA | + ANA_AGGR_CFG_AC_DMAC_ENA | + ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA | + ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA | + ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA | + ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA, + ANA_AGGR_CFG); + + /* Set MAC age time to default value. 
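+	 * (BR_DEFAULT_AGEING_TIME is 300 seconds, so AGE_PERIOD is programmed
+	 * to 150 seconds.)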
The entry is aged after + * 2*AGE_PERIOD + */ + ocelot_write(ocelot, + ANA_AUTOAGE_AGE_PERIOD(BR_DEFAULT_AGEING_TIME / 2 / HZ), + ANA_AUTOAGE); + + /* Disable learning for frames discarded by VLAN ingress filtering */ + regmap_field_write(ocelot->regfields[ANA_ADVLEARN_VLAN_CHK], 1); + + /* Setup frame ageing - fixed value "2 sec" - in 6.5 us units */ + ocelot_write(ocelot, SYS_FRM_AGING_AGE_TX_ENA | + SYS_FRM_AGING_MAX_AGE(307692), SYS_FRM_AGING); + + /* Setup flooding PGIDs */ + for (i = 0; i < ocelot->num_flooding_pgids; i++) + ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) | + ANA_FLOODING_FLD_BROADCAST(PGID_BC) | + ANA_FLOODING_FLD_UNICAST(PGID_UC), + ANA_FLOODING, i); + ocelot_write(ocelot, ANA_FLOODING_IPMC_FLD_MC6_DATA(PGID_MCIPV6) | + ANA_FLOODING_IPMC_FLD_MC6_CTRL(PGID_MC) | + ANA_FLOODING_IPMC_FLD_MC4_DATA(PGID_MCIPV4) | + ANA_FLOODING_IPMC_FLD_MC4_CTRL(PGID_MC), + ANA_FLOODING_IPMC); + + for (port = 0; port < ocelot->num_phys_ports; port++) { + /* Transmit the frame to the local port. */ + ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port); + /* Do not forward BPDU frames to the front ports. */ + ocelot_write_gix(ocelot, + ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff), + ANA_PORT_CPU_FWD_BPDU_CFG, + port); + /* Ensure bridging is disabled */ + ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_SRC + port); + } + + for_each_nonreserved_multicast_dest_pgid(ocelot, i) { + u32 val = ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports - 1, 0)); + + ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i); + } + + ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_BLACKHOLE); + + /* Allow broadcast and unknown L2 multicast to the CPU. */ + ocelot_rmw_rix(ocelot, ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)), + ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)), + ANA_PGID_PGID, PGID_MC); + ocelot_rmw_rix(ocelot, ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)), + ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)), + ANA_PGID_PGID, PGID_BC); + ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV4); + ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV6); + + /* Allow manual injection via DEVCPU_QS registers, and byte swap these + * registers endianness. 
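+	 * (MODE(1) selects manual register-based injection/extraction; the
+	 * FDMA path, when enabled, reprograms these groups for DMA instead.)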
+ */ + ocelot_write_rix(ocelot, QS_INJ_GRP_CFG_BYTE_SWAP | + QS_INJ_GRP_CFG_MODE(1), QS_INJ_GRP_CFG, 0); + ocelot_write_rix(ocelot, QS_XTR_GRP_CFG_BYTE_SWAP | + QS_XTR_GRP_CFG_MODE(1), QS_XTR_GRP_CFG, 0); + ocelot_write(ocelot, ANA_CPUQ_CFG_CPUQ_MIRROR(2) | + ANA_CPUQ_CFG_CPUQ_LRN(2) | + ANA_CPUQ_CFG_CPUQ_MAC_COPY(2) | + ANA_CPUQ_CFG_CPUQ_SRC_COPY(2) | + ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE(2) | + ANA_CPUQ_CFG_CPUQ_ALLBRIDGE(6) | + ANA_CPUQ_CFG_CPUQ_IPMC_CTRL(6) | + ANA_CPUQ_CFG_CPUQ_IGMP(6) | + ANA_CPUQ_CFG_CPUQ_MLD(6), ANA_CPUQ_CFG); + for (i = 0; i < 16; i++) + ocelot_write_rix(ocelot, ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL(6) | + ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6), + ANA_CPUQ_8021_CFG, i); + + return 0; + +err_mm_init: + ocelot_stats_deinit(ocelot); +err_stats_init: + destroy_workqueue(ocelot->owq); + return ret; +} +EXPORT_SYMBOL(ocelot_init); + +void ocelot_deinit(struct ocelot *ocelot) +{ + ocelot_stats_deinit(ocelot); + destroy_workqueue(ocelot->owq); +} +EXPORT_SYMBOL(ocelot_deinit); + +void ocelot_deinit_port(struct ocelot *ocelot, int port) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + skb_queue_purge(&ocelot_port->tx_skbs); +} +EXPORT_SYMBOL(ocelot_deinit_port); + +MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h new file mode 100644 index 0000000000..e50be508c1 --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + */ + +#ifndef _MSCC_OCELOT_H_ +#define _MSCC_OCELOT_H_ + +#include <linux/bitops.h> +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/net_tstamp.h> +#include <linux/phylink.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#include <soc/mscc/ocelot_qsys.h> +#include <soc/mscc/ocelot_sys.h> +#include <soc/mscc/ocelot_dev.h> +#include <soc/mscc/ocelot_ana.h> +#include <soc/mscc/ocelot_ptp.h> +#include <soc/mscc/ocelot_vcap.h> +#include <soc/mscc/ocelot.h> +#include "ocelot_rew.h" +#include "ocelot_qs.h" + +#define OCELOT_STANDALONE_PVID 0 +#define OCELOT_BUFFER_CELL_SZ 60 + +#define OCELOT_STATS_CHECK_DELAY (2 * HZ) + +#define OCELOT_PTP_QUEUE_SZ 128 + +#define OCELOT_JUMBO_MTU 9000 + +struct ocelot_port_tc { + bool block_shared; + unsigned long offload_cnt; + unsigned long ingress_mirred_id; + unsigned long egress_mirred_id; + unsigned long police_id; +}; + +struct ocelot_port_private { + struct ocelot_port port; + struct net_device *dev; + struct phylink *phylink; + struct phylink_config phylink_config; + struct ocelot_port_tc tc; +}; + +/* A (PGID) port mask structure, encoding the 2^ocelot->num_phys_ports + * possibilities of egress port masks for L2 multicast traffic. + * For a switch with 9 user ports, there are 512 possible port masks, but the + * hardware only has 46 individual PGIDs that it can forward multicast traffic + * to. So we need a structure that maps the limited PGID indices to the port + * destinations requested by the user for L2 multicast. 
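+ * PGIDs are reference counted: two multicast entries towards the same set
+ * of egress ports share a single struct ocelot_pgid, see
+ * ocelot_mdb_get_pgid().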
+ */ +struct ocelot_pgid { + unsigned long ports; + int index; + refcount_t refcount; + struct list_head list; +}; + +struct ocelot_multicast { + struct list_head list; + enum macaccess_entry_type entry_type; + unsigned char addr[ETH_ALEN]; + u16 vid; + u16 ports; + struct ocelot_pgid *pgid; +}; + +static inline void ocelot_reg_to_target_addr(struct ocelot *ocelot, + enum ocelot_reg reg, + enum ocelot_target *target, + u32 *addr) +{ + *target = reg >> TARGET_OFFSET; + *addr = ocelot->map[*target][reg & REG_MASK]; +} + +int ocelot_bridge_num_find(struct ocelot *ocelot, + const struct net_device *bridge); + +int ocelot_mact_learn(struct ocelot *ocelot, int port, + const unsigned char mac[ETH_ALEN], + unsigned int vid, enum macaccess_entry_type type); +int ocelot_mact_forget(struct ocelot *ocelot, + const unsigned char mac[ETH_ALEN], unsigned int vid); +struct net_device *ocelot_port_to_netdev(struct ocelot *ocelot, int port); +int ocelot_netdev_to_port(struct net_device *dev); + +int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target, + struct device_node *portnp); +void ocelot_release_port(struct ocelot_port *ocelot_port); +int ocelot_port_devlink_init(struct ocelot *ocelot, int port, + enum devlink_port_flavour flavour); +void ocelot_port_devlink_teardown(struct ocelot *ocelot, int port); + +int ocelot_trap_add(struct ocelot *ocelot, int port, + unsigned long cookie, bool take_ts, + void (*populate)(struct ocelot_vcap_filter *f)); +int ocelot_trap_del(struct ocelot *ocelot, int port, unsigned long cookie); + +struct ocelot_mirror *ocelot_mirror_get(struct ocelot *ocelot, int to, + struct netlink_ext_ack *extack); +void ocelot_mirror_put(struct ocelot *ocelot); + +int ocelot_stats_init(struct ocelot *ocelot); +void ocelot_stats_deinit(struct ocelot *ocelot); + +int ocelot_mm_init(struct ocelot *ocelot); +void ocelot_port_change_fp(struct ocelot *ocelot, int port, + unsigned long preemptible_tcs); +void ocelot_port_update_active_preemptible_tcs(struct ocelot *ocelot, int port); + +extern struct notifier_block ocelot_netdevice_nb; +extern struct notifier_block ocelot_switchdev_nb; +extern struct notifier_block ocelot_switchdev_blocking_nb; +extern const struct devlink_ops ocelot_devlink_ops; + +#endif diff --git a/drivers/net/ethernet/mscc/ocelot_devlink.c b/drivers/net/ethernet/mscc/ocelot_devlink.c new file mode 100644 index 0000000000..d9ea75a14f --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_devlink.c @@ -0,0 +1,916 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Copyright 2020-2021 NXP + */ +#include <net/devlink.h> +#include "ocelot.h" + +/* The queue system tracks four resource consumptions: + * Resource 0: Memory tracked per source port + * Resource 1: Frame references tracked per source port + * Resource 2: Memory tracked per destination port + * Resource 3: Frame references tracked per destination port + */ +#define OCELOT_RESOURCE_SZ 256 +#define OCELOT_NUM_RESOURCES 4 + +#define BUF_xxxx_I (0 * OCELOT_RESOURCE_SZ) +#define REF_xxxx_I (1 * OCELOT_RESOURCE_SZ) +#define BUF_xxxx_E (2 * OCELOT_RESOURCE_SZ) +#define REF_xxxx_E (3 * OCELOT_RESOURCE_SZ) + +/* For each resource type there are 4 types of watermarks: + * Q_RSRV: reservation per QoS class per port + * PRIO_SHR: sharing watermark per QoS class across all ports + * P_RSRV: reservation per port + * COL_SHR: sharing watermark per color (drop precedence) across all ports + */ +#define xxx_Q_RSRV_x 0 +#define xxx_PRIO_SHR_x 216 +#define xxx_P_RSRV_x 224 +#define xxx_COL_SHR_x 254 + +/* Reservation 
Watermarks + * ---------------------- + * + * For setting up the reserved areas, egress watermarks exist per port and per + * QoS class for both ingress and egress. + */ + +/* Amount of packet buffer + * | per QoS class + * | | reserved + * | | | per egress port + * | | | | + * V V v v + * BUF_Q_RSRV_E + */ +#define BUF_Q_RSRV_E(port, prio) \ + (BUF_xxxx_E + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio)) + +/* Amount of packet buffer + * | for all port's traffic classes + * | | reserved + * | | | per egress port + * | | | | + * V V v v + * BUF_P_RSRV_E + */ +#define BUF_P_RSRV_E(port) \ + (BUF_xxxx_E + xxx_P_RSRV_x + (port)) + +/* Amount of packet buffer + * | per QoS class + * | | reserved + * | | | per ingress port + * | | | | + * V V v v + * BUF_Q_RSRV_I + */ +#define BUF_Q_RSRV_I(port, prio) \ + (BUF_xxxx_I + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio)) + +/* Amount of packet buffer + * | for all port's traffic classes + * | | reserved + * | | | per ingress port + * | | | | + * V V v v + * BUF_P_RSRV_I + */ +#define BUF_P_RSRV_I(port) \ + (BUF_xxxx_I + xxx_P_RSRV_x + (port)) + +/* Amount of frame references + * | per QoS class + * | | reserved + * | | | per egress port + * | | | | + * V V v v + * REF_Q_RSRV_E + */ +#define REF_Q_RSRV_E(port, prio) \ + (REF_xxxx_E + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio)) + +/* Amount of frame references + * | for all port's traffic classes + * | | reserved + * | | | per egress port + * | | | | + * V V v v + * REF_P_RSRV_E + */ +#define REF_P_RSRV_E(port) \ + (REF_xxxx_E + xxx_P_RSRV_x + (port)) + +/* Amount of frame references + * | per QoS class + * | | reserved + * | | | per ingress port + * | | | | + * V V v v + * REF_Q_RSRV_I + */ +#define REF_Q_RSRV_I(port, prio) \ + (REF_xxxx_I + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio)) + +/* Amount of frame references + * | for all port's traffic classes + * | | reserved + * | | | per ingress port + * | | | | + * V V v v + * REF_P_RSRV_I + */ +#define REF_P_RSRV_I(port) \ + (REF_xxxx_I + xxx_P_RSRV_x + (port)) + +/* Sharing Watermarks + * ------------------ + * + * The shared memory area is shared between all ports. 
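+ * With OCELOT_NUM_TC = 8 (as on this hardware), the index arithmetic works
+ * out as, e.g., BUF_Q_RSRV_E(port 2, prio 5) = 2 * 256 + 0 + 8 * 2 + 5 =
+ * 533, while the per-prio sharing watermarks defined below start at offset
+ * 216 (xxx_PRIO_SHR_x) within each resource.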
+ */ + +/* Amount of buffer + * | per QoS class + * | | from the shared memory area + * | | | for egress traffic + * | | | | + * V V v v + * BUF_PRIO_SHR_E + */ +#define BUF_PRIO_SHR_E(prio) \ + (BUF_xxxx_E + xxx_PRIO_SHR_x + (prio)) + +/* Amount of buffer + * | per color (drop precedence level) + * | | from the shared memory area + * | | | for egress traffic + * | | | | + * V V v v + * BUF_COL_SHR_E + */ +#define BUF_COL_SHR_E(dp) \ + (BUF_xxxx_E + xxx_COL_SHR_x + (1 - (dp))) + +/* Amount of buffer + * | per QoS class + * | | from the shared memory area + * | | | for ingress traffic + * | | | | + * V V v v + * BUF_PRIO_SHR_I + */ +#define BUF_PRIO_SHR_I(prio) \ + (BUF_xxxx_I + xxx_PRIO_SHR_x + (prio)) + +/* Amount of buffer + * | per color (drop precedence level) + * | | from the shared memory area + * | | | for ingress traffic + * | | | | + * V V v v + * BUF_COL_SHR_I + */ +#define BUF_COL_SHR_I(dp) \ + (BUF_xxxx_I + xxx_COL_SHR_x + (1 - (dp))) + +/* Amount of frame references + * | per QoS class + * | | from the shared area + * | | | for egress traffic + * | | | | + * V V v v + * REF_PRIO_SHR_E + */ +#define REF_PRIO_SHR_E(prio) \ + (REF_xxxx_E + xxx_PRIO_SHR_x + (prio)) + +/* Amount of frame references + * | per color (drop precedence level) + * | | from the shared area + * | | | for egress traffic + * | | | | + * V V v v + * REF_COL_SHR_E + */ +#define REF_COL_SHR_E(dp) \ + (REF_xxxx_E + xxx_COL_SHR_x + (1 - (dp))) + +/* Amount of frame references + * | per QoS class + * | | from the shared area + * | | | for ingress traffic + * | | | | + * V V v v + * REF_PRIO_SHR_I + */ +#define REF_PRIO_SHR_I(prio) \ + (REF_xxxx_I + xxx_PRIO_SHR_x + (prio)) + +/* Amount of frame references + * | per color (drop precedence level) + * | | from the shared area + * | | | for ingress traffic + * | | | | + * V V v v + * REF_COL_SHR_I + */ +#define REF_COL_SHR_I(dp) \ + (REF_xxxx_I + xxx_COL_SHR_x + (1 - (dp))) + +static u32 ocelot_wm_read(struct ocelot *ocelot, int index) +{ + int wm = ocelot_read_gix(ocelot, QSYS_RES_CFG, index); + + return ocelot->ops->wm_dec(wm); +} + +static void ocelot_wm_write(struct ocelot *ocelot, int index, u32 val) +{ + u32 wm = ocelot->ops->wm_enc(val); + + ocelot_write_gix(ocelot, wm, QSYS_RES_CFG, index); +} + +static void ocelot_wm_status(struct ocelot *ocelot, int index, u32 *inuse, + u32 *maxuse) +{ + int res_stat = ocelot_read_gix(ocelot, QSYS_RES_STAT, index); + + return ocelot->ops->wm_stat(res_stat, inuse, maxuse); +} + +/* The hardware comes out of reset with strange defaults: the sum of all + * reservations for frame memory is larger than the total buffer size. + * One has to wonder how can the reservation watermarks still guarantee + * anything under congestion. + * Bring some sense into the hardware by changing the defaults to disable all + * reservations and rely only on the sharing watermark for frames with drop + * precedence 0. The user can still explicitly request reservations per port + * and per port-tc through devlink-sb. 
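+ * For example (device and port names hypothetical), a 10 KB ingress buffer
+ * reservation for port 0 could later be requested with:
+ *   devlink sb port pool set pci/0000:00:00.5/0 sb 0 pool 0 th 10000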
+ */ +static void ocelot_disable_reservation_watermarks(struct ocelot *ocelot, + int port) +{ + int prio; + + for (prio = 0; prio < OCELOT_NUM_TC; prio++) { + ocelot_wm_write(ocelot, BUF_Q_RSRV_I(port, prio), 0); + ocelot_wm_write(ocelot, BUF_Q_RSRV_E(port, prio), 0); + ocelot_wm_write(ocelot, REF_Q_RSRV_I(port, prio), 0); + ocelot_wm_write(ocelot, REF_Q_RSRV_E(port, prio), 0); + } + + ocelot_wm_write(ocelot, BUF_P_RSRV_I(port), 0); + ocelot_wm_write(ocelot, BUF_P_RSRV_E(port), 0); + ocelot_wm_write(ocelot, REF_P_RSRV_I(port), 0); + ocelot_wm_write(ocelot, REF_P_RSRV_E(port), 0); +} + +/* We want the sharing watermarks to consume all nonreserved resources, for + * efficient resource utilization (a single traffic flow should be able to use + * up the entire buffer space and frame resources as long as there's no + * interference). + * The switch has 10 sharing watermarks per lookup: 8 per traffic class and 2 + * per color (drop precedence). + * The trouble with configuring these sharing watermarks is that: + * (1) There's a risk that we overcommit the resources if we configure + * (a) all 8 per-TC sharing watermarks to the max + * (b) all 2 per-color sharing watermarks to the max + * (2) There's a risk that we undercommit the resources if we configure + * (a) all 8 per-TC sharing watermarks to "max / 8" + * (b) all 2 per-color sharing watermarks to "max / 2" + * So for Linux, let's just disable the sharing watermarks per traffic class + * (setting them to 0 will make them always exceeded), and rely only on the + * sharing watermark for drop priority 0. So frames with drop priority set to 1 + * by QoS classification or policing will still be allowed, but only as long as + * the port and port-TC reservations are not exceeded. + */ +static void ocelot_disable_tc_sharing_watermarks(struct ocelot *ocelot) +{ + int prio; + + for (prio = 0; prio < OCELOT_NUM_TC; prio++) { + ocelot_wm_write(ocelot, BUF_PRIO_SHR_I(prio), 0); + ocelot_wm_write(ocelot, BUF_PRIO_SHR_E(prio), 0); + ocelot_wm_write(ocelot, REF_PRIO_SHR_I(prio), 0); + ocelot_wm_write(ocelot, REF_PRIO_SHR_E(prio), 0); + } +} + +static void ocelot_get_buf_rsrv(struct ocelot *ocelot, u32 *buf_rsrv_i, + u32 *buf_rsrv_e) +{ + int port, prio; + + *buf_rsrv_i = 0; + *buf_rsrv_e = 0; + + for (port = 0; port <= ocelot->num_phys_ports; port++) { + for (prio = 0; prio < OCELOT_NUM_TC; prio++) { + *buf_rsrv_i += ocelot_wm_read(ocelot, + BUF_Q_RSRV_I(port, prio)); + *buf_rsrv_e += ocelot_wm_read(ocelot, + BUF_Q_RSRV_E(port, prio)); + } + + *buf_rsrv_i += ocelot_wm_read(ocelot, BUF_P_RSRV_I(port)); + *buf_rsrv_e += ocelot_wm_read(ocelot, BUF_P_RSRV_E(port)); + } + + *buf_rsrv_i *= OCELOT_BUFFER_CELL_SZ; + *buf_rsrv_e *= OCELOT_BUFFER_CELL_SZ; +} + +static void ocelot_get_ref_rsrv(struct ocelot *ocelot, u32 *ref_rsrv_i, + u32 *ref_rsrv_e) +{ + int port, prio; + + *ref_rsrv_i = 0; + *ref_rsrv_e = 0; + + for (port = 0; port <= ocelot->num_phys_ports; port++) { + for (prio = 0; prio < OCELOT_NUM_TC; prio++) { + *ref_rsrv_i += ocelot_wm_read(ocelot, + REF_Q_RSRV_I(port, prio)); + *ref_rsrv_e += ocelot_wm_read(ocelot, + REF_Q_RSRV_E(port, prio)); + } + + *ref_rsrv_i += ocelot_wm_read(ocelot, REF_P_RSRV_I(port)); + *ref_rsrv_e += ocelot_wm_read(ocelot, REF_P_RSRV_E(port)); + } +} + +/* Calculate all reservations, then set up the sharing watermark for DP=0 to + * consume the remaining resources up to the pool's configured size. 
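+ * In other words, per resource and per direction:
+ *   COL_SHR(dp=0) = pool_size - sum(Q_RSRV) - sum(P_RSRV)
+ * expressed in 60-byte cells for the buffer resource.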
+ */ +static void ocelot_setup_sharing_watermarks(struct ocelot *ocelot) +{ + u32 buf_rsrv_i, buf_rsrv_e; + u32 ref_rsrv_i, ref_rsrv_e; + u32 buf_shr_i, buf_shr_e; + u32 ref_shr_i, ref_shr_e; + + ocelot_get_buf_rsrv(ocelot, &buf_rsrv_i, &buf_rsrv_e); + ocelot_get_ref_rsrv(ocelot, &ref_rsrv_i, &ref_rsrv_e); + + buf_shr_i = ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING] - + buf_rsrv_i; + buf_shr_e = ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR] - + buf_rsrv_e; + ref_shr_i = ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING] - + ref_rsrv_i; + ref_shr_e = ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR] - + ref_rsrv_e; + + buf_shr_i /= OCELOT_BUFFER_CELL_SZ; + buf_shr_e /= OCELOT_BUFFER_CELL_SZ; + + ocelot_wm_write(ocelot, BUF_COL_SHR_I(0), buf_shr_i); + ocelot_wm_write(ocelot, BUF_COL_SHR_E(0), buf_shr_e); + ocelot_wm_write(ocelot, REF_COL_SHR_E(0), ref_shr_e); + ocelot_wm_write(ocelot, REF_COL_SHR_I(0), ref_shr_i); + ocelot_wm_write(ocelot, BUF_COL_SHR_I(1), 0); + ocelot_wm_write(ocelot, BUF_COL_SHR_E(1), 0); + ocelot_wm_write(ocelot, REF_COL_SHR_E(1), 0); + ocelot_wm_write(ocelot, REF_COL_SHR_I(1), 0); +} + +/* Ensure that all reservations can be enforced */ +static int ocelot_watermark_validate(struct ocelot *ocelot, + struct netlink_ext_ack *extack) +{ + u32 buf_rsrv_i, buf_rsrv_e; + u32 ref_rsrv_i, ref_rsrv_e; + + ocelot_get_buf_rsrv(ocelot, &buf_rsrv_i, &buf_rsrv_e); + ocelot_get_ref_rsrv(ocelot, &ref_rsrv_i, &ref_rsrv_e); + + if (buf_rsrv_i > ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING]) { + NL_SET_ERR_MSG_MOD(extack, + "Ingress frame reservations exceed pool size"); + return -ERANGE; + } + if (buf_rsrv_e > ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR]) { + NL_SET_ERR_MSG_MOD(extack, + "Egress frame reservations exceed pool size"); + return -ERANGE; + } + if (ref_rsrv_i > ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING]) { + NL_SET_ERR_MSG_MOD(extack, + "Ingress reference reservations exceed pool size"); + return -ERANGE; + } + if (ref_rsrv_e > ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR]) { + NL_SET_ERR_MSG_MOD(extack, + "Egress reference reservations exceed pool size"); + return -ERANGE; + } + + return 0; +} + +/* The hardware works like this: + * + * Frame forwarding decision taken + * | + * v + * +--------------------+--------------------+--------------------+ + * | | | | + * v v v v + * Ingress memory Egress memory Ingress frame Egress frame + * check check reference check reference check + * | | | | + * v v v v + * BUF_Q_RSRV_I ok BUF_Q_RSRV_E ok REF_Q_RSRV_I ok REF_Q_RSRV_E ok + *(src port, prio) -+ (dst port, prio) -+ (src port, prio) -+ (dst port, prio) -+ + * | | | | | | | | + * |exceeded | |exceeded | |exceeded | |exceeded | + * v | v | v | v | + * BUF_P_RSRV_I ok| BUF_P_RSRV_E ok| REF_P_RSRV_I ok| REF_P_RSRV_E ok| + * (src port) ----+ (dst port) ----+ (src port) ----+ (dst port) -----+ + * | | | | | | | | + * |exceeded | |exceeded | |exceeded | |exceeded | + * v | v | v | v | + * BUF_PRIO_SHR_I ok| BUF_PRIO_SHR_E ok| REF_PRIO_SHR_I ok| REF_PRIO_SHR_E ok| + * (prio) ------+ (prio) ------+ (prio) ------+ (prio) -------+ + * | | | | | | | | + * |exceeded | |exceeded | |exceeded | |exceeded | + * v | v | v | v | + * BUF_COL_SHR_I ok| BUF_COL_SHR_E ok| REF_COL_SHR_I ok| REF_COL_SHR_E ok| + * (dp) -------+ (dp) -------+ (dp) -------+ (dp) --------+ + * | | | | | | | | + * |exceeded | |exceeded | |exceeded | |exceeded | + * v v v v v v v v + * fail success fail success fail success fail success + * | | | | | | | | + * v v v v v v v v + 
* +-----+----+ +-----+----+ +-----+----+ +-----+-----+ + * | | | | + * +-------> OR <-------+ +-------> OR <-------+ + * | | + * v v + * +----------------> AND <-----------------+ + * | + * v + * FIFO drop / accept + * + * We are modeling each of the 4 parallel lookups as a devlink-sb pool. + * At least one (ingress or egress) memory pool and one (ingress or egress) + * frame reference pool need to have resources for frame acceptance to succeed. + * + * The following watermarks are controlled explicitly through devlink-sb: + * BUF_Q_RSRV_I, BUF_Q_RSRV_E, REF_Q_RSRV_I, REF_Q_RSRV_E + * BUF_P_RSRV_I, BUF_P_RSRV_E, REF_P_RSRV_I, REF_P_RSRV_E + * The following watermarks are controlled implicitly through devlink-sb: + * BUF_COL_SHR_I, BUF_COL_SHR_E, REF_COL_SHR_I, REF_COL_SHR_E + * The following watermarks are unused and disabled: + * BUF_PRIO_SHR_I, BUF_PRIO_SHR_E, REF_PRIO_SHR_I, REF_PRIO_SHR_E + * + * This function overrides the hardware defaults with more sane ones (no + * reservations by default, let sharing use all resources) and disables the + * unused watermarks. + */ +static void ocelot_watermark_init(struct ocelot *ocelot) +{ + int all_tcs = GENMASK(OCELOT_NUM_TC - 1, 0); + int port; + + ocelot_write(ocelot, all_tcs, QSYS_RES_QOS_MODE); + + for (port = 0; port <= ocelot->num_phys_ports; port++) + ocelot_disable_reservation_watermarks(ocelot, port); + + ocelot_disable_tc_sharing_watermarks(ocelot); + ocelot_setup_sharing_watermarks(ocelot); +} + +/* Watermark encode + * Bit 8: Unit; 0:1, 1:16 + * Bit 7-0: Value to be multiplied with unit + */ +u16 ocelot_wm_enc(u16 value) +{ + WARN_ON(value >= 16 * BIT(8)); + + if (value >= BIT(8)) + return BIT(8) | (value / 16); + + return value; +} +EXPORT_SYMBOL(ocelot_wm_enc); + +u16 ocelot_wm_dec(u16 wm) +{ + if (wm & BIT(8)) + return (wm & GENMASK(7, 0)) * 16; + + return wm; +} +EXPORT_SYMBOL(ocelot_wm_dec); + +void ocelot_wm_stat(u32 val, u32 *inuse, u32 *maxuse) +{ + *inuse = (val & GENMASK(23, 12)) >> 12; + *maxuse = val & GENMASK(11, 0); +} +EXPORT_SYMBOL(ocelot_wm_stat); + +/* Pool size and type are fixed up at runtime. Keeping this structure to + * look up the cell size multipliers. + */ +static const struct devlink_sb_pool_info ocelot_sb_pool[] = { + [OCELOT_SB_BUF] = { + .cell_size = OCELOT_BUFFER_CELL_SZ, + .threshold_type = DEVLINK_SB_THRESHOLD_TYPE_STATIC, + }, + [OCELOT_SB_REF] = { + .cell_size = 1, + .threshold_type = DEVLINK_SB_THRESHOLD_TYPE_STATIC, + }, +}; + +/* Returns the pool size configured through ocelot_sb_pool_set */ +int ocelot_sb_pool_get(struct ocelot *ocelot, unsigned int sb_index, + u16 pool_index, + struct devlink_sb_pool_info *pool_info) +{ + if (sb_index >= OCELOT_SB_NUM) + return -ENODEV; + if (pool_index >= OCELOT_SB_POOL_NUM) + return -ENODEV; + + *pool_info = ocelot_sb_pool[sb_index]; + pool_info->size = ocelot->pool_size[sb_index][pool_index]; + if (pool_index) + pool_info->pool_type = DEVLINK_SB_POOL_TYPE_INGRESS; + else + pool_info->pool_type = DEVLINK_SB_POOL_TYPE_EGRESS; + + return 0; +} +EXPORT_SYMBOL(ocelot_sb_pool_get); + +/* The pool size received here configures the total amount of resources used on + * ingress (or on egress, depending upon the pool index). The pool size, minus + * the values for the port and port-tc reservations, is written into the + * COL_SHR(dp=0) sharing watermark. 
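+ * A typical invocation would be something like (device name and size
+ * hypothetical):
+ *   devlink sb pool set pci/0000:00:00.5 sb 0 pool 0 size 300000 thtype static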
+ */ +int ocelot_sb_pool_set(struct ocelot *ocelot, unsigned int sb_index, + u16 pool_index, u32 size, + enum devlink_sb_threshold_type threshold_type, + struct netlink_ext_ack *extack) +{ + u32 old_pool_size; + int err; + + if (sb_index >= OCELOT_SB_NUM) { + NL_SET_ERR_MSG_MOD(extack, + "Invalid sb, use 0 for buffers and 1 for frame references"); + return -ENODEV; + } + if (pool_index >= OCELOT_SB_POOL_NUM) { + NL_SET_ERR_MSG_MOD(extack, + "Invalid pool, use 0 for ingress and 1 for egress"); + return -ENODEV; + } + if (threshold_type != DEVLINK_SB_THRESHOLD_TYPE_STATIC) { + NL_SET_ERR_MSG_MOD(extack, + "Only static threshold supported"); + return -EOPNOTSUPP; + } + + old_pool_size = ocelot->pool_size[sb_index][pool_index]; + ocelot->pool_size[sb_index][pool_index] = size; + + err = ocelot_watermark_validate(ocelot, extack); + if (err) { + ocelot->pool_size[sb_index][pool_index] = old_pool_size; + return err; + } + + ocelot_setup_sharing_watermarks(ocelot); + + return 0; +} +EXPORT_SYMBOL(ocelot_sb_pool_set); + +/* This retrieves the configuration made with ocelot_sb_port_pool_set */ +int ocelot_sb_port_pool_get(struct ocelot *ocelot, int port, + unsigned int sb_index, u16 pool_index, + u32 *p_threshold) +{ + int wm_index; + + switch (sb_index) { + case OCELOT_SB_BUF: + if (pool_index == OCELOT_SB_POOL_ING) + wm_index = BUF_P_RSRV_I(port); + else + wm_index = BUF_P_RSRV_E(port); + break; + case OCELOT_SB_REF: + if (pool_index == OCELOT_SB_POOL_ING) + wm_index = REF_P_RSRV_I(port); + else + wm_index = REF_P_RSRV_E(port); + break; + default: + return -ENODEV; + } + + *p_threshold = ocelot_wm_read(ocelot, wm_index); + *p_threshold *= ocelot_sb_pool[sb_index].cell_size; + + return 0; +} +EXPORT_SYMBOL(ocelot_sb_port_pool_get); + +/* This configures the P_RSRV per-port reserved resource watermark */ +int ocelot_sb_port_pool_set(struct ocelot *ocelot, int port, + unsigned int sb_index, u16 pool_index, + u32 threshold, struct netlink_ext_ack *extack) +{ + int wm_index, err; + u32 old_thr; + + switch (sb_index) { + case OCELOT_SB_BUF: + if (pool_index == OCELOT_SB_POOL_ING) + wm_index = BUF_P_RSRV_I(port); + else + wm_index = BUF_P_RSRV_E(port); + break; + case OCELOT_SB_REF: + if (pool_index == OCELOT_SB_POOL_ING) + wm_index = REF_P_RSRV_I(port); + else + wm_index = REF_P_RSRV_E(port); + break; + default: + NL_SET_ERR_MSG_MOD(extack, "Invalid shared buffer"); + return -ENODEV; + } + + threshold /= ocelot_sb_pool[sb_index].cell_size; + + old_thr = ocelot_wm_read(ocelot, wm_index); + ocelot_wm_write(ocelot, wm_index, threshold); + + err = ocelot_watermark_validate(ocelot, extack); + if (err) { + ocelot_wm_write(ocelot, wm_index, old_thr); + return err; + } + + ocelot_setup_sharing_watermarks(ocelot); + + return 0; +} +EXPORT_SYMBOL(ocelot_sb_port_pool_set); + +/* This retrieves the configuration done by ocelot_sb_tc_pool_bind_set */ +int ocelot_sb_tc_pool_bind_get(struct ocelot *ocelot, int port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 *p_pool_index, u32 *p_threshold) +{ + int wm_index; + + switch (sb_index) { + case OCELOT_SB_BUF: + if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS) + wm_index = BUF_Q_RSRV_I(port, tc_index); + else + wm_index = BUF_Q_RSRV_E(port, tc_index); + break; + case OCELOT_SB_REF: + if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS) + wm_index = REF_Q_RSRV_I(port, tc_index); + else + wm_index = REF_Q_RSRV_E(port, tc_index); + break; + default: + return -ENODEV; + } + + *p_threshold = ocelot_wm_read(ocelot, wm_index); + *p_threshold *= 
ocelot_sb_pool[sb_index].cell_size; + + if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS) + *p_pool_index = 0; + else + *p_pool_index = 1; + + return 0; +} +EXPORT_SYMBOL(ocelot_sb_tc_pool_bind_get); + +/* This configures the Q_RSRV per-port-tc reserved resource watermark */ +int ocelot_sb_tc_pool_bind_set(struct ocelot *ocelot, int port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 pool_index, u32 threshold, + struct netlink_ext_ack *extack) +{ + int wm_index, err; + u32 old_thr; + + /* Paranoid check? */ + if (pool_index == OCELOT_SB_POOL_ING && + pool_type != DEVLINK_SB_POOL_TYPE_INGRESS) + return -EINVAL; + if (pool_index == OCELOT_SB_POOL_EGR && + pool_type != DEVLINK_SB_POOL_TYPE_EGRESS) + return -EINVAL; + + switch (sb_index) { + case OCELOT_SB_BUF: + if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS) + wm_index = BUF_Q_RSRV_I(port, tc_index); + else + wm_index = BUF_Q_RSRV_E(port, tc_index); + break; + case OCELOT_SB_REF: + if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS) + wm_index = REF_Q_RSRV_I(port, tc_index); + else + wm_index = REF_Q_RSRV_E(port, tc_index); + break; + default: + NL_SET_ERR_MSG_MOD(extack, "Invalid shared buffer"); + return -ENODEV; + } + + threshold /= ocelot_sb_pool[sb_index].cell_size; + + old_thr = ocelot_wm_read(ocelot, wm_index); + ocelot_wm_write(ocelot, wm_index, threshold); + err = ocelot_watermark_validate(ocelot, extack); + if (err) { + ocelot_wm_write(ocelot, wm_index, old_thr); + return err; + } + + ocelot_setup_sharing_watermarks(ocelot); + + return 0; +} +EXPORT_SYMBOL(ocelot_sb_tc_pool_bind_set); + +/* The hardware does not support atomic snapshots, we'll read out the + * occupancy registers individually and have this as just a stub. + */ +int ocelot_sb_occ_snapshot(struct ocelot *ocelot, unsigned int sb_index) +{ + return 0; +} +EXPORT_SYMBOL(ocelot_sb_occ_snapshot); + +/* The watermark occupancy registers are cleared upon read, + * so let's read them. 
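+ * This backs "devlink sb occ clearmax": reading QSYS_RES_STAT for every
+ * watermark resets its maxuse counter.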
+ */ +int ocelot_sb_occ_max_clear(struct ocelot *ocelot, unsigned int sb_index) +{ + u32 inuse, maxuse; + int port, prio; + + switch (sb_index) { + case OCELOT_SB_BUF: + for (port = 0; port <= ocelot->num_phys_ports; port++) { + for (prio = 0; prio < OCELOT_NUM_TC; prio++) { + ocelot_wm_status(ocelot, BUF_Q_RSRV_I(port, prio), + &inuse, &maxuse); + ocelot_wm_status(ocelot, BUF_Q_RSRV_E(port, prio), + &inuse, &maxuse); + } + ocelot_wm_status(ocelot, BUF_P_RSRV_I(port), + &inuse, &maxuse); + ocelot_wm_status(ocelot, BUF_P_RSRV_E(port), + &inuse, &maxuse); + } + break; + case OCELOT_SB_REF: + for (port = 0; port <= ocelot->num_phys_ports; port++) { + for (prio = 0; prio < OCELOT_NUM_TC; prio++) { + ocelot_wm_status(ocelot, REF_Q_RSRV_I(port, prio), + &inuse, &maxuse); + ocelot_wm_status(ocelot, REF_Q_RSRV_E(port, prio), + &inuse, &maxuse); + } + ocelot_wm_status(ocelot, REF_P_RSRV_I(port), + &inuse, &maxuse); + ocelot_wm_status(ocelot, REF_P_RSRV_E(port), + &inuse, &maxuse); + } + break; + default: + return -ENODEV; + } + + return 0; +} +EXPORT_SYMBOL(ocelot_sb_occ_max_clear); + +/* This retrieves the watermark occupancy for per-port P_RSRV watermarks */ +int ocelot_sb_occ_port_pool_get(struct ocelot *ocelot, int port, + unsigned int sb_index, u16 pool_index, + u32 *p_cur, u32 *p_max) +{ + int wm_index; + + switch (sb_index) { + case OCELOT_SB_BUF: + if (pool_index == OCELOT_SB_POOL_ING) + wm_index = BUF_P_RSRV_I(port); + else + wm_index = BUF_P_RSRV_E(port); + break; + case OCELOT_SB_REF: + if (pool_index == OCELOT_SB_POOL_ING) + wm_index = REF_P_RSRV_I(port); + else + wm_index = REF_P_RSRV_E(port); + break; + default: + return -ENODEV; + } + + ocelot_wm_status(ocelot, wm_index, p_cur, p_max); + *p_cur *= ocelot_sb_pool[sb_index].cell_size; + *p_max *= ocelot_sb_pool[sb_index].cell_size; + + return 0; +} +EXPORT_SYMBOL(ocelot_sb_occ_port_pool_get); + +/* This retrieves the watermark occupancy for per-port-tc Q_RSRV watermarks */ +int ocelot_sb_occ_tc_port_bind_get(struct ocelot *ocelot, int port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u32 *p_cur, u32 *p_max) +{ + int wm_index; + + switch (sb_index) { + case OCELOT_SB_BUF: + if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS) + wm_index = BUF_Q_RSRV_I(port, tc_index); + else + wm_index = BUF_Q_RSRV_E(port, tc_index); + break; + case OCELOT_SB_REF: + if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS) + wm_index = REF_Q_RSRV_I(port, tc_index); + else + wm_index = REF_Q_RSRV_E(port, tc_index); + break; + default: + return -ENODEV; + } + + ocelot_wm_status(ocelot, wm_index, p_cur, p_max); + *p_cur *= ocelot_sb_pool[sb_index].cell_size; + *p_max *= ocelot_sb_pool[sb_index].cell_size; + + return 0; +} +EXPORT_SYMBOL(ocelot_sb_occ_tc_port_bind_get); + +int ocelot_devlink_sb_register(struct ocelot *ocelot) +{ + int err; + + err = devlink_sb_register(ocelot->devlink, OCELOT_SB_BUF, + ocelot->packet_buffer_size, 1, 1, + OCELOT_NUM_TC, OCELOT_NUM_TC); + if (err) + return err; + + err = devlink_sb_register(ocelot->devlink, OCELOT_SB_REF, + ocelot->num_frame_refs, 1, 1, + OCELOT_NUM_TC, OCELOT_NUM_TC); + if (err) { + devlink_sb_unregister(ocelot->devlink, OCELOT_SB_BUF); + return err; + } + + ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING] = ocelot->packet_buffer_size; + ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR] = ocelot->packet_buffer_size; + ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING] = ocelot->num_frame_refs; + ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR] = ocelot->num_frame_refs; + + 
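/* Out of reset, each pool spans its entire resource (all of the
+	 * packet buffer, respectively all frame references);
+	 * ocelot_watermark_init() below then zeroes every reservation,
+	 * handing the whole pool to the DP=0 sharing watermarks.
+	 */
+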
ocelot_watermark_init(ocelot); + + return 0; +} +EXPORT_SYMBOL(ocelot_devlink_sb_register); + +void ocelot_devlink_sb_unregister(struct ocelot *ocelot) +{ + devlink_sb_unregister(ocelot->devlink, OCELOT_SB_BUF); + devlink_sb_unregister(ocelot->devlink, OCELOT_SB_REF); +} +EXPORT_SYMBOL(ocelot_devlink_sb_unregister); diff --git a/drivers/net/ethernet/mscc/ocelot_fdma.c b/drivers/net/ethernet/mscc/ocelot_fdma.c new file mode 100644 index 0000000000..312a468321 --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_fdma.c @@ -0,0 +1,893 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Microsemi SoCs FDMA driver + * + * Copyright (c) 2021 Microchip + * + * Page recycling code is mostly taken from gianfar driver. + */ + +#include <linux/align.h> +#include <linux/bitops.h> +#include <linux/dmapool.h> +#include <linux/dsa/ocelot.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> + +#include "ocelot_fdma.h" +#include "ocelot_qs.h" + +DEFINE_STATIC_KEY_FALSE(ocelot_fdma_enabled); + +static void ocelot_fdma_writel(struct ocelot *ocelot, u32 reg, u32 data) +{ + regmap_write(ocelot->targets[FDMA], reg, data); +} + +static u32 ocelot_fdma_readl(struct ocelot *ocelot, u32 reg) +{ + u32 retval; + + regmap_read(ocelot->targets[FDMA], reg, &retval); + + return retval; +} + +static dma_addr_t ocelot_fdma_idx_dma(dma_addr_t base, u16 idx) +{ + return base + idx * sizeof(struct ocelot_fdma_dcb); +} + +static u16 ocelot_fdma_dma_idx(dma_addr_t base, dma_addr_t dma) +{ + return (dma - base) / sizeof(struct ocelot_fdma_dcb); +} + +static u16 ocelot_fdma_idx_next(u16 idx, u16 ring_sz) +{ + return unlikely(idx == ring_sz - 1) ? 0 : idx + 1; +} + +static u16 ocelot_fdma_idx_prev(u16 idx, u16 ring_sz) +{ + return unlikely(idx == 0) ? ring_sz - 1 : idx - 1; +} + +static int ocelot_fdma_rx_ring_free(struct ocelot_fdma *fdma) +{ + struct ocelot_fdma_rx_ring *rx_ring = &fdma->rx_ring; + + if (rx_ring->next_to_use >= rx_ring->next_to_clean) + return OCELOT_FDMA_RX_RING_SIZE - + (rx_ring->next_to_use - rx_ring->next_to_clean) - 1; + else + return rx_ring->next_to_clean - rx_ring->next_to_use - 1; +} + +static int ocelot_fdma_tx_ring_free(struct ocelot_fdma *fdma) +{ + struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring; + + if (tx_ring->next_to_use >= tx_ring->next_to_clean) + return OCELOT_FDMA_TX_RING_SIZE - + (tx_ring->next_to_use - tx_ring->next_to_clean) - 1; + else + return tx_ring->next_to_clean - tx_ring->next_to_use - 1; +} + +static bool ocelot_fdma_tx_ring_empty(struct ocelot_fdma *fdma) +{ + struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring; + + return tx_ring->next_to_clean == tx_ring->next_to_use; +} + +static void ocelot_fdma_activate_chan(struct ocelot *ocelot, dma_addr_t dma, + int chan) +{ + ocelot_fdma_writel(ocelot, MSCC_FDMA_DCB_LLP(chan), dma); + /* Barrier to force memory writes to DCB to be completed before starting + * the channel. 
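Without it, the MMIO write that activates the channel could become
visible before the DCB contents do, and the FDMA could fetch a stale
descriptor.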
+ */ + wmb(); + ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_ACTIVATE, BIT(chan)); +} + +static u32 ocelot_fdma_read_ch_safe(struct ocelot *ocelot) +{ + return ocelot_fdma_readl(ocelot, MSCC_FDMA_CH_SAFE); +} + +static int ocelot_fdma_wait_chan_safe(struct ocelot *ocelot, int chan) +{ + u32 safe; + + return readx_poll_timeout_atomic(ocelot_fdma_read_ch_safe, ocelot, safe, + safe & BIT(chan), 0, + OCELOT_FDMA_CH_SAFE_TIMEOUT_US); +} + +static void ocelot_fdma_dcb_set_data(struct ocelot_fdma_dcb *dcb, + dma_addr_t dma_addr, + size_t size) +{ + u32 offset = dma_addr & 0x3; + + dcb->llp = 0; + dcb->datap = ALIGN_DOWN(dma_addr, 4); + dcb->datal = ALIGN_DOWN(size, 4); + dcb->stat = MSCC_FDMA_DCB_STAT_BLOCKO(offset); +} + +static bool ocelot_fdma_rx_alloc_page(struct ocelot *ocelot, + struct ocelot_fdma_rx_buf *rxb) +{ + dma_addr_t mapping; + struct page *page; + + page = dev_alloc_page(); + if (unlikely(!page)) + return false; + + mapping = dma_map_page(ocelot->dev, page, 0, PAGE_SIZE, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(ocelot->dev, mapping))) { + __free_page(page); + return false; + } + + rxb->page = page; + rxb->page_offset = 0; + rxb->dma_addr = mapping; + + return true; +} + +static int ocelot_fdma_alloc_rx_buffs(struct ocelot *ocelot, u16 alloc_cnt) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + struct ocelot_fdma_rx_ring *rx_ring; + struct ocelot_fdma_rx_buf *rxb; + struct ocelot_fdma_dcb *dcb; + dma_addr_t dma_addr; + int ret = 0; + u16 idx; + + rx_ring = &fdma->rx_ring; + idx = rx_ring->next_to_use; + + while (alloc_cnt--) { + rxb = &rx_ring->bufs[idx]; + /* try reuse page */ + if (unlikely(!rxb->page)) { + if (unlikely(!ocelot_fdma_rx_alloc_page(ocelot, rxb))) { + dev_err_ratelimited(ocelot->dev, + "Failed to allocate rx\n"); + ret = -ENOMEM; + break; + } + } + + dcb = &rx_ring->dcbs[idx]; + dma_addr = rxb->dma_addr + rxb->page_offset; + ocelot_fdma_dcb_set_data(dcb, dma_addr, OCELOT_FDMA_RXB_SIZE); + + idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE); + /* Chain the DCB to the next one */ + dcb->llp = ocelot_fdma_idx_dma(rx_ring->dcbs_dma, idx); + } + + rx_ring->next_to_use = idx; + rx_ring->next_to_alloc = idx; + + return ret; +} + +static bool ocelot_fdma_tx_dcb_set_skb(struct ocelot *ocelot, + struct ocelot_fdma_tx_buf *tx_buf, + struct ocelot_fdma_dcb *dcb, + struct sk_buff *skb) +{ + dma_addr_t mapping; + + mapping = dma_map_single(ocelot->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(ocelot->dev, mapping))) + return false; + + dma_unmap_addr_set(tx_buf, dma_addr, mapping); + + ocelot_fdma_dcb_set_data(dcb, mapping, OCELOT_FDMA_RX_SIZE); + tx_buf->skb = skb; + dcb->stat |= MSCC_FDMA_DCB_STAT_BLOCKL(skb->len); + dcb->stat |= MSCC_FDMA_DCB_STAT_SOF | MSCC_FDMA_DCB_STAT_EOF; + + return true; +} + +static bool ocelot_fdma_check_stop_rx(struct ocelot *ocelot) +{ + u32 llp; + + /* Check if the FDMA hits the DCB with LLP == NULL */ + llp = ocelot_fdma_readl(ocelot, MSCC_FDMA_DCB_LLP(MSCC_FDMA_XTR_CHAN)); + if (unlikely(llp)) + return false; + + ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_DISABLE, + BIT(MSCC_FDMA_XTR_CHAN)); + + return true; +} + +static void ocelot_fdma_rx_set_llp(struct ocelot_fdma_rx_ring *rx_ring) +{ + struct ocelot_fdma_dcb *dcb; + unsigned int idx; + + idx = ocelot_fdma_idx_prev(rx_ring->next_to_use, + OCELOT_FDMA_RX_RING_SIZE); + dcb = &rx_ring->dcbs[idx]; + dcb->llp = 0; +} + +static void ocelot_fdma_rx_restart(struct ocelot *ocelot) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + struct ocelot_fdma_rx_ring 
*rx_ring; + const u8 chan = MSCC_FDMA_XTR_CHAN; + dma_addr_t new_llp, dma_base; + unsigned int idx; + u32 llp_prev; + int ret; + + rx_ring = &fdma->rx_ring; + ret = ocelot_fdma_wait_chan_safe(ocelot, chan); + if (ret) { + dev_err_ratelimited(ocelot->dev, + "Unable to stop RX channel\n"); + return; + } + + ocelot_fdma_rx_set_llp(rx_ring); + + /* FDMA stopped on the last DCB that contained a NULL LLP, since + * we processed some DCBs in RX, there is free space, and we must set + * DCB_LLP to point to the next DCB + */ + llp_prev = ocelot_fdma_readl(ocelot, MSCC_FDMA_DCB_LLP_PREV(chan)); + dma_base = rx_ring->dcbs_dma; + + /* Get the next DMA addr located after LLP == NULL DCB */ + idx = ocelot_fdma_dma_idx(dma_base, llp_prev); + idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE); + new_llp = ocelot_fdma_idx_dma(dma_base, idx); + + /* Finally reactivate the channel */ + ocelot_fdma_activate_chan(ocelot, new_llp, chan); +} + +static bool ocelot_fdma_add_rx_frag(struct ocelot_fdma_rx_buf *rxb, u32 stat, + struct sk_buff *skb, bool first) +{ + int size = MSCC_FDMA_DCB_STAT_BLOCKL(stat); + struct page *page = rxb->page; + + if (likely(first)) { + skb_put(skb, size); + } else { + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rxb->page_offset, size, OCELOT_FDMA_RX_SIZE); + } + + /* Try to reuse page */ + if (unlikely(page_ref_count(page) != 1 || page_is_pfmemalloc(page))) + return false; + + /* Change offset to the other half */ + rxb->page_offset ^= OCELOT_FDMA_RX_SIZE; + + page_ref_inc(page); + + return true; +} + +static void ocelot_fdma_reuse_rx_page(struct ocelot *ocelot, + struct ocelot_fdma_rx_buf *old_rxb) +{ + struct ocelot_fdma_rx_ring *rx_ring = &ocelot->fdma->rx_ring; + struct ocelot_fdma_rx_buf *new_rxb; + + new_rxb = &rx_ring->bufs[rx_ring->next_to_alloc]; + rx_ring->next_to_alloc = ocelot_fdma_idx_next(rx_ring->next_to_alloc, + OCELOT_FDMA_RX_RING_SIZE); + + /* Copy page reference */ + *new_rxb = *old_rxb; + + /* Sync for use by the device */ + dma_sync_single_range_for_device(ocelot->dev, old_rxb->dma_addr, + old_rxb->page_offset, + OCELOT_FDMA_RX_SIZE, DMA_FROM_DEVICE); +} + +static struct sk_buff *ocelot_fdma_get_skb(struct ocelot *ocelot, u32 stat, + struct ocelot_fdma_rx_buf *rxb, + struct sk_buff *skb) +{ + bool first = false; + + /* Allocate skb head and data */ + if (likely(!skb)) { + void *buff_addr = page_address(rxb->page) + + rxb->page_offset; + + skb = build_skb(buff_addr, OCELOT_FDMA_SKBFRAG_SIZE); + if (unlikely(!skb)) { + dev_err_ratelimited(ocelot->dev, + "build_skb failed !\n"); + return NULL; + } + first = true; + } + + dma_sync_single_range_for_cpu(ocelot->dev, rxb->dma_addr, + rxb->page_offset, OCELOT_FDMA_RX_SIZE, + DMA_FROM_DEVICE); + + if (ocelot_fdma_add_rx_frag(rxb, stat, skb, first)) { + /* Reuse the free half of the page for the next_to_alloc DCB*/ + ocelot_fdma_reuse_rx_page(ocelot, rxb); + } else { + /* page cannot be reused, unmap it */ + dma_unmap_page(ocelot->dev, rxb->dma_addr, PAGE_SIZE, + DMA_FROM_DEVICE); + } + + /* clear rx buff content */ + rxb->page = NULL; + + return skb; +} + +static bool ocelot_fdma_receive_skb(struct ocelot *ocelot, struct sk_buff *skb) +{ + struct net_device *ndev; + void *xfh = skb->data; + u64 timestamp; + u64 src_port; + + skb_pull(skb, OCELOT_TAG_LEN); + + ocelot_xfh_get_src_port(xfh, &src_port); + if (unlikely(src_port >= ocelot->num_phys_ports)) + return false; + + ndev = ocelot_port_to_netdev(ocelot, src_port); + if (unlikely(!ndev)) + return false; + + if (pskb_trim(skb, skb->len - ETH_FCS_LEN)) + 
return false; + + skb->dev = ndev; + skb->protocol = eth_type_trans(skb, skb->dev); + skb->dev->stats.rx_bytes += skb->len; + skb->dev->stats.rx_packets++; + + if (ocelot->ptp) { + ocelot_xfh_get_rew_val(xfh, ×tamp); + ocelot_ptp_rx_timestamp(ocelot, skb, timestamp); + } + + if (likely(!skb_defer_rx_timestamp(skb))) + netif_receive_skb(skb); + + return true; +} + +static int ocelot_fdma_rx_get(struct ocelot *ocelot, int budget) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + struct ocelot_fdma_rx_ring *rx_ring; + struct ocelot_fdma_rx_buf *rxb; + struct ocelot_fdma_dcb *dcb; + struct sk_buff *skb; + int work_done = 0; + int cleaned_cnt; + u32 stat; + u16 idx; + + cleaned_cnt = ocelot_fdma_rx_ring_free(fdma); + rx_ring = &fdma->rx_ring; + skb = rx_ring->skb; + + while (budget--) { + idx = rx_ring->next_to_clean; + dcb = &rx_ring->dcbs[idx]; + stat = dcb->stat; + if (MSCC_FDMA_DCB_STAT_BLOCKL(stat) == 0) + break; + + /* New packet is a start of frame but we already got a skb set, + * we probably lost an EOF packet, free skb + */ + if (unlikely(skb && (stat & MSCC_FDMA_DCB_STAT_SOF))) { + dev_kfree_skb(skb); + skb = NULL; + } + + rxb = &rx_ring->bufs[idx]; + /* Fetch next to clean buffer from the rx_ring */ + skb = ocelot_fdma_get_skb(ocelot, stat, rxb, skb); + if (unlikely(!skb)) + break; + + work_done++; + cleaned_cnt++; + + idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE); + rx_ring->next_to_clean = idx; + + if (unlikely(stat & MSCC_FDMA_DCB_STAT_ABORT || + stat & MSCC_FDMA_DCB_STAT_PD)) { + dev_err_ratelimited(ocelot->dev, + "DCB aborted or pruned\n"); + dev_kfree_skb(skb); + skb = NULL; + continue; + } + + /* We still need to process the other fragment of the packet + * before delivering it to the network stack + */ + if (!(stat & MSCC_FDMA_DCB_STAT_EOF)) + continue; + + if (unlikely(!ocelot_fdma_receive_skb(ocelot, skb))) + dev_kfree_skb(skb); + + skb = NULL; + } + + rx_ring->skb = skb; + + if (cleaned_cnt) + ocelot_fdma_alloc_rx_buffs(ocelot, cleaned_cnt); + + return work_done; +} + +static void ocelot_fdma_wakeup_netdev(struct ocelot *ocelot) +{ + struct ocelot_port_private *priv; + struct ocelot_port *ocelot_port; + struct net_device *dev; + int port; + + for (port = 0; port < ocelot->num_phys_ports; port++) { + ocelot_port = ocelot->ports[port]; + if (!ocelot_port) + continue; + priv = container_of(ocelot_port, struct ocelot_port_private, + port); + dev = priv->dev; + + if (unlikely(netif_queue_stopped(dev))) + netif_wake_queue(dev); + } +} + +static void ocelot_fdma_tx_cleanup(struct ocelot *ocelot, int budget) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + struct ocelot_fdma_tx_ring *tx_ring; + struct ocelot_fdma_tx_buf *buf; + unsigned int new_null_llp_idx; + struct ocelot_fdma_dcb *dcb; + bool end_of_list = false; + struct sk_buff *skb; + dma_addr_t dma; + u32 dcb_llp; + u16 ntc; + int ret; + + tx_ring = &fdma->tx_ring; + + /* Purge the TX packets that have been sent up to the NULL llp or the + * end of done list. 
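A descriptor counts as sent once the hardware has set
MSCC_FDMA_DCB_STAT_PD in its status word, so the walk stops at the first
DCB that still has that bit clear.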
+ */ + while (!ocelot_fdma_tx_ring_empty(fdma)) { + ntc = tx_ring->next_to_clean; + dcb = &tx_ring->dcbs[ntc]; + if (!(dcb->stat & MSCC_FDMA_DCB_STAT_PD)) + break; + + buf = &tx_ring->bufs[ntc]; + skb = buf->skb; + dma_unmap_single(ocelot->dev, dma_unmap_addr(buf, dma_addr), + skb->len, DMA_TO_DEVICE); + napi_consume_skb(skb, budget); + dcb_llp = dcb->llp; + + /* Only update after accessing all dcb fields */ + tx_ring->next_to_clean = ocelot_fdma_idx_next(ntc, + OCELOT_FDMA_TX_RING_SIZE); + + /* If we hit the NULL LLP, stop, we might need to reload FDMA */ + if (dcb_llp == 0) { + end_of_list = true; + break; + } + } + + /* No need to try to wake up if no TX buffers were cleaned up. */ + if (ocelot_fdma_tx_ring_free(fdma)) + ocelot_fdma_wakeup_netdev(ocelot); + + /* If there are still some DCBs to be processed by the FDMA or if the + * pending list is empty, there is no need to restart the FDMA. + */ + if (!end_of_list || ocelot_fdma_tx_ring_empty(fdma)) + return; + + ret = ocelot_fdma_wait_chan_safe(ocelot, MSCC_FDMA_INJ_CHAN); + if (ret) { + dev_warn(ocelot->dev, + "Failed to wait for TX channel to stop\n"); + return; + } + + /* Set NULL LLP to be the last DCB used */ + new_null_llp_idx = ocelot_fdma_idx_prev(tx_ring->next_to_use, + OCELOT_FDMA_TX_RING_SIZE); + dcb = &tx_ring->dcbs[new_null_llp_idx]; + dcb->llp = 0; + + dma = ocelot_fdma_idx_dma(tx_ring->dcbs_dma, tx_ring->next_to_clean); + ocelot_fdma_activate_chan(ocelot, dma, MSCC_FDMA_INJ_CHAN); +} + +static int ocelot_fdma_napi_poll(struct napi_struct *napi, int budget) +{ + struct ocelot_fdma *fdma = container_of(napi, struct ocelot_fdma, napi); + struct ocelot *ocelot = fdma->ocelot; + int work_done = 0; + bool rx_stopped; + + ocelot_fdma_tx_cleanup(ocelot, budget); + + rx_stopped = ocelot_fdma_check_stop_rx(ocelot); + + work_done = ocelot_fdma_rx_get(ocelot, budget); + + if (rx_stopped) + ocelot_fdma_rx_restart(ocelot); + + if (work_done < budget) { + napi_complete_done(&fdma->napi, work_done); + ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, + BIT(MSCC_FDMA_INJ_CHAN) | + BIT(MSCC_FDMA_XTR_CHAN)); + } + + return work_done; +} + +static irqreturn_t ocelot_fdma_interrupt(int irq, void *dev_id) +{ + u32 ident, llp, frm, err, err_code; + struct ocelot *ocelot = dev_id; + + ident = ocelot_fdma_readl(ocelot, MSCC_FDMA_INTR_IDENT); + frm = ocelot_fdma_readl(ocelot, MSCC_FDMA_INTR_FRM); + llp = ocelot_fdma_readl(ocelot, MSCC_FDMA_INTR_LLP); + + ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_LLP, llp & ident); + ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_FRM, frm & ident); + if (frm || llp) { + ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, 0); + napi_schedule(&ocelot->fdma->napi); + } + + err = ocelot_fdma_readl(ocelot, MSCC_FDMA_EVT_ERR); + if (unlikely(err)) { + err_code = ocelot_fdma_readl(ocelot, MSCC_FDMA_EVT_ERR_CODE); + dev_err_ratelimited(ocelot->dev, + "Error! 
chans mask: %#x, code: %#x\n", + err, err_code); + + ocelot_fdma_writel(ocelot, MSCC_FDMA_EVT_ERR, err); + ocelot_fdma_writel(ocelot, MSCC_FDMA_EVT_ERR_CODE, err_code); + } + + return IRQ_HANDLED; +} + +static void ocelot_fdma_send_skb(struct ocelot *ocelot, + struct ocelot_fdma *fdma, struct sk_buff *skb) +{ + struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring; + struct ocelot_fdma_tx_buf *tx_buf; + struct ocelot_fdma_dcb *dcb; + dma_addr_t dma; + u16 next_idx; + + dcb = &tx_ring->dcbs[tx_ring->next_to_use]; + tx_buf = &tx_ring->bufs[tx_ring->next_to_use]; + if (!ocelot_fdma_tx_dcb_set_skb(ocelot, tx_buf, dcb, skb)) { + dev_kfree_skb_any(skb); + return; + } + + next_idx = ocelot_fdma_idx_next(tx_ring->next_to_use, + OCELOT_FDMA_TX_RING_SIZE); + skb_tx_timestamp(skb); + + /* If the FDMA TX chan is empty, then enqueue the DCB directly */ + if (ocelot_fdma_tx_ring_empty(fdma)) { + dma = ocelot_fdma_idx_dma(tx_ring->dcbs_dma, + tx_ring->next_to_use); + ocelot_fdma_activate_chan(ocelot, dma, MSCC_FDMA_INJ_CHAN); + } else { + /* Chain the DCBs */ + dcb->llp = ocelot_fdma_idx_dma(tx_ring->dcbs_dma, next_idx); + } + + tx_ring->next_to_use = next_idx; +} + +static int ocelot_fdma_prepare_skb(struct ocelot *ocelot, int port, u32 rew_op, + struct sk_buff *skb, struct net_device *dev) +{ + int needed_headroom = max_t(int, OCELOT_TAG_LEN - skb_headroom(skb), 0); + int needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0); + void *ifh; + int err; + + if (unlikely(needed_headroom || needed_tailroom || + skb_header_cloned(skb))) { + err = pskb_expand_head(skb, needed_headroom, needed_tailroom, + GFP_ATOMIC); + if (unlikely(err)) { + dev_kfree_skb_any(skb); + return 1; + } + } + + err = skb_linearize(skb); + if (err) { + net_err_ratelimited("%s: skb_linearize error (%d)!\n", + dev->name, err); + dev_kfree_skb_any(skb); + return 1; + } + + ifh = skb_push(skb, OCELOT_TAG_LEN); + skb_put(skb, ETH_FCS_LEN); + memset(ifh, 0, OCELOT_TAG_LEN); + ocelot_ifh_port_set(ifh, port, rew_op, skb_vlan_tag_get(skb)); + + return 0; +} + +int ocelot_fdma_inject_frame(struct ocelot *ocelot, int port, u32 rew_op, + struct sk_buff *skb, struct net_device *dev) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + int ret = NETDEV_TX_OK; + + spin_lock(&fdma->tx_ring.xmit_lock); + + if (ocelot_fdma_tx_ring_free(fdma) == 0) { + netif_stop_queue(dev); + ret = NETDEV_TX_BUSY; + goto out; + } + + if (ocelot_fdma_prepare_skb(ocelot, port, rew_op, skb, dev)) + goto out; + + ocelot_fdma_send_skb(ocelot, fdma, skb); + +out: + spin_unlock(&fdma->tx_ring.xmit_lock); + + return ret; +} + +static void ocelot_fdma_free_rx_ring(struct ocelot *ocelot) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + struct ocelot_fdma_rx_ring *rx_ring; + struct ocelot_fdma_rx_buf *rxb; + u16 idx; + + rx_ring = &fdma->rx_ring; + idx = rx_ring->next_to_clean; + + /* Free the pages held in the RX ring */ + while (idx != rx_ring->next_to_use) { + rxb = &rx_ring->bufs[idx]; + dma_unmap_page(ocelot->dev, rxb->dma_addr, PAGE_SIZE, + DMA_FROM_DEVICE); + __free_page(rxb->page); + idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE); + } + + if (fdma->rx_ring.skb) + dev_kfree_skb_any(fdma->rx_ring.skb); +} + +static void ocelot_fdma_free_tx_ring(struct ocelot *ocelot) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + struct ocelot_fdma_tx_ring *tx_ring; + struct ocelot_fdma_tx_buf *txb; + struct sk_buff *skb; + u16 idx; + + tx_ring = &fdma->tx_ring; + idx = tx_ring->next_to_clean; + + while (idx != tx_ring->next_to_use) { + txb = &tx_ring->bufs[idx]; + skb = 
txb->skb; + dma_unmap_single(ocelot->dev, dma_unmap_addr(txb, dma_addr), + skb->len, DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_TX_RING_SIZE); + } +} + +static int ocelot_fdma_rings_alloc(struct ocelot *ocelot) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + struct ocelot_fdma_dcb *dcbs; + unsigned int adjust; + dma_addr_t dcbs_dma; + int ret; + + /* Create a pool of consistent memory blocks for hardware descriptors */ + fdma->dcbs_base = dmam_alloc_coherent(ocelot->dev, + OCELOT_DCBS_HW_ALLOC_SIZE, + &fdma->dcbs_dma_base, GFP_KERNEL); + if (!fdma->dcbs_base) + return -ENOMEM; + + /* DCBs must be aligned on a 32bit boundary */ + dcbs = fdma->dcbs_base; + dcbs_dma = fdma->dcbs_dma_base; + if (!IS_ALIGNED(dcbs_dma, 4)) { + adjust = dcbs_dma & 0x3; + dcbs_dma = ALIGN(dcbs_dma, 4); + dcbs = (void *)dcbs + adjust; + } + + /* TX queue */ + fdma->tx_ring.dcbs = dcbs; + fdma->tx_ring.dcbs_dma = dcbs_dma; + spin_lock_init(&fdma->tx_ring.xmit_lock); + + /* RX queue */ + fdma->rx_ring.dcbs = dcbs + OCELOT_FDMA_TX_RING_SIZE; + fdma->rx_ring.dcbs_dma = dcbs_dma + OCELOT_FDMA_TX_DCB_SIZE; + ret = ocelot_fdma_alloc_rx_buffs(ocelot, + ocelot_fdma_tx_ring_free(fdma)); + if (ret) { + ocelot_fdma_free_rx_ring(ocelot); + return ret; + } + + /* Set the last DCB LLP as NULL, this is normally done when restarting + * the RX chan, but this is for the first run + */ + ocelot_fdma_rx_set_llp(&fdma->rx_ring); + + return 0; +} + +void ocelot_fdma_netdev_init(struct ocelot *ocelot, struct net_device *dev) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + + dev->needed_headroom = OCELOT_TAG_LEN; + dev->needed_tailroom = ETH_FCS_LEN; + + if (fdma->ndev) + return; + + fdma->ndev = dev; + netif_napi_add_weight(dev, &fdma->napi, ocelot_fdma_napi_poll, + OCELOT_FDMA_WEIGHT); +} + +void ocelot_fdma_netdev_deinit(struct ocelot *ocelot, struct net_device *dev) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + + if (fdma->ndev == dev) { + netif_napi_del(&fdma->napi); + fdma->ndev = NULL; + } +} + +void ocelot_fdma_init(struct platform_device *pdev, struct ocelot *ocelot) +{ + struct device *dev = ocelot->dev; + struct ocelot_fdma *fdma; + int ret; + + fdma = devm_kzalloc(dev, sizeof(*fdma), GFP_KERNEL); + if (!fdma) + return; + + ocelot->fdma = fdma; + ocelot->dev->coherent_dma_mask = DMA_BIT_MASK(32); + + ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, 0); + + fdma->ocelot = ocelot; + fdma->irq = platform_get_irq_byname(pdev, "fdma"); + ret = devm_request_irq(dev, fdma->irq, ocelot_fdma_interrupt, 0, + dev_name(dev), ocelot); + if (ret) + goto err_free_fdma; + + ret = ocelot_fdma_rings_alloc(ocelot); + if (ret) + goto err_free_irq; + + static_branch_enable(&ocelot_fdma_enabled); + + return; + +err_free_irq: + devm_free_irq(dev, fdma->irq, fdma); +err_free_fdma: + devm_kfree(dev, fdma); + + ocelot->fdma = NULL; +} + +void ocelot_fdma_start(struct ocelot *ocelot) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + + /* Reconfigure for extraction and injection using DMA */ + ocelot_write_rix(ocelot, QS_INJ_GRP_CFG_MODE(2), QS_INJ_GRP_CFG, 0); + ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(0), QS_INJ_CTRL, 0); + + ocelot_write_rix(ocelot, QS_XTR_GRP_CFG_MODE(2), QS_XTR_GRP_CFG, 0); + + ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_LLP, 0xffffffff); + ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_FRM, 0xffffffff); + + ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_LLP_ENA, + BIT(MSCC_FDMA_INJ_CHAN) | BIT(MSCC_FDMA_XTR_CHAN)); + ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_FRM_ENA, + BIT(MSCC_FDMA_XTR_CHAN)); + 
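/* Interrupt delivery is gated at two levels: the per-event masks above
 * enable LLP interrupts for both channels but frame interrupts only for
 * extraction, while the per-channel enable below turns on delivery for
 * injection and extraction as a whole.
 */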
ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, + BIT(MSCC_FDMA_INJ_CHAN) | BIT(MSCC_FDMA_XTR_CHAN)); + + napi_enable(&fdma->napi); + + ocelot_fdma_activate_chan(ocelot, ocelot->fdma->rx_ring.dcbs_dma, + MSCC_FDMA_XTR_CHAN); +} + +void ocelot_fdma_deinit(struct ocelot *ocelot) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + + ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, 0); + ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_FORCEDIS, + BIT(MSCC_FDMA_XTR_CHAN)); + ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_FORCEDIS, + BIT(MSCC_FDMA_INJ_CHAN)); + napi_synchronize(&fdma->napi); + napi_disable(&fdma->napi); + + ocelot_fdma_free_rx_ring(ocelot); + ocelot_fdma_free_tx_ring(ocelot); +} diff --git a/drivers/net/ethernet/mscc/ocelot_fdma.h b/drivers/net/ethernet/mscc/ocelot_fdma.h new file mode 100644 index 0000000000..2fc8e1dd72 --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_fdma.h @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Microsemi SoCs FDMA driver + * + * Copyright (c) 2021 Microchip + */ +#ifndef _MSCC_OCELOT_FDMA_H_ +#define _MSCC_OCELOT_FDMA_H_ + +#include "ocelot.h" + +#define MSCC_FDMA_DCB_STAT_BLOCKO(x) (((x) << 20) & GENMASK(31, 20)) +#define MSCC_FDMA_DCB_STAT_BLOCKO_M GENMASK(31, 20) +#define MSCC_FDMA_DCB_STAT_BLOCKO_X(x) (((x) & GENMASK(31, 20)) >> 20) +#define MSCC_FDMA_DCB_STAT_PD BIT(19) +#define MSCC_FDMA_DCB_STAT_ABORT BIT(18) +#define MSCC_FDMA_DCB_STAT_EOF BIT(17) +#define MSCC_FDMA_DCB_STAT_SOF BIT(16) +#define MSCC_FDMA_DCB_STAT_BLOCKL_M GENMASK(15, 0) +#define MSCC_FDMA_DCB_STAT_BLOCKL(x) ((x) & GENMASK(15, 0)) + +#define MSCC_FDMA_DCB_LLP(x) ((x) * 4 + 0x0) +#define MSCC_FDMA_DCB_LLP_PREV(x) ((x) * 4 + 0xA0) +#define MSCC_FDMA_CH_SAFE 0xcc +#define MSCC_FDMA_CH_ACTIVATE 0xd0 +#define MSCC_FDMA_CH_DISABLE 0xd4 +#define MSCC_FDMA_CH_FORCEDIS 0xd8 +#define MSCC_FDMA_EVT_ERR 0x164 +#define MSCC_FDMA_EVT_ERR_CODE 0x168 +#define MSCC_FDMA_INTR_LLP 0x16c +#define MSCC_FDMA_INTR_LLP_ENA 0x170 +#define MSCC_FDMA_INTR_FRM 0x174 +#define MSCC_FDMA_INTR_FRM_ENA 0x178 +#define MSCC_FDMA_INTR_ENA 0x184 +#define MSCC_FDMA_INTR_IDENT 0x188 + +#define MSCC_FDMA_INJ_CHAN 2 +#define MSCC_FDMA_XTR_CHAN 0 + +#define OCELOT_FDMA_WEIGHT 32 + +#define OCELOT_FDMA_CH_SAFE_TIMEOUT_US 10 + +#define OCELOT_FDMA_RX_RING_SIZE 512 +#define OCELOT_FDMA_TX_RING_SIZE 128 + +#define OCELOT_FDMA_RX_DCB_SIZE (OCELOT_FDMA_RX_RING_SIZE * \ + sizeof(struct ocelot_fdma_dcb)) +#define OCELOT_FDMA_TX_DCB_SIZE (OCELOT_FDMA_TX_RING_SIZE * \ + sizeof(struct ocelot_fdma_dcb)) +/* +4 allows for word alignment after allocation */ +#define OCELOT_DCBS_HW_ALLOC_SIZE (OCELOT_FDMA_RX_DCB_SIZE + \ + OCELOT_FDMA_TX_DCB_SIZE + \ + 4) + +#define OCELOT_FDMA_RX_SIZE (PAGE_SIZE / 2) + +#define OCELOT_FDMA_SKBFRAG_OVR (4 + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#define OCELOT_FDMA_RXB_SIZE ALIGN_DOWN(OCELOT_FDMA_RX_SIZE - OCELOT_FDMA_SKBFRAG_OVR, 4) +#define OCELOT_FDMA_SKBFRAG_SIZE (OCELOT_FDMA_RXB_SIZE + OCELOT_FDMA_SKBFRAG_OVR) + +DECLARE_STATIC_KEY_FALSE(ocelot_fdma_enabled); + +struct ocelot_fdma_dcb { + u32 llp; + u32 datap; + u32 datal; + u32 stat; +} __packed; + +/** + * struct ocelot_fdma_tx_buf - TX buffer structure + * @skb: SKB currently used in the corresponding DCB. + * @dma_addr: SKB DMA mapped address. 
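 * The address is recorded with dma_unmap_addr_set() and read back with
 * dma_unmap_addr(), so this member only occupies space on configurations
 * that actually need to remember DMA addresses for unmapping.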
+ */ +struct ocelot_fdma_tx_buf { + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(dma_addr); +}; + +/** + * struct ocelot_fdma_tx_ring - TX ring description of DCBs + * + * @dcbs: DCBs allocated for the ring + * @dcbs_dma: DMA base address of the DCBs + * @bufs: List of TX buffers associated with the DCBs + * @xmit_lock: lock for concurrent xmit access + * @next_to_clean: Next DCB to be cleaned in tx_cleanup + * @next_to_use: Next available DCB to send an SKB + */ +struct ocelot_fdma_tx_ring { + struct ocelot_fdma_dcb *dcbs; + dma_addr_t dcbs_dma; + struct ocelot_fdma_tx_buf bufs[OCELOT_FDMA_TX_RING_SIZE]; + /* Protect concurrent xmit calls */ + spinlock_t xmit_lock; + u16 next_to_clean; + u16 next_to_use; +}; + +/** + * struct ocelot_fdma_rx_buf - RX buffer structure + * @page: Struct page used in this buffer + * @page_offset: Current page offset (either 0 or PAGE_SIZE/2) + * @dma_addr: DMA address of the page + */ +struct ocelot_fdma_rx_buf { + struct page *page; + u32 page_offset; + dma_addr_t dma_addr; +}; + +/** + * struct ocelot_fdma_rx_ring - RX ring description of DCBs + * + * @dcbs: DCBs allocated for the ring + * @dcbs_dma: DMA base address of the DCBs + * @bufs: List of RX buffers associated with the DCBs + * @skb: SKB currently received by the netdev + * @next_to_clean: Next DCB to be cleaned during NAPI polling + * @next_to_use: Next available DCB to refill with a fresh RX buffer + * @next_to_alloc: Next buffer that needs to be allocated (page reuse or alloc) + */ +struct ocelot_fdma_rx_ring { + struct ocelot_fdma_dcb *dcbs; + dma_addr_t dcbs_dma; + struct ocelot_fdma_rx_buf bufs[OCELOT_FDMA_RX_RING_SIZE]; + struct sk_buff *skb; + u16 next_to_clean; + u16 next_to_use; + u16 next_to_alloc; +}; + +/** + * struct ocelot_fdma - FDMA context + * + * @irq: FDMA interrupt + * @ndev: Net device used to initialize NAPI + * @dcbs_base: Memory coherent DCBs + * @dcbs_dma_base: DMA base address of memory coherent DCBs + * @tx_ring: Injection ring + * @rx_ring: Extraction ring + * @napi: NAPI context + * @ocelot: Back-pointer to ocelot struct + */ +struct ocelot_fdma { + int irq; + struct net_device *ndev; + struct ocelot_fdma_dcb *dcbs_base; + dma_addr_t dcbs_dma_base; + struct ocelot_fdma_tx_ring tx_ring; + struct ocelot_fdma_rx_ring rx_ring; + struct napi_struct napi; + struct ocelot *ocelot; +}; + +void ocelot_fdma_init(struct platform_device *pdev, struct ocelot *ocelot); +void ocelot_fdma_start(struct ocelot *ocelot); +void ocelot_fdma_deinit(struct ocelot *ocelot); +int ocelot_fdma_inject_frame(struct ocelot *ocelot, int port, u32 rew_op, + struct sk_buff *skb, struct net_device *dev); +void ocelot_fdma_netdev_init(struct ocelot *ocelot, struct net_device *dev); +void ocelot_fdma_netdev_deinit(struct ocelot *ocelot, + struct net_device *dev); + +#endif diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c new file mode 100644 index 0000000000..33b438c6ae --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -0,0 +1,1015 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Microsemi Ocelot Switch driver + * Copyright (c) 2019 Microsemi Corporation + */ + +#include <net/pkt_cls.h> +#include <net/tc_act/tc_gact.h> +#include <soc/mscc/ocelot_vcap.h> +#include "ocelot_police.h" +#include "ocelot_vcap.h" + +/* Arbitrarily chosen constants for encoding the VCAP block and lookup number + * into the chain number. This is UAPI. 
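With the constants below, this encodes, for example:

	VCAP_IS1_CHAIN(0)      = 10000
	VCAP_IS1_CHAIN(2)      = 12000
	VCAP_IS2_CHAIN(0, 5)   = 20005
	VCAP_IS2_CHAIN(1, 254) = 21254
	OCELOT_PSFP_CHAIN      = 30000

so the chain index of a tc filter alone identifies the hardware block,
the lookup within it and, for VCAP IS2, the Policy Association Group.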
+ */ +#define VCAP_BLOCK 10000 +#define VCAP_LOOKUP 1000 +#define VCAP_IS1_NUM_LOOKUPS 3 +#define VCAP_IS2_NUM_LOOKUPS 2 +#define VCAP_IS2_NUM_PAG 256 +#define VCAP_IS1_CHAIN(lookup) \ + (1 * VCAP_BLOCK + (lookup) * VCAP_LOOKUP) +#define VCAP_IS2_CHAIN(lookup, pag) \ + (2 * VCAP_BLOCK + (lookup) * VCAP_LOOKUP + (pag)) +/* PSFP chain and block ID */ +#define PSFP_BLOCK_ID OCELOT_NUM_VCAP_BLOCKS +#define OCELOT_PSFP_CHAIN (3 * VCAP_BLOCK) + +static int ocelot_chain_to_block(int chain, bool ingress) +{ + int lookup, pag; + + if (!ingress) { + if (chain == 0) + return VCAP_ES0; + return -EOPNOTSUPP; + } + + /* Backwards compatibility with older, single-chain tc-flower + * offload support in Ocelot + */ + if (chain == 0) + return VCAP_IS2; + + for (lookup = 0; lookup < VCAP_IS1_NUM_LOOKUPS; lookup++) + if (chain == VCAP_IS1_CHAIN(lookup)) + return VCAP_IS1; + + for (lookup = 0; lookup < VCAP_IS2_NUM_LOOKUPS; lookup++) + for (pag = 0; pag < VCAP_IS2_NUM_PAG; pag++) + if (chain == VCAP_IS2_CHAIN(lookup, pag)) + return VCAP_IS2; + + if (chain == OCELOT_PSFP_CHAIN) + return PSFP_BLOCK_ID; + + return -EOPNOTSUPP; +} + +/* Caller must ensure this is a valid IS1 or IS2 chain first, + * by calling ocelot_chain_to_block. + */ +static int ocelot_chain_to_lookup(int chain) +{ + /* Backwards compatibility with older, single-chain tc-flower + * offload support in Ocelot + */ + if (chain == 0) + return 0; + + return (chain / VCAP_LOOKUP) % 10; +} + +/* Caller must ensure this is a valid IS2 chain first, + * by calling ocelot_chain_to_block. + */ +static int ocelot_chain_to_pag(int chain) +{ + int lookup; + + /* Backwards compatibility with older, single-chain tc-flower + * offload support in Ocelot + */ + if (chain == 0) + return 0; + + lookup = ocelot_chain_to_lookup(chain); + + /* calculate PAG value as chain index relative to the first PAG */ + return chain - VCAP_IS2_CHAIN(lookup, 0); +} + +static bool ocelot_is_goto_target_valid(int goto_target, int chain, + bool ingress) +{ + int pag; + + /* Can't offload GOTO in VCAP ES0 */ + if (!ingress) + return (goto_target < 0); + + /* Non-optional GOTOs */ + if (chain == 0) + /* VCAP IS1 can be skipped, either partially or completely */ + return (goto_target == VCAP_IS1_CHAIN(0) || + goto_target == VCAP_IS1_CHAIN(1) || + goto_target == VCAP_IS1_CHAIN(2) || + goto_target == VCAP_IS2_CHAIN(0, 0) || + goto_target == VCAP_IS2_CHAIN(1, 0) || + goto_target == OCELOT_PSFP_CHAIN); + + if (chain == VCAP_IS1_CHAIN(0)) + return (goto_target == VCAP_IS1_CHAIN(1)); + + if (chain == VCAP_IS1_CHAIN(1)) + return (goto_target == VCAP_IS1_CHAIN(2)); + + /* Lookup 2 of VCAP IS1 can really support non-optional GOTOs, + * using a Policy Association Group (PAG) value, which is an 8-bit + * value encoding a VCAP IS2 target chain. + */ + if (chain == VCAP_IS1_CHAIN(2)) { + for (pag = 0; pag < VCAP_IS2_NUM_PAG; pag++) + if (goto_target == VCAP_IS2_CHAIN(0, pag)) + return true; + + return false; + } + + /* Non-optional GOTO from VCAP IS2 lookup 0 to lookup 1. + * We cannot change the PAG at this point. 
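For example, a rule on chain 20005 (IS2 lookup 0, PAG 5) may only GOTO
chain 21005 (IS2 lookup 1, PAG 5), which is what the loop below checks.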
+ */ + for (pag = 0; pag < VCAP_IS2_NUM_PAG; pag++) + if (chain == VCAP_IS2_CHAIN(0, pag)) + return (goto_target == VCAP_IS2_CHAIN(1, pag)); + + /* VCAP IS2 lookup 1 can go to the PSFP block if the hardware supports it */ + for (pag = 0; pag < VCAP_IS2_NUM_PAG; pag++) + if (chain == VCAP_IS2_CHAIN(1, pag)) + return (goto_target == OCELOT_PSFP_CHAIN); + + return false; +} + +static struct ocelot_vcap_filter * +ocelot_find_vcap_filter_that_points_at(struct ocelot *ocelot, int chain) +{ + struct ocelot_vcap_filter *filter; + struct ocelot_vcap_block *block; + int block_id; + + block_id = ocelot_chain_to_block(chain, true); + if (block_id < 0) + return NULL; + + if (block_id == VCAP_IS2) { + block = &ocelot->block[VCAP_IS1]; + + list_for_each_entry(filter, &block->rules, list) + if (filter->type == OCELOT_VCAP_FILTER_PAG && + filter->goto_target == chain) + return filter; + } + + list_for_each_entry(filter, &ocelot->dummy_rules, list) + if (filter->goto_target == chain) + return filter; + + return NULL; +} + +static int +ocelot_flower_parse_ingress_vlan_modify(struct ocelot *ocelot, int port, + struct ocelot_vcap_filter *filter, + const struct flow_action_entry *a, + struct netlink_ext_ack *extack) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + if (filter->goto_target != -1) { + NL_SET_ERR_MSG_MOD(extack, + "Last action must be GOTO"); + return -EOPNOTSUPP; + } + + if (!ocelot_port->vlan_aware) { + NL_SET_ERR_MSG_MOD(extack, + "Can only modify VLAN under VLAN aware bridge"); + return -EOPNOTSUPP; + } + + filter->action.vid_replace_ena = true; + filter->action.pcp_dei_ena = true; + filter->action.vid = a->vlan.vid; + filter->action.pcp = a->vlan.prio; + filter->type = OCELOT_VCAP_FILTER_OFFLOAD; + + return 0; +} + +static int +ocelot_flower_parse_egress_vlan_modify(struct ocelot_vcap_filter *filter, + const struct flow_action_entry *a, + struct netlink_ext_ack *extack) +{ + enum ocelot_tag_tpid_sel tpid; + + switch (ntohs(a->vlan.proto)) { + case ETH_P_8021Q: + tpid = OCELOT_TAG_TPID_SEL_8021Q; + break; + case ETH_P_8021AD: + tpid = OCELOT_TAG_TPID_SEL_8021AD; + break; + default: + NL_SET_ERR_MSG_MOD(extack, + "Cannot modify custom TPID"); + return -EOPNOTSUPP; + } + + filter->action.tag_a_tpid_sel = tpid; + filter->action.push_outer_tag = OCELOT_ES0_TAG; + filter->action.tag_a_vid_sel = OCELOT_ES0_VID_PLUS_CLASSIFIED_VID; + filter->action.vid_a_val = a->vlan.vid; + filter->action.pcp_a_val = a->vlan.prio; + filter->action.tag_a_pcp_sel = OCELOT_ES0_PCP; + filter->type = OCELOT_VCAP_FILTER_OFFLOAD; + + return 0; +} + +static int ocelot_flower_parse_action(struct ocelot *ocelot, int port, + bool ingress, struct flow_cls_offload *f, + struct ocelot_vcap_filter *filter) +{ + const struct flow_action *action = &f->rule->action; + struct netlink_ext_ack *extack = f->common.extack; + bool allow_missing_goto_target = false; + const struct flow_action_entry *a; + enum ocelot_tag_tpid_sel tpid; + int i, chain, egress_port; + u32 pol_ix, pol_max; + u64 rate; + int err; + + if (!flow_action_basic_hw_stats_check(&f->rule->action, + f->common.extack)) + return -EOPNOTSUPP; + + chain = f->common.chain_index; + filter->block_id = ocelot_chain_to_block(chain, ingress); + if (filter->block_id < 0) { + NL_SET_ERR_MSG_MOD(extack, "Cannot offload to this chain"); + return -EOPNOTSUPP; + } + if (filter->block_id == VCAP_IS1 || filter->block_id == VCAP_IS2) + filter->lookup = ocelot_chain_to_lookup(chain); + if (filter->block_id == VCAP_IS2) + filter->pag = ocelot_chain_to_pag(chain); + + filter->goto_target 
= -1; + filter->type = OCELOT_VCAP_FILTER_DUMMY; + + flow_action_for_each(i, a, action) { + switch (a->id) { + case FLOW_ACTION_DROP: + if (filter->block_id != VCAP_IS2) { + NL_SET_ERR_MSG_MOD(extack, + "Drop action can only be offloaded to VCAP IS2"); + return -EOPNOTSUPP; + } + if (filter->goto_target != -1) { + NL_SET_ERR_MSG_MOD(extack, + "Last action must be GOTO"); + return -EOPNOTSUPP; + } + filter->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY; + filter->action.port_mask = 0; + filter->action.police_ena = true; + filter->action.pol_ix = OCELOT_POLICER_DISCARD; + filter->type = OCELOT_VCAP_FILTER_OFFLOAD; + break; + case FLOW_ACTION_ACCEPT: + if (filter->block_id != VCAP_ES0 && + filter->block_id != VCAP_IS1 && + filter->block_id != VCAP_IS2) { + NL_SET_ERR_MSG_MOD(extack, + "Accept action can only be offloaded to VCAP chains"); + return -EOPNOTSUPP; + } + if (filter->block_id != VCAP_ES0 && + filter->goto_target != -1) { + NL_SET_ERR_MSG_MOD(extack, + "Last action must be GOTO"); + return -EOPNOTSUPP; + } + filter->type = OCELOT_VCAP_FILTER_OFFLOAD; + break; + case FLOW_ACTION_TRAP: + if (filter->block_id != VCAP_IS2 || + filter->lookup != 0) { + NL_SET_ERR_MSG_MOD(extack, + "Trap action can only be offloaded to VCAP IS2 lookup 0"); + return -EOPNOTSUPP; + } + if (filter->goto_target != -1) { + NL_SET_ERR_MSG_MOD(extack, + "Last action must be GOTO"); + return -EOPNOTSUPP; + } + filter->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY; + filter->action.port_mask = 0; + filter->action.cpu_copy_ena = true; + filter->action.cpu_qu_num = 0; + filter->type = OCELOT_VCAP_FILTER_OFFLOAD; + filter->is_trap = true; + break; + case FLOW_ACTION_POLICE: + if (filter->block_id == PSFP_BLOCK_ID) { + filter->type = OCELOT_PSFP_FILTER_OFFLOAD; + break; + } + if (filter->block_id != VCAP_IS2 || + filter->lookup != 0) { + NL_SET_ERR_MSG_MOD(extack, + "Police action can only be offloaded to VCAP IS2 lookup 0 or PSFP"); + return -EOPNOTSUPP; + } + if (filter->goto_target != -1) { + NL_SET_ERR_MSG_MOD(extack, + "Last action must be GOTO"); + return -EOPNOTSUPP; + } + + err = ocelot_policer_validate(action, a, extack); + if (err) + return err; + + filter->action.police_ena = true; + + pol_ix = a->hw_index + ocelot->vcap_pol.base; + pol_max = ocelot->vcap_pol.max; + + if (ocelot->vcap_pol.max2 && pol_ix > pol_max) { + pol_ix += ocelot->vcap_pol.base2 - pol_max - 1; + pol_max = ocelot->vcap_pol.max2; + } + + if (pol_ix >= pol_max) + return -EINVAL; + + filter->action.pol_ix = pol_ix; + + rate = a->police.rate_bytes_ps; + filter->action.pol.rate = div_u64(rate, 1000) * 8; + filter->action.pol.burst = a->police.burst; + filter->type = OCELOT_VCAP_FILTER_OFFLOAD; + break; + case FLOW_ACTION_REDIRECT: + if (filter->block_id != VCAP_IS2) { + NL_SET_ERR_MSG_MOD(extack, + "Redirect action can only be offloaded to VCAP IS2"); + return -EOPNOTSUPP; + } + if (filter->goto_target != -1) { + NL_SET_ERR_MSG_MOD(extack, + "Last action must be GOTO"); + return -EOPNOTSUPP; + } + egress_port = ocelot->ops->netdev_to_port(a->dev); + if (egress_port < 0) { + NL_SET_ERR_MSG_MOD(extack, + "Destination not an ocelot port"); + return -EOPNOTSUPP; + } + filter->action.mask_mode = OCELOT_MASK_MODE_REDIRECT; + filter->action.port_mask = BIT(egress_port); + filter->type = OCELOT_VCAP_FILTER_OFFLOAD; + break; + case FLOW_ACTION_MIRRED: + if (filter->block_id != VCAP_IS2) { + NL_SET_ERR_MSG_MOD(extack, + "Mirror action can only be offloaded to VCAP IS2"); + return -EOPNOTSUPP; + } + if (filter->goto_target != -1) { + 
NL_SET_ERR_MSG_MOD(extack, + "Last action must be GOTO"); + return -EOPNOTSUPP; + } + egress_port = ocelot->ops->netdev_to_port(a->dev); + if (egress_port < 0) { + NL_SET_ERR_MSG_MOD(extack, + "Destination not an ocelot port"); + return -EOPNOTSUPP; + } + filter->egress_port.value = egress_port; + filter->action.mirror_ena = true; + filter->type = OCELOT_VCAP_FILTER_OFFLOAD; + break; + case FLOW_ACTION_VLAN_POP: + if (filter->block_id != VCAP_IS1) { + NL_SET_ERR_MSG_MOD(extack, + "VLAN pop action can only be offloaded to VCAP IS1"); + return -EOPNOTSUPP; + } + if (filter->goto_target != -1) { + NL_SET_ERR_MSG_MOD(extack, + "Last action must be GOTO"); + return -EOPNOTSUPP; + } + filter->action.vlan_pop_cnt_ena = true; + filter->action.vlan_pop_cnt++; + if (filter->action.vlan_pop_cnt > 2) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot pop more than 2 VLAN headers"); + return -EOPNOTSUPP; + } + filter->type = OCELOT_VCAP_FILTER_OFFLOAD; + break; + case FLOW_ACTION_VLAN_MANGLE: + if (filter->block_id == VCAP_IS1) { + err = ocelot_flower_parse_ingress_vlan_modify(ocelot, port, + filter, a, + extack); + } else if (filter->block_id == VCAP_ES0) { + err = ocelot_flower_parse_egress_vlan_modify(filter, a, + extack); + } else { + NL_SET_ERR_MSG_MOD(extack, + "VLAN modify action can only be offloaded to VCAP IS1 or ES0"); + err = -EOPNOTSUPP; + } + if (err) + return err; + break; + case FLOW_ACTION_PRIORITY: + if (filter->block_id != VCAP_IS1) { + NL_SET_ERR_MSG_MOD(extack, + "Priority action can only be offloaded to VCAP IS1"); + return -EOPNOTSUPP; + } + if (filter->goto_target != -1) { + NL_SET_ERR_MSG_MOD(extack, + "Last action must be GOTO"); + return -EOPNOTSUPP; + } + filter->action.qos_ena = true; + filter->action.qos_val = a->priority; + filter->type = OCELOT_VCAP_FILTER_OFFLOAD; + break; + case FLOW_ACTION_GOTO: + filter->goto_target = a->chain_index; + + if (filter->block_id == VCAP_IS1 && filter->lookup == 2) { + int pag = ocelot_chain_to_pag(filter->goto_target); + + filter->action.pag_override_mask = 0xff; + filter->action.pag_val = pag; + filter->type = OCELOT_VCAP_FILTER_PAG; + } + break; + case FLOW_ACTION_VLAN_PUSH: + if (filter->block_id != VCAP_ES0) { + NL_SET_ERR_MSG_MOD(extack, + "VLAN push action can only be offloaded to VCAP ES0"); + return -EOPNOTSUPP; + } + switch (ntohs(a->vlan.proto)) { + case ETH_P_8021Q: + tpid = OCELOT_TAG_TPID_SEL_8021Q; + break; + case ETH_P_8021AD: + tpid = OCELOT_TAG_TPID_SEL_8021AD; + break; + default: + NL_SET_ERR_MSG_MOD(extack, + "Cannot push custom TPID"); + return -EOPNOTSUPP; + } + filter->action.tag_a_tpid_sel = tpid; + filter->action.push_outer_tag = OCELOT_ES0_TAG; + filter->action.tag_a_vid_sel = OCELOT_ES0_VID; + filter->action.vid_a_val = a->vlan.vid; + filter->action.pcp_a_val = a->vlan.prio; + filter->type = OCELOT_VCAP_FILTER_OFFLOAD; + break; + case FLOW_ACTION_GATE: + if (filter->block_id != PSFP_BLOCK_ID) { + NL_SET_ERR_MSG_MOD(extack, + "Gate action can only be offloaded to PSFP chain"); + return -EOPNOTSUPP; + } + filter->type = OCELOT_PSFP_FILTER_OFFLOAD; + break; + default: + NL_SET_ERR_MSG_MOD(extack, "Cannot offload action"); + return -EOPNOTSUPP; + } + } + + if (filter->goto_target == -1) { + if ((filter->block_id == VCAP_IS2 && filter->lookup == 1) || + chain == 0 || filter->block_id == PSFP_BLOCK_ID) { + allow_missing_goto_target = true; + } else { + NL_SET_ERR_MSG_MOD(extack, "Missing GOTO action"); + return -EOPNOTSUPP; + } + } + + if (!ocelot_is_goto_target_valid(filter->goto_target, chain, ingress) && + 
!allow_missing_goto_target) { + NL_SET_ERR_MSG_MOD(extack, "Cannot offload this GOTO target"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int ocelot_flower_parse_indev(struct ocelot *ocelot, int port, + struct flow_cls_offload *f, + struct ocelot_vcap_filter *filter) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(f); + const struct vcap_props *vcap = &ocelot->vcap[VCAP_ES0]; + int key_length = vcap->keys[VCAP_ES0_IGR_PORT].length; + struct netlink_ext_ack *extack = f->common.extack; + struct net_device *dev, *indev; + struct flow_match_meta match; + int ingress_port; + + flow_rule_match_meta(rule, &match); + + if (!match.mask->ingress_ifindex) + return 0; + + if (match.mask->ingress_ifindex != 0xFFFFFFFF) { + NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask"); + return -EOPNOTSUPP; + } + + dev = ocelot->ops->port_to_netdev(ocelot, port); + if (!dev) + return -EINVAL; + + indev = __dev_get_by_index(dev_net(dev), match.key->ingress_ifindex); + if (!indev) { + NL_SET_ERR_MSG_MOD(extack, + "Can't find the ingress port to match on"); + return -ENOENT; + } + + ingress_port = ocelot->ops->netdev_to_port(indev); + if (ingress_port < 0) { + NL_SET_ERR_MSG_MOD(extack, + "Can only offload an ocelot ingress port"); + return -EOPNOTSUPP; + } + if (ingress_port == port) { + NL_SET_ERR_MSG_MOD(extack, + "Ingress port is equal to the egress port"); + return -EINVAL; + } + + filter->ingress_port.value = ingress_port; + filter->ingress_port.mask = GENMASK(key_length - 1, 0); + + return 0; +} + +static int +ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress, + struct flow_cls_offload *f, + struct ocelot_vcap_filter *filter) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(f); + struct flow_dissector *dissector = rule->match.dissector; + struct netlink_ext_ack *extack = f->common.extack; + u16 proto = ntohs(f->common.protocol); + bool match_protocol = true; + int ret; + + if (dissector->used_keys & + ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | + BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | + BIT_ULL(FLOW_DISSECTOR_KEY_META) | + BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | + BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS))) { + return -EOPNOTSUPP; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) { + struct flow_match_meta match; + + flow_rule_match_meta(rule, &match); + if (match.mask->l2_miss) { + NL_SET_ERR_MSG_MOD(extack, "Can't match on \"l2_miss\""); + return -EOPNOTSUPP; + } + } + + /* For VCAP ES0 (egress rewriter) we can match on the ingress port */ + if (!ingress) { + ret = ocelot_flower_parse_indev(ocelot, port, f, filter); + if (ret) + return ret; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control match; + + flow_rule_match_control(rule, &match); + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + + flow_rule_match_vlan(rule, &match); + filter->key_type = OCELOT_VCAP_KEY_ANY; + filter->vlan.vid.value = match.key->vlan_id; + filter->vlan.vid.mask = match.mask->vlan_id; + filter->vlan.pcp.value[0] = match.key->vlan_priority; + filter->vlan.pcp.mask[0] = match.mask->vlan_priority; + match_protocol = false; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + + if (filter->block_id == VCAP_ES0) { + NL_SET_ERR_MSG_MOD(extack, + "VCAP ES0 cannot match on MAC address"); + return -EOPNOTSUPP; + } 
+ + /* The hardware supports MAC matches only for the MAC_ETYPE key, + * so if other matches (ports, TCP flags, etc) are added, + * just bail out + */ + if ((dissector->used_keys & + (BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | + BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL))) != + (BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | + BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL))) + return -EOPNOTSUPP; + + flow_rule_match_eth_addrs(rule, &match); + + if (filter->block_id == VCAP_IS1 && + !is_zero_ether_addr(match.mask->dst)) { + NL_SET_ERR_MSG_MOD(extack, + "Key type S1_NORMAL cannot match on destination MAC"); + return -EOPNOTSUPP; + } + + filter->key_type = OCELOT_VCAP_KEY_ETYPE; + ether_addr_copy(filter->key.etype.dmac.value, + match.key->dst); + ether_addr_copy(filter->key.etype.smac.value, + match.key->src); + ether_addr_copy(filter->key.etype.dmac.mask, + match.mask->dst); + ether_addr_copy(filter->key.etype.smac.mask, + match.mask->src); + goto finished_key_parsing; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + + flow_rule_match_basic(rule, &match); + if (ntohs(match.key->n_proto) == ETH_P_IP) { + if (filter->block_id == VCAP_ES0) { + NL_SET_ERR_MSG_MOD(extack, + "VCAP ES0 cannot match on IP protocol"); + return -EOPNOTSUPP; + } + + filter->key_type = OCELOT_VCAP_KEY_IPV4; + filter->key.ipv4.proto.value[0] = + match.key->ip_proto; + filter->key.ipv4.proto.mask[0] = + match.mask->ip_proto; + match_protocol = false; + } + if (ntohs(match.key->n_proto) == ETH_P_IPV6) { + if (filter->block_id == VCAP_ES0) { + NL_SET_ERR_MSG_MOD(extack, + "VCAP ES0 cannot match on IP protocol"); + return -EOPNOTSUPP; + } + + filter->key_type = OCELOT_VCAP_KEY_IPV6; + filter->key.ipv6.proto.value[0] = + match.key->ip_proto; + filter->key.ipv6.proto.mask[0] = + match.mask->ip_proto; + match_protocol = false; + } + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) && + proto == ETH_P_IP) { + struct flow_match_ipv4_addrs match; + u8 *tmp; + + if (filter->block_id == VCAP_ES0) { + NL_SET_ERR_MSG_MOD(extack, + "VCAP ES0 cannot match on IP address"); + return -EOPNOTSUPP; + } + + flow_rule_match_ipv4_addrs(rule, &match); + + if (filter->block_id == VCAP_IS1 && *(u32 *)&match.mask->dst) { + NL_SET_ERR_MSG_MOD(extack, + "Key type S1_NORMAL cannot match on destination IP"); + return -EOPNOTSUPP; + } + + tmp = &filter->key.ipv4.sip.value.addr[0]; + memcpy(tmp, &match.key->src, 4); + + tmp = &filter->key.ipv4.sip.mask.addr[0]; + memcpy(tmp, &match.mask->src, 4); + + tmp = &filter->key.ipv4.dip.value.addr[0]; + memcpy(tmp, &match.key->dst, 4); + + tmp = &filter->key.ipv4.dip.mask.addr[0]; + memcpy(tmp, &match.mask->dst, 4); + match_protocol = false; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) && + proto == ETH_P_IPV6) { + return -EOPNOTSUPP; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; + + if (filter->block_id == VCAP_ES0) { + NL_SET_ERR_MSG_MOD(extack, + "VCAP ES0 cannot match on L4 ports"); + return -EOPNOTSUPP; + } + + flow_rule_match_ports(rule, &match); + filter->key.ipv4.sport.value = ntohs(match.key->src); + filter->key.ipv4.sport.mask = ntohs(match.mask->src); + filter->key.ipv4.dport.value = ntohs(match.key->dst); + filter->key.ipv4.dport.mask = ntohs(match.mask->dst); + match_protocol = false; + } + +finished_key_parsing: + if (match_protocol && proto != ETH_P_ALL) { + if (filter->block_id == VCAP_ES0) { + 
NL_SET_ERR_MSG_MOD(extack, + "VCAP ES0 cannot match on L2 proto"); + return -EOPNOTSUPP; + } + + /* TODO: support SNAP, LLC etc */ + if (proto < ETH_P_802_3_MIN) + return -EOPNOTSUPP; + filter->key_type = OCELOT_VCAP_KEY_ETYPE; + *(__be16 *)filter->key.etype.etype.value = htons(proto); + *(__be16 *)filter->key.etype.etype.mask = htons(0xffff); + } + /* else, a filter of type OCELOT_VCAP_KEY_ANY is implicitly added */ + + return 0; +} + +static int ocelot_flower_parse(struct ocelot *ocelot, int port, bool ingress, + struct flow_cls_offload *f, + struct ocelot_vcap_filter *filter) +{ + int ret; + + filter->prio = f->common.prio; + filter->id.cookie = f->cookie; + filter->id.tc_offload = true; + + ret = ocelot_flower_parse_action(ocelot, port, ingress, f, filter); + if (ret) + return ret; + + /* PSFP filter need to parse key by stream identification function. */ + if (filter->type == OCELOT_PSFP_FILTER_OFFLOAD) + return 0; + + return ocelot_flower_parse_key(ocelot, port, ingress, f, filter); +} + +static struct ocelot_vcap_filter +*ocelot_vcap_filter_create(struct ocelot *ocelot, int port, bool ingress, + struct flow_cls_offload *f) +{ + struct ocelot_vcap_filter *filter; + + filter = kzalloc(sizeof(*filter), GFP_KERNEL); + if (!filter) + return NULL; + + if (ingress) { + filter->ingress_port_mask = BIT(port); + } else { + const struct vcap_props *vcap = &ocelot->vcap[VCAP_ES0]; + int key_length = vcap->keys[VCAP_ES0_EGR_PORT].length; + + filter->egress_port.value = port; + filter->egress_port.mask = GENMASK(key_length - 1, 0); + } + + return filter; +} + +static int ocelot_vcap_dummy_filter_add(struct ocelot *ocelot, + struct ocelot_vcap_filter *filter) +{ + list_add(&filter->list, &ocelot->dummy_rules); + + return 0; +} + +static int ocelot_vcap_dummy_filter_del(struct ocelot *ocelot, + struct ocelot_vcap_filter *filter) +{ + list_del(&filter->list); + kfree(filter); + + return 0; +} + +/* If we have an egress VLAN modification rule, we need to actually write the + * delta between the input VLAN (from the key) and the output VLAN (from the + * action), but the action was parsed first. So we need to patch the delta into + * the action here. 
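For example, a rule matching VID 100 whose action rewrites the VLAN to
VID 200 ends up storing 100 in vid_a_val; at egress the hardware adds the
classified VID (100) back on top of that stored delta, producing 200.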
+ */ +static int +ocelot_flower_patch_es0_vlan_modify(struct ocelot_vcap_filter *filter, + struct netlink_ext_ack *extack) +{ + if (filter->block_id != VCAP_ES0 || + filter->action.tag_a_vid_sel != OCELOT_ES0_VID_PLUS_CLASSIFIED_VID) + return 0; + + if (filter->vlan.vid.mask != VLAN_VID_MASK) { + NL_SET_ERR_MSG_MOD(extack, + "VCAP ES0 VLAN rewriting needs a full VLAN in the key"); + return -EOPNOTSUPP; + } + + filter->action.vid_a_val -= filter->vlan.vid.value; + filter->action.vid_a_val &= VLAN_VID_MASK; + + return 0; +} + +int ocelot_cls_flower_replace(struct ocelot *ocelot, int port, + struct flow_cls_offload *f, bool ingress) +{ + struct netlink_ext_ack *extack = f->common.extack; + struct ocelot_vcap_filter *filter; + int chain = f->common.chain_index; + int block_id, ret; + + if (chain && !ocelot_find_vcap_filter_that_points_at(ocelot, chain)) { + NL_SET_ERR_MSG_MOD(extack, "No default GOTO action points to this chain"); + return -EOPNOTSUPP; + } + + block_id = ocelot_chain_to_block(chain, ingress); + if (block_id < 0) { + NL_SET_ERR_MSG_MOD(extack, "Cannot offload to this chain"); + return -EOPNOTSUPP; + } + + filter = ocelot_vcap_block_find_filter_by_id(&ocelot->block[block_id], + f->cookie, true); + if (filter) { + /* Filter already exists on other ports */ + if (!ingress) { + NL_SET_ERR_MSG_MOD(extack, "VCAP ES0 does not support shared filters"); + return -EOPNOTSUPP; + } + + filter->ingress_port_mask |= BIT(port); + + return ocelot_vcap_filter_replace(ocelot, filter); + } + + /* Filter didn't exist, create it now */ + filter = ocelot_vcap_filter_create(ocelot, port, ingress, f); + if (!filter) + return -ENOMEM; + + ret = ocelot_flower_parse(ocelot, port, ingress, f, filter); + if (ret) { + kfree(filter); + return ret; + } + + ret = ocelot_flower_patch_es0_vlan_modify(filter, extack); + if (ret) { + kfree(filter); + return ret; + } + + /* The non-optional GOTOs for the TCAM skeleton don't need + * to be actually offloaded. 
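They are instead kept on the software-only ocelot->dummy_rules list, so
that ocelot_find_vcap_filter_that_points_at() can later verify that some
GOTO action indeed points at the chain of a newly added filter.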
+ */ + if (filter->type == OCELOT_VCAP_FILTER_DUMMY) + return ocelot_vcap_dummy_filter_add(ocelot, filter); + + if (filter->type == OCELOT_PSFP_FILTER_OFFLOAD) { + kfree(filter); + if (ocelot->ops->psfp_filter_add) + return ocelot->ops->psfp_filter_add(ocelot, port, f); + + NL_SET_ERR_MSG_MOD(extack, "PSFP chain is not supported in HW"); + return -EOPNOTSUPP; + } + + return ocelot_vcap_filter_add(ocelot, filter, f->common.extack); +} +EXPORT_SYMBOL_GPL(ocelot_cls_flower_replace); + +int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port, + struct flow_cls_offload *f, bool ingress) +{ + struct ocelot_vcap_filter *filter; + struct ocelot_vcap_block *block; + int block_id; + + block_id = ocelot_chain_to_block(f->common.chain_index, ingress); + if (block_id < 0) + return 0; + + if (block_id == PSFP_BLOCK_ID) { + if (ocelot->ops->psfp_filter_del) + return ocelot->ops->psfp_filter_del(ocelot, f); + + return -EOPNOTSUPP; + } + + block = &ocelot->block[block_id]; + + filter = ocelot_vcap_block_find_filter_by_id(block, f->cookie, true); + if (!filter) + return 0; + + if (filter->type == OCELOT_VCAP_FILTER_DUMMY) + return ocelot_vcap_dummy_filter_del(ocelot, filter); + + if (ingress) { + filter->ingress_port_mask &= ~BIT(port); + if (filter->ingress_port_mask) + return ocelot_vcap_filter_replace(ocelot, filter); + } + + return ocelot_vcap_filter_del(ocelot, filter); +} +EXPORT_SYMBOL_GPL(ocelot_cls_flower_destroy); + +int ocelot_cls_flower_stats(struct ocelot *ocelot, int port, + struct flow_cls_offload *f, bool ingress) +{ + struct ocelot_vcap_filter *filter; + struct ocelot_vcap_block *block; + struct flow_stats stats = {0}; + int block_id, ret; + + block_id = ocelot_chain_to_block(f->common.chain_index, ingress); + if (block_id < 0) + return 0; + + if (block_id == PSFP_BLOCK_ID) { + if (ocelot->ops->psfp_stats_get) { + ret = ocelot->ops->psfp_stats_get(ocelot, f, &stats); + if (ret) + return ret; + + goto stats_update; + } + + return -EOPNOTSUPP; + } + + block = &ocelot->block[block_id]; + + filter = ocelot_vcap_block_find_filter_by_id(block, f->cookie, true); + if (!filter || filter->type == OCELOT_VCAP_FILTER_DUMMY) + return 0; + + ret = ocelot_vcap_filter_stats_update(ocelot, filter); + if (ret) + return ret; + + stats.pkts = filter->stats.pkts; + +stats_update: + flow_stats_update(&f->stats, 0x0, stats.pkts, stats.drops, 0x0, + FLOW_ACTION_HW_STATS_IMMEDIATE); + return 0; +} +EXPORT_SYMBOL_GPL(ocelot_cls_flower_stats); diff --git a/drivers/net/ethernet/mscc/ocelot_io.c b/drivers/net/ethernet/mscc/ocelot_io.c new file mode 100644 index 0000000000..3aa7dc29eb --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_io.c @@ -0,0 +1,168 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + */ +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> + +#include "ocelot.h" + +int __ocelot_bulk_read_ix(struct ocelot *ocelot, enum ocelot_reg reg, + u32 offset, void *buf, int count) +{ + enum ocelot_target target; + u32 addr; + + ocelot_reg_to_target_addr(ocelot, reg, &target, &addr); + WARN_ON(!target); + + return regmap_bulk_read(ocelot->targets[target], addr + offset, + buf, count); +} +EXPORT_SYMBOL_GPL(__ocelot_bulk_read_ix); + +u32 __ocelot_read_ix(struct ocelot *ocelot, enum ocelot_reg reg, u32 offset) +{ + enum ocelot_target target; + u32 addr, val; + + ocelot_reg_to_target_addr(ocelot, reg, &target, &addr); + WARN_ON(!target); + + regmap_read(ocelot->targets[target], addr + 
offset, &val); + return val; +} +EXPORT_SYMBOL_GPL(__ocelot_read_ix); + +void __ocelot_write_ix(struct ocelot *ocelot, u32 val, enum ocelot_reg reg, + u32 offset) +{ + enum ocelot_target target; + u32 addr; + + ocelot_reg_to_target_addr(ocelot, reg, &target, &addr); + WARN_ON(!target); + + regmap_write(ocelot->targets[target], addr + offset, val); +} +EXPORT_SYMBOL_GPL(__ocelot_write_ix); + +void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, + enum ocelot_reg reg, u32 offset) +{ + enum ocelot_target target; + u32 addr; + + ocelot_reg_to_target_addr(ocelot, reg, &target, &addr); + WARN_ON(!target); + + regmap_update_bits(ocelot->targets[target], addr + offset, mask, val); +} +EXPORT_SYMBOL_GPL(__ocelot_rmw_ix); + +u32 ocelot_port_readl(struct ocelot_port *port, enum ocelot_reg reg) +{ + struct ocelot *ocelot = port->ocelot; + u16 target = reg >> TARGET_OFFSET; + u32 val; + + WARN_ON(!target); + + regmap_read(port->target, ocelot->map[target][reg & REG_MASK], &val); + return val; +} +EXPORT_SYMBOL_GPL(ocelot_port_readl); + +void ocelot_port_writel(struct ocelot_port *port, u32 val, enum ocelot_reg reg) +{ + struct ocelot *ocelot = port->ocelot; + u16 target = reg >> TARGET_OFFSET; + + WARN_ON(!target); + + regmap_write(port->target, ocelot->map[target][reg & REG_MASK], val); +} +EXPORT_SYMBOL_GPL(ocelot_port_writel); + +void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, + enum ocelot_reg reg) +{ + u32 cur = ocelot_port_readl(port, reg); + + ocelot_port_writel(port, (cur & (~mask)) | val, reg); +} +EXPORT_SYMBOL_GPL(ocelot_port_rmwl); + +u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target, + u32 reg, u32 offset) +{ + u32 val; + + regmap_read(ocelot->targets[target], + ocelot->map[target][reg] + offset, &val); + return val; +} + +void __ocelot_target_write_ix(struct ocelot *ocelot, enum ocelot_target target, + u32 val, u32 reg, u32 offset) +{ + regmap_write(ocelot->targets[target], + ocelot->map[target][reg] + offset, val); +} + +int ocelot_regfields_init(struct ocelot *ocelot, + const struct reg_field *const regfields) +{ + unsigned int i; + u16 target; + + for (i = 0; i < REGFIELD_MAX; i++) { + struct reg_field regfield = {}; + u32 reg = regfields[i].reg; + + if (!reg) + continue; + + target = regfields[i].reg >> TARGET_OFFSET; + + regfield.reg = ocelot->map[target][reg & REG_MASK]; + regfield.lsb = regfields[i].lsb; + regfield.msb = regfields[i].msb; + regfield.id_size = regfields[i].id_size; + regfield.id_offset = regfields[i].id_offset; + + ocelot->regfields[i] = + devm_regmap_field_alloc(ocelot->dev, + ocelot->targets[target], + regfield); + + if (IS_ERR(ocelot->regfields[i])) + return PTR_ERR(ocelot->regfields[i]); + } + + return 0; +} +EXPORT_SYMBOL_GPL(ocelot_regfields_init); + +static struct regmap_config ocelot_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, +}; + +struct regmap *ocelot_regmap_init(struct ocelot *ocelot, struct resource *res) +{ + void __iomem *regs; + + regs = devm_ioremap_resource(ocelot->dev, res); + if (IS_ERR(regs)) + return ERR_CAST(regs); + + ocelot_regmap_config.name = res->name; + + return devm_regmap_init_mmio(ocelot->dev, regs, &ocelot_regmap_config); +} +EXPORT_SYMBOL_GPL(ocelot_regmap_init); diff --git a/drivers/net/ethernet/mscc/ocelot_mm.c b/drivers/net/ethernet/mscc/ocelot_mm.c new file mode 100644 index 0000000000..c815ae64e3 --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_mm.c @@ -0,0 +1,300 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Hardware library 
for MAC Merge Layer and Frame Preemption on TSN-capable + * switches (VSC9959) + * + * Copyright 2022-2023 NXP + */ +#include <linux/ethtool.h> +#include <soc/mscc/ocelot.h> +#include <soc/mscc/ocelot_dev.h> +#include <soc/mscc/ocelot_qsys.h> + +#include "ocelot.h" + +static const char * +mm_verify_state_to_string(enum ethtool_mm_verify_status state) +{ + switch (state) { + case ETHTOOL_MM_VERIFY_STATUS_INITIAL: + return "INITIAL"; + case ETHTOOL_MM_VERIFY_STATUS_VERIFYING: + return "VERIFYING"; + case ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED: + return "SUCCEEDED"; + case ETHTOOL_MM_VERIFY_STATUS_FAILED: + return "FAILED"; + case ETHTOOL_MM_VERIFY_STATUS_DISABLED: + return "DISABLED"; + default: + return "UNKNOWN"; + } +} + +static enum ethtool_mm_verify_status ocelot_mm_verify_status(u32 val) +{ + switch (DEV_MM_STAT_MM_STATUS_PRMPT_VERIFY_STATE_X(val)) { + case 0: + return ETHTOOL_MM_VERIFY_STATUS_INITIAL; + case 1: + return ETHTOOL_MM_VERIFY_STATUS_VERIFYING; + case 2: + return ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED; + case 3: + return ETHTOOL_MM_VERIFY_STATUS_FAILED; + case 4: + return ETHTOOL_MM_VERIFY_STATUS_DISABLED; + default: + return ETHTOOL_MM_VERIFY_STATUS_UNKNOWN; + } +} + +void ocelot_port_update_active_preemptible_tcs(struct ocelot *ocelot, int port) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct ocelot_mm_state *mm = &ocelot->mm[port]; + u32 val = 0; + + lockdep_assert_held(&ocelot->fwd_domain_lock); + + /* Only commit preemptible TCs when MAC Merge is active. + * On NXP LS1028A, when using QSGMII, the port hangs if transmitting + * preemptible frames at any other link speed than gigabit, so avoid + * preemption at lower speeds in this PHY mode. + */ + if ((ocelot_port->phy_mode != PHY_INTERFACE_MODE_QSGMII || + ocelot_port->speed == SPEED_1000) && mm->tx_active) + val = mm->preemptible_tcs; + + /* Cut through switching doesn't work for preemptible priorities, + * so first make sure it is disabled. Also, changing the preemptible + * TCs affects the oversized frame dropping logic, so that needs to be + * re-triggered. And since tas_guard_bands_update() also implicitly + * calls cut_through_fwd(), we don't need to explicitly call it. + */ + mm->active_preemptible_tcs = val; + ocelot->ops->tas_guard_bands_update(ocelot, port); + + dev_dbg(ocelot->dev, + "port %d %s/%s, MM TX %s, preemptible TCs 0x%x, active 0x%x\n", + port, phy_modes(ocelot_port->phy_mode), + phy_speed_to_str(ocelot_port->speed), + mm->tx_active ? 
"active" : "inactive", mm->preemptible_tcs, + mm->active_preemptible_tcs); + + ocelot_rmw_rix(ocelot, QSYS_PREEMPTION_CFG_P_QUEUES(val), + QSYS_PREEMPTION_CFG_P_QUEUES_M, + QSYS_PREEMPTION_CFG, port); +} + +void ocelot_port_change_fp(struct ocelot *ocelot, int port, + unsigned long preemptible_tcs) +{ + struct ocelot_mm_state *mm = &ocelot->mm[port]; + + lockdep_assert_held(&ocelot->fwd_domain_lock); + + if (mm->preemptible_tcs == preemptible_tcs) + return; + + mm->preemptible_tcs = preemptible_tcs; + + ocelot_port_update_active_preemptible_tcs(ocelot, port); +} + +static void ocelot_mm_update_port_status(struct ocelot *ocelot, int port) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct ocelot_mm_state *mm = &ocelot->mm[port]; + enum ethtool_mm_verify_status verify_status; + u32 val, ack = 0; + + if (!mm->tx_enabled) + return; + + val = ocelot_port_readl(ocelot_port, DEV_MM_STATUS); + + verify_status = ocelot_mm_verify_status(val); + if (mm->verify_status != verify_status) { + dev_dbg(ocelot->dev, + "Port %d MAC Merge verification state %s\n", + port, mm_verify_state_to_string(verify_status)); + mm->verify_status = verify_status; + } + + if (val & DEV_MM_STAT_MM_STATUS_PRMPT_ACTIVE_STICKY) { + mm->tx_active = !!(val & DEV_MM_STAT_MM_STATUS_PRMPT_ACTIVE_STATUS); + + dev_dbg(ocelot->dev, "Port %d TX preemption %s\n", + port, mm->tx_active ? "active" : "inactive"); + ocelot_port_update_active_preemptible_tcs(ocelot, port); + + ack |= DEV_MM_STAT_MM_STATUS_PRMPT_ACTIVE_STICKY; + } + + if (val & DEV_MM_STAT_MM_STATUS_UNEXP_RX_PFRM_STICKY) { + dev_err(ocelot->dev, + "Unexpected P-frame received on port %d while verification was unsuccessful or not yet verified\n", + port); + + ack |= DEV_MM_STAT_MM_STATUS_UNEXP_RX_PFRM_STICKY; + } + + if (val & DEV_MM_STAT_MM_STATUS_UNEXP_TX_PFRM_STICKY) { + dev_err(ocelot->dev, + "Unexpected P-frame requested to be transmitted on port %d while verification was unsuccessful or not yet verified, or MM_TX_ENA=0\n", + port); + + ack |= DEV_MM_STAT_MM_STATUS_UNEXP_TX_PFRM_STICKY; + } + + if (ack) + ocelot_port_writel(ocelot_port, ack, DEV_MM_STATUS); +} + +void ocelot_mm_irq(struct ocelot *ocelot) +{ + int port; + + mutex_lock(&ocelot->fwd_domain_lock); + + for (port = 0; port < ocelot->num_phys_ports; port++) + ocelot_mm_update_port_status(ocelot, port); + + mutex_unlock(&ocelot->fwd_domain_lock); +} +EXPORT_SYMBOL_GPL(ocelot_mm_irq); + +int ocelot_port_set_mm(struct ocelot *ocelot, int port, + struct ethtool_mm_cfg *cfg, + struct netlink_ext_ack *extack) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + u32 mm_enable = 0, verify_disable = 0, add_frag_size; + struct ocelot_mm_state *mm; + int err; + + if (!ocelot->mm_supported) + return -EOPNOTSUPP; + + mm = &ocelot->mm[port]; + + err = ethtool_mm_frag_size_min_to_add(cfg->tx_min_frag_size, + &add_frag_size, extack); + if (err) + return err; + + if (cfg->pmac_enabled) + mm_enable |= DEV_MM_CONFIG_ENABLE_CONFIG_MM_RX_ENA; + + if (cfg->tx_enabled) + mm_enable |= DEV_MM_CONFIG_ENABLE_CONFIG_MM_TX_ENA; + + if (!cfg->verify_enabled) + verify_disable = DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_DIS; + + mutex_lock(&ocelot->fwd_domain_lock); + + ocelot_port_rmwl(ocelot_port, mm_enable, + DEV_MM_CONFIG_ENABLE_CONFIG_MM_TX_ENA | + DEV_MM_CONFIG_ENABLE_CONFIG_MM_RX_ENA, + DEV_MM_ENABLE_CONFIG); + + ocelot_port_rmwl(ocelot_port, verify_disable | + DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME(cfg->verify_time), + DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_DIS | + DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME_M, 
+ DEV_MM_VERIF_CONFIG); + + ocelot_rmw_rix(ocelot, + QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE(add_frag_size), + QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE_M, + QSYS_PREEMPTION_CFG, + port); + + /* The switch will emit an IRQ when TX is disabled, to notify that it + * has become inactive. We optimize ocelot_mm_update_port_status() to + * not bother processing MM IRQs at all for ports with TX disabled, + * but we need to ACK this IRQ now, while mm->tx_enabled is still set, + * otherwise we get an IRQ storm. + */ + if (mm->tx_enabled && !cfg->tx_enabled) { + ocelot_mm_update_port_status(ocelot, port); + WARN_ON(mm->tx_active); + } + + mm->tx_enabled = cfg->tx_enabled; + + mutex_unlock(&ocelot->fwd_domain_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(ocelot_port_set_mm); + +int ocelot_port_get_mm(struct ocelot *ocelot, int port, + struct ethtool_mm_state *state) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct ocelot_mm_state *mm; + u32 val, add_frag_size; + + if (!ocelot->mm_supported) + return -EOPNOTSUPP; + + mm = &ocelot->mm[port]; + + mutex_lock(&ocelot->fwd_domain_lock); + + val = ocelot_port_readl(ocelot_port, DEV_MM_ENABLE_CONFIG); + state->pmac_enabled = !!(val & DEV_MM_CONFIG_ENABLE_CONFIG_MM_RX_ENA); + state->tx_enabled = !!(val & DEV_MM_CONFIG_ENABLE_CONFIG_MM_TX_ENA); + + val = ocelot_port_readl(ocelot_port, DEV_MM_VERIF_CONFIG); + state->verify_enabled = !(val & DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_DIS); + state->verify_time = DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME_X(val); + state->max_verify_time = 128; + + val = ocelot_read_rix(ocelot, QSYS_PREEMPTION_CFG, port); + add_frag_size = QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE_X(val); + state->tx_min_frag_size = ethtool_mm_frag_size_add_to_min(add_frag_size); + state->rx_min_frag_size = ETH_ZLEN; + + ocelot_mm_update_port_status(ocelot, port); + state->verify_status = mm->verify_status; + state->tx_active = mm->tx_active; + + mutex_unlock(&ocelot->fwd_domain_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(ocelot_port_get_mm); + +int ocelot_mm_init(struct ocelot *ocelot) +{ + struct ocelot_port *ocelot_port; + struct ocelot_mm_state *mm; + int port; + + if (!ocelot->mm_supported) + return 0; + + ocelot->mm = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports, + sizeof(*ocelot->mm), GFP_KERNEL); + if (!ocelot->mm) + return -ENOMEM; + + for (port = 0; port < ocelot->num_phys_ports; port++) { + u32 val; + + mm = &ocelot->mm[port]; + ocelot_port = ocelot->ports[port]; + + /* Update initial status variable for the + * verification state machine + */ + val = ocelot_port_readl(ocelot_port, DEV_MM_STATUS); + mm->verify_status = ocelot_mm_verify_status(val); + } + + return 0; +} diff --git a/drivers/net/ethernet/mscc/ocelot_mrp.c b/drivers/net/ethernet/mscc/ocelot_mrp.c new file mode 100644 index 0000000000..3ccec488a3 --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_mrp.c @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Microsemi Ocelot Switch driver + * + * Copyright (c) 2017, 2019 Microsemi Corporation + * Copyright 2020-2021 NXP + */ + +#include <linux/if_bridge.h> +#include <linux/mrp_bridge.h> +#include <soc/mscc/ocelot_vcap.h> +#include <uapi/linux/mrp_bridge.h> +#include "ocelot.h" +#include "ocelot_vcap.h" + +static const u8 mrp_test_dmac[] = { 0x01, 0x15, 0x4e, 0x00, 0x00, 0x01 }; +static const u8 mrp_control_dmac[] = { 0x01, 0x15, 0x4e, 0x00, 0x00, 0x02 }; + +static int ocelot_mrp_find_partner_port(struct ocelot *ocelot, + struct ocelot_port *p) +{ + int i; + + for (i = 0; i < ocelot->num_phys_ports; ++i) { + 
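/* Skip unused ports and the port whose partner we are looking for */ + 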
struct ocelot_port *ocelot_port = ocelot->ports[i]; + + if (!ocelot_port || p == ocelot_port) + continue; + + if (ocelot_port->mrp_ring_id == p->mrp_ring_id) + return i; + } + + return -1; +} + +static int ocelot_mrp_del_vcap(struct ocelot *ocelot, int id) +{ + struct ocelot_vcap_block *block_vcap_is2; + struct ocelot_vcap_filter *filter; + + block_vcap_is2 = &ocelot->block[VCAP_IS2]; + filter = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, id, + false); + if (!filter) + return 0; + + return ocelot_vcap_filter_del(ocelot, filter); +} + +static int ocelot_mrp_redirect_add_vcap(struct ocelot *ocelot, int src_port, + int dst_port) +{ + const u8 mrp_test_mask[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + struct ocelot_vcap_filter *filter; + int err; + + filter = kzalloc(sizeof(*filter), GFP_KERNEL); + if (!filter) + return -ENOMEM; + + filter->key_type = OCELOT_VCAP_KEY_ETYPE; + filter->prio = 1; + filter->id.cookie = OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, src_port); + filter->id.tc_offload = false; + filter->block_id = VCAP_IS2; + filter->type = OCELOT_VCAP_FILTER_OFFLOAD; + filter->ingress_port_mask = BIT(src_port); + ether_addr_copy(filter->key.etype.dmac.value, mrp_test_dmac); + ether_addr_copy(filter->key.etype.dmac.mask, mrp_test_mask); + filter->action.mask_mode = OCELOT_MASK_MODE_REDIRECT; + filter->action.port_mask = BIT(dst_port); + + err = ocelot_vcap_filter_add(ocelot, filter, NULL); + if (err) + kfree(filter); + + return err; +} + +static void ocelot_populate_mrp_trap_key(struct ocelot_vcap_filter *filter) +{ + const u8 mrp_mask[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; + + /* Here it is possible to use either the control or the test dmac, + * because the mask doesn't cover the LSB + */ + ether_addr_copy(filter->key.etype.dmac.value, mrp_test_dmac); + ether_addr_copy(filter->key.etype.dmac.mask, mrp_mask); +} + +static int ocelot_mrp_trap_add(struct ocelot *ocelot, int port) +{ + unsigned long cookie = OCELOT_VCAP_IS2_MRP_TRAP(ocelot); + + return ocelot_trap_add(ocelot, port, cookie, false, + ocelot_populate_mrp_trap_key); +} + +static int ocelot_mrp_trap_del(struct ocelot *ocelot, int port) +{ + unsigned long cookie = OCELOT_VCAP_IS2_MRP_TRAP(ocelot); + + return ocelot_trap_del(ocelot, port, cookie); +} + +static void ocelot_mrp_save_mac(struct ocelot *ocelot, + struct ocelot_port *port) +{ + ocelot_mact_learn(ocelot, PGID_BLACKHOLE, mrp_test_dmac, + OCELOT_STANDALONE_PVID, ENTRYTYPE_LOCKED); + ocelot_mact_learn(ocelot, PGID_BLACKHOLE, mrp_control_dmac, + OCELOT_STANDALONE_PVID, ENTRYTYPE_LOCKED); +} + +static void ocelot_mrp_del_mac(struct ocelot *ocelot, + struct ocelot_port *port) +{ + ocelot_mact_forget(ocelot, mrp_test_dmac, OCELOT_STANDALONE_PVID); + ocelot_mact_forget(ocelot, mrp_control_dmac, OCELOT_STANDALONE_PVID); +} + +int ocelot_mrp_add(struct ocelot *ocelot, int port, + const struct switchdev_obj_mrp *mrp) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct ocelot_port_private *priv; + struct net_device *dev; + + if (!ocelot_port) + return -EOPNOTSUPP; + + priv = container_of(ocelot_port, struct ocelot_port_private, port); + dev = priv->dev; + + if (mrp->p_port != dev && mrp->s_port != dev) + return 0; + + ocelot_port->mrp_ring_id = mrp->ring_id; + + return 0; +} +EXPORT_SYMBOL(ocelot_mrp_add); + +int ocelot_mrp_del(struct ocelot *ocelot, int port, + const struct switchdev_obj_mrp *mrp) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + if (!ocelot_port) + return -EOPNOTSUPP; + + if (ocelot_port->mrp_ring_id != mrp->ring_id) + return 0; + + 
ocelot_port->mrp_ring_id = 0; + + return 0; +} +EXPORT_SYMBOL(ocelot_mrp_del); + +int ocelot_mrp_add_ring_role(struct ocelot *ocelot, int port, + const struct switchdev_obj_ring_role_mrp *mrp) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + int dst_port; + int err; + + if (!ocelot_port) + return -EOPNOTSUPP; + + if (mrp->ring_role != BR_MRP_RING_ROLE_MRC && !mrp->sw_backup) + return -EOPNOTSUPP; + + if (ocelot_port->mrp_ring_id != mrp->ring_id) + return 0; + + ocelot_mrp_save_mac(ocelot, ocelot_port); + + if (mrp->ring_role != BR_MRP_RING_ROLE_MRC) + return ocelot_mrp_trap_add(ocelot, port); + + dst_port = ocelot_mrp_find_partner_port(ocelot, ocelot_port); + if (dst_port == -1) + return -EINVAL; + + err = ocelot_mrp_redirect_add_vcap(ocelot, port, dst_port); + if (err) + return err; + + err = ocelot_mrp_trap_add(ocelot, port); + if (err) { + ocelot_mrp_del_vcap(ocelot, + OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, port)); + return err; + } + + return 0; +} +EXPORT_SYMBOL(ocelot_mrp_add_ring_role); + +int ocelot_mrp_del_ring_role(struct ocelot *ocelot, int port, + const struct switchdev_obj_ring_role_mrp *mrp) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + int err, i; + + if (!ocelot_port) + return -EOPNOTSUPP; + + if (mrp->ring_role != BR_MRP_RING_ROLE_MRC && !mrp->sw_backup) + return -EOPNOTSUPP; + + if (ocelot_port->mrp_ring_id != mrp->ring_id) + return 0; + + err = ocelot_mrp_trap_del(ocelot, port); + if (err) + return err; + + ocelot_mrp_del_vcap(ocelot, OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, port)); + + for (i = 0; i < ocelot->num_phys_ports; ++i) { + ocelot_port = ocelot->ports[i]; + + if (!ocelot_port) + continue; + + if (ocelot_port->mrp_ring_id != 0) + goto out; + } + + ocelot_mrp_del_mac(ocelot, ocelot->ports[port]); +out: + return 0; +} +EXPORT_SYMBOL(ocelot_mrp_del_ring_role); diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c new file mode 100644 index 0000000000..21a87a3fc5 --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -0,0 +1,1868 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Microsemi Ocelot Switch driver + * + * This contains glue logic between the switchdev driver operations and the + * mscc_ocelot_switch_lib. 
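+ * It adapts the netdev, ethtool, tc, devlink and switchdev callbacks + * implemented here to calls into the common switch library.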
+ * + * Copyright (c) 2017, 2019 Microsemi Corporation + * Copyright 2020-2021 NXP + */ + +#include <linux/dsa/ocelot.h> +#include <linux/if_bridge.h> +#include <linux/of_net.h> +#include <linux/phy/phy.h> +#include <net/pkt_cls.h> +#include "ocelot.h" +#include "ocelot_police.h" +#include "ocelot_vcap.h" +#include "ocelot_fdma.h" + +#define OCELOT_MAC_QUIRKS OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP + +struct ocelot_dump_ctx { + struct net_device *dev; + struct sk_buff *skb; + struct netlink_callback *cb; + int idx; +}; + +static bool ocelot_netdevice_dev_check(const struct net_device *dev); + +static struct ocelot *devlink_port_to_ocelot(struct devlink_port *dlp) +{ + return devlink_priv(dlp->devlink); +} + +static int devlink_port_to_port(struct devlink_port *dlp) +{ + struct ocelot *ocelot = devlink_port_to_ocelot(dlp); + + return dlp - ocelot->devlink_ports; +} + +static int ocelot_devlink_sb_pool_get(struct devlink *dl, + unsigned int sb_index, u16 pool_index, + struct devlink_sb_pool_info *pool_info) +{ + struct ocelot *ocelot = devlink_priv(dl); + + return ocelot_sb_pool_get(ocelot, sb_index, pool_index, pool_info); +} + +static int ocelot_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index, + u16 pool_index, u32 size, + enum devlink_sb_threshold_type threshold_type, + struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = devlink_priv(dl); + + return ocelot_sb_pool_set(ocelot, sb_index, pool_index, size, + threshold_type, extack); +} + +static int ocelot_devlink_sb_port_pool_get(struct devlink_port *dlp, + unsigned int sb_index, u16 pool_index, + u32 *p_threshold) +{ + struct ocelot *ocelot = devlink_port_to_ocelot(dlp); + int port = devlink_port_to_port(dlp); + + return ocelot_sb_port_pool_get(ocelot, port, sb_index, pool_index, + p_threshold); +} + +static int ocelot_devlink_sb_port_pool_set(struct devlink_port *dlp, + unsigned int sb_index, u16 pool_index, + u32 threshold, + struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = devlink_port_to_ocelot(dlp); + int port = devlink_port_to_port(dlp); + + return ocelot_sb_port_pool_set(ocelot, port, sb_index, pool_index, + threshold, extack); +} + +static int +ocelot_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 *p_pool_index, u32 *p_threshold) +{ + struct ocelot *ocelot = devlink_port_to_ocelot(dlp); + int port = devlink_port_to_port(dlp); + + return ocelot_sb_tc_pool_bind_get(ocelot, port, sb_index, tc_index, + pool_type, p_pool_index, + p_threshold); +} + +static int +ocelot_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 pool_index, u32 threshold, + struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = devlink_port_to_ocelot(dlp); + int port = devlink_port_to_port(dlp); + + return ocelot_sb_tc_pool_bind_set(ocelot, port, sb_index, tc_index, + pool_type, pool_index, threshold, + extack); +} + +static int ocelot_devlink_sb_occ_snapshot(struct devlink *dl, + unsigned int sb_index) +{ + struct ocelot *ocelot = devlink_priv(dl); + + return ocelot_sb_occ_snapshot(ocelot, sb_index); +} + +static int ocelot_devlink_sb_occ_max_clear(struct devlink *dl, + unsigned int sb_index) +{ + struct ocelot *ocelot = devlink_priv(dl); + + return ocelot_sb_occ_max_clear(ocelot, sb_index); +} + +static int ocelot_devlink_sb_occ_port_pool_get(struct devlink_port *dlp, + unsigned int sb_index, + u16 pool_index, u32 *p_cur, + u32 *p_max) +{ + struct 
ocelot *ocelot = devlink_port_to_ocelot(dlp); + int port = devlink_port_to_port(dlp); + + return ocelot_sb_occ_port_pool_get(ocelot, port, sb_index, pool_index, + p_cur, p_max); +} + +static int +ocelot_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u32 *p_cur, u32 *p_max) +{ + struct ocelot *ocelot = devlink_port_to_ocelot(dlp); + int port = devlink_port_to_port(dlp); + + return ocelot_sb_occ_tc_port_bind_get(ocelot, port, sb_index, + tc_index, pool_type, + p_cur, p_max); +} + +const struct devlink_ops ocelot_devlink_ops = { + .sb_pool_get = ocelot_devlink_sb_pool_get, + .sb_pool_set = ocelot_devlink_sb_pool_set, + .sb_port_pool_get = ocelot_devlink_sb_port_pool_get, + .sb_port_pool_set = ocelot_devlink_sb_port_pool_set, + .sb_tc_pool_bind_get = ocelot_devlink_sb_tc_pool_bind_get, + .sb_tc_pool_bind_set = ocelot_devlink_sb_tc_pool_bind_set, + .sb_occ_snapshot = ocelot_devlink_sb_occ_snapshot, + .sb_occ_max_clear = ocelot_devlink_sb_occ_max_clear, + .sb_occ_port_pool_get = ocelot_devlink_sb_occ_port_pool_get, + .sb_occ_tc_port_bind_get = ocelot_devlink_sb_occ_tc_port_bind_get, +}; + +int ocelot_port_devlink_init(struct ocelot *ocelot, int port, + enum devlink_port_flavour flavour) +{ + struct devlink_port *dlp = &ocelot->devlink_ports[port]; + int id_len = sizeof(ocelot->base_mac); + struct devlink *dl = ocelot->devlink; + struct devlink_port_attrs attrs = {}; + + memset(dlp, 0, sizeof(*dlp)); + memcpy(attrs.switch_id.id, &ocelot->base_mac, id_len); + attrs.switch_id.id_len = id_len; + attrs.phys.port_number = port; + attrs.flavour = flavour; + + devlink_port_attrs_set(dlp, &attrs); + + return devlink_port_register(dl, dlp, port); +} + +void ocelot_port_devlink_teardown(struct ocelot *ocelot, int port) +{ + struct devlink_port *dlp = &ocelot->devlink_ports[port]; + + devlink_port_unregister(dlp); +} + +int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv, + struct flow_cls_offload *f, + bool ingress) +{ + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->port.index; + + if (!ingress) + return -EOPNOTSUPP; + + switch (f->command) { + case FLOW_CLS_REPLACE: + return ocelot_cls_flower_replace(ocelot, port, f, ingress); + case FLOW_CLS_DESTROY: + return ocelot_cls_flower_destroy(ocelot, port, f, ingress); + case FLOW_CLS_STATS: + return ocelot_cls_flower_stats(ocelot, port, f, ingress); + default: + return -EOPNOTSUPP; + } +} + +static int ocelot_setup_tc_cls_matchall_police(struct ocelot_port_private *priv, + struct tc_cls_matchall_offload *f, + bool ingress, + struct netlink_ext_ack *extack) +{ + struct flow_action_entry *action = &f->rule->action.entries[0]; + struct ocelot *ocelot = priv->port.ocelot; + struct ocelot_policer pol = { 0 }; + int port = priv->port.index; + int err; + + if (!ingress) { + NL_SET_ERR_MSG_MOD(extack, "Only ingress is supported"); + return -EOPNOTSUPP; + } + + if (priv->tc.police_id && priv->tc.police_id != f->cookie) { + NL_SET_ERR_MSG_MOD(extack, + "Only one policer per port is supported"); + return -EEXIST; + } + + err = ocelot_policer_validate(&f->rule->action, action, extack); + if (err) + return err; + + pol.rate = (u32)div_u64(action->police.rate_bytes_ps, 1000) * 8; + pol.burst = action->police.burst; + + err = ocelot_port_policer_add(ocelot, port, &pol); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Could not add policer"); + return err; + } + + priv->tc.police_id = f->cookie; + priv->tc.offload_cnt++; + + return 0; +} + +static int 
ocelot_setup_tc_cls_matchall_mirred(struct ocelot_port_private *priv, + struct tc_cls_matchall_offload *f, + bool ingress, + struct netlink_ext_ack *extack) +{ + struct flow_action *action = &f->rule->action; + struct ocelot *ocelot = priv->port.ocelot; + struct ocelot_port_private *other_priv; + const struct flow_action_entry *a; + int err; + + if (f->common.protocol != htons(ETH_P_ALL)) + return -EOPNOTSUPP; + + if (!flow_action_basic_hw_stats_check(action, extack)) + return -EOPNOTSUPP; + + a = &action->entries[0]; + if (!a->dev) + return -EINVAL; + + if (!ocelot_netdevice_dev_check(a->dev)) { + NL_SET_ERR_MSG_MOD(extack, + "Destination not an ocelot port"); + return -EOPNOTSUPP; + } + + other_priv = netdev_priv(a->dev); + + err = ocelot_port_mirror_add(ocelot, priv->port.index, + other_priv->port.index, ingress, extack); + if (err) + return err; + + if (ingress) + priv->tc.ingress_mirred_id = f->cookie; + else + priv->tc.egress_mirred_id = f->cookie; + priv->tc.offload_cnt++; + + return 0; +} + +static int ocelot_del_tc_cls_matchall_police(struct ocelot_port_private *priv, + struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->port.index; + int err; + + err = ocelot_port_policer_del(ocelot, port); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Could not delete policer"); + return err; + } + + priv->tc.police_id = 0; + priv->tc.offload_cnt--; + + return 0; +} + +static int ocelot_del_tc_cls_matchall_mirred(struct ocelot_port_private *priv, + bool ingress, + struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->port.index; + + ocelot_port_mirror_del(ocelot, port, ingress); + + if (ingress) + priv->tc.ingress_mirred_id = 0; + else + priv->tc.egress_mirred_id = 0; + priv->tc.offload_cnt--; + + return 0; +} + +static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv, + struct tc_cls_matchall_offload *f, + bool ingress) +{ + struct netlink_ext_ack *extack = f->common.extack; + struct flow_action_entry *action; + + switch (f->command) { + case TC_CLSMATCHALL_REPLACE: + if (!flow_offload_has_one_action(&f->rule->action)) { + NL_SET_ERR_MSG_MOD(extack, + "Only one action is supported"); + return -EOPNOTSUPP; + } + + if (priv->tc.block_shared) { + NL_SET_ERR_MSG_MOD(extack, + "Matchall offloads not supported on shared blocks"); + return -EOPNOTSUPP; + } + + action = &f->rule->action.entries[0]; + + switch (action->id) { + case FLOW_ACTION_POLICE: + return ocelot_setup_tc_cls_matchall_police(priv, f, + ingress, + extack); + break; + case FLOW_ACTION_MIRRED: + return ocelot_setup_tc_cls_matchall_mirred(priv, f, + ingress, + extack); + default: + NL_SET_ERR_MSG_MOD(extack, "Unsupported action"); + return -EOPNOTSUPP; + } + + break; + case TC_CLSMATCHALL_DESTROY: + action = &f->rule->action.entries[0]; + + if (f->cookie == priv->tc.police_id) + return ocelot_del_tc_cls_matchall_police(priv, extack); + else if (f->cookie == priv->tc.ingress_mirred_id || + f->cookie == priv->tc.egress_mirred_id) + return ocelot_del_tc_cls_matchall_mirred(priv, ingress, + extack); + else + return -ENOENT; + + break; + case TC_CLSMATCHALL_STATS: + default: + return -EOPNOTSUPP; + } +} + +static int ocelot_setup_tc_block_cb(enum tc_setup_type type, + void *type_data, + void *cb_priv, bool ingress) +{ + struct ocelot_port_private *priv = cb_priv; + + if (!tc_cls_can_offload_and_chain0(priv->dev, type_data)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSMATCHALL: + return 
ocelot_setup_tc_cls_matchall(priv, type_data, ingress); + case TC_SETUP_CLSFLOWER: + return ocelot_setup_tc_cls_flower(priv, type_data, ingress); + default: + return -EOPNOTSUPP; + } +} + +static int ocelot_setup_tc_block_cb_ig(enum tc_setup_type type, + void *type_data, + void *cb_priv) +{ + return ocelot_setup_tc_block_cb(type, type_data, + cb_priv, true); +} + +static int ocelot_setup_tc_block_cb_eg(enum tc_setup_type type, + void *type_data, + void *cb_priv) +{ + return ocelot_setup_tc_block_cb(type, type_data, + cb_priv, false); +} + +static LIST_HEAD(ocelot_block_cb_list); + +static int ocelot_setup_tc_block(struct ocelot_port_private *priv, + struct flow_block_offload *f) +{ + struct flow_block_cb *block_cb; + flow_setup_cb_t *cb; + + if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) { + cb = ocelot_setup_tc_block_cb_ig; + priv->tc.block_shared = f->block_shared; + } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { + cb = ocelot_setup_tc_block_cb_eg; + } else { + return -EOPNOTSUPP; + } + + f->driver_block_list = &ocelot_block_cb_list; + + switch (f->command) { + case FLOW_BLOCK_BIND: + if (flow_block_cb_is_busy(cb, priv, &ocelot_block_cb_list)) + return -EBUSY; + + block_cb = flow_block_cb_alloc(cb, priv, priv, NULL); + if (IS_ERR(block_cb)) + return PTR_ERR(block_cb); + + flow_block_cb_add(block_cb, f); + list_add_tail(&block_cb->driver_list, f->driver_block_list); + return 0; + case FLOW_BLOCK_UNBIND: + block_cb = flow_block_cb_lookup(f->block, cb, priv); + if (!block_cb) + return -ENOENT; + + flow_block_cb_remove(block_cb, f); + list_del(&block_cb->driver_list); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int ocelot_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + + switch (type) { + case TC_SETUP_BLOCK: + return ocelot_setup_tc_block(priv, type_data); + default: + return -EOPNOTSUPP; + } + return 0; +} + +static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid, + bool untagged) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->port.index; + int ret; + + ret = ocelot_vlan_add(ocelot, port, vid, pvid, untagged); + if (ret) + return ret; + + /* Add the port MAC address with the right VLAN information */ + ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, vid, + ENTRYTYPE_LOCKED); + + return 0; +} + +static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->port.index; + int ret; + + /* 8021q removes VID 0 on module unload for all interfaces + * with VLAN filtering feature. We need to keep it to receive + * untagged traffic. 
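+ * OCELOT_STANDALONE_PVID is that VID 0, hence the early return below.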
+ */ + if (vid == OCELOT_STANDALONE_PVID) + return 0; + + ret = ocelot_vlan_del(ocelot, port, vid); + if (ret) + return ret; + + /* Remove the port MAC address with the right VLAN information */ + ocelot_mact_forget(ocelot, dev->dev_addr, vid); + + return 0; +} + +static int ocelot_port_open(struct net_device *dev) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + + phylink_start(priv->phylink); + + return 0; +} + +static int ocelot_port_stop(struct net_device *dev) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + + phylink_stop(priv->phylink); + + return 0; +} + +static netdev_tx_t ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->port.index; + u32 rew_op = 0; + + if (!static_branch_unlikely(&ocelot_fdma_enabled) && + !ocelot_can_inject(ocelot, 0)) + return NETDEV_TX_BUSY; + + /* Check if timestamping is needed */ + if (ocelot->ptp && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + struct sk_buff *clone = NULL; + + if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone)) { + kfree_skb(skb); + return NETDEV_TX_OK; + } + + if (clone) + OCELOT_SKB_CB(skb)->clone = clone; + + rew_op = ocelot_ptp_rew_op(skb); + } + + if (static_branch_unlikely(&ocelot_fdma_enabled)) { + ocelot_fdma_inject_frame(ocelot, port, rew_op, skb, dev); + } else { + ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb); + + consume_skb(skb); + } + + return NETDEV_TX_OK; +} + +enum ocelot_action_type { + OCELOT_MACT_LEARN, + OCELOT_MACT_FORGET, +}; + +struct ocelot_mact_work_ctx { + struct work_struct work; + struct ocelot *ocelot; + enum ocelot_action_type type; + union { + /* OCELOT_MACT_LEARN */ + struct { + unsigned char addr[ETH_ALEN]; + u16 vid; + enum macaccess_entry_type entry_type; + int pgid; + } learn; + /* OCELOT_MACT_FORGET */ + struct { + unsigned char addr[ETH_ALEN]; + u16 vid; + } forget; + }; +}; + +#define ocelot_work_to_ctx(x) \ + container_of((x), struct ocelot_mact_work_ctx, work) + +static void ocelot_mact_work(struct work_struct *work) +{ + struct ocelot_mact_work_ctx *w = ocelot_work_to_ctx(work); + struct ocelot *ocelot = w->ocelot; + + switch (w->type) { + case OCELOT_MACT_LEARN: + ocelot_mact_learn(ocelot, w->learn.pgid, w->learn.addr, + w->learn.vid, w->learn.entry_type); + break; + case OCELOT_MACT_FORGET: + ocelot_mact_forget(ocelot, w->forget.addr, w->forget.vid); + break; + default: + break; + } + + kfree(w); +} + +static int ocelot_enqueue_mact_action(struct ocelot *ocelot, + const struct ocelot_mact_work_ctx *ctx) +{ + struct ocelot_mact_work_ctx *w = kmemdup(ctx, sizeof(*w), GFP_ATOMIC); + + if (!w) + return -ENOMEM; + + w->ocelot = ocelot; + INIT_WORK(&w->work, ocelot_mact_work); + queue_work(ocelot->owq, &w->work); + + return 0; +} + +static int ocelot_mc_unsync(struct net_device *dev, const unsigned char *addr) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + struct ocelot_mact_work_ctx w; + + ether_addr_copy(w.forget.addr, addr); + w.forget.vid = OCELOT_STANDALONE_PVID; + w.type = OCELOT_MACT_FORGET; + + return ocelot_enqueue_mact_action(ocelot, &w); +} + +static int ocelot_mc_sync(struct net_device *dev, const unsigned char *addr) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = 
ocelot_port->ocelot; + struct ocelot_mact_work_ctx w; + + ether_addr_copy(w.learn.addr, addr); + w.learn.vid = OCELOT_STANDALONE_PVID; + w.learn.pgid = PGID_CPU; + w.learn.entry_type = ENTRYTYPE_LOCKED; + w.type = OCELOT_MACT_LEARN; + + return ocelot_enqueue_mact_action(ocelot, &w); +} + +static void ocelot_set_rx_mode(struct net_device *dev) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + u32 val; + int i; + + /* This doesn't handle promiscuous mode because the bridge core is + * setting IFF_PROMISC on all slave interfaces and all frames would be + * forwarded to the CPU port. + */ + val = GENMASK(ocelot->num_phys_ports - 1, 0); + for_each_nonreserved_multicast_dest_pgid(ocelot, i) + ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i); + + __dev_mc_sync(dev, ocelot_mc_sync, ocelot_mc_unsync); +} + +static int ocelot_port_set_mac_address(struct net_device *dev, void *p) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + const struct sockaddr *addr = p; + + /* Learn the new net device MAC address in the mac table. */ + ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data, + OCELOT_STANDALONE_PVID, ENTRYTYPE_LOCKED); + /* Then forget the previous one. */ + ocelot_mact_forget(ocelot, dev->dev_addr, OCELOT_STANDALONE_PVID); + + eth_hw_addr_set(dev, addr->sa_data); + return 0; +} + +static void ocelot_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->port.index; + + return ocelot_port_get_stats64(ocelot, port, stats); +} + +static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, + u16 vid, u16 flags, + struct netlink_ext_ack *extack) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->port.index; + + return ocelot_fdb_add(ocelot, port, addr, vid, ocelot_port->bridge); +} + +static int ocelot_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 vid, + struct netlink_ext_ack *extack) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->port.index; + + return ocelot_fdb_del(ocelot, port, addr, vid, ocelot_port->bridge); +} + +static int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid, + bool is_static, void *data) +{ + struct ocelot_dump_ctx *dump = data; + u32 portid = NETLINK_CB(dump->cb->skb).portid; + u32 seq = dump->cb->nlh->nlmsg_seq; + struct nlmsghdr *nlh; + struct ndmsg *ndm; + + if (dump->idx < dump->cb->args[2]) + goto skip; + + nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH, + sizeof(*ndm), NLM_F_MULTI); + if (!nlh) + return -EMSGSIZE; + + ndm = nlmsg_data(nlh); + ndm->ndm_family = AF_BRIDGE; + ndm->ndm_pad1 = 0; + ndm->ndm_pad2 = 0; + ndm->ndm_flags = NTF_SELF; + ndm->ndm_type = 0; + ndm->ndm_ifindex = dump->dev->ifindex; + ndm->ndm_state = is_static ? 
NUD_NOARP : NUD_REACHABLE; + + if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr)) + goto nla_put_failure; + + if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid)) + goto nla_put_failure; + + nlmsg_end(dump->skb, nlh); + +skip: + dump->idx++; + return 0; + +nla_put_failure: + nlmsg_cancel(dump->skb, nlh); + return -EMSGSIZE; +} + +static int ocelot_port_fdb_dump(struct sk_buff *skb, + struct netlink_callback *cb, + struct net_device *dev, + struct net_device *filter_dev, int *idx) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + struct ocelot_dump_ctx dump = { + .dev = dev, + .skb = skb, + .cb = cb, + .idx = *idx, + }; + int port = priv->port.index; + int ret; + + ret = ocelot_fdb_dump(ocelot, port, ocelot_port_fdb_do_dump, &dump); + + *idx = dump.idx; + + return ret; +} + +static int ocelot_vlan_rx_add_vid(struct net_device *dev, __be16 proto, + u16 vid) +{ + return ocelot_vlan_vid_add(dev, vid, false, false); +} + +static int ocelot_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, + u16 vid) +{ + return ocelot_vlan_vid_del(dev, vid); +} + +static void ocelot_vlan_mode(struct ocelot *ocelot, int port, + netdev_features_t features) +{ + u32 val; + + /* Filtering */ + val = ocelot_read(ocelot, ANA_VLANMASK); + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) + val |= BIT(port); + else + val &= ~BIT(port); + ocelot_write(ocelot, val, ANA_VLANMASK); +} + +static int ocelot_set_features(struct net_device *dev, + netdev_features_t features) +{ + netdev_features_t changed = dev->features ^ features; + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->port.index; + + if ((dev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) && + priv->tc.offload_cnt) { + netdev_err(dev, + "Cannot disable HW TC offload while offloads active\n"); + return -EBUSY; + } + + if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) + ocelot_vlan_mode(ocelot, port, features); + + return 0; +} + +static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->port.index; + + /* If the attached PHY device isn't capable of timestamping operations, + * use our own (when possible). 
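+ * "When possible" means only when the switch itself has a PTP clock + * (ocelot->ptp); in all other cases the ioctl is passed through to + * phy_mii_ioctl().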
+ */ + if (!phy_has_hwtstamp(dev->phydev) && ocelot->ptp) { + switch (cmd) { + case SIOCSHWTSTAMP: + return ocelot_hwstamp_set(ocelot, port, ifr); + case SIOCGHWTSTAMP: + return ocelot_hwstamp_get(ocelot, port, ifr); + } + } + + return phy_mii_ioctl(dev->phydev, ifr, cmd); +} + +static int ocelot_change_mtu(struct net_device *dev, int new_mtu) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + + ocelot_port_set_maxlen(ocelot, priv->port.index, new_mtu); + WRITE_ONCE(dev->mtu, new_mtu); + + return 0; +} + +static const struct net_device_ops ocelot_port_netdev_ops = { + .ndo_open = ocelot_port_open, + .ndo_stop = ocelot_port_stop, + .ndo_start_xmit = ocelot_port_xmit, + .ndo_change_mtu = ocelot_change_mtu, + .ndo_set_rx_mode = ocelot_set_rx_mode, + .ndo_set_mac_address = ocelot_port_set_mac_address, + .ndo_get_stats64 = ocelot_get_stats64, + .ndo_fdb_add = ocelot_port_fdb_add, + .ndo_fdb_del = ocelot_port_fdb_del, + .ndo_fdb_dump = ocelot_port_fdb_dump, + .ndo_vlan_rx_add_vid = ocelot_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ocelot_vlan_rx_kill_vid, + .ndo_set_features = ocelot_set_features, + .ndo_setup_tc = ocelot_setup_tc, + .ndo_eth_ioctl = ocelot_ioctl, +}; + +struct net_device *ocelot_port_to_netdev(struct ocelot *ocelot, int port) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct ocelot_port_private *priv; + + if (!ocelot_port) + return NULL; + + priv = container_of(ocelot_port, struct ocelot_port_private, port); + + return priv->dev; +} + +/* Checks if the net_device instance given to us originates from our driver */ +static bool ocelot_netdevice_dev_check(const struct net_device *dev) +{ + return dev->netdev_ops == &ocelot_port_netdev_ops; +} + +int ocelot_netdev_to_port(struct net_device *dev) +{ + struct ocelot_port_private *priv; + + if (!dev || !ocelot_netdevice_dev_check(dev)) + return -EINVAL; + + priv = netdev_priv(dev); + + return priv->port.index; +} + +static void ocelot_port_get_strings(struct net_device *netdev, u32 sset, + u8 *data) +{ + struct ocelot_port_private *priv = netdev_priv(netdev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->port.index; + + ocelot_get_strings(ocelot, port, sset, data); +} + +static void ocelot_port_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, + u64 *data) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->port.index; + + ocelot_get_ethtool_stats(ocelot, port, data); +} + +static int ocelot_port_get_sset_count(struct net_device *dev, int sset) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->port.index; + + return ocelot_get_sset_count(ocelot, port, sset); +} + +static int ocelot_port_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->port.index; + + if (!ocelot->ptp) + return ethtool_op_get_ts_info(dev, info); + + return ocelot_get_ts_info(ocelot, port, info); +} + +static const struct ethtool_ops ocelot_ethtool_ops = { + .get_strings = ocelot_port_get_strings, + .get_ethtool_stats = ocelot_port_get_ethtool_stats, + .get_sset_count = ocelot_port_get_sset_count, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_ts_info = 
ocelot_port_get_ts_info, +}; + +static void ocelot_port_attr_stp_state_set(struct ocelot *ocelot, int port, + u8 state) +{ + ocelot_bridge_stp_state_set(ocelot, port, state); +} + +static void ocelot_port_attr_ageing_set(struct ocelot *ocelot, int port, + unsigned long ageing_clock_t) +{ + unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t); + u32 ageing_time = jiffies_to_msecs(ageing_jiffies); + + ocelot_set_ageing_time(ocelot, ageing_time); +} + +static void ocelot_port_attr_mc_set(struct ocelot *ocelot, int port, bool mc) +{ + u32 cpu_fwd_mcast = ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA | + ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA | + ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA; + u32 val = 0; + + if (mc) + val = cpu_fwd_mcast; + + ocelot_rmw_gix(ocelot, val, cpu_fwd_mcast, + ANA_PORT_CPU_FWD_CFG, port); +} + +static int ocelot_port_attr_set(struct net_device *dev, const void *ctx, + const struct switchdev_attr *attr, + struct netlink_ext_ack *extack) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->port.index; + int err = 0; + + if (ctx && ctx != priv) + return 0; + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_STP_STATE: + ocelot_port_attr_stp_state_set(ocelot, port, attr->u.stp_state); + break; + case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: + ocelot_port_attr_ageing_set(ocelot, port, attr->u.ageing_time); + break; + case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: + ocelot_port_vlan_filtering(ocelot, port, attr->u.vlan_filtering, + extack); + break; + case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED: + ocelot_port_attr_mc_set(ocelot, port, !attr->u.mc_disabled); + break; + case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: + err = ocelot_port_pre_bridge_flags(ocelot, port, + attr->u.brport_flags); + break; + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: + ocelot_port_bridge_flags(ocelot, port, attr->u.brport_flags); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int ocelot_vlan_vid_prepare(struct net_device *dev, u16 vid, bool pvid, + bool untagged, struct netlink_ext_ack *extack) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->port.index; + + return ocelot_vlan_prepare(ocelot, port, vid, pvid, untagged, extack); +} + +static int ocelot_port_obj_add_vlan(struct net_device *dev, + const struct switchdev_obj_port_vlan *vlan, + struct netlink_ext_ack *extack) +{ + bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; + int ret; + + ret = ocelot_vlan_vid_prepare(dev, vlan->vid, pvid, untagged, extack); + if (ret) + return ret; + + return ocelot_vlan_vid_add(dev, vlan->vid, pvid, untagged); +} + +static int ocelot_port_obj_add_mdb(struct net_device *dev, + const struct switchdev_obj_port_mdb *mdb) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->port.index; + + return ocelot_port_mdb_add(ocelot, port, mdb, ocelot_port->bridge); +} + +static int ocelot_port_obj_del_mdb(struct net_device *dev, + const struct switchdev_obj_port_mdb *mdb) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->port.index; + + return ocelot_port_mdb_del(ocelot, port, mdb, ocelot_port->bridge); 
+} + +static int ocelot_port_obj_mrp_add(struct net_device *dev, + const struct switchdev_obj_mrp *mrp) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->port.index; + + return ocelot_mrp_add(ocelot, port, mrp); +} + +static int ocelot_port_obj_mrp_del(struct net_device *dev, + const struct switchdev_obj_mrp *mrp) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->port.index; + + return ocelot_mrp_del(ocelot, port, mrp); +} + +static int +ocelot_port_obj_mrp_add_ring_role(struct net_device *dev, + const struct switchdev_obj_ring_role_mrp *mrp) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->port.index; + + return ocelot_mrp_add_ring_role(ocelot, port, mrp); +} + +static int +ocelot_port_obj_mrp_del_ring_role(struct net_device *dev, + const struct switchdev_obj_ring_role_mrp *mrp) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->port.index; + + return ocelot_mrp_del_ring_role(ocelot, port, mrp); +} + +static int ocelot_port_obj_add(struct net_device *dev, const void *ctx, + const struct switchdev_obj *obj, + struct netlink_ext_ack *extack) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + int ret = 0; + + if (ctx && ctx != priv) + return 0; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + ret = ocelot_port_obj_add_vlan(dev, + SWITCHDEV_OBJ_PORT_VLAN(obj), + extack); + break; + case SWITCHDEV_OBJ_ID_PORT_MDB: + ret = ocelot_port_obj_add_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj)); + break; + case SWITCHDEV_OBJ_ID_MRP: + ret = ocelot_port_obj_mrp_add(dev, SWITCHDEV_OBJ_MRP(obj)); + break; + case SWITCHDEV_OBJ_ID_RING_ROLE_MRP: + ret = ocelot_port_obj_mrp_add_ring_role(dev, + SWITCHDEV_OBJ_RING_ROLE_MRP(obj)); + break; + default: + return -EOPNOTSUPP; + } + + return ret; +} + +static int ocelot_port_obj_del(struct net_device *dev, const void *ctx, + const struct switchdev_obj *obj) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + int ret = 0; + + if (ctx && ctx != priv) + return 0; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + ret = ocelot_vlan_vid_del(dev, + SWITCHDEV_OBJ_PORT_VLAN(obj)->vid); + break; + case SWITCHDEV_OBJ_ID_PORT_MDB: + ret = ocelot_port_obj_del_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj)); + break; + case SWITCHDEV_OBJ_ID_MRP: + ret = ocelot_port_obj_mrp_del(dev, SWITCHDEV_OBJ_MRP(obj)); + break; + case SWITCHDEV_OBJ_ID_RING_ROLE_MRP: + ret = ocelot_port_obj_mrp_del_ring_role(dev, + SWITCHDEV_OBJ_RING_ROLE_MRP(obj)); + break; + default: + return -EOPNOTSUPP; + } + + return ret; +} + +static void ocelot_inherit_brport_flags(struct ocelot *ocelot, int port, + struct net_device *brport_dev) +{ + struct switchdev_brport_flags flags = {0}; + int flag; + + flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD; + + for_each_set_bit(flag, &flags.mask, 32) + if (br_port_flag_is_set(brport_dev, BIT(flag))) + flags.val |= BIT(flag); + + ocelot_port_bridge_flags(ocelot, port, flags); +} + +static void ocelot_clear_brport_flags(struct ocelot *ocelot, int port) +{ + struct switchdev_brport_flags flags; + + flags.mask = BR_LEARNING | BR_FLOOD | 
BR_MCAST_FLOOD | BR_BCAST_FLOOD; + flags.val = flags.mask & ~BR_LEARNING; + + ocelot_port_bridge_flags(ocelot, port, flags); +} + +static int ocelot_switchdev_sync(struct ocelot *ocelot, int port, + struct net_device *brport_dev, + struct net_device *bridge_dev, + struct netlink_ext_ack *extack) +{ + clock_t ageing_time; + u8 stp_state; + + ocelot_inherit_brport_flags(ocelot, port, brport_dev); + + stp_state = br_port_get_stp_state(brport_dev); + ocelot_bridge_stp_state_set(ocelot, port, stp_state); + + ageing_time = br_get_ageing_time(bridge_dev); + ocelot_port_attr_ageing_set(ocelot, port, ageing_time); + + return ocelot_port_vlan_filtering(ocelot, port, + br_vlan_enabled(bridge_dev), + extack); +} + +static int ocelot_switchdev_unsync(struct ocelot *ocelot, int port) +{ + int err; + + err = ocelot_port_vlan_filtering(ocelot, port, false, NULL); + if (err) + return err; + + ocelot_clear_brport_flags(ocelot, port); + + ocelot_bridge_stp_state_set(ocelot, port, BR_STATE_FORWARDING); + + return 0; +} + +static int ocelot_bridge_num_get(struct ocelot *ocelot, + const struct net_device *bridge_dev) +{ + int bridge_num = ocelot_bridge_num_find(ocelot, bridge_dev); + + if (bridge_num < 0) { + /* First port that offloads this bridge */ + bridge_num = find_first_zero_bit(&ocelot->bridges, + ocelot->num_phys_ports); + + set_bit(bridge_num, &ocelot->bridges); + } + + return bridge_num; +} + +static void ocelot_bridge_num_put(struct ocelot *ocelot, + const struct net_device *bridge_dev, + int bridge_num) +{ + /* Check if the bridge is still in use, otherwise it is time + * to clean it up so we can reuse this bridge_num later. + */ + if (!ocelot_bridge_num_find(ocelot, bridge_dev)) + clear_bit(bridge_num, &ocelot->bridges); +} + +static int ocelot_netdevice_bridge_join(struct net_device *dev, + struct net_device *brport_dev, + struct net_device *bridge, + struct netlink_ext_ack *extack) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->port.index; + int bridge_num, err; + + bridge_num = ocelot_bridge_num_get(ocelot, bridge); + + err = ocelot_port_bridge_join(ocelot, port, bridge, bridge_num, + extack); + if (err) + goto err_join; + + err = switchdev_bridge_port_offload(brport_dev, dev, priv, + &ocelot_switchdev_nb, + &ocelot_switchdev_blocking_nb, + false, extack); + if (err) + goto err_switchdev_offload; + + err = ocelot_switchdev_sync(ocelot, port, brport_dev, bridge, extack); + if (err) + goto err_switchdev_sync; + + return 0; + +err_switchdev_sync: + switchdev_bridge_port_unoffload(brport_dev, priv, + &ocelot_switchdev_nb, + &ocelot_switchdev_blocking_nb); +err_switchdev_offload: + ocelot_port_bridge_leave(ocelot, port, bridge); +err_join: + ocelot_bridge_num_put(ocelot, bridge, bridge_num); + return err; +} + +static void ocelot_netdevice_pre_bridge_leave(struct net_device *dev, + struct net_device *brport_dev) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + + switchdev_bridge_port_unoffload(brport_dev, priv, + &ocelot_switchdev_nb, + &ocelot_switchdev_blocking_nb); +} + +static int ocelot_netdevice_bridge_leave(struct net_device *dev, + struct net_device *brport_dev, + struct net_device *bridge) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int bridge_num = ocelot_port->bridge_num; + int port = priv->port.index; + int err; + + err = 
ocelot_switchdev_unsync(ocelot, port); + if (err) + return err; + + ocelot_port_bridge_leave(ocelot, port, bridge); + ocelot_bridge_num_put(ocelot, bridge, bridge_num); + + return 0; +} + +static int ocelot_netdevice_lag_join(struct net_device *dev, + struct net_device *bond, + struct netdev_lag_upper_info *info, + struct netlink_ext_ack *extack) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + struct net_device *bridge_dev; + int port = priv->port.index; + int err; + + err = ocelot_port_lag_join(ocelot, port, bond, info, extack); + if (err == -EOPNOTSUPP) + /* Offloading not supported, fall back to software LAG */ + return 0; + + bridge_dev = netdev_master_upper_dev_get(bond); + if (!bridge_dev || !netif_is_bridge_master(bridge_dev)) + return 0; + + err = ocelot_netdevice_bridge_join(dev, bond, bridge_dev, extack); + if (err) + goto err_bridge_join; + + return 0; + +err_bridge_join: + ocelot_port_lag_leave(ocelot, port, bond); + return err; +} + +static void ocelot_netdevice_pre_lag_leave(struct net_device *dev, + struct net_device *bond) +{ + struct net_device *bridge_dev; + + bridge_dev = netdev_master_upper_dev_get(bond); + if (!bridge_dev || !netif_is_bridge_master(bridge_dev)) + return; + + ocelot_netdevice_pre_bridge_leave(dev, bond); +} + +static int ocelot_netdevice_lag_leave(struct net_device *dev, + struct net_device *bond) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + struct net_device *bridge_dev; + int port = priv->port.index; + + ocelot_port_lag_leave(ocelot, port, bond); + + bridge_dev = netdev_master_upper_dev_get(bond); + if (!bridge_dev || !netif_is_bridge_master(bridge_dev)) + return 0; + + return ocelot_netdevice_bridge_leave(dev, bond, bridge_dev); +} + +static int ocelot_netdevice_changeupper(struct net_device *dev, + struct net_device *brport_dev, + struct netdev_notifier_changeupper_info *info) +{ + struct netlink_ext_ack *extack; + int err = 0; + + extack = netdev_notifier_info_to_extack(&info->info); + + if (netif_is_bridge_master(info->upper_dev)) { + if (info->linking) + err = ocelot_netdevice_bridge_join(dev, brport_dev, + info->upper_dev, + extack); + else + err = ocelot_netdevice_bridge_leave(dev, brport_dev, + info->upper_dev); + } + if (netif_is_lag_master(info->upper_dev)) { + if (info->linking) + err = ocelot_netdevice_lag_join(dev, info->upper_dev, + info->upper_info, extack); + else + ocelot_netdevice_lag_leave(dev, info->upper_dev); + } + + return notifier_from_errno(err); +} + +/* Treat CHANGEUPPER events on an offloaded LAG as individual CHANGEUPPER + * events for the lower physical ports of the LAG. + * If the LAG upper isn't offloaded, ignore its CHANGEUPPER events. + * In case the LAG joined a bridge, notify that we are offloading it and can do + * forwarding in hardware towards it. 
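+ * An error from any lower port stops the walk over the remaining + * lower ports.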
+ */ +static int +ocelot_netdevice_lag_changeupper(struct net_device *dev, + struct netdev_notifier_changeupper_info *info) +{ + struct net_device *lower; + struct list_head *iter; + int err = NOTIFY_DONE; + + netdev_for_each_lower_dev(dev, lower, iter) { + struct ocelot_port_private *priv = netdev_priv(lower); + struct ocelot_port *ocelot_port = &priv->port; + + if (ocelot_port->bond != dev) + return NOTIFY_OK; + + err = ocelot_netdevice_changeupper(lower, dev, info); + if (err) + return notifier_from_errno(err); + } + + return NOTIFY_DONE; +} + +static int +ocelot_netdevice_prechangeupper(struct net_device *dev, + struct net_device *brport_dev, + struct netdev_notifier_changeupper_info *info) +{ + if (netif_is_bridge_master(info->upper_dev) && !info->linking) + ocelot_netdevice_pre_bridge_leave(dev, brport_dev); + + if (netif_is_lag_master(info->upper_dev) && !info->linking) + ocelot_netdevice_pre_lag_leave(dev, info->upper_dev); + + return NOTIFY_DONE; +} + +static int +ocelot_netdevice_lag_prechangeupper(struct net_device *dev, + struct netdev_notifier_changeupper_info *info) +{ + struct net_device *lower; + struct list_head *iter; + int err = NOTIFY_DONE; + + netdev_for_each_lower_dev(dev, lower, iter) { + struct ocelot_port_private *priv = netdev_priv(lower); + struct ocelot_port *ocelot_port = &priv->port; + + if (ocelot_port->bond != dev) + return NOTIFY_OK; + + err = ocelot_netdevice_prechangeupper(dev, lower, info); + if (err) + return err; + } + + return NOTIFY_DONE; +} + +static int +ocelot_netdevice_changelowerstate(struct net_device *dev, + struct netdev_lag_lower_state_info *info) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + bool is_active = info->link_up && info->tx_enabled; + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->port.index; + + if (!ocelot_port->bond) + return NOTIFY_DONE; + + if (ocelot_port->lag_tx_active == is_active) + return NOTIFY_DONE; + + ocelot_port_lag_change(ocelot, port, is_active); + + return NOTIFY_OK; +} + +static int ocelot_netdevice_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + + switch (event) { + case NETDEV_PRECHANGEUPPER: { + struct netdev_notifier_changeupper_info *info = ptr; + + if (ocelot_netdevice_dev_check(dev)) + return ocelot_netdevice_prechangeupper(dev, dev, info); + + if (netif_is_lag_master(dev)) + return ocelot_netdevice_lag_prechangeupper(dev, info); + + break; + } + case NETDEV_CHANGEUPPER: { + struct netdev_notifier_changeupper_info *info = ptr; + + if (ocelot_netdevice_dev_check(dev)) + return ocelot_netdevice_changeupper(dev, dev, info); + + if (netif_is_lag_master(dev)) + return ocelot_netdevice_lag_changeupper(dev, info); + + break; + } + case NETDEV_CHANGELOWERSTATE: { + struct netdev_notifier_changelowerstate_info *info = ptr; + + if (!ocelot_netdevice_dev_check(dev)) + break; + + return ocelot_netdevice_changelowerstate(dev, + info->lower_state_info); + } + default: + break; + } + + return NOTIFY_DONE; +} + +struct notifier_block ocelot_netdevice_nb __read_mostly = { + .notifier_call = ocelot_netdevice_event, +}; + +static int ocelot_switchdev_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + int err; + + switch (event) { + case SWITCHDEV_PORT_ATTR_SET: + err = switchdev_handle_port_attr_set(dev, ptr, + ocelot_netdevice_dev_check, + ocelot_port_attr_set); + 
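+		/* notifier_from_errno() folds a negative errno into a
+		 * NOTIFY_STOP_MASK-flagged return value (and maps zero to
+		 * NOTIFY_OK), so the notifier core can recover the error
+		 * with notifier_to_errno().
+		 */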
return notifier_from_errno(err); + } + + return NOTIFY_DONE; +} + +struct notifier_block ocelot_switchdev_nb __read_mostly = { + .notifier_call = ocelot_switchdev_event, +}; + +static int ocelot_switchdev_blocking_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + int err; + + switch (event) { + /* Blocking events. */ + case SWITCHDEV_PORT_OBJ_ADD: + err = switchdev_handle_port_obj_add(dev, ptr, + ocelot_netdevice_dev_check, + ocelot_port_obj_add); + return notifier_from_errno(err); + case SWITCHDEV_PORT_OBJ_DEL: + err = switchdev_handle_port_obj_del(dev, ptr, + ocelot_netdevice_dev_check, + ocelot_port_obj_del); + return notifier_from_errno(err); + case SWITCHDEV_PORT_ATTR_SET: + err = switchdev_handle_port_attr_set(dev, ptr, + ocelot_netdevice_dev_check, + ocelot_port_attr_set); + return notifier_from_errno(err); + } + + return NOTIFY_DONE; +} + +struct notifier_block ocelot_switchdev_blocking_nb __read_mostly = { + .notifier_call = ocelot_switchdev_blocking_event, +}; + +static void vsc7514_phylink_mac_config(struct phylink_config *config, + unsigned int link_an_mode, + const struct phylink_link_state *state) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct ocelot_port_private *priv = netdev_priv(ndev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->port.index; + + ocelot_phylink_mac_config(ocelot, port, link_an_mode, state); +} + +static void vsc7514_phylink_mac_link_down(struct phylink_config *config, + unsigned int link_an_mode, + phy_interface_t interface) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct ocelot_port_private *priv = netdev_priv(ndev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->port.index; + + ocelot_phylink_mac_link_down(ocelot, port, link_an_mode, interface, + OCELOT_MAC_QUIRKS); +} + +static void vsc7514_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phydev, + unsigned int link_an_mode, + phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct ocelot_port_private *priv = netdev_priv(ndev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->port.index; + + ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode, + interface, speed, duplex, + tx_pause, rx_pause, OCELOT_MAC_QUIRKS); +} + +static const struct phylink_mac_ops ocelot_phylink_ops = { + .mac_config = vsc7514_phylink_mac_config, + .mac_link_down = vsc7514_phylink_mac_link_down, + .mac_link_up = vsc7514_phylink_mac_link_up, +}; + +static int ocelot_port_phylink_create(struct ocelot *ocelot, int port, + struct device_node *portnp) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct ocelot_port_private *priv; + struct device *dev = ocelot->dev; + phy_interface_t phy_mode; + struct phylink *phylink; + int err; + + of_get_phy_mode(portnp, &phy_mode); + /* DT bindings of internal PHY ports are broken and don't + * specify a phy-mode + */ + if (phy_mode == PHY_INTERFACE_MODE_NA) + phy_mode = PHY_INTERFACE_MODE_INTERNAL; + + if (phy_mode != PHY_INTERFACE_MODE_SGMII && + phy_mode != PHY_INTERFACE_MODE_QSGMII && + phy_mode != PHY_INTERFACE_MODE_INTERNAL) { + dev_err(dev, "unsupported phy mode %s for port %d\n", + phy_modes(phy_mode), port); + return -EINVAL; + } + + ocelot_port->phy_mode = phy_mode; + + err = ocelot_port_configure_serdes(ocelot, port, portnp); + if (err) + return err; + + priv = 
container_of(ocelot_port, struct ocelot_port_private, port); + + priv->phylink_config.dev = &priv->dev->dev; + priv->phylink_config.type = PHYLINK_NETDEV; + priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | + MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD; + + __set_bit(ocelot_port->phy_mode, + priv->phylink_config.supported_interfaces); + + phylink = phylink_create(&priv->phylink_config, + of_fwnode_handle(portnp), + phy_mode, &ocelot_phylink_ops); + if (IS_ERR(phylink)) { + err = PTR_ERR(phylink); + dev_err(dev, "Could not create phylink (%pe)\n", phylink); + return err; + } + + priv->phylink = phylink; + + err = phylink_of_phy_connect(phylink, portnp, 0); + if (err) { + dev_err(dev, "Could not connect to PHY: %pe\n", ERR_PTR(err)); + phylink_destroy(phylink); + priv->phylink = NULL; + return err; + } + + return 0; +} + +int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target, + struct device_node *portnp) +{ + struct ocelot_port_private *priv; + struct ocelot_port *ocelot_port; + struct net_device *dev; + int err; + + dev = alloc_etherdev(sizeof(struct ocelot_port_private)); + if (!dev) + return -ENOMEM; + SET_NETDEV_DEV(dev, ocelot->dev); + priv = netdev_priv(dev); + priv->dev = dev; + ocelot_port = &priv->port; + ocelot_port->ocelot = ocelot; + ocelot_port->index = port; + ocelot_port->target = target; + ocelot->ports[port] = ocelot_port; + + dev->netdev_ops = &ocelot_port_netdev_ops; + dev->ethtool_ops = &ocelot_ethtool_ops; + dev->max_mtu = OCELOT_JUMBO_MTU; + + dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXFCS | + NETIF_F_HW_TC; + dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; + + err = of_get_ethdev_address(portnp, dev); + if (err) + eth_hw_addr_gen(dev, ocelot->base_mac, port); + + ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, + OCELOT_STANDALONE_PVID, ENTRYTYPE_LOCKED); + + ocelot_init_port(ocelot, port); + + err = ocelot_port_phylink_create(ocelot, port, portnp); + if (err) + goto out; + + if (ocelot->fdma) + ocelot_fdma_netdev_init(ocelot, dev); + + SET_NETDEV_DEVLINK_PORT(dev, &ocelot->devlink_ports[port]); + err = register_netdev(dev); + if (err) { + dev_err(ocelot->dev, "register_netdev failed\n"); + goto out_fdma_deinit; + } + + return 0; + +out_fdma_deinit: + if (ocelot->fdma) + ocelot_fdma_netdev_deinit(ocelot, dev); +out: + ocelot->ports[port] = NULL; + free_netdev(dev); + + return err; +} + +void ocelot_release_port(struct ocelot_port *ocelot_port) +{ + struct ocelot_port_private *priv = container_of(ocelot_port, + struct ocelot_port_private, + port); + struct ocelot *ocelot = ocelot_port->ocelot; + struct ocelot_fdma *fdma = ocelot->fdma; + + unregister_netdev(priv->dev); + + if (fdma) + ocelot_fdma_netdev_deinit(ocelot, priv->dev); + + if (priv->phylink) { + rtnl_lock(); + phylink_disconnect_phy(priv->phylink); + rtnl_unlock(); + + phylink_destroy(priv->phylink); + } + + free_netdev(priv->dev); +} diff --git a/drivers/net/ethernet/mscc/ocelot_police.c b/drivers/net/ethernet/mscc/ocelot_police.c new file mode 100644 index 0000000000..7e1f67be38 --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_police.c @@ -0,0 +1,254 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Microsemi Ocelot Switch driver + * + * Copyright (c) 2019 Microsemi Corporation + */ + +#include <soc/mscc/ocelot.h> +#include "ocelot_police.h" + +/* Types for ANA:POL[0-192]:POL_MODE_CFG.FRM_MODE */ +#define POL_MODE_LINERATE 0 /* Incl IPG. Unit: 33 1/3 kbps, 4096 bytes */ +#define POL_MODE_DATARATE 1 /* Excl IPG. 
Unit: 33 1/3 kbps, 4096 bytes */ +#define POL_MODE_FRMRATE_HI 2 /* Unit: 33 1/3 fps, 32.8 frames */ +#define POL_MODE_FRMRATE_LO 3 /* Unit: 1/3 fps, 0.3 frames */ + +/* Policer indexes */ +#define POL_IX_PORT 0 /* 0-11 : Port policers */ +#define POL_IX_QUEUE 32 /* 32-127 : Queue policers */ + +/* Default policer order */ +#define POL_ORDER 0x1d3 /* Ocelot policer order: Serial (QoS -> Port -> VCAP) */ + +int qos_policer_conf_set(struct ocelot *ocelot, u32 pol_ix, + struct qos_policer_conf *conf) +{ + u32 cf = 0, cir_ena = 0, frm_mode = POL_MODE_LINERATE; + u32 cir = 0, cbs = 0, pir = 0, pbs = 0; + bool cir_discard = 0, pir_discard = 0; + u32 pbs_max = 0, cbs_max = 0; + u8 ipg = 20; + u32 value; + + pir = conf->pir; + pbs = conf->pbs; + + switch (conf->mode) { + case MSCC_QOS_RATE_MODE_LINE: + case MSCC_QOS_RATE_MODE_DATA: + if (conf->mode == MSCC_QOS_RATE_MODE_LINE) { + frm_mode = POL_MODE_LINERATE; + ipg = min_t(u8, GENMASK(4, 0), conf->ipg); + } else { + frm_mode = POL_MODE_DATARATE; + } + if (conf->dlb) { + cir_ena = 1; + cir = conf->cir; + cbs = conf->cbs; + if (cir == 0 && cbs == 0) { + /* Discard cir frames */ + cir_discard = 1; + } else { + cir = DIV_ROUND_UP(cir, 100); + cir *= 3; /* 33 1/3 kbps */ + cbs = DIV_ROUND_UP(cbs, 4096); + cbs = (cbs ? cbs : 1); /* No zero burst size */ + cbs_max = 60; /* Limit burst size */ + cf = conf->cf; + if (cf) + pir += conf->cir; + } + } + if (pir == 0 && pbs == 0) { + /* Discard PIR frames */ + pir_discard = 1; + } else { + pir = DIV_ROUND_UP(pir, 100); + pir *= 3; /* 33 1/3 kbps */ + pbs = DIV_ROUND_UP(pbs, 4096); + pbs = (pbs ? pbs : 1); /* No zero burst size */ + pbs_max = 60; /* Limit burst size */ + } + break; + case MSCC_QOS_RATE_MODE_FRAME: + if (pir >= 100) { + frm_mode = POL_MODE_FRMRATE_HI; + pir = DIV_ROUND_UP(pir, 100); + pir *= 3; /* 33 1/3 fps */ + pbs = (pbs * 10) / 328; /* 32.8 frames */ + pbs = (pbs ? pbs : 1); /* No zero burst size */ + pbs_max = GENMASK(6, 0); /* Limit burst size */ + } else { + frm_mode = POL_MODE_FRMRATE_LO; + if (pir == 0 && pbs == 0) { + /* Discard all frames */ + pir_discard = 1; + cir_discard = 1; + } else { + pir *= 3; /* 1/3 fps */ + pbs = (pbs * 10) / 3; /* 0.3 frames */ + pbs = (pbs ? pbs : 1); /* No zero burst size */ + pbs_max = 61; /* Limit burst size */ + } + } + break; + default: /* MSCC_QOS_RATE_MODE_DISABLED */ + /* Disable policer using maximum rate and zero burst */ + pir = GENMASK(15, 0); + pbs = 0; + break; + } + + /* Check limits */ + if (pir > GENMASK(15, 0)) { + dev_err(ocelot->dev, + "Invalid pir for policer %u: %u (max %lu)\n", + pol_ix, pir, GENMASK(15, 0)); + return -EINVAL; + } + + if (cir > GENMASK(15, 0)) { + dev_err(ocelot->dev, + "Invalid cir for policer %u: %u (max %lu)\n", + pol_ix, cir, GENMASK(15, 0)); + return -EINVAL; + } + + if (pbs > pbs_max) { + dev_err(ocelot->dev, + "Invalid pbs for policer %u: %u (max %u)\n", + pol_ix, pbs, pbs_max); + return -EINVAL; + } + + if (cbs > cbs_max) { + dev_err(ocelot->dev, + "Invalid cbs for policer %u: %u (max %u)\n", + pol_ix, cbs, cbs_max); + return -EINVAL; + } + + value = (ANA_POL_MODE_CFG_IPG_SIZE(ipg) | + ANA_POL_MODE_CFG_FRM_MODE(frm_mode) | + (cf ? ANA_POL_MODE_CFG_DLB_COUPLED : 0) | + (cir_ena ? ANA_POL_MODE_CFG_CIR_ENA : 0) | + ANA_POL_MODE_CFG_OVERSHOOT_ENA); + + ocelot_write_gix(ocelot, value, ANA_POL_MODE_CFG, pol_ix); + + ocelot_write_gix(ocelot, + ANA_POL_PIR_CFG_PIR_RATE(pir) | + ANA_POL_PIR_CFG_PIR_BURST(pbs), + ANA_POL_PIR_CFG, pol_ix); + + ocelot_write_gix(ocelot, + (pir_discard ? 
GENMASK(22, 0) : 0), + ANA_POL_PIR_STATE, pol_ix); + + ocelot_write_gix(ocelot, + ANA_POL_CIR_CFG_CIR_RATE(cir) | + ANA_POL_CIR_CFG_CIR_BURST(cbs), + ANA_POL_CIR_CFG, pol_ix); + + ocelot_write_gix(ocelot, + (cir_discard ? GENMASK(22, 0) : 0), + ANA_POL_CIR_STATE, pol_ix); + + return 0; +} + +int ocelot_policer_validate(const struct flow_action *action, + const struct flow_action_entry *a, + struct netlink_ext_ack *extack) +{ + if (a->police.exceed.act_id != FLOW_ACTION_DROP) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when exceed action is not drop"); + return -EOPNOTSUPP; + } + + if (a->police.notexceed.act_id != FLOW_ACTION_PIPE && + a->police.notexceed.act_id != FLOW_ACTION_ACCEPT) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is not pipe or ok"); + return -EOPNOTSUPP; + } + + if (a->police.notexceed.act_id == FLOW_ACTION_ACCEPT && + !flow_action_is_last_entry(action, a)) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is ok, but police action is not last"); + return -EOPNOTSUPP; + } + + if (a->police.peakrate_bytes_ps || + a->police.avrate || a->police.overhead) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when peakrate/avrate/overhead is configured"); + return -EOPNOTSUPP; + } + + if (a->police.rate_pkt_ps) { + NL_SET_ERR_MSG_MOD(extack, + "Offload does not support packets per second"); + return -EOPNOTSUPP; + } + + return 0; +} +EXPORT_SYMBOL(ocelot_policer_validate); + +int ocelot_port_policer_add(struct ocelot *ocelot, int port, + struct ocelot_policer *pol) +{ + struct qos_policer_conf pp = { 0 }; + int err; + + if (!pol) + return -EINVAL; + + pp.mode = MSCC_QOS_RATE_MODE_DATA; + pp.pir = pol->rate; + pp.pbs = pol->burst; + + dev_dbg(ocelot->dev, "%s: port %u pir %u kbps, pbs %u bytes\n", + __func__, port, pp.pir, pp.pbs); + + err = qos_policer_conf_set(ocelot, POL_IX_PORT + port, &pp); + if (err) + return err; + + ocelot_rmw_gix(ocelot, + ANA_PORT_POL_CFG_PORT_POL_ENA | + ANA_PORT_POL_CFG_POL_ORDER(POL_ORDER), + ANA_PORT_POL_CFG_PORT_POL_ENA | + ANA_PORT_POL_CFG_POL_ORDER_M, + ANA_PORT_POL_CFG, port); + + return 0; +} +EXPORT_SYMBOL(ocelot_port_policer_add); + +int ocelot_port_policer_del(struct ocelot *ocelot, int port) +{ + struct qos_policer_conf pp = { 0 }; + int err; + + dev_dbg(ocelot->dev, "%s: port %u\n", __func__, port); + + pp.mode = MSCC_QOS_RATE_MODE_DISABLED; + + err = qos_policer_conf_set(ocelot, POL_IX_PORT + port, &pp); + if (err) + return err; + + ocelot_rmw_gix(ocelot, + ANA_PORT_POL_CFG_POL_ORDER(POL_ORDER), + ANA_PORT_POL_CFG_PORT_POL_ENA | + ANA_PORT_POL_CFG_POL_ORDER_M, + ANA_PORT_POL_CFG, port); + + return 0; +} +EXPORT_SYMBOL(ocelot_port_policer_del); diff --git a/drivers/net/ethernet/mscc/ocelot_police.h b/drivers/net/ethernet/mscc/ocelot_police.h new file mode 100644 index 0000000000..0749f23684 --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_police.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* Microsemi Ocelot Switch driver + * + * Copyright (c) 2019 Microsemi Corporation + */ + +#ifndef _MSCC_OCELOT_POLICE_H_ +#define _MSCC_OCELOT_POLICE_H_ + +#include "ocelot.h" +#include <net/flow_offload.h> + +enum mscc_qos_rate_mode { + MSCC_QOS_RATE_MODE_DISABLED, /* Policer/shaper disabled */ + MSCC_QOS_RATE_MODE_LINE, /* Measure line rate in kbps incl. IPG */ + MSCC_QOS_RATE_MODE_DATA, /* Measures data rate in kbps excl. 
IPG */
+	MSCC_QOS_RATE_MODE_FRAME, /* Measures frame rate in fps */
+	__MSCC_QOS_RATE_MODE_END,
+	NUM_MSCC_QOS_RATE_MODE = __MSCC_QOS_RATE_MODE_END,
+	MSCC_QOS_RATE_MODE_MAX = __MSCC_QOS_RATE_MODE_END - 1,
+};
+
+struct qos_policer_conf {
+	enum mscc_qos_rate_mode mode;
+	bool dlb; /* Enable DLB (dual leaky bucket) mode */
+	bool cf;  /* Coupling flag (ignored in SLB mode) */
+	u32 cir;  /* CIR in kbps/fps (ignored in SLB mode) */
+	u32 cbs;  /* CBS in bytes/frames (ignored in SLB mode) */
+	u32 pir;  /* PIR in kbps/fps */
+	u32 pbs;  /* PBS in bytes/frames */
+	u8 ipg;   /* Size of IPG when MSCC_QOS_RATE_MODE_LINE is chosen */
+};
+
+int qos_policer_conf_set(struct ocelot *ocelot, u32 pol_ix,
+			 struct qos_policer_conf *conf);
+
+int ocelot_policer_validate(const struct flow_action *action,
+			    const struct flow_action_entry *a,
+			    struct netlink_ext_ack *extack);
+
+#endif /* _MSCC_OCELOT_POLICE_H_ */
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
new file mode 100644
index 0000000000..cb32234a5b
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
@@ -0,0 +1,858 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Microsemi Ocelot PTP clock driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ * Copyright 2020 NXP
+ */
+#include <linux/time64.h>
+
+#include <linux/dsa/ocelot.h>
+#include <linux/ptp_classify.h>
+#include <soc/mscc/ocelot_ptp.h>
+#include <soc/mscc/ocelot_sys.h>
+#include <soc/mscc/ocelot_vcap.h>
+#include <soc/mscc/ocelot.h>
+#include "ocelot.h"
+
+int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+	struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
+	unsigned long flags;
+	time64_t s;
+	u32 val;
+	s64 ns;
+
+	spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+	val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+	val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+	val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_SAVE);
+	ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+	s = ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_MSB, TOD_ACC_PIN) & 0xffff;
+	s <<= 32;
+	s += ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
+	ns = ocelot_read_rix(ocelot, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
+
+	spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+
+	/* Deal with negative values: the nanoseconds register encodes
+	 * -16..-1 ns as 0x3ffffff0..0x3fffffff, so fold those into the
+	 * previous second (e.g. a raw value of 0x3fffffff, i.e. -1 ns,
+	 * becomes s - 1 and 999999999 ns).
+	 */
+	if (ns >= 0x3ffffff0 && ns <= 0x3fffffff) {
+		s--;
+		ns &= 0xf;
+		ns += 999999984;
+	}
+
+	set_normalized_timespec64(ts, s, ns);
+	return 0;
+}
+EXPORT_SYMBOL(ocelot_ptp_gettime64);
+
+int ocelot_ptp_settime64(struct ptp_clock_info *ptp,
+			 const struct timespec64 *ts)
+{
+	struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
+	unsigned long flags;
+	u32 val;
+
+	spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+	val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+	val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+	val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_IDLE);
+
+	ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+	ocelot_write_rix(ocelot, lower_32_bits(ts->tv_sec), PTP_PIN_TOD_SEC_LSB,
+			 TOD_ACC_PIN);
+	ocelot_write_rix(ocelot, upper_32_bits(ts->tv_sec), PTP_PIN_TOD_SEC_MSB,
+			 TOD_ACC_PIN);
+	ocelot_write_rix(ocelot, ts->tv_nsec, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
+
+	val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+	val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+	val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_LOAD);
+
+	ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
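+	/* The TOD values written above are staged while the pin action is
+	 * IDLE; the LOAD action then commits them to the PTP timer.
+	 */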
+	spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+
+	if (ocelot->ops->tas_clock_adjust)
+		ocelot->ops->tas_clock_adjust(ocelot);
+
+	return 0;
+}
+EXPORT_SYMBOL(ocelot_ptp_settime64);
+
+int ocelot_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+	if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) {
+		struct ocelot *ocelot = container_of(ptp, struct ocelot,
+						     ptp_info);
+		unsigned long flags;
+		u32 val;
+
+		spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+		val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+		val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK |
+			 PTP_PIN_CFG_DOM);
+		val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_IDLE);
+
+		ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+		ocelot_write_rix(ocelot, 0, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
+		ocelot_write_rix(ocelot, 0, PTP_PIN_TOD_SEC_MSB, TOD_ACC_PIN);
+		ocelot_write_rix(ocelot, delta, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
+
+		val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+		val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK |
+			 PTP_PIN_CFG_DOM);
+		val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_DELTA);
+
+		ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+		spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+
+		if (ocelot->ops->tas_clock_adjust)
+			ocelot->ops->tas_clock_adjust(ocelot);
+	} else {
+		/* Fall back to ocelot_ptp_settime64(), which is not exact. */
+		struct timespec64 ts;
+		u64 now;
+
+		ocelot_ptp_gettime64(ptp, &ts);
+
+		now = ktime_to_ns(timespec64_to_ktime(ts));
+		ts = ns_to_timespec64(now + delta);
+
+		ocelot_ptp_settime64(ptp, &ts);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ocelot_ptp_adjtime);
+
+int ocelot_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+	struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
+	u32 unit = 0, direction = 0;
+	unsigned long flags;
+	u64 adj = 0;
+
+	spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+	if (!scaled_ppm)
+		goto disable_adj;
+
+	if (scaled_ppm < 0) {
+		direction = PTP_CFG_CLK_ADJ_CFG_DIR;
+		scaled_ppm = -scaled_ppm;
+	}
+
+	/* With scaled_ppm = ppm * 2^16, this works out to
+	 * adj = (10^12 << 16) / (scaled_ppm * 1000) = 10^9 / ppm,
+	 * e.g. 10^9 for a 1 ppm offset.
+	 */
+	adj = PSEC_PER_SEC << 16;
+	do_div(adj, scaled_ppm);
+	do_div(adj, 1000);
+
+	/* If the adjustment value is too large, use ns instead */
+	if (adj >= (1L << 30)) {
+		unit = PTP_CFG_CLK_ADJ_FREQ_NS;
+		do_div(adj, 1000);
+	}
+
+	/* Still too big */
+	if (adj >= (1L << 30))
+		goto disable_adj;
+
+	ocelot_write(ocelot, unit | adj, PTP_CLK_CFG_ADJ_FREQ);
+	ocelot_write(ocelot, PTP_CFG_CLK_ADJ_CFG_ENA | direction,
+		     PTP_CLK_CFG_ADJ_CFG);
+
+	spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+	return 0;
+
+disable_adj:
+	ocelot_write(ocelot, 0, PTP_CLK_CFG_ADJ_CFG);
+
+	spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+	return 0;
+}
+EXPORT_SYMBOL(ocelot_ptp_adjfine);
+
+int ocelot_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+		      enum ptp_pin_function func, unsigned int chan)
+{
+	switch (func) {
+	case PTP_PF_NONE:
+	case PTP_PF_PEROUT:
+		break;
+	case PTP_PF_EXTTS:
+	case PTP_PF_PHYSYNC:
+		return -1;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(ocelot_ptp_verify);
+
+int ocelot_ptp_enable(struct ptp_clock_info *ptp,
+		      struct ptp_clock_request *rq, int on)
+{
+	struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
+	struct timespec64 ts_phase, ts_period;
+	enum ocelot_ptp_pins ptp_pin;
+	unsigned long flags;
+	bool pps = false;
+	int pin = -1;
+	s64 wf_high;
+	s64 wf_low;
+	u32 val;
+
+	switch (rq->type) {
+	case PTP_CLK_REQ_PEROUT:
+		/* Reject requests with unsupported flags */
+		if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE |
+					 PTP_PEROUT_PHASE))
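+			/* i.e. any flag other than PTP_PEROUT_DUTY_CYCLE
+			 * and PTP_PEROUT_PHASE
+			 */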
return -EOPNOTSUPP; + + pin = ptp_find_pin(ocelot->ptp_clock, PTP_PF_PEROUT, + rq->perout.index); + if (pin == 0) + ptp_pin = PTP_PIN_0; + else if (pin == 1) + ptp_pin = PTP_PIN_1; + else if (pin == 2) + ptp_pin = PTP_PIN_2; + else if (pin == 3) + ptp_pin = PTP_PIN_3; + else + return -EBUSY; + + ts_period.tv_sec = rq->perout.period.sec; + ts_period.tv_nsec = rq->perout.period.nsec; + + if (ts_period.tv_sec == 1 && ts_period.tv_nsec == 0) + pps = true; + + /* Handle turning off */ + if (!on) { + spin_lock_irqsave(&ocelot->ptp_clock_lock, flags); + val = PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_IDLE); + ocelot_write_rix(ocelot, val, PTP_PIN_CFG, ptp_pin); + spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags); + break; + } + + if (rq->perout.flags & PTP_PEROUT_PHASE) { + ts_phase.tv_sec = rq->perout.phase.sec; + ts_phase.tv_nsec = rq->perout.phase.nsec; + } else { + /* Compatibility */ + ts_phase.tv_sec = rq->perout.start.sec; + ts_phase.tv_nsec = rq->perout.start.nsec; + } + if (ts_phase.tv_sec || (ts_phase.tv_nsec && !pps)) { + dev_warn(ocelot->dev, + "Absolute start time not supported!\n"); + dev_warn(ocelot->dev, + "Accept nsec for PPS phase adjustment, otherwise start time should be 0 0.\n"); + return -EINVAL; + } + + /* Calculate waveform high and low times */ + if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) { + struct timespec64 ts_on; + + ts_on.tv_sec = rq->perout.on.sec; + ts_on.tv_nsec = rq->perout.on.nsec; + + wf_high = timespec64_to_ns(&ts_on); + } else { + if (pps) { + wf_high = 1000; + } else { + wf_high = timespec64_to_ns(&ts_period); + wf_high = div_s64(wf_high, 2); + } + } + + wf_low = timespec64_to_ns(&ts_period); + wf_low -= wf_high; + + /* Handle PPS request */ + if (pps) { + spin_lock_irqsave(&ocelot->ptp_clock_lock, flags); + ocelot_write_rix(ocelot, ts_phase.tv_nsec, + PTP_PIN_WF_LOW_PERIOD, ptp_pin); + ocelot_write_rix(ocelot, wf_high, + PTP_PIN_WF_HIGH_PERIOD, ptp_pin); + val = PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_CLOCK); + val |= PTP_PIN_CFG_SYNC; + ocelot_write_rix(ocelot, val, PTP_PIN_CFG, ptp_pin); + spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags); + break; + } + + /* Handle periodic clock */ + if (wf_high > 0x3fffffff || wf_high <= 0x6) + return -EINVAL; + if (wf_low > 0x3fffffff || wf_low <= 0x6) + return -EINVAL; + + spin_lock_irqsave(&ocelot->ptp_clock_lock, flags); + ocelot_write_rix(ocelot, wf_low, PTP_PIN_WF_LOW_PERIOD, + ptp_pin); + ocelot_write_rix(ocelot, wf_high, PTP_PIN_WF_HIGH_PERIOD, + ptp_pin); + val = PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_CLOCK); + ocelot_write_rix(ocelot, val, PTP_PIN_CFG, ptp_pin); + spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags); + break; + default: + return -EOPNOTSUPP; + } + return 0; +} +EXPORT_SYMBOL(ocelot_ptp_enable); + +static void ocelot_populate_l2_ptp_trap_key(struct ocelot_vcap_filter *trap) +{ + trap->key_type = OCELOT_VCAP_KEY_ETYPE; + *(__be16 *)trap->key.etype.etype.value = htons(ETH_P_1588); + *(__be16 *)trap->key.etype.etype.mask = htons(0xffff); +} + +static void +ocelot_populate_ipv4_ptp_event_trap_key(struct ocelot_vcap_filter *trap) +{ + trap->key_type = OCELOT_VCAP_KEY_IPV4; + trap->key.ipv4.proto.value[0] = IPPROTO_UDP; + trap->key.ipv4.proto.mask[0] = 0xff; + trap->key.ipv4.dport.value = PTP_EV_PORT; + trap->key.ipv4.dport.mask = 0xffff; +} + +static void +ocelot_populate_ipv6_ptp_event_trap_key(struct ocelot_vcap_filter *trap) +{ + trap->key_type = OCELOT_VCAP_KEY_IPV6; + trap->key.ipv6.proto.value[0] = IPPROTO_UDP; + trap->key.ipv6.proto.mask[0] = 0xff; + trap->key.ipv6.dport.value = PTP_EV_PORT; + 
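+	/* PTP event messages (Sync, Delay_Req, Pdelay_Req/Resp) use UDP
+	 * port 319 (PTP_EV_PORT); general messages use port 320
+	 * (PTP_GEN_PORT) and are matched by the _general_ trap keys below.
+	 */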
trap->key.ipv6.dport.mask = 0xffff; +} + +static void +ocelot_populate_ipv4_ptp_general_trap_key(struct ocelot_vcap_filter *trap) +{ + trap->key_type = OCELOT_VCAP_KEY_IPV4; + trap->key.ipv4.proto.value[0] = IPPROTO_UDP; + trap->key.ipv4.proto.mask[0] = 0xff; + trap->key.ipv4.dport.value = PTP_GEN_PORT; + trap->key.ipv4.dport.mask = 0xffff; +} + +static void +ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap) +{ + trap->key_type = OCELOT_VCAP_KEY_IPV6; + trap->key.ipv6.proto.value[0] = IPPROTO_UDP; + trap->key.ipv6.proto.mask[0] = 0xff; + trap->key.ipv6.dport.value = PTP_GEN_PORT; + trap->key.ipv6.dport.mask = 0xffff; +} + +static int ocelot_l2_ptp_trap_add(struct ocelot *ocelot, int port) +{ + unsigned long l2_cookie = OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot); + + return ocelot_trap_add(ocelot, port, l2_cookie, true, + ocelot_populate_l2_ptp_trap_key); +} + +static int ocelot_l2_ptp_trap_del(struct ocelot *ocelot, int port) +{ + unsigned long l2_cookie = OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot); + + return ocelot_trap_del(ocelot, port, l2_cookie); +} + +static int ocelot_ipv4_ptp_trap_add(struct ocelot *ocelot, int port) +{ + unsigned long ipv4_gen_cookie = OCELOT_VCAP_IS2_IPV4_GEN_PTP_TRAP(ocelot); + unsigned long ipv4_ev_cookie = OCELOT_VCAP_IS2_IPV4_EV_PTP_TRAP(ocelot); + int err; + + err = ocelot_trap_add(ocelot, port, ipv4_ev_cookie, true, + ocelot_populate_ipv4_ptp_event_trap_key); + if (err) + return err; + + err = ocelot_trap_add(ocelot, port, ipv4_gen_cookie, false, + ocelot_populate_ipv4_ptp_general_trap_key); + if (err) + ocelot_trap_del(ocelot, port, ipv4_ev_cookie); + + return err; +} + +static int ocelot_ipv4_ptp_trap_del(struct ocelot *ocelot, int port) +{ + unsigned long ipv4_gen_cookie = OCELOT_VCAP_IS2_IPV4_GEN_PTP_TRAP(ocelot); + unsigned long ipv4_ev_cookie = OCELOT_VCAP_IS2_IPV4_EV_PTP_TRAP(ocelot); + int err; + + err = ocelot_trap_del(ocelot, port, ipv4_ev_cookie); + err |= ocelot_trap_del(ocelot, port, ipv4_gen_cookie); + return err; +} + +static int ocelot_ipv6_ptp_trap_add(struct ocelot *ocelot, int port) +{ + unsigned long ipv6_gen_cookie = OCELOT_VCAP_IS2_IPV6_GEN_PTP_TRAP(ocelot); + unsigned long ipv6_ev_cookie = OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot); + int err; + + err = ocelot_trap_add(ocelot, port, ipv6_ev_cookie, true, + ocelot_populate_ipv6_ptp_event_trap_key); + if (err) + return err; + + err = ocelot_trap_add(ocelot, port, ipv6_gen_cookie, false, + ocelot_populate_ipv6_ptp_general_trap_key); + if (err) + ocelot_trap_del(ocelot, port, ipv6_ev_cookie); + + return err; +} + +static int ocelot_ipv6_ptp_trap_del(struct ocelot *ocelot, int port) +{ + unsigned long ipv6_gen_cookie = OCELOT_VCAP_IS2_IPV6_GEN_PTP_TRAP(ocelot); + unsigned long ipv6_ev_cookie = OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot); + int err; + + err = ocelot_trap_del(ocelot, port, ipv6_ev_cookie); + err |= ocelot_trap_del(ocelot, port, ipv6_gen_cookie); + return err; +} + +static int ocelot_setup_ptp_traps(struct ocelot *ocelot, int port, + bool l2, bool l4) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + int err; + + ocelot_port->trap_proto &= ~(OCELOT_PROTO_PTP_L2 | + OCELOT_PROTO_PTP_L4); + + if (l2) + err = ocelot_l2_ptp_trap_add(ocelot, port); + else + err = ocelot_l2_ptp_trap_del(ocelot, port); + if (err) + return err; + + if (l4) { + err = ocelot_ipv4_ptp_trap_add(ocelot, port); + if (err) + goto err_ipv4; + + err = ocelot_ipv6_ptp_trap_add(ocelot, port); + if (err) + goto err_ipv6; + } else { + err = ocelot_ipv4_ptp_trap_del(ocelot, port); + + err |= 
ocelot_ipv6_ptp_trap_del(ocelot, port);
+	}
+	if (err)
+		return err;
+
+	if (l2)
+		ocelot_port->trap_proto |= OCELOT_PROTO_PTP_L2;
+	if (l4)
+		ocelot_port->trap_proto |= OCELOT_PROTO_PTP_L4;
+
+	return 0;
+
+err_ipv6:
+	ocelot_ipv4_ptp_trap_del(ocelot, port);
+err_ipv4:
+	if (l2)
+		ocelot_l2_ptp_trap_del(ocelot, port);
+	return err;
+}
+
+static int ocelot_traps_to_ptp_rx_filter(unsigned int proto)
+{
+	if ((proto & OCELOT_PROTO_PTP_L2) && (proto & OCELOT_PROTO_PTP_L4))
+		return HWTSTAMP_FILTER_PTP_V2_EVENT;
+	else if (proto & OCELOT_PROTO_PTP_L2)
+		return HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+	else if (proto & OCELOT_PROTO_PTP_L4)
+		return HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+
+	return HWTSTAMP_FILTER_NONE;
+}
+
+int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr)
+{
+	struct ocelot_port *ocelot_port = ocelot->ports[port];
+	struct hwtstamp_config cfg = {};
+
+	switch (ocelot_port->ptp_cmd) {
+	case IFH_REW_OP_TWO_STEP_PTP:
+		cfg.tx_type = HWTSTAMP_TX_ON;
+		break;
+	case IFH_REW_OP_ORIGIN_PTP:
+		cfg.tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
+		break;
+	default:
+		cfg.tx_type = HWTSTAMP_TX_OFF;
+		break;
+	}
+
+	cfg.rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto);
+
+	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+EXPORT_SYMBOL(ocelot_hwstamp_get);
+
+int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
+{
+	struct ocelot_port *ocelot_port = ocelot->ports[port];
+	bool l2 = false, l4 = false;
+	struct hwtstamp_config cfg;
+	int err;
+
+	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+		return -EFAULT;
+
+	/* Tx type sanity check */
+	switch (cfg.tx_type) {
+	case HWTSTAMP_TX_ON:
+		ocelot_port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
+		break;
+	case HWTSTAMP_TX_ONESTEP_SYNC:
+		/* IFH_REW_OP_ONE_STEP_PTP updates the correction field;
+		 * we need to update the origin time, hence
+		 * IFH_REW_OP_ORIGIN_PTP.
+		 */
+		ocelot_port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP;
+		break;
+	case HWTSTAMP_TX_OFF:
+		ocelot_port->ptp_cmd = 0;
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	switch (cfg.rx_filter) {
+	case HWTSTAMP_FILTER_NONE:
+		break;
+	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+		l4 = true;
+		break;
+	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+		l2 = true;
+		break;
+	case HWTSTAMP_FILTER_PTP_V2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+		l2 = true;
+		l4 = true;
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	err = ocelot_setup_ptp_traps(ocelot, port, l2, l4);
+	if (err)
+		return err;
+
+	cfg.rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto);
+
+	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+EXPORT_SYMBOL(ocelot_hwstamp_set);
+
+int ocelot_get_ts_info(struct ocelot *ocelot, int port,
+		       struct ethtool_ts_info *info)
+{
+	info->phc_index = ocelot->ptp_clock ?
+ ptp_clock_index(ocelot->ptp_clock) : -1; + if (info->phc_index == -1) { + info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + return 0; + } + info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) | + BIT(HWTSTAMP_TX_ONESTEP_SYNC); + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT); + + return 0; +} +EXPORT_SYMBOL(ocelot_get_ts_info); + +static int ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port, + struct sk_buff *clone) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + unsigned long flags; + + spin_lock_irqsave(&ocelot->ts_id_lock, flags); + + if (ocelot_port->ptp_skbs_in_flight == OCELOT_MAX_PTP_ID || + ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) { + spin_unlock_irqrestore(&ocelot->ts_id_lock, flags); + return -EBUSY; + } + + skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS; + /* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */ + OCELOT_SKB_CB(clone)->ts_id = ocelot_port->ts_id; + + ocelot_port->ts_id++; + if (ocelot_port->ts_id == OCELOT_MAX_PTP_ID) + ocelot_port->ts_id = 0; + + ocelot_port->ptp_skbs_in_flight++; + ocelot->ptp_skbs_in_flight++; + + skb_queue_tail(&ocelot_port->tx_skbs, clone); + + spin_unlock_irqrestore(&ocelot->ts_id_lock, flags); + + return 0; +} + +static bool ocelot_ptp_is_onestep_sync(struct sk_buff *skb, + unsigned int ptp_class) +{ + struct ptp_header *hdr; + u8 msgtype, twostep; + + hdr = ptp_parse_header(skb, ptp_class); + if (!hdr) + return false; + + msgtype = ptp_get_msgtype(hdr, ptp_class); + twostep = hdr->flag_field[0] & 0x2; + + if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0) + return true; + + return false; +} + +int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port, + struct sk_buff *skb, + struct sk_buff **clone) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + u8 ptp_cmd = ocelot_port->ptp_cmd; + unsigned int ptp_class; + int err; + + /* Don't do anything if PTP timestamping not enabled */ + if (!ptp_cmd) + return 0; + + ptp_class = ptp_classify_raw(skb); + if (ptp_class == PTP_CLASS_NONE) + return -EINVAL; + + /* Store ptp_cmd in OCELOT_SKB_CB(skb)->ptp_cmd */ + if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) { + if (ocelot_ptp_is_onestep_sync(skb, ptp_class)) { + OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd; + return 0; + } + + /* Fall back to two-step timestamping */ + ptp_cmd = IFH_REW_OP_TWO_STEP_PTP; + } + + if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) { + *clone = skb_clone_sk(skb); + if (!(*clone)) + return -ENOMEM; + + err = ocelot_port_add_txtstamp_skb(ocelot, port, *clone); + if (err) + return err; + + OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd; + OCELOT_SKB_CB(*clone)->ptp_class = ptp_class; + } + + return 0; +} +EXPORT_SYMBOL(ocelot_port_txtstamp_request); + +static void ocelot_get_hwtimestamp(struct ocelot *ocelot, + struct timespec64 *ts) +{ + unsigned long flags; + u32 val; + + spin_lock_irqsave(&ocelot->ptp_clock_lock, flags); + + /* Read current PTP time to get seconds */ + val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN); + + val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM); + val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_SAVE); + ocelot_write_rix(ocelot, val, 
PTP_PIN_CFG, TOD_ACC_PIN); + ts->tv_sec = ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN); + + /* Read packet HW timestamp from FIFO */ + val = ocelot_read(ocelot, SYS_PTP_TXSTAMP); + ts->tv_nsec = SYS_PTP_TXSTAMP_PTP_TXSTAMP(val); + + /* Sec has incremented since the ts was registered */ + if ((ts->tv_sec & 0x1) != !!(val & SYS_PTP_TXSTAMP_PTP_TXSTAMP_SEC)) + ts->tv_sec--; + + spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags); +} + +static bool ocelot_validate_ptp_skb(struct sk_buff *clone, u16 seqid) +{ + struct ptp_header *hdr; + + hdr = ptp_parse_header(clone, OCELOT_SKB_CB(clone)->ptp_class); + if (WARN_ON(!hdr)) + return false; + + return seqid == ntohs(hdr->sequence_id); +} + +void ocelot_get_txtstamp(struct ocelot *ocelot) +{ + int budget = OCELOT_PTP_QUEUE_SZ; + + while (budget--) { + struct sk_buff *skb, *skb_tmp, *skb_match = NULL; + struct skb_shared_hwtstamps shhwtstamps; + u32 val, id, seqid, txport; + struct ocelot_port *port; + struct timespec64 ts; + unsigned long flags; + + val = ocelot_read(ocelot, SYS_PTP_STATUS); + + /* Check if a timestamp can be retrieved */ + if (!(val & SYS_PTP_STATUS_PTP_MESS_VLD)) + break; + + WARN_ON(val & SYS_PTP_STATUS_PTP_OVFL); + + /* Retrieve the ts ID and Tx port */ + id = SYS_PTP_STATUS_PTP_MESS_ID_X(val); + txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val); + seqid = SYS_PTP_STATUS_PTP_MESS_SEQ_ID(val); + + port = ocelot->ports[txport]; + + spin_lock(&ocelot->ts_id_lock); + port->ptp_skbs_in_flight--; + ocelot->ptp_skbs_in_flight--; + spin_unlock(&ocelot->ts_id_lock); + + /* Retrieve its associated skb */ +try_again: + spin_lock_irqsave(&port->tx_skbs.lock, flags); + + skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) { + if (OCELOT_SKB_CB(skb)->ts_id != id) + continue; + __skb_unlink(skb, &port->tx_skbs); + skb_match = skb; + break; + } + + spin_unlock_irqrestore(&port->tx_skbs.lock, flags); + + if (WARN_ON(!skb_match)) + continue; + + if (!ocelot_validate_ptp_skb(skb_match, seqid)) { + dev_err_ratelimited(ocelot->dev, + "port %d received stale TX timestamp for seqid %d, discarding\n", + txport, seqid); + dev_kfree_skb_any(skb); + goto try_again; + } + + /* Get the h/w timestamp */ + ocelot_get_hwtimestamp(ocelot, &ts); + + /* Set the timestamp into the skb */ + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec); + skb_complete_tx_timestamp(skb_match, &shhwtstamps); + + /* Next ts */ + ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT); + } +} +EXPORT_SYMBOL(ocelot_get_txtstamp); + +int ocelot_init_timestamp(struct ocelot *ocelot, + const struct ptp_clock_info *info) +{ + struct ptp_clock *ptp_clock; + int i; + + ocelot->ptp_info = *info; + + for (i = 0; i < OCELOT_PTP_PINS_NUM; i++) { + struct ptp_pin_desc *p = &ocelot->ptp_pins[i]; + + snprintf(p->name, sizeof(p->name), "switch_1588_dat%d", i); + p->index = i; + p->func = PTP_PF_NONE; + } + + ocelot->ptp_info.pin_config = &ocelot->ptp_pins[0]; + + ptp_clock = ptp_clock_register(&ocelot->ptp_info, ocelot->dev); + if (IS_ERR(ptp_clock)) + return PTR_ERR(ptp_clock); + /* Check if PHC support is missing at the configuration level */ + if (!ptp_clock) + return 0; + + ocelot->ptp_clock = ptp_clock; + + ocelot_write(ocelot, SYS_PTP_CFG_PTP_STAMP_WID(30), SYS_PTP_CFG); + ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_LOW); + ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_HIGH); + + ocelot_write(ocelot, PTP_CFG_MISC_PTP_EN, PTP_CFG_MISC); + + return 0; +} +EXPORT_SYMBOL(ocelot_init_timestamp); + +int 
ocelot_deinit_timestamp(struct ocelot *ocelot) +{ + if (ocelot->ptp_clock) + ptp_clock_unregister(ocelot->ptp_clock); + return 0; +} +EXPORT_SYMBOL(ocelot_deinit_timestamp); diff --git a/drivers/net/ethernet/mscc/ocelot_qs.h b/drivers/net/ethernet/mscc/ocelot_qs.h new file mode 100644 index 0000000000..d18ae726c0 --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_qs.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + */ + +#ifndef _MSCC_OCELOT_QS_H_ +#define _MSCC_OCELOT_QS_H_ + +/* TODO handle BE */ +#define XTR_EOF_0 0x00000080U +#define XTR_EOF_1 0x01000080U +#define XTR_EOF_2 0x02000080U +#define XTR_EOF_3 0x03000080U +#define XTR_PRUNED 0x04000080U +#define XTR_ABORT 0x05000080U +#define XTR_ESCAPE 0x06000080U +#define XTR_NOT_READY 0x07000080U +#define XTR_VALID_BYTES(x) (4 - (((x) >> 24) & 3)) + +#define QS_XTR_GRP_CFG_RSZ 0x4 + +#define QS_XTR_GRP_CFG_MODE(x) (((x) << 2) & GENMASK(3, 2)) +#define QS_XTR_GRP_CFG_MODE_M GENMASK(3, 2) +#define QS_XTR_GRP_CFG_MODE_X(x) (((x) & GENMASK(3, 2)) >> 2) +#define QS_XTR_GRP_CFG_STATUS_WORD_POS BIT(1) +#define QS_XTR_GRP_CFG_BYTE_SWAP BIT(0) + +#define QS_XTR_RD_RSZ 0x4 + +#define QS_XTR_FRM_PRUNING_RSZ 0x4 + +#define QS_XTR_CFG_DP_WM(x) (((x) << 5) & GENMASK(7, 5)) +#define QS_XTR_CFG_DP_WM_M GENMASK(7, 5) +#define QS_XTR_CFG_DP_WM_X(x) (((x) & GENMASK(7, 5)) >> 5) +#define QS_XTR_CFG_SCH_WM(x) (((x) << 2) & GENMASK(4, 2)) +#define QS_XTR_CFG_SCH_WM_M GENMASK(4, 2) +#define QS_XTR_CFG_SCH_WM_X(x) (((x) & GENMASK(4, 2)) >> 2) +#define QS_XTR_CFG_OFLW_ERR_STICKY(x) ((x) & GENMASK(1, 0)) +#define QS_XTR_CFG_OFLW_ERR_STICKY_M GENMASK(1, 0) + +#define QS_INJ_GRP_CFG_RSZ 0x4 + +#define QS_INJ_GRP_CFG_MODE(x) (((x) << 2) & GENMASK(3, 2)) +#define QS_INJ_GRP_CFG_MODE_M GENMASK(3, 2) +#define QS_INJ_GRP_CFG_MODE_X(x) (((x) & GENMASK(3, 2)) >> 2) +#define QS_INJ_GRP_CFG_BYTE_SWAP BIT(0) + +#define QS_INJ_WR_RSZ 0x4 + +#define QS_INJ_CTRL_RSZ 0x4 + +#define QS_INJ_CTRL_GAP_SIZE(x) (((x) << 21) & GENMASK(24, 21)) +#define QS_INJ_CTRL_GAP_SIZE_M GENMASK(24, 21) +#define QS_INJ_CTRL_GAP_SIZE_X(x) (((x) & GENMASK(24, 21)) >> 21) +#define QS_INJ_CTRL_ABORT BIT(20) +#define QS_INJ_CTRL_EOF BIT(19) +#define QS_INJ_CTRL_SOF BIT(18) +#define QS_INJ_CTRL_VLD_BYTES(x) (((x) << 16) & GENMASK(17, 16)) +#define QS_INJ_CTRL_VLD_BYTES_M GENMASK(17, 16) +#define QS_INJ_CTRL_VLD_BYTES_X(x) (((x) & GENMASK(17, 16)) >> 16) + +#define QS_INJ_STATUS_WMARK_REACHED(x) (((x) << 4) & GENMASK(5, 4)) +#define QS_INJ_STATUS_WMARK_REACHED_M GENMASK(5, 4) +#define QS_INJ_STATUS_WMARK_REACHED_X(x) (((x) & GENMASK(5, 4)) >> 4) +#define QS_INJ_STATUS_FIFO_RDY(x) (((x) << 2) & GENMASK(3, 2)) +#define QS_INJ_STATUS_FIFO_RDY_M GENMASK(3, 2) +#define QS_INJ_STATUS_FIFO_RDY_X(x) (((x) & GENMASK(3, 2)) >> 2) +#define QS_INJ_STATUS_INJ_IN_PROGRESS(x) ((x) & GENMASK(1, 0)) +#define QS_INJ_STATUS_INJ_IN_PROGRESS_M GENMASK(1, 0) + +#define QS_INJ_ERR_RSZ 0x4 + +#define QS_INJ_ERR_ABORT_ERR_STICKY BIT(1) +#define QS_INJ_ERR_WR_ERR_STICKY BIT(0) + +#endif diff --git a/drivers/net/ethernet/mscc/ocelot_rew.h b/drivers/net/ethernet/mscc/ocelot_rew.h new file mode 100644 index 0000000000..210914b7e2 --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_rew.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + */ + +#ifndef _MSCC_OCELOT_REW_H_ +#define _MSCC_OCELOT_REW_H_ + +#define 
REW_PORT_VLAN_CFG_GSZ 0x80 + +#define REW_PORT_VLAN_CFG_PORT_TPID(x) (((x) << 16) & GENMASK(31, 16)) +#define REW_PORT_VLAN_CFG_PORT_TPID_M GENMASK(31, 16) +#define REW_PORT_VLAN_CFG_PORT_TPID_X(x) (((x) & GENMASK(31, 16)) >> 16) +#define REW_PORT_VLAN_CFG_PORT_DEI BIT(15) +#define REW_PORT_VLAN_CFG_PORT_PCP(x) (((x) << 12) & GENMASK(14, 12)) +#define REW_PORT_VLAN_CFG_PORT_PCP_M GENMASK(14, 12) +#define REW_PORT_VLAN_CFG_PORT_PCP_X(x) (((x) & GENMASK(14, 12)) >> 12) +#define REW_PORT_VLAN_CFG_PORT_VID(x) ((x) & GENMASK(11, 0)) +#define REW_PORT_VLAN_CFG_PORT_VID_M GENMASK(11, 0) + +#define REW_TAG_CFG_GSZ 0x80 + +#define REW_TAG_CFG_TAG_CFG(x) (((x) << 7) & GENMASK(8, 7)) +#define REW_TAG_CFG_TAG_CFG_M GENMASK(8, 7) +#define REW_TAG_CFG_TAG_CFG_X(x) (((x) & GENMASK(8, 7)) >> 7) +#define REW_TAG_CFG_TAG_TPID_CFG(x) (((x) << 5) & GENMASK(6, 5)) +#define REW_TAG_CFG_TAG_TPID_CFG_M GENMASK(6, 5) +#define REW_TAG_CFG_TAG_TPID_CFG_X(x) (((x) & GENMASK(6, 5)) >> 5) +#define REW_TAG_CFG_TAG_VID_CFG BIT(4) +#define REW_TAG_CFG_TAG_PCP_CFG(x) (((x) << 2) & GENMASK(3, 2)) +#define REW_TAG_CFG_TAG_PCP_CFG_M GENMASK(3, 2) +#define REW_TAG_CFG_TAG_PCP_CFG_X(x) (((x) & GENMASK(3, 2)) >> 2) +#define REW_TAG_CFG_TAG_DEI_CFG(x) ((x) & GENMASK(1, 0)) +#define REW_TAG_CFG_TAG_DEI_CFG_M GENMASK(1, 0) + +#define REW_PORT_CFG_GSZ 0x80 + +#define REW_PORT_CFG_ES0_EN BIT(5) +#define REW_PORT_CFG_FCS_UPDATE_NONCPU_CFG(x) (((x) << 3) & GENMASK(4, 3)) +#define REW_PORT_CFG_FCS_UPDATE_NONCPU_CFG_M GENMASK(4, 3) +#define REW_PORT_CFG_FCS_UPDATE_NONCPU_CFG_X(x) (((x) & GENMASK(4, 3)) >> 3) +#define REW_PORT_CFG_FCS_UPDATE_CPU_ENA BIT(2) +#define REW_PORT_CFG_FLUSH_ENA BIT(1) +#define REW_PORT_CFG_AGE_DIS BIT(0) + +#define REW_DSCP_CFG_GSZ 0x80 + +#define REW_PCP_DEI_QOS_MAP_CFG_GSZ 0x80 +#define REW_PCP_DEI_QOS_MAP_CFG_RSZ 0x4 + +#define REW_PCP_DEI_QOS_MAP_CFG_DEI_QOS_VAL BIT(3) +#define REW_PCP_DEI_QOS_MAP_CFG_PCP_QOS_VAL(x) ((x) & GENMASK(2, 0)) +#define REW_PCP_DEI_QOS_MAP_CFG_PCP_QOS_VAL_M GENMASK(2, 0) + +#define REW_PTP_CFG_GSZ 0x80 + +#define REW_PTP_CFG_PTP_BACKPLANE_MODE BIT(7) +#define REW_PTP_CFG_GP_CFG_UNUSED(x) (((x) << 3) & GENMASK(6, 3)) +#define REW_PTP_CFG_GP_CFG_UNUSED_M GENMASK(6, 3) +#define REW_PTP_CFG_GP_CFG_UNUSED_X(x) (((x) & GENMASK(6, 3)) >> 3) +#define REW_PTP_CFG_PTP_1STEP_DIS BIT(2) +#define REW_PTP_CFG_PTP_2STEP_DIS BIT(1) +#define REW_PTP_CFG_PTP_UDP_KEEP BIT(0) + +#define REW_PTP_DLY1_CFG_GSZ 0x80 + +#define REW_RED_TAG_CFG_GSZ 0x80 + +#define REW_RED_TAG_CFG_RED_TAG_CFG BIT(0) + +#define REW_DSCP_REMAP_DP1_CFG_RSZ 0x4 + +#define REW_DSCP_REMAP_CFG_RSZ 0x4 + +#define REW_REW_STICKY_ES0_TAGB_PUSH_FAILED BIT(0) + +#define REW_PPT_RSZ 0x4 + +#endif diff --git a/drivers/net/ethernet/mscc/ocelot_stats.c b/drivers/net/ethernet/mscc/ocelot_stats.c new file mode 100644 index 0000000000..c018783757 --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_stats.c @@ -0,0 +1,989 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Statistics for Ocelot switch family + * + * Copyright (c) 2017 Microsemi Corporation + * Copyright 2022 NXP + */ +#include <linux/ethtool_netlink.h> +#include <linux/spinlock.h> +#include <linux/mutex.h> +#include <linux/workqueue.h> +#include "ocelot.h" + +enum ocelot_stat { + OCELOT_STAT_RX_OCTETS, + OCELOT_STAT_RX_UNICAST, + OCELOT_STAT_RX_MULTICAST, + OCELOT_STAT_RX_BROADCAST, + OCELOT_STAT_RX_SHORTS, + OCELOT_STAT_RX_FRAGMENTS, + OCELOT_STAT_RX_JABBERS, + OCELOT_STAT_RX_CRC_ALIGN_ERRS, + OCELOT_STAT_RX_SYM_ERRS, + OCELOT_STAT_RX_64, + OCELOT_STAT_RX_65_127, + 
OCELOT_STAT_RX_128_255, + OCELOT_STAT_RX_256_511, + OCELOT_STAT_RX_512_1023, + OCELOT_STAT_RX_1024_1526, + OCELOT_STAT_RX_1527_MAX, + OCELOT_STAT_RX_PAUSE, + OCELOT_STAT_RX_CONTROL, + OCELOT_STAT_RX_LONGS, + OCELOT_STAT_RX_CLASSIFIED_DROPS, + OCELOT_STAT_RX_RED_PRIO_0, + OCELOT_STAT_RX_RED_PRIO_1, + OCELOT_STAT_RX_RED_PRIO_2, + OCELOT_STAT_RX_RED_PRIO_3, + OCELOT_STAT_RX_RED_PRIO_4, + OCELOT_STAT_RX_RED_PRIO_5, + OCELOT_STAT_RX_RED_PRIO_6, + OCELOT_STAT_RX_RED_PRIO_7, + OCELOT_STAT_RX_YELLOW_PRIO_0, + OCELOT_STAT_RX_YELLOW_PRIO_1, + OCELOT_STAT_RX_YELLOW_PRIO_2, + OCELOT_STAT_RX_YELLOW_PRIO_3, + OCELOT_STAT_RX_YELLOW_PRIO_4, + OCELOT_STAT_RX_YELLOW_PRIO_5, + OCELOT_STAT_RX_YELLOW_PRIO_6, + OCELOT_STAT_RX_YELLOW_PRIO_7, + OCELOT_STAT_RX_GREEN_PRIO_0, + OCELOT_STAT_RX_GREEN_PRIO_1, + OCELOT_STAT_RX_GREEN_PRIO_2, + OCELOT_STAT_RX_GREEN_PRIO_3, + OCELOT_STAT_RX_GREEN_PRIO_4, + OCELOT_STAT_RX_GREEN_PRIO_5, + OCELOT_STAT_RX_GREEN_PRIO_6, + OCELOT_STAT_RX_GREEN_PRIO_7, + OCELOT_STAT_RX_ASSEMBLY_ERRS, + OCELOT_STAT_RX_SMD_ERRS, + OCELOT_STAT_RX_ASSEMBLY_OK, + OCELOT_STAT_RX_MERGE_FRAGMENTS, + OCELOT_STAT_RX_PMAC_OCTETS, + OCELOT_STAT_RX_PMAC_UNICAST, + OCELOT_STAT_RX_PMAC_MULTICAST, + OCELOT_STAT_RX_PMAC_BROADCAST, + OCELOT_STAT_RX_PMAC_SHORTS, + OCELOT_STAT_RX_PMAC_FRAGMENTS, + OCELOT_STAT_RX_PMAC_JABBERS, + OCELOT_STAT_RX_PMAC_CRC_ALIGN_ERRS, + OCELOT_STAT_RX_PMAC_SYM_ERRS, + OCELOT_STAT_RX_PMAC_64, + OCELOT_STAT_RX_PMAC_65_127, + OCELOT_STAT_RX_PMAC_128_255, + OCELOT_STAT_RX_PMAC_256_511, + OCELOT_STAT_RX_PMAC_512_1023, + OCELOT_STAT_RX_PMAC_1024_1526, + OCELOT_STAT_RX_PMAC_1527_MAX, + OCELOT_STAT_RX_PMAC_PAUSE, + OCELOT_STAT_RX_PMAC_CONTROL, + OCELOT_STAT_RX_PMAC_LONGS, + OCELOT_STAT_TX_OCTETS, + OCELOT_STAT_TX_UNICAST, + OCELOT_STAT_TX_MULTICAST, + OCELOT_STAT_TX_BROADCAST, + OCELOT_STAT_TX_COLLISION, + OCELOT_STAT_TX_DROPS, + OCELOT_STAT_TX_PAUSE, + OCELOT_STAT_TX_64, + OCELOT_STAT_TX_65_127, + OCELOT_STAT_TX_128_255, + OCELOT_STAT_TX_256_511, + OCELOT_STAT_TX_512_1023, + OCELOT_STAT_TX_1024_1526, + OCELOT_STAT_TX_1527_MAX, + OCELOT_STAT_TX_YELLOW_PRIO_0, + OCELOT_STAT_TX_YELLOW_PRIO_1, + OCELOT_STAT_TX_YELLOW_PRIO_2, + OCELOT_STAT_TX_YELLOW_PRIO_3, + OCELOT_STAT_TX_YELLOW_PRIO_4, + OCELOT_STAT_TX_YELLOW_PRIO_5, + OCELOT_STAT_TX_YELLOW_PRIO_6, + OCELOT_STAT_TX_YELLOW_PRIO_7, + OCELOT_STAT_TX_GREEN_PRIO_0, + OCELOT_STAT_TX_GREEN_PRIO_1, + OCELOT_STAT_TX_GREEN_PRIO_2, + OCELOT_STAT_TX_GREEN_PRIO_3, + OCELOT_STAT_TX_GREEN_PRIO_4, + OCELOT_STAT_TX_GREEN_PRIO_5, + OCELOT_STAT_TX_GREEN_PRIO_6, + OCELOT_STAT_TX_GREEN_PRIO_7, + OCELOT_STAT_TX_AGED, + OCELOT_STAT_TX_MM_HOLD, + OCELOT_STAT_TX_MERGE_FRAGMENTS, + OCELOT_STAT_TX_PMAC_OCTETS, + OCELOT_STAT_TX_PMAC_UNICAST, + OCELOT_STAT_TX_PMAC_MULTICAST, + OCELOT_STAT_TX_PMAC_BROADCAST, + OCELOT_STAT_TX_PMAC_PAUSE, + OCELOT_STAT_TX_PMAC_64, + OCELOT_STAT_TX_PMAC_65_127, + OCELOT_STAT_TX_PMAC_128_255, + OCELOT_STAT_TX_PMAC_256_511, + OCELOT_STAT_TX_PMAC_512_1023, + OCELOT_STAT_TX_PMAC_1024_1526, + OCELOT_STAT_TX_PMAC_1527_MAX, + OCELOT_STAT_DROP_LOCAL, + OCELOT_STAT_DROP_TAIL, + OCELOT_STAT_DROP_YELLOW_PRIO_0, + OCELOT_STAT_DROP_YELLOW_PRIO_1, + OCELOT_STAT_DROP_YELLOW_PRIO_2, + OCELOT_STAT_DROP_YELLOW_PRIO_3, + OCELOT_STAT_DROP_YELLOW_PRIO_4, + OCELOT_STAT_DROP_YELLOW_PRIO_5, + OCELOT_STAT_DROP_YELLOW_PRIO_6, + OCELOT_STAT_DROP_YELLOW_PRIO_7, + OCELOT_STAT_DROP_GREEN_PRIO_0, + OCELOT_STAT_DROP_GREEN_PRIO_1, + OCELOT_STAT_DROP_GREEN_PRIO_2, + OCELOT_STAT_DROP_GREEN_PRIO_3, + OCELOT_STAT_DROP_GREEN_PRIO_4, + OCELOT_STAT_DROP_GREEN_PRIO_5, + 
OCELOT_STAT_DROP_GREEN_PRIO_6, + OCELOT_STAT_DROP_GREEN_PRIO_7, + OCELOT_NUM_STATS, +}; + +struct ocelot_stat_layout { + enum ocelot_reg reg; + char name[ETH_GSTRING_LEN]; +}; + +/* 32-bit counter checked for wraparound by ocelot_port_update_stats() + * and copied to ocelot->stats. + */ +#define OCELOT_STAT(kind) \ + [OCELOT_STAT_ ## kind] = { .reg = SYS_COUNT_ ## kind } +/* Same as above, except also exported to ethtool -S. Standard counters should + * only be exposed to more specific interfaces rather than by their string name. + */ +#define OCELOT_STAT_ETHTOOL(kind, ethtool_name) \ + [OCELOT_STAT_ ## kind] = { .reg = SYS_COUNT_ ## kind, .name = ethtool_name } + +#define OCELOT_COMMON_STATS \ + OCELOT_STAT_ETHTOOL(RX_OCTETS, "rx_octets"), \ + OCELOT_STAT_ETHTOOL(RX_UNICAST, "rx_unicast"), \ + OCELOT_STAT_ETHTOOL(RX_MULTICAST, "rx_multicast"), \ + OCELOT_STAT_ETHTOOL(RX_BROADCAST, "rx_broadcast"), \ + OCELOT_STAT_ETHTOOL(RX_SHORTS, "rx_shorts"), \ + OCELOT_STAT_ETHTOOL(RX_FRAGMENTS, "rx_fragments"), \ + OCELOT_STAT_ETHTOOL(RX_JABBERS, "rx_jabbers"), \ + OCELOT_STAT_ETHTOOL(RX_CRC_ALIGN_ERRS, "rx_crc_align_errs"), \ + OCELOT_STAT_ETHTOOL(RX_SYM_ERRS, "rx_sym_errs"), \ + OCELOT_STAT_ETHTOOL(RX_64, "rx_frames_below_65_octets"), \ + OCELOT_STAT_ETHTOOL(RX_65_127, "rx_frames_65_to_127_octets"), \ + OCELOT_STAT_ETHTOOL(RX_128_255, "rx_frames_128_to_255_octets"), \ + OCELOT_STAT_ETHTOOL(RX_256_511, "rx_frames_256_to_511_octets"), \ + OCELOT_STAT_ETHTOOL(RX_512_1023, "rx_frames_512_to_1023_octets"), \ + OCELOT_STAT_ETHTOOL(RX_1024_1526, "rx_frames_1024_to_1526_octets"), \ + OCELOT_STAT_ETHTOOL(RX_1527_MAX, "rx_frames_over_1526_octets"), \ + OCELOT_STAT_ETHTOOL(RX_PAUSE, "rx_pause"), \ + OCELOT_STAT_ETHTOOL(RX_CONTROL, "rx_control"), \ + OCELOT_STAT_ETHTOOL(RX_LONGS, "rx_longs"), \ + OCELOT_STAT_ETHTOOL(RX_CLASSIFIED_DROPS, "rx_classified_drops"), \ + OCELOT_STAT_ETHTOOL(RX_RED_PRIO_0, "rx_red_prio_0"), \ + OCELOT_STAT_ETHTOOL(RX_RED_PRIO_1, "rx_red_prio_1"), \ + OCELOT_STAT_ETHTOOL(RX_RED_PRIO_2, "rx_red_prio_2"), \ + OCELOT_STAT_ETHTOOL(RX_RED_PRIO_3, "rx_red_prio_3"), \ + OCELOT_STAT_ETHTOOL(RX_RED_PRIO_4, "rx_red_prio_4"), \ + OCELOT_STAT_ETHTOOL(RX_RED_PRIO_5, "rx_red_prio_5"), \ + OCELOT_STAT_ETHTOOL(RX_RED_PRIO_6, "rx_red_prio_6"), \ + OCELOT_STAT_ETHTOOL(RX_RED_PRIO_7, "rx_red_prio_7"), \ + OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_0, "rx_yellow_prio_0"), \ + OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_1, "rx_yellow_prio_1"), \ + OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_2, "rx_yellow_prio_2"), \ + OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_3, "rx_yellow_prio_3"), \ + OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_4, "rx_yellow_prio_4"), \ + OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_5, "rx_yellow_prio_5"), \ + OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_6, "rx_yellow_prio_6"), \ + OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_7, "rx_yellow_prio_7"), \ + OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_0, "rx_green_prio_0"), \ + OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_1, "rx_green_prio_1"), \ + OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_2, "rx_green_prio_2"), \ + OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_3, "rx_green_prio_3"), \ + OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_4, "rx_green_prio_4"), \ + OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_5, "rx_green_prio_5"), \ + OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_6, "rx_green_prio_6"), \ + OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_7, "rx_green_prio_7"), \ + OCELOT_STAT_ETHTOOL(TX_OCTETS, "tx_octets"), \ + OCELOT_STAT_ETHTOOL(TX_UNICAST, "tx_unicast"), \ + OCELOT_STAT_ETHTOOL(TX_MULTICAST, "tx_multicast"), \ + OCELOT_STAT_ETHTOOL(TX_BROADCAST, "tx_broadcast"), \ + 
OCELOT_STAT_ETHTOOL(TX_COLLISION, "tx_collision"), \ + OCELOT_STAT_ETHTOOL(TX_DROPS, "tx_drops"), \ + OCELOT_STAT_ETHTOOL(TX_PAUSE, "tx_pause"), \ + OCELOT_STAT_ETHTOOL(TX_64, "tx_frames_below_65_octets"), \ + OCELOT_STAT_ETHTOOL(TX_65_127, "tx_frames_65_to_127_octets"), \ + OCELOT_STAT_ETHTOOL(TX_128_255, "tx_frames_128_255_octets"), \ + OCELOT_STAT_ETHTOOL(TX_256_511, "tx_frames_256_511_octets"), \ + OCELOT_STAT_ETHTOOL(TX_512_1023, "tx_frames_512_1023_octets"), \ + OCELOT_STAT_ETHTOOL(TX_1024_1526, "tx_frames_1024_1526_octets"), \ + OCELOT_STAT_ETHTOOL(TX_1527_MAX, "tx_frames_over_1526_octets"), \ + OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_0, "tx_yellow_prio_0"), \ + OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_1, "tx_yellow_prio_1"), \ + OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_2, "tx_yellow_prio_2"), \ + OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_3, "tx_yellow_prio_3"), \ + OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_4, "tx_yellow_prio_4"), \ + OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_5, "tx_yellow_prio_5"), \ + OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_6, "tx_yellow_prio_6"), \ + OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_7, "tx_yellow_prio_7"), \ + OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_0, "tx_green_prio_0"), \ + OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_1, "tx_green_prio_1"), \ + OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_2, "tx_green_prio_2"), \ + OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_3, "tx_green_prio_3"), \ + OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_4, "tx_green_prio_4"), \ + OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_5, "tx_green_prio_5"), \ + OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_6, "tx_green_prio_6"), \ + OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_7, "tx_green_prio_7"), \ + OCELOT_STAT_ETHTOOL(TX_AGED, "tx_aged"), \ + OCELOT_STAT_ETHTOOL(DROP_LOCAL, "drop_local"), \ + OCELOT_STAT_ETHTOOL(DROP_TAIL, "drop_tail"), \ + OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_0, "drop_yellow_prio_0"), \ + OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_1, "drop_yellow_prio_1"), \ + OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_2, "drop_yellow_prio_2"), \ + OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_3, "drop_yellow_prio_3"), \ + OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_4, "drop_yellow_prio_4"), \ + OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_5, "drop_yellow_prio_5"), \ + OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_6, "drop_yellow_prio_6"), \ + OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_7, "drop_yellow_prio_7"), \ + OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_0, "drop_green_prio_0"), \ + OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_1, "drop_green_prio_1"), \ + OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_2, "drop_green_prio_2"), \ + OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_3, "drop_green_prio_3"), \ + OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_4, "drop_green_prio_4"), \ + OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_5, "drop_green_prio_5"), \ + OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_6, "drop_green_prio_6"), \ + OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_7, "drop_green_prio_7") + +struct ocelot_stats_region { + struct list_head node; + enum ocelot_reg base; + enum ocelot_stat first_stat; + int count; + u32 *buf; +}; + +static const struct ocelot_stat_layout ocelot_stats_layout[OCELOT_NUM_STATS] = { + OCELOT_COMMON_STATS, +}; + +static const struct ocelot_stat_layout ocelot_mm_stats_layout[OCELOT_NUM_STATS] = { + OCELOT_COMMON_STATS, + OCELOT_STAT(RX_ASSEMBLY_ERRS), + OCELOT_STAT(RX_SMD_ERRS), + OCELOT_STAT(RX_ASSEMBLY_OK), + OCELOT_STAT(RX_MERGE_FRAGMENTS), + OCELOT_STAT(TX_MERGE_FRAGMENTS), + OCELOT_STAT(TX_MM_HOLD), + OCELOT_STAT(RX_PMAC_OCTETS), + OCELOT_STAT(RX_PMAC_UNICAST), + OCELOT_STAT(RX_PMAC_MULTICAST), + OCELOT_STAT(RX_PMAC_BROADCAST), + OCELOT_STAT(RX_PMAC_SHORTS), + OCELOT_STAT(RX_PMAC_FRAGMENTS), + 
OCELOT_STAT(RX_PMAC_JABBERS), + OCELOT_STAT(RX_PMAC_CRC_ALIGN_ERRS), + OCELOT_STAT(RX_PMAC_SYM_ERRS), + OCELOT_STAT(RX_PMAC_64), + OCELOT_STAT(RX_PMAC_65_127), + OCELOT_STAT(RX_PMAC_128_255), + OCELOT_STAT(RX_PMAC_256_511), + OCELOT_STAT(RX_PMAC_512_1023), + OCELOT_STAT(RX_PMAC_1024_1526), + OCELOT_STAT(RX_PMAC_1527_MAX), + OCELOT_STAT(RX_PMAC_PAUSE), + OCELOT_STAT(RX_PMAC_CONTROL), + OCELOT_STAT(RX_PMAC_LONGS), + OCELOT_STAT(TX_PMAC_OCTETS), + OCELOT_STAT(TX_PMAC_UNICAST), + OCELOT_STAT(TX_PMAC_MULTICAST), + OCELOT_STAT(TX_PMAC_BROADCAST), + OCELOT_STAT(TX_PMAC_PAUSE), + OCELOT_STAT(TX_PMAC_64), + OCELOT_STAT(TX_PMAC_65_127), + OCELOT_STAT(TX_PMAC_128_255), + OCELOT_STAT(TX_PMAC_256_511), + OCELOT_STAT(TX_PMAC_512_1023), + OCELOT_STAT(TX_PMAC_1024_1526), + OCELOT_STAT(TX_PMAC_1527_MAX), +}; + +static const struct ocelot_stat_layout * +ocelot_get_stats_layout(struct ocelot *ocelot) +{ + if (ocelot->mm_supported) + return ocelot_mm_stats_layout; + + return ocelot_stats_layout; +} + +/* Read the counters from hardware and keep them in region->buf. + * Caller must hold &ocelot->stat_view_lock. + */ +static int ocelot_port_update_stats(struct ocelot *ocelot, int port) +{ + struct ocelot_stats_region *region; + int err; + + /* Configure the port to read the stats from */ + ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port), SYS_STAT_CFG); + + list_for_each_entry(region, &ocelot->stats_regions, node) { + err = ocelot_bulk_read(ocelot, region->base, region->buf, + region->count); + if (err) + return err; + } + + return 0; +} + +/* Transfer the counters from region->buf to ocelot->stats. + * Caller must hold &ocelot->stat_view_lock and &ocelot->stats_lock. + */ +static void ocelot_port_transfer_stats(struct ocelot *ocelot, int port) +{ + struct ocelot_stats_region *region; + int j; + + list_for_each_entry(region, &ocelot->stats_regions, node) { + unsigned int idx = port * OCELOT_NUM_STATS + region->first_stat; + + for (j = 0; j < region->count; j++) { + u64 *stat = &ocelot->stats[idx + j]; + u64 val = region->buf[j]; + + if (val < (*stat & U32_MAX)) + *stat += (u64)1 << 32; + + *stat = (*stat & ~(u64)U32_MAX) + val; + } + } +} + +static void ocelot_check_stats_work(struct work_struct *work) +{ + struct delayed_work *del_work = to_delayed_work(work); + struct ocelot *ocelot = container_of(del_work, struct ocelot, + stats_work); + int port, err; + + mutex_lock(&ocelot->stat_view_lock); + + for (port = 0; port < ocelot->num_phys_ports; port++) { + err = ocelot_port_update_stats(ocelot, port); + if (err) + break; + + spin_lock(&ocelot->stats_lock); + ocelot_port_transfer_stats(ocelot, port); + spin_unlock(&ocelot->stats_lock); + } + + if (!err && ocelot->ops->update_stats) + ocelot->ops->update_stats(ocelot); + + mutex_unlock(&ocelot->stat_view_lock); + + if (err) + dev_err(ocelot->dev, "Error %d updating ethtool stats\n", err); + + queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work, + OCELOT_STATS_CHECK_DELAY); +} + +void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data) +{ + const struct ocelot_stat_layout *layout; + enum ocelot_stat i; + + if (sset != ETH_SS_STATS) + return; + + layout = ocelot_get_stats_layout(ocelot); + + for (i = 0; i < OCELOT_NUM_STATS; i++) { + if (layout[i].name[0] == '\0') + continue; + + memcpy(data, layout[i].name, ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } +} +EXPORT_SYMBOL(ocelot_get_strings); + +/* Update ocelot->stats for the given port and run the given callback */ +static void ocelot_port_stats_run(struct ocelot *ocelot, int port, 
void *priv, + void (*cb)(struct ocelot *ocelot, int port, + void *priv)) +{ + int err; + + mutex_lock(&ocelot->stat_view_lock); + + err = ocelot_port_update_stats(ocelot, port); + if (err) { + dev_err(ocelot->dev, "Failed to update port %d stats: %pe\n", + port, ERR_PTR(err)); + goto out_unlock; + } + + spin_lock(&ocelot->stats_lock); + + ocelot_port_transfer_stats(ocelot, port); + cb(ocelot, port, priv); + + spin_unlock(&ocelot->stats_lock); + +out_unlock: + mutex_unlock(&ocelot->stat_view_lock); +} + +int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset) +{ + const struct ocelot_stat_layout *layout; + enum ocelot_stat i; + int num_stats = 0; + + if (sset != ETH_SS_STATS) + return -EOPNOTSUPP; + + layout = ocelot_get_stats_layout(ocelot); + + for (i = 0; i < OCELOT_NUM_STATS; i++) + if (layout[i].name[0] != '\0') + num_stats++; + + return num_stats; +} +EXPORT_SYMBOL(ocelot_get_sset_count); + +static void ocelot_port_ethtool_stats_cb(struct ocelot *ocelot, int port, + void *priv) +{ + const struct ocelot_stat_layout *layout; + enum ocelot_stat i; + u64 *data = priv; + + layout = ocelot_get_stats_layout(ocelot); + + /* Copy all supported counters */ + for (i = 0; i < OCELOT_NUM_STATS; i++) { + int index = port * OCELOT_NUM_STATS + i; + + if (layout[i].name[0] == '\0') + continue; + + *data++ = ocelot->stats[index]; + } +} + +void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data) +{ + ocelot_port_stats_run(ocelot, port, data, ocelot_port_ethtool_stats_cb); +} +EXPORT_SYMBOL(ocelot_get_ethtool_stats); + +static void ocelot_port_pause_stats_cb(struct ocelot *ocelot, int port, void *priv) +{ + u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS]; + struct ethtool_pause_stats *pause_stats = priv; + + pause_stats->tx_pause_frames = s[OCELOT_STAT_TX_PAUSE]; + pause_stats->rx_pause_frames = s[OCELOT_STAT_RX_PAUSE]; +} + +static void ocelot_port_pmac_pause_stats_cb(struct ocelot *ocelot, int port, + void *priv) +{ + u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS]; + struct ethtool_pause_stats *pause_stats = priv; + + pause_stats->tx_pause_frames = s[OCELOT_STAT_TX_PMAC_PAUSE]; + pause_stats->rx_pause_frames = s[OCELOT_STAT_RX_PMAC_PAUSE]; +} + +static void ocelot_port_mm_stats_cb(struct ocelot *ocelot, int port, + void *priv) +{ + u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS]; + struct ethtool_mm_stats *stats = priv; + + stats->MACMergeFrameAssErrorCount = s[OCELOT_STAT_RX_ASSEMBLY_ERRS]; + stats->MACMergeFrameSmdErrorCount = s[OCELOT_STAT_RX_SMD_ERRS]; + stats->MACMergeFrameAssOkCount = s[OCELOT_STAT_RX_ASSEMBLY_OK]; + stats->MACMergeFragCountRx = s[OCELOT_STAT_RX_MERGE_FRAGMENTS]; + stats->MACMergeFragCountTx = s[OCELOT_STAT_TX_MERGE_FRAGMENTS]; + stats->MACMergeHoldCount = s[OCELOT_STAT_TX_MM_HOLD]; +} + +void ocelot_port_get_pause_stats(struct ocelot *ocelot, int port, + struct ethtool_pause_stats *pause_stats) +{ + struct net_device *dev; + + switch (pause_stats->src) { + case ETHTOOL_MAC_STATS_SRC_EMAC: + ocelot_port_stats_run(ocelot, port, pause_stats, + ocelot_port_pause_stats_cb); + break; + case ETHTOOL_MAC_STATS_SRC_PMAC: + if (ocelot->mm_supported) + ocelot_port_stats_run(ocelot, port, pause_stats, + ocelot_port_pmac_pause_stats_cb); + break; + case ETHTOOL_MAC_STATS_SRC_AGGREGATE: + dev = ocelot->ops->port_to_netdev(ocelot, port); + ethtool_aggregate_pause_stats(dev, pause_stats); + break; + } +} +EXPORT_SYMBOL_GPL(ocelot_port_get_pause_stats); + +void ocelot_port_get_mm_stats(struct ocelot *ocelot, int port, + struct ethtool_mm_stats *stats) +{ + if 
(!ocelot->mm_supported) + return; + + ocelot_port_stats_run(ocelot, port, stats, ocelot_port_mm_stats_cb); +} +EXPORT_SYMBOL_GPL(ocelot_port_get_mm_stats); + +static const struct ethtool_rmon_hist_range ocelot_rmon_ranges[] = { + { 64, 64 }, + { 65, 127 }, + { 128, 255 }, + { 256, 511 }, + { 512, 1023 }, + { 1024, 1526 }, + { 1527, 65535 }, + {}, +}; + +static void ocelot_port_rmon_stats_cb(struct ocelot *ocelot, int port, void *priv) +{ + u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS]; + struct ethtool_rmon_stats *rmon_stats = priv; + + rmon_stats->undersize_pkts = s[OCELOT_STAT_RX_SHORTS]; + rmon_stats->oversize_pkts = s[OCELOT_STAT_RX_LONGS]; + rmon_stats->fragments = s[OCELOT_STAT_RX_FRAGMENTS]; + rmon_stats->jabbers = s[OCELOT_STAT_RX_JABBERS]; + + rmon_stats->hist[0] = s[OCELOT_STAT_RX_64]; + rmon_stats->hist[1] = s[OCELOT_STAT_RX_65_127]; + rmon_stats->hist[2] = s[OCELOT_STAT_RX_128_255]; + rmon_stats->hist[3] = s[OCELOT_STAT_RX_256_511]; + rmon_stats->hist[4] = s[OCELOT_STAT_RX_512_1023]; + rmon_stats->hist[5] = s[OCELOT_STAT_RX_1024_1526]; + rmon_stats->hist[6] = s[OCELOT_STAT_RX_1527_MAX]; + + rmon_stats->hist_tx[0] = s[OCELOT_STAT_TX_64]; + rmon_stats->hist_tx[1] = s[OCELOT_STAT_TX_65_127]; + rmon_stats->hist_tx[2] = s[OCELOT_STAT_TX_128_255]; + rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_256_511]; + rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_512_1023]; + rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_1024_1526]; + rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_1527_MAX]; +} + +static void ocelot_port_pmac_rmon_stats_cb(struct ocelot *ocelot, int port, + void *priv) +{ + u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS]; + struct ethtool_rmon_stats *rmon_stats = priv; + + rmon_stats->undersize_pkts = s[OCELOT_STAT_RX_PMAC_SHORTS]; + rmon_stats->oversize_pkts = s[OCELOT_STAT_RX_PMAC_LONGS]; + rmon_stats->fragments = s[OCELOT_STAT_RX_PMAC_FRAGMENTS]; + rmon_stats->jabbers = s[OCELOT_STAT_RX_PMAC_JABBERS]; + + rmon_stats->hist[0] = s[OCELOT_STAT_RX_PMAC_64]; + rmon_stats->hist[1] = s[OCELOT_STAT_RX_PMAC_65_127]; + rmon_stats->hist[2] = s[OCELOT_STAT_RX_PMAC_128_255]; + rmon_stats->hist[3] = s[OCELOT_STAT_RX_PMAC_256_511]; + rmon_stats->hist[4] = s[OCELOT_STAT_RX_PMAC_512_1023]; + rmon_stats->hist[5] = s[OCELOT_STAT_RX_PMAC_1024_1526]; + rmon_stats->hist[6] = s[OCELOT_STAT_RX_PMAC_1527_MAX]; + + rmon_stats->hist_tx[0] = s[OCELOT_STAT_TX_PMAC_64]; + rmon_stats->hist_tx[1] = s[OCELOT_STAT_TX_PMAC_65_127]; + rmon_stats->hist_tx[2] = s[OCELOT_STAT_TX_PMAC_128_255]; + rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_PMAC_256_511]; + rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_PMAC_512_1023]; + rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_PMAC_1024_1526]; + rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_PMAC_1527_MAX]; +} + +void ocelot_port_get_rmon_stats(struct ocelot *ocelot, int port, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **ranges) +{ + struct net_device *dev; + + *ranges = ocelot_rmon_ranges; + + switch (rmon_stats->src) { + case ETHTOOL_MAC_STATS_SRC_EMAC: + ocelot_port_stats_run(ocelot, port, rmon_stats, + ocelot_port_rmon_stats_cb); + break; + case ETHTOOL_MAC_STATS_SRC_PMAC: + if (ocelot->mm_supported) + ocelot_port_stats_run(ocelot, port, rmon_stats, + ocelot_port_pmac_rmon_stats_cb); + break; + case ETHTOOL_MAC_STATS_SRC_AGGREGATE: + dev = ocelot->ops->port_to_netdev(ocelot, port); + ethtool_aggregate_rmon_stats(dev, rmon_stats); + break; + } +} +EXPORT_SYMBOL_GPL(ocelot_port_get_rmon_stats); + +static void ocelot_port_ctrl_stats_cb(struct ocelot *ocelot, int port, 
void *priv) +{ + u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS]; + struct ethtool_eth_ctrl_stats *ctrl_stats = priv; + + ctrl_stats->MACControlFramesReceived = s[OCELOT_STAT_RX_CONTROL]; +} + +static void ocelot_port_pmac_ctrl_stats_cb(struct ocelot *ocelot, int port, + void *priv) +{ + u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS]; + struct ethtool_eth_ctrl_stats *ctrl_stats = priv; + + ctrl_stats->MACControlFramesReceived = s[OCELOT_STAT_RX_PMAC_CONTROL]; +} + +void ocelot_port_get_eth_ctrl_stats(struct ocelot *ocelot, int port, + struct ethtool_eth_ctrl_stats *ctrl_stats) +{ + struct net_device *dev; + + switch (ctrl_stats->src) { + case ETHTOOL_MAC_STATS_SRC_EMAC: + ocelot_port_stats_run(ocelot, port, ctrl_stats, + ocelot_port_ctrl_stats_cb); + break; + case ETHTOOL_MAC_STATS_SRC_PMAC: + if (ocelot->mm_supported) + ocelot_port_stats_run(ocelot, port, ctrl_stats, + ocelot_port_pmac_ctrl_stats_cb); + break; + case ETHTOOL_MAC_STATS_SRC_AGGREGATE: + dev = ocelot->ops->port_to_netdev(ocelot, port); + ethtool_aggregate_ctrl_stats(dev, ctrl_stats); + break; + } +} +EXPORT_SYMBOL_GPL(ocelot_port_get_eth_ctrl_stats); + +static void ocelot_port_mac_stats_cb(struct ocelot *ocelot, int port, void *priv) +{ + u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS]; + struct ethtool_eth_mac_stats *mac_stats = priv; + + mac_stats->OctetsTransmittedOK = s[OCELOT_STAT_TX_OCTETS]; + mac_stats->FramesTransmittedOK = s[OCELOT_STAT_TX_64] + + s[OCELOT_STAT_TX_65_127] + + s[OCELOT_STAT_TX_128_255] + + s[OCELOT_STAT_TX_256_511] + + s[OCELOT_STAT_TX_512_1023] + + s[OCELOT_STAT_TX_1024_1526] + + s[OCELOT_STAT_TX_1527_MAX]; + mac_stats->OctetsReceivedOK = s[OCELOT_STAT_RX_OCTETS]; + mac_stats->FramesReceivedOK = s[OCELOT_STAT_RX_GREEN_PRIO_0] + + s[OCELOT_STAT_RX_GREEN_PRIO_1] + + s[OCELOT_STAT_RX_GREEN_PRIO_2] + + s[OCELOT_STAT_RX_GREEN_PRIO_3] + + s[OCELOT_STAT_RX_GREEN_PRIO_4] + + s[OCELOT_STAT_RX_GREEN_PRIO_5] + + s[OCELOT_STAT_RX_GREEN_PRIO_6] + + s[OCELOT_STAT_RX_GREEN_PRIO_7] + + s[OCELOT_STAT_RX_YELLOW_PRIO_0] + + s[OCELOT_STAT_RX_YELLOW_PRIO_1] + + s[OCELOT_STAT_RX_YELLOW_PRIO_2] + + s[OCELOT_STAT_RX_YELLOW_PRIO_3] + + s[OCELOT_STAT_RX_YELLOW_PRIO_4] + + s[OCELOT_STAT_RX_YELLOW_PRIO_5] + + s[OCELOT_STAT_RX_YELLOW_PRIO_6] + + s[OCELOT_STAT_RX_YELLOW_PRIO_7]; + mac_stats->MulticastFramesXmittedOK = s[OCELOT_STAT_TX_MULTICAST]; + mac_stats->BroadcastFramesXmittedOK = s[OCELOT_STAT_TX_BROADCAST]; + mac_stats->MulticastFramesReceivedOK = s[OCELOT_STAT_RX_MULTICAST]; + mac_stats->BroadcastFramesReceivedOK = s[OCELOT_STAT_RX_BROADCAST]; + mac_stats->FrameTooLongErrors = s[OCELOT_STAT_RX_LONGS]; + /* Sadly, C_RX_CRC is the sum of FCS and alignment errors, they are not + * counted individually. 
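+ * The combined value is therefore reported in both fields below.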
+ */ + mac_stats->FrameCheckSequenceErrors = s[OCELOT_STAT_RX_CRC_ALIGN_ERRS]; + mac_stats->AlignmentErrors = s[OCELOT_STAT_RX_CRC_ALIGN_ERRS]; +} + +static void ocelot_port_pmac_mac_stats_cb(struct ocelot *ocelot, int port, + void *priv) +{ + u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS]; + struct ethtool_eth_mac_stats *mac_stats = priv; + + mac_stats->OctetsTransmittedOK = s[OCELOT_STAT_TX_PMAC_OCTETS]; + mac_stats->FramesTransmittedOK = s[OCELOT_STAT_TX_PMAC_64] + + s[OCELOT_STAT_TX_PMAC_65_127] + + s[OCELOT_STAT_TX_PMAC_128_255] + + s[OCELOT_STAT_TX_PMAC_256_511] + + s[OCELOT_STAT_TX_PMAC_512_1023] + + s[OCELOT_STAT_TX_PMAC_1024_1526] + + s[OCELOT_STAT_TX_PMAC_1527_MAX]; + mac_stats->OctetsReceivedOK = s[OCELOT_STAT_RX_PMAC_OCTETS]; + mac_stats->FramesReceivedOK = s[OCELOT_STAT_RX_PMAC_64] + + s[OCELOT_STAT_RX_PMAC_65_127] + + s[OCELOT_STAT_RX_PMAC_128_255] + + s[OCELOT_STAT_RX_PMAC_256_511] + + s[OCELOT_STAT_RX_PMAC_512_1023] + + s[OCELOT_STAT_RX_PMAC_1024_1526] + + s[OCELOT_STAT_RX_PMAC_1527_MAX]; + mac_stats->MulticastFramesXmittedOK = s[OCELOT_STAT_TX_PMAC_MULTICAST]; + mac_stats->BroadcastFramesXmittedOK = s[OCELOT_STAT_TX_PMAC_BROADCAST]; + mac_stats->MulticastFramesReceivedOK = s[OCELOT_STAT_RX_PMAC_MULTICAST]; + mac_stats->BroadcastFramesReceivedOK = s[OCELOT_STAT_RX_PMAC_BROADCAST]; + mac_stats->FrameTooLongErrors = s[OCELOT_STAT_RX_PMAC_LONGS]; + /* Sadly, C_RX_CRC is the sum of FCS and alignment errors, they are not + * counted individually. + */ + mac_stats->FrameCheckSequenceErrors = s[OCELOT_STAT_RX_PMAC_CRC_ALIGN_ERRS]; + mac_stats->AlignmentErrors = s[OCELOT_STAT_RX_PMAC_CRC_ALIGN_ERRS]; +} + +void ocelot_port_get_eth_mac_stats(struct ocelot *ocelot, int port, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct net_device *dev; + + switch (mac_stats->src) { + case ETHTOOL_MAC_STATS_SRC_EMAC: + ocelot_port_stats_run(ocelot, port, mac_stats, + ocelot_port_mac_stats_cb); + break; + case ETHTOOL_MAC_STATS_SRC_PMAC: + if (ocelot->mm_supported) + ocelot_port_stats_run(ocelot, port, mac_stats, + ocelot_port_pmac_mac_stats_cb); + break; + case ETHTOOL_MAC_STATS_SRC_AGGREGATE: + dev = ocelot->ops->port_to_netdev(ocelot, port); + ethtool_aggregate_mac_stats(dev, mac_stats); + break; + } +} +EXPORT_SYMBOL_GPL(ocelot_port_get_eth_mac_stats); + +static void ocelot_port_phy_stats_cb(struct ocelot *ocelot, int port, void *priv) +{ + u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS]; + struct ethtool_eth_phy_stats *phy_stats = priv; + + phy_stats->SymbolErrorDuringCarrier = s[OCELOT_STAT_RX_SYM_ERRS]; +} + +static void ocelot_port_pmac_phy_stats_cb(struct ocelot *ocelot, int port, + void *priv) +{ + u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS]; + struct ethtool_eth_phy_stats *phy_stats = priv; + + phy_stats->SymbolErrorDuringCarrier = s[OCELOT_STAT_RX_PMAC_SYM_ERRS]; +} + +void ocelot_port_get_eth_phy_stats(struct ocelot *ocelot, int port, + struct ethtool_eth_phy_stats *phy_stats) +{ + struct net_device *dev; + + switch (phy_stats->src) { + case ETHTOOL_MAC_STATS_SRC_EMAC: + ocelot_port_stats_run(ocelot, port, phy_stats, + ocelot_port_phy_stats_cb); + break; + case ETHTOOL_MAC_STATS_SRC_PMAC: + if (ocelot->mm_supported) + ocelot_port_stats_run(ocelot, port, phy_stats, + ocelot_port_pmac_phy_stats_cb); + break; + case ETHTOOL_MAC_STATS_SRC_AGGREGATE: + dev = ocelot->ops->port_to_netdev(ocelot, port); + ethtool_aggregate_phy_stats(dev, phy_stats); + break; + } +} +EXPORT_SYMBOL_GPL(ocelot_port_get_eth_phy_stats); + +void ocelot_port_get_stats64(struct ocelot *ocelot, int port, 
+ struct rtnl_link_stats64 *stats) +{ + u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS]; + + spin_lock(&ocelot->stats_lock); + + /* Get Rx stats */ + stats->rx_bytes = s[OCELOT_STAT_RX_OCTETS]; + stats->rx_packets = s[OCELOT_STAT_RX_SHORTS] + + s[OCELOT_STAT_RX_FRAGMENTS] + + s[OCELOT_STAT_RX_JABBERS] + + s[OCELOT_STAT_RX_LONGS] + + s[OCELOT_STAT_RX_64] + + s[OCELOT_STAT_RX_65_127] + + s[OCELOT_STAT_RX_128_255] + + s[OCELOT_STAT_RX_256_511] + + s[OCELOT_STAT_RX_512_1023] + + s[OCELOT_STAT_RX_1024_1526] + + s[OCELOT_STAT_RX_1527_MAX]; + stats->multicast = s[OCELOT_STAT_RX_MULTICAST]; + stats->rx_missed_errors = s[OCELOT_STAT_DROP_TAIL]; + stats->rx_dropped = s[OCELOT_STAT_RX_RED_PRIO_0] + + s[OCELOT_STAT_RX_RED_PRIO_1] + + s[OCELOT_STAT_RX_RED_PRIO_2] + + s[OCELOT_STAT_RX_RED_PRIO_3] + + s[OCELOT_STAT_RX_RED_PRIO_4] + + s[OCELOT_STAT_RX_RED_PRIO_5] + + s[OCELOT_STAT_RX_RED_PRIO_6] + + s[OCELOT_STAT_RX_RED_PRIO_7] + + s[OCELOT_STAT_DROP_LOCAL] + + s[OCELOT_STAT_DROP_YELLOW_PRIO_0] + + s[OCELOT_STAT_DROP_YELLOW_PRIO_1] + + s[OCELOT_STAT_DROP_YELLOW_PRIO_2] + + s[OCELOT_STAT_DROP_YELLOW_PRIO_3] + + s[OCELOT_STAT_DROP_YELLOW_PRIO_4] + + s[OCELOT_STAT_DROP_YELLOW_PRIO_5] + + s[OCELOT_STAT_DROP_YELLOW_PRIO_6] + + s[OCELOT_STAT_DROP_YELLOW_PRIO_7] + + s[OCELOT_STAT_DROP_GREEN_PRIO_0] + + s[OCELOT_STAT_DROP_GREEN_PRIO_1] + + s[OCELOT_STAT_DROP_GREEN_PRIO_2] + + s[OCELOT_STAT_DROP_GREEN_PRIO_3] + + s[OCELOT_STAT_DROP_GREEN_PRIO_4] + + s[OCELOT_STAT_DROP_GREEN_PRIO_5] + + s[OCELOT_STAT_DROP_GREEN_PRIO_6] + + s[OCELOT_STAT_DROP_GREEN_PRIO_7]; + + /* Get Tx stats */ + stats->tx_bytes = s[OCELOT_STAT_TX_OCTETS]; + stats->tx_packets = s[OCELOT_STAT_TX_64] + + s[OCELOT_STAT_TX_65_127] + + s[OCELOT_STAT_TX_128_255] + + s[OCELOT_STAT_TX_256_511] + + s[OCELOT_STAT_TX_512_1023] + + s[OCELOT_STAT_TX_1024_1526] + + s[OCELOT_STAT_TX_1527_MAX]; + stats->tx_dropped = s[OCELOT_STAT_TX_DROPS] + + s[OCELOT_STAT_TX_AGED]; + stats->collisions = s[OCELOT_STAT_TX_COLLISION]; + + spin_unlock(&ocelot->stats_lock); +} +EXPORT_SYMBOL(ocelot_port_get_stats64); + +static int ocelot_prepare_stats_regions(struct ocelot *ocelot) +{ + struct ocelot_stats_region *region = NULL; + const struct ocelot_stat_layout *layout; + enum ocelot_reg last = 0; + enum ocelot_stat i; + + INIT_LIST_HEAD(&ocelot->stats_regions); + + layout = ocelot_get_stats_layout(ocelot); + + for (i = 0; i < OCELOT_NUM_STATS; i++) { + if (!layout[i].reg) + continue; + + /* enum ocelot_stat must be kept sorted in the same order + * as the addresses behind layout[i].reg in order to have + * efficient bulking + */ + if (last) { + WARN(ocelot->map[SYS][last & REG_MASK] >= ocelot->map[SYS][layout[i].reg & REG_MASK], + "reg 0x%x had address 0x%x but reg 0x%x has address 0x%x, bulking broken!", + last, ocelot->map[SYS][last & REG_MASK], + layout[i].reg, ocelot->map[SYS][layout[i].reg & REG_MASK]); + } + + if (region && ocelot->map[SYS][layout[i].reg & REG_MASK] == + ocelot->map[SYS][last & REG_MASK] + 4) { + region->count++; + } else { + region = devm_kzalloc(ocelot->dev, sizeof(*region), + GFP_KERNEL); + if (!region) + return -ENOMEM; + + region->base = layout[i].reg; + region->first_stat = i; + region->count = 1; + list_add_tail(®ion->node, &ocelot->stats_regions); + } + + last = layout[i].reg; + } + + list_for_each_entry(region, &ocelot->stats_regions, node) { + enum ocelot_target target; + u32 addr; + + ocelot_reg_to_target_addr(ocelot, region->base, &target, + &addr); + + dev_dbg(ocelot->dev, + "region of %d contiguous counters starting with 
SYS:STAT:CNT[0x%03x]\n", + region->count, addr / 4); + region->buf = devm_kcalloc(ocelot->dev, region->count, + sizeof(*region->buf), GFP_KERNEL); + if (!region->buf) + return -ENOMEM; + } + + return 0; +} + +int ocelot_stats_init(struct ocelot *ocelot) +{ + char queue_name[32]; + int ret; + + ocelot->stats = devm_kcalloc(ocelot->dev, + ocelot->num_phys_ports * OCELOT_NUM_STATS, + sizeof(u64), GFP_KERNEL); + if (!ocelot->stats) + return -ENOMEM; + + snprintf(queue_name, sizeof(queue_name), "%s-stats", + dev_name(ocelot->dev)); + ocelot->stats_queue = create_singlethread_workqueue(queue_name); + if (!ocelot->stats_queue) + return -ENOMEM; + + spin_lock_init(&ocelot->stats_lock); + mutex_init(&ocelot->stat_view_lock); + + ret = ocelot_prepare_stats_regions(ocelot); + if (ret) { + destroy_workqueue(ocelot->stats_queue); + return ret; + } + + INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work); + queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work, + OCELOT_STATS_CHECK_DELAY); + + return 0; +} + +void ocelot_stats_deinit(struct ocelot *ocelot) +{ + cancel_delayed_work(&ocelot->stats_work); + destroy_workqueue(ocelot->stats_queue); +} diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c new file mode 100644 index 0000000000..73cdec5ca6 --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_vcap.c @@ -0,0 +1,1432 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Microsemi Ocelot Switch driver + * Copyright (c) 2019 Microsemi Corporation + */ + +#include <linux/iopoll.h> +#include <linux/proc_fs.h> + +#include <soc/mscc/ocelot_vcap.h> +#include "ocelot_police.h" +#include "ocelot_vcap.h" + +#define ENTRY_WIDTH 32 + +enum vcap_sel { + VCAP_SEL_ENTRY = 0x1, + VCAP_SEL_ACTION = 0x2, + VCAP_SEL_COUNTER = 0x4, + VCAP_SEL_ALL = 0x7, +}; + +enum vcap_cmd { + VCAP_CMD_WRITE = 0, /* Copy from Cache to TCAM */ + VCAP_CMD_READ = 1, /* Copy from TCAM to Cache */ + VCAP_CMD_MOVE_UP = 2, /* Move <count> up */ + VCAP_CMD_MOVE_DOWN = 3, /* Move <count> down */ + VCAP_CMD_INITIALIZE = 4, /* Write all (from cache) */ +}; + +#define VCAP_ENTRY_WIDTH 12 /* Max entry width (32bit words) */ +#define VCAP_COUNTER_WIDTH 4 /* Max counter width (32bit words) */ + +struct vcap_data { + u32 entry[VCAP_ENTRY_WIDTH]; /* ENTRY_DAT */ + u32 mask[VCAP_ENTRY_WIDTH]; /* MASK_DAT */ + u32 action[VCAP_ENTRY_WIDTH]; /* ACTION_DAT */ + u32 counter[VCAP_COUNTER_WIDTH]; /* CNT_DAT */ + u32 tg; /* TG_DAT */ + u32 type; /* Action type */ + u32 tg_sw; /* Current type-group */ + u32 cnt; /* Current counter */ + u32 key_offset; /* Current entry offset */ + u32 action_offset; /* Current action offset */ + u32 counter_offset; /* Current counter offset */ + u32 tg_value; /* Current type-group value */ + u32 tg_mask; /* Current type-group mask */ +}; + +static u32 vcap_read_update_ctrl(struct ocelot *ocelot, + const struct vcap_props *vcap) +{ + return ocelot_target_read(ocelot, vcap->target, VCAP_CORE_UPDATE_CTRL); +} + +static void vcap_cmd(struct ocelot *ocelot, const struct vcap_props *vcap, + u16 ix, int cmd, int sel) +{ + u32 value = (VCAP_CORE_UPDATE_CTRL_UPDATE_CMD(cmd) | + VCAP_CORE_UPDATE_CTRL_UPDATE_ADDR(ix) | + VCAP_CORE_UPDATE_CTRL_UPDATE_SHOT); + + if ((sel & VCAP_SEL_ENTRY) && ix >= vcap->entry_count) + return; + + if (!(sel & VCAP_SEL_ENTRY)) + value |= VCAP_CORE_UPDATE_CTRL_UPDATE_ENTRY_DIS; + + if (!(sel & VCAP_SEL_ACTION)) + value |= VCAP_CORE_UPDATE_CTRL_UPDATE_ACTION_DIS; + + if (!(sel & VCAP_SEL_COUNTER)) + value |= VCAP_CORE_UPDATE_CTRL_UPDATE_CNT_DIS; + + 
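/* Issue the one-shot command; the poll below waits for UPDATE_SHOT
+ * to self-clear, which signals that the copy between the cache and
+ * the TCAM has completed.
+ */
+ 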
ocelot_target_write(ocelot, vcap->target, value, VCAP_CORE_UPDATE_CTRL); + + read_poll_timeout(vcap_read_update_ctrl, value, + (value & VCAP_CORE_UPDATE_CTRL_UPDATE_SHOT) == 0, + 10, 100000, false, ocelot, vcap); +} + +/* Convert from 0-based row to VCAP entry row and run command */ +static void vcap_row_cmd(struct ocelot *ocelot, const struct vcap_props *vcap, + u32 row, int cmd, int sel) +{ + vcap_cmd(ocelot, vcap, vcap->entry_count - row - 1, cmd, sel); +} + +static void vcap_entry2cache(struct ocelot *ocelot, + const struct vcap_props *vcap, + struct vcap_data *data) +{ + u32 entry_words, i; + + entry_words = DIV_ROUND_UP(vcap->entry_width, ENTRY_WIDTH); + + for (i = 0; i < entry_words; i++) { + ocelot_target_write_rix(ocelot, vcap->target, data->entry[i], + VCAP_CACHE_ENTRY_DAT, i); + ocelot_target_write_rix(ocelot, vcap->target, ~data->mask[i], + VCAP_CACHE_MASK_DAT, i); + } + ocelot_target_write(ocelot, vcap->target, data->tg, VCAP_CACHE_TG_DAT); +} + +static void vcap_cache2entry(struct ocelot *ocelot, + const struct vcap_props *vcap, + struct vcap_data *data) +{ + u32 entry_words, i; + + entry_words = DIV_ROUND_UP(vcap->entry_width, ENTRY_WIDTH); + + for (i = 0; i < entry_words; i++) { + data->entry[i] = ocelot_target_read_rix(ocelot, vcap->target, + VCAP_CACHE_ENTRY_DAT, i); + // Invert mask + data->mask[i] = ~ocelot_target_read_rix(ocelot, vcap->target, + VCAP_CACHE_MASK_DAT, i); + } + data->tg = ocelot_target_read(ocelot, vcap->target, VCAP_CACHE_TG_DAT); +} + +static void vcap_action2cache(struct ocelot *ocelot, + const struct vcap_props *vcap, + struct vcap_data *data) +{ + u32 action_words, mask; + int i, width; + + /* Encode action type */ + width = vcap->action_type_width; + if (width) { + mask = GENMASK(width, 0); + data->action[0] = ((data->action[0] & ~mask) | data->type); + } + + action_words = DIV_ROUND_UP(vcap->action_width, ENTRY_WIDTH); + + for (i = 0; i < action_words; i++) + ocelot_target_write_rix(ocelot, vcap->target, data->action[i], + VCAP_CACHE_ACTION_DAT, i); + + for (i = 0; i < vcap->counter_words; i++) + ocelot_target_write_rix(ocelot, vcap->target, data->counter[i], + VCAP_CACHE_CNT_DAT, i); +} + +static void vcap_cache2action(struct ocelot *ocelot, + const struct vcap_props *vcap, + struct vcap_data *data) +{ + u32 action_words; + int i, width; + + action_words = DIV_ROUND_UP(vcap->action_width, ENTRY_WIDTH); + + for (i = 0; i < action_words; i++) + data->action[i] = ocelot_target_read_rix(ocelot, vcap->target, + VCAP_CACHE_ACTION_DAT, + i); + + for (i = 0; i < vcap->counter_words; i++) + data->counter[i] = ocelot_target_read_rix(ocelot, vcap->target, + VCAP_CACHE_CNT_DAT, + i); + + /* Extract action type */ + width = vcap->action_type_width; + data->type = (width ? 
(data->action[0] & GENMASK(width, 0)) : 0); +} + +/* Calculate offsets for entry */ +static void vcap_data_offset_get(const struct vcap_props *vcap, + struct vcap_data *data, int ix) +{ + int num_subwords_per_entry, num_subwords_per_action; + int i, col, offset, num_entries_per_row, base; + u32 width = vcap->tg_width; + + switch (data->tg_sw) { + case VCAP_TG_FULL: + num_entries_per_row = 1; + break; + case VCAP_TG_HALF: + num_entries_per_row = 2; + break; + case VCAP_TG_QUARTER: + num_entries_per_row = 4; + break; + default: + return; + } + + col = (ix % num_entries_per_row); + num_subwords_per_entry = (vcap->sw_count / num_entries_per_row); + base = (vcap->sw_count - col * num_subwords_per_entry - + num_subwords_per_entry); + data->tg_value = 0; + data->tg_mask = 0; + for (i = 0; i < num_subwords_per_entry; i++) { + offset = ((base + i) * width); + data->tg_value |= (data->tg_sw << offset); + data->tg_mask |= GENMASK(offset + width - 1, offset); + } + + /* Calculate key/action/counter offsets */ + col = (num_entries_per_row - col - 1); + data->key_offset = (base * vcap->entry_width) / vcap->sw_count; + data->counter_offset = (num_subwords_per_entry * col * + vcap->counter_width); + i = data->type; + width = vcap->action_table[i].width; + num_subwords_per_action = vcap->action_table[i].count; + data->action_offset = ((num_subwords_per_action * col * width) / + num_entries_per_row); + data->action_offset += vcap->action_type_width; +} + +static void vcap_data_set(u32 *data, u32 offset, u32 len, u32 value) +{ + u32 i, v, m; + + for (i = 0; i < len; i++, offset++) { + v = data[offset / ENTRY_WIDTH]; + m = (1 << (offset % ENTRY_WIDTH)); + if (value & (1 << i)) + v |= m; + else + v &= ~m; + data[offset / ENTRY_WIDTH] = v; + } +} + +static u32 vcap_data_get(u32 *data, u32 offset, u32 len) +{ + u32 i, v, m, value = 0; + + for (i = 0; i < len; i++, offset++) { + v = data[offset / ENTRY_WIDTH]; + m = (1 << (offset % ENTRY_WIDTH)); + if (v & m) + value |= (1 << i); + } + return value; +} + +static void vcap_key_field_set(struct vcap_data *data, u32 offset, u32 width, + u32 value, u32 mask) +{ + vcap_data_set(data->entry, offset + data->key_offset, width, value); + vcap_data_set(data->mask, offset + data->key_offset, width, mask); +} + +static void vcap_key_set(const struct vcap_props *vcap, struct vcap_data *data, + int field, u32 value, u32 mask) +{ + u32 offset = vcap->keys[field].offset; + u32 length = vcap->keys[field].length; + + vcap_key_field_set(data, offset, length, value, mask); +} + +static void vcap_key_bytes_set(const struct vcap_props *vcap, + struct vcap_data *data, int field, + u8 *val, u8 *msk) +{ + u32 offset = vcap->keys[field].offset; + u32 count = vcap->keys[field].length; + u32 i, j, n = 0, value = 0, mask = 0; + + WARN_ON(count % 8); + + /* Data wider than 32 bits are split up in chunks of maximum 32 bits. + * The 32 LSB of the data are written to the 32 MSB of the TCAM. 
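+ * For example, a 48-bit MAC address is written as one 32-bit chunk
+ * holding its four least significant bytes, followed by a 16-bit
+ * chunk with the remaining two bytes, at decreasing key offsets.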
+ */ + offset += count; + count /= 8; + + for (i = 0; i < count; i++) { + j = (count - i - 1); + value += (val[j] << n); + mask += (msk[j] << n); + n += 8; + if (n == ENTRY_WIDTH || (i + 1) == count) { + offset -= n; + vcap_key_field_set(data, offset, n, value, mask); + n = 0; + value = 0; + mask = 0; + } + } +} + +static void vcap_key_l4_port_set(const struct vcap_props *vcap, + struct vcap_data *data, int field, + struct ocelot_vcap_udp_tcp *port) +{ + u32 offset = vcap->keys[field].offset; + u32 length = vcap->keys[field].length; + + WARN_ON(length != 16); + + vcap_key_field_set(data, offset, length, port->value, port->mask); +} + +static void vcap_key_bit_set(const struct vcap_props *vcap, + struct vcap_data *data, int field, + enum ocelot_vcap_bit val) +{ + u32 value = (val == OCELOT_VCAP_BIT_1 ? 1 : 0); + u32 msk = (val == OCELOT_VCAP_BIT_ANY ? 0 : 1); + u32 offset = vcap->keys[field].offset; + u32 length = vcap->keys[field].length; + + WARN_ON(length != 1); + + vcap_key_field_set(data, offset, length, value, msk); +} + +static void vcap_action_set(const struct vcap_props *vcap, + struct vcap_data *data, int field, u32 value) +{ + int offset = vcap->actions[field].offset; + int length = vcap->actions[field].length; + + vcap_data_set(data->action, offset + data->action_offset, length, + value); +} + +static void is2_action_set(struct ocelot *ocelot, struct vcap_data *data, + struct ocelot_vcap_filter *filter) +{ + const struct vcap_props *vcap = &ocelot->vcap[VCAP_IS2]; + struct ocelot_vcap_action *a = &filter->action; + + vcap_action_set(vcap, data, VCAP_IS2_ACT_MASK_MODE, a->mask_mode); + vcap_action_set(vcap, data, VCAP_IS2_ACT_PORT_MASK, a->port_mask); + vcap_action_set(vcap, data, VCAP_IS2_ACT_MIRROR_ENA, a->mirror_ena); + vcap_action_set(vcap, data, VCAP_IS2_ACT_POLICE_ENA, a->police_ena); + vcap_action_set(vcap, data, VCAP_IS2_ACT_POLICE_IDX, a->pol_ix); + vcap_action_set(vcap, data, VCAP_IS2_ACT_CPU_QU_NUM, a->cpu_qu_num); + vcap_action_set(vcap, data, VCAP_IS2_ACT_CPU_COPY_ENA, a->cpu_copy_ena); +} + +static void is2_entry_set(struct ocelot *ocelot, int ix, + struct ocelot_vcap_filter *filter) +{ + const struct vcap_props *vcap = &ocelot->vcap[VCAP_IS2]; + struct ocelot_vcap_key_vlan *tag = &filter->vlan; + u32 val, msk, type, type_mask = 0xf, i, count; + struct ocelot_vcap_u64 payload; + struct vcap_data data; + int row = (ix / 2); + + memset(&payload, 0, sizeof(payload)); + memset(&data, 0, sizeof(data)); + + /* Read row */ + vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_READ, VCAP_SEL_ALL); + vcap_cache2entry(ocelot, vcap, &data); + vcap_cache2action(ocelot, vcap, &data); + + data.tg_sw = VCAP_TG_HALF; + vcap_data_offset_get(vcap, &data, ix); + data.tg = (data.tg & ~data.tg_mask); + if (filter->prio != 0) + data.tg |= data.tg_value; + + data.type = IS2_ACTION_TYPE_NORMAL; + + vcap_key_set(vcap, &data, VCAP_IS2_HK_PAG, filter->pag, 0xff); + vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_FIRST, + (filter->lookup == 0) ? 
OCELOT_VCAP_BIT_1 : + OCELOT_VCAP_BIT_0); + vcap_key_set(vcap, &data, VCAP_IS2_HK_IGR_PORT_MASK, 0, + ~filter->ingress_port_mask); + vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_HOST_MATCH, + OCELOT_VCAP_BIT_ANY); + vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L2_MC, filter->dmac_mc); + vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L2_BC, filter->dmac_bc); + vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_VLAN_TAGGED, tag->tagged); + vcap_key_set(vcap, &data, VCAP_IS2_HK_VID, + tag->vid.value, tag->vid.mask); + vcap_key_set(vcap, &data, VCAP_IS2_HK_PCP, + tag->pcp.value[0], tag->pcp.mask[0]); + vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_DEI, tag->dei); + + switch (filter->key_type) { + case OCELOT_VCAP_KEY_ETYPE: { + struct ocelot_vcap_key_etype *etype = &filter->key.etype; + + type = IS2_TYPE_ETYPE; + vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L2_DMAC, + etype->dmac.value, etype->dmac.mask); + vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L2_SMAC, + etype->smac.value, etype->smac.mask); + vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_MAC_ETYPE_ETYPE, + etype->etype.value, etype->etype.mask); + /* Clear unused bits */ + vcap_key_set(vcap, &data, VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0, + 0, 0); + vcap_key_set(vcap, &data, VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1, + 0, 0); + vcap_key_set(vcap, &data, VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2, + 0, 0); + vcap_key_bytes_set(vcap, &data, + VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0, + etype->data.value, etype->data.mask); + break; + } + case OCELOT_VCAP_KEY_LLC: { + struct ocelot_vcap_key_llc *llc = &filter->key.llc; + + type = IS2_TYPE_LLC; + vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L2_DMAC, + llc->dmac.value, llc->dmac.mask); + vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L2_SMAC, + llc->smac.value, llc->smac.mask); + for (i = 0; i < 4; i++) { + payload.value[i] = llc->llc.value[i]; + payload.mask[i] = llc->llc.mask[i]; + } + vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_MAC_LLC_L2_LLC, + payload.value, payload.mask); + break; + } + case OCELOT_VCAP_KEY_SNAP: { + struct ocelot_vcap_key_snap *snap = &filter->key.snap; + + type = IS2_TYPE_SNAP; + vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L2_DMAC, + snap->dmac.value, snap->dmac.mask); + vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L2_SMAC, + snap->smac.value, snap->smac.mask); + vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_MAC_SNAP_L2_SNAP, + filter->key.snap.snap.value, + filter->key.snap.snap.mask); + break; + } + case OCELOT_VCAP_KEY_ARP: { + struct ocelot_vcap_key_arp *arp = &filter->key.arp; + + type = IS2_TYPE_ARP; + vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_MAC_ARP_SMAC, + arp->smac.value, arp->smac.mask); + vcap_key_bit_set(vcap, &data, + VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK, + arp->ethernet); + vcap_key_bit_set(vcap, &data, + VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK, + arp->ip); + vcap_key_bit_set(vcap, &data, + VCAP_IS2_HK_MAC_ARP_LEN_OK, + arp->length); + vcap_key_bit_set(vcap, &data, + VCAP_IS2_HK_MAC_ARP_TARGET_MATCH, + arp->dmac_match); + vcap_key_bit_set(vcap, &data, + VCAP_IS2_HK_MAC_ARP_SENDER_MATCH, + arp->smac_match); + vcap_key_bit_set(vcap, &data, + VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN, + arp->unknown); + + /* OPCODE is inverse, bit 0 is reply flag, bit 1 is RARP flag */ + val = ((arp->req == OCELOT_VCAP_BIT_0 ? 1 : 0) | + (arp->arp == OCELOT_VCAP_BIT_0 ? 2 : 0)); + msk = ((arp->req == OCELOT_VCAP_BIT_ANY ? 0 : 1) | + (arp->arp == OCELOT_VCAP_BIT_ANY ? 
0 : 2)); + vcap_key_set(vcap, &data, VCAP_IS2_HK_MAC_ARP_OPCODE, + val, msk); + vcap_key_bytes_set(vcap, &data, + VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP, + arp->dip.value.addr, arp->dip.mask.addr); + vcap_key_bytes_set(vcap, &data, + VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP, + arp->sip.value.addr, arp->sip.mask.addr); + vcap_key_set(vcap, &data, VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP, + 0, 0); + break; + } + case OCELOT_VCAP_KEY_IPV4: + case OCELOT_VCAP_KEY_IPV6: { + enum ocelot_vcap_bit sip_eq_dip, sport_eq_dport, seq_zero, tcp; + enum ocelot_vcap_bit ttl, fragment, options, tcp_ack, tcp_urg; + enum ocelot_vcap_bit tcp_fin, tcp_syn, tcp_rst, tcp_psh; + struct ocelot_vcap_key_ipv4 *ipv4 = NULL; + struct ocelot_vcap_key_ipv6 *ipv6 = NULL; + struct ocelot_vcap_udp_tcp *sport, *dport; + struct ocelot_vcap_ipv4 sip, dip; + struct ocelot_vcap_u8 proto, ds; + struct ocelot_vcap_u48 *ip_data; + + if (filter->key_type == OCELOT_VCAP_KEY_IPV4) { + ipv4 = &filter->key.ipv4; + ttl = ipv4->ttl; + fragment = ipv4->fragment; + options = ipv4->options; + proto = ipv4->proto; + ds = ipv4->ds; + ip_data = &ipv4->data; + sip = ipv4->sip; + dip = ipv4->dip; + sport = &ipv4->sport; + dport = &ipv4->dport; + tcp_fin = ipv4->tcp_fin; + tcp_syn = ipv4->tcp_syn; + tcp_rst = ipv4->tcp_rst; + tcp_psh = ipv4->tcp_psh; + tcp_ack = ipv4->tcp_ack; + tcp_urg = ipv4->tcp_urg; + sip_eq_dip = ipv4->sip_eq_dip; + sport_eq_dport = ipv4->sport_eq_dport; + seq_zero = ipv4->seq_zero; + } else { + ipv6 = &filter->key.ipv6; + ttl = ipv6->ttl; + fragment = OCELOT_VCAP_BIT_ANY; + options = OCELOT_VCAP_BIT_ANY; + proto = ipv6->proto; + ds = ipv6->ds; + ip_data = &ipv6->data; + for (i = 0; i < 8; i++) { + val = ipv6->sip.value[i + 8]; + msk = ipv6->sip.mask[i + 8]; + if (i < 4) { + dip.value.addr[i] = val; + dip.mask.addr[i] = msk; + } else { + sip.value.addr[i - 4] = val; + sip.mask.addr[i - 4] = msk; + } + } + sport = &ipv6->sport; + dport = &ipv6->dport; + tcp_fin = ipv6->tcp_fin; + tcp_syn = ipv6->tcp_syn; + tcp_rst = ipv6->tcp_rst; + tcp_psh = ipv6->tcp_psh; + tcp_ack = ipv6->tcp_ack; + tcp_urg = ipv6->tcp_urg; + sip_eq_dip = ipv6->sip_eq_dip; + sport_eq_dport = ipv6->sport_eq_dport; + seq_zero = ipv6->seq_zero; + } + + vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_IP4, + ipv4 ? OCELOT_VCAP_BIT_1 : OCELOT_VCAP_BIT_0); + vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L3_FRAGMENT, + fragment); + vcap_key_set(vcap, &data, VCAP_IS2_HK_L3_FRAG_OFS_GT0, 0, 0); + vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L3_OPTIONS, + options); + vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_IP4_L3_TTL_GT0, + ttl); + vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L3_TOS, + ds.value, ds.mask); + vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L3_IP4_DIP, + dip.value.addr, dip.mask.addr); + vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L3_IP4_SIP, + sip.value.addr, sip.mask.addr); + vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_DIP_EQ_SIP, + sip_eq_dip); + val = proto.value[0]; + msk = proto.mask[0]; + type = IS2_TYPE_IP_UDP_TCP; + if (msk == 0xff && (val == IPPROTO_TCP || val == IPPROTO_UDP)) { + /* UDP/TCP protocol match */ + tcp = (val == IPPROTO_TCP ? 
+ OCELOT_VCAP_BIT_1 : OCELOT_VCAP_BIT_0); + vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_TCP, tcp); + vcap_key_l4_port_set(vcap, &data, + VCAP_IS2_HK_L4_DPORT, dport); + vcap_key_l4_port_set(vcap, &data, + VCAP_IS2_HK_L4_SPORT, sport); + vcap_key_set(vcap, &data, VCAP_IS2_HK_L4_RNG, 0, 0); + vcap_key_bit_set(vcap, &data, + VCAP_IS2_HK_L4_SPORT_EQ_DPORT, + sport_eq_dport); + vcap_key_bit_set(vcap, &data, + VCAP_IS2_HK_L4_SEQUENCE_EQ0, + seq_zero); + vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L4_FIN, + tcp_fin); + vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L4_SYN, + tcp_syn); + vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L4_RST, + tcp_rst); + vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L4_PSH, + tcp_psh); + vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L4_ACK, + tcp_ack); + vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L4_URG, + tcp_urg); + vcap_key_set(vcap, &data, VCAP_IS2_HK_L4_1588_DOM, + 0, 0); + vcap_key_set(vcap, &data, VCAP_IS2_HK_L4_1588_VER, + 0, 0); + } else { + if (msk == 0) { + /* Any IP protocol match */ + type_mask = IS2_TYPE_MASK_IP_ANY; + } else { + /* Non-UDP/TCP protocol match */ + type = IS2_TYPE_IP_OTHER; + for (i = 0; i < 6; i++) { + payload.value[i] = ip_data->value[i]; + payload.mask[i] = ip_data->mask[i]; + } + } + vcap_key_bytes_set(vcap, &data, + VCAP_IS2_HK_IP4_L3_PROTO, + proto.value, proto.mask); + vcap_key_bytes_set(vcap, &data, + VCAP_IS2_HK_L3_PAYLOAD, + payload.value, payload.mask); + } + break; + } + case OCELOT_VCAP_KEY_ANY: + default: + type = 0; + type_mask = 0; + count = vcap->entry_width / 2; + /* Iterate over the non-common part of the key and + * clear entry data + */ + for (i = vcap->keys[VCAP_IS2_HK_L2_DMAC].offset; + i < count; i += ENTRY_WIDTH) { + vcap_key_field_set(&data, i, min(32u, count - i), 0, 0); + } + break; + } + + vcap_key_set(vcap, &data, VCAP_IS2_TYPE, type, type_mask); + is2_action_set(ocelot, &data, filter); + vcap_data_set(data.counter, data.counter_offset, + vcap->counter_width, filter->stats.pkts); + + /* Write row */ + vcap_entry2cache(ocelot, vcap, &data); + vcap_action2cache(ocelot, vcap, &data); + vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_WRITE, VCAP_SEL_ALL); +} + +static void is1_action_set(struct ocelot *ocelot, struct vcap_data *data, + const struct ocelot_vcap_filter *filter) +{ + const struct vcap_props *vcap = &ocelot->vcap[VCAP_IS1]; + const struct ocelot_vcap_action *a = &filter->action; + + vcap_action_set(vcap, data, VCAP_IS1_ACT_VID_REPLACE_ENA, + a->vid_replace_ena); + vcap_action_set(vcap, data, VCAP_IS1_ACT_VID_ADD_VAL, a->vid); + vcap_action_set(vcap, data, VCAP_IS1_ACT_VLAN_POP_CNT_ENA, + a->vlan_pop_cnt_ena); + vcap_action_set(vcap, data, VCAP_IS1_ACT_VLAN_POP_CNT, + a->vlan_pop_cnt); + vcap_action_set(vcap, data, VCAP_IS1_ACT_PCP_DEI_ENA, a->pcp_dei_ena); + vcap_action_set(vcap, data, VCAP_IS1_ACT_PCP_VAL, a->pcp); + vcap_action_set(vcap, data, VCAP_IS1_ACT_DEI_VAL, a->dei); + vcap_action_set(vcap, data, VCAP_IS1_ACT_QOS_ENA, a->qos_ena); + vcap_action_set(vcap, data, VCAP_IS1_ACT_QOS_VAL, a->qos_val); + vcap_action_set(vcap, data, VCAP_IS1_ACT_PAG_OVERRIDE_MASK, + a->pag_override_mask); + vcap_action_set(vcap, data, VCAP_IS1_ACT_PAG_VAL, a->pag_val); +} + +static void is1_entry_set(struct ocelot *ocelot, int ix, + struct ocelot_vcap_filter *filter) +{ + const struct vcap_props *vcap = &ocelot->vcap[VCAP_IS1]; + struct ocelot_vcap_key_vlan *tag = &filter->vlan; + struct vcap_data data; + int row = ix / 2; + u32 type; + + memset(&data, 0, sizeof(data)); + + /* Read row */ + vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_READ, 
VCAP_SEL_ALL); + vcap_cache2entry(ocelot, vcap, &data); + vcap_cache2action(ocelot, vcap, &data); + + data.tg_sw = VCAP_TG_HALF; + data.type = IS1_ACTION_TYPE_NORMAL; + vcap_data_offset_get(vcap, &data, ix); + data.tg = (data.tg & ~data.tg_mask); + if (filter->prio != 0) + data.tg |= data.tg_value; + + vcap_key_set(vcap, &data, VCAP_IS1_HK_LOOKUP, filter->lookup, 0x3); + vcap_key_set(vcap, &data, VCAP_IS1_HK_IGR_PORT_MASK, 0, + ~filter->ingress_port_mask); + vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_L2_MC, filter->dmac_mc); + vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_L2_BC, filter->dmac_bc); + vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_VLAN_TAGGED, tag->tagged); + vcap_key_set(vcap, &data, VCAP_IS1_HK_VID, + tag->vid.value, tag->vid.mask); + vcap_key_set(vcap, &data, VCAP_IS1_HK_PCP, + tag->pcp.value[0], tag->pcp.mask[0]); + type = IS1_TYPE_S1_NORMAL; + + switch (filter->key_type) { + case OCELOT_VCAP_KEY_ETYPE: { + struct ocelot_vcap_key_etype *etype = &filter->key.etype; + + vcap_key_bytes_set(vcap, &data, VCAP_IS1_HK_L2_SMAC, + etype->smac.value, etype->smac.mask); + vcap_key_bytes_set(vcap, &data, VCAP_IS1_HK_ETYPE, + etype->etype.value, etype->etype.mask); + break; + } + case OCELOT_VCAP_KEY_IPV4: { + struct ocelot_vcap_key_ipv4 *ipv4 = &filter->key.ipv4; + struct ocelot_vcap_udp_tcp *sport = &ipv4->sport; + struct ocelot_vcap_udp_tcp *dport = &ipv4->dport; + enum ocelot_vcap_bit tcp_udp = OCELOT_VCAP_BIT_0; + struct ocelot_vcap_u8 proto = ipv4->proto; + struct ocelot_vcap_ipv4 sip = ipv4->sip; + u32 val, msk; + + vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_IP_SNAP, + OCELOT_VCAP_BIT_1); + vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_IP4, + OCELOT_VCAP_BIT_1); + vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_ETYPE_LEN, + OCELOT_VCAP_BIT_1); + vcap_key_bytes_set(vcap, &data, VCAP_IS1_HK_L3_IP4_SIP, + sip.value.addr, sip.mask.addr); + + val = proto.value[0]; + msk = proto.mask[0]; + + if ((val == NEXTHDR_TCP || val == NEXTHDR_UDP) && msk == 0xff) + tcp_udp = OCELOT_VCAP_BIT_1; + vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_TCP_UDP, tcp_udp); + + if (tcp_udp) { + enum ocelot_vcap_bit tcp = OCELOT_VCAP_BIT_0; + + if (val == NEXTHDR_TCP) + tcp = OCELOT_VCAP_BIT_1; + + vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_TCP, tcp); + vcap_key_l4_port_set(vcap, &data, VCAP_IS1_HK_L4_SPORT, + sport); + /* Overloaded field */ + vcap_key_l4_port_set(vcap, &data, VCAP_IS1_HK_ETYPE, + dport); + } else { + /* IPv4 "other" frame */ + struct ocelot_vcap_u16 etype = {0}; + + /* Overloaded field */ + etype.value[0] = proto.value[0]; + etype.mask[0] = proto.mask[0]; + + vcap_key_bytes_set(vcap, &data, VCAP_IS1_HK_ETYPE, + etype.value, etype.mask); + } + break; + } + default: + break; + } + vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_TYPE, + type ? 
OCELOT_VCAP_BIT_1 : OCELOT_VCAP_BIT_0); + + is1_action_set(ocelot, &data, filter); + vcap_data_set(data.counter, data.counter_offset, + vcap->counter_width, filter->stats.pkts); + + /* Write row */ + vcap_entry2cache(ocelot, vcap, &data); + vcap_action2cache(ocelot, vcap, &data); + vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_WRITE, VCAP_SEL_ALL); +} + +static void es0_action_set(struct ocelot *ocelot, struct vcap_data *data, + const struct ocelot_vcap_filter *filter) +{ + const struct vcap_props *vcap = &ocelot->vcap[VCAP_ES0]; + const struct ocelot_vcap_action *a = &filter->action; + + vcap_action_set(vcap, data, VCAP_ES0_ACT_PUSH_OUTER_TAG, + a->push_outer_tag); + vcap_action_set(vcap, data, VCAP_ES0_ACT_PUSH_INNER_TAG, + a->push_inner_tag); + vcap_action_set(vcap, data, VCAP_ES0_ACT_TAG_A_TPID_SEL, + a->tag_a_tpid_sel); + vcap_action_set(vcap, data, VCAP_ES0_ACT_TAG_A_VID_SEL, + a->tag_a_vid_sel); + vcap_action_set(vcap, data, VCAP_ES0_ACT_TAG_A_PCP_SEL, + a->tag_a_pcp_sel); + vcap_action_set(vcap, data, VCAP_ES0_ACT_VID_A_VAL, a->vid_a_val); + vcap_action_set(vcap, data, VCAP_ES0_ACT_PCP_A_VAL, a->pcp_a_val); + vcap_action_set(vcap, data, VCAP_ES0_ACT_TAG_B_TPID_SEL, + a->tag_b_tpid_sel); + vcap_action_set(vcap, data, VCAP_ES0_ACT_TAG_B_VID_SEL, + a->tag_b_vid_sel); + vcap_action_set(vcap, data, VCAP_ES0_ACT_TAG_B_PCP_SEL, + a->tag_b_pcp_sel); + vcap_action_set(vcap, data, VCAP_ES0_ACT_VID_B_VAL, a->vid_b_val); + vcap_action_set(vcap, data, VCAP_ES0_ACT_PCP_B_VAL, a->pcp_b_val); +} + +static void es0_entry_set(struct ocelot *ocelot, int ix, + struct ocelot_vcap_filter *filter) +{ + const struct vcap_props *vcap = &ocelot->vcap[VCAP_ES0]; + struct ocelot_vcap_key_vlan *tag = &filter->vlan; + struct vcap_data data; + int row = ix; + + memset(&data, 0, sizeof(data)); + + /* Read row */ + vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_READ, VCAP_SEL_ALL); + vcap_cache2entry(ocelot, vcap, &data); + vcap_cache2action(ocelot, vcap, &data); + + data.tg_sw = VCAP_TG_FULL; + data.type = ES0_ACTION_TYPE_NORMAL; + vcap_data_offset_get(vcap, &data, ix); + data.tg = (data.tg & ~data.tg_mask); + if (filter->prio != 0) + data.tg |= data.tg_value; + + vcap_key_set(vcap, &data, VCAP_ES0_IGR_PORT, filter->ingress_port.value, + filter->ingress_port.mask); + vcap_key_set(vcap, &data, VCAP_ES0_EGR_PORT, filter->egress_port.value, + filter->egress_port.mask); + vcap_key_bit_set(vcap, &data, VCAP_ES0_L2_MC, filter->dmac_mc); + vcap_key_bit_set(vcap, &data, VCAP_ES0_L2_BC, filter->dmac_bc); + vcap_key_set(vcap, &data, VCAP_ES0_VID, + tag->vid.value, tag->vid.mask); + vcap_key_set(vcap, &data, VCAP_ES0_PCP, + tag->pcp.value[0], tag->pcp.mask[0]); + + es0_action_set(ocelot, &data, filter); + vcap_data_set(data.counter, data.counter_offset, + vcap->counter_width, filter->stats.pkts); + + /* Write row */ + vcap_entry2cache(ocelot, vcap, &data); + vcap_action2cache(ocelot, vcap, &data); + vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_WRITE, VCAP_SEL_ALL); +} + +static void vcap_entry_get(struct ocelot *ocelot, int ix, + struct ocelot_vcap_filter *filter) +{ + const struct vcap_props *vcap = &ocelot->vcap[filter->block_id]; + struct vcap_data data; + int row, count; + u32 cnt; + + if (filter->block_id == VCAP_ES0) + data.tg_sw = VCAP_TG_FULL; + else + data.tg_sw = VCAP_TG_HALF; + + count = (1 << (data.tg_sw - 1)); + row = (ix / count); + vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_READ, VCAP_SEL_COUNTER); + vcap_cache2action(ocelot, vcap, &data); + vcap_data_offset_get(vcap, &data, ix); + cnt = vcap_data_get(data.counter, 
data.counter_offset, + vcap->counter_width); + + filter->stats.pkts = cnt; +} + +static void vcap_entry_set(struct ocelot *ocelot, int ix, + struct ocelot_vcap_filter *filter) +{ + if (filter->block_id == VCAP_IS1) + return is1_entry_set(ocelot, ix, filter); + if (filter->block_id == VCAP_IS2) + return is2_entry_set(ocelot, ix, filter); + if (filter->block_id == VCAP_ES0) + return es0_entry_set(ocelot, ix, filter); +} + +struct vcap_policer_entry { + struct list_head list; + refcount_t refcount; + u32 pol_ix; +}; + +int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix, + struct ocelot_policer *pol) +{ + struct qos_policer_conf pp = { 0 }; + struct vcap_policer_entry *tmp; + int ret; + + if (!pol) + return -EINVAL; + + pp.mode = MSCC_QOS_RATE_MODE_DATA; + pp.pir = pol->rate; + pp.pbs = pol->burst; + + list_for_each_entry(tmp, &ocelot->vcap_pol.pol_list, list) + if (tmp->pol_ix == pol_ix) { + refcount_inc(&tmp->refcount); + return 0; + } + + tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + ret = qos_policer_conf_set(ocelot, pol_ix, &pp); + if (ret) { + kfree(tmp); + return ret; + } + + tmp->pol_ix = pol_ix; + refcount_set(&tmp->refcount, 1); + list_add_tail(&tmp->list, &ocelot->vcap_pol.pol_list); + + return 0; +} +EXPORT_SYMBOL(ocelot_vcap_policer_add); + +int ocelot_vcap_policer_del(struct ocelot *ocelot, u32 pol_ix) +{ + struct qos_policer_conf pp = {0}; + struct vcap_policer_entry *tmp, *n; + u8 z = 0; + + list_for_each_entry_safe(tmp, n, &ocelot->vcap_pol.pol_list, list) + if (tmp->pol_ix == pol_ix) { + z = refcount_dec_and_test(&tmp->refcount); + if (z) { + list_del(&tmp->list); + kfree(tmp); + } + } + + if (z) { + pp.mode = MSCC_QOS_RATE_MODE_DISABLED; + return qos_policer_conf_set(ocelot, pol_ix, &pp); + } + + return 0; +} +EXPORT_SYMBOL(ocelot_vcap_policer_del); + +static int +ocelot_vcap_filter_add_aux_resources(struct ocelot *ocelot, + struct ocelot_vcap_filter *filter, + struct netlink_ext_ack *extack) +{ + struct ocelot_mirror *m; + int ret; + + if (filter->block_id == VCAP_IS2 && filter->action.mirror_ena) { + m = ocelot_mirror_get(ocelot, filter->egress_port.value, + extack); + if (IS_ERR(m)) + return PTR_ERR(m); + } + + if (filter->block_id == VCAP_IS2 && filter->action.police_ena) { + ret = ocelot_vcap_policer_add(ocelot, filter->action.pol_ix, + &filter->action.pol); + if (ret) + return ret; + } + + return 0; +} + +static void +ocelot_vcap_filter_del_aux_resources(struct ocelot *ocelot, + struct ocelot_vcap_filter *filter) +{ + if (filter->block_id == VCAP_IS2 && filter->action.police_ena) + ocelot_vcap_policer_del(ocelot, filter->action.pol_ix); + + if (filter->block_id == VCAP_IS2 && filter->action.mirror_ena) + ocelot_mirror_put(ocelot); +} + +static int ocelot_vcap_filter_add_to_block(struct ocelot *ocelot, + struct ocelot_vcap_block *block, + struct ocelot_vcap_filter *filter, + struct netlink_ext_ack *extack) +{ + struct list_head *pos = &block->rules; + struct ocelot_vcap_filter *tmp; + int ret; + + ret = ocelot_vcap_filter_add_aux_resources(ocelot, filter, extack); + if (ret) + return ret; + + block->count++; + + list_for_each_entry(tmp, &block->rules, list) { + if (filter->prio < tmp->prio) { + pos = &tmp->list; + break; + } + } + list_add_tail(&filter->list, pos); + + return 0; +} + +static bool ocelot_vcap_filter_equal(const struct ocelot_vcap_filter *a, + const struct ocelot_vcap_filter *b) +{ + return !memcmp(&a->id, &b->id, sizeof(struct ocelot_vcap_id)); +} + +static int ocelot_vcap_block_get_filter_index(struct 
ocelot_vcap_block *block, + struct ocelot_vcap_filter *filter) +{ + struct ocelot_vcap_filter *tmp; + int index = 0; + + list_for_each_entry(tmp, &block->rules, list) { + if (ocelot_vcap_filter_equal(filter, tmp)) + return index; + index++; + } + + return -ENOENT; +} + +static struct ocelot_vcap_filter* +ocelot_vcap_block_find_filter_by_index(struct ocelot_vcap_block *block, + int index) +{ + struct ocelot_vcap_filter *tmp; + int i = 0; + + list_for_each_entry(tmp, &block->rules, list) { + if (i == index) + return tmp; + ++i; + } + + return NULL; +} + +struct ocelot_vcap_filter * +ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, + unsigned long cookie, bool tc_offload) +{ + struct ocelot_vcap_filter *filter; + + list_for_each_entry(filter, &block->rules, list) + if (filter->id.tc_offload == tc_offload && + filter->id.cookie == cookie) + return filter; + + return NULL; +} +EXPORT_SYMBOL(ocelot_vcap_block_find_filter_by_id); + +/* If @on=false, then SNAP, ARP, IP and OAM frames will not match on keys based + * on destination and source MAC addresses, but only on higher-level protocol + * information. The only frame types to match on keys containing MAC addresses + * in this case are non-SNAP, non-ARP, non-IP and non-OAM frames. + * + * If @on=true, then the above frame types (SNAP, ARP, IP and OAM) will match + * on MAC_ETYPE keys such as destination and source MAC on this ingress port. + * However the setting has the side effect of making these frames not matching + * on any _other_ keys than MAC_ETYPE ones. + */ +static void ocelot_match_all_as_mac_etype(struct ocelot *ocelot, int port, + int lookup, bool on) +{ + u32 val = 0; + + if (on) + val = ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS(BIT(lookup)) | + ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS(BIT(lookup)) | + ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS(BIT(lookup)) | + ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS(BIT(lookup)) | + ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS(BIT(lookup)); + + ocelot_rmw_gix(ocelot, val, + ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS(BIT(lookup)) | + ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS(BIT(lookup)) | + ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS(BIT(lookup)) | + ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS(BIT(lookup)) | + ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS(BIT(lookup)), + ANA_PORT_VCAP_S2_CFG, port); +} + +static bool +ocelot_vcap_is_problematic_mac_etype(struct ocelot_vcap_filter *filter) +{ + u16 proto, mask; + + if (filter->key_type != OCELOT_VCAP_KEY_ETYPE) + return false; + + proto = ntohs(*(__be16 *)filter->key.etype.etype.value); + mask = ntohs(*(__be16 *)filter->key.etype.etype.mask); + + /* ETH_P_ALL match, so all protocols below are included */ + if (mask == 0) + return true; + if (proto == ETH_P_ARP) + return true; + if (proto == ETH_P_IP) + return true; + if (proto == ETH_P_IPV6) + return true; + + return false; +} + +static bool +ocelot_vcap_is_problematic_non_mac_etype(struct ocelot_vcap_filter *filter) +{ + if (filter->key_type == OCELOT_VCAP_KEY_SNAP) + return true; + if (filter->key_type == OCELOT_VCAP_KEY_ARP) + return true; + if (filter->key_type == OCELOT_VCAP_KEY_IPV4) + return true; + if (filter->key_type == OCELOT_VCAP_KEY_IPV6) + return true; + return false; +} + +static bool +ocelot_exclusive_mac_etype_filter_rules(struct ocelot *ocelot, + struct ocelot_vcap_filter *filter) +{ + struct ocelot_vcap_block *block = &ocelot->block[filter->block_id]; + struct ocelot_vcap_filter *tmp; + unsigned long port; + int i; + + /* We only have the S2_IP_TCPUDP_DIS set of knobs for VCAP IS2 */ + if (filter->block_id != VCAP_IS2) + return true; + + if 
(ocelot_vcap_is_problematic_mac_etype(filter)) { + /* Search for any non-MAC_ETYPE rules on the port */ + for (i = 0; i < block->count; i++) { + tmp = ocelot_vcap_block_find_filter_by_index(block, i); + if (tmp->ingress_port_mask & filter->ingress_port_mask && + tmp->lookup == filter->lookup && + ocelot_vcap_is_problematic_non_mac_etype(tmp)) + return false; + } + + for_each_set_bit(port, &filter->ingress_port_mask, + ocelot->num_phys_ports) + ocelot_match_all_as_mac_etype(ocelot, port, + filter->lookup, true); + } else if (ocelot_vcap_is_problematic_non_mac_etype(filter)) { + /* Search for any MAC_ETYPE rules on the port */ + for (i = 0; i < block->count; i++) { + tmp = ocelot_vcap_block_find_filter_by_index(block, i); + if (tmp->ingress_port_mask & filter->ingress_port_mask && + tmp->lookup == filter->lookup && + ocelot_vcap_is_problematic_mac_etype(tmp)) + return false; + } + + for_each_set_bit(port, &filter->ingress_port_mask, + ocelot->num_phys_ports) + ocelot_match_all_as_mac_etype(ocelot, port, + filter->lookup, false); + } + + return true; +} + +int ocelot_vcap_filter_add(struct ocelot *ocelot, + struct ocelot_vcap_filter *filter, + struct netlink_ext_ack *extack) +{ + struct ocelot_vcap_block *block = &ocelot->block[filter->block_id]; + int i, index, ret; + + if (!ocelot_exclusive_mac_etype_filter_rules(ocelot, filter)) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot mix MAC_ETYPE with non-MAC_ETYPE rules, use the other IS2 lookup"); + return -EBUSY; + } + + /* Add filter to the linked list */ + ret = ocelot_vcap_filter_add_to_block(ocelot, block, filter, extack); + if (ret) + return ret; + + /* Get the index of the inserted filter */ + index = ocelot_vcap_block_get_filter_index(block, filter); + if (index < 0) + return index; + + /* Move down the rules to make place for the new filter */ + for (i = block->count - 1; i > index; i--) { + struct ocelot_vcap_filter *tmp; + + tmp = ocelot_vcap_block_find_filter_by_index(block, i); + /* Read back the filter's counters before moving it */ + vcap_entry_get(ocelot, i - 1, tmp); + vcap_entry_set(ocelot, i, tmp); + } + + /* Now insert the new filter */ + vcap_entry_set(ocelot, index, filter); + return 0; +} +EXPORT_SYMBOL(ocelot_vcap_filter_add); + +static void ocelot_vcap_block_remove_filter(struct ocelot *ocelot, + struct ocelot_vcap_block *block, + struct ocelot_vcap_filter *filter) +{ + struct ocelot_vcap_filter *tmp, *n; + + list_for_each_entry_safe(tmp, n, &block->rules, list) { + if (ocelot_vcap_filter_equal(filter, tmp)) { + ocelot_vcap_filter_del_aux_resources(ocelot, tmp); + list_del(&tmp->list); + kfree(tmp); + } + } + + block->count--; +} + +int ocelot_vcap_filter_del(struct ocelot *ocelot, + struct ocelot_vcap_filter *filter) +{ + struct ocelot_vcap_block *block = &ocelot->block[filter->block_id]; + struct ocelot_vcap_filter del_filter; + int i, index; + + /* Need to inherit the block_id so that vcap_entry_set() + * does not get confused and knows where to install it. 
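+ * The rest of del_filter stays zeroed, so writing it to the last,
+ * now-duplicated row below effectively erases that entry.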
+ */
+ memset(&del_filter, 0, sizeof(del_filter));
+ del_filter.block_id = filter->block_id;
+
+ /* Gets index of the filter */
+ index = ocelot_vcap_block_get_filter_index(block, filter);
+ if (index < 0)
+ return index;
+
+ /* Delete filter */
+ ocelot_vcap_block_remove_filter(ocelot, block, filter);
+
+ /* Move up all the blocks over the deleted filter */
+ for (i = index; i < block->count; i++) {
+ struct ocelot_vcap_filter *tmp;
+
+ tmp = ocelot_vcap_block_find_filter_by_index(block, i);
+ /* Read back the filter's counters before moving it */
+ vcap_entry_get(ocelot, i + 1, tmp);
+ vcap_entry_set(ocelot, i, tmp);
+ }
+
+ /* Now delete the last filter, because it is duplicated */
+ vcap_entry_set(ocelot, block->count, &del_filter);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_vcap_filter_del);
+
+int ocelot_vcap_filter_replace(struct ocelot *ocelot,
+ struct ocelot_vcap_filter *filter)
+{
+ struct ocelot_vcap_block *block = &ocelot->block[filter->block_id];
+ int index;
+
+ index = ocelot_vcap_block_get_filter_index(block, filter);
+ if (index < 0)
+ return index;
+
+ vcap_entry_set(ocelot, index, filter);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_vcap_filter_replace);
+
+int ocelot_vcap_filter_stats_update(struct ocelot *ocelot,
+ struct ocelot_vcap_filter *filter)
+{
+ struct ocelot_vcap_block *block = &ocelot->block[filter->block_id];
+ struct ocelot_vcap_filter tmp;
+ int index;
+
+ index = ocelot_vcap_block_get_filter_index(block, filter);
+ if (index < 0)
+ return index;
+
+ vcap_entry_get(ocelot, index, filter);
+
+ /* After we get the result we need to clear the counters */
+ tmp = *filter;
+ tmp.stats.pkts = 0;
+ vcap_entry_set(ocelot, index, &tmp);
+
+ return 0;
+}
+
+static void ocelot_vcap_init_one(struct ocelot *ocelot,
+ const struct vcap_props *vcap)
+{
+ struct vcap_data data;
+
+ memset(&data, 0, sizeof(data));
+
+ vcap_entry2cache(ocelot, vcap, &data);
+ ocelot_target_write(ocelot, vcap->target, vcap->entry_count,
+ VCAP_CORE_MV_CFG);
+ vcap_cmd(ocelot, vcap, 0, VCAP_CMD_INITIALIZE, VCAP_SEL_ENTRY);
+
+ vcap_action2cache(ocelot, vcap, &data);
+ ocelot_target_write(ocelot, vcap->target, vcap->action_count,
+ VCAP_CORE_MV_CFG);
+ vcap_cmd(ocelot, vcap, 0, VCAP_CMD_INITIALIZE,
+ VCAP_SEL_ACTION | VCAP_SEL_COUNTER);
+}
+
+static void ocelot_vcap_detect_constants(struct ocelot *ocelot,
+ struct vcap_props *vcap)
+{
+ int counter_memory_width;
+ int num_default_actions;
+ int version;
+
+ version = ocelot_target_read(ocelot, vcap->target,
+ VCAP_CONST_VCAP_VER);
+ /* Only version 0 VCAP supported for now */
+ if (WARN_ON(version != 0))
+ return;
+
+ /* Width in bits of type-group field */
+ vcap->tg_width = ocelot_target_read(ocelot, vcap->target,
+ VCAP_CONST_ENTRY_TG_WIDTH);
+ /* Number of subwords per TCAM row */
+ vcap->sw_count = ocelot_target_read(ocelot, vcap->target,
+ VCAP_CONST_ENTRY_SWCNT);
+ /* Number of rows in TCAM. There can be this many full keys, or double
+ * this number half keys, or 4 times this number quarter keys.
+ */
+ vcap->entry_count = ocelot_target_read(ocelot, vcap->target,
+ VCAP_CONST_ENTRY_CNT);
+ /* Assuming there are 4 subwords per TCAM row, their layout in the
+ * actual TCAM (not in the cache) would be:
+ *
+ * | SW 3 | TG 3 | SW 2 | TG 2 | SW 1 | TG 1 | SW 0 | TG 0 |
+ *
+ * (where SW=subword and TG=Type-Group).
+ *
+ * What VCAP_CONST_ENTRY_WIDTH is giving us is the width of one full TCAM
+ * row.
But when software accesses the TCAM through the cache + * registers, the Type-Group values are written through another set of + * registers VCAP_TG_DAT, and therefore, it appears as though the 4 + * subwords are contiguous in the cache memory. + * Important mention: regardless of the number of key entries per row + * (and therefore of key size: 1 full key or 2 half keys or 4 quarter + * keys), software always has to configure 4 Type-Group values. For + * example, in the case of 1 full key, the driver needs to set all 4 + * Type-Group to be full key. + * + * For this reason, we need to fix up the value that the hardware is + * giving us. We don't actually care about the width of the entry in + * the TCAM. What we care about is the width of the entry in the cache + * registers, which is how we get to interact with it. And since the + * VCAP_ENTRY_DAT cache registers access only the subwords and not the + * Type-Groups, this means we need to subtract the width of the + * Type-Groups when packing and unpacking key entry data in a TCAM row. + */ + vcap->entry_width = ocelot_target_read(ocelot, vcap->target, + VCAP_CONST_ENTRY_WIDTH); + vcap->entry_width -= vcap->tg_width * vcap->sw_count; + num_default_actions = ocelot_target_read(ocelot, vcap->target, + VCAP_CONST_ACTION_DEF_CNT); + vcap->action_count = vcap->entry_count + num_default_actions; + vcap->action_width = ocelot_target_read(ocelot, vcap->target, + VCAP_CONST_ACTION_WIDTH); + /* The width of the counter memory, this is the complete width of all + * counter-fields associated with one full-word entry. There is one + * counter per entry sub-word (see CAP_CORE::ENTRY_SWCNT for number of + * subwords.) + */ + vcap->counter_words = vcap->sw_count; + counter_memory_width = ocelot_target_read(ocelot, vcap->target, + VCAP_CONST_CNT_WIDTH); + vcap->counter_width = counter_memory_width / vcap->counter_words; +} + +int ocelot_vcap_init(struct ocelot *ocelot) +{ + struct qos_policer_conf cpu_drop = { + .mode = MSCC_QOS_RATE_MODE_DATA, + }; + int ret, i; + + /* Create a policer that will drop the frames for the cpu. + * This policer will be used as action in the acl rules to drop + * frames. 
+ */ + ret = qos_policer_conf_set(ocelot, OCELOT_POLICER_DISCARD, &cpu_drop); + if (ret) + return ret; + + for (i = 0; i < OCELOT_NUM_VCAP_BLOCKS; i++) { + struct ocelot_vcap_block *block = &ocelot->block[i]; + struct vcap_props *vcap = &ocelot->vcap[i]; + + INIT_LIST_HEAD(&block->rules); + + ocelot_vcap_detect_constants(ocelot, vcap); + ocelot_vcap_init_one(ocelot, vcap); + } + + INIT_LIST_HEAD(&ocelot->dummy_rules); + INIT_LIST_HEAD(&ocelot->traps); + INIT_LIST_HEAD(&ocelot->vcap_pol.pol_list); + + return 0; +} diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.h b/drivers/net/ethernet/mscc/ocelot_vcap.h new file mode 100644 index 0000000000..6f546695fa --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_vcap.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* Microsemi Ocelot Switch driver + * Copyright (c) 2019 Microsemi Corporation + */ + +#ifndef _MSCC_OCELOT_VCAP_H_ +#define _MSCC_OCELOT_VCAP_H_ + +#include "ocelot.h" +#include <soc/mscc/ocelot_vcap.h> +#include <net/flow_offload.h> + +#define OCELOT_POLICER_DISCARD 0x17f + +int ocelot_vcap_filter_stats_update(struct ocelot *ocelot, + struct ocelot_vcap_filter *rule); + +int ocelot_vcap_init(struct ocelot *ocelot); + +int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv, + struct flow_cls_offload *f, + bool ingress); + +#endif /* _MSCC_OCELOT_VCAP_H_ */ diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c new file mode 100644 index 0000000000..151b424653 --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c @@ -0,0 +1,428 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + */ +#include <linux/dsa/ocelot.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/of_net.h> +#include <linux/netdevice.h> +#include <linux/phylink.h> +#include <linux/of.h> +#include <linux/of_mdio.h> +#include <linux/platform_device.h> +#include <linux/mfd/syscon.h> +#include <linux/skbuff.h> +#include <net/switchdev.h> + +#include <soc/mscc/ocelot.h> +#include <soc/mscc/ocelot_vcap.h> +#include <soc/mscc/vsc7514_regs.h> +#include "ocelot_fdma.h" +#include "ocelot.h" + +#define VSC7514_VCAP_POLICER_BASE 128 +#define VSC7514_VCAP_POLICER_MAX 191 + +static int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops) +{ + int ret; + + ocelot->map = vsc7514_regmap; + ocelot->num_mact_rows = 1024; + ocelot->ops = ops; + + ret = ocelot_regfields_init(ocelot, vsc7514_regfields); + if (ret) + return ret; + + ocelot_pll5_init(ocelot); + + eth_random_addr(ocelot->base_mac); + ocelot->base_mac[5] &= 0xf0; + + return 0; +} + +static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg) +{ + struct ocelot *ocelot = arg; + int grp = 0, err; + + while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) { + struct sk_buff *skb; + + err = ocelot_xtr_poll_frame(ocelot, grp, &skb); + if (err) + goto out; + + skb->dev->stats.rx_bytes += skb->len; + skb->dev->stats.rx_packets++; + + if (!skb_defer_rx_timestamp(skb)) + netif_rx(skb); + } + +out: + if (err < 0) + ocelot_drain_cpu_queue(ocelot, 0); + + return IRQ_HANDLED; +} + +static irqreturn_t ocelot_ptp_rdy_irq_handler(int irq, void *arg) +{ + struct ocelot *ocelot = arg; + + ocelot_get_txtstamp(ocelot); + + return IRQ_HANDLED; +} + +static const struct of_device_id mscc_ocelot_match[] = { + { .compatible = "mscc,vsc7514-switch" }, + { } +}; +MODULE_DEVICE_TABLE(of, mscc_ocelot_match); + +static const struct ocelot_ops 
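+/* Editor's note (illustrative, not part of the upstream driver): clearing
+ * the low nibble of base_mac in ocelot_chip_init() above reserves a block
+ * of 16 consecutive MAC addresses, one per front port; ocelot_probe_port()
+ * derives each netdev address from it with
+ * eth_hw_addr_gen(dev, ocelot->base_mac, port). For example, with a base
+ * of 02:11:22:33:44:50, port 0 gets ...:50 and port 3 gets ...:53.
+ */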
ocelot_ops = { + .reset = ocelot_reset, + .wm_enc = ocelot_wm_enc, + .wm_dec = ocelot_wm_dec, + .wm_stat = ocelot_wm_stat, + .port_to_netdev = ocelot_port_to_netdev, + .netdev_to_port = ocelot_netdev_to_port, +}; + +static struct ptp_clock_info ocelot_ptp_clock_info = { + .owner = THIS_MODULE, + .name = "ocelot ptp", + .max_adj = 0x7fffffff, + .n_alarm = 0, + .n_ext_ts = 0, + .n_per_out = OCELOT_PTP_PINS_NUM, + .n_pins = OCELOT_PTP_PINS_NUM, + .pps = 0, + .gettime64 = ocelot_ptp_gettime64, + .settime64 = ocelot_ptp_settime64, + .adjtime = ocelot_ptp_adjtime, + .adjfine = ocelot_ptp_adjfine, + .verify = ocelot_ptp_verify, + .enable = ocelot_ptp_enable, +}; + +static void mscc_ocelot_teardown_devlink_ports(struct ocelot *ocelot) +{ + int port; + + for (port = 0; port < ocelot->num_phys_ports; port++) + ocelot_port_devlink_teardown(ocelot, port); +} + +static void mscc_ocelot_release_ports(struct ocelot *ocelot) +{ + int port; + + for (port = 0; port < ocelot->num_phys_ports; port++) { + struct ocelot_port *ocelot_port; + + ocelot_port = ocelot->ports[port]; + if (!ocelot_port) + continue; + + ocelot_deinit_port(ocelot, port); + ocelot_release_port(ocelot_port); + } +} + +static int mscc_ocelot_init_ports(struct platform_device *pdev, + struct device_node *ports) +{ + struct ocelot *ocelot = platform_get_drvdata(pdev); + u32 devlink_ports_registered = 0; + struct device_node *portnp; + int port, err; + u32 reg; + + ocelot->ports = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports, + sizeof(struct ocelot_port *), GFP_KERNEL); + if (!ocelot->ports) + return -ENOMEM; + + ocelot->devlink_ports = devm_kcalloc(ocelot->dev, + ocelot->num_phys_ports, + sizeof(*ocelot->devlink_ports), + GFP_KERNEL); + if (!ocelot->devlink_ports) + return -ENOMEM; + + for_each_available_child_of_node(ports, portnp) { + struct regmap *target; + struct resource *res; + char res_name[8]; + + if (of_property_read_u32(portnp, "reg", ®)) + continue; + + port = reg; + if (port < 0 || port >= ocelot->num_phys_ports) { + dev_err(ocelot->dev, + "invalid port number: %d >= %d\n", port, + ocelot->num_phys_ports); + continue; + } + + snprintf(res_name, sizeof(res_name), "port%d", port); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + res_name); + target = ocelot_regmap_init(ocelot, res); + if (IS_ERR(target)) { + err = PTR_ERR(target); + of_node_put(portnp); + goto out_teardown; + } + + err = ocelot_port_devlink_init(ocelot, port, + DEVLINK_PORT_FLAVOUR_PHYSICAL); + if (err) { + of_node_put(portnp); + goto out_teardown; + } + + err = ocelot_probe_port(ocelot, port, target, portnp); + if (err) { + ocelot_port_devlink_teardown(ocelot, port); + continue; + } + + devlink_ports_registered |= BIT(port); + } + + /* Initialize unused devlink ports at the end */ + for (port = 0; port < ocelot->num_phys_ports; port++) { + if (devlink_ports_registered & BIT(port)) + continue; + + err = ocelot_port_devlink_init(ocelot, port, + DEVLINK_PORT_FLAVOUR_UNUSED); + if (err) + goto out_teardown; + + devlink_ports_registered |= BIT(port); + } + + return 0; + +out_teardown: + /* Unregister the network interfaces */ + mscc_ocelot_release_ports(ocelot); + /* Tear down devlink ports for the registered network interfaces */ + for (port = 0; port < ocelot->num_phys_ports; port++) { + if (devlink_ports_registered & BIT(port)) + ocelot_port_devlink_teardown(ocelot, port); + } + return err; +} + +static int mscc_ocelot_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + int err, irq_xtr, irq_ptp_rdy; + struct 
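+ /* Editor's note (illustrative, not part of the upstream driver):
+ * mscc_ocelot_init_ports() above binds each child of the
+ * "ethernet-ports" OF node to a "portN" MMIO resource through its
+ * "reg" property. A minimal, hypothetical device tree fragment for
+ * one port:
+ *
+ *	ethernet-ports {
+ *		port0: port@0 {
+ *			reg = <0>;
+ *			phy-handle = <&phy0>;
+ *			phy-mode = "internal";
+ *		};
+ *	};
+ */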
device_node *ports; + struct devlink *devlink; + struct ocelot *ocelot; + struct regmap *hsio; + unsigned int i; + + struct { + enum ocelot_target id; + char *name; + u8 optional:1; + } io_target[] = { + { SYS, "sys" }, + { REW, "rew" }, + { QSYS, "qsys" }, + { ANA, "ana" }, + { QS, "qs" }, + { S0, "s0" }, + { S1, "s1" }, + { S2, "s2" }, + { PTP, "ptp", 1 }, + { FDMA, "fdma", 1 }, + }; + + if (!np && !pdev->dev.platform_data) + return -ENODEV; + + devlink = + devlink_alloc(&ocelot_devlink_ops, sizeof(*ocelot), &pdev->dev); + if (!devlink) + return -ENOMEM; + + ocelot = devlink_priv(devlink); + ocelot->devlink = priv_to_devlink(ocelot); + platform_set_drvdata(pdev, ocelot); + ocelot->dev = &pdev->dev; + + for (i = 0; i < ARRAY_SIZE(io_target); i++) { + struct regmap *target; + struct resource *res; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + io_target[i].name); + + target = ocelot_regmap_init(ocelot, res); + if (IS_ERR(target)) { + if (io_target[i].optional) { + ocelot->targets[io_target[i].id] = NULL; + continue; + } + err = PTR_ERR(target); + goto out_free_devlink; + } + + ocelot->targets[io_target[i].id] = target; + } + + if (ocelot->targets[FDMA]) + ocelot_fdma_init(pdev, ocelot); + + hsio = syscon_regmap_lookup_by_compatible("mscc,ocelot-hsio"); + if (IS_ERR(hsio)) { + dev_err(&pdev->dev, "missing hsio syscon\n"); + err = PTR_ERR(hsio); + goto out_free_devlink; + } + + ocelot->targets[HSIO] = hsio; + + err = ocelot_chip_init(ocelot, &ocelot_ops); + if (err) + goto out_free_devlink; + + irq_xtr = platform_get_irq_byname(pdev, "xtr"); + if (irq_xtr < 0) { + err = irq_xtr; + goto out_free_devlink; + } + + err = devm_request_threaded_irq(&pdev->dev, irq_xtr, NULL, + ocelot_xtr_irq_handler, IRQF_ONESHOT, + "frame extraction", ocelot); + if (err) + goto out_free_devlink; + + irq_ptp_rdy = platform_get_irq_byname(pdev, "ptp_rdy"); + if (irq_ptp_rdy > 0 && ocelot->targets[PTP]) { + err = devm_request_threaded_irq(&pdev->dev, irq_ptp_rdy, NULL, + ocelot_ptp_rdy_irq_handler, + IRQF_ONESHOT, "ptp ready", + ocelot); + if (err) + goto out_free_devlink; + + /* Both the PTP interrupt and the PTP bank are available */ + ocelot->ptp = 1; + } + + ports = of_get_child_by_name(np, "ethernet-ports"); + if (!ports) { + dev_err(ocelot->dev, "no ethernet-ports child node found\n"); + err = -ENODEV; + goto out_free_devlink; + } + + ocelot->num_phys_ports = of_get_child_count(ports); + ocelot->num_flooding_pgids = 1; + + ocelot->vcap = vsc7514_vcap_props; + + ocelot->vcap_pol.base = VSC7514_VCAP_POLICER_BASE; + ocelot->vcap_pol.max = VSC7514_VCAP_POLICER_MAX; + + ocelot->npi = -1; + + err = ocelot_init(ocelot); + if (err) + goto out_put_ports; + + err = mscc_ocelot_init_ports(pdev, ports); + if (err) + goto out_ocelot_devlink_unregister; + + if (ocelot->fdma) + ocelot_fdma_start(ocelot); + + err = ocelot_devlink_sb_register(ocelot); + if (err) + goto out_ocelot_release_ports; + + if (ocelot->ptp) { + err = ocelot_init_timestamp(ocelot, &ocelot_ptp_clock_info); + if (err) { + dev_err(ocelot->dev, + "Timestamp initialization failed\n"); + ocelot->ptp = 0; + } + } + + register_netdevice_notifier(&ocelot_netdevice_nb); + register_switchdev_notifier(&ocelot_switchdev_nb); + register_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb); + + of_node_put(ports); + devlink_register(devlink); + + dev_info(&pdev->dev, "Ocelot switch probed\n"); + + return 0; + +out_ocelot_release_ports: + mscc_ocelot_release_ports(ocelot); + mscc_ocelot_teardown_devlink_ports(ocelot); 
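+ /* Editor's note (not part of the upstream driver): the labels below
+ * unwind in reverse order of acquisition: network ports and their
+ * devlink ports first, then the switch core state set up by
+ * ocelot_init(), then the reference on the "ethernet-ports" node, and
+ * finally the devlink instance itself.
+ */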
+out_ocelot_devlink_unregister: + ocelot_deinit(ocelot); +out_put_ports: + of_node_put(ports); +out_free_devlink: + devlink_free(devlink); + return err; +} + +static int mscc_ocelot_remove(struct platform_device *pdev) +{ + struct ocelot *ocelot = platform_get_drvdata(pdev); + + if (ocelot->fdma) + ocelot_fdma_deinit(ocelot); + devlink_unregister(ocelot->devlink); + ocelot_deinit_timestamp(ocelot); + ocelot_devlink_sb_unregister(ocelot); + mscc_ocelot_release_ports(ocelot); + mscc_ocelot_teardown_devlink_ports(ocelot); + ocelot_deinit(ocelot); + unregister_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb); + unregister_switchdev_notifier(&ocelot_switchdev_nb); + unregister_netdevice_notifier(&ocelot_netdevice_nb); + devlink_free(ocelot->devlink); + + return 0; +} + +static struct platform_driver mscc_ocelot_driver = { + .probe = mscc_ocelot_probe, + .remove = mscc_ocelot_remove, + .driver = { + .name = "ocelot-switch", + .of_match_table = mscc_ocelot_match, + }, +}; + +module_platform_driver(mscc_ocelot_driver); + +MODULE_DESCRIPTION("Microsemi Ocelot switch driver"); +MODULE_AUTHOR("Alexandre Belloni <alexandre.belloni@bootlin.com>"); +MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/net/ethernet/mscc/vsc7514_regs.c b/drivers/net/ethernet/mscc/vsc7514_regs.c new file mode 100644 index 0000000000..5595bfe84b --- /dev/null +++ b/drivers/net/ethernet/mscc/vsc7514_regs.c @@ -0,0 +1,685 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + * Copyright (c) 2021 Innovative Advantage + */ +#include <soc/mscc/ocelot_vcap.h> +#include <soc/mscc/vsc7514_regs.h> +#include "ocelot.h" + +const struct reg_field vsc7514_regfields[REGFIELD_MAX] = { + [ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 11, 11), + [ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 10), + [ANA_ANEVENTS_MSTI_DROP] = REG_FIELD(ANA_ANEVENTS, 27, 27), + [ANA_ANEVENTS_ACLKILL] = REG_FIELD(ANA_ANEVENTS, 26, 26), + [ANA_ANEVENTS_ACLUSED] = REG_FIELD(ANA_ANEVENTS, 25, 25), + [ANA_ANEVENTS_AUTOAGE] = REG_FIELD(ANA_ANEVENTS, 24, 24), + [ANA_ANEVENTS_VS2TTL1] = REG_FIELD(ANA_ANEVENTS, 23, 23), + [ANA_ANEVENTS_STORM_DROP] = REG_FIELD(ANA_ANEVENTS, 22, 22), + [ANA_ANEVENTS_LEARN_DROP] = REG_FIELD(ANA_ANEVENTS, 21, 21), + [ANA_ANEVENTS_AGED_ENTRY] = REG_FIELD(ANA_ANEVENTS, 20, 20), + [ANA_ANEVENTS_CPU_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 19, 19), + [ANA_ANEVENTS_AUTO_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 18, 18), + [ANA_ANEVENTS_LEARN_REMOVE] = REG_FIELD(ANA_ANEVENTS, 17, 17), + [ANA_ANEVENTS_AUTO_LEARNED] = REG_FIELD(ANA_ANEVENTS, 16, 16), + [ANA_ANEVENTS_AUTO_MOVED] = REG_FIELD(ANA_ANEVENTS, 15, 15), + [ANA_ANEVENTS_DROPPED] = REG_FIELD(ANA_ANEVENTS, 14, 14), + [ANA_ANEVENTS_CLASSIFIED_DROP] = REG_FIELD(ANA_ANEVENTS, 13, 13), + [ANA_ANEVENTS_CLASSIFIED_COPY] = REG_FIELD(ANA_ANEVENTS, 12, 12), + [ANA_ANEVENTS_VLAN_DISCARD] = REG_FIELD(ANA_ANEVENTS, 11, 11), + [ANA_ANEVENTS_FWD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 10, 10), + [ANA_ANEVENTS_MULTICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 9, 9), + [ANA_ANEVENTS_UNICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 8, 8), + [ANA_ANEVENTS_DEST_KNOWN] = REG_FIELD(ANA_ANEVENTS, 7, 7), + [ANA_ANEVENTS_BUCKET3_MATCH] = REG_FIELD(ANA_ANEVENTS, 6, 6), + [ANA_ANEVENTS_BUCKET2_MATCH] = REG_FIELD(ANA_ANEVENTS, 5, 5), + [ANA_ANEVENTS_BUCKET1_MATCH] = REG_FIELD(ANA_ANEVENTS, 4, 4), + [ANA_ANEVENTS_BUCKET0_MATCH] = REG_FIELD(ANA_ANEVENTS, 3, 3), + [ANA_ANEVENTS_CPU_OPERATION] = REG_FIELD(ANA_ANEVENTS, 2, 2), + 
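+ /* Editor's note (illustrative, not part of the upstream driver):
+ * ocelot_regfields_init() turns these descriptors into regmap_fields
+ * stored in ocelot->regfields[], so a bit is poked by name; e.g. the
+ * common library's ocelot_reset() releases the core from reset with:
+ *
+ *	regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1);
+ *
+ * The REG_FIELD_ID() entries further down describe the same kind of
+ * field replicated once per port (12 instances, 4 bytes apart).
+ */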
[ANA_ANEVENTS_DMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 1, 1), + [ANA_ANEVENTS_SMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 0, 0), + [ANA_TABLES_MACACCESS_B_DOM] = REG_FIELD(ANA_TABLES_MACACCESS, 18, 18), + [ANA_TABLES_MACTINDX_BUCKET] = REG_FIELD(ANA_TABLES_MACTINDX, 10, 11), + [ANA_TABLES_MACTINDX_M_INDEX] = REG_FIELD(ANA_TABLES_MACTINDX, 0, 9), + [QSYS_TIMED_FRAME_ENTRY_TFRM_VLD] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 20, 20), + [QSYS_TIMED_FRAME_ENTRY_TFRM_FP] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 8, 19), + [QSYS_TIMED_FRAME_ENTRY_TFRM_PORTNO] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 4, 7), + [QSYS_TIMED_FRAME_ENTRY_TFRM_TM_SEL] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 1, 3), + [QSYS_TIMED_FRAME_ENTRY_TFRM_TM_T] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 0, 0), + [SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 2, 2), + [SYS_RESET_CFG_MEM_ENA] = REG_FIELD(SYS_RESET_CFG, 1, 1), + [SYS_RESET_CFG_MEM_INIT] = REG_FIELD(SYS_RESET_CFG, 0, 0), + /* Replicated per number of ports (12), register size 4 per port */ + [QSYS_SWITCH_PORT_MODE_PORT_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 14, 14, 12, 4), + [QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 11, 13, 12, 4), + [QSYS_SWITCH_PORT_MODE_YEL_RSRVD] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 10, 10, 12, 4), + [QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 9, 9, 12, 4), + [QSYS_SWITCH_PORT_MODE_TX_PFC_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 1, 8, 12, 4), + [QSYS_SWITCH_PORT_MODE_TX_PFC_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 0, 0, 12, 4), + [SYS_PORT_MODE_DATA_WO_TS] = REG_FIELD_ID(SYS_PORT_MODE, 5, 6, 12, 4), + [SYS_PORT_MODE_INCL_INJ_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 3, 4, 12, 4), + [SYS_PORT_MODE_INCL_XTR_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 1, 2, 12, 4), + [SYS_PORT_MODE_INCL_HDR_ERR] = REG_FIELD_ID(SYS_PORT_MODE, 0, 0, 12, 4), + [SYS_PAUSE_CFG_PAUSE_START] = REG_FIELD_ID(SYS_PAUSE_CFG, 10, 18, 12, 4), + [SYS_PAUSE_CFG_PAUSE_STOP] = REG_FIELD_ID(SYS_PAUSE_CFG, 1, 9, 12, 4), + [SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 12, 4), +}; +EXPORT_SYMBOL(vsc7514_regfields); + +static const u32 vsc7514_ana_regmap[] = { + REG(ANA_ADVLEARN, 0x009000), + REG(ANA_VLANMASK, 0x009004), + REG(ANA_PORT_B_DOMAIN, 0x009008), + REG(ANA_ANAGEFIL, 0x00900c), + REG(ANA_ANEVENTS, 0x009010), + REG(ANA_STORMLIMIT_BURST, 0x009014), + REG(ANA_STORMLIMIT_CFG, 0x009018), + REG(ANA_ISOLATED_PORTS, 0x009028), + REG(ANA_COMMUNITY_PORTS, 0x00902c), + REG(ANA_AUTOAGE, 0x009030), + REG(ANA_MACTOPTIONS, 0x009034), + REG(ANA_LEARNDISC, 0x009038), + REG(ANA_AGENCTRL, 0x00903c), + REG(ANA_MIRRORPORTS, 0x009040), + REG(ANA_EMIRRORPORTS, 0x009044), + REG(ANA_FLOODING, 0x009048), + REG(ANA_FLOODING_IPMC, 0x00904c), + REG(ANA_SFLOW_CFG, 0x009050), + REG(ANA_PORT_MODE, 0x009080), + REG(ANA_PGID_PGID, 0x008c00), + REG(ANA_TABLES_ANMOVED, 0x008b30), + REG(ANA_TABLES_MACHDATA, 0x008b34), + REG(ANA_TABLES_MACLDATA, 0x008b38), + REG(ANA_TABLES_MACACCESS, 0x008b3c), + REG(ANA_TABLES_MACTINDX, 0x008b40), + REG(ANA_TABLES_VLANACCESS, 0x008b44), + REG(ANA_TABLES_VLANTIDX, 0x008b48), + REG(ANA_TABLES_ISDXACCESS, 0x008b4c), + REG(ANA_TABLES_ISDXTIDX, 0x008b50), + REG(ANA_TABLES_ENTRYLIM, 0x008b00), + REG(ANA_TABLES_PTP_ID_HIGH, 0x008b54), + REG(ANA_TABLES_PTP_ID_LOW, 0x008b58), + REG(ANA_MSTI_STATE, 0x008e00), + REG(ANA_PORT_VLAN_CFG, 0x007000), + REG(ANA_PORT_DROP_CFG, 0x007004), + REG(ANA_PORT_QOS_CFG, 0x007008), + REG(ANA_PORT_VCAP_CFG, 0x00700c), + REG(ANA_PORT_VCAP_S1_KEY_CFG, 0x007010), + REG(ANA_PORT_VCAP_S2_CFG, 0x00701c), + 
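+ /* Editor's note (illustrative, not part of the upstream driver): an
+ * enum ocelot_reg value encodes its target block in its upper bits;
+ * these tables only supply the offset inside that block's MMIO window.
+ * With the table above, ocelot_read(ocelot, ANA_AUTOAGE) therefore
+ * becomes a regmap read at offset 0x009030 of the "ana" resource.
+ */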
REG(ANA_PORT_PCP_DEI_MAP, 0x007020), + REG(ANA_PORT_CPU_FWD_CFG, 0x007060), + REG(ANA_PORT_CPU_FWD_BPDU_CFG, 0x007064), + REG(ANA_PORT_CPU_FWD_GARP_CFG, 0x007068), + REG(ANA_PORT_CPU_FWD_CCM_CFG, 0x00706c), + REG(ANA_PORT_PORT_CFG, 0x007070), + REG(ANA_PORT_POL_CFG, 0x007074), + REG(ANA_PORT_PTP_CFG, 0x007078), + REG(ANA_PORT_PTP_DLY1_CFG, 0x00707c), + REG(ANA_OAM_UPM_LM_CNT, 0x007c00), + REG(ANA_PORT_PTP_DLY2_CFG, 0x007080), + REG(ANA_PFC_PFC_CFG, 0x008800), + REG(ANA_PFC_PFC_TIMER, 0x008804), + REG(ANA_IPT_OAM_MEP_CFG, 0x008000), + REG(ANA_IPT_IPT, 0x008004), + REG(ANA_PPT_PPT, 0x008ac0), + REG(ANA_FID_MAP_FID_MAP, 0x000000), + REG(ANA_AGGR_CFG, 0x0090b4), + REG(ANA_CPUQ_CFG, 0x0090b8), + REG(ANA_CPUQ_CFG2, 0x0090bc), + REG(ANA_CPUQ_8021_CFG, 0x0090c0), + REG(ANA_DSCP_CFG, 0x009100), + REG(ANA_DSCP_REWR_CFG, 0x009200), + REG(ANA_VCAP_RNG_TYPE_CFG, 0x009240), + REG(ANA_VCAP_RNG_VAL_CFG, 0x009260), + REG(ANA_VRAP_CFG, 0x009280), + REG(ANA_VRAP_HDR_DATA, 0x009284), + REG(ANA_VRAP_HDR_MASK, 0x009288), + REG(ANA_DISCARD_CFG, 0x00928c), + REG(ANA_FID_CFG, 0x009290), + REG(ANA_POL_PIR_CFG, 0x004000), + REG(ANA_POL_CIR_CFG, 0x004004), + REG(ANA_POL_MODE_CFG, 0x004008), + REG(ANA_POL_PIR_STATE, 0x00400c), + REG(ANA_POL_CIR_STATE, 0x004010), + REG(ANA_POL_STATE, 0x004014), + REG(ANA_POL_FLOWC, 0x008b80), + REG(ANA_POL_HYST, 0x008bec), + REG(ANA_POL_MISC_CFG, 0x008bf0), +}; + +static const u32 vsc7514_qs_regmap[] = { + REG(QS_XTR_GRP_CFG, 0x000000), + REG(QS_XTR_RD, 0x000008), + REG(QS_XTR_FRM_PRUNING, 0x000010), + REG(QS_XTR_FLUSH, 0x000018), + REG(QS_XTR_DATA_PRESENT, 0x00001c), + REG(QS_XTR_CFG, 0x000020), + REG(QS_INJ_GRP_CFG, 0x000024), + REG(QS_INJ_WR, 0x00002c), + REG(QS_INJ_CTRL, 0x000034), + REG(QS_INJ_STATUS, 0x00003c), + REG(QS_INJ_ERR, 0x000040), + REG(QS_INH_DBG, 0x000048), +}; + +static const u32 vsc7514_qsys_regmap[] = { + REG(QSYS_PORT_MODE, 0x011200), + REG(QSYS_SWITCH_PORT_MODE, 0x011234), + REG(QSYS_STAT_CNT_CFG, 0x011264), + REG(QSYS_EEE_CFG, 0x011268), + REG(QSYS_EEE_THRES, 0x011294), + REG(QSYS_IGR_NO_SHARING, 0x011298), + REG(QSYS_EGR_NO_SHARING, 0x01129c), + REG(QSYS_SW_STATUS, 0x0112a0), + REG(QSYS_EXT_CPU_CFG, 0x0112d0), + REG(QSYS_PAD_CFG, 0x0112d4), + REG(QSYS_CPU_GROUP_MAP, 0x0112d8), + REG(QSYS_QMAP, 0x0112dc), + REG(QSYS_ISDX_SGRP, 0x011400), + REG(QSYS_TIMED_FRAME_ENTRY, 0x014000), + REG(QSYS_TFRM_MISC, 0x011310), + REG(QSYS_TFRM_PORT_DLY, 0x011314), + REG(QSYS_TFRM_TIMER_CFG_1, 0x011318), + REG(QSYS_TFRM_TIMER_CFG_2, 0x01131c), + REG(QSYS_TFRM_TIMER_CFG_3, 0x011320), + REG(QSYS_TFRM_TIMER_CFG_4, 0x011324), + REG(QSYS_TFRM_TIMER_CFG_5, 0x011328), + REG(QSYS_TFRM_TIMER_CFG_6, 0x01132c), + REG(QSYS_TFRM_TIMER_CFG_7, 0x011330), + REG(QSYS_TFRM_TIMER_CFG_8, 0x011334), + REG(QSYS_RED_PROFILE, 0x011338), + REG(QSYS_RES_QOS_MODE, 0x011378), + REG(QSYS_RES_CFG, 0x012000), + REG(QSYS_RES_STAT, 0x012004), + REG(QSYS_EGR_DROP_MODE, 0x01137c), + REG(QSYS_EQ_CTRL, 0x011380), + REG(QSYS_EVENTS_CORE, 0x011384), + REG(QSYS_CIR_CFG, 0x000000), + REG(QSYS_EIR_CFG, 0x000004), + REG(QSYS_SE_CFG, 0x000008), + REG(QSYS_SE_DWRR_CFG, 0x00000c), + REG(QSYS_SE_CONNECT, 0x00003c), + REG(QSYS_SE_DLB_SENSE, 0x000040), + REG(QSYS_CIR_STATE, 0x000044), + REG(QSYS_EIR_STATE, 0x000048), + REG(QSYS_SE_STATE, 0x00004c), + REG(QSYS_HSCH_MISC_CFG, 0x011388), +}; + +static const u32 vsc7514_rew_regmap[] = { + REG(REW_PORT_VLAN_CFG, 0x000000), + REG(REW_TAG_CFG, 0x000004), + REG(REW_PORT_CFG, 0x000008), + REG(REW_DSCP_CFG, 0x00000c), + REG(REW_PCP_DEI_QOS_MAP_CFG, 0x000010), + REG(REW_PTP_CFG, 0x000050), 
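+ /* Editor's note (not part of the upstream driver): the QS_XTR_*
+ * registers listed above implement register-based frame extraction:
+ * ocelot_xtr_irq_handler() in ocelot_vsc7514.c loops while
+ * QS_XTR_DATA_PRESENT reports data for the group, and
+ * ocelot_xtr_poll_frame() pops the frame words from QS_XTR_RD.
+ */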
+ REG(REW_PTP_DLY1_CFG, 0x000054), + REG(REW_DSCP_REMAP_DP1_CFG, 0x000690), + REG(REW_DSCP_REMAP_CFG, 0x000790), + REG(REW_STAT_CFG, 0x000890), + REG(REW_PPT, 0x000680), +}; + +static const u32 vsc7514_sys_regmap[] = { + REG(SYS_COUNT_RX_OCTETS, 0x000000), + REG(SYS_COUNT_RX_UNICAST, 0x000004), + REG(SYS_COUNT_RX_MULTICAST, 0x000008), + REG(SYS_COUNT_RX_BROADCAST, 0x00000c), + REG(SYS_COUNT_RX_SHORTS, 0x000010), + REG(SYS_COUNT_RX_FRAGMENTS, 0x000014), + REG(SYS_COUNT_RX_JABBERS, 0x000018), + REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c), + REG(SYS_COUNT_RX_SYM_ERRS, 0x000020), + REG(SYS_COUNT_RX_64, 0x000024), + REG(SYS_COUNT_RX_65_127, 0x000028), + REG(SYS_COUNT_RX_128_255, 0x00002c), + REG(SYS_COUNT_RX_256_511, 0x000030), + REG(SYS_COUNT_RX_512_1023, 0x000034), + REG(SYS_COUNT_RX_1024_1526, 0x000038), + REG(SYS_COUNT_RX_1527_MAX, 0x00003c), + REG(SYS_COUNT_RX_PAUSE, 0x000040), + REG(SYS_COUNT_RX_CONTROL, 0x000044), + REG(SYS_COUNT_RX_LONGS, 0x000048), + REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c), + REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050), + REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054), + REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058), + REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c), + REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060), + REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064), + REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068), + REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c), + REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070), + REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074), + REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078), + REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c), + REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080), + REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084), + REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088), + REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c), + REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090), + REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094), + REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098), + REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c), + REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0), + REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4), + REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8), + REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac), + REG(SYS_COUNT_TX_OCTETS, 0x000100), + REG(SYS_COUNT_TX_UNICAST, 0x000104), + REG(SYS_COUNT_TX_MULTICAST, 0x000108), + REG(SYS_COUNT_TX_BROADCAST, 0x00010c), + REG(SYS_COUNT_TX_COLLISION, 0x000110), + REG(SYS_COUNT_TX_DROPS, 0x000114), + REG(SYS_COUNT_TX_PAUSE, 0x000118), + REG(SYS_COUNT_TX_64, 0x00011c), + REG(SYS_COUNT_TX_65_127, 0x000120), + REG(SYS_COUNT_TX_128_255, 0x000124), + REG(SYS_COUNT_TX_256_511, 0x000128), + REG(SYS_COUNT_TX_512_1023, 0x00012c), + REG(SYS_COUNT_TX_1024_1526, 0x000130), + REG(SYS_COUNT_TX_1527_MAX, 0x000134), + REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000138), + REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00013c), + REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000140), + REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000144), + REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000148), + REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00014c), + REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000150), + REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000154), + REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000158), + REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00015c), + REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000160), + REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000164), + REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000168), + REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00016c), + REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000170), + REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000174), + REG(SYS_COUNT_TX_AGED, 0x000178), + REG(SYS_COUNT_DROP_LOCAL, 0x000200), + REG(SYS_COUNT_DROP_TAIL, 0x000204), + REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000208), + REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00020c), + 
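+ /* Editor's note (not part of the upstream driver): the SYS_COUNT_*
+ * offsets form a per-port counter window; ocelot_stats.c selects which
+ * port the window shows by writing SYS_STAT_CFG_STAT_VIEW(port) to
+ * SYS_STAT_CFG before reading, so one set of offsets serves all ports.
+ */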
REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000210), + REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000214), + REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000218), + REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00021c), + REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000220), + REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000224), + REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000228), + REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00022c), + REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000230), + REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000234), + REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000238), + REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00023c), + REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000240), + REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000244), + REG(SYS_RESET_CFG, 0x000508), + REG(SYS_CMID, 0x00050c), + REG(SYS_VLAN_ETYPE_CFG, 0x000510), + REG(SYS_PORT_MODE, 0x000514), + REG(SYS_FRONT_PORT_MODE, 0x000548), + REG(SYS_FRM_AGING, 0x000574), + REG(SYS_STAT_CFG, 0x000578), + REG(SYS_SW_STATUS, 0x00057c), + REG(SYS_MISC_CFG, 0x0005ac), + REG(SYS_REW_MAC_HIGH_CFG, 0x0005b0), + REG(SYS_REW_MAC_LOW_CFG, 0x0005dc), + REG(SYS_CM_ADDR, 0x000500), + REG(SYS_CM_DATA, 0x000504), + REG(SYS_PAUSE_CFG, 0x000608), + REG(SYS_PAUSE_TOT_CFG, 0x000638), + REG(SYS_ATOP, 0x00063c), + REG(SYS_ATOP_TOT_CFG, 0x00066c), + REG(SYS_MAC_FC_CFG, 0x000670), + REG(SYS_MMGT, 0x00069c), + REG(SYS_MMGT_FAST, 0x0006a0), + REG(SYS_EVENTS_DIF, 0x0006a4), + REG(SYS_EVENTS_CORE, 0x0006b4), + REG(SYS_PTP_STATUS, 0x0006b8), + REG(SYS_PTP_TXSTAMP, 0x0006bc), + REG(SYS_PTP_NXT, 0x0006c0), + REG(SYS_PTP_CFG, 0x0006c4), +}; + +static const u32 vsc7514_vcap_regmap[] = { + /* VCAP_CORE_CFG */ + REG(VCAP_CORE_UPDATE_CTRL, 0x000000), + REG(VCAP_CORE_MV_CFG, 0x000004), + /* VCAP_CORE_CACHE */ + REG(VCAP_CACHE_ENTRY_DAT, 0x000008), + REG(VCAP_CACHE_MASK_DAT, 0x000108), + REG(VCAP_CACHE_ACTION_DAT, 0x000208), + REG(VCAP_CACHE_CNT_DAT, 0x000308), + REG(VCAP_CACHE_TG_DAT, 0x000388), + /* VCAP_CONST */ + REG(VCAP_CONST_VCAP_VER, 0x000398), + REG(VCAP_CONST_ENTRY_WIDTH, 0x00039c), + REG(VCAP_CONST_ENTRY_CNT, 0x0003a0), + REG(VCAP_CONST_ENTRY_SWCNT, 0x0003a4), + REG(VCAP_CONST_ENTRY_TG_WIDTH, 0x0003a8), + REG(VCAP_CONST_ACTION_DEF_CNT, 0x0003ac), + REG(VCAP_CONST_ACTION_WIDTH, 0x0003b0), + REG(VCAP_CONST_CNT_WIDTH, 0x0003b4), + REG(VCAP_CONST_CORE_CNT, 0x0003b8), + REG(VCAP_CONST_IF_CNT, 0x0003bc), +}; + +static const u32 vsc7514_ptp_regmap[] = { + REG(PTP_PIN_CFG, 0x000000), + REG(PTP_PIN_TOD_SEC_MSB, 0x000004), + REG(PTP_PIN_TOD_SEC_LSB, 0x000008), + REG(PTP_PIN_TOD_NSEC, 0x00000c), + REG(PTP_PIN_WF_HIGH_PERIOD, 0x000014), + REG(PTP_PIN_WF_LOW_PERIOD, 0x000018), + REG(PTP_CFG_MISC, 0x0000a0), + REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4), + REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8), +}; + +static const u32 vsc7514_dev_gmii_regmap[] = { + REG(DEV_CLOCK_CFG, 0x0), + REG(DEV_PORT_MISC, 0x4), + REG(DEV_EVENTS, 0x8), + REG(DEV_EEE_CFG, 0xc), + REG(DEV_RX_PATH_DELAY, 0x10), + REG(DEV_TX_PATH_DELAY, 0x14), + REG(DEV_PTP_PREDICT_CFG, 0x18), + REG(DEV_MAC_ENA_CFG, 0x1c), + REG(DEV_MAC_MODE_CFG, 0x20), + REG(DEV_MAC_MAXLEN_CFG, 0x24), + REG(DEV_MAC_TAGS_CFG, 0x28), + REG(DEV_MAC_ADV_CHK_CFG, 0x2c), + REG(DEV_MAC_IFG_CFG, 0x30), + REG(DEV_MAC_HDX_CFG, 0x34), + REG(DEV_MAC_DBG_CFG, 0x38), + REG(DEV_MAC_FC_MAC_LOW_CFG, 0x3c), + REG(DEV_MAC_FC_MAC_HIGH_CFG, 0x40), + REG(DEV_MAC_STICKY, 0x44), + REG(PCS1G_CFG, 0x48), + REG(PCS1G_MODE_CFG, 0x4c), + REG(PCS1G_SD_CFG, 0x50), + REG(PCS1G_ANEG_CFG, 0x54), + REG(PCS1G_ANEG_NP_CFG, 0x58), + REG(PCS1G_LB_CFG, 0x5c), + REG(PCS1G_DBG_CFG, 0x60), + REG(PCS1G_CDET_CFG, 0x64), + REG(PCS1G_ANEG_STATUS, 0x68), + REG(PCS1G_ANEG_NP_STATUS, 0x6c), 
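+ /* Editor's note (not part of the upstream driver): unlike the
+ * switch-wide tables above, these DEV_GMII offsets are relative to each
+ * port's own "portN" register window (the per-port regmap created in
+ * mscc_ocelot_init_ports()) and are accessed with
+ * ocelot_port_readl()/ocelot_port_writel() rather than ocelot_read().
+ */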
+ REG(PCS1G_LINK_STATUS, 0x70), + REG(PCS1G_LINK_DOWN_CNT, 0x74), + REG(PCS1G_STICKY, 0x78), + REG(PCS1G_DEBUG_STATUS, 0x7c), + REG(PCS1G_LPI_CFG, 0x80), + REG(PCS1G_LPI_WAKE_ERROR_CNT, 0x84), + REG(PCS1G_LPI_STATUS, 0x88), + REG(PCS1G_TSTPAT_MODE_CFG, 0x8c), + REG(PCS1G_TSTPAT_STATUS, 0x90), + REG(DEV_PCS_FX100_CFG, 0x94), + REG(DEV_PCS_FX100_STATUS, 0x98), +}; + +const u32 *vsc7514_regmap[TARGET_MAX] = { + [ANA] = vsc7514_ana_regmap, + [QS] = vsc7514_qs_regmap, + [QSYS] = vsc7514_qsys_regmap, + [REW] = vsc7514_rew_regmap, + [SYS] = vsc7514_sys_regmap, + [S0] = vsc7514_vcap_regmap, + [S1] = vsc7514_vcap_regmap, + [S2] = vsc7514_vcap_regmap, + [PTP] = vsc7514_ptp_regmap, + [DEV_GMII] = vsc7514_dev_gmii_regmap, +}; +EXPORT_SYMBOL(vsc7514_regmap); + +static const struct vcap_field vsc7514_vcap_es0_keys[] = { + [VCAP_ES0_EGR_PORT] = { 0, 4 }, + [VCAP_ES0_IGR_PORT] = { 4, 4 }, + [VCAP_ES0_RSV] = { 8, 2 }, + [VCAP_ES0_L2_MC] = { 10, 1 }, + [VCAP_ES0_L2_BC] = { 11, 1 }, + [VCAP_ES0_VID] = { 12, 12 }, + [VCAP_ES0_DP] = { 24, 1 }, + [VCAP_ES0_PCP] = { 25, 3 }, +}; + +static const struct vcap_field vsc7514_vcap_es0_actions[] = { + [VCAP_ES0_ACT_PUSH_OUTER_TAG] = { 0, 2 }, + [VCAP_ES0_ACT_PUSH_INNER_TAG] = { 2, 1 }, + [VCAP_ES0_ACT_TAG_A_TPID_SEL] = { 3, 2 }, + [VCAP_ES0_ACT_TAG_A_VID_SEL] = { 5, 1 }, + [VCAP_ES0_ACT_TAG_A_PCP_SEL] = { 6, 2 }, + [VCAP_ES0_ACT_TAG_A_DEI_SEL] = { 8, 2 }, + [VCAP_ES0_ACT_TAG_B_TPID_SEL] = { 10, 2 }, + [VCAP_ES0_ACT_TAG_B_VID_SEL] = { 12, 1 }, + [VCAP_ES0_ACT_TAG_B_PCP_SEL] = { 13, 2 }, + [VCAP_ES0_ACT_TAG_B_DEI_SEL] = { 15, 2 }, + [VCAP_ES0_ACT_VID_A_VAL] = { 17, 12 }, + [VCAP_ES0_ACT_PCP_A_VAL] = { 29, 3 }, + [VCAP_ES0_ACT_DEI_A_VAL] = { 32, 1 }, + [VCAP_ES0_ACT_VID_B_VAL] = { 33, 12 }, + [VCAP_ES0_ACT_PCP_B_VAL] = { 45, 3 }, + [VCAP_ES0_ACT_DEI_B_VAL] = { 48, 1 }, + [VCAP_ES0_ACT_RSV] = { 49, 24 }, + [VCAP_ES0_ACT_HIT_STICKY] = { 73, 1 }, +}; + +static const struct vcap_field vsc7514_vcap_is1_keys[] = { + [VCAP_IS1_HK_TYPE] = { 0, 1 }, + [VCAP_IS1_HK_LOOKUP] = { 1, 2 }, + [VCAP_IS1_HK_IGR_PORT_MASK] = { 3, 12 }, + [VCAP_IS1_HK_RSV] = { 15, 9 }, + [VCAP_IS1_HK_OAM_Y1731] = { 24, 1 }, + [VCAP_IS1_HK_L2_MC] = { 25, 1 }, + [VCAP_IS1_HK_L2_BC] = { 26, 1 }, + [VCAP_IS1_HK_IP_MC] = { 27, 1 }, + [VCAP_IS1_HK_VLAN_TAGGED] = { 28, 1 }, + [VCAP_IS1_HK_VLAN_DBL_TAGGED] = { 29, 1 }, + [VCAP_IS1_HK_TPID] = { 30, 1 }, + [VCAP_IS1_HK_VID] = { 31, 12 }, + [VCAP_IS1_HK_DEI] = { 43, 1 }, + [VCAP_IS1_HK_PCP] = { 44, 3 }, + /* Specific Fields for IS1 Half Key S1_NORMAL */ + [VCAP_IS1_HK_L2_SMAC] = { 47, 48 }, + [VCAP_IS1_HK_ETYPE_LEN] = { 95, 1 }, + [VCAP_IS1_HK_ETYPE] = { 96, 16 }, + [VCAP_IS1_HK_IP_SNAP] = { 112, 1 }, + [VCAP_IS1_HK_IP4] = { 113, 1 }, + /* Layer-3 Information */ + [VCAP_IS1_HK_L3_FRAGMENT] = { 114, 1 }, + [VCAP_IS1_HK_L3_FRAG_OFS_GT0] = { 115, 1 }, + [VCAP_IS1_HK_L3_OPTIONS] = { 116, 1 }, + [VCAP_IS1_HK_L3_DSCP] = { 117, 6 }, + [VCAP_IS1_HK_L3_IP4_SIP] = { 123, 32 }, + /* Layer-4 Information */ + [VCAP_IS1_HK_TCP_UDP] = { 155, 1 }, + [VCAP_IS1_HK_TCP] = { 156, 1 }, + [VCAP_IS1_HK_L4_SPORT] = { 157, 16 }, + [VCAP_IS1_HK_L4_RNG] = { 173, 8 }, + /* Specific Fields for IS1 Half Key S1_5TUPLE_IP4 */ + [VCAP_IS1_HK_IP4_INNER_TPID] = { 47, 1 }, + [VCAP_IS1_HK_IP4_INNER_VID] = { 48, 12 }, + [VCAP_IS1_HK_IP4_INNER_DEI] = { 60, 1 }, + [VCAP_IS1_HK_IP4_INNER_PCP] = { 61, 3 }, + [VCAP_IS1_HK_IP4_IP4] = { 64, 1 }, + [VCAP_IS1_HK_IP4_L3_FRAGMENT] = { 65, 1 }, + [VCAP_IS1_HK_IP4_L3_FRAG_OFS_GT0] = { 66, 1 }, + [VCAP_IS1_HK_IP4_L3_OPTIONS] = { 67, 1 }, + [VCAP_IS1_HK_IP4_L3_DSCP] = { 
68, 6 }, + [VCAP_IS1_HK_IP4_L3_IP4_DIP] = { 74, 32 }, + [VCAP_IS1_HK_IP4_L3_IP4_SIP] = { 106, 32 }, + [VCAP_IS1_HK_IP4_L3_PROTO] = { 138, 8 }, + [VCAP_IS1_HK_IP4_TCP_UDP] = { 146, 1 }, + [VCAP_IS1_HK_IP4_TCP] = { 147, 1 }, + [VCAP_IS1_HK_IP4_L4_RNG] = { 148, 8 }, + [VCAP_IS1_HK_IP4_IP_PAYLOAD_S1_5TUPLE] = { 156, 32 }, +}; + +static const struct vcap_field vsc7514_vcap_is1_actions[] = { + [VCAP_IS1_ACT_DSCP_ENA] = { 0, 1 }, + [VCAP_IS1_ACT_DSCP_VAL] = { 1, 6 }, + [VCAP_IS1_ACT_QOS_ENA] = { 7, 1 }, + [VCAP_IS1_ACT_QOS_VAL] = { 8, 3 }, + [VCAP_IS1_ACT_DP_ENA] = { 11, 1 }, + [VCAP_IS1_ACT_DP_VAL] = { 12, 1 }, + [VCAP_IS1_ACT_PAG_OVERRIDE_MASK] = { 13, 8 }, + [VCAP_IS1_ACT_PAG_VAL] = { 21, 8 }, + [VCAP_IS1_ACT_RSV] = { 29, 9 }, + /* The fields below are incorrectly shifted by 2 in the manual */ + [VCAP_IS1_ACT_VID_REPLACE_ENA] = { 38, 1 }, + [VCAP_IS1_ACT_VID_ADD_VAL] = { 39, 12 }, + [VCAP_IS1_ACT_FID_SEL] = { 51, 2 }, + [VCAP_IS1_ACT_FID_VAL] = { 53, 13 }, + [VCAP_IS1_ACT_PCP_DEI_ENA] = { 66, 1 }, + [VCAP_IS1_ACT_PCP_VAL] = { 67, 3 }, + [VCAP_IS1_ACT_DEI_VAL] = { 70, 1 }, + [VCAP_IS1_ACT_VLAN_POP_CNT_ENA] = { 71, 1 }, + [VCAP_IS1_ACT_VLAN_POP_CNT] = { 72, 2 }, + [VCAP_IS1_ACT_CUSTOM_ACE_TYPE_ENA] = { 74, 4 }, + [VCAP_IS1_ACT_HIT_STICKY] = { 78, 1 }, +}; + +static const struct vcap_field vsc7514_vcap_is2_keys[] = { + /* Common: 46 bits */ + [VCAP_IS2_TYPE] = { 0, 4 }, + [VCAP_IS2_HK_FIRST] = { 4, 1 }, + [VCAP_IS2_HK_PAG] = { 5, 8 }, + [VCAP_IS2_HK_IGR_PORT_MASK] = { 13, 12 }, + [VCAP_IS2_HK_RSV2] = { 25, 1 }, + [VCAP_IS2_HK_HOST_MATCH] = { 26, 1 }, + [VCAP_IS2_HK_L2_MC] = { 27, 1 }, + [VCAP_IS2_HK_L2_BC] = { 28, 1 }, + [VCAP_IS2_HK_VLAN_TAGGED] = { 29, 1 }, + [VCAP_IS2_HK_VID] = { 30, 12 }, + [VCAP_IS2_HK_DEI] = { 42, 1 }, + [VCAP_IS2_HK_PCP] = { 43, 3 }, + /* MAC_ETYPE / MAC_LLC / MAC_SNAP / OAM common */ + [VCAP_IS2_HK_L2_DMAC] = { 46, 48 }, + [VCAP_IS2_HK_L2_SMAC] = { 94, 48 }, + /* MAC_ETYPE (TYPE=000) */ + [VCAP_IS2_HK_MAC_ETYPE_ETYPE] = { 142, 16 }, + [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0] = { 158, 16 }, + [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1] = { 174, 8 }, + [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2] = { 182, 3 }, + /* MAC_LLC (TYPE=001) */ + [VCAP_IS2_HK_MAC_LLC_L2_LLC] = { 142, 40 }, + /* MAC_SNAP (TYPE=010) */ + [VCAP_IS2_HK_MAC_SNAP_L2_SNAP] = { 142, 40 }, + /* MAC_ARP (TYPE=011) */ + [VCAP_IS2_HK_MAC_ARP_SMAC] = { 46, 48 }, + [VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK] = { 94, 1 }, + [VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK] = { 95, 1 }, + [VCAP_IS2_HK_MAC_ARP_LEN_OK] = { 96, 1 }, + [VCAP_IS2_HK_MAC_ARP_TARGET_MATCH] = { 97, 1 }, + [VCAP_IS2_HK_MAC_ARP_SENDER_MATCH] = { 98, 1 }, + [VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN] = { 99, 1 }, + [VCAP_IS2_HK_MAC_ARP_OPCODE] = { 100, 2 }, + [VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP] = { 102, 32 }, + [VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP] = { 134, 32 }, + [VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP] = { 166, 1 }, + /* IP4_TCP_UDP / IP4_OTHER common */ + [VCAP_IS2_HK_IP4] = { 46, 1 }, + [VCAP_IS2_HK_L3_FRAGMENT] = { 47, 1 }, + [VCAP_IS2_HK_L3_FRAG_OFS_GT0] = { 48, 1 }, + [VCAP_IS2_HK_L3_OPTIONS] = { 49, 1 }, + [VCAP_IS2_HK_IP4_L3_TTL_GT0] = { 50, 1 }, + [VCAP_IS2_HK_L3_TOS] = { 51, 8 }, + [VCAP_IS2_HK_L3_IP4_DIP] = { 59, 32 }, + [VCAP_IS2_HK_L3_IP4_SIP] = { 91, 32 }, + [VCAP_IS2_HK_DIP_EQ_SIP] = { 123, 1 }, + /* IP4_TCP_UDP (TYPE=100) */ + [VCAP_IS2_HK_TCP] = { 124, 1 }, + [VCAP_IS2_HK_L4_DPORT] = { 125, 16 }, + [VCAP_IS2_HK_L4_SPORT] = { 141, 16 }, + [VCAP_IS2_HK_L4_RNG] = { 157, 8 }, + [VCAP_IS2_HK_L4_SPORT_EQ_DPORT] = { 165, 1 }, + [VCAP_IS2_HK_L4_SEQUENCE_EQ0] = { 166, 1 }, + [VCAP_IS2_HK_L4_FIN] 
= { 167, 1 },
+ [VCAP_IS2_HK_L4_SYN] = { 168, 1 },
+ [VCAP_IS2_HK_L4_RST] = { 169, 1 },
+ [VCAP_IS2_HK_L4_PSH] = { 170, 1 },
+ [VCAP_IS2_HK_L4_ACK] = { 171, 1 },
+ [VCAP_IS2_HK_L4_URG] = { 172, 1 },
+ [VCAP_IS2_HK_L4_1588_DOM] = { 173, 8 },
+ [VCAP_IS2_HK_L4_1588_VER] = { 181, 4 },
+ /* IP4_OTHER (TYPE=101) */
+ [VCAP_IS2_HK_IP4_L3_PROTO] = { 124, 8 },
+ [VCAP_IS2_HK_L3_PAYLOAD] = { 132, 56 },
+ /* IP6_STD (TYPE=110) */
+ [VCAP_IS2_HK_IP6_L3_TTL_GT0] = { 46, 1 },
+ [VCAP_IS2_HK_L3_IP6_SIP] = { 47, 128 },
+ [VCAP_IS2_HK_IP6_L3_PROTO] = { 175, 8 },
+ /* OAM (TYPE=111) */
+ [VCAP_IS2_HK_OAM_MEL_FLAGS] = { 142, 7 },
+ [VCAP_IS2_HK_OAM_VER] = { 149, 5 },
+ [VCAP_IS2_HK_OAM_OPCODE] = { 154, 8 },
+ [VCAP_IS2_HK_OAM_FLAGS] = { 162, 8 },
+ [VCAP_IS2_HK_OAM_MEPID] = { 170, 16 },
+ [VCAP_IS2_HK_OAM_CCM_CNTS_EQ0] = { 186, 1 },
+ [VCAP_IS2_HK_OAM_IS_Y1731] = { 187, 1 },
+};
+
+static const struct vcap_field vsc7514_vcap_is2_actions[] = {
+ [VCAP_IS2_ACT_HIT_ME_ONCE] = { 0, 1 },
+ [VCAP_IS2_ACT_CPU_COPY_ENA] = { 1, 1 },
+ [VCAP_IS2_ACT_CPU_QU_NUM] = { 2, 3 },
+ [VCAP_IS2_ACT_MASK_MODE] = { 5, 2 },
+ [VCAP_IS2_ACT_MIRROR_ENA] = { 7, 1 },
+ [VCAP_IS2_ACT_LRN_DIS] = { 8, 1 },
+ [VCAP_IS2_ACT_POLICE_ENA] = { 9, 1 },
+ [VCAP_IS2_ACT_POLICE_IDX] = { 10, 9 },
+ [VCAP_IS2_ACT_POLICE_VCAP_ONLY] = { 19, 1 },
+ [VCAP_IS2_ACT_PORT_MASK] = { 20, 11 },
+ [VCAP_IS2_ACT_REW_OP] = { 31, 9 },
+ [VCAP_IS2_ACT_SMAC_REPLACE_ENA] = { 40, 1 },
+ [VCAP_IS2_ACT_RSV] = { 41, 2 },
+ [VCAP_IS2_ACT_ACL_ID] = { 43, 6 },
+ [VCAP_IS2_ACT_HIT_CNT] = { 49, 32 },
+};
+
+struct vcap_props vsc7514_vcap_props[] = {
+ [VCAP_ES0] = {
+ .action_type_width = 0,
+ .action_table = {
+ [ES0_ACTION_TYPE_NORMAL] = {
+ .width = 73, /* HIT_STICKY not included */
+ .count = 1,
+ },
+ },
+ .target = S0,
+ .keys = vsc7514_vcap_es0_keys,
+ .actions = vsc7514_vcap_es0_actions,
+ },
+ [VCAP_IS1] = {
+ .action_type_width = 0,
+ .action_table = {
+ [IS1_ACTION_TYPE_NORMAL] = {
+ .width = 78, /* HIT_STICKY not included */
+ .count = 4,
+ },
+ },
+ .target = S1,
+ .keys = vsc7514_vcap_is1_keys,
+ .actions = vsc7514_vcap_is1_actions,
+ },
+ [VCAP_IS2] = {
+ .action_type_width = 1,
+ .action_table = {
+ [IS2_ACTION_TYPE_NORMAL] = {
+ .width = 49,
+ .count = 2
+ },
+ [IS2_ACTION_TYPE_SMAC_SIP] = {
+ .width = 6,
+ .count = 4
+ },
+ },
+ .target = S2,
+ .keys = vsc7514_vcap_is2_keys,
+ .actions = vsc7514_vcap_is2_actions,
+ },
+};
+EXPORT_SYMBOL(vsc7514_vcap_props);
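+
+/* Editor's note (illustrative, not part of the upstream driver): a minimal
+ * sketch of how a consumer of this library uses the IS2 key/action layout
+ * exported above, assuming <linux/slab.h> and <linux/etherdevice.h> are
+ * available. The hypothetical helper below traps ARP frames received on
+ * port 0 to the CPU; it illustrates ocelot_vcap_filter_add() and is not a
+ * driver API.
+ */
+static int __maybe_unused example_trap_arp(struct ocelot *ocelot)
+{
+	struct ocelot_vcap_filter *filter;
+
+	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+	if (!filter)
+		return -ENOMEM;
+
+	/* Keyed by vsc7514_vcap_is2_keys, actions by vsc7514_vcap_is2_actions */
+	filter->block_id = VCAP_IS2;
+	filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+	filter->lookup = 0;
+	filter->prio = 1;
+	filter->id.cookie = 0x1234;	/* arbitrary; used to find/delete it later */
+	filter->ingress_port_mask = BIT(0);
+
+	/* MAC_ETYPE key matching EtherType 0x0806 (ARP) */
+	filter->key_type = OCELOT_VCAP_KEY_ETYPE;
+	*(__be16 *)filter->key.etype.etype.value = htons(ETH_P_ARP);
+	*(__be16 *)filter->key.etype.etype.mask = htons(0xffff);
+
+	/* Remove from the forwarding path, copy to CPU extraction queue 0 */
+	filter->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
+	filter->action.port_mask = 0;
+	filter->action.cpu_copy_ena = true;
+	filter->action.cpu_qu_num = 0;
+
+	return ocelot_vcap_filter_add(ocelot, filter, NULL);
+}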