author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4
tree       848558de17fb3008cdf4d861b01ac7781903ce39  /drivers/net/dsa/ocelot
parent     Initial commit.
Adding upstream version 6.1.76.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/dsa/ocelot')
-rw-r--r--  drivers/net/dsa/ocelot/Kconfig              32
-rw-r--r--  drivers/net/dsa/ocelot/Makefile             11
-rw-r--r--  drivers/net/dsa/ocelot/felix.c            2150
-rw-r--r--  drivers/net/dsa/ocelot/felix.h             101
-rw-r--r--  drivers/net/dsa/ocelot/felix_vsc9959.c    2743
-rw-r--r--  drivers/net/dsa/ocelot/seville_vsc9953.c  1119
6 files changed, 6156 insertions(+), 0 deletions(-)
diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig
new file mode 100644
index 000000000..08db9cf76
--- /dev/null
+++ b/drivers/net/dsa/ocelot/Kconfig
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_DSA_MSCC_FELIX
+	tristate "Ocelot / Felix Ethernet switch support"
+	depends on NET_DSA && PCI
+	depends on NET_VENDOR_MICROSEMI
+	depends on NET_VENDOR_FREESCALE
+	depends on HAS_IOMEM
+	depends on PTP_1588_CLOCK_OPTIONAL
+	depends on NET_SCH_TAPRIO || NET_SCH_TAPRIO=n
+	select MSCC_OCELOT_SWITCH_LIB
+	select NET_DSA_TAG_OCELOT_8021Q
+	select NET_DSA_TAG_OCELOT
+	select FSL_ENETC_MDIO
+	select PCS_LYNX
+	help
+	  This driver supports the VSC9959 (Felix) switch, which is embedded as
+	  a PCIe function of the NXP LS1028A ENETC RCiEP.
+
+config NET_DSA_MSCC_SEVILLE
+	tristate "Ocelot / Seville Ethernet switch support"
+	depends on NET_DSA
+	depends on NET_VENDOR_MICROSEMI
+	depends on HAS_IOMEM
+	depends on PTP_1588_CLOCK_OPTIONAL
+	select MDIO_MSCC_MIIM
+	select MSCC_OCELOT_SWITCH_LIB
+	select NET_DSA_TAG_OCELOT_8021Q
+	select NET_DSA_TAG_OCELOT
+	select PCS_LYNX
+	help
+	  This driver supports the VSC9953 (Seville) switch, which is embedded
+	  as a platform device on the NXP T1040 SoC.
diff --git a/drivers/net/dsa/ocelot/Makefile b/drivers/net/dsa/ocelot/Makefile
new file mode 100644
index 000000000..f6dd131e7
--- /dev/null
+++ b/drivers/net/dsa/ocelot/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_NET_DSA_MSCC_FELIX) += mscc_felix.o
+obj-$(CONFIG_NET_DSA_MSCC_SEVILLE) += mscc_seville.o
+
+mscc_felix-objs := \
+	felix.o \
+	felix_vsc9959.o
+
+mscc_seville-objs := \
+	felix.o \
+	seville_vsc9953.o
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
new file mode 100644
index 000000000..2d2c6f941
--- /dev/null
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -0,0 +1,2150 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2019-2021 NXP
+ *
+ * This is an umbrella module for all network switches that are
+ * register-compatible with Ocelot and that perform I/O to their host CPU
+ * through an NPI (Node Processor Interface) Ethernet port.
+ */
+#include <uapi/linux/if_bridge.h>
+#include <soc/mscc/ocelot_vcap.h>
+#include <soc/mscc/ocelot_qsys.h>
+#include <soc/mscc/ocelot_sys.h>
+#include <soc/mscc/ocelot_dev.h>
+#include <soc/mscc/ocelot_ana.h>
+#include <soc/mscc/ocelot_ptp.h>
+#include <soc/mscc/ocelot.h>
+#include <linux/dsa/8021q.h>
+#include <linux/dsa/ocelot.h>
+#include <linux/platform_device.h>
+#include <linux/ptp_classify.h>
+#include <linux/module.h>
+#include <linux/of_net.h>
+#include <linux/pci.h>
+#include <linux/of.h>
+#include <net/pkt_sched.h>
+#include <net/dsa.h>
+#include "felix.h"
+
+/* Translate the DSA database API into the ocelot switch library API,
+ * which uses VID 0 for all ports that aren't part of a bridge,
+ * and expects the bridge_dev to be NULL in that case.
+ */ +static struct net_device *felix_classify_db(struct dsa_db db) +{ + switch (db.type) { + case DSA_DB_PORT: + case DSA_DB_LAG: + return NULL; + case DSA_DB_BRIDGE: + return db.bridge.dev; + default: + return ERR_PTR(-EOPNOTSUPP); + } +} + +static int felix_cpu_port_for_master(struct dsa_switch *ds, + struct net_device *master) +{ + struct ocelot *ocelot = ds->priv; + struct dsa_port *cpu_dp; + int lag; + + if (netif_is_lag_master(master)) { + mutex_lock(&ocelot->fwd_domain_lock); + lag = ocelot_bond_get_id(ocelot, master); + mutex_unlock(&ocelot->fwd_domain_lock); + + return lag; + } + + cpu_dp = master->dsa_ptr; + return cpu_dp->index; +} + +/* Set up VCAP ES0 rules for pushing a tag_8021q VLAN towards the CPU such that + * the tagger can perform RX source port identification. + */ +static int felix_tag_8021q_vlan_add_rx(struct dsa_switch *ds, int port, + int upstream, u16 vid) +{ + struct ocelot_vcap_filter *outer_tagging_rule; + struct ocelot *ocelot = ds->priv; + unsigned long cookie; + int key_length, err; + + key_length = ocelot->vcap[VCAP_ES0].keys[VCAP_ES0_IGR_PORT].length; + + outer_tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), + GFP_KERNEL); + if (!outer_tagging_rule) + return -ENOMEM; + + cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream); + + outer_tagging_rule->key_type = OCELOT_VCAP_KEY_ANY; + outer_tagging_rule->prio = 1; + outer_tagging_rule->id.cookie = cookie; + outer_tagging_rule->id.tc_offload = false; + outer_tagging_rule->block_id = VCAP_ES0; + outer_tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD; + outer_tagging_rule->lookup = 0; + outer_tagging_rule->ingress_port.value = port; + outer_tagging_rule->ingress_port.mask = GENMASK(key_length - 1, 0); + outer_tagging_rule->egress_port.value = upstream; + outer_tagging_rule->egress_port.mask = GENMASK(key_length - 1, 0); + outer_tagging_rule->action.push_outer_tag = OCELOT_ES0_TAG; + outer_tagging_rule->action.tag_a_tpid_sel = OCELOT_TAG_TPID_SEL_8021AD; + outer_tagging_rule->action.tag_a_vid_sel = 1; + outer_tagging_rule->action.vid_a_val = vid; + + err = ocelot_vcap_filter_add(ocelot, outer_tagging_rule, NULL); + if (err) + kfree(outer_tagging_rule); + + return err; +} + +static int felix_tag_8021q_vlan_del_rx(struct dsa_switch *ds, int port, + int upstream, u16 vid) +{ + struct ocelot_vcap_filter *outer_tagging_rule; + struct ocelot_vcap_block *block_vcap_es0; + struct ocelot *ocelot = ds->priv; + unsigned long cookie; + + block_vcap_es0 = &ocelot->block[VCAP_ES0]; + cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream); + + outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0, + cookie, false); + if (!outer_tagging_rule) + return -ENOENT; + + return ocelot_vcap_filter_del(ocelot, outer_tagging_rule); +} + +/* Set up VCAP IS1 rules for stripping the tag_8021q VLAN on TX and VCAP IS2 + * rules for steering those tagged packets towards the correct destination port + */ +static int felix_tag_8021q_vlan_add_tx(struct dsa_switch *ds, int port, + u16 vid) +{ + struct ocelot_vcap_filter *untagging_rule, *redirect_rule; + unsigned long cpu_ports = dsa_cpu_ports(ds); + struct ocelot *ocelot = ds->priv; + unsigned long cookie; + int err; + + untagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL); + if (!untagging_rule) + return -ENOMEM; + + redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL); + if (!redirect_rule) { + kfree(untagging_rule); + return -ENOMEM; + } + + cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port); + + 
untagging_rule->key_type = OCELOT_VCAP_KEY_ANY; + untagging_rule->ingress_port_mask = cpu_ports; + untagging_rule->vlan.vid.value = vid; + untagging_rule->vlan.vid.mask = VLAN_VID_MASK; + untagging_rule->prio = 1; + untagging_rule->id.cookie = cookie; + untagging_rule->id.tc_offload = false; + untagging_rule->block_id = VCAP_IS1; + untagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD; + untagging_rule->lookup = 0; + untagging_rule->action.vlan_pop_cnt_ena = true; + untagging_rule->action.vlan_pop_cnt = 1; + untagging_rule->action.pag_override_mask = 0xff; + untagging_rule->action.pag_val = port; + + err = ocelot_vcap_filter_add(ocelot, untagging_rule, NULL); + if (err) { + kfree(untagging_rule); + kfree(redirect_rule); + return err; + } + + cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port); + + redirect_rule->key_type = OCELOT_VCAP_KEY_ANY; + redirect_rule->ingress_port_mask = cpu_ports; + redirect_rule->pag = port; + redirect_rule->prio = 1; + redirect_rule->id.cookie = cookie; + redirect_rule->id.tc_offload = false; + redirect_rule->block_id = VCAP_IS2; + redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD; + redirect_rule->lookup = 0; + redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT; + redirect_rule->action.port_mask = BIT(port); + + err = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL); + if (err) { + ocelot_vcap_filter_del(ocelot, untagging_rule); + kfree(redirect_rule); + return err; + } + + return 0; +} + +static int felix_tag_8021q_vlan_del_tx(struct dsa_switch *ds, int port, u16 vid) +{ + struct ocelot_vcap_filter *untagging_rule, *redirect_rule; + struct ocelot_vcap_block *block_vcap_is1; + struct ocelot_vcap_block *block_vcap_is2; + struct ocelot *ocelot = ds->priv; + unsigned long cookie; + int err; + + block_vcap_is1 = &ocelot->block[VCAP_IS1]; + block_vcap_is2 = &ocelot->block[VCAP_IS2]; + + cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port); + untagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1, + cookie, false); + if (!untagging_rule) + return -ENOENT; + + err = ocelot_vcap_filter_del(ocelot, untagging_rule); + if (err) + return err; + + cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port); + redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, + cookie, false); + if (!redirect_rule) + return -ENOENT; + + return ocelot_vcap_filter_del(ocelot, redirect_rule); +} + +static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid, + u16 flags) +{ + struct dsa_port *cpu_dp; + int err; + + /* tag_8021q.c assumes we are implementing this via port VLAN + * membership, which we aren't. So we don't need to add any VCAP filter + * for the CPU port. 
+ */ + if (!dsa_is_user_port(ds, port)) + return 0; + + dsa_switch_for_each_cpu_port(cpu_dp, ds) { + err = felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid); + if (err) + return err; + } + + err = felix_tag_8021q_vlan_add_tx(ds, port, vid); + if (err) + goto add_tx_failed; + + return 0; + +add_tx_failed: + dsa_switch_for_each_cpu_port(cpu_dp, ds) + felix_tag_8021q_vlan_del_rx(ds, port, cpu_dp->index, vid); + + return err; +} + +static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid) +{ + struct dsa_port *cpu_dp; + int err; + + if (!dsa_is_user_port(ds, port)) + return 0; + + dsa_switch_for_each_cpu_port(cpu_dp, ds) { + err = felix_tag_8021q_vlan_del_rx(ds, port, cpu_dp->index, vid); + if (err) + return err; + } + + err = felix_tag_8021q_vlan_del_tx(ds, port, vid); + if (err) + goto del_tx_failed; + + return 0; + +del_tx_failed: + dsa_switch_for_each_cpu_port(cpu_dp, ds) + felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid); + + return err; +} + +static int felix_trap_get_cpu_port(struct dsa_switch *ds, + const struct ocelot_vcap_filter *trap) +{ + struct dsa_port *dp; + int first_port; + + if (WARN_ON(!trap->ingress_port_mask)) + return -1; + + first_port = __ffs(trap->ingress_port_mask); + dp = dsa_to_port(ds, first_port); + + return dp->cpu_dp->index; +} + +/* On switches with no extraction IRQ wired, trapped packets need to be + * replicated over Ethernet as well, otherwise we'd get no notification of + * their arrival when using the ocelot-8021q tagging protocol. + */ +static int felix_update_trapping_destinations(struct dsa_switch *ds, + bool using_tag_8021q) +{ + struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + struct ocelot_vcap_block *block_vcap_is2; + struct ocelot_vcap_filter *trap; + enum ocelot_mask_mode mask_mode; + unsigned long port_mask; + bool cpu_copy_ena; + int err; + + if (!felix->info->quirk_no_xtr_irq) + return 0; + + /* We are sure that "cpu" was found, otherwise + * dsa_tree_setup_default_cpu() would have failed earlier. + */ + block_vcap_is2 = &ocelot->block[VCAP_IS2]; + + /* Make sure all traps are set up for that destination */ + list_for_each_entry(trap, &block_vcap_is2->rules, list) { + if (!trap->is_trap) + continue; + + /* Figure out the current trapping destination */ + if (using_tag_8021q) { + /* Redirect to the tag_8021q CPU port. If timestamps + * are necessary, also copy trapped packets to the CPU + * port module. + */ + mask_mode = OCELOT_MASK_MODE_REDIRECT; + port_mask = BIT(felix_trap_get_cpu_port(ds, trap)); + cpu_copy_ena = !!trap->take_ts; + } else { + /* Trap packets only to the CPU port module, which is + * redirected to the NPI port (the DSA CPU port) + */ + mask_mode = OCELOT_MASK_MODE_PERMIT_DENY; + port_mask = 0; + cpu_copy_ena = true; + } + + if (trap->action.mask_mode == mask_mode && + trap->action.port_mask == port_mask && + trap->action.cpu_copy_ena == cpu_copy_ena) + continue; + + trap->action.mask_mode = mask_mode; + trap->action.port_mask = port_mask; + trap->action.cpu_copy_ena = cpu_copy_ena; + + err = ocelot_vcap_filter_replace(ocelot, trap); + if (err) + return err; + } + + return 0; +} + +/* The CPU port module is connected to the Node Processor Interface (NPI). This + * is the mode through which frames can be injected from and extracted to an + * external CPU, over Ethernet. In NXP SoCs, the "external CPU" is the ARM CPU + * running Linux, and this forms a DSA setup together with the enetc or fman + * DSA master. 
+ */ +static void felix_npi_port_init(struct ocelot *ocelot, int port) +{ + ocelot->npi = port; + + ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M | + QSYS_EXT_CPU_CFG_EXT_CPU_PORT(port), + QSYS_EXT_CPU_CFG); + + /* NPI port Injection/Extraction configuration */ + ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR, + ocelot->npi_xtr_prefix); + ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR, + ocelot->npi_inj_prefix); + + /* Disable transmission of pause frames */ + ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0); +} + +static void felix_npi_port_deinit(struct ocelot *ocelot, int port) +{ + /* Restore hardware defaults */ + int unused_port = ocelot->num_phys_ports + 2; + + ocelot->npi = -1; + + ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPU_PORT(unused_port), + QSYS_EXT_CPU_CFG); + + ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR, + OCELOT_TAG_PREFIX_DISABLED); + ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR, + OCELOT_TAG_PREFIX_DISABLED); + + /* Enable transmission of pause frames */ + ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1); +} + +static int felix_tag_npi_setup(struct dsa_switch *ds) +{ + struct dsa_port *dp, *first_cpu_dp = NULL; + struct ocelot *ocelot = ds->priv; + + dsa_switch_for_each_user_port(dp, ds) { + if (first_cpu_dp && dp->cpu_dp != first_cpu_dp) { + dev_err(ds->dev, "Multiple NPI ports not supported\n"); + return -EINVAL; + } + + first_cpu_dp = dp->cpu_dp; + } + + if (!first_cpu_dp) + return -EINVAL; + + felix_npi_port_init(ocelot, first_cpu_dp->index); + + return 0; +} + +static void felix_tag_npi_teardown(struct dsa_switch *ds) +{ + struct ocelot *ocelot = ds->priv; + + felix_npi_port_deinit(ocelot, ocelot->npi); +} + +static unsigned long felix_tag_npi_get_host_fwd_mask(struct dsa_switch *ds) +{ + struct ocelot *ocelot = ds->priv; + + return BIT(ocelot->num_phys_ports); +} + +static int felix_tag_npi_change_master(struct dsa_switch *ds, int port, + struct net_device *master, + struct netlink_ext_ack *extack) +{ + struct dsa_port *dp = dsa_to_port(ds, port), *other_dp; + struct ocelot *ocelot = ds->priv; + + if (netif_is_lag_master(master)) { + NL_SET_ERR_MSG_MOD(extack, + "LAG DSA master only supported using ocelot-8021q"); + return -EOPNOTSUPP; + } + + /* Changing the NPI port breaks user ports still assigned to the old + * one, so only allow it while they're down, and don't allow them to + * come back up until they're all changed to the new one. + */ + dsa_switch_for_each_user_port(other_dp, ds) { + struct net_device *slave = other_dp->slave; + + if (other_dp != dp && (slave->flags & IFF_UP) && + dsa_port_to_master(other_dp) != master) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot change while old master still has users"); + return -EOPNOTSUPP; + } + } + + felix_npi_port_deinit(ocelot, ocelot->npi); + felix_npi_port_init(ocelot, felix_cpu_port_for_master(ds, master)); + + return 0; +} + +/* Alternatively to using the NPI functionality, that same hardware MAC + * connected internally to the enetc or fman DSA master can be configured to + * use the software-defined tag_8021q frame format. As far as the hardware is + * concerned, it thinks it is a "dumb switch" - the queues of the CPU port + * module are now disconnected from it, but can still be accessed through + * register-based MMIO. 
+ */ +static const struct felix_tag_proto_ops felix_tag_npi_proto_ops = { + .setup = felix_tag_npi_setup, + .teardown = felix_tag_npi_teardown, + .get_host_fwd_mask = felix_tag_npi_get_host_fwd_mask, + .change_master = felix_tag_npi_change_master, +}; + +static int felix_tag_8021q_setup(struct dsa_switch *ds) +{ + struct ocelot *ocelot = ds->priv; + struct dsa_port *dp; + int err; + + err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD)); + if (err) + return err; + + dsa_switch_for_each_cpu_port(dp, ds) + ocelot_port_setup_dsa_8021q_cpu(ocelot, dp->index); + + dsa_switch_for_each_user_port(dp, ds) + ocelot_port_assign_dsa_8021q_cpu(ocelot, dp->index, + dp->cpu_dp->index); + + dsa_switch_for_each_available_port(dp, ds) + /* This overwrites ocelot_init(): + * Do not forward BPDU frames to the CPU port module, + * for 2 reasons: + * - When these packets are injected from the tag_8021q + * CPU port, we want them to go out, not loop back + * into the system. + * - STP traffic ingressing on a user port should go to + * the tag_8021q CPU port, not to the hardware CPU + * port module. + */ + ocelot_write_gix(ocelot, + ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0), + ANA_PORT_CPU_FWD_BPDU_CFG, dp->index); + + /* The ownership of the CPU port module's queues might have just been + * transferred to the tag_8021q tagger from the NPI-based tagger. + * So there might still be all sorts of crap in the queues. On the + * other hand, the MMIO-based matching of PTP frames is very brittle, + * so we need to be careful that there are no extra frames to be + * dequeued over MMIO, since we would never know to discard them. + */ + ocelot_drain_cpu_queue(ocelot, 0); + + return 0; +} + +static void felix_tag_8021q_teardown(struct dsa_switch *ds) +{ + struct ocelot *ocelot = ds->priv; + struct dsa_port *dp; + + dsa_switch_for_each_available_port(dp, ds) + /* Restore the logic from ocelot_init: + * do not forward BPDU frames to the front ports. + */ + ocelot_write_gix(ocelot, + ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff), + ANA_PORT_CPU_FWD_BPDU_CFG, + dp->index); + + dsa_switch_for_each_user_port(dp, ds) + ocelot_port_unassign_dsa_8021q_cpu(ocelot, dp->index); + + dsa_switch_for_each_cpu_port(dp, ds) + ocelot_port_teardown_dsa_8021q_cpu(ocelot, dp->index); + + dsa_tag_8021q_unregister(ds); +} + +static unsigned long felix_tag_8021q_get_host_fwd_mask(struct dsa_switch *ds) +{ + return dsa_cpu_ports(ds); +} + +static int felix_tag_8021q_change_master(struct dsa_switch *ds, int port, + struct net_device *master, + struct netlink_ext_ack *extack) +{ + int cpu = felix_cpu_port_for_master(ds, master); + struct ocelot *ocelot = ds->priv; + + ocelot_port_unassign_dsa_8021q_cpu(ocelot, port); + ocelot_port_assign_dsa_8021q_cpu(ocelot, port, cpu); + + return felix_update_trapping_destinations(ds, true); +} + +static const struct felix_tag_proto_ops felix_tag_8021q_proto_ops = { + .setup = felix_tag_8021q_setup, + .teardown = felix_tag_8021q_teardown, + .get_host_fwd_mask = felix_tag_8021q_get_host_fwd_mask, + .change_master = felix_tag_8021q_change_master, +}; + +static void felix_set_host_flood(struct dsa_switch *ds, unsigned long mask, + bool uc, bool mc, bool bc) +{ + struct ocelot *ocelot = ds->priv; + unsigned long val; + + val = uc ? mask : 0; + ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_UC); + + val = mc ? 
mask : 0; + ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MC); + ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV4); + ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV6); + + val = bc ? mask : 0; + ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_BC); +} + +static void +felix_migrate_host_flood(struct dsa_switch *ds, + const struct felix_tag_proto_ops *proto_ops, + const struct felix_tag_proto_ops *old_proto_ops) +{ + struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + unsigned long mask; + + if (old_proto_ops) { + mask = old_proto_ops->get_host_fwd_mask(ds); + felix_set_host_flood(ds, mask, false, false, false); + } + + mask = proto_ops->get_host_fwd_mask(ds); + felix_set_host_flood(ds, mask, !!felix->host_flood_uc_mask, + !!felix->host_flood_mc_mask, true); +} + +static int felix_migrate_mdbs(struct dsa_switch *ds, + const struct felix_tag_proto_ops *proto_ops, + const struct felix_tag_proto_ops *old_proto_ops) +{ + struct ocelot *ocelot = ds->priv; + unsigned long from, to; + + if (!old_proto_ops) + return 0; + + from = old_proto_ops->get_host_fwd_mask(ds); + to = proto_ops->get_host_fwd_mask(ds); + + return ocelot_migrate_mdbs(ocelot, from, to); +} + +/* Configure the shared hardware resources for a transition between + * @old_proto_ops and @proto_ops. + * Manual migration is needed because as far as DSA is concerned, no change of + * the CPU port is taking place here, just of the tagging protocol. + */ +static int +felix_tag_proto_setup_shared(struct dsa_switch *ds, + const struct felix_tag_proto_ops *proto_ops, + const struct felix_tag_proto_ops *old_proto_ops) +{ + bool using_tag_8021q = (proto_ops == &felix_tag_8021q_proto_ops); + int err; + + err = felix_migrate_mdbs(ds, proto_ops, old_proto_ops); + if (err) + return err; + + felix_update_trapping_destinations(ds, using_tag_8021q); + + felix_migrate_host_flood(ds, proto_ops, old_proto_ops); + + return 0; +} + +/* This always leaves the switch in a consistent state, because although the + * tag_8021q setup can fail, the NPI setup can't. So either the change is made, + * or the restoration is guaranteed to work. 
+ */ +static int felix_change_tag_protocol(struct dsa_switch *ds, + enum dsa_tag_protocol proto) +{ + const struct felix_tag_proto_ops *old_proto_ops, *proto_ops; + struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + int err; + + switch (proto) { + case DSA_TAG_PROTO_SEVILLE: + case DSA_TAG_PROTO_OCELOT: + proto_ops = &felix_tag_npi_proto_ops; + break; + case DSA_TAG_PROTO_OCELOT_8021Q: + proto_ops = &felix_tag_8021q_proto_ops; + break; + default: + return -EPROTONOSUPPORT; + } + + old_proto_ops = felix->tag_proto_ops; + + if (proto_ops == old_proto_ops) + return 0; + + err = proto_ops->setup(ds); + if (err) + goto setup_failed; + + err = felix_tag_proto_setup_shared(ds, proto_ops, old_proto_ops); + if (err) + goto setup_shared_failed; + + if (old_proto_ops) + old_proto_ops->teardown(ds); + + felix->tag_proto_ops = proto_ops; + felix->tag_proto = proto; + + return 0; + +setup_shared_failed: + proto_ops->teardown(ds); +setup_failed: + return err; +} + +static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds, + int port, + enum dsa_tag_protocol mp) +{ + struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + + return felix->tag_proto; +} + +static void felix_port_set_host_flood(struct dsa_switch *ds, int port, + bool uc, bool mc) +{ + struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + unsigned long mask; + + if (uc) + felix->host_flood_uc_mask |= BIT(port); + else + felix->host_flood_uc_mask &= ~BIT(port); + + if (mc) + felix->host_flood_mc_mask |= BIT(port); + else + felix->host_flood_mc_mask &= ~BIT(port); + + mask = felix->tag_proto_ops->get_host_fwd_mask(ds); + felix_set_host_flood(ds, mask, !!felix->host_flood_uc_mask, + !!felix->host_flood_mc_mask, true); +} + +static int felix_port_change_master(struct dsa_switch *ds, int port, + struct net_device *master, + struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + + return felix->tag_proto_ops->change_master(ds, port, master, extack); +} + +static int felix_set_ageing_time(struct dsa_switch *ds, + unsigned int ageing_time) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_set_ageing_time(ocelot, ageing_time); + + return 0; +} + +static void felix_port_fast_age(struct dsa_switch *ds, int port) +{ + struct ocelot *ocelot = ds->priv; + int err; + + err = ocelot_mact_flush(ocelot, port); + if (err) + dev_err(ds->dev, "Flushing MAC table on port %d returned %pe\n", + port, ERR_PTR(err)); +} + +static int felix_fdb_dump(struct dsa_switch *ds, int port, + dsa_fdb_dump_cb_t *cb, void *data) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_fdb_dump(ocelot, port, cb, data); +} + +static int felix_fdb_add(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid, + struct dsa_db db) +{ + struct net_device *bridge_dev = felix_classify_db(db); + struct dsa_port *dp = dsa_to_port(ds, port); + struct ocelot *ocelot = ds->priv; + + if (IS_ERR(bridge_dev)) + return PTR_ERR(bridge_dev); + + if (dsa_port_is_cpu(dp) && !bridge_dev && + dsa_fdb_present_in_other_db(ds, port, addr, vid, db)) + return 0; + + if (dsa_port_is_cpu(dp)) + port = PGID_CPU; + + return ocelot_fdb_add(ocelot, port, addr, vid, bridge_dev); +} + +static int felix_fdb_del(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid, + struct dsa_db db) +{ + struct net_device *bridge_dev = felix_classify_db(db); + struct dsa_port *dp = dsa_to_port(ds, port); + struct ocelot *ocelot 
= ds->priv; + + if (IS_ERR(bridge_dev)) + return PTR_ERR(bridge_dev); + + if (dsa_port_is_cpu(dp) && !bridge_dev && + dsa_fdb_present_in_other_db(ds, port, addr, vid, db)) + return 0; + + if (dsa_port_is_cpu(dp)) + port = PGID_CPU; + + return ocelot_fdb_del(ocelot, port, addr, vid, bridge_dev); +} + +static int felix_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag lag, + const unsigned char *addr, u16 vid, + struct dsa_db db) +{ + struct net_device *bridge_dev = felix_classify_db(db); + struct ocelot *ocelot = ds->priv; + + if (IS_ERR(bridge_dev)) + return PTR_ERR(bridge_dev); + + return ocelot_lag_fdb_add(ocelot, lag.dev, addr, vid, bridge_dev); +} + +static int felix_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag lag, + const unsigned char *addr, u16 vid, + struct dsa_db db) +{ + struct net_device *bridge_dev = felix_classify_db(db); + struct ocelot *ocelot = ds->priv; + + if (IS_ERR(bridge_dev)) + return PTR_ERR(bridge_dev); + + return ocelot_lag_fdb_del(ocelot, lag.dev, addr, vid, bridge_dev); +} + +static int felix_mdb_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db) +{ + struct net_device *bridge_dev = felix_classify_db(db); + struct ocelot *ocelot = ds->priv; + + if (IS_ERR(bridge_dev)) + return PTR_ERR(bridge_dev); + + if (dsa_is_cpu_port(ds, port) && !bridge_dev && + dsa_mdb_present_in_other_db(ds, port, mdb, db)) + return 0; + + if (port == ocelot->npi) + port = ocelot->num_phys_ports; + + return ocelot_port_mdb_add(ocelot, port, mdb, bridge_dev); +} + +static int felix_mdb_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db) +{ + struct net_device *bridge_dev = felix_classify_db(db); + struct ocelot *ocelot = ds->priv; + + if (IS_ERR(bridge_dev)) + return PTR_ERR(bridge_dev); + + if (dsa_is_cpu_port(ds, port) && !bridge_dev && + dsa_mdb_present_in_other_db(ds, port, mdb, db)) + return 0; + + if (port == ocelot->npi) + port = ocelot->num_phys_ports; + + return ocelot_port_mdb_del(ocelot, port, mdb, bridge_dev); +} + +static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port, + u8 state) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_bridge_stp_state_set(ocelot, port, state); +} + +static int felix_pre_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags val, + struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_port_pre_bridge_flags(ocelot, port, val); +} + +static int felix_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags val, + struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = ds->priv; + + if (port == ocelot->npi) + port = ocelot->num_phys_ports; + + ocelot_port_bridge_flags(ocelot, port, val); + + return 0; +} + +static int felix_bridge_join(struct dsa_switch *ds, int port, + struct dsa_bridge bridge, bool *tx_fwd_offload, + struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_port_bridge_join(ocelot, port, bridge.dev, bridge.num, + extack); +} + +static void felix_bridge_leave(struct dsa_switch *ds, int port, + struct dsa_bridge bridge) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_port_bridge_leave(ocelot, port, bridge.dev); +} + +static int felix_lag_join(struct dsa_switch *ds, int port, + struct dsa_lag lag, + struct netdev_lag_upper_info *info, + struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = ds->priv; + int err; + + err = ocelot_port_lag_join(ocelot, port, lag.dev, info, extack); + if (err) 
+ return err; + + /* Update the logical LAG port that serves as tag_8021q CPU port */ + if (!dsa_is_cpu_port(ds, port)) + return 0; + + return felix_port_change_master(ds, port, lag.dev, extack); +} + +static int felix_lag_leave(struct dsa_switch *ds, int port, + struct dsa_lag lag) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_port_lag_leave(ocelot, port, lag.dev); + + /* Update the logical LAG port that serves as tag_8021q CPU port */ + if (!dsa_is_cpu_port(ds, port)) + return 0; + + return felix_port_change_master(ds, port, lag.dev, NULL); +} + +static int felix_lag_change(struct dsa_switch *ds, int port) +{ + struct dsa_port *dp = dsa_to_port(ds, port); + struct ocelot *ocelot = ds->priv; + + ocelot_port_lag_change(ocelot, port, dp->lag_tx_enabled); + + return 0; +} + +static int felix_vlan_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = ds->priv; + u16 flags = vlan->flags; + + /* Ocelot switches copy frames as-is to the CPU, so the flags: + * egress-untagged or not, pvid or not, make no difference. This + * behavior is already better than what DSA just tries to approximate + * when it installs the VLAN with the same flags on the CPU port. + * Just accept any configuration, and don't let ocelot deny installing + * multiple native VLANs on the NPI port, because the switch doesn't + * look at the port tag settings towards the NPI interface anyway. + */ + if (port == ocelot->npi) + return 0; + + return ocelot_vlan_prepare(ocelot, port, vlan->vid, + flags & BRIDGE_VLAN_INFO_PVID, + flags & BRIDGE_VLAN_INFO_UNTAGGED, + extack); +} + +static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled, + struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_port_vlan_filtering(ocelot, port, enabled, extack); +} + +static int felix_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = ds->priv; + u16 flags = vlan->flags; + int err; + + err = felix_vlan_prepare(ds, port, vlan, extack); + if (err) + return err; + + return ocelot_vlan_add(ocelot, port, vlan->vid, + flags & BRIDGE_VLAN_INFO_PVID, + flags & BRIDGE_VLAN_INFO_UNTAGGED); +} + +static int felix_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_vlan_del(ocelot, port, vlan->vid); +} + +static void felix_phylink_get_caps(struct dsa_switch *ds, int port, + struct phylink_config *config) +{ + struct ocelot *ocelot = ds->priv; + + /* This driver does not make use of the speed, duplex, pause or the + * advertisement in its mac_config, so it is safe to mark this driver + * as non-legacy. 
+ */ + config->legacy_pre_march2020 = false; + + __set_bit(ocelot->ports[port]->phy_mode, + config->supported_interfaces); +} + +static void felix_phylink_validate(struct dsa_switch *ds, int port, + unsigned long *supported, + struct phylink_link_state *state) +{ + struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + + if (felix->info->phylink_validate) + felix->info->phylink_validate(ocelot, port, supported, state); +} + +static struct phylink_pcs *felix_phylink_mac_select_pcs(struct dsa_switch *ds, + int port, + phy_interface_t iface) +{ + struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + struct phylink_pcs *pcs = NULL; + + if (felix->pcs && felix->pcs[port]) + pcs = felix->pcs[port]; + + return pcs; +} + +static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port, + unsigned int link_an_mode, + phy_interface_t interface) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_phylink_mac_link_down(ocelot, port, link_an_mode, interface, + FELIX_MAC_QUIRKS); +} + +static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port, + unsigned int link_an_mode, + phy_interface_t interface, + struct phy_device *phydev, + int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + + ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode, + interface, speed, duplex, tx_pause, rx_pause, + FELIX_MAC_QUIRKS); + + if (felix->info->port_sched_speed_set) + felix->info->port_sched_speed_set(ocelot, port, speed); +} + +static int felix_port_enable(struct dsa_switch *ds, int port, + struct phy_device *phydev) +{ + struct dsa_port *dp = dsa_to_port(ds, port); + struct ocelot *ocelot = ds->priv; + + if (!dsa_port_is_user(dp)) + return 0; + + if (ocelot->npi >= 0) { + struct net_device *master = dsa_port_to_master(dp); + + if (felix_cpu_port_for_master(ds, master) != ocelot->npi) { + dev_err(ds->dev, "Multiple masters are not allowed\n"); + return -EINVAL; + } + } + + return 0; +} + +static void felix_port_qos_map_init(struct ocelot *ocelot, int port) +{ + int i; + + ocelot_rmw_gix(ocelot, + ANA_PORT_QOS_CFG_QOS_PCP_ENA, + ANA_PORT_QOS_CFG_QOS_PCP_ENA, + ANA_PORT_QOS_CFG, + port); + + for (i = 0; i < OCELOT_NUM_TC * 2; i++) { + ocelot_rmw_ix(ocelot, + (ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL & i) | + ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL(i), + ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL | + ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL_M, + ANA_PORT_PCP_DEI_MAP, + port, i); + } +} + +static void felix_get_stats64(struct dsa_switch *ds, int port, + struct rtnl_link_stats64 *stats) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_port_get_stats64(ocelot, port, stats); +} + +static void felix_get_pause_stats(struct dsa_switch *ds, int port, + struct ethtool_pause_stats *pause_stats) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_port_get_pause_stats(ocelot, port, pause_stats); +} + +static void felix_get_rmon_stats(struct dsa_switch *ds, int port, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **ranges) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_port_get_rmon_stats(ocelot, port, rmon_stats, ranges); +} + +static void felix_get_eth_ctrl_stats(struct dsa_switch *ds, int port, + struct ethtool_eth_ctrl_stats *ctrl_stats) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_port_get_eth_ctrl_stats(ocelot, port, ctrl_stats); +} + +static void felix_get_eth_mac_stats(struct dsa_switch *ds, int port, + struct ethtool_eth_mac_stats 
*mac_stats) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_port_get_eth_mac_stats(ocelot, port, mac_stats); +} + +static void felix_get_eth_phy_stats(struct dsa_switch *ds, int port, + struct ethtool_eth_phy_stats *phy_stats) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_port_get_eth_phy_stats(ocelot, port, phy_stats); +} + +static void felix_get_strings(struct dsa_switch *ds, int port, + u32 stringset, u8 *data) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_get_strings(ocelot, port, stringset, data); +} + +static void felix_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_get_ethtool_stats(ocelot, port, data); +} + +static int felix_get_sset_count(struct dsa_switch *ds, int port, int sset) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_get_sset_count(ocelot, port, sset); +} + +static int felix_get_ts_info(struct dsa_switch *ds, int port, + struct ethtool_ts_info *info) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_get_ts_info(ocelot, port, info); +} + +static const u32 felix_phy_match_table[PHY_INTERFACE_MODE_MAX] = { + [PHY_INTERFACE_MODE_INTERNAL] = OCELOT_PORT_MODE_INTERNAL, + [PHY_INTERFACE_MODE_SGMII] = OCELOT_PORT_MODE_SGMII, + [PHY_INTERFACE_MODE_QSGMII] = OCELOT_PORT_MODE_QSGMII, + [PHY_INTERFACE_MODE_USXGMII] = OCELOT_PORT_MODE_USXGMII, + [PHY_INTERFACE_MODE_1000BASEX] = OCELOT_PORT_MODE_1000BASEX, + [PHY_INTERFACE_MODE_2500BASEX] = OCELOT_PORT_MODE_2500BASEX, +}; + +static int felix_validate_phy_mode(struct felix *felix, int port, + phy_interface_t phy_mode) +{ + u32 modes = felix->info->port_modes[port]; + + if (felix_phy_match_table[phy_mode] & modes) + return 0; + return -EOPNOTSUPP; +} + +static int felix_parse_ports_node(struct felix *felix, + struct device_node *ports_node, + phy_interface_t *port_phy_modes) +{ + struct device *dev = felix->ocelot.dev; + struct device_node *child; + + for_each_available_child_of_node(ports_node, child) { + phy_interface_t phy_mode; + u32 port; + int err; + + /* Get switch port number from DT */ + if (of_property_read_u32(child, "reg", &port) < 0) { + dev_err(dev, "Port number not defined in device tree " + "(property \"reg\")\n"); + of_node_put(child); + return -ENODEV; + } + + /* Get PHY mode from DT */ + err = of_get_phy_mode(child, &phy_mode); + if (err) { + dev_err(dev, "Failed to read phy-mode or " + "phy-interface-type property for port %d\n", + port); + of_node_put(child); + return -ENODEV; + } + + err = felix_validate_phy_mode(felix, port, phy_mode); + if (err < 0) { + dev_err(dev, "Unsupported PHY mode %s on port %d\n", + phy_modes(phy_mode), port); + of_node_put(child); + return err; + } + + port_phy_modes[port] = phy_mode; + } + + return 0; +} + +static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes) +{ + struct device *dev = felix->ocelot.dev; + struct device_node *switch_node; + struct device_node *ports_node; + int err; + + switch_node = dev->of_node; + + ports_node = of_get_child_by_name(switch_node, "ports"); + if (!ports_node) + ports_node = of_get_child_by_name(switch_node, "ethernet-ports"); + if (!ports_node) { + dev_err(dev, "Incorrect bindings: absent \"ports\" or \"ethernet-ports\" node\n"); + return -ENODEV; + } + + err = felix_parse_ports_node(felix, ports_node, port_phy_modes); + of_node_put(ports_node); + + return err; +} + +static struct regmap *felix_request_regmap_by_name(struct felix *felix, + const char *resource_name) +{ + struct ocelot *ocelot = &felix->ocelot; + struct resource 
res; + int i; + + for (i = 0; i < felix->info->num_resources; i++) { + if (strcmp(resource_name, felix->info->resources[i].name)) + continue; + + memcpy(&res, &felix->info->resources[i], sizeof(res)); + res.start += felix->switch_base; + res.end += felix->switch_base; + + return ocelot_regmap_init(ocelot, &res); + } + + return ERR_PTR(-ENOENT); +} + +static struct regmap *felix_request_regmap(struct felix *felix, + enum ocelot_target target) +{ + const char *resource_name = felix->info->resource_names[target]; + + /* If the driver didn't provide a resource name for the target, + * the resource is optional. + */ + if (!resource_name) + return NULL; + + return felix_request_regmap_by_name(felix, resource_name); +} + +static struct regmap *felix_request_port_regmap(struct felix *felix, int port) +{ + char resource_name[32]; + + sprintf(resource_name, "port%d", port); + + return felix_request_regmap_by_name(felix, resource_name); +} + +static int felix_init_structs(struct felix *felix, int num_phys_ports) +{ + struct ocelot *ocelot = &felix->ocelot; + phy_interface_t *port_phy_modes; + struct regmap *target; + int port, i, err; + + ocelot->num_phys_ports = num_phys_ports; + ocelot->ports = devm_kcalloc(ocelot->dev, num_phys_ports, + sizeof(struct ocelot_port *), GFP_KERNEL); + if (!ocelot->ports) + return -ENOMEM; + + ocelot->map = felix->info->map; + ocelot->stats_layout = felix->info->stats_layout; + ocelot->num_mact_rows = felix->info->num_mact_rows; + ocelot->vcap = felix->info->vcap; + ocelot->vcap_pol.base = felix->info->vcap_pol_base; + ocelot->vcap_pol.max = felix->info->vcap_pol_max; + ocelot->vcap_pol.base2 = felix->info->vcap_pol_base2; + ocelot->vcap_pol.max2 = felix->info->vcap_pol_max2; + ocelot->ops = felix->info->ops; + ocelot->npi_inj_prefix = OCELOT_TAG_PREFIX_SHORT; + ocelot->npi_xtr_prefix = OCELOT_TAG_PREFIX_SHORT; + ocelot->devlink = felix->ds->devlink; + + port_phy_modes = kcalloc(num_phys_ports, sizeof(phy_interface_t), + GFP_KERNEL); + if (!port_phy_modes) + return -ENOMEM; + + err = felix_parse_dt(felix, port_phy_modes); + if (err) { + kfree(port_phy_modes); + return err; + } + + for (i = 0; i < TARGET_MAX; i++) { + target = felix_request_regmap(felix, i); + if (IS_ERR(target)) { + dev_err(ocelot->dev, + "Failed to map device memory space: %pe\n", + target); + kfree(port_phy_modes); + return PTR_ERR(target); + } + + ocelot->targets[i] = target; + } + + err = ocelot_regfields_init(ocelot, felix->info->regfields); + if (err) { + dev_err(ocelot->dev, "failed to init reg fields map\n"); + kfree(port_phy_modes); + return err; + } + + for (port = 0; port < num_phys_ports; port++) { + struct ocelot_port *ocelot_port; + + ocelot_port = devm_kzalloc(ocelot->dev, + sizeof(struct ocelot_port), + GFP_KERNEL); + if (!ocelot_port) { + dev_err(ocelot->dev, + "failed to allocate port memory\n"); + kfree(port_phy_modes); + return -ENOMEM; + } + + target = felix_request_port_regmap(felix, port); + if (IS_ERR(target)) { + dev_err(ocelot->dev, + "Failed to map memory space for port %d: %pe\n", + port, target); + kfree(port_phy_modes); + return PTR_ERR(target); + } + + ocelot_port->phy_mode = port_phy_modes[port]; + ocelot_port->ocelot = ocelot; + ocelot_port->target = target; + ocelot_port->index = port; + ocelot->ports[port] = ocelot_port; + } + + kfree(port_phy_modes); + + if (felix->info->mdio_bus_alloc) { + err = felix->info->mdio_bus_alloc(ocelot); + if (err < 0) + return err; + } + + return 0; +} + +static void ocelot_port_purge_txtstamp_skb(struct ocelot *ocelot, int port, + 
struct sk_buff *skb) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone; + struct sk_buff *skb_match = NULL, *skb_tmp; + unsigned long flags; + + if (!clone) + return; + + spin_lock_irqsave(&ocelot_port->tx_skbs.lock, flags); + + skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) { + if (skb != clone) + continue; + __skb_unlink(skb, &ocelot_port->tx_skbs); + skb_match = skb; + break; + } + + spin_unlock_irqrestore(&ocelot_port->tx_skbs.lock, flags); + + WARN_ONCE(!skb_match, + "Could not find skb clone in TX timestamping list\n"); +} + +#define work_to_xmit_work(w) \ + container_of((w), struct felix_deferred_xmit_work, work) + +static void felix_port_deferred_xmit(struct kthread_work *work) +{ + struct felix_deferred_xmit_work *xmit_work = work_to_xmit_work(work); + struct dsa_switch *ds = xmit_work->dp->ds; + struct sk_buff *skb = xmit_work->skb; + u32 rew_op = ocelot_ptp_rew_op(skb); + struct ocelot *ocelot = ds->priv; + int port = xmit_work->dp->index; + int retries = 10; + + do { + if (ocelot_can_inject(ocelot, 0)) + break; + + cpu_relax(); + } while (--retries); + + if (!retries) { + dev_err(ocelot->dev, "port %d failed to inject skb\n", + port); + ocelot_port_purge_txtstamp_skb(ocelot, port, skb); + kfree_skb(skb); + return; + } + + ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb); + + consume_skb(skb); + kfree(xmit_work); +} + +static int felix_connect_tag_protocol(struct dsa_switch *ds, + enum dsa_tag_protocol proto) +{ + struct ocelot_8021q_tagger_data *tagger_data; + + switch (proto) { + case DSA_TAG_PROTO_OCELOT_8021Q: + tagger_data = ocelot_8021q_tagger_data(ds); + tagger_data->xmit_work_fn = felix_port_deferred_xmit; + return 0; + case DSA_TAG_PROTO_OCELOT: + case DSA_TAG_PROTO_SEVILLE: + return 0; + default: + return -EPROTONOSUPPORT; + } +} + +/* Hardware initialization done here so that we can allocate structures with + * devm without fear of dsa_register_switch returning -EPROBE_DEFER and causing + * us to allocate structures twice (leak memory) and map PCI memory twice + * (which will not work). + */ +static int felix_setup(struct dsa_switch *ds) +{ + struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + struct dsa_port *dp; + int err; + + err = felix_init_structs(felix, ds->num_ports); + if (err) + return err; + + err = ocelot_init(ocelot); + if (err) + goto out_mdiobus_free; + + if (ocelot->ptp) { + err = ocelot_init_timestamp(ocelot, felix->info->ptp_caps); + if (err) { + dev_err(ocelot->dev, + "Timestamp initialization failed\n"); + ocelot->ptp = 0; + } + } + + dsa_switch_for_each_available_port(dp, ds) { + ocelot_init_port(ocelot, dp->index); + + /* Set the default QoS Classification based on PCP and DEI + * bits of vlan tag. + */ + felix_port_qos_map_init(ocelot, dp->index); + } + + err = ocelot_devlink_sb_register(ocelot); + if (err) + goto out_deinit_ports; + + /* The initial tag protocol is NPI which won't fail during initial + * setup, there's no real point in checking for errors. 
+ */ + felix_change_tag_protocol(ds, felix->tag_proto); + + ds->mtu_enforcement_ingress = true; + ds->assisted_learning_on_cpu_port = true; + ds->fdb_isolation = true; + ds->max_num_bridges = ds->num_ports; + + return 0; + +out_deinit_ports: + dsa_switch_for_each_available_port(dp, ds) + ocelot_deinit_port(ocelot, dp->index); + + ocelot_deinit_timestamp(ocelot); + ocelot_deinit(ocelot); + +out_mdiobus_free: + if (felix->info->mdio_bus_free) + felix->info->mdio_bus_free(ocelot); + + return err; +} + +static void felix_teardown(struct dsa_switch *ds) +{ + struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + struct dsa_port *dp; + + rtnl_lock(); + if (felix->tag_proto_ops) + felix->tag_proto_ops->teardown(ds); + rtnl_unlock(); + + dsa_switch_for_each_available_port(dp, ds) + ocelot_deinit_port(ocelot, dp->index); + + ocelot_devlink_sb_unregister(ocelot); + ocelot_deinit_timestamp(ocelot); + ocelot_deinit(ocelot); + + if (felix->info->mdio_bus_free) + felix->info->mdio_bus_free(ocelot); +} + +static int felix_hwtstamp_get(struct dsa_switch *ds, int port, + struct ifreq *ifr) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_hwstamp_get(ocelot, port, ifr); +} + +static int felix_hwtstamp_set(struct dsa_switch *ds, int port, + struct ifreq *ifr) +{ + struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + bool using_tag_8021q; + int err; + + err = ocelot_hwstamp_set(ocelot, port, ifr); + if (err) + return err; + + using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q; + + return felix_update_trapping_destinations(ds, using_tag_8021q); +} + +static bool felix_check_xtr_pkt(struct ocelot *ocelot) +{ + struct felix *felix = ocelot_to_felix(ocelot); + int err = 0, grp = 0; + + if (felix->tag_proto != DSA_TAG_PROTO_OCELOT_8021Q) + return false; + + if (!felix->info->quirk_no_xtr_irq) + return false; + + while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) { + struct sk_buff *skb; + unsigned int type; + + err = ocelot_xtr_poll_frame(ocelot, grp, &skb); + if (err) + goto out; + + /* We trap to the CPU port module all PTP frames, but + * felix_rxtstamp() only gets called for event frames. + * So we need to avoid sending duplicate general + * message frames by running a second BPF classifier + * here and dropping those. + */ + __skb_push(skb, ETH_HLEN); + + type = ptp_classify_raw(skb); + + __skb_pull(skb, ETH_HLEN); + + if (type == PTP_CLASS_NONE) { + kfree_skb(skb); + continue; + } + + netif_rx(skb); + } + +out: + if (err < 0) { + dev_err_ratelimited(ocelot->dev, + "Error during packet extraction: %pe\n", + ERR_PTR(err)); + ocelot_drain_cpu_queue(ocelot, 0); + } + + return true; +} + +static bool felix_rxtstamp(struct dsa_switch *ds, int port, + struct sk_buff *skb, unsigned int type) +{ + u32 tstamp_lo = OCELOT_SKB_CB(skb)->tstamp_lo; + struct skb_shared_hwtstamps *shhwtstamps; + struct ocelot *ocelot = ds->priv; + struct timespec64 ts; + u32 tstamp_hi; + u64 tstamp; + + switch (type & PTP_CLASS_PMASK) { + case PTP_CLASS_L2: + if (!(ocelot->ports[port]->trap_proto & OCELOT_PROTO_PTP_L2)) + return false; + break; + case PTP_CLASS_IPV4: + case PTP_CLASS_IPV6: + if (!(ocelot->ports[port]->trap_proto & OCELOT_PROTO_PTP_L4)) + return false; + break; + } + + /* If the "no XTR IRQ" workaround is in use, tell DSA to defer this skb + * for RX timestamping. Then free it, and poll for its copy through + * MMIO in the CPU port module, and inject that into the stack from + * ocelot_xtr_poll(). 
+ */ + if (felix_check_xtr_pkt(ocelot)) { + kfree_skb(skb); + return true; + } + + ocelot_ptp_gettime64(&ocelot->ptp_info, &ts); + tstamp = ktime_set(ts.tv_sec, ts.tv_nsec); + + tstamp_hi = tstamp >> 32; + if ((tstamp & 0xffffffff) < tstamp_lo) + tstamp_hi--; + + tstamp = ((u64)tstamp_hi << 32) | tstamp_lo; + + shhwtstamps = skb_hwtstamps(skb); + memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); + shhwtstamps->hwtstamp = tstamp; + return false; +} + +static void felix_txtstamp(struct dsa_switch *ds, int port, + struct sk_buff *skb) +{ + struct ocelot *ocelot = ds->priv; + struct sk_buff *clone = NULL; + + if (!ocelot->ptp) + return; + + if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone)) { + dev_err_ratelimited(ds->dev, + "port %d delivering skb without TX timestamp\n", + port); + return; + } + + if (clone) + OCELOT_SKB_CB(skb)->clone = clone; +} + +static int felix_change_mtu(struct dsa_switch *ds, int port, int new_mtu) +{ + struct ocelot *ocelot = ds->priv; + struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct felix *felix = ocelot_to_felix(ocelot); + + ocelot_port_set_maxlen(ocelot, port, new_mtu); + + mutex_lock(&ocelot->tas_lock); + + if (ocelot_port->taprio && felix->info->tas_guard_bands_update) + felix->info->tas_guard_bands_update(ocelot, port); + + mutex_unlock(&ocelot->tas_lock); + + return 0; +} + +static int felix_get_max_mtu(struct dsa_switch *ds, int port) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_get_max_mtu(ocelot, port); +} + +static int felix_cls_flower_add(struct dsa_switch *ds, int port, + struct flow_cls_offload *cls, bool ingress) +{ + struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + bool using_tag_8021q; + int err; + + err = ocelot_cls_flower_replace(ocelot, port, cls, ingress); + if (err) + return err; + + using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q; + + return felix_update_trapping_destinations(ds, using_tag_8021q); +} + +static int felix_cls_flower_del(struct dsa_switch *ds, int port, + struct flow_cls_offload *cls, bool ingress) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_cls_flower_destroy(ocelot, port, cls, ingress); +} + +static int felix_cls_flower_stats(struct dsa_switch *ds, int port, + struct flow_cls_offload *cls, bool ingress) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_cls_flower_stats(ocelot, port, cls, ingress); +} + +static int felix_port_policer_add(struct dsa_switch *ds, int port, + struct dsa_mall_policer_tc_entry *policer) +{ + struct ocelot *ocelot = ds->priv; + struct ocelot_policer pol = { + .rate = div_u64(policer->rate_bytes_per_sec, 1000) * 8, + .burst = policer->burst, + }; + + return ocelot_port_policer_add(ocelot, port, &pol); +} + +static void felix_port_policer_del(struct dsa_switch *ds, int port) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_port_policer_del(ocelot, port); +} + +static int felix_port_mirror_add(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror, + bool ingress, struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_port_mirror_add(ocelot, port, mirror->to_local_port, + ingress, extack); +} + +static void felix_port_mirror_del(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_port_mirror_del(ocelot, port, mirror->ingress); +} + +static int felix_port_setup_tc(struct dsa_switch *ds, int port, + enum tc_setup_type type, + void *type_data) +{ + struct 
ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + + if (felix->info->port_setup_tc) + return felix->info->port_setup_tc(ds, port, type, type_data); + else + return -EOPNOTSUPP; +} + +static int felix_sb_pool_get(struct dsa_switch *ds, unsigned int sb_index, + u16 pool_index, + struct devlink_sb_pool_info *pool_info) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_sb_pool_get(ocelot, sb_index, pool_index, pool_info); +} + +static int felix_sb_pool_set(struct dsa_switch *ds, unsigned int sb_index, + u16 pool_index, u32 size, + enum devlink_sb_threshold_type threshold_type, + struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_sb_pool_set(ocelot, sb_index, pool_index, size, + threshold_type, extack); +} + +static int felix_sb_port_pool_get(struct dsa_switch *ds, int port, + unsigned int sb_index, u16 pool_index, + u32 *p_threshold) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_sb_port_pool_get(ocelot, port, sb_index, pool_index, + p_threshold); +} + +static int felix_sb_port_pool_set(struct dsa_switch *ds, int port, + unsigned int sb_index, u16 pool_index, + u32 threshold, struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_sb_port_pool_set(ocelot, port, sb_index, pool_index, + threshold, extack); +} + +static int felix_sb_tc_pool_bind_get(struct dsa_switch *ds, int port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 *p_pool_index, u32 *p_threshold) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_sb_tc_pool_bind_get(ocelot, port, sb_index, tc_index, + pool_type, p_pool_index, + p_threshold); +} + +static int felix_sb_tc_pool_bind_set(struct dsa_switch *ds, int port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 pool_index, u32 threshold, + struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_sb_tc_pool_bind_set(ocelot, port, sb_index, tc_index, + pool_type, pool_index, threshold, + extack); +} + +static int felix_sb_occ_snapshot(struct dsa_switch *ds, + unsigned int sb_index) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_sb_occ_snapshot(ocelot, sb_index); +} + +static int felix_sb_occ_max_clear(struct dsa_switch *ds, + unsigned int sb_index) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_sb_occ_max_clear(ocelot, sb_index); +} + +static int felix_sb_occ_port_pool_get(struct dsa_switch *ds, int port, + unsigned int sb_index, u16 pool_index, + u32 *p_cur, u32 *p_max) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_sb_occ_port_pool_get(ocelot, port, sb_index, pool_index, + p_cur, p_max); +} + +static int felix_sb_occ_tc_port_bind_get(struct dsa_switch *ds, int port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u32 *p_cur, u32 *p_max) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_sb_occ_tc_port_bind_get(ocelot, port, sb_index, tc_index, + pool_type, p_cur, p_max); +} + +static int felix_mrp_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_mrp *mrp) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_mrp_add(ocelot, port, mrp); +} + +static int felix_mrp_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_mrp *mrp) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_mrp_add(ocelot, port, mrp); +} + +static int +felix_mrp_add_ring_role(struct dsa_switch *ds, int port, + const struct switchdev_obj_ring_role_mrp *mrp) +{ + struct ocelot *ocelot = 
ds->priv; + + return ocelot_mrp_add_ring_role(ocelot, port, mrp); +} + +static int +felix_mrp_del_ring_role(struct dsa_switch *ds, int port, + const struct switchdev_obj_ring_role_mrp *mrp) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_mrp_del_ring_role(ocelot, port, mrp); +} + +static int felix_port_get_default_prio(struct dsa_switch *ds, int port) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_port_get_default_prio(ocelot, port); +} + +static int felix_port_set_default_prio(struct dsa_switch *ds, int port, + u8 prio) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_port_set_default_prio(ocelot, port, prio); +} + +static int felix_port_get_dscp_prio(struct dsa_switch *ds, int port, u8 dscp) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_port_get_dscp_prio(ocelot, port, dscp); +} + +static int felix_port_add_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, + u8 prio) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_port_add_dscp_prio(ocelot, port, dscp, prio); +} + +static int felix_port_del_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, + u8 prio) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_port_del_dscp_prio(ocelot, port, dscp, prio); +} + +const struct dsa_switch_ops felix_switch_ops = { + .get_tag_protocol = felix_get_tag_protocol, + .change_tag_protocol = felix_change_tag_protocol, + .connect_tag_protocol = felix_connect_tag_protocol, + .setup = felix_setup, + .teardown = felix_teardown, + .set_ageing_time = felix_set_ageing_time, + .get_stats64 = felix_get_stats64, + .get_pause_stats = felix_get_pause_stats, + .get_rmon_stats = felix_get_rmon_stats, + .get_eth_ctrl_stats = felix_get_eth_ctrl_stats, + .get_eth_mac_stats = felix_get_eth_mac_stats, + .get_eth_phy_stats = felix_get_eth_phy_stats, + .get_strings = felix_get_strings, + .get_ethtool_stats = felix_get_ethtool_stats, + .get_sset_count = felix_get_sset_count, + .get_ts_info = felix_get_ts_info, + .phylink_get_caps = felix_phylink_get_caps, + .phylink_validate = felix_phylink_validate, + .phylink_mac_select_pcs = felix_phylink_mac_select_pcs, + .phylink_mac_link_down = felix_phylink_mac_link_down, + .phylink_mac_link_up = felix_phylink_mac_link_up, + .port_enable = felix_port_enable, + .port_fast_age = felix_port_fast_age, + .port_fdb_dump = felix_fdb_dump, + .port_fdb_add = felix_fdb_add, + .port_fdb_del = felix_fdb_del, + .lag_fdb_add = felix_lag_fdb_add, + .lag_fdb_del = felix_lag_fdb_del, + .port_mdb_add = felix_mdb_add, + .port_mdb_del = felix_mdb_del, + .port_pre_bridge_flags = felix_pre_bridge_flags, + .port_bridge_flags = felix_bridge_flags, + .port_bridge_join = felix_bridge_join, + .port_bridge_leave = felix_bridge_leave, + .port_lag_join = felix_lag_join, + .port_lag_leave = felix_lag_leave, + .port_lag_change = felix_lag_change, + .port_stp_state_set = felix_bridge_stp_state_set, + .port_vlan_filtering = felix_vlan_filtering, + .port_vlan_add = felix_vlan_add, + .port_vlan_del = felix_vlan_del, + .port_hwtstamp_get = felix_hwtstamp_get, + .port_hwtstamp_set = felix_hwtstamp_set, + .port_rxtstamp = felix_rxtstamp, + .port_txtstamp = felix_txtstamp, + .port_change_mtu = felix_change_mtu, + .port_max_mtu = felix_get_max_mtu, + .port_policer_add = felix_port_policer_add, + .port_policer_del = felix_port_policer_del, + .port_mirror_add = felix_port_mirror_add, + .port_mirror_del = felix_port_mirror_del, + .cls_flower_add = felix_cls_flower_add, + .cls_flower_del = felix_cls_flower_del, + .cls_flower_stats = felix_cls_flower_stats, + .port_setup_tc = 
felix_port_setup_tc, + .devlink_sb_pool_get = felix_sb_pool_get, + .devlink_sb_pool_set = felix_sb_pool_set, + .devlink_sb_port_pool_get = felix_sb_port_pool_get, + .devlink_sb_port_pool_set = felix_sb_port_pool_set, + .devlink_sb_tc_pool_bind_get = felix_sb_tc_pool_bind_get, + .devlink_sb_tc_pool_bind_set = felix_sb_tc_pool_bind_set, + .devlink_sb_occ_snapshot = felix_sb_occ_snapshot, + .devlink_sb_occ_max_clear = felix_sb_occ_max_clear, + .devlink_sb_occ_port_pool_get = felix_sb_occ_port_pool_get, + .devlink_sb_occ_tc_port_bind_get= felix_sb_occ_tc_port_bind_get, + .port_mrp_add = felix_mrp_add, + .port_mrp_del = felix_mrp_del, + .port_mrp_add_ring_role = felix_mrp_add_ring_role, + .port_mrp_del_ring_role = felix_mrp_del_ring_role, + .tag_8021q_vlan_add = felix_tag_8021q_vlan_add, + .tag_8021q_vlan_del = felix_tag_8021q_vlan_del, + .port_get_default_prio = felix_port_get_default_prio, + .port_set_default_prio = felix_port_set_default_prio, + .port_get_dscp_prio = felix_port_get_dscp_prio, + .port_add_dscp_prio = felix_port_add_dscp_prio, + .port_del_dscp_prio = felix_port_del_dscp_prio, + .port_set_host_flood = felix_port_set_host_flood, + .port_change_master = felix_port_change_master, +}; + +struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port) +{ + struct felix *felix = ocelot_to_felix(ocelot); + struct dsa_switch *ds = felix->ds; + + if (!dsa_is_user_port(ds, port)) + return NULL; + + return dsa_to_port(ds, port)->slave; +} + +int felix_netdev_to_port(struct net_device *dev) +{ + struct dsa_port *dp; + + dp = dsa_port_from_netdev(dev); + if (IS_ERR(dp)) + return -EINVAL; + + return dp->index; +} diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h new file mode 100644 index 000000000..c9c29999c --- /dev/null +++ b/drivers/net/dsa/ocelot/felix.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright 2019 NXP + */ +#ifndef _MSCC_FELIX_H +#define _MSCC_FELIX_H + +#define ocelot_to_felix(o) container_of((o), struct felix, ocelot) +#define FELIX_MAC_QUIRKS OCELOT_QUIRK_PCS_PERFORMS_RATE_ADAPTATION + +#define OCELOT_PORT_MODE_INTERNAL BIT(0) +#define OCELOT_PORT_MODE_SGMII BIT(1) +#define OCELOT_PORT_MODE_QSGMII BIT(2) +#define OCELOT_PORT_MODE_2500BASEX BIT(3) +#define OCELOT_PORT_MODE_USXGMII BIT(4) +#define OCELOT_PORT_MODE_1000BASEX BIT(5) + +/* Platform-specific information */ +struct felix_info { + /* Hardcoded resources provided by the hardware instantiation. */ + const struct resource *resources; + size_t num_resources; + /* Names of the mandatory resources that will be requested during + * probe. Must have TARGET_MAX elements, since it is indexed by target. + */ + const char *const *resource_names; + const struct reg_field *regfields; + const u32 *const *map; + const struct ocelot_ops *ops; + const u32 *port_modes; + int num_mact_rows; + const struct ocelot_stat_layout *stats_layout; + int num_ports; + int num_tx_queues; + struct vcap_props *vcap; + u16 vcap_pol_base; + u16 vcap_pol_max; + u16 vcap_pol_base2; + u16 vcap_pol_max2; + const struct ptp_clock_info *ptp_caps; + + /* Some Ocelot switches are integrated into the SoC without the + * extraction IRQ line connected to the ARM GIC. By enabling this + * workaround, the few packets that are delivered to the CPU port + * module (currently only PTP) are copied not only to the hardware CPU + * port module, but also to the 802.1Q Ethernet CPU port, and polling + * the extraction registers is triggered once the DSA tagger sees a PTP + * frame. 
The Ethernet frame is only used as a notification: it is + * dropped, and the original frame is extracted over MMIO and annotated + * with the RX timestamp. + */ + bool quirk_no_xtr_irq; + + int (*mdio_bus_alloc)(struct ocelot *ocelot); + void (*mdio_bus_free)(struct ocelot *ocelot); + void (*phylink_validate)(struct ocelot *ocelot, int port, + unsigned long *supported, + struct phylink_link_state *state); + int (*port_setup_tc)(struct dsa_switch *ds, int port, + enum tc_setup_type type, void *type_data); + void (*tas_guard_bands_update)(struct ocelot *ocelot, int port); + void (*port_sched_speed_set)(struct ocelot *ocelot, int port, + u32 speed); +}; + +/* Methods for initializing the hardware resources specific to a tagging + * protocol (like the NPI port, for "ocelot" or "seville", or the VCAP TCAMs, + * for "ocelot-8021q"). + * It is important that the resources configured here do not have side effects + * for the other tagging protocols. If that is the case, their configuration + * needs to go to felix_tag_proto_setup_shared(). + */ +struct felix_tag_proto_ops { + int (*setup)(struct dsa_switch *ds); + void (*teardown)(struct dsa_switch *ds); + unsigned long (*get_host_fwd_mask)(struct dsa_switch *ds); + int (*change_master)(struct dsa_switch *ds, int port, + struct net_device *master, + struct netlink_ext_ack *extack); +}; + +extern const struct dsa_switch_ops felix_switch_ops; + +/* DSA glue / front-end for struct ocelot */ +struct felix { + struct dsa_switch *ds; + const struct felix_info *info; + struct ocelot ocelot; + struct mii_bus *imdio; + struct phylink_pcs **pcs; + resource_size_t switch_base; + enum dsa_tag_protocol tag_proto; + const struct felix_tag_proto_ops *tag_proto_ops; + struct kthread_worker *xmit_worker; + unsigned long host_flood_uc_mask; + unsigned long host_flood_mc_mask; +}; + +struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port); +int felix_netdev_to_port(struct net_device *dev); + +#endif diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c new file mode 100644 index 000000000..018648219 --- /dev/null +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -0,0 +1,2743 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Copyright 2017 Microsemi Corporation + * Copyright 2018-2019 NXP + */ +#include <linux/fsl/enetc_mdio.h> +#include <soc/mscc/ocelot_qsys.h> +#include <soc/mscc/ocelot_vcap.h> +#include <soc/mscc/ocelot_ana.h> +#include <soc/mscc/ocelot_ptp.h> +#include <soc/mscc/ocelot_sys.h> +#include <net/tc_act/tc_gate.h> +#include <soc/mscc/ocelot.h> +#include <linux/dsa/ocelot.h> +#include <linux/pcs-lynx.h> +#include <net/pkt_sched.h> +#include <linux/iopoll.h> +#include <linux/mdio.h> +#include <linux/pci.h> +#include <linux/time.h> +#include "felix.h" + +#define VSC9959_NUM_PORTS 6 + +#define VSC9959_TAS_GCL_ENTRY_MAX 63 +#define VSC9959_TAS_MIN_GATE_LEN_NS 33 +#define VSC9959_VCAP_POLICER_BASE 63 +#define VSC9959_VCAP_POLICER_MAX 383 +#define VSC9959_SWITCH_PCI_BAR 4 +#define VSC9959_IMDIO_PCI_BAR 0 + +#define VSC9959_PORT_MODE_SERDES (OCELOT_PORT_MODE_SGMII | \ + OCELOT_PORT_MODE_QSGMII | \ + OCELOT_PORT_MODE_1000BASEX | \ + OCELOT_PORT_MODE_2500BASEX | \ + OCELOT_PORT_MODE_USXGMII) + +static const u32 vsc9959_port_modes[VSC9959_NUM_PORTS] = { + VSC9959_PORT_MODE_SERDES, + VSC9959_PORT_MODE_SERDES, + VSC9959_PORT_MODE_SERDES, + VSC9959_PORT_MODE_SERDES, + OCELOT_PORT_MODE_INTERNAL, + OCELOT_PORT_MODE_INTERNAL, +}; + +static const u32 vsc9959_ana_regmap[] = { + REG(ANA_ADVLEARN, 0x0089a0), + 
REG(ANA_VLANMASK, 0x0089a4), + REG_RESERVED(ANA_PORT_B_DOMAIN), + REG(ANA_ANAGEFIL, 0x0089ac), + REG(ANA_ANEVENTS, 0x0089b0), + REG(ANA_STORMLIMIT_BURST, 0x0089b4), + REG(ANA_STORMLIMIT_CFG, 0x0089b8), + REG(ANA_ISOLATED_PORTS, 0x0089c8), + REG(ANA_COMMUNITY_PORTS, 0x0089cc), + REG(ANA_AUTOAGE, 0x0089d0), + REG(ANA_MACTOPTIONS, 0x0089d4), + REG(ANA_LEARNDISC, 0x0089d8), + REG(ANA_AGENCTRL, 0x0089dc), + REG(ANA_MIRRORPORTS, 0x0089e0), + REG(ANA_EMIRRORPORTS, 0x0089e4), + REG(ANA_FLOODING, 0x0089e8), + REG(ANA_FLOODING_IPMC, 0x008a08), + REG(ANA_SFLOW_CFG, 0x008a0c), + REG(ANA_PORT_MODE, 0x008a28), + REG(ANA_CUT_THRU_CFG, 0x008a48), + REG(ANA_PGID_PGID, 0x008400), + REG(ANA_TABLES_ANMOVED, 0x007f1c), + REG(ANA_TABLES_MACHDATA, 0x007f20), + REG(ANA_TABLES_MACLDATA, 0x007f24), + REG(ANA_TABLES_STREAMDATA, 0x007f28), + REG(ANA_TABLES_MACACCESS, 0x007f2c), + REG(ANA_TABLES_MACTINDX, 0x007f30), + REG(ANA_TABLES_VLANACCESS, 0x007f34), + REG(ANA_TABLES_VLANTIDX, 0x007f38), + REG(ANA_TABLES_ISDXACCESS, 0x007f3c), + REG(ANA_TABLES_ISDXTIDX, 0x007f40), + REG(ANA_TABLES_ENTRYLIM, 0x007f00), + REG(ANA_TABLES_PTP_ID_HIGH, 0x007f44), + REG(ANA_TABLES_PTP_ID_LOW, 0x007f48), + REG(ANA_TABLES_STREAMACCESS, 0x007f4c), + REG(ANA_TABLES_STREAMTIDX, 0x007f50), + REG(ANA_TABLES_SEQ_HISTORY, 0x007f54), + REG(ANA_TABLES_SEQ_MASK, 0x007f58), + REG(ANA_TABLES_SFID_MASK, 0x007f5c), + REG(ANA_TABLES_SFIDACCESS, 0x007f60), + REG(ANA_TABLES_SFIDTIDX, 0x007f64), + REG(ANA_MSTI_STATE, 0x008600), + REG(ANA_OAM_UPM_LM_CNT, 0x008000), + REG(ANA_SG_ACCESS_CTRL, 0x008a64), + REG(ANA_SG_CONFIG_REG_1, 0x007fb0), + REG(ANA_SG_CONFIG_REG_2, 0x007fb4), + REG(ANA_SG_CONFIG_REG_3, 0x007fb8), + REG(ANA_SG_CONFIG_REG_4, 0x007fbc), + REG(ANA_SG_CONFIG_REG_5, 0x007fc0), + REG(ANA_SG_GCL_GS_CONFIG, 0x007f80), + REG(ANA_SG_GCL_TI_CONFIG, 0x007f90), + REG(ANA_SG_STATUS_REG_1, 0x008980), + REG(ANA_SG_STATUS_REG_2, 0x008984), + REG(ANA_SG_STATUS_REG_3, 0x008988), + REG(ANA_PORT_VLAN_CFG, 0x007800), + REG(ANA_PORT_DROP_CFG, 0x007804), + REG(ANA_PORT_QOS_CFG, 0x007808), + REG(ANA_PORT_VCAP_CFG, 0x00780c), + REG(ANA_PORT_VCAP_S1_KEY_CFG, 0x007810), + REG(ANA_PORT_VCAP_S2_CFG, 0x00781c), + REG(ANA_PORT_PCP_DEI_MAP, 0x007820), + REG(ANA_PORT_CPU_FWD_CFG, 0x007860), + REG(ANA_PORT_CPU_FWD_BPDU_CFG, 0x007864), + REG(ANA_PORT_CPU_FWD_GARP_CFG, 0x007868), + REG(ANA_PORT_CPU_FWD_CCM_CFG, 0x00786c), + REG(ANA_PORT_PORT_CFG, 0x007870), + REG(ANA_PORT_POL_CFG, 0x007874), + REG(ANA_PORT_PTP_CFG, 0x007878), + REG(ANA_PORT_PTP_DLY1_CFG, 0x00787c), + REG(ANA_PORT_PTP_DLY2_CFG, 0x007880), + REG(ANA_PORT_SFID_CFG, 0x007884), + REG(ANA_PFC_PFC_CFG, 0x008800), + REG_RESERVED(ANA_PFC_PFC_TIMER), + REG_RESERVED(ANA_IPT_OAM_MEP_CFG), + REG_RESERVED(ANA_IPT_IPT), + REG_RESERVED(ANA_PPT_PPT), + REG_RESERVED(ANA_FID_MAP_FID_MAP), + REG(ANA_AGGR_CFG, 0x008a68), + REG(ANA_CPUQ_CFG, 0x008a6c), + REG_RESERVED(ANA_CPUQ_CFG2), + REG(ANA_CPUQ_8021_CFG, 0x008a74), + REG(ANA_DSCP_CFG, 0x008ab4), + REG(ANA_DSCP_REWR_CFG, 0x008bb4), + REG(ANA_VCAP_RNG_TYPE_CFG, 0x008bf4), + REG(ANA_VCAP_RNG_VAL_CFG, 0x008c14), + REG_RESERVED(ANA_VRAP_CFG), + REG_RESERVED(ANA_VRAP_HDR_DATA), + REG_RESERVED(ANA_VRAP_HDR_MASK), + REG(ANA_DISCARD_CFG, 0x008c40), + REG(ANA_FID_CFG, 0x008c44), + REG(ANA_POL_PIR_CFG, 0x004000), + REG(ANA_POL_CIR_CFG, 0x004004), + REG(ANA_POL_MODE_CFG, 0x004008), + REG(ANA_POL_PIR_STATE, 0x00400c), + REG(ANA_POL_CIR_STATE, 0x004010), + REG_RESERVED(ANA_POL_STATE), + REG(ANA_POL_FLOWC, 0x008c48), + REG(ANA_POL_HYST, 0x008cb4), + REG_RESERVED(ANA_POL_MISC_CFG), +}; + 
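+/* Illustrative note (not from the original sources): each regmap table above
+ * maps a generic ocelot register enum to its VSC9959 offset inside one
+ * register target. The base address of each target comes from
+ * vsc9959_resources further below (for example the "ana" block at 0x0280000,
+ * relative to PCI BAR 4), so a call such as ocelot_read(ocelot, ANA_FLOODING)
+ * should end up at 0x0280000 + 0x0089e8 inside the switch BAR. The address
+ * composition itself is performed by the common ocelot switch library; the
+ * worked offsets here are only a sketch derived from the tables in this file.
+ */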
+static const u32 vsc9959_qs_regmap[] = { + REG(QS_XTR_GRP_CFG, 0x000000), + REG(QS_XTR_RD, 0x000008), + REG(QS_XTR_FRM_PRUNING, 0x000010), + REG(QS_XTR_FLUSH, 0x000018), + REG(QS_XTR_DATA_PRESENT, 0x00001c), + REG(QS_XTR_CFG, 0x000020), + REG(QS_INJ_GRP_CFG, 0x000024), + REG(QS_INJ_WR, 0x00002c), + REG(QS_INJ_CTRL, 0x000034), + REG(QS_INJ_STATUS, 0x00003c), + REG(QS_INJ_ERR, 0x000040), + REG_RESERVED(QS_INH_DBG), +}; + +static const u32 vsc9959_vcap_regmap[] = { + /* VCAP_CORE_CFG */ + REG(VCAP_CORE_UPDATE_CTRL, 0x000000), + REG(VCAP_CORE_MV_CFG, 0x000004), + /* VCAP_CORE_CACHE */ + REG(VCAP_CACHE_ENTRY_DAT, 0x000008), + REG(VCAP_CACHE_MASK_DAT, 0x000108), + REG(VCAP_CACHE_ACTION_DAT, 0x000208), + REG(VCAP_CACHE_CNT_DAT, 0x000308), + REG(VCAP_CACHE_TG_DAT, 0x000388), + /* VCAP_CONST */ + REG(VCAP_CONST_VCAP_VER, 0x000398), + REG(VCAP_CONST_ENTRY_WIDTH, 0x00039c), + REG(VCAP_CONST_ENTRY_CNT, 0x0003a0), + REG(VCAP_CONST_ENTRY_SWCNT, 0x0003a4), + REG(VCAP_CONST_ENTRY_TG_WIDTH, 0x0003a8), + REG(VCAP_CONST_ACTION_DEF_CNT, 0x0003ac), + REG(VCAP_CONST_ACTION_WIDTH, 0x0003b0), + REG(VCAP_CONST_CNT_WIDTH, 0x0003b4), + REG(VCAP_CONST_CORE_CNT, 0x0003b8), + REG(VCAP_CONST_IF_CNT, 0x0003bc), +}; + +static const u32 vsc9959_qsys_regmap[] = { + REG(QSYS_PORT_MODE, 0x00f460), + REG(QSYS_SWITCH_PORT_MODE, 0x00f480), + REG(QSYS_STAT_CNT_CFG, 0x00f49c), + REG(QSYS_EEE_CFG, 0x00f4a0), + REG(QSYS_EEE_THRES, 0x00f4b8), + REG(QSYS_IGR_NO_SHARING, 0x00f4bc), + REG(QSYS_EGR_NO_SHARING, 0x00f4c0), + REG(QSYS_SW_STATUS, 0x00f4c4), + REG(QSYS_EXT_CPU_CFG, 0x00f4e0), + REG_RESERVED(QSYS_PAD_CFG), + REG(QSYS_CPU_GROUP_MAP, 0x00f4e8), + REG_RESERVED(QSYS_QMAP), + REG_RESERVED(QSYS_ISDX_SGRP), + REG_RESERVED(QSYS_TIMED_FRAME_ENTRY), + REG(QSYS_TFRM_MISC, 0x00f50c), + REG(QSYS_TFRM_PORT_DLY, 0x00f510), + REG(QSYS_TFRM_TIMER_CFG_1, 0x00f514), + REG(QSYS_TFRM_TIMER_CFG_2, 0x00f518), + REG(QSYS_TFRM_TIMER_CFG_3, 0x00f51c), + REG(QSYS_TFRM_TIMER_CFG_4, 0x00f520), + REG(QSYS_TFRM_TIMER_CFG_5, 0x00f524), + REG(QSYS_TFRM_TIMER_CFG_6, 0x00f528), + REG(QSYS_TFRM_TIMER_CFG_7, 0x00f52c), + REG(QSYS_TFRM_TIMER_CFG_8, 0x00f530), + REG(QSYS_RED_PROFILE, 0x00f534), + REG(QSYS_RES_QOS_MODE, 0x00f574), + REG(QSYS_RES_CFG, 0x00c000), + REG(QSYS_RES_STAT, 0x00c004), + REG(QSYS_EGR_DROP_MODE, 0x00f578), + REG(QSYS_EQ_CTRL, 0x00f57c), + REG_RESERVED(QSYS_EVENTS_CORE), + REG(QSYS_QMAXSDU_CFG_0, 0x00f584), + REG(QSYS_QMAXSDU_CFG_1, 0x00f5a0), + REG(QSYS_QMAXSDU_CFG_2, 0x00f5bc), + REG(QSYS_QMAXSDU_CFG_3, 0x00f5d8), + REG(QSYS_QMAXSDU_CFG_4, 0x00f5f4), + REG(QSYS_QMAXSDU_CFG_5, 0x00f610), + REG(QSYS_QMAXSDU_CFG_6, 0x00f62c), + REG(QSYS_QMAXSDU_CFG_7, 0x00f648), + REG(QSYS_PREEMPTION_CFG, 0x00f664), + REG(QSYS_CIR_CFG, 0x000000), + REG(QSYS_EIR_CFG, 0x000004), + REG(QSYS_SE_CFG, 0x000008), + REG(QSYS_SE_DWRR_CFG, 0x00000c), + REG_RESERVED(QSYS_SE_CONNECT), + REG(QSYS_SE_DLB_SENSE, 0x000040), + REG(QSYS_CIR_STATE, 0x000044), + REG(QSYS_EIR_STATE, 0x000048), + REG_RESERVED(QSYS_SE_STATE), + REG(QSYS_HSCH_MISC_CFG, 0x00f67c), + REG(QSYS_TAG_CONFIG, 0x00f680), + REG(QSYS_TAS_PARAM_CFG_CTRL, 0x00f698), + REG(QSYS_PORT_MAX_SDU, 0x00f69c), + REG(QSYS_PARAM_CFG_REG_1, 0x00f440), + REG(QSYS_PARAM_CFG_REG_2, 0x00f444), + REG(QSYS_PARAM_CFG_REG_3, 0x00f448), + REG(QSYS_PARAM_CFG_REG_4, 0x00f44c), + REG(QSYS_PARAM_CFG_REG_5, 0x00f450), + REG(QSYS_GCL_CFG_REG_1, 0x00f454), + REG(QSYS_GCL_CFG_REG_2, 0x00f458), + REG(QSYS_PARAM_STATUS_REG_1, 0x00f400), + REG(QSYS_PARAM_STATUS_REG_2, 0x00f404), + REG(QSYS_PARAM_STATUS_REG_3, 0x00f408), + 
REG(QSYS_PARAM_STATUS_REG_4, 0x00f40c), + REG(QSYS_PARAM_STATUS_REG_5, 0x00f410), + REG(QSYS_PARAM_STATUS_REG_6, 0x00f414), + REG(QSYS_PARAM_STATUS_REG_7, 0x00f418), + REG(QSYS_PARAM_STATUS_REG_8, 0x00f41c), + REG(QSYS_PARAM_STATUS_REG_9, 0x00f420), + REG(QSYS_GCL_STATUS_REG_1, 0x00f424), + REG(QSYS_GCL_STATUS_REG_2, 0x00f428), +}; + +static const u32 vsc9959_rew_regmap[] = { + REG(REW_PORT_VLAN_CFG, 0x000000), + REG(REW_TAG_CFG, 0x000004), + REG(REW_PORT_CFG, 0x000008), + REG(REW_DSCP_CFG, 0x00000c), + REG(REW_PCP_DEI_QOS_MAP_CFG, 0x000010), + REG(REW_PTP_CFG, 0x000050), + REG(REW_PTP_DLY1_CFG, 0x000054), + REG(REW_RED_TAG_CFG, 0x000058), + REG(REW_DSCP_REMAP_DP1_CFG, 0x000410), + REG(REW_DSCP_REMAP_CFG, 0x000510), + REG_RESERVED(REW_STAT_CFG), + REG_RESERVED(REW_REW_STICKY), + REG_RESERVED(REW_PPT), +}; + +static const u32 vsc9959_sys_regmap[] = { + REG(SYS_COUNT_RX_OCTETS, 0x000000), + REG(SYS_COUNT_RX_UNICAST, 0x000004), + REG(SYS_COUNT_RX_MULTICAST, 0x000008), + REG(SYS_COUNT_RX_BROADCAST, 0x00000c), + REG(SYS_COUNT_RX_SHORTS, 0x000010), + REG(SYS_COUNT_RX_FRAGMENTS, 0x000014), + REG(SYS_COUNT_RX_JABBERS, 0x000018), + REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c), + REG(SYS_COUNT_RX_SYM_ERRS, 0x000020), + REG(SYS_COUNT_RX_64, 0x000024), + REG(SYS_COUNT_RX_65_127, 0x000028), + REG(SYS_COUNT_RX_128_255, 0x00002c), + REG(SYS_COUNT_RX_256_511, 0x000030), + REG(SYS_COUNT_RX_512_1023, 0x000034), + REG(SYS_COUNT_RX_1024_1526, 0x000038), + REG(SYS_COUNT_RX_1527_MAX, 0x00003c), + REG(SYS_COUNT_RX_PAUSE, 0x000040), + REG(SYS_COUNT_RX_CONTROL, 0x000044), + REG(SYS_COUNT_RX_LONGS, 0x000048), + REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c), + REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050), + REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054), + REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058), + REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c), + REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060), + REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064), + REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068), + REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c), + REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070), + REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074), + REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078), + REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c), + REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080), + REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084), + REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088), + REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c), + REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090), + REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094), + REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098), + REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c), + REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0), + REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4), + REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8), + REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac), + REG(SYS_COUNT_TX_OCTETS, 0x000200), + REG(SYS_COUNT_TX_UNICAST, 0x000204), + REG(SYS_COUNT_TX_MULTICAST, 0x000208), + REG(SYS_COUNT_TX_BROADCAST, 0x00020c), + REG(SYS_COUNT_TX_COLLISION, 0x000210), + REG(SYS_COUNT_TX_DROPS, 0x000214), + REG(SYS_COUNT_TX_PAUSE, 0x000218), + REG(SYS_COUNT_TX_64, 0x00021c), + REG(SYS_COUNT_TX_65_127, 0x000220), + REG(SYS_COUNT_TX_128_255, 0x000224), + REG(SYS_COUNT_TX_256_511, 0x000228), + REG(SYS_COUNT_TX_512_1023, 0x00022c), + REG(SYS_COUNT_TX_1024_1526, 0x000230), + REG(SYS_COUNT_TX_1527_MAX, 0x000234), + REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000238), + REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00023c), + REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000240), + REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000244), + REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000248), + REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00024c), + REG(SYS_COUNT_TX_YELLOW_PRIO_6, 
0x000250), + REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000254), + REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000258), + REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00025c), + REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000260), + REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000264), + REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000268), + REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00026c), + REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000270), + REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000274), + REG(SYS_COUNT_TX_AGED, 0x000278), + REG(SYS_COUNT_DROP_LOCAL, 0x000400), + REG(SYS_COUNT_DROP_TAIL, 0x000404), + REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000408), + REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00040c), + REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000410), + REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000414), + REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000418), + REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00041c), + REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000420), + REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000424), + REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000428), + REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00042c), + REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000430), + REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000434), + REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000438), + REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00043c), + REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000440), + REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000444), + REG(SYS_COUNT_SF_MATCHING_FRAMES, 0x000800), + REG(SYS_COUNT_SF_NOT_PASSING_FRAMES, 0x000804), + REG(SYS_COUNT_SF_NOT_PASSING_SDU, 0x000808), + REG(SYS_COUNT_SF_RED_FRAMES, 0x00080c), + REG(SYS_RESET_CFG, 0x000e00), + REG(SYS_SR_ETYPE_CFG, 0x000e04), + REG(SYS_VLAN_ETYPE_CFG, 0x000e08), + REG(SYS_PORT_MODE, 0x000e0c), + REG(SYS_FRONT_PORT_MODE, 0x000e2c), + REG(SYS_FRM_AGING, 0x000e44), + REG(SYS_STAT_CFG, 0x000e48), + REG(SYS_SW_STATUS, 0x000e4c), + REG_RESERVED(SYS_MISC_CFG), + REG(SYS_REW_MAC_HIGH_CFG, 0x000e6c), + REG(SYS_REW_MAC_LOW_CFG, 0x000e84), + REG(SYS_TIMESTAMP_OFFSET, 0x000e9c), + REG(SYS_PAUSE_CFG, 0x000ea0), + REG(SYS_PAUSE_TOT_CFG, 0x000ebc), + REG(SYS_ATOP, 0x000ec0), + REG(SYS_ATOP_TOT_CFG, 0x000edc), + REG(SYS_MAC_FC_CFG, 0x000ee0), + REG(SYS_MMGT, 0x000ef8), + REG_RESERVED(SYS_MMGT_FAST), + REG_RESERVED(SYS_EVENTS_DIF), + REG_RESERVED(SYS_EVENTS_CORE), + REG(SYS_PTP_STATUS, 0x000f14), + REG(SYS_PTP_TXSTAMP, 0x000f18), + REG(SYS_PTP_NXT, 0x000f1c), + REG(SYS_PTP_CFG, 0x000f20), + REG(SYS_RAM_INIT, 0x000f24), + REG_RESERVED(SYS_CM_ADDR), + REG_RESERVED(SYS_CM_DATA_WR), + REG_RESERVED(SYS_CM_DATA_RD), + REG_RESERVED(SYS_CM_OP), + REG_RESERVED(SYS_CM_DATA), +}; + +static const u32 vsc9959_ptp_regmap[] = { + REG(PTP_PIN_CFG, 0x000000), + REG(PTP_PIN_TOD_SEC_MSB, 0x000004), + REG(PTP_PIN_TOD_SEC_LSB, 0x000008), + REG(PTP_PIN_TOD_NSEC, 0x00000c), + REG(PTP_PIN_WF_HIGH_PERIOD, 0x000014), + REG(PTP_PIN_WF_LOW_PERIOD, 0x000018), + REG(PTP_CFG_MISC, 0x0000a0), + REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4), + REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8), +}; + +static const u32 vsc9959_gcb_regmap[] = { + REG(GCB_SOFT_RST, 0x000004), +}; + +static const u32 vsc9959_dev_gmii_regmap[] = { + REG(DEV_CLOCK_CFG, 0x0), + REG(DEV_PORT_MISC, 0x4), + REG(DEV_EVENTS, 0x8), + REG(DEV_EEE_CFG, 0xc), + REG(DEV_RX_PATH_DELAY, 0x10), + REG(DEV_TX_PATH_DELAY, 0x14), + REG(DEV_PTP_PREDICT_CFG, 0x18), + REG(DEV_MAC_ENA_CFG, 0x1c), + REG(DEV_MAC_MODE_CFG, 0x20), + REG(DEV_MAC_MAXLEN_CFG, 0x24), + REG(DEV_MAC_TAGS_CFG, 0x28), + REG(DEV_MAC_ADV_CHK_CFG, 0x2c), + REG(DEV_MAC_IFG_CFG, 0x30), + REG(DEV_MAC_HDX_CFG, 0x34), + REG(DEV_MAC_DBG_CFG, 0x38), + REG(DEV_MAC_FC_MAC_LOW_CFG, 0x3c), + REG(DEV_MAC_FC_MAC_HIGH_CFG, 0x40), + REG(DEV_MAC_STICKY, 0x44), + REG_RESERVED(PCS1G_CFG), + 
REG_RESERVED(PCS1G_MODE_CFG), + REG_RESERVED(PCS1G_SD_CFG), + REG_RESERVED(PCS1G_ANEG_CFG), + REG_RESERVED(PCS1G_ANEG_NP_CFG), + REG_RESERVED(PCS1G_LB_CFG), + REG_RESERVED(PCS1G_DBG_CFG), + REG_RESERVED(PCS1G_CDET_CFG), + REG_RESERVED(PCS1G_ANEG_STATUS), + REG_RESERVED(PCS1G_ANEG_NP_STATUS), + REG_RESERVED(PCS1G_LINK_STATUS), + REG_RESERVED(PCS1G_LINK_DOWN_CNT), + REG_RESERVED(PCS1G_STICKY), + REG_RESERVED(PCS1G_DEBUG_STATUS), + REG_RESERVED(PCS1G_LPI_CFG), + REG_RESERVED(PCS1G_LPI_WAKE_ERROR_CNT), + REG_RESERVED(PCS1G_LPI_STATUS), + REG_RESERVED(PCS1G_TSTPAT_MODE_CFG), + REG_RESERVED(PCS1G_TSTPAT_STATUS), + REG_RESERVED(DEV_PCS_FX100_CFG), + REG_RESERVED(DEV_PCS_FX100_STATUS), +}; + +static const u32 *vsc9959_regmap[TARGET_MAX] = { + [ANA] = vsc9959_ana_regmap, + [QS] = vsc9959_qs_regmap, + [QSYS] = vsc9959_qsys_regmap, + [REW] = vsc9959_rew_regmap, + [SYS] = vsc9959_sys_regmap, + [S0] = vsc9959_vcap_regmap, + [S1] = vsc9959_vcap_regmap, + [S2] = vsc9959_vcap_regmap, + [PTP] = vsc9959_ptp_regmap, + [GCB] = vsc9959_gcb_regmap, + [DEV_GMII] = vsc9959_dev_gmii_regmap, +}; + +/* Addresses are relative to the PCI device's base address */ +static const struct resource vsc9959_resources[] = { + DEFINE_RES_MEM_NAMED(0x0010000, 0x0010000, "sys"), + DEFINE_RES_MEM_NAMED(0x0030000, 0x0010000, "rew"), + DEFINE_RES_MEM_NAMED(0x0040000, 0x0000400, "s0"), + DEFINE_RES_MEM_NAMED(0x0050000, 0x0000400, "s1"), + DEFINE_RES_MEM_NAMED(0x0060000, 0x0000400, "s2"), + DEFINE_RES_MEM_NAMED(0x0070000, 0x0000200, "devcpu_gcb"), + DEFINE_RES_MEM_NAMED(0x0080000, 0x0000100, "qs"), + DEFINE_RES_MEM_NAMED(0x0090000, 0x00000cc, "ptp"), + DEFINE_RES_MEM_NAMED(0x0100000, 0x0010000, "port0"), + DEFINE_RES_MEM_NAMED(0x0110000, 0x0010000, "port1"), + DEFINE_RES_MEM_NAMED(0x0120000, 0x0010000, "port2"), + DEFINE_RES_MEM_NAMED(0x0130000, 0x0010000, "port3"), + DEFINE_RES_MEM_NAMED(0x0140000, 0x0010000, "port4"), + DEFINE_RES_MEM_NAMED(0x0150000, 0x0010000, "port5"), + DEFINE_RES_MEM_NAMED(0x0200000, 0x0020000, "qsys"), + DEFINE_RES_MEM_NAMED(0x0280000, 0x0010000, "ana"), +}; + +static const char * const vsc9959_resource_names[TARGET_MAX] = { + [SYS] = "sys", + [REW] = "rew", + [S0] = "s0", + [S1] = "s1", + [S2] = "s2", + [GCB] = "devcpu_gcb", + [QS] = "qs", + [PTP] = "ptp", + [QSYS] = "qsys", + [ANA] = "ana", +}; + +/* Port MAC 0 Internal MDIO bus through which the SerDes acting as an + * SGMII/QSGMII MAC PCS can be found. 
+ */ +static const struct resource vsc9959_imdio_res = + DEFINE_RES_MEM_NAMED(0x8030, 0x10, "imdio"); + +static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = { + [ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 6, 6), + [ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 5), + [ANA_ANEVENTS_FLOOD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 30, 30), + [ANA_ANEVENTS_AUTOAGE] = REG_FIELD(ANA_ANEVENTS, 26, 26), + [ANA_ANEVENTS_STORM_DROP] = REG_FIELD(ANA_ANEVENTS, 24, 24), + [ANA_ANEVENTS_LEARN_DROP] = REG_FIELD(ANA_ANEVENTS, 23, 23), + [ANA_ANEVENTS_AGED_ENTRY] = REG_FIELD(ANA_ANEVENTS, 22, 22), + [ANA_ANEVENTS_CPU_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 21, 21), + [ANA_ANEVENTS_AUTO_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 20, 20), + [ANA_ANEVENTS_LEARN_REMOVE] = REG_FIELD(ANA_ANEVENTS, 19, 19), + [ANA_ANEVENTS_AUTO_LEARNED] = REG_FIELD(ANA_ANEVENTS, 18, 18), + [ANA_ANEVENTS_AUTO_MOVED] = REG_FIELD(ANA_ANEVENTS, 17, 17), + [ANA_ANEVENTS_CLASSIFIED_DROP] = REG_FIELD(ANA_ANEVENTS, 15, 15), + [ANA_ANEVENTS_CLASSIFIED_COPY] = REG_FIELD(ANA_ANEVENTS, 14, 14), + [ANA_ANEVENTS_VLAN_DISCARD] = REG_FIELD(ANA_ANEVENTS, 13, 13), + [ANA_ANEVENTS_FWD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 12, 12), + [ANA_ANEVENTS_MULTICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 11, 11), + [ANA_ANEVENTS_UNICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 10, 10), + [ANA_ANEVENTS_DEST_KNOWN] = REG_FIELD(ANA_ANEVENTS, 9, 9), + [ANA_ANEVENTS_BUCKET3_MATCH] = REG_FIELD(ANA_ANEVENTS, 8, 8), + [ANA_ANEVENTS_BUCKET2_MATCH] = REG_FIELD(ANA_ANEVENTS, 7, 7), + [ANA_ANEVENTS_BUCKET1_MATCH] = REG_FIELD(ANA_ANEVENTS, 6, 6), + [ANA_ANEVENTS_BUCKET0_MATCH] = REG_FIELD(ANA_ANEVENTS, 5, 5), + [ANA_ANEVENTS_CPU_OPERATION] = REG_FIELD(ANA_ANEVENTS, 4, 4), + [ANA_ANEVENTS_DMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 3, 3), + [ANA_ANEVENTS_SMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 2, 2), + [ANA_ANEVENTS_SEQ_GEN_ERR_0] = REG_FIELD(ANA_ANEVENTS, 1, 1), + [ANA_ANEVENTS_SEQ_GEN_ERR_1] = REG_FIELD(ANA_ANEVENTS, 0, 0), + [ANA_TABLES_MACACCESS_B_DOM] = REG_FIELD(ANA_TABLES_MACACCESS, 16, 16), + [ANA_TABLES_MACTINDX_BUCKET] = REG_FIELD(ANA_TABLES_MACTINDX, 11, 12), + [ANA_TABLES_MACTINDX_M_INDEX] = REG_FIELD(ANA_TABLES_MACTINDX, 0, 10), + [SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 0, 0), + [GCB_SOFT_RST_SWC_RST] = REG_FIELD(GCB_SOFT_RST, 0, 0), + /* Replicated per number of ports (7), register size 4 per port */ + [QSYS_SWITCH_PORT_MODE_PORT_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 14, 14, 7, 4), + [QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 11, 13, 7, 4), + [QSYS_SWITCH_PORT_MODE_YEL_RSRVD] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 10, 10, 7, 4), + [QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 9, 9, 7, 4), + [QSYS_SWITCH_PORT_MODE_TX_PFC_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 1, 8, 7, 4), + [QSYS_SWITCH_PORT_MODE_TX_PFC_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 0, 0, 7, 4), + [SYS_PORT_MODE_DATA_WO_TS] = REG_FIELD_ID(SYS_PORT_MODE, 5, 6, 7, 4), + [SYS_PORT_MODE_INCL_INJ_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 3, 4, 7, 4), + [SYS_PORT_MODE_INCL_XTR_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 1, 2, 7, 4), + [SYS_PORT_MODE_INCL_HDR_ERR] = REG_FIELD_ID(SYS_PORT_MODE, 0, 0, 7, 4), + [SYS_PAUSE_CFG_PAUSE_START] = REG_FIELD_ID(SYS_PAUSE_CFG, 10, 18, 7, 4), + [SYS_PAUSE_CFG_PAUSE_STOP] = REG_FIELD_ID(SYS_PAUSE_CFG, 1, 9, 7, 4), + [SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 7, 4), +}; + +static const struct ocelot_stat_layout vsc9959_stats_layout[OCELOT_NUM_STATS] = { + OCELOT_COMMON_STATS, +}; + 
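+/* Illustrative note (not from the original sources): the REG_FIELD_ID()
+ * entries above describe bitfields that exist once per port. For instance,
+ * REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 14, 14, 7, 4) reads as "bit 14 of
+ * QSYS_SWITCH_PORT_MODE, with 7 per-port instances spaced 4 bytes apart",
+ * matching the "Replicated per number of ports (7), register size 4 per
+ * port" comment. The meaning of the last two arguments follows the generic
+ * regmap REG_FIELD_ID() convention and is stated here as an assumption, not
+ * something spelled out in this file.
+ */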
+static const struct vcap_field vsc9959_vcap_es0_keys[] = { + [VCAP_ES0_EGR_PORT] = { 0, 3}, + [VCAP_ES0_IGR_PORT] = { 3, 3}, + [VCAP_ES0_RSV] = { 6, 2}, + [VCAP_ES0_L2_MC] = { 8, 1}, + [VCAP_ES0_L2_BC] = { 9, 1}, + [VCAP_ES0_VID] = { 10, 12}, + [VCAP_ES0_DP] = { 22, 1}, + [VCAP_ES0_PCP] = { 23, 3}, +}; + +static const struct vcap_field vsc9959_vcap_es0_actions[] = { + [VCAP_ES0_ACT_PUSH_OUTER_TAG] = { 0, 2}, + [VCAP_ES0_ACT_PUSH_INNER_TAG] = { 2, 1}, + [VCAP_ES0_ACT_TAG_A_TPID_SEL] = { 3, 2}, + [VCAP_ES0_ACT_TAG_A_VID_SEL] = { 5, 1}, + [VCAP_ES0_ACT_TAG_A_PCP_SEL] = { 6, 2}, + [VCAP_ES0_ACT_TAG_A_DEI_SEL] = { 8, 2}, + [VCAP_ES0_ACT_TAG_B_TPID_SEL] = { 10, 2}, + [VCAP_ES0_ACT_TAG_B_VID_SEL] = { 12, 1}, + [VCAP_ES0_ACT_TAG_B_PCP_SEL] = { 13, 2}, + [VCAP_ES0_ACT_TAG_B_DEI_SEL] = { 15, 2}, + [VCAP_ES0_ACT_VID_A_VAL] = { 17, 12}, + [VCAP_ES0_ACT_PCP_A_VAL] = { 29, 3}, + [VCAP_ES0_ACT_DEI_A_VAL] = { 32, 1}, + [VCAP_ES0_ACT_VID_B_VAL] = { 33, 12}, + [VCAP_ES0_ACT_PCP_B_VAL] = { 45, 3}, + [VCAP_ES0_ACT_DEI_B_VAL] = { 48, 1}, + [VCAP_ES0_ACT_RSV] = { 49, 23}, + [VCAP_ES0_ACT_HIT_STICKY] = { 72, 1}, +}; + +static const struct vcap_field vsc9959_vcap_is1_keys[] = { + [VCAP_IS1_HK_TYPE] = { 0, 1}, + [VCAP_IS1_HK_LOOKUP] = { 1, 2}, + [VCAP_IS1_HK_IGR_PORT_MASK] = { 3, 7}, + [VCAP_IS1_HK_RSV] = { 10, 9}, + [VCAP_IS1_HK_OAM_Y1731] = { 19, 1}, + [VCAP_IS1_HK_L2_MC] = { 20, 1}, + [VCAP_IS1_HK_L2_BC] = { 21, 1}, + [VCAP_IS1_HK_IP_MC] = { 22, 1}, + [VCAP_IS1_HK_VLAN_TAGGED] = { 23, 1}, + [VCAP_IS1_HK_VLAN_DBL_TAGGED] = { 24, 1}, + [VCAP_IS1_HK_TPID] = { 25, 1}, + [VCAP_IS1_HK_VID] = { 26, 12}, + [VCAP_IS1_HK_DEI] = { 38, 1}, + [VCAP_IS1_HK_PCP] = { 39, 3}, + /* Specific Fields for IS1 Half Key S1_NORMAL */ + [VCAP_IS1_HK_L2_SMAC] = { 42, 48}, + [VCAP_IS1_HK_ETYPE_LEN] = { 90, 1}, + [VCAP_IS1_HK_ETYPE] = { 91, 16}, + [VCAP_IS1_HK_IP_SNAP] = {107, 1}, + [VCAP_IS1_HK_IP4] = {108, 1}, + /* Layer-3 Information */ + [VCAP_IS1_HK_L3_FRAGMENT] = {109, 1}, + [VCAP_IS1_HK_L3_FRAG_OFS_GT0] = {110, 1}, + [VCAP_IS1_HK_L3_OPTIONS] = {111, 1}, + [VCAP_IS1_HK_L3_DSCP] = {112, 6}, + [VCAP_IS1_HK_L3_IP4_SIP] = {118, 32}, + /* Layer-4 Information */ + [VCAP_IS1_HK_TCP_UDP] = {150, 1}, + [VCAP_IS1_HK_TCP] = {151, 1}, + [VCAP_IS1_HK_L4_SPORT] = {152, 16}, + [VCAP_IS1_HK_L4_RNG] = {168, 8}, + /* Specific Fields for IS1 Half Key S1_5TUPLE_IP4 */ + [VCAP_IS1_HK_IP4_INNER_TPID] = { 42, 1}, + [VCAP_IS1_HK_IP4_INNER_VID] = { 43, 12}, + [VCAP_IS1_HK_IP4_INNER_DEI] = { 55, 1}, + [VCAP_IS1_HK_IP4_INNER_PCP] = { 56, 3}, + [VCAP_IS1_HK_IP4_IP4] = { 59, 1}, + [VCAP_IS1_HK_IP4_L3_FRAGMENT] = { 60, 1}, + [VCAP_IS1_HK_IP4_L3_FRAG_OFS_GT0] = { 61, 1}, + [VCAP_IS1_HK_IP4_L3_OPTIONS] = { 62, 1}, + [VCAP_IS1_HK_IP4_L3_DSCP] = { 63, 6}, + [VCAP_IS1_HK_IP4_L3_IP4_DIP] = { 69, 32}, + [VCAP_IS1_HK_IP4_L3_IP4_SIP] = {101, 32}, + [VCAP_IS1_HK_IP4_L3_PROTO] = {133, 8}, + [VCAP_IS1_HK_IP4_TCP_UDP] = {141, 1}, + [VCAP_IS1_HK_IP4_TCP] = {142, 1}, + [VCAP_IS1_HK_IP4_L4_RNG] = {143, 8}, + [VCAP_IS1_HK_IP4_IP_PAYLOAD_S1_5TUPLE] = {151, 32}, +}; + +static const struct vcap_field vsc9959_vcap_is1_actions[] = { + [VCAP_IS1_ACT_DSCP_ENA] = { 0, 1}, + [VCAP_IS1_ACT_DSCP_VAL] = { 1, 6}, + [VCAP_IS1_ACT_QOS_ENA] = { 7, 1}, + [VCAP_IS1_ACT_QOS_VAL] = { 8, 3}, + [VCAP_IS1_ACT_DP_ENA] = { 11, 1}, + [VCAP_IS1_ACT_DP_VAL] = { 12, 1}, + [VCAP_IS1_ACT_PAG_OVERRIDE_MASK] = { 13, 8}, + [VCAP_IS1_ACT_PAG_VAL] = { 21, 8}, + [VCAP_IS1_ACT_RSV] = { 29, 9}, + /* The fields below are incorrectly shifted by 2 in the manual */ + [VCAP_IS1_ACT_VID_REPLACE_ENA] = { 38, 1}, + 
[VCAP_IS1_ACT_VID_ADD_VAL] = { 39, 12}, + [VCAP_IS1_ACT_FID_SEL] = { 51, 2}, + [VCAP_IS1_ACT_FID_VAL] = { 53, 13}, + [VCAP_IS1_ACT_PCP_DEI_ENA] = { 66, 1}, + [VCAP_IS1_ACT_PCP_VAL] = { 67, 3}, + [VCAP_IS1_ACT_DEI_VAL] = { 70, 1}, + [VCAP_IS1_ACT_VLAN_POP_CNT_ENA] = { 71, 1}, + [VCAP_IS1_ACT_VLAN_POP_CNT] = { 72, 2}, + [VCAP_IS1_ACT_CUSTOM_ACE_TYPE_ENA] = { 74, 4}, + [VCAP_IS1_ACT_HIT_STICKY] = { 78, 1}, +}; + +static struct vcap_field vsc9959_vcap_is2_keys[] = { + /* Common: 41 bits */ + [VCAP_IS2_TYPE] = { 0, 4}, + [VCAP_IS2_HK_FIRST] = { 4, 1}, + [VCAP_IS2_HK_PAG] = { 5, 8}, + [VCAP_IS2_HK_IGR_PORT_MASK] = { 13, 7}, + [VCAP_IS2_HK_RSV2] = { 20, 1}, + [VCAP_IS2_HK_HOST_MATCH] = { 21, 1}, + [VCAP_IS2_HK_L2_MC] = { 22, 1}, + [VCAP_IS2_HK_L2_BC] = { 23, 1}, + [VCAP_IS2_HK_VLAN_TAGGED] = { 24, 1}, + [VCAP_IS2_HK_VID] = { 25, 12}, + [VCAP_IS2_HK_DEI] = { 37, 1}, + [VCAP_IS2_HK_PCP] = { 38, 3}, + /* MAC_ETYPE / MAC_LLC / MAC_SNAP / OAM common */ + [VCAP_IS2_HK_L2_DMAC] = { 41, 48}, + [VCAP_IS2_HK_L2_SMAC] = { 89, 48}, + /* MAC_ETYPE (TYPE=000) */ + [VCAP_IS2_HK_MAC_ETYPE_ETYPE] = {137, 16}, + [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0] = {153, 16}, + [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1] = {169, 8}, + [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2] = {177, 3}, + /* MAC_LLC (TYPE=001) */ + [VCAP_IS2_HK_MAC_LLC_L2_LLC] = {137, 40}, + /* MAC_SNAP (TYPE=010) */ + [VCAP_IS2_HK_MAC_SNAP_L2_SNAP] = {137, 40}, + /* MAC_ARP (TYPE=011) */ + [VCAP_IS2_HK_MAC_ARP_SMAC] = { 41, 48}, + [VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK] = { 89, 1}, + [VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK] = { 90, 1}, + [VCAP_IS2_HK_MAC_ARP_LEN_OK] = { 91, 1}, + [VCAP_IS2_HK_MAC_ARP_TARGET_MATCH] = { 92, 1}, + [VCAP_IS2_HK_MAC_ARP_SENDER_MATCH] = { 93, 1}, + [VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN] = { 94, 1}, + [VCAP_IS2_HK_MAC_ARP_OPCODE] = { 95, 2}, + [VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP] = { 97, 32}, + [VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP] = {129, 32}, + [VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP] = {161, 1}, + /* IP4_TCP_UDP / IP4_OTHER common */ + [VCAP_IS2_HK_IP4] = { 41, 1}, + [VCAP_IS2_HK_L3_FRAGMENT] = { 42, 1}, + [VCAP_IS2_HK_L3_FRAG_OFS_GT0] = { 43, 1}, + [VCAP_IS2_HK_L3_OPTIONS] = { 44, 1}, + [VCAP_IS2_HK_IP4_L3_TTL_GT0] = { 45, 1}, + [VCAP_IS2_HK_L3_TOS] = { 46, 8}, + [VCAP_IS2_HK_L3_IP4_DIP] = { 54, 32}, + [VCAP_IS2_HK_L3_IP4_SIP] = { 86, 32}, + [VCAP_IS2_HK_DIP_EQ_SIP] = {118, 1}, + /* IP4_TCP_UDP (TYPE=100) */ + [VCAP_IS2_HK_TCP] = {119, 1}, + [VCAP_IS2_HK_L4_DPORT] = {120, 16}, + [VCAP_IS2_HK_L4_SPORT] = {136, 16}, + [VCAP_IS2_HK_L4_RNG] = {152, 8}, + [VCAP_IS2_HK_L4_SPORT_EQ_DPORT] = {160, 1}, + [VCAP_IS2_HK_L4_SEQUENCE_EQ0] = {161, 1}, + [VCAP_IS2_HK_L4_FIN] = {162, 1}, + [VCAP_IS2_HK_L4_SYN] = {163, 1}, + [VCAP_IS2_HK_L4_RST] = {164, 1}, + [VCAP_IS2_HK_L4_PSH] = {165, 1}, + [VCAP_IS2_HK_L4_ACK] = {166, 1}, + [VCAP_IS2_HK_L4_URG] = {167, 1}, + [VCAP_IS2_HK_L4_1588_DOM] = {168, 8}, + [VCAP_IS2_HK_L4_1588_VER] = {176, 4}, + /* IP4_OTHER (TYPE=101) */ + [VCAP_IS2_HK_IP4_L3_PROTO] = {119, 8}, + [VCAP_IS2_HK_L3_PAYLOAD] = {127, 56}, + /* IP6_STD (TYPE=110) */ + [VCAP_IS2_HK_IP6_L3_TTL_GT0] = { 41, 1}, + [VCAP_IS2_HK_L3_IP6_SIP] = { 42, 128}, + [VCAP_IS2_HK_IP6_L3_PROTO] = {170, 8}, + /* OAM (TYPE=111) */ + [VCAP_IS2_HK_OAM_MEL_FLAGS] = {137, 7}, + [VCAP_IS2_HK_OAM_VER] = {144, 5}, + [VCAP_IS2_HK_OAM_OPCODE] = {149, 8}, + [VCAP_IS2_HK_OAM_FLAGS] = {157, 8}, + [VCAP_IS2_HK_OAM_MEPID] = {165, 16}, + [VCAP_IS2_HK_OAM_CCM_CNTS_EQ0] = {181, 1}, + [VCAP_IS2_HK_OAM_IS_Y1731] = {182, 1}, +}; + +static struct vcap_field vsc9959_vcap_is2_actions[] = { + [VCAP_IS2_ACT_HIT_ME_ONCE] = { 
0, 1}, + [VCAP_IS2_ACT_CPU_COPY_ENA] = { 1, 1}, + [VCAP_IS2_ACT_CPU_QU_NUM] = { 2, 3}, + [VCAP_IS2_ACT_MASK_MODE] = { 5, 2}, + [VCAP_IS2_ACT_MIRROR_ENA] = { 7, 1}, + [VCAP_IS2_ACT_LRN_DIS] = { 8, 1}, + [VCAP_IS2_ACT_POLICE_ENA] = { 9, 1}, + [VCAP_IS2_ACT_POLICE_IDX] = { 10, 9}, + [VCAP_IS2_ACT_POLICE_VCAP_ONLY] = { 19, 1}, + [VCAP_IS2_ACT_PORT_MASK] = { 20, 6}, + [VCAP_IS2_ACT_REW_OP] = { 26, 9}, + [VCAP_IS2_ACT_SMAC_REPLACE_ENA] = { 35, 1}, + [VCAP_IS2_ACT_RSV] = { 36, 2}, + [VCAP_IS2_ACT_ACL_ID] = { 38, 6}, + [VCAP_IS2_ACT_HIT_CNT] = { 44, 32}, +}; + +static struct vcap_props vsc9959_vcap_props[] = { + [VCAP_ES0] = { + .action_type_width = 0, + .action_table = { + [ES0_ACTION_TYPE_NORMAL] = { + .width = 72, /* HIT_STICKY not included */ + .count = 1, + }, + }, + .target = S0, + .keys = vsc9959_vcap_es0_keys, + .actions = vsc9959_vcap_es0_actions, + }, + [VCAP_IS1] = { + .action_type_width = 0, + .action_table = { + [IS1_ACTION_TYPE_NORMAL] = { + .width = 78, /* HIT_STICKY not included */ + .count = 4, + }, + }, + .target = S1, + .keys = vsc9959_vcap_is1_keys, + .actions = vsc9959_vcap_is1_actions, + }, + [VCAP_IS2] = { + .action_type_width = 1, + .action_table = { + [IS2_ACTION_TYPE_NORMAL] = { + .width = 44, + .count = 2 + }, + [IS2_ACTION_TYPE_SMAC_SIP] = { + .width = 6, + .count = 4 + }, + }, + .target = S2, + .keys = vsc9959_vcap_is2_keys, + .actions = vsc9959_vcap_is2_actions, + }, +}; + +static const struct ptp_clock_info vsc9959_ptp_caps = { + .owner = THIS_MODULE, + .name = "felix ptp", + .max_adj = 0x7fffffff, + .n_alarm = 0, + .n_ext_ts = 0, + .n_per_out = OCELOT_PTP_PINS_NUM, + .n_pins = OCELOT_PTP_PINS_NUM, + .pps = 0, + .gettime64 = ocelot_ptp_gettime64, + .settime64 = ocelot_ptp_settime64, + .adjtime = ocelot_ptp_adjtime, + .adjfine = ocelot_ptp_adjfine, + .verify = ocelot_ptp_verify, + .enable = ocelot_ptp_enable, +}; + +#define VSC9959_INIT_TIMEOUT 50000 +#define VSC9959_GCB_RST_SLEEP 100 +#define VSC9959_SYS_RAMINIT_SLEEP 80 + +static int vsc9959_gcb_soft_rst_status(struct ocelot *ocelot) +{ + int val; + + ocelot_field_read(ocelot, GCB_SOFT_RST_SWC_RST, &val); + + return val; +} + +static int vsc9959_sys_ram_init_status(struct ocelot *ocelot) +{ + return ocelot_read(ocelot, SYS_RAM_INIT); +} + +/* CORE_ENA is in SYS:SYSTEM:RESET_CFG + * RAM_INIT is in SYS:RAM_CTRL:RAM_INIT + */ +static int vsc9959_reset(struct ocelot *ocelot) +{ + int val, err; + + /* soft-reset the switch core */ + ocelot_field_write(ocelot, GCB_SOFT_RST_SWC_RST, 1); + + err = readx_poll_timeout(vsc9959_gcb_soft_rst_status, ocelot, val, !val, + VSC9959_GCB_RST_SLEEP, VSC9959_INIT_TIMEOUT); + if (err) { + dev_err(ocelot->dev, "timeout: switch core reset\n"); + return err; + } + + /* initialize switch mem ~40us */ + ocelot_write(ocelot, SYS_RAM_INIT_RAM_INIT, SYS_RAM_INIT); + err = readx_poll_timeout(vsc9959_sys_ram_init_status, ocelot, val, !val, + VSC9959_SYS_RAMINIT_SLEEP, + VSC9959_INIT_TIMEOUT); + if (err) { + dev_err(ocelot->dev, "timeout: switch sram init\n"); + return err; + } + + /* enable switch core */ + ocelot_field_write(ocelot, SYS_RESET_CFG_CORE_ENA, 1); + + return 0; +} + +static void vsc9959_phylink_validate(struct ocelot *ocelot, int port, + unsigned long *supported, + struct phylink_link_state *state) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + + phylink_set_port_modes(mask); + phylink_set(mask, Autoneg); + phylink_set(mask, Pause); + phylink_set(mask, Asym_Pause); + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 10baseT_Full); + phylink_set(mask, 100baseT_Half); + 
phylink_set(mask, 100baseT_Full); + phylink_set(mask, 1000baseT_Half); + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); + + if (state->interface == PHY_INTERFACE_MODE_INTERNAL || + state->interface == PHY_INTERFACE_MODE_2500BASEX || + state->interface == PHY_INTERFACE_MODE_USXGMII) { + phylink_set(mask, 2500baseT_Full); + phylink_set(mask, 2500baseX_Full); + } + + linkmode_and(supported, supported, mask); + linkmode_and(state->advertising, state->advertising, mask); +} + +/* Watermark encode + * Bit 8: Unit; 0:1, 1:16 + * Bit 7-0: Value to be multiplied with unit + */ +static u16 vsc9959_wm_enc(u16 value) +{ + WARN_ON(value >= 16 * BIT(8)); + + if (value >= BIT(8)) + return BIT(8) | (value / 16); + + return value; +} + +static u16 vsc9959_wm_dec(u16 wm) +{ + WARN_ON(wm & ~GENMASK(8, 0)); + + if (wm & BIT(8)) + return (wm & GENMASK(7, 0)) * 16; + + return wm; +} + +static void vsc9959_wm_stat(u32 val, u32 *inuse, u32 *maxuse) +{ + *inuse = (val & GENMASK(23, 12)) >> 12; + *maxuse = val & GENMASK(11, 0); +} + +static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot) +{ + struct pci_dev *pdev = to_pci_dev(ocelot->dev); + struct felix *felix = ocelot_to_felix(ocelot); + struct enetc_mdio_priv *mdio_priv; + struct device *dev = ocelot->dev; + resource_size_t imdio_base; + void __iomem *imdio_regs; + struct resource res; + struct enetc_hw *hw; + struct mii_bus *bus; + int port; + int rc; + + felix->pcs = devm_kcalloc(dev, felix->info->num_ports, + sizeof(struct phylink_pcs *), + GFP_KERNEL); + if (!felix->pcs) { + dev_err(dev, "failed to allocate array for PCS PHYs\n"); + return -ENOMEM; + } + + imdio_base = pci_resource_start(pdev, VSC9959_IMDIO_PCI_BAR); + + memcpy(&res, &vsc9959_imdio_res, sizeof(res)); + res.start += imdio_base; + res.end += imdio_base; + + imdio_regs = devm_ioremap_resource(dev, &res); + if (IS_ERR(imdio_regs)) + return PTR_ERR(imdio_regs); + + hw = enetc_hw_alloc(dev, imdio_regs); + if (IS_ERR(hw)) { + dev_err(dev, "failed to allocate ENETC HW structure\n"); + return PTR_ERR(hw); + } + + bus = mdiobus_alloc_size(sizeof(*mdio_priv)); + if (!bus) + return -ENOMEM; + + bus->name = "VSC9959 internal MDIO bus"; + bus->read = enetc_mdio_read; + bus->write = enetc_mdio_write; + bus->parent = dev; + mdio_priv = bus->priv; + mdio_priv->hw = hw; + /* This gets added to imdio_regs, which already maps addresses + * starting with the proper offset. 
+ */ + mdio_priv->mdio_base = 0; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s-imdio", dev_name(dev)); + + /* Needed in order to initialize the bus mutex lock */ + rc = mdiobus_register(bus); + if (rc < 0) { + dev_err(dev, "failed to register MDIO bus\n"); + mdiobus_free(bus); + return rc; + } + + felix->imdio = bus; + + for (port = 0; port < felix->info->num_ports; port++) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct phylink_pcs *phylink_pcs; + struct mdio_device *mdio_device; + + if (dsa_is_unused_port(felix->ds, port)) + continue; + + if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_INTERNAL) + continue; + + mdio_device = mdio_device_create(felix->imdio, port); + if (IS_ERR(mdio_device)) + continue; + + phylink_pcs = lynx_pcs_create(mdio_device); + if (!phylink_pcs) { + mdio_device_free(mdio_device); + continue; + } + + felix->pcs[port] = phylink_pcs; + + dev_info(dev, "Found PCS at internal MDIO address %d\n", port); + } + + return 0; +} + +static void vsc9959_mdio_bus_free(struct ocelot *ocelot) +{ + struct felix *felix = ocelot_to_felix(ocelot); + int port; + + for (port = 0; port < ocelot->num_phys_ports; port++) { + struct phylink_pcs *phylink_pcs = felix->pcs[port]; + struct mdio_device *mdio_device; + + if (!phylink_pcs) + continue; + + mdio_device = lynx_get_mdio_device(phylink_pcs); + mdio_device_free(mdio_device); + lynx_pcs_destroy(phylink_pcs); + } + mdiobus_unregister(felix->imdio); + mdiobus_free(felix->imdio); +} + +/* The switch considers any frame (regardless of size) as eligible for + * transmission if the traffic class gate is open for at least 33 ns. + * Overruns are prevented by cropping an interval at the end of the gate time + * slot for which egress scheduling is blocked, but we need to still keep 33 ns + * available for one packet to be transmitted, otherwise the port tc will hang. + * This function returns the size of a gate interval that remains available for + * setting the guard band, after reserving the space for one egress frame. + */ +static u64 vsc9959_tas_remaining_gate_len_ps(u64 gate_len_ns) +{ + /* Gate always open */ + if (gate_len_ns == U64_MAX) + return U64_MAX; + + if (gate_len_ns < VSC9959_TAS_MIN_GATE_LEN_NS) + return 0; + + return (gate_len_ns - VSC9959_TAS_MIN_GATE_LEN_NS) * PSEC_PER_NSEC; +} + +/* Extract shortest continuous gate open intervals in ns for each traffic class + * of a cyclic tc-taprio schedule. If a gate is always open, the duration is + * considered U64_MAX. If the gate is always closed, it is considered 0. + */ +static void vsc9959_tas_min_gate_lengths(struct tc_taprio_qopt_offload *taprio, + u64 min_gate_len[OCELOT_NUM_TC]) +{ + struct tc_taprio_sched_entry *entry; + u64 gate_len[OCELOT_NUM_TC]; + u8 gates_ever_opened = 0; + int tc, i, n; + + /* Initialize arrays */ + for (tc = 0; tc < OCELOT_NUM_TC; tc++) { + min_gate_len[tc] = U64_MAX; + gate_len[tc] = 0; + } + + /* If we don't have taprio, consider all gates as permanently open */ + if (!taprio) + return; + + n = taprio->num_entries; + + /* Walk through the gate list twice to determine the length + * of consecutively open gates for a traffic class, including + * open gates that wrap around. We are just interested in the + * minimum window size, and this doesn't change what the + * minimum is (if the gate never closes, min_gate_len will + * remain U64_MAX). 
+ */ + for (i = 0; i < 2 * n; i++) { + entry = &taprio->entries[i % n]; + + for (tc = 0; tc < OCELOT_NUM_TC; tc++) { + if (entry->gate_mask & BIT(tc)) { + gate_len[tc] += entry->interval; + gates_ever_opened |= BIT(tc); + } else { + /* Gate closes now, record a potential new + * minimum and reinitialize length + */ + if (min_gate_len[tc] > gate_len[tc] && + gate_len[tc]) + min_gate_len[tc] = gate_len[tc]; + gate_len[tc] = 0; + } + } + } + + /* min_gate_len[tc] actually tracks minimum *open* gate time, so for + * permanently closed gates, min_gate_len[tc] will still be U64_MAX. + * Therefore they are currently indistinguishable from permanently + * open gates. Overwrite the gate len with 0 when we know they're + * actually permanently closed, i.e. after the loop above. + */ + for (tc = 0; tc < OCELOT_NUM_TC; tc++) + if (!(gates_ever_opened & BIT(tc))) + min_gate_len[tc] = 0; +} + +/* ocelot_write_rix is a macro that concatenates QSYS_MAXSDU_CFG_* with _RSZ, + * so we need to spell out the register access to each traffic class in helper + * functions, to simplify callers + */ +static void vsc9959_port_qmaxsdu_set(struct ocelot *ocelot, int port, int tc, + u32 max_sdu) +{ + switch (tc) { + case 0: + ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_0, + port); + break; + case 1: + ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_1, + port); + break; + case 2: + ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_2, + port); + break; + case 3: + ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_3, + port); + break; + case 4: + ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_4, + port); + break; + case 5: + ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_5, + port); + break; + case 6: + ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_6, + port); + break; + case 7: + ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_7, + port); + break; + } +} + +static u32 vsc9959_port_qmaxsdu_get(struct ocelot *ocelot, int port, int tc) +{ + switch (tc) { + case 0: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_0, port); + case 1: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_1, port); + case 2: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_2, port); + case 3: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_3, port); + case 4: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_4, port); + case 5: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_5, port); + case 6: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_6, port); + case 7: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_7, port); + default: + return 0; + } +} + +static u32 vsc9959_tas_tc_max_sdu(struct tc_taprio_qopt_offload *taprio, int tc) +{ + if (!taprio || !taprio->max_sdu[tc]) + return 0; + + return taprio->max_sdu[tc] + ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN; +} + +/* Update QSYS_PORT_MAX_SDU to make sure the static guard bands added by the + * switch (see the ALWAYS_GUARD_BAND_SCH_Q comment) are correct at all MTU + * values (the default value is 1518). Also, for traffic class windows smaller + * than one MTU sized frame, update QSYS_QMAXSDU_CFG to enable oversized frame + * dropping, such that these won't hang the port, as they will never be sent. 
+ */ +static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct tc_taprio_qopt_offload *taprio; + u64 min_gate_len[OCELOT_NUM_TC]; + int speed, picos_per_byte; + u64 needed_bit_time_ps; + u32 val, maxlen; + u8 tas_speed; + int tc; + + lockdep_assert_held(&ocelot->tas_lock); + + taprio = ocelot_port->taprio; + + val = ocelot_read_rix(ocelot, QSYS_TAG_CONFIG, port); + tas_speed = QSYS_TAG_CONFIG_LINK_SPEED_X(val); + + switch (tas_speed) { + case OCELOT_SPEED_10: + speed = SPEED_10; + break; + case OCELOT_SPEED_100: + speed = SPEED_100; + break; + case OCELOT_SPEED_1000: + speed = SPEED_1000; + break; + case OCELOT_SPEED_2500: + speed = SPEED_2500; + break; + default: + return; + } + + picos_per_byte = (USEC_PER_SEC * 8) / speed; + + val = ocelot_port_readl(ocelot_port, DEV_MAC_MAXLEN_CFG); + /* MAXLEN_CFG accounts automatically for VLAN. We need to include it + * manually in the bit time calculation, plus the preamble and SFD. + */ + maxlen = val + 2 * VLAN_HLEN; + /* Consider the standard Ethernet overhead of 8 octets preamble+SFD, + * 4 octets FCS, 12 octets IFG. + */ + needed_bit_time_ps = (u64)(maxlen + 24) * picos_per_byte; + + dev_dbg(ocelot->dev, + "port %d: max frame size %d needs %llu ps at speed %d\n", + port, maxlen, needed_bit_time_ps, speed); + + vsc9959_tas_min_gate_lengths(taprio, min_gate_len); + + mutex_lock(&ocelot->fwd_domain_lock); + + for (tc = 0; tc < OCELOT_NUM_TC; tc++) { + u32 requested_max_sdu = vsc9959_tas_tc_max_sdu(taprio, tc); + u64 remaining_gate_len_ps; + u32 max_sdu; + + remaining_gate_len_ps = + vsc9959_tas_remaining_gate_len_ps(min_gate_len[tc]); + + if (remaining_gate_len_ps > needed_bit_time_ps) { + /* Setting QMAXSDU_CFG to 0 disables oversized frame + * dropping. + */ + max_sdu = requested_max_sdu; + dev_dbg(ocelot->dev, + "port %d tc %d min gate len %llu" + ", sending all frames\n", + port, tc, min_gate_len[tc]); + } else { + /* If traffic class doesn't support a full MTU sized + * frame, make sure to enable oversize frame dropping + * for frames larger than the smallest that would fit. + * + * However, the exact same register, QSYS_QMAXSDU_CFG_*, + * controls not only oversized frame dropping, but also + * per-tc static guard band lengths, so it reduces the + * useful gate interval length. Therefore, be careful + * to calculate a guard band (and therefore max_sdu) + * that still leaves 33 ns available in the time slot. + */ + max_sdu = div_u64(remaining_gate_len_ps, picos_per_byte); + /* A TC gate may be completely closed, which is a + * special case where all packets are oversized. + * Any limit smaller than 64 octets accomplishes this + */ + if (!max_sdu) + max_sdu = 1; + /* Take L1 overhead into account, but just don't allow + * max_sdu to go negative or to 0. Here we use 20 + * because QSYS_MAXSDU_CFG_* already counts the 4 FCS + * octets as part of packet size. 
+ */ + if (max_sdu > 20) + max_sdu -= 20; + + if (requested_max_sdu && requested_max_sdu < max_sdu) + max_sdu = requested_max_sdu; + + dev_info(ocelot->dev, + "port %d tc %d min gate length %llu" + " ns not enough for max frame size %d at %d" + " Mbps, dropping frames over %d" + " octets including FCS\n", + port, tc, min_gate_len[tc], maxlen, speed, + max_sdu); + } + + vsc9959_port_qmaxsdu_set(ocelot, port, tc, max_sdu); + } + + ocelot_write_rix(ocelot, maxlen, QSYS_PORT_MAX_SDU, port); + + ocelot->ops->cut_through_fwd(ocelot); + + mutex_unlock(&ocelot->fwd_domain_lock); +} + +static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port, + u32 speed) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + u8 tas_speed; + + switch (speed) { + case SPEED_10: + tas_speed = OCELOT_SPEED_10; + break; + case SPEED_100: + tas_speed = OCELOT_SPEED_100; + break; + case SPEED_1000: + tas_speed = OCELOT_SPEED_1000; + break; + case SPEED_2500: + tas_speed = OCELOT_SPEED_2500; + break; + default: + tas_speed = OCELOT_SPEED_1000; + break; + } + + mutex_lock(&ocelot->tas_lock); + + ocelot_rmw_rix(ocelot, + QSYS_TAG_CONFIG_LINK_SPEED(tas_speed), + QSYS_TAG_CONFIG_LINK_SPEED_M, + QSYS_TAG_CONFIG, port); + + if (ocelot_port->taprio) + vsc9959_tas_guard_bands_update(ocelot, port); + + mutex_unlock(&ocelot->tas_lock); +} + +static void vsc9959_new_base_time(struct ocelot *ocelot, ktime_t base_time, + u64 cycle_time, + struct timespec64 *new_base_ts) +{ + struct timespec64 ts; + ktime_t new_base_time; + ktime_t current_time; + + ocelot_ptp_gettime64(&ocelot->ptp_info, &ts); + current_time = timespec64_to_ktime(ts); + new_base_time = base_time; + + if (base_time < current_time) { + u64 nr_of_cycles = current_time - base_time; + + do_div(nr_of_cycles, cycle_time); + new_base_time += cycle_time * (nr_of_cycles + 1); + } + + *new_base_ts = ktime_to_timespec64(new_base_time); +} + +static u32 vsc9959_tas_read_cfg_status(struct ocelot *ocelot) +{ + return ocelot_read(ocelot, QSYS_TAS_PARAM_CFG_CTRL); +} + +static void vsc9959_tas_gcl_set(struct ocelot *ocelot, const u32 gcl_ix, + struct tc_taprio_sched_entry *entry) +{ + ocelot_write(ocelot, + QSYS_GCL_CFG_REG_1_GCL_ENTRY_NUM(gcl_ix) | + QSYS_GCL_CFG_REG_1_GATE_STATE(entry->gate_mask), + QSYS_GCL_CFG_REG_1); + ocelot_write(ocelot, entry->interval, QSYS_GCL_CFG_REG_2); +} + +static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port, + struct tc_taprio_qopt_offload *taprio) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct timespec64 base_ts; + int ret, i; + u32 val; + + mutex_lock(&ocelot->tas_lock); + + if (!taprio->enable) { + ocelot_rmw_rix(ocelot, 0, QSYS_TAG_CONFIG_ENABLE, + QSYS_TAG_CONFIG, port); + + taprio_offload_free(ocelot_port->taprio); + ocelot_port->taprio = NULL; + + vsc9959_tas_guard_bands_update(ocelot, port); + + mutex_unlock(&ocelot->tas_lock); + return 0; + } + + if (taprio->cycle_time > NSEC_PER_SEC || + taprio->cycle_time_extension >= NSEC_PER_SEC) { + ret = -EINVAL; + goto err; + } + + if (taprio->num_entries > VSC9959_TAS_GCL_ENTRY_MAX) { + ret = -ERANGE; + goto err; + } + + /* Enable guard band. The switch will schedule frames without taking + * their length into account. Thus we'll always need to enable the + * guard band which reserves the time of a maximum sized frame at the + * end of the time window. + * + * Although the ALWAYS_GUARD_BAND_SCH_Q bit is global for all ports, we + * need to set PORT_NUM, because subsequent writes to PARAM_CFG_REG_n + * operate on the port number. 
+ */ + ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(port) | + QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q, + QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M | + QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q, + QSYS_TAS_PARAM_CFG_CTRL); + + /* Hardware errata - Admin config could not be overwritten if + * config is pending, need reset the TAS module + */ + val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_8); + if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING) { + ret = -EBUSY; + goto err; + } + + ocelot_rmw_rix(ocelot, + QSYS_TAG_CONFIG_ENABLE | + QSYS_TAG_CONFIG_INIT_GATE_STATE(0xFF) | + QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES(0xFF), + QSYS_TAG_CONFIG_ENABLE | + QSYS_TAG_CONFIG_INIT_GATE_STATE_M | + QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES_M, + QSYS_TAG_CONFIG, port); + + vsc9959_new_base_time(ocelot, taprio->base_time, + taprio->cycle_time, &base_ts); + ocelot_write(ocelot, base_ts.tv_nsec, QSYS_PARAM_CFG_REG_1); + ocelot_write(ocelot, lower_32_bits(base_ts.tv_sec), QSYS_PARAM_CFG_REG_2); + val = upper_32_bits(base_ts.tv_sec); + ocelot_write(ocelot, + QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB(val) | + QSYS_PARAM_CFG_REG_3_LIST_LENGTH(taprio->num_entries), + QSYS_PARAM_CFG_REG_3); + ocelot_write(ocelot, taprio->cycle_time, QSYS_PARAM_CFG_REG_4); + ocelot_write(ocelot, taprio->cycle_time_extension, QSYS_PARAM_CFG_REG_5); + + for (i = 0; i < taprio->num_entries; i++) + vsc9959_tas_gcl_set(ocelot, i, &taprio->entries[i]); + + ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE, + QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE, + QSYS_TAS_PARAM_CFG_CTRL); + + ret = readx_poll_timeout(vsc9959_tas_read_cfg_status, ocelot, val, + !(val & QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE), + 10, 100000); + if (ret) + goto err; + + ocelot_port->taprio = taprio_offload_get(taprio); + vsc9959_tas_guard_bands_update(ocelot, port); + +err: + mutex_unlock(&ocelot->tas_lock); + + return ret; +} + +static void vsc9959_tas_clock_adjust(struct ocelot *ocelot) +{ + struct tc_taprio_qopt_offload *taprio; + struct ocelot_port *ocelot_port; + struct timespec64 base_ts; + int port; + u32 val; + + mutex_lock(&ocelot->tas_lock); + + for (port = 0; port < ocelot->num_phys_ports; port++) { + ocelot_port = ocelot->ports[port]; + taprio = ocelot_port->taprio; + if (!taprio) + continue; + + ocelot_rmw(ocelot, + QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(port), + QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M, + QSYS_TAS_PARAM_CFG_CTRL); + + /* Disable time-aware shaper */ + ocelot_rmw_rix(ocelot, 0, QSYS_TAG_CONFIG_ENABLE, + QSYS_TAG_CONFIG, port); + + vsc9959_new_base_time(ocelot, taprio->base_time, + taprio->cycle_time, &base_ts); + + ocelot_write(ocelot, base_ts.tv_nsec, QSYS_PARAM_CFG_REG_1); + ocelot_write(ocelot, lower_32_bits(base_ts.tv_sec), + QSYS_PARAM_CFG_REG_2); + val = upper_32_bits(base_ts.tv_sec); + ocelot_rmw(ocelot, + QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB(val), + QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB_M, + QSYS_PARAM_CFG_REG_3); + + ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE, + QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE, + QSYS_TAS_PARAM_CFG_CTRL); + + /* Re-enable time-aware shaper */ + ocelot_rmw_rix(ocelot, QSYS_TAG_CONFIG_ENABLE, + QSYS_TAG_CONFIG_ENABLE, + QSYS_TAG_CONFIG, port); + } + mutex_unlock(&ocelot->tas_lock); +} + +static int vsc9959_qos_port_cbs_set(struct dsa_switch *ds, int port, + struct tc_cbs_qopt_offload *cbs_qopt) +{ + struct ocelot *ocelot = ds->priv; + int port_ix = port * 8 + cbs_qopt->queue; + u32 rate, burst; + + if (cbs_qopt->queue >= ds->num_tx_queues) + return -EINVAL; + + if (!cbs_qopt->enable) { + 
ocelot_write_gix(ocelot, QSYS_CIR_CFG_CIR_RATE(0) | + QSYS_CIR_CFG_CIR_BURST(0), + QSYS_CIR_CFG, port_ix); + + ocelot_rmw_gix(ocelot, 0, QSYS_SE_CFG_SE_AVB_ENA, + QSYS_SE_CFG, port_ix); + + return 0; + } + + /* Rate unit is 100 kbps */ + rate = DIV_ROUND_UP(cbs_qopt->idleslope, 100); + /* Avoid using zero rate */ + rate = clamp_t(u32, rate, 1, GENMASK(14, 0)); + /* Burst unit is 4kB */ + burst = DIV_ROUND_UP(cbs_qopt->hicredit, 4096); + /* Avoid using zero burst size */ + burst = clamp_t(u32, burst, 1, GENMASK(5, 0)); + ocelot_write_gix(ocelot, + QSYS_CIR_CFG_CIR_RATE(rate) | + QSYS_CIR_CFG_CIR_BURST(burst), + QSYS_CIR_CFG, + port_ix); + + ocelot_rmw_gix(ocelot, + QSYS_SE_CFG_SE_FRM_MODE(0) | + QSYS_SE_CFG_SE_AVB_ENA, + QSYS_SE_CFG_SE_AVB_ENA | + QSYS_SE_CFG_SE_FRM_MODE_M, + QSYS_SE_CFG, + port_ix); + + return 0; +} + +static int vsc9959_qos_query_caps(struct tc_query_caps_base *base) +{ + switch (base->type) { + case TC_SETUP_QDISC_TAPRIO: { + struct tc_taprio_caps *caps = base->caps; + + caps->supports_queue_max_sdu = true; + + return 0; + } + default: + return -EOPNOTSUPP; + } +} + +static int vsc9959_port_setup_tc(struct dsa_switch *ds, int port, + enum tc_setup_type type, + void *type_data) +{ + struct ocelot *ocelot = ds->priv; + + switch (type) { + case TC_QUERY_CAPS: + return vsc9959_qos_query_caps(type_data); + case TC_SETUP_QDISC_TAPRIO: + return vsc9959_qos_port_tas_set(ocelot, port, type_data); + case TC_SETUP_QDISC_CBS: + return vsc9959_qos_port_cbs_set(ds, port, type_data); + default: + return -EOPNOTSUPP; + } +} + +#define VSC9959_PSFP_SFID_MAX 175 +#define VSC9959_PSFP_GATE_ID_MAX 183 +#define VSC9959_PSFP_POLICER_BASE 63 +#define VSC9959_PSFP_POLICER_MAX 383 +#define VSC9959_PSFP_GATE_LIST_NUM 4 +#define VSC9959_PSFP_GATE_CYCLETIME_MIN 5000 + +struct felix_stream { + struct list_head list; + unsigned long id; + bool dummy; + int ports; + int port; + u8 dmac[ETH_ALEN]; + u16 vid; + s8 prio; + u8 sfid_valid; + u8 ssid_valid; + u32 sfid; + u32 ssid; +}; + +struct felix_stream_filter_counters { + u64 match; + u64 not_pass_gate; + u64 not_pass_sdu; + u64 red; +}; + +struct felix_stream_filter { + struct felix_stream_filter_counters stats; + struct list_head list; + refcount_t refcount; + u32 index; + u8 enable; + int portmask; + u8 sg_valid; + u32 sgid; + u8 fm_valid; + u32 fmid; + u8 prio_valid; + u8 prio; + u32 maxsdu; +}; + +struct felix_stream_gate { + u32 index; + u8 enable; + u8 ipv_valid; + u8 init_ipv; + u64 basetime; + u64 cycletime; + u64 cycletime_ext; + u32 num_entries; + struct action_gate_entry entries[]; +}; + +struct felix_stream_gate_entry { + struct list_head list; + refcount_t refcount; + u32 index; +}; + +static int vsc9959_stream_identify(struct flow_cls_offload *f, + struct felix_stream *stream) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(f); + struct flow_dissector *dissector = rule->match.dissector; + + if (dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_VLAN) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) + return -EOPNOTSUPP; + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + + flow_rule_match_eth_addrs(rule, &match); + ether_addr_copy(stream->dmac, match.key->dst); + if (!is_zero_ether_addr(match.mask->src)) + return -EOPNOTSUPP; + } else { + return -EOPNOTSUPP; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + + flow_rule_match_vlan(rule, &match); + if 
(match.mask->vlan_priority) + stream->prio = match.key->vlan_priority; + else + stream->prio = -1; + + if (!match.mask->vlan_id) + return -EOPNOTSUPP; + stream->vid = match.key->vlan_id; + } else { + return -EOPNOTSUPP; + } + + stream->id = f->cookie; + + return 0; +} + +static int vsc9959_mact_stream_set(struct ocelot *ocelot, + struct felix_stream *stream, + struct netlink_ext_ack *extack) +{ + enum macaccess_entry_type type; + int ret, sfid, ssid; + u32 vid, dst_idx; + u8 mac[ETH_ALEN]; + + ether_addr_copy(mac, stream->dmac); + vid = stream->vid; + + /* Stream identification desn't support to add a stream with non + * existent MAC (The MAC entry has not been learned in MAC table). + */ + ret = ocelot_mact_lookup(ocelot, &dst_idx, mac, vid, &type); + if (ret) { + if (extack) + NL_SET_ERR_MSG_MOD(extack, "Stream is not learned in MAC table"); + return -EOPNOTSUPP; + } + + if ((stream->sfid_valid || stream->ssid_valid) && + type == ENTRYTYPE_NORMAL) + type = ENTRYTYPE_LOCKED; + + sfid = stream->sfid_valid ? stream->sfid : -1; + ssid = stream->ssid_valid ? stream->ssid : -1; + + ret = ocelot_mact_learn_streamdata(ocelot, dst_idx, mac, vid, type, + sfid, ssid); + + return ret; +} + +static struct felix_stream * +vsc9959_stream_table_lookup(struct list_head *stream_list, + struct felix_stream *stream) +{ + struct felix_stream *tmp; + + list_for_each_entry(tmp, stream_list, list) + if (ether_addr_equal(tmp->dmac, stream->dmac) && + tmp->vid == stream->vid) + return tmp; + + return NULL; +} + +static int vsc9959_stream_table_add(struct ocelot *ocelot, + struct list_head *stream_list, + struct felix_stream *stream, + struct netlink_ext_ack *extack) +{ + struct felix_stream *stream_entry; + int ret; + + stream_entry = kmemdup(stream, sizeof(*stream_entry), GFP_KERNEL); + if (!stream_entry) + return -ENOMEM; + + if (!stream->dummy) { + ret = vsc9959_mact_stream_set(ocelot, stream_entry, extack); + if (ret) { + kfree(stream_entry); + return ret; + } + } + + list_add_tail(&stream_entry->list, stream_list); + + return 0; +} + +static struct felix_stream * +vsc9959_stream_table_get(struct list_head *stream_list, unsigned long id) +{ + struct felix_stream *tmp; + + list_for_each_entry(tmp, stream_list, list) + if (tmp->id == id) + return tmp; + + return NULL; +} + +static void vsc9959_stream_table_del(struct ocelot *ocelot, + struct felix_stream *stream) +{ + if (!stream->dummy) + vsc9959_mact_stream_set(ocelot, stream, NULL); + + list_del(&stream->list); + kfree(stream); +} + +static u32 vsc9959_sfi_access_status(struct ocelot *ocelot) +{ + return ocelot_read(ocelot, ANA_TABLES_SFIDACCESS); +} + +static int vsc9959_psfp_sfi_set(struct ocelot *ocelot, + struct felix_stream_filter *sfi) +{ + u32 val; + + if (sfi->index > VSC9959_PSFP_SFID_MAX) + return -EINVAL; + + if (!sfi->enable) { + ocelot_write(ocelot, ANA_TABLES_SFIDTIDX_SFID_INDEX(sfi->index), + ANA_TABLES_SFIDTIDX); + + val = ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(SFIDACCESS_CMD_WRITE); + ocelot_write(ocelot, val, ANA_TABLES_SFIDACCESS); + + return readx_poll_timeout(vsc9959_sfi_access_status, ocelot, val, + (!ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(val)), + 10, 100000); + } + + if (sfi->sgid > VSC9959_PSFP_GATE_ID_MAX || + sfi->fmid > VSC9959_PSFP_POLICER_MAX) + return -EINVAL; + + ocelot_write(ocelot, + (sfi->sg_valid ? ANA_TABLES_SFIDTIDX_SGID_VALID : 0) | + ANA_TABLES_SFIDTIDX_SGID(sfi->sgid) | + (sfi->fm_valid ? 
ANA_TABLES_SFIDTIDX_POL_ENA : 0) | + ANA_TABLES_SFIDTIDX_POL_IDX(sfi->fmid) | + ANA_TABLES_SFIDTIDX_SFID_INDEX(sfi->index), + ANA_TABLES_SFIDTIDX); + + ocelot_write(ocelot, + (sfi->prio_valid ? ANA_TABLES_SFIDACCESS_IGR_PRIO_MATCH_ENA : 0) | + ANA_TABLES_SFIDACCESS_IGR_PRIO(sfi->prio) | + ANA_TABLES_SFIDACCESS_MAX_SDU_LEN(sfi->maxsdu) | + ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(SFIDACCESS_CMD_WRITE), + ANA_TABLES_SFIDACCESS); + + return readx_poll_timeout(vsc9959_sfi_access_status, ocelot, val, + (!ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(val)), + 10, 100000); +} + +static int vsc9959_psfp_sfidmask_set(struct ocelot *ocelot, u32 sfid, int ports) +{ + u32 val; + + ocelot_rmw(ocelot, + ANA_TABLES_SFIDTIDX_SFID_INDEX(sfid), + ANA_TABLES_SFIDTIDX_SFID_INDEX_M, + ANA_TABLES_SFIDTIDX); + + ocelot_write(ocelot, + ANA_TABLES_SFID_MASK_IGR_PORT_MASK(ports) | + ANA_TABLES_SFID_MASK_IGR_SRCPORT_MATCH_ENA, + ANA_TABLES_SFID_MASK); + + ocelot_rmw(ocelot, + ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(SFIDACCESS_CMD_WRITE), + ANA_TABLES_SFIDACCESS_SFID_TBL_CMD_M, + ANA_TABLES_SFIDACCESS); + + return readx_poll_timeout(vsc9959_sfi_access_status, ocelot, val, + (!ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(val)), + 10, 100000); +} + +static int vsc9959_psfp_sfi_list_add(struct ocelot *ocelot, + struct felix_stream_filter *sfi, + struct list_head *pos) +{ + struct felix_stream_filter *sfi_entry; + int ret; + + sfi_entry = kmemdup(sfi, sizeof(*sfi_entry), GFP_KERNEL); + if (!sfi_entry) + return -ENOMEM; + + refcount_set(&sfi_entry->refcount, 1); + + ret = vsc9959_psfp_sfi_set(ocelot, sfi_entry); + if (ret) { + kfree(sfi_entry); + return ret; + } + + vsc9959_psfp_sfidmask_set(ocelot, sfi->index, sfi->portmask); + + list_add(&sfi_entry->list, pos); + + return 0; +} + +static int vsc9959_psfp_sfi_table_add(struct ocelot *ocelot, + struct felix_stream_filter *sfi) +{ + struct list_head *pos, *q, *last; + struct felix_stream_filter *tmp; + struct ocelot_psfp_list *psfp; + u32 insert = 0; + + psfp = &ocelot->psfp; + last = &psfp->sfi_list; + + list_for_each_safe(pos, q, &psfp->sfi_list) { + tmp = list_entry(pos, struct felix_stream_filter, list); + if (sfi->sg_valid == tmp->sg_valid && + sfi->fm_valid == tmp->fm_valid && + sfi->portmask == tmp->portmask && + tmp->sgid == sfi->sgid && + tmp->fmid == sfi->fmid) { + sfi->index = tmp->index; + refcount_inc(&tmp->refcount); + return 0; + } + /* Make sure that the index is increasing in order. */ + if (tmp->index == insert) { + last = pos; + insert++; + } + } + sfi->index = insert; + + return vsc9959_psfp_sfi_list_add(ocelot, sfi, last); +} + +static int vsc9959_psfp_sfi_table_add2(struct ocelot *ocelot, + struct felix_stream_filter *sfi, + struct felix_stream_filter *sfi2) +{ + struct felix_stream_filter *tmp; + struct list_head *pos, *q, *last; + struct ocelot_psfp_list *psfp; + u32 insert = 0; + int ret; + + psfp = &ocelot->psfp; + last = &psfp->sfi_list; + + list_for_each_safe(pos, q, &psfp->sfi_list) { + tmp = list_entry(pos, struct felix_stream_filter, list); + /* Make sure that the index is increasing in order. 
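+	 * Two consecutive free indices are needed here, since the caller
+	 * installs a pair of stream filters for a stream matched on two
+	 * ports, so the walk stops at the first gap that is at least two
+	 * entries wide.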
*/ + if (tmp->index >= insert + 2) + break; + + insert = tmp->index + 1; + last = pos; + } + sfi->index = insert; + + ret = vsc9959_psfp_sfi_list_add(ocelot, sfi, last); + if (ret) + return ret; + + sfi2->index = insert + 1; + + return vsc9959_psfp_sfi_list_add(ocelot, sfi2, last->next); +} + +static struct felix_stream_filter * +vsc9959_psfp_sfi_table_get(struct list_head *sfi_list, u32 index) +{ + struct felix_stream_filter *tmp; + + list_for_each_entry(tmp, sfi_list, list) + if (tmp->index == index) + return tmp; + + return NULL; +} + +static void vsc9959_psfp_sfi_table_del(struct ocelot *ocelot, u32 index) +{ + struct felix_stream_filter *tmp, *n; + struct ocelot_psfp_list *psfp; + u8 z; + + psfp = &ocelot->psfp; + + list_for_each_entry_safe(tmp, n, &psfp->sfi_list, list) + if (tmp->index == index) { + z = refcount_dec_and_test(&tmp->refcount); + if (z) { + tmp->enable = 0; + vsc9959_psfp_sfi_set(ocelot, tmp); + list_del(&tmp->list); + kfree(tmp); + } + break; + } +} + +static void vsc9959_psfp_parse_gate(const struct flow_action_entry *entry, + struct felix_stream_gate *sgi) +{ + sgi->index = entry->hw_index; + sgi->ipv_valid = (entry->gate.prio < 0) ? 0 : 1; + sgi->init_ipv = (sgi->ipv_valid) ? entry->gate.prio : 0; + sgi->basetime = entry->gate.basetime; + sgi->cycletime = entry->gate.cycletime; + sgi->num_entries = entry->gate.num_entries; + sgi->enable = 1; + + memcpy(sgi->entries, entry->gate.entries, + entry->gate.num_entries * sizeof(struct action_gate_entry)); +} + +static u32 vsc9959_sgi_cfg_status(struct ocelot *ocelot) +{ + return ocelot_read(ocelot, ANA_SG_ACCESS_CTRL); +} + +static int vsc9959_psfp_sgi_set(struct ocelot *ocelot, + struct felix_stream_gate *sgi) +{ + struct action_gate_entry *e; + struct timespec64 base_ts; + u32 interval_sum = 0; + u32 val; + int i; + + if (sgi->index > VSC9959_PSFP_GATE_ID_MAX) + return -EINVAL; + + ocelot_write(ocelot, ANA_SG_ACCESS_CTRL_SGID(sgi->index), + ANA_SG_ACCESS_CTRL); + + if (!sgi->enable) { + ocelot_rmw(ocelot, ANA_SG_CONFIG_REG_3_INIT_GATE_STATE, + ANA_SG_CONFIG_REG_3_INIT_GATE_STATE | + ANA_SG_CONFIG_REG_3_GATE_ENABLE, + ANA_SG_CONFIG_REG_3); + + return 0; + } + + if (sgi->cycletime < VSC9959_PSFP_GATE_CYCLETIME_MIN || + sgi->cycletime > NSEC_PER_SEC) + return -EINVAL; + + if (sgi->num_entries > VSC9959_PSFP_GATE_LIST_NUM) + return -EINVAL; + + vsc9959_new_base_time(ocelot, sgi->basetime, sgi->cycletime, &base_ts); + ocelot_write(ocelot, base_ts.tv_nsec, ANA_SG_CONFIG_REG_1); + val = lower_32_bits(base_ts.tv_sec); + ocelot_write(ocelot, val, ANA_SG_CONFIG_REG_2); + + val = upper_32_bits(base_ts.tv_sec); + ocelot_write(ocelot, + (sgi->ipv_valid ? ANA_SG_CONFIG_REG_3_IPV_VALID : 0) | + ANA_SG_CONFIG_REG_3_INIT_IPV(sgi->init_ipv) | + ANA_SG_CONFIG_REG_3_GATE_ENABLE | + ANA_SG_CONFIG_REG_3_LIST_LENGTH(sgi->num_entries) | + ANA_SG_CONFIG_REG_3_INIT_GATE_STATE | + ANA_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB(val), + ANA_SG_CONFIG_REG_3); + + ocelot_write(ocelot, sgi->cycletime, ANA_SG_CONFIG_REG_4); + + e = sgi->entries; + for (i = 0; i < sgi->num_entries; i++) { + u32 ips = (e[i].ipv < 0) ? 0 : (e[i].ipv + 8); + + ocelot_write_rix(ocelot, ANA_SG_GCL_GS_CONFIG_IPS(ips) | + (e[i].gate_state ? 
+ ANA_SG_GCL_GS_CONFIG_GATE_STATE : 0), + ANA_SG_GCL_GS_CONFIG, i); + + interval_sum += e[i].interval; + ocelot_write_rix(ocelot, interval_sum, ANA_SG_GCL_TI_CONFIG, i); + } + + ocelot_rmw(ocelot, ANA_SG_ACCESS_CTRL_CONFIG_CHANGE, + ANA_SG_ACCESS_CTRL_CONFIG_CHANGE, + ANA_SG_ACCESS_CTRL); + + return readx_poll_timeout(vsc9959_sgi_cfg_status, ocelot, val, + (!(ANA_SG_ACCESS_CTRL_CONFIG_CHANGE & val)), + 10, 100000); +} + +static int vsc9959_psfp_sgi_table_add(struct ocelot *ocelot, + struct felix_stream_gate *sgi) +{ + struct felix_stream_gate_entry *tmp; + struct ocelot_psfp_list *psfp; + int ret; + + psfp = &ocelot->psfp; + + list_for_each_entry(tmp, &psfp->sgi_list, list) + if (tmp->index == sgi->index) { + refcount_inc(&tmp->refcount); + return 0; + } + + tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + ret = vsc9959_psfp_sgi_set(ocelot, sgi); + if (ret) { + kfree(tmp); + return ret; + } + + tmp->index = sgi->index; + refcount_set(&tmp->refcount, 1); + list_add_tail(&tmp->list, &psfp->sgi_list); + + return 0; +} + +static void vsc9959_psfp_sgi_table_del(struct ocelot *ocelot, + u32 index) +{ + struct felix_stream_gate_entry *tmp, *n; + struct felix_stream_gate sgi = {0}; + struct ocelot_psfp_list *psfp; + u8 z; + + psfp = &ocelot->psfp; + + list_for_each_entry_safe(tmp, n, &psfp->sgi_list, list) + if (tmp->index == index) { + z = refcount_dec_and_test(&tmp->refcount); + if (z) { + sgi.index = index; + sgi.enable = 0; + vsc9959_psfp_sgi_set(ocelot, &sgi); + list_del(&tmp->list); + kfree(tmp); + } + break; + } +} + +static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port, + struct flow_cls_offload *f) +{ + struct netlink_ext_ack *extack = f->common.extack; + struct felix_stream_filter old_sfi, *sfi_entry; + struct felix_stream_filter sfi = {0}; + const struct flow_action_entry *a; + struct felix_stream *stream_entry; + struct felix_stream stream = {0}; + struct felix_stream_gate *sgi; + struct ocelot_psfp_list *psfp; + struct ocelot_policer pol; + int ret, i, size; + u64 rate, burst; + u32 index; + + psfp = &ocelot->psfp; + + ret = vsc9959_stream_identify(f, &stream); + if (ret) { + NL_SET_ERR_MSG_MOD(extack, "Only can match on VID, PCP, and dest MAC"); + return ret; + } + + mutex_lock(&psfp->lock); + + flow_action_for_each(i, a, &f->rule->action) { + switch (a->id) { + case FLOW_ACTION_GATE: + size = struct_size(sgi, entries, a->gate.num_entries); + sgi = kzalloc(size, GFP_KERNEL); + if (!sgi) { + ret = -ENOMEM; + goto err; + } + vsc9959_psfp_parse_gate(a, sgi); + ret = vsc9959_psfp_sgi_table_add(ocelot, sgi); + if (ret) { + kfree(sgi); + goto err; + } + sfi.sg_valid = 1; + sfi.sgid = sgi->index; + kfree(sgi); + break; + case FLOW_ACTION_POLICE: + index = a->hw_index + VSC9959_PSFP_POLICER_BASE; + if (index > VSC9959_PSFP_POLICER_MAX) { + ret = -EINVAL; + goto err; + } + + rate = a->police.rate_bytes_ps; + burst = rate * PSCHED_NS2TICKS(a->police.burst); + pol = (struct ocelot_policer) { + .burst = div_u64(burst, PSCHED_TICKS_PER_SEC), + .rate = div_u64(rate, 1000) * 8, + }; + ret = ocelot_vcap_policer_add(ocelot, index, &pol); + if (ret) + goto err; + + sfi.fm_valid = 1; + sfi.fmid = index; + sfi.maxsdu = a->police.mtu; + break; + default: + mutex_unlock(&psfp->lock); + return -EOPNOTSUPP; + } + } + + stream.ports = BIT(port); + stream.port = port; + + sfi.portmask = stream.ports; + sfi.prio_valid = (stream.prio < 0 ? 0 : 1); + sfi.prio = (sfi.prio_valid ? stream.prio : 0); + sfi.enable = 1; + + /* Check if stream is set. 
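+	 * If the same {DMAC, VID} stream was already installed on the other
+	 * port, the existing filter is deleted and re-added as a pair of
+	 * consecutive filters via vsc9959_psfp_sfi_table_add2(), and one of
+	 * the two stream table entries is flagged as a dummy so it does not
+	 * reprogram the shared MAC table entry.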
*/ + stream_entry = vsc9959_stream_table_lookup(&psfp->stream_list, &stream); + if (stream_entry) { + if (stream_entry->ports & BIT(port)) { + NL_SET_ERR_MSG_MOD(extack, + "The stream is added on this port"); + ret = -EEXIST; + goto err; + } + + if (stream_entry->ports != BIT(stream_entry->port)) { + NL_SET_ERR_MSG_MOD(extack, + "The stream is added on two ports"); + ret = -EEXIST; + goto err; + } + + stream_entry->ports |= BIT(port); + stream.ports = stream_entry->ports; + + sfi_entry = vsc9959_psfp_sfi_table_get(&psfp->sfi_list, + stream_entry->sfid); + memcpy(&old_sfi, sfi_entry, sizeof(old_sfi)); + + vsc9959_psfp_sfi_table_del(ocelot, stream_entry->sfid); + + old_sfi.portmask = stream_entry->ports; + sfi.portmask = stream.ports; + + if (stream_entry->port > port) { + ret = vsc9959_psfp_sfi_table_add2(ocelot, &sfi, + &old_sfi); + stream_entry->dummy = true; + } else { + ret = vsc9959_psfp_sfi_table_add2(ocelot, &old_sfi, + &sfi); + stream.dummy = true; + } + if (ret) + goto err; + + stream_entry->sfid = old_sfi.index; + } else { + ret = vsc9959_psfp_sfi_table_add(ocelot, &sfi); + if (ret) + goto err; + } + + stream.sfid = sfi.index; + stream.sfid_valid = 1; + ret = vsc9959_stream_table_add(ocelot, &psfp->stream_list, + &stream, extack); + if (ret) { + vsc9959_psfp_sfi_table_del(ocelot, stream.sfid); + goto err; + } + + mutex_unlock(&psfp->lock); + + return 0; + +err: + if (sfi.sg_valid) + vsc9959_psfp_sgi_table_del(ocelot, sfi.sgid); + + if (sfi.fm_valid) + ocelot_vcap_policer_del(ocelot, sfi.fmid); + + mutex_unlock(&psfp->lock); + + return ret; +} + +static int vsc9959_psfp_filter_del(struct ocelot *ocelot, + struct flow_cls_offload *f) +{ + struct felix_stream *stream, tmp, *stream_entry; + struct ocelot_psfp_list *psfp = &ocelot->psfp; + static struct felix_stream_filter *sfi; + + mutex_lock(&psfp->lock); + + stream = vsc9959_stream_table_get(&psfp->stream_list, f->cookie); + if (!stream) { + mutex_unlock(&psfp->lock); + return -ENOMEM; + } + + sfi = vsc9959_psfp_sfi_table_get(&psfp->sfi_list, stream->sfid); + if (!sfi) { + mutex_unlock(&psfp->lock); + return -ENOMEM; + } + + if (sfi->sg_valid) + vsc9959_psfp_sgi_table_del(ocelot, sfi->sgid); + + if (sfi->fm_valid) + ocelot_vcap_policer_del(ocelot, sfi->fmid); + + vsc9959_psfp_sfi_table_del(ocelot, stream->sfid); + + memcpy(&tmp, stream, sizeof(tmp)); + + stream->sfid_valid = 0; + vsc9959_stream_table_del(ocelot, stream); + + stream_entry = vsc9959_stream_table_lookup(&psfp->stream_list, &tmp); + if (stream_entry) { + stream_entry->ports = BIT(stream_entry->port); + if (stream_entry->dummy) { + stream_entry->dummy = false; + vsc9959_mact_stream_set(ocelot, stream_entry, NULL); + } + vsc9959_psfp_sfidmask_set(ocelot, stream_entry->sfid, + stream_entry->ports); + } + + mutex_unlock(&psfp->lock); + + return 0; +} + +static void vsc9959_update_sfid_stats(struct ocelot *ocelot, + struct felix_stream_filter *sfi) +{ + struct felix_stream_filter_counters *s = &sfi->stats; + u32 match, not_pass_gate, not_pass_sdu, red; + u32 sfid = sfi->index; + + lockdep_assert_held(&ocelot->stat_view_lock); + + ocelot_rmw(ocelot, SYS_STAT_CFG_STAT_VIEW(sfid), + SYS_STAT_CFG_STAT_VIEW_M, + SYS_STAT_CFG); + + match = ocelot_read(ocelot, SYS_COUNT_SF_MATCHING_FRAMES); + not_pass_gate = ocelot_read(ocelot, SYS_COUNT_SF_NOT_PASSING_FRAMES); + not_pass_sdu = ocelot_read(ocelot, SYS_COUNT_SF_NOT_PASSING_SDU); + red = ocelot_read(ocelot, SYS_COUNT_SF_RED_FRAMES); + + /* Clear the PSFP counter. 
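+	 * The hardware counters for the SFID selected through STAT_VIEW
+	 * restart from zero after this write; the running totals live in the
+	 * software accumulators of sfi->stats updated below.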
*/ + ocelot_write(ocelot, + SYS_STAT_CFG_STAT_VIEW(sfid) | + SYS_STAT_CFG_STAT_CLEAR_SHOT(0x10), + SYS_STAT_CFG); + + s->match += match; + s->not_pass_gate += not_pass_gate; + s->not_pass_sdu += not_pass_sdu; + s->red += red; +} + +/* Caller must hold &ocelot->stat_view_lock */ +static void vsc9959_update_stats(struct ocelot *ocelot) +{ + struct ocelot_psfp_list *psfp = &ocelot->psfp; + struct felix_stream_filter *sfi; + + mutex_lock(&psfp->lock); + + list_for_each_entry(sfi, &psfp->sfi_list, list) + vsc9959_update_sfid_stats(ocelot, sfi); + + mutex_unlock(&psfp->lock); +} + +static int vsc9959_psfp_stats_get(struct ocelot *ocelot, + struct flow_cls_offload *f, + struct flow_stats *stats) +{ + struct ocelot_psfp_list *psfp = &ocelot->psfp; + struct felix_stream_filter_counters *s; + static struct felix_stream_filter *sfi; + struct felix_stream *stream; + + stream = vsc9959_stream_table_get(&psfp->stream_list, f->cookie); + if (!stream) + return -ENOMEM; + + sfi = vsc9959_psfp_sfi_table_get(&psfp->sfi_list, stream->sfid); + if (!sfi) + return -EINVAL; + + mutex_lock(&ocelot->stat_view_lock); + + vsc9959_update_sfid_stats(ocelot, sfi); + + s = &sfi->stats; + stats->pkts = s->match; + stats->drops = s->not_pass_gate + s->not_pass_sdu + s->red; + + memset(s, 0, sizeof(*s)); + + mutex_unlock(&ocelot->stat_view_lock); + + return 0; +} + +static void vsc9959_psfp_init(struct ocelot *ocelot) +{ + struct ocelot_psfp_list *psfp = &ocelot->psfp; + + INIT_LIST_HEAD(&psfp->stream_list); + INIT_LIST_HEAD(&psfp->sfi_list); + INIT_LIST_HEAD(&psfp->sgi_list); + mutex_init(&psfp->lock); +} + +/* When using cut-through forwarding and the egress port runs at a higher data + * rate than the ingress port, the packet currently under transmission would + * suffer an underrun since it would be transmitted faster than it is received. + * The Felix switch implementation of cut-through forwarding does not check in + * hardware whether this condition is satisfied or not, so we must restrict the + * list of ports that have cut-through forwarding enabled on egress to only be + * the ports operating at the lowest link speed within their respective + * forwarding domain. + */ +static void vsc9959_cut_through_fwd(struct ocelot *ocelot) +{ + struct felix *felix = ocelot_to_felix(ocelot); + struct dsa_switch *ds = felix->ds; + int tc, port, other_port; + + lockdep_assert_held(&ocelot->fwd_domain_lock); + + for (port = 0; port < ocelot->num_phys_ports; port++) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; + int min_speed = ocelot_port->speed; + unsigned long mask = 0; + u32 tmp, val = 0; + + /* Disable cut-through on ports that are down */ + if (ocelot_port->speed <= 0) + goto set; + + if (dsa_is_cpu_port(ds, port)) { + /* Ocelot switches forward from the NPI port towards + * any port, regardless of it being in the NPI port's + * forwarding domain or not. + */ + mask = dsa_user_ports(ds); + } else { + mask = ocelot_get_bridge_fwd_mask(ocelot, port); + mask &= ~BIT(port); + if (ocelot->npi >= 0) + mask |= BIT(ocelot->npi); + else + mask |= ocelot_port_assigned_dsa_8021q_cpu_mask(ocelot, + port); + } + + /* Calculate the minimum link speed, among the ports that are + * up, of this source port's forwarding domain. 
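+	 * For example, a 100 Mbps port whose domain also contains 1 Gbps
+	 * ports is the slowest member and keeps cut-through, while the
+	 * 1 Gbps ports may have to transmit frames that are still being
+	 * received at 100 Mbps and must therefore use store-and-forward.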
+ */ + for_each_set_bit(other_port, &mask, ocelot->num_phys_ports) { + struct ocelot_port *other_ocelot_port; + + other_ocelot_port = ocelot->ports[other_port]; + if (other_ocelot_port->speed <= 0) + continue; + + if (min_speed > other_ocelot_port->speed) + min_speed = other_ocelot_port->speed; + } + + /* Enable cut-through forwarding for all traffic classes that + * don't have oversized dropping enabled, since this check is + * bypassed in cut-through mode. + */ + if (ocelot_port->speed == min_speed) { + val = GENMASK(7, 0); + + for (tc = 0; tc < OCELOT_NUM_TC; tc++) + if (vsc9959_port_qmaxsdu_get(ocelot, port, tc)) + val &= ~BIT(tc); + } + +set: + tmp = ocelot_read_rix(ocelot, ANA_CUT_THRU_CFG, port); + if (tmp == val) + continue; + + dev_dbg(ocelot->dev, + "port %d fwd mask 0x%lx speed %d min_speed %d, %s cut-through forwarding on TC mask 0x%x\n", + port, mask, ocelot_port->speed, min_speed, + val ? "enabling" : "disabling", val); + + ocelot_write_rix(ocelot, val, ANA_CUT_THRU_CFG, port); + } +} + +static const struct ocelot_ops vsc9959_ops = { + .reset = vsc9959_reset, + .wm_enc = vsc9959_wm_enc, + .wm_dec = vsc9959_wm_dec, + .wm_stat = vsc9959_wm_stat, + .port_to_netdev = felix_port_to_netdev, + .netdev_to_port = felix_netdev_to_port, + .psfp_init = vsc9959_psfp_init, + .psfp_filter_add = vsc9959_psfp_filter_add, + .psfp_filter_del = vsc9959_psfp_filter_del, + .psfp_stats_get = vsc9959_psfp_stats_get, + .cut_through_fwd = vsc9959_cut_through_fwd, + .tas_clock_adjust = vsc9959_tas_clock_adjust, + .update_stats = vsc9959_update_stats, +}; + +static const struct felix_info felix_info_vsc9959 = { + .resources = vsc9959_resources, + .num_resources = ARRAY_SIZE(vsc9959_resources), + .resource_names = vsc9959_resource_names, + .regfields = vsc9959_regfields, + .map = vsc9959_regmap, + .ops = &vsc9959_ops, + .stats_layout = vsc9959_stats_layout, + .vcap = vsc9959_vcap_props, + .vcap_pol_base = VSC9959_VCAP_POLICER_BASE, + .vcap_pol_max = VSC9959_VCAP_POLICER_MAX, + .vcap_pol_base2 = 0, + .vcap_pol_max2 = 0, + .num_mact_rows = 2048, + .num_ports = VSC9959_NUM_PORTS, + .num_tx_queues = OCELOT_NUM_TC, + .quirk_no_xtr_irq = true, + .ptp_caps = &vsc9959_ptp_caps, + .mdio_bus_alloc = vsc9959_mdio_bus_alloc, + .mdio_bus_free = vsc9959_mdio_bus_free, + .phylink_validate = vsc9959_phylink_validate, + .port_modes = vsc9959_port_modes, + .port_setup_tc = vsc9959_port_setup_tc, + .port_sched_speed_set = vsc9959_sched_speed_set, + .tas_guard_bands_update = vsc9959_tas_guard_bands_update, +}; + +static irqreturn_t felix_irq_handler(int irq, void *data) +{ + struct ocelot *ocelot = (struct ocelot *)data; + + /* The INTB interrupt is used for both PTP TX timestamp interrupt + * and preemption status change interrupt on each port. + * + * - Get txtstamp if have + * - TODO: handle preemption. Without handling it, driver may get + * interrupt storm. 
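+	 *
+	 * The interrupt is requested with IRQF_ONESHOT in felix_pci_probe(),
+	 * so the line stays masked until this threaded handler returns.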
+ */ + + ocelot_get_txtstamp(ocelot); + + return IRQ_HANDLED; +} + +static int felix_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct dsa_switch *ds; + struct ocelot *ocelot; + struct felix *felix; + int err; + + if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) { + dev_info(&pdev->dev, "device is disabled, skipping\n"); + return -ENODEV; + } + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "device enable failed\n"); + goto err_pci_enable; + } + + felix = kzalloc(sizeof(struct felix), GFP_KERNEL); + if (!felix) { + err = -ENOMEM; + dev_err(&pdev->dev, "Failed to allocate driver memory\n"); + goto err_alloc_felix; + } + + pci_set_drvdata(pdev, felix); + ocelot = &felix->ocelot; + ocelot->dev = &pdev->dev; + ocelot->num_flooding_pgids = OCELOT_NUM_TC; + felix->info = &felix_info_vsc9959; + felix->switch_base = pci_resource_start(pdev, VSC9959_SWITCH_PCI_BAR); + + pci_set_master(pdev); + + err = devm_request_threaded_irq(&pdev->dev, pdev->irq, NULL, + &felix_irq_handler, IRQF_ONESHOT, + "felix-intb", ocelot); + if (err) { + dev_err(&pdev->dev, "Failed to request irq\n"); + goto err_alloc_irq; + } + + ocelot->ptp = 1; + + ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL); + if (!ds) { + err = -ENOMEM; + dev_err(&pdev->dev, "Failed to allocate DSA switch\n"); + goto err_alloc_ds; + } + + ds->dev = &pdev->dev; + ds->num_ports = felix->info->num_ports; + ds->num_tx_queues = felix->info->num_tx_queues; + ds->ops = &felix_switch_ops; + ds->priv = ocelot; + felix->ds = ds; + felix->tag_proto = DSA_TAG_PROTO_OCELOT; + + err = dsa_register_switch(ds); + if (err) { + dev_err_probe(&pdev->dev, err, "Failed to register DSA switch\n"); + goto err_register_ds; + } + + return 0; + +err_register_ds: + kfree(ds); +err_alloc_ds: +err_alloc_irq: + kfree(felix); +err_alloc_felix: + pci_disable_device(pdev); +err_pci_enable: + return err; +} + +static void felix_pci_remove(struct pci_dev *pdev) +{ + struct felix *felix = pci_get_drvdata(pdev); + + if (!felix) + return; + + dsa_unregister_switch(felix->ds); + + kfree(felix->ds); + kfree(felix); + + pci_disable_device(pdev); +} + +static void felix_pci_shutdown(struct pci_dev *pdev) +{ + struct felix *felix = pci_get_drvdata(pdev); + + if (!felix) + return; + + dsa_switch_shutdown(felix->ds); + + pci_set_drvdata(pdev, NULL); +} + +static struct pci_device_id felix_ids[] = { + { + /* NXP LS1028A */ + PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0xEEF0), + }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, felix_ids); + +static struct pci_driver felix_vsc9959_pci_driver = { + .name = "mscc_felix", + .id_table = felix_ids, + .probe = felix_pci_probe, + .remove = felix_pci_remove, + .shutdown = felix_pci_shutdown, +}; +module_pci_driver(felix_vsc9959_pci_driver); + +MODULE_DESCRIPTION("Felix Switch driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c new file mode 100644 index 000000000..c2863d6d8 --- /dev/null +++ b/drivers/net/dsa/ocelot/seville_vsc9953.c @@ -0,0 +1,1119 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Distributed Switch Architecture VSC9953 driver + * Copyright (C) 2020, Maxim Kochetkov <fido_max@inbox.ru> + */ +#include <linux/types.h> +#include <soc/mscc/ocelot_vcap.h> +#include <soc/mscc/ocelot_sys.h> +#include <soc/mscc/ocelot.h> +#include <linux/mdio/mdio-mscc-miim.h> +#include <linux/of_mdio.h> +#include <linux/of_platform.h> +#include <linux/pcs-lynx.h> +#include <linux/dsa/ocelot.h> +#include 
<linux/iopoll.h> +#include "felix.h" + +#define VSC9953_NUM_PORTS 10 + +#define VSC9953_VCAP_POLICER_BASE 11 +#define VSC9953_VCAP_POLICER_MAX 31 +#define VSC9953_VCAP_POLICER_BASE2 120 +#define VSC9953_VCAP_POLICER_MAX2 161 + +#define VSC9953_PORT_MODE_SERDES (OCELOT_PORT_MODE_1000BASEX | \ + OCELOT_PORT_MODE_SGMII | \ + OCELOT_PORT_MODE_QSGMII) + +static const u32 vsc9953_port_modes[VSC9953_NUM_PORTS] = { + VSC9953_PORT_MODE_SERDES, + VSC9953_PORT_MODE_SERDES, + VSC9953_PORT_MODE_SERDES, + VSC9953_PORT_MODE_SERDES, + VSC9953_PORT_MODE_SERDES, + VSC9953_PORT_MODE_SERDES, + VSC9953_PORT_MODE_SERDES, + VSC9953_PORT_MODE_SERDES, + OCELOT_PORT_MODE_INTERNAL, + OCELOT_PORT_MODE_INTERNAL, +}; + +static const u32 vsc9953_ana_regmap[] = { + REG(ANA_ADVLEARN, 0x00b500), + REG(ANA_VLANMASK, 0x00b504), + REG_RESERVED(ANA_PORT_B_DOMAIN), + REG(ANA_ANAGEFIL, 0x00b50c), + REG(ANA_ANEVENTS, 0x00b510), + REG(ANA_STORMLIMIT_BURST, 0x00b514), + REG(ANA_STORMLIMIT_CFG, 0x00b518), + REG(ANA_ISOLATED_PORTS, 0x00b528), + REG(ANA_COMMUNITY_PORTS, 0x00b52c), + REG(ANA_AUTOAGE, 0x00b530), + REG(ANA_MACTOPTIONS, 0x00b534), + REG(ANA_LEARNDISC, 0x00b538), + REG(ANA_AGENCTRL, 0x00b53c), + REG(ANA_MIRRORPORTS, 0x00b540), + REG(ANA_EMIRRORPORTS, 0x00b544), + REG(ANA_FLOODING, 0x00b548), + REG(ANA_FLOODING_IPMC, 0x00b54c), + REG(ANA_SFLOW_CFG, 0x00b550), + REG(ANA_PORT_MODE, 0x00b57c), + REG_RESERVED(ANA_CUT_THRU_CFG), + REG(ANA_PGID_PGID, 0x00b600), + REG(ANA_TABLES_ANMOVED, 0x00b4ac), + REG(ANA_TABLES_MACHDATA, 0x00b4b0), + REG(ANA_TABLES_MACLDATA, 0x00b4b4), + REG_RESERVED(ANA_TABLES_STREAMDATA), + REG(ANA_TABLES_MACACCESS, 0x00b4b8), + REG(ANA_TABLES_MACTINDX, 0x00b4bc), + REG(ANA_TABLES_VLANACCESS, 0x00b4c0), + REG(ANA_TABLES_VLANTIDX, 0x00b4c4), + REG_RESERVED(ANA_TABLES_ISDXACCESS), + REG_RESERVED(ANA_TABLES_ISDXTIDX), + REG(ANA_TABLES_ENTRYLIM, 0x00b480), + REG_RESERVED(ANA_TABLES_PTP_ID_HIGH), + REG_RESERVED(ANA_TABLES_PTP_ID_LOW), + REG_RESERVED(ANA_TABLES_STREAMACCESS), + REG_RESERVED(ANA_TABLES_STREAMTIDX), + REG_RESERVED(ANA_TABLES_SEQ_HISTORY), + REG_RESERVED(ANA_TABLES_SEQ_MASK), + REG_RESERVED(ANA_TABLES_SFID_MASK), + REG_RESERVED(ANA_TABLES_SFIDACCESS), + REG_RESERVED(ANA_TABLES_SFIDTIDX), + REG_RESERVED(ANA_MSTI_STATE), + REG_RESERVED(ANA_OAM_UPM_LM_CNT), + REG_RESERVED(ANA_SG_ACCESS_CTRL), + REG_RESERVED(ANA_SG_CONFIG_REG_1), + REG_RESERVED(ANA_SG_CONFIG_REG_2), + REG_RESERVED(ANA_SG_CONFIG_REG_3), + REG_RESERVED(ANA_SG_CONFIG_REG_4), + REG_RESERVED(ANA_SG_CONFIG_REG_5), + REG_RESERVED(ANA_SG_GCL_GS_CONFIG), + REG_RESERVED(ANA_SG_GCL_TI_CONFIG), + REG_RESERVED(ANA_SG_STATUS_REG_1), + REG_RESERVED(ANA_SG_STATUS_REG_2), + REG_RESERVED(ANA_SG_STATUS_REG_3), + REG(ANA_PORT_VLAN_CFG, 0x000000), + REG(ANA_PORT_DROP_CFG, 0x000004), + REG(ANA_PORT_QOS_CFG, 0x000008), + REG(ANA_PORT_VCAP_CFG, 0x00000c), + REG(ANA_PORT_VCAP_S1_KEY_CFG, 0x000010), + REG(ANA_PORT_VCAP_S2_CFG, 0x00001c), + REG(ANA_PORT_PCP_DEI_MAP, 0x000020), + REG(ANA_PORT_CPU_FWD_CFG, 0x000060), + REG(ANA_PORT_CPU_FWD_BPDU_CFG, 0x000064), + REG(ANA_PORT_CPU_FWD_GARP_CFG, 0x000068), + REG(ANA_PORT_CPU_FWD_CCM_CFG, 0x00006c), + REG(ANA_PORT_PORT_CFG, 0x000070), + REG(ANA_PORT_POL_CFG, 0x000074), + REG_RESERVED(ANA_PORT_PTP_CFG), + REG_RESERVED(ANA_PORT_PTP_DLY1_CFG), + REG_RESERVED(ANA_PORT_PTP_DLY2_CFG), + REG_RESERVED(ANA_PORT_SFID_CFG), + REG(ANA_PFC_PFC_CFG, 0x00c000), + REG_RESERVED(ANA_PFC_PFC_TIMER), + REG_RESERVED(ANA_IPT_OAM_MEP_CFG), + REG_RESERVED(ANA_IPT_IPT), + REG_RESERVED(ANA_PPT_PPT), + REG_RESERVED(ANA_FID_MAP_FID_MAP), + 
REG(ANA_AGGR_CFG, 0x00c600), + REG(ANA_CPUQ_CFG, 0x00c604), + REG_RESERVED(ANA_CPUQ_CFG2), + REG(ANA_CPUQ_8021_CFG, 0x00c60c), + REG(ANA_DSCP_CFG, 0x00c64c), + REG(ANA_DSCP_REWR_CFG, 0x00c74c), + REG(ANA_VCAP_RNG_TYPE_CFG, 0x00c78c), + REG(ANA_VCAP_RNG_VAL_CFG, 0x00c7ac), + REG_RESERVED(ANA_VRAP_CFG), + REG_RESERVED(ANA_VRAP_HDR_DATA), + REG_RESERVED(ANA_VRAP_HDR_MASK), + REG(ANA_DISCARD_CFG, 0x00c7d8), + REG(ANA_FID_CFG, 0x00c7dc), + REG(ANA_POL_PIR_CFG, 0x00a000), + REG(ANA_POL_CIR_CFG, 0x00a004), + REG(ANA_POL_MODE_CFG, 0x00a008), + REG(ANA_POL_PIR_STATE, 0x00a00c), + REG(ANA_POL_CIR_STATE, 0x00a010), + REG_RESERVED(ANA_POL_STATE), + REG(ANA_POL_FLOWC, 0x00c280), + REG(ANA_POL_HYST, 0x00c2ec), + REG_RESERVED(ANA_POL_MISC_CFG), +}; + +static const u32 vsc9953_qs_regmap[] = { + REG(QS_XTR_GRP_CFG, 0x000000), + REG(QS_XTR_RD, 0x000008), + REG(QS_XTR_FRM_PRUNING, 0x000010), + REG(QS_XTR_FLUSH, 0x000018), + REG(QS_XTR_DATA_PRESENT, 0x00001c), + REG(QS_XTR_CFG, 0x000020), + REG(QS_INJ_GRP_CFG, 0x000024), + REG(QS_INJ_WR, 0x00002c), + REG(QS_INJ_CTRL, 0x000034), + REG(QS_INJ_STATUS, 0x00003c), + REG(QS_INJ_ERR, 0x000040), + REG_RESERVED(QS_INH_DBG), +}; + +static const u32 vsc9953_vcap_regmap[] = { + /* VCAP_CORE_CFG */ + REG(VCAP_CORE_UPDATE_CTRL, 0x000000), + REG(VCAP_CORE_MV_CFG, 0x000004), + /* VCAP_CORE_CACHE */ + REG(VCAP_CACHE_ENTRY_DAT, 0x000008), + REG(VCAP_CACHE_MASK_DAT, 0x000108), + REG(VCAP_CACHE_ACTION_DAT, 0x000208), + REG(VCAP_CACHE_CNT_DAT, 0x000308), + REG(VCAP_CACHE_TG_DAT, 0x000388), + /* VCAP_CONST */ + REG(VCAP_CONST_VCAP_VER, 0x000398), + REG(VCAP_CONST_ENTRY_WIDTH, 0x00039c), + REG(VCAP_CONST_ENTRY_CNT, 0x0003a0), + REG(VCAP_CONST_ENTRY_SWCNT, 0x0003a4), + REG(VCAP_CONST_ENTRY_TG_WIDTH, 0x0003a8), + REG(VCAP_CONST_ACTION_DEF_CNT, 0x0003ac), + REG(VCAP_CONST_ACTION_WIDTH, 0x0003b0), + REG(VCAP_CONST_CNT_WIDTH, 0x0003b4), + REG_RESERVED(VCAP_CONST_CORE_CNT), + REG_RESERVED(VCAP_CONST_IF_CNT), +}; + +static const u32 vsc9953_qsys_regmap[] = { + REG(QSYS_PORT_MODE, 0x003600), + REG(QSYS_SWITCH_PORT_MODE, 0x003630), + REG(QSYS_STAT_CNT_CFG, 0x00365c), + REG(QSYS_EEE_CFG, 0x003660), + REG(QSYS_EEE_THRES, 0x003688), + REG(QSYS_IGR_NO_SHARING, 0x00368c), + REG(QSYS_EGR_NO_SHARING, 0x003690), + REG(QSYS_SW_STATUS, 0x003694), + REG(QSYS_EXT_CPU_CFG, 0x0036c0), + REG_RESERVED(QSYS_PAD_CFG), + REG(QSYS_CPU_GROUP_MAP, 0x0036c8), + REG_RESERVED(QSYS_QMAP), + REG_RESERVED(QSYS_ISDX_SGRP), + REG_RESERVED(QSYS_TIMED_FRAME_ENTRY), + REG_RESERVED(QSYS_TFRM_MISC), + REG_RESERVED(QSYS_TFRM_PORT_DLY), + REG_RESERVED(QSYS_TFRM_TIMER_CFG_1), + REG_RESERVED(QSYS_TFRM_TIMER_CFG_2), + REG_RESERVED(QSYS_TFRM_TIMER_CFG_3), + REG_RESERVED(QSYS_TFRM_TIMER_CFG_4), + REG_RESERVED(QSYS_TFRM_TIMER_CFG_5), + REG_RESERVED(QSYS_TFRM_TIMER_CFG_6), + REG_RESERVED(QSYS_TFRM_TIMER_CFG_7), + REG_RESERVED(QSYS_TFRM_TIMER_CFG_8), + REG(QSYS_RED_PROFILE, 0x003724), + REG(QSYS_RES_QOS_MODE, 0x003764), + REG(QSYS_RES_CFG, 0x004000), + REG(QSYS_RES_STAT, 0x004004), + REG(QSYS_EGR_DROP_MODE, 0x003768), + REG(QSYS_EQ_CTRL, 0x00376c), + REG_RESERVED(QSYS_EVENTS_CORE), + REG_RESERVED(QSYS_QMAXSDU_CFG_0), + REG_RESERVED(QSYS_QMAXSDU_CFG_1), + REG_RESERVED(QSYS_QMAXSDU_CFG_2), + REG_RESERVED(QSYS_QMAXSDU_CFG_3), + REG_RESERVED(QSYS_QMAXSDU_CFG_4), + REG_RESERVED(QSYS_QMAXSDU_CFG_5), + REG_RESERVED(QSYS_QMAXSDU_CFG_6), + REG_RESERVED(QSYS_QMAXSDU_CFG_7), + REG_RESERVED(QSYS_PREEMPTION_CFG), + REG(QSYS_CIR_CFG, 0x000000), + REG_RESERVED(QSYS_EIR_CFG), + REG(QSYS_SE_CFG, 0x000008), + REG(QSYS_SE_DWRR_CFG, 0x00000c), + 
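+	/* Unlike VSC9959, VSC9953 has no time-aware shaper, which is why the
+	 * QMAXSDU_CFG_* entries above and the TAG_CONFIG/TAS_PARAM_CFG/GCL
+	 * entries below are reserved.
+	 */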
REG_RESERVED(QSYS_SE_CONNECT), + REG_RESERVED(QSYS_SE_DLB_SENSE), + REG(QSYS_CIR_STATE, 0x000044), + REG_RESERVED(QSYS_EIR_STATE), + REG_RESERVED(QSYS_SE_STATE), + REG(QSYS_HSCH_MISC_CFG, 0x003774), + REG_RESERVED(QSYS_TAG_CONFIG), + REG_RESERVED(QSYS_TAS_PARAM_CFG_CTRL), + REG_RESERVED(QSYS_PORT_MAX_SDU), + REG_RESERVED(QSYS_PARAM_CFG_REG_1), + REG_RESERVED(QSYS_PARAM_CFG_REG_2), + REG_RESERVED(QSYS_PARAM_CFG_REG_3), + REG_RESERVED(QSYS_PARAM_CFG_REG_4), + REG_RESERVED(QSYS_PARAM_CFG_REG_5), + REG_RESERVED(QSYS_GCL_CFG_REG_1), + REG_RESERVED(QSYS_GCL_CFG_REG_2), + REG_RESERVED(QSYS_PARAM_STATUS_REG_1), + REG_RESERVED(QSYS_PARAM_STATUS_REG_2), + REG_RESERVED(QSYS_PARAM_STATUS_REG_3), + REG_RESERVED(QSYS_PARAM_STATUS_REG_4), + REG_RESERVED(QSYS_PARAM_STATUS_REG_5), + REG_RESERVED(QSYS_PARAM_STATUS_REG_6), + REG_RESERVED(QSYS_PARAM_STATUS_REG_7), + REG_RESERVED(QSYS_PARAM_STATUS_REG_8), + REG_RESERVED(QSYS_PARAM_STATUS_REG_9), + REG_RESERVED(QSYS_GCL_STATUS_REG_1), + REG_RESERVED(QSYS_GCL_STATUS_REG_2), +}; + +static const u32 vsc9953_rew_regmap[] = { + REG(REW_PORT_VLAN_CFG, 0x000000), + REG(REW_TAG_CFG, 0x000004), + REG(REW_PORT_CFG, 0x000008), + REG(REW_DSCP_CFG, 0x00000c), + REG(REW_PCP_DEI_QOS_MAP_CFG, 0x000010), + REG_RESERVED(REW_PTP_CFG), + REG_RESERVED(REW_PTP_DLY1_CFG), + REG_RESERVED(REW_RED_TAG_CFG), + REG(REW_DSCP_REMAP_DP1_CFG, 0x000610), + REG(REW_DSCP_REMAP_CFG, 0x000710), + REG_RESERVED(REW_STAT_CFG), + REG_RESERVED(REW_REW_STICKY), + REG_RESERVED(REW_PPT), +}; + +static const u32 vsc9953_sys_regmap[] = { + REG(SYS_COUNT_RX_OCTETS, 0x000000), + REG(SYS_COUNT_RX_UNICAST, 0x000004), + REG(SYS_COUNT_RX_MULTICAST, 0x000008), + REG(SYS_COUNT_RX_BROADCAST, 0x00000c), + REG(SYS_COUNT_RX_SHORTS, 0x000010), + REG(SYS_COUNT_RX_FRAGMENTS, 0x000014), + REG(SYS_COUNT_RX_JABBERS, 0x000018), + REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c), + REG(SYS_COUNT_RX_SYM_ERRS, 0x000020), + REG(SYS_COUNT_RX_64, 0x000024), + REG(SYS_COUNT_RX_65_127, 0x000028), + REG(SYS_COUNT_RX_128_255, 0x00002c), + REG(SYS_COUNT_RX_256_511, 0x000030), + REG(SYS_COUNT_RX_512_1023, 0x000034), + REG(SYS_COUNT_RX_1024_1526, 0x000038), + REG(SYS_COUNT_RX_1527_MAX, 0x00003c), + REG(SYS_COUNT_RX_PAUSE, 0x000040), + REG(SYS_COUNT_RX_CONTROL, 0x000044), + REG(SYS_COUNT_RX_LONGS, 0x000048), + REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c), + REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050), + REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054), + REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058), + REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c), + REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060), + REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064), + REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068), + REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c), + REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070), + REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074), + REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078), + REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c), + REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080), + REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084), + REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088), + REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c), + REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090), + REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094), + REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098), + REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c), + REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0), + REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4), + REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8), + REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac), + REG(SYS_COUNT_TX_OCTETS, 0x000100), + REG(SYS_COUNT_TX_UNICAST, 0x000104), + REG(SYS_COUNT_TX_MULTICAST, 0x000108), + REG(SYS_COUNT_TX_BROADCAST, 0x00010c), + 
REG(SYS_COUNT_TX_COLLISION, 0x000110), + REG(SYS_COUNT_TX_DROPS, 0x000114), + REG(SYS_COUNT_TX_PAUSE, 0x000118), + REG(SYS_COUNT_TX_64, 0x00011c), + REG(SYS_COUNT_TX_65_127, 0x000120), + REG(SYS_COUNT_TX_128_255, 0x000124), + REG(SYS_COUNT_TX_256_511, 0x000128), + REG(SYS_COUNT_TX_512_1023, 0x00012c), + REG(SYS_COUNT_TX_1024_1526, 0x000130), + REG(SYS_COUNT_TX_1527_MAX, 0x000134), + REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000138), + REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00013c), + REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000140), + REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000144), + REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000148), + REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00014c), + REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000150), + REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000154), + REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000158), + REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00015c), + REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000160), + REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000164), + REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000168), + REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00016c), + REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000170), + REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000174), + REG(SYS_COUNT_TX_AGED, 0x000178), + REG(SYS_COUNT_DROP_LOCAL, 0x000200), + REG(SYS_COUNT_DROP_TAIL, 0x000204), + REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000208), + REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00020c), + REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000210), + REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000214), + REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000218), + REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00021c), + REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000220), + REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000224), + REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000228), + REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00022c), + REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000230), + REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000234), + REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000238), + REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00023c), + REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000240), + REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000244), + REG(SYS_RESET_CFG, 0x000318), + REG_RESERVED(SYS_SR_ETYPE_CFG), + REG(SYS_VLAN_ETYPE_CFG, 0x000320), + REG(SYS_PORT_MODE, 0x000324), + REG(SYS_FRONT_PORT_MODE, 0x000354), + REG(SYS_FRM_AGING, 0x00037c), + REG(SYS_STAT_CFG, 0x000380), + REG_RESERVED(SYS_SW_STATUS), + REG_RESERVED(SYS_MISC_CFG), + REG_RESERVED(SYS_REW_MAC_HIGH_CFG), + REG_RESERVED(SYS_REW_MAC_LOW_CFG), + REG_RESERVED(SYS_TIMESTAMP_OFFSET), + REG(SYS_PAUSE_CFG, 0x00044c), + REG(SYS_PAUSE_TOT_CFG, 0x000478), + REG(SYS_ATOP, 0x00047c), + REG(SYS_ATOP_TOT_CFG, 0x0004a8), + REG(SYS_MAC_FC_CFG, 0x0004ac), + REG(SYS_MMGT, 0x0004d4), + REG_RESERVED(SYS_MMGT_FAST), + REG_RESERVED(SYS_EVENTS_DIF), + REG_RESERVED(SYS_EVENTS_CORE), + REG_RESERVED(SYS_PTP_STATUS), + REG_RESERVED(SYS_PTP_TXSTAMP), + REG_RESERVED(SYS_PTP_NXT), + REG_RESERVED(SYS_PTP_CFG), + REG_RESERVED(SYS_RAM_INIT), + REG_RESERVED(SYS_CM_ADDR), + REG_RESERVED(SYS_CM_DATA_WR), + REG_RESERVED(SYS_CM_DATA_RD), + REG_RESERVED(SYS_CM_OP), + REG_RESERVED(SYS_CM_DATA), +}; + +static const u32 vsc9953_gcb_regmap[] = { + REG(GCB_SOFT_RST, 0x000008), + REG(GCB_MIIM_MII_STATUS, 0x0000ac), + REG(GCB_MIIM_MII_CMD, 0x0000b4), + REG(GCB_MIIM_MII_DATA, 0x0000b8), +}; + +static const u32 vsc9953_dev_gmii_regmap[] = { + REG(DEV_CLOCK_CFG, 0x0), + REG(DEV_PORT_MISC, 0x4), + REG_RESERVED(DEV_EVENTS), + REG(DEV_EEE_CFG, 0xc), + REG_RESERVED(DEV_RX_PATH_DELAY), + REG_RESERVED(DEV_TX_PATH_DELAY), + REG_RESERVED(DEV_PTP_PREDICT_CFG), + REG(DEV_MAC_ENA_CFG, 0x10), + REG(DEV_MAC_MODE_CFG, 0x14), + REG(DEV_MAC_MAXLEN_CFG, 0x18), + REG(DEV_MAC_TAGS_CFG, 0x1c), + 
REG(DEV_MAC_ADV_CHK_CFG, 0x20), + REG(DEV_MAC_IFG_CFG, 0x24), + REG(DEV_MAC_HDX_CFG, 0x28), + REG_RESERVED(DEV_MAC_DBG_CFG), + REG(DEV_MAC_FC_MAC_LOW_CFG, 0x30), + REG(DEV_MAC_FC_MAC_HIGH_CFG, 0x34), + REG(DEV_MAC_STICKY, 0x38), + REG_RESERVED(PCS1G_CFG), + REG_RESERVED(PCS1G_MODE_CFG), + REG_RESERVED(PCS1G_SD_CFG), + REG_RESERVED(PCS1G_ANEG_CFG), + REG_RESERVED(PCS1G_ANEG_NP_CFG), + REG_RESERVED(PCS1G_LB_CFG), + REG_RESERVED(PCS1G_DBG_CFG), + REG_RESERVED(PCS1G_CDET_CFG), + REG_RESERVED(PCS1G_ANEG_STATUS), + REG_RESERVED(PCS1G_ANEG_NP_STATUS), + REG_RESERVED(PCS1G_LINK_STATUS), + REG_RESERVED(PCS1G_LINK_DOWN_CNT), + REG_RESERVED(PCS1G_STICKY), + REG_RESERVED(PCS1G_DEBUG_STATUS), + REG_RESERVED(PCS1G_LPI_CFG), + REG_RESERVED(PCS1G_LPI_WAKE_ERROR_CNT), + REG_RESERVED(PCS1G_LPI_STATUS), + REG_RESERVED(PCS1G_TSTPAT_MODE_CFG), + REG_RESERVED(PCS1G_TSTPAT_STATUS), + REG_RESERVED(DEV_PCS_FX100_CFG), + REG_RESERVED(DEV_PCS_FX100_STATUS), +}; + +static const u32 *vsc9953_regmap[TARGET_MAX] = { + [ANA] = vsc9953_ana_regmap, + [QS] = vsc9953_qs_regmap, + [QSYS] = vsc9953_qsys_regmap, + [REW] = vsc9953_rew_regmap, + [SYS] = vsc9953_sys_regmap, + [S0] = vsc9953_vcap_regmap, + [S1] = vsc9953_vcap_regmap, + [S2] = vsc9953_vcap_regmap, + [GCB] = vsc9953_gcb_regmap, + [DEV_GMII] = vsc9953_dev_gmii_regmap, +}; + +/* Addresses are relative to the device's base address */ +static const struct resource vsc9953_resources[] = { + DEFINE_RES_MEM_NAMED(0x0010000, 0x0010000, "sys"), + DEFINE_RES_MEM_NAMED(0x0030000, 0x0010000, "rew"), + DEFINE_RES_MEM_NAMED(0x0040000, 0x0000400, "s0"), + DEFINE_RES_MEM_NAMED(0x0050000, 0x0000400, "s1"), + DEFINE_RES_MEM_NAMED(0x0060000, 0x0000400, "s2"), + DEFINE_RES_MEM_NAMED(0x0070000, 0x0000200, "devcpu_gcb"), + DEFINE_RES_MEM_NAMED(0x0080000, 0x0000100, "qs"), + DEFINE_RES_MEM_NAMED(0x0090000, 0x00000cc, "ptp"), + DEFINE_RES_MEM_NAMED(0x0100000, 0x0010000, "port0"), + DEFINE_RES_MEM_NAMED(0x0110000, 0x0010000, "port1"), + DEFINE_RES_MEM_NAMED(0x0120000, 0x0010000, "port2"), + DEFINE_RES_MEM_NAMED(0x0130000, 0x0010000, "port3"), + DEFINE_RES_MEM_NAMED(0x0140000, 0x0010000, "port4"), + DEFINE_RES_MEM_NAMED(0x0150000, 0x0010000, "port5"), + DEFINE_RES_MEM_NAMED(0x0160000, 0x0010000, "port6"), + DEFINE_RES_MEM_NAMED(0x0170000, 0x0010000, "port7"), + DEFINE_RES_MEM_NAMED(0x0180000, 0x0010000, "port8"), + DEFINE_RES_MEM_NAMED(0x0190000, 0x0010000, "port9"), + DEFINE_RES_MEM_NAMED(0x0200000, 0x0020000, "qsys"), + DEFINE_RES_MEM_NAMED(0x0280000, 0x0010000, "ana"), +}; + +static const char * const vsc9953_resource_names[TARGET_MAX] = { + [SYS] = "sys", + [REW] = "rew", + [S0] = "s0", + [S1] = "s1", + [S2] = "s2", + [GCB] = "devcpu_gcb", + [QS] = "qs", + [PTP] = "ptp", + [QSYS] = "qsys", + [ANA] = "ana", +}; + +static const struct reg_field vsc9953_regfields[REGFIELD_MAX] = { + [ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 10, 10), + [ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 9), + [ANA_ANEVENTS_AUTOAGE] = REG_FIELD(ANA_ANEVENTS, 24, 24), + [ANA_ANEVENTS_STORM_DROP] = REG_FIELD(ANA_ANEVENTS, 22, 22), + [ANA_ANEVENTS_LEARN_DROP] = REG_FIELD(ANA_ANEVENTS, 21, 21), + [ANA_ANEVENTS_AGED_ENTRY] = REG_FIELD(ANA_ANEVENTS, 20, 20), + [ANA_ANEVENTS_CPU_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 19, 19), + [ANA_ANEVENTS_AUTO_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 18, 18), + [ANA_ANEVENTS_LEARN_REMOVE] = REG_FIELD(ANA_ANEVENTS, 17, 17), + [ANA_ANEVENTS_AUTO_LEARNED] = REG_FIELD(ANA_ANEVENTS, 16, 16), + [ANA_ANEVENTS_AUTO_MOVED] = REG_FIELD(ANA_ANEVENTS, 15, 15), + 
[ANA_ANEVENTS_CLASSIFIED_DROP] = REG_FIELD(ANA_ANEVENTS, 13, 13), + [ANA_ANEVENTS_CLASSIFIED_COPY] = REG_FIELD(ANA_ANEVENTS, 12, 12), + [ANA_ANEVENTS_VLAN_DISCARD] = REG_FIELD(ANA_ANEVENTS, 11, 11), + [ANA_ANEVENTS_FWD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 10, 10), + [ANA_ANEVENTS_MULTICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 9, 9), + [ANA_ANEVENTS_UNICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 8, 8), + [ANA_ANEVENTS_DEST_KNOWN] = REG_FIELD(ANA_ANEVENTS, 7, 7), + [ANA_ANEVENTS_BUCKET3_MATCH] = REG_FIELD(ANA_ANEVENTS, 6, 6), + [ANA_ANEVENTS_BUCKET2_MATCH] = REG_FIELD(ANA_ANEVENTS, 5, 5), + [ANA_ANEVENTS_BUCKET1_MATCH] = REG_FIELD(ANA_ANEVENTS, 4, 4), + [ANA_ANEVENTS_BUCKET0_MATCH] = REG_FIELD(ANA_ANEVENTS, 3, 3), + [ANA_ANEVENTS_CPU_OPERATION] = REG_FIELD(ANA_ANEVENTS, 2, 2), + [ANA_ANEVENTS_DMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 1, 1), + [ANA_ANEVENTS_SMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 0, 0), + [ANA_TABLES_MACACCESS_B_DOM] = REG_FIELD(ANA_TABLES_MACACCESS, 16, 16), + [ANA_TABLES_MACTINDX_BUCKET] = REG_FIELD(ANA_TABLES_MACTINDX, 11, 12), + [ANA_TABLES_MACTINDX_M_INDEX] = REG_FIELD(ANA_TABLES_MACTINDX, 0, 10), + [SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 7, 7), + [SYS_RESET_CFG_MEM_ENA] = REG_FIELD(SYS_RESET_CFG, 6, 6), + [SYS_RESET_CFG_MEM_INIT] = REG_FIELD(SYS_RESET_CFG, 5, 5), + [GCB_SOFT_RST_SWC_RST] = REG_FIELD(GCB_SOFT_RST, 0, 0), + [GCB_MIIM_MII_STATUS_PENDING] = REG_FIELD(GCB_MIIM_MII_STATUS, 2, 2), + [GCB_MIIM_MII_STATUS_BUSY] = REG_FIELD(GCB_MIIM_MII_STATUS, 3, 3), + /* Replicated per number of ports (11), register size 4 per port */ + [QSYS_SWITCH_PORT_MODE_PORT_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 13, 13, 11, 4), + [QSYS_SWITCH_PORT_MODE_YEL_RSRVD] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 10, 10, 11, 4), + [QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 9, 9, 11, 4), + [QSYS_SWITCH_PORT_MODE_TX_PFC_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 1, 8, 11, 4), + [QSYS_SWITCH_PORT_MODE_TX_PFC_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 0, 0, 11, 4), + [SYS_PORT_MODE_INCL_INJ_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 4, 5, 11, 4), + [SYS_PORT_MODE_INCL_XTR_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 2, 3, 11, 4), + [SYS_PORT_MODE_INCL_HDR_ERR] = REG_FIELD_ID(SYS_PORT_MODE, 0, 0, 11, 4), + [SYS_PAUSE_CFG_PAUSE_START] = REG_FIELD_ID(SYS_PAUSE_CFG, 11, 20, 11, 4), + [SYS_PAUSE_CFG_PAUSE_STOP] = REG_FIELD_ID(SYS_PAUSE_CFG, 1, 10, 11, 4), + [SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 11, 4), +}; + +static const struct ocelot_stat_layout vsc9953_stats_layout[OCELOT_NUM_STATS] = { + OCELOT_COMMON_STATS, +}; + +static const struct vcap_field vsc9953_vcap_es0_keys[] = { + [VCAP_ES0_EGR_PORT] = { 0, 4}, + [VCAP_ES0_IGR_PORT] = { 4, 4}, + [VCAP_ES0_RSV] = { 8, 2}, + [VCAP_ES0_L2_MC] = { 10, 1}, + [VCAP_ES0_L2_BC] = { 11, 1}, + [VCAP_ES0_VID] = { 12, 12}, + [VCAP_ES0_DP] = { 24, 1}, + [VCAP_ES0_PCP] = { 25, 3}, +}; + +static const struct vcap_field vsc9953_vcap_es0_actions[] = { + [VCAP_ES0_ACT_PUSH_OUTER_TAG] = { 0, 2}, + [VCAP_ES0_ACT_PUSH_INNER_TAG] = { 2, 1}, + [VCAP_ES0_ACT_TAG_A_TPID_SEL] = { 3, 2}, + [VCAP_ES0_ACT_TAG_A_VID_SEL] = { 5, 1}, + [VCAP_ES0_ACT_TAG_A_PCP_SEL] = { 6, 2}, + [VCAP_ES0_ACT_TAG_A_DEI_SEL] = { 8, 2}, + [VCAP_ES0_ACT_TAG_B_TPID_SEL] = { 10, 2}, + [VCAP_ES0_ACT_TAG_B_VID_SEL] = { 12, 1}, + [VCAP_ES0_ACT_TAG_B_PCP_SEL] = { 13, 2}, + [VCAP_ES0_ACT_TAG_B_DEI_SEL] = { 15, 2}, + [VCAP_ES0_ACT_VID_A_VAL] = { 17, 12}, + [VCAP_ES0_ACT_PCP_A_VAL] = { 29, 3}, + [VCAP_ES0_ACT_DEI_A_VAL] = { 32, 1}, + [VCAP_ES0_ACT_VID_B_VAL] = { 33, 12}, + 
[VCAP_ES0_ACT_PCP_B_VAL] = { 45, 3}, + [VCAP_ES0_ACT_DEI_B_VAL] = { 48, 1}, + [VCAP_ES0_ACT_RSV] = { 49, 24}, + [VCAP_ES0_ACT_HIT_STICKY] = { 73, 1}, +}; + +static const struct vcap_field vsc9953_vcap_is1_keys[] = { + [VCAP_IS1_HK_TYPE] = { 0, 1}, + [VCAP_IS1_HK_LOOKUP] = { 1, 2}, + [VCAP_IS1_HK_IGR_PORT_MASK] = { 3, 11}, + [VCAP_IS1_HK_RSV] = { 14, 10}, + /* VCAP_IS1_HK_OAM_Y1731 not supported */ + [VCAP_IS1_HK_L2_MC] = { 24, 1}, + [VCAP_IS1_HK_L2_BC] = { 25, 1}, + [VCAP_IS1_HK_IP_MC] = { 26, 1}, + [VCAP_IS1_HK_VLAN_TAGGED] = { 27, 1}, + [VCAP_IS1_HK_VLAN_DBL_TAGGED] = { 28, 1}, + [VCAP_IS1_HK_TPID] = { 29, 1}, + [VCAP_IS1_HK_VID] = { 30, 12}, + [VCAP_IS1_HK_DEI] = { 42, 1}, + [VCAP_IS1_HK_PCP] = { 43, 3}, + /* Specific Fields for IS1 Half Key S1_NORMAL */ + [VCAP_IS1_HK_L2_SMAC] = { 46, 48}, + [VCAP_IS1_HK_ETYPE_LEN] = { 94, 1}, + [VCAP_IS1_HK_ETYPE] = { 95, 16}, + [VCAP_IS1_HK_IP_SNAP] = {111, 1}, + [VCAP_IS1_HK_IP4] = {112, 1}, + /* Layer-3 Information */ + [VCAP_IS1_HK_L3_FRAGMENT] = {113, 1}, + [VCAP_IS1_HK_L3_FRAG_OFS_GT0] = {114, 1}, + [VCAP_IS1_HK_L3_OPTIONS] = {115, 1}, + [VCAP_IS1_HK_L3_DSCP] = {116, 6}, + [VCAP_IS1_HK_L3_IP4_SIP] = {122, 32}, + /* Layer-4 Information */ + [VCAP_IS1_HK_TCP_UDP] = {154, 1}, + [VCAP_IS1_HK_TCP] = {155, 1}, + [VCAP_IS1_HK_L4_SPORT] = {156, 16}, + [VCAP_IS1_HK_L4_RNG] = {172, 8}, + /* Specific Fields for IS1 Half Key S1_5TUPLE_IP4 */ + [VCAP_IS1_HK_IP4_INNER_TPID] = { 46, 1}, + [VCAP_IS1_HK_IP4_INNER_VID] = { 47, 12}, + [VCAP_IS1_HK_IP4_INNER_DEI] = { 59, 1}, + [VCAP_IS1_HK_IP4_INNER_PCP] = { 60, 3}, + [VCAP_IS1_HK_IP4_IP4] = { 63, 1}, + [VCAP_IS1_HK_IP4_L3_FRAGMENT] = { 64, 1}, + [VCAP_IS1_HK_IP4_L3_FRAG_OFS_GT0] = { 65, 1}, + [VCAP_IS1_HK_IP4_L3_OPTIONS] = { 66, 1}, + [VCAP_IS1_HK_IP4_L3_DSCP] = { 67, 6}, + [VCAP_IS1_HK_IP4_L3_IP4_DIP] = { 73, 32}, + [VCAP_IS1_HK_IP4_L3_IP4_SIP] = {105, 32}, + [VCAP_IS1_HK_IP4_L3_PROTO] = {137, 8}, + [VCAP_IS1_HK_IP4_TCP_UDP] = {145, 1}, + [VCAP_IS1_HK_IP4_TCP] = {146, 1}, + [VCAP_IS1_HK_IP4_L4_RNG] = {147, 8}, + [VCAP_IS1_HK_IP4_IP_PAYLOAD_S1_5TUPLE] = {155, 32}, +}; + +static const struct vcap_field vsc9953_vcap_is1_actions[] = { + [VCAP_IS1_ACT_DSCP_ENA] = { 0, 1}, + [VCAP_IS1_ACT_DSCP_VAL] = { 1, 6}, + [VCAP_IS1_ACT_QOS_ENA] = { 7, 1}, + [VCAP_IS1_ACT_QOS_VAL] = { 8, 3}, + [VCAP_IS1_ACT_DP_ENA] = { 11, 1}, + [VCAP_IS1_ACT_DP_VAL] = { 12, 1}, + [VCAP_IS1_ACT_PAG_OVERRIDE_MASK] = { 13, 8}, + [VCAP_IS1_ACT_PAG_VAL] = { 21, 8}, + [VCAP_IS1_ACT_RSV] = { 29, 11}, + [VCAP_IS1_ACT_VID_REPLACE_ENA] = { 40, 1}, + [VCAP_IS1_ACT_VID_ADD_VAL] = { 41, 12}, + [VCAP_IS1_ACT_FID_SEL] = { 53, 2}, + [VCAP_IS1_ACT_FID_VAL] = { 55, 13}, + [VCAP_IS1_ACT_PCP_DEI_ENA] = { 68, 1}, + [VCAP_IS1_ACT_PCP_VAL] = { 69, 3}, + [VCAP_IS1_ACT_DEI_VAL] = { 72, 1}, + [VCAP_IS1_ACT_VLAN_POP_CNT_ENA] = { 73, 1}, + [VCAP_IS1_ACT_VLAN_POP_CNT] = { 74, 2}, + [VCAP_IS1_ACT_CUSTOM_ACE_TYPE_ENA] = { 76, 4}, + [VCAP_IS1_ACT_HIT_STICKY] = { 80, 1}, +}; + +static struct vcap_field vsc9953_vcap_is2_keys[] = { + /* Common: 41 bits */ + [VCAP_IS2_TYPE] = { 0, 4}, + [VCAP_IS2_HK_FIRST] = { 4, 1}, + [VCAP_IS2_HK_PAG] = { 5, 8}, + [VCAP_IS2_HK_IGR_PORT_MASK] = { 13, 11}, + [VCAP_IS2_HK_RSV2] = { 24, 1}, + [VCAP_IS2_HK_HOST_MATCH] = { 25, 1}, + [VCAP_IS2_HK_L2_MC] = { 26, 1}, + [VCAP_IS2_HK_L2_BC] = { 27, 1}, + [VCAP_IS2_HK_VLAN_TAGGED] = { 28, 1}, + [VCAP_IS2_HK_VID] = { 29, 12}, + [VCAP_IS2_HK_DEI] = { 41, 1}, + [VCAP_IS2_HK_PCP] = { 42, 3}, + /* MAC_ETYPE / MAC_LLC / MAC_SNAP / OAM common */ + [VCAP_IS2_HK_L2_DMAC] = { 45, 48}, + [VCAP_IS2_HK_L2_SMAC] = { 93, 
48}, + /* MAC_ETYPE (TYPE=000) */ + [VCAP_IS2_HK_MAC_ETYPE_ETYPE] = {141, 16}, + [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0] = {157, 16}, + [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1] = {173, 8}, + [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2] = {181, 3}, + /* MAC_LLC (TYPE=001) */ + [VCAP_IS2_HK_MAC_LLC_L2_LLC] = {141, 40}, + /* MAC_SNAP (TYPE=010) */ + [VCAP_IS2_HK_MAC_SNAP_L2_SNAP] = {141, 40}, + /* MAC_ARP (TYPE=011) */ + [VCAP_IS2_HK_MAC_ARP_SMAC] = { 45, 48}, + [VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK] = { 93, 1}, + [VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK] = { 94, 1}, + [VCAP_IS2_HK_MAC_ARP_LEN_OK] = { 95, 1}, + [VCAP_IS2_HK_MAC_ARP_TARGET_MATCH] = { 96, 1}, + [VCAP_IS2_HK_MAC_ARP_SENDER_MATCH] = { 97, 1}, + [VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN] = { 98, 1}, + [VCAP_IS2_HK_MAC_ARP_OPCODE] = { 99, 2}, + [VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP] = {101, 32}, + [VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP] = {133, 32}, + [VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP] = {165, 1}, + /* IP4_TCP_UDP / IP4_OTHER common */ + [VCAP_IS2_HK_IP4] = { 45, 1}, + [VCAP_IS2_HK_L3_FRAGMENT] = { 46, 1}, + [VCAP_IS2_HK_L3_FRAG_OFS_GT0] = { 47, 1}, + [VCAP_IS2_HK_L3_OPTIONS] = { 48, 1}, + [VCAP_IS2_HK_IP4_L3_TTL_GT0] = { 49, 1}, + [VCAP_IS2_HK_L3_TOS] = { 50, 8}, + [VCAP_IS2_HK_L3_IP4_DIP] = { 58, 32}, + [VCAP_IS2_HK_L3_IP4_SIP] = { 90, 32}, + [VCAP_IS2_HK_DIP_EQ_SIP] = {122, 1}, + /* IP4_TCP_UDP (TYPE=100) */ + [VCAP_IS2_HK_TCP] = {123, 1}, + [VCAP_IS2_HK_L4_DPORT] = {124, 16}, + [VCAP_IS2_HK_L4_SPORT] = {140, 16}, + [VCAP_IS2_HK_L4_RNG] = {156, 8}, + [VCAP_IS2_HK_L4_SPORT_EQ_DPORT] = {164, 1}, + [VCAP_IS2_HK_L4_SEQUENCE_EQ0] = {165, 1}, + [VCAP_IS2_HK_L4_FIN] = {166, 1}, + [VCAP_IS2_HK_L4_SYN] = {167, 1}, + [VCAP_IS2_HK_L4_RST] = {168, 1}, + [VCAP_IS2_HK_L4_PSH] = {169, 1}, + [VCAP_IS2_HK_L4_ACK] = {170, 1}, + [VCAP_IS2_HK_L4_URG] = {171, 1}, + /* IP4_OTHER (TYPE=101) */ + [VCAP_IS2_HK_IP4_L3_PROTO] = {123, 8}, + [VCAP_IS2_HK_L3_PAYLOAD] = {131, 56}, + /* IP6_STD (TYPE=110) */ + [VCAP_IS2_HK_IP6_L3_TTL_GT0] = { 45, 1}, + [VCAP_IS2_HK_L3_IP6_SIP] = { 46, 128}, + [VCAP_IS2_HK_IP6_L3_PROTO] = {174, 8}, +}; + +static struct vcap_field vsc9953_vcap_is2_actions[] = { + [VCAP_IS2_ACT_HIT_ME_ONCE] = { 0, 1}, + [VCAP_IS2_ACT_CPU_COPY_ENA] = { 1, 1}, + [VCAP_IS2_ACT_CPU_QU_NUM] = { 2, 3}, + [VCAP_IS2_ACT_MASK_MODE] = { 5, 2}, + [VCAP_IS2_ACT_MIRROR_ENA] = { 7, 1}, + [VCAP_IS2_ACT_LRN_DIS] = { 8, 1}, + [VCAP_IS2_ACT_POLICE_ENA] = { 9, 1}, + [VCAP_IS2_ACT_POLICE_IDX] = { 10, 8}, + [VCAP_IS2_ACT_POLICE_VCAP_ONLY] = { 21, 1}, + [VCAP_IS2_ACT_PORT_MASK] = { 22, 10}, + [VCAP_IS2_ACT_ACL_ID] = { 44, 6}, + [VCAP_IS2_ACT_HIT_CNT] = { 50, 32}, +}; + +static struct vcap_props vsc9953_vcap_props[] = { + [VCAP_ES0] = { + .action_type_width = 0, + .action_table = { + [ES0_ACTION_TYPE_NORMAL] = { + .width = 73, /* HIT_STICKY not included */ + .count = 1, + }, + }, + .target = S0, + .keys = vsc9953_vcap_es0_keys, + .actions = vsc9953_vcap_es0_actions, + }, + [VCAP_IS1] = { + .action_type_width = 0, + .action_table = { + [IS1_ACTION_TYPE_NORMAL] = { + .width = 80, /* HIT_STICKY not included */ + .count = 4, + }, + }, + .target = S1, + .keys = vsc9953_vcap_is1_keys, + .actions = vsc9953_vcap_is1_actions, + }, + [VCAP_IS2] = { + .action_type_width = 1, + .action_table = { + [IS2_ACTION_TYPE_NORMAL] = { + .width = 50, /* HIT_CNT not included */ + .count = 2 + }, + [IS2_ACTION_TYPE_SMAC_SIP] = { + .width = 6, + .count = 4 + }, + }, + .target = S2, + .keys = vsc9953_vcap_is2_keys, + .actions = vsc9953_vcap_is2_actions, + }, +}; + +#define VSC9953_INIT_TIMEOUT 50000 +#define VSC9953_GCB_RST_SLEEP 100 
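+
+/* The sleep and timeout values above and below are expressed in microseconds;
+ * they are passed directly to readx_poll_timeout() by vsc9953_reset().
+ */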
+#define VSC9953_SYS_RAMINIT_SLEEP 80 + +static int vsc9953_gcb_soft_rst_status(struct ocelot *ocelot) +{ + int val; + + ocelot_field_read(ocelot, GCB_SOFT_RST_SWC_RST, &val); + + return val; +} + +static int vsc9953_sys_ram_init_status(struct ocelot *ocelot) +{ + int val; + + ocelot_field_read(ocelot, SYS_RESET_CFG_MEM_INIT, &val); + + return val; +} + + +/* CORE_ENA is in SYS:SYSTEM:RESET_CFG + * MEM_INIT is in SYS:SYSTEM:RESET_CFG + * MEM_ENA is in SYS:SYSTEM:RESET_CFG + */ +static int vsc9953_reset(struct ocelot *ocelot) +{ + int val, err; + + /* soft-reset the switch core */ + ocelot_field_write(ocelot, GCB_SOFT_RST_SWC_RST, 1); + + err = readx_poll_timeout(vsc9953_gcb_soft_rst_status, ocelot, val, !val, + VSC9953_GCB_RST_SLEEP, VSC9953_INIT_TIMEOUT); + if (err) { + dev_err(ocelot->dev, "timeout: switch core reset\n"); + return err; + } + + /* initialize switch mem ~40us */ + ocelot_field_write(ocelot, SYS_RESET_CFG_MEM_ENA, 1); + ocelot_field_write(ocelot, SYS_RESET_CFG_MEM_INIT, 1); + + err = readx_poll_timeout(vsc9953_sys_ram_init_status, ocelot, val, !val, + VSC9953_SYS_RAMINIT_SLEEP, + VSC9953_INIT_TIMEOUT); + if (err) { + dev_err(ocelot->dev, "timeout: switch sram init\n"); + return err; + } + + /* enable switch core */ + ocelot_field_write(ocelot, SYS_RESET_CFG_CORE_ENA, 1); + + return 0; +} + +static void vsc9953_phylink_validate(struct ocelot *ocelot, int port, + unsigned long *supported, + struct phylink_link_state *state) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + + phylink_set_port_modes(mask); + phylink_set(mask, Autoneg); + phylink_set(mask, Pause); + phylink_set(mask, Asym_Pause); + phylink_set(mask, 10baseT_Full); + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 100baseT_Full); + phylink_set(mask, 100baseT_Half); + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); + + if (state->interface == PHY_INTERFACE_MODE_INTERNAL) { + phylink_set(mask, 2500baseT_Full); + phylink_set(mask, 2500baseX_Full); + } + + linkmode_and(supported, supported, mask); + linkmode_and(state->advertising, state->advertising, mask); +} + +/* Watermark encode + * Bit 9: Unit; 0:1, 1:16 + * Bit 8-0: Value to be multiplied with unit + */ +static u16 vsc9953_wm_enc(u16 value) +{ + WARN_ON(value >= 16 * BIT(9)); + + if (value >= BIT(9)) + return BIT(9) | (value / 16); + + return value; +} + +static u16 vsc9953_wm_dec(u16 wm) +{ + WARN_ON(wm & ~GENMASK(9, 0)); + + if (wm & BIT(9)) + return (wm & GENMASK(8, 0)) * 16; + + return wm; +} + +static void vsc9953_wm_stat(u32 val, u32 *inuse, u32 *maxuse) +{ + *inuse = (val & GENMASK(25, 13)) >> 13; + *maxuse = val & GENMASK(12, 0); +} + +static const struct ocelot_ops vsc9953_ops = { + .reset = vsc9953_reset, + .wm_enc = vsc9953_wm_enc, + .wm_dec = vsc9953_wm_dec, + .wm_stat = vsc9953_wm_stat, + .port_to_netdev = felix_port_to_netdev, + .netdev_to_port = felix_netdev_to_port, +}; + +static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot) +{ + struct felix *felix = ocelot_to_felix(ocelot); + struct device *dev = ocelot->dev; + struct mii_bus *bus; + int port; + int rc; + + felix->pcs = devm_kcalloc(dev, felix->info->num_ports, + sizeof(struct phylink_pcs *), + GFP_KERNEL); + if (!felix->pcs) { + dev_err(dev, "failed to allocate array for PCS PHYs\n"); + return -ENOMEM; + } + + rc = mscc_miim_setup(dev, &bus, "VSC9953 internal MDIO bus", + ocelot->targets[GCB], + ocelot->map[GCB][GCB_MIIM_MII_STATUS & REG_MASK], + true); + if (rc) { + dev_err(dev, "failed to setup MDIO bus\n"); + return rc; + } + + /* Needed in order to 
initialize the bus mutex lock */ + rc = devm_of_mdiobus_register(dev, bus, NULL); + if (rc < 0) { + dev_err(dev, "failed to register MDIO bus\n"); + return rc; + } + + felix->imdio = bus; + + for (port = 0; port < felix->info->num_ports; port++) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct phylink_pcs *phylink_pcs; + struct mdio_device *mdio_device; + int addr = port + 4; + + if (dsa_is_unused_port(felix->ds, port)) + continue; + + if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_INTERNAL) + continue; + + mdio_device = mdio_device_create(felix->imdio, addr); + if (IS_ERR(mdio_device)) + continue; + + phylink_pcs = lynx_pcs_create(mdio_device); + if (!phylink_pcs) { + mdio_device_free(mdio_device); + continue; + } + + felix->pcs[port] = phylink_pcs; + + dev_info(dev, "Found PCS at internal MDIO address %d\n", addr); + } + + return 0; +} + +static void vsc9953_mdio_bus_free(struct ocelot *ocelot) +{ + struct felix *felix = ocelot_to_felix(ocelot); + int port; + + for (port = 0; port < ocelot->num_phys_ports; port++) { + struct phylink_pcs *phylink_pcs = felix->pcs[port]; + struct mdio_device *mdio_device; + + if (!phylink_pcs) + continue; + + mdio_device = lynx_get_mdio_device(phylink_pcs); + mdio_device_free(mdio_device); + lynx_pcs_destroy(phylink_pcs); + } + + /* mdiobus_unregister and mdiobus_free handled by devres */ +} + +static const struct felix_info seville_info_vsc9953 = { + .resources = vsc9953_resources, + .num_resources = ARRAY_SIZE(vsc9953_resources), + .resource_names = vsc9953_resource_names, + .regfields = vsc9953_regfields, + .map = vsc9953_regmap, + .ops = &vsc9953_ops, + .stats_layout = vsc9953_stats_layout, + .vcap = vsc9953_vcap_props, + .vcap_pol_base = VSC9953_VCAP_POLICER_BASE, + .vcap_pol_max = VSC9953_VCAP_POLICER_MAX, + .vcap_pol_base2 = VSC9953_VCAP_POLICER_BASE2, + .vcap_pol_max2 = VSC9953_VCAP_POLICER_MAX2, + .num_mact_rows = 2048, + .num_ports = VSC9953_NUM_PORTS, + .num_tx_queues = OCELOT_NUM_TC, + .mdio_bus_alloc = vsc9953_mdio_bus_alloc, + .mdio_bus_free = vsc9953_mdio_bus_free, + .phylink_validate = vsc9953_phylink_validate, + .port_modes = vsc9953_port_modes, +}; + +static int seville_probe(struct platform_device *pdev) +{ + struct dsa_switch *ds; + struct ocelot *ocelot; + struct resource *res; + struct felix *felix; + int err; + + felix = kzalloc(sizeof(struct felix), GFP_KERNEL); + if (!felix) { + err = -ENOMEM; + dev_err(&pdev->dev, "Failed to allocate driver memory\n"); + goto err_alloc_felix; + } + + platform_set_drvdata(pdev, felix); + + ocelot = &felix->ocelot; + ocelot->dev = &pdev->dev; + ocelot->num_flooding_pgids = 1; + felix->info = &seville_info_vsc9953; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + err = -EINVAL; + dev_err(&pdev->dev, "Invalid resource\n"); + goto err_alloc_felix; + } + felix->switch_base = res->start; + + ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL); + if (!ds) { + err = -ENOMEM; + dev_err(&pdev->dev, "Failed to allocate DSA switch\n"); + goto err_alloc_ds; + } + + ds->dev = &pdev->dev; + ds->num_ports = felix->info->num_ports; + ds->ops = &felix_switch_ops; + ds->priv = ocelot; + felix->ds = ds; + felix->tag_proto = DSA_TAG_PROTO_SEVILLE; + + err = dsa_register_switch(ds); + if (err) { + dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err); + goto err_register_ds; + } + + return 0; + +err_register_ds: + kfree(ds); +err_alloc_ds: +err_alloc_felix: + kfree(felix); + return err; +} + +static int seville_remove(struct platform_device *pdev) +{ + struct felix 
*felix = platform_get_drvdata(pdev); + + if (!felix) + return 0; + + dsa_unregister_switch(felix->ds); + + kfree(felix->ds); + kfree(felix); + + return 0; +} + +static void seville_shutdown(struct platform_device *pdev) +{ + struct felix *felix = platform_get_drvdata(pdev); + + if (!felix) + return; + + dsa_switch_shutdown(felix->ds); + + platform_set_drvdata(pdev, NULL); +} + +static const struct of_device_id seville_of_match[] = { + { .compatible = "mscc,vsc9953-switch" }, + { }, +}; +MODULE_DEVICE_TABLE(of, seville_of_match); + +static struct platform_driver seville_vsc9953_driver = { + .probe = seville_probe, + .remove = seville_remove, + .shutdown = seville_shutdown, + .driver = { + .name = "mscc_seville", + .of_match_table = of_match_ptr(seville_of_match), + }, +}; +module_platform_driver(seville_vsc9953_driver); + +MODULE_DESCRIPTION("Seville Switch driver"); +MODULE_LICENSE("GPL v2");
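Note (not part of the patch above): the VCAP key and action tables in the file describe each half-key purely as {bit offset, bit width} pairs, and within one key type consecutive fields tile the key back to back. For the IS2 MAC_ETYPE layout, L2_DMAC at bit 45 (48 bits wide) is followed by L2_SMAC at bit 93, which ends exactly where MAC_ETYPE_ETYPE begins at bit 141. The standalone sketch below only illustrates that tiling; the struct and array names are mine, and the offsets are copied from vsc9953_vcap_is2_keys in the patch.

#include <stdio.h>

/* Illustrative stand-in for the kernel's struct vcap_field {offset, length} */
struct field {
	const char *name;
	unsigned int offset;
	unsigned int length;
};

int main(void)
{
	/* Offsets/widths copied from vsc9953_vcap_is2_keys (MAC_ETYPE half key) */
	static const struct field mac_etype[] = {
		{ "L2_DMAC",			 45, 48 },
		{ "L2_SMAC",			 93, 48 },
		{ "MAC_ETYPE_ETYPE",		141, 16 },
		{ "MAC_ETYPE_L2_PAYLOAD0",	157, 16 },
	};
	unsigned int i;

	for (i = 1; i < sizeof(mac_etype) / sizeof(mac_etype[0]); i++)
		printf("%s ends at bit %u, %s starts at bit %u\n",
		       mac_etype[i - 1].name,
		       mac_etype[i - 1].offset + mac_etype[i - 1].length,
		       mac_etype[i].name, mac_etype[i].offset);

	return 0;
}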
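Note (not part of the patch above): the watermark comment above vsc9953_wm_enc() is easiest to see with a worked value. This is a minimal userspace sketch assuming only the 10-bit encoding that comment describes (bit 9 selects a 1-word or 16-word unit, bits 8:0 hold the multiplier); the wm_enc()/wm_dec() helpers here mirror the driver's, but the macro names and main() harness are illustrative only. Values below 512 round-trip exactly, while larger values are rounded down to a multiple of 16.

#include <stdio.h>

#define WM_UNIT_16	(1u << 9)	/* bit 9: 0 = 1-word unit, 1 = 16-word unit */
#define WM_VALUE_MASK	0x1ffu		/* bits 8:0: multiplier */

static unsigned int wm_enc(unsigned int value)
{
	if (value >= WM_UNIT_16)	/* >= 512 words: switch to the 16-word unit */
		return WM_UNIT_16 | (value / 16);
	return value;
}

static unsigned int wm_dec(unsigned int wm)
{
	if (wm & WM_UNIT_16)
		return (wm & WM_VALUE_MASK) * 16;
	return wm;
}

int main(void)
{
	/* 600 does not fit in 9 bits: enc(600) = 0x225 (unit 16, multiplier 37),
	 * so decoding gives 37 * 16 = 592, i.e. rounded down to a multiple of 16.
	 * 300 fits in 9 bits and is stored exactly.
	 */
	printf("enc(600) = 0x%x, dec(enc(600)) = %u\n", wm_enc(600), wm_dec(wm_enc(600)));
	printf("enc(300) = 0x%x, dec(enc(300)) = %u\n", wm_enc(300), wm_dec(wm_enc(300)));
	return 0;
}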