summaryrefslogtreecommitdiffstats
path: root/net/dsa
diff options
context:
space:
mode:
Diffstat (limited to 'net/dsa')
-rw-r--r--net/dsa/Kconfig131
-rw-r--r--net/dsa/Makefile20
-rw-r--r--net/dsa/dsa.c495
-rw-r--r--net/dsa/dsa2.c1027
-rw-r--r--net/dsa/dsa_priv.h264
-rw-r--r--net/dsa/master.c378
-rw-r--r--net/dsa/port.c814
-rw-r--r--net/dsa/slave.c2251
-rw-r--r--net/dsa/switch.c373
-rw-r--r--net/dsa/tag_8021q.c463
-rw-r--r--net/dsa/tag_ar9331.c93
-rw-r--r--net/dsa/tag_brcm.c236
-rw-r--r--net/dsa/tag_dsa.c151
-rw-r--r--net/dsa/tag_edsa.c202
-rw-r--r--net/dsa/tag_gswip.c112
-rw-r--r--net/dsa/tag_ksz.c202
-rw-r--r--net/dsa/tag_lan9303.c134
-rw-r--r--net/dsa/tag_mtk.c123
-rw-r--r--net/dsa/tag_ocelot.c273
-rw-r--r--net/dsa/tag_qca.c100
-rw-r--r--net/dsa/tag_rtl4_a.c133
-rw-r--r--net/dsa/tag_sja1105.c373
-rw-r--r--net/dsa/tag_trailer.c65
23 files changed, 8413 insertions, 0 deletions
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
new file mode 100644
index 000000000..1f9b9b110
--- /dev/null
+++ b/net/dsa/Kconfig
@@ -0,0 +1,131 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config HAVE_NET_DSA
+ def_bool y
+ depends on INET && NETDEVICES && !S390
+
+# Drivers must select NET_DSA and the appropriate tagging format
+
+menuconfig NET_DSA
+ tristate "Distributed Switch Architecture"
+ depends on HAVE_NET_DSA
+ depends on BRIDGE || BRIDGE=n
+ select GRO_CELLS
+ select NET_SWITCHDEV
+ select PHYLINK
+ select NET_DEVLINK
+ help
+ Say Y if you want to enable support for the hardware switches supported
+ by the Distributed Switch Architecture.
+
+if NET_DSA
+
+# tagging formats
+config NET_DSA_TAG_8021Q
+ tristate
+ select VLAN_8021Q
+ help
+ Unlike the other tagging protocols, the 802.1Q config option simply
+ provides helpers for other tagging implementations that might rely on
+ VLAN in one way or another. It is not a complete solution.
+
+ Drivers which use these helpers should select this as dependency.
+
+config NET_DSA_TAG_AR9331
+ tristate "Tag driver for Atheros AR9331 SoC with built-in switch"
+ help
+ Say Y or M if you want to enable support for tagging frames for
+ the Atheros AR9331 SoC with built-in switch.
+
+config NET_DSA_TAG_BRCM_COMMON
+ tristate
+ default n
+
+config NET_DSA_TAG_BRCM
+ tristate "Tag driver for Broadcom switches using in-frame headers"
+ select NET_DSA_TAG_BRCM_COMMON
+ help
+ Say Y if you want to enable support for tagging frames for the
+ Broadcom switches which place the tag after the MAC source address.
+
+
+config NET_DSA_TAG_BRCM_PREPEND
+ tristate "Tag driver for Broadcom switches using prepended headers"
+ select NET_DSA_TAG_BRCM_COMMON
+ help
+ Say Y if you want to enable support for tagging frames for the
+ Broadcom switches which places the tag before the Ethernet header
+ (prepended).
+
+config NET_DSA_TAG_GSWIP
+ tristate "Tag driver for Lantiq / Intel GSWIP switches"
+ help
+ Say Y or M if you want to enable support for tagging frames for the
+ Lantiq / Intel GSWIP switches.
+
+config NET_DSA_TAG_DSA
+ tristate "Tag driver for Marvell switches using DSA headers"
+ help
+ Say Y or M if you want to enable support for tagging frames for the
+ Marvell switches which use DSA headers.
+
+config NET_DSA_TAG_EDSA
+ tristate "Tag driver for Marvell switches using EtherType DSA headers"
+ help
+ Say Y or M if you want to enable support for tagging frames for the
+ Marvell switches which use EtherType DSA headers.
+
+config NET_DSA_TAG_MTK
+ tristate "Tag driver for Mediatek switches"
+ help
+ Say Y or M if you want to enable support for tagging frames for
+ Mediatek switches.
+
+config NET_DSA_TAG_KSZ
+ tristate "Tag driver for Microchip 8795/9477/9893 families of switches"
+ help
+ Say Y if you want to enable support for tagging frames for the
+ Microchip 8795/9477/9893 families of switches.
+
+config NET_DSA_TAG_RTL4_A
+ tristate "Tag driver for Realtek 4 byte protocol A tags"
+ help
+ Say Y or M if you want to enable support for tagging frames for the
+ Realtek switches with 4 byte protocol A tags, sich as found in
+ the Realtek RTL8366RB.
+
+config NET_DSA_TAG_OCELOT
+ tristate "Tag driver for Ocelot family of switches"
+ select PACKING
+ help
+ Say Y or M if you want to enable support for tagging frames for the
+ Ocelot switches (VSC7511, VSC7512, VSC7513, VSC7514, VSC9959).
+
+config NET_DSA_TAG_QCA
+ tristate "Tag driver for Qualcomm Atheros QCA8K switches"
+ help
+ Say Y or M if you want to enable support for tagging frames for
+ the Qualcomm Atheros QCA8K switches.
+
+config NET_DSA_TAG_LAN9303
+ tristate "Tag driver for SMSC/Microchip LAN9303 family of switches"
+ help
+ Say Y or M if you want to enable support for tagging frames for the
+ SMSC/Microchip LAN9303 family of switches.
+
+config NET_DSA_TAG_SJA1105
+ tristate "Tag driver for NXP SJA1105 switches"
+ select NET_DSA_TAG_8021Q
+ select PACKING
+ help
+ Say Y or M if you want to enable support for tagging frames with the
+ NXP SJA1105 switch family. Both the native tagging protocol (which
+ is only for link-local traffic) as well as non-native tagging (based
+ on a custom 802.1Q VLAN header) are available.
+
+config NET_DSA_TAG_TRAILER
+ tristate "Tag driver for switches using a trailer tag"
+ help
+ Say Y or M if you want to enable support for tagging frames at
+ with a trailed. e.g. Marvell 88E6060.
+
+endif
diff --git a/net/dsa/Makefile b/net/dsa/Makefile
new file mode 100644
index 000000000..4f47b2025
--- /dev/null
+++ b/net/dsa/Makefile
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0
+# the core
+obj-$(CONFIG_NET_DSA) += dsa_core.o
+dsa_core-y += dsa.o dsa2.o master.o port.o slave.o switch.o
+
+# tagging formats
+obj-$(CONFIG_NET_DSA_TAG_8021Q) += tag_8021q.o
+obj-$(CONFIG_NET_DSA_TAG_AR9331) += tag_ar9331.o
+obj-$(CONFIG_NET_DSA_TAG_BRCM_COMMON) += tag_brcm.o
+obj-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o
+obj-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o
+obj-$(CONFIG_NET_DSA_TAG_GSWIP) += tag_gswip.o
+obj-$(CONFIG_NET_DSA_TAG_KSZ) += tag_ksz.o
+obj-$(CONFIG_NET_DSA_TAG_RTL4_A) += tag_rtl4_a.o
+obj-$(CONFIG_NET_DSA_TAG_LAN9303) += tag_lan9303.o
+obj-$(CONFIG_NET_DSA_TAG_MTK) += tag_mtk.o
+obj-$(CONFIG_NET_DSA_TAG_OCELOT) += tag_ocelot.o
+obj-$(CONFIG_NET_DSA_TAG_QCA) += tag_qca.o
+obj-$(CONFIG_NET_DSA_TAG_SJA1105) += tag_sja1105.o
+obj-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
new file mode 100644
index 000000000..2131bf2b3
--- /dev/null
+++ b/net/dsa/dsa.c
@@ -0,0 +1,495 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * net/dsa/dsa.c - Hardware switch handling
+ * Copyright (c) 2008-2009 Marvell Semiconductor
+ * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
+ */
+
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/netdevice.h>
+#include <linux/sysfs.h>
+#include <linux/phy_fixed.h>
+#include <linux/ptp_classify.h>
+#include <linux/etherdevice.h>
+
+#include "dsa_priv.h"
+
+static LIST_HEAD(dsa_tag_drivers_list);
+static DEFINE_MUTEX(dsa_tag_drivers_lock);
+
+static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ /* Just return the original SKB */
+ return skb;
+}
+
+static const struct dsa_device_ops none_ops = {
+ .name = "none",
+ .proto = DSA_TAG_PROTO_NONE,
+ .xmit = dsa_slave_notag_xmit,
+ .rcv = NULL,
+};
+
+DSA_TAG_DRIVER(none_ops);
+
+static void dsa_tag_driver_register(struct dsa_tag_driver *dsa_tag_driver,
+ struct module *owner)
+{
+ dsa_tag_driver->owner = owner;
+
+ mutex_lock(&dsa_tag_drivers_lock);
+ list_add_tail(&dsa_tag_driver->list, &dsa_tag_drivers_list);
+ mutex_unlock(&dsa_tag_drivers_lock);
+}
+
+void dsa_tag_drivers_register(struct dsa_tag_driver *dsa_tag_driver_array[],
+ unsigned int count, struct module *owner)
+{
+ unsigned int i;
+
+ for (i = 0; i < count; i++)
+ dsa_tag_driver_register(dsa_tag_driver_array[i], owner);
+}
+
+static void dsa_tag_driver_unregister(struct dsa_tag_driver *dsa_tag_driver)
+{
+ mutex_lock(&dsa_tag_drivers_lock);
+ list_del(&dsa_tag_driver->list);
+ mutex_unlock(&dsa_tag_drivers_lock);
+}
+EXPORT_SYMBOL_GPL(dsa_tag_drivers_register);
+
+void dsa_tag_drivers_unregister(struct dsa_tag_driver *dsa_tag_driver_array[],
+ unsigned int count)
+{
+ unsigned int i;
+
+ for (i = 0; i < count; i++)
+ dsa_tag_driver_unregister(dsa_tag_driver_array[i]);
+}
+EXPORT_SYMBOL_GPL(dsa_tag_drivers_unregister);
+
+const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops)
+{
+ return ops->name;
+};
+
+const struct dsa_device_ops *dsa_tag_driver_get(int tag_protocol)
+{
+ struct dsa_tag_driver *dsa_tag_driver;
+ const struct dsa_device_ops *ops;
+ bool found = false;
+
+ request_module("%s%d", DSA_TAG_DRIVER_ALIAS, tag_protocol);
+
+ mutex_lock(&dsa_tag_drivers_lock);
+ list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
+ ops = dsa_tag_driver->ops;
+ if (ops->proto == tag_protocol) {
+ found = true;
+ break;
+ }
+ }
+
+ if (found) {
+ if (!try_module_get(dsa_tag_driver->owner))
+ ops = ERR_PTR(-ENOPROTOOPT);
+ } else {
+ ops = ERR_PTR(-ENOPROTOOPT);
+ }
+
+ mutex_unlock(&dsa_tag_drivers_lock);
+
+ return ops;
+}
+
+void dsa_tag_driver_put(const struct dsa_device_ops *ops)
+{
+ struct dsa_tag_driver *dsa_tag_driver;
+
+ mutex_lock(&dsa_tag_drivers_lock);
+ list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
+ if (dsa_tag_driver->ops == ops) {
+ module_put(dsa_tag_driver->owner);
+ break;
+ }
+ }
+ mutex_unlock(&dsa_tag_drivers_lock);
+}
+
+static int dev_is_class(struct device *dev, void *class)
+{
+ if (dev->class != NULL && !strcmp(dev->class->name, class))
+ return 1;
+
+ return 0;
+}
+
+static struct device *dev_find_class(struct device *parent, char *class)
+{
+ if (dev_is_class(parent, class)) {
+ get_device(parent);
+ return parent;
+ }
+
+ return device_find_child(parent, class, dev_is_class);
+}
+
+struct net_device *dsa_dev_to_net_device(struct device *dev)
+{
+ struct device *d;
+
+ d = dev_find_class(dev, "net");
+ if (d != NULL) {
+ struct net_device *nd;
+
+ nd = to_net_dev(d);
+ dev_hold(nd);
+ put_device(d);
+
+ return nd;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(dsa_dev_to_net_device);
+
+/* Determine if we should defer delivery of skb until we have a rx timestamp.
+ *
+ * Called from dsa_switch_rcv. For now, this will only work if tagging is
+ * enabled on the switch. Normally the MAC driver would retrieve the hardware
+ * timestamp when it reads the packet out of the hardware. However in a DSA
+ * switch, the DSA driver owning the interface to which the packet is
+ * delivered is never notified unless we do so here.
+ */
+static bool dsa_skb_defer_rx_timestamp(struct dsa_slave_priv *p,
+ struct sk_buff *skb)
+{
+ struct dsa_switch *ds = p->dp->ds;
+ unsigned int type;
+
+ if (skb_headroom(skb) < ETH_HLEN)
+ return false;
+
+ __skb_push(skb, ETH_HLEN);
+
+ type = ptp_classify_raw(skb);
+
+ __skb_pull(skb, ETH_HLEN);
+
+ if (type == PTP_CLASS_NONE)
+ return false;
+
+ if (likely(ds->ops->port_rxtstamp))
+ return ds->ops->port_rxtstamp(ds, p->dp->index, skb, type);
+
+ return false;
+}
+
+static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *unused)
+{
+ struct dsa_port *cpu_dp = dev->dsa_ptr;
+ struct sk_buff *nskb = NULL;
+ struct pcpu_sw_netstats *s;
+ struct dsa_slave_priv *p;
+
+ if (unlikely(!cpu_dp)) {
+ kfree_skb(skb);
+ return 0;
+ }
+
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (!skb)
+ return 0;
+
+ nskb = cpu_dp->rcv(skb, dev, pt);
+ if (!nskb) {
+ kfree_skb(skb);
+ return 0;
+ }
+
+ skb = nskb;
+ p = netdev_priv(skb->dev);
+ skb_push(skb, ETH_HLEN);
+ skb->pkt_type = PACKET_HOST;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ if (unlikely(cpu_dp->ds->untag_bridge_pvid)) {
+ nskb = dsa_untag_bridge_pvid(skb);
+ if (!nskb) {
+ kfree_skb(skb);
+ return 0;
+ }
+ skb = nskb;
+ }
+
+ s = this_cpu_ptr(p->stats64);
+ u64_stats_update_begin(&s->syncp);
+ s->rx_packets++;
+ s->rx_bytes += skb->len;
+ u64_stats_update_end(&s->syncp);
+
+ if (dsa_skb_defer_rx_timestamp(p, skb))
+ return 0;
+
+ gro_cells_receive(&p->gcells, skb);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
+{
+ const struct dsa_port *dp = dsa_to_port(ds, p);
+
+ return dp->type == DSA_PORT_TYPE_USER && dp->slave;
+}
+
+int dsa_switch_suspend(struct dsa_switch *ds)
+{
+ int i, ret = 0;
+
+ /* Suspend slave network devices */
+ for (i = 0; i < ds->num_ports; i++) {
+ if (!dsa_is_port_initialized(ds, i))
+ continue;
+
+ ret = dsa_slave_suspend(dsa_to_port(ds, i)->slave);
+ if (ret)
+ return ret;
+ }
+
+ if (ds->ops->suspend)
+ ret = ds->ops->suspend(ds);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dsa_switch_suspend);
+
+int dsa_switch_resume(struct dsa_switch *ds)
+{
+ int i, ret = 0;
+
+ if (ds->ops->resume)
+ ret = ds->ops->resume(ds);
+
+ if (ret)
+ return ret;
+
+ /* Resume slave network devices */
+ for (i = 0; i < ds->num_ports; i++) {
+ if (!dsa_is_port_initialized(ds, i))
+ continue;
+
+ ret = dsa_slave_resume(dsa_to_port(ds, i)->slave);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dsa_switch_resume);
+#endif
+
+static struct packet_type dsa_pack_type __read_mostly = {
+ .type = cpu_to_be16(ETH_P_XDSA),
+ .func = dsa_switch_rcv,
+};
+
+static struct workqueue_struct *dsa_owq;
+
+bool dsa_schedule_work(struct work_struct *work)
+{
+ return queue_work(dsa_owq, work);
+}
+
+static ATOMIC_NOTIFIER_HEAD(dsa_notif_chain);
+
+int register_dsa_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&dsa_notif_chain, nb);
+}
+EXPORT_SYMBOL_GPL(register_dsa_notifier);
+
+int unregister_dsa_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&dsa_notif_chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_dsa_notifier);
+
+int call_dsa_notifiers(unsigned long val, struct net_device *dev,
+ struct dsa_notifier_info *info)
+{
+ info->dev = dev;
+ return atomic_notifier_call_chain(&dsa_notif_chain, val, info);
+}
+EXPORT_SYMBOL_GPL(call_dsa_notifiers);
+
+int dsa_devlink_param_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+
+ if (!ds->ops->devlink_param_get)
+ return -EOPNOTSUPP;
+
+ return ds->ops->devlink_param_get(ds, id, ctx);
+}
+EXPORT_SYMBOL_GPL(dsa_devlink_param_get);
+
+int dsa_devlink_param_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+
+ if (!ds->ops->devlink_param_set)
+ return -EOPNOTSUPP;
+
+ return ds->ops->devlink_param_set(ds, id, ctx);
+}
+EXPORT_SYMBOL_GPL(dsa_devlink_param_set);
+
+int dsa_devlink_params_register(struct dsa_switch *ds,
+ const struct devlink_param *params,
+ size_t params_count)
+{
+ return devlink_params_register(ds->devlink, params, params_count);
+}
+EXPORT_SYMBOL_GPL(dsa_devlink_params_register);
+
+void dsa_devlink_params_unregister(struct dsa_switch *ds,
+ const struct devlink_param *params,
+ size_t params_count)
+{
+ devlink_params_unregister(ds->devlink, params, params_count);
+}
+EXPORT_SYMBOL_GPL(dsa_devlink_params_unregister);
+
+int dsa_devlink_resource_register(struct dsa_switch *ds,
+ const char *resource_name,
+ u64 resource_size,
+ u64 resource_id,
+ u64 parent_resource_id,
+ const struct devlink_resource_size_params *size_params)
+{
+ return devlink_resource_register(ds->devlink, resource_name,
+ resource_size, resource_id,
+ parent_resource_id,
+ size_params);
+}
+EXPORT_SYMBOL_GPL(dsa_devlink_resource_register);
+
+void dsa_devlink_resources_unregister(struct dsa_switch *ds)
+{
+ devlink_resources_unregister(ds->devlink, NULL);
+}
+EXPORT_SYMBOL_GPL(dsa_devlink_resources_unregister);
+
+void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds,
+ u64 resource_id,
+ devlink_resource_occ_get_t *occ_get,
+ void *occ_get_priv)
+{
+ return devlink_resource_occ_get_register(ds->devlink, resource_id,
+ occ_get, occ_get_priv);
+}
+EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_register);
+
+void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
+ u64 resource_id)
+{
+ devlink_resource_occ_get_unregister(ds->devlink, resource_id);
+}
+EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_unregister);
+
+struct devlink_region *
+dsa_devlink_region_create(struct dsa_switch *ds,
+ const struct devlink_region_ops *ops,
+ u32 region_max_snapshots, u64 region_size)
+{
+ return devlink_region_create(ds->devlink, ops, region_max_snapshots,
+ region_size);
+}
+EXPORT_SYMBOL_GPL(dsa_devlink_region_create);
+
+struct devlink_region *
+dsa_devlink_port_region_create(struct dsa_switch *ds,
+ int port,
+ const struct devlink_port_region_ops *ops,
+ u32 region_max_snapshots, u64 region_size)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+
+ return devlink_port_region_create(&dp->devlink_port, ops,
+ region_max_snapshots,
+ region_size);
+}
+EXPORT_SYMBOL_GPL(dsa_devlink_port_region_create);
+
+void dsa_devlink_region_destroy(struct devlink_region *region)
+{
+ devlink_region_destroy(region);
+}
+EXPORT_SYMBOL_GPL(dsa_devlink_region_destroy);
+
+struct dsa_port *dsa_port_from_netdev(struct net_device *netdev)
+{
+ if (!netdev || !dsa_slave_dev_check(netdev))
+ return ERR_PTR(-ENODEV);
+
+ return dsa_slave_to_port(netdev);
+}
+EXPORT_SYMBOL_GPL(dsa_port_from_netdev);
+
+static int __init dsa_init_module(void)
+{
+ int rc;
+
+ dsa_owq = alloc_ordered_workqueue("dsa_ordered",
+ WQ_MEM_RECLAIM);
+ if (!dsa_owq)
+ return -ENOMEM;
+
+ rc = dsa_slave_register_notifier();
+ if (rc)
+ goto register_notifier_fail;
+
+ dev_add_pack(&dsa_pack_type);
+
+ dsa_tag_driver_register(&DSA_TAG_DRIVER_NAME(none_ops),
+ THIS_MODULE);
+
+ return 0;
+
+register_notifier_fail:
+ destroy_workqueue(dsa_owq);
+
+ return rc;
+}
+module_init(dsa_init_module);
+
+static void __exit dsa_cleanup_module(void)
+{
+ dsa_tag_driver_unregister(&DSA_TAG_DRIVER_NAME(none_ops));
+
+ dsa_slave_unregister_notifier();
+ dev_remove_pack(&dsa_pack_type);
+ destroy_workqueue(dsa_owq);
+}
+module_exit(dsa_cleanup_module);
+
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
+MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:dsa");
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
new file mode 100644
index 000000000..f543fca6d
--- /dev/null
+++ b/net/dsa/dsa2.c
@@ -0,0 +1,1027 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * net/dsa/dsa2.c - Hardware switch handling, binding version 2
+ * Copyright (c) 2008-2009 Marvell Semiconductor
+ * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
+ * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/rtnetlink.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <net/devlink.h>
+
+#include "dsa_priv.h"
+
+static DEFINE_MUTEX(dsa2_mutex);
+LIST_HEAD(dsa_tree_list);
+
+struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
+{
+ struct dsa_switch_tree *dst;
+ struct dsa_port *dp;
+
+ list_for_each_entry(dst, &dsa_tree_list, list) {
+ if (dst->index != tree_index)
+ continue;
+
+ list_for_each_entry(dp, &dst->ports, list) {
+ if (dp->ds->index != sw_index)
+ continue;
+
+ return dp->ds;
+ }
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(dsa_switch_find);
+
+static struct dsa_switch_tree *dsa_tree_find(int index)
+{
+ struct dsa_switch_tree *dst;
+
+ list_for_each_entry(dst, &dsa_tree_list, list)
+ if (dst->index == index)
+ return dst;
+
+ return NULL;
+}
+
+static struct dsa_switch_tree *dsa_tree_alloc(int index)
+{
+ struct dsa_switch_tree *dst;
+
+ dst = kzalloc(sizeof(*dst), GFP_KERNEL);
+ if (!dst)
+ return NULL;
+
+ dst->index = index;
+
+ INIT_LIST_HEAD(&dst->rtable);
+
+ INIT_LIST_HEAD(&dst->ports);
+
+ INIT_LIST_HEAD(&dst->list);
+ list_add_tail(&dst->list, &dsa_tree_list);
+
+ kref_init(&dst->refcount);
+
+ return dst;
+}
+
+static void dsa_tree_free(struct dsa_switch_tree *dst)
+{
+ list_del(&dst->list);
+ kfree(dst);
+}
+
+static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
+{
+ if (dst)
+ kref_get(&dst->refcount);
+
+ return dst;
+}
+
+static struct dsa_switch_tree *dsa_tree_touch(int index)
+{
+ struct dsa_switch_tree *dst;
+
+ dst = dsa_tree_find(index);
+ if (dst)
+ return dsa_tree_get(dst);
+ else
+ return dsa_tree_alloc(index);
+}
+
+static void dsa_tree_release(struct kref *ref)
+{
+ struct dsa_switch_tree *dst;
+
+ dst = container_of(ref, struct dsa_switch_tree, refcount);
+
+ dsa_tree_free(dst);
+}
+
+static void dsa_tree_put(struct dsa_switch_tree *dst)
+{
+ if (dst)
+ kref_put(&dst->refcount, dsa_tree_release);
+}
+
+static bool dsa_port_is_dsa(struct dsa_port *port)
+{
+ return port->type == DSA_PORT_TYPE_DSA;
+}
+
+static bool dsa_port_is_cpu(struct dsa_port *port)
+{
+ return port->type == DSA_PORT_TYPE_CPU;
+}
+
+static bool dsa_port_is_user(struct dsa_port *dp)
+{
+ return dp->type == DSA_PORT_TYPE_USER;
+}
+
+static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
+ struct device_node *dn)
+{
+ struct dsa_port *dp;
+
+ list_for_each_entry(dp, &dst->ports, list)
+ if (dp->dn == dn)
+ return dp;
+
+ return NULL;
+}
+
+static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
+ struct dsa_port *link_dp)
+{
+ struct dsa_switch *ds = dp->ds;
+ struct dsa_switch_tree *dst;
+ struct dsa_link *dl;
+
+ dst = ds->dst;
+
+ list_for_each_entry(dl, &dst->rtable, list)
+ if (dl->dp == dp && dl->link_dp == link_dp)
+ return dl;
+
+ dl = kzalloc(sizeof(*dl), GFP_KERNEL);
+ if (!dl)
+ return NULL;
+
+ dl->dp = dp;
+ dl->link_dp = link_dp;
+
+ INIT_LIST_HEAD(&dl->list);
+ list_add_tail(&dl->list, &dst->rtable);
+
+ return dl;
+}
+
+static bool dsa_port_setup_routing_table(struct dsa_port *dp)
+{
+ struct dsa_switch *ds = dp->ds;
+ struct dsa_switch_tree *dst = ds->dst;
+ struct device_node *dn = dp->dn;
+ struct of_phandle_iterator it;
+ struct dsa_port *link_dp;
+ struct dsa_link *dl;
+ int err;
+
+ of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
+ link_dp = dsa_tree_find_port_by_node(dst, it.node);
+ if (!link_dp) {
+ of_node_put(it.node);
+ return false;
+ }
+
+ dl = dsa_link_touch(dp, link_dp);
+ if (!dl) {
+ of_node_put(it.node);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
+{
+ bool complete = true;
+ struct dsa_port *dp;
+
+ list_for_each_entry(dp, &dst->ports, list) {
+ if (dsa_port_is_dsa(dp)) {
+ complete = dsa_port_setup_routing_table(dp);
+ if (!complete)
+ break;
+ }
+ }
+
+ return complete;
+}
+
+static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
+{
+ struct dsa_port *dp;
+
+ list_for_each_entry(dp, &dst->ports, list)
+ if (dsa_port_is_cpu(dp))
+ return dp;
+
+ return NULL;
+}
+
+static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
+{
+ struct dsa_port *cpu_dp, *dp;
+
+ cpu_dp = dsa_tree_find_first_cpu(dst);
+ if (!cpu_dp) {
+ pr_err("DSA: tree %d has no CPU port\n", dst->index);
+ return -EINVAL;
+ }
+
+ /* Assign the default CPU port to all ports of the fabric */
+ list_for_each_entry(dp, &dst->ports, list)
+ if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
+ dp->cpu_dp = cpu_dp;
+
+ return 0;
+}
+
+static void dsa_tree_teardown_default_cpu(struct dsa_switch_tree *dst)
+{
+ struct dsa_port *dp;
+
+ list_for_each_entry(dp, &dst->ports, list)
+ if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
+ dp->cpu_dp = NULL;
+}
+
+static int dsa_port_setup(struct dsa_port *dp)
+{
+ struct devlink_port *dlp = &dp->devlink_port;
+ bool dsa_port_link_registered = false;
+ bool dsa_port_enabled = false;
+ int err = 0;
+
+ if (dp->setup)
+ return 0;
+
+ switch (dp->type) {
+ case DSA_PORT_TYPE_UNUSED:
+ dsa_port_disable(dp);
+ break;
+ case DSA_PORT_TYPE_CPU:
+ err = dsa_port_link_register_of(dp);
+ if (err)
+ break;
+ dsa_port_link_registered = true;
+
+ err = dsa_port_enable(dp, NULL);
+ if (err)
+ break;
+ dsa_port_enabled = true;
+
+ break;
+ case DSA_PORT_TYPE_DSA:
+ err = dsa_port_link_register_of(dp);
+ if (err)
+ break;
+ dsa_port_link_registered = true;
+
+ err = dsa_port_enable(dp, NULL);
+ if (err)
+ break;
+ dsa_port_enabled = true;
+
+ break;
+ case DSA_PORT_TYPE_USER:
+ dp->mac = of_get_mac_address(dp->dn);
+ err = dsa_slave_create(dp);
+ if (err)
+ break;
+
+ devlink_port_type_eth_set(dlp, dp->slave);
+ break;
+ }
+
+ if (err && dsa_port_enabled)
+ dsa_port_disable(dp);
+ if (err && dsa_port_link_registered)
+ dsa_port_link_unregister_of(dp);
+ if (err)
+ return err;
+
+ dp->setup = true;
+
+ return 0;
+}
+
+static int dsa_port_devlink_setup(struct dsa_port *dp)
+{
+ struct devlink_port *dlp = &dp->devlink_port;
+ struct dsa_switch_tree *dst = dp->ds->dst;
+ struct devlink_port_attrs attrs = {};
+ struct devlink *dl = dp->ds->devlink;
+ const unsigned char *id;
+ unsigned char len;
+ int err;
+
+ id = (const unsigned char *)&dst->index;
+ len = sizeof(dst->index);
+
+ attrs.phys.port_number = dp->index;
+ memcpy(attrs.switch_id.id, id, len);
+ attrs.switch_id.id_len = len;
+ memset(dlp, 0, sizeof(*dlp));
+
+ switch (dp->type) {
+ case DSA_PORT_TYPE_UNUSED:
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
+ break;
+ case DSA_PORT_TYPE_CPU:
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_CPU;
+ break;
+ case DSA_PORT_TYPE_DSA:
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_DSA;
+ break;
+ case DSA_PORT_TYPE_USER:
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ break;
+ }
+
+ devlink_port_attrs_set(dlp, &attrs);
+ err = devlink_port_register(dl, dlp, dp->index);
+
+ if (!err)
+ dp->devlink_port_setup = true;
+
+ return err;
+}
+
+static void dsa_port_teardown(struct dsa_port *dp)
+{
+ struct devlink_port *dlp = &dp->devlink_port;
+
+ if (!dp->setup)
+ return;
+
+ devlink_port_type_clear(dlp);
+
+ switch (dp->type) {
+ case DSA_PORT_TYPE_UNUSED:
+ break;
+ case DSA_PORT_TYPE_CPU:
+ dsa_port_disable(dp);
+ dsa_tag_driver_put(dp->tag_ops);
+ dsa_port_link_unregister_of(dp);
+ break;
+ case DSA_PORT_TYPE_DSA:
+ dsa_port_disable(dp);
+ dsa_port_link_unregister_of(dp);
+ break;
+ case DSA_PORT_TYPE_USER:
+ if (dp->slave) {
+ dsa_slave_destroy(dp->slave);
+ dp->slave = NULL;
+ }
+ break;
+ }
+
+ dp->setup = false;
+}
+
+static void dsa_port_devlink_teardown(struct dsa_port *dp)
+{
+ struct devlink_port *dlp = &dp->devlink_port;
+
+ if (dp->devlink_port_setup)
+ devlink_port_unregister(dlp);
+ dp->devlink_port_setup = false;
+}
+
+static int dsa_devlink_info_get(struct devlink *dl,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+
+ if (ds->ops->devlink_info_get)
+ return ds->ops->devlink_info_get(ds, req, extack);
+
+ return -EOPNOTSUPP;
+}
+
+static const struct devlink_ops dsa_devlink_ops = {
+ .info_get = dsa_devlink_info_get,
+};
+
+static int dsa_switch_setup(struct dsa_switch *ds)
+{
+ struct dsa_devlink_priv *dl_priv;
+ struct dsa_port *dp;
+ int err;
+
+ if (ds->setup)
+ return 0;
+
+ /* Initialize ds->phys_mii_mask before registering the slave MDIO bus
+ * driver and before ops->setup() has run, since the switch drivers and
+ * the slave MDIO bus driver rely on these values for probing PHY
+ * devices or not
+ */
+ ds->phys_mii_mask |= dsa_user_ports(ds);
+
+ /* Add the switch to devlink before calling setup, so that setup can
+ * add dpipe tables
+ */
+ ds->devlink = devlink_alloc(&dsa_devlink_ops, sizeof(*dl_priv));
+ if (!ds->devlink)
+ return -ENOMEM;
+ dl_priv = devlink_priv(ds->devlink);
+ dl_priv->ds = ds;
+
+ err = devlink_register(ds->devlink, ds->dev);
+ if (err)
+ goto free_devlink;
+
+ /* Setup devlink port instances now, so that the switch
+ * setup() can register regions etc, against the ports
+ */
+ list_for_each_entry(dp, &ds->dst->ports, list) {
+ if (dp->ds == ds) {
+ err = dsa_port_devlink_setup(dp);
+ if (err)
+ goto unregister_devlink_ports;
+ }
+ }
+
+ err = dsa_switch_register_notifier(ds);
+ if (err)
+ goto unregister_devlink_ports;
+
+ err = ds->ops->setup(ds);
+ if (err < 0)
+ goto unregister_notifier;
+
+ devlink_params_publish(ds->devlink);
+
+ if (!ds->slave_mii_bus && ds->ops->phy_read) {
+ ds->slave_mii_bus = mdiobus_alloc();
+ if (!ds->slave_mii_bus) {
+ err = -ENOMEM;
+ goto teardown;
+ }
+
+ dsa_slave_mii_bus_init(ds);
+
+ err = mdiobus_register(ds->slave_mii_bus);
+ if (err < 0)
+ goto free_slave_mii_bus;
+ }
+
+ ds->setup = true;
+
+ return 0;
+
+free_slave_mii_bus:
+ if (ds->slave_mii_bus && ds->ops->phy_read)
+ mdiobus_free(ds->slave_mii_bus);
+teardown:
+ if (ds->ops->teardown)
+ ds->ops->teardown(ds);
+unregister_notifier:
+ dsa_switch_unregister_notifier(ds);
+unregister_devlink_ports:
+ list_for_each_entry(dp, &ds->dst->ports, list)
+ if (dp->ds == ds)
+ dsa_port_devlink_teardown(dp);
+ devlink_unregister(ds->devlink);
+free_devlink:
+ devlink_free(ds->devlink);
+ ds->devlink = NULL;
+
+ return err;
+}
+
+static void dsa_switch_teardown(struct dsa_switch *ds)
+{
+ struct dsa_port *dp;
+
+ if (!ds->setup)
+ return;
+
+ if (ds->slave_mii_bus && ds->ops->phy_read) {
+ mdiobus_unregister(ds->slave_mii_bus);
+ mdiobus_free(ds->slave_mii_bus);
+ ds->slave_mii_bus = NULL;
+ }
+
+ dsa_switch_unregister_notifier(ds);
+
+ if (ds->ops->teardown)
+ ds->ops->teardown(ds);
+
+ if (ds->devlink) {
+ list_for_each_entry(dp, &ds->dst->ports, list)
+ if (dp->ds == ds)
+ dsa_port_devlink_teardown(dp);
+ devlink_unregister(ds->devlink);
+ devlink_free(ds->devlink);
+ ds->devlink = NULL;
+ }
+
+ ds->setup = false;
+}
+
+static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
+{
+ struct dsa_port *dp;
+ int err;
+
+ list_for_each_entry(dp, &dst->ports, list) {
+ err = dsa_switch_setup(dp->ds);
+ if (err)
+ goto teardown;
+ }
+
+ list_for_each_entry(dp, &dst->ports, list) {
+ err = dsa_port_setup(dp);
+ if (err) {
+ dsa_port_devlink_teardown(dp);
+ dp->type = DSA_PORT_TYPE_UNUSED;
+ err = dsa_port_devlink_setup(dp);
+ if (err)
+ goto teardown;
+ continue;
+ }
+ }
+
+ return 0;
+
+teardown:
+ list_for_each_entry(dp, &dst->ports, list)
+ dsa_port_teardown(dp);
+
+ list_for_each_entry(dp, &dst->ports, list)
+ dsa_switch_teardown(dp->ds);
+
+ return err;
+}
+
+static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
+{
+ struct dsa_port *dp;
+
+ list_for_each_entry(dp, &dst->ports, list)
+ dsa_port_teardown(dp);
+
+ list_for_each_entry(dp, &dst->ports, list)
+ dsa_switch_teardown(dp->ds);
+}
+
+static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
+{
+ struct dsa_port *dp;
+ int err;
+
+ list_for_each_entry(dp, &dst->ports, list) {
+ if (dsa_port_is_cpu(dp)) {
+ err = dsa_master_setup(dp->master, dp);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
+{
+ struct dsa_port *dp;
+
+ list_for_each_entry(dp, &dst->ports, list)
+ if (dsa_port_is_cpu(dp))
+ dsa_master_teardown(dp->master);
+}
+
+static int dsa_tree_setup(struct dsa_switch_tree *dst)
+{
+ bool complete;
+ int err;
+
+ if (dst->setup) {
+ pr_err("DSA: tree %d already setup! Disjoint trees?\n",
+ dst->index);
+ return -EEXIST;
+ }
+
+ complete = dsa_tree_setup_routing_table(dst);
+ if (!complete)
+ return 0;
+
+ err = dsa_tree_setup_default_cpu(dst);
+ if (err)
+ return err;
+
+ err = dsa_tree_setup_switches(dst);
+ if (err)
+ goto teardown_default_cpu;
+
+ err = dsa_tree_setup_master(dst);
+ if (err)
+ goto teardown_switches;
+
+ dst->setup = true;
+
+ pr_info("DSA: tree %d setup\n", dst->index);
+
+ return 0;
+
+teardown_switches:
+ dsa_tree_teardown_switches(dst);
+teardown_default_cpu:
+ dsa_tree_teardown_default_cpu(dst);
+
+ return err;
+}
+
+static void dsa_tree_teardown(struct dsa_switch_tree *dst)
+{
+ struct dsa_link *dl, *next;
+
+ if (!dst->setup)
+ return;
+
+ dsa_tree_teardown_master(dst);
+
+ dsa_tree_teardown_switches(dst);
+
+ dsa_tree_teardown_default_cpu(dst);
+
+ list_for_each_entry_safe(dl, next, &dst->rtable, list) {
+ list_del(&dl->list);
+ kfree(dl);
+ }
+
+ pr_info("DSA: tree %d torn down\n", dst->index);
+
+ dst->setup = false;
+}
+
+static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
+{
+ struct dsa_switch_tree *dst = ds->dst;
+ struct dsa_port *dp;
+
+ list_for_each_entry(dp, &dst->ports, list)
+ if (dp->ds == ds && dp->index == index)
+ return dp;
+
+ dp = kzalloc(sizeof(*dp), GFP_KERNEL);
+ if (!dp)
+ return NULL;
+
+ dp->ds = ds;
+ dp->index = index;
+
+ INIT_LIST_HEAD(&dp->list);
+ list_add_tail(&dp->list, &dst->ports);
+
+ return dp;
+}
+
+static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
+{
+ if (!name)
+ name = "eth%d";
+
+ dp->type = DSA_PORT_TYPE_USER;
+ dp->name = name;
+
+ return 0;
+}
+
+static int dsa_port_parse_dsa(struct dsa_port *dp)
+{
+ dp->type = DSA_PORT_TYPE_DSA;
+
+ return 0;
+}
+
+static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
+ struct net_device *master)
+{
+ enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE;
+ struct dsa_switch *mds, *ds = dp->ds;
+ unsigned int mdp_upstream;
+ struct dsa_port *mdp;
+
+ /* It is possible to stack DSA switches onto one another when that
+ * happens the switch driver may want to know if its tagging protocol
+ * is going to work in such a configuration.
+ */
+ if (dsa_slave_dev_check(master)) {
+ mdp = dsa_slave_to_port(master);
+ mds = mdp->ds;
+ mdp_upstream = dsa_upstream_port(mds, mdp->index);
+ tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
+ DSA_TAG_PROTO_NONE);
+ }
+
+ /* If the master device is not itself a DSA slave in a disjoint DSA
+ * tree, then return immediately.
+ */
+ return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
+}
+
+static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master)
+{
+ struct dsa_switch *ds = dp->ds;
+ struct dsa_switch_tree *dst = ds->dst;
+ const struct dsa_device_ops *tag_ops;
+ enum dsa_tag_protocol tag_protocol;
+
+ tag_protocol = dsa_get_tag_protocol(dp, master);
+ tag_ops = dsa_tag_driver_get(tag_protocol);
+ if (IS_ERR(tag_ops)) {
+ if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
+ return -EPROBE_DEFER;
+ dev_warn(ds->dev, "No tagger for this switch\n");
+ dp->master = NULL;
+ return PTR_ERR(tag_ops);
+ }
+
+ dp->master = master;
+ dp->type = DSA_PORT_TYPE_CPU;
+ dp->filter = tag_ops->filter;
+ dp->rcv = tag_ops->rcv;
+ dp->tag_ops = tag_ops;
+ dp->dst = dst;
+
+ return 0;
+}
+
+static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
+{
+ struct device_node *ethernet = of_parse_phandle(dn, "ethernet", 0);
+ const char *name = of_get_property(dn, "label", NULL);
+ bool link = of_property_read_bool(dn, "link");
+
+ dp->dn = dn;
+
+ if (ethernet) {
+ struct net_device *master;
+
+ master = of_find_net_device_by_node(ethernet);
+ of_node_put(ethernet);
+ if (!master)
+ return -EPROBE_DEFER;
+
+ return dsa_port_parse_cpu(dp, master);
+ }
+
+ if (link)
+ return dsa_port_parse_dsa(dp);
+
+ return dsa_port_parse_user(dp, name);
+}
+
+static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
+ struct device_node *dn)
+{
+ struct device_node *ports, *port;
+ struct dsa_port *dp;
+ int err = 0;
+ u32 reg;
+
+ ports = of_get_child_by_name(dn, "ports");
+ if (!ports) {
+ /* The second possibility is "ethernet-ports" */
+ ports = of_get_child_by_name(dn, "ethernet-ports");
+ if (!ports) {
+ dev_err(ds->dev, "no ports child node found\n");
+ return -EINVAL;
+ }
+ }
+
+ for_each_available_child_of_node(ports, port) {
+ err = of_property_read_u32(port, "reg", &reg);
+ if (err)
+ goto out_put_node;
+
+ if (reg >= ds->num_ports) {
+ err = -EINVAL;
+ goto out_put_node;
+ }
+
+ dp = dsa_to_port(ds, reg);
+
+ err = dsa_port_parse_of(dp, port);
+ if (err)
+ goto out_put_node;
+ }
+
+out_put_node:
+ of_node_put(ports);
+ return err;
+}
+
+static int dsa_switch_parse_member_of(struct dsa_switch *ds,
+ struct device_node *dn)
+{
+ u32 m[2] = { 0, 0 };
+ int sz;
+
+ /* Don't error out if this optional property isn't found */
+ sz = of_property_read_variable_u32_array(dn, "dsa,member", m, 2, 2);
+ if (sz < 0 && sz != -EINVAL)
+ return sz;
+
+ ds->index = m[1];
+
+ ds->dst = dsa_tree_touch(m[0]);
+ if (!ds->dst)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int dsa_switch_touch_ports(struct dsa_switch *ds)
+{
+ struct dsa_port *dp;
+ int port;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ dp = dsa_port_touch(ds, port);
+ if (!dp)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
+{
+ int err;
+
+ err = dsa_switch_parse_member_of(ds, dn);
+ if (err)
+ return err;
+
+ err = dsa_switch_touch_ports(ds);
+ if (err)
+ return err;
+
+ return dsa_switch_parse_ports_of(ds, dn);
+}
+
+static int dsa_port_parse(struct dsa_port *dp, const char *name,
+ struct device *dev)
+{
+ if (!strcmp(name, "cpu")) {
+ struct net_device *master;
+
+ master = dsa_dev_to_net_device(dev);
+ if (!master)
+ return -EPROBE_DEFER;
+
+ dev_put(master);
+
+ return dsa_port_parse_cpu(dp, master);
+ }
+
+ if (!strcmp(name, "dsa"))
+ return dsa_port_parse_dsa(dp);
+
+ return dsa_port_parse_user(dp, name);
+}
+
+static int dsa_switch_parse_ports(struct dsa_switch *ds,
+ struct dsa_chip_data *cd)
+{
+ bool valid_name_found = false;
+ struct dsa_port *dp;
+ struct device *dev;
+ const char *name;
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < DSA_MAX_PORTS; i++) {
+ name = cd->port_names[i];
+ dev = cd->netdev[i];
+ dp = dsa_to_port(ds, i);
+
+ if (!name)
+ continue;
+
+ err = dsa_port_parse(dp, name, dev);
+ if (err)
+ return err;
+
+ valid_name_found = true;
+ }
+
+ if (!valid_name_found && i == DSA_MAX_PORTS)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
+{
+ int err;
+
+ ds->cd = cd;
+
+ /* We don't support interconnected switches nor multiple trees via
+ * platform data, so this is the unique switch of the tree.
+ */
+ ds->index = 0;
+ ds->dst = dsa_tree_touch(0);
+ if (!ds->dst)
+ return -ENOMEM;
+
+ err = dsa_switch_touch_ports(ds);
+ if (err)
+ return err;
+
+ return dsa_switch_parse_ports(ds, cd);
+}
+
+static void dsa_switch_release_ports(struct dsa_switch *ds)
+{
+ struct dsa_switch_tree *dst = ds->dst;
+ struct dsa_port *dp, *next;
+
+ list_for_each_entry_safe(dp, next, &dst->ports, list) {
+ if (dp->ds != ds)
+ continue;
+ list_del(&dp->list);
+ kfree(dp);
+ }
+}
+
+static int dsa_switch_probe(struct dsa_switch *ds)
+{
+ struct dsa_switch_tree *dst;
+ struct dsa_chip_data *pdata;
+ struct device_node *np;
+ int err;
+
+ if (!ds->dev)
+ return -ENODEV;
+
+ pdata = ds->dev->platform_data;
+ np = ds->dev->of_node;
+
+ if (!ds->num_ports)
+ return -EINVAL;
+
+ if (np) {
+ err = dsa_switch_parse_of(ds, np);
+ if (err)
+ dsa_switch_release_ports(ds);
+ } else if (pdata) {
+ err = dsa_switch_parse(ds, pdata);
+ if (err)
+ dsa_switch_release_ports(ds);
+ } else {
+ err = -ENODEV;
+ }
+
+ if (err)
+ return err;
+
+ dst = ds->dst;
+ dsa_tree_get(dst);
+ err = dsa_tree_setup(dst);
+ if (err) {
+ dsa_switch_release_ports(ds);
+ dsa_tree_put(dst);
+ }
+
+ return err;
+}
+
+int dsa_register_switch(struct dsa_switch *ds)
+{
+ int err;
+
+ mutex_lock(&dsa2_mutex);
+ err = dsa_switch_probe(ds);
+ dsa_tree_put(ds->dst);
+ mutex_unlock(&dsa2_mutex);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(dsa_register_switch);
+
+static void dsa_switch_remove(struct dsa_switch *ds)
+{
+ struct dsa_switch_tree *dst = ds->dst;
+
+ dsa_tree_teardown(dst);
+ dsa_switch_release_ports(ds);
+ dsa_tree_put(dst);
+}
+
+void dsa_unregister_switch(struct dsa_switch *ds)
+{
+ mutex_lock(&dsa2_mutex);
+ dsa_switch_remove(ds);
+ mutex_unlock(&dsa2_mutex);
+}
+EXPORT_SYMBOL_GPL(dsa_unregister_switch);
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
new file mode 100644
index 000000000..12998bf04
--- /dev/null
+++ b/net/dsa/dsa_priv.h
@@ -0,0 +1,264 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * net/dsa/dsa_priv.h - Hardware switch handling
+ * Copyright (c) 2008-2009 Marvell Semiconductor
+ */
+
+#ifndef __DSA_PRIV_H
+#define __DSA_PRIV_H
+
+#include <linux/if_bridge.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+#include <linux/netpoll.h>
+#include <net/dsa.h>
+#include <net/gro_cells.h>
+
+enum {
+ DSA_NOTIFIER_AGEING_TIME,
+ DSA_NOTIFIER_BRIDGE_JOIN,
+ DSA_NOTIFIER_BRIDGE_LEAVE,
+ DSA_NOTIFIER_FDB_ADD,
+ DSA_NOTIFIER_FDB_DEL,
+ DSA_NOTIFIER_MDB_ADD,
+ DSA_NOTIFIER_MDB_DEL,
+ DSA_NOTIFIER_VLAN_ADD,
+ DSA_NOTIFIER_VLAN_DEL,
+ DSA_NOTIFIER_MTU,
+};
+
+/* DSA_NOTIFIER_AGEING_TIME */
+struct dsa_notifier_ageing_time_info {
+ struct switchdev_trans *trans;
+ unsigned int ageing_time;
+};
+
+/* DSA_NOTIFIER_BRIDGE_* */
+struct dsa_notifier_bridge_info {
+ struct net_device *br;
+ int tree_index;
+ int sw_index;
+ int port;
+};
+
+/* DSA_NOTIFIER_FDB_* */
+struct dsa_notifier_fdb_info {
+ int sw_index;
+ int port;
+ const unsigned char *addr;
+ u16 vid;
+};
+
+/* DSA_NOTIFIER_MDB_* */
+struct dsa_notifier_mdb_info {
+ const struct switchdev_obj_port_mdb *mdb;
+ struct switchdev_trans *trans;
+ int sw_index;
+ int port;
+};
+
+/* DSA_NOTIFIER_VLAN_* */
+struct dsa_notifier_vlan_info {
+ const struct switchdev_obj_port_vlan *vlan;
+ struct switchdev_trans *trans;
+ int sw_index;
+ int port;
+};
+
+/* DSA_NOTIFIER_MTU */
+struct dsa_notifier_mtu_info {
+ bool propagate_upstream;
+ int sw_index;
+ int port;
+ int mtu;
+};
+
+struct dsa_slave_priv {
+ /* Copy of CPU port xmit for faster access in slave transmit hot path */
+ struct sk_buff * (*xmit)(struct sk_buff *skb,
+ struct net_device *dev);
+
+ struct pcpu_sw_netstats __percpu *stats64;
+
+ struct gro_cells gcells;
+
+ /* DSA port data, such as switch, port index, etc. */
+ struct dsa_port *dp;
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ struct netpoll *netpoll;
+#endif
+
+ /* TC context */
+ struct list_head mall_tc_list;
+};
+
+/* dsa.c */
+const struct dsa_device_ops *dsa_tag_driver_get(int tag_protocol);
+void dsa_tag_driver_put(const struct dsa_device_ops *ops);
+
+bool dsa_schedule_work(struct work_struct *work);
+const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);
+
+int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 vid,
+ u16 flags,
+ struct netlink_ext_ack *extack);
+int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 vid);
+
+/* master.c */
+int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp);
+void dsa_master_teardown(struct net_device *dev);
+
+static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
+ int device, int port)
+{
+ struct dsa_port *cpu_dp = dev->dsa_ptr;
+ struct dsa_switch_tree *dst = cpu_dp->dst;
+ struct dsa_port *dp;
+
+ list_for_each_entry(dp, &dst->ports, list)
+ if (dp->ds->index == device && dp->index == port &&
+ dp->type == DSA_PORT_TYPE_USER)
+ return dp->slave;
+
+ return NULL;
+}
+
+/* port.c */
+int dsa_port_set_state(struct dsa_port *dp, u8 state,
+ struct switchdev_trans *trans);
+int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy);
+int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy);
+void dsa_port_disable_rt(struct dsa_port *dp);
+void dsa_port_disable(struct dsa_port *dp);
+int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br);
+void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br);
+int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
+ struct switchdev_trans *trans);
+bool dsa_port_skip_vlan_configuration(struct dsa_port *dp);
+int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock,
+ struct switchdev_trans *trans);
+int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
+ bool propagate_upstream);
+int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
+ u16 vid);
+int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
+ u16 vid);
+int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data);
+int dsa_port_mdb_add(const struct dsa_port *dp,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct switchdev_trans *trans);
+int dsa_port_mdb_del(const struct dsa_port *dp,
+ const struct switchdev_obj_port_mdb *mdb);
+int dsa_port_pre_bridge_flags(const struct dsa_port *dp, unsigned long flags,
+ struct switchdev_trans *trans);
+int dsa_port_bridge_flags(const struct dsa_port *dp, unsigned long flags,
+ struct switchdev_trans *trans);
+int dsa_port_mrouter(struct dsa_port *dp, bool mrouter,
+ struct switchdev_trans *trans);
+int dsa_port_vlan_add(struct dsa_port *dp,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans);
+int dsa_port_vlan_del(struct dsa_port *dp,
+ const struct switchdev_obj_port_vlan *vlan);
+int dsa_port_link_register_of(struct dsa_port *dp);
+void dsa_port_link_unregister_of(struct dsa_port *dp);
+extern const struct phylink_mac_ops dsa_port_phylink_mac_ops;
+
+/* slave.c */
+extern const struct dsa_device_ops notag_netdev_ops;
+void dsa_slave_mii_bus_init(struct dsa_switch *ds);
+int dsa_slave_create(struct dsa_port *dp);
+void dsa_slave_destroy(struct net_device *slave_dev);
+bool dsa_slave_dev_check(const struct net_device *dev);
+int dsa_slave_suspend(struct net_device *slave_dev);
+int dsa_slave_resume(struct net_device *slave_dev);
+int dsa_slave_register_notifier(void);
+void dsa_slave_unregister_notifier(void);
+
+static inline struct dsa_port *dsa_slave_to_port(const struct net_device *dev)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+
+ return p->dp;
+}
+
+static inline struct net_device *
+dsa_slave_to_master(const struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+
+ return dp->cpu_dp->master;
+}
+
+/* If under a bridge with vlan_filtering=0, make sure to send pvid-tagged
+ * frames as untagged, since the bridge will not untag them.
+ */
+static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb)
+{
+ struct dsa_port *dp = dsa_slave_to_port(skb->dev);
+ struct net_device *br = dp->bridge_dev;
+ struct net_device *dev = skb->dev;
+ struct net_device *upper_dev;
+ u16 vid, pvid, proto;
+ int err;
+
+ if (!br || br_vlan_enabled(br))
+ return skb;
+
+ err = br_vlan_get_proto(br, &proto);
+ if (err)
+ return skb;
+
+ /* Move VLAN tag from data to hwaccel */
+ if (!skb_vlan_tag_present(skb) && skb->protocol == htons(proto)) {
+ skb = skb_vlan_untag(skb);
+ if (!skb)
+ return NULL;
+ }
+
+ if (!skb_vlan_tag_present(skb))
+ return skb;
+
+ vid = skb_vlan_tag_get_id(skb);
+
+ /* We already run under an RCU read-side critical section since
+ * we are called from netif_receive_skb_list_internal().
+ */
+ err = br_vlan_get_pvid_rcu(dev, &pvid);
+ if (err)
+ return skb;
+
+ if (vid != pvid)
+ return skb;
+
+ /* The sad part about attempting to untag from DSA is that we
+ * don't know, unless we check, if the skb will end up in
+ * the bridge's data path - br_allowed_ingress() - or not.
+ * For example, there might be an 8021q upper for the
+ * default_pvid of the bridge, which will steal VLAN-tagged traffic
+ * from the bridge's data path. This is a configuration that DSA
+ * supports because vlan_filtering is 0. In that case, we should
+ * definitely keep the tag, to make sure it keeps working.
+ */
+ upper_dev = __vlan_find_dev_deep_rcu(br, htons(proto), vid);
+ if (upper_dev)
+ return skb;
+
+ __vlan_hwaccel_clear_tag(skb);
+
+ return skb;
+}
+
+/* switch.c */
+int dsa_switch_register_notifier(struct dsa_switch *ds);
+void dsa_switch_unregister_notifier(struct dsa_switch *ds);
+
+/* dsa2.c */
+extern struct list_head dsa_tree_list;
+
+#endif
diff --git a/net/dsa/master.c b/net/dsa/master.c
new file mode 100644
index 000000000..45bd627b4
--- /dev/null
+++ b/net/dsa/master.c
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Handling of a master device, switching frames via its switch fabric CPU port
+ *
+ * Copyright (c) 2017 Savoir-faire Linux Inc.
+ * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ */
+
+#include "dsa_priv.h"
+
+static int dsa_master_get_regs_len(struct net_device *dev)
+{
+ struct dsa_port *cpu_dp = dev->dsa_ptr;
+ const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
+ struct dsa_switch *ds = cpu_dp->ds;
+ int port = cpu_dp->index;
+ int ret = 0;
+ int len;
+
+ if (ops->get_regs_len) {
+ len = ops->get_regs_len(dev);
+ if (len < 0)
+ return len;
+ ret += len;
+ }
+
+ ret += sizeof(struct ethtool_drvinfo);
+ ret += sizeof(struct ethtool_regs);
+
+ if (ds->ops->get_regs_len) {
+ len = ds->ops->get_regs_len(ds, port);
+ if (len < 0)
+ return len;
+ ret += len;
+ }
+
+ return ret;
+}
+
+static void dsa_master_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *data)
+{
+ struct dsa_port *cpu_dp = dev->dsa_ptr;
+ const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
+ struct dsa_switch *ds = cpu_dp->ds;
+ struct ethtool_drvinfo *cpu_info;
+ struct ethtool_regs *cpu_regs;
+ int port = cpu_dp->index;
+ int len;
+
+ if (ops->get_regs_len && ops->get_regs) {
+ len = ops->get_regs_len(dev);
+ if (len < 0)
+ return;
+ regs->len = len;
+ ops->get_regs(dev, regs, data);
+ data += regs->len;
+ }
+
+ cpu_info = (struct ethtool_drvinfo *)data;
+ strlcpy(cpu_info->driver, "dsa", sizeof(cpu_info->driver));
+ data += sizeof(*cpu_info);
+ cpu_regs = (struct ethtool_regs *)data;
+ data += sizeof(*cpu_regs);
+
+ if (ds->ops->get_regs_len && ds->ops->get_regs) {
+ len = ds->ops->get_regs_len(ds, port);
+ if (len < 0)
+ return;
+ cpu_regs->len = len;
+ ds->ops->get_regs(ds, port, cpu_regs, data);
+ }
+}
+
+static void dsa_master_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ uint64_t *data)
+{
+ struct dsa_port *cpu_dp = dev->dsa_ptr;
+ const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
+ struct dsa_switch *ds = cpu_dp->ds;
+ int port = cpu_dp->index;
+ int count = 0;
+
+ if (ops->get_sset_count && ops->get_ethtool_stats) {
+ count = ops->get_sset_count(dev, ETH_SS_STATS);
+ ops->get_ethtool_stats(dev, stats, data);
+ }
+
+ if (ds->ops->get_ethtool_stats)
+ ds->ops->get_ethtool_stats(ds, port, data + count);
+}
+
+static void dsa_master_get_ethtool_phy_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ uint64_t *data)
+{
+ struct dsa_port *cpu_dp = dev->dsa_ptr;
+ const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
+ struct dsa_switch *ds = cpu_dp->ds;
+ int port = cpu_dp->index;
+ int count = 0;
+
+ if (dev->phydev && !ops->get_ethtool_phy_stats) {
+ count = phy_ethtool_get_sset_count(dev->phydev);
+ if (count >= 0)
+ phy_ethtool_get_stats(dev->phydev, stats, data);
+ } else if (ops->get_sset_count && ops->get_ethtool_phy_stats) {
+ count = ops->get_sset_count(dev, ETH_SS_PHY_STATS);
+ ops->get_ethtool_phy_stats(dev, stats, data);
+ }
+
+ if (count < 0)
+ count = 0;
+
+ if (ds->ops->get_ethtool_phy_stats)
+ ds->ops->get_ethtool_phy_stats(ds, port, data + count);
+}
+
+static int dsa_master_get_sset_count(struct net_device *dev, int sset)
+{
+ struct dsa_port *cpu_dp = dev->dsa_ptr;
+ const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
+ struct dsa_switch *ds = cpu_dp->ds;
+ int count = 0;
+
+ if (sset == ETH_SS_PHY_STATS && dev->phydev &&
+ !ops->get_ethtool_phy_stats)
+ count = phy_ethtool_get_sset_count(dev->phydev);
+ else if (ops->get_sset_count)
+ count = ops->get_sset_count(dev, sset);
+
+ if (count < 0)
+ count = 0;
+
+ if (ds->ops->get_sset_count)
+ count += ds->ops->get_sset_count(ds, cpu_dp->index, sset);
+
+ return count;
+}
+
+static void dsa_master_get_strings(struct net_device *dev, uint32_t stringset,
+ uint8_t *data)
+{
+ struct dsa_port *cpu_dp = dev->dsa_ptr;
+ const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
+ struct dsa_switch *ds = cpu_dp->ds;
+ int port = cpu_dp->index;
+ int len = ETH_GSTRING_LEN;
+ int mcount = 0, count, i;
+ uint8_t pfx[4];
+ uint8_t *ndata;
+
+ snprintf(pfx, sizeof(pfx), "p%.2d", port);
+ /* We do not want to be NULL-terminated, since this is a prefix */
+ pfx[sizeof(pfx) - 1] = '_';
+
+ if (stringset == ETH_SS_PHY_STATS && dev->phydev &&
+ !ops->get_ethtool_phy_stats) {
+ mcount = phy_ethtool_get_sset_count(dev->phydev);
+ if (mcount < 0)
+ mcount = 0;
+ else
+ phy_ethtool_get_strings(dev->phydev, data);
+ } else if (ops->get_sset_count && ops->get_strings) {
+ mcount = ops->get_sset_count(dev, stringset);
+ if (mcount < 0)
+ mcount = 0;
+ ops->get_strings(dev, stringset, data);
+ }
+
+ if (ds->ops->get_strings) {
+ ndata = data + mcount * len;
+ /* This function copies ETH_GSTRINGS_LEN bytes, we will mangle
+ * the output after to prepend our CPU port prefix we
+ * constructed earlier
+ */
+ ds->ops->get_strings(ds, port, stringset, ndata);
+ count = ds->ops->get_sset_count(ds, port, stringset);
+ if (count < 0)
+ return;
+ for (i = 0; i < count; i++) {
+ memmove(ndata + (i * len + sizeof(pfx)),
+ ndata + i * len, len - sizeof(pfx));
+ memcpy(ndata + i * len, pfx, sizeof(pfx));
+ }
+ }
+}
+
+static int dsa_master_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct dsa_port *cpu_dp = dev->dsa_ptr;
+ struct dsa_switch *ds = cpu_dp->ds;
+ struct dsa_switch_tree *dst;
+ int err = -EOPNOTSUPP;
+ struct dsa_port *dp;
+
+ dst = ds->dst;
+
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ case SIOCSHWTSTAMP:
+ /* Deny PTP operations on master if there is at least one
+ * switch in the tree that is PTP capable.
+ */
+ list_for_each_entry(dp, &dst->ports, list)
+ if (dp->ds->ops->port_hwtstamp_get ||
+ dp->ds->ops->port_hwtstamp_set)
+ return -EBUSY;
+ break;
+ }
+
+ if (dev->netdev_ops->ndo_do_ioctl)
+ err = dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
+
+ return err;
+}
+
+static const struct dsa_netdevice_ops dsa_netdev_ops = {
+ .ndo_do_ioctl = dsa_master_ioctl,
+};
+
+static int dsa_master_ethtool_setup(struct net_device *dev)
+{
+ struct dsa_port *cpu_dp = dev->dsa_ptr;
+ struct dsa_switch *ds = cpu_dp->ds;
+ struct ethtool_ops *ops;
+
+ ops = devm_kzalloc(ds->dev, sizeof(*ops), GFP_KERNEL);
+ if (!ops)
+ return -ENOMEM;
+
+ cpu_dp->orig_ethtool_ops = dev->ethtool_ops;
+ if (cpu_dp->orig_ethtool_ops)
+ memcpy(ops, cpu_dp->orig_ethtool_ops, sizeof(*ops));
+
+ ops->get_regs_len = dsa_master_get_regs_len;
+ ops->get_regs = dsa_master_get_regs;
+ ops->get_sset_count = dsa_master_get_sset_count;
+ ops->get_ethtool_stats = dsa_master_get_ethtool_stats;
+ ops->get_strings = dsa_master_get_strings;
+ ops->get_ethtool_phy_stats = dsa_master_get_ethtool_phy_stats;
+
+ dev->ethtool_ops = ops;
+
+ return 0;
+}
+
+static void dsa_master_ethtool_teardown(struct net_device *dev)
+{
+ struct dsa_port *cpu_dp = dev->dsa_ptr;
+
+ dev->ethtool_ops = cpu_dp->orig_ethtool_ops;
+ cpu_dp->orig_ethtool_ops = NULL;
+}
+
+static void dsa_netdev_ops_set(struct net_device *dev,
+ const struct dsa_netdevice_ops *ops)
+{
+ dev->dsa_ptr->netdev_ops = ops;
+}
+
+static void dsa_master_set_promiscuity(struct net_device *dev, int inc)
+{
+ const struct dsa_device_ops *ops = dev->dsa_ptr->tag_ops;
+
+ if (!ops->promisc_on_master)
+ return;
+
+ rtnl_lock();
+ dev_set_promiscuity(dev, inc);
+ rtnl_unlock();
+}
+
+static ssize_t tagging_show(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct net_device *dev = to_net_dev(d);
+ struct dsa_port *cpu_dp = dev->dsa_ptr;
+
+ return sprintf(buf, "%s\n",
+ dsa_tag_protocol_to_str(cpu_dp->tag_ops));
+}
+static DEVICE_ATTR_RO(tagging);
+
+static struct attribute *dsa_slave_attrs[] = {
+ &dev_attr_tagging.attr,
+ NULL
+};
+
+static const struct attribute_group dsa_group = {
+ .name = "dsa",
+ .attrs = dsa_slave_attrs,
+};
+
+static void dsa_master_reset_mtu(struct net_device *dev)
+{
+ int err;
+
+ rtnl_lock();
+ err = dev_set_mtu(dev, ETH_DATA_LEN);
+ if (err)
+ netdev_dbg(dev,
+ "Unable to reset MTU to exclude DSA overheads\n");
+ rtnl_unlock();
+}
+
+static struct lock_class_key dsa_master_addr_list_lock_key;
+
+int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
+{
+ struct dsa_switch *ds = cpu_dp->ds;
+ struct device_link *consumer_link;
+ int ret;
+
+ /* The DSA master must use SET_NETDEV_DEV for this to work. */
+ consumer_link = device_link_add(ds->dev, dev->dev.parent,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!consumer_link)
+ netdev_err(dev,
+ "Failed to create a device link to DSA switch %s\n",
+ dev_name(ds->dev));
+
+ rtnl_lock();
+ ret = dev_set_mtu(dev, ETH_DATA_LEN + cpu_dp->tag_ops->overhead);
+ rtnl_unlock();
+ if (ret)
+ netdev_warn(dev, "error %d setting MTU to include DSA overhead\n",
+ ret);
+
+ /* If we use a tagging format that doesn't have an ethertype
+ * field, make sure that all packets from this point on get
+ * sent to the tag format's receive function.
+ */
+ wmb();
+
+ dev->dsa_ptr = cpu_dp;
+ lockdep_set_class(&dev->addr_list_lock,
+ &dsa_master_addr_list_lock_key);
+
+ dsa_master_set_promiscuity(dev, 1);
+
+ ret = dsa_master_ethtool_setup(dev);
+ if (ret)
+ goto out_err_reset_promisc;
+
+ dsa_netdev_ops_set(dev, &dsa_netdev_ops);
+
+ ret = sysfs_create_group(&dev->dev.kobj, &dsa_group);
+ if (ret)
+ goto out_err_ndo_teardown;
+
+ return ret;
+
+out_err_ndo_teardown:
+ dsa_netdev_ops_set(dev, NULL);
+ dsa_master_ethtool_teardown(dev);
+out_err_reset_promisc:
+ dsa_master_set_promiscuity(dev, -1);
+ return ret;
+}
+
+void dsa_master_teardown(struct net_device *dev)
+{
+ sysfs_remove_group(&dev->dev.kobj, &dsa_group);
+ dsa_netdev_ops_set(dev, NULL);
+ dsa_master_ethtool_teardown(dev);
+ dsa_master_reset_mtu(dev);
+ dsa_master_set_promiscuity(dev, -1);
+
+ dev->dsa_ptr = NULL;
+
+ /* If we used a tagging format that doesn't have an ethertype
+ * field, make sure that all packets from this point get sent
+ * without the tag and go through the regular receive path.
+ */
+ wmb();
+}
diff --git a/net/dsa/port.c b/net/dsa/port.c
new file mode 100644
index 000000000..c9d552c4c
--- /dev/null
+++ b/net/dsa/port.c
@@ -0,0 +1,814 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Handling of a single switch port
+ *
+ * Copyright (c) 2017 Savoir-faire Linux Inc.
+ * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ */
+
+#include <linux/if_bridge.h>
+#include <linux/notifier.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+
+#include "dsa_priv.h"
+
+static int dsa_broadcast(unsigned long e, void *v)
+{
+ struct dsa_switch_tree *dst;
+ int err = 0;
+
+ list_for_each_entry(dst, &dsa_tree_list, list) {
+ struct raw_notifier_head *nh = &dst->nh;
+
+ err = raw_notifier_call_chain(nh, e, v);
+ err = notifier_to_errno(err);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
+{
+ struct raw_notifier_head *nh = &dp->ds->dst->nh;
+ int err;
+
+ err = raw_notifier_call_chain(nh, e, v);
+
+ return notifier_to_errno(err);
+}
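+
+/* dsa_broadcast() walks every switch tree in the system, which is needed
+ * for events that may span trees (e.g. bridging ports of disjoint trees),
+ * while dsa_port_notify() stays within the tree of the originating port.
+ */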
+
+int dsa_port_set_state(struct dsa_port *dp, u8 state,
+ struct switchdev_trans *trans)
+{
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return ds->ops->port_stp_state_set ? 0 : -EOPNOTSUPP;
+
+ if (ds->ops->port_stp_state_set)
+ ds->ops->port_stp_state_set(ds, port, state);
+
+ if (ds->ops->port_fast_age) {
+ /* Fast age FDB entries or flush the appropriate forwarding
+ * database for the given port when moving it from the Learning
+ * or Forwarding state to the Disabled, Blocking or Listening
+ * state.
+ */
+
+ if ((dp->stp_state == BR_STATE_LEARNING ||
+ dp->stp_state == BR_STATE_FORWARDING) &&
+ (state == BR_STATE_DISABLED ||
+ state == BR_STATE_BLOCKING ||
+ state == BR_STATE_LISTENING))
+ ds->ops->port_fast_age(ds, port);
+ }
+
+ dp->stp_state = state;
+
+ return 0;
+}
+
+static void dsa_port_set_state_now(struct dsa_port *dp, u8 state)
+{
+ int err;
+
+ err = dsa_port_set_state(dp, state, NULL);
+ if (err)
+ pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
+}
+
+int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
+{
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
+ int err;
+
+ if (ds->ops->port_enable) {
+ err = ds->ops->port_enable(ds, port, phy);
+ if (err)
+ return err;
+ }
+
+ if (!dp->bridge_dev)
+ dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
+
+ if (dp->pl)
+ phylink_start(dp->pl);
+
+ return 0;
+}
+
+int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
+{
+ int err;
+
+ rtnl_lock();
+ err = dsa_port_enable_rt(dp, phy);
+ rtnl_unlock();
+
+ return err;
+}
+
+void dsa_port_disable_rt(struct dsa_port *dp)
+{
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
+
+ if (dp->pl)
+ phylink_stop(dp->pl);
+
+ if (!dp->bridge_dev)
+ dsa_port_set_state_now(dp, BR_STATE_DISABLED);
+
+ if (ds->ops->port_disable)
+ ds->ops->port_disable(ds, port);
+}
+
+void dsa_port_disable(struct dsa_port *dp)
+{
+ rtnl_lock();
+ dsa_port_disable_rt(dp);
+ rtnl_unlock();
+}
+
+int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br)
+{
+ struct dsa_notifier_bridge_info info = {
+ .tree_index = dp->ds->dst->index,
+ .sw_index = dp->ds->index,
+ .port = dp->index,
+ .br = br,
+ };
+ int err;
+
+ /* Set the flooding mode before joining the port in the switch */
+ err = dsa_port_bridge_flags(dp, BR_FLOOD | BR_MCAST_FLOOD, NULL);
+ if (err)
+ return err;
+
+ /* Here the interface is already bridged. Reflect the current
+ * configuration so that drivers can program their chips accordingly.
+ */
+ dp->bridge_dev = br;
+
+ err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);
+
+ /* The bridging is rolled back on error */
+ if (err) {
+ dsa_port_bridge_flags(dp, 0, NULL);
+ dp->bridge_dev = NULL;
+ }
+
+ return err;
+}
+
+void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
+{
+ struct dsa_notifier_bridge_info info = {
+ .tree_index = dp->ds->dst->index,
+ .sw_index = dp->ds->index,
+ .port = dp->index,
+ .br = br,
+ };
+ int err;
+
+ /* Here the port is already unbridged. Reflect the current configuration
+ * so that drivers can program their chips accordingly.
+ */
+ dp->bridge_dev = NULL;
+
+ err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
+ if (err)
+ pr_err("DSA: failed to notify DSA_NOTIFIER_BRIDGE_LEAVE\n");
+
+ /* Port is leaving the bridge, disable flooding */
+ dsa_port_bridge_flags(dp, 0, NULL);
+
+ /* The port has left the bridge and was put in BR_STATE_DISABLED by
+ * the bridge layer, so put it back into BR_STATE_FORWARDING to keep
+ * it functional as a standalone port.
+ */
+ dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
+}
+
+/* Must be called under rcu_read_lock() */
+static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
+ bool vlan_filtering)
+{
+ struct dsa_switch *ds = dp->ds;
+ int err, i;
+
+ /* VLAN awareness was off, so the question is "can we turn it on".
+ * We may have had 8021q uppers; those need to go first. Make sure we
+ * don't enter an inconsistent state: deny changing the VLAN awareness
+ * state as long as we have 8021q uppers.
+ */
+ if (vlan_filtering && dsa_is_user_port(ds, dp->index)) {
+ struct net_device *upper_dev, *slave = dp->slave;
+ struct net_device *br = dp->bridge_dev;
+ struct list_head *iter;
+
+ netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
+ struct bridge_vlan_info br_info;
+ u16 vid;
+
+ if (!is_vlan_dev(upper_dev))
+ continue;
+
+ vid = vlan_dev_vlan_id(upper_dev);
+
+ /* br_vlan_get_info() returns -EINVAL or -ENOENT if the
+ * device, respectively the VID, is not found. Returning
+ * 0 means success, which is a failure for us here.
+ */
+ err = br_vlan_get_info(br, vid, &br_info);
+ if (err == 0) {
+ dev_err(ds->dev, "Must remove upper %s first\n",
+ upper_dev->name);
+ return false;
+ }
+ }
+ }
+
+ if (!ds->vlan_filtering_is_global)
+ return true;
+
+ /* For cases where enabling/disabling VLAN awareness is global to the
+ * switch, we need to handle the case where multiple bridges span
+ * different ports of the same switch device and one of them has a
+ * different setting than what is being requested.
+ */
+ for (i = 0; i < ds->num_ports; i++) {
+ struct net_device *other_bridge;
+
+ other_bridge = dsa_to_port(ds, i)->bridge_dev;
+ if (!other_bridge)
+ continue;
+ /* If it's the same bridge, it also has the same
+ * vlan_filtering setting => no need to check
+ */
+ if (other_bridge == dp->bridge_dev)
+ continue;
+ if (br_vlan_enabled(other_bridge) != vlan_filtering) {
+ dev_err(ds->dev, "VLAN filtering is a global setting\n");
+ return false;
+ }
+ }
+ return true;
+}
+
+int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
+ struct switchdev_trans *trans)
+{
+ struct dsa_switch *ds = dp->ds;
+ int err;
+
+ if (switchdev_trans_ph_prepare(trans)) {
+ bool apply;
+
+ if (!ds->ops->port_vlan_filtering)
+ return -EOPNOTSUPP;
+
+ /* We are called from dsa_slave_switchdev_blocking_event(),
+ * which is not under rcu_read_lock(), unlike
+ * dsa_slave_switchdev_event().
+ */
+ rcu_read_lock();
+ apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering);
+ rcu_read_unlock();
+ if (!apply)
+ return -EINVAL;
+ }
+
+ if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
+ return 0;
+
+ err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering,
+ trans);
+ if (err)
+ return err;
+
+ if (switchdev_trans_ph_commit(trans)) {
+ if (ds->vlan_filtering_is_global)
+ ds->vlan_filtering = vlan_filtering;
+ else
+ dp->vlan_filtering = vlan_filtering;
+ }
+
+ return 0;
+}
+
+/* This enforces legacy behavior for switch drivers which assume they can't
+ * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
+ */
+bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
+{
+ struct dsa_switch *ds = dp->ds;
+
+ if (!dp->bridge_dev)
+ return false;
+
+ return (!ds->configure_vlan_while_not_filtering &&
+ !br_vlan_enabled(dp->bridge_dev));
+}
+
+int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock,
+ struct switchdev_trans *trans)
+{
+ unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
+ unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
+ struct dsa_notifier_ageing_time_info info = {
+ .ageing_time = ageing_time,
+ .trans = trans,
+ };
+
+ if (switchdev_trans_ph_prepare(trans))
+ return dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
+
+ dp->ageing_time = ageing_time;
+
+ return dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
+}
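+
+/* The ageing time arrives from the bridge layer in clock_t units
+ * (hundredths of a second). For example, the default 300 s, set with:
+ *
+ *	ip link set dev br0 type bridge ageing_time 30000
+ *
+ * (assuming a bridge named "br0") converts to 300 * HZ jiffies, and the
+ * driver is notified with ageing_time = 300000 ms.
+ */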
+
+int dsa_port_pre_bridge_flags(const struct dsa_port *dp, unsigned long flags,
+ struct switchdev_trans *trans)
+{
+ struct dsa_switch *ds = dp->ds;
+
+ if (!ds->ops->port_egress_floods ||
+ (flags & ~(BR_FLOOD | BR_MCAST_FLOOD)))
+ return -EINVAL;
+
+ return 0;
+}
+
+int dsa_port_bridge_flags(const struct dsa_port *dp, unsigned long flags,
+ struct switchdev_trans *trans)
+{
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
+ int err = 0;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ if (ds->ops->port_egress_floods)
+ err = ds->ops->port_egress_floods(ds, port, flags & BR_FLOOD,
+ flags & BR_MCAST_FLOOD);
+
+ return err;
+}
+
+int dsa_port_mrouter(struct dsa_port *dp, bool mrouter,
+ struct switchdev_trans *trans)
+{
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return ds->ops->port_egress_floods ? 0 : -EOPNOTSUPP;
+
+ return ds->ops->port_egress_floods(ds, port, true, mrouter);
+}
+
+int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
+ bool propagate_upstream)
+{
+ struct dsa_notifier_mtu_info info = {
+ .sw_index = dp->ds->index,
+ .propagate_upstream = propagate_upstream,
+ .port = dp->index,
+ .mtu = new_mtu,
+ };
+
+ return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
+}
+
+int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
+ u16 vid)
+{
+ struct dsa_notifier_fdb_info info = {
+ .sw_index = dp->ds->index,
+ .port = dp->index,
+ .addr = addr,
+ .vid = vid,
+ };
+
+ return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
+}
+
+int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
+ u16 vid)
+{
+ struct dsa_notifier_fdb_info info = {
+ .sw_index = dp->ds->index,
+ .port = dp->index,
+ .addr = addr,
+ .vid = vid,
+ };
+
+ return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
+}
+
+int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
+
+ if (!ds->ops->port_fdb_dump)
+ return -EOPNOTSUPP;
+
+ return ds->ops->port_fdb_dump(ds, port, cb, data);
+}
+
+int dsa_port_mdb_add(const struct dsa_port *dp,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct switchdev_trans *trans)
+{
+ struct dsa_notifier_mdb_info info = {
+ .sw_index = dp->ds->index,
+ .port = dp->index,
+ .trans = trans,
+ .mdb = mdb,
+ };
+
+ return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
+}
+
+int dsa_port_mdb_del(const struct dsa_port *dp,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct dsa_notifier_mdb_info info = {
+ .sw_index = dp->ds->index,
+ .port = dp->index,
+ .mdb = mdb,
+ };
+
+ return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
+}
+
+int dsa_port_vlan_add(struct dsa_port *dp,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ struct dsa_notifier_vlan_info info = {
+ .sw_index = dp->ds->index,
+ .port = dp->index,
+ .trans = trans,
+ .vlan = vlan,
+ };
+
+ return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
+}
+
+int dsa_port_vlan_del(struct dsa_port *dp,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct dsa_notifier_vlan_info info = {
+ .sw_index = dp->ds->index,
+ .port = dp->index,
+ .vlan = vlan,
+ };
+
+ return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
+}
+
+static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
+{
+ struct device_node *phy_dn;
+ struct phy_device *phydev;
+
+ phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
+ if (!phy_dn)
+ return NULL;
+
+ phydev = of_phy_find_device(phy_dn);
+ if (!phydev) {
+ of_node_put(phy_dn);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ of_node_put(phy_dn);
+ return phydev;
+}
+
+static void dsa_port_phylink_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
+ struct dsa_switch *ds = dp->ds;
+
+ if (!ds->ops->phylink_validate)
+ return;
+
+ ds->ops->phylink_validate(ds, dp->index, supported, state);
+}
+
+static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
+ struct phylink_link_state *state)
+{
+ struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
+ struct dsa_switch *ds = dp->ds;
+ int err;
+
+ /* Only called for inband modes */
+ if (!ds->ops->phylink_mac_link_state) {
+ state->link = 0;
+ return;
+ }
+
+ err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
+ if (err < 0) {
+ dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
+ dp->index, err);
+ state->link = 0;
+ }
+}
+
+static void dsa_port_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
+ struct dsa_switch *ds = dp->ds;
+
+ if (!ds->ops->phylink_mac_config)
+ return;
+
+ ds->ops->phylink_mac_config(ds, dp->index, mode, state);
+}
+
+static void dsa_port_phylink_mac_an_restart(struct phylink_config *config)
+{
+ struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
+ struct dsa_switch *ds = dp->ds;
+
+ if (!ds->ops->phylink_mac_an_restart)
+ return;
+
+ ds->ops->phylink_mac_an_restart(ds, dp->index);
+}
+
+static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
+ struct phy_device *phydev = NULL;
+ struct dsa_switch *ds = dp->ds;
+
+ if (dsa_is_user_port(ds, dp->index))
+ phydev = dp->slave->phydev;
+
+ if (!ds->ops->phylink_mac_link_down) {
+ if (ds->ops->adjust_link && phydev)
+ ds->ops->adjust_link(ds, dp->index, phydev);
+ return;
+ }
+
+ ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
+}
+
+static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
+ unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
+ struct dsa_switch *ds = dp->ds;
+
+ if (!ds->ops->phylink_mac_link_up) {
+ if (ds->ops->adjust_link && phydev)
+ ds->ops->adjust_link(ds, dp->index, phydev);
+ return;
+ }
+
+ ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
+ speed, duplex, tx_pause, rx_pause);
+}
+
+const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
+ .validate = dsa_port_phylink_validate,
+ .mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
+ .mac_config = dsa_port_phylink_mac_config,
+ .mac_an_restart = dsa_port_phylink_mac_an_restart,
+ .mac_link_down = dsa_port_phylink_mac_link_down,
+ .mac_link_up = dsa_port_phylink_mac_link_up,
+};
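+
+/* Each handler above is a thin trampoline into the corresponding
+ * dsa_switch_ops hook. The link_up/link_down handlers additionally fall
+ * back to the legacy ->adjust_link callback (when a PHY is attached) for
+ * drivers that have not been converted to phylink yet.
+ */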
+
+static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable)
+{
+ struct dsa_switch *ds = dp->ds;
+ struct phy_device *phydev;
+ int port = dp->index;
+ int err = 0;
+
+ phydev = dsa_port_get_phy_device(dp);
+ if (!phydev)
+ return 0;
+
+ if (IS_ERR(phydev))
+ return PTR_ERR(phydev);
+
+ if (enable) {
+ err = genphy_resume(phydev);
+ if (err < 0)
+ goto err_put_dev;
+
+ err = genphy_read_status(phydev);
+ if (err < 0)
+ goto err_put_dev;
+ } else {
+ err = genphy_suspend(phydev);
+ if (err < 0)
+ goto err_put_dev;
+ }
+
+ if (ds->ops->adjust_link)
+ ds->ops->adjust_link(ds, port, phydev);
+
+ dev_dbg(ds->dev, "enabled port's phy: %s\n", phydev_name(phydev));
+
+err_put_dev:
+ put_device(&phydev->mdio.dev);
+ return err;
+}
+
+static int dsa_port_fixed_link_register_of(struct dsa_port *dp)
+{
+ struct device_node *dn = dp->dn;
+ struct dsa_switch *ds = dp->ds;
+ struct phy_device *phydev;
+ int port = dp->index;
+ phy_interface_t mode;
+ int err;
+
+ err = of_phy_register_fixed_link(dn);
+ if (err) {
+ dev_err(ds->dev,
+ "failed to register the fixed PHY of port %d\n",
+ port);
+ return err;
+ }
+
+ phydev = of_phy_find_device(dn);
+ if (!phydev) {
+ of_phy_deregister_fixed_link(dn);
+ return -ENODEV;
+ }
+
+ err = of_get_phy_mode(dn, &mode);
+ if (err)
+ mode = PHY_INTERFACE_MODE_NA;
+ phydev->interface = mode;
+
+ genphy_read_status(phydev);
+
+ if (ds->ops->adjust_link)
+ ds->ops->adjust_link(ds, port, phydev);
+
+ put_device(&phydev->mdio.dev);
+
+ return 0;
+}
+
+static int dsa_port_phylink_register(struct dsa_port *dp)
+{
+ struct dsa_switch *ds = dp->ds;
+ struct device_node *port_dn = dp->dn;
+ phy_interface_t mode;
+ int err;
+
+ err = of_get_phy_mode(port_dn, &mode);
+ if (err)
+ mode = PHY_INTERFACE_MODE_NA;
+
+ dp->pl_config.dev = ds->dev;
+ dp->pl_config.type = PHYLINK_DEV;
+ dp->pl_config.pcs_poll = ds->pcs_poll;
+
+ dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn),
+ mode, &dsa_port_phylink_mac_ops);
+ if (IS_ERR(dp->pl)) {
+ pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
+ return PTR_ERR(dp->pl);
+ }
+
+ err = phylink_of_phy_connect(dp->pl, port_dn, 0);
+ if (err && err != -ENODEV) {
+ pr_err("could not attach to PHY: %d\n", err);
+ goto err_phy_connect;
+ }
+
+ return 0;
+
+err_phy_connect:
+ phylink_destroy(dp->pl);
+ return err;
+}
+
+int dsa_port_link_register_of(struct dsa_port *dp)
+{
+ struct dsa_switch *ds = dp->ds;
+ struct device_node *phy_np;
+ int port = dp->index;
+
+ if (!ds->ops->adjust_link) {
+ phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
+ if (of_phy_is_fixed_link(dp->dn) || phy_np) {
+ if (ds->ops->phylink_mac_link_down)
+ ds->ops->phylink_mac_link_down(ds, port,
+ MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
+ of_node_put(phy_np);
+ return dsa_port_phylink_register(dp);
+ }
+ of_node_put(phy_np);
+ return 0;
+ }
+
+ dev_warn(ds->dev,
+ "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");
+
+ if (of_phy_is_fixed_link(dp->dn))
+ return dsa_port_fixed_link_register_of(dp);
+ else
+ return dsa_port_setup_phy_of(dp, true);
+}
+
+void dsa_port_link_unregister_of(struct dsa_port *dp)
+{
+ struct dsa_switch *ds = dp->ds;
+
+ if (!ds->ops->adjust_link && dp->pl) {
+ rtnl_lock();
+ phylink_disconnect_phy(dp->pl);
+ rtnl_unlock();
+ phylink_destroy(dp->pl);
+ dp->pl = NULL;
+ return;
+ }
+
+ if (of_phy_is_fixed_link(dp->dn))
+ of_phy_deregister_fixed_link(dp->dn);
+ else
+ dsa_port_setup_phy_of(dp, false);
+}
+
+int dsa_port_get_phy_strings(struct dsa_port *dp, uint8_t *data)
+{
+ struct phy_device *phydev;
+ int ret = -EOPNOTSUPP;
+
+ if (of_phy_is_fixed_link(dp->dn))
+ return ret;
+
+ phydev = dsa_port_get_phy_device(dp);
+ if (IS_ERR_OR_NULL(phydev))
+ return ret;
+
+ ret = phy_ethtool_get_strings(phydev, data);
+ put_device(&phydev->mdio.dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dsa_port_get_phy_strings);
+
+int dsa_port_get_ethtool_phy_stats(struct dsa_port *dp, uint64_t *data)
+{
+ struct phy_device *phydev;
+ int ret = -EOPNOTSUPP;
+
+ if (of_phy_is_fixed_link(dp->dn))
+ return ret;
+
+ phydev = dsa_port_get_phy_device(dp);
+ if (IS_ERR_OR_NULL(phydev))
+ return ret;
+
+ ret = phy_ethtool_get_stats(phydev, NULL, data);
+ put_device(&phydev->mdio.dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dsa_port_get_ethtool_phy_stats);
+
+int dsa_port_get_phy_sset_count(struct dsa_port *dp)
+{
+ struct phy_device *phydev;
+ int ret = -EOPNOTSUPP;
+
+ if (of_phy_is_fixed_link(dp->dn))
+ return ret;
+
+ phydev = dsa_port_get_phy_device(dp);
+ if (IS_ERR_OR_NULL(phydev))
+ return ret;
+
+ ret = phy_ethtool_get_sset_count(phydev);
+ put_device(&phydev->mdio.dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dsa_port_get_phy_sset_count);
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
new file mode 100644
index 000000000..65b125bb3
--- /dev/null
+++ b/net/dsa/slave.c
@@ -0,0 +1,2251 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * net/dsa/slave.c - Slave device handling
+ * Copyright (c) 2008-2009 Marvell Semiconductor
+ */
+
+#include <linux/list.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/phylink.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/mdio.h>
+#include <net/rtnetlink.h>
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_mirred.h>
+#include <linux/if_bridge.h>
+#include <linux/netpoll.h>
+#include <linux/ptp_classify.h>
+
+#include "dsa_priv.h"
+
+/* slave mii_bus handling ***************************************************/
+static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
+{
+ struct dsa_switch *ds = bus->priv;
+
+ if (ds->phys_mii_mask & (1 << addr))
+ return ds->ops->phy_read(ds, addr, reg);
+
+ return 0xffff;
+}
+
+static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
+{
+ struct dsa_switch *ds = bus->priv;
+
+ if (ds->phys_mii_mask & (1 << addr))
+ return ds->ops->phy_write(ds, addr, reg, val);
+
+ return 0;
+}
+
+void dsa_slave_mii_bus_init(struct dsa_switch *ds)
+{
+ ds->slave_mii_bus->priv = (void *)ds;
+ ds->slave_mii_bus->name = "dsa slave smi";
+ ds->slave_mii_bus->read = dsa_slave_phy_read;
+ ds->slave_mii_bus->write = dsa_slave_phy_write;
+ snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
+ ds->dst->index, ds->index);
+ ds->slave_mii_bus->parent = ds->dev;
+ ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
+}
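+
+/* The bus ID encodes the tree and switch indices, e.g. "dsa-0.1" for the
+ * second switch of the first tree. PHY addresses outside phys_mii_mask
+ * are masked so that mdiobus_register() does not probe them.
+ */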
+
+/* slave device handling ****************************************************/
+static int dsa_slave_get_iflink(const struct net_device *dev)
+{
+ return dsa_slave_to_master(dev)->ifindex;
+}
+
+static int dsa_slave_open(struct net_device *dev)
+{
+ struct net_device *master = dsa_slave_to_master(dev);
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ int err;
+
+ if (!(master->flags & IFF_UP))
+ return -ENETDOWN;
+
+ if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
+ err = dev_uc_add(master, dev->dev_addr);
+ if (err < 0)
+ goto out;
+ }
+
+ if (dev->flags & IFF_ALLMULTI) {
+ err = dev_set_allmulti(master, 1);
+ if (err < 0)
+ goto del_unicast;
+ }
+ if (dev->flags & IFF_PROMISC) {
+ err = dev_set_promiscuity(master, 1);
+ if (err < 0)
+ goto clear_allmulti;
+ }
+
+ err = dsa_port_enable_rt(dp, dev->phydev);
+ if (err)
+ goto clear_promisc;
+
+ return 0;
+
+clear_promisc:
+ if (dev->flags & IFF_PROMISC)
+ dev_set_promiscuity(master, -1);
+clear_allmulti:
+ if (dev->flags & IFF_ALLMULTI)
+ dev_set_allmulti(master, -1);
+del_unicast:
+ if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
+ dev_uc_del(master, dev->dev_addr);
+out:
+ return err;
+}
+
+static int dsa_slave_close(struct net_device *dev)
+{
+ struct net_device *master = dsa_slave_to_master(dev);
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+
+ dsa_port_disable_rt(dp);
+
+ dev_mc_unsync(master, dev);
+ dev_uc_unsync(master, dev);
+ if (dev->flags & IFF_ALLMULTI)
+ dev_set_allmulti(master, -1);
+ if (dev->flags & IFF_PROMISC)
+ dev_set_promiscuity(master, -1);
+
+ if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
+ dev_uc_del(master, dev->dev_addr);
+
+ return 0;
+}
+
+static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
+{
+ struct net_device *master = dsa_slave_to_master(dev);
+
+ if (dev->flags & IFF_UP) {
+ if (change & IFF_ALLMULTI)
+ dev_set_allmulti(master,
+ dev->flags & IFF_ALLMULTI ? 1 : -1);
+ if (change & IFF_PROMISC)
+ dev_set_promiscuity(master,
+ dev->flags & IFF_PROMISC ? 1 : -1);
+ }
+}
+
+static void dsa_slave_set_rx_mode(struct net_device *dev)
+{
+ struct net_device *master = dsa_slave_to_master(dev);
+
+ dev_mc_sync(master, dev);
+ dev_uc_sync(master, dev);
+}
+
+static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
+{
+ struct net_device *master = dsa_slave_to_master(dev);
+ struct sockaddr *addr = a;
+ int err;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (!(dev->flags & IFF_UP))
+ goto out;
+
+ if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
+ err = dev_uc_add(master, addr->sa_data);
+ if (err < 0)
+ return err;
+ }
+
+ if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
+ dev_uc_del(master, dev->dev_addr);
+
+out:
+ ether_addr_copy(dev->dev_addr, addr->sa_data);
+
+ return 0;
+}
+
+struct dsa_slave_dump_ctx {
+ struct net_device *dev;
+ struct sk_buff *skb;
+ struct netlink_callback *cb;
+ int idx;
+};
+
+static int
+dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
+ bool is_static, void *data)
+{
+ struct dsa_slave_dump_ctx *dump = data;
+ u32 portid = NETLINK_CB(dump->cb->skb).portid;
+ u32 seq = dump->cb->nlh->nlmsg_seq;
+ struct nlmsghdr *nlh;
+ struct ndmsg *ndm;
+
+ if (dump->idx < dump->cb->args[2])
+ goto skip;
+
+ nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
+ sizeof(*ndm), NLM_F_MULTI);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ ndm = nlmsg_data(nlh);
+ ndm->ndm_family = AF_BRIDGE;
+ ndm->ndm_pad1 = 0;
+ ndm->ndm_pad2 = 0;
+ ndm->ndm_flags = NTF_SELF;
+ ndm->ndm_type = 0;
+ ndm->ndm_ifindex = dump->dev->ifindex;
+ ndm->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
+
+ if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
+ goto nla_put_failure;
+
+ if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
+ goto nla_put_failure;
+
+ nlmsg_end(dump->skb, nlh);
+
+skip:
+ dump->idx++;
+ return 0;
+
+nla_put_failure:
+ nlmsg_cancel(dump->skb, nlh);
+ return -EMSGSIZE;
+}
+
+static int
+dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ struct net_device *dev, struct net_device *filter_dev,
+ int *idx)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_slave_dump_ctx dump = {
+ .dev = dev,
+ .skb = skb,
+ .cb = cb,
+ .idx = *idx,
+ };
+ int err;
+
+ err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
+ *idx = dump.idx;
+
+ return err;
+}
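+
+/* This backs the RTM_GETNEIGH dump of self-managed (NTF_SELF) entries,
+ * i.e. what e.g. "bridge fdb show dev swp0" reports for a user port
+ * (assuming it is named "swp0").
+ */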
+
+static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = p->dp->ds;
+ int port = p->dp->index;
+
+ /* Pass through to switch driver if it supports timestamping */
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ if (ds->ops->port_hwtstamp_get)
+ return ds->ops->port_hwtstamp_get(ds, port, ifr);
+ break;
+ case SIOCSHWTSTAMP:
+ if (ds->ops->port_hwtstamp_set)
+ return ds->ops->port_hwtstamp_set(ds, port, ifr);
+ break;
+ }
+
+ return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
+}
+
+static int dsa_slave_port_attr_set(struct net_device *dev,
+ const struct switchdev_attr *attr,
+ struct switchdev_trans *trans)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ int ret;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ ret = dsa_port_set_state(dp, attr->u.stp_state, trans);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
+ trans);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ ret = dsa_port_ageing_time(dp, attr->u.ageing_time, trans);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
+ trans);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, trans);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
+ ret = dsa_port_mrouter(dp->cpu_dp, attr->u.mrouter, trans);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+/* Must be called under rcu_read_lock() */
+static int
+dsa_slave_vlan_check_for_8021q_uppers(struct net_device *slave,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct net_device *upper_dev;
+ struct list_head *iter;
+
+ netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
+ u16 vid;
+
+ if (!is_vlan_dev(upper_dev))
+ continue;
+
+ vid = vlan_dev_vlan_id(upper_dev);
+ if (vid >= vlan->vid_begin && vid <= vlan->vid_end)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int dsa_slave_vlan_add(struct net_device *dev,
+ const struct switchdev_obj *obj,
+ struct switchdev_trans *trans)
+{
+ struct net_device *master = dsa_slave_to_master(dev);
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct switchdev_obj_port_vlan vlan;
+ int vid, err;
+
+ if (obj->orig_dev != dev)
+ return -EOPNOTSUPP;
+
+ if (dsa_port_skip_vlan_configuration(dp))
+ return 0;
+
+ vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);
+
+ /* Deny adding a bridge VLAN when there is already an 802.1Q upper with
+ * the same VID.
+ */
+ if (trans->ph_prepare && br_vlan_enabled(dp->bridge_dev)) {
+ rcu_read_lock();
+ err = dsa_slave_vlan_check_for_8021q_uppers(dev, &vlan);
+ rcu_read_unlock();
+ if (err)
+ return err;
+ }
+
+ err = dsa_port_vlan_add(dp, &vlan, trans);
+ if (err)
+ return err;
+
+ /* We need the dedicated CPU port to be a member of the VLAN as well.
+ * Even though drivers often handle CPU membership in special ways,
+ * it doesn't make sense to program a PVID, so clear this flag.
+ */
+ vlan.flags &= ~BRIDGE_VLAN_INFO_PVID;
+
+ err = dsa_port_vlan_add(dp->cpu_dp, &vlan, trans);
+ if (err)
+ return err;
+
+ for (vid = vlan.vid_begin; vid <= vlan.vid_end; vid++) {
+ err = vlan_vid_add(master, htons(ETH_P_8021Q), vid);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
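+
+/* Together with dsa_slave_vlan_del(), this is what services e.g.:
+ *
+ *	bridge vlan add dev swp0 vid 100
+ *
+ * (assuming a user port named "swp0"): the VLAN is installed on the user
+ * port, mirrored on the CPU port with the PVID flag cleared, and finally
+ * offered to the master through vlan_vid_add().
+ */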
+
+static int dsa_slave_port_obj_add(struct net_device *dev,
+ const struct switchdev_obj *obj,
+ struct switchdev_trans *trans,
+ struct netlink_ext_ack *extack)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ int err;
+
+ /* For the prepare phase, ensure the full set of changes is feasible in
+ * one go in order to signal a failure properly. If an operation is not
+ * supported, return -EOPNOTSUPP.
+ */
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ if (obj->orig_dev != dev)
+ return -EOPNOTSUPP;
+ err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj), trans);
+ break;
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ /* DSA can directly translate this to a normal MDB add,
+ * but on the CPU port.
+ */
+ err = dsa_port_mdb_add(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj),
+ trans);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = dsa_slave_vlan_add(dev, obj, trans);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int dsa_slave_vlan_del(struct net_device *dev,
+ const struct switchdev_obj *obj)
+{
+ struct net_device *master = dsa_slave_to_master(dev);
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct switchdev_obj_port_vlan *vlan;
+ int vid, err;
+
+ if (obj->orig_dev != dev)
+ return -EOPNOTSUPP;
+
+ if (dsa_port_skip_vlan_configuration(dp))
+ return 0;
+
+ vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+
+ /* Do not deprogram the CPU port as it may be shared with other user
+ * ports which can be members of this VLAN as well.
+ */
+ err = dsa_port_vlan_del(dp, vlan);
+ if (err)
+ return err;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
+ vlan_vid_del(master, htons(ETH_P_8021Q), vid);
+
+ return 0;
+}
+
+static int dsa_slave_port_obj_del(struct net_device *dev,
+ const struct switchdev_obj *obj)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ int err;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ if (obj->orig_dev != dev)
+ return -EOPNOTSUPP;
+ err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
+ break;
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ /* DSA can directly translate this to a normal MDB del,
+ * but on the CPU port.
+ */
+ err = dsa_port_mdb_del(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj));
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = dsa_slave_vlan_del(dev, obj);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int dsa_slave_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+ struct dsa_switch_tree *dst = ds->dst;
+
+ /* For non-legacy ports, devlink is used and it takes
+ * care of reporting the port parent ID. This ndo implementation
+ * should be removed with legacy support.
+ */
+ if (dp->ds->devlink)
+ return -EOPNOTSUPP;
+
+ ppid->id_len = sizeof(dst->index);
+ memcpy(&ppid->id, &dst->index, ppid->id_len);
+
+ return 0;
+}
+
+static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
+ struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ struct dsa_slave_priv *p = netdev_priv(dev);
+
+ return netpoll_send_skb(p->netpoll, skb);
+#else
+ BUG();
+ return NETDEV_TX_OK;
+#endif
+}
+
+static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
+ struct sk_buff *skb)
+{
+ struct dsa_switch *ds = p->dp->ds;
+ struct sk_buff *clone;
+ unsigned int type;
+
+ type = ptp_classify_raw(skb);
+ if (type == PTP_CLASS_NONE)
+ return;
+
+ if (!ds->ops->port_txtstamp)
+ return;
+
+ clone = skb_clone_sk(skb);
+ if (!clone)
+ return;
+
+ DSA_SKB_CB(skb)->clone = clone;
+
+ if (ds->ops->port_txtstamp(ds, p->dp->index, clone, type))
+ return;
+
+ kfree_skb(clone);
+}
+
+netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
+{
+ /* SKBs for netpoll still need to be mangled with the protocol-specific
+ * tag to be successfully transmitted
+ */
+ if (unlikely(netpoll_tx_running(dev)))
+ return dsa_slave_netpoll_send_skb(dev, skb);
+
+ /* Queue the SKB for transmission on the parent interface, but
+ * do not modify its EtherType
+ */
+ skb->dev = dsa_slave_to_master(dev);
+ dev_queue_xmit(skb);
+
+ return NETDEV_TX_OK;
+}
+EXPORT_SYMBOL_GPL(dsa_enqueue_skb);
+
+static int dsa_realloc_skb(struct sk_buff *skb, struct net_device *dev)
+{
+ int needed_headroom = dev->needed_headroom;
+ int needed_tailroom = dev->needed_tailroom;
+
+ /* For tail taggers, we need to pad short frames ourselves, to ensure
+ * that the tail tag does not fail at its role of being at the end of
+ * the packet, once the master interface pads the frame. Account for
+ * that pad length here, and pad later.
+ */
+ if (unlikely(needed_tailroom && skb->len < ETH_ZLEN))
+ needed_tailroom += ETH_ZLEN - skb->len;
+ /* skb_headroom() returns unsigned int... */
+ needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0);
+ needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0);
+
+ if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb)))
+ /* No reallocation needed, yay! */
+ return 0;
+
+ return pskb_expand_head(skb, needed_headroom, needed_tailroom,
+ GFP_ATOMIC);
+}
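+
+/* Worked example: a tail tagger with a 4-byte tag (needed_tailroom = 4)
+ * transmitting a 50-byte frame. Since the frame must first be padded to
+ * ETH_ZLEN (60), needed_tailroom grows to 4 + (60 - 50) = 14 bytes, which
+ * pskb_expand_head() guarantees before the tag is appended.
+ */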
+
+static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct pcpu_sw_netstats *s;
+ struct sk_buff *nskb;
+
+ s = this_cpu_ptr(p->stats64);
+ u64_stats_update_begin(&s->syncp);
+ s->tx_packets++;
+ s->tx_bytes += skb->len;
+ u64_stats_update_end(&s->syncp);
+
+ DSA_SKB_CB(skb)->clone = NULL;
+
+ /* Identify PTP protocol packets, clone them, and pass them to the
+ * switch driver
+ */
+ dsa_skb_tx_timestamp(p, skb);
+
+ if (dsa_realloc_skb(skb, dev)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* needed_tailroom should still be 'warm' in the cache line from
+ * dsa_realloc_skb(), which has also ensured that padding is safe.
+ */
+ if (dev->needed_tailroom)
+ eth_skb_pad(skb);
+
+ /* Transmit function may have to reallocate the original SKB,
+ * in which case it must have freed it. Only free it here on error.
+ */
+ nskb = p->xmit(skb, dev);
+ if (!nskb) {
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ return dsa_enqueue_skb(nskb, dev);
+}
+
+/* ethtool operations *******************************************************/
+
+static void dsa_slave_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strlcpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
+}
+
+static int dsa_slave_get_regs_len(struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+ if (ds->ops->get_regs_len)
+ return ds->ops->get_regs_len(ds, dp->index);
+
+ return -EOPNOTSUPP;
+}
+
+static void
+dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+ if (ds->ops->get_regs)
+ ds->ops->get_regs(ds, dp->index, regs, _p);
+}
+
+static int dsa_slave_nway_reset(struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+
+ return phylink_ethtool_nway_reset(dp->pl);
+}
+
+static int dsa_slave_get_eeprom_len(struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+ if (ds->cd && ds->cd->eeprom_len)
+ return ds->cd->eeprom_len;
+
+ if (ds->ops->get_eeprom_len)
+ return ds->ops->get_eeprom_len(ds);
+
+ return 0;
+}
+
+static int dsa_slave_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+ if (ds->ops->get_eeprom)
+ return ds->ops->get_eeprom(ds, eeprom, data);
+
+ return -EOPNOTSUPP;
+}
+
+static int dsa_slave_set_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+ if (ds->ops->set_eeprom)
+ return ds->ops->set_eeprom(ds, eeprom, data);
+
+ return -EOPNOTSUPP;
+}
+
+static void dsa_slave_get_strings(struct net_device *dev,
+ uint32_t stringset, uint8_t *data)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+ if (stringset == ETH_SS_STATS) {
+ int len = ETH_GSTRING_LEN;
+
+ strncpy(data, "tx_packets", len);
+ strncpy(data + len, "tx_bytes", len);
+ strncpy(data + 2 * len, "rx_packets", len);
+ strncpy(data + 3 * len, "rx_bytes", len);
+ if (ds->ops->get_strings)
+ ds->ops->get_strings(ds, dp->index, stringset,
+ data + 4 * len);
+ }
+}
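+
+/* The first four ETH_SS_STATS strings are thus always the software
+ * counters above; driver strings start at offset 4 * ETH_GSTRING_LEN,
+ * matching the "data + 4" in dsa_slave_get_ethtool_stats() and the
+ * "count + 4" in dsa_slave_get_sset_count().
+ */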
+
+static void dsa_slave_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ uint64_t *data)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = dp->ds;
+ struct pcpu_sw_netstats *s;
+ unsigned int start;
+ int i;
+
+ for_each_possible_cpu(i) {
+ u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
+
+ s = per_cpu_ptr(p->stats64, i);
+ do {
+ start = u64_stats_fetch_begin_irq(&s->syncp);
+ tx_packets = s->tx_packets;
+ tx_bytes = s->tx_bytes;
+ rx_packets = s->rx_packets;
+ rx_bytes = s->rx_bytes;
+ } while (u64_stats_fetch_retry_irq(&s->syncp, start));
+ data[0] += tx_packets;
+ data[1] += tx_bytes;
+ data[2] += rx_packets;
+ data[3] += rx_bytes;
+ }
+ if (ds->ops->get_ethtool_stats)
+ ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
+}
+
+static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+ if (sset == ETH_SS_STATS) {
+ int count = 0;
+
+ if (ds->ops->get_sset_count) {
+ count = ds->ops->get_sset_count(ds, dp->index, sset);
+ if (count < 0)
+ return count;
+ }
+
+ return count + 4;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+ phylink_ethtool_get_wol(dp->pl, w);
+
+ if (ds->ops->get_wol)
+ ds->ops->get_wol(ds, dp->index, w);
+}
+
+static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+ int ret = -EOPNOTSUPP;
+
+ phylink_ethtool_set_wol(dp->pl, w);
+
+ if (ds->ops->set_wol)
+ ret = ds->ops->set_wol(ds, dp->index, w);
+
+ return ret;
+}
+
+static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+ int ret;
+
+ /* Port's PHY and MAC both need to be EEE capable */
+ if (!dev->phydev || !dp->pl)
+ return -ENODEV;
+
+ if (!ds->ops->set_mac_eee)
+ return -EOPNOTSUPP;
+
+ ret = ds->ops->set_mac_eee(ds, dp->index, e);
+ if (ret)
+ return ret;
+
+ return phylink_ethtool_set_eee(dp->pl, e);
+}
+
+static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+ int ret;
+
+ /* Port's PHY and MAC both need to be EEE capable */
+ if (!dev->phydev || !dp->pl)
+ return -ENODEV;
+
+ if (!ds->ops->get_mac_eee)
+ return -EOPNOTSUPP;
+
+ ret = ds->ops->get_mac_eee(ds, dp->index, e);
+ if (ret)
+ return ret;
+
+ return phylink_ethtool_get_eee(dp->pl, e);
+}
+
+static int dsa_slave_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+
+ return phylink_ethtool_ksettings_get(dp->pl, cmd);
+}
+
+static int dsa_slave_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+
+ return phylink_ethtool_ksettings_set(dp->pl, cmd);
+}
+
+static void dsa_slave_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+
+ phylink_ethtool_get_pauseparam(dp->pl, pause);
+}
+
+static int dsa_slave_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+
+ return phylink_ethtool_set_pauseparam(dp->pl, pause);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static int dsa_slave_netpoll_setup(struct net_device *dev,
+ struct netpoll_info *ni)
+{
+ struct net_device *master = dsa_slave_to_master(dev);
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct netpoll *netpoll;
+ int err = 0;
+
+ netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
+ if (!netpoll)
+ return -ENOMEM;
+
+ err = __netpoll_setup(netpoll, master);
+ if (err) {
+ kfree(netpoll);
+ goto out;
+ }
+
+ p->netpoll = netpoll;
+out:
+ return err;
+}
+
+static void dsa_slave_netpoll_cleanup(struct net_device *dev)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct netpoll *netpoll = p->netpoll;
+
+ if (!netpoll)
+ return;
+
+ p->netpoll = NULL;
+
+ __netpoll_free(netpoll);
+}
+
+static void dsa_slave_poll_controller(struct net_device *dev)
+{
+}
+#endif
+
+static int dsa_slave_get_phys_port_name(struct net_device *dev,
+ char *name, size_t len)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+
+ /* For non-legacy ports, devlink is used and it takes
+ * care of the name generation. This ndo implementation
+ * should be removed with legacy support.
+ */
+ if (dp->ds->devlink)
+ return -EOPNOTSUPP;
+
+ if (snprintf(name, len, "p%d", dp->index) >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct dsa_mall_tc_entry *
+dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_mall_tc_entry *mall_tc_entry;
+
+ list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
+ if (mall_tc_entry->cookie == cookie)
+ return mall_tc_entry;
+
+ return NULL;
+}
+
+static int
+dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls,
+ bool ingress)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_mall_mirror_tc_entry *mirror;
+ struct dsa_mall_tc_entry *mall_tc_entry;
+ struct dsa_switch *ds = dp->ds;
+ struct flow_action_entry *act;
+ struct dsa_port *to_dp;
+ int err;
+
+ if (!ds->ops->port_mirror_add)
+ return -EOPNOTSUPP;
+
+ if (!flow_action_basic_hw_stats_check(&cls->rule->action,
+ cls->common.extack))
+ return -EOPNOTSUPP;
+
+ act = &cls->rule->action.entries[0];
+
+ if (!act->dev)
+ return -EINVAL;
+
+ if (!dsa_slave_dev_check(act->dev))
+ return -EOPNOTSUPP;
+
+ mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
+ if (!mall_tc_entry)
+ return -ENOMEM;
+
+ mall_tc_entry->cookie = cls->cookie;
+ mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
+ mirror = &mall_tc_entry->mirror;
+
+ to_dp = dsa_slave_to_port(act->dev);
+
+ mirror->to_local_port = to_dp->index;
+ mirror->ingress = ingress;
+
+ err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress);
+ if (err) {
+ kfree(mall_tc_entry);
+ return err;
+ }
+
+ list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
+
+ return err;
+}
+
+static int
+dsa_slave_add_cls_matchall_police(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls,
+ bool ingress)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_mall_policer_tc_entry *policer;
+ struct dsa_mall_tc_entry *mall_tc_entry;
+ struct dsa_switch *ds = dp->ds;
+ struct flow_action_entry *act;
+ int err;
+
+ if (!ds->ops->port_policer_add) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Policing offload not implemented");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ingress) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only supported on ingress qdisc");
+ return -EOPNOTSUPP;
+ }
+
+ if (!flow_action_basic_hw_stats_check(&cls->rule->action,
+ cls->common.extack))
+ return -EOPNOTSUPP;
+
+ list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) {
+ if (mall_tc_entry->type == DSA_PORT_MALL_POLICER) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one port policer allowed");
+ return -EEXIST;
+ }
+ }
+
+ act = &cls->rule->action.entries[0];
+
+ mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
+ if (!mall_tc_entry)
+ return -ENOMEM;
+
+ mall_tc_entry->cookie = cls->cookie;
+ mall_tc_entry->type = DSA_PORT_MALL_POLICER;
+ policer = &mall_tc_entry->policer;
+ policer->rate_bytes_per_sec = act->police.rate_bytes_ps;
+ policer->burst = act->police.burst;
+
+ err = ds->ops->port_policer_add(ds, dp->index, policer);
+ if (err) {
+ kfree(mall_tc_entry);
+ return err;
+ }
+
+ list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
+
+ return err;
+}
+
+static int dsa_slave_add_cls_matchall(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls,
+ bool ingress)
+{
+ int err = -EOPNOTSUPP;
+
+ if (cls->common.protocol == htons(ETH_P_ALL) &&
+ flow_offload_has_one_action(&cls->rule->action) &&
+ cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED)
+ err = dsa_slave_add_cls_matchall_mirred(dev, cls, ingress);
+ else if (flow_offload_has_one_action(&cls->rule->action) &&
+ cls->rule->action.entries[0].id == FLOW_ACTION_POLICE)
+ err = dsa_slave_add_cls_matchall_police(dev, cls, ingress);
+
+ return err;
+}
+
+static void dsa_slave_del_cls_matchall(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_mall_tc_entry *mall_tc_entry;
+ struct dsa_switch *ds = dp->ds;
+
+ mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
+ if (!mall_tc_entry)
+ return;
+
+ list_del(&mall_tc_entry->list);
+
+ switch (mall_tc_entry->type) {
+ case DSA_PORT_MALL_MIRROR:
+ if (ds->ops->port_mirror_del)
+ ds->ops->port_mirror_del(ds, dp->index,
+ &mall_tc_entry->mirror);
+ break;
+ case DSA_PORT_MALL_POLICER:
+ if (ds->ops->port_policer_del)
+ ds->ops->port_policer_del(ds, dp->index);
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ kfree(mall_tc_entry);
+}
+
+static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls,
+ bool ingress)
+{
+ if (cls->common.chain_index)
+ return -EOPNOTSUPP;
+
+ switch (cls->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return dsa_slave_add_cls_matchall(dev, cls, ingress);
+ case TC_CLSMATCHALL_DESTROY:
+ dsa_slave_del_cls_matchall(dev, cls);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
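+
+/* The mirred path above is what offloads e.g. port mirroring between two
+ * user ports (assuming they are named "swp0" and "swp1"):
+ *
+ *	tc qdisc add dev swp0 clsact
+ *	tc filter add dev swp0 ingress matchall skip_sw \
+ *		action mirred egress mirror dev swp1
+ */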
+
+static int dsa_slave_add_cls_flower(struct net_device *dev,
+ struct flow_cls_offload *cls,
+ bool ingress)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
+
+ if (!ds->ops->cls_flower_add)
+ return -EOPNOTSUPP;
+
+ return ds->ops->cls_flower_add(ds, port, cls, ingress);
+}
+
+static int dsa_slave_del_cls_flower(struct net_device *dev,
+ struct flow_cls_offload *cls,
+ bool ingress)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
+
+ if (!ds->ops->cls_flower_del)
+ return -EOPNOTSUPP;
+
+ return ds->ops->cls_flower_del(ds, port, cls, ingress);
+}
+
+static int dsa_slave_stats_cls_flower(struct net_device *dev,
+ struct flow_cls_offload *cls,
+ bool ingress)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
+
+ if (!ds->ops->cls_flower_stats)
+ return -EOPNOTSUPP;
+
+ return ds->ops->cls_flower_stats(ds, port, cls, ingress);
+}
+
+static int dsa_slave_setup_tc_cls_flower(struct net_device *dev,
+ struct flow_cls_offload *cls,
+ bool ingress)
+{
+ switch (cls->command) {
+ case FLOW_CLS_REPLACE:
+ return dsa_slave_add_cls_flower(dev, cls, ingress);
+ case FLOW_CLS_DESTROY:
+ return dsa_slave_del_cls_flower(dev, cls, ingress);
+ case FLOW_CLS_STATS:
+ return dsa_slave_stats_cls_flower(dev, cls, ingress);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv, bool ingress)
+{
+ struct net_device *dev = cb_priv;
+
+ if (!tc_can_offload(dev))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSMATCHALL:
+ return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
+ case TC_SETUP_CLSFLOWER:
+ return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
+}
+
+static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
+}
+
+static LIST_HEAD(dsa_slave_block_cb_list);
+
+static int dsa_slave_setup_tc_block(struct net_device *dev,
+ struct flow_block_offload *f)
+{
+ struct flow_block_cb *block_cb;
+ flow_setup_cb_t *cb;
+
+ if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ cb = dsa_slave_setup_tc_block_cb_ig;
+ else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
+ cb = dsa_slave_setup_tc_block_cb_eg;
+ else
+ return -EOPNOTSUPP;
+
+ f->driver_block_list = &dsa_slave_block_cb_list;
+
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
+ return -EBUSY;
+
+ block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
+ if (IS_ERR(block_cb))
+ return PTR_ERR(block_cb);
+
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
+ return 0;
+ case FLOW_BLOCK_UNBIND:
+ block_cb = flow_block_cb_lookup(f->block, cb, dev);
+ if (!block_cb)
+ return -ENOENT;
+
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+ if (type == TC_SETUP_BLOCK)
+ return dsa_slave_setup_tc_block(dev, type_data);
+
+ if (!ds->ops->port_setup_tc)
+ return -EOPNOTSUPP;
+
+ return ds->ops->port_setup_tc(ds, dp->index, type, type_data);
+}
+
+static void dsa_slave_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+
+ netdev_stats_to_stats64(stats, &dev->stats);
+ dev_fetch_sw_netstats(stats, p->stats64);
+}
+
+static int dsa_slave_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *nfc, u32 *rule_locs)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+ if (!ds->ops->get_rxnfc)
+ return -EOPNOTSUPP;
+
+ return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
+}
+
+static int dsa_slave_set_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *nfc)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+ if (!ds->ops->set_rxnfc)
+ return -EOPNOTSUPP;
+
+ return ds->ops->set_rxnfc(ds, dp->index, nfc);
+}
+
+static int dsa_slave_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *ts)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = p->dp->ds;
+
+ if (!ds->ops->get_ts_info)
+ return -EOPNOTSUPP;
+
+ return ds->ops->get_ts_info(ds, p->dp->index, ts);
+}
+
+static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
+ u16 vid)
+{
+ struct net_device *master = dsa_slave_to_master(dev);
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct switchdev_obj_port_vlan vlan = {
+ .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
+ .vid_begin = vid,
+ .vid_end = vid,
+ /* This API only allows programming tagged, non-PVID VIDs */
+ .flags = 0,
+ };
+ struct switchdev_trans trans;
+ int ret;
+
+ /* User port... */
+ trans.ph_prepare = true;
+ ret = dsa_port_vlan_add(dp, &vlan, &trans);
+ if (ret)
+ return ret;
+
+ trans.ph_prepare = false;
+ ret = dsa_port_vlan_add(dp, &vlan, &trans);
+ if (ret)
+ return ret;
+
+ /* And CPU port... */
+ trans.ph_prepare = true;
+ ret = dsa_port_vlan_add(dp->cpu_dp, &vlan, &trans);
+ if (ret)
+ return ret;
+
+ trans.ph_prepare = false;
+ ret = dsa_port_vlan_add(dp->cpu_dp, &vlan, &trans);
+ if (ret)
+ return ret;
+
+ return vlan_vid_add(master, proto, vid);
+}
+
+static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
+ u16 vid)
+{
+ struct net_device *master = dsa_slave_to_master(dev);
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct switchdev_obj_port_vlan vlan = {
+ .vid_begin = vid,
+ .vid_end = vid,
+ /* This API only allows programming tagged, non-PVID VIDs */
+ .flags = 0,
+ };
+ int err;
+
+ /* Do not deprogram the CPU port as it may be shared with other user
+ * ports which can be members of this VLAN as well.
+ */
+ err = dsa_port_vlan_del(dp, &vlan);
+ if (err)
+ return err;
+
+ vlan_vid_del(master, proto, vid);
+
+ return 0;
+}
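+
+/* These two NDOs service 802.1Q uppers created directly on the user port,
+ * outside of any bridge, e.g. (assuming a user port named "swp0"):
+ *
+ *	ip link add link swp0 name swp0.100 type vlan id 100
+ *
+ * The VID is installed as tagged and non-PVID on both the user port and
+ * the CPU port.
+ */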
+
+struct dsa_hw_port {
+ struct list_head list;
+ struct net_device *dev;
+ int old_mtu;
+};
+
+static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu)
+{
+ const struct dsa_hw_port *p;
+ int err;
+
+ list_for_each_entry(p, hw_port_list, list) {
+ if (p->dev->mtu == mtu)
+ continue;
+
+ err = dev_set_mtu(p->dev, mtu);
+ if (err)
+ goto rollback;
+ }
+
+ return 0;
+
+rollback:
+ list_for_each_entry_continue_reverse(p, hw_port_list, list) {
+ if (p->dev->mtu == p->old_mtu)
+ continue;
+
+ if (dev_set_mtu(p->dev, p->old_mtu))
+ netdev_err(p->dev, "Failed to restore MTU\n");
+ }
+
+ return err;
+}
+
+static void dsa_hw_port_list_free(struct list_head *hw_port_list)
+{
+ struct dsa_hw_port *p, *n;
+
+ list_for_each_entry_safe(p, n, hw_port_list, list)
+ kfree(p);
+}
+
+/* Limit the hardware datapath to/from @dev to a common MTU */
+static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
+{
+ struct list_head hw_port_list;
+ struct dsa_switch_tree *dst;
+ int min_mtu = ETH_MAX_MTU;
+ struct dsa_port *other_dp;
+ int err;
+
+ if (!dp->ds->mtu_enforcement_ingress)
+ return;
+
+ if (!dp->bridge_dev)
+ return;
+
+ INIT_LIST_HEAD(&hw_port_list);
+
+ /* Populate the list of ports that are part of the same bridge
+ * as the newly added/modified port
+ */
+ list_for_each_entry(dst, &dsa_tree_list, list) {
+ list_for_each_entry(other_dp, &dst->ports, list) {
+ struct dsa_hw_port *hw_port;
+ struct net_device *slave;
+
+ if (other_dp->type != DSA_PORT_TYPE_USER)
+ continue;
+
+ if (other_dp->bridge_dev != dp->bridge_dev)
+ continue;
+
+ if (!other_dp->ds->mtu_enforcement_ingress)
+ continue;
+
+ slave = other_dp->slave;
+
+ if (min_mtu > slave->mtu)
+ min_mtu = slave->mtu;
+
+ hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL);
+ if (!hw_port)
+ goto out;
+
+ hw_port->dev = slave;
+ hw_port->old_mtu = slave->mtu;
+
+ list_add(&hw_port->list, &hw_port_list);
+ }
+ }
+
+ /* Attempt to configure the entire hardware bridge to the newly added
+ * interface's MTU first, regardless of whether the intention of the
+ * user was to raise or lower it.
+ */
+ err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->slave->mtu);
+ if (!err)
+ goto out;
+
+ /* Clearly that didn't work out so well, so just set the minimum MTU on
+ * all hardware bridge ports now. If this fails too, then all ports will
+ * still have their old MTU rolled back anyway.
+ */
+ dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu);
+
+out:
+ dsa_hw_port_list_free(&hw_port_list);
+}
+
+static int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct net_device *master = dsa_slave_to_master(dev);
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = p->dp->ds;
+ struct dsa_port *cpu_dp;
+ int port = p->dp->index;
+ int largest_mtu = 0;
+ int new_master_mtu;
+ int old_master_mtu;
+ int mtu_limit;
+ int cpu_mtu;
+ int err, i;
+
+ if (!ds->ops->port_change_mtu)
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < ds->num_ports; i++) {
+ int slave_mtu;
+
+ if (!dsa_is_user_port(ds, i))
+ continue;
+
+ /* During probe, this function will be called for each slave
+ * device, while not all of them have been allocated. That's
+ * ok, it doesn't change what the maximum is, so ignore it.
+ */
+ if (!dsa_to_port(ds, i)->slave)
+ continue;
+
+ /* Pretend that we already applied the setting, which we
+ * actually haven't (still haven't done all integrity checks)
+ */
+ if (i == port)
+ slave_mtu = new_mtu;
+ else
+ slave_mtu = dsa_to_port(ds, i)->slave->mtu;
+
+ if (largest_mtu < slave_mtu)
+ largest_mtu = slave_mtu;
+ }
+
+ cpu_dp = dsa_to_port(ds, port)->cpu_dp;
+
+ mtu_limit = min_t(int, master->max_mtu, dev->max_mtu);
+ old_master_mtu = master->mtu;
+ new_master_mtu = largest_mtu + cpu_dp->tag_ops->overhead;
+ if (new_master_mtu > mtu_limit)
+ return -ERANGE;
+
+ /* If the master MTU isn't over limit, there's no need to check the CPU
+ * MTU, since that surely isn't either.
+ */
+ cpu_mtu = largest_mtu;
+
+ /* Start applying stuff */
+ if (new_master_mtu != old_master_mtu) {
+ err = dev_set_mtu(master, new_master_mtu);
+ if (err < 0)
+ goto out_master_failed;
+
+ /* We only need to propagate the MTU of the CPU port to
+ * upstream switches.
+ */
+ err = dsa_port_mtu_change(cpu_dp, cpu_mtu, true);
+ if (err)
+ goto out_cpu_failed;
+ }
+
+ err = dsa_port_mtu_change(dp, new_mtu, false);
+ if (err)
+ goto out_port_failed;
+
+ dev->mtu = new_mtu;
+
+ dsa_bridge_mtu_normalization(dp);
+
+ return 0;
+
+out_port_failed:
+ if (new_master_mtu != old_master_mtu)
+ dsa_port_mtu_change(cpu_dp, old_master_mtu -
+ cpu_dp->tag_ops->overhead,
+ true);
+out_cpu_failed:
+ if (new_master_mtu != old_master_mtu)
+ dev_set_mtu(master, old_master_mtu);
+out_master_failed:
+ return err;
+}
+
+static const struct ethtool_ops dsa_slave_ethtool_ops = {
+ .get_drvinfo = dsa_slave_get_drvinfo,
+ .get_regs_len = dsa_slave_get_regs_len,
+ .get_regs = dsa_slave_get_regs,
+ .nway_reset = dsa_slave_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = dsa_slave_get_eeprom_len,
+ .get_eeprom = dsa_slave_get_eeprom,
+ .set_eeprom = dsa_slave_set_eeprom,
+ .get_strings = dsa_slave_get_strings,
+ .get_ethtool_stats = dsa_slave_get_ethtool_stats,
+ .get_sset_count = dsa_slave_get_sset_count,
+ .set_wol = dsa_slave_set_wol,
+ .get_wol = dsa_slave_get_wol,
+ .set_eee = dsa_slave_set_eee,
+ .get_eee = dsa_slave_get_eee,
+ .get_link_ksettings = dsa_slave_get_link_ksettings,
+ .set_link_ksettings = dsa_slave_set_link_ksettings,
+ .get_pauseparam = dsa_slave_get_pauseparam,
+ .set_pauseparam = dsa_slave_set_pauseparam,
+ .get_rxnfc = dsa_slave_get_rxnfc,
+ .set_rxnfc = dsa_slave_set_rxnfc,
+ .get_ts_info = dsa_slave_get_ts_info,
+};
+
+/* legacy way, bypassing the bridge *****************************************/
+int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 vid,
+ u16 flags,
+ struct netlink_ext_ack *extack)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+
+ return dsa_port_fdb_add(dp, addr, vid);
+}
+
+int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 vid)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+
+ return dsa_port_fdb_del(dp, addr, vid);
+}
+
+static struct devlink_port *dsa_slave_get_devlink_port(struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+
+ return dp->ds->devlink ? &dp->devlink_port : NULL;
+}
+
+static const struct net_device_ops dsa_slave_netdev_ops = {
+ .ndo_open = dsa_slave_open,
+ .ndo_stop = dsa_slave_close,
+ .ndo_start_xmit = dsa_slave_xmit,
+ .ndo_change_rx_flags = dsa_slave_change_rx_flags,
+ .ndo_set_rx_mode = dsa_slave_set_rx_mode,
+ .ndo_set_mac_address = dsa_slave_set_mac_address,
+ .ndo_fdb_add = dsa_legacy_fdb_add,
+ .ndo_fdb_del = dsa_legacy_fdb_del,
+ .ndo_fdb_dump = dsa_slave_fdb_dump,
+ .ndo_do_ioctl = dsa_slave_ioctl,
+ .ndo_get_iflink = dsa_slave_get_iflink,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_netpoll_setup = dsa_slave_netpoll_setup,
+ .ndo_netpoll_cleanup = dsa_slave_netpoll_cleanup,
+ .ndo_poll_controller = dsa_slave_poll_controller,
+#endif
+ .ndo_get_phys_port_name = dsa_slave_get_phys_port_name,
+ .ndo_setup_tc = dsa_slave_setup_tc,
+ .ndo_get_stats64 = dsa_slave_get_stats64,
+ .ndo_get_port_parent_id = dsa_slave_get_port_parent_id,
+ .ndo_vlan_rx_add_vid = dsa_slave_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = dsa_slave_vlan_rx_kill_vid,
+ .ndo_get_devlink_port = dsa_slave_get_devlink_port,
+ .ndo_change_mtu = dsa_slave_change_mtu,
+};
+
+static struct device_type dsa_type = {
+ .name = "dsa",
+};
+
+void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
+{
+ const struct dsa_port *dp = dsa_to_port(ds, port);
+
+ if (dp->pl)
+ phylink_mac_change(dp->pl, up);
+}
+EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);
+
+static void dsa_slave_phylink_fixed_state(struct phylink_config *config,
+ struct phylink_link_state *state)
+{
+ struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
+ struct dsa_switch *ds = dp->ds;
+
+ /* No need to check that this operation is valid, the callback would
+ * not be called if it was not.
+ */
+ ds->ops->phylink_fixed_state(ds, dp->index, state);
+}
+
+/* slave device setup *******************************************************/
+static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr)
+{
+ struct dsa_port *dp = dsa_slave_to_port(slave_dev);
+ struct dsa_switch *ds = dp->ds;
+
+ slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
+ if (!slave_dev->phydev) {
+ netdev_err(slave_dev, "no phy at %d\n", addr);
+ return -ENODEV;
+ }
+
+ return phylink_connect_phy(dp->pl, slave_dev->phydev);
+}
+
+static int dsa_slave_phy_setup(struct net_device *slave_dev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(slave_dev);
+ struct device_node *port_dn = dp->dn;
+ struct dsa_switch *ds = dp->ds;
+ phy_interface_t mode;
+ u32 phy_flags = 0;
+ int ret;
+
+ ret = of_get_phy_mode(port_dn, &mode);
+ if (ret)
+ mode = PHY_INTERFACE_MODE_NA;
+
+ dp->pl_config.dev = &slave_dev->dev;
+ dp->pl_config.type = PHYLINK_NETDEV;
+
+ /* The get_fixed_state callback takes precedence over polling the
+ * link GPIO in PHYLINK (see phylink_get_fixed_state). Only set
+ * this if the switch provides such a callback.
+ */
+ if (ds->ops->phylink_fixed_state) {
+ dp->pl_config.get_fixed_state = dsa_slave_phylink_fixed_state;
+ dp->pl_config.poll_fixed_state = true;
+ }
+
+ dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn), mode,
+ &dsa_port_phylink_mac_ops);
+ if (IS_ERR(dp->pl)) {
+ netdev_err(slave_dev,
+ "error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
+ return PTR_ERR(dp->pl);
+ }
+
+ if (ds->ops->get_phy_flags)
+ phy_flags = ds->ops->get_phy_flags(ds, dp->index);
+
+ ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
+ if (ret == -ENODEV && ds->slave_mii_bus) {
+ /* We could not connect to a designated PHY or SFP, so try to
+ * use the switch internal MDIO bus instead
+ */
+ ret = dsa_slave_phy_connect(slave_dev, dp->index);
+ }
+ if (ret) {
+ netdev_err(slave_dev, "failed to connect to PHY: %pe\n",
+ ERR_PTR(ret));
+ phylink_destroy(dp->pl);
+ }
+
+ return ret;
+}
+
+static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
+static void dsa_slave_set_lockdep_class_one(struct net_device *dev,
+ struct netdev_queue *txq,
+ void *_unused)
+{
+ lockdep_set_class(&txq->_xmit_lock,
+ &dsa_slave_netdev_xmit_lock_key);
+}
+
+int dsa_slave_suspend(struct net_device *slave_dev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(slave_dev);
+
+ if (!netif_running(slave_dev))
+ return 0;
+
+ netif_device_detach(slave_dev);
+
+ rtnl_lock();
+ phylink_stop(dp->pl);
+ rtnl_unlock();
+
+ return 0;
+}
+
+int dsa_slave_resume(struct net_device *slave_dev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(slave_dev);
+
+ if (!netif_running(slave_dev))
+ return 0;
+
+ netif_device_attach(slave_dev);
+
+ rtnl_lock();
+ phylink_start(dp->pl);
+ rtnl_unlock();
+
+ return 0;
+}
+
+static void dsa_slave_notify(struct net_device *dev, unsigned long val)
+{
+ struct net_device *master = dsa_slave_to_master(dev);
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_notifier_register_info rinfo = {
+ .switch_number = dp->ds->index,
+ .port_number = dp->index,
+ .master = master,
+ .info.dev = dev,
+ };
+
+ call_dsa_notifiers(val, dev, &rinfo.info);
+}
+
+int dsa_slave_create(struct dsa_port *port)
+{
+ const struct dsa_port *cpu_dp = port->cpu_dp;
+ struct net_device *master = cpu_dp->master;
+ struct dsa_switch *ds = port->ds;
+ const char *name = port->name;
+ struct net_device *slave_dev;
+ struct dsa_slave_priv *p;
+ int ret;
+
+ if (!ds->num_tx_queues)
+ ds->num_tx_queues = 1;
+
+ slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
+ NET_NAME_UNKNOWN, ether_setup,
+ ds->num_tx_queues, 1);
+ if (slave_dev == NULL)
+ return -ENOMEM;
+
+ slave_dev->features = master->vlan_features | NETIF_F_HW_TC;
+ if (ds->ops->port_vlan_add && ds->ops->port_vlan_del)
+ slave_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ slave_dev->hw_features |= NETIF_F_HW_TC;
+ slave_dev->features |= NETIF_F_LLTX;
+ slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
+ if (!IS_ERR_OR_NULL(port->mac))
+ ether_addr_copy(slave_dev->dev_addr, port->mac);
+ else
+ eth_hw_addr_inherit(slave_dev, master);
+ slave_dev->priv_flags |= IFF_NO_QUEUE;
+ slave_dev->netdev_ops = &dsa_slave_netdev_ops;
+ if (ds->ops->port_max_mtu)
+ slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
+ if (cpu_dp->tag_ops->tail_tag)
+ slave_dev->needed_tailroom = cpu_dp->tag_ops->overhead;
+ else
+ slave_dev->needed_headroom = cpu_dp->tag_ops->overhead;
+ /* Try to save one extra realloc later in the TX path (in the master)
+ * by also inheriting the master's needed headroom and tailroom.
+ * The 8021q driver also does this.
+ */
+ slave_dev->needed_headroom += master->needed_headroom;
+ slave_dev->needed_tailroom += master->needed_tailroom;
+ SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
+
+ netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
+ NULL);
+
+ SET_NETDEV_DEV(slave_dev, port->ds->dev);
+ slave_dev->dev.of_node = port->dn;
+ slave_dev->vlan_features = master->vlan_features;
+
+ p = netdev_priv(slave_dev);
+ p->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!p->stats64) {
+ free_netdev(slave_dev);
+ return -ENOMEM;
+ }
+
+ ret = gro_cells_init(&p->gcells, slave_dev);
+ if (ret)
+ goto out_free;
+
+ p->dp = port;
+ INIT_LIST_HEAD(&p->mall_tc_list);
+ p->xmit = cpu_dp->tag_ops->xmit;
+ port->slave = slave_dev;
+
+ rtnl_lock();
+ ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
+ rtnl_unlock();
+ if (ret && ret != -EOPNOTSUPP)
+ dev_warn(ds->dev, "nonfatal error %d setting MTU on port %d\n",
+ ret, port->index);
+
+ netif_carrier_off(slave_dev);
+
+ ret = dsa_slave_phy_setup(slave_dev);
+ if (ret) {
+ netdev_err(slave_dev,
+ "error %d setting up PHY for tree %d, switch %d, port %d\n",
+ ret, ds->dst->index, ds->index, port->index);
+ goto out_gcells;
+ }
+
+ dsa_slave_notify(slave_dev, DSA_PORT_REGISTER);
+
+ rtnl_lock();
+
+ ret = register_netdevice(slave_dev);
+ if (ret) {
+ netdev_err(master, "error %d registering interface %s\n",
+ ret, slave_dev->name);
+ rtnl_unlock();
+ goto out_phy;
+ }
+
+ ret = netdev_upper_dev_link(master, slave_dev, NULL);
+
+ rtnl_unlock();
+
+ if (ret)
+ goto out_unregister;
+
+ return 0;
+
+out_unregister:
+ unregister_netdev(slave_dev);
+out_phy:
+ rtnl_lock();
+ phylink_disconnect_phy(p->dp->pl);
+ rtnl_unlock();
+ phylink_destroy(p->dp->pl);
+out_gcells:
+ gro_cells_destroy(&p->gcells);
+out_free:
+ free_percpu(p->stats64);
+ free_netdev(slave_dev);
+ port->slave = NULL;
+ return ret;
+}
+
+void dsa_slave_destroy(struct net_device *slave_dev)
+{
+ struct net_device *master = dsa_slave_to_master(slave_dev);
+ struct dsa_port *dp = dsa_slave_to_port(slave_dev);
+ struct dsa_slave_priv *p = netdev_priv(slave_dev);
+
+ netif_carrier_off(slave_dev);
+ rtnl_lock();
+ netdev_upper_dev_unlink(master, slave_dev);
+ unregister_netdevice(slave_dev);
+ phylink_disconnect_phy(dp->pl);
+ rtnl_unlock();
+
+ dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER);
+ phylink_destroy(dp->pl);
+ gro_cells_destroy(&p->gcells);
+ free_percpu(p->stats64);
+ free_netdev(slave_dev);
+}
+
+bool dsa_slave_dev_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &dsa_slave_netdev_ops;
+}
+
+static int dsa_slave_changeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ int err = NOTIFY_DONE;
+
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking) {
+ err = dsa_port_bridge_join(dp, info->upper_dev);
+ if (!err)
+ dsa_bridge_mtu_normalization(dp);
+ err = notifier_from_errno(err);
+ } else {
+ dsa_port_bridge_leave(dp, info->upper_dev);
+ err = NOTIFY_OK;
+ }
+ }
+
+ return err;
+}
+
+static int
+dsa_prevent_bridging_8021q_upper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct netlink_ext_ack *ext_ack;
+ struct net_device *slave;
+ struct dsa_port *dp;
+
+ ext_ack = netdev_notifier_info_to_extack(&info->info);
+
+ if (!is_vlan_dev(dev))
+ return NOTIFY_DONE;
+
+ slave = vlan_dev_real_dev(dev);
+ if (!dsa_slave_dev_check(slave))
+ return NOTIFY_DONE;
+
+ dp = dsa_slave_to_port(slave);
+ if (!dp->bridge_dev)
+ return NOTIFY_DONE;
+
+ /* Deny enslaving a VLAN device into a VLAN-aware bridge */
+ if (br_vlan_enabled(dp->bridge_dev) &&
+ netif_is_bridge_master(info->upper_dev) && info->linking) {
+ NL_SET_ERR_MSG_MOD(ext_ack,
+ "Cannot enslave VLAN device into VLAN aware bridge");
+ return notifier_from_errno(-EINVAL);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int
+dsa_slave_check_8021q_upper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct net_device *br = dp->bridge_dev;
+ struct bridge_vlan_info br_info;
+ struct netlink_ext_ack *extack;
+ int err = NOTIFY_DONE;
+ u16 vid;
+
+ if (!br || !br_vlan_enabled(br))
+ return NOTIFY_DONE;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+ vid = vlan_dev_vlan_id(info->upper_dev);
+
+ /* br_vlan_get_info() returns -EINVAL or -ENOENT if the
+ * device, respectively the VID is not found, returning
+ * 0 means success, which is a failure for us here.
+ */
+ err = br_vlan_get_info(br, vid, &br_info);
+ if (err == 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "This VLAN is already configured by the bridge");
+ return notifier_from_errno(-EBUSY);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int dsa_slave_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER: {
+ struct netdev_notifier_changeupper_info *info = ptr;
+
+ if (!dsa_slave_dev_check(dev))
+ return dsa_prevent_bridging_8021q_upper(dev, ptr);
+
+ if (is_vlan_dev(info->upper_dev))
+ return dsa_slave_check_8021q_upper(dev, ptr);
+ break;
+ }
+ case NETDEV_CHANGEUPPER:
+ if (!dsa_slave_dev_check(dev))
+ return NOTIFY_DONE;
+
+ return dsa_slave_changeupper(dev, ptr);
+ }
+
+ return NOTIFY_DONE;
+}
+
+struct dsa_switchdev_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct net_device *dev;
+ unsigned long event;
+};
+
+static void dsa_slave_switchdev_event_work(struct work_struct *work)
+{
+ struct dsa_switchdev_event_work *switchdev_work =
+ container_of(work, struct dsa_switchdev_event_work, work);
+ struct net_device *dev = switchdev_work->dev;
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ int err;
+
+ rtnl_lock();
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ fdb_info = &switchdev_work->fdb_info;
+ if (!fdb_info->added_by_user)
+ break;
+
+ err = dsa_port_fdb_add(dp, fdb_info->addr, fdb_info->vid);
+ if (err) {
+ netdev_dbg(dev, "fdb add failed err=%d\n", err);
+ break;
+ }
+ fdb_info->offloaded = true;
+ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
+ &fdb_info->info, NULL);
+ break;
+
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb_info = &switchdev_work->fdb_info;
+ if (!fdb_info->added_by_user)
+ break;
+
+ err = dsa_port_fdb_del(dp, fdb_info->addr, fdb_info->vid);
+ if (err) {
+ netdev_dbg(dev, "fdb del failed err=%d\n", err);
+ dev_close(dev);
+ }
+ break;
+ }
+ rtnl_unlock();
+
+ kfree(switchdev_work->fdb_info.addr);
+ kfree(switchdev_work);
+ dev_put(dev);
+}
+
+static int
+dsa_slave_switchdev_fdb_work_init(struct dsa_switchdev_event_work *
+ switchdev_work,
+ const struct switchdev_notifier_fdb_info *
+ fdb_info)
+{
+ memcpy(&switchdev_work->fdb_info, fdb_info,
+ sizeof(switchdev_work->fdb_info));
+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!switchdev_work->fdb_info.addr)
+ return -ENOMEM;
+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+ fdb_info->addr);
+ return 0;
+}
+
+/* Called under rcu_read_lock() */
+static int dsa_slave_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ struct dsa_switchdev_event_work *switchdev_work;
+ int err;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ dsa_slave_dev_check,
+ dsa_slave_port_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ if (!dsa_slave_dev_check(dev))
+ return NOTIFY_DONE;
+
+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+ if (!switchdev_work)
+ return NOTIFY_BAD;
+
+ INIT_WORK(&switchdev_work->work,
+ dsa_slave_switchdev_event_work);
+ switchdev_work->dev = dev;
+ switchdev_work->event = event;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ if (dsa_slave_switchdev_fdb_work_init(switchdev_work, ptr))
+ goto err_fdb_work_init;
+ dev_hold(dev);
+ break;
+ default:
+ kfree(switchdev_work);
+ return NOTIFY_DONE;
+ }
+
+ dsa_schedule_work(&switchdev_work->work);
+ return NOTIFY_OK;
+
+err_fdb_work_init:
+ kfree(switchdev_work);
+ return NOTIFY_BAD;
+}
+
+static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ dsa_slave_dev_check,
+ dsa_slave_port_obj_add);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ dsa_slave_dev_check,
+ dsa_slave_port_obj_del);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ dsa_slave_dev_check,
+ dsa_slave_port_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block dsa_slave_nb __read_mostly = {
+ .notifier_call = dsa_slave_netdevice_event,
+};
+
+static struct notifier_block dsa_slave_switchdev_notifier = {
+ .notifier_call = dsa_slave_switchdev_event,
+};
+
+static struct notifier_block dsa_slave_switchdev_blocking_notifier = {
+ .notifier_call = dsa_slave_switchdev_blocking_event,
+};
+
+int dsa_slave_register_notifier(void)
+{
+ struct notifier_block *nb;
+ int err;
+
+ err = register_netdevice_notifier(&dsa_slave_nb);
+ if (err)
+ return err;
+
+ err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
+ if (err)
+ goto err_switchdev_nb;
+
+ nb = &dsa_slave_switchdev_blocking_notifier;
+ err = register_switchdev_blocking_notifier(nb);
+ if (err)
+ goto err_switchdev_blocking_nb;
+
+ return 0;
+
+err_switchdev_blocking_nb:
+ unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
+err_switchdev_nb:
+ unregister_netdevice_notifier(&dsa_slave_nb);
+ return err;
+}
+
+void dsa_slave_unregister_notifier(void)
+{
+ struct notifier_block *nb;
+ int err;
+
+ nb = &dsa_slave_switchdev_blocking_notifier;
+ err = unregister_switchdev_blocking_notifier(nb);
+ if (err)
+ pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);
+
+ err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
+ if (err)
+ pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);
+
+ err = unregister_netdevice_notifier(&dsa_slave_nb);
+ if (err)
+ pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
+}
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
new file mode 100644
index 000000000..a44035872
--- /dev/null
+++ b/net/dsa/switch.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Handling of a single switch chip, part of a switch fabric
+ *
+ * Copyright (c) 2017 Savoir-faire Linux Inc.
+ * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ */
+
+#include <linux/if_bridge.h>
+#include <linux/netdevice.h>
+#include <linux/notifier.h>
+#include <linux/if_vlan.h>
+#include <net/switchdev.h>
+
+#include "dsa_priv.h"
+
+static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
+ unsigned int ageing_time)
+{
+ int i;
+
+ for (i = 0; i < ds->num_ports; ++i) {
+ struct dsa_port *dp = dsa_to_port(ds, i);
+
+ if (dp->ageing_time && dp->ageing_time < ageing_time)
+ ageing_time = dp->ageing_time;
+ }
+
+ return ageing_time;
+}
+
+static int dsa_switch_ageing_time(struct dsa_switch *ds,
+ struct dsa_notifier_ageing_time_info *info)
+{
+ unsigned int ageing_time = info->ageing_time;
+ struct switchdev_trans *trans = info->trans;
+
+ if (switchdev_trans_ph_prepare(trans)) {
+ if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
+ return -ERANGE;
+ if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
+ return -ERANGE;
+ return 0;
+ }
+
+ /* Program the fastest ageing time in case of multiple bridges */
+ ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);
+
+ if (ds->ops->set_ageing_time)
+ return ds->ops->set_ageing_time(ds, ageing_time);
+
+ return 0;
+}
+
+static bool dsa_switch_mtu_match(struct dsa_switch *ds, int port,
+ struct dsa_notifier_mtu_info *info)
+{
+ if (ds->index == info->sw_index)
+ return (port == info->port) || dsa_is_dsa_port(ds, port);
+
+ if (!info->propagate_upstream)
+ return false;
+
+ if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
+ return true;
+
+ return false;
+}
+
+static int dsa_switch_mtu(struct dsa_switch *ds,
+ struct dsa_notifier_mtu_info *info)
+{
+ int port, ret;
+
+ if (!ds->ops->port_change_mtu)
+ return -EOPNOTSUPP;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_switch_mtu_match(ds, port, info)) {
+ ret = ds->ops->port_change_mtu(ds, port, info->mtu);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int dsa_switch_bridge_join(struct dsa_switch *ds,
+ struct dsa_notifier_bridge_info *info)
+{
+ struct dsa_switch_tree *dst = ds->dst;
+
+ if (dst->index == info->tree_index && ds->index == info->sw_index &&
+ ds->ops->port_bridge_join)
+ return ds->ops->port_bridge_join(ds, info->port, info->br);
+
+ if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
+ ds->ops->crosschip_bridge_join)
+ return ds->ops->crosschip_bridge_join(ds, info->tree_index,
+ info->sw_index,
+ info->port, info->br);
+
+ return 0;
+}
+
+static int dsa_switch_bridge_leave(struct dsa_switch *ds,
+ struct dsa_notifier_bridge_info *info)
+{
+ bool unset_vlan_filtering = br_vlan_enabled(info->br);
+ struct dsa_switch_tree *dst = ds->dst;
+ int err, i;
+
+ if (dst->index == info->tree_index && ds->index == info->sw_index &&
+ ds->ops->port_bridge_leave)
+ ds->ops->port_bridge_leave(ds, info->port, info->br);
+
+ if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
+ ds->ops->crosschip_bridge_leave)
+ ds->ops->crosschip_bridge_leave(ds, info->tree_index,
+ info->sw_index, info->port,
+ info->br);
+
+ /* If the bridge was vlan_filtering, the bridge core doesn't trigger an
+ * event for changing vlan_filtering setting upon slave ports leaving
+ * it. That is a good thing, because that lets us handle it and also
+ * handle the case where the switch's vlan_filtering setting is global
+ * (not per port). When that happens, the correct moment to trigger the
+ * vlan_filtering callback is only when the last port left this bridge.
+ */
+ if (unset_vlan_filtering && ds->vlan_filtering_is_global) {
+ for (i = 0; i < ds->num_ports; i++) {
+ if (i == info->port)
+ continue;
+ if (dsa_to_port(ds, i)->bridge_dev == info->br) {
+ unset_vlan_filtering = false;
+ break;
+ }
+ }
+ }
+ if (unset_vlan_filtering) {
+ struct switchdev_trans trans;
+
+ trans.ph_prepare = true;
+ err = dsa_port_vlan_filtering(dsa_to_port(ds, info->port),
+ false, &trans);
+ if (err && err != EOPNOTSUPP)
+ return err;
+
+ trans.ph_prepare = false;
+ err = dsa_port_vlan_filtering(dsa_to_port(ds, info->port),
+ false, &trans);
+ if (err && err != EOPNOTSUPP)
+ return err;
+ }
+ return 0;
+}
+
+static int dsa_switch_fdb_add(struct dsa_switch *ds,
+ struct dsa_notifier_fdb_info *info)
+{
+ int port = dsa_towards_port(ds, info->sw_index, info->port);
+
+ if (!ds->ops->port_fdb_add)
+ return -EOPNOTSUPP;
+
+ return ds->ops->port_fdb_add(ds, port, info->addr, info->vid);
+}
+
+static int dsa_switch_fdb_del(struct dsa_switch *ds,
+ struct dsa_notifier_fdb_info *info)
+{
+ int port = dsa_towards_port(ds, info->sw_index, info->port);
+
+ if (!ds->ops->port_fdb_del)
+ return -EOPNOTSUPP;
+
+ return ds->ops->port_fdb_del(ds, port, info->addr, info->vid);
+}
+
+static bool dsa_switch_mdb_match(struct dsa_switch *ds, int port,
+ struct dsa_notifier_mdb_info *info)
+{
+ if (ds->index == info->sw_index && port == info->port)
+ return true;
+
+ if (dsa_is_dsa_port(ds, port))
+ return true;
+
+ return false;
+}
+
+static int dsa_switch_mdb_prepare(struct dsa_switch *ds,
+ struct dsa_notifier_mdb_info *info)
+{
+ int port, err;
+
+ if (!ds->ops->port_mdb_prepare || !ds->ops->port_mdb_add)
+ return -EOPNOTSUPP;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_switch_mdb_match(ds, port, info)) {
+ err = ds->ops->port_mdb_prepare(ds, port, info->mdb);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int dsa_switch_mdb_add(struct dsa_switch *ds,
+ struct dsa_notifier_mdb_info *info)
+{
+ int port;
+
+ if (switchdev_trans_ph_prepare(info->trans))
+ return dsa_switch_mdb_prepare(ds, info);
+
+ if (!ds->ops->port_mdb_add)
+ return 0;
+
+ for (port = 0; port < ds->num_ports; port++)
+ if (dsa_switch_mdb_match(ds, port, info))
+ ds->ops->port_mdb_add(ds, port, info->mdb);
+
+ return 0;
+}
+
+static int dsa_switch_mdb_del(struct dsa_switch *ds,
+ struct dsa_notifier_mdb_info *info)
+{
+ if (!ds->ops->port_mdb_del)
+ return -EOPNOTSUPP;
+
+ if (ds->index == info->sw_index)
+ return ds->ops->port_mdb_del(ds, info->port, info->mdb);
+
+ return 0;
+}
+
+static bool dsa_switch_vlan_match(struct dsa_switch *ds, int port,
+ struct dsa_notifier_vlan_info *info)
+{
+ if (ds->index == info->sw_index && port == info->port)
+ return true;
+
+ if (dsa_is_dsa_port(ds, port))
+ return true;
+
+ return false;
+}
+
+static int dsa_switch_vlan_prepare(struct dsa_switch *ds,
+ struct dsa_notifier_vlan_info *info)
+{
+ int port, err;
+
+ if (!ds->ops->port_vlan_prepare || !ds->ops->port_vlan_add)
+ return -EOPNOTSUPP;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_switch_vlan_match(ds, port, info)) {
+ err = ds->ops->port_vlan_prepare(ds, port, info->vlan);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int dsa_switch_vlan_add(struct dsa_switch *ds,
+ struct dsa_notifier_vlan_info *info)
+{
+ int port;
+
+ if (switchdev_trans_ph_prepare(info->trans))
+ return dsa_switch_vlan_prepare(ds, info);
+
+ if (!ds->ops->port_vlan_add)
+ return 0;
+
+ for (port = 0; port < ds->num_ports; port++)
+ if (dsa_switch_vlan_match(ds, port, info))
+ ds->ops->port_vlan_add(ds, port, info->vlan);
+
+ return 0;
+}
+
+static int dsa_switch_vlan_del(struct dsa_switch *ds,
+ struct dsa_notifier_vlan_info *info)
+{
+ if (!ds->ops->port_vlan_del)
+ return -EOPNOTSUPP;
+
+ if (ds->index == info->sw_index)
+ return ds->ops->port_vlan_del(ds, info->port, info->vlan);
+
+ /* Do not deprogram the DSA links as they may be used as conduit
+ * for other VLAN members in the fabric.
+ */
+ return 0;
+}
+
+static int dsa_switch_event(struct notifier_block *nb,
+ unsigned long event, void *info)
+{
+ struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
+ int err;
+
+ switch (event) {
+ case DSA_NOTIFIER_AGEING_TIME:
+ err = dsa_switch_ageing_time(ds, info);
+ break;
+ case DSA_NOTIFIER_BRIDGE_JOIN:
+ err = dsa_switch_bridge_join(ds, info);
+ break;
+ case DSA_NOTIFIER_BRIDGE_LEAVE:
+ err = dsa_switch_bridge_leave(ds, info);
+ break;
+ case DSA_NOTIFIER_FDB_ADD:
+ err = dsa_switch_fdb_add(ds, info);
+ break;
+ case DSA_NOTIFIER_FDB_DEL:
+ err = dsa_switch_fdb_del(ds, info);
+ break;
+ case DSA_NOTIFIER_MDB_ADD:
+ err = dsa_switch_mdb_add(ds, info);
+ break;
+ case DSA_NOTIFIER_MDB_DEL:
+ err = dsa_switch_mdb_del(ds, info);
+ break;
+ case DSA_NOTIFIER_VLAN_ADD:
+ err = dsa_switch_vlan_add(ds, info);
+ break;
+ case DSA_NOTIFIER_VLAN_DEL:
+ err = dsa_switch_vlan_del(ds, info);
+ break;
+ case DSA_NOTIFIER_MTU:
+ err = dsa_switch_mtu(ds, info);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ /* Non-switchdev operations cannot be rolled back. If a DSA driver
+ * returns an error during the chained call, switch chips may be in an
+ * inconsistent state.
+ */
+ if (err)
+ dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
+ event, err);
+
+ return notifier_from_errno(err);
+}
+
+int dsa_switch_register_notifier(struct dsa_switch *ds)
+{
+ ds->nb.notifier_call = dsa_switch_event;
+
+ return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
+}
+
+void dsa_switch_unregister_notifier(struct dsa_switch *ds)
+{
+ int err;
+
+ err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
+ if (err)
+ dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
+}
diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
new file mode 100644
index 000000000..a00b513c2
--- /dev/null
+++ b/net/dsa/tag_8021q.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+ *
+ * This module is not a complete tagger implementation. It only provides
+ * primitives for taggers that rely on 802.1Q VLAN tags to use. The
+ * dsa_8021q_netdev_ops is registered for API compliance and not used
+ * directly by callers.
+ */
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/dsa/8021q.h>
+
+#include "dsa_priv.h"
+
+/* Binary structure of the fake 12-bit VID field (when the TPID is
+ * ETH_P_DSA_8021Q):
+ *
+ * | 11 | 10 | 9 | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
+ * +-----------+-----+-----------------+-----------+-----------------------+
+ * | DIR | SVL | SWITCH_ID | SUBVLAN | PORT |
+ * +-----------+-----+-----------------+-----------+-----------------------+
+ *
+ * DIR - VID[11:10]:
+ * Direction flags.
+ * * 1 (0b01) for RX VLAN,
+ * * 2 (0b10) for TX VLAN.
+ * These values make the special VIDs of 0, 1 and 4095 to be left
+ * unused by this coding scheme.
+ *
+ * SVL/SUBVLAN - { VID[9], VID[5:4] }:
+ * Sub-VLAN encoding. Valid only when DIR indicates an RX VLAN.
+ * * 0 (0b000): Field does not encode a sub-VLAN, either because
+ * received traffic is untagged, PVID-tagged or because a second
+ * VLAN tag is present after this tag and not inside of it.
+ * * 1 (0b001): Received traffic is tagged with a VID value private
+ * to the host. This field encodes the index in the host's lookup
+ * table through which the value of the ingress VLAN ID can be
+ * recovered.
+ * * 2 (0b010): Field encodes a sub-VLAN.
+ * ...
+ * * 7 (0b111): Field encodes a sub-VLAN.
+ * When DIR indicates a TX VLAN, SUBVLAN must be transmitted as zero
+ * (by the host) and ignored on receive (by the switch).
+ *
+ * SWITCH_ID - VID[8:6]:
+ * Index of switch within DSA tree. Must be between 0 and 7.
+ *
+ * PORT - VID[3:0]:
+ * Index of switch port. Must be between 0 and 15.
+ */
+
+#define DSA_8021Q_DIR_SHIFT 10
+#define DSA_8021Q_DIR_MASK GENMASK(11, 10)
+#define DSA_8021Q_DIR(x) (((x) << DSA_8021Q_DIR_SHIFT) & \
+ DSA_8021Q_DIR_MASK)
+#define DSA_8021Q_DIR_RX DSA_8021Q_DIR(1)
+#define DSA_8021Q_DIR_TX DSA_8021Q_DIR(2)
+
+#define DSA_8021Q_SWITCH_ID_SHIFT 6
+#define DSA_8021Q_SWITCH_ID_MASK GENMASK(8, 6)
+#define DSA_8021Q_SWITCH_ID(x) (((x) << DSA_8021Q_SWITCH_ID_SHIFT) & \
+ DSA_8021Q_SWITCH_ID_MASK)
+
+#define DSA_8021Q_SUBVLAN_HI_SHIFT 9
+#define DSA_8021Q_SUBVLAN_HI_MASK GENMASK(9, 9)
+#define DSA_8021Q_SUBVLAN_LO_SHIFT 4
+#define DSA_8021Q_SUBVLAN_LO_MASK GENMASK(5, 4)
+#define DSA_8021Q_SUBVLAN_HI(x) (((x) & GENMASK(2, 2)) >> 2)
+#define DSA_8021Q_SUBVLAN_LO(x) ((x) & GENMASK(1, 0))
+#define DSA_8021Q_SUBVLAN(x) \
+ (((DSA_8021Q_SUBVLAN_LO(x) << DSA_8021Q_SUBVLAN_LO_SHIFT) & \
+ DSA_8021Q_SUBVLAN_LO_MASK) | \
+ ((DSA_8021Q_SUBVLAN_HI(x) << DSA_8021Q_SUBVLAN_HI_SHIFT) & \
+ DSA_8021Q_SUBVLAN_HI_MASK))
+
+#define DSA_8021Q_PORT_SHIFT 0
+#define DSA_8021Q_PORT_MASK GENMASK(3, 0)
+#define DSA_8021Q_PORT(x) (((x) << DSA_8021Q_PORT_SHIFT) & \
+ DSA_8021Q_PORT_MASK)
+
+/* Returns the VID to be inserted into the frame from xmit for switch steering
+ * instructions on egress. Encodes switch ID and port ID.
+ */
+u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port)
+{
+ return DSA_8021Q_DIR_TX | DSA_8021Q_SWITCH_ID(ds->index) |
+ DSA_8021Q_PORT(port);
+}
+EXPORT_SYMBOL_GPL(dsa_8021q_tx_vid);
+
+/* Returns the VID that will be installed as pvid for this switch port, sent as
+ * tagged egress towards the CPU port and decoded by the rcv function.
+ */
+u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port)
+{
+ return DSA_8021Q_DIR_RX | DSA_8021Q_SWITCH_ID(ds->index) |
+ DSA_8021Q_PORT(port);
+}
+EXPORT_SYMBOL_GPL(dsa_8021q_rx_vid);
+
+u16 dsa_8021q_rx_vid_subvlan(struct dsa_switch *ds, int port, u16 subvlan)
+{
+ return DSA_8021Q_DIR_RX | DSA_8021Q_SWITCH_ID(ds->index) |
+ DSA_8021Q_PORT(port) | DSA_8021Q_SUBVLAN(subvlan);
+}
+EXPORT_SYMBOL_GPL(dsa_8021q_rx_vid_subvlan);
+
+/* Returns the decoded switch ID from the RX VID. */
+int dsa_8021q_rx_switch_id(u16 vid)
+{
+ return (vid & DSA_8021Q_SWITCH_ID_MASK) >> DSA_8021Q_SWITCH_ID_SHIFT;
+}
+EXPORT_SYMBOL_GPL(dsa_8021q_rx_switch_id);
+
+/* Returns the decoded port ID from the RX VID. */
+int dsa_8021q_rx_source_port(u16 vid)
+{
+ return (vid & DSA_8021Q_PORT_MASK) >> DSA_8021Q_PORT_SHIFT;
+}
+EXPORT_SYMBOL_GPL(dsa_8021q_rx_source_port);
+
+/* Returns the decoded subvlan from the RX VID. */
+u16 dsa_8021q_rx_subvlan(u16 vid)
+{
+ u16 svl_hi, svl_lo;
+
+ svl_hi = (vid & DSA_8021Q_SUBVLAN_HI_MASK) >>
+ DSA_8021Q_SUBVLAN_HI_SHIFT;
+ svl_lo = (vid & DSA_8021Q_SUBVLAN_LO_MASK) >>
+ DSA_8021Q_SUBVLAN_LO_SHIFT;
+
+ return (svl_hi << 2) | svl_lo;
+}
+EXPORT_SYMBOL_GPL(dsa_8021q_rx_subvlan);
+
+bool vid_is_dsa_8021q(u16 vid)
+{
+ return ((vid & DSA_8021Q_DIR_MASK) == DSA_8021Q_DIR_RX ||
+ (vid & DSA_8021Q_DIR_MASK) == DSA_8021Q_DIR_TX);
+}
+EXPORT_SYMBOL_GPL(vid_is_dsa_8021q);
+
+/* If @enabled is true, installs @vid with @flags into the switch port's HW
+ * filter.
+ * If @enabled is false, deletes @vid (ignores @flags) from the port. Had the
+ * user explicitly configured this @vid through the bridge core, then the @vid
+ * is installed again, but this time with the flags from the bridge layer.
+ */
+static int dsa_8021q_vid_apply(struct dsa_8021q_context *ctx, int port, u16 vid,
+ u16 flags, bool enabled)
+{
+ struct dsa_port *dp = dsa_to_port(ctx->ds, port);
+
+ if (enabled)
+ return ctx->ops->vlan_add(ctx->ds, dp->index, vid, flags);
+
+ return ctx->ops->vlan_del(ctx->ds, dp->index, vid);
+}
+
+/* RX VLAN tagging (left) and TX VLAN tagging (right) setup shown for a single
+ * front-panel switch port (here swp0).
+ *
+ * Port identification through VLAN (802.1Q) tags has different requirements
+ * for it to work effectively:
+ * - On RX (ingress from network): each front-panel port must have a pvid
+ * that uniquely identifies it, and the egress of this pvid must be tagged
+ * towards the CPU port, so that software can recover the source port based
+ * on the VID in the frame. But this would only work for standalone ports;
+ * if bridged, this VLAN setup would break autonomous forwarding and would
+ * force all switched traffic to pass through the CPU. So we must also make
+ * the other front-panel ports members of this VID we're adding, albeit
+ * we're not making it their PVID (they'll still have their own).
+ * By the way - just because we're installing the same VID in multiple
+ * switch ports doesn't mean that they'll start to talk to one another, even
+ * while not bridged: the final forwarding decision is still an AND between
+ * the L2 forwarding information (which is limiting forwarding in this case)
+ * and the VLAN-based restrictions (of which there are none in this case,
+ * since all ports are members).
+ * - On TX (ingress from CPU and towards network) we are faced with a problem.
+ * If we were to tag traffic (from within DSA) with the port's pvid, all
+ * would be well, assuming the switch ports were standalone. Frames would
+ * have no choice but to be directed towards the correct front-panel port.
+ * But because we also want the RX VLAN to not break bridging, then
+ * inevitably that means that we have to give them a choice (of what
+ * front-panel port to go out on), and therefore we cannot steer traffic
+ * based on the RX VID. So what we do is simply install one more VID on the
+ * front-panel and CPU ports, and profit off of the fact that steering will
+ * work just by virtue of the fact that there is only one other port that's
+ * a member of the VID we're tagging the traffic with - the desired one.
+ *
+ * So at the end, each front-panel port will have one RX VID (also the PVID),
+ * the RX VID of all other front-panel ports, and one TX VID. Whereas the CPU
+ * port will have the RX and TX VIDs of all front-panel ports, and on top of
+ * that, is also tagged-input and tagged-output (VLAN trunk).
+ *
+ * CPU port CPU port
+ * +-------------+-----+-------------+ +-------------+-----+-------------+
+ * | RX VID | | | | TX VID | | |
+ * | of swp0 | | | | of swp0 | | |
+ * | +-----+ | | +-----+ |
+ * | ^ T | | | Tagged |
+ * | | | | | ingress |
+ * | +-------+---+---+-------+ | | +-----------+ |
+ * | | | | | | | | Untagged |
+ * | | U v U v U v | | v egress |
+ * | +-----+ +-----+ +-----+ +-----+ | | +-----+ +-----+ +-----+ +-----+ |
+ * | | | | | | | | | | | | | | | | | | | |
+ * | |PVID | | | | | | | | | | | | | | | | | |
+ * +-+-----+-+-----+-+-----+-+-----+-+ +-+-----+-+-----+-+-----+-+-----+-+
+ * swp0 swp1 swp2 swp3 swp0 swp1 swp2 swp3
+ */
+static int dsa_8021q_setup_port(struct dsa_8021q_context *ctx, int port,
+ bool enabled)
+{
+ int upstream = dsa_upstream_port(ctx->ds, port);
+ u16 rx_vid = dsa_8021q_rx_vid(ctx->ds, port);
+ u16 tx_vid = dsa_8021q_tx_vid(ctx->ds, port);
+ struct net_device *master;
+ int i, err, subvlan;
+
+ /* The CPU port is implicitly configured by
+ * configuring the front-panel ports
+ */
+ if (!dsa_is_user_port(ctx->ds, port))
+ return 0;
+
+ master = dsa_to_port(ctx->ds, port)->cpu_dp->master;
+
+ /* Add this user port's RX VID to the membership list of all others
+ * (including itself). This is so that bridging will not be hindered.
+ * L2 forwarding rules still take precedence when there are no VLAN
+ * restrictions, so there are no concerns about leaking traffic.
+ */
+ for (i = 0; i < ctx->ds->num_ports; i++) {
+ u16 flags;
+
+ if (i == upstream)
+ continue;
+ else if (i == port)
+ /* The RX VID is pvid on this port */
+ flags = BRIDGE_VLAN_INFO_UNTAGGED |
+ BRIDGE_VLAN_INFO_PVID;
+ else
+ /* The RX VID is a regular VLAN on all others */
+ flags = BRIDGE_VLAN_INFO_UNTAGGED;
+
+ err = dsa_8021q_vid_apply(ctx, i, rx_vid, flags, enabled);
+ if (err) {
+ dev_err(ctx->ds->dev,
+ "Failed to apply RX VID %d to port %d: %d\n",
+ rx_vid, port, err);
+ return err;
+ }
+ }
+
+ /* CPU port needs to see this port's RX VID
+ * as tagged egress.
+ */
+ err = dsa_8021q_vid_apply(ctx, upstream, rx_vid, 0, enabled);
+ if (err) {
+ dev_err(ctx->ds->dev,
+ "Failed to apply RX VID %d to port %d: %d\n",
+ rx_vid, port, err);
+ return err;
+ }
+
+ /* Add to the master's RX filter not only @rx_vid, but in fact
+ * the entire subvlan range, just in case this DSA switch might
+ * want to use sub-VLANs.
+ */
+ for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++) {
+ u16 vid = dsa_8021q_rx_vid_subvlan(ctx->ds, port, subvlan);
+
+ if (enabled)
+ vlan_vid_add(master, ctx->proto, vid);
+ else
+ vlan_vid_del(master, ctx->proto, vid);
+ }
+
+ /* Finally apply the TX VID on this port and on the CPU port */
+ err = dsa_8021q_vid_apply(ctx, port, tx_vid, BRIDGE_VLAN_INFO_UNTAGGED,
+ enabled);
+ if (err) {
+ dev_err(ctx->ds->dev,
+ "Failed to apply TX VID %d on port %d: %d\n",
+ tx_vid, port, err);
+ return err;
+ }
+ err = dsa_8021q_vid_apply(ctx, upstream, tx_vid, 0, enabled);
+ if (err) {
+ dev_err(ctx->ds->dev,
+ "Failed to apply TX VID %d on port %d: %d\n",
+ tx_vid, upstream, err);
+ return err;
+ }
+
+ return err;
+}
+
+int dsa_8021q_setup(struct dsa_8021q_context *ctx, bool enabled)
+{
+ int rc, port;
+
+ ASSERT_RTNL();
+
+ for (port = 0; port < ctx->ds->num_ports; port++) {
+ rc = dsa_8021q_setup_port(ctx, port, enabled);
+ if (rc < 0) {
+ dev_err(ctx->ds->dev,
+ "Failed to setup VLAN tagging for port %d: %d\n",
+ port, rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dsa_8021q_setup);
+
+static int dsa_8021q_crosschip_link_apply(struct dsa_8021q_context *ctx,
+ int port,
+ struct dsa_8021q_context *other_ctx,
+ int other_port, bool enabled)
+{
+ u16 rx_vid = dsa_8021q_rx_vid(ctx->ds, port);
+
+ /* @rx_vid of local @ds port @port goes to @other_port of
+ * @other_ds
+ */
+ return dsa_8021q_vid_apply(other_ctx, other_port, rx_vid,
+ BRIDGE_VLAN_INFO_UNTAGGED, enabled);
+}
+
+static int dsa_8021q_crosschip_link_add(struct dsa_8021q_context *ctx, int port,
+ struct dsa_8021q_context *other_ctx,
+ int other_port)
+{
+ struct dsa_8021q_crosschip_link *c;
+
+ list_for_each_entry(c, &ctx->crosschip_links, list) {
+ if (c->port == port && c->other_ctx == other_ctx &&
+ c->other_port == other_port) {
+ refcount_inc(&c->refcount);
+ return 0;
+ }
+ }
+
+ dev_dbg(ctx->ds->dev,
+ "adding crosschip link from port %d to %s port %d\n",
+ port, dev_name(other_ctx->ds->dev), other_port);
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return -ENOMEM;
+
+ c->port = port;
+ c->other_ctx = other_ctx;
+ c->other_port = other_port;
+ refcount_set(&c->refcount, 1);
+
+ list_add(&c->list, &ctx->crosschip_links);
+
+ return 0;
+}
+
+static void dsa_8021q_crosschip_link_del(struct dsa_8021q_context *ctx,
+ struct dsa_8021q_crosschip_link *c,
+ bool *keep)
+{
+ *keep = !refcount_dec_and_test(&c->refcount);
+
+ if (*keep)
+ return;
+
+ dev_dbg(ctx->ds->dev,
+ "deleting crosschip link from port %d to %s port %d\n",
+ c->port, dev_name(c->other_ctx->ds->dev), c->other_port);
+
+ list_del(&c->list);
+ kfree(c);
+}
+
+/* Make traffic from local port @port be received by remote port @other_port.
+ * This means that our @rx_vid needs to be installed on @other_ds's upstream
+ * and user ports. The user ports should be egress-untagged so that they can
+ * pop the dsa_8021q VLAN. But the @other_upstream can be either egress-tagged
+ * or untagged: it doesn't matter, since it should never egress a frame having
+ * our @rx_vid.
+ */
+int dsa_8021q_crosschip_bridge_join(struct dsa_8021q_context *ctx, int port,
+ struct dsa_8021q_context *other_ctx,
+ int other_port)
+{
+ /* @other_upstream is how @other_ds reaches us. If we are part
+ * of disjoint trees, then we are probably connected through
+ * our CPU ports. If we're part of the same tree though, we should
+ * probably use dsa_towards_port.
+ */
+ int other_upstream = dsa_upstream_port(other_ctx->ds, other_port);
+ int rc;
+
+ rc = dsa_8021q_crosschip_link_add(ctx, port, other_ctx, other_port);
+ if (rc)
+ return rc;
+
+ rc = dsa_8021q_crosschip_link_apply(ctx, port, other_ctx,
+ other_port, true);
+ if (rc)
+ return rc;
+
+ rc = dsa_8021q_crosschip_link_add(ctx, port, other_ctx, other_upstream);
+ if (rc)
+ return rc;
+
+ return dsa_8021q_crosschip_link_apply(ctx, port, other_ctx,
+ other_upstream, true);
+}
+EXPORT_SYMBOL_GPL(dsa_8021q_crosschip_bridge_join);
+
+int dsa_8021q_crosschip_bridge_leave(struct dsa_8021q_context *ctx, int port,
+ struct dsa_8021q_context *other_ctx,
+ int other_port)
+{
+ int other_upstream = dsa_upstream_port(other_ctx->ds, other_port);
+ struct dsa_8021q_crosschip_link *c, *n;
+
+ list_for_each_entry_safe(c, n, &ctx->crosschip_links, list) {
+ if (c->port == port && c->other_ctx == other_ctx &&
+ (c->other_port == other_port ||
+ c->other_port == other_upstream)) {
+ struct dsa_8021q_context *other_ctx = c->other_ctx;
+ int other_port = c->other_port;
+ bool keep;
+ int rc;
+
+ dsa_8021q_crosschip_link_del(ctx, c, &keep);
+ if (keep)
+ continue;
+
+ rc = dsa_8021q_crosschip_link_apply(ctx, port,
+ other_ctx,
+ other_port,
+ false);
+ if (rc)
+ return rc;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dsa_8021q_crosschip_bridge_leave);
+
+struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
+ u16 tpid, u16 tci)
+{
+ /* skb->data points at skb_mac_header, which
+ * is fine for vlan_insert_tag.
+ */
+ return vlan_insert_tag(skb, htons(tpid), tci);
+}
+EXPORT_SYMBOL_GPL(dsa_8021q_xmit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/net/dsa/tag_ar9331.c b/net/dsa/tag_ar9331.c
new file mode 100644
index 000000000..002cf7f95
--- /dev/null
+++ b/net/dsa/tag_ar9331.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+ */
+
+
+#include <linux/bitfield.h>
+#include <linux/etherdevice.h>
+
+#include "dsa_priv.h"
+
+#define AR9331_HDR_LEN 2
+#define AR9331_HDR_VERSION 1
+
+#define AR9331_HDR_VERSION_MASK GENMASK(15, 14)
+#define AR9331_HDR_PRIORITY_MASK GENMASK(13, 12)
+#define AR9331_HDR_TYPE_MASK GENMASK(10, 8)
+#define AR9331_HDR_BROADCAST BIT(7)
+#define AR9331_HDR_FROM_CPU BIT(6)
+/* AR9331_HDR_RESERVED - not used or may be version field.
+ * According to the AR8216 doc it should 0b10. On AR9331 it is 0b11 on RX path
+ * and should be set to 0b11 to make it work.
+ */
+#define AR9331_HDR_RESERVED_MASK GENMASK(5, 4)
+#define AR9331_HDR_PORT_NUM_MASK GENMASK(3, 0)
+
+static struct sk_buff *ar9331_tag_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ __le16 *phdr;
+ u16 hdr;
+
+ phdr = skb_push(skb, AR9331_HDR_LEN);
+
+ hdr = FIELD_PREP(AR9331_HDR_VERSION_MASK, AR9331_HDR_VERSION);
+ hdr |= AR9331_HDR_FROM_CPU | dp->index;
+ /* 0b10 for AR8216 and 0b11 for AR9331 */
+ hdr |= AR9331_HDR_RESERVED_MASK;
+
+ phdr[0] = cpu_to_le16(hdr);
+
+ return skb;
+}
+
+static struct sk_buff *ar9331_tag_rcv(struct sk_buff *skb,
+ struct net_device *ndev,
+ struct packet_type *pt)
+{
+ u8 ver, port;
+ u16 hdr;
+
+ if (unlikely(!pskb_may_pull(skb, AR9331_HDR_LEN)))
+ return NULL;
+
+ hdr = le16_to_cpu(*(__le16 *)skb_mac_header(skb));
+
+ ver = FIELD_GET(AR9331_HDR_VERSION_MASK, hdr);
+ if (unlikely(ver != AR9331_HDR_VERSION)) {
+ netdev_warn_once(ndev, "%s:%i wrong header version 0x%2x\n",
+ __func__, __LINE__, hdr);
+ return NULL;
+ }
+
+ if (unlikely(hdr & AR9331_HDR_FROM_CPU)) {
+ netdev_warn_once(ndev, "%s:%i packet should not be from cpu 0x%2x\n",
+ __func__, __LINE__, hdr);
+ return NULL;
+ }
+
+ skb_pull_rcsum(skb, AR9331_HDR_LEN);
+
+ /* Get source port information */
+ port = FIELD_GET(AR9331_HDR_PORT_NUM_MASK, hdr);
+
+ skb->dev = dsa_master_find_slave(ndev, 0, port);
+ if (!skb->dev)
+ return NULL;
+
+ return skb;
+}
+
+static const struct dsa_device_ops ar9331_netdev_ops = {
+ .name = "ar9331",
+ .proto = DSA_TAG_PROTO_AR9331,
+ .xmit = ar9331_tag_xmit,
+ .rcv = ar9331_tag_rcv,
+ .overhead = AR9331_HDR_LEN,
+};
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_AR9331);
+module_dsa_tag_driver(ar9331_netdev_ops);
diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
new file mode 100644
index 000000000..e934dace3
--- /dev/null
+++ b/net/dsa/tag_brcm.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Broadcom tag support
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include "dsa_priv.h"
+
+/* This tag length is 4 bytes, older ones were 6 bytes, we do not
+ * handle them
+ */
+#define BRCM_TAG_LEN 4
+
+/* Tag is constructed and desconstructed using byte by byte access
+ * because the tag is placed after the MAC Source Address, which does
+ * not make it 4-bytes aligned, so this might cause unaligned accesses
+ * on most systems where this is used.
+ */
+
+/* Ingress and egress opcodes */
+#define BRCM_OPCODE_SHIFT 5
+#define BRCM_OPCODE_MASK 0x7
+
+/* Ingress fields */
+/* 1st byte in the tag */
+#define BRCM_IG_TC_SHIFT 2
+#define BRCM_IG_TC_MASK 0x7
+/* 2nd byte in the tag */
+#define BRCM_IG_TE_MASK 0x3
+#define BRCM_IG_TS_SHIFT 7
+/* 3rd byte in the tag */
+#define BRCM_IG_DSTMAP2_MASK 1
+#define BRCM_IG_DSTMAP1_MASK 0xff
+
+/* Egress fields */
+
+/* 2nd byte in the tag */
+#define BRCM_EG_CID_MASK 0xff
+
+/* 3rd byte in the tag */
+#define BRCM_EG_RC_MASK 0xff
+#define BRCM_EG_RC_RSVD (3 << 6)
+#define BRCM_EG_RC_EXCEPTION (1 << 5)
+#define BRCM_EG_RC_PROT_SNOOP (1 << 4)
+#define BRCM_EG_RC_PROT_TERM (1 << 3)
+#define BRCM_EG_RC_SWITCH (1 << 2)
+#define BRCM_EG_RC_MAC_LEARN (1 << 1)
+#define BRCM_EG_RC_MIRROR (1 << 0)
+#define BRCM_EG_TC_SHIFT 5
+#define BRCM_EG_TC_MASK 0x7
+#define BRCM_EG_PID_MASK 0x1f
+
+#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM) || \
+ IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_PREPEND)
+
+static struct sk_buff *brcm_tag_xmit_ll(struct sk_buff *skb,
+ struct net_device *dev,
+ unsigned int offset)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ u16 queue = skb_get_queue_mapping(skb);
+ u8 *brcm_tag;
+
+ /* The Ethernet switch we are interfaced with needs packets to be at
+ * least 64 bytes (including FCS) otherwise they will be discarded when
+ * they enter the switch port logic. When Broadcom tags are enabled, we
+ * need to make sure that packets are at least 68 bytes
+ * (including FCS and tag) because the length verification is done after
+ * the Broadcom tag is stripped off the ingress packet.
+ *
+ * Let dsa_slave_xmit() free the SKB
+ */
+ if (__skb_put_padto(skb, ETH_ZLEN + BRCM_TAG_LEN, false))
+ return NULL;
+
+ skb_push(skb, BRCM_TAG_LEN);
+
+ if (offset)
+ memmove(skb->data, skb->data + BRCM_TAG_LEN, offset);
+
+ brcm_tag = skb->data + offset;
+
+ /* Set the ingress opcode, traffic class, tag enforcment is
+ * deprecated
+ */
+ brcm_tag[0] = (1 << BRCM_OPCODE_SHIFT) |
+ ((queue & BRCM_IG_TC_MASK) << BRCM_IG_TC_SHIFT);
+ brcm_tag[1] = 0;
+ brcm_tag[2] = 0;
+ if (dp->index == 8)
+ brcm_tag[2] = BRCM_IG_DSTMAP2_MASK;
+ brcm_tag[3] = (1 << dp->index) & BRCM_IG_DSTMAP1_MASK;
+
+ /* Now tell the master network device about the desired output queue
+ * as well
+ */
+ skb_set_queue_mapping(skb, BRCM_TAG_SET_PORT_QUEUE(dp->index, queue));
+
+ return skb;
+}
+
+/* Frames with this tag have one of these two layouts:
+ * -----------------------------------
+ * | MAC DA | MAC SA | 4b tag | Type | DSA_TAG_PROTO_BRCM
+ * -----------------------------------
+ * -----------------------------------
+ * | 4b tag | MAC DA | MAC SA | Type | DSA_TAG_PROTO_BRCM_PREPEND
+ * -----------------------------------
+ * In both cases, at receive time, skb->data points 2 bytes before the actual
+ * Ethernet type field and we have an offset of 4bytes between where skb->data
+ * and where the payload starts. So the same low-level receive function can be
+ * used.
+ */
+static struct sk_buff *brcm_tag_rcv_ll(struct sk_buff *skb,
+ struct net_device *dev,
+ struct packet_type *pt,
+ unsigned int offset)
+{
+ int source_port;
+ u8 *brcm_tag;
+
+ if (unlikely(!pskb_may_pull(skb, BRCM_TAG_LEN)))
+ return NULL;
+
+ brcm_tag = skb->data - offset;
+
+ /* The opcode should never be different than 0b000 */
+ if (unlikely((brcm_tag[0] >> BRCM_OPCODE_SHIFT) & BRCM_OPCODE_MASK))
+ return NULL;
+
+ /* We should never see a reserved reason code without knowing how to
+ * handle it
+ */
+ if (unlikely(brcm_tag[2] & BRCM_EG_RC_RSVD))
+ return NULL;
+
+ /* Locate which port this is coming from */
+ source_port = brcm_tag[3] & BRCM_EG_PID_MASK;
+
+ skb->dev = dsa_master_find_slave(dev, 0, source_port);
+ if (!skb->dev)
+ return NULL;
+
+ /* Remove Broadcom tag and update checksum */
+ skb_pull_rcsum(skb, BRCM_TAG_LEN);
+
+ skb->offload_fwd_mark = 1;
+
+ return skb;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM)
+static struct sk_buff *brcm_tag_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ /* Build the tag after the MAC Source Address */
+ return brcm_tag_xmit_ll(skb, dev, 2 * ETH_ALEN);
+}
+
+
+static struct sk_buff *brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt)
+{
+ struct sk_buff *nskb;
+
+ /* skb->data points to the EtherType, the tag is right before it */
+ nskb = brcm_tag_rcv_ll(skb, dev, pt, 2);
+ if (!nskb)
+ return nskb;
+
+ /* Move the Ethernet DA and SA */
+ memmove(nskb->data - ETH_HLEN,
+ nskb->data - ETH_HLEN - BRCM_TAG_LEN,
+ 2 * ETH_ALEN);
+
+ return nskb;
+}
+
+static const struct dsa_device_ops brcm_netdev_ops = {
+ .name = "brcm",
+ .proto = DSA_TAG_PROTO_BRCM,
+ .xmit = brcm_tag_xmit,
+ .rcv = brcm_tag_rcv,
+ .overhead = BRCM_TAG_LEN,
+};
+
+DSA_TAG_DRIVER(brcm_netdev_ops);
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM);
+#endif
+
+#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_PREPEND)
+static struct sk_buff *brcm_tag_xmit_prepend(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ /* tag is prepended to the packet */
+ return brcm_tag_xmit_ll(skb, dev, 0);
+}
+
+static struct sk_buff *brcm_tag_rcv_prepend(struct sk_buff *skb,
+ struct net_device *dev,
+ struct packet_type *pt)
+{
+ /* tag is prepended to the packet */
+ return brcm_tag_rcv_ll(skb, dev, pt, ETH_HLEN);
+}
+
+static const struct dsa_device_ops brcm_prepend_netdev_ops = {
+ .name = "brcm-prepend",
+ .proto = DSA_TAG_PROTO_BRCM_PREPEND,
+ .xmit = brcm_tag_xmit_prepend,
+ .rcv = brcm_tag_rcv_prepend,
+ .overhead = BRCM_TAG_LEN,
+};
+
+DSA_TAG_DRIVER(brcm_prepend_netdev_ops);
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM_PREPEND);
+#endif
+
+static struct dsa_tag_driver *dsa_tag_driver_array[] = {
+#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM)
+ &DSA_TAG_DRIVER_NAME(brcm_netdev_ops),
+#endif
+#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_PREPEND)
+ &DSA_TAG_DRIVER_NAME(brcm_prepend_netdev_ops),
+#endif
+};
+
+module_dsa_tag_drivers(dsa_tag_driver_array);
+
+MODULE_LICENSE("GPL");
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
new file mode 100644
index 000000000..63d690a0f
--- /dev/null
+++ b/net/dsa/tag_dsa.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * net/dsa/tag_dsa.c - (Non-ethertype) DSA tagging
+ * Copyright (c) 2008-2009 Marvell Semiconductor
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include "dsa_priv.h"
+
+#define DSA_HLEN 4
+
+static struct sk_buff *dsa_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ u8 *dsa_header;
+
+ /*
+ * Convert the outermost 802.1q tag to a DSA tag for tagged
+ * packets, or insert a DSA tag between the addresses and
+ * the ethertype field for untagged packets.
+ */
+ if (skb->protocol == htons(ETH_P_8021Q)) {
+ /*
+ * Construct tagged FROM_CPU DSA tag from 802.1q tag.
+ */
+ dsa_header = skb->data + 2 * ETH_ALEN;
+ dsa_header[0] = 0x60 | dp->ds->index;
+ dsa_header[1] = dp->index << 3;
+
+ /*
+ * Move CFI field from byte 2 to byte 1.
+ */
+ if (dsa_header[2] & 0x10) {
+ dsa_header[1] |= 0x01;
+ dsa_header[2] &= ~0x10;
+ }
+ } else {
+ skb_push(skb, DSA_HLEN);
+
+ memmove(skb->data, skb->data + DSA_HLEN, 2 * ETH_ALEN);
+
+ /*
+ * Construct untagged FROM_CPU DSA tag.
+ */
+ dsa_header = skb->data + 2 * ETH_ALEN;
+ dsa_header[0] = 0x40 | dp->ds->index;
+ dsa_header[1] = dp->index << 3;
+ dsa_header[2] = 0x00;
+ dsa_header[3] = 0x00;
+ }
+
+ return skb;
+}
+
+static struct sk_buff *dsa_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt)
+{
+ u8 *dsa_header;
+ int source_device;
+ int source_port;
+
+ if (unlikely(!pskb_may_pull(skb, DSA_HLEN)))
+ return NULL;
+
+ /*
+ * The ethertype field is part of the DSA header.
+ */
+ dsa_header = skb->data - 2;
+
+ /*
+ * Check that frame type is either TO_CPU or FORWARD.
+ */
+ if ((dsa_header[0] & 0xc0) != 0x00 && (dsa_header[0] & 0xc0) != 0xc0)
+ return NULL;
+
+ /*
+ * Determine source device and port.
+ */
+ source_device = dsa_header[0] & 0x1f;
+ source_port = (dsa_header[1] >> 3) & 0x1f;
+
+ skb->dev = dsa_master_find_slave(dev, source_device, source_port);
+ if (!skb->dev)
+ return NULL;
+
+ /*
+ * Convert the DSA header to an 802.1q header if the 'tagged'
+ * bit in the DSA header is set. If the 'tagged' bit is clear,
+ * delete the DSA header entirely.
+ */
+ if (dsa_header[0] & 0x20) {
+ u8 new_header[4];
+
+ /*
+ * Insert 802.1q ethertype and copy the VLAN-related
+ * fields, but clear the bit that will hold CFI (since
+ * DSA uses that bit location for another purpose).
+ */
+ new_header[0] = (ETH_P_8021Q >> 8) & 0xff;
+ new_header[1] = ETH_P_8021Q & 0xff;
+ new_header[2] = dsa_header[2] & ~0x10;
+ new_header[3] = dsa_header[3];
+
+ /*
+ * Move CFI bit from its place in the DSA header to
+ * its 802.1q-designated place.
+ */
+ if (dsa_header[1] & 0x01)
+ new_header[2] |= 0x10;
+
+ /*
+ * Update packet checksum if skb is CHECKSUM_COMPLETE.
+ */
+ if (skb->ip_summed == CHECKSUM_COMPLETE) {
+ __wsum c = skb->csum;
+ c = csum_add(c, csum_partial(new_header + 2, 2, 0));
+ c = csum_sub(c, csum_partial(dsa_header + 2, 2, 0));
+ skb->csum = c;
+ }
+
+ memcpy(dsa_header, new_header, DSA_HLEN);
+ } else {
+ /*
+ * Remove DSA tag and update checksum.
+ */
+ skb_pull_rcsum(skb, DSA_HLEN);
+ memmove(skb->data - ETH_HLEN,
+ skb->data - ETH_HLEN - DSA_HLEN,
+ 2 * ETH_ALEN);
+ }
+
+ skb->offload_fwd_mark = 1;
+
+ return skb;
+}
+
+static const struct dsa_device_ops dsa_netdev_ops = {
+ .name = "dsa",
+ .proto = DSA_TAG_PROTO_DSA,
+ .xmit = dsa_xmit,
+ .rcv = dsa_rcv,
+ .overhead = DSA_HLEN,
+};
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_DSA);
+
+module_dsa_tag_driver(dsa_netdev_ops);
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c
new file mode 100644
index 000000000..abf70a29d
--- /dev/null
+++ b/net/dsa/tag_edsa.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * net/dsa/tag_edsa.c - Ethertype DSA tagging
+ * Copyright (c) 2008-2009 Marvell Semiconductor
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include "dsa_priv.h"
+
+#define DSA_HLEN 4
+#define EDSA_HLEN 8
+
+#define FRAME_TYPE_TO_CPU 0x00
+#define FRAME_TYPE_FORWARD 0x03
+
+#define TO_CPU_CODE_MGMT_TRAP 0x00
+#define TO_CPU_CODE_FRAME2REG 0x01
+#define TO_CPU_CODE_IGMP_MLD_TRAP 0x02
+#define TO_CPU_CODE_POLICY_TRAP 0x03
+#define TO_CPU_CODE_ARP_MIRROR 0x04
+#define TO_CPU_CODE_POLICY_MIRROR 0x05
+
+static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ u8 *edsa_header;
+
+ /*
+ * Convert the outermost 802.1q tag to a DSA tag and prepend
+ * a DSA ethertype field is the packet is tagged, or insert
+ * a DSA ethertype plus DSA tag between the addresses and the
+ * current ethertype field if the packet is untagged.
+ */
+ if (skb->protocol == htons(ETH_P_8021Q)) {
+ skb_push(skb, DSA_HLEN);
+
+ memmove(skb->data, skb->data + DSA_HLEN, 2 * ETH_ALEN);
+
+ /*
+ * Construct tagged FROM_CPU DSA tag from 802.1q tag.
+ */
+ edsa_header = skb->data + 2 * ETH_ALEN;
+ edsa_header[0] = (ETH_P_EDSA >> 8) & 0xff;
+ edsa_header[1] = ETH_P_EDSA & 0xff;
+ edsa_header[2] = 0x00;
+ edsa_header[3] = 0x00;
+ edsa_header[4] = 0x60 | dp->ds->index;
+ edsa_header[5] = dp->index << 3;
+
+ /*
+ * Move CFI field from byte 6 to byte 5.
+ */
+ if (edsa_header[6] & 0x10) {
+ edsa_header[5] |= 0x01;
+ edsa_header[6] &= ~0x10;
+ }
+ } else {
+ skb_push(skb, EDSA_HLEN);
+
+ memmove(skb->data, skb->data + EDSA_HLEN, 2 * ETH_ALEN);
+
+ /*
+ * Construct untagged FROM_CPU DSA tag.
+ */
+ edsa_header = skb->data + 2 * ETH_ALEN;
+ edsa_header[0] = (ETH_P_EDSA >> 8) & 0xff;
+ edsa_header[1] = ETH_P_EDSA & 0xff;
+ edsa_header[2] = 0x00;
+ edsa_header[3] = 0x00;
+ edsa_header[4] = 0x40 | dp->ds->index;
+ edsa_header[5] = dp->index << 3;
+ edsa_header[6] = 0x00;
+ edsa_header[7] = 0x00;
+ }
+
+ return skb;
+}
+
+static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt)
+{
+ u8 *edsa_header;
+ int frame_type;
+ int code;
+ int source_device;
+ int source_port;
+
+ if (unlikely(!pskb_may_pull(skb, EDSA_HLEN)))
+ return NULL;
+
+ /*
+ * Skip the two null bytes after the ethertype.
+ */
+ edsa_header = skb->data + 2;
+
+ /*
+ * Check that frame type is either TO_CPU or FORWARD.
+ */
+ frame_type = edsa_header[0] >> 6;
+
+ switch (frame_type) {
+ case FRAME_TYPE_TO_CPU:
+ code = (edsa_header[1] & 0x6) | ((edsa_header[2] >> 4) & 1);
+
+ /*
+ * Mark the frame to never egress on any port of the same switch
+ * unless it's a trapped IGMP/MLD packet, in which case the
+ * bridge might want to forward it.
+ */
+ if (code != TO_CPU_CODE_IGMP_MLD_TRAP)
+ skb->offload_fwd_mark = 1;
+
+ break;
+
+ case FRAME_TYPE_FORWARD:
+ skb->offload_fwd_mark = 1;
+ break;
+
+ default:
+ return NULL;
+ }
+
+ /*
+ * Determine source device and port.
+ */
+ source_device = edsa_header[0] & 0x1f;
+ source_port = (edsa_header[1] >> 3) & 0x1f;
+
+ skb->dev = dsa_master_find_slave(dev, source_device, source_port);
+ if (!skb->dev)
+ return NULL;
+
+ /*
+ * If the 'tagged' bit is set, convert the DSA tag to a 802.1q
+ * tag and delete the ethertype part. If the 'tagged' bit is
+ * clear, delete the ethertype and the DSA tag parts.
+ */
+ if (edsa_header[0] & 0x20) {
+ u8 new_header[4];
+
+ /*
+ * Insert 802.1q ethertype and copy the VLAN-related
+ * fields, but clear the bit that will hold CFI (since
+ * DSA uses that bit location for another purpose).
+ */
+ new_header[0] = (ETH_P_8021Q >> 8) & 0xff;
+ new_header[1] = ETH_P_8021Q & 0xff;
+ new_header[2] = edsa_header[2] & ~0x10;
+ new_header[3] = edsa_header[3];
+
+ /*
+ * Move CFI bit from its place in the DSA header to
+ * its 802.1q-designated place.
+ */
+ if (edsa_header[1] & 0x01)
+ new_header[2] |= 0x10;
+
+ skb_pull_rcsum(skb, DSA_HLEN);
+
+ /*
+ * Update packet checksum if skb is CHECKSUM_COMPLETE.
+ */
+ if (skb->ip_summed == CHECKSUM_COMPLETE) {
+ __wsum c = skb->csum;
+ c = csum_add(c, csum_partial(new_header + 2, 2, 0));
+ c = csum_sub(c, csum_partial(edsa_header + 2, 2, 0));
+ skb->csum = c;
+ }
+
+ memcpy(edsa_header, new_header, DSA_HLEN);
+
+ memmove(skb->data - ETH_HLEN,
+ skb->data - ETH_HLEN - DSA_HLEN,
+ 2 * ETH_ALEN);
+ } else {
+ /*
+ * Remove DSA tag and update checksum.
+ */
+ skb_pull_rcsum(skb, EDSA_HLEN);
+ memmove(skb->data - ETH_HLEN,
+ skb->data - ETH_HLEN - EDSA_HLEN,
+ 2 * ETH_ALEN);
+ }
+
+ return skb;
+}
+
+static const struct dsa_device_ops edsa_netdev_ops = {
+ .name = "edsa",
+ .proto = DSA_TAG_PROTO_EDSA,
+ .xmit = edsa_xmit,
+ .rcv = edsa_rcv,
+ .overhead = EDSA_HLEN,
+};
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_EDSA);
+
+module_dsa_tag_driver(edsa_netdev_ops);
diff --git a/net/dsa/tag_gswip.c b/net/dsa/tag_gswip.c
new file mode 100644
index 000000000..2f5bd5e33
--- /dev/null
+++ b/net/dsa/tag_gswip.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel / Lantiq GSWIP V2.0 PMAC tag support
+ *
+ * Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de>
+ */
+
+#include <linux/bitops.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <net/dsa.h>
+
+#include "dsa_priv.h"
+
+#define GSWIP_TX_HEADER_LEN 4
+
+/* special tag in TX path header */
+/* Byte 0 */
+#define GSWIP_TX_SLPID_SHIFT 0 /* source port ID */
+#define GSWIP_TX_SLPID_CPU 2
+#define GSWIP_TX_SLPID_APP1 3
+#define GSWIP_TX_SLPID_APP2 4
+#define GSWIP_TX_SLPID_APP3 5
+#define GSWIP_TX_SLPID_APP4 6
+#define GSWIP_TX_SLPID_APP5 7
+
+/* Byte 1 */
+#define GSWIP_TX_CRCGEN_DIS BIT(7)
+#define GSWIP_TX_DPID_SHIFT 0 /* destination group ID */
+#define GSWIP_TX_DPID_ELAN 0
+#define GSWIP_TX_DPID_EWAN 1
+#define GSWIP_TX_DPID_CPU 2
+#define GSWIP_TX_DPID_APP1 3
+#define GSWIP_TX_DPID_APP2 4
+#define GSWIP_TX_DPID_APP3 5
+#define GSWIP_TX_DPID_APP4 6
+#define GSWIP_TX_DPID_APP5 7
+
+/* Byte 2 */
+#define GSWIP_TX_PORT_MAP_EN BIT(7)
+#define GSWIP_TX_PORT_MAP_SEL BIT(6)
+#define GSWIP_TX_LRN_DIS BIT(5)
+#define GSWIP_TX_CLASS_EN BIT(4)
+#define GSWIP_TX_CLASS_SHIFT 0
+#define GSWIP_TX_CLASS_MASK GENMASK(3, 0)
+
+/* Byte 3 */
+#define GSWIP_TX_DPID_EN BIT(0)
+#define GSWIP_TX_PORT_MAP_SHIFT 1
+#define GSWIP_TX_PORT_MAP_MASK GENMASK(6, 1)
+
+#define GSWIP_RX_HEADER_LEN 8
+
+/* special tag in RX path header */
+/* Byte 7 */
+#define GSWIP_RX_SPPID_SHIFT 4
+#define GSWIP_RX_SPPID_MASK GENMASK(6, 4)
+
+static struct sk_buff *gswip_tag_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ u8 *gswip_tag;
+
+ skb_push(skb, GSWIP_TX_HEADER_LEN);
+
+ gswip_tag = skb->data;
+ gswip_tag[0] = GSWIP_TX_SLPID_CPU;
+ gswip_tag[1] = GSWIP_TX_DPID_ELAN;
+ gswip_tag[2] = GSWIP_TX_PORT_MAP_EN | GSWIP_TX_PORT_MAP_SEL;
+ gswip_tag[3] = BIT(dp->index + GSWIP_TX_PORT_MAP_SHIFT) & GSWIP_TX_PORT_MAP_MASK;
+ gswip_tag[3] |= GSWIP_TX_DPID_EN;
+
+ return skb;
+}
+
+static struct sk_buff *gswip_tag_rcv(struct sk_buff *skb,
+ struct net_device *dev,
+ struct packet_type *pt)
+{
+ int port;
+ u8 *gswip_tag;
+
+ if (unlikely(!pskb_may_pull(skb, GSWIP_RX_HEADER_LEN)))
+ return NULL;
+
+ gswip_tag = skb->data - ETH_HLEN;
+
+ /* Get source port information */
+ port = (gswip_tag[7] & GSWIP_RX_SPPID_MASK) >> GSWIP_RX_SPPID_SHIFT;
+ skb->dev = dsa_master_find_slave(dev, 0, port);
+ if (!skb->dev)
+ return NULL;
+
+ /* remove GSWIP tag */
+ skb_pull_rcsum(skb, GSWIP_RX_HEADER_LEN);
+
+ return skb;
+}
+
+static const struct dsa_device_ops gswip_netdev_ops = {
+ .name = "gswip",
+ .proto = DSA_TAG_PROTO_GSWIP,
+ .xmit = gswip_tag_xmit,
+ .rcv = gswip_tag_rcv,
+ .overhead = GSWIP_RX_HEADER_LEN,
+};
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_GSWIP);
+
+module_dsa_tag_driver(gswip_netdev_ops);
diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c
new file mode 100644
index 000000000..230ddf45d
--- /dev/null
+++ b/net/dsa/tag_ksz.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * net/dsa/tag_ksz.c - Microchip KSZ Switch tag format handling
+ * Copyright (c) 2017 Microchip Technology
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <net/dsa.h>
+#include "dsa_priv.h"
+
+/* Typically only one byte is used for tail tag. */
+#define KSZ_EGRESS_TAG_LEN 1
+#define KSZ_INGRESS_TAG_LEN 1
+
+static struct sk_buff *ksz_common_rcv(struct sk_buff *skb,
+ struct net_device *dev,
+ unsigned int port, unsigned int len)
+{
+ skb->dev = dsa_master_find_slave(dev, 0, port);
+ if (!skb->dev)
+ return NULL;
+
+ if (pskb_trim_rcsum(skb, skb->len - len))
+ return NULL;
+
+ skb->offload_fwd_mark = true;
+
+ return skb;
+}
+
+/*
+ * For Ingress (Host -> KSZ8795), 1 byte is added before FCS.
+ * ---------------------------------------------------------------------------
+ * DA(6bytes)|SA(6bytes)|....|Data(nbytes)|tag(1byte)|FCS(4bytes)
+ * ---------------------------------------------------------------------------
+ * tag : each bit represents port (eg, 0x01=port1, 0x02=port2, 0x10=port5)
+ *
+ * For Egress (KSZ8795 -> Host), 1 byte is added before FCS.
+ * ---------------------------------------------------------------------------
+ * DA(6bytes)|SA(6bytes)|....|Data(nbytes)|tag0(1byte)|FCS(4bytes)
+ * ---------------------------------------------------------------------------
+ * tag0 : zero-based value represents port
+ * (eg, 0x00=port1, 0x02=port3, 0x06=port7)
+ */
+
+#define KSZ8795_TAIL_TAG_OVERRIDE BIT(6)
+#define KSZ8795_TAIL_TAG_LOOKUP BIT(7)
+
+static struct sk_buff *ksz8795_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ u8 *tag;
+ u8 *addr;
+
+ /* Tag encoding */
+ tag = skb_put(skb, KSZ_INGRESS_TAG_LEN);
+ addr = skb_mac_header(skb);
+
+ *tag = 1 << dp->index;
+ if (is_link_local_ether_addr(addr))
+ *tag |= KSZ8795_TAIL_TAG_OVERRIDE;
+
+ return skb;
+}
+
+static struct sk_buff *ksz8795_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt)
+{
+ u8 *tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
+
+ return ksz_common_rcv(skb, dev, tag[0] & 7, KSZ_EGRESS_TAG_LEN);
+}
+
+static const struct dsa_device_ops ksz8795_netdev_ops = {
+ .name = "ksz8795",
+ .proto = DSA_TAG_PROTO_KSZ8795,
+ .xmit = ksz8795_xmit,
+ .rcv = ksz8795_rcv,
+ .overhead = KSZ_INGRESS_TAG_LEN,
+ .tail_tag = true,
+};
+
+DSA_TAG_DRIVER(ksz8795_netdev_ops);
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_KSZ8795);
+
+/*
+ * For Ingress (Host -> KSZ9477), 2 bytes are added before FCS.
+ * ---------------------------------------------------------------------------
+ * DA(6bytes)|SA(6bytes)|....|Data(nbytes)|tag0(1byte)|tag1(1byte)|FCS(4bytes)
+ * ---------------------------------------------------------------------------
+ * tag0 : Prioritization (not used now)
+ * tag1 : each bit represents port (eg, 0x01=port1, 0x02=port2, 0x10=port5)
+ *
+ * For Egress (KSZ9477 -> Host), 1 byte is added before FCS.
+ * ---------------------------------------------------------------------------
+ * DA(6bytes)|SA(6bytes)|....|Data(nbytes)|tag0(1byte)|FCS(4bytes)
+ * ---------------------------------------------------------------------------
+ * tag0 : zero-based value represents port
+ * (eg, 0x00=port1, 0x02=port3, 0x06=port7)
+ */
+
+#define KSZ9477_INGRESS_TAG_LEN 2
+#define KSZ9477_PTP_TAG_LEN 4
+#define KSZ9477_PTP_TAG_INDICATION 0x80
+
+#define KSZ9477_TAIL_TAG_OVERRIDE BIT(9)
+#define KSZ9477_TAIL_TAG_LOOKUP BIT(10)
+
+static struct sk_buff *ksz9477_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ __be16 *tag;
+ u8 *addr;
+ u16 val;
+
+ /* Tag encoding */
+ tag = skb_put(skb, KSZ9477_INGRESS_TAG_LEN);
+ addr = skb_mac_header(skb);
+
+ val = BIT(dp->index);
+
+ if (is_link_local_ether_addr(addr))
+ val |= KSZ9477_TAIL_TAG_OVERRIDE;
+
+ *tag = cpu_to_be16(val);
+
+ return skb;
+}
+
+static struct sk_buff *ksz9477_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt)
+{
+ /* Tag decoding */
+ u8 *tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
+ unsigned int port = tag[0] & 7;
+ unsigned int len = KSZ_EGRESS_TAG_LEN;
+
+ /* Extra 4-bytes PTP timestamp */
+ if (tag[0] & KSZ9477_PTP_TAG_INDICATION)
+ len += KSZ9477_PTP_TAG_LEN;
+
+ return ksz_common_rcv(skb, dev, port, len);
+}
+
+static const struct dsa_device_ops ksz9477_netdev_ops = {
+ .name = "ksz9477",
+ .proto = DSA_TAG_PROTO_KSZ9477,
+ .xmit = ksz9477_xmit,
+ .rcv = ksz9477_rcv,
+ .overhead = KSZ9477_INGRESS_TAG_LEN,
+ .tail_tag = true,
+};
+
+DSA_TAG_DRIVER(ksz9477_netdev_ops);
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_KSZ9477);
+
+#define KSZ9893_TAIL_TAG_OVERRIDE BIT(5)
+#define KSZ9893_TAIL_TAG_LOOKUP BIT(6)
+
+static struct sk_buff *ksz9893_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ u8 *addr;
+ u8 *tag;
+
+ /* Tag encoding */
+ tag = skb_put(skb, KSZ_INGRESS_TAG_LEN);
+ addr = skb_mac_header(skb);
+
+ *tag = BIT(dp->index);
+
+ if (is_link_local_ether_addr(addr))
+ *tag |= KSZ9893_TAIL_TAG_OVERRIDE;
+
+ return skb;
+}
+
+static const struct dsa_device_ops ksz9893_netdev_ops = {
+ .name = "ksz9893",
+ .proto = DSA_TAG_PROTO_KSZ9893,
+ .xmit = ksz9893_xmit,
+ .rcv = ksz9477_rcv,
+ .overhead = KSZ_INGRESS_TAG_LEN,
+ .tail_tag = true,
+};
+
+DSA_TAG_DRIVER(ksz9893_netdev_ops);
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_KSZ9893);
+
+static struct dsa_tag_driver *dsa_tag_driver_array[] = {
+ &DSA_TAG_DRIVER_NAME(ksz8795_netdev_ops),
+ &DSA_TAG_DRIVER_NAME(ksz9477_netdev_ops),
+ &DSA_TAG_DRIVER_NAME(ksz9893_netdev_ops),
+};
+
+module_dsa_tag_drivers(dsa_tag_driver_array);
+
+MODULE_LICENSE("GPL");
diff --git a/net/dsa/tag_lan9303.c b/net/dsa/tag_lan9303.c
new file mode 100644
index 000000000..aa1318dcc
--- /dev/null
+++ b/net/dsa/tag_lan9303.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Pengutronix, Juergen Borleis <jbe@pengutronix.de>
+ */
+#include <linux/dsa/lan9303.h>
+#include <linux/etherdevice.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include "dsa_priv.h"
+
+/* To define the outgoing port and to discover the incoming port a regular
+ * VLAN tag is used by the LAN9303. But its VID meaning is 'special':
+ *
+ * Dest MAC Src MAC TAG Type
+ * ...| 1 2 3 4 5 6 | 1 2 3 4 5 6 | 1 2 3 4 | 1 2 |...
+ * |<------->|
+ * TAG:
+ * |<------------->|
+ * | 1 2 | 3 4 |
+ * TPID VID
+ * 0x8100
+ *
+ * VID bit 3 indicates a request for an ALR lookup.
+ *
+ * If VID bit 3 is zero, then bits 0 and 1 specify the destination port
+ * (0, 1, 2) or broadcast (3) or the source port (1, 2).
+ *
+ * VID bit 4 is used to specify if the STP port state should be overridden.
+ * Required when no forwarding between the external ports should happen.
+ */
+
+#define LAN9303_TAG_LEN 4
+# define LAN9303_TAG_TX_USE_ALR BIT(3)
+# define LAN9303_TAG_TX_STP_OVERRIDE BIT(4)
+# define LAN9303_TAG_RX_IGMP BIT(3)
+# define LAN9303_TAG_RX_STP BIT(4)
+# define LAN9303_TAG_RX_TRAPPED_TO_CPU (LAN9303_TAG_RX_IGMP | \
+ LAN9303_TAG_RX_STP)
+
+/* Decide whether to transmit using ALR lookup, or transmit directly to
+ * port using tag. ALR learning is performed only when using ALR lookup.
+ * If the two external ports are bridged and the frame is unicast,
+ * then use ALR lookup to allow ALR learning on CPU port.
+ * Otherwise transmit directly to port with STP state override.
+ * See also: lan9303_separate_ports() and lan9303.pdf 6.4.10.1
+ */
+static int lan9303_xmit_use_arl(struct dsa_port *dp, u8 *dest_addr)
+{
+ struct lan9303 *chip = dp->ds->priv;
+
+ return chip->is_bridged && !is_multicast_ether_addr(dest_addr);
+}
+
+static struct sk_buff *lan9303_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ __be16 *lan9303_tag;
+ u16 tag;
+
+ /* provide 'LAN9303_TAG_LEN' bytes additional space */
+ skb_push(skb, LAN9303_TAG_LEN);
+
+ /* make room between MACs and Ether-Type */
+ memmove(skb->data, skb->data + LAN9303_TAG_LEN, 2 * ETH_ALEN);
+
+ lan9303_tag = (__be16 *)(skb->data + 2 * ETH_ALEN);
+ tag = lan9303_xmit_use_arl(dp, skb->data) ?
+ LAN9303_TAG_TX_USE_ALR :
+ dp->index | LAN9303_TAG_TX_STP_OVERRIDE;
+ lan9303_tag[0] = htons(ETH_P_8021Q);
+ lan9303_tag[1] = htons(tag);
+
+ return skb;
+}
+
+static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt)
+{
+ __be16 *lan9303_tag;
+ u16 lan9303_tag1;
+ unsigned int source_port;
+
+ if (unlikely(!pskb_may_pull(skb, LAN9303_TAG_LEN))) {
+ dev_warn_ratelimited(&dev->dev,
+ "Dropping packet, cannot pull\n");
+ return NULL;
+ }
+
+ /* '->data' points into the middle of our special VLAN tag information:
+ *
+ * ~ MAC src | 0x81 | 0x00 | 0xyy | 0xzz | ether type
+ * ^
+ * ->data
+ */
+ lan9303_tag = (__be16 *)(skb->data - 2);
+
+ if (lan9303_tag[0] != htons(ETH_P_8021Q)) {
+ dev_warn_ratelimited(&dev->dev, "Dropping packet due to invalid VLAN marker\n");
+ return NULL;
+ }
+
+ lan9303_tag1 = ntohs(lan9303_tag[1]);
+ source_port = lan9303_tag1 & 0x3;
+
+ skb->dev = dsa_master_find_slave(dev, 0, source_port);
+ if (!skb->dev) {
+ dev_warn_ratelimited(&dev->dev, "Dropping packet due to invalid source port\n");
+ return NULL;
+ }
+
+ /* remove the special VLAN tag between the MAC addresses
+ * and the current ethertype field.
+ */
+ skb_pull_rcsum(skb, 2 + 2);
+ memmove(skb->data - ETH_HLEN, skb->data - (ETH_HLEN + LAN9303_TAG_LEN),
+ 2 * ETH_ALEN);
+ skb->offload_fwd_mark = !(lan9303_tag1 & LAN9303_TAG_RX_TRAPPED_TO_CPU);
+
+ return skb;
+}
+
+static const struct dsa_device_ops lan9303_netdev_ops = {
+ .name = "lan9303",
+ .proto = DSA_TAG_PROTO_LAN9303,
+ .xmit = lan9303_xmit,
+ .rcv = lan9303_rcv,
+ .overhead = LAN9303_TAG_LEN,
+};
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_LAN9303);
+
+module_dsa_tag_driver(lan9303_netdev_ops);
diff --git a/net/dsa/tag_mtk.c b/net/dsa/tag_mtk.c
new file mode 100644
index 000000000..597484876
--- /dev/null
+++ b/net/dsa/tag_mtk.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Mediatek DSA Tag support
+ * Copyright (C) 2017 Landen Chao <landen.chao@mediatek.com>
+ * Sean Wang <sean.wang@mediatek.com>
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+
+#include "dsa_priv.h"
+
+#define MTK_HDR_LEN 4
+#define MTK_HDR_XMIT_UNTAGGED 0
+#define MTK_HDR_XMIT_TAGGED_TPID_8100 1
+#define MTK_HDR_XMIT_TAGGED_TPID_88A8 2
+#define MTK_HDR_RECV_SOURCE_PORT_MASK GENMASK(2, 0)
+#define MTK_HDR_XMIT_DP_BIT_MASK GENMASK(5, 0)
+#define MTK_HDR_XMIT_SA_DIS BIT(6)
+
+static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ u8 xmit_tpid;
+ u8 *mtk_tag;
+ unsigned char *dest = eth_hdr(skb)->h_dest;
+ bool is_multicast_skb = is_multicast_ether_addr(dest) &&
+ !is_broadcast_ether_addr(dest);
+
+ /* Build the special tag after the MAC Source Address. If VLAN header
+ * is present, it's required that VLAN header and special tag is
+ * being combined. Only in this way we can allow the switch can parse
+ * the both special and VLAN tag at the same time and then look up VLAN
+ * table with VID.
+ */
+ switch (skb->protocol) {
+ case htons(ETH_P_8021Q):
+ xmit_tpid = MTK_HDR_XMIT_TAGGED_TPID_8100;
+ break;
+ case htons(ETH_P_8021AD):
+ xmit_tpid = MTK_HDR_XMIT_TAGGED_TPID_88A8;
+ break;
+ default:
+ xmit_tpid = MTK_HDR_XMIT_UNTAGGED;
+ skb_push(skb, MTK_HDR_LEN);
+ memmove(skb->data, skb->data + MTK_HDR_LEN, 2 * ETH_ALEN);
+ }
+
+ mtk_tag = skb->data + 2 * ETH_ALEN;
+
+ /* Mark tag attribute on special tag insertion to notify hardware
+ * whether that's a combined special tag with 802.1Q header.
+ */
+ mtk_tag[0] = xmit_tpid;
+ mtk_tag[1] = (1 << dp->index) & MTK_HDR_XMIT_DP_BIT_MASK;
+
+ /* Disable SA learning for multicast frames */
+ if (unlikely(is_multicast_skb))
+ mtk_tag[1] |= MTK_HDR_XMIT_SA_DIS;
+
+ /* Tag control information is kept for 802.1Q */
+ if (xmit_tpid == MTK_HDR_XMIT_UNTAGGED) {
+ mtk_tag[2] = 0;
+ mtk_tag[3] = 0;
+ }
+
+ return skb;
+}
+
+static struct sk_buff *mtk_tag_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt)
+{
+ u16 hdr;
+ int port;
+ __be16 *phdr;
+ unsigned char *dest = eth_hdr(skb)->h_dest;
+ bool is_multicast_skb = is_multicast_ether_addr(dest) &&
+ !is_broadcast_ether_addr(dest);
+
+ if (unlikely(!pskb_may_pull(skb, MTK_HDR_LEN)))
+ return NULL;
+
+ /* The MTK header is added by the switch between src addr
+ * and ethertype at this point, skb->data points to 2 bytes
+ * after src addr so header should be 2 bytes right before.
+ */
+ phdr = (__be16 *)(skb->data - 2);
+ hdr = ntohs(*phdr);
+
+ /* Remove MTK tag and recalculate checksum. */
+ skb_pull_rcsum(skb, MTK_HDR_LEN);
+
+ memmove(skb->data - ETH_HLEN,
+ skb->data - ETH_HLEN - MTK_HDR_LEN,
+ 2 * ETH_ALEN);
+
+ /* Get source port information */
+ port = (hdr & MTK_HDR_RECV_SOURCE_PORT_MASK);
+
+ skb->dev = dsa_master_find_slave(dev, 0, port);
+ if (!skb->dev)
+ return NULL;
+
+ /* Only unicast or broadcast frames are offloaded */
+ if (likely(!is_multicast_skb))
+ skb->offload_fwd_mark = 1;
+
+ return skb;
+}
+
+static const struct dsa_device_ops mtk_netdev_ops = {
+ .name = "mtk",
+ .proto = DSA_TAG_PROTO_MTK,
+ .xmit = mtk_tag_xmit,
+ .rcv = mtk_tag_rcv,
+ .overhead = MTK_HDR_LEN,
+};
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_MTK);
+
+module_dsa_tag_driver(mtk_netdev_ops);
diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c
new file mode 100644
index 000000000..16a1afd5b
--- /dev/null
+++ b/net/dsa/tag_ocelot.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2019 NXP Semiconductors
+ */
+#include <soc/mscc/ocelot.h>
+#include <linux/packing.h>
+#include "dsa_priv.h"
+
+/* The CPU injection header and the CPU extraction header can have 3 types of
+ * prefixes: long, short and no prefix. The format of the header itself is the
+ * same in all 3 cases.
+ *
+ * Extraction with long prefix:
+ *
+ * +-------------------+-------------------+------+------+------------+-------+
+ * | ff:ff:ff:ff:ff:ff | ff:ff:ff:ff:ff:ff | 8880 | 000a | extraction | frame |
+ * | | | | | header | |
+ * +-------------------+-------------------+------+------+------------+-------+
+ * 48 bits 48 bits 16 bits 16 bits 128 bits
+ *
+ * Extraction with short prefix:
+ *
+ * +------+------+------------+-------+
+ * | 8880 | 000a | extraction | frame |
+ * | | | header | |
+ * +------+------+------------+-------+
+ * 16 bits 16 bits 128 bits
+ *
+ * Extraction with no prefix:
+ *
+ * +------------+-------+
+ * | extraction | frame |
+ * | header | |
+ * +------------+-------+
+ * 128 bits
+ *
+ *
+ * Injection with long prefix:
+ *
+ * +-------------------+-------------------+------+------+------------+-------+
+ * | any dmac | any smac | 8880 | 000a | injection | frame |
+ * | | | | | header | |
+ * +-------------------+-------------------+------+------+------------+-------+
+ * 48 bits 48 bits 16 bits 16 bits 128 bits
+ *
+ * Injection with short prefix:
+ *
+ * +------+------+------------+-------+
+ * | 8880 | 000a | injection | frame |
+ * | | | header | |
+ * +------+------+------------+-------+
+ * 16 bits 16 bits 128 bits
+ *
+ * Injection with no prefix:
+ *
+ * +------------+-------+
+ * | injection | frame |
+ * | header | |
+ * +------------+-------+
+ * 128 bits
+ *
+ * The injection header looks like this (network byte order, bit 127
+ * is part of lowest address byte in memory, bit 0 is part of highest
+ * address byte):
+ *
+ * +------+------+------+------+------+------+------+------+
+ * 127:120 |BYPASS| MASQ | MASQ_PORT |REW_OP|REW_OP|
+ * +------+------+------+------+------+------+------+------+
+ * 119:112 | REW_OP |
+ * +------+------+------+------+------+------+------+------+
+ * 111:104 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 103: 96 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 95: 88 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 87: 80 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 79: 72 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 71: 64 | RSV | DEST |
+ * +------+------+------+------+------+------+------+------+
+ * 63: 56 | DEST |
+ * +------+------+------+------+------+------+------+------+
+ * 55: 48 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 47: 40 | RSV | SRC_PORT | RSV |TFRM_TIMER|
+ * +------+------+------+------+------+------+------+------+
+ * 39: 32 | TFRM_TIMER | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 31: 24 | RSV | DP | POP_CNT | CPUQ |
+ * +------+------+------+------+------+------+------+------+
+ * 23: 16 | CPUQ | QOS_CLASS |TAG_TYPE|
+ * +------+------+------+------+------+------+------+------+
+ * 15: 8 | PCP | DEI | VID |
+ * +------+------+------+------+------+------+------+------+
+ * 7: 0 | VID |
+ * +------+------+------+------+------+------+------+------+
+ *
+ * And the extraction header looks like this:
+ *
+ * +------+------+------+------+------+------+------+------+
+ * 127:120 | RSV | REW_OP |
+ * +------+------+------+------+------+------+------+------+
+ * 119:112 | REW_OP | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 111:104 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 103: 96 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 95: 88 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 87: 80 | REW_VAL | LLEN |
+ * +------+------+------+------+------+------+------+------+
+ * 79: 72 | LLEN | WLEN |
+ * +------+------+------+------+------+------+------+------+
+ * 71: 64 | WLEN | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 63: 56 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 55: 48 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 47: 40 | RSV | SRC_PORT | ACL_ID |
+ * +------+------+------+------+------+------+------+------+
+ * 39: 32 | ACL_ID | RSV | SFLOW_ID |
+ * +------+------+------+------+------+------+------+------+
+ * 31: 24 |ACL_HIT| DP | LRN_FLAGS | CPUQ |
+ * +------+------+------+------+------+------+------+------+
+ * 23: 16 | CPUQ | QOS_CLASS |TAG_TYPE|
+ * +------+------+------+------+------+------+------+------+
+ * 15: 8 | PCP | DEI | VID |
+ * +------+------+------+------+------+------+------+------+
+ * 7: 0 | VID |
+ * +------+------+------+------+------+------+------+------+
+ */
+
+static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(netdev);
+ struct sk_buff *clone = DSA_SKB_CB(skb)->clone;
+ struct dsa_switch *ds = dp->ds;
+ struct ocelot *ocelot = ds->priv;
+ struct ocelot_port *ocelot_port;
+ u8 *prefix, *injection;
+ u64 qos_class, rew_op;
+
+ ocelot_port = ocelot->ports[dp->index];
+
+ injection = skb_push(skb, OCELOT_TAG_LEN);
+
+ prefix = skb_push(skb, OCELOT_SHORT_PREFIX_LEN);
+
+ memcpy(prefix, ocelot_port->xmit_template, OCELOT_TOTAL_TAG_LEN);
+
+ /* Fix up the fields which are not statically determined
+ * in the template
+ */
+ qos_class = skb->priority;
+ packing(injection, &qos_class, 19, 17, OCELOT_TAG_LEN, PACK, 0);
+
+ /* TX timestamping was requested */
+ if (clone) {
+ rew_op = ocelot_port->ptp_cmd;
+ /* Retrieve timestamp ID populated inside skb->cb[0] of the
+ * clone by ocelot_port_add_txtstamp_skb
+ */
+ if (ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP)
+ rew_op |= clone->cb[0] << 3;
+
+ packing(injection, &rew_op, 125, 117, OCELOT_TAG_LEN, PACK, 0);
+ }
+
+ return skb;
+}
+
+static struct sk_buff *ocelot_rcv(struct sk_buff *skb,
+ struct net_device *netdev,
+ struct packet_type *pt)
+{
+ struct dsa_port *cpu_dp = netdev->dsa_ptr;
+ struct dsa_switch *ds = cpu_dp->ds;
+ struct ocelot *ocelot = ds->priv;
+ u64 src_port, qos_class;
+ u64 vlan_tci, tag_type;
+ u8 *start = skb->data;
+ u8 *extraction;
+ u16 vlan_tpid;
+
+ /* Revert skb->data by the amount consumed by the DSA master,
+ * so it points to the beginning of the frame.
+ */
+ skb_push(skb, ETH_HLEN);
+ /* We don't care about the short prefix, it is just for easy entrance
+ * into the DSA master's RX filter. Discard it now by moving it into
+ * the headroom.
+ */
+ skb_pull(skb, OCELOT_SHORT_PREFIX_LEN);
+ /* And skb->data now points to the extraction frame header.
+ * Keep a pointer to it.
+ */
+ extraction = skb->data;
+ /* Now the EFH is part of the headroom as well */
+ skb_pull(skb, OCELOT_TAG_LEN);
+ /* Reset the pointer to the real MAC header */
+ skb_reset_mac_header(skb);
+ skb_reset_mac_len(skb);
+ /* And move skb->data to the correct location again */
+ skb_pull(skb, ETH_HLEN);
+
+ /* Remove from inet csum the extraction header */
+ skb_postpull_rcsum(skb, start, OCELOT_TOTAL_TAG_LEN);
+
+ packing(extraction, &src_port, 46, 43, OCELOT_TAG_LEN, UNPACK, 0);
+ packing(extraction, &qos_class, 19, 17, OCELOT_TAG_LEN, UNPACK, 0);
+ packing(extraction, &tag_type, 16, 16, OCELOT_TAG_LEN, UNPACK, 0);
+ packing(extraction, &vlan_tci, 15, 0, OCELOT_TAG_LEN, UNPACK, 0);
+
+ skb->dev = dsa_master_find_slave(netdev, 0, src_port);
+ if (!skb->dev)
+ /* The switch will reflect back some frames sent through
+ * sockets opened on the bare DSA master. These will come back
+ * with src_port equal to the index of the CPU port, for which
+ * there is no slave registered. So don't print any error
+ * message here (ignore and drop those frames).
+ */
+ return NULL;
+
+ skb->offload_fwd_mark = 1;
+ skb->priority = qos_class;
+
+ /* Ocelot switches copy frames unmodified to the CPU. However, it is
+ * possible for the user to request a VLAN modification through
+ * VCAP_IS1_ACT_VID_REPLACE_ENA. In this case, what will happen is that
+ * the VLAN ID field from the Extraction Header gets updated, but the
+ * 802.1Q header does not (the classified VLAN only becomes visible on
+ * egress through the "port tag" of front-panel ports).
+ * So, for traffic extracted by the CPU, we want to pick up the
+ * classified VLAN and manually replace the existing 802.1Q header from
+ * the packet with it, so that the operating system is always up to
+ * date with the result of tc-vlan actions.
+ * NOTE: In VLAN-unaware mode, we don't want to do that, we want the
+ * frame to remain unmodified, because the classified VLAN is always
+ * equal to the pvid of the ingress port and should not be used for
+ * processing.
+ */
+ vlan_tpid = tag_type ? ETH_P_8021AD : ETH_P_8021Q;
+
+ if (ocelot->ports[src_port]->vlan_aware &&
+ eth_hdr(skb)->h_proto == htons(vlan_tpid)) {
+ u16 dummy_vlan_tci;
+
+ skb_push_rcsum(skb, ETH_HLEN);
+ __skb_vlan_pop(skb, &dummy_vlan_tci);
+ skb_pull_rcsum(skb, ETH_HLEN);
+ __vlan_hwaccel_put_tag(skb, htons(vlan_tpid), vlan_tci);
+ }
+
+ return skb;
+}
+
+static const struct dsa_device_ops ocelot_netdev_ops = {
+ .name = "ocelot",
+ .proto = DSA_TAG_PROTO_OCELOT,
+ .xmit = ocelot_xmit,
+ .rcv = ocelot_rcv,
+ .overhead = OCELOT_TOTAL_TAG_LEN,
+ .promisc_on_master = true,
+};
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_OCELOT);
+
+module_dsa_tag_driver(ocelot_netdev_ops);
diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c
new file mode 100644
index 000000000..88181b52f
--- /dev/null
+++ b/net/dsa/tag_qca.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/etherdevice.h>
+
+#include "dsa_priv.h"
+
+#define QCA_HDR_LEN 2
+#define QCA_HDR_VERSION 0x2
+
+#define QCA_HDR_RECV_VERSION_MASK GENMASK(15, 14)
+#define QCA_HDR_RECV_VERSION_S 14
+#define QCA_HDR_RECV_PRIORITY_MASK GENMASK(13, 11)
+#define QCA_HDR_RECV_PRIORITY_S 11
+#define QCA_HDR_RECV_TYPE_MASK GENMASK(10, 6)
+#define QCA_HDR_RECV_TYPE_S 6
+#define QCA_HDR_RECV_FRAME_IS_TAGGED BIT(3)
+#define QCA_HDR_RECV_SOURCE_PORT_MASK GENMASK(2, 0)
+
+#define QCA_HDR_XMIT_VERSION_MASK GENMASK(15, 14)
+#define QCA_HDR_XMIT_VERSION_S 14
+#define QCA_HDR_XMIT_PRIORITY_MASK GENMASK(13, 11)
+#define QCA_HDR_XMIT_PRIORITY_S 11
+#define QCA_HDR_XMIT_CONTROL_MASK GENMASK(10, 8)
+#define QCA_HDR_XMIT_CONTROL_S 8
+#define QCA_HDR_XMIT_FROM_CPU BIT(7)
+#define QCA_HDR_XMIT_DP_BIT_MASK GENMASK(6, 0)
+
+static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ __be16 *phdr;
+ u16 hdr;
+
+ skb_push(skb, QCA_HDR_LEN);
+
+ memmove(skb->data, skb->data + QCA_HDR_LEN, 2 * ETH_ALEN);
+ phdr = (__be16 *)(skb->data + 2 * ETH_ALEN);
+
+ /* Set the version field, and set destination port information */
+ hdr = QCA_HDR_VERSION << QCA_HDR_XMIT_VERSION_S |
+ QCA_HDR_XMIT_FROM_CPU | BIT(dp->index);
+
+ *phdr = htons(hdr);
+
+ return skb;
+}
+
+static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt)
+{
+ u8 ver;
+ u16 hdr;
+ int port;
+ __be16 *phdr;
+
+ if (unlikely(!pskb_may_pull(skb, QCA_HDR_LEN)))
+ return NULL;
+
+ /* The QCA header is added by the switch between src addr and Ethertype
+ * At this point, skb->data points to ethertype so header should be
+ * right before
+ */
+ phdr = (__be16 *)(skb->data - 2);
+ hdr = ntohs(*phdr);
+
+ /* Make sure the version is correct */
+ ver = (hdr & QCA_HDR_RECV_VERSION_MASK) >> QCA_HDR_RECV_VERSION_S;
+ if (unlikely(ver != QCA_HDR_VERSION))
+ return NULL;
+
+ /* Remove QCA tag and recalculate checksum */
+ skb_pull_rcsum(skb, QCA_HDR_LEN);
+ memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - QCA_HDR_LEN,
+ ETH_HLEN - QCA_HDR_LEN);
+
+ /* Get source port information */
+ port = (hdr & QCA_HDR_RECV_SOURCE_PORT_MASK);
+
+ skb->dev = dsa_master_find_slave(dev, 0, port);
+ if (!skb->dev)
+ return NULL;
+
+ return skb;
+}
+
+static const struct dsa_device_ops qca_netdev_ops = {
+ .name = "qca",
+ .proto = DSA_TAG_PROTO_QCA,
+ .xmit = qca_tag_xmit,
+ .rcv = qca_tag_rcv,
+ .overhead = QCA_HDR_LEN,
+};
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_QCA);
+
+module_dsa_tag_driver(qca_netdev_ops);
diff --git a/net/dsa/tag_rtl4_a.c b/net/dsa/tag_rtl4_a.c
new file mode 100644
index 000000000..24375ebd6
--- /dev/null
+++ b/net/dsa/tag_rtl4_a.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Handler for Realtek 4 byte DSA switch tags
+ * Currently only supports protocol "A" found in RTL8366RB
+ * Copyright (c) 2020 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * This "proprietary tag" header looks like so:
+ *
+ * -------------------------------------------------
+ * | MAC DA | MAC SA | 0x8899 | 2 bytes tag | Type |
+ * -------------------------------------------------
+ *
+ * The 2 bytes tag form a 16 bit big endian word. The exact
+ * meaning has been guessed from packet dumps from ingress
+ * frames.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/bits.h>
+
+#include "dsa_priv.h"
+
+#define RTL4_A_HDR_LEN 4
+#define RTL4_A_ETHERTYPE 0x8899
+#define RTL4_A_PROTOCOL_SHIFT 12
+/*
+ * 0x1 = Realtek Remote Control protocol (RRCP)
+ * 0x2/0x3 seems to be used for loopback testing
+ * 0x9 = RTL8306 DSA protocol
+ * 0xa = RTL8366RB DSA protocol
+ */
+#define RTL4_A_PROTOCOL_RTL8366RB 0xa
+
+static struct sk_buff *rtl4a_tag_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ __be16 *p;
+ u8 *tag;
+ u16 out;
+
+ /* Pad out to at least 60 bytes */
+ if (unlikely(__skb_put_padto(skb, ETH_ZLEN, false)))
+ return NULL;
+
+ netdev_dbg(dev, "add realtek tag to package to port %d\n",
+ dp->index);
+ skb_push(skb, RTL4_A_HDR_LEN);
+
+ memmove(skb->data, skb->data + RTL4_A_HDR_LEN, 2 * ETH_ALEN);
+ tag = skb->data + 2 * ETH_ALEN;
+
+ /* Set Ethertype */
+ p = (__be16 *)tag;
+ *p = htons(RTL4_A_ETHERTYPE);
+
+ out = (RTL4_A_PROTOCOL_RTL8366RB << RTL4_A_PROTOCOL_SHIFT) | (2 << 8);
+ /* The lower bits indicate the port number */
+ out |= BIT(dp->index);
+
+ p = (__be16 *)(tag + 2);
+ *p = htons(out);
+
+ return skb;
+}
+
+static struct sk_buff *rtl4a_tag_rcv(struct sk_buff *skb,
+ struct net_device *dev,
+ struct packet_type *pt)
+{
+ u16 protport;
+ __be16 *p;
+ u16 etype;
+ u8 *tag;
+ u8 prot;
+ u8 port;
+
+ if (unlikely(!pskb_may_pull(skb, RTL4_A_HDR_LEN)))
+ return NULL;
+
+ /* The RTL4 header has its own custom Ethertype 0x8899 and that
+ * starts right at the beginning of the packet, after the src
+ * ethernet addr. Apparantly skb->data always points 2 bytes in,
+ * behind the Ethertype.
+ */
+ tag = skb->data - 2;
+ p = (__be16 *)tag;
+ etype = ntohs(*p);
+ if (etype != RTL4_A_ETHERTYPE) {
+ /* Not custom, just pass through */
+ netdev_dbg(dev, "non-realtek ethertype 0x%04x\n", etype);
+ return skb;
+ }
+ p = (__be16 *)(tag + 2);
+ protport = ntohs(*p);
+ /* The 4 upper bits are the protocol */
+ prot = (protport >> RTL4_A_PROTOCOL_SHIFT) & 0x0f;
+ if (prot != RTL4_A_PROTOCOL_RTL8366RB) {
+ netdev_err(dev, "unknown realtek protocol 0x%01x\n", prot);
+ return NULL;
+ }
+ port = protport & 0xff;
+
+ skb->dev = dsa_master_find_slave(dev, 0, port);
+ if (!skb->dev) {
+ netdev_dbg(dev, "could not find slave for port %d\n", port);
+ return NULL;
+ }
+
+ /* Remove RTL4 tag and recalculate checksum */
+ skb_pull_rcsum(skb, RTL4_A_HDR_LEN);
+
+ /* Move ethernet DA and SA in front of the data */
+ memmove(skb->data - ETH_HLEN,
+ skb->data - ETH_HLEN - RTL4_A_HDR_LEN,
+ 2 * ETH_ALEN);
+
+ skb->offload_fwd_mark = 1;
+
+ return skb;
+}
+
+static const struct dsa_device_ops rtl4a_netdev_ops = {
+ .name = "rtl4a",
+ .proto = DSA_TAG_PROTO_RTL4_A,
+ .xmit = rtl4a_tag_xmit,
+ .rcv = rtl4a_tag_rcv,
+ .overhead = RTL4_A_HDR_LEN,
+};
+module_dsa_tag_driver(rtl4a_netdev_ops);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_RTL4_A);
diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c
new file mode 100644
index 000000000..07876160e
--- /dev/null
+++ b/net/dsa/tag_sja1105.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include <linux/if_vlan.h>
+#include <linux/dsa/sja1105.h>
+#include <linux/dsa/8021q.h>
+#include <linux/packing.h>
+#include "dsa_priv.h"
+
+/* Similar to is_link_local_ether_addr(hdr->h_dest) but also covers PTP */
+static inline bool sja1105_is_link_local(const struct sk_buff *skb)
+{
+ const struct ethhdr *hdr = eth_hdr(skb);
+ u64 dmac = ether_addr_to_u64(hdr->h_dest);
+
+ if (ntohs(hdr->h_proto) == ETH_P_SJA1105_META)
+ return false;
+ if ((dmac & SJA1105_LINKLOCAL_FILTER_A_MASK) ==
+ SJA1105_LINKLOCAL_FILTER_A)
+ return true;
+ if ((dmac & SJA1105_LINKLOCAL_FILTER_B_MASK) ==
+ SJA1105_LINKLOCAL_FILTER_B)
+ return true;
+ return false;
+}
+
+struct sja1105_meta {
+ u64 tstamp;
+ u64 dmac_byte_4;
+ u64 dmac_byte_3;
+ u64 source_port;
+ u64 switch_id;
+};
+
+static void sja1105_meta_unpack(const struct sk_buff *skb,
+ struct sja1105_meta *meta)
+{
+ u8 *buf = skb_mac_header(skb) + ETH_HLEN;
+
+ /* UM10944.pdf section 4.2.17 AVB Parameters:
+ * Structure of the meta-data follow-up frame.
+ * It is in network byte order, so there are no quirks
+ * while unpacking the meta frame.
+ *
+ * Also SJA1105 E/T only populates bits 23:0 of the timestamp
+ * whereas P/Q/R/S does 32 bits. Since the structure is the
+ * same and the E/T puts zeroes in the high-order byte, use
+ * a unified unpacking command for both device series.
+ */
+ packing(buf, &meta->tstamp, 31, 0, 4, UNPACK, 0);
+ packing(buf + 4, &meta->dmac_byte_3, 7, 0, 1, UNPACK, 0);
+ packing(buf + 5, &meta->dmac_byte_4, 7, 0, 1, UNPACK, 0);
+ packing(buf + 6, &meta->source_port, 7, 0, 1, UNPACK, 0);
+ packing(buf + 7, &meta->switch_id, 7, 0, 1, UNPACK, 0);
+}
+
+static inline bool sja1105_is_meta_frame(const struct sk_buff *skb)
+{
+ const struct ethhdr *hdr = eth_hdr(skb);
+ u64 smac = ether_addr_to_u64(hdr->h_source);
+ u64 dmac = ether_addr_to_u64(hdr->h_dest);
+
+ if (smac != SJA1105_META_SMAC)
+ return false;
+ if (dmac != SJA1105_META_DMAC)
+ return false;
+ if (ntohs(hdr->h_proto) != ETH_P_SJA1105_META)
+ return false;
+ return true;
+}
+
+static bool sja1105_can_use_vlan_as_tags(const struct sk_buff *skb)
+{
+ struct vlan_ethhdr *hdr = vlan_eth_hdr(skb);
+ u16 vlan_tci;
+
+ if (hdr->h_vlan_proto == htons(ETH_P_SJA1105))
+ return true;
+
+ if (hdr->h_vlan_proto != htons(ETH_P_8021Q) &&
+ !skb_vlan_tag_present(skb))
+ return false;
+
+ if (skb_vlan_tag_present(skb))
+ vlan_tci = skb_vlan_tag_get(skb);
+ else
+ vlan_tci = ntohs(hdr->h_vlan_TCI);
+
+ return vid_is_dsa_8021q(vlan_tci & VLAN_VID_MASK);
+}
+
+/* This is the first time the tagger sees the frame on RX.
+ * Figure out if we can decode it.
+ */
+static bool sja1105_filter(const struct sk_buff *skb, struct net_device *dev)
+{
+ if (sja1105_can_use_vlan_as_tags(skb))
+ return true;
+ if (sja1105_is_link_local(skb))
+ return true;
+ if (sja1105_is_meta_frame(skb))
+ return true;
+ return false;
+}
+
+/* Calls sja1105_port_deferred_xmit in sja1105_main.c */
+static struct sk_buff *sja1105_defer_xmit(struct sja1105_port *sp,
+ struct sk_buff *skb)
+{
+ /* Increase refcount so the kfree_skb in dsa_slave_xmit
+ * won't really free the packet.
+ */
+ skb_queue_tail(&sp->xmit_queue, skb_get(skb));
+ kthread_queue_work(sp->xmit_worker, &sp->xmit_work);
+
+ return NULL;
+}
+
+static u16 sja1105_xmit_tpid(struct sja1105_port *sp)
+{
+ return sp->xmit_tpid;
+}
+
+static struct sk_buff *sja1105_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(netdev);
+ u16 tx_vid = dsa_8021q_tx_vid(dp->ds, dp->index);
+ u16 queue_mapping = skb_get_queue_mapping(skb);
+ u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
+
+ /* Transmitting management traffic does not rely upon switch tagging,
+ * but instead SPI-installed management routes. Part 2 of this
+ * is the .port_deferred_xmit driver callback.
+ */
+ if (unlikely(sja1105_is_link_local(skb)))
+ return sja1105_defer_xmit(dp->priv, skb);
+
+ return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp->priv),
+ ((pcp << VLAN_PRIO_SHIFT) | tx_vid));
+}
+
+static void sja1105_transfer_meta(struct sk_buff *skb,
+ const struct sja1105_meta *meta)
+{
+ struct ethhdr *hdr = eth_hdr(skb);
+
+ hdr->h_dest[3] = meta->dmac_byte_3;
+ hdr->h_dest[4] = meta->dmac_byte_4;
+ SJA1105_SKB_CB(skb)->meta_tstamp = meta->tstamp;
+}
+
+/* This is a simple state machine which follows the hardware mechanism of
+ * generating RX timestamps:
+ *
+ * After each timestampable skb (all traffic for which send_meta1 and
+ * send_meta0 is true, aka all MAC-filtered link-local traffic) a meta frame
+ * containing a partial timestamp is immediately generated by the switch and
+ * sent as a follow-up to the link-local frame on the CPU port.
+ *
+ * The meta frames have no unique identifier (such as sequence number) by which
+ * one may pair them to the correct timestampable frame.
+ * Instead, the switch has internal logic that ensures no frames are sent on
+ * the CPU port between a link-local timestampable frame and its corresponding
+ * meta follow-up. It also ensures strict ordering between ports (lower ports
+ * have higher priority towards the CPU port). For this reason, a per-port
+ * data structure is not needed/desirable.
+ *
+ * This function pairs the link-local frame with its partial timestamp from the
+ * meta follow-up frame. The full timestamp will be reconstructed later in a
+ * work queue.
+ */
+static struct sk_buff
+*sja1105_rcv_meta_state_machine(struct sk_buff *skb,
+ struct sja1105_meta *meta,
+ bool is_link_local,
+ bool is_meta)
+{
+ struct sja1105_port *sp;
+ struct dsa_port *dp;
+
+ dp = dsa_slave_to_port(skb->dev);
+ sp = dp->priv;
+
+ /* Step 1: A timestampable frame was received.
+ * Buffer it until we get its meta frame.
+ */
+ if (is_link_local) {
+ if (!test_bit(SJA1105_HWTS_RX_EN, &sp->data->state))
+ /* Do normal processing. */
+ return skb;
+
+ spin_lock(&sp->data->meta_lock);
+ /* Was this a link-local frame instead of the meta
+ * that we were expecting?
+ */
+ if (sp->data->stampable_skb) {
+ dev_err_ratelimited(dp->ds->dev,
+ "Expected meta frame, is %12llx "
+ "in the DSA master multicast filter?\n",
+ SJA1105_META_DMAC);
+ kfree_skb(sp->data->stampable_skb);
+ }
+
+ /* Hold a reference to avoid dsa_switch_rcv
+ * from freeing the skb.
+ */
+ sp->data->stampable_skb = skb_get(skb);
+ spin_unlock(&sp->data->meta_lock);
+
+ /* Tell DSA we got nothing */
+ return NULL;
+
+ /* Step 2: The meta frame arrived.
+ * Time to take the stampable skb out of the closet, annotate it
+ * with the partial timestamp, and pretend that we received it
+ * just now (basically masquerade the buffered frame as the meta
+ * frame, which serves no further purpose).
+ */
+ } else if (is_meta) {
+ struct sk_buff *stampable_skb;
+
+ /* Drop the meta frame if we're not in the right state
+ * to process it.
+ */
+ if (!test_bit(SJA1105_HWTS_RX_EN, &sp->data->state))
+ return NULL;
+
+ spin_lock(&sp->data->meta_lock);
+
+ stampable_skb = sp->data->stampable_skb;
+ sp->data->stampable_skb = NULL;
+
+ /* Was this a meta frame instead of the link-local
+ * that we were expecting?
+ */
+ if (!stampable_skb) {
+ dev_err_ratelimited(dp->ds->dev,
+ "Unexpected meta frame\n");
+ spin_unlock(&sp->data->meta_lock);
+ return NULL;
+ }
+
+ if (stampable_skb->dev != skb->dev) {
+ dev_err_ratelimited(dp->ds->dev,
+ "Meta frame on wrong port\n");
+ spin_unlock(&sp->data->meta_lock);
+ return NULL;
+ }
+
+ /* Free the meta frame and give DSA the buffered stampable_skb
+ * for further processing up the network stack.
+ */
+ kfree_skb(skb);
+ skb = stampable_skb;
+ sja1105_transfer_meta(skb, meta);
+
+ spin_unlock(&sp->data->meta_lock);
+ }
+
+ return skb;
+}
+
+static void sja1105_decode_subvlan(struct sk_buff *skb, u16 subvlan)
+{
+ struct dsa_port *dp = dsa_slave_to_port(skb->dev);
+ struct sja1105_port *sp = dp->priv;
+ u16 vid = sp->subvlan_map[subvlan];
+ u16 vlan_tci;
+
+ if (vid == VLAN_N_VID)
+ return;
+
+ vlan_tci = (skb->priority << VLAN_PRIO_SHIFT) | vid;
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
+}
+
+static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
+ struct net_device *netdev,
+ struct packet_type *pt)
+{
+ struct sja1105_meta meta = {0};
+ int source_port, switch_id;
+ struct ethhdr *hdr;
+ u16 tpid, vid, tci;
+ bool is_link_local;
+ u16 subvlan = 0;
+ bool is_tagged;
+ bool is_meta;
+
+ hdr = eth_hdr(skb);
+ tpid = ntohs(hdr->h_proto);
+ is_tagged = (tpid == ETH_P_SJA1105 || tpid == ETH_P_8021Q ||
+ skb_vlan_tag_present(skb));
+ is_link_local = sja1105_is_link_local(skb);
+ is_meta = sja1105_is_meta_frame(skb);
+
+ skb->offload_fwd_mark = 1;
+
+ if (is_tagged) {
+ /* Normal traffic path. */
+ skb_push_rcsum(skb, ETH_HLEN);
+ if (skb_vlan_tag_present(skb)) {
+ tci = skb_vlan_tag_get(skb);
+ __vlan_hwaccel_clear_tag(skb);
+ } else {
+ __skb_vlan_pop(skb, &tci);
+ }
+ skb_pull_rcsum(skb, ETH_HLEN);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+
+ vid = tci & VLAN_VID_MASK;
+ source_port = dsa_8021q_rx_source_port(vid);
+ switch_id = dsa_8021q_rx_switch_id(vid);
+ skb->priority = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ subvlan = dsa_8021q_rx_subvlan(vid);
+ } else if (is_link_local) {
+ /* Management traffic path. Switch embeds the switch ID and
+ * port ID into bytes of the destination MAC, courtesy of
+ * the incl_srcpt options.
+ */
+ source_port = hdr->h_dest[3];
+ switch_id = hdr->h_dest[4];
+ /* Clear the DMAC bytes that were mangled by the switch */
+ hdr->h_dest[3] = 0;
+ hdr->h_dest[4] = 0;
+ } else if (is_meta) {
+ sja1105_meta_unpack(skb, &meta);
+ source_port = meta.source_port;
+ switch_id = meta.switch_id;
+ } else {
+ return NULL;
+ }
+
+ skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
+ if (!skb->dev) {
+ netdev_warn(netdev, "Couldn't decode source port\n");
+ return NULL;
+ }
+
+ if (subvlan)
+ sja1105_decode_subvlan(skb, subvlan);
+
+ return sja1105_rcv_meta_state_machine(skb, &meta, is_link_local,
+ is_meta);
+}
+
+static void sja1105_flow_dissect(const struct sk_buff *skb, __be16 *proto,
+ int *offset)
+{
+ /* No tag added for management frames, all ok */
+ if (unlikely(sja1105_is_link_local(skb)))
+ return;
+
+ dsa_tag_generic_flow_dissect(skb, proto, offset);
+}
+
+static const struct dsa_device_ops sja1105_netdev_ops = {
+ .name = "sja1105",
+ .proto = DSA_TAG_PROTO_SJA1105,
+ .xmit = sja1105_xmit,
+ .rcv = sja1105_rcv,
+ .filter = sja1105_filter,
+ .overhead = VLAN_HLEN,
+ .flow_dissect = sja1105_flow_dissect,
+ .promisc_on_master = true,
+};
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_SJA1105);
+
+module_dsa_tag_driver(sja1105_netdev_ops);
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
new file mode 100644
index 000000000..5b97ede56
--- /dev/null
+++ b/net/dsa/tag_trailer.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * net/dsa/tag_trailer.c - Trailer tag format handling
+ * Copyright (c) 2008-2009 Marvell Semiconductor
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include "dsa_priv.h"
+
+static struct sk_buff *trailer_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ u8 *trailer;
+
+ trailer = skb_put(skb, 4);
+ trailer[0] = 0x80;
+ trailer[1] = 1 << dp->index;
+ trailer[2] = 0x10;
+ trailer[3] = 0x00;
+
+ return skb;
+}
+
+static struct sk_buff *trailer_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt)
+{
+ u8 *trailer;
+ int source_port;
+
+ if (skb_linearize(skb))
+ return NULL;
+
+ trailer = skb_tail_pointer(skb) - 4;
+ if (trailer[0] != 0x80 || (trailer[1] & 0xf8) != 0x00 ||
+ (trailer[2] & 0xef) != 0x00 || trailer[3] != 0x00)
+ return NULL;
+
+ source_port = trailer[1] & 7;
+
+ skb->dev = dsa_master_find_slave(dev, 0, source_port);
+ if (!skb->dev)
+ return NULL;
+
+ if (pskb_trim_rcsum(skb, skb->len - 4))
+ return NULL;
+
+ return skb;
+}
+
+static const struct dsa_device_ops trailer_netdev_ops = {
+ .name = "trailer",
+ .proto = DSA_TAG_PROTO_TRAILER,
+ .xmit = trailer_xmit,
+ .rcv = trailer_rcv,
+ .overhead = 4,
+ .tail_tag = true,
+};
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_TRAILER);
+
+module_dsa_tag_driver(trailer_netdev_ops);