author		Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-27 10:05:51 +0000
committer	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-27 10:05:51 +0000
commit		5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree		a94efe259b9009378be6d90eb30d2b019d95c194 /net/802
parent		Initial commit. (diff)

Adding upstream version 5.10.209.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat:
 -rw-r--r--  net/802/Kconfig          |  11
 -rw-r--r--  net/802/Makefile         |  15
 -rw-r--r--  net/802/fc.c             | 106
 -rw-r--r--  net/802/fddi.c           | 178
 -rw-r--r--  net/802/garp.c           | 649
 -rw-r--r--  net/802/hippi.c          | 193
 -rw-r--r--  net/802/mrp.c            | 943
 -rw-r--r--  net/802/p8022.c          |  63
 -rw-r--r--  net/802/p8023.c          |  60
 -rw-r--r--  net/802/psnap.c          | 163
 -rw-r--r--  net/802/stp.c            | 101
 -rw-r--r--  net/8021q/Kconfig        |  41
 -rw-r--r--  net/8021q/Makefile       |  11
 -rw-r--r--  net/8021q/vlan.c         | 739
 -rw-r--r--  net/8021q/vlan.h         | 200
 -rw-r--r--  net/8021q/vlan_core.c    | 560
 -rw-r--r--  net/8021q/vlan_dev.c     | 842
 -rw-r--r--  net/8021q/vlan_gvrp.c    |  67
 -rw-r--r--  net/8021q/vlan_mvrp.c    |  73
 -rw-r--r--  net/8021q/vlan_netlink.c | 307
 -rw-r--r--  net/8021q/vlanproc.c     | 291
 -rw-r--r--  net/8021q/vlanproc.h     |  21
 22 files changed, 5634 insertions(+), 0 deletions(-)
diff --git a/net/802/Kconfig b/net/802/Kconfig
new file mode 100644
index 000000000..aaa83e888
--- /dev/null
+++ b/net/802/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config STP
+ tristate
+ select LLC
+
+config GARP
+ tristate
+ select STP
+
+config MRP
+ tristate
diff --git a/net/802/Makefile b/net/802/Makefile
new file mode 100644
index 000000000..19406a87b
--- /dev/null
+++ b/net/802/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Linux 802.x protocol layers.
+#
+
+# Check the p8022 selections against net/core/Makefile.
+obj-$(CONFIG_LLC) += p8022.o psnap.o
+obj-$(CONFIG_NET_FC) += fc.o
+obj-$(CONFIG_FDDI) += fddi.o
+obj-$(CONFIG_HIPPI) += hippi.o
+obj-$(CONFIG_IPX) += p8022.o psnap.o p8023.o
+obj-$(CONFIG_ATALK) += p8022.o psnap.o
+obj-$(CONFIG_STP) += stp.o
+obj-$(CONFIG_GARP) += garp.o
+obj-$(CONFIG_MRP) += mrp.o
diff --git a/net/802/fc.c b/net/802/fc.c
new file mode 100644
index 000000000..afd3d288a
--- /dev/null
+++ b/net/802/fc.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * NET3: Fibre Channel device handling subroutines
+ *
+ * Vineet Abraham <vma@iol.unh.edu>
+ * v 1.0 03/22/99
+ */
+
+#include <linux/uaccess.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/fcdevice.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/timer.h>
+#include <linux/net.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <linux/export.h>
+#include <net/arp.h>
+
+/*
+ * Put the headers on a Fibre Channel packet.
+ */
+
+static int fc_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type,
+ const void *daddr, const void *saddr, unsigned int len)
+{
+ struct fch_hdr *fch;
+ int hdr_len;
+
+	/*
+	 *	Add the 802.2 SNAP header if this is IP or ARP, as the
+	 *	IPv4 code calls dev->hard_header directly.
+	 */
+ if (type == ETH_P_IP || type == ETH_P_ARP)
+ {
+ struct fcllc *fcllc;
+
+ hdr_len = sizeof(struct fch_hdr) + sizeof(struct fcllc);
+ fch = skb_push(skb, hdr_len);
+ fcllc = (struct fcllc *)(fch+1);
+ fcllc->dsap = fcllc->ssap = EXTENDED_SAP;
+ fcllc->llc = UI_CMD;
+ fcllc->protid[0] = fcllc->protid[1] = fcllc->protid[2] = 0x00;
+ fcllc->ethertype = htons(type);
+ }
+ else
+ {
+ hdr_len = sizeof(struct fch_hdr);
+ fch = skb_push(skb, hdr_len);
+ }
+
+ if(saddr)
+ memcpy(fch->saddr,saddr,dev->addr_len);
+ else
+ memcpy(fch->saddr,dev->dev_addr,dev->addr_len);
+
+ if(daddr)
+ {
+ memcpy(fch->daddr,daddr,dev->addr_len);
+ return hdr_len;
+ }
+ return -hdr_len;
+}
+
+static const struct header_ops fc_header_ops = {
+ .create = fc_header,
+};
+
+static void fc_setup(struct net_device *dev)
+{
+ dev->header_ops = &fc_header_ops;
+ dev->type = ARPHRD_IEEE802;
+ dev->hard_header_len = FC_HLEN;
+ dev->mtu = 2024;
+ dev->addr_len = FC_ALEN;
+ dev->tx_queue_len = 100; /* Long queues on fc */
+ dev->flags = IFF_BROADCAST;
+
+ memset(dev->broadcast, 0xFF, FC_ALEN);
+}
+
+/**
+ * alloc_fcdev - Register fibre channel device
+ * @sizeof_priv: Size of additional driver-private structure to be allocated
+ * for this fibre channel device
+ *
+ * Fill in the fields of the device structure with fibre channel-generic values.
+ *
+ * Constructs a new net device, complete with a private data area of
+ * size @sizeof_priv. A 32-byte (not bit) alignment is enforced for
+ * this private data area.
+ */
+struct net_device *alloc_fcdev(int sizeof_priv)
+{
+ return alloc_netdev(sizeof_priv, "fc%d", NET_NAME_UNKNOWN, fc_setup);
+}
+EXPORT_SYMBOL(alloc_fcdev);
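
The return convention used by fc_header() above is shared by every header_ops->create() implementation in this commit: the positive header length signals that the destination address was filled in, while the negative header length signals a complete header whose destination is still unresolved (e.g. awaiting ARP). A minimal userspace sketch of that contract, with invented names (toy_hdr, toy_header):

#include <stdio.h>
#include <string.h>

#define ADDR_LEN 6

struct toy_hdr {
	unsigned char daddr[ADDR_LEN];
	unsigned char saddr[ADDR_LEN];
};

/* Mimic header_ops->create(): +hdr_len when daddr was resolved,
 * -hdr_len when the header was written but daddr is still unknown. */
static int toy_header(struct toy_hdr *h, const unsigned char *daddr,
		      const unsigned char *saddr)
{
	int hdr_len = sizeof(*h);

	memcpy(h->saddr, saddr, ADDR_LEN);
	if (daddr) {
		memcpy(h->daddr, daddr, ADDR_LEN);
		return hdr_len;			/* complete header */
	}
	memset(h->daddr, 0, ADDR_LEN);
	return -hdr_len;			/* daddr still pending */
}

int main(void)
{
	struct toy_hdr h;
	unsigned char src[ADDR_LEN] = { 0x02, 0, 0, 0, 0, 1 };

	printf("resolved: %d\n", toy_header(&h, src, src));	/* 12 */
	printf("pending:  %d\n", toy_header(&h, NULL, src));	/* -12 */
	return 0;
}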
diff --git a/net/802/fddi.c b/net/802/fddi.c
new file mode 100644
index 000000000..7533ce26b
--- /dev/null
+++ b/net/802/fddi.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * FDDI-type device handling.
+ *
+ * Version: @(#)fddi.c 1.0.0 08/12/96
+ *
+ * Authors: Lawrence V. Stefani, <stefani@lkg.dec.com>
+ *
+ * fddi.c is based on previous eth.c and tr.c work by
+ * Ross Biro
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Mark Evans, <evansmp@uhura.aston.ac.uk>
+ * Florian La Roche, <rzsfl@rz.uni-sb.de>
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ *
+ * Changes
+ * Alan Cox : New arp/rebuild header
+ * Maciej W. Rozycki : IPv6 support
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/fddidevice.h>
+#include <linux/if_ether.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <net/arp.h>
+#include <net/sock.h>
+
+/*
+ * Create the FDDI MAC header for an arbitrary protocol layer
+ *
+ * saddr=NULL means use device source address
+ * daddr=NULL means leave destination address (eg unresolved arp)
+ */
+
+static int fddi_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type,
+ const void *daddr, const void *saddr, unsigned int len)
+{
+ int hl = FDDI_K_SNAP_HLEN;
+ struct fddihdr *fddi;
+
+ if(type != ETH_P_IP && type != ETH_P_IPV6 && type != ETH_P_ARP)
+ hl=FDDI_K_8022_HLEN-3;
+ fddi = skb_push(skb, hl);
+ fddi->fc = FDDI_FC_K_ASYNC_LLC_DEF;
+ if(type == ETH_P_IP || type == ETH_P_IPV6 || type == ETH_P_ARP)
+ {
+ fddi->hdr.llc_snap.dsap = FDDI_EXTENDED_SAP;
+ fddi->hdr.llc_snap.ssap = FDDI_EXTENDED_SAP;
+ fddi->hdr.llc_snap.ctrl = FDDI_UI_CMD;
+ fddi->hdr.llc_snap.oui[0] = 0x00;
+ fddi->hdr.llc_snap.oui[1] = 0x00;
+ fddi->hdr.llc_snap.oui[2] = 0x00;
+ fddi->hdr.llc_snap.ethertype = htons(type);
+ }
+
+ /* Set the source and destination hardware addresses */
+
+ if (saddr != NULL)
+ memcpy(fddi->saddr, saddr, dev->addr_len);
+ else
+ memcpy(fddi->saddr, dev->dev_addr, dev->addr_len);
+
+ if (daddr != NULL)
+ {
+ memcpy(fddi->daddr, daddr, dev->addr_len);
+ return hl;
+ }
+
+ return -hl;
+}
+
+/*
+ * Determine the packet's protocol ID and fill in skb fields.
+ * This routine is called before an incoming packet is passed
+ * up. It's used to fill in specific skb fields and to set
+ * the proper pointer to the start of packet data (skb->data).
+ */
+
+__be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+ struct fddihdr *fddi = (struct fddihdr *)skb->data;
+ __be16 type;
+
+ /*
+ * Set mac.raw field to point to FC byte, set data field to point
+ * to start of packet data. Assume 802.2 SNAP frames for now.
+ */
+
+ skb->dev = dev;
+ skb_reset_mac_header(skb); /* point to frame control (FC) */
+
+ if(fddi->hdr.llc_8022_1.dsap==0xe0)
+ {
+ skb_pull(skb, FDDI_K_8022_HLEN-3);
+ type = htons(ETH_P_802_2);
+ }
+ else
+ {
+ skb_pull(skb, FDDI_K_SNAP_HLEN); /* adjust for 21 byte header */
+ type=fddi->hdr.llc_snap.ethertype;
+ }
+
+ /* Set packet type based on destination address and flag settings */
+
+ if (*fddi->daddr & 0x01)
+ {
+ if (memcmp(fddi->daddr, dev->broadcast, FDDI_K_ALEN) == 0)
+ skb->pkt_type = PACKET_BROADCAST;
+ else
+ skb->pkt_type = PACKET_MULTICAST;
+ }
+
+ else if (dev->flags & IFF_PROMISC)
+ {
+ if (memcmp(fddi->daddr, dev->dev_addr, FDDI_K_ALEN))
+ skb->pkt_type = PACKET_OTHERHOST;
+ }
+
+ /* Assume 802.2 SNAP frames, for now */
+
+ return type;
+}
+
+EXPORT_SYMBOL(fddi_type_trans);
+
+static const struct header_ops fddi_header_ops = {
+ .create = fddi_header,
+};
+
+
+static void fddi_setup(struct net_device *dev)
+{
+ dev->header_ops = &fddi_header_ops;
+ dev->type = ARPHRD_FDDI;
+ dev->hard_header_len = FDDI_K_SNAP_HLEN+3; /* Assume 802.2 SNAP hdr len + 3 pad bytes */
+ dev->mtu = FDDI_K_SNAP_DLEN; /* Assume max payload of 802.2 SNAP frame */
+ dev->min_mtu = FDDI_K_SNAP_HLEN;
+ dev->max_mtu = FDDI_K_SNAP_DLEN;
+ dev->addr_len = FDDI_K_ALEN;
+ dev->tx_queue_len = 100; /* Long queues on FDDI */
+ dev->flags = IFF_BROADCAST | IFF_MULTICAST;
+
+ memset(dev->broadcast, 0xFF, FDDI_K_ALEN);
+}
+
+/**
+ * alloc_fddidev - Register FDDI device
+ * @sizeof_priv: Size of additional driver-private structure to be allocated
+ * for this FDDI device
+ *
+ * Fill in the fields of the device structure with FDDI-generic values.
+ *
+ * Constructs a new net device, complete with a private data area of
+ * size @sizeof_priv. A 32-byte (not bit) alignment is enforced for
+ * this private data area.
+ */
+struct net_device *alloc_fddidev(int sizeof_priv)
+{
+ return alloc_netdev(sizeof_priv, "fddi%d", NET_NAME_UNKNOWN,
+ fddi_setup);
+}
+EXPORT_SYMBOL(alloc_fddidev);
+
+MODULE_LICENSE("GPL");
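
fddi_type_trans() above classifies frames by the group bit of the destination address, the same rule every 802-style LAN uses. A userspace sketch of just that classification, with invented names (fddi_classify, PKT_*):

#include <stdio.h>
#include <string.h>

enum pkt_kind { PKT_HOST, PKT_BROADCAST, PKT_MULTICAST, PKT_OTHERHOST };

static enum pkt_kind fddi_classify(const unsigned char *daddr,
				   const unsigned char *dev_addr,
				   const unsigned char *bcast,
				   int promisc, size_t alen)
{
	if (daddr[0] & 0x01)	/* group bit: broadcast or multicast */
		return memcmp(daddr, bcast, alen) ? PKT_MULTICAST
						  : PKT_BROADCAST;
	if (promisc && memcmp(daddr, dev_addr, alen))
		return PKT_OTHERHOST;	/* unicast meant for someone else */
	return PKT_HOST;
}

int main(void)
{
	unsigned char me[6]    = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	unsigned char bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	unsigned char mcast[6] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x21 };

	printf("%d\n", fddi_classify(bcast, me, bcast, 1, 6));	/* 1 */
	printf("%d\n", fddi_classify(mcast, me, bcast, 1, 6));	/* 2 */
	printf("%d\n", fddi_classify(me, me, bcast, 1, 6));	/* 0 */
	return 0;
}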
diff --git a/net/802/garp.c b/net/802/garp.c
new file mode 100644
index 000000000..f6012f8e5
--- /dev/null
+++ b/net/802/garp.c
@@ -0,0 +1,649 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * IEEE 802.1D Generic Attribute Registration Protocol (GARP)
+ *
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ */
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/llc.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <net/llc.h>
+#include <net/llc_pdu.h>
+#include <net/garp.h>
+#include <asm/unaligned.h>
+
+static unsigned int garp_join_time __read_mostly = 200;
+module_param(garp_join_time, uint, 0644);
+MODULE_PARM_DESC(garp_join_time, "Join time in ms (default 200ms)");
+MODULE_LICENSE("GPL");
+
+static const struct garp_state_trans {
+ u8 state;
+ u8 action;
+} garp_applicant_state_table[GARP_APPLICANT_MAX + 1][GARP_EVENT_MAX + 1] = {
+ [GARP_APPLICANT_VA] = {
+ [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_AA,
+ .action = GARP_ACTION_S_JOIN_IN },
+ [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_AA },
+ [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VA },
+ [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VA },
+ [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VA },
+ [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP },
+ [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID },
+ [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_LA },
+ },
+ [GARP_APPLICANT_AA] = {
+ [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_QA,
+ .action = GARP_ACTION_S_JOIN_IN },
+ [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QA },
+ [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VA },
+ [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VA },
+ [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VA },
+ [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP },
+ [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID },
+ [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_LA },
+ },
+ [GARP_APPLICANT_QA] = {
+ [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID },
+ [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QA },
+ [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VA },
+ [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VA },
+ [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VP },
+ [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP },
+ [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID },
+ [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_LA },
+ },
+ [GARP_APPLICANT_LA] = {
+ [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_VO,
+ .action = GARP_ACTION_S_LEAVE_EMPTY },
+ [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_LA },
+ [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VO },
+ [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_LA },
+ [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_LA },
+ [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VO },
+ [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_VA },
+ [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_INVALID },
+ },
+ [GARP_APPLICANT_VP] = {
+ [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_AA,
+ .action = GARP_ACTION_S_JOIN_IN },
+ [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_AP },
+ [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VP },
+ [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VP },
+ [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VP },
+ [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP },
+ [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID },
+ [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_VO },
+ },
+ [GARP_APPLICANT_AP] = {
+ [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_QA,
+ .action = GARP_ACTION_S_JOIN_IN },
+ [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QP },
+ [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VP },
+ [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VP },
+ [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VP },
+ [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP },
+ [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID },
+ [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_AO },
+ },
+ [GARP_APPLICANT_QP] = {
+ [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID },
+ [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QP },
+ [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VP },
+ [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VP },
+ [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VP },
+ [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP },
+ [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID },
+ [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_QO },
+ },
+ [GARP_APPLICANT_VO] = {
+ [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID },
+ [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_AO },
+ [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VO },
+ [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VO },
+ [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VO },
+ [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VO },
+ [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_VP },
+ [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_INVALID },
+ },
+ [GARP_APPLICANT_AO] = {
+ [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID },
+ [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QO },
+ [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VO },
+ [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VO },
+ [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VO },
+ [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VO },
+ [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_AP },
+ [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_INVALID },
+ },
+ [GARP_APPLICANT_QO] = {
+ [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID },
+ [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QO },
+ [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VO },
+ [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VO },
+ [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VO },
+ [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VO },
+ [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_QP },
+ [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_INVALID },
+ },
+};
+
+static int garp_attr_cmp(const struct garp_attr *attr,
+ const void *data, u8 len, u8 type)
+{
+ if (attr->type != type)
+ return attr->type - type;
+ if (attr->dlen != len)
+ return attr->dlen - len;
+ return memcmp(attr->data, data, len);
+}
+
+static struct garp_attr *garp_attr_lookup(const struct garp_applicant *app,
+ const void *data, u8 len, u8 type)
+{
+ struct rb_node *parent = app->gid.rb_node;
+ struct garp_attr *attr;
+ int d;
+
+ while (parent) {
+ attr = rb_entry(parent, struct garp_attr, node);
+ d = garp_attr_cmp(attr, data, len, type);
+ if (d > 0)
+ parent = parent->rb_left;
+ else if (d < 0)
+ parent = parent->rb_right;
+ else
+ return attr;
+ }
+ return NULL;
+}
+
+static struct garp_attr *garp_attr_create(struct garp_applicant *app,
+ const void *data, u8 len, u8 type)
+{
+ struct rb_node *parent = NULL, **p = &app->gid.rb_node;
+ struct garp_attr *attr;
+ int d;
+
+ while (*p) {
+ parent = *p;
+ attr = rb_entry(parent, struct garp_attr, node);
+ d = garp_attr_cmp(attr, data, len, type);
+ if (d > 0)
+ p = &parent->rb_left;
+ else if (d < 0)
+ p = &parent->rb_right;
+ else {
+ /* The attribute already exists; re-use it. */
+ return attr;
+ }
+ }
+ attr = kmalloc(sizeof(*attr) + len, GFP_ATOMIC);
+ if (!attr)
+ return attr;
+ attr->state = GARP_APPLICANT_VO;
+ attr->type = type;
+ attr->dlen = len;
+ memcpy(attr->data, data, len);
+
+ rb_link_node(&attr->node, parent, p);
+ rb_insert_color(&attr->node, &app->gid);
+ return attr;
+}
+
+static void garp_attr_destroy(struct garp_applicant *app, struct garp_attr *attr)
+{
+ rb_erase(&attr->node, &app->gid);
+ kfree(attr);
+}
+
+static void garp_attr_destroy_all(struct garp_applicant *app)
+{
+ struct rb_node *node, *next;
+ struct garp_attr *attr;
+
+ for (node = rb_first(&app->gid);
+ next = node ? rb_next(node) : NULL, node != NULL;
+ node = next) {
+ attr = rb_entry(node, struct garp_attr, node);
+ garp_attr_destroy(app, attr);
+ }
+}
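
The comma expression in the loop above fetches the successor with rb_next() before the loop body runs, which is the only safe order when the body frees the node it is visiting. A userspace analogue of the same idiom on a singly linked list (list_node and destroy_all are invented names):

#include <stdio.h>
#include <stdlib.h>

struct list_node {
	int v;
	struct list_node *next;
};

static void destroy_all(struct list_node *head)
{
	struct list_node *node, *next;

	for (node = head; node != NULL; node = next) {
		next = node->next;	/* save successor before freeing */
		printf("freeing %d\n", node->v);
		free(node);		/* node must not be touched again */
	}
}

int main(void)
{
	struct list_node *head = NULL;
	int i;

	for (i = 3; i > 0; i--) {
		struct list_node *n = malloc(sizeof(*n));

		n->v = i;
		n->next = head;
		head = n;
	}
	destroy_all(head);	/* prints: freeing 1, 2, 3 */
	return 0;
}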
+
+static int garp_pdu_init(struct garp_applicant *app)
+{
+ struct sk_buff *skb;
+ struct garp_pdu_hdr *gp;
+
+#define LLC_RESERVE sizeof(struct llc_pdu_un)
+ skb = alloc_skb(app->dev->mtu + LL_RESERVED_SPACE(app->dev),
+ GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ skb->dev = app->dev;
+ skb->protocol = htons(ETH_P_802_2);
+ skb_reserve(skb, LL_RESERVED_SPACE(app->dev) + LLC_RESERVE);
+
+ gp = __skb_put(skb, sizeof(*gp));
+ put_unaligned(htons(GARP_PROTOCOL_ID), &gp->protocol);
+
+ app->pdu = skb;
+ return 0;
+}
+
+static int garp_pdu_append_end_mark(struct garp_applicant *app)
+{
+ if (skb_tailroom(app->pdu) < sizeof(u8))
+ return -1;
+ __skb_put_u8(app->pdu, GARP_END_MARK);
+ return 0;
+}
+
+static void garp_pdu_queue(struct garp_applicant *app)
+{
+ if (!app->pdu)
+ return;
+
+ garp_pdu_append_end_mark(app);
+ garp_pdu_append_end_mark(app);
+
+ llc_pdu_header_init(app->pdu, LLC_PDU_TYPE_U, LLC_SAP_BSPAN,
+ LLC_SAP_BSPAN, LLC_PDU_CMD);
+ llc_pdu_init_as_ui_cmd(app->pdu);
+ llc_mac_hdr_init(app->pdu, app->dev->dev_addr,
+ app->app->proto.group_address);
+
+ skb_queue_tail(&app->queue, app->pdu);
+ app->pdu = NULL;
+}
+
+static void garp_queue_xmit(struct garp_applicant *app)
+{
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&app->queue)))
+ dev_queue_xmit(skb);
+}
+
+static int garp_pdu_append_msg(struct garp_applicant *app, u8 attrtype)
+{
+ struct garp_msg_hdr *gm;
+
+ if (skb_tailroom(app->pdu) < sizeof(*gm))
+ return -1;
+ gm = __skb_put(app->pdu, sizeof(*gm));
+ gm->attrtype = attrtype;
+ garp_cb(app->pdu)->cur_type = attrtype;
+ return 0;
+}
+
+static int garp_pdu_append_attr(struct garp_applicant *app,
+ const struct garp_attr *attr,
+ enum garp_attr_event event)
+{
+ struct garp_attr_hdr *ga;
+ unsigned int len;
+ int err;
+again:
+ if (!app->pdu) {
+ err = garp_pdu_init(app);
+ if (err < 0)
+ return err;
+ }
+
+ if (garp_cb(app->pdu)->cur_type != attr->type) {
+ if (garp_cb(app->pdu)->cur_type &&
+ garp_pdu_append_end_mark(app) < 0)
+ goto queue;
+ if (garp_pdu_append_msg(app, attr->type) < 0)
+ goto queue;
+ }
+
+ len = sizeof(*ga) + attr->dlen;
+ if (skb_tailroom(app->pdu) < len)
+ goto queue;
+ ga = __skb_put(app->pdu, len);
+ ga->len = len;
+ ga->event = event;
+ memcpy(ga->data, attr->data, attr->dlen);
+ return 0;
+
+queue:
+ garp_pdu_queue(app);
+ goto again;
+}
+
+static void garp_attr_event(struct garp_applicant *app,
+ struct garp_attr *attr, enum garp_event event)
+{
+ enum garp_applicant_state state;
+
+ state = garp_applicant_state_table[attr->state][event].state;
+ if (state == GARP_APPLICANT_INVALID)
+ return;
+
+ switch (garp_applicant_state_table[attr->state][event].action) {
+ case GARP_ACTION_NONE:
+ break;
+ case GARP_ACTION_S_JOIN_IN:
+ /* When appending the attribute fails, don't update state in
+ * order to retry on next TRANSMIT_PDU event. */
+ if (garp_pdu_append_attr(app, attr, GARP_JOIN_IN) < 0)
+ return;
+ break;
+ case GARP_ACTION_S_LEAVE_EMPTY:
+ garp_pdu_append_attr(app, attr, GARP_LEAVE_EMPTY);
+ /* As a pure applicant, sending a leave message implies that
+ * the attribute was unregistered and can be destroyed. */
+ garp_attr_destroy(app, attr);
+ return;
+ default:
+ WARN_ON(1);
+ }
+
+ attr->state = state;
+}
+
+int garp_request_join(const struct net_device *dev,
+ const struct garp_application *appl,
+ const void *data, u8 len, u8 type)
+{
+ struct garp_port *port = rtnl_dereference(dev->garp_port);
+ struct garp_applicant *app = rtnl_dereference(port->applicants[appl->type]);
+ struct garp_attr *attr;
+
+ spin_lock_bh(&app->lock);
+ attr = garp_attr_create(app, data, len, type);
+ if (!attr) {
+ spin_unlock_bh(&app->lock);
+ return -ENOMEM;
+ }
+ garp_attr_event(app, attr, GARP_EVENT_REQ_JOIN);
+ spin_unlock_bh(&app->lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(garp_request_join);
+
+void garp_request_leave(const struct net_device *dev,
+ const struct garp_application *appl,
+ const void *data, u8 len, u8 type)
+{
+ struct garp_port *port = rtnl_dereference(dev->garp_port);
+ struct garp_applicant *app = rtnl_dereference(port->applicants[appl->type]);
+ struct garp_attr *attr;
+
+ spin_lock_bh(&app->lock);
+ attr = garp_attr_lookup(app, data, len, type);
+ if (!attr) {
+ spin_unlock_bh(&app->lock);
+ return;
+ }
+ garp_attr_event(app, attr, GARP_EVENT_REQ_LEAVE);
+ spin_unlock_bh(&app->lock);
+}
+EXPORT_SYMBOL_GPL(garp_request_leave);
+
+static void garp_gid_event(struct garp_applicant *app, enum garp_event event)
+{
+ struct rb_node *node, *next;
+ struct garp_attr *attr;
+
+ for (node = rb_first(&app->gid);
+ next = node ? rb_next(node) : NULL, node != NULL;
+ node = next) {
+ attr = rb_entry(node, struct garp_attr, node);
+ garp_attr_event(app, attr, event);
+ }
+}
+
+static void garp_join_timer_arm(struct garp_applicant *app)
+{
+ unsigned long delay;
+
+ delay = (u64)msecs_to_jiffies(garp_join_time) * prandom_u32() >> 32;
+ mod_timer(&app->join_timer, jiffies + delay);
+}
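
The delay computation above is a multiply-shift: scaling a uniform 32-bit random value by the range and shifting right by 32 maps it into [0, range) without the skew of a plain modulo, so each applicant fires at a random point inside the join window rather than all at once. A userspace check of the arithmetic (HZ=1000 is an assumption here, making 200 ms equal 200 jiffies):

#include <stdint.h>
#include <stdio.h>

static uint32_t scale(uint32_t range, uint32_t rand32)
{
	return (uint32_t)(((uint64_t)range * rand32) >> 32);
}

int main(void)
{
	printf("%u\n", scale(200, 0x00000000u));	/* 0   */
	printf("%u\n", scale(200, 0x80000000u));	/* 100 */
	printf("%u\n", scale(200, 0xffffffffu));	/* 199 */
	return 0;
}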
+
+static void garp_join_timer(struct timer_list *t)
+{
+ struct garp_applicant *app = from_timer(app, t, join_timer);
+
+ spin_lock(&app->lock);
+ garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
+ garp_pdu_queue(app);
+ spin_unlock(&app->lock);
+
+ garp_queue_xmit(app);
+ garp_join_timer_arm(app);
+}
+
+static int garp_pdu_parse_end_mark(struct sk_buff *skb)
+{
+ if (!pskb_may_pull(skb, sizeof(u8)))
+ return -1;
+ if (*skb->data == GARP_END_MARK) {
+ skb_pull(skb, sizeof(u8));
+ return -1;
+ }
+ return 0;
+}
+
+static int garp_pdu_parse_attr(struct garp_applicant *app, struct sk_buff *skb,
+ u8 attrtype)
+{
+ const struct garp_attr_hdr *ga;
+ struct garp_attr *attr;
+ enum garp_event event;
+ unsigned int dlen;
+
+ if (!pskb_may_pull(skb, sizeof(*ga)))
+ return -1;
+ ga = (struct garp_attr_hdr *)skb->data;
+ if (ga->len < sizeof(*ga))
+ return -1;
+
+ if (!pskb_may_pull(skb, ga->len))
+ return -1;
+ skb_pull(skb, ga->len);
+	dlen = ga->len - sizeof(*ga);
+
+ if (attrtype > app->app->maxattr)
+ return 0;
+
+ switch (ga->event) {
+ case GARP_LEAVE_ALL:
+ if (dlen != 0)
+ return -1;
+ garp_gid_event(app, GARP_EVENT_R_LEAVE_EMPTY);
+ return 0;
+ case GARP_JOIN_EMPTY:
+ event = GARP_EVENT_R_JOIN_EMPTY;
+ break;
+ case GARP_JOIN_IN:
+ event = GARP_EVENT_R_JOIN_IN;
+ break;
+ case GARP_LEAVE_EMPTY:
+ event = GARP_EVENT_R_LEAVE_EMPTY;
+ break;
+ case GARP_EMPTY:
+ event = GARP_EVENT_R_EMPTY;
+ break;
+ default:
+ return 0;
+ }
+
+ if (dlen == 0)
+ return -1;
+ attr = garp_attr_lookup(app, ga->data, dlen, attrtype);
+ if (attr == NULL)
+ return 0;
+ garp_attr_event(app, attr, event);
+ return 0;
+}
+
+static int garp_pdu_parse_msg(struct garp_applicant *app, struct sk_buff *skb)
+{
+ const struct garp_msg_hdr *gm;
+
+ if (!pskb_may_pull(skb, sizeof(*gm)))
+ return -1;
+ gm = (struct garp_msg_hdr *)skb->data;
+ if (gm->attrtype == 0)
+ return -1;
+ skb_pull(skb, sizeof(*gm));
+
+ while (skb->len > 0) {
+ if (garp_pdu_parse_attr(app, skb, gm->attrtype) < 0)
+ return -1;
+ if (garp_pdu_parse_end_mark(skb) < 0)
+ break;
+ }
+ return 0;
+}
+
+static void garp_pdu_rcv(const struct stp_proto *proto, struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct garp_application *appl = proto->data;
+ struct garp_port *port;
+ struct garp_applicant *app;
+ const struct garp_pdu_hdr *gp;
+
+ port = rcu_dereference(dev->garp_port);
+ if (!port)
+ goto err;
+ app = rcu_dereference(port->applicants[appl->type]);
+ if (!app)
+ goto err;
+
+ if (!pskb_may_pull(skb, sizeof(*gp)))
+ goto err;
+ gp = (struct garp_pdu_hdr *)skb->data;
+ if (get_unaligned(&gp->protocol) != htons(GARP_PROTOCOL_ID))
+ goto err;
+ skb_pull(skb, sizeof(*gp));
+
+ spin_lock(&app->lock);
+ while (skb->len > 0) {
+ if (garp_pdu_parse_msg(app, skb) < 0)
+ break;
+ if (garp_pdu_parse_end_mark(skb) < 0)
+ break;
+ }
+ spin_unlock(&app->lock);
+err:
+ kfree_skb(skb);
+}
+
+static int garp_init_port(struct net_device *dev)
+{
+ struct garp_port *port;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+ rcu_assign_pointer(dev->garp_port, port);
+ return 0;
+}
+
+static void garp_release_port(struct net_device *dev)
+{
+ struct garp_port *port = rtnl_dereference(dev->garp_port);
+ unsigned int i;
+
+ for (i = 0; i <= GARP_APPLICATION_MAX; i++) {
+ if (rtnl_dereference(port->applicants[i]))
+ return;
+ }
+ RCU_INIT_POINTER(dev->garp_port, NULL);
+ kfree_rcu(port, rcu);
+}
+
+int garp_init_applicant(struct net_device *dev, struct garp_application *appl)
+{
+ struct garp_applicant *app;
+ int err;
+
+ ASSERT_RTNL();
+
+ if (!rtnl_dereference(dev->garp_port)) {
+ err = garp_init_port(dev);
+ if (err < 0)
+ goto err1;
+ }
+
+ err = -ENOMEM;
+ app = kzalloc(sizeof(*app), GFP_KERNEL);
+ if (!app)
+ goto err2;
+
+ err = dev_mc_add(dev, appl->proto.group_address);
+ if (err < 0)
+ goto err3;
+
+ app->dev = dev;
+ app->app = appl;
+ app->gid = RB_ROOT;
+ spin_lock_init(&app->lock);
+ skb_queue_head_init(&app->queue);
+ rcu_assign_pointer(dev->garp_port->applicants[appl->type], app);
+ timer_setup(&app->join_timer, garp_join_timer, 0);
+ garp_join_timer_arm(app);
+ return 0;
+
+err3:
+ kfree(app);
+err2:
+ garp_release_port(dev);
+err1:
+ return err;
+}
+EXPORT_SYMBOL_GPL(garp_init_applicant);
+
+void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl)
+{
+ struct garp_port *port = rtnl_dereference(dev->garp_port);
+ struct garp_applicant *app = rtnl_dereference(port->applicants[appl->type]);
+
+ ASSERT_RTNL();
+
+ RCU_INIT_POINTER(port->applicants[appl->type], NULL);
+
+ /* Delete timer and generate a final TRANSMIT_PDU event to flush out
+ * all pending messages before the applicant is gone. */
+ del_timer_sync(&app->join_timer);
+
+ spin_lock_bh(&app->lock);
+ garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
+ garp_attr_destroy_all(app);
+ garp_pdu_queue(app);
+ spin_unlock_bh(&app->lock);
+
+ garp_queue_xmit(app);
+
+ dev_mc_del(dev, appl->proto.group_address);
+ kfree_rcu(app, rcu);
+ garp_release_port(dev);
+}
+EXPORT_SYMBOL_GPL(garp_uninit_applicant);
+
+int garp_register_application(struct garp_application *appl)
+{
+ appl->proto.rcv = garp_pdu_rcv;
+ appl->proto.data = appl;
+ return stp_proto_register(&appl->proto);
+}
+EXPORT_SYMBOL_GPL(garp_register_application);
+
+void garp_unregister_application(struct garp_application *appl)
+{
+ stp_proto_unregister(&appl->proto);
+}
+EXPORT_SYMBOL_GPL(garp_unregister_application);
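
A hedged sketch of how a client registers and uses this API, loosely modeled on net/8021q/vlan_gvrp.c from the same commit; the attribute numbering, group address and omitted error handling are illustrative, not verbatim:

#include <linux/module.h>
#include <net/garp.h>

enum { MY_ATTR_VID = 1, MY_ATTR_MAX = MY_ATTR_VID };

static struct garp_application my_garp_app __read_mostly = {
	.proto.group_address	= { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x21 },
	.maxattr		= MY_ATTR_MAX,
	.type			= GARP_APPLICATION_GVRP,
};

/* Advertise a 16-bit VLAN id; dev must already have passed through
 * garp_init_applicant(dev, &my_garp_app). */
static int my_garp_join(struct net_device *dev, u16 vid)
{
	__be16 vid_be = htons(vid);

	return garp_request_join(dev, &my_garp_app, &vid_be,
				 sizeof(vid_be), MY_ATTR_VID);
}

static int __init my_garp_init(void)
{
	return garp_register_application(&my_garp_app);
}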
diff --git a/net/802/hippi.c b/net/802/hippi.c
new file mode 100644
index 000000000..f80b33a8f
--- /dev/null
+++ b/net/802/hippi.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * HIPPI-type device handling.
+ *
+ * Version: @(#)hippi.c 1.0.0 05/29/97
+ *
+ * Authors: Ross Biro
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Mark Evans, <evansmp@uhura.aston.ac.uk>
+ * Florian La Roche, <rzsfl@rz.uni-sb.de>
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ * Jes Sorensen, <Jes.Sorensen@cern.ch>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/hippidevice.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <net/arp.h>
+#include <net/sock.h>
+#include <linux/uaccess.h>
+
+/*
+ * Create the HIPPI MAC header for an arbitrary protocol layer
+ *
+ * saddr=NULL means use device source address
+ * daddr=NULL means leave destination address (eg unresolved arp)
+ */
+
+static int hippi_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type,
+ const void *daddr, const void *saddr, unsigned int len)
+{
+ struct hippi_hdr *hip = skb_push(skb, HIPPI_HLEN);
+ struct hippi_cb *hcb = (struct hippi_cb *) skb->cb;
+
+ if (!len){
+ len = skb->len - HIPPI_HLEN;
+ printk("hippi_header(): length not supplied\n");
+ }
+
+ /*
+ * Due to the stupidity of the little endian byte-order we
+ * have to set the fp field this way.
+ */
+ hip->fp.fixed = htonl(0x04800018);
+ hip->fp.d2_size = htonl(len + 8);
+ hip->le.fc = 0;
+ hip->le.double_wide = 0; /* only HIPPI 800 for the time being */
+ hip->le.message_type = 0; /* Data PDU */
+
+ hip->le.dest_addr_type = 2; /* 12 bit SC address */
+ hip->le.src_addr_type = 2; /* 12 bit SC address */
+
+ memcpy(hip->le.src_switch_addr, dev->dev_addr + 3, 3);
+ memset(&hip->le.reserved, 0, 16);
+
+ hip->snap.dsap = HIPPI_EXTENDED_SAP;
+ hip->snap.ssap = HIPPI_EXTENDED_SAP;
+ hip->snap.ctrl = HIPPI_UI_CMD;
+ hip->snap.oui[0] = 0x00;
+ hip->snap.oui[1] = 0x00;
+ hip->snap.oui[2] = 0x00;
+ hip->snap.ethertype = htons(type);
+
+ if (daddr)
+ {
+ memcpy(hip->le.dest_switch_addr, daddr + 3, 3);
+ memcpy(&hcb->ifield, daddr + 2, 4);
+ return HIPPI_HLEN;
+ }
+ hcb->ifield = 0;
+ return -((int)HIPPI_HLEN);
+}
+
+
+/*
+ * Determine the packet's protocol ID.
+ */
+
+__be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+ struct hippi_hdr *hip;
+
+ /*
+ * This is actually wrong ... question is if we really should
+ * set the raw address here.
+ */
+ skb->dev = dev;
+ skb_reset_mac_header(skb);
+ hip = (struct hippi_hdr *)skb_mac_header(skb);
+ skb_pull(skb, HIPPI_HLEN);
+
+ /*
+ * No fancy promisc stuff here now.
+ */
+
+ return hip->snap.ethertype;
+}
+
+EXPORT_SYMBOL(hippi_type_trans);
+
+/*
+ * For HIPPI we will actually use the lower 4 bytes of the hardware
+ * address as the I-FIELD rather than the actual hardware address.
+ */
+int hippi_mac_addr(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+ if (netif_running(dev))
+ return -EBUSY;
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ return 0;
+}
+EXPORT_SYMBOL(hippi_mac_addr);
+
+int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
+{
+ /* Never send broadcast/multicast ARP messages */
+ NEIGH_VAR_INIT(p, MCAST_PROBES, 0);
+
+ /* In IPv6 unicast probes are valid even on NBMA,
+ * because they are encapsulated in normal IPv6 protocol.
+ * Should be a generic flag.
+ */
+ if (p->tbl->family != AF_INET6)
+ NEIGH_VAR_INIT(p, UCAST_PROBES, 0);
+ return 0;
+}
+EXPORT_SYMBOL(hippi_neigh_setup_dev);
+
+static const struct header_ops hippi_header_ops = {
+ .create = hippi_header,
+};
+
+
+static void hippi_setup(struct net_device *dev)
+{
+ dev->header_ops = &hippi_header_ops;
+
+ /*
+ * We don't support HIPPI `ARP' for the time being, and probably
+ * never will unless someone else implements it. However we
+ * still need a fake ARPHRD to make ifconfig and friends play ball.
+ */
+ dev->type = ARPHRD_HIPPI;
+ dev->hard_header_len = HIPPI_HLEN;
+ dev->mtu = 65280;
+ dev->min_mtu = 68;
+ dev->max_mtu = 65280;
+ dev->addr_len = HIPPI_ALEN;
+ dev->tx_queue_len = 25 /* 5 */;
+ memset(dev->broadcast, 0xFF, HIPPI_ALEN);
+
+
+ /*
+ * HIPPI doesn't support broadcast+multicast and we only use
+ * static ARP tables. ARP is disabled by hippi_neigh_setup_dev.
+ */
+ dev->flags = 0;
+}
+
+/**
+ * alloc_hippi_dev - Register HIPPI device
+ * @sizeof_priv: Size of additional driver-private structure to be allocated
+ * for this HIPPI device
+ *
+ * Fill in the fields of the device structure with HIPPI-generic values.
+ *
+ * Constructs a new net device, complete with a private data area of
+ * size @sizeof_priv. A 32-byte (not bit) alignment is enforced for
+ * this private data area.
+ */
+
+struct net_device *alloc_hippi_dev(int sizeof_priv)
+{
+ return alloc_netdev(sizeof_priv, "hip%d", NET_NAME_UNKNOWN,
+ hippi_setup);
+}
+
+EXPORT_SYMBOL(alloc_hippi_dev);
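
The comment in hippi_mac_addr() is worth unpacking: the six-byte device address doubles as addressing material, with bytes 3..5 becoming the 12-bit SC switch address and bytes 2..5 the four-byte I-field that hippi_header() stores in the skb control block. A userspace sketch of that slicing (all names invented):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	unsigned char daddr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned char sw[3];
	uint32_t ifield;

	memcpy(sw, daddr + 3, 3);	/* switch address, as in hippi_header() */
	memcpy(&ifield, daddr + 2, 4);	/* raw I-field bytes, native order */
	printf("switch %02x:%02x:%02x, ifield bytes %08x\n",
	       sw[0], sw[1], sw[2], (unsigned int)ifield);
	return 0;
}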
diff --git a/net/802/mrp.c b/net/802/mrp.c
new file mode 100644
index 000000000..c10a432a5
--- /dev/null
+++ b/net/802/mrp.c
@@ -0,0 +1,943 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * IEEE 802.1Q Multiple Registration Protocol (MRP)
+ *
+ * Copyright (c) 2012 Massachusetts Institute of Technology
+ *
+ * Adapted from code in net/802/garp.c
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ */
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <net/mrp.h>
+#include <asm/unaligned.h>
+
+static unsigned int mrp_join_time __read_mostly = 200;
+module_param(mrp_join_time, uint, 0644);
+MODULE_PARM_DESC(mrp_join_time, "Join time in ms (default 200ms)");
+
+static unsigned int mrp_periodic_time __read_mostly = 1000;
+module_param(mrp_periodic_time, uint, 0644);
+MODULE_PARM_DESC(mrp_periodic_time, "Periodic time in ms (default 1s)");
+
+MODULE_LICENSE("GPL");
+
+static const u8
+mrp_applicant_state_table[MRP_APPLICANT_MAX + 1][MRP_EVENT_MAX + 1] = {
+ [MRP_APPLICANT_VO] = {
+ [MRP_EVENT_NEW] = MRP_APPLICANT_VN,
+ [MRP_EVENT_JOIN] = MRP_APPLICANT_VP,
+ [MRP_EVENT_LV] = MRP_APPLICANT_VO,
+ [MRP_EVENT_TX] = MRP_APPLICANT_VO,
+ [MRP_EVENT_R_NEW] = MRP_APPLICANT_VO,
+ [MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_AO,
+ [MRP_EVENT_R_IN] = MRP_APPLICANT_VO,
+ [MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_VO,
+ [MRP_EVENT_R_MT] = MRP_APPLICANT_VO,
+ [MRP_EVENT_R_LV] = MRP_APPLICANT_VO,
+ [MRP_EVENT_R_LA] = MRP_APPLICANT_VO,
+ [MRP_EVENT_REDECLARE] = MRP_APPLICANT_VO,
+ [MRP_EVENT_PERIODIC] = MRP_APPLICANT_VO,
+ },
+ [MRP_APPLICANT_VP] = {
+ [MRP_EVENT_NEW] = MRP_APPLICANT_VN,
+ [MRP_EVENT_JOIN] = MRP_APPLICANT_VP,
+ [MRP_EVENT_LV] = MRP_APPLICANT_VO,
+ [MRP_EVENT_TX] = MRP_APPLICANT_AA,
+ [MRP_EVENT_R_NEW] = MRP_APPLICANT_VP,
+ [MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_AP,
+ [MRP_EVENT_R_IN] = MRP_APPLICANT_VP,
+ [MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_VP,
+ [MRP_EVENT_R_MT] = MRP_APPLICANT_VP,
+ [MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
+ [MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
+ [MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
+ [MRP_EVENT_PERIODIC] = MRP_APPLICANT_VP,
+ },
+ [MRP_APPLICANT_VN] = {
+ [MRP_EVENT_NEW] = MRP_APPLICANT_VN,
+ [MRP_EVENT_JOIN] = MRP_APPLICANT_VN,
+ [MRP_EVENT_LV] = MRP_APPLICANT_LA,
+ [MRP_EVENT_TX] = MRP_APPLICANT_AN,
+ [MRP_EVENT_R_NEW] = MRP_APPLICANT_VN,
+ [MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_VN,
+ [MRP_EVENT_R_IN] = MRP_APPLICANT_VN,
+ [MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_VN,
+ [MRP_EVENT_R_MT] = MRP_APPLICANT_VN,
+ [MRP_EVENT_R_LV] = MRP_APPLICANT_VN,
+ [MRP_EVENT_R_LA] = MRP_APPLICANT_VN,
+ [MRP_EVENT_REDECLARE] = MRP_APPLICANT_VN,
+ [MRP_EVENT_PERIODIC] = MRP_APPLICANT_VN,
+ },
+ [MRP_APPLICANT_AN] = {
+ [MRP_EVENT_NEW] = MRP_APPLICANT_AN,
+ [MRP_EVENT_JOIN] = MRP_APPLICANT_AN,
+ [MRP_EVENT_LV] = MRP_APPLICANT_LA,
+ [MRP_EVENT_TX] = MRP_APPLICANT_QA,
+ [MRP_EVENT_R_NEW] = MRP_APPLICANT_AN,
+ [MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_AN,
+ [MRP_EVENT_R_IN] = MRP_APPLICANT_AN,
+ [MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AN,
+ [MRP_EVENT_R_MT] = MRP_APPLICANT_AN,
+ [MRP_EVENT_R_LV] = MRP_APPLICANT_VN,
+ [MRP_EVENT_R_LA] = MRP_APPLICANT_VN,
+ [MRP_EVENT_REDECLARE] = MRP_APPLICANT_VN,
+ [MRP_EVENT_PERIODIC] = MRP_APPLICANT_AN,
+ },
+ [MRP_APPLICANT_AA] = {
+ [MRP_EVENT_NEW] = MRP_APPLICANT_VN,
+ [MRP_EVENT_JOIN] = MRP_APPLICANT_AA,
+ [MRP_EVENT_LV] = MRP_APPLICANT_LA,
+ [MRP_EVENT_TX] = MRP_APPLICANT_QA,
+ [MRP_EVENT_R_NEW] = MRP_APPLICANT_AA,
+ [MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QA,
+ [MRP_EVENT_R_IN] = MRP_APPLICANT_AA,
+ [MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AA,
+ [MRP_EVENT_R_MT] = MRP_APPLICANT_AA,
+ [MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
+ [MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
+ [MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
+ [MRP_EVENT_PERIODIC] = MRP_APPLICANT_AA,
+ },
+ [MRP_APPLICANT_QA] = {
+ [MRP_EVENT_NEW] = MRP_APPLICANT_VN,
+ [MRP_EVENT_JOIN] = MRP_APPLICANT_QA,
+ [MRP_EVENT_LV] = MRP_APPLICANT_LA,
+ [MRP_EVENT_TX] = MRP_APPLICANT_QA,
+ [MRP_EVENT_R_NEW] = MRP_APPLICANT_QA,
+ [MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QA,
+ [MRP_EVENT_R_IN] = MRP_APPLICANT_QA,
+ [MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AA,
+ [MRP_EVENT_R_MT] = MRP_APPLICANT_AA,
+ [MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
+ [MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
+ [MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
+ [MRP_EVENT_PERIODIC] = MRP_APPLICANT_AA,
+ },
+ [MRP_APPLICANT_LA] = {
+ [MRP_EVENT_NEW] = MRP_APPLICANT_VN,
+ [MRP_EVENT_JOIN] = MRP_APPLICANT_AA,
+ [MRP_EVENT_LV] = MRP_APPLICANT_LA,
+ [MRP_EVENT_TX] = MRP_APPLICANT_VO,
+ [MRP_EVENT_R_NEW] = MRP_APPLICANT_LA,
+ [MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_LA,
+ [MRP_EVENT_R_IN] = MRP_APPLICANT_LA,
+ [MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_LA,
+ [MRP_EVENT_R_MT] = MRP_APPLICANT_LA,
+ [MRP_EVENT_R_LV] = MRP_APPLICANT_LA,
+ [MRP_EVENT_R_LA] = MRP_APPLICANT_LA,
+ [MRP_EVENT_REDECLARE] = MRP_APPLICANT_LA,
+ [MRP_EVENT_PERIODIC] = MRP_APPLICANT_LA,
+ },
+ [MRP_APPLICANT_AO] = {
+ [MRP_EVENT_NEW] = MRP_APPLICANT_VN,
+ [MRP_EVENT_JOIN] = MRP_APPLICANT_AP,
+ [MRP_EVENT_LV] = MRP_APPLICANT_AO,
+ [MRP_EVENT_TX] = MRP_APPLICANT_AO,
+ [MRP_EVENT_R_NEW] = MRP_APPLICANT_AO,
+ [MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QO,
+ [MRP_EVENT_R_IN] = MRP_APPLICANT_AO,
+ [MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AO,
+ [MRP_EVENT_R_MT] = MRP_APPLICANT_AO,
+ [MRP_EVENT_R_LV] = MRP_APPLICANT_VO,
+ [MRP_EVENT_R_LA] = MRP_APPLICANT_VO,
+ [MRP_EVENT_REDECLARE] = MRP_APPLICANT_VO,
+ [MRP_EVENT_PERIODIC] = MRP_APPLICANT_AO,
+ },
+ [MRP_APPLICANT_QO] = {
+ [MRP_EVENT_NEW] = MRP_APPLICANT_VN,
+ [MRP_EVENT_JOIN] = MRP_APPLICANT_QP,
+ [MRP_EVENT_LV] = MRP_APPLICANT_QO,
+ [MRP_EVENT_TX] = MRP_APPLICANT_QO,
+ [MRP_EVENT_R_NEW] = MRP_APPLICANT_QO,
+ [MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QO,
+ [MRP_EVENT_R_IN] = MRP_APPLICANT_QO,
+ [MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AO,
+ [MRP_EVENT_R_MT] = MRP_APPLICANT_AO,
+ [MRP_EVENT_R_LV] = MRP_APPLICANT_VO,
+ [MRP_EVENT_R_LA] = MRP_APPLICANT_VO,
+ [MRP_EVENT_REDECLARE] = MRP_APPLICANT_VO,
+ [MRP_EVENT_PERIODIC] = MRP_APPLICANT_QO,
+ },
+ [MRP_APPLICANT_AP] = {
+ [MRP_EVENT_NEW] = MRP_APPLICANT_VN,
+ [MRP_EVENT_JOIN] = MRP_APPLICANT_AP,
+ [MRP_EVENT_LV] = MRP_APPLICANT_AO,
+ [MRP_EVENT_TX] = MRP_APPLICANT_QA,
+ [MRP_EVENT_R_NEW] = MRP_APPLICANT_AP,
+ [MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QP,
+ [MRP_EVENT_R_IN] = MRP_APPLICANT_AP,
+ [MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AP,
+ [MRP_EVENT_R_MT] = MRP_APPLICANT_AP,
+ [MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
+ [MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
+ [MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
+ [MRP_EVENT_PERIODIC] = MRP_APPLICANT_AP,
+ },
+ [MRP_APPLICANT_QP] = {
+ [MRP_EVENT_NEW] = MRP_APPLICANT_VN,
+ [MRP_EVENT_JOIN] = MRP_APPLICANT_QP,
+ [MRP_EVENT_LV] = MRP_APPLICANT_QO,
+ [MRP_EVENT_TX] = MRP_APPLICANT_QP,
+ [MRP_EVENT_R_NEW] = MRP_APPLICANT_QP,
+ [MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QP,
+ [MRP_EVENT_R_IN] = MRP_APPLICANT_QP,
+ [MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AP,
+ [MRP_EVENT_R_MT] = MRP_APPLICANT_AP,
+ [MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
+ [MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
+ [MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
+ [MRP_EVENT_PERIODIC] = MRP_APPLICANT_AP,
+ },
+};
+
+static const u8
+mrp_tx_action_table[MRP_APPLICANT_MAX + 1] = {
+ [MRP_APPLICANT_VO] = MRP_TX_ACTION_S_IN_OPTIONAL,
+ [MRP_APPLICANT_VP] = MRP_TX_ACTION_S_JOIN_IN,
+ [MRP_APPLICANT_VN] = MRP_TX_ACTION_S_NEW,
+ [MRP_APPLICANT_AN] = MRP_TX_ACTION_S_NEW,
+ [MRP_APPLICANT_AA] = MRP_TX_ACTION_S_JOIN_IN,
+ [MRP_APPLICANT_QA] = MRP_TX_ACTION_S_JOIN_IN_OPTIONAL,
+ [MRP_APPLICANT_LA] = MRP_TX_ACTION_S_LV,
+ [MRP_APPLICANT_AO] = MRP_TX_ACTION_S_IN_OPTIONAL,
+ [MRP_APPLICANT_QO] = MRP_TX_ACTION_S_IN_OPTIONAL,
+ [MRP_APPLICANT_AP] = MRP_TX_ACTION_S_JOIN_IN,
+ [MRP_APPLICANT_QP] = MRP_TX_ACTION_S_IN_OPTIONAL,
+};
+
+static void mrp_attrvalue_inc(void *value, u8 len)
+{
+ u8 *v = (u8 *)value;
+
+ /* Add 1 to the last byte. If it becomes zero,
+ * go to the previous byte and repeat.
+ */
+ while (len > 0 && !++v[--len])
+ ;
+}
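
The loop above treats the attribute value as one big-endian integer and adds one with carry: incrementing {0x00, 0xff} yields {0x01, 0x00}. A standalone check of the same idiom:

#include <stdio.h>

static void attrvalue_inc(unsigned char *v, unsigned int len)
{
	while (len > 0 && !++v[--len])
		;	/* byte wrapped to zero: carry into previous byte */
}

int main(void)
{
	unsigned char v[2] = { 0x00, 0xff };

	attrvalue_inc(v, sizeof(v));
	printf("%02x %02x\n", v[0], v[1]);	/* prints: 01 00 */
	return 0;
}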
+
+static int mrp_attr_cmp(const struct mrp_attr *attr,
+ const void *value, u8 len, u8 type)
+{
+ if (attr->type != type)
+ return attr->type - type;
+ if (attr->len != len)
+ return attr->len - len;
+ return memcmp(attr->value, value, len);
+}
+
+static struct mrp_attr *mrp_attr_lookup(const struct mrp_applicant *app,
+ const void *value, u8 len, u8 type)
+{
+ struct rb_node *parent = app->mad.rb_node;
+ struct mrp_attr *attr;
+ int d;
+
+ while (parent) {
+ attr = rb_entry(parent, struct mrp_attr, node);
+ d = mrp_attr_cmp(attr, value, len, type);
+ if (d > 0)
+ parent = parent->rb_left;
+ else if (d < 0)
+ parent = parent->rb_right;
+ else
+ return attr;
+ }
+ return NULL;
+}
+
+static struct mrp_attr *mrp_attr_create(struct mrp_applicant *app,
+ const void *value, u8 len, u8 type)
+{
+ struct rb_node *parent = NULL, **p = &app->mad.rb_node;
+ struct mrp_attr *attr;
+ int d;
+
+ while (*p) {
+ parent = *p;
+ attr = rb_entry(parent, struct mrp_attr, node);
+ d = mrp_attr_cmp(attr, value, len, type);
+ if (d > 0)
+ p = &parent->rb_left;
+ else if (d < 0)
+ p = &parent->rb_right;
+ else {
+ /* The attribute already exists; re-use it. */
+ return attr;
+ }
+ }
+ attr = kmalloc(sizeof(*attr) + len, GFP_ATOMIC);
+ if (!attr)
+ return attr;
+ attr->state = MRP_APPLICANT_VO;
+ attr->type = type;
+ attr->len = len;
+ memcpy(attr->value, value, len);
+
+ rb_link_node(&attr->node, parent, p);
+ rb_insert_color(&attr->node, &app->mad);
+ return attr;
+}
+
+static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
+{
+ rb_erase(&attr->node, &app->mad);
+ kfree(attr);
+}
+
+static void mrp_attr_destroy_all(struct mrp_applicant *app)
+{
+ struct rb_node *node, *next;
+ struct mrp_attr *attr;
+
+ for (node = rb_first(&app->mad);
+ next = node ? rb_next(node) : NULL, node != NULL;
+ node = next) {
+ attr = rb_entry(node, struct mrp_attr, node);
+ mrp_attr_destroy(app, attr);
+ }
+}
+
+static int mrp_pdu_init(struct mrp_applicant *app)
+{
+ struct sk_buff *skb;
+ struct mrp_pdu_hdr *ph;
+
+ skb = alloc_skb(app->dev->mtu + LL_RESERVED_SPACE(app->dev),
+ GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ skb->dev = app->dev;
+ skb->protocol = app->app->pkttype.type;
+ skb_reserve(skb, LL_RESERVED_SPACE(app->dev));
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+
+ ph = __skb_put(skb, sizeof(*ph));
+ ph->version = app->app->version;
+
+ app->pdu = skb;
+ return 0;
+}
+
+static int mrp_pdu_append_end_mark(struct mrp_applicant *app)
+{
+ __be16 *endmark;
+
+ if (skb_tailroom(app->pdu) < sizeof(*endmark))
+ return -1;
+ endmark = __skb_put(app->pdu, sizeof(*endmark));
+ put_unaligned(MRP_END_MARK, endmark);
+ return 0;
+}
+
+static void mrp_pdu_queue(struct mrp_applicant *app)
+{
+ if (!app->pdu)
+ return;
+
+ if (mrp_cb(app->pdu)->mh)
+ mrp_pdu_append_end_mark(app);
+ mrp_pdu_append_end_mark(app);
+
+ dev_hard_header(app->pdu, app->dev, ntohs(app->app->pkttype.type),
+ app->app->group_address, app->dev->dev_addr,
+ app->pdu->len);
+
+ skb_queue_tail(&app->queue, app->pdu);
+ app->pdu = NULL;
+}
+
+static void mrp_queue_xmit(struct mrp_applicant *app)
+{
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&app->queue)))
+ dev_queue_xmit(skb);
+}
+
+static int mrp_pdu_append_msg_hdr(struct mrp_applicant *app,
+ u8 attrtype, u8 attrlen)
+{
+ struct mrp_msg_hdr *mh;
+
+ if (mrp_cb(app->pdu)->mh) {
+ if (mrp_pdu_append_end_mark(app) < 0)
+ return -1;
+ mrp_cb(app->pdu)->mh = NULL;
+ mrp_cb(app->pdu)->vah = NULL;
+ }
+
+ if (skb_tailroom(app->pdu) < sizeof(*mh))
+ return -1;
+ mh = __skb_put(app->pdu, sizeof(*mh));
+ mh->attrtype = attrtype;
+ mh->attrlen = attrlen;
+ mrp_cb(app->pdu)->mh = mh;
+ return 0;
+}
+
+static int mrp_pdu_append_vecattr_hdr(struct mrp_applicant *app,
+ const void *firstattrvalue, u8 attrlen)
+{
+ struct mrp_vecattr_hdr *vah;
+
+ if (skb_tailroom(app->pdu) < sizeof(*vah) + attrlen)
+ return -1;
+ vah = __skb_put(app->pdu, sizeof(*vah) + attrlen);
+ put_unaligned(0, &vah->lenflags);
+ memcpy(vah->firstattrvalue, firstattrvalue, attrlen);
+ mrp_cb(app->pdu)->vah = vah;
+ memcpy(mrp_cb(app->pdu)->attrvalue, firstattrvalue, attrlen);
+ return 0;
+}
+
+static int mrp_pdu_append_vecattr_event(struct mrp_applicant *app,
+ const struct mrp_attr *attr,
+ enum mrp_vecattr_event vaevent)
+{
+ u16 len, pos;
+ u8 *vaevents;
+ int err;
+again:
+ if (!app->pdu) {
+ err = mrp_pdu_init(app);
+ if (err < 0)
+ return err;
+ }
+
+ /* If there is no Message header in the PDU, or the Message header is
+ * for a different attribute type, add an EndMark (if necessary) and a
+ * new Message header to the PDU.
+ */
+ if (!mrp_cb(app->pdu)->mh ||
+ mrp_cb(app->pdu)->mh->attrtype != attr->type ||
+ mrp_cb(app->pdu)->mh->attrlen != attr->len) {
+ if (mrp_pdu_append_msg_hdr(app, attr->type, attr->len) < 0)
+ goto queue;
+ }
+
+ /* If there is no VectorAttribute header for this Message in the PDU,
+ * or this attribute's value does not sequentially follow the previous
+ * attribute's value, add a new VectorAttribute header to the PDU.
+ */
+ if (!mrp_cb(app->pdu)->vah ||
+ memcmp(mrp_cb(app->pdu)->attrvalue, attr->value, attr->len)) {
+ if (mrp_pdu_append_vecattr_hdr(app, attr->value, attr->len) < 0)
+ goto queue;
+ }
+
+ len = be16_to_cpu(get_unaligned(&mrp_cb(app->pdu)->vah->lenflags));
+ pos = len % 3;
+
+ /* Events are packed into Vectors in the PDU, three to a byte. Add a
+ * byte to the end of the Vector if necessary.
+ */
+ if (!pos) {
+ if (skb_tailroom(app->pdu) < sizeof(u8))
+ goto queue;
+ vaevents = __skb_put(app->pdu, sizeof(u8));
+ } else {
+ vaevents = (u8 *)(skb_tail_pointer(app->pdu) - sizeof(u8));
+ }
+
+ switch (pos) {
+ case 0:
+ *vaevents = vaevent * (__MRP_VECATTR_EVENT_MAX *
+ __MRP_VECATTR_EVENT_MAX);
+ break;
+ case 1:
+ *vaevents += vaevent * __MRP_VECATTR_EVENT_MAX;
+ break;
+ case 2:
+ *vaevents += vaevent;
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ /* Increment the length of the VectorAttribute in the PDU, as well as
+ * the value of the next attribute that would continue its Vector.
+ */
+ put_unaligned(cpu_to_be16(++len), &mrp_cb(app->pdu)->vah->lenflags);
+ mrp_attrvalue_inc(mrp_cb(app->pdu)->attrvalue, attr->len);
+
+ return 0;
+
+queue:
+ mrp_pdu_queue(app);
+ goto again;
+}
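
The packing above stores three events per vector byte in a base-__MRP_VECATTR_EVENT_MAX encoding; with six vector events (an assumption based on the MRP_VECATTR_EVENT_* cases handled in this file), a byte holds e0*36 + e1*6 + e2. A userspace round-trip of that arithmetic:

#include <stdio.h>

#define EVT_MAX 6	/* assumed value of __MRP_VECATTR_EVENT_MAX */

int main(void)
{
	unsigned int e0 = 1, e1 = 5, e2 = 2;	/* arbitrary sample events */
	unsigned char byte = e0 * (EVT_MAX * EVT_MAX) + e1 * EVT_MAX + e2;

	/* decode exactly the way mrp_pdu_parse_vecattr() does */
	printf("%u %u %u\n",
	       byte / (EVT_MAX * EVT_MAX),
	       byte % (EVT_MAX * EVT_MAX) / EVT_MAX,
	       byte % EVT_MAX);			/* prints: 1 5 2 */
	return 0;
}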
+
+static void mrp_attr_event(struct mrp_applicant *app,
+ struct mrp_attr *attr, enum mrp_event event)
+{
+ enum mrp_applicant_state state;
+
+ state = mrp_applicant_state_table[attr->state][event];
+ if (state == MRP_APPLICANT_INVALID) {
+ WARN_ON(1);
+ return;
+ }
+
+ if (event == MRP_EVENT_TX) {
+ /* When appending the attribute fails, don't update its state
+ * in order to retry at the next TX event.
+ */
+
+ switch (mrp_tx_action_table[attr->state]) {
+ case MRP_TX_ACTION_NONE:
+ case MRP_TX_ACTION_S_JOIN_IN_OPTIONAL:
+ case MRP_TX_ACTION_S_IN_OPTIONAL:
+ break;
+ case MRP_TX_ACTION_S_NEW:
+ if (mrp_pdu_append_vecattr_event(
+ app, attr, MRP_VECATTR_EVENT_NEW) < 0)
+ return;
+ break;
+ case MRP_TX_ACTION_S_JOIN_IN:
+ if (mrp_pdu_append_vecattr_event(
+ app, attr, MRP_VECATTR_EVENT_JOIN_IN) < 0)
+ return;
+ break;
+ case MRP_TX_ACTION_S_LV:
+ if (mrp_pdu_append_vecattr_event(
+ app, attr, MRP_VECATTR_EVENT_LV) < 0)
+ return;
+ /* As a pure applicant, sending a leave message
+ * implies that the attribute was unregistered and
+ * can be destroyed.
+ */
+ mrp_attr_destroy(app, attr);
+ return;
+ default:
+ WARN_ON(1);
+ }
+ }
+
+ attr->state = state;
+}
+
+int mrp_request_join(const struct net_device *dev,
+ const struct mrp_application *appl,
+ const void *value, u8 len, u8 type)
+{
+ struct mrp_port *port = rtnl_dereference(dev->mrp_port);
+ struct mrp_applicant *app = rtnl_dereference(
+ port->applicants[appl->type]);
+ struct mrp_attr *attr;
+
+ if (sizeof(struct mrp_skb_cb) + len >
+ sizeof_field(struct sk_buff, cb))
+ return -ENOMEM;
+
+ spin_lock_bh(&app->lock);
+ attr = mrp_attr_create(app, value, len, type);
+ if (!attr) {
+ spin_unlock_bh(&app->lock);
+ return -ENOMEM;
+ }
+ mrp_attr_event(app, attr, MRP_EVENT_JOIN);
+ spin_unlock_bh(&app->lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mrp_request_join);
+
+void mrp_request_leave(const struct net_device *dev,
+ const struct mrp_application *appl,
+ const void *value, u8 len, u8 type)
+{
+ struct mrp_port *port = rtnl_dereference(dev->mrp_port);
+ struct mrp_applicant *app = rtnl_dereference(
+ port->applicants[appl->type]);
+ struct mrp_attr *attr;
+
+ if (sizeof(struct mrp_skb_cb) + len >
+ sizeof_field(struct sk_buff, cb))
+ return;
+
+ spin_lock_bh(&app->lock);
+ attr = mrp_attr_lookup(app, value, len, type);
+ if (!attr) {
+ spin_unlock_bh(&app->lock);
+ return;
+ }
+ mrp_attr_event(app, attr, MRP_EVENT_LV);
+ spin_unlock_bh(&app->lock);
+}
+EXPORT_SYMBOL_GPL(mrp_request_leave);
+
+static void mrp_mad_event(struct mrp_applicant *app, enum mrp_event event)
+{
+ struct rb_node *node, *next;
+ struct mrp_attr *attr;
+
+ for (node = rb_first(&app->mad);
+ next = node ? rb_next(node) : NULL, node != NULL;
+ node = next) {
+ attr = rb_entry(node, struct mrp_attr, node);
+ mrp_attr_event(app, attr, event);
+ }
+}
+
+static void mrp_join_timer_arm(struct mrp_applicant *app)
+{
+ unsigned long delay;
+
+ delay = (u64)msecs_to_jiffies(mrp_join_time) * prandom_u32() >> 32;
+ mod_timer(&app->join_timer, jiffies + delay);
+}
+
+static void mrp_join_timer(struct timer_list *t)
+{
+ struct mrp_applicant *app = from_timer(app, t, join_timer);
+
+ spin_lock(&app->lock);
+ mrp_mad_event(app, MRP_EVENT_TX);
+ mrp_pdu_queue(app);
+ spin_unlock(&app->lock);
+
+ mrp_queue_xmit(app);
+ spin_lock(&app->lock);
+ if (likely(app->active))
+ mrp_join_timer_arm(app);
+ spin_unlock(&app->lock);
+}
+
+static void mrp_periodic_timer_arm(struct mrp_applicant *app)
+{
+ mod_timer(&app->periodic_timer,
+ jiffies + msecs_to_jiffies(mrp_periodic_time));
+}
+
+static void mrp_periodic_timer(struct timer_list *t)
+{
+ struct mrp_applicant *app = from_timer(app, t, periodic_timer);
+
+ spin_lock(&app->lock);
+ if (likely(app->active)) {
+ mrp_mad_event(app, MRP_EVENT_PERIODIC);
+ mrp_pdu_queue(app);
+ mrp_periodic_timer_arm(app);
+ }
+ spin_unlock(&app->lock);
+}
+
+static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset)
+{
+ __be16 endmark;
+
+ if (skb_copy_bits(skb, *offset, &endmark, sizeof(endmark)) < 0)
+ return -1;
+ if (endmark == MRP_END_MARK) {
+ *offset += sizeof(endmark);
+ return -1;
+ }
+ return 0;
+}
+
+static void mrp_pdu_parse_vecattr_event(struct mrp_applicant *app,
+ struct sk_buff *skb,
+ enum mrp_vecattr_event vaevent)
+{
+ struct mrp_attr *attr;
+ enum mrp_event event;
+
+ attr = mrp_attr_lookup(app, mrp_cb(skb)->attrvalue,
+ mrp_cb(skb)->mh->attrlen,
+ mrp_cb(skb)->mh->attrtype);
+ if (attr == NULL)
+ return;
+
+ switch (vaevent) {
+ case MRP_VECATTR_EVENT_NEW:
+ event = MRP_EVENT_R_NEW;
+ break;
+ case MRP_VECATTR_EVENT_JOIN_IN:
+ event = MRP_EVENT_R_JOIN_IN;
+ break;
+ case MRP_VECATTR_EVENT_IN:
+ event = MRP_EVENT_R_IN;
+ break;
+ case MRP_VECATTR_EVENT_JOIN_MT:
+ event = MRP_EVENT_R_JOIN_MT;
+ break;
+ case MRP_VECATTR_EVENT_MT:
+ event = MRP_EVENT_R_MT;
+ break;
+ case MRP_VECATTR_EVENT_LV:
+ event = MRP_EVENT_R_LV;
+ break;
+ default:
+ return;
+ }
+
+ mrp_attr_event(app, attr, event);
+}
+
+static int mrp_pdu_parse_vecattr(struct mrp_applicant *app,
+ struct sk_buff *skb, int *offset)
+{
+ struct mrp_vecattr_hdr _vah;
+ u16 valen;
+ u8 vaevents, vaevent;
+
+ mrp_cb(skb)->vah = skb_header_pointer(skb, *offset, sizeof(_vah),
+ &_vah);
+ if (!mrp_cb(skb)->vah)
+ return -1;
+ *offset += sizeof(_vah);
+
+ if (get_unaligned(&mrp_cb(skb)->vah->lenflags) &
+ MRP_VECATTR_HDR_FLAG_LA)
+ mrp_mad_event(app, MRP_EVENT_R_LA);
+ valen = be16_to_cpu(get_unaligned(&mrp_cb(skb)->vah->lenflags) &
+ MRP_VECATTR_HDR_LEN_MASK);
+
+ /* The VectorAttribute structure in a PDU carries event information
+ * about one or more attributes having consecutive values. Only the
+ * value for the first attribute is contained in the structure. So
+ * we make a copy of that value, and then increment it each time we
+ * advance to the next event in its Vector.
+ */
+ if (sizeof(struct mrp_skb_cb) + mrp_cb(skb)->mh->attrlen >
+ sizeof_field(struct sk_buff, cb))
+ return -1;
+ if (skb_copy_bits(skb, *offset, mrp_cb(skb)->attrvalue,
+ mrp_cb(skb)->mh->attrlen) < 0)
+ return -1;
+ *offset += mrp_cb(skb)->mh->attrlen;
+
+ /* In a VectorAttribute, the Vector contains events which are packed
+ * three to a byte. We process one byte of the Vector at a time.
+ */
+ while (valen > 0) {
+ if (skb_copy_bits(skb, *offset, &vaevents,
+ sizeof(vaevents)) < 0)
+ return -1;
+ *offset += sizeof(vaevents);
+
+ /* Extract and process the first event. */
+ vaevent = vaevents / (__MRP_VECATTR_EVENT_MAX *
+ __MRP_VECATTR_EVENT_MAX);
+ if (vaevent >= __MRP_VECATTR_EVENT_MAX) {
+ /* The byte is malformed; stop processing. */
+ return -1;
+ }
+ mrp_pdu_parse_vecattr_event(app, skb, vaevent);
+
+ /* If present, extract and process the second event. */
+ if (!--valen)
+ break;
+ mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
+ mrp_cb(skb)->mh->attrlen);
+ vaevents %= (__MRP_VECATTR_EVENT_MAX *
+ __MRP_VECATTR_EVENT_MAX);
+ vaevent = vaevents / __MRP_VECATTR_EVENT_MAX;
+ mrp_pdu_parse_vecattr_event(app, skb, vaevent);
+
+ /* If present, extract and process the third event. */
+ if (!--valen)
+ break;
+ mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
+ mrp_cb(skb)->mh->attrlen);
+ vaevents %= __MRP_VECATTR_EVENT_MAX;
+ vaevent = vaevents;
+ mrp_pdu_parse_vecattr_event(app, skb, vaevent);
+ }
+ return 0;
+}
+
+static int mrp_pdu_parse_msg(struct mrp_applicant *app, struct sk_buff *skb,
+ int *offset)
+{
+ struct mrp_msg_hdr _mh;
+
+ mrp_cb(skb)->mh = skb_header_pointer(skb, *offset, sizeof(_mh), &_mh);
+ if (!mrp_cb(skb)->mh)
+ return -1;
+ *offset += sizeof(_mh);
+
+ if (mrp_cb(skb)->mh->attrtype == 0 ||
+ mrp_cb(skb)->mh->attrtype > app->app->maxattr ||
+ mrp_cb(skb)->mh->attrlen == 0)
+ return -1;
+
+ while (skb->len > *offset) {
+ if (mrp_pdu_parse_end_mark(skb, offset) < 0)
+ break;
+ if (mrp_pdu_parse_vecattr(app, skb, offset) < 0)
+ return -1;
+ }
+ return 0;
+}
+
+static int mrp_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
+{
+ struct mrp_application *appl = container_of(pt, struct mrp_application,
+ pkttype);
+ struct mrp_port *port;
+ struct mrp_applicant *app;
+ struct mrp_pdu_hdr _ph;
+ const struct mrp_pdu_hdr *ph;
+ int offset = skb_network_offset(skb);
+
+ /* If the interface is in promiscuous mode, drop the packet if
+ * it was unicast to another host.
+ */
+ if (unlikely(skb->pkt_type == PACKET_OTHERHOST))
+ goto out;
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (unlikely(!skb))
+ goto out;
+ port = rcu_dereference(dev->mrp_port);
+ if (unlikely(!port))
+ goto out;
+ app = rcu_dereference(port->applicants[appl->type]);
+ if (unlikely(!app))
+ goto out;
+
+ ph = skb_header_pointer(skb, offset, sizeof(_ph), &_ph);
+ if (!ph)
+ goto out;
+ offset += sizeof(_ph);
+
+ if (ph->version != app->app->version)
+ goto out;
+
+ spin_lock(&app->lock);
+ while (skb->len > offset) {
+ if (mrp_pdu_parse_end_mark(skb, &offset) < 0)
+ break;
+ if (mrp_pdu_parse_msg(app, skb, &offset) < 0)
+ break;
+ }
+ spin_unlock(&app->lock);
+out:
+ kfree_skb(skb);
+ return 0;
+}
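For orientation, the nested parse loops above walk an MRPDU whose on-wire layout (per the MRP chapter of IEEE 802.1Q) is roughly:

	ProtocolVersion (1 byte)
	Message (repeated, terminated by an end mark or end of PDU)
	    AttributeType (1 byte), AttributeLength (1 byte)
	    VectorAttribute (repeated, terminated by an end mark)
	        VectorHeader (2 bytes: LeaveAll flag + NumberOfValues)
	        FirstValue   (AttributeLength bytes)
	        Vector       (one byte per three events)
	EndMark (2 zero bytes)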
+
+static int mrp_init_port(struct net_device *dev)
+{
+ struct mrp_port *port;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+ rcu_assign_pointer(dev->mrp_port, port);
+ return 0;
+}
+
+static void mrp_release_port(struct net_device *dev)
+{
+ struct mrp_port *port = rtnl_dereference(dev->mrp_port);
+ unsigned int i;
+
+ for (i = 0; i <= MRP_APPLICATION_MAX; i++) {
+ if (rtnl_dereference(port->applicants[i]))
+ return;
+ }
+ RCU_INIT_POINTER(dev->mrp_port, NULL);
+ kfree_rcu(port, rcu);
+}
+
+int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl)
+{
+ struct mrp_applicant *app;
+ int err;
+
+ ASSERT_RTNL();
+
+ if (!rtnl_dereference(dev->mrp_port)) {
+ err = mrp_init_port(dev);
+ if (err < 0)
+ goto err1;
+ }
+
+ err = -ENOMEM;
+ app = kzalloc(sizeof(*app), GFP_KERNEL);
+ if (!app)
+ goto err2;
+
+ err = dev_mc_add(dev, appl->group_address);
+ if (err < 0)
+ goto err3;
+
+ app->dev = dev;
+ app->app = appl;
+ app->mad = RB_ROOT;
+ app->active = true;
+ spin_lock_init(&app->lock);
+ skb_queue_head_init(&app->queue);
+ rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app);
+ timer_setup(&app->join_timer, mrp_join_timer, 0);
+ mrp_join_timer_arm(app);
+ timer_setup(&app->periodic_timer, mrp_periodic_timer, 0);
+ mrp_periodic_timer_arm(app);
+ return 0;
+
+err3:
+ kfree(app);
+err2:
+ mrp_release_port(dev);
+err1:
+ return err;
+}
+EXPORT_SYMBOL_GPL(mrp_init_applicant);
+
+void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
+{
+ struct mrp_port *port = rtnl_dereference(dev->mrp_port);
+ struct mrp_applicant *app = rtnl_dereference(
+ port->applicants[appl->type]);
+
+ ASSERT_RTNL();
+
+ RCU_INIT_POINTER(port->applicants[appl->type], NULL);
+
+ spin_lock_bh(&app->lock);
+ app->active = false;
+ spin_unlock_bh(&app->lock);
+	/* Delete the timers and generate a final TX event to flush out
+ * all pending messages before the applicant is gone.
+ */
+ del_timer_sync(&app->join_timer);
+ del_timer_sync(&app->periodic_timer);
+
+ spin_lock_bh(&app->lock);
+ mrp_mad_event(app, MRP_EVENT_TX);
+ mrp_attr_destroy_all(app);
+ mrp_pdu_queue(app);
+ spin_unlock_bh(&app->lock);
+
+ mrp_queue_xmit(app);
+
+ dev_mc_del(dev, appl->group_address);
+ kfree_rcu(app, rcu);
+ mrp_release_port(dev);
+}
+EXPORT_SYMBOL_GPL(mrp_uninit_applicant);
+
+int mrp_register_application(struct mrp_application *appl)
+{
+ appl->pkttype.func = mrp_rcv;
+ dev_add_pack(&appl->pkttype);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mrp_register_application);
+
+void mrp_unregister_application(struct mrp_application *appl)
+{
+ dev_remove_pack(&appl->pkttype);
+}
+EXPORT_SYMBOL_GPL(mrp_unregister_application);
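Putting the exported API together: a protocol registers its mrp_application once at module init, then attaches an applicant per device under RTNL. A sketch of the pattern, modeled on net/8021q/vlan_mvrp.c (the MVRP group address and ethertype are that user's values; the rest is illustrative):

	#include <linux/if_ether.h>
	#include <net/mrp.h>

	static const u8 mvrp_group_address[ETH_ALEN] = {
		0x01, 0x80, 0xc2, 0x00, 0x00, 0x21
	};

	static struct mrp_application example_app __read_mostly = {
		.type		= MRP_APPLICATION_MVRP,
		.maxattr	= 1,		/* highest attribute type used */
		.pkttype.type	= htons(ETH_P_MVRP),
		.group_address	= mvrp_group_address,
		.version	= 0,
	};

	static int __init example_init(void)
	{
		return mrp_register_application(&example_app); /* hooks mrp_rcv */
	}

	/* Later, per device and under RTNL:
	 *	err = mrp_init_applicant(dev, &example_app);
	 * and on teardown:
	 *	mrp_uninit_applicant(dev, &example_app);
	 *	mrp_unregister_application(&example_app);
	 */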
diff --git a/net/802/p8022.c b/net/802/p8022.c
new file mode 100644
index 000000000..a65856270
--- /dev/null
+++ b/net/802/p8022.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * NET3: Support for 802.2 demultiplexing off Ethernet
+ *
+ * Demultiplex 802.2 encoded protocols. We match the entry by the
+ * SSAP/DSAP pair and then deliver to the registered datalink that
+ * matches. The control byte is ignored, and handling of such items
+ * is up to the routine that is passed the frame.
+ *
+ * Unlike the 802.3 datalink we have a list of 802.2 entries as
+ * there are multiple protocols to demux. The list is currently
+ * short (3 or 4 entries at most). The current demux assumes this.
+ */
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <net/datalink.h>
+#include <linux/mm.h>
+#include <linux/in.h>
+#include <linux/init.h>
+#include <net/llc.h>
+#include <net/p8022.h>
+
+static int p8022_request(struct datalink_proto *dl, struct sk_buff *skb,
+ unsigned char *dest)
+{
+ llc_build_and_send_ui_pkt(dl->sap, skb, dest, dl->sap->laddr.lsap);
+ return 0;
+}
+
+struct datalink_proto *register_8022_client(unsigned char type,
+ int (*func)(struct sk_buff *skb,
+ struct net_device *dev,
+ struct packet_type *pt,
+ struct net_device *orig_dev))
+{
+ struct datalink_proto *proto;
+
+ proto = kmalloc(sizeof(*proto), GFP_ATOMIC);
+ if (proto) {
+ proto->type[0] = type;
+ proto->header_length = 3;
+ proto->request = p8022_request;
+ proto->sap = llc_sap_open(type, func);
+ if (!proto->sap) {
+ kfree(proto);
+ proto = NULL;
+ }
+ }
+ return proto;
+}
+
+void unregister_8022_client(struct datalink_proto *proto)
+{
+ llc_sap_put(proto->sap);
+ kfree(proto);
+}
+
+EXPORT_SYMBOL(register_8022_client);
+EXPORT_SYMBOL(unregister_8022_client);
+
+MODULE_LICENSE("GPL");
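A client of this demux supplies its SAP value and a receive handler, and gets back a datalink_proto whose ->request() transmits LLC UI frames for it. A hedged sketch of the calling pattern (0xe0 is the SAP IPX historically used; the handler and function names are illustrative):

	#include <net/datalink.h>
	#include <net/p8022.h>

	static struct datalink_proto *example_dl;

	static int example_rcv(struct sk_buff *skb, struct net_device *dev,
			       struct packet_type *pt, struct net_device *orig_dev)
	{
		/* LLC has already matched the 0xe0/0xe0 DSAP/SSAP pair. */
		kfree_skb(skb);
		return 0;
	}

	static int __init example_init(void)
	{
		example_dl = register_8022_client(0xe0, example_rcv);
		if (!example_dl)
			return -ENOMEM;
		/* Transmit side: example_dl->request(example_dl, skb, dest_mac). */
		return 0;
	}

	static void __exit example_exit(void)
	{
		unregister_8022_client(example_dl);
	}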
diff --git a/net/802/p8023.c b/net/802/p8023.c
new file mode 100644
index 000000000..19cd56990
--- /dev/null
+++ b/net/802/p8023.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * NET3: 802.3 data link hooks used for IPX 802.3
+ *
+ * 802.3 isn't really a protocol data link layer. Some old IPX stuff
+ * uses it, however. Note that there is only one 802.3 protocol layer
+ * in the system. We don't currently support different protocols
+ * running raw 802.3 on different devices. Thankfully nobody else
+ * has done anything like the old IPX.
+ */
+
+#include <linux/in.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+
+#include <net/datalink.h>
+#include <net/p8022.h>
+
+/*
+ * Place an 802.3 header on a packet. The driver will fill in the mac
+ * addresses; we just need to give it the buffer length.
+ */
+static int p8023_request(struct datalink_proto *dl,
+ struct sk_buff *skb, unsigned char *dest_node)
+{
+ struct net_device *dev = skb->dev;
+
+ dev_hard_header(skb, dev, ETH_P_802_3, dest_node, NULL, skb->len);
+ return dev_queue_xmit(skb);
+}
+
+/*
+ * Create an 802.3 client. Note there can be only one 802.3 client
+ */
+struct datalink_proto *make_8023_client(void)
+{
+ struct datalink_proto *proto = kmalloc(sizeof(*proto), GFP_ATOMIC);
+
+ if (proto) {
+ proto->header_length = 0;
+ proto->request = p8023_request;
+ }
+ return proto;
+}
+
+/*
+ * Destroy the 802.3 client.
+ */
+void destroy_8023_client(struct datalink_proto *dl)
+{
+ kfree(dl);
+}
+
+EXPORT_SYMBOL(destroy_8023_client);
+EXPORT_SYMBOL(make_8023_client);
+
+MODULE_LICENSE("GPL");
diff --git a/net/802/psnap.c b/net/802/psnap.c
new file mode 100644
index 000000000..4492e8d7a
--- /dev/null
+++ b/net/802/psnap.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SNAP data link layer. Derived from 802.2
+ *
+ * Alan Cox <alan@lxorguk.ukuu.org.uk>,
+ * from the 802.2 layer by Greg Page.
+ * Merged in additions from Greg Page's psnap.c.
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <net/datalink.h>
+#include <net/llc.h>
+#include <net/psnap.h>
+#include <linux/mm.h>
+#include <linux/in.h>
+#include <linux/init.h>
+#include <linux/rculist.h>
+
+static LIST_HEAD(snap_list);
+static DEFINE_SPINLOCK(snap_lock);
+static struct llc_sap *snap_sap;
+
+/*
+ * Find a snap client by matching the 5-byte SNAP descriptor.
+ */
+static struct datalink_proto *find_snap_client(const unsigned char *desc)
+{
+ struct datalink_proto *proto = NULL, *p;
+
+ list_for_each_entry_rcu(p, &snap_list, node, lockdep_is_held(&snap_lock)) {
+ if (!memcmp(p->type, desc, 5)) {
+ proto = p;
+ break;
+ }
+ }
+ return proto;
+}
+
+/*
+ * A SNAP packet has arrived
+ */
+static int snap_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
+{
+ int rc = 1;
+ struct datalink_proto *proto;
+ static struct packet_type snap_packet_type = {
+ .type = cpu_to_be16(ETH_P_SNAP),
+ };
+
+ if (unlikely(!pskb_may_pull(skb, 5)))
+ goto drop;
+
+ rcu_read_lock();
+ proto = find_snap_client(skb_transport_header(skb));
+ if (proto) {
+ /* Pass the frame on. */
+ skb->transport_header += 5;
+ skb_pull_rcsum(skb, 5);
+ rc = proto->rcvfunc(skb, dev, &snap_packet_type, orig_dev);
+ }
+ rcu_read_unlock();
+
+ if (unlikely(!proto))
+ goto drop;
+
+out:
+ return rc;
+
+drop:
+ kfree_skb(skb);
+ goto out;
+}
+
+/*
+ * Put a SNAP header on a frame and pass to 802.2
+ */
+static int snap_request(struct datalink_proto *dl,
+ struct sk_buff *skb, u8 *dest)
+{
+ memcpy(skb_push(skb, 5), dl->type, 5);
+ llc_build_and_send_ui_pkt(snap_sap, skb, dest, snap_sap->laddr.lsap);
+ return 0;
+}
+
+/*
+ * Set up the SNAP layer
+ */
+EXPORT_SYMBOL(register_snap_client);
+EXPORT_SYMBOL(unregister_snap_client);
+
+static const char snap_err_msg[] __initconst =
+ KERN_CRIT "SNAP - unable to register with 802.2\n";
+
+static int __init snap_init(void)
+{
+ snap_sap = llc_sap_open(0xAA, snap_rcv);
+ if (!snap_sap) {
+ printk(snap_err_msg);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+module_init(snap_init);
+
+static void __exit snap_exit(void)
+{
+ llc_sap_put(snap_sap);
+}
+
+module_exit(snap_exit);
+
+
+/*
+ * Register SNAP clients. We don't yet use this for IP.
+ */
+struct datalink_proto *register_snap_client(const unsigned char *desc,
+ int (*rcvfunc)(struct sk_buff *,
+ struct net_device *,
+ struct packet_type *,
+ struct net_device *))
+{
+ struct datalink_proto *proto = NULL;
+
+ spin_lock_bh(&snap_lock);
+
+ if (find_snap_client(desc))
+ goto out;
+
+ proto = kmalloc(sizeof(*proto), GFP_ATOMIC);
+ if (proto) {
+ memcpy(proto->type, desc, 5);
+ proto->rcvfunc = rcvfunc;
+ proto->header_length = 5 + 3; /* snap + 802.2 */
+ proto->request = snap_request;
+ list_add_rcu(&proto->node, &snap_list);
+ }
+out:
+ spin_unlock_bh(&snap_lock);
+
+ return proto;
+}
+
+/*
+ * Unregister SNAP clients. Protocols no longer want to play with us ...
+ */
+void unregister_snap_client(struct datalink_proto *proto)
+{
+ spin_lock_bh(&snap_lock);
+ list_del_rcu(&proto->node);
+ spin_unlock_bh(&snap_lock);
+
+ synchronize_net();
+
+ kfree(proto);
+}
+
+MODULE_LICENSE("GPL");
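The 5-byte descriptor matched by find_snap_client() is the 3-byte OUI followed by the 2-byte protocol ID from the SNAP header. For example, AppleTalk's AARP registers itself roughly like this (the descriptor and aarp_rcv handler are from net/appletalk; the wrapper is an illustrative fragment):

	/* OUI 00:00:00 + protocol id 0x80F3 (AARP) */
	static unsigned char aarp_snap_id[] = { 0x00, 0x00, 0x00, 0x80, 0xF3 };
	static struct datalink_proto *aarp_dl;

	static int example_init(void)
	{
		aarp_dl = register_snap_client(aarp_snap_id, aarp_rcv);
		if (!aarp_dl)
			return -ENOMEM;
		/* Transmit side: aarp_dl->request() pushes the 5 descriptor
		 * bytes and hands the frame to 802.2 (see snap_request above).
		 */
		return 0;
	}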
diff --git a/net/802/stp.c b/net/802/stp.c
new file mode 100644
index 000000000..d550d9f88
--- /dev/null
+++ b/net/802/stp.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * STP SAP demux
+ *
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ */
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/llc.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <net/llc.h>
+#include <net/llc_pdu.h>
+#include <net/stp.h>
+
+/* 01:80:c2:00:00:20 - 01:80:c2:00:00:2F */
+#define GARP_ADDR_MIN 0x20
+#define GARP_ADDR_MAX 0x2F
+#define GARP_ADDR_RANGE (GARP_ADDR_MAX - GARP_ADDR_MIN)
+
+static const struct stp_proto __rcu *garp_protos[GARP_ADDR_RANGE + 1] __read_mostly;
+static const struct stp_proto __rcu *stp_proto __read_mostly;
+
+static struct llc_sap *sap __read_mostly;
+static unsigned int sap_registered;
+static DEFINE_MUTEX(stp_proto_mutex);
+
+/* Called under rcu_read_lock from LLC */
+static int stp_pdu_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
+{
+ const struct ethhdr *eh = eth_hdr(skb);
+ const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
+ const struct stp_proto *proto;
+
+ if (pdu->ssap != LLC_SAP_BSPAN ||
+ pdu->dsap != LLC_SAP_BSPAN ||
+ pdu->ctrl_1 != LLC_PDU_TYPE_U)
+ goto err;
+
+ if (eh->h_dest[5] >= GARP_ADDR_MIN && eh->h_dest[5] <= GARP_ADDR_MAX) {
+ proto = rcu_dereference(garp_protos[eh->h_dest[5] -
+ GARP_ADDR_MIN]);
+ if (proto &&
+ !ether_addr_equal(eh->h_dest, proto->group_address))
+ goto err;
+ } else
+ proto = rcu_dereference(stp_proto);
+
+ if (!proto)
+ goto err;
+
+ proto->rcv(proto, skb, dev);
+ return 0;
+
+err:
+ kfree_skb(skb);
+ return 0;
+}
+
+int stp_proto_register(const struct stp_proto *proto)
+{
+ int err = 0;
+
+ mutex_lock(&stp_proto_mutex);
+ if (sap_registered++ == 0) {
+ sap = llc_sap_open(LLC_SAP_BSPAN, stp_pdu_rcv);
+ if (!sap) {
+ err = -ENOMEM;
+ goto out;
+ }
+ }
+ if (is_zero_ether_addr(proto->group_address))
+ rcu_assign_pointer(stp_proto, proto);
+ else
+ rcu_assign_pointer(garp_protos[proto->group_address[5] -
+ GARP_ADDR_MIN], proto);
+out:
+ mutex_unlock(&stp_proto_mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(stp_proto_register);
+
+void stp_proto_unregister(const struct stp_proto *proto)
+{
+ mutex_lock(&stp_proto_mutex);
+ if (is_zero_ether_addr(proto->group_address))
+ RCU_INIT_POINTER(stp_proto, NULL);
+ else
+ RCU_INIT_POINTER(garp_protos[proto->group_address[5] -
+ GARP_ADDR_MIN], NULL);
+ synchronize_rcu();
+
+ if (--sap_registered == 0)
+ llc_sap_put(sap);
+ mutex_unlock(&stp_proto_mutex);
+}
+EXPORT_SYMBOL_GPL(stp_proto_unregister);
+
+MODULE_LICENSE("GPL");
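Callers register either with an all-zero group_address to claim ordinary STP BPDUs (as the bridge's br_stp_proto does) or with one of the 01:80:c2:00:00:2X addresses to claim a single GARP application address (as net/802/garp.c does). A sketch of the former, with illustrative names:

	#include <net/stp.h>

	static void example_stp_rcv(const struct stp_proto *proto,
				    struct sk_buff *skb, struct net_device *dev)
	{
		/* Runs under rcu_read_lock with the LLC header validated. */
		kfree_skb(skb);
	}

	static const struct stp_proto example_stp_proto = {
		/* group_address left all-zero: receive plain STP BPDUs */
		.rcv	= example_stp_rcv,
	};

	static int __init example_init(void)
	{
		return stp_proto_register(&example_stp_proto);
	}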
diff --git a/net/8021q/Kconfig b/net/8021q/Kconfig
new file mode 100644
index 000000000..8bf7a1765
--- /dev/null
+++ b/net/8021q/Kconfig
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Configuration for 802.1Q VLAN support
+#
+
+config VLAN_8021Q
+ tristate "802.1Q/802.1ad VLAN Support"
+ help
+ Select this and you will be able to create 802.1Q VLAN interfaces
+ on your Ethernet interfaces. 802.1Q VLAN supports almost
+ everything a regular Ethernet interface does, including
+ firewalling, bridging, and of course IP traffic. You will need
+ the 'ip' utility in order to effectively use VLANs.
+ See the VLAN web page for more information:
+ <http://www.candelatech.com/~greear/vlan.html>
+
+ To compile this code as a module, choose M here: the module
+ will be called 8021q.
+
+ If unsure, say N.
+
+config VLAN_8021Q_GVRP
+ bool "GVRP (GARP VLAN Registration Protocol) support"
+ depends on VLAN_8021Q
+ select GARP
+ help
+ Select this to enable GVRP end-system support. GVRP is used for
+ automatic propagation of registered VLANs to switches.
+
+ If unsure, say N.
+
+config VLAN_8021Q_MVRP
+ bool "MVRP (Multiple VLAN Registration Protocol) support"
+ depends on VLAN_8021Q
+ select MRP
+ help
+ Select this to enable MVRP end-system support. MVRP is used for
+ automatic propagation of registered VLANs to switches; it
+ supersedes GVRP and is not backwards-compatible.
+
+ If unsure, say N.
diff --git a/net/8021q/Makefile b/net/8021q/Makefile
new file mode 100644
index 000000000..e05d4d7aa
--- /dev/null
+++ b/net/8021q/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Linux VLAN layer.
+#
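+# vlan_core.o is forced built-in even when 8021q itself is a module:
+# $(subst m,y,...) maps m to y, because the core receive path calls into it.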
+obj-$(subst m,y,$(CONFIG_VLAN_8021Q)) += vlan_core.o
+obj-$(CONFIG_VLAN_8021Q) += 8021q.o
+
+8021q-y := vlan.o vlan_dev.o vlan_netlink.o
+8021q-$(CONFIG_VLAN_8021Q_GVRP) += vlan_gvrp.o
+8021q-$(CONFIG_VLAN_8021Q_MVRP) += vlan_mvrp.o
+8021q-$(CONFIG_PROC_FS) += vlanproc.o
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
new file mode 100644
index 000000000..64a94c981
--- /dev/null
+++ b/net/8021q/vlan.c
@@ -0,0 +1,739 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * INET 802.1Q VLAN
+ * Ethernet-type device handling.
+ *
+ * Authors: Ben Greear <greearb@candelatech.com>
+ * Please send support related email to: netdev@vger.kernel.org
+ * VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
+ *
+ * Fixes:
+ * Fix for packet capture - Nick Eggleston <nick@dccinc.com>;
+ * Add HW acceleration hooks - David S. Miller <davem@redhat.com>;
+ * Correct all the locking - David S. Miller <davem@redhat.com>;
+ * Use hash table for VLAN groups - David S. Miller <davem@redhat.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/capability.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/rculist.h>
+#include <net/p8022.h>
+#include <net/arp.h>
+#include <linux/rtnetlink.h>
+#include <linux/notifier.h>
+#include <net/rtnetlink.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <linux/uaccess.h>
+
+#include <linux/if_vlan.h>
+#include "vlan.h"
+#include "vlanproc.h"
+
+#define DRV_VERSION "1.8"
+
+/* Global VLAN variables */
+
+unsigned int vlan_net_id __read_mostly;
+
+const char vlan_fullname[] = "802.1Q VLAN Support";
+const char vlan_version[] = DRV_VERSION;
+
+/* End of global variables definitions. */
+
+static int vlan_group_prealloc_vid(struct vlan_group *vg,
+ __be16 vlan_proto, u16 vlan_id)
+{
+ struct net_device **array;
+ unsigned int vidx;
+ unsigned int size;
+ int pidx;
+
+ ASSERT_RTNL();
+
+ pidx = vlan_proto_idx(vlan_proto);
+ if (pidx < 0)
+ return -EINVAL;
+
+ vidx = vlan_id / VLAN_GROUP_ARRAY_PART_LEN;
+ array = vg->vlan_devices_arrays[pidx][vidx];
+ if (array != NULL)
+ return 0;
+
+ size = sizeof(struct net_device *) * VLAN_GROUP_ARRAY_PART_LEN;
+ array = kzalloc(size, GFP_KERNEL);
+ if (array == NULL)
+ return -ENOBUFS;
+
+ vg->vlan_devices_arrays[pidx][vidx] = array;
+ return 0;
+}
+
+static void vlan_stacked_transfer_operstate(const struct net_device *rootdev,
+ struct net_device *dev,
+ struct vlan_dev_priv *vlan)
+{
+ if (!(vlan->flags & VLAN_FLAG_BRIDGE_BINDING))
+ netif_stacked_transfer_operstate(rootdev, dev);
+}
+
+void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ struct net_device *real_dev = vlan->real_dev;
+ struct vlan_info *vlan_info;
+ struct vlan_group *grp;
+ u16 vlan_id = vlan->vlan_id;
+
+ ASSERT_RTNL();
+
+ vlan_info = rtnl_dereference(real_dev->vlan_info);
+ BUG_ON(!vlan_info);
+
+ grp = &vlan_info->grp;
+
+ grp->nr_vlan_devs--;
+
+ if (vlan->flags & VLAN_FLAG_MVRP)
+ vlan_mvrp_request_leave(dev);
+ if (vlan->flags & VLAN_FLAG_GVRP)
+ vlan_gvrp_request_leave(dev);
+
+ vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, NULL);
+
+ netdev_upper_dev_unlink(real_dev, dev);
+ /* Because unregister_netdevice_queue() makes sure at least one rcu
+ * grace period is respected before device freeing,
+	 * we don't need to call synchronize_net() here.
+ */
+ unregister_netdevice_queue(dev, head);
+
+ if (grp->nr_vlan_devs == 0) {
+ vlan_mvrp_uninit_applicant(real_dev);
+ vlan_gvrp_uninit_applicant(real_dev);
+ }
+
+ vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
+}
+
+int vlan_check_real_dev(struct net_device *real_dev,
+ __be16 protocol, u16 vlan_id,
+ struct netlink_ext_ack *extack)
+{
+ const char *name = real_dev->name;
+
+ if (real_dev->features & NETIF_F_VLAN_CHALLENGED) {
+ pr_info("VLANs not supported on %s\n", name);
+ NL_SET_ERR_MSG_MOD(extack, "VLANs not supported on device");
+ return -EOPNOTSUPP;
+ }
+
+ if (vlan_find_dev(real_dev, protocol, vlan_id) != NULL) {
+ NL_SET_ERR_MSG_MOD(extack, "VLAN device already exists");
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
+int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ struct net_device *real_dev = vlan->real_dev;
+ u16 vlan_id = vlan->vlan_id;
+ struct vlan_info *vlan_info;
+ struct vlan_group *grp;
+ int err;
+
+ err = vlan_vid_add(real_dev, vlan->vlan_proto, vlan_id);
+ if (err)
+ return err;
+
+ vlan_info = rtnl_dereference(real_dev->vlan_info);
+ /* vlan_info should be there now. vlan_vid_add took care of it */
+ BUG_ON(!vlan_info);
+
+ grp = &vlan_info->grp;
+ if (grp->nr_vlan_devs == 0) {
+ err = vlan_gvrp_init_applicant(real_dev);
+ if (err < 0)
+ goto out_vid_del;
+ err = vlan_mvrp_init_applicant(real_dev);
+ if (err < 0)
+ goto out_uninit_gvrp;
+ }
+
+ err = vlan_group_prealloc_vid(grp, vlan->vlan_proto, vlan_id);
+ if (err < 0)
+ goto out_uninit_mvrp;
+
+ err = register_netdevice(dev);
+ if (err < 0)
+ goto out_uninit_mvrp;
+
+ err = netdev_upper_dev_link(real_dev, dev, extack);
+ if (err)
+ goto out_unregister_netdev;
+
+ vlan_stacked_transfer_operstate(real_dev, dev, vlan);
+ linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */
+
+	/* So, got the sucker initialized, now let's place
+ * it into our local structure.
+ */
+ vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, dev);
+ grp->nr_vlan_devs++;
+
+ return 0;
+
+out_unregister_netdev:
+ unregister_netdevice(dev);
+out_uninit_mvrp:
+ if (grp->nr_vlan_devs == 0)
+ vlan_mvrp_uninit_applicant(real_dev);
+out_uninit_gvrp:
+ if (grp->nr_vlan_devs == 0)
+ vlan_gvrp_uninit_applicant(real_dev);
+out_vid_del:
+ vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
+ return err;
+}
+
+/* Attach a VLAN device to a mac address (i.e. Ethernet card).
+ * Returns 0 if the device was created or a negative error code otherwise.
+ */
+static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
+{
+ struct net_device *new_dev;
+ struct vlan_dev_priv *vlan;
+ struct net *net = dev_net(real_dev);
+ struct vlan_net *vn = net_generic(net, vlan_net_id);
+ char name[IFNAMSIZ];
+ int err;
+
+ if (vlan_id >= VLAN_VID_MASK)
+ return -ERANGE;
+
+ err = vlan_check_real_dev(real_dev, htons(ETH_P_8021Q), vlan_id,
+ NULL);
+ if (err < 0)
+ return err;
+
+ /* Gotta set up the fields for the device. */
+ switch (vn->name_type) {
+ case VLAN_NAME_TYPE_RAW_PLUS_VID:
+ /* name will look like: eth1.0005 */
+ snprintf(name, IFNAMSIZ, "%s.%.4i", real_dev->name, vlan_id);
+ break;
+ case VLAN_NAME_TYPE_PLUS_VID_NO_PAD:
+ /* Put our vlan.VID in the name.
+ * Name will look like: vlan5
+ */
+ snprintf(name, IFNAMSIZ, "vlan%i", vlan_id);
+ break;
+ case VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD:
+ /* Put our vlan.VID in the name.
+ * Name will look like: eth0.5
+ */
+ snprintf(name, IFNAMSIZ, "%s.%i", real_dev->name, vlan_id);
+ break;
+ case VLAN_NAME_TYPE_PLUS_VID:
+ /* Put our vlan.VID in the name.
+ * Name will look like: vlan0005
+ */
+ default:
+ snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
+ }
+
+ new_dev = alloc_netdev(sizeof(struct vlan_dev_priv), name,
+ NET_NAME_UNKNOWN, vlan_setup);
+
+ if (new_dev == NULL)
+ return -ENOBUFS;
+
+ dev_net_set(new_dev, net);
+ /* need 4 bytes for extra VLAN header info,
+	 * and hope the underlying device can handle it.
+ */
+ new_dev->mtu = real_dev->mtu;
+
+ vlan = vlan_dev_priv(new_dev);
+ vlan->vlan_proto = htons(ETH_P_8021Q);
+ vlan->vlan_id = vlan_id;
+ vlan->real_dev = real_dev;
+ vlan->dent = NULL;
+ vlan->flags = VLAN_FLAG_REORDER_HDR;
+
+ new_dev->rtnl_link_ops = &vlan_link_ops;
+ err = register_vlan_dev(new_dev, NULL);
+ if (err < 0)
+ goto out_free_newdev;
+
+ return 0;
+
+out_free_newdev:
+ free_netdev(new_dev);
+ return err;
+}
+
+static void vlan_sync_address(struct net_device *dev,
+ struct net_device *vlandev)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
+
+ /* May be called without an actual change */
+ if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr))
+ return;
+
+ /* vlan continues to inherit address of lower device */
+ if (vlan_dev_inherit_address(vlandev, dev))
+ goto out;
+
+ /* vlan address was different from the old address and is equal to
+ * the new address */
+ if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
+ ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
+ dev_uc_del(dev, vlandev->dev_addr);
+
+ /* vlan address was equal to the old address and is different from
+ * the new address */
+ if (ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
+ !ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
+ dev_uc_add(dev, vlandev->dev_addr);
+
+out:
+ ether_addr_copy(vlan->real_dev_addr, dev->dev_addr);
+}
+
+static void vlan_transfer_features(struct net_device *dev,
+ struct net_device *vlandev)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
+
+ vlandev->gso_max_size = dev->gso_max_size;
+ vlandev->gso_max_segs = dev->gso_max_segs;
+
+ if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto))
+ vlandev->hard_header_len = dev->hard_header_len;
+ else
+ vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;
+
+#if IS_ENABLED(CONFIG_FCOE)
+ vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
+#endif
+
+ vlandev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+ vlandev->priv_flags |= (vlan->real_dev->priv_flags & IFF_XMIT_DST_RELEASE);
+ vlandev->hw_enc_features = vlan_tnl_features(vlan->real_dev);
+
+ netdev_update_features(vlandev);
+}
+
+static int __vlan_device_event(struct net_device *dev, unsigned long event)
+{
+ int err = 0;
+
+ switch (event) {
+ case NETDEV_CHANGENAME:
+ vlan_proc_rem_dev(dev);
+ err = vlan_proc_add_dev(dev);
+ break;
+ case NETDEV_REGISTER:
+ err = vlan_proc_add_dev(dev);
+ break;
+ case NETDEV_UNREGISTER:
+ vlan_proc_rem_dev(dev);
+ break;
+ }
+
+ return err;
+}
+
+static int vlan_device_event(struct notifier_block *unused, unsigned long event,
+ void *ptr)
+{
+ struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct vlan_group *grp;
+ struct vlan_info *vlan_info;
+ int i, flgs;
+ struct net_device *vlandev;
+ struct vlan_dev_priv *vlan;
+ bool last = false;
+ LIST_HEAD(list);
+ int err;
+
+ if (is_vlan_dev(dev)) {
+ int err = __vlan_device_event(dev, event);
+
+ if (err)
+ return notifier_from_errno(err);
+ }
+
+ if ((event == NETDEV_UP) &&
+ (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
+ pr_info("adding VLAN 0 to HW filter on device %s\n",
+ dev->name);
+ vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
+ }
+ if (event == NETDEV_DOWN &&
+ (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+ vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
+
+ vlan_info = rtnl_dereference(dev->vlan_info);
+ if (!vlan_info)
+ goto out;
+ grp = &vlan_info->grp;
+
+ /* It is OK that we do not hold the group lock right now,
+ * as we run under the RTNL lock.
+ */
+
+ switch (event) {
+ case NETDEV_CHANGE:
+ /* Propagate real device state to vlan devices */
+ vlan_group_for_each_dev(grp, i, vlandev)
+ vlan_stacked_transfer_operstate(dev, vlandev,
+ vlan_dev_priv(vlandev));
+ break;
+
+ case NETDEV_CHANGEADDR:
+ /* Adjust unicast filters on underlying device */
+ vlan_group_for_each_dev(grp, i, vlandev) {
+ flgs = vlandev->flags;
+ if (!(flgs & IFF_UP))
+ continue;
+
+ vlan_sync_address(dev, vlandev);
+ }
+ break;
+
+ case NETDEV_CHANGEMTU:
+ vlan_group_for_each_dev(grp, i, vlandev) {
+ if (vlandev->mtu <= dev->mtu)
+ continue;
+
+ dev_set_mtu(vlandev, dev->mtu);
+ }
+ break;
+
+ case NETDEV_FEAT_CHANGE:
+ /* Propagate device features to underlying device */
+ vlan_group_for_each_dev(grp, i, vlandev)
+ vlan_transfer_features(dev, vlandev);
+ break;
+
+ case NETDEV_DOWN: {
+ struct net_device *tmp;
+ LIST_HEAD(close_list);
+
+ /* Put all VLANs for this dev in the down state too. */
+ vlan_group_for_each_dev(grp, i, vlandev) {
+ flgs = vlandev->flags;
+ if (!(flgs & IFF_UP))
+ continue;
+
+ vlan = vlan_dev_priv(vlandev);
+ if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
+ list_add(&vlandev->close_list, &close_list);
+ }
+
+ dev_close_many(&close_list, false);
+
+ list_for_each_entry_safe(vlandev, tmp, &close_list, close_list) {
+ vlan_stacked_transfer_operstate(dev, vlandev,
+ vlan_dev_priv(vlandev));
+ list_del_init(&vlandev->close_list);
+ }
+ list_del(&close_list);
+ break;
+ }
+ case NETDEV_UP:
+ /* Put all VLANs for this dev in the up state too. */
+ vlan_group_for_each_dev(grp, i, vlandev) {
+ flgs = dev_get_flags(vlandev);
+ if (flgs & IFF_UP)
+ continue;
+
+ vlan = vlan_dev_priv(vlandev);
+ if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
+ dev_change_flags(vlandev, flgs | IFF_UP,
+ extack);
+ vlan_stacked_transfer_operstate(dev, vlandev, vlan);
+ }
+ break;
+
+ case NETDEV_UNREGISTER:
+ /* twiddle thumbs on netns device moves */
+ if (dev->reg_state != NETREG_UNREGISTERING)
+ break;
+
+ vlan_group_for_each_dev(grp, i, vlandev) {
+			/* Removal of the last vid destroys vlan_info; abort
+			 * afterwards. */
+ if (vlan_info->nr_vids == 1)
+ last = true;
+
+ unregister_vlan_dev(vlandev, &list);
+ if (last)
+ break;
+ }
+ unregister_netdevice_many(&list);
+ break;
+
+ case NETDEV_PRE_TYPE_CHANGE:
+		/* Forbid the underlying device from changing its type. */
+ if (vlan_uses_dev(dev))
+ return NOTIFY_BAD;
+ break;
+
+ case NETDEV_NOTIFY_PEERS:
+ case NETDEV_BONDING_FAILOVER:
+ case NETDEV_RESEND_IGMP:
+ /* Propagate to vlan devices */
+ vlan_group_for_each_dev(grp, i, vlandev)
+ call_netdevice_notifiers(event, vlandev);
+ break;
+
+ case NETDEV_CVLAN_FILTER_PUSH_INFO:
+ err = vlan_filter_push_vids(vlan_info, htons(ETH_P_8021Q));
+ if (err)
+ return notifier_from_errno(err);
+ break;
+
+ case NETDEV_CVLAN_FILTER_DROP_INFO:
+ vlan_filter_drop_vids(vlan_info, htons(ETH_P_8021Q));
+ break;
+
+ case NETDEV_SVLAN_FILTER_PUSH_INFO:
+ err = vlan_filter_push_vids(vlan_info, htons(ETH_P_8021AD));
+ if (err)
+ return notifier_from_errno(err);
+ break;
+
+ case NETDEV_SVLAN_FILTER_DROP_INFO:
+ vlan_filter_drop_vids(vlan_info, htons(ETH_P_8021AD));
+ break;
+ }
+
+out:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block vlan_notifier_block __read_mostly = {
+ .notifier_call = vlan_device_event,
+};
+
+/*
+ * VLAN IOCTL handler.
+ * o execute requested action or pass command to the device driver
+ * arg is really a struct vlan_ioctl_args __user *.
+ */
+static int vlan_ioctl_handler(struct net *net, void __user *arg)
+{
+ int err;
+ struct vlan_ioctl_args args;
+ struct net_device *dev = NULL;
+
+ if (copy_from_user(&args, arg, sizeof(struct vlan_ioctl_args)))
+ return -EFAULT;
+
+ /* Null terminate this sucker, just in case. */
+ args.device1[sizeof(args.device1) - 1] = 0;
+ args.u.device2[sizeof(args.u.device2) - 1] = 0;
+
+ rtnl_lock();
+
+ switch (args.cmd) {
+ case SET_VLAN_INGRESS_PRIORITY_CMD:
+ case SET_VLAN_EGRESS_PRIORITY_CMD:
+ case SET_VLAN_FLAG_CMD:
+ case ADD_VLAN_CMD:
+ case DEL_VLAN_CMD:
+ case GET_VLAN_REALDEV_NAME_CMD:
+ case GET_VLAN_VID_CMD:
+ err = -ENODEV;
+ dev = __dev_get_by_name(net, args.device1);
+ if (!dev)
+ goto out;
+
+ err = -EINVAL;
+ if (args.cmd != ADD_VLAN_CMD && !is_vlan_dev(dev))
+ goto out;
+ }
+
+ switch (args.cmd) {
+ case SET_VLAN_INGRESS_PRIORITY_CMD:
+ err = -EPERM;
+ if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+ break;
+ vlan_dev_set_ingress_priority(dev,
+ args.u.skb_priority,
+ args.vlan_qos);
+ err = 0;
+ break;
+
+ case SET_VLAN_EGRESS_PRIORITY_CMD:
+ err = -EPERM;
+ if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+ break;
+ err = vlan_dev_set_egress_priority(dev,
+ args.u.skb_priority,
+ args.vlan_qos);
+ break;
+
+ case SET_VLAN_FLAG_CMD:
+ err = -EPERM;
+ if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+ break;
+ err = vlan_dev_change_flags(dev,
+ args.vlan_qos ? args.u.flag : 0,
+ args.u.flag);
+ break;
+
+ case SET_VLAN_NAME_TYPE_CMD:
+ err = -EPERM;
+ if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+ break;
+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
+ struct vlan_net *vn;
+
+ vn = net_generic(net, vlan_net_id);
+ vn->name_type = args.u.name_type;
+ err = 0;
+ } else {
+ err = -EINVAL;
+ }
+ break;
+
+ case ADD_VLAN_CMD:
+ err = -EPERM;
+ if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+ break;
+ err = register_vlan_device(dev, args.u.VID);
+ break;
+
+ case DEL_VLAN_CMD:
+ err = -EPERM;
+ if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+ break;
+ unregister_vlan_dev(dev, NULL);
+ err = 0;
+ break;
+
+ case GET_VLAN_REALDEV_NAME_CMD:
+ err = 0;
+ vlan_dev_get_realdev_name(dev, args.u.device2);
+ if (copy_to_user(arg, &args,
+ sizeof(struct vlan_ioctl_args)))
+ err = -EFAULT;
+ break;
+
+ case GET_VLAN_VID_CMD:
+ err = 0;
+ args.u.VID = vlan_dev_vlan_id(dev);
+ if (copy_to_user(arg, &args,
+ sizeof(struct vlan_ioctl_args)))
+ err = -EFAULT;
+ break;
+
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+out:
+ rtnl_unlock();
+ return err;
+}
+
+static int __net_init vlan_init_net(struct net *net)
+{
+ struct vlan_net *vn = net_generic(net, vlan_net_id);
+ int err;
+
+ vn->name_type = VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD;
+
+ err = vlan_proc_init(net);
+
+ return err;
+}
+
+static void __net_exit vlan_exit_net(struct net *net)
+{
+ vlan_proc_cleanup(net);
+}
+
+static struct pernet_operations vlan_net_ops = {
+ .init = vlan_init_net,
+ .exit = vlan_exit_net,
+ .id = &vlan_net_id,
+ .size = sizeof(struct vlan_net),
+};
+
+static int __init vlan_proto_init(void)
+{
+ int err;
+
+ pr_info("%s v%s\n", vlan_fullname, vlan_version);
+
+ err = register_pernet_subsys(&vlan_net_ops);
+ if (err < 0)
+ goto err0;
+
+ err = register_netdevice_notifier(&vlan_notifier_block);
+ if (err < 0)
+ goto err2;
+
+ err = vlan_gvrp_init();
+ if (err < 0)
+ goto err3;
+
+ err = vlan_mvrp_init();
+ if (err < 0)
+ goto err4;
+
+ err = vlan_netlink_init();
+ if (err < 0)
+ goto err5;
+
+ vlan_ioctl_set(vlan_ioctl_handler);
+ return 0;
+
+err5:
+ vlan_mvrp_uninit();
+err4:
+ vlan_gvrp_uninit();
+err3:
+ unregister_netdevice_notifier(&vlan_notifier_block);
+err2:
+ unregister_pernet_subsys(&vlan_net_ops);
+err0:
+ return err;
+}
+
+static void __exit vlan_cleanup_module(void)
+{
+ vlan_ioctl_set(NULL);
+
+ vlan_netlink_fini();
+
+ unregister_netdevice_notifier(&vlan_notifier_block);
+
+ unregister_pernet_subsys(&vlan_net_ops);
+ rcu_barrier(); /* Wait for completion of call_rcu()'s */
+
+ vlan_mvrp_uninit();
+ vlan_gvrp_uninit();
+}
+
+module_init(vlan_proto_init);
+module_exit(vlan_cleanup_module);
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
new file mode 100644
index 000000000..953405362
--- /dev/null
+++ b/net/8021q/vlan.h
@@ -0,0 +1,200 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __BEN_VLAN_802_1Q_INC__
+#define __BEN_VLAN_802_1Q_INC__
+
+#include <linux/if_vlan.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/list.h>
+
+/* If this changes, the algorithm will have to be reworked, because it
+ * depends on completely exhausting the VLAN identifier space. Thus
+ * it gives constant-time look-up, but in many cases it wastes memory.
+ */
+#define VLAN_GROUP_ARRAY_SPLIT_PARTS 8
+#define VLAN_GROUP_ARRAY_PART_LEN (VLAN_N_VID/VLAN_GROUP_ARRAY_SPLIT_PARTS)
+
+enum vlan_protos {
+ VLAN_PROTO_8021Q = 0,
+ VLAN_PROTO_8021AD,
+ VLAN_PROTO_NUM,
+};
+
+struct vlan_group {
+ unsigned int nr_vlan_devs;
+ struct hlist_node hlist; /* linked list */
+ struct net_device **vlan_devices_arrays[VLAN_PROTO_NUM]
+ [VLAN_GROUP_ARRAY_SPLIT_PARTS];
+};
+
+struct vlan_info {
+ struct net_device *real_dev; /* The ethernet(like) device
+ * the vlan is attached to.
+ */
+ struct vlan_group grp;
+ struct list_head vid_list;
+ unsigned int nr_vids;
+ struct rcu_head rcu;
+};
+
+static inline int vlan_proto_idx(__be16 proto)
+{
+ switch (proto) {
+ case htons(ETH_P_8021Q):
+ return VLAN_PROTO_8021Q;
+ case htons(ETH_P_8021AD):
+ return VLAN_PROTO_8021AD;
+ default:
+ WARN(1, "invalid VLAN protocol: 0x%04x\n", ntohs(proto));
+ return -EINVAL;
+ }
+}
+
+static inline struct net_device *__vlan_group_get_device(struct vlan_group *vg,
+ unsigned int pidx,
+ u16 vlan_id)
+{
+ struct net_device **array;
+
+ array = vg->vlan_devices_arrays[pidx]
+ [vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
+ return array ? array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] : NULL;
+}
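With VLAN_N_VID of 4096 split into 8 parts, each part holds 512 device pointers, so a lookup costs two array indexings. A worked example of the index math (an illustrative fragment, not kernel code):

	/* vlan_id 1000 with VLAN_GROUP_ARRAY_PART_LEN = 4096 / 8 = 512 */
	unsigned int part = 1000 / 512;		/* -> part 1   */
	unsigned int slot = 1000 % 512;		/* -> slot 488 */
	struct net_device *dev = vg->vlan_devices_arrays[pidx][part][slot];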
+
+static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
+ __be16 vlan_proto,
+ u16 vlan_id)
+{
+ int pidx = vlan_proto_idx(vlan_proto);
+
+ if (pidx < 0)
+ return NULL;
+
+ return __vlan_group_get_device(vg, pidx, vlan_id);
+}
+
+static inline void vlan_group_set_device(struct vlan_group *vg,
+ __be16 vlan_proto, u16 vlan_id,
+ struct net_device *dev)
+{
+ int pidx = vlan_proto_idx(vlan_proto);
+ struct net_device **array;
+
+ if (!vg || pidx < 0)
+ return;
+ array = vg->vlan_devices_arrays[pidx]
+ [vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
+ array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] = dev;
+}
+
+/* Must be invoked with rcu_read_lock or with RTNL. */
+static inline struct net_device *vlan_find_dev(struct net_device *real_dev,
+ __be16 vlan_proto, u16 vlan_id)
+{
+ struct vlan_info *vlan_info = rcu_dereference_rtnl(real_dev->vlan_info);
+
+ if (vlan_info)
+ return vlan_group_get_device(&vlan_info->grp,
+ vlan_proto, vlan_id);
+
+ return NULL;
+}
+
+static inline netdev_features_t vlan_tnl_features(struct net_device *real_dev)
+{
+ netdev_features_t ret;
+
+ ret = real_dev->hw_enc_features &
+ (NETIF_F_CSUM_MASK | NETIF_F_ALL_TSO | NETIF_F_GSO_ENCAP_ALL);
+
+ if ((ret & NETIF_F_GSO_ENCAP_ALL) && (ret & NETIF_F_CSUM_MASK))
+ return (ret & ~NETIF_F_CSUM_MASK) | NETIF_F_HW_CSUM;
+ return 0;
+}
+
+#define vlan_group_for_each_dev(grp, i, dev) \
+ for ((i) = 0; i < VLAN_PROTO_NUM * VLAN_N_VID; i++) \
+ if (((dev) = __vlan_group_get_device((grp), (i) / VLAN_N_VID, \
+ (i) % VLAN_N_VID)))
+
+int vlan_filter_push_vids(struct vlan_info *vlan_info, __be16 proto);
+void vlan_filter_drop_vids(struct vlan_info *vlan_info, __be16 proto);
+
+/* found in vlan_dev.c */
+void vlan_dev_set_ingress_priority(const struct net_device *dev,
+ u32 skb_prio, u16 vlan_prio);
+int vlan_dev_set_egress_priority(const struct net_device *dev,
+ u32 skb_prio, u16 vlan_prio);
+int vlan_dev_change_flags(const struct net_device *dev, u32 flag, u32 mask);
+void vlan_dev_get_realdev_name(const struct net_device *dev, char *result);
+
+int vlan_check_real_dev(struct net_device *real_dev,
+ __be16 protocol, u16 vlan_id,
+ struct netlink_ext_ack *extack);
+void vlan_setup(struct net_device *dev);
+int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack);
+void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
+void vlan_dev_uninit(struct net_device *dev);
+bool vlan_dev_inherit_address(struct net_device *dev,
+ struct net_device *real_dev);
+
+static inline u32 vlan_get_ingress_priority(struct net_device *dev,
+ u16 vlan_tci)
+{
+ struct vlan_dev_priv *vip = vlan_dev_priv(dev);
+
+ return vip->ingress_priority_map[(vlan_tci >> VLAN_PRIO_SHIFT) & 0x7];
+}
+
+#ifdef CONFIG_VLAN_8021Q_GVRP
+int vlan_gvrp_request_join(const struct net_device *dev);
+void vlan_gvrp_request_leave(const struct net_device *dev);
+int vlan_gvrp_init_applicant(struct net_device *dev);
+void vlan_gvrp_uninit_applicant(struct net_device *dev);
+int vlan_gvrp_init(void);
+void vlan_gvrp_uninit(void);
+#else
+static inline int vlan_gvrp_request_join(const struct net_device *dev) { return 0; }
+static inline void vlan_gvrp_request_leave(const struct net_device *dev) {}
+static inline int vlan_gvrp_init_applicant(struct net_device *dev) { return 0; }
+static inline void vlan_gvrp_uninit_applicant(struct net_device *dev) {}
+static inline int vlan_gvrp_init(void) { return 0; }
+static inline void vlan_gvrp_uninit(void) {}
+#endif
+
+#ifdef CONFIG_VLAN_8021Q_MVRP
+int vlan_mvrp_request_join(const struct net_device *dev);
+void vlan_mvrp_request_leave(const struct net_device *dev);
+int vlan_mvrp_init_applicant(struct net_device *dev);
+void vlan_mvrp_uninit_applicant(struct net_device *dev);
+int vlan_mvrp_init(void);
+void vlan_mvrp_uninit(void);
+#else
+static inline int vlan_mvrp_request_join(const struct net_device *dev) { return 0; }
+static inline void vlan_mvrp_request_leave(const struct net_device *dev) {}
+static inline int vlan_mvrp_init_applicant(struct net_device *dev) { return 0; }
+static inline void vlan_mvrp_uninit_applicant(struct net_device *dev) {}
+static inline int vlan_mvrp_init(void) { return 0; }
+static inline void vlan_mvrp_uninit(void) {}
+#endif
+
+extern const char vlan_fullname[];
+extern const char vlan_version[];
+int vlan_netlink_init(void);
+void vlan_netlink_fini(void);
+
+extern struct rtnl_link_ops vlan_link_ops;
+
+extern unsigned int vlan_net_id;
+
+struct proc_dir_entry;
+
+struct vlan_net {
+ /* /proc/net/vlan */
+ struct proc_dir_entry *proc_vlan_dir;
+ /* /proc/net/vlan/config */
+ struct proc_dir_entry *proc_vlan_conf;
+ /* Determines interface naming scheme. */
+ unsigned short name_type;
+};
+
+#endif /* !(__BEN_VLAN_802_1Q_INC__) */
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
new file mode 100644
index 000000000..43aea97c5
--- /dev/null
+++ b/net/8021q/vlan_core.c
@@ -0,0 +1,560 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/netpoll.h>
+#include <linux/export.h>
+#include "vlan.h"
+
+bool vlan_do_receive(struct sk_buff **skbp)
+{
+ struct sk_buff *skb = *skbp;
+ __be16 vlan_proto = skb->vlan_proto;
+ u16 vlan_id = skb_vlan_tag_get_id(skb);
+ struct net_device *vlan_dev;
+ struct vlan_pcpu_stats *rx_stats;
+
+ vlan_dev = vlan_find_dev(skb->dev, vlan_proto, vlan_id);
+ if (!vlan_dev)
+ return false;
+
+ skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
+ if (unlikely(!skb))
+ return false;
+
+ if (unlikely(!(vlan_dev->flags & IFF_UP))) {
+ kfree_skb(skb);
+ *skbp = NULL;
+ return false;
+ }
+
+ skb->dev = vlan_dev;
+ if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) {
+ /* Our lower layer thinks this is not local, let's make sure.
+ * This allows the VLAN to have a different MAC than the
+ * underlying device, and still route correctly. */
+ if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
+ skb->pkt_type = PACKET_HOST;
+ }
+
+ if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR) &&
+ !netif_is_macvlan_port(vlan_dev) &&
+ !netif_is_bridge_port(vlan_dev)) {
+ unsigned int offset = skb->data - skb_mac_header(skb);
+
+ /*
+		 * vlan_insert_tag expects skb->data to point to the mac header,
+		 * so change skb->data before calling it and change it back to
+		 * the original position later.
+ */
+ skb_push(skb, offset);
+ skb = *skbp = vlan_insert_inner_tag(skb, skb->vlan_proto,
+ skb->vlan_tci, skb->mac_len);
+ if (!skb)
+ return false;
+ skb_pull(skb, offset + VLAN_HLEN);
+ skb_reset_mac_len(skb);
+ }
+
+ skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
+ __vlan_hwaccel_clear_tag(skb);
+
+ rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);
+
+ u64_stats_update_begin(&rx_stats->syncp);
+ rx_stats->rx_packets++;
+ rx_stats->rx_bytes += skb->len;
+ if (skb->pkt_type == PACKET_MULTICAST)
+ rx_stats->rx_multicast++;
+ u64_stats_update_end(&rx_stats->syncp);
+
+ return true;
+}
+
+/* Must be invoked with rcu_read_lock. */
+struct net_device *__vlan_find_dev_deep_rcu(struct net_device *dev,
+ __be16 vlan_proto, u16 vlan_id)
+{
+ struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);
+
+ if (vlan_info) {
+ return vlan_group_get_device(&vlan_info->grp,
+ vlan_proto, vlan_id);
+ } else {
+ /*
+		 * Lower devices of master uppers (bonding, team) do not have
+		 * a grp assigned to themselves. The grp is assigned to the
+		 * upper device instead.
+ */
+ struct net_device *upper_dev;
+
+ upper_dev = netdev_master_upper_dev_get_rcu(dev);
+ if (upper_dev)
+ return __vlan_find_dev_deep_rcu(upper_dev,
+ vlan_proto, vlan_id);
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(__vlan_find_dev_deep_rcu);
+
+struct net_device *vlan_dev_real_dev(const struct net_device *dev)
+{
+ struct net_device *ret = vlan_dev_priv(dev)->real_dev;
+
+ while (is_vlan_dev(ret))
+ ret = vlan_dev_priv(ret)->real_dev;
+
+ return ret;
+}
+EXPORT_SYMBOL(vlan_dev_real_dev);
+
+u16 vlan_dev_vlan_id(const struct net_device *dev)
+{
+ return vlan_dev_priv(dev)->vlan_id;
+}
+EXPORT_SYMBOL(vlan_dev_vlan_id);
+
+__be16 vlan_dev_vlan_proto(const struct net_device *dev)
+{
+ return vlan_dev_priv(dev)->vlan_proto;
+}
+EXPORT_SYMBOL(vlan_dev_vlan_proto);
+
+/*
+ * vlan info and vid list
+ */
+
+static void vlan_group_free(struct vlan_group *grp)
+{
+ int i, j;
+
+ for (i = 0; i < VLAN_PROTO_NUM; i++)
+ for (j = 0; j < VLAN_GROUP_ARRAY_SPLIT_PARTS; j++)
+ kfree(grp->vlan_devices_arrays[i][j]);
+}
+
+static void vlan_info_free(struct vlan_info *vlan_info)
+{
+ vlan_group_free(&vlan_info->grp);
+ kfree(vlan_info);
+}
+
+static void vlan_info_rcu_free(struct rcu_head *rcu)
+{
+ vlan_info_free(container_of(rcu, struct vlan_info, rcu));
+}
+
+static struct vlan_info *vlan_info_alloc(struct net_device *dev)
+{
+ struct vlan_info *vlan_info;
+
+ vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL);
+ if (!vlan_info)
+ return NULL;
+
+ vlan_info->real_dev = dev;
+ INIT_LIST_HEAD(&vlan_info->vid_list);
+ return vlan_info;
+}
+
+struct vlan_vid_info {
+ struct list_head list;
+ __be16 proto;
+ u16 vid;
+ int refcount;
+};
+
+static bool vlan_hw_filter_capable(const struct net_device *dev, __be16 proto)
+{
+ if (proto == htons(ETH_P_8021Q) &&
+ dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ return true;
+ if (proto == htons(ETH_P_8021AD) &&
+ dev->features & NETIF_F_HW_VLAN_STAG_FILTER)
+ return true;
+ return false;
+}
+
+static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
+ __be16 proto, u16 vid)
+{
+ struct vlan_vid_info *vid_info;
+
+ list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
+ if (vid_info->proto == proto && vid_info->vid == vid)
+ return vid_info;
+ }
+ return NULL;
+}
+
+static struct vlan_vid_info *vlan_vid_info_alloc(__be16 proto, u16 vid)
+{
+ struct vlan_vid_info *vid_info;
+
+ vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
+ if (!vid_info)
+ return NULL;
+ vid_info->proto = proto;
+ vid_info->vid = vid;
+
+ return vid_info;
+}
+
+static int vlan_add_rx_filter_info(struct net_device *dev, __be16 proto, u16 vid)
+{
+ if (!vlan_hw_filter_capable(dev, proto))
+ return 0;
+
+ if (netif_device_present(dev))
+ return dev->netdev_ops->ndo_vlan_rx_add_vid(dev, proto, vid);
+ else
+ return -ENODEV;
+}
+
+static int vlan_kill_rx_filter_info(struct net_device *dev, __be16 proto, u16 vid)
+{
+ if (!vlan_hw_filter_capable(dev, proto))
+ return 0;
+
+ if (netif_device_present(dev))
+ return dev->netdev_ops->ndo_vlan_rx_kill_vid(dev, proto, vid);
+ else
+ return -ENODEV;
+}
+
+int vlan_for_each(struct net_device *dev,
+ int (*action)(struct net_device *dev, int vid, void *arg),
+ void *arg)
+{
+ struct vlan_vid_info *vid_info;
+ struct vlan_info *vlan_info;
+ struct net_device *vdev;
+ int ret;
+
+ ASSERT_RTNL();
+
+ vlan_info = rtnl_dereference(dev->vlan_info);
+ if (!vlan_info)
+ return 0;
+
+ list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
+ vdev = vlan_group_get_device(&vlan_info->grp, vid_info->proto,
+ vid_info->vid);
+ ret = action(vdev, vid_info->vid, arg);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(vlan_for_each);
+
+int vlan_filter_push_vids(struct vlan_info *vlan_info, __be16 proto)
+{
+ struct net_device *real_dev = vlan_info->real_dev;
+ struct vlan_vid_info *vlan_vid_info;
+ int err;
+
+ list_for_each_entry(vlan_vid_info, &vlan_info->vid_list, list) {
+ if (vlan_vid_info->proto == proto) {
+ err = vlan_add_rx_filter_info(real_dev, proto,
+ vlan_vid_info->vid);
+ if (err)
+ goto unwind;
+ }
+ }
+
+ return 0;
+
+unwind:
+ list_for_each_entry_continue_reverse(vlan_vid_info,
+ &vlan_info->vid_list, list) {
+ if (vlan_vid_info->proto == proto)
+ vlan_kill_rx_filter_info(real_dev, proto,
+ vlan_vid_info->vid);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(vlan_filter_push_vids);
+
+void vlan_filter_drop_vids(struct vlan_info *vlan_info, __be16 proto)
+{
+ struct vlan_vid_info *vlan_vid_info;
+
+ list_for_each_entry(vlan_vid_info, &vlan_info->vid_list, list)
+ if (vlan_vid_info->proto == proto)
+ vlan_kill_rx_filter_info(vlan_info->real_dev,
+ vlan_vid_info->proto,
+ vlan_vid_info->vid);
+}
+EXPORT_SYMBOL(vlan_filter_drop_vids);
+
+static int __vlan_vid_add(struct vlan_info *vlan_info, __be16 proto, u16 vid,
+ struct vlan_vid_info **pvid_info)
+{
+ struct net_device *dev = vlan_info->real_dev;
+ struct vlan_vid_info *vid_info;
+ int err;
+
+ vid_info = vlan_vid_info_alloc(proto, vid);
+ if (!vid_info)
+ return -ENOMEM;
+
+ err = vlan_add_rx_filter_info(dev, proto, vid);
+ if (err) {
+ kfree(vid_info);
+ return err;
+ }
+
+ list_add(&vid_info->list, &vlan_info->vid_list);
+ vlan_info->nr_vids++;
+ *pvid_info = vid_info;
+ return 0;
+}
+
+int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct vlan_info *vlan_info;
+ struct vlan_vid_info *vid_info;
+ bool vlan_info_created = false;
+ int err;
+
+ ASSERT_RTNL();
+
+ vlan_info = rtnl_dereference(dev->vlan_info);
+ if (!vlan_info) {
+ vlan_info = vlan_info_alloc(dev);
+ if (!vlan_info)
+ return -ENOMEM;
+ vlan_info_created = true;
+ }
+ vid_info = vlan_vid_info_get(vlan_info, proto, vid);
+ if (!vid_info) {
+ err = __vlan_vid_add(vlan_info, proto, vid, &vid_info);
+ if (err)
+ goto out_free_vlan_info;
+ }
+ vid_info->refcount++;
+
+ if (vlan_info_created)
+ rcu_assign_pointer(dev->vlan_info, vlan_info);
+
+ return 0;
+
+out_free_vlan_info:
+ if (vlan_info_created)
+ kfree(vlan_info);
+ return err;
+}
+EXPORT_SYMBOL(vlan_vid_add);
+
+static void __vlan_vid_del(struct vlan_info *vlan_info,
+ struct vlan_vid_info *vid_info)
+{
+ struct net_device *dev = vlan_info->real_dev;
+ __be16 proto = vid_info->proto;
+ u16 vid = vid_info->vid;
+ int err;
+
+ err = vlan_kill_rx_filter_info(dev, proto, vid);
+ if (err && dev->reg_state != NETREG_UNREGISTERING)
+ netdev_warn(dev, "failed to kill vid %04x/%d\n", proto, vid);
+
+ list_del(&vid_info->list);
+ kfree(vid_info);
+ vlan_info->nr_vids--;
+}
+
+void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct vlan_info *vlan_info;
+ struct vlan_vid_info *vid_info;
+
+ ASSERT_RTNL();
+
+ vlan_info = rtnl_dereference(dev->vlan_info);
+ if (!vlan_info)
+ return;
+
+ vid_info = vlan_vid_info_get(vlan_info, proto, vid);
+ if (!vid_info)
+ return;
+ vid_info->refcount--;
+ if (vid_info->refcount == 0) {
+ __vlan_vid_del(vlan_info, vid_info);
+ if (vlan_info->nr_vids == 0) {
+ RCU_INIT_POINTER(dev->vlan_info, NULL);
+ call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
+ }
+ }
+}
+EXPORT_SYMBOL(vlan_vid_del);
+
+int vlan_vids_add_by_dev(struct net_device *dev,
+ const struct net_device *by_dev)
+{
+ struct vlan_vid_info *vid_info;
+ struct vlan_info *vlan_info;
+ int err;
+
+ ASSERT_RTNL();
+
+ vlan_info = rtnl_dereference(by_dev->vlan_info);
+ if (!vlan_info)
+ return 0;
+
+ list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
+ if (!vlan_hw_filter_capable(by_dev, vid_info->proto))
+ continue;
+ err = vlan_vid_add(dev, vid_info->proto, vid_info->vid);
+ if (err)
+ goto unwind;
+ }
+ return 0;
+
+unwind:
+ list_for_each_entry_continue_reverse(vid_info,
+ &vlan_info->vid_list,
+ list) {
+ if (!vlan_hw_filter_capable(by_dev, vid_info->proto))
+ continue;
+ vlan_vid_del(dev, vid_info->proto, vid_info->vid);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(vlan_vids_add_by_dev);
+
+void vlan_vids_del_by_dev(struct net_device *dev,
+ const struct net_device *by_dev)
+{
+ struct vlan_vid_info *vid_info;
+ struct vlan_info *vlan_info;
+
+ ASSERT_RTNL();
+
+ vlan_info = rtnl_dereference(by_dev->vlan_info);
+ if (!vlan_info)
+ return;
+
+ list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
+ if (!vlan_hw_filter_capable(by_dev, vid_info->proto))
+ continue;
+ vlan_vid_del(dev, vid_info->proto, vid_info->vid);
+ }
+}
+EXPORT_SYMBOL(vlan_vids_del_by_dev);
+
+bool vlan_uses_dev(const struct net_device *dev)
+{
+ struct vlan_info *vlan_info;
+
+ ASSERT_RTNL();
+
+ vlan_info = rtnl_dereference(dev->vlan_info);
+ if (!vlan_info)
+ return false;
+ return vlan_info->grp.nr_vlan_devs ? true : false;
+}
+EXPORT_SYMBOL(vlan_uses_dev);
+
+static struct sk_buff *vlan_gro_receive(struct list_head *head,
+ struct sk_buff *skb)
+{
+ const struct packet_offload *ptype;
+ unsigned int hlen, off_vlan;
+ struct sk_buff *pp = NULL;
+ struct vlan_hdr *vhdr;
+ struct sk_buff *p;
+ __be16 type;
+ int flush = 1;
+
+ off_vlan = skb_gro_offset(skb);
+ hlen = off_vlan + sizeof(*vhdr);
+ vhdr = skb_gro_header_fast(skb, off_vlan);
+ if (skb_gro_header_hard(skb, hlen)) {
+ vhdr = skb_gro_header_slow(skb, hlen, off_vlan);
+ if (unlikely(!vhdr))
+ goto out;
+ }
+
+ type = vhdr->h_vlan_encapsulated_proto;
+
+ rcu_read_lock();
+ ptype = gro_find_receive_by_type(type);
+ if (!ptype)
+ goto out_unlock;
+
+ flush = 0;
+
+ list_for_each_entry(p, head, list) {
+ struct vlan_hdr *vhdr2;
+
+ if (!NAPI_GRO_CB(p)->same_flow)
+ continue;
+
+ vhdr2 = (struct vlan_hdr *)(p->data + off_vlan);
+ if (compare_vlan_header(vhdr, vhdr2))
+ NAPI_GRO_CB(p)->same_flow = 0;
+ }
+
+ skb_gro_pull(skb, sizeof(*vhdr));
+ skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
+ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
+
+out_unlock:
+ rcu_read_unlock();
+out:
+ skb_gro_flush_final(skb, pp, flush);
+
+ return pp;
+}
+
+static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
+{
+ struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff);
+ __be16 type = vhdr->h_vlan_encapsulated_proto;
+ struct packet_offload *ptype;
+ int err = -ENOENT;
+
+ rcu_read_lock();
+ ptype = gro_find_complete_by_type(type);
+ if (ptype)
+ err = ptype->callbacks.gro_complete(skb, nhoff + sizeof(*vhdr));
+
+ rcu_read_unlock();
+ return err;
+}
+
+static struct packet_offload vlan_packet_offloads[] __read_mostly = {
+ {
+ .type = cpu_to_be16(ETH_P_8021Q),
+ .priority = 10,
+ .callbacks = {
+ .gro_receive = vlan_gro_receive,
+ .gro_complete = vlan_gro_complete,
+ },
+ },
+ {
+ .type = cpu_to_be16(ETH_P_8021AD),
+ .priority = 10,
+ .callbacks = {
+ .gro_receive = vlan_gro_receive,
+ .gro_complete = vlan_gro_complete,
+ },
+ },
+};
+
+static int __init vlan_offload_init(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
+ dev_add_offload(&vlan_packet_offloads[i]);
+
+ return 0;
+}
+
+fs_initcall(vlan_offload_init);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
new file mode 100644
index 000000000..8edac9307
--- /dev/null
+++ b/net/8021q/vlan_dev.c
@@ -0,0 +1,842 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* -*- linux-c -*-
+ * INET 802.1Q VLAN
+ * Ethernet-type device handling.
+ *
+ * Authors: Ben Greear <greearb@candelatech.com>
+ * Please send support related email to: netdev@vger.kernel.org
+ * VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
+ *
+ * Fixes: Mar 22 2001: Martin Bokaemper <mbokaemper@unispherenetworks.com>
+ * - reset skb->pkt_type on incoming packets when MAC was changed
+ * - see that changed MAC is saddr for outgoing packets
+ * Oct 20, 2001: Ard van Breeman:
+ * - Fix MC-list, finally.
+ * - Flush MC-list on VLAN destroy.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+#include <net/arp.h>
+
+#include "vlan.h"
+#include "vlanproc.h"
+#include <linux/if_vlan.h>
+#include <linux/netpoll.h>
+
+/*
+ * Create the VLAN header for an arbitrary protocol layer
+ *
+ * saddr=NULL means use the device source address
+ * daddr=NULL means leave the destination address as-is (e.g. unresolved arp)
+ *
+ * This is called when the SKB is moving down the stack towards the
+ * physical devices.
+ */
+static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type,
+ const void *daddr, const void *saddr,
+ unsigned int len)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ struct vlan_hdr *vhdr;
+ unsigned int vhdrlen = 0;
+ u16 vlan_tci = 0;
+ int rc;
+
+ if (!(vlan->flags & VLAN_FLAG_REORDER_HDR)) {
+ vhdr = skb_push(skb, VLAN_HLEN);
+
+ vlan_tci = vlan->vlan_id;
+ vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);
+ vhdr->h_vlan_TCI = htons(vlan_tci);
+
+ /*
+ * Set the protocol type. For a packet of type ETH_P_802_3/2 we
+ * put the length in here instead.
+ */
+ if (type != ETH_P_802_3 && type != ETH_P_802_2)
+ vhdr->h_vlan_encapsulated_proto = htons(type);
+ else
+ vhdr->h_vlan_encapsulated_proto = htons(len);
+
+ skb->protocol = vlan->vlan_proto;
+ type = ntohs(vlan->vlan_proto);
+ vhdrlen = VLAN_HLEN;
+ }
+
+	/* Before delegating work to the lower layer, enter our MAC address */
+ if (saddr == NULL)
+ saddr = dev->dev_addr;
+
+ /* Now make the underlying real hard header */
+ dev = vlan->real_dev;
+ rc = dev_hard_header(skb, dev, type, daddr, saddr, len + vhdrlen);
+ if (rc > 0)
+ rc += vhdrlen;
+ return rc;
+}
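The TCI built above carries the 12-bit VID in the low bits and the 3-bit priority shifted up by VLAN_PRIO_SHIFT (13). A worked example:

	/* vlan_id 5, egress priority 3: */
	u16 vlan_tci = 5 | (3 << 13);	/* = 0x6005 */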
+
+static inline netdev_tx_t vlan_netpoll_send_skb(struct vlan_dev_priv *vlan, struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ return netpoll_send_skb(vlan->netpoll, skb);
+#else
+ BUG();
+ return NETDEV_TX_OK;
+#endif
+}
+
+static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
+ unsigned int len;
+ int ret;
+
+ /* Handle non-VLAN frames if they are sent to us, for example by DHCP.
+ *
+ * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
+ * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
+ */
+ if (vlan->flags & VLAN_FLAG_REORDER_HDR ||
+ veth->h_vlan_proto != vlan->vlan_proto) {
+ u16 vlan_tci;
+ vlan_tci = vlan->vlan_id;
+ vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);
+ __vlan_hwaccel_put_tag(skb, vlan->vlan_proto, vlan_tci);
+ }
+
+ skb->dev = vlan->real_dev;
+ len = skb->len;
+ if (unlikely(netpoll_tx_running(dev)))
+ return vlan_netpoll_send_skb(vlan, skb);
+
+ ret = dev_queue_xmit(skb);
+
+ if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
+ struct vlan_pcpu_stats *stats;
+
+ stats = this_cpu_ptr(vlan->vlan_pcpu_stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += len;
+ u64_stats_update_end(&stats->syncp);
+ } else {
+ this_cpu_inc(vlan->vlan_pcpu_stats->tx_dropped);
+ }
+
+ return ret;
+}
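+
+/*
+ * Two tagging paths meet above: with REORDER_HDR set (the default),
+ * the tag travels out of band via __vlan_hwaccel_put_tag() and is
+ * inserted by the driver, or by the core in software as a fallback;
+ * without it, vlan_dev_hard_header() has already pushed an in-band
+ * vlan_hdr, so the hwaccel branch is skipped.
+ */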
+
+static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+ unsigned int max_mtu = real_dev->mtu;
+
+ if (netif_reduces_vlan_mtu(real_dev))
+ max_mtu -= VLAN_HLEN;
+ if (max_mtu < new_mtu)
+ return -ERANGE;
+
+ dev->mtu = new_mtu;
+
+ return 0;
+}
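+
+/*
+ * Example: on a real device with a 1500-byte MTU that cannot absorb
+ * the 4-byte tag (netif_reduces_vlan_mtu() true), max_mtu comes out
+ * as 1496, so a request for 1500 here fails with -ERANGE.
+ */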
+
+void vlan_dev_set_ingress_priority(const struct net_device *dev,
+ u32 skb_prio, u16 vlan_prio)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+
+ if (vlan->ingress_priority_map[vlan_prio & 0x7] && !skb_prio)
+ vlan->nr_ingress_mappings--;
+ else if (!vlan->ingress_priority_map[vlan_prio & 0x7] && skb_prio)
+ vlan->nr_ingress_mappings++;
+
+ vlan->ingress_priority_map[vlan_prio & 0x7] = skb_prio;
+}
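+
+/*
+ * Example: vlan_dev_set_ingress_priority(dev, 6, 3) maps received
+ * frames carrying 802.1p priority 3 to skb->priority 6. Note that
+ * nr_ingress_mappings counts only mappings to a non-zero skb_prio;
+ * a zero entry is the "no mapping" default.
+ */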
+
+int vlan_dev_set_egress_priority(const struct net_device *dev,
+ u32 skb_prio, u16 vlan_prio)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ struct vlan_priority_tci_mapping *mp = NULL;
+ struct vlan_priority_tci_mapping *np;
+ u32 vlan_qos = (vlan_prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;
+
+ /* See if a priority mapping already exists. */
+ mp = vlan->egress_priority_map[skb_prio & 0xF];
+ while (mp) {
+ if (mp->priority == skb_prio) {
+ if (mp->vlan_qos && !vlan_qos)
+ vlan->nr_egress_mappings--;
+ else if (!mp->vlan_qos && vlan_qos)
+ vlan->nr_egress_mappings++;
+ mp->vlan_qos = vlan_qos;
+ return 0;
+ }
+ mp = mp->next;
+ }
+
+ /* Create a new mapping then. */
+ mp = vlan->egress_priority_map[skb_prio & 0xF];
+ np = kmalloc(sizeof(struct vlan_priority_tci_mapping), GFP_KERNEL);
+ if (!np)
+ return -ENOBUFS;
+
+ np->next = mp;
+ np->priority = skb_prio;
+ np->vlan_qos = vlan_qos;
+ /* Before inserting this element in hash table, make sure all its fields
+ * are committed to memory.
+ * coupled with smp_rmb() in vlan_dev_get_egress_qos_mask()
+ */
+ smp_wmb();
+ vlan->egress_priority_map[skb_prio & 0xF] = np;
+ if (vlan_qos)
+ vlan->nr_egress_mappings++;
+ return 0;
+}
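+
+/*
+ * These maps are normally set from userspace via netlink
+ * (IFLA_VLAN_EGRESS_QOS, see vlan_netlink.c); with iproute2 an
+ * equivalent invocation would be (interface names are placeholders):
+ *
+ *   ip link add link eth0 name eth0.100 type vlan id 100 \
+ *           egress-qos-map 0:5
+ *
+ * mapping skb->priority 0 to 802.1p priority 5 on transmit.
+ */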
+
+/* Flags are defined in the vlan_flags enum in
+ * include/uapi/linux/if_vlan.h file.
+ */
+int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ u32 old_flags = vlan->flags;
+
+ if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
+ VLAN_FLAG_LOOSE_BINDING | VLAN_FLAG_MVRP |
+ VLAN_FLAG_BRIDGE_BINDING))
+ return -EINVAL;
+
+ vlan->flags = (old_flags & ~mask) | (flags & mask);
+
+ if (netif_running(dev) && (vlan->flags ^ old_flags) & VLAN_FLAG_GVRP) {
+ if (vlan->flags & VLAN_FLAG_GVRP)
+ vlan_gvrp_request_join(dev);
+ else
+ vlan_gvrp_request_leave(dev);
+ }
+
+ if (netif_running(dev) && (vlan->flags ^ old_flags) & VLAN_FLAG_MVRP) {
+ if (vlan->flags & VLAN_FLAG_MVRP)
+ vlan_mvrp_request_join(dev);
+ else
+ vlan_mvrp_request_leave(dev);
+ }
+ return 0;
+}
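+
+/*
+ * For reference, the flag values from include/uapi/linux/if_vlan.h:
+ * REORDER_HDR 0x1, GVRP 0x2, LOOSE_BINDING 0x4, MVRP 0x8 and
+ * BRIDGE_BINDING 0x10. GVRP/MVRP joins and leaves are only issued
+ * here while the device is running; open/stop handle the rest.
+ */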
+
+void vlan_dev_get_realdev_name(const struct net_device *dev, char *result)
+{
+ strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23);
+}
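+
+/*
+ * The 23 above bounds the copy to the device-name field of struct
+ * vlan_ioctl_args; since IFNAMSIZ (16) is smaller, the name always
+ * fits and strncpy() NUL-pads the remainder.
+ */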
+
+bool vlan_dev_inherit_address(struct net_device *dev,
+ struct net_device *real_dev)
+{
+ if (dev->addr_assign_type != NET_ADDR_STOLEN)
+ return false;
+
+ ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+ return true;
+}
+
+static int vlan_dev_open(struct net_device *dev)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ struct net_device *real_dev = vlan->real_dev;
+ int err;
+
+ if (!(real_dev->flags & IFF_UP) &&
+ !(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
+ return -ENETDOWN;
+
+ if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr) &&
+ !vlan_dev_inherit_address(dev, real_dev)) {
+ err = dev_uc_add(real_dev, dev->dev_addr);
+ if (err < 0)
+ goto out;
+ }
+
+ if (dev->flags & IFF_ALLMULTI) {
+ err = dev_set_allmulti(real_dev, 1);
+ if (err < 0)
+ goto del_unicast;
+ }
+ if (dev->flags & IFF_PROMISC) {
+ err = dev_set_promiscuity(real_dev, 1);
+ if (err < 0)
+ goto clear_allmulti;
+ }
+
+ ether_addr_copy(vlan->real_dev_addr, real_dev->dev_addr);
+
+ if (vlan->flags & VLAN_FLAG_GVRP)
+ vlan_gvrp_request_join(dev);
+
+ if (vlan->flags & VLAN_FLAG_MVRP)
+ vlan_mvrp_request_join(dev);
+
+ if (netif_carrier_ok(real_dev) &&
+ !(vlan->flags & VLAN_FLAG_BRIDGE_BINDING))
+ netif_carrier_on(dev);
+ return 0;
+
+clear_allmulti:
+ if (dev->flags & IFF_ALLMULTI)
+ dev_set_allmulti(real_dev, -1);
+del_unicast:
+ if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
+ dev_uc_del(real_dev, dev->dev_addr);
+out:
+ netif_carrier_off(dev);
+ return err;
+}
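+
+/*
+ * dev_set_allmulti() and dev_set_promiscuity() are reference counted
+ * (+1 here, -1 in vlan_dev_stop() and on the unwind labels above), so
+ * several vlan devices stacked on one real device compose correctly.
+ */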
+
+static int vlan_dev_stop(struct net_device *dev)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ struct net_device *real_dev = vlan->real_dev;
+
+ dev_mc_unsync(real_dev, dev);
+ dev_uc_unsync(real_dev, dev);
+ if (dev->flags & IFF_ALLMULTI)
+ dev_set_allmulti(real_dev, -1);
+ if (dev->flags & IFF_PROMISC)
+ dev_set_promiscuity(real_dev, -1);
+
+ if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
+ dev_uc_del(real_dev, dev->dev_addr);
+
+ if (!(vlan->flags & VLAN_FLAG_BRIDGE_BINDING))
+ netif_carrier_off(dev);
+ return 0;
+}
+
+static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
+{
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+ struct sockaddr *addr = p;
+ int err;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (!(dev->flags & IFF_UP))
+ goto out;
+
+ if (!ether_addr_equal(addr->sa_data, real_dev->dev_addr)) {
+ err = dev_uc_add(real_dev, addr->sa_data);
+ if (err < 0)
+ return err;
+ }
+
+ if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
+ dev_uc_del(real_dev, dev->dev_addr);
+
+out:
+ ether_addr_copy(dev->dev_addr, addr->sa_data);
+ return 0;
+}
+
+static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ struct ifreq ifrr;
+ int err = -EOPNOTSUPP;
+
+ strncpy(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
+ ifrr.ifr_ifru = ifr->ifr_ifru;
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ if (!net_eq(dev_net(dev), dev_net(real_dev)))
+ break;
+ fallthrough;
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ case SIOCGHWTSTAMP:
+ if (netif_device_present(real_dev) && ops->ndo_do_ioctl)
+ err = ops->ndo_do_ioctl(real_dev, &ifrr, cmd);
+ break;
+ }
+
+ if (!err)
+ ifr->ifr_ifru = ifrr.ifr_ifru;
+
+ return err;
+}
+
+static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
+{
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ int err = 0;
+
+ if (netif_device_present(real_dev) && ops->ndo_neigh_setup)
+ err = ops->ndo_neigh_setup(real_dev, pa);
+
+ return err;
+}
+
+#if IS_ENABLED(CONFIG_FCOE)
+static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid,
+ struct scatterlist *sgl, unsigned int sgc)
+{
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ int rc = 0;
+
+ if (ops->ndo_fcoe_ddp_setup)
+ rc = ops->ndo_fcoe_ddp_setup(real_dev, xid, sgl, sgc);
+
+ return rc;
+}
+
+static int vlan_dev_fcoe_ddp_done(struct net_device *dev, u16 xid)
+{
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ int len = 0;
+
+ if (ops->ndo_fcoe_ddp_done)
+ len = ops->ndo_fcoe_ddp_done(real_dev, xid);
+
+ return len;
+}
+
+static int vlan_dev_fcoe_enable(struct net_device *dev)
+{
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ int rc = -EINVAL;
+
+ if (ops->ndo_fcoe_enable)
+ rc = ops->ndo_fcoe_enable(real_dev);
+ return rc;
+}
+
+static int vlan_dev_fcoe_disable(struct net_device *dev)
+{
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ int rc = -EINVAL;
+
+ if (ops->ndo_fcoe_disable)
+ rc = ops->ndo_fcoe_disable(real_dev);
+ return rc;
+}
+
+static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
+ struct scatterlist *sgl, unsigned int sgc)
+{
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ int rc = 0;
+
+ if (ops->ndo_fcoe_ddp_target)
+ rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc);
+
+ return rc;
+}
+#endif
+
+#ifdef NETDEV_FCOE_WWNN
+static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
+{
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ int rc = -EINVAL;
+
+ if (ops->ndo_fcoe_get_wwn)
+ rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type);
+ return rc;
+}
+#endif
+
+static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
+{
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+
+ if (dev->flags & IFF_UP) {
+ if (change & IFF_ALLMULTI)
+ dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+ if (change & IFF_PROMISC)
+ dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 1 : -1);
+ }
+}
+
+static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
+{
+ dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+ dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+}
+
+/*
+ * vlan network devices have devices nesting below them, and are a special
+ * "super class" of normal network devices; split their locks off into a
+ * separate class since they always nest.
+ */
+static struct lock_class_key vlan_netdev_xmit_lock_key;
+static struct lock_class_key vlan_netdev_addr_lock_key;
+
+static void vlan_dev_set_lockdep_one(struct net_device *dev,
+ struct netdev_queue *txq,
+ void *unused)
+{
+ lockdep_set_class(&txq->_xmit_lock, &vlan_netdev_xmit_lock_key);
+}
+
+static void vlan_dev_set_lockdep_class(struct net_device *dev)
+{
+ lockdep_set_class(&dev->addr_list_lock,
+ &vlan_netdev_addr_lock_key);
+ netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, NULL);
+}
+
+static const struct header_ops vlan_header_ops = {
+ .create = vlan_dev_hard_header,
+ .parse = eth_header_parse,
+};
+
+static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type,
+ const void *daddr, const void *saddr,
+ unsigned int len)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ struct net_device *real_dev = vlan->real_dev;
+
+ if (saddr == NULL)
+ saddr = dev->dev_addr;
+
+ return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
+}
+
+static const struct header_ops vlan_passthru_header_ops = {
+ .create = vlan_passthru_hard_header,
+ .parse = eth_header_parse,
+};
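+
+/*
+ * vlan_dev_init() below picks between the two header_ops: the
+ * passthru variant when the real device can insert the tag in
+ * hardware for this protocol (no VLAN_HLEN added to hard_header_len),
+ * the plain variant otherwise, where vlan_dev_hard_header() pushes
+ * the tag in software.
+ */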
+
+static struct device_type vlan_type = {
+ .name = "vlan",
+};
+
+static const struct net_device_ops vlan_netdev_ops;
+
+static int vlan_dev_init(struct net_device *dev)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ struct net_device *real_dev = vlan->real_dev;
+
+ netif_carrier_off(dev);
+
+ /* Inherit the real device's flags (IFF_BROADCAST, IFF_MULTICAST, ...),
+ * minus those that must stay per-device: up, promisc/allmulti,
+ * master/slave.
+ */
+ dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
+ IFF_MASTER | IFF_SLAVE);
+ dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
+ (1<<__LINK_STATE_DORMANT))) |
+ (1<<__LINK_STATE_PRESENT);
+
+ if (vlan->flags & VLAN_FLAG_BRIDGE_BINDING)
+ dev->state |= (1 << __LINK_STATE_NOCARRIER);
+
+ dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG |
+ NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE |
+ NETIF_F_GSO_ENCAP_ALL |
+ NETIF_F_HIGHDMA | NETIF_F_SCTP_CRC |
+ NETIF_F_ALL_FCOE;
+
+ dev->features |= dev->hw_features | NETIF_F_LLTX;
+ dev->gso_max_size = real_dev->gso_max_size;
+ dev->gso_max_segs = real_dev->gso_max_segs;
+ if (dev->features & NETIF_F_VLAN_FEATURES)
+ netdev_warn(real_dev, "VLAN features are set incorrectly. Q-in-Q configurations may not work correctly.\n");
+
+ dev->vlan_features = real_dev->vlan_features & ~NETIF_F_ALL_FCOE;
+ dev->hw_enc_features = vlan_tnl_features(real_dev);
+ dev->mpls_features = real_dev->mpls_features;
+
+ /* dev_id lets IPv6 derive distinct link-local addresses on
+ * interfaces sharing the real device's MAC address.
+ */
+ dev->dev_id = real_dev->dev_id;
+
+ if (is_zero_ether_addr(dev->dev_addr)) {
+ ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
+ dev->addr_assign_type = NET_ADDR_STOLEN;
+ }
+ if (is_zero_ether_addr(dev->broadcast))
+ memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
+
+#if IS_ENABLED(CONFIG_FCOE)
+ dev->fcoe_ddp_xid = real_dev->fcoe_ddp_xid;
+#endif
+
+ dev->needed_headroom = real_dev->needed_headroom;
+ if (vlan_hw_offload_capable(real_dev->features, vlan->vlan_proto)) {
+ dev->header_ops = &vlan_passthru_header_ops;
+ dev->hard_header_len = real_dev->hard_header_len;
+ } else {
+ dev->header_ops = &vlan_header_ops;
+ dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
+ }
+
+ dev->netdev_ops = &vlan_netdev_ops;
+
+ SET_NETDEV_DEVTYPE(dev, &vlan_type);
+
+ vlan_dev_set_lockdep_class(dev);
+
+ vlan->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
+ if (!vlan->vlan_pcpu_stats)
+ return -ENOMEM;
+
+ /* Get vlan's reference to real_dev */
+ dev_hold(real_dev);
+
+ return 0;
+}
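+
+/*
+ * The dev_hold() above is paired with the dev_put() in
+ * vlan_dev_free(), keeping the real device alive for the vlan
+ * device's entire lifetime.
+ */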
+
+/* Note: this function might be called multiple times for the same device. */
+void vlan_dev_uninit(struct net_device *dev)
+{
+ struct vlan_priority_tci_mapping *pm;
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
+ while ((pm = vlan->egress_priority_map[i]) != NULL) {
+ vlan->egress_priority_map[i] = pm->next;
+ kfree(pm);
+ }
+ }
+}
+
+static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+ netdev_features_t old_features = features;
+ netdev_features_t lower_features;
+
+ lower_features = netdev_intersect_features((real_dev->vlan_features |
+ NETIF_F_RXCSUM),
+ real_dev->features);
+
+ /* Add HW_CSUM setting to preserve user ability to control
+ * checksum offload on the vlan device.
+ */
+ if (lower_features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
+ lower_features |= NETIF_F_HW_CSUM;
+ features = netdev_intersect_features(features, lower_features);
+ features |= old_features & (NETIF_F_SOFT_FEATURES | NETIF_F_GSO_SOFTWARE);
+ features |= NETIF_F_LLTX;
+
+ return features;
+}
+
+static int vlan_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+
+ return __ethtool_get_link_ksettings(vlan->real_dev, cmd);
+}
+
+static void vlan_ethtool_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, vlan_fullname, sizeof(info->driver));
+ strlcpy(info->version, vlan_version, sizeof(info->version));
+ strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
+}
+
+static int vlan_ethtool_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ const struct ethtool_ops *ops = vlan->real_dev->ethtool_ops;
+ struct phy_device *phydev = vlan->real_dev->phydev;
+
+ if (phy_has_tsinfo(phydev)) {
+ return phy_ts_info(phydev, info);
+ } else if (ops->get_ts_info) {
+ return ops->get_ts_info(vlan->real_dev, info);
+ } else {
+ info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ info->phc_index = -1;
+ }
+
+ return 0;
+}
+
+static void vlan_dev_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct vlan_pcpu_stats *p;
+ u32 rx_errors = 0, tx_dropped = 0;
+ int i;
+
+ for_each_possible_cpu(i) {
+ u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
+ unsigned int start;
+
+ p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
+ do {
+ start = u64_stats_fetch_begin_irq(&p->syncp);
+ rxpackets = p->rx_packets;
+ rxbytes = p->rx_bytes;
+ rxmulticast = p->rx_multicast;
+ txpackets = p->tx_packets;
+ txbytes = p->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+
+ stats->rx_packets += rxpackets;
+ stats->rx_bytes += rxbytes;
+ stats->multicast += rxmulticast;
+ stats->tx_packets += txpackets;
+ stats->tx_bytes += txbytes;
+ /* rx_errors & tx_dropped are u32 */
+ rx_errors += p->rx_errors;
+ tx_dropped += p->tx_dropped;
+ }
+ stats->rx_errors = rx_errors;
+ stats->tx_dropped = tx_dropped;
+}
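+
+/*
+ * The u64_stats_fetch_begin_irq()/retry_irq() pair above is a
+ * seqcount read loop: on 32-bit SMP the per-cpu counters are reread
+ * if a writer updated them mid-read; on 64-bit it compiles away.
+ */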
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void vlan_dev_poll_controller(struct net_device *dev)
+{
+ return;
+}
+
+static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ struct net_device *real_dev = vlan->real_dev;
+ struct netpoll *netpoll;
+ int err = 0;
+
+ netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
+ err = -ENOMEM;
+ if (!netpoll)
+ goto out;
+
+ err = __netpoll_setup(netpoll, real_dev);
+ if (err) {
+ kfree(netpoll);
+ goto out;
+ }
+
+ vlan->netpoll = netpoll;
+
+out:
+ return err;
+}
+
+static void vlan_dev_netpoll_cleanup(struct net_device *dev)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ struct netpoll *netpoll = vlan->netpoll;
+
+ if (!netpoll)
+ return;
+
+ vlan->netpoll = NULL;
+ __netpoll_free(netpoll);
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+static int vlan_dev_get_iflink(const struct net_device *dev)
+{
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+
+ return real_dev->ifindex;
+}
+
+static const struct ethtool_ops vlan_ethtool_ops = {
+ .get_link_ksettings = vlan_ethtool_get_link_ksettings,
+ .get_drvinfo = vlan_ethtool_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = vlan_ethtool_get_ts_info,
+};
+
+static const struct net_device_ops vlan_netdev_ops = {
+ .ndo_change_mtu = vlan_dev_change_mtu,
+ .ndo_init = vlan_dev_init,
+ .ndo_uninit = vlan_dev_uninit,
+ .ndo_open = vlan_dev_open,
+ .ndo_stop = vlan_dev_stop,
+ .ndo_start_xmit = vlan_dev_hard_start_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = vlan_dev_set_mac_address,
+ .ndo_set_rx_mode = vlan_dev_set_rx_mode,
+ .ndo_change_rx_flags = vlan_dev_change_rx_flags,
+ .ndo_do_ioctl = vlan_dev_ioctl,
+ .ndo_neigh_setup = vlan_dev_neigh_setup,
+ .ndo_get_stats64 = vlan_dev_get_stats64,
+#if IS_ENABLED(CONFIG_FCOE)
+ .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
+ .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
+ .ndo_fcoe_enable = vlan_dev_fcoe_enable,
+ .ndo_fcoe_disable = vlan_dev_fcoe_disable,
+ .ndo_fcoe_ddp_target = vlan_dev_fcoe_ddp_target,
+#endif
+#ifdef NETDEV_FCOE_WWNN
+ .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = vlan_dev_poll_controller,
+ .ndo_netpoll_setup = vlan_dev_netpoll_setup,
+ .ndo_netpoll_cleanup = vlan_dev_netpoll_cleanup,
+#endif
+ .ndo_fix_features = vlan_dev_fix_features,
+ .ndo_get_iflink = vlan_dev_get_iflink,
+};
+
+static void vlan_dev_free(struct net_device *dev)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+
+ free_percpu(vlan->vlan_pcpu_stats);
+ vlan->vlan_pcpu_stats = NULL;
+
+ /* Get rid of the vlan's reference to real_dev */
+ dev_put(vlan->real_dev);
+}
+
+void vlan_setup(struct net_device *dev)
+{
+ ether_setup(dev);
+
+ dev->priv_flags |= IFF_802_1Q_VLAN | IFF_NO_QUEUE;
+ dev->priv_flags |= IFF_UNICAST_FLT;
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ netif_keep_dst(dev);
+
+ dev->netdev_ops = &vlan_netdev_ops;
+ dev->needs_free_netdev = true;
+ dev->priv_destructor = vlan_dev_free;
+ dev->ethtool_ops = &vlan_ethtool_ops;
+
+ dev->min_mtu = 0;
+ dev->max_mtu = ETH_MAX_MTU;
+
+ eth_zero_addr(dev->broadcast);
+}
diff --git a/net/8021q/vlan_gvrp.c b/net/8021q/vlan_gvrp.c
new file mode 100644
index 000000000..6b34b72aa
--- /dev/null
+++ b/net/8021q/vlan_gvrp.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * IEEE 802.1Q GARP VLAN Registration Protocol (GVRP)
+ *
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ */
+#include <linux/types.h>
+#include <linux/if_vlan.h>
+#include <net/garp.h>
+#include "vlan.h"
+
+#define GARP_GVRP_ADDRESS { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x21 }
+
+enum gvrp_attributes {
+ GVRP_ATTR_INVALID,
+ GVRP_ATTR_VID,
+ __GVRP_ATTR_MAX
+};
+#define GVRP_ATTR_MAX (__GVRP_ATTR_MAX - 1)
+
+static struct garp_application vlan_gvrp_app __read_mostly = {
+ .proto.group_address = GARP_GVRP_ADDRESS,
+ .maxattr = GVRP_ATTR_MAX,
+ .type = GARP_APPLICATION_GVRP,
+};
+
+int vlan_gvrp_request_join(const struct net_device *dev)
+{
+ const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ __be16 vlan_id = htons(vlan->vlan_id);
+
+ if (vlan->vlan_proto != htons(ETH_P_8021Q))
+ return 0;
+ return garp_request_join(vlan->real_dev, &vlan_gvrp_app,
+ &vlan_id, sizeof(vlan_id), GVRP_ATTR_VID);
+}
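+
+/*
+ * Note the vlan_proto check: GVRP registration is defined only for
+ * 802.1Q, so 802.1ad (Q-in-Q) vlan devices skip the join and report
+ * success. The MVRP code below applies the same rule.
+ */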
+
+void vlan_gvrp_request_leave(const struct net_device *dev)
+{
+ const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ __be16 vlan_id = htons(vlan->vlan_id);
+
+ if (vlan->vlan_proto != htons(ETH_P_8021Q))
+ return;
+ garp_request_leave(vlan->real_dev, &vlan_gvrp_app,
+ &vlan_id, sizeof(vlan_id), GVRP_ATTR_VID);
+}
+
+int vlan_gvrp_init_applicant(struct net_device *dev)
+{
+ return garp_init_applicant(dev, &vlan_gvrp_app);
+}
+
+void vlan_gvrp_uninit_applicant(struct net_device *dev)
+{
+ garp_uninit_applicant(dev, &vlan_gvrp_app);
+}
+
+int __init vlan_gvrp_init(void)
+{
+ return garp_register_application(&vlan_gvrp_app);
+}
+
+void vlan_gvrp_uninit(void)
+{
+ garp_unregister_application(&vlan_gvrp_app);
+}
diff --git a/net/8021q/vlan_mvrp.c b/net/8021q/vlan_mvrp.c
new file mode 100644
index 000000000..689eceeaa
--- /dev/null
+++ b/net/8021q/vlan_mvrp.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * IEEE 802.1Q Multiple VLAN Registration Protocol (MVRP)
+ *
+ * Copyright (c) 2012 Massachusetts Institute of Technology
+ *
+ * Adapted from code in net/8021q/vlan_gvrp.c
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ */
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <net/mrp.h>
+#include "vlan.h"
+
+#define MRP_MVRP_ADDRESS { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x21 }
+
+enum mvrp_attributes {
+ MVRP_ATTR_INVALID,
+ MVRP_ATTR_VID,
+ __MVRP_ATTR_MAX
+};
+#define MVRP_ATTR_MAX (__MVRP_ATTR_MAX - 1)
+
+static struct mrp_application vlan_mrp_app __read_mostly = {
+ .type = MRP_APPLICATION_MVRP,
+ .maxattr = MVRP_ATTR_MAX,
+ .pkttype.type = htons(ETH_P_MVRP),
+ .group_address = MRP_MVRP_ADDRESS,
+ .version = 0,
+};
+
+int vlan_mvrp_request_join(const struct net_device *dev)
+{
+ const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ __be16 vlan_id = htons(vlan->vlan_id);
+
+ if (vlan->vlan_proto != htons(ETH_P_8021Q))
+ return 0;
+ return mrp_request_join(vlan->real_dev, &vlan_mrp_app,
+ &vlan_id, sizeof(vlan_id), MVRP_ATTR_VID);
+}
+
+void vlan_mvrp_request_leave(const struct net_device *dev)
+{
+ const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ __be16 vlan_id = htons(vlan->vlan_id);
+
+ if (vlan->vlan_proto != htons(ETH_P_8021Q))
+ return;
+ mrp_request_leave(vlan->real_dev, &vlan_mrp_app,
+ &vlan_id, sizeof(vlan_id), MVRP_ATTR_VID);
+}
+
+int vlan_mvrp_init_applicant(struct net_device *dev)
+{
+ return mrp_init_applicant(dev, &vlan_mrp_app);
+}
+
+void vlan_mvrp_uninit_applicant(struct net_device *dev)
+{
+ mrp_uninit_applicant(dev, &vlan_mrp_app);
+}
+
+int __init vlan_mvrp_init(void)
+{
+ return mrp_register_application(&vlan_mrp_app);
+}
+
+void vlan_mvrp_uninit(void)
+{
+ mrp_unregister_application(&vlan_mrp_app);
+}
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
new file mode 100644
index 000000000..0db85aeb1
--- /dev/null
+++ b/net/8021q/vlan_netlink.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * VLAN netlink control interface
+ *
+ * Copyright (c) 2007 Patrick McHardy <kaber@trash.net>
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/module.h>
+#include <net/net_namespace.h>
+#include <net/netlink.h>
+#include <net/rtnetlink.h>
+#include "vlan.h"
+
+
+static const struct nla_policy vlan_policy[IFLA_VLAN_MAX + 1] = {
+ [IFLA_VLAN_ID] = { .type = NLA_U16 },
+ [IFLA_VLAN_FLAGS] = { .len = sizeof(struct ifla_vlan_flags) },
+ [IFLA_VLAN_EGRESS_QOS] = { .type = NLA_NESTED },
+ [IFLA_VLAN_INGRESS_QOS] = { .type = NLA_NESTED },
+ [IFLA_VLAN_PROTOCOL] = { .type = NLA_U16 },
+};
+
+static const struct nla_policy vlan_map_policy[IFLA_VLAN_QOS_MAX + 1] = {
+ [IFLA_VLAN_QOS_MAPPING] = { .len = sizeof(struct ifla_vlan_qos_mapping) },
+};
+
+
+static inline int vlan_validate_qos_map(struct nlattr *attr)
+{
+ if (!attr)
+ return 0;
+ return nla_validate_nested_deprecated(attr, IFLA_VLAN_QOS_MAX,
+ vlan_map_policy, NULL);
+}
+
+static int vlan_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct ifla_vlan_flags *flags;
+ u16 id;
+ int err;
+
+ if (tb[IFLA_ADDRESS]) {
+ if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid link address");
+ return -EINVAL;
+ }
+ if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid link address");
+ return -EADDRNOTAVAIL;
+ }
+ }
+
+ if (!data) {
+ NL_SET_ERR_MSG_MOD(extack, "VLAN properties not specified");
+ return -EINVAL;
+ }
+
+ if (data[IFLA_VLAN_PROTOCOL]) {
+ switch (nla_get_be16(data[IFLA_VLAN_PROTOCOL])) {
+ case htons(ETH_P_8021Q):
+ case htons(ETH_P_8021AD):
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Invalid VLAN protocol");
+ return -EPROTONOSUPPORT;
+ }
+ }
+
+ if (data[IFLA_VLAN_ID]) {
+ id = nla_get_u16(data[IFLA_VLAN_ID]);
+ if (id >= VLAN_VID_MASK) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid VLAN id");
+ return -ERANGE;
+ }
+ }
+ if (data[IFLA_VLAN_FLAGS]) {
+ flags = nla_data(data[IFLA_VLAN_FLAGS]);
+ if ((flags->flags & flags->mask) &
+ ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
+ VLAN_FLAG_LOOSE_BINDING | VLAN_FLAG_MVRP |
+ VLAN_FLAG_BRIDGE_BINDING)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid VLAN flags");
+ return -EINVAL;
+ }
+ }
+
+ err = vlan_validate_qos_map(data[IFLA_VLAN_INGRESS_QOS]);
+ if (err < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid ingress QOS map");
+ return err;
+ }
+ err = vlan_validate_qos_map(data[IFLA_VLAN_EGRESS_QOS]);
+ if (err < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid egress QOS map");
+ return err;
+ }
+ return 0;
+}
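+
+/*
+ * VLAN_VID_MASK is 0xfff, so the check above accepts ids 0..4094;
+ * 4095 is reserved by IEEE 802.1Q and rejected with -ERANGE.
+ */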
+
+static int vlan_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct ifla_vlan_flags *flags;
+ struct ifla_vlan_qos_mapping *m;
+ struct nlattr *attr;
+ int rem, err;
+
+ if (data[IFLA_VLAN_FLAGS]) {
+ flags = nla_data(data[IFLA_VLAN_FLAGS]);
+ err = vlan_dev_change_flags(dev, flags->flags, flags->mask);
+ if (err)
+ return err;
+ }
+ if (data[IFLA_VLAN_INGRESS_QOS]) {
+ nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) {
+ m = nla_data(attr);
+ vlan_dev_set_ingress_priority(dev, m->to, m->from);
+ }
+ }
+ if (data[IFLA_VLAN_EGRESS_QOS]) {
+ nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) {
+ m = nla_data(attr);
+ err = vlan_dev_set_egress_priority(dev, m->from, m->to);
+ if (err)
+ return err;
+ }
+ }
+ return 0;
+}
+
+static int vlan_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ struct net_device *real_dev;
+ unsigned int max_mtu;
+ __be16 proto;
+ int err;
+
+ if (!data[IFLA_VLAN_ID]) {
+ NL_SET_ERR_MSG_MOD(extack, "VLAN id not specified");
+ return -EINVAL;
+ }
+
+ if (!tb[IFLA_LINK]) {
+ NL_SET_ERR_MSG_MOD(extack, "link not specified");
+ return -EINVAL;
+ }
+
+ real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+ if (!real_dev) {
+ NL_SET_ERR_MSG_MOD(extack, "link does not exist");
+ return -ENODEV;
+ }
+
+ if (data[IFLA_VLAN_PROTOCOL])
+ proto = nla_get_be16(data[IFLA_VLAN_PROTOCOL]);
+ else
+ proto = htons(ETH_P_8021Q);
+
+ vlan->vlan_proto = proto;
+ vlan->vlan_id = nla_get_u16(data[IFLA_VLAN_ID]);
+ vlan->real_dev = real_dev;
+ dev->priv_flags |= (real_dev->priv_flags & IFF_XMIT_DST_RELEASE);
+ vlan->flags = VLAN_FLAG_REORDER_HDR;
+
+ err = vlan_check_real_dev(real_dev, vlan->vlan_proto, vlan->vlan_id,
+ extack);
+ if (err < 0)
+ return err;
+
+ max_mtu = netif_reduces_vlan_mtu(real_dev) ? real_dev->mtu - VLAN_HLEN :
+ real_dev->mtu;
+ if (!tb[IFLA_MTU])
+ dev->mtu = max_mtu;
+ else if (dev->mtu > max_mtu)
+ return -EINVAL;
+
+ err = vlan_changelink(dev, tb, data, extack);
+ if (!err)
+ err = register_vlan_dev(dev, extack);
+ if (err)
+ vlan_dev_uninit(dev);
+ return err;
+}
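+
+/*
+ * The attributes handled above map onto the usual iproute2 command
+ * (interface names are placeholders):
+ *
+ *   ip link add link eth0 name eth0.100 type vlan \
+ *           protocol 802.1Q id 100
+ *
+ * IFLA_LINK carries eth0's ifindex, IFLA_VLAN_ID the id and
+ * IFLA_VLAN_PROTOCOL the 802.1Q/802.1ad ethertype.
+ */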
+
+static inline size_t vlan_qos_map_size(unsigned int n)
+{
+ if (n == 0)
+ return 0;
+ /* IFLA_VLAN_{EGRESS,INGRESS}_QOS + n * IFLA_VLAN_QOS_MAPPING */
+ return nla_total_size(sizeof(struct nlattr)) +
+ nla_total_size(sizeof(struct ifla_vlan_qos_mapping)) * n;
+}
+
+static size_t vlan_get_size(const struct net_device *dev)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+
+ return nla_total_size(2) + /* IFLA_VLAN_PROTOCOL */
+ nla_total_size(2) + /* IFLA_VLAN_ID */
+ nla_total_size(sizeof(struct ifla_vlan_flags)) + /* IFLA_VLAN_FLAGS */
+ vlan_qos_map_size(vlan->nr_ingress_mappings) +
+ vlan_qos_map_size(vlan->nr_egress_mappings);
+}
+
+static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ struct vlan_priority_tci_mapping *pm;
+ struct ifla_vlan_flags f;
+ struct ifla_vlan_qos_mapping m;
+ struct nlattr *nest;
+ unsigned int i;
+
+ if (nla_put_be16(skb, IFLA_VLAN_PROTOCOL, vlan->vlan_proto) ||
+ nla_put_u16(skb, IFLA_VLAN_ID, vlan->vlan_id))
+ goto nla_put_failure;
+ if (vlan->flags) {
+ f.flags = vlan->flags;
+ f.mask = ~0;
+ if (nla_put(skb, IFLA_VLAN_FLAGS, sizeof(f), &f))
+ goto nla_put_failure;
+ }
+ if (vlan->nr_ingress_mappings) {
+ nest = nla_nest_start_noflag(skb, IFLA_VLAN_INGRESS_QOS);
+ if (nest == NULL)
+ goto nla_put_failure;
+
+ for (i = 0; i < ARRAY_SIZE(vlan->ingress_priority_map); i++) {
+ if (!vlan->ingress_priority_map[i])
+ continue;
+
+ m.from = i;
+ m.to = vlan->ingress_priority_map[i];
+ if (nla_put(skb, IFLA_VLAN_QOS_MAPPING,
+ sizeof(m), &m))
+ goto nla_put_failure;
+ }
+ nla_nest_end(skb, nest);
+ }
+
+ if (vlan->nr_egress_mappings) {
+ nest = nla_nest_start_noflag(skb, IFLA_VLAN_EGRESS_QOS);
+ if (nest == NULL)
+ goto nla_put_failure;
+
+ for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
+ for (pm = vlan->egress_priority_map[i]; pm;
+ pm = pm->next) {
+ if (!pm->vlan_qos)
+ continue;
+
+ m.from = pm->priority;
+ m.to = (pm->vlan_qos >> 13) & 0x7;
+ if (nla_put(skb, IFLA_VLAN_QOS_MAPPING,
+ sizeof(m), &m))
+ goto nla_put_failure;
+ }
+ }
+ nla_nest_end(skb, nest);
+ }
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static struct net *vlan_get_link_net(const struct net_device *dev)
+{
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+
+ return dev_net(real_dev);
+}
+
+struct rtnl_link_ops vlan_link_ops __read_mostly = {
+ .kind = "vlan",
+ .maxtype = IFLA_VLAN_MAX,
+ .policy = vlan_policy,
+ .priv_size = sizeof(struct vlan_dev_priv),
+ .setup = vlan_setup,
+ .validate = vlan_validate,
+ .newlink = vlan_newlink,
+ .changelink = vlan_changelink,
+ .dellink = unregister_vlan_dev,
+ .get_size = vlan_get_size,
+ .fill_info = vlan_fill_info,
+ .get_link_net = vlan_get_link_net,
+};
+
+int __init vlan_netlink_init(void)
+{
+ return rtnl_link_register(&vlan_link_ops);
+}
+
+void __exit vlan_netlink_fini(void)
+{
+ rtnl_link_unregister(&vlan_link_ops);
+}
+
+MODULE_ALIAS_RTNL_LINK("vlan");
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
new file mode 100644
index 000000000..ec87dea23
--- /dev/null
+++ b/net/8021q/vlanproc.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/******************************************************************************
+ * vlanproc.c VLAN Module. /proc filesystem interface.
+ *
+ * This module is completely hardware-independent and provides
+ * access to the router using Linux /proc filesystem.
+ *
+ * Author: Ben Greear, <greearb@candelatech.com> copied from wanproc.c
+ * by: Gene Kozin <genek@compuserve.com>
+ *
+ * Copyright: (c) 1998 Ben Greear
+ *
+ * ============================================================================
+ * Jan 20, 1998 Ben Greear Initial Version
+ *****************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/fs.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include "vlanproc.h"
+#include "vlan.h"
+
+/****** Function Prototypes *************************************************/
+
+/* Methods for preparing data for reading proc entries */
+static int vlan_seq_show(struct seq_file *seq, void *v);
+static void *vlan_seq_start(struct seq_file *seq, loff_t *pos);
+static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos);
+static void vlan_seq_stop(struct seq_file *seq, void *);
+static int vlandev_seq_show(struct seq_file *seq, void *v);
+
+/*
+ * Global Data
+ */
+
+
+/*
+ * Names of the proc directory entries
+ */
+
+static const char name_root[] = "vlan";
+static const char name_conf[] = "config";
+
+/*
+ * Structures for interfacing with the /proc filesystem.
+ * VLAN creates its own directory /proc/net/vlan with the following
+ * entries:
+ * config device status/configuration
+ * <device> entry for each device
+ */
+
+/*
+ * Generic /proc/net/vlan/<file> file and inode operations
+ */
+
+static const struct seq_operations vlan_seq_ops = {
+ .start = vlan_seq_start,
+ .next = vlan_seq_next,
+ .stop = vlan_seq_stop,
+ .show = vlan_seq_show,
+};
+
+/*
+ * Proc filesystem directory entries.
+ */
+
+/* Strings */
+static const char *const vlan_name_type_str[VLAN_NAME_TYPE_HIGHEST] = {
+ [VLAN_NAME_TYPE_RAW_PLUS_VID] = "VLAN_NAME_TYPE_RAW_PLUS_VID",
+ [VLAN_NAME_TYPE_PLUS_VID_NO_PAD] = "VLAN_NAME_TYPE_PLUS_VID_NO_PAD",
+ [VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD] = "VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD",
+ [VLAN_NAME_TYPE_PLUS_VID] = "VLAN_NAME_TYPE_PLUS_VID",
+};
+/*
+ * Interface functions
+ */
+
+/*
+ * Clean up /proc/net/vlan entries
+ */
+
+void vlan_proc_cleanup(struct net *net)
+{
+ struct vlan_net *vn = net_generic(net, vlan_net_id);
+
+ if (vn->proc_vlan_conf)
+ remove_proc_entry(name_conf, vn->proc_vlan_dir);
+
+ if (vn->proc_vlan_dir)
+ remove_proc_entry(name_root, net->proc_net);
+
+ /* Dynamically added per-device entries are removed when their
+ * vlan_device is unregistered, so there is nothing more to clean
+ * up here.
+ */
+}
+
+/*
+ * Create /proc/net/vlan entries
+ */
+
+int __net_init vlan_proc_init(struct net *net)
+{
+ struct vlan_net *vn = net_generic(net, vlan_net_id);
+
+ vn->proc_vlan_dir = proc_net_mkdir(net, name_root, net->proc_net);
+ if (!vn->proc_vlan_dir)
+ goto err;
+
+ vn->proc_vlan_conf = proc_create_net(name_conf, S_IFREG | 0600,
+ vn->proc_vlan_dir, &vlan_seq_ops,
+ sizeof(struct seq_net_private));
+ if (!vn->proc_vlan_conf)
+ goto err;
+ return 0;
+
+err:
+ pr_err("can't create entry in proc filesystem!\n");
+ vlan_proc_cleanup(net);
+ return -ENOBUFS;
+}
+
+/*
+ * Add directory entry for VLAN device.
+ */
+
+int vlan_proc_add_dev(struct net_device *vlandev)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
+ struct vlan_net *vn = net_generic(dev_net(vlandev), vlan_net_id);
+
+ if (!strcmp(vlandev->name, name_conf))
+ return -EINVAL;
+ vlan->dent = proc_create_single_data(vlandev->name, S_IFREG | 0600,
+ vn->proc_vlan_dir, vlandev_seq_show, vlandev);
+ if (!vlan->dent)
+ return -ENOBUFS;
+ return 0;
+}
+
+/*
+ * Delete directory entry for VLAN device.
+ */
+void vlan_proc_rem_dev(struct net_device *vlandev)
+{
+ /* NOTE: proc_remove() frees the entry, so dent must not be used afterwards. */
+ proc_remove(vlan_dev_priv(vlandev)->dent);
+ vlan_dev_priv(vlandev)->dent = NULL;
+}
+
+/****** Proc filesystem entry points ****************************************/
+
+/*
+ * The following few functions build the content of /proc/net/vlan/config
+ */
+
+/* start read of /proc/net/vlan/config */
+static void *vlan_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(rcu)
+{
+ struct net_device *dev;
+ struct net *net = seq_file_net(seq);
+ loff_t i = 1;
+
+ rcu_read_lock();
+ if (*pos == 0)
+ return SEQ_START_TOKEN;
+
+ for_each_netdev_rcu(net, dev) {
+ if (!is_vlan_dev(dev))
+ continue;
+
+ if (i++ == *pos)
+ return dev;
+ }
+
+ return NULL;
+}
+
+static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct net_device *dev;
+ struct net *net = seq_file_net(seq);
+
+ ++*pos;
+
+ dev = v;
+ if (v == SEQ_START_TOKEN)
+ dev = net_device_entry(&net->dev_base_head);
+
+ for_each_netdev_continue_rcu(net, dev) {
+ if (!is_vlan_dev(dev))
+ continue;
+
+ return dev;
+ }
+
+ return NULL;
+}
+
+static void vlan_seq_stop(struct seq_file *seq, void *v)
+ __releases(rcu)
+{
+ rcu_read_unlock();
+}
+
+static int vlan_seq_show(struct seq_file *seq, void *v)
+{
+ struct net *net = seq_file_net(seq);
+ struct vlan_net *vn = net_generic(net, vlan_net_id);
+
+ if (v == SEQ_START_TOKEN) {
+ const char *nmtype = NULL;
+
+ seq_puts(seq, "VLAN Dev name | VLAN ID\n");
+
+ if (vn->name_type < ARRAY_SIZE(vlan_name_type_str))
+ nmtype = vlan_name_type_str[vn->name_type];
+
+ seq_printf(seq, "Name-Type: %s\n",
+ nmtype ? nmtype : "UNKNOWN");
+ } else {
+ const struct net_device *vlandev = v;
+ const struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
+
+ seq_printf(seq, "%-15s| %d | %s\n", vlandev->name,
+ vlan->vlan_id, vlan->real_dev->name);
+ }
+ return 0;
+}
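+
+/*
+ * Sample /proc/net/vlan/config output produced above (names are
+ * illustrative):
+ *
+ *   VLAN Dev name | VLAN ID
+ *   Name-Type: VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD
+ *   eth0.100       | 100 | eth0
+ */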
+
+static int vlandev_seq_show(struct seq_file *seq, void *offset)
+{
+ struct net_device *vlandev = (struct net_device *) seq->private;
+ const struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
+ struct rtnl_link_stats64 temp;
+ const struct rtnl_link_stats64 *stats;
+ static const char fmt64[] = "%30s %12llu\n";
+ int i;
+
+ if (!is_vlan_dev(vlandev))
+ return 0;
+
+ stats = dev_get_stats(vlandev, &temp);
+ seq_printf(seq,
+ "%s VID: %d REORDER_HDR: %i dev->priv_flags: %hx\n",
+ vlandev->name, vlan->vlan_id,
+ (int)!!(vlan->flags & VLAN_FLAG_REORDER_HDR), vlandev->priv_flags);
+
+ seq_printf(seq, fmt64, "total frames received", stats->rx_packets);
+ seq_printf(seq, fmt64, "total bytes received", stats->rx_bytes);
+ seq_printf(seq, fmt64, "Broadcast/Multicast Rcvd", stats->multicast);
+ seq_puts(seq, "\n");
+ seq_printf(seq, fmt64, "total frames transmitted", stats->tx_packets);
+ seq_printf(seq, fmt64, "total bytes transmitted", stats->tx_bytes);
+ seq_printf(seq, "Device: %s", vlan->real_dev->name);
+ /* now show all PRIORITY mappings relating to this VLAN */
+ seq_printf(seq, "\nINGRESS priority mappings: "
+ "0:%u 1:%u 2:%u 3:%u 4:%u 5:%u 6:%u 7:%u\n",
+ vlan->ingress_priority_map[0],
+ vlan->ingress_priority_map[1],
+ vlan->ingress_priority_map[2],
+ vlan->ingress_priority_map[3],
+ vlan->ingress_priority_map[4],
+ vlan->ingress_priority_map[5],
+ vlan->ingress_priority_map[6],
+ vlan->ingress_priority_map[7]);
+
+ seq_printf(seq, " EGRESS priority mappings: ");
+ for (i = 0; i < 16; i++) {
+ const struct vlan_priority_tci_mapping *mp
+ = vlan->egress_priority_map[i];
+ while (mp) {
+ seq_printf(seq, "%u:%hu ",
+ mp->priority, ((mp->vlan_qos >> 13) & 0x7));
+ mp = mp->next;
+ }
+ }
+ seq_puts(seq, "\n");
+
+ return 0;
+}
diff --git a/net/8021q/vlanproc.h b/net/8021q/vlanproc.h
new file mode 100644
index 000000000..48cd4b478
--- /dev/null
+++ b/net/8021q/vlanproc.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __BEN_VLAN_PROC_INC__
+#define __BEN_VLAN_PROC_INC__
+
+#ifdef CONFIG_PROC_FS
+struct net;
+
+int vlan_proc_init(struct net *net);
+void vlan_proc_rem_dev(struct net_device *vlandev);
+int vlan_proc_add_dev(struct net_device *vlandev);
+void vlan_proc_cleanup(struct net *net);
+
+#else /* No CONFIG_PROC_FS */
+
+#define vlan_proc_init(net) (0)
+#define vlan_proc_cleanup(net) do {} while (0)
+#define vlan_proc_add_dev(dev) ({(void)(dev), 0; })
+#define vlan_proc_rem_dev(dev) do {} while (0)
+#endif
+
+#endif /* !(__BEN_VLAN_PROC_INC__) */