author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit    5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree      a94efe259b9009378be6d90eb30d2b019d95c194 /net/hsr
parent    Initial commit. (diff)
Adding upstream version 5.10.209. (upstream/5.10.209)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'net/hsr')
-rw-r--r--  net/hsr/Kconfig          40
-rw-r--r--  net/hsr/Makefile         10
-rw-r--r--  net/hsr/hsr_debugfs.c   134
-rw-r--r--  net/hsr/hsr_device.c    570
-rw-r--r--  net/hsr/hsr_device.h     24
-rw-r--r--  net/hsr/hsr_forward.c   575
-rw-r--r--  net/hsr/hsr_forward.h    30
-rw-r--r--  net/hsr/hsr_framereg.c  594
-rw-r--r--  net/hsr/hsr_framereg.h   88
-rw-r--r--  net/hsr/hsr_main.c      159
-rw-r--r--  net/hsr/hsr_main.h      305
-rw-r--r--  net/hsr/hsr_netlink.c   555
-rw-r--r--  net/hsr/hsr_netlink.h    29
-rw-r--r--  net/hsr/hsr_slave.c     221
-rw-r--r--  net/hsr/hsr_slave.h      37
15 files changed, 3371 insertions, 0 deletions
diff --git a/net/hsr/Kconfig b/net/hsr/Kconfig
new file mode 100644
index 000000000..1b048c17b
--- /dev/null
+++ b/net/hsr/Kconfig
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# IEC 62439-3 High-availability Seamless Redundancy
+#
+
+config HSR
+ tristate "High-availability Seamless Redundancy (HSR & PRP)"
+ help
+ This enables the IEC 62439-defined High-availability Seamless
+ Redundancy (HSR) and Parallel Redundancy Protocol (PRP).
+
+ If you say Y here, then your Linux box will be able to act as a
+ DANH ("Doubly attached node implementing HSR") or DANP ("Doubly
+ attached node implementing PRP"). For this to work, your Linux box
+ needs (at least) two physical Ethernet interfaces.
+
+ For DANH, it must be connected as a node in a ring network together
+ with other HSR capable nodes. All Ethernet frames sent over the HSR
+ device will be sent in both directions on the ring (over both slave
+ ports), giving a redundant, instant fail-over network. Each HSR node
+ in the ring acts like a bridge for HSR frames, but filters frames
+ that have been forwarded earlier.
+
+ For DANP, it must be connected as a node to two
+ separate networks over the two slave interfaces. Like HSR, Ethernet
+ frames sent over the PRP device will be sent to both networks giving
+ a redundant, instant fail-over network. Unlike HSR, PRP networks
+ can have Singly Attached Nodes (SANs) such as PCs, printers, or
+ bridges, and these will still be able to communicate with DANP nodes.
+
+ This code is a "best effort" to comply with the HSR standard as
+ described in IEC 62439-3:2010 (HSRv0) and IEC 62439-3:2012 (HSRv1),
+ and the PRP standard described in IEC 62439-4:2012 (PRP), but no
+ compliance tests have been performed. Use iproute2 to select the
+ protocol you would like to use.
+
+ You need to perform any and all necessary tests yourself before
+ relying on this code in a safety critical system!
+
+ If unsure, say N.
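+
+# Example invocation (illustrative only; the option names follow the iproute2
+# ip-link(8) syntax for "type hsr" and may differ between iproute2 versions):
+#
+#   ip link add name hsr0 type hsr slave1 eth0 slave2 eth1 supervision 45 version 1
+#   ip link add name prp0 type hsr slave1 eth0 slave2 eth1 proto 1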
diff --git a/net/hsr/Makefile b/net/hsr/Makefile
new file mode 100644
index 000000000..75df90d3b
--- /dev/null
+++ b/net/hsr/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for HSR
+#
+
+obj-$(CONFIG_HSR) += hsr.o
+
+hsr-y := hsr_main.o hsr_framereg.o hsr_device.o \
+ hsr_netlink.o hsr_slave.o hsr_forward.o
+hsr-$(CONFIG_DEBUG_FS) += hsr_debugfs.o
diff --git a/net/hsr/hsr_debugfs.c b/net/hsr/hsr_debugfs.c
new file mode 100644
index 000000000..4cfd9e829
--- /dev/null
+++ b/net/hsr/hsr_debugfs.c
@@ -0,0 +1,134 @@
+/*
+ * debugfs code for HSR & PRP
+ * Copyright (C) 2019 Texas Instruments Incorporated
+ *
+ * Author(s):
+ * Murali Karicheri <m-karicheri2@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/debugfs.h>
+#include "hsr_main.h"
+#include "hsr_framereg.h"
+
+static struct dentry *hsr_debugfs_root_dir;
+
+/* hsr_node_table_show - Formats and prints node_table entries */
+static int
+hsr_node_table_show(struct seq_file *sfp, void *data)
+{
+ struct hsr_priv *priv = (struct hsr_priv *)sfp->private;
+ struct hsr_node *node;
+
+ seq_printf(sfp, "Node Table entries for (%s) device\n",
+ (priv->prot_version == PRP_V1 ? "PRP" : "HSR"));
+ seq_puts(sfp, "MAC-Address-A, MAC-Address-B, time_in[A], ");
+ seq_puts(sfp, "time_in[B], Address-B port, ");
+ if (priv->prot_version == PRP_V1)
+ seq_puts(sfp, "SAN-A, SAN-B, DAN-P\n");
+ else
+ seq_puts(sfp, "DAN-H\n");
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(node, &priv->node_db, mac_list) {
+ /* skip self node */
+ if (hsr_addr_is_self(priv, node->macaddress_A))
+ continue;
+ seq_printf(sfp, "%pM ", &node->macaddress_A[0]);
+ seq_printf(sfp, "%pM ", &node->macaddress_B[0]);
+ seq_printf(sfp, "%10lx, ", node->time_in[HSR_PT_SLAVE_A]);
+ seq_printf(sfp, "%10lx, ", node->time_in[HSR_PT_SLAVE_B]);
+ seq_printf(sfp, "%14x, ", node->addr_B_port);
+
+ if (priv->prot_version == PRP_V1)
+ seq_printf(sfp, "%5x, %5x, %5x\n",
+ node->san_a, node->san_b,
+ (node->san_a == 0 && node->san_b == 0));
+ else
+ seq_printf(sfp, "%5x\n", 1);
+ }
+ rcu_read_unlock();
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(hsr_node_table);
+
+void hsr_debugfs_rename(struct net_device *dev)
+{
+ struct hsr_priv *priv = netdev_priv(dev);
+ struct dentry *d;
+
+ d = debugfs_rename(hsr_debugfs_root_dir, priv->node_tbl_root,
+ hsr_debugfs_root_dir, dev->name);
+ if (IS_ERR(d))
+ netdev_warn(dev, "failed to rename\n");
+ else
+ priv->node_tbl_root = d;
+}
+
+/* hsr_debugfs_init - create hsr node_table file for dumping
+ * the node table
+ *
+ * Description:
+ * When debugfs is configured this routine sets up the node_table file per
+ * hsr device for dumping the node_table entries
+ */
+void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev)
+{
+ struct dentry *de = NULL;
+
+ de = debugfs_create_dir(hsr_dev->name, hsr_debugfs_root_dir);
+ if (IS_ERR(de)) {
+ pr_err("Cannot create hsr debugfs directory\n");
+ return;
+ }
+
+ priv->node_tbl_root = de;
+
+ de = debugfs_create_file("node_table", S_IFREG | 0444,
+ priv->node_tbl_root, priv,
+ &hsr_node_table_fops);
+ if (IS_ERR(de)) {
+ pr_err("Cannot create hsr node_table file\n");
+ debugfs_remove(priv->node_tbl_root);
+ priv->node_tbl_root = NULL;
+ return;
+ }
+}
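+
+/* With debugfs mounted at its usual location, the resulting file can then be
+ * read from /sys/kernel/debug/hsr/<hsr-device>/node_table.
+ */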
+
+/* hsr_debugfs_term - Tear down debugfs infrastructure
+ *
+ * Description:
+ * When debugfs is configured, this routine removes the debugfs
+ * filesystem elements that are specific to hsr
+ */
+void
+hsr_debugfs_term(struct hsr_priv *priv)
+{
+ debugfs_remove_recursive(priv->node_tbl_root);
+ priv->node_tbl_root = NULL;
+}
+
+void hsr_debugfs_create_root(void)
+{
+ hsr_debugfs_root_dir = debugfs_create_dir("hsr", NULL);
+ if (IS_ERR(hsr_debugfs_root_dir)) {
+ pr_err("Cannot create hsr debugfs root directory\n");
+ hsr_debugfs_root_dir = NULL;
+ }
+}
+
+void hsr_debugfs_remove_root(void)
+{
+ /* debugfs_remove() internally checks NULL and ERROR */
+ debugfs_remove(hsr_debugfs_root_dir);
+}
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
new file mode 100644
index 000000000..84e6ef4f3
--- /dev/null
+++ b/net/hsr/hsr_device.c
@@ -0,0 +1,570 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2011-2014 Autronica Fire and Security AS
+ *
+ * Author(s):
+ * 2011-2014 Arvid Brodin, arvid.brodin@alten.se
+ * This file contains device methods for creating, using and destroying
+ * virtual HSR or PRP devices.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/pkt_sched.h>
+#include "hsr_device.h"
+#include "hsr_slave.h"
+#include "hsr_framereg.h"
+#include "hsr_main.h"
+#include "hsr_forward.h"
+
+static bool is_admin_up(struct net_device *dev)
+{
+ return dev && (dev->flags & IFF_UP);
+}
+
+static bool is_slave_up(struct net_device *dev)
+{
+ return dev && is_admin_up(dev) && netif_oper_up(dev);
+}
+
+static void __hsr_set_operstate(struct net_device *dev, int transition)
+{
+ write_lock_bh(&dev_base_lock);
+ if (dev->operstate != transition) {
+ dev->operstate = transition;
+ write_unlock_bh(&dev_base_lock);
+ netdev_state_change(dev);
+ } else {
+ write_unlock_bh(&dev_base_lock);
+ }
+}
+
+static void hsr_set_operstate(struct hsr_port *master, bool has_carrier)
+{
+ if (!is_admin_up(master->dev)) {
+ __hsr_set_operstate(master->dev, IF_OPER_DOWN);
+ return;
+ }
+
+ if (has_carrier)
+ __hsr_set_operstate(master->dev, IF_OPER_UP);
+ else
+ __hsr_set_operstate(master->dev, IF_OPER_LOWERLAYERDOWN);
+}
+
+static bool hsr_check_carrier(struct hsr_port *master)
+{
+ struct hsr_port *port;
+
+ ASSERT_RTNL();
+
+ hsr_for_each_port(master->hsr, port) {
+ if (port->type != HSR_PT_MASTER && is_slave_up(port->dev)) {
+ netif_carrier_on(master->dev);
+ return true;
+ }
+ }
+
+ netif_carrier_off(master->dev);
+
+ return false;
+}
+
+static void hsr_check_announce(struct net_device *hsr_dev,
+ unsigned char old_operstate)
+{
+ struct hsr_priv *hsr;
+
+ hsr = netdev_priv(hsr_dev);
+
+ if (hsr_dev->operstate == IF_OPER_UP && old_operstate != IF_OPER_UP) {
+ /* Went up */
+ hsr->announce_count = 0;
+ mod_timer(&hsr->announce_timer,
+ jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));
+ }
+
+ if (hsr_dev->operstate != IF_OPER_UP && old_operstate == IF_OPER_UP)
+ /* Went down */
+ del_timer(&hsr->announce_timer);
+}
+
+void hsr_check_carrier_and_operstate(struct hsr_priv *hsr)
+{
+ struct hsr_port *master;
+ unsigned char old_operstate;
+ bool has_carrier;
+
+ master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+ /* netif_stacked_transfer_operstate() cannot be used here since
+ * it doesn't set IF_OPER_LOWERLAYERDOWN (?)
+ */
+ old_operstate = master->dev->operstate;
+ has_carrier = hsr_check_carrier(master);
+ hsr_set_operstate(master, has_carrier);
+ hsr_check_announce(master->dev, old_operstate);
+}
+
+int hsr_get_max_mtu(struct hsr_priv *hsr)
+{
+ unsigned int mtu_max;
+ struct hsr_port *port;
+
+ mtu_max = ETH_DATA_LEN;
+ hsr_for_each_port(hsr, port)
+ if (port->type != HSR_PT_MASTER)
+ mtu_max = min(port->dev->mtu, mtu_max);
+
+ if (mtu_max < HSR_HLEN)
+ return 0;
+ return mtu_max - HSR_HLEN;
+}
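+
+/* For example, with two ordinary 1500-byte slaves this yields 1500 - 6 = 1494,
+ * since HSR_HLEN (the HSR tag / PRP RCT length) is 6 octets.
+ */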
+
+static int hsr_dev_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct hsr_priv *hsr;
+
+ hsr = netdev_priv(dev);
+
+ if (new_mtu > hsr_get_max_mtu(hsr)) {
+ netdev_info(dev, "An HSR master's MTU cannot be greater than the smallest MTU of its slaves minus the HSR Tag length (%d octets).\n",
+ HSR_HLEN);
+ return -EINVAL;
+ }
+
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
+static int hsr_dev_open(struct net_device *dev)
+{
+ struct hsr_priv *hsr;
+ struct hsr_port *port;
+ char designation;
+
+ hsr = netdev_priv(dev);
+ designation = '\0';
+
+ hsr_for_each_port(hsr, port) {
+ if (port->type == HSR_PT_MASTER)
+ continue;
+ switch (port->type) {
+ case HSR_PT_SLAVE_A:
+ designation = 'A';
+ break;
+ case HSR_PT_SLAVE_B:
+ designation = 'B';
+ break;
+ default:
+ designation = '?';
+ }
+ if (!is_slave_up(port->dev))
+ netdev_warn(dev, "Slave %c (%s) is not up; please bring it up to get a fully working HSR network\n",
+ designation, port->dev->name);
+ }
+
+ if (designation == '\0')
+ netdev_warn(dev, "No slave devices configured\n");
+
+ return 0;
+}
+
+static int hsr_dev_close(struct net_device *dev)
+{
+ /* Nothing to do here. */
+ return 0;
+}
+
+static netdev_features_t hsr_features_recompute(struct hsr_priv *hsr,
+ netdev_features_t features)
+{
+ netdev_features_t mask;
+ struct hsr_port *port;
+
+ mask = features;
+
+ /* Mask out all features that, if supported by one device, should be
+ * enabled for all devices (see NETIF_F_ONE_FOR_ALL).
+ *
+ * Anything that's off in mask will not be enabled - so only things
+ * that were in features originally, and are also in NETIF_F_ONE_FOR_ALL,
+ * may become enabled.
+ */
+ features &= ~NETIF_F_ONE_FOR_ALL;
+ hsr_for_each_port(hsr, port)
+ features = netdev_increment_features(features,
+ port->dev->features,
+ mask);
+
+ return features;
+}
+
+static netdev_features_t hsr_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct hsr_priv *hsr = netdev_priv(dev);
+
+ return hsr_features_recompute(hsr, features);
+}
+
+static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct hsr_priv *hsr = netdev_priv(dev);
+ struct hsr_port *master;
+
+ master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+ if (master) {
+ skb->dev = master->dev;
+ skb_reset_mac_header(skb);
+ skb_reset_mac_len(skb);
+ spin_lock_bh(&hsr->seqnr_lock);
+ hsr_forward_skb(skb, master);
+ spin_unlock_bh(&hsr->seqnr_lock);
+ } else {
+ atomic_long_inc(&dev->tx_dropped);
+ dev_kfree_skb_any(skb);
+ }
+ return NETDEV_TX_OK;
+}
+
+static const struct header_ops hsr_header_ops = {
+ .create = eth_header,
+ .parse = eth_header_parse,
+};
+
+static struct sk_buff *hsr_init_skb(struct hsr_port *master)
+{
+ struct hsr_priv *hsr = master->hsr;
+ struct sk_buff *skb;
+ int hlen, tlen;
+
+ hlen = LL_RESERVED_SPACE(master->dev);
+ tlen = master->dev->needed_tailroom;
+ /* The skb size is the same for PRP and HSR frames; the only
+ * difference is that PRP adds a trailer whereas HSR adds a
+ * header
+ */
+ skb = dev_alloc_skb(sizeof(struct hsr_sup_tag) +
+ sizeof(struct hsr_sup_payload) + hlen + tlen);
+
+ if (!skb)
+ return skb;
+
+ skb_reserve(skb, hlen);
+ skb->dev = master->dev;
+ skb->priority = TC_PRIO_CONTROL;
+
+ if (dev_hard_header(skb, skb->dev, ETH_P_PRP,
+ hsr->sup_multicast_addr,
+ skb->dev->dev_addr, skb->len) <= 0)
+ goto out;
+
+ skb_reset_mac_header(skb);
+ skb_reset_mac_len(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+
+ return skb;
+out:
+ kfree_skb(skb);
+
+ return NULL;
+}
+
+static void send_hsr_supervision_frame(struct hsr_port *master,
+ unsigned long *interval)
+{
+ struct hsr_priv *hsr = master->hsr;
+ __u8 type = HSR_TLV_LIFE_CHECK;
+ struct hsr_sup_payload *hsr_sp;
+ struct hsr_sup_tag *hsr_stag;
+ struct sk_buff *skb;
+
+ *interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
+ if (hsr->announce_count < 3 && hsr->prot_version == 0) {
+ type = HSR_TLV_ANNOUNCE;
+ *interval = msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
+ hsr->announce_count++;
+ }
+
+ skb = hsr_init_skb(master);
+ if (!skb) {
+ WARN_ONCE(1, "HSR: Could not send supervision frame\n");
+ return;
+ }
+
+ hsr_stag = skb_put(skb, sizeof(struct hsr_sup_tag));
+ set_hsr_stag_path(hsr_stag, (hsr->prot_version ? 0x0 : 0xf));
+ set_hsr_stag_HSR_ver(hsr_stag, hsr->prot_version);
+
+ /* From HSRv1 on we have separate supervision sequence numbers. */
+ spin_lock_bh(&hsr->seqnr_lock);
+ if (hsr->prot_version > 0) {
+ hsr_stag->sequence_nr = htons(hsr->sup_sequence_nr);
+ hsr->sup_sequence_nr++;
+ } else {
+ hsr_stag->sequence_nr = htons(hsr->sequence_nr);
+ hsr->sequence_nr++;
+ }
+
+ hsr_stag->HSR_TLV_type = type;
+ /* TODO: Why 12 in HSRv0? */
+ hsr_stag->HSR_TLV_length = hsr->prot_version ?
+ sizeof(struct hsr_sup_payload) : 12;
+
+ /* Payload: MacAddressA */
+ hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload));
+ ether_addr_copy(hsr_sp->macaddress_A, master->dev->dev_addr);
+
+ if (skb_put_padto(skb, ETH_ZLEN)) {
+ spin_unlock_bh(&hsr->seqnr_lock);
+ return;
+ }
+
+ hsr_forward_skb(skb, master);
+ spin_unlock_bh(&hsr->seqnr_lock);
+ return;
+}
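+
+/* The supervision frame assembled above thus carries, after the Ethernet
+ * header addressed to the supervision multicast group: the supervision tag
+ * (path/version, sequence number, TLV type and length) followed by the
+ * node's MacAddressA, padded out to the 60-byte Ethernet minimum.
+ */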
+
+static void send_prp_supervision_frame(struct hsr_port *master,
+ unsigned long *interval)
+{
+ struct hsr_priv *hsr = master->hsr;
+ struct hsr_sup_payload *hsr_sp;
+ struct hsr_sup_tag *hsr_stag;
+ struct sk_buff *skb;
+
+ skb = hsr_init_skb(master);
+ if (!skb) {
+ WARN_ONCE(1, "PRP: Could not send supervision frame\n");
+ return;
+ }
+
+ *interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
+ hsr_stag = skb_put(skb, sizeof(struct hsr_sup_tag));
+ set_hsr_stag_path(hsr_stag, (hsr->prot_version ? 0x0 : 0xf));
+ set_hsr_stag_HSR_ver(hsr_stag, (hsr->prot_version ? 1 : 0));
+
+ /* From HSRv1 on we have separate supervision sequence numbers. */
+ spin_lock_bh(&hsr->seqnr_lock);
+ hsr_stag->sequence_nr = htons(hsr->sup_sequence_nr);
+ hsr->sup_sequence_nr++;
+ hsr_stag->HSR_TLV_type = PRP_TLV_LIFE_CHECK_DD;
+ hsr_stag->HSR_TLV_length = sizeof(struct hsr_sup_payload);
+
+ /* Payload: MacAddressA */
+ hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload));
+ ether_addr_copy(hsr_sp->macaddress_A, master->dev->dev_addr);
+
+ if (skb_put_padto(skb, ETH_ZLEN)) {
+ spin_unlock_bh(&hsr->seqnr_lock);
+ return;
+ }
+
+ hsr_forward_skb(skb, master);
+ spin_unlock_bh(&hsr->seqnr_lock);
+}
+
+/* Announce (supervision frame) timer function
+ */
+static void hsr_announce(struct timer_list *t)
+{
+ struct hsr_priv *hsr;
+ struct hsr_port *master;
+ unsigned long interval;
+
+ hsr = from_timer(hsr, t, announce_timer);
+
+ rcu_read_lock();
+ master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+ hsr->proto_ops->send_sv_frame(master, &interval);
+
+ if (is_admin_up(master->dev))
+ mod_timer(&hsr->announce_timer, jiffies + interval);
+
+ rcu_read_unlock();
+}
+
+void hsr_del_ports(struct hsr_priv *hsr)
+{
+ struct hsr_port *port;
+
+ port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
+ if (port)
+ hsr_del_port(port);
+
+ port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
+ if (port)
+ hsr_del_port(port);
+
+ port = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+ if (port)
+ hsr_del_port(port);
+}
+
+static const struct net_device_ops hsr_device_ops = {
+ .ndo_change_mtu = hsr_dev_change_mtu,
+ .ndo_open = hsr_dev_open,
+ .ndo_stop = hsr_dev_close,
+ .ndo_start_xmit = hsr_dev_xmit,
+ .ndo_fix_features = hsr_fix_features,
+};
+
+static struct device_type hsr_type = {
+ .name = "hsr",
+};
+
+static struct hsr_proto_ops hsr_ops = {
+ .send_sv_frame = send_hsr_supervision_frame,
+ .create_tagged_frame = hsr_create_tagged_frame,
+ .get_untagged_frame = hsr_get_untagged_frame,
+ .fill_frame_info = hsr_fill_frame_info,
+ .invalid_dan_ingress_frame = hsr_invalid_dan_ingress_frame,
+};
+
+static struct hsr_proto_ops prp_ops = {
+ .send_sv_frame = send_prp_supervision_frame,
+ .create_tagged_frame = prp_create_tagged_frame,
+ .get_untagged_frame = prp_get_untagged_frame,
+ .drop_frame = prp_drop_frame,
+ .fill_frame_info = prp_fill_frame_info,
+ .handle_san_frame = prp_handle_san_frame,
+ .update_san_info = prp_update_san_info,
+};
+
+void hsr_dev_setup(struct net_device *dev)
+{
+ eth_hw_addr_random(dev);
+
+ ether_setup(dev);
+ dev->min_mtu = 0;
+ dev->header_ops = &hsr_header_ops;
+ dev->netdev_ops = &hsr_device_ops;
+ SET_NETDEV_DEVTYPE(dev, &hsr_type);
+ dev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
+
+ dev->needs_free_netdev = true;
+
+ dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
+ NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
+ NETIF_F_HW_VLAN_CTAG_TX;
+
+ dev->features = dev->hw_features;
+
+ /* Prevent recursive tx locking */
+ dev->features |= NETIF_F_LLTX;
+ /* VLAN on top of HSR needs testing and probably some work on
+ * hsr_header_create() etc.
+ */
+ dev->features |= NETIF_F_VLAN_CHALLENGED;
+ /* Not sure about this. Taken from bridge code. netdev_features.h says
+ * it means "Does not change network namespaces".
+ */
+ dev->features |= NETIF_F_NETNS_LOCAL;
+}
+
+/* Return true if dev is a HSR master; return false otherwise.
+ */
+inline bool is_hsr_master(struct net_device *dev)
+{
+ return (dev->netdev_ops->ndo_start_xmit == hsr_dev_xmit);
+}
+
+/* Default multicast address for HSR Supervision frames */
+static const unsigned char def_multicast_addr[ETH_ALEN] __aligned(2) = {
+ 0x01, 0x15, 0x4e, 0x00, 0x01, 0x00
+};
+
+int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
+ unsigned char multicast_spec, u8 protocol_version,
+ struct netlink_ext_ack *extack)
+{
+ bool unregister = false;
+ struct hsr_priv *hsr;
+ int res;
+
+ hsr = netdev_priv(hsr_dev);
+ INIT_LIST_HEAD(&hsr->ports);
+ INIT_LIST_HEAD(&hsr->node_db);
+ INIT_LIST_HEAD(&hsr->self_node_db);
+ spin_lock_init(&hsr->list_lock);
+
+ ether_addr_copy(hsr_dev->dev_addr, slave[0]->dev_addr);
+
+ /* initialize protocol specific functions */
+ if (protocol_version == PRP_V1) {
+ /* For PRP, the most significant 3 bits of lan_id hold
+ * the net_id (PRP_LAN_ID)
+ */
+ hsr->net_id = PRP_LAN_ID << 1;
+ hsr->proto_ops = &prp_ops;
+ } else {
+ hsr->proto_ops = &hsr_ops;
+ }
+
+ /* Make sure we recognize frames from ourselves in hsr_rcv() */
+ res = hsr_create_self_node(hsr, hsr_dev->dev_addr,
+ slave[1]->dev_addr);
+ if (res < 0)
+ return res;
+
+ spin_lock_init(&hsr->seqnr_lock);
+ /* Overflow soon to find bugs more easily: */
+ hsr->sequence_nr = HSR_SEQNR_START;
+ hsr->sup_sequence_nr = HSR_SUP_SEQNR_START;
+
+ timer_setup(&hsr->announce_timer, hsr_announce, 0);
+ timer_setup(&hsr->prune_timer, hsr_prune_nodes, 0);
+
+ ether_addr_copy(hsr->sup_multicast_addr, def_multicast_addr);
+ hsr->sup_multicast_addr[ETH_ALEN - 1] = multicast_spec;
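+ /* The resulting supervision address is 01:15:4e:00:01:XX, where XX is
+ * the multicast_spec byte supplied over netlink (exposed by iproute2 as
+ * the "supervision" option).
+ */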
+
+ hsr->prot_version = protocol_version;
+
+ /* FIXME: should I modify the value of these?
+ *
+ * - hsr_dev->flags - i.e.
+ * IFF_MASTER/SLAVE?
+ * - hsr_dev->priv_flags - i.e.
+ * IFF_EBRIDGE?
+ * IFF_TX_SKB_SHARING?
+ * IFF_HSR_MASTER/SLAVE?
+ */
+
+ /* Make sure the 1st call to netif_carrier_on() gets through */
+ netif_carrier_off(hsr_dev);
+
+ res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER, extack);
+ if (res)
+ goto err_add_master;
+
+ res = register_netdevice(hsr_dev);
+ if (res)
+ goto err_unregister;
+
+ unregister = true;
+
+ res = hsr_add_port(hsr, slave[0], HSR_PT_SLAVE_A, extack);
+ if (res)
+ goto err_unregister;
+
+ res = hsr_add_port(hsr, slave[1], HSR_PT_SLAVE_B, extack);
+ if (res)
+ goto err_unregister;
+
+ hsr_debugfs_init(hsr, hsr_dev);
+ mod_timer(&hsr->prune_timer, jiffies + msecs_to_jiffies(PRUNE_PERIOD));
+
+ return 0;
+
+err_unregister:
+ hsr_del_ports(hsr);
+err_add_master:
+ hsr_del_self_node(hsr);
+
+ if (unregister)
+ unregister_netdevice(hsr_dev);
+ return res;
+}
diff --git a/net/hsr/hsr_device.h b/net/hsr/hsr_device.h
new file mode 100644
index 000000000..868373822
--- /dev/null
+++ b/net/hsr/hsr_device.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2011-2014 Autronica Fire and Security AS
+ *
+ * Author(s):
+ * 2011-2014 Arvid Brodin, arvid.brodin@alten.se
+ *
+ * include file for HSR and PRP.
+ */
+
+#ifndef __HSR_DEVICE_H
+#define __HSR_DEVICE_H
+
+#include <linux/netdevice.h>
+#include "hsr_main.h"
+
+void hsr_del_ports(struct hsr_priv *hsr);
+void hsr_dev_setup(struct net_device *dev);
+int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
+ unsigned char multicast_spec, u8 protocol_version,
+ struct netlink_ext_ack *extack);
+void hsr_check_carrier_and_operstate(struct hsr_priv *hsr);
+bool is_hsr_master(struct net_device *dev);
+int hsr_get_max_mtu(struct hsr_priv *hsr);
+#endif /* __HSR_DEVICE_H */
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
new file mode 100644
index 000000000..0c115d8de
--- /dev/null
+++ b/net/hsr/hsr_forward.c
@@ -0,0 +1,575 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2011-2014 Autronica Fire and Security AS
+ *
+ * Author(s):
+ * 2011-2014 Arvid Brodin, arvid.brodin@alten.se
+ *
+ * Frame router for HSR and PRP.
+ */
+
+#include "hsr_forward.h"
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include "hsr_main.h"
+#include "hsr_framereg.h"
+
+struct hsr_node;
+
+/* The uses I can see for these HSR supervision frames are:
+ * 1) Use the frames that are sent after node initialization ("HSR_TLV.Type =
+ * 22") to reset any sequence_nr counters belonging to that node. Useful if
+ * the other node's counter has been reset for some reason.
+ * --
+ * Or not - resetting the counter and bridging the frame would create a
+ * loop, unfortunately.
+ *
+ * 2) Use the LifeCheck frames to detect ring breaks. I.e. if no LifeCheck
+ * frame is received from a particular node, we know something is wrong.
+ * We just register these (as with normal frames) and throw them away.
+ *
+ * 3) Allow different MAC addresses for the two slave interfaces, using the
+ * MacAddressA field.
+ */
+static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
+{
+ struct ethhdr *eth_hdr;
+ struct hsr_sup_tag *hsr_sup_tag;
+ struct hsrv1_ethhdr_sp *hsr_V1_hdr;
+
+ WARN_ON_ONCE(!skb_mac_header_was_set(skb));
+ eth_hdr = (struct ethhdr *)skb_mac_header(skb);
+
+ /* Correct addr? */
+ if (!ether_addr_equal(eth_hdr->h_dest,
+ hsr->sup_multicast_addr))
+ return false;
+
+ /* Correct ether type? */
+ if (!(eth_hdr->h_proto == htons(ETH_P_PRP) ||
+ eth_hdr->h_proto == htons(ETH_P_HSR)))
+ return false;
+
+ /* Get the supervision header from correct location. */
+ if (eth_hdr->h_proto == htons(ETH_P_HSR)) { /* Okay HSRv1. */
+ hsr_V1_hdr = (struct hsrv1_ethhdr_sp *)skb_mac_header(skb);
+ if (hsr_V1_hdr->hsr.encap_proto != htons(ETH_P_PRP))
+ return false;
+
+ hsr_sup_tag = &hsr_V1_hdr->hsr_sup;
+ } else {
+ hsr_sup_tag =
+ &((struct hsrv0_ethhdr_sp *)skb_mac_header(skb))->hsr_sup;
+ }
+
+ if (hsr_sup_tag->HSR_TLV_type != HSR_TLV_ANNOUNCE &&
+ hsr_sup_tag->HSR_TLV_type != HSR_TLV_LIFE_CHECK &&
+ hsr_sup_tag->HSR_TLV_type != PRP_TLV_LIFE_CHECK_DD &&
+ hsr_sup_tag->HSR_TLV_type != PRP_TLV_LIFE_CHECK_DA)
+ return false;
+ if (hsr_sup_tag->HSR_TLV_length != 12 &&
+ hsr_sup_tag->HSR_TLV_length != sizeof(struct hsr_sup_payload))
+ return false;
+
+ return true;
+}
+
+static struct sk_buff *create_stripped_skb_hsr(struct sk_buff *skb_in,
+ struct hsr_frame_info *frame)
+{
+ struct sk_buff *skb;
+ int copylen;
+ unsigned char *dst, *src;
+
+ skb_pull(skb_in, HSR_HLEN);
+ skb = __pskb_copy(skb_in, skb_headroom(skb_in) - HSR_HLEN, GFP_ATOMIC);
+ skb_push(skb_in, HSR_HLEN);
+ if (!skb)
+ return NULL;
+
+ skb_reset_mac_header(skb);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ skb->csum_start -= HSR_HLEN;
+
+ copylen = 2 * ETH_ALEN;
+ if (frame->is_vlan)
+ copylen += VLAN_HLEN;
+ src = skb_mac_header(skb_in);
+ dst = skb_mac_header(skb);
+ memcpy(dst, src, copylen);
+
+ skb->protocol = eth_hdr(skb)->h_proto;
+ return skb;
+}
+
+struct sk_buff *hsr_get_untagged_frame(struct hsr_frame_info *frame,
+ struct hsr_port *port)
+{
+ if (!frame->skb_std) {
+ if (frame->skb_hsr)
+ frame->skb_std =
+ create_stripped_skb_hsr(frame->skb_hsr, frame);
+ else
+ netdev_warn_once(port->dev,
+ "Unexpected frame received in hsr_get_untagged_frame()\n");
+
+ if (!frame->skb_std)
+ return NULL;
+ }
+
+ return skb_clone(frame->skb_std, GFP_ATOMIC);
+}
+
+struct sk_buff *prp_get_untagged_frame(struct hsr_frame_info *frame,
+ struct hsr_port *port)
+{
+ if (!frame->skb_std) {
+ if (frame->skb_prp) {
+ /* trim the skb by len - HSR_HLEN to exclude RCT */
+ skb_trim(frame->skb_prp,
+ frame->skb_prp->len - HSR_HLEN);
+ frame->skb_std =
+ __pskb_copy(frame->skb_prp,
+ skb_headroom(frame->skb_prp),
+ GFP_ATOMIC);
+ } else {
+ /* Unexpected */
+ WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n",
+ __FILE__, __LINE__, port->dev->name);
+ return NULL;
+ }
+ }
+
+ return skb_clone(frame->skb_std, GFP_ATOMIC);
+}
+
+static void prp_set_lan_id(struct prp_rct *trailer,
+ struct hsr_port *port)
+{
+ int lane_id;
+
+ if (port->type == HSR_PT_SLAVE_A)
+ lane_id = 0;
+ else
+ lane_id = 1;
+
+ /* Add net_id in the upper 3 bits of lane_id */
+ lane_id |= port->hsr->net_id;
+ set_prp_lan_id(trailer, lane_id);
+}
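+
+/* Assuming PRP_LAN_ID is 0x5 (as defined in hsr_main.h) and given that net_id
+ * is stored pre-shifted in hsr_dev_finalize(), the 4-bit LAN identifier placed
+ * in the RCT becomes 0xa for frames sent on slave A and 0xb for slave B, which
+ * matches the LAN_A/LAN_B encoding used by PRP.
+ */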
+
+/* Tailroom for PRP rct should have been created before calling this */
+static struct sk_buff *prp_fill_rct(struct sk_buff *skb,
+ struct hsr_frame_info *frame,
+ struct hsr_port *port)
+{
+ struct prp_rct *trailer;
+ int min_size = ETH_ZLEN;
+ int lsdu_size;
+
+ if (!skb)
+ return skb;
+
+ if (frame->is_vlan)
+ min_size = VLAN_ETH_ZLEN;
+
+ if (skb_put_padto(skb, min_size))
+ return NULL;
+
+ trailer = (struct prp_rct *)skb_put(skb, HSR_HLEN);
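+ /* The 14 below is the Ethernet header (ETH_HLEN); a further 4 bytes
+ * (VLAN_HLEN) are subtracted for VLAN-tagged frames, so lsdu_size
+ * covers everything after the link-layer header, including the RCT
+ * just reserved above.
+ */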
+ lsdu_size = skb->len - 14;
+ if (frame->is_vlan)
+ lsdu_size -= 4;
+ prp_set_lan_id(trailer, port);
+ set_prp_LSDU_size(trailer, lsdu_size);
+ trailer->sequence_nr = htons(frame->sequence_nr);
+ trailer->PRP_suffix = htons(ETH_P_PRP);
+ skb->protocol = eth_hdr(skb)->h_proto;
+
+ return skb;
+}
+
+static void hsr_set_path_id(struct hsr_ethhdr *hsr_ethhdr,
+ struct hsr_port *port)
+{
+ int path_id;
+
+ if (port->type == HSR_PT_SLAVE_A)
+ path_id = 0;
+ else
+ path_id = 1;
+
+ set_hsr_tag_path(&hsr_ethhdr->hsr_tag, path_id);
+}
+
+static struct sk_buff *hsr_fill_tag(struct sk_buff *skb,
+ struct hsr_frame_info *frame,
+ struct hsr_port *port, u8 proto_version)
+{
+ struct hsr_ethhdr *hsr_ethhdr;
+ int lsdu_size;
+
+ /* pad to minimum packet size which is 60 + 6 (HSR tag) */
+ if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN))
+ return NULL;
+
+ lsdu_size = skb->len - 14;
+ if (frame->is_vlan)
+ lsdu_size -= 4;
+
+ hsr_ethhdr = (struct hsr_ethhdr *)skb_mac_header(skb);
+
+ hsr_set_path_id(hsr_ethhdr, port);
+ set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, lsdu_size);
+ hsr_ethhdr->hsr_tag.sequence_nr = htons(frame->sequence_nr);
+ hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;
+ hsr_ethhdr->ethhdr.h_proto = htons(proto_version ?
+ ETH_P_HSR : ETH_P_PRP);
+ skb->protocol = hsr_ethhdr->ethhdr.h_proto;
+
+ return skb;
+}
+
+/* If the original frame was an HSR tagged frame, just clone it to be sent
+ * unchanged. Otherwise, create a private frame especially tagged for 'port'.
+ */
+struct sk_buff *hsr_create_tagged_frame(struct hsr_frame_info *frame,
+ struct hsr_port *port)
+{
+ unsigned char *dst, *src;
+ struct sk_buff *skb;
+ int movelen;
+
+ if (frame->skb_hsr) {
+ struct hsr_ethhdr *hsr_ethhdr =
+ (struct hsr_ethhdr *)skb_mac_header(frame->skb_hsr);
+
+ /* set the lane id properly */
+ hsr_set_path_id(hsr_ethhdr, port);
+ return skb_clone(frame->skb_hsr, GFP_ATOMIC);
+ }
+
+ /* Create the new skb with enough headroom to fit the HSR tag */
+ skb = __pskb_copy(frame->skb_std,
+ skb_headroom(frame->skb_std) + HSR_HLEN, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+ skb_reset_mac_header(skb);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ skb->csum_start += HSR_HLEN;
+
+ movelen = ETH_HLEN;
+ if (frame->is_vlan)
+ movelen += VLAN_HLEN;
+
+ src = skb_mac_header(skb);
+ dst = skb_push(skb, HSR_HLEN);
+ memmove(dst, src, movelen);
+ skb_reset_mac_header(skb);
+
+ /* skb_put_padto() frees the skb on error, and hsr_fill_tag() returns
+ * NULL in that case
+ */
+ return hsr_fill_tag(skb, frame, port, port->hsr->prot_version);
+}
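+
+/* The tagged frame produced here has the layout: destination MAC, source MAC,
+ * HSR ethertype (ETH_P_HSR for HSRv1, ETH_P_PRP for HSRv0), a 2-byte
+ * path + LSDU-size field, a 2-byte sequence number, the original ethertype,
+ * and finally the payload.
+ */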
+
+struct sk_buff *prp_create_tagged_frame(struct hsr_frame_info *frame,
+ struct hsr_port *port)
+{
+ struct sk_buff *skb;
+
+ if (frame->skb_prp) {
+ struct prp_rct *trailer = skb_get_PRP_rct(frame->skb_prp);
+
+ if (trailer) {
+ prp_set_lan_id(trailer, port);
+ } else {
+ WARN_ONCE(!trailer, "errored PRP skb");
+ return NULL;
+ }
+ return skb_clone(frame->skb_prp, GFP_ATOMIC);
+ }
+
+ skb = skb_copy_expand(frame->skb_std, 0,
+ skb_tailroom(frame->skb_std) + HSR_HLEN,
+ GFP_ATOMIC);
+ return prp_fill_rct(skb, frame, port);
+}
+
+static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
+ struct hsr_node *node_src)
+{
+ bool was_multicast_frame;
+ int res, recv_len;
+
+ was_multicast_frame = (skb->pkt_type == PACKET_MULTICAST);
+ hsr_addr_subst_source(node_src, skb);
+ skb_pull(skb, ETH_HLEN);
+ recv_len = skb->len;
+ res = netif_rx(skb);
+ if (res == NET_RX_DROP) {
+ dev->stats.rx_dropped++;
+ } else {
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += recv_len;
+ if (was_multicast_frame)
+ dev->stats.multicast++;
+ }
+}
+
+static int hsr_xmit(struct sk_buff *skb, struct hsr_port *port,
+ struct hsr_frame_info *frame)
+{
+ if (frame->port_rcv->type == HSR_PT_MASTER) {
+ hsr_addr_subst_dest(frame->node_src, skb, port);
+
+ /* Address substitution (IEC62439-3 pp 26, 50): replace the source
+ * MAC address of the outgoing frame with that of the outgoing slave.
+ */
+ ether_addr_copy(eth_hdr(skb)->h_source, port->dev->dev_addr);
+ }
+ return dev_queue_xmit(skb);
+}
+
+bool prp_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port)
+{
+ return ((frame->port_rcv->type == HSR_PT_SLAVE_A &&
+ port->type == HSR_PT_SLAVE_B) ||
+ (frame->port_rcv->type == HSR_PT_SLAVE_B &&
+ port->type == HSR_PT_SLAVE_A));
+}
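+
+/* In other words, a frame received on one PRP slave is never forwarded out
+ * the other slave; the two PRP LANs stay fully separate.
+ */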
+
+/* Forward the frame through all devices except:
+ * - Back through the receiving device
+ * - If it's an HSR frame: through a device where it has passed before
+ * - If it's a PRP frame: through another PRP slave device (no bridge)
+ * - To the local HSR master only if the frame is directly addressed to it, or
+ * a non-supervision multicast or broadcast frame.
+ *
+ * HSR slave devices should insert a HSR tag into the frame, or forward the
+ * frame unchanged if it's already tagged. Interlink devices should strip HSR
+ * tags if they're of the non-HSR type (but only after duplicate discard). The
+ * master device always strips HSR tags.
+ */
+static void hsr_forward_do(struct hsr_frame_info *frame)
+{
+ struct hsr_port *port;
+ struct sk_buff *skb;
+
+ hsr_for_each_port(frame->port_rcv->hsr, port) {
+ struct hsr_priv *hsr = port->hsr;
+ /* Don't send frame back the way it came */
+ if (port == frame->port_rcv)
+ continue;
+
+ /* Don't deliver locally unless we should */
+ if (port->type == HSR_PT_MASTER && !frame->is_local_dest)
+ continue;
+
+ /* Deliver frames directly addressed to us to master only */
+ if (port->type != HSR_PT_MASTER && frame->is_local_exclusive)
+ continue;
+
+ /* Don't send the frame over a port where it has been sent before.
+ * Also, for frames from a SAN, this check shouldn't be done.
+ */
+ if (!frame->is_from_san &&
+ hsr_register_frame_out(port, frame->node_src,
+ frame->sequence_nr))
+ continue;
+
+ if (frame->is_supervision && port->type == HSR_PT_MASTER) {
+ hsr_handle_sup_frame(frame);
+ continue;
+ }
+
+ /* Check if frame is to be dropped. Eg. for PRP no forward
+ * between ports.
+ */
+ if (hsr->proto_ops->drop_frame &&
+ hsr->proto_ops->drop_frame(frame, port))
+ continue;
+
+ if (port->type != HSR_PT_MASTER)
+ skb = hsr->proto_ops->create_tagged_frame(frame, port);
+ else
+ skb = hsr->proto_ops->get_untagged_frame(frame, port);
+
+ if (!skb) {
+ frame->port_rcv->dev->stats.rx_dropped++;
+ continue;
+ }
+
+ skb->dev = port->dev;
+ if (port->type == HSR_PT_MASTER)
+ hsr_deliver_master(skb, port->dev, frame->node_src);
+ else
+ hsr_xmit(skb, port, frame);
+ }
+}
+
+static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb,
+ struct hsr_frame_info *frame)
+{
+ if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) {
+ frame->is_local_exclusive = true;
+ skb->pkt_type = PACKET_HOST;
+ } else {
+ frame->is_local_exclusive = false;
+ }
+
+ if (skb->pkt_type == PACKET_HOST ||
+ skb->pkt_type == PACKET_MULTICAST ||
+ skb->pkt_type == PACKET_BROADCAST) {
+ frame->is_local_dest = true;
+ } else {
+ frame->is_local_dest = false;
+ }
+}
+
+static void handle_std_frame(struct sk_buff *skb,
+ struct hsr_frame_info *frame)
+{
+ struct hsr_port *port = frame->port_rcv;
+ struct hsr_priv *hsr = port->hsr;
+
+ frame->skb_hsr = NULL;
+ frame->skb_prp = NULL;
+ frame->skb_std = skb;
+
+ if (port->type != HSR_PT_MASTER) {
+ frame->is_from_san = true;
+ } else {
+ /* Sequence nr for the master node */
+ lockdep_assert_held(&hsr->seqnr_lock);
+ frame->sequence_nr = hsr->sequence_nr;
+ hsr->sequence_nr++;
+ }
+}
+
+int hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
+ struct hsr_frame_info *frame)
+{
+ struct hsr_port *port = frame->port_rcv;
+ struct hsr_priv *hsr = port->hsr;
+
+ /* HSRv0 supervisory frames double as a tag so treat them as tagged. */
+ if ((!hsr->prot_version && proto == htons(ETH_P_PRP)) ||
+ proto == htons(ETH_P_HSR)) {
+ /* Check if skb contains hsr_ethhdr */
+ if (skb->mac_len < sizeof(struct hsr_ethhdr))
+ return -EINVAL;
+
+ /* HSR tagged frame :- Data or Supervision */
+ frame->skb_std = NULL;
+ frame->skb_prp = NULL;
+ frame->skb_hsr = skb;
+ frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
+ return 0;
+ }
+
+ /* Standard frame or PRP from master port */
+ handle_std_frame(skb, frame);
+
+ return 0;
+}
+
+int prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
+ struct hsr_frame_info *frame)
+{
+ /* Supervision frame */
+ struct prp_rct *rct = skb_get_PRP_rct(skb);
+
+ if (rct &&
+ prp_check_lsdu_size(skb, rct, frame->is_supervision)) {
+ frame->skb_hsr = NULL;
+ frame->skb_std = NULL;
+ frame->skb_prp = skb;
+ frame->sequence_nr = prp_get_skb_sequence_nr(rct);
+ return 0;
+ }
+ handle_std_frame(skb, frame);
+
+ return 0;
+}
+
+static int fill_frame_info(struct hsr_frame_info *frame,
+ struct sk_buff *skb, struct hsr_port *port)
+{
+ struct hsr_priv *hsr = port->hsr;
+ struct hsr_vlan_ethhdr *vlan_hdr;
+ struct ethhdr *ethhdr;
+ __be16 proto;
+ int ret;
+
+ /* Check if skb contains ethhdr */
+ if (skb->mac_len < sizeof(struct ethhdr))
+ return -EINVAL;
+
+ memset(frame, 0, sizeof(*frame));
+ frame->is_supervision = is_supervision_frame(port->hsr, skb);
+ frame->node_src = hsr_get_node(port, &hsr->node_db, skb,
+ frame->is_supervision,
+ port->type);
+ if (!frame->node_src)
+ return -1; /* Unknown node and !is_supervision, or no mem */
+
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
+ frame->is_vlan = false;
+ proto = ethhdr->h_proto;
+
+ if (proto == htons(ETH_P_8021Q))
+ frame->is_vlan = true;
+
+ if (frame->is_vlan) {
+ vlan_hdr = (struct hsr_vlan_ethhdr *)ethhdr;
+ proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto;
+ /* FIXME: */
+ netdev_warn_once(skb->dev, "VLAN not yet supported");
+ return -EINVAL;
+ }
+
+ frame->is_from_san = false;
+ frame->port_rcv = port;
+ ret = hsr->proto_ops->fill_frame_info(proto, skb, frame);
+ if (ret)
+ return ret;
+
+ check_local_dest(port->hsr, skb, frame);
+
+ return 0;
+}
+
+/* Must be called holding rcu read lock (because of the port parameter) */
+void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
+{
+ struct hsr_frame_info frame;
+
+ rcu_read_lock();
+ if (fill_frame_info(&frame, skb, port) < 0)
+ goto out_drop;
+
+ hsr_register_frame_in(frame.node_src, port, frame.sequence_nr);
+ hsr_forward_do(&frame);
+ rcu_read_unlock();
+ /* Gets called for ingress frames as well as egress from master port.
+ * So check and increment stats for master port only here.
+ */
+ if (port->type == HSR_PT_MASTER) {
+ port->dev->stats.tx_packets++;
+ port->dev->stats.tx_bytes += skb->len;
+ }
+
+ kfree_skb(frame.skb_hsr);
+ kfree_skb(frame.skb_prp);
+ kfree_skb(frame.skb_std);
+ return;
+
+out_drop:
+ rcu_read_unlock();
+ port->dev->stats.tx_dropped++;
+ kfree_skb(skb);
+}
diff --git a/net/hsr/hsr_forward.h b/net/hsr/hsr_forward.h
new file mode 100644
index 000000000..008f45786
--- /dev/null
+++ b/net/hsr/hsr_forward.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2011-2014 Autronica Fire and Security AS
+ *
+ * Author(s):
+ * 2011-2014 Arvid Brodin, arvid.brodin@alten.se
+ *
+ * include file for HSR and PRP.
+ */
+
+#ifndef __HSR_FORWARD_H
+#define __HSR_FORWARD_H
+
+#include <linux/netdevice.h>
+#include "hsr_main.h"
+
+void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port);
+struct sk_buff *prp_create_tagged_frame(struct hsr_frame_info *frame,
+ struct hsr_port *port);
+struct sk_buff *hsr_create_tagged_frame(struct hsr_frame_info *frame,
+ struct hsr_port *port);
+struct sk_buff *hsr_get_untagged_frame(struct hsr_frame_info *frame,
+ struct hsr_port *port);
+struct sk_buff *prp_get_untagged_frame(struct hsr_frame_info *frame,
+ struct hsr_port *port);
+bool prp_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port);
+int prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
+ struct hsr_frame_info *frame);
+int hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
+ struct hsr_frame_info *frame);
+#endif /* __HSR_FORWARD_H */
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
new file mode 100644
index 000000000..afc97d65c
--- /dev/null
+++ b/net/hsr/hsr_framereg.c
@@ -0,0 +1,594 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2011-2014 Autronica Fire and Security AS
+ *
+ * Author(s):
+ * 2011-2014 Arvid Brodin, arvid.brodin@alten.se
+ *
+ * The HSR spec says never to forward the same frame twice on the same
+ * interface. A frame is identified by its source MAC address and its HSR
+ * sequence number. This code keeps track of senders and their sequence numbers
+ * to allow filtering of duplicate frames, and to detect HSR ring errors.
+ * Same code handles filtering of duplicates for PRP as well.
+ */
+
+#include <linux/if_ether.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/rculist.h>
+#include "hsr_main.h"
+#include "hsr_framereg.h"
+#include "hsr_netlink.h"
+
+/* TODO: use hash lists for mac addresses (linux/jhash.h)? */
+
+/* seq_nr_after(a, b) - return true if a is after (higher in sequence than) b,
+ * false otherwise.
+ */
+static bool seq_nr_after(u16 a, u16 b)
+{
+ /* Remove inconsistency where
+ * seq_nr_after(a, b) == seq_nr_before(a, b)
+ */
+ if ((int)b - a == 32768)
+ return false;
+
+ return (((s16)(b - a)) < 0);
+}
+
+#define seq_nr_before(a, b) seq_nr_after((b), (a))
+#define seq_nr_before_or_eq(a, b) (!seq_nr_after((a), (b)))
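+
+/* Example of the wrap-around handling: seq_nr_after(1, 65534) is true (1 is
+ * "newer" than 65534 modulo 2^16) while seq_nr_after(65534, 1) is false; and
+ * when a and b are exactly 0x8000 apart, the explicit check above makes only
+ * one of seq_nr_after(a, b) / seq_nr_before(a, b) true.
+ */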
+
+bool hsr_addr_is_self(struct hsr_priv *hsr, unsigned char *addr)
+{
+ struct hsr_node *node;
+
+ node = list_first_or_null_rcu(&hsr->self_node_db, struct hsr_node,
+ mac_list);
+ if (!node) {
+ WARN_ONCE(1, "HSR: No self node\n");
+ return false;
+ }
+
+ if (ether_addr_equal(addr, node->macaddress_A))
+ return true;
+ if (ether_addr_equal(addr, node->macaddress_B))
+ return true;
+
+ return false;
+}
+
+/* Search for mac entry. Caller must hold rcu read lock.
+ */
+static struct hsr_node *find_node_by_addr_A(struct list_head *node_db,
+ const unsigned char addr[ETH_ALEN])
+{
+ struct hsr_node *node;
+
+ list_for_each_entry_rcu(node, node_db, mac_list) {
+ if (ether_addr_equal(node->macaddress_A, addr))
+ return node;
+ }
+
+ return NULL;
+}
+
+/* Helper for device init; the self_node_db is used in hsr_rcv() to recognize
+ * frames from self that have been looped over the HSR ring.
+ */
+int hsr_create_self_node(struct hsr_priv *hsr,
+ unsigned char addr_a[ETH_ALEN],
+ unsigned char addr_b[ETH_ALEN])
+{
+ struct list_head *self_node_db = &hsr->self_node_db;
+ struct hsr_node *node, *oldnode;
+
+ node = kmalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ ether_addr_copy(node->macaddress_A, addr_a);
+ ether_addr_copy(node->macaddress_B, addr_b);
+
+ spin_lock_bh(&hsr->list_lock);
+ oldnode = list_first_or_null_rcu(self_node_db,
+ struct hsr_node, mac_list);
+ if (oldnode) {
+ list_replace_rcu(&oldnode->mac_list, &node->mac_list);
+ spin_unlock_bh(&hsr->list_lock);
+ kfree_rcu(oldnode, rcu_head);
+ } else {
+ list_add_tail_rcu(&node->mac_list, self_node_db);
+ spin_unlock_bh(&hsr->list_lock);
+ }
+
+ return 0;
+}
+
+void hsr_del_self_node(struct hsr_priv *hsr)
+{
+ struct list_head *self_node_db = &hsr->self_node_db;
+ struct hsr_node *node;
+
+ spin_lock_bh(&hsr->list_lock);
+ node = list_first_or_null_rcu(self_node_db, struct hsr_node, mac_list);
+ if (node) {
+ list_del_rcu(&node->mac_list);
+ kfree_rcu(node, rcu_head);
+ }
+ spin_unlock_bh(&hsr->list_lock);
+}
+
+void hsr_del_nodes(struct list_head *node_db)
+{
+ struct hsr_node *node;
+ struct hsr_node *tmp;
+
+ list_for_each_entry_safe(node, tmp, node_db, mac_list)
+ kfree(node);
+}
+
+void prp_handle_san_frame(bool san, enum hsr_port_type port,
+ struct hsr_node *node)
+{
+ /* Mark if the SAN node is over LAN_A or LAN_B */
+ if (port == HSR_PT_SLAVE_A) {
+ node->san_a = true;
+ return;
+ }
+
+ if (port == HSR_PT_SLAVE_B)
+ node->san_b = true;
+}
+
+/* Allocate an hsr_node and add it to node_db. 'addr' is the node's address_A;
+ * seq_out is used to initialize filtering of outgoing duplicate frames
+ * originating from the newly added node.
+ */
+static struct hsr_node *hsr_add_node(struct hsr_priv *hsr,
+ struct list_head *node_db,
+ unsigned char addr[],
+ u16 seq_out, bool san,
+ enum hsr_port_type rx_port)
+{
+ struct hsr_node *new_node, *node;
+ unsigned long now;
+ int i;
+
+ new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
+ if (!new_node)
+ return NULL;
+
+ ether_addr_copy(new_node->macaddress_A, addr);
+ spin_lock_init(&new_node->seq_out_lock);
+
+ /* We are only interested in time diffs here, so use current jiffies
+ * as initialization. (0 could trigger a spurious ring error warning).
+ */
+ now = jiffies;
+ for (i = 0; i < HSR_PT_PORTS; i++) {
+ new_node->time_in[i] = now;
+ new_node->time_out[i] = now;
+ }
+ for (i = 0; i < HSR_PT_PORTS; i++)
+ new_node->seq_out[i] = seq_out;
+
+ if (san && hsr->proto_ops->handle_san_frame)
+ hsr->proto_ops->handle_san_frame(san, rx_port, new_node);
+
+ spin_lock_bh(&hsr->list_lock);
+ list_for_each_entry_rcu(node, node_db, mac_list,
+ lockdep_is_held(&hsr->list_lock)) {
+ if (ether_addr_equal(node->macaddress_A, addr))
+ goto out;
+ if (ether_addr_equal(node->macaddress_B, addr))
+ goto out;
+ }
+ list_add_tail_rcu(&new_node->mac_list, node_db);
+ spin_unlock_bh(&hsr->list_lock);
+ return new_node;
+out:
+ spin_unlock_bh(&hsr->list_lock);
+ kfree(new_node);
+ return node;
+}
+
+void prp_update_san_info(struct hsr_node *node, bool is_sup)
+{
+ if (!is_sup)
+ return;
+
+ node->san_a = false;
+ node->san_b = false;
+}
+
+/* Get the hsr_node from which 'skb' was sent.
+ */
+struct hsr_node *hsr_get_node(struct hsr_port *port, struct list_head *node_db,
+ struct sk_buff *skb, bool is_sup,
+ enum hsr_port_type rx_port)
+{
+ struct hsr_priv *hsr = port->hsr;
+ struct hsr_node *node;
+ struct ethhdr *ethhdr;
+ struct prp_rct *rct;
+ bool san = false;
+ u16 seq_out;
+
+ if (!skb_mac_header_was_set(skb))
+ return NULL;
+
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
+
+ list_for_each_entry_rcu(node, node_db, mac_list) {
+ if (ether_addr_equal(node->macaddress_A, ethhdr->h_source)) {
+ if (hsr->proto_ops->update_san_info)
+ hsr->proto_ops->update_san_info(node, is_sup);
+ return node;
+ }
+ if (ether_addr_equal(node->macaddress_B, ethhdr->h_source)) {
+ if (hsr->proto_ops->update_san_info)
+ hsr->proto_ops->update_san_info(node, is_sup);
+ return node;
+ }
+ }
+
+ /* Any node connected to the HSR/PRP device may get a node entry
+ * created here.
+ */
+ if (ethhdr->h_proto == htons(ETH_P_PRP) ||
+ ethhdr->h_proto == htons(ETH_P_HSR)) {
+ /* Use the existing sequence_nr from the tag as starting point
+ * for filtering duplicate frames.
+ */
+ seq_out = hsr_get_skb_sequence_nr(skb) - 1;
+ } else {
+ rct = skb_get_PRP_rct(skb);
+ if (rct && prp_check_lsdu_size(skb, rct, is_sup)) {
+ seq_out = prp_get_skb_sequence_nr(rct);
+ } else {
+ if (rx_port != HSR_PT_MASTER)
+ san = true;
+ seq_out = HSR_SEQNR_START;
+ }
+ }
+
+ return hsr_add_node(hsr, node_db, ethhdr->h_source, seq_out,
+ san, rx_port);
+}
+
+/* Use the Supervision frame's info about a possible macaddress_B for merging
+ * nodes that have previously had their macaddress_B registered as a separate
+ * node.
+ */
+void hsr_handle_sup_frame(struct hsr_frame_info *frame)
+{
+ struct hsr_node *node_curr = frame->node_src;
+ struct hsr_port *port_rcv = frame->port_rcv;
+ struct hsr_priv *hsr = port_rcv->hsr;
+ struct hsr_sup_payload *hsr_sp;
+ struct hsr_node *node_real;
+ struct sk_buff *skb = NULL;
+ struct list_head *node_db;
+ struct ethhdr *ethhdr;
+ int i;
+
+ /* Here either frame->skb_hsr or frame->skb_prp should be
+ * valid, as a supervision frame will always have protocol
+ * header info.
+ */
+ if (frame->skb_hsr)
+ skb = frame->skb_hsr;
+ else if (frame->skb_prp)
+ skb = frame->skb_prp;
+ if (!skb)
+ return;
+
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
+
+ /* Leave the ethernet header. */
+ skb_pull(skb, sizeof(struct ethhdr));
+
+ /* And leave the HSR tag. */
+ if (ethhdr->h_proto == htons(ETH_P_HSR))
+ skb_pull(skb, sizeof(struct hsr_tag));
+
+ /* And leave the HSR sup tag. */
+ skb_pull(skb, sizeof(struct hsr_sup_tag));
+
+ hsr_sp = (struct hsr_sup_payload *)skb->data;
+
+ /* Merge node_curr (registered on macaddress_B) into node_real */
+ node_db = &port_rcv->hsr->node_db;
+ node_real = find_node_by_addr_A(node_db, hsr_sp->macaddress_A);
+ if (!node_real)
+ /* No frame received from AddrA of this node yet */
+ node_real = hsr_add_node(hsr, node_db, hsr_sp->macaddress_A,
+ HSR_SEQNR_START - 1, true,
+ port_rcv->type);
+ if (!node_real)
+ goto done; /* No mem */
+ if (node_real == node_curr)
+ /* Node has already been merged */
+ goto done;
+
+ ether_addr_copy(node_real->macaddress_B, ethhdr->h_source);
+ spin_lock_bh(&node_real->seq_out_lock);
+ for (i = 0; i < HSR_PT_PORTS; i++) {
+ if (!node_curr->time_in_stale[i] &&
+ time_after(node_curr->time_in[i], node_real->time_in[i])) {
+ node_real->time_in[i] = node_curr->time_in[i];
+ node_real->time_in_stale[i] =
+ node_curr->time_in_stale[i];
+ }
+ if (seq_nr_after(node_curr->seq_out[i], node_real->seq_out[i]))
+ node_real->seq_out[i] = node_curr->seq_out[i];
+ }
+ spin_unlock_bh(&node_real->seq_out_lock);
+ node_real->addr_B_port = port_rcv->type;
+
+ spin_lock_bh(&hsr->list_lock);
+ list_del_rcu(&node_curr->mac_list);
+ spin_unlock_bh(&hsr->list_lock);
+ kfree_rcu(node_curr, rcu_head);
+
+done:
+ /* PRP uses v0 header */
+ if (ethhdr->h_proto == htons(ETH_P_HSR))
+ skb_push(skb, sizeof(struct hsrv1_ethhdr_sp));
+ else
+ skb_push(skb, sizeof(struct hsrv0_ethhdr_sp));
+}
+
+/* 'skb' is a frame meant for this host, that is to be passed to upper layers.
+ *
+ * If the frame was sent by a node's B interface, replace the source
+ * address with that node's "official" address (macaddress_A) so that upper
+ * layers recognize where it came from.
+ */
+void hsr_addr_subst_source(struct hsr_node *node, struct sk_buff *skb)
+{
+ if (!skb_mac_header_was_set(skb)) {
+ WARN_ONCE(1, "%s: Mac header not set\n", __func__);
+ return;
+ }
+
+ memcpy(&eth_hdr(skb)->h_source, node->macaddress_A, ETH_ALEN);
+}
+
+/* 'skb' is a frame meant for another host.
+ * 'port' is the outgoing interface
+ *
+ * Substitute the target (dest) MAC address if necessary, so that it matches the
+ * recipient interface MAC address, regardless of whether that is the
+ * recipient's A or B interface.
+ * This is needed to keep the packets flowing through switches that learn on
+ * which "side" the different interfaces are.
+ */
+void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
+ struct hsr_port *port)
+{
+ struct hsr_node *node_dst;
+
+ if (!skb_mac_header_was_set(skb)) {
+ WARN_ONCE(1, "%s: Mac header not set\n", __func__);
+ return;
+ }
+
+ if (!is_unicast_ether_addr(eth_hdr(skb)->h_dest))
+ return;
+
+ node_dst = find_node_by_addr_A(&port->hsr->node_db,
+ eth_hdr(skb)->h_dest);
+ if (!node_dst) {
+ if (port->hsr->prot_version != PRP_V1 && net_ratelimit())
+ netdev_err(skb->dev, "%s: Unknown node\n", __func__);
+ return;
+ }
+ if (port->type != node_dst->addr_B_port)
+ return;
+
+ if (is_valid_ether_addr(node_dst->macaddress_B))
+ ether_addr_copy(eth_hdr(skb)->h_dest, node_dst->macaddress_B);
+}
+
+void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port,
+ u16 sequence_nr)
+{
+ /* Don't register incoming frames without a valid sequence number. This
+ * ensures entries of restarted nodes get pruned so that they can
+ * re-register and resume communications.
+ */
+ if (seq_nr_before(sequence_nr, node->seq_out[port->type]))
+ return;
+
+ node->time_in[port->type] = jiffies;
+ node->time_in_stale[port->type] = false;
+}
+
+/* 'skb' is an HSR Ethernet frame (with an HSR tag inserted), with a valid
+ * ethhdr->h_source address and skb->mac_header set.
+ *
+ * Return:
+ * 1 if frame can be shown to have been sent recently on this interface,
+ * 0 otherwise, or
+ * negative error code on error
+ */
+int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node,
+ u16 sequence_nr)
+{
+ spin_lock_bh(&node->seq_out_lock);
+ if (seq_nr_before_or_eq(sequence_nr, node->seq_out[port->type]) &&
+ time_is_after_jiffies(node->time_out[port->type] +
+ msecs_to_jiffies(HSR_ENTRY_FORGET_TIME))) {
+ spin_unlock_bh(&node->seq_out_lock);
+ return 1;
+ }
+
+ node->time_out[port->type] = jiffies;
+ node->seq_out[port->type] = sequence_nr;
+ spin_unlock_bh(&node->seq_out_lock);
+ return 0;
+}
+
+static struct hsr_port *get_late_port(struct hsr_priv *hsr,
+ struct hsr_node *node)
+{
+ if (node->time_in_stale[HSR_PT_SLAVE_A])
+ return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
+ if (node->time_in_stale[HSR_PT_SLAVE_B])
+ return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
+
+ if (time_after(node->time_in[HSR_PT_SLAVE_B],
+ node->time_in[HSR_PT_SLAVE_A] +
+ msecs_to_jiffies(MAX_SLAVE_DIFF)))
+ return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
+ if (time_after(node->time_in[HSR_PT_SLAVE_A],
+ node->time_in[HSR_PT_SLAVE_B] +
+ msecs_to_jiffies(MAX_SLAVE_DIFF)))
+ return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
+
+ return NULL;
+}
+
+/* Remove stale sequence_nr records. Called by timer every
+ * HSR_LIFE_CHECK_INTERVAL (two seconds or so).
+ */
+void hsr_prune_nodes(struct timer_list *t)
+{
+ struct hsr_priv *hsr = from_timer(hsr, t, prune_timer);
+ struct hsr_node *node;
+ struct hsr_node *tmp;
+ struct hsr_port *port;
+ unsigned long timestamp;
+ unsigned long time_a, time_b;
+
+ spin_lock_bh(&hsr->list_lock);
+ list_for_each_entry_safe(node, tmp, &hsr->node_db, mac_list) {
+ /* Don't prune own node. Neither time_in[HSR_PT_SLAVE_A]
+ * nor time_in[HSR_PT_SLAVE_B] will ever be updated for
+ * the master port; without this check the master node would
+ * be repeatedly pruned, leading to packet loss.
+ */
+ if (hsr_addr_is_self(hsr, node->macaddress_A))
+ continue;
+
+ /* Shorthand */
+ time_a = node->time_in[HSR_PT_SLAVE_A];
+ time_b = node->time_in[HSR_PT_SLAVE_B];
+
+ /* Check for timestamps old enough to risk wrap-around */
+ if (time_after(jiffies, time_a + MAX_JIFFY_OFFSET / 2))
+ node->time_in_stale[HSR_PT_SLAVE_A] = true;
+ if (time_after(jiffies, time_b + MAX_JIFFY_OFFSET / 2))
+ node->time_in_stale[HSR_PT_SLAVE_B] = true;
+
+ /* Get age of newest frame from node.
+ * At least one time_in is OK here; nodes get pruned long
+ * before both time_ins can get stale
+ */
+ timestamp = time_a;
+ if (node->time_in_stale[HSR_PT_SLAVE_A] ||
+ (!node->time_in_stale[HSR_PT_SLAVE_B] &&
+ time_after(time_b, time_a)))
+ timestamp = time_b;
+
+ /* Warn of ring error only as long as we get frames at all */
+ if (time_is_after_jiffies(timestamp +
+ msecs_to_jiffies(1.5 * MAX_SLAVE_DIFF))) {
+ rcu_read_lock();
+ port = get_late_port(hsr, node);
+ if (port)
+ hsr_nl_ringerror(hsr, node->macaddress_A, port);
+ rcu_read_unlock();
+ }
+
+ /* Prune old entries */
+ if (time_is_before_jiffies(timestamp +
+ msecs_to_jiffies(HSR_NODE_FORGET_TIME))) {
+ hsr_nl_nodedown(hsr, node->macaddress_A);
+ list_del_rcu(&node->mac_list);
+ /* Note that we need to free this entry later: */
+ kfree_rcu(node, rcu_head);
+ }
+ }
+ spin_unlock_bh(&hsr->list_lock);
+
+ /* Restart timer */
+ mod_timer(&hsr->prune_timer,
+ jiffies + msecs_to_jiffies(PRUNE_PERIOD));
+}
+
+void *hsr_get_next_node(struct hsr_priv *hsr, void *_pos,
+ unsigned char addr[ETH_ALEN])
+{
+ struct hsr_node *node;
+
+ if (!_pos) {
+ node = list_first_or_null_rcu(&hsr->node_db,
+ struct hsr_node, mac_list);
+ if (node)
+ ether_addr_copy(addr, node->macaddress_A);
+ return node;
+ }
+
+ node = _pos;
+ list_for_each_entry_continue_rcu(node, &hsr->node_db, mac_list) {
+ ether_addr_copy(addr, node->macaddress_A);
+ return node;
+ }
+
+ return NULL;
+}
+
+int hsr_get_node_data(struct hsr_priv *hsr,
+ const unsigned char *addr,
+ unsigned char addr_b[ETH_ALEN],
+ unsigned int *addr_b_ifindex,
+ int *if1_age,
+ u16 *if1_seq,
+ int *if2_age,
+ u16 *if2_seq)
+{
+ struct hsr_node *node;
+ struct hsr_port *port;
+ unsigned long tdiff;
+
+ node = find_node_by_addr_A(&hsr->node_db, addr);
+ if (!node)
+ return -ENOENT;
+
+ ether_addr_copy(addr_b, node->macaddress_B);
+
+ tdiff = jiffies - node->time_in[HSR_PT_SLAVE_A];
+ if (node->time_in_stale[HSR_PT_SLAVE_A])
+ *if1_age = INT_MAX;
+#if HZ <= MSEC_PER_SEC
+ else if (tdiff > msecs_to_jiffies(INT_MAX))
+ *if1_age = INT_MAX;
+#endif
+ else
+ *if1_age = jiffies_to_msecs(tdiff);
+
+ tdiff = jiffies - node->time_in[HSR_PT_SLAVE_B];
+ if (node->time_in_stale[HSR_PT_SLAVE_B])
+ *if2_age = INT_MAX;
+#if HZ <= MSEC_PER_SEC
+ else if (tdiff > msecs_to_jiffies(INT_MAX))
+ *if2_age = INT_MAX;
+#endif
+ else
+ *if2_age = jiffies_to_msecs(tdiff);
+
+ /* Present sequence numbers as if they were incoming on interface */
+ *if1_seq = node->seq_out[HSR_PT_SLAVE_B];
+ *if2_seq = node->seq_out[HSR_PT_SLAVE_A];
+
+ if (node->addr_B_port != HSR_PT_NONE) {
+ port = hsr_port_get_hsr(hsr, node->addr_B_port);
+ *addr_b_ifindex = port->dev->ifindex;
+ } else {
+ *addr_b_ifindex = -1;
+ }
+
+ return 0;
+}
diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h
new file mode 100644
index 000000000..5a771cb3f
--- /dev/null
+++ b/net/hsr/hsr_framereg.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2011-2014 Autronica Fire and Security AS
+ *
+ * Author(s):
+ * 2011-2014 Arvid Brodin, arvid.brodin@alten.se
+ *
+ * include file for HSR and PRP.
+ */
+
+#ifndef __HSR_FRAMEREG_H
+#define __HSR_FRAMEREG_H
+
+#include "hsr_main.h"
+
+struct hsr_node;
+
+struct hsr_frame_info {
+ struct sk_buff *skb_std;
+ struct sk_buff *skb_hsr;
+ struct sk_buff *skb_prp;
+ struct hsr_port *port_rcv;
+ struct hsr_node *node_src;
+ u16 sequence_nr;
+ bool is_supervision;
+ bool is_vlan;
+ bool is_local_dest;
+ bool is_local_exclusive;
+ bool is_from_san;
+};
+
+void hsr_del_self_node(struct hsr_priv *hsr);
+void hsr_del_nodes(struct list_head *node_db);
+struct hsr_node *hsr_get_node(struct hsr_port *port, struct list_head *node_db,
+ struct sk_buff *skb, bool is_sup,
+ enum hsr_port_type rx_port);
+void hsr_handle_sup_frame(struct hsr_frame_info *frame);
+bool hsr_addr_is_self(struct hsr_priv *hsr, unsigned char *addr);
+
+void hsr_addr_subst_source(struct hsr_node *node, struct sk_buff *skb);
+void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
+ struct hsr_port *port);
+
+void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port,
+ u16 sequence_nr);
+int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node,
+ u16 sequence_nr);
+
+void hsr_prune_nodes(struct timer_list *t);
+
+int hsr_create_self_node(struct hsr_priv *hsr,
+ unsigned char addr_a[ETH_ALEN],
+ unsigned char addr_b[ETH_ALEN]);
+
+void *hsr_get_next_node(struct hsr_priv *hsr, void *_pos,
+ unsigned char addr[ETH_ALEN]);
+
+int hsr_get_node_data(struct hsr_priv *hsr,
+ const unsigned char *addr,
+ unsigned char addr_b[ETH_ALEN],
+ unsigned int *addr_b_ifindex,
+ int *if1_age,
+ u16 *if1_seq,
+ int *if2_age,
+ u16 *if2_seq);
+
+void prp_handle_san_frame(bool san, enum hsr_port_type port,
+ struct hsr_node *node);
+void prp_update_san_info(struct hsr_node *node, bool is_sup);
+
+struct hsr_node {
+ struct list_head mac_list;
+ /* Protect R/W access to seq_out */
+ spinlock_t seq_out_lock;
+ unsigned char macaddress_A[ETH_ALEN];
+ unsigned char macaddress_B[ETH_ALEN];
+ /* Local slave through which AddrB frames are received from this node */
+ enum hsr_port_type addr_B_port;
+ unsigned long time_in[HSR_PT_PORTS];
+ bool time_in_stale[HSR_PT_PORTS];
+ unsigned long time_out[HSR_PT_PORTS];
+ /* True if the node is a SAN (Singly Attached Node), per LAN */
+ bool san_a;
+ bool san_b;
+ u16 seq_out[HSR_PT_PORTS];
+ struct rcu_head rcu_head;
+};
+
+#endif /* __HSR_FRAMEREG_H */
diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
new file mode 100644
index 000000000..2fd1976e5
--- /dev/null
+++ b/net/hsr/hsr_main.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2011-2014 Autronica Fire and Security AS
+ *
+ * Author(s):
+ * 2011-2014 Arvid Brodin, arvid.brodin@alten.se
+ *
+ * Event handling for HSR and PRP devices.
+ */
+
+#include <linux/netdevice.h>
+#include <net/rtnetlink.h>
+#include <linux/rculist.h>
+#include <linux/timer.h>
+#include <linux/etherdevice.h>
+#include "hsr_main.h"
+#include "hsr_device.h"
+#include "hsr_netlink.h"
+#include "hsr_framereg.h"
+#include "hsr_slave.h"
+
+static bool hsr_slave_empty(struct hsr_priv *hsr)
+{
+ struct hsr_port *port;
+
+ hsr_for_each_port(hsr, port)
+ if (port->type != HSR_PT_MASTER)
+ return false;
+ return true;
+}
+
+static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ struct hsr_port *port, *master;
+ struct net_device *dev;
+ struct hsr_priv *hsr;
+ LIST_HEAD(list_kill);
+ int mtu_max;
+ int res;
+
+ dev = netdev_notifier_info_to_dev(ptr);
+ port = hsr_port_get_rtnl(dev);
+ if (!port) {
+ if (!is_hsr_master(dev))
+ return NOTIFY_DONE; /* Not an HSR device */
+ hsr = netdev_priv(dev);
+ port = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+ if (!port) {
+ /* Resend of notification concerning removed device? */
+ return NOTIFY_DONE;
+ }
+ } else {
+ hsr = port->hsr;
+ }
+
+ switch (event) {
+ case NETDEV_UP: /* Administrative state UP */
+ case NETDEV_DOWN: /* Administrative state DOWN */
+ case NETDEV_CHANGE: /* Link (carrier) state changes */
+ hsr_check_carrier_and_operstate(hsr);
+ break;
+ case NETDEV_CHANGENAME:
+ if (is_hsr_master(dev))
+ hsr_debugfs_rename(dev);
+ break;
+ case NETDEV_CHANGEADDR:
+ if (port->type == HSR_PT_MASTER) {
+ /* This should not happen since there's no
+ * ndo_set_mac_address() for HSR devices - i.e. not
+ * supported.
+ */
+ break;
+ }
+
+ master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+
+ if (port->type == HSR_PT_SLAVE_A) {
+ ether_addr_copy(master->dev->dev_addr, dev->dev_addr);
+ call_netdevice_notifiers(NETDEV_CHANGEADDR,
+ master->dev);
+ }
+
+ /* Make sure we recognize frames from ourselves in hsr_rcv() */
+ port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
+ res = hsr_create_self_node(hsr,
+ master->dev->dev_addr,
+ port ?
+ port->dev->dev_addr :
+ master->dev->dev_addr);
+ if (res)
+ netdev_warn(master->dev,
+ "Could not update HSR node address.\n");
+ break;
+ case NETDEV_CHANGEMTU:
+ if (port->type == HSR_PT_MASTER)
+ break; /* Handled in ndo_change_mtu() */
+ mtu_max = hsr_get_max_mtu(port->hsr);
+ master = hsr_port_get_hsr(port->hsr, HSR_PT_MASTER);
+ master->dev->mtu = mtu_max;
+ break;
+ case NETDEV_UNREGISTER:
+ if (!is_hsr_master(dev)) {
+ master = hsr_port_get_hsr(port->hsr, HSR_PT_MASTER);
+ hsr_del_port(port);
+ if (hsr_slave_empty(master->hsr)) {
+ const struct rtnl_link_ops *ops;
+
+ ops = master->dev->rtnl_link_ops;
+ ops->dellink(master->dev, &list_kill);
+ unregister_netdevice_many(&list_kill);
+ }
+ }
+ break;
+ case NETDEV_PRE_TYPE_CHANGE:
+ /* HSR works only on Ethernet devices. Refuse to let a slave
+ * change its type.
+ */
+ return NOTIFY_BAD;
+ }
+
+ return NOTIFY_DONE;
+}
+
+struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt)
+{
+ struct hsr_port *port;
+
+ hsr_for_each_port(hsr, port)
+ if (port->type == pt)
+ return port;
+ return NULL;
+}
+
+static struct notifier_block hsr_nb = {
+ .notifier_call = hsr_netdev_notify, /* Slave event notifications */
+};
+
+static int __init hsr_init(void)
+{
+ int res;
+
+ BUILD_BUG_ON(sizeof(struct hsr_tag) != HSR_HLEN);
+
+ register_netdevice_notifier(&hsr_nb);
+ res = hsr_netlink_init();
+
+ return res;
+}
+
+static void __exit hsr_exit(void)
+{
+ hsr_netlink_exit();
+ hsr_debugfs_remove_root();
+ unregister_netdevice_notifier(&hsr_nb);
+}
+
+module_init(hsr_init);
+module_exit(hsr_exit);
+MODULE_LICENSE("GPL");
diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
new file mode 100644
index 000000000..9a25a5d34
--- /dev/null
+++ b/net/hsr/hsr_main.h
@@ -0,0 +1,305 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2011-2014 Autronica Fire and Security AS
+ *
+ * Author(s):
+ * 2011-2014 Arvid Brodin, arvid.brodin@alten.se
+ *
+ * include file for HSR and PRP.
+ */
+
+#ifndef __HSR_PRIVATE_H
+#define __HSR_PRIVATE_H
+
+#include <linux/netdevice.h>
+#include <linux/list.h>
+#include <linux/if_vlan.h>
+
+/* Time constants as specified in the HSR specification (IEC-62439-3 2010)
+ * Table 8.
+ * All values in milliseconds.
+ */
+#define HSR_LIFE_CHECK_INTERVAL 2000 /* ms */
+#define HSR_NODE_FORGET_TIME 60000 /* ms */
+#define HSR_ANNOUNCE_INTERVAL 100 /* ms */
+#define HSR_ENTRY_FORGET_TIME 400 /* ms */
+
+/* How much may the slave1 and slave2 timestamps of the latest frame received
+ * from each node differ before we warn of a communication problem?
+ */
+#define MAX_SLAVE_DIFF 3000 /* ms */
+#define HSR_SEQNR_START (USHRT_MAX - 1024)
+#define HSR_SUP_SEQNR_START (HSR_SEQNR_START / 2)
+
+/* How often shall we check for broken ring and remove node entries older than
+ * HSR_NODE_FORGET_TIME?
+ */
+#define PRUNE_PERIOD 3000 /* ms */
+
+#define HSR_TLV_ANNOUNCE 22
+#define HSR_TLV_LIFE_CHECK 23
+/* PRP V1 life check for Duplicate Discard */
+#define PRP_TLV_LIFE_CHECK_DD 20
+/* PRP V1 life check for Duplicate Accept */
+#define PRP_TLV_LIFE_CHECK_DA 21
+
+/* HSR Tag.
+ * As defined in IEC-62439-3:2010, the HSR tag is really { ethertype = 0x88FB,
+ * path, LSDU_size, sequence Nr }. But we let eth_header() create { h_dest,
+ * h_source, h_proto = 0x88FB }, and add { path, LSDU_size, sequence Nr,
+ * encapsulated protocol } instead.
+ *
+ * Field names as defined in the IEC:2010 standard for HSR.
+ */
+struct hsr_tag {
+ __be16 path_and_LSDU_size;
+ __be16 sequence_nr;
+ __be16 encap_proto;
+} __packed;
+
+#define HSR_HLEN 6
+
+#define HSR_V1_SUP_LSDUSIZE 52
+
+/* The helper functions below assume that 'path' occupies the 4 most
+ * significant bits of the 16-bit field shared by 'path' and 'LSDU_size' (or
+ * equivalently, the 4 most significant bits of HSR tag byte 14).
+ *
+ * This is unclear in the IEC specification; its definition of MAC addresses
+ * indicates the spec is written with the least significant bit first (to the
+ * left). This, however, would mean that the LSDU field would be split in two
+ * with the path field in-between, which seems strange. I'm guessing the MAC
+ * address definition is in error.
+ */
+
+static inline void set_hsr_tag_path(struct hsr_tag *ht, u16 path)
+{
+ ht->path_and_LSDU_size =
+ htons((ntohs(ht->path_and_LSDU_size) & 0x0FFF) | (path << 12));
+}
+
+static inline void set_hsr_tag_LSDU_size(struct hsr_tag *ht, u16 LSDU_size)
+{
+ ht->path_and_LSDU_size = htons((ntohs(ht->path_and_LSDU_size) &
+ 0xF000) | (LSDU_size & 0x0FFF));
+}
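+
+/* Worked example of the packing above (illustrative values): path = 0x1 and
+ * LSDU_size = 60 (0x03C) give path_and_LSDU_size = htons(0x103C) - the path
+ * in the 4 most significant bits, the LSDU size in the 12 least significant
+ * bits.
+ */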
+
+struct hsr_ethhdr {
+ struct ethhdr ethhdr;
+ struct hsr_tag hsr_tag;
+} __packed;
+
+struct hsr_vlan_ethhdr {
+ struct vlan_ethhdr vlanhdr;
+ struct hsr_tag hsr_tag;
+} __packed;
+
+/* HSR/PRP Supervision Frame data types.
+ * Field names as defined in the IEC:2010 standard for HSR.
+ */
+struct hsr_sup_tag {
+ __be16 path_and_HSR_ver;
+ __be16 sequence_nr;
+ __u8 HSR_TLV_type;
+ __u8 HSR_TLV_length;
+} __packed;
+
+struct hsr_sup_payload {
+ unsigned char macaddress_A[ETH_ALEN];
+} __packed;
+
+static inline void set_hsr_stag_path(struct hsr_sup_tag *hst, u16 path)
+{
+ set_hsr_tag_path((struct hsr_tag *)hst, path);
+}
+
+static inline void set_hsr_stag_HSR_ver(struct hsr_sup_tag *hst, u16 HSR_ver)
+{
+ set_hsr_tag_LSDU_size((struct hsr_tag *)hst, HSR_ver);
+}
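+
+/* Note that the supervision tag reuses the HSR tag bit layout above: 'path'
+ * goes in the 4 most significant bits and 'HSR_ver' in the 12 least
+ * significant bits of path_and_HSR_ver.
+ */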
+
+struct hsrv0_ethhdr_sp {
+ struct ethhdr ethhdr;
+ struct hsr_sup_tag hsr_sup;
+} __packed;
+
+struct hsrv1_ethhdr_sp {
+ struct ethhdr ethhdr;
+ struct hsr_tag hsr;
+ struct hsr_sup_tag hsr_sup;
+} __packed;
+
+enum hsr_port_type {
+ HSR_PT_NONE = 0, /* Must be 0, used by framereg */
+ HSR_PT_SLAVE_A,
+ HSR_PT_SLAVE_B,
+ HSR_PT_INTERLINK,
+ HSR_PT_MASTER,
+ HSR_PT_PORTS, /* This must be the last item in the enum */
+};
+
+/* PRP Redundancy Control Trailer (RCT).
+ * As defined in IEC-62439-4:2012, the PRP RCT is really { sequence Nr,
+ * LAN identifier (LanId), LSDU_size and PRP_suffix = 0x88FB }.
+ *
+ * Field names as defined in the IEC:2012 standard for PRP.
+ */
+struct prp_rct {
+ __be16 sequence_nr;
+ __be16 lan_id_and_LSDU_size;
+ __be16 PRP_suffix;
+} __packed;
+
+static inline u16 get_prp_LSDU_size(struct prp_rct *rct)
+{
+ return ntohs(rct->lan_id_and_LSDU_size) & 0x0FFF;
+}
+
+static inline void set_prp_lan_id(struct prp_rct *rct, u16 lan_id)
+{
+ rct->lan_id_and_LSDU_size = htons((ntohs(rct->lan_id_and_LSDU_size) &
+ 0x0FFF) | (lan_id << 12));
+}
+static inline void set_prp_LSDU_size(struct prp_rct *rct, u16 LSDU_size)
+{
+ rct->lan_id_and_LSDU_size = htons((ntohs(rct->lan_id_and_LSDU_size) &
+ 0xF000) | (LSDU_size & 0x0FFF));
+}
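+
+/* Worked example of the RCT packing above (illustrative values): lan_id = 0xA
+ * and LSDU_size = 60 (0x03C) give lan_id_and_LSDU_size = htons(0xA03C) - the
+ * LAN identifier in the 4 most significant bits, the LSDU size in the 12
+ * least significant bits.
+ */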
+
+struct hsr_port {
+ struct list_head port_list;
+ struct net_device *dev;
+ struct hsr_priv *hsr;
+ enum hsr_port_type type;
+};
+
+/* Used internally by the driver to differentiate the supported protocols */
+enum hsr_version {
+ HSR_V0 = 0,
+ HSR_V1,
+ PRP_V1,
+};
+
+struct hsr_frame_info;
+struct hsr_node;
+
+struct hsr_proto_ops {
+ /* format and send supervision frame */
+ void (*send_sv_frame)(struct hsr_port *port, unsigned long *interval);
+ void (*handle_san_frame)(bool san, enum hsr_port_type port,
+ struct hsr_node *node);
+ bool (*drop_frame)(struct hsr_frame_info *frame, struct hsr_port *port);
+ struct sk_buff * (*get_untagged_frame)(struct hsr_frame_info *frame,
+ struct hsr_port *port);
+ struct sk_buff * (*create_tagged_frame)(struct hsr_frame_info *frame,
+ struct hsr_port *port);
+ int (*fill_frame_info)(__be16 proto, struct sk_buff *skb,
+ struct hsr_frame_info *frame);
+ bool (*invalid_dan_ingress_frame)(__be16 protocol);
+ void (*update_san_info)(struct hsr_node *node, bool is_sup);
+};
+
+struct hsr_priv {
+ struct rcu_head rcu_head;
+ struct list_head ports;
+ struct list_head node_db; /* Known HSR nodes */
+ struct list_head self_node_db; /* MACs of slaves */
+ struct timer_list announce_timer; /* Supervision frame dispatch */
+ struct timer_list prune_timer;
+ int announce_count;
+ u16 sequence_nr;
+ u16 sup_sequence_nr; /* For HSRv1 separate seq_nr for supervision */
+ enum hsr_version prot_version; /* Indicate if HSRv0, HSRv1 or PRPv1 */
+ spinlock_t seqnr_lock; /* locking for sequence_nr */
+ spinlock_t list_lock; /* locking for node list */
+ struct hsr_proto_ops *proto_ops;
+#define PRP_LAN_ID 0x5 /* 0b1010 for LAN A and 0b1011 for LAN B. Bit 0 is set
+ * based on SLAVE_A or SLAVE_B
+ */
+ u8 net_id; /* for PRP, it occupies most significant 3 bits
+ * of lan_id
+ */
+ unsigned char sup_multicast_addr[ETH_ALEN];
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *node_tbl_root;
+#endif
+};
+
+#define hsr_for_each_port(hsr, port) \
+ list_for_each_entry_rcu((port), &(hsr)->ports, port_list)
+
+struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt);
+
+/* Caller must ensure skb is a valid HSR frame */
+static inline u16 hsr_get_skb_sequence_nr(struct sk_buff *skb)
+{
+ struct hsr_ethhdr *hsr_ethhdr;
+
+ hsr_ethhdr = (struct hsr_ethhdr *)skb_mac_header(skb);
+ return ntohs(hsr_ethhdr->hsr_tag.sequence_nr);
+}
+
+static inline struct prp_rct *skb_get_PRP_rct(struct sk_buff *skb)
+{
+ unsigned char *tail = skb_tail_pointer(skb) - HSR_HLEN;
+
+ struct prp_rct *rct = (struct prp_rct *)tail;
+
+ if (rct->PRP_suffix == htons(ETH_P_PRP))
+ return rct;
+
+ return NULL;
+}
+
+/* Assume caller has confirmed this skb is PRP suffixed */
+static inline u16 prp_get_skb_sequence_nr(struct prp_rct *rct)
+{
+ return ntohs(rct->sequence_nr);
+}
+
+static inline u16 get_prp_lan_id(struct prp_rct *rct)
+{
+ return ntohs(rct->lan_id_and_LSDU_size) >> 12;
+}
+
+/* assume there is a valid rct */
+static inline bool prp_check_lsdu_size(struct sk_buff *skb,
+ struct prp_rct *rct,
+ bool is_sup)
+{
+ struct ethhdr *ethhdr;
+ int expected_lsdu_size;
+
+ if (is_sup) {
+ expected_lsdu_size = HSR_V1_SUP_LSDUSIZE;
+ } else {
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
+ expected_lsdu_size = skb->len - 14; /* i.e. skb->len - ETH_HLEN */
+ if (ethhdr->h_proto == htons(ETH_P_8021Q))
+ expected_lsdu_size -= 4;
+ }
+
+ return (expected_lsdu_size == get_prp_LSDU_size(rct));
+}
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+void hsr_debugfs_rename(struct net_device *dev);
+void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev);
+void hsr_debugfs_term(struct hsr_priv *priv);
+void hsr_debugfs_create_root(void);
+void hsr_debugfs_remove_root(void);
+#else
+static inline void hsr_debugfs_rename(struct net_device *dev)
+{
+}
+static inline void hsr_debugfs_init(struct hsr_priv *priv,
+ struct net_device *hsr_dev)
+{}
+static inline void hsr_debugfs_term(struct hsr_priv *priv)
+{}
+static inline void hsr_debugfs_create_root(void)
+{}
+static inline void hsr_debugfs_remove_root(void)
+{}
+#endif
+
+#endif /* __HSR_PRIVATE_H */
diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
new file mode 100644
index 000000000..f3c8f91db
--- /dev/null
+++ b/net/hsr/hsr_netlink.c
@@ -0,0 +1,555 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2011-2014 Autronica Fire and Security AS
+ *
+ * Author(s):
+ * 2011-2014 Arvid Brodin, arvid.brodin@alten.se
+ *
+ * Routines for handling Netlink messages for HSR and PRP.
+ */
+
+#include "hsr_netlink.h"
+#include <linux/kernel.h>
+#include <net/rtnetlink.h>
+#include <net/genetlink.h>
+#include "hsr_main.h"
+#include "hsr_device.h"
+#include "hsr_framereg.h"
+
+static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = {
+ [IFLA_HSR_SLAVE1] = { .type = NLA_U32 },
+ [IFLA_HSR_SLAVE2] = { .type = NLA_U32 },
+ [IFLA_HSR_MULTICAST_SPEC] = { .type = NLA_U8 },
+ [IFLA_HSR_VERSION] = { .type = NLA_U8 },
+ [IFLA_HSR_SUPERVISION_ADDR] = { .len = ETH_ALEN },
+ [IFLA_HSR_SEQ_NR] = { .type = NLA_U16 },
+ [IFLA_HSR_PROTOCOL] = { .type = NLA_U8 },
+};
+
+/* Here, it seems a netdevice has already been allocated for us, and the
+ * hsr_dev_setup routine has been executed. Nice!
+ */
+static int hsr_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ enum hsr_version proto_version;
+ unsigned char multicast_spec;
+ u8 proto = HSR_PROTOCOL_HSR;
+ struct net_device *link[2];
+
+ if (!data) {
+ NL_SET_ERR_MSG_MOD(extack, "No slave devices specified");
+ return -EINVAL;
+ }
+ if (!data[IFLA_HSR_SLAVE1]) {
+ NL_SET_ERR_MSG_MOD(extack, "Slave1 device not specified");
+ return -EINVAL;
+ }
+ link[0] = __dev_get_by_index(src_net,
+ nla_get_u32(data[IFLA_HSR_SLAVE1]));
+ if (!link[0]) {
+ NL_SET_ERR_MSG_MOD(extack, "Slave1 does not exist");
+ return -EINVAL;
+ }
+ if (!data[IFLA_HSR_SLAVE2]) {
+ NL_SET_ERR_MSG_MOD(extack, "Slave2 device not specified");
+ return -EINVAL;
+ }
+ link[1] = __dev_get_by_index(src_net,
+ nla_get_u32(data[IFLA_HSR_SLAVE2]));
+ if (!link[1]) {
+ NL_SET_ERR_MSG_MOD(extack, "Slave2 does not exist");
+ return -EINVAL;
+ }
+
+ if (link[0] == link[1]) {
+ NL_SET_ERR_MSG_MOD(extack, "Slave1 and Slave2 are same");
+ return -EINVAL;
+ }
+
+ if (!data[IFLA_HSR_MULTICAST_SPEC])
+ multicast_spec = 0;
+ else
+ multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]);
+
+ if (data[IFLA_HSR_PROTOCOL])
+ proto = nla_get_u8(data[IFLA_HSR_PROTOCOL]);
+
+ if (proto >= HSR_PROTOCOL_MAX) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol");
+ return -EINVAL;
+ }
+
+ if (!data[IFLA_HSR_VERSION]) {
+ proto_version = HSR_V0;
+ } else {
+ if (proto == HSR_PROTOCOL_PRP) {
+ NL_SET_ERR_MSG_MOD(extack, "PRP version unsupported");
+ return -EINVAL;
+ }
+
+ proto_version = nla_get_u8(data[IFLA_HSR_VERSION]);
+ if (proto_version > HSR_V1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only HSR version 0/1 supported");
+ return -EINVAL;
+ }
+ }
+
+ if (proto == HSR_PROTOCOL_PRP)
+ proto_version = PRP_V1;
+
+ return hsr_dev_finalize(dev, link, multicast_spec, proto_version, extack);
+}
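+
+/* For reference, these attributes correspond to the iproute2 command line
+ * (illustrative interface names, assuming an iproute2 recent enough to know
+ * about HSR/PRP): "ip link add name hsr0 type hsr slave1 eth0 slave2 eth1
+ * supervision 45 version 1", with "proto 1" selecting PRP instead of HSR.
+ */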
+
+static void hsr_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct hsr_priv *hsr = netdev_priv(dev);
+
+ del_timer_sync(&hsr->prune_timer);
+ del_timer_sync(&hsr->announce_timer);
+
+ hsr_debugfs_term(hsr);
+ hsr_del_ports(hsr);
+
+ hsr_del_self_node(hsr);
+ hsr_del_nodes(&hsr->node_db);
+
+ unregister_netdevice_queue(dev, head);
+}
+
+static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct hsr_priv *hsr = netdev_priv(dev);
+ u8 proto = HSR_PROTOCOL_HSR;
+ struct hsr_port *port;
+
+ port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
+ if (port) {
+ if (nla_put_u32(skb, IFLA_HSR_SLAVE1, port->dev->ifindex))
+ goto nla_put_failure;
+ }
+
+ port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
+ if (port) {
+ if (nla_put_u32(skb, IFLA_HSR_SLAVE2, port->dev->ifindex))
+ goto nla_put_failure;
+ }
+
+ if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN,
+ hsr->sup_multicast_addr) ||
+ nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr->sequence_nr))
+ goto nla_put_failure;
+ if (hsr->prot_version == PRP_V1)
+ proto = HSR_PROTOCOL_PRP;
+ if (nla_put_u8(skb, IFLA_HSR_PROTOCOL, proto))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static struct rtnl_link_ops hsr_link_ops __read_mostly = {
+ .kind = "hsr",
+ .maxtype = IFLA_HSR_MAX,
+ .policy = hsr_policy,
+ .priv_size = sizeof(struct hsr_priv),
+ .setup = hsr_dev_setup,
+ .newlink = hsr_newlink,
+ .dellink = hsr_dellink,
+ .fill_info = hsr_fill_info,
+};
+
+/* attribute policy */
+static const struct nla_policy hsr_genl_policy[HSR_A_MAX + 1] = {
+ [HSR_A_NODE_ADDR] = { .len = ETH_ALEN },
+ [HSR_A_NODE_ADDR_B] = { .len = ETH_ALEN },
+ [HSR_A_IFINDEX] = { .type = NLA_U32 },
+ [HSR_A_IF1_AGE] = { .type = NLA_U32 },
+ [HSR_A_IF2_AGE] = { .type = NLA_U32 },
+ [HSR_A_IF1_SEQ] = { .type = NLA_U16 },
+ [HSR_A_IF2_SEQ] = { .type = NLA_U16 },
+};
+
+static struct genl_family hsr_genl_family;
+
+static const struct genl_multicast_group hsr_mcgrps[] = {
+ { .name = "hsr-network", },
+};
+
+/* This is called if, for some node with MAC address addr, we only get frames
+ * over one of the slave interfaces. This would indicate an open network ring
+ * (i.e. a link has failed somewhere).
+ */
+void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN],
+ struct hsr_port *port)
+{
+ struct sk_buff *skb;
+ void *msg_head;
+ struct hsr_port *master;
+ int res;
+
+ skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+ if (!skb)
+ goto fail;
+
+ msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0,
+ HSR_C_RING_ERROR);
+ if (!msg_head)
+ goto nla_put_failure;
+
+ res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
+ if (res < 0)
+ goto nla_put_failure;
+
+ res = nla_put_u32(skb, HSR_A_IFINDEX, port->dev->ifindex);
+ if (res < 0)
+ goto nla_put_failure;
+
+ genlmsg_end(skb, msg_head);
+ genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);
+
+ return;
+
+nla_put_failure:
+ kfree_skb(skb);
+
+fail:
+ rcu_read_lock();
+ master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+ netdev_warn(master->dev, "Could not send HSR ring error message\n");
+ rcu_read_unlock();
+}
+
+/* This is called when we haven't heard from the node with MAC address addr for
+ * some time (just before the node is removed from the node table/list).
+ */
+void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN])
+{
+ struct sk_buff *skb;
+ void *msg_head;
+ struct hsr_port *master;
+ int res;
+
+ skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+ if (!skb)
+ goto fail;
+
+ msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_NODE_DOWN);
+ if (!msg_head)
+ goto nla_put_failure;
+
+ res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
+ if (res < 0)
+ goto nla_put_failure;
+
+ genlmsg_end(skb, msg_head);
+ genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);
+
+ return;
+
+nla_put_failure:
+ kfree_skb(skb);
+
+fail:
+ rcu_read_lock();
+ master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+ netdev_warn(master->dev, "Could not send HSR node down\n");
+ rcu_read_unlock();
+}
+
+/* HSR_C_GET_NODE_STATUS lets userspace query the internal HSR node table
+ * about the status of a specific node in the network, defined by its MAC
+ * address.
+ *
+ * Input: hsr ifindex, node mac address
+ * Output: hsr ifindex, node mac address (copied from request),
+ * age of latest frame from node over slave 1, slave 2 [ms]
+ */
+static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
+{
+ /* For receiving */
+ struct nlattr *na;
+ struct net_device *hsr_dev;
+
+ /* For sending */
+ struct sk_buff *skb_out;
+ void *msg_head;
+ struct hsr_priv *hsr;
+ struct hsr_port *port;
+ unsigned char hsr_node_addr_b[ETH_ALEN];
+ int hsr_node_if1_age;
+ u16 hsr_node_if1_seq;
+ int hsr_node_if2_age;
+ u16 hsr_node_if2_seq;
+ int addr_b_ifindex;
+ int res;
+
+ if (!info)
+ goto invalid;
+
+ na = info->attrs[HSR_A_IFINDEX];
+ if (!na)
+ goto invalid;
+ na = info->attrs[HSR_A_NODE_ADDR];
+ if (!na)
+ goto invalid;
+
+ rcu_read_lock();
+ hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
+ nla_get_u32(info->attrs[HSR_A_IFINDEX]));
+ if (!hsr_dev)
+ goto rcu_unlock;
+ if (!is_hsr_master(hsr_dev))
+ goto rcu_unlock;
+
+ /* Send reply */
+ skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+ if (!skb_out) {
+ res = -ENOMEM;
+ goto fail;
+ }
+
+ msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
+ info->snd_seq, &hsr_genl_family, 0,
+ HSR_C_SET_NODE_STATUS);
+ if (!msg_head) {
+ res = -ENOMEM;
+ goto nla_put_failure;
+ }
+
+ res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
+ if (res < 0)
+ goto nla_put_failure;
+
+ hsr = netdev_priv(hsr_dev);
+ res = hsr_get_node_data(hsr,
+ (unsigned char *)
+ nla_data(info->attrs[HSR_A_NODE_ADDR]),
+ hsr_node_addr_b,
+ &addr_b_ifindex,
+ &hsr_node_if1_age,
+ &hsr_node_if1_seq,
+ &hsr_node_if2_age,
+ &hsr_node_if2_seq);
+ if (res < 0)
+ goto nla_put_failure;
+
+ res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN,
+ nla_data(info->attrs[HSR_A_NODE_ADDR]));
+ if (res < 0)
+ goto nla_put_failure;
+
+ if (addr_b_ifindex > -1) {
+ res = nla_put(skb_out, HSR_A_NODE_ADDR_B, ETH_ALEN,
+ hsr_node_addr_b);
+ if (res < 0)
+ goto nla_put_failure;
+
+ res = nla_put_u32(skb_out, HSR_A_ADDR_B_IFINDEX,
+ addr_b_ifindex);
+ if (res < 0)
+ goto nla_put_failure;
+ }
+
+ res = nla_put_u32(skb_out, HSR_A_IF1_AGE, hsr_node_if1_age);
+ if (res < 0)
+ goto nla_put_failure;
+ res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
+ if (res < 0)
+ goto nla_put_failure;
+ port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
+ if (port)
+ res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
+ port->dev->ifindex);
+ if (res < 0)
+ goto nla_put_failure;
+
+ res = nla_put_u32(skb_out, HSR_A_IF2_AGE, hsr_node_if2_age);
+ if (res < 0)
+ goto nla_put_failure;
+ res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
+ if (res < 0)
+ goto nla_put_failure;
+ port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
+ if (port)
+ res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
+ port->dev->ifindex);
+ if (res < 0)
+ goto nla_put_failure;
+
+ rcu_read_unlock();
+
+ genlmsg_end(skb_out, msg_head);
+ genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);
+
+ return 0;
+
+rcu_unlock:
+ rcu_read_unlock();
+invalid:
+ netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
+ return 0;
+
+nla_put_failure:
+ kfree_skb(skb_out);
+ /* Fall through */
+
+fail:
+ rcu_read_unlock();
+ return res;
+}
+
+/* Get a list of MacAddressA of all nodes known to this node (including self).
+ */
+static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
+{
+ unsigned char addr[ETH_ALEN];
+ struct net_device *hsr_dev;
+ struct sk_buff *skb_out;
+ struct hsr_priv *hsr;
+ bool restart = false;
+ struct nlattr *na;
+ void *pos = NULL;
+ void *msg_head;
+ int res;
+
+ if (!info)
+ goto invalid;
+
+ na = info->attrs[HSR_A_IFINDEX];
+ if (!na)
+ goto invalid;
+
+ rcu_read_lock();
+ hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
+ nla_get_u32(info->attrs[HSR_A_IFINDEX]));
+ if (!hsr_dev)
+ goto rcu_unlock;
+ if (!is_hsr_master(hsr_dev))
+ goto rcu_unlock;
+
+restart:
+ /* Send reply */
+ skb_out = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!skb_out) {
+ res = -ENOMEM;
+ goto fail;
+ }
+
+ msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
+ info->snd_seq, &hsr_genl_family, 0,
+ HSR_C_SET_NODE_LIST);
+ if (!msg_head) {
+ res = -ENOMEM;
+ goto nla_put_failure;
+ }
+
+ if (!restart) {
+ res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
+ if (res < 0)
+ goto nla_put_failure;
+ }
+
+ hsr = netdev_priv(hsr_dev);
+
+ if (!pos)
+ pos = hsr_get_next_node(hsr, NULL, addr);
+ while (pos) {
+ res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
+ if (res < 0) {
+ if (res == -EMSGSIZE) {
+ genlmsg_end(skb_out, msg_head);
+ genlmsg_unicast(genl_info_net(info), skb_out,
+ info->snd_portid);
+ restart = true;
+ goto restart;
+ }
+ goto nla_put_failure;
+ }
+ pos = hsr_get_next_node(hsr, pos, addr);
+ }
+ rcu_read_unlock();
+
+ genlmsg_end(skb_out, msg_head);
+ genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);
+
+ return 0;
+
+rcu_unlock:
+ rcu_read_unlock();
+invalid:
+ netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
+ return 0;
+
+nla_put_failure:
+ nlmsg_free(skb_out);
+ /* Fall through */
+
+fail:
+ rcu_read_unlock();
+ return res;
+}
+
+static const struct genl_small_ops hsr_ops[] = {
+ {
+ .cmd = HSR_C_GET_NODE_STATUS,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .flags = 0,
+ .doit = hsr_get_node_status,
+ .dumpit = NULL,
+ },
+ {
+ .cmd = HSR_C_GET_NODE_LIST,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .flags = 0,
+ .doit = hsr_get_node_list,
+ .dumpit = NULL,
+ },
+};
+
+static struct genl_family hsr_genl_family __ro_after_init = {
+ .hdrsize = 0,
+ .name = "HSR",
+ .version = 1,
+ .maxattr = HSR_A_MAX,
+ .policy = hsr_genl_policy,
+ .netnsok = true,
+ .module = THIS_MODULE,
+ .small_ops = hsr_ops,
+ .n_small_ops = ARRAY_SIZE(hsr_ops),
+ .mcgrps = hsr_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(hsr_mcgrps),
+};
+
+int __init hsr_netlink_init(void)
+{
+ int rc;
+
+ rc = rtnl_link_register(&hsr_link_ops);
+ if (rc)
+ goto fail_rtnl_link_register;
+
+ rc = genl_register_family(&hsr_genl_family);
+ if (rc)
+ goto fail_genl_register_family;
+
+ hsr_debugfs_create_root();
+ return 0;
+
+fail_genl_register_family:
+ rtnl_link_unregister(&hsr_link_ops);
+fail_rtnl_link_register:
+
+ return rc;
+}
+
+void __exit hsr_netlink_exit(void)
+{
+ genl_unregister_family(&hsr_genl_family);
+ rtnl_link_unregister(&hsr_link_ops);
+}
+
+MODULE_ALIAS_RTNL_LINK("hsr");
diff --git a/net/hsr/hsr_netlink.h b/net/hsr/hsr_netlink.h
new file mode 100644
index 000000000..501552d97
--- /dev/null
+++ b/net/hsr/hsr_netlink.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2011-2014 Autronica Fire and Security AS
+ *
+ * Author(s):
+ * 2011-2014 Arvid Brodin, arvid.brodin@alten.se
+ *
+ * include file for HSR and PRP.
+ */
+
+#ifndef __HSR_NETLINK_H
+#define __HSR_NETLINK_H
+
+#include <linux/if_ether.h>
+#include <linux/module.h>
+#include <uapi/linux/hsr_netlink.h>
+
+struct hsr_priv;
+struct hsr_port;
+
+int __init hsr_netlink_init(void);
+void __exit hsr_netlink_exit(void);
+
+void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN],
+ struct hsr_port *port);
+void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN]);
+void hsr_nl_framedrop(int dropcount, int dev_idx);
+void hsr_nl_linkdown(int dev_idx);
+
+#endif /* __HSR_NETLINK_H */
diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c
new file mode 100644
index 000000000..aecc05a28
--- /dev/null
+++ b/net/hsr/hsr_slave.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2011-2014 Autronica Fire and Security AS
+ *
+ * Author(s):
+ * 2011-2014 Arvid Brodin, arvid.brodin@alten.se
+ *
+ * Frame handler and other utility functions for HSR and PRP.
+ */
+
+#include "hsr_slave.h"
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_vlan.h>
+#include "hsr_main.h"
+#include "hsr_device.h"
+#include "hsr_forward.h"
+#include "hsr_framereg.h"
+
+bool hsr_invalid_dan_ingress_frame(__be16 protocol)
+{
+ return (protocol != htons(ETH_P_PRP) && protocol != htons(ETH_P_HSR));
+}
+
+static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
+{
+ struct sk_buff *skb = *pskb;
+ struct hsr_port *port;
+ struct hsr_priv *hsr;
+ __be16 protocol;
+
+ /* Packets from dev_loopback_xmit() do not have an L2 header; bail out */
+ if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
+ return RX_HANDLER_PASS;
+
+ if (!skb_mac_header_was_set(skb)) {
+ WARN_ONCE(1, "%s: skb invalid", __func__);
+ return RX_HANDLER_PASS;
+ }
+
+ port = hsr_port_get_rcu(skb->dev);
+ if (!port)
+ goto finish_pass;
+ hsr = port->hsr;
+
+ if (hsr_addr_is_self(port->hsr, eth_hdr(skb)->h_source)) {
+ /* Directly kill frames sent by ourselves */
+ kfree_skb(skb);
+ goto finish_consume;
+ }
+
+ /* For HSR, only tagged frames are expected, but for PRP
+ * there could be non-tagged frames as well, from Singly
+ * Attached Nodes (SANs).
+ */
+ protocol = eth_hdr(skb)->h_proto;
+ if (hsr->proto_ops->invalid_dan_ingress_frame &&
+ hsr->proto_ops->invalid_dan_ingress_frame(protocol))
+ goto finish_pass;
+
+ skb_push(skb, ETH_HLEN);
+ skb_reset_mac_header(skb);
+ if ((!hsr->prot_version && protocol == htons(ETH_P_PRP)) ||
+ protocol == htons(ETH_P_HSR))
+ skb_set_network_header(skb, ETH_HLEN + HSR_HLEN);
+ skb_reset_mac_len(skb);
+
+ hsr_forward_skb(skb, port);
+
+finish_consume:
+ return RX_HANDLER_CONSUMED;
+
+finish_pass:
+ return RX_HANDLER_PASS;
+}
+
+bool hsr_port_exists(const struct net_device *dev)
+{
+ return rcu_access_pointer(dev->rx_handler) == hsr_handle_frame;
+}
+
+static int hsr_check_dev_ok(struct net_device *dev,
+ struct netlink_ext_ack *extack)
+{
+ /* Don't allow HSR on non-Ethernet-like devices */
+ if ((dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
+ dev->addr_len != ETH_ALEN) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot use loopback or non-ethernet device as HSR slave.");
+ return -EINVAL;
+ }
+
+ /* Don't allow enslaving hsr devices */
+ if (is_hsr_master(dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot create trees of HSR devices.");
+ return -EINVAL;
+ }
+
+ if (hsr_port_exists(dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "This device is already a HSR slave.");
+ return -EINVAL;
+ }
+
+ if (is_vlan_dev(dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "HSR on top of VLAN is not yet supported in this driver.");
+ return -EINVAL;
+ }
+
+ if (dev->priv_flags & IFF_DONT_BRIDGE) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "This device does not support bridging.");
+ return -EOPNOTSUPP;
+ }
+
+ /* HSR over bonded devices has not been tested, but I'm not sure it
+ * won't work...
+ */
+
+ return 0;
+}
+
+/* Set up a device to be added to the HSR bridge. */
+static int hsr_portdev_setup(struct hsr_priv *hsr, struct net_device *dev,
+ struct hsr_port *port,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *hsr_dev;
+ struct hsr_port *master;
+ int res;
+
+ res = dev_set_promiscuity(dev, 1);
+ if (res)
+ return res;
+
+ master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+ hsr_dev = master->dev;
+
+ res = netdev_upper_dev_link(dev, hsr_dev, extack);
+ if (res)
+ goto fail_upper_dev_link;
+
+ res = netdev_rx_handler_register(dev, hsr_handle_frame, port);
+ if (res)
+ goto fail_rx_handler;
+ dev_disable_lro(dev);
+
+ return 0;
+
+fail_rx_handler:
+ netdev_upper_dev_unlink(dev, hsr_dev);
+fail_upper_dev_link:
+ dev_set_promiscuity(dev, -1);
+ return res;
+}
+
+int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev,
+ enum hsr_port_type type, struct netlink_ext_ack *extack)
+{
+ struct hsr_port *port, *master;
+ int res;
+
+ if (type != HSR_PT_MASTER) {
+ res = hsr_check_dev_ok(dev, extack);
+ if (res)
+ return res;
+ }
+
+ port = hsr_port_get_hsr(hsr, type);
+ if (port)
+ return -EBUSY; /* This port already exists */
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->hsr = hsr;
+ port->dev = dev;
+ port->type = type;
+
+ if (type != HSR_PT_MASTER) {
+ res = hsr_portdev_setup(hsr, dev, port, extack);
+ if (res)
+ goto fail_dev_setup;
+ }
+
+ list_add_tail_rcu(&port->port_list, &hsr->ports);
+ synchronize_rcu();
+
+ master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+ netdev_update_features(master->dev);
+ dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
+
+ return 0;
+
+fail_dev_setup:
+ kfree(port);
+ return res;
+}
+
+void hsr_del_port(struct hsr_port *port)
+{
+ struct hsr_priv *hsr;
+ struct hsr_port *master;
+
+ hsr = port->hsr;
+ master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+ list_del_rcu(&port->port_list);
+
+ if (port != master) {
+ netdev_update_features(master->dev);
+ dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
+ netdev_rx_handler_unregister(port->dev);
+ dev_set_promiscuity(port->dev, -1);
+ netdev_upper_dev_unlink(port->dev, master->dev);
+ }
+
+ synchronize_rcu();
+
+ kfree(port);
+}
diff --git a/net/hsr/hsr_slave.h b/net/hsr/hsr_slave.h
new file mode 100644
index 000000000..edc4612bb
--- /dev/null
+++ b/net/hsr/hsr_slave.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2011-2014 Autronica Fire and Security AS
+ *
+ * 2011-2014 Arvid Brodin, arvid.brodin@alten.se
+ *
+ * include file for HSR and PRP.
+ */
+
+#ifndef __HSR_SLAVE_H
+#define __HSR_SLAVE_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include "hsr_main.h"
+
+int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev,
+ enum hsr_port_type pt, struct netlink_ext_ack *extack);
+void hsr_del_port(struct hsr_port *port);
+bool hsr_port_exists(const struct net_device *dev);
+
+static inline struct hsr_port *hsr_port_get_rtnl(const struct net_device *dev)
+{
+ ASSERT_RTNL();
+ return hsr_port_exists(dev) ?
+ rtnl_dereference(dev->rx_handler_data) : NULL;
+}
+
+static inline struct hsr_port *hsr_port_get_rcu(const struct net_device *dev)
+{
+ return hsr_port_exists(dev) ?
+ rcu_dereference(dev->rx_handler_data) : NULL;
+}
+
+bool hsr_invalid_dan_ingress_frame(__be16 protocol);
+
+#endif /* __HSR_SLAVE_H */