path: root/net/tipc
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit     ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree       b2d64bc10158fdd5497876388cd68142ca374ed3 /net/tipc
parent     Initial commit. (diff)
download   linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.tar.xz
           linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.zip
Adding upstream version 6.6.15. (upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'net/tipc')
-rw-r--r--  net/tipc/Kconfig  61
-rw-r--r--  net/tipc/Makefile  22
-rw-r--r--  net/tipc/addr.c  124
-rw-r--r--  net/tipc/addr.h  135
-rw-r--r--  net/tipc/bcast.c  864
-rw-r--r--  net/tipc/bcast.h  127
-rw-r--r--  net/tipc/bearer.c  1370
-rw-r--r--  net/tipc/bearer.h  266
-rw-r--r--  net/tipc/core.c  229
-rw-r--r--  net/tipc/core.h  228
-rw-r--r--  net/tipc/crypto.c  2475
-rw-r--r--  net/tipc/crypto.h  200
-rw-r--r--  net/tipc/diag.c  116
-rw-r--r--  net/tipc/discover.c  420
-rw-r--r--  net/tipc/discover.h  51
-rw-r--r--  net/tipc/eth_media.c  98
-rw-r--r--  net/tipc/group.c  959
-rw-r--r--  net/tipc/group.h  77
-rw-r--r--  net/tipc/ib_media.c  104
-rw-r--r--  net/tipc/link.c  3009
-rw-r--r--  net/tipc/link.h  158
-rw-r--r--  net/tipc/monitor.c  874
-rw-r--r--  net/tipc/monitor.h  83
-rw-r--r--  net/tipc/msg.c  851
-rw-r--r--  net/tipc/msg.h  1310
-rw-r--r--  net/tipc/name_distr.c  411
-rw-r--r--  net/tipc/name_distr.h  79
-rw-r--r--  net/tipc/name_table.c  1204
-rw-r--r--  net/tipc/name_table.h  155
-rw-r--r--  net/tipc/net.c  345
-rw-r--r--  net/tipc/net.h  52
-rw-r--r--  net/tipc/netlink.c  315
-rw-r--r--  net/tipc/netlink.h  64
-rw-r--r--  net/tipc/netlink_compat.c  1376
-rw-r--r--  net/tipc/node.c  3166
-rw-r--r--  net/tipc/node.h  131
-rw-r--r--  net/tipc/socket.c  4019
-rw-r--r--  net/tipc/socket.h  80
-rw-r--r--  net/tipc/subscr.c  183
-rw-r--r--  net/tipc/subscr.h  122
-rw-r--r--  net/tipc/sysctl.c  108
-rw-r--r--  net/tipc/topsrv.c  731
-rw-r--r--  net/tipc/topsrv.h  54
-rw-r--r--  net/tipc/trace.c  206
-rw-r--r--  net/tipc/trace.h  434
-rw-r--r--  net/tipc/udp_media.c  855
-rw-r--r--  net/tipc/udp_media.h  60
47 files changed, 28361 insertions, 0 deletions
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
new file mode 100644
index 0000000000..be1c4003d6
--- /dev/null
+++ b/net/tipc/Kconfig
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# TIPC configuration
+#
+
+menuconfig TIPC
+ tristate "The TIPC Protocol"
+ depends on INET
+ depends on IPV6 || IPV6=n
+ help
+ The Transparent Inter Process Communication (TIPC) protocol is
+ specially designed for intra cluster communication. This protocol
+ originates from Ericsson where it has been used in carrier grade
+ cluster applications for many years.
+
+ For more information about TIPC, see http://tipc.sourceforge.net.
+
+ This protocol support is also available as a module ( = code which
+ can be inserted in and removed from the running kernel whenever you
+ want). The module will be called tipc. If you want to compile it
+ as a module, say M here and read <file:Documentation/kbuild/modules.rst>.
+
+ If in doubt, say N.
+
+config TIPC_MEDIA_IB
+ bool "InfiniBand media type support"
+ depends on TIPC && INFINIBAND_IPOIB
+ help
+ Saying Y here will enable support for running TIPC on
+ IP-over-InfiniBand devices.
+config TIPC_MEDIA_UDP
+ bool "IP/UDP media type support"
+ depends on TIPC
+ select NET_UDP_TUNNEL
+ help
+ Saying Y here will enable support for running TIPC over IP/UDP
+ bool
+ default y
+config TIPC_CRYPTO
+ bool "TIPC encryption support"
+ depends on TIPC
+ select CRYPTO
+ select CRYPTO_AES
+ select CRYPTO_GCM
+ help
+ Saying Y here will enable support for TIPC encryption.
+ All TIPC messages will be encrypted/decrypted by using the currently most
+ advanced algorithm: AEAD AES-GCM (like IPSec or TLS) before leaving/
+ entering the TIPC stack.
+ Key setting from user-space is performed via netlink by a user program
+ (e.g. the iproute2 'tipc' tool).
+ bool
+ default y
+
+config TIPC_DIAG
+ tristate "TIPC: socket monitoring interface"
+ depends on TIPC
+ default y
+ help
+ Support for TIPC socket monitoring interface used by ss tool.
+ If unsure, say Y.
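
For reference (not part of the patch), enabling this stack as a module with UDP media, encryption and the diag interface under the options above would end up as a .config fragment roughly like the following; TIPC_MEDIA_UDP and TIPC_CRYPTO additionally pull in the NET_UDP_TUNNEL and CRYPTO_AES/CRYPTO_GCM selects shown above:

CONFIG_TIPC=m
CONFIG_TIPC_MEDIA_IB=y        # only selectable when INFINIBAND_IPOIB is enabled
CONFIG_TIPC_MEDIA_UDP=y
CONFIG_TIPC_CRYPTO=y
CONFIG_TIPC_DIAG=m
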
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
new file mode 100644
index 0000000000..ee49a9f1dd
--- /dev/null
+++ b/net/tipc/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Linux TIPC layer
+#
+
+obj-$(CONFIG_TIPC) := tipc.o
+
+tipc-y += addr.o bcast.o bearer.o \
+ core.o link.o discover.o msg.o \
+ name_distr.o subscr.o monitor.o name_table.o net.o \
+ netlink.o netlink_compat.o node.o socket.o eth_media.o \
+ topsrv.o group.o trace.o
+
+CFLAGS_trace.o += -I$(src)
+
+tipc-$(CONFIG_TIPC_MEDIA_UDP) += udp_media.o
+tipc-$(CONFIG_TIPC_MEDIA_IB) += ib_media.o
+tipc-$(CONFIG_SYSCTL) += sysctl.o
+tipc-$(CONFIG_TIPC_CRYPTO) += crypto.o
+
+
+obj-$(CONFIG_TIPC_DIAG) += diag.o
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
new file mode 100644
index 0000000000..fd0796269e
--- /dev/null
+++ b/net/tipc/addr.c
@@ -0,0 +1,124 @@
+/*
+ * net/tipc/addr.c: TIPC address utility routines
+ *
+ * Copyright (c) 2000-2006, 2018, Ericsson AB
+ * Copyright (c) 2004-2005, 2010-2011, Wind River Systems
+ * Copyright (c) 2020-2021, Red Hat Inc
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "addr.h"
+#include "core.h"
+
+bool tipc_in_scope(bool legacy_format, u32 domain, u32 addr)
+{
+ if (!domain || (domain == addr))
+ return true;
+ if (!legacy_format)
+ return false;
+ if (domain == tipc_cluster_mask(addr)) /* domain <Z.C.0> */
+ return true;
+ if (domain == (addr & TIPC_ZONE_CLUSTER_MASK)) /* domain <Z.C.0> */
+ return true;
+ if (domain == (addr & TIPC_ZONE_MASK)) /* domain <Z.0.0> */
+ return true;
+ return false;
+}
+
+void tipc_set_node_id(struct net *net, u8 *id)
+{
+ struct tipc_net *tn = tipc_net(net);
+
+ memcpy(tn->node_id, id, NODE_ID_LEN);
+ tipc_nodeid2string(tn->node_id_string, id);
+ tn->trial_addr = hash128to32(id);
+ pr_info("Node identity %s, cluster identity %u\n",
+ tipc_own_id_string(net), tn->net_id);
+}
+
+void tipc_set_node_addr(struct net *net, u32 addr)
+{
+ struct tipc_net *tn = tipc_net(net);
+ u8 node_id[NODE_ID_LEN] = {0,};
+
+ tn->node_addr = addr;
+ if (!tipc_own_id(net)) {
+ sprintf(node_id, "%x", addr);
+ tipc_set_node_id(net, node_id);
+ }
+ tn->trial_addr = addr;
+ tn->addr_trial_end = jiffies;
+ pr_info("Node number set to %u\n", addr);
+}
+
+char *tipc_nodeid2string(char *str, u8 *id)
+{
+ int i;
+ u8 c;
+
+ /* Already a string ? */
+ for (i = 0; i < NODE_ID_LEN; i++) {
+ c = id[i];
+ if (c >= '0' && c <= '9')
+ continue;
+ if (c >= 'A' && c <= 'Z')
+ continue;
+ if (c >= 'a' && c <= 'z')
+ continue;
+ if (c == '.')
+ continue;
+ if (c == ':')
+ continue;
+ if (c == '_')
+ continue;
+ if (c == '-')
+ continue;
+ if (c == '@')
+ continue;
+ if (c != 0)
+ break;
+ }
+ if (i == NODE_ID_LEN) {
+ memcpy(str, id, NODE_ID_LEN);
+ str[NODE_ID_LEN] = 0;
+ return str;
+ }
+
+ /* Translate to hex string */
+ for (i = 0; i < NODE_ID_LEN; i++)
+ sprintf(&str[2 * i], "%02x", id[i]);
+
+ /* Strip off trailing zeroes */
+ for (i = NODE_ID_STR_LEN - 2; str[i] == '0'; i--)
+ str[i] = 0;
+
+ return str;
+}
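
As an illustration of the two paths in tipc_nodeid2string() above, the userspace sketch below (not part of the patch) mirrors the same logic, assuming NODE_ID_LEN is 16 and NODE_ID_STR_LEN is 33 as derived from TIPC_NODEID_LEN in the TIPC headers: a printable identity is returned verbatim, anything else is rendered as hex with trailing '0' characters stripped.

#include <stdio.h>
#include <string.h>

#define NODE_ID_LEN	16
#define NODE_ID_STR_LEN	(NODE_ID_LEN * 2 + 1)

/* Compact userspace rewrite of the kernel routine, for illustration only */
static char *nodeid2string(char *str, const unsigned char *id)
{
	static const char ok[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
				 "abcdefghijklmnopqrstuvwxyz.:_-@";
	int i;

	/* Already a string? NUL padding is accepted, any other byte is not */
	for (i = 0; i < NODE_ID_LEN; i++) {
		if (id[i] && !strchr(ok, id[i]))
			break;
	}
	if (i == NODE_ID_LEN) {
		memcpy(str, id, NODE_ID_LEN);
		str[NODE_ID_LEN] = 0;
		return str;
	}

	/* Translate to hex string, then strip trailing '0' characters */
	for (i = 0; i < NODE_ID_LEN; i++)
		sprintf(&str[2 * i], "%02x", id[i]);
	for (i = NODE_ID_STR_LEN - 2; str[i] == '0'; i--)
		str[i] = 0;
	return str;
}

int main(void)
{
	unsigned char ascii_id[NODE_ID_LEN] = "node-1.example";
	unsigned char bin_id[NODE_ID_LEN]   = { 0xde, 0xad, 0xbe, 0xef };
	char buf[NODE_ID_STR_LEN];

	printf("%s\n", nodeid2string(buf, ascii_id)); /* node-1.example */
	printf("%s\n", nodeid2string(buf, bin_id));   /* deadbeef */
	return 0;
}
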
diff --git a/net/tipc/addr.h b/net/tipc/addr.h
new file mode 100644
index 0000000000..93f8239828
--- /dev/null
+++ b/net/tipc/addr.h
@@ -0,0 +1,135 @@
+/*
+ * net/tipc/addr.h: Include file for TIPC address utility routines
+ *
+ * Copyright (c) 2000-2006, 2018, Ericsson AB
+ * Copyright (c) 2004-2005, Wind River Systems
+ * Copyright (c) 2020-2021, Red Hat Inc
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_ADDR_H
+#define _TIPC_ADDR_H
+
+#include <linux/types.h>
+#include <linux/tipc.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include "core.h"
+
+/* Struct tipc_uaddr: internal version of struct sockaddr_tipc.
+ * Must be kept aligned both regarding field positions and size.
+ */
+struct tipc_uaddr {
+ unsigned short family;
+ unsigned char addrtype;
+ signed char scope;
+ union {
+ struct {
+ struct tipc_service_addr sa;
+ u32 lookup_node;
+ };
+ struct tipc_service_range sr;
+ struct tipc_socket_addr sk;
+ };
+};
+
+static inline void tipc_uaddr(struct tipc_uaddr *ua, u32 atype, u32 scope,
+ u32 type, u32 lower, u32 upper)
+{
+ ua->family = AF_TIPC;
+ ua->addrtype = atype;
+ ua->scope = scope;
+ ua->sr.type = type;
+ ua->sr.lower = lower;
+ ua->sr.upper = upper;
+}
+
+static inline bool tipc_uaddr_valid(struct tipc_uaddr *ua, int len)
+{
+ u32 atype;
+
+ if (len < sizeof(struct sockaddr_tipc))
+ return false;
+ atype = ua->addrtype;
+ if (ua->family != AF_TIPC)
+ return false;
+ if (atype == TIPC_SERVICE_ADDR || atype == TIPC_SOCKET_ADDR)
+ return true;
+ if (atype == TIPC_SERVICE_RANGE)
+ return ua->sr.upper >= ua->sr.lower;
+ return false;
+}
+
+static inline u32 tipc_own_addr(struct net *net)
+{
+ return tipc_net(net)->node_addr;
+}
+
+static inline u8 *tipc_own_id(struct net *net)
+{
+ struct tipc_net *tn = tipc_net(net);
+
+ if (!strlen(tn->node_id_string))
+ return NULL;
+ return tn->node_id;
+}
+
+static inline char *tipc_own_id_string(struct net *net)
+{
+ return tipc_net(net)->node_id_string;
+}
+
+static inline u32 tipc_cluster_mask(u32 addr)
+{
+ return addr & TIPC_ZONE_CLUSTER_MASK;
+}
+
+static inline int tipc_node2scope(u32 node)
+{
+ return node ? TIPC_NODE_SCOPE : TIPC_CLUSTER_SCOPE;
+}
+
+static inline int tipc_scope2node(struct net *net, int sc)
+{
+ return sc != TIPC_NODE_SCOPE ? 0 : tipc_own_addr(net);
+}
+
+static inline int in_own_node(struct net *net, u32 addr)
+{
+ return addr == tipc_own_addr(net) || !addr;
+}
+
+bool tipc_in_scope(bool legacy_format, u32 domain, u32 addr);
+void tipc_set_node_id(struct net *net, u8 *id);
+void tipc_set_node_addr(struct net *net, u32 addr);
+char *tipc_nodeid2string(char *str, u8 *id);
+
+#endif
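
struct tipc_uaddr above is the kernel-internal twin of the UAPI struct sockaddr_tipc, which is why tipc_uaddr_valid() checks the family, the address type and, for TIPC_SERVICE_RANGE, that upper >= lower. A minimal userspace sketch (not part of the patch) of the address an application would pass in, using a hypothetical service type 18888:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/tipc.h>

int main(void)
{
	struct sockaddr_tipc sa;
	int sd = socket(AF_TIPC, SOCK_RDM, 0);

	if (sd < 0)
		return 1;

	memset(&sa, 0, sizeof(sa));
	sa.family = AF_TIPC;
	sa.addrtype = TIPC_SERVICE_RANGE;  /* maps to ua->sr on the kernel side */
	sa.scope = TIPC_CLUSTER_SCOPE;
	sa.addr.nameseq.type = 18888;      /* hypothetical service type */
	sa.addr.nameseq.lower = 0;
	sa.addr.nameseq.upper = 99;        /* tipc_uaddr_valid() wants upper >= lower */

	if (bind(sd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		perror("bind");
	else
		printf("bound to service range {18888, 0, 99}\n");

	close(sd);
	return 0;
}
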
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
new file mode 100644
index 0000000000..593846d252
--- /dev/null
+++ b/net/tipc/bcast.c
@@ -0,0 +1,864 @@
+/*
+ * net/tipc/bcast.c: TIPC broadcast code
+ *
+ * Copyright (c) 2004-2006, 2014-2017, Ericsson AB
+ * Copyright (c) 2004, Intel Corporation.
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/tipc_config.h>
+#include "socket.h"
+#include "msg.h"
+#include "bcast.h"
+#include "link.h"
+#include "name_table.h"
+
+#define BCLINK_WIN_DEFAULT 50 /* bcast link window size (default) */
+#define BCLINK_WIN_MIN 32 /* bcast minimum link window size */
+
+const char tipc_bclink_name[] = "broadcast-link";
+unsigned long sysctl_tipc_bc_retruni __read_mostly;
+
+/**
+ * struct tipc_bc_base - base structure for keeping broadcast send state
+ * @link: broadcast send link structure
+ * @inputq: data input queue; will only carry SOCK_WAKEUP messages
+ * @dests: array keeping number of reachable destinations per bearer
+ * @primary_bearer: a bearer having links to all broadcast destinations, if any
+ * @bcast_support: indicates if primary bearer, if any, supports broadcast
+ * @force_bcast: forces broadcast for multicast traffic
+ * @rcast_support: indicates if all peer nodes support replicast
+ * @force_rcast: forces replicast for multicast traffic
+ * @rc_ratio: dest count as percentage of cluster size where send method changes
+ * @bc_threshold: calculated from rc_ratio; if dests > threshold use broadcast
+ */
+struct tipc_bc_base {
+ struct tipc_link *link;
+ struct sk_buff_head inputq;
+ int dests[MAX_BEARERS];
+ int primary_bearer;
+ bool bcast_support;
+ bool force_bcast;
+ bool rcast_support;
+ bool force_rcast;
+ int rc_ratio;
+ int bc_threshold;
+};
+
+static struct tipc_bc_base *tipc_bc_base(struct net *net)
+{
+ return tipc_net(net)->bcbase;
+}
+
+/* tipc_bcast_get_mtu(): -get the MTU currently used by broadcast link
+ * Note: the MTU is decremented to give room for a tunnel header, in
+ * case the message needs to be sent as replicast
+ */
+int tipc_bcast_get_mtu(struct net *net)
+{
+ return tipc_link_mss(tipc_bc_sndlink(net));
+}
+
+void tipc_bcast_toggle_rcast(struct net *net, bool supp)
+{
+ tipc_bc_base(net)->rcast_support = supp;
+}
+
+static void tipc_bcbase_calc_bc_threshold(struct net *net)
+{
+ struct tipc_bc_base *bb = tipc_bc_base(net);
+ int cluster_size = tipc_link_bc_peers(tipc_bc_sndlink(net));
+
+ bb->bc_threshold = 1 + (cluster_size * bb->rc_ratio / 100);
+}
+
+/* tipc_bcbase_select_primary(): find a bearer with links to all destinations,
+ * if any, and make it primary bearer
+ */
+static void tipc_bcbase_select_primary(struct net *net)
+{
+ struct tipc_bc_base *bb = tipc_bc_base(net);
+ int all_dests = tipc_link_bc_peers(bb->link);
+ int max_win = tipc_link_max_win(bb->link);
+ int min_win = tipc_link_min_win(bb->link);
+ int i, mtu, prim;
+
+ bb->primary_bearer = INVALID_BEARER_ID;
+ bb->bcast_support = true;
+
+ if (!all_dests)
+ return;
+
+ for (i = 0; i < MAX_BEARERS; i++) {
+ if (!bb->dests[i])
+ continue;
+
+ mtu = tipc_bearer_mtu(net, i);
+ if (mtu < tipc_link_mtu(bb->link)) {
+ tipc_link_set_mtu(bb->link, mtu);
+ tipc_link_set_queue_limits(bb->link,
+ min_win,
+ max_win);
+ }
+ bb->bcast_support &= tipc_bearer_bcast_support(net, i);
+ if (bb->dests[i] < all_dests)
+ continue;
+
+ bb->primary_bearer = i;
+
+ /* Reduce risk that all nodes select same primary */
+ if ((i ^ tipc_own_addr(net)) & 1)
+ break;
+ }
+ prim = bb->primary_bearer;
+ if (prim != INVALID_BEARER_ID)
+ bb->bcast_support = tipc_bearer_bcast_support(net, prim);
+}
+
+void tipc_bcast_inc_bearer_dst_cnt(struct net *net, int bearer_id)
+{
+ struct tipc_bc_base *bb = tipc_bc_base(net);
+
+ tipc_bcast_lock(net);
+ bb->dests[bearer_id]++;
+ tipc_bcbase_select_primary(net);
+ tipc_bcast_unlock(net);
+}
+
+void tipc_bcast_dec_bearer_dst_cnt(struct net *net, int bearer_id)
+{
+ struct tipc_bc_base *bb = tipc_bc_base(net);
+
+ tipc_bcast_lock(net);
+ bb->dests[bearer_id]--;
+ tipc_bcbase_select_primary(net);
+ tipc_bcast_unlock(net);
+}
+
+/* tipc_bcbase_xmit - broadcast a packet queue across one or more bearers
+ *
+ * Note that number of reachable destinations, as indicated in the dests[]
+ * array, may transitionally differ from the number of destinations indicated
+ * in each sent buffer. We can sustain this. Excess destination nodes will
+ * drop and never acknowledge the unexpected packets, and missing destinations
+ * will either require retransmission (if they are just about to be added to
+ * the bearer), or be removed from the buffer's 'ackers' counter (if they
+ * just went down)
+ */
+static void tipc_bcbase_xmit(struct net *net, struct sk_buff_head *xmitq)
+{
+ int bearer_id;
+ struct tipc_bc_base *bb = tipc_bc_base(net);
+ struct sk_buff *skb, *_skb;
+ struct sk_buff_head _xmitq;
+
+ if (skb_queue_empty(xmitq))
+ return;
+
+ /* The typical case: at least one bearer has links to all nodes */
+ bearer_id = bb->primary_bearer;
+ if (bearer_id >= 0) {
+ tipc_bearer_bc_xmit(net, bearer_id, xmitq);
+ return;
+ }
+
+ /* We have to transmit across all bearers */
+ __skb_queue_head_init(&_xmitq);
+ for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
+ if (!bb->dests[bearer_id])
+ continue;
+
+ skb_queue_walk(xmitq, skb) {
+ _skb = pskb_copy_for_clone(skb, GFP_ATOMIC);
+ if (!_skb)
+ break;
+ __skb_queue_tail(&_xmitq, _skb);
+ }
+ tipc_bearer_bc_xmit(net, bearer_id, &_xmitq);
+ }
+ __skb_queue_purge(xmitq);
+ __skb_queue_purge(&_xmitq);
+}
+
+static void tipc_bcast_select_xmit_method(struct net *net, int dests,
+ struct tipc_mc_method *method)
+{
+ struct tipc_bc_base *bb = tipc_bc_base(net);
+ unsigned long exp = method->expires;
+
+ /* Broadcast supported by used bearer/bearers? */
+ if (!bb->bcast_support) {
+ method->rcast = true;
+ return;
+ }
+ /* Any destinations which don't support replicast ? */
+ if (!bb->rcast_support) {
+ method->rcast = false;
+ return;
+ }
+ /* Can current method be changed ? */
+ method->expires = jiffies + TIPC_METHOD_EXPIRE;
+ if (method->mandatory)
+ return;
+
+ if (!(tipc_net(net)->capabilities & TIPC_MCAST_RBCTL) &&
+ time_before(jiffies, exp))
+ return;
+
+ /* Configuration as force 'broadcast' method */
+ if (bb->force_bcast) {
+ method->rcast = false;
+ return;
+ }
+ /* Configuration as force 'replicast' method */
+ if (bb->force_rcast) {
+ method->rcast = true;
+ return;
+ }
+ /* Configuration as 'autoselect' or default method */
+ /* Determine method to use now */
+ method->rcast = dests <= bb->bc_threshold;
+}
+
+/* tipc_bcast_xmit - broadcast the buffer chain to all external nodes
+ * @net: the applicable net namespace
+ * @pkts: chain of buffers containing message
+ * @cong_link_cnt: set to 1 if broadcast link is congested, otherwise 0
+ * Consumes the buffer chain.
+ * Returns 0 if success, otherwise errno: -EHOSTUNREACH,-EMSGSIZE
+ */
+int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts,
+ u16 *cong_link_cnt)
+{
+ struct tipc_link *l = tipc_bc_sndlink(net);
+ struct sk_buff_head xmitq;
+ int rc = 0;
+
+ __skb_queue_head_init(&xmitq);
+ tipc_bcast_lock(net);
+ if (tipc_link_bc_peers(l))
+ rc = tipc_link_xmit(l, pkts, &xmitq);
+ tipc_bcast_unlock(net);
+ tipc_bcbase_xmit(net, &xmitq);
+ __skb_queue_purge(pkts);
+ if (rc == -ELINKCONG) {
+ *cong_link_cnt = 1;
+ rc = 0;
+ }
+ return rc;
+}
+
+/* tipc_rcast_xmit - replicate and send a message to given destination nodes
+ * @net: the applicable net namespace
+ * @pkts: chain of buffers containing message
+ * @dests: list of destination nodes
+ * @cong_link_cnt: returns number of congested links
+ * @cong_links: returns identities of congested links
+ * Returns 0 if success, otherwise errno
+ */
+static int tipc_rcast_xmit(struct net *net, struct sk_buff_head *pkts,
+ struct tipc_nlist *dests, u16 *cong_link_cnt)
+{
+ struct tipc_dest *dst, *tmp;
+ struct sk_buff_head _pkts;
+ u32 dnode, selector;
+
+ selector = msg_link_selector(buf_msg(skb_peek(pkts)));
+ __skb_queue_head_init(&_pkts);
+
+ list_for_each_entry_safe(dst, tmp, &dests->list, list) {
+ dnode = dst->node;
+ if (!tipc_msg_pskb_copy(dnode, pkts, &_pkts))
+ return -ENOMEM;
+
+ /* Any other return value than -ELINKCONG is ignored */
+ if (tipc_node_xmit(net, &_pkts, dnode, selector) == -ELINKCONG)
+ (*cong_link_cnt)++;
+ }
+ return 0;
+}
+
+/* tipc_mcast_send_sync - deliver a dummy message with SYN bit
+ * @net: the applicable net namespace
+ * @skb: socket buffer to copy
+ * @method: send method to be used
+ * @dests: destination nodes for message.
+ * Returns 0 if success, otherwise errno
+ */
+static int tipc_mcast_send_sync(struct net *net, struct sk_buff *skb,
+ struct tipc_mc_method *method,
+ struct tipc_nlist *dests)
+{
+ struct tipc_msg *hdr, *_hdr;
+ struct sk_buff_head tmpq;
+ struct sk_buff *_skb;
+ u16 cong_link_cnt;
+ int rc = 0;
+
+ /* Is a cluster supporting with new capabilities ? */
+ if (!(tipc_net(net)->capabilities & TIPC_MCAST_RBCTL))
+ return 0;
+
+ hdr = buf_msg(skb);
+ if (msg_user(hdr) == MSG_FRAGMENTER)
+ hdr = msg_inner_hdr(hdr);
+ if (msg_type(hdr) != TIPC_MCAST_MSG)
+ return 0;
+
+ /* Allocate dummy message */
+ _skb = tipc_buf_acquire(MCAST_H_SIZE, GFP_KERNEL);
+ if (!_skb)
+ return -ENOMEM;
+
+ /* Preparing for 'synching' header */
+ msg_set_syn(hdr, 1);
+
+ /* Copy skb's header into a dummy header */
+ skb_copy_to_linear_data(_skb, hdr, MCAST_H_SIZE);
+ skb_orphan(_skb);
+
+ /* Reverse method for dummy message */
+ _hdr = buf_msg(_skb);
+ msg_set_size(_hdr, MCAST_H_SIZE);
+ msg_set_is_rcast(_hdr, !msg_is_rcast(hdr));
+ msg_set_errcode(_hdr, TIPC_ERR_NO_PORT);
+
+ __skb_queue_head_init(&tmpq);
+ __skb_queue_tail(&tmpq, _skb);
+ if (method->rcast)
+ rc = tipc_bcast_xmit(net, &tmpq, &cong_link_cnt);
+ else
+ rc = tipc_rcast_xmit(net, &tmpq, dests, &cong_link_cnt);
+
+ /* This queue should normally be empty by now */
+ __skb_queue_purge(&tmpq);
+
+ return rc;
+}
+
+/* tipc_mcast_xmit - deliver message to indicated destination nodes
+ * and to identified node local sockets
+ * @net: the applicable net namespace
+ * @pkts: chain of buffers containing message
+ * @method: send method to be used
+ * @dests: destination nodes for message.
+ * @cong_link_cnt: returns number of encountered congested destination links
+ * Consumes buffer chain.
+ * Returns 0 if success, otherwise errno
+ */
+int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts,
+ struct tipc_mc_method *method, struct tipc_nlist *dests,
+ u16 *cong_link_cnt)
+{
+ struct sk_buff_head inputq, localq;
+ bool rcast = method->rcast;
+ struct tipc_msg *hdr;
+ struct sk_buff *skb;
+ int rc = 0;
+
+ skb_queue_head_init(&inputq);
+ __skb_queue_head_init(&localq);
+
+ /* Clone packets before they are consumed by next call */
+ if (dests->local && !tipc_msg_reassemble(pkts, &localq)) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+ /* Send according to determined transmit method */
+ if (dests->remote) {
+ tipc_bcast_select_xmit_method(net, dests->remote, method);
+
+ skb = skb_peek(pkts);
+ hdr = buf_msg(skb);
+ if (msg_user(hdr) == MSG_FRAGMENTER)
+ hdr = msg_inner_hdr(hdr);
+ msg_set_is_rcast(hdr, method->rcast);
+
+ /* Switch method ? */
+ if (rcast != method->rcast) {
+ rc = tipc_mcast_send_sync(net, skb, method, dests);
+ if (unlikely(rc)) {
+ pr_err("Unable to send SYN: method %d, rc %d\n",
+ rcast, rc);
+ goto exit;
+ }
+ }
+
+ if (method->rcast)
+ rc = tipc_rcast_xmit(net, pkts, dests, cong_link_cnt);
+ else
+ rc = tipc_bcast_xmit(net, pkts, cong_link_cnt);
+ }
+
+ if (dests->local) {
+ tipc_loopback_trace(net, &localq);
+ tipc_sk_mcast_rcv(net, &localq, &inputq);
+ }
+exit:
+ /* This queue should normally be empty by now */
+ __skb_queue_purge(pkts);
+ return rc;
+}
+
+/* tipc_bcast_rcv - receive a broadcast packet, and deliver to rcv link
+ *
+ * RCU is locked, no other locks set
+ */
+int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb)
+{
+ struct tipc_msg *hdr = buf_msg(skb);
+ struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
+ struct sk_buff_head xmitq;
+ int rc;
+
+ __skb_queue_head_init(&xmitq);
+
+ if (msg_mc_netid(hdr) != tipc_netid(net) || !tipc_link_is_up(l)) {
+ kfree_skb(skb);
+ return 0;
+ }
+
+ tipc_bcast_lock(net);
+ if (msg_user(hdr) == BCAST_PROTOCOL)
+ rc = tipc_link_bc_nack_rcv(l, skb, &xmitq);
+ else
+ rc = tipc_link_rcv(l, skb, NULL);
+ tipc_bcast_unlock(net);
+
+ tipc_bcbase_xmit(net, &xmitq);
+
+ /* Any socket wakeup messages ? */
+ if (!skb_queue_empty(inputq))
+ tipc_sk_rcv(net, inputq);
+
+ return rc;
+}
+
+/* tipc_bcast_ack_rcv - receive and handle a broadcast acknowledge
+ *
+ * RCU is locked, no other locks set
+ */
+void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l,
+ struct tipc_msg *hdr)
+{
+ struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
+ u16 acked = msg_bcast_ack(hdr);
+ struct sk_buff_head xmitq;
+
+ /* Ignore bc acks sent by peer before bcast synch point was received */
+ if (msg_bc_ack_invalid(hdr))
+ return;
+
+ __skb_queue_head_init(&xmitq);
+
+ tipc_bcast_lock(net);
+ tipc_link_bc_ack_rcv(l, acked, 0, NULL, &xmitq, NULL);
+ tipc_bcast_unlock(net);
+
+ tipc_bcbase_xmit(net, &xmitq);
+
+ /* Any socket wakeup messages ? */
+ if (!skb_queue_empty(inputq))
+ tipc_sk_rcv(net, inputq);
+}
+
+/* tipc_bcast_synch_rcv - check and update rcv link with peer's send state
+ *
+ * RCU is locked, no other locks set
+ */
+int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
+ struct tipc_msg *hdr,
+ struct sk_buff_head *retrq)
+{
+ struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
+ struct tipc_gap_ack_blks *ga;
+ struct sk_buff_head xmitq;
+ int rc = 0;
+
+ __skb_queue_head_init(&xmitq);
+
+ tipc_bcast_lock(net);
+ if (msg_type(hdr) != STATE_MSG) {
+ tipc_link_bc_init_rcv(l, hdr);
+ } else if (!msg_bc_ack_invalid(hdr)) {
+ tipc_get_gap_ack_blks(&ga, l, hdr, false);
+ if (!sysctl_tipc_bc_retruni)
+ retrq = &xmitq;
+ rc = tipc_link_bc_ack_rcv(l, msg_bcast_ack(hdr),
+ msg_bc_gap(hdr), ga, &xmitq,
+ retrq);
+ rc |= tipc_link_bc_sync_rcv(l, hdr, &xmitq);
+ }
+ tipc_bcast_unlock(net);
+
+ tipc_bcbase_xmit(net, &xmitq);
+
+ /* Any socket wakeup messages ? */
+ if (!skb_queue_empty(inputq))
+ tipc_sk_rcv(net, inputq);
+ return rc;
+}
+
+/* tipc_bcast_add_peer - add a peer node to broadcast link and bearer
+ *
+ * RCU is locked, node lock is set
+ */
+void tipc_bcast_add_peer(struct net *net, struct tipc_link *uc_l,
+ struct sk_buff_head *xmitq)
+{
+ struct tipc_link *snd_l = tipc_bc_sndlink(net);
+
+ tipc_bcast_lock(net);
+ tipc_link_add_bc_peer(snd_l, uc_l, xmitq);
+ tipc_bcbase_select_primary(net);
+ tipc_bcbase_calc_bc_threshold(net);
+ tipc_bcast_unlock(net);
+}
+
+/* tipc_bcast_remove_peer - remove a peer node from broadcast link and bearer
+ *
+ * RCU is locked, node lock is set
+ */
+void tipc_bcast_remove_peer(struct net *net, struct tipc_link *rcv_l)
+{
+ struct tipc_link *snd_l = tipc_bc_sndlink(net);
+ struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
+ struct sk_buff_head xmitq;
+
+ __skb_queue_head_init(&xmitq);
+
+ tipc_bcast_lock(net);
+ tipc_link_remove_bc_peer(snd_l, rcv_l, &xmitq);
+ tipc_bcbase_select_primary(net);
+ tipc_bcbase_calc_bc_threshold(net);
+ tipc_bcast_unlock(net);
+
+ tipc_bcbase_xmit(net, &xmitq);
+
+ /* Any socket wakeup messages ? */
+ if (!skb_queue_empty(inputq))
+ tipc_sk_rcv(net, inputq);
+}
+
+int tipc_bclink_reset_stats(struct net *net, struct tipc_link *l)
+{
+ if (!l)
+ return -ENOPROTOOPT;
+
+ tipc_bcast_lock(net);
+ tipc_link_reset_stats(l);
+ tipc_bcast_unlock(net);
+ return 0;
+}
+
+static int tipc_bc_link_set_queue_limits(struct net *net, u32 max_win)
+{
+ struct tipc_link *l = tipc_bc_sndlink(net);
+
+ if (!l)
+ return -ENOPROTOOPT;
+ if (max_win < BCLINK_WIN_MIN)
+ max_win = BCLINK_WIN_MIN;
+ if (max_win > TIPC_MAX_LINK_WIN)
+ return -EINVAL;
+ tipc_bcast_lock(net);
+ tipc_link_set_queue_limits(l, tipc_link_min_win(l), max_win);
+ tipc_bcast_unlock(net);
+ return 0;
+}
+
+static int tipc_bc_link_set_broadcast_mode(struct net *net, u32 bc_mode)
+{
+ struct tipc_bc_base *bb = tipc_bc_base(net);
+
+ switch (bc_mode) {
+ case BCLINK_MODE_BCAST:
+ if (!bb->bcast_support)
+ return -ENOPROTOOPT;
+
+ bb->force_bcast = true;
+ bb->force_rcast = false;
+ break;
+ case BCLINK_MODE_RCAST:
+ if (!bb->rcast_support)
+ return -ENOPROTOOPT;
+
+ bb->force_bcast = false;
+ bb->force_rcast = true;
+ break;
+ case BCLINK_MODE_SEL:
+ if (!bb->bcast_support || !bb->rcast_support)
+ return -ENOPROTOOPT;
+
+ bb->force_bcast = false;
+ bb->force_rcast = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tipc_bc_link_set_broadcast_ratio(struct net *net, u32 bc_ratio)
+{
+ struct tipc_bc_base *bb = tipc_bc_base(net);
+
+ if (!bb->bcast_support || !bb->rcast_support)
+ return -ENOPROTOOPT;
+
+ if (bc_ratio > 100 || bc_ratio <= 0)
+ return -EINVAL;
+
+ bb->rc_ratio = bc_ratio;
+ tipc_bcast_lock(net);
+ tipc_bcbase_calc_bc_threshold(net);
+ tipc_bcast_unlock(net);
+
+ return 0;
+}
+
+int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[])
+{
+ int err;
+ u32 win;
+ u32 bc_mode;
+ u32 bc_ratio;
+ struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
+
+ if (!attrs[TIPC_NLA_LINK_PROP])
+ return -EINVAL;
+
+ err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
+ if (err)
+ return err;
+
+ if (!props[TIPC_NLA_PROP_WIN] &&
+ !props[TIPC_NLA_PROP_BROADCAST] &&
+ !props[TIPC_NLA_PROP_BROADCAST_RATIO]) {
+ return -EOPNOTSUPP;
+ }
+
+ if (props[TIPC_NLA_PROP_BROADCAST]) {
+ bc_mode = nla_get_u32(props[TIPC_NLA_PROP_BROADCAST]);
+ err = tipc_bc_link_set_broadcast_mode(net, bc_mode);
+ }
+
+ if (!err && props[TIPC_NLA_PROP_BROADCAST_RATIO]) {
+ bc_ratio = nla_get_u32(props[TIPC_NLA_PROP_BROADCAST_RATIO]);
+ err = tipc_bc_link_set_broadcast_ratio(net, bc_ratio);
+ }
+
+ if (!err && props[TIPC_NLA_PROP_WIN]) {
+ win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
+ err = tipc_bc_link_set_queue_limits(net, win);
+ }
+
+ return err;
+}
+
+int tipc_bcast_init(struct net *net)
+{
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_bc_base *bb = NULL;
+ struct tipc_link *l = NULL;
+
+ bb = kzalloc(sizeof(*bb), GFP_KERNEL);
+ if (!bb)
+ goto enomem;
+ tn->bcbase = bb;
+ spin_lock_init(&tipc_net(net)->bclock);
+
+ if (!tipc_link_bc_create(net, 0, 0, NULL,
+ one_page_mtu,
+ BCLINK_WIN_DEFAULT,
+ BCLINK_WIN_DEFAULT,
+ 0,
+ &bb->inputq,
+ NULL,
+ NULL,
+ &l))
+ goto enomem;
+ bb->link = l;
+ tn->bcl = l;
+ bb->rc_ratio = 10;
+ bb->rcast_support = true;
+ return 0;
+enomem:
+ kfree(bb);
+ kfree(l);
+ return -ENOMEM;
+}
+
+void tipc_bcast_stop(struct net *net)
+{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+ synchronize_net();
+ kfree(tn->bcbase);
+ kfree(tn->bcl);
+}
+
+void tipc_nlist_init(struct tipc_nlist *nl, u32 self)
+{
+ memset(nl, 0, sizeof(*nl));
+ INIT_LIST_HEAD(&nl->list);
+ nl->self = self;
+}
+
+void tipc_nlist_add(struct tipc_nlist *nl, u32 node)
+{
+ if (node == nl->self)
+ nl->local = true;
+ else if (tipc_dest_push(&nl->list, node, 0))
+ nl->remote++;
+}
+
+void tipc_nlist_del(struct tipc_nlist *nl, u32 node)
+{
+ if (node == nl->self)
+ nl->local = false;
+ else if (tipc_dest_del(&nl->list, node, 0))
+ nl->remote--;
+}
+
+void tipc_nlist_purge(struct tipc_nlist *nl)
+{
+ tipc_dest_list_purge(&nl->list);
+ nl->remote = 0;
+ nl->local = false;
+}
+
+u32 tipc_bcast_get_mode(struct net *net)
+{
+ struct tipc_bc_base *bb = tipc_bc_base(net);
+
+ if (bb->force_bcast)
+ return BCLINK_MODE_BCAST;
+
+ if (bb->force_rcast)
+ return BCLINK_MODE_RCAST;
+
+ if (bb->bcast_support && bb->rcast_support)
+ return BCLINK_MODE_SEL;
+
+ return 0;
+}
+
+u32 tipc_bcast_get_broadcast_ratio(struct net *net)
+{
+ struct tipc_bc_base *bb = tipc_bc_base(net);
+
+ return bb->rc_ratio;
+}
+
+void tipc_mcast_filter_msg(struct net *net, struct sk_buff_head *defq,
+ struct sk_buff_head *inputq)
+{
+ struct sk_buff *skb, *_skb, *tmp;
+ struct tipc_msg *hdr, *_hdr;
+ bool match = false;
+ u32 node, port;
+
+ skb = skb_peek(inputq);
+ if (!skb)
+ return;
+
+ hdr = buf_msg(skb);
+
+ if (likely(!msg_is_syn(hdr) && skb_queue_empty(defq)))
+ return;
+
+ node = msg_orignode(hdr);
+ if (node == tipc_own_addr(net))
+ return;
+
+ port = msg_origport(hdr);
+
+ /* Has the twin SYN message already arrived ? */
+ skb_queue_walk(defq, _skb) {
+ _hdr = buf_msg(_skb);
+ if (msg_orignode(_hdr) != node)
+ continue;
+ if (msg_origport(_hdr) != port)
+ continue;
+ match = true;
+ break;
+ }
+
+ if (!match) {
+ if (!msg_is_syn(hdr))
+ return;
+ __skb_dequeue(inputq);
+ __skb_queue_tail(defq, skb);
+ return;
+ }
+
+ /* Deliver non-SYN message from other link, otherwise queue it */
+ if (!msg_is_syn(hdr)) {
+ if (msg_is_rcast(hdr) != msg_is_rcast(_hdr))
+ return;
+ __skb_dequeue(inputq);
+ __skb_queue_tail(defq, skb);
+ return;
+ }
+
+ /* Queue non-SYN/SYN message from same link */
+ if (msg_is_rcast(hdr) == msg_is_rcast(_hdr)) {
+ __skb_dequeue(inputq);
+ __skb_queue_tail(defq, skb);
+ return;
+ }
+
+ /* Matching SYN messages => return the one with data, if any */
+ __skb_unlink(_skb, defq);
+ if (msg_data_sz(hdr)) {
+ kfree_skb(_skb);
+ } else {
+ __skb_dequeue(inputq);
+ kfree_skb(skb);
+ __skb_queue_tail(inputq, _skb);
+ }
+
+ /* Deliver subsequent non-SYN messages from same peer */
+ skb_queue_walk_safe(defq, _skb, tmp) {
+ _hdr = buf_msg(_skb);
+ if (msg_orignode(_hdr) != node)
+ continue;
+ if (msg_origport(_hdr) != port)
+ continue;
+ if (msg_is_syn(_hdr))
+ break;
+ __skb_unlink(_skb, defq);
+ __skb_queue_tail(inputq, _skb);
+ }
+}
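
The autoselect logic in tipc_bcbase_calc_bc_threshold() and tipc_bcast_select_xmit_method() above boils down to one comparison: with the default rc_ratio of 10 set in tipc_bcast_init(), a cluster of 40 reachable peers gives bc_threshold = 1 + 40 * 10 / 100 = 5, so a multicast to at most 5 destinations is replicated per destination (replicast) while anything larger goes out as true broadcast. A standalone sketch of just that arithmetic:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors bc_threshold = 1 + (cluster_size * rc_ratio / 100) and the
 * "dests <= bc_threshold" test used when no forced mode is configured.
 */
static bool use_replicast(int cluster_size, int rc_ratio, int dests)
{
	int bc_threshold = 1 + cluster_size * rc_ratio / 100;

	return dests <= bc_threshold;
}

int main(void)
{
	printf("4 of 40 peers:  %s\n", use_replicast(40, 10, 4) ? "replicast" : "broadcast");
	printf("12 of 40 peers: %s\n", use_replicast(40, 10, 12) ? "replicast" : "broadcast");
	return 0;
}
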
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
new file mode 100644
index 0000000000..2d9352dc7b
--- /dev/null
+++ b/net/tipc/bcast.h
@@ -0,0 +1,127 @@
+/*
+ * net/tipc/bcast.h: Include file for TIPC broadcast code
+ *
+ * Copyright (c) 2003-2006, 2014-2015, Ericsson AB
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_BCAST_H
+#define _TIPC_BCAST_H
+
+#include "core.h"
+
+struct tipc_node;
+struct tipc_msg;
+struct tipc_nl_msg;
+struct tipc_nlist;
+struct tipc_nitem;
+extern const char tipc_bclink_name[];
+extern unsigned long sysctl_tipc_bc_retruni;
+
+#define TIPC_METHOD_EXPIRE msecs_to_jiffies(5000)
+
+#define BCLINK_MODE_BCAST 0x1
+#define BCLINK_MODE_RCAST 0x2
+#define BCLINK_MODE_SEL 0x4
+
+struct tipc_nlist {
+ struct list_head list;
+ u32 self;
+ u16 remote;
+ bool local;
+};
+
+void tipc_nlist_init(struct tipc_nlist *nl, u32 self);
+void tipc_nlist_purge(struct tipc_nlist *nl);
+void tipc_nlist_add(struct tipc_nlist *nl, u32 node);
+void tipc_nlist_del(struct tipc_nlist *nl, u32 node);
+
+/* Cookie to be used between socket and broadcast layer
+ * @rcast: replicast (instead of broadcast) was used at previous xmit
+ * @mandatory: broadcast/replicast indication was set by user
+ * @deferredq: defer queue to make message in order
+ * @expires: re-evaluate non-mandatory transmit method if we are past this
+ */
+struct tipc_mc_method {
+ bool rcast;
+ bool mandatory;
+ struct sk_buff_head deferredq;
+ unsigned long expires;
+};
+
+int tipc_bcast_init(struct net *net);
+void tipc_bcast_stop(struct net *net);
+void tipc_bcast_add_peer(struct net *net, struct tipc_link *l,
+ struct sk_buff_head *xmitq);
+void tipc_bcast_remove_peer(struct net *net, struct tipc_link *rcv_bcl);
+void tipc_bcast_inc_bearer_dst_cnt(struct net *net, int bearer_id);
+void tipc_bcast_dec_bearer_dst_cnt(struct net *net, int bearer_id);
+int tipc_bcast_get_mtu(struct net *net);
+void tipc_bcast_toggle_rcast(struct net *net, bool supp);
+int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts,
+ struct tipc_mc_method *method, struct tipc_nlist *dests,
+ u16 *cong_link_cnt);
+int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts,
+ u16 *cong_link_cnt);
+int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb);
+void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l,
+ struct tipc_msg *hdr);
+int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
+ struct tipc_msg *hdr,
+ struct sk_buff_head *retrq);
+int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg,
+ struct tipc_link *bcl);
+int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[]);
+int tipc_bclink_reset_stats(struct net *net, struct tipc_link *l);
+
+u32 tipc_bcast_get_mode(struct net *net);
+u32 tipc_bcast_get_broadcast_ratio(struct net *net);
+
+void tipc_mcast_filter_msg(struct net *net, struct sk_buff_head *defq,
+ struct sk_buff_head *inputq);
+
+static inline void tipc_bcast_lock(struct net *net)
+{
+ spin_lock_bh(&tipc_net(net)->bclock);
+}
+
+static inline void tipc_bcast_unlock(struct net *net)
+{
+ spin_unlock_bh(&tipc_net(net)->bclock);
+}
+
+static inline struct tipc_link *tipc_bc_sndlink(struct net *net)
+{
+ return tipc_net(net)->bcl;
+}
+
+#endif
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
new file mode 100644
index 0000000000..2cde375477
--- /dev/null
+++ b/net/tipc/bearer.c
@@ -0,0 +1,1370 @@
+/*
+ * net/tipc/bearer.c: TIPC bearer code
+ *
+ * Copyright (c) 1996-2006, 2013-2016, Ericsson AB
+ * Copyright (c) 2004-2006, 2010-2013, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <net/sock.h>
+#include "core.h"
+#include "bearer.h"
+#include "link.h"
+#include "discover.h"
+#include "monitor.h"
+#include "bcast.h"
+#include "netlink.h"
+#include "udp_media.h"
+#include "trace.h"
+#include "crypto.h"
+
+#define MAX_ADDR_STR 60
+
+static struct tipc_media * const media_info_array[] = {
+ &eth_media_info,
+#ifdef CONFIG_TIPC_MEDIA_IB
+ &ib_media_info,
+#endif
+#ifdef CONFIG_TIPC_MEDIA_UDP
+ &udp_media_info,
+#endif
+ NULL
+};
+
+static struct tipc_bearer *bearer_get(struct net *net, int bearer_id)
+{
+ struct tipc_net *tn = tipc_net(net);
+
+ return rcu_dereference(tn->bearer_list[bearer_id]);
+}
+
+static void bearer_disable(struct net *net, struct tipc_bearer *b);
+static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev);
+
+/**
+ * tipc_media_find - locates specified media object by name
+ * @name: name to locate
+ */
+struct tipc_media *tipc_media_find(const char *name)
+{
+ u32 i;
+
+ for (i = 0; media_info_array[i] != NULL; i++) {
+ if (!strcmp(media_info_array[i]->name, name))
+ break;
+ }
+ return media_info_array[i];
+}
+
+/**
+ * media_find_id - locates specified media object by type identifier
+ * @type: type identifier to locate
+ */
+static struct tipc_media *media_find_id(u8 type)
+{
+ u32 i;
+
+ for (i = 0; media_info_array[i] != NULL; i++) {
+ if (media_info_array[i]->type_id == type)
+ break;
+ }
+ return media_info_array[i];
+}
+
+/**
+ * tipc_media_addr_printf - record media address in print buffer
+ * @buf: output buffer
+ * @len: output buffer size remaining
+ * @a: input media address
+ */
+int tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a)
+{
+ char addr_str[MAX_ADDR_STR];
+ struct tipc_media *m;
+ int ret;
+
+ m = media_find_id(a->media_id);
+
+ if (m && !m->addr2str(a, addr_str, sizeof(addr_str)))
+ ret = scnprintf(buf, len, "%s(%s)", m->name, addr_str);
+ else {
+ u32 i;
+
+ ret = scnprintf(buf, len, "UNKNOWN(%u)", a->media_id);
+ for (i = 0; i < sizeof(a->value); i++)
+ ret += scnprintf(buf + ret, len - ret,
+ "-%x", a->value[i]);
+ }
+ return ret;
+}
+
+/**
+ * bearer_name_validate - validate & (optionally) deconstruct bearer name
+ * @name: ptr to bearer name string
+ * @name_parts: ptr to area for bearer name components (or NULL if not needed)
+ *
+ * Return: 1 if bearer name is valid, otherwise 0.
+ */
+static int bearer_name_validate(const char *name,
+ struct tipc_bearer_names *name_parts)
+{
+ char name_copy[TIPC_MAX_BEARER_NAME];
+ char *media_name;
+ char *if_name;
+ u32 media_len;
+ u32 if_len;
+
+ /* copy bearer name & ensure length is OK */
+ if (strscpy(name_copy, name, TIPC_MAX_BEARER_NAME) < 0)
+ return 0;
+
+ /* ensure all component parts of bearer name are present */
+ media_name = name_copy;
+ if_name = strchr(media_name, ':');
+ if (if_name == NULL)
+ return 0;
+ *(if_name++) = 0;
+ media_len = if_name - media_name;
+ if_len = strlen(if_name) + 1;
+
+ /* validate component parts of bearer name */
+ if ((media_len <= 1) || (media_len > TIPC_MAX_MEDIA_NAME) ||
+ (if_len <= 1) || (if_len > TIPC_MAX_IF_NAME))
+ return 0;
+
+ /* return bearer name components, if necessary */
+ if (name_parts) {
+ strcpy(name_parts->media_name, media_name);
+ strcpy(name_parts->if_name, if_name);
+ }
+ return 1;
+}
+
+/**
+ * tipc_bearer_find - locates bearer object with matching bearer name
+ * @net: the applicable net namespace
+ * @name: bearer name to locate
+ */
+struct tipc_bearer *tipc_bearer_find(struct net *net, const char *name)
+{
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_bearer *b;
+ u32 i;
+
+ for (i = 0; i < MAX_BEARERS; i++) {
+ b = rtnl_dereference(tn->bearer_list[i]);
+ if (b && (!strcmp(b->name, name)))
+ return b;
+ }
+ return NULL;
+}
+
+/* tipc_bearer_get_name - get the bearer name from its id.
+ * @net: network namespace
+ * @name: a pointer to the buffer where the name will be stored.
+ * @bearer_id: the id to get the name from.
+ */
+int tipc_bearer_get_name(struct net *net, char *name, u32 bearer_id)
+{
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_bearer *b;
+
+ if (bearer_id >= MAX_BEARERS)
+ return -EINVAL;
+
+ b = rtnl_dereference(tn->bearer_list[bearer_id]);
+ if (!b)
+ return -EINVAL;
+
+ strcpy(name, b->name);
+ return 0;
+}
+
+void tipc_bearer_add_dest(struct net *net, u32 bearer_id, u32 dest)
+{
+ struct tipc_bearer *b;
+
+ rcu_read_lock();
+ b = bearer_get(net, bearer_id);
+ if (b)
+ tipc_disc_add_dest(b->disc);
+ rcu_read_unlock();
+}
+
+void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest)
+{
+ struct tipc_bearer *b;
+
+ rcu_read_lock();
+ b = bearer_get(net, bearer_id);
+ if (b)
+ tipc_disc_remove_dest(b->disc);
+ rcu_read_unlock();
+}
+
+/**
+ * tipc_enable_bearer - enable bearer with the given name
+ * @net: the applicable net namespace
+ * @name: bearer name to enable
+ * @disc_domain: bearer domain
+ * @prio: bearer priority
+ * @attr: nlattr array
+ * @extack: netlink extended ack
+ */
+static int tipc_enable_bearer(struct net *net, const char *name,
+ u32 disc_domain, u32 prio,
+ struct nlattr *attr[],
+ struct netlink_ext_ack *extack)
+{
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_bearer_names b_names;
+ int with_this_prio = 1;
+ struct tipc_bearer *b;
+ struct tipc_media *m;
+ struct sk_buff *skb;
+ int bearer_id = 0;
+ int res = -EINVAL;
+ char *errstr = "";
+ u32 i;
+
+ if (!bearer_name_validate(name, &b_names)) {
+ NL_SET_ERR_MSG(extack, "Illegal name");
+ return res;
+ }
+
+ if (prio > TIPC_MAX_LINK_PRI && prio != TIPC_MEDIA_LINK_PRI) {
+ errstr = "illegal priority";
+ NL_SET_ERR_MSG(extack, "Illegal priority");
+ goto rejected;
+ }
+
+ m = tipc_media_find(b_names.media_name);
+ if (!m) {
+ errstr = "media not registered";
+ NL_SET_ERR_MSG(extack, "Media not registered");
+ goto rejected;
+ }
+
+ if (prio == TIPC_MEDIA_LINK_PRI)
+ prio = m->priority;
+
+ /* Check new bearer vs existing ones and find free bearer id if any */
+ bearer_id = MAX_BEARERS;
+ i = MAX_BEARERS;
+ while (i-- != 0) {
+ b = rtnl_dereference(tn->bearer_list[i]);
+ if (!b) {
+ bearer_id = i;
+ continue;
+ }
+ if (!strcmp(name, b->name)) {
+ errstr = "already enabled";
+ NL_SET_ERR_MSG(extack, "Already enabled");
+ goto rejected;
+ }
+
+ if (b->priority == prio &&
+ (++with_this_prio > 2)) {
+ pr_warn("Bearer <%s>: already 2 bearers with priority %u\n",
+ name, prio);
+
+ if (prio == TIPC_MIN_LINK_PRI) {
+ errstr = "cannot adjust to lower";
+ NL_SET_ERR_MSG(extack, "Cannot adjust to lower");
+ goto rejected;
+ }
+
+ pr_warn("Bearer <%s>: trying with adjusted priority\n",
+ name);
+ prio--;
+ bearer_id = MAX_BEARERS;
+ i = MAX_BEARERS;
+ with_this_prio = 1;
+ }
+ }
+
+ if (bearer_id >= MAX_BEARERS) {
+ errstr = "max 3 bearers permitted";
+ NL_SET_ERR_MSG(extack, "Max 3 bearers permitted");
+ goto rejected;
+ }
+
+ b = kzalloc(sizeof(*b), GFP_ATOMIC);
+ if (!b)
+ return -ENOMEM;
+
+ strcpy(b->name, name);
+ b->media = m;
+ res = m->enable_media(net, b, attr);
+ if (res) {
+ kfree(b);
+ errstr = "failed to enable media";
+ NL_SET_ERR_MSG(extack, "Failed to enable media");
+ goto rejected;
+ }
+
+ b->identity = bearer_id;
+ b->tolerance = m->tolerance;
+ b->min_win = m->min_win;
+ b->max_win = m->max_win;
+ b->domain = disc_domain;
+ b->net_plane = bearer_id + 'A';
+ b->priority = prio;
+ refcount_set(&b->refcnt, 1);
+
+ res = tipc_disc_create(net, b, &b->bcast_addr, &skb);
+ if (res) {
+ bearer_disable(net, b);
+ errstr = "failed to create discoverer";
+ NL_SET_ERR_MSG(extack, "Failed to create discoverer");
+ goto rejected;
+ }
+
+ /* Create monitoring data before accepting activate messages */
+ if (tipc_mon_create(net, bearer_id)) {
+ bearer_disable(net, b);
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ test_and_set_bit_lock(0, &b->up);
+ rcu_assign_pointer(tn->bearer_list[bearer_id], b);
+ if (skb)
+ tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr);
+
+ pr_info("Enabled bearer <%s>, priority %u\n", name, prio);
+
+ return res;
+rejected:
+ pr_warn("Enabling of bearer <%s> rejected, %s\n", name, errstr);
+ return res;
+}
+
+/**
+ * tipc_reset_bearer - Reset all links established over this bearer
+ * @net: the applicable net namespace
+ * @b: the target bearer
+ */
+static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b)
+{
+ pr_info("Resetting bearer <%s>\n", b->name);
+ tipc_node_delete_links(net, b->identity);
+ tipc_disc_reset(net, b);
+ return 0;
+}
+
+bool tipc_bearer_hold(struct tipc_bearer *b)
+{
+ return (b && refcount_inc_not_zero(&b->refcnt));
+}
+
+void tipc_bearer_put(struct tipc_bearer *b)
+{
+ if (b && refcount_dec_and_test(&b->refcnt))
+ kfree_rcu(b, rcu);
+}
+
+/**
+ * bearer_disable - disable this bearer
+ * @net: the applicable net namespace
+ * @b: the bearer to disable
+ *
+ * Note: This routine assumes caller holds RTNL lock.
+ */
+static void bearer_disable(struct net *net, struct tipc_bearer *b)
+{
+ struct tipc_net *tn = tipc_net(net);
+ int bearer_id = b->identity;
+
+ pr_info("Disabling bearer <%s>\n", b->name);
+ clear_bit_unlock(0, &b->up);
+ tipc_node_delete_links(net, bearer_id);
+ b->media->disable_media(b);
+ RCU_INIT_POINTER(b->media_ptr, NULL);
+ if (b->disc)
+ tipc_disc_delete(b->disc);
+ RCU_INIT_POINTER(tn->bearer_list[bearer_id], NULL);
+ tipc_bearer_put(b);
+ tipc_mon_delete(net, bearer_id);
+}
+
+int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
+ struct nlattr *attr[])
+{
+ char *dev_name = strchr((const char *)b->name, ':') + 1;
+ int hwaddr_len = b->media->hwaddr_len;
+ u8 node_id[NODE_ID_LEN] = {0,};
+ struct net_device *dev;
+
+ /* Find device with specified name */
+ dev = dev_get_by_name(net, dev_name);
+ if (!dev)
+ return -ENODEV;
+ if (tipc_mtu_bad(dev)) {
+ dev_put(dev);
+ return -EINVAL;
+ }
+ if (dev == net->loopback_dev) {
+ dev_put(dev);
+ pr_info("Enabling <%s> not permitted\n", b->name);
+ return -EINVAL;
+ }
+
+ /* Autoconfigure own node identity if needed */
+ if (!tipc_own_id(net) && hwaddr_len <= NODE_ID_LEN) {
+ memcpy(node_id, dev->dev_addr, hwaddr_len);
+ tipc_net_init(net, node_id, 0);
+ }
+ if (!tipc_own_id(net)) {
+ dev_put(dev);
+ pr_warn("Failed to obtain node identity\n");
+ return -EINVAL;
+ }
+
+ /* Associate TIPC bearer with L2 bearer */
+ rcu_assign_pointer(b->media_ptr, dev);
+ b->pt.dev = dev;
+ b->pt.type = htons(ETH_P_TIPC);
+ b->pt.func = tipc_l2_rcv_msg;
+ dev_add_pack(&b->pt);
+ memset(&b->bcast_addr, 0, sizeof(b->bcast_addr));
+ memcpy(b->bcast_addr.value, dev->broadcast, hwaddr_len);
+ b->bcast_addr.media_id = b->media->type_id;
+ b->bcast_addr.broadcast = TIPC_BROADCAST_SUPPORT;
+ b->mtu = dev->mtu;
+ b->media->raw2addr(b, &b->addr, (const char *)dev->dev_addr);
+ rcu_assign_pointer(dev->tipc_ptr, b);
+ return 0;
+}
+
+/* tipc_disable_l2_media - detach TIPC bearer from an L2 interface
+ * @b: the target bearer
+ *
+ * Mark L2 bearer as inactive so that incoming buffers are thrown away
+ */
+void tipc_disable_l2_media(struct tipc_bearer *b)
+{
+ struct net_device *dev;
+
+ dev = (struct net_device *)rtnl_dereference(b->media_ptr);
+ dev_remove_pack(&b->pt);
+ RCU_INIT_POINTER(dev->tipc_ptr, NULL);
+ synchronize_net();
+ dev_put(dev);
+}
+
+/**
+ * tipc_l2_send_msg - send a TIPC packet out over an L2 interface
+ * @net: the associated network namespace
+ * @skb: the packet to be sent
+ * @b: the bearer through which the packet is to be sent
+ * @dest: peer destination address
+ */
+int tipc_l2_send_msg(struct net *net, struct sk_buff *skb,
+ struct tipc_bearer *b, struct tipc_media_addr *dest)
+{
+ struct net_device *dev;
+ int delta;
+
+ dev = (struct net_device *)rcu_dereference(b->media_ptr);
+ if (!dev)
+ return 0;
+
+ delta = SKB_DATA_ALIGN(dev->hard_header_len - skb_headroom(skb));
+ if ((delta > 0) && pskb_expand_head(skb, delta, 0, GFP_ATOMIC)) {
+ kfree_skb(skb);
+ return 0;
+ }
+ skb_reset_network_header(skb);
+ skb->dev = dev;
+ skb->protocol = htons(ETH_P_TIPC);
+ dev_hard_header(skb, dev, ETH_P_TIPC, dest->value,
+ dev->dev_addr, skb->len);
+ dev_queue_xmit(skb);
+ return 0;
+}
+
+bool tipc_bearer_bcast_support(struct net *net, u32 bearer_id)
+{
+ bool supp = false;
+ struct tipc_bearer *b;
+
+ rcu_read_lock();
+ b = bearer_get(net, bearer_id);
+ if (b)
+ supp = (b->bcast_addr.broadcast == TIPC_BROADCAST_SUPPORT);
+ rcu_read_unlock();
+ return supp;
+}
+
+int tipc_bearer_mtu(struct net *net, u32 bearer_id)
+{
+ int mtu = 0;
+ struct tipc_bearer *b;
+
+ rcu_read_lock();
+ b = bearer_get(net, bearer_id);
+ if (b)
+ mtu = b->mtu;
+ rcu_read_unlock();
+ return mtu;
+}
+
+int tipc_bearer_min_mtu(struct net *net, u32 bearer_id)
+{
+ int mtu = TIPC_MIN_BEARER_MTU;
+ struct tipc_bearer *b;
+
+ rcu_read_lock();
+ b = bearer_get(net, bearer_id);
+ if (b)
+ mtu += b->encap_hlen;
+ rcu_read_unlock();
+ return mtu;
+}
+
+/* tipc_bearer_xmit_skb - sends buffer to destination over bearer
+ */
+void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
+ struct sk_buff *skb,
+ struct tipc_media_addr *dest)
+{
+ struct tipc_msg *hdr = buf_msg(skb);
+ struct tipc_bearer *b;
+
+ rcu_read_lock();
+ b = bearer_get(net, bearer_id);
+ if (likely(b && (test_bit(0, &b->up) || msg_is_reset(hdr)))) {
+#ifdef CONFIG_TIPC_CRYPTO
+ tipc_crypto_xmit(net, &skb, b, dest, NULL);
+ if (skb)
+#endif
+ b->media->send_msg(net, skb, b, dest);
+ } else {
+ kfree_skb(skb);
+ }
+ rcu_read_unlock();
+}
+
+/* tipc_bearer_xmit() -send buffer to destination over bearer
+ */
+void tipc_bearer_xmit(struct net *net, u32 bearer_id,
+ struct sk_buff_head *xmitq,
+ struct tipc_media_addr *dst,
+ struct tipc_node *__dnode)
+{
+ struct tipc_bearer *b;
+ struct sk_buff *skb, *tmp;
+
+ if (skb_queue_empty(xmitq))
+ return;
+
+ rcu_read_lock();
+ b = bearer_get(net, bearer_id);
+ if (unlikely(!b))
+ __skb_queue_purge(xmitq);
+ skb_queue_walk_safe(xmitq, skb, tmp) {
+ __skb_dequeue(xmitq);
+ if (likely(test_bit(0, &b->up) || msg_is_reset(buf_msg(skb)))) {
+#ifdef CONFIG_TIPC_CRYPTO
+ tipc_crypto_xmit(net, &skb, b, dst, __dnode);
+ if (skb)
+#endif
+ b->media->send_msg(net, skb, b, dst);
+ } else {
+ kfree_skb(skb);
+ }
+ }
+ rcu_read_unlock();
+}
+
+/* tipc_bearer_bc_xmit() - broadcast buffers to all destinations
+ */
+void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
+ struct sk_buff_head *xmitq)
+{
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_media_addr *dst;
+ int net_id = tn->net_id;
+ struct tipc_bearer *b;
+ struct sk_buff *skb, *tmp;
+ struct tipc_msg *hdr;
+
+ rcu_read_lock();
+ b = bearer_get(net, bearer_id);
+ if (unlikely(!b || !test_bit(0, &b->up)))
+ __skb_queue_purge(xmitq);
+ skb_queue_walk_safe(xmitq, skb, tmp) {
+ hdr = buf_msg(skb);
+ msg_set_non_seq(hdr, 1);
+ msg_set_mc_netid(hdr, net_id);
+ __skb_dequeue(xmitq);
+ dst = &b->bcast_addr;
+#ifdef CONFIG_TIPC_CRYPTO
+ tipc_crypto_xmit(net, &skb, b, dst, NULL);
+ if (skb)
+#endif
+ b->media->send_msg(net, skb, b, dst);
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * tipc_l2_rcv_msg - handle incoming TIPC message from an interface
+ * @skb: the received message
+ * @dev: the net device that the packet was received on
+ * @pt: the packet_type structure which was used to register this handler
+ * @orig_dev: the original receive net device in case the device is a bond
+ *
+ * Accepts only packets explicitly sent to this node or broadcast packets;
+ * ignores packets sent using interface multicast, and traffic sent to other
+ * nodes (which can happen if the interface is running in promiscuous mode).
+ */
+static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
+{
+ struct tipc_bearer *b;
+
+ rcu_read_lock();
+ b = rcu_dereference(dev->tipc_ptr) ?:
+ rcu_dereference(orig_dev->tipc_ptr);
+ if (likely(b && test_bit(0, &b->up) &&
+ (skb->pkt_type <= PACKET_MULTICAST))) {
+ skb_mark_not_on_list(skb);
+ TIPC_SKB_CB(skb)->flags = 0;
+ tipc_rcv(dev_net(b->pt.dev), skb, b);
+ rcu_read_unlock();
+ return NET_RX_SUCCESS;
+ }
+ rcu_read_unlock();
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
+
+/**
+ * tipc_l2_device_event - handle device events from network device
+ * @nb: the context of the notification
+ * @evt: the type of event
+ * @ptr: the net device that the event was on
+ *
+ * This function is called via the netdevice notifier chain in case of a
+ * link state change event.
+ */
+static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
+ void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct net *net = dev_net(dev);
+ struct tipc_bearer *b;
+
+ b = rtnl_dereference(dev->tipc_ptr);
+ if (!b)
+ return NOTIFY_DONE;
+
+ trace_tipc_l2_device_event(dev, b, evt);
+ switch (evt) {
+ case NETDEV_CHANGE:
+ if (netif_carrier_ok(dev) && netif_oper_up(dev)) {
+ test_and_set_bit_lock(0, &b->up);
+ break;
+ }
+ fallthrough;
+ case NETDEV_GOING_DOWN:
+ clear_bit_unlock(0, &b->up);
+ tipc_reset_bearer(net, b);
+ break;
+ case NETDEV_UP:
+ test_and_set_bit_lock(0, &b->up);
+ break;
+ case NETDEV_CHANGEMTU:
+ if (tipc_mtu_bad(dev)) {
+ bearer_disable(net, b);
+ break;
+ }
+ b->mtu = dev->mtu;
+ tipc_reset_bearer(net, b);
+ break;
+ case NETDEV_CHANGEADDR:
+ b->media->raw2addr(b, &b->addr,
+ (const char *)dev->dev_addr);
+ tipc_reset_bearer(net, b);
+ break;
+ case NETDEV_UNREGISTER:
+ case NETDEV_CHANGENAME:
+ bearer_disable(net, b);
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block notifier = {
+ .notifier_call = tipc_l2_device_event,
+ .priority = 0,
+};
+
+int tipc_bearer_setup(void)
+{
+ return register_netdevice_notifier(&notifier);
+}
+
+void tipc_bearer_cleanup(void)
+{
+ unregister_netdevice_notifier(&notifier);
+}
+
+void tipc_bearer_stop(struct net *net)
+{
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_bearer *b;
+ u32 i;
+
+ for (i = 0; i < MAX_BEARERS; i++) {
+ b = rtnl_dereference(tn->bearer_list[i]);
+ if (b) {
+ bearer_disable(net, b);
+ tn->bearer_list[i] = NULL;
+ }
+ }
+}
+
+void tipc_clone_to_loopback(struct net *net, struct sk_buff_head *pkts)
+{
+ struct net_device *dev = net->loopback_dev;
+ struct sk_buff *skb, *_skb;
+ int exp;
+
+ skb_queue_walk(pkts, _skb) {
+ skb = pskb_copy(_skb, GFP_ATOMIC);
+ if (!skb)
+ continue;
+
+ exp = SKB_DATA_ALIGN(dev->hard_header_len - skb_headroom(skb));
+ if (exp > 0 && pskb_expand_head(skb, exp, 0, GFP_ATOMIC)) {
+ kfree_skb(skb);
+ continue;
+ }
+
+ skb_reset_network_header(skb);
+ dev_hard_header(skb, dev, ETH_P_TIPC, dev->dev_addr,
+ dev->dev_addr, skb->len);
+ skb->dev = dev;
+ skb->pkt_type = PACKET_HOST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ }
+}
+
+static int tipc_loopback_rcv_pkt(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *od)
+{
+ consume_skb(skb);
+ return NET_RX_SUCCESS;
+}
+
+int tipc_attach_loopback(struct net *net)
+{
+ struct net_device *dev = net->loopback_dev;
+ struct tipc_net *tn = tipc_net(net);
+
+ if (!dev)
+ return -ENODEV;
+
+ netdev_hold(dev, &tn->loopback_pt.dev_tracker, GFP_KERNEL);
+ tn->loopback_pt.dev = dev;
+ tn->loopback_pt.type = htons(ETH_P_TIPC);
+ tn->loopback_pt.func = tipc_loopback_rcv_pkt;
+ dev_add_pack(&tn->loopback_pt);
+ return 0;
+}
+
+void tipc_detach_loopback(struct net *net)
+{
+ struct tipc_net *tn = tipc_net(net);
+
+ dev_remove_pack(&tn->loopback_pt);
+ netdev_put(net->loopback_dev, &tn->loopback_pt.dev_tracker);
+}
+
+/* Caller should hold rtnl_lock to protect the bearer */
+static int __tipc_nl_add_bearer(struct tipc_nl_msg *msg,
+ struct tipc_bearer *bearer, int nlflags)
+{
+ void *hdr;
+ struct nlattr *attrs;
+ struct nlattr *prop;
+
+ hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
+ nlflags, TIPC_NL_BEARER_GET);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_BEARER);
+ if (!attrs)
+ goto msg_full;
+
+ if (nla_put_string(msg->skb, TIPC_NLA_BEARER_NAME, bearer->name))
+ goto attr_msg_full;
+
+ prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_BEARER_PROP);
+ if (!prop)
+ goto prop_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, bearer->priority))
+ goto prop_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, bearer->tolerance))
+ goto prop_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bearer->max_win))
+ goto prop_msg_full;
+ if (bearer->media->type_id == TIPC_MEDIA_TYPE_UDP)
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_MTU, bearer->mtu))
+ goto prop_msg_full;
+
+ nla_nest_end(msg->skb, prop);
+
+#ifdef CONFIG_TIPC_MEDIA_UDP
+ if (bearer->media->type_id == TIPC_MEDIA_TYPE_UDP) {
+ if (tipc_udp_nl_add_bearer_data(msg, bearer))
+ goto attr_msg_full;
+ }
+#endif
+
+ nla_nest_end(msg->skb, attrs);
+ genlmsg_end(msg->skb, hdr);
+
+ return 0;
+
+prop_msg_full:
+ nla_nest_cancel(msg->skb, prop);
+attr_msg_full:
+ nla_nest_cancel(msg->skb, attrs);
+msg_full:
+ genlmsg_cancel(msg->skb, hdr);
+
+ return -EMSGSIZE;
+}
+
+int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ int err;
+ int i = cb->args[0];
+ struct tipc_bearer *bearer;
+ struct tipc_nl_msg msg;
+ struct net *net = sock_net(skb->sk);
+ struct tipc_net *tn = tipc_net(net);
+
+ if (i == MAX_BEARERS)
+ return 0;
+
+ msg.skb = skb;
+ msg.portid = NETLINK_CB(cb->skb).portid;
+ msg.seq = cb->nlh->nlmsg_seq;
+
+ rtnl_lock();
+ for (i = 0; i < MAX_BEARERS; i++) {
+ bearer = rtnl_dereference(tn->bearer_list[i]);
+ if (!bearer)
+ continue;
+
+ err = __tipc_nl_add_bearer(&msg, bearer, NLM_F_MULTI);
+ if (err)
+ break;
+ }
+ rtnl_unlock();
+
+ cb->args[0] = i;
+ return skb->len;
+}
+
+int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+ char *name;
+ struct sk_buff *rep;
+ struct tipc_bearer *bearer;
+ struct tipc_nl_msg msg;
+ struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
+ struct net *net = genl_info_net(info);
+
+ if (!info->attrs[TIPC_NLA_BEARER])
+ return -EINVAL;
+
+ err = nla_parse_nested_deprecated(attrs, TIPC_NLA_BEARER_MAX,
+ info->attrs[TIPC_NLA_BEARER],
+ tipc_nl_bearer_policy, info->extack);
+ if (err)
+ return err;
+
+ if (!attrs[TIPC_NLA_BEARER_NAME])
+ return -EINVAL;
+ name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
+
+ rep = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!rep)
+ return -ENOMEM;
+
+ msg.skb = rep;
+ msg.portid = info->snd_portid;
+ msg.seq = info->snd_seq;
+
+ rtnl_lock();
+ bearer = tipc_bearer_find(net, name);
+ if (!bearer) {
+ err = -EINVAL;
+ NL_SET_ERR_MSG(info->extack, "Bearer not found");
+ goto err_out;
+ }
+
+ err = __tipc_nl_add_bearer(&msg, bearer, 0);
+ if (err)
+ goto err_out;
+ rtnl_unlock();
+
+ return genlmsg_reply(rep, info);
+err_out:
+ rtnl_unlock();
+ nlmsg_free(rep);
+
+ return err;
+}
+
+int __tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+ char *name;
+ struct tipc_bearer *bearer;
+ struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
+ struct net *net = sock_net(skb->sk);
+
+ if (!info->attrs[TIPC_NLA_BEARER])
+ return -EINVAL;
+
+ err = nla_parse_nested_deprecated(attrs, TIPC_NLA_BEARER_MAX,
+ info->attrs[TIPC_NLA_BEARER],
+ tipc_nl_bearer_policy, info->extack);
+ if (err)
+ return err;
+
+ if (!attrs[TIPC_NLA_BEARER_NAME])
+ return -EINVAL;
+
+ name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
+
+ bearer = tipc_bearer_find(net, name);
+ if (!bearer) {
+ NL_SET_ERR_MSG(info->extack, "Bearer not found");
+ return -EINVAL;
+ }
+
+ bearer_disable(net, bearer);
+
+ return 0;
+}
+
+int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+
+ rtnl_lock();
+ err = __tipc_nl_bearer_disable(skb, info);
+ rtnl_unlock();
+
+ return err;
+}
+
+int __tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+ char *bearer;
+ struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
+ struct net *net = sock_net(skb->sk);
+ u32 domain = 0;
+ u32 prio;
+
+ prio = TIPC_MEDIA_LINK_PRI;
+
+ if (!info->attrs[TIPC_NLA_BEARER])
+ return -EINVAL;
+
+ err = nla_parse_nested_deprecated(attrs, TIPC_NLA_BEARER_MAX,
+ info->attrs[TIPC_NLA_BEARER],
+ tipc_nl_bearer_policy, info->extack);
+ if (err)
+ return err;
+
+ if (!attrs[TIPC_NLA_BEARER_NAME])
+ return -EINVAL;
+
+ bearer = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
+
+ if (attrs[TIPC_NLA_BEARER_DOMAIN])
+ domain = nla_get_u32(attrs[TIPC_NLA_BEARER_DOMAIN]);
+
+ if (attrs[TIPC_NLA_BEARER_PROP]) {
+ struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
+
+ err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_BEARER_PROP],
+ props);
+ if (err)
+ return err;
+
+ if (props[TIPC_NLA_PROP_PRIO])
+ prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
+ }
+
+ return tipc_enable_bearer(net, bearer, domain, prio, attrs,
+ info->extack);
+}
+
+int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+
+ rtnl_lock();
+ err = __tipc_nl_bearer_enable(skb, info);
+ rtnl_unlock();
+
+ return err;
+}
+
+int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+ char *name;
+ struct tipc_bearer *b;
+ struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
+ struct net *net = sock_net(skb->sk);
+
+ if (!info->attrs[TIPC_NLA_BEARER])
+ return -EINVAL;
+
+ err = nla_parse_nested_deprecated(attrs, TIPC_NLA_BEARER_MAX,
+ info->attrs[TIPC_NLA_BEARER],
+ tipc_nl_bearer_policy, info->extack);
+ if (err)
+ return err;
+
+ if (!attrs[TIPC_NLA_BEARER_NAME])
+ return -EINVAL;
+ name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
+
+ rtnl_lock();
+ b = tipc_bearer_find(net, name);
+ if (!b) {
+ rtnl_unlock();
+ NL_SET_ERR_MSG(info->extack, "Bearer not found");
+ return -EINVAL;
+ }
+
+#ifdef CONFIG_TIPC_MEDIA_UDP
+ if (attrs[TIPC_NLA_BEARER_UDP_OPTS]) {
+ err = tipc_udp_nl_bearer_add(b,
+ attrs[TIPC_NLA_BEARER_UDP_OPTS]);
+ if (err) {
+ rtnl_unlock();
+ return err;
+ }
+ }
+#endif
+ rtnl_unlock();
+
+ return 0;
+}
+
+int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
+{
+ struct tipc_bearer *b;
+ struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
+ struct net *net = sock_net(skb->sk);
+ char *name;
+ int err;
+
+ if (!info->attrs[TIPC_NLA_BEARER])
+ return -EINVAL;
+
+ err = nla_parse_nested_deprecated(attrs, TIPC_NLA_BEARER_MAX,
+ info->attrs[TIPC_NLA_BEARER],
+ tipc_nl_bearer_policy, info->extack);
+ if (err)
+ return err;
+
+ if (!attrs[TIPC_NLA_BEARER_NAME])
+ return -EINVAL;
+ name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
+
+ b = tipc_bearer_find(net, name);
+ if (!b) {
+ NL_SET_ERR_MSG(info->extack, "Bearer not found");
+ return -EINVAL;
+ }
+
+ if (attrs[TIPC_NLA_BEARER_PROP]) {
+ struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
+
+ err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_BEARER_PROP],
+ props);
+ if (err)
+ return err;
+
+ if (props[TIPC_NLA_PROP_TOL]) {
+ b->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
+ tipc_node_apply_property(net, b, TIPC_NLA_PROP_TOL);
+ }
+ if (props[TIPC_NLA_PROP_PRIO])
+ b->priority = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
+ if (props[TIPC_NLA_PROP_WIN])
+ b->max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
+ if (props[TIPC_NLA_PROP_MTU]) {
+ if (b->media->type_id != TIPC_MEDIA_TYPE_UDP) {
+ NL_SET_ERR_MSG(info->extack,
+ "MTU property is unsupported");
+ return -EINVAL;
+ }
+#ifdef CONFIG_TIPC_MEDIA_UDP
+ if (nla_get_u32(props[TIPC_NLA_PROP_MTU]) <
+ b->encap_hlen + TIPC_MIN_BEARER_MTU) {
+ NL_SET_ERR_MSG(info->extack,
+ "MTU value is out-of-range");
+ return -EINVAL;
+ }
+ b->mtu = nla_get_u32(props[TIPC_NLA_PROP_MTU]);
+ tipc_node_apply_property(net, b, TIPC_NLA_PROP_MTU);
+#endif
+ }
+ }
+
+ return 0;
+}
+
+int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+
+ rtnl_lock();
+ err = __tipc_nl_bearer_set(skb, info);
+ rtnl_unlock();
+
+ return err;
+}
+
+static int __tipc_nl_add_media(struct tipc_nl_msg *msg,
+ struct tipc_media *media, int nlflags)
+{
+ void *hdr;
+ struct nlattr *attrs;
+ struct nlattr *prop;
+
+ hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
+ nlflags, TIPC_NL_MEDIA_GET);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MEDIA);
+ if (!attrs)
+ goto msg_full;
+
+ if (nla_put_string(msg->skb, TIPC_NLA_MEDIA_NAME, media->name))
+ goto attr_msg_full;
+
+ prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_MEDIA_PROP);
+ if (!prop)
+ goto prop_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, media->priority))
+ goto prop_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, media->tolerance))
+ goto prop_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, media->max_win))
+ goto prop_msg_full;
+ if (media->type_id == TIPC_MEDIA_TYPE_UDP)
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_MTU, media->mtu))
+ goto prop_msg_full;
+
+ nla_nest_end(msg->skb, prop);
+ nla_nest_end(msg->skb, attrs);
+ genlmsg_end(msg->skb, hdr);
+
+ return 0;
+
+prop_msg_full:
+ nla_nest_cancel(msg->skb, prop);
+attr_msg_full:
+ nla_nest_cancel(msg->skb, attrs);
+msg_full:
+ genlmsg_cancel(msg->skb, hdr);
+
+ return -EMSGSIZE;
+}
+
+int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ int err;
+ int i = cb->args[0];
+ struct tipc_nl_msg msg;
+
+ if (i == MAX_MEDIA)
+ return 0;
+
+ msg.skb = skb;
+ msg.portid = NETLINK_CB(cb->skb).portid;
+ msg.seq = cb->nlh->nlmsg_seq;
+
+ rtnl_lock();
+ for (; media_info_array[i] != NULL; i++) {
+ err = __tipc_nl_add_media(&msg, media_info_array[i],
+ NLM_F_MULTI);
+ if (err)
+ break;
+ }
+ rtnl_unlock();
+
+ cb->args[0] = i;
+ return skb->len;
+}
+
+int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+ char *name;
+ struct tipc_nl_msg msg;
+ struct tipc_media *media;
+ struct sk_buff *rep;
+ struct nlattr *attrs[TIPC_NLA_MEDIA_MAX + 1];
+
+ if (!info->attrs[TIPC_NLA_MEDIA])
+ return -EINVAL;
+
+ err = nla_parse_nested_deprecated(attrs, TIPC_NLA_MEDIA_MAX,
+ info->attrs[TIPC_NLA_MEDIA],
+ tipc_nl_media_policy, info->extack);
+ if (err)
+ return err;
+
+ if (!attrs[TIPC_NLA_MEDIA_NAME])
+ return -EINVAL;
+ name = nla_data(attrs[TIPC_NLA_MEDIA_NAME]);
+
+ rep = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!rep)
+ return -ENOMEM;
+
+ msg.skb = rep;
+ msg.portid = info->snd_portid;
+ msg.seq = info->snd_seq;
+
+ rtnl_lock();
+ media = tipc_media_find(name);
+ if (!media) {
+ NL_SET_ERR_MSG(info->extack, "Media not found");
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ err = __tipc_nl_add_media(&msg, media, 0);
+ if (err)
+ goto err_out;
+ rtnl_unlock();
+
+ return genlmsg_reply(rep, info);
+err_out:
+ rtnl_unlock();
+ nlmsg_free(rep);
+
+ return err;
+}
+
+int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+ char *name;
+ struct tipc_media *m;
+ struct nlattr *attrs[TIPC_NLA_MEDIA_MAX + 1];
+
+ if (!info->attrs[TIPC_NLA_MEDIA])
+ return -EINVAL;
+
+ err = nla_parse_nested_deprecated(attrs, TIPC_NLA_MEDIA_MAX,
+ info->attrs[TIPC_NLA_MEDIA],
+ tipc_nl_media_policy, info->extack);
+ if (err)
+ return err;
+
+ if (!attrs[TIPC_NLA_MEDIA_NAME])
+ return -EINVAL;
+ name = nla_data(attrs[TIPC_NLA_MEDIA_NAME]);
+
+ m = tipc_media_find(name);
+ if (!m) {
+ NL_SET_ERR_MSG(info->extack, "Media not found");
+ return -EINVAL;
+ }
+ if (attrs[TIPC_NLA_MEDIA_PROP]) {
+ struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
+
+ err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_MEDIA_PROP],
+ props);
+ if (err)
+ return err;
+
+ if (props[TIPC_NLA_PROP_TOL])
+ m->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
+ if (props[TIPC_NLA_PROP_PRIO])
+ m->priority = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
+ if (props[TIPC_NLA_PROP_WIN])
+ m->max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
+ if (props[TIPC_NLA_PROP_MTU]) {
+ if (m->type_id != TIPC_MEDIA_TYPE_UDP) {
+ NL_SET_ERR_MSG(info->extack,
+ "MTU property is unsupported");
+ return -EINVAL;
+ }
+#ifdef CONFIG_TIPC_MEDIA_UDP
+ if (tipc_udp_mtu_bad(nla_get_u32
+ (props[TIPC_NLA_PROP_MTU]))) {
+ NL_SET_ERR_MSG(info->extack,
+ "MTU value is out-of-range");
+ return -EINVAL;
+ }
+ m->mtu = nla_get_u32(props[TIPC_NLA_PROP_MTU]);
+#endif
+ }
+ }
+
+ return 0;
+}
+
+int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+
+ rtnl_lock();
+ err = __tipc_nl_media_set(skb, info);
+ rtnl_unlock();
+
+ return err;
+}
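For reference, a minimal user-space sketch of the headroom arithmetic that tipc_l2_send_msg() and tipc_clone_to_loopback() use before transmitting: the shortfall between the device's hard_header_len and the skb's current headroom is rounded up with SKB_DATA_ALIGN(), and the head is expanded only when the result is positive. The SMP_CACHE_BYTES value and the ALIGN()/SKB_DATA_ALIGN() definitions below are assumptions modelled on common kernel configurations, not part of this patch.

/* Stand-alone sketch of the headroom sizing used in the L2 send path.
 * SMP_CACHE_BYTES and the ALIGN()/SKB_DATA_ALIGN() definitions are
 * assumptions modelled on common kernel configurations.
 */
#include <stdio.h>

#define SMP_CACHE_BYTES		64	/* assumed cache line size */
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define SKB_DATA_ALIGN(x)	ALIGN((x), SMP_CACHE_BYTES)

static int headroom_delta(int hard_header_len, int headroom)
{
	/* Round the shortfall up to a full cache line */
	return SKB_DATA_ALIGN(hard_header_len - headroom);
}

int main(void)
{
	/* 14-byte Ethernet header, only 2 bytes of headroom reserved */
	int delta = headroom_delta(14, 2);

	if (delta > 0)
		printf("expand skb head by %d bytes\n", delta);	/* 64 */
	else
		printf("headroom already sufficient\n");
	return 0;
}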
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
new file mode 100644
index 0000000000..41eac1ee0c
--- /dev/null
+++ b/net/tipc/bearer.h
@@ -0,0 +1,266 @@
+/*
+ * net/tipc/bearer.h: Include file for TIPC bearer code
+ *
+ * Copyright (c) 1996-2006, 2013-2016, Ericsson AB
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_BEARER_H
+#define _TIPC_BEARER_H
+
+#include "netlink.h"
+#include "core.h"
+#include "msg.h"
+#include <net/genetlink.h>
+
+#define MAX_MEDIA 3
+
+/* Identifiers associated with TIPC message header media address info
+ * - address info field is 32 bytes long
+ * - the field's actual content and length are defined per media
+ * - remaining unused bytes in the field are set to zero
+ */
+#define TIPC_MEDIA_INFO_SIZE 32
+#define TIPC_MEDIA_TYPE_OFFSET 3
+#define TIPC_MEDIA_ADDR_OFFSET 4
+
+/*
+ * Identifiers of supported TIPC media types
+ */
+#define TIPC_MEDIA_TYPE_ETH 1
+#define TIPC_MEDIA_TYPE_IB 2
+#define TIPC_MEDIA_TYPE_UDP 3
+
+/* Minimum bearer MTU */
+#define TIPC_MIN_BEARER_MTU (MAX_H_SIZE + INT_H_SIZE)
+
+/* Identifiers for distinguishing between broadcast/multicast and replicast
+ */
+#define TIPC_BROADCAST_SUPPORT 1
+#define TIPC_REPLICAST_SUPPORT 2
+
+/**
+ * struct tipc_media_addr - destination address used by TIPC bearers
+ * @value: address info (format defined by media)
+ * @media_id: TIPC media type identifier
+ * @broadcast: non-zero if address is a broadcast address
+ */
+struct tipc_media_addr {
+ u8 value[TIPC_MEDIA_INFO_SIZE];
+ u8 media_id;
+ u8 broadcast;
+};
+
+struct tipc_bearer;
+
+/**
+ * struct tipc_media - Media specific info exposed to generic bearer layer
+ * @send_msg: routine which handles buffer transmission
+ * @enable_media: routine which enables a media
+ * @disable_media: routine which disables a media
+ * @addr2str: convert media address format to string
+ * @addr2msg: convert from media addr format to discovery msg addr format
+ * @msg2addr: convert from discovery msg addr format to media addr format
+ * @raw2addr: convert from raw addr format to media addr format
+ * @priority: default link (and bearer) priority
+ * @tolerance: default time (in ms) before declaring link failure
+ * @min_win: minimum window (in packets) before declaring link congestion
+ * @max_win: maximum window (in packets) before declaring link congestion
+ * @mtu: max packet size bearer can support for media types that do not depend
+ * on the underlying device MTU
+ * @type_id: TIPC media identifier
+ * @hwaddr_len: TIPC media address len
+ * @name: media name
+ */
+struct tipc_media {
+ int (*send_msg)(struct net *net, struct sk_buff *buf,
+ struct tipc_bearer *b,
+ struct tipc_media_addr *dest);
+ int (*enable_media)(struct net *net, struct tipc_bearer *b,
+ struct nlattr *attr[]);
+ void (*disable_media)(struct tipc_bearer *b);
+ int (*addr2str)(struct tipc_media_addr *addr,
+ char *strbuf,
+ int bufsz);
+ int (*addr2msg)(char *msg, struct tipc_media_addr *addr);
+ int (*msg2addr)(struct tipc_bearer *b,
+ struct tipc_media_addr *addr,
+ char *msg);
+ int (*raw2addr)(struct tipc_bearer *b,
+ struct tipc_media_addr *addr,
+ const char *raw);
+ u32 priority;
+ u32 tolerance;
+ u32 min_win;
+ u32 max_win;
+ u32 mtu;
+ u32 type_id;
+ u32 hwaddr_len;
+ char name[TIPC_MAX_MEDIA_NAME];
+};
+
+/**
+ * struct tipc_bearer - Generic TIPC bearer structure
+ * @media_ptr: pointer to additional media-specific information about bearer
+ * @mtu: max packet size bearer can support
+ * @addr: media-specific address associated with bearer
+ * @name: bearer name (format = media:interface)
+ * @media: ptr to media structure associated with bearer
+ * @bcast_addr: media address used in broadcasting
+ * @pt: packet type for bearer
+ * @rcu: rcu struct for tipc_bearer
+ * @priority: default link priority for bearer
+ * @min_win: minimum window (in packets) before declaring link congestion
+ * @max_win: maximum window (in packets) before declaring link congestion
+ * @tolerance: default link tolerance for bearer
+ * @domain: network domain to which links can be established
+ * @identity: array index of this bearer within TIPC bearer array
+ * @disc: ptr to link setup request
+ * @net_plane: network plane ('A' through 'H') currently associated with bearer
+ * @encap_hlen: encap headers length
+ * @up: bearer up flag (bit 0)
+ * @refcnt: tipc_bearer reference counter
+ *
+ * Note: media-specific code is responsible for initialization of the fields
+ * indicated below when a bearer is enabled; TIPC's generic bearer code takes
+ * care of initializing all other fields.
+ */
+struct tipc_bearer {
+ void __rcu *media_ptr; /* initialized by media */
+ u32 mtu; /* initialized by media */
+ struct tipc_media_addr addr; /* initialized by media */
+ char name[TIPC_MAX_BEARER_NAME];
+ struct tipc_media *media;
+ struct tipc_media_addr bcast_addr;
+ struct packet_type pt;
+ struct rcu_head rcu;
+ u32 priority;
+ u32 min_win;
+ u32 max_win;
+ u32 tolerance;
+ u32 domain;
+ u32 identity;
+ struct tipc_discoverer *disc;
+ char net_plane;
+ u16 encap_hlen;
+ unsigned long up;
+ refcount_t refcnt;
+};
+
+struct tipc_bearer_names {
+ char media_name[TIPC_MAX_MEDIA_NAME];
+ char if_name[TIPC_MAX_IF_NAME];
+};
+
+/*
+ * TIPC routines available to supported media types
+ */
+
+void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b);
+
+/*
+ * Routines made available to TIPC by supported media types
+ */
+extern struct tipc_media eth_media_info;
+
+#ifdef CONFIG_TIPC_MEDIA_IB
+extern struct tipc_media ib_media_info;
+#endif
+#ifdef CONFIG_TIPC_MEDIA_UDP
+extern struct tipc_media udp_media_info;
+#endif
+
+int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info);
+int __tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info);
+int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info);
+int __tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info);
+int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb);
+int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info);
+int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info);
+int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info);
+int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info);
+
+int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb);
+int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info);
+int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info);
+int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info);
+
+int tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a);
+int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
+ struct nlattr *attrs[]);
+bool tipc_bearer_hold(struct tipc_bearer *b);
+void tipc_bearer_put(struct tipc_bearer *b);
+void tipc_disable_l2_media(struct tipc_bearer *b);
+int tipc_l2_send_msg(struct net *net, struct sk_buff *buf,
+ struct tipc_bearer *b, struct tipc_media_addr *dest);
+
+void tipc_bearer_add_dest(struct net *net, u32 bearer_id, u32 dest);
+void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest);
+struct tipc_bearer *tipc_bearer_find(struct net *net, const char *name);
+int tipc_bearer_get_name(struct net *net, char *name, u32 bearer_id);
+struct tipc_media *tipc_media_find(const char *name);
+int tipc_bearer_setup(void);
+void tipc_bearer_cleanup(void);
+void tipc_bearer_stop(struct net *net);
+int tipc_bearer_mtu(struct net *net, u32 bearer_id);
+int tipc_bearer_min_mtu(struct net *net, u32 bearer_id);
+bool tipc_bearer_bcast_support(struct net *net, u32 bearer_id);
+void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
+ struct sk_buff *skb,
+ struct tipc_media_addr *dest);
+void tipc_bearer_xmit(struct net *net, u32 bearer_id,
+ struct sk_buff_head *xmitq,
+ struct tipc_media_addr *dst,
+ struct tipc_node *__dnode);
+void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
+ struct sk_buff_head *xmitq);
+void tipc_clone_to_loopback(struct net *net, struct sk_buff_head *pkts);
+int tipc_attach_loopback(struct net *net);
+void tipc_detach_loopback(struct net *net);
+
+static inline void tipc_loopback_trace(struct net *net,
+ struct sk_buff_head *pkts)
+{
+ if (unlikely(dev_nit_active(net->loopback_dev)))
+ tipc_clone_to_loopback(net, pkts);
+}
+
+/* check if device MTU is too low for tipc headers */
+static inline bool tipc_mtu_bad(struct net_device *dev)
+{
+ if (dev->mtu >= TIPC_MIN_BEARER_MTU)
+ return false;
+ netdev_warn(dev, "MTU too low for tipc bearer\n");
+ return true;
+}
+
+#endif /* _TIPC_BEARER_H */
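To illustrate the media address info layout documented above (a 32-byte field, media type at offset 3, media address from offset 4, unused bytes zeroed), here is a small sketch that packs an Ethernet MAC address the way an addr2msg() callback might. The Ethernet-specific packing is an assumption for illustration only; the per-media callbacks own the actual formats.

/* Illustrative sketch: packing an Ethernet MAC into the 32-byte media
 * address info field described above.  The exact layout belongs to each
 * media's addr2msg()/msg2addr() callbacks; the Ethernet packing shown
 * here is an assumption for illustration only.
 */
#include <stdint.h>
#include <string.h>

#define TIPC_MEDIA_INFO_SIZE	32
#define TIPC_MEDIA_TYPE_OFFSET	3
#define TIPC_MEDIA_ADDR_OFFSET	4
#define TIPC_MEDIA_TYPE_ETH	1
#define ETH_ALEN		6

static void pack_eth_media_addr(uint8_t msg[TIPC_MEDIA_INFO_SIZE],
				const uint8_t mac[ETH_ALEN])
{
	memset(msg, 0, TIPC_MEDIA_INFO_SIZE);	/* unused bytes must be zero */
	msg[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_ETH;
	memcpy(msg + TIPC_MEDIA_ADDR_OFFSET, mac, ETH_ALEN);
}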
diff --git a/net/tipc/core.c b/net/tipc/core.c
new file mode 100644
index 0000000000..434e70eabe
--- /dev/null
+++ b/net/tipc/core.c
@@ -0,0 +1,229 @@
+/*
+ * net/tipc/core.c: TIPC module code
+ *
+ * Copyright (c) 2003-2006, 2013, Ericsson AB
+ * Copyright (c) 2005-2006, 2010-2013, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "name_table.h"
+#include "subscr.h"
+#include "bearer.h"
+#include "net.h"
+#include "socket.h"
+#include "bcast.h"
+#include "node.h"
+#include "crypto.h"
+
+#include <linux/module.h>
+
+/* configurable TIPC parameters */
+unsigned int tipc_net_id __read_mostly;
+int sysctl_tipc_rmem[3] __read_mostly; /* min/default/max */
+
+static int __net_init tipc_init_net(struct net *net)
+{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ int err;
+
+ tn->net_id = 4711;
+ tn->node_addr = 0;
+ tn->trial_addr = 0;
+ tn->addr_trial_end = 0;
+ tn->capabilities = TIPC_NODE_CAPABILITIES;
+ INIT_WORK(&tn->work, tipc_net_finalize_work);
+ memset(tn->node_id, 0, sizeof(tn->node_id));
+ memset(tn->node_id_string, 0, sizeof(tn->node_id_string));
+ tn->mon_threshold = TIPC_DEF_MON_THRESHOLD;
+ get_random_bytes(&tn->random, sizeof(int));
+ INIT_LIST_HEAD(&tn->node_list);
+ spin_lock_init(&tn->node_list_lock);
+
+#ifdef CONFIG_TIPC_CRYPTO
+ err = tipc_crypto_start(&tn->crypto_tx, net, NULL);
+ if (err)
+ goto out_crypto;
+#endif
+ err = tipc_sk_rht_init(net);
+ if (err)
+ goto out_sk_rht;
+
+ err = tipc_nametbl_init(net);
+ if (err)
+ goto out_nametbl;
+
+ err = tipc_bcast_init(net);
+ if (err)
+ goto out_bclink;
+
+ err = tipc_attach_loopback(net);
+ if (err)
+ goto out_bclink;
+
+ return 0;
+
+out_bclink:
+ tipc_nametbl_stop(net);
+out_nametbl:
+ tipc_sk_rht_destroy(net);
+out_sk_rht:
+
+#ifdef CONFIG_TIPC_CRYPTO
+ tipc_crypto_stop(&tn->crypto_tx);
+out_crypto:
+#endif
+ return err;
+}
+
+static void __net_exit tipc_exit_net(struct net *net)
+{
+ struct tipc_net *tn = tipc_net(net);
+
+ tipc_detach_loopback(net);
+ tipc_net_stop(net);
+ /* Make sure the tipc_net_finalize_work() finished */
+ cancel_work_sync(&tn->work);
+ tipc_bcast_stop(net);
+ tipc_nametbl_stop(net);
+ tipc_sk_rht_destroy(net);
+#ifdef CONFIG_TIPC_CRYPTO
+ tipc_crypto_stop(&tipc_net(net)->crypto_tx);
+#endif
+ while (atomic_read(&tn->wq_count))
+ cond_resched();
+}
+
+static void __net_exit tipc_pernet_pre_exit(struct net *net)
+{
+ tipc_node_pre_cleanup_net(net);
+}
+
+static struct pernet_operations tipc_pernet_pre_exit_ops = {
+ .pre_exit = tipc_pernet_pre_exit,
+};
+
+static struct pernet_operations tipc_net_ops = {
+ .init = tipc_init_net,
+ .exit = tipc_exit_net,
+ .id = &tipc_net_id,
+ .size = sizeof(struct tipc_net),
+};
+
+static struct pernet_operations tipc_topsrv_net_ops = {
+ .init = tipc_topsrv_init_net,
+ .exit = tipc_topsrv_exit_net,
+};
+
+static int __init tipc_init(void)
+{
+ int err;
+
+ pr_info("Activated (version " TIPC_MOD_VER ")\n");
+
+ sysctl_tipc_rmem[0] = RCVBUF_MIN;
+ sysctl_tipc_rmem[1] = RCVBUF_DEF;
+ sysctl_tipc_rmem[2] = RCVBUF_MAX;
+
+ err = tipc_register_sysctl();
+ if (err)
+ goto out_sysctl;
+
+ err = register_pernet_device(&tipc_net_ops);
+ if (err)
+ goto out_pernet;
+
+ err = tipc_socket_init();
+ if (err)
+ goto out_socket;
+
+ err = register_pernet_device(&tipc_topsrv_net_ops);
+ if (err)
+ goto out_pernet_topsrv;
+
+ err = register_pernet_subsys(&tipc_pernet_pre_exit_ops);
+ if (err)
+ goto out_register_pernet_subsys;
+
+ err = tipc_bearer_setup();
+ if (err)
+ goto out_bearer;
+
+ err = tipc_netlink_start();
+ if (err)
+ goto out_netlink;
+
+ err = tipc_netlink_compat_start();
+ if (err)
+ goto out_netlink_compat;
+
+ pr_info("Started in single node mode\n");
+ return 0;
+
+out_netlink_compat:
+ tipc_netlink_stop();
+out_netlink:
+ tipc_bearer_cleanup();
+out_bearer:
+ unregister_pernet_subsys(&tipc_pernet_pre_exit_ops);
+out_register_pernet_subsys:
+ unregister_pernet_device(&tipc_topsrv_net_ops);
+out_pernet_topsrv:
+ tipc_socket_stop();
+out_socket:
+ unregister_pernet_device(&tipc_net_ops);
+out_pernet:
+ tipc_unregister_sysctl();
+out_sysctl:
+ pr_err("Unable to start in single node mode\n");
+ return err;
+}
+
+static void __exit tipc_exit(void)
+{
+ tipc_netlink_compat_stop();
+ tipc_netlink_stop();
+ tipc_bearer_cleanup();
+ unregister_pernet_subsys(&tipc_pernet_pre_exit_ops);
+ unregister_pernet_device(&tipc_topsrv_net_ops);
+ tipc_socket_stop();
+ unregister_pernet_device(&tipc_net_ops);
+ tipc_unregister_sysctl();
+
+ pr_info("Deactivated\n");
+}
+
+module_init(tipc_init);
+module_exit(tipc_exit);
+
+MODULE_DESCRIPTION("TIPC: Transparent Inter Process Communication");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(TIPC_MOD_VER);
diff --git a/net/tipc/core.h b/net/tipc/core.h
new file mode 100644
index 0000000000..7eccd97e06
--- /dev/null
+++ b/net/tipc/core.h
@@ -0,0 +1,228 @@
+/*
+ * net/tipc/core.h: Include file for TIPC global declarations
+ *
+ * Copyright (c) 2005-2006, 2013-2018 Ericsson AB
+ * Copyright (c) 2005-2007, 2010-2013, Wind River Systems
+ * Copyright (c) 2020, Red Hat Inc
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_CORE_H
+#define _TIPC_CORE_H
+
+#include <linux/tipc.h>
+#include <linux/tipc_config.h>
+#include <linux/tipc_netlink.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/atomic.h>
+#include <linux/netdevice.h>
+#include <linux/in.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/rtnetlink.h>
+#include <linux/etherdevice.h>
+#include <net/netns/generic.h>
+#include <linux/rhashtable.h>
+#include <net/genetlink.h>
+#include <net/netns/hash.h>
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+struct tipc_node;
+struct tipc_bearer;
+struct tipc_bc_base;
+struct tipc_link;
+struct tipc_name_table;
+struct tipc_topsrv;
+struct tipc_monitor;
+#ifdef CONFIG_TIPC_CRYPTO
+struct tipc_crypto;
+#endif
+
+#define TIPC_MOD_VER "2.0.0"
+
+#define NODE_HTABLE_SIZE 512
+#define MAX_BEARERS 3
+#define TIPC_DEF_MON_THRESHOLD 32
+#define NODE_ID_LEN 16
+#define NODE_ID_STR_LEN (NODE_ID_LEN * 2 + 1)
+
+extern unsigned int tipc_net_id __read_mostly;
+extern int sysctl_tipc_rmem[3] __read_mostly;
+extern int sysctl_tipc_named_timeout __read_mostly;
+
+struct tipc_net {
+ u8 node_id[NODE_ID_LEN];
+ u32 node_addr;
+ u32 trial_addr;
+ unsigned long addr_trial_end;
+ char node_id_string[NODE_ID_STR_LEN];
+ int net_id;
+ int random;
+ bool legacy_addr_format;
+
+ /* Node table and node list */
+ spinlock_t node_list_lock;
+ struct hlist_head node_htable[NODE_HTABLE_SIZE];
+ struct list_head node_list;
+ u32 num_nodes;
+ u32 num_links;
+
+ /* Neighbor monitoring list */
+ struct tipc_monitor *monitors[MAX_BEARERS];
+ int mon_threshold;
+
+ /* Bearer list */
+ struct tipc_bearer __rcu *bearer_list[MAX_BEARERS + 1];
+
+ /* Broadcast link */
+ spinlock_t bclock;
+ struct tipc_bc_base *bcbase;
+ struct tipc_link *bcl;
+
+ /* Socket hash table */
+ struct rhashtable sk_rht;
+
+ /* Name table */
+ spinlock_t nametbl_lock;
+ struct name_table *nametbl;
+
+ /* Topology subscription server */
+ struct tipc_topsrv *topsrv;
+ atomic_t subscription_count;
+
+ /* Cluster capabilities */
+ u16 capabilities;
+
+ /* Tracing of node internal messages */
+ struct packet_type loopback_pt;
+
+#ifdef CONFIG_TIPC_CRYPTO
+ /* TX crypto handler */
+ struct tipc_crypto *crypto_tx;
+#endif
+ /* Work item for net finalize */
+ struct work_struct work;
+ /* The number of work items currently scheduled */
+ atomic_t wq_count;
+};
+
+static inline struct tipc_net *tipc_net(struct net *net)
+{
+ return net_generic(net, tipc_net_id);
+}
+
+static inline int tipc_netid(struct net *net)
+{
+ return tipc_net(net)->net_id;
+}
+
+static inline struct list_head *tipc_nodes(struct net *net)
+{
+ return &tipc_net(net)->node_list;
+}
+
+static inline struct name_table *tipc_name_table(struct net *net)
+{
+ return tipc_net(net)->nametbl;
+}
+
+static inline struct tipc_topsrv *tipc_topsrv(struct net *net)
+{
+ return tipc_net(net)->topsrv;
+}
+
+static inline unsigned int tipc_hashfn(u32 addr)
+{
+ return addr & (NODE_HTABLE_SIZE - 1);
+}
+
+static inline u16 mod(u16 x)
+{
+ return x & 0xffffu;
+}
+
+static inline int less_eq(u16 left, u16 right)
+{
+ return mod(right - left) < 32768u;
+}
+
+static inline int more(u16 left, u16 right)
+{
+ return !less_eq(left, right);
+}
+
+static inline int less(u16 left, u16 right)
+{
+ return less_eq(left, right) && (mod(right) != mod(left));
+}
+
+static inline int tipc_in_range(u16 val, u16 min, u16 max)
+{
+ return !less(val, min) && !more(val, max);
+}
+
+static inline u32 tipc_net_hash_mixes(struct net *net, int tn_rand)
+{
+ return net_hash_mix(&init_net) ^ net_hash_mix(net) ^ tn_rand;
+}
+
+static inline u32 hash128to32(char *bytes)
+{
+ __be32 *tmp = (__be32 *)bytes;
+ u32 res;
+
+ res = ntohl(tmp[0] ^ tmp[1] ^ tmp[2] ^ tmp[3]);
+ if (likely(res))
+ return res;
+ return ntohl(tmp[0] | tmp[1] | tmp[2] | tmp[3]);
+}
+
+#ifdef CONFIG_SYSCTL
+int tipc_register_sysctl(void);
+void tipc_unregister_sysctl(void);
+#else
+#define tipc_register_sysctl() 0
+#define tipc_unregister_sysctl()
+#endif
+#endif
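The sequence number helpers above (mod(), less_eq(), more(), less(), tipc_in_range()) compare 16-bit values modulo 2^16, so ordering remains correct across counter wrap-around. A user-space sketch of the same arithmetic, with illustrative values only:

/* Sketch of the mod-2^16 sequence number comparison from core.h.
 * Sample values are illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

static int less_eq(uint16_t left, uint16_t right)
{
	return (uint16_t)(right - left) < 32768u;
}

static int less(uint16_t left, uint16_t right)
{
	return less_eq(left, right) && left != right;
}

static int in_range(uint16_t val, uint16_t min, uint16_t max)
{
	return !less(val, min) && less_eq(val, max);	/* i.e. !more(val, max) */
}

int main(void)
{
	/* 65530 still counts as "before" 5 once the 16-bit counter wraps */
	printf("less(65530, 5)        = %d\n", less(65530, 5));		/* 1 */
	printf("in_range(2, 65530, 5) = %d\n", in_range(2, 65530, 5));	/* 1 */
	return 0;
}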
diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
new file mode 100644
index 0000000000..43c3f1c971
--- /dev/null
+++ b/net/tipc/crypto.c
@@ -0,0 +1,2475 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * net/tipc/crypto.c: TIPC crypto for key handling & packet en/decryption
+ *
+ * Copyright (c) 2019, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/rng.h>
+#include "crypto.h"
+#include "msg.h"
+#include "bcast.h"
+
+#define TIPC_TX_GRACE_PERIOD msecs_to_jiffies(5000) /* 5s */
+#define TIPC_TX_LASTING_TIME msecs_to_jiffies(10000) /* 10s */
+#define TIPC_RX_ACTIVE_LIM msecs_to_jiffies(3000) /* 3s */
+#define TIPC_RX_PASSIVE_LIM msecs_to_jiffies(15000) /* 15s */
+
+#define TIPC_MAX_TFMS_DEF 10
+#define TIPC_MAX_TFMS_LIM 1000
+
+#define TIPC_REKEYING_INTV_DEF (60 * 24) /* default: 1 day */
+
+/*
+ * TIPC Key ids
+ */
+enum {
+ KEY_MASTER = 0,
+ KEY_MIN = KEY_MASTER,
+ KEY_1 = 1,
+ KEY_2,
+ KEY_3,
+ KEY_MAX = KEY_3,
+};
+
+/*
+ * TIPC Crypto statistics
+ */
+enum {
+ STAT_OK,
+ STAT_NOK,
+ STAT_ASYNC,
+ STAT_ASYNC_OK,
+ STAT_ASYNC_NOK,
+ STAT_BADKEYS, /* tx only */
+ STAT_BADMSGS = STAT_BADKEYS, /* rx only */
+ STAT_NOKEYS,
+ STAT_SWITCHES,
+
+ MAX_STATS,
+};
+
+/* TIPC crypto statistics' header */
+static const char *hstats[MAX_STATS] = {"ok", "nok", "async", "async_ok",
+ "async_nok", "badmsgs", "nokeys",
+ "switches"};
+
+/* Max TFMs number per key */
+int sysctl_tipc_max_tfms __read_mostly = TIPC_MAX_TFMS_DEF;
+/* Key exchange switch, default: on */
+int sysctl_tipc_key_exchange_enabled __read_mostly = 1;
+
+/*
+ * struct tipc_key - TIPC keys' status indicator
+ *
+ * 7 6 5 4 3 2 1 0
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * key: | (reserved)|passive idx| active idx|pending idx|
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ */
+struct tipc_key {
+#define KEY_BITS (2)
+#define KEY_MASK ((1 << KEY_BITS) - 1)
+ union {
+ struct {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 pending:2,
+ active:2,
+ passive:2, /* rx only */
+ reserved:2;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u8 reserved:2,
+ passive:2, /* rx only */
+ active:2,
+ pending:2;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ } __packed;
+ u8 keys;
+ };
+};
+
+/**
+ * struct tipc_tfm - TIPC TFM structure to form a list of TFMs
+ * @tfm: cipher handle/key
+ * @list: linked list of TFMs
+ */
+struct tipc_tfm {
+ struct crypto_aead *tfm;
+ struct list_head list;
+};
+
+/**
+ * struct tipc_aead - TIPC AEAD key structure
+ * @tfm_entry: per-cpu pointer to one entry in TFM list
+ * @crypto: the TIPC crypto that owns this key
+ * @cloned: reference to the source key in case of cloning
+ * @users: the number of the key users (TX/RX)
+ * @salt: the key's SALT value
+ * @authsize: authentication tag size (max = 16)
+ * @mode: crypto mode applied to the key
+ * @hint: a hint for the user key
+ * @rcu: struct rcu_head
+ * @key: the aead key
+ * @gen: the key's generation
+ * @seqno: the key seqno (cluster scope)
+ * @refcnt: the key reference counter
+ */
+struct tipc_aead {
+#define TIPC_AEAD_HINT_LEN (5)
+ struct tipc_tfm * __percpu *tfm_entry;
+ struct tipc_crypto *crypto;
+ struct tipc_aead *cloned;
+ atomic_t users;
+ u32 salt;
+ u8 authsize;
+ u8 mode;
+ char hint[2 * TIPC_AEAD_HINT_LEN + 1];
+ struct rcu_head rcu;
+ struct tipc_aead_key *key;
+ u16 gen;
+
+ atomic64_t seqno ____cacheline_aligned;
+ refcount_t refcnt ____cacheline_aligned;
+
+} ____cacheline_aligned;
+
+/**
+ * struct tipc_crypto_stats - TIPC Crypto statistics
+ * @stat: array of crypto statistics
+ */
+struct tipc_crypto_stats {
+ unsigned int stat[MAX_STATS];
+};
+
+/**
+ * struct tipc_crypto - TIPC TX/RX crypto structure
+ * @net: struct net
+ * @node: TIPC node (RX)
+ * @aead: array of pointers to AEAD keys for encryption/decryption
+ * @peer_rx_active: replicated peer RX active key index
+ * @key_gen: TX/RX key generation
+ * @key: the key states
+ * @skey_mode: session key's mode
+ * @skey: received session key
+ * @wq: common workqueue on TX crypto
+ * @work: delayed work sched for TX/RX
+ * @key_distr: key distributing state
+ * @rekeying_intv: rekeying interval (in minutes)
+ * @stats: the crypto statistics
+ * @name: the crypto name
+ * @sndnxt: the per-peer sndnxt (TX)
+ * @timer1: general timer 1 (jiffies)
+ * @timer2: general timer 2 (jiffies)
+ * @working: the crypto is working or not
+ * @key_master: flag indicates if master key exists
+ * @legacy_user: flag indicates if a peer joins w/o master key (for bwd comp.)
+ * @nokey: no key indication
+ * @flags: combined flags field
+ * @lock: tipc_key lock
+ */
+struct tipc_crypto {
+ struct net *net;
+ struct tipc_node *node;
+ struct tipc_aead __rcu *aead[KEY_MAX + 1];
+ atomic_t peer_rx_active;
+ u16 key_gen;
+ struct tipc_key key;
+ u8 skey_mode;
+ struct tipc_aead_key *skey;
+ struct workqueue_struct *wq;
+ struct delayed_work work;
+#define KEY_DISTR_SCHED 1
+#define KEY_DISTR_COMPL 2
+ atomic_t key_distr;
+ u32 rekeying_intv;
+
+ struct tipc_crypto_stats __percpu *stats;
+ char name[48];
+
+ atomic64_t sndnxt ____cacheline_aligned;
+ unsigned long timer1;
+ unsigned long timer2;
+ union {
+ struct {
+ u8 working:1;
+ u8 key_master:1;
+ u8 legacy_user:1;
+ u8 nokey: 1;
+ };
+ u8 flags;
+ };
+ spinlock_t lock; /* crypto lock */
+
+} ____cacheline_aligned;
+
+/* struct tipc_crypto_tx_ctx - TX context for callbacks */
+struct tipc_crypto_tx_ctx {
+ struct tipc_aead *aead;
+ struct tipc_bearer *bearer;
+ struct tipc_media_addr dst;
+};
+
+/* struct tipc_crypto_rx_ctx - RX context for callbacks */
+struct tipc_crypto_rx_ctx {
+ struct tipc_aead *aead;
+ struct tipc_bearer *bearer;
+};
+
+static struct tipc_aead *tipc_aead_get(struct tipc_aead __rcu *aead);
+static inline void tipc_aead_put(struct tipc_aead *aead);
+static void tipc_aead_free(struct rcu_head *rp);
+static int tipc_aead_users(struct tipc_aead __rcu *aead);
+static void tipc_aead_users_inc(struct tipc_aead __rcu *aead, int lim);
+static void tipc_aead_users_dec(struct tipc_aead __rcu *aead, int lim);
+static void tipc_aead_users_set(struct tipc_aead __rcu *aead, int val);
+static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead);
+static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
+ u8 mode);
+static int tipc_aead_clone(struct tipc_aead **dst, struct tipc_aead *src);
+static void *tipc_aead_mem_alloc(struct crypto_aead *tfm,
+ unsigned int crypto_ctx_size,
+ u8 **iv, struct aead_request **req,
+ struct scatterlist **sg, int nsg);
+static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
+ struct tipc_bearer *b,
+ struct tipc_media_addr *dst,
+ struct tipc_node *__dnode);
+static void tipc_aead_encrypt_done(void *data, int err);
+static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead,
+ struct sk_buff *skb, struct tipc_bearer *b);
+static void tipc_aead_decrypt_done(void *data, int err);
+static inline int tipc_ehdr_size(struct tipc_ehdr *ehdr);
+static int tipc_ehdr_build(struct net *net, struct tipc_aead *aead,
+ u8 tx_key, struct sk_buff *skb,
+ struct tipc_crypto *__rx);
+static inline void tipc_crypto_key_set_state(struct tipc_crypto *c,
+ u8 new_passive,
+ u8 new_active,
+ u8 new_pending);
+static int tipc_crypto_key_attach(struct tipc_crypto *c,
+ struct tipc_aead *aead, u8 pos,
+ bool master_key);
+static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending);
+static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
+ struct tipc_crypto *rx,
+ struct sk_buff *skb,
+ u8 tx_key);
+static void tipc_crypto_key_synch(struct tipc_crypto *rx, struct sk_buff *skb);
+static int tipc_crypto_key_revoke(struct net *net, u8 tx_key);
+static inline void tipc_crypto_clone_msg(struct net *net, struct sk_buff *_skb,
+ struct tipc_bearer *b,
+ struct tipc_media_addr *dst,
+ struct tipc_node *__dnode, u8 type);
+static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
+ struct tipc_bearer *b,
+ struct sk_buff **skb, int err);
+static void tipc_crypto_do_cmd(struct net *net, int cmd);
+static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf);
+static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new,
+ char *buf);
+static int tipc_crypto_key_xmit(struct net *net, struct tipc_aead_key *skey,
+ u16 gen, u8 mode, u32 dnode);
+static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr);
+static void tipc_crypto_work_tx(struct work_struct *work);
+static void tipc_crypto_work_rx(struct work_struct *work);
+static int tipc_aead_key_generate(struct tipc_aead_key *skey);
+
+#define is_tx(crypto) (!(crypto)->node)
+#define is_rx(crypto) (!is_tx(crypto))
+
+#define key_next(cur) ((cur) % KEY_MAX + 1)
+
+#define tipc_aead_rcu_ptr(rcu_ptr, lock) \
+ rcu_dereference_protected((rcu_ptr), lockdep_is_held(lock))
+
+#define tipc_aead_rcu_replace(rcu_ptr, ptr, lock) \
+do { \
+ struct tipc_aead *__tmp = rcu_dereference_protected((rcu_ptr), \
+ lockdep_is_held(lock)); \
+ rcu_assign_pointer((rcu_ptr), (ptr)); \
+ tipc_aead_put(__tmp); \
+} while (0)
+
+#define tipc_crypto_key_detach(rcu_ptr, lock) \
+ tipc_aead_rcu_replace((rcu_ptr), NULL, lock)
+
+/**
+ * tipc_aead_key_validate - Validate an AEAD user key
+ * @ukey: pointer to user key data
+ * @info: netlink info pointer
+ */
+int tipc_aead_key_validate(struct tipc_aead_key *ukey, struct genl_info *info)
+{
+ int keylen;
+
+ /* Check if algorithm exists */
+ if (unlikely(!crypto_has_alg(ukey->alg_name, 0, 0))) {
+ GENL_SET_ERR_MSG(info, "unable to load the algorithm (module existed?)");
+ return -ENODEV;
+ }
+
+ /* Currently, we only support the "gcm(aes)" cipher algorithm */
+ if (strcmp(ukey->alg_name, "gcm(aes)")) {
+ GENL_SET_ERR_MSG(info, "not supported yet the algorithm");
+ return -ENOTSUPP;
+ }
+
+ /* Check if key size is correct */
+ keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE;
+ if (unlikely(keylen != TIPC_AES_GCM_KEY_SIZE_128 &&
+ keylen != TIPC_AES_GCM_KEY_SIZE_192 &&
+ keylen != TIPC_AES_GCM_KEY_SIZE_256)) {
+ GENL_SET_ERR_MSG(info, "incorrect key length (20, 28 or 36 octets?)");
+ return -EKEYREJECTED;
+ }
+
+ return 0;
+}
+
+/**
+ * tipc_aead_key_generate - Generate new session key
+ * @skey: input/output key with new content
+ *
+ * Return: 0 in case of success, otherwise < 0
+ */
+static int tipc_aead_key_generate(struct tipc_aead_key *skey)
+{
+ int rc = 0;
+
+ /* Fill the key's content with a random value via RNG cipher */
+ rc = crypto_get_default_rng();
+ if (likely(!rc)) {
+ rc = crypto_rng_get_bytes(crypto_default_rng, skey->key,
+ skey->keylen);
+ crypto_put_default_rng();
+ }
+
+ return rc;
+}
+
+static struct tipc_aead *tipc_aead_get(struct tipc_aead __rcu *aead)
+{
+ struct tipc_aead *tmp;
+
+ rcu_read_lock();
+ tmp = rcu_dereference(aead);
+ if (unlikely(!tmp || !refcount_inc_not_zero(&tmp->refcnt)))
+ tmp = NULL;
+ rcu_read_unlock();
+
+ return tmp;
+}
+
+static inline void tipc_aead_put(struct tipc_aead *aead)
+{
+ if (aead && refcount_dec_and_test(&aead->refcnt))
+ call_rcu(&aead->rcu, tipc_aead_free);
+}
+
+/**
+ * tipc_aead_free - Release AEAD key incl. all the TFMs in the list
+ * @rp: rcu head pointer
+ */
+static void tipc_aead_free(struct rcu_head *rp)
+{
+ struct tipc_aead *aead = container_of(rp, struct tipc_aead, rcu);
+ struct tipc_tfm *tfm_entry, *head, *tmp;
+
+ if (aead->cloned) {
+ tipc_aead_put(aead->cloned);
+ } else {
+ head = *get_cpu_ptr(aead->tfm_entry);
+ put_cpu_ptr(aead->tfm_entry);
+ list_for_each_entry_safe(tfm_entry, tmp, &head->list, list) {
+ crypto_free_aead(tfm_entry->tfm);
+ list_del(&tfm_entry->list);
+ kfree(tfm_entry);
+ }
+ /* Free the head */
+ crypto_free_aead(head->tfm);
+ list_del(&head->list);
+ kfree(head);
+ }
+ free_percpu(aead->tfm_entry);
+ kfree_sensitive(aead->key);
+ kfree(aead);
+}
+
+static int tipc_aead_users(struct tipc_aead __rcu *aead)
+{
+ struct tipc_aead *tmp;
+ int users = 0;
+
+ rcu_read_lock();
+ tmp = rcu_dereference(aead);
+ if (tmp)
+ users = atomic_read(&tmp->users);
+ rcu_read_unlock();
+
+ return users;
+}
+
+static void tipc_aead_users_inc(struct tipc_aead __rcu *aead, int lim)
+{
+ struct tipc_aead *tmp;
+
+ rcu_read_lock();
+ tmp = rcu_dereference(aead);
+ if (tmp)
+ atomic_add_unless(&tmp->users, 1, lim);
+ rcu_read_unlock();
+}
+
+static void tipc_aead_users_dec(struct tipc_aead __rcu *aead, int lim)
+{
+ struct tipc_aead *tmp;
+
+ rcu_read_lock();
+ tmp = rcu_dereference(aead);
+ if (tmp)
+ atomic_add_unless(&rcu_dereference(aead)->users, -1, lim);
+ rcu_read_unlock();
+}
+
+static void tipc_aead_users_set(struct tipc_aead __rcu *aead, int val)
+{
+ struct tipc_aead *tmp;
+ int cur;
+
+ rcu_read_lock();
+ tmp = rcu_dereference(aead);
+ if (tmp) {
+ do {
+ cur = atomic_read(&tmp->users);
+ if (cur == val)
+ break;
+ } while (atomic_cmpxchg(&tmp->users, cur, val) != cur);
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * tipc_aead_tfm_next - Move TFM entry to the next one in list and return it
+ * @aead: the AEAD key pointer
+ */
+static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead)
+{
+ struct tipc_tfm **tfm_entry;
+ struct crypto_aead *tfm;
+
+ tfm_entry = get_cpu_ptr(aead->tfm_entry);
+ *tfm_entry = list_next_entry(*tfm_entry, list);
+ tfm = (*tfm_entry)->tfm;
+ put_cpu_ptr(tfm_entry);
+
+ return tfm;
+}
+
+/**
+ * tipc_aead_init - Initiate TIPC AEAD
+ * @aead: returned new TIPC AEAD key handle pointer
+ * @ukey: pointer to user key data
+ * @mode: the key mode
+ *
+ * Allocate a (list of) new cipher transformations (TFMs) with the specific
+ * user key data if valid. The number of TFMs to allocate can be set via the
+ * sysctl "net/tipc/max_tfms" first.
+ * Also, all the other AEAD data are also initialized.
+ *
+ * Return: 0 if the initiation is successful, otherwise: < 0
+ */
+static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
+ u8 mode)
+{
+ struct tipc_tfm *tfm_entry, *head;
+ struct crypto_aead *tfm;
+ struct tipc_aead *tmp;
+ int keylen, err, cpu;
+ int tfm_cnt = 0;
+
+ if (unlikely(*aead))
+ return -EEXIST;
+
+ /* Allocate a new AEAD */
+ tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
+ if (unlikely(!tmp))
+ return -ENOMEM;
+
+ /* The key consists of two parts: [AES-KEY][SALT] */
+ keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE;
+
+ /* Allocate per-cpu TFM entry pointer */
+ tmp->tfm_entry = alloc_percpu(struct tipc_tfm *);
+ if (!tmp->tfm_entry) {
+ kfree_sensitive(tmp);
+ return -ENOMEM;
+ }
+
+ /* Make a list of TFMs with the user key data */
+ do {
+ tfm = crypto_alloc_aead(ukey->alg_name, 0, 0);
+ if (IS_ERR(tfm)) {
+ err = PTR_ERR(tfm);
+ break;
+ }
+
+ if (unlikely(!tfm_cnt &&
+ crypto_aead_ivsize(tfm) != TIPC_AES_GCM_IV_SIZE)) {
+ crypto_free_aead(tfm);
+ err = -ENOTSUPP;
+ break;
+ }
+
+ err = crypto_aead_setauthsize(tfm, TIPC_AES_GCM_TAG_SIZE);
+ err |= crypto_aead_setkey(tfm, ukey->key, keylen);
+ if (unlikely(err)) {
+ crypto_free_aead(tfm);
+ break;
+ }
+
+ tfm_entry = kmalloc(sizeof(*tfm_entry), GFP_KERNEL);
+ if (unlikely(!tfm_entry)) {
+ crypto_free_aead(tfm);
+ err = -ENOMEM;
+ break;
+ }
+ INIT_LIST_HEAD(&tfm_entry->list);
+ tfm_entry->tfm = tfm;
+
+ /* First entry? */
+ if (!tfm_cnt) {
+ head = tfm_entry;
+ for_each_possible_cpu(cpu) {
+ *per_cpu_ptr(tmp->tfm_entry, cpu) = head;
+ }
+ } else {
+ list_add_tail(&tfm_entry->list, &head->list);
+ }
+
+ } while (++tfm_cnt < sysctl_tipc_max_tfms);
+
+ /* No TFM has been allocated? */
+ if (!tfm_cnt) {
+ free_percpu(tmp->tfm_entry);
+ kfree_sensitive(tmp);
+ return err;
+ }
+
+ /* Form a hex string of some last bytes as the key's hint */
+ bin2hex(tmp->hint, ukey->key + keylen - TIPC_AEAD_HINT_LEN,
+ TIPC_AEAD_HINT_LEN);
+
+ /* Initialize the other data */
+ tmp->mode = mode;
+ tmp->cloned = NULL;
+ tmp->authsize = TIPC_AES_GCM_TAG_SIZE;
+ tmp->key = kmemdup(ukey, tipc_aead_key_size(ukey), GFP_KERNEL);
+ if (!tmp->key) {
+ tipc_aead_free(&tmp->rcu);
+ return -ENOMEM;
+ }
+ memcpy(&tmp->salt, ukey->key + keylen, TIPC_AES_GCM_SALT_SIZE);
+ atomic_set(&tmp->users, 0);
+ atomic64_set(&tmp->seqno, 0);
+ refcount_set(&tmp->refcnt, 1);
+
+ *aead = tmp;
+ return 0;
+}
+
+/**
+ * tipc_aead_clone - Clone a TIPC AEAD key
+ * @dst: dest key for the cloning
+ * @src: source key to clone from
+ *
+ * Make a "copy" of the source AEAD key data to the dest; the TFM list is
+ * shared between the two keys.
+ * A reference to the source is held in the "cloned" pointer for later
+ * freeing purposes.
+ *
+ * Note: this must be done in cluster-key mode only!
+ * Return: 0 in case of success, otherwise < 0
+ */
+static int tipc_aead_clone(struct tipc_aead **dst, struct tipc_aead *src)
+{
+ struct tipc_aead *aead;
+ int cpu;
+
+ if (!src)
+ return -ENOKEY;
+
+ if (src->mode != CLUSTER_KEY)
+ return -EINVAL;
+
+ if (unlikely(*dst))
+ return -EEXIST;
+
+ aead = kzalloc(sizeof(*aead), GFP_ATOMIC);
+ if (unlikely(!aead))
+ return -ENOMEM;
+
+ aead->tfm_entry = alloc_percpu_gfp(struct tipc_tfm *, GFP_ATOMIC);
+ if (unlikely(!aead->tfm_entry)) {
+ kfree_sensitive(aead);
+ return -ENOMEM;
+ }
+
+ for_each_possible_cpu(cpu) {
+ *per_cpu_ptr(aead->tfm_entry, cpu) =
+ *per_cpu_ptr(src->tfm_entry, cpu);
+ }
+
+ memcpy(aead->hint, src->hint, sizeof(src->hint));
+ aead->mode = src->mode;
+ aead->salt = src->salt;
+ aead->authsize = src->authsize;
+ atomic_set(&aead->users, 0);
+ atomic64_set(&aead->seqno, 0);
+ refcount_set(&aead->refcnt, 1);
+
+ WARN_ON(!refcount_inc_not_zero(&src->refcnt));
+ aead->cloned = src;
+
+ *dst = aead;
+ return 0;
+}
+
+/**
+ * tipc_aead_mem_alloc - Allocate memory for AEAD request operations
+ * @tfm: cipher handle to be registered with the request
+ * @crypto_ctx_size: size of crypto context for callback
+ * @iv: returned pointer to IV data
+ * @req: returned pointer to AEAD request data
+ * @sg: returned pointer to SG lists
+ * @nsg: number of SG lists to be allocated
+ *
+ * Allocate memory to store the crypto context data, AEAD request, IV and SG
+ * lists, the memory layout is as follows:
+ * crypto_ctx || iv || aead_req || sg[]
+ *
+ * Return: the pointer to the memory areas in case of success, otherwise NULL
+ */
+static void *tipc_aead_mem_alloc(struct crypto_aead *tfm,
+ unsigned int crypto_ctx_size,
+ u8 **iv, struct aead_request **req,
+ struct scatterlist **sg, int nsg)
+{
+ unsigned int iv_size, req_size;
+ unsigned int len;
+ u8 *mem;
+
+ iv_size = crypto_aead_ivsize(tfm);
+ req_size = sizeof(**req) + crypto_aead_reqsize(tfm);
+
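+ /* Compute the total length: crypto context, then the IV with slack for
+ * the cipher alignment, then the AEAD request aligned to the TFM
+ * context alignment, then the SG array.
+ */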
+ len = crypto_ctx_size;
+ len += iv_size;
+ len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1);
+ len = ALIGN(len, crypto_tfm_ctx_alignment());
+ len += req_size;
+ len = ALIGN(len, __alignof__(struct scatterlist));
+ len += nsg * sizeof(**sg);
+
+ mem = kmalloc(len, GFP_ATOMIC);
+ if (!mem)
+ return NULL;
+
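+ /* Carve the IV, AEAD request and SG areas out of the single allocation
+ * at their required alignments
+ */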
+ *iv = (u8 *)PTR_ALIGN(mem + crypto_ctx_size,
+ crypto_aead_alignmask(tfm) + 1);
+ *req = (struct aead_request *)PTR_ALIGN(*iv + iv_size,
+ crypto_tfm_ctx_alignment());
+ *sg = (struct scatterlist *)PTR_ALIGN((u8 *)*req + req_size,
+ __alignof__(struct scatterlist));
+
+ return (void *)mem;
+}
+
+/**
+ * tipc_aead_encrypt - Encrypt a message
+ * @aead: TIPC AEAD key for the message encryption
+ * @skb: the input/output skb
+ * @b: TIPC bearer where the message will be delivered after the encryption
+ * @dst: the destination media address
+ * @__dnode: TIPC dest node if "known"
+ *
+ * Return:
+ * * 0 : if the encryption has completed
+ * * -EINPROGRESS/-EBUSY : if a callback will be performed
+ * * < 0 : the encryption has failed
+ */
+static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
+ struct tipc_bearer *b,
+ struct tipc_media_addr *dst,
+ struct tipc_node *__dnode)
+{
+ struct crypto_aead *tfm = tipc_aead_tfm_next(aead);
+ struct tipc_crypto_tx_ctx *tx_ctx;
+ struct aead_request *req;
+ struct sk_buff *trailer;
+ struct scatterlist *sg;
+ struct tipc_ehdr *ehdr;
+ int ehsz, len, tailen, nsg, rc;
+ void *ctx;
+ u32 salt;
+ u8 *iv;
+
+ /* Make sure message len at least 4-byte aligned */
+ len = ALIGN(skb->len, 4);
+ tailen = len - skb->len + aead->authsize;
+
+ /* Expand skb tail for the authentication tag:
+ * For simplicity, we have made sure the skb gets enough tailroom for
+ * the authentication tag at skb allocation time. Even when the skb is
+ * nonlinear but has no frag_list, it should still be fine!
+ * Otherwise, we must cow it into a writable buffer with the tailroom.
+ */
+ SKB_LINEAR_ASSERT(skb);
+ if (tailen > skb_tailroom(skb)) {
+ pr_debug("TX(): skb tailroom is not enough: %d, requires: %d\n",
+ skb_tailroom(skb), tailen);
+ }
+
+ nsg = skb_cow_data(skb, tailen, &trailer);
+ if (unlikely(nsg < 0)) {
+ pr_err("TX: skb_cow_data() returned %d\n", nsg);
+ return nsg;
+ }
+
+ pskb_put(skb, trailer, tailen);
+
+ /* Allocate memory for the AEAD operation */
+ ctx = tipc_aead_mem_alloc(tfm, sizeof(*tx_ctx), &iv, &req, &sg, nsg);
+ if (unlikely(!ctx))
+ return -ENOMEM;
+ TIPC_SKB_CB(skb)->crypto_ctx = ctx;
+
+ /* Map skb to the sg lists */
+ sg_init_table(sg, nsg);
+ rc = skb_to_sgvec(skb, sg, 0, skb->len);
+ if (unlikely(rc < 0)) {
+ pr_err("TX: skb_to_sgvec() returned %d, nsg %d!\n", rc, nsg);
+ goto exit;
+ }
+
+ /* Prepare IV: [SALT (4 octets)][SEQNO (8 octets)]
+ * In case we're in cluster-key mode, SALT is varied by xor-ing with
+ * the source address (or w0 of id), otherwise with the dest address
+ * if dest is known.
+ */
+ ehdr = (struct tipc_ehdr *)skb->data;
+ salt = aead->salt;
+ if (aead->mode == CLUSTER_KEY)
+ salt ^= __be32_to_cpu(ehdr->addr);
+ else if (__dnode)
+ salt ^= tipc_node_get_addr(__dnode);
+ memcpy(iv, &salt, 4);
+ memcpy(iv + 4, (u8 *)&ehdr->seqno, 8);
+
+ /* Prepare request */
+ ehsz = tipc_ehdr_size(ehdr);
+ aead_request_set_tfm(req, tfm);
+ aead_request_set_ad(req, ehsz);
+ aead_request_set_crypt(req, sg, sg, len - ehsz, iv);
+
+ /* Set callback function & data */
+ aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ tipc_aead_encrypt_done, skb);
+ tx_ctx = (struct tipc_crypto_tx_ctx *)ctx;
+ tx_ctx->aead = aead;
+ tx_ctx->bearer = b;
+ memcpy(&tx_ctx->dst, dst, sizeof(*dst));
+
+ /* Hold bearer */
+ if (unlikely(!tipc_bearer_hold(b))) {
+ rc = -ENODEV;
+ goto exit;
+ }
+
+ /* Now, do encrypt */
+ rc = crypto_aead_encrypt(req);
+ if (rc == -EINPROGRESS || rc == -EBUSY)
+ return rc;
+
+ tipc_bearer_put(b);
+
+exit:
+ kfree(ctx);
+ TIPC_SKB_CB(skb)->crypto_ctx = NULL;
+ return rc;
+}
+
+static void tipc_aead_encrypt_done(void *data, int err)
+{
+ struct sk_buff *skb = data;
+ struct tipc_crypto_tx_ctx *tx_ctx = TIPC_SKB_CB(skb)->crypto_ctx;
+ struct tipc_bearer *b = tx_ctx->bearer;
+ struct tipc_aead *aead = tx_ctx->aead;
+ struct tipc_crypto *tx = aead->crypto;
+ struct net *net = tx->net;
+
+ switch (err) {
+ case 0:
+ this_cpu_inc(tx->stats->stat[STAT_ASYNC_OK]);
+ rcu_read_lock();
+ if (likely(test_bit(0, &b->up)))
+ b->media->send_msg(net, skb, b, &tx_ctx->dst);
+ else
+ kfree_skb(skb);
+ rcu_read_unlock();
+ break;
+ case -EINPROGRESS:
+ return;
+ default:
+ this_cpu_inc(tx->stats->stat[STAT_ASYNC_NOK]);
+ kfree_skb(skb);
+ break;
+ }
+
+ kfree(tx_ctx);
+ tipc_bearer_put(b);
+ tipc_aead_put(aead);
+}
+
+/**
+ * tipc_aead_decrypt - Decrypt an encrypted message
+ * @net: struct net
+ * @aead: TIPC AEAD for the message decryption
+ * @skb: the input/output skb
+ * @b: TIPC bearer where the message has been received
+ *
+ * Return:
+ * * 0 : if the decryption has completed
+ * * -EINPROGRESS/-EBUSY : if a callback will be performed
+ * * < 0 : the decryption has failed
+ */
+static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead,
+ struct sk_buff *skb, struct tipc_bearer *b)
+{
+ struct tipc_crypto_rx_ctx *rx_ctx;
+ struct aead_request *req;
+ struct crypto_aead *tfm;
+ struct sk_buff *unused;
+ struct scatterlist *sg;
+ struct tipc_ehdr *ehdr;
+ int ehsz, nsg, rc;
+ void *ctx;
+ u32 salt;
+ u8 *iv;
+
+ if (unlikely(!aead))
+ return -ENOKEY;
+
+ nsg = skb_cow_data(skb, 0, &unused);
+ if (unlikely(nsg < 0)) {
+ pr_err("RX: skb_cow_data() returned %d\n", nsg);
+ return nsg;
+ }
+
+ /* Allocate memory for the AEAD operation */
+ tfm = tipc_aead_tfm_next(aead);
+ ctx = tipc_aead_mem_alloc(tfm, sizeof(*rx_ctx), &iv, &req, &sg, nsg);
+ if (unlikely(!ctx))
+ return -ENOMEM;
+ TIPC_SKB_CB(skb)->crypto_ctx = ctx;
+
+ /* Map skb to the sg lists */
+ sg_init_table(sg, nsg);
+ rc = skb_to_sgvec(skb, sg, 0, skb->len);
+ if (unlikely(rc < 0)) {
+ pr_err("RX: skb_to_sgvec() returned %d, nsg %d\n", rc, nsg);
+ goto exit;
+ }
+
+ /* Reconstruct IV: */
+ ehdr = (struct tipc_ehdr *)skb->data;
+ salt = aead->salt;
+ if (aead->mode == CLUSTER_KEY)
+ salt ^= __be32_to_cpu(ehdr->addr);
+ else if (ehdr->destined)
+ salt ^= tipc_own_addr(net);
+ memcpy(iv, &salt, 4);
+ memcpy(iv + 4, (u8 *)&ehdr->seqno, 8);
+
+ /* Prepare request */
+ ehsz = tipc_ehdr_size(ehdr);
+ aead_request_set_tfm(req, tfm);
+ aead_request_set_ad(req, ehsz);
+ aead_request_set_crypt(req, sg, sg, skb->len - ehsz, iv);
+
+ /* Set callback function & data */
+ aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ tipc_aead_decrypt_done, skb);
+ rx_ctx = (struct tipc_crypto_rx_ctx *)ctx;
+ rx_ctx->aead = aead;
+ rx_ctx->bearer = b;
+
+ /* Hold bearer */
+ if (unlikely(!tipc_bearer_hold(b))) {
+ rc = -ENODEV;
+ goto exit;
+ }
+
+ /* Now, do decrypt */
+ rc = crypto_aead_decrypt(req);
+ if (rc == -EINPROGRESS || rc == -EBUSY)
+ return rc;
+
+ tipc_bearer_put(b);
+
+exit:
+ kfree(ctx);
+ TIPC_SKB_CB(skb)->crypto_ctx = NULL;
+ return rc;
+}
+
+static void tipc_aead_decrypt_done(void *data, int err)
+{
+ struct sk_buff *skb = data;
+ struct tipc_crypto_rx_ctx *rx_ctx = TIPC_SKB_CB(skb)->crypto_ctx;
+ struct tipc_bearer *b = rx_ctx->bearer;
+ struct tipc_aead *aead = rx_ctx->aead;
+ struct tipc_crypto_stats __percpu *stats = aead->crypto->stats;
+ struct net *net = aead->crypto->net;
+
+ switch (err) {
+ case 0:
+ this_cpu_inc(stats->stat[STAT_ASYNC_OK]);
+ break;
+ case -EINPROGRESS:
+ return;
+ default:
+ this_cpu_inc(stats->stat[STAT_ASYNC_NOK]);
+ break;
+ }
+
+ kfree(rx_ctx);
+ tipc_crypto_rcv_complete(net, aead, b, &skb, err);
+ if (likely(skb)) {
+ if (likely(test_bit(0, &b->up)))
+ tipc_rcv(net, skb, b);
+ else
+ kfree_skb(skb);
+ }
+
+ tipc_bearer_put(b);
+}
+
+static inline int tipc_ehdr_size(struct tipc_ehdr *ehdr)
+{
+ return (ehdr->user != LINK_CONFIG) ? EHDR_SIZE : EHDR_CFG_SIZE;
+}
+
+/**
+ * tipc_ehdr_validate - Validate an encryption message
+ * @skb: the message buffer
+ *
+ * Return: "true" if this is a valid encryption message, otherwise "false"
+ */
+bool tipc_ehdr_validate(struct sk_buff *skb)
+{
+ struct tipc_ehdr *ehdr;
+ int ehsz;
+
+ if (unlikely(!pskb_may_pull(skb, EHDR_MIN_SIZE)))
+ return false;
+
+ ehdr = (struct tipc_ehdr *)skb->data;
+ if (unlikely(ehdr->version != TIPC_EVERSION))
+ return false;
+ ehsz = tipc_ehdr_size(ehdr);
+ if (unlikely(!pskb_may_pull(skb, ehsz)))
+ return false;
+ if (unlikely(skb->len <= ehsz + TIPC_AES_GCM_TAG_SIZE))
+ return false;
+
+ return true;
+}
+
+/**
+ * tipc_ehdr_build - Build TIPC encryption message header
+ * @net: struct net
+ * @aead: TX AEAD key to be used for the message encryption
+ * @tx_key: key id used for the message encryption
+ * @skb: input/output message skb
+ * @__rx: RX crypto handle if dest is "known"
+ *
+ * Return: the header size if the building is successful, otherwise < 0
+ */
+static int tipc_ehdr_build(struct net *net, struct tipc_aead *aead,
+ u8 tx_key, struct sk_buff *skb,
+ struct tipc_crypto *__rx)
+{
+ struct tipc_msg *hdr = buf_msg(skb);
+ struct tipc_ehdr *ehdr;
+ u32 user = msg_user(hdr);
+ u64 seqno;
+ int ehsz;
+
+ /* Make room for encryption header */
+ ehsz = (user != LINK_CONFIG) ? EHDR_SIZE : EHDR_CFG_SIZE;
+ WARN_ON(skb_headroom(skb) < ehsz);
+ ehdr = (struct tipc_ehdr *)skb_push(skb, ehsz);
+
+ /* Obtain a seqno first:
+ * Use the key seqno (= cluster wide) if dest is unknown or we're in
+ * cluster key mode, otherwise a per-peer seqno is preferable!
+ */
+ if (!__rx || aead->mode == CLUSTER_KEY)
+ seqno = atomic64_inc_return(&aead->seqno);
+ else
+ seqno = atomic64_inc_return(&__rx->sndnxt);
+
+ /* Revoke the key if seqno is wrapped around */
+ if (unlikely(!seqno))
+ return tipc_crypto_key_revoke(net, tx_key);
+
+ /* Word 1-2 */
+ ehdr->seqno = cpu_to_be64(seqno);
+
+ /* Words 0, 3- */
+ ehdr->version = TIPC_EVERSION;
+ ehdr->user = 0;
+ ehdr->keepalive = 0;
+ ehdr->tx_key = tx_key;
+ ehdr->destined = (__rx) ? 1 : 0;
+ ehdr->rx_key_active = (__rx) ? __rx->key.active : 0;
+ ehdr->rx_nokey = (__rx) ? __rx->nokey : 0;
+ ehdr->master_key = aead->crypto->key_master;
+ ehdr->reserved_1 = 0;
+ ehdr->reserved_2 = 0;
+
+ switch (user) {
+ case LINK_CONFIG:
+ ehdr->user = LINK_CONFIG;
+ memcpy(ehdr->id, tipc_own_id(net), NODE_ID_LEN);
+ break;
+ default:
+ if (user == LINK_PROTOCOL && msg_type(hdr) == STATE_MSG) {
+ ehdr->user = LINK_PROTOCOL;
+ ehdr->keepalive = msg_is_keepalive(hdr);
+ }
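+ /* Word 3 of the original header carries the previous node address */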
+ ehdr->addr = hdr->hdr[3];
+ break;
+ }
+
+ return ehsz;
+}
+
+static inline void tipc_crypto_key_set_state(struct tipc_crypto *c,
+ u8 new_passive,
+ u8 new_active,
+ u8 new_pending)
+{
+ struct tipc_key old = c->key;
+ char buf[32];
+
+ c->key.keys = ((new_passive & KEY_MASK) << (KEY_BITS * 2)) |
+ ((new_active & KEY_MASK) << (KEY_BITS)) |
+ ((new_pending & KEY_MASK));
+
+ pr_debug("%s: key changing %s ::%pS\n", c->name,
+ tipc_key_change_dump(old, c->key, buf),
+ __builtin_return_address(0));
+}
+
+/**
+ * tipc_crypto_key_init - Initiate a new user / AEAD key
+ * @c: TIPC crypto to which new key is attached
+ * @ukey: the user key
+ * @mode: the key mode (CLUSTER_KEY or PER_NODE_KEY)
+ * @master_key: specify this is a cluster master key
+ *
+ * A new TIPC AEAD key will be allocated and initiated with the specified user
+ * key, then attached to the TIPC crypto.
+ *
+ * Return: new key id in case of success, otherwise: < 0
+ */
+int tipc_crypto_key_init(struct tipc_crypto *c, struct tipc_aead_key *ukey,
+ u8 mode, bool master_key)
+{
+ struct tipc_aead *aead = NULL;
+ int rc = 0;
+
+ /* Initiate with the new user key */
+ rc = tipc_aead_init(&aead, ukey, mode);
+
+ /* Attach it to the crypto */
+ if (likely(!rc)) {
+ rc = tipc_crypto_key_attach(c, aead, 0, master_key);
+ if (rc < 0)
+ tipc_aead_free(&aead->rcu);
+ }
+
+ return rc;
+}
+
+/**
+ * tipc_crypto_key_attach - Attach a new AEAD key to TIPC crypto
+ * @c: TIPC crypto to which the new AEAD key is attached
+ * @aead: the new AEAD key pointer
+ * @pos: desired slot in the crypto key array, = 0 if any!
+ * @master_key: specify this is a cluster master key
+ *
+ * Return: new key id in case of success, otherwise: -EBUSY
+ */
+static int tipc_crypto_key_attach(struct tipc_crypto *c,
+ struct tipc_aead *aead, u8 pos,
+ bool master_key)
+{
+ struct tipc_key key;
+ int rc = -EBUSY;
+ u8 new_key;
+
+ spin_lock_bh(&c->lock);
+ key = c->key;
+ if (master_key) {
+ new_key = KEY_MASTER;
+ goto attach;
+ }
+ if (key.active && key.passive)
+ goto exit;
+ if (key.pending) {
+ if (tipc_aead_users(c->aead[key.pending]) > 0)
+ goto exit;
+ /* if (pos): ok with replacing, will be aligned when needed */
+ /* Replace it */
+ new_key = key.pending;
+ } else {
+ if (pos) {
+ if (key.active && pos != key_next(key.active)) {
+ key.passive = pos;
+ new_key = pos;
+ goto attach;
+ } else if (!key.active && !key.passive) {
+ key.pending = pos;
+ new_key = pos;
+ goto attach;
+ }
+ }
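+ /* Default: take the slot next to the active (or passive) key as
+ * the new pending slot
+ */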
+ key.pending = key_next(key.active ?: key.passive);
+ new_key = key.pending;
+ }
+
+attach:
+ aead->crypto = c;
+ aead->gen = (is_tx(c)) ? ++c->key_gen : c->key_gen;
+ tipc_aead_rcu_replace(c->aead[new_key], aead, &c->lock);
+ if (likely(c->key.keys != key.keys))
+ tipc_crypto_key_set_state(c, key.passive, key.active,
+ key.pending);
+ c->working = 1;
+ c->nokey = 0;
+ c->key_master |= master_key;
+ rc = new_key;
+
+exit:
+ spin_unlock_bh(&c->lock);
+ return rc;
+}
+
+void tipc_crypto_key_flush(struct tipc_crypto *c)
+{
+ struct tipc_crypto *tx, *rx;
+ int k;
+
+ spin_lock_bh(&c->lock);
+ if (is_rx(c)) {
+ /* Try to cancel pending work */
+ rx = c;
+ tx = tipc_net(rx->net)->crypto_tx;
+ if (cancel_delayed_work(&rx->work)) {
+ kfree(rx->skey);
+ rx->skey = NULL;
+ atomic_xchg(&rx->key_distr, 0);
+ tipc_node_put(rx->node);
+ }
+ /* RX stopping => decrease TX key users if any */
+ k = atomic_xchg(&rx->peer_rx_active, 0);
+ if (k) {
+ tipc_aead_users_dec(tx->aead[k], 0);
+ /* Mark the point TX key users changed */
+ tx->timer1 = jiffies;
+ }
+ }
+
+ c->flags = 0;
+ tipc_crypto_key_set_state(c, 0, 0, 0);
+ for (k = KEY_MIN; k <= KEY_MAX; k++)
+ tipc_crypto_key_detach(c->aead[k], &c->lock);
+ atomic64_set(&c->sndnxt, 0);
+ spin_unlock_bh(&c->lock);
+}
+
+/**
+ * tipc_crypto_key_try_align - Align RX keys if possible
+ * @rx: RX crypto handle
+ * @new_pending: new pending slot if aligned (= TX key from peer)
+ *
+ * Peer has used an unknown key slot; this only happens when the peer has left
+ * and rejoined, or we are a newcomer.
+ * That means there must be no active key but a pending key at an unaligned
+ * slot.
+ * If so, we try to move the pending key to the new slot.
+ * Note: A potential passive key can exist, it will be shifted correspondingly!
+ *
+ * Return: "true" if key is successfully aligned, otherwise "false"
+ */
+static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending)
+{
+ struct tipc_aead *tmp1, *tmp2 = NULL;
+ struct tipc_key key;
+ bool aligned = false;
+ u8 new_passive = 0;
+ int x;
+
+ spin_lock(&rx->lock);
+ key = rx->key;
+ if (key.pending == new_pending) {
+ aligned = true;
+ goto exit;
+ }
+ if (key.active)
+ goto exit;
+ if (!key.pending)
+ goto exit;
+ if (tipc_aead_users(rx->aead[key.pending]) > 0)
+ goto exit;
+
+ /* Try to "isolate" this pending key first */
+ tmp1 = tipc_aead_rcu_ptr(rx->aead[key.pending], &rx->lock);
+ if (!refcount_dec_if_one(&tmp1->refcnt))
+ goto exit;
+ rcu_assign_pointer(rx->aead[key.pending], NULL);
+
+ /* Move passive key if any */
+ if (key.passive) {
+ tmp2 = rcu_replace_pointer(rx->aead[key.passive], tmp2, lockdep_is_held(&rx->lock));
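+ /* Shift the passive slot by the same offset as the pending key
+ * move, wrapping within the non-master key slots
+ */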
+ x = (key.passive - key.pending + new_pending) % KEY_MAX;
+ new_passive = (x <= 0) ? x + KEY_MAX : x;
+ }
+
+ /* Re-allocate the key(s) */
+ tipc_crypto_key_set_state(rx, new_passive, 0, new_pending);
+ rcu_assign_pointer(rx->aead[new_pending], tmp1);
+ if (new_passive)
+ rcu_assign_pointer(rx->aead[new_passive], tmp2);
+ refcount_set(&tmp1->refcnt, 1);
+ aligned = true;
+ pr_info_ratelimited("%s: key[%d] -> key[%d]\n", rx->name, key.pending,
+ new_pending);
+
+exit:
+ spin_unlock(&rx->lock);
+ return aligned;
+}
+
+/**
+ * tipc_crypto_key_pick_tx - Pick one TX key for message decryption
+ * @tx: TX crypto handle
+ * @rx: RX crypto handle (can be NULL)
+ * @skb: the message skb which will be decrypted later
+ * @tx_key: peer TX key id
+ *
+ * This function looks up the existing TX keys and picks one which is suitable
+ * for the message decryption: it must be a cluster key and not previously used
+ * on the same message (i.e. recursive).
+ *
+ * Return: the TX AEAD key handle in case of success, otherwise NULL
+ */
+static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
+ struct tipc_crypto *rx,
+ struct sk_buff *skb,
+ u8 tx_key)
+{
+ struct tipc_skb_cb *skb_cb = TIPC_SKB_CB(skb);
+ struct tipc_aead *aead = NULL;
+ struct tipc_key key = tx->key;
+ u8 k, i = 0;
+
+ /* Initialize data if not yet */
+ if (!skb_cb->tx_clone_deferred) {
+ skb_cb->tx_clone_deferred = 1;
+ memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx));
+ }
+
+ skb_cb->tx_clone_ctx.rx = rx;
+ if (++skb_cb->tx_clone_ctx.recurs > 2)
+ return NULL;
+
+ /* Pick one TX key */
+ spin_lock(&tx->lock);
+ if (tx_key == KEY_MASTER) {
+ aead = tipc_aead_rcu_ptr(tx->aead[KEY_MASTER], &tx->lock);
+ goto done;
+ }
+ do {
+ k = (i == 0) ? key.pending :
+ ((i == 1) ? key.active : key.passive);
+ if (!k)
+ continue;
+ aead = tipc_aead_rcu_ptr(tx->aead[k], &tx->lock);
+ if (!aead)
+ continue;
+ if (aead->mode != CLUSTER_KEY ||
+ aead == skb_cb->tx_clone_ctx.last) {
+ aead = NULL;
+ continue;
+ }
+ /* Ok, found one cluster key */
+ skb_cb->tx_clone_ctx.last = aead;
+ WARN_ON(skb->next);
+ skb->next = skb_clone(skb, GFP_ATOMIC);
+ if (unlikely(!skb->next))
+ pr_warn("Failed to clone skb for next round if any\n");
+ break;
+ } while (++i < 3);
+
+done:
+ if (likely(aead))
+ WARN_ON(!refcount_inc_not_zero(&aead->refcnt));
+ spin_unlock(&tx->lock);
+
+ return aead;
+}
+
+/**
+ * tipc_crypto_key_synch - Synch own key data according to peer key status
+ * @rx: RX crypto handle
+ * @skb: TIPCv2 message buffer (incl. the ehdr from peer)
+ *
+ * This function updates the peer node related data when the peer RX active key
+ * has changed, so the users of the corresponding TX keys on this node are
+ * increased and decreased accordingly.
+ *
+ * It also handles the case where the peer has no key: the own master key (if
+ * any) then takes over, i.e. the grace period starts, and the key distributing
+ * process is triggered as well.
+ *
+ * The "per-peer" sndnxt is also reset when the peer key has switched.
+ */
+static void tipc_crypto_key_synch(struct tipc_crypto *rx, struct sk_buff *skb)
+{
+ struct tipc_ehdr *ehdr = (struct tipc_ehdr *)skb_network_header(skb);
+ struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
+ struct tipc_msg *hdr = buf_msg(skb);
+ u32 self = tipc_own_addr(rx->net);
+ u8 cur, new;
+ unsigned long delay;
+
+ /* Update RX 'key_master' flag according to peer, also mark "legacy" if
+ * a peer has no master key.
+ */
+ rx->key_master = ehdr->master_key;
+ if (!rx->key_master)
+ tx->legacy_user = 1;
+
+ /* For later cases, apply only if message is destined to this node */
+ if (!ehdr->destined || msg_short(hdr) || msg_destnode(hdr) != self)
+ return;
+
+ /* Case 1: Peer has no keys, let's make master key take over */
+ if (ehdr->rx_nokey) {
+ /* Set or extend grace period */
+ tx->timer2 = jiffies;
+ /* Schedule key distributing for the peer if not yet */
+ if (tx->key.keys &&
+ !atomic_cmpxchg(&rx->key_distr, 0, KEY_DISTR_SCHED)) {
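+ /* Delay the key distributing by a random 0.5 - 2.5 seconds */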
+ get_random_bytes(&delay, 2);
+ delay %= 5;
+ delay = msecs_to_jiffies(500 * ++delay);
+ if (queue_delayed_work(tx->wq, &rx->work, delay))
+ tipc_node_get(rx->node);
+ }
+ } else {
+ /* Cancel a pending key distributing if any */
+ atomic_xchg(&rx->key_distr, 0);
+ }
+
+ /* Case 2: Peer RX active key has changed, let's update own TX users */
+ cur = atomic_read(&rx->peer_rx_active);
+ new = ehdr->rx_key_active;
+ if (tx->key.keys &&
+ cur != new &&
+ atomic_cmpxchg(&rx->peer_rx_active, cur, new) == cur) {
+ if (new)
+ tipc_aead_users_inc(tx->aead[new], INT_MAX);
+ if (cur)
+ tipc_aead_users_dec(tx->aead[cur], 0);
+
+ atomic64_set(&rx->sndnxt, 0);
+ /* Mark the point TX key users changed */
+ tx->timer1 = jiffies;
+
+ pr_debug("%s: key users changed %d-- %d++, peer %s\n",
+ tx->name, cur, new, rx->name);
+ }
+}
+
+static int tipc_crypto_key_revoke(struct net *net, u8 tx_key)
+{
+ struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
+ struct tipc_key key;
+
+ spin_lock_bh(&tx->lock);
+ key = tx->key;
+ WARN_ON(!key.active || tx_key != key.active);
+
+ /* Free the active key */
+ tipc_crypto_key_set_state(tx, key.passive, 0, key.pending);
+ tipc_crypto_key_detach(tx->aead[key.active], &tx->lock);
+ spin_unlock_bh(&tx->lock);
+
+ pr_warn("%s: key is revoked\n", tx->name);
+ return -EKEYREVOKED;
+}
+
+int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
+ struct tipc_node *node)
+{
+ struct tipc_crypto *c;
+
+ if (*crypto)
+ return -EEXIST;
+
+ /* Allocate crypto */
+ c = kzalloc(sizeof(*c), GFP_ATOMIC);
+ if (!c)
+ return -ENOMEM;
+
+ /* Allocate workqueue on TX */
+ if (!node) {
+ c->wq = alloc_ordered_workqueue("tipc_crypto", 0);
+ if (!c->wq) {
+ kfree(c);
+ return -ENOMEM;
+ }
+ }
+
+ /* Allocate statistic structure */
+ c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC);
+ if (!c->stats) {
+ if (c->wq)
+ destroy_workqueue(c->wq);
+ kfree_sensitive(c);
+ return -ENOMEM;
+ }
+
+ c->flags = 0;
+ c->net = net;
+ c->node = node;
+ get_random_bytes(&c->key_gen, 2);
+ tipc_crypto_key_set_state(c, 0, 0, 0);
+ atomic_set(&c->key_distr, 0);
+ atomic_set(&c->peer_rx_active, 0);
+ atomic64_set(&c->sndnxt, 0);
+ c->timer1 = jiffies;
+ c->timer2 = jiffies;
+ c->rekeying_intv = TIPC_REKEYING_INTV_DEF;
+ spin_lock_init(&c->lock);
+ scnprintf(c->name, 48, "%s(%s)", (is_rx(c)) ? "RX" : "TX",
+ (is_rx(c)) ? tipc_node_get_id_str(c->node) :
+ tipc_own_id_string(c->net));
+
+ if (is_rx(c))
+ INIT_DELAYED_WORK(&c->work, tipc_crypto_work_rx);
+ else
+ INIT_DELAYED_WORK(&c->work, tipc_crypto_work_tx);
+
+ *crypto = c;
+ return 0;
+}
+
+void tipc_crypto_stop(struct tipc_crypto **crypto)
+{
+ struct tipc_crypto *c = *crypto;
+ u8 k;
+
+ if (!c)
+ return;
+
+ /* Flush any queued works & destroy wq */
+ if (is_tx(c)) {
+ c->rekeying_intv = 0;
+ cancel_delayed_work_sync(&c->work);
+ destroy_workqueue(c->wq);
+ }
+
+ /* Release AEAD keys */
+ rcu_read_lock();
+ for (k = KEY_MIN; k <= KEY_MAX; k++)
+ tipc_aead_put(rcu_dereference(c->aead[k]));
+ rcu_read_unlock();
+ pr_debug("%s: has been stopped\n", c->name);
+
+ /* Free this crypto statistics */
+ free_percpu(c->stats);
+
+ *crypto = NULL;
+ kfree_sensitive(c);
+}
+
+void tipc_crypto_timeout(struct tipc_crypto *rx)
+{
+ struct tipc_net *tn = tipc_net(rx->net);
+ struct tipc_crypto *tx = tn->crypto_tx;
+ struct tipc_key key;
+ int cmd;
+
+ /* TX pending: taking all users & stable -> active */
+ spin_lock(&tx->lock);
+ key = tx->key;
+ if (key.active && tipc_aead_users(tx->aead[key.active]) > 0)
+ goto s1;
+ if (!key.pending || tipc_aead_users(tx->aead[key.pending]) <= 0)
+ goto s1;
+ if (time_before(jiffies, tx->timer1 + TIPC_TX_LASTING_TIME))
+ goto s1;
+
+ tipc_crypto_key_set_state(tx, key.passive, key.pending, 0);
+ if (key.active)
+ tipc_crypto_key_detach(tx->aead[key.active], &tx->lock);
+ this_cpu_inc(tx->stats->stat[STAT_SWITCHES]);
+ pr_info("%s: key[%d] is activated\n", tx->name, key.pending);
+
+s1:
+ spin_unlock(&tx->lock);
+
+ /* RX pending: having user -> active */
+ spin_lock(&rx->lock);
+ key = rx->key;
+ if (!key.pending || tipc_aead_users(rx->aead[key.pending]) <= 0)
+ goto s2;
+
+ if (key.active)
+ key.passive = key.active;
+ key.active = key.pending;
+ rx->timer2 = jiffies;
+ tipc_crypto_key_set_state(rx, key.passive, key.active, 0);
+ this_cpu_inc(rx->stats->stat[STAT_SWITCHES]);
+ pr_info("%s: key[%d] is activated\n", rx->name, key.pending);
+ goto s5;
+
+s2:
+ /* RX pending: not working -> remove */
+ if (!key.pending || tipc_aead_users(rx->aead[key.pending]) > -10)
+ goto s3;
+
+ tipc_crypto_key_set_state(rx, key.passive, key.active, 0);
+ tipc_crypto_key_detach(rx->aead[key.pending], &rx->lock);
+ pr_debug("%s: key[%d] is removed\n", rx->name, key.pending);
+ goto s5;
+
+s3:
+ /* RX active: timed out or no user -> pending */
+ if (!key.active)
+ goto s4;
+ if (time_before(jiffies, rx->timer1 + TIPC_RX_ACTIVE_LIM) &&
+ tipc_aead_users(rx->aead[key.active]) > 0)
+ goto s4;
+
+ if (key.pending)
+ key.passive = key.active;
+ else
+ key.pending = key.active;
+ rx->timer2 = jiffies;
+ tipc_crypto_key_set_state(rx, key.passive, 0, key.pending);
+ tipc_aead_users_set(rx->aead[key.pending], 0);
+ pr_debug("%s: key[%d] is deactivated\n", rx->name, key.active);
+ goto s5;
+
+s4:
+ /* RX passive: outdated or not working -> free */
+ if (!key.passive)
+ goto s5;
+ if (time_before(jiffies, rx->timer2 + TIPC_RX_PASSIVE_LIM) &&
+ tipc_aead_users(rx->aead[key.passive]) > -10)
+ goto s5;
+
+ tipc_crypto_key_set_state(rx, 0, key.active, key.pending);
+ tipc_crypto_key_detach(rx->aead[key.passive], &rx->lock);
+ pr_debug("%s: key[%d] is freed\n", rx->name, key.passive);
+
+s5:
+ spin_unlock(&rx->lock);
+
+ /* Relax it here; the flag will be set again if the condition really
+ * holds, but only when we are not in the grace period, for safety!
+ */
+ if (time_after(jiffies, tx->timer2 + TIPC_TX_GRACE_PERIOD))
+ tx->legacy_user = 0;
+
+ /* Limit max_tfms & do debug commands if needed */
+ if (likely(sysctl_tipc_max_tfms <= TIPC_MAX_TFMS_LIM))
+ return;
+
+ cmd = sysctl_tipc_max_tfms;
+ sysctl_tipc_max_tfms = TIPC_MAX_TFMS_DEF;
+ tipc_crypto_do_cmd(rx->net, cmd);
+}
+
+static inline void tipc_crypto_clone_msg(struct net *net, struct sk_buff *_skb,
+ struct tipc_bearer *b,
+ struct tipc_media_addr *dst,
+ struct tipc_node *__dnode, u8 type)
+{
+ struct sk_buff *skb;
+
+ skb = skb_clone(_skb, GFP_ATOMIC);
+ if (skb) {
+ TIPC_SKB_CB(skb)->xmit_type = type;
+ tipc_crypto_xmit(net, &skb, b, dst, __dnode);
+ if (skb)
+ b->media->send_msg(net, skb, b, dst);
+ }
+}
+
+/**
+ * tipc_crypto_xmit - Build & encrypt TIPC message for xmit
+ * @net: struct net
+ * @skb: input/output message skb pointer
+ * @b: bearer used for xmit later
+ * @dst: destination media address
+ * @__dnode: destination node for reference if any
+ *
+ * First, build an encryption message header on the top of the message, then
+ * encrypt the original TIPC message by using the pending, master or active
+ * key with this preference order.
+ * If the encryption is successful, the encrypted skb is returned directly or
+ * via the callback.
+ * Otherwise, the skb is freed!
+ *
+ * Return:
+ * * 0 : the encryption has succeeded (or no encryption)
+ * * -EINPROGRESS/-EBUSY : the encryption is ongoing, a callback will be made
+ * * -ENOKEY : the encryption has failed due to no key
+ * * -EKEYREVOKED : the encryption has failed due to key revoked
+ * * -ENOMEM : the encryption has failed due to no memory
+ * * < 0 : the encryption has failed due to other reasons
+ */
+int tipc_crypto_xmit(struct net *net, struct sk_buff **skb,
+ struct tipc_bearer *b, struct tipc_media_addr *dst,
+ struct tipc_node *__dnode)
+{
+ struct tipc_crypto *__rx = tipc_node_crypto_rx(__dnode);
+ struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
+ struct tipc_crypto_stats __percpu *stats = tx->stats;
+ struct tipc_msg *hdr = buf_msg(*skb);
+ struct tipc_key key = tx->key;
+ struct tipc_aead *aead = NULL;
+ u32 user = msg_user(hdr);
+ u32 type = msg_type(hdr);
+ int rc = -ENOKEY;
+ u8 tx_key = 0;
+
+ /* No encryption? */
+ if (!tx->working)
+ return 0;
+
+ /* Pending key if peer has active on it or probing time */
+ if (unlikely(key.pending)) {
+ tx_key = key.pending;
+ if (!tx->key_master && !key.active)
+ goto encrypt;
+ if (__rx && atomic_read(&__rx->peer_rx_active) == tx_key)
+ goto encrypt;
+ if (TIPC_SKB_CB(*skb)->xmit_type == SKB_PROBING) {
+ pr_debug("%s: probing for key[%d]\n", tx->name,
+ key.pending);
+ goto encrypt;
+ }
+ if (user == LINK_CONFIG || user == LINK_PROTOCOL)
+ tipc_crypto_clone_msg(net, *skb, b, dst, __dnode,
+ SKB_PROBING);
+ }
+
+ /* Master key if this is a *vital* message or in grace period */
+ if (tx->key_master) {
+ tx_key = KEY_MASTER;
+ if (!key.active)
+ goto encrypt;
+ if (TIPC_SKB_CB(*skb)->xmit_type == SKB_GRACING) {
+ pr_debug("%s: gracing for msg (%d %d)\n", tx->name,
+ user, type);
+ goto encrypt;
+ }
+ if (user == LINK_CONFIG ||
+ (user == LINK_PROTOCOL && type == RESET_MSG) ||
+ (user == MSG_CRYPTO && type == KEY_DISTR_MSG) ||
+ time_before(jiffies, tx->timer2 + TIPC_TX_GRACE_PERIOD)) {
+ if (__rx && __rx->key_master &&
+ !atomic_read(&__rx->peer_rx_active))
+ goto encrypt;
+ if (!__rx) {
+ if (likely(!tx->legacy_user))
+ goto encrypt;
+ tipc_crypto_clone_msg(net, *skb, b, dst,
+ __dnode, SKB_GRACING);
+ }
+ }
+ }
+
+ /* Else, use the active key if any */
+ if (likely(key.active)) {
+ tx_key = key.active;
+ goto encrypt;
+ }
+
+ goto exit;
+
+encrypt:
+ aead = tipc_aead_get(tx->aead[tx_key]);
+ if (unlikely(!aead))
+ goto exit;
+ rc = tipc_ehdr_build(net, aead, tx_key, *skb, __rx);
+ if (likely(rc > 0))
+ rc = tipc_aead_encrypt(aead, *skb, b, dst, __dnode);
+
+exit:
+ switch (rc) {
+ case 0:
+ this_cpu_inc(stats->stat[STAT_OK]);
+ break;
+ case -EINPROGRESS:
+ case -EBUSY:
+ this_cpu_inc(stats->stat[STAT_ASYNC]);
+ *skb = NULL;
+ return rc;
+ default:
+ this_cpu_inc(stats->stat[STAT_NOK]);
+ if (rc == -ENOKEY)
+ this_cpu_inc(stats->stat[STAT_NOKEYS]);
+ else if (rc == -EKEYREVOKED)
+ this_cpu_inc(stats->stat[STAT_BADKEYS]);
+ kfree_skb(*skb);
+ *skb = NULL;
+ break;
+ }
+
+ tipc_aead_put(aead);
+ return rc;
+}
+
+/**
+ * tipc_crypto_rcv - Decrypt an encrypted TIPC message from peer
+ * @net: struct net
+ * @rx: RX crypto handle
+ * @skb: input/output message skb pointer
+ * @b: bearer where the message has been received
+ *
+ * If the decryption is successful, the decrypted skb is returned directly or
+ * via the callback; the encryption header and auth tag will be trimmed off
+ * before forwarding to tipc_rcv() via tipc_crypto_rcv_complete().
+ * Otherwise, the skb will be freed!
+ * Note: RX key(s) can be re-aligned, or, in case no suitable key is found, TX
+ * cluster key(s) can be taken for decryption (recursively).
+ *
+ * Return:
+ * * 0 : the decryption has successfully completed
+ * * -EINPROGRESS/-EBUSY : the decryption is ongoing, a callback will be made
+ * * -ENOKEY : the decryption has failed due to no key
+ * * -EBADMSG : the decryption has failed due to bad message
+ * * -ENOMEM : the decryption has failed due to no memory
+ * * < 0 : the decryption has failed due to other reasons
+ */
+int tipc_crypto_rcv(struct net *net, struct tipc_crypto *rx,
+ struct sk_buff **skb, struct tipc_bearer *b)
+{
+ struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
+ struct tipc_crypto_stats __percpu *stats;
+ struct tipc_aead *aead = NULL;
+ struct tipc_key key;
+ int rc = -ENOKEY;
+ u8 tx_key, n;
+
+ tx_key = ((struct tipc_ehdr *)(*skb)->data)->tx_key;
+
+ /* New peer?
+ * Let's try with TX key (i.e. cluster mode) & verify the skb first!
+ */
+ if (unlikely(!rx || tx_key == KEY_MASTER))
+ goto pick_tx;
+
+ /* Pick RX key according to TX key if any */
+ key = rx->key;
+ if (tx_key == key.active || tx_key == key.pending ||
+ tx_key == key.passive)
+ goto decrypt;
+
+ /* Unknown key, let's try to align RX key(s) */
+ if (tipc_crypto_key_try_align(rx, tx_key))
+ goto decrypt;
+
+pick_tx:
+ /* No key suitable? Try to pick one from TX... */
+ aead = tipc_crypto_key_pick_tx(tx, rx, *skb, tx_key);
+ if (aead)
+ goto decrypt;
+ goto exit;
+
+decrypt:
+ rcu_read_lock();
+ if (!aead)
+ aead = tipc_aead_get(rx->aead[tx_key]);
+ rc = tipc_aead_decrypt(net, aead, *skb, b);
+ rcu_read_unlock();
+
+exit:
+ stats = ((rx) ?: tx)->stats;
+ switch (rc) {
+ case 0:
+ this_cpu_inc(stats->stat[STAT_OK]);
+ break;
+ case -EINPROGRESS:
+ case -EBUSY:
+ this_cpu_inc(stats->stat[STAT_ASYNC]);
+ *skb = NULL;
+ return rc;
+ default:
+ this_cpu_inc(stats->stat[STAT_NOK]);
+ if (rc == -ENOKEY) {
+ kfree_skb(*skb);
+ *skb = NULL;
+ if (rx) {
+ /* Mark rx->nokey only if we don't have a
+ * pending received session key, nor a newer
+ * one i.e. in the next slot.
+ */
+ n = key_next(tx_key);
+ rx->nokey = !(rx->skey ||
+ rcu_access_pointer(rx->aead[n]));
+ pr_debug_ratelimited("%s: nokey %d, key %d/%x\n",
+ rx->name, rx->nokey,
+ tx_key, rx->key.keys);
+ tipc_node_put(rx->node);
+ }
+ this_cpu_inc(stats->stat[STAT_NOKEYS]);
+ return rc;
+ } else if (rc == -EBADMSG) {
+ this_cpu_inc(stats->stat[STAT_BADMSGS]);
+ }
+ break;
+ }
+
+ tipc_crypto_rcv_complete(net, aead, b, skb, rc);
+ return rc;
+}
+
+static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
+ struct tipc_bearer *b,
+ struct sk_buff **skb, int err)
+{
+ struct tipc_skb_cb *skb_cb = TIPC_SKB_CB(*skb);
+ struct tipc_crypto *rx = aead->crypto;
+ struct tipc_aead *tmp = NULL;
+ struct tipc_ehdr *ehdr;
+ struct tipc_node *n;
+
+ /* Is this completed by TX? */
+ if (unlikely(is_tx(aead->crypto))) {
+ rx = skb_cb->tx_clone_ctx.rx;
+ pr_debug("TX->RX(%s): err %d, aead %p, skb->next %p, flags %x\n",
+ (rx) ? tipc_node_get_id_str(rx->node) : "-", err, aead,
+ (*skb)->next, skb_cb->flags);
+ pr_debug("skb_cb [recurs %d, last %p], tx->aead [%p %p %p]\n",
+ skb_cb->tx_clone_ctx.recurs, skb_cb->tx_clone_ctx.last,
+ aead->crypto->aead[1], aead->crypto->aead[2],
+ aead->crypto->aead[3]);
+ if (unlikely(err)) {
+ if (err == -EBADMSG && (*skb)->next)
+ tipc_rcv(net, (*skb)->next, b);
+ goto free_skb;
+ }
+
+ if (likely((*skb)->next)) {
+ kfree_skb((*skb)->next);
+ (*skb)->next = NULL;
+ }
+ ehdr = (struct tipc_ehdr *)(*skb)->data;
+ if (!rx) {
+ WARN_ON(ehdr->user != LINK_CONFIG);
+ n = tipc_node_create(net, 0, ehdr->id, 0xffffu, 0,
+ true);
+ rx = tipc_node_crypto_rx(n);
+ if (unlikely(!rx))
+ goto free_skb;
+ }
+
+ /* Ignore cloning if it was TX master key */
+ if (ehdr->tx_key == KEY_MASTER)
+ goto rcv;
+ if (tipc_aead_clone(&tmp, aead) < 0)
+ goto rcv;
+ WARN_ON(!refcount_inc_not_zero(&tmp->refcnt));
+ if (tipc_crypto_key_attach(rx, tmp, ehdr->tx_key, false) < 0) {
+ tipc_aead_free(&tmp->rcu);
+ goto rcv;
+ }
+ tipc_aead_put(aead);
+ aead = tmp;
+ }
+
+ if (unlikely(err)) {
+ tipc_aead_users_dec((struct tipc_aead __force __rcu *)aead, INT_MIN);
+ goto free_skb;
+ }
+
+ /* Set the RX key's user */
+ tipc_aead_users_set((struct tipc_aead __force __rcu *)aead, 1);
+
+ /* Mark this point, RX works */
+ rx->timer1 = jiffies;
+
+rcv:
+ /* Remove ehdr & auth. tag prior to tipc_rcv() */
+ ehdr = (struct tipc_ehdr *)(*skb)->data;
+
+ /* Mark this point, RX passive still works */
+ if (rx->key.passive && ehdr->tx_key == rx->key.passive)
+ rx->timer2 = jiffies;
+
+ skb_reset_network_header(*skb);
+ skb_pull(*skb, tipc_ehdr_size(ehdr));
+ if (pskb_trim(*skb, (*skb)->len - aead->authsize))
+ goto free_skb;
+
+ /* Validate TIPCv2 message */
+ if (unlikely(!tipc_msg_validate(skb))) {
+ pr_err_ratelimited("Packet dropped after decryption!\n");
+ goto free_skb;
+ }
+
+ /* Ok, everything's fine, try to synch own keys according to peers' */
+ tipc_crypto_key_synch(rx, *skb);
+
+ /* Re-fetch skb cb as skb might be changed in tipc_msg_validate */
+ skb_cb = TIPC_SKB_CB(*skb);
+
+ /* Mark skb decrypted */
+ skb_cb->decrypted = 1;
+
+ /* Clear clone cxt if any */
+ if (likely(!skb_cb->tx_clone_deferred))
+ goto exit;
+ skb_cb->tx_clone_deferred = 0;
+ memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx));
+ goto exit;
+
+free_skb:
+ kfree_skb(*skb);
+ *skb = NULL;
+
+exit:
+ tipc_aead_put(aead);
+ if (rx)
+ tipc_node_put(rx->node);
+}
+
+static void tipc_crypto_do_cmd(struct net *net, int cmd)
+{
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_crypto *tx = tn->crypto_tx, *rx;
+ struct list_head *p;
+ unsigned int stat;
+ int i, j, cpu;
+ char buf[200];
+
+ /* Currently only one command is supported */
+ switch (cmd) {
+ case 0xfff1:
+ goto print_stats;
+ default:
+ return;
+ }
+
+print_stats:
+ /* Print a header */
+ pr_info("\n=============== TIPC Crypto Statistics ===============\n\n");
+
+ /* Print key status */
+ pr_info("Key status:\n");
+ pr_info("TX(%7.7s)\n%s", tipc_own_id_string(net),
+ tipc_crypto_key_dump(tx, buf));
+
+ rcu_read_lock();
+ for (p = tn->node_list.next; p != &tn->node_list; p = p->next) {
+ rx = tipc_node_crypto_rx_by_list(p);
+ pr_info("RX(%7.7s)\n%s", tipc_node_get_id_str(rx->node),
+ tipc_crypto_key_dump(rx, buf));
+ }
+ rcu_read_unlock();
+
+ /* Print crypto statistics */
+ for (i = 0, j = 0; i < MAX_STATS; i++)
+ j += scnprintf(buf + j, 200 - j, "|%11s ", hstats[i]);
+ pr_info("Counter %s", buf);
+
+ memset(buf, '-', 115);
+ buf[115] = '\0';
+ pr_info("%s\n", buf);
+
+ j = scnprintf(buf, 200, "TX(%7.7s) ", tipc_own_id_string(net));
+ for_each_possible_cpu(cpu) {
+ for (i = 0; i < MAX_STATS; i++) {
+ stat = per_cpu_ptr(tx->stats, cpu)->stat[i];
+ j += scnprintf(buf + j, 200 - j, "|%11d ", stat);
+ }
+ pr_info("%s", buf);
+ j = scnprintf(buf, 200, "%12s", " ");
+ }
+
+ rcu_read_lock();
+ for (p = tn->node_list.next; p != &tn->node_list; p = p->next) {
+ rx = tipc_node_crypto_rx_by_list(p);
+ j = scnprintf(buf, 200, "RX(%7.7s) ",
+ tipc_node_get_id_str(rx->node));
+ for_each_possible_cpu(cpu) {
+ for (i = 0; i < MAX_STATS; i++) {
+ stat = per_cpu_ptr(rx->stats, cpu)->stat[i];
+ j += scnprintf(buf + j, 200 - j, "|%11d ",
+ stat);
+ }
+ pr_info("%s", buf);
+ j = scnprintf(buf, 200, "%12s", " ");
+ }
+ }
+ rcu_read_unlock();
+
+ pr_info("\n======================== Done ========================\n");
+}
+
+static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf)
+{
+ struct tipc_key key = c->key;
+ struct tipc_aead *aead;
+ int k, i = 0;
+ char *s;
+
+ for (k = KEY_MIN; k <= KEY_MAX; k++) {
+ if (k == KEY_MASTER) {
+ if (is_rx(c))
+ continue;
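+ /* TX master key is shown as active only while in grace period */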
+ if (time_before(jiffies,
+ c->timer2 + TIPC_TX_GRACE_PERIOD))
+ s = "ACT";
+ else
+ s = "PAS";
+ } else {
+ if (k == key.passive)
+ s = "PAS";
+ else if (k == key.active)
+ s = "ACT";
+ else if (k == key.pending)
+ s = "PEN";
+ else
+ s = "-";
+ }
+ i += scnprintf(buf + i, 200 - i, "\tKey%d: %s", k, s);
+
+ rcu_read_lock();
+ aead = rcu_dereference(c->aead[k]);
+ if (aead)
+ i += scnprintf(buf + i, 200 - i,
+ "{\"0x...%s\", \"%s\"}/%d:%d",
+ aead->hint,
+ (aead->mode == CLUSTER_KEY) ? "c" : "p",
+ atomic_read(&aead->users),
+ refcount_read(&aead->refcnt));
+ rcu_read_unlock();
+ i += scnprintf(buf + i, 200 - i, "\n");
+ }
+
+ if (is_rx(c))
+ i += scnprintf(buf + i, 200 - i, "\tPeer RX active: %d\n",
+ atomic_read(&c->peer_rx_active));
+
+ return buf;
+}
+
+static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new,
+ char *buf)
+{
+ struct tipc_key *key = &old;
+ int k, i = 0;
+ char *s;
+
+ /* Output format: "[%s %s %s] -> [%s %s %s]", max len = 32 */
+again:
+ i += scnprintf(buf + i, 32 - i, "[");
+ for (k = KEY_1; k <= KEY_3; k++) {
+ if (k == key->passive)
+ s = "pas";
+ else if (k == key->active)
+ s = "act";
+ else if (k == key->pending)
+ s = "pen";
+ else
+ s = "-";
+ i += scnprintf(buf + i, 32 - i,
+ (k != KEY_3) ? "%s " : "%s", s);
+ }
+ if (key != &new) {
+ i += scnprintf(buf + i, 32 - i, "] -> ");
+ key = &new;
+ goto again;
+ }
+ i += scnprintf(buf + i, 32 - i, "]");
+ return buf;
+}
+
+/**
+ * tipc_crypto_msg_rcv - Common 'MSG_CRYPTO' processing point
+ * @net: the struct net
+ * @skb: the receiving message buffer
+ */
+void tipc_crypto_msg_rcv(struct net *net, struct sk_buff *skb)
+{
+ struct tipc_crypto *rx;
+ struct tipc_msg *hdr;
+
+ if (unlikely(skb_linearize(skb)))
+ goto exit;
+
+ hdr = buf_msg(skb);
+ rx = tipc_node_crypto_rx_by_addr(net, msg_prevnode(hdr));
+ if (unlikely(!rx))
+ goto exit;
+
+ switch (msg_type(hdr)) {
+ case KEY_DISTR_MSG:
+ if (tipc_crypto_key_rcv(rx, hdr))
+ goto exit;
+ break;
+ default:
+ break;
+ }
+
+ tipc_node_put(rx->node);
+
+exit:
+ kfree_skb(skb);
+}
+
+/**
+ * tipc_crypto_key_distr - Distribute a TX key
+ * @tx: the TX crypto
+ * @key: the key's index
+ * @dest: the destination tipc node, = NULL if distributing to all nodes
+ *
+ * Return: 0 in case of success, otherwise < 0
+ */
+int tipc_crypto_key_distr(struct tipc_crypto *tx, u8 key,
+ struct tipc_node *dest)
+{
+ struct tipc_aead *aead;
+ u32 dnode = tipc_node_get_addr(dest);
+ int rc = -ENOKEY;
+
+ if (!sysctl_tipc_key_exchange_enabled)
+ return 0;
+
+ if (key) {
+ rcu_read_lock();
+ aead = tipc_aead_get(tx->aead[key]);
+ if (likely(aead)) {
+ rc = tipc_crypto_key_xmit(tx->net, aead->key,
+ aead->gen, aead->mode,
+ dnode);
+ tipc_aead_put(aead);
+ }
+ rcu_read_unlock();
+ }
+
+ return rc;
+}
+
+/**
+ * tipc_crypto_key_xmit - Send a session key
+ * @net: the struct net
+ * @skey: the session key to be sent
+ * @gen: the key's generation
+ * @mode: the key's mode
+ * @dnode: the destination node address, = 0 if broadcasting to all nodes
+ *
+ * The session key 'skey' is packed into a TIPC v2 'MSG_CRYPTO/KEY_DISTR_MSG'
+ * as its data section, then transmitted through the unicast or broadcast link.
+ *
+ * Return: 0 in case of success, otherwise < 0
+ */
+static int tipc_crypto_key_xmit(struct net *net, struct tipc_aead_key *skey,
+ u16 gen, u8 mode, u32 dnode)
+{
+ struct sk_buff_head pkts;
+ struct tipc_msg *hdr;
+ struct sk_buff *skb;
+ u16 size, cong_link_cnt;
+ u8 *data;
+ int rc;
+
+ size = tipc_aead_key_size(skey);
+ skb = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = buf_msg(skb);
+ tipc_msg_init(tipc_own_addr(net), hdr, MSG_CRYPTO, KEY_DISTR_MSG,
+ INT_H_SIZE, dnode);
+ msg_set_size(hdr, INT_H_SIZE + size);
+ msg_set_key_gen(hdr, gen);
+ msg_set_key_mode(hdr, mode);
+
+ data = msg_data(hdr);
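+ /* Data layout: [alg_name (TIPC_AEAD_ALG_NAME)][keylen (__be32)][key] */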
+ *((__be32 *)(data + TIPC_AEAD_ALG_NAME)) = htonl(skey->keylen);
+ memcpy(data, skey->alg_name, TIPC_AEAD_ALG_NAME);
+ memcpy(data + TIPC_AEAD_ALG_NAME + sizeof(__be32), skey->key,
+ skey->keylen);
+
+ __skb_queue_head_init(&pkts);
+ __skb_queue_tail(&pkts, skb);
+ if (dnode)
+ rc = tipc_node_xmit(net, &pkts, dnode, 0);
+ else
+ rc = tipc_bcast_xmit(net, &pkts, &cong_link_cnt);
+
+ return rc;
+}
+
+/**
+ * tipc_crypto_key_rcv - Receive a session key
+ * @rx: the RX crypto
+ * @hdr: the TIPC v2 message incl. the receiving session key in its data
+ *
+ * This function retrieves the session key from the peer's message, then
+ * schedules an RX work to attach the key to the corresponding RX crypto.
+ *
+ * Return: "true" if the key has been scheduled for attaching, otherwise
+ * "false".
+ */
+static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr)
+{
+ struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
+ struct tipc_aead_key *skey = NULL;
+ u16 key_gen = msg_key_gen(hdr);
+ u32 size = msg_data_sz(hdr);
+ u8 *data = msg_data(hdr);
+ unsigned int keylen;
+
+ /* Verify whether the size can exist in the packet */
+ if (unlikely(size < sizeof(struct tipc_aead_key) + TIPC_AEAD_KEYLEN_MIN)) {
+ pr_debug("%s: message data size is too small\n", rx->name);
+ goto exit;
+ }
+
+ keylen = ntohl(*((__be32 *)(data + TIPC_AEAD_ALG_NAME)));
+
+ /* Verify the supplied size values */
+ if (unlikely(size != keylen + sizeof(struct tipc_aead_key) ||
+ keylen > TIPC_AEAD_KEY_SIZE_MAX)) {
+ pr_debug("%s: invalid MSG_CRYPTO key size\n", rx->name);
+ goto exit;
+ }
+
+ spin_lock(&rx->lock);
+ if (unlikely(rx->skey || (key_gen == rx->key_gen && rx->key.keys))) {
+ pr_err("%s: key existed <%p>, gen %d vs %d\n", rx->name,
+ rx->skey, key_gen, rx->key_gen);
+ goto exit_unlock;
+ }
+
+ /* Allocate memory for the key */
+ skey = kmalloc(size, GFP_ATOMIC);
+ if (unlikely(!skey)) {
+ pr_err("%s: unable to allocate memory for skey\n", rx->name);
+ goto exit_unlock;
+ }
+
+ /* Copy key from msg data */
+ skey->keylen = keylen;
+ memcpy(skey->alg_name, data, TIPC_AEAD_ALG_NAME);
+ memcpy(skey->key, data + TIPC_AEAD_ALG_NAME + sizeof(__be32),
+ skey->keylen);
+
+ rx->key_gen = key_gen;
+ rx->skey_mode = msg_key_mode(hdr);
+ rx->skey = skey;
+ rx->nokey = 0;
+ mb(); /* for nokey flag */
+
+exit_unlock:
+ spin_unlock(&rx->lock);
+
+exit:
+ /* Schedule the key attaching on this crypto */
+ if (likely(skey && queue_delayed_work(tx->wq, &rx->work, 0)))
+ return true;
+
+ return false;
+}
+
+/**
+ * tipc_crypto_work_rx - Scheduled RX works handler
+ * @work: the struct RX work
+ *
+ * The function processes the previously scheduled works, i.e. distributing the
+ * TX key or attaching a received session key to the RX crypto.
+ */
+static void tipc_crypto_work_rx(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct tipc_crypto *rx = container_of(dwork, struct tipc_crypto, work);
+ struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
+ unsigned long delay = msecs_to_jiffies(5000);
+ bool resched = false;
+ u8 key;
+ int rc;
+
+ /* Case 1: Distribute TX key to peer if scheduled */
+ if (atomic_cmpxchg(&rx->key_distr,
+ KEY_DISTR_SCHED,
+ KEY_DISTR_COMPL) == KEY_DISTR_SCHED) {
+ /* Always pick the newest one for distributing */
+ key = tx->key.pending ?: tx->key.active;
+ rc = tipc_crypto_key_distr(tx, key, rx->node);
+ if (unlikely(rc))
+ pr_warn("%s: unable to distr key[%d] to %s, err %d\n",
+ tx->name, key, tipc_node_get_id_str(rx->node),
+ rc);
+
+ /* Sched for key_distr releasing */
+ resched = true;
+ } else {
+ atomic_cmpxchg(&rx->key_distr, KEY_DISTR_COMPL, 0);
+ }
+
+ /* Case 2: Attach a pending received session key from peer if any */
+ if (rx->skey) {
+ rc = tipc_crypto_key_init(rx, rx->skey, rx->skey_mode, false);
+ if (unlikely(rc < 0))
+ pr_warn("%s: unable to attach received skey, err %d\n",
+ rx->name, rc);
+ switch (rc) {
+ case -EBUSY:
+ case -ENOMEM:
+ /* Resched the key attaching */
+ resched = true;
+ break;
+ default:
+ synchronize_rcu();
+ kfree(rx->skey);
+ rx->skey = NULL;
+ break;
+ }
+ }
+
+ if (resched && queue_delayed_work(tx->wq, &rx->work, delay))
+ return;
+
+ tipc_node_put(rx->node);
+}
+
+/**
+ * tipc_crypto_rekeying_sched - (Re)schedule rekeying with or without a new interval
+ * @tx: TX crypto
+ * @changed: if the rekeying needs to be rescheduled with new interval
+ * @new_intv: new rekeying interval (when "changed" = true)
+ */
+void tipc_crypto_rekeying_sched(struct tipc_crypto *tx, bool changed,
+ u32 new_intv)
+{
+ unsigned long delay;
+ bool now = false;
+
+ if (changed) {
+ if (new_intv == TIPC_REKEYING_NOW)
+ now = true;
+ else
+ tx->rekeying_intv = new_intv;
+ cancel_delayed_work_sync(&tx->work);
+ }
+
+ if (tx->rekeying_intv || now) {
+ delay = (now) ? 0 : tx->rekeying_intv * 60 * 1000;
+ queue_delayed_work(tx->wq, &tx->work, msecs_to_jiffies(delay));
+ }
+}
+
+/**
+ * tipc_crypto_work_tx - Scheduled TX works handler
+ * @work: the struct TX work
+ *
+ * The function processes the previously scheduled work, i.e. key rekeying, by
+ * generating a new session key based on the current one, then attaching it to
+ * the TX crypto and finally distributing it to peers. It also re-schedules the
+ * rekeying if needed.
+ */
+static void tipc_crypto_work_tx(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct tipc_crypto *tx = container_of(dwork, struct tipc_crypto, work);
+ struct tipc_aead_key *skey = NULL;
+ struct tipc_key key = tx->key;
+ struct tipc_aead *aead;
+ int rc = -ENOMEM;
+
+ if (unlikely(key.pending))
+ goto resched;
+
+ /* Take current key as a template */
+ rcu_read_lock();
+ aead = rcu_dereference(tx->aead[key.active ?: KEY_MASTER]);
+ if (unlikely(!aead)) {
+ rcu_read_unlock();
+ /* At least one key should exist for securing */
+ return;
+ }
+
+ /* Let's duplicate it first */
+ skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_ATOMIC);
+ rcu_read_unlock();
+
+ /* Now, generate new key, initiate & distribute it */
+ if (likely(skey)) {
+ rc = tipc_aead_key_generate(skey) ?:
+ tipc_crypto_key_init(tx, skey, PER_NODE_KEY, false);
+ if (likely(rc > 0))
+ rc = tipc_crypto_key_distr(tx, rc, NULL);
+ kfree_sensitive(skey);
+ }
+
+ if (unlikely(rc))
+ pr_warn_ratelimited("%s: rekeying returns %d\n", tx->name, rc);
+
+resched:
+ /* Re-schedule rekeying if any */
+ tipc_crypto_rekeying_sched(tx, false, 0);
+}
diff --git a/net/tipc/crypto.h b/net/tipc/crypto.h
new file mode 100644
index 0000000000..ce7d4cc8a9
--- /dev/null
+++ b/net/tipc/crypto.h
@@ -0,0 +1,200 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * net/tipc/crypto.h: Include file for TIPC crypto
+ *
+ * Copyright (c) 2019, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifdef CONFIG_TIPC_CRYPTO
+#ifndef _TIPC_CRYPTO_H
+#define _TIPC_CRYPTO_H
+
+#include "core.h"
+#include "node.h"
+#include "msg.h"
+#include "bearer.h"
+
+#define TIPC_EVERSION 7
+
+/* AEAD aes(gcm) */
+#define TIPC_AES_GCM_KEY_SIZE_128 16
+#define TIPC_AES_GCM_KEY_SIZE_192 24
+#define TIPC_AES_GCM_KEY_SIZE_256 32
+
+#define TIPC_AES_GCM_SALT_SIZE 4
+#define TIPC_AES_GCM_IV_SIZE 12
+#define TIPC_AES_GCM_TAG_SIZE 16
+
+/*
+ * TIPC crypto modes:
+ * - CLUSTER_KEY:
+ * One single key is used for both TX & RX in all nodes in the cluster.
+ * - PER_NODE_KEY:
+ * Each node in the cluster has one TX key; for RX, a node needs to know
+ * its peers' TX keys in order to decrypt messages from those nodes.
+ */
+enum {
+ CLUSTER_KEY = 1,
+ PER_NODE_KEY = (1 << 1),
+};
+
+extern int sysctl_tipc_max_tfms __read_mostly;
+extern int sysctl_tipc_key_exchange_enabled __read_mostly;
+
+/*
+ * TIPC encryption message format:
+ *
+ * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
+ * 1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * w0:|Ver=7| User |D|TX |RX |K|M|N| Rsvd |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * w1:| Seqno |
+ * w2:| (8 octets) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * w3:\ Prevnode \
+ * / (4 or 16 octets) /
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * \ \
+ * / Encrypted complete TIPC V2 header and user data /
+ * \ \
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | |
+ * | AuthTag |
+ * | (16 octets) |
+ * | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Word0:
+ * Ver : = 7 i.e. TIPC encryption message version
+ * User : = 7 (for LINK_PROTOCOL); = 13 (for LINK_CONFIG) or = 0
+ * D : The destined bit i.e. the message's destination node is
+ * "known" or not at the message encryption
+ * TX : TX key used for the message encryption
+ * RX : Currently RX active key corresponding to the destination
+ * node's TX key (when the "D" bit is set)
+ * K : Keep-alive bit (for RPS, LINK_PROTOCOL/STATE_MSG only)
+ * M : Bit indicating that the sender has the master key
+ * N : Bit indicating that the sender has no RX key corresponding
+ * to the receiver's TX key (when the "D" bit is set)
+ * Rsvd : Reserved bit field
+ * Word1-2:
+ * Seqno : The 64-bit sequence number of the encrypted message, also
+ * part of the nonce used for the message encryption/decryption
+ * Word3-:
+ * Prevnode: The source node address, or the 128-bit node identity
+ * (LINK_CONFIG messages only)
+ * AuthTag : The authentication tag for the message integrity checking
+ * generated by the message encryption
+ */
+struct tipc_ehdr {
+ union {
+ struct {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 destined:1,
+ user:4,
+ version:3;
+ __u8 reserved_1:1,
+ rx_nokey:1,
+ master_key:1,
+ keepalive:1,
+ rx_key_active:2,
+ tx_key:2;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u8 version:3,
+ user:4,
+ destined:1;
+ __u8 tx_key:2,
+ rx_key_active:2,
+ keepalive:1,
+ master_key:1,
+ rx_nokey:1,
+ reserved_1:1;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be16 reserved_2;
+ } __packed;
+ __be32 w0;
+ };
+ __be64 seqno;
+ union {
+ __be32 addr;
+ __u8 id[NODE_ID_LEN]; /* For a LINK_CONFIG message only! */
+ };
+#define EHDR_SIZE (offsetof(struct tipc_ehdr, addr) + sizeof(__be32))
+#define EHDR_CFG_SIZE (sizeof(struct tipc_ehdr))
+#define EHDR_MIN_SIZE (EHDR_SIZE)
+#define EHDR_MAX_SIZE (EHDR_CFG_SIZE)
+#define EMSG_OVERHEAD (EHDR_SIZE + TIPC_AES_GCM_TAG_SIZE)
+} __packed;
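+
+/* With the layout above, EHDR_SIZE works out to 16 octets (word 0 + the
+ * 64-bit seqno + a 4-octet address), EHDR_CFG_SIZE to 28 octets when the
+ * full node identity is carried (LINK_CONFIG), and EMSG_OVERHEAD adds the
+ * 16-octet AES-GCM authentication tag on top of EHDR_SIZE.
+ */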
+
+int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
+ struct tipc_node *node);
+void tipc_crypto_stop(struct tipc_crypto **crypto);
+void tipc_crypto_timeout(struct tipc_crypto *rx);
+int tipc_crypto_xmit(struct net *net, struct sk_buff **skb,
+ struct tipc_bearer *b, struct tipc_media_addr *dst,
+ struct tipc_node *__dnode);
+int tipc_crypto_rcv(struct net *net, struct tipc_crypto *rx,
+ struct sk_buff **skb, struct tipc_bearer *b);
+int tipc_crypto_key_init(struct tipc_crypto *c, struct tipc_aead_key *ukey,
+ u8 mode, bool master_key);
+void tipc_crypto_key_flush(struct tipc_crypto *c);
+int tipc_crypto_key_distr(struct tipc_crypto *tx, u8 key,
+ struct tipc_node *dest);
+void tipc_crypto_msg_rcv(struct net *net, struct sk_buff *skb);
+void tipc_crypto_rekeying_sched(struct tipc_crypto *tx, bool changed,
+ u32 new_intv);
+int tipc_aead_key_validate(struct tipc_aead_key *ukey, struct genl_info *info);
+bool tipc_ehdr_validate(struct sk_buff *skb);
+
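+/* The helpers below read/write the key generation number (bits 16-31) and
+ * the key exchange mode (bits 0-3) carried in word 4 of a key distribution
+ * message header.
+ */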
+static inline u32 msg_key_gen(struct tipc_msg *m)
+{
+ return msg_bits(m, 4, 16, 0xffff);
+}
+
+static inline void msg_set_key_gen(struct tipc_msg *m, u32 gen)
+{
+ msg_set_bits(m, 4, 16, 0xffff, gen);
+}
+
+static inline u32 msg_key_mode(struct tipc_msg *m)
+{
+ return msg_bits(m, 4, 0, 0xf);
+}
+
+static inline void msg_set_key_mode(struct tipc_msg *m, u32 mode)
+{
+ msg_set_bits(m, 4, 0, 0xf, mode);
+}
+
+#endif /* _TIPC_CRYPTO_H */
+#endif
diff --git a/net/tipc/diag.c b/net/tipc/diag.c
new file mode 100644
index 0000000000..73137f4aeb
--- /dev/null
+++ b/net/tipc/diag.c
@@ -0,0 +1,116 @@
+/*
+ * net/tipc/diag.c: TIPC socket diag
+ *
+ * Copyright (c) 2018, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "socket.h"
+#include <linux/sock_diag.h>
+#include <linux/tipc_sockets_diag.h>
+
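+/* Pack the two 32-bit words produced by sock_diag_save_cookie() into the
+ * single 64-bit socket cookie reported to user space.
+ */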
+static u64 __tipc_diag_gen_cookie(struct sock *sk)
+{
+ u32 res[2];
+
+ sock_diag_save_cookie(sk, res);
+ return *((u64 *)res);
+}
+
+static int __tipc_add_sock_diag(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct tipc_sock *tsk)
+{
+ struct tipc_sock_diag_req *req = nlmsg_data(cb->nlh);
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = nlmsg_put_answer(skb, cb, SOCK_DIAG_BY_FAMILY, 0,
+ NLM_F_MULTI);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ err = tipc_sk_fill_sock_diag(skb, cb, tsk, req->tidiag_states,
+ __tipc_diag_gen_cookie);
+ if (err)
+ return err;
+
+ nlmsg_end(skb, nlh);
+ return 0;
+}
+
+static int tipc_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ return tipc_nl_sk_walk(skb, cb, __tipc_add_sock_diag);
+}
+
+static int tipc_sock_diag_handler_dump(struct sk_buff *skb,
+ struct nlmsghdr *h)
+{
+ int hdrlen = sizeof(struct tipc_sock_diag_req);
+ struct net *net = sock_net(skb->sk);
+
+ if (nlmsg_len(h) < hdrlen)
+ return -EINVAL;
+
+ if (h->nlmsg_flags & NLM_F_DUMP) {
+ struct netlink_dump_control c = {
+ .start = tipc_dump_start,
+ .dump = tipc_diag_dump,
+ .done = tipc_dump_done,
+ };
+ netlink_dump_start(net->diag_nlsk, skb, h, &c);
+ return 0;
+ }
+ return -EOPNOTSUPP;
+}
+
+static const struct sock_diag_handler tipc_sock_diag_handler = {
+ .family = AF_TIPC,
+ .dump = tipc_sock_diag_handler_dump,
+};
+
+static int __init tipc_diag_init(void)
+{
+ return sock_diag_register(&tipc_sock_diag_handler);
+}
+
+static void __exit tipc_diag_exit(void)
+{
+ sock_diag_unregister(&tipc_sock_diag_handler);
+}
+
+module_init(tipc_diag_init);
+module_exit(tipc_diag_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, AF_TIPC);
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
new file mode 100644
index 0000000000..685389d4b2
--- /dev/null
+++ b/net/tipc/discover.c
@@ -0,0 +1,420 @@
+/*
+ * net/tipc/discover.c
+ *
+ * Copyright (c) 2003-2006, 2014-2018, Ericsson AB
+ * Copyright (c) 2005-2006, 2010-2011, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "node.h"
+#include "discover.h"
+
+/* min delay during bearer start up */
+#define TIPC_DISC_INIT msecs_to_jiffies(125)
+/* max delay if bearer has no links */
+#define TIPC_DISC_FAST msecs_to_jiffies(1000)
+/* max delay if bearer has links */
+#define TIPC_DISC_SLOW msecs_to_jiffies(60000)
+/* indicates no timer in use */
+#define TIPC_DISC_INACTIVE 0xffffffff
+
+/**
+ * struct tipc_discoverer - information about an ongoing link setup request
+ * @bearer_id: identity of bearer issuing requests
+ * @net: network namespace instance
+ * @dest: destination address for request messages
+ * @domain: network domain to which links can be established
+ * @num_nodes: number of nodes currently discovered (i.e. with an active link)
+ * @lock: spinlock for controlling access to requests
+ * @skb: request message to be (repeatedly) sent
+ * @timer: timer governing period between requests
+ * @timer_intv: current interval between requests (in jiffies)
+ */
+struct tipc_discoverer {
+ u32 bearer_id;
+ struct tipc_media_addr dest;
+ struct net *net;
+ u32 domain;
+ int num_nodes;
+ spinlock_t lock;
+ struct sk_buff *skb;
+ struct timer_list timer;
+ unsigned long timer_intv;
+};
+
+/**
+ * tipc_disc_init_msg - initialize a link setup message
+ * @net: the applicable net namespace
+ * @skb: buffer containing message
+ * @mtyp: message type (request or response)
+ * @b: ptr to bearer issuing message
+ */
+static void tipc_disc_init_msg(struct net *net, struct sk_buff *skb,
+ u32 mtyp, struct tipc_bearer *b)
+{
+ struct tipc_net *tn = tipc_net(net);
+ u32 dest_domain = b->domain;
+ struct tipc_msg *hdr;
+
+ hdr = buf_msg(skb);
+ tipc_msg_init(tn->trial_addr, hdr, LINK_CONFIG, mtyp,
+ MAX_H_SIZE, dest_domain);
+ msg_set_size(hdr, MAX_H_SIZE + NODE_ID_LEN);
+ msg_set_non_seq(hdr, 1);
+ msg_set_node_sig(hdr, tn->random);
+ msg_set_node_capabilities(hdr, TIPC_NODE_CAPABILITIES);
+ msg_set_dest_domain(hdr, dest_domain);
+ msg_set_bc_netid(hdr, tn->net_id);
+ b->media->addr2msg(msg_media_addr(hdr), &b->addr);
+ msg_set_peer_net_hash(hdr, tipc_net_hash_mixes(net, tn->random));
+ msg_set_node_id(hdr, tipc_own_id(net));
+}
+
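+/* tipc_disc_msg_xmit - build and send a single discovery message
+ * (trial/request/response) with a suggested node address on the given bearer
+ */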
+static void tipc_disc_msg_xmit(struct net *net, u32 mtyp, u32 dst,
+ u32 src, u32 sugg_addr,
+ struct tipc_media_addr *maddr,
+ struct tipc_bearer *b)
+{
+ struct tipc_msg *hdr;
+ struct sk_buff *skb;
+
+ skb = tipc_buf_acquire(MAX_H_SIZE + NODE_ID_LEN, GFP_ATOMIC);
+ if (!skb)
+ return;
+ hdr = buf_msg(skb);
+ tipc_disc_init_msg(net, skb, mtyp, b);
+ msg_set_sugg_node_addr(hdr, sugg_addr);
+ msg_set_dest_domain(hdr, dst);
+ tipc_bearer_xmit_skb(net, b->identity, skb, maddr);
+}
+
+/**
+ * disc_dupl_alert - issue node address duplication alert
+ * @b: pointer to bearer detecting duplication
+ * @node_addr: duplicated node address
+ * @media_addr: media address advertised by duplicated node
+ */
+static void disc_dupl_alert(struct tipc_bearer *b, u32 node_addr,
+ struct tipc_media_addr *media_addr)
+{
+ char media_addr_str[64];
+
+ tipc_media_addr_printf(media_addr_str, sizeof(media_addr_str),
+ media_addr);
+ pr_warn("Duplicate %x using %s seen on <%s>\n", node_addr,
+ media_addr_str, b->name);
+}
+
+/* tipc_disc_addr_trial_msg() - handle an address uniqueness trial from a peer
+ * Returns true if the message should be dropped by the caller, i.e. if it is
+ * a trial message or we are still inside the trial period; otherwise false.
+ */
+static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d,
+ struct tipc_media_addr *maddr,
+ struct tipc_bearer *b,
+ u32 dst, u32 src,
+ u32 sugg_addr,
+ u8 *peer_id,
+ int mtyp)
+{
+ struct net *net = d->net;
+ struct tipc_net *tn = tipc_net(net);
+ u32 self = tipc_own_addr(net);
+ bool trial = time_before(jiffies, tn->addr_trial_end) && !self;
+
+ if (mtyp == DSC_TRIAL_FAIL_MSG) {
+ if (!trial)
+ return true;
+
+ /* Ignore if somebody else already gave new suggestion */
+ if (dst != tn->trial_addr)
+ return true;
+
+ /* Otherwise update trial address and restart trial period */
+ tn->trial_addr = sugg_addr;
+ msg_set_prevnode(buf_msg(d->skb), sugg_addr);
+ tn->addr_trial_end = jiffies + msecs_to_jiffies(1000);
+ return true;
+ }
+
+ /* Apply trial address if we just left trial period */
+ if (!trial && !self) {
+ schedule_work(&tn->work);
+ msg_set_prevnode(buf_msg(d->skb), tn->trial_addr);
+ msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
+ }
+
+ /* Accept regular link requests/responses only after trial period */
+ if (mtyp != DSC_TRIAL_MSG)
+ return trial;
+
+ sugg_addr = tipc_node_try_addr(net, peer_id, src);
+ if (sugg_addr)
+ tipc_disc_msg_xmit(net, DSC_TRIAL_FAIL_MSG, src,
+ self, sugg_addr, maddr, b);
+ return true;
+}
+
+/**
+ * tipc_disc_rcv - handle incoming discovery message (request or response)
+ * @net: applicable net namespace
+ * @skb: buffer containing message
+ * @b: bearer that message arrived on
+ */
+void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
+ struct tipc_bearer *b)
+{
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_msg *hdr = buf_msg(skb);
+ u32 pnet_hash = msg_peer_net_hash(hdr);
+ u16 caps = msg_node_capabilities(hdr);
+ bool legacy = tn->legacy_addr_format;
+ u32 sugg = msg_sugg_node_addr(hdr);
+ u32 signature = msg_node_sig(hdr);
+ u8 peer_id[NODE_ID_LEN] = {0,};
+ u32 dst = msg_dest_domain(hdr);
+ u32 net_id = msg_bc_netid(hdr);
+ struct tipc_media_addr maddr;
+ u32 src = msg_prevnode(hdr);
+ u32 mtyp = msg_type(hdr);
+ bool dupl_addr = false;
+ bool respond = false;
+ u32 self;
+ int err;
+
+ if (skb_linearize(skb)) {
+ kfree_skb(skb);
+ return;
+ }
+ hdr = buf_msg(skb);
+
+ if (caps & TIPC_NODE_ID128)
+ memcpy(peer_id, msg_node_id(hdr), NODE_ID_LEN);
+ else
+ sprintf(peer_id, "%x", src);
+
+ err = b->media->msg2addr(b, &maddr, msg_media_addr(hdr));
+ kfree_skb(skb);
+ if (err || maddr.broadcast) {
+ pr_warn_ratelimited("Rcv corrupt discovery message\n");
+ return;
+ }
+ /* Ignore discovery messages from own node */
+ if (!memcmp(&maddr, &b->addr, sizeof(maddr)))
+ return;
+ if (net_id != tn->net_id)
+ return;
+ if (tipc_disc_addr_trial_msg(b->disc, &maddr, b, dst,
+ src, sugg, peer_id, mtyp))
+ return;
+ self = tipc_own_addr(net);
+
+ /* Message from somebody using this node's address */
+ if (in_own_node(net, src)) {
+ disc_dupl_alert(b, self, &maddr);
+ return;
+ }
+ if (!tipc_in_scope(legacy, dst, self))
+ return;
+ if (!tipc_in_scope(legacy, b->domain, src))
+ return;
+ tipc_node_check_dest(net, src, peer_id, b, caps, signature, pnet_hash,
+ &maddr, &respond, &dupl_addr);
+ if (dupl_addr)
+ disc_dupl_alert(b, src, &maddr);
+ if (!respond)
+ return;
+ if (mtyp != DSC_REQ_MSG)
+ return;
+ tipc_disc_msg_xmit(net, DSC_RESP_MSG, src, self, 0, &maddr, b);
+}
+
+/* tipc_disc_add_dest - increment set of discovered nodes
+ */
+void tipc_disc_add_dest(struct tipc_discoverer *d)
+{
+ spin_lock_bh(&d->lock);
+ d->num_nodes++;
+ spin_unlock_bh(&d->lock);
+}
+
+/* tipc_disc_remove_dest - decrement set of discovered nodes
+ */
+void tipc_disc_remove_dest(struct tipc_discoverer *d)
+{
+ int intv, num;
+
+ spin_lock_bh(&d->lock);
+ d->num_nodes--;
+ num = d->num_nodes;
+ intv = d->timer_intv;
+ if (!num && (intv == TIPC_DISC_INACTIVE || intv > TIPC_DISC_FAST)) {
+ d->timer_intv = TIPC_DISC_INIT;
+ mod_timer(&d->timer, jiffies + d->timer_intv);
+ }
+ spin_unlock_bh(&d->lock);
+}
+
+/* tipc_disc_timeout - send a periodic link setup request
+ * Called whenever a link setup request timer associated with a bearer expires.
+ * - Keep doubling time between sent request until limit is reached;
+ * - Hold at fast polling rate if we don't have any associated nodes
+ * - Otherwise hold at slow polling rate
+ */
+static void tipc_disc_timeout(struct timer_list *t)
+{
+ struct tipc_discoverer *d = from_timer(d, t, timer);
+ struct tipc_net *tn = tipc_net(d->net);
+ struct tipc_media_addr maddr;
+ struct sk_buff *skb = NULL;
+ struct net *net = d->net;
+ u32 bearer_id;
+
+ spin_lock_bh(&d->lock);
+
+ /* Stop searching if only desired node has been found */
+ if (tipc_node(d->domain) && d->num_nodes) {
+ d->timer_intv = TIPC_DISC_INACTIVE;
+ goto exit;
+ }
+
+ /* Did we just leave trial period ? */
+ if (!time_before(jiffies, tn->addr_trial_end) && !tipc_own_addr(net)) {
+ mod_timer(&d->timer, jiffies + TIPC_DISC_INIT);
+ spin_unlock_bh(&d->lock);
+ schedule_work(&tn->work);
+ return;
+ }
+
+ /* Adjust timeout interval according to discovery phase */
+ if (time_before(jiffies, tn->addr_trial_end)) {
+ d->timer_intv = TIPC_DISC_INIT;
+ } else {
+ d->timer_intv *= 2;
+ if (d->num_nodes && d->timer_intv > TIPC_DISC_SLOW)
+ d->timer_intv = TIPC_DISC_SLOW;
+ else if (!d->num_nodes && d->timer_intv > TIPC_DISC_FAST)
+ d->timer_intv = TIPC_DISC_FAST;
+ msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
+ msg_set_prevnode(buf_msg(d->skb), tn->trial_addr);
+ }
+
+ mod_timer(&d->timer, jiffies + d->timer_intv);
+ memcpy(&maddr, &d->dest, sizeof(maddr));
+ skb = skb_clone(d->skb, GFP_ATOMIC);
+ bearer_id = d->bearer_id;
+exit:
+ spin_unlock_bh(&d->lock);
+ if (skb)
+ tipc_bearer_xmit_skb(net, bearer_id, skb, &maddr);
+}
+
+/**
+ * tipc_disc_create - create object to send periodic link setup requests
+ * @net: the applicable net namespace
+ * @b: ptr to bearer issuing requests
+ * @dest: destination address for request messages
+ * @skb: pointer to created frame
+ *
+ * Return: 0 if successful, otherwise -errno.
+ */
+int tipc_disc_create(struct net *net, struct tipc_bearer *b,
+ struct tipc_media_addr *dest, struct sk_buff **skb)
+{
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_discoverer *d;
+
+ d = kmalloc(sizeof(*d), GFP_ATOMIC);
+ if (!d)
+ return -ENOMEM;
+ d->skb = tipc_buf_acquire(MAX_H_SIZE + NODE_ID_LEN, GFP_ATOMIC);
+ if (!d->skb) {
+ kfree(d);
+ return -ENOMEM;
+ }
+ tipc_disc_init_msg(net, d->skb, DSC_REQ_MSG, b);
+
+ /* Do we need an address trial period first ? */
+ if (!tipc_own_addr(net)) {
+ tn->addr_trial_end = jiffies + msecs_to_jiffies(1000);
+ msg_set_type(buf_msg(d->skb), DSC_TRIAL_MSG);
+ }
+ memcpy(&d->dest, dest, sizeof(*dest));
+ d->net = net;
+ d->bearer_id = b->identity;
+ d->domain = b->domain;
+ d->num_nodes = 0;
+ d->timer_intv = TIPC_DISC_INIT;
+ spin_lock_init(&d->lock);
+ timer_setup(&d->timer, tipc_disc_timeout, 0);
+ mod_timer(&d->timer, jiffies + d->timer_intv);
+ b->disc = d;
+ *skb = skb_clone(d->skb, GFP_ATOMIC);
+ return 0;
+}
+
+/**
+ * tipc_disc_delete - destroy object sending periodic link setup requests
+ * @d: ptr to link dest structure
+ */
+void tipc_disc_delete(struct tipc_discoverer *d)
+{
+ timer_shutdown_sync(&d->timer);
+ kfree_skb(d->skb);
+ kfree(d);
+}
+
+/**
+ * tipc_disc_reset - reset object to send periodic link setup requests
+ * @net: the applicable net namespace
+ * @b: ptr to bearer issuing requests
+ */
+void tipc_disc_reset(struct net *net, struct tipc_bearer *b)
+{
+ struct tipc_discoverer *d = b->disc;
+ struct tipc_media_addr maddr;
+ struct sk_buff *skb;
+
+ spin_lock_bh(&d->lock);
+ tipc_disc_init_msg(net, d->skb, DSC_REQ_MSG, b);
+ d->net = net;
+ d->bearer_id = b->identity;
+ d->domain = b->domain;
+ d->num_nodes = 0;
+ d->timer_intv = TIPC_DISC_INIT;
+ memcpy(&maddr, &d->dest, sizeof(maddr));
+ mod_timer(&d->timer, jiffies + d->timer_intv);
+ skb = skb_clone(d->skb, GFP_ATOMIC);
+ spin_unlock_bh(&d->lock);
+ if (skb)
+ tipc_bearer_xmit_skb(net, b->identity, skb, &maddr);
+}
diff --git a/net/tipc/discover.h b/net/tipc/discover.h
new file mode 100644
index 0000000000..521d96c41d
--- /dev/null
+++ b/net/tipc/discover.h
@@ -0,0 +1,51 @@
+/*
+ * net/tipc/discover.h
+ *
+ * Copyright (c) 2003-2006, Ericsson AB
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_DISCOVER_H
+#define _TIPC_DISCOVER_H
+
+struct tipc_discoverer;
+
+int tipc_disc_create(struct net *net, struct tipc_bearer *b_ptr,
+ struct tipc_media_addr *dest, struct sk_buff **skb);
+void tipc_disc_delete(struct tipc_discoverer *req);
+void tipc_disc_reset(struct net *net, struct tipc_bearer *b_ptr);
+void tipc_disc_add_dest(struct tipc_discoverer *req);
+void tipc_disc_remove_dest(struct tipc_discoverer *req);
+void tipc_disc_rcv(struct net *net, struct sk_buff *buf,
+ struct tipc_bearer *b_ptr);
+
+#endif
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
new file mode 100644
index 0000000000..cb0d185e06
--- /dev/null
+++ b/net/tipc/eth_media.c
@@ -0,0 +1,98 @@
+/*
+ * net/tipc/eth_media.c: Ethernet bearer support for TIPC
+ *
+ * Copyright (c) 2001-2007, 2013-2014, Ericsson AB
+ * Copyright (c) 2005-2008, 2011-2013, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "bearer.h"
+
+/* Convert Ethernet address (media address format) to string */
+static int tipc_eth_addr2str(struct tipc_media_addr *addr,
+ char *strbuf, int bufsz)
+{
+ if (bufsz < 18) /* 18 = sizeof("aa:bb:cc:dd:ee:ff") */
+ return 1;
+
+ sprintf(strbuf, "%pM", addr->value);
+ return 0;
+}
+
+/* Convert from media address format to discovery message addr format */
+static int tipc_eth_addr2msg(char *msg, struct tipc_media_addr *addr)
+{
+ memset(msg, 0, TIPC_MEDIA_INFO_SIZE);
+ msg[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_ETH;
+ memcpy(msg + TIPC_MEDIA_ADDR_OFFSET, addr->value, ETH_ALEN);
+ return 0;
+}
+
+/* Convert raw mac address format to media addr format */
+static int tipc_eth_raw2addr(struct tipc_bearer *b,
+ struct tipc_media_addr *addr,
+ const char *msg)
+{
+ memset(addr, 0, sizeof(*addr));
+ ether_addr_copy(addr->value, msg);
+ addr->media_id = TIPC_MEDIA_TYPE_ETH;
+ addr->broadcast = is_broadcast_ether_addr(addr->value);
+ return 0;
+}
+
+/* Convert discovery msg addr format to Ethernet media addr format */
+static int tipc_eth_msg2addr(struct tipc_bearer *b,
+ struct tipc_media_addr *addr,
+ char *msg)
+{
+ /* Skip past preamble: */
+ msg += TIPC_MEDIA_ADDR_OFFSET;
+ return tipc_eth_raw2addr(b, addr, msg);
+}
+
+/* Ethernet media registration info */
+struct tipc_media eth_media_info = {
+ .send_msg = tipc_l2_send_msg,
+ .enable_media = tipc_enable_l2_media,
+ .disable_media = tipc_disable_l2_media,
+ .addr2str = tipc_eth_addr2str,
+ .addr2msg = tipc_eth_addr2msg,
+ .msg2addr = tipc_eth_msg2addr,
+ .raw2addr = tipc_eth_raw2addr,
+ .priority = TIPC_DEF_LINK_PRI,
+ .tolerance = TIPC_DEF_LINK_TOL,
+ .min_win = TIPC_DEF_LINK_WIN,
+ .max_win = TIPC_MAX_LINK_WIN,
+ .type_id = TIPC_MEDIA_TYPE_ETH,
+ .hwaddr_len = ETH_ALEN,
+ .name = "eth"
+};
diff --git a/net/tipc/group.c b/net/tipc/group.c
new file mode 100644
index 0000000000..3e137d8c9d
--- /dev/null
+++ b/net/tipc/group.c
@@ -0,0 +1,959 @@
+/*
+ * net/tipc/group.c: TIPC group messaging code
+ *
+ * Copyright (c) 2017, Ericsson AB
+ * Copyright (c) 2020, Red Hat Inc
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "addr.h"
+#include "group.h"
+#include "bcast.h"
+#include "topsrv.h"
+#include "msg.h"
+#include "socket.h"
+#include "node.h"
+#include "name_table.h"
+#include "subscr.h"
+
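+/* Advertised reception windows are counted in flow control blocks: one
+ * ADV_UNIT covers a maximum-size message, an idle member is granted one
+ * unit and an active member twelve.
+ */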
+#define ADV_UNIT (((MAX_MSG_SIZE + MAX_H_SIZE) / FLOWCTL_BLK_SZ) + 1)
+#define ADV_IDLE ADV_UNIT
+#define ADV_ACTIVE (ADV_UNIT * 12)
+
+enum mbr_state {
+ MBR_JOINING,
+ MBR_PUBLISHED,
+ MBR_JOINED,
+ MBR_PENDING,
+ MBR_ACTIVE,
+ MBR_RECLAIMING,
+ MBR_REMITTED,
+ MBR_LEAVING
+};
+
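+/* State kept per known group member, one instance per peer member socket;
+ * members are kept in an rbtree keyed by (node, port).
+ */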
+struct tipc_member {
+ struct rb_node tree_node;
+ struct list_head list;
+ struct list_head small_win;
+ struct sk_buff_head deferredq;
+ struct tipc_group *group;
+ u32 node;
+ u32 port;
+ u32 instance;
+ enum mbr_state state;
+ u16 advertised;
+ u16 window;
+ u16 bc_rcv_nxt;
+ u16 bc_syncpt;
+ u16 bc_acked;
+};
+
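+/* Per-socket group state: the rbtree of known members plus the lists used
+ * for flow control (small_win), activation handling (active/pending) and
+ * broadcast send/ack bookkeeping.
+ */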
+struct tipc_group {
+ struct rb_root members;
+ struct list_head small_win;
+ struct list_head pending;
+ struct list_head active;
+ struct tipc_nlist dests;
+ struct net *net;
+ int subid;
+ u32 type;
+ u32 instance;
+ u32 scope;
+ u32 portid;
+ u16 member_cnt;
+ u16 active_cnt;
+ u16 max_active;
+ u16 bc_snd_nxt;
+ u16 bc_ackers;
+ bool *open;
+ bool loopback;
+ bool events;
+};
+
+static void tipc_group_proto_xmit(struct tipc_group *grp, struct tipc_member *m,
+ int mtyp, struct sk_buff_head *xmitq);
+
+static void tipc_group_open(struct tipc_member *m, bool *wakeup)
+{
+ *wakeup = false;
+ if (list_empty(&m->small_win))
+ return;
+ list_del_init(&m->small_win);
+ *m->group->open = true;
+ *wakeup = true;
+}
+
+static void tipc_group_decr_active(struct tipc_group *grp,
+ struct tipc_member *m)
+{
+ if (m->state == MBR_ACTIVE || m->state == MBR_RECLAIMING ||
+ m->state == MBR_REMITTED)
+ grp->active_cnt--;
+}
+
+static int tipc_group_rcvbuf_limit(struct tipc_group *grp)
+{
+ int max_active, active_pool, idle_pool;
+ int mcnt = grp->member_cnt + 1;
+
+ /* Limit simultaneous reception from other members */
+ max_active = min(mcnt / 8, 64);
+ max_active = max(max_active, 16);
+ grp->max_active = max_active;
+
+ /* Reserve blocks for active and idle members */
+ active_pool = max_active * ADV_ACTIVE;
+ idle_pool = (mcnt - max_active) * ADV_IDLE;
+
+ /* Scale to bytes, considering worst-case truesize/msgsize ratio */
+ return (active_pool + idle_pool) * FLOWCTL_BLK_SZ * 4;
+}
+
+u16 tipc_group_bc_snd_nxt(struct tipc_group *grp)
+{
+ return grp->bc_snd_nxt;
+}
+
+static bool tipc_group_is_receiver(struct tipc_member *m)
+{
+ return m && m->state != MBR_JOINING && m->state != MBR_LEAVING;
+}
+
+static bool tipc_group_is_sender(struct tipc_member *m)
+{
+ return m && m->state != MBR_JOINING && m->state != MBR_PUBLISHED;
+}
+
+u32 tipc_group_exclude(struct tipc_group *grp)
+{
+ if (!grp->loopback)
+ return grp->portid;
+ return 0;
+}
+
+struct tipc_group *tipc_group_create(struct net *net, u32 portid,
+ struct tipc_group_req *mreq,
+ bool *group_is_open)
+{
+ u32 filter = TIPC_SUB_PORTS | TIPC_SUB_NO_STATUS;
+ bool global = mreq->scope != TIPC_NODE_SCOPE;
+ struct tipc_group *grp;
+ u32 type = mreq->type;
+
+ grp = kzalloc(sizeof(*grp), GFP_ATOMIC);
+ if (!grp)
+ return NULL;
+ tipc_nlist_init(&grp->dests, tipc_own_addr(net));
+ INIT_LIST_HEAD(&grp->small_win);
+ INIT_LIST_HEAD(&grp->active);
+ INIT_LIST_HEAD(&grp->pending);
+ grp->members = RB_ROOT;
+ grp->net = net;
+ grp->portid = portid;
+ grp->type = type;
+ grp->instance = mreq->instance;
+ grp->scope = mreq->scope;
+ grp->loopback = mreq->flags & TIPC_GROUP_LOOPBACK;
+ grp->events = mreq->flags & TIPC_GROUP_MEMBER_EVTS;
+ grp->open = group_is_open;
+ *grp->open = false;
+ filter |= global ? TIPC_SUB_CLUSTER_SCOPE : TIPC_SUB_NODE_SCOPE;
+ if (tipc_topsrv_kern_subscr(net, portid, type, 0, ~0,
+ filter, &grp->subid))
+ return grp;
+ kfree(grp);
+ return NULL;
+}
+
+void tipc_group_join(struct net *net, struct tipc_group *grp, int *sk_rcvbuf)
+{
+ struct rb_root *tree = &grp->members;
+ struct tipc_member *m, *tmp;
+ struct sk_buff_head xmitq;
+
+ __skb_queue_head_init(&xmitq);
+ rbtree_postorder_for_each_entry_safe(m, tmp, tree, tree_node) {
+ tipc_group_proto_xmit(grp, m, GRP_JOIN_MSG, &xmitq);
+ tipc_group_update_member(m, 0);
+ }
+ tipc_node_distr_xmit(net, &xmitq);
+ *sk_rcvbuf = tipc_group_rcvbuf_limit(grp);
+}
+
+void tipc_group_delete(struct net *net, struct tipc_group *grp)
+{
+ struct rb_root *tree = &grp->members;
+ struct tipc_member *m, *tmp;
+ struct sk_buff_head xmitq;
+
+ __skb_queue_head_init(&xmitq);
+
+ rbtree_postorder_for_each_entry_safe(m, tmp, tree, tree_node) {
+ tipc_group_proto_xmit(grp, m, GRP_LEAVE_MSG, &xmitq);
+ __skb_queue_purge(&m->deferredq);
+ list_del(&m->list);
+ kfree(m);
+ }
+ tipc_node_distr_xmit(net, &xmitq);
+ tipc_nlist_purge(&grp->dests);
+ tipc_topsrv_kern_unsubscr(net, grp->subid);
+ kfree(grp);
+}
+
+static struct tipc_member *tipc_group_find_member(struct tipc_group *grp,
+ u32 node, u32 port)
+{
+ struct rb_node *n = grp->members.rb_node;
+ u64 nkey, key = (u64)node << 32 | port;
+ struct tipc_member *m;
+
+ while (n) {
+ m = container_of(n, struct tipc_member, tree_node);
+ nkey = (u64)m->node << 32 | m->port;
+ if (key < nkey)
+ n = n->rb_left;
+ else if (key > nkey)
+ n = n->rb_right;
+ else
+ return m;
+ }
+ return NULL;
+}
+
+static struct tipc_member *tipc_group_find_dest(struct tipc_group *grp,
+ u32 node, u32 port)
+{
+ struct tipc_member *m;
+
+ m = tipc_group_find_member(grp, node, port);
+ if (m && tipc_group_is_receiver(m))
+ return m;
+ return NULL;
+}
+
+static struct tipc_member *tipc_group_find_node(struct tipc_group *grp,
+ u32 node)
+{
+ struct tipc_member *m;
+ struct rb_node *n;
+
+ for (n = rb_first(&grp->members); n; n = rb_next(n)) {
+ m = container_of(n, struct tipc_member, tree_node);
+ if (m->node == node)
+ return m;
+ }
+ return NULL;
+}
+
+static int tipc_group_add_to_tree(struct tipc_group *grp,
+ struct tipc_member *m)
+{
+ u64 nkey, key = (u64)m->node << 32 | m->port;
+ struct rb_node **n, *parent = NULL;
+ struct tipc_member *tmp;
+
+ n = &grp->members.rb_node;
+ while (*n) {
+ tmp = container_of(*n, struct tipc_member, tree_node);
+ parent = *n;
+ tmp = container_of(parent, struct tipc_member, tree_node);
+ nkey = (u64)tmp->node << 32 | tmp->port;
+ if (key < nkey)
+ n = &(*n)->rb_left;
+ else if (key > nkey)
+ n = &(*n)->rb_right;
+ else
+ return -EEXIST;
+ }
+ rb_link_node(&m->tree_node, parent, n);
+ rb_insert_color(&m->tree_node, &grp->members);
+ return 0;
+}
+
+static struct tipc_member *tipc_group_create_member(struct tipc_group *grp,
+ u32 node, u32 port,
+ u32 instance, int state)
+{
+ struct tipc_member *m;
+ int ret;
+
+ m = kzalloc(sizeof(*m), GFP_ATOMIC);
+ if (!m)
+ return NULL;
+ INIT_LIST_HEAD(&m->list);
+ INIT_LIST_HEAD(&m->small_win);
+ __skb_queue_head_init(&m->deferredq);
+ m->group = grp;
+ m->node = node;
+ m->port = port;
+ m->instance = instance;
+ m->bc_acked = grp->bc_snd_nxt - 1;
+ ret = tipc_group_add_to_tree(grp, m);
+ if (ret < 0) {
+ kfree(m);
+ return NULL;
+ }
+ grp->member_cnt++;
+ tipc_nlist_add(&grp->dests, m->node);
+ m->state = state;
+ return m;
+}
+
+void tipc_group_add_member(struct tipc_group *grp, u32 node,
+ u32 port, u32 instance)
+{
+ tipc_group_create_member(grp, node, port, instance, MBR_PUBLISHED);
+}
+
+static void tipc_group_delete_member(struct tipc_group *grp,
+ struct tipc_member *m)
+{
+ rb_erase(&m->tree_node, &grp->members);
+ grp->member_cnt--;
+
+ /* Check if we were waiting for replicast ack from this member */
+ if (grp->bc_ackers && less(m->bc_acked, grp->bc_snd_nxt - 1))
+ grp->bc_ackers--;
+
+ list_del_init(&m->list);
+ list_del_init(&m->small_win);
+ tipc_group_decr_active(grp, m);
+
+ /* If last member on a node, remove node from dest list */
+ if (!tipc_group_find_node(grp, m->node))
+ tipc_nlist_del(&grp->dests, m->node);
+
+ kfree(m);
+}
+
+struct tipc_nlist *tipc_group_dests(struct tipc_group *grp)
+{
+ return &grp->dests;
+}
+
+void tipc_group_self(struct tipc_group *grp, struct tipc_service_range *seq,
+ int *scope)
+{
+ seq->type = grp->type;
+ seq->lower = grp->instance;
+ seq->upper = grp->instance;
+ *scope = grp->scope;
+}
+
+void tipc_group_update_member(struct tipc_member *m, int len)
+{
+ struct tipc_group *grp = m->group;
+ struct tipc_member *_m, *tmp;
+
+ if (!tipc_group_is_receiver(m))
+ return;
+
+ m->window -= len;
+
+ if (m->window >= ADV_IDLE)
+ return;
+
+ list_del_init(&m->small_win);
+
+ /* Sort member into small_window members' list */
+ list_for_each_entry_safe(_m, tmp, &grp->small_win, small_win) {
+ if (_m->window > m->window)
+ break;
+ }
+ list_add_tail(&m->small_win, &_m->small_win);
+}
+
+void tipc_group_update_bc_members(struct tipc_group *grp, int len, bool ack)
+{
+ u16 prev = grp->bc_snd_nxt - 1;
+ struct tipc_member *m;
+ struct rb_node *n;
+ u16 ackers = 0;
+
+ for (n = rb_first(&grp->members); n; n = rb_next(n)) {
+ m = container_of(n, struct tipc_member, tree_node);
+ if (tipc_group_is_receiver(m)) {
+ tipc_group_update_member(m, len);
+ m->bc_acked = prev;
+ ackers++;
+ }
+ }
+
+ /* Mark number of acknowledges to expect, if any */
+ if (ack)
+ grp->bc_ackers = ackers;
+ grp->bc_snd_nxt++;
+}
+
+bool tipc_group_cong(struct tipc_group *grp, u32 dnode, u32 dport,
+ int len, struct tipc_member **mbr)
+{
+ struct sk_buff_head xmitq;
+ struct tipc_member *m;
+ int adv, state;
+
+ m = tipc_group_find_dest(grp, dnode, dport);
+ if (!tipc_group_is_receiver(m)) {
+ *mbr = NULL;
+ return false;
+ }
+ *mbr = m;
+
+ if (m->window >= len)
+ return false;
+
+ *grp->open = false;
+
+ /* If not fully advertised, do it now to prevent mutual blocking */
+ adv = m->advertised;
+ state = m->state;
+ if (state == MBR_JOINED && adv == ADV_IDLE)
+ return true;
+ if (state == MBR_ACTIVE && adv == ADV_ACTIVE)
+ return true;
+ if (state == MBR_PENDING && adv == ADV_IDLE)
+ return true;
+ __skb_queue_head_init(&xmitq);
+ tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, &xmitq);
+ tipc_node_distr_xmit(grp->net, &xmitq);
+ return true;
+}
+
+bool tipc_group_bc_cong(struct tipc_group *grp, int len)
+{
+ struct tipc_member *m = NULL;
+
+ /* If prev bcast was replicast, reject until all receivers have acked */
+ if (grp->bc_ackers) {
+ *grp->open = false;
+ return true;
+ }
+ if (list_empty(&grp->small_win))
+ return false;
+
+ m = list_first_entry(&grp->small_win, struct tipc_member, small_win);
+ if (m->window >= len)
+ return false;
+
+ return tipc_group_cong(grp, m->node, m->port, len, &m);
+}
+
+/* tipc_group_sort_msg() - sort msg into queue by bcast sequence number
+ */
+static void tipc_group_sort_msg(struct sk_buff *skb, struct sk_buff_head *defq)
+{
+ struct tipc_msg *_hdr, *hdr = buf_msg(skb);
+ u16 bc_seqno = msg_grp_bc_seqno(hdr);
+ struct sk_buff *_skb, *tmp;
+ int mtyp = msg_type(hdr);
+
+ /* Bcast/mcast may be bypassed by ucast or other bcast, - sort it in */
+ if (mtyp == TIPC_GRP_BCAST_MSG || mtyp == TIPC_GRP_MCAST_MSG) {
+ skb_queue_walk_safe(defq, _skb, tmp) {
+ _hdr = buf_msg(_skb);
+ if (!less(bc_seqno, msg_grp_bc_seqno(_hdr)))
+ continue;
+ __skb_queue_before(defq, _skb, skb);
+ return;
+ }
+ /* Bcast was not bypassed, - add to tail */
+ }
+ /* Unicasts are never bypassed, - always add to tail */
+ __skb_queue_tail(defq, skb);
+}
+
+/* tipc_group_filter_msg() - determine if we should accept arriving message
+ */
+void tipc_group_filter_msg(struct tipc_group *grp, struct sk_buff_head *inputq,
+ struct sk_buff_head *xmitq)
+{
+ struct sk_buff *skb = __skb_dequeue(inputq);
+ bool ack, deliver, update, leave = false;
+ struct sk_buff_head *defq;
+ struct tipc_member *m;
+ struct tipc_msg *hdr;
+ u32 node, port;
+ int mtyp, blks;
+
+ if (!skb)
+ return;
+
+ hdr = buf_msg(skb);
+ node = msg_orignode(hdr);
+ port = msg_origport(hdr);
+
+ if (!msg_in_group(hdr))
+ goto drop;
+
+ m = tipc_group_find_member(grp, node, port);
+ if (!tipc_group_is_sender(m))
+ goto drop;
+
+ if (less(msg_grp_bc_seqno(hdr), m->bc_rcv_nxt))
+ goto drop;
+
+ TIPC_SKB_CB(skb)->orig_member = m->instance;
+ defq = &m->deferredq;
+ tipc_group_sort_msg(skb, defq);
+
+ while ((skb = skb_peek(defq))) {
+ hdr = buf_msg(skb);
+ mtyp = msg_type(hdr);
+ blks = msg_blocks(hdr);
+ deliver = true;
+ ack = false;
+ update = false;
+
+ if (more(msg_grp_bc_seqno(hdr), m->bc_rcv_nxt))
+ break;
+
+ /* Decide what to do with message */
+ switch (mtyp) {
+ case TIPC_GRP_MCAST_MSG:
+ if (msg_nameinst(hdr) != grp->instance) {
+ update = true;
+ deliver = false;
+ }
+ fallthrough;
+ case TIPC_GRP_BCAST_MSG:
+ m->bc_rcv_nxt++;
+ ack = msg_grp_bc_ack_req(hdr);
+ break;
+ case TIPC_GRP_UCAST_MSG:
+ break;
+ case TIPC_GRP_MEMBER_EVT:
+ if (m->state == MBR_LEAVING)
+ leave = true;
+ if (!grp->events)
+ deliver = false;
+ break;
+ default:
+ break;
+ }
+
+ /* Execute decisions */
+ __skb_dequeue(defq);
+ if (deliver)
+ __skb_queue_tail(inputq, skb);
+ else
+ kfree_skb(skb);
+
+ if (ack)
+ tipc_group_proto_xmit(grp, m, GRP_ACK_MSG, xmitq);
+
+ if (leave) {
+ __skb_queue_purge(defq);
+ tipc_group_delete_member(grp, m);
+ break;
+ }
+ if (!update)
+ continue;
+
+ tipc_group_update_rcv_win(grp, blks, node, port, xmitq);
+ }
+ return;
+drop:
+ kfree_skb(skb);
+}
+
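+/* tipc_group_update_rcv_win() - deduct just-consumed blocks from a member's
+ * advertised window and drive its JOINED/PENDING/ACTIVE/REMITTED transitions,
+ * reclaiming credit from the oldest active member when the pool runs low
+ */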
+void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
+ u32 port, struct sk_buff_head *xmitq)
+{
+ struct list_head *active = &grp->active;
+ int max_active = grp->max_active;
+ int reclaim_limit = max_active * 3 / 4;
+ int active_cnt = grp->active_cnt;
+ struct tipc_member *m, *rm, *pm;
+
+ m = tipc_group_find_member(grp, node, port);
+ if (!m)
+ return;
+
+ m->advertised -= blks;
+
+ switch (m->state) {
+ case MBR_JOINED:
+ /* First, decide if member can go active */
+ if (active_cnt <= max_active) {
+ m->state = MBR_ACTIVE;
+ list_add_tail(&m->list, active);
+ grp->active_cnt++;
+ tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
+ } else {
+ m->state = MBR_PENDING;
+ list_add_tail(&m->list, &grp->pending);
+ }
+
+ if (active_cnt < reclaim_limit)
+ break;
+
+ /* Reclaim from oldest active member, if possible */
+ if (!list_empty(active)) {
+ rm = list_first_entry(active, struct tipc_member, list);
+ rm->state = MBR_RECLAIMING;
+ list_del_init(&rm->list);
+ tipc_group_proto_xmit(grp, rm, GRP_RECLAIM_MSG, xmitq);
+ break;
+ }
+ /* Nobody to reclaim from; - revert oldest pending to JOINED */
+ pm = list_first_entry(&grp->pending, struct tipc_member, list);
+ list_del_init(&pm->list);
+ pm->state = MBR_JOINED;
+ tipc_group_proto_xmit(grp, pm, GRP_ADV_MSG, xmitq);
+ break;
+ case MBR_ACTIVE:
+ if (!list_is_last(&m->list, &grp->active))
+ list_move_tail(&m->list, &grp->active);
+ if (m->advertised > (ADV_ACTIVE * 3 / 4))
+ break;
+ tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
+ break;
+ case MBR_REMITTED:
+ if (m->advertised > ADV_IDLE)
+ break;
+ m->state = MBR_JOINED;
+ grp->active_cnt--;
+ if (m->advertised < ADV_IDLE) {
+ pr_warn_ratelimited("Rcv unexpected msg after REMIT\n");
+ tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
+ }
+
+ if (list_empty(&grp->pending))
+ return;
+
+ /* Set oldest pending member to active and advertise */
+ pm = list_first_entry(&grp->pending, struct tipc_member, list);
+ pm->state = MBR_ACTIVE;
+ list_move_tail(&pm->list, &grp->active);
+ grp->active_cnt++;
+ tipc_group_proto_xmit(grp, pm, GRP_ADV_MSG, xmitq);
+ break;
+ case MBR_RECLAIMING:
+ case MBR_JOINING:
+ case MBR_LEAVING:
+ default:
+ break;
+ }
+}
+
+static void tipc_group_create_event(struct tipc_group *grp,
+ struct tipc_member *m,
+ u32 event, u16 seqno,
+ struct sk_buff_head *inputq)
+{
+ u32 dnode = tipc_own_addr(grp->net);
+ struct tipc_event evt;
+ struct sk_buff *skb;
+ struct tipc_msg *hdr;
+
+ memset(&evt, 0, sizeof(evt));
+ evt.event = event;
+ evt.found_lower = m->instance;
+ evt.found_upper = m->instance;
+ evt.port.ref = m->port;
+ evt.port.node = m->node;
+ evt.s.seq.type = grp->type;
+ evt.s.seq.lower = m->instance;
+ evt.s.seq.upper = m->instance;
+
+ skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_GRP_MEMBER_EVT,
+ GROUP_H_SIZE, sizeof(evt), dnode, m->node,
+ grp->portid, m->port, 0);
+ if (!skb)
+ return;
+
+ hdr = buf_msg(skb);
+ msg_set_nametype(hdr, grp->type);
+ msg_set_grp_evt(hdr, event);
+ msg_set_dest_droppable(hdr, true);
+ msg_set_grp_bc_seqno(hdr, seqno);
+ memcpy(msg_data(hdr), &evt, sizeof(evt));
+ TIPC_SKB_CB(skb)->orig_member = m->instance;
+ __skb_queue_tail(inputq, skb);
+}
+
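+/* tipc_group_proto_xmit() - build and queue a group protocol message
+ * (JOIN/LEAVE/ADV/ACK/REMIT) towards one member, topping up its advertised
+ * window where applicable
+ */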
+static void tipc_group_proto_xmit(struct tipc_group *grp, struct tipc_member *m,
+ int mtyp, struct sk_buff_head *xmitq)
+{
+ struct tipc_msg *hdr;
+ struct sk_buff *skb;
+ int adv = 0;
+
+ skb = tipc_msg_create(GROUP_PROTOCOL, mtyp, INT_H_SIZE, 0,
+ m->node, tipc_own_addr(grp->net),
+ m->port, grp->portid, 0);
+ if (!skb)
+ return;
+
+ if (m->state == MBR_ACTIVE)
+ adv = ADV_ACTIVE - m->advertised;
+ else if (m->state == MBR_JOINED || m->state == MBR_PENDING)
+ adv = ADV_IDLE - m->advertised;
+
+ hdr = buf_msg(skb);
+
+ if (mtyp == GRP_JOIN_MSG) {
+ msg_set_grp_bc_syncpt(hdr, grp->bc_snd_nxt);
+ msg_set_adv_win(hdr, adv);
+ m->advertised += adv;
+ } else if (mtyp == GRP_LEAVE_MSG) {
+ msg_set_grp_bc_syncpt(hdr, grp->bc_snd_nxt);
+ } else if (mtyp == GRP_ADV_MSG) {
+ msg_set_adv_win(hdr, adv);
+ m->advertised += adv;
+ } else if (mtyp == GRP_ACK_MSG) {
+ msg_set_grp_bc_acked(hdr, m->bc_rcv_nxt);
+ } else if (mtyp == GRP_REMIT_MSG) {
+ msg_set_grp_remitted(hdr, m->window);
+ }
+ msg_set_dest_droppable(hdr, true);
+ __skb_queue_tail(xmitq, skb);
+}
+
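+/* tipc_group_proto_rcv() - handle a group protocol message received from a
+ * peer member and update the corresponding member state
+ */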
+void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
+ struct tipc_msg *hdr, struct sk_buff_head *inputq,
+ struct sk_buff_head *xmitq)
+{
+ u32 node = msg_orignode(hdr);
+ u32 port = msg_origport(hdr);
+ struct tipc_member *m, *pm;
+ u16 remitted, in_flight;
+
+ if (!grp)
+ return;
+
+ if (grp->scope == TIPC_NODE_SCOPE && node != tipc_own_addr(grp->net))
+ return;
+
+ m = tipc_group_find_member(grp, node, port);
+
+ switch (msg_type(hdr)) {
+ case GRP_JOIN_MSG:
+ if (!m)
+ m = tipc_group_create_member(grp, node, port,
+ 0, MBR_JOINING);
+ if (!m)
+ return;
+ m->bc_syncpt = msg_grp_bc_syncpt(hdr);
+ m->bc_rcv_nxt = m->bc_syncpt;
+ m->window += msg_adv_win(hdr);
+
+ /* Wait until PUBLISH event is received if necessary */
+ if (m->state != MBR_PUBLISHED)
+ return;
+
+ /* Member can be taken into service */
+ m->state = MBR_JOINED;
+ tipc_group_open(m, usr_wakeup);
+ tipc_group_update_member(m, 0);
+ tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
+ tipc_group_create_event(grp, m, TIPC_PUBLISHED,
+ m->bc_syncpt, inputq);
+ return;
+ case GRP_LEAVE_MSG:
+ if (!m)
+ return;
+ m->bc_syncpt = msg_grp_bc_syncpt(hdr);
+ list_del_init(&m->list);
+ tipc_group_open(m, usr_wakeup);
+ tipc_group_decr_active(grp, m);
+ m->state = MBR_LEAVING;
+ tipc_group_create_event(grp, m, TIPC_WITHDRAWN,
+ m->bc_syncpt, inputq);
+ return;
+ case GRP_ADV_MSG:
+ if (!m)
+ return;
+ m->window += msg_adv_win(hdr);
+ tipc_group_open(m, usr_wakeup);
+ return;
+ case GRP_ACK_MSG:
+ if (!m)
+ return;
+ m->bc_acked = msg_grp_bc_acked(hdr);
+ if (--grp->bc_ackers)
+ return;
+ list_del_init(&m->small_win);
+ *m->group->open = true;
+ *usr_wakeup = true;
+ tipc_group_update_member(m, 0);
+ return;
+ case GRP_RECLAIM_MSG:
+ if (!m)
+ return;
+ tipc_group_proto_xmit(grp, m, GRP_REMIT_MSG, xmitq);
+ m->window = ADV_IDLE;
+ tipc_group_open(m, usr_wakeup);
+ return;
+ case GRP_REMIT_MSG:
+ if (!m || m->state != MBR_RECLAIMING)
+ return;
+
+ remitted = msg_grp_remitted(hdr);
+
+ /* Messages preceding the REMIT still in receive queue */
+ if (m->advertised > remitted) {
+ m->state = MBR_REMITTED;
+ in_flight = m->advertised - remitted;
+ m->advertised = ADV_IDLE + in_flight;
+ return;
+ }
+ /* This should never happen */
+ if (m->advertised < remitted)
+ pr_warn_ratelimited("Unexpected REMIT msg\n");
+
+ /* All messages preceding the REMIT have been read */
+ m->state = MBR_JOINED;
+ grp->active_cnt--;
+ m->advertised = ADV_IDLE;
+
+ /* Set oldest pending member to active and advertise */
+ if (list_empty(&grp->pending))
+ return;
+ pm = list_first_entry(&grp->pending, struct tipc_member, list);
+ pm->state = MBR_ACTIVE;
+ list_move_tail(&pm->list, &grp->active);
+ grp->active_cnt++;
+ if (pm->advertised <= (ADV_ACTIVE * 3 / 4))
+ tipc_group_proto_xmit(grp, pm, GRP_ADV_MSG, xmitq);
+ return;
+ default:
+ pr_warn("Received unknown GROUP_PROTO message\n");
+ }
+}
+
+/* tipc_group_member_evt() - receive and handle a member up/down event
+ */
+void tipc_group_member_evt(struct tipc_group *grp,
+ bool *usr_wakeup,
+ int *sk_rcvbuf,
+ struct tipc_msg *hdr,
+ struct sk_buff_head *inputq,
+ struct sk_buff_head *xmitq)
+{
+ struct tipc_event *evt = (void *)msg_data(hdr);
+ u32 instance = evt->found_lower;
+ u32 node = evt->port.node;
+ u32 port = evt->port.ref;
+ int event = evt->event;
+ struct tipc_member *m;
+ struct net *net;
+ u32 self;
+
+ if (!grp)
+ return;
+
+ net = grp->net;
+ self = tipc_own_addr(net);
+ if (!grp->loopback && node == self && port == grp->portid)
+ return;
+
+ m = tipc_group_find_member(grp, node, port);
+
+ switch (event) {
+ case TIPC_PUBLISHED:
+ /* Send and wait for arrival of JOIN message if necessary */
+ if (!m) {
+ m = tipc_group_create_member(grp, node, port, instance,
+ MBR_PUBLISHED);
+ if (!m)
+ break;
+ tipc_group_update_member(m, 0);
+ tipc_group_proto_xmit(grp, m, GRP_JOIN_MSG, xmitq);
+ break;
+ }
+
+ if (m->state != MBR_JOINING)
+ break;
+
+ /* Member can be taken into service */
+ m->instance = instance;
+ m->state = MBR_JOINED;
+ tipc_group_open(m, usr_wakeup);
+ tipc_group_update_member(m, 0);
+ tipc_group_proto_xmit(grp, m, GRP_JOIN_MSG, xmitq);
+ tipc_group_create_event(grp, m, TIPC_PUBLISHED,
+ m->bc_syncpt, inputq);
+ break;
+ case TIPC_WITHDRAWN:
+ if (!m)
+ break;
+
+ tipc_group_decr_active(grp, m);
+ m->state = MBR_LEAVING;
+ list_del_init(&m->list);
+ tipc_group_open(m, usr_wakeup);
+
+ /* Only send event if no LEAVE message can be expected */
+ if (!tipc_node_is_up(net, node))
+ tipc_group_create_event(grp, m, TIPC_WITHDRAWN,
+ m->bc_rcv_nxt, inputq);
+ break;
+ default:
+ break;
+ }
+ *sk_rcvbuf = tipc_group_rcvbuf_limit(grp);
+}
+
+int tipc_group_fill_sock_diag(struct tipc_group *grp, struct sk_buff *skb)
+{
+ struct nlattr *group = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_GROUP);
+
+ if (!group)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_ID,
+ grp->type) ||
+ nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_INSTANCE,
+ grp->instance) ||
+ nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_BC_SEND_NEXT,
+ grp->bc_snd_nxt))
+ goto group_msg_cancel;
+
+ if (grp->scope == TIPC_NODE_SCOPE)
+ if (nla_put_flag(skb, TIPC_NLA_SOCK_GROUP_NODE_SCOPE))
+ goto group_msg_cancel;
+
+ if (grp->scope == TIPC_CLUSTER_SCOPE)
+ if (nla_put_flag(skb, TIPC_NLA_SOCK_GROUP_CLUSTER_SCOPE))
+ goto group_msg_cancel;
+
+ if (*grp->open)
+ if (nla_put_flag(skb, TIPC_NLA_SOCK_GROUP_OPEN))
+ goto group_msg_cancel;
+
+ nla_nest_end(skb, group);
+ return 0;
+
+group_msg_cancel:
+ nla_nest_cancel(skb, group);
+ return -1;
+}
diff --git a/net/tipc/group.h b/net/tipc/group.h
new file mode 100644
index 0000000000..ea4c3be64c
--- /dev/null
+++ b/net/tipc/group.h
@@ -0,0 +1,77 @@
+/*
+ * net/tipc/group.h: Include file for TIPC group unicast/multicast functions
+ *
+ * Copyright (c) 2017, Ericsson AB
+ * Copyright (c) 2020, Red Hat Inc
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_GROUP_H
+#define _TIPC_GROUP_H
+
+#include "core.h"
+
+struct tipc_group;
+struct tipc_member;
+struct tipc_msg;
+
+struct tipc_group *tipc_group_create(struct net *net, u32 portid,
+ struct tipc_group_req *mreq,
+ bool *group_is_open);
+void tipc_group_join(struct net *net, struct tipc_group *grp, int *sk_rcv_buf);
+void tipc_group_delete(struct net *net, struct tipc_group *grp);
+void tipc_group_add_member(struct tipc_group *grp, u32 node,
+ u32 port, u32 instance);
+struct tipc_nlist *tipc_group_dests(struct tipc_group *grp);
+void tipc_group_self(struct tipc_group *grp, struct tipc_service_range *seq,
+ int *scope);
+u32 tipc_group_exclude(struct tipc_group *grp);
+void tipc_group_filter_msg(struct tipc_group *grp,
+ struct sk_buff_head *inputq,
+ struct sk_buff_head *xmitq);
+void tipc_group_member_evt(struct tipc_group *grp, bool *wakeup,
+ int *sk_rcvbuf, struct tipc_msg *hdr,
+ struct sk_buff_head *inputq,
+ struct sk_buff_head *xmitq);
+void tipc_group_proto_rcv(struct tipc_group *grp, bool *wakeup,
+ struct tipc_msg *hdr,
+ struct sk_buff_head *inputq,
+ struct sk_buff_head *xmitq);
+void tipc_group_update_bc_members(struct tipc_group *grp, int len, bool ack);
+bool tipc_group_cong(struct tipc_group *grp, u32 dnode, u32 dport,
+ int len, struct tipc_member **m);
+bool tipc_group_bc_cong(struct tipc_group *grp, int len);
+void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
+ u32 port, struct sk_buff_head *xmitq);
+u16 tipc_group_bc_snd_nxt(struct tipc_group *grp);
+void tipc_group_update_member(struct tipc_member *m, int len);
+int tipc_group_fill_sock_diag(struct tipc_group *grp, struct sk_buff *skb);
+#endif
diff --git a/net/tipc/ib_media.c b/net/tipc/ib_media.c
new file mode 100644
index 0000000000..b9ad0434c3
--- /dev/null
+++ b/net/tipc/ib_media.c
@@ -0,0 +1,104 @@
+/*
+ * net/tipc/ib_media.c: Infiniband bearer support for TIPC
+ *
+ * Copyright (c) 2013 Patrick McHardy <kaber@trash.net>
+ *
+ * Based on eth_media.c, which carries the following copyright notice:
+ *
+ * Copyright (c) 2001-2007, Ericsson AB
+ * Copyright (c) 2005-2008, 2011, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/if_infiniband.h>
+#include "core.h"
+#include "bearer.h"
+
+#define TIPC_MAX_IB_LINK_WIN 500
+
+/* Convert InfiniBand address (media address format) to string */
+static int tipc_ib_addr2str(struct tipc_media_addr *a, char *str_buf,
+ int str_size)
+{
+ if (str_size < 60) /* 60 = 19 * strlen("xx:") + strlen("xx\0") */
+ return 1;
+
+ sprintf(str_buf, "%20phC", a->value);
+
+ return 0;
+}
+
+/* Convert from media address format to discovery message addr format */
+static int tipc_ib_addr2msg(char *msg, struct tipc_media_addr *addr)
+{
+ memset(msg, 0, TIPC_MEDIA_INFO_SIZE);
+ memcpy(msg, addr->value, INFINIBAND_ALEN);
+ return 0;
+}
+
+/* Convert raw InfiniBand address format to media addr format */
+static int tipc_ib_raw2addr(struct tipc_bearer *b,
+ struct tipc_media_addr *addr,
+ const char *msg)
+{
+ memset(addr, 0, sizeof(*addr));
+ memcpy(addr->value, msg, INFINIBAND_ALEN);
+ addr->media_id = TIPC_MEDIA_TYPE_IB;
+ addr->broadcast = !memcmp(msg, b->bcast_addr.value,
+ INFINIBAND_ALEN);
+ return 0;
+}
+
+/* Convert discovery msg addr format to InfiniBand media addr format */
+static int tipc_ib_msg2addr(struct tipc_bearer *b,
+ struct tipc_media_addr *addr,
+ char *msg)
+{
+ return tipc_ib_raw2addr(b, addr, msg);
+}
+
+/* InfiniBand media registration info */
+struct tipc_media ib_media_info = {
+ .send_msg = tipc_l2_send_msg,
+ .enable_media = tipc_enable_l2_media,
+ .disable_media = tipc_disable_l2_media,
+ .addr2str = tipc_ib_addr2str,
+ .addr2msg = tipc_ib_addr2msg,
+ .msg2addr = tipc_ib_msg2addr,
+ .raw2addr = tipc_ib_raw2addr,
+ .priority = TIPC_DEF_LINK_PRI,
+ .tolerance = TIPC_DEF_LINK_TOL,
+ .min_win = TIPC_DEF_LINK_WIN,
+ .max_win = TIPC_MAX_IB_LINK_WIN,
+ .type_id = TIPC_MEDIA_TYPE_IB,
+ .hwaddr_len = INFINIBAND_ALEN,
+ .name = "ib"
+};
diff --git a/net/tipc/link.c b/net/tipc/link.c
new file mode 100644
index 0000000000..d014382365
--- /dev/null
+++ b/net/tipc/link.c
@@ -0,0 +1,3009 @@
+/*
+ * net/tipc/link.c: TIPC link code
+ *
+ * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
+ * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "subscr.h"
+#include "link.h"
+#include "bcast.h"
+#include "socket.h"
+#include "name_distr.h"
+#include "discover.h"
+#include "netlink.h"
+#include "monitor.h"
+#include "trace.h"
+#include "crypto.h"
+
+#include <linux/pkt_sched.h>
+
+struct tipc_stats {
+ u32 sent_pkts;
+ u32 recv_pkts;
+ u32 sent_states;
+ u32 recv_states;
+ u32 sent_probes;
+ u32 recv_probes;
+ u32 sent_nacks;
+ u32 recv_nacks;
+ u32 sent_acks;
+ u32 sent_bundled;
+ u32 sent_bundles;
+ u32 recv_bundled;
+ u32 recv_bundles;
+ u32 retransmitted;
+ u32 sent_fragmented;
+ u32 sent_fragments;
+ u32 recv_fragmented;
+ u32 recv_fragments;
+ u32 link_congs; /* # port sends blocked by congestion */
+ u32 deferred_recv;
+ u32 duplicates;
+ u32 max_queue_sz; /* send queue size high water mark */
+ u32 accu_queue_sz; /* used for send queue size profiling */
+ u32 queue_sz_counts; /* used for send queue size profiling */
+ u32 msg_length_counts; /* used for message length profiling */
+ u32 msg_lengths_total; /* used for message length profiling */
+ u32 msg_length_profile[7]; /* used for msg. length profiling */
+};
+
+/**
+ * struct tipc_link - TIPC link data structure
+ * @addr: network address of link's peer node
+ * @name: link name character string
+ * @media_addr: media address to use when sending messages over link
+ * @timer: link timer
+ * @net: pointer to namespace struct
+ * @refcnt: reference counter for permanent references (owner node & timer)
+ * @peer_session: link session # being used by peer end of link
+ * @peer_bearer_id: bearer id used by link's peer endpoint
+ * @bearer_id: local bearer id used by link
+ * @tolerance: minimum link continuity loss needed to reset link [in ms]
+ * @abort_limit: # of unacknowledged continuity probes needed to reset link
+ * @state: current state of link FSM
+ * @peer_caps: bitmap describing capabilities of peer node
+ * @silent_intv_cnt: # of timer intervals without any reception from peer
+ * @proto_msg: template for control messages generated by link
+ * @pmsg: convenience pointer to "proto_msg" field
+ * @priority: current link priority
+ * @net_plane: current link network plane ('A' through 'H')
+ * @mon_state: cookie with information needed by link monitor
+ * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
+ * @exp_msg_count: # of tunnelled messages expected during link changeover
+ * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
+ * @mtu: current maximum packet size for this link
+ * @advertised_mtu: advertised own mtu when link is being established
+ * @transmitq: queue for sent, non-acked messages
+ * @backlogq: queue for messages waiting to be sent
+ * @snt_nxt: next sequence number to use for outbound messages
+ * @ackers: # of peers that need to ack each packet before it can be released
+ * @acked: # last packet acked by a certain peer. Used for broadcast.
+ * @rcv_nxt: next sequence number to expect for inbound messages
+ * @deferred_queue: deferred queue of saved OOS b'cast messages received from node
+ * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
+ * @inputq: buffer queue for messages to be delivered upwards
+ * @namedq: buffer queue for name table messages to be delivered upwards
+ * @next_out: ptr to first unsent outbound message in queue
+ * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
+ * @long_msg_seq_no: next identifier to use for outbound fragmented messages
+ * @reasm_buf: head of partially reassembled inbound message fragments
+ * @bc_rcvr: marks that this is a broadcast receiver link
+ * @stats: collects statistics regarding link activity
+ * @session: session to be used by link
+ * @snd_nxt_state: next send seq number
+ * @rcv_nxt_state: next rcv seq number
+ * @in_session: have received ACTIVATE_MSG from peer
+ * @active: link is active
+ * @if_name: associated interface name
+ * @rst_cnt: link reset counter
+ * @drop_point: seq number for failover handling (FIXME)
+ * @failover_reasm_skb: saved failover msg ptr (FIXME)
+ * @failover_deferdq: deferred message queue for failover processing (FIXME)
+ * @transmq: the link's transmit queue
+ * @backlog: link's backlog by priority (importance)
+ * @snd_nxt: next sequence number to be used
+ * @rcv_unacked: # messages read by user, but not yet acked back to peer
+ * @deferdq: deferred receive queue
+ * @window: sliding window size for congestion handling
+ * @min_win: minimal send window to be used by link
+ * @ssthresh: slow start threshold for congestion handling
+ * @max_win: maximal send window to be used by link
+ * @cong_acks: congestion acks for congestion avoidance (FIXME)
+ * @checkpoint: seq number for congestion window size handling
+ * @reasm_tnlmsg: fragmentation/reassembly area for tunnel protocol message
+ * @last_gap: last gap ack blocks for bcast (FIXME)
+ * @last_ga: ptr to gap ack blocks
+ * @bc_rcvlink: the peer specific link used for broadcast reception
+ * @bc_sndlink: the namespace global link used for broadcast sending
+ * @nack_state: bcast nack state
+ * @bc_peer_is_up: peer has acked the bcast init msg
+ */
+struct tipc_link {
+ u32 addr;
+ char name[TIPC_MAX_LINK_NAME];
+ struct net *net;
+
+ /* Management and link supervision data */
+ u16 peer_session;
+ u16 session;
+ u16 snd_nxt_state;
+ u16 rcv_nxt_state;
+ u32 peer_bearer_id;
+ u32 bearer_id;
+ u32 tolerance;
+ u32 abort_limit;
+ u32 state;
+ u16 peer_caps;
+ bool in_session;
+ bool active;
+ u32 silent_intv_cnt;
+ char if_name[TIPC_MAX_IF_NAME];
+ u32 priority;
+ char net_plane;
+ struct tipc_mon_state mon_state;
+ u16 rst_cnt;
+
+ /* Failover/synch */
+ u16 drop_point;
+ struct sk_buff *failover_reasm_skb;
+ struct sk_buff_head failover_deferdq;
+
+ /* Max packet negotiation */
+ u16 mtu;
+ u16 advertised_mtu;
+
+ /* Sending */
+ struct sk_buff_head transmq;
+ struct sk_buff_head backlogq;
+ struct {
+ u16 len;
+ u16 limit;
+ struct sk_buff *target_bskb;
+ } backlog[5];
+ u16 snd_nxt;
+
+ /* Reception */
+ u16 rcv_nxt;
+ u32 rcv_unacked;
+ struct sk_buff_head deferdq;
+ struct sk_buff_head *inputq;
+ struct sk_buff_head *namedq;
+
+ /* Congestion handling */
+ struct sk_buff_head wakeupq;
+ u16 window;
+ u16 min_win;
+ u16 ssthresh;
+ u16 max_win;
+ u16 cong_acks;
+ u16 checkpoint;
+
+ /* Fragmentation/reassembly */
+ struct sk_buff *reasm_buf;
+ struct sk_buff *reasm_tnlmsg;
+
+ /* Broadcast */
+ u16 ackers;
+ u16 acked;
+ u16 last_gap;
+ struct tipc_gap_ack_blks *last_ga;
+ struct tipc_link *bc_rcvlink;
+ struct tipc_link *bc_sndlink;
+ u8 nack_state;
+ bool bc_peer_is_up;
+
+ /* Statistics */
+ struct tipc_stats stats;
+};
+
+/*
+ * Error message prefixes
+ */
+static const char *link_co_err = "Link tunneling error, ";
+static const char *link_rst_msg = "Resetting link ";
+
+/* Send states for broadcast NACKs
+ */
+enum {
+ BC_NACK_SND_CONDITIONAL,
+ BC_NACK_SND_UNCONDITIONAL,
+ BC_NACK_SND_SUPPRESS,
+};
+
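+/* Earliest time at which a packet may be (re)transmitted again: 10 ms after
+ * the previous attempt on the broadcast send link, 1 ms on unicast links
+ */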
+#define TIPC_BC_RETR_LIM (jiffies + msecs_to_jiffies(10))
+#define TIPC_UC_RETR_TIME (jiffies + msecs_to_jiffies(1))
+
+/* Link FSM states:
+ */
+enum {
+ LINK_ESTABLISHED = 0xe,
+ LINK_ESTABLISHING = 0xe << 4,
+ LINK_RESET = 0x1 << 8,
+ LINK_RESETTING = 0x2 << 12,
+ LINK_PEER_RESET = 0xd << 16,
+ LINK_FAILINGOVER = 0xf << 20,
+ LINK_SYNCHING = 0xc << 24
+};
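+
+/* Each state above occupies its own nibble of the 32-bit state word, so that
+ * compound checks such as link_is_up() and tipc_link_is_blocked() can test
+ * several states with a single bitwise AND
+ */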
+
+/* Link FSM state checking routines
+ */
+static int link_is_up(struct tipc_link *l)
+{
+ return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
+}
+
+static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
+ struct sk_buff_head *xmitq);
+static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
+ bool probe_reply, u16 rcvgap,
+ int tolerance, int priority,
+ struct sk_buff_head *xmitq);
+static void link_print(struct tipc_link *l, const char *str);
+static int tipc_link_build_nack_msg(struct tipc_link *l,
+ struct sk_buff_head *xmitq);
+static void tipc_link_build_bc_init_msg(struct tipc_link *l,
+ struct sk_buff_head *xmitq);
+static u8 __tipc_build_gap_ack_blks(struct tipc_gap_ack_blks *ga,
+ struct tipc_link *l, u8 start_index);
+static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr);
+static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r,
+ u16 acked, u16 gap,
+ struct tipc_gap_ack_blks *ga,
+ struct sk_buff_head *xmitq,
+ bool *retransmitted, int *rc);
+static void tipc_link_update_cwin(struct tipc_link *l, int released,
+ bool retransmitted);
+/*
+ * Simple non-static link routines (i.e. referenced outside this file)
+ */
+bool tipc_link_is_up(struct tipc_link *l)
+{
+ return link_is_up(l);
+}
+
+bool tipc_link_peer_is_down(struct tipc_link *l)
+{
+ return l->state == LINK_PEER_RESET;
+}
+
+bool tipc_link_is_reset(struct tipc_link *l)
+{
+ return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
+}
+
+bool tipc_link_is_establishing(struct tipc_link *l)
+{
+ return l->state == LINK_ESTABLISHING;
+}
+
+bool tipc_link_is_synching(struct tipc_link *l)
+{
+ return l->state == LINK_SYNCHING;
+}
+
+bool tipc_link_is_failingover(struct tipc_link *l)
+{
+ return l->state == LINK_FAILINGOVER;
+}
+
+bool tipc_link_is_blocked(struct tipc_link *l)
+{
+ return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
+}
+
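+/* The broadcast send link is the only link with no 'bc_sndlink' pointer;
+ * broadcast receive links point to themselves via 'bc_rcvlink'
+ */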
+static bool link_is_bc_sndlink(struct tipc_link *l)
+{
+ return !l->bc_sndlink;
+}
+
+static bool link_is_bc_rcvlink(struct tipc_link *l)
+{
+ return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
+}
+
+void tipc_link_set_active(struct tipc_link *l, bool active)
+{
+ l->active = active;
+}
+
+u32 tipc_link_id(struct tipc_link *l)
+{
+ return l->peer_bearer_id << 16 | l->bearer_id;
+}
+
+int tipc_link_min_win(struct tipc_link *l)
+{
+ return l->min_win;
+}
+
+int tipc_link_max_win(struct tipc_link *l)
+{
+ return l->max_win;
+}
+
+int tipc_link_prio(struct tipc_link *l)
+{
+ return l->priority;
+}
+
+unsigned long tipc_link_tolerance(struct tipc_link *l)
+{
+ return l->tolerance;
+}
+
+struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
+{
+ return l->inputq;
+}
+
+char tipc_link_plane(struct tipc_link *l)
+{
+ return l->net_plane;
+}
+
+struct net *tipc_link_net(struct tipc_link *l)
+{
+ return l->net;
+}
+
+void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
+{
+ l->peer_caps = capabilities;
+}
+
+void tipc_link_add_bc_peer(struct tipc_link *snd_l,
+ struct tipc_link *uc_l,
+ struct sk_buff_head *xmitq)
+{
+ struct tipc_link *rcv_l = uc_l->bc_rcvlink;
+
+ snd_l->ackers++;
+ rcv_l->acked = snd_l->snd_nxt - 1;
+ snd_l->state = LINK_ESTABLISHED;
+ tipc_link_build_bc_init_msg(uc_l, xmitq);
+}
+
+void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
+ struct tipc_link *rcv_l,
+ struct sk_buff_head *xmitq)
+{
+ u16 ack = snd_l->snd_nxt - 1;
+
+ snd_l->ackers--;
+ rcv_l->bc_peer_is_up = true;
+ rcv_l->state = LINK_ESTABLISHED;
+ tipc_link_bc_ack_rcv(rcv_l, ack, 0, NULL, xmitq, NULL);
+ trace_tipc_link_reset(rcv_l, TIPC_DUMP_ALL, "bclink removed!");
+ tipc_link_reset(rcv_l);
+ rcv_l->state = LINK_RESET;
+ if (!snd_l->ackers) {
+ trace_tipc_link_reset(snd_l, TIPC_DUMP_ALL, "zero ackers!");
+ tipc_link_reset(snd_l);
+ snd_l->state = LINK_RESET;
+ __skb_queue_purge(xmitq);
+ }
+}
+
+int tipc_link_bc_peers(struct tipc_link *l)
+{
+ return l->ackers;
+}
+
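+/* link_bc_rcv_gap - number of broadcast packets expected but not yet
+ * received in sequence on this link
+ */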
+static u16 link_bc_rcv_gap(struct tipc_link *l)
+{
+ struct sk_buff *skb = skb_peek(&l->deferdq);
+ u16 gap = 0;
+
+ if (more(l->snd_nxt, l->rcv_nxt))
+ gap = l->snd_nxt - l->rcv_nxt;
+ if (skb)
+ gap = buf_seqno(skb) - l->rcv_nxt;
+ return gap;
+}
+
+void tipc_link_set_mtu(struct tipc_link *l, int mtu)
+{
+ l->mtu = mtu;
+}
+
+int tipc_link_mtu(struct tipc_link *l)
+{
+ return l->mtu;
+}
+
+int tipc_link_mss(struct tipc_link *l)
+{
+#ifdef CONFIG_TIPC_CRYPTO
+ return l->mtu - INT_H_SIZE - EMSG_OVERHEAD;
+#else
+ return l->mtu - INT_H_SIZE;
+#endif
+}
+
+u16 tipc_link_rcv_nxt(struct tipc_link *l)
+{
+ return l->rcv_nxt;
+}
+
+u16 tipc_link_acked(struct tipc_link *l)
+{
+ return l->acked;
+}
+
+char *tipc_link_name(struct tipc_link *l)
+{
+ return l->name;
+}
+
+u32 tipc_link_state(struct tipc_link *l)
+{
+ return l->state;
+}
+
+/**
+ * tipc_link_create - create a new link
+ * @net: pointer to associated network namespace
+ * @if_name: associated interface name
+ * @bearer_id: id (index) of associated bearer
+ * @tolerance: link tolerance to be used by link
+ * @net_plane: network plane (A,B,C..) this link belongs to
+ * @mtu: mtu to be advertised by link
+ * @priority: priority to be used by link
+ * @min_win: minimal send window to be used by link
+ * @max_win: maximal send window to be used by link
+ * @session: session to be used by link
+ * @peer: node id of peer node
+ * @peer_caps: bitmap describing peer node capabilities
+ * @bc_sndlink: the namespace global link used for broadcast sending
+ * @bc_rcvlink: the peer specific link used for broadcast reception
+ * @inputq: queue to put messages ready for delivery
+ * @namedq: queue to put binding table update messages ready for delivery
+ * @link: return value, pointer to put the created link
+ * @self: local unicast link id
+ * @peer_id: 128-bit ID of peer
+ *
+ * Return: true if link was created, otherwise false
+ */
+bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
+ int tolerance, char net_plane, u32 mtu, int priority,
+ u32 min_win, u32 max_win, u32 session, u32 self,
+ u32 peer, u8 *peer_id, u16 peer_caps,
+ struct tipc_link *bc_sndlink,
+ struct tipc_link *bc_rcvlink,
+ struct sk_buff_head *inputq,
+ struct sk_buff_head *namedq,
+ struct tipc_link **link)
+{
+ char peer_str[NODE_ID_STR_LEN] = {0,};
+ char self_str[NODE_ID_STR_LEN] = {0,};
+ struct tipc_link *l;
+
+ l = kzalloc(sizeof(*l), GFP_ATOMIC);
+ if (!l)
+ return false;
+ *link = l;
+ l->session = session;
+
+ /* Set link name for unicast links only */
+ if (peer_id) {
+ tipc_nodeid2string(self_str, tipc_own_id(net));
+ if (strlen(self_str) > 16)
+ sprintf(self_str, "%x", self);
+ tipc_nodeid2string(peer_str, peer_id);
+ if (strlen(peer_str) > 16)
+ sprintf(peer_str, "%x", peer);
+ }
+ /* Peer i/f name will be completed by reset/activate message */
+ snprintf(l->name, sizeof(l->name), "%s:%s-%s:unknown",
+ self_str, if_name, peer_str);
+
+ strcpy(l->if_name, if_name);
+ l->addr = peer;
+ l->peer_caps = peer_caps;
+ l->net = net;
+ l->in_session = false;
+ l->bearer_id = bearer_id;
+ l->tolerance = tolerance;
+ if (bc_rcvlink)
+ bc_rcvlink->tolerance = tolerance;
+ l->net_plane = net_plane;
+ l->advertised_mtu = mtu;
+ l->mtu = mtu;
+ l->priority = priority;
+ tipc_link_set_queue_limits(l, min_win, max_win);
+ l->ackers = 1;
+ l->bc_sndlink = bc_sndlink;
+ l->bc_rcvlink = bc_rcvlink;
+ l->inputq = inputq;
+ l->namedq = namedq;
+ l->state = LINK_RESETTING;
+ __skb_queue_head_init(&l->transmq);
+ __skb_queue_head_init(&l->backlogq);
+ __skb_queue_head_init(&l->deferdq);
+ __skb_queue_head_init(&l->failover_deferdq);
+ skb_queue_head_init(&l->wakeupq);
+ skb_queue_head_init(l->inputq);
+ return true;
+}
+
+/**
+ * tipc_link_bc_create - create new link to be used for broadcast
+ * @net: pointer to associated network namespace
+ * @mtu: mtu to be used initially if no peers
+ * @min_win: minimal send window to be used by link
+ * @max_win: maximal send window to be used by link
+ * @inputq: queue to put messages ready for delivery
+ * @namedq: queue to put binding table update messages ready for delivery
+ * @link: return value, pointer to put the created link
+ * @ownnode: identity of own node
+ * @peer: node id of peer node
+ * @peer_id: 128-bit ID of peer
+ * @peer_caps: bitmap describing peer node capabilities
+ * @bc_sndlink: the namespace global link used for broadcast sending
+ *
+ * Return: true if link was created, otherwise false
+ */
+bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer, u8 *peer_id,
+ int mtu, u32 min_win, u32 max_win, u16 peer_caps,
+ struct sk_buff_head *inputq,
+ struct sk_buff_head *namedq,
+ struct tipc_link *bc_sndlink,
+ struct tipc_link **link)
+{
+ struct tipc_link *l;
+
+ if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, min_win,
+ max_win, 0, ownnode, peer, NULL, peer_caps,
+ bc_sndlink, NULL, inputq, namedq, link))
+ return false;
+
+ l = *link;
+ if (peer_id) {
+ char peer_str[NODE_ID_STR_LEN] = {0,};
+
+ tipc_nodeid2string(peer_str, peer_id);
+ if (strlen(peer_str) > 16)
+ sprintf(peer_str, "%x", peer);
+ /* Broadcast receiver link name: "broadcast-link:<peer>" */
+ snprintf(l->name, sizeof(l->name), "%s:%s", tipc_bclink_name,
+ peer_str);
+ } else {
+ strcpy(l->name, tipc_bclink_name);
+ }
+ trace_tipc_link_reset(l, TIPC_DUMP_ALL, "bclink created!");
+ tipc_link_reset(l);
+ l->state = LINK_RESET;
+ l->ackers = 0;
+ l->bc_rcvlink = l;
+
+ /* Broadcast send link is always up */
+ if (link_is_bc_sndlink(l))
+ l->state = LINK_ESTABLISHED;
+
+ /* Disable replicast if even a single peer doesn't support it */
+ if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
+ tipc_bcast_toggle_rcast(net, false);
+
+ return true;
+}
+
+/**
+ * tipc_link_fsm_evt - link finite state machine
+ * @l: pointer to link
+ * @evt: state machine event to be processed
+ */
+int tipc_link_fsm_evt(struct tipc_link *l, int evt)
+{
+ int rc = 0;
+ int old_state = l->state;
+
+ switch (l->state) {
+ case LINK_RESETTING:
+ switch (evt) {
+ case LINK_PEER_RESET_EVT:
+ l->state = LINK_PEER_RESET;
+ break;
+ case LINK_RESET_EVT:
+ l->state = LINK_RESET;
+ break;
+ case LINK_FAILURE_EVT:
+ case LINK_FAILOVER_BEGIN_EVT:
+ case LINK_ESTABLISH_EVT:
+ case LINK_FAILOVER_END_EVT:
+ case LINK_SYNCH_BEGIN_EVT:
+ case LINK_SYNCH_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case LINK_RESET:
+ switch (evt) {
+ case LINK_PEER_RESET_EVT:
+ l->state = LINK_ESTABLISHING;
+ break;
+ case LINK_FAILOVER_BEGIN_EVT:
+ l->state = LINK_FAILINGOVER;
+ break;
+ case LINK_FAILURE_EVT:
+ case LINK_RESET_EVT:
+ case LINK_ESTABLISH_EVT:
+ case LINK_FAILOVER_END_EVT:
+ break;
+ case LINK_SYNCH_BEGIN_EVT:
+ case LINK_SYNCH_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case LINK_PEER_RESET:
+ switch (evt) {
+ case LINK_RESET_EVT:
+ l->state = LINK_ESTABLISHING;
+ break;
+ case LINK_PEER_RESET_EVT:
+ case LINK_ESTABLISH_EVT:
+ case LINK_FAILURE_EVT:
+ break;
+ case LINK_SYNCH_BEGIN_EVT:
+ case LINK_SYNCH_END_EVT:
+ case LINK_FAILOVER_BEGIN_EVT:
+ case LINK_FAILOVER_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case LINK_FAILINGOVER:
+ switch (evt) {
+ case LINK_FAILOVER_END_EVT:
+ l->state = LINK_RESET;
+ break;
+ case LINK_PEER_RESET_EVT:
+ case LINK_RESET_EVT:
+ case LINK_ESTABLISH_EVT:
+ case LINK_FAILURE_EVT:
+ break;
+ case LINK_FAILOVER_BEGIN_EVT:
+ case LINK_SYNCH_BEGIN_EVT:
+ case LINK_SYNCH_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case LINK_ESTABLISHING:
+ switch (evt) {
+ case LINK_ESTABLISH_EVT:
+ l->state = LINK_ESTABLISHED;
+ break;
+ case LINK_FAILOVER_BEGIN_EVT:
+ l->state = LINK_FAILINGOVER;
+ break;
+ case LINK_RESET_EVT:
+ l->state = LINK_RESET;
+ break;
+ case LINK_FAILURE_EVT:
+ case LINK_PEER_RESET_EVT:
+ case LINK_SYNCH_BEGIN_EVT:
+ case LINK_FAILOVER_END_EVT:
+ break;
+ case LINK_SYNCH_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case LINK_ESTABLISHED:
+ switch (evt) {
+ case LINK_PEER_RESET_EVT:
+ l->state = LINK_PEER_RESET;
+ rc |= TIPC_LINK_DOWN_EVT;
+ break;
+ case LINK_FAILURE_EVT:
+ l->state = LINK_RESETTING;
+ rc |= TIPC_LINK_DOWN_EVT;
+ break;
+ case LINK_RESET_EVT:
+ l->state = LINK_RESET;
+ break;
+ case LINK_ESTABLISH_EVT:
+ case LINK_SYNCH_END_EVT:
+ break;
+ case LINK_SYNCH_BEGIN_EVT:
+ l->state = LINK_SYNCHING;
+ break;
+ case LINK_FAILOVER_BEGIN_EVT:
+ case LINK_FAILOVER_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case LINK_SYNCHING:
+ switch (evt) {
+ case LINK_PEER_RESET_EVT:
+ l->state = LINK_PEER_RESET;
+ rc |= TIPC_LINK_DOWN_EVT;
+ break;
+ case LINK_FAILURE_EVT:
+ l->state = LINK_RESETTING;
+ rc |= TIPC_LINK_DOWN_EVT;
+ break;
+ case LINK_RESET_EVT:
+ l->state = LINK_RESET;
+ break;
+ case LINK_ESTABLISH_EVT:
+ case LINK_SYNCH_BEGIN_EVT:
+ break;
+ case LINK_SYNCH_END_EVT:
+ l->state = LINK_ESTABLISHED;
+ break;
+ case LINK_FAILOVER_BEGIN_EVT:
+ case LINK_FAILOVER_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ default:
+ pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
+ }
+ trace_tipc_link_fsm(l->name, old_state, l->state, evt);
+ return rc;
+illegal_evt:
+ pr_err("Illegal FSM event %x in state %x on link %s\n",
+ evt, l->state, l->name);
+ trace_tipc_link_fsm(l->name, old_state, l->state, evt);
+ return rc;
+}
+
+/* link_profile_stats - update statistical profiling of traffic
+ */
+static void link_profile_stats(struct tipc_link *l)
+{
+ struct sk_buff *skb;
+ struct tipc_msg *msg;
+ int length;
+
+ /* Update counters used in statistical profiling of send traffic */
+ l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
+ l->stats.queue_sz_counts++;
+
+ skb = skb_peek(&l->transmq);
+ if (!skb)
+ return;
+ msg = buf_msg(skb);
+ length = msg_size(msg);
+
+ if (msg_user(msg) == MSG_FRAGMENTER) {
+ if (msg_type(msg) != FIRST_FRAGMENT)
+ return;
+ length = msg_size(msg_inner_hdr(msg));
+ }
+ l->stats.msg_lengths_total += length;
+ l->stats.msg_length_counts++;
+ if (length <= 64)
+ l->stats.msg_length_profile[0]++;
+ else if (length <= 256)
+ l->stats.msg_length_profile[1]++;
+ else if (length <= 1024)
+ l->stats.msg_length_profile[2]++;
+ else if (length <= 4096)
+ l->stats.msg_length_profile[3]++;
+ else if (length <= 16384)
+ l->stats.msg_length_profile[4]++;
+ else if (length <= 32768)
+ l->stats.msg_length_profile[5]++;
+ else
+ l->stats.msg_length_profile[6]++;
+}
+
+/**
+ * tipc_link_too_silent - check if link is "too silent"
+ * @l: tipc link to be checked
+ *
+ * Return: true if the link 'silent_intv_cnt' is about to reach the
+ * 'abort_limit' value, otherwise false
+ */
+bool tipc_link_too_silent(struct tipc_link *l)
+{
+ return (l->silent_intv_cnt + 2 > l->abort_limit);
+}
+
+/* tipc_link_timeout - perform periodic task as instructed from node timeout
+ */
+int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
+{
+ int mtyp = 0;
+ int rc = 0;
+ bool state = false;
+ bool probe = false;
+ bool setup = false;
+ u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
+ u16 bc_acked = l->bc_rcvlink->acked;
+ struct tipc_mon_state *mstate = &l->mon_state;
+
+ trace_tipc_link_timeout(l, TIPC_DUMP_NONE, " ");
+ trace_tipc_link_too_silent(l, TIPC_DUMP_ALL, " ");
+ switch (l->state) {
+ case LINK_ESTABLISHED:
+ case LINK_SYNCHING:
+ mtyp = STATE_MSG;
+ link_profile_stats(l);
+ tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
+ if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
+ return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
+ state = bc_acked != bc_snt;
+ state |= l->bc_rcvlink->rcv_unacked;
+ state |= l->rcv_unacked;
+ state |= !skb_queue_empty(&l->transmq);
+ probe = mstate->probing;
+ probe |= l->silent_intv_cnt;
+ if (probe || mstate->monitoring)
+ l->silent_intv_cnt++;
+ probe |= !skb_queue_empty(&l->deferdq);
+ if (l->snd_nxt == l->checkpoint) {
+ tipc_link_update_cwin(l, 0, 0);
+ probe = true;
+ }
+ l->checkpoint = l->snd_nxt;
+ break;
+ case LINK_RESET:
+ setup = l->rst_cnt++ <= 4;
+ setup |= !(l->rst_cnt % 16);
+ mtyp = RESET_MSG;
+ break;
+ case LINK_ESTABLISHING:
+ setup = true;
+ mtyp = ACTIVATE_MSG;
+ break;
+ case LINK_PEER_RESET:
+ case LINK_RESETTING:
+ case LINK_FAILINGOVER:
+ break;
+ default:
+ break;
+ }
+
+ if (state || probe || setup)
+ tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq);
+
+ return rc;
+}
+
+/**
+ * link_schedule_user - schedule a message sender for wakeup after congestion
+ * @l: congested link
+ * @hdr: header of message that is being sent
+ * Create pseudo msg to send back to user when congestion abates
+ */
+static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
+{
+ u32 dnode = tipc_own_addr(l->net);
+ u32 dport = msg_origport(hdr);
+ struct sk_buff *skb;
+
+ /* Create and schedule wakeup pseudo message */
+ skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
+ dnode, l->addr, dport, 0, 0);
+ if (!skb)
+ return -ENOBUFS;
+ msg_set_dest_droppable(buf_msg(skb), true);
+ TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
+ skb_queue_tail(&l->wakeupq, skb);
+ l->stats.link_congs++;
+ trace_tipc_link_conges(l, TIPC_DUMP_ALL, "wakeup scheduled!");
+ return -ELINKCONG;
+}
+
+/**
+ * link_prepare_wakeup - prepare users for wakeup after congestion
+ * @l: congested link
+ * Wake up a number of waiting users, as permitted by available space
+ * in the send queue
+ */
+static void link_prepare_wakeup(struct tipc_link *l)
+{
+ struct sk_buff_head *wakeupq = &l->wakeupq;
+ struct sk_buff_head *inputq = l->inputq;
+ struct sk_buff *skb, *tmp;
+ struct sk_buff_head tmpq;
+ int avail[5] = {0,};
+ int imp = 0;
+
+ __skb_queue_head_init(&tmpq);
+
+ for (; imp <= TIPC_SYSTEM_IMPORTANCE; imp++)
+ avail[imp] = l->backlog[imp].limit - l->backlog[imp].len;
+
+ skb_queue_walk_safe(wakeupq, skb, tmp) {
+ imp = TIPC_SKB_CB(skb)->chain_imp;
+ if (avail[imp] <= 0)
+ continue;
+ avail[imp]--;
+ __skb_unlink(skb, wakeupq);
+ __skb_queue_tail(&tmpq, skb);
+ }
+
+ spin_lock_bh(&inputq->lock);
+ skb_queue_splice_tail(&tmpq, inputq);
+ spin_unlock_bh(&inputq->lock);
+
+}
+
+/**
+ * tipc_link_set_skb_retransmit_time - set the time at which retransmission of
+ * the given skb should be next attempted
+ * @skb: skb to set a future retransmission time for
+ * @l: link the skb will be transmitted on
+ */
+static void tipc_link_set_skb_retransmit_time(struct sk_buff *skb,
+ struct tipc_link *l)
+{
+ if (link_is_bc_sndlink(l))
+ TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
+ else
+ TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
+}
+
+void tipc_link_reset(struct tipc_link *l)
+{
+ struct sk_buff_head list;
+ u32 imp;
+
+ __skb_queue_head_init(&list);
+
+ l->in_session = false;
+ /* Force re-synch of peer session number before establishing */
+ l->peer_session--;
+ l->session++;
+ l->mtu = l->advertised_mtu;
+
+ spin_lock_bh(&l->wakeupq.lock);
+ skb_queue_splice_init(&l->wakeupq, &list);
+ spin_unlock_bh(&l->wakeupq.lock);
+
+ spin_lock_bh(&l->inputq->lock);
+ skb_queue_splice_init(&list, l->inputq);
+ spin_unlock_bh(&l->inputq->lock);
+
+ __skb_queue_purge(&l->transmq);
+ __skb_queue_purge(&l->deferdq);
+ __skb_queue_purge(&l->backlogq);
+ __skb_queue_purge(&l->failover_deferdq);
+ for (imp = 0; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) {
+ l->backlog[imp].len = 0;
+ l->backlog[imp].target_bskb = NULL;
+ }
+ kfree_skb(l->reasm_buf);
+ kfree_skb(l->reasm_tnlmsg);
+ kfree_skb(l->failover_reasm_skb);
+ l->reasm_buf = NULL;
+ l->reasm_tnlmsg = NULL;
+ l->failover_reasm_skb = NULL;
+ l->rcv_unacked = 0;
+ l->snd_nxt = 1;
+ l->rcv_nxt = 1;
+ l->snd_nxt_state = 1;
+ l->rcv_nxt_state = 1;
+ l->acked = 0;
+ l->last_gap = 0;
+ kfree(l->last_ga);
+ l->last_ga = NULL;
+ l->silent_intv_cnt = 0;
+ l->rst_cnt = 0;
+ l->bc_peer_is_up = false;
+ memset(&l->mon_state, 0, sizeof(l->mon_state));
+ tipc_link_reset_stats(l);
+}
+
+/**
+ * tipc_link_xmit(): enqueue buffer list according to queue situation
+ * @l: link to use
+ * @list: chain of buffers containing message
+ * @xmitq: returned list of packets to be sent by caller
+ *
+ * Consumes the buffer chain.
+ * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
+ * Return: 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
+ */
+int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
+ struct sk_buff_head *xmitq)
+{
+ struct sk_buff_head *backlogq = &l->backlogq;
+ struct sk_buff_head *transmq = &l->transmq;
+ struct sk_buff *skb, *_skb;
+ u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
+ u16 ack = l->rcv_nxt - 1;
+ u16 seqno = l->snd_nxt;
+ int pkt_cnt = skb_queue_len(list);
+ unsigned int mss = tipc_link_mss(l);
+ unsigned int cwin = l->window;
+ unsigned int mtu = l->mtu;
+ struct tipc_msg *hdr;
+ bool new_bundle;
+ int rc = 0;
+ int imp;
+
+ if (pkt_cnt <= 0)
+ return 0;
+
+ hdr = buf_msg(skb_peek(list));
+ if (unlikely(msg_size(hdr) > mtu)) {
+ pr_warn("Too large msg, purging xmit list %d %d %d %d %d!\n",
+ skb_queue_len(list), msg_user(hdr),
+ msg_type(hdr), msg_size(hdr), mtu);
+ __skb_queue_purge(list);
+ return -EMSGSIZE;
+ }
+
+ imp = msg_importance(hdr);
+ /* Allow oversubscription of one data msg per source at congestion */
+ if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
+ if (imp == TIPC_SYSTEM_IMPORTANCE) {
+ pr_warn("%s<%s>, link overflow", link_rst_msg, l->name);
+ return -ENOBUFS;
+ }
+ rc = link_schedule_user(l, hdr);
+ }
+
+ if (pkt_cnt > 1) {
+ l->stats.sent_fragmented++;
+ l->stats.sent_fragments += pkt_cnt;
+ }
+
+ /* Prepare each packet for sending, and add to relevant queue: */
+ while ((skb = __skb_dequeue(list))) {
+ if (likely(skb_queue_len(transmq) < cwin)) {
+ hdr = buf_msg(skb);
+ msg_set_seqno(hdr, seqno);
+ msg_set_ack(hdr, ack);
+ msg_set_bcast_ack(hdr, bc_ack);
+ _skb = skb_clone(skb, GFP_ATOMIC);
+ if (!_skb) {
+ kfree_skb(skb);
+ __skb_queue_purge(list);
+ return -ENOBUFS;
+ }
+ __skb_queue_tail(transmq, skb);
+ tipc_link_set_skb_retransmit_time(skb, l);
+ __skb_queue_tail(xmitq, _skb);
+ TIPC_SKB_CB(skb)->ackers = l->ackers;
+ l->rcv_unacked = 0;
+ l->stats.sent_pkts++;
+ seqno++;
+ continue;
+ }
+ if (tipc_msg_try_bundle(l->backlog[imp].target_bskb, &skb,
+ mss, l->addr, &new_bundle)) {
+ if (skb) {
+ /* Keep a ref. to the skb for next try */
+ l->backlog[imp].target_bskb = skb;
+ l->backlog[imp].len++;
+ __skb_queue_tail(backlogq, skb);
+ } else {
+ if (new_bundle) {
+ l->stats.sent_bundles++;
+ l->stats.sent_bundled++;
+ }
+ l->stats.sent_bundled++;
+ }
+ continue;
+ }
+ l->backlog[imp].target_bskb = NULL;
+ l->backlog[imp].len += (1 + skb_queue_len(list));
+ __skb_queue_tail(backlogq, skb);
+ skb_queue_splice_tail_init(list, backlogq);
+ }
+ l->snd_nxt = seqno;
+ return rc;
+}
+
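+/* tipc_link_update_cwin - update the link send window (congestion control)
+ *
+ * Follows a slow start/congestion avoidance scheme: below 'ssthresh' the
+ * window grows by the number of newly released packets, above it by at most
+ * one packet per window's worth of acks. A retransmission or an ack that
+ * releases nothing sets 'ssthresh' to half the window (minimum 300) and
+ * shrinks the window
+ */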
+static void tipc_link_update_cwin(struct tipc_link *l, int released,
+ bool retransmitted)
+{
+ int bklog_len = skb_queue_len(&l->backlogq);
+ struct sk_buff_head *txq = &l->transmq;
+ int txq_len = skb_queue_len(txq);
+ u16 cwin = l->window;
+
+ /* Enter fast recovery */
+ if (unlikely(retransmitted)) {
+ l->ssthresh = max_t(u16, l->window / 2, 300);
+ l->window = min_t(u16, l->ssthresh, l->window);
+ return;
+ }
+ /* Enter slow start */
+ if (unlikely(!released)) {
+ l->ssthresh = max_t(u16, l->window / 2, 300);
+ l->window = l->min_win;
+ return;
+ }
+ /* Don't increase window if no pressure on the transmit queue */
+ if (txq_len + bklog_len < cwin)
+ return;
+
+ /* Don't increase window if there are holes in the transmit queue */
+ if (txq_len && l->snd_nxt - buf_seqno(skb_peek(txq)) != txq_len)
+ return;
+
+ l->cong_acks += released;
+
+ /* Slow start */
+ if (cwin <= l->ssthresh) {
+ l->window = min_t(u16, cwin + released, l->max_win);
+ return;
+ }
+ /* Congestion avoidance */
+ if (l->cong_acks < cwin)
+ return;
+ l->window = min_t(u16, ++cwin, l->max_win);
+ l->cong_acks = 0;
+}
+
+static void tipc_link_advance_backlog(struct tipc_link *l,
+ struct sk_buff_head *xmitq)
+{
+ u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
+ struct sk_buff_head *txq = &l->transmq;
+ struct sk_buff *skb, *_skb;
+ u16 ack = l->rcv_nxt - 1;
+ u16 seqno = l->snd_nxt;
+ struct tipc_msg *hdr;
+ u16 cwin = l->window;
+ u32 imp;
+
+ while (skb_queue_len(txq) < cwin) {
+ skb = skb_peek(&l->backlogq);
+ if (!skb)
+ break;
+ _skb = skb_clone(skb, GFP_ATOMIC);
+ if (!_skb)
+ break;
+ __skb_dequeue(&l->backlogq);
+ hdr = buf_msg(skb);
+ imp = msg_importance(hdr);
+ l->backlog[imp].len--;
+ if (unlikely(skb == l->backlog[imp].target_bskb))
+ l->backlog[imp].target_bskb = NULL;
+ __skb_queue_tail(&l->transmq, skb);
+ tipc_link_set_skb_retransmit_time(skb, l);
+
+ __skb_queue_tail(xmitq, _skb);
+ TIPC_SKB_CB(skb)->ackers = l->ackers;
+ msg_set_seqno(hdr, seqno);
+ msg_set_ack(hdr, ack);
+ msg_set_bcast_ack(hdr, bc_ack);
+ l->rcv_unacked = 0;
+ l->stats.sent_pkts++;
+ seqno++;
+ }
+ l->snd_nxt = seqno;
+}
+
+/**
+ * link_retransmit_failure() - Detect repeated retransmit failures
+ * @l: tipc link sender
+ * @r: tipc link receiver (= l in case of unicast)
+ * @rc: returned code
+ *
+ * Return: true if repeated retransmit failures have occurred, otherwise
+ * false
+ */
+static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r,
+ int *rc)
+{
+ struct sk_buff *skb = skb_peek(&l->transmq);
+ struct tipc_msg *hdr;
+
+ if (!skb)
+ return false;
+
+ if (!TIPC_SKB_CB(skb)->retr_cnt)
+ return false;
+
+ if (!time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp +
+ msecs_to_jiffies(r->tolerance * 10)))
+ return false;
+
+ hdr = buf_msg(skb);
+ if (link_is_bc_sndlink(l) && !less(r->acked, msg_seqno(hdr)))
+ return false;
+
+ pr_warn("Retransmission failure on link <%s>\n", l->name);
+ link_print(l, "State of link ");
+ pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
+ msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
+ pr_info("sqno %u, prev: %x, dest: %x\n",
+ msg_seqno(hdr), msg_prevnode(hdr), msg_destnode(hdr));
+ pr_info("retr_stamp %d, retr_cnt %d\n",
+ jiffies_to_msecs(TIPC_SKB_CB(skb)->retr_stamp),
+ TIPC_SKB_CB(skb)->retr_cnt);
+
+ trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
+ trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
+ trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");
+
+ if (link_is_bc_sndlink(l)) {
+ r->state = LINK_RESET;
+ *rc |= TIPC_LINK_DOWN_EVT;
+ } else {
+ *rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
+ }
+
+ return true;
+}
+
+/* tipc_data_input - deliver data and name distr msgs to upper layer
+ *
+ * Consumes buffer if message is of right type
+ * Node lock must be held
+ */
+static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
+ struct sk_buff_head *inputq)
+{
+ struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq;
+ struct tipc_msg *hdr = buf_msg(skb);
+
+ switch (msg_user(hdr)) {
+ case TIPC_LOW_IMPORTANCE:
+ case TIPC_MEDIUM_IMPORTANCE:
+ case TIPC_HIGH_IMPORTANCE:
+ case TIPC_CRITICAL_IMPORTANCE:
+ if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) {
+ skb_queue_tail(mc_inputq, skb);
+ return true;
+ }
+ fallthrough;
+ case CONN_MANAGER:
+ skb_queue_tail(inputq, skb);
+ return true;
+ case GROUP_PROTOCOL:
+ skb_queue_tail(mc_inputq, skb);
+ return true;
+ case NAME_DISTRIBUTOR:
+ l->bc_rcvlink->state = LINK_ESTABLISHED;
+ skb_queue_tail(l->namedq, skb);
+ return true;
+ case MSG_BUNDLER:
+ case TUNNEL_PROTOCOL:
+ case MSG_FRAGMENTER:
+ case BCAST_PROTOCOL:
+ return false;
+#ifdef CONFIG_TIPC_CRYPTO
+ case MSG_CRYPTO:
+ if (sysctl_tipc_key_exchange_enabled &&
+ TIPC_SKB_CB(skb)->decrypted) {
+ tipc_crypto_msg_rcv(l->net, skb);
+ return true;
+ }
+ fallthrough;
+#endif
+ default:
+ pr_warn("Dropping received illegal msg type\n");
+ kfree_skb(skb);
+ return true;
+ }
+}
+
+/* tipc_link_input - process packet that has passed link protocol check
+ *
+ * Consumes buffer
+ */
+static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
+ struct sk_buff_head *inputq,
+ struct sk_buff **reasm_skb)
+{
+ struct tipc_msg *hdr = buf_msg(skb);
+ struct sk_buff *iskb;
+ struct sk_buff_head tmpq;
+ int usr = msg_user(hdr);
+ int pos = 0;
+
+ if (usr == MSG_BUNDLER) {
+ skb_queue_head_init(&tmpq);
+ l->stats.recv_bundles++;
+ l->stats.recv_bundled += msg_msgcnt(hdr);
+ while (tipc_msg_extract(skb, &iskb, &pos))
+ tipc_data_input(l, iskb, &tmpq);
+ tipc_skb_queue_splice_tail(&tmpq, inputq);
+ return 0;
+ } else if (usr == MSG_FRAGMENTER) {
+ l->stats.recv_fragments++;
+ if (tipc_buf_append(reasm_skb, &skb)) {
+ l->stats.recv_fragmented++;
+ tipc_data_input(l, skb, inputq);
+ } else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
+ pr_warn_ratelimited("Unable to build fragment list\n");
+ return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
+ }
+ return 0;
+ } else if (usr == BCAST_PROTOCOL) {
+ tipc_bcast_lock(l->net);
+ tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
+ tipc_bcast_unlock(l->net);
+ }
+
+ kfree_skb(skb);
+ return 0;
+}
+
+/* tipc_link_tnl_rcv() - receive TUNNEL_PROTOCOL message, drop or process the
+ * inner message along with the ones in the old link's
+ * deferdq
+ * @l: tunnel link
+ * @skb: TUNNEL_PROTOCOL message
+ * @inputq: queue to put messages ready for delivery
+ */
+static int tipc_link_tnl_rcv(struct tipc_link *l, struct sk_buff *skb,
+ struct sk_buff_head *inputq)
+{
+ struct sk_buff **reasm_skb = &l->failover_reasm_skb;
+ struct sk_buff **reasm_tnlmsg = &l->reasm_tnlmsg;
+ struct sk_buff_head *fdefq = &l->failover_deferdq;
+ struct tipc_msg *hdr = buf_msg(skb);
+ struct sk_buff *iskb;
+ int ipos = 0;
+ int rc = 0;
+ u16 seqno;
+
+ if (msg_type(hdr) == SYNCH_MSG) {
+ kfree_skb(skb);
+ return 0;
+ }
+
+ /* Not a fragment? */
+ if (likely(!msg_nof_fragms(hdr))) {
+ if (unlikely(!tipc_msg_extract(skb, &iskb, &ipos))) {
+ pr_warn_ratelimited("Unable to extract msg, defq: %d\n",
+ skb_queue_len(fdefq));
+ return 0;
+ }
+ kfree_skb(skb);
+ } else {
+ /* Set fragment type for buf_append */
+ if (msg_fragm_no(hdr) == 1)
+ msg_set_type(hdr, FIRST_FRAGMENT);
+ else if (msg_fragm_no(hdr) < msg_nof_fragms(hdr))
+ msg_set_type(hdr, FRAGMENT);
+ else
+ msg_set_type(hdr, LAST_FRAGMENT);
+
+ if (!tipc_buf_append(reasm_tnlmsg, &skb)) {
+ /* Successful but non-complete reassembly? */
+ if (*reasm_tnlmsg || link_is_bc_rcvlink(l))
+ return 0;
+ pr_warn_ratelimited("Unable to reassemble tunnel msg\n");
+ return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
+ }
+ iskb = skb;
+ }
+
+ do {
+ seqno = buf_seqno(iskb);
+ if (unlikely(less(seqno, l->drop_point))) {
+ kfree_skb(iskb);
+ continue;
+ }
+ if (unlikely(seqno != l->drop_point)) {
+ __tipc_skb_queue_sorted(fdefq, seqno, iskb);
+ continue;
+ }
+
+ l->drop_point++;
+ if (!tipc_data_input(l, iskb, inputq))
+ rc |= tipc_link_input(l, iskb, inputq, reasm_skb);
+ if (unlikely(rc))
+ break;
+ } while ((iskb = __tipc_skb_dequeue(fdefq, l->drop_point)));
+
+ return rc;
+}
+
+/**
+ * tipc_get_gap_ack_blks - get Gap ACK blocks from PROTOCOL/STATE_MSG
+ * @ga: returned pointer to the Gap ACK blocks if any
+ * @l: the tipc link
+ * @hdr: the PROTOCOL/STATE_MSG header
+ * @uc: desired Gap ACK blocks type, i.e. unicast (= 1) or broadcast (= 0)
+ *
+ * Return: the total Gap ACK blocks size
+ */
+u16 tipc_get_gap_ack_blks(struct tipc_gap_ack_blks **ga, struct tipc_link *l,
+ struct tipc_msg *hdr, bool uc)
+{
+ struct tipc_gap_ack_blks *p;
+ u16 sz = 0;
+
+ /* Does peer support the Gap ACK blocks feature? */
+ if (l->peer_caps & TIPC_GAP_ACK_BLOCK) {
+ p = (struct tipc_gap_ack_blks *)msg_data(hdr);
+ sz = ntohs(p->len);
+ /* Sanity check */
+ if (sz == struct_size(p, gacks, size_add(p->ugack_cnt, p->bgack_cnt))) {
+ /* Good, check if the desired type exists */
+ if ((uc && p->ugack_cnt) || (!uc && p->bgack_cnt))
+ goto ok;
+ /* Backward compatible: peer might not support bc, but uc? */
+ } else if (uc && sz == struct_size(p, gacks, p->ugack_cnt)) {
+ if (p->ugack_cnt) {
+ p->bgack_cnt = 0;
+ goto ok;
+ }
+ }
+ }
+ /* Other cases: ignore! */
+ p = NULL;
+
+ok:
+ *ga = p;
+ return sz;
+}
+
+static u8 __tipc_build_gap_ack_blks(struct tipc_gap_ack_blks *ga,
+ struct tipc_link *l, u8 start_index)
+{
+ struct tipc_gap_ack *gacks = &ga->gacks[start_index];
+ struct sk_buff *skb = skb_peek(&l->deferdq);
+ u16 expect, seqno = 0;
+ u8 n = 0;
+
+ if (!skb)
+ return 0;
+
+ expect = buf_seqno(skb);
+ skb_queue_walk(&l->deferdq, skb) {
+ seqno = buf_seqno(skb);
+ if (unlikely(more(seqno, expect))) {
+ gacks[n].ack = htons(expect - 1);
+ gacks[n].gap = htons(seqno - expect);
+ if (++n >= MAX_GAP_ACK_BLKS / 2) {
+ pr_info_ratelimited("Gacks on %s: %d, ql: %d!\n",
+ l->name, n,
+ skb_queue_len(&l->deferdq));
+ return n;
+ }
+ } else if (unlikely(less(seqno, expect))) {
+ pr_warn("Unexpected skb in deferdq!\n");
+ continue;
+ }
+ expect = seqno + 1;
+ }
+
+ /* last block */
+ gacks[n].ack = htons(seqno);
+ gacks[n].gap = 0;
+ n++;
+ return n;
+}
+
+/* tipc_build_gap_ack_blks - build Gap ACK blocks
+ * @l: tipc unicast link
+ * @hdr: the tipc message buffer to store the Gap ACK blocks after built
+ *
+ * The function builds Gap ACK blocks for both the unicast and broadcast
+ * receiver links of a certain peer; after building, the buffer holds the data
+ * in network format, as laid out by the struct tipc_gap_ack_blks definition.
+ *
+ * returns the actual allocated memory size
+ */
+static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr)
+{
+ struct tipc_link *bcl = l->bc_rcvlink;
+ struct tipc_gap_ack_blks *ga;
+ u16 len;
+
+ ga = (struct tipc_gap_ack_blks *)msg_data(hdr);
+
+ /* Start with broadcast link first */
+ tipc_bcast_lock(bcl->net);
+ msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
+ msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
+ ga->bgack_cnt = __tipc_build_gap_ack_blks(ga, bcl, 0);
+ tipc_bcast_unlock(bcl->net);
+
+ /* Now for the unicast link, but only when an explicit NACK (seq gap) is requested */
+ ga->ugack_cnt = (msg_seq_gap(hdr)) ?
+ __tipc_build_gap_ack_blks(ga, l, ga->bgack_cnt) : 0;
+
+ /* Total len */
+ len = struct_size(ga, gacks, size_add(ga->bgack_cnt, ga->ugack_cnt));
+ ga->len = htons(len);
+ return len;
+}
+
+/* tipc_link_advance_transmq - advance the TIPC link transmq by releasing
+ * acked packets, also doing retransmissions if gaps are found
+ * @l: tipc link with transmq queue to be advanced
+ * @r: tipc link "receiver" i.e. in case of broadcast (= "l" if unicast)
+ * @acked: seqno of last packet acked by peer without any gaps before
+ * @gap: # of gap packets
+ * @ga: buffer pointer to Gap ACK blocks from peer
+ * @xmitq: queue for accumulating the retransmitted packets if any
+ * @retransmitted: returned boolean value if a retransmission is really issued
+ * @rc: returned code, e.g. TIPC_LINK_DOWN_EVT if repeated retransmit failures
+ * happen (an unlikely case)
+ *
+ * Return: the number of packets released from the link transmq
+ */
+static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r,
+ u16 acked, u16 gap,
+ struct tipc_gap_ack_blks *ga,
+ struct sk_buff_head *xmitq,
+ bool *retransmitted, int *rc)
+{
+ struct tipc_gap_ack_blks *last_ga = r->last_ga, *this_ga = NULL;
+ struct tipc_gap_ack *gacks = NULL;
+ struct sk_buff *skb, *_skb, *tmp;
+ struct tipc_msg *hdr;
+ u32 qlen = skb_queue_len(&l->transmq);
+ u16 nacked = acked, ngap = gap, gack_cnt = 0;
+ u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
+ u16 ack = l->rcv_nxt - 1;
+ u16 seqno, n = 0;
+ u16 end = r->acked, start = end, offset = r->last_gap;
+ u16 si = (last_ga) ? last_ga->start_index : 0;
+ bool is_uc = !link_is_bc_sndlink(l);
+ bool bc_has_acked = false;
+
+ trace_tipc_link_retrans(r, acked + 1, acked + gap, &l->transmq);
+
+ /* Determine Gap ACK blocks if any for the particular link */
+ if (ga && is_uc) {
+ /* Get the Gap ACKs, uc part */
+ gack_cnt = ga->ugack_cnt;
+ gacks = &ga->gacks[ga->bgack_cnt];
+ } else if (ga) {
+ /* Copy the Gap ACKs, bc part, for later renewal if needed */
+ this_ga = kmemdup(ga, struct_size(ga, gacks, ga->bgack_cnt),
+ GFP_ATOMIC);
+ if (likely(this_ga)) {
+ this_ga->start_index = 0;
+ /* Start with the bc Gap ACKs */
+ gack_cnt = this_ga->bgack_cnt;
+ gacks = &this_ga->gacks[0];
+ } else {
+ /* Hmm, we can get in trouble..., simply ignore it */
+ pr_warn_ratelimited("Ignoring bc Gap ACKs, no memory\n");
+ }
+ }
+
+ /* Advance the link transmq */
+ skb_queue_walk_safe(&l->transmq, skb, tmp) {
+ seqno = buf_seqno(skb);
+
+next_gap_ack:
+ if (less_eq(seqno, nacked)) {
+ if (is_uc)
+ goto release;
+ /* Skip packets peer has already acked */
+ if (!more(seqno, r->acked))
+ continue;
+ /* Get the next of last Gap ACK blocks */
+ while (more(seqno, end)) {
+ if (!last_ga || si >= last_ga->bgack_cnt)
+ break;
+ start = end + offset + 1;
+ end = ntohs(last_ga->gacks[si].ack);
+ offset = ntohs(last_ga->gacks[si].gap);
+ si++;
+ WARN_ONCE(more(start, end) ||
+ (!offset &&
+ si < last_ga->bgack_cnt) ||
+ si > MAX_GAP_ACK_BLKS,
+ "Corrupted Gap ACK: %d %d %d %d %d\n",
+ start, end, offset, si,
+ last_ga->bgack_cnt);
+ }
+ /* Check against the last Gap ACK block */
+ if (tipc_in_range(seqno, start, end))
+ continue;
+ /* Update/release the packet peer is acking */
+ bc_has_acked = true;
+ if (--TIPC_SKB_CB(skb)->ackers)
+ continue;
+release:
+ /* release skb */
+ __skb_unlink(skb, &l->transmq);
+ kfree_skb(skb);
+ } else if (less_eq(seqno, nacked + ngap)) {
+ /* First gap: check if repeated retrans failures? */
+ if (unlikely(seqno == acked + 1 &&
+ link_retransmit_failure(l, r, rc))) {
+ /* Ignore this bc Gap ACKs if any */
+ kfree(this_ga);
+ this_ga = NULL;
+ break;
+ }
+ /* retransmit skb if unrestricted */
+ if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
+ continue;
+ tipc_link_set_skb_retransmit_time(skb, l);
+ _skb = pskb_copy(skb, GFP_ATOMIC);
+ if (!_skb)
+ continue;
+ hdr = buf_msg(_skb);
+ msg_set_ack(hdr, ack);
+ msg_set_bcast_ack(hdr, bc_ack);
+ _skb->priority = TC_PRIO_CONTROL;
+ __skb_queue_tail(xmitq, _skb);
+ l->stats.retransmitted++;
+ if (!is_uc)
+ r->stats.retransmitted++;
+ *retransmitted = true;
+ /* Increase actual retrans counter & mark first time */
+ if (!TIPC_SKB_CB(skb)->retr_cnt++)
+ TIPC_SKB_CB(skb)->retr_stamp = jiffies;
+ } else {
+ /* retry with Gap ACK blocks if any */
+ if (n >= gack_cnt)
+ break;
+ nacked = ntohs(gacks[n].ack);
+ ngap = ntohs(gacks[n].gap);
+ n++;
+ goto next_gap_ack;
+ }
+ }
+
+ /* Renew last Gap ACK blocks for bc if needed */
+ if (bc_has_acked) {
+ if (this_ga) {
+ kfree(last_ga);
+ r->last_ga = this_ga;
+ r->last_gap = gap;
+ } else if (last_ga) {
+ if (less(acked, start)) {
+ si--;
+ offset = start - acked - 1;
+ } else if (less(acked, end)) {
+ acked = end;
+ }
+ if (si < last_ga->bgack_cnt) {
+ last_ga->start_index = si;
+ r->last_gap = offset;
+ } else {
+ kfree(last_ga);
+ r->last_ga = NULL;
+ r->last_gap = 0;
+ }
+ } else {
+ r->last_gap = 0;
+ }
+ r->acked = acked;
+ } else {
+ kfree(this_ga);
+ }
+
+ return qlen - skb_queue_len(&l->transmq);
+}
+
+/* tipc_link_build_state_msg: prepare link state message for transmission
+ *
+ * Note that sending of broadcast ack is coordinated among nodes, to reduce
+ * risk of ack storms towards the sender
+ */
+int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
+{
+ if (!l)
+ return 0;
+
+ /* Broadcast ACK must be sent via a unicast link => defer to caller */
+ if (link_is_bc_rcvlink(l)) {
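+ /* Stagger acks among nodes: only ack when the low nibble of the own
+ * node address is the bit-complement of rcv_nxt's low nibble, so only
+ * a fraction of the nodes ack any given broadcast packet
+ */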
+ if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
+ return 0;
+ l->rcv_unacked = 0;
+
+ /* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
+ l->snd_nxt = l->rcv_nxt;
+ return TIPC_LINK_SND_STATE;
+ }
+ /* Unicast ACK */
+ l->rcv_unacked = 0;
+ l->stats.sent_acks++;
+ tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
+ return 0;
+}
+
+/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
+ */
+void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
+{
+ int mtyp = RESET_MSG;
+ struct sk_buff *skb;
+
+ if (l->state == LINK_ESTABLISHING)
+ mtyp = ACTIVATE_MSG;
+
+ tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq);
+
+ /* Inform peer that this endpoint is going down if applicable */
+ skb = skb_peek_tail(xmitq);
+ if (skb && (l->state == LINK_RESET))
+ msg_set_peer_stopping(buf_msg(skb), 1);
+}
+
+/* tipc_link_build_nack_msg: prepare link nack message for transmission
+ * Note that sending of broadcast NACK is coordinated among nodes, to
+ * reduce the risk of NACK storms towards the sender
+ */
+static int tipc_link_build_nack_msg(struct tipc_link *l,
+ struct sk_buff_head *xmitq)
+{
+ u32 def_cnt = ++l->stats.deferred_recv;
+ struct sk_buff_head *dfq = &l->deferdq;
+ u32 defq_len = skb_queue_len(dfq);
+ int match1, match2;
+
+ if (link_is_bc_rcvlink(l)) {
+ match1 = def_cnt & 0xf;
+ match2 = tipc_own_addr(l->net) & 0xf;
+ if (match1 == match2)
+ return TIPC_LINK_SND_STATE;
+ return 0;
+ }
+
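+ /* Unicast: rate-limit NACKs, sending one only when the deferred queue
+ * length hits 3, 19, 35, ...
+ */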
+ if (defq_len >= 3 && !((defq_len - 3) % 16)) {
+ u16 rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
+
+ tipc_link_build_proto_msg(l, STATE_MSG, 0, 0,
+ rcvgap, 0, 0, xmitq);
+ }
+ return 0;
+}
+
+/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
+ * @l: the link that should handle the message
+ * @skb: TIPC packet
+ * @xmitq: queue to place packets to be sent after this call
+ */
+int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
+ struct sk_buff_head *xmitq)
+{
+ struct sk_buff_head *defq = &l->deferdq;
+ struct tipc_msg *hdr = buf_msg(skb);
+ u16 seqno, rcv_nxt, win_lim;
+ int released = 0;
+ int rc = 0;
+
+ /* Verify and update link state */
+ if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
+ return tipc_link_proto_rcv(l, skb, xmitq);
+
+ /* Don't send probe at next timeout expiration */
+ l->silent_intv_cnt = 0;
+
+ do {
+ hdr = buf_msg(skb);
+ seqno = msg_seqno(hdr);
+ rcv_nxt = l->rcv_nxt;
+ win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;
+
+ if (unlikely(!link_is_up(l))) {
+ if (l->state == LINK_ESTABLISHING)
+ rc = TIPC_LINK_UP_EVT;
+ kfree_skb(skb);
+ break;
+ }
+
+ /* Drop if outside receive window */
+ if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
+ l->stats.duplicates++;
+ kfree_skb(skb);
+ break;
+ }
+ released += tipc_link_advance_transmq(l, l, msg_ack(hdr), 0,
+ NULL, NULL, NULL, NULL);
+
+ /* Defer delivery if sequence gap */
+ if (unlikely(seqno != rcv_nxt)) {
+ if (!__tipc_skb_queue_sorted(defq, seqno, skb))
+ l->stats.duplicates++;
+ rc |= tipc_link_build_nack_msg(l, xmitq);
+ break;
+ }
+
+ /* Deliver packet */
+ l->rcv_nxt++;
+ l->stats.recv_pkts++;
+
+ if (unlikely(msg_user(hdr) == TUNNEL_PROTOCOL))
+ rc |= tipc_link_tnl_rcv(l, skb, l->inputq);
+ else if (!tipc_data_input(l, skb, l->inputq))
+ rc |= tipc_link_input(l, skb, l->inputq, &l->reasm_buf);
+ if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
+ rc |= tipc_link_build_state_msg(l, xmitq);
+ if (unlikely(rc & ~TIPC_LINK_SND_STATE))
+ break;
+ } while ((skb = __tipc_skb_dequeue(defq, l->rcv_nxt)));
+
+ /* Forward queues and wake up waiting users */
+ if (released) {
+ tipc_link_update_cwin(l, released, 0);
+ tipc_link_advance_backlog(l, xmitq);
+ if (unlikely(!skb_queue_empty(&l->wakeupq)))
+ link_prepare_wakeup(l);
+ }
+ return rc;
+}
+
+static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
+ bool probe_reply, u16 rcvgap,
+ int tolerance, int priority,
+ struct sk_buff_head *xmitq)
+{
+ struct tipc_mon_state *mstate = &l->mon_state;
+ struct sk_buff_head *dfq = &l->deferdq;
+ struct tipc_link *bcl = l->bc_rcvlink;
+ struct tipc_msg *hdr;
+ struct sk_buff *skb;
+ bool node_up = link_is_up(bcl);
+ u16 glen = 0, bc_rcvgap = 0;
+ int dlen = 0;
+ void *data;
+
+ /* Don't send protocol message during reset or link failover */
+ if (tipc_link_is_blocked(l))
+ return;
+
+ if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
+ return;
+
+ if ((probe || probe_reply) && !skb_queue_empty(dfq))
+ rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
+
+ skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
+ tipc_max_domain_size + MAX_GAP_ACK_BLKS_SZ,
+ l->addr, tipc_own_addr(l->net), 0, 0, 0);
+ if (!skb)
+ return;
+
+ hdr = buf_msg(skb);
+ data = msg_data(hdr);
+ msg_set_session(hdr, l->session);
+ msg_set_bearer_id(hdr, l->bearer_id);
+ msg_set_net_plane(hdr, l->net_plane);
+ msg_set_next_sent(hdr, l->snd_nxt);
+ msg_set_ack(hdr, l->rcv_nxt - 1);
+ msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
+ msg_set_bc_ack_invalid(hdr, !node_up);
+ msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
+ msg_set_link_tolerance(hdr, tolerance);
+ msg_set_linkprio(hdr, priority);
+ msg_set_redundant_link(hdr, node_up);
+ msg_set_seq_gap(hdr, 0);
+ msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
+
+ if (mtyp == STATE_MSG) {
+ if (l->peer_caps & TIPC_LINK_PROTO_SEQNO)
+ msg_set_seqno(hdr, l->snd_nxt_state++);
+ msg_set_seq_gap(hdr, rcvgap);
+ bc_rcvgap = link_bc_rcv_gap(bcl);
+ msg_set_bc_gap(hdr, bc_rcvgap);
+ msg_set_probe(hdr, probe);
+ msg_set_is_keepalive(hdr, probe || probe_reply);
+ if (l->peer_caps & TIPC_GAP_ACK_BLOCK)
+ glen = tipc_build_gap_ack_blks(l, hdr);
+ tipc_mon_prep(l->net, data + glen, &dlen, mstate, l->bearer_id);
+ msg_set_size(hdr, INT_H_SIZE + glen + dlen);
+ skb_trim(skb, INT_H_SIZE + glen + dlen);
+ l->stats.sent_states++;
+ l->rcv_unacked = 0;
+ } else {
+ /* RESET_MSG or ACTIVATE_MSG */
+ if (mtyp == ACTIVATE_MSG) {
+ msg_set_dest_session_valid(hdr, 1);
+ msg_set_dest_session(hdr, l->peer_session);
+ }
+ msg_set_max_pkt(hdr, l->advertised_mtu);
+ strcpy(data, l->if_name);
+ msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
+ skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
+ }
+ if (probe)
+ l->stats.sent_probes++;
+ if (rcvgap)
+ l->stats.sent_nacks++;
+ if (bc_rcvgap)
+ bcl->stats.sent_nacks++;
+ skb->priority = TC_PRIO_CONTROL;
+ __skb_queue_tail(xmitq, skb);
+ trace_tipc_proto_build(skb, false, l->name);
+}
+
+void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
+ struct sk_buff_head *xmitq)
+{
+ u32 onode = tipc_own_addr(l->net);
+ struct tipc_msg *hdr, *ihdr;
+ struct sk_buff_head tnlq;
+ struct sk_buff *skb;
+ u32 dnode = l->addr;
+
+ __skb_queue_head_init(&tnlq);
+ skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
+ INT_H_SIZE, BASIC_H_SIZE,
+ dnode, onode, 0, 0, 0);
+ if (!skb) {
+ pr_warn("%sunable to create tunnel packet\n", link_co_err);
+ return;
+ }
+
+ hdr = buf_msg(skb);
+ msg_set_msgcnt(hdr, 1);
+ msg_set_bearer_id(hdr, l->peer_bearer_id);
+
+ ihdr = (struct tipc_msg *)msg_data(hdr);
+ tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
+ BASIC_H_SIZE, dnode);
+ msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
+ __skb_queue_tail(&tnlq, skb);
+ tipc_link_xmit(l, &tnlq, xmitq);
+}
+
+/* tipc_link_tnl_prepare(): prepare a list of tunnel packets carrying the
+ * contents of the link's transmit and backlog queues, and send it via the
+ * tunnel link.
+ */
+void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
+ int mtyp, struct sk_buff_head *xmitq)
+{
+ struct sk_buff_head *fdefq = &tnl->failover_deferdq;
+ struct sk_buff *skb, *tnlskb;
+ struct tipc_msg *hdr, tnlhdr;
+ struct sk_buff_head *queue = &l->transmq;
+ struct sk_buff_head tmpxq, tnlq, frags;
+ u16 pktlen, pktcnt, seqno = l->snd_nxt;
+ bool pktcnt_need_update = false;
+ u16 syncpt;
+ int rc;
+
+ if (!tnl)
+ return;
+
+ __skb_queue_head_init(&tnlq);
+ /* Link Synching:
+ * From now on, send only a single ("dummy") SYNCH message to
+ * the peer. The SYNCH message does not contain any data, just
+ * a header conveying the synch point to the peer.
+ */
+ if (mtyp == SYNCH_MSG && (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
+ tnlskb = tipc_msg_create(TUNNEL_PROTOCOL, SYNCH_MSG,
+ INT_H_SIZE, 0, l->addr,
+ tipc_own_addr(l->net),
+ 0, 0, 0);
+ if (!tnlskb) {
+ pr_warn("%sunable to create dummy SYNCH_MSG\n",
+ link_co_err);
+ return;
+ }
+
+ hdr = buf_msg(tnlskb);
+ syncpt = l->snd_nxt + skb_queue_len(&l->backlogq) - 1;
+ msg_set_syncpt(hdr, syncpt);
+ msg_set_bearer_id(hdr, l->peer_bearer_id);
+ __skb_queue_tail(&tnlq, tnlskb);
+ tipc_link_xmit(tnl, &tnlq, xmitq);
+ return;
+ }
+
+ __skb_queue_head_init(&tmpxq);
+ __skb_queue_head_init(&frags);
+ /* At least one packet required for safe algorithm => add dummy */
+ skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
+ BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
+ 0, 0, TIPC_ERR_NO_PORT);
+ if (!skb) {
+ pr_warn("%sunable to create tunnel packet\n", link_co_err);
+ return;
+ }
+ __skb_queue_tail(&tnlq, skb);
+ tipc_link_xmit(l, &tnlq, &tmpxq);
+ __skb_queue_purge(&tmpxq);
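+ /* Note: sending the dummy through the old link above only serves to
+ * place at least one packet in its transmit queue; the wire copies
+ * collected in tmpxq are purged right away, so the dummy reaches the
+ * peer in tunneled form only.
+ */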
+
+ /* Initialize reusable tunnel packet header */
+ tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
+ mtyp, INT_H_SIZE, l->addr);
+ if (mtyp == SYNCH_MSG)
+ pktcnt = l->snd_nxt - buf_seqno(skb_peek(&l->transmq));
+ else
+ pktcnt = skb_queue_len(&l->transmq);
+ pktcnt += skb_queue_len(&l->backlogq);
+ msg_set_msgcnt(&tnlhdr, pktcnt);
+ msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
+tnl:
+ /* Wrap each packet into a tunnel packet */
+ skb_queue_walk(queue, skb) {
+ hdr = buf_msg(skb);
+ if (queue == &l->backlogq)
+ msg_set_seqno(hdr, seqno++);
+ pktlen = msg_size(hdr);
+
+ /* Tunnel link MTU is not large enough? This could be
+ * due to:
+ * 1) Link MTU has just changed or set differently;
+ * 2) Or FAILOVER on the top of a SYNCH message
+ *
+ * The 2nd case should not happen if peer supports
+ * TIPC_TUNNEL_ENHANCED
+ */
+ if (pktlen > tnl->mtu - INT_H_SIZE) {
+ if (mtyp == FAILOVER_MSG &&
+ (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
+ rc = tipc_msg_fragment(skb, &tnlhdr, tnl->mtu,
+ &frags);
+ if (rc) {
+ pr_warn("%sunable to frag msg: rc %d\n",
+ link_co_err, rc);
+ return;
+ }
+ pktcnt += skb_queue_len(&frags) - 1;
+ pktcnt_need_update = true;
+ skb_queue_splice_tail_init(&frags, &tnlq);
+ continue;
+ }
+ /* Unfortunately, the peer doesn't support TIPC_TUNNEL_ENHANCED,
+ * so just warn and return!
+ */
+ pr_warn_ratelimited("%stoo large msg <%d, %d>: %d!\n",
+ link_co_err, msg_user(hdr),
+ msg_type(hdr), msg_size(hdr));
+ return;
+ }
+
+ msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
+ tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
+ if (!tnlskb) {
+ pr_warn("%sunable to send packet\n", link_co_err);
+ return;
+ }
+ skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
+ skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
+ __skb_queue_tail(&tnlq, tnlskb);
+ }
+ if (queue != &l->backlogq) {
+ queue = &l->backlogq;
+ goto tnl;
+ }
+
+ if (pktcnt_need_update)
+ skb_queue_walk(&tnlq, skb) {
+ hdr = buf_msg(skb);
+ msg_set_msgcnt(hdr, pktcnt);
+ }
+
+ tipc_link_xmit(tnl, &tnlq, xmitq);
+
+ if (mtyp == FAILOVER_MSG) {
+ tnl->drop_point = l->rcv_nxt;
+ tnl->failover_reasm_skb = l->reasm_buf;
+ l->reasm_buf = NULL;
+
+ /* Failover the link's deferdq */
+ if (unlikely(!skb_queue_empty(fdefq))) {
+ pr_warn("Link failover deferdq not empty: %d!\n",
+ skb_queue_len(fdefq));
+ __skb_queue_purge(fdefq);
+ }
+ skb_queue_splice_init(&l->deferdq, fdefq);
+ }
+}
+
+/**
+ * tipc_link_failover_prepare() - prepare tnl for link failover
+ *
+ * This is a special version of its precursor, tipc_link_tnl_prepare();
+ * see tipc_node_link_failover() for details.
+ *
+ * @l: failover link
+ * @tnl: tunnel link
+ * @xmitq: queue for messages to be xmited
+ */
+void tipc_link_failover_prepare(struct tipc_link *l, struct tipc_link *tnl,
+ struct sk_buff_head *xmitq)
+{
+ struct sk_buff_head *fdefq = &tnl->failover_deferdq;
+
+ tipc_link_create_dummy_tnl_msg(tnl, xmitq);
+
+ /* This failover link endpoint was never established before,
+ * so it has not received anything from peer.
+ * Otherwise, it must be a normal failover situation or the
+ * node has entered SELF_DOWN_PEER_LEAVING and both peer nodes
+ * would have to start over from scratch instead.
+ */
+ tnl->drop_point = 1;
+ tnl->failover_reasm_skb = NULL;
+
+ /* Initiate the link's failover deferdq */
+ if (unlikely(!skb_queue_empty(fdefq))) {
+ pr_warn("Link failover deferdq not empty: %d!\n",
+ skb_queue_len(fdefq));
+ __skb_queue_purge(fdefq);
+ }
+}
+
+/* tipc_link_validate_msg(): validate message against current link state
+ * Returns true if message should be accepted, otherwise false
+ */
+bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
+{
+ u16 curr_session = l->peer_session;
+ u16 session = msg_session(hdr);
+ int mtyp = msg_type(hdr);
+
+ if (msg_user(hdr) != LINK_PROTOCOL)
+ return true;
+
+ switch (mtyp) {
+ case RESET_MSG:
+ if (!l->in_session)
+ return true;
+ /* Accept only RESET with new session number */
+ return more(session, curr_session);
+ case ACTIVATE_MSG:
+ if (!l->in_session)
+ return true;
+ /* Accept only ACTIVATE with new or current session number */
+ return !less(session, curr_session);
+ case STATE_MSG:
+ /* Accept only STATE with current session number */
+ if (!l->in_session)
+ return false;
+ if (session != curr_session)
+ return false;
+ /* Extra sanity check */
+ if (!link_is_up(l) && msg_ack(hdr))
+ return false;
+ if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
+ return true;
+ /* Accept only STATE with new sequence number */
+ return !less(msg_seqno(hdr), l->rcv_nxt_state);
+ default:
+ return false;
+ }
+}
+
+/* tipc_link_proto_rcv(): receive link level protocol message :
+ * Note that network plane id propagates through the network, and may
+ * change at any time. The node with lowest numerical id determines
+ * network plane
+ */
+static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
+ struct sk_buff_head *xmitq)
+{
+ struct tipc_msg *hdr = buf_msg(skb);
+ struct tipc_gap_ack_blks *ga = NULL;
+ bool reply = msg_probe(hdr), retransmitted = false;
+ u32 dlen = msg_data_sz(hdr), glen = 0, msg_max;
+ u16 peers_snd_nxt = msg_next_sent(hdr);
+ u16 peers_tol = msg_link_tolerance(hdr);
+ u16 peers_prio = msg_linkprio(hdr);
+ u16 gap = msg_seq_gap(hdr);
+ u16 ack = msg_ack(hdr);
+ u16 rcv_nxt = l->rcv_nxt;
+ u16 rcvgap = 0;
+ int mtyp = msg_type(hdr);
+ int rc = 0, released;
+ char *if_name;
+ void *data;
+
+ trace_tipc_proto_rcv(skb, false, l->name);
+
+ if (dlen > U16_MAX)
+ goto exit;
+
+ if (tipc_link_is_blocked(l) || !xmitq)
+ goto exit;
+
+ if (tipc_own_addr(l->net) > msg_prevnode(hdr))
+ l->net_plane = msg_net_plane(hdr);
+
+ if (skb_linearize(skb))
+ goto exit;
+
+ hdr = buf_msg(skb);
+ data = msg_data(hdr);
+
+ if (!tipc_link_validate_msg(l, hdr)) {
+ trace_tipc_skb_dump(skb, false, "PROTO invalid (1)!");
+ trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (1)!");
+ goto exit;
+ }
+
+ switch (mtyp) {
+ case RESET_MSG:
+ case ACTIVATE_MSG:
+ msg_max = msg_max_pkt(hdr);
+ if (msg_max < tipc_bearer_min_mtu(l->net, l->bearer_id))
+ break;
+ /* Complete own link name with peer's interface name */
+ if_name = strrchr(l->name, ':') + 1;
+ if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
+ break;
+ if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
+ break;
+ strncpy(if_name, data, TIPC_MAX_IF_NAME);
+
+ /* Update own tolerance if peer indicates a non-zero value */
+ if (tipc_in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
+ l->tolerance = peers_tol;
+ l->bc_rcvlink->tolerance = peers_tol;
+ }
+ /* Update own priority if peer's priority is higher */
+ if (tipc_in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
+ l->priority = peers_prio;
+
+ /* If peer is going down we want full re-establish cycle */
+ if (msg_peer_stopping(hdr)) {
+ rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
+ break;
+ }
+
+ /* If this endpoint was re-created while peer was ESTABLISHING
+ * it doesn't know current session number. Force re-synch.
+ */
+ if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) &&
+ l->session != msg_dest_session(hdr)) {
+ if (less(l->session, msg_dest_session(hdr)))
+ l->session = msg_dest_session(hdr) + 1;
+ break;
+ }
+
+ /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
+ if (mtyp == RESET_MSG || !link_is_up(l))
+ rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
+
+ /* ACTIVATE_MSG takes up link if it was already locally reset */
+ if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
+ rc = TIPC_LINK_UP_EVT;
+
+ l->peer_session = msg_session(hdr);
+ l->in_session = true;
+ l->peer_bearer_id = msg_bearer_id(hdr);
+ if (l->mtu > msg_max)
+ l->mtu = msg_max;
+ break;
+
+ case STATE_MSG:
+ /* Validate Gap ACK blocks, drop if invalid */
+ glen = tipc_get_gap_ack_blks(&ga, l, hdr, true);
+ if (glen > dlen)
+ break;
+
+ l->rcv_nxt_state = msg_seqno(hdr) + 1;
+
+ /* Update own tolerance if peer indicates a non-zero value */
+ if (tipc_in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
+ l->tolerance = peers_tol;
+ l->bc_rcvlink->tolerance = peers_tol;
+ }
+ /* Update own prio if peer indicates a different value */
+ if ((peers_prio != l->priority) &&
+ tipc_in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
+ l->priority = peers_prio;
+ rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
+ }
+
+ l->silent_intv_cnt = 0;
+ l->stats.recv_states++;
+ if (msg_probe(hdr))
+ l->stats.recv_probes++;
+
+ if (!link_is_up(l)) {
+ if (l->state == LINK_ESTABLISHING)
+ rc = TIPC_LINK_UP_EVT;
+ break;
+ }
+
+ tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr,
+ &l->mon_state, l->bearer_id);
+
+ /* Send NACK if peer has sent pkts we haven't received yet */
+ if ((reply || msg_is_keepalive(hdr)) &&
+ more(peers_snd_nxt, rcv_nxt) &&
+ !tipc_link_is_synching(l) &&
+ skb_queue_empty(&l->deferdq))
+ rcvgap = peers_snd_nxt - l->rcv_nxt;
+ if (rcvgap || reply)
+ tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
+ rcvgap, 0, 0, xmitq);
+
+ released = tipc_link_advance_transmq(l, l, ack, gap, ga, xmitq,
+ &retransmitted, &rc);
+ if (gap)
+ l->stats.recv_nacks++;
+ if (released || retransmitted)
+ tipc_link_update_cwin(l, released, retransmitted);
+ if (released)
+ tipc_link_advance_backlog(l, xmitq);
+ if (unlikely(!skb_queue_empty(&l->wakeupq)))
+ link_prepare_wakeup(l);
+ }
+exit:
+ kfree_skb(skb);
+ return rc;
+}
+
+/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
+ */
+static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
+ u16 peers_snd_nxt,
+ struct sk_buff_head *xmitq)
+{
+ struct sk_buff *skb;
+ struct tipc_msg *hdr;
+ struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
+ u16 ack = l->rcv_nxt - 1;
+ u16 gap_to = peers_snd_nxt - 1;
+
+ skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
+ 0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
+ if (!skb)
+ return false;
+ hdr = buf_msg(skb);
+ msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
+ msg_set_bcast_ack(hdr, ack);
+ msg_set_bcgap_after(hdr, ack);
+ if (dfrd_skb)
+ gap_to = buf_seqno(dfrd_skb) - 1;
+ msg_set_bcgap_to(hdr, gap_to);
+ msg_set_non_seq(hdr, bcast);
+ __skb_queue_tail(xmitq, skb);
+ return true;
+}
+
+/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
+ *
+ * Give a newly added peer node the sequence number where it should
+ * start receiving and acking broadcast packets.
+ */
+static void tipc_link_build_bc_init_msg(struct tipc_link *l,
+ struct sk_buff_head *xmitq)
+{
+ struct sk_buff_head list;
+
+ __skb_queue_head_init(&list);
+ if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
+ return;
+ msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
+ tipc_link_xmit(l, &list, xmitq);
+}
+
+/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
+ */
+void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
+{
+ int mtyp = msg_type(hdr);
+ u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
+
+ if (link_is_up(l))
+ return;
+
+ if (msg_user(hdr) == BCAST_PROTOCOL) {
+ l->rcv_nxt = peers_snd_nxt;
+ l->state = LINK_ESTABLISHED;
+ return;
+ }
+
+ if (l->peer_caps & TIPC_BCAST_SYNCH)
+ return;
+
+ if (msg_peer_node_is_up(hdr))
+ return;
+
+ /* Compatibility: accept older, less safe initial synch data */
+ if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
+ l->rcv_nxt = peers_snd_nxt;
+}
+
+/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
+ */
+int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
+ struct sk_buff_head *xmitq)
+{
+ u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
+ int rc = 0;
+
+ if (!link_is_up(l))
+ return rc;
+
+ if (!msg_peer_node_is_up(hdr))
+ return rc;
+
+ /* Open when peer acknowledges our bcast init msg (pkt #1) */
+ if (msg_ack(hdr))
+ l->bc_peer_is_up = true;
+
+ if (!l->bc_peer_is_up)
+ return rc;
+
+ /* Ignore if peers_snd_nxt goes beyond receive window */
+ if (more(peers_snd_nxt, l->rcv_nxt + l->window))
+ return rc;
+
+ l->snd_nxt = peers_snd_nxt;
+ if (link_bc_rcv_gap(l))
+ rc |= TIPC_LINK_SND_STATE;
+
+ /* Return now if sender supports nack via STATE messages */
+ if (l->peer_caps & TIPC_BCAST_STATE_NACK)
+ return rc;
+
+ /* Otherwise, be backwards compatible */
+
+ if (!more(peers_snd_nxt, l->rcv_nxt)) {
+ l->nack_state = BC_NACK_SND_CONDITIONAL;
+ return 0;
+ }
+
+ /* Don't NACK if one was recently sent or peeked */
+ if (l->nack_state == BC_NACK_SND_SUPPRESS) {
+ l->nack_state = BC_NACK_SND_UNCONDITIONAL;
+ return 0;
+ }
+
+ /* Conditionally delay NACK sending until next synch rcv */
+ if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
+ l->nack_state = BC_NACK_SND_UNCONDITIONAL;
+ if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
+ return 0;
+ }
+
+ /* Send NACK now but suppress next one */
+ tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
+ l->nack_state = BC_NACK_SND_SUPPRESS;
+ return 0;
+}
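+
+/* For legacy peers the NACK decision above runs a small suppression state
+ * machine: BC_NACK_SND_CONDITIONAL defers the NACK to the next synch
+ * reception, BC_NACK_SND_UNCONDITIONAL lets it be sent, and
+ * BC_NACK_SND_SUPPRESS (set right after sending, or when another node's
+ * NACK is overheard in tipc_link_bc_nack_rcv()) swallows the next
+ * opportunity so the sender is not flooded with duplicate NACKs.
+ */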
+
+int tipc_link_bc_ack_rcv(struct tipc_link *r, u16 acked, u16 gap,
+ struct tipc_gap_ack_blks *ga,
+ struct sk_buff_head *xmitq,
+ struct sk_buff_head *retrq)
+{
+ struct tipc_link *l = r->bc_sndlink;
+ bool unused = false;
+ int rc = 0;
+
+ if (!link_is_up(r) || !r->bc_peer_is_up)
+ return 0;
+
+ if (gap) {
+ l->stats.recv_nacks++;
+ r->stats.recv_nacks++;
+ }
+
+ if (less(acked, r->acked) || (acked == r->acked && !gap && !ga))
+ return 0;
+
+ trace_tipc_link_bc_ack(r, acked, gap, &l->transmq);
+ tipc_link_advance_transmq(l, r, acked, gap, ga, retrq, &unused, &rc);
+
+ tipc_link_advance_backlog(l, xmitq);
+ if (unlikely(!skb_queue_empty(&l->wakeupq)))
+ link_prepare_wakeup(l);
+
+ return rc;
+}
+
+/* tipc_link_bc_nack_rcv(): receive broadcast nack message
+ * This function is here for backwards compatibility, since
+ * no BCAST_PROTOCOL/STATE messages occur from TIPC v2.5.
+ */
+int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
+ struct sk_buff_head *xmitq)
+{
+ struct tipc_msg *hdr = buf_msg(skb);
+ u32 dnode = msg_destnode(hdr);
+ int mtyp = msg_type(hdr);
+ u16 acked = msg_bcast_ack(hdr);
+ u16 from = acked + 1;
+ u16 to = msg_bcgap_to(hdr);
+ u16 peers_snd_nxt = to + 1;
+ int rc = 0;
+
+ kfree_skb(skb);
+
+ if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
+ return 0;
+
+ if (mtyp != STATE_MSG)
+ return 0;
+
+ if (dnode == tipc_own_addr(l->net)) {
+ rc = tipc_link_bc_ack_rcv(l, acked, to - acked, NULL, xmitq,
+ xmitq);
+ l->stats.recv_nacks++;
+ return rc;
+ }
+
+ /* Msg for other node => suppress own NACK at next sync if applicable */
+ if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
+ l->nack_state = BC_NACK_SND_SUPPRESS;
+
+ return 0;
+}
+
+void tipc_link_set_queue_limits(struct tipc_link *l, u32 min_win, u32 max_win)
+{
+ int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE);
+
+ l->min_win = min_win;
+ l->ssthresh = max_win;
+ l->max_win = max_win;
+ l->window = min_win;
+ l->backlog[TIPC_LOW_IMPORTANCE].limit = min_win * 2;
+ l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = min_win * 4;
+ l->backlog[TIPC_HIGH_IMPORTANCE].limit = min_win * 6;
+ l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = min_win * 8;
+ l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk;
+}
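+
+/* Illustration of the limits set above (just the arithmetic of
+ * tipc_link_set_queue_limits(), not an extra tunable): with min_win = 50
+ * the per-importance backlog limits become
+ *
+ *   LOW: 100, MEDIUM: 200, HIGH: 300, CRITICAL: 400 packets,
+ *
+ * while the SYSTEM limit is sized so that a full bulk name table
+ * distribution (TIPC_MAX_PUBL publications of ITEM_SIZE bytes each)
+ * roughly fits into the backlog at the current link MTU.
+ */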
+
+/**
+ * tipc_link_reset_stats - reset link statistics
+ * @l: pointer to link
+ */
+void tipc_link_reset_stats(struct tipc_link *l)
+{
+ memset(&l->stats, 0, sizeof(l->stats));
+}
+
+static void link_print(struct tipc_link *l, const char *str)
+{
+ struct sk_buff *hskb = skb_peek(&l->transmq);
+ u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
+ u16 tail = l->snd_nxt - 1;
+
+ pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
+ pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
+ skb_queue_len(&l->transmq), head, tail,
+ skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
+}
+
+/* Parse and validate nested (link) properties valid for media, bearer and link
+ */
+int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
+{
+ int err;
+
+ err = nla_parse_nested_deprecated(props, TIPC_NLA_PROP_MAX, prop,
+ tipc_nl_prop_policy, NULL);
+ if (err)
+ return err;
+
+ if (props[TIPC_NLA_PROP_PRIO]) {
+ u32 prio;
+
+ prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
+ if (prio > TIPC_MAX_LINK_PRI)
+ return -EINVAL;
+ }
+
+ if (props[TIPC_NLA_PROP_TOL]) {
+ u32 tol;
+
+ tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
+ if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
+ return -EINVAL;
+ }
+
+ if (props[TIPC_NLA_PROP_WIN]) {
+ u32 max_win;
+
+ max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
+ if (max_win < TIPC_DEF_LINK_WIN || max_win > TIPC_MAX_LINK_WIN)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
+{
+ int i;
+ struct nlattr *stats;
+
+ struct nla_map {
+ u32 key;
+ u32 val;
+ };
+
+ struct nla_map map[] = {
+ {TIPC_NLA_STATS_RX_INFO, 0},
+ {TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
+ {TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
+ {TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
+ {TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
+ {TIPC_NLA_STATS_TX_INFO, 0},
+ {TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
+ {TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
+ {TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
+ {TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
+ {TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
+ s->msg_length_counts : 1},
+ {TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
+ {TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
+ {TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
+ {TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
+ {TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
+ {TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
+ {TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
+ {TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
+ {TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
+ {TIPC_NLA_STATS_RX_STATES, s->recv_states},
+ {TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
+ {TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
+ {TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
+ {TIPC_NLA_STATS_TX_STATES, s->sent_states},
+ {TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
+ {TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
+ {TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
+ {TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
+ {TIPC_NLA_STATS_DUPLICATES, s->duplicates},
+ {TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
+ {TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
+ {TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
+ (s->accu_queue_sz / s->queue_sz_counts) : 0}
+ };
+
+ stats = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
+ if (!stats)
+ return -EMSGSIZE;
+
+ for (i = 0; i < ARRAY_SIZE(map); i++)
+ if (nla_put_u32(skb, map[i].key, map[i].val))
+ goto msg_full;
+
+ nla_nest_end(skb, stats);
+
+ return 0;
+msg_full:
+ nla_nest_cancel(skb, stats);
+
+ return -EMSGSIZE;
+}
+
+/* Caller should hold appropriate locks to protect the link */
+int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
+ struct tipc_link *link, int nlflags)
+{
+ u32 self = tipc_own_addr(net);
+ struct nlattr *attrs;
+ struct nlattr *prop;
+ void *hdr;
+ int err;
+
+ hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
+ nlflags, TIPC_NL_LINK_GET);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
+ if (!attrs)
+ goto msg_full;
+
+ if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
+ goto attr_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, tipc_cluster_mask(self)))
+ goto attr_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
+ goto attr_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
+ goto attr_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
+ goto attr_msg_full;
+
+ if (tipc_link_is_up(link))
+ if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
+ goto attr_msg_full;
+ if (link->active)
+ if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
+ goto attr_msg_full;
+
+ prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
+ if (!prop)
+ goto attr_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
+ goto prop_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
+ goto prop_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
+ link->window))
+ goto prop_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
+ goto prop_msg_full;
+ nla_nest_end(msg->skb, prop);
+
+ err = __tipc_nl_add_stats(msg->skb, &link->stats);
+ if (err)
+ goto attr_msg_full;
+
+ nla_nest_end(msg->skb, attrs);
+ genlmsg_end(msg->skb, hdr);
+
+ return 0;
+
+prop_msg_full:
+ nla_nest_cancel(msg->skb, prop);
+attr_msg_full:
+ nla_nest_cancel(msg->skb, attrs);
+msg_full:
+ genlmsg_cancel(msg->skb, hdr);
+
+ return -EMSGSIZE;
+}
+
+static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
+ struct tipc_stats *stats)
+{
+ int i;
+ struct nlattr *nest;
+
+ struct nla_map {
+ __u32 key;
+ __u32 val;
+ };
+
+ struct nla_map map[] = {
+ {TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
+ {TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
+ {TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
+ {TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
+ {TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
+ {TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
+ {TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
+ {TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
+ {TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
+ {TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
+ {TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
+ {TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
+ {TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
+ {TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
+ {TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
+ {TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
+ {TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
+ {TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
+ {TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
+ (stats->accu_queue_sz / stats->queue_sz_counts) : 0}
+ };
+
+ nest = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
+ if (!nest)
+ return -EMSGSIZE;
+
+ for (i = 0; i < ARRAY_SIZE(map); i++)
+ if (nla_put_u32(skb, map[i].key, map[i].val))
+ goto msg_full;
+
+ nla_nest_end(skb, nest);
+
+ return 0;
+msg_full:
+ nla_nest_cancel(skb, nest);
+
+ return -EMSGSIZE;
+}
+
+int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg,
+ struct tipc_link *bcl)
+{
+ int err;
+ void *hdr;
+ struct nlattr *attrs;
+ struct nlattr *prop;
+ u32 bc_mode = tipc_bcast_get_mode(net);
+ u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net);
+
+ if (!bcl)
+ return 0;
+
+ tipc_bcast_lock(net);
+
+ hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
+ NLM_F_MULTI, TIPC_NL_LINK_GET);
+ if (!hdr) {
+ tipc_bcast_unlock(net);
+ return -EMSGSIZE;
+ }
+
+ attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
+ if (!attrs)
+ goto msg_full;
+
+ /* The broadcast link is always up */
+ if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
+ goto attr_msg_full;
+
+ if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
+ goto attr_msg_full;
+ if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
+ goto attr_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
+ goto attr_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
+ goto attr_msg_full;
+
+ prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
+ if (!prop)
+ goto attr_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->max_win))
+ goto prop_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST, bc_mode))
+ goto prop_msg_full;
+ if (bc_mode & BCLINK_MODE_SEL)
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST_RATIO,
+ bc_ratio))
+ goto prop_msg_full;
+ nla_nest_end(msg->skb, prop);
+
+ err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
+ if (err)
+ goto attr_msg_full;
+
+ tipc_bcast_unlock(net);
+ nla_nest_end(msg->skb, attrs);
+ genlmsg_end(msg->skb, hdr);
+
+ return 0;
+
+prop_msg_full:
+ nla_nest_cancel(msg->skb, prop);
+attr_msg_full:
+ nla_nest_cancel(msg->skb, attrs);
+msg_full:
+ tipc_bcast_unlock(net);
+ genlmsg_cancel(msg->skb, hdr);
+
+ return -EMSGSIZE;
+}
+
+void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
+ struct sk_buff_head *xmitq)
+{
+ l->tolerance = tol;
+ if (l->bc_rcvlink)
+ l->bc_rcvlink->tolerance = tol;
+ if (link_is_up(l))
+ tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
+}
+
+void tipc_link_set_prio(struct tipc_link *l, u32 prio,
+ struct sk_buff_head *xmitq)
+{
+ l->priority = prio;
+ tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq);
+}
+
+void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
+{
+ l->abort_limit = limit;
+}
+
+/**
+ * tipc_link_dump - dump TIPC link data
+ * @l: tipc link to be dumped
+ * @dqueues: bitmask selecting which link queues to dump:
+ * - TIPC_DUMP_NONE: don't dump link queues
+ * - TIPC_DUMP_TRANSMQ: dump link transmq queue
+ * - TIPC_DUMP_BACKLOGQ: dump link backlog queue
+ * - TIPC_DUMP_DEFERDQ: dump link deferred (deferdq) queue
+ * - TIPC_DUMP_INPUTQ: dump link input queue
+ * - TIPC_DUMP_WAKEUP: dump link wakeup queue
+ * - TIPC_DUMP_ALL: dump all the link queues above
+ * @buf: buffer into which the formatted dump data is written
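+ *
+ * A typical call, e.g. from the trace_tipc_link_dump() tracepoint (the
+ * buffer sizing is an assumption based on how this function bounds its
+ * own output with LINK_LMIN/LINK_LMAX):
+ *
+ *   char buf[LINK_LMAX];
+ *   int len = tipc_link_dump(l, TIPC_DUMP_ALL, buf);
+ *
+ * Return: number of characters written into @buf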
+ */
+int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf)
+{
+ int i = 0;
+ size_t sz = (dqueues) ? LINK_LMAX : LINK_LMIN;
+ struct sk_buff_head *list;
+ struct sk_buff *hskb, *tskb;
+ u32 len;
+
+ if (!l) {
+ i += scnprintf(buf, sz, "link data: (null)\n");
+ return i;
+ }
+
+ i += scnprintf(buf, sz, "link data: %x", l->addr);
+ i += scnprintf(buf + i, sz - i, " %x", l->state);
+ i += scnprintf(buf + i, sz - i, " %u", l->in_session);
+ i += scnprintf(buf + i, sz - i, " %u", l->session);
+ i += scnprintf(buf + i, sz - i, " %u", l->peer_session);
+ i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt);
+ i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt);
+ i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt_state);
+ i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt_state);
+ i += scnprintf(buf + i, sz - i, " %x", l->peer_caps);
+ i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt);
+ i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt);
+ i += scnprintf(buf + i, sz - i, " %u", 0);
+ i += scnprintf(buf + i, sz - i, " %u", 0);
+ i += scnprintf(buf + i, sz - i, " %u", l->acked);
+
+ list = &l->transmq;
+ len = skb_queue_len(list);
+ hskb = skb_peek(list);
+ tskb = skb_peek_tail(list);
+ i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
+ (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
+ (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
+
+ list = &l->deferdq;
+ len = skb_queue_len(list);
+ hskb = skb_peek(list);
+ tskb = skb_peek_tail(list);
+ i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
+ (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
+ (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
+
+ list = &l->backlogq;
+ len = skb_queue_len(list);
+ hskb = skb_peek(list);
+ tskb = skb_peek_tail(list);
+ i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
+ (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
+ (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
+
+ list = l->inputq;
+ len = skb_queue_len(list);
+ hskb = skb_peek(list);
+ tskb = skb_peek_tail(list);
+ i += scnprintf(buf + i, sz - i, " | %u %u %u\n", len,
+ (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
+ (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
+
+ if (dqueues & TIPC_DUMP_TRANSMQ) {
+ i += scnprintf(buf + i, sz - i, "transmq: ");
+ i += tipc_list_dump(&l->transmq, false, buf + i);
+ }
+ if (dqueues & TIPC_DUMP_BACKLOGQ) {
+ i += scnprintf(buf + i, sz - i,
+ "backlogq: <%u %u %u %u %u>, ",
+ l->backlog[TIPC_LOW_IMPORTANCE].len,
+ l->backlog[TIPC_MEDIUM_IMPORTANCE].len,
+ l->backlog[TIPC_HIGH_IMPORTANCE].len,
+ l->backlog[TIPC_CRITICAL_IMPORTANCE].len,
+ l->backlog[TIPC_SYSTEM_IMPORTANCE].len);
+ i += tipc_list_dump(&l->backlogq, false, buf + i);
+ }
+ if (dqueues & TIPC_DUMP_DEFERDQ) {
+ i += scnprintf(buf + i, sz - i, "deferdq: ");
+ i += tipc_list_dump(&l->deferdq, false, buf + i);
+ }
+ if (dqueues & TIPC_DUMP_INPUTQ) {
+ i += scnprintf(buf + i, sz - i, "inputq: ");
+ i += tipc_list_dump(l->inputq, false, buf + i);
+ }
+ if (dqueues & TIPC_DUMP_WAKEUP) {
+ i += scnprintf(buf + i, sz - i, "wakeup: ");
+ i += tipc_list_dump(&l->wakeupq, false, buf + i);
+ }
+
+ return i;
+}
diff --git a/net/tipc/link.h b/net/tipc/link.h
new file mode 100644
index 0000000000..d80f5649b3
--- /dev/null
+++ b/net/tipc/link.h
@@ -0,0 +1,158 @@
+/*
+ * net/tipc/link.h: Include file for TIPC link code
+ *
+ * Copyright (c) 1995-2006, 2013-2014, Ericsson AB
+ * Copyright (c) 2004-2005, 2010-2011, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_LINK_H
+#define _TIPC_LINK_H
+
+#include <net/genetlink.h>
+#include "msg.h"
+#include "node.h"
+
+/* TIPC-specific error codes
+*/
+#define ELINKCONG EAGAIN /* link congestion <=> resource unavailable */
+
+/* Link FSM events:
+ */
+enum {
+ LINK_ESTABLISH_EVT = 0xec1ab1e,
+ LINK_PEER_RESET_EVT = 0x9eed0e,
+ LINK_FAILURE_EVT = 0xfa110e,
+ LINK_RESET_EVT = 0x10ca1d0e,
+ LINK_FAILOVER_BEGIN_EVT = 0xfa110bee,
+ LINK_FAILOVER_END_EVT = 0xfa110ede,
+ LINK_SYNCH_BEGIN_EVT = 0xc1ccbee,
+ LINK_SYNCH_END_EVT = 0xc1ccede
+};
+
+/* Events returned from link at packet reception or at timeout
+ */
+enum {
+ TIPC_LINK_UP_EVT = 1,
+ TIPC_LINK_DOWN_EVT = (1 << 1),
+ TIPC_LINK_SND_STATE = (1 << 2)
+};
+
+/* Starting value for maximum packet size negotiation on unicast links
+ * (unless bearer MTU is less)
+ */
+#define MAX_PKT_DEFAULT 1500
+
+bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
+ int tolerance, char net_plane, u32 mtu, int priority,
+ u32 min_win, u32 max_win, u32 session, u32 ownnode,
+ u32 peer, u8 *peer_id, u16 peer_caps,
+ struct tipc_link *bc_sndlink,
+ struct tipc_link *bc_rcvlink,
+ struct sk_buff_head *inputq,
+ struct sk_buff_head *namedq,
+ struct tipc_link **link);
+bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer, u8 *peer_id,
+ int mtu, u32 min_win, u32 max_win, u16 peer_caps,
+ struct sk_buff_head *inputq,
+ struct sk_buff_head *namedq,
+ struct tipc_link *bc_sndlink,
+ struct tipc_link **link);
+void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
+ int mtyp, struct sk_buff_head *xmitq);
+void tipc_link_create_dummy_tnl_msg(struct tipc_link *tnl,
+ struct sk_buff_head *xmitq);
+void tipc_link_failover_prepare(struct tipc_link *l, struct tipc_link *tnl,
+ struct sk_buff_head *xmitq);
+void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq);
+int tipc_link_fsm_evt(struct tipc_link *l, int evt);
+bool tipc_link_is_up(struct tipc_link *l);
+bool tipc_link_peer_is_down(struct tipc_link *l);
+bool tipc_link_is_reset(struct tipc_link *l);
+bool tipc_link_is_establishing(struct tipc_link *l);
+bool tipc_link_is_synching(struct tipc_link *l);
+bool tipc_link_is_failingover(struct tipc_link *l);
+bool tipc_link_is_blocked(struct tipc_link *l);
+void tipc_link_set_active(struct tipc_link *l, bool active);
+void tipc_link_reset(struct tipc_link *l);
+void tipc_link_reset_stats(struct tipc_link *l);
+int tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list,
+ struct sk_buff_head *xmitq);
+struct sk_buff_head *tipc_link_inputq(struct tipc_link *l);
+u16 tipc_link_rcv_nxt(struct tipc_link *l);
+u16 tipc_link_acked(struct tipc_link *l);
+u32 tipc_link_id(struct tipc_link *l);
+char *tipc_link_name(struct tipc_link *l);
+u32 tipc_link_state(struct tipc_link *l);
+char tipc_link_plane(struct tipc_link *l);
+int tipc_link_prio(struct tipc_link *l);
+int tipc_link_min_win(struct tipc_link *l);
+int tipc_link_max_win(struct tipc_link *l);
+void tipc_link_update_caps(struct tipc_link *l, u16 capabilities);
+bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr);
+unsigned long tipc_link_tolerance(struct tipc_link *l);
+void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
+ struct sk_buff_head *xmitq);
+void tipc_link_set_prio(struct tipc_link *l, u32 prio,
+ struct sk_buff_head *xmitq);
+void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit);
+void tipc_link_set_queue_limits(struct tipc_link *l, u32 min_win, u32 max_win);
+int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
+ struct tipc_link *link, int nlflags);
+int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]);
+int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq);
+int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
+ struct sk_buff_head *xmitq);
+int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq);
+void tipc_link_add_bc_peer(struct tipc_link *snd_l,
+ struct tipc_link *uc_l,
+ struct sk_buff_head *xmitq);
+void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
+ struct tipc_link *rcv_l,
+ struct sk_buff_head *xmitq);
+int tipc_link_bc_peers(struct tipc_link *l);
+void tipc_link_set_mtu(struct tipc_link *l, int mtu);
+int tipc_link_mtu(struct tipc_link *l);
+int tipc_link_mss(struct tipc_link *l);
+u16 tipc_get_gap_ack_blks(struct tipc_gap_ack_blks **ga, struct tipc_link *l,
+ struct tipc_msg *hdr, bool uc);
+int tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked, u16 gap,
+ struct tipc_gap_ack_blks *ga,
+ struct sk_buff_head *xmitq,
+ struct sk_buff_head *retrq);
+void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr);
+int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
+ struct sk_buff_head *xmitq);
+int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
+ struct sk_buff_head *xmitq);
+bool tipc_link_too_silent(struct tipc_link *l);
+struct net *tipc_link_net(struct tipc_link *l);
+#endif
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
new file mode 100644
index 0000000000..77a3d016ca
--- /dev/null
+++ b/net/tipc/monitor.c
@@ -0,0 +1,874 @@
+/*
+ * net/tipc/monitor.c
+ *
+ * Copyright (c) 2016, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <net/genetlink.h>
+#include "core.h"
+#include "addr.h"
+#include "monitor.h"
+#include "bearer.h"
+
+#define MAX_MON_DOMAIN 64
+#define MON_TIMEOUT 120000
+#define MAX_PEER_DOWN_EVENTS 4
+
+/* struct tipc_mon_domain: domain record to be transferred between peers
+ * @len: actual size of domain record
+ * @gen: current generation of sender's domain
+ * @ack_gen: most recent generation of self's domain acked by peer
+ * @member_cnt: number of domain member nodes described in this record
+ * @up_map: bit map indicating which of the members the sender considers up
+ * @members: identity of the domain members
+ */
+struct tipc_mon_domain {
+ u16 len;
+ u16 gen;
+ u16 ack_gen;
+ u16 member_cnt;
+ u64 up_map;
+ u32 members[MAX_MON_DOMAIN];
+};
+
+/* struct tipc_peer: state of a peer node and its domain
+ * @addr: tipc node identity of peer
+ * @head_map: shows which other nodes currently consider peer 'up'
+ * @domain: most recent domain record from peer
+ * @hash: position in hashed lookup list
+ * @list: position in linked list, in circular ascending order by 'addr'
+ * @applied: number of reported domain members applied on this monitor list
+ * @is_up: peer is up as seen from this node
+ * @is_head: peer is assigned domain head as seen from this node
+ * @is_local: peer is in local domain and should be continuously monitored
+ * @down_cnt: number of other peers which have reported this peer as lost
+ */
+struct tipc_peer {
+ u32 addr;
+ struct tipc_mon_domain *domain;
+ struct hlist_node hash;
+ struct list_head list;
+ u8 applied;
+ u8 down_cnt;
+ bool is_up;
+ bool is_head;
+ bool is_local;
+};
+
+struct tipc_monitor {
+ struct hlist_head peers[NODE_HTABLE_SIZE];
+ int peer_cnt;
+ struct tipc_peer *self;
+ rwlock_t lock;
+ struct tipc_mon_domain cache;
+ u16 list_gen;
+ u16 dom_gen;
+ struct net *net;
+ struct timer_list timer;
+ unsigned long timer_intv;
+};
+
+static struct tipc_monitor *tipc_monitor(struct net *net, int bearer_id)
+{
+ return tipc_net(net)->monitors[bearer_id];
+}
+
+const int tipc_max_domain_size = sizeof(struct tipc_mon_domain);
+
+static inline u16 mon_cpu_to_le16(u16 val)
+{
+ return (__force __u16)htons(val);
+}
+
+static inline u32 mon_cpu_to_le32(u32 val)
+{
+ return (__force __u32)htonl(val);
+}
+
+static inline u64 mon_cpu_to_le64(u64 val)
+{
+ return (__force __u64)cpu_to_be64(val);
+}
+
+static inline u16 mon_le16_to_cpu(u16 val)
+{
+ return ntohs((__force __be16)val);
+}
+
+static inline u32 mon_le32_to_cpu(u32 val)
+{
+ return ntohl((__force __be32)val);
+}
+
+static inline u64 mon_le64_to_cpu(u64 val)
+{
+ return be64_to_cpu((__force __be64)val);
+}
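+
+/* Note: despite the "_le_" naming, the helpers above use htons()/htonl()/
+ * cpu_to_be64(), i.e. monitor domain records actually travel in network
+ * (big-endian) byte order.
+ */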
+
+/* dom_rec_len(): actual length of domain record for transport
+ */
+static int dom_rec_len(struct tipc_mon_domain *dom, u16 mcnt)
+{
+ return (offsetof(struct tipc_mon_domain, members)) + (mcnt * sizeof(u32));
+}
+
+/* dom_size() : calculate size of own domain based on number of peers
+ */
+static int dom_size(int peers)
+{
+ int i = 0;
+
+ while ((i * i) < peers)
+ i++;
+ return i < MAX_MON_DOMAIN ? i : MAX_MON_DOMAIN;
+}
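+
+/* Example (just the arithmetic above, not an extra constraint): in a
+ * cluster with 33 peers dom_size() returns 6, the smallest i with
+ * i * i >= 33, so each node actively monitors only about sqrt(N)
+ * neighbours. A full 64-member record then occupies
+ * dom_rec_len(dom, MAX_MON_DOMAIN) = 16 + 64 * 4 = 272 bytes on common
+ * ABIs, which is what tipc_max_domain_size advertises.
+ */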
+
+static void map_set(u64 *up_map, int i, unsigned int v)
+{
+ *up_map &= ~(1ULL << i);
+ *up_map |= ((u64)v << i);
+}
+
+static int map_get(u64 up_map, int i)
+{
+ return (up_map & (1ULL << i)) >> i;
+}
+
+static struct tipc_peer *peer_prev(struct tipc_peer *peer)
+{
+ return list_last_entry(&peer->list, struct tipc_peer, list);
+}
+
+static struct tipc_peer *peer_nxt(struct tipc_peer *peer)
+{
+ return list_first_entry(&peer->list, struct tipc_peer, list);
+}
+
+static struct tipc_peer *peer_head(struct tipc_peer *peer)
+{
+ while (!peer->is_head)
+ peer = peer_prev(peer);
+ return peer;
+}
+
+static struct tipc_peer *get_peer(struct tipc_monitor *mon, u32 addr)
+{
+ struct tipc_peer *peer;
+ unsigned int thash = tipc_hashfn(addr);
+
+ hlist_for_each_entry(peer, &mon->peers[thash], hash) {
+ if (peer->addr == addr)
+ return peer;
+ }
+ return NULL;
+}
+
+static struct tipc_peer *get_self(struct net *net, int bearer_id)
+{
+ struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
+
+ return mon->self;
+}
+
+static inline bool tipc_mon_is_active(struct net *net, struct tipc_monitor *mon)
+{
+ struct tipc_net *tn = tipc_net(net);
+
+ return mon->peer_cnt > tn->mon_threshold;
+}
+
+/* mon_identify_lost_members() : identify and mark potentially lost members
+ */
+static void mon_identify_lost_members(struct tipc_peer *peer,
+ struct tipc_mon_domain *dom_bef,
+ int applied_bef)
+{
+ struct tipc_peer *member = peer;
+ struct tipc_mon_domain *dom_aft = peer->domain;
+ int applied_aft = peer->applied;
+ int i;
+
+ for (i = 0; i < applied_bef; i++) {
+ member = peer_nxt(member);
+
+ /* Do nothing if self or peer already see member as down */
+ if (!member->is_up || !map_get(dom_bef->up_map, i))
+ continue;
+
+ /* Loss of local node must be detected by active probing */
+ if (member->is_local)
+ continue;
+
+ /* Start probing if member was removed from applied domain */
+ if (!applied_aft || (applied_aft < i)) {
+ member->down_cnt = 1;
+ continue;
+ }
+
+ /* Member loss is confirmed if it is still in applied domain */
+ if (!map_get(dom_aft->up_map, i))
+ member->down_cnt++;
+ }
+}
+
+/* mon_apply_domain() : match a peer's domain record against monitor list
+ */
+static void mon_apply_domain(struct tipc_monitor *mon,
+ struct tipc_peer *peer)
+{
+ struct tipc_mon_domain *dom = peer->domain;
+ struct tipc_peer *member;
+ u32 addr;
+ int i;
+
+ if (!dom || !peer->is_up)
+ return;
+
+ /* Scan across domain members and match against monitor list */
+ peer->applied = 0;
+ member = peer_nxt(peer);
+ for (i = 0; i < dom->member_cnt; i++) {
+ addr = dom->members[i];
+ if (addr != member->addr)
+ return;
+ peer->applied++;
+ member = peer_nxt(member);
+ }
+}
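+
+/* Note that the walk above stops at the first mismatch, so 'applied'
+ * counts only the leading run of the peer's domain record that coincides
+ * with our own view of its successors on the monitor list.
+ */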
+
+/* mon_update_local_domain() : update after peer addition/removal/up/down
+ */
+static void mon_update_local_domain(struct tipc_monitor *mon)
+{
+ struct tipc_peer *self = mon->self;
+ struct tipc_mon_domain *cache = &mon->cache;
+ struct tipc_mon_domain *dom = self->domain;
+ struct tipc_peer *peer = self;
+ u64 prev_up_map = dom->up_map;
+ u16 member_cnt, i;
+ bool diff;
+
+ /* Update local domain size based on current size of cluster */
+ member_cnt = dom_size(mon->peer_cnt) - 1;
+ self->applied = member_cnt;
+
+ /* Update native and cached outgoing local domain records */
+ dom->len = dom_rec_len(dom, member_cnt);
+ diff = dom->member_cnt != member_cnt;
+ dom->member_cnt = member_cnt;
+ for (i = 0; i < member_cnt; i++) {
+ peer = peer_nxt(peer);
+ diff |= dom->members[i] != peer->addr;
+ dom->members[i] = peer->addr;
+ map_set(&dom->up_map, i, peer->is_up);
+ cache->members[i] = mon_cpu_to_le32(peer->addr);
+ }
+ diff |= dom->up_map != prev_up_map;
+ if (!diff)
+ return;
+ dom->gen = ++mon->dom_gen;
+ cache->len = mon_cpu_to_le16(dom->len);
+ cache->gen = mon_cpu_to_le16(dom->gen);
+ cache->member_cnt = mon_cpu_to_le16(member_cnt);
+ cache->up_map = mon_cpu_to_le64(dom->up_map);
+ mon_apply_domain(mon, self);
+}
+
+/* mon_update_neighbors() : update preceding neighbors of added/removed peer
+ */
+static void mon_update_neighbors(struct tipc_monitor *mon,
+ struct tipc_peer *peer)
+{
+ int dz, i;
+
+ dz = dom_size(mon->peer_cnt);
+ for (i = 0; i < dz; i++) {
+ mon_apply_domain(mon, peer);
+ peer = peer_prev(peer);
+ }
+}
+
+/* mon_assign_roles() : reassign peer roles after a network change
+ * The monitor list is consistent at this stage; i.e., each peer is monitoring
+ * a set of domain members as matched between domain record and the monitor list
+ */
+static void mon_assign_roles(struct tipc_monitor *mon, struct tipc_peer *head)
+{
+ struct tipc_peer *peer = peer_nxt(head);
+ struct tipc_peer *self = mon->self;
+ int i = 0;
+
+ for (; peer != self; peer = peer_nxt(peer)) {
+ peer->is_local = false;
+
+ /* Update domain member */
+ if (i++ < head->applied) {
+ peer->is_head = false;
+ if (head == self)
+ peer->is_local = true;
+ continue;
+ }
+ /* Assign next domain head */
+ if (!peer->is_up)
+ continue;
+ if (peer->is_head)
+ break;
+ head = peer;
+ head->is_head = true;
+ i = 0;
+ }
+ mon->list_gen++;
+}
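+
+/* Taken together with tipc_mon_get_state() further down, the role
+ * assignment above yields the overlapping-domain scheme: peers sit on a
+ * circular list in ascending address order, each domain head actively
+ * supervises the 'applied' members following it, and every node keeps
+ * probing only its own local domain members plus the other domain heads,
+ * learning about the remaining peers indirectly from the heads' domain
+ * records.
+ */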
+
+void tipc_mon_remove_peer(struct net *net, u32 addr, int bearer_id)
+{
+ struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
+ struct tipc_peer *self;
+ struct tipc_peer *peer, *prev, *head;
+
+ if (!mon)
+ return;
+
+ self = get_self(net, bearer_id);
+ write_lock_bh(&mon->lock);
+ peer = get_peer(mon, addr);
+ if (!peer)
+ goto exit;
+ prev = peer_prev(peer);
+ list_del(&peer->list);
+ hlist_del(&peer->hash);
+ kfree(peer->domain);
+ kfree(peer);
+ mon->peer_cnt--;
+ head = peer_head(prev);
+ if (head == self)
+ mon_update_local_domain(mon);
+ mon_update_neighbors(mon, prev);
+
+ /* Revert to full-mesh monitoring if we reach threshold */
+ if (!tipc_mon_is_active(net, mon)) {
+ list_for_each_entry(peer, &self->list, list) {
+ kfree(peer->domain);
+ peer->domain = NULL;
+ peer->applied = 0;
+ }
+ }
+ mon_assign_roles(mon, head);
+exit:
+ write_unlock_bh(&mon->lock);
+}
+
+static bool tipc_mon_add_peer(struct tipc_monitor *mon, u32 addr,
+ struct tipc_peer **peer)
+{
+ struct tipc_peer *self = mon->self;
+ struct tipc_peer *cur, *prev, *p;
+
+ p = kzalloc(sizeof(*p), GFP_ATOMIC);
+ *peer = p;
+ if (!p)
+ return false;
+ p->addr = addr;
+
+ /* Add new peer to lookup list */
+ INIT_LIST_HEAD(&p->list);
+ hlist_add_head(&p->hash, &mon->peers[tipc_hashfn(addr)]);
+
+ /* Sort new peer into iterator list, in ascending circular order */
+ prev = self;
+ list_for_each_entry(cur, &self->list, list) {
+ if ((addr > prev->addr) && (addr < cur->addr))
+ break;
+ if (((addr < cur->addr) || (addr > prev->addr)) &&
+ (prev->addr > cur->addr))
+ break;
+ prev = cur;
+ }
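+ /* The second test above handles the wrap-around point of the circular
+ * list (prev->addr > cur->addr): an address larger than the current
+ * maximum or smaller than the current minimum is inserted there.
+ */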
+ list_add_tail(&p->list, &cur->list);
+ mon->peer_cnt++;
+ mon_update_neighbors(mon, p);
+ return true;
+}
+
+void tipc_mon_peer_up(struct net *net, u32 addr, int bearer_id)
+{
+ struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
+ struct tipc_peer *self = get_self(net, bearer_id);
+ struct tipc_peer *peer, *head;
+
+ write_lock_bh(&mon->lock);
+ peer = get_peer(mon, addr);
+ if (!peer && !tipc_mon_add_peer(mon, addr, &peer))
+ goto exit;
+ peer->is_up = true;
+ head = peer_head(peer);
+ if (head == self)
+ mon_update_local_domain(mon);
+ mon_assign_roles(mon, head);
+exit:
+ write_unlock_bh(&mon->lock);
+}
+
+void tipc_mon_peer_down(struct net *net, u32 addr, int bearer_id)
+{
+ struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
+ struct tipc_peer *self;
+ struct tipc_peer *peer, *head;
+ struct tipc_mon_domain *dom;
+ int applied;
+
+ if (!mon)
+ return;
+
+ self = get_self(net, bearer_id);
+ write_lock_bh(&mon->lock);
+ peer = get_peer(mon, addr);
+ if (!peer) {
+ pr_warn("Mon: unknown link %x/%u DOWN\n", addr, bearer_id);
+ goto exit;
+ }
+ applied = peer->applied;
+ peer->applied = 0;
+ dom = peer->domain;
+ peer->domain = NULL;
+ if (peer->is_head)
+ mon_identify_lost_members(peer, dom, applied);
+ kfree(dom);
+ peer->is_up = false;
+ peer->is_head = false;
+ peer->is_local = false;
+ peer->down_cnt = 0;
+ head = peer_head(peer);
+ if (head == self)
+ mon_update_local_domain(mon);
+ mon_assign_roles(mon, head);
+exit:
+ write_unlock_bh(&mon->lock);
+}
+
+/* tipc_mon_rcv - process monitor domain event message
+ */
+void tipc_mon_rcv(struct net *net, void *data, u16 dlen, u32 addr,
+ struct tipc_mon_state *state, int bearer_id)
+{
+ struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
+ struct tipc_mon_domain *arrv_dom = data;
+ struct tipc_mon_domain dom_bef;
+ struct tipc_mon_domain *dom;
+ struct tipc_peer *peer;
+ u16 new_member_cnt = mon_le16_to_cpu(arrv_dom->member_cnt);
+ int new_dlen = dom_rec_len(arrv_dom, new_member_cnt);
+ u16 new_gen = mon_le16_to_cpu(arrv_dom->gen);
+ u16 acked_gen = mon_le16_to_cpu(arrv_dom->ack_gen);
+ u16 arrv_dlen = mon_le16_to_cpu(arrv_dom->len);
+ bool probing = state->probing;
+ int i, applied_bef;
+
+ state->probing = false;
+
+ /* Sanity check received domain record */
+ if (new_member_cnt > MAX_MON_DOMAIN)
+ return;
+ if (dlen < dom_rec_len(arrv_dom, 0))
+ return;
+ if (dlen != dom_rec_len(arrv_dom, new_member_cnt))
+ return;
+ if (dlen < new_dlen || arrv_dlen != new_dlen)
+ return;
+
+ /* Synch generation numbers with peer if link just came up */
+ if (!state->synched) {
+ state->peer_gen = new_gen - 1;
+ state->acked_gen = acked_gen;
+ state->synched = true;
+ }
+
+ if (more(acked_gen, state->acked_gen))
+ state->acked_gen = acked_gen;
+
+ /* Drop duplicate unless we are waiting for a probe response */
+ if (!more(new_gen, state->peer_gen) && !probing)
+ return;
+
+ write_lock_bh(&mon->lock);
+ peer = get_peer(mon, addr);
+ if (!peer || !peer->is_up)
+ goto exit;
+
+ /* Peer is confirmed, stop any ongoing probing */
+ peer->down_cnt = 0;
+
+ /* Task is done for duplicate record */
+ if (!more(new_gen, state->peer_gen))
+ goto exit;
+
+ state->peer_gen = new_gen;
+
+ /* Cache current domain record for later use */
+ dom_bef.member_cnt = 0;
+ dom = peer->domain;
+ if (dom)
+ memcpy(&dom_bef, dom, dom->len);
+
+ /* Transform and store received domain record */
+ if (!dom || (dom->len < new_dlen)) {
+ kfree(dom);
+ dom = kmalloc(new_dlen, GFP_ATOMIC);
+ peer->domain = dom;
+ if (!dom)
+ goto exit;
+ }
+ dom->len = new_dlen;
+ dom->gen = new_gen;
+ dom->member_cnt = new_member_cnt;
+ dom->up_map = mon_le64_to_cpu(arrv_dom->up_map);
+ for (i = 0; i < new_member_cnt; i++)
+ dom->members[i] = mon_le32_to_cpu(arrv_dom->members[i]);
+
+ /* Update peers affected by this domain record */
+ applied_bef = peer->applied;
+ mon_apply_domain(mon, peer);
+ mon_identify_lost_members(peer, &dom_bef, applied_bef);
+ mon_assign_roles(mon, peer_head(peer));
+exit:
+ write_unlock_bh(&mon->lock);
+}
+
+void tipc_mon_prep(struct net *net, void *data, int *dlen,
+ struct tipc_mon_state *state, int bearer_id)
+{
+ struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
+ struct tipc_mon_domain *dom = data;
+ u16 gen = mon->dom_gen;
+ u16 len;
+
+ /* Send invalid record if not active */
+ if (!tipc_mon_is_active(net, mon)) {
+ dom->len = 0;
+ return;
+ }
+
+ /* Send only a dummy record with ack if peer has acked our last sent */
+ if (likely(state->acked_gen == gen)) {
+ len = dom_rec_len(dom, 0);
+ *dlen = len;
+ dom->len = mon_cpu_to_le16(len);
+ dom->gen = mon_cpu_to_le16(gen);
+ dom->ack_gen = mon_cpu_to_le16(state->peer_gen);
+ dom->member_cnt = 0;
+ return;
+ }
+ /* Send the full record */
+ read_lock_bh(&mon->lock);
+ len = mon_le16_to_cpu(mon->cache.len);
+ *dlen = len;
+ memcpy(data, &mon->cache, len);
+ read_unlock_bh(&mon->lock);
+ dom->ack_gen = mon_cpu_to_le16(state->peer_gen);
+}
+
+void tipc_mon_get_state(struct net *net, u32 addr,
+ struct tipc_mon_state *state,
+ int bearer_id)
+{
+ struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
+ struct tipc_peer *peer;
+
+ if (!tipc_mon_is_active(net, mon)) {
+ state->probing = false;
+ state->monitoring = true;
+ return;
+ }
+
+ /* Use cached state if table has not changed */
+ if (!state->probing &&
+ (state->list_gen == mon->list_gen) &&
+ (state->acked_gen == mon->dom_gen))
+ return;
+
+ read_lock_bh(&mon->lock);
+ peer = get_peer(mon, addr);
+ if (peer) {
+ state->probing = state->acked_gen != mon->dom_gen;
+ state->probing |= peer->down_cnt;
+ state->reset |= peer->down_cnt >= MAX_PEER_DOWN_EVENTS;
+ state->monitoring = peer->is_local;
+ state->monitoring |= peer->is_head;
+ state->list_gen = mon->list_gen;
+ }
+ read_unlock_bh(&mon->lock);
+}
+
+static void mon_timeout(struct timer_list *t)
+{
+ struct tipc_monitor *mon = from_timer(mon, t, timer);
+ struct tipc_peer *self;
+ int best_member_cnt = dom_size(mon->peer_cnt) - 1;
+
+ write_lock_bh(&mon->lock);
+ self = mon->self;
+ if (self && (best_member_cnt != self->applied)) {
+ mon_update_local_domain(mon);
+ mon_assign_roles(mon, self);
+ }
+ write_unlock_bh(&mon->lock);
+ mod_timer(&mon->timer, jiffies + mon->timer_intv);
+}
+
+int tipc_mon_create(struct net *net, int bearer_id)
+{
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_monitor *mon;
+ struct tipc_peer *self;
+ struct tipc_mon_domain *dom;
+
+ if (tn->monitors[bearer_id])
+ return 0;
+
+ mon = kzalloc(sizeof(*mon), GFP_ATOMIC);
+ self = kzalloc(sizeof(*self), GFP_ATOMIC);
+ dom = kzalloc(sizeof(*dom), GFP_ATOMIC);
+ if (!mon || !self || !dom) {
+ kfree(mon);
+ kfree(self);
+ kfree(dom);
+ return -ENOMEM;
+ }
+ tn->monitors[bearer_id] = mon;
+ rwlock_init(&mon->lock);
+ mon->net = net;
+ mon->peer_cnt = 1;
+ mon->self = self;
+ self->domain = dom;
+ self->addr = tipc_own_addr(net);
+ self->is_up = true;
+ self->is_head = true;
+ INIT_LIST_HEAD(&self->list);
+ timer_setup(&mon->timer, mon_timeout, 0);
+ mon->timer_intv = msecs_to_jiffies(MON_TIMEOUT + (tn->random & 0xffff));
+ mod_timer(&mon->timer, jiffies + mon->timer_intv);
+ return 0;
+}
+
+void tipc_mon_delete(struct net *net, int bearer_id)
+{
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
+ struct tipc_peer *self;
+ struct tipc_peer *peer, *tmp;
+
+ if (!mon)
+ return;
+
+ self = get_self(net, bearer_id);
+ write_lock_bh(&mon->lock);
+ tn->monitors[bearer_id] = NULL;
+ list_for_each_entry_safe(peer, tmp, &self->list, list) {
+ list_del(&peer->list);
+ hlist_del(&peer->hash);
+ kfree(peer->domain);
+ kfree(peer);
+ }
+ mon->self = NULL;
+ write_unlock_bh(&mon->lock);
+ timer_shutdown_sync(&mon->timer);
+ kfree(self->domain);
+ kfree(self);
+ kfree(mon);
+}
+
+void tipc_mon_reinit_self(struct net *net)
+{
+ struct tipc_monitor *mon;
+ int bearer_id;
+
+ for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
+ mon = tipc_monitor(net, bearer_id);
+ if (!mon)
+ continue;
+ write_lock_bh(&mon->lock);
+ mon->self->addr = tipc_own_addr(net);
+ write_unlock_bh(&mon->lock);
+ }
+}
+
+int tipc_nl_monitor_set_threshold(struct net *net, u32 cluster_size)
+{
+ struct tipc_net *tn = tipc_net(net);
+
+ if (cluster_size > TIPC_CLUSTER_SIZE)
+ return -EINVAL;
+
+ tn->mon_threshold = cluster_size;
+
+ return 0;
+}
+
+int tipc_nl_monitor_get_threshold(struct net *net)
+{
+ struct tipc_net *tn = tipc_net(net);
+
+ return tn->mon_threshold;
+}
+
+static int __tipc_nl_add_monitor_peer(struct tipc_peer *peer,
+ struct tipc_nl_msg *msg)
+{
+ struct tipc_mon_domain *dom = peer->domain;
+ struct nlattr *attrs;
+ void *hdr;
+
+ hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
+ NLM_F_MULTI, TIPC_NL_MON_PEER_GET);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON_PEER);
+ if (!attrs)
+ goto msg_full;
+
+ if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEER_ADDR, peer->addr))
+ goto attr_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEER_APPLIED, peer->applied))
+ goto attr_msg_full;
+
+ if (peer->is_up)
+ if (nla_put_flag(msg->skb, TIPC_NLA_MON_PEER_UP))
+ goto attr_msg_full;
+ if (peer->is_local)
+ if (nla_put_flag(msg->skb, TIPC_NLA_MON_PEER_LOCAL))
+ goto attr_msg_full;
+ if (peer->is_head)
+ if (nla_put_flag(msg->skb, TIPC_NLA_MON_PEER_HEAD))
+ goto attr_msg_full;
+
+ if (dom) {
+ if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEER_DOMGEN, dom->gen))
+ goto attr_msg_full;
+ if (nla_put_u64_64bit(msg->skb, TIPC_NLA_MON_PEER_UPMAP,
+ dom->up_map, TIPC_NLA_MON_PEER_PAD))
+ goto attr_msg_full;
+ if (nla_put(msg->skb, TIPC_NLA_MON_PEER_MEMBERS,
+ dom->member_cnt * sizeof(u32), &dom->members))
+ goto attr_msg_full;
+ }
+
+ nla_nest_end(msg->skb, attrs);
+ genlmsg_end(msg->skb, hdr);
+ return 0;
+
+attr_msg_full:
+ nla_nest_cancel(msg->skb, attrs);
+msg_full:
+ genlmsg_cancel(msg->skb, hdr);
+
+ return -EMSGSIZE;
+}
+
+int tipc_nl_add_monitor_peer(struct net *net, struct tipc_nl_msg *msg,
+ u32 bearer_id, u32 *prev_node)
+{
+ struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
+ struct tipc_peer *peer;
+
+ if (!mon)
+ return -EINVAL;
+
+ read_lock_bh(&mon->lock);
+ peer = mon->self;
+ do {
+ if (*prev_node) {
+ if (peer->addr == *prev_node)
+ *prev_node = 0;
+ else
+ continue;
+ }
+ if (__tipc_nl_add_monitor_peer(peer, msg)) {
+ *prev_node = peer->addr;
+ read_unlock_bh(&mon->lock);
+ return -EMSGSIZE;
+ }
+ } while ((peer = peer_nxt(peer)) != mon->self);
+ read_unlock_bh(&mon->lock);
+
+ return 0;
+}
+
+int __tipc_nl_add_monitor(struct net *net, struct tipc_nl_msg *msg,
+ u32 bearer_id)
+{
+ struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
+ char bearer_name[TIPC_MAX_BEARER_NAME];
+ struct nlattr *attrs;
+ void *hdr;
+ int ret;
+
+ ret = tipc_bearer_get_name(net, bearer_name, bearer_id);
+ if (ret || !mon)
+ return 0;
+
+ hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
+ NLM_F_MULTI, TIPC_NL_MON_GET);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON);
+ if (!attrs)
+ goto msg_full;
+
+ read_lock_bh(&mon->lock);
+ if (nla_put_u32(msg->skb, TIPC_NLA_MON_REF, bearer_id))
+ goto attr_msg_full;
+ if (tipc_mon_is_active(net, mon))
+ if (nla_put_flag(msg->skb, TIPC_NLA_MON_ACTIVE))
+ goto attr_msg_full;
+ if (nla_put_string(msg->skb, TIPC_NLA_MON_BEARER_NAME, bearer_name))
+ goto attr_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEERCNT, mon->peer_cnt))
+ goto attr_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_MON_LISTGEN, mon->list_gen))
+ goto attr_msg_full;
+
+ read_unlock_bh(&mon->lock);
+ nla_nest_end(msg->skb, attrs);
+ genlmsg_end(msg->skb, hdr);
+
+ return 0;
+
+attr_msg_full:
+ read_unlock_bh(&mon->lock);
+ nla_nest_cancel(msg->skb, attrs);
+msg_full:
+ genlmsg_cancel(msg->skb, hdr);
+
+ return -EMSGSIZE;
+}
diff --git a/net/tipc/monitor.h b/net/tipc/monitor.h
new file mode 100644
index 0000000000..ed63d2e650
--- /dev/null
+++ b/net/tipc/monitor.h
@@ -0,0 +1,83 @@
+/*
+ * net/tipc/monitor.h
+ *
+ * Copyright (c) 2015, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_MONITOR_H
+#define _TIPC_MONITOR_H
+
+#include "netlink.h"
+
+/* struct tipc_mon_state: link instance's cache of monitor list and domain state
+ * @list_gen: current generation of this node's monitor list
+ * @peer_gen: most recent domain generation received from peer
+ * @acked_gen: most recent generation of self's domain acked by peer
+ * @monitoring: this peer endpoint should be continuously monitored
+ * @probing: peer endpoint should be temporarily probed for potential loss
+ * @reset: link to this peer endpoint should be reset after repeated down events
+ * @synched: domain record's generation has been synched with peer after reset
+ */
+struct tipc_mon_state {
+ u16 list_gen;
+ u16 peer_gen;
+ u16 acked_gen;
+ bool monitoring :1;
+ bool probing :1;
+ bool reset :1;
+ bool synched :1;
+};
+
+int tipc_mon_create(struct net *net, int bearer_id);
+void tipc_mon_delete(struct net *net, int bearer_id);
+
+void tipc_mon_peer_up(struct net *net, u32 addr, int bearer_id);
+void tipc_mon_peer_down(struct net *net, u32 addr, int bearer_id);
+void tipc_mon_prep(struct net *net, void *data, int *dlen,
+ struct tipc_mon_state *state, int bearer_id);
+void tipc_mon_rcv(struct net *net, void *data, u16 dlen, u32 addr,
+ struct tipc_mon_state *state, int bearer_id);
+void tipc_mon_get_state(struct net *net, u32 addr,
+ struct tipc_mon_state *state,
+ int bearer_id);
+void tipc_mon_remove_peer(struct net *net, u32 addr, int bearer_id);
+
+int tipc_nl_monitor_set_threshold(struct net *net, u32 cluster_size);
+int tipc_nl_monitor_get_threshold(struct net *net);
+int __tipc_nl_add_monitor(struct net *net, struct tipc_nl_msg *msg,
+ u32 bearer_id);
+int tipc_nl_add_monitor_peer(struct net *net, struct tipc_nl_msg *msg,
+ u32 bearer_id, u32 *prev_node);
+void tipc_mon_reinit_self(struct net *net);
+
+extern const int tipc_max_domain_size;
+#endif
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
new file mode 100644
index 0000000000..5c9fd4791c
--- /dev/null
+++ b/net/tipc/msg.c
@@ -0,0 +1,851 @@
+/*
+ * net/tipc/msg.c: TIPC message header routines
+ *
+ * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <net/sock.h>
+#include "core.h"
+#include "msg.h"
+#include "addr.h"
+#include "name_table.h"
+#include "crypto.h"
+
+#define BUF_ALIGN(x) ALIGN(x, 4)
+#define MAX_FORWARD_SIZE 1024
+#ifdef CONFIG_TIPC_CRYPTO
+#define BUF_HEADROOM ALIGN(((LL_MAX_HEADER + 48) + EHDR_MAX_SIZE), 16)
+#define BUF_OVERHEAD (BUF_HEADROOM + TIPC_AES_GCM_TAG_SIZE)
+#else
+#define BUF_HEADROOM (LL_MAX_HEADER + 48)
+#define BUF_OVERHEAD BUF_HEADROOM
+#endif
+
+const int one_page_mtu = PAGE_SIZE - SKB_DATA_ALIGN(BUF_OVERHEAD) -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+/**
+ * tipc_buf_acquire - creates a TIPC message buffer
+ * @size: message size (including TIPC header)
+ * @gfp: memory allocation flags
+ *
+ * Return: a new buffer with data pointers set to the specified size.
+ *
+ * NOTE:
+ * Headroom is reserved to allow prepending of a data link header.
+ * There may also be unrequested tailroom present at the buffer's end.
+ */
+struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb_fclone(BUF_OVERHEAD + size, gfp);
+ if (skb) {
+ skb_reserve(skb, BUF_HEADROOM);
+ skb_put(skb, size);
+ skb->next = NULL;
+ }
+ return skb;
+}
+
+void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
+ u32 hsize, u32 dnode)
+{
+ memset(m, 0, hsize);
+ msg_set_version(m);
+ msg_set_user(m, user);
+ msg_set_hdr_sz(m, hsize);
+ msg_set_size(m, hsize);
+ msg_set_prevnode(m, own_node);
+ msg_set_type(m, type);
+ if (hsize > SHORT_H_SIZE) {
+ msg_set_orignode(m, own_node);
+ msg_set_destnode(m, dnode);
+ }
+}
+
+struct sk_buff *tipc_msg_create(uint user, uint type,
+ uint hdr_sz, uint data_sz, u32 dnode,
+ u32 onode, u32 dport, u32 oport, int errcode)
+{
+ struct tipc_msg *msg;
+ struct sk_buff *buf;
+
+ buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
+ if (unlikely(!buf))
+ return NULL;
+
+ msg = buf_msg(buf);
+ tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
+ msg_set_size(msg, hdr_sz + data_sz);
+ msg_set_origport(msg, oport);
+ msg_set_destport(msg, dport);
+ msg_set_errcode(msg, errcode);
+ return buf;
+}
+
+/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
+ * @*headbuf: in: NULL for first frag, otherwise value returned from prev call
+ * out: set when successful non-complete reassembly, otherwise NULL
+ * @*buf: in: the buffer to append. Always defined
+ * out: head buf after successful complete reassembly, otherwise NULL
+ * Returns 1 when reassembly complete, otherwise 0
+ */
+int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
+{
+ struct sk_buff *head = *headbuf;
+ struct sk_buff *frag = *buf;
+ struct sk_buff *tail = NULL;
+ struct tipc_msg *msg;
+ u32 fragid;
+ int delta;
+ bool headstolen;
+
+ if (!frag)
+ goto err;
+
+ msg = buf_msg(frag);
+ fragid = msg_type(msg);
+ frag->next = NULL;
+ skb_pull(frag, msg_hdr_sz(msg));
+
+ if (fragid == FIRST_FRAGMENT) {
+ if (unlikely(head))
+ goto err;
+ *buf = NULL;
+ if (skb_has_frag_list(frag) && __skb_linearize(frag))
+ goto err;
+ frag = skb_unshare(frag, GFP_ATOMIC);
+ if (unlikely(!frag))
+ goto err;
+ head = *headbuf = frag;
+ TIPC_SKB_CB(head)->tail = NULL;
+ return 0;
+ }
+
+ if (!head)
+ goto err;
+
+ if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
+ kfree_skb_partial(frag, headstolen);
+ } else {
+ tail = TIPC_SKB_CB(head)->tail;
+ if (!skb_has_frag_list(head))
+ skb_shinfo(head)->frag_list = frag;
+ else
+ tail->next = frag;
+ head->truesize += frag->truesize;
+ head->data_len += frag->len;
+ head->len += frag->len;
+ TIPC_SKB_CB(head)->tail = frag;
+ }
+
+ if (fragid == LAST_FRAGMENT) {
+ TIPC_SKB_CB(head)->validated = 0;
+ if (unlikely(!tipc_msg_validate(&head)))
+ goto err;
+ *buf = head;
+ TIPC_SKB_CB(head)->tail = NULL;
+ *headbuf = NULL;
+ return 1;
+ }
+ *buf = NULL;
+ return 0;
+err:
+ kfree_skb(*buf);
+ kfree_skb(*headbuf);
+ *buf = *headbuf = NULL;
+ return 0;
+}
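
For illustration of the calling pattern implied by the comment above (return 1 on complete reassembly, 0 otherwise), a minimal caller sketch follows; it is not part of this patch, and the deliver() callback is hypothetical:

/* Sketch only: drain a queue of in-order fragments through tipc_buf_append()
 * and hand each fully reassembled message to a hypothetical deliver() hook.
 */
static void example_reassemble(struct sk_buff_head *fragq,
			       void (*deliver)(struct sk_buff *skb))
{
	struct sk_buff *head = NULL;	/* reassembly context owned by tipc_buf_append() */
	struct sk_buff *frag;

	while ((frag = __skb_dequeue(fragq))) {
		if (tipc_buf_append(&head, &frag))
			deliver(frag);	/* frag now points at the complete message */
		/* on 0, the fragment was either absorbed into head or freed on error */
	}
	kfree_skb(head);		/* drop any incomplete reassembly */
}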
+
+/**
+ * tipc_msg_append(): Append data to tail of an existing buffer queue
+ * @_hdr: header to be used
+ * @m: the data to be appended
+ * @dlen: size of data to be appended
+ * @mss: max allowable size of buffer
+ * @txq: queue to append to
+ *
+ * Return: the number of 1k blocks appended or errno value
+ */
+int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen,
+ int mss, struct sk_buff_head *txq)
+{
+ struct sk_buff *skb;
+ int accounted, total, curr;
+ int mlen, cpy, rem = dlen;
+ struct tipc_msg *hdr;
+
+ skb = skb_peek_tail(txq);
+ accounted = skb ? msg_blocks(buf_msg(skb)) : 0;
+ total = accounted;
+
+ do {
+ if (!skb || skb->len >= mss) {
+ skb = tipc_buf_acquire(mss, GFP_KERNEL);
+ if (unlikely(!skb))
+ return -ENOMEM;
+ skb_orphan(skb);
+ skb_trim(skb, MIN_H_SIZE);
+ hdr = buf_msg(skb);
+ skb_copy_to_linear_data(skb, _hdr, MIN_H_SIZE);
+ msg_set_hdr_sz(hdr, MIN_H_SIZE);
+ msg_set_size(hdr, MIN_H_SIZE);
+ __skb_queue_tail(txq, skb);
+ total += 1;
+ }
+ hdr = buf_msg(skb);
+ curr = msg_blocks(hdr);
+ mlen = msg_size(hdr);
+ cpy = min_t(size_t, rem, mss - mlen);
+ if (cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter))
+ return -EFAULT;
+ msg_set_size(hdr, mlen + cpy);
+ skb_put(skb, cpy);
+ rem -= cpy;
+ total += msg_blocks(hdr) - curr;
+ } while (rem > 0);
+ return total - accounted;
+}
+
+/* tipc_msg_validate - validate basic format of received message
+ *
+ * This routine ensures a TIPC message has an acceptable header, and at least
+ * as much data as the header indicates it should. The routine also ensures
+ * that the entire message header is stored in the main fragment of the message
+ * buffer, to simplify future access to message header fields.
+ *
+ * Note: Having extra info present in the message header or data areas is OK.
+ * TIPC will ignore the excess, under the assumption that it is optional info
+ * introduced by a later release of the protocol.
+ */
+bool tipc_msg_validate(struct sk_buff **_skb)
+{
+ struct sk_buff *skb = *_skb;
+ struct tipc_msg *hdr;
+ int msz, hsz;
+
+ /* Ensure that flow control ratio condition is satisfied */
+ if (unlikely(skb->truesize / buf_roundup_len(skb) >= 4)) {
+ skb = skb_copy_expand(skb, BUF_HEADROOM, 0, GFP_ATOMIC);
+ if (!skb)
+ return false;
+ kfree_skb(*_skb);
+ *_skb = skb;
+ }
+
+ if (unlikely(TIPC_SKB_CB(skb)->validated))
+ return true;
+
+ if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
+ return false;
+
+ hsz = msg_hdr_sz(buf_msg(skb));
+ if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE))
+ return false;
+ if (unlikely(!pskb_may_pull(skb, hsz)))
+ return false;
+
+ hdr = buf_msg(skb);
+ if (unlikely(msg_version(hdr) != TIPC_VERSION))
+ return false;
+
+ msz = msg_size(hdr);
+ if (unlikely(msz < hsz))
+ return false;
+ if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
+ return false;
+ if (unlikely(skb->len < msz))
+ return false;
+
+ TIPC_SKB_CB(skb)->validated = 1;
+ return true;
+}
+
+/**
+ * tipc_msg_fragment - build a fragment skb list for TIPC message
+ *
+ * @skb: TIPC message skb
+ * @hdr: internal msg header to be put on the top of the fragments
+ * @pktmax: max size of a fragment incl. the header
+ * @frags: returned fragment skb list
+ *
+ * Return: 0 if the fragmentation is successful, otherwise: -EINVAL
+ * or -ENOMEM
+ */
+int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr,
+ int pktmax, struct sk_buff_head *frags)
+{
+ int pktno, nof_fragms, dsz, dmax, eat;
+ struct tipc_msg *_hdr;
+ struct sk_buff *_skb;
+ u8 *data;
+
+ /* Non-linear buffer? */
+ if (skb_linearize(skb))
+ return -ENOMEM;
+
+ data = (u8 *)skb->data;
+ dsz = msg_size(buf_msg(skb));
+ dmax = pktmax - INT_H_SIZE;
+ if (dsz <= dmax || !dmax)
+ return -EINVAL;
+
+ nof_fragms = dsz / dmax + 1;
+ for (pktno = 1; pktno <= nof_fragms; pktno++) {
+ if (pktno < nof_fragms)
+ eat = dmax;
+ else
+ eat = dsz % dmax;
+ /* Allocate a new fragment */
+ _skb = tipc_buf_acquire(INT_H_SIZE + eat, GFP_ATOMIC);
+ if (!_skb)
+ goto error;
+ skb_orphan(_skb);
+ __skb_queue_tail(frags, _skb);
+ /* Copy header & data to the fragment */
+ skb_copy_to_linear_data(_skb, hdr, INT_H_SIZE);
+ skb_copy_to_linear_data_offset(_skb, INT_H_SIZE, data, eat);
+ data += eat;
+ /* Update the fragment's header */
+ _hdr = buf_msg(_skb);
+ msg_set_fragm_no(_hdr, pktno);
+ msg_set_nof_fragms(_hdr, nof_fragms);
+ msg_set_size(_hdr, INT_H_SIZE + eat);
+ }
+ return 0;
+
+error:
+ __skb_queue_purge(frags);
+ __skb_queue_head_init(frags);
+ return -ENOMEM;
+}
+
+/**
+ * tipc_msg_build - create buffer chain containing specified header and data
+ * @mhdr: Message header, to be prepended to data
+ * @m: User message
+ * @offset: buffer offset for fragmented messages (FIXME)
+ * @dsz: Total length of user data
+ * @pktmax: Max packet size that can be used
+ * @list: Buffer or chain of buffers to be returned to caller
+ *
+ * Note that the recursive call we are making here is safe, since it can
+ * logically go only one further level down.
+ *
+ * Return: message data size or errno: -ENOMEM, -EFAULT
+ */
+int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
+ int dsz, int pktmax, struct sk_buff_head *list)
+{
+ int mhsz = msg_hdr_sz(mhdr);
+ struct tipc_msg pkthdr;
+ int msz = mhsz + dsz;
+ int pktrem = pktmax;
+ struct sk_buff *skb;
+ int drem = dsz;
+ int pktno = 1;
+ char *pktpos;
+ int pktsz;
+ int rc;
+
+ msg_set_size(mhdr, msz);
+
+ /* No fragmentation needed? */
+ if (likely(msz <= pktmax)) {
+ skb = tipc_buf_acquire(msz, GFP_KERNEL);
+
+ /* Fall back to smaller MTU if node local message */
+ if (unlikely(!skb)) {
+ if (pktmax != MAX_MSG_SIZE)
+ return -ENOMEM;
+ rc = tipc_msg_build(mhdr, m, offset, dsz,
+ one_page_mtu, list);
+ if (rc != dsz)
+ return rc;
+ if (tipc_msg_assemble(list))
+ return dsz;
+ return -ENOMEM;
+ }
+ skb_orphan(skb);
+ __skb_queue_tail(list, skb);
+ skb_copy_to_linear_data(skb, mhdr, mhsz);
+ pktpos = skb->data + mhsz;
+ if (copy_from_iter_full(pktpos, dsz, &m->msg_iter))
+ return dsz;
+ rc = -EFAULT;
+ goto error;
+ }
+
+ /* Prepare reusable fragment header */
+ tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER,
+ FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
+ msg_set_size(&pkthdr, pktmax);
+ msg_set_fragm_no(&pkthdr, pktno);
+ msg_set_importance(&pkthdr, msg_importance(mhdr));
+
+ /* Prepare first fragment */
+ skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ skb_orphan(skb);
+ __skb_queue_tail(list, skb);
+ pktpos = skb->data;
+ skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
+ pktpos += INT_H_SIZE;
+ pktrem -= INT_H_SIZE;
+ skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
+ pktpos += mhsz;
+ pktrem -= mhsz;
+
+ do {
+ if (drem < pktrem)
+ pktrem = drem;
+
+ if (!copy_from_iter_full(pktpos, pktrem, &m->msg_iter)) {
+ rc = -EFAULT;
+ goto error;
+ }
+ drem -= pktrem;
+
+ if (!drem)
+ break;
+
+ /* Prepare new fragment: */
+ if (drem < (pktmax - INT_H_SIZE))
+ pktsz = drem + INT_H_SIZE;
+ else
+ pktsz = pktmax;
+ skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
+ if (!skb) {
+ rc = -ENOMEM;
+ goto error;
+ }
+ skb_orphan(skb);
+ __skb_queue_tail(list, skb);
+ msg_set_type(&pkthdr, FRAGMENT);
+ msg_set_size(&pkthdr, pktsz);
+ msg_set_fragm_no(&pkthdr, ++pktno);
+ skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
+ pktpos = skb->data + INT_H_SIZE;
+ pktrem = pktsz - INT_H_SIZE;
+
+ } while (1);
+ msg_set_type(buf_msg(skb), LAST_FRAGMENT);
+ return dsz;
+error:
+ __skb_queue_purge(list);
+ __skb_queue_head_init(list);
+ return rc;
+}
+
+/**
+ * tipc_msg_bundle - Append contents of a buffer to tail of an existing one
+ * @bskb: the bundle buffer to append to
+ * @msg: message to be appended
+ * @max: max allowable size for the bundle buffer
+ *
+ * Return: "true" if bundling has been performed, otherwise "false"
+ */
+static bool tipc_msg_bundle(struct sk_buff *bskb, struct tipc_msg *msg,
+ u32 max)
+{
+ struct tipc_msg *bmsg = buf_msg(bskb);
+ u32 msz, bsz, offset, pad;
+
+ msz = msg_size(msg);
+ bsz = msg_size(bmsg);
+ offset = BUF_ALIGN(bsz);
+ pad = offset - bsz;
+
+ if (unlikely(skb_tailroom(bskb) < (pad + msz)))
+ return false;
+ if (unlikely(max < (offset + msz)))
+ return false;
+
+ skb_put(bskb, pad + msz);
+ skb_copy_to_linear_data_offset(bskb, offset, msg, msz);
+ msg_set_size(bmsg, offset + msz);
+ msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
+ return true;
+}
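
As a concrete example of the alignment arithmetic above: appending a 26-byte message to a bundle that is currently 70 bytes long gives offset = BUF_ALIGN(70) = 72, so 2 bytes of padding are inserted and the bundle grows to msg_size = 98 bytes, provided the bundle buffer has at least 28 bytes of tailroom and 98 does not exceed the allowed maximum.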
+
+/**
+ * tipc_msg_try_bundle - Try to bundle a new message to the last one
+ * @tskb: the last/target message to which the new one will be appended
+ * @skb: the new message skb pointer
+ * @mss: max message size (header inclusive)
+ * @dnode: destination node for the message
+ * @new_bundle: if this call made a new bundle or not
+ *
+ * Return: "true" if the new message skb is potential for bundling this time or
+ * later, in the case a bundling has been done this time, the skb is consumed
+ * (the skb pointer = NULL).
+ * Otherwise, "false" if the skb cannot be bundled at all.
+ */
+bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,
+ u32 dnode, bool *new_bundle)
+{
+ struct tipc_msg *msg, *inner, *outer;
+ u32 tsz;
+
+ /* First, check if the new buffer is suitable for bundling */
+ msg = buf_msg(*skb);
+ if (msg_user(msg) == MSG_FRAGMENTER)
+ return false;
+ if (msg_user(msg) == TUNNEL_PROTOCOL)
+ return false;
+ if (msg_user(msg) == BCAST_PROTOCOL)
+ return false;
+ if (mss <= INT_H_SIZE + msg_size(msg))
+ return false;
+
+ /* OK, but is there a last/target buffer to bundle into at all? */
+ if (unlikely(!tskb))
+ return true;
+
+ /* Is it a bundle already? Try to bundle the new message to it */
+ if (msg_user(buf_msg(tskb)) == MSG_BUNDLER) {
+ *new_bundle = false;
+ goto bundle;
+ }
+
+ /* Make a new bundle of the two messages if possible */
+ tsz = msg_size(buf_msg(tskb));
+ if (unlikely(mss < BUF_ALIGN(INT_H_SIZE + tsz) + msg_size(msg)))
+ return true;
+ if (unlikely(pskb_expand_head(tskb, INT_H_SIZE, mss - tsz - INT_H_SIZE,
+ GFP_ATOMIC)))
+ return true;
+ inner = buf_msg(tskb);
+ skb_push(tskb, INT_H_SIZE);
+ outer = buf_msg(tskb);
+ tipc_msg_init(msg_prevnode(inner), outer, MSG_BUNDLER, 0, INT_H_SIZE,
+ dnode);
+ msg_set_importance(outer, msg_importance(inner));
+ msg_set_size(outer, INT_H_SIZE + tsz);
+ msg_set_msgcnt(outer, 1);
+ *new_bundle = true;
+
+bundle:
+ if (likely(tipc_msg_bundle(tskb, msg, mss))) {
+ consume_skb(*skb);
+ *skb = NULL;
+ }
+ return true;
+}
+
+/**
+ * tipc_msg_extract(): extract bundled inner packet from buffer
+ * @skb: buffer to be extracted from.
+ * @iskb: extracted inner buffer, to be returned
+ * @pos: position in outer message of msg to be extracted.
+ * Returns position of next msg.
+ * Consumes outer buffer when last packet extracted
+ * Return: true when there is an extracted buffer, otherwise false
+ */
+bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
+{
+ struct tipc_msg *hdr, *ihdr;
+ int imsz;
+
+ *iskb = NULL;
+ if (unlikely(skb_linearize(skb)))
+ goto none;
+
+ hdr = buf_msg(skb);
+ if (unlikely(*pos > (msg_data_sz(hdr) - MIN_H_SIZE)))
+ goto none;
+
+ ihdr = (struct tipc_msg *)(msg_data(hdr) + *pos);
+ imsz = msg_size(ihdr);
+
+ if ((*pos + imsz) > msg_data_sz(hdr))
+ goto none;
+
+ *iskb = tipc_buf_acquire(imsz, GFP_ATOMIC);
+ if (!*iskb)
+ goto none;
+
+ skb_copy_to_linear_data(*iskb, ihdr, imsz);
+ if (unlikely(!tipc_msg_validate(iskb)))
+ goto none;
+
+ *pos += BUF_ALIGN(imsz);
+ return true;
+none:
+ kfree_skb(skb);
+ kfree_skb(*iskb);
+ *iskb = NULL;
+ return false;
+}
+
+/**
+ * tipc_msg_reverse(): swap source and destination addresses and add error code
+ * @own_node: originating node id for reversed message
+ * @skb: buffer containing message to be reversed; will be consumed
+ * @err: error code to be set in message, if any
+ * Replaces consumed buffer with new one when successful
+ * Return: true if success, otherwise false
+ */
+bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
+{
+ struct sk_buff *_skb = *skb;
+ struct tipc_msg *_hdr, *hdr;
+ int hlen, dlen;
+
+ if (skb_linearize(_skb))
+ goto exit;
+ _hdr = buf_msg(_skb);
+ dlen = min_t(uint, msg_data_sz(_hdr), MAX_FORWARD_SIZE);
+ hlen = msg_hdr_sz(_hdr);
+
+ if (msg_dest_droppable(_hdr))
+ goto exit;
+ if (msg_errcode(_hdr))
+ goto exit;
+
+ /* Never return SHORT header */
+ if (hlen == SHORT_H_SIZE)
+ hlen = BASIC_H_SIZE;
+
+ /* Don't return data along with SYN+, - sender has a clone */
+ if (msg_is_syn(_hdr) && err == TIPC_ERR_OVERLOAD)
+ dlen = 0;
+
+ /* Allocate new buffer to return */
+ *skb = tipc_buf_acquire(hlen + dlen, GFP_ATOMIC);
+ if (!*skb)
+ goto exit;
+ memcpy((*skb)->data, _skb->data, msg_hdr_sz(_hdr));
+ memcpy((*skb)->data + hlen, msg_data(_hdr), dlen);
+
+ /* Build reverse header in new buffer */
+ hdr = buf_msg(*skb);
+ msg_set_hdr_sz(hdr, hlen);
+ msg_set_errcode(hdr, err);
+ msg_set_non_seq(hdr, 0);
+ msg_set_origport(hdr, msg_destport(_hdr));
+ msg_set_destport(hdr, msg_origport(_hdr));
+ msg_set_destnode(hdr, msg_prevnode(_hdr));
+ msg_set_prevnode(hdr, own_node);
+ msg_set_orignode(hdr, own_node);
+ msg_set_size(hdr, hlen + dlen);
+ skb_orphan(_skb);
+ kfree_skb(_skb);
+ return true;
+exit:
+ kfree_skb(_skb);
+ *skb = NULL;
+ return false;
+}
+
+bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy)
+{
+ struct sk_buff *skb, *_skb;
+
+ skb_queue_walk(msg, skb) {
+ _skb = skb_clone(skb, GFP_ATOMIC);
+ if (!_skb) {
+ __skb_queue_purge(cpy);
+ pr_err_ratelimited("Failed to clone buffer chain\n");
+ return false;
+ }
+ __skb_queue_tail(cpy, _skb);
+ }
+ return true;
+}
+
+/**
+ * tipc_msg_lookup_dest(): try to find new destination for named message
+ * @net: pointer to associated network namespace
+ * @skb: the buffer containing the message.
+ * @err: error code to be used by caller if lookup fails
+ * Does not consume buffer
+ * Return: true if a destination is found, false otherwise
+ */
+bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
+{
+ struct tipc_msg *msg = buf_msg(skb);
+ u32 scope = msg_lookup_scope(msg);
+ u32 self = tipc_own_addr(net);
+ u32 inst = msg_nameinst(msg);
+ struct tipc_socket_addr sk;
+ struct tipc_uaddr ua;
+
+ if (!msg_isdata(msg))
+ return false;
+ if (!msg_named(msg))
+ return false;
+ if (msg_errcode(msg))
+ return false;
+ *err = TIPC_ERR_NO_NAME;
+ if (skb_linearize(skb))
+ return false;
+ msg = buf_msg(skb);
+ if (msg_reroute_cnt(msg))
+ return false;
+ tipc_uaddr(&ua, TIPC_SERVICE_RANGE, scope,
+ msg_nametype(msg), inst, inst);
+ sk.node = tipc_scope2node(net, scope);
+ if (!tipc_nametbl_lookup_anycast(net, &ua, &sk))
+ return false;
+ msg_incr_reroute_cnt(msg);
+ if (sk.node != self)
+ msg_set_prevnode(msg, self);
+ msg_set_destnode(msg, sk.node);
+ msg_set_destport(msg, sk.ref);
+ *err = TIPC_OK;
+
+ return true;
+}
+
+/* tipc_msg_assemble() - assemble chain of fragments into one message
+ */
+bool tipc_msg_assemble(struct sk_buff_head *list)
+{
+ struct sk_buff *skb, *tmp = NULL;
+
+ if (skb_queue_len(list) == 1)
+ return true;
+
+ while ((skb = __skb_dequeue(list))) {
+ skb->next = NULL;
+ if (tipc_buf_append(&tmp, &skb)) {
+ __skb_queue_tail(list, skb);
+ return true;
+ }
+ if (!tmp)
+ break;
+ }
+ __skb_queue_purge(list);
+ __skb_queue_head_init(list);
+ pr_warn("Failed do assemble buffer\n");
+ return false;
+}
+
+/* tipc_msg_reassemble() - clone a buffer chain of fragments and
+ * reassemble the clones into one message
+ */
+bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq)
+{
+ struct sk_buff *skb, *_skb;
+ struct sk_buff *frag = NULL;
+ struct sk_buff *head = NULL;
+ int hdr_len;
+
+ /* Copy header if single buffer */
+ if (skb_queue_len(list) == 1) {
+ skb = skb_peek(list);
+ hdr_len = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
+ _skb = __pskb_copy(skb, hdr_len, GFP_ATOMIC);
+ if (!_skb)
+ return false;
+ __skb_queue_tail(rcvq, _skb);
+ return true;
+ }
+
+ /* Clone all fragments and reassemble */
+ skb_queue_walk(list, skb) {
+ frag = skb_clone(skb, GFP_ATOMIC);
+ if (!frag)
+ goto error;
+ frag->next = NULL;
+ if (tipc_buf_append(&head, &frag))
+ break;
+ if (!head)
+ goto error;
+ }
+ __skb_queue_tail(rcvq, frag);
+ return true;
+error:
+ pr_warn("Failed do clone local mcast rcv buffer\n");
+ kfree_skb(head);
+ return false;
+}
+
+bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
+ struct sk_buff_head *cpy)
+{
+ struct sk_buff *skb, *_skb;
+
+ skb_queue_walk(msg, skb) {
+ _skb = pskb_copy(skb, GFP_ATOMIC);
+ if (!_skb) {
+ __skb_queue_purge(cpy);
+ return false;
+ }
+ msg_set_destnode(buf_msg(_skb), dst);
+ __skb_queue_tail(cpy, _skb);
+ }
+ return true;
+}
+
+/* __tipc_skb_queue_sorted(): sort pkt into list according to sequence number
+ * @list: list to be appended to
+ * @seqno: sequence number of buffer to add
+ * @skb: buffer to add
+ */
+bool __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
+ struct sk_buff *skb)
+{
+ struct sk_buff *_skb, *tmp;
+
+ if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) {
+ __skb_queue_head(list, skb);
+ return true;
+ }
+
+ if (more(seqno, buf_seqno(skb_peek_tail(list)))) {
+ __skb_queue_tail(list, skb);
+ return true;
+ }
+
+ skb_queue_walk_safe(list, _skb, tmp) {
+ if (more(seqno, buf_seqno(_skb)))
+ continue;
+ if (seqno == buf_seqno(_skb))
+ break;
+ __skb_queue_before(list, _skb, skb);
+ return true;
+ }
+ kfree_skb(skb);
+ return false;
+}
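
The less() and more() helpers used above are defined elsewhere in the TIPC core and compare 16-bit sequence numbers in serial-number (mod 2^16) arithmetic, so ordering stays correct across wrap-around. A self-contained sketch of that style of comparison, for illustration only (the in-tree definitions may differ in detail):

/* Sketch: serial-number ordering over a 16-bit sequence space */
static inline bool seqno_less(u16 left, u16 right)
{
	u16 diff = right - left;

	return diff != 0 && diff < 32768;	/* left precedes right */
}

static inline bool seqno_more(u16 left, u16 right)
{
	return seqno_less(right, left);
}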
+
+void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb,
+ struct sk_buff_head *xmitq)
+{
+ if (tipc_msg_reverse(tipc_own_addr(net), &skb, err))
+ __skb_queue_tail(xmitq, skb);
+}
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
new file mode 100644
index 0000000000..c5eec16213
--- /dev/null
+++ b/net/tipc/msg.h
@@ -0,0 +1,1310 @@
+/*
+ * net/tipc/msg.h: Include file for TIPC message header routines
+ *
+ * Copyright (c) 2000-2007, 2014-2017 Ericsson AB
+ * Copyright (c) 2005-2008, 2010-2011, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_MSG_H
+#define _TIPC_MSG_H
+
+#include <linux/tipc.h>
+#include "core.h"
+
+/*
+ * Constants and routines used to read and write TIPC payload message headers
+ *
+ * Note: Some items are also used with TIPC internal message headers
+ */
+#define TIPC_VERSION 2
+struct plist;
+
+/*
+ * Payload message users are defined in TIPC's public API:
+ * - TIPC_LOW_IMPORTANCE
+ * - TIPC_MEDIUM_IMPORTANCE
+ * - TIPC_HIGH_IMPORTANCE
+ * - TIPC_CRITICAL_IMPORTANCE
+ */
+#define TIPC_SYSTEM_IMPORTANCE 4
+
+
+/*
+ * Payload message types
+ */
+#define TIPC_CONN_MSG 0
+#define TIPC_MCAST_MSG 1
+#define TIPC_NAMED_MSG 2
+#define TIPC_DIRECT_MSG 3
+#define TIPC_GRP_MEMBER_EVT 4
+#define TIPC_GRP_BCAST_MSG 5
+#define TIPC_GRP_MCAST_MSG 6
+#define TIPC_GRP_UCAST_MSG 7
+
+/*
+ * Internal message users
+ */
+#define BCAST_PROTOCOL 5
+#define MSG_BUNDLER 6
+#define LINK_PROTOCOL 7
+#define CONN_MANAGER 8
+#define GROUP_PROTOCOL 9
+#define TUNNEL_PROTOCOL 10
+#define NAME_DISTRIBUTOR 11
+#define MSG_FRAGMENTER 12
+#define LINK_CONFIG 13
+#define MSG_CRYPTO 14
+#define SOCK_WAKEUP 14 /* pseudo user */
+#define TOP_SRV 15 /* pseudo user */
+
+/*
+ * Message header sizes
+ */
+#define SHORT_H_SIZE 24 /* In-cluster basic payload message */
+#define BASIC_H_SIZE 32 /* Basic payload message */
+#define NAMED_H_SIZE 40 /* Named payload message */
+#define MCAST_H_SIZE 44 /* Multicast payload message */
+#define GROUP_H_SIZE 44 /* Group payload message */
+#define INT_H_SIZE 40 /* Internal messages */
+#define MIN_H_SIZE 24 /* Smallest legal TIPC header size */
+#define MAX_H_SIZE 60 /* Largest possible TIPC header size */
+
+#define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE)
+#define TIPC_MEDIA_INFO_OFFSET 5
+
+extern const int one_page_mtu;
+
+struct tipc_skb_cb {
+ union {
+ struct {
+ struct sk_buff *tail;
+ unsigned long nxt_retr;
+ unsigned long retr_stamp;
+ u32 bytes_read;
+ u32 orig_member;
+ u16 chain_imp;
+ u16 ackers;
+ u16 retr_cnt;
+ } __packed;
+#ifdef CONFIG_TIPC_CRYPTO
+ struct {
+ struct tipc_crypto *rx;
+ struct tipc_aead *last;
+ u8 recurs;
+ } tx_clone_ctx __packed;
+#endif
+ } __packed;
+ union {
+ struct {
+ u8 validated:1;
+#ifdef CONFIG_TIPC_CRYPTO
+ u8 encrypted:1;
+ u8 decrypted:1;
+#define SKB_PROBING 1
+#define SKB_GRACING 2
+ u8 xmit_type:2;
+ u8 tx_clone_deferred:1;
+#endif
+ };
+ u8 flags;
+ };
+ u8 reserved;
+#ifdef CONFIG_TIPC_CRYPTO
+ void *crypto_ctx;
+#endif
+} __packed;
+
+#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
+
+struct tipc_msg {
+ __be32 hdr[15];
+};
+
+/* struct tipc_gap_ack - TIPC Gap ACK block
+ * @ack: seqno of the last consecutive packet in link deferdq
+ * @gap: number of gap packets since the last ack
+ *
+ * E.g:
+ * link deferdq: 1 2 3 4 10 11 13 14 15 20
+ * --> Gap ACK blocks: <4, 5>, <11, 1>, <15, 4>, <20, 0>
+ */
+struct tipc_gap_ack {
+ __be16 ack;
+ __be16 gap;
+};
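
To make the encoding above concrete, the following user-space style sketch (illustration only, not part of this patch) derives <ack, gap> pairs from a sorted array of received sequence numbers, reproducing the example in the comment:

/* Sketch: 1 2 3 4 10 11 13 14 15 20 -> <4,5> <11,1> <15,4> <20,0>.
 * Plain u16 values are used here instead of the on-wire __be16 fields.
 */
struct gack_example { u16 ack; u16 gap; };

static int build_gap_acks(const u16 *rcv, int cnt,
			  struct gack_example *out, int max)
{
	int i, n = 0;

	for (i = 0; i < cnt && n < max; i++) {
		/* extend the current run while numbers stay consecutive */
		if (i + 1 < cnt && (u16)(rcv[i + 1] - rcv[i]) == 1)
			continue;
		out[n].ack = rcv[i];
		out[n].gap = (i + 1 < cnt) ? (u16)(rcv[i + 1] - rcv[i] - 1) : 0;
		n++;
	}
	return n;	/* number of blocks produced */
}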
+
+/* struct tipc_gap_ack_blks
+ * @len: actual length of the record
+ * @ugack_cnt: number of Gap ACK blocks for unicast (following the broadcast
+ * ones)
+ * @start_index: starting index for "valid" broadcast Gap ACK blocks
+ * @bgack_cnt: number of Gap ACK blocks for broadcast in the record
+ * @gacks: array of Gap ACK blocks
+ *
+ * 31 16 15 0
+ * +-------------+-------------+-------------+-------------+
+ * | bgack_cnt | ugack_cnt | len |
+ * +-------------+-------------+-------------+-------------+ -
+ * | gap | ack | |
+ * +-------------+-------------+-------------+-------------+ > bc gacks
+ * : : : |
+ * +-------------+-------------+-------------+-------------+ -
+ * | gap | ack | |
+ * +-------------+-------------+-------------+-------------+ > uc gacks
+ * : : : |
+ * +-------------+-------------+-------------+-------------+ -
+ */
+struct tipc_gap_ack_blks {
+ __be16 len;
+ union {
+ u8 ugack_cnt;
+ u8 start_index;
+ };
+ u8 bgack_cnt;
+ struct tipc_gap_ack gacks[];
+};
+
+#define MAX_GAP_ACK_BLKS 128
+#define MAX_GAP_ACK_BLKS_SZ (sizeof(struct tipc_gap_ack_blks) + \
+ sizeof(struct tipc_gap_ack) * MAX_GAP_ACK_BLKS)
+
+static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
+{
+ return (struct tipc_msg *)skb->data;
+}
+
+static inline u32 msg_word(struct tipc_msg *m, u32 pos)
+{
+ return ntohl(m->hdr[pos]);
+}
+
+static inline void msg_set_word(struct tipc_msg *m, u32 w, u32 val)
+{
+ m->hdr[w] = htonl(val);
+}
+
+static inline u32 msg_bits(struct tipc_msg *m, u32 w, u32 pos, u32 mask)
+{
+ return (msg_word(m, w) >> pos) & mask;
+}
+
+static inline void msg_set_bits(struct tipc_msg *m, u32 w,
+ u32 pos, u32 mask, u32 val)
+{
+ val = (val & mask) << pos;
+ mask = mask << pos;
+ m->hdr[w] &= ~htonl(mask);
+ m->hdr[w] |= htonl(val);
+}
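
All header accessors below are built on these two helpers, which pack fields into 32-bit big-endian words. As a quick worked example: the version field lives in bits 29..31 of word 0, so msg_set_version() writes TIPC_VERSION (2) as 2 << 29 and msg_version() recovers it by shifting right 29 and masking with 0x7. An equivalent host-order sketch, for illustration only:

/* Sketch of the packing done by msg_set_bits()/msg_bits() for the
 * version field (word 0, position 29, mask 0x7), in host byte order.
 */
static inline u32 pack_version(u32 word0, u32 version)
{
	word0 &= ~(0x7u << 29);			/* clear the field */
	word0 |= (version & 0x7u) << 29;	/* insert the new value */
	return word0;
}

static inline u32 unpack_version(u32 word0)
{
	return (word0 >> 29) & 0x7u;
}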
+
+/*
+ * Word 0
+ */
+static inline u32 msg_version(struct tipc_msg *m)
+{
+ return msg_bits(m, 0, 29, 7);
+}
+
+static inline void msg_set_version(struct tipc_msg *m)
+{
+ msg_set_bits(m, 0, 29, 7, TIPC_VERSION);
+}
+
+static inline u32 msg_user(struct tipc_msg *m)
+{
+ return msg_bits(m, 0, 25, 0xf);
+}
+
+static inline u32 msg_isdata(struct tipc_msg *m)
+{
+ return msg_user(m) <= TIPC_CRITICAL_IMPORTANCE;
+}
+
+static inline void msg_set_user(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 0, 25, 0xf, n);
+}
+
+static inline u32 msg_hdr_sz(struct tipc_msg *m)
+{
+ return msg_bits(m, 0, 21, 0xf) << 2;
+}
+
+static inline void msg_set_hdr_sz(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 0, 21, 0xf, n>>2);
+}
+
+static inline u32 msg_size(struct tipc_msg *m)
+{
+ return msg_bits(m, 0, 0, 0x1ffff);
+}
+
+static inline u32 msg_blocks(struct tipc_msg *m)
+{
+ return (msg_size(m) / 1024) + 1;
+}
+
+static inline u32 msg_data_sz(struct tipc_msg *m)
+{
+ return msg_size(m) - msg_hdr_sz(m);
+}
+
+static inline int msg_non_seq(struct tipc_msg *m)
+{
+ return msg_bits(m, 0, 20, 1);
+}
+
+static inline void msg_set_non_seq(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 0, 20, 1, n);
+}
+
+static inline int msg_is_syn(struct tipc_msg *m)
+{
+ return msg_bits(m, 0, 17, 1);
+}
+
+static inline void msg_set_syn(struct tipc_msg *m, u32 d)
+{
+ msg_set_bits(m, 0, 17, 1, d);
+}
+
+static inline int msg_dest_droppable(struct tipc_msg *m)
+{
+ return msg_bits(m, 0, 19, 1);
+}
+
+static inline void msg_set_dest_droppable(struct tipc_msg *m, u32 d)
+{
+ msg_set_bits(m, 0, 19, 1, d);
+}
+
+static inline int msg_is_keepalive(struct tipc_msg *m)
+{
+ return msg_bits(m, 0, 19, 1);
+}
+
+static inline void msg_set_is_keepalive(struct tipc_msg *m, u32 d)
+{
+ msg_set_bits(m, 0, 19, 1, d);
+}
+
+static inline int msg_src_droppable(struct tipc_msg *m)
+{
+ return msg_bits(m, 0, 18, 1);
+}
+
+static inline void msg_set_src_droppable(struct tipc_msg *m, u32 d)
+{
+ msg_set_bits(m, 0, 18, 1, d);
+}
+
+static inline int msg_ack_required(struct tipc_msg *m)
+{
+ return msg_bits(m, 0, 18, 1);
+}
+
+static inline void msg_set_ack_required(struct tipc_msg *m)
+{
+ msg_set_bits(m, 0, 18, 1, 1);
+}
+
+static inline int msg_nagle_ack(struct tipc_msg *m)
+{
+ return msg_bits(m, 0, 18, 1);
+}
+
+static inline void msg_set_nagle_ack(struct tipc_msg *m)
+{
+ msg_set_bits(m, 0, 18, 1, 1);
+}
+
+static inline bool msg_is_rcast(struct tipc_msg *m)
+{
+ return msg_bits(m, 0, 18, 0x1);
+}
+
+static inline void msg_set_is_rcast(struct tipc_msg *m, bool d)
+{
+ msg_set_bits(m, 0, 18, 0x1, d);
+}
+
+static inline void msg_set_size(struct tipc_msg *m, u32 sz)
+{
+ m->hdr[0] = htonl((msg_word(m, 0) & ~0x1ffff) | sz);
+}
+
+static inline unchar *msg_data(struct tipc_msg *m)
+{
+ return ((unchar *)m) + msg_hdr_sz(m);
+}
+
+static inline struct tipc_msg *msg_inner_hdr(struct tipc_msg *m)
+{
+ return (struct tipc_msg *)msg_data(m);
+}
+
+/*
+ * Word 1
+ */
+static inline u32 msg_type(struct tipc_msg *m)
+{
+ return msg_bits(m, 1, 29, 0x7);
+}
+
+static inline void msg_set_type(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 1, 29, 0x7, n);
+}
+
+static inline int msg_in_group(struct tipc_msg *m)
+{
+ int mtyp = msg_type(m);
+
+ return mtyp >= TIPC_GRP_MEMBER_EVT && mtyp <= TIPC_GRP_UCAST_MSG;
+}
+
+static inline bool msg_is_grp_evt(struct tipc_msg *m)
+{
+ return msg_type(m) == TIPC_GRP_MEMBER_EVT;
+}
+
+static inline u32 msg_named(struct tipc_msg *m)
+{
+ return msg_type(m) == TIPC_NAMED_MSG;
+}
+
+static inline u32 msg_mcast(struct tipc_msg *m)
+{
+ int mtyp = msg_type(m);
+
+ return ((mtyp == TIPC_MCAST_MSG) || (mtyp == TIPC_GRP_BCAST_MSG) ||
+ (mtyp == TIPC_GRP_MCAST_MSG));
+}
+
+static inline u32 msg_connected(struct tipc_msg *m)
+{
+ return msg_type(m) == TIPC_CONN_MSG;
+}
+
+static inline u32 msg_direct(struct tipc_msg *m)
+{
+ return msg_type(m) == TIPC_DIRECT_MSG;
+}
+
+static inline u32 msg_errcode(struct tipc_msg *m)
+{
+ return msg_bits(m, 1, 25, 0xf);
+}
+
+static inline void msg_set_errcode(struct tipc_msg *m, u32 err)
+{
+ msg_set_bits(m, 1, 25, 0xf, err);
+}
+
+static inline void msg_set_bulk(struct tipc_msg *m)
+{
+ msg_set_bits(m, 1, 28, 0x1, 1);
+}
+
+static inline u32 msg_is_bulk(struct tipc_msg *m)
+{
+ return msg_bits(m, 1, 28, 0x1);
+}
+
+static inline void msg_set_last_bulk(struct tipc_msg *m)
+{
+ msg_set_bits(m, 1, 27, 0x1, 1);
+}
+
+static inline u32 msg_is_last_bulk(struct tipc_msg *m)
+{
+ return msg_bits(m, 1, 27, 0x1);
+}
+
+static inline void msg_set_non_legacy(struct tipc_msg *m)
+{
+ msg_set_bits(m, 1, 26, 0x1, 1);
+}
+
+static inline u32 msg_is_legacy(struct tipc_msg *m)
+{
+ return !msg_bits(m, 1, 26, 0x1);
+}
+
+static inline u32 msg_reroute_cnt(struct tipc_msg *m)
+{
+ return msg_bits(m, 1, 21, 0xf);
+}
+
+static inline void msg_incr_reroute_cnt(struct tipc_msg *m)
+{
+ msg_set_bits(m, 1, 21, 0xf, msg_reroute_cnt(m) + 1);
+}
+
+static inline u32 msg_lookup_scope(struct tipc_msg *m)
+{
+ return msg_bits(m, 1, 19, 0x3);
+}
+
+static inline void msg_set_lookup_scope(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 1, 19, 0x3, n);
+}
+
+static inline u16 msg_bcast_ack(struct tipc_msg *m)
+{
+ return msg_bits(m, 1, 0, 0xffff);
+}
+
+static inline void msg_set_bcast_ack(struct tipc_msg *m, u16 n)
+{
+ msg_set_bits(m, 1, 0, 0xffff, n);
+}
+
+/* Note: reusing bits in word 1 for ACTIVATE_MSG only, to re-synch
+ * link peer session number
+ */
+static inline bool msg_dest_session_valid(struct tipc_msg *m)
+{
+ return msg_bits(m, 1, 16, 0x1);
+}
+
+static inline void msg_set_dest_session_valid(struct tipc_msg *m, bool valid)
+{
+ msg_set_bits(m, 1, 16, 0x1, valid);
+}
+
+static inline u16 msg_dest_session(struct tipc_msg *m)
+{
+ return msg_bits(m, 1, 0, 0xffff);
+}
+
+static inline void msg_set_dest_session(struct tipc_msg *m, u16 n)
+{
+ msg_set_bits(m, 1, 0, 0xffff, n);
+}
+
+/*
+ * Word 2
+ */
+static inline u16 msg_ack(struct tipc_msg *m)
+{
+ return msg_bits(m, 2, 16, 0xffff);
+}
+
+static inline void msg_set_ack(struct tipc_msg *m, u16 n)
+{
+ msg_set_bits(m, 2, 16, 0xffff, n);
+}
+
+static inline u16 msg_seqno(struct tipc_msg *m)
+{
+ return msg_bits(m, 2, 0, 0xffff);
+}
+
+static inline void msg_set_seqno(struct tipc_msg *m, u16 n)
+{
+ msg_set_bits(m, 2, 0, 0xffff, n);
+}
+
+/*
+ * Words 3-10
+ */
+static inline u32 msg_importance(struct tipc_msg *m)
+{
+ int usr = msg_user(m);
+
+ if (likely((usr <= TIPC_CRITICAL_IMPORTANCE) && !msg_errcode(m)))
+ return usr;
+ if ((usr == MSG_FRAGMENTER) || (usr == MSG_BUNDLER))
+ return msg_bits(m, 9, 0, 0x7);
+ return TIPC_SYSTEM_IMPORTANCE;
+}
+
+static inline void msg_set_importance(struct tipc_msg *m, u32 i)
+{
+ int usr = msg_user(m);
+
+ if (likely((usr == MSG_FRAGMENTER) || (usr == MSG_BUNDLER)))
+ msg_set_bits(m, 9, 0, 0x7, i);
+ else if (i < TIPC_SYSTEM_IMPORTANCE)
+ msg_set_user(m, i);
+ else
+ pr_warn("Trying to set illegal importance in message\n");
+}
+
+static inline u32 msg_prevnode(struct tipc_msg *m)
+{
+ return msg_word(m, 3);
+}
+
+static inline void msg_set_prevnode(struct tipc_msg *m, u32 a)
+{
+ msg_set_word(m, 3, a);
+}
+
+static inline u32 msg_origport(struct tipc_msg *m)
+{
+ if (msg_user(m) == MSG_FRAGMENTER)
+ m = msg_inner_hdr(m);
+ return msg_word(m, 4);
+}
+
+static inline void msg_set_origport(struct tipc_msg *m, u32 p)
+{
+ msg_set_word(m, 4, p);
+}
+
+static inline u16 msg_named_seqno(struct tipc_msg *m)
+{
+ return msg_bits(m, 4, 0, 0xffff);
+}
+
+static inline void msg_set_named_seqno(struct tipc_msg *m, u16 n)
+{
+ msg_set_bits(m, 4, 0, 0xffff, n);
+}
+
+static inline u32 msg_destport(struct tipc_msg *m)
+{
+ return msg_word(m, 5);
+}
+
+static inline void msg_set_destport(struct tipc_msg *m, u32 p)
+{
+ msg_set_word(m, 5, p);
+}
+
+static inline u32 msg_mc_netid(struct tipc_msg *m)
+{
+ return msg_word(m, 5);
+}
+
+static inline void msg_set_mc_netid(struct tipc_msg *m, u32 p)
+{
+ msg_set_word(m, 5, p);
+}
+
+static inline int msg_short(struct tipc_msg *m)
+{
+ return msg_hdr_sz(m) == SHORT_H_SIZE;
+}
+
+static inline u32 msg_orignode(struct tipc_msg *m)
+{
+ if (likely(msg_short(m)))
+ return msg_prevnode(m);
+ return msg_word(m, 6);
+}
+
+static inline void msg_set_orignode(struct tipc_msg *m, u32 a)
+{
+ msg_set_word(m, 6, a);
+}
+
+static inline u32 msg_destnode(struct tipc_msg *m)
+{
+ return msg_word(m, 7);
+}
+
+static inline void msg_set_destnode(struct tipc_msg *m, u32 a)
+{
+ msg_set_word(m, 7, a);
+}
+
+static inline u32 msg_nametype(struct tipc_msg *m)
+{
+ return msg_word(m, 8);
+}
+
+static inline void msg_set_nametype(struct tipc_msg *m, u32 n)
+{
+ msg_set_word(m, 8, n);
+}
+
+static inline u32 msg_nameinst(struct tipc_msg *m)
+{
+ return msg_word(m, 9);
+}
+
+static inline u32 msg_namelower(struct tipc_msg *m)
+{
+ return msg_nameinst(m);
+}
+
+static inline void msg_set_namelower(struct tipc_msg *m, u32 n)
+{
+ msg_set_word(m, 9, n);
+}
+
+static inline void msg_set_nameinst(struct tipc_msg *m, u32 n)
+{
+ msg_set_namelower(m, n);
+}
+
+static inline u32 msg_nameupper(struct tipc_msg *m)
+{
+ return msg_word(m, 10);
+}
+
+static inline void msg_set_nameupper(struct tipc_msg *m, u32 n)
+{
+ msg_set_word(m, 10, n);
+}
+
+/*
+ * Constants and routines used to read and write TIPC internal message headers
+ */
+
+/*
+ * Connection management protocol message types
+ */
+#define CONN_PROBE 0
+#define CONN_PROBE_REPLY 1
+#define CONN_ACK 2
+
+/*
+ * Name distributor message types
+ */
+#define PUBLICATION 0
+#define WITHDRAWAL 1
+
+/*
+ * Segmentation message types
+ */
+#define FIRST_FRAGMENT 0
+#define FRAGMENT 1
+#define LAST_FRAGMENT 2
+
+/*
+ * Link management protocol message types
+ */
+#define STATE_MSG 0
+#define RESET_MSG 1
+#define ACTIVATE_MSG 2
+
+/*
+ * Changeover tunnel message types
+ */
+#define SYNCH_MSG 0
+#define FAILOVER_MSG 1
+
+/*
+ * Config protocol message types
+ */
+#define DSC_REQ_MSG 0
+#define DSC_RESP_MSG 1
+#define DSC_TRIAL_MSG 2
+#define DSC_TRIAL_FAIL_MSG 3
+
+/*
+ * Group protocol message types
+ */
+#define GRP_JOIN_MSG 0
+#define GRP_LEAVE_MSG 1
+#define GRP_ADV_MSG 2
+#define GRP_ACK_MSG 3
+#define GRP_RECLAIM_MSG 4
+#define GRP_REMIT_MSG 5
+
+/* Crypto message types */
+#define KEY_DISTR_MSG 0
+
+/*
+ * Word 1
+ */
+static inline u32 msg_seq_gap(struct tipc_msg *m)
+{
+ return msg_bits(m, 1, 16, 0x1fff);
+}
+
+static inline void msg_set_seq_gap(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 1, 16, 0x1fff, n);
+}
+
+static inline u32 msg_node_sig(struct tipc_msg *m)
+{
+ return msg_bits(m, 1, 0, 0xffff);
+}
+
+static inline void msg_set_node_sig(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 1, 0, 0xffff, n);
+}
+
+static inline u32 msg_node_capabilities(struct tipc_msg *m)
+{
+ return msg_bits(m, 1, 15, 0x1fff);
+}
+
+static inline void msg_set_node_capabilities(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 1, 15, 0x1fff, n);
+}
+
+/*
+ * Word 2
+ */
+static inline u32 msg_dest_domain(struct tipc_msg *m)
+{
+ return msg_word(m, 2);
+}
+
+static inline void msg_set_dest_domain(struct tipc_msg *m, u32 n)
+{
+ msg_set_word(m, 2, n);
+}
+
+static inline void msg_set_bcgap_after(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 2, 16, 0xffff, n);
+}
+
+static inline u32 msg_bcgap_to(struct tipc_msg *m)
+{
+ return msg_bits(m, 2, 0, 0xffff);
+}
+
+static inline void msg_set_bcgap_to(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 2, 0, 0xffff, n);
+}
+
+/*
+ * Word 4
+ */
+static inline u32 msg_last_bcast(struct tipc_msg *m)
+{
+ return msg_bits(m, 4, 16, 0xffff);
+}
+
+static inline u32 msg_bc_snd_nxt(struct tipc_msg *m)
+{
+ return msg_last_bcast(m) + 1;
+}
+
+static inline void msg_set_last_bcast(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 4, 16, 0xffff, n);
+}
+
+static inline u32 msg_nof_fragms(struct tipc_msg *m)
+{
+ return msg_bits(m, 4, 0, 0xffff);
+}
+
+static inline void msg_set_nof_fragms(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 4, 0, 0xffff, n);
+}
+
+static inline u32 msg_fragm_no(struct tipc_msg *m)
+{
+ return msg_bits(m, 4, 16, 0xffff);
+}
+
+static inline void msg_set_fragm_no(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 4, 16, 0xffff, n);
+}
+
+static inline u16 msg_next_sent(struct tipc_msg *m)
+{
+ return msg_bits(m, 4, 0, 0xffff);
+}
+
+static inline void msg_set_next_sent(struct tipc_msg *m, u16 n)
+{
+ msg_set_bits(m, 4, 0, 0xffff, n);
+}
+
+static inline u32 msg_bc_netid(struct tipc_msg *m)
+{
+ return msg_word(m, 4);
+}
+
+static inline void msg_set_bc_netid(struct tipc_msg *m, u32 id)
+{
+ msg_set_word(m, 4, id);
+}
+
+static inline u32 msg_link_selector(struct tipc_msg *m)
+{
+ if (msg_user(m) == MSG_FRAGMENTER)
+ m = (void *)msg_data(m);
+ return msg_bits(m, 4, 0, 1);
+}
+
+/*
+ * Word 5
+ */
+static inline u16 msg_session(struct tipc_msg *m)
+{
+ return msg_bits(m, 5, 16, 0xffff);
+}
+
+static inline void msg_set_session(struct tipc_msg *m, u16 n)
+{
+ msg_set_bits(m, 5, 16, 0xffff, n);
+}
+
+static inline u32 msg_probe(struct tipc_msg *m)
+{
+ return msg_bits(m, 5, 0, 1);
+}
+
+static inline void msg_set_probe(struct tipc_msg *m, u32 val)
+{
+ msg_set_bits(m, 5, 0, 1, val);
+}
+
+static inline char msg_net_plane(struct tipc_msg *m)
+{
+ return msg_bits(m, 5, 1, 7) + 'A';
+}
+
+static inline void msg_set_net_plane(struct tipc_msg *m, char n)
+{
+ msg_set_bits(m, 5, 1, 7, (n - 'A'));
+}
+
+static inline u32 msg_linkprio(struct tipc_msg *m)
+{
+ return msg_bits(m, 5, 4, 0x1f);
+}
+
+static inline void msg_set_linkprio(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 5, 4, 0x1f, n);
+}
+
+static inline u32 msg_bearer_id(struct tipc_msg *m)
+{
+ return msg_bits(m, 5, 9, 0x7);
+}
+
+static inline void msg_set_bearer_id(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 5, 9, 0x7, n);
+}
+
+static inline u32 msg_redundant_link(struct tipc_msg *m)
+{
+ return msg_bits(m, 5, 12, 0x1);
+}
+
+static inline void msg_set_redundant_link(struct tipc_msg *m, u32 r)
+{
+ msg_set_bits(m, 5, 12, 0x1, r);
+}
+
+static inline u32 msg_peer_stopping(struct tipc_msg *m)
+{
+ return msg_bits(m, 5, 13, 0x1);
+}
+
+static inline void msg_set_peer_stopping(struct tipc_msg *m, u32 s)
+{
+ msg_set_bits(m, 5, 13, 0x1, s);
+}
+
+static inline bool msg_bc_ack_invalid(struct tipc_msg *m)
+{
+ switch (msg_user(m)) {
+ case BCAST_PROTOCOL:
+ case NAME_DISTRIBUTOR:
+ case LINK_PROTOCOL:
+ return msg_bits(m, 5, 14, 0x1);
+ default:
+ return false;
+ }
+}
+
+static inline void msg_set_bc_ack_invalid(struct tipc_msg *m, bool invalid)
+{
+ msg_set_bits(m, 5, 14, 0x1, invalid);
+}
+
+static inline char *msg_media_addr(struct tipc_msg *m)
+{
+ return (char *)&m->hdr[TIPC_MEDIA_INFO_OFFSET];
+}
+
+static inline u32 msg_bc_gap(struct tipc_msg *m)
+{
+ return msg_bits(m, 8, 0, 0x3ff);
+}
+
+static inline void msg_set_bc_gap(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 8, 0, 0x3ff, n);
+}
+
+/*
+ * Word 9
+ */
+static inline u16 msg_msgcnt(struct tipc_msg *m)
+{
+ return msg_bits(m, 9, 16, 0xffff);
+}
+
+static inline void msg_set_msgcnt(struct tipc_msg *m, u16 n)
+{
+ msg_set_bits(m, 9, 16, 0xffff, n);
+}
+
+static inline u16 msg_syncpt(struct tipc_msg *m)
+{
+ return msg_bits(m, 9, 16, 0xffff);
+}
+
+static inline void msg_set_syncpt(struct tipc_msg *m, u16 n)
+{
+ msg_set_bits(m, 9, 16, 0xffff, n);
+}
+
+static inline u32 msg_conn_ack(struct tipc_msg *m)
+{
+ return msg_bits(m, 9, 16, 0xffff);
+}
+
+static inline void msg_set_conn_ack(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 9, 16, 0xffff, n);
+}
+
+static inline u16 msg_adv_win(struct tipc_msg *m)
+{
+ return msg_bits(m, 9, 0, 0xffff);
+}
+
+static inline void msg_set_adv_win(struct tipc_msg *m, u16 n)
+{
+ msg_set_bits(m, 9, 0, 0xffff, n);
+}
+
+static inline u32 msg_max_pkt(struct tipc_msg *m)
+{
+ return msg_bits(m, 9, 16, 0xffff) * 4;
+}
+
+static inline void msg_set_max_pkt(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 9, 16, 0xffff, (n / 4));
+}
+
+static inline u32 msg_link_tolerance(struct tipc_msg *m)
+{
+ return msg_bits(m, 9, 0, 0xffff);
+}
+
+static inline void msg_set_link_tolerance(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 9, 0, 0xffff, n);
+}
+
+static inline u16 msg_grp_bc_syncpt(struct tipc_msg *m)
+{
+ return msg_bits(m, 9, 16, 0xffff);
+}
+
+static inline void msg_set_grp_bc_syncpt(struct tipc_msg *m, u16 n)
+{
+ msg_set_bits(m, 9, 16, 0xffff, n);
+}
+
+static inline u16 msg_grp_bc_acked(struct tipc_msg *m)
+{
+ return msg_bits(m, 9, 16, 0xffff);
+}
+
+static inline void msg_set_grp_bc_acked(struct tipc_msg *m, u16 n)
+{
+ msg_set_bits(m, 9, 16, 0xffff, n);
+}
+
+static inline u16 msg_grp_remitted(struct tipc_msg *m)
+{
+ return msg_bits(m, 9, 16, 0xffff);
+}
+
+static inline void msg_set_grp_remitted(struct tipc_msg *m, u16 n)
+{
+ msg_set_bits(m, 9, 16, 0xffff, n);
+}
+
+/* Word 10
+ */
+static inline u16 msg_grp_evt(struct tipc_msg *m)
+{
+ return msg_bits(m, 10, 0, 0x3);
+}
+
+static inline void msg_set_grp_evt(struct tipc_msg *m, int n)
+{
+ msg_set_bits(m, 10, 0, 0x3, n);
+}
+
+static inline u16 msg_grp_bc_ack_req(struct tipc_msg *m)
+{
+ return msg_bits(m, 10, 0, 0x1);
+}
+
+static inline void msg_set_grp_bc_ack_req(struct tipc_msg *m, bool n)
+{
+ msg_set_bits(m, 10, 0, 0x1, n);
+}
+
+static inline u16 msg_grp_bc_seqno(struct tipc_msg *m)
+{
+ return msg_bits(m, 10, 16, 0xffff);
+}
+
+static inline void msg_set_grp_bc_seqno(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 10, 16, 0xffff, n);
+}
+
+static inline bool msg_peer_link_is_up(struct tipc_msg *m)
+{
+ if (likely(msg_user(m) != LINK_PROTOCOL))
+ return true;
+ if (msg_type(m) == STATE_MSG)
+ return true;
+ return false;
+}
+
+static inline bool msg_peer_node_is_up(struct tipc_msg *m)
+{
+ if (msg_peer_link_is_up(m))
+ return true;
+ return msg_redundant_link(m);
+}
+
+static inline bool msg_is_reset(struct tipc_msg *hdr)
+{
+ return (msg_user(hdr) == LINK_PROTOCOL) && (msg_type(hdr) == RESET_MSG);
+}
+
+/* Word 13
+ */
+static inline void msg_set_peer_net_hash(struct tipc_msg *m, u32 n)
+{
+ msg_set_word(m, 13, n);
+}
+
+static inline u32 msg_peer_net_hash(struct tipc_msg *m)
+{
+ return msg_word(m, 13);
+}
+
+/* Word 14
+ */
+static inline u32 msg_sugg_node_addr(struct tipc_msg *m)
+{
+ return msg_word(m, 14);
+}
+
+static inline void msg_set_sugg_node_addr(struct tipc_msg *m, u32 n)
+{
+ msg_set_word(m, 14, n);
+}
+
+static inline void msg_set_node_id(struct tipc_msg *hdr, u8 *id)
+{
+ memcpy(msg_data(hdr), id, 16);
+}
+
+static inline u8 *msg_node_id(struct tipc_msg *hdr)
+{
+ return (u8 *)msg_data(hdr);
+}
+
+struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp);
+bool tipc_msg_validate(struct sk_buff **_skb);
+bool tipc_msg_reverse(u32 own_addr, struct sk_buff **skb, int err);
+void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb,
+ struct sk_buff_head *xmitq);
+void tipc_msg_init(u32 own_addr, struct tipc_msg *m, u32 user, u32 type,
+ u32 hsize, u32 destnode);
+struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
+ uint data_sz, u32 dnode, u32 onode,
+ u32 dport, u32 oport, int errcode);
+int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf);
+bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,
+ u32 dnode, bool *new_bundle);
+bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos);
+int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr,
+ int pktmax, struct sk_buff_head *frags);
+int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
+ int offset, int dsz, int mtu, struct sk_buff_head *list);
+int tipc_msg_append(struct tipc_msg *hdr, struct msghdr *m, int dlen,
+ int mss, struct sk_buff_head *txq);
+bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err);
+bool tipc_msg_assemble(struct sk_buff_head *list);
+bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq);
+bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
+ struct sk_buff_head *cpy);
+bool __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
+ struct sk_buff *skb);
+bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy);
+
+static inline u16 buf_seqno(struct sk_buff *skb)
+{
+ return msg_seqno(buf_msg(skb));
+}
+
+static inline int buf_roundup_len(struct sk_buff *skb)
+{
+ return (skb->len / 1024 + 1) * 1024;
+}
+
+/* tipc_skb_peek(): peek and reserve first buffer in list
+ * @list: list to be peeked in
+ * @lock: lock protecting the buffer list
+ * Returns pointer to first buffer in list, if any
+ */
+static inline struct sk_buff *tipc_skb_peek(struct sk_buff_head *list,
+ spinlock_t *lock)
+{
+ struct sk_buff *skb;
+
+ spin_lock_bh(lock);
+ skb = skb_peek(list);
+ if (skb)
+ skb_get(skb);
+ spin_unlock_bh(lock);
+ return skb;
+}
+
+/* tipc_skb_peek_port(): find a destination port, ignoring all destinations
+ * up to and including 'filter'.
+ * Note: ignoring previously tried destinations minimizes the risk of
+ * contention on the socket lock
+ * @list: list to be peeked in
+ * @filter: last destination to be ignored from search
+ * Returns a destination port number, if applicable.
+ */
+static inline u32 tipc_skb_peek_port(struct sk_buff_head *list, u32 filter)
+{
+ struct sk_buff *skb;
+ u32 dport = 0;
+ bool ignore = true;
+
+ spin_lock_bh(&list->lock);
+ skb_queue_walk(list, skb) {
+ dport = msg_destport(buf_msg(skb));
+ if (!filter || skb_queue_is_last(list, skb))
+ break;
+ if (dport == filter)
+ ignore = false;
+ else if (!ignore)
+ break;
+ }
+ spin_unlock_bh(&list->lock);
+ return dport;
+}
+
+/* tipc_skb_dequeue(): unlink first buffer with dest 'dport' from list
+ * @list: list to be unlinked from
+ * @dport: selection criteria for buffer to unlink
+ */
+static inline struct sk_buff *tipc_skb_dequeue(struct sk_buff_head *list,
+ u32 dport)
+{
+ struct sk_buff *_skb, *tmp, *skb = NULL;
+
+ spin_lock_bh(&list->lock);
+ skb_queue_walk_safe(list, _skb, tmp) {
+ if (msg_destport(buf_msg(_skb)) == dport) {
+ __skb_unlink(_skb, list);
+ skb = _skb;
+ break;
+ }
+ }
+ spin_unlock_bh(&list->lock);
+ return skb;
+}
+
+/* tipc_skb_queue_splice_tail - append an skb list to lock protected list
+ * @list: the new list to append. Not lock protected
+ * @head: target list. Lock protected.
+ */
+static inline void tipc_skb_queue_splice_tail(struct sk_buff_head *list,
+ struct sk_buff_head *head)
+{
+ spin_lock_bh(&head->lock);
+ skb_queue_splice_tail(list, head);
+ spin_unlock_bh(&head->lock);
+}
+
+/* tipc_skb_queue_splice_tail_init - merge two lock protected skb lists
+ * @list: the new list to add. Lock protected. Will be reinitialized
+ * @head: target list. Lock protected.
+ */
+static inline void tipc_skb_queue_splice_tail_init(struct sk_buff_head *list,
+ struct sk_buff_head *head)
+{
+ struct sk_buff_head tmp;
+
+ __skb_queue_head_init(&tmp);
+
+ spin_lock_bh(&list->lock);
+ skb_queue_splice_tail_init(list, &tmp);
+ spin_unlock_bh(&list->lock);
+ tipc_skb_queue_splice_tail(&tmp, head);
+}
+
+/* __tipc_skb_dequeue() - dequeue the head skb according to expected seqno
+ * @list: list to be dequeued from
+ * @seqno: seqno of the expected msg
+ *
+ * returns skb dequeued from the list if its seqno is less than or equal to
+ * the expected one, otherwise the skb is still held
+ *
+ * Note: must be used with appropriate locks held only
+ */
+static inline struct sk_buff *__tipc_skb_dequeue(struct sk_buff_head *list,
+ u16 seqno)
+{
+ struct sk_buff *skb = skb_peek(list);
+
+ if (skb && less_eq(buf_seqno(skb), seqno)) {
+ __skb_unlink(skb, list);
+ return skb;
+ }
+ return NULL;
+}
+
+#endif
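
Several of the Word 9 accessors above share the same two 16-bit slots of one 32-bit header word (which slot carries which field depends on the message type), and msg_max_pkt()/msg_set_max_pkt() additionally store their value in units of 4 bytes. The following is a minimal, standalone sketch of that packing scheme in host byte order; the real msg_bits()/msg_set_bits() helpers operate on the network-byte-order header words instead.

#include <stdio.h>
#include <stdint.h>

/* Simplified host-order stand-ins for msg_bits()/msg_set_bits() */
static void set_bits(uint32_t *w, unsigned int pos, uint32_t mask, uint32_t val)
{
	*w &= ~(mask << pos);		/* clear the field */
	*w |= (val & mask) << pos;	/* insert the new value */
}

static uint32_t get_bits(uint32_t w, unsigned int pos, uint32_t mask)
{
	return (w >> pos) & mask;
}

int main(void)
{
	uint32_t word9 = 0;

	/* max_pkt lives in the upper 16 bits and is scaled by 4 bytes */
	set_bits(&word9, 16, 0xffff, 1500 / 4);
	/* adv_win shares the word, in the lower 16 bits */
	set_bits(&word9, 0, 0xffff, 50);

	printf("max_pkt=%u adv_win=%u\n",
	       get_bits(word9, 16, 0xffff) * 4, get_bits(word9, 0, 0xffff));
	return 0;
}

Compiled with any C compiler this prints max_pkt=1500 adv_win=50, mirroring how msg_max_pkt() recovers the byte value by multiplying the 16-bit field by 4.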
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
new file mode 100644
index 0000000000..190b49c5cb
--- /dev/null
+++ b/net/tipc/name_distr.c
@@ -0,0 +1,411 @@
+/*
+ * net/tipc/name_distr.c: TIPC name distribution code
+ *
+ * Copyright (c) 2000-2006, 2014-2019, Ericsson AB
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
+ * Copyright (c) 2020-2021, Red Hat Inc
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "link.h"
+#include "name_distr.h"
+
+int sysctl_tipc_named_timeout __read_mostly = 2000;
+
+/**
+ * publ_to_item - add publication info to a publication message
+ * @p: publication info
+ * @i: location of item in the message
+ */
+static void publ_to_item(struct distr_item *i, struct publication *p)
+{
+ i->type = htonl(p->sr.type);
+ i->lower = htonl(p->sr.lower);
+ i->upper = htonl(p->sr.upper);
+ i->port = htonl(p->sk.ref);
+ i->key = htonl(p->key);
+}
+
+/**
+ * named_prepare_buf - allocate & initialize a publication message
+ * @net: the associated network namespace
+ * @type: message type
+ * @size: payload size
+ * @dest: destination node
+ *
+ * The buffer returned is of size INT_H_SIZE + payload size
+ */
+static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
+ u32 dest)
+{
+ struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC);
+ u32 self = tipc_own_addr(net);
+ struct tipc_msg *msg;
+
+ if (buf != NULL) {
+ msg = buf_msg(buf);
+ tipc_msg_init(self, msg, NAME_DISTRIBUTOR,
+ type, INT_H_SIZE, dest);
+ msg_set_size(msg, INT_H_SIZE + size);
+ }
+ return buf;
+}
+
+/**
+ * tipc_named_publish - tell other nodes about a new publication by this node
+ * @net: the associated network namespace
+ * @p: the new publication
+ */
+struct sk_buff *tipc_named_publish(struct net *net, struct publication *p)
+{
+ struct name_table *nt = tipc_name_table(net);
+ struct distr_item *item;
+ struct sk_buff *skb;
+
+ if (p->scope == TIPC_NODE_SCOPE) {
+ list_add_tail_rcu(&p->binding_node, &nt->node_scope);
+ return NULL;
+ }
+ write_lock_bh(&nt->cluster_scope_lock);
+ list_add_tail(&p->binding_node, &nt->cluster_scope);
+ write_unlock_bh(&nt->cluster_scope_lock);
+ skb = named_prepare_buf(net, PUBLICATION, ITEM_SIZE, 0);
+ if (!skb) {
+ pr_warn("Publication distribution failure\n");
+ return NULL;
+ }
+ msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++);
+ msg_set_non_legacy(buf_msg(skb));
+ item = (struct distr_item *)msg_data(buf_msg(skb));
+ publ_to_item(item, p);
+ return skb;
+}
+
+/**
+ * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
+ * @net: the associated network namespace
+ * @p: the withdrawn publication
+ */
+struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *p)
+{
+ struct name_table *nt = tipc_name_table(net);
+ struct distr_item *item;
+ struct sk_buff *skb;
+
+ write_lock_bh(&nt->cluster_scope_lock);
+ list_del(&p->binding_node);
+ write_unlock_bh(&nt->cluster_scope_lock);
+ if (p->scope == TIPC_NODE_SCOPE)
+ return NULL;
+
+ skb = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0);
+ if (!skb) {
+ pr_warn("Withdrawal distribution failure\n");
+ return NULL;
+ }
+ msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++);
+ msg_set_non_legacy(buf_msg(skb));
+ item = (struct distr_item *)msg_data(buf_msg(skb));
+ publ_to_item(item, p);
+ return skb;
+}
+
+/**
+ * named_distribute - prepare name info for bulk distribution to another node
+ * @net: the associated network namespace
+ * @list: list of messages (buffers) to be returned from this function
+ * @dnode: node to be updated
+ * @pls: linked list of publication items to be packed into buffer chain
+ * @seqno: sequence number for this message
+ */
+static void named_distribute(struct net *net, struct sk_buff_head *list,
+ u32 dnode, struct list_head *pls, u16 seqno)
+{
+ struct publication *publ;
+ struct sk_buff *skb = NULL;
+ struct distr_item *item = NULL;
+ u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0, false) - INT_H_SIZE) /
+ ITEM_SIZE) * ITEM_SIZE;
+ u32 msg_rem = msg_dsz;
+ struct tipc_msg *hdr;
+
+ list_for_each_entry(publ, pls, binding_node) {
+ /* Prepare next buffer: */
+ if (!skb) {
+ skb = named_prepare_buf(net, PUBLICATION, msg_rem,
+ dnode);
+ if (!skb) {
+ pr_warn("Bulk publication failure\n");
+ return;
+ }
+ hdr = buf_msg(skb);
+ msg_set_bc_ack_invalid(hdr, true);
+ msg_set_bulk(hdr);
+ msg_set_non_legacy(hdr);
+ item = (struct distr_item *)msg_data(hdr);
+ }
+
+ /* Pack publication into message: */
+ publ_to_item(item, publ);
+ item++;
+ msg_rem -= ITEM_SIZE;
+
+ /* Append full buffer to list: */
+ if (!msg_rem) {
+ __skb_queue_tail(list, skb);
+ skb = NULL;
+ msg_rem = msg_dsz;
+ }
+ }
+ if (skb) {
+ hdr = buf_msg(skb);
+ msg_set_size(hdr, INT_H_SIZE + (msg_dsz - msg_rem));
+ skb_trim(skb, INT_H_SIZE + (msg_dsz - msg_rem));
+ __skb_queue_tail(list, skb);
+ }
+ hdr = buf_msg(skb_peek_tail(list));
+ msg_set_last_bulk(hdr);
+ msg_set_named_seqno(hdr, seqno);
+}
+
+/**
+ * tipc_named_node_up - tell specified node about all publications by this node
+ * @net: the associated network namespace
+ * @dnode: destination node
+ * @capabilities: peer node's capabilities
+ */
+void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities)
+{
+ struct name_table *nt = tipc_name_table(net);
+ struct tipc_net *tn = tipc_net(net);
+ struct sk_buff_head head;
+ u16 seqno;
+
+ __skb_queue_head_init(&head);
+ spin_lock_bh(&tn->nametbl_lock);
+ if (!(capabilities & TIPC_NAMED_BCAST))
+ nt->rc_dests++;
+ seqno = nt->snd_nxt;
+ spin_unlock_bh(&tn->nametbl_lock);
+
+ read_lock_bh(&nt->cluster_scope_lock);
+ named_distribute(net, &head, dnode, &nt->cluster_scope, seqno);
+ tipc_node_xmit(net, &head, dnode, 0);
+ read_unlock_bh(&nt->cluster_scope_lock);
+}
+
+/**
+ * tipc_publ_purge - remove publication associated with a failed node
+ * @net: the associated network namespace
+ * @p: the publication to remove
+ * @addr: failed node's address
+ *
+ * Invoked for each publication issued by a newly failed node.
+ * Removes publication structure from name table & deletes it.
+ */
+static void tipc_publ_purge(struct net *net, struct publication *p, u32 addr)
+{
+ struct tipc_net *tn = tipc_net(net);
+ struct publication *_p;
+ struct tipc_uaddr ua;
+
+ tipc_uaddr(&ua, TIPC_SERVICE_RANGE, p->scope, p->sr.type,
+ p->sr.lower, p->sr.upper);
+ spin_lock_bh(&tn->nametbl_lock);
+ _p = tipc_nametbl_remove_publ(net, &ua, &p->sk, p->key);
+ if (_p)
+ tipc_node_unsubscribe(net, &_p->binding_node, addr);
+ spin_unlock_bh(&tn->nametbl_lock);
+ if (_p)
+ kfree_rcu(_p, rcu);
+}
+
+void tipc_publ_notify(struct net *net, struct list_head *nsub_list,
+ u32 addr, u16 capabilities)
+{
+ struct name_table *nt = tipc_name_table(net);
+ struct tipc_net *tn = tipc_net(net);
+
+ struct publication *publ, *tmp;
+
+ list_for_each_entry_safe(publ, tmp, nsub_list, binding_node)
+ tipc_publ_purge(net, publ, addr);
+ spin_lock_bh(&tn->nametbl_lock);
+ if (!(capabilities & TIPC_NAMED_BCAST))
+ nt->rc_dests--;
+ spin_unlock_bh(&tn->nametbl_lock);
+}
+
+/**
+ * tipc_update_nametbl - try to process a nametable update and notify
+ * subscribers
+ * @net: the associated network namespace
+ * @i: location of item in the message
+ * @node: node address
+ * @dtype: name distributor message type
+ *
+ * tipc_nametbl_lock must be held.
+ * Return: true if the name table was updated successfully, otherwise false.
+ */
+static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
+ u32 node, u32 dtype)
+{
+ struct publication *p = NULL;
+ struct tipc_socket_addr sk;
+ struct tipc_uaddr ua;
+ u32 key = ntohl(i->key);
+
+ tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_CLUSTER_SCOPE,
+ ntohl(i->type), ntohl(i->lower), ntohl(i->upper));
+ sk.ref = ntohl(i->port);
+ sk.node = node;
+
+ if (dtype == PUBLICATION) {
+ p = tipc_nametbl_insert_publ(net, &ua, &sk, key);
+ if (p) {
+ tipc_node_subscribe(net, &p->binding_node, node);
+ return true;
+ }
+ } else if (dtype == WITHDRAWAL) {
+ p = tipc_nametbl_remove_publ(net, &ua, &sk, key);
+ if (p) {
+ tipc_node_unsubscribe(net, &p->binding_node, node);
+ kfree_rcu(p, rcu);
+ return true;
+ }
+ pr_warn_ratelimited("Failed to remove binding %u,%u from %u\n",
+ ua.sr.type, ua.sr.lower, node);
+ } else {
+ pr_warn_ratelimited("Unknown name table message received\n");
+ }
+ return false;
+}
+
+static struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq,
+ u16 *rcv_nxt, bool *open)
+{
+ struct sk_buff *skb, *tmp;
+ struct tipc_msg *hdr;
+ u16 seqno;
+
+ spin_lock_bh(&namedq->lock);
+ skb_queue_walk_safe(namedq, skb, tmp) {
+ if (unlikely(skb_linearize(skb))) {
+ __skb_unlink(skb, namedq);
+ kfree_skb(skb);
+ continue;
+ }
+ hdr = buf_msg(skb);
+ seqno = msg_named_seqno(hdr);
+ if (msg_is_last_bulk(hdr)) {
+ *rcv_nxt = seqno;
+ *open = true;
+ }
+
+ if (msg_is_bulk(hdr) || msg_is_legacy(hdr)) {
+ __skb_unlink(skb, namedq);
+ spin_unlock_bh(&namedq->lock);
+ return skb;
+ }
+
+ if (*open && (*rcv_nxt == seqno)) {
+ (*rcv_nxt)++;
+ __skb_unlink(skb, namedq);
+ spin_unlock_bh(&namedq->lock);
+ return skb;
+ }
+
+ if (less(seqno, *rcv_nxt)) {
+ __skb_unlink(skb, namedq);
+ kfree_skb(skb);
+ continue;
+ }
+ }
+ spin_unlock_bh(&namedq->lock);
+ return NULL;
+}
+
+/**
+ * tipc_named_rcv - process name table update messages sent by another node
+ * @net: the associated network namespace
+ * @namedq: queue to receive from
+ * @rcv_nxt: store last received seqno here
+ * @open: last bulk msg was received (FIXME)
+ */
+void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq,
+ u16 *rcv_nxt, bool *open)
+{
+ struct tipc_net *tn = tipc_net(net);
+ struct distr_item *item;
+ struct tipc_msg *hdr;
+ struct sk_buff *skb;
+ u32 count, node;
+
+ spin_lock_bh(&tn->nametbl_lock);
+ while ((skb = tipc_named_dequeue(namedq, rcv_nxt, open))) {
+ hdr = buf_msg(skb);
+ node = msg_orignode(hdr);
+ item = (struct distr_item *)msg_data(hdr);
+ count = msg_data_sz(hdr) / ITEM_SIZE;
+ while (count--) {
+ tipc_update_nametbl(net, item, node, msg_type(hdr));
+ item++;
+ }
+ kfree_skb(skb);
+ }
+ spin_unlock_bh(&tn->nametbl_lock);
+}
+
+/**
+ * tipc_named_reinit - re-initialize local publications
+ * @net: the associated network namespace
+ *
+ * This routine is called whenever TIPC networking is enabled.
+ * All name table entries published by this node are updated to reflect
+ * the node's new network address.
+ */
+void tipc_named_reinit(struct net *net)
+{
+ struct name_table *nt = tipc_name_table(net);
+ struct tipc_net *tn = tipc_net(net);
+ struct publication *p;
+ u32 self = tipc_own_addr(net);
+
+ spin_lock_bh(&tn->nametbl_lock);
+
+ list_for_each_entry_rcu(p, &nt->node_scope, binding_node)
+ p->sk.node = self;
+ list_for_each_entry_rcu(p, &nt->cluster_scope, binding_node)
+ p->sk.node = self;
+ nt->rc_dests = 0;
+ spin_unlock_bh(&tn->nametbl_lock);
+}
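
named_distribute() above sizes each bulk buffer so that it carries a whole number of distr_item entries: the usable payload is the per-destination MTU minus the internal header, rounded down to a multiple of ITEM_SIZE. A small standalone sketch of that arithmetic, assuming an Ethernet-like MTU of 1500 bytes, TIPC's 40-byte internal header and the 20-byte distr_item declared in name_distr.h:

#include <stdio.h>

#define MTU		1500	/* assumed bearer MTU */
#define INT_H_SIZE	40	/* TIPC internal header size */
#define ITEM_SIZE	20	/* sizeof(struct distr_item): five __be32 fields */

int main(void)
{
	unsigned int msg_dsz = ((MTU - INT_H_SIZE) / ITEM_SIZE) * ITEM_SIZE;

	/* With these numbers: 73 publications per buffer, 1460 payload bytes */
	printf("items per buffer: %u, payload: %u bytes\n",
	       msg_dsz / ITEM_SIZE, msg_dsz);
	return 0;
}

The last, partially filled buffer is then trimmed with skb_trim() before being queued, as seen at the end of named_distribute().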
diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h
new file mode 100644
index 0000000000..c677f6f082
--- /dev/null
+++ b/net/tipc/name_distr.h
@@ -0,0 +1,79 @@
+/*
+ * net/tipc/name_distr.h: Include file for TIPC name distribution code
+ *
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_NAME_DISTR_H
+#define _TIPC_NAME_DISTR_H
+
+#include "name_table.h"
+
+#define ITEM_SIZE sizeof(struct distr_item)
+
+/**
+ * struct distr_item - publication info distributed to other nodes
+ * @type: name sequence type
+ * @lower: name sequence lower bound
+ * @upper: name sequence upper bound
+ * @port: publishing port reference
+ * @key: publication key
+ *
+ * ===> All fields are stored in network byte order. <===
+ *
+ * The first three fields identify the name (or name sequence) being
+ * published. The reference field uniquely identifies the port that published
+ * the name sequence. The key field uniquely identifies the publication, in
+ * the event that a port has multiple publications of the same name sequence.
+ *
+ * Note: There is no field that identifies the publishing node because it is
+ * the same for all items contained within a publication message.
+ */
+struct distr_item {
+ __be32 type;
+ __be32 lower;
+ __be32 upper;
+ __be32 port;
+ __be32 key;
+};
+
+struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ);
+struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ);
+void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities);
+void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq,
+ u16 *rcv_nxt, bool *open);
+void tipc_named_reinit(struct net *net);
+void tipc_publ_notify(struct net *net, struct list_head *nsub_list,
+ u32 addr, u16 capabilities);
+
+#endif
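
All distr_item fields travel in network byte order, so a sender converts with htonl() (as publ_to_item() does) and a receiver converts back with ntohl() (as tipc_update_nametbl() does). A minimal user-space sketch of that round trip, using made-up service values:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

struct distr_item {
	uint32_t type, lower, upper, port, key;	/* big-endian on the wire */
};

int main(void)
{
	struct distr_item i;

	/* pack, mirroring publ_to_item() */
	i.type = htonl(1000);
	i.lower = htonl(10);
	i.upper = htonl(20);
	i.port = htonl(12345);
	i.key = htonl(42);

	/* unpack, mirroring tipc_update_nametbl() */
	printf("type=%u range=%u-%u port=%u key=%u\n",
	       ntohl(i.type), ntohl(i.lower), ntohl(i.upper),
	       ntohl(i.port), ntohl(i.key));
	return 0;
}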
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
new file mode 100644
index 0000000000..d1180370fd
--- /dev/null
+++ b/net/tipc/name_table.c
@@ -0,0 +1,1204 @@
+/*
+ * net/tipc/name_table.c: TIPC name table code
+ *
+ * Copyright (c) 2000-2006, 2014-2018, Ericsson AB
+ * Copyright (c) 2004-2008, 2010-2014, Wind River Systems
+ * Copyright (c) 2020-2021, Red Hat Inc
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <net/sock.h>
+#include <linux/list_sort.h>
+#include <linux/rbtree_augmented.h>
+#include "core.h"
+#include "netlink.h"
+#include "name_table.h"
+#include "name_distr.h"
+#include "subscr.h"
+#include "bcast.h"
+#include "addr.h"
+#include "node.h"
+#include "group.h"
+
+/**
+ * struct service_range - container for all bindings of a service range
+ * @lower: service range lower bound
+ * @upper: service range upper bound
+ * @tree_node: member of service range RB tree
+ * @max: largest 'upper' in this node subtree
+ * @local_publ: list of identical publications made from this node
+ * Used by closest_first lookup and multicast lookup algorithm
+ * @all_publ: all publications identical to this one, whatever node and scope
+ * Used by round-robin lookup algorithm
+ */
+struct service_range {
+ u32 lower;
+ u32 upper;
+ struct rb_node tree_node;
+ u32 max;
+ struct list_head local_publ;
+ struct list_head all_publ;
+};
+
+/**
+ * struct tipc_service - container for all published instances of a service type
+ * @type: 32 bit 'type' value for service
+ * @publ_cnt: increasing counter for publications in this service
+ * @ranges: rb tree containing all service ranges for this service
+ * @service_list: links to adjacent name ranges in hash chain
+ * @subscriptions: list of subscriptions for this service type
+ * @lock: spinlock controlling access to pertaining service ranges/publications
+ * @rcu: RCU callback head used for deferred freeing
+ */
+struct tipc_service {
+ u32 type;
+ u32 publ_cnt;
+ struct rb_root ranges;
+ struct hlist_node service_list;
+ struct list_head subscriptions;
+ spinlock_t lock; /* Covers service range list */
+ struct rcu_head rcu;
+};
+
+#define service_range_upper(sr) ((sr)->upper)
+RB_DECLARE_CALLBACKS_MAX(static, sr_callbacks,
+ struct service_range, tree_node, u32, max,
+ service_range_upper)
+
+#define service_range_entry(rbtree_node) \
+ (container_of(rbtree_node, struct service_range, tree_node))
+
+#define service_range_overlap(sr, start, end) \
+ ((sr)->lower <= (end) && (sr)->upper >= (start))
+
+/**
+ * service_range_foreach_match - iterate over tipc service rbtree for each
+ * range match
+ * @sr: the service range pointer as a loop cursor
+ * @sc: the pointer to tipc service which holds the service range rbtree
+ * @start: beginning of the search range (end >= start) for matching
+ * @end: end of the search range (end >= start) for matching
+ */
+#define service_range_foreach_match(sr, sc, start, end) \
+ for (sr = service_range_match_first((sc)->ranges.rb_node, \
+ start, \
+ end); \
+ sr; \
+ sr = service_range_match_next(&(sr)->tree_node, \
+ start, \
+ end))
+
+/**
+ * service_range_match_first - find first service range matching a range
+ * @n: the root node of service range rbtree for searching
+ * @start: beginning of the search range (end >= start) for matching
+ * @end: end of the search range (end >= start) for matching
+ *
+ * Return: the leftmost service range node in the rbtree that overlaps the
+ * specific range if any. Otherwise, returns NULL.
+ */
+static struct service_range *service_range_match_first(struct rb_node *n,
+ u32 start, u32 end)
+{
+ struct service_range *sr;
+ struct rb_node *l, *r;
+
+ /* No overlaps in tree at all? */
+ if (!n || service_range_entry(n)->max < start)
+ return NULL;
+
+ while (n) {
+ l = n->rb_left;
+ if (l && service_range_entry(l)->max >= start) {
+ /* A leftmost overlap range node must be one in the left
+ * subtree. If not, it has lower > end, then nodes on
+ * the right side cannot satisfy the condition either.
+ */
+ n = l;
+ continue;
+ }
+
+ /* No one in the left subtree can match, return if this node is
+ * an overlap i.e. leftmost.
+ */
+ sr = service_range_entry(n);
+ if (service_range_overlap(sr, start, end))
+ return sr;
+
+ /* Ok, try to lookup on the right side */
+ r = n->rb_right;
+ if (sr->lower <= end &&
+ r && service_range_entry(r)->max >= start) {
+ n = r;
+ continue;
+ }
+ break;
+ }
+
+ return NULL;
+}
+
+/**
+ * service_range_match_next - find next service range matching a range
+ * @n: a node in service range rbtree from which the searching starts
+ * @start: beginning of the search range (end >= start) for matching
+ * @end: end of the search range (end >= start) for matching
+ *
+ * Return: the next service range node to the given node in the rbtree that
+ * overlaps the specific range if any. Otherwise, returns NULL.
+ */
+static struct service_range *service_range_match_next(struct rb_node *n,
+ u32 start, u32 end)
+{
+ struct service_range *sr;
+ struct rb_node *p, *r;
+
+ while (n) {
+ r = n->rb_right;
+ if (r && service_range_entry(r)->max >= start)
+ /* A next overlap range node must be one in the right
+ * subtree. If not, it has lower > end, then any next
+ * successor (- an ancestor) of this node cannot
+ * satisfy the condition either.
+ */
+ return service_range_match_first(r, start, end);
+
+ /* No one in the right subtree can match, go up to find an
+ * ancestor of this node which is parent of a left-hand child.
+ */
+ while ((p = rb_parent(n)) && n == p->rb_right)
+ n = p;
+ if (!p)
+ break;
+
+ /* Return if this ancestor is an overlap */
+ sr = service_range_entry(p);
+ if (service_range_overlap(sr, start, end))
+ return sr;
+
+ /* Ok, try to lookup more from this ancestor */
+ if (sr->lower <= end) {
+ n = p;
+ continue;
+ }
+ break;
+ }
+
+ return NULL;
+}
+
+static int hash(int x)
+{
+ return x & (TIPC_NAMETBL_SIZE - 1);
+}
+
+/**
+ * tipc_publ_create - create a publication structure
+ * @ua: the service range the user is binding to
+ * @sk: the address of the socket that is bound
+ * @key: publication key
+ */
+static struct publication *tipc_publ_create(struct tipc_uaddr *ua,
+ struct tipc_socket_addr *sk,
+ u32 key)
+{
+ struct publication *p = kzalloc(sizeof(*p), GFP_ATOMIC);
+
+ if (!p)
+ return NULL;
+
+ p->sr = ua->sr;
+ p->sk = *sk;
+ p->scope = ua->scope;
+ p->key = key;
+ INIT_LIST_HEAD(&p->binding_sock);
+ INIT_LIST_HEAD(&p->binding_node);
+ INIT_LIST_HEAD(&p->local_publ);
+ INIT_LIST_HEAD(&p->all_publ);
+ INIT_LIST_HEAD(&p->list);
+ return p;
+}
+
+/**
+ * tipc_service_create - create a service structure for the specified 'type'
+ * @net: network namespace
+ * @ua: address representing the service to be bound
+ *
+ * Allocates and initializes a single service structure, set to all 0's.
+ */
+static struct tipc_service *tipc_service_create(struct net *net,
+ struct tipc_uaddr *ua)
+{
+ struct name_table *nt = tipc_name_table(net);
+ struct tipc_service *service;
+ struct hlist_head *hd;
+
+ service = kzalloc(sizeof(*service), GFP_ATOMIC);
+ if (!service) {
+ pr_warn("Service creation failed, no memory\n");
+ return NULL;
+ }
+
+ spin_lock_init(&service->lock);
+ service->type = ua->sr.type;
+ service->ranges = RB_ROOT;
+ INIT_HLIST_NODE(&service->service_list);
+ INIT_LIST_HEAD(&service->subscriptions);
+ hd = &nt->services[hash(ua->sr.type)];
+ hlist_add_head_rcu(&service->service_list, hd);
+ return service;
+}
+
+/* tipc_service_find_range - find service range matching publication parameters
+ */
+static struct service_range *tipc_service_find_range(struct tipc_service *sc,
+ struct tipc_uaddr *ua)
+{
+ struct service_range *sr;
+
+ service_range_foreach_match(sr, sc, ua->sr.lower, ua->sr.upper) {
+ /* Look for exact match */
+ if (sr->lower == ua->sr.lower && sr->upper == ua->sr.upper)
+ return sr;
+ }
+
+ return NULL;
+}
+
+static struct service_range *tipc_service_create_range(struct tipc_service *sc,
+ struct publication *p)
+{
+ struct rb_node **n, *parent = NULL;
+ struct service_range *sr;
+ u32 lower = p->sr.lower;
+ u32 upper = p->sr.upper;
+
+ n = &sc->ranges.rb_node;
+ while (*n) {
+ parent = *n;
+ sr = service_range_entry(parent);
+ if (lower == sr->lower && upper == sr->upper)
+ return sr;
+ if (sr->max < upper)
+ sr->max = upper;
+ if (lower <= sr->lower)
+ n = &parent->rb_left;
+ else
+ n = &parent->rb_right;
+ }
+ sr = kzalloc(sizeof(*sr), GFP_ATOMIC);
+ if (!sr)
+ return NULL;
+ sr->lower = lower;
+ sr->upper = upper;
+ sr->max = upper;
+ INIT_LIST_HEAD(&sr->local_publ);
+ INIT_LIST_HEAD(&sr->all_publ);
+ rb_link_node(&sr->tree_node, parent, n);
+ rb_insert_augmented(&sr->tree_node, &sc->ranges, &sr_callbacks);
+ return sr;
+}
+
+static bool tipc_service_insert_publ(struct net *net,
+ struct tipc_service *sc,
+ struct publication *p)
+{
+ struct tipc_subscription *sub, *tmp;
+ struct service_range *sr;
+ struct publication *_p;
+ u32 node = p->sk.node;
+ bool first = false;
+ bool res = false;
+ u32 key = p->key;
+
+ spin_lock_bh(&sc->lock);
+ sr = tipc_service_create_range(sc, p);
+ if (!sr)
+ goto exit;
+
+ first = list_empty(&sr->all_publ);
+
+ /* Return if the publication already exists */
+ list_for_each_entry(_p, &sr->all_publ, all_publ) {
+ if (_p->key == key && (!_p->sk.node || _p->sk.node == node)) {
+ pr_debug("Failed to bind duplicate %u,%u,%u/%u:%u/%u\n",
+ p->sr.type, p->sr.lower, p->sr.upper,
+ node, p->sk.ref, key);
+ goto exit;
+ }
+ }
+
+ if (in_own_node(net, p->sk.node))
+ list_add(&p->local_publ, &sr->local_publ);
+ list_add(&p->all_publ, &sr->all_publ);
+ p->id = sc->publ_cnt++;
+
+ /* Any subscriptions waiting for notification? */
+ list_for_each_entry_safe(sub, tmp, &sc->subscriptions, service_list) {
+ tipc_sub_report_overlap(sub, p, TIPC_PUBLISHED, first);
+ }
+ res = true;
+exit:
+ if (!res)
+ pr_warn("Failed to bind to %u,%u,%u\n",
+ p->sr.type, p->sr.lower, p->sr.upper);
+ spin_unlock_bh(&sc->lock);
+ return res;
+}
+
+/**
+ * tipc_service_remove_publ - remove a publication from a service
+ * @r: service_range to remove publication from
+ * @sk: address publishing socket
+ * @key: target publication key
+ */
+static struct publication *tipc_service_remove_publ(struct service_range *r,
+ struct tipc_socket_addr *sk,
+ u32 key)
+{
+ struct publication *p;
+ u32 node = sk->node;
+
+ list_for_each_entry(p, &r->all_publ, all_publ) {
+ if (p->key != key || (node && node != p->sk.node))
+ continue;
+ list_del(&p->all_publ);
+ list_del(&p->local_publ);
+ return p;
+ }
+ return NULL;
+}
+
+/*
+ * Reuse time_after32() for wrap-around safe comparison of publication ids
+ */
+#define publication_after(pa, pb) time_after32((pa)->id, (pb)->id)
+static int tipc_publ_sort(void *priv, const struct list_head *a,
+ const struct list_head *b)
+{
+ struct publication *pa, *pb;
+
+ pa = container_of(a, struct publication, list);
+ pb = container_of(b, struct publication, list);
+ return publication_after(pa, pb);
+}
+
+/**
+ * tipc_service_subscribe - attach a subscription, and optionally
+ * issue the prescribed number of events if there is any service
+ * range overlapping with the requested range
+ * @service: the tipc_service to attach the @sub to
+ * @sub: the subscription to attach
+ */
+static void tipc_service_subscribe(struct tipc_service *service,
+ struct tipc_subscription *sub)
+{
+ struct publication *p, *first, *tmp;
+ struct list_head publ_list;
+ struct service_range *sr;
+ u32 filter, lower, upper;
+
+ filter = sub->s.filter;
+ lower = sub->s.seq.lower;
+ upper = sub->s.seq.upper;
+
+ tipc_sub_get(sub);
+ list_add(&sub->service_list, &service->subscriptions);
+
+ if (filter & TIPC_SUB_NO_STATUS)
+ return;
+
+ INIT_LIST_HEAD(&publ_list);
+ service_range_foreach_match(sr, service, lower, upper) {
+ first = NULL;
+ list_for_each_entry(p, &sr->all_publ, all_publ) {
+ if (filter & TIPC_SUB_PORTS)
+ list_add_tail(&p->list, &publ_list);
+ else if (!first || publication_after(first, p))
+ /* Pick this range's *first* publication */
+ first = p;
+ }
+ if (first)
+ list_add_tail(&first->list, &publ_list);
+ }
+
+ /* Sort the publications before reporting */
+ list_sort(NULL, &publ_list, tipc_publ_sort);
+ list_for_each_entry_safe(p, tmp, &publ_list, list) {
+ tipc_sub_report_overlap(sub, p, TIPC_PUBLISHED, true);
+ list_del_init(&p->list);
+ }
+}
+
+static struct tipc_service *tipc_service_find(struct net *net,
+ struct tipc_uaddr *ua)
+{
+ struct name_table *nt = tipc_name_table(net);
+ struct hlist_head *service_head;
+ struct tipc_service *service;
+
+ service_head = &nt->services[hash(ua->sr.type)];
+ hlist_for_each_entry_rcu(service, service_head, service_list) {
+ if (service->type == ua->sr.type)
+ return service;
+ }
+ return NULL;
+};
+
+struct publication *tipc_nametbl_insert_publ(struct net *net,
+ struct tipc_uaddr *ua,
+ struct tipc_socket_addr *sk,
+ u32 key)
+{
+ struct tipc_service *sc;
+ struct publication *p;
+
+ p = tipc_publ_create(ua, sk, key);
+ if (!p)
+ return NULL;
+
+ sc = tipc_service_find(net, ua);
+ if (!sc)
+ sc = tipc_service_create(net, ua);
+ if (sc && tipc_service_insert_publ(net, sc, p))
+ return p;
+ kfree(p);
+ return NULL;
+}
+
+struct publication *tipc_nametbl_remove_publ(struct net *net,
+ struct tipc_uaddr *ua,
+ struct tipc_socket_addr *sk,
+ u32 key)
+{
+ struct tipc_subscription *sub, *tmp;
+ struct publication *p = NULL;
+ struct service_range *sr;
+ struct tipc_service *sc;
+ bool last;
+
+ sc = tipc_service_find(net, ua);
+ if (!sc)
+ goto exit;
+
+ spin_lock_bh(&sc->lock);
+ sr = tipc_service_find_range(sc, ua);
+ if (!sr)
+ goto unlock;
+ p = tipc_service_remove_publ(sr, sk, key);
+ if (!p)
+ goto unlock;
+
+ /* Notify any waiting subscriptions */
+ last = list_empty(&sr->all_publ);
+ list_for_each_entry_safe(sub, tmp, &sc->subscriptions, service_list) {
+ tipc_sub_report_overlap(sub, p, TIPC_WITHDRAWN, last);
+ }
+
+ /* Remove service range item if this was its last publication */
+ if (list_empty(&sr->all_publ)) {
+ rb_erase_augmented(&sr->tree_node, &sc->ranges, &sr_callbacks);
+ kfree(sr);
+ }
+
+ /* Delete service item if no more publications and subscriptions */
+ if (RB_EMPTY_ROOT(&sc->ranges) && list_empty(&sc->subscriptions)) {
+ hlist_del_init_rcu(&sc->service_list);
+ kfree_rcu(sc, rcu);
+ }
+unlock:
+ spin_unlock_bh(&sc->lock);
+exit:
+ if (!p) {
+ pr_err("Failed to remove unknown binding: %u,%u,%u/%u:%u/%u\n",
+ ua->sr.type, ua->sr.lower, ua->sr.upper,
+ sk->node, sk->ref, key);
+ }
+ return p;
+}
+
+/**
+ * tipc_nametbl_lookup_anycast - perform service instance to socket translation
+ * @net: network namespace
+ * @ua: service address to look up
+ * @sk: address to socket we want to find
+ *
+ * On entry, a non-zero 'sk->node' indicates the node where we want lookup to be
+ * performed, which may not be this one.
+ *
+ * On exit:
+ *
+ * - If lookup is deferred to another node, leave 'sk->node' unchanged and
+ * return 'true'.
+ * - If lookup is successful, set the 'sk->node' and 'sk->ref' (== portid) which
+ * represent the bound socket and return 'true'.
+ * - If lookup fails, return 'false'
+ *
+ * Note that for legacy users (node configured with Z.C.N address format) the
+ * 'closest-first' lookup algorithm must be maintained, i.e., if sk.node is 0
+ * we must look in the local binding list first
+ */
+bool tipc_nametbl_lookup_anycast(struct net *net,
+ struct tipc_uaddr *ua,
+ struct tipc_socket_addr *sk)
+{
+ struct tipc_net *tn = tipc_net(net);
+ bool legacy = tn->legacy_addr_format;
+ u32 self = tipc_own_addr(net);
+ u32 inst = ua->sa.instance;
+ struct service_range *r;
+ struct tipc_service *sc;
+ struct publication *p;
+ struct list_head *l;
+ bool res = false;
+
+ if (!tipc_in_scope(legacy, sk->node, self))
+ return true;
+
+ rcu_read_lock();
+ sc = tipc_service_find(net, ua);
+ if (unlikely(!sc))
+ goto exit;
+
+ spin_lock_bh(&sc->lock);
+ service_range_foreach_match(r, sc, inst, inst) {
+ /* Select lookup algo: local, closest-first or round-robin */
+ if (sk->node == self) {
+ l = &r->local_publ;
+ if (list_empty(l))
+ continue;
+ p = list_first_entry(l, struct publication, local_publ);
+ list_move_tail(&p->local_publ, &r->local_publ);
+ } else if (legacy && !sk->node && !list_empty(&r->local_publ)) {
+ l = &r->local_publ;
+ p = list_first_entry(l, struct publication, local_publ);
+ list_move_tail(&p->local_publ, &r->local_publ);
+ } else {
+ l = &r->all_publ;
+ p = list_first_entry(l, struct publication, all_publ);
+ list_move_tail(&p->all_publ, &r->all_publ);
+ }
+ *sk = p->sk;
+ res = true;
+ /* Todo: as for legacy, pick the first matching range only, a
+ * "true" round-robin will be performed as needed.
+ */
+ break;
+ }
+ spin_unlock_bh(&sc->lock);
+
+exit:
+ rcu_read_unlock();
+ return res;
+}
+
+/* tipc_nametbl_lookup_group(): look up destination(s) in a communication group
+ * Returns a list of one (== group anycast) or more (== group multicast)
+ * destination socket/node pairs matching the given address.
+ * The requester may or may not want to exclude itself from the list.
+ */
+bool tipc_nametbl_lookup_group(struct net *net, struct tipc_uaddr *ua,
+ struct list_head *dsts, int *dstcnt,
+ u32 exclude, bool mcast)
+{
+ u32 self = tipc_own_addr(net);
+ u32 inst = ua->sa.instance;
+ struct service_range *sr;
+ struct tipc_service *sc;
+ struct publication *p;
+
+ *dstcnt = 0;
+ rcu_read_lock();
+ sc = tipc_service_find(net, ua);
+ if (unlikely(!sc))
+ goto exit;
+
+ spin_lock_bh(&sc->lock);
+
+ /* Todo: a full search i.e. service_range_foreach_match() instead? */
+ sr = service_range_match_first(sc->ranges.rb_node, inst, inst);
+ if (!sr)
+ goto no_match;
+
+ list_for_each_entry(p, &sr->all_publ, all_publ) {
+ if (p->scope != ua->scope)
+ continue;
+ if (p->sk.ref == exclude && p->sk.node == self)
+ continue;
+ tipc_dest_push(dsts, p->sk.node, p->sk.ref);
+ (*dstcnt)++;
+ if (mcast)
+ continue;
+ list_move_tail(&p->all_publ, &sr->all_publ);
+ break;
+ }
+no_match:
+ spin_unlock_bh(&sc->lock);
+exit:
+ rcu_read_unlock();
+ return !list_empty(dsts);
+}
+
+/* tipc_nametbl_lookup_mcast_sockets(): look up node local destination sockets
+ * matching the given address
+ * Used on nodes which have received a multicast/broadcast message
+ * Returns a list of local sockets
+ */
+void tipc_nametbl_lookup_mcast_sockets(struct net *net, struct tipc_uaddr *ua,
+ struct list_head *dports)
+{
+ struct service_range *sr;
+ struct tipc_service *sc;
+ struct publication *p;
+ u8 scope = ua->scope;
+
+ rcu_read_lock();
+ sc = tipc_service_find(net, ua);
+ if (!sc)
+ goto exit;
+
+ spin_lock_bh(&sc->lock);
+ service_range_foreach_match(sr, sc, ua->sr.lower, ua->sr.upper) {
+ list_for_each_entry(p, &sr->local_publ, local_publ) {
+ if (scope == p->scope || scope == TIPC_ANY_SCOPE)
+ tipc_dest_push(dports, 0, p->sk.ref);
+ }
+ }
+ spin_unlock_bh(&sc->lock);
+exit:
+ rcu_read_unlock();
+}
+
+/* tipc_nametbl_lookup_mcast_nodes(): look up all destination nodes matching
+ * the given address
+ * Used on nodes which are sending out a multicast/broadcast message
+ * Returns a list of nodes, including own node if applicable
+ */
+void tipc_nametbl_lookup_mcast_nodes(struct net *net, struct tipc_uaddr *ua,
+ struct tipc_nlist *nodes)
+{
+ struct service_range *sr;
+ struct tipc_service *sc;
+ struct publication *p;
+
+ rcu_read_lock();
+ sc = tipc_service_find(net, ua);
+ if (!sc)
+ goto exit;
+
+ spin_lock_bh(&sc->lock);
+ service_range_foreach_match(sr, sc, ua->sr.lower, ua->sr.upper) {
+ list_for_each_entry(p, &sr->all_publ, all_publ) {
+ tipc_nlist_add(nodes, p->sk.node);
+ }
+ }
+ spin_unlock_bh(&sc->lock);
+exit:
+ rcu_read_unlock();
+}
+
+/* tipc_nametbl_build_group - build list of communication group members
+ */
+void tipc_nametbl_build_group(struct net *net, struct tipc_group *grp,
+ struct tipc_uaddr *ua)
+{
+ struct service_range *sr;
+ struct tipc_service *sc;
+ struct publication *p;
+ struct rb_node *n;
+
+ rcu_read_lock();
+ sc = tipc_service_find(net, ua);
+ if (!sc)
+ goto exit;
+
+ spin_lock_bh(&sc->lock);
+ for (n = rb_first(&sc->ranges); n; n = rb_next(n)) {
+ sr = container_of(n, struct service_range, tree_node);
+ list_for_each_entry(p, &sr->all_publ, all_publ) {
+ if (p->scope != ua->scope)
+ continue;
+ tipc_group_add_member(grp, p->sk.node, p->sk.ref,
+ p->sr.lower);
+ }
+ }
+ spin_unlock_bh(&sc->lock);
+exit:
+ rcu_read_unlock();
+}
+
+/* tipc_nametbl_publish - add service binding to name table
+ */
+struct publication *tipc_nametbl_publish(struct net *net, struct tipc_uaddr *ua,
+ struct tipc_socket_addr *sk, u32 key)
+{
+ struct name_table *nt = tipc_name_table(net);
+ struct tipc_net *tn = tipc_net(net);
+ struct publication *p = NULL;
+ struct sk_buff *skb = NULL;
+ u32 rc_dests;
+
+ spin_lock_bh(&tn->nametbl_lock);
+
+ if (nt->local_publ_count >= TIPC_MAX_PUBL) {
+ pr_warn("Bind failed, max limit %u reached\n", TIPC_MAX_PUBL);
+ goto exit;
+ }
+
+ p = tipc_nametbl_insert_publ(net, ua, sk, key);
+ if (p) {
+ nt->local_publ_count++;
+ skb = tipc_named_publish(net, p);
+ }
+ rc_dests = nt->rc_dests;
+exit:
+ spin_unlock_bh(&tn->nametbl_lock);
+
+ if (skb)
+ tipc_node_broadcast(net, skb, rc_dests);
+ return p;
+
+}
+
+/**
+ * tipc_nametbl_withdraw - withdraw a service binding
+ * @net: network namespace
+ * @ua: service address/range being unbound
+ * @sk: address of the socket being unbound from
+ * @key: target publication key
+ */
+void tipc_nametbl_withdraw(struct net *net, struct tipc_uaddr *ua,
+ struct tipc_socket_addr *sk, u32 key)
+{
+ struct name_table *nt = tipc_name_table(net);
+ struct tipc_net *tn = tipc_net(net);
+ struct sk_buff *skb = NULL;
+ struct publication *p;
+ u32 rc_dests;
+
+ spin_lock_bh(&tn->nametbl_lock);
+
+ p = tipc_nametbl_remove_publ(net, ua, sk, key);
+ if (p) {
+ nt->local_publ_count--;
+ skb = tipc_named_withdraw(net, p);
+ list_del_init(&p->binding_sock);
+ kfree_rcu(p, rcu);
+ }
+ rc_dests = nt->rc_dests;
+ spin_unlock_bh(&tn->nametbl_lock);
+
+ if (skb)
+ tipc_node_broadcast(net, skb, rc_dests);
+}
+
+/**
+ * tipc_nametbl_subscribe - add a subscription object to the name table
+ * @sub: subscription to add
+ */
+bool tipc_nametbl_subscribe(struct tipc_subscription *sub)
+{
+ struct tipc_net *tn = tipc_net(sub->net);
+ u32 type = sub->s.seq.type;
+ struct tipc_service *sc;
+ struct tipc_uaddr ua;
+ bool res = true;
+
+ tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_NODE_SCOPE, type,
+ sub->s.seq.lower, sub->s.seq.upper);
+ spin_lock_bh(&tn->nametbl_lock);
+ sc = tipc_service_find(sub->net, &ua);
+ if (!sc)
+ sc = tipc_service_create(sub->net, &ua);
+ if (sc) {
+ spin_lock_bh(&sc->lock);
+ tipc_service_subscribe(sc, sub);
+ spin_unlock_bh(&sc->lock);
+ } else {
+ pr_warn("Failed to subscribe for {%u,%u,%u}\n",
+ type, sub->s.seq.lower, sub->s.seq.upper);
+ res = false;
+ }
+ spin_unlock_bh(&tn->nametbl_lock);
+ return res;
+}
+
+/**
+ * tipc_nametbl_unsubscribe - remove a subscription object from name table
+ * @sub: subscription to remove
+ */
+void tipc_nametbl_unsubscribe(struct tipc_subscription *sub)
+{
+ struct tipc_net *tn = tipc_net(sub->net);
+ struct tipc_service *sc;
+ struct tipc_uaddr ua;
+
+ tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_NODE_SCOPE,
+ sub->s.seq.type, sub->s.seq.lower, sub->s.seq.upper);
+ spin_lock_bh(&tn->nametbl_lock);
+ sc = tipc_service_find(sub->net, &ua);
+ if (!sc)
+ goto exit;
+
+ spin_lock_bh(&sc->lock);
+ list_del_init(&sub->service_list);
+ tipc_sub_put(sub);
+
+ /* Delete service item if no more publications and subscriptions */
+ if (RB_EMPTY_ROOT(&sc->ranges) && list_empty(&sc->subscriptions)) {
+ hlist_del_init_rcu(&sc->service_list);
+ kfree_rcu(sc, rcu);
+ }
+ spin_unlock_bh(&sc->lock);
+exit:
+ spin_unlock_bh(&tn->nametbl_lock);
+}
+
+int tipc_nametbl_init(struct net *net)
+{
+ struct tipc_net *tn = tipc_net(net);
+ struct name_table *nt;
+ int i;
+
+ nt = kzalloc(sizeof(*nt), GFP_KERNEL);
+ if (!nt)
+ return -ENOMEM;
+
+ for (i = 0; i < TIPC_NAMETBL_SIZE; i++)
+ INIT_HLIST_HEAD(&nt->services[i]);
+
+ INIT_LIST_HEAD(&nt->node_scope);
+ INIT_LIST_HEAD(&nt->cluster_scope);
+ rwlock_init(&nt->cluster_scope_lock);
+ tn->nametbl = nt;
+ spin_lock_init(&tn->nametbl_lock);
+ return 0;
+}
+
+/**
+ * tipc_service_delete - purge all publications for a service and delete it
+ * @net: the associated network namespace
+ * @sc: tipc_service to delete
+ */
+static void tipc_service_delete(struct net *net, struct tipc_service *sc)
+{
+ struct service_range *sr, *tmpr;
+ struct publication *p, *tmp;
+
+ spin_lock_bh(&sc->lock);
+ rbtree_postorder_for_each_entry_safe(sr, tmpr, &sc->ranges, tree_node) {
+ list_for_each_entry_safe(p, tmp, &sr->all_publ, all_publ) {
+ tipc_service_remove_publ(sr, &p->sk, p->key);
+ kfree_rcu(p, rcu);
+ }
+ rb_erase_augmented(&sr->tree_node, &sc->ranges, &sr_callbacks);
+ kfree(sr);
+ }
+ hlist_del_init_rcu(&sc->service_list);
+ spin_unlock_bh(&sc->lock);
+ kfree_rcu(sc, rcu);
+}
+
+void tipc_nametbl_stop(struct net *net)
+{
+ struct name_table *nt = tipc_name_table(net);
+ struct tipc_net *tn = tipc_net(net);
+ struct hlist_head *service_head;
+ struct tipc_service *service;
+ u32 i;
+
+ /* Verify name table is empty and purge any lingering
+ * publications, then release the name table
+ */
+ spin_lock_bh(&tn->nametbl_lock);
+ for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
+ if (hlist_empty(&nt->services[i]))
+ continue;
+ service_head = &nt->services[i];
+ hlist_for_each_entry_rcu(service, service_head, service_list) {
+ tipc_service_delete(net, service);
+ }
+ }
+ spin_unlock_bh(&tn->nametbl_lock);
+
+ synchronize_net();
+ kfree(nt);
+}
+
+static int __tipc_nl_add_nametable_publ(struct tipc_nl_msg *msg,
+ struct tipc_service *service,
+ struct service_range *sr,
+ u32 *last_key)
+{
+ struct publication *p;
+ struct nlattr *attrs;
+ struct nlattr *b;
+ void *hdr;
+
+ if (*last_key) {
+ list_for_each_entry(p, &sr->all_publ, all_publ)
+ if (p->key == *last_key)
+ break;
+ if (list_entry_is_head(p, &sr->all_publ, all_publ))
+ return -EPIPE;
+ } else {
+ p = list_first_entry(&sr->all_publ,
+ struct publication,
+ all_publ);
+ }
+
+ list_for_each_entry_from(p, &sr->all_publ, all_publ) {
+ *last_key = p->key;
+
+ hdr = genlmsg_put(msg->skb, msg->portid, msg->seq,
+ &tipc_genl_family, NLM_F_MULTI,
+ TIPC_NL_NAME_TABLE_GET);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NAME_TABLE);
+ if (!attrs)
+ goto msg_full;
+
+ b = nla_nest_start_noflag(msg->skb, TIPC_NLA_NAME_TABLE_PUBL);
+ if (!b)
+ goto attr_msg_full;
+
+ if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_TYPE, service->type))
+ goto publ_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_LOWER, sr->lower))
+ goto publ_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_UPPER, sr->upper))
+ goto publ_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_SCOPE, p->scope))
+ goto publ_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_NODE, p->sk.node))
+ goto publ_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_REF, p->sk.ref))
+ goto publ_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_KEY, p->key))
+ goto publ_msg_full;
+
+ nla_nest_end(msg->skb, b);
+ nla_nest_end(msg->skb, attrs);
+ genlmsg_end(msg->skb, hdr);
+ }
+ *last_key = 0;
+
+ return 0;
+
+publ_msg_full:
+ nla_nest_cancel(msg->skb, b);
+attr_msg_full:
+ nla_nest_cancel(msg->skb, attrs);
+msg_full:
+ genlmsg_cancel(msg->skb, hdr);
+
+ return -EMSGSIZE;
+}
+
+static int __tipc_nl_service_range_list(struct tipc_nl_msg *msg,
+ struct tipc_service *sc,
+ u32 *last_lower, u32 *last_key)
+{
+ struct service_range *sr;
+ struct rb_node *n;
+ int err;
+
+ for (n = rb_first(&sc->ranges); n; n = rb_next(n)) {
+ sr = container_of(n, struct service_range, tree_node);
+ if (sr->lower < *last_lower)
+ continue;
+ err = __tipc_nl_add_nametable_publ(msg, sc, sr, last_key);
+ if (err) {
+ *last_lower = sr->lower;
+ return err;
+ }
+ }
+ *last_lower = 0;
+ return 0;
+}
+
+static int tipc_nl_service_list(struct net *net, struct tipc_nl_msg *msg,
+ u32 *last_type, u32 *last_lower, u32 *last_key)
+{
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_service *service = NULL;
+ struct hlist_head *head;
+ struct tipc_uaddr ua;
+ int err;
+ int i;
+
+ if (*last_type)
+ i = hash(*last_type);
+ else
+ i = 0;
+
+ for (; i < TIPC_NAMETBL_SIZE; i++) {
+ head = &tn->nametbl->services[i];
+
+ if (*last_type ||
+ (!i && *last_key && (*last_lower == *last_key))) {
+ tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_NODE_SCOPE,
+ *last_type, *last_lower, *last_lower);
+ service = tipc_service_find(net, &ua);
+ if (!service)
+ return -EPIPE;
+ } else {
+ hlist_for_each_entry_rcu(service, head, service_list)
+ break;
+ if (!service)
+ continue;
+ }
+
+ hlist_for_each_entry_from_rcu(service, service_list) {
+ spin_lock_bh(&service->lock);
+ err = __tipc_nl_service_range_list(msg, service,
+ last_lower,
+ last_key);
+
+ if (err) {
+ *last_type = service->type;
+ spin_unlock_bh(&service->lock);
+ return err;
+ }
+ spin_unlock_bh(&service->lock);
+ }
+ *last_type = 0;
+ }
+ return 0;
+}
+
+int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(skb->sk);
+ u32 last_type = cb->args[0];
+ u32 last_lower = cb->args[1];
+ u32 last_key = cb->args[2];
+ int done = cb->args[3];
+ struct tipc_nl_msg msg;
+ int err;
+
+ if (done)
+ return 0;
+
+ msg.skb = skb;
+ msg.portid = NETLINK_CB(cb->skb).portid;
+ msg.seq = cb->nlh->nlmsg_seq;
+
+ rcu_read_lock();
+ err = tipc_nl_service_list(net, &msg, &last_type,
+ &last_lower, &last_key);
+ if (!err) {
+ done = 1;
+ } else if (err != -EMSGSIZE) {
+ /* We never set seq or call nl_dump_check_consistent(), which
+ * means that setting prev_seq here will cause the consistency
+ * check to fail in the netlink callback handler, resulting in
+ * the NLMSG_DONE message having the NLM_F_DUMP_INTR flag set
+ * if we got an error.
+ */
+ cb->prev_seq = 1;
+ }
+ rcu_read_unlock();
+
+ cb->args[0] = last_type;
+ cb->args[1] = last_lower;
+ cb->args[2] = last_key;
+ cb->args[3] = done;
+
+ return skb->len;
+}
+
+struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port)
+{
+ struct tipc_dest *dst;
+
+ list_for_each_entry(dst, l, list) {
+ if (dst->node == node && dst->port == port)
+ return dst;
+ }
+ return NULL;
+}
+
+bool tipc_dest_push(struct list_head *l, u32 node, u32 port)
+{
+ struct tipc_dest *dst;
+
+ if (tipc_dest_find(l, node, port))
+ return false;
+
+ dst = kmalloc(sizeof(*dst), GFP_ATOMIC);
+ if (unlikely(!dst))
+ return false;
+ dst->node = node;
+ dst->port = port;
+ list_add(&dst->list, l);
+ return true;
+}
+
+bool tipc_dest_pop(struct list_head *l, u32 *node, u32 *port)
+{
+ struct tipc_dest *dst;
+
+ if (list_empty(l))
+ return false;
+ dst = list_first_entry(l, typeof(*dst), list);
+ if (port)
+ *port = dst->port;
+ if (node)
+ *node = dst->node;
+ list_del(&dst->list);
+ kfree(dst);
+ return true;
+}
+
+bool tipc_dest_del(struct list_head *l, u32 node, u32 port)
+{
+ struct tipc_dest *dst;
+
+ dst = tipc_dest_find(l, node, port);
+ if (!dst)
+ return false;
+ list_del(&dst->list);
+ kfree(dst);
+ return true;
+}
+
+void tipc_dest_list_purge(struct list_head *l)
+{
+ struct tipc_dest *dst, *tmp;
+
+ list_for_each_entry_safe(dst, tmp, l, list) {
+ list_del(&dst->list);
+ kfree(dst);
+ }
+}
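
The augmented rbtree walk in service_range_match_first()/service_range_match_next() above ultimately rests on the overlap test lower <= end && upper >= start, with each node's max field (the largest upper bound in its subtree) used to prune branches. A small standalone sketch of the predicate itself, over a few assumed ranges:

#include <stdio.h>

struct range { unsigned int lower, upper; };

/* Same test as the service_range_overlap() macro */
static int overlaps(const struct range *r, unsigned int start, unsigned int end)
{
	return r->lower <= end && r->upper >= start;
}

int main(void)
{
	/* Assumed service ranges bound under one service type */
	struct range ranges[] = { { 1, 10 }, { 5, 20 }, { 30, 40 } };
	unsigned int i, start = 15, end = 18;

	for (i = 0; i < 3; i++)
		printf("[%u,%u] %s [%u,%u]\n",
		       ranges[i].lower, ranges[i].upper,
		       overlaps(&ranges[i], start, end) ? "overlaps" : "misses",
		       start, end);
	return 0;
}

Here only the 5-20 range matches the 15-18 query; in the kernel that match is found without scanning every node, thanks to the max augmentation maintained by RB_DECLARE_CALLBACKS_MAX().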
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
new file mode 100644
index 0000000000..3bcd9ef8ce
--- /dev/null
+++ b/net/tipc/name_table.h
@@ -0,0 +1,155 @@
+/*
+ * net/tipc/name_table.h: Include file for TIPC name table code
+ *
+ * Copyright (c) 2000-2006, 2014-2018, Ericsson AB
+ * Copyright (c) 2004-2005, 2010-2011, Wind River Systems
+ * Copyright (c) 2020-2021, Red Hat Inc
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_NAME_TABLE_H
+#define _TIPC_NAME_TABLE_H
+
+struct tipc_subscription;
+struct tipc_plist;
+struct tipc_nlist;
+struct tipc_group;
+struct tipc_uaddr;
+
+/*
+ * TIPC name types reserved for internal TIPC use (both current and planned)
+ */
+#define TIPC_ZM_SRV 3 /* zone master service name type */
+#define TIPC_PUBL_SCOPE_NUM (TIPC_NODE_SCOPE + 1)
+#define TIPC_NAMETBL_SIZE 1024 /* must be a power of 2 */
+
+#define TIPC_ANY_SCOPE 10 /* Both node and cluster scope will match */
+
+/**
+ * struct publication - info about a published service address or range
+ * @sr: service range represented by this publication
+ * @sk: address of socket bound to this publication
+ * @scope: scope of publication, TIPC_NODE_SCOPE or TIPC_CLUSTER_SCOPE
+ * @key: publication key, unique across the cluster
+ * @id: publication id
+ * @binding_node: all publications from the same node which bound this one
+ * - Remote publications: in node->publ_list;
+ * Used by node/name distr to withdraw publications when node is lost
+ * - Local/node scope publications: in name_table->node_scope list
+ * - Local/cluster scope publications: in name_table->cluster_scope list
+ * @binding_sock: all publications from the same socket which bound this one
+ * Used by socket to withdraw publications when socket is unbound/released
+ * @local_publ: list of identical publications made from this node
+ * Used by closest_first and multicast receive lookup algorithms
+ * @all_publ: all publications identical to this one, whatever node and scope
+ * Used by round-robin lookup algorithm
+ * @list: to form a list of publications in temporal order
+ * @rcu: RCU callback head used for deferred freeing
+ */
+struct publication {
+ struct tipc_service_range sr;
+ struct tipc_socket_addr sk;
+ u16 scope;
+ u32 key;
+ u32 id;
+ struct list_head binding_node;
+ struct list_head binding_sock;
+ struct list_head local_publ;
+ struct list_head all_publ;
+ struct list_head list;
+ struct rcu_head rcu;
+};
+
+/**
+ * struct name_table - table containing all existing port name publications
+ * @services: name sequence hash lists
+ * @node_scope: all local publications with node scope
+ * - used by name_distr during re-init of name table
+ * @cluster_scope: all local publications with cluster scope
+ * - used by name_distr to send bulk updates to new nodes
+ * - used by name_distr during re-init of name table
+ * @cluster_scope_lock: lock for accessing @cluster_scope
+ * @local_publ_count: number of publications issued by this node
+ * @rc_dests: destination node counter
+ * @snd_nxt: next sequence number to be used
+ */
+struct name_table {
+ struct hlist_head services[TIPC_NAMETBL_SIZE];
+ struct list_head node_scope;
+ struct list_head cluster_scope;
+ rwlock_t cluster_scope_lock;
+ u32 local_publ_count;
+ u32 rc_dests;
+ u32 snd_nxt;
+};
+
+int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb);
+bool tipc_nametbl_lookup_anycast(struct net *net, struct tipc_uaddr *ua,
+ struct tipc_socket_addr *sk);
+void tipc_nametbl_lookup_mcast_sockets(struct net *net, struct tipc_uaddr *ua,
+ struct list_head *dports);
+void tipc_nametbl_lookup_mcast_nodes(struct net *net, struct tipc_uaddr *ua,
+ struct tipc_nlist *nodes);
+bool tipc_nametbl_lookup_group(struct net *net, struct tipc_uaddr *ua,
+ struct list_head *dsts, int *dstcnt,
+ u32 exclude, bool mcast);
+void tipc_nametbl_build_group(struct net *net, struct tipc_group *grp,
+ struct tipc_uaddr *ua);
+struct publication *tipc_nametbl_publish(struct net *net, struct tipc_uaddr *ua,
+ struct tipc_socket_addr *sk, u32 key);
+void tipc_nametbl_withdraw(struct net *net, struct tipc_uaddr *ua,
+ struct tipc_socket_addr *sk, u32 key);
+struct publication *tipc_nametbl_insert_publ(struct net *net,
+ struct tipc_uaddr *ua,
+ struct tipc_socket_addr *sk,
+ u32 key);
+struct publication *tipc_nametbl_remove_publ(struct net *net,
+ struct tipc_uaddr *ua,
+ struct tipc_socket_addr *sk,
+ u32 key);
+bool tipc_nametbl_subscribe(struct tipc_subscription *s);
+void tipc_nametbl_unsubscribe(struct tipc_subscription *s);
+int tipc_nametbl_init(struct net *net);
+void tipc_nametbl_stop(struct net *net);
+
+struct tipc_dest {
+ struct list_head list;
+ u32 port;
+ u32 node;
+};
+
+struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port);
+bool tipc_dest_push(struct list_head *l, u32 node, u32 port);
+bool tipc_dest_pop(struct list_head *l, u32 *node, u32 *port);
+bool tipc_dest_del(struct list_head *l, u32 node, u32 port);
+void tipc_dest_list_purge(struct list_head *l);
+
+#endif
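
A minimal usage sketch (illustration only, not part of the patch) for the tipc_dest
helpers declared above, whose del/purge bodies close out name_table.c earlier in this
hunk; the node address and port values are made up:

	LIST_HEAD(dsts);
	u32 node, port;

	tipc_dest_push(&dsts, 0x1001001, 42);		/* add a <node, port> destination */
	if (tipc_dest_find(&dsts, 0x1001001, 42))
		pr_info("destination queued\n");
	while (tipc_dest_pop(&dsts, &node, &port))	/* drain the list one entry at a time */
		pr_info("deliver to %x:%u\n", node, port);
	tipc_dest_list_purge(&dsts);			/* drop anything left over */
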
diff --git a/net/tipc/net.c b/net/tipc/net.c
new file mode 100644
index 0000000000..0e95572e56
--- /dev/null
+++ b/net/tipc/net.c
@@ -0,0 +1,345 @@
+/*
+ * net/tipc/net.c: TIPC network routing code
+ *
+ * Copyright (c) 1995-2006, 2014, Ericsson AB
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "net.h"
+#include "name_distr.h"
+#include "subscr.h"
+#include "socket.h"
+#include "node.h"
+#include "bcast.h"
+#include "link.h"
+#include "netlink.h"
+#include "monitor.h"
+
+/*
+ * The TIPC locking policy is designed to ensure a very fine locking
+ * granularity, permitting complete parallel access to individual
+ * port and node/link instances. The code consists of four major
+ * locking domains, each protected with their own disjunct set of locks.
+ *
+ * 1: The bearer level.
+ * RTNL lock is used to serialize the process of configuring bearer
+ * on update side, and RCU lock is applied on read side to make
+ * bearer instance valid on both paths of message transmission and
+ * reception.
+ *
+ * 2: The node and link level.
+ * All node instances are saved into two tipc_node_list and node_htable
+ * lists. The two lists are protected by node_list_lock on write side,
+ * and they are guarded with RCU lock on read side. A node instance is
+ * destroyed only when the TIPC module is removed, at which point it is
+ * certain that no user is still accessing it. Therefore, apart from
+ * iterating the two lists under RCU protection, there is no need to hold
+ * the RCU lock when accessing a node instance elsewhere.
+ *
+ * In addition, all members in node structure including link instances
+ * are protected by node spin lock.
+ *
+ * 3: The transport level of the protocol.
+ * This consists of the structures port, (and its user level
+ * representations, such as user_port and tipc_sock), reference and
+ * tipc_user (port.c, reg.c, socket.c).
+ *
+ * This layer has four different locks:
+ * - The tipc_port spin_lock. This is protecting each port instance
+ * from parallel data access and removal. Since we can not place
+ * this lock in the port itself, it has been placed in the
+ * corresponding reference table entry, which has the same life
+ * cycle as the module. This entry is difficult to access from
+ * outside the TIPC core, however, so a pointer to the lock has
+ * been added in the port instance, -to be used for unlocking
+ * only.
+ * - A read/write lock to protect the reference table itself (reg.c).
+ * (Nobody is using read-only access to this, so it can just as
+ * well be changed to a spin_lock)
+ * - A spin lock to protect the registry of kernel/driver users (reg.c)
+ * - A global spin_lock (tipc_port_lock), whose only task is to ensure
+ * consistency where more than one port is involved in an operation,
+ * i.e., when a port is part of a linked list of ports.
+ * There are two such lists; 'port_list', which is used for management,
+ * and 'wait_list', which is used to queue ports during congestion.
+ *
+ * 4: The name table (name_table.c, name_distr.c, subscription.c)
+ * - There is one big read/write-lock (tipc_nametbl_lock) protecting the
+ * overall name table structure. Nothing may be added to or removed from
+ * this structure without holding write access to it.
+ * - There is one local spin_lock per sub_sequence, which can be seen
+ * as a sub-domain to the tipc_nametbl_lock domain. It is used only
+ * for translation operations, and is needed because a translation
+ * steps the root of the 'publication' linked list between each lookup.
+ * This is always used within the scope of a tipc_nametbl_lock(read).
+ * - A local spin_lock protecting the queue of subscriber events.
+*/
+
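+/* Illustration only (not part of the patch): the read-side idiom described
+ * for domain 2 above - node instances are traversed under rcu_read_lock(),
+ * while list updates are serialized by node_list_lock. List and field names
+ * here are illustrative:
+ *
+ *	rcu_read_lock();
+ *	list_for_each_entry_rcu(n, &tn->node_list, list)
+ *		... read-only use of n ...
+ *	rcu_read_unlock();
+ */
+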
+static void tipc_net_finalize(struct net *net, u32 addr);
+
+int tipc_net_init(struct net *net, u8 *node_id, u32 addr)
+{
+ if (tipc_own_id(net)) {
+ pr_info("Cannot configure node identity twice\n");
+ return -1;
+ }
+ pr_info("Started in network mode\n");
+
+ if (node_id)
+ tipc_set_node_id(net, node_id);
+ if (addr)
+ tipc_net_finalize(net, addr);
+ return 0;
+}
+
+static void tipc_net_finalize(struct net *net, u32 addr)
+{
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_socket_addr sk = {0, addr};
+ struct tipc_uaddr ua;
+
+ tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_CLUSTER_SCOPE,
+ TIPC_NODE_STATE, addr, addr);
+
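+ /* Only the first caller to move node_addr from 0 to addr proceeds;
+  * any later or concurrent finalization attempt returns here.
+  */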
+ if (cmpxchg(&tn->node_addr, 0, addr))
+ return;
+ tipc_set_node_addr(net, addr);
+ tipc_named_reinit(net);
+ tipc_sk_reinit(net);
+ tipc_mon_reinit_self(net);
+ tipc_nametbl_publish(net, &ua, &sk, addr);
+}
+
+void tipc_net_finalize_work(struct work_struct *work)
+{
+ struct tipc_net *tn = container_of(work, struct tipc_net, work);
+
+ tipc_net_finalize(tipc_link_net(tn->bcl), tn->trial_addr);
+}
+
+void tipc_net_stop(struct net *net)
+{
+ if (!tipc_own_id(net))
+ return;
+
+ rtnl_lock();
+ tipc_bearer_stop(net);
+ tipc_node_stop(net);
+ rtnl_unlock();
+
+ pr_info("Left network mode\n");
+}
+
+static int __tipc_nl_add_net(struct net *net, struct tipc_nl_msg *msg)
+{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ u64 *w0 = (u64 *)&tn->node_id[0];
+ u64 *w1 = (u64 *)&tn->node_id[8];
+ struct nlattr *attrs;
+ void *hdr;
+
+ hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
+ NLM_F_MULTI, TIPC_NL_NET_GET);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NET);
+ if (!attrs)
+ goto msg_full;
+
+ if (nla_put_u32(msg->skb, TIPC_NLA_NET_ID, tn->net_id))
+ goto attr_msg_full;
+ if (nla_put_u64_64bit(msg->skb, TIPC_NLA_NET_NODEID, *w0, 0))
+ goto attr_msg_full;
+ if (nla_put_u64_64bit(msg->skb, TIPC_NLA_NET_NODEID_W1, *w1, 0))
+ goto attr_msg_full;
+ nla_nest_end(msg->skb, attrs);
+ genlmsg_end(msg->skb, hdr);
+
+ return 0;
+
+attr_msg_full:
+ nla_nest_cancel(msg->skb, attrs);
+msg_full:
+ genlmsg_cancel(msg->skb, hdr);
+
+ return -EMSGSIZE;
+}
+
+int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(skb->sk);
+ int err;
+ int done = cb->args[0];
+ struct tipc_nl_msg msg;
+
+ if (done)
+ return 0;
+
+ msg.skb = skb;
+ msg.portid = NETLINK_CB(cb->skb).portid;
+ msg.seq = cb->nlh->nlmsg_seq;
+
+ err = __tipc_nl_add_net(net, &msg);
+ if (err)
+ goto out;
+
+ done = 1;
+out:
+ cb->args[0] = done;
+
+ return skb->len;
+}
+
+int __tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
+ struct net *net = sock_net(skb->sk);
+ struct tipc_net *tn = tipc_net(net);
+ int err;
+
+ if (!info->attrs[TIPC_NLA_NET])
+ return -EINVAL;
+
+ err = nla_parse_nested_deprecated(attrs, TIPC_NLA_NET_MAX,
+ info->attrs[TIPC_NLA_NET],
+ tipc_nl_net_policy, info->extack);
+
+ if (err)
+ return err;
+
+ /* Can't change net id once TIPC has joined a network */
+ if (tipc_own_addr(net))
+ return -EPERM;
+
+ if (attrs[TIPC_NLA_NET_ID]) {
+ u32 val;
+
+ val = nla_get_u32(attrs[TIPC_NLA_NET_ID]);
+ if (val < 1 || val > 9999)
+ return -EINVAL;
+
+ tn->net_id = val;
+ }
+
+ if (attrs[TIPC_NLA_NET_ADDR]) {
+ u32 addr;
+
+ addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
+ if (!addr)
+ return -EINVAL;
+ tn->legacy_addr_format = true;
+ tipc_net_init(net, NULL, addr);
+ }
+
+ if (attrs[TIPC_NLA_NET_NODEID]) {
+ u8 node_id[NODE_ID_LEN];
+ u64 *w0 = (u64 *)&node_id[0];
+ u64 *w1 = (u64 *)&node_id[8];
+
+ if (!attrs[TIPC_NLA_NET_NODEID_W1])
+ return -EINVAL;
+ *w0 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID]);
+ *w1 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID_W1]);
+ tipc_net_init(net, node_id, 0);
+ }
+ return 0;
+}
+
+int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+
+ rtnl_lock();
+ err = __tipc_nl_net_set(skb, info);
+ rtnl_unlock();
+
+ return err;
+}
+
+static int __tipc_nl_addr_legacy_get(struct net *net, struct tipc_nl_msg *msg)
+{
+ struct tipc_net *tn = tipc_net(net);
+ struct nlattr *attrs;
+ void *hdr;
+
+ hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
+ 0, TIPC_NL_ADDR_LEGACY_GET);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ attrs = nla_nest_start(msg->skb, TIPC_NLA_NET);
+ if (!attrs)
+ goto msg_full;
+
+ if (tn->legacy_addr_format)
+ if (nla_put_flag(msg->skb, TIPC_NLA_NET_ADDR_LEGACY))
+ goto attr_msg_full;
+
+ nla_nest_end(msg->skb, attrs);
+ genlmsg_end(msg->skb, hdr);
+
+ return 0;
+
+attr_msg_full:
+ nla_nest_cancel(msg->skb, attrs);
+msg_full:
+ genlmsg_cancel(msg->skb, hdr);
+
+ return -EMSGSIZE;
+}
+
+int tipc_nl_net_addr_legacy_get(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net *net = sock_net(skb->sk);
+ struct tipc_nl_msg msg;
+ struct sk_buff *rep;
+ int err;
+
+ rep = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!rep)
+ return -ENOMEM;
+
+ msg.skb = rep;
+ msg.portid = info->snd_portid;
+ msg.seq = info->snd_seq;
+
+ err = __tipc_nl_addr_legacy_get(net, &msg);
+ if (err) {
+ nlmsg_free(msg.skb);
+ return err;
+ }
+
+ return genlmsg_reply(msg.skb, info);
+}
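
A stand-alone sketch (illustration only, not part of the patch) of the one-shot dump
idiom used by tipc_nl_net_dump() above, where cb->args[0] doubles as a "done" flag;
the handler name is hypothetical:

	static int example_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
	{
		if (cb->args[0])	/* second pass: nothing left, end the dump */
			return 0;

		/* ... emit exactly one message into skb here ... */

		cb->args[0] = 1;
		return skb->len;	/* non-zero return: the core calls us once more */
	}
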
diff --git a/net/tipc/net.h b/net/tipc/net.h
new file mode 100644
index 0000000000..1cb1e43cf3
--- /dev/null
+++ b/net/tipc/net.h
@@ -0,0 +1,52 @@
+/*
+ * net/tipc/net.h: Include file for TIPC network routing code
+ *
+ * Copyright (c) 1995-2006, 2014, Ericsson AB
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_NET_H
+#define _TIPC_NET_H
+
+#include <net/genetlink.h>
+
+extern const struct nla_policy tipc_nl_net_policy[];
+
+int tipc_net_init(struct net *net, u8 *node_id, u32 addr);
+void tipc_net_finalize_work(struct work_struct *work);
+void tipc_net_stop(struct net *net);
+int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb);
+int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info);
+int __tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info);
+int tipc_nl_net_addr_legacy_get(struct sk_buff *skb, struct genl_info *info);
+
+#endif
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
new file mode 100644
index 0000000000..1a9a5bdacc
--- /dev/null
+++ b/net/tipc/netlink.c
@@ -0,0 +1,315 @@
+/*
+ * net/tipc/netlink.c: TIPC configuration handling
+ *
+ * Copyright (c) 2005-2006, 2014, Ericsson AB
+ * Copyright (c) 2005-2007, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "socket.h"
+#include "name_table.h"
+#include "bearer.h"
+#include "link.h"
+#include "node.h"
+#include "net.h"
+#include "udp_media.h"
+#include <net/genetlink.h>
+
+static const struct nla_policy tipc_nl_policy[TIPC_NLA_MAX + 1] = {
+ [TIPC_NLA_UNSPEC] = { .type = NLA_UNSPEC, },
+ [TIPC_NLA_BEARER] = { .type = NLA_NESTED, },
+ [TIPC_NLA_SOCK] = { .type = NLA_NESTED, },
+ [TIPC_NLA_PUBL] = { .type = NLA_NESTED, },
+ [TIPC_NLA_LINK] = { .type = NLA_NESTED, },
+ [TIPC_NLA_MEDIA] = { .type = NLA_NESTED, },
+ [TIPC_NLA_NODE] = { .type = NLA_NESTED, },
+ [TIPC_NLA_NET] = { .type = NLA_NESTED, },
+ [TIPC_NLA_NAME_TABLE] = { .type = NLA_NESTED, },
+ [TIPC_NLA_MON] = { .type = NLA_NESTED, },
+};
+
+const struct nla_policy
+tipc_nl_name_table_policy[TIPC_NLA_NAME_TABLE_MAX + 1] = {
+ [TIPC_NLA_NAME_TABLE_UNSPEC] = { .type = NLA_UNSPEC },
+ [TIPC_NLA_NAME_TABLE_PUBL] = { .type = NLA_NESTED }
+};
+
+const struct nla_policy tipc_nl_monitor_policy[TIPC_NLA_MON_MAX + 1] = {
+ [TIPC_NLA_MON_UNSPEC] = { .type = NLA_UNSPEC },
+ [TIPC_NLA_MON_REF] = { .type = NLA_U32 },
+ [TIPC_NLA_MON_ACTIVATION_THRESHOLD] = { .type = NLA_U32 },
+};
+
+const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = {
+ [TIPC_NLA_SOCK_UNSPEC] = { .type = NLA_UNSPEC },
+ [TIPC_NLA_SOCK_ADDR] = { .type = NLA_U32 },
+ [TIPC_NLA_SOCK_REF] = { .type = NLA_U32 },
+ [TIPC_NLA_SOCK_CON] = { .type = NLA_NESTED },
+ [TIPC_NLA_SOCK_HAS_PUBL] = { .type = NLA_FLAG }
+};
+
+const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
+ [TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC },
+ [TIPC_NLA_NET_ID] = { .type = NLA_U32 },
+ [TIPC_NLA_NET_ADDR] = { .type = NLA_U32 },
+ [TIPC_NLA_NET_NODEID] = { .type = NLA_U64 },
+ [TIPC_NLA_NET_NODEID_W1] = { .type = NLA_U64 },
+ [TIPC_NLA_NET_ADDR_LEGACY] = { .type = NLA_FLAG }
+};
+
+const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
+ [TIPC_NLA_LINK_UNSPEC] = { .type = NLA_UNSPEC },
+ [TIPC_NLA_LINK_NAME] = { .type = NLA_NUL_STRING,
+ .len = TIPC_MAX_LINK_NAME },
+ [TIPC_NLA_LINK_MTU] = { .type = NLA_U32 },
+ [TIPC_NLA_LINK_BROADCAST] = { .type = NLA_FLAG },
+ [TIPC_NLA_LINK_UP] = { .type = NLA_FLAG },
+ [TIPC_NLA_LINK_ACTIVE] = { .type = NLA_FLAG },
+ [TIPC_NLA_LINK_PROP] = { .type = NLA_NESTED },
+ [TIPC_NLA_LINK_STATS] = { .type = NLA_NESTED },
+ [TIPC_NLA_LINK_RX] = { .type = NLA_U32 },
+ [TIPC_NLA_LINK_TX] = { .type = NLA_U32 }
+};
+
+const struct nla_policy tipc_nl_node_policy[TIPC_NLA_NODE_MAX + 1] = {
+ [TIPC_NLA_NODE_UNSPEC] = { .type = NLA_UNSPEC },
+ [TIPC_NLA_NODE_ADDR] = { .type = NLA_U32 },
+ [TIPC_NLA_NODE_UP] = { .type = NLA_FLAG },
+ [TIPC_NLA_NODE_ID] = { .type = NLA_BINARY,
+ .len = TIPC_NODEID_LEN},
+ [TIPC_NLA_NODE_KEY] = { .type = NLA_BINARY,
+ .len = TIPC_AEAD_KEY_SIZE_MAX},
+ [TIPC_NLA_NODE_KEY_MASTER] = { .type = NLA_FLAG },
+ [TIPC_NLA_NODE_REKEYING] = { .type = NLA_U32 },
+};
+
+/* Properties valid for media, bearer and link */
+const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
+ [TIPC_NLA_PROP_UNSPEC] = { .type = NLA_UNSPEC },
+ [TIPC_NLA_PROP_PRIO] = { .type = NLA_U32 },
+ [TIPC_NLA_PROP_TOL] = { .type = NLA_U32 },
+ [TIPC_NLA_PROP_WIN] = { .type = NLA_U32 },
+ [TIPC_NLA_PROP_MTU] = { .type = NLA_U32 },
+ [TIPC_NLA_PROP_BROADCAST] = { .type = NLA_U32 },
+ [TIPC_NLA_PROP_BROADCAST_RATIO] = { .type = NLA_U32 }
+};
+
+const struct nla_policy tipc_nl_bearer_policy[TIPC_NLA_BEARER_MAX + 1] = {
+ [TIPC_NLA_BEARER_UNSPEC] = { .type = NLA_UNSPEC },
+ [TIPC_NLA_BEARER_NAME] = { .type = NLA_NUL_STRING,
+ .len = TIPC_MAX_BEARER_NAME },
+ [TIPC_NLA_BEARER_PROP] = { .type = NLA_NESTED },
+ [TIPC_NLA_BEARER_DOMAIN] = { .type = NLA_U32 }
+};
+
+const struct nla_policy tipc_nl_media_policy[TIPC_NLA_MEDIA_MAX + 1] = {
+ [TIPC_NLA_MEDIA_UNSPEC] = { .type = NLA_UNSPEC },
+ [TIPC_NLA_MEDIA_NAME] = { .type = NLA_STRING },
+ [TIPC_NLA_MEDIA_PROP] = { .type = NLA_NESTED }
+};
+
+const struct nla_policy tipc_nl_udp_policy[TIPC_NLA_UDP_MAX + 1] = {
+ [TIPC_NLA_UDP_UNSPEC] = {.type = NLA_UNSPEC},
+ [TIPC_NLA_UDP_LOCAL] = {.type = NLA_BINARY,
+ .len = sizeof(struct sockaddr_storage)},
+ [TIPC_NLA_UDP_REMOTE] = {.type = NLA_BINARY,
+ .len = sizeof(struct sockaddr_storage)},
+};
+
+/* Users of the legacy API (tipc-config) can't handle that we add operations,
+ * so we have a separate genl handling for the new API.
+ */
+static const struct genl_ops tipc_genl_v2_ops[] = {
+ {
+ .cmd = TIPC_NL_BEARER_DISABLE,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = tipc_nl_bearer_disable,
+ },
+ {
+ .cmd = TIPC_NL_BEARER_ENABLE,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = tipc_nl_bearer_enable,
+ },
+ {
+ .cmd = TIPC_NL_BEARER_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = tipc_nl_bearer_get,
+ .dumpit = tipc_nl_bearer_dump,
+ },
+ {
+ .cmd = TIPC_NL_BEARER_ADD,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = tipc_nl_bearer_add,
+ },
+ {
+ .cmd = TIPC_NL_BEARER_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = tipc_nl_bearer_set,
+ },
+ {
+ .cmd = TIPC_NL_SOCK_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .start = tipc_dump_start,
+ .dumpit = tipc_nl_sk_dump,
+ .done = tipc_dump_done,
+ },
+ {
+ .cmd = TIPC_NL_PUBL_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT |
+ GENL_DONT_VALIDATE_DUMP_STRICT,
+ .dumpit = tipc_nl_publ_dump,
+ },
+ {
+ .cmd = TIPC_NL_LINK_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .doit = tipc_nl_node_get_link,
+ .dumpit = tipc_nl_node_dump_link,
+ },
+ {
+ .cmd = TIPC_NL_LINK_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = tipc_nl_node_set_link,
+ },
+ {
+ .cmd = TIPC_NL_LINK_RESET_STATS,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = tipc_nl_node_reset_link_stats,
+ },
+ {
+ .cmd = TIPC_NL_MEDIA_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = tipc_nl_media_get,
+ .dumpit = tipc_nl_media_dump,
+ },
+ {
+ .cmd = TIPC_NL_MEDIA_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = tipc_nl_media_set,
+ },
+ {
+ .cmd = TIPC_NL_NODE_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .dumpit = tipc_nl_node_dump,
+ },
+ {
+ .cmd = TIPC_NL_NET_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .dumpit = tipc_nl_net_dump,
+ },
+ {
+ .cmd = TIPC_NL_NET_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = tipc_nl_net_set,
+ },
+ {
+ .cmd = TIPC_NL_NAME_TABLE_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .dumpit = tipc_nl_name_table_dump,
+ },
+ {
+ .cmd = TIPC_NL_MON_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = tipc_nl_node_set_monitor,
+ },
+ {
+ .cmd = TIPC_NL_MON_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = tipc_nl_node_get_monitor,
+ .dumpit = tipc_nl_node_dump_monitor,
+ },
+ {
+ .cmd = TIPC_NL_MON_PEER_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT |
+ GENL_DONT_VALIDATE_DUMP_STRICT,
+ .dumpit = tipc_nl_node_dump_monitor_peer,
+ },
+ {
+ .cmd = TIPC_NL_PEER_REMOVE,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = tipc_nl_peer_rm,
+ },
+#ifdef CONFIG_TIPC_MEDIA_UDP
+ {
+ .cmd = TIPC_NL_UDP_GET_REMOTEIP,
+ .validate = GENL_DONT_VALIDATE_STRICT |
+ GENL_DONT_VALIDATE_DUMP_STRICT,
+ .dumpit = tipc_udp_nl_dump_remoteip,
+ },
+#endif
+#ifdef CONFIG_TIPC_CRYPTO
+ {
+ .cmd = TIPC_NL_KEY_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = tipc_nl_node_set_key,
+ },
+ {
+ .cmd = TIPC_NL_KEY_FLUSH,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = tipc_nl_node_flush_key,
+ },
+#endif
+ {
+ .cmd = TIPC_NL_ADDR_LEGACY_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = tipc_nl_net_addr_legacy_get,
+ },
+};
+
+struct genl_family tipc_genl_family __ro_after_init = {
+ .name = TIPC_GENL_V2_NAME,
+ .version = TIPC_GENL_V2_VERSION,
+ .hdrsize = 0,
+ .maxattr = TIPC_NLA_MAX,
+ .policy = tipc_nl_policy,
+ .netnsok = true,
+ .module = THIS_MODULE,
+ .ops = tipc_genl_v2_ops,
+ .n_ops = ARRAY_SIZE(tipc_genl_v2_ops),
+ .resv_start_op = TIPC_NL_ADDR_LEGACY_GET + 1,
+};
+
+int __init tipc_netlink_start(void)
+{
+ int res;
+
+ res = genl_register_family(&tipc_genl_family);
+ if (res) {
+ pr_err("Failed to register netlink interface\n");
+ return res;
+ }
+ return 0;
+}
+
+void tipc_netlink_stop(void)
+{
+ genl_unregister_family(&tipc_genl_family);
+}
diff --git a/net/tipc/netlink.h b/net/tipc/netlink.h
new file mode 100644
index 0000000000..7cf777723e
--- /dev/null
+++ b/net/tipc/netlink.h
@@ -0,0 +1,64 @@
+/*
+ * net/tipc/netlink.h: Include file for TIPC netlink code
+ *
+ * Copyright (c) 2014, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_NETLINK_H
+#define _TIPC_NETLINK_H
+#include <net/netlink.h>
+
+extern struct genl_family tipc_genl_family;
+
+struct tipc_nl_msg {
+ struct sk_buff *skb;
+ u32 portid;
+ u32 seq;
+};
+
+extern const struct nla_policy tipc_nl_name_table_policy[];
+extern const struct nla_policy tipc_nl_sock_policy[];
+extern const struct nla_policy tipc_nl_net_policy[];
+extern const struct nla_policy tipc_nl_link_policy[];
+extern const struct nla_policy tipc_nl_node_policy[];
+extern const struct nla_policy tipc_nl_prop_policy[];
+extern const struct nla_policy tipc_nl_bearer_policy[];
+extern const struct nla_policy tipc_nl_media_policy[];
+extern const struct nla_policy tipc_nl_udp_policy[];
+extern const struct nla_policy tipc_nl_monitor_policy[];
+
+int tipc_netlink_start(void);
+int tipc_netlink_compat_start(void);
+void tipc_netlink_stop(void);
+void tipc_netlink_compat_stop(void);
+
+#endif
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
new file mode 100644
index 0000000000..c763008a8a
--- /dev/null
+++ b/net/tipc/netlink_compat.c
@@ -0,0 +1,1376 @@
+/*
+ * Copyright (c) 2014, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "bearer.h"
+#include "link.h"
+#include "name_table.h"
+#include "socket.h"
+#include "node.h"
+#include "net.h"
+#include <net/genetlink.h>
+#include <linux/string_helpers.h>
+#include <linux/tipc_config.h>
+
+/* The legacy API had an artificial message length limit called
+ * ULTRA_STRING_MAX_LEN.
+ */
+#define ULTRA_STRING_MAX_LEN 32768
+
+#define TIPC_SKB_MAX TLV_SPACE(ULTRA_STRING_MAX_LEN)
+
+#define REPLY_TRUNCATED "<truncated>\n"
+
+struct tipc_nl_compat_msg {
+ u16 cmd;
+ int rep_type;
+ int rep_size;
+ int req_type;
+ int req_size;
+ struct net *net;
+ struct sk_buff *rep;
+ struct tlv_desc *req;
+ struct sock *dst_sk;
+};
+
+struct tipc_nl_compat_cmd_dump {
+ int (*header)(struct tipc_nl_compat_msg *);
+ int (*dumpit)(struct sk_buff *, struct netlink_callback *);
+ int (*format)(struct tipc_nl_compat_msg *msg, struct nlattr **attrs);
+};
+
+struct tipc_nl_compat_cmd_doit {
+ int (*doit)(struct sk_buff *skb, struct genl_info *info);
+ int (*transcode)(struct tipc_nl_compat_cmd_doit *cmd,
+ struct sk_buff *skb, struct tipc_nl_compat_msg *msg);
+};
+
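+/* Flow of a compat request (see tipc_nl_compat_doit()/_dumpit() below):
+ * the legacy TLV request is transcoded into new-API netlink attributes,
+ * the corresponding v2 doit()/dumpit() handler is invoked on them, and -
+ * for dumps - each returned message is run through ->format() to rebuild
+ * the legacy TLV reply.
+ */
+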
+static int tipc_skb_tailroom(struct sk_buff *skb)
+{
+ int tailroom;
+ int limit;
+
+ tailroom = skb_tailroom(skb);
+ limit = TIPC_SKB_MAX - skb->len;
+
+ if (tailroom < limit)
+ return tailroom;
+
+ return limit;
+}
+
+static inline int TLV_GET_DATA_LEN(struct tlv_desc *tlv)
+{
+ return TLV_GET_LEN(tlv) - TLV_SPACE(0);
+}
+
+static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
+{
+ struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb);
+
+ if (tipc_skb_tailroom(skb) < TLV_SPACE(len))
+ return -EMSGSIZE;
+
+ skb_put(skb, TLV_SPACE(len));
+ memset(tlv, 0, TLV_SPACE(len));
+ tlv->tlv_type = htons(type);
+ tlv->tlv_len = htons(TLV_LENGTH(len));
+ if (len && data)
+ memcpy(TLV_DATA(tlv), data, len);
+
+ return 0;
+}
+
+static void tipc_tlv_init(struct sk_buff *skb, u16 type)
+{
+ struct tlv_desc *tlv = (struct tlv_desc *)skb->data;
+
+ TLV_SET_LEN(tlv, 0);
+ TLV_SET_TYPE(tlv, type);
+ skb_put(skb, sizeof(struct tlv_desc));
+}
+
+static __printf(2, 3) int tipc_tlv_sprintf(struct sk_buff *skb,
+ const char *fmt, ...)
+{
+ int n;
+ u16 len;
+ u32 rem;
+ char *buf;
+ struct tlv_desc *tlv;
+ va_list args;
+
+ rem = tipc_skb_tailroom(skb);
+
+ tlv = (struct tlv_desc *)skb->data;
+ len = TLV_GET_LEN(tlv);
+ buf = TLV_DATA(tlv) + len;
+
+ va_start(args, fmt);
+ n = vscnprintf(buf, rem, fmt, args);
+ va_end(args);
+
+ TLV_SET_LEN(tlv, n + len);
+ skb_put(skb, n);
+
+ return n;
+}
+
+static struct sk_buff *tipc_tlv_alloc(int size)
+{
+ int hdr_len;
+ struct sk_buff *buf;
+
+ size = TLV_SPACE(size);
+ hdr_len = nlmsg_total_size(GENL_HDRLEN + TIPC_GENL_HDRLEN);
+
+ buf = alloc_skb(hdr_len + size, GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ skb_reserve(buf, hdr_len);
+
+ return buf;
+}
+
+static struct sk_buff *tipc_get_err_tlv(char *str)
+{
+ int str_len = strlen(str) + 1;
+ struct sk_buff *buf;
+
+ buf = tipc_tlv_alloc(TLV_SPACE(str_len));
+ if (buf)
+ tipc_add_tlv(buf, TIPC_TLV_ERROR_STRING, str, str_len);
+
+ return buf;
+}
+
+static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
+ struct tipc_nl_compat_msg *msg,
+ struct sk_buff *arg)
+{
+ struct genl_dumpit_info info;
+ int len = 0;
+ int err;
+ struct sk_buff *buf;
+ struct nlmsghdr *nlmsg;
+ struct netlink_callback cb;
+ struct nlattr **attrbuf;
+
+ memset(&cb, 0, sizeof(cb));
+ cb.nlh = (struct nlmsghdr *)arg->data;
+ cb.skb = arg;
+ cb.data = &info;
+
+ buf = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf->sk = msg->dst_sk;
+ if (__tipc_dump_start(&cb, msg->net)) {
+ kfree_skb(buf);
+ return -ENOMEM;
+ }
+
+ attrbuf = kcalloc(tipc_genl_family.maxattr + 1,
+ sizeof(struct nlattr *), GFP_KERNEL);
+ if (!attrbuf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ info.info.attrs = attrbuf;
+
+ if (nlmsg_len(cb.nlh) > 0) {
+ err = nlmsg_parse_deprecated(cb.nlh, GENL_HDRLEN, attrbuf,
+ tipc_genl_family.maxattr,
+ tipc_genl_family.policy, NULL);
+ if (err)
+ goto err_out;
+ }
+ do {
+ int rem;
+
+ len = (*cmd->dumpit)(buf, &cb);
+
+ nlmsg_for_each_msg(nlmsg, nlmsg_hdr(buf), len, rem) {
+ err = nlmsg_parse_deprecated(nlmsg, GENL_HDRLEN,
+ attrbuf,
+ tipc_genl_family.maxattr,
+ tipc_genl_family.policy,
+ NULL);
+ if (err)
+ goto err_out;
+
+ err = (*cmd->format)(msg, attrbuf);
+ if (err)
+ goto err_out;
+
+ if (tipc_skb_tailroom(msg->rep) <= 1) {
+ err = -EMSGSIZE;
+ goto err_out;
+ }
+ }
+
+ skb_reset_tail_pointer(buf);
+ buf->len = 0;
+
+ } while (len);
+
+ err = 0;
+
+err_out:
+ kfree(attrbuf);
+ tipc_dump_done(&cb);
+ kfree_skb(buf);
+
+ if (err == -EMSGSIZE) {
+ /* The legacy API only considered messages filling
+ * "ULTRA_STRING_MAX_LEN" to be truncated.
+ */
+ if ((TIPC_SKB_MAX - msg->rep->len) <= 1) {
+ char *tail = skb_tail_pointer(msg->rep);
+
+ if (*tail != '\0')
+ sprintf(tail - sizeof(REPLY_TRUNCATED) - 1,
+ REPLY_TRUNCATED);
+ }
+
+ return 0;
+ }
+
+ return err;
+}
+
+static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
+ struct tipc_nl_compat_msg *msg)
+{
+ struct nlmsghdr *nlh;
+ struct sk_buff *arg;
+ int err;
+
+ if (msg->req_type && (!msg->req_size ||
+ !TLV_CHECK_TYPE(msg->req, msg->req_type)))
+ return -EINVAL;
+
+ msg->rep = tipc_tlv_alloc(msg->rep_size);
+ if (!msg->rep)
+ return -ENOMEM;
+
+ if (msg->rep_type)
+ tipc_tlv_init(msg->rep, msg->rep_type);
+
+ if (cmd->header) {
+ err = (*cmd->header)(msg);
+ if (err) {
+ kfree_skb(msg->rep);
+ msg->rep = NULL;
+ return err;
+ }
+ }
+
+ arg = nlmsg_new(0, GFP_KERNEL);
+ if (!arg) {
+ kfree_skb(msg->rep);
+ msg->rep = NULL;
+ return -ENOMEM;
+ }
+
+ nlh = nlmsg_put(arg, 0, 0, tipc_genl_family.id, 0, NLM_F_MULTI);
+ if (!nlh) {
+ kfree_skb(arg);
+ kfree_skb(msg->rep);
+ msg->rep = NULL;
+ return -EMSGSIZE;
+ }
+ nlmsg_end(arg, nlh);
+
+ err = __tipc_nl_compat_dumpit(cmd, msg, arg);
+ if (err) {
+ kfree_skb(msg->rep);
+ msg->rep = NULL;
+ }
+ kfree_skb(arg);
+
+ return err;
+}
+
+static int __tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
+ struct tipc_nl_compat_msg *msg)
+{
+ int err;
+ struct sk_buff *doit_buf;
+ struct sk_buff *trans_buf;
+ struct nlattr **attrbuf;
+ struct genl_info info;
+
+ trans_buf = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!trans_buf)
+ return -ENOMEM;
+
+ attrbuf = kmalloc_array(tipc_genl_family.maxattr + 1,
+ sizeof(struct nlattr *),
+ GFP_KERNEL);
+ if (!attrbuf) {
+ err = -ENOMEM;
+ goto trans_out;
+ }
+
+ doit_buf = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!doit_buf) {
+ err = -ENOMEM;
+ goto attrbuf_out;
+ }
+
+ memset(&info, 0, sizeof(info));
+ info.attrs = attrbuf;
+
+ rtnl_lock();
+ err = (*cmd->transcode)(cmd, trans_buf, msg);
+ if (err)
+ goto doit_out;
+
+ err = nla_parse_deprecated(attrbuf, tipc_genl_family.maxattr,
+ (const struct nlattr *)trans_buf->data,
+ trans_buf->len, NULL, NULL);
+ if (err)
+ goto doit_out;
+
+ doit_buf->sk = msg->dst_sk;
+
+ err = (*cmd->doit)(doit_buf, &info);
+doit_out:
+ rtnl_unlock();
+
+ kfree_skb(doit_buf);
+attrbuf_out:
+ kfree(attrbuf);
+trans_out:
+ kfree_skb(trans_buf);
+
+ return err;
+}
+
+static int tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
+ struct tipc_nl_compat_msg *msg)
+{
+ int err;
+
+ if (msg->req_type && (!msg->req_size ||
+ !TLV_CHECK_TYPE(msg->req, msg->req_type)))
+ return -EINVAL;
+
+ err = __tipc_nl_compat_doit(cmd, msg);
+ if (err)
+ return err;
+
+ /* The legacy API considered an empty message a success message */
+ msg->rep = tipc_tlv_alloc(0);
+ if (!msg->rep)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int tipc_nl_compat_bearer_dump(struct tipc_nl_compat_msg *msg,
+ struct nlattr **attrs)
+{
+ struct nlattr *bearer[TIPC_NLA_BEARER_MAX + 1];
+ int err;
+
+ if (!attrs[TIPC_NLA_BEARER])
+ return -EINVAL;
+
+ err = nla_parse_nested_deprecated(bearer, TIPC_NLA_BEARER_MAX,
+ attrs[TIPC_NLA_BEARER], NULL, NULL);
+ if (err)
+ return err;
+
+ return tipc_add_tlv(msg->rep, TIPC_TLV_BEARER_NAME,
+ nla_data(bearer[TIPC_NLA_BEARER_NAME]),
+ nla_len(bearer[TIPC_NLA_BEARER_NAME]));
+}
+
+static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
+ struct sk_buff *skb,
+ struct tipc_nl_compat_msg *msg)
+{
+ struct nlattr *prop;
+ struct nlattr *bearer;
+ struct tipc_bearer_config *b;
+ int len;
+
+ b = (struct tipc_bearer_config *)TLV_DATA(msg->req);
+
+ bearer = nla_nest_start_noflag(skb, TIPC_NLA_BEARER);
+ if (!bearer)
+ return -EMSGSIZE;
+
+ len = TLV_GET_DATA_LEN(msg->req);
+ len -= offsetof(struct tipc_bearer_config, name);
+ if (len <= 0)
+ return -EINVAL;
+
+ len = min_t(int, len, TIPC_MAX_BEARER_NAME);
+ if (!string_is_terminated(b->name, len))
+ return -EINVAL;
+
+ if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name))
+ return -EMSGSIZE;
+
+ if (nla_put_u32(skb, TIPC_NLA_BEARER_DOMAIN, ntohl(b->disc_domain)))
+ return -EMSGSIZE;
+
+ if (ntohl(b->priority) <= TIPC_MAX_LINK_PRI) {
+ prop = nla_nest_start_noflag(skb, TIPC_NLA_BEARER_PROP);
+ if (!prop)
+ return -EMSGSIZE;
+ if (nla_put_u32(skb, TIPC_NLA_PROP_PRIO, ntohl(b->priority)))
+ return -EMSGSIZE;
+ nla_nest_end(skb, prop);
+ }
+ nla_nest_end(skb, bearer);
+
+ return 0;
+}
+
+static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
+ struct sk_buff *skb,
+ struct tipc_nl_compat_msg *msg)
+{
+ char *name;
+ struct nlattr *bearer;
+ int len;
+
+ name = (char *)TLV_DATA(msg->req);
+
+ bearer = nla_nest_start_noflag(skb, TIPC_NLA_BEARER);
+ if (!bearer)
+ return -EMSGSIZE;
+
+ len = TLV_GET_DATA_LEN(msg->req);
+ if (len <= 0)
+ return -EINVAL;
+
+ len = min_t(int, len, TIPC_MAX_BEARER_NAME);
+ if (!string_is_terminated(name, len))
+ return -EINVAL;
+
+ if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name))
+ return -EMSGSIZE;
+
+ nla_nest_end(skb, bearer);
+
+ return 0;
+}
+
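+/* Integer percentage count/total, rounded to the nearest whole percent
+ * (adding total/2 before the division implements round-half-up).
+ */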
+static inline u32 perc(u32 count, u32 total)
+{
+ return (count * 100 + (total / 2)) / total;
+}
+
+static void __fill_bc_link_stat(struct tipc_nl_compat_msg *msg,
+ struct nlattr *prop[], struct nlattr *stats[])
+{
+ tipc_tlv_sprintf(msg->rep, " Window:%u packets\n",
+ nla_get_u32(prop[TIPC_NLA_PROP_WIN]));
+
+ tipc_tlv_sprintf(msg->rep,
+ " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
+ nla_get_u32(stats[TIPC_NLA_STATS_RX_INFO]),
+ nla_get_u32(stats[TIPC_NLA_STATS_RX_FRAGMENTS]),
+ nla_get_u32(stats[TIPC_NLA_STATS_RX_FRAGMENTED]),
+ nla_get_u32(stats[TIPC_NLA_STATS_RX_BUNDLES]),
+ nla_get_u32(stats[TIPC_NLA_STATS_RX_BUNDLED]));
+
+ tipc_tlv_sprintf(msg->rep,
+ " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
+ nla_get_u32(stats[TIPC_NLA_STATS_TX_INFO]),
+ nla_get_u32(stats[TIPC_NLA_STATS_TX_FRAGMENTS]),
+ nla_get_u32(stats[TIPC_NLA_STATS_TX_FRAGMENTED]),
+ nla_get_u32(stats[TIPC_NLA_STATS_TX_BUNDLES]),
+ nla_get_u32(stats[TIPC_NLA_STATS_TX_BUNDLED]));
+
+ tipc_tlv_sprintf(msg->rep, " RX naks:%u defs:%u dups:%u\n",
+ nla_get_u32(stats[TIPC_NLA_STATS_RX_NACKS]),
+ nla_get_u32(stats[TIPC_NLA_STATS_RX_DEFERRED]),
+ nla_get_u32(stats[TIPC_NLA_STATS_DUPLICATES]));
+
+ tipc_tlv_sprintf(msg->rep, " TX naks:%u acks:%u dups:%u\n",
+ nla_get_u32(stats[TIPC_NLA_STATS_TX_NACKS]),
+ nla_get_u32(stats[TIPC_NLA_STATS_TX_ACKS]),
+ nla_get_u32(stats[TIPC_NLA_STATS_RETRANSMITTED]));
+
+ tipc_tlv_sprintf(msg->rep,
+ " Congestion link:%u Send queue max:%u avg:%u",
+ nla_get_u32(stats[TIPC_NLA_STATS_LINK_CONGS]),
+ nla_get_u32(stats[TIPC_NLA_STATS_MAX_QUEUE]),
+ nla_get_u32(stats[TIPC_NLA_STATS_AVG_QUEUE]));
+}
+
+static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
+ struct nlattr **attrs)
+{
+ char *name;
+ struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
+ struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
+ struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
+ int err;
+ int len;
+
+ if (!attrs[TIPC_NLA_LINK])
+ return -EINVAL;
+
+ err = nla_parse_nested_deprecated(link, TIPC_NLA_LINK_MAX,
+ attrs[TIPC_NLA_LINK], NULL, NULL);
+ if (err)
+ return err;
+
+ if (!link[TIPC_NLA_LINK_PROP])
+ return -EINVAL;
+
+ err = nla_parse_nested_deprecated(prop, TIPC_NLA_PROP_MAX,
+ link[TIPC_NLA_LINK_PROP], NULL,
+ NULL);
+ if (err)
+ return err;
+
+ if (!link[TIPC_NLA_LINK_STATS])
+ return -EINVAL;
+
+ err = nla_parse_nested_deprecated(stats, TIPC_NLA_STATS_MAX,
+ link[TIPC_NLA_LINK_STATS], NULL,
+ NULL);
+ if (err)
+ return err;
+
+ name = (char *)TLV_DATA(msg->req);
+
+ len = TLV_GET_DATA_LEN(msg->req);
+ if (len <= 0)
+ return -EINVAL;
+
+ len = min_t(int, len, TIPC_MAX_LINK_NAME);
+ if (!string_is_terminated(name, len))
+ return -EINVAL;
+
+ if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
+ return 0;
+
+ tipc_tlv_sprintf(msg->rep, "\nLink <%s>\n",
+ (char *)nla_data(link[TIPC_NLA_LINK_NAME]));
+
+ if (link[TIPC_NLA_LINK_BROADCAST]) {
+ __fill_bc_link_stat(msg, prop, stats);
+ return 0;
+ }
+
+ if (link[TIPC_NLA_LINK_ACTIVE])
+ tipc_tlv_sprintf(msg->rep, " ACTIVE");
+ else if (link[TIPC_NLA_LINK_UP])
+ tipc_tlv_sprintf(msg->rep, " STANDBY");
+ else
+ tipc_tlv_sprintf(msg->rep, " DEFUNCT");
+
+ tipc_tlv_sprintf(msg->rep, " MTU:%u Priority:%u",
+ nla_get_u32(link[TIPC_NLA_LINK_MTU]),
+ nla_get_u32(prop[TIPC_NLA_PROP_PRIO]));
+
+ tipc_tlv_sprintf(msg->rep, " Tolerance:%u ms Window:%u packets\n",
+ nla_get_u32(prop[TIPC_NLA_PROP_TOL]),
+ nla_get_u32(prop[TIPC_NLA_PROP_WIN]));
+
+ tipc_tlv_sprintf(msg->rep,
+ " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
+ nla_get_u32(link[TIPC_NLA_LINK_RX]) -
+ nla_get_u32(stats[TIPC_NLA_STATS_RX_INFO]),
+ nla_get_u32(stats[TIPC_NLA_STATS_RX_FRAGMENTS]),
+ nla_get_u32(stats[TIPC_NLA_STATS_RX_FRAGMENTED]),
+ nla_get_u32(stats[TIPC_NLA_STATS_RX_BUNDLES]),
+ nla_get_u32(stats[TIPC_NLA_STATS_RX_BUNDLED]));
+
+ tipc_tlv_sprintf(msg->rep,
+ " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
+ nla_get_u32(link[TIPC_NLA_LINK_TX]) -
+ nla_get_u32(stats[TIPC_NLA_STATS_TX_INFO]),
+ nla_get_u32(stats[TIPC_NLA_STATS_TX_FRAGMENTS]),
+ nla_get_u32(stats[TIPC_NLA_STATS_TX_FRAGMENTED]),
+ nla_get_u32(stats[TIPC_NLA_STATS_TX_BUNDLES]),
+ nla_get_u32(stats[TIPC_NLA_STATS_TX_BUNDLED]));
+
+ tipc_tlv_sprintf(msg->rep,
+ " TX profile sample:%u packets average:%u octets\n",
+ nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_CNT]),
+ nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_TOT]) /
+ nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT]));
+
+ tipc_tlv_sprintf(msg->rep,
+ " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% ",
+ perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P0]),
+ nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])),
+ perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P1]),
+ nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])),
+ perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P2]),
+ nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])),
+ perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P3]),
+ nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])));
+
+ tipc_tlv_sprintf(msg->rep, "-16384:%u%% -32768:%u%% -66000:%u%%\n",
+ perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P4]),
+ nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])),
+ perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P5]),
+ nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])),
+ perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P6]),
+ nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])));
+
+ tipc_tlv_sprintf(msg->rep,
+ " RX states:%u probes:%u naks:%u defs:%u dups:%u\n",
+ nla_get_u32(stats[TIPC_NLA_STATS_RX_STATES]),
+ nla_get_u32(stats[TIPC_NLA_STATS_RX_PROBES]),
+ nla_get_u32(stats[TIPC_NLA_STATS_RX_NACKS]),
+ nla_get_u32(stats[TIPC_NLA_STATS_RX_DEFERRED]),
+ nla_get_u32(stats[TIPC_NLA_STATS_DUPLICATES]));
+
+ tipc_tlv_sprintf(msg->rep,
+ " TX states:%u probes:%u naks:%u acks:%u dups:%u\n",
+ nla_get_u32(stats[TIPC_NLA_STATS_TX_STATES]),
+ nla_get_u32(stats[TIPC_NLA_STATS_TX_PROBES]),
+ nla_get_u32(stats[TIPC_NLA_STATS_TX_NACKS]),
+ nla_get_u32(stats[TIPC_NLA_STATS_TX_ACKS]),
+ nla_get_u32(stats[TIPC_NLA_STATS_RETRANSMITTED]));
+
+ tipc_tlv_sprintf(msg->rep,
+ " Congestion link:%u Send queue max:%u avg:%u",
+ nla_get_u32(stats[TIPC_NLA_STATS_LINK_CONGS]),
+ nla_get_u32(stats[TIPC_NLA_STATS_MAX_QUEUE]),
+ nla_get_u32(stats[TIPC_NLA_STATS_AVG_QUEUE]));
+
+ return 0;
+}
+
+static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
+ struct nlattr **attrs)
+{
+ struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
+ struct tipc_link_info link_info;
+ int err;
+
+ if (!attrs[TIPC_NLA_LINK])
+ return -EINVAL;
+
+ err = nla_parse_nested_deprecated(link, TIPC_NLA_LINK_MAX,
+ attrs[TIPC_NLA_LINK], NULL, NULL);
+ if (err)
+ return err;
+
+ link_info.dest = htonl(nla_get_flag(link[TIPC_NLA_LINK_DEST]));
+ link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
+ nla_strscpy(link_info.str, link[TIPC_NLA_LINK_NAME],
+ TIPC_MAX_LINK_NAME);
+
+ return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO,
+ &link_info, sizeof(link_info));
+}
+
+static int __tipc_add_link_prop(struct sk_buff *skb,
+ struct tipc_nl_compat_msg *msg,
+ struct tipc_link_config *lc)
+{
+ switch (msg->cmd) {
+ case TIPC_CMD_SET_LINK_PRI:
+ return nla_put_u32(skb, TIPC_NLA_PROP_PRIO, ntohl(lc->value));
+ case TIPC_CMD_SET_LINK_TOL:
+ return nla_put_u32(skb, TIPC_NLA_PROP_TOL, ntohl(lc->value));
+ case TIPC_CMD_SET_LINK_WINDOW:
+ return nla_put_u32(skb, TIPC_NLA_PROP_WIN, ntohl(lc->value));
+ }
+
+ return -EINVAL;
+}
+
+static int tipc_nl_compat_media_set(struct sk_buff *skb,
+ struct tipc_nl_compat_msg *msg)
+{
+ struct nlattr *prop;
+ struct nlattr *media;
+ struct tipc_link_config *lc;
+
+ lc = (struct tipc_link_config *)TLV_DATA(msg->req);
+
+ media = nla_nest_start_noflag(skb, TIPC_NLA_MEDIA);
+ if (!media)
+ return -EMSGSIZE;
+
+ if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name))
+ return -EMSGSIZE;
+
+ prop = nla_nest_start_noflag(skb, TIPC_NLA_MEDIA_PROP);
+ if (!prop)
+ return -EMSGSIZE;
+
+ __tipc_add_link_prop(skb, msg, lc);
+ nla_nest_end(skb, prop);
+ nla_nest_end(skb, media);
+
+ return 0;
+}
+
+static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
+ struct tipc_nl_compat_msg *msg)
+{
+ struct nlattr *prop;
+ struct nlattr *bearer;
+ struct tipc_link_config *lc;
+
+ lc = (struct tipc_link_config *)TLV_DATA(msg->req);
+
+ bearer = nla_nest_start_noflag(skb, TIPC_NLA_BEARER);
+ if (!bearer)
+ return -EMSGSIZE;
+
+ if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name))
+ return -EMSGSIZE;
+
+ prop = nla_nest_start_noflag(skb, TIPC_NLA_BEARER_PROP);
+ if (!prop)
+ return -EMSGSIZE;
+
+ __tipc_add_link_prop(skb, msg, lc);
+ nla_nest_end(skb, prop);
+ nla_nest_end(skb, bearer);
+
+ return 0;
+}
+
+static int __tipc_nl_compat_link_set(struct sk_buff *skb,
+ struct tipc_nl_compat_msg *msg)
+{
+ struct nlattr *prop;
+ struct nlattr *link;
+ struct tipc_link_config *lc;
+
+ lc = (struct tipc_link_config *)TLV_DATA(msg->req);
+
+ link = nla_nest_start_noflag(skb, TIPC_NLA_LINK);
+ if (!link)
+ return -EMSGSIZE;
+
+ if (nla_put_string(skb, TIPC_NLA_LINK_NAME, lc->name))
+ return -EMSGSIZE;
+
+ prop = nla_nest_start_noflag(skb, TIPC_NLA_LINK_PROP);
+ if (!prop)
+ return -EMSGSIZE;
+
+ __tipc_add_link_prop(skb, msg, lc);
+ nla_nest_end(skb, prop);
+ nla_nest_end(skb, link);
+
+ return 0;
+}
+
+static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
+ struct sk_buff *skb,
+ struct tipc_nl_compat_msg *msg)
+{
+ struct tipc_link_config *lc;
+ struct tipc_bearer *bearer;
+ struct tipc_media *media;
+ int len;
+
+ lc = (struct tipc_link_config *)TLV_DATA(msg->req);
+
+ len = TLV_GET_DATA_LEN(msg->req);
+ len -= offsetof(struct tipc_link_config, name);
+ if (len <= 0)
+ return -EINVAL;
+
+ len = min_t(int, len, TIPC_MAX_LINK_NAME);
+ if (!string_is_terminated(lc->name, len))
+ return -EINVAL;
+
+ media = tipc_media_find(lc->name);
+ if (media) {
+ cmd->doit = &__tipc_nl_media_set;
+ return tipc_nl_compat_media_set(skb, msg);
+ }
+
+ bearer = tipc_bearer_find(msg->net, lc->name);
+ if (bearer) {
+ cmd->doit = &__tipc_nl_bearer_set;
+ return tipc_nl_compat_bearer_set(skb, msg);
+ }
+
+ return __tipc_nl_compat_link_set(skb, msg);
+}
+
+static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
+ struct sk_buff *skb,
+ struct tipc_nl_compat_msg *msg)
+{
+ char *name;
+ struct nlattr *link;
+ int len;
+
+ name = (char *)TLV_DATA(msg->req);
+
+ link = nla_nest_start_noflag(skb, TIPC_NLA_LINK);
+ if (!link)
+ return -EMSGSIZE;
+
+ len = TLV_GET_DATA_LEN(msg->req);
+ if (len <= 0)
+ return -EINVAL;
+
+ len = min_t(int, len, TIPC_MAX_LINK_NAME);
+ if (!string_is_terminated(name, len))
+ return -EINVAL;
+
+ if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name))
+ return -EMSGSIZE;
+
+ nla_nest_end(skb, link);
+
+ return 0;
+}
+
+static int tipc_nl_compat_name_table_dump_header(struct tipc_nl_compat_msg *msg)
+{
+ int i;
+ u32 depth;
+ struct tipc_name_table_query *ntq;
+ static const char * const header[] = {
+ "Type ",
+ "Lower Upper ",
+ "Port Identity ",
+ "Publication Scope"
+ };
+
+ ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
+ if (TLV_GET_DATA_LEN(msg->req) < (int)sizeof(struct tipc_name_table_query))
+ return -EINVAL;
+
+ depth = ntohl(ntq->depth);
+
+ if (depth > 4)
+ depth = 4;
+ for (i = 0; i < depth; i++)
+ tipc_tlv_sprintf(msg->rep, header[i]);
+ tipc_tlv_sprintf(msg->rep, "\n");
+
+ return 0;
+}
+
+static int tipc_nl_compat_name_table_dump(struct tipc_nl_compat_msg *msg,
+ struct nlattr **attrs)
+{
+ char port_str[27];
+ struct tipc_name_table_query *ntq;
+ struct nlattr *nt[TIPC_NLA_NAME_TABLE_MAX + 1];
+ struct nlattr *publ[TIPC_NLA_PUBL_MAX + 1];
+ u32 node, depth, type, lowbound, upbound;
+ static const char * const scope_str[] = {"", " zone", " cluster",
+ " node"};
+ int err;
+
+ if (!attrs[TIPC_NLA_NAME_TABLE])
+ return -EINVAL;
+
+ err = nla_parse_nested_deprecated(nt, TIPC_NLA_NAME_TABLE_MAX,
+ attrs[TIPC_NLA_NAME_TABLE], NULL,
+ NULL);
+ if (err)
+ return err;
+
+ if (!nt[TIPC_NLA_NAME_TABLE_PUBL])
+ return -EINVAL;
+
+ err = nla_parse_nested_deprecated(publ, TIPC_NLA_PUBL_MAX,
+ nt[TIPC_NLA_NAME_TABLE_PUBL], NULL,
+ NULL);
+ if (err)
+ return err;
+
+ ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
+
+ depth = ntohl(ntq->depth);
+ type = ntohl(ntq->type);
+ lowbound = ntohl(ntq->lowbound);
+ upbound = ntohl(ntq->upbound);
+
+ if (!(depth & TIPC_NTQ_ALLTYPES) &&
+ (type != nla_get_u32(publ[TIPC_NLA_PUBL_TYPE])))
+ return 0;
+ if (lowbound && (lowbound > nla_get_u32(publ[TIPC_NLA_PUBL_UPPER])))
+ return 0;
+ if (upbound && (upbound < nla_get_u32(publ[TIPC_NLA_PUBL_LOWER])))
+ return 0;
+
+ tipc_tlv_sprintf(msg->rep, "%-10u ",
+ nla_get_u32(publ[TIPC_NLA_PUBL_TYPE]));
+
+ if (depth == 1)
+ goto out;
+
+ tipc_tlv_sprintf(msg->rep, "%-10u %-10u ",
+ nla_get_u32(publ[TIPC_NLA_PUBL_LOWER]),
+ nla_get_u32(publ[TIPC_NLA_PUBL_UPPER]));
+
+ if (depth == 2)
+ goto out;
+
+ node = nla_get_u32(publ[TIPC_NLA_PUBL_NODE]);
+ sprintf(port_str, "<%u.%u.%u:%u>", tipc_zone(node), tipc_cluster(node),
+ tipc_node(node), nla_get_u32(publ[TIPC_NLA_PUBL_REF]));
+ tipc_tlv_sprintf(msg->rep, "%-26s ", port_str);
+
+ if (depth == 3)
+ goto out;
+
+ tipc_tlv_sprintf(msg->rep, "%-10u %s",
+ nla_get_u32(publ[TIPC_NLA_PUBL_KEY]),
+ scope_str[nla_get_u32(publ[TIPC_NLA_PUBL_SCOPE])]);
+out:
+ tipc_tlv_sprintf(msg->rep, "\n");
+
+ return 0;
+}
+
+static int __tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg,
+ struct nlattr **attrs)
+{
+ u32 type, lower, upper;
+ struct nlattr *publ[TIPC_NLA_PUBL_MAX + 1];
+ int err;
+
+ if (!attrs[TIPC_NLA_PUBL])
+ return -EINVAL;
+
+ err = nla_parse_nested_deprecated(publ, TIPC_NLA_PUBL_MAX,
+ attrs[TIPC_NLA_PUBL], NULL, NULL);
+ if (err)
+ return err;
+
+ type = nla_get_u32(publ[TIPC_NLA_PUBL_TYPE]);
+ lower = nla_get_u32(publ[TIPC_NLA_PUBL_LOWER]);
+ upper = nla_get_u32(publ[TIPC_NLA_PUBL_UPPER]);
+
+ if (lower == upper)
+ tipc_tlv_sprintf(msg->rep, " {%u,%u}", type, lower);
+ else
+ tipc_tlv_sprintf(msg->rep, " {%u,%u,%u}", type, lower, upper);
+
+ return 0;
+}
+
+static int tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg, u32 sock)
+{
+ int err;
+ void *hdr;
+ struct nlattr *nest;
+ struct sk_buff *args;
+ struct tipc_nl_compat_cmd_dump dump;
+
+ args = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!args)
+ return -ENOMEM;
+
+ hdr = genlmsg_put(args, 0, 0, &tipc_genl_family, NLM_F_MULTI,
+ TIPC_NL_PUBL_GET);
+ if (!hdr) {
+ kfree_skb(args);
+ return -EMSGSIZE;
+ }
+
+ nest = nla_nest_start_noflag(args, TIPC_NLA_SOCK);
+ if (!nest) {
+ kfree_skb(args);
+ return -EMSGSIZE;
+ }
+
+ if (nla_put_u32(args, TIPC_NLA_SOCK_REF, sock)) {
+ kfree_skb(args);
+ return -EMSGSIZE;
+ }
+
+ nla_nest_end(args, nest);
+ genlmsg_end(args, hdr);
+
+ dump.dumpit = tipc_nl_publ_dump;
+ dump.format = __tipc_nl_compat_publ_dump;
+
+ err = __tipc_nl_compat_dumpit(&dump, msg, args);
+
+ kfree_skb(args);
+
+ return err;
+}
+
+static int tipc_nl_compat_sk_dump(struct tipc_nl_compat_msg *msg,
+ struct nlattr **attrs)
+{
+ int err;
+ u32 sock_ref;
+ struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
+
+ if (!attrs[TIPC_NLA_SOCK])
+ return -EINVAL;
+
+ err = nla_parse_nested_deprecated(sock, TIPC_NLA_SOCK_MAX,
+ attrs[TIPC_NLA_SOCK], NULL, NULL);
+ if (err)
+ return err;
+
+ sock_ref = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
+ tipc_tlv_sprintf(msg->rep, "%u:", sock_ref);
+
+ if (sock[TIPC_NLA_SOCK_CON]) {
+ u32 node;
+ struct nlattr *con[TIPC_NLA_CON_MAX + 1];
+
+ err = nla_parse_nested_deprecated(con, TIPC_NLA_CON_MAX,
+ sock[TIPC_NLA_SOCK_CON],
+ NULL, NULL);
+
+ if (err)
+ return err;
+
+ node = nla_get_u32(con[TIPC_NLA_CON_NODE]);
+ tipc_tlv_sprintf(msg->rep, " connected to <%u.%u.%u:%u>",
+ tipc_zone(node),
+ tipc_cluster(node),
+ tipc_node(node),
+ nla_get_u32(con[TIPC_NLA_CON_SOCK]));
+
+ if (con[TIPC_NLA_CON_FLAG])
+ tipc_tlv_sprintf(msg->rep, " via {%u,%u}\n",
+ nla_get_u32(con[TIPC_NLA_CON_TYPE]),
+ nla_get_u32(con[TIPC_NLA_CON_INST]));
+ else
+ tipc_tlv_sprintf(msg->rep, "\n");
+ } else if (sock[TIPC_NLA_SOCK_HAS_PUBL]) {
+ tipc_tlv_sprintf(msg->rep, " bound to");
+
+ err = tipc_nl_compat_publ_dump(msg, sock_ref);
+ if (err)
+ return err;
+ }
+ tipc_tlv_sprintf(msg->rep, "\n");
+
+ return 0;
+}
+
+static int tipc_nl_compat_media_dump(struct tipc_nl_compat_msg *msg,
+ struct nlattr **attrs)
+{
+ struct nlattr *media[TIPC_NLA_MEDIA_MAX + 1];
+ int err;
+
+ if (!attrs[TIPC_NLA_MEDIA])
+ return -EINVAL;
+
+ err = nla_parse_nested_deprecated(media, TIPC_NLA_MEDIA_MAX,
+ attrs[TIPC_NLA_MEDIA], NULL, NULL);
+ if (err)
+ return err;
+
+ return tipc_add_tlv(msg->rep, TIPC_TLV_MEDIA_NAME,
+ nla_data(media[TIPC_NLA_MEDIA_NAME]),
+ nla_len(media[TIPC_NLA_MEDIA_NAME]));
+}
+
+static int tipc_nl_compat_node_dump(struct tipc_nl_compat_msg *msg,
+ struct nlattr **attrs)
+{
+ struct tipc_node_info node_info;
+ struct nlattr *node[TIPC_NLA_NODE_MAX + 1];
+ int err;
+
+ if (!attrs[TIPC_NLA_NODE])
+ return -EINVAL;
+
+ err = nla_parse_nested_deprecated(node, TIPC_NLA_NODE_MAX,
+ attrs[TIPC_NLA_NODE], NULL, NULL);
+ if (err)
+ return err;
+
+ node_info.addr = htonl(nla_get_u32(node[TIPC_NLA_NODE_ADDR]));
+ node_info.up = htonl(nla_get_flag(node[TIPC_NLA_NODE_UP]));
+
+ return tipc_add_tlv(msg->rep, TIPC_TLV_NODE_INFO, &node_info,
+ sizeof(node_info));
+}
+
+static int tipc_nl_compat_net_set(struct tipc_nl_compat_cmd_doit *cmd,
+ struct sk_buff *skb,
+ struct tipc_nl_compat_msg *msg)
+{
+ u32 val;
+ struct nlattr *net;
+
+ val = ntohl(*(__be32 *)TLV_DATA(msg->req));
+
+ net = nla_nest_start_noflag(skb, TIPC_NLA_NET);
+ if (!net)
+ return -EMSGSIZE;
+
+ if (msg->cmd == TIPC_CMD_SET_NODE_ADDR) {
+ if (nla_put_u32(skb, TIPC_NLA_NET_ADDR, val))
+ return -EMSGSIZE;
+ } else if (msg->cmd == TIPC_CMD_SET_NETID) {
+ if (nla_put_u32(skb, TIPC_NLA_NET_ID, val))
+ return -EMSGSIZE;
+ }
+ nla_nest_end(skb, net);
+
+ return 0;
+}
+
+static int tipc_nl_compat_net_dump(struct tipc_nl_compat_msg *msg,
+ struct nlattr **attrs)
+{
+ __be32 id;
+ struct nlattr *net[TIPC_NLA_NET_MAX + 1];
+ int err;
+
+ if (!attrs[TIPC_NLA_NET])
+ return -EINVAL;
+
+ err = nla_parse_nested_deprecated(net, TIPC_NLA_NET_MAX,
+ attrs[TIPC_NLA_NET], NULL, NULL);
+ if (err)
+ return err;
+
+ id = htonl(nla_get_u32(net[TIPC_NLA_NET_ID]));
+
+ return tipc_add_tlv(msg->rep, TIPC_TLV_UNSIGNED, &id, sizeof(id));
+}
+
+static int tipc_cmd_show_stats_compat(struct tipc_nl_compat_msg *msg)
+{
+ msg->rep = tipc_tlv_alloc(ULTRA_STRING_MAX_LEN);
+ if (!msg->rep)
+ return -ENOMEM;
+
+ tipc_tlv_init(msg->rep, TIPC_TLV_ULTRA_STRING);
+ tipc_tlv_sprintf(msg->rep, "TIPC version " TIPC_MOD_VER "\n");
+
+ return 0;
+}
+
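+/* Map each legacy (tipc-config) command onto the corresponding new netlink
+ * doit/dumpit handler, plus the transcode/format callback used to convert
+ * between TLV and netlink attribute representations.
+ */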
+static int tipc_nl_compat_handle(struct tipc_nl_compat_msg *msg)
+{
+ struct tipc_nl_compat_cmd_dump dump;
+ struct tipc_nl_compat_cmd_doit doit;
+
+ memset(&dump, 0, sizeof(dump));
+ memset(&doit, 0, sizeof(doit));
+
+ switch (msg->cmd) {
+ case TIPC_CMD_NOOP:
+ msg->rep = tipc_tlv_alloc(0);
+ if (!msg->rep)
+ return -ENOMEM;
+ return 0;
+ case TIPC_CMD_GET_BEARER_NAMES:
+ msg->rep_size = MAX_BEARERS * TLV_SPACE(TIPC_MAX_BEARER_NAME);
+ dump.dumpit = tipc_nl_bearer_dump;
+ dump.format = tipc_nl_compat_bearer_dump;
+ return tipc_nl_compat_dumpit(&dump, msg);
+ case TIPC_CMD_ENABLE_BEARER:
+ msg->req_type = TIPC_TLV_BEARER_CONFIG;
+ doit.doit = __tipc_nl_bearer_enable;
+ doit.transcode = tipc_nl_compat_bearer_enable;
+ return tipc_nl_compat_doit(&doit, msg);
+ case TIPC_CMD_DISABLE_BEARER:
+ msg->req_type = TIPC_TLV_BEARER_NAME;
+ doit.doit = __tipc_nl_bearer_disable;
+ doit.transcode = tipc_nl_compat_bearer_disable;
+ return tipc_nl_compat_doit(&doit, msg);
+ case TIPC_CMD_SHOW_LINK_STATS:
+ msg->req_type = TIPC_TLV_LINK_NAME;
+ msg->rep_size = ULTRA_STRING_MAX_LEN;
+ msg->rep_type = TIPC_TLV_ULTRA_STRING;
+ dump.dumpit = tipc_nl_node_dump_link;
+ dump.format = tipc_nl_compat_link_stat_dump;
+ return tipc_nl_compat_dumpit(&dump, msg);
+ case TIPC_CMD_GET_LINKS:
+ msg->req_type = TIPC_TLV_NET_ADDR;
+ msg->rep_size = ULTRA_STRING_MAX_LEN;
+ dump.dumpit = tipc_nl_node_dump_link;
+ dump.format = tipc_nl_compat_link_dump;
+ return tipc_nl_compat_dumpit(&dump, msg);
+ case TIPC_CMD_SET_LINK_TOL:
+ case TIPC_CMD_SET_LINK_PRI:
+ case TIPC_CMD_SET_LINK_WINDOW:
+ msg->req_type = TIPC_TLV_LINK_CONFIG;
+ doit.doit = tipc_nl_node_set_link;
+ doit.transcode = tipc_nl_compat_link_set;
+ return tipc_nl_compat_doit(&doit, msg);
+ case TIPC_CMD_RESET_LINK_STATS:
+ msg->req_type = TIPC_TLV_LINK_NAME;
+ doit.doit = tipc_nl_node_reset_link_stats;
+ doit.transcode = tipc_nl_compat_link_reset_stats;
+ return tipc_nl_compat_doit(&doit, msg);
+ case TIPC_CMD_SHOW_NAME_TABLE:
+ msg->req_type = TIPC_TLV_NAME_TBL_QUERY;
+ msg->rep_size = ULTRA_STRING_MAX_LEN;
+ msg->rep_type = TIPC_TLV_ULTRA_STRING;
+ dump.header = tipc_nl_compat_name_table_dump_header;
+ dump.dumpit = tipc_nl_name_table_dump;
+ dump.format = tipc_nl_compat_name_table_dump;
+ return tipc_nl_compat_dumpit(&dump, msg);
+ case TIPC_CMD_SHOW_PORTS:
+ msg->rep_size = ULTRA_STRING_MAX_LEN;
+ msg->rep_type = TIPC_TLV_ULTRA_STRING;
+ dump.dumpit = tipc_nl_sk_dump;
+ dump.format = tipc_nl_compat_sk_dump;
+ return tipc_nl_compat_dumpit(&dump, msg);
+ case TIPC_CMD_GET_MEDIA_NAMES:
+ msg->rep_size = MAX_MEDIA * TLV_SPACE(TIPC_MAX_MEDIA_NAME);
+ dump.dumpit = tipc_nl_media_dump;
+ dump.format = tipc_nl_compat_media_dump;
+ return tipc_nl_compat_dumpit(&dump, msg);
+ case TIPC_CMD_GET_NODES:
+ msg->rep_size = ULTRA_STRING_MAX_LEN;
+ dump.dumpit = tipc_nl_node_dump;
+ dump.format = tipc_nl_compat_node_dump;
+ return tipc_nl_compat_dumpit(&dump, msg);
+ case TIPC_CMD_SET_NODE_ADDR:
+ msg->req_type = TIPC_TLV_NET_ADDR;
+ doit.doit = __tipc_nl_net_set;
+ doit.transcode = tipc_nl_compat_net_set;
+ return tipc_nl_compat_doit(&doit, msg);
+ case TIPC_CMD_SET_NETID:
+ msg->req_type = TIPC_TLV_UNSIGNED;
+ doit.doit = __tipc_nl_net_set;
+ doit.transcode = tipc_nl_compat_net_set;
+ return tipc_nl_compat_doit(&doit, msg);
+ case TIPC_CMD_GET_NETID:
+ msg->rep_size = sizeof(u32);
+ dump.dumpit = tipc_nl_net_dump;
+ dump.format = tipc_nl_compat_net_dump;
+ return tipc_nl_compat_dumpit(&dump, msg);
+ case TIPC_CMD_SHOW_STATS:
+ return tipc_cmd_show_stats_compat(msg);
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+ int len;
+ struct tipc_nl_compat_msg msg;
+ struct nlmsghdr *req_nlh;
+ struct nlmsghdr *rep_nlh;
+ struct tipc_genlmsghdr *req_userhdr = genl_info_userhdr(info);
+
+ memset(&msg, 0, sizeof(msg));
+
+ req_nlh = (struct nlmsghdr *)skb->data;
+ msg.req = nlmsg_data(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN;
+ msg.cmd = req_userhdr->cmd;
+ msg.net = genl_info_net(info);
+ msg.dst_sk = skb->sk;
+
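+	/* Legacy commands with either of the two top bits set are
+	 * administrative and require CAP_NET_ADMIN
+	 */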
+ if ((msg.cmd & 0xC000) && (!netlink_net_capable(skb, CAP_NET_ADMIN))) {
+ msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_NET_ADMIN);
+ err = -EACCES;
+ goto send;
+ }
+
+ msg.req_size = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
+ if (msg.req_size && !TLV_OK(msg.req, msg.req_size)) {
+ msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
+ err = -EOPNOTSUPP;
+ goto send;
+ }
+
+ err = tipc_nl_compat_handle(&msg);
+ if ((err == -EOPNOTSUPP) || (err == -EPERM))
+ msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
+ else if (err == -EINVAL)
+ msg.rep = tipc_get_err_tlv(TIPC_CFG_TLV_ERROR);
+send:
+ if (!msg.rep)
+ return err;
+
+ len = nlmsg_total_size(GENL_HDRLEN + TIPC_GENL_HDRLEN);
+ skb_push(msg.rep, len);
+ rep_nlh = nlmsg_hdr(msg.rep);
+ memcpy(rep_nlh, info->nlhdr, len);
+ rep_nlh->nlmsg_len = msg.rep->len;
+ genlmsg_unicast(msg.net, msg.rep, NETLINK_CB(skb).portid);
+
+ return err;
+}
+
+static const struct genl_small_ops tipc_genl_compat_ops[] = {
+ {
+ .cmd = TIPC_GENL_CMD,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = tipc_nl_compat_recv,
+ },
+};
+
+static struct genl_family tipc_genl_compat_family __ro_after_init = {
+ .name = TIPC_GENL_NAME,
+ .version = TIPC_GENL_VERSION,
+ .hdrsize = TIPC_GENL_HDRLEN,
+ .maxattr = 0,
+ .netnsok = true,
+ .module = THIS_MODULE,
+ .small_ops = tipc_genl_compat_ops,
+ .n_small_ops = ARRAY_SIZE(tipc_genl_compat_ops),
+ .resv_start_op = TIPC_GENL_CMD + 1,
+};
+
+int __init tipc_netlink_compat_start(void)
+{
+ int res;
+
+ res = genl_register_family(&tipc_genl_compat_family);
+ if (res) {
+ pr_err("Failed to register legacy compat interface\n");
+ return res;
+ }
+
+ return 0;
+}
+
+void tipc_netlink_compat_stop(void)
+{
+ genl_unregister_family(&tipc_genl_compat_family);
+}
diff --git a/net/tipc/node.c b/net/tipc/node.c
new file mode 100644
index 0000000000..3105abe97b
--- /dev/null
+++ b/net/tipc/node.c
@@ -0,0 +1,3166 @@
+/*
+ * net/tipc/node.c: TIPC node management routines
+ *
+ * Copyright (c) 2000-2006, 2012-2016, Ericsson AB
+ * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "link.h"
+#include "node.h"
+#include "name_distr.h"
+#include "socket.h"
+#include "bcast.h"
+#include "monitor.h"
+#include "discover.h"
+#include "netlink.h"
+#include "trace.h"
+#include "crypto.h"
+
+#define INVALID_NODE_SIG 0x10000
+#define NODE_CLEANUP_AFTER 300000
+
+/* Flags used to take different actions according to flag type
+ * TIPC_NOTIFY_NODE_DOWN: notify that the node is down
+ * TIPC_NOTIFY_NODE_UP: notify that the node is up
+ * TIPC_NOTIFY_LINK_UP: publish the link state name entry
+ * TIPC_NOTIFY_LINK_DOWN: withdraw the link state name entry
+ */
+enum {
+ TIPC_NOTIFY_NODE_DOWN = (1 << 3),
+ TIPC_NOTIFY_NODE_UP = (1 << 4),
+ TIPC_NOTIFY_LINK_UP = (1 << 6),
+ TIPC_NOTIFY_LINK_DOWN = (1 << 7)
+};
+
+struct tipc_link_entry {
+ struct tipc_link *link;
+ spinlock_t lock; /* per link */
+ u32 mtu;
+ struct sk_buff_head inputq;
+ struct tipc_media_addr maddr;
+};
+
+struct tipc_bclink_entry {
+ struct tipc_link *link;
+ struct sk_buff_head inputq1;
+ struct sk_buff_head arrvq;
+ struct sk_buff_head inputq2;
+ struct sk_buff_head namedq;
+ u16 named_rcv_nxt;
+ bool named_open;
+};
+
+/**
+ * struct tipc_node - TIPC node structure
+ * @addr: network address of node
+ * @kref: reference counter to node object
+ * @lock: rwlock governing access to structure
+ * @net: the applicable net namespace
+ * @hash: links to adjacent nodes in unsorted hash chain
+ * @inputq: pointer to input queue containing messages for msg event
+ * @namedq: pointer to name table input queue with name table messages
+ * @active_links: bearer ids of active links, used as index into links[] array
+ * @links: array containing references to all links to node
+ * @bc_entry: broadcast link entry
+ * @action_flags: bit mask of different types of node actions
+ * @state: connectivity state vs peer node
+ * @preliminary: a preliminary node or not
+ * @failover_sent: failover sent or not
+ * @sync_point: sequence number where synch/failover is finished
+ * @list: links to adjacent nodes in sorted list of cluster's nodes
+ * @working_links: number of working links to node (both active and standby)
+ * @link_cnt: number of links to node
+ * @capabilities: bitmap, indicating peer node's functional capabilities
+ * @signature: node instance identifier
+ * @link_id: local and remote bearer ids of changing link, if any
+ * @peer_id: 128-bit ID of peer
+ * @peer_id_string: ID string of peer
+ * @publ_list: list of publications
+ * @conn_sks: list of connections (FIXME)
+ * @timer: node's keepalive timer
+ * @keepalive_intv: keepalive interval in milliseconds
+ * @rcu: rcu struct for tipc_node
+ * @delete_at: indicates the time for deleting a down node
+ * @peer_net: peer's net namespace
+ * @peer_hash_mix: hash for this peer (FIXME)
+ * @crypto_rx: RX crypto handler
+ */
+struct tipc_node {
+ u32 addr;
+ struct kref kref;
+ rwlock_t lock;
+ struct net *net;
+ struct hlist_node hash;
+ int active_links[2];
+ struct tipc_link_entry links[MAX_BEARERS];
+ struct tipc_bclink_entry bc_entry;
+ int action_flags;
+ struct list_head list;
+ int state;
+ bool preliminary;
+ bool failover_sent;
+ u16 sync_point;
+ int link_cnt;
+ u16 working_links;
+ u16 capabilities;
+ u32 signature;
+ u32 link_id;
+ u8 peer_id[16];
+ char peer_id_string[NODE_ID_STR_LEN];
+ struct list_head publ_list;
+ struct list_head conn_sks;
+ unsigned long keepalive_intv;
+ struct timer_list timer;
+ struct rcu_head rcu;
+ unsigned long delete_at;
+ struct net *peer_net;
+ u32 peer_hash_mix;
+#ifdef CONFIG_TIPC_CRYPTO
+ struct tipc_crypto *crypto_rx;
+#endif
+};
+
+/* Node FSM states and events:
+ */
+enum {
+ SELF_DOWN_PEER_DOWN = 0xdd,
+ SELF_UP_PEER_UP = 0xaa,
+ SELF_DOWN_PEER_LEAVING = 0xd1,
+ SELF_UP_PEER_COMING = 0xac,
+ SELF_COMING_PEER_UP = 0xca,
+ SELF_LEAVING_PEER_DOWN = 0x1d,
+ NODE_FAILINGOVER = 0xf0,
+ NODE_SYNCHING = 0xcc
+};
+
+enum {
+ SELF_ESTABL_CONTACT_EVT = 0xece,
+ SELF_LOST_CONTACT_EVT = 0x1ce,
+ PEER_ESTABL_CONTACT_EVT = 0x9ece,
+ PEER_LOST_CONTACT_EVT = 0x91ce,
+ NODE_FAILOVER_BEGIN_EVT = 0xfbe,
+ NODE_FAILOVER_END_EVT = 0xfee,
+ NODE_SYNCH_BEGIN_EVT = 0xcbe,
+ NODE_SYNCH_END_EVT = 0xcee
+};
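+
+/* Illustrative "happy path" through the node FSM (see tipc_node_fsm_evt()):
+ *   SELF_DOWN_PEER_DOWN --SELF_ESTABL_CONTACT_EVT--> SELF_UP_PEER_COMING
+ *   SELF_UP_PEER_COMING --PEER_ESTABL_CONTACT_EVT--> SELF_UP_PEER_UP
+ */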
+
+static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
+ struct sk_buff_head *xmitq,
+ struct tipc_media_addr **maddr);
+static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
+ bool delete);
+static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
+static void tipc_node_delete(struct tipc_node *node);
+static void tipc_node_timeout(struct timer_list *t);
+static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
+static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
+static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
+static bool node_is_up(struct tipc_node *n);
+static void tipc_node_delete_from_list(struct tipc_node *node);
+
+struct tipc_sock_conn {
+ u32 port;
+ u32 peer_port;
+ u32 peer_node;
+ struct list_head list;
+};
+
+static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
+{
+ int bearer_id = n->active_links[sel & 1];
+
+ if (unlikely(bearer_id == INVALID_BEARER_ID))
+ return NULL;
+
+ return n->links[bearer_id].link;
+}
+
+int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected)
+{
+ struct tipc_node *n;
+ int bearer_id;
+ unsigned int mtu = MAX_MSG_SIZE;
+
+ n = tipc_node_find(net, addr);
+ if (unlikely(!n))
+ return mtu;
+
+	/* Allow MAX_MSG_SIZE when building a connection-oriented message
+	 * if the peer lives in a local namespace on the same kernel
+	 */
+ if (n->peer_net && connected) {
+ tipc_node_put(n);
+ return mtu;
+ }
+
+ bearer_id = n->active_links[sel & 1];
+ if (likely(bearer_id != INVALID_BEARER_ID))
+ mtu = n->links[bearer_id].mtu;
+ tipc_node_put(n);
+ return mtu;
+}
+
+bool tipc_node_get_id(struct net *net, u32 addr, u8 *id)
+{
+ u8 *own_id = tipc_own_id(net);
+ struct tipc_node *n;
+
+ if (!own_id)
+ return true;
+
+ if (addr == tipc_own_addr(net)) {
+ memcpy(id, own_id, TIPC_NODEID_LEN);
+ return true;
+ }
+ n = tipc_node_find(net, addr);
+ if (!n)
+ return false;
+
+ memcpy(id, &n->peer_id, TIPC_NODEID_LEN);
+ tipc_node_put(n);
+ return true;
+}
+
+u16 tipc_node_get_capabilities(struct net *net, u32 addr)
+{
+ struct tipc_node *n;
+ u16 caps;
+
+ n = tipc_node_find(net, addr);
+ if (unlikely(!n))
+ return TIPC_NODE_CAPABILITIES;
+ caps = n->capabilities;
+ tipc_node_put(n);
+ return caps;
+}
+
+u32 tipc_node_get_addr(struct tipc_node *node)
+{
+ return (node) ? node->addr : 0;
+}
+
+char *tipc_node_get_id_str(struct tipc_node *node)
+{
+ return node->peer_id_string;
+}
+
+#ifdef CONFIG_TIPC_CRYPTO
+/**
+ * tipc_node_crypto_rx - Retrieve crypto RX handle from node
+ * @__n: target tipc_node
+ * Note: node ref counter must be held first!
+ */
+struct tipc_crypto *tipc_node_crypto_rx(struct tipc_node *__n)
+{
+ return (__n) ? __n->crypto_rx : NULL;
+}
+
+struct tipc_crypto *tipc_node_crypto_rx_by_list(struct list_head *pos)
+{
+ return container_of(pos, struct tipc_node, list)->crypto_rx;
+}
+
+struct tipc_crypto *tipc_node_crypto_rx_by_addr(struct net *net, u32 addr)
+{
+ struct tipc_node *n;
+
+ n = tipc_node_find(net, addr);
+ return (n) ? n->crypto_rx : NULL;
+}
+#endif
+
+static void tipc_node_free(struct rcu_head *rp)
+{
+ struct tipc_node *n = container_of(rp, struct tipc_node, rcu);
+
+#ifdef CONFIG_TIPC_CRYPTO
+ tipc_crypto_stop(&n->crypto_rx);
+#endif
+ kfree(n);
+}
+
+static void tipc_node_kref_release(struct kref *kref)
+{
+ struct tipc_node *n = container_of(kref, struct tipc_node, kref);
+
+ kfree(n->bc_entry.link);
+ call_rcu(&n->rcu, tipc_node_free);
+}
+
+void tipc_node_put(struct tipc_node *node)
+{
+ kref_put(&node->kref, tipc_node_kref_release);
+}
+
+void tipc_node_get(struct tipc_node *node)
+{
+ kref_get(&node->kref);
+}
+
+/*
+ * tipc_node_find - locate specified node object, if it exists
+ */
+static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
+{
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_node *node;
+ unsigned int thash = tipc_hashfn(addr);
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) {
+ if (node->addr != addr || node->preliminary)
+ continue;
+ if (!kref_get_unless_zero(&node->kref))
+ node = NULL;
+ break;
+ }
+ rcu_read_unlock();
+ return node;
+}
+
+/* tipc_node_find_by_id - locate specified node object by its 128-bit id
+ * Note: this function is called only when a discovery request failed
+ * to find the node by its 32-bit id, and is not time critical
+ */
+static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id)
+{
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_node *n;
+ bool found = false;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(n, &tn->node_list, list) {
+ read_lock_bh(&n->lock);
+ if (!memcmp(id, n->peer_id, 16) &&
+ kref_get_unless_zero(&n->kref))
+ found = true;
+ read_unlock_bh(&n->lock);
+ if (found)
+ break;
+ }
+ rcu_read_unlock();
+ return found ? n : NULL;
+}
+
+static void tipc_node_read_lock(struct tipc_node *n)
+ __acquires(n->lock)
+{
+ read_lock_bh(&n->lock);
+}
+
+static void tipc_node_read_unlock(struct tipc_node *n)
+ __releases(n->lock)
+{
+ read_unlock_bh(&n->lock);
+}
+
+static void tipc_node_write_lock(struct tipc_node *n)
+ __acquires(n->lock)
+{
+ write_lock_bh(&n->lock);
+}
+
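+/* Drop the write lock without processing pending action flags; the full
+ * tipc_node_write_unlock() below handles the notifications they trigger.
+ */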
+static void tipc_node_write_unlock_fast(struct tipc_node *n)
+ __releases(n->lock)
+{
+ write_unlock_bh(&n->lock);
+}
+
+static void tipc_node_write_unlock(struct tipc_node *n)
+ __releases(n->lock)
+{
+ struct tipc_socket_addr sk;
+ struct net *net = n->net;
+ u32 flags = n->action_flags;
+ struct list_head *publ_list;
+ struct tipc_uaddr ua;
+ u32 bearer_id, node;
+
+ if (likely(!flags)) {
+ write_unlock_bh(&n->lock);
+ return;
+ }
+
+ tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_NODE_SCOPE,
+ TIPC_LINK_STATE, n->addr, n->addr);
+ sk.ref = n->link_id;
+ sk.node = tipc_own_addr(net);
+ node = n->addr;
+ bearer_id = n->link_id & 0xffff;
+ publ_list = &n->publ_list;
+
+ n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
+ TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);
+
+ write_unlock_bh(&n->lock);
+
+ if (flags & TIPC_NOTIFY_NODE_DOWN)
+ tipc_publ_notify(net, publ_list, node, n->capabilities);
+
+ if (flags & TIPC_NOTIFY_NODE_UP)
+ tipc_named_node_up(net, node, n->capabilities);
+
+ if (flags & TIPC_NOTIFY_LINK_UP) {
+ tipc_mon_peer_up(net, node, bearer_id);
+ tipc_nametbl_publish(net, &ua, &sk, sk.ref);
+ }
+ if (flags & TIPC_NOTIFY_LINK_DOWN) {
+ tipc_mon_peer_down(net, node, bearer_id);
+ tipc_nametbl_withdraw(net, &ua, &sk, sk.ref);
+ }
+}
+
+static void tipc_node_assign_peer_net(struct tipc_node *n, u32 hash_mixes)
+{
+ int net_id = tipc_netid(n->net);
+ struct tipc_net *tn_peer;
+ struct net *tmp;
+ u32 hash_chk;
+
+ if (n->peer_net)
+ return;
+
+ for_each_net_rcu(tmp) {
+ tn_peer = tipc_net(tmp);
+ if (!tn_peer)
+ continue;
+		/* Check whether this namespace belongs to the peer node */
+ if (tn_peer->net_id != net_id)
+ continue;
+ if (memcmp(n->peer_id, tn_peer->node_id, NODE_ID_LEN))
+ continue;
+ hash_chk = tipc_net_hash_mixes(tmp, tn_peer->random);
+ if (hash_mixes ^ hash_chk)
+ continue;
+ n->peer_net = tmp;
+ n->peer_hash_mix = hash_mixes;
+ break;
+ }
+}
+
+struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id,
+ u16 capabilities, u32 hash_mixes,
+ bool preliminary)
+{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_link *l, *snd_l = tipc_bc_sndlink(net);
+ struct tipc_node *n, *temp_node;
+ unsigned long intv;
+ int bearer_id;
+ int i;
+
+ spin_lock_bh(&tn->node_list_lock);
+ n = tipc_node_find(net, addr) ?:
+ tipc_node_find_by_id(net, peer_id);
+ if (n) {
+ if (!n->preliminary)
+ goto update;
+ if (preliminary)
+ goto exit;
+ /* A preliminary node becomes "real" now, refresh its data */
+ tipc_node_write_lock(n);
+ if (!tipc_link_bc_create(net, tipc_own_addr(net), addr, peer_id, U16_MAX,
+ tipc_link_min_win(snd_l), tipc_link_max_win(snd_l),
+ n->capabilities, &n->bc_entry.inputq1,
+ &n->bc_entry.namedq, snd_l, &n->bc_entry.link)) {
+ pr_warn("Broadcast rcv link refresh failed, no memory\n");
+ tipc_node_write_unlock_fast(n);
+ tipc_node_put(n);
+ n = NULL;
+ goto exit;
+ }
+ n->preliminary = false;
+ n->addr = addr;
+ hlist_del_rcu(&n->hash);
+ hlist_add_head_rcu(&n->hash,
+ &tn->node_htable[tipc_hashfn(addr)]);
+ list_del_rcu(&n->list);
+ list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
+ if (n->addr < temp_node->addr)
+ break;
+ }
+ list_add_tail_rcu(&n->list, &temp_node->list);
+ tipc_node_write_unlock_fast(n);
+
+update:
+ if (n->peer_hash_mix ^ hash_mixes)
+ tipc_node_assign_peer_net(n, hash_mixes);
+ if (n->capabilities == capabilities)
+ goto exit;
+ /* Same node may come back with new capabilities */
+ tipc_node_write_lock(n);
+ n->capabilities = capabilities;
+ for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
+ l = n->links[bearer_id].link;
+ if (l)
+ tipc_link_update_caps(l, capabilities);
+ }
+ tipc_node_write_unlock_fast(n);
+
+ /* Calculate cluster capabilities */
+ tn->capabilities = TIPC_NODE_CAPABILITIES;
+ list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
+ tn->capabilities &= temp_node->capabilities;
+ }
+
+ tipc_bcast_toggle_rcast(net,
+ (tn->capabilities & TIPC_BCAST_RCAST));
+
+ goto exit;
+ }
+ n = kzalloc(sizeof(*n), GFP_ATOMIC);
+ if (!n) {
+ pr_warn("Node creation failed, no memory\n");
+ goto exit;
+ }
+ tipc_nodeid2string(n->peer_id_string, peer_id);
+#ifdef CONFIG_TIPC_CRYPTO
+ if (unlikely(tipc_crypto_start(&n->crypto_rx, net, n))) {
+ pr_warn("Failed to start crypto RX(%s)!\n", n->peer_id_string);
+ kfree(n);
+ n = NULL;
+ goto exit;
+ }
+#endif
+ n->addr = addr;
+ n->preliminary = preliminary;
+ memcpy(&n->peer_id, peer_id, 16);
+ n->net = net;
+ n->peer_net = NULL;
+ n->peer_hash_mix = 0;
+	/* Assign the kernel-local peer namespace, if one exists */
+ tipc_node_assign_peer_net(n, hash_mixes);
+ n->capabilities = capabilities;
+ kref_init(&n->kref);
+ rwlock_init(&n->lock);
+ INIT_HLIST_NODE(&n->hash);
+ INIT_LIST_HEAD(&n->list);
+ INIT_LIST_HEAD(&n->publ_list);
+ INIT_LIST_HEAD(&n->conn_sks);
+ skb_queue_head_init(&n->bc_entry.namedq);
+ skb_queue_head_init(&n->bc_entry.inputq1);
+ __skb_queue_head_init(&n->bc_entry.arrvq);
+ skb_queue_head_init(&n->bc_entry.inputq2);
+ for (i = 0; i < MAX_BEARERS; i++)
+ spin_lock_init(&n->links[i].lock);
+ n->state = SELF_DOWN_PEER_LEAVING;
+ n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
+ n->signature = INVALID_NODE_SIG;
+ n->active_links[0] = INVALID_BEARER_ID;
+ n->active_links[1] = INVALID_BEARER_ID;
+ if (!preliminary &&
+ !tipc_link_bc_create(net, tipc_own_addr(net), addr, peer_id, U16_MAX,
+ tipc_link_min_win(snd_l), tipc_link_max_win(snd_l),
+ n->capabilities, &n->bc_entry.inputq1,
+ &n->bc_entry.namedq, snd_l, &n->bc_entry.link)) {
+ pr_warn("Broadcast rcv link creation failed, no memory\n");
+ tipc_node_put(n);
+ n = NULL;
+ goto exit;
+ }
+ tipc_node_get(n);
+ timer_setup(&n->timer, tipc_node_timeout, 0);
+ /* Start a slow timer anyway, crypto needs it */
+ n->keepalive_intv = 10000;
+ intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
+ if (!mod_timer(&n->timer, intv))
+ tipc_node_get(n);
+ hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
+ list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
+ if (n->addr < temp_node->addr)
+ break;
+ }
+ list_add_tail_rcu(&n->list, &temp_node->list);
+ /* Calculate cluster capabilities */
+ tn->capabilities = TIPC_NODE_CAPABILITIES;
+ list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
+ tn->capabilities &= temp_node->capabilities;
+ }
+ tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
+ trace_tipc_node_create(n, true, " ");
+exit:
+ spin_unlock_bh(&tn->node_list_lock);
+ return n;
+}
+
+static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
+{
+ unsigned long tol = tipc_link_tolerance(l);
+ unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
+
+ /* Link with lowest tolerance determines timer interval */
+ if (intv < n->keepalive_intv)
+ n->keepalive_intv = intv;
+
+ /* Ensure link's abort limit corresponds to current tolerance */
+ tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
+}
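+
+/* Illustrative example: with a link tolerance of 1500 ms (and no link with a
+ * lower tolerance), the interval above becomes 1500/4 = 375 ms and the abort
+ * limit 1500/375 = 4 missed probe intervals.
+ */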
+
+static void tipc_node_delete_from_list(struct tipc_node *node)
+{
+#ifdef CONFIG_TIPC_CRYPTO
+ tipc_crypto_key_flush(node->crypto_rx);
+#endif
+ list_del_rcu(&node->list);
+ hlist_del_rcu(&node->hash);
+ tipc_node_put(node);
+}
+
+static void tipc_node_delete(struct tipc_node *node)
+{
+ trace_tipc_node_delete(node, true, " ");
+ tipc_node_delete_from_list(node);
+
+ del_timer_sync(&node->timer);
+ tipc_node_put(node);
+}
+
+void tipc_node_stop(struct net *net)
+{
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_node *node, *t_node;
+
+ spin_lock_bh(&tn->node_list_lock);
+ list_for_each_entry_safe(node, t_node, &tn->node_list, list)
+ tipc_node_delete(node);
+ spin_unlock_bh(&tn->node_list_lock);
+}
+
+void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
+{
+ struct tipc_node *n;
+
+ if (in_own_node(net, addr))
+ return;
+
+ n = tipc_node_find(net, addr);
+ if (!n) {
+ pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr);
+ return;
+ }
+ tipc_node_write_lock(n);
+ list_add_tail(subscr, &n->publ_list);
+ tipc_node_write_unlock_fast(n);
+ tipc_node_put(n);
+}
+
+void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
+{
+ struct tipc_node *n;
+
+ if (in_own_node(net, addr))
+ return;
+
+ n = tipc_node_find(net, addr);
+ if (!n) {
+ pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr);
+ return;
+ }
+ tipc_node_write_lock(n);
+ list_del_init(subscr);
+ tipc_node_write_unlock_fast(n);
+ tipc_node_put(n);
+}
+
+int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
+{
+ struct tipc_node *node;
+ struct tipc_sock_conn *conn;
+ int err = 0;
+
+ if (in_own_node(net, dnode))
+ return 0;
+
+ node = tipc_node_find(net, dnode);
+ if (!node) {
+ pr_warn("Connecting sock to node 0x%x failed\n", dnode);
+ return -EHOSTUNREACH;
+ }
+ conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
+ if (!conn) {
+ err = -EHOSTUNREACH;
+ goto exit;
+ }
+ conn->peer_node = dnode;
+ conn->port = port;
+ conn->peer_port = peer_port;
+
+ tipc_node_write_lock(node);
+ list_add_tail(&conn->list, &node->conn_sks);
+ tipc_node_write_unlock(node);
+exit:
+ tipc_node_put(node);
+ return err;
+}
+
+void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
+{
+ struct tipc_node *node;
+ struct tipc_sock_conn *conn, *safe;
+
+ if (in_own_node(net, dnode))
+ return;
+
+ node = tipc_node_find(net, dnode);
+ if (!node)
+ return;
+
+ tipc_node_write_lock(node);
+ list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
+ if (port != conn->port)
+ continue;
+ list_del(&conn->list);
+ kfree(conn);
+ }
+ tipc_node_write_unlock(node);
+ tipc_node_put(node);
+}
+
+static void tipc_node_clear_links(struct tipc_node *node)
+{
+ int i;
+
+ for (i = 0; i < MAX_BEARERS; i++) {
+ struct tipc_link_entry *le = &node->links[i];
+
+ if (le->link) {
+ kfree(le->link);
+ le->link = NULL;
+ node->link_cnt--;
+ }
+ }
+}
+
+/* tipc_node_cleanup - delete a node that has had no
+ * active links for NODE_CLEANUP_AFTER time
+ */
+static bool tipc_node_cleanup(struct tipc_node *peer)
+{
+ struct tipc_node *temp_node;
+ struct tipc_net *tn = tipc_net(peer->net);
+ bool deleted = false;
+
+ /* If lock held by tipc_node_stop() the node will be deleted anyway */
+ if (!spin_trylock_bh(&tn->node_list_lock))
+ return false;
+
+ tipc_node_write_lock(peer);
+
+ if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
+ tipc_node_clear_links(peer);
+ tipc_node_delete_from_list(peer);
+ deleted = true;
+ }
+ tipc_node_write_unlock(peer);
+
+ if (!deleted) {
+ spin_unlock_bh(&tn->node_list_lock);
+ return deleted;
+ }
+
+ /* Calculate cluster capabilities */
+ tn->capabilities = TIPC_NODE_CAPABILITIES;
+ list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
+ tn->capabilities &= temp_node->capabilities;
+ }
+ tipc_bcast_toggle_rcast(peer->net,
+ (tn->capabilities & TIPC_BCAST_RCAST));
+ spin_unlock_bh(&tn->node_list_lock);
+ return deleted;
+}
+
+/* tipc_node_timeout - handle expiration of node timer
+ */
+static void tipc_node_timeout(struct timer_list *t)
+{
+ struct tipc_node *n = from_timer(n, t, timer);
+ struct tipc_link_entry *le;
+ struct sk_buff_head xmitq;
+ int remains = n->link_cnt;
+ int bearer_id;
+ int rc = 0;
+
+ trace_tipc_node_timeout(n, false, " ");
+ if (!node_is_up(n) && tipc_node_cleanup(n)) {
+		/* Drop the timer's reference to the node */
+ tipc_node_put(n);
+ return;
+ }
+
+#ifdef CONFIG_TIPC_CRYPTO
+ /* Take any crypto key related actions first */
+ tipc_crypto_timeout(n->crypto_rx);
+#endif
+ __skb_queue_head_init(&xmitq);
+
+	/* Reset the node interval to a large value (10 seconds); it is
+	 * recalculated below from the lowest link tolerance
+	 */
+ tipc_node_read_lock(n);
+ n->keepalive_intv = 10000;
+ tipc_node_read_unlock(n);
+ for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
+ tipc_node_read_lock(n);
+ le = &n->links[bearer_id];
+ if (le->link) {
+ spin_lock_bh(&le->lock);
+ /* Link tolerance may change asynchronously: */
+ tipc_node_calculate_timer(n, le->link);
+ rc = tipc_link_timeout(le->link, &xmitq);
+ spin_unlock_bh(&le->lock);
+ remains--;
+ }
+ tipc_node_read_unlock(n);
+ tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr, n);
+ if (rc & TIPC_LINK_DOWN_EVT)
+ tipc_node_link_down(n, bearer_id, false);
+ }
+ mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv));
+}
+
+/**
+ * __tipc_node_link_up - handle addition of link
+ * @n: target tipc_node
+ * @bearer_id: id of the bearer
+ * @xmitq: queue for messages to be xmited on
+ * Node lock must be held by caller
+ * Link becomes active (alone or shared) or standby, depending on its priority.
+ */
+static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
+ struct sk_buff_head *xmitq)
+{
+ int *slot0 = &n->active_links[0];
+ int *slot1 = &n->active_links[1];
+ struct tipc_link *ol = node_active_link(n, 0);
+ struct tipc_link *nl = n->links[bearer_id].link;
+
+ if (!nl || tipc_link_is_up(nl))
+ return;
+
+ tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT);
+ if (!tipc_link_is_up(nl))
+ return;
+
+ n->working_links++;
+ n->action_flags |= TIPC_NOTIFY_LINK_UP;
+ n->link_id = tipc_link_id(nl);
+
+ /* Leave room for tunnel header when returning 'mtu' to users: */
+ n->links[bearer_id].mtu = tipc_link_mss(nl);
+
+ tipc_bearer_add_dest(n->net, bearer_id, n->addr);
+ tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);
+
+ pr_debug("Established link <%s> on network plane %c\n",
+ tipc_link_name(nl), tipc_link_plane(nl));
+ trace_tipc_node_link_up(n, true, " ");
+
+ /* Ensure that a STATE message goes first */
+ tipc_link_build_state_msg(nl, xmitq);
+
+ /* First link? => give it both slots */
+ if (!ol) {
+ *slot0 = bearer_id;
+ *slot1 = bearer_id;
+ tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
+ n->action_flags |= TIPC_NOTIFY_NODE_UP;
+ tipc_link_set_active(nl, true);
+ tipc_bcast_add_peer(n->net, nl, xmitq);
+ return;
+ }
+
+ /* Second link => redistribute slots */
+ if (tipc_link_prio(nl) > tipc_link_prio(ol)) {
+ pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol));
+ *slot0 = bearer_id;
+ *slot1 = bearer_id;
+ tipc_link_set_active(nl, true);
+ tipc_link_set_active(ol, false);
+ } else if (tipc_link_prio(nl) == tipc_link_prio(ol)) {
+ tipc_link_set_active(nl, true);
+ *slot1 = bearer_id;
+ } else {
+ pr_debug("New link <%s> is standby\n", tipc_link_name(nl));
+ }
+
+ /* Prepare synchronization with first link */
+ tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
+}
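+
+/* Note on the active_links[] slots used above: slot 0 refers to a
+ * highest-priority active link; slot 1 refers to a second link of equal
+ * priority when load sharing, otherwise it mirrors slot 0.
+ */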
+
+/**
+ * tipc_node_link_up - handle addition of link
+ * @n: target tipc_node
+ * @bearer_id: id of the bearer
+ * @xmitq: queue for messages to be xmited on
+ *
+ * Link becomes active (alone or shared) or standby, depending on its priority.
+ */
+static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
+ struct sk_buff_head *xmitq)
+{
+ struct tipc_media_addr *maddr;
+
+ tipc_node_write_lock(n);
+ __tipc_node_link_up(n, bearer_id, xmitq);
+ maddr = &n->links[bearer_id].maddr;
+ tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr, n);
+ tipc_node_write_unlock(n);
+}
+
+/**
+ * tipc_node_link_failover() - start failover in the "half-failover" case
+ *
+ * This function is only called in the special situation where link
+ * failover may already have been started on the peer node, but not yet
+ * on this node. This can happen when e.g.::
+ *
+ * 1. Both links <1A-2A>, <1B-2B> down
+ * 2. Link endpoint 2A up, but 1A still down (e.g. due to network
+ * disturbance, wrong session, etc.)
+ * 3. Link <1B-2B> up
+ * 4. Link endpoint 2A down (e.g. due to link tolerance timeout)
+ * 5. Node 2 starts failover onto link <1B-2B>
+ *
+ * ==> Node 1 does never start link/node failover!
+ *
+ * @n: tipc node structure
+ * @l: failing-over link peer endpoint (may be NULL)
+ * @tnl: tunnel link
+ * @xmitq: queue for messages to be xmited on tnl link later
+ */
+static void tipc_node_link_failover(struct tipc_node *n, struct tipc_link *l,
+ struct tipc_link *tnl,
+ struct sk_buff_head *xmitq)
+{
+	/* Avoid a "self-failover" that could never end */
+ if (!tipc_link_is_up(tnl))
+ return;
+
+	/* Don't rush, the failed link may still be in the process of resetting */
+ if (l && !tipc_link_is_reset(l))
+ return;
+
+ tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
+ tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
+
+ n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
+ tipc_link_failover_prepare(l, tnl, xmitq);
+
+ if (l)
+ tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
+ tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
+}
+
+/**
+ * __tipc_node_link_down - handle loss of link
+ * @n: target tipc_node
+ * @bearer_id: id of the bearer
+ * @xmitq: queue for messages to be xmited on
+ * @maddr: output media address of the bearer
+ */
+static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
+ struct sk_buff_head *xmitq,
+ struct tipc_media_addr **maddr)
+{
+ struct tipc_link_entry *le = &n->links[*bearer_id];
+ int *slot0 = &n->active_links[0];
+ int *slot1 = &n->active_links[1];
+ int i, highest = 0, prio;
+ struct tipc_link *l, *_l, *tnl;
+
+ l = n->links[*bearer_id].link;
+ if (!l || tipc_link_is_reset(l))
+ return;
+
+ n->working_links--;
+ n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
+ n->link_id = tipc_link_id(l);
+
+ tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);
+
+ pr_debug("Lost link <%s> on network plane %c\n",
+ tipc_link_name(l), tipc_link_plane(l));
+
+ /* Select new active link if any available */
+ *slot0 = INVALID_BEARER_ID;
+ *slot1 = INVALID_BEARER_ID;
+ for (i = 0; i < MAX_BEARERS; i++) {
+ _l = n->links[i].link;
+ if (!_l || !tipc_link_is_up(_l))
+ continue;
+ if (_l == l)
+ continue;
+ prio = tipc_link_prio(_l);
+ if (prio < highest)
+ continue;
+ if (prio > highest) {
+ highest = prio;
+ *slot0 = i;
+ *slot1 = i;
+ continue;
+ }
+ *slot1 = i;
+ }
+
+ if (!node_is_up(n)) {
+ if (tipc_link_peer_is_down(l))
+ tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
+ tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT);
+ trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down!");
+ tipc_link_fsm_evt(l, LINK_RESET_EVT);
+ tipc_link_reset(l);
+ tipc_link_build_reset_msg(l, xmitq);
+ *maddr = &n->links[*bearer_id].maddr;
+ node_lost_contact(n, &le->inputq);
+ tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
+ return;
+ }
+ tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
+
+ /* There is still a working link => initiate failover */
+ *bearer_id = n->active_links[0];
+ tnl = n->links[*bearer_id].link;
+ tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
+ tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
+ n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
+ tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
+ trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down -> failover!");
+ tipc_link_reset(l);
+ tipc_link_fsm_evt(l, LINK_RESET_EVT);
+ tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
+ tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
+ *maddr = &n->links[*bearer_id].maddr;
+}
+
+static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
+{
+ struct tipc_link_entry *le = &n->links[bearer_id];
+ struct tipc_media_addr *maddr = NULL;
+ struct tipc_link *l = le->link;
+ int old_bearer_id = bearer_id;
+ struct sk_buff_head xmitq;
+
+ if (!l)
+ return;
+
+ __skb_queue_head_init(&xmitq);
+
+ tipc_node_write_lock(n);
+ if (!tipc_link_is_establishing(l)) {
+ __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
+ } else {
+ /* Defuse pending tipc_node_link_up() */
+ tipc_link_reset(l);
+ tipc_link_fsm_evt(l, LINK_RESET_EVT);
+ }
+ if (delete) {
+ kfree(l);
+ le->link = NULL;
+ n->link_cnt--;
+ }
+ trace_tipc_node_link_down(n, true, "node link down or deleted!");
+ tipc_node_write_unlock(n);
+ if (delete)
+ tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
+ if (!skb_queue_empty(&xmitq))
+ tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr, n);
+ tipc_sk_rcv(n->net, &le->inputq);
+}
+
+static bool node_is_up(struct tipc_node *n)
+{
+ return n->active_links[0] != INVALID_BEARER_ID;
+}
+
+bool tipc_node_is_up(struct net *net, u32 addr)
+{
+ struct tipc_node *n;
+ bool retval = false;
+
+ if (in_own_node(net, addr))
+ return true;
+
+ n = tipc_node_find(net, addr);
+ if (!n)
+ return false;
+ retval = node_is_up(n);
+ tipc_node_put(n);
+ return retval;
+}
+
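+/* tipc_node_suggest_addr - propose an unused node address derived from 'addr'
+ * The address is first mixed with the net's random value and then
+ * incremented until no existing node is found to be using it.
+ */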
+static u32 tipc_node_suggest_addr(struct net *net, u32 addr)
+{
+ struct tipc_node *n;
+
+ addr ^= tipc_net(net)->random;
+ while ((n = tipc_node_find(net, addr))) {
+ tipc_node_put(n);
+ addr++;
+ }
+ return addr;
+}
+
+/* tipc_node_try_addr(): Check if addr can be used by peer, suggest another if not
+ * Returns suggested address if any, otherwise 0
+ */
+u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
+{
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_node *n;
+ bool preliminary;
+ u32 sugg_addr;
+
+ /* Suggest new address if some other peer is using this one */
+ n = tipc_node_find(net, addr);
+ if (n) {
+ if (!memcmp(n->peer_id, id, NODE_ID_LEN))
+ addr = 0;
+ tipc_node_put(n);
+ if (!addr)
+ return 0;
+ return tipc_node_suggest_addr(net, addr);
+ }
+
+ /* Suggest previously used address if peer is known */
+ n = tipc_node_find_by_id(net, id);
+ if (n) {
+ sugg_addr = n->addr;
+ preliminary = n->preliminary;
+ tipc_node_put(n);
+ if (!preliminary)
+ return sugg_addr;
+ }
+
+	/* Even this node's own trial address may be in conflict */
+ if (tn->trial_addr == addr)
+ return tipc_node_suggest_addr(net, addr);
+
+ return 0;
+}
+
+void tipc_node_check_dest(struct net *net, u32 addr,
+ u8 *peer_id, struct tipc_bearer *b,
+ u16 capabilities, u32 signature, u32 hash_mixes,
+ struct tipc_media_addr *maddr,
+ bool *respond, bool *dupl_addr)
+{
+ struct tipc_node *n;
+ struct tipc_link *l;
+ struct tipc_link_entry *le;
+ bool addr_match = false;
+ bool sign_match = false;
+ bool link_up = false;
+ bool link_is_reset = false;
+ bool accept_addr = false;
+ bool reset = false;
+ char *if_name;
+ unsigned long intv;
+ u16 session;
+
+ *dupl_addr = false;
+ *respond = false;
+
+ n = tipc_node_create(net, addr, peer_id, capabilities, hash_mixes,
+ false);
+ if (!n)
+ return;
+
+ tipc_node_write_lock(n);
+
+ le = &n->links[b->identity];
+
+ /* Prepare to validate requesting node's signature and media address */
+ l = le->link;
+ link_up = l && tipc_link_is_up(l);
+ link_is_reset = l && tipc_link_is_reset(l);
+ addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
+ sign_match = (signature == n->signature);
+
+ /* These three flags give us eight permutations: */
+
+ if (sign_match && addr_match && link_up) {
+ /* All is fine. Ignore requests. */
+ /* Peer node is not a container/local namespace */
+ if (!n->peer_hash_mix)
+ n->peer_hash_mix = hash_mixes;
+ } else if (sign_match && addr_match && !link_up) {
+ /* Respond. The link will come up in due time */
+ *respond = true;
+ } else if (sign_match && !addr_match && link_up) {
+ /* Peer has changed i/f address without rebooting.
+ * If so, the link will reset soon, and the next
+ * discovery will be accepted. So we can ignore it.
+ * It may also be a cloned or malicious peer having
+ * chosen the same node address and signature as an
+ * existing one.
+ * Ignore requests until the link goes down, if ever.
+ */
+ *dupl_addr = true;
+ } else if (sign_match && !addr_match && !link_up) {
+ /* Peer link has changed i/f address without rebooting.
+ * It may also be a cloned or malicious peer; we can't
+ * distinguish between the two.
+ * The signature is correct, so we must accept.
+ */
+ accept_addr = true;
+ *respond = true;
+ reset = true;
+ } else if (!sign_match && addr_match && link_up) {
+ /* Peer node rebooted. Two possibilities:
+ * - Delayed re-discovery; this link endpoint has already
+ * reset and re-established contact with the peer, before
+ * receiving a discovery message from that node.
+ * (The peer happened to receive one from this node first).
+ * - The peer came back so fast that our side has not
+ * discovered it yet. Probing from this side will soon
+ * reset the link, since there can be no working link
+ * endpoint at the peer end, and the link will re-establish.
+ * Accept the signature, since it comes from a known peer.
+ */
+ n->signature = signature;
+ } else if (!sign_match && addr_match && !link_up) {
+ /* The peer node has rebooted.
+ * Accept signature, since it is a known peer.
+ */
+ n->signature = signature;
+ *respond = true;
+ } else if (!sign_match && !addr_match && link_up) {
+ /* Peer rebooted with new address, or a new/duplicate peer.
+ * Ignore until the link goes down, if ever.
+ */
+ *dupl_addr = true;
+ } else if (!sign_match && !addr_match && !link_up) {
+ /* Peer rebooted with new address, or it is a new peer.
+ * Accept signature and address.
+ */
+ n->signature = signature;
+ accept_addr = true;
+ *respond = true;
+ reset = true;
+ }
+
+ if (!accept_addr)
+ goto exit;
+
+	/* Now create a new link if one does not already exist */
+ if (!l) {
+ if (n->link_cnt == 2)
+ goto exit;
+
+ if_name = strchr(b->name, ':') + 1;
+ get_random_bytes(&session, sizeof(u16));
+ if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
+ b->net_plane, b->mtu, b->priority,
+ b->min_win, b->max_win, session,
+ tipc_own_addr(net), addr, peer_id,
+ n->capabilities,
+ tipc_bc_sndlink(n->net), n->bc_entry.link,
+ &le->inputq,
+ &n->bc_entry.namedq, &l)) {
+ *respond = false;
+ goto exit;
+ }
+ trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link created!");
+ tipc_link_reset(l);
+ tipc_link_fsm_evt(l, LINK_RESET_EVT);
+ if (n->state == NODE_FAILINGOVER)
+ tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
+ link_is_reset = tipc_link_is_reset(l);
+ le->link = l;
+ n->link_cnt++;
+ tipc_node_calculate_timer(n, l);
+ if (n->link_cnt == 1) {
+ intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
+ if (!mod_timer(&n->timer, intv))
+ tipc_node_get(n);
+ }
+ }
+ memcpy(&le->maddr, maddr, sizeof(*maddr));
+exit:
+ tipc_node_write_unlock(n);
+ if (reset && !link_is_reset)
+ tipc_node_link_down(n, b->identity, false);
+ tipc_node_put(n);
+}
+
+void tipc_node_delete_links(struct net *net, int bearer_id)
+{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_node *n;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(n, &tn->node_list, list) {
+ tipc_node_link_down(n, bearer_id, true);
+ }
+ rcu_read_unlock();
+}
+
+static void tipc_node_reset_links(struct tipc_node *n)
+{
+ int i;
+
+ pr_warn("Resetting all links to %x\n", n->addr);
+
+ trace_tipc_node_reset_links(n, true, " ");
+ for (i = 0; i < MAX_BEARERS; i++) {
+ tipc_node_link_down(n, i, false);
+ }
+}
+
+/* tipc_node_fsm_evt - node finite state machine
+ * Determines when contact is allowed with peer node
+ */
+static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
+{
+ int state = n->state;
+
+ switch (state) {
+ case SELF_DOWN_PEER_DOWN:
+ switch (evt) {
+ case SELF_ESTABL_CONTACT_EVT:
+ state = SELF_UP_PEER_COMING;
+ break;
+ case PEER_ESTABL_CONTACT_EVT:
+ state = SELF_COMING_PEER_UP;
+ break;
+ case SELF_LOST_CONTACT_EVT:
+ case PEER_LOST_CONTACT_EVT:
+ break;
+ case NODE_SYNCH_END_EVT:
+ case NODE_SYNCH_BEGIN_EVT:
+ case NODE_FAILOVER_BEGIN_EVT:
+ case NODE_FAILOVER_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case SELF_UP_PEER_UP:
+ switch (evt) {
+ case SELF_LOST_CONTACT_EVT:
+ state = SELF_DOWN_PEER_LEAVING;
+ break;
+ case PEER_LOST_CONTACT_EVT:
+ state = SELF_LEAVING_PEER_DOWN;
+ break;
+ case NODE_SYNCH_BEGIN_EVT:
+ state = NODE_SYNCHING;
+ break;
+ case NODE_FAILOVER_BEGIN_EVT:
+ state = NODE_FAILINGOVER;
+ break;
+ case SELF_ESTABL_CONTACT_EVT:
+ case PEER_ESTABL_CONTACT_EVT:
+ case NODE_SYNCH_END_EVT:
+ case NODE_FAILOVER_END_EVT:
+ break;
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case SELF_DOWN_PEER_LEAVING:
+ switch (evt) {
+ case PEER_LOST_CONTACT_EVT:
+ state = SELF_DOWN_PEER_DOWN;
+ break;
+ case SELF_ESTABL_CONTACT_EVT:
+ case PEER_ESTABL_CONTACT_EVT:
+ case SELF_LOST_CONTACT_EVT:
+ break;
+ case NODE_SYNCH_END_EVT:
+ case NODE_SYNCH_BEGIN_EVT:
+ case NODE_FAILOVER_BEGIN_EVT:
+ case NODE_FAILOVER_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case SELF_UP_PEER_COMING:
+ switch (evt) {
+ case PEER_ESTABL_CONTACT_EVT:
+ state = SELF_UP_PEER_UP;
+ break;
+ case SELF_LOST_CONTACT_EVT:
+ state = SELF_DOWN_PEER_DOWN;
+ break;
+ case SELF_ESTABL_CONTACT_EVT:
+ case PEER_LOST_CONTACT_EVT:
+ case NODE_SYNCH_END_EVT:
+ case NODE_FAILOVER_BEGIN_EVT:
+ break;
+ case NODE_SYNCH_BEGIN_EVT:
+ case NODE_FAILOVER_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case SELF_COMING_PEER_UP:
+ switch (evt) {
+ case SELF_ESTABL_CONTACT_EVT:
+ state = SELF_UP_PEER_UP;
+ break;
+ case PEER_LOST_CONTACT_EVT:
+ state = SELF_DOWN_PEER_DOWN;
+ break;
+ case SELF_LOST_CONTACT_EVT:
+ case PEER_ESTABL_CONTACT_EVT:
+ break;
+ case NODE_SYNCH_END_EVT:
+ case NODE_SYNCH_BEGIN_EVT:
+ case NODE_FAILOVER_BEGIN_EVT:
+ case NODE_FAILOVER_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case SELF_LEAVING_PEER_DOWN:
+ switch (evt) {
+ case SELF_LOST_CONTACT_EVT:
+ state = SELF_DOWN_PEER_DOWN;
+ break;
+ case SELF_ESTABL_CONTACT_EVT:
+ case PEER_ESTABL_CONTACT_EVT:
+ case PEER_LOST_CONTACT_EVT:
+ break;
+ case NODE_SYNCH_END_EVT:
+ case NODE_SYNCH_BEGIN_EVT:
+ case NODE_FAILOVER_BEGIN_EVT:
+ case NODE_FAILOVER_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case NODE_FAILINGOVER:
+ switch (evt) {
+ case SELF_LOST_CONTACT_EVT:
+ state = SELF_DOWN_PEER_LEAVING;
+ break;
+ case PEER_LOST_CONTACT_EVT:
+ state = SELF_LEAVING_PEER_DOWN;
+ break;
+ case NODE_FAILOVER_END_EVT:
+ state = SELF_UP_PEER_UP;
+ break;
+ case NODE_FAILOVER_BEGIN_EVT:
+ case SELF_ESTABL_CONTACT_EVT:
+ case PEER_ESTABL_CONTACT_EVT:
+ break;
+ case NODE_SYNCH_BEGIN_EVT:
+ case NODE_SYNCH_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case NODE_SYNCHING:
+ switch (evt) {
+ case SELF_LOST_CONTACT_EVT:
+ state = SELF_DOWN_PEER_LEAVING;
+ break;
+ case PEER_LOST_CONTACT_EVT:
+ state = SELF_LEAVING_PEER_DOWN;
+ break;
+ case NODE_SYNCH_END_EVT:
+ state = SELF_UP_PEER_UP;
+ break;
+ case NODE_FAILOVER_BEGIN_EVT:
+ state = NODE_FAILINGOVER;
+ break;
+ case NODE_SYNCH_BEGIN_EVT:
+ case SELF_ESTABL_CONTACT_EVT:
+ case PEER_ESTABL_CONTACT_EVT:
+ break;
+ case NODE_FAILOVER_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ default:
+ pr_err("Unknown node fsm state %x\n", state);
+ break;
+ }
+ trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
+ n->state = state;
+ return;
+
+illegal_evt:
+ pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
+ trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
+}
+
+static void node_lost_contact(struct tipc_node *n,
+ struct sk_buff_head *inputq)
+{
+ struct tipc_sock_conn *conn, *safe;
+ struct tipc_link *l;
+ struct list_head *conns = &n->conn_sks;
+ struct sk_buff *skb;
+ uint i;
+
+ pr_debug("Lost contact with %x\n", n->addr);
+ n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
+ trace_tipc_node_lost_contact(n, true, " ");
+
+ /* Clean up broadcast state */
+ tipc_bcast_remove_peer(n->net, n->bc_entry.link);
+ skb_queue_purge(&n->bc_entry.namedq);
+
+ /* Abort any ongoing link failover */
+ for (i = 0; i < MAX_BEARERS; i++) {
+ l = n->links[i].link;
+ if (l)
+ tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
+ }
+
+ /* Notify publications from this node */
+ n->action_flags |= TIPC_NOTIFY_NODE_DOWN;
+ n->peer_net = NULL;
+ n->peer_hash_mix = 0;
+ /* Notify sockets connected to node */
+ list_for_each_entry_safe(conn, safe, conns, list) {
+ skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
+ SHORT_H_SIZE, 0, tipc_own_addr(n->net),
+ conn->peer_node, conn->port,
+ conn->peer_port, TIPC_ERR_NO_NODE);
+ if (likely(skb))
+ skb_queue_tail(inputq, skb);
+ list_del(&conn->list);
+ kfree(conn);
+ }
+}
+
+/**
+ * tipc_node_get_linkname - get the name of a link
+ *
+ * @net: the applicable net namespace
+ * @bearer_id: id of the bearer
+ * @addr: peer node address
+ * @linkname: link name output buffer
+ * @len: size of @linkname output buffer
+ *
+ * Return: 0 on success
+ */
+int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
+ char *linkname, size_t len)
+{
+ struct tipc_link *link;
+ int err = -EINVAL;
+ struct tipc_node *node = tipc_node_find(net, addr);
+
+ if (!node)
+ return err;
+
+ if (bearer_id >= MAX_BEARERS)
+ goto exit;
+
+ tipc_node_read_lock(node);
+ link = node->links[bearer_id].link;
+ if (link) {
+ strncpy(linkname, tipc_link_name(link), len);
+ err = 0;
+ }
+ tipc_node_read_unlock(node);
+exit:
+ tipc_node_put(node);
+ return err;
+}
+
+/* Caller should hold node lock for the passed node */
+static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
+{
+ void *hdr;
+ struct nlattr *attrs;
+
+ hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
+ NLM_F_MULTI, TIPC_NL_NODE_GET);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NODE);
+ if (!attrs)
+ goto msg_full;
+
+ if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
+ goto attr_msg_full;
+ if (node_is_up(node))
+ if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
+ goto attr_msg_full;
+
+ nla_nest_end(msg->skb, attrs);
+ genlmsg_end(msg->skb, hdr);
+
+ return 0;
+
+attr_msg_full:
+ nla_nest_cancel(msg->skb, attrs);
+msg_full:
+ genlmsg_cancel(msg->skb, hdr);
+
+ return -EMSGSIZE;
+}
+
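+/* tipc_lxc_xmit - deliver a message chain directly to a peer kernel
+ * namespace ("linux container") on the same host, bypassing bearers and links.
+ */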
+static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
+{
+ struct tipc_msg *hdr = buf_msg(skb_peek(list));
+ struct sk_buff_head inputq;
+
+ switch (msg_user(hdr)) {
+ case TIPC_LOW_IMPORTANCE:
+ case TIPC_MEDIUM_IMPORTANCE:
+ case TIPC_HIGH_IMPORTANCE:
+ case TIPC_CRITICAL_IMPORTANCE:
+ if (msg_connected(hdr) || msg_named(hdr) ||
+ msg_direct(hdr)) {
+ tipc_loopback_trace(peer_net, list);
+ spin_lock_init(&list->lock);
+ tipc_sk_rcv(peer_net, list);
+ return;
+ }
+ if (msg_mcast(hdr)) {
+ tipc_loopback_trace(peer_net, list);
+ skb_queue_head_init(&inputq);
+ tipc_sk_mcast_rcv(peer_net, list, &inputq);
+ __skb_queue_purge(list);
+ skb_queue_purge(&inputq);
+ return;
+ }
+ return;
+ case MSG_FRAGMENTER:
+ if (tipc_msg_assemble(list)) {
+ tipc_loopback_trace(peer_net, list);
+ skb_queue_head_init(&inputq);
+ tipc_sk_mcast_rcv(peer_net, list, &inputq);
+ __skb_queue_purge(list);
+ skb_queue_purge(&inputq);
+ }
+ return;
+ case GROUP_PROTOCOL:
+ case CONN_MANAGER:
+ tipc_loopback_trace(peer_net, list);
+ spin_lock_init(&list->lock);
+ tipc_sk_rcv(peer_net, list);
+ return;
+ case LINK_PROTOCOL:
+ case NAME_DISTRIBUTOR:
+ case TUNNEL_PROTOCOL:
+ case BCAST_PROTOCOL:
+ return;
+ default:
+ return;
+ }
+}
+
+/**
+ * tipc_node_xmit() - general link level function for message sending
+ * @net: the applicable net namespace
+ * @list: chain of buffers containing message
+ * @dnode: address of destination node
+ * @selector: a number used for deterministic link selection
+ * Consumes the buffer chain.
+ * Return: 0 if success, otherwise: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE,-ENOBUF
+ */
+int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
+ u32 dnode, int selector)
+{
+ struct tipc_link_entry *le = NULL;
+ struct tipc_node *n;
+ struct sk_buff_head xmitq;
+ bool node_up = false;
+ struct net *peer_net;
+ int bearer_id;
+ int rc;
+
+ if (in_own_node(net, dnode)) {
+ tipc_loopback_trace(net, list);
+ spin_lock_init(&list->lock);
+ tipc_sk_rcv(net, list);
+ return 0;
+ }
+
+ n = tipc_node_find(net, dnode);
+ if (unlikely(!n)) {
+ __skb_queue_purge(list);
+ return -EHOSTUNREACH;
+ }
+
+ rcu_read_lock();
+ tipc_node_read_lock(n);
+ node_up = node_is_up(n);
+ peer_net = n->peer_net;
+ tipc_node_read_unlock(n);
+ if (node_up && peer_net && check_net(peer_net)) {
+ /* xmit inner linux container */
+ tipc_lxc_xmit(peer_net, list);
+ if (likely(skb_queue_empty(list))) {
+ rcu_read_unlock();
+ tipc_node_put(n);
+ return 0;
+ }
+ }
+ rcu_read_unlock();
+
+ tipc_node_read_lock(n);
+ bearer_id = n->active_links[selector & 1];
+ if (unlikely(bearer_id == INVALID_BEARER_ID)) {
+ tipc_node_read_unlock(n);
+ tipc_node_put(n);
+ __skb_queue_purge(list);
+ return -EHOSTUNREACH;
+ }
+
+ __skb_queue_head_init(&xmitq);
+ le = &n->links[bearer_id];
+ spin_lock_bh(&le->lock);
+ rc = tipc_link_xmit(le->link, list, &xmitq);
+ spin_unlock_bh(&le->lock);
+ tipc_node_read_unlock(n);
+
+ if (unlikely(rc == -ENOBUFS))
+ tipc_node_link_down(n, bearer_id, false);
+ else
+ tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
+
+ tipc_node_put(n);
+
+ return rc;
+}
+
+/* tipc_node_xmit_skb(): send single buffer to destination
+ * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
+ * messages, which will not be rejected.
+ * The only exception is datagram messages rerouted after secondary
+ * lookup, which are rare and safe to dispose of anyway.
+ */
+int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
+ u32 selector)
+{
+ struct sk_buff_head head;
+
+ __skb_queue_head_init(&head);
+ __skb_queue_tail(&head, skb);
+ tipc_node_xmit(net, &head, dnode, selector);
+ return 0;
+}
+
+/* tipc_node_distr_xmit(): send single buffer msgs to individual destinations
+ * Note: this is only for SYSTEM_IMPORTANCE messages, which cannot be rejected
+ */
+int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq)
+{
+ struct sk_buff *skb;
+ u32 selector, dnode;
+
+ while ((skb = __skb_dequeue(xmitq))) {
+ selector = msg_origport(buf_msg(skb));
+ dnode = msg_destnode(buf_msg(skb));
+ tipc_node_xmit_skb(net, skb, dnode, selector);
+ }
+ return 0;
+}
+
+void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests)
+{
+ struct sk_buff_head xmitq;
+ struct sk_buff *txskb;
+ struct tipc_node *n;
+ u16 dummy;
+ u32 dst;
+
+ /* Use broadcast if all nodes support it */
+ if (!rc_dests && tipc_bcast_get_mode(net) != BCLINK_MODE_RCAST) {
+ __skb_queue_head_init(&xmitq);
+ __skb_queue_tail(&xmitq, skb);
+ tipc_bcast_xmit(net, &xmitq, &dummy);
+ return;
+ }
+
+ /* Otherwise use legacy replicast method */
+ rcu_read_lock();
+ list_for_each_entry_rcu(n, tipc_nodes(net), list) {
+ dst = n->addr;
+ if (in_own_node(net, dst))
+ continue;
+ if (!node_is_up(n))
+ continue;
+ txskb = pskb_copy(skb, GFP_ATOMIC);
+ if (!txskb)
+ break;
+ msg_set_destnode(buf_msg(txskb), dst);
+ tipc_node_xmit_skb(net, txskb, dst, 0);
+ }
+ rcu_read_unlock();
+ kfree_skb(skb);
+}
+
+static void tipc_node_mcast_rcv(struct tipc_node *n)
+{
+ struct tipc_bclink_entry *be = &n->bc_entry;
+
+ /* 'arrvq' is under inputq2's lock protection */
+ spin_lock_bh(&be->inputq2.lock);
+ spin_lock_bh(&be->inputq1.lock);
+ skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
+ spin_unlock_bh(&be->inputq1.lock);
+ spin_unlock_bh(&be->inputq2.lock);
+ tipc_sk_mcast_rcv(n->net, &be->arrvq, &be->inputq2);
+}
+
+static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr,
+ int bearer_id, struct sk_buff_head *xmitq)
+{
+ struct tipc_link *ucl;
+ int rc;
+
+ rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr, xmitq);
+
+ if (rc & TIPC_LINK_DOWN_EVT) {
+ tipc_node_reset_links(n);
+ return;
+ }
+
+ if (!(rc & TIPC_LINK_SND_STATE))
+ return;
+
+ /* If probe message, a STATE response will be sent anyway */
+ if (msg_probe(hdr))
+ return;
+
+ /* Produce a STATE message carrying broadcast NACK */
+ tipc_node_read_lock(n);
+ ucl = n->links[bearer_id].link;
+ if (ucl)
+ tipc_link_build_state_msg(ucl, xmitq);
+ tipc_node_read_unlock(n);
+}
+
+/**
+ * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
+ * @net: the applicable net namespace
+ * @skb: TIPC packet
+ * @bearer_id: id of the bearer the message arrived on
+ *
+ * Invoked with no locks held.
+ */
+static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id)
+{
+ int rc;
+ struct sk_buff_head xmitq;
+ struct tipc_bclink_entry *be;
+ struct tipc_link_entry *le;
+ struct tipc_msg *hdr = buf_msg(skb);
+ int usr = msg_user(hdr);
+ u32 dnode = msg_destnode(hdr);
+ struct tipc_node *n;
+
+ __skb_queue_head_init(&xmitq);
+
+ /* If NACK for other node, let rcv link for that node peek into it */
+ if ((usr == BCAST_PROTOCOL) && (dnode != tipc_own_addr(net)))
+ n = tipc_node_find(net, dnode);
+ else
+ n = tipc_node_find(net, msg_prevnode(hdr));
+ if (!n) {
+ kfree_skb(skb);
+ return;
+ }
+ be = &n->bc_entry;
+ le = &n->links[bearer_id];
+
+ rc = tipc_bcast_rcv(net, be->link, skb);
+
+ /* Broadcast ACKs are sent on a unicast link */
+ if (rc & TIPC_LINK_SND_STATE) {
+ tipc_node_read_lock(n);
+ tipc_link_build_state_msg(le->link, &xmitq);
+ tipc_node_read_unlock(n);
+ }
+
+ if (!skb_queue_empty(&xmitq))
+ tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
+
+ if (!skb_queue_empty(&be->inputq1))
+ tipc_node_mcast_rcv(n);
+
+ /* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */
+ if (!skb_queue_empty(&n->bc_entry.namedq))
+ tipc_named_rcv(net, &n->bc_entry.namedq,
+ &n->bc_entry.named_rcv_nxt,
+ &n->bc_entry.named_open);
+
+ /* If reassembly or retransmission failure => reset all links to peer */
+ if (rc & TIPC_LINK_DOWN_EVT)
+ tipc_node_reset_links(n);
+
+ tipc_node_put(n);
+}
+
+/**
+ * tipc_node_check_state - check and if necessary update node state
+ * @n: target tipc_node
+ * @skb: TIPC packet
+ * @bearer_id: identity of bearer delivering the packet
+ * @xmitq: queue for messages to be transmitted on
+ * Return: true if state and msg are ok, otherwise false
+ */
+static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
+ int bearer_id, struct sk_buff_head *xmitq)
+{
+ struct tipc_msg *hdr = buf_msg(skb);
+ int usr = msg_user(hdr);
+ int mtyp = msg_type(hdr);
+ u16 oseqno = msg_seqno(hdr);
+ u16 exp_pkts = msg_msgcnt(hdr);
+ u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
+ int state = n->state;
+ struct tipc_link *l, *tnl, *pl = NULL;
+ struct tipc_media_addr *maddr;
+ int pb_id;
+
+ if (trace_tipc_node_check_state_enabled()) {
+ trace_tipc_skb_dump(skb, false, "skb for node state check");
+ trace_tipc_node_check_state(n, true, " ");
+ }
+ l = n->links[bearer_id].link;
+ if (!l)
+ return false;
+ rcv_nxt = tipc_link_rcv_nxt(l);
+
+ if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
+ return true;
+
+ /* Find parallel link, if any */
+ for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
+ if ((pb_id != bearer_id) && n->links[pb_id].link) {
+ pl = n->links[pb_id].link;
+ break;
+ }
+ }
+
+ if (!tipc_link_validate_msg(l, hdr)) {
+ trace_tipc_skb_dump(skb, false, "PROTO invalid (2)!");
+ trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (2)!");
+ return false;
+ }
+
+	/* Check and update node accessibility if applicable */
+ if (state == SELF_UP_PEER_COMING) {
+ if (!tipc_link_is_up(l))
+ return true;
+ if (!msg_peer_link_is_up(hdr))
+ return true;
+ tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
+ }
+
+ if (state == SELF_DOWN_PEER_LEAVING) {
+ if (msg_peer_node_is_up(hdr))
+ return false;
+ tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
+ return true;
+ }
+
+ if (state == SELF_LEAVING_PEER_DOWN)
+ return false;
+
+ /* Ignore duplicate packets */
+ if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
+ return true;
+
+ /* Initiate or update failover mode if applicable */
+ if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
+ syncpt = oseqno + exp_pkts - 1;
+ if (pl && !tipc_link_is_reset(pl)) {
+ __tipc_node_link_down(n, &pb_id, xmitq, &maddr);
+ trace_tipc_node_link_down(n, true,
+ "node link down <- failover!");
+ tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
+ tipc_link_inputq(l));
+ }
+
+ /* If parallel link was already down, and this happened before
+ * the tunnel link came up, node failover was never started.
+		 * Ensure that a FAILOVER_MSG is sent to get the peer out of
+		 * NODE_FAILINGOVER state; this node must also accept
+		 * TUNNEL_MSGs from the peer.
+ */
+ if (n->state != NODE_FAILINGOVER)
+ tipc_node_link_failover(n, pl, l, xmitq);
+
+ /* If pkts arrive out of order, use lowest calculated syncpt */
+ if (less(syncpt, n->sync_point))
+ n->sync_point = syncpt;
+ }
+
+ /* Open parallel link when tunnel link reaches synch point */
+ if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
+ if (!more(rcv_nxt, n->sync_point))
+ return true;
+ tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
+ if (pl)
+ tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
+ return true;
+ }
+
+ /* No syncing needed if only one link */
+ if (!pl || !tipc_link_is_up(pl))
+ return true;
+
+ /* Initiate synch mode if applicable */
+ if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
+ if (n->capabilities & TIPC_TUNNEL_ENHANCED)
+ syncpt = msg_syncpt(hdr);
+ else
+ syncpt = msg_seqno(msg_inner_hdr(hdr)) + exp_pkts - 1;
+ if (!tipc_link_is_up(l))
+ __tipc_node_link_up(n, bearer_id, xmitq);
+ if (n->state == SELF_UP_PEER_UP) {
+ n->sync_point = syncpt;
+ tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
+ tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
+ }
+ }
+
+ /* Open tunnel link when parallel link reaches synch point */
+ if (n->state == NODE_SYNCHING) {
+ if (tipc_link_is_synching(l)) {
+ tnl = l;
+ } else {
+ tnl = pl;
+ pl = l;
+ }
+ inputq_len = skb_queue_len(tipc_link_inputq(pl));
+ dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
+ if (more(dlv_nxt, n->sync_point)) {
+ tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
+ tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
+ return true;
+ }
+ if (l == pl)
+ return true;
+ if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
+ return true;
+ if (usr == LINK_PROTOCOL)
+ return true;
+ return false;
+ }
+ return true;
+}
+
+/**
+ * tipc_rcv - process TIPC packets/messages arriving from off-node
+ * @net: the applicable net namespace
+ * @skb: TIPC packet
+ * @b: pointer to the bearer the message arrived on
+ *
+ * Invoked with no locks held. Bearer pointer must point to a valid bearer
+ * structure (i.e. cannot be NULL), but bearer can be inactive.
+ */
+void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
+{
+ struct sk_buff_head xmitq;
+ struct tipc_link_entry *le;
+ struct tipc_msg *hdr;
+ struct tipc_node *n;
+ int bearer_id = b->identity;
+ u32 self = tipc_own_addr(net);
+ int usr, rc = 0;
+ u16 bc_ack;
+#ifdef CONFIG_TIPC_CRYPTO
+ struct tipc_ehdr *ehdr;
+
+ /* Check if message must be decrypted first */
+ if (TIPC_SKB_CB(skb)->decrypted || !tipc_ehdr_validate(skb))
+ goto rcv;
+
+ ehdr = (struct tipc_ehdr *)skb->data;
+ if (likely(ehdr->user != LINK_CONFIG)) {
+ n = tipc_node_find(net, ntohl(ehdr->addr));
+ if (unlikely(!n))
+ goto discard;
+ } else {
+ n = tipc_node_find_by_id(net, ehdr->id);
+ }
+ tipc_crypto_rcv(net, (n) ? n->crypto_rx : NULL, &skb, b);
+ if (!skb)
+ return;
+
+rcv:
+#endif
+ /* Ensure message is well-formed before touching the header */
+ if (unlikely(!tipc_msg_validate(&skb)))
+ goto discard;
+ __skb_queue_head_init(&xmitq);
+ hdr = buf_msg(skb);
+ usr = msg_user(hdr);
+ bc_ack = msg_bcast_ack(hdr);
+
+ /* Handle arrival of discovery or broadcast packet */
+ if (unlikely(msg_non_seq(hdr))) {
+ if (unlikely(usr == LINK_CONFIG))
+ return tipc_disc_rcv(net, skb, b);
+ else
+ return tipc_node_bc_rcv(net, skb, bearer_id);
+ }
+
+ /* Discard unicast link messages destined for another node */
+ if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self)))
+ goto discard;
+
+ /* Locate neighboring node that sent packet */
+ n = tipc_node_find(net, msg_prevnode(hdr));
+ if (unlikely(!n))
+ goto discard;
+ le = &n->links[bearer_id];
+
+ /* Ensure broadcast reception is in synch with peer's send state */
+ if (unlikely(usr == LINK_PROTOCOL)) {
+ if (unlikely(skb_linearize(skb))) {
+ tipc_node_put(n);
+ goto discard;
+ }
+ hdr = buf_msg(skb);
+ tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
+ } else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack)) {
+ tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr);
+ }
+
+ /* Receive packet directly if conditions permit */
+ tipc_node_read_lock(n);
+ if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
+ spin_lock_bh(&le->lock);
+ if (le->link) {
+ rc = tipc_link_rcv(le->link, skb, &xmitq);
+ skb = NULL;
+ }
+ spin_unlock_bh(&le->lock);
+ }
+ tipc_node_read_unlock(n);
+
+ /* Check/update node state before receiving */
+ if (unlikely(skb)) {
+ if (unlikely(skb_linearize(skb)))
+ goto out_node_put;
+ tipc_node_write_lock(n);
+ if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
+ if (le->link) {
+ rc = tipc_link_rcv(le->link, skb, &xmitq);
+ skb = NULL;
+ }
+ }
+ tipc_node_write_unlock(n);
+ }
+
+ if (unlikely(rc & TIPC_LINK_UP_EVT))
+ tipc_node_link_up(n, bearer_id, &xmitq);
+
+ if (unlikely(rc & TIPC_LINK_DOWN_EVT))
+ tipc_node_link_down(n, bearer_id, false);
+
+ if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
+ tipc_named_rcv(net, &n->bc_entry.namedq,
+ &n->bc_entry.named_rcv_nxt,
+ &n->bc_entry.named_open);
+
+ if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1)))
+ tipc_node_mcast_rcv(n);
+
+ if (!skb_queue_empty(&le->inputq))
+ tipc_sk_rcv(net, &le->inputq);
+
+ if (!skb_queue_empty(&xmitq))
+ tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
+
+out_node_put:
+ tipc_node_put(n);
+discard:
+ kfree_skb(skb);
+}
+
+void tipc_node_apply_property(struct net *net, struct tipc_bearer *b,
+ int prop)
+{
+ struct tipc_net *tn = tipc_net(net);
+ int bearer_id = b->identity;
+ struct sk_buff_head xmitq;
+ struct tipc_link_entry *e;
+ struct tipc_node *n;
+
+ __skb_queue_head_init(&xmitq);
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(n, &tn->node_list, list) {
+ tipc_node_write_lock(n);
+ e = &n->links[bearer_id];
+ if (e->link) {
+ if (prop == TIPC_NLA_PROP_TOL)
+ tipc_link_set_tolerance(e->link, b->tolerance,
+ &xmitq);
+ else if (prop == TIPC_NLA_PROP_MTU)
+ tipc_link_set_mtu(e->link, b->mtu);
+
+ /* Update MTU for node link entry */
+ e->mtu = tipc_link_mss(e->link);
+ }
+
+ tipc_node_write_unlock(n);
+ tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr, NULL);
+ }
+
+ rcu_read_unlock();
+}
+
+int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net *net = sock_net(skb->sk);
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
+ struct tipc_node *peer, *temp_node;
+ u8 node_id[NODE_ID_LEN];
+ u64 *w0 = (u64 *)&node_id[0];
+ u64 *w1 = (u64 *)&node_id[8];
+ u32 addr;
+ int err;
+
+ /* We identify the peer by its net */
+ if (!info->attrs[TIPC_NLA_NET])
+ return -EINVAL;
+
+ err = nla_parse_nested_deprecated(attrs, TIPC_NLA_NET_MAX,
+ info->attrs[TIPC_NLA_NET],
+ tipc_nl_net_policy, info->extack);
+ if (err)
+ return err;
+
+ /* attrs[TIPC_NLA_NET_NODEID] and attrs[TIPC_NLA_NET_ADDR] are
+ * mutually exclusive cases
+ */
+ if (attrs[TIPC_NLA_NET_ADDR]) {
+ addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
+ if (!addr)
+ return -EINVAL;
+ }
+
+ if (attrs[TIPC_NLA_NET_NODEID]) {
+ if (!attrs[TIPC_NLA_NET_NODEID_W1])
+ return -EINVAL;
+ *w0 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID]);
+ *w1 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID_W1]);
+ addr = hash128to32(node_id);
+ }
+
+ if (in_own_node(net, addr))
+ return -ENOTSUPP;
+
+ spin_lock_bh(&tn->node_list_lock);
+ peer = tipc_node_find(net, addr);
+ if (!peer) {
+ spin_unlock_bh(&tn->node_list_lock);
+ return -ENXIO;
+ }
+
+ tipc_node_write_lock(peer);
+ if (peer->state != SELF_DOWN_PEER_DOWN &&
+ peer->state != SELF_DOWN_PEER_LEAVING) {
+ tipc_node_write_unlock(peer);
+ err = -EBUSY;
+ goto err_out;
+ }
+
+ tipc_node_clear_links(peer);
+ tipc_node_write_unlock(peer);
+ tipc_node_delete(peer);
+
+ /* Calculate cluster capabilities */
+ tn->capabilities = TIPC_NODE_CAPABILITIES;
+ list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
+ tn->capabilities &= temp_node->capabilities;
+ }
+ tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
+ err = 0;
+err_out:
+ tipc_node_put(peer);
+ spin_unlock_bh(&tn->node_list_lock);
+
+ return err;
+}
+
+int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ int err;
+ struct net *net = sock_net(skb->sk);
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ int done = cb->args[0];
+ int last_addr = cb->args[1];
+ struct tipc_node *node;
+ struct tipc_nl_msg msg;
+
+ if (done)
+ return 0;
+
+ msg.skb = skb;
+ msg.portid = NETLINK_CB(cb->skb).portid;
+ msg.seq = cb->nlh->nlmsg_seq;
+
+ rcu_read_lock();
+ if (last_addr) {
+ node = tipc_node_find(net, last_addr);
+ if (!node) {
+ rcu_read_unlock();
+			/* We never set seq or call nl_dump_check_consistent(),
+			 * so setting prev_seq here will cause the consistency
+			 * check to fail in the netlink callback handler,
+			 * resulting in the NLMSG_DONE message having the
+			 * NLM_F_DUMP_INTR flag set if the node state changed
+			 * while we released the lock.
+ */
+ cb->prev_seq = 1;
+ return -EPIPE;
+ }
+ tipc_node_put(node);
+ }
+
+ list_for_each_entry_rcu(node, &tn->node_list, list) {
+ if (node->preliminary)
+ continue;
+ if (last_addr) {
+ if (node->addr == last_addr)
+ last_addr = 0;
+ else
+ continue;
+ }
+
+ tipc_node_read_lock(node);
+ err = __tipc_nl_add_node(&msg, node);
+ if (err) {
+ last_addr = node->addr;
+ tipc_node_read_unlock(node);
+ goto out;
+ }
+
+ tipc_node_read_unlock(node);
+ }
+ done = 1;
+out:
+ cb->args[0] = done;
+ cb->args[1] = last_addr;
+ rcu_read_unlock();
+
+ return skb->len;
+}
+
+/* tipc_node_find_by_name - locate owner node of link by link's name
+ * @net: the applicable net namespace
+ * @link_name: pointer to link name string
+ * @bearer_id: pointer to index in 'node->links' array where the link was found.
+ *
+ * Returns pointer to node owning the link, or NULL if no matching link is found.
+ */
+static struct tipc_node *tipc_node_find_by_name(struct net *net,
+ const char *link_name,
+ unsigned int *bearer_id)
+{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_link *l;
+ struct tipc_node *n;
+ struct tipc_node *found_node = NULL;
+ int i;
+
+ *bearer_id = 0;
+ rcu_read_lock();
+ list_for_each_entry_rcu(n, &tn->node_list, list) {
+ tipc_node_read_lock(n);
+ for (i = 0; i < MAX_BEARERS; i++) {
+ l = n->links[i].link;
+ if (l && !strcmp(tipc_link_name(l), link_name)) {
+ *bearer_id = i;
+ found_node = n;
+ break;
+ }
+ }
+ tipc_node_read_unlock(n);
+ if (found_node)
+ break;
+ }
+ rcu_read_unlock();
+
+ return found_node;
+}
+
+int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+ int res = 0;
+ int bearer_id;
+ char *name;
+ struct tipc_link *link;
+ struct tipc_node *node;
+ struct sk_buff_head xmitq;
+ struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
+ struct net *net = sock_net(skb->sk);
+
+ __skb_queue_head_init(&xmitq);
+
+ if (!info->attrs[TIPC_NLA_LINK])
+ return -EINVAL;
+
+ err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
+ info->attrs[TIPC_NLA_LINK],
+ tipc_nl_link_policy, info->extack);
+ if (err)
+ return err;
+
+ if (!attrs[TIPC_NLA_LINK_NAME])
+ return -EINVAL;
+
+ name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
+
+ if (strcmp(name, tipc_bclink_name) == 0)
+ return tipc_nl_bc_link_set(net, attrs);
+
+ node = tipc_node_find_by_name(net, name, &bearer_id);
+ if (!node)
+ return -EINVAL;
+
+ tipc_node_read_lock(node);
+
+ link = node->links[bearer_id].link;
+ if (!link) {
+ res = -EINVAL;
+ goto out;
+ }
+
+ if (attrs[TIPC_NLA_LINK_PROP]) {
+ struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
+
+ err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
+ if (err) {
+ res = err;
+ goto out;
+ }
+
+ if (props[TIPC_NLA_PROP_TOL]) {
+ u32 tol;
+
+ tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
+ tipc_link_set_tolerance(link, tol, &xmitq);
+ }
+ if (props[TIPC_NLA_PROP_PRIO]) {
+ u32 prio;
+
+ prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
+ tipc_link_set_prio(link, prio, &xmitq);
+ }
+ if (props[TIPC_NLA_PROP_WIN]) {
+ u32 max_win;
+
+ max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
+ tipc_link_set_queue_limits(link,
+ tipc_link_min_win(link),
+ max_win);
+ }
+ }
+
+out:
+ tipc_node_read_unlock(node);
+ tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr,
+ NULL);
+ return res;
+}
+
+int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net *net = genl_info_net(info);
+ struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
+ struct tipc_nl_msg msg;
+ char *name;
+ int err;
+
+ msg.portid = info->snd_portid;
+ msg.seq = info->snd_seq;
+
+ if (!info->attrs[TIPC_NLA_LINK])
+ return -EINVAL;
+
+ err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
+ info->attrs[TIPC_NLA_LINK],
+ tipc_nl_link_policy, info->extack);
+ if (err)
+ return err;
+
+ if (!attrs[TIPC_NLA_LINK_NAME])
+ return -EINVAL;
+
+ name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
+
+ msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!msg.skb)
+ return -ENOMEM;
+
+ if (strcmp(name, tipc_bclink_name) == 0) {
+ err = tipc_nl_add_bc_link(net, &msg, tipc_net(net)->bcl);
+ if (err)
+ goto err_free;
+ } else {
+ int bearer_id;
+ struct tipc_node *node;
+ struct tipc_link *link;
+
+ node = tipc_node_find_by_name(net, name, &bearer_id);
+ if (!node) {
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ tipc_node_read_lock(node);
+ link = node->links[bearer_id].link;
+ if (!link) {
+ tipc_node_read_unlock(node);
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ err = __tipc_nl_add_link(net, &msg, link, 0);
+ tipc_node_read_unlock(node);
+ if (err)
+ goto err_free;
+ }
+
+ return genlmsg_reply(msg.skb, info);
+
+err_free:
+ nlmsg_free(msg.skb);
+ return err;
+}
+
+int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+ char *link_name;
+ unsigned int bearer_id;
+ struct tipc_link *link;
+ struct tipc_node *node;
+ struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
+ struct net *net = sock_net(skb->sk);
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_link_entry *le;
+
+ if (!info->attrs[TIPC_NLA_LINK])
+ return -EINVAL;
+
+ err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
+ info->attrs[TIPC_NLA_LINK],
+ tipc_nl_link_policy, info->extack);
+ if (err)
+ return err;
+
+ if (!attrs[TIPC_NLA_LINK_NAME])
+ return -EINVAL;
+
+ link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
+
+ err = -EINVAL;
+ if (!strcmp(link_name, tipc_bclink_name)) {
+ err = tipc_bclink_reset_stats(net, tipc_bc_sndlink(net));
+ if (err)
+ return err;
+ return 0;
+ } else if (strstr(link_name, tipc_bclink_name)) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(node, &tn->node_list, list) {
+ tipc_node_read_lock(node);
+ link = node->bc_entry.link;
+ if (link && !strcmp(link_name, tipc_link_name(link))) {
+ err = tipc_bclink_reset_stats(net, link);
+ tipc_node_read_unlock(node);
+ break;
+ }
+ tipc_node_read_unlock(node);
+ }
+ rcu_read_unlock();
+ return err;
+ }
+
+ node = tipc_node_find_by_name(net, link_name, &bearer_id);
+ if (!node)
+ return -EINVAL;
+
+ le = &node->links[bearer_id];
+ tipc_node_read_lock(node);
+ spin_lock_bh(&le->lock);
+ link = node->links[bearer_id].link;
+ if (!link) {
+ spin_unlock_bh(&le->lock);
+ tipc_node_read_unlock(node);
+ return -EINVAL;
+ }
+ tipc_link_reset_stats(link);
+ spin_unlock_bh(&le->lock);
+ tipc_node_read_unlock(node);
+ return 0;
+}
+
+/* Caller should hold node lock */
+static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
+ struct tipc_node *node, u32 *prev_link,
+ bool bc_link)
+{
+ u32 i;
+ int err;
+
+ for (i = *prev_link; i < MAX_BEARERS; i++) {
+ *prev_link = i;
+
+ if (!node->links[i].link)
+ continue;
+
+ err = __tipc_nl_add_link(net, msg,
+ node->links[i].link, NLM_F_MULTI);
+ if (err)
+ return err;
+ }
+
+ if (bc_link) {
+ *prev_link = i;
+ err = tipc_nl_add_bc_link(net, msg, node->bc_entry.link);
+ if (err)
+ return err;
+ }
+
+ *prev_link = 0;
+
+ return 0;
+}
+
+int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(skb->sk);
+ struct nlattr **attrs = genl_dumpit_info(cb)->info.attrs;
+ struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_node *node;
+ struct tipc_nl_msg msg;
+ u32 prev_node = cb->args[0];
+ u32 prev_link = cb->args[1];
+ int done = cb->args[2];
+ bool bc_link = cb->args[3];
+ int err;
+
+ if (done)
+ return 0;
+
+ if (!prev_node) {
+		/* Check if dumping of broadcast-receiver links is needed */
+ if (attrs && attrs[TIPC_NLA_LINK]) {
+ err = nla_parse_nested_deprecated(link,
+ TIPC_NLA_LINK_MAX,
+ attrs[TIPC_NLA_LINK],
+ tipc_nl_link_policy,
+ NULL);
+ if (unlikely(err))
+ return err;
+ if (unlikely(!link[TIPC_NLA_LINK_BROADCAST]))
+ return -EINVAL;
+ bc_link = true;
+ }
+ }
+
+ msg.skb = skb;
+ msg.portid = NETLINK_CB(cb->skb).portid;
+ msg.seq = cb->nlh->nlmsg_seq;
+
+ rcu_read_lock();
+ if (prev_node) {
+ node = tipc_node_find(net, prev_node);
+ if (!node) {
+			/* We never set seq or call nl_dump_check_consistent(),
+			 * so setting prev_seq here will cause the consistency
+			 * check to fail in the netlink callback handler,
+			 * resulting in the last NLMSG_DONE message having the
+			 * NLM_F_DUMP_INTR flag set.
+ */
+ cb->prev_seq = 1;
+ goto out;
+ }
+ tipc_node_put(node);
+
+ list_for_each_entry_continue_rcu(node, &tn->node_list,
+ list) {
+ tipc_node_read_lock(node);
+ err = __tipc_nl_add_node_links(net, &msg, node,
+ &prev_link, bc_link);
+ tipc_node_read_unlock(node);
+ if (err)
+ goto out;
+
+ prev_node = node->addr;
+ }
+ } else {
+ err = tipc_nl_add_bc_link(net, &msg, tn->bcl);
+ if (err)
+ goto out;
+
+ list_for_each_entry_rcu(node, &tn->node_list, list) {
+ tipc_node_read_lock(node);
+ err = __tipc_nl_add_node_links(net, &msg, node,
+ &prev_link, bc_link);
+ tipc_node_read_unlock(node);
+ if (err)
+ goto out;
+
+ prev_node = node->addr;
+ }
+ }
+ done = 1;
+out:
+ rcu_read_unlock();
+
+ cb->args[0] = prev_node;
+ cb->args[1] = prev_link;
+ cb->args[2] = done;
+ cb->args[3] = bc_link;
+
+ return skb->len;
+}
+
+int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[TIPC_NLA_MON_MAX + 1];
+ struct net *net = sock_net(skb->sk);
+ int err;
+
+ if (!info->attrs[TIPC_NLA_MON])
+ return -EINVAL;
+
+ err = nla_parse_nested_deprecated(attrs, TIPC_NLA_MON_MAX,
+ info->attrs[TIPC_NLA_MON],
+ tipc_nl_monitor_policy,
+ info->extack);
+ if (err)
+ return err;
+
+ if (attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]) {
+ u32 val;
+
+ val = nla_get_u32(attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]);
+ err = tipc_nl_monitor_set_threshold(net, val);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int __tipc_nl_add_monitor_prop(struct net *net, struct tipc_nl_msg *msg)
+{
+ struct nlattr *attrs;
+ void *hdr;
+ u32 val;
+
+ hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
+ 0, TIPC_NL_MON_GET);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON);
+ if (!attrs)
+ goto msg_full;
+
+ val = tipc_nl_monitor_get_threshold(net);
+
+ if (nla_put_u32(msg->skb, TIPC_NLA_MON_ACTIVATION_THRESHOLD, val))
+ goto attr_msg_full;
+
+ nla_nest_end(msg->skb, attrs);
+ genlmsg_end(msg->skb, hdr);
+
+ return 0;
+
+attr_msg_full:
+ nla_nest_cancel(msg->skb, attrs);
+msg_full:
+ genlmsg_cancel(msg->skb, hdr);
+
+ return -EMSGSIZE;
+}
+
+int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net *net = sock_net(skb->sk);
+ struct tipc_nl_msg msg;
+ int err;
+
+ msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!msg.skb)
+ return -ENOMEM;
+ msg.portid = info->snd_portid;
+ msg.seq = info->snd_seq;
+
+ err = __tipc_nl_add_monitor_prop(net, &msg);
+ if (err) {
+ nlmsg_free(msg.skb);
+ return err;
+ }
+
+ return genlmsg_reply(msg.skb, info);
+}
+
+int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(skb->sk);
+ u32 prev_bearer = cb->args[0];
+ struct tipc_nl_msg msg;
+ int bearer_id;
+ int err;
+
+ if (prev_bearer == MAX_BEARERS)
+ return 0;
+
+ msg.skb = skb;
+ msg.portid = NETLINK_CB(cb->skb).portid;
+ msg.seq = cb->nlh->nlmsg_seq;
+
+ rtnl_lock();
+ for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) {
+ err = __tipc_nl_add_monitor(net, &msg, bearer_id);
+ if (err)
+ break;
+ }
+ rtnl_unlock();
+ cb->args[0] = bearer_id;
+
+ return skb->len;
+}
+
+int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct net *net = sock_net(skb->sk);
+ u32 prev_node = cb->args[1];
+ u32 bearer_id = cb->args[2];
+ int done = cb->args[0];
+ struct tipc_nl_msg msg;
+ int err;
+
+ if (!prev_node) {
+ struct nlattr **attrs = genl_dumpit_info(cb)->info.attrs;
+ struct nlattr *mon[TIPC_NLA_MON_MAX + 1];
+
+ if (!attrs[TIPC_NLA_MON])
+ return -EINVAL;
+
+ err = nla_parse_nested_deprecated(mon, TIPC_NLA_MON_MAX,
+ attrs[TIPC_NLA_MON],
+ tipc_nl_monitor_policy,
+ NULL);
+ if (err)
+ return err;
+
+ if (!mon[TIPC_NLA_MON_REF])
+ return -EINVAL;
+
+ bearer_id = nla_get_u32(mon[TIPC_NLA_MON_REF]);
+
+ if (bearer_id >= MAX_BEARERS)
+ return -EINVAL;
+ }
+
+ if (done)
+ return 0;
+
+ msg.skb = skb;
+ msg.portid = NETLINK_CB(cb->skb).portid;
+ msg.seq = cb->nlh->nlmsg_seq;
+
+ rtnl_lock();
+ err = tipc_nl_add_monitor_peer(net, &msg, bearer_id, &prev_node);
+ if (!err)
+ done = 1;
+
+ rtnl_unlock();
+ cb->args[0] = done;
+ cb->args[1] = prev_node;
+ cb->args[2] = bearer_id;
+
+ return skb->len;
+}
+
+#ifdef CONFIG_TIPC_CRYPTO
+static int tipc_nl_retrieve_key(struct nlattr **attrs,
+ struct tipc_aead_key **pkey)
+{
+ struct nlattr *attr = attrs[TIPC_NLA_NODE_KEY];
+ struct tipc_aead_key *key;
+
+ if (!attr)
+ return -ENODATA;
+
+ if (nla_len(attr) < sizeof(*key))
+ return -EINVAL;
+ key = (struct tipc_aead_key *)nla_data(attr);
+ if (key->keylen > TIPC_AEAD_KEYLEN_MAX ||
+ nla_len(attr) < tipc_aead_key_size(key))
+ return -EINVAL;
+
+ *pkey = key;
+ return 0;
+}
+
+static int tipc_nl_retrieve_nodeid(struct nlattr **attrs, u8 **node_id)
+{
+ struct nlattr *attr = attrs[TIPC_NLA_NODE_ID];
+
+ if (!attr)
+ return -ENODATA;
+
+ if (nla_len(attr) < TIPC_NODEID_LEN)
+ return -EINVAL;
+
+ *node_id = (u8 *)nla_data(attr);
+ return 0;
+}
+
+static int tipc_nl_retrieve_rekeying(struct nlattr **attrs, u32 *intv)
+{
+ struct nlattr *attr = attrs[TIPC_NLA_NODE_REKEYING];
+
+ if (!attr)
+ return -ENODATA;
+
+ *intv = nla_get_u32(attr);
+ return 0;
+}
+
+static int __tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[TIPC_NLA_NODE_MAX + 1];
+ struct net *net = sock_net(skb->sk);
+ struct tipc_crypto *tx = tipc_net(net)->crypto_tx, *c = tx;
+ struct tipc_node *n = NULL;
+ struct tipc_aead_key *ukey;
+ bool rekeying = true, master_key = false;
+ u8 *id, *own_id, mode;
+ u32 intv = 0;
+ int rc = 0;
+
+ if (!info->attrs[TIPC_NLA_NODE])
+ return -EINVAL;
+
+ rc = nla_parse_nested(attrs, TIPC_NLA_NODE_MAX,
+ info->attrs[TIPC_NLA_NODE],
+ tipc_nl_node_policy, info->extack);
+ if (rc)
+ return rc;
+
+ own_id = tipc_own_id(net);
+ if (!own_id) {
+		GENL_SET_ERR_MSG(info, "own node identity not found (set id?)");
+ return -EPERM;
+ }
+
+ rc = tipc_nl_retrieve_rekeying(attrs, &intv);
+ if (rc == -ENODATA)
+ rekeying = false;
+
+ rc = tipc_nl_retrieve_key(attrs, &ukey);
+ if (rc == -ENODATA && rekeying)
+ goto rekeying;
+ else if (rc)
+ return rc;
+
+ rc = tipc_aead_key_validate(ukey, info);
+ if (rc)
+ return rc;
+
+ rc = tipc_nl_retrieve_nodeid(attrs, &id);
+ switch (rc) {
+ case -ENODATA:
+ mode = CLUSTER_KEY;
+ master_key = !!(attrs[TIPC_NLA_NODE_KEY_MASTER]);
+ break;
+ case 0:
+ mode = PER_NODE_KEY;
+ if (memcmp(id, own_id, NODE_ID_LEN)) {
+ n = tipc_node_find_by_id(net, id) ?:
+ tipc_node_create(net, 0, id, 0xffffu, 0, true);
+ if (unlikely(!n))
+ return -ENOMEM;
+ c = n->crypto_rx;
+ }
+ break;
+ default:
+ return rc;
+ }
+
+ /* Initiate the TX/RX key */
+ rc = tipc_crypto_key_init(c, ukey, mode, master_key);
+ if (n)
+ tipc_node_put(n);
+
+ if (unlikely(rc < 0)) {
+ GENL_SET_ERR_MSG(info, "unable to initiate or attach new key");
+ return rc;
+ } else if (c == tx) {
+ /* Distribute TX key but not master one */
+ if (!master_key && tipc_crypto_key_distr(tx, rc, NULL))
+ GENL_SET_ERR_MSG(info, "failed to replicate new key");
+rekeying:
+ /* Schedule TX rekeying if needed */
+ tipc_crypto_rekeying_sched(tx, rekeying, intv);
+ }
+
+ return 0;
+}
+
+int tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+
+ rtnl_lock();
+ err = __tipc_nl_node_set_key(skb, info);
+ rtnl_unlock();
+
+ return err;
+}
+
+static int __tipc_nl_node_flush_key(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct net *net = sock_net(skb->sk);
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_node *n;
+
+ tipc_crypto_key_flush(tn->crypto_tx);
+ rcu_read_lock();
+ list_for_each_entry_rcu(n, &tn->node_list, list)
+ tipc_crypto_key_flush(n->crypto_rx);
+ rcu_read_unlock();
+
+ return 0;
+}
+
+int tipc_nl_node_flush_key(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+
+ rtnl_lock();
+ err = __tipc_nl_node_flush_key(skb, info);
+ rtnl_unlock();
+
+ return err;
+}
+#endif
+
+/**
+ * tipc_node_dump - dump TIPC node data
+ * @n: tipc node to be dumped
+ * @more: dump more?
+ * - false: dump only tipc node data
+ * - true: dump node link data as well
+ * @buf: buffer to which the formatted dump data is returned
+ */
+int tipc_node_dump(struct tipc_node *n, bool more, char *buf)
+{
+ int i = 0;
+ size_t sz = (more) ? NODE_LMAX : NODE_LMIN;
+
+ if (!n) {
+ i += scnprintf(buf, sz, "node data: (null)\n");
+ return i;
+ }
+
+ i += scnprintf(buf, sz, "node data: %x", n->addr);
+ i += scnprintf(buf + i, sz - i, " %x", n->state);
+ i += scnprintf(buf + i, sz - i, " %d", n->active_links[0]);
+ i += scnprintf(buf + i, sz - i, " %d", n->active_links[1]);
+ i += scnprintf(buf + i, sz - i, " %x", n->action_flags);
+ i += scnprintf(buf + i, sz - i, " %u", n->failover_sent);
+ i += scnprintf(buf + i, sz - i, " %u", n->sync_point);
+ i += scnprintf(buf + i, sz - i, " %d", n->link_cnt);
+ i += scnprintf(buf + i, sz - i, " %u", n->working_links);
+ i += scnprintf(buf + i, sz - i, " %x", n->capabilities);
+ i += scnprintf(buf + i, sz - i, " %lu\n", n->keepalive_intv);
+
+ if (!more)
+ return i;
+
+ i += scnprintf(buf + i, sz - i, "link_entry[0]:\n");
+ i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[0].mtu);
+ i += scnprintf(buf + i, sz - i, " media: ");
+ i += tipc_media_addr_printf(buf + i, sz - i, &n->links[0].maddr);
+ i += scnprintf(buf + i, sz - i, "\n");
+ i += tipc_link_dump(n->links[0].link, TIPC_DUMP_NONE, buf + i);
+ i += scnprintf(buf + i, sz - i, " inputq: ");
+ i += tipc_list_dump(&n->links[0].inputq, false, buf + i);
+
+ i += scnprintf(buf + i, sz - i, "link_entry[1]:\n");
+ i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[1].mtu);
+ i += scnprintf(buf + i, sz - i, " media: ");
+ i += tipc_media_addr_printf(buf + i, sz - i, &n->links[1].maddr);
+ i += scnprintf(buf + i, sz - i, "\n");
+ i += tipc_link_dump(n->links[1].link, TIPC_DUMP_NONE, buf + i);
+ i += scnprintf(buf + i, sz - i, " inputq: ");
+ i += tipc_list_dump(&n->links[1].inputq, false, buf + i);
+
+ i += scnprintf(buf + i, sz - i, "bclink:\n ");
+ i += tipc_link_dump(n->bc_entry.link, TIPC_DUMP_NONE, buf + i);
+
+ return i;
+}
+
+void tipc_node_pre_cleanup_net(struct net *exit_net)
+{
+ struct tipc_node *n;
+ struct tipc_net *tn;
+ struct net *tmp;
+
+ rcu_read_lock();
+ for_each_net_rcu(tmp) {
+ if (tmp == exit_net)
+ continue;
+ tn = tipc_net(tmp);
+ if (!tn)
+ continue;
+ spin_lock_bh(&tn->node_list_lock);
+ list_for_each_entry_rcu(n, &tn->node_list, list) {
+ if (!n->peer_net)
+ continue;
+ if (n->peer_net != exit_net)
+ continue;
+ tipc_node_write_lock(n);
+ n->peer_net = NULL;
+ n->peer_hash_mix = 0;
+ tipc_node_write_unlock_fast(n);
+ break;
+ }
+ spin_unlock_bh(&tn->node_list_lock);
+ }
+ rcu_read_unlock();
+}
diff --git a/net/tipc/node.h b/net/tipc/node.h
new file mode 100644
index 0000000000..154a5bbb0d
--- /dev/null
+++ b/net/tipc/node.h
@@ -0,0 +1,131 @@
+/*
+ * net/tipc/node.h: Include file for TIPC node management routines
+ *
+ * Copyright (c) 2000-2006, 2014-2016, Ericsson AB
+ * Copyright (c) 2005, 2010-2014, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_NODE_H
+#define _TIPC_NODE_H
+
+#include "addr.h"
+#include "net.h"
+#include "bearer.h"
+#include "msg.h"
+
+/* Optional capabilities supported by this code version
+ */
+enum {
+ TIPC_SYN_BIT = (1),
+ TIPC_BCAST_SYNCH = (1 << 1),
+ TIPC_BCAST_STATE_NACK = (1 << 2),
+ TIPC_BLOCK_FLOWCTL = (1 << 3),
+ TIPC_BCAST_RCAST = (1 << 4),
+ TIPC_NODE_ID128 = (1 << 5),
+ TIPC_LINK_PROTO_SEQNO = (1 << 6),
+ TIPC_MCAST_RBCTL = (1 << 7),
+ TIPC_GAP_ACK_BLOCK = (1 << 8),
+ TIPC_TUNNEL_ENHANCED = (1 << 9),
+ TIPC_NAGLE = (1 << 10),
+ TIPC_NAMED_BCAST = (1 << 11)
+};
+
+#define TIPC_NODE_CAPABILITIES (TIPC_SYN_BIT | \
+ TIPC_BCAST_SYNCH | \
+ TIPC_BCAST_STATE_NACK | \
+ TIPC_BCAST_RCAST | \
+ TIPC_BLOCK_FLOWCTL | \
+ TIPC_NODE_ID128 | \
+ TIPC_LINK_PROTO_SEQNO | \
+ TIPC_MCAST_RBCTL | \
+ TIPC_GAP_ACK_BLOCK | \
+ TIPC_TUNNEL_ENHANCED | \
+ TIPC_NAGLE | \
+ TIPC_NAMED_BCAST)
+
+#define INVALID_BEARER_ID -1
+
+void tipc_node_stop(struct net *net);
+bool tipc_node_get_id(struct net *net, u32 addr, u8 *id);
+u32 tipc_node_get_addr(struct tipc_node *node);
+char *tipc_node_get_id_str(struct tipc_node *node);
+void tipc_node_put(struct tipc_node *node);
+void tipc_node_get(struct tipc_node *node);
+struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id,
+ u16 capabilities, u32 hash_mixes,
+ bool preliminary);
+#ifdef CONFIG_TIPC_CRYPTO
+struct tipc_crypto *tipc_node_crypto_rx(struct tipc_node *__n);
+struct tipc_crypto *tipc_node_crypto_rx_by_list(struct list_head *pos);
+struct tipc_crypto *tipc_node_crypto_rx_by_addr(struct net *net, u32 addr);
+#endif
+u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr);
+void tipc_node_check_dest(struct net *net, u32 onode, u8 *peer_id128,
+ struct tipc_bearer *bearer,
+ u16 capabilities, u32 signature, u32 hash_mixes,
+ struct tipc_media_addr *maddr,
+ bool *respond, bool *dupl_addr);
+void tipc_node_delete_links(struct net *net, int bearer_id);
+void tipc_node_apply_property(struct net *net, struct tipc_bearer *b, int prop);
+int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 node,
+ char *linkname, size_t len);
+int tipc_node_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
+ int selector);
+int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *list);
+int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest,
+ u32 selector);
+void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr);
+void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr);
+void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests);
+int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port);
+void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port);
+int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected);
+bool tipc_node_is_up(struct net *net, u32 addr);
+u16 tipc_node_get_capabilities(struct net *net, u32 addr);
+int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb);
+int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb);
+int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info);
+int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info);
+int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info);
+int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info);
+
+int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info);
+int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info);
+int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb);
+int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
+ struct netlink_callback *cb);
+#ifdef CONFIG_TIPC_CRYPTO
+int tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info);
+int tipc_nl_node_flush_key(struct sk_buff *skb, struct genl_info *info);
+#endif
+void tipc_node_pre_cleanup_net(struct net *exit_net);
+#endif
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
new file mode 100644
index 0000000000..bb1118d02f
--- /dev/null
+++ b/net/tipc/socket.c
@@ -0,0 +1,4019 @@
+/*
+ * net/tipc/socket.c: TIPC socket API
+ *
+ * Copyright (c) 2001-2007, 2012-2019, Ericsson AB
+ * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
+ * Copyright (c) 2020-2021, Red Hat Inc
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/rhashtable.h>
+#include <linux/sched/signal.h>
+#include <trace/events/sock.h>
+
+#include "core.h"
+#include "name_table.h"
+#include "node.h"
+#include "link.h"
+#include "name_distr.h"
+#include "socket.h"
+#include "bcast.h"
+#include "netlink.h"
+#include "group.h"
+#include "trace.h"
+
+#define NAGLE_START_INIT 4
+#define NAGLE_START_MAX 1024
+#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
+#define CONN_PROBING_INTV msecs_to_jiffies(3600000) /* [ms] => 1 h */
+#define TIPC_MAX_PORT 0xffffffff
+#define TIPC_MIN_PORT 1
+#define TIPC_ACK_RATE 4 /* ACK at 1/4 of rcv window size */
+
+enum {
+ TIPC_LISTEN = TCP_LISTEN,
+ TIPC_ESTABLISHED = TCP_ESTABLISHED,
+ TIPC_OPEN = TCP_CLOSE,
+ TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
+ TIPC_CONNECTING = TCP_SYN_SENT,
+};
+
+struct sockaddr_pair {
+ struct sockaddr_tipc sock;
+ struct sockaddr_tipc member;
+};
+
+/**
+ * struct tipc_sock - TIPC socket structure
+ * @sk: socket - interacts with 'port' and with user via the socket API
+ * @max_pkt: maximum packet size "hint" used when building messages sent by port
+ * @maxnagle: maximum size of msg which can be subject to nagle
+ * @portid: unique port identity in TIPC socket hash table
+ * @phdr: preformatted message header used when sending messages
+ * @cong_links: list of congested links
+ * @publications: list of publications for port
+ * @blocking_link: address of the congested link we are currently sleeping on
+ * @pub_count: total # of publications port has made during its lifetime
+ * @conn_timeout: the time we can wait for an unanswered setup request
+ * @probe_unacked: probe has not received ack yet
+ * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
+ * @cong_link_cnt: number of congested links
+ * @snt_unacked: # messages sent by socket, and not yet acked by peer
+ * @snd_win: send window size
+ * @peer_caps: peer capabilities mask
+ * @rcv_unacked: # messages read by user, but not yet acked back to peer
+ * @rcv_win: receive window size
+ * @peer: 'connected' peer for dgram/rdm
+ * @node: hash table node
+ * @mc_method: cookie for use between socket and broadcast layer
+ * @rcu: rcu struct for tipc_sock
+ * @group: TIPC communications group
+ * @oneway: message count in one direction (FIXME)
+ * @nagle_start: current nagle value
+ * @snd_backlog: send backlog count
+ * @msg_acc: messages accepted; used in managing backlog and nagle
+ * @pkt_cnt: TIPC socket packet count
+ * @expect_ack: whether this TIPC socket is expecting an ack
+ * @nodelay: setsockopt() TIPC_NODELAY setting
+ * @group_is_open: TIPC socket group is fully open (FIXME)
+ * @published: true if port has one or more associated names
+ * @conn_addrtype: address type used when establishing connection
+ */
+struct tipc_sock {
+ struct sock sk;
+ u32 max_pkt;
+ u32 maxnagle;
+ u32 portid;
+ struct tipc_msg phdr;
+ struct list_head cong_links;
+ struct list_head publications;
+ u32 pub_count;
+ atomic_t dupl_rcvcnt;
+ u16 conn_timeout;
+ bool probe_unacked;
+ u16 cong_link_cnt;
+ u16 snt_unacked;
+ u16 snd_win;
+ u16 peer_caps;
+ u16 rcv_unacked;
+ u16 rcv_win;
+ struct sockaddr_tipc peer;
+ struct rhash_head node;
+ struct tipc_mc_method mc_method;
+ struct rcu_head rcu;
+ struct tipc_group *group;
+ u32 oneway;
+ u32 nagle_start;
+ u16 snd_backlog;
+ u16 msg_acc;
+ u16 pkt_cnt;
+ bool expect_ack;
+ bool nodelay;
+ bool group_is_open;
+ bool published;
+ u8 conn_addrtype;
+};
+
+static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
+static void tipc_data_ready(struct sock *sk);
+static void tipc_write_space(struct sock *sk);
+static void tipc_sock_destruct(struct sock *sk);
+static int tipc_release(struct socket *sock);
+static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
+ bool kern);
+static void tipc_sk_timeout(struct timer_list *t);
+static int tipc_sk_publish(struct tipc_sock *tsk, struct tipc_uaddr *ua);
+static int tipc_sk_withdraw(struct tipc_sock *tsk, struct tipc_uaddr *ua);
+static int tipc_sk_leave(struct tipc_sock *tsk);
+static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
+static int tipc_sk_insert(struct tipc_sock *tsk);
+static void tipc_sk_remove(struct tipc_sock *tsk);
+static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
+static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
+static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack);
+static int tipc_wait_for_connect(struct socket *sock, long *timeo_p);
+
+static const struct proto_ops packet_ops;
+static const struct proto_ops stream_ops;
+static const struct proto_ops msg_ops;
+static struct proto tipc_proto;
+static const struct rhashtable_params tsk_rht_params;
+
+static u32 tsk_own_node(struct tipc_sock *tsk)
+{
+ return msg_prevnode(&tsk->phdr);
+}
+
+static u32 tsk_peer_node(struct tipc_sock *tsk)
+{
+ return msg_destnode(&tsk->phdr);
+}
+
+static u32 tsk_peer_port(struct tipc_sock *tsk)
+{
+ return msg_destport(&tsk->phdr);
+}
+
+static bool tsk_unreliable(struct tipc_sock *tsk)
+{
+ return msg_src_droppable(&tsk->phdr) != 0;
+}
+
+static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
+{
+ msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
+}
+
+static bool tsk_unreturnable(struct tipc_sock *tsk)
+{
+ return msg_dest_droppable(&tsk->phdr) != 0;
+}
+
+static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
+{
+ msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
+}
+
+static int tsk_importance(struct tipc_sock *tsk)
+{
+ return msg_importance(&tsk->phdr);
+}
+
+static struct tipc_sock *tipc_sk(const struct sock *sk)
+{
+ return container_of(sk, struct tipc_sock, sk);
+}
+
+int tsk_set_importance(struct sock *sk, int imp)
+{
+ if (imp > TIPC_CRITICAL_IMPORTANCE)
+ return -EINVAL;
+ msg_set_importance(&tipc_sk(sk)->phdr, (u32)imp);
+ return 0;
+}
+
+static bool tsk_conn_cong(struct tipc_sock *tsk)
+{
+ return tsk->snt_unacked > tsk->snd_win;
+}
+
+static u16 tsk_blocks(int len)
+{
+ return ((len / FLOWCTL_BLK_SZ) + 1);
+}
+
+/* tsk_adv_blocks(): translate a buffer size in bytes to number of
+ * advertisable blocks, taking into account the ratio truesize(len)/len.
+ * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
+ */
+static u16 tsk_adv_blocks(int len)
+{
+ return len / FLOWCTL_BLK_SZ / 4;
+}
+
+/* tsk_inc(): increment counter for sent or received data
+ * - If block based flow control is not supported by the peer we
+ *   fall back to message based flow control, i.e. one increment per message
+ */
+static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
+{
+ if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
+ return ((msglen / FLOWCTL_BLK_SZ) + 1);
+ return 1;
+}
+
+/* tsk_set_nagle - enable/disable nagle property by manipulating maxnagle
+ */
+static void tsk_set_nagle(struct tipc_sock *tsk)
+{
+ struct sock *sk = &tsk->sk;
+
+ tsk->maxnagle = 0;
+ if (sk->sk_type != SOCK_STREAM)
+ return;
+ if (tsk->nodelay)
+ return;
+ if (!(tsk->peer_caps & TIPC_NAGLE))
+ return;
+ /* Limit node local buffer size to avoid receive queue overflow */
+ if (tsk->max_pkt == MAX_MSG_SIZE)
+ tsk->maxnagle = 1500;
+ else
+ tsk->maxnagle = tsk->max_pkt;
+}
+
+/**
+ * tsk_advance_rx_queue - discard first buffer in socket receive queue
+ * @sk: network socket
+ *
+ * Caller must hold socket lock
+ */
+static void tsk_advance_rx_queue(struct sock *sk)
+{
+ trace_tipc_sk_advance_rx(sk, NULL, TIPC_DUMP_SK_RCVQ, " ");
+ kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
+}
+
+/* tipc_sk_respond(): send response message back to sender
+ */
+static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
+{
+ u32 selector;
+ u32 dnode;
+ u32 onode = tipc_own_addr(sock_net(sk));
+
+ if (!tipc_msg_reverse(onode, &skb, err))
+ return;
+
+ trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE, "@sk_respond!");
+ dnode = msg_destnode(buf_msg(skb));
+ selector = msg_origport(buf_msg(skb));
+ tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
+}
+
+/**
+ * tsk_rej_rx_queue - reject all buffers in socket receive queue
+ * @sk: network socket
+ * @error: response error code
+ *
+ * Caller must hold socket lock
+ */
+static void tsk_rej_rx_queue(struct sock *sk, int error)
+{
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
+ tipc_sk_respond(sk, skb, error);
+}
+
+static bool tipc_sk_connected(const struct sock *sk)
+{
+ return READ_ONCE(sk->sk_state) == TIPC_ESTABLISHED;
+}
+
+/* tipc_sk_type_connectionless - check if the socket is a datagram socket
+ * @sk: socket
+ *
+ * Returns true if connectionless, false otherwise
+ */
+static bool tipc_sk_type_connectionless(struct sock *sk)
+{
+ return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
+}
+
+/* tsk_peer_msg - verify if message was sent by connected port's peer
+ *
+ * Handles cases where the node's network address has changed from
+ * the default of <0.0.0> to its configured setting.
+ */
+static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
+{
+ struct sock *sk = &tsk->sk;
+ u32 self = tipc_own_addr(sock_net(sk));
+ u32 peer_port = tsk_peer_port(tsk);
+ u32 orig_node, peer_node;
+
+ if (unlikely(!tipc_sk_connected(sk)))
+ return false;
+
+ if (unlikely(msg_origport(msg) != peer_port))
+ return false;
+
+ orig_node = msg_orignode(msg);
+ peer_node = tsk_peer_node(tsk);
+
+ if (likely(orig_node == peer_node))
+ return true;
+
+ if (!orig_node && peer_node == self)
+ return true;
+
+ if (!peer_node && orig_node == self)
+ return true;
+
+ return false;
+}
+
+/* tipc_set_sk_state - set the sk_state of the socket
+ * @sk: socket
+ *
+ * Caller must hold socket lock
+ *
+ * Returns 0 on success, errno otherwise
+ */
+static int tipc_set_sk_state(struct sock *sk, int state)
+{
+ int oldsk_state = sk->sk_state;
+ int res = -EINVAL;
+
+ switch (state) {
+ case TIPC_OPEN:
+ res = 0;
+ break;
+ case TIPC_LISTEN:
+ case TIPC_CONNECTING:
+ if (oldsk_state == TIPC_OPEN)
+ res = 0;
+ break;
+ case TIPC_ESTABLISHED:
+ if (oldsk_state == TIPC_CONNECTING ||
+ oldsk_state == TIPC_OPEN)
+ res = 0;
+ break;
+ case TIPC_DISCONNECTING:
+ if (oldsk_state == TIPC_CONNECTING ||
+ oldsk_state == TIPC_ESTABLISHED)
+ res = 0;
+ break;
+ }
+
+ if (!res)
+ sk->sk_state = state;
+
+ return res;
+}
+
+static int tipc_sk_sock_err(struct socket *sock, long *timeout)
+{
+ struct sock *sk = sock->sk;
+ int err = sock_error(sk);
+ int typ = sock->type;
+
+ if (err)
+ return err;
+ if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
+ if (sk->sk_state == TIPC_DISCONNECTING)
+ return -EPIPE;
+ else if (!tipc_sk_connected(sk))
+ return -ENOTCONN;
+ }
+ if (!*timeout)
+ return -EAGAIN;
+ if (signal_pending(current))
+ return sock_intr_errno(*timeout);
+
+ return 0;
+}
+
+#define tipc_wait_for_cond(sock_, timeo_, condition_) \
+({ \
+ DEFINE_WAIT_FUNC(wait_, woken_wake_function); \
+ struct sock *sk_; \
+ int rc_; \
+ \
+ while ((rc_ = !(condition_))) { \
+ /* coupled with smp_wmb() in tipc_sk_proto_rcv() */ \
+ smp_rmb(); \
+ sk_ = (sock_)->sk; \
+ rc_ = tipc_sk_sock_err((sock_), timeo_); \
+ if (rc_) \
+ break; \
+ add_wait_queue(sk_sleep(sk_), &wait_); \
+ release_sock(sk_); \
+ *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
+ sched_annotate_sleep(); \
+ lock_sock(sk_); \
+ remove_wait_queue(sk_sleep(sk_), &wait_); \
+ } \
+ rc_; \
+})
+
+/**
+ * tipc_sk_create - create a TIPC socket
+ * @net: network namespace (must be default network)
+ * @sock: pre-allocated socket structure
+ * @protocol: protocol indicator (must be 0)
+ * @kern: caused by kernel or by userspace?
+ *
+ * This routine creates additional data structures used by the TIPC socket,
+ * initializes them, and links them together.
+ *
+ * Return: 0 on success, errno otherwise
+ */
+static int tipc_sk_create(struct net *net, struct socket *sock,
+ int protocol, int kern)
+{
+ const struct proto_ops *ops;
+ struct sock *sk;
+ struct tipc_sock *tsk;
+ struct tipc_msg *msg;
+
+ /* Validate arguments */
+ if (unlikely(protocol != 0))
+ return -EPROTONOSUPPORT;
+
+ switch (sock->type) {
+ case SOCK_STREAM:
+ ops = &stream_ops;
+ break;
+ case SOCK_SEQPACKET:
+ ops = &packet_ops;
+ break;
+ case SOCK_DGRAM:
+ case SOCK_RDM:
+ ops = &msg_ops;
+ break;
+ default:
+ return -EPROTOTYPE;
+ }
+
+ /* Allocate socket's protocol area */
+ sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
+ if (sk == NULL)
+ return -ENOMEM;
+
+ tsk = tipc_sk(sk);
+ tsk->max_pkt = MAX_PKT_DEFAULT;
+ tsk->maxnagle = 0;
+ tsk->nagle_start = NAGLE_START_INIT;
+ INIT_LIST_HEAD(&tsk->publications);
+ INIT_LIST_HEAD(&tsk->cong_links);
+ msg = &tsk->phdr;
+
+ /* Finish initializing socket data structures */
+ sock->ops = ops;
+ sock_init_data(sock, sk);
+ tipc_set_sk_state(sk, TIPC_OPEN);
+ if (tipc_sk_insert(tsk)) {
+ sk_free(sk);
+ pr_warn("Socket create failed; port number exhausted\n");
+ return -EINVAL;
+ }
+
+ /* Ensure tsk is visible before we read own_addr. */
+ smp_mb();
+
+ tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE,
+ TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
+
+ msg_set_origport(msg, tsk->portid);
+ timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
+ sk->sk_shutdown = 0;
+ sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
+ sk->sk_rcvbuf = READ_ONCE(sysctl_tipc_rmem[1]);
+ sk->sk_data_ready = tipc_data_ready;
+ sk->sk_write_space = tipc_write_space;
+ sk->sk_destruct = tipc_sock_destruct;
+ tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
+ tsk->group_is_open = true;
+ atomic_set(&tsk->dupl_rcvcnt, 0);
+
+ /* Start out with safe limits until we receive an advertised window */
+ tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
+ tsk->rcv_win = tsk->snd_win;
+
+ if (tipc_sk_type_connectionless(sk)) {
+ tsk_set_unreturnable(tsk, true);
+ if (sock->type == SOCK_DGRAM)
+ tsk_set_unreliable(tsk, true);
+ }
+ __skb_queue_head_init(&tsk->mc_method.deferredq);
+ trace_tipc_sk_create(sk, NULL, TIPC_DUMP_NONE, " ");
+ return 0;
+}
+
+static void tipc_sk_callback(struct rcu_head *head)
+{
+ struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
+
+ sock_put(&tsk->sk);
+}
+
+/* Caller should hold socket lock for the socket. */
+static void __tipc_shutdown(struct socket *sock, int error)
+{
+ struct sock *sk = sock->sk;
+ struct tipc_sock *tsk = tipc_sk(sk);
+ struct net *net = sock_net(sk);
+ long timeout = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT);
+ u32 dnode = tsk_peer_node(tsk);
+ struct sk_buff *skb;
+
+ /* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */
+ tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
+ !tsk_conn_cong(tsk)));
+
+ /* Push out delayed messages if in Nagle mode */
+ tipc_sk_push_backlog(tsk, false);
+ /* Remove pending SYN */
+ __skb_queue_purge(&sk->sk_write_queue);
+
+ /* Remove partially received buffer if any */
+ skb = skb_peek(&sk->sk_receive_queue);
+ if (skb && TIPC_SKB_CB(skb)->bytes_read) {
+ __skb_unlink(skb, &sk->sk_receive_queue);
+ kfree_skb(skb);
+ }
+
+ /* Reject all unreceived messages if connectionless */
+ if (tipc_sk_type_connectionless(sk)) {
+ tsk_rej_rx_queue(sk, error);
+ return;
+ }
+
+ switch (sk->sk_state) {
+ case TIPC_CONNECTING:
+ case TIPC_ESTABLISHED:
+ tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+ tipc_node_remove_conn(net, dnode, tsk->portid);
+ /* Send a FIN+/- to its peer */
+ skb = __skb_dequeue(&sk->sk_receive_queue);
+ if (skb) {
+ __skb_queue_purge(&sk->sk_receive_queue);
+ tipc_sk_respond(sk, skb, error);
+ break;
+ }
+ skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
+ TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
+ tsk_own_node(tsk), tsk_peer_port(tsk),
+ tsk->portid, error);
+ if (skb)
+ tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
+ break;
+ case TIPC_LISTEN:
+ /* Reject all SYN messages */
+ tsk_rej_rx_queue(sk, error);
+ break;
+ default:
+ __skb_queue_purge(&sk->sk_receive_queue);
+ break;
+ }
+}
+
+/**
+ * tipc_release - destroy a TIPC socket
+ * @sock: socket to destroy
+ *
+ * This routine cleans up any messages that are still queued on the socket.
+ * For DGRAM and RDM socket types, all queued messages are rejected.
+ * For SEQPACKET and STREAM socket types, the first message is rejected
+ * and any others are discarded. (If the first message on a STREAM socket
+ * is partially-read, it is discarded and the next one is rejected instead.)
+ *
+ * NOTE: Rejected messages are not necessarily returned to the sender! They
+ * are returned or discarded according to the "destination droppable" setting
+ * specified for the message by the sender.
+ *
+ * Return: 0 on success, errno otherwise
+ */
+static int tipc_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+ struct tipc_sock *tsk;
+
+ /*
+ * Exit if socket isn't fully initialized (occurs when a failed accept()
+ * releases a pre-allocated child socket that was never used)
+ */
+ if (sk == NULL)
+ return 0;
+
+ tsk = tipc_sk(sk);
+ lock_sock(sk);
+
+ trace_tipc_sk_release(sk, NULL, TIPC_DUMP_ALL, " ");
+ __tipc_shutdown(sock, TIPC_ERR_NO_PORT);
+ sk->sk_shutdown = SHUTDOWN_MASK;
+ tipc_sk_leave(tsk);
+ tipc_sk_withdraw(tsk, NULL);
+ __skb_queue_purge(&tsk->mc_method.deferredq);
+ sk_stop_timer(sk, &sk->sk_timer);
+ tipc_sk_remove(tsk);
+
+ sock_orphan(sk);
+ /* Reject any messages that accumulated in backlog queue */
+ release_sock(sk);
+ tipc_dest_list_purge(&tsk->cong_links);
+ tsk->cong_link_cnt = 0;
+ call_rcu(&tsk->rcu, tipc_sk_callback);
+ sock->sk = NULL;
+
+ return 0;
+}
+
+/**
+ * __tipc_bind - associate or disassociate TIPC name(s) with a socket
+ * @sock: socket structure
+ * @skaddr: socket address describing name(s) and desired operation
+ * @alen: size of socket address data structure
+ *
+ * Name and name sequence binding are indicated using a positive scope value;
+ * a negative scope value unbinds the specified name. Specifying no name
+ * (i.e. a socket address length of 0) unbinds all names from the socket.
+ *
+ * Return: 0 on success, errno otherwise
+ *
+ * NOTE: This routine doesn't need to take the socket lock since it doesn't
+ * access any non-constant socket information.
+ */
+static int __tipc_bind(struct socket *sock, struct sockaddr *skaddr, int alen)
+{
+ struct tipc_uaddr *ua = (struct tipc_uaddr *)skaddr;
+ struct tipc_sock *tsk = tipc_sk(sock->sk);
+ bool unbind = false;
+
+ if (unlikely(!alen))
+ return tipc_sk_withdraw(tsk, NULL);
+
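+	/* A service address binds as a single-instance service range */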
+ if (ua->addrtype == TIPC_SERVICE_ADDR) {
+ ua->addrtype = TIPC_SERVICE_RANGE;
+ ua->sr.upper = ua->sr.lower;
+ }
+ if (ua->scope < 0) {
+ unbind = true;
+ ua->scope = -ua->scope;
+ }
+ /* Users may still use deprecated TIPC_ZONE_SCOPE */
+ if (ua->scope != TIPC_NODE_SCOPE)
+ ua->scope = TIPC_CLUSTER_SCOPE;
+
+ if (tsk->group)
+ return -EACCES;
+
+ if (unbind)
+ return tipc_sk_withdraw(tsk, ua);
+ return tipc_sk_publish(tsk, ua);
+}
+
+int tipc_sk_bind(struct socket *sock, struct sockaddr *skaddr, int alen)
+{
+ int res;
+
+ lock_sock(sock->sk);
+ res = __tipc_bind(sock, skaddr, alen);
+ release_sock(sock->sk);
+ return res;
+}
+
+static int tipc_bind(struct socket *sock, struct sockaddr *skaddr, int alen)
+{
+ struct tipc_uaddr *ua = (struct tipc_uaddr *)skaddr;
+ u32 atype = ua->addrtype;
+
+ if (alen) {
+ if (!tipc_uaddr_valid(ua, alen))
+ return -EINVAL;
+ if (atype == TIPC_SOCKET_ADDR)
+ return -EAFNOSUPPORT;
+ if (ua->sr.type < TIPC_RESERVED_TYPES) {
+ pr_warn_once("Can't bind to reserved service type %u\n",
+ ua->sr.type);
+ return -EACCES;
+ }
+ }
+ return tipc_sk_bind(sock, skaddr, alen);
+}
+
+/**
+ * tipc_getname - get port ID of socket or peer socket
+ * @sock: socket structure
+ * @uaddr: area for returned socket address
+ * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
+ *
+ * Return: 0 on success, errno otherwise
+ *
+ * NOTE: This routine doesn't need to take the socket lock since it only
+ * accesses socket information that is unchanging (or which changes in
+ * a completely predictable manner).
+ */
+static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
+ int peer)
+{
+ struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
+ struct sock *sk = sock->sk;
+ struct tipc_sock *tsk = tipc_sk(sk);
+
+ memset(addr, 0, sizeof(*addr));
+ if (peer) {
+ if ((!tipc_sk_connected(sk)) &&
+ ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
+ return -ENOTCONN;
+ addr->addr.id.ref = tsk_peer_port(tsk);
+ addr->addr.id.node = tsk_peer_node(tsk);
+ } else {
+ addr->addr.id.ref = tsk->portid;
+ addr->addr.id.node = tipc_own_addr(sock_net(sk));
+ }
+
+ addr->addrtype = TIPC_SOCKET_ADDR;
+ addr->family = AF_TIPC;
+ addr->scope = 0;
+ addr->addr.name.domain = 0;
+
+ return sizeof(*addr);
+}
+
+/**
+ * tipc_poll - read and possibly block on pollmask
+ * @file: file structure associated with the socket
+ * @sock: socket for which to calculate the poll bits
+ * @wait: poll table entry
+ *
+ * Return: pollmask value
+ *
+ * COMMENTARY:
+ * It appears that the usual socket locking mechanisms are not useful here
+ * since the pollmask info is potentially out-of-date the moment this routine
+ * exits. TCP and other protocols seem to rely on higher level poll routines
+ * to handle any preventable race conditions, so TIPC will do the same ...
+ *
+ * IMPORTANT: The fact that a read or write operation is indicated does NOT
+ * imply that the operation will succeed, merely that it should be performed
+ * and will not block.
+ */
+static __poll_t tipc_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
+{
+ struct sock *sk = sock->sk;
+ struct tipc_sock *tsk = tipc_sk(sk);
+ __poll_t revents = 0;
+
+ sock_poll_wait(file, sock, wait);
+ trace_tipc_sk_poll(sk, NULL, TIPC_DUMP_ALL, " ");
+
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
+ if (sk->sk_shutdown == SHUTDOWN_MASK)
+ revents |= EPOLLHUP;
+
+ switch (sk->sk_state) {
+ case TIPC_ESTABLISHED:
+ if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
+ revents |= EPOLLOUT;
+ fallthrough;
+ case TIPC_LISTEN:
+ case TIPC_CONNECTING:
+ if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+ revents |= EPOLLIN | EPOLLRDNORM;
+ break;
+ case TIPC_OPEN:
+ if (tsk->group_is_open && !tsk->cong_link_cnt)
+ revents |= EPOLLOUT;
+ if (!tipc_sk_type_connectionless(sk))
+ break;
+ if (skb_queue_empty_lockless(&sk->sk_receive_queue))
+ break;
+ revents |= EPOLLIN | EPOLLRDNORM;
+ break;
+ case TIPC_DISCONNECTING:
+ revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
+ break;
+ }
+ return revents;
+}
+
+/**
+ * tipc_sendmcast - send multicast message
+ * @sock: socket structure
+ * @ua: destination address struct
+ * @msg: message to send
+ * @dlen: length of data to send
+ * @timeout: timeout to wait for wakeup
+ *
+ * Called from function tipc_sendmsg(), which has done all sanity checks
+ * Return: the number of bytes sent on success, or errno
+ */
+static int tipc_sendmcast(struct socket *sock, struct tipc_uaddr *ua,
+ struct msghdr *msg, size_t dlen, long timeout)
+{
+ struct sock *sk = sock->sk;
+ struct tipc_sock *tsk = tipc_sk(sk);
+ struct tipc_msg *hdr = &tsk->phdr;
+ struct net *net = sock_net(sk);
+ int mtu = tipc_bcast_get_mtu(net);
+ struct sk_buff_head pkts;
+ struct tipc_nlist dsts;
+ int rc;
+
+ if (tsk->group)
+ return -EACCES;
+
+ /* Block or return if any destination link is congested */
+ rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
+ if (unlikely(rc))
+ return rc;
+
+ /* Lookup destination nodes */
+ tipc_nlist_init(&dsts, tipc_own_addr(net));
+ tipc_nametbl_lookup_mcast_nodes(net, ua, &dsts);
+ if (!dsts.local && !dsts.remote)
+ return -EHOSTUNREACH;
+
+ /* Build message header */
+ msg_set_type(hdr, TIPC_MCAST_MSG);
+ msg_set_hdr_sz(hdr, MCAST_H_SIZE);
+ msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
+ msg_set_destport(hdr, 0);
+ msg_set_destnode(hdr, 0);
+ msg_set_nametype(hdr, ua->sr.type);
+ msg_set_namelower(hdr, ua->sr.lower);
+ msg_set_nameupper(hdr, ua->sr.upper);
+
+ /* Build message as chain of buffers */
+ __skb_queue_head_init(&pkts);
+ rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);
+
+ /* Send message if build was successful */
+ if (unlikely(rc == dlen)) {
+ trace_tipc_sk_sendmcast(sk, skb_peek(&pkts),
+ TIPC_DUMP_SK_SNDQ, " ");
+ rc = tipc_mcast_xmit(net, &pkts, &tsk->mc_method, &dsts,
+ &tsk->cong_link_cnt);
+ }
+
+ tipc_nlist_purge(&dsts);
+
+ return rc ? rc : dlen;
+}
+
+/**
+ * tipc_send_group_msg - send a message to a member in the group
+ * @net: network namespace
+ * @tsk: tipc socket
+ * @m: message to send
+ * @mb: group member
+ * @dnode: destination node
+ * @dport: destination port
+ * @dlen: total length of message data
+ */
+static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
+ struct msghdr *m, struct tipc_member *mb,
+ u32 dnode, u32 dport, int dlen)
+{
+ u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
+ struct tipc_mc_method *method = &tsk->mc_method;
+ int blks = tsk_blocks(GROUP_H_SIZE + dlen);
+ struct tipc_msg *hdr = &tsk->phdr;
+ struct sk_buff_head pkts;
+ int mtu, rc;
+
+ /* Complete message header */
+ msg_set_type(hdr, TIPC_GRP_UCAST_MSG);
+ msg_set_hdr_sz(hdr, GROUP_H_SIZE);
+ msg_set_destport(hdr, dport);
+ msg_set_destnode(hdr, dnode);
+ msg_set_grp_bc_seqno(hdr, bc_snd_nxt);
+
+ /* Build message as chain of buffers */
+ __skb_queue_head_init(&pkts);
+ mtu = tipc_node_get_mtu(net, dnode, tsk->portid, false);
+ rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
+ if (unlikely(rc != dlen))
+ return rc;
+
+ /* Send message */
+ rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
+ if (unlikely(rc == -ELINKCONG)) {
+ tipc_dest_push(&tsk->cong_links, dnode, 0);
+ tsk->cong_link_cnt++;
+ }
+
+ /* Update send window */
+ tipc_group_update_member(mb, blks);
+
+ /* A broadcast sent within next EXPIRE period must follow same path */
+ method->rcast = true;
+ method->mandatory = true;
+ return dlen;
+}
+
+/**
+ * tipc_send_group_unicast - send message to a member in the group
+ * @sock: socket structure
+ * @m: message to send
+ * @dlen: total length of message data
+ * @timeout: timeout to wait for wakeup
+ *
+ * Called from function tipc_sendmsg(), which has done all sanity checks
+ * Return: the number of bytes sent on success, or errno
+ */
+static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
+ int dlen, long timeout)
+{
+ struct sock *sk = sock->sk;
+ struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
+ int blks = tsk_blocks(GROUP_H_SIZE + dlen);
+ struct tipc_sock *tsk = tipc_sk(sk);
+ struct net *net = sock_net(sk);
+ struct tipc_member *mb = NULL;
+ u32 node, port;
+ int rc;
+
+ node = ua->sk.node;
+ port = ua->sk.ref;
+ if (!port && !node)
+ return -EHOSTUNREACH;
+
+ /* Block or return if destination link or member is congested */
+ rc = tipc_wait_for_cond(sock, &timeout,
+ !tipc_dest_find(&tsk->cong_links, node, 0) &&
+ tsk->group &&
+ !tipc_group_cong(tsk->group, node, port, blks,
+ &mb));
+ if (unlikely(rc))
+ return rc;
+
+ if (unlikely(!mb))
+ return -EHOSTUNREACH;
+
+ rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);
+
+ return rc ? rc : dlen;
+}
+
+/**
+ * tipc_send_group_anycast - send message to any member with given identity
+ * @sock: socket structure
+ * @m: message to send
+ * @dlen: total length of message data
+ * @timeout: timeout to wait for wakeup
+ *
+ * Called from function tipc_sendmsg(), which has done all sanity checks
+ * Return: the number of bytes sent on success, or errno
+ */
+static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
+ int dlen, long timeout)
+{
+ struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
+ struct sock *sk = sock->sk;
+ struct tipc_sock *tsk = tipc_sk(sk);
+ struct list_head *cong_links = &tsk->cong_links;
+ int blks = tsk_blocks(GROUP_H_SIZE + dlen);
+ struct tipc_msg *hdr = &tsk->phdr;
+ struct tipc_member *first = NULL;
+ struct tipc_member *mbr = NULL;
+ struct net *net = sock_net(sk);
+ u32 node, port, exclude;
+ struct list_head dsts;
+ int lookups = 0;
+ int dstcnt, rc;
+ bool cong;
+
+ INIT_LIST_HEAD(&dsts);
+ ua->sa.type = msg_nametype(hdr);
+ ua->scope = msg_lookup_scope(hdr);
+
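+	/* Retry the lookup up to three times; group membership and
+	 * congestion state may change while sleeping in
+	 * tipc_wait_for_cond() below
+	 */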
+ while (++lookups < 4) {
+ exclude = tipc_group_exclude(tsk->group);
+
+ first = NULL;
+
+ /* Look for a non-congested destination member, if any */
+ while (1) {
+ if (!tipc_nametbl_lookup_group(net, ua, &dsts, &dstcnt,
+ exclude, false))
+ return -EHOSTUNREACH;
+ tipc_dest_pop(&dsts, &node, &port);
+ cong = tipc_group_cong(tsk->group, node, port, blks,
+ &mbr);
+ if (!cong)
+ break;
+ if (mbr == first)
+ break;
+ if (!first)
+ first = mbr;
+ }
+
+ /* Start over if destination was not in member list */
+ if (unlikely(!mbr))
+ continue;
+
+ if (likely(!cong && !tipc_dest_find(cong_links, node, 0)))
+ break;
+
+ /* Block or return if destination link or member is congested */
+ rc = tipc_wait_for_cond(sock, &timeout,
+ !tipc_dest_find(cong_links, node, 0) &&
+ tsk->group &&
+ !tipc_group_cong(tsk->group, node, port,
+ blks, &mbr));
+ if (unlikely(rc))
+ return rc;
+
+ /* Send, unless destination disappeared while waiting */
+ if (likely(mbr))
+ break;
+ }
+
+ if (unlikely(lookups >= 4))
+ return -EHOSTUNREACH;
+
+ rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);
+
+ return rc ? rc : dlen;
+}
+
+/**
+ * tipc_send_group_bcast - send message to all members in communication group
+ * @sock: socket structure
+ * @m: message to send
+ * @dlen: total length of message data
+ * @timeout: timeout to wait for wakeup
+ *
+ * Called from function tipc_sendmsg(), which has done all sanity checks
+ * Return: the number of bytes sent on success, or errno
+ */
+static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
+ int dlen, long timeout)
+{
+ struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
+ struct sock *sk = sock->sk;
+ struct net *net = sock_net(sk);
+ struct tipc_sock *tsk = tipc_sk(sk);
+ struct tipc_nlist *dsts;
+ struct tipc_mc_method *method = &tsk->mc_method;
+ bool ack = method->mandatory && method->rcast;
+ int blks = tsk_blocks(MCAST_H_SIZE + dlen);
+ struct tipc_msg *hdr = &tsk->phdr;
+ int mtu = tipc_bcast_get_mtu(net);
+ struct sk_buff_head pkts;
+ int rc = -EHOSTUNREACH;
+
+ /* Block or return if any destination link or member is congested */
+ rc = tipc_wait_for_cond(sock, &timeout,
+ !tsk->cong_link_cnt && tsk->group &&
+ !tipc_group_bc_cong(tsk->group, blks));
+ if (unlikely(rc))
+ return rc;
+
+ dsts = tipc_group_dests(tsk->group);
+ if (!dsts->local && !dsts->remote)
+ return -EHOSTUNREACH;
+
+ /* Complete message header */
+ if (ua) {
+ msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
+ msg_set_nameinst(hdr, ua->sa.instance);
+ } else {
+ msg_set_type(hdr, TIPC_GRP_BCAST_MSG);
+ msg_set_nameinst(hdr, 0);
+ }
+ msg_set_hdr_sz(hdr, GROUP_H_SIZE);
+ msg_set_destport(hdr, 0);
+ msg_set_destnode(hdr, 0);
+ msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));
+
+ /* Avoid getting stuck with repeated forced replicasts */
+ msg_set_grp_bc_ack_req(hdr, ack);
+
+ /* Build message as chain of buffers */
+ __skb_queue_head_init(&pkts);
+ rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
+ if (unlikely(rc != dlen))
+ return rc;
+
+ /* Send message */
+ rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
+ if (unlikely(rc))
+ return rc;
+
+ /* Update broadcast sequence number and send windows */
+ tipc_group_update_bc_members(tsk->group, blks, ack);
+
+ /* Broadcast link is now free to choose method for next broadcast */
+ method->mandatory = false;
+ method->expires = jiffies;
+
+ return dlen;
+}
+
+/**
+ * tipc_send_group_mcast - send message to all members with given identity
+ * @sock: socket structure
+ * @m: message to send
+ * @dlen: total length of message data
+ * @timeout: timeout to wait for wakeup
+ *
+ * Called from function tipc_sendmsg(), which has done all sanity checks
+ * Return: the number of bytes sent on success, or errno
+ */
+static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
+ int dlen, long timeout)
+{
+ struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
+ struct sock *sk = sock->sk;
+ struct tipc_sock *tsk = tipc_sk(sk);
+ struct tipc_group *grp = tsk->group;
+ struct tipc_msg *hdr = &tsk->phdr;
+ struct net *net = sock_net(sk);
+ struct list_head dsts;
+ u32 dstcnt, exclude;
+
+ INIT_LIST_HEAD(&dsts);
+ ua->sa.type = msg_nametype(hdr);
+ ua->scope = msg_lookup_scope(hdr);
+ exclude = tipc_group_exclude(grp);
+
+ if (!tipc_nametbl_lookup_group(net, ua, &dsts, &dstcnt, exclude, true))
+ return -EHOSTUNREACH;
+
+ if (dstcnt == 1) {
+ tipc_dest_pop(&dsts, &ua->sk.node, &ua->sk.ref);
+ return tipc_send_group_unicast(sock, m, dlen, timeout);
+ }
+
+ tipc_dest_list_purge(&dsts);
+ return tipc_send_group_bcast(sock, m, dlen, timeout);
+}
+
+/**
+ * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
+ * @net: the associated network namespace
+ * @arrvq: queue with arriving messages, to be cloned after destination lookup
+ * @inputq: queue with cloned messages, delivered to socket after dest lookup
+ *
+ * Multi-threaded: parallel calls referring to the same queues may occur
+ */
+void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
+ struct sk_buff_head *inputq)
+{
+ u32 self = tipc_own_addr(net);
+ struct sk_buff *skb, *_skb;
+ u32 portid, onode;
+ struct sk_buff_head tmpq;
+ struct list_head dports;
+ struct tipc_msg *hdr;
+ struct tipc_uaddr ua;
+ int user, mtyp, hlen;
+
+ __skb_queue_head_init(&tmpq);
+ INIT_LIST_HEAD(&dports);
+ ua.addrtype = TIPC_SERVICE_RANGE;
+
+ /* tipc_skb_peek() increments the head skb's reference counter */
+ skb = tipc_skb_peek(arrvq, &inputq->lock);
+ for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
+ hdr = buf_msg(skb);
+ user = msg_user(hdr);
+ mtyp = msg_type(hdr);
+ hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
+ onode = msg_orignode(hdr);
+ ua.sr.type = msg_nametype(hdr);
+ ua.sr.lower = msg_namelower(hdr);
+ ua.sr.upper = msg_nameupper(hdr);
+ if (onode == self)
+ ua.scope = TIPC_ANY_SCOPE;
+ else
+ ua.scope = TIPC_CLUSTER_SCOPE;
+
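+		/* Group unicasts and group protocol messages need no
+		 * multicast port lookup; move them straight from arrvq
+		 * to the input queue
+		 */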
+ if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
+ spin_lock_bh(&inputq->lock);
+ if (skb_peek(arrvq) == skb) {
+ __skb_dequeue(arrvq);
+ __skb_queue_tail(inputq, skb);
+ }
+ kfree_skb(skb);
+ spin_unlock_bh(&inputq->lock);
+ continue;
+ }
+
+ /* Group messages require exact scope match */
+ if (msg_in_group(hdr)) {
+ ua.sr.lower = 0;
+ ua.sr.upper = ~0;
+ ua.scope = msg_lookup_scope(hdr);
+ }
+
+ /* Create destination port list: */
+ tipc_nametbl_lookup_mcast_sockets(net, &ua, &dports);
+
+ /* Clone message per destination */
+ while (tipc_dest_pop(&dports, NULL, &portid)) {
+ _skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
+ if (_skb) {
+ msg_set_destport(buf_msg(_skb), portid);
+ __skb_queue_tail(&tmpq, _skb);
+ continue;
+ }
+ pr_warn("Failed to clone mcast rcv buffer\n");
+ }
+ /* Append clones to inputq only if skb is still head of arrvq */
+ spin_lock_bh(&inputq->lock);
+ if (skb_peek(arrvq) == skb) {
+ skb_queue_splice_tail_init(&tmpq, inputq);
+ /* Decrement the skb's refcnt */
+ kfree_skb(__skb_dequeue(arrvq));
+ }
+ spin_unlock_bh(&inputq->lock);
+ __skb_queue_purge(&tmpq);
+ kfree_skb(skb);
+ }
+ tipc_sk_rcv(net, inputq);
+}
+
+/* tipc_sk_push_backlog(): send accumulated buffers in socket write queue
+ * when socket is in Nagle mode
+ */
+static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack)
+{
+ struct sk_buff_head *txq = &tsk->sk.sk_write_queue;
+ struct sk_buff *skb = skb_peek_tail(txq);
+ struct net *net = sock_net(&tsk->sk);
+ u32 dnode = tsk_peer_node(tsk);
+ int rc;
+
+ if (nagle_ack) {
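+		/* Evaluate how well Nagle batching worked since the last
+		 * ACK: fewer than two messages per sent packet counts as
+		 * a bad result and doubles the threshold before Nagle is
+		 * tried again
+		 */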
+ tsk->pkt_cnt += skb_queue_len(txq);
+ if (!tsk->pkt_cnt || tsk->msg_acc / tsk->pkt_cnt < 2) {
+ tsk->oneway = 0;
+ if (tsk->nagle_start < NAGLE_START_MAX)
+ tsk->nagle_start *= 2;
+ tsk->expect_ack = false;
+ pr_debug("tsk %10u: bad nagle %u -> %u, next start %u!\n",
+ tsk->portid, tsk->msg_acc, tsk->pkt_cnt,
+ tsk->nagle_start);
+ } else {
+ tsk->nagle_start = NAGLE_START_INIT;
+ if (skb) {
+ msg_set_ack_required(buf_msg(skb));
+ tsk->expect_ack = true;
+ } else {
+ tsk->expect_ack = false;
+ }
+ }
+ tsk->msg_acc = 0;
+ tsk->pkt_cnt = 0;
+ }
+
+ if (!skb || tsk->cong_link_cnt)
+ return;
+
+ /* Do not send SYN again after congestion */
+ if (msg_is_syn(buf_msg(skb)))
+ return;
+
+ if (tsk->msg_acc)
+ tsk->pkt_cnt += skb_queue_len(txq);
+ tsk->snt_unacked += tsk->snd_backlog;
+ tsk->snd_backlog = 0;
+ rc = tipc_node_xmit(net, txq, dnode, tsk->portid);
+ if (rc == -ELINKCONG)
+ tsk->cong_link_cnt = 1;
+}
+
+/**
+ * tipc_sk_conn_proto_rcv - receive a connection management protocol message
+ * @tsk: receiving socket
+ * @skb: pointer to message buffer.
+ * @inputq: buffer list containing the buffers
+ * @xmitq: output message area
+ */
+static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
+ struct sk_buff_head *inputq,
+ struct sk_buff_head *xmitq)
+{
+ struct tipc_msg *hdr = buf_msg(skb);
+ u32 onode = tsk_own_node(tsk);
+ struct sock *sk = &tsk->sk;
+ int mtyp = msg_type(hdr);
+ bool was_cong;
+
+ /* Ignore if connection cannot be validated: */
+ if (!tsk_peer_msg(tsk, hdr)) {
+ trace_tipc_sk_drop_msg(sk, skb, TIPC_DUMP_NONE, "@proto_rcv!");
+ goto exit;
+ }
+
+ if (unlikely(msg_errcode(hdr))) {
+ tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+ tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
+ tsk_peer_port(tsk));
+ sk->sk_state_change(sk);
+
+ /* State change is ignored if socket already awake,
+ * - convert msg to abort msg and add to inqueue
+ */
+ msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
+ msg_set_type(hdr, TIPC_CONN_MSG);
+ msg_set_size(hdr, BASIC_H_SIZE);
+ msg_set_hdr_sz(hdr, BASIC_H_SIZE);
+ __skb_queue_tail(inputq, skb);
+ return;
+ }
+
+ tsk->probe_unacked = false;
+
+ if (mtyp == CONN_PROBE) {
+ msg_set_type(hdr, CONN_PROBE_REPLY);
+ if (tipc_msg_reverse(onode, &skb, TIPC_OK))
+ __skb_queue_tail(xmitq, skb);
+ return;
+ } else if (mtyp == CONN_ACK) {
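+		/* Peer acknowledged data: release acked blocks, pick up
+		 * any advertised window and wake up a writer blocked on
+		 * connection congestion
+		 */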
+ was_cong = tsk_conn_cong(tsk);
+ tipc_sk_push_backlog(tsk, msg_nagle_ack(hdr));
+ tsk->snt_unacked -= msg_conn_ack(hdr);
+ if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
+ tsk->snd_win = msg_adv_win(hdr);
+ if (was_cong && !tsk_conn_cong(tsk))
+ sk->sk_write_space(sk);
+ } else if (mtyp != CONN_PROBE_REPLY) {
+ pr_warn("Received unknown CONN_PROTO msg\n");
+ }
+exit:
+ kfree_skb(skb);
+}
+
+/**
+ * tipc_sendmsg - send message in connectionless manner
+ * @sock: socket structure
+ * @m: message to send
+ * @dsz: amount of user data to be sent
+ *
+ * Message must have a destination specified explicitly.
+ * Used for SOCK_RDM and SOCK_DGRAM messages,
+ * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
+ * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
+ *
+ * Return: the number of bytes sent on success, or errno otherwise
+ */
+static int tipc_sendmsg(struct socket *sock,
+ struct msghdr *m, size_t dsz)
+{
+ struct sock *sk = sock->sk;
+ int ret;
+
+ lock_sock(sk);
+ ret = __tipc_sendmsg(sock, m, dsz);
+ release_sock(sk);
+
+ return ret;
+}
+
+static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
+{
+ struct sock *sk = sock->sk;
+ struct net *net = sock_net(sk);
+ struct tipc_sock *tsk = tipc_sk(sk);
+ struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
+ long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
+ struct list_head *clinks = &tsk->cong_links;
+ bool syn = !tipc_sk_type_connectionless(sk);
+ struct tipc_group *grp = tsk->group;
+ struct tipc_msg *hdr = &tsk->phdr;
+ struct tipc_socket_addr skaddr;
+ struct sk_buff_head pkts;
+ int atype, mtu, rc;
+
+ if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
+ return -EMSGSIZE;
+
+ if (ua) {
+ if (!tipc_uaddr_valid(ua, m->msg_namelen))
+ return -EINVAL;
+ atype = ua->addrtype;
+ }
+
+ /* If socket belongs to a communication group follow other paths */
+ if (grp) {
+ if (!ua)
+ return tipc_send_group_bcast(sock, m, dlen, timeout);
+ if (atype == TIPC_SERVICE_ADDR)
+ return tipc_send_group_anycast(sock, m, dlen, timeout);
+ if (atype == TIPC_SOCKET_ADDR)
+ return tipc_send_group_unicast(sock, m, dlen, timeout);
+ if (atype == TIPC_SERVICE_RANGE)
+ return tipc_send_group_mcast(sock, m, dlen, timeout);
+ return -EINVAL;
+ }
+
+ if (!ua) {
+ ua = (struct tipc_uaddr *)&tsk->peer;
+ if (!syn && ua->family != AF_TIPC)
+ return -EDESTADDRREQ;
+ atype = ua->addrtype;
+ }
+
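+	/* Connection oriented socket: this send doubles as the connection
+	 * SYN, so validate the socket state before marking the header
+	 */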
+ if (unlikely(syn)) {
+ if (sk->sk_state == TIPC_LISTEN)
+ return -EPIPE;
+ if (sk->sk_state != TIPC_OPEN)
+ return -EISCONN;
+ if (tsk->published)
+ return -EOPNOTSUPP;
+ if (atype == TIPC_SERVICE_ADDR)
+ tsk->conn_addrtype = atype;
+ msg_set_syn(hdr, 1);
+ }
+
+ memset(&skaddr, 0, sizeof(skaddr));
+
+ /* Determine destination */
+ if (atype == TIPC_SERVICE_RANGE) {
+ return tipc_sendmcast(sock, ua, m, dlen, timeout);
+ } else if (atype == TIPC_SERVICE_ADDR) {
+ skaddr.node = ua->lookup_node;
+ ua->scope = tipc_node2scope(skaddr.node);
+ if (!tipc_nametbl_lookup_anycast(net, ua, &skaddr))
+ return -EHOSTUNREACH;
+ } else if (atype == TIPC_SOCKET_ADDR) {
+ skaddr = ua->sk;
+ } else {
+ return -EINVAL;
+ }
+
+ /* Block or return if destination link is congested */
+ rc = tipc_wait_for_cond(sock, &timeout,
+ !tipc_dest_find(clinks, skaddr.node, 0));
+ if (unlikely(rc))
+ return rc;
+
+ /* Finally build message header */
+ msg_set_destnode(hdr, skaddr.node);
+ msg_set_destport(hdr, skaddr.ref);
+ if (atype == TIPC_SERVICE_ADDR) {
+ msg_set_type(hdr, TIPC_NAMED_MSG);
+ msg_set_hdr_sz(hdr, NAMED_H_SIZE);
+ msg_set_nametype(hdr, ua->sa.type);
+ msg_set_nameinst(hdr, ua->sa.instance);
+ msg_set_lookup_scope(hdr, ua->scope);
+ } else { /* TIPC_SOCKET_ADDR */
+ msg_set_type(hdr, TIPC_DIRECT_MSG);
+ msg_set_lookup_scope(hdr, 0);
+ msg_set_hdr_sz(hdr, BASIC_H_SIZE);
+ }
+
+ /* Add message body */
+ __skb_queue_head_init(&pkts);
+ mtu = tipc_node_get_mtu(net, skaddr.node, tsk->portid, true);
+ rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
+ if (unlikely(rc != dlen))
+ return rc;
+ if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue))) {
+ __skb_queue_purge(&pkts);
+ return -ENOMEM;
+ }
+
+ /* Send message */
+ trace_tipc_sk_sendmsg(sk, skb_peek(&pkts), TIPC_DUMP_SK_SNDQ, " ");
+ rc = tipc_node_xmit(net, &pkts, skaddr.node, tsk->portid);
+ if (unlikely(rc == -ELINKCONG)) {
+ tipc_dest_push(clinks, skaddr.node, 0);
+ tsk->cong_link_cnt++;
+ rc = 0;
+ }
+
+ if (unlikely(syn && !rc)) {
+ tipc_set_sk_state(sk, TIPC_CONNECTING);
+ if (dlen && timeout) {
+ timeout = msecs_to_jiffies(timeout);
+ tipc_wait_for_connect(sock, &timeout);
+ }
+ }
+
+ return rc ? rc : dlen;
+}
+
+/**
+ * tipc_sendstream - send stream-oriented data
+ * @sock: socket structure
+ * @m: data to send
+ * @dsz: total length of data to be transmitted
+ *
+ * Used for SOCK_STREAM data.
+ *
+ * Return: the number of bytes sent on success (or partial success),
+ * or errno if no data sent
+ */
+static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
+{
+ struct sock *sk = sock->sk;
+ int ret;
+
+ lock_sock(sk);
+ ret = __tipc_sendstream(sock, m, dsz);
+ release_sock(sk);
+
+ return ret;
+}
+
+static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
+{
+ struct sock *sk = sock->sk;
+ DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
+ long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
+ struct sk_buff_head *txq = &sk->sk_write_queue;
+ struct tipc_sock *tsk = tipc_sk(sk);
+ struct tipc_msg *hdr = &tsk->phdr;
+ struct net *net = sock_net(sk);
+ struct sk_buff *skb;
+ u32 dnode = tsk_peer_node(tsk);
+ int maxnagle = tsk->maxnagle;
+ int maxpkt = tsk->max_pkt;
+ int send, sent = 0;
+ int blocks, rc = 0;
+
+ if (unlikely(dlen > INT_MAX))
+ return -EMSGSIZE;
+
+ /* Handle implicit connection setup */
+ if (unlikely(dest && sk->sk_state == TIPC_OPEN)) {
+ rc = __tipc_sendmsg(sock, m, dlen);
+ if (dlen && dlen == rc) {
+ tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
+ tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
+ }
+ return rc;
+ }
+
+ do {
+ rc = tipc_wait_for_cond(sock, &timeout,
+ (!tsk->cong_link_cnt &&
+ !tsk_conn_cong(tsk) &&
+ tipc_sk_connected(sk)));
+ if (unlikely(rc))
+ break;
+ send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
+ blocks = tsk->snd_backlog;
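+		/* Nagle path: small messages on a one-way flow are appended
+		 * to the write queue and may be held back until the
+		 * outstanding ACK arrives or the backlog exceeds 64 blocks
+		 */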
+ if (tsk->oneway++ >= tsk->nagle_start && maxnagle &&
+ send <= maxnagle) {
+ rc = tipc_msg_append(hdr, m, send, maxnagle, txq);
+ if (unlikely(rc < 0))
+ break;
+ blocks += rc;
+ tsk->msg_acc++;
+ if (blocks <= 64 && tsk->expect_ack) {
+ tsk->snd_backlog = blocks;
+ sent += send;
+ break;
+ } else if (blocks > 64) {
+ tsk->pkt_cnt += skb_queue_len(txq);
+ } else {
+ skb = skb_peek_tail(txq);
+ if (skb) {
+ msg_set_ack_required(buf_msg(skb));
+ tsk->expect_ack = true;
+ } else {
+ tsk->expect_ack = false;
+ }
+ tsk->msg_acc = 0;
+ tsk->pkt_cnt = 0;
+ }
+ } else {
+ rc = tipc_msg_build(hdr, m, sent, send, maxpkt, txq);
+ if (unlikely(rc != send))
+ break;
+ blocks += tsk_inc(tsk, send + MIN_H_SIZE);
+ }
+ trace_tipc_sk_sendstream(sk, skb_peek(txq),
+ TIPC_DUMP_SK_SNDQ, " ");
+ rc = tipc_node_xmit(net, txq, dnode, tsk->portid);
+ if (unlikely(rc == -ELINKCONG)) {
+ tsk->cong_link_cnt = 1;
+ rc = 0;
+ }
+ if (likely(!rc)) {
+ tsk->snt_unacked += blocks;
+ tsk->snd_backlog = 0;
+ sent += send;
+ }
+ } while (sent < dlen && !rc);
+
+ return sent ? sent : rc;
+}
+
+/**
+ * tipc_send_packet - send a connection-oriented message
+ * @sock: socket structure
+ * @m: message to send
+ * @dsz: length of data to be transmitted
+ *
+ * Used for SOCK_SEQPACKET messages.
+ *
+ * Return: the number of bytes sent on success, or errno otherwise
+ */
+static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
+{
+ if (dsz > TIPC_MAX_USER_MSG_SIZE)
+ return -EMSGSIZE;
+
+ return tipc_sendstream(sock, m, dsz);
+}
+
+/* tipc_sk_finish_conn - complete the setup of a connection
+ */
+static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
+ u32 peer_node)
+{
+ struct sock *sk = &tsk->sk;
+ struct net *net = sock_net(sk);
+ struct tipc_msg *msg = &tsk->phdr;
+
+ msg_set_syn(msg, 0);
+ msg_set_destnode(msg, peer_node);
+ msg_set_destport(msg, peer_port);
+ msg_set_type(msg, TIPC_CONN_MSG);
+ msg_set_lookup_scope(msg, 0);
+ msg_set_hdr_sz(msg, SHORT_H_SIZE);
+
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
+ tipc_set_sk_state(sk, TIPC_ESTABLISHED);
+ tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
+ tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid, true);
+ tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
+ tsk_set_nagle(tsk);
+ __skb_queue_purge(&sk->sk_write_queue);
+ if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
+ return;
+
+ /* Fall back to message based flow control */
+ tsk->rcv_win = FLOWCTL_MSG_WIN;
+ tsk->snd_win = FLOWCTL_MSG_WIN;
+}
+
+/**
+ * tipc_sk_set_orig_addr - capture sender's address for received message
+ * @m: descriptor for message info
+ * @skb: received message
+ *
+ * Note: Address is not captured if not requested by receiver.
+ */
+static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
+{
+ DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name);
+ struct tipc_msg *hdr = buf_msg(skb);
+
+ if (!srcaddr)
+ return;
+
+ srcaddr->sock.family = AF_TIPC;
+ srcaddr->sock.addrtype = TIPC_SOCKET_ADDR;
+ srcaddr->sock.scope = 0;
+ srcaddr->sock.addr.id.ref = msg_origport(hdr);
+ srcaddr->sock.addr.id.node = msg_orignode(hdr);
+ srcaddr->sock.addr.name.domain = 0;
+ m->msg_namelen = sizeof(struct sockaddr_tipc);
+
+ if (!msg_in_group(hdr))
+ return;
+
+ /* Group message users may also want to know sending member's id */
+ srcaddr->member.family = AF_TIPC;
+ srcaddr->member.addrtype = TIPC_SERVICE_ADDR;
+ srcaddr->member.scope = 0;
+ srcaddr->member.addr.name.name.type = msg_nametype(hdr);
+ srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
+ srcaddr->member.addr.name.domain = 0;
+ m->msg_namelen = sizeof(*srcaddr);
+}
+
+/**
+ * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
+ * @m: descriptor for message info
+ * @skb: received message buffer
+ * @tsk: TIPC port associated with message
+ *
+ * Note: Ancillary data is not captured if not requested by receiver.
+ *
+ * Return: 0 if successful, otherwise errno
+ */
+static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
+ struct tipc_sock *tsk)
+{
+ struct tipc_msg *hdr;
+ u32 data[3] = {0,};
+ bool has_addr;
+ int dlen, rc;
+
+ if (likely(m->msg_controllen == 0))
+ return 0;
+
+ hdr = buf_msg(skb);
+ dlen = msg_data_sz(hdr);
+
+ /* Capture errored message object, if any */
+ if (msg_errcode(hdr)) {
+ if (skb_linearize(skb))
+ return -ENOMEM;
+ hdr = buf_msg(skb);
+ data[0] = msg_errcode(hdr);
+ data[1] = dlen;
+ rc = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, data);
+ if (rc || !dlen)
+ return rc;
+ rc = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, dlen, msg_data(hdr));
+ if (rc)
+ return rc;
+ }
+
+ /* Capture TIPC_SERVICE_ADDR/RANGE destination address, if any */
+ switch (msg_type(hdr)) {
+ case TIPC_NAMED_MSG:
+ has_addr = true;
+ data[0] = msg_nametype(hdr);
+ data[1] = msg_namelower(hdr);
+ data[2] = data[1];
+ break;
+ case TIPC_MCAST_MSG:
+ has_addr = true;
+ data[0] = msg_nametype(hdr);
+ data[1] = msg_namelower(hdr);
+ data[2] = msg_nameupper(hdr);
+ break;
+ case TIPC_CONN_MSG:
+ has_addr = !!tsk->conn_addrtype;
+ data[0] = msg_nametype(&tsk->phdr);
+ data[1] = msg_nameinst(&tsk->phdr);
+ data[2] = data[1];
+ break;
+ default:
+ has_addr = false;
+ }
+ if (!has_addr)
+ return 0;
+ return put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, data);
+}
+
+static struct sk_buff *tipc_sk_build_ack(struct tipc_sock *tsk)
+{
+ struct sock *sk = &tsk->sk;
+ struct sk_buff *skb = NULL;
+ struct tipc_msg *msg;
+ u32 peer_port = tsk_peer_port(tsk);
+ u32 dnode = tsk_peer_node(tsk);
+
+ if (!tipc_sk_connected(sk))
+ return NULL;
+ skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
+ dnode, tsk_own_node(tsk), peer_port,
+ tsk->portid, TIPC_OK);
+ if (!skb)
+ return NULL;
+ msg = buf_msg(skb);
+ msg_set_conn_ack(msg, tsk->rcv_unacked);
+ tsk->rcv_unacked = 0;
+
+	/* Adjust to and advertise the correct window limit */
+ if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
+ tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
+ msg_set_adv_win(msg, tsk->rcv_win);
+ }
+ return skb;
+}
+
+static void tipc_sk_send_ack(struct tipc_sock *tsk)
+{
+ struct sk_buff *skb;
+
+ skb = tipc_sk_build_ack(tsk);
+ if (!skb)
+ return;
+
+ tipc_node_xmit_skb(sock_net(&tsk->sk), skb, tsk_peer_node(tsk),
+ msg_link_selector(buf_msg(skb)));
+}
+
+static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
+{
+ struct sock *sk = sock->sk;
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ long timeo = *timeop;
+ int err = sock_error(sk);
+
+ if (err)
+ return err;
+
+ for (;;) {
+ if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
+ if (sk->sk_shutdown & RCV_SHUTDOWN) {
+ err = -ENOTCONN;
+ break;
+ }
+ add_wait_queue(sk_sleep(sk), &wait);
+ release_sock(sk);
+ timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
+ sched_annotate_sleep();
+ lock_sock(sk);
+ remove_wait_queue(sk_sleep(sk), &wait);
+ }
+ err = 0;
+ if (!skb_queue_empty(&sk->sk_receive_queue))
+ break;
+ err = -EAGAIN;
+ if (!timeo)
+ break;
+ err = sock_intr_errno(timeo);
+ if (signal_pending(current))
+ break;
+
+ err = sock_error(sk);
+ if (err)
+ break;
+ }
+ *timeop = timeo;
+ return err;
+}
+
+/**
+ * tipc_recvmsg - receive packet-oriented message
+ * @sock: network socket
+ * @m: descriptor for message info
+ * @buflen: length of user buffer area
+ * @flags: receive flags
+ *
+ * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
+ * If the complete message doesn't fit in user area, truncate it.
+ *
+ * Return: size of returned message data, errno otherwise
+ */
+static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
+ size_t buflen, int flags)
+{
+ struct sock *sk = sock->sk;
+ bool connected = !tipc_sk_type_connectionless(sk);
+ struct tipc_sock *tsk = tipc_sk(sk);
+ int rc, err, hlen, dlen, copy;
+ struct tipc_skb_cb *skb_cb;
+ struct sk_buff_head xmitq;
+ struct tipc_msg *hdr;
+ struct sk_buff *skb;
+ bool grp_evt;
+ long timeout;
+
+ /* Catch invalid receive requests */
+ if (unlikely(!buflen))
+ return -EINVAL;
+
+ lock_sock(sk);
+ if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
+ rc = -ENOTCONN;
+ goto exit;
+ }
+ timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+
+ /* Step rcv queue to first msg with data or error; wait if necessary */
+ do {
+ rc = tipc_wait_for_rcvmsg(sock, &timeout);
+ if (unlikely(rc))
+ goto exit;
+ skb = skb_peek(&sk->sk_receive_queue);
+ skb_cb = TIPC_SKB_CB(skb);
+ hdr = buf_msg(skb);
+ dlen = msg_data_sz(hdr);
+ hlen = msg_hdr_sz(hdr);
+ err = msg_errcode(hdr);
+ grp_evt = msg_is_grp_evt(hdr);
+ if (likely(dlen || err))
+ break;
+ tsk_advance_rx_queue(sk);
+ } while (1);
+
+ /* Collect msg meta data, including error code and rejected data */
+ tipc_sk_set_orig_addr(m, skb);
+ rc = tipc_sk_anc_data_recv(m, skb, tsk);
+ if (unlikely(rc))
+ goto exit;
+ hdr = buf_msg(skb);
+
+ /* Capture data if non-error msg, otherwise just set return value */
+ if (likely(!err)) {
+ int offset = skb_cb->bytes_read;
+
+ copy = min_t(int, dlen - offset, buflen);
+ rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
+ if (unlikely(rc))
+ goto exit;
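+		/* Message did not fit completely: with MSG_EOR (and no
+		 * MSG_PEEK) the read position is saved for the next call,
+		 * otherwise MSG_TRUNC is set and the remainder is dropped
+		 */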
+ if (unlikely(offset + copy < dlen)) {
+ if (flags & MSG_EOR) {
+ if (!(flags & MSG_PEEK))
+ skb_cb->bytes_read = offset + copy;
+ } else {
+ m->msg_flags |= MSG_TRUNC;
+ skb_cb->bytes_read = 0;
+ }
+ } else {
+ if (flags & MSG_EOR)
+ m->msg_flags |= MSG_EOR;
+ skb_cb->bytes_read = 0;
+ }
+ } else {
+ copy = 0;
+ rc = 0;
+ if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control) {
+ rc = -ECONNRESET;
+ goto exit;
+ }
+ }
+
+ /* Mark message as group event if applicable */
+ if (unlikely(grp_evt)) {
+ if (msg_grp_evt(hdr) == TIPC_WITHDRAWN)
+ m->msg_flags |= MSG_EOR;
+ m->msg_flags |= MSG_OOB;
+ copy = 0;
+ }
+
+	/* Capture of data or error code/rejected data was successful */
+ if (unlikely(flags & MSG_PEEK))
+ goto exit;
+
+ /* Send group flow control advertisement when applicable */
+ if (tsk->group && msg_in_group(hdr) && !grp_evt) {
+ __skb_queue_head_init(&xmitq);
+ tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
+ msg_orignode(hdr), msg_origport(hdr),
+ &xmitq);
+ tipc_node_distr_xmit(sock_net(sk), &xmitq);
+ }
+
+ if (skb_cb->bytes_read)
+ goto exit;
+
+ tsk_advance_rx_queue(sk);
+
+ if (likely(!connected))
+ goto exit;
+
+ /* Send connection flow control advertisement when applicable */
+ tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
+ if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
+ tipc_sk_send_ack(tsk);
+exit:
+ release_sock(sk);
+ return rc ? rc : copy;
+}
+
+/**
+ * tipc_recvstream - receive stream-oriented data
+ * @sock: network socket
+ * @m: descriptor for message info
+ * @buflen: total size of user buffer area
+ * @flags: receive flags
+ *
+ * Used for SOCK_STREAM messages only. If not enough data is available,
+ * it will optionally wait for more; data is never truncated.
+ *
+ * Return: size of returned message data, errno otherwise
+ */
+static int tipc_recvstream(struct socket *sock, struct msghdr *m,
+ size_t buflen, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct tipc_sock *tsk = tipc_sk(sk);
+ struct sk_buff *skb;
+ struct tipc_msg *hdr;
+ struct tipc_skb_cb *skb_cb;
+ bool peek = flags & MSG_PEEK;
+ int offset, required, copy, copied = 0;
+ int hlen, dlen, err, rc;
+ long timeout;
+
+ /* Catch invalid receive attempts */
+ if (unlikely(!buflen))
+ return -EINVAL;
+
+ lock_sock(sk);
+
+ if (unlikely(sk->sk_state == TIPC_OPEN)) {
+ rc = -ENOTCONN;
+ goto exit;
+ }
+ required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen);
+ timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+
+ do {
+ /* Look at first msg in receive queue; wait if necessary */
+ rc = tipc_wait_for_rcvmsg(sock, &timeout);
+ if (unlikely(rc))
+ break;
+ skb = skb_peek(&sk->sk_receive_queue);
+ skb_cb = TIPC_SKB_CB(skb);
+ hdr = buf_msg(skb);
+ dlen = msg_data_sz(hdr);
+ hlen = msg_hdr_sz(hdr);
+ err = msg_errcode(hdr);
+
+ /* Discard any empty non-errored (SYN-) message */
+ if (unlikely(!dlen && !err)) {
+ tsk_advance_rx_queue(sk);
+ continue;
+ }
+
+ /* Collect msg meta data, incl. error code and rejected data */
+ if (!copied) {
+ tipc_sk_set_orig_addr(m, skb);
+ rc = tipc_sk_anc_data_recv(m, skb, tsk);
+ if (rc)
+ break;
+ hdr = buf_msg(skb);
+ }
+
+ /* Copy data if msg ok, otherwise return error/partial data */
+ if (likely(!err)) {
+ offset = skb_cb->bytes_read;
+ copy = min_t(int, dlen - offset, buflen - copied);
+ rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
+ if (unlikely(rc))
+ break;
+ copied += copy;
+ offset += copy;
+ if (unlikely(offset < dlen)) {
+ if (!peek)
+ skb_cb->bytes_read = offset;
+ break;
+ }
+ } else {
+ rc = 0;
+ if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control)
+ rc = -ECONNRESET;
+ if (copied || rc)
+ break;
+ }
+
+ if (unlikely(peek))
+ break;
+
+ tsk_advance_rx_queue(sk);
+
+ /* Send connection flow control advertisement when applicable */
+ tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
+ if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
+ tipc_sk_send_ack(tsk);
+
+ /* Exit if all requested data or FIN/error received */
+ if (copied == buflen || err)
+ break;
+
+ } while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);
+exit:
+ release_sock(sk);
+ return copied ? copied : rc;
+}
+
+/**
+ * tipc_write_space - wake up thread if port congestion is released
+ * @sk: socket
+ */
+static void tipc_write_space(struct sock *sk)
+{
+ struct socket_wq *wq;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (skwq_has_sleeper(wq))
+ wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
+ EPOLLWRNORM | EPOLLWRBAND);
+ rcu_read_unlock();
+}
+
+/**
+ * tipc_data_ready - wake up threads to indicate messages have been received
+ * @sk: socket
+ */
+static void tipc_data_ready(struct sock *sk)
+{
+ struct socket_wq *wq;
+
+ trace_sk_data_ready(sk);
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (skwq_has_sleeper(wq))
+ wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
+ EPOLLRDNORM | EPOLLRDBAND);
+ rcu_read_unlock();
+}
+
+static void tipc_sock_destruct(struct sock *sk)
+{
+ __skb_queue_purge(&sk->sk_receive_queue);
+}
+
+static void tipc_sk_proto_rcv(struct sock *sk,
+ struct sk_buff_head *inputq,
+ struct sk_buff_head *xmitq)
+{
+ struct sk_buff *skb = __skb_dequeue(inputq);
+ struct tipc_sock *tsk = tipc_sk(sk);
+ struct tipc_msg *hdr = buf_msg(skb);
+ struct tipc_group *grp = tsk->group;
+ bool wakeup = false;
+
+ switch (msg_user(hdr)) {
+ case CONN_MANAGER:
+ tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
+ return;
+ case SOCK_WAKEUP:
+ tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
+ /* coupled with smp_rmb() in tipc_wait_for_cond() */
+ smp_wmb();
+ tsk->cong_link_cnt--;
+ wakeup = true;
+ tipc_sk_push_backlog(tsk, false);
+ break;
+ case GROUP_PROTOCOL:
+ tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq);
+ break;
+ case TOP_SRV:
+ tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
+ hdr, inputq, xmitq);
+ break;
+ default:
+ break;
+ }
+
+ if (wakeup)
+ sk->sk_write_space(sk);
+
+ kfree_skb(skb);
+}
+
+/**
+ * tipc_sk_filter_connect - check incoming message for a connection-based socket
+ * @tsk: TIPC socket
+ * @skb: pointer to message buffer.
+ * @xmitq: for Nagle ACK if any
+ * Return: true if message should be added to receive queue, false otherwise
+ */
+static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb,
+ struct sk_buff_head *xmitq)
+{
+ struct sock *sk = &tsk->sk;
+ struct net *net = sock_net(sk);
+ struct tipc_msg *hdr = buf_msg(skb);
+ bool con_msg = msg_connected(hdr);
+ u32 pport = tsk_peer_port(tsk);
+ u32 pnode = tsk_peer_node(tsk);
+ u32 oport = msg_origport(hdr);
+ u32 onode = msg_orignode(hdr);
+ int err = msg_errcode(hdr);
+ unsigned long delay;
+
+ if (unlikely(msg_mcast(hdr)))
+ return false;
+ tsk->oneway = 0;
+
+ switch (sk->sk_state) {
+ case TIPC_CONNECTING:
+ /* Setup ACK */
+ if (likely(con_msg)) {
+ if (err)
+ break;
+ tipc_sk_finish_conn(tsk, oport, onode);
+ msg_set_importance(&tsk->phdr, msg_importance(hdr));
+ /* ACK+ message with data is added to receive queue */
+ if (msg_data_sz(hdr))
+ return true;
+			/* Empty ACK-: wake up sleeping connect() and drop */
+ sk->sk_state_change(sk);
+ msg_set_dest_droppable(hdr, 1);
+ return false;
+ }
+ /* Ignore connectionless message if not from listening socket */
+ if (oport != pport || onode != pnode)
+ return false;
+
+ /* Rejected SYN */
+ if (err != TIPC_ERR_OVERLOAD)
+ break;
+
+ /* Prepare for new setup attempt if we have a SYN clone */
+ if (skb_queue_empty(&sk->sk_write_queue))
+ break;
+ get_random_bytes(&delay, 2);
+ delay %= (tsk->conn_timeout / 4);
+ delay = msecs_to_jiffies(delay + 100);
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
+ return false;
+ case TIPC_OPEN:
+ case TIPC_DISCONNECTING:
+ return false;
+ case TIPC_LISTEN:
+ /* Accept only SYN message */
+ if (!msg_is_syn(hdr) &&
+ tipc_node_get_capabilities(net, onode) & TIPC_SYN_BIT)
+ return false;
+ if (!con_msg && !err)
+ return true;
+ return false;
+ case TIPC_ESTABLISHED:
+ if (!skb_queue_empty(&sk->sk_write_queue))
+ tipc_sk_push_backlog(tsk, false);
+ /* Accept only connection-based messages sent by peer */
+ if (likely(con_msg && !err && pport == oport &&
+ pnode == onode)) {
+ if (msg_ack_required(hdr)) {
+ struct sk_buff *skb;
+
+ skb = tipc_sk_build_ack(tsk);
+ if (skb) {
+ msg_set_nagle_ack(buf_msg(skb));
+ __skb_queue_tail(xmitq, skb);
+ }
+ }
+ return true;
+ }
+ if (!tsk_peer_msg(tsk, hdr))
+ return false;
+ if (!err)
+ return true;
+ tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+ tipc_node_remove_conn(net, pnode, tsk->portid);
+ sk->sk_state_change(sk);
+ return true;
+ default:
+ pr_err("Unknown sk_state %u\n", sk->sk_state);
+ }
+ /* Abort connection setup attempt */
+ tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+ sk->sk_err = ECONNREFUSED;
+ sk->sk_state_change(sk);
+ return true;
+}
+
+/**
+ * rcvbuf_limit - get proper overload limit of socket receive queue
+ * @sk: socket
+ * @skb: message
+ *
+ * For connection oriented messages, irrespective of importance,
+ * default queue limit is 2 MB.
+ *
+ * For connectionless messages, queue limits are based on message
+ * importance as follows:
+ *
+ * TIPC_LOW_IMPORTANCE (2 MB)
+ * TIPC_MEDIUM_IMPORTANCE (4 MB)
+ * TIPC_HIGH_IMPORTANCE (8 MB)
+ * TIPC_CRITICAL_IMPORTANCE (16 MB)
+ *
+ * Return: overload limit according to corresponding message importance
+ */
+static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
+{
+ struct tipc_sock *tsk = tipc_sk(sk);
+ struct tipc_msg *hdr = buf_msg(skb);
+
+ if (unlikely(msg_in_group(hdr)))
+ return READ_ONCE(sk->sk_rcvbuf);
+
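+	/* Connectionless: scale the base limit by message importance
+	 * (1x, 2x, 4x or 8x), giving the 2/4/8/16 MB defaults above
+	 */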
+ if (unlikely(!msg_connected(hdr)))
+ return READ_ONCE(sk->sk_rcvbuf) << msg_importance(hdr);
+
+ if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
+ return READ_ONCE(sk->sk_rcvbuf);
+
+ return FLOWCTL_MSG_LIM;
+}
+
+/**
+ * tipc_sk_filter_rcv - validate incoming message
+ * @sk: socket
+ * @skb: pointer to message.
+ * @xmitq: output message area (FIXME)
+ *
+ * Enqueues message on receive queue if acceptable; optionally handles
+ * disconnect indication for a connected socket.
+ *
+ * Called with socket lock already taken
+ */
+static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
+ struct sk_buff_head *xmitq)
+{
+ bool sk_conn = !tipc_sk_type_connectionless(sk);
+ struct tipc_sock *tsk = tipc_sk(sk);
+ struct tipc_group *grp = tsk->group;
+ struct tipc_msg *hdr = buf_msg(skb);
+ struct net *net = sock_net(sk);
+ struct sk_buff_head inputq;
+ int mtyp = msg_type(hdr);
+ int limit, err = TIPC_OK;
+
+ trace_tipc_sk_filter_rcv(sk, skb, TIPC_DUMP_ALL, " ");
+ TIPC_SKB_CB(skb)->bytes_read = 0;
+ __skb_queue_head_init(&inputq);
+ __skb_queue_tail(&inputq, skb);
+
+ if (unlikely(!msg_isdata(hdr)))
+ tipc_sk_proto_rcv(sk, &inputq, xmitq);
+
+ if (unlikely(grp))
+ tipc_group_filter_msg(grp, &inputq, xmitq);
+
+ if (unlikely(!grp) && mtyp == TIPC_MCAST_MSG)
+ tipc_mcast_filter_msg(net, &tsk->mc_method.deferredq, &inputq);
+
+ /* Validate and add to receive buffer if there is space */
+ while ((skb = __skb_dequeue(&inputq))) {
+ hdr = buf_msg(skb);
+ limit = rcvbuf_limit(sk, skb);
+ if ((sk_conn && !tipc_sk_filter_connect(tsk, skb, xmitq)) ||
+ (!sk_conn && msg_connected(hdr)) ||
+ (!grp && msg_in_group(hdr)))
+ err = TIPC_ERR_NO_PORT;
+ else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
+ trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL,
+ "err_overload2!");
+ atomic_inc(&sk->sk_drops);
+ err = TIPC_ERR_OVERLOAD;
+ }
+
+ if (unlikely(err)) {
+ if (tipc_msg_reverse(tipc_own_addr(net), &skb, err)) {
+ trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE,
+ "@filter_rcv!");
+ __skb_queue_tail(xmitq, skb);
+ }
+ err = TIPC_OK;
+ continue;
+ }
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
+ skb_set_owner_r(skb, sk);
+ trace_tipc_sk_overlimit2(sk, skb, TIPC_DUMP_ALL,
+ "rcvq >90% allocated!");
+ sk->sk_data_ready(sk);
+ }
+}
+
+/**
+ * tipc_sk_backlog_rcv - handle incoming message from backlog queue
+ * @sk: socket
+ * @skb: message
+ *
+ * Caller must hold socket lock
+ */
+static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
+{
+ unsigned int before = sk_rmem_alloc_get(sk);
+ struct sk_buff_head xmitq;
+ unsigned int added;
+
+ __skb_queue_head_init(&xmitq);
+
+ tipc_sk_filter_rcv(sk, skb, &xmitq);
+ added = sk_rmem_alloc_get(sk) - before;
+ atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt);
+
+ /* Send pending response/rejected messages, if any */
+ tipc_node_distr_xmit(sock_net(sk), &xmitq);
+ return 0;
+}
+
+/**
+ * tipc_sk_enqueue - extract all buffers with destination 'dport' from
+ * inputq and try adding them to socket or backlog queue
+ * @inputq: list of incoming buffers with potentially different destinations
+ * @sk: socket where the buffers should be enqueued
+ * @dport: port number for the socket
+ * @xmitq: output queue
+ *
+ * Caller must hold socket lock
+ */
+static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
+ u32 dport, struct sk_buff_head *xmitq)
+{
+ unsigned long time_limit = jiffies + usecs_to_jiffies(20000);
+ struct sk_buff *skb;
+ unsigned int lim;
+ atomic_t *dcnt;
+ u32 onode;
+
+ while (skb_queue_len(inputq)) {
+ if (unlikely(time_after_eq(jiffies, time_limit)))
+ return;
+
+ skb = tipc_skb_dequeue(inputq, dport);
+ if (unlikely(!skb))
+ return;
+
+ /* Add message directly to receive queue if possible */
+ if (!sock_owned_by_user(sk)) {
+ tipc_sk_filter_rcv(sk, skb, xmitq);
+ continue;
+ }
+
+ /* Try backlog, compensating for double-counted bytes */
+ dcnt = &tipc_sk(sk)->dupl_rcvcnt;
+ if (!sk->sk_backlog.len)
+ atomic_set(dcnt, 0);
+ lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
+ if (likely(!sk_add_backlog(sk, skb, lim))) {
+ trace_tipc_sk_overlimit1(sk, skb, TIPC_DUMP_ALL,
+ "bklg & rcvq >90% allocated!");
+ continue;
+ }
+
+ trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL, "err_overload!");
+ /* Overload => reject message back to sender */
+ onode = tipc_own_addr(sock_net(sk));
+ atomic_inc(&sk->sk_drops);
+ if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) {
+ trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_ALL,
+ "@sk_enqueue!");
+ __skb_queue_tail(xmitq, skb);
+ }
+ break;
+ }
+}
+
+/**
+ * tipc_sk_rcv - handle a chain of incoming buffers
+ * @net: the associated network namespace
+ * @inputq: buffer list containing the buffers
+ * Consumes all buffers in list until inputq is empty
+ * Note: may be called in multiple threads referring to the same queue
+ */
+void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
+{
+ struct sk_buff_head xmitq;
+ u32 dnode, dport = 0;
+ int err;
+ struct tipc_sock *tsk;
+ struct sock *sk;
+ struct sk_buff *skb;
+
+ __skb_queue_head_init(&xmitq);
+ while (skb_queue_len(inputq)) {
+ dport = tipc_skb_peek_port(inputq, dport);
+ tsk = tipc_sk_lookup(net, dport);
+
+ if (likely(tsk)) {
+ sk = &tsk->sk;
+ if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
+ tipc_sk_enqueue(inputq, sk, dport, &xmitq);
+ spin_unlock_bh(&sk->sk_lock.slock);
+ }
+ /* Send pending response/rejected messages, if any */
+ tipc_node_distr_xmit(sock_net(sk), &xmitq);
+ sock_put(sk);
+ continue;
+ }
+ /* No destination socket => dequeue skb if still there */
+ skb = tipc_skb_dequeue(inputq, dport);
+ if (!skb)
+ return;
+
+ /* Try secondary lookup if unresolved named message */
+ err = TIPC_ERR_NO_PORT;
+ if (tipc_msg_lookup_dest(net, skb, &err))
+ goto xmit;
+
+ /* Prepare for message rejection */
+ if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
+ continue;
+
+ trace_tipc_sk_rej_msg(NULL, skb, TIPC_DUMP_NONE, "@sk_rcv!");
+xmit:
+ dnode = msg_destnode(buf_msg(skb));
+ tipc_node_xmit_skb(net, skb, dnode, dport);
+ }
+}
+
+static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
+{
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ struct sock *sk = sock->sk;
+ int done;
+
+ do {
+ int err = sock_error(sk);
+ if (err)
+ return err;
+ if (!*timeo_p)
+ return -ETIMEDOUT;
+ if (signal_pending(current))
+ return sock_intr_errno(*timeo_p);
+ if (sk->sk_state == TIPC_DISCONNECTING)
+ break;
+
+ add_wait_queue(sk_sleep(sk), &wait);
+ done = sk_wait_event(sk, timeo_p, tipc_sk_connected(sk),
+ &wait);
+ remove_wait_queue(sk_sleep(sk), &wait);
+ } while (!done);
+ return 0;
+}
+
+static bool tipc_sockaddr_is_sane(struct sockaddr_tipc *addr)
+{
+ if (addr->family != AF_TIPC)
+ return false;
+ if (addr->addrtype == TIPC_SERVICE_RANGE)
+ return (addr->addr.nameseq.lower <= addr->addr.nameseq.upper);
+ return (addr->addrtype == TIPC_SERVICE_ADDR ||
+ addr->addrtype == TIPC_SOCKET_ADDR);
+}
+
+/**
+ * tipc_connect - establish a connection to another TIPC port
+ * @sock: socket structure
+ * @dest: socket address for destination port
+ * @destlen: size of socket address data structure
+ * @flags: file-related flags associated with socket
+ *
+ * Return: 0 on success, errno otherwise
+ */
+static int tipc_connect(struct socket *sock, struct sockaddr *dest,
+ int destlen, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct tipc_sock *tsk = tipc_sk(sk);
+ struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
+ struct msghdr m = {NULL,};
+ long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
+ int previous;
+ int res = 0;
+
+ if (destlen != sizeof(struct sockaddr_tipc))
+ return -EINVAL;
+
+ lock_sock(sk);
+
+ if (tsk->group) {
+ res = -EINVAL;
+ goto exit;
+ }
+
+ if (dst->family == AF_UNSPEC) {
+ memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
+ if (!tipc_sk_type_connectionless(sk))
+ res = -EINVAL;
+ goto exit;
+ }
+ if (!tipc_sockaddr_is_sane(dst)) {
+ res = -EINVAL;
+ goto exit;
+ }
+ /* DGRAM/RDM connect(), just save the destaddr */
+ if (tipc_sk_type_connectionless(sk)) {
+ memcpy(&tsk->peer, dest, destlen);
+ goto exit;
+ } else if (dst->addrtype == TIPC_SERVICE_RANGE) {
+ res = -EINVAL;
+ goto exit;
+ }
+
+ previous = sk->sk_state;
+
+ switch (sk->sk_state) {
+ case TIPC_OPEN:
+ /* Send a 'SYN-' to destination */
+ m.msg_name = dest;
+ m.msg_namelen = destlen;
+ iov_iter_kvec(&m.msg_iter, ITER_SOURCE, NULL, 0, 0);
+
+ /* If connect is non-blocking, set MSG_DONTWAIT to
+ * indicate that send_msg() must never block.
+ */
+ if (!timeout)
+ m.msg_flags = MSG_DONTWAIT;
+
+ res = __tipc_sendmsg(sock, &m, 0);
+ if ((res < 0) && (res != -EWOULDBLOCK))
+ goto exit;
+
+ /* Just entered TIPC_CONNECTING state; the only
+ * difference is that the return value in the non-blocking
+ * case is EINPROGRESS rather than EALREADY.
+ */
+ res = -EINPROGRESS;
+ fallthrough;
+ case TIPC_CONNECTING:
+ if (!timeout) {
+ if (previous == TIPC_CONNECTING)
+ res = -EALREADY;
+ goto exit;
+ }
+ timeout = msecs_to_jiffies(timeout);
+ /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
+ res = tipc_wait_for_connect(sock, &timeout);
+ break;
+ case TIPC_ESTABLISHED:
+ res = -EISCONN;
+ break;
+ default:
+ res = -EINVAL;
+ }
+
+exit:
+ release_sock(sk);
+ return res;
+}
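+
+ /* Usage sketch (illustrative, not part of the upstream sources): a minimal
+ * userspace connect() against a TIPC service address, assuming the standard
+ * <linux/tipc.h> uapi definitions; the service type 4711 and instance 1 are
+ * arbitrary example values.
+ *
+ *   struct sockaddr_tipc sa = {
+ *           .family = AF_TIPC,
+ *           .addrtype = TIPC_SERVICE_ADDR,
+ *           .addr.name.name.type = 4711,
+ *           .addr.name.name.instance = 1,
+ *   };
+ *   int sd = socket(AF_TIPC, SOCK_STREAM, 0);
+ *
+ *   if (connect(sd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
+ *           perror("connect");
+ */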
+
+/**
+ * tipc_listen - allow socket to listen for incoming connections
+ * @sock: socket structure
+ * @len: (unused)
+ *
+ * Return: 0 on success, errno otherwise
+ */
+static int tipc_listen(struct socket *sock, int len)
+{
+ struct sock *sk = sock->sk;
+ int res;
+
+ lock_sock(sk);
+ res = tipc_set_sk_state(sk, TIPC_LISTEN);
+ release_sock(sk);
+
+ return res;
+}
+
+static int tipc_wait_for_accept(struct socket *sock, long timeo)
+{
+ struct sock *sk = sock->sk;
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ int err;
+
+ /* True wake-one mechanism for incoming connections: only
+ * one process gets woken up, not the 'whole herd'.
+ * Since we do not 'race & poll' for established sockets
+ * anymore, the common case will execute the loop only once.
+ */
+ for (;;) {
+ if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
+ add_wait_queue(sk_sleep(sk), &wait);
+ release_sock(sk);
+ timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
+ lock_sock(sk);
+ remove_wait_queue(sk_sleep(sk), &wait);
+ }
+ err = 0;
+ if (!skb_queue_empty(&sk->sk_receive_queue))
+ break;
+ err = -EAGAIN;
+ if (!timeo)
+ break;
+ err = sock_intr_errno(timeo);
+ if (signal_pending(current))
+ break;
+ }
+ return err;
+}
+
+/**
+ * tipc_accept - wait for connection request
+ * @sock: listening socket
+ * @new_sock: new socket that is to be connected
+ * @flags: file-related flags associated with socket
+ * @kern: caused by kernel or by userspace?
+ *
+ * Return: 0 on success, errno otherwise
+ */
+static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
+ bool kern)
+{
+ struct sock *new_sk, *sk = sock->sk;
+ struct tipc_sock *new_tsock;
+ struct msghdr m = {NULL,};
+ struct tipc_msg *msg;
+ struct sk_buff *buf;
+ long timeo;
+ int res;
+
+ lock_sock(sk);
+
+ if (sk->sk_state != TIPC_LISTEN) {
+ res = -EINVAL;
+ goto exit;
+ }
+ timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
+ res = tipc_wait_for_accept(sock, timeo);
+ if (res)
+ goto exit;
+
+ buf = skb_peek(&sk->sk_receive_queue);
+
+ res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
+ if (res)
+ goto exit;
+ security_sk_clone(sock->sk, new_sock->sk);
+
+ new_sk = new_sock->sk;
+ new_tsock = tipc_sk(new_sk);
+ msg = buf_msg(buf);
+
+ /* we lock on new_sk; but lockdep sees the lock on sk */
+ lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
+
+ /*
+ * Reject any stray messages received by new socket
+ * before the socket lock was taken (very, very unlikely)
+ */
+ tsk_rej_rx_queue(new_sk, TIPC_ERR_NO_PORT);
+
+ /* Connect new socket to its peer */
+ tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
+
+ tsk_set_importance(new_sk, msg_importance(msg));
+ if (msg_named(msg)) {
+ new_tsock->conn_addrtype = TIPC_SERVICE_ADDR;
+ msg_set_nametype(&new_tsock->phdr, msg_nametype(msg));
+ msg_set_nameinst(&new_tsock->phdr, msg_nameinst(msg));
+ }
+
+ /*
+ * Respond to 'SYN-' by discarding it & returning 'ACK'.
+ * Respond to 'SYN+' by queuing it on new socket & returning 'ACK'.
+ */
+ if (!msg_data_sz(msg)) {
+ tsk_advance_rx_queue(sk);
+ } else {
+ __skb_dequeue(&sk->sk_receive_queue);
+ __skb_queue_head(&new_sk->sk_receive_queue, buf);
+ skb_set_owner_r(buf, new_sk);
+ }
+ iov_iter_kvec(&m.msg_iter, ITER_SOURCE, NULL, 0, 0);
+ __tipc_sendstream(new_sock, &m, 0);
+ release_sock(new_sk);
+exit:
+ release_sock(sk);
+ return res;
+}
+
+/**
+ * tipc_shutdown - shutdown socket connection
+ * @sock: socket structure
+ * @how: direction to close (must be SHUT_RDWR)
+ *
+ * Terminates connection (if necessary), then purges socket's receive queue.
+ *
+ * Return: 0 on success, errno otherwise
+ */
+static int tipc_shutdown(struct socket *sock, int how)
+{
+ struct sock *sk = sock->sk;
+ int res;
+
+ if (how != SHUT_RDWR)
+ return -EINVAL;
+
+ lock_sock(sk);
+
+ trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " ");
+ __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
+ sk->sk_shutdown = SHUTDOWN_MASK;
+
+ if (sk->sk_state == TIPC_DISCONNECTING) {
+ /* Discard any unreceived messages */
+ __skb_queue_purge(&sk->sk_receive_queue);
+
+ res = 0;
+ } else {
+ res = -ENOTCONN;
+ }
+ /* Wake up anyone sleeping in poll. */
+ sk->sk_state_change(sk);
+
+ release_sock(sk);
+ return res;
+}
+
+static void tipc_sk_check_probing_state(struct sock *sk,
+ struct sk_buff_head *list)
+{
+ struct tipc_sock *tsk = tipc_sk(sk);
+ u32 pnode = tsk_peer_node(tsk);
+ u32 pport = tsk_peer_port(tsk);
+ u32 self = tsk_own_node(tsk);
+ u32 oport = tsk->portid;
+ struct sk_buff *skb;
+
+ if (tsk->probe_unacked) {
+ tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+ sk->sk_err = ECONNABORTED;
+ tipc_node_remove_conn(sock_net(sk), pnode, pport);
+ sk->sk_state_change(sk);
+ return;
+ }
+ /* Prepare new probe */
+ skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
+ pnode, self, pport, oport, TIPC_OK);
+ if (skb)
+ __skb_queue_tail(list, skb);
+ tsk->probe_unacked = true;
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
+}
+
+static void tipc_sk_retry_connect(struct sock *sk, struct sk_buff_head *list)
+{
+ struct tipc_sock *tsk = tipc_sk(sk);
+
+ /* Try again later if dest link is congested */
+ if (tsk->cong_link_cnt) {
+ sk_reset_timer(sk, &sk->sk_timer,
+ jiffies + msecs_to_jiffies(100));
+ return;
+ }
+ /* Prepare SYN for retransmit */
+ tipc_msg_skb_clone(&sk->sk_write_queue, list);
+}
+
+static void tipc_sk_timeout(struct timer_list *t)
+{
+ struct sock *sk = from_timer(sk, t, sk_timer);
+ struct tipc_sock *tsk = tipc_sk(sk);
+ u32 pnode = tsk_peer_node(tsk);
+ struct sk_buff_head list;
+ int rc = 0;
+
+ __skb_queue_head_init(&list);
+ bh_lock_sock(sk);
+
+ /* Try again later if socket is busy */
+ if (sock_owned_by_user(sk)) {
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
+ bh_unlock_sock(sk);
+ sock_put(sk);
+ return;
+ }
+
+ if (sk->sk_state == TIPC_ESTABLISHED)
+ tipc_sk_check_probing_state(sk, &list);
+ else if (sk->sk_state == TIPC_CONNECTING)
+ tipc_sk_retry_connect(sk, &list);
+
+ bh_unlock_sock(sk);
+
+ if (!skb_queue_empty(&list))
+ rc = tipc_node_xmit(sock_net(sk), &list, pnode, tsk->portid);
+
+ /* SYN messages may cause link congestion */
+ if (rc == -ELINKCONG) {
+ tipc_dest_push(&tsk->cong_links, pnode, 0);
+ tsk->cong_link_cnt = 1;
+ }
+ sock_put(sk);
+}
+
+static int tipc_sk_publish(struct tipc_sock *tsk, struct tipc_uaddr *ua)
+{
+ struct sock *sk = &tsk->sk;
+ struct net *net = sock_net(sk);
+ struct tipc_socket_addr skaddr;
+ struct publication *p;
+ u32 key;
+
+ if (tipc_sk_connected(sk))
+ return -EINVAL;
+ key = tsk->portid + tsk->pub_count + 1;
+ if (key == tsk->portid)
+ return -EADDRINUSE;
+ skaddr.ref = tsk->portid;
+ skaddr.node = tipc_own_addr(net);
+ p = tipc_nametbl_publish(net, ua, &skaddr, key);
+ if (unlikely(!p))
+ return -EINVAL;
+
+ list_add(&p->binding_sock, &tsk->publications);
+ tsk->pub_count++;
+ tsk->published = true;
+ return 0;
+}
+
+static int tipc_sk_withdraw(struct tipc_sock *tsk, struct tipc_uaddr *ua)
+{
+ struct net *net = sock_net(&tsk->sk);
+ struct publication *safe, *p;
+ struct tipc_uaddr _ua;
+ int rc = -EINVAL;
+
+ list_for_each_entry_safe(p, safe, &tsk->publications, binding_sock) {
+ if (!ua) {
+ tipc_uaddr(&_ua, TIPC_SERVICE_RANGE, p->scope,
+ p->sr.type, p->sr.lower, p->sr.upper);
+ tipc_nametbl_withdraw(net, &_ua, &p->sk, p->key);
+ continue;
+ }
+ /* Unbind specific publication */
+ if (p->scope != ua->scope)
+ continue;
+ if (p->sr.type != ua->sr.type)
+ continue;
+ if (p->sr.lower != ua->sr.lower)
+ continue;
+ if (p->sr.upper != ua->sr.upper)
+ break;
+ tipc_nametbl_withdraw(net, ua, &p->sk, p->key);
+ rc = 0;
+ break;
+ }
+ if (list_empty(&tsk->publications)) {
+ tsk->published = 0;
+ rc = 0;
+ }
+ return rc;
+}
+
+/* tipc_sk_reinit: set non-zero address in all existing sockets
+ * when we go from standalone to network mode.
+ */
+void tipc_sk_reinit(struct net *net)
+{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct rhashtable_iter iter;
+ struct tipc_sock *tsk;
+ struct tipc_msg *msg;
+
+ rhashtable_walk_enter(&tn->sk_rht, &iter);
+
+ do {
+ rhashtable_walk_start(&iter);
+
+ while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
+ sock_hold(&tsk->sk);
+ rhashtable_walk_stop(&iter);
+ lock_sock(&tsk->sk);
+ msg = &tsk->phdr;
+ msg_set_prevnode(msg, tipc_own_addr(net));
+ msg_set_orignode(msg, tipc_own_addr(net));
+ release_sock(&tsk->sk);
+ rhashtable_walk_start(&iter);
+ sock_put(&tsk->sk);
+ }
+
+ rhashtable_walk_stop(&iter);
+ } while (tsk == ERR_PTR(-EAGAIN));
+
+ rhashtable_walk_exit(&iter);
+}
+
+static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
+{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_sock *tsk;
+
+ rcu_read_lock();
+ tsk = rhashtable_lookup(&tn->sk_rht, &portid, tsk_rht_params);
+ if (tsk)
+ sock_hold(&tsk->sk);
+ rcu_read_unlock();
+
+ return tsk;
+}
+
+static int tipc_sk_insert(struct tipc_sock *tsk)
+{
+ struct sock *sk = &tsk->sk;
+ struct net *net = sock_net(sk);
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
+ u32 portid = get_random_u32_below(remaining) + TIPC_MIN_PORT;
+
+ while (remaining--) {
+ portid++;
+ if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
+ portid = TIPC_MIN_PORT;
+ tsk->portid = portid;
+ sock_hold(&tsk->sk);
+ if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
+ tsk_rht_params))
+ return 0;
+ sock_put(&tsk->sk);
+ }
+
+ return -1;
+}
+
+static void tipc_sk_remove(struct tipc_sock *tsk)
+{
+ struct sock *sk = &tsk->sk;
+ struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
+
+ if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
+ WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
+ __sock_put(sk);
+ }
+}
+
+static const struct rhashtable_params tsk_rht_params = {
+ .nelem_hint = 192,
+ .head_offset = offsetof(struct tipc_sock, node),
+ .key_offset = offsetof(struct tipc_sock, portid),
+ .key_len = sizeof(u32), /* portid */
+ .max_size = 1048576,
+ .min_size = 256,
+ .automatic_shrinking = true,
+};
+
+int tipc_sk_rht_init(struct net *net)
+{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+ return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
+}
+
+void tipc_sk_rht_destroy(struct net *net)
+{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+ /* Wait for socket readers to complete */
+ synchronize_net();
+
+ rhashtable_destroy(&tn->sk_rht);
+}
+
+static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
+{
+ struct net *net = sock_net(&tsk->sk);
+ struct tipc_group *grp = tsk->group;
+ struct tipc_msg *hdr = &tsk->phdr;
+ struct tipc_uaddr ua;
+ int rc;
+
+ if (mreq->type < TIPC_RESERVED_TYPES)
+ return -EACCES;
+ if (mreq->scope > TIPC_NODE_SCOPE)
+ return -EINVAL;
+ if (mreq->scope != TIPC_NODE_SCOPE)
+ mreq->scope = TIPC_CLUSTER_SCOPE;
+ if (grp)
+ return -EACCES;
+ grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open);
+ if (!grp)
+ return -ENOMEM;
+ tsk->group = grp;
+ msg_set_lookup_scope(hdr, mreq->scope);
+ msg_set_nametype(hdr, mreq->type);
+ msg_set_dest_droppable(hdr, true);
+ tipc_uaddr(&ua, TIPC_SERVICE_RANGE, mreq->scope,
+ mreq->type, mreq->instance, mreq->instance);
+ tipc_nametbl_build_group(net, grp, &ua);
+ rc = tipc_sk_publish(tsk, &ua);
+ if (rc) {
+ tipc_group_delete(net, grp);
+ tsk->group = NULL;
+ return rc;
+ }
+ /* Eliminate any risk that a broadcast overtakes sent JOINs */
+ tsk->mc_method.rcast = true;
+ tsk->mc_method.mandatory = true;
+ tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
+ return rc;
+}
+
+static int tipc_sk_leave(struct tipc_sock *tsk)
+{
+ struct net *net = sock_net(&tsk->sk);
+ struct tipc_group *grp = tsk->group;
+ struct tipc_uaddr ua;
+ int scope;
+
+ if (!grp)
+ return -EINVAL;
+ ua.addrtype = TIPC_SERVICE_RANGE;
+ tipc_group_self(grp, &ua.sr, &scope);
+ ua.scope = scope;
+ tipc_group_delete(net, grp);
+ tsk->group = NULL;
+ tipc_sk_withdraw(tsk, &ua);
+ return 0;
+}
+
+/**
+ * tipc_setsockopt - set socket option
+ * @sock: socket structure
+ * @lvl: option level
+ * @opt: option identifier
+ * @ov: pointer to new option value
+ * @ol: length of option value
+ *
+ * For stream sockets only, accepts and ignores all IPPROTO_TCP options
+ * (to ease compatibility).
+ *
+ * Return: 0 on success, errno otherwise
+ */
+static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
+ sockptr_t ov, unsigned int ol)
+{
+ struct sock *sk = sock->sk;
+ struct tipc_sock *tsk = tipc_sk(sk);
+ struct tipc_group_req mreq;
+ u32 value = 0;
+ int res = 0;
+
+ if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
+ return 0;
+ if (lvl != SOL_TIPC)
+ return -ENOPROTOOPT;
+
+ switch (opt) {
+ case TIPC_IMPORTANCE:
+ case TIPC_SRC_DROPPABLE:
+ case TIPC_DEST_DROPPABLE:
+ case TIPC_CONN_TIMEOUT:
+ case TIPC_NODELAY:
+ if (ol < sizeof(value))
+ return -EINVAL;
+ if (copy_from_sockptr(&value, ov, sizeof(u32)))
+ return -EFAULT;
+ break;
+ case TIPC_GROUP_JOIN:
+ if (ol < sizeof(mreq))
+ return -EINVAL;
+ if (copy_from_sockptr(&mreq, ov, sizeof(mreq)))
+ return -EFAULT;
+ break;
+ default:
+ if (!sockptr_is_null(ov) || ol)
+ return -EINVAL;
+ }
+
+ lock_sock(sk);
+
+ switch (opt) {
+ case TIPC_IMPORTANCE:
+ res = tsk_set_importance(sk, value);
+ break;
+ case TIPC_SRC_DROPPABLE:
+ if (sock->type != SOCK_STREAM)
+ tsk_set_unreliable(tsk, value);
+ else
+ res = -ENOPROTOOPT;
+ break;
+ case TIPC_DEST_DROPPABLE:
+ tsk_set_unreturnable(tsk, value);
+ break;
+ case TIPC_CONN_TIMEOUT:
+ tipc_sk(sk)->conn_timeout = value;
+ break;
+ case TIPC_MCAST_BROADCAST:
+ tsk->mc_method.rcast = false;
+ tsk->mc_method.mandatory = true;
+ break;
+ case TIPC_MCAST_REPLICAST:
+ tsk->mc_method.rcast = true;
+ tsk->mc_method.mandatory = true;
+ break;
+ case TIPC_GROUP_JOIN:
+ res = tipc_sk_join(tsk, &mreq);
+ break;
+ case TIPC_GROUP_LEAVE:
+ res = tipc_sk_leave(tsk);
+ break;
+ case TIPC_NODELAY:
+ tsk->nodelay = !!value;
+ tsk_set_nagle(tsk);
+ break;
+ default:
+ res = -EINVAL;
+ }
+
+ release_sock(sk);
+
+ return res;
+}
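+
+ /* Usage sketch (illustrative, not part of the upstream sources): joining a
+ * communication group via setsockopt(), assuming the standard <linux/tipc.h>
+ * uapi struct tipc_group_req; the group type 4711 and member instance 17 are
+ * arbitrary example values.
+ *
+ *   struct tipc_group_req req = {
+ *           .type = 4711,
+ *           .instance = 17,
+ *           .scope = TIPC_CLUSTER_SCOPE,
+ *           .flags = TIPC_GROUP_MEMBER_EVTS,
+ *   };
+ *
+ *   if (setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN, &req, sizeof(req)) < 0)
+ *           perror("setsockopt");
+ */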
+
+/**
+ * tipc_getsockopt - get socket option
+ * @sock: socket structure
+ * @lvl: option level
+ * @opt: option identifier
+ * @ov: receptacle for option value
+ * @ol: receptacle for length of option value
+ *
+ * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
+ * (to ease compatibility).
+ *
+ * Return: 0 on success, errno otherwise
+ */
+static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
+ char __user *ov, int __user *ol)
+{
+ struct sock *sk = sock->sk;
+ struct tipc_sock *tsk = tipc_sk(sk);
+ struct tipc_service_range seq;
+ int len, scope;
+ u32 value;
+ int res;
+
+ if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
+ return put_user(0, ol);
+ if (lvl != SOL_TIPC)
+ return -ENOPROTOOPT;
+ res = get_user(len, ol);
+ if (res)
+ return res;
+
+ lock_sock(sk);
+
+ switch (opt) {
+ case TIPC_IMPORTANCE:
+ value = tsk_importance(tsk);
+ break;
+ case TIPC_SRC_DROPPABLE:
+ value = tsk_unreliable(tsk);
+ break;
+ case TIPC_DEST_DROPPABLE:
+ value = tsk_unreturnable(tsk);
+ break;
+ case TIPC_CONN_TIMEOUT:
+ value = tsk->conn_timeout;
+ /* no need to set "res", since already 0 at this point */
+ break;
+ case TIPC_NODE_RECVQ_DEPTH:
+ value = 0; /* was tipc_queue_size, now obsolete */
+ break;
+ case TIPC_SOCK_RECVQ_DEPTH:
+ value = skb_queue_len(&sk->sk_receive_queue);
+ break;
+ case TIPC_SOCK_RECVQ_USED:
+ value = sk_rmem_alloc_get(sk);
+ break;
+ case TIPC_GROUP_JOIN:
+ seq.type = 0;
+ if (tsk->group)
+ tipc_group_self(tsk->group, &seq, &scope);
+ value = seq.type;
+ break;
+ default:
+ res = -EINVAL;
+ }
+
+ release_sock(sk);
+
+ if (res)
+ return res; /* "get" failed */
+
+ if (len < sizeof(value))
+ return -EINVAL;
+
+ if (copy_to_user(ov, &value, sizeof(value)))
+ return -EFAULT;
+
+ return put_user(sizeof(value), ol);
+}
+
+static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+ struct net *net = sock_net(sock->sk);
+ struct tipc_sioc_nodeid_req nr = {0};
+ struct tipc_sioc_ln_req lnr;
+ void __user *argp = (void __user *)arg;
+
+ switch (cmd) {
+ case SIOCGETLINKNAME:
+ if (copy_from_user(&lnr, argp, sizeof(lnr)))
+ return -EFAULT;
+ if (!tipc_node_get_linkname(net,
+ lnr.bearer_id & 0xffff, lnr.peer,
+ lnr.linkname, TIPC_MAX_LINK_NAME)) {
+ if (copy_to_user(argp, &lnr, sizeof(lnr)))
+ return -EFAULT;
+ return 0;
+ }
+ return -EADDRNOTAVAIL;
+ case SIOCGETNODEID:
+ if (copy_from_user(&nr, argp, sizeof(nr)))
+ return -EFAULT;
+ if (!tipc_node_get_id(net, nr.peer, nr.node_id))
+ return -EADDRNOTAVAIL;
+ if (copy_to_user(argp, &nr, sizeof(nr)))
+ return -EFAULT;
+ return 0;
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+static int tipc_socketpair(struct socket *sock1, struct socket *sock2)
+{
+ struct tipc_sock *tsk2 = tipc_sk(sock2->sk);
+ struct tipc_sock *tsk1 = tipc_sk(sock1->sk);
+ u32 onode = tipc_own_addr(sock_net(sock1->sk));
+
+ tsk1->peer.family = AF_TIPC;
+ tsk1->peer.addrtype = TIPC_SOCKET_ADDR;
+ tsk1->peer.scope = TIPC_NODE_SCOPE;
+ tsk1->peer.addr.id.ref = tsk2->portid;
+ tsk1->peer.addr.id.node = onode;
+ tsk2->peer.family = AF_TIPC;
+ tsk2->peer.addrtype = TIPC_SOCKET_ADDR;
+ tsk2->peer.scope = TIPC_NODE_SCOPE;
+ tsk2->peer.addr.id.ref = tsk1->portid;
+ tsk2->peer.addr.id.node = onode;
+
+ tipc_sk_finish_conn(tsk1, tsk2->portid, onode);
+ tipc_sk_finish_conn(tsk2, tsk1->portid, onode);
+ return 0;
+}
+
+/* Protocol switches for the various types of TIPC sockets */
+
+static const struct proto_ops msg_ops = {
+ .owner = THIS_MODULE,
+ .family = AF_TIPC,
+ .release = tipc_release,
+ .bind = tipc_bind,
+ .connect = tipc_connect,
+ .socketpair = tipc_socketpair,
+ .accept = sock_no_accept,
+ .getname = tipc_getname,
+ .poll = tipc_poll,
+ .ioctl = tipc_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = tipc_shutdown,
+ .setsockopt = tipc_setsockopt,
+ .getsockopt = tipc_getsockopt,
+ .sendmsg = tipc_sendmsg,
+ .recvmsg = tipc_recvmsg,
+ .mmap = sock_no_mmap,
+};
+
+static const struct proto_ops packet_ops = {
+ .owner = THIS_MODULE,
+ .family = AF_TIPC,
+ .release = tipc_release,
+ .bind = tipc_bind,
+ .connect = tipc_connect,
+ .socketpair = tipc_socketpair,
+ .accept = tipc_accept,
+ .getname = tipc_getname,
+ .poll = tipc_poll,
+ .ioctl = tipc_ioctl,
+ .listen = tipc_listen,
+ .shutdown = tipc_shutdown,
+ .setsockopt = tipc_setsockopt,
+ .getsockopt = tipc_getsockopt,
+ .sendmsg = tipc_send_packet,
+ .recvmsg = tipc_recvmsg,
+ .mmap = sock_no_mmap,
+};
+
+static const struct proto_ops stream_ops = {
+ .owner = THIS_MODULE,
+ .family = AF_TIPC,
+ .release = tipc_release,
+ .bind = tipc_bind,
+ .connect = tipc_connect,
+ .socketpair = tipc_socketpair,
+ .accept = tipc_accept,
+ .getname = tipc_getname,
+ .poll = tipc_poll,
+ .ioctl = tipc_ioctl,
+ .listen = tipc_listen,
+ .shutdown = tipc_shutdown,
+ .setsockopt = tipc_setsockopt,
+ .getsockopt = tipc_getsockopt,
+ .sendmsg = tipc_sendstream,
+ .recvmsg = tipc_recvstream,
+ .mmap = sock_no_mmap,
+};
+
+static const struct net_proto_family tipc_family_ops = {
+ .owner = THIS_MODULE,
+ .family = AF_TIPC,
+ .create = tipc_sk_create
+};
+
+static struct proto tipc_proto = {
+ .name = "TIPC",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct tipc_sock),
+ .sysctl_rmem = sysctl_tipc_rmem
+};
+
+/**
+ * tipc_socket_init - initialize TIPC socket interface
+ *
+ * Return: 0 on success, errno otherwise
+ */
+int tipc_socket_init(void)
+{
+ int res;
+
+ res = proto_register(&tipc_proto, 1);
+ if (res) {
+ pr_err("Failed to register TIPC protocol type\n");
+ goto out;
+ }
+
+ res = sock_register(&tipc_family_ops);
+ if (res) {
+ pr_err("Failed to register TIPC socket type\n");
+ proto_unregister(&tipc_proto);
+ goto out;
+ }
+ out:
+ return res;
+}
+
+/**
+ * tipc_socket_stop - stop TIPC socket interface
+ */
+void tipc_socket_stop(void)
+{
+ sock_unregister(tipc_family_ops.family);
+ proto_unregister(&tipc_proto);
+}
+
+/* Caller should hold socket lock for the passed tipc socket. */
+static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
+{
+ u32 peer_node, peer_port;
+ u32 conn_type, conn_instance;
+ struct nlattr *nest;
+
+ peer_node = tsk_peer_node(tsk);
+ peer_port = tsk_peer_port(tsk);
+ conn_type = msg_nametype(&tsk->phdr);
+ conn_instance = msg_nameinst(&tsk->phdr);
+ nest = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_CON);
+ if (!nest)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
+ goto msg_full;
+ if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
+ goto msg_full;
+
+ if (tsk->conn_addrtype != 0) {
+ if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
+ goto msg_full;
+ if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, conn_type))
+ goto msg_full;
+ if (nla_put_u32(skb, TIPC_NLA_CON_INST, conn_instance))
+ goto msg_full;
+ }
+ nla_nest_end(skb, nest);
+
+ return 0;
+
+msg_full:
+ nla_nest_cancel(skb, nest);
+
+ return -EMSGSIZE;
+}
+
+static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock *tsk)
+{
+ struct net *net = sock_net(skb->sk);
+ struct sock *sk = &tsk->sk;
+
+ if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) ||
+ nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net)))
+ return -EMSGSIZE;
+
+ if (tipc_sk_connected(sk)) {
+ if (__tipc_nl_add_sk_con(skb, tsk))
+ return -EMSGSIZE;
+ } else if (!list_empty(&tsk->publications)) {
+ if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
+ return -EMSGSIZE;
+ }
+ return 0;
+}
+
+/* Caller should hold socket lock for the passed tipc socket. */
+static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
+ struct tipc_sock *tsk)
+{
+ struct nlattr *attrs;
+ void *hdr;
+
+ hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
+ if (!hdr)
+ goto msg_cancel;
+
+ attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
+ if (!attrs)
+ goto genlmsg_cancel;
+
+ if (__tipc_nl_add_sk_info(skb, tsk))
+ goto attr_msg_cancel;
+
+ nla_nest_end(skb, attrs);
+ genlmsg_end(skb, hdr);
+
+ return 0;
+
+attr_msg_cancel:
+ nla_nest_cancel(skb, attrs);
+genlmsg_cancel:
+ genlmsg_cancel(skb, hdr);
+msg_cancel:
+ return -EMSGSIZE;
+}
+
+int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
+ int (*skb_handler)(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct tipc_sock *tsk))
+{
+ struct rhashtable_iter *iter = (void *)cb->args[4];
+ struct tipc_sock *tsk;
+ int err;
+
+ rhashtable_walk_start(iter);
+ while ((tsk = rhashtable_walk_next(iter)) != NULL) {
+ if (IS_ERR(tsk)) {
+ err = PTR_ERR(tsk);
+ if (err == -EAGAIN) {
+ err = 0;
+ continue;
+ }
+ break;
+ }
+
+ sock_hold(&tsk->sk);
+ rhashtable_walk_stop(iter);
+ lock_sock(&tsk->sk);
+ err = skb_handler(skb, cb, tsk);
+ if (err) {
+ release_sock(&tsk->sk);
+ sock_put(&tsk->sk);
+ goto out;
+ }
+ release_sock(&tsk->sk);
+ rhashtable_walk_start(iter);
+ sock_put(&tsk->sk);
+ }
+ rhashtable_walk_stop(iter);
+out:
+ return skb->len;
+}
+EXPORT_SYMBOL(tipc_nl_sk_walk);
+
+int tipc_dump_start(struct netlink_callback *cb)
+{
+ return __tipc_dump_start(cb, sock_net(cb->skb->sk));
+}
+EXPORT_SYMBOL(tipc_dump_start);
+
+int __tipc_dump_start(struct netlink_callback *cb, struct net *net)
+{
+ /* tipc_nl_name_table_dump() uses cb->args[0...3]. */
+ struct rhashtable_iter *iter = (void *)cb->args[4];
+ struct tipc_net *tn = tipc_net(net);
+
+ if (!iter) {
+ iter = kmalloc(sizeof(*iter), GFP_KERNEL);
+ if (!iter)
+ return -ENOMEM;
+
+ cb->args[4] = (long)iter;
+ }
+
+ rhashtable_walk_enter(&tn->sk_rht, iter);
+ return 0;
+}
+
+int tipc_dump_done(struct netlink_callback *cb)
+{
+ struct rhashtable_iter *hti = (void *)cb->args[4];
+
+ rhashtable_walk_exit(hti);
+ kfree(hti);
+ return 0;
+}
+EXPORT_SYMBOL(tipc_dump_done);
+
+int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
+ struct tipc_sock *tsk, u32 sk_filter_state,
+ u64 (*tipc_diag_gen_cookie)(struct sock *sk))
+{
+ struct sock *sk = &tsk->sk;
+ struct nlattr *attrs;
+ struct nlattr *stat;
+
+ /* Filter response w.r.t. sk_state */
+ if (!(sk_filter_state & (1 << sk->sk_state)))
+ return 0;
+
+ attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
+ if (!attrs)
+ goto msg_cancel;
+
+ if (__tipc_nl_add_sk_info(skb, tsk))
+ goto attr_msg_cancel;
+
+ if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) ||
+ nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) ||
+ nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
+ nla_put_u32(skb, TIPC_NLA_SOCK_UID,
+ from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
+ sock_i_uid(sk))) ||
+ nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
+ tipc_diag_gen_cookie(sk),
+ TIPC_NLA_SOCK_PAD))
+ goto attr_msg_cancel;
+
+ stat = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_STAT);
+ if (!stat)
+ goto attr_msg_cancel;
+
+ if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ,
+ skb_queue_len(&sk->sk_receive_queue)) ||
+ nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
+ skb_queue_len(&sk->sk_write_queue)) ||
+ nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
+ atomic_read(&sk->sk_drops)))
+ goto stat_msg_cancel;
+
+ if (tsk->cong_link_cnt &&
+ nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG))
+ goto stat_msg_cancel;
+
+ if (tsk_conn_cong(tsk) &&
+ nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG))
+ goto stat_msg_cancel;
+
+ nla_nest_end(skb, stat);
+
+ if (tsk->group)
+ if (tipc_group_fill_sock_diag(tsk->group, skb))
+ goto stat_msg_cancel;
+
+ nla_nest_end(skb, attrs);
+
+ return 0;
+
+stat_msg_cancel:
+ nla_nest_cancel(skb, stat);
+attr_msg_cancel:
+ nla_nest_cancel(skb, attrs);
+msg_cancel:
+ return -EMSGSIZE;
+}
+EXPORT_SYMBOL(tipc_sk_fill_sock_diag);
+
+int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk);
+}
+
+/* Caller should hold socket lock for the passed tipc socket. */
+static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct publication *publ)
+{
+ void *hdr;
+ struct nlattr *attrs;
+
+ hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
+ if (!hdr)
+ goto msg_cancel;
+
+ attrs = nla_nest_start_noflag(skb, TIPC_NLA_PUBL);
+ if (!attrs)
+ goto genlmsg_cancel;
+
+ if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
+ goto attr_msg_cancel;
+ if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->sr.type))
+ goto attr_msg_cancel;
+ if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->sr.lower))
+ goto attr_msg_cancel;
+ if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->sr.upper))
+ goto attr_msg_cancel;
+
+ nla_nest_end(skb, attrs);
+ genlmsg_end(skb, hdr);
+
+ return 0;
+
+attr_msg_cancel:
+ nla_nest_cancel(skb, attrs);
+genlmsg_cancel:
+ genlmsg_cancel(skb, hdr);
+msg_cancel:
+ return -EMSGSIZE;
+}
+
+/* Caller should hold socket lock for the passed tipc socket. */
+static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct tipc_sock *tsk, u32 *last_publ)
+{
+ int err;
+ struct publication *p;
+
+ if (*last_publ) {
+ list_for_each_entry(p, &tsk->publications, binding_sock) {
+ if (p->key == *last_publ)
+ break;
+ }
+ if (list_entry_is_head(p, &tsk->publications, binding_sock)) {
+ /* We never set seq or call nl_dump_check_consistent(),
+ * which means that setting prev_seq here will cause the
+ * consistency check to fail in the netlink callback
+ * handler, resulting in the last NLMSG_DONE message
+ * having the NLM_F_DUMP_INTR flag set.
+ */
+ cb->prev_seq = 1;
+ *last_publ = 0;
+ return -EPIPE;
+ }
+ } else {
+ p = list_first_entry(&tsk->publications, struct publication,
+ binding_sock);
+ }
+
+ list_for_each_entry_from(p, &tsk->publications, binding_sock) {
+ err = __tipc_nl_add_sk_publ(skb, cb, p);
+ if (err) {
+ *last_publ = p->key;
+ return err;
+ }
+ }
+ *last_publ = 0;
+
+ return 0;
+}
+
+int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ int err;
+ u32 tsk_portid = cb->args[0];
+ u32 last_publ = cb->args[1];
+ u32 done = cb->args[2];
+ struct net *net = sock_net(skb->sk);
+ struct tipc_sock *tsk;
+
+ if (!tsk_portid) {
+ struct nlattr **attrs = genl_dumpit_info(cb)->info.attrs;
+ struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
+
+ if (!attrs[TIPC_NLA_SOCK])
+ return -EINVAL;
+
+ err = nla_parse_nested_deprecated(sock, TIPC_NLA_SOCK_MAX,
+ attrs[TIPC_NLA_SOCK],
+ tipc_nl_sock_policy, NULL);
+ if (err)
+ return err;
+
+ if (!sock[TIPC_NLA_SOCK_REF])
+ return -EINVAL;
+
+ tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
+ }
+
+ if (done)
+ return 0;
+
+ tsk = tipc_sk_lookup(net, tsk_portid);
+ if (!tsk)
+ return -EINVAL;
+
+ lock_sock(&tsk->sk);
+ err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
+ if (!err)
+ done = 1;
+ release_sock(&tsk->sk);
+ sock_put(&tsk->sk);
+
+ cb->args[0] = tsk_portid;
+ cb->args[1] = last_publ;
+ cb->args[2] = done;
+
+ return skb->len;
+}
+
+/**
+ * tipc_sk_filtering - check if a socket should be traced
+ * @sk: the socket to be examined
+ *
+ * @sysctl_tipc_sk_filter is used as the socket tuple for filtering:
+ * (portid, sock type, name type, name lower, name upper)
+ *
+ * Return: true if the socket matches the tuple data
+ * (a value of 0 means 'any'), or when no tuple is set (all values 0);
+ * otherwise false
+ */
+bool tipc_sk_filtering(struct sock *sk)
+{
+ struct tipc_sock *tsk;
+ struct publication *p;
+ u32 _port, _sktype, _type, _lower, _upper;
+ u32 type = 0, lower = 0, upper = 0;
+
+ if (!sk)
+ return true;
+
+ tsk = tipc_sk(sk);
+
+ _port = sysctl_tipc_sk_filter[0];
+ _sktype = sysctl_tipc_sk_filter[1];
+ _type = sysctl_tipc_sk_filter[2];
+ _lower = sysctl_tipc_sk_filter[3];
+ _upper = sysctl_tipc_sk_filter[4];
+
+ if (!_port && !_sktype && !_type && !_lower && !_upper)
+ return true;
+
+ if (_port)
+ return (_port == tsk->portid);
+
+ if (_sktype && _sktype != sk->sk_type)
+ return false;
+
+ if (tsk->published) {
+ p = list_first_entry_or_null(&tsk->publications,
+ struct publication, binding_sock);
+ if (p) {
+ type = p->sr.type;
+ lower = p->sr.lower;
+ upper = p->sr.upper;
+ }
+ }
+
+ if (!tipc_sk_type_connectionless(sk)) {
+ type = msg_nametype(&tsk->phdr);
+ lower = msg_nameinst(&tsk->phdr);
+ upper = lower;
+ }
+
+ if ((_type && _type != type) || (_lower && _lower != lower) ||
+ (_upper && _upper != upper))
+ return false;
+
+ return true;
+}
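+
+ /* Usage sketch (illustrative, not part of the upstream sources): the filter
+ * tuple is written through the "sk_filter" sysctl defined in sysctl.c, e.g.
+ * to trace only sockets bound or connected to service type 4711:
+ *
+ *   echo "0 0 4711 0 0" > /proc/sys/net/tipc/sk_filter
+ *
+ * where the five values are (portid, sock type, name type, name lower,
+ * name upper) and 0 means 'any'.
+ */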
+
+u32 tipc_sock_get_portid(struct sock *sk)
+{
+ return (sk) ? (tipc_sk(sk))->portid : 0;
+}
+
+/**
+ * tipc_sk_overlimit1 - check if socket rx queue is about to be overloaded;
+ * both the rcv and backlog queues are considered
+ * @sk: tipc sk to be checked
+ * @skb: tipc msg to be checked
+ *
+ * Return: true if the socket rx queue allocation is > 90%, otherwise false
+ */
+
+bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb)
+{
+ atomic_t *dcnt = &tipc_sk(sk)->dupl_rcvcnt;
+ unsigned int lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
+ unsigned int qsize = sk->sk_backlog.len + sk_rmem_alloc_get(sk);
+
+ return (qsize > lim * 90 / 100);
+}
+
+/**
+ * tipc_sk_overlimit2 - check if socket rx queue is about to be overloaded;
+ * only the rcv queue is considered
+ * @sk: tipc sk to be checked
+ * @skb: tipc msg to be checked
+ *
+ * Return: true if the socket rx queue allocation is > 90%, otherwise false
+ */
+
+bool tipc_sk_overlimit2(struct sock *sk, struct sk_buff *skb)
+{
+ unsigned int lim = rcvbuf_limit(sk, skb);
+ unsigned int qsize = sk_rmem_alloc_get(sk);
+
+ return (qsize > lim * 90 / 100);
+}
+
+/**
+ * tipc_sk_dump - dump TIPC socket
+ * @sk: tipc sk to be dumped
+ * @dqueues: bitmask deciding which socket queues, if any, are to be dumped:
+ * - TIPC_DUMP_NONE: don't dump socket queues
+ * - TIPC_DUMP_SK_SNDQ: dump socket send queue
+ * - TIPC_DUMP_SK_RCVQ: dump socket rcv queue
+ * - TIPC_DUMP_SK_BKLGQ: dump socket backlog queue
+ * - TIPC_DUMP_ALL: dump all the socket queues above
+ * @buf: returned buffer of dump data in format
+ */
+int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf)
+{
+ int i = 0;
+ size_t sz = (dqueues) ? SK_LMAX : SK_LMIN;
+ u32 conn_type, conn_instance;
+ struct tipc_sock *tsk;
+ struct publication *p;
+ bool tsk_connected;
+
+ if (!sk) {
+ i += scnprintf(buf, sz, "sk data: (null)\n");
+ return i;
+ }
+
+ tsk = tipc_sk(sk);
+ tsk_connected = !tipc_sk_type_connectionless(sk);
+
+ i += scnprintf(buf, sz, "sk data: %u", sk->sk_type);
+ i += scnprintf(buf + i, sz - i, " %d", sk->sk_state);
+ i += scnprintf(buf + i, sz - i, " %x", tsk_own_node(tsk));
+ i += scnprintf(buf + i, sz - i, " %u", tsk->portid);
+ i += scnprintf(buf + i, sz - i, " | %u", tsk_connected);
+ if (tsk_connected) {
+ i += scnprintf(buf + i, sz - i, " %x", tsk_peer_node(tsk));
+ i += scnprintf(buf + i, sz - i, " %u", tsk_peer_port(tsk));
+ conn_type = msg_nametype(&tsk->phdr);
+ conn_instance = msg_nameinst(&tsk->phdr);
+ i += scnprintf(buf + i, sz - i, " %u", conn_type);
+ i += scnprintf(buf + i, sz - i, " %u", conn_instance);
+ }
+ i += scnprintf(buf + i, sz - i, " | %u", tsk->published);
+ if (tsk->published) {
+ p = list_first_entry_or_null(&tsk->publications,
+ struct publication, binding_sock);
+ i += scnprintf(buf + i, sz - i, " %u", (p) ? p->sr.type : 0);
+ i += scnprintf(buf + i, sz - i, " %u", (p) ? p->sr.lower : 0);
+ i += scnprintf(buf + i, sz - i, " %u", (p) ? p->sr.upper : 0);
+ }
+ i += scnprintf(buf + i, sz - i, " | %u", tsk->snd_win);
+ i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_win);
+ i += scnprintf(buf + i, sz - i, " %u", tsk->max_pkt);
+ i += scnprintf(buf + i, sz - i, " %x", tsk->peer_caps);
+ i += scnprintf(buf + i, sz - i, " %u", tsk->cong_link_cnt);
+ i += scnprintf(buf + i, sz - i, " %u", tsk->snt_unacked);
+ i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_unacked);
+ i += scnprintf(buf + i, sz - i, " %u", atomic_read(&tsk->dupl_rcvcnt));
+ i += scnprintf(buf + i, sz - i, " %u", sk->sk_shutdown);
+ i += scnprintf(buf + i, sz - i, " | %d", sk_wmem_alloc_get(sk));
+ i += scnprintf(buf + i, sz - i, " %d", sk->sk_sndbuf);
+ i += scnprintf(buf + i, sz - i, " | %d", sk_rmem_alloc_get(sk));
+ i += scnprintf(buf + i, sz - i, " %d", sk->sk_rcvbuf);
+ i += scnprintf(buf + i, sz - i, " | %d\n", READ_ONCE(sk->sk_backlog.len));
+
+ if (dqueues & TIPC_DUMP_SK_SNDQ) {
+ i += scnprintf(buf + i, sz - i, "sk_write_queue: ");
+ i += tipc_list_dump(&sk->sk_write_queue, false, buf + i);
+ }
+
+ if (dqueues & TIPC_DUMP_SK_RCVQ) {
+ i += scnprintf(buf + i, sz - i, "sk_receive_queue: ");
+ i += tipc_list_dump(&sk->sk_receive_queue, false, buf + i);
+ }
+
+ if (dqueues & TIPC_DUMP_SK_BKLGQ) {
+ i += scnprintf(buf + i, sz - i, "sk_backlog:\n head ");
+ i += tipc_skb_dump(sk->sk_backlog.head, false, buf + i);
+ if (sk->sk_backlog.tail != sk->sk_backlog.head) {
+ i += scnprintf(buf + i, sz - i, " tail ");
+ i += tipc_skb_dump(sk->sk_backlog.tail, false,
+ buf + i);
+ }
+ }
+
+ return i;
+}
diff --git a/net/tipc/socket.h b/net/tipc/socket.h
new file mode 100644
index 0000000000..02cdf16680
--- /dev/null
+++ b/net/tipc/socket.h
@@ -0,0 +1,80 @@
+/* net/tipc/socket.h: Include file for TIPC socket code
+ *
+ * Copyright (c) 2014-2016, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_SOCK_H
+#define _TIPC_SOCK_H
+
+#include <net/sock.h>
+#include <net/genetlink.h>
+
+/* Compatibility values for deprecated message based flow control */
+#define FLOWCTL_MSG_WIN 512
+#define FLOWCTL_MSG_LIM ((FLOWCTL_MSG_WIN * 2 + 1) * SKB_TRUESIZE(MAX_MSG_SIZE))
+
+#define FLOWCTL_BLK_SZ 1024
+
+/* Socket receive buffer sizes */
+#define RCVBUF_MIN (FLOWCTL_BLK_SZ * 512)
+#define RCVBUF_DEF (FLOWCTL_BLK_SZ * 1024 * 2)
+#define RCVBUF_MAX (FLOWCTL_BLK_SZ * 1024 * 16)
+
+struct tipc_sock;
+
+int tipc_socket_init(void);
+void tipc_socket_stop(void);
+void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq);
+void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
+ struct sk_buff_head *inputq);
+void tipc_sk_reinit(struct net *net);
+int tipc_sk_rht_init(struct net *net);
+void tipc_sk_rht_destroy(struct net *net);
+int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb);
+int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb);
+int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
+ struct tipc_sock *tsk, u32 sk_filter_state,
+ u64 (*tipc_diag_gen_cookie)(struct sock *sk));
+int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
+ int (*skb_handler)(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct tipc_sock *tsk));
+int tipc_dump_start(struct netlink_callback *cb);
+int __tipc_dump_start(struct netlink_callback *cb, struct net *net);
+int tipc_dump_done(struct netlink_callback *cb);
+u32 tipc_sock_get_portid(struct sock *sk);
+bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb);
+bool tipc_sk_overlimit2(struct sock *sk, struct sk_buff *skb);
+int tipc_sk_bind(struct socket *sock, struct sockaddr *skaddr, int alen);
+int tsk_set_importance(struct sock *sk, int imp);
+
+#endif
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
new file mode 100644
index 0000000000..05d49ad812
--- /dev/null
+++ b/net/tipc/subscr.c
@@ -0,0 +1,183 @@
+/*
+ * net/tipc/subscr.c: TIPC network topology service
+ *
+ * Copyright (c) 2000-2017, Ericsson AB
+ * Copyright (c) 2005-2007, 2010-2013, Wind River Systems
+ * Copyright (c) 2020-2021, Red Hat Inc
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "name_table.h"
+#include "subscr.h"
+
+static void tipc_sub_send_event(struct tipc_subscription *sub,
+ struct publication *p,
+ u32 event)
+{
+ struct tipc_subscr *s = &sub->evt.s;
+ struct tipc_event *evt = &sub->evt;
+
+ if (sub->inactive)
+ return;
+ tipc_evt_write(evt, event, event);
+ if (p) {
+ tipc_evt_write(evt, found_lower, p->sr.lower);
+ tipc_evt_write(evt, found_upper, p->sr.upper);
+ tipc_evt_write(evt, port.ref, p->sk.ref);
+ tipc_evt_write(evt, port.node, p->sk.node);
+ } else {
+ tipc_evt_write(evt, found_lower, s->seq.lower);
+ tipc_evt_write(evt, found_upper, s->seq.upper);
+ tipc_evt_write(evt, port.ref, 0);
+ tipc_evt_write(evt, port.node, 0);
+ }
+ tipc_topsrv_queue_evt(sub->net, sub->conid, event, evt);
+}
+
+/**
+ * tipc_sub_check_overlap - test for subscription overlap with the given values
+ * @subscribed: the service range subscribed for
+ * @found: the service range we are checking for a match
+ *
+ * Returns true if there is overlap, otherwise false.
+ */
+static bool tipc_sub_check_overlap(struct tipc_service_range *subscribed,
+ struct tipc_service_range *found)
+{
+ u32 found_lower = found->lower;
+ u32 found_upper = found->upper;
+
+ if (found_lower < subscribed->lower)
+ found_lower = subscribed->lower;
+ if (found_upper > subscribed->upper)
+ found_upper = subscribed->upper;
+ return found_lower <= found_upper;
+}
+
+void tipc_sub_report_overlap(struct tipc_subscription *sub,
+ struct publication *p,
+ u32 event, bool must)
+{
+ struct tipc_service_range *sr = &sub->s.seq;
+ u32 filter = sub->s.filter;
+
+ if (!tipc_sub_check_overlap(sr, &p->sr))
+ return;
+ if (!must && !(filter & TIPC_SUB_PORTS))
+ return;
+ if (filter & TIPC_SUB_CLUSTER_SCOPE && p->scope == TIPC_NODE_SCOPE)
+ return;
+ if (filter & TIPC_SUB_NODE_SCOPE && p->scope != TIPC_NODE_SCOPE)
+ return;
+ spin_lock(&sub->lock);
+ tipc_sub_send_event(sub, p, event);
+ spin_unlock(&sub->lock);
+}
+
+static void tipc_sub_timeout(struct timer_list *t)
+{
+ struct tipc_subscription *sub = from_timer(sub, t, timer);
+
+ spin_lock(&sub->lock);
+ tipc_sub_send_event(sub, NULL, TIPC_SUBSCR_TIMEOUT);
+ sub->inactive = true;
+ spin_unlock(&sub->lock);
+}
+
+static void tipc_sub_kref_release(struct kref *kref)
+{
+ kfree(container_of(kref, struct tipc_subscription, kref));
+}
+
+void tipc_sub_put(struct tipc_subscription *subscription)
+{
+ kref_put(&subscription->kref, tipc_sub_kref_release);
+}
+
+void tipc_sub_get(struct tipc_subscription *subscription)
+{
+ kref_get(&subscription->kref);
+}
+
+struct tipc_subscription *tipc_sub_subscribe(struct net *net,
+ struct tipc_subscr *s,
+ int conid)
+{
+ u32 lower = tipc_sub_read(s, seq.lower);
+ u32 upper = tipc_sub_read(s, seq.upper);
+ u32 filter = tipc_sub_read(s, filter);
+ struct tipc_subscription *sub;
+ u32 timeout;
+
+ if ((filter & TIPC_SUB_PORTS && filter & TIPC_SUB_SERVICE) ||
+ lower > upper) {
+ pr_warn("Subscription rejected, illegal request\n");
+ return NULL;
+ }
+ sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
+ if (!sub) {
+ pr_warn("Subscription rejected, no memory\n");
+ return NULL;
+ }
+ INIT_LIST_HEAD(&sub->service_list);
+ INIT_LIST_HEAD(&sub->sub_list);
+ sub->net = net;
+ sub->conid = conid;
+ sub->inactive = false;
+ memcpy(&sub->evt.s, s, sizeof(*s));
+ sub->s.seq.type = tipc_sub_read(s, seq.type);
+ sub->s.seq.lower = lower;
+ sub->s.seq.upper = upper;
+ sub->s.filter = filter;
+ sub->s.timeout = tipc_sub_read(s, timeout);
+ memcpy(sub->s.usr_handle, s->usr_handle, 8);
+ spin_lock_init(&sub->lock);
+ kref_init(&sub->kref);
+ if (!tipc_nametbl_subscribe(sub)) {
+ kfree(sub);
+ return NULL;
+ }
+ timer_setup(&sub->timer, tipc_sub_timeout, 0);
+ timeout = tipc_sub_read(&sub->evt.s, timeout);
+ if (timeout != TIPC_WAIT_FOREVER)
+ mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout));
+ return sub;
+}
+
+void tipc_sub_unsubscribe(struct tipc_subscription *sub)
+{
+ tipc_nametbl_unsubscribe(sub);
+ if (sub->evt.s.timeout != TIPC_WAIT_FOREVER)
+ del_timer_sync(&sub->timer);
+ list_del(&sub->sub_list);
+ tipc_sub_put(sub);
+}
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
new file mode 100644
index 0000000000..60b877531b
--- /dev/null
+++ b/net/tipc/subscr.h
@@ -0,0 +1,122 @@
+/*
+ * net/tipc/subscr.h: Include file for TIPC network topology service
+ *
+ * Copyright (c) 2003-2017, Ericsson AB
+ * Copyright (c) 2005-2007, 2012-2013, Wind River Systems
+ * Copyright (c) 2020-2021, Red Hat Inc
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_SUBSCR_H
+#define _TIPC_SUBSCR_H
+
+#include "topsrv.h"
+
+#define TIPC_MAX_SUBSCR 65535
+#define TIPC_MAX_PUBL 65535
+
+struct publication;
+struct tipc_subscription;
+struct tipc_conn;
+
+/**
+ * struct tipc_subscription - TIPC network topology subscription object
+ * @s: host-endian copy of the user subscription
+ * @evt: template for events generated by subscription
+ * @kref: reference count for this subscription
+ * @net: network namespace associated with subscription
+ * @timer: timer governing subscription duration (optional)
+ * @service_list: adjacent subscriptions in name sequence's subscription list
+ * @sub_list: adjacent subscriptions in subscriber's subscription list
+ * @conid: connection identifier of topology server
+ * @inactive: true if this subscription is inactive
+ * @lock: serialize up/down and timer events
+ */
+struct tipc_subscription {
+ struct tipc_subscr s;
+ struct tipc_event evt;
+ struct kref kref;
+ struct net *net;
+ struct timer_list timer;
+ struct list_head service_list;
+ struct list_head sub_list;
+ int conid;
+ bool inactive;
+ spinlock_t lock;
+};
+
+struct tipc_subscription *tipc_sub_subscribe(struct net *net,
+ struct tipc_subscr *s,
+ int conid);
+void tipc_sub_unsubscribe(struct tipc_subscription *sub);
+void tipc_sub_report_overlap(struct tipc_subscription *sub,
+ struct publication *p,
+ u32 event, bool must);
+
+int __net_init tipc_topsrv_init_net(struct net *net);
+void __net_exit tipc_topsrv_exit_net(struct net *net);
+
+void tipc_sub_put(struct tipc_subscription *subscription);
+void tipc_sub_get(struct tipc_subscription *subscription);
+
+#define TIPC_FILTER_MASK (TIPC_SUB_PORTS | TIPC_SUB_SERVICE | TIPC_SUB_CANCEL)
+
+/* tipc_sub_read - return field_ of struct sub_ in host endian format
+ */
+#define tipc_sub_read(sub_, field_) \
+ ({ \
+ struct tipc_subscr *sub__ = sub_; \
+ u32 val__ = (sub__)->field_; \
+ int swap_ = !((sub__)->filter & TIPC_FILTER_MASK); \
+ (swap_ ? swab32(val__) : val__); \
+ })
+
+/* tipc_sub_write - write val_ to field_ of struct sub_ in user endian format
+ */
+#define tipc_sub_write(sub_, field_, val_) \
+ ({ \
+ struct tipc_subscr *sub__ = sub_; \
+ u32 val__ = val_; \
+ int swap_ = !((sub__)->filter & TIPC_FILTER_MASK); \
+ (sub__)->field_ = swap_ ? swab32(val__) : val__; \
+ })
+
+/* tipc_evt_write - write val_ to field_ of struct evt_ in user endian format
+ */
+#define tipc_evt_write(evt_, field_, val_) \
+ ({ \
+ struct tipc_event *evt__ = evt_; \
+ u32 val__ = val_; \
+ int swap_ = !((evt__)->s.filter & (TIPC_FILTER_MASK)); \
+ (evt__)->field_ = swap_ ? swab32(val__) : val__; \
+ })
+
+#endif
diff --git a/net/tipc/sysctl.c b/net/tipc/sysctl.c
new file mode 100644
index 0000000000..9fb65c988f
--- /dev/null
+++ b/net/tipc/sysctl.c
@@ -0,0 +1,108 @@
+/*
+ * net/tipc/sysctl.c: sysctl interface to TIPC subsystem
+ *
+ * Copyright (c) 2013, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "trace.h"
+#include "crypto.h"
+#include "bcast.h"
+#include <linux/sysctl.h>
+
+static struct ctl_table_header *tipc_ctl_hdr;
+
+static struct ctl_table tipc_table[] = {
+ {
+ .procname = "tipc_rmem",
+ .data = &sysctl_tipc_rmem,
+ .maxlen = sizeof(sysctl_tipc_rmem),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ONE,
+ },
+ {
+ .procname = "named_timeout",
+ .data = &sysctl_tipc_named_timeout,
+ .maxlen = sizeof(sysctl_tipc_named_timeout),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ },
+ {
+ .procname = "sk_filter",
+ .data = &sysctl_tipc_sk_filter,
+ .maxlen = sizeof(sysctl_tipc_sk_filter),
+ .mode = 0644,
+ .proc_handler = proc_doulongvec_minmax,
+ },
+#ifdef CONFIG_TIPC_CRYPTO
+ {
+ .procname = "max_tfms",
+ .data = &sysctl_tipc_max_tfms,
+ .maxlen = sizeof(sysctl_tipc_max_tfms),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ONE,
+ },
+ {
+ .procname = "key_exchange_enabled",
+ .data = &sysctl_tipc_key_exchange_enabled,
+ .maxlen = sizeof(sysctl_tipc_key_exchange_enabled),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+#endif
+ {
+ .procname = "bc_retruni",
+ .data = &sysctl_tipc_bc_retruni,
+ .maxlen = sizeof(sysctl_tipc_bc_retruni),
+ .mode = 0644,
+ .proc_handler = proc_doulongvec_minmax,
+ },
+ {}
+};
+
+int tipc_register_sysctl(void)
+{
+ tipc_ctl_hdr = register_net_sysctl(&init_net, "net/tipc", tipc_table);
+ if (tipc_ctl_hdr == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+void tipc_unregister_sysctl(void)
+{
+ unregister_net_sysctl_table(tipc_ctl_hdr);
+}
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
new file mode 100644
index 0000000000..8ee0c07d00
--- /dev/null
+++ b/net/tipc/topsrv.c
@@ -0,0 +1,731 @@
+/*
+ * net/tipc/topsrv.c: TIPC server infrastructure
+ *
+ * Copyright (c) 2012-2013, Wind River Systems
+ * Copyright (c) 2017-2018, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "subscr.h"
+#include "topsrv.h"
+#include "core.h"
+#include "socket.h"
+#include "addr.h"
+#include "msg.h"
+#include "bearer.h"
+#include <net/sock.h>
+#include <linux/module.h>
+#include <trace/events/sock.h>
+
+/* Number of messages to send before rescheduling */
+#define MAX_SEND_MSG_COUNT 25
+#define MAX_RECV_MSG_COUNT 25
+#define CF_CONNECTED 1
+
+#define TIPC_SERVER_NAME_LEN 32
+
+/**
+ * struct tipc_topsrv - TIPC server structure
+ * @conn_idr: identifier set of connection
+ * @idr_lock: protect the connection identifier set
+ * @idr_in_use: number of allocated identifier entries
+ * @net: network namespace instance
+ * @awork: accept work item
+ * @rcv_wq: receive workqueue
+ * @send_wq: send workqueue
+ * @listener: topsrv listener socket
+ * @name: server name
+ */
+struct tipc_topsrv {
+ struct idr conn_idr;
+ spinlock_t idr_lock; /* for idr list */
+ int idr_in_use;
+ struct net *net;
+ struct work_struct awork;
+ struct workqueue_struct *rcv_wq;
+ struct workqueue_struct *send_wq;
+ struct socket *listener;
+ char name[TIPC_SERVER_NAME_LEN];
+};
+
+/**
+ * struct tipc_conn - TIPC connection structure
+ * @kref: reference counter to connection object
+ * @conid: connection identifier
+ * @sock: socket handler associated with connection
+ * @flags: indicates connection state
+ * @server: pointer to connected server
+ * @sub_list: list of all pertaining subscriptions
+ * @sub_lock: lock protecting the subscription list
+ * @rwork: receive work item
+ * @outqueue: pointer to first outbound message in queue
+ * @outqueue_lock: control access to the outqueue
+ * @swork: send work item
+ */
+struct tipc_conn {
+ struct kref kref;
+ int conid;
+ struct socket *sock;
+ unsigned long flags;
+ struct tipc_topsrv *server;
+ struct list_head sub_list;
+ spinlock_t sub_lock; /* for subscription list */
+ struct work_struct rwork;
+ struct list_head outqueue;
+ spinlock_t outqueue_lock; /* for outqueue */
+ struct work_struct swork;
+};
+
+/* An entry waiting to be sent */
+struct outqueue_entry {
+ bool inactive;
+ struct tipc_event evt;
+ struct list_head list;
+};
+
+static void tipc_conn_recv_work(struct work_struct *work);
+static void tipc_conn_send_work(struct work_struct *work);
+static void tipc_topsrv_kern_evt(struct net *net, struct tipc_event *evt);
+static void tipc_conn_delete_sub(struct tipc_conn *con, struct tipc_subscr *s);
+
+static bool connected(struct tipc_conn *con)
+{
+ return con && test_bit(CF_CONNECTED, &con->flags);
+}
+
+static void tipc_conn_kref_release(struct kref *kref)
+{
+ struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
+ struct tipc_topsrv *s = con->server;
+ struct outqueue_entry *e, *safe;
+
+ spin_lock_bh(&s->idr_lock);
+ idr_remove(&s->conn_idr, con->conid);
+ s->idr_in_use--;
+ spin_unlock_bh(&s->idr_lock);
+ if (con->sock)
+ sock_release(con->sock);
+
+ spin_lock_bh(&con->outqueue_lock);
+ list_for_each_entry_safe(e, safe, &con->outqueue, list) {
+ list_del(&e->list);
+ kfree(e);
+ }
+ spin_unlock_bh(&con->outqueue_lock);
+ kfree(con);
+}
+
+static void conn_put(struct tipc_conn *con)
+{
+ kref_put(&con->kref, tipc_conn_kref_release);
+}
+
+static void conn_get(struct tipc_conn *con)
+{
+ kref_get(&con->kref);
+}
+
+static void tipc_conn_close(struct tipc_conn *con)
+{
+ struct sock *sk = con->sock->sk;
+ bool disconnect = false;
+
+ write_lock_bh(&sk->sk_callback_lock);
+ disconnect = test_and_clear_bit(CF_CONNECTED, &con->flags);
+
+ if (disconnect) {
+ sk->sk_user_data = NULL;
+ tipc_conn_delete_sub(con, NULL);
+ }
+ write_unlock_bh(&sk->sk_callback_lock);
+
+ /* Handle concurrent calls from sending and receiving threads */
+ if (!disconnect)
+ return;
+
+	/* Don't flush pending works, just let them expire */
+ kernel_sock_shutdown(con->sock, SHUT_RDWR);
+
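+	/* Drop the connection reference held since tipc_conn_alloc() */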
+ conn_put(con);
+}
+
+static struct tipc_conn *tipc_conn_alloc(struct tipc_topsrv *s, struct socket *sock)
+{
+ struct tipc_conn *con;
+ int ret;
+
+ con = kzalloc(sizeof(*con), GFP_ATOMIC);
+ if (!con)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&con->kref);
+ INIT_LIST_HEAD(&con->outqueue);
+ INIT_LIST_HEAD(&con->sub_list);
+ spin_lock_init(&con->outqueue_lock);
+ spin_lock_init(&con->sub_lock);
+ INIT_WORK(&con->swork, tipc_conn_send_work);
+ INIT_WORK(&con->rwork, tipc_conn_recv_work);
+
+ spin_lock_bh(&s->idr_lock);
+ ret = idr_alloc(&s->conn_idr, con, 0, 0, GFP_ATOMIC);
+ if (ret < 0) {
+ kfree(con);
+ spin_unlock_bh(&s->idr_lock);
+ return ERR_PTR(-ENOMEM);
+ }
+ con->conid = ret;
+ s->idr_in_use++;
+
+ set_bit(CF_CONNECTED, &con->flags);
+ con->server = s;
+ con->sock = sock;
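+	/* Extra reference: dropped by the caller once setup of the new
+	 * connection is complete.
+	 */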
+ conn_get(con);
+ spin_unlock_bh(&s->idr_lock);
+
+ return con;
+}
+
+static struct tipc_conn *tipc_conn_lookup(struct tipc_topsrv *s, int conid)
+{
+ struct tipc_conn *con;
+
+ spin_lock_bh(&s->idr_lock);
+ con = idr_find(&s->conn_idr, conid);
+ if (!connected(con) || !kref_get_unless_zero(&con->kref))
+ con = NULL;
+ spin_unlock_bh(&s->idr_lock);
+ return con;
+}
+
+/* tipc_conn_delete_sub - delete a specific or all subscriptions
+ * for a given subscriber
+ */
+static void tipc_conn_delete_sub(struct tipc_conn *con, struct tipc_subscr *s)
+{
+ struct tipc_net *tn = tipc_net(con->server->net);
+ struct list_head *sub_list = &con->sub_list;
+ struct tipc_subscription *sub, *tmp;
+
+ spin_lock_bh(&con->sub_lock);
+ list_for_each_entry_safe(sub, tmp, sub_list, sub_list) {
+ if (!s || !memcmp(s, &sub->evt.s, sizeof(*s))) {
+ tipc_sub_unsubscribe(sub);
+ atomic_dec(&tn->subscription_count);
+ if (s)
+ break;
+ }
+ }
+ spin_unlock_bh(&con->sub_lock);
+}
+
+static void tipc_conn_send_to_sock(struct tipc_conn *con)
+{
+ struct list_head *queue = &con->outqueue;
+ struct tipc_topsrv *srv = con->server;
+ struct outqueue_entry *e;
+ struct tipc_event *evt;
+ struct msghdr msg;
+ struct kvec iov;
+ int count = 0;
+ int ret;
+
+ spin_lock_bh(&con->outqueue_lock);
+
+ while (!list_empty(queue)) {
+ e = list_first_entry(queue, struct outqueue_entry, list);
+ evt = &e->evt;
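+		/* Release the queue lock across the send: kernel_sendmsg()
+		 * may sleep and must not be called under a spinlock.
+		 */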
+ spin_unlock_bh(&con->outqueue_lock);
+
+ if (e->inactive)
+ tipc_conn_delete_sub(con, &evt->s);
+
+ memset(&msg, 0, sizeof(msg));
+ msg.msg_flags = MSG_DONTWAIT;
+ iov.iov_base = evt;
+ iov.iov_len = sizeof(*evt);
+ msg.msg_name = NULL;
+
+ if (con->sock) {
+ ret = kernel_sendmsg(con->sock, &msg, &iov,
+ 1, sizeof(*evt));
+ if (ret == -EWOULDBLOCK || ret == 0) {
+ cond_resched();
+ return;
+ } else if (ret < 0) {
+ return tipc_conn_close(con);
+ }
+ } else {
+ tipc_topsrv_kern_evt(srv->net, evt);
+ }
+
+ /* Don't starve users filling buffers */
+ if (++count >= MAX_SEND_MSG_COUNT) {
+ cond_resched();
+ count = 0;
+ }
+ spin_lock_bh(&con->outqueue_lock);
+ list_del(&e->list);
+ kfree(e);
+ }
+ spin_unlock_bh(&con->outqueue_lock);
+}
+
+static void tipc_conn_send_work(struct work_struct *work)
+{
+ struct tipc_conn *con = container_of(work, struct tipc_conn, swork);
+
+ if (connected(con))
+ tipc_conn_send_to_sock(con);
+
+ conn_put(con);
+}
+
+/* tipc_topsrv_queue_evt() - interrupt level call from a subscription instance
+ * The queued work is launched into tipc_conn_send_work()->tipc_conn_send_to_sock()
+ */
+void tipc_topsrv_queue_evt(struct net *net, int conid,
+ u32 event, struct tipc_event *evt)
+{
+ struct tipc_topsrv *srv = tipc_topsrv(net);
+ struct outqueue_entry *e;
+ struct tipc_conn *con;
+
+ con = tipc_conn_lookup(srv, conid);
+ if (!con)
+ return;
+
+ if (!connected(con))
+ goto err;
+
+ e = kmalloc(sizeof(*e), GFP_ATOMIC);
+ if (!e)
+ goto err;
+ e->inactive = (event == TIPC_SUBSCR_TIMEOUT);
+ memcpy(&e->evt, evt, sizeof(*evt));
+ spin_lock_bh(&con->outqueue_lock);
+ list_add_tail(&e->list, &con->outqueue);
+ spin_unlock_bh(&con->outqueue_lock);
+
+ if (queue_work(srv->send_wq, &con->swork))
+ return;
+err:
+ conn_put(con);
+}
+
+/* tipc_conn_write_space - interrupt callback after a sendmsg EAGAIN
+ * Indicates that there now is more space in the send buffer
+ * The queued work is launched into tipc_conn_send_work()->tipc_conn_send_to_sock()
+ */
+static void tipc_conn_write_space(struct sock *sk)
+{
+ struct tipc_conn *con;
+
+ read_lock_bh(&sk->sk_callback_lock);
+ con = sk->sk_user_data;
+ if (connected(con)) {
+ conn_get(con);
+ if (!queue_work(con->server->send_wq, &con->swork))
+ conn_put(con);
+ }
+ read_unlock_bh(&sk->sk_callback_lock);
+}
+
+static int tipc_conn_rcv_sub(struct tipc_topsrv *srv,
+ struct tipc_conn *con,
+ struct tipc_subscr *s)
+{
+ struct tipc_net *tn = tipc_net(srv->net);
+ struct tipc_subscription *sub;
+ u32 s_filter = tipc_sub_read(s, filter);
+
+ if (s_filter & TIPC_SUB_CANCEL) {
+ tipc_sub_write(s, filter, s_filter & ~TIPC_SUB_CANCEL);
+ tipc_conn_delete_sub(con, s);
+ return 0;
+ }
+ if (atomic_read(&tn->subscription_count) >= TIPC_MAX_SUBSCR) {
+ pr_warn("Subscription rejected, max (%u)\n", TIPC_MAX_SUBSCR);
+ return -1;
+ }
+ sub = tipc_sub_subscribe(srv->net, s, con->conid);
+ if (!sub)
+ return -1;
+ atomic_inc(&tn->subscription_count);
+ spin_lock_bh(&con->sub_lock);
+ list_add(&sub->sub_list, &con->sub_list);
+ spin_unlock_bh(&con->sub_lock);
+ return 0;
+}
+
+static int tipc_conn_rcv_from_sock(struct tipc_conn *con)
+{
+ struct tipc_topsrv *srv = con->server;
+ struct sock *sk = con->sock->sk;
+ struct msghdr msg = {};
+ struct tipc_subscr s;
+ struct kvec iov;
+ int ret;
+
+ iov.iov_base = &s;
+ iov.iov_len = sizeof(s);
+ msg.msg_name = NULL;
+ iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, iov.iov_len);
+ ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT);
+ if (ret == -EWOULDBLOCK)
+ return -EWOULDBLOCK;
+ if (ret == sizeof(s)) {
+ read_lock_bh(&sk->sk_callback_lock);
+ /* RACE: the connection can be closed in the meantime */
+ if (likely(connected(con)))
+ ret = tipc_conn_rcv_sub(srv, con, &s);
+ read_unlock_bh(&sk->sk_callback_lock);
+ if (!ret)
+ return 0;
+ }
+
+ tipc_conn_close(con);
+ return ret;
+}
+
+static void tipc_conn_recv_work(struct work_struct *work)
+{
+ struct tipc_conn *con = container_of(work, struct tipc_conn, rwork);
+ int count = 0;
+
+ while (connected(con)) {
+ if (tipc_conn_rcv_from_sock(con))
+ break;
+
+ /* Don't flood Rx machine */
+ if (++count >= MAX_RECV_MSG_COUNT) {
+ cond_resched();
+ count = 0;
+ }
+ }
+ conn_put(con);
+}
+
+/* tipc_conn_data_ready - interrupt callback indicating the socket has data
+ * The queued work is launched into tipc_conn_recv_work()->tipc_conn_rcv_from_sock()
+ */
+static void tipc_conn_data_ready(struct sock *sk)
+{
+ struct tipc_conn *con;
+
+ trace_sk_data_ready(sk);
+
+ read_lock_bh(&sk->sk_callback_lock);
+ con = sk->sk_user_data;
+ if (connected(con)) {
+ conn_get(con);
+ if (!queue_work(con->server->rcv_wq, &con->rwork))
+ conn_put(con);
+ }
+ read_unlock_bh(&sk->sk_callback_lock);
+}
+
+static void tipc_topsrv_accept(struct work_struct *work)
+{
+ struct tipc_topsrv *srv = container_of(work, struct tipc_topsrv, awork);
+ struct socket *newsock, *lsock;
+ struct tipc_conn *con;
+ struct sock *newsk;
+ int ret;
+
+ spin_lock_bh(&srv->idr_lock);
+ if (!srv->listener) {
+ spin_unlock_bh(&srv->idr_lock);
+ return;
+ }
+ lsock = srv->listener;
+ spin_unlock_bh(&srv->idr_lock);
+
+ while (1) {
+ ret = kernel_accept(lsock, &newsock, O_NONBLOCK);
+ if (ret < 0)
+ return;
+ con = tipc_conn_alloc(srv, newsock);
+ if (IS_ERR(con)) {
+ ret = PTR_ERR(con);
+ sock_release(newsock);
+ return;
+ }
+ /* Register callbacks */
+ newsk = newsock->sk;
+ write_lock_bh(&newsk->sk_callback_lock);
+ newsk->sk_data_ready = tipc_conn_data_ready;
+ newsk->sk_write_space = tipc_conn_write_space;
+ newsk->sk_user_data = con;
+ write_unlock_bh(&newsk->sk_callback_lock);
+
+ /* Wake up receive process in case of 'SYN+' message */
+ newsk->sk_data_ready(newsk);
+ conn_put(con);
+ }
+}
+
+/* tipc_topsrv_listener_data_ready - interrupt callback with connection request
+ * The queued job is launched into tipc_topsrv_accept()
+ */
+static void tipc_topsrv_listener_data_ready(struct sock *sk)
+{
+ struct tipc_topsrv *srv;
+
+ trace_sk_data_ready(sk);
+
+ read_lock_bh(&sk->sk_callback_lock);
+ srv = sk->sk_user_data;
+ if (srv)
+ queue_work(srv->rcv_wq, &srv->awork);
+ read_unlock_bh(&sk->sk_callback_lock);
+}
+
+static int tipc_topsrv_create_listener(struct tipc_topsrv *srv)
+{
+ struct socket *lsock = NULL;
+ struct sockaddr_tipc saddr;
+ struct sock *sk;
+ int rc;
+
+ rc = sock_create_kern(srv->net, AF_TIPC, SOCK_SEQPACKET, 0, &lsock);
+ if (rc < 0)
+ return rc;
+
+ srv->listener = lsock;
+ sk = lsock->sk;
+ write_lock_bh(&sk->sk_callback_lock);
+ sk->sk_data_ready = tipc_topsrv_listener_data_ready;
+ sk->sk_user_data = srv;
+ write_unlock_bh(&sk->sk_callback_lock);
+
+ lock_sock(sk);
+ rc = tsk_set_importance(sk, TIPC_CRITICAL_IMPORTANCE);
+ release_sock(sk);
+ if (rc < 0)
+ goto err;
+
+ saddr.family = AF_TIPC;
+ saddr.addrtype = TIPC_SERVICE_RANGE;
+ saddr.addr.nameseq.type = TIPC_TOP_SRV;
+ saddr.addr.nameseq.lower = TIPC_TOP_SRV;
+ saddr.addr.nameseq.upper = TIPC_TOP_SRV;
+ saddr.scope = TIPC_NODE_SCOPE;
+
+ rc = tipc_sk_bind(lsock, (struct sockaddr *)&saddr, sizeof(saddr));
+ if (rc < 0)
+ goto err;
+ rc = kernel_listen(lsock, 0);
+ if (rc < 0)
+ goto err;
+
+	/* The owner and creator of the server's listening socket is the TIPC
+	 * module itself, so we have to decrease the module reference count to
+	 * guarantee that it remains zero after the socket is created;
+	 * otherwise "rmmod" cannot remove the TIPC module once it has been
+	 * inserted.
+	 *
+	 * sock_create_kern() increases the reference count twice: once for
+	 * the owner of the TIPC socket's proto_ops struct, and once for the
+	 * owner of the TIPC proto struct. We must therefore decrement the
+	 * module reference count twice to keep it at zero after the listening
+	 * socket is created, and of course bump it twice again before the
+	 * socket is closed.
+	 */
+ module_put(lsock->ops->owner);
+ module_put(sk->sk_prot_creator->owner);
+
+ return 0;
+err:
+ sock_release(lsock);
+ return -EINVAL;
+}
+
+bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower,
+ u32 upper, u32 filter, int *conid)
+{
+ struct tipc_subscr sub;
+ struct tipc_conn *con;
+ int rc;
+
+ sub.seq.type = type;
+ sub.seq.lower = lower;
+ sub.seq.upper = upper;
+ sub.timeout = TIPC_WAIT_FOREVER;
+ sub.filter = filter;
+ *(u64 *)&sub.usr_handle = (u64)port;
+
+ con = tipc_conn_alloc(tipc_topsrv(net), NULL);
+ if (IS_ERR(con))
+ return false;
+
+ *conid = con->conid;
+ rc = tipc_conn_rcv_sub(tipc_topsrv(net), con, &sub);
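+	/* On success one reference remains, held for the lifetime of the
+	 * kernel subscription; on failure both references taken in
+	 * tipc_conn_alloc() are dropped and the connection is freed.
+	 */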
+ if (rc)
+ conn_put(con);
+
+ conn_put(con);
+ return !rc;
+}
+
+void tipc_topsrv_kern_unsubscr(struct net *net, int conid)
+{
+ struct tipc_conn *con;
+
+ con = tipc_conn_lookup(tipc_topsrv(net), conid);
+ if (!con)
+ return;
+
+ test_and_clear_bit(CF_CONNECTED, &con->flags);
+ tipc_conn_delete_sub(con, NULL);
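+	/* Drop both the reference taken by tipc_conn_lookup() and the one
+	 * held since the subscription was created, freeing the connection.
+	 */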
+ conn_put(con);
+ conn_put(con);
+}
+
+static void tipc_topsrv_kern_evt(struct net *net, struct tipc_event *evt)
+{
+ u32 port = *(u32 *)&evt->s.usr_handle;
+ u32 self = tipc_own_addr(net);
+ struct sk_buff_head evtq;
+ struct sk_buff *skb;
+
+ skb = tipc_msg_create(TOP_SRV, 0, INT_H_SIZE, sizeof(*evt),
+ self, self, port, port, 0);
+ if (!skb)
+ return;
+ msg_set_dest_droppable(buf_msg(skb), true);
+ memcpy(msg_data(buf_msg(skb)), evt, sizeof(*evt));
+ skb_queue_head_init(&evtq);
+ __skb_queue_tail(&evtq, skb);
+ tipc_loopback_trace(net, &evtq);
+ tipc_sk_rcv(net, &evtq);
+}
+
+static int tipc_topsrv_work_start(struct tipc_topsrv *s)
+{
+ s->rcv_wq = alloc_ordered_workqueue("tipc_rcv", 0);
+ if (!s->rcv_wq) {
+ pr_err("can't start tipc receive workqueue\n");
+ return -ENOMEM;
+ }
+
+ s->send_wq = alloc_ordered_workqueue("tipc_send", 0);
+ if (!s->send_wq) {
+ pr_err("can't start tipc send workqueue\n");
+ destroy_workqueue(s->rcv_wq);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void tipc_topsrv_work_stop(struct tipc_topsrv *s)
+{
+ destroy_workqueue(s->rcv_wq);
+ destroy_workqueue(s->send_wq);
+}
+
+static int tipc_topsrv_start(struct net *net)
+{
+ struct tipc_net *tn = tipc_net(net);
+ const char name[] = "topology_server";
+ struct tipc_topsrv *srv;
+ int ret;
+
+ srv = kzalloc(sizeof(*srv), GFP_ATOMIC);
+ if (!srv)
+ return -ENOMEM;
+
+ srv->net = net;
+ INIT_WORK(&srv->awork, tipc_topsrv_accept);
+
+ strscpy(srv->name, name, sizeof(srv->name));
+ tn->topsrv = srv;
+ atomic_set(&tn->subscription_count, 0);
+
+ spin_lock_init(&srv->idr_lock);
+ idr_init(&srv->conn_idr);
+ srv->idr_in_use = 0;
+
+ ret = tipc_topsrv_work_start(srv);
+ if (ret < 0)
+ goto err_start;
+
+ ret = tipc_topsrv_create_listener(srv);
+ if (ret < 0)
+ goto err_create;
+
+ return 0;
+
+err_create:
+ tipc_topsrv_work_stop(srv);
+err_start:
+ kfree(srv);
+ return ret;
+}
+
+static void tipc_topsrv_stop(struct net *net)
+{
+ struct tipc_topsrv *srv = tipc_topsrv(net);
+ struct socket *lsock = srv->listener;
+ struct tipc_conn *con;
+ int id;
+
+ spin_lock_bh(&srv->idr_lock);
+ for (id = 0; srv->idr_in_use; id++) {
+ con = idr_find(&srv->conn_idr, id);
+ if (con) {
+ spin_unlock_bh(&srv->idr_lock);
+ tipc_conn_close(con);
+ spin_lock_bh(&srv->idr_lock);
+ }
+ }
+ __module_get(lsock->ops->owner);
+ __module_get(lsock->sk->sk_prot_creator->owner);
+ srv->listener = NULL;
+ spin_unlock_bh(&srv->idr_lock);
+
+ tipc_topsrv_work_stop(srv);
+ sock_release(lsock);
+ idr_destroy(&srv->conn_idr);
+ kfree(srv);
+}
+
+int __net_init tipc_topsrv_init_net(struct net *net)
+{
+ return tipc_topsrv_start(net);
+}
+
+void __net_exit tipc_topsrv_exit_net(struct net *net)
+{
+ tipc_topsrv_stop(net);
+}
diff --git a/net/tipc/topsrv.h b/net/tipc/topsrv.h
new file mode 100644
index 0000000000..c7ea712937
--- /dev/null
+++ b/net/tipc/topsrv.h
@@ -0,0 +1,54 @@
+/*
+ * net/tipc/topsrv.h: Include file for TIPC server code
+ *
+ * Copyright (c) 2012-2013, Wind River Systems
+ * Copyright (c) 2017, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_SERVER_H
+#define _TIPC_SERVER_H
+
+#include "core.h"
+
+#define TIPC_SERVER_NAME_LEN 32
+#define TIPC_SUB_CLUSTER_SCOPE 0x20
+#define TIPC_SUB_NODE_SCOPE 0x40
+#define TIPC_SUB_NO_STATUS 0x80
+
+void tipc_topsrv_queue_evt(struct net *net, int conid,
+ u32 event, struct tipc_event *evt);
+
+bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower,
+ u32 upper, u32 filter, int *conid);
+void tipc_topsrv_kern_unsubscr(struct net *net, int conid);
+
+#endif
diff --git a/net/tipc/trace.c b/net/tipc/trace.c
new file mode 100644
index 0000000000..7d2931521e
--- /dev/null
+++ b/net/tipc/trace.c
@@ -0,0 +1,206 @@
+/*
+ * net/tipc/trace.c: TIPC tracepoints code
+ *
+ * Copyright (c) 2018, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+/*
+ * socket tuples for filtering in socket traces:
+ * (portid, sock type, name type, name lower, name upper)
+ */
+unsigned long sysctl_tipc_sk_filter[5] __read_mostly = {0, };
+
+/**
+ * tipc_skb_dump - dump TIPC skb data
+ * @skb: skb to be dumped
+ * @more: dump more?
+ * - false: dump only tipc msg data
+ * - true: dump kernel-related skb data and tipc cb[] array as well
+ * @buf: returned buffer of dump data in format
+ */
+int tipc_skb_dump(struct sk_buff *skb, bool more, char *buf)
+{
+ int i = 0;
+ size_t sz = (more) ? SKB_LMAX : SKB_LMIN;
+ struct tipc_msg *hdr;
+ struct tipc_skb_cb *skbcb;
+
+ if (!skb) {
+ i += scnprintf(buf, sz, "msg: (null)\n");
+ return i;
+ }
+
+ hdr = buf_msg(skb);
+ skbcb = TIPC_SKB_CB(skb);
+
+ /* tipc msg data section */
+ i += scnprintf(buf, sz, "msg: %u", msg_user(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_type(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_hdr_sz(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_data_sz(hdr));
+ i += scnprintf(buf + i, sz - i, " %x", msg_orignode(hdr));
+ i += scnprintf(buf + i, sz - i, " %x", msg_destnode(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_seqno(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_ack(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_bcast_ack(hdr));
+ switch (msg_user(hdr)) {
+ case LINK_PROTOCOL:
+ i += scnprintf(buf + i, sz - i, " %c", msg_net_plane(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_probe(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_peer_stopping(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_session(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_next_sent(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_seq_gap(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_bc_snd_nxt(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_bc_gap(hdr));
+ break;
+ case TIPC_LOW_IMPORTANCE:
+ case TIPC_MEDIUM_IMPORTANCE:
+ case TIPC_HIGH_IMPORTANCE:
+ case TIPC_CRITICAL_IMPORTANCE:
+ case CONN_MANAGER:
+ case SOCK_WAKEUP:
+ i += scnprintf(buf + i, sz - i, " | %u", msg_origport(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_destport(hdr));
+ switch (msg_type(hdr)) {
+ case TIPC_NAMED_MSG:
+ i += scnprintf(buf + i, sz - i, " %u",
+ msg_nametype(hdr));
+ i += scnprintf(buf + i, sz - i, " %u",
+ msg_nameinst(hdr));
+ break;
+ case TIPC_MCAST_MSG:
+ i += scnprintf(buf + i, sz - i, " %u",
+ msg_nametype(hdr));
+ i += scnprintf(buf + i, sz - i, " %u",
+ msg_namelower(hdr));
+ i += scnprintf(buf + i, sz - i, " %u",
+ msg_nameupper(hdr));
+ break;
+ default:
+ break;
+ }
+ i += scnprintf(buf + i, sz - i, " | %u",
+ msg_src_droppable(hdr));
+ i += scnprintf(buf + i, sz - i, " %u",
+ msg_dest_droppable(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_errcode(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_reroute_cnt(hdr));
+ break;
+ default:
+ /* need more? */
+ break;
+ }
+
+ i += scnprintf(buf + i, sz - i, "\n");
+ if (!more)
+ return i;
+
+ /* kernel-related skb data section */
+ i += scnprintf(buf + i, sz - i, "skb: %s",
+ (skb->dev) ? skb->dev->name : "n/a");
+ i += scnprintf(buf + i, sz - i, " %u", skb->len);
+ i += scnprintf(buf + i, sz - i, " %u", skb->data_len);
+ i += scnprintf(buf + i, sz - i, " %u", skb->hdr_len);
+ i += scnprintf(buf + i, sz - i, " %u", skb->truesize);
+ i += scnprintf(buf + i, sz - i, " %u", skb_cloned(skb));
+ i += scnprintf(buf + i, sz - i, " %p", skb->sk);
+ i += scnprintf(buf + i, sz - i, " %u", skb_shinfo(skb)->nr_frags);
+ i += scnprintf(buf + i, sz - i, " %llx",
+ ktime_to_ms(skb_get_ktime(skb)));
+ i += scnprintf(buf + i, sz - i, " %llx\n",
+ ktime_to_ms(skb_hwtstamps(skb)->hwtstamp));
+
+ /* tipc skb cb[] data section */
+ i += scnprintf(buf + i, sz - i, "cb[]: %u", skbcb->bytes_read);
+ i += scnprintf(buf + i, sz - i, " %u", skbcb->orig_member);
+ i += scnprintf(buf + i, sz - i, " %u",
+ jiffies_to_msecs(skbcb->nxt_retr));
+ i += scnprintf(buf + i, sz - i, " %u", skbcb->validated);
+ i += scnprintf(buf + i, sz - i, " %u", skbcb->chain_imp);
+ i += scnprintf(buf + i, sz - i, " %u\n", skbcb->ackers);
+
+ return i;
+}
+
+/**
+ * tipc_list_dump - dump TIPC skb list/queue
+ * @list: list of skbs to be dumped
+ * @more: dump more?
+ * - false: dump only the head & tail skbs
+ * - true: dump the first & last 5 skbs
+ * @buf: returned buffer of dump data in format
+ */
+int tipc_list_dump(struct sk_buff_head *list, bool more, char *buf)
+{
+ int i = 0;
+ size_t sz = (more) ? LIST_LMAX : LIST_LMIN;
+ u32 count, len;
+ struct sk_buff *hskb, *tskb, *skb, *tmp;
+
+ if (!list) {
+ i += scnprintf(buf, sz, "(null)\n");
+ return i;
+ }
+
+ len = skb_queue_len(list);
+ i += scnprintf(buf, sz, "len = %d\n", len);
+
+ if (!len)
+ return i;
+
+ if (!more) {
+ hskb = skb_peek(list);
+ i += scnprintf(buf + i, sz - i, " head ");
+ i += tipc_skb_dump(hskb, false, buf + i);
+ if (len > 1) {
+ tskb = skb_peek_tail(list);
+ i += scnprintf(buf + i, sz - i, " tail ");
+ i += tipc_skb_dump(tskb, false, buf + i);
+ }
+ } else {
+ count = 0;
+ skb_queue_walk_safe(list, skb, tmp) {
+ count++;
+ if (count == 6)
+ i += scnprintf(buf + i, sz - i, " .\n .\n");
+ if (count > 5 && count <= len - 5)
+ continue;
+ i += scnprintf(buf + i, sz - i, " #%d ", count);
+ i += tipc_skb_dump(skb, false, buf + i);
+ }
+ }
+ return i;
+}
diff --git a/net/tipc/trace.h b/net/tipc/trace.h
new file mode 100644
index 0000000000..04af83f050
--- /dev/null
+++ b/net/tipc/trace.h
@@ -0,0 +1,434 @@
+/*
+ * net/tipc/trace.h: TIPC tracepoints
+ *
+ * Copyright (c) 2018, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM tipc
+
+#if !defined(_TIPC_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TIPC_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "core.h"
+#include "link.h"
+#include "socket.h"
+#include "node.h"
+
+#define SKB_LMIN (100)
+#define SKB_LMAX (SKB_LMIN * 2)
+#define LIST_LMIN (SKB_LMIN * 3)
+#define LIST_LMAX (SKB_LMIN * 11)
+#define SK_LMIN (SKB_LMIN * 2)
+#define SK_LMAX (SKB_LMIN * 11)
+#define LINK_LMIN (SKB_LMIN)
+#define LINK_LMAX (SKB_LMIN * 16)
+#define NODE_LMIN (SKB_LMIN)
+#define NODE_LMAX (SKB_LMIN * 11)
+
+#ifndef __TIPC_TRACE_ENUM
+#define __TIPC_TRACE_ENUM
+enum {
+ TIPC_DUMP_NONE = 0,
+
+ TIPC_DUMP_TRANSMQ = 1,
+ TIPC_DUMP_BACKLOGQ = (1 << 1),
+ TIPC_DUMP_DEFERDQ = (1 << 2),
+ TIPC_DUMP_INPUTQ = (1 << 3),
+ TIPC_DUMP_WAKEUP = (1 << 4),
+
+ TIPC_DUMP_SK_SNDQ = (1 << 8),
+ TIPC_DUMP_SK_RCVQ = (1 << 9),
+ TIPC_DUMP_SK_BKLGQ = (1 << 10),
+ TIPC_DUMP_ALL = 0xffffu
+};
+#endif
+
+/* Link & Node FSM states: */
+#define state_sym(val) \
+ __print_symbolic(val, \
+ {(0xe), "ESTABLISHED" },\
+ {(0xe << 4), "ESTABLISHING" },\
+ {(0x1 << 8), "RESET" },\
+ {(0x2 << 12), "RESETTING" },\
+ {(0xd << 16), "PEER_RESET" },\
+ {(0xf << 20), "FAILINGOVER" },\
+ {(0xc << 24), "SYNCHING" },\
+ {(0xdd), "SELF_DOWN_PEER_DOWN" },\
+ {(0xaa), "SELF_UP_PEER_UP" },\
+ {(0xd1), "SELF_DOWN_PEER_LEAVING" },\
+ {(0xac), "SELF_UP_PEER_COMING" },\
+ {(0xca), "SELF_COMING_PEER_UP" },\
+ {(0x1d), "SELF_LEAVING_PEER_DOWN" },\
+ {(0xf0), "FAILINGOVER" },\
+ {(0xcc), "SYNCHING" })
+
+/* Link & Node FSM events: */
+#define evt_sym(val) \
+ __print_symbolic(val, \
+ {(0xec1ab1e), "ESTABLISH_EVT" },\
+ {(0x9eed0e), "PEER_RESET_EVT" },\
+ {(0xfa110e), "FAILURE_EVT" },\
+ {(0x10ca1d0e), "RESET_EVT" },\
+ {(0xfa110bee), "FAILOVER_BEGIN_EVT" },\
+ {(0xfa110ede), "FAILOVER_END_EVT" },\
+ {(0xc1ccbee), "SYNCH_BEGIN_EVT" },\
+ {(0xc1ccede), "SYNCH_END_EVT" },\
+ {(0xece), "SELF_ESTABL_CONTACT_EVT" },\
+ {(0x1ce), "SELF_LOST_CONTACT_EVT" },\
+ {(0x9ece), "PEER_ESTABL_CONTACT_EVT" },\
+ {(0x91ce), "PEER_LOST_CONTACT_EVT" },\
+ {(0xfbe), "FAILOVER_BEGIN_EVT" },\
+ {(0xfee), "FAILOVER_END_EVT" },\
+ {(0xcbe), "SYNCH_BEGIN_EVT" },\
+ {(0xcee), "SYNCH_END_EVT" })
+
+/* Bearer, net device events: */
+#define dev_evt_sym(val) \
+ __print_symbolic(val, \
+ {(NETDEV_CHANGE), "NETDEV_CHANGE" },\
+ {(NETDEV_GOING_DOWN), "NETDEV_GOING_DOWN" },\
+ {(NETDEV_UP), "NETDEV_UP" },\
+ {(NETDEV_CHANGEMTU), "NETDEV_CHANGEMTU" },\
+ {(NETDEV_CHANGEADDR), "NETDEV_CHANGEADDR" },\
+ {(NETDEV_UNREGISTER), "NETDEV_UNREGISTER" },\
+ {(NETDEV_CHANGENAME), "NETDEV_CHANGENAME" })
+
+extern unsigned long sysctl_tipc_sk_filter[5] __read_mostly;
+
+int tipc_skb_dump(struct sk_buff *skb, bool more, char *buf);
+int tipc_list_dump(struct sk_buff_head *list, bool more, char *buf);
+int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf);
+int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf);
+int tipc_node_dump(struct tipc_node *n, bool more, char *buf);
+bool tipc_sk_filtering(struct sock *sk);
+
+DECLARE_EVENT_CLASS(tipc_skb_class,
+
+ TP_PROTO(struct sk_buff *skb, bool more, const char *header),
+
+ TP_ARGS(skb, more, header),
+
+ TP_STRUCT__entry(
+ __string(header, header)
+ __dynamic_array(char, buf, (more) ? SKB_LMAX : SKB_LMIN)
+ ),
+
+ TP_fast_assign(
+ __assign_str(header, header);
+ tipc_skb_dump(skb, more, __get_str(buf));
+ ),
+
+ TP_printk("%s\n%s", __get_str(header), __get_str(buf))
+)
+
+#define DEFINE_SKB_EVENT(name) \
+DEFINE_EVENT(tipc_skb_class, name, \
+ TP_PROTO(struct sk_buff *skb, bool more, const char *header), \
+ TP_ARGS(skb, more, header))
+DEFINE_SKB_EVENT(tipc_skb_dump);
+DEFINE_SKB_EVENT(tipc_proto_build);
+DEFINE_SKB_EVENT(tipc_proto_rcv);
+
+DECLARE_EVENT_CLASS(tipc_list_class,
+
+ TP_PROTO(struct sk_buff_head *list, bool more, const char *header),
+
+ TP_ARGS(list, more, header),
+
+ TP_STRUCT__entry(
+ __string(header, header)
+ __dynamic_array(char, buf, (more) ? LIST_LMAX : LIST_LMIN)
+ ),
+
+ TP_fast_assign(
+ __assign_str(header, header);
+ tipc_list_dump(list, more, __get_str(buf));
+ ),
+
+ TP_printk("%s\n%s", __get_str(header), __get_str(buf))
+);
+
+#define DEFINE_LIST_EVENT(name) \
+DEFINE_EVENT(tipc_list_class, name, \
+ TP_PROTO(struct sk_buff_head *list, bool more, const char *header), \
+ TP_ARGS(list, more, header))
+DEFINE_LIST_EVENT(tipc_list_dump);
+
+DECLARE_EVENT_CLASS(tipc_sk_class,
+
+ TP_PROTO(struct sock *sk, struct sk_buff *skb, u16 dqueues,
+ const char *header),
+
+ TP_ARGS(sk, skb, dqueues, header),
+
+ TP_STRUCT__entry(
+ __string(header, header)
+ __field(u32, portid)
+ __dynamic_array(char, buf, (dqueues) ? SK_LMAX : SK_LMIN)
+ __dynamic_array(char, skb_buf, (skb) ? SKB_LMIN : 1)
+ ),
+
+ TP_fast_assign(
+ __assign_str(header, header);
+ __entry->portid = tipc_sock_get_portid(sk);
+ tipc_sk_dump(sk, dqueues, __get_str(buf));
+ if (skb)
+ tipc_skb_dump(skb, false, __get_str(skb_buf));
+ else
+ *(__get_str(skb_buf)) = '\0';
+ ),
+
+ TP_printk("<%u> %s\n%s%s", __entry->portid, __get_str(header),
+ __get_str(skb_buf), __get_str(buf))
+);
+
+#define DEFINE_SK_EVENT_FILTER(name) \
+DEFINE_EVENT_CONDITION(tipc_sk_class, name, \
+ TP_PROTO(struct sock *sk, struct sk_buff *skb, u16 dqueues, \
+ const char *header), \
+ TP_ARGS(sk, skb, dqueues, header), \
+ TP_CONDITION(tipc_sk_filtering(sk)))
+DEFINE_SK_EVENT_FILTER(tipc_sk_dump);
+DEFINE_SK_EVENT_FILTER(tipc_sk_create);
+DEFINE_SK_EVENT_FILTER(tipc_sk_sendmcast);
+DEFINE_SK_EVENT_FILTER(tipc_sk_sendmsg);
+DEFINE_SK_EVENT_FILTER(tipc_sk_sendstream);
+DEFINE_SK_EVENT_FILTER(tipc_sk_poll);
+DEFINE_SK_EVENT_FILTER(tipc_sk_filter_rcv);
+DEFINE_SK_EVENT_FILTER(tipc_sk_advance_rx);
+DEFINE_SK_EVENT_FILTER(tipc_sk_rej_msg);
+DEFINE_SK_EVENT_FILTER(tipc_sk_drop_msg);
+DEFINE_SK_EVENT_FILTER(tipc_sk_release);
+DEFINE_SK_EVENT_FILTER(tipc_sk_shutdown);
+
+#define DEFINE_SK_EVENT_FILTER_COND(name, cond) \
+DEFINE_EVENT_CONDITION(tipc_sk_class, name, \
+ TP_PROTO(struct sock *sk, struct sk_buff *skb, u16 dqueues, \
+ const char *header), \
+ TP_ARGS(sk, skb, dqueues, header), \
+ TP_CONDITION(tipc_sk_filtering(sk) && (cond)))
+DEFINE_SK_EVENT_FILTER_COND(tipc_sk_overlimit1, tipc_sk_overlimit1(sk, skb));
+DEFINE_SK_EVENT_FILTER_COND(tipc_sk_overlimit2, tipc_sk_overlimit2(sk, skb));
+
+DECLARE_EVENT_CLASS(tipc_link_class,
+
+ TP_PROTO(struct tipc_link *l, u16 dqueues, const char *header),
+
+ TP_ARGS(l, dqueues, header),
+
+ TP_STRUCT__entry(
+ __string(header, header)
+ __array(char, name, TIPC_MAX_LINK_NAME)
+ __dynamic_array(char, buf, (dqueues) ? LINK_LMAX : LINK_LMIN)
+ ),
+
+ TP_fast_assign(
+ __assign_str(header, header);
+ memcpy(__entry->name, tipc_link_name(l), TIPC_MAX_LINK_NAME);
+ tipc_link_dump(l, dqueues, __get_str(buf));
+ ),
+
+ TP_printk("<%s> %s\n%s", __entry->name, __get_str(header),
+ __get_str(buf))
+);
+
+#define DEFINE_LINK_EVENT(name) \
+DEFINE_EVENT(tipc_link_class, name, \
+ TP_PROTO(struct tipc_link *l, u16 dqueues, const char *header), \
+ TP_ARGS(l, dqueues, header))
+DEFINE_LINK_EVENT(tipc_link_dump);
+DEFINE_LINK_EVENT(tipc_link_conges);
+DEFINE_LINK_EVENT(tipc_link_timeout);
+DEFINE_LINK_EVENT(tipc_link_reset);
+
+#define DEFINE_LINK_EVENT_COND(name, cond) \
+DEFINE_EVENT_CONDITION(tipc_link_class, name, \
+ TP_PROTO(struct tipc_link *l, u16 dqueues, const char *header), \
+ TP_ARGS(l, dqueues, header), \
+ TP_CONDITION(cond))
+DEFINE_LINK_EVENT_COND(tipc_link_too_silent, tipc_link_too_silent(l));
+
+DECLARE_EVENT_CLASS(tipc_link_transmq_class,
+
+ TP_PROTO(struct tipc_link *r, u16 f, u16 t, struct sk_buff_head *tq),
+
+ TP_ARGS(r, f, t, tq),
+
+ TP_STRUCT__entry(
+ __array(char, name, TIPC_MAX_LINK_NAME)
+ __field(u16, from)
+ __field(u16, to)
+ __field(u32, len)
+ __field(u16, fseqno)
+ __field(u16, lseqno)
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->name, tipc_link_name(r), TIPC_MAX_LINK_NAME);
+ __entry->from = f;
+ __entry->to = t;
+ __entry->len = skb_queue_len(tq);
+ __entry->fseqno = __entry->len ?
+ msg_seqno(buf_msg(skb_peek(tq))) : 0;
+ __entry->lseqno = __entry->len ?
+ msg_seqno(buf_msg(skb_peek_tail(tq))) : 0;
+ ),
+
+ TP_printk("<%s> retrans req: [%u-%u] transmq: %u [%u-%u]\n",
+ __entry->name, __entry->from, __entry->to,
+ __entry->len, __entry->fseqno, __entry->lseqno)
+);
+
+DEFINE_EVENT_CONDITION(tipc_link_transmq_class, tipc_link_retrans,
+ TP_PROTO(struct tipc_link *r, u16 f, u16 t, struct sk_buff_head *tq),
+ TP_ARGS(r, f, t, tq),
+ TP_CONDITION(less_eq(f, t))
+);
+
+DEFINE_EVENT_PRINT(tipc_link_transmq_class, tipc_link_bc_ack,
+ TP_PROTO(struct tipc_link *r, u16 f, u16 t, struct sk_buff_head *tq),
+ TP_ARGS(r, f, t, tq),
+ TP_printk("<%s> acked: %u gap: %u transmq: %u [%u-%u]\n",
+ __entry->name, __entry->from, __entry->to,
+ __entry->len, __entry->fseqno, __entry->lseqno)
+);
+
+DECLARE_EVENT_CLASS(tipc_node_class,
+
+ TP_PROTO(struct tipc_node *n, bool more, const char *header),
+
+ TP_ARGS(n, more, header),
+
+ TP_STRUCT__entry(
+ __string(header, header)
+ __field(u32, addr)
+ __dynamic_array(char, buf, (more) ? NODE_LMAX : NODE_LMIN)
+ ),
+
+ TP_fast_assign(
+ __assign_str(header, header);
+ __entry->addr = tipc_node_get_addr(n);
+ tipc_node_dump(n, more, __get_str(buf));
+ ),
+
+ TP_printk("<%x> %s\n%s", __entry->addr, __get_str(header),
+ __get_str(buf))
+);
+
+#define DEFINE_NODE_EVENT(name) \
+DEFINE_EVENT(tipc_node_class, name, \
+ TP_PROTO(struct tipc_node *n, bool more, const char *header), \
+ TP_ARGS(n, more, header))
+DEFINE_NODE_EVENT(tipc_node_dump);
+DEFINE_NODE_EVENT(tipc_node_create);
+DEFINE_NODE_EVENT(tipc_node_delete);
+DEFINE_NODE_EVENT(tipc_node_lost_contact);
+DEFINE_NODE_EVENT(tipc_node_timeout);
+DEFINE_NODE_EVENT(tipc_node_link_up);
+DEFINE_NODE_EVENT(tipc_node_link_down);
+DEFINE_NODE_EVENT(tipc_node_reset_links);
+DEFINE_NODE_EVENT(tipc_node_check_state);
+
+DECLARE_EVENT_CLASS(tipc_fsm_class,
+
+ TP_PROTO(const char *name, u32 os, u32 ns, int evt),
+
+ TP_ARGS(name, os, ns, evt),
+
+ TP_STRUCT__entry(
+ __string(name, name)
+ __field(u32, os)
+ __field(u32, ns)
+ __field(u32, evt)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, name);
+ __entry->os = os;
+ __entry->ns = ns;
+ __entry->evt = evt;
+ ),
+
+ TP_printk("<%s> %s--(%s)->%s\n", __get_str(name),
+ state_sym(__entry->os), evt_sym(__entry->evt),
+ state_sym(__entry->ns))
+);
+
+#define DEFINE_FSM_EVENT(fsm_name) \
+DEFINE_EVENT(tipc_fsm_class, fsm_name, \
+ TP_PROTO(const char *name, u32 os, u32 ns, int evt), \
+ TP_ARGS(name, os, ns, evt))
+DEFINE_FSM_EVENT(tipc_link_fsm);
+DEFINE_FSM_EVENT(tipc_node_fsm);
+
+TRACE_EVENT(tipc_l2_device_event,
+
+ TP_PROTO(struct net_device *dev, struct tipc_bearer *b,
+ unsigned long evt),
+
+ TP_ARGS(dev, b, evt),
+
+ TP_STRUCT__entry(
+ __string(dev_name, dev->name)
+ __string(b_name, b->name)
+ __field(unsigned long, evt)
+ __field(u8, b_up)
+ __field(u8, carrier)
+ __field(u8, oper)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev_name, dev->name);
+ __assign_str(b_name, b->name);
+ __entry->evt = evt;
+ __entry->b_up = test_bit(0, &b->up);
+ __entry->carrier = netif_carrier_ok(dev);
+ __entry->oper = netif_oper_up(dev);
+ ),
+
+ TP_printk("%s on: <%s>/<%s> oper: %s carrier: %s bearer: %s\n",
+ dev_evt_sym(__entry->evt), __get_str(dev_name),
+ __get_str(b_name), (__entry->oper) ? "up" : "down",
+ (__entry->carrier) ? "ok" : "notok",
+ (__entry->b_up) ? "up" : "down")
+);
+
+#endif /* _TIPC_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
new file mode 100644
index 0000000000..f892b0903d
--- /dev/null
+++ b/net/tipc/udp_media.c
@@ -0,0 +1,855 @@
+/* net/tipc/udp_media.c: IP bearer support for TIPC
+ *
+ * Copyright (c) 2015, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/socket.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/inet.h>
+#include <linux/inetdevice.h>
+#include <linux/igmp.h>
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include <linux/list.h>
+#include <net/sock.h>
+#include <net/ip.h>
+#include <net/udp_tunnel.h>
+#include <net/ipv6_stubs.h>
+#include <linux/tipc_netlink.h>
+#include "core.h"
+#include "addr.h"
+#include "net.h"
+#include "bearer.h"
+#include "netlink.h"
+#include "msg.h"
+#include "udp_media.h"
+
+/* IANA assigned UDP port */
+#define UDP_PORT_DEFAULT 6118
+
+#define UDP_MIN_HEADROOM 48
+
+/**
+ * struct udp_media_addr - IP/UDP addressing information
+ *
+ * This is the bearer level originating address used in neighbor discovery
+ * messages, and all fields should be in network byte order
+ *
+ * @proto: Ethernet protocol in use
+ * @port: port being used
+ * @ipv4: IPv4 address of neighbor
+ * @ipv6: IPv6 address of neighbor
+ */
+struct udp_media_addr {
+ __be16 proto;
+ __be16 port;
+ union {
+ struct in_addr ipv4;
+ struct in6_addr ipv6;
+ };
+};
+
+/* struct udp_replicast - container for UDP remote addresses */
+struct udp_replicast {
+ struct udp_media_addr addr;
+ struct dst_cache dst_cache;
+ struct rcu_head rcu;
+ struct list_head list;
+};
+
+/**
+ * struct udp_bearer - ip/udp bearer data structure
+ * @bearer: associated generic tipc bearer
+ * @ubsock: bearer associated socket
+ * @ifindex: local address scope
+ * @work: used to schedule deferred work on a bearer
+ * @rcast: associated udp_replicast container
+ */
+struct udp_bearer {
+ struct tipc_bearer __rcu *bearer;
+ struct socket *ubsock;
+ u32 ifindex;
+ struct work_struct work;
+ struct udp_replicast rcast;
+};
+
+static int tipc_udp_is_mcast_addr(struct udp_media_addr *addr)
+{
+ if (ntohs(addr->proto) == ETH_P_IP)
+ return ipv4_is_multicast(addr->ipv4.s_addr);
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ return ipv6_addr_is_multicast(&addr->ipv6);
+#endif
+ return 0;
+}
+
+/* tipc_udp_media_addr_set - convert an ip/udp address to a TIPC media address */
+static void tipc_udp_media_addr_set(struct tipc_media_addr *addr,
+ struct udp_media_addr *ua)
+{
+ memset(addr, 0, sizeof(struct tipc_media_addr));
+ addr->media_id = TIPC_MEDIA_TYPE_UDP;
+ memcpy(addr->value, ua, sizeof(struct udp_media_addr));
+
+ if (tipc_udp_is_mcast_addr(ua))
+ addr->broadcast = TIPC_BROADCAST_SUPPORT;
+}
+
+/* tipc_udp_addr2str - convert ip/udp address to string */
+static int tipc_udp_addr2str(struct tipc_media_addr *a, char *buf, int size)
+{
+ struct udp_media_addr *ua = (struct udp_media_addr *)&a->value;
+
+ if (ntohs(ua->proto) == ETH_P_IP)
+ snprintf(buf, size, "%pI4:%u", &ua->ipv4, ntohs(ua->port));
+ else if (ntohs(ua->proto) == ETH_P_IPV6)
+ snprintf(buf, size, "%pI6:%u", &ua->ipv6, ntohs(ua->port));
+ else
+ pr_err("Invalid UDP media address\n");
+ return 0;
+}
+
+/* tipc_udp_msg2addr - extract an ip/udp address from a TIPC ndisc message */
+static int tipc_udp_msg2addr(struct tipc_bearer *b, struct tipc_media_addr *a,
+ char *msg)
+{
+ struct udp_media_addr *ua;
+
+ ua = (struct udp_media_addr *) (msg + TIPC_MEDIA_ADDR_OFFSET);
+ if (msg[TIPC_MEDIA_TYPE_OFFSET] != TIPC_MEDIA_TYPE_UDP)
+ return -EINVAL;
+ tipc_udp_media_addr_set(a, ua);
+ return 0;
+}
+
+/* tipc_udp_addr2msg - write an ip/udp address to a TIPC ndisc message */
+static int tipc_udp_addr2msg(char *msg, struct tipc_media_addr *a)
+{
+ memset(msg, 0, TIPC_MEDIA_INFO_SIZE);
+ msg[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_UDP;
+ memcpy(msg + TIPC_MEDIA_ADDR_OFFSET, a->value,
+ sizeof(struct udp_media_addr));
+ return 0;
+}
+
+/* tipc_udp_xmit - send a packet via the bearer's UDP tunnel socket */
+static int tipc_udp_xmit(struct net *net, struct sk_buff *skb,
+ struct udp_bearer *ub, struct udp_media_addr *src,
+ struct udp_media_addr *dst, struct dst_cache *cache)
+{
+ struct dst_entry *ndst;
+ int ttl, err = 0;
+
+ local_bh_disable();
+ ndst = dst_cache_get(cache);
+ if (dst->proto == htons(ETH_P_IP)) {
+ struct rtable *rt = (struct rtable *)ndst;
+
+ if (!rt) {
+ struct flowi4 fl = {
+ .daddr = dst->ipv4.s_addr,
+ .saddr = src->ipv4.s_addr,
+ .flowi4_mark = skb->mark,
+ .flowi4_proto = IPPROTO_UDP
+ };
+ rt = ip_route_output_key(net, &fl);
+ if (IS_ERR(rt)) {
+ err = PTR_ERR(rt);
+ goto tx_error;
+ }
+ dst_cache_set_ip4(cache, &rt->dst, fl.saddr);
+ }
+
+ ttl = ip4_dst_hoplimit(&rt->dst);
+ udp_tunnel_xmit_skb(rt, ub->ubsock->sk, skb, src->ipv4.s_addr,
+ dst->ipv4.s_addr, 0, ttl, 0, src->port,
+ dst->port, false, true);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ if (!ndst) {
+ struct flowi6 fl6 = {
+ .flowi6_oif = ub->ifindex,
+ .daddr = dst->ipv6,
+ .saddr = src->ipv6,
+ .flowi6_proto = IPPROTO_UDP
+ };
+ ndst = ipv6_stub->ipv6_dst_lookup_flow(net,
+ ub->ubsock->sk,
+ &fl6, NULL);
+ if (IS_ERR(ndst)) {
+ err = PTR_ERR(ndst);
+ goto tx_error;
+ }
+ dst_cache_set_ip6(cache, ndst, &fl6.saddr);
+ }
+ ttl = ip6_dst_hoplimit(ndst);
+ err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb, NULL,
+ &src->ipv6, &dst->ipv6, 0, ttl, 0,
+ src->port, dst->port, false);
+#endif
+ }
+ local_bh_enable();
+ return err;
+
+tx_error:
+ local_bh_enable();
+ kfree_skb(skb);
+ return err;
+}
+
+static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
+ struct tipc_bearer *b,
+ struct tipc_media_addr *addr)
+{
+ struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value;
+ struct udp_media_addr *dst = (struct udp_media_addr *)&addr->value;
+ struct udp_replicast *rcast;
+ struct udp_bearer *ub;
+ int err = 0;
+
+ if (skb_headroom(skb) < UDP_MIN_HEADROOM) {
+ err = pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC);
+ if (err)
+ goto out;
+ }
+
+ skb_set_inner_protocol(skb, htons(ETH_P_TIPC));
+ ub = rcu_dereference(b->media_ptr);
+ if (!ub) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ if (addr->broadcast != TIPC_REPLICAST_SUPPORT)
+ return tipc_udp_xmit(net, skb, ub, src, dst,
+ &ub->rcast.dst_cache);
+
+ /* Replicast, send an skb to each configured IP address */
+ list_for_each_entry_rcu(rcast, &ub->rcast.list, list) {
+ struct sk_buff *_skb;
+
+ _skb = pskb_copy(skb, GFP_ATOMIC);
+ if (!_skb) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = tipc_udp_xmit(net, _skb, ub, src, &rcast->addr,
+ &rcast->dst_cache);
+ if (err)
+ goto out;
+ }
+ err = 0;
+out:
+ kfree_skb(skb);
+ return err;
+}
+
+static bool tipc_udp_is_known_peer(struct tipc_bearer *b,
+ struct udp_media_addr *addr)
+{
+ struct udp_replicast *rcast, *tmp;
+ struct udp_bearer *ub;
+
+ ub = rcu_dereference_rtnl(b->media_ptr);
+ if (!ub) {
+ pr_err_ratelimited("UDP bearer instance not found\n");
+ return false;
+ }
+
+ list_for_each_entry_safe(rcast, tmp, &ub->rcast.list, list) {
+ if (!memcmp(&rcast->addr, addr, sizeof(struct udp_media_addr)))
+ return true;
+ }
+
+ return false;
+}
+
+static int tipc_udp_rcast_add(struct tipc_bearer *b,
+ struct udp_media_addr *addr)
+{
+ struct udp_replicast *rcast;
+ struct udp_bearer *ub;
+
+ ub = rcu_dereference_rtnl(b->media_ptr);
+ if (!ub)
+ return -ENODEV;
+
+ rcast = kmalloc(sizeof(*rcast), GFP_ATOMIC);
+ if (!rcast)
+ return -ENOMEM;
+
+ if (dst_cache_init(&rcast->dst_cache, GFP_ATOMIC)) {
+ kfree(rcast);
+ return -ENOMEM;
+ }
+
+ memcpy(&rcast->addr, addr, sizeof(struct udp_media_addr));
+
+ if (ntohs(addr->proto) == ETH_P_IP)
+ pr_info("New replicast peer: %pI4\n", &rcast->addr.ipv4);
+#if IS_ENABLED(CONFIG_IPV6)
+ else if (ntohs(addr->proto) == ETH_P_IPV6)
+ pr_info("New replicast peer: %pI6\n", &rcast->addr.ipv6);
+#endif
+ b->bcast_addr.broadcast = TIPC_REPLICAST_SUPPORT;
+ list_add_rcu(&rcast->list, &ub->rcast.list);
+ return 0;
+}
+
+static int tipc_udp_rcast_disc(struct tipc_bearer *b, struct sk_buff *skb)
+{
+ struct udp_media_addr src = {0};
+ struct udp_media_addr *dst;
+
+ dst = (struct udp_media_addr *)&b->bcast_addr.value;
+ if (tipc_udp_is_mcast_addr(dst))
+ return 0;
+
+ src.port = udp_hdr(skb)->source;
+
+ if (ip_hdr(skb)->version == 4) {
+ struct iphdr *iphdr = ip_hdr(skb);
+
+ src.proto = htons(ETH_P_IP);
+ src.ipv4.s_addr = iphdr->saddr;
+ if (ipv4_is_multicast(iphdr->daddr))
+ return 0;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (ip_hdr(skb)->version == 6) {
+ struct ipv6hdr *iphdr = ipv6_hdr(skb);
+
+ src.proto = htons(ETH_P_IPV6);
+ src.ipv6 = iphdr->saddr;
+ if (ipv6_addr_is_multicast(&iphdr->daddr))
+ return 0;
+#endif
+ } else {
+ return 0;
+ }
+
+ if (likely(tipc_udp_is_known_peer(b, &src)))
+ return 0;
+
+ return tipc_udp_rcast_add(b, &src);
+}
+
+/* tipc_udp_recv - read data from bearer socket */
+static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb)
+{
+ struct udp_bearer *ub;
+ struct tipc_bearer *b;
+ struct tipc_msg *hdr;
+ int err;
+
+ ub = rcu_dereference_sk_user_data(sk);
+ if (!ub) {
+ pr_err_ratelimited("Failed to get UDP bearer reference");
+ goto out;
+ }
+ skb_pull(skb, sizeof(struct udphdr));
+ hdr = buf_msg(skb);
+
+ b = rcu_dereference(ub->bearer);
+ if (!b)
+ goto out;
+
+ if (b && test_bit(0, &b->up)) {
+ TIPC_SKB_CB(skb)->flags = 0;
+ tipc_rcv(sock_net(sk), skb, b);
+ return 0;
+ }
+
+ if (unlikely(msg_user(hdr) == LINK_CONFIG)) {
+ err = tipc_udp_rcast_disc(b, skb);
+ if (err)
+ goto out;
+ }
+
+out:
+ kfree_skb(skb);
+ return 0;
+}
+
+static int enable_mcast(struct udp_bearer *ub, struct udp_media_addr *remote)
+{
+ int err = 0;
+ struct ip_mreqn mreqn;
+ struct sock *sk = ub->ubsock->sk;
+
+ if (ntohs(remote->proto) == ETH_P_IP) {
+ mreqn.imr_multiaddr = remote->ipv4;
+ mreqn.imr_ifindex = ub->ifindex;
+ err = ip_mc_join_group(sk, &mreqn);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ lock_sock(sk);
+ err = ipv6_stub->ipv6_sock_mc_join(sk, ub->ifindex,
+ &remote->ipv6);
+ release_sock(sk);
+#endif
+ }
+ return err;
+}
+
+static int __tipc_nl_add_udp_addr(struct sk_buff *skb,
+ struct udp_media_addr *addr, int nla_t)
+{
+ if (ntohs(addr->proto) == ETH_P_IP) {
+ struct sockaddr_in ip4;
+
+ memset(&ip4, 0, sizeof(ip4));
+ ip4.sin_family = AF_INET;
+ ip4.sin_port = addr->port;
+ ip4.sin_addr.s_addr = addr->ipv4.s_addr;
+ if (nla_put(skb, nla_t, sizeof(ip4), &ip4))
+ return -EMSGSIZE;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (ntohs(addr->proto) == ETH_P_IPV6) {
+ struct sockaddr_in6 ip6;
+
+ memset(&ip6, 0, sizeof(ip6));
+ ip6.sin6_family = AF_INET6;
+ ip6.sin6_port = addr->port;
+ memcpy(&ip6.sin6_addr, &addr->ipv6, sizeof(struct in6_addr));
+ if (nla_put(skb, nla_t, sizeof(ip6), &ip6))
+ return -EMSGSIZE;
+#endif
+ }
+
+ return 0;
+}
+
+int tipc_udp_nl_dump_remoteip(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ u32 bid = cb->args[0];
+ u32 skip_cnt = cb->args[1];
+ u32 portid = NETLINK_CB(cb->skb).portid;
+ struct udp_replicast *rcast, *tmp;
+ struct tipc_bearer *b;
+ struct udp_bearer *ub;
+ void *hdr;
+ int err;
+ int i;
+
+ if (!bid && !skip_cnt) {
+ struct nlattr **attrs = genl_dumpit_info(cb)->info.attrs;
+ struct net *net = sock_net(skb->sk);
+ struct nlattr *battrs[TIPC_NLA_BEARER_MAX + 1];
+ char *bname;
+
+ if (!attrs[TIPC_NLA_BEARER])
+ return -EINVAL;
+
+ err = nla_parse_nested_deprecated(battrs, TIPC_NLA_BEARER_MAX,
+ attrs[TIPC_NLA_BEARER],
+ tipc_nl_bearer_policy, NULL);
+ if (err)
+ return err;
+
+ if (!battrs[TIPC_NLA_BEARER_NAME])
+ return -EINVAL;
+
+ bname = nla_data(battrs[TIPC_NLA_BEARER_NAME]);
+
+ rtnl_lock();
+ b = tipc_bearer_find(net, bname);
+ if (!b) {
+ rtnl_unlock();
+ return -EINVAL;
+ }
+ bid = b->identity;
+ } else {
+ struct net *net = sock_net(skb->sk);
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+ rtnl_lock();
+ b = rtnl_dereference(tn->bearer_list[bid]);
+ if (!b) {
+ rtnl_unlock();
+ return -EINVAL;
+ }
+ }
+
+ ub = rtnl_dereference(b->media_ptr);
+ if (!ub) {
+ rtnl_unlock();
+ return -EINVAL;
+ }
+
+ i = 0;
+ list_for_each_entry_safe(rcast, tmp, &ub->rcast.list, list) {
+ if (i < skip_cnt)
+ goto count;
+
+ hdr = genlmsg_put(skb, portid, cb->nlh->nlmsg_seq,
+ &tipc_genl_family, NLM_F_MULTI,
+ TIPC_NL_BEARER_GET);
+ if (!hdr)
+ goto done;
+
+ err = __tipc_nl_add_udp_addr(skb, &rcast->addr,
+ TIPC_NLA_UDP_REMOTE);
+ if (err) {
+ genlmsg_cancel(skb, hdr);
+ goto done;
+ }
+ genlmsg_end(skb, hdr);
+count:
+ i++;
+ }
+done:
+ rtnl_unlock();
+ cb->args[0] = bid;
+ cb->args[1] = i;
+
+ return skb->len;
+}
+
+int tipc_udp_nl_add_bearer_data(struct tipc_nl_msg *msg, struct tipc_bearer *b)
+{
+ struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value;
+ struct udp_media_addr *dst;
+ struct udp_bearer *ub;
+ struct nlattr *nest;
+
+ ub = rtnl_dereference(b->media_ptr);
+ if (!ub)
+ return -ENODEV;
+
+ nest = nla_nest_start_noflag(msg->skb, TIPC_NLA_BEARER_UDP_OPTS);
+ if (!nest)
+ goto msg_full;
+
+ if (__tipc_nl_add_udp_addr(msg->skb, src, TIPC_NLA_UDP_LOCAL))
+ goto msg_full;
+
+ dst = (struct udp_media_addr *)&b->bcast_addr.value;
+ if (__tipc_nl_add_udp_addr(msg->skb, dst, TIPC_NLA_UDP_REMOTE))
+ goto msg_full;
+
+ if (!list_empty(&ub->rcast.list)) {
+ if (nla_put_flag(msg->skb, TIPC_NLA_UDP_MULTI_REMOTEIP))
+ goto msg_full;
+ }
+
+ nla_nest_end(msg->skb, nest);
+ return 0;
+msg_full:
+ nla_nest_cancel(msg->skb, nest);
+ return -EMSGSIZE;
+}
+
+/**
+ * tipc_parse_udp_addr - build udp media address from netlink data
+ * @nla: netlink attribute containing sockaddr storage aligned address
+ * @addr: tipc media address to fill with address, port and protocol type
+ * @scope_id: IPv6 scope id pointer, not NULL indicates it's required
+ */
+
+static int tipc_parse_udp_addr(struct nlattr *nla, struct udp_media_addr *addr,
+ u32 *scope_id)
+{
+ struct sockaddr_storage sa;
+
+ nla_memcpy(&sa, nla, sizeof(sa));
+ if (sa.ss_family == AF_INET) {
+ struct sockaddr_in *ip4 = (struct sockaddr_in *)&sa;
+
+ addr->proto = htons(ETH_P_IP);
+ addr->port = ip4->sin_port;
+ addr->ipv4.s_addr = ip4->sin_addr.s_addr;
+ return 0;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (sa.ss_family == AF_INET6) {
+ struct sockaddr_in6 *ip6 = (struct sockaddr_in6 *)&sa;
+
+ addr->proto = htons(ETH_P_IPV6);
+ addr->port = ip6->sin6_port;
+ memcpy(&addr->ipv6, &ip6->sin6_addr, sizeof(struct in6_addr));
+
+ /* Scope ID is only interesting for local addresses */
+ if (scope_id) {
+ int atype;
+
+ atype = ipv6_addr_type(&ip6->sin6_addr);
+ if (__ipv6_addr_needs_scope_id(atype) &&
+ !ip6->sin6_scope_id) {
+ return -EINVAL;
+ }
+
+ *scope_id = ip6->sin6_scope_id ? : 0;
+ }
+
+ return 0;
+#endif
+ }
+ return -EADDRNOTAVAIL;
+}
+
+int tipc_udp_nl_bearer_add(struct tipc_bearer *b, struct nlattr *attr)
+{
+ int err;
+ struct udp_media_addr addr = {0};
+ struct nlattr *opts[TIPC_NLA_UDP_MAX + 1];
+ struct udp_media_addr *dst;
+
+ if (nla_parse_nested_deprecated(opts, TIPC_NLA_UDP_MAX, attr, tipc_nl_udp_policy, NULL))
+ return -EINVAL;
+
+ if (!opts[TIPC_NLA_UDP_REMOTE])
+ return -EINVAL;
+
+ err = tipc_parse_udp_addr(opts[TIPC_NLA_UDP_REMOTE], &addr, NULL);
+ if (err)
+ return err;
+
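+ /* Additional remote addresses only make sense on a replicast bearer,
+ * not when the configured bcast address is a multicast group.
+ */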
+ dst = (struct udp_media_addr *)&b->bcast_addr.value;
+ if (tipc_udp_is_mcast_addr(dst)) {
+ pr_err("Can't add remote ip to TIPC UDP multicast bearer\n");
+ return -EINVAL;
+ }
+
+ if (tipc_udp_is_known_peer(b, &addr))
+ return 0;
+
+ return tipc_udp_rcast_add(b, &addr);
+}
+
+/**
+ * tipc_udp_enable - callback to create a new udp bearer instance
+ * @net: network namespace
+ * @b: pointer to generic tipc_bearer
+ * @attrs: netlink bearer configuration
+ *
+ * Validate the bearer parameters and initialize the UDP bearer.
+ * rtnl_lock must be held when this callback is invoked.
+ *
+ * Return: 0 on success, a negative errno otherwise.
+ */
+static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
+ struct nlattr *attrs[])
+{
+ int err = -EINVAL;
+ struct udp_bearer *ub;
+ struct udp_media_addr remote = {0};
+ struct udp_media_addr local = {0};
+ struct udp_port_cfg udp_conf = {0};
+ struct udp_tunnel_sock_cfg tuncfg = {NULL};
+ struct nlattr *opts[TIPC_NLA_UDP_MAX + 1];
+ u8 node_id[NODE_ID_LEN] = {0,};
+ struct net_device *dev;
+ int rmcast = 0;
+
+ ub = kzalloc(sizeof(*ub), GFP_ATOMIC);
+ if (!ub)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ub->rcast.list);
+
+ if (!attrs[TIPC_NLA_BEARER_UDP_OPTS])
+ goto err;
+
+ if (nla_parse_nested_deprecated(opts, TIPC_NLA_UDP_MAX, attrs[TIPC_NLA_BEARER_UDP_OPTS], tipc_nl_udp_policy, NULL))
+ goto err;
+
+ if (!opts[TIPC_NLA_UDP_LOCAL] || !opts[TIPC_NLA_UDP_REMOTE]) {
+ pr_err("Invalid UDP bearer configuration");
+ err = -EINVAL;
+ goto err;
+ }
+
+ err = tipc_parse_udp_addr(opts[TIPC_NLA_UDP_LOCAL], &local,
+ &ub->ifindex);
+ if (err)
+ goto err;
+
+ err = tipc_parse_udp_addr(opts[TIPC_NLA_UDP_REMOTE], &remote, NULL);
+ if (err)
+ goto err;
+
+ if (remote.proto != local.proto) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ /* Check if the remote address is a multicast address */
+ rmcast = tipc_udp_is_mcast_addr(&remote);
+
+ /* Autoconfigure own node identity if needed */
+ if (!tipc_own_id(net)) {
+ memcpy(node_id, local.ipv6.in6_u.u6_addr8, 16);
+ tipc_net_init(net, node_id, 0);
+ }
+ if (!tipc_own_id(net)) {
+ pr_warn("Failed to set node id, please configure manually\n");
+ err = -EINVAL;
+ goto err;
+ }
+
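+ /* Publish the broadcast address type and cross-link the generic bearer
+ * with its UDP instance before the tunnel socket is created.
+ */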
+ b->bcast_addr.media_id = TIPC_MEDIA_TYPE_UDP;
+ b->bcast_addr.broadcast = TIPC_BROADCAST_SUPPORT;
+ rcu_assign_pointer(b->media_ptr, ub);
+ rcu_assign_pointer(ub->bearer, b);
+ tipc_udp_media_addr_set(&b->addr, &local);
+ if (local.proto == htons(ETH_P_IP)) {
+ dev = __ip_dev_find(net, local.ipv4.s_addr, false);
+ if (!dev) {
+ err = -ENODEV;
+ goto err;
+ }
+ udp_conf.family = AF_INET;
+
+ /* Bind to the wildcard address so packets sent to the group are received */
+ if (rmcast)
+ udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
+ else
+ udp_conf.local_ip.s_addr = local.ipv4.s_addr;
+ udp_conf.use_udp_checksums = false;
+ ub->ifindex = dev->ifindex;
+ b->encap_hlen = sizeof(struct iphdr) + sizeof(struct udphdr);
+ b->mtu = b->media->mtu;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (local.proto == htons(ETH_P_IPV6)) {
+ dev = ub->ifindex ? __dev_get_by_index(net, ub->ifindex) : NULL;
+ dev = ipv6_dev_find(net, &local.ipv6, dev);
+ if (!dev) {
+ err = -ENODEV;
+ goto err;
+ }
+ udp_conf.family = AF_INET6;
+ udp_conf.use_udp6_tx_checksums = true;
+ udp_conf.use_udp6_rx_checksums = true;
+ if (rmcast)
+ udp_conf.local_ip6 = in6addr_any;
+ else
+ udp_conf.local_ip6 = local.ipv6;
+ ub->ifindex = dev->ifindex;
+ b->encap_hlen = sizeof(struct ipv6hdr) + sizeof(struct udphdr);
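+ /* 1280 is the minimum MTU that every IPv6 link must support */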
+ b->mtu = 1280;
+#endif
+ } else {
+ err = -EAFNOSUPPORT;
+ goto err;
+ }
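+ /* Create the tunnel socket and install the TIPC encap receive handler */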
+ udp_conf.local_udp_port = local.port;
+ err = udp_sock_create(net, &udp_conf, &ub->ubsock);
+ if (err)
+ goto err;
+ tuncfg.sk_user_data = ub;
+ tuncfg.encap_type = 1;
+ tuncfg.encap_rcv = tipc_udp_recv;
+ tuncfg.encap_destroy = NULL;
+ setup_udp_tunnel_sock(net, ub->ubsock, &tuncfg);
+
+ err = dst_cache_init(&ub->rcast.dst_cache, GFP_ATOMIC);
+ if (err)
+ goto free;
+
+ /* The bcast media address port is used for all peers; its IP address
+ * is only used when it is a multicast address.
+ */
+ memcpy(&b->bcast_addr.value, &remote, sizeof(remote));
+ if (rmcast)
+ err = enable_mcast(ub, &remote);
+ else
+ err = tipc_udp_rcast_add(b, &remote);
+ if (err)
+ goto free;
+
+ return 0;
+
+free:
+ dst_cache_destroy(&ub->rcast.dst_cache);
+ udp_tunnel_sock_release(ub->ubsock);
+err:
+ kfree(ub);
+ return err;
+}
+
+/* cleanup_bearer - break the socket/bearer association */
+static void cleanup_bearer(struct work_struct *work)
+{
+ struct udp_bearer *ub = container_of(work, struct udp_bearer, work);
+ struct udp_replicast *rcast, *tmp;
+
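+ /* Free every replicast peer entry and its cached route */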
+ list_for_each_entry_safe(rcast, tmp, &ub->rcast.list, list) {
+ dst_cache_destroy(&rcast->dst_cache);
+ list_del_rcu(&rcast->list);
+ kfree_rcu(rcast, rcu);
+ }
+
+ atomic_dec(&tipc_net(sock_net(ub->ubsock->sk))->wq_count);
+ dst_cache_destroy(&ub->rcast.dst_cache);
+ udp_tunnel_sock_release(ub->ubsock);
+ synchronize_net();
+ kfree(ub);
+}
+
+/* tipc_udp_disable - detach bearer from socket */
+static void tipc_udp_disable(struct tipc_bearer *b)
+{
+ struct udp_bearer *ub;
+
+ ub = rtnl_dereference(b->media_ptr);
+ if (!ub) {
+ pr_err("UDP bearer instance not found\n");
+ return;
+ }
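+ /* Stop further delivery to the bearer before scheduling the deferred
+ * socket cleanup.
+ */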
+ sock_set_flag(ub->ubsock->sk, SOCK_DEAD);
+ RCU_INIT_POINTER(ub->bearer, NULL);
+
+ /* sock_release needs to be done outside of the rtnl lock */
+ atomic_inc(&tipc_net(sock_net(ub->ubsock->sk))->wq_count);
+ INIT_WORK(&ub->work, cleanup_bearer);
+ schedule_work(&ub->work);
+}
+
+struct tipc_media udp_media_info = {
+ .send_msg = tipc_udp_send_msg,
+ .enable_media = tipc_udp_enable,
+ .disable_media = tipc_udp_disable,
+ .addr2str = tipc_udp_addr2str,
+ .addr2msg = tipc_udp_addr2msg,
+ .msg2addr = tipc_udp_msg2addr,
+ .priority = TIPC_DEF_LINK_PRI,
+ .tolerance = TIPC_DEF_LINK_TOL,
+ .min_win = TIPC_DEF_LINK_WIN,
+ .max_win = TIPC_DEF_LINK_WIN,
+ .mtu = TIPC_DEF_LINK_UDP_MTU,
+ .type_id = TIPC_MEDIA_TYPE_UDP,
+ .hwaddr_len = 0,
+ .name = "udp"
+};
diff --git a/net/tipc/udp_media.h b/net/tipc/udp_media.h
new file mode 100644
index 0000000000..e7455cc73e
--- /dev/null
+++ b/net/tipc/udp_media.h
@@ -0,0 +1,60 @@
+/*
+ * net/tipc/udp_media.h: Include file for UDP bearer media
+ *
+ * Copyright (c) 1996-2006, 2013-2016, Ericsson AB
+ * Copyright (c) 2005, 2010-2011, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef CONFIG_TIPC_MEDIA_UDP
+#ifndef _TIPC_UDP_MEDIA_H
+#define _TIPC_UDP_MEDIA_H
+
+#include <linux/ip.h>
+#include <linux/udp.h>
+
+int tipc_udp_nl_bearer_add(struct tipc_bearer *b, struct nlattr *attr);
+int tipc_udp_nl_add_bearer_data(struct tipc_nl_msg *msg, struct tipc_bearer *b);
+int tipc_udp_nl_dump_remoteip(struct sk_buff *skb, struct netlink_callback *cb);
+
+/* Check if the configured MTU is too low to carry TIPC headers */
+static inline bool tipc_udp_mtu_bad(u32 mtu)
+{
+ if (mtu >= (TIPC_MIN_BEARER_MTU + sizeof(struct iphdr) +
+ sizeof(struct udphdr)))
+ return false;
+
+ pr_warn("MTU too low for tipc bearer\n");
+ return true;
+}
+
+#endif
+#endif