author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree      b2d64bc10158fdd5497876388cd68142ca374ed3 /net/batman-adv
parent    Initial commit. (diff)
download  linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.tar.xz
          linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.zip
Adding upstream version 6.6.15. (tag: upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'net/batman-adv')
-rw-r--r--  net/batman-adv/Kconfig | 96
-rw-r--r--  net/batman-adv/Makefile | 34
-rw-r--r--  net/batman-adv/bat_algo.c | 209
-rw-r--r--  net/batman-adv/bat_algo.h | 25
-rw-r--r--  net/batman-adv/bat_iv_ogm.c | 2551
-rw-r--r--  net/batman-adv/bat_iv_ogm.h | 14
-rw-r--r--  net/batman-adv/bat_v.c | 891
-rw-r--r--  net/batman-adv/bat_v.h | 41
-rw-r--r--  net/batman-adv/bat_v_elp.c | 550
-rw-r--r--  net/batman-adv/bat_v_elp.h | 24
-rw-r--r--  net/batman-adv/bat_v_ogm.c | 1087
-rw-r--r--  net/batman-adv/bat_v_ogm.h | 27
-rw-r--r--  net/batman-adv/bitarray.c | 89
-rw-r--r--  net/batman-adv/bitarray.h | 56
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c | 2502
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.h | 132
-rw-r--r--  net/batman-adv/distributed-arp-table.c | 1831
-rw-r--r--  net/batman-adv/distributed-arp-table.h | 186
-rw-r--r--  net/batman-adv/fragmentation.c | 562
-rw-r--r--  net/batman-adv/fragmentation.h | 44
-rw-r--r--  net/batman-adv/gateway_client.c | 769
-rw-r--r--  net/batman-adv/gateway_client.h | 55
-rw-r--r--  net/batman-adv/gateway_common.c | 114
-rw-r--r--  net/batman-adv/gateway_common.h | 31
-rw-r--r--  net/batman-adv/hard-interface.c | 1028
-rw-r--r--  net/batman-adv/hard-interface.h | 122
-rw-r--r--  net/batman-adv/hash.c | 84
-rw-r--r--  net/batman-adv/hash.h | 157
-rw-r--r--  net/batman-adv/log.c | 36
-rw-r--r--  net/batman-adv/log.h | 143
-rw-r--r--  net/batman-adv/main.c | 731
-rw-r--r--  net/batman-adv/main.h | 384
-rw-r--r--  net/batman-adv/multicast.c | 2096
-rw-r--r--  net/batman-adv/multicast.h | 99
-rw-r--r--  net/batman-adv/netlink.c | 1521
-rw-r--r--  net/batman-adv/netlink.h | 26
-rw-r--r--  net/batman-adv/network-coding.c | 1873
-rw-r--r--  net/batman-adv/network-coding.h | 106
-rw-r--r--  net/batman-adv/originator.c | 1349
-rw-r--r--  net/batman-adv/originator.h | 167
-rw-r--r--  net/batman-adv/routing.c | 1272
-rw-r--r--  net/batman-adv/routing.h | 42
-rw-r--r--  net/batman-adv/send.c | 1135
-rw-r--r--  net/batman-adv/send.h | 116
-rw-r--r--  net/batman-adv/soft-interface.c | 1130
-rw-r--r--  net/batman-adv/soft-interface.h | 42
-rw-r--r--  net/batman-adv/tp_meter.c | 1490
-rw-r--r--  net/batman-adv/tp_meter.h | 22
-rw-r--r--  net/batman-adv/trace.c | 8
-rw-r--r--  net/batman-adv/trace.h | 64
-rw-r--r--  net/batman-adv/translation-table.c | 4290
-rw-r--r--  net/batman-adv/translation-table.h | 74
-rw-r--r--  net/batman-adv/tvlv.c | 663
-rw-r--r--  net/batman-adv/tvlv.h | 52
-rw-r--r--  net/batman-adv/types.h | 2383
55 files changed, 34625 insertions, 0 deletions
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
new file mode 100644
index 0000000000..860a0786bc
--- /dev/null
+++ b/net/batman-adv/Kconfig
@@ -0,0 +1,96 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) B.A.T.M.A.N. contributors:
+#
+# Marek Lindner, Simon Wunderlich
+
+#
+# B.A.T.M.A.N. meshing protocol
+#
+
+config BATMAN_ADV
+ tristate "B.A.T.M.A.N. Advanced Meshing Protocol"
+ select LIBCRC32C
+ help
+ B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is
+ a routing protocol for multi-hop ad-hoc mesh networks. The
+ networks may be wired or wireless. See
+ https://www.open-mesh.org/ for more information and user space
+ tools.
+
+config BATMAN_ADV_BATMAN_V
+ bool "B.A.T.M.A.N. V protocol"
+ depends on BATMAN_ADV && !(CFG80211=m && BATMAN_ADV=y)
+ default y
+ help
+ This option enables the B.A.T.M.A.N. V protocol, the successor
+ of the currently used B.A.T.M.A.N. IV protocol. The main
+ changes include splitting of the OGM protocol into a neighbor
+ discovery protocol (Echo Location Protocol, ELP) and a new OGM
+ Protocol OGMv2 for flooding protocol information through the
+ network, as well as a throughput based metric.
+ B.A.T.M.A.N. V is currently considered experimental and not
+ compatible with B.A.T.M.A.N. IV networks.
+
+config BATMAN_ADV_BLA
+ bool "Bridge Loop Avoidance"
+ depends on BATMAN_ADV && INET
+ select CRC16
+ default y
+ help
+ This option enables BLA (Bridge Loop Avoidance), a mechanism
+ to avoid Ethernet frames looping when mesh nodes are connected
+ to both the same LAN and the same mesh. If you will never use
+ more than one mesh node in the same LAN, you can safely remove
+ this feature and save some space.
+
+config BATMAN_ADV_DAT
+ bool "Distributed ARP Table"
+ depends on BATMAN_ADV && INET
+ default y
+ help
+ This option enables DAT (Distributed ARP Table), a DHT based
+ mechanism that increases ARP reliability on sparse wireless
+ mesh networks. If you think that your network does not need
+ this option you can safely remove it and save some space.
+
+config BATMAN_ADV_NC
+ bool "Network Coding"
+ depends on BATMAN_ADV
+ help
+ This option enables network coding, a mechanism that aims to
+ increase the overall network throughput by fusing multiple
+ packets in one transmission.
+ Note that interfaces controlled by batman-adv must be manually
+ configured to have promiscuous mode enabled in order to make
+ network coding work.
+ If you think that your network does not need this feature you
+ can safely disable it and save some space.
+
+config BATMAN_ADV_MCAST
+ bool "Multicast optimisation"
+ depends on BATMAN_ADV && INET && !(BRIDGE=m && BATMAN_ADV=y)
+ default y
+ help
+ This option enables the multicast optimisation which aims to
+ reduce the air overhead while improving the reliability of
+ multicast messages.
+
+config BATMAN_ADV_DEBUG
+ bool "B.A.T.M.A.N. debugging"
+ depends on BATMAN_ADV
+ help
+ This is an option for use by developers; most people should
+ say N here. This enables compilation of support for
+ outputting debugging information to the tracing buffer. The output is
+ controlled via the batadv netdev specific log_level setting.
+
+config BATMAN_ADV_TRACING
+ bool "B.A.T.M.A.N. tracing support"
+ depends on BATMAN_ADV
+ depends on EVENT_TRACING
+ help
+ This is an option for use by developers; most people should
+ say N here. Select this option to gather traces like the debug
+ messages using the generic tracing infrastructure of the kernel.
+ BATMAN_ADV_DEBUG must also be selected to get trace events for
+ batadv_dbg.
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
new file mode 100644
index 0000000000..3bd0760c76
--- /dev/null
+++ b/net/batman-adv/Makefile
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) B.A.T.M.A.N. contributors:
+#
+# Marek Lindner, Simon Wunderlich
+
+obj-$(CONFIG_BATMAN_ADV) += batman-adv.o
+batman-adv-y += bat_algo.o
+batman-adv-y += bat_iv_ogm.o
+batman-adv-$(CONFIG_BATMAN_ADV_BATMAN_V) += bat_v.o
+batman-adv-$(CONFIG_BATMAN_ADV_BATMAN_V) += bat_v_elp.o
+batman-adv-$(CONFIG_BATMAN_ADV_BATMAN_V) += bat_v_ogm.o
+batman-adv-y += bitarray.o
+batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o
+batman-adv-$(CONFIG_BATMAN_ADV_DAT) += distributed-arp-table.o
+batman-adv-y += fragmentation.o
+batman-adv-y += gateway_client.o
+batman-adv-y += gateway_common.o
+batman-adv-y += hard-interface.o
+batman-adv-y += hash.o
+batman-adv-$(CONFIG_BATMAN_ADV_DEBUG) += log.o
+batman-adv-y += main.o
+batman-adv-$(CONFIG_BATMAN_ADV_MCAST) += multicast.o
+batman-adv-y += netlink.o
+batman-adv-$(CONFIG_BATMAN_ADV_NC) += network-coding.o
+batman-adv-y += originator.o
+batman-adv-y += routing.o
+batman-adv-y += send.o
+batman-adv-y += soft-interface.o
+batman-adv-$(CONFIG_BATMAN_ADV_TRACING) += trace.o
+batman-adv-y += tp_meter.o
+batman-adv-y += translation-table.o
+batman-adv-y += tvlv.o
+
+CFLAGS_trace.o := -I$(src)
diff --git a/net/batman-adv/bat_algo.c b/net/batman-adv/bat_algo.c
new file mode 100644
index 0000000000..4eee53d19e
--- /dev/null
+++ b/net/batman-adv/bat_algo.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Simon Wunderlich
+ */
+
+#include "main.h"
+
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/moduleparam.h>
+#include <linux/netlink.h>
+#include <linux/printk.h>
+#include <linux/skbuff.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <net/genetlink.h>
+#include <net/netlink.h>
+#include <uapi/linux/batman_adv.h>
+
+#include "bat_algo.h"
+#include "netlink.h"
+
+char batadv_routing_algo[20] = "BATMAN_IV";
+static struct hlist_head batadv_algo_list;
+
+/**
+ * batadv_algo_init() - Initialize batman-adv algorithm management data
+ * structures
+ */
+void batadv_algo_init(void)
+{
+ INIT_HLIST_HEAD(&batadv_algo_list);
+}
+
+/**
+ * batadv_algo_get() - Search for algorithm with specific name
+ * @name: algorithm name to find
+ *
+ * Return: Pointer to batadv_algo_ops on success, NULL otherwise
+ */
+struct batadv_algo_ops *batadv_algo_get(const char *name)
+{
+ struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
+
+ hlist_for_each_entry(bat_algo_ops_tmp, &batadv_algo_list, list) {
+ if (strcmp(bat_algo_ops_tmp->name, name) != 0)
+ continue;
+
+ bat_algo_ops = bat_algo_ops_tmp;
+ break;
+ }
+
+ return bat_algo_ops;
+}
+
+/**
+ * batadv_algo_register() - Register callbacks for a mesh algorithm
+ * @bat_algo_ops: mesh algorithm callbacks to add
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
+{
+ struct batadv_algo_ops *bat_algo_ops_tmp;
+
+ bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
+ if (bat_algo_ops_tmp) {
+ pr_info("Trying to register already registered routing algorithm: %s\n",
+ bat_algo_ops->name);
+ return -EEXIST;
+ }
+
+ /* all algorithms must implement all ops (for now) */
+ if (!bat_algo_ops->iface.enable ||
+ !bat_algo_ops->iface.disable ||
+ !bat_algo_ops->iface.update_mac ||
+ !bat_algo_ops->iface.primary_set ||
+ !bat_algo_ops->neigh.cmp ||
+ !bat_algo_ops->neigh.is_similar_or_better) {
+ pr_info("Routing algo '%s' does not implement required ops\n",
+ bat_algo_ops->name);
+ return -EINVAL;
+ }
+
+ INIT_HLIST_NODE(&bat_algo_ops->list);
+ hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);
+
+ return 0;
+}
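
For orientation, here is a compressed sketch of how a routing algorithm module would hook into this registry. All my_* names are hypothetical; the callback slots mirror the required ops checked above (declared in struct batadv_algo_ops in types.h), and the stub callbacks are assumed to be defined elsewhere in the module:

    /* hypothetical algorithm registration, error paths trimmed */
    static struct batadv_algo_ops batadv_my_algo_ops __read_mostly = {
            .name = "MY_ALGO",
            .iface = {
                    .enable = my_iface_enable,
                    .disable = my_iface_disable,
                    .update_mac = my_iface_update_mac,
                    .primary_set = my_primary_iface_set,
            },
            .neigh = {
                    .cmp = my_neigh_cmp,
                    .is_similar_or_better = my_neigh_is_similar_or_better,
            },
    };

    int __init batadv_my_algo_init(void)
    {
            /* fails with -EINVAL if any required op above is NULL,
             * with -EEXIST if "MY_ALGO" is already registered
             */
            return batadv_algo_register(&batadv_my_algo_ops);
    }
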
+
+/**
+ * batadv_algo_select() - Select algorithm of soft interface
+ * @bat_priv: the bat priv with all the soft interface information
+ * @name: name of the algorithm to select
+ *
+ * The algorithm callbacks for the soft interface will be set when the algorithm
+ * with the correct name is found. Any previously selected algorithm will not be
+ * deinitialized and the newly selected algorithm will also not be initialized.
+ * It is therefore not allowed to call batadv_algo_select outside the creation
+ * function of the soft interface.
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+int batadv_algo_select(struct batadv_priv *bat_priv, const char *name)
+{
+ struct batadv_algo_ops *bat_algo_ops;
+
+ bat_algo_ops = batadv_algo_get(name);
+ if (!bat_algo_ops)
+ return -EINVAL;
+
+ bat_priv->algo_ops = bat_algo_ops;
+
+ return 0;
+}
+
+static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
+{
+ struct batadv_algo_ops *bat_algo_ops;
+ char *algo_name = (char *)val;
+ size_t name_len = strlen(algo_name);
+
+ if (name_len > 0 && algo_name[name_len - 1] == '\n')
+ algo_name[name_len - 1] = '\0';
+
+ bat_algo_ops = batadv_algo_get(algo_name);
+ if (!bat_algo_ops) {
+ pr_err("Routing algorithm '%s' is not supported\n", algo_name);
+ return -EINVAL;
+ }
+
+ return param_set_copystring(algo_name, kp);
+}
+
+static const struct kernel_param_ops batadv_param_ops_ra = {
+ .set = batadv_param_set_ra,
+ .get = param_get_string,
+};
+
+static struct kparam_string batadv_param_string_ra = {
+ .maxlen = sizeof(batadv_routing_algo),
+ .string = batadv_routing_algo,
+};
+
+module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
+ 0644);
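
Because the permission mask is 0644, the parameter is also exposed at runtime: it can be chosen at module load time (e.g. "modprobe batman-adv routing_algo=BATMAN_V", provided that algorithm was compiled in) and read back from /sys/module/batman_adv/parameters/routing_algo; batadv_param_set_ra() above rejects any name under which no algorithm has registered.
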
+
+/**
+ * batadv_algo_dump_entry() - fill in information about one supported routing
+ * algorithm
+ * @msg: netlink message to be sent back
+ * @portid: Port to reply to
+ * @seq: Sequence number of message
+ * @bat_algo_ops: Algorithm to be dumped
+ *
+ * Return: Error number, or 0 on success
+ */
+static int batadv_algo_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_algo_ops *bat_algo_ops)
+{
+ void *hdr;
+
+ hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
+ NLM_F_MULTI, BATADV_CMD_GET_ROUTING_ALGOS);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (nla_put_string(msg, BATADV_ATTR_ALGO_NAME, bat_algo_ops->name))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+/**
+ * batadv_algo_dump() - fill in information about supported routing
+ * algorithms
+ * @msg: netlink message to be sent back
+ * @cb: Parameters to the netlink request
+ *
+ * Return: Length of reply message.
+ */
+int batadv_algo_dump(struct sk_buff *msg, struct netlink_callback *cb)
+{
+ int portid = NETLINK_CB(cb->skb).portid;
+ struct batadv_algo_ops *bat_algo_ops;
+ int skip = cb->args[0];
+ int i = 0;
+
+ hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) {
+ if (i++ < skip)
+ continue;
+
+ if (batadv_algo_dump_entry(msg, portid, cb->nlh->nlmsg_seq,
+ bat_algo_ops)) {
+ i--;
+ break;
+ }
+ }
+
+ cb->args[0] = i;
+
+ return msg->len;
+}
diff --git a/net/batman-adv/bat_algo.h b/net/batman-adv/bat_algo.h
new file mode 100644
index 0000000000..2c486374af
--- /dev/null
+++ b/net/batman-adv/bat_algo.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Linus Lüssing
+ */
+
+#ifndef _NET_BATMAN_ADV_BAT_ALGO_H_
+#define _NET_BATMAN_ADV_BAT_ALGO_H_
+
+#include "main.h"
+
+#include <linux/netlink.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
+extern char batadv_routing_algo[];
+extern struct list_head batadv_hardif_list;
+
+void batadv_algo_init(void);
+struct batadv_algo_ops *batadv_algo_get(const char *name);
+int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops);
+int batadv_algo_select(struct batadv_priv *bat_priv, const char *name);
+int batadv_algo_dump(struct sk_buff *msg, struct netlink_callback *cb);
+
+#endif /* _NET_BATMAN_ADV_BAT_ALGO_H_ */
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
new file mode 100644
index 0000000000..74b49c35dd
--- /dev/null
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -0,0 +1,2551 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Simon Wunderlich
+ */
+
+#include "bat_iv_ogm.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/byteorder/generic.h>
+#include <linux/cache.h>
+#include <linux/container_of.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/gfp.h>
+#include <linux/if_ether.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/netlink.h>
+#include <linux/pkt_sched.h>
+#include <linux/printk.h>
+#include <linux/random.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <net/genetlink.h>
+#include <net/netlink.h>
+#include <uapi/linux/batadv_packet.h>
+#include <uapi/linux/batman_adv.h>
+
+#include "bat_algo.h"
+#include "bitarray.h"
+#include "gateway_client.h"
+#include "hard-interface.h"
+#include "hash.h"
+#include "log.h"
+#include "netlink.h"
+#include "network-coding.h"
+#include "originator.h"
+#include "routing.h"
+#include "send.h"
+#include "translation-table.h"
+#include "tvlv.h"
+
+static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work);
+
+/**
+ * enum batadv_dup_status - duplicate status
+ */
+enum batadv_dup_status {
+ /** @BATADV_NO_DUP: the packet is no duplicate */
+ BATADV_NO_DUP = 0,
+
+ /**
+ * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for
+ * the neighbor)
+ */
+ BATADV_ORIG_DUP,
+
+ /** @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor */
+ BATADV_NEIGH_DUP,
+
+ /**
+ * @BATADV_PROTECTED: originator is currently protected (after reboot)
+ */
+ BATADV_PROTECTED,
+};
+
+/**
+ * batadv_ring_buffer_set() - update the ring buffer with the given value
+ * @lq_recv: pointer to the ring buffer
+ * @lq_index: index to store the value at
+ * @value: value to store in the ring buffer
+ */
+static void batadv_ring_buffer_set(u8 lq_recv[], u8 *lq_index, u8 value)
+{
+ lq_recv[*lq_index] = value;
+ *lq_index = (*lq_index + 1) % BATADV_TQ_GLOBAL_WINDOW_SIZE;
+}
+
+/**
+ * batadv_ring_buffer_avg() - compute the average of all non-zero values stored
+ * in the given ring buffer
+ * @lq_recv: pointer to the ring buffer
+ *
+ * Return: computed average value.
+ */
+static u8 batadv_ring_buffer_avg(const u8 lq_recv[])
+{
+ const u8 *ptr;
+ u16 count = 0;
+ u16 i = 0;
+ u16 sum = 0;
+
+ ptr = lq_recv;
+
+ while (i < BATADV_TQ_GLOBAL_WINDOW_SIZE) {
+ if (*ptr != 0) {
+ count++;
+ sum += *ptr;
+ }
+
+ i++;
+ ptr++;
+ }
+
+ if (count == 0)
+ return 0;
+
+ return (u8)(sum / count);
+}
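
Together these two helpers form a fixed-size ring of the most recent link-quality samples, where empty (zero) slots are excluded from the average. A self-contained userspace sketch of the same technique, with an invented window size standing in for BATADV_TQ_GLOBAL_WINDOW_SIZE:

    #include <stdint.h>
    #include <stdio.h>

    #define WINDOW_SIZE 10 /* stands in for BATADV_TQ_GLOBAL_WINDOW_SIZE */

    static void ring_set(uint8_t lq[], uint8_t *idx, uint8_t value)
    {
            lq[*idx] = value;
            *idx = (*idx + 1) % WINDOW_SIZE; /* wrap: oldest sample is overwritten */
    }

    static uint8_t ring_avg(const uint8_t lq[])
    {
            unsigned int sum = 0, count = 0;

            for (int i = 0; i < WINDOW_SIZE; i++) {
                    if (lq[i]) {    /* zero slots mean "no sample yet" and are skipped */
                            sum += lq[i];
                            count++;
                    }
            }

            return count ? sum / count : 0;
    }

    int main(void)
    {
            uint8_t lq[WINDOW_SIZE] = { 0 }, idx = 0;

            ring_set(lq, &idx, 200);
            ring_set(lq, &idx, 100);
            printf("avg = %u\n", ring_avg(lq)); /* prints 150 */
            return 0;
    }
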
+
+/**
+ * batadv_iv_ogm_orig_get() - retrieve or create (if it does not exist) an
+ * originator
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: mac address of the originator
+ *
+ * Return: the originator object corresponding to the passed mac address or NULL
+ * on failure.
+ * If the object does not exist, it is created and initialised.
+ */
+static struct batadv_orig_node *
+batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr)
+{
+ struct batadv_orig_node *orig_node;
+ int hash_added;
+
+ orig_node = batadv_orig_hash_find(bat_priv, addr);
+ if (orig_node)
+ return orig_node;
+
+ orig_node = batadv_orig_node_new(bat_priv, addr);
+ if (!orig_node)
+ return NULL;
+
+ spin_lock_init(&orig_node->bat_iv.ogm_cnt_lock);
+
+ kref_get(&orig_node->refcount);
+ hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
+ batadv_choose_orig, orig_node,
+ &orig_node->hash_entry);
+ if (hash_added != 0)
+ goto free_orig_node_hash;
+
+ return orig_node;
+
+free_orig_node_hash:
+ /* reference for batadv_hash_add */
+ batadv_orig_node_put(orig_node);
+ /* reference from batadv_orig_node_new */
+ batadv_orig_node_put(orig_node);
+
+ return NULL;
+}
+
+static struct batadv_neigh_node *
+batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
+ const u8 *neigh_addr,
+ struct batadv_orig_node *orig_node,
+ struct batadv_orig_node *orig_neigh)
+{
+ struct batadv_neigh_node *neigh_node;
+
+ neigh_node = batadv_neigh_node_get_or_create(orig_node,
+ hard_iface, neigh_addr);
+ if (!neigh_node)
+ goto out;
+
+ neigh_node->orig_node = orig_neigh;
+
+out:
+ return neigh_node;
+}
+
+static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
+{
+ struct batadv_ogm_packet *batadv_ogm_packet;
+ unsigned char *ogm_buff;
+ u32 random_seqno;
+
+ mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+
+ /* randomize initial seqno to avoid collision */
+ get_random_bytes(&random_seqno, sizeof(random_seqno));
+ atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
+
+ hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
+ ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
+ if (!ogm_buff) {
+ mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
+ return -ENOMEM;
+ }
+
+ hard_iface->bat_iv.ogm_buff = ogm_buff;
+
+ batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
+ batadv_ogm_packet->packet_type = BATADV_IV_OGM;
+ batadv_ogm_packet->version = BATADV_COMPAT_VERSION;
+ batadv_ogm_packet->ttl = 2;
+ batadv_ogm_packet->flags = BATADV_NO_FLAGS;
+ batadv_ogm_packet->reserved = 0;
+ batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE;
+
+ mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
+
+ return 0;
+}
+
+static void batadv_iv_ogm_iface_disable(struct batadv_hard_iface *hard_iface)
+{
+ mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+
+ kfree(hard_iface->bat_iv.ogm_buff);
+ hard_iface->bat_iv.ogm_buff = NULL;
+
+ mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
+}
+
+static void batadv_iv_ogm_iface_update_mac(struct batadv_hard_iface *hard_iface)
+{
+ struct batadv_ogm_packet *batadv_ogm_packet;
+ void *ogm_buff;
+
+ mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+
+ ogm_buff = hard_iface->bat_iv.ogm_buff;
+ if (!ogm_buff)
+ goto unlock;
+
+ batadv_ogm_packet = ogm_buff;
+ ether_addr_copy(batadv_ogm_packet->orig,
+ hard_iface->net_dev->dev_addr);
+ ether_addr_copy(batadv_ogm_packet->prev_sender,
+ hard_iface->net_dev->dev_addr);
+
+unlock:
+ mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
+}
+
+static void
+batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface)
+{
+ struct batadv_ogm_packet *batadv_ogm_packet;
+ void *ogm_buff;
+
+ mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+
+ ogm_buff = hard_iface->bat_iv.ogm_buff;
+ if (!ogm_buff)
+ goto unlock;
+
+ batadv_ogm_packet = ogm_buff;
+ batadv_ogm_packet->ttl = BATADV_TTL;
+
+unlock:
+ mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
+}
+
+/* when do we schedule our own ogm to be sent */
+static unsigned long
+batadv_iv_ogm_emit_send_time(const struct batadv_priv *bat_priv)
+{
+ unsigned int msecs;
+
+ msecs = atomic_read(&bat_priv->orig_interval) - BATADV_JITTER;
+ msecs += get_random_u32_below(2 * BATADV_JITTER);
+
+ return jiffies + msecs_to_jiffies(msecs);
+}
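
With the defaults (an orig_interval of 1000 ms and BATADV_JITTER of 20 ms, as defined in main.h), the next own OGM is therefore scheduled uniformly within the [980 ms, 1020 ms) range, which keeps neighboring nodes from locking onto the same transmission cadence.
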
+
+/* when do we schedule an OGM packet to be sent */
+static unsigned long batadv_iv_ogm_fwd_send_time(void)
+{
+ return jiffies + msecs_to_jiffies(get_random_u32_below(BATADV_JITTER / 2));
+}
+
+/* apply hop penalty for a normal link */
+static u8 batadv_hop_penalty(u8 tq, const struct batadv_priv *bat_priv)
+{
+ int hop_penalty = atomic_read(&bat_priv->hop_penalty);
+ int new_tq;
+
+ new_tq = tq * (BATADV_TQ_MAX_VALUE - hop_penalty);
+ new_tq /= BATADV_TQ_MAX_VALUE;
+
+ return new_tq;
+}
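
Worked example: with a hop_penalty of 30 (the batman-adv default) and an incoming tq of 255 (BATADV_TQ_MAX_VALUE), this gives new_tq = 255 * (255 - 30) / 255 = 225, i.e. every hop scales the path metric to roughly 88 % of its previous value.
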
+
+/**
+ * batadv_iv_ogm_aggr_packet() - checks if there is another OGM attached
+ * @buff_pos: current position in the skb
+ * @packet_len: total length of the skb
+ * @ogm_packet: potential OGM in buffer
+ *
+ * Return: true if there is enough space for another OGM, false otherwise.
+ */
+static bool
+batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
+ const struct batadv_ogm_packet *ogm_packet)
+{
+ int next_buff_pos = 0;
+
+ /* check if there is enough space for the header */
+ next_buff_pos += buff_pos + sizeof(*ogm_packet);
+ if (next_buff_pos > packet_len)
+ return false;
+
+ /* check if there is enough space for the optional TVLV */
+ next_buff_pos += ntohs(ogm_packet->tvlv_len);
+
+ return (next_buff_pos <= packet_len) &&
+ (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
+}
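
In other words, the record at buff_pos is only walked if both its fixed header (sizeof(*ogm_packet)) and its variable-length TVLV data fit inside the received skb, and the walk never runs past the BATADV_MAX_AGGREGATION_BYTES limit used when aggregates are built.
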
+
+/* send a batman ogm to a given interface */
+static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
+ struct batadv_hard_iface *hard_iface)
+{
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ const char *fwd_str;
+ u8 packet_num;
+ s16 buff_pos;
+ struct batadv_ogm_packet *batadv_ogm_packet;
+ struct sk_buff *skb;
+ u8 *packet_pos;
+
+ if (hard_iface->if_status != BATADV_IF_ACTIVE)
+ return;
+
+ packet_num = 0;
+ buff_pos = 0;
+ packet_pos = forw_packet->skb->data;
+ batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
+
+ /* adjust all flags and log packets */
+ while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
+ batadv_ogm_packet)) {
+ /* we might have aggregated direct link packets with an
+ * ordinary base packet
+ */
+ if (forw_packet->direct_link_flags & BIT(packet_num) &&
+ forw_packet->if_incoming == hard_iface)
+ batadv_ogm_packet->flags |= BATADV_DIRECTLINK;
+ else
+ batadv_ogm_packet->flags &= ~BATADV_DIRECTLINK;
+
+ if (packet_num > 0 || !forw_packet->own)
+ fwd_str = "Forwarding";
+ else
+ fwd_str = "Sending own";
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s) on interface %s [%pM]\n",
+ fwd_str, (packet_num > 0 ? "aggregated " : ""),
+ batadv_ogm_packet->orig,
+ ntohl(batadv_ogm_packet->seqno),
+ batadv_ogm_packet->tq, batadv_ogm_packet->ttl,
+ ((batadv_ogm_packet->flags & BATADV_DIRECTLINK) ?
+ "on" : "off"),
+ hard_iface->net_dev->name,
+ hard_iface->net_dev->dev_addr);
+
+ buff_pos += BATADV_OGM_HLEN;
+ buff_pos += ntohs(batadv_ogm_packet->tvlv_len);
+ packet_num++;
+ packet_pos = forw_packet->skb->data + buff_pos;
+ batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
+ }
+
+ /* create clone because function is called more than once */
+ skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
+ if (skb) {
+ batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_TX);
+ batadv_add_counter(bat_priv, BATADV_CNT_MGMT_TX_BYTES,
+ skb->len + ETH_HLEN);
+ batadv_send_broadcast_skb(skb, hard_iface);
+ }
+}
+
+/* send a batman ogm packet */
+static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
+{
+ struct net_device *soft_iface;
+
+ if (!forw_packet->if_incoming) {
+ pr_err("Error - can't forward packet: incoming iface not specified\n");
+ return;
+ }
+
+ soft_iface = forw_packet->if_incoming->soft_iface;
+
+ if (WARN_ON(!forw_packet->if_outgoing))
+ return;
+
+ if (forw_packet->if_outgoing->soft_iface != soft_iface) {
+ pr_warn("%s: soft interface switch for queued OGM\n", __func__);
+ return;
+ }
+
+ if (forw_packet->if_incoming->if_status != BATADV_IF_ACTIVE)
+ return;
+
+ /* only for one specific outgoing interface */
+ batadv_iv_ogm_send_to_if(forw_packet, forw_packet->if_outgoing);
+}
+
+/**
+ * batadv_iv_ogm_can_aggregate() - find out if an OGM can be aggregated on an
+ * existing forward packet
+ * @new_bat_ogm_packet: OGM packet to be aggregated
+ * @bat_priv: the bat priv with all the soft interface information
+ * @packet_len: (total) length of the OGM
+ * @send_time: timestamp (jiffies) when the packet is to be sent
+ * @directlink: true if this is a direct link packet
+ * @if_incoming: interface where the packet was received
+ * @if_outgoing: interface for which the retransmission should be considered
+ * @forw_packet: the forwarded packet which should be checked
+ *
+ * Return: true if new_packet can be aggregated with forw_packet
+ */
+static bool
+batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet,
+ struct batadv_priv *bat_priv,
+ int packet_len, unsigned long send_time,
+ bool directlink,
+ const struct batadv_hard_iface *if_incoming,
+ const struct batadv_hard_iface *if_outgoing,
+ const struct batadv_forw_packet *forw_packet)
+{
+ struct batadv_ogm_packet *batadv_ogm_packet;
+ int aggregated_bytes = forw_packet->packet_len + packet_len;
+ struct batadv_hard_iface *primary_if = NULL;
+ bool res = false;
+ unsigned long aggregation_end_time;
+
+ batadv_ogm_packet = (struct batadv_ogm_packet *)forw_packet->skb->data;
+ aggregation_end_time = send_time;
+ aggregation_end_time += msecs_to_jiffies(BATADV_MAX_AGGREGATION_MS);
+
+ /* we can aggregate the current packet to this aggregated packet
+ * if:
+ *
+ * - the send time is within our MAX_AGGREGATION_MS time
+ * - the resulting packet won't be bigger than
+ * MAX_AGGREGATION_BYTES
+ * otherwise aggregation is not possible
+ */
+ if (!time_before(send_time, forw_packet->send_time) ||
+ !time_after_eq(aggregation_end_time, forw_packet->send_time))
+ return false;
+
+ if (aggregated_bytes > BATADV_MAX_AGGREGATION_BYTES)
+ return false;
+
+ /* packet is not leaving on the same interface. */
+ if (forw_packet->if_outgoing != if_outgoing)
+ return false;
+
+ /* check aggregation compatibility
+ * -> direct link packets are broadcasted on
+ * their interface only
+ * -> aggregate packet if the current packet is
+ * a "global" packet as well as the base
+ * packet
+ */
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ return false;
+
+ /* packets without direct link flag and high TTL
+ * are flooded through the net
+ */
+ if (!directlink &&
+ !(batadv_ogm_packet->flags & BATADV_DIRECTLINK) &&
+ batadv_ogm_packet->ttl != 1 &&
+
+ /* own packets originating non-primary
+ * interfaces leave only that interface
+ */
+ (!forw_packet->own ||
+ forw_packet->if_incoming == primary_if)) {
+ res = true;
+ goto out;
+ }
+
+ /* if the incoming packet is sent via this one
+ * interface only - we still can aggregate
+ */
+ if (directlink &&
+ new_bat_ogm_packet->ttl == 1 &&
+ forw_packet->if_incoming == if_incoming &&
+
+ /* packets from direct neighbors or
+ * own secondary interface packets
+ * (= secondary interface packets in general)
+ */
+ (batadv_ogm_packet->flags & BATADV_DIRECTLINK ||
+ (forw_packet->own &&
+ forw_packet->if_incoming != primary_if))) {
+ res = true;
+ goto out;
+ }
+
+out:
+ batadv_hardif_put(primary_if);
+ return res;
+}
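
Collapsing the two time_*() checks into one condition: aggregation is only allowed if send_time < forw_packet->send_time <= send_time + BATADV_MAX_AGGREGATION_MS, i.e. the queued aggregate leaves after the new OGM is ready to go, but still inside the aggregation window.
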
+
+/**
+ * batadv_iv_ogm_aggregate_new() - create a new aggregated packet and add this
+ * packet to it.
+ * @packet_buff: pointer to the OGM
+ * @packet_len: (total) length of the OGM
+ * @send_time: timestamp (jiffies) when the packet is to be sent
+ * @direct_link: whether this OGM has direct link status
+ * @if_incoming: interface where the packet was received
+ * @if_outgoing: interface for which the retransmission should be considered
+ * @own_packet: true if it is a self-generated ogm
+ */
+static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
+ int packet_len, unsigned long send_time,
+ bool direct_link,
+ struct batadv_hard_iface *if_incoming,
+ struct batadv_hard_iface *if_outgoing,
+ int own_packet)
+{
+ struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct batadv_forw_packet *forw_packet_aggr;
+ struct sk_buff *skb;
+ unsigned char *skb_buff;
+ unsigned int skb_size;
+ atomic_t *queue_left = own_packet ? NULL : &bat_priv->batman_queue_left;
+
+ if (atomic_read(&bat_priv->aggregated_ogms) &&
+ packet_len < BATADV_MAX_AGGREGATION_BYTES)
+ skb_size = BATADV_MAX_AGGREGATION_BYTES;
+ else
+ skb_size = packet_len;
+
+ skb_size += ETH_HLEN;
+
+ skb = netdev_alloc_skb_ip_align(NULL, skb_size);
+ if (!skb)
+ return;
+
+ forw_packet_aggr = batadv_forw_packet_alloc(if_incoming, if_outgoing,
+ queue_left, bat_priv, skb);
+ if (!forw_packet_aggr) {
+ kfree_skb(skb);
+ return;
+ }
+
+ forw_packet_aggr->skb->priority = TC_PRIO_CONTROL;
+ skb_reserve(forw_packet_aggr->skb, ETH_HLEN);
+
+ skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
+ forw_packet_aggr->packet_len = packet_len;
+ memcpy(skb_buff, packet_buff, packet_len);
+
+ forw_packet_aggr->own = own_packet;
+ forw_packet_aggr->direct_link_flags = BATADV_NO_FLAGS;
+ forw_packet_aggr->send_time = send_time;
+
+ /* save packet direct link flag status */
+ if (direct_link)
+ forw_packet_aggr->direct_link_flags |= 1;
+
+ INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work,
+ batadv_iv_send_outstanding_bat_ogm_packet);
+
+ batadv_forw_packet_ogmv1_queue(bat_priv, forw_packet_aggr, send_time);
+}
+
+/* aggregate a new packet into the existing ogm packet */
+static void batadv_iv_ogm_aggregate(struct batadv_forw_packet *forw_packet_aggr,
+ const unsigned char *packet_buff,
+ int packet_len, bool direct_link)
+{
+ unsigned long new_direct_link_flag;
+
+ skb_put_data(forw_packet_aggr->skb, packet_buff, packet_len);
+ forw_packet_aggr->packet_len += packet_len;
+ forw_packet_aggr->num_packets++;
+
+ /* save packet direct link flag status */
+ if (direct_link) {
+ new_direct_link_flag = BIT(forw_packet_aggr->num_packets);
+ forw_packet_aggr->direct_link_flags |= new_direct_link_flag;
+ }
+}
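
direct_link_flags thus acts as a per-aggregate bitmap: bit 0 covers the base packet (set in batadv_iv_ogm_aggregate_new()), bit n the n-th appended OGM, and batadv_iv_ogm_send_to_if() reads the bitmap back via BIT(packet_num) to restore each packet's BATADV_DIRECTLINK flag before transmission.
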
+
+/**
+ * batadv_iv_ogm_queue_add() - queue up an OGM for transmission
+ * @bat_priv: the bat priv with all the soft interface information
+ * @packet_buff: pointer to the OGM
+ * @packet_len: (total) length of the OGM
+ * @if_incoming: interface where the packet was received
+ * @if_outgoing: interface for which the retransmission should be considered
+ * @own_packet: true if it is a self-generated ogm
+ * @send_time: timestamp (jiffies) when the packet is to be sent
+ */
+static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv,
+ unsigned char *packet_buff,
+ int packet_len,
+ struct batadv_hard_iface *if_incoming,
+ struct batadv_hard_iface *if_outgoing,
+ int own_packet, unsigned long send_time)
+{
+ /* _aggr -> pointer to the packet we want to aggregate with
+ * _pos -> pointer to the position in the queue
+ */
+ struct batadv_forw_packet *forw_packet_aggr = NULL;
+ struct batadv_forw_packet *forw_packet_pos = NULL;
+ struct batadv_ogm_packet *batadv_ogm_packet;
+ bool direct_link;
+ unsigned long max_aggregation_jiffies;
+
+ batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff;
+ direct_link = !!(batadv_ogm_packet->flags & BATADV_DIRECTLINK);
+ max_aggregation_jiffies = msecs_to_jiffies(BATADV_MAX_AGGREGATION_MS);
+
+ /* find position for the packet in the forward queue */
+ spin_lock_bh(&bat_priv->forw_bat_list_lock);
+ /* own packets are not to be aggregated */
+ if (atomic_read(&bat_priv->aggregated_ogms) && !own_packet) {
+ hlist_for_each_entry(forw_packet_pos,
+ &bat_priv->forw_bat_list, list) {
+ if (batadv_iv_ogm_can_aggregate(batadv_ogm_packet,
+ bat_priv, packet_len,
+ send_time, direct_link,
+ if_incoming,
+ if_outgoing,
+ forw_packet_pos)) {
+ forw_packet_aggr = forw_packet_pos;
+ break;
+ }
+ }
+ }
+
+ /* nothing to aggregate with - either aggregation disabled or no
+ * suitable aggregation packet found
+ */
+ if (!forw_packet_aggr) {
+ /* the following section can run without the lock */
+ spin_unlock_bh(&bat_priv->forw_bat_list_lock);
+
+ /* if we could not aggregate this packet with one of the others
+ * we hold it back for a while, so that it might be aggregated
+ * later on
+ */
+ if (!own_packet && atomic_read(&bat_priv->aggregated_ogms))
+ send_time += max_aggregation_jiffies;
+
+ batadv_iv_ogm_aggregate_new(packet_buff, packet_len,
+ send_time, direct_link,
+ if_incoming, if_outgoing,
+ own_packet);
+ } else {
+ batadv_iv_ogm_aggregate(forw_packet_aggr, packet_buff,
+ packet_len, direct_link);
+ spin_unlock_bh(&bat_priv->forw_bat_list_lock);
+ }
+}
+
+static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
+ const struct ethhdr *ethhdr,
+ struct batadv_ogm_packet *batadv_ogm_packet,
+ bool is_single_hop_neigh,
+ bool is_from_best_next_hop,
+ struct batadv_hard_iface *if_incoming,
+ struct batadv_hard_iface *if_outgoing)
+{
+ struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ u16 tvlv_len;
+
+ if (batadv_ogm_packet->ttl <= 1) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n");
+ return;
+ }
+
+ if (!is_from_best_next_hop) {
+ /* Mark the forwarded packet when it is not coming from our
+ * best next hop. We still need to forward the packet for our
+ * neighbor link quality detection to work in case the packet
+ * originated from a single hop neighbor. Otherwise we can
+ * simply drop the ogm.
+ */
+ if (is_single_hop_neigh)
+ batadv_ogm_packet->flags |= BATADV_NOT_BEST_NEXT_HOP;
+ else
+ return;
+ }
+
+ tvlv_len = ntohs(batadv_ogm_packet->tvlv_len);
+
+ batadv_ogm_packet->ttl--;
+ ether_addr_copy(batadv_ogm_packet->prev_sender, ethhdr->h_source);
+
+ /* apply hop penalty */
+ batadv_ogm_packet->tq = batadv_hop_penalty(batadv_ogm_packet->tq,
+ bat_priv);
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Forwarding packet: tq: %i, ttl: %i\n",
+ batadv_ogm_packet->tq, batadv_ogm_packet->ttl);
+
+ if (is_single_hop_neigh)
+ batadv_ogm_packet->flags |= BATADV_DIRECTLINK;
+ else
+ batadv_ogm_packet->flags &= ~BATADV_DIRECTLINK;
+
+ batadv_iv_ogm_queue_add(bat_priv, (unsigned char *)batadv_ogm_packet,
+ BATADV_OGM_HLEN + tvlv_len,
+ if_incoming, if_outgoing, 0,
+ batadv_iv_ogm_fwd_send_time());
+}
+
+/**
+ * batadv_iv_ogm_slide_own_bcast_window() - bitshift own OGM broadcast windows
+ * for the given interface
+ * @hard_iface: the interface for which the windows have to be shifted
+ */
+static void
+batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
+{
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
+ struct hlist_head *head;
+ struct batadv_orig_node *orig_node;
+ struct batadv_orig_ifinfo *orig_ifinfo;
+ unsigned long *word;
+ u32 i;
+ u8 *w;
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
+ hlist_for_each_entry_rcu(orig_ifinfo,
+ &orig_node->ifinfo_list,
+ list) {
+ if (orig_ifinfo->if_outgoing != hard_iface)
+ continue;
+
+ spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+ word = orig_ifinfo->bat_iv.bcast_own;
+ batadv_bit_get_packet(bat_priv, word, 1, 0);
+ w = &orig_ifinfo->bat_iv.bcast_own_sum;
+ *w = bitmap_weight(word,
+ BATADV_TQ_LOCAL_WINDOW_SIZE);
+ spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+ }
+ }
+ rcu_read_unlock();
+ }
+}
+
+/**
+ * batadv_iv_ogm_schedule_buff() - schedule submission of hardif ogm buffer
+ * @hard_iface: interface whose ogm buffer should be transmitted
+ */
+static void batadv_iv_ogm_schedule_buff(struct batadv_hard_iface *hard_iface)
+{
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ unsigned char **ogm_buff = &hard_iface->bat_iv.ogm_buff;
+ struct batadv_ogm_packet *batadv_ogm_packet;
+ struct batadv_hard_iface *primary_if, *tmp_hard_iface;
+ int *ogm_buff_len = &hard_iface->bat_iv.ogm_buff_len;
+ u32 seqno;
+ u16 tvlv_len = 0;
+ unsigned long send_time;
+
+ lockdep_assert_held(&hard_iface->bat_iv.ogm_buff_mutex);
+
+ /* interface already disabled by batadv_iv_ogm_iface_disable */
+ if (!*ogm_buff)
+ return;
+
+ /* the interface gets activated here to avoid race conditions between
+ * the moment the interface is activated in
+ * hardif_activate_interface() (where the originator mac is set) and
+ * outdated packets (especially ones carrying uninitialized mac
+ * addresses) sitting in the packet queue
+ */
+ if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
+ hard_iface->if_status = BATADV_IF_ACTIVE;
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+
+ if (hard_iface == primary_if) {
+ /* tt changes have to be committed before the tvlv data is
+ * appended as it may alter the tt tvlv container
+ */
+ batadv_tt_local_commit_changes(bat_priv);
+ tvlv_len = batadv_tvlv_container_ogm_append(bat_priv, ogm_buff,
+ ogm_buff_len,
+ BATADV_OGM_HLEN);
+ }
+
+ batadv_ogm_packet = (struct batadv_ogm_packet *)(*ogm_buff);
+ batadv_ogm_packet->tvlv_len = htons(tvlv_len);
+
+ /* change sequence number to network order */
+ seqno = (u32)atomic_read(&hard_iface->bat_iv.ogm_seqno);
+ batadv_ogm_packet->seqno = htonl(seqno);
+ atomic_inc(&hard_iface->bat_iv.ogm_seqno);
+
+ batadv_iv_ogm_slide_own_bcast_window(hard_iface);
+
+ send_time = batadv_iv_ogm_emit_send_time(bat_priv);
+
+ if (hard_iface != primary_if) {
+ /* OGMs from secondary interfaces are only scheduled on their
+ * respective interfaces.
+ */
+ batadv_iv_ogm_queue_add(bat_priv, *ogm_buff, *ogm_buff_len,
+ hard_iface, hard_iface, 1, send_time);
+ goto out;
+ }
+
+ /* OGMs from primary interfaces are scheduled on all
+ * interfaces.
+ */
+ rcu_read_lock();
+ list_for_each_entry_rcu(tmp_hard_iface, &batadv_hardif_list, list) {
+ if (tmp_hard_iface->soft_iface != hard_iface->soft_iface)
+ continue;
+
+ if (!kref_get_unless_zero(&tmp_hard_iface->refcount))
+ continue;
+
+ batadv_iv_ogm_queue_add(bat_priv, *ogm_buff,
+ *ogm_buff_len, hard_iface,
+ tmp_hard_iface, 1, send_time);
+
+ batadv_hardif_put(tmp_hard_iface);
+ }
+ rcu_read_unlock();
+
+out:
+ batadv_hardif_put(primary_if);
+}
+
+static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
+{
+ if (hard_iface->if_status == BATADV_IF_NOT_IN_USE ||
+ hard_iface->if_status == BATADV_IF_TO_BE_REMOVED)
+ return;
+
+ mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+ batadv_iv_ogm_schedule_buff(hard_iface);
+ mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
+}
+
+/**
+ * batadv_iv_orig_ifinfo_sum() - Get bcast_own sum for originator over interface
+ * @orig_node: originator which rebroadcasted the OGMs directly
+ * @if_outgoing: interface which transmitted the original OGM and received the
+ * direct rebroadcast
+ *
+ * Return: Number of rebroadcasted OGMs which were transmitted by an
+ * originator and directly (without an intermediate hop) received by a specific
+ * interface
+ */
+static u8 batadv_iv_orig_ifinfo_sum(struct batadv_orig_node *orig_node,
+ struct batadv_hard_iface *if_outgoing)
+{
+ struct batadv_orig_ifinfo *orig_ifinfo;
+ u8 sum;
+
+ orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing);
+ if (!orig_ifinfo)
+ return 0;
+
+ spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+ sum = orig_ifinfo->bat_iv.bcast_own_sum;
+ spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+
+ batadv_orig_ifinfo_put(orig_ifinfo);
+
+ return sum;
+}
+
+/**
+ * batadv_iv_ogm_orig_update() - use OGM to update corresponding data in an
+ * originator
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: the orig node who originally emitted the ogm packet
+ * @orig_ifinfo: ifinfo for the outgoing interface of the orig_node
+ * @ethhdr: Ethernet header of the OGM
+ * @batadv_ogm_packet: the ogm packet
+ * @if_incoming: interface where the packet was received
+ * @if_outgoing: interface for which the retransmission should be considered
+ * @dup_status: the duplicate status of this ogm packet.
+ */
+static void
+batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ struct batadv_orig_ifinfo *orig_ifinfo,
+ const struct ethhdr *ethhdr,
+ const struct batadv_ogm_packet *batadv_ogm_packet,
+ struct batadv_hard_iface *if_incoming,
+ struct batadv_hard_iface *if_outgoing,
+ enum batadv_dup_status dup_status)
+{
+ struct batadv_neigh_ifinfo *neigh_ifinfo = NULL;
+ struct batadv_neigh_ifinfo *router_ifinfo = NULL;
+ struct batadv_neigh_node *neigh_node = NULL;
+ struct batadv_neigh_node *tmp_neigh_node = NULL;
+ struct batadv_neigh_node *router = NULL;
+ u8 sum_orig, sum_neigh;
+ u8 *neigh_addr;
+ u8 tq_avg;
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "%s(): Searching and updating originator entry of received packet\n",
+ __func__);
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(tmp_neigh_node,
+ &orig_node->neigh_list, list) {
+ neigh_addr = tmp_neigh_node->addr;
+ if (batadv_compare_eth(neigh_addr, ethhdr->h_source) &&
+ tmp_neigh_node->if_incoming == if_incoming &&
+ kref_get_unless_zero(&tmp_neigh_node->refcount)) {
+ if (WARN(neigh_node, "too many matching neigh_nodes"))
+ batadv_neigh_node_put(neigh_node);
+ neigh_node = tmp_neigh_node;
+ continue;
+ }
+
+ if (dup_status != BATADV_NO_DUP)
+ continue;
+
+ /* only update the entry for this outgoing interface */
+ neigh_ifinfo = batadv_neigh_ifinfo_get(tmp_neigh_node,
+ if_outgoing);
+ if (!neigh_ifinfo)
+ continue;
+
+ spin_lock_bh(&tmp_neigh_node->ifinfo_lock);
+ batadv_ring_buffer_set(neigh_ifinfo->bat_iv.tq_recv,
+ &neigh_ifinfo->bat_iv.tq_index, 0);
+ tq_avg = batadv_ring_buffer_avg(neigh_ifinfo->bat_iv.tq_recv);
+ neigh_ifinfo->bat_iv.tq_avg = tq_avg;
+ spin_unlock_bh(&tmp_neigh_node->ifinfo_lock);
+
+ batadv_neigh_ifinfo_put(neigh_ifinfo);
+ neigh_ifinfo = NULL;
+ }
+
+ if (!neigh_node) {
+ struct batadv_orig_node *orig_tmp;
+
+ orig_tmp = batadv_iv_ogm_orig_get(bat_priv, ethhdr->h_source);
+ if (!orig_tmp)
+ goto unlock;
+
+ neigh_node = batadv_iv_ogm_neigh_new(if_incoming,
+ ethhdr->h_source,
+ orig_node, orig_tmp);
+
+ batadv_orig_node_put(orig_tmp);
+ if (!neigh_node)
+ goto unlock;
+ } else {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Updating existing last-hop neighbor of originator\n");
+ }
+
+ rcu_read_unlock();
+ neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing);
+ if (!neigh_ifinfo)
+ goto out;
+
+ neigh_node->last_seen = jiffies;
+
+ spin_lock_bh(&neigh_node->ifinfo_lock);
+ batadv_ring_buffer_set(neigh_ifinfo->bat_iv.tq_recv,
+ &neigh_ifinfo->bat_iv.tq_index,
+ batadv_ogm_packet->tq);
+ tq_avg = batadv_ring_buffer_avg(neigh_ifinfo->bat_iv.tq_recv);
+ neigh_ifinfo->bat_iv.tq_avg = tq_avg;
+ spin_unlock_bh(&neigh_node->ifinfo_lock);
+
+ if (dup_status == BATADV_NO_DUP) {
+ orig_ifinfo->last_ttl = batadv_ogm_packet->ttl;
+ neigh_ifinfo->last_ttl = batadv_ogm_packet->ttl;
+ }
+
+ /* if this neighbor already is our next hop there is nothing
+ * to change
+ */
+ router = batadv_orig_router_get(orig_node, if_outgoing);
+ if (router == neigh_node)
+ goto out;
+
+ if (router) {
+ router_ifinfo = batadv_neigh_ifinfo_get(router, if_outgoing);
+ if (!router_ifinfo)
+ goto out;
+
+ /* if this neighbor does not offer a better TQ we won't
+ * consider it
+ */
+ if (router_ifinfo->bat_iv.tq_avg > neigh_ifinfo->bat_iv.tq_avg)
+ goto out;
+ }
+
+ /* if the TQ is the same and the link not more symmetric we
+ * won't consider it either
+ */
+ if (router_ifinfo &&
+ neigh_ifinfo->bat_iv.tq_avg == router_ifinfo->bat_iv.tq_avg) {
+ sum_orig = batadv_iv_orig_ifinfo_sum(router->orig_node,
+ router->if_incoming);
+ sum_neigh = batadv_iv_orig_ifinfo_sum(neigh_node->orig_node,
+ neigh_node->if_incoming);
+ if (sum_orig >= sum_neigh)
+ goto out;
+ }
+
+ batadv_update_route(bat_priv, orig_node, if_outgoing, neigh_node);
+ goto out;
+
+unlock:
+ rcu_read_unlock();
+out:
+ batadv_neigh_node_put(neigh_node);
+ batadv_neigh_node_put(router);
+ batadv_neigh_ifinfo_put(neigh_ifinfo);
+ batadv_neigh_ifinfo_put(router_ifinfo);
+}
+
+/**
+ * batadv_iv_ogm_calc_tq() - calculate tq for current received ogm packet
+ * @orig_node: the orig node who originally emitted the ogm packet
+ * @orig_neigh_node: the orig node struct of the neighbor who sent the packet
+ * @batadv_ogm_packet: the ogm packet
+ * @if_incoming: interface where the packet was received
+ * @if_outgoing: interface for which the retransmission should be considered
+ *
+ * Return: true if the link can be considered bidirectional, false otherwise
+ */
+static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
+ struct batadv_orig_node *orig_neigh_node,
+ struct batadv_ogm_packet *batadv_ogm_packet,
+ struct batadv_hard_iface *if_incoming,
+ struct batadv_hard_iface *if_outgoing)
+{
+ struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node;
+ struct batadv_neigh_ifinfo *neigh_ifinfo;
+ u8 total_count;
+ u8 orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
+ unsigned int tq_iface_hop_penalty = BATADV_TQ_MAX_VALUE;
+ unsigned int neigh_rq_inv_cube, neigh_rq_max_cube;
+ unsigned int tq_asym_penalty, inv_asym_penalty;
+ unsigned int combined_tq;
+ bool ret = false;
+
+ /* find corresponding one hop neighbor */
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(tmp_neigh_node,
+ &orig_neigh_node->neigh_list, list) {
+ if (!batadv_compare_eth(tmp_neigh_node->addr,
+ orig_neigh_node->orig))
+ continue;
+
+ if (tmp_neigh_node->if_incoming != if_incoming)
+ continue;
+
+ if (!kref_get_unless_zero(&tmp_neigh_node->refcount))
+ continue;
+
+ neigh_node = tmp_neigh_node;
+ break;
+ }
+ rcu_read_unlock();
+
+ if (!neigh_node)
+ neigh_node = batadv_iv_ogm_neigh_new(if_incoming,
+ orig_neigh_node->orig,
+ orig_neigh_node,
+ orig_neigh_node);
+
+ if (!neigh_node)
+ goto out;
+
+ /* if orig_node is direct neighbor update neigh_node last_seen */
+ if (orig_node == orig_neigh_node)
+ neigh_node->last_seen = jiffies;
+
+ orig_node->last_seen = jiffies;
+
+ /* find packet count of corresponding one hop neighbor */
+ orig_eq_count = batadv_iv_orig_ifinfo_sum(orig_neigh_node, if_incoming);
+ neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing);
+ if (neigh_ifinfo) {
+ neigh_rq_count = neigh_ifinfo->bat_iv.real_packet_count;
+ batadv_neigh_ifinfo_put(neigh_ifinfo);
+ } else {
+ neigh_rq_count = 0;
+ }
+
+ /* pay attention to not get a value bigger than 100 % */
+ if (orig_eq_count > neigh_rq_count)
+ total_count = neigh_rq_count;
+ else
+ total_count = orig_eq_count;
+
+ /* if we have too few packets (too little data) we set tq_own to zero;
+ * if we receive too few packets the link is not considered bidirectional
+ */
+ if (total_count < BATADV_TQ_LOCAL_BIDRECT_SEND_MINIMUM ||
+ neigh_rq_count < BATADV_TQ_LOCAL_BIDRECT_RECV_MINIMUM)
+ tq_own = 0;
+ else
+ /* neigh_node->real_packet_count is never zero as we
+ * only purge old information when getting new
+ * information
+ */
+ tq_own = (BATADV_TQ_MAX_VALUE * total_count) / neigh_rq_count;
+
+ /* 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE. This
+ * affects nearly-symmetric links only a little, but
+ * punishes asymmetric links more. This will give a value
+ * between 0 and TQ_MAX_VALUE
+ */
+ neigh_rq_inv = BATADV_TQ_LOCAL_WINDOW_SIZE - neigh_rq_count;
+ neigh_rq_inv_cube = neigh_rq_inv * neigh_rq_inv * neigh_rq_inv;
+ neigh_rq_max_cube = BATADV_TQ_LOCAL_WINDOW_SIZE *
+ BATADV_TQ_LOCAL_WINDOW_SIZE *
+ BATADV_TQ_LOCAL_WINDOW_SIZE;
+ inv_asym_penalty = BATADV_TQ_MAX_VALUE * neigh_rq_inv_cube;
+ inv_asym_penalty /= neigh_rq_max_cube;
+ tq_asym_penalty = BATADV_TQ_MAX_VALUE - inv_asym_penalty;
+ tq_iface_hop_penalty -= atomic_read(&if_incoming->hop_penalty);
+
+ /* penalize if the OGM is forwarded on the same interface. WiFi
+ * interfaces and other half duplex devices suffer from throughput
+ * drops as they can't send and receive at the same time.
+ */
+ if (if_outgoing && if_incoming == if_outgoing &&
+ batadv_is_wifi_hardif(if_outgoing))
+ tq_iface_hop_penalty = batadv_hop_penalty(tq_iface_hop_penalty,
+ bat_priv);
+
+ combined_tq = batadv_ogm_packet->tq *
+ tq_own *
+ tq_asym_penalty *
+ tq_iface_hop_penalty;
+ combined_tq /= BATADV_TQ_MAX_VALUE *
+ BATADV_TQ_MAX_VALUE *
+ BATADV_TQ_MAX_VALUE;
+ batadv_ogm_packet->tq = combined_tq;
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "bidirectional: orig = %pM neigh = %pM => own_bcast = %2i, real recv = %2i, local tq: %3i, asym_penalty: %3i, iface_hop_penalty: %3i, total tq: %3i, if_incoming = %s, if_outgoing = %s\n",
+ orig_node->orig, orig_neigh_node->orig, total_count,
+ neigh_rq_count, tq_own, tq_asym_penalty,
+ tq_iface_hop_penalty, batadv_ogm_packet->tq,
+ if_incoming->net_dev->name,
+ if_outgoing ? if_outgoing->net_dev->name : "DEFAULT");
+
+ /* if link has the minimum required transmission quality
+ * consider it bidirectional
+ */
+ if (batadv_ogm_packet->tq >= BATADV_TQ_TOTAL_BIDRECT_LIMIT)
+ ret = true;
+
+out:
+ batadv_neigh_node_put(neigh_node);
+ return ret;
+}
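
Putting the arithmetic above together (with TQ_MAX = BATADV_TQ_MAX_VALUE and WINDOW = BATADV_TQ_LOCAL_WINDOW_SIZE):

    tq_asym_penalty = TQ_MAX * (1 - ((WINDOW - neigh_rq_count) / WINDOW)^3)
    combined_tq     = tq * tq_own * tq_asym_penalty * tq_iface_hop_penalty
                      / TQ_MAX^3

so a perfectly symmetric link (neigh_rq_count == WINDOW) leaves the received tq untouched apart from tq_own and the interface hop penalty.
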
+
+/**
+ * batadv_iv_ogm_update_seqnos() - process a batman packet for all interfaces,
+ * adjust the sequence number and find out whether it is a duplicate
+ * @ethhdr: ethernet header of the packet
+ * @batadv_ogm_packet: OGM packet to be considered
+ * @if_incoming: interface on which the OGM packet was received
+ * @if_outgoing: interface for which the retransmission should be considered
+ *
+ * Return: duplicate status as enum batadv_dup_status
+ */
+static enum batadv_dup_status
+batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
+ const struct batadv_ogm_packet *batadv_ogm_packet,
+ const struct batadv_hard_iface *if_incoming,
+ struct batadv_hard_iface *if_outgoing)
+{
+ struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct batadv_orig_node *orig_node;
+ struct batadv_orig_ifinfo *orig_ifinfo = NULL;
+ struct batadv_neigh_node *neigh_node;
+ struct batadv_neigh_ifinfo *neigh_ifinfo;
+ bool is_dup;
+ s32 seq_diff;
+ bool need_update = false;
+ int set_mark;
+ enum batadv_dup_status ret = BATADV_NO_DUP;
+ u32 seqno = ntohl(batadv_ogm_packet->seqno);
+ u8 *neigh_addr;
+ u8 packet_count;
+ unsigned long *bitmap;
+
+ orig_node = batadv_iv_ogm_orig_get(bat_priv, batadv_ogm_packet->orig);
+ if (!orig_node)
+ return BATADV_NO_DUP;
+
+ orig_ifinfo = batadv_orig_ifinfo_new(orig_node, if_outgoing);
+ if (WARN_ON(!orig_ifinfo)) {
+ batadv_orig_node_put(orig_node);
+ return 0;
+ }
+
+ spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+ seq_diff = seqno - orig_ifinfo->last_real_seqno;
+
+ /* signal to the caller that the packet is to be dropped. */
+ if (!hlist_empty(&orig_node->neigh_list) &&
+ batadv_window_protected(bat_priv, seq_diff,
+ BATADV_TQ_LOCAL_WINDOW_SIZE,
+ &orig_ifinfo->batman_seqno_reset, NULL)) {
+ ret = BATADV_PROTECTED;
+ goto out;
+ }
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(neigh_node, &orig_node->neigh_list, list) {
+ neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node,
+ if_outgoing);
+ if (!neigh_ifinfo)
+ continue;
+
+ neigh_addr = neigh_node->addr;
+ is_dup = batadv_test_bit(neigh_ifinfo->bat_iv.real_bits,
+ orig_ifinfo->last_real_seqno,
+ seqno);
+
+ if (batadv_compare_eth(neigh_addr, ethhdr->h_source) &&
+ neigh_node->if_incoming == if_incoming) {
+ set_mark = 1;
+ if (is_dup)
+ ret = BATADV_NEIGH_DUP;
+ } else {
+ set_mark = 0;
+ if (is_dup && ret != BATADV_NEIGH_DUP)
+ ret = BATADV_ORIG_DUP;
+ }
+
+ /* if the window moved, set the update flag. */
+ bitmap = neigh_ifinfo->bat_iv.real_bits;
+ need_update |= batadv_bit_get_packet(bat_priv, bitmap,
+ seq_diff, set_mark);
+
+ packet_count = bitmap_weight(bitmap,
+ BATADV_TQ_LOCAL_WINDOW_SIZE);
+ neigh_ifinfo->bat_iv.real_packet_count = packet_count;
+ batadv_neigh_ifinfo_put(neigh_ifinfo);
+ }
+ rcu_read_unlock();
+
+ if (need_update) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "%s updating last_seqno: old %u, new %u\n",
+ if_outgoing ? if_outgoing->net_dev->name : "DEFAULT",
+ orig_ifinfo->last_real_seqno, seqno);
+ orig_ifinfo->last_real_seqno = seqno;
+ }
+
+out:
+ spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+ batadv_orig_node_put(orig_node);
+ batadv_orig_ifinfo_put(orig_ifinfo);
+ return ret;
+}
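
The per-neighbor real_bits bitmap handled above is a classic sliding sequence-number window. A stripped-down, self-contained userspace sketch of the technique (fixed 64-bit window, invented helper names, and none of the seqno-reset protection that batadv_window_protected() adds):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define WINDOW 64 /* stands in for BATADV_TQ_LOCAL_WINDOW_SIZE */

    static uint64_t window_bits; /* bit 0 = last_seqno, bit n = n packets ago */
    static uint32_t last_seqno;

    /* mark a received seqno; returns true when the window slid forward */
    static bool window_mark(uint32_t seqno)
    {
            int32_t diff = (int32_t)(seqno - last_seqno);

            if (diff <= -WINDOW)
                    return false;                  /* too old: outside window */

            if (diff <= 0) {
                    window_bits |= 1ULL << -diff;  /* in-window (re)transmit */
                    return false;
            }

            /* newer sequence number: shift history, mark it, move window */
            window_bits = (diff >= WINDOW) ? 0 : window_bits << diff;
            window_bits |= 1;
            last_seqno = seqno;
            return true;
    }

    int main(void)
    {
            window_mark(100);
            window_mark(101);
            window_mark(99); /* falls inside the window, no slide */
            printf("bits=%#llx last=%u\n",
                   (unsigned long long)window_bits, last_seqno);
            return 0;
    }
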
+
+/**
+ * batadv_iv_ogm_process_per_outif() - process a batman iv OGM for an outgoing
+ * interface
+ * @skb: the skb containing the OGM
+ * @ogm_offset: offset from skb->data to start of ogm header
+ * @orig_node: the (cached) orig node for the originator of this OGM
+ * @if_incoming: the interface where this packet was received
+ * @if_outgoing: the interface for which the packet should be considered
+ */
+static void
+batadv_iv_ogm_process_per_outif(const struct sk_buff *skb, int ogm_offset,
+ struct batadv_orig_node *orig_node,
+ struct batadv_hard_iface *if_incoming,
+ struct batadv_hard_iface *if_outgoing)
+{
+ struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct batadv_hardif_neigh_node *hardif_neigh = NULL;
+ struct batadv_neigh_node *router = NULL;
+ struct batadv_neigh_node *router_router = NULL;
+ struct batadv_orig_node *orig_neigh_node;
+ struct batadv_orig_ifinfo *orig_ifinfo;
+ struct batadv_neigh_node *orig_neigh_router = NULL;
+ struct batadv_neigh_ifinfo *router_ifinfo = NULL;
+ struct batadv_ogm_packet *ogm_packet;
+ enum batadv_dup_status dup_status;
+ bool is_from_best_next_hop = false;
+ bool is_single_hop_neigh = false;
+ bool sameseq, similar_ttl;
+ struct sk_buff *skb_priv;
+ struct ethhdr *ethhdr;
+ u8 *prev_sender;
+ bool is_bidirect;
+
+ /* create a private copy of the skb, as some functions change tq value
+ * and/or flags.
+ */
+ skb_priv = skb_copy(skb, GFP_ATOMIC);
+ if (!skb_priv)
+ return;
+
+ ethhdr = eth_hdr(skb_priv);
+ ogm_packet = (struct batadv_ogm_packet *)(skb_priv->data + ogm_offset);
+
+ dup_status = batadv_iv_ogm_update_seqnos(ethhdr, ogm_packet,
+ if_incoming, if_outgoing);
+ if (batadv_compare_eth(ethhdr->h_source, ogm_packet->orig))
+ is_single_hop_neigh = true;
+
+ if (dup_status == BATADV_PROTECTED) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: packet within seqno protection time (sender: %pM)\n",
+ ethhdr->h_source);
+ goto out;
+ }
+
+ if (ogm_packet->tq == 0) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: originator packet with tq equal 0\n");
+ goto out;
+ }
+
+ if (is_single_hop_neigh) {
+ hardif_neigh = batadv_hardif_neigh_get(if_incoming,
+ ethhdr->h_source);
+ if (hardif_neigh)
+ hardif_neigh->last_seen = jiffies;
+ }
+
+ router = batadv_orig_router_get(orig_node, if_outgoing);
+ if (router) {
+ router_router = batadv_orig_router_get(router->orig_node,
+ if_outgoing);
+ router_ifinfo = batadv_neigh_ifinfo_get(router, if_outgoing);
+ }
+
+ if ((router_ifinfo && router_ifinfo->bat_iv.tq_avg != 0) &&
+ (batadv_compare_eth(router->addr, ethhdr->h_source)))
+ is_from_best_next_hop = true;
+
+ prev_sender = ogm_packet->prev_sender;
+ /* avoid temporary routing loops */
+ if (router && router_router &&
+ (batadv_compare_eth(router->addr, prev_sender)) &&
+ !(batadv_compare_eth(ogm_packet->orig, prev_sender)) &&
+ (batadv_compare_eth(router->addr, router_router->addr))) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: ignoring all rebroadcast packets that may make me loop (sender: %pM)\n",
+ ethhdr->h_source);
+ goto out;
+ }
+
+ if (if_outgoing == BATADV_IF_DEFAULT)
+ batadv_tvlv_ogm_receive(bat_priv, ogm_packet, orig_node);
+
+ /* if sender is a direct neighbor the sender mac equals
+ * originator mac
+ */
+ if (is_single_hop_neigh)
+ orig_neigh_node = orig_node;
+ else
+ orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv,
+ ethhdr->h_source);
+
+ if (!orig_neigh_node)
+ goto out;
+
+ /* Update nc_nodes of the originator */
+ batadv_nc_update_nc_node(bat_priv, orig_node, orig_neigh_node,
+ ogm_packet, is_single_hop_neigh);
+
+ orig_neigh_router = batadv_orig_router_get(orig_neigh_node,
+ if_outgoing);
+
+ /* drop packet if sender is not a direct neighbor and if we
+ * don't route towards it
+ */
+ if (!is_single_hop_neigh && !orig_neigh_router) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: OGM via unknown neighbor!\n");
+ goto out_neigh;
+ }
+
+ is_bidirect = batadv_iv_ogm_calc_tq(orig_node, orig_neigh_node,
+ ogm_packet, if_incoming,
+ if_outgoing);
+
+ /* update ranking if it is not a duplicate or has the same
+ * seqno and similar ttl as the non-duplicate
+ */
+ orig_ifinfo = batadv_orig_ifinfo_new(orig_node, if_outgoing);
+ if (!orig_ifinfo)
+ goto out_neigh;
+
+ sameseq = orig_ifinfo->last_real_seqno == ntohl(ogm_packet->seqno);
+ similar_ttl = (orig_ifinfo->last_ttl - 3) <= ogm_packet->ttl;
+
+ if (is_bidirect && (dup_status == BATADV_NO_DUP ||
+ (sameseq && similar_ttl))) {
+ batadv_iv_ogm_orig_update(bat_priv, orig_node,
+ orig_ifinfo, ethhdr,
+ ogm_packet, if_incoming,
+ if_outgoing, dup_status);
+ }
+ batadv_orig_ifinfo_put(orig_ifinfo);
+
+ /* only forward for specific interface, not for the default one. */
+ if (if_outgoing == BATADV_IF_DEFAULT)
+ goto out_neigh;
+
+ /* is single hop (direct) neighbor */
+ if (is_single_hop_neigh) {
+ /* OGMs from secondary interfaces should only be scheduled once
+ * per interface where it has been received, not multiple times
+ */
+ if (ogm_packet->ttl <= 2 &&
+ if_incoming != if_outgoing) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: OGM from secondary interface and wrong outgoing interface\n");
+ goto out_neigh;
+ }
+ /* mark direct link on incoming interface */
+ batadv_iv_ogm_forward(orig_node, ethhdr, ogm_packet,
+ is_single_hop_neigh,
+ is_from_best_next_hop, if_incoming,
+ if_outgoing);
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Forwarding packet: rebroadcast neighbor packet with direct link flag\n");
+ goto out_neigh;
+ }
+
+ /* multihop originator */
+ if (!is_bidirect) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: not received via bidirectional link\n");
+ goto out_neigh;
+ }
+
+ if (dup_status == BATADV_NEIGH_DUP) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: duplicate packet received\n");
+ goto out_neigh;
+ }
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Forwarding packet: rebroadcast originator packet\n");
+ batadv_iv_ogm_forward(orig_node, ethhdr, ogm_packet,
+ is_single_hop_neigh, is_from_best_next_hop,
+ if_incoming, if_outgoing);
+
+out_neigh:
+ if (orig_neigh_node && !is_single_hop_neigh)
+ batadv_orig_node_put(orig_neigh_node);
+out:
+ batadv_neigh_ifinfo_put(router_ifinfo);
+ batadv_neigh_node_put(router);
+ batadv_neigh_node_put(router_router);
+ batadv_neigh_node_put(orig_neigh_router);
+ batadv_hardif_neigh_put(hardif_neigh);
+
+ consume_skb(skb_priv);
+}
+
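The ranking gate a few lines above compresses into a small predicate: a duplicate may still refresh the route when it repeats the last accepted seqno with a TTL at most 3 below the last accepted one. A rough userspace model, illustrative only and using plain ints instead of the kernel's u8/u32 fields:

	#include <stdbool.h>
	#include <stdio.h>

	static bool accept_update(bool bidirect, bool dup,
				  int last_seqno, int seqno,
				  int last_ttl, int ttl)
	{
		bool sameseq = last_seqno == seqno;
		/* cf. (orig_ifinfo->last_ttl - 3) <= ogm_packet->ttl above */
		bool similar_ttl = last_ttl - 3 <= ttl;

		return bidirect && (!dup || (sameseq && similar_ttl));
	}

	int main(void)
	{
		printf("%d\n", accept_update(true, true, 42, 42, 50, 48)); /* 1: TTL close enough */
		printf("%d\n", accept_update(true, true, 42, 42, 50, 44)); /* 0: TTL dropped too far */
		return 0;
	}
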
+/**
+ * batadv_iv_ogm_process_reply() - Check OGM for direct reply and process it
+ * @ogm_packet: rebroadcast OGM packet to process
+ * @if_incoming: the interface where this packet was received
+ * @orig_node: originator which rebroadcast the OGM
+ * @if_incoming_seqno: OGM sequence number when rebroadcast was received
+ */
+static void batadv_iv_ogm_process_reply(struct batadv_ogm_packet *ogm_packet,
+ struct batadv_hard_iface *if_incoming,
+ struct batadv_orig_node *orig_node,
+ u32 if_incoming_seqno)
+{
+ struct batadv_orig_ifinfo *orig_ifinfo;
+ s32 bit_pos;
+ u8 *weight;
+
+ /* neighbor has to indicate direct link and it has to
+ * come via the corresponding interface
+ */
+ if (!(ogm_packet->flags & BATADV_DIRECTLINK))
+ return;
+
+ if (!batadv_compare_eth(if_incoming->net_dev->dev_addr,
+ ogm_packet->orig))
+ return;
+
+ orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_incoming);
+ if (!orig_ifinfo)
+ return;
+
+ /* save packet seqno for bidirectional check */
+ spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+ bit_pos = if_incoming_seqno - 2;
+ bit_pos -= ntohl(ogm_packet->seqno);
+ batadv_set_bit(orig_ifinfo->bat_iv.bcast_own, bit_pos);
+ weight = &orig_ifinfo->bat_iv.bcast_own_sum;
+ *weight = bitmap_weight(orig_ifinfo->bat_iv.bcast_own,
+ BATADV_TQ_LOCAL_WINDOW_SIZE);
+ spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+
+ batadv_orig_ifinfo_put(orig_ifinfo);
+}
+
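In the function above, every rebroadcast of one of our own OGMs sets one bit in a per-interface sliding window indexed by seqno distance, and bcast_own_sum is just the population count of that window. A rough userspace model (illustrative, not upstream code; it assumes a 64-bit window, matching the upstream BATADV_TQ_LOCAL_WINDOW_SIZE of 64):

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t bcast_own;	/* one bit per recently echoed own OGM */

	static void record_echo(uint32_t own_seqno, uint32_t echoed_seqno)
	{
		/* mirrors: bit_pos = if_incoming_seqno - 2 - ntohl(seqno) */
		int32_t bit_pos = (int32_t)(own_seqno - 2 - echoed_seqno);

		if (bit_pos >= 0 && bit_pos < 64)
			bcast_own |= 1ULL << bit_pos;
	}

	int main(void)
	{
		record_echo(1000, 998);	/* freshest possible echo -> bit 0 */
		record_echo(1000, 990);	/* older echo -> bit 8 */
		/* the popcount plays the role of bcast_own_sum above */
		printf("echoes in window: %d\n", __builtin_popcountll(bcast_own));
		return 0;
	}
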
+/**
+ * batadv_iv_ogm_process() - process an incoming batman iv OGM
+ * @skb: the skb containing the OGM
+ * @ogm_offset: offset to the OGM which should be processed (for aggregates)
+ * @if_incoming: the interface where this packet was received
+ */
+static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
+ struct batadv_hard_iface *if_incoming)
+{
+ struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct batadv_orig_node *orig_neigh_node, *orig_node;
+ struct batadv_hard_iface *hard_iface;
+ struct batadv_ogm_packet *ogm_packet;
+ u32 if_incoming_seqno;
+ bool has_directlink_flag;
+ struct ethhdr *ethhdr;
+ bool is_my_oldorig = false;
+ bool is_my_addr = false;
+ bool is_my_orig = false;
+
+ ogm_packet = (struct batadv_ogm_packet *)(skb->data + ogm_offset);
+ ethhdr = eth_hdr(skb);
+
+ /* Silently drop when the batman packet is actually not a
+ * correct packet.
+ *
+ * This might happen if a packet is padded (e.g. Ethernet has a
+ * minimum frame length of 64 bytes) and the aggregation interprets
+ * it as an additional length.
+ *
+ * TODO: A more sane solution would be to have a bit in the
+ * batadv_ogm_packet to detect whether the packet is the last
+ * packet in an aggregation. Here we expect that the padding
+ * is always zero (or not 0x01)
+ */
+ if (ogm_packet->packet_type != BATADV_IV_OGM)
+ return;
+
+ /* could be changed by schedule_own_packet() */
+ if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
+
+ if (ogm_packet->flags & BATADV_DIRECTLINK)
+ has_directlink_flag = true;
+ else
+ has_directlink_flag = false;
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, tq %d, TTL %d, V %d, IDF %d)\n",
+ ethhdr->h_source, if_incoming->net_dev->name,
+ if_incoming->net_dev->dev_addr, ogm_packet->orig,
+ ogm_packet->prev_sender, ntohl(ogm_packet->seqno),
+ ogm_packet->tq, ogm_packet->ttl,
+ ogm_packet->version, has_directlink_flag);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ if (hard_iface->if_status != BATADV_IF_ACTIVE)
+ continue;
+
+ if (hard_iface->soft_iface != if_incoming->soft_iface)
+ continue;
+
+ if (batadv_compare_eth(ethhdr->h_source,
+ hard_iface->net_dev->dev_addr))
+ is_my_addr = true;
+
+ if (batadv_compare_eth(ogm_packet->orig,
+ hard_iface->net_dev->dev_addr))
+ is_my_orig = true;
+
+ if (batadv_compare_eth(ogm_packet->prev_sender,
+ hard_iface->net_dev->dev_addr))
+ is_my_oldorig = true;
+ }
+ rcu_read_unlock();
+
+ if (is_my_addr) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: received my own broadcast (sender: %pM)\n",
+ ethhdr->h_source);
+ return;
+ }
+
+ if (is_my_orig) {
+ orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv,
+ ethhdr->h_source);
+ if (!orig_neigh_node)
+ return;
+
+ batadv_iv_ogm_process_reply(ogm_packet, if_incoming,
+ orig_neigh_node, if_incoming_seqno);
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: originator packet from myself (via neighbor)\n");
+ batadv_orig_node_put(orig_neigh_node);
+ return;
+ }
+
+ if (is_my_oldorig) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: ignoring all rebroadcast echos (sender: %pM)\n",
+ ethhdr->h_source);
+ return;
+ }
+
+ if (ogm_packet->flags & BATADV_NOT_BEST_NEXT_HOP) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: ignoring all packets not forwarded from the best next hop (sender: %pM)\n",
+ ethhdr->h_source);
+ return;
+ }
+
+ orig_node = batadv_iv_ogm_orig_get(bat_priv, ogm_packet->orig);
+ if (!orig_node)
+ return;
+
+ batadv_iv_ogm_process_per_outif(skb, ogm_offset, orig_node,
+ if_incoming, BATADV_IF_DEFAULT);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ if (hard_iface->if_status != BATADV_IF_ACTIVE)
+ continue;
+
+ if (hard_iface->soft_iface != bat_priv->soft_iface)
+ continue;
+
+ if (!kref_get_unless_zero(&hard_iface->refcount))
+ continue;
+
+ batadv_iv_ogm_process_per_outif(skb, ogm_offset, orig_node,
+ if_incoming, hard_iface);
+
+ batadv_hardif_put(hard_iface);
+ }
+ rcu_read_unlock();
+
+ batadv_orig_node_put(orig_node);
+}
+
+static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work)
+{
+ struct delayed_work *delayed_work;
+ struct batadv_forw_packet *forw_packet;
+ struct batadv_priv *bat_priv;
+ bool dropped = false;
+
+ delayed_work = to_delayed_work(work);
+ forw_packet = container_of(delayed_work, struct batadv_forw_packet,
+ delayed_work);
+ bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
+
+ if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING) {
+ dropped = true;
+ goto out;
+ }
+
+ batadv_iv_ogm_emit(forw_packet);
+
+ /* we have to have at least one packet in the queue to determine the
+ * queue's wake-up time unless we are shutting down.
+ *
+ * only re-schedule if this is the "original" copy, e.g. the OGM of the
+ * primary interface should only be rescheduled once per period, but
+ * this function will be called for the forw_packet instances of the
+ * other secondary interfaces as well.
+ */
+ if (forw_packet->own &&
+ forw_packet->if_incoming == forw_packet->if_outgoing)
+ batadv_iv_ogm_schedule(forw_packet->if_incoming);
+
+out:
+ /* do we get something for free()? */
+ if (batadv_forw_packet_steal(forw_packet,
+ &bat_priv->forw_bat_list_lock))
+ batadv_forw_packet_free(forw_packet, dropped);
+}
+
+static int batadv_iv_ogm_receive(struct sk_buff *skb,
+ struct batadv_hard_iface *if_incoming)
+{
+ struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct batadv_ogm_packet *ogm_packet;
+ u8 *packet_pos;
+ int ogm_offset;
+ bool res;
+ int ret = NET_RX_DROP;
+
+ res = batadv_check_management_packet(skb, if_incoming, BATADV_OGM_HLEN);
+ if (!res)
+ goto free_skb;
+
+ /* did we receive a B.A.T.M.A.N. IV OGM packet on an interface
+ * that does not have B.A.T.M.A.N. IV enabled?
+ */
+ if (bat_priv->algo_ops->iface.enable != batadv_iv_ogm_iface_enable)
+ goto free_skb;
+
+ batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX);
+ batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES,
+ skb->len + ETH_HLEN);
+
+ ogm_offset = 0;
+ ogm_packet = (struct batadv_ogm_packet *)skb->data;
+
+ /* unpack the aggregated packets and process them one by one */
+ while (batadv_iv_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
+ ogm_packet)) {
+ batadv_iv_ogm_process(skb, ogm_offset, if_incoming);
+
+ ogm_offset += BATADV_OGM_HLEN;
+ ogm_offset += ntohs(ogm_packet->tvlv_len);
+
+ packet_pos = skb->data + ogm_offset;
+ ogm_packet = (struct batadv_ogm_packet *)packet_pos;
+ }
+
+ ret = NET_RX_SUCCESS;
+
+free_skb:
+ if (ret == NET_RX_SUCCESS)
+ consume_skb(skb);
+ else
+ kfree_skb(skb);
+
+ return ret;
+}
+
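The unpacking loop above steps through the aggregate purely by offset arithmetic: each record contributes a fixed header plus its TVLV payload length. A simplified userspace model of that walk (illustrative only; the record layout here is made up and is not the real batadv_ogm_packet):

	#include <arpa/inet.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct fake_ogm {
		uint8_t  type;		/* stands in for packet_type */
		uint16_t tvlv_len;	/* network byte order, like the real field */
	} __attribute__((packed));

	int main(void)
	{
		uint8_t buf[64] = {0};
		size_t off = 0, len;

		/* two records: headers plus 4 and 0 bytes of trailing TVLV data */
		struct fake_ogm a = { .type = 1, .tvlv_len = htons(4) };
		struct fake_ogm b = { .type = 1, .tvlv_len = htons(0) };
		memcpy(buf, &a, sizeof(a));
		memcpy(buf + sizeof(a) + 4, &b, sizeof(b));
		len = sizeof(a) + 4 + sizeof(b);

		while (off + sizeof(struct fake_ogm) <= len) {
			struct fake_ogm *ogm = (struct fake_ogm *)(buf + off);

			if (ogm->type != 1)	/* padding, stop (cf. packet_type check) */
				break;
			printf("record at offset %zu, tvlv_len %u\n",
			       off, (unsigned int)ntohs(ogm->tvlv_len));
			off += sizeof(struct fake_ogm) + ntohs(ogm->tvlv_len);
		}
		return 0;
	}
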
+/**
+ * batadv_iv_ogm_neigh_get_tq_avg() - Get the TQ average for a neighbour on a
+ * given outgoing interface.
+ * @neigh_node: Neighbour of interest
+ * @if_outgoing: Outgoing interface of interest
+ * @tq_avg: Pointer of where to store the TQ average
+ *
+ * Return: False if no average TQ available, otherwise true.
+ */
+static bool
+batadv_iv_ogm_neigh_get_tq_avg(struct batadv_neigh_node *neigh_node,
+ struct batadv_hard_iface *if_outgoing,
+ u8 *tq_avg)
+{
+ struct batadv_neigh_ifinfo *n_ifinfo;
+
+ n_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing);
+ if (!n_ifinfo)
+ return false;
+
+ *tq_avg = n_ifinfo->bat_iv.tq_avg;
+ batadv_neigh_ifinfo_put(n_ifinfo);
+
+ return true;
+}
+
+/**
+ * batadv_iv_ogm_orig_dump_subentry() - Dump an originator subentry into a
+ * message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @seq: Sequence number of netlink message
+ * @bat_priv: The bat priv with all the soft interface information
+ * @if_outgoing: Limit dump to entries with this outgoing interface
+ * @orig_node: Originator to dump
+ * @neigh_node: Single hop neighbour
+ * @best: Is the best originator
+ *
+ * Return: Error code, or 0 on success
+ */
+static int
+batadv_iv_ogm_orig_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *if_outgoing,
+ struct batadv_orig_node *orig_node,
+ struct batadv_neigh_node *neigh_node,
+ bool best)
+{
+ void *hdr;
+ u8 tq_avg;
+ unsigned int last_seen_msecs;
+
+ last_seen_msecs = jiffies_to_msecs(jiffies - orig_node->last_seen);
+
+ if (!batadv_iv_ogm_neigh_get_tq_avg(neigh_node, if_outgoing, &tq_avg))
+ return 0;
+
+ if (if_outgoing != BATADV_IF_DEFAULT &&
+ if_outgoing != neigh_node->if_incoming)
+ return 0;
+
+ hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
+ NLM_F_MULTI, BATADV_CMD_GET_ORIGINATORS);
+ if (!hdr)
+ return -ENOBUFS;
+
+ if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN,
+ orig_node->orig) ||
+ nla_put(msg, BATADV_ATTR_NEIGH_ADDRESS, ETH_ALEN,
+ neigh_node->addr) ||
+ nla_put_string(msg, BATADV_ATTR_HARD_IFNAME,
+ neigh_node->if_incoming->net_dev->name) ||
+ nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
+ neigh_node->if_incoming->net_dev->ifindex) ||
+ nla_put_u8(msg, BATADV_ATTR_TQ, tq_avg) ||
+ nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS,
+ last_seen_msecs))
+ goto nla_put_failure;
+
+ if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+/**
+ * batadv_iv_ogm_orig_dump_entry() - Dump an originator entry into a message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @seq: Sequence number of netlink message
+ * @bat_priv: The bat priv with all the soft interface information
+ * @if_outgoing: Limit dump to entries with this outgoing interface
+ * @orig_node: Originator to dump
+ * @sub_s: Number of sub entries to skip
+ *
+ * This function assumes the caller holds rcu_read_lock().
+ *
+ * Return: Error code, or 0 on success
+ */
+static int
+batadv_iv_ogm_orig_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *if_outgoing,
+ struct batadv_orig_node *orig_node, int *sub_s)
+{
+ struct batadv_neigh_node *neigh_node_best;
+ struct batadv_neigh_node *neigh_node;
+ int sub = 0;
+ bool best;
+ u8 tq_avg_best;
+
+ neigh_node_best = batadv_orig_router_get(orig_node, if_outgoing);
+ if (!neigh_node_best)
+ goto out;
+
+ if (!batadv_iv_ogm_neigh_get_tq_avg(neigh_node_best, if_outgoing,
+ &tq_avg_best))
+ goto out;
+
+ if (tq_avg_best == 0)
+ goto out;
+
+ hlist_for_each_entry_rcu(neigh_node, &orig_node->neigh_list, list) {
+ if (sub++ < *sub_s)
+ continue;
+
+ best = (neigh_node == neigh_node_best);
+
+ if (batadv_iv_ogm_orig_dump_subentry(msg, portid, seq,
+ bat_priv, if_outgoing,
+ orig_node, neigh_node,
+ best)) {
+ batadv_neigh_node_put(neigh_node_best);
+
+ *sub_s = sub - 1;
+ return -EMSGSIZE;
+ }
+ }
+
+ out:
+ batadv_neigh_node_put(neigh_node_best);
+
+ *sub_s = 0;
+ return 0;
+}
+
+/**
+ * batadv_iv_ogm_orig_dump_bucket() - Dump an originator bucket into a
+ * message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @seq: Sequence number of netlink message
+ * @bat_priv: The bat priv with all the soft interface information
+ * @if_outgoing: Limit dump to entries with this outgoing interface
+ * @head: Bucket to be dumped
+ * @idx_s: Number of entries to be skipped
+ * @sub: Number of sub entries to be skipped
+ *
+ * Return: Error code, or 0 on success
+ */
+static int
+batadv_iv_ogm_orig_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *if_outgoing,
+ struct hlist_head *head, int *idx_s, int *sub)
+{
+ struct batadv_orig_node *orig_node;
+ int idx = 0;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
+ if (idx++ < *idx_s)
+ continue;
+
+ if (batadv_iv_ogm_orig_dump_entry(msg, portid, seq, bat_priv,
+ if_outgoing, orig_node,
+ sub)) {
+ rcu_read_unlock();
+ *idx_s = idx - 1;
+ return -EMSGSIZE;
+ }
+ }
+ rcu_read_unlock();
+
+ *idx_s = 0;
+ *sub = 0;
+ return 0;
+}
+
+/**
+ * batadv_iv_ogm_orig_dump() - Dump the originators into a message
+ * @msg: Netlink message to dump into
+ * @cb: Control block containing additional options
+ * @bat_priv: The bat priv with all the soft interface information
+ * @if_outgoing: Limit dump to entries with this outgoing interface
+ */
+static void
+batadv_iv_ogm_orig_dump(struct sk_buff *msg, struct netlink_callback *cb,
+ struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *if_outgoing)
+{
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
+ struct hlist_head *head;
+ int bucket = cb->args[0];
+ int idx = cb->args[1];
+ int sub = cb->args[2];
+ int portid = NETLINK_CB(cb->skb).portid;
+
+ while (bucket < hash->size) {
+ head = &hash->table[bucket];
+
+ if (batadv_iv_ogm_orig_dump_bucket(msg, portid,
+ cb->nlh->nlmsg_seq,
+ bat_priv, if_outgoing, head,
+ &idx, &sub))
+ break;
+
+ bucket++;
+ }
+
+ cb->args[0] = bucket;
+ cb->args[1] = idx;
+ cb->args[2] = sub;
+}
+
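The bucket/idx/sub triple kept in cb->args[] forms a resumable cursor: when the message fills up (-EMSGSIZE), the dump stops and the next netlink request continues exactly where the previous one left off. A compact userspace model of the two outer levels of that pattern (illustrative only):

	#include <stdio.h>

	#define BUCKETS 3
	#define ENTRIES 4

	static int budget;	/* stands in for remaining skb space */

	static int emit(int bucket, int entry)
	{
		if (budget == 0)
			return -1;	/* "message full", like -EMSGSIZE */
		budget--;
		printf("dumped %d/%d\n", bucket, entry);
		return 0;
	}

	/* args[0]=bucket, args[1]=entry, mirroring the cb->args[] bookkeeping */
	static void dump(long args[2])
	{
		int bucket = args[0];
		int idx = args[1];

		for (; bucket < BUCKETS; bucket++, idx = 0) {
			for (; idx < ENTRIES; idx++) {
				if (emit(bucket, idx) < 0)
					goto out;	/* resume here next call */
			}
		}
	out:
		args[0] = bucket;
		args[1] = idx;
	}

	int main(void)
	{
		long args[2] = {0, 0};

		budget = 5;
		dump(args);	/* first pass fills the "message" */
		budget = 100;
		dump(args);	/* second pass resumes where it stopped */
		return 0;
	}
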
+/**
+ * batadv_iv_ogm_neigh_diff() - calculate tq difference of two neighbors
+ * @neigh1: the first neighbor object of the comparison
+ * @if_outgoing1: outgoing interface for the first neighbor
+ * @neigh2: the second neighbor object of the comparison
+ * @if_outgoing2: outgoing interface for the second neighbor
+ * @diff: pointer to integer receiving the calculated difference
+ *
+ * The content of *@diff is only valid when this function returns true.
+ * It is less, equal to or greater than 0 if the metric via neigh1 is lower,
+ * the same as or higher than the metric via neigh2
+ *
+ * Return: true when the difference could be calculated, false otherwise
+ */
+static bool batadv_iv_ogm_neigh_diff(struct batadv_neigh_node *neigh1,
+ struct batadv_hard_iface *if_outgoing1,
+ struct batadv_neigh_node *neigh2,
+ struct batadv_hard_iface *if_outgoing2,
+ int *diff)
+{
+ struct batadv_neigh_ifinfo *neigh1_ifinfo, *neigh2_ifinfo;
+ u8 tq1, tq2;
+ bool ret = true;
+
+ neigh1_ifinfo = batadv_neigh_ifinfo_get(neigh1, if_outgoing1);
+ neigh2_ifinfo = batadv_neigh_ifinfo_get(neigh2, if_outgoing2);
+
+ if (!neigh1_ifinfo || !neigh2_ifinfo) {
+ ret = false;
+ goto out;
+ }
+
+ tq1 = neigh1_ifinfo->bat_iv.tq_avg;
+ tq2 = neigh2_ifinfo->bat_iv.tq_avg;
+ *diff = (int)tq1 - (int)tq2;
+
+out:
+ batadv_neigh_ifinfo_put(neigh1_ifinfo);
+ batadv_neigh_ifinfo_put(neigh2_ifinfo);
+
+ return ret;
+}
+
+/**
+ * batadv_iv_ogm_neigh_dump_neigh() - Dump a neighbour into a netlink message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @seq: Sequence number of netlink message
+ * @hardif_neigh: Neighbour to be dumped
+ *
+ * Return: Error code, or 0 on success
+ */
+static int
+batadv_iv_ogm_neigh_dump_neigh(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_hardif_neigh_node *hardif_neigh)
+{
+ void *hdr;
+ unsigned int last_seen_msecs;
+
+ last_seen_msecs = jiffies_to_msecs(jiffies - hardif_neigh->last_seen);
+
+ hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
+ NLM_F_MULTI, BATADV_CMD_GET_NEIGHBORS);
+ if (!hdr)
+ return -ENOBUFS;
+
+ if (nla_put(msg, BATADV_ATTR_NEIGH_ADDRESS, ETH_ALEN,
+ hardif_neigh->addr) ||
+ nla_put_string(msg, BATADV_ATTR_HARD_IFNAME,
+ hardif_neigh->if_incoming->net_dev->name) ||
+ nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
+ hardif_neigh->if_incoming->net_dev->ifindex) ||
+ nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS,
+ last_seen_msecs))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+/**
+ * batadv_iv_ogm_neigh_dump_hardif() - Dump the neighbours of a hard interface
+ * into a message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @seq: Sequence number of netlink message
+ * @bat_priv: The bat priv with all the soft interface information
+ * @hard_iface: Hard interface to dump the neighbours for
+ * @idx_s: Number of entries to skip
+ *
+ * This function assumes the caller holds rcu_read_lock().
+ *
+ * Return: Error code, or 0 on success
+ */
+static int
+batadv_iv_ogm_neigh_dump_hardif(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *hard_iface,
+ int *idx_s)
+{
+ struct batadv_hardif_neigh_node *hardif_neigh;
+ int idx = 0;
+
+ hlist_for_each_entry_rcu(hardif_neigh,
+ &hard_iface->neigh_list, list) {
+ if (idx++ < *idx_s)
+ continue;
+
+ if (batadv_iv_ogm_neigh_dump_neigh(msg, portid, seq,
+ hardif_neigh)) {
+ *idx_s = idx - 1;
+ return -EMSGSIZE;
+ }
+ }
+
+ *idx_s = 0;
+ return 0;
+}
+
+/**
+ * batadv_iv_ogm_neigh_dump() - Dump the neighbours into a message
+ * @msg: Netlink message to dump into
+ * @cb: Control block containing additional options
+ * @bat_priv: The bat priv with all the soft interface information
+ * @single_hardif: Limit dump to this hard interface
+ */
+static void
+batadv_iv_ogm_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb,
+ struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *single_hardif)
+{
+ struct batadv_hard_iface *hard_iface;
+ int i_hardif = 0;
+ int i_hardif_s = cb->args[0];
+ int idx = cb->args[1];
+ int portid = NETLINK_CB(cb->skb).portid;
+
+ rcu_read_lock();
+ if (single_hardif) {
+ if (i_hardif_s == 0) {
+ if (batadv_iv_ogm_neigh_dump_hardif(msg, portid,
+ cb->nlh->nlmsg_seq,
+ bat_priv,
+ single_hardif,
+ &idx) == 0)
+ i_hardif++;
+ }
+ } else {
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list,
+ list) {
+ if (hard_iface->soft_iface != bat_priv->soft_iface)
+ continue;
+
+ if (i_hardif++ < i_hardif_s)
+ continue;
+
+ if (batadv_iv_ogm_neigh_dump_hardif(msg, portid,
+ cb->nlh->nlmsg_seq,
+ bat_priv,
+ hard_iface, &idx)) {
+ i_hardif--;
+ break;
+ }
+ }
+ }
+ rcu_read_unlock();
+
+ cb->args[0] = i_hardif;
+ cb->args[1] = idx;
+}
+
+/**
+ * batadv_iv_ogm_neigh_cmp() - compare the metrics of two neighbors
+ * @neigh1: the first neighbor object of the comparison
+ * @if_outgoing1: outgoing interface for the first neighbor
+ * @neigh2: the second neighbor object of the comparison
+ * @if_outgoing2: outgoing interface for the second neighbor
+ *
+ * Return: a value less, equal to or greater than 0 if the metric via neigh1 is
+ * lower, the same as or higher than the metric via neigh2
+ */
+static int batadv_iv_ogm_neigh_cmp(struct batadv_neigh_node *neigh1,
+ struct batadv_hard_iface *if_outgoing1,
+ struct batadv_neigh_node *neigh2,
+ struct batadv_hard_iface *if_outgoing2)
+{
+ bool ret;
+ int diff;
+
+ ret = batadv_iv_ogm_neigh_diff(neigh1, if_outgoing1, neigh2,
+ if_outgoing2, &diff);
+ if (!ret)
+ return 0;
+
+ return diff;
+}
+
+/**
+ * batadv_iv_ogm_neigh_is_sob() - check if neigh1 is similarly good or better
+ * than neigh2 from the metric perspective
+ * @neigh1: the first neighbor object of the comparison
+ * @if_outgoing1: outgoing interface for the first neighbor
+ * @neigh2: the second neighbor object of the comparison
+ * @if_outgoing2: outgoing interface for the second neighbor
+ *
+ * Return: true if the metric via neigh1 is equally good or better than
+ * the metric via neigh2, false otherwise.
+ */
+static bool
+batadv_iv_ogm_neigh_is_sob(struct batadv_neigh_node *neigh1,
+ struct batadv_hard_iface *if_outgoing1,
+ struct batadv_neigh_node *neigh2,
+ struct batadv_hard_iface *if_outgoing2)
+{
+ bool ret;
+ int diff;
+
+ ret = batadv_iv_ogm_neigh_diff(neigh1, if_outgoing1, neigh2,
+ if_outgoing2, &diff);
+ if (!ret)
+ return false;
+
+ ret = diff > -BATADV_TQ_SIMILARITY_THRESHOLD;
+ return ret;
+}
+
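A worked example of the similarity test above (illustrative; it assumes BATADV_TQ_SIMILARITY_THRESHOLD is 50, its upstream default): neigh1 counts as similarly good whenever its TQ is less than 50 points below neigh2's.

	#include <stdbool.h>
	#include <stdio.h>

	#define TQ_SIMILARITY_THRESHOLD 50	/* assumed upstream value */

	static bool is_similar_or_better(int tq1, int tq2)
	{
		/* same condition as diff > -BATADV_TQ_SIMILARITY_THRESHOLD */
		return (tq1 - tq2) > -TQ_SIMILARITY_THRESHOLD;
	}

	int main(void)
	{
		printf("%d\n", is_similar_or_better(200, 240)); /* 1: within 50 points */
		printf("%d\n", is_similar_or_better(150, 240)); /* 0: too much worse */
		return 0;
	}
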
+static void batadv_iv_iface_enabled(struct batadv_hard_iface *hard_iface)
+{
+ /* begin scheduling originator messages on that interface */
+ batadv_iv_ogm_schedule(hard_iface);
+}
+
+/**
+ * batadv_iv_init_sel_class() - initialize GW selection class
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_iv_init_sel_class(struct batadv_priv *bat_priv)
+{
+ /* set default TQ difference threshold to 20 */
+ atomic_set(&bat_priv->gw.sel_class, 20);
+}
+
+static struct batadv_gw_node *
+batadv_iv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
+{
+ struct batadv_neigh_node *router;
+ struct batadv_neigh_ifinfo *router_ifinfo;
+ struct batadv_gw_node *gw_node, *curr_gw = NULL;
+ u64 max_gw_factor = 0;
+ u64 tmp_gw_factor = 0;
+ u8 max_tq = 0;
+ u8 tq_avg;
+ struct batadv_orig_node *orig_node;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.gateway_list, list) {
+ orig_node = gw_node->orig_node;
+ router = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT);
+ if (!router)
+ continue;
+
+ router_ifinfo = batadv_neigh_ifinfo_get(router,
+ BATADV_IF_DEFAULT);
+ if (!router_ifinfo)
+ goto next;
+
+ if (!kref_get_unless_zero(&gw_node->refcount))
+ goto next;
+
+ tq_avg = router_ifinfo->bat_iv.tq_avg;
+
+ switch (atomic_read(&bat_priv->gw.sel_class)) {
+ case 1: /* fast connection */
+ tmp_gw_factor = tq_avg * tq_avg;
+ tmp_gw_factor *= gw_node->bandwidth_down;
+ tmp_gw_factor *= 100 * 100;
+ tmp_gw_factor >>= 18;
+
+ if (tmp_gw_factor > max_gw_factor ||
+ (tmp_gw_factor == max_gw_factor &&
+ tq_avg > max_tq)) {
+ batadv_gw_node_put(curr_gw);
+ curr_gw = gw_node;
+ kref_get(&curr_gw->refcount);
+ }
+ break;
+
+ default: /* 2: stable connection (use best statistic)
+ * 3: fast-switch (use best statistic but change as
+ * soon as a better gateway appears)
+ * XX: late-switch (use best statistic but change as
+ * soon as a better gateway appears which has
+ * $routing_class more tq points)
+ */
+ if (tq_avg > max_tq) {
+ batadv_gw_node_put(curr_gw);
+ curr_gw = gw_node;
+ kref_get(&curr_gw->refcount);
+ }
+ break;
+ }
+
+ if (tq_avg > max_tq)
+ max_tq = tq_avg;
+
+ if (tmp_gw_factor > max_gw_factor)
+ max_gw_factor = tmp_gw_factor;
+
+ batadv_gw_node_put(gw_node);
+
+next:
+ batadv_neigh_node_put(router);
+ batadv_neigh_ifinfo_put(router_ifinfo);
+ }
+ rcu_read_unlock();
+
+ return curr_gw;
+}
+
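For the sel_class 1 branch above, the gateway factor weights the squared TQ by the advertised downlink and then scales back down by 2^18. A worked example (illustrative only; bandwidth in the usual 100 kbit/s units) showing that a perfect link to a 10 Mbit/s gateway outranks a mediocre link to a 20 Mbit/s one:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t gw_factor(uint8_t tq_avg, uint32_t bandwidth_down)
	{
		uint64_t f = (uint64_t)tq_avg * tq_avg;

		f *= bandwidth_down;	/* advertised downlink, 100 kbit/s units */
		f *= 100 * 100;
		return f >> 18;		/* scale back down, as the kernel does */
	}

	int main(void)
	{
		/* perfect link (TQ 255) to a 10 Mbit/s gateway: ~248051 ... */
		printf("%llu\n", (unsigned long long)gw_factor(255, 100));
		/* ... beats a mediocre link (TQ 128) to 20 Mbit/s: ~125000 */
		printf("%llu\n", (unsigned long long)gw_factor(128, 200));
		return 0;
	}
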
+static bool batadv_iv_gw_is_eligible(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *curr_gw_orig,
+ struct batadv_orig_node *orig_node)
+{
+ struct batadv_neigh_ifinfo *router_orig_ifinfo = NULL;
+ struct batadv_neigh_ifinfo *router_gw_ifinfo = NULL;
+ struct batadv_neigh_node *router_gw = NULL;
+ struct batadv_neigh_node *router_orig = NULL;
+ u8 gw_tq_avg, orig_tq_avg;
+ bool ret = false;
+
+ /* dynamic re-election is performed only on fast or late switch */
+ if (atomic_read(&bat_priv->gw.sel_class) <= 2)
+ return false;
+
+ router_gw = batadv_orig_router_get(curr_gw_orig, BATADV_IF_DEFAULT);
+ if (!router_gw) {
+ ret = true;
+ goto out;
+ }
+
+ router_gw_ifinfo = batadv_neigh_ifinfo_get(router_gw,
+ BATADV_IF_DEFAULT);
+ if (!router_gw_ifinfo) {
+ ret = true;
+ goto out;
+ }
+
+ router_orig = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT);
+ if (!router_orig)
+ goto out;
+
+ router_orig_ifinfo = batadv_neigh_ifinfo_get(router_orig,
+ BATADV_IF_DEFAULT);
+ if (!router_orig_ifinfo)
+ goto out;
+
+ gw_tq_avg = router_gw_ifinfo->bat_iv.tq_avg;
+ orig_tq_avg = router_orig_ifinfo->bat_iv.tq_avg;
+
+ /* the TQ value has to be better */
+ if (orig_tq_avg < gw_tq_avg)
+ goto out;
+
+ /* if the routing class is greater than 3 the value tells us how much
+ * greater the TQ value of the new gateway must be
+ */
+ if ((atomic_read(&bat_priv->gw.sel_class) > 3) &&
+ (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw.sel_class)))
+ goto out;
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Restarting gateway selection: better gateway found (tq curr: %i, tq new: %i)\n",
+ gw_tq_avg, orig_tq_avg);
+
+ ret = true;
+out:
+ batadv_neigh_ifinfo_put(router_gw_ifinfo);
+ batadv_neigh_ifinfo_put(router_orig_ifinfo);
+ batadv_neigh_node_put(router_gw);
+ batadv_neigh_node_put(router_orig);
+
+ return ret;
+}
+
+/**
+ * batadv_iv_gw_dump_entry() - Dump a gateway into a message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @cb: Control block containing additional options
+ * @bat_priv: The bat priv with all the soft interface information
+ * @gw_node: Gateway to be dumped
+ *
+ * Return: Error code, or 0 on success
+ */
+static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid,
+ struct netlink_callback *cb,
+ struct batadv_priv *bat_priv,
+ struct batadv_gw_node *gw_node)
+{
+ struct batadv_neigh_ifinfo *router_ifinfo = NULL;
+ struct batadv_neigh_node *router;
+ struct batadv_gw_node *curr_gw = NULL;
+ int ret = 0;
+ void *hdr;
+
+ router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT);
+ if (!router)
+ goto out;
+
+ router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT);
+ if (!router_ifinfo)
+ goto out;
+
+ curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
+
+ hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
+ &batadv_netlink_family, NLM_F_MULTI,
+ BATADV_CMD_GET_GATEWAYS);
+ if (!hdr) {
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ genl_dump_check_consistent(cb, hdr);
+
+ ret = -EMSGSIZE;
+
+ if (curr_gw == gw_node)
+ if (nla_put_flag(msg, BATADV_ATTR_FLAG_BEST)) {
+ genlmsg_cancel(msg, hdr);
+ goto out;
+ }
+
+ if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN,
+ gw_node->orig_node->orig) ||
+ nla_put_u8(msg, BATADV_ATTR_TQ, router_ifinfo->bat_iv.tq_avg) ||
+ nla_put(msg, BATADV_ATTR_ROUTER, ETH_ALEN,
+ router->addr) ||
+ nla_put_string(msg, BATADV_ATTR_HARD_IFNAME,
+ router->if_incoming->net_dev->name) ||
+ nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
+ router->if_incoming->net_dev->ifindex) ||
+ nla_put_u32(msg, BATADV_ATTR_BANDWIDTH_DOWN,
+ gw_node->bandwidth_down) ||
+ nla_put_u32(msg, BATADV_ATTR_BANDWIDTH_UP,
+ gw_node->bandwidth_up)) {
+ genlmsg_cancel(msg, hdr);
+ goto out;
+ }
+
+ genlmsg_end(msg, hdr);
+ ret = 0;
+
+out:
+ batadv_gw_node_put(curr_gw);
+ batadv_neigh_ifinfo_put(router_ifinfo);
+ batadv_neigh_node_put(router);
+ return ret;
+}
+
+/**
+ * batadv_iv_gw_dump() - Dump gateways into a message
+ * @msg: Netlink message to dump into
+ * @cb: Control block containing additional options
+ * @bat_priv: The bat priv with all the soft interface information
+ */
+static void batadv_iv_gw_dump(struct sk_buff *msg, struct netlink_callback *cb,
+ struct batadv_priv *bat_priv)
+{
+ int portid = NETLINK_CB(cb->skb).portid;
+ struct batadv_gw_node *gw_node;
+ int idx_skip = cb->args[0];
+ int idx = 0;
+
+ spin_lock_bh(&bat_priv->gw.list_lock);
+ cb->seq = bat_priv->gw.generation << 1 | 1;
+
+ hlist_for_each_entry(gw_node, &bat_priv->gw.gateway_list, list) {
+ if (idx++ < idx_skip)
+ continue;
+
+ if (batadv_iv_gw_dump_entry(msg, portid, cb, bat_priv,
+ gw_node)) {
+ idx_skip = idx - 1;
+ goto unlock;
+ }
+ }
+
+ idx_skip = idx;
+unlock:
+ spin_unlock_bh(&bat_priv->gw.list_lock);
+
+ cb->args[0] = idx_skip;
+}
+
+static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
+ .name = "BATMAN_IV",
+ .iface = {
+ .enable = batadv_iv_ogm_iface_enable,
+ .enabled = batadv_iv_iface_enabled,
+ .disable = batadv_iv_ogm_iface_disable,
+ .update_mac = batadv_iv_ogm_iface_update_mac,
+ .primary_set = batadv_iv_ogm_primary_iface_set,
+ },
+ .neigh = {
+ .cmp = batadv_iv_ogm_neigh_cmp,
+ .is_similar_or_better = batadv_iv_ogm_neigh_is_sob,
+ .dump = batadv_iv_ogm_neigh_dump,
+ },
+ .orig = {
+ .dump = batadv_iv_ogm_orig_dump,
+ },
+ .gw = {
+ .init_sel_class = batadv_iv_init_sel_class,
+ .sel_class_max = BATADV_TQ_MAX_VALUE,
+ .get_best_gw_node = batadv_iv_gw_get_best_gw_node,
+ .is_eligible = batadv_iv_gw_is_eligible,
+ .dump = batadv_iv_gw_dump,
+ },
+};
+
+/**
+ * batadv_iv_init() - B.A.T.M.A.N. IV initialization function
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+int __init batadv_iv_init(void)
+{
+ int ret;
+
+ /* batman originator packet */
+ ret = batadv_recv_handler_register(BATADV_IV_OGM,
+ batadv_iv_ogm_receive);
+ if (ret < 0)
+ goto out;
+
+ ret = batadv_algo_register(&batadv_batman_iv);
+ if (ret < 0)
+ goto handler_unregister;
+
+ goto out;
+
+handler_unregister:
+ batadv_recv_handler_unregister(BATADV_IV_OGM);
+out:
+ return ret;
+}
diff --git a/net/batman-adv/bat_iv_ogm.h b/net/batman-adv/bat_iv_ogm.h
new file mode 100644
index 0000000000..04b01bd684
--- /dev/null
+++ b/net/batman-adv/bat_iv_ogm.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Simon Wunderlich
+ */
+
+#ifndef _NET_BATMAN_ADV_BAT_IV_OGM_H_
+#define _NET_BATMAN_ADV_BAT_IV_OGM_H_
+
+#include "main.h"
+
+int batadv_iv_init(void);
+
+#endif /* _NET_BATMAN_ADV_BAT_IV_OGM_H_ */
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
new file mode 100644
index 0000000000..ac11f1f08d
--- /dev/null
+++ b/net/batman-adv/bat_v.c
@@ -0,0 +1,891 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Linus Lüssing, Marek Lindner
+ */
+
+#include "bat_v.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/cache.h>
+#include <linux/errno.h>
+#include <linux/if_ether.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/kref.h>
+#include <linux/limits.h>
+#include <linux/list.h>
+#include <linux/minmax.h>
+#include <linux/netdevice.h>
+#include <linux/netlink.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <net/genetlink.h>
+#include <net/netlink.h>
+#include <uapi/linux/batadv_packet.h>
+#include <uapi/linux/batman_adv.h>
+
+#include "bat_algo.h"
+#include "bat_v_elp.h"
+#include "bat_v_ogm.h"
+#include "gateway_client.h"
+#include "hard-interface.h"
+#include "hash.h"
+#include "log.h"
+#include "netlink.h"
+#include "originator.h"
+
+static void batadv_v_iface_activate(struct batadv_hard_iface *hard_iface)
+{
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ struct batadv_hard_iface *primary_if;
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+
+ if (primary_if) {
+ batadv_v_elp_iface_activate(primary_if, hard_iface);
+ batadv_hardif_put(primary_if);
+ }
+
+ /* B.A.T.M.A.N. V does not use any queuing mechanism, therefore it can
+ * set the interface as ACTIVE right away, without any risk of race
+ * condition
+ */
+ if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
+ hard_iface->if_status = BATADV_IF_ACTIVE;
+}
+
+static int batadv_v_iface_enable(struct batadv_hard_iface *hard_iface)
+{
+ int ret;
+
+ ret = batadv_v_elp_iface_enable(hard_iface);
+ if (ret < 0)
+ return ret;
+
+ ret = batadv_v_ogm_iface_enable(hard_iface);
+ if (ret < 0)
+ batadv_v_elp_iface_disable(hard_iface);
+
+ return ret;
+}
+
+static void batadv_v_iface_disable(struct batadv_hard_iface *hard_iface)
+{
+ batadv_v_ogm_iface_disable(hard_iface);
+ batadv_v_elp_iface_disable(hard_iface);
+}
+
+static void batadv_v_primary_iface_set(struct batadv_hard_iface *hard_iface)
+{
+ batadv_v_elp_primary_iface_set(hard_iface);
+ batadv_v_ogm_primary_iface_set(hard_iface);
+}
+
+/**
+ * batadv_v_iface_update_mac() - react to hard-interface MAC address change
+ * @hard_iface: the modified interface
+ *
+ * If the modified interface is the primary one, update the originator
+ * address in the ELP and OGM messages to reflect the new MAC address.
+ */
+static void batadv_v_iface_update_mac(struct batadv_hard_iface *hard_iface)
+{
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ struct batadv_hard_iface *primary_if;
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (primary_if != hard_iface)
+ goto out;
+
+ batadv_v_primary_iface_set(hard_iface);
+out:
+ batadv_hardif_put(primary_if);
+}
+
+static void
+batadv_v_hardif_neigh_init(struct batadv_hardif_neigh_node *hardif_neigh)
+{
+ ewma_throughput_init(&hardif_neigh->bat_v.throughput);
+ INIT_WORK(&hardif_neigh->bat_v.metric_work,
+ batadv_v_elp_throughput_metric_update);
+}
+
+/**
+ * batadv_v_neigh_dump_neigh() - Dump a neighbour into a message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @seq: Sequence number of netlink message
+ * @hardif_neigh: Neighbour to dump
+ *
+ * Return: Error code, or 0 on success
+ */
+static int
+batadv_v_neigh_dump_neigh(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_hardif_neigh_node *hardif_neigh)
+{
+ void *hdr;
+ unsigned int last_seen_msecs;
+ u32 throughput;
+
+ last_seen_msecs = jiffies_to_msecs(jiffies - hardif_neigh->last_seen);
+ throughput = ewma_throughput_read(&hardif_neigh->bat_v.throughput);
+ throughput = throughput * 100;
+
+ hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, NLM_F_MULTI,
+ BATADV_CMD_GET_NEIGHBORS);
+ if (!hdr)
+ return -ENOBUFS;
+
+ if (nla_put(msg, BATADV_ATTR_NEIGH_ADDRESS, ETH_ALEN,
+ hardif_neigh->addr) ||
+ nla_put_string(msg, BATADV_ATTR_HARD_IFNAME,
+ hardif_neigh->if_incoming->net_dev->name) ||
+ nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
+ hardif_neigh->if_incoming->net_dev->ifindex) ||
+ nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS,
+ last_seen_msecs) ||
+ nla_put_u32(msg, BATADV_ATTR_THROUGHPUT, throughput))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+/**
+ * batadv_v_neigh_dump_hardif() - Dump the neighbours of a hard interface into
+ * a message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @seq: Sequence number of netlink message
+ * @bat_priv: The bat priv with all the soft interface information
+ * @hard_iface: The hard interface to be dumped
+ * @idx_s: Entries to be skipped
+ *
+ * This function assumes the caller holds rcu_read_lock().
+ *
+ * Return: Error code, or 0 on success
+ */
+static int
+batadv_v_neigh_dump_hardif(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *hard_iface,
+ int *idx_s)
+{
+ struct batadv_hardif_neigh_node *hardif_neigh;
+ int idx = 0;
+
+ hlist_for_each_entry_rcu(hardif_neigh,
+ &hard_iface->neigh_list, list) {
+ if (idx++ < *idx_s)
+ continue;
+
+ if (batadv_v_neigh_dump_neigh(msg, portid, seq, hardif_neigh)) {
+ *idx_s = idx - 1;
+ return -EMSGSIZE;
+ }
+ }
+
+ *idx_s = 0;
+ return 0;
+}
+
+/**
+ * batadv_v_neigh_dump() - Dump the neighbours of a hard interface into a
+ * message
+ * @msg: Netlink message to dump into
+ * @cb: Control block containing additional options
+ * @bat_priv: The bat priv with all the soft interface information
+ * @single_hardif: Limit dumping to this hard interface
+ */
+static void
+batadv_v_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb,
+ struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *single_hardif)
+{
+ struct batadv_hard_iface *hard_iface;
+ int i_hardif = 0;
+ int i_hardif_s = cb->args[0];
+ int idx = cb->args[1];
+ int portid = NETLINK_CB(cb->skb).portid;
+
+ rcu_read_lock();
+ if (single_hardif) {
+ if (i_hardif_s == 0) {
+ if (batadv_v_neigh_dump_hardif(msg, portid,
+ cb->nlh->nlmsg_seq,
+ bat_priv, single_hardif,
+ &idx) == 0)
+ i_hardif++;
+ }
+ } else {
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ if (hard_iface->soft_iface != bat_priv->soft_iface)
+ continue;
+
+ if (i_hardif++ < i_hardif_s)
+ continue;
+
+ if (batadv_v_neigh_dump_hardif(msg, portid,
+ cb->nlh->nlmsg_seq,
+ bat_priv, hard_iface,
+ &idx)) {
+ i_hardif--;
+ break;
+ }
+ }
+ }
+ rcu_read_unlock();
+
+ cb->args[0] = i_hardif;
+ cb->args[1] = idx;
+}
+
+/**
+ * batadv_v_orig_dump_subentry() - Dump an originator subentry into a message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @seq: Sequence number of netlink message
+ * @bat_priv: The bat priv with all the soft interface information
+ * @if_outgoing: Limit dump to entries with this outgoing interface
+ * @orig_node: Originator to dump
+ * @neigh_node: Single hop neighbour
+ * @best: Is the best originator
+ *
+ * Return: Error code, or 0 on success
+ */
+static int
+batadv_v_orig_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *if_outgoing,
+ struct batadv_orig_node *orig_node,
+ struct batadv_neigh_node *neigh_node,
+ bool best)
+{
+ struct batadv_neigh_ifinfo *n_ifinfo;
+ unsigned int last_seen_msecs;
+ u32 throughput;
+ void *hdr;
+
+ n_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing);
+ if (!n_ifinfo)
+ return 0;
+
+ throughput = n_ifinfo->bat_v.throughput * 100;
+
+ batadv_neigh_ifinfo_put(n_ifinfo);
+
+ last_seen_msecs = jiffies_to_msecs(jiffies - orig_node->last_seen);
+
+ if (if_outgoing != BATADV_IF_DEFAULT &&
+ if_outgoing != neigh_node->if_incoming)
+ return 0;
+
+ hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, NLM_F_MULTI,
+ BATADV_CMD_GET_ORIGINATORS);
+ if (!hdr)
+ return -ENOBUFS;
+
+ if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN, orig_node->orig) ||
+ nla_put(msg, BATADV_ATTR_NEIGH_ADDRESS, ETH_ALEN,
+ neigh_node->addr) ||
+ nla_put_string(msg, BATADV_ATTR_HARD_IFNAME,
+ neigh_node->if_incoming->net_dev->name) ||
+ nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
+ neigh_node->if_incoming->net_dev->ifindex) ||
+ nla_put_u32(msg, BATADV_ATTR_THROUGHPUT, throughput) ||
+ nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS,
+ last_seen_msecs))
+ goto nla_put_failure;
+
+ if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+/**
+ * batadv_v_orig_dump_entry() - Dump an originator entry into a message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @seq: Sequence number of netlink message
+ * @bat_priv: The bat priv with all the soft interface information
+ * @if_outgoing: Limit dump to entries with this outgoing interface
+ * @orig_node: Originator to dump
+ * @sub_s: Number of sub entries to skip
+ *
+ * This function assumes the caller holds rcu_read_lock().
+ *
+ * Return: Error code, or 0 on success
+ */
+static int
+batadv_v_orig_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *if_outgoing,
+ struct batadv_orig_node *orig_node, int *sub_s)
+{
+ struct batadv_neigh_node *neigh_node_best;
+ struct batadv_neigh_node *neigh_node;
+ int sub = 0;
+ bool best;
+
+ neigh_node_best = batadv_orig_router_get(orig_node, if_outgoing);
+ if (!neigh_node_best)
+ goto out;
+
+ hlist_for_each_entry_rcu(neigh_node, &orig_node->neigh_list, list) {
+ if (sub++ < *sub_s)
+ continue;
+
+ best = (neigh_node == neigh_node_best);
+
+ if (batadv_v_orig_dump_subentry(msg, portid, seq, bat_priv,
+ if_outgoing, orig_node,
+ neigh_node, best)) {
+ batadv_neigh_node_put(neigh_node_best);
+
+ *sub_s = sub - 1;
+ return -EMSGSIZE;
+ }
+ }
+
+ out:
+ batadv_neigh_node_put(neigh_node_best);
+
+ *sub_s = 0;
+ return 0;
+}
+
+/**
+ * batadv_v_orig_dump_bucket() - Dump an originator bucket into a message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @seq: Sequence number of netlink message
+ * @bat_priv: The bat priv with all the soft interface information
+ * @if_outgoing: Limit dump to entries with this outgoing interface
+ * @head: Bucket to be dumped
+ * @idx_s: Number of entries to be skipped
+ * @sub: Number of sub entries to be skipped
+ *
+ * Return: Error code, or 0 on success
+ */
+static int
+batadv_v_orig_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *if_outgoing,
+ struct hlist_head *head, int *idx_s, int *sub)
+{
+ struct batadv_orig_node *orig_node;
+ int idx = 0;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
+ if (idx++ < *idx_s)
+ continue;
+
+ if (batadv_v_orig_dump_entry(msg, portid, seq, bat_priv,
+ if_outgoing, orig_node, sub)) {
+ rcu_read_unlock();
+ *idx_s = idx - 1;
+ return -EMSGSIZE;
+ }
+ }
+ rcu_read_unlock();
+
+ *idx_s = 0;
+ *sub = 0;
+ return 0;
+}
+
+/**
+ * batadv_v_orig_dump() - Dump the originators into a message
+ * @msg: Netlink message to dump into
+ * @cb: Control block containing additional options
+ * @bat_priv: The bat priv with all the soft interface information
+ * @if_outgoing: Limit dump to entries with this outgoing interface
+ */
+static void
+batadv_v_orig_dump(struct sk_buff *msg, struct netlink_callback *cb,
+ struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *if_outgoing)
+{
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
+ struct hlist_head *head;
+ int bucket = cb->args[0];
+ int idx = cb->args[1];
+ int sub = cb->args[2];
+ int portid = NETLINK_CB(cb->skb).portid;
+
+ while (bucket < hash->size) {
+ head = &hash->table[bucket];
+
+ if (batadv_v_orig_dump_bucket(msg, portid,
+ cb->nlh->nlmsg_seq,
+ bat_priv, if_outgoing, head, &idx,
+ &sub))
+ break;
+
+ bucket++;
+ }
+
+ cb->args[0] = bucket;
+ cb->args[1] = idx;
+ cb->args[2] = sub;
+}
+
+static int batadv_v_neigh_cmp(struct batadv_neigh_node *neigh1,
+ struct batadv_hard_iface *if_outgoing1,
+ struct batadv_neigh_node *neigh2,
+ struct batadv_hard_iface *if_outgoing2)
+{
+ struct batadv_neigh_ifinfo *ifinfo1, *ifinfo2;
+ int ret = 0;
+
+ ifinfo1 = batadv_neigh_ifinfo_get(neigh1, if_outgoing1);
+ if (!ifinfo1)
+ goto err_ifinfo1;
+
+ ifinfo2 = batadv_neigh_ifinfo_get(neigh2, if_outgoing2);
+ if (!ifinfo2)
+ goto err_ifinfo2;
+
+ ret = ifinfo1->bat_v.throughput - ifinfo2->bat_v.throughput;
+
+ batadv_neigh_ifinfo_put(ifinfo2);
+err_ifinfo2:
+ batadv_neigh_ifinfo_put(ifinfo1);
+err_ifinfo1:
+ return ret;
+}
+
+static bool batadv_v_neigh_is_sob(struct batadv_neigh_node *neigh1,
+ struct batadv_hard_iface *if_outgoing1,
+ struct batadv_neigh_node *neigh2,
+ struct batadv_hard_iface *if_outgoing2)
+{
+ struct batadv_neigh_ifinfo *ifinfo1, *ifinfo2;
+ u32 threshold;
+ bool ret = false;
+
+ ifinfo1 = batadv_neigh_ifinfo_get(neigh1, if_outgoing1);
+ if (!ifinfo1)
+ goto err_ifinfo1;
+
+ ifinfo2 = batadv_neigh_ifinfo_get(neigh2, if_outgoing2);
+ if (!ifinfo2)
+ goto err_ifinfo2;
+
+ threshold = ifinfo1->bat_v.throughput / 4;
+ threshold = ifinfo1->bat_v.throughput - threshold;
+
+ ret = ifinfo2->bat_v.throughput > threshold;
+
+ batadv_neigh_ifinfo_put(ifinfo2);
+err_ifinfo2:
+ batadv_neigh_ifinfo_put(ifinfo1);
+err_ifinfo1:
+ return ret;
+}
+
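The throughput comparison above passes when the second neighbour clears 75% of the first's throughput, i.e. a fixed 25% margin. In userspace terms (illustrative only, mirroring the arithmetic as written):

	#include <stdbool.h>
	#include <stdio.h>

	static bool within_margin(unsigned int tp1, unsigned int tp2)
	{
		/* mirrors: threshold = tp1 - tp1 / 4; ret = tp2 > threshold */
		return tp2 > tp1 - tp1 / 4;
	}

	int main(void)
	{
		printf("%d\n", within_margin(1000, 800)); /* 1: 800 > 750 */
		printf("%d\n", within_margin(1000, 700)); /* 0: below the margin */
		return 0;
	}
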
+/**
+ * batadv_v_init_sel_class() - initialize GW selection class
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_v_init_sel_class(struct batadv_priv *bat_priv)
+{
+ /* set default throughput difference threshold to 5Mbps */
+ atomic_set(&bat_priv->gw.sel_class, 50);
+}
+
+/**
+ * batadv_v_gw_throughput_get() - retrieve the GW-bandwidth for a given GW
+ * @gw_node: the GW to retrieve the metric for
+ * @bw: the pointer where the metric will be stored. The metric is computed as
+ * the minimum between the GW advertised throughput and the path throughput to
+ * it in the mesh
+ *
+ * Return: 0 on success, -1 on failure
+ */
+static int batadv_v_gw_throughput_get(struct batadv_gw_node *gw_node, u32 *bw)
+{
+ struct batadv_neigh_ifinfo *router_ifinfo = NULL;
+ struct batadv_orig_node *orig_node;
+ struct batadv_neigh_node *router;
+ int ret = -1;
+
+ orig_node = gw_node->orig_node;
+ router = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT);
+ if (!router)
+ goto out;
+
+ router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT);
+ if (!router_ifinfo)
+ goto out;
+
+ /* the GW metric is computed as the minimum between the path throughput
+ * to reach the GW itself and the advertised bandwidth.
+ * This gives us an approximation of the effective throughput that the
+ * client can expect via this particular GW node
+ */
+ *bw = router_ifinfo->bat_v.throughput;
+ *bw = min_t(u32, *bw, gw_node->bandwidth_down);
+
+ ret = 0;
+out:
+ batadv_neigh_node_put(router);
+ batadv_neigh_ifinfo_put(router_ifinfo);
+
+ return ret;
+}
+
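The metric above is simply the bottleneck of the mesh path throughput and the advertised downlink. A trivial model (illustrative, values again in 100 kbit/s units):

	#include <stdio.h>

	static unsigned int gw_metric(unsigned int path_tp, unsigned int adv_down)
	{
		/* effective rate is capped by whichever leg is slower */
		return path_tp < adv_down ? path_tp : adv_down;
	}

	int main(void)
	{
		printf("%u\n", gw_metric(80, 200));  /* slow mesh path dominates: 80 */
		printf("%u\n", gw_metric(300, 200)); /* slow uplink dominates: 200 */
		return 0;
	}
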
+/**
+ * batadv_v_gw_get_best_gw_node() - retrieve the best GW node
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: the GW node having the best GW-metric, NULL if no GW is known
+ */
+static struct batadv_gw_node *
+batadv_v_gw_get_best_gw_node(struct batadv_priv *bat_priv)
+{
+ struct batadv_gw_node *gw_node, *curr_gw = NULL;
+ u32 max_bw = 0, bw;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.gateway_list, list) {
+ if (!kref_get_unless_zero(&gw_node->refcount))
+ continue;
+
+ if (batadv_v_gw_throughput_get(gw_node, &bw) < 0)
+ goto next;
+
+ if (curr_gw && bw <= max_bw)
+ goto next;
+
+ batadv_gw_node_put(curr_gw);
+
+ curr_gw = gw_node;
+ kref_get(&curr_gw->refcount);
+ max_bw = bw;
+
+next:
+ batadv_gw_node_put(gw_node);
+ }
+ rcu_read_unlock();
+
+ return curr_gw;
+}
+
+/**
+ * batadv_v_gw_is_eligible() - check if an originator would be selected as GW
+ * @bat_priv: the bat priv with all the soft interface information
+ * @curr_gw_orig: originator representing the currently selected GW
+ * @orig_node: the originator representing the new candidate
+ *
+ * Return: true if orig_node can be selected as current GW, false otherwise
+ */
+static bool batadv_v_gw_is_eligible(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *curr_gw_orig,
+ struct batadv_orig_node *orig_node)
+{
+ struct batadv_gw_node *curr_gw, *orig_gw = NULL;
+ u32 gw_throughput, orig_throughput, threshold;
+ bool ret = false;
+
+ threshold = atomic_read(&bat_priv->gw.sel_class);
+
+ curr_gw = batadv_gw_node_get(bat_priv, curr_gw_orig);
+ if (!curr_gw) {
+ ret = true;
+ goto out;
+ }
+
+ if (batadv_v_gw_throughput_get(curr_gw, &gw_throughput) < 0) {
+ ret = true;
+ goto out;
+ }
+
+ orig_gw = batadv_gw_node_get(bat_priv, orig_node);
+ if (!orig_gw)
+ goto out;
+
+ if (batadv_v_gw_throughput_get(orig_gw, &orig_throughput) < 0)
+ goto out;
+
+ if (orig_throughput < gw_throughput)
+ goto out;
+
+ if ((orig_throughput - gw_throughput) < threshold)
+ goto out;
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Restarting gateway selection: better gateway found (throughput curr: %u, throughput new: %u)\n",
+ gw_throughput, orig_throughput);
+
+ ret = true;
+out:
+ batadv_gw_node_put(curr_gw);
+ batadv_gw_node_put(orig_gw);
+
+ return ret;
+}
+
+/**
+ * batadv_v_gw_dump_entry() - Dump a gateway into a message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @cb: Control block containing additional options
+ * @bat_priv: The bat priv with all the soft interface information
+ * @gw_node: Gateway to be dumped
+ *
+ * Return: Error code, or 0 on success
+ */
+static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid,
+ struct netlink_callback *cb,
+ struct batadv_priv *bat_priv,
+ struct batadv_gw_node *gw_node)
+{
+ struct batadv_neigh_ifinfo *router_ifinfo = NULL;
+ struct batadv_neigh_node *router;
+ struct batadv_gw_node *curr_gw = NULL;
+ int ret = 0;
+ void *hdr;
+
+ router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT);
+ if (!router)
+ goto out;
+
+ router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT);
+ if (!router_ifinfo)
+ goto out;
+
+ curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
+
+ hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
+ &batadv_netlink_family, NLM_F_MULTI,
+ BATADV_CMD_GET_GATEWAYS);
+ if (!hdr) {
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ genl_dump_check_consistent(cb, hdr);
+
+ ret = -EMSGSIZE;
+
+ if (curr_gw == gw_node) {
+ if (nla_put_flag(msg, BATADV_ATTR_FLAG_BEST)) {
+ genlmsg_cancel(msg, hdr);
+ goto out;
+ }
+ }
+
+ if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN,
+ gw_node->orig_node->orig)) {
+ genlmsg_cancel(msg, hdr);
+ goto out;
+ }
+
+ if (nla_put_u32(msg, BATADV_ATTR_THROUGHPUT,
+ router_ifinfo->bat_v.throughput)) {
+ genlmsg_cancel(msg, hdr);
+ goto out;
+ }
+
+ if (nla_put(msg, BATADV_ATTR_ROUTER, ETH_ALEN, router->addr)) {
+ genlmsg_cancel(msg, hdr);
+ goto out;
+ }
+
+ if (nla_put_string(msg, BATADV_ATTR_HARD_IFNAME,
+ router->if_incoming->net_dev->name)) {
+ genlmsg_cancel(msg, hdr);
+ goto out;
+ }
+
+ if (nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
+ router->if_incoming->net_dev->ifindex)) {
+ genlmsg_cancel(msg, hdr);
+ goto out;
+ }
+
+ if (nla_put_u32(msg, BATADV_ATTR_BANDWIDTH_DOWN,
+ gw_node->bandwidth_down)) {
+ genlmsg_cancel(msg, hdr);
+ goto out;
+ }
+
+ if (nla_put_u32(msg, BATADV_ATTR_BANDWIDTH_UP, gw_node->bandwidth_up)) {
+ genlmsg_cancel(msg, hdr);
+ goto out;
+ }
+
+ genlmsg_end(msg, hdr);
+ ret = 0;
+
+out:
+ batadv_gw_node_put(curr_gw);
+ batadv_neigh_ifinfo_put(router_ifinfo);
+ batadv_neigh_node_put(router);
+ return ret;
+}
+
+/**
+ * batadv_v_gw_dump() - Dump gateways into a message
+ * @msg: Netlink message to dump into
+ * @cb: Control block containing additional options
+ * @bat_priv: The bat priv with all the soft interface information
+ */
+static void batadv_v_gw_dump(struct sk_buff *msg, struct netlink_callback *cb,
+ struct batadv_priv *bat_priv)
+{
+ int portid = NETLINK_CB(cb->skb).portid;
+ struct batadv_gw_node *gw_node;
+ int idx_skip = cb->args[0];
+ int idx = 0;
+
+ spin_lock_bh(&bat_priv->gw.list_lock);
+ cb->seq = bat_priv->gw.generation << 1 | 1;
+
+ hlist_for_each_entry(gw_node, &bat_priv->gw.gateway_list, list) {
+ if (idx++ < idx_skip)
+ continue;
+
+ if (batadv_v_gw_dump_entry(msg, portid, cb, bat_priv,
+ gw_node)) {
+ idx_skip = idx - 1;
+ goto unlock;
+ }
+ }
+
+ idx_skip = idx;
+unlock:
+ spin_unlock_bh(&bat_priv->gw.list_lock);
+
+ cb->args[0] = idx_skip;
+}
+
+static struct batadv_algo_ops batadv_batman_v __read_mostly = {
+ .name = "BATMAN_V",
+ .iface = {
+ .activate = batadv_v_iface_activate,
+ .enable = batadv_v_iface_enable,
+ .disable = batadv_v_iface_disable,
+ .update_mac = batadv_v_iface_update_mac,
+ .primary_set = batadv_v_primary_iface_set,
+ },
+ .neigh = {
+ .hardif_init = batadv_v_hardif_neigh_init,
+ .cmp = batadv_v_neigh_cmp,
+ .is_similar_or_better = batadv_v_neigh_is_sob,
+ .dump = batadv_v_neigh_dump,
+ },
+ .orig = {
+ .dump = batadv_v_orig_dump,
+ },
+ .gw = {
+ .init_sel_class = batadv_v_init_sel_class,
+ .sel_class_max = U32_MAX,
+ .get_best_gw_node = batadv_v_gw_get_best_gw_node,
+ .is_eligible = batadv_v_gw_is_eligible,
+ .dump = batadv_v_gw_dump,
+ },
+};
+
+/**
+ * batadv_v_hardif_init() - initialize the algorithm specific fields in the
+ * hard-interface object
+ * @hard_iface: the hard-interface to initialize
+ */
+void batadv_v_hardif_init(struct batadv_hard_iface *hard_iface)
+{
+ /* enable link throughput auto-detection by setting the throughput
+ * override to zero
+ */
+ atomic_set(&hard_iface->bat_v.throughput_override, 0);
+ atomic_set(&hard_iface->bat_v.elp_interval, 500);
+
+ hard_iface->bat_v.aggr_len = 0;
+ skb_queue_head_init(&hard_iface->bat_v.aggr_list);
+ INIT_DELAYED_WORK(&hard_iface->bat_v.aggr_wq,
+ batadv_v_ogm_aggr_work);
+}
+
+/**
+ * batadv_v_mesh_init() - initialize the B.A.T.M.A.N. V private resources for a
+ * mesh
+ * @bat_priv: the object representing the mesh interface to initialise
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int batadv_v_mesh_init(struct batadv_priv *bat_priv)
+{
+ int ret = 0;
+
+ ret = batadv_v_ogm_init(bat_priv);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * batadv_v_mesh_free() - free the B.A.T.M.A.N. V private resources for a mesh
+ * @bat_priv: the object representing the mesh interface to free
+ */
+void batadv_v_mesh_free(struct batadv_priv *bat_priv)
+{
+ batadv_v_ogm_free(bat_priv);
+}
+
+/**
+ * batadv_v_init() - B.A.T.M.A.N. V initialization function
+ *
+ * Description: Takes care of initializing all the subcomponents.
+ * It is invoked upon module load only.
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int __init batadv_v_init(void)
+{
+ int ret;
+
+ /* B.A.T.M.A.N. V echo location protocol packet */
+ ret = batadv_recv_handler_register(BATADV_ELP,
+ batadv_v_elp_packet_recv);
+ if (ret < 0)
+ return ret;
+
+ ret = batadv_recv_handler_register(BATADV_OGM2,
+ batadv_v_ogm_packet_recv);
+ if (ret < 0)
+ goto elp_unregister;
+
+ ret = batadv_algo_register(&batadv_batman_v);
+ if (ret < 0)
+ goto ogm_unregister;
+
+ return ret;
+
+ogm_unregister:
+ batadv_recv_handler_unregister(BATADV_OGM2);
+
+elp_unregister:
+ batadv_recv_handler_unregister(BATADV_ELP);
+
+ return ret;
+}
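
batadv_v_init() above follows the usual kernel pattern of registering subsystems in order and unwinding them in reverse on failure, so a partially initialised module never stays registered. Below is an illustrative userspace sketch of that unwind pattern, not kernel code; the reg_*/unreg_* helpers are hypothetical stand-ins for the ELP, OGM2 and algorithm registrations:

#include <stdio.h>

/* hypothetical stand-ins for the ELP/OGM2/algo registrations above */
static int reg_elp(void)  { return 0; }
static int reg_ogm2(void) { return 0; }
static int reg_algo(void) { return -1; /* simulate a failure */ }
static void unreg_elp(void)  { puts("unregister ELP"); }
static void unreg_ogm2(void) { puts("unregister OGM2"); }

int main(void)
{
	int ret;

	ret = reg_elp();
	if (ret < 0)
		return ret;

	ret = reg_ogm2();
	if (ret < 0)
		goto elp_unregister;

	ret = reg_algo();
	if (ret < 0)
		goto ogm_unregister;

	return 0;

ogm_unregister:
	unreg_ogm2();
elp_unregister:
	unreg_elp();
	return ret;
}
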
diff --git a/net/batman-adv/bat_v.h b/net/batman-adv/bat_v.h
new file mode 100644
index 0000000000..964431f4dc
--- /dev/null
+++ b/net/batman-adv/bat_v.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Linus Lüssing
+ */
+
+#ifndef _NET_BATMAN_ADV_BAT_V_H_
+#define _NET_BATMAN_ADV_BAT_V_H_
+
+#include "main.h"
+
+#ifdef CONFIG_BATMAN_ADV_BATMAN_V
+
+int batadv_v_init(void);
+void batadv_v_hardif_init(struct batadv_hard_iface *hardif);
+int batadv_v_mesh_init(struct batadv_priv *bat_priv);
+void batadv_v_mesh_free(struct batadv_priv *bat_priv);
+
+#else
+
+static inline int batadv_v_init(void)
+{
+ return 0;
+}
+
+static inline void batadv_v_hardif_init(struct batadv_hard_iface *hardif)
+{
+}
+
+static inline int batadv_v_mesh_init(struct batadv_priv *bat_priv)
+{
+ return 0;
+}
+
+static inline void batadv_v_mesh_free(struct batadv_priv *bat_priv)
+{
+}
+
+#endif /* CONFIG_BATMAN_ADV_BATMAN_V */
+
+#endif /* _NET_BATMAN_ADV_BAT_V_H_ */
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
new file mode 100644
index 0000000000..1d704574e6
--- /dev/null
+++ b/net/batman-adv/bat_v_elp.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Linus Lüssing, Marek Lindner
+ */
+
+#include "bat_v_elp.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/byteorder/generic.h>
+#include <linux/container_of.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/gfp.h>
+#include <linux/if_ether.h>
+#include <linux/jiffies.h>
+#include <linux/kref.h>
+#include <linux/minmax.h>
+#include <linux/netdevice.h>
+#include <linux/nl80211.h>
+#include <linux/random.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/rtnetlink.h>
+#include <linux/skbuff.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <net/cfg80211.h>
+#include <uapi/linux/batadv_packet.h>
+
+#include "bat_algo.h"
+#include "bat_v_ogm.h"
+#include "hard-interface.h"
+#include "log.h"
+#include "originator.h"
+#include "routing.h"
+#include "send.h"
+
+/**
+ * batadv_v_elp_start_timer() - restart timer for ELP periodic work
+ * @hard_iface: the interface for which the timer has to be reset
+ */
+static void batadv_v_elp_start_timer(struct batadv_hard_iface *hard_iface)
+{
+ unsigned int msecs;
+
+ msecs = atomic_read(&hard_iface->bat_v.elp_interval) - BATADV_JITTER;
+ msecs += get_random_u32_below(2 * BATADV_JITTER);
+
+ queue_delayed_work(batadv_event_workqueue, &hard_iface->bat_v.elp_wq,
+ msecs_to_jiffies(msecs));
+}
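
The timer above is deliberately not strictly periodic: each rearm picks a delay of elp_interval plus a uniform offset in [-BATADV_JITTER, +BATADV_JITTER), which keeps neighbouring nodes from synchronising their broadcasts. A minimal userspace sketch of the same arithmetic, assuming BATADV_JITTER is 20 (its value in main.h) and substituting rand() for the kernel's get_random_u32_below():

#include <stdio.h>
#include <stdlib.h>

#define BATADV_JITTER 20 /* ms, assumption taken from main.h */

/* userspace stand-in for the kernel's get_random_u32_below() */
static unsigned int random_u32_below(unsigned int ceil)
{
	return (unsigned int)rand() % ceil;
}

int main(void)
{
	unsigned int elp_interval = 500; /* ms, the default set in
					  * batadv_v_hardif_init() */
	unsigned int msecs;

	msecs = elp_interval - BATADV_JITTER;
	msecs += random_u32_below(2 * BATADV_JITTER);

	printf("next ELP broadcast in %u ms\n", msecs); /* 480..519 */
	return 0;
}
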
+
+/**
+ * batadv_v_elp_get_throughput() - get the throughput towards a neighbour
+ * @neigh: the neighbour for which the throughput has to be obtained
+ *
+ * Return: The throughput towards the given neighbour in multiples of 100kbps
+ * (a value of '1' equals 0.1Mbps, '10' equals 1Mbps, etc).
+ */
+static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
+{
+ struct batadv_hard_iface *hard_iface = neigh->if_incoming;
+ struct ethtool_link_ksettings link_settings;
+ struct net_device *real_netdev;
+ struct station_info sinfo;
+ u32 throughput;
+ int ret;
+
+ /* if the user specified a customised value for this interface, then
+ * return it directly
+ */
+ throughput = atomic_read(&hard_iface->bat_v.throughput_override);
+ if (throughput != 0)
+ return throughput;
+
+ /* if this is a wireless device, then ask for its throughput through
+ * the cfg80211 API
+ */
+ if (batadv_is_wifi_hardif(hard_iface)) {
+ if (!batadv_is_cfg80211_hardif(hard_iface))
+ /* unsupported WiFi driver version */
+ goto default_throughput;
+
+ real_netdev = batadv_get_real_netdev(hard_iface->net_dev);
+ if (!real_netdev)
+ goto default_throughput;
+
+ ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo);
+
+ if (!ret) {
+ /* free the TID stats immediately */
+ cfg80211_sinfo_release_content(&sinfo);
+ }
+
+ dev_put(real_netdev);
+ if (ret == -ENOENT) {
+ /* Node is not associated anymore! It would be
+ * possible to delete this neighbor. For now set
+ * the throughput metric to 0.
+ */
+ return 0;
+ }
+ if (ret)
+ goto default_throughput;
+
+ if (sinfo.filled & BIT(NL80211_STA_INFO_EXPECTED_THROUGHPUT))
+ return sinfo.expected_throughput / 100;
+
+ /* try to estimate the expected throughput based on reported tx
+ * rates
+ */
+ if (sinfo.filled & BIT(NL80211_STA_INFO_TX_BITRATE))
+ return cfg80211_calculate_bitrate(&sinfo.txrate) / 3;
+
+ goto default_throughput;
+ }
+
+ /* if not a wifi interface, check if this device provides data via
+ * ethtool (e.g. an Ethernet adapter)
+ */
+ rtnl_lock();
+ ret = __ethtool_get_link_ksettings(hard_iface->net_dev, &link_settings);
+ rtnl_unlock();
+ if (ret == 0) {
+ /* link characteristics might change over time */
+ if (link_settings.base.duplex == DUPLEX_FULL)
+ hard_iface->bat_v.flags |= BATADV_FULL_DUPLEX;
+ else
+ hard_iface->bat_v.flags &= ~BATADV_FULL_DUPLEX;
+
+ throughput = link_settings.base.speed;
+ if (throughput && throughput != SPEED_UNKNOWN)
+ return throughput * 10;
+ }
+
+default_throughput:
+ if (!(hard_iface->bat_v.flags & BATADV_WARNING_DEFAULT)) {
+ batadv_info(hard_iface->soft_iface,
+ "WiFi driver or ethtool info does not provide information about link speeds on interface %s, therefore defaulting to hardcoded throughput values of %u.%1u Mbps. Consider overriding the throughput manually or checking your driver.\n",
+ hard_iface->net_dev->name,
+ BATADV_THROUGHPUT_DEFAULT_VALUE / 10,
+ BATADV_THROUGHPUT_DEFAULT_VALUE % 10);
+ hard_iface->bat_v.flags |= BATADV_WARNING_DEFAULT;
+ }
+
+ /* if none of the above cases apply, return the default throughput */
+ return BATADV_THROUGHPUT_DEFAULT_VALUE;
+}
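
All three probes above are normalised to the same unit of 100 kbit/s, so a return value of 10 means 1 Mbit/s: cfg80211 reports the expected throughput in kbit/s (hence the division by 100), while ethtool reports the link speed in Mbit/s (hence the multiplication by 10). A hedged sketch of the two conversions, with sample values that are illustrative, not kernel API:

#include <stdio.h>

int main(void)
{
	unsigned int expected_throughput = 54000; /* cfg80211, kbit/s */
	unsigned int link_speed = 1000;           /* ethtool, Mbit/s */

	/* wifi path: kbit/s -> multiples of 100 kbit/s */
	printf("wifi:    %u\n", expected_throughput / 100); /* 540 */

	/* ethtool path: Mbit/s -> multiples of 100 kbit/s */
	printf("ethtool: %u\n", link_speed * 10);           /* 10000 */

	return 0;
}
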
+
+/**
+ * batadv_v_elp_throughput_metric_update() - worker updating the throughput
+ * metric of a single hop neighbour
+ * @work: the work queue item
+ */
+void batadv_v_elp_throughput_metric_update(struct work_struct *work)
+{
+ struct batadv_hardif_neigh_node_bat_v *neigh_bat_v;
+ struct batadv_hardif_neigh_node *neigh;
+
+ neigh_bat_v = container_of(work, struct batadv_hardif_neigh_node_bat_v,
+ metric_work);
+ neigh = container_of(neigh_bat_v, struct batadv_hardif_neigh_node,
+ bat_v);
+
+ ewma_throughput_add(&neigh->bat_v.throughput,
+ batadv_v_elp_get_throughput(neigh));
+
+ /* decrement refcounter to balance increment performed before scheduling
+ * this task
+ */
+ batadv_hardif_neigh_put(neigh);
+}
+
+/**
+ * batadv_v_elp_wifi_neigh_probe() - send link probing packets to a neighbour
+ * @neigh: the neighbour to probe
+ *
+ * Sends a predefined number of unicast wifi packets to a given neighbour in
+ * order to trigger the throughput estimation on this link by the RC algorithm.
+ * Packets are sent only if there is not enough payload unicast traffic towards
+ * this neighbour.
+ *
+ * Return: True on success and false in case of error during skb preparation.
+ */
+static bool
+batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh)
+{
+ struct batadv_hard_iface *hard_iface = neigh->if_incoming;
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ unsigned long last_tx_diff;
+ struct sk_buff *skb;
+ int probe_len, i;
+ int elp_skb_len;
+
+ /* this probing routine is for Wifi neighbours only */
+ if (!batadv_is_wifi_hardif(hard_iface))
+ return true;
+
+ /* probe the neighbor only if no unicast packets have been sent
+ * to it in the last 100 milliseconds: this is the rate control
+ * algorithm sampling interval (minstrel). In this way, if not
+ * enough traffic has been sent to the neighbor, batman-adv can
+ * generate 2 probe packets and push the RC algorithm to perform
+ * the sampling
+ */
+ last_tx_diff = jiffies_to_msecs(jiffies - neigh->bat_v.last_unicast_tx);
+ if (last_tx_diff <= BATADV_ELP_PROBE_MAX_TX_DIFF)
+ return true;
+
+ probe_len = max_t(int, sizeof(struct batadv_elp_packet),
+ BATADV_ELP_MIN_PROBE_SIZE);
+
+ for (i = 0; i < BATADV_ELP_PROBES_PER_NODE; i++) {
+ elp_skb_len = hard_iface->bat_v.elp_skb->len;
+ skb = skb_copy_expand(hard_iface->bat_v.elp_skb, 0,
+ probe_len - elp_skb_len,
+ GFP_ATOMIC);
+ if (!skb)
+ return false;
+
+ /* Tell the skb to get as big as the allocated space (we want
+ * the packet to be exactly of that size to make the link
+ * throughput estimation effective).
+ */
+ skb_put_zero(skb, probe_len - hard_iface->bat_v.elp_skb->len);
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Sending unicast (probe) ELP packet on interface %s to %pM\n",
+ hard_iface->net_dev->name, neigh->addr);
+
+ batadv_send_skb_packet(skb, hard_iface, neigh->addr);
+ }
+
+ return true;
+}
+
+/**
+ * batadv_v_elp_periodic_work() - ELP periodic task per interface
+ * @work: work queue item
+ *
+ * Emits broadcast ELP messages in regular intervals.
+ */
+static void batadv_v_elp_periodic_work(struct work_struct *work)
+{
+ struct batadv_hardif_neigh_node *hardif_neigh;
+ struct batadv_hard_iface *hard_iface;
+ struct batadv_hard_iface_bat_v *bat_v;
+ struct batadv_elp_packet *elp_packet;
+ struct batadv_priv *bat_priv;
+ struct sk_buff *skb;
+ u32 elp_interval;
+ bool ret;
+
+ bat_v = container_of(work, struct batadv_hard_iface_bat_v, elp_wq.work);
+ hard_iface = container_of(bat_v, struct batadv_hard_iface, bat_v);
+ bat_priv = netdev_priv(hard_iface->soft_iface);
+
+ if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
+ goto out;
+
+ /* we are in the process of shutting this interface down */
+ if (hard_iface->if_status == BATADV_IF_NOT_IN_USE ||
+ hard_iface->if_status == BATADV_IF_TO_BE_REMOVED)
+ goto out;
+
+ /* the interface was enabled but may not be ready yet */
+ if (hard_iface->if_status != BATADV_IF_ACTIVE)
+ goto restart_timer;
+
+ skb = skb_copy(hard_iface->bat_v.elp_skb, GFP_ATOMIC);
+ if (!skb)
+ goto restart_timer;
+
+ elp_packet = (struct batadv_elp_packet *)skb->data;
+ elp_packet->seqno = htonl(atomic_read(&hard_iface->bat_v.elp_seqno));
+ elp_interval = atomic_read(&hard_iface->bat_v.elp_interval);
+ elp_packet->elp_interval = htonl(elp_interval);
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Sending broadcast ELP packet on interface %s, seqno %u\n",
+ hard_iface->net_dev->name,
+ atomic_read(&hard_iface->bat_v.elp_seqno));
+
+ batadv_send_broadcast_skb(skb, hard_iface);
+
+ atomic_inc(&hard_iface->bat_v.elp_seqno);
+
+ /* The throughput metric is updated on each sent packet. This way, if a
+ * node is dead and no longer sends packets, batman-adv is still able
+ * to react to its death in a timely manner.
+ *
+ * The throughput metric is updated by following these steps:
+ * 1) if the hard_iface is wifi => send a number of unicast ELPs for
+ * probing/sampling to each neighbor
+ * 2) update the throughput metric value of each neighbor (note that the
+ * value retrieved in this step might be 100ms old because the
+ * probing packets at point 1) could still be in the HW queue)
+ */
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(hardif_neigh, &hard_iface->neigh_list, list) {
+ if (!batadv_v_elp_wifi_neigh_probe(hardif_neigh))
+ /* if something goes wrong while probing, better to stop
+ * sending packets immediately and reschedule the task
+ */
+ break;
+
+ if (!kref_get_unless_zero(&hardif_neigh->refcount))
+ continue;
+
+ /* Reading the estimated throughput from cfg80211 is a task that
+ * may sleep and that is not allowed in an rcu protected
+ * context. Therefore schedule a task for that.
+ */
+ ret = queue_work(batadv_event_workqueue,
+ &hardif_neigh->bat_v.metric_work);
+
+ if (!ret)
+ batadv_hardif_neigh_put(hardif_neigh);
+ }
+ rcu_read_unlock();
+
+restart_timer:
+ batadv_v_elp_start_timer(hard_iface);
+out:
+ return;
+}
+
+/**
+ * batadv_v_elp_iface_enable() - setup the ELP interface private resources
+ * @hard_iface: interface for which the data has to be prepared
+ *
+ * Return: 0 on success or -ENOMEM in case of failure.
+ */
+int batadv_v_elp_iface_enable(struct batadv_hard_iface *hard_iface)
+{
+ static const size_t tvlv_padding = sizeof(__be32);
+ struct batadv_elp_packet *elp_packet;
+ unsigned char *elp_buff;
+ u32 random_seqno;
+ size_t size;
+ int res = -ENOMEM;
+
+ size = ETH_HLEN + NET_IP_ALIGN + BATADV_ELP_HLEN + tvlv_padding;
+ hard_iface->bat_v.elp_skb = dev_alloc_skb(size);
+ if (!hard_iface->bat_v.elp_skb)
+ goto out;
+
+ skb_reserve(hard_iface->bat_v.elp_skb, ETH_HLEN + NET_IP_ALIGN);
+ elp_buff = skb_put_zero(hard_iface->bat_v.elp_skb,
+ BATADV_ELP_HLEN + tvlv_padding);
+ elp_packet = (struct batadv_elp_packet *)elp_buff;
+
+ elp_packet->packet_type = BATADV_ELP;
+ elp_packet->version = BATADV_COMPAT_VERSION;
+
+ /* randomize initial seqno to avoid collision */
+ get_random_bytes(&random_seqno, sizeof(random_seqno));
+ atomic_set(&hard_iface->bat_v.elp_seqno, random_seqno);
+
+ /* assume full-duplex by default */
+ hard_iface->bat_v.flags |= BATADV_FULL_DUPLEX;
+
+ /* warn the user (again) if no throughput data is available */
+ hard_iface->bat_v.flags &= ~BATADV_WARNING_DEFAULT;
+
+ if (batadv_is_wifi_hardif(hard_iface))
+ hard_iface->bat_v.flags &= ~BATADV_FULL_DUPLEX;
+
+ INIT_DELAYED_WORK(&hard_iface->bat_v.elp_wq,
+ batadv_v_elp_periodic_work);
+ batadv_v_elp_start_timer(hard_iface);
+ res = 0;
+
+out:
+ return res;
+}
+
+/**
+ * batadv_v_elp_iface_disable() - release ELP interface private resources
+ * @hard_iface: interface for which the resources have to be released
+ */
+void batadv_v_elp_iface_disable(struct batadv_hard_iface *hard_iface)
+{
+ cancel_delayed_work_sync(&hard_iface->bat_v.elp_wq);
+
+ dev_kfree_skb(hard_iface->bat_v.elp_skb);
+ hard_iface->bat_v.elp_skb = NULL;
+}
+
+/**
+ * batadv_v_elp_iface_activate() - update the ELP buffer belonging to the given
+ * hard-interface
+ * @primary_iface: the new primary interface
+ * @hard_iface: interface holding the to-be-updated buffer
+ */
+void batadv_v_elp_iface_activate(struct batadv_hard_iface *primary_iface,
+ struct batadv_hard_iface *hard_iface)
+{
+ struct batadv_elp_packet *elp_packet;
+ struct sk_buff *skb;
+
+ if (!hard_iface->bat_v.elp_skb)
+ return;
+
+ skb = hard_iface->bat_v.elp_skb;
+ elp_packet = (struct batadv_elp_packet *)skb->data;
+ ether_addr_copy(elp_packet->orig,
+ primary_iface->net_dev->dev_addr);
+}
+
+/**
+ * batadv_v_elp_primary_iface_set() - change internal data to reflect the new
+ * primary interface
+ * @primary_iface: the new primary interface
+ */
+void batadv_v_elp_primary_iface_set(struct batadv_hard_iface *primary_iface)
+{
+ struct batadv_hard_iface *hard_iface;
+
+ /* update orig field of every elp iface belonging to this mesh */
+ rcu_read_lock();
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ if (primary_iface->soft_iface != hard_iface->soft_iface)
+ continue;
+
+ batadv_v_elp_iface_activate(primary_iface, hard_iface);
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * batadv_v_elp_neigh_update() - update an ELP neighbour node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @neigh_addr: the neighbour interface address
+ * @if_incoming: the interface the packet was received through
+ * @elp_packet: the received ELP packet
+ *
+ * Updates the ELP neighbour node state with the data received within the new
+ * ELP packet.
+ */
+static void batadv_v_elp_neigh_update(struct batadv_priv *bat_priv,
+ u8 *neigh_addr,
+ struct batadv_hard_iface *if_incoming,
+ struct batadv_elp_packet *elp_packet)
+
+{
+ struct batadv_neigh_node *neigh;
+ struct batadv_orig_node *orig_neigh;
+ struct batadv_hardif_neigh_node *hardif_neigh;
+ s32 seqno_diff;
+ s32 elp_latest_seqno;
+
+ orig_neigh = batadv_v_ogm_orig_get(bat_priv, elp_packet->orig);
+ if (!orig_neigh)
+ return;
+
+ neigh = batadv_neigh_node_get_or_create(orig_neigh,
+ if_incoming, neigh_addr);
+ if (!neigh)
+ goto orig_free;
+
+ hardif_neigh = batadv_hardif_neigh_get(if_incoming, neigh_addr);
+ if (!hardif_neigh)
+ goto neigh_free;
+
+ elp_latest_seqno = hardif_neigh->bat_v.elp_latest_seqno;
+ seqno_diff = ntohl(elp_packet->seqno) - elp_latest_seqno;
+
+ /* known or older sequence numbers are ignored. However, always adopt
+ * if the router seems to have been restarted.
+ */
+ if (seqno_diff < 1 && seqno_diff > -BATADV_ELP_MAX_AGE)
+ goto hardif_free;
+
+ neigh->last_seen = jiffies;
+ hardif_neigh->last_seen = jiffies;
+ hardif_neigh->bat_v.elp_latest_seqno = ntohl(elp_packet->seqno);
+ hardif_neigh->bat_v.elp_interval = ntohl(elp_packet->elp_interval);
+
+hardif_free:
+ batadv_hardif_neigh_put(hardif_neigh);
+neigh_free:
+ batadv_neigh_node_put(neigh);
+orig_free:
+ batadv_orig_node_put(orig_neigh);
+}
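
The acceptance rule above ignores duplicate and slightly older sequence numbers, but deliberately adopts a much older one because that usually means the neighbour rebooted and restarted from a new random seqno. A standalone sketch of the predicate, assuming BATADV_ELP_MAX_AGE is 64 as defined in main.h:

#include <stdbool.h>
#include <stdio.h>

#define BATADV_ELP_MAX_AGE 64 /* assumption taken from main.h */

/* mirrors the condition guarding the state update above */
static bool elp_seqno_adopt(int seqno_diff)
{
	return !(seqno_diff < 1 && seqno_diff > -BATADV_ELP_MAX_AGE);
}

int main(void)
{
	printf("%d\n", elp_seqno_adopt(1));   /* 1: newer, adopt */
	printf("%d\n", elp_seqno_adopt(0));   /* 0: duplicate, ignore */
	printf("%d\n", elp_seqno_adopt(-5));  /* 0: slightly old, ignore */
	printf("%d\n", elp_seqno_adopt(-64)); /* 1: restart suspected */
	return 0;
}
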
+
+/**
+ * batadv_v_elp_packet_recv() - main ELP packet handler
+ * @skb: the received packet
+ * @if_incoming: the interface this packet was received through
+ *
+ * Return: NET_RX_SUCCESS and consumes the skb if the packet was properly
+ * processed or NET_RX_DROP in case of failure.
+ */
+int batadv_v_elp_packet_recv(struct sk_buff *skb,
+ struct batadv_hard_iface *if_incoming)
+{
+ struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct batadv_elp_packet *elp_packet;
+ struct batadv_hard_iface *primary_if;
+ struct ethhdr *ethhdr;
+ bool res;
+ int ret = NET_RX_DROP;
+
+ res = batadv_check_management_packet(skb, if_incoming, BATADV_ELP_HLEN);
+ if (!res)
+ goto free_skb;
+
+ ethhdr = eth_hdr(skb);
+ if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
+ goto free_skb;
+
+ /* did we receive a B.A.T.M.A.N. V ELP packet on an interface
+ * that does not have B.A.T.M.A.N. V ELP enabled ?
+ */
+ if (strcmp(bat_priv->algo_ops->name, "BATMAN_V") != 0)
+ goto free_skb;
+
+ elp_packet = (struct batadv_elp_packet *)skb->data;
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Received ELP packet from %pM seqno %u ORIG: %pM\n",
+ ethhdr->h_source, ntohl(elp_packet->seqno),
+ elp_packet->orig);
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ goto free_skb;
+
+ batadv_v_elp_neigh_update(bat_priv, ethhdr->h_source, if_incoming,
+ elp_packet);
+
+ ret = NET_RX_SUCCESS;
+ batadv_hardif_put(primary_if);
+
+free_skb:
+ if (ret == NET_RX_SUCCESS)
+ consume_skb(skb);
+ else
+ kfree_skb(skb);
+
+ return ret;
+}
diff --git a/net/batman-adv/bat_v_elp.h b/net/batman-adv/bat_v_elp.h
new file mode 100644
index 0000000000..9e2740195f
--- /dev/null
+++ b/net/batman-adv/bat_v_elp.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Linus Lüssing, Marek Lindner
+ */
+
+#ifndef _NET_BATMAN_ADV_BAT_V_ELP_H_
+#define _NET_BATMAN_ADV_BAT_V_ELP_H_
+
+#include "main.h"
+
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+
+int batadv_v_elp_iface_enable(struct batadv_hard_iface *hard_iface);
+void batadv_v_elp_iface_disable(struct batadv_hard_iface *hard_iface);
+void batadv_v_elp_iface_activate(struct batadv_hard_iface *primary_iface,
+ struct batadv_hard_iface *hard_iface);
+void batadv_v_elp_primary_iface_set(struct batadv_hard_iface *primary_iface);
+int batadv_v_elp_packet_recv(struct sk_buff *skb,
+ struct batadv_hard_iface *if_incoming);
+void batadv_v_elp_throughput_metric_update(struct work_struct *work);
+
+#endif /* _NET_BATMAN_ADV_BAT_V_ELP_H_ */
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
new file mode 100644
index 0000000000..e503ee0d89
--- /dev/null
+++ b/net/batman-adv/bat_v_ogm.c
@@ -0,0 +1,1087 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Antonio Quartulli
+ */
+
+#include "bat_v_ogm.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/byteorder/generic.h>
+#include <linux/container_of.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/gfp.h>
+#include <linux/if_ether.h>
+#include <linux/jiffies.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/minmax.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/random.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <uapi/linux/batadv_packet.h>
+
+#include "bat_algo.h"
+#include "hard-interface.h"
+#include "hash.h"
+#include "log.h"
+#include "originator.h"
+#include "routing.h"
+#include "send.h"
+#include "translation-table.h"
+#include "tvlv.h"
+
+/**
+ * batadv_v_ogm_orig_get() - retrieve and possibly create an originator node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the address of the originator
+ *
+ * Return: the orig_node corresponding to the specified address. If such an
+ * object does not exist, it is allocated here. In case of allocation failure
+ * returns NULL.
+ */
+struct batadv_orig_node *batadv_v_ogm_orig_get(struct batadv_priv *bat_priv,
+ const u8 *addr)
+{
+ struct batadv_orig_node *orig_node;
+ int hash_added;
+
+ orig_node = batadv_orig_hash_find(bat_priv, addr);
+ if (orig_node)
+ return orig_node;
+
+ orig_node = batadv_orig_node_new(bat_priv, addr);
+ if (!orig_node)
+ return NULL;
+
+ kref_get(&orig_node->refcount);
+ hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
+ batadv_choose_orig, orig_node,
+ &orig_node->hash_entry);
+ if (hash_added != 0) {
+ /* remove refcnt for newly created orig_node and hash entry */
+ batadv_orig_node_put(orig_node);
+ batadv_orig_node_put(orig_node);
+ orig_node = NULL;
+ }
+
+ return orig_node;
+}
+
+/**
+ * batadv_v_ogm_start_queue_timer() - restart the OGM aggregation timer
+ * @hard_iface: the interface to use to send the OGM
+ */
+static void batadv_v_ogm_start_queue_timer(struct batadv_hard_iface *hard_iface)
+{
+ unsigned int msecs = BATADV_MAX_AGGREGATION_MS * 1000;
+
+ /* msecs * [0.9, 1.1] */
+ msecs += get_random_u32_below(msecs / 5) - (msecs / 10);
+ queue_delayed_work(batadv_event_workqueue, &hard_iface->bat_v.aggr_wq,
+ msecs_to_jiffies(msecs / 1000));
+}
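
The computation above works in units of 1/1000 ms: the base interval is scaled up by 1000 before the +/-10% jitter is applied and scaled back down when converting to jiffies, which preserves sub-millisecond jitter granularity. A userspace sketch, assuming BATADV_MAX_AGGREGATION_MS is 100 (its value in main.h):

#include <stdio.h>
#include <stdlib.h>

#define BATADV_MAX_AGGREGATION_MS 100 /* assumption taken from main.h */

/* userspace stand-in for the kernel's get_random_u32_below() */
static unsigned int random_u32_below(unsigned int ceil)
{
	return (unsigned int)rand() % ceil;
}

int main(void)
{
	/* work in 1/1000 ms so the 10% jitter keeps sub-ms resolution */
	unsigned int msecs = BATADV_MAX_AGGREGATION_MS * 1000;

	/* msecs * [0.9, 1.1], exactly as in the kernel code above */
	msecs += random_u32_below(msecs / 5) - (msecs / 10);

	printf("flush aggregate in %u ms\n", msecs / 1000); /* 90..109 */
	return 0;
}
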
+
+/**
+ * batadv_v_ogm_start_timer() - restart the OGM sending timer
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_v_ogm_start_timer(struct batadv_priv *bat_priv)
+{
+ unsigned long msecs;
+ /* this function may be invoked in different contexts (ogm rescheduling
+ * or hard_iface activation), but the work timer should not be reset
+ */
+ if (delayed_work_pending(&bat_priv->bat_v.ogm_wq))
+ return;
+
+ msecs = atomic_read(&bat_priv->orig_interval) - BATADV_JITTER;
+ msecs += get_random_u32_below(2 * BATADV_JITTER);
+ queue_delayed_work(batadv_event_workqueue, &bat_priv->bat_v.ogm_wq,
+ msecs_to_jiffies(msecs));
+}
+
+/**
+ * batadv_v_ogm_send_to_if() - send a batman ogm using a given interface
+ * @skb: the OGM to send
+ * @hard_iface: the interface to use to send the OGM
+ */
+static void batadv_v_ogm_send_to_if(struct sk_buff *skb,
+ struct batadv_hard_iface *hard_iface)
+{
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+
+ if (hard_iface->if_status != BATADV_IF_ACTIVE) {
+ kfree_skb(skb);
+ return;
+ }
+
+ batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_TX);
+ batadv_add_counter(bat_priv, BATADV_CNT_MGMT_TX_BYTES,
+ skb->len + ETH_HLEN);
+
+ batadv_send_broadcast_skb(skb, hard_iface);
+}
+
+/**
+ * batadv_v_ogm_len() - OGMv2 packet length
+ * @skb: the OGM to check
+ *
+ * Return: Length of the given OGMv2 packet, including tvlv length, excluding
+ * ethernet header length.
+ */
+static unsigned int batadv_v_ogm_len(struct sk_buff *skb)
+{
+ struct batadv_ogm2_packet *ogm_packet;
+
+ ogm_packet = (struct batadv_ogm2_packet *)skb->data;
+ return BATADV_OGM2_HLEN + ntohs(ogm_packet->tvlv_len);
+}
+
+/**
+ * batadv_v_ogm_queue_left() - check if given OGM still fits aggregation queue
+ * @skb: the OGM to check
+ * @hard_iface: the interface to use to send the OGM
+ *
+ * Caller needs to hold the hard_iface->bat_v.aggr_list.lock.
+ *
+ * Return: True, if the given OGMv2 packet still fits, false otherwise.
+ */
+static bool batadv_v_ogm_queue_left(struct sk_buff *skb,
+ struct batadv_hard_iface *hard_iface)
+{
+ unsigned int max = min_t(unsigned int, hard_iface->net_dev->mtu,
+ BATADV_MAX_AGGREGATION_BYTES);
+ unsigned int ogm_len = batadv_v_ogm_len(skb);
+
+ lockdep_assert_held(&hard_iface->bat_v.aggr_list.lock);
+
+ return hard_iface->bat_v.aggr_len + ogm_len <= max;
+}
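
An aggregate therefore never exceeds the smaller of the outgoing interface's MTU and the global aggregation ceiling, so one flush always fits into a single frame. An illustrative predicate mirroring the check above; ogm_fits() is a hypothetical helper and BATADV_MAX_AGGREGATION_BYTES is assumed to be 512 as in main.h:

#include <stdbool.h>
#include <stdio.h>

#define BATADV_MAX_AGGREGATION_BYTES 512 /* assumption from main.h */

static bool ogm_fits(unsigned int mtu, unsigned int aggr_len,
		     unsigned int ogm_len)
{
	unsigned int max = mtu < BATADV_MAX_AGGREGATION_BYTES ?
			   mtu : BATADV_MAX_AGGREGATION_BYTES;

	return aggr_len + ogm_len <= max;
}

int main(void)
{
	printf("%d\n", ogm_fits(1500, 400, 100)); /* 1: 500 <= 512 */
	printf("%d\n", ogm_fits(1500, 450, 100)); /* 0: 550 >  512 */
	return 0;
}
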
+
+/**
+ * batadv_v_ogm_aggr_list_free() - free all elements in an aggregation queue
+ * @hard_iface: the interface holding the aggregation queue
+ *
+ * Empties the OGMv2 aggregation queue and frees all the skbs it contains.
+ *
+ * Caller needs to hold the hard_iface->bat_v.aggr_list.lock.
+ */
+static void batadv_v_ogm_aggr_list_free(struct batadv_hard_iface *hard_iface)
+{
+ lockdep_assert_held(&hard_iface->bat_v.aggr_list.lock);
+
+ __skb_queue_purge(&hard_iface->bat_v.aggr_list);
+ hard_iface->bat_v.aggr_len = 0;
+}
+
+/**
+ * batadv_v_ogm_aggr_send() - flush & send aggregation queue
+ * @hard_iface: the interface with the aggregation queue to flush
+ *
+ * Aggregates all OGMv2 packets currently in the aggregation queue into a
+ * single OGMv2 packet and transmits this aggregate.
+ *
+ * The aggregation queue is empty after this call.
+ *
+ * Caller needs to hold the hard_iface->bat_v.aggr_list.lock.
+ */
+static void batadv_v_ogm_aggr_send(struct batadv_hard_iface *hard_iface)
+{
+ unsigned int aggr_len = hard_iface->bat_v.aggr_len;
+ struct sk_buff *skb_aggr;
+ unsigned int ogm_len;
+ struct sk_buff *skb;
+
+ lockdep_assert_held(&hard_iface->bat_v.aggr_list.lock);
+
+ if (!aggr_len)
+ return;
+
+ skb_aggr = dev_alloc_skb(aggr_len + ETH_HLEN + NET_IP_ALIGN);
+ if (!skb_aggr) {
+ batadv_v_ogm_aggr_list_free(hard_iface);
+ return;
+ }
+
+ skb_reserve(skb_aggr, ETH_HLEN + NET_IP_ALIGN);
+ skb_reset_network_header(skb_aggr);
+
+ while ((skb = __skb_dequeue(&hard_iface->bat_v.aggr_list))) {
+ hard_iface->bat_v.aggr_len -= batadv_v_ogm_len(skb);
+
+ ogm_len = batadv_v_ogm_len(skb);
+ skb_put_data(skb_aggr, skb->data, ogm_len);
+
+ consume_skb(skb);
+ }
+
+ batadv_v_ogm_send_to_if(skb_aggr, hard_iface);
+}
+
+/**
+ * batadv_v_ogm_queue_on_if() - queue a batman ogm on a given interface
+ * @skb: the OGM to queue
+ * @hard_iface: the interface to queue the OGM on
+ */
+static void batadv_v_ogm_queue_on_if(struct sk_buff *skb,
+ struct batadv_hard_iface *hard_iface)
+{
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+
+ if (!atomic_read(&bat_priv->aggregated_ogms)) {
+ batadv_v_ogm_send_to_if(skb, hard_iface);
+ return;
+ }
+
+ spin_lock_bh(&hard_iface->bat_v.aggr_list.lock);
+ if (!batadv_v_ogm_queue_left(skb, hard_iface))
+ batadv_v_ogm_aggr_send(hard_iface);
+
+ hard_iface->bat_v.aggr_len += batadv_v_ogm_len(skb);
+ __skb_queue_tail(&hard_iface->bat_v.aggr_list, skb);
+ spin_unlock_bh(&hard_iface->bat_v.aggr_list.lock);
+}
+
+/**
+ * batadv_v_ogm_send_softif() - periodic worker broadcasting the node's own OGM
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_v_ogm_send_softif(struct batadv_priv *bat_priv)
+{
+ struct batadv_hard_iface *hard_iface;
+ struct batadv_ogm2_packet *ogm_packet;
+ struct sk_buff *skb, *skb_tmp;
+ unsigned char *ogm_buff;
+ int ogm_buff_len;
+ u16 tvlv_len = 0;
+ int ret;
+
+ lockdep_assert_held(&bat_priv->bat_v.ogm_buff_mutex);
+
+ if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
+ goto out;
+
+ ogm_buff = bat_priv->bat_v.ogm_buff;
+ ogm_buff_len = bat_priv->bat_v.ogm_buff_len;
+ /* tt changes have to be committed before the tvlv data is
+ * appended as it may alter the tt tvlv container
+ */
+ batadv_tt_local_commit_changes(bat_priv);
+ tvlv_len = batadv_tvlv_container_ogm_append(bat_priv, &ogm_buff,
+ &ogm_buff_len,
+ BATADV_OGM2_HLEN);
+
+ bat_priv->bat_v.ogm_buff = ogm_buff;
+ bat_priv->bat_v.ogm_buff_len = ogm_buff_len;
+
+ skb = netdev_alloc_skb_ip_align(NULL, ETH_HLEN + ogm_buff_len);
+ if (!skb)
+ goto reschedule;
+
+ skb_reserve(skb, ETH_HLEN);
+ skb_put_data(skb, ogm_buff, ogm_buff_len);
+
+ ogm_packet = (struct batadv_ogm2_packet *)skb->data;
+ ogm_packet->seqno = htonl(atomic_read(&bat_priv->bat_v.ogm_seqno));
+ atomic_inc(&bat_priv->bat_v.ogm_seqno);
+ ogm_packet->tvlv_len = htons(tvlv_len);
+
+ /* broadcast on every interface */
+ rcu_read_lock();
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ if (hard_iface->soft_iface != bat_priv->soft_iface)
+ continue;
+
+ if (!kref_get_unless_zero(&hard_iface->refcount))
+ continue;
+
+ ret = batadv_hardif_no_broadcast(hard_iface, NULL, NULL);
+ if (ret) {
+ char *type;
+
+ switch (ret) {
+ case BATADV_HARDIF_BCAST_NORECIPIENT:
+ type = "no neighbor";
+ break;
+ case BATADV_HARDIF_BCAST_DUPFWD:
+ type = "single neighbor is source";
+ break;
+ case BATADV_HARDIF_BCAST_DUPORIG:
+ type = "single neighbor is originator";
+ break;
+ default:
+ type = "unknown";
+ }
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "OGM2 from ourselves on %s suppressed: %s\n",
+ hard_iface->net_dev->name, type);
+
+ batadv_hardif_put(hard_iface);
+ continue;
+ }
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Sending own OGM2 packet (originator %pM, seqno %u, throughput %u, TTL %d) on interface %s [%pM]\n",
+ ogm_packet->orig, ntohl(ogm_packet->seqno),
+ ntohl(ogm_packet->throughput), ogm_packet->ttl,
+ hard_iface->net_dev->name,
+ hard_iface->net_dev->dev_addr);
+
+ /* this skb gets consumed by batadv_v_ogm_send_to_if() */
+ skb_tmp = skb_clone(skb, GFP_ATOMIC);
+ if (!skb_tmp) {
+ batadv_hardif_put(hard_iface);
+ break;
+ }
+
+ batadv_v_ogm_queue_on_if(skb_tmp, hard_iface);
+ batadv_hardif_put(hard_iface);
+ }
+ rcu_read_unlock();
+
+ consume_skb(skb);
+
+reschedule:
+ batadv_v_ogm_start_timer(bat_priv);
+out:
+ return;
+}
+
+/**
+ * batadv_v_ogm_send() - periodic worker broadcasting the node's own OGM
+ * @work: work queue item
+ */
+static void batadv_v_ogm_send(struct work_struct *work)
+{
+ struct batadv_priv_bat_v *bat_v;
+ struct batadv_priv *bat_priv;
+
+ bat_v = container_of(work, struct batadv_priv_bat_v, ogm_wq.work);
+ bat_priv = container_of(bat_v, struct batadv_priv, bat_v);
+
+ mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
+ batadv_v_ogm_send_softif(bat_priv);
+ mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
+}
+
+/**
+ * batadv_v_ogm_aggr_work() - OGM queue periodic task per interface
+ * @work: work queue item
+ *
+ * Emits aggregated OGM messages in regular intervals.
+ */
+void batadv_v_ogm_aggr_work(struct work_struct *work)
+{
+ struct batadv_hard_iface_bat_v *batv;
+ struct batadv_hard_iface *hard_iface;
+
+ batv = container_of(work, struct batadv_hard_iface_bat_v, aggr_wq.work);
+ hard_iface = container_of(batv, struct batadv_hard_iface, bat_v);
+
+ spin_lock_bh(&hard_iface->bat_v.aggr_list.lock);
+ batadv_v_ogm_aggr_send(hard_iface);
+ spin_unlock_bh(&hard_iface->bat_v.aggr_list.lock);
+
+ batadv_v_ogm_start_queue_timer(hard_iface);
+}
+
+/**
+ * batadv_v_ogm_iface_enable() - prepare an interface for B.A.T.M.A.N. V
+ * @hard_iface: the interface to prepare
+ *
+ * Takes care of scheduling its own OGM sending routine for this interface.
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int batadv_v_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
+{
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+
+ batadv_v_ogm_start_queue_timer(hard_iface);
+ batadv_v_ogm_start_timer(bat_priv);
+
+ return 0;
+}
+
+/**
+ * batadv_v_ogm_iface_disable() - release OGM interface private resources
+ * @hard_iface: interface for which the resources have to be released
+ */
+void batadv_v_ogm_iface_disable(struct batadv_hard_iface *hard_iface)
+{
+ cancel_delayed_work_sync(&hard_iface->bat_v.aggr_wq);
+
+ spin_lock_bh(&hard_iface->bat_v.aggr_list.lock);
+ batadv_v_ogm_aggr_list_free(hard_iface);
+ spin_unlock_bh(&hard_iface->bat_v.aggr_list.lock);
+}
+
+/**
+ * batadv_v_ogm_primary_iface_set() - set a new primary interface
+ * @primary_iface: the new primary interface
+ */
+void batadv_v_ogm_primary_iface_set(struct batadv_hard_iface *primary_iface)
+{
+ struct batadv_priv *bat_priv = netdev_priv(primary_iface->soft_iface);
+ struct batadv_ogm2_packet *ogm_packet;
+
+ mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
+ if (!bat_priv->bat_v.ogm_buff)
+ goto unlock;
+
+ ogm_packet = (struct batadv_ogm2_packet *)bat_priv->bat_v.ogm_buff;
+ ether_addr_copy(ogm_packet->orig, primary_iface->net_dev->dev_addr);
+
+unlock:
+ mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
+}
+
+/**
+ * batadv_v_forward_penalty() - apply a penalty to the throughput metric
+ * forwarded with B.A.T.M.A.N. V OGMs
+ * @bat_priv: the bat priv with all the soft interface information
+ * @if_incoming: the interface where the OGM has been received
+ * @if_outgoing: the interface where the OGM has to be forwarded to
+ * @throughput: the current throughput
+ *
+ * Apply a penalty on the current throughput metric value based on the
+ * characteristic of the interface where the OGM has been received.
+ *
+ * Initially the per hardif hop penalty is applied to the throughput. After
+ * that, the return value is computed as follows:
+ * - throughput * 50% if the incoming and outgoing interface are the
+ * same WiFi interface and the throughput is above
+ * 1MBit/s
+ * - throughput if the outgoing interface is the default
+ * interface (i.e. this OGM is processed for the
+ * internal table and not forwarded)
+ * - throughput * node hop penalty otherwise
+ *
+ * Return: the penalised throughput metric.
+ */
+static u32 batadv_v_forward_penalty(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *if_incoming,
+ struct batadv_hard_iface *if_outgoing,
+ u32 throughput)
+{
+ int if_hop_penalty = atomic_read(&if_incoming->hop_penalty);
+ int hop_penalty = atomic_read(&bat_priv->hop_penalty);
+ int hop_penalty_max = BATADV_TQ_MAX_VALUE;
+
+ /* Apply per hardif hop penalty */
+ throughput = throughput * (hop_penalty_max - if_hop_penalty) /
+ hop_penalty_max;
+
+ /* Don't apply hop penalty in default originator table. */
+ if (if_outgoing == BATADV_IF_DEFAULT)
+ return throughput;
+
+ /* Forwarding on the same WiFi interface cuts the throughput in half
+ * due to the store & forward characteristics of WIFI.
+ * Very low throughput values are the exception.
+ */
+ if (throughput > 10 &&
+ if_incoming == if_outgoing &&
+ !(if_incoming->bat_v.flags & BATADV_FULL_DUPLEX))
+ return throughput / 2;
+
+ /* hop penalty of 255 equals 100% */
+ return throughput * (hop_penalty_max - hop_penalty) / hop_penalty_max;
+}
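
A short worked example of the arithmetic above: with no per-hardif penalty and a node hop penalty of 30 out of 255 (assumed here to be the default), a path metric of 10 Mbit/s shrinks to roughly 8.8 Mbit/s per forwarded hop; the half-duplex WiFi case would instead halve it outright. Illustrative only:

#include <stdio.h>

#define BATADV_TQ_MAX_VALUE 255

int main(void)
{
	unsigned int throughput = 100; /* 10 Mbit/s in 100 kbit/s units */
	unsigned int if_hop_penalty = 0; /* example value */
	unsigned int hop_penalty = 30;   /* assumed default */

	/* per hardif penalty (a no-op with a penalty of 0) */
	throughput = throughput * (BATADV_TQ_MAX_VALUE - if_hop_penalty) /
		     BATADV_TQ_MAX_VALUE;

	/* node hop penalty: 100 * 225 / 255 = 88 -> ~8.8 Mbit/s */
	throughput = throughput * (BATADV_TQ_MAX_VALUE - hop_penalty) /
		     BATADV_TQ_MAX_VALUE;

	printf("forwarded metric: %u (x100 kbit/s)\n", throughput);
	return 0;
}
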
+
+/**
+ * batadv_v_ogm_forward() - check conditions and forward an OGM to the given
+ * outgoing interface
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ogm_received: previously received OGM to be forwarded
+ * @orig_node: the originator which has been updated
+ * @neigh_node: the neigh_node through which the OGM has been received
+ * @if_incoming: the interface on which this OGM was received on
+ * @if_outgoing: the interface to which the OGM has to be forwarded to
+ *
+ * Forward an OGM to an interface after having altered the throughput metric and
+ * the TTL value contained in it. The original OGM isn't modified.
+ */
+static void batadv_v_ogm_forward(struct batadv_priv *bat_priv,
+ const struct batadv_ogm2_packet *ogm_received,
+ struct batadv_orig_node *orig_node,
+ struct batadv_neigh_node *neigh_node,
+ struct batadv_hard_iface *if_incoming,
+ struct batadv_hard_iface *if_outgoing)
+{
+ struct batadv_neigh_ifinfo *neigh_ifinfo = NULL;
+ struct batadv_orig_ifinfo *orig_ifinfo = NULL;
+ struct batadv_neigh_node *router = NULL;
+ struct batadv_ogm2_packet *ogm_forward;
+ unsigned char *skb_buff;
+ struct sk_buff *skb;
+ size_t packet_len;
+ u16 tvlv_len;
+
+ /* only forward for specific interfaces, not for the default one. */
+ if (if_outgoing == BATADV_IF_DEFAULT)
+ goto out;
+
+ orig_ifinfo = batadv_orig_ifinfo_new(orig_node, if_outgoing);
+ if (!orig_ifinfo)
+ goto out;
+
+ /* acquire possibly updated router */
+ router = batadv_orig_router_get(orig_node, if_outgoing);
+
+ /* strict rule: forward packets coming from the best next hop only */
+ if (neigh_node != router)
+ goto out;
+
+ /* don't forward the same seqno twice on one interface */
+ if (orig_ifinfo->last_seqno_forwarded == ntohl(ogm_received->seqno))
+ goto out;
+
+ orig_ifinfo->last_seqno_forwarded = ntohl(ogm_received->seqno);
+
+ if (ogm_received->ttl <= 1) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n");
+ goto out;
+ }
+
+ neigh_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing);
+ if (!neigh_ifinfo)
+ goto out;
+
+ tvlv_len = ntohs(ogm_received->tvlv_len);
+
+ packet_len = BATADV_OGM2_HLEN + tvlv_len;
+ skb = netdev_alloc_skb_ip_align(if_outgoing->net_dev,
+ ETH_HLEN + packet_len);
+ if (!skb)
+ goto out;
+
+ skb_reserve(skb, ETH_HLEN);
+ skb_buff = skb_put_data(skb, ogm_received, packet_len);
+
+ /* apply forward penalty */
+ ogm_forward = (struct batadv_ogm2_packet *)skb_buff;
+ ogm_forward->throughput = htonl(neigh_ifinfo->bat_v.throughput);
+ ogm_forward->ttl--;
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Forwarding OGM2 packet on %s: throughput %u, ttl %u, received via %s\n",
+ if_outgoing->net_dev->name, ntohl(ogm_forward->throughput),
+ ogm_forward->ttl, if_incoming->net_dev->name);
+
+ batadv_v_ogm_queue_on_if(skb, if_outgoing);
+
+out:
+ batadv_orig_ifinfo_put(orig_ifinfo);
+ batadv_neigh_node_put(router);
+ batadv_neigh_ifinfo_put(neigh_ifinfo);
+}
+
+/**
+ * batadv_v_ogm_metric_update() - update route metric based on OGM
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ogm2: OGM2 structure
+ * @orig_node: Originator structure for which the OGM has been received
+ * @neigh_node: the neigh_node through which the OGM has been received
+ * @if_incoming: the interface where this packet was received
+ * @if_outgoing: the interface for which the packet should be considered
+ *
+ * Return:
+ * 1 if the OGM is new,
+ * 0 if it is not new but valid,
+ * <0 on error (e.g. old OGM)
+ */
+static int batadv_v_ogm_metric_update(struct batadv_priv *bat_priv,
+ const struct batadv_ogm2_packet *ogm2,
+ struct batadv_orig_node *orig_node,
+ struct batadv_neigh_node *neigh_node,
+ struct batadv_hard_iface *if_incoming,
+ struct batadv_hard_iface *if_outgoing)
+{
+ struct batadv_orig_ifinfo *orig_ifinfo;
+ struct batadv_neigh_ifinfo *neigh_ifinfo = NULL;
+ bool protection_started = false;
+ int ret = -EINVAL;
+ u32 path_throughput;
+ s32 seq_diff;
+
+ orig_ifinfo = batadv_orig_ifinfo_new(orig_node, if_outgoing);
+ if (!orig_ifinfo)
+ goto out;
+
+ seq_diff = ntohl(ogm2->seqno) - orig_ifinfo->last_real_seqno;
+
+ if (!hlist_empty(&orig_node->neigh_list) &&
+ batadv_window_protected(bat_priv, seq_diff,
+ BATADV_OGM_MAX_AGE,
+ &orig_ifinfo->batman_seqno_reset,
+ &protection_started)) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: packet within window protection time from %pM\n",
+ ogm2->orig);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Last reset: %ld, %ld\n",
+ orig_ifinfo->batman_seqno_reset, jiffies);
+ goto out;
+ }
+
+ /* drop packets with old seqnos; however, accept the first packet after
+ * a host has been rebooted.
+ */
+ if (seq_diff < 0 && !protection_started)
+ goto out;
+
+ neigh_node->last_seen = jiffies;
+
+ orig_node->last_seen = jiffies;
+
+ orig_ifinfo->last_real_seqno = ntohl(ogm2->seqno);
+ orig_ifinfo->last_ttl = ogm2->ttl;
+
+ neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing);
+ if (!neigh_ifinfo)
+ goto out;
+
+ path_throughput = batadv_v_forward_penalty(bat_priv, if_incoming,
+ if_outgoing,
+ ntohl(ogm2->throughput));
+ neigh_ifinfo->bat_v.throughput = path_throughput;
+ neigh_ifinfo->bat_v.last_seqno = ntohl(ogm2->seqno);
+ neigh_ifinfo->last_ttl = ogm2->ttl;
+
+ if (seq_diff > 0 || protection_started)
+ ret = 1;
+ else
+ ret = 0;
+out:
+ batadv_orig_ifinfo_put(orig_ifinfo);
+ batadv_neigh_ifinfo_put(neigh_ifinfo);
+
+ return ret;
+}
+
+/**
+ * batadv_v_ogm_route_update() - update routes based on OGM
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ethhdr: the Ethernet header of the OGM2
+ * @ogm2: OGM2 structure
+ * @orig_node: Originator structure for which the OGM has been received
+ * @neigh_node: the neigh_node through which the OGM has been received
+ * @if_incoming: the interface where this packet was received
+ * @if_outgoing: the interface for which the packet should be considered
+ *
+ * Return: true if the packet should be forwarded, false otherwise
+ */
+static bool batadv_v_ogm_route_update(struct batadv_priv *bat_priv,
+ const struct ethhdr *ethhdr,
+ const struct batadv_ogm2_packet *ogm2,
+ struct batadv_orig_node *orig_node,
+ struct batadv_neigh_node *neigh_node,
+ struct batadv_hard_iface *if_incoming,
+ struct batadv_hard_iface *if_outgoing)
+{
+ struct batadv_neigh_node *router = NULL;
+ struct batadv_orig_node *orig_neigh_node;
+ struct batadv_neigh_node *orig_neigh_router = NULL;
+ struct batadv_neigh_ifinfo *router_ifinfo = NULL, *neigh_ifinfo = NULL;
+ u32 router_throughput, neigh_throughput;
+ u32 router_last_seqno;
+ u32 neigh_last_seqno;
+ s32 neigh_seq_diff;
+ bool forward = false;
+
+ orig_neigh_node = batadv_v_ogm_orig_get(bat_priv, ethhdr->h_source);
+ if (!orig_neigh_node)
+ goto out;
+
+ orig_neigh_router = batadv_orig_router_get(orig_neigh_node,
+ if_outgoing);
+
+ /* drop packet if sender is not a direct neighbor and if we
+ * don't route towards it
+ */
+ router = batadv_orig_router_get(orig_node, if_outgoing);
+ if (router && router->orig_node != orig_node && !orig_neigh_router) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: OGM via unknown neighbor!\n");
+ goto out;
+ }
+
+ /* Mark the OGM to be considered for forwarding, and update routes
+ * if needed.
+ */
+ forward = true;
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Searching and updating originator entry of received packet\n");
+
+ /* if this neighbor already is our next hop there is nothing
+ * to change
+ */
+ if (router == neigh_node)
+ goto out;
+
+ /* don't consider neighbours with worse throughput.
+ * Also switch route if this seqno is BATADV_OGM_MAX_ORIGDIFF newer than
+ * the last received seqno from our best next hop.
+ */
+ if (router) {
+ router_ifinfo = batadv_neigh_ifinfo_get(router, if_outgoing);
+ neigh_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing);
+
+ /* if these are not allocated, something is wrong. */
+ if (!router_ifinfo || !neigh_ifinfo)
+ goto out;
+
+ neigh_last_seqno = neigh_ifinfo->bat_v.last_seqno;
+ router_last_seqno = router_ifinfo->bat_v.last_seqno;
+ neigh_seq_diff = neigh_last_seqno - router_last_seqno;
+ router_throughput = router_ifinfo->bat_v.throughput;
+ neigh_throughput = neigh_ifinfo->bat_v.throughput;
+
+ if (neigh_seq_diff < BATADV_OGM_MAX_ORIGDIFF &&
+ router_throughput >= neigh_throughput)
+ goto out;
+ }
+
+ batadv_update_route(bat_priv, orig_node, if_outgoing, neigh_node);
+out:
+ batadv_neigh_node_put(router);
+ batadv_neigh_node_put(orig_neigh_router);
+ batadv_orig_node_put(orig_neigh_node);
+ batadv_neigh_ifinfo_put(router_ifinfo);
+ batadv_neigh_ifinfo_put(neigh_ifinfo);
+
+ return forward;
+}
+
+/**
+ * batadv_v_ogm_process_per_outif() - process a batman v OGM for an outgoing if
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ethhdr: the Ethernet header of the OGM2
+ * @ogm2: OGM2 structure
+ * @orig_node: Originator structure for which the OGM has been received
+ * @neigh_node: the neigh_node through which the OGM has been received
+ * @if_incoming: the interface where this packet was received
+ * @if_outgoing: the interface for which the packet should be considered
+ */
+static void
+batadv_v_ogm_process_per_outif(struct batadv_priv *bat_priv,
+ const struct ethhdr *ethhdr,
+ const struct batadv_ogm2_packet *ogm2,
+ struct batadv_orig_node *orig_node,
+ struct batadv_neigh_node *neigh_node,
+ struct batadv_hard_iface *if_incoming,
+ struct batadv_hard_iface *if_outgoing)
+{
+ int seqno_age;
+ bool forward;
+
+ /* first, update the metric with the appropriate sanity checks */
+ seqno_age = batadv_v_ogm_metric_update(bat_priv, ogm2, orig_node,
+ neigh_node, if_incoming,
+ if_outgoing);
+
+ /* outdated sequence numbers are to be discarded */
+ if (seqno_age < 0)
+ return;
+
+ /* only unknown & newer OGMs contain TVLVs we are interested in */
+ if (seqno_age > 0 && if_outgoing == BATADV_IF_DEFAULT)
+ batadv_tvlv_containers_process(bat_priv, BATADV_OGM2, orig_node,
+ NULL,
+ (unsigned char *)(ogm2 + 1),
+ ntohs(ogm2->tvlv_len));
+
+ /* if the metric update went through, update routes if needed */
+ forward = batadv_v_ogm_route_update(bat_priv, ethhdr, ogm2, orig_node,
+ neigh_node, if_incoming,
+ if_outgoing);
+
+ /* if the routes have been processed correctly, check and forward */
+ if (forward)
+ batadv_v_ogm_forward(bat_priv, ogm2, orig_node, neigh_node,
+ if_incoming, if_outgoing);
+}
+
+/**
+ * batadv_v_ogm_aggr_packet() - checks if there is another OGM aggregated
+ * @buff_pos: current position in the skb
+ * @packet_len: total length of the skb
+ * @ogm2_packet: potential OGM2 in buffer
+ *
+ * Return: true if there is enough space for another OGM, false otherwise.
+ */
+static bool
+batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
+ const struct batadv_ogm2_packet *ogm2_packet)
+{
+ int next_buff_pos = 0;
+
+ /* check if there is enough space for the header */
+ next_buff_pos += buff_pos + sizeof(*ogm2_packet);
+ if (next_buff_pos > packet_len)
+ return false;
+
+ /* check if there is enough space for the optional TVLV */
+ next_buff_pos += ntohs(ogm2_packet->tvlv_len);
+
+ return (next_buff_pos <= packet_len) &&
+ (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
+}
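
This check is what lets several OGM2s ride in a single frame: each packet announces its own tvlv_len, so a receiver can hop from header to header until the remaining buffer is too short (the receive loop in batadv_v_ogm_packet_recv() below does exactly that). A heavily simplified userspace sketch of that walk; the two-byte ogm2_hdr here is an illustration only, the real struct batadv_ogm2_packet is defined in uapi/linux/batadv_packet.h, and the real check additionally bounds the TVLV data and the aggregation ceiling:

#include <stdio.h>

/* illustrative stand-in for struct batadv_ogm2_packet */
struct ogm2_hdr {
	unsigned short tvlv_len; /* host byte order, for simplicity */
};

int main(void)
{
	/* three aggregated OGMs with TVLV lengths 0, 8 and 4 */
	struct ogm2_hdr ogms[] = { { 0 }, { 8 }, { 4 } };
	unsigned int packet_len = 3 * sizeof(struct ogm2_hdr) + 0 + 8 + 4;
	unsigned int offset = 0;
	unsigned int i = 0;

	while (offset + sizeof(struct ogm2_hdr) <= packet_len) {
		printf("OGM at offset %u, tvlv_len %u\n", offset,
		       (unsigned int)ogms[i].tvlv_len);
		offset += sizeof(struct ogm2_hdr) + ogms[i].tvlv_len;
		i++;
	}
	return 0;
}
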
+
+/**
+ * batadv_v_ogm_process() - process an incoming batman v OGM
+ * @skb: the skb containing the OGM
+ * @ogm_offset: offset to the OGM which should be processed (for aggregates)
+ * @if_incoming: the interface where this packet was received
+ */
+static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset,
+ struct batadv_hard_iface *if_incoming)
+{
+ struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct ethhdr *ethhdr;
+ struct batadv_orig_node *orig_node = NULL;
+ struct batadv_hardif_neigh_node *hardif_neigh = NULL;
+ struct batadv_neigh_node *neigh_node = NULL;
+ struct batadv_hard_iface *hard_iface;
+ struct batadv_ogm2_packet *ogm_packet;
+ u32 ogm_throughput, link_throughput, path_throughput;
+ int ret;
+
+ ethhdr = eth_hdr(skb);
+ ogm_packet = (struct batadv_ogm2_packet *)(skb->data + ogm_offset);
+
+ ogm_throughput = ntohl(ogm_packet->throughput);
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Received OGM2 packet via NB: %pM, IF: %s [%pM] (from OG: %pM, seqno %u, throughput %u, TTL %u, V %u, tvlv_len %u)\n",
+ ethhdr->h_source, if_incoming->net_dev->name,
+ if_incoming->net_dev->dev_addr, ogm_packet->orig,
+ ntohl(ogm_packet->seqno), ogm_throughput, ogm_packet->ttl,
+ ogm_packet->version, ntohs(ogm_packet->tvlv_len));
+
+ if (batadv_is_my_mac(bat_priv, ogm_packet->orig)) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: originator packet from ourself\n");
+ return;
+ }
+
+ /* If the throughput metric is 0, immediately drop the packet. No need
+ * to create orig_node / neigh_node for an unusable route.
+ */
+ if (ogm_throughput == 0) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: originator packet with throughput metric of 0\n");
+ return;
+ }
+
+ /* require ELP packets to be received from this neighbor first */
+ hardif_neigh = batadv_hardif_neigh_get(if_incoming, ethhdr->h_source);
+ if (!hardif_neigh) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: OGM via unknown neighbor!\n");
+ goto out;
+ }
+
+ orig_node = batadv_v_ogm_orig_get(bat_priv, ogm_packet->orig);
+ if (!orig_node)
+ goto out;
+
+ neigh_node = batadv_neigh_node_get_or_create(orig_node, if_incoming,
+ ethhdr->h_source);
+ if (!neigh_node)
+ goto out;
+
+ /* Update the received throughput metric to match the link
+ * characteristic:
+ * - If this OGM traveled one hop so far (emitted by single hop
+ * neighbor) the path throughput metric equals the link throughput.
+ * - For OGMs traversing more than one hop the path throughput metric is
+ * the smaller of the path throughput and the link throughput.
+ */
+ link_throughput = ewma_throughput_read(&hardif_neigh->bat_v.throughput);
+ path_throughput = min_t(u32, link_throughput, ogm_throughput);
+ ogm_packet->throughput = htonl(path_throughput);
+
+ batadv_v_ogm_process_per_outif(bat_priv, ethhdr, ogm_packet, orig_node,
+ neigh_node, if_incoming,
+ BATADV_IF_DEFAULT);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ if (hard_iface->if_status != BATADV_IF_ACTIVE)
+ continue;
+
+ if (hard_iface->soft_iface != bat_priv->soft_iface)
+ continue;
+
+ if (!kref_get_unless_zero(&hard_iface->refcount))
+ continue;
+
+ ret = batadv_hardif_no_broadcast(hard_iface,
+ ogm_packet->orig,
+ hardif_neigh->orig);
+
+ if (ret) {
+ char *type;
+
+ switch (ret) {
+ case BATADV_HARDIF_BCAST_NORECIPIENT:
+ type = "no neighbor";
+ break;
+ case BATADV_HARDIF_BCAST_DUPFWD:
+ type = "single neighbor is source";
+ break;
+ case BATADV_HARDIF_BCAST_DUPORIG:
+ type = "single neighbor is originator";
+ break;
+ default:
+ type = "unknown";
+ }
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "OGM2 packet from %pM on %s suppressed: %s\n",
+ ogm_packet->orig, hard_iface->net_dev->name,
+ type);
+
+ batadv_hardif_put(hard_iface);
+ continue;
+ }
+
+ batadv_v_ogm_process_per_outif(bat_priv, ethhdr, ogm_packet,
+ orig_node, neigh_node,
+ if_incoming, hard_iface);
+
+ batadv_hardif_put(hard_iface);
+ }
+ rcu_read_unlock();
+out:
+ batadv_orig_node_put(orig_node);
+ batadv_neigh_node_put(neigh_node);
+ batadv_hardif_neigh_put(hardif_neigh);
+}
+
+/**
+ * batadv_v_ogm_packet_recv() - OGM2 receiving handler
+ * @skb: the received OGM
+ * @if_incoming: the interface where this OGM has been received
+ *
+ * Return: NET_RX_SUCCESS and consumes the skb if the packet was properly
+ * processed or NET_RX_DROP in case of failure
+ */
+int batadv_v_ogm_packet_recv(struct sk_buff *skb,
+ struct batadv_hard_iface *if_incoming)
+{
+ struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct batadv_ogm2_packet *ogm_packet;
+ struct ethhdr *ethhdr;
+ int ogm_offset;
+ u8 *packet_pos;
+ int ret = NET_RX_DROP;
+
+ /* did we receive an OGM2 packet on an interface that does not have
+ * B.A.T.M.A.N. V enabled?
+ */
+ if (strcmp(bat_priv->algo_ops->name, "BATMAN_V") != 0)
+ goto free_skb;
+
+ if (!batadv_check_management_packet(skb, if_incoming, BATADV_OGM2_HLEN))
+ goto free_skb;
+
+ ethhdr = eth_hdr(skb);
+ if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
+ goto free_skb;
+
+ batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX);
+ batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES,
+ skb->len + ETH_HLEN);
+
+ ogm_offset = 0;
+ ogm_packet = (struct batadv_ogm2_packet *)skb->data;
+
+ while (batadv_v_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
+ ogm_packet)) {
+ batadv_v_ogm_process(skb, ogm_offset, if_incoming);
+
+ ogm_offset += BATADV_OGM2_HLEN;
+ ogm_offset += ntohs(ogm_packet->tvlv_len);
+
+ packet_pos = skb->data + ogm_offset;
+ ogm_packet = (struct batadv_ogm2_packet *)packet_pos;
+ }
+
+ ret = NET_RX_SUCCESS;
+
+free_skb:
+ if (ret == NET_RX_SUCCESS)
+ consume_skb(skb);
+ else
+ kfree_skb(skb);
+
+ return ret;
+}
+
+/**
+ * batadv_v_ogm_init() - initialise the OGM2 engine
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 on success or a negative error code in case of failure
+ */
+int batadv_v_ogm_init(struct batadv_priv *bat_priv)
+{
+ struct batadv_ogm2_packet *ogm_packet;
+ unsigned char *ogm_buff;
+ u32 random_seqno;
+
+ bat_priv->bat_v.ogm_buff_len = BATADV_OGM2_HLEN;
+ ogm_buff = kzalloc(bat_priv->bat_v.ogm_buff_len, GFP_ATOMIC);
+ if (!ogm_buff)
+ return -ENOMEM;
+
+ bat_priv->bat_v.ogm_buff = ogm_buff;
+ ogm_packet = (struct batadv_ogm2_packet *)ogm_buff;
+ ogm_packet->packet_type = BATADV_OGM2;
+ ogm_packet->version = BATADV_COMPAT_VERSION;
+ ogm_packet->ttl = BATADV_TTL;
+ ogm_packet->flags = BATADV_NO_FLAGS;
+ ogm_packet->throughput = htonl(BATADV_THROUGHPUT_MAX_VALUE);
+
+ /* randomize initial seqno to avoid collision */
+ get_random_bytes(&random_seqno, sizeof(random_seqno));
+ atomic_set(&bat_priv->bat_v.ogm_seqno, random_seqno);
+ INIT_DELAYED_WORK(&bat_priv->bat_v.ogm_wq, batadv_v_ogm_send);
+
+ mutex_init(&bat_priv->bat_v.ogm_buff_mutex);
+
+ return 0;
+}
+
+/**
+ * batadv_v_ogm_free() - free OGM private resources
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_v_ogm_free(struct batadv_priv *bat_priv)
+{
+ cancel_delayed_work_sync(&bat_priv->bat_v.ogm_wq);
+
+ mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
+
+ kfree(bat_priv->bat_v.ogm_buff);
+ bat_priv->bat_v.ogm_buff = NULL;
+ bat_priv->bat_v.ogm_buff_len = 0;
+
+ mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
+}
diff --git a/net/batman-adv/bat_v_ogm.h b/net/batman-adv/bat_v_ogm.h
new file mode 100644
index 0000000000..edeffedeca
--- /dev/null
+++ b/net/batman-adv/bat_v_ogm.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Antonio Quartulli
+ */
+
+#ifndef _NET_BATMAN_ADV_BAT_V_OGM_H_
+#define _NET_BATMAN_ADV_BAT_V_OGM_H_
+
+#include "main.h"
+
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+int batadv_v_ogm_init(struct batadv_priv *bat_priv);
+void batadv_v_ogm_free(struct batadv_priv *bat_priv);
+void batadv_v_ogm_aggr_work(struct work_struct *work);
+int batadv_v_ogm_iface_enable(struct batadv_hard_iface *hard_iface);
+void batadv_v_ogm_iface_disable(struct batadv_hard_iface *hard_iface);
+struct batadv_orig_node *batadv_v_ogm_orig_get(struct batadv_priv *bat_priv,
+ const u8 *addr);
+void batadv_v_ogm_primary_iface_set(struct batadv_hard_iface *primary_iface);
+int batadv_v_ogm_packet_recv(struct sk_buff *skb,
+ struct batadv_hard_iface *if_incoming);
+
+#endif /* _NET_BATMAN_ADV_BAT_V_OGM_H_ */
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
new file mode 100644
index 0000000000..649c41f393
--- /dev/null
+++ b/net/batman-adv/bitarray.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Simon Wunderlich, Marek Lindner
+ */
+
+#include "bitarray.h"
+#include "main.h"
+
+#include <linux/bitmap.h>
+
+#include "log.h"
+
+/* shift the packet array by n places. */
+static void batadv_bitmap_shift_left(unsigned long *seq_bits, s32 n)
+{
+ if (n <= 0 || n >= BATADV_TQ_LOCAL_WINDOW_SIZE)
+ return;
+
+ bitmap_shift_left(seq_bits, seq_bits, n, BATADV_TQ_LOCAL_WINDOW_SIZE);
+}
+
+/**
+ * batadv_bit_get_packet() - receive and process one packet within the sequence
+ * number window
+ * @priv: the bat priv with all the soft interface information
+ * @seq_bits: pointer to the sequence number receive window bitmap
+ * @seq_num_diff: difference between the current/received sequence number and
+ * the last sequence number
+ * @set_mark: whether this packet should be marked in seq_bits
+ *
+ * Return: true if the window was moved (either new or very old),
+ * false if the window was not moved/shifted.
+ */
+bool batadv_bit_get_packet(void *priv, unsigned long *seq_bits,
+ s32 seq_num_diff, int set_mark)
+{
+ struct batadv_priv *bat_priv = priv;
+
+ /* sequence number is slightly older. We already got a sequence number
+ * higher than this one, so we just mark it.
+ */
+ if (seq_num_diff <= 0 && seq_num_diff > -BATADV_TQ_LOCAL_WINDOW_SIZE) {
+ if (set_mark)
+ batadv_set_bit(seq_bits, -seq_num_diff);
+ return false;
+ }
+
+ /* sequence number is slightly newer, so we shift the window and
+ * set the mark if required
+ */
+ if (seq_num_diff > 0 && seq_num_diff < BATADV_TQ_LOCAL_WINDOW_SIZE) {
+ batadv_bitmap_shift_left(seq_bits, seq_num_diff);
+
+ if (set_mark)
+ batadv_set_bit(seq_bits, 0);
+ return true;
+ }
+
+ /* sequence number is much newer, probably missed a lot of packets */
+ if (seq_num_diff >= BATADV_TQ_LOCAL_WINDOW_SIZE &&
+ seq_num_diff < BATADV_EXPECTED_SEQNO_RANGE) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "We missed a lot of packets (%i) !\n",
+ seq_num_diff - 1);
+ bitmap_zero(seq_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
+ if (set_mark)
+ batadv_set_bit(seq_bits, 0);
+ return true;
+ }
+
+ /* received a much older packet. The other host either restarted
+ * or the old packet got delayed somewhere in the network. The
+ * packet should be dropped without calling this function if the
+ * seqno window is protected.
+ *
+ * seq_num_diff <= -BATADV_TQ_LOCAL_WINDOW_SIZE
+ * or
+ * seq_num_diff >= BATADV_EXPECTED_SEQNO_RANGE
+ */
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Other host probably restarted!\n");
+
+ bitmap_zero(seq_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
+ if (set_mark)
+ batadv_set_bit(seq_bits, 0);
+
+ return true;
+}
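+
+/* A worked example of the window handling above, assuming the default
+ * BATADV_TQ_LOCAL_WINDOW_SIZE of 64 and BATADV_EXPECTED_SEQNO_RANGE of
+ * 65536 (seqno values illustrative); let the newest seqno seen be 100:
+ * - seqno 97 arrives: seq_num_diff == -3, bit 3 is marked, no shift,
+ *   returns false
+ * - seqno 102 arrives: seq_num_diff == 2, the bitmap is shifted by two
+ *   places, bit 0 is marked, returns true
+ * - seqno 1100 arrives: seq_num_diff == 1000, the window is wiped and
+ *   restarted at the new seqno, returns true
+ * - seqno 20 arrives: seq_num_diff == -80, treated as a probable restart
+ *   of the other host, window wiped and restarted, returns true
+ */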
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
new file mode 100644
index 0000000000..37f7ae413b
--- /dev/null
+++ b/net/batman-adv/bitarray.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Simon Wunderlich, Marek Lindner
+ */
+
+#ifndef _NET_BATMAN_ADV_BITARRAY_H_
+#define _NET_BATMAN_ADV_BITARRAY_H_
+
+#include "main.h"
+
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+/**
+ * batadv_test_bit() - check if bit is set in the current window
+ *
+ * @seq_bits: pointer to the sequence number receive window bitmap
+ * @last_seqno: latest sequence number in seq_bits
+ * @curr_seqno: sequence number to test for
+ *
+ * Return: true if the corresponding bit in the given seq_bits is set and
+ * curr_seqno is within range of last_seqno, false otherwise.
+ */
+static inline bool batadv_test_bit(const unsigned long *seq_bits,
+ u32 last_seqno, u32 curr_seqno)
+{
+ s32 diff;
+
+ diff = last_seqno - curr_seqno;
+ if (diff < 0 || diff >= BATADV_TQ_LOCAL_WINDOW_SIZE)
+ return false;
+ return test_bit(diff, seq_bits) != 0;
+}
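+
+/* For illustration: with last_seqno == 42, curr_seqno == 40 tests bit 2 of
+ * the window; curr_seqno == 43 (newer than last_seqno) or anything more
+ * than BATADV_TQ_LOCAL_WINDOW_SIZE - 1 behind yields false.
+ */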
+
+/**
+ * batadv_set_bit() - Turn corresponding bit on, so we can remember that we got
+ * the packet
+ * @seq_bits: bitmap of the packet receive window
+ * @n: relative sequence number of newly received packet
+ */
+static inline void batadv_set_bit(unsigned long *seq_bits, s32 n)
+{
+ /* if too old, just drop it */
+ if (n < 0 || n >= BATADV_TQ_LOCAL_WINDOW_SIZE)
+ return;
+
+ set_bit(n, seq_bits); /* turn the position on */
+}
+
+bool batadv_bit_get_packet(void *priv, unsigned long *seq_bits,
+ s32 seq_num_diff, int set_mark);
+
+#endif /* _NET_BATMAN_ADV_BITARRAY_H_ */
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
new file mode 100644
index 0000000000..37ce6cfb35
--- /dev/null
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -0,0 +1,2502 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Simon Wunderlich
+ */
+
+#include "bridge_loop_avoidance.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/byteorder/generic.h>
+#include <linux/compiler.h>
+#include <linux/container_of.h>
+#include <linux/crc16.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/gfp.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/jhash.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/netdevice.h>
+#include <linux/netlink.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <net/arp.h>
+#include <net/genetlink.h>
+#include <net/netlink.h>
+#include <net/sock.h>
+#include <uapi/linux/batadv_packet.h>
+#include <uapi/linux/batman_adv.h>
+
+#include "hard-interface.h"
+#include "hash.h"
+#include "log.h"
+#include "netlink.h"
+#include "originator.h"
+#include "soft-interface.h"
+#include "translation-table.h"
+
+static const u8 batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
+
+static void batadv_bla_periodic_work(struct work_struct *work);
+static void
+batadv_bla_send_announce(struct batadv_priv *bat_priv,
+ struct batadv_bla_backbone_gw *backbone_gw);
+
+/**
+ * batadv_choose_claim() - choose the right bucket for a claim.
+ * @data: data to hash
+ * @size: size of the hash table
+ *
+ * Return: the hash index of the claim
+ */
+static inline u32 batadv_choose_claim(const void *data, u32 size)
+{
+ const struct batadv_bla_claim *claim = data;
+ u32 hash = 0;
+
+ hash = jhash(&claim->addr, sizeof(claim->addr), hash);
+ hash = jhash(&claim->vid, sizeof(claim->vid), hash);
+
+ return hash % size;
+}
+
+/**
+ * batadv_choose_backbone_gw() - choose the right bucket for a backbone gateway.
+ * @data: data to hash
+ * @size: size of the hash table
+ *
+ * Return: the hash index of the backbone gateway
+ */
+static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
+{
+ const struct batadv_bla_backbone_gw *gw;
+ u32 hash = 0;
+
+ gw = data;
+ hash = jhash(&gw->orig, sizeof(gw->orig), hash);
+ hash = jhash(&gw->vid, sizeof(gw->vid), hash);
+
+ return hash % size;
+}
+
+/**
+ * batadv_compare_backbone_gw() - compare address and vid of two backbone gws
+ * @node: list node of the first entry to compare
+ * @data2: pointer to the second backbone gateway
+ *
+ * Return: true if the backbones have the same data, false otherwise
+ */
+static bool batadv_compare_backbone_gw(const struct hlist_node *node,
+ const void *data2)
+{
+ const void *data1 = container_of(node, struct batadv_bla_backbone_gw,
+ hash_entry);
+ const struct batadv_bla_backbone_gw *gw1 = data1;
+ const struct batadv_bla_backbone_gw *gw2 = data2;
+
+ if (!batadv_compare_eth(gw1->orig, gw2->orig))
+ return false;
+
+ if (gw1->vid != gw2->vid)
+ return false;
+
+ return true;
+}
+
+/**
+ * batadv_compare_claim() - compare address and vid of two claims
+ * @node: list node of the first entry to compare
+ * @data2: pointer to the second claim
+ *
+ * Return: true if the claims have the same data, false otherwise
+ */
+static bool batadv_compare_claim(const struct hlist_node *node,
+ const void *data2)
+{
+ const void *data1 = container_of(node, struct batadv_bla_claim,
+ hash_entry);
+ const struct batadv_bla_claim *cl1 = data1;
+ const struct batadv_bla_claim *cl2 = data2;
+
+ if (!batadv_compare_eth(cl1->addr, cl2->addr))
+ return false;
+
+ if (cl1->vid != cl2->vid)
+ return false;
+
+ return true;
+}
+
+/**
+ * batadv_backbone_gw_release() - release backbone gw from lists and queue for
+ * free after rcu grace period
+ * @ref: kref pointer of the backbone gw
+ */
+static void batadv_backbone_gw_release(struct kref *ref)
+{
+ struct batadv_bla_backbone_gw *backbone_gw;
+
+ backbone_gw = container_of(ref, struct batadv_bla_backbone_gw,
+ refcount);
+
+ kfree_rcu(backbone_gw, rcu);
+}
+
+/**
+ * batadv_backbone_gw_put() - decrement the backbone gw refcounter and possibly
+ * release it
+ * @backbone_gw: backbone gateway to be freed
+ */
+static void batadv_backbone_gw_put(struct batadv_bla_backbone_gw *backbone_gw)
+{
+ if (!backbone_gw)
+ return;
+
+ kref_put(&backbone_gw->refcount, batadv_backbone_gw_release);
+}
+
+/**
+ * batadv_claim_release() - release claim from lists and queue for free after
+ * rcu grace period
+ * @ref: kref pointer of the claim
+ */
+static void batadv_claim_release(struct kref *ref)
+{
+ struct batadv_bla_claim *claim;
+ struct batadv_bla_backbone_gw *old_backbone_gw;
+
+ claim = container_of(ref, struct batadv_bla_claim, refcount);
+
+ spin_lock_bh(&claim->backbone_lock);
+ old_backbone_gw = claim->backbone_gw;
+ claim->backbone_gw = NULL;
+ spin_unlock_bh(&claim->backbone_lock);
+
+ spin_lock_bh(&old_backbone_gw->crc_lock);
+ old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
+ spin_unlock_bh(&old_backbone_gw->crc_lock);
+
+ batadv_backbone_gw_put(old_backbone_gw);
+
+ kfree_rcu(claim, rcu);
+}
+
+/**
+ * batadv_claim_put() - decrement the claim refcounter and possibly release it
+ * @claim: claim to be freed
+ */
+static void batadv_claim_put(struct batadv_bla_claim *claim)
+{
+ if (!claim)
+ return;
+
+ kref_put(&claim->refcount, batadv_claim_release);
+}
+
+/**
+ * batadv_claim_hash_find() - looks for a claim in the claim hash
+ * @bat_priv: the bat priv with all the soft interface information
+ * @data: search data (may be local/static data)
+ *
+ * Return: claim if found or NULL otherwise.
+ */
+static struct batadv_bla_claim *
+batadv_claim_hash_find(struct batadv_priv *bat_priv,
+ struct batadv_bla_claim *data)
+{
+ struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
+ struct hlist_head *head;
+ struct batadv_bla_claim *claim;
+ struct batadv_bla_claim *claim_tmp = NULL;
+ int index;
+
+ if (!hash)
+ return NULL;
+
+ index = batadv_choose_claim(data, hash->size);
+ head = &hash->table[index];
+
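+ /* walk the bucket under RCU; kref_get_unless_zero() skips entries
+ * whose refcount already dropped to zero and which are merely
+ * waiting for their RCU grace period to be freed
+ */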
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(claim, head, hash_entry) {
+ if (!batadv_compare_claim(&claim->hash_entry, data))
+ continue;
+
+ if (!kref_get_unless_zero(&claim->refcount))
+ continue;
+
+ claim_tmp = claim;
+ break;
+ }
+ rcu_read_unlock();
+
+ return claim_tmp;
+}
+
+/**
+ * batadv_backbone_hash_find() - looks for a backbone gateway in the hash
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the address of the originator
+ * @vid: the VLAN ID
+ *
+ * Return: backbone gateway if found or NULL otherwise
+ */
+static struct batadv_bla_backbone_gw *
+batadv_backbone_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
+ unsigned short vid)
+{
+ struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
+ struct hlist_head *head;
+ struct batadv_bla_backbone_gw search_entry, *backbone_gw;
+ struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL;
+ int index;
+
+ if (!hash)
+ return NULL;
+
+ ether_addr_copy(search_entry.orig, addr);
+ search_entry.vid = vid;
+
+ index = batadv_choose_backbone_gw(&search_entry, hash->size);
+ head = &hash->table[index];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
+ if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
+ &search_entry))
+ continue;
+
+ if (!kref_get_unless_zero(&backbone_gw->refcount))
+ continue;
+
+ backbone_gw_tmp = backbone_gw;
+ break;
+ }
+ rcu_read_unlock();
+
+ return backbone_gw_tmp;
+}
+
+/**
+ * batadv_bla_del_backbone_claims() - delete all claims for a backbone
+ * @backbone_gw: backbone gateway where the claims should be removed
+ */
+static void
+batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
+{
+ struct batadv_hashtable *hash;
+ struct hlist_node *node_tmp;
+ struct hlist_head *head;
+ struct batadv_bla_claim *claim;
+ int i;
+ spinlock_t *list_lock; /* protects write access to the hash lists */
+
+ hash = backbone_gw->bat_priv->bla.claim_hash;
+ if (!hash)
+ return;
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+ list_lock = &hash->list_locks[i];
+
+ spin_lock_bh(list_lock);
+ hlist_for_each_entry_safe(claim, node_tmp,
+ head, hash_entry) {
+ if (claim->backbone_gw != backbone_gw)
+ continue;
+
+ batadv_claim_put(claim);
+ hlist_del_rcu(&claim->hash_entry);
+ }
+ spin_unlock_bh(list_lock);
+ }
+
+ /* all claims gone, initialize CRC */
+ spin_lock_bh(&backbone_gw->crc_lock);
+ backbone_gw->crc = BATADV_BLA_CRC_INIT;
+ spin_unlock_bh(&backbone_gw->crc_lock);
+}
+
+/**
+ * batadv_bla_send_claim() - sends a claim frame according to the provided info
+ * @bat_priv: the bat priv with all the soft interface information
+ * @mac: the mac address to be announced within the claim
+ * @vid: the VLAN ID
+ * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
+ */
+static void batadv_bla_send_claim(struct batadv_priv *bat_priv, const u8 *mac,
+ unsigned short vid, int claimtype)
+{
+ struct sk_buff *skb;
+ struct ethhdr *ethhdr;
+ struct batadv_hard_iface *primary_if;
+ struct net_device *soft_iface;
+ u8 *hw_src;
+ struct batadv_bla_claim_dst local_claim_dest;
+ __be32 zeroip = 0;
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ return;
+
+ memcpy(&local_claim_dest, &bat_priv->bla.claim_dest,
+ sizeof(local_claim_dest));
+ local_claim_dest.type = claimtype;
+
+ soft_iface = primary_if->soft_iface;
+
+ skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
+ /* IP DST: 0.0.0.0 */
+ zeroip,
+ primary_if->soft_iface,
+ /* IP SRC: 0.0.0.0 */
+ zeroip,
+ /* Ethernet DST: Broadcast */
+ NULL,
+ /* Ethernet SRC/HW SRC: originator mac */
+ primary_if->net_dev->dev_addr,
+ /* HW DST: FF:43:05:XX:YY:YY
+ * with XX = claim type
+ * and YY:YY = group id
+ */
+ (u8 *)&local_claim_dest);
+
+ if (!skb)
+ goto out;
+
+ ethhdr = (struct ethhdr *)skb->data;
+ hw_src = (u8 *)ethhdr + ETH_HLEN + sizeof(struct arphdr);
+
+ /* now we pretend that the client would have sent this ... */
+ switch (claimtype) {
+ case BATADV_CLAIM_TYPE_CLAIM:
+ /* normal claim frame
+ * set Ethernet SRC to the client's mac
+ */
+ ether_addr_copy(ethhdr->h_source, mac);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "%s(): CLAIM %pM on vid %d\n", __func__, mac,
+ batadv_print_vid(vid));
+ break;
+ case BATADV_CLAIM_TYPE_UNCLAIM:
+ /* unclaim frame
+ * set HW SRC to the client's mac
+ */
+ ether_addr_copy(hw_src, mac);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "%s(): UNCLAIM %pM on vid %d\n", __func__, mac,
+ batadv_print_vid(vid));
+ break;
+ case BATADV_CLAIM_TYPE_ANNOUNCE:
+ /* announcement frame
+ * set HW SRC to the special mac containing the crc
+ */
+ ether_addr_copy(hw_src, mac);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "%s(): ANNOUNCE of %pM on vid %d\n", __func__,
+ ethhdr->h_source, batadv_print_vid(vid));
+ break;
+ case BATADV_CLAIM_TYPE_REQUEST:
+ /* request frame
+ * set HW SRC and header destination to the receiving backbone
+ * gw's mac
+ */
+ ether_addr_copy(hw_src, mac);
+ ether_addr_copy(ethhdr->h_dest, mac);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "%s(): REQUEST of %pM to %pM on vid %d\n", __func__,
+ ethhdr->h_source, ethhdr->h_dest,
+ batadv_print_vid(vid));
+ break;
+ case BATADV_CLAIM_TYPE_LOOPDETECT:
+ ether_addr_copy(ethhdr->h_source, mac);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "%s(): LOOPDETECT of %pM to %pM on vid %d\n",
+ __func__, ethhdr->h_source, ethhdr->h_dest,
+ batadv_print_vid(vid));
+
+ break;
+ }
+
+ if (vid & BATADV_VLAN_HAS_TAG) {
+ skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
+ vid & VLAN_VID_MASK);
+ if (!skb)
+ goto out;
+ }
+
+ skb_reset_mac_header(skb);
+ skb->protocol = eth_type_trans(skb, soft_iface);
+ batadv_inc_counter(bat_priv, BATADV_CNT_RX);
+ batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
+ skb->len + ETH_HLEN);
+
+ netif_rx(skb);
+out:
+ batadv_hardif_put(primary_if);
+}
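+
+/* Illustration (group id made up): with group id 0x1234 the crafted ARP
+ * reply above carries a HW DST of ff:43:05:<type>:12:34 - the three magic
+ * bytes, the claim type and the group id in network byte order. Injecting
+ * the frame via netif_rx() on the soft interface makes it appear as
+ * received on e.g. bat0, so an attached bridge forwards it into the LAN.
+ */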
+
+/**
+ * batadv_bla_loopdetect_report() - worker for reporting the loop
+ * @work: work queue item
+ *
+ * Throws an uevent, as the loopdetect check function can't do that itself
+ * since the kernel may sleep while throwing uevents.
+ */
+static void batadv_bla_loopdetect_report(struct work_struct *work)
+{
+ struct batadv_bla_backbone_gw *backbone_gw;
+ struct batadv_priv *bat_priv;
+ char vid_str[6] = { '\0' };
+
+ backbone_gw = container_of(work, struct batadv_bla_backbone_gw,
+ report_work);
+ bat_priv = backbone_gw->bat_priv;
+
+ batadv_info(bat_priv->soft_iface,
+ "Possible loop on VLAN %d detected which can't be handled by BLA - please check your network setup!\n",
+ batadv_print_vid(backbone_gw->vid));
+ snprintf(vid_str, sizeof(vid_str), "%d",
+ batadv_print_vid(backbone_gw->vid));
+ vid_str[sizeof(vid_str) - 1] = 0;
+
+ batadv_throw_uevent(bat_priv, BATADV_UEV_BLA, BATADV_UEV_LOOPDETECT,
+ vid_str);
+
+ batadv_backbone_gw_put(backbone_gw);
+}
+
+/**
+ * batadv_bla_get_backbone_gw() - finds or creates a backbone gateway
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the mac address of the originator
+ * @vid: the VLAN ID
+ * @own_backbone: set if the requested backbone is local
+ *
+ * Return: the (possibly created) backbone gateway or NULL on error
+ */
+static struct batadv_bla_backbone_gw *
+batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, const u8 *orig,
+ unsigned short vid, bool own_backbone)
+{
+ struct batadv_bla_backbone_gw *entry;
+ struct batadv_orig_node *orig_node;
+ int hash_added;
+
+ entry = batadv_backbone_hash_find(bat_priv, orig, vid);
+
+ if (entry)
+ return entry;
+
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "%s(): not found (%pM, %d), creating new entry\n", __func__,
+ orig, batadv_print_vid(vid));
+
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry)
+ return NULL;
+
+ entry->vid = vid;
+ entry->lasttime = jiffies;
+ entry->crc = BATADV_BLA_CRC_INIT;
+ entry->bat_priv = bat_priv;
+ spin_lock_init(&entry->crc_lock);
+ atomic_set(&entry->request_sent, 0);
+ atomic_set(&entry->wait_periods, 0);
+ ether_addr_copy(entry->orig, orig);
+ INIT_WORK(&entry->report_work, batadv_bla_loopdetect_report);
+ kref_init(&entry->refcount);
+
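+ /* extra reference for the hash table; the initial reference from
+ * kref_init() above is handed back to the caller
+ */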
+ kref_get(&entry->refcount);
+ hash_added = batadv_hash_add(bat_priv->bla.backbone_hash,
+ batadv_compare_backbone_gw,
+ batadv_choose_backbone_gw, entry,
+ &entry->hash_entry);
+
+ if (unlikely(hash_added != 0)) {
+ /* hash failed, free the structure */
+ kfree(entry);
+ return NULL;
+ }
+
+ /* this is a gateway now, remove any TT entry on this VLAN */
+ orig_node = batadv_orig_hash_find(bat_priv, orig);
+ if (orig_node) {
+ batadv_tt_global_del_orig(bat_priv, orig_node, vid,
+ "became a backbone gateway");
+ batadv_orig_node_put(orig_node);
+ }
+
+ if (own_backbone) {
+ batadv_bla_send_announce(bat_priv, entry);
+
+ /* this will be decreased in the worker thread */
+ atomic_inc(&entry->request_sent);
+ atomic_set(&entry->wait_periods, BATADV_BLA_WAIT_PERIODS);
+ atomic_inc(&bat_priv->bla.num_requests);
+ }
+
+ return entry;
+}
+
+/**
+ * batadv_bla_update_own_backbone_gw() - updates the own backbone gw for a VLAN
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the selected primary interface
+ * @vid: VLAN identifier
+ *
+ * Update or add our own backbone gw to make sure we announce ourselves
+ * wherever we receive other backbone gws.
+ */
+static void
+batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ unsigned short vid)
+{
+ struct batadv_bla_backbone_gw *backbone_gw;
+
+ backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
+ primary_if->net_dev->dev_addr,
+ vid, true);
+ if (unlikely(!backbone_gw))
+ return;
+
+ backbone_gw->lasttime = jiffies;
+ batadv_backbone_gw_put(backbone_gw);
+}
+
+/**
+ * batadv_bla_answer_request() - answer a bla request by sending own claims
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: interface where the request came on
+ * @vid: the vid where the request came on
+ *
+ * Repeat all of our own claims, and finally send an ANNOUNCE frame
+ * to allow the requester another check if the CRC is correct now.
+ */
+static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ unsigned short vid)
+{
+ struct hlist_head *head;
+ struct batadv_hashtable *hash;
+ struct batadv_bla_claim *claim;
+ struct batadv_bla_backbone_gw *backbone_gw;
+ int i;
+
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "%s(): received a claim request, send all of our own claims again\n",
+ __func__);
+
+ backbone_gw = batadv_backbone_hash_find(bat_priv,
+ primary_if->net_dev->dev_addr,
+ vid);
+ if (!backbone_gw)
+ return;
+
+ hash = bat_priv->bla.claim_hash;
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(claim, head, hash_entry) {
+ /* only own claims are interesting */
+ if (claim->backbone_gw != backbone_gw)
+ continue;
+
+ batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
+ BATADV_CLAIM_TYPE_CLAIM);
+ }
+ rcu_read_unlock();
+ }
+
+ /* finally, send an announcement frame */
+ batadv_bla_send_announce(bat_priv, backbone_gw);
+ batadv_backbone_gw_put(backbone_gw);
+}
+
+/**
+ * batadv_bla_send_request() - send a request to repeat claims
+ * @backbone_gw: the backbone gateway from whom we are out of sync
+ *
+ * When the crc is wrong, ask the backbone gateway for a full table update.
+ * After the request, it will repeat all of its own claims and finally
+ * send an announcement claim with which we can check again.
+ */
+static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw)
+{
+ /* first, remove all old entries */
+ batadv_bla_del_backbone_claims(backbone_gw);
+
+ batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
+ "Sending REQUEST to %pM\n", backbone_gw->orig);
+
+ /* send request */
+ batadv_bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
+ backbone_gw->vid, BATADV_CLAIM_TYPE_REQUEST);
+
+ /* no local broadcasts should be sent or received, for now. */
+ if (!atomic_read(&backbone_gw->request_sent)) {
+ atomic_inc(&backbone_gw->bat_priv->bla.num_requests);
+ atomic_set(&backbone_gw->request_sent, 1);
+ }
+}
+
+/**
+ * batadv_bla_send_announce() - Send an announcement frame
+ * @bat_priv: the bat priv with all the soft interface information
+ * @backbone_gw: our backbone gateway which should be announced
+ */
+static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
+ struct batadv_bla_backbone_gw *backbone_gw)
+{
+ u8 mac[ETH_ALEN];
+ __be16 crc;
+
+ memcpy(mac, batadv_announce_mac, 4);
+ spin_lock_bh(&backbone_gw->crc_lock);
+ crc = htons(backbone_gw->crc);
+ spin_unlock_bh(&backbone_gw->crc_lock);
+ memcpy(&mac[4], &crc, 2);
+
+ batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid,
+ BATADV_CLAIM_TYPE_ANNOUNCE);
+}
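+
+/* Illustration (CRC value made up): with a backbone CRC of 0xabcd the
+ * announced address becomes 43:05:43:05:ab:cd - the fixed announce prefix
+ * followed by the CRC in network byte order, which the receiving peers
+ * compare against their own view in batadv_handle_announce().
+ */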
+
+/**
+ * batadv_bla_add_claim() - Adds a claim in the claim hash
+ * @bat_priv: the bat priv with all the soft interface information
+ * @mac: the mac address of the claim
+ * @vid: the VLAN ID of the frame
+ * @backbone_gw: the backbone gateway which claims it
+ */
+static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
+ const u8 *mac, const unsigned short vid,
+ struct batadv_bla_backbone_gw *backbone_gw)
+{
+ struct batadv_bla_backbone_gw *old_backbone_gw;
+ struct batadv_bla_claim *claim;
+ struct batadv_bla_claim search_claim;
+ bool remove_crc = false;
+ int hash_added;
+
+ ether_addr_copy(search_claim.addr, mac);
+ search_claim.vid = vid;
+ claim = batadv_claim_hash_find(bat_priv, &search_claim);
+
+ /* create a new claim entry if it does not exist yet. */
+ if (!claim) {
+ claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
+ if (!claim)
+ return;
+
+ ether_addr_copy(claim->addr, mac);
+ spin_lock_init(&claim->backbone_lock);
+ claim->vid = vid;
+ claim->lasttime = jiffies;
+ kref_get(&backbone_gw->refcount);
+ claim->backbone_gw = backbone_gw;
+ kref_init(&claim->refcount);
+
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "%s(): adding new entry %pM, vid %d to hash ...\n",
+ __func__, mac, batadv_print_vid(vid));
+
+ kref_get(&claim->refcount);
+ hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
+ batadv_compare_claim,
+ batadv_choose_claim, claim,
+ &claim->hash_entry);
+
+ if (unlikely(hash_added != 0)) {
+ /* hash insertion failed; the claim was never published, free it */
+ kfree(claim);
+ return;
+ }
+ } else {
+ claim->lasttime = jiffies;
+ if (claim->backbone_gw == backbone_gw)
+ /* no need to register a new backbone */
+ goto claim_free_ref;
+
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "%s(): changing ownership for %pM, vid %d to gw %pM\n",
+ __func__, mac, batadv_print_vid(vid),
+ backbone_gw->orig);
+
+ remove_crc = true;
+ }
+
+ /* replace backbone_gw atomically and adjust reference counters */
+ spin_lock_bh(&claim->backbone_lock);
+ old_backbone_gw = claim->backbone_gw;
+ kref_get(&backbone_gw->refcount);
+ claim->backbone_gw = backbone_gw;
+ spin_unlock_bh(&claim->backbone_lock);
+
+ if (remove_crc) {
+ /* remove claim address from old backbone_gw */
+ spin_lock_bh(&old_backbone_gw->crc_lock);
+ old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
+ spin_unlock_bh(&old_backbone_gw->crc_lock);
+ }
+
+ batadv_backbone_gw_put(old_backbone_gw);
+
+ /* add claim address to new backbone_gw */
+ spin_lock_bh(&backbone_gw->crc_lock);
+ backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
+ spin_unlock_bh(&backbone_gw->crc_lock);
+ backbone_gw->lasttime = jiffies;
+
+claim_free_ref:
+ batadv_claim_put(claim);
+}
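+
+/* The per-backbone CRC is maintained incrementally: each claimed address
+ * contributes crc16(0, addr, ETH_ALEN) by XOR, both when a claim is added
+ * (above) and when it is removed (batadv_claim_release()). As XOR is its
+ * own inverse, removing a claim exactly undoes its contribution, and the
+ * result does not depend on the order in which claims were added.
+ */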
+
+/**
+ * batadv_bla_claim_get_backbone_gw() - Get valid reference for backbone_gw of
+ * claim
+ * @claim: claim whose backbone_gw should be returned
+ *
+ * Return: valid reference to claim::backbone_gw
+ */
+static struct batadv_bla_backbone_gw *
+batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim)
+{
+ struct batadv_bla_backbone_gw *backbone_gw;
+
+ spin_lock_bh(&claim->backbone_lock);
+ backbone_gw = claim->backbone_gw;
+ kref_get(&backbone_gw->refcount);
+ spin_unlock_bh(&claim->backbone_lock);
+
+ return backbone_gw;
+}
+
+/**
+ * batadv_bla_del_claim() - delete a claim from the claim hash
+ * @bat_priv: the bat priv with all the soft interface information
+ * @mac: mac address of the claim to be removed
+ * @vid: VLAN id for the claim to be removed
+ */
+static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
+ const u8 *mac, const unsigned short vid)
+{
+ struct batadv_bla_claim search_claim, *claim;
+ struct batadv_bla_claim *claim_removed_entry;
+ struct hlist_node *claim_removed_node;
+
+ ether_addr_copy(search_claim.addr, mac);
+ search_claim.vid = vid;
+ claim = batadv_claim_hash_find(bat_priv, &search_claim);
+ if (!claim)
+ return;
+
+ batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__,
+ mac, batadv_print_vid(vid));
+
+ claim_removed_node = batadv_hash_remove(bat_priv->bla.claim_hash,
+ batadv_compare_claim,
+ batadv_choose_claim, claim);
+ if (!claim_removed_node)
+ goto free_claim;
+
+ /* reference from the hash is gone */
+ claim_removed_entry = hlist_entry(claim_removed_node,
+ struct batadv_bla_claim, hash_entry);
+ batadv_claim_put(claim_removed_entry);
+
+free_claim:
+ /* don't need the reference from hash_find() anymore */
+ batadv_claim_put(claim);
+}
+
+/**
+ * batadv_handle_announce() - check for ANNOUNCE frame
+ * @bat_priv: the bat priv with all the soft interface information
+ * @an_addr: announcement mac address (ARP Sender HW address)
+ * @backbone_addr: originator address of the sender (Ethernet source MAC)
+ * @vid: the VLAN ID of the frame
+ *
+ * Return: true if handled
+ */
+static bool batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
+ u8 *backbone_addr, unsigned short vid)
+{
+ struct batadv_bla_backbone_gw *backbone_gw;
+ u16 backbone_crc, crc;
+
+ if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
+ return false;
+
+ backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
+ false);
+
+ if (unlikely(!backbone_gw))
+ return true;
+
+ /* handle as ANNOUNCE frame */
+ backbone_gw->lasttime = jiffies;
+ crc = ntohs(*((__force __be16 *)(&an_addr[4])));
+
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "%s(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n",
+ __func__, batadv_print_vid(vid), backbone_gw->orig, crc);
+
+ spin_lock_bh(&backbone_gw->crc_lock);
+ backbone_crc = backbone_gw->crc;
+ spin_unlock_bh(&backbone_gw->crc_lock);
+
+ if (backbone_crc != crc) {
+ batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
+ "%s(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n",
+ __func__, backbone_gw->orig,
+ batadv_print_vid(backbone_gw->vid),
+ backbone_crc, crc);
+
+ batadv_bla_send_request(backbone_gw);
+ } else {
+ /* if we have sent a request and the crc was OK,
+ * we can allow traffic again.
+ */
+ if (atomic_read(&backbone_gw->request_sent)) {
+ atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
+ atomic_set(&backbone_gw->request_sent, 0);
+ }
+ }
+
+ batadv_backbone_gw_put(backbone_gw);
+ return true;
+}
+
+/**
+ * batadv_handle_request() - check for REQUEST frame
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the primary hard interface of this batman soft interface
+ * @backbone_addr: backbone address to be requested (ARP sender HW MAC)
+ * @ethhdr: ethernet header of a packet
+ * @vid: the VLAN ID of the frame
+ *
+ * Return: true if handled
+ */
+static bool batadv_handle_request(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ u8 *backbone_addr, struct ethhdr *ethhdr,
+ unsigned short vid)
+{
+ /* check for REQUEST frame */
+ if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
+ return false;
+
+ /* sanity check: this should not happen on a normal switch;
+ * we ignore it in this case.
+ */
+ if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
+ return true;
+
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "%s(): REQUEST vid %d (sent by %pM)...\n",
+ __func__, batadv_print_vid(vid), ethhdr->h_source);
+
+ batadv_bla_answer_request(bat_priv, primary_if, vid);
+ return true;
+}
+
+/**
+ * batadv_handle_unclaim() - check for UNCLAIM frame
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the primary hard interface of this batman soft interface
+ * @backbone_addr: originator address of the backbone (Ethernet source)
+ * @claim_addr: Client to be unclaimed (ARP sender HW MAC)
+ * @vid: the VLAN ID of the frame
+ *
+ * Return: true if handled
+ */
+static bool batadv_handle_unclaim(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ const u8 *backbone_addr, const u8 *claim_addr,
+ unsigned short vid)
+{
+ struct batadv_bla_backbone_gw *backbone_gw;
+
+ /* unclaim in any case if it is our own */
+ if (primary_if && batadv_compare_eth(backbone_addr,
+ primary_if->net_dev->dev_addr))
+ batadv_bla_send_claim(bat_priv, claim_addr, vid,
+ BATADV_CLAIM_TYPE_UNCLAIM);
+
+ backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);
+
+ if (!backbone_gw)
+ return true;
+
+ /* this must be an UNCLAIM frame */
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "%s(): UNCLAIM %pM on vid %d (sent by %pM)...\n", __func__,
+ claim_addr, batadv_print_vid(vid), backbone_gw->orig);
+
+ batadv_bla_del_claim(bat_priv, claim_addr, vid);
+ batadv_backbone_gw_put(backbone_gw);
+ return true;
+}
+
+/**
+ * batadv_handle_claim() - check for CLAIM frame
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the primary hard interface of this batman soft interface
+ * @backbone_addr: originator address of the backbone (Ethernet Source)
+ * @claim_addr: client mac address to be claimed (ARP sender HW MAC)
+ * @vid: the VLAN ID of the frame
+ *
+ * Return: true if handled
+ */
+static bool batadv_handle_claim(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ const u8 *backbone_addr, const u8 *claim_addr,
+ unsigned short vid)
+{
+ struct batadv_bla_backbone_gw *backbone_gw;
+
+ /* register the gateway if not yet available, and add the claim. */
+
+ backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
+ false);
+
+ if (unlikely(!backbone_gw))
+ return true;
+
+ /* this must be a CLAIM frame */
+ batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
+ if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
+ batadv_bla_send_claim(bat_priv, claim_addr, vid,
+ BATADV_CLAIM_TYPE_CLAIM);
+
+ /* TODO: we could call something like tt_local_del() here. */
+
+ batadv_backbone_gw_put(backbone_gw);
+ return true;
+}
+
+/**
+ * batadv_check_claim_group() - check for claim group membership
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the primary interface of this batman interface
+ * @hw_src: the Hardware source in the ARP Header
+ * @hw_dst: the Hardware destination in the ARP Header
+ * @ethhdr: pointer to the Ethernet header of the claim frame
+ *
+ * Checks whether it is a claim packet and whether it belongs to the same
+ * claim group. This function also adopts the sender's group ID if the
+ * sender is part of the same mesh and its group ID is larger than ours.
+ *
+ * Return:
+ * 2 - if it is a claim packet and on the same group
+ * 1 - if it is a claim packet from another group
+ * 0 - if it is not a claim packet
+ */
+static int batadv_check_claim_group(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ u8 *hw_src, u8 *hw_dst,
+ struct ethhdr *ethhdr)
+{
+ u8 *backbone_addr;
+ struct batadv_orig_node *orig_node;
+ struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
+
+ bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
+ bla_dst_own = &bat_priv->bla.claim_dest;
+
+ /* if announcement packet, use the source,
+ * otherwise assume it is in the hw_src
+ */
+ switch (bla_dst->type) {
+ case BATADV_CLAIM_TYPE_CLAIM:
+ backbone_addr = hw_src;
+ break;
+ case BATADV_CLAIM_TYPE_REQUEST:
+ case BATADV_CLAIM_TYPE_ANNOUNCE:
+ case BATADV_CLAIM_TYPE_UNCLAIM:
+ backbone_addr = ethhdr->h_source;
+ break;
+ default:
+ return 0;
+ }
+
+ /* don't accept claim frames from ourselves */
+ if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
+ return 0;
+
+ /* if it's already the same group, it is fine. */
+ if (bla_dst->group == bla_dst_own->group)
+ return 2;
+
+ /* let's see if this originator is in our mesh */
+ orig_node = batadv_orig_hash_find(bat_priv, backbone_addr);
+
+ /* don't accept claims from gateways which are not in
+ * the same mesh or group.
+ */
+ if (!orig_node)
+ return 1;
+
+ /* if our mesh friend's mac is bigger, use it for ourselves. */
+ if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "taking other backbones claim group: %#.4x\n",
+ ntohs(bla_dst->group));
+ bla_dst_own->group = bla_dst->group;
+ }
+
+ batadv_orig_node_put(orig_node);
+
+ return 2;
+}
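+
+/* Illustration (group ids made up): when two formerly separate meshes with
+ * group ids 0x0f00 and 0xbeef grow together, nodes of the first mesh adopt
+ * 0xbeef as soon as they see a claim frame from a known originator, so all
+ * backbone gws eventually converge on the numerically larger group id.
+ */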
+
+/**
+ * batadv_bla_process_claim() - Check if this is a claim frame, and process it
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the primary hard interface of this batman soft interface
+ * @skb: the frame to be checked
+ *
+ * Return: true if it was a claim frame, otherwise return false to
+ * tell the caller that it can use the frame on its own.
+ */
+static bool batadv_bla_process_claim(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ struct sk_buff *skb)
+{
+ struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
+ u8 *hw_src, *hw_dst;
+ struct vlan_hdr *vhdr, vhdr_buf;
+ struct ethhdr *ethhdr;
+ struct arphdr *arphdr;
+ unsigned short vid;
+ int vlan_depth = 0;
+ __be16 proto;
+ int headlen;
+ int ret;
+
+ vid = batadv_get_vid(skb, 0);
+ ethhdr = eth_hdr(skb);
+
+ proto = ethhdr->h_proto;
+ headlen = ETH_HLEN;
+ if (vid & BATADV_VLAN_HAS_TAG) {
+ /* Traverse the VLAN/Ethertypes.
+ *
+ * At this point it is known that the first protocol is a VLAN
+ * header, so start checking at the encapsulated protocol.
+ *
+ * The depth of the VLAN headers is recorded to drop BLA claim
+ * frames encapsulated into multiple VLAN headers (QinQ).
+ */
+ do {
+ vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
+ &vhdr_buf);
+ if (!vhdr)
+ return false;
+
+ proto = vhdr->h_vlan_encapsulated_proto;
+ headlen += VLAN_HLEN;
+ vlan_depth++;
+ } while (proto == htons(ETH_P_8021Q));
+ }
+
+ if (proto != htons(ETH_P_ARP))
+ return false; /* not a claim frame */
+
+ /* this must be an ARP frame. check if it is a claim. */
+
+ if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
+ return false;
+
+ /* pskb_may_pull() may have modified the pointers, get ethhdr again */
+ ethhdr = eth_hdr(skb);
+ arphdr = (struct arphdr *)((u8 *)ethhdr + headlen);
+
+ /* Check whether the ARP frame carries a valid
+ * IP information
+ */
+ if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
+ return false;
+ if (arphdr->ar_pro != htons(ETH_P_IP))
+ return false;
+ if (arphdr->ar_hln != ETH_ALEN)
+ return false;
+ if (arphdr->ar_pln != 4)
+ return false;
+
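+ /* ARP payload layout: sender HW (ETH_ALEN bytes), sender IP (4 bytes),
+ * target HW, target IP - hence hw_dst sits ETH_ALEN + 4 bytes past
+ * hw_src
+ */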
+ hw_src = (u8 *)arphdr + sizeof(struct arphdr);
+ hw_dst = hw_src + ETH_ALEN + 4;
+ bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
+ bla_dst_own = &bat_priv->bla.claim_dest;
+
+ /* check if it is a claim frame in general */
+ if (memcmp(bla_dst->magic, bla_dst_own->magic,
+ sizeof(bla_dst->magic)) != 0)
+ return false;
+
+ /* check if there is a claim frame encapsulated deeper in (QinQ) and
+ * drop that, as this is not supported by BLA but should also not be
+ * sent via the mesh.
+ */
+ if (vlan_depth > 1)
+ return true;
+
+ /* Let the loopdetect frames through to the mesh in any case. */
+ if (bla_dst->type == BATADV_CLAIM_TYPE_LOOPDETECT)
+ return false;
+
+ /* check if it is a claim frame. */
+ ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
+ ethhdr);
+ if (ret == 1)
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "%s(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
+ __func__, ethhdr->h_source, batadv_print_vid(vid),
+ hw_src, hw_dst);
+
+ if (ret < 2)
+ return !!ret;
+
+ /* become a backbone gw ourselves on this vlan if not happened yet */
+ batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
+
+ /* check for the different types of claim frames ... */
+ switch (bla_dst->type) {
+ case BATADV_CLAIM_TYPE_CLAIM:
+ if (batadv_handle_claim(bat_priv, primary_if, hw_src,
+ ethhdr->h_source, vid))
+ return true;
+ break;
+ case BATADV_CLAIM_TYPE_UNCLAIM:
+ if (batadv_handle_unclaim(bat_priv, primary_if,
+ ethhdr->h_source, hw_src, vid))
+ return true;
+ break;
+
+ case BATADV_CLAIM_TYPE_ANNOUNCE:
+ if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source,
+ vid))
+ return true;
+ break;
+ case BATADV_CLAIM_TYPE_REQUEST:
+ if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr,
+ vid))
+ return true;
+ break;
+ }
+
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "%s(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
+ __func__, ethhdr->h_source, batadv_print_vid(vid), hw_src,
+ hw_dst);
+ return true;
+}
+
+/**
+ * batadv_bla_purge_backbone_gw() - Remove backbone gateways after a timeout or
+ * immediately
+ * @bat_priv: the bat priv with all the soft interface information
+ * @now: whether the whole hash shall be wiped now
+ *
+ * Check when we last heard from other nodes, and remove them in case of
+ * a timeout, or clean all backbone gws if now is set.
+ */
+static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
+{
+ struct batadv_bla_backbone_gw *backbone_gw;
+ struct hlist_node *node_tmp;
+ struct hlist_head *head;
+ struct batadv_hashtable *hash;
+ spinlock_t *list_lock; /* protects write access to the hash lists */
+ int i;
+
+ hash = bat_priv->bla.backbone_hash;
+ if (!hash)
+ return;
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+ list_lock = &hash->list_locks[i];
+
+ spin_lock_bh(list_lock);
+ hlist_for_each_entry_safe(backbone_gw, node_tmp,
+ head, hash_entry) {
+ if (now)
+ goto purge_now;
+ if (!batadv_has_timed_out(backbone_gw->lasttime,
+ BATADV_BLA_BACKBONE_TIMEOUT))
+ continue;
+
+ batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
+ "%s(): backbone gw %pM timed out\n",
+ __func__, backbone_gw->orig);
+
+purge_now:
+ /* don't wait for the pending request anymore */
+ if (atomic_read(&backbone_gw->request_sent))
+ atomic_dec(&bat_priv->bla.num_requests);
+
+ batadv_bla_del_backbone_claims(backbone_gw);
+
+ hlist_del_rcu(&backbone_gw->hash_entry);
+ batadv_backbone_gw_put(backbone_gw);
+ }
+ spin_unlock_bh(list_lock);
+ }
+}
+
+/**
+ * batadv_bla_purge_claims() - Remove claims after a timeout or immediately
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the selected primary interface, may be NULL if now is set
+ * @now: whether the whole hash shall be wiped now
+ *
+ * Check when we last heard from our own claims, and remove them in case of
+ * a timeout, or clean all claims if now is set.
+ */
+static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ int now)
+{
+ struct batadv_bla_backbone_gw *backbone_gw;
+ struct batadv_bla_claim *claim;
+ struct hlist_head *head;
+ struct batadv_hashtable *hash;
+ int i;
+
+ hash = bat_priv->bla.claim_hash;
+ if (!hash)
+ return;
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(claim, head, hash_entry) {
+ backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
+ if (now)
+ goto purge_now;
+
+ if (!batadv_compare_eth(backbone_gw->orig,
+ primary_if->net_dev->dev_addr))
+ goto skip;
+
+ if (!batadv_has_timed_out(claim->lasttime,
+ BATADV_BLA_CLAIM_TIMEOUT))
+ goto skip;
+
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "%s(): timed out.\n", __func__);
+
+purge_now:
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "%s(): %pM, vid %d\n", __func__,
+ claim->addr, claim->vid);
+
+ batadv_handle_unclaim(bat_priv, primary_if,
+ backbone_gw->orig,
+ claim->addr, claim->vid);
+skip:
+ batadv_backbone_gw_put(backbone_gw);
+ }
+ rcu_read_unlock();
+ }
+}
+
+/**
+ * batadv_bla_update_orig_address() - Update the backbone gateways when the own
+ * originator address changes
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the new selected primary_if
+ * @oldif: the old primary interface, may be NULL
+ */
+void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ struct batadv_hard_iface *oldif)
+{
+ struct batadv_bla_backbone_gw *backbone_gw;
+ struct hlist_head *head;
+ struct batadv_hashtable *hash;
+ __be16 group;
+ int i;
+
+ /* reset bridge loop avoidance group id */
+ group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
+ bat_priv->bla.claim_dest.group = group;
+
+ /* purge everything when bridge loop avoidance is turned off */
+ if (!atomic_read(&bat_priv->bridge_loop_avoidance))
+ oldif = NULL;
+
+ if (!oldif) {
+ batadv_bla_purge_claims(bat_priv, NULL, 1);
+ batadv_bla_purge_backbone_gw(bat_priv, 1);
+ return;
+ }
+
+ hash = bat_priv->bla.backbone_hash;
+ if (!hash)
+ return;
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
+ /* own orig still holds the old value. */
+ if (!batadv_compare_eth(backbone_gw->orig,
+ oldif->net_dev->dev_addr))
+ continue;
+
+ ether_addr_copy(backbone_gw->orig,
+ primary_if->net_dev->dev_addr);
+ /* send an announce frame so others will ask for our
+ * claims and update their tables.
+ */
+ batadv_bla_send_announce(bat_priv, backbone_gw);
+ }
+ rcu_read_unlock();
+ }
+}
+
+/**
+ * batadv_bla_send_loopdetect() - send a loopdetect frame
+ * @bat_priv: the bat priv with all the soft interface information
+ * @backbone_gw: the backbone gateway for which a loop should be detected
+ *
+ * To detect loops that the bridge loop avoidance can't handle, send a loop
+ * detection packet on the backbone. Unlike other BLA frames, this frame will
+ * be allowed on the mesh by other nodes. If it is received on the mesh, this
+ * indicates that there is a loop.
+ */
+static void
+batadv_bla_send_loopdetect(struct batadv_priv *bat_priv,
+ struct batadv_bla_backbone_gw *backbone_gw)
+{
+ batadv_dbg(BATADV_DBG_BLA, bat_priv, "Send loopdetect frame for vid %d\n",
+ backbone_gw->vid);
+ batadv_bla_send_claim(bat_priv, bat_priv->bla.loopdetect_addr,
+ backbone_gw->vid, BATADV_CLAIM_TYPE_LOOPDETECT);
+}
+
+/**
+ * batadv_bla_status_update() - purge bla interfaces if necessary
+ * @net_dev: the soft interface net device
+ */
+void batadv_bla_status_update(struct net_device *net_dev)
+{
+ struct batadv_priv *bat_priv = netdev_priv(net_dev);
+ struct batadv_hard_iface *primary_if;
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ return;
+
+ /* this function already purges everything when bla is disabled,
+ * so just call that one.
+ */
+ batadv_bla_update_orig_address(bat_priv, primary_if, primary_if);
+ batadv_hardif_put(primary_if);
+}
+
+/**
+ * batadv_bla_periodic_work() - performs periodic bla work
+ * @work: kernel work struct
+ *
+ * periodic work to do:
+ * * purge structures when they are too old
+ * * send announcements
+ */
+static void batadv_bla_periodic_work(struct work_struct *work)
+{
+ struct delayed_work *delayed_work;
+ struct batadv_priv *bat_priv;
+ struct batadv_priv_bla *priv_bla;
+ struct hlist_head *head;
+ struct batadv_bla_backbone_gw *backbone_gw;
+ struct batadv_hashtable *hash;
+ struct batadv_hard_iface *primary_if;
+ bool send_loopdetect = false;
+ int i;
+
+ delayed_work = to_delayed_work(work);
+ priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
+ bat_priv = container_of(priv_bla, struct batadv_priv, bla);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ goto out;
+
+ batadv_bla_purge_claims(bat_priv, primary_if, 0);
+ batadv_bla_purge_backbone_gw(bat_priv, 0);
+
+ if (!atomic_read(&bat_priv->bridge_loop_avoidance))
+ goto out;
+
+ if (atomic_dec_and_test(&bat_priv->bla.loopdetect_next)) {
+ /* set a new random mac address for the next bridge loop
+ * detection frames. Set the locally administered bit to avoid
+ * collisions with users' mac addresses.
+ */
+ eth_random_addr(bat_priv->bla.loopdetect_addr);
+ bat_priv->bla.loopdetect_addr[0] = 0xba;
+ bat_priv->bla.loopdetect_addr[1] = 0xbe;
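+ /* the loop detection address now has the form ba:be:xx:xx:xx:xx */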
+ bat_priv->bla.loopdetect_lasttime = jiffies;
+ atomic_set(&bat_priv->bla.loopdetect_next,
+ BATADV_BLA_LOOPDETECT_PERIODS);
+
+ /* mark for sending loop detect on all VLANs */
+ send_loopdetect = true;
+ }
+
+ hash = bat_priv->bla.backbone_hash;
+ if (!hash)
+ goto out;
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
+ if (!batadv_compare_eth(backbone_gw->orig,
+ primary_if->net_dev->dev_addr))
+ continue;
+
+ backbone_gw->lasttime = jiffies;
+
+ batadv_bla_send_announce(bat_priv, backbone_gw);
+ if (send_loopdetect)
+ batadv_bla_send_loopdetect(bat_priv,
+ backbone_gw);
+
+ /* request_sent is only set after creation to avoid
+ * problems when we are not yet known as backbone gw
+ * in the backbone.
+ *
+ * We can reset this now after we waited some periods
+ * to give bridge forward delays and bla group forming
+ * some grace time.
+ */
+
+ if (atomic_read(&backbone_gw->request_sent) == 0)
+ continue;
+
+ if (!atomic_dec_and_test(&backbone_gw->wait_periods))
+ continue;
+
+ atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
+ atomic_set(&backbone_gw->request_sent, 0);
+ }
+ rcu_read_unlock();
+ }
+out:
+ batadv_hardif_put(primary_if);
+
+ queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
+ msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
+}
+
+/* The claim hash and the backbone hash receive the same lock class key
+ * because they are both initialized by hash_new with the same key.
+ * Reinitialize them with two different keys to allow nested locking without
+ * generating lockdep warnings.
+ */
+static struct lock_class_key batadv_claim_hash_lock_class_key;
+static struct lock_class_key batadv_backbone_hash_lock_class_key;
+
+/**
+ * batadv_bla_init() - initialize all bla structures
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 on success, < 0 on error.
+ */
+int batadv_bla_init(struct batadv_priv *bat_priv)
+{
+ int i;
+ u8 claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
+ struct batadv_hard_iface *primary_if;
+ u16 crc;
+ unsigned long entrytime;
+
+ spin_lock_init(&bat_priv->bla.bcast_duplist_lock);
+
+ batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");
+
+ /* setting claim destination address */
+ memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3);
+ bat_priv->bla.claim_dest.type = 0;
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (primary_if) {
+ crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN);
+ bat_priv->bla.claim_dest.group = htons(crc);
+ batadv_hardif_put(primary_if);
+ } else {
+ bat_priv->bla.claim_dest.group = 0; /* will be set later */
+ }
+
+ /* initialize the duplicate list */
+ entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
+ for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
+ bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
+ bat_priv->bla.bcast_duplist_curr = 0;
+
+ atomic_set(&bat_priv->bla.loopdetect_next,
+ BATADV_BLA_LOOPDETECT_PERIODS);
+
+ if (bat_priv->bla.claim_hash)
+ return 0;
+
+ bat_priv->bla.claim_hash = batadv_hash_new(128);
+ if (!bat_priv->bla.claim_hash)
+ return -ENOMEM;
+
+ bat_priv->bla.backbone_hash = batadv_hash_new(32);
+ if (!bat_priv->bla.backbone_hash) {
+ batadv_hash_destroy(bat_priv->bla.claim_hash);
+ return -ENOMEM;
+ }
+
+ batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
+ &batadv_claim_hash_lock_class_key);
+ batadv_hash_set_lock_class(bat_priv->bla.backbone_hash,
+ &batadv_backbone_hash_lock_class_key);
+
+ batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");
+
+ INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);
+
+ queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
+ msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
+ return 0;
+}
+
+/**
+ * batadv_bla_check_duplist() - Check if a frame is in the broadcast duplicate list
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: contains the multicast packet to be checked
+ * @payload_ptr: pointer to position inside the head buffer of the skb
+ * marking the start of the data to be CRC'ed
+ * @orig: originator mac address, NULL if unknown
+ *
+ * Check if it is on our broadcast list. Another gateway might have sent the
+ * same packet because it is connected to the same backbone, so we have to
+ * remove this duplicate.
+ *
+ * This is done by comparing the CRC, which tells us with high probability
+ * that it is the same packet. If it was furthermore sent by another host,
+ * drop it. We allow equal packets from the same host, however, as this
+ * might be intended.
+ *
+ * Return: true if a packet is in the duplicate list, false otherwise.
+ */
+static bool batadv_bla_check_duplist(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, u8 *payload_ptr,
+ const u8 *orig)
+{
+ struct batadv_bcast_duplist_entry *entry;
+ bool ret = false;
+ int i, curr;
+ __be32 crc;
+
+ /* calculate the crc ... */
+ crc = batadv_skb_crc32(skb, payload_ptr);
+
+ spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);
+
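+ /* bcast_duplist_curr points at the newest entry and new entries are
+ * written downwards from it, so scanning upwards visits the entries
+ * from newest to oldest
+ */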
+ for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
+ curr = (bat_priv->bla.bcast_duplist_curr + i);
+ curr %= BATADV_DUPLIST_SIZE;
+ entry = &bat_priv->bla.bcast_duplist[curr];
+
+ /* we can stop searching if the entry is too old;
+ * later entries will be even older
+ */
+ if (batadv_has_timed_out(entry->entrytime,
+ BATADV_DUPLIST_TIMEOUT))
+ break;
+
+ if (entry->crc != crc)
+ continue;
+
+ /* are the originators both known and not anonymous? */
+ if (orig && !is_zero_ether_addr(orig) &&
+ !is_zero_ether_addr(entry->orig)) {
+ /* If known, check if the new frame came from
+ * the same originator:
+ * We are safe to take identical frames from the
+ * same orig, if known, as duplications in
+ * the mesh are detected via the (orig, seqno) pair.
+ * So we can be a bit more liberal here and allow
+ * identical frames from the same orig which the source
+ * host might have sent multiple times on purpose.
+ */
+ if (batadv_compare_eth(entry->orig, orig))
+ continue;
+ }
+
+ /* this entry seems to match: same crc, not too old,
+ * and from another gw. therefore return true to forbid it.
+ */
+ ret = true;
+ goto out;
+ }
+ /* not found, add a new entry (overwrite the oldest entry)
+ * and allow it; it's the first occurrence.
+ */
+ curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
+ curr %= BATADV_DUPLIST_SIZE;
+ entry = &bat_priv->bla.bcast_duplist[curr];
+ entry->crc = crc;
+ entry->entrytime = jiffies;
+
+ /* known originator */
+ if (orig)
+ ether_addr_copy(entry->orig, orig);
+ /* anonymous originator */
+ else
+ eth_zero_addr(entry->orig);
+
+ bat_priv->bla.bcast_duplist_curr = curr;
+
+out:
+ spin_unlock_bh(&bat_priv->bla.bcast_duplist_lock);
+
+ return ret;
+}
+
+/**
+ * batadv_bla_check_ucast_duplist() - Check if a frame is in the broadcast duplicate list
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: contains the multicast packet to be checked, decapsulated from a
+ * unicast_packet
+ *
+ * Check if it is on our broadcast list. Another gateway might have sent the
+ * same packet because it is connected to the same backbone, so we have to
+ * remove this duplicate.
+ *
+ * Return: true if a packet is in the duplicate list, false otherwise.
+ */
+static bool batadv_bla_check_ucast_duplist(struct batadv_priv *bat_priv,
+ struct sk_buff *skb)
+{
+ return batadv_bla_check_duplist(bat_priv, skb, (u8 *)skb->data, NULL);
+}
+
+/**
+ * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast duplicate list
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: contains the bcast_packet to be checked
+ *
+ * Check if it is on our broadcast list. Another gateway might have sent the
+ * same packet because it is connected to the same backbone, so we have to
+ * remove this duplicate.
+ *
+ * Return: true if a packet is in the duplicate list, false otherwise.
+ */
+bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
+ struct sk_buff *skb)
+{
+ struct batadv_bcast_packet *bcast_packet;
+ u8 *payload_ptr;
+
+ bcast_packet = (struct batadv_bcast_packet *)skb->data;
+ payload_ptr = (u8 *)(bcast_packet + 1);
+
+ return batadv_bla_check_duplist(bat_priv, skb, payload_ptr,
+ bcast_packet->orig);
+}
+
+/**
+ * batadv_bla_is_backbone_gw_orig() - Check if the originator is a gateway for
+ * the VLAN identified by vid.
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: originator mac address
+ * @vid: VLAN identifier
+ *
+ * Return: true if orig is a backbone for this vid, false otherwise.
+ */
+bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
+ unsigned short vid)
+{
+ struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
+ struct hlist_head *head;
+ struct batadv_bla_backbone_gw *backbone_gw;
+ int i;
+
+ if (!atomic_read(&bat_priv->bridge_loop_avoidance))
+ return false;
+
+ if (!hash)
+ return false;
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
+ if (batadv_compare_eth(backbone_gw->orig, orig) &&
+ backbone_gw->vid == vid) {
+ rcu_read_unlock();
+ return true;
+ }
+ }
+ rcu_read_unlock();
+ }
+
+ return false;
+}
+
+/**
+ * batadv_bla_is_backbone_gw() - check if originator is a backbone gw for a VLAN
+ * @skb: the frame to be checked
+ * @orig_node: the orig_node of the frame
+ * @hdr_size: length of the batman header preceding the ethernet header
+ *
+ * Return: true if the orig_node is also a gateway on the soft interface,
+ * otherwise it returns false.
+ */
+bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
+ struct batadv_orig_node *orig_node, int hdr_size)
+{
+ struct batadv_bla_backbone_gw *backbone_gw;
+ unsigned short vid;
+
+ if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
+ return false;
+
+ /* first, find out the vid. */
+ if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
+ return false;
+
+ vid = batadv_get_vid(skb, hdr_size);
+
+ /* see if this originator is a backbone gw for this VLAN */
+ backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
+ orig_node->orig, vid);
+ if (!backbone_gw)
+ return false;
+
+ batadv_backbone_gw_put(backbone_gw);
+ return true;
+}
+
+/**
+ * batadv_bla_free() - free all bla structures
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Called on soft interface free or module unload.
+ */
+void batadv_bla_free(struct batadv_priv *bat_priv)
+{
+ struct batadv_hard_iface *primary_if;
+
+ cancel_delayed_work_sync(&bat_priv->bla.work);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+
+ if (bat_priv->bla.claim_hash) {
+ batadv_bla_purge_claims(bat_priv, primary_if, 1);
+ batadv_hash_destroy(bat_priv->bla.claim_hash);
+ bat_priv->bla.claim_hash = NULL;
+ }
+ if (bat_priv->bla.backbone_hash) {
+ batadv_bla_purge_backbone_gw(bat_priv, 1);
+ batadv_hash_destroy(bat_priv->bla.backbone_hash);
+ bat_priv->bla.backbone_hash = NULL;
+ }
+ batadv_hardif_put(primary_if);
+}
+
+/**
+ * batadv_bla_loopdetect_check() - check and handle a detected loop
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the packet to check
+ * @primary_if: interface where the request came on
+ * @vid: the VLAN ID of the frame
+ *
+ * Checks if this packet is a loop detect frame which has been sent by us,
+ * throws an uevent and logs the event if that is the case.
+ *
+ * Return: true if it is a loop detect frame which is to be dropped, false
+ * otherwise.
+ */
+static bool
+batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ struct batadv_hard_iface *primary_if,
+ unsigned short vid)
+{
+ struct batadv_bla_backbone_gw *backbone_gw;
+ struct ethhdr *ethhdr;
+ bool ret;
+
+ ethhdr = eth_hdr(skb);
+
+ /* Only check for the MAC address and skip more checks here for
+ * performance reasons - this function is on the hotpath, after all.
+ */
+ if (!batadv_compare_eth(ethhdr->h_source,
+ bat_priv->bla.loopdetect_addr))
+ return false;
+
+ /* If the packet came too late, don't forward it on the mesh
+ * but don't consider that a loop. It might be a coincidence.
+ */
+ if (batadv_has_timed_out(bat_priv->bla.loopdetect_lasttime,
+ BATADV_BLA_LOOPDETECT_TIMEOUT))
+ return true;
+
+ backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
+ primary_if->net_dev->dev_addr,
+ vid, true);
+ if (unlikely(!backbone_gw))
+ return true;
+
+ ret = queue_work(batadv_event_workqueue, &backbone_gw->report_work);
+
+ /* backbone_gw is unreferenced in the report work function
+ * if queue_work() call was successful
+ */
+ if (!ret)
+ batadv_backbone_gw_put(backbone_gw);
+
+ return true;
+}
+
+/**
+ * batadv_bla_rx() - check packets coming from the mesh.
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the frame to be checked
+ * @vid: the VLAN ID of the frame
+ * @packet_type: the batman packet type this frame came in
+ *
+ * batadv_bla_rx() checks whether:
+ *  * we have to race for a claim
+ *  * the frame is allowed on the LAN
+ *
+ * In these cases, the skb is further handled by this function.
+ *
+ * Return: true if handled, otherwise it returns false and the caller shall
+ * further process the skb.
+ */
+bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ unsigned short vid, int packet_type)
+{
+ struct batadv_bla_backbone_gw *backbone_gw;
+ struct ethhdr *ethhdr;
+ struct batadv_bla_claim search_claim, *claim = NULL;
+ struct batadv_hard_iface *primary_if;
+ bool own_claim;
+ bool ret;
+
+ ethhdr = eth_hdr(skb);
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ goto handled;
+
+ if (!atomic_read(&bat_priv->bridge_loop_avoidance))
+ goto allow;
+
+ if (batadv_bla_loopdetect_check(bat_priv, skb, primary_if, vid))
+ goto handled;
+
+ if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
+ /* don't allow multicast packets while requests are in flight */
+ if (is_multicast_ether_addr(ethhdr->h_dest))
+ /* Both broadcast flooding or multicast-via-unicasts
+ * delivery might send to multiple backbone gateways
+ * sharing the same LAN and therefore need to coordinate
+ * which backbone gateway forwards into the LAN,
+ * by claiming the payload source address.
+ *
+ * Broadcast flooding and multicast-via-unicasts
+ * delivery use the following two batman packet types.
+ * Note: explicitly exclude BATADV_UNICAST_4ADDR,
+ * as the DHCP gateway feature will send explicitly
+ * to only one BLA gateway, so the claiming process
+ * should be avoided there.
+ */
+ if (packet_type == BATADV_BCAST ||
+ packet_type == BATADV_UNICAST)
+ goto handled;
+
+ /* potential duplicates from foreign BLA backbone gateways via
+ * multicast-in-unicast packets
+ */
+ if (is_multicast_ether_addr(ethhdr->h_dest) &&
+ packet_type == BATADV_UNICAST &&
+ batadv_bla_check_ucast_duplist(bat_priv, skb))
+ goto handled;
+
+ ether_addr_copy(search_claim.addr, ethhdr->h_source);
+ search_claim.vid = vid;
+ claim = batadv_claim_hash_find(bat_priv, &search_claim);
+
+ if (!claim) {
+		/* possible optimization: race for a claim.
+		 * No claim exists yet, so claim it for ourselves.
+		 */
+
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "%s(): Unclaimed MAC %pM found. Claim it. Local: %s\n",
+ __func__, ethhdr->h_source,
+ batadv_is_my_client(bat_priv,
+ ethhdr->h_source, vid) ?
+ "yes" : "no");
+ batadv_handle_claim(bat_priv, primary_if,
+ primary_if->net_dev->dev_addr,
+ ethhdr->h_source, vid);
+ goto allow;
+ }
+
+ /* if it is our own claim ... */
+ backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
+ own_claim = batadv_compare_eth(backbone_gw->orig,
+ primary_if->net_dev->dev_addr);
+ batadv_backbone_gw_put(backbone_gw);
+
+ if (own_claim) {
+ /* ... allow it in any case */
+ claim->lasttime = jiffies;
+ goto allow;
+ }
+
+ /* if it is a multicast ... */
+ if (is_multicast_ether_addr(ethhdr->h_dest) &&
+ (packet_type == BATADV_BCAST || packet_type == BATADV_UNICAST)) {
+ /* ... drop it. the responsible gateway is in charge.
+ *
+ * We need to check packet type because with the gateway
+ * feature, broadcasts (like DHCP requests) may be sent
+ * using a unicast 4 address packet type. See comment above.
+ */
+ goto handled;
+ } else {
+ /* seems the client considers us as its best gateway.
+ * send a claim and update the claim table
+ * immediately.
+ */
+ batadv_handle_claim(bat_priv, primary_if,
+ primary_if->net_dev->dev_addr,
+ ethhdr->h_source, vid);
+ goto allow;
+ }
+allow:
+ batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
+ ret = false;
+ goto out;
+
+handled:
+ kfree_skb(skb);
+ ret = true;
+
+out:
+ batadv_hardif_put(primary_if);
+ batadv_claim_put(claim);
+ return ret;
+}
+
+/**
+ * batadv_bla_tx() - check packets going into the mesh
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the frame to be checked
+ * @vid: the VLAN ID of the frame
+ *
+ * batadv_bla_tx checks if:
+ * * a claim was received which has to be processed
+ * * the frame is allowed on the mesh
+ *
+ * In these cases, the skb is further handled by this function.
+ *
+ * This call might reallocate skb data.
+ *
+ * Return: true if handled, otherwise it returns false and the caller shall
+ * further process the skb.
+ */
+bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ unsigned short vid)
+{
+ struct ethhdr *ethhdr;
+ struct batadv_bla_claim search_claim, *claim = NULL;
+ struct batadv_bla_backbone_gw *backbone_gw;
+ struct batadv_hard_iface *primary_if;
+ bool client_roamed;
+ bool ret = false;
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ goto out;
+
+ if (!atomic_read(&bat_priv->bridge_loop_avoidance))
+ goto allow;
+
+ if (batadv_bla_process_claim(bat_priv, primary_if, skb))
+ goto handled;
+
+ ethhdr = eth_hdr(skb);
+
+ if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
+ /* don't allow broadcasts while requests are in flight */
+ if (is_multicast_ether_addr(ethhdr->h_dest))
+ goto handled;
+
+ ether_addr_copy(search_claim.addr, ethhdr->h_source);
+ search_claim.vid = vid;
+
+ claim = batadv_claim_hash_find(bat_priv, &search_claim);
+
+ /* if no claim exists, allow it. */
+ if (!claim)
+ goto allow;
+
+ /* check if we are responsible. */
+ backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
+ client_roamed = batadv_compare_eth(backbone_gw->orig,
+ primary_if->net_dev->dev_addr);
+ batadv_backbone_gw_put(backbone_gw);
+
+ if (client_roamed) {
+ /* if yes, the client has roamed and we have
+ * to unclaim it.
+ */
+ if (batadv_has_timed_out(claim->lasttime, 100)) {
+ /* only unclaim if the last claim entry is
+ * older than 100 ms to make sure we really
+ * have a roaming client here.
+ */
+ batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): Roaming client %pM detected. Unclaim it.\n",
+ __func__, ethhdr->h_source);
+ batadv_handle_unclaim(bat_priv, primary_if,
+ primary_if->net_dev->dev_addr,
+ ethhdr->h_source, vid);
+ goto allow;
+ } else {
+ batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): Race for claim %pM detected. Drop packet.\n",
+ __func__, ethhdr->h_source);
+ goto handled;
+ }
+ }
+
+ /* check if it is a multicast/broadcast frame */
+ if (is_multicast_ether_addr(ethhdr->h_dest)) {
+ /* drop it. the responsible gateway has forwarded it into
+ * the backbone network.
+ */
+ goto handled;
+ } else {
+ /* we must allow it. at least if we are
+ * responsible for the DESTINATION.
+ */
+ goto allow;
+ }
+allow:
+ batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
+ ret = false;
+ goto out;
+handled:
+ ret = true;
+out:
+ batadv_hardif_put(primary_if);
+ batadv_claim_put(claim);
+ return ret;
+}
+
+/**
+ * batadv_bla_claim_dump_entry() - dump one entry of the claim table
+ * to a netlink socket
+ * @msg: buffer for the message
+ * @portid: netlink port
+ * @cb: Control block containing additional options
+ * @primary_if: primary interface
+ * @claim: entry to dump
+ *
+ * Return: 0 or error code.
+ */
+static int
+batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid,
+ struct netlink_callback *cb,
+ struct batadv_hard_iface *primary_if,
+ struct batadv_bla_claim *claim)
+{
+ const u8 *primary_addr = primary_if->net_dev->dev_addr;
+ u16 backbone_crc;
+ bool is_own;
+ void *hdr;
+ int ret = -EINVAL;
+
+ hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
+ &batadv_netlink_family, NLM_F_MULTI,
+ BATADV_CMD_GET_BLA_CLAIM);
+ if (!hdr) {
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ genl_dump_check_consistent(cb, hdr);
+
+ is_own = batadv_compare_eth(claim->backbone_gw->orig,
+ primary_addr);
+
+ spin_lock_bh(&claim->backbone_gw->crc_lock);
+ backbone_crc = claim->backbone_gw->crc;
+ spin_unlock_bh(&claim->backbone_gw->crc_lock);
+
+ if (is_own)
+ if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) {
+ genlmsg_cancel(msg, hdr);
+ goto out;
+ }
+
+ if (nla_put(msg, BATADV_ATTR_BLA_ADDRESS, ETH_ALEN, claim->addr) ||
+ nla_put_u16(msg, BATADV_ATTR_BLA_VID, claim->vid) ||
+ nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
+ claim->backbone_gw->orig) ||
+ nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
+ backbone_crc)) {
+ genlmsg_cancel(msg, hdr);
+ goto out;
+ }
+
+ genlmsg_end(msg, hdr);
+ ret = 0;
+
+out:
+ return ret;
+}
+
+/**
+ * batadv_bla_claim_dump_bucket() - dump one bucket of the claim table
+ * to a netlink socket
+ * @msg: buffer for the message
+ * @portid: netlink port
+ * @cb: Control block containing additional options
+ * @primary_if: primary interface
+ * @hash: hash to dump
+ * @bucket: bucket index to dump
+ * @idx_skip: How many entries to skip
+ *
+ * Return: always 0.
+ */
+static int
+batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid,
+ struct netlink_callback *cb,
+ struct batadv_hard_iface *primary_if,
+ struct batadv_hashtable *hash, unsigned int bucket,
+ int *idx_skip)
+{
+ struct batadv_bla_claim *claim;
+ int idx = 0;
+ int ret = 0;
+
+ spin_lock_bh(&hash->list_locks[bucket]);
+ cb->seq = atomic_read(&hash->generation) << 1 | 1;
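+	/* the shift plus always-set low bit keep cb->seq non-zero; if the
+	 * hash generation changes between two dump rounds,
+	 * genl_dump_check_consistent() marks the dump with NLM_F_DUMP_INTR
+	 * so userspace knows to restart it
+	 */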
+
+ hlist_for_each_entry(claim, &hash->table[bucket], hash_entry) {
+ if (idx++ < *idx_skip)
+ continue;
+
+ ret = batadv_bla_claim_dump_entry(msg, portid, cb,
+ primary_if, claim);
+ if (ret) {
+ *idx_skip = idx - 1;
+ goto unlock;
+ }
+ }
+
+ *idx_skip = 0;
+unlock:
+ spin_unlock_bh(&hash->list_locks[bucket]);
+ return ret;
+}
+
+/**
+ * batadv_bla_claim_dump() - dump claim table to a netlink socket
+ * @msg: buffer for the message
+ * @cb: callback structure containing arguments
+ *
+ * Return: message length.
+ */
+int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb)
+{
+ struct batadv_hard_iface *primary_if = NULL;
+ int portid = NETLINK_CB(cb->skb).portid;
+ struct net *net = sock_net(cb->skb->sk);
+ struct net_device *soft_iface;
+ struct batadv_hashtable *hash;
+ struct batadv_priv *bat_priv;
+ int bucket = cb->args[0];
+ int idx = cb->args[1];
+ int ifindex;
+ int ret = 0;
+
+ ifindex = batadv_netlink_get_ifindex(cb->nlh,
+ BATADV_ATTR_MESH_IFINDEX);
+ if (!ifindex)
+ return -EINVAL;
+
+ soft_iface = dev_get_by_index(net, ifindex);
+ if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ bat_priv = netdev_priv(soft_iface);
+ hash = bat_priv->bla.claim_hash;
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ while (bucket < hash->size) {
+ if (batadv_bla_claim_dump_bucket(msg, portid, cb, primary_if,
+ hash, bucket, &idx))
+ break;
+ bucket++;
+ }
+
+ cb->args[0] = bucket;
+ cb->args[1] = idx;
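+	/* bucket and in-bucket index are stashed in cb->args[] so that the
+	 * next netlink dump round can resume where this message filled up
+	 */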
+
+ ret = msg->len;
+
+out:
+ batadv_hardif_put(primary_if);
+
+ dev_put(soft_iface);
+
+ return ret;
+}
+
+/**
+ * batadv_bla_backbone_dump_entry() - dump one entry of the backbone table to a
+ * netlink socket
+ * @msg: buffer for the message
+ * @portid: netlink port
+ * @cb: Control block containing additional options
+ * @primary_if: primary interface
+ * @backbone_gw: entry to dump
+ *
+ * Return: 0 or error code.
+ */
+static int
+batadv_bla_backbone_dump_entry(struct sk_buff *msg, u32 portid,
+ struct netlink_callback *cb,
+ struct batadv_hard_iface *primary_if,
+ struct batadv_bla_backbone_gw *backbone_gw)
+{
+ const u8 *primary_addr = primary_if->net_dev->dev_addr;
+ u16 backbone_crc;
+ bool is_own;
+ int msecs;
+ void *hdr;
+ int ret = -EINVAL;
+
+ hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
+ &batadv_netlink_family, NLM_F_MULTI,
+ BATADV_CMD_GET_BLA_BACKBONE);
+ if (!hdr) {
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ genl_dump_check_consistent(cb, hdr);
+
+ is_own = batadv_compare_eth(backbone_gw->orig, primary_addr);
+
+ spin_lock_bh(&backbone_gw->crc_lock);
+ backbone_crc = backbone_gw->crc;
+ spin_unlock_bh(&backbone_gw->crc_lock);
+
+ msecs = jiffies_to_msecs(jiffies - backbone_gw->lasttime);
+
+ if (is_own)
+ if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) {
+ genlmsg_cancel(msg, hdr);
+ goto out;
+ }
+
+ if (nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
+ backbone_gw->orig) ||
+ nla_put_u16(msg, BATADV_ATTR_BLA_VID, backbone_gw->vid) ||
+ nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
+ backbone_crc) ||
+ nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, msecs)) {
+ genlmsg_cancel(msg, hdr);
+ goto out;
+ }
+
+ genlmsg_end(msg, hdr);
+ ret = 0;
+
+out:
+ return ret;
+}
+
+/**
+ * batadv_bla_backbone_dump_bucket() - dump one bucket of the backbone table to
+ * a netlink socket
+ * @msg: buffer for the message
+ * @portid: netlink port
+ * @cb: Control block containing additional options
+ * @primary_if: primary interface
+ * @hash: hash to dump
+ * @bucket: bucket index to dump
+ * @idx_skip: How many entries to skip
+ *
+ * Return: always 0.
+ */
+static int
+batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid,
+ struct netlink_callback *cb,
+ struct batadv_hard_iface *primary_if,
+ struct batadv_hashtable *hash,
+ unsigned int bucket, int *idx_skip)
+{
+ struct batadv_bla_backbone_gw *backbone_gw;
+ int idx = 0;
+ int ret = 0;
+
+ spin_lock_bh(&hash->list_locks[bucket]);
+ cb->seq = atomic_read(&hash->generation) << 1 | 1;
+
+ hlist_for_each_entry(backbone_gw, &hash->table[bucket], hash_entry) {
+ if (idx++ < *idx_skip)
+ continue;
+
+ ret = batadv_bla_backbone_dump_entry(msg, portid, cb,
+ primary_if, backbone_gw);
+ if (ret) {
+ *idx_skip = idx - 1;
+ goto unlock;
+ }
+ }
+
+ *idx_skip = 0;
+unlock:
+ spin_unlock_bh(&hash->list_locks[bucket]);
+ return ret;
+}
+
+/**
+ * batadv_bla_backbone_dump() - dump backbone table to a netlink socket
+ * @msg: buffer for the message
+ * @cb: callback structure containing arguments
+ *
+ * Return: message length.
+ */
+int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb)
+{
+ struct batadv_hard_iface *primary_if = NULL;
+ int portid = NETLINK_CB(cb->skb).portid;
+ struct net *net = sock_net(cb->skb->sk);
+ struct net_device *soft_iface;
+ struct batadv_hashtable *hash;
+ struct batadv_priv *bat_priv;
+ int bucket = cb->args[0];
+ int idx = cb->args[1];
+ int ifindex;
+ int ret = 0;
+
+ ifindex = batadv_netlink_get_ifindex(cb->nlh,
+ BATADV_ATTR_MESH_IFINDEX);
+ if (!ifindex)
+ return -EINVAL;
+
+ soft_iface = dev_get_by_index(net, ifindex);
+ if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ bat_priv = netdev_priv(soft_iface);
+ hash = bat_priv->bla.backbone_hash;
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ while (bucket < hash->size) {
+ if (batadv_bla_backbone_dump_bucket(msg, portid, cb, primary_if,
+ hash, bucket, &idx))
+ break;
+ bucket++;
+ }
+
+ cb->args[0] = bucket;
+ cb->args[1] = idx;
+
+ ret = msg->len;
+
+out:
+ batadv_hardif_put(primary_if);
+
+ dev_put(soft_iface);
+
+ return ret;
+}
+
+#ifdef CONFIG_BATMAN_ADV_DAT
+/**
+ * batadv_bla_check_claim() - check if address is claimed
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: mac address of which the claim status is checked
+ * @vid: the VLAN ID
+ *
+ * Checks whether the given address is claimed by the local device itself.
+ *
+ * Return: true if BLA is disabled or the MAC is claimed by the device,
+ * false if the address is already claimed by another backbone gateway
+ */
+bool batadv_bla_check_claim(struct batadv_priv *bat_priv,
+ u8 *addr, unsigned short vid)
+{
+ struct batadv_bla_claim search_claim;
+ struct batadv_bla_claim *claim = NULL;
+ struct batadv_hard_iface *primary_if = NULL;
+ bool ret = true;
+
+ if (!atomic_read(&bat_priv->bridge_loop_avoidance))
+ return ret;
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ return ret;
+
+ /* First look if the mac address is claimed */
+ ether_addr_copy(search_claim.addr, addr);
+ search_claim.vid = vid;
+
+ claim = batadv_claim_hash_find(bat_priv, &search_claim);
+
+ /* If there is a claim and we are not owner of the claim,
+ * return false.
+ */
+ if (claim) {
+ if (!batadv_compare_eth(claim->backbone_gw->orig,
+ primary_if->net_dev->dev_addr))
+ ret = false;
+ batadv_claim_put(claim);
+ }
+
+ batadv_hardif_put(primary_if);
+ return ret;
+}
+#endif
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
new file mode 100644
index 0000000000..8673a26599
--- /dev/null
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Simon Wunderlich
+ */
+
+#ifndef _NET_BATMAN_ADV_BLA_H_
+#define _NET_BATMAN_ADV_BLA_H_
+
+#include "main.h"
+
+#include <linux/compiler.h>
+#include <linux/netdevice.h>
+#include <linux/netlink.h>
+#include <linux/skbuff.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+/**
+ * batadv_bla_is_loopdetect_mac() - check if the mac address is from a loop
+ * detect frame sent by bridge loop avoidance
+ * @mac: mac address to check
+ *
+ * Return: true if it looks like a loop detect frame
+ * (mac starts with BA:BE), false otherwise
+ */
+static inline bool batadv_bla_is_loopdetect_mac(const uint8_t *mac)
+{
+ if (mac[0] == 0xba && mac[1] == 0xbe)
+ return true;
+
+ return false;
+}
+
+#ifdef CONFIG_BATMAN_ADV_BLA
+bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ unsigned short vid, int packet_type);
+bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ unsigned short vid);
+bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
+ struct batadv_orig_node *orig_node,
+ int hdr_size);
+int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb);
+int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb);
+bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
+ unsigned short vid);
+bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
+ struct sk_buff *skb);
+void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ struct batadv_hard_iface *oldif);
+void batadv_bla_status_update(struct net_device *net_dev);
+int batadv_bla_init(struct batadv_priv *bat_priv);
+void batadv_bla_free(struct batadv_priv *bat_priv);
+#ifdef CONFIG_BATMAN_ADV_DAT
+bool batadv_bla_check_claim(struct batadv_priv *bat_priv, u8 *addr,
+ unsigned short vid);
+#endif
+#define BATADV_BLA_CRC_INIT 0
+#else /* ifdef CONFIG_BATMAN_ADV_BLA */
+
+static inline bool batadv_bla_rx(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, unsigned short vid,
+ int packet_type)
+{
+ return false;
+}
+
+static inline bool batadv_bla_tx(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, unsigned short vid)
+{
+ return false;
+}
+
+static inline bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
+ struct batadv_orig_node *orig_node,
+ int hdr_size)
+{
+ return false;
+}
+
+static inline bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv,
+ u8 *orig, unsigned short vid)
+{
+ return false;
+}
+
+static inline bool
+batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
+ struct sk_buff *skb)
+{
+ return false;
+}
+
+static inline void
+batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ struct batadv_hard_iface *oldif)
+{
+}
+
+static inline int batadv_bla_init(struct batadv_priv *bat_priv)
+{
+ return 1;
+}
+
+static inline void batadv_bla_free(struct batadv_priv *bat_priv)
+{
+}
+
+static inline int batadv_bla_claim_dump(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int batadv_bla_backbone_dump(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline
+bool batadv_bla_check_claim(struct batadv_priv *bat_priv, u8 *addr,
+ unsigned short vid)
+{
+ return true;
+}
+
+#endif /* ifdef CONFIG_BATMAN_ADV_BLA */
+
+#endif /* ifndef _NET_BATMAN_ADV_BLA_H_ */
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
new file mode 100644
index 0000000000..28a939d560
--- /dev/null
+++ b/net/batman-adv/distributed-arp-table.c
@@ -0,0 +1,1831 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Antonio Quartulli
+ */
+
+#include "distributed-arp-table.h"
+#include "main.h"
+
+#include <asm/unaligned.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/byteorder/generic.h>
+#include <linux/container_of.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/gfp.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/jiffies.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/netlink.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/udp.h>
+#include <linux/workqueue.h>
+#include <net/arp.h>
+#include <net/genetlink.h>
+#include <net/netlink.h>
+#include <net/sock.h>
+#include <uapi/linux/batman_adv.h>
+
+#include "bridge_loop_avoidance.h"
+#include "hard-interface.h"
+#include "hash.h"
+#include "log.h"
+#include "netlink.h"
+#include "originator.h"
+#include "send.h"
+#include "soft-interface.h"
+#include "translation-table.h"
+#include "tvlv.h"
+
+enum batadv_bootpop {
+ BATADV_BOOTREPLY = 2,
+};
+
+enum batadv_boothtype {
+ BATADV_HTYPE_ETHERNET = 1,
+};
+
+enum batadv_dhcpoptioncode {
+ BATADV_DHCP_OPT_PAD = 0,
+ BATADV_DHCP_OPT_MSG_TYPE = 53,
+ BATADV_DHCP_OPT_END = 255,
+};
+
+enum batadv_dhcptype {
+ BATADV_DHCPACK = 5,
+};
+
+/* { 99, 130, 83, 99 } */
+#define BATADV_DHCP_MAGIC 1669485411
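+/* 1669485411 == 0x63825363, the RFC 2131 DHCP magic cookie: the bytes
+ * 99.130.83.99 listed above read as a single big-endian 32 bit value
+ */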
+
+struct batadv_dhcp_packet {
+ __u8 op;
+ __u8 htype;
+ __u8 hlen;
+ __u8 hops;
+ __be32 xid;
+ __be16 secs;
+ __be16 flags;
+ __be32 ciaddr;
+ __be32 yiaddr;
+ __be32 siaddr;
+ __be32 giaddr;
+ __u8 chaddr[16];
+ __u8 sname[64];
+ __u8 file[128];
+ __be32 magic;
+ /* __u8 options[]; */
+};
+
+#define BATADV_DHCP_YIADDR_LEN sizeof(((struct batadv_dhcp_packet *)0)->yiaddr)
+#define BATADV_DHCP_CHADDR_LEN sizeof(((struct batadv_dhcp_packet *)0)->chaddr)
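+/* the sizeof-on-a-null-pointer idiom above only evaluates the member's type,
+ * so no dereference takes place; it is equivalent to the kernel's
+ * sizeof_field(struct batadv_dhcp_packet, yiaddr) helper
+ */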
+
+static void batadv_dat_purge(struct work_struct *work);
+
+/**
+ * batadv_dat_start_timer() - initialise the DAT periodic worker
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_dat_start_timer(struct batadv_priv *bat_priv)
+{
+ queue_delayed_work(batadv_event_workqueue, &bat_priv->dat.work,
+ msecs_to_jiffies(10000));
+}
+
+/**
+ * batadv_dat_entry_release() - release dat_entry from lists and queue for free
+ * after rcu grace period
+ * @ref: kref pointer of the dat_entry
+ */
+static void batadv_dat_entry_release(struct kref *ref)
+{
+ struct batadv_dat_entry *dat_entry;
+
+ dat_entry = container_of(ref, struct batadv_dat_entry, refcount);
+
+ kfree_rcu(dat_entry, rcu);
+}
+
+/**
+ * batadv_dat_entry_put() - decrement the dat_entry refcounter and possibly
+ * release it
+ * @dat_entry: dat_entry to be freed
+ */
+static void batadv_dat_entry_put(struct batadv_dat_entry *dat_entry)
+{
+ if (!dat_entry)
+ return;
+
+ kref_put(&dat_entry->refcount, batadv_dat_entry_release);
+}
+
+/**
+ * batadv_dat_to_purge() - check whether a dat_entry has to be purged or not
+ * @dat_entry: the entry to check
+ *
+ * Return: true if the entry has to be purged now, false otherwise.
+ */
+static bool batadv_dat_to_purge(struct batadv_dat_entry *dat_entry)
+{
+ return batadv_has_timed_out(dat_entry->last_update,
+ BATADV_DAT_ENTRY_TIMEOUT);
+}
+
+/**
+ * __batadv_dat_purge() - delete entries from the DAT local storage
+ * @bat_priv: the bat priv with all the soft interface information
+ * @to_purge: function in charge of deciding whether an entry has to be purged
+ *	      or not. This function takes the dat_entry as argument and has to
+ *	      return a boolean value: true if the entry has to be deleted,
+ *	      false otherwise
+ *
+ * Loops over each entry in the DAT local storage and deletes it if and only if
+ * the to_purge function passed as argument returns true.
+ */
+static void __batadv_dat_purge(struct batadv_priv *bat_priv,
+ bool (*to_purge)(struct batadv_dat_entry *))
+{
+ spinlock_t *list_lock; /* protects write access to the hash lists */
+ struct batadv_dat_entry *dat_entry;
+ struct hlist_node *node_tmp;
+ struct hlist_head *head;
+ u32 i;
+
+ if (!bat_priv->dat.hash)
+ return;
+
+ for (i = 0; i < bat_priv->dat.hash->size; i++) {
+ head = &bat_priv->dat.hash->table[i];
+ list_lock = &bat_priv->dat.hash->list_locks[i];
+
+ spin_lock_bh(list_lock);
+ hlist_for_each_entry_safe(dat_entry, node_tmp, head,
+ hash_entry) {
+ /* if a helper function has been passed as parameter,
+ * ask it if the entry has to be purged or not
+ */
+ if (to_purge && !to_purge(dat_entry))
+ continue;
+
+ hlist_del_rcu(&dat_entry->hash_entry);
+ batadv_dat_entry_put(dat_entry);
+ }
+ spin_unlock_bh(list_lock);
+ }
+}
+
+/**
+ * batadv_dat_purge() - periodic task that deletes old entries from the local
+ * DAT hash table
+ * @work: kernel work struct
+ */
+static void batadv_dat_purge(struct work_struct *work)
+{
+ struct delayed_work *delayed_work;
+ struct batadv_priv_dat *priv_dat;
+ struct batadv_priv *bat_priv;
+
+ delayed_work = to_delayed_work(work);
+ priv_dat = container_of(delayed_work, struct batadv_priv_dat, work);
+ bat_priv = container_of(priv_dat, struct batadv_priv, dat);
+
+ __batadv_dat_purge(bat_priv, batadv_dat_to_purge);
+ batadv_dat_start_timer(bat_priv);
+}
+
+/**
+ * batadv_compare_dat() - comparing function used in the local DAT hash table
+ * @node: node in the local table
+ * @data2: second object to compare the node to
+ *
+ * Return: true if the two entries are the same, false otherwise.
+ */
+static bool batadv_compare_dat(const struct hlist_node *node, const void *data2)
+{
+ const void *data1 = container_of(node, struct batadv_dat_entry,
+ hash_entry);
+
+ return memcmp(data1, data2, sizeof(__be32)) == 0;
+}
+
+/**
+ * batadv_arp_hw_src() - extract the hw_src field from an ARP packet
+ * @skb: ARP packet
+ * @hdr_size: size of the possible header before the ARP packet
+ *
+ * Return: the value of the hw_src field in the ARP packet.
+ */
+static u8 *batadv_arp_hw_src(struct sk_buff *skb, int hdr_size)
+{
+ u8 *addr;
+
+ addr = (u8 *)(skb->data + hdr_size);
+ addr += ETH_HLEN + sizeof(struct arphdr);
+
+ return addr;
+}
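+
+/* The hardcoded offsets in the helpers above and below rely on the
+ * Ethernet/IPv4 ARP payload layout enforced by batadv_arp_get_type()
+ * (ar_hln == ETH_ALEN, ar_pln == 4): sender hw (6 bytes), sender ip (4),
+ * target hw (6), target ip (4), directly following struct arphdr.
+ */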
+
+/**
+ * batadv_arp_ip_src() - extract the ip_src field from an ARP packet
+ * @skb: ARP packet
+ * @hdr_size: size of the possible header before the ARP packet
+ *
+ * Return: the value of the ip_src field in the ARP packet.
+ */
+static __be32 batadv_arp_ip_src(struct sk_buff *skb, int hdr_size)
+{
+ return *(__force __be32 *)(batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN);
+}
+
+/**
+ * batadv_arp_hw_dst() - extract the hw_dst field from an ARP packet
+ * @skb: ARP packet
+ * @hdr_size: size of the possible header before the ARP packet
+ *
+ * Return: the value of the hw_dst field in the ARP packet.
+ */
+static u8 *batadv_arp_hw_dst(struct sk_buff *skb, int hdr_size)
+{
+ return batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN + 4;
+}
+
+/**
+ * batadv_arp_ip_dst() - extract the ip_dst field from an ARP packet
+ * @skb: ARP packet
+ * @hdr_size: size of the possible header before the ARP packet
+ *
+ * Return: the value of the ip_dst field in the ARP packet.
+ */
+static __be32 batadv_arp_ip_dst(struct sk_buff *skb, int hdr_size)
+{
+ u8 *dst = batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN * 2 + 4;
+
+ return *(__force __be32 *)dst;
+}
+
+/**
+ * batadv_hash_dat() - compute the hash value for an IP address
+ * @data: data to hash
+ * @size: size of the hash table
+ *
+ * Return: the selected index in the hash table for the given data.
+ */
+static u32 batadv_hash_dat(const void *data, u32 size)
+{
+ u32 hash = 0;
+ const struct batadv_dat_entry *dat = data;
+ const unsigned char *key;
+ __be16 vid;
+ u32 i;
+
+ key = (__force const unsigned char *)&dat->ip;
+ for (i = 0; i < sizeof(dat->ip); i++) {
+ hash += key[i];
+ hash += (hash << 10);
+ hash ^= (hash >> 6);
+ }
+
+ vid = htons(dat->vid);
+ key = (__force const unsigned char *)&vid;
+ for (i = 0; i < sizeof(dat->vid); i++) {
+ hash += key[i];
+ hash += (hash << 10);
+ hash ^= (hash >> 6);
+ }
+
+ hash += (hash << 3);
+ hash ^= (hash >> 11);
+ hash += (hash << 15);
+
+ return hash % size;
+}
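+
+/* The mixing above is Jenkins' one-at-a-time hash, fed first with the IPv4
+ * address and then with the VID in network byte order. A minimal sketch of
+ * the per-byte step, for reference only:
+ *
+ *	static u32 oaat_step(u32 hash, u8 byte)
+ *	{
+ *		hash += byte;
+ *		hash += hash << 10;
+ *		hash ^= hash >> 6;
+ *		return hash;
+ *	}
+ *
+ * followed by the final avalanche rounds and a modulo to pick the bucket.
+ */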
+
+/**
+ * batadv_dat_entry_hash_find() - look for a given dat_entry in the local hash
+ * table
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ip: search key
+ * @vid: VLAN identifier
+ *
+ * Return: the dat_entry if found, NULL otherwise.
+ */
+static struct batadv_dat_entry *
+batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip,
+ unsigned short vid)
+{
+ struct hlist_head *head;
+ struct batadv_dat_entry to_find, *dat_entry, *dat_entry_tmp = NULL;
+ struct batadv_hashtable *hash = bat_priv->dat.hash;
+ u32 index;
+
+ if (!hash)
+ return NULL;
+
+ to_find.ip = ip;
+ to_find.vid = vid;
+
+ index = batadv_hash_dat(&to_find, hash->size);
+ head = &hash->table[index];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(dat_entry, head, hash_entry) {
+ if (dat_entry->ip != ip)
+ continue;
+
+ if (!kref_get_unless_zero(&dat_entry->refcount))
+ continue;
+
+ dat_entry_tmp = dat_entry;
+ break;
+ }
+ rcu_read_unlock();
+
+ return dat_entry_tmp;
+}
+
+/**
+ * batadv_dat_entry_add() - add a new dat entry or update it if already exists
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ip: ipv4 to add/edit
+ * @mac_addr: mac address to assign to the given ipv4
+ * @vid: VLAN identifier
+ */
+static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip,
+ u8 *mac_addr, unsigned short vid)
+{
+ struct batadv_dat_entry *dat_entry;
+ int hash_added;
+
+ dat_entry = batadv_dat_entry_hash_find(bat_priv, ip, vid);
+ /* if this entry is already known, just update it */
+ if (dat_entry) {
+ if (!batadv_compare_eth(dat_entry->mac_addr, mac_addr))
+ ether_addr_copy(dat_entry->mac_addr, mac_addr);
+ dat_entry->last_update = jiffies;
+ batadv_dbg(BATADV_DBG_DAT, bat_priv,
+ "Entry updated: %pI4 %pM (vid: %d)\n",
+ &dat_entry->ip, dat_entry->mac_addr,
+ batadv_print_vid(vid));
+ goto out;
+ }
+
+ dat_entry = kmalloc(sizeof(*dat_entry), GFP_ATOMIC);
+ if (!dat_entry)
+ goto out;
+
+ dat_entry->ip = ip;
+ dat_entry->vid = vid;
+ ether_addr_copy(dat_entry->mac_addr, mac_addr);
+ dat_entry->last_update = jiffies;
+ kref_init(&dat_entry->refcount);
+
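+	/* kref_init() above created the reference held by this function
+	 * (dropped at the "out" label); the extra one taken here is handed
+	 * over to the hash table
+	 */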
+ kref_get(&dat_entry->refcount);
+ hash_added = batadv_hash_add(bat_priv->dat.hash, batadv_compare_dat,
+ batadv_hash_dat, dat_entry,
+ &dat_entry->hash_entry);
+
+ if (unlikely(hash_added != 0)) {
+ /* remove the reference for the hash */
+ batadv_dat_entry_put(dat_entry);
+ goto out;
+ }
+
+ batadv_dbg(BATADV_DBG_DAT, bat_priv, "New entry added: %pI4 %pM (vid: %d)\n",
+ &dat_entry->ip, dat_entry->mac_addr, batadv_print_vid(vid));
+
+out:
+ batadv_dat_entry_put(dat_entry);
+}
+
+#ifdef CONFIG_BATMAN_ADV_DEBUG
+
+/**
+ * batadv_dbg_arp() - print a debug message containing all the ARP packet
+ * details
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: ARP packet
+ * @hdr_size: size of the possible header before the ARP packet
+ * @msg: message to print together with the debugging information
+ */
+static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ int hdr_size, char *msg)
+{
+ struct batadv_unicast_4addr_packet *unicast_4addr_packet;
+ struct batadv_bcast_packet *bcast_pkt;
+ u8 *orig_addr;
+ __be32 ip_src, ip_dst;
+
+ if (msg)
+ batadv_dbg(BATADV_DBG_DAT, bat_priv, "%s\n", msg);
+
+ ip_src = batadv_arp_ip_src(skb, hdr_size);
+ ip_dst = batadv_arp_ip_dst(skb, hdr_size);
+ batadv_dbg(BATADV_DBG_DAT, bat_priv,
+ "ARP MSG = [src: %pM-%pI4 dst: %pM-%pI4]\n",
+ batadv_arp_hw_src(skb, hdr_size), &ip_src,
+ batadv_arp_hw_dst(skb, hdr_size), &ip_dst);
+
+ if (hdr_size < sizeof(struct batadv_unicast_packet))
+ return;
+
+ unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
+
+ switch (unicast_4addr_packet->u.packet_type) {
+ case BATADV_UNICAST:
+ batadv_dbg(BATADV_DBG_DAT, bat_priv,
+ "* encapsulated within a UNICAST packet\n");
+ break;
+ case BATADV_UNICAST_4ADDR:
+ batadv_dbg(BATADV_DBG_DAT, bat_priv,
+ "* encapsulated within a UNICAST_4ADDR packet (src: %pM)\n",
+ unicast_4addr_packet->src);
+ switch (unicast_4addr_packet->subtype) {
+ case BATADV_P_DAT_DHT_PUT:
+ batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: DAT_DHT_PUT\n");
+ break;
+ case BATADV_P_DAT_DHT_GET:
+ batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: DAT_DHT_GET\n");
+ break;
+ case BATADV_P_DAT_CACHE_REPLY:
+ batadv_dbg(BATADV_DBG_DAT, bat_priv,
+ "* type: DAT_CACHE_REPLY\n");
+ break;
+ case BATADV_P_DATA:
+ batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: DATA\n");
+ break;
+ default:
+ batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: Unknown (%u)!\n",
+ unicast_4addr_packet->u.packet_type);
+ }
+ break;
+ case BATADV_BCAST:
+ bcast_pkt = (struct batadv_bcast_packet *)unicast_4addr_packet;
+ orig_addr = bcast_pkt->orig;
+ batadv_dbg(BATADV_DBG_DAT, bat_priv,
+ "* encapsulated within a BCAST packet (src: %pM)\n",
+ orig_addr);
+ break;
+ default:
+ batadv_dbg(BATADV_DBG_DAT, bat_priv,
+ "* encapsulated within an unknown packet type (0x%x)\n",
+ unicast_4addr_packet->u.packet_type);
+ }
+}
+
+#else
+
+static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ int hdr_size, char *msg)
+{
+}
+
+#endif /* CONFIG_BATMAN_ADV_DEBUG */
+
+/**
+ * batadv_is_orig_node_eligible() - check whether a node can be a DHT candidate
+ * @res: the array with the already selected candidates
+ * @select: number of already selected candidates
+ * @tmp_max: address of the currently evaluated node
+ * @max: current round max address
+ * @last_max: address of the last selected candidate
+ * @candidate: orig_node under evaluation
+ * @max_orig_node: last selected candidate
+ *
+ * Return: true if the node has been elected as next candidate or false
+ * otherwise.
+ */
+static bool batadv_is_orig_node_eligible(struct batadv_dat_candidate *res,
+ int select, batadv_dat_addr_t tmp_max,
+ batadv_dat_addr_t max,
+ batadv_dat_addr_t last_max,
+ struct batadv_orig_node *candidate,
+ struct batadv_orig_node *max_orig_node)
+{
+ bool ret = false;
+ int j;
+
+ /* check if orig node candidate is running DAT */
+ if (!test_bit(BATADV_ORIG_CAPA_HAS_DAT, &candidate->capabilities))
+ goto out;
+
+ /* Check if this node has already been selected... */
+ for (j = 0; j < select; j++)
+ if (res[j].orig_node == candidate)
+ break;
+	/* ...and possibly skip it */
+ if (j < select)
+ goto out;
+ /* sanity check: has it already been selected? This should not happen */
+ if (tmp_max > last_max)
+ goto out;
+ /* check if during this iteration an originator with a closer dht
+ * address has already been found
+ */
+ if (tmp_max < max)
+ goto out;
+	/* this is a hash collision with the temporarily selected node. Choose
+	 * the one with the lowest address
+ */
+ if (tmp_max == max && max_orig_node &&
+ batadv_compare_eth(candidate->orig, max_orig_node->orig))
+ goto out;
+
+ ret = true;
+out:
+ return ret;
+}
+
+/**
+ * batadv_choose_next_candidate() - select the next DHT candidate
+ * @bat_priv: the bat priv with all the soft interface information
+ * @cands: candidates array
+ * @select: number of candidates already present in the array
+ * @ip_key: key to look up in the DHT
+ * @last_max: pointer where the address of the selected candidate will be saved
+ */
+static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
+ struct batadv_dat_candidate *cands,
+ int select, batadv_dat_addr_t ip_key,
+ batadv_dat_addr_t *last_max)
+{
+ batadv_dat_addr_t max = 0;
+ batadv_dat_addr_t tmp_max = 0;
+ struct batadv_orig_node *orig_node, *max_orig_node = NULL;
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
+ struct hlist_head *head;
+ int i;
+
+ /* if no node is eligible as candidate, leave the candidate type as
+ * NOT_FOUND
+ */
+ cands[select].type = BATADV_DAT_CANDIDATE_NOT_FOUND;
+
+ /* iterate over the originator list and find the node with the closest
+ * dat_address which has not been selected yet
+ */
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
+ /* the dht space is a ring using unsigned addresses */
+ tmp_max = BATADV_DAT_ADDR_MAX - orig_node->dat_addr +
+ ip_key;
+
+ if (!batadv_is_orig_node_eligible(cands, select,
+ tmp_max, max,
+ *last_max, orig_node,
+ max_orig_node))
+ continue;
+
+ if (!kref_get_unless_zero(&orig_node->refcount))
+ continue;
+
+ max = tmp_max;
+ batadv_orig_node_put(max_orig_node);
+ max_orig_node = orig_node;
+ }
+ rcu_read_unlock();
+ }
+ if (max_orig_node) {
+ cands[select].type = BATADV_DAT_CANDIDATE_ORIG;
+ cands[select].orig_node = max_orig_node;
+ batadv_dbg(BATADV_DBG_DAT, bat_priv,
+ "dat_select_candidates() %d: selected %pM addr=%u dist=%u\n",
+ select, max_orig_node->orig, max_orig_node->dat_addr,
+ max);
+ }
+ *last_max = max;
+}
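+
+/* The ring distance above wraps in batadv_dat_addr_t arithmetic. Assuming a
+ * 16 bit batadv_dat_addr_t (so BATADV_DAT_ADDR_MAX == 65535) and
+ * ip_key == 100: a node with dat_addr 100 scores 65535, dat_addr 101 scores
+ * 65534, while dat_addr 99 wraps around to 0. Preferring the highest score
+ * therefore selects the nodes whose DHT address matches or most closely
+ * follows the key on the ring.
+ */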
+
+/**
+ * batadv_dat_select_candidates() - select the nodes which the DHT message has
+ * to be sent to
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ip_dst: ipv4 to look up in the DHT
+ * @vid: VLAN identifier
+ *
+ * An originator O is selected if and only if its DHT_ID value is one of the
+ * three closest values (from the LEFT, with wrap around if needed) to the
+ * hash value of the key. ip_dst is the key.
+ *
+ * Return: the candidate array of size BATADV_DAT_CANDIDATE_NUM.
+ */
+static struct batadv_dat_candidate *
+batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst,
+ unsigned short vid)
+{
+ int select;
+ batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key;
+ struct batadv_dat_candidate *res;
+ struct batadv_dat_entry dat;
+
+ if (!bat_priv->orig_hash)
+ return NULL;
+
+ res = kmalloc_array(BATADV_DAT_CANDIDATES_NUM, sizeof(*res),
+ GFP_ATOMIC);
+ if (!res)
+ return NULL;
+
+ dat.ip = ip_dst;
+ dat.vid = vid;
+ ip_key = (batadv_dat_addr_t)batadv_hash_dat(&dat,
+ BATADV_DAT_ADDR_MAX);
+
+ batadv_dbg(BATADV_DBG_DAT, bat_priv,
+ "%s(): IP=%pI4 hash(IP)=%u\n", __func__, &ip_dst,
+ ip_key);
+
+ for (select = 0; select < BATADV_DAT_CANDIDATES_NUM; select++)
+ batadv_choose_next_candidate(bat_priv, res, select, ip_key,
+ &last_max);
+
+ return res;
+}
+
+/**
+ * batadv_dat_forward_data() - copy and send payload to the selected candidates
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: payload to send
+ * @ip: the DHT key
+ * @vid: VLAN identifier
+ * @packet_subtype: unicast4addr packet subtype to use
+ *
+ * This function copies the skb with pskb_copy_for_clone() and sends it as a
+ * unicast packet to each of the selected candidates.
+ *
+ * Return: true if the packet is sent to at least one candidate, false
+ * otherwise.
+ */
+static bool batadv_dat_forward_data(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, __be32 ip,
+ unsigned short vid, int packet_subtype)
+{
+ int i;
+ bool ret = false;
+ int send_status;
+ struct batadv_neigh_node *neigh_node = NULL;
+ struct sk_buff *tmp_skb;
+ struct batadv_dat_candidate *cand;
+
+ cand = batadv_dat_select_candidates(bat_priv, ip, vid);
+ if (!cand)
+ goto out;
+
+ batadv_dbg(BATADV_DBG_DAT, bat_priv, "DHT_SEND for %pI4\n", &ip);
+
+ for (i = 0; i < BATADV_DAT_CANDIDATES_NUM; i++) {
+ if (cand[i].type == BATADV_DAT_CANDIDATE_NOT_FOUND)
+ continue;
+
+ neigh_node = batadv_orig_router_get(cand[i].orig_node,
+ BATADV_IF_DEFAULT);
+ if (!neigh_node)
+ goto free_orig;
+
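+		/* a writable copy (not just a clone) is needed because a
+		 * unicast 4addr header is pushed in front of the payload next
+		 */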
+ tmp_skb = pskb_copy_for_clone(skb, GFP_ATOMIC);
+ if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, tmp_skb,
+ cand[i].orig_node,
+ packet_subtype)) {
+ kfree_skb(tmp_skb);
+ goto free_neigh;
+ }
+
+ send_status = batadv_send_unicast_skb(tmp_skb, neigh_node);
+ if (send_status == NET_XMIT_SUCCESS) {
+ /* count the sent packet */
+ switch (packet_subtype) {
+ case BATADV_P_DAT_DHT_GET:
+ batadv_inc_counter(bat_priv,
+ BATADV_CNT_DAT_GET_TX);
+ break;
+ case BATADV_P_DAT_DHT_PUT:
+ batadv_inc_counter(bat_priv,
+ BATADV_CNT_DAT_PUT_TX);
+ break;
+ }
+
+ /* packet sent to a candidate: return true */
+ ret = true;
+ }
+free_neigh:
+ batadv_neigh_node_put(neigh_node);
+free_orig:
+ batadv_orig_node_put(cand[i].orig_node);
+ }
+
+out:
+ kfree(cand);
+ return ret;
+}
+
+/**
+ * batadv_dat_tvlv_container_update() - update the dat tvlv container after dat
+ * setting change
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_dat_tvlv_container_update(struct batadv_priv *bat_priv)
+{
+ char dat_mode;
+
+ dat_mode = atomic_read(&bat_priv->distributed_arp_table);
+
+ switch (dat_mode) {
+ case 0:
+ batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_DAT, 1);
+ break;
+ case 1:
+ batadv_tvlv_container_register(bat_priv, BATADV_TVLV_DAT, 1,
+ NULL, 0);
+ break;
+ }
+}
+
+/**
+ * batadv_dat_status_update() - update the dat tvlv container after dat
+ * setting change
+ * @net_dev: the soft interface net device
+ */
+void batadv_dat_status_update(struct net_device *net_dev)
+{
+ struct batadv_priv *bat_priv = netdev_priv(net_dev);
+
+ batadv_dat_tvlv_container_update(bat_priv);
+}
+
+/**
+ * batadv_dat_tvlv_ogm_handler_v1() - process incoming dat tvlv container
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the orig_node of the ogm
+ * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
+ * @tvlv_value: tvlv buffer containing the gateway data
+ * @tvlv_value_len: tvlv buffer length
+ */
+static void batadv_dat_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig,
+ u8 flags,
+ void *tvlv_value, u16 tvlv_value_len)
+{
+ if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
+ clear_bit(BATADV_ORIG_CAPA_HAS_DAT, &orig->capabilities);
+ else
+ set_bit(BATADV_ORIG_CAPA_HAS_DAT, &orig->capabilities);
+}
+
+/**
+ * batadv_dat_hash_free() - free the local DAT hash table
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_dat_hash_free(struct batadv_priv *bat_priv)
+{
+ if (!bat_priv->dat.hash)
+ return;
+
+ __batadv_dat_purge(bat_priv, NULL);
+
+ batadv_hash_destroy(bat_priv->dat.hash);
+
+ bat_priv->dat.hash = NULL;
+}
+
+/**
+ * batadv_dat_init() - initialise the DAT internals
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 in case of success, a negative error code otherwise
+ */
+int batadv_dat_init(struct batadv_priv *bat_priv)
+{
+ if (bat_priv->dat.hash)
+ return 0;
+
+ bat_priv->dat.hash = batadv_hash_new(1024);
+
+ if (!bat_priv->dat.hash)
+ return -ENOMEM;
+
+ INIT_DELAYED_WORK(&bat_priv->dat.work, batadv_dat_purge);
+ batadv_dat_start_timer(bat_priv);
+
+ batadv_tvlv_handler_register(bat_priv, batadv_dat_tvlv_ogm_handler_v1,
+ NULL, NULL, BATADV_TVLV_DAT, 1,
+ BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
+ batadv_dat_tvlv_container_update(bat_priv);
+ return 0;
+}
+
+/**
+ * batadv_dat_free() - free the DAT internals
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_dat_free(struct batadv_priv *bat_priv)
+{
+ batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_DAT, 1);
+ batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_DAT, 1);
+
+ cancel_delayed_work_sync(&bat_priv->dat.work);
+
+ batadv_dat_hash_free(bat_priv);
+}
+
+/**
+ * batadv_dat_cache_dump_entry() - dump one entry of the DAT cache table to a
+ * netlink socket
+ * @msg: buffer for the message
+ * @portid: netlink port
+ * @cb: Control block containing additional options
+ * @dat_entry: entry to dump
+ *
+ * Return: 0 or error code.
+ */
+static int
+batadv_dat_cache_dump_entry(struct sk_buff *msg, u32 portid,
+ struct netlink_callback *cb,
+ struct batadv_dat_entry *dat_entry)
+{
+ int msecs;
+ void *hdr;
+
+ hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
+ &batadv_netlink_family, NLM_F_MULTI,
+ BATADV_CMD_GET_DAT_CACHE);
+ if (!hdr)
+ return -ENOBUFS;
+
+ genl_dump_check_consistent(cb, hdr);
+
+ msecs = jiffies_to_msecs(jiffies - dat_entry->last_update);
+
+ if (nla_put_in_addr(msg, BATADV_ATTR_DAT_CACHE_IP4ADDRESS,
+ dat_entry->ip) ||
+ nla_put(msg, BATADV_ATTR_DAT_CACHE_HWADDRESS, ETH_ALEN,
+ dat_entry->mac_addr) ||
+ nla_put_u16(msg, BATADV_ATTR_DAT_CACHE_VID, dat_entry->vid) ||
+ nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, msecs)) {
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+ }
+
+ genlmsg_end(msg, hdr);
+ return 0;
+}
+
+/**
+ * batadv_dat_cache_dump_bucket() - dump one bucket of the DAT cache table to
+ * a netlink socket
+ * @msg: buffer for the message
+ * @portid: netlink port
+ * @cb: Control block containing additional options
+ * @hash: hash to dump
+ * @bucket: bucket index to dump
+ * @idx_skip: How many entries to skip
+ *
+ * Return: 0 or error code.
+ */
+static int
+batadv_dat_cache_dump_bucket(struct sk_buff *msg, u32 portid,
+ struct netlink_callback *cb,
+ struct batadv_hashtable *hash, unsigned int bucket,
+ int *idx_skip)
+{
+ struct batadv_dat_entry *dat_entry;
+ int idx = 0;
+
+ spin_lock_bh(&hash->list_locks[bucket]);
+ cb->seq = atomic_read(&hash->generation) << 1 | 1;
+
+ hlist_for_each_entry(dat_entry, &hash->table[bucket], hash_entry) {
+ if (idx < *idx_skip)
+ goto skip;
+
+ if (batadv_dat_cache_dump_entry(msg, portid, cb, dat_entry)) {
+ spin_unlock_bh(&hash->list_locks[bucket]);
+ *idx_skip = idx;
+
+ return -EMSGSIZE;
+ }
+
+skip:
+ idx++;
+ }
+ spin_unlock_bh(&hash->list_locks[bucket]);
+
+ return 0;
+}
+
+/**
+ * batadv_dat_cache_dump() - dump DAT cache table to a netlink socket
+ * @msg: buffer for the message
+ * @cb: callback structure containing arguments
+ *
+ * Return: message length.
+ */
+int batadv_dat_cache_dump(struct sk_buff *msg, struct netlink_callback *cb)
+{
+ struct batadv_hard_iface *primary_if = NULL;
+ int portid = NETLINK_CB(cb->skb).portid;
+ struct net *net = sock_net(cb->skb->sk);
+ struct net_device *soft_iface;
+ struct batadv_hashtable *hash;
+ struct batadv_priv *bat_priv;
+ int bucket = cb->args[0];
+ int idx = cb->args[1];
+ int ifindex;
+ int ret = 0;
+
+ ifindex = batadv_netlink_get_ifindex(cb->nlh,
+ BATADV_ATTR_MESH_IFINDEX);
+ if (!ifindex)
+ return -EINVAL;
+
+ soft_iface = dev_get_by_index(net, ifindex);
+ if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ bat_priv = netdev_priv(soft_iface);
+ hash = bat_priv->dat.hash;
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ while (bucket < hash->size) {
+ if (batadv_dat_cache_dump_bucket(msg, portid, cb, hash, bucket,
+ &idx))
+ break;
+
+ bucket++;
+ idx = 0;
+ }
+
+ cb->args[0] = bucket;
+ cb->args[1] = idx;
+
+ ret = msg->len;
+
+out:
+ batadv_hardif_put(primary_if);
+
+ dev_put(soft_iface);
+
+ return ret;
+}
+
+/**
+ * batadv_arp_get_type() - parse an ARP packet and gets the type
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: packet to analyse
+ * @hdr_size: size of the possible header before the ARP packet in the skb
+ *
+ * Return: the ARP type if the skb contains a valid ARP packet, 0 otherwise.
+ */
+static u16 batadv_arp_get_type(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, int hdr_size)
+{
+ struct arphdr *arphdr;
+ struct ethhdr *ethhdr;
+ __be32 ip_src, ip_dst;
+ u8 *hw_src, *hw_dst;
+ u16 type = 0;
+
+ /* pull the ethernet header */
+ if (unlikely(!pskb_may_pull(skb, hdr_size + ETH_HLEN)))
+ goto out;
+
+ ethhdr = (struct ethhdr *)(skb->data + hdr_size);
+
+ if (ethhdr->h_proto != htons(ETH_P_ARP))
+ goto out;
+
+ /* pull the ARP payload */
+ if (unlikely(!pskb_may_pull(skb, hdr_size + ETH_HLEN +
+ arp_hdr_len(skb->dev))))
+ goto out;
+
+ arphdr = (struct arphdr *)(skb->data + hdr_size + ETH_HLEN);
+
+ /* check whether the ARP packet carries a valid IP information */
+ if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
+ goto out;
+
+ if (arphdr->ar_pro != htons(ETH_P_IP))
+ goto out;
+
+ if (arphdr->ar_hln != ETH_ALEN)
+ goto out;
+
+ if (arphdr->ar_pln != 4)
+ goto out;
+
+ /* Check for bad reply/request. If the ARP message is not sane, DAT
+ * will simply ignore it
+ */
+ ip_src = batadv_arp_ip_src(skb, hdr_size);
+ ip_dst = batadv_arp_ip_dst(skb, hdr_size);
+ if (ipv4_is_loopback(ip_src) || ipv4_is_multicast(ip_src) ||
+ ipv4_is_loopback(ip_dst) || ipv4_is_multicast(ip_dst) ||
+ ipv4_is_zeronet(ip_src) || ipv4_is_lbcast(ip_src) ||
+ ipv4_is_zeronet(ip_dst) || ipv4_is_lbcast(ip_dst))
+ goto out;
+
+ hw_src = batadv_arp_hw_src(skb, hdr_size);
+ if (is_zero_ether_addr(hw_src) || is_multicast_ether_addr(hw_src))
+ goto out;
+
+ /* don't care about the destination MAC address in ARP requests */
+ if (arphdr->ar_op != htons(ARPOP_REQUEST)) {
+ hw_dst = batadv_arp_hw_dst(skb, hdr_size);
+ if (is_zero_ether_addr(hw_dst) ||
+ is_multicast_ether_addr(hw_dst))
+ goto out;
+ }
+
+ type = ntohs(arphdr->ar_op);
+out:
+ return type;
+}
+
+/**
+ * batadv_dat_get_vid() - extract the VLAN identifier from skb if any
+ * @skb: the buffer containing the packet to extract the VID from
+ * @hdr_size: the size of the batman-adv header encapsulating the packet
+ *
+ * Return: If the packet embedded in the skb is vlan tagged this function
+ * returns the VID with the BATADV_VLAN_HAS_TAG flag. Otherwise BATADV_NO_FLAGS
+ * is returned.
+ */
+static unsigned short batadv_dat_get_vid(struct sk_buff *skb, int *hdr_size)
+{
+ unsigned short vid;
+
+ vid = batadv_get_vid(skb, *hdr_size);
+
+	/* ARP parsing functions skip ahead by hdr_size + ETH_HLEN. If the
+	 * header contained in the packet is a VLAN one (which is longer),
+	 * hdr_size is updated so that those functions still skip the correct
+	 * amount of bytes.
+ */
+ if (vid & BATADV_VLAN_HAS_TAG)
+ *hdr_size += VLAN_HLEN;
+
+ return vid;
+}
+
+/**
+ * batadv_dat_arp_create_reply() - create an ARP Reply
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ip_src: ARP sender IP
+ * @ip_dst: ARP target IP
+ * @hw_src: Ethernet source and ARP sender MAC
+ * @hw_dst: Ethernet destination and ARP target MAC
+ * @vid: VLAN identifier (optional, set to zero otherwise)
+ *
+ * Creates an ARP Reply from the given values, optionally encapsulated in a
+ * VLAN header.
+ *
+ * Return: An skb containing an ARP Reply.
+ */
+static struct sk_buff *
+batadv_dat_arp_create_reply(struct batadv_priv *bat_priv, __be32 ip_src,
+ __be32 ip_dst, u8 *hw_src, u8 *hw_dst,
+ unsigned short vid)
+{
+ struct sk_buff *skb;
+
+ skb = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_dst, bat_priv->soft_iface,
+ ip_src, hw_dst, hw_src, hw_dst);
+ if (!skb)
+ return NULL;
+
+ skb_reset_mac_header(skb);
+
+ if (vid & BATADV_VLAN_HAS_TAG)
+ skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
+ vid & VLAN_VID_MASK);
+
+ return skb;
+}
+
+/**
+ * batadv_dat_snoop_outgoing_arp_request() - snoop the ARP request and try to
+ * answer using DAT
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: packet to check
+ *
+ * Return: true if the message has been sent to the dht candidates, false
+ * otherwise. In case of a positive return value, the message has to be enqueued
+ * to permit the fallback.
+ */
+bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
+ struct sk_buff *skb)
+{
+ u16 type = 0;
+ __be32 ip_dst, ip_src;
+ u8 *hw_src;
+ bool ret = false;
+ struct batadv_dat_entry *dat_entry = NULL;
+ struct sk_buff *skb_new;
+ struct net_device *soft_iface = bat_priv->soft_iface;
+ int hdr_size = 0;
+ unsigned short vid;
+
+ if (!atomic_read(&bat_priv->distributed_arp_table))
+ goto out;
+
+ vid = batadv_dat_get_vid(skb, &hdr_size);
+
+ type = batadv_arp_get_type(bat_priv, skb, hdr_size);
+ /* If the node gets an ARP_REQUEST it has to send a DHT_GET unicast
+ * message to the selected DHT candidates
+ */
+ if (type != ARPOP_REQUEST)
+ goto out;
+
+ batadv_dbg_arp(bat_priv, skb, hdr_size, "Parsing outgoing ARP REQUEST");
+
+ ip_src = batadv_arp_ip_src(skb, hdr_size);
+ hw_src = batadv_arp_hw_src(skb, hdr_size);
+ ip_dst = batadv_arp_ip_dst(skb, hdr_size);
+
+ batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
+
+ dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst, vid);
+ if (dat_entry) {
+ /* If the ARP request is destined for a local client the local
+ * client will answer itself. DAT would only generate a
+ * duplicate packet.
+ *
+ * Moreover, if the soft-interface is enslaved into a bridge, an
+ * additional DAT answer may trigger kernel warnings about
+ * a packet coming from the wrong port.
+ */
+ if (batadv_is_my_client(bat_priv, dat_entry->mac_addr, vid)) {
+ ret = true;
+ goto out;
+ }
+
+ /* If BLA is enabled, only send ARP replies if we have claimed
+ * the destination for the ARP request or if no one else of
+ * the backbone gws belonging to our backbone has claimed the
+ * destination.
+ */
+ if (!batadv_bla_check_claim(bat_priv,
+ dat_entry->mac_addr, vid)) {
+ batadv_dbg(BATADV_DBG_DAT, bat_priv,
+ "Device %pM claimed by another backbone gw. Don't send ARP reply!",
+ dat_entry->mac_addr);
+ ret = true;
+ goto out;
+ }
+
+ skb_new = batadv_dat_arp_create_reply(bat_priv, ip_dst, ip_src,
+ dat_entry->mac_addr,
+ hw_src, vid);
+ if (!skb_new)
+ goto out;
+
+ skb_new->protocol = eth_type_trans(skb_new, soft_iface);
+
+ batadv_inc_counter(bat_priv, BATADV_CNT_RX);
+ batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
+ skb->len + ETH_HLEN + hdr_size);
+
+ netif_rx(skb_new);
+ batadv_dbg(BATADV_DBG_DAT, bat_priv, "ARP request replied locally\n");
+ ret = true;
+ } else {
+ /* Send the request to the DHT */
+ ret = batadv_dat_forward_data(bat_priv, skb, ip_dst, vid,
+ BATADV_P_DAT_DHT_GET);
+ }
+out:
+ batadv_dat_entry_put(dat_entry);
+ return ret;
+}
+
+/**
+ * batadv_dat_snoop_incoming_arp_request() - snoop the ARP request and try to
+ * answer using the local DAT storage
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: packet to check
+ * @hdr_size: size of the encapsulation header
+ *
+ * Return: true if the request has been answered, false otherwise.
+ */
+bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, int hdr_size)
+{
+ u16 type;
+ __be32 ip_src, ip_dst;
+ u8 *hw_src;
+ struct sk_buff *skb_new;
+ struct batadv_dat_entry *dat_entry = NULL;
+ bool ret = false;
+ unsigned short vid;
+ int err;
+
+ if (!atomic_read(&bat_priv->distributed_arp_table))
+ goto out;
+
+ vid = batadv_dat_get_vid(skb, &hdr_size);
+
+ type = batadv_arp_get_type(bat_priv, skb, hdr_size);
+ if (type != ARPOP_REQUEST)
+ goto out;
+
+ hw_src = batadv_arp_hw_src(skb, hdr_size);
+ ip_src = batadv_arp_ip_src(skb, hdr_size);
+ ip_dst = batadv_arp_ip_dst(skb, hdr_size);
+
+ batadv_dbg_arp(bat_priv, skb, hdr_size, "Parsing incoming ARP REQUEST");
+
+ batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
+
+ dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst, vid);
+ if (!dat_entry)
+ goto out;
+
+ skb_new = batadv_dat_arp_create_reply(bat_priv, ip_dst, ip_src,
+ dat_entry->mac_addr, hw_src, vid);
+ if (!skb_new)
+ goto out;
+
+	/* To preserve backwards compatibility, the node has to choose the outgoing
+ * format based on the incoming request packet type. The assumption is
+ * that a node not using the 4addr packet format doesn't support it.
+ */
+ if (hdr_size == sizeof(struct batadv_unicast_4addr_packet))
+ err = batadv_send_skb_via_tt_4addr(bat_priv, skb_new,
+ BATADV_P_DAT_CACHE_REPLY,
+ NULL, vid);
+ else
+ err = batadv_send_skb_via_tt(bat_priv, skb_new, NULL, vid);
+
+ if (err != NET_XMIT_DROP) {
+ batadv_inc_counter(bat_priv, BATADV_CNT_DAT_CACHED_REPLY_TX);
+ ret = true;
+ }
+out:
+ batadv_dat_entry_put(dat_entry);
+ if (ret)
+ kfree_skb(skb);
+ return ret;
+}
+
+/**
+ * batadv_dat_snoop_outgoing_arp_reply() - snoop the ARP reply and fill the DHT
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: packet to check
+ */
+void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
+ struct sk_buff *skb)
+{
+ u16 type;
+ __be32 ip_src, ip_dst;
+ u8 *hw_src, *hw_dst;
+ int hdr_size = 0;
+ unsigned short vid;
+
+ if (!atomic_read(&bat_priv->distributed_arp_table))
+ return;
+
+ vid = batadv_dat_get_vid(skb, &hdr_size);
+
+ type = batadv_arp_get_type(bat_priv, skb, hdr_size);
+ if (type != ARPOP_REPLY)
+ return;
+
+ batadv_dbg_arp(bat_priv, skb, hdr_size, "Parsing outgoing ARP REPLY");
+
+ hw_src = batadv_arp_hw_src(skb, hdr_size);
+ ip_src = batadv_arp_ip_src(skb, hdr_size);
+ hw_dst = batadv_arp_hw_dst(skb, hdr_size);
+ ip_dst = batadv_arp_ip_dst(skb, hdr_size);
+
+ batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
+ batadv_dat_entry_add(bat_priv, ip_dst, hw_dst, vid);
+
+ /* Send the ARP reply to the candidates for both the IP addresses that
+ * the node obtained from the ARP reply
+ */
+ batadv_dat_forward_data(bat_priv, skb, ip_src, vid,
+ BATADV_P_DAT_DHT_PUT);
+ batadv_dat_forward_data(bat_priv, skb, ip_dst, vid,
+ BATADV_P_DAT_DHT_PUT);
+}
+
+/**
+ * batadv_dat_snoop_incoming_arp_reply() - snoop the ARP reply and fill the
+ * local DAT storage only
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: packet to check
+ * @hdr_size: size of the encapsulation header
+ *
+ * Return: true if the packet was snooped and consumed by DAT. False if the
+ * packet has to be delivered to the interface.
+ */
+bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, int hdr_size)
+{
+ struct batadv_dat_entry *dat_entry = NULL;
+ u16 type;
+ __be32 ip_src, ip_dst;
+ u8 *hw_src, *hw_dst;
+ bool dropped = false;
+ unsigned short vid;
+
+ if (!atomic_read(&bat_priv->distributed_arp_table))
+ goto out;
+
+ vid = batadv_dat_get_vid(skb, &hdr_size);
+
+ type = batadv_arp_get_type(bat_priv, skb, hdr_size);
+ if (type != ARPOP_REPLY)
+ goto out;
+
+ batadv_dbg_arp(bat_priv, skb, hdr_size, "Parsing incoming ARP REPLY");
+
+ hw_src = batadv_arp_hw_src(skb, hdr_size);
+ ip_src = batadv_arp_ip_src(skb, hdr_size);
+ hw_dst = batadv_arp_hw_dst(skb, hdr_size);
+ ip_dst = batadv_arp_ip_dst(skb, hdr_size);
+
+ /* If ip_dst is already in cache and has the right mac address,
+ * drop this frame if this ARP reply is destined for us because it's
+ * most probably an ARP reply generated by another node of the DHT.
+	 * We have most probably already received a reply earlier. Delivering
+	 * this frame would lead to the same ARP reply being received twice.
+ */
+ dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_src, vid);
+ if (dat_entry && batadv_compare_eth(hw_src, dat_entry->mac_addr)) {
+ batadv_dbg(BATADV_DBG_DAT, bat_priv, "Doubled ARP reply removed: ARP MSG = [src: %pM-%pI4 dst: %pM-%pI4]; dat_entry: %pM-%pI4\n",
+ hw_src, &ip_src, hw_dst, &ip_dst,
+ dat_entry->mac_addr, &dat_entry->ip);
+ dropped = true;
+ }
+
+ /* Update our internal cache with both the IP addresses the node got
+ * within the ARP reply
+ */
+ batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
+ batadv_dat_entry_add(bat_priv, ip_dst, hw_dst, vid);
+
+ if (dropped)
+ goto out;
+
+	/* If BLA is enabled, only forward ARP replies if we have claimed the
+	 * source of the ARP reply or if no one else of the same backbone has
+	 * already claimed that client. This prevents different gateways to the
+	 * same backbone from all forwarding the ARP reply, which would lead to
+	 * multiple replies in the backbone.
+	 */
+ if (!batadv_bla_check_claim(bat_priv, hw_src, vid)) {
+ batadv_dbg(BATADV_DBG_DAT, bat_priv,
+ "Device %pM claimed by another backbone gw. Drop ARP reply.\n",
+ hw_src);
+ dropped = true;
+ goto out;
+ }
+
+ /* if this REPLY is directed to a client of mine, let's deliver the
+ * packet to the interface
+ */
+ dropped = !batadv_is_my_client(bat_priv, hw_dst, vid);
+
+ /* if this REPLY is sent on behalf of a client of mine, let's drop the
+ * packet because the client will reply by itself
+ */
+ dropped |= batadv_is_my_client(bat_priv, hw_src, vid);
+out:
+ if (dropped)
+ kfree_skb(skb);
+ batadv_dat_entry_put(dat_entry);
+ /* if dropped == false -> deliver to the interface */
+ return dropped;
+}
+
+/**
+ * batadv_dat_check_dhcp_ipudp() - check skb for IP+UDP headers valid for DHCP
+ * @skb: the packet to check
+ * @ip_src: a buffer to store the IPv4 source address in
+ *
+ * Checks whether the given skb has an IP and UDP header valid for a DHCP
+ * message from a DHCP server and, if so, stores the IPv4 source address in
+ * the provided buffer.
+ *
+ * Return: True if valid, false otherwise.
+ */
+static bool
+batadv_dat_check_dhcp_ipudp(struct sk_buff *skb, __be32 *ip_src)
+{
+ unsigned int offset = skb_network_offset(skb);
+ struct udphdr *udphdr, _udphdr;
+ struct iphdr *iphdr, _iphdr;
+
+ iphdr = skb_header_pointer(skb, offset, sizeof(_iphdr), &_iphdr);
+ if (!iphdr || iphdr->version != 4 || iphdr->ihl * 4 < sizeof(_iphdr))
+ return false;
+
+ if (iphdr->protocol != IPPROTO_UDP)
+ return false;
+
+ offset += iphdr->ihl * 4;
+ skb_set_transport_header(skb, offset);
+
+ udphdr = skb_header_pointer(skb, offset, sizeof(_udphdr), &_udphdr);
+ if (!udphdr || udphdr->source != htons(67))
+ return false;
+
+ *ip_src = get_unaligned(&iphdr->saddr);
+
+ return true;
+}
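+
+/* Usage sketch (illustrative only): for a linear skb carrying
+ * "IPv4 | UDP (source port 67) | DHCP", the check above succeeds and *ip_src
+ * receives iphdr->saddr. Client-originated packets (UDP source port 68) and
+ * non-UDP packets are rejected.
+ */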
+
+/**
+ * batadv_dat_check_dhcp() - examine packet for valid DHCP message
+ * @skb: the packet to check
+ * @proto: ethernet protocol hint (behind a potential vlan)
+ * @ip_src: a buffer to store the IPv4 source address in
+ *
+ * Checks whether the given skb is a valid DHCP packet and, if so, stores the
+ * IPv4 source address in the provided buffer.
+ *
+ * Caller needs to ensure that the skb network header is set correctly.
+ *
+ * Return: If skb is a valid DHCP packet, then returns its op code
+ * (e.g. BOOTREPLY vs. BOOTREQUEST). Otherwise returns -EINVAL.
+ */
+static int
+batadv_dat_check_dhcp(struct sk_buff *skb, __be16 proto, __be32 *ip_src)
+{
+ __be32 *magic, _magic;
+ unsigned int offset;
+ struct {
+ __u8 op;
+ __u8 htype;
+ __u8 hlen;
+ __u8 hops;
+ } *dhcp_h, _dhcp_h;
+
+ if (proto != htons(ETH_P_IP))
+ return -EINVAL;
+
+ if (!batadv_dat_check_dhcp_ipudp(skb, ip_src))
+ return -EINVAL;
+
+ offset = skb_transport_offset(skb) + sizeof(struct udphdr);
+ if (skb->len < offset + sizeof(struct batadv_dhcp_packet))
+ return -EINVAL;
+
+ dhcp_h = skb_header_pointer(skb, offset, sizeof(_dhcp_h), &_dhcp_h);
+ if (!dhcp_h || dhcp_h->htype != BATADV_HTYPE_ETHERNET ||
+ dhcp_h->hlen != ETH_ALEN)
+ return -EINVAL;
+
+ offset += offsetof(struct batadv_dhcp_packet, magic);
+
+ magic = skb_header_pointer(skb, offset, sizeof(_magic), &_magic);
+ if (!magic || get_unaligned(magic) != htonl(BATADV_DHCP_MAGIC))
+ return -EINVAL;
+
+ return dhcp_h->op;
+}
+
+/**
+ * batadv_dat_get_dhcp_message_type() - get message type of a DHCP packet
+ * @skb: the DHCP packet to parse
+ *
+ * Iterates over the DHCP options of the given DHCP packet to find a
+ * DHCP Message Type option and parse it.
+ *
+ * Caller needs to ensure that the given skb is a valid DHCP packet and
+ * that the skb transport header is set correctly.
+ *
+ * Return: The found DHCP message type value, if found. -EINVAL otherwise.
+ */
+static int batadv_dat_get_dhcp_message_type(struct sk_buff *skb)
+{
+ unsigned int offset = skb_transport_offset(skb) + sizeof(struct udphdr);
+ u8 *type, _type;
+ struct {
+ u8 type;
+ u8 len;
+ } *tl, _tl;
+
+ offset += sizeof(struct batadv_dhcp_packet);
+
+ while ((tl = skb_header_pointer(skb, offset, sizeof(_tl), &_tl))) {
+ if (tl->type == BATADV_DHCP_OPT_MSG_TYPE)
+ break;
+
+ if (tl->type == BATADV_DHCP_OPT_END)
+ break;
+
+ if (tl->type == BATADV_DHCP_OPT_PAD)
+ offset++;
+ else
+ offset += tl->len + sizeof(_tl);
+ }
+
+ /* Option Overload Code not supported */
+ if (!tl || tl->type != BATADV_DHCP_OPT_MSG_TYPE ||
+ tl->len != sizeof(_type))
+ return -EINVAL;
+
+ offset += sizeof(_tl);
+
+ type = skb_header_pointer(skb, offset, sizeof(_type), &_type);
+ if (!type)
+ return -EINVAL;
+
+ return *type;
+}
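+
+/* Worked example (illustrative only): given the option stream
+ * "0x00 (PAD), 0x35 0x01 0x05 (MSG_TYPE, len 1, value DHCPACK), 0xff (END)",
+ * the loop above skips the single PAD byte, stops at the MSG_TYPE
+ * type/length pair and the function returns 5 (DHCPACK).
+ */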
+
+/**
+ * batadv_dat_dhcp_get_yiaddr() - get yiaddr from a DHCP packet
+ * @skb: the DHCP packet to parse
+ * @buf: a buffer to store the yiaddr in
+ *
+ * Caller needs to ensure that the given skb is a valid DHCP packet and
+ * that the skb transport header is set correctly.
+ *
+ * Return: True on success, false otherwise.
+ */
+static bool batadv_dat_dhcp_get_yiaddr(struct sk_buff *skb, __be32 *buf)
+{
+ unsigned int offset = skb_transport_offset(skb) + sizeof(struct udphdr);
+ __be32 *yiaddr;
+
+ offset += offsetof(struct batadv_dhcp_packet, yiaddr);
+ yiaddr = skb_header_pointer(skb, offset, BATADV_DHCP_YIADDR_LEN, buf);
+
+ if (!yiaddr)
+ return false;
+
+ if (yiaddr != buf)
+ *buf = get_unaligned(yiaddr);
+
+ return true;
+}
+
+/**
+ * batadv_dat_get_dhcp_chaddr() - get chaddr from a DHCP packet
+ * @skb: the DHCP packet to parse
+ * @buf: a buffer to store the chaddr in
+ *
+ * Caller needs to ensure that the given skb is a valid DHCP packet and
+ * that the skb transport header is set correctly.
+ *
+ * Return: True on success, false otherwise.
+ */
+static bool batadv_dat_get_dhcp_chaddr(struct sk_buff *skb, u8 *buf)
+{
+ unsigned int offset = skb_transport_offset(skb) + sizeof(struct udphdr);
+ u8 *chaddr;
+
+ offset += offsetof(struct batadv_dhcp_packet, chaddr);
+ chaddr = skb_header_pointer(skb, offset, BATADV_DHCP_CHADDR_LEN, buf);
+
+ if (!chaddr)
+ return false;
+
+ if (chaddr != buf)
+ memcpy(buf, chaddr, BATADV_DHCP_CHADDR_LEN);
+
+ return true;
+}
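+
+/* Note on the "chaddr != buf" pattern above (and "yiaddr != buf" before):
+ * skb_header_pointer() returns a pointer directly into the skb when the
+ * requested bytes sit in the linear data area and only falls back to copying
+ * them into the given buffer otherwise. The explicit copy is thus needed
+ * exactly when no copy has happened yet.
+ */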
+
+/**
+ * batadv_dat_put_dhcp() - puts addresses from a DHCP packet into the DHT and
+ * DAT cache
+ * @bat_priv: the bat priv with all the soft interface information
+ * @chaddr: the DHCP client MAC address
+ * @yiaddr: the DHCP client IP address
+ * @hw_dst: the DHCP server MAC address
+ * @ip_dst: the DHCP server IP address
+ * @vid: VLAN identifier
+ *
+ * Adds given MAC/IP pairs to the local DAT cache and propagates them further
+ * into the DHT.
+ *
+ * For the DHT propagation, client MAC + IP will appear as the ARP Reply
+ * transmitter (and hw_dst/ip_dst as the target).
+ */
+static void batadv_dat_put_dhcp(struct batadv_priv *bat_priv, u8 *chaddr,
+ __be32 yiaddr, u8 *hw_dst, __be32 ip_dst,
+ unsigned short vid)
+{
+ struct sk_buff *skb;
+
+ skb = batadv_dat_arp_create_reply(bat_priv, yiaddr, ip_dst, chaddr,
+ hw_dst, vid);
+ if (!skb)
+ return;
+
+ skb_set_network_header(skb, ETH_HLEN);
+
+ batadv_dat_entry_add(bat_priv, yiaddr, chaddr, vid);
+ batadv_dat_entry_add(bat_priv, ip_dst, hw_dst, vid);
+
+ batadv_dat_forward_data(bat_priv, skb, yiaddr, vid,
+ BATADV_P_DAT_DHT_PUT);
+ batadv_dat_forward_data(bat_priv, skb, ip_dst, vid,
+ BATADV_P_DAT_DHT_PUT);
+
+ consume_skb(skb);
+
+ batadv_dbg(BATADV_DBG_DAT, bat_priv,
+ "Snooped from outgoing DHCPACK (server address): %pI4, %pM (vid: %i)\n",
+ &ip_dst, hw_dst, batadv_print_vid(vid));
+ batadv_dbg(BATADV_DBG_DAT, bat_priv,
+ "Snooped from outgoing DHCPACK (client address): %pI4, %pM (vid: %i)\n",
+ &yiaddr, chaddr, batadv_print_vid(vid));
+}
+
+/**
+ * batadv_dat_check_dhcp_ack() - examine packet for valid DHCP message
+ * @skb: the packet to check
+ * @proto: ethernet protocol hint (behind a potential vlan)
+ * @ip_src: a buffer to store the IPv4 source address in
+ * @chaddr: a buffer to store the DHCP Client Hardware Address in
+ * @yiaddr: a buffer to store the DHCP Your IP Address in
+ *
+ * Checks whether the given skb is a valid DHCPACK and, if so, stores the
+ * IPv4 server source address (ip_src), client MAC address (chaddr) and client
+ * IPv4 address (yiaddr) in the provided buffers.
+ *
+ * Caller needs to ensure that the skb network header is set correctly.
+ *
+ * Return: True if the skb is a valid DHCPACK. False otherwise.
+ */
+static bool
+batadv_dat_check_dhcp_ack(struct sk_buff *skb, __be16 proto, __be32 *ip_src,
+ u8 *chaddr, __be32 *yiaddr)
+{
+ int type;
+
+ type = batadv_dat_check_dhcp(skb, proto, ip_src);
+ if (type != BATADV_BOOTREPLY)
+ return false;
+
+ type = batadv_dat_get_dhcp_message_type(skb);
+ if (type != BATADV_DHCPACK)
+ return false;
+
+ if (!batadv_dat_dhcp_get_yiaddr(skb, yiaddr))
+ return false;
+
+ if (!batadv_dat_get_dhcp_chaddr(skb, chaddr))
+ return false;
+
+ return true;
+}
+
+/**
+ * batadv_dat_snoop_outgoing_dhcp_ack() - snoop DHCPACK and fill DAT with it
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the packet to snoop
+ * @proto: ethernet protocol hint (behind a potential vlan)
+ * @vid: VLAN identifier
+ *
+ * This function first checks whether the given skb is a valid DHCPACK. If
+ * so then its source MAC and IP as well as its DHCP Client Hardware Address
+ * field and DHCP Your IP Address field are added to the local DAT cache and
+ * propagated into the DHT.
+ *
+ * Caller needs to ensure that the skb mac and network headers are set
+ * correctly.
+ */
+void batadv_dat_snoop_outgoing_dhcp_ack(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ __be16 proto,
+ unsigned short vid)
+{
+ u8 chaddr[BATADV_DHCP_CHADDR_LEN];
+ __be32 ip_src, yiaddr;
+
+ if (!atomic_read(&bat_priv->distributed_arp_table))
+ return;
+
+ if (!batadv_dat_check_dhcp_ack(skb, proto, &ip_src, chaddr, &yiaddr))
+ return;
+
+ batadv_dat_put_dhcp(bat_priv, chaddr, yiaddr, eth_hdr(skb)->h_source,
+ ip_src, vid);
+}
+
+/**
+ * batadv_dat_snoop_incoming_dhcp_ack() - snoop DHCPACK and fill DAT cache
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the packet to snoop
+ * @hdr_size: header size, up to the tail of the batman-adv header
+ *
+ * This function first checks whether the given skb is a valid DHCPACK. If
+ * so then its source MAC and IP as well as its DHCP Client Hardware Address
+ * field and DHCP Your IP Address field are added to the local DAT cache.
+ */
+void batadv_dat_snoop_incoming_dhcp_ack(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, int hdr_size)
+{
+ u8 chaddr[BATADV_DHCP_CHADDR_LEN];
+ struct ethhdr *ethhdr;
+ __be32 ip_src, yiaddr;
+ unsigned short vid;
+ __be16 proto;
+ u8 *hw_src;
+
+ if (!atomic_read(&bat_priv->distributed_arp_table))
+ return;
+
+ if (unlikely(!pskb_may_pull(skb, hdr_size + ETH_HLEN)))
+ return;
+
+ ethhdr = (struct ethhdr *)(skb->data + hdr_size);
+ skb_set_network_header(skb, hdr_size + ETH_HLEN);
+ proto = ethhdr->h_proto;
+
+ if (!batadv_dat_check_dhcp_ack(skb, proto, &ip_src, chaddr, &yiaddr))
+ return;
+
+ hw_src = ethhdr->h_source;
+ vid = batadv_dat_get_vid(skb, &hdr_size);
+
+ batadv_dat_entry_add(bat_priv, yiaddr, chaddr, vid);
+ batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
+
+ batadv_dbg(BATADV_DBG_DAT, bat_priv,
+ "Snooped from incoming DHCPACK (server address): %pI4, %pM (vid: %i)\n",
+ &ip_src, hw_src, batadv_print_vid(vid));
+ batadv_dbg(BATADV_DBG_DAT, bat_priv,
+ "Snooped from incoming DHCPACK (client address): %pI4, %pM (vid: %i)\n",
+ &yiaddr, chaddr, batadv_print_vid(vid));
+}
+
+/**
+ * batadv_dat_drop_broadcast_packet() - check if an ARP request has to be
+ * dropped (because the node has already obtained the reply via DAT) or not
+ * @bat_priv: the bat priv with all the soft interface information
+ * @forw_packet: the broadcast packet
+ *
+ * Return: true if the node can drop the packet, false otherwise.
+ */
+bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv,
+ struct batadv_forw_packet *forw_packet)
+{
+ u16 type;
+ __be32 ip_dst;
+ struct batadv_dat_entry *dat_entry = NULL;
+ bool ret = false;
+ int hdr_size = sizeof(struct batadv_bcast_packet);
+ unsigned short vid;
+
+ if (!atomic_read(&bat_priv->distributed_arp_table))
+ goto out;
+
+	/* If this packet is an ARP_REQUEST and the node already has the
+	 * information that it is going to ask for, then the packet can be
+	 * dropped
+	 */
+ if (batadv_forw_packet_is_rebroadcast(forw_packet))
+ goto out;
+
+ vid = batadv_dat_get_vid(forw_packet->skb, &hdr_size);
+
+ type = batadv_arp_get_type(bat_priv, forw_packet->skb, hdr_size);
+ if (type != ARPOP_REQUEST)
+ goto out;
+
+ ip_dst = batadv_arp_ip_dst(forw_packet->skb, hdr_size);
+ dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst, vid);
+ /* check if the node already got this entry */
+ if (!dat_entry) {
+ batadv_dbg(BATADV_DBG_DAT, bat_priv,
+ "ARP Request for %pI4: fallback\n", &ip_dst);
+ goto out;
+ }
+
+ batadv_dbg(BATADV_DBG_DAT, bat_priv,
+ "ARP Request for %pI4: fallback prevented\n", &ip_dst);
+ ret = true;
+
+out:
+ batadv_dat_entry_put(dat_entry);
+ return ret;
+}
diff --git a/net/batman-adv/distributed-arp-table.h b/net/batman-adv/distributed-arp-table.h
new file mode 100644
index 0000000000..bed7f3d208
--- /dev/null
+++ b/net/batman-adv/distributed-arp-table.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Antonio Quartulli
+ */
+
+#ifndef _NET_BATMAN_ADV_DISTRIBUTED_ARP_TABLE_H_
+#define _NET_BATMAN_ADV_DISTRIBUTED_ARP_TABLE_H_
+
+#include "main.h"
+
+#include <linux/compiler.h>
+#include <linux/netdevice.h>
+#include <linux/netlink.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <uapi/linux/batadv_packet.h>
+
+#include "originator.h"
+
+#ifdef CONFIG_BATMAN_ADV_DAT
+
+/* BATADV_DAT_ADDR_MAX - maximum address value in the DHT space */
+#define BATADV_DAT_ADDR_MAX ((batadv_dat_addr_t)~(batadv_dat_addr_t)0)
+
+void batadv_dat_status_update(struct net_device *net_dev);
+bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
+ struct sk_buff *skb);
+bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, int hdr_size);
+void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
+ struct sk_buff *skb);
+bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, int hdr_size);
+void batadv_dat_snoop_outgoing_dhcp_ack(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ __be16 proto,
+ unsigned short vid);
+void batadv_dat_snoop_incoming_dhcp_ack(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, int hdr_size);
+bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv,
+ struct batadv_forw_packet *forw_packet);
+
+/**
+ * batadv_dat_init_orig_node_addr() - assign a DAT address to the orig_node
+ * @orig_node: the node to assign the DAT address to
+ */
+static inline void
+batadv_dat_init_orig_node_addr(struct batadv_orig_node *orig_node)
+{
+ u32 addr;
+
+ addr = batadv_choose_orig(orig_node->orig, BATADV_DAT_ADDR_MAX);
+ orig_node->dat_addr = (batadv_dat_addr_t)addr;
+}
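+
+/* Illustration (assuming batadv_dat_addr_t is a 16 bit type, as it usually
+ * is): BATADV_DAT_ADDR_MAX evaluates to 0xffff and batadv_choose_orig()
+ * hashes the originator MAC into this space, giving every node a fixed,
+ * well-distributed position in the DHT.
+ */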
+
+/**
+ * batadv_dat_init_own_addr() - assign a DAT address to the node itself
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: a pointer to the primary interface
+ */
+static inline void
+batadv_dat_init_own_addr(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if)
+{
+ u32 addr;
+
+ addr = batadv_choose_orig(primary_if->net_dev->dev_addr,
+ BATADV_DAT_ADDR_MAX);
+
+ bat_priv->dat.addr = (batadv_dat_addr_t)addr;
+}
+
+int batadv_dat_init(struct batadv_priv *bat_priv);
+void batadv_dat_free(struct batadv_priv *bat_priv);
+int batadv_dat_cache_dump(struct sk_buff *msg, struct netlink_callback *cb);
+
+/**
+ * batadv_dat_inc_counter() - increment the correct DAT packet counter
+ * @bat_priv: the bat priv with all the soft interface information
+ * @subtype: the 4addr subtype of the packet to be counted
+ *
+ * Updates the ethtool statistics for the received packet if it is a DAT subtype
+ */
+static inline void batadv_dat_inc_counter(struct batadv_priv *bat_priv,
+ u8 subtype)
+{
+ switch (subtype) {
+ case BATADV_P_DAT_DHT_GET:
+ batadv_inc_counter(bat_priv,
+ BATADV_CNT_DAT_GET_RX);
+ break;
+ case BATADV_P_DAT_DHT_PUT:
+ batadv_inc_counter(bat_priv,
+ BATADV_CNT_DAT_PUT_RX);
+ break;
+ }
+}
+
+#else
+
+static inline void batadv_dat_status_update(struct net_device *net_dev)
+{
+}
+
+static inline bool
+batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
+ struct sk_buff *skb)
+{
+ return false;
+}
+
+static inline bool
+batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, int hdr_size)
+{
+ return false;
+}
+
+static inline void
+batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
+				    struct sk_buff *skb)
+{
+}
+
+static inline bool
+batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, int hdr_size)
+{
+ return false;
+}
+
+static inline void
+batadv_dat_snoop_outgoing_dhcp_ack(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, __be16 proto,
+ unsigned short vid)
+{
+}
+
+static inline void
+batadv_dat_snoop_incoming_dhcp_ack(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, int hdr_size)
+{
+}
+
+static inline bool
+batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv,
+ struct batadv_forw_packet *forw_packet)
+{
+ return false;
+}
+
+static inline void
+batadv_dat_init_orig_node_addr(struct batadv_orig_node *orig_node)
+{
+}
+
+static inline void batadv_dat_init_own_addr(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *iface)
+{
+}
+
+static inline int batadv_dat_init(struct batadv_priv *bat_priv)
+{
+ return 0;
+}
+
+static inline void batadv_dat_free(struct batadv_priv *bat_priv)
+{
+}
+
+static inline int
+batadv_dat_cache_dump(struct sk_buff *msg, struct netlink_callback *cb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void batadv_dat_inc_counter(struct batadv_priv *bat_priv,
+ u8 subtype)
+{
+}
+
+#endif /* CONFIG_BATMAN_ADV_DAT */
+
+#endif /* _NET_BATMAN_ADV_DISTRIBUTED_ARP_TABLE_H_ */
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
new file mode 100644
index 0000000000..c120c7c6d2
--- /dev/null
+++ b/net/batman-adv/fragmentation.c
@@ -0,0 +1,562 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Martin Hundebøll <martin@hundeboll.net>
+ */
+
+#include "fragmentation.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/byteorder/generic.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/gfp.h>
+#include <linux/if_ether.h>
+#include <linux/jiffies.h>
+#include <linux/lockdep.h>
+#include <linux/minmax.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <uapi/linux/batadv_packet.h>
+
+#include "hard-interface.h"
+#include "originator.h"
+#include "routing.h"
+#include "send.h"
+
+/**
+ * batadv_frag_clear_chain() - delete entries in the fragment buffer chain
+ * @head: head of chain with entries.
+ * @dropped: whether the chain is cleared because all fragments are dropped
+ *
+ * Free fragments in the passed hlist. Should be called with appropriate lock.
+ */
+static void batadv_frag_clear_chain(struct hlist_head *head, bool dropped)
+{
+ struct batadv_frag_list_entry *entry;
+ struct hlist_node *node;
+
+ hlist_for_each_entry_safe(entry, node, head, list) {
+ hlist_del(&entry->list);
+
+ if (dropped)
+ kfree_skb(entry->skb);
+ else
+ consume_skb(entry->skb);
+
+ kfree(entry);
+ }
+}
+
+/**
+ * batadv_frag_purge_orig() - free fragments associated to an orig
+ * @orig_node: originator to free fragments from
+ * @check_cb: optional function to tell if an entry should be purged
+ */
+void batadv_frag_purge_orig(struct batadv_orig_node *orig_node,
+ bool (*check_cb)(struct batadv_frag_table_entry *))
+{
+ struct batadv_frag_table_entry *chain;
+ u8 i;
+
+ for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
+ chain = &orig_node->fragments[i];
+ spin_lock_bh(&chain->lock);
+
+ if (!check_cb || check_cb(chain)) {
+ batadv_frag_clear_chain(&chain->fragment_list, true);
+ chain->size = 0;
+ }
+
+ spin_unlock_bh(&chain->lock);
+ }
+}
+
+/**
+ * batadv_frag_size_limit() - maximum possible size of packet to be fragmented
+ *
+ * Return: the maximum size of payload that can be fragmented.
+ */
+static int batadv_frag_size_limit(void)
+{
+ int limit = BATADV_FRAG_MAX_FRAG_SIZE;
+
+ limit -= sizeof(struct batadv_frag_packet);
+ limit *= BATADV_FRAG_MAX_FRAGMENTS;
+
+ return limit;
+}
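+
+/* Worked example (assuming the usual values BATADV_FRAG_MAX_FRAG_SIZE = 1400,
+ * sizeof(struct batadv_frag_packet) = 20 and BATADV_FRAG_MAX_FRAGMENTS = 16):
+ * the limit is (1400 - 20) * 16 = 22080 bytes of payload.
+ */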
+
+/**
+ * batadv_frag_init_chain() - check and prepare fragment chain for new fragment
+ * @chain: chain in fragments table to init
+ * @seqno: sequence number of the received fragment
+ *
+ * Make chain ready for a fragment with sequence number "seqno". Delete existing
+ * entries if they have an "old" sequence number.
+ *
+ * Caller must hold chain->lock.
+ *
+ * Return: true if chain is empty and the caller can just insert the new
+ * fragment without searching for the right position.
+ */
+static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
+ u16 seqno)
+{
+ lockdep_assert_held(&chain->lock);
+
+ if (chain->seqno == seqno)
+ return false;
+
+ if (!hlist_empty(&chain->fragment_list))
+ batadv_frag_clear_chain(&chain->fragment_list, true);
+
+ chain->size = 0;
+ chain->seqno = seqno;
+
+ return true;
+}
+
+/**
+ * batadv_frag_insert_packet() - insert a fragment into a fragment chain
+ * @orig_node: originator that the fragment was received from
+ * @skb: skb to insert
+ * @chain_out: list head to attach complete chains of fragments to
+ *
+ * Insert a new fragment into the reverse ordered chain in the right table
+ * entry. The hash table entry is cleared if "old" fragments exist in it.
+ *
+ * Return: true if skb is buffered, false on error. If the chain has all the
+ * fragments needed to merge the packet, the chain is moved to the passed head
+ * to avoid locking the chain in the table.
+ */
+static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
+ struct sk_buff *skb,
+ struct hlist_head *chain_out)
+{
+ struct batadv_frag_table_entry *chain;
+ struct batadv_frag_list_entry *frag_entry_new = NULL, *frag_entry_curr;
+ struct batadv_frag_list_entry *frag_entry_last = NULL;
+ struct batadv_frag_packet *frag_packet;
+ u8 bucket;
+ u16 seqno, hdr_size = sizeof(struct batadv_frag_packet);
+ bool ret = false;
+
+ /* Linearize packet to avoid linearizing 16 packets in a row when doing
+ * the later merge. Non-linear merge should be added to remove this
+ * linearization.
+ */
+ if (skb_linearize(skb) < 0)
+ goto err;
+
+ frag_packet = (struct batadv_frag_packet *)skb->data;
+ seqno = ntohs(frag_packet->seqno);
+ bucket = seqno % BATADV_FRAG_BUFFER_COUNT;
+
+ frag_entry_new = kmalloc(sizeof(*frag_entry_new), GFP_ATOMIC);
+ if (!frag_entry_new)
+ goto err;
+
+ frag_entry_new->skb = skb;
+ frag_entry_new->no = frag_packet->no;
+
+ /* Select entry in the "chain table" and delete any prior fragments
+	 * with another sequence number. batadv_frag_init_chain() returns true
+	 * if the list is empty at return.
+ */
+ chain = &orig_node->fragments[bucket];
+ spin_lock_bh(&chain->lock);
+ if (batadv_frag_init_chain(chain, seqno)) {
+ hlist_add_head(&frag_entry_new->list, &chain->fragment_list);
+ chain->size = skb->len - hdr_size;
+ chain->timestamp = jiffies;
+ chain->total_size = ntohs(frag_packet->total_size);
+ ret = true;
+ goto out;
+ }
+
+ /* Find the position for the new fragment. */
+ hlist_for_each_entry(frag_entry_curr, &chain->fragment_list, list) {
+ /* Drop packet if fragment already exists. */
+ if (frag_entry_curr->no == frag_entry_new->no)
+ goto err_unlock;
+
+ /* Order fragments from highest to lowest. */
+ if (frag_entry_curr->no < frag_entry_new->no) {
+ hlist_add_before(&frag_entry_new->list,
+ &frag_entry_curr->list);
+ chain->size += skb->len - hdr_size;
+ chain->timestamp = jiffies;
+ ret = true;
+ goto out;
+ }
+
+ /* store current entry because it could be the last in list */
+ frag_entry_last = frag_entry_curr;
+ }
+
+ /* Reached the end of the list, so insert after 'frag_entry_last'. */
+ if (likely(frag_entry_last)) {
+ hlist_add_behind(&frag_entry_new->list, &frag_entry_last->list);
+ chain->size += skb->len - hdr_size;
+ chain->timestamp = jiffies;
+ ret = true;
+ }
+
+out:
+ if (chain->size > batadv_frag_size_limit() ||
+ chain->total_size != ntohs(frag_packet->total_size) ||
+ chain->total_size > batadv_frag_size_limit()) {
+ /* Clear chain if total size of either the list or the packet
+ * exceeds the maximum size of one merged packet. Don't allow
+ * packets to have different total_size.
+ */
+ batadv_frag_clear_chain(&chain->fragment_list, true);
+ chain->size = 0;
+ } else if (ntohs(frag_packet->total_size) == chain->size) {
+ /* All fragments received. Hand over chain to caller. */
+ hlist_move_list(&chain->fragment_list, chain_out);
+ chain->size = 0;
+ }
+
+err_unlock:
+ spin_unlock_bh(&chain->lock);
+
+err:
+ if (!ret) {
+ kfree(frag_entry_new);
+ kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+/**
+ * batadv_frag_merge_packets() - merge a chain of fragments
+ * @chain: head of chain with fragments
+ *
+ * Expand the first skb in the chain and copy the content of the remaining
+ * skbs into the expanded one. Since fragments are cut from the tail of the
+ * original packet, the chain, ordered from the highest fragment number to the
+ * lowest, already holds the payload in its original order. After doing so,
+ * clear the chain.
+ *
+ * Return: the merged skb or NULL on error.
+ */
+static struct sk_buff *
+batadv_frag_merge_packets(struct hlist_head *chain)
+{
+ struct batadv_frag_packet *packet;
+ struct batadv_frag_list_entry *entry;
+ struct sk_buff *skb_out;
+ int size, hdr_size = sizeof(struct batadv_frag_packet);
+ bool dropped = false;
+
+ /* Remove first entry, as this is the destination for the rest of the
+ * fragments.
+ */
+ entry = hlist_entry(chain->first, struct batadv_frag_list_entry, list);
+ hlist_del(&entry->list);
+ skb_out = entry->skb;
+ kfree(entry);
+
+ packet = (struct batadv_frag_packet *)skb_out->data;
+ size = ntohs(packet->total_size) + hdr_size;
+
+ /* Make room for the rest of the fragments. */
+ if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
+ kfree_skb(skb_out);
+ skb_out = NULL;
+ dropped = true;
+ goto free;
+ }
+
+ /* Move the existing MAC header to just before the payload. (Override
+ * the fragment header.)
+ */
+ skb_pull(skb_out, hdr_size);
+ skb_out->ip_summed = CHECKSUM_NONE;
+ memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
+ skb_set_mac_header(skb_out, -ETH_HLEN);
+ skb_reset_network_header(skb_out);
+ skb_reset_transport_header(skb_out);
+
+	/* Copy the payload of each fragment into the last skb */
+ hlist_for_each_entry(entry, chain, list) {
+ size = entry->skb->len - hdr_size;
+ skb_put_data(skb_out, entry->skb->data + hdr_size, size);
+ }
+
+free:
+ /* Locking is not needed, because 'chain' is not part of any orig. */
+ batadv_frag_clear_chain(chain, dropped);
+ return skb_out;
+}
+
+/**
+ * batadv_frag_skb_buffer() - buffer fragment for later merge
+ * @skb: skb to buffer
+ * @orig_node_src: originator that the skb is received from
+ *
+ * Add fragment to buffer and merge fragments if possible.
+ *
+ * There are three possible outcomes: 1) Packet is merged: Return true and
+ * set *skb to merged packet; 2) Packet is buffered: Return true and set *skb
+ * to NULL; 3) Error: Return false and free skb.
+ *
+ * Return: true when the packet is merged or buffered, false when skb is not
+ * used.
+ */
+bool batadv_frag_skb_buffer(struct sk_buff **skb,
+ struct batadv_orig_node *orig_node_src)
+{
+ struct sk_buff *skb_out = NULL;
+ struct hlist_head head = HLIST_HEAD_INIT;
+ bool ret = false;
+
+ /* Add packet to buffer and table entry if merge is possible. */
+ if (!batadv_frag_insert_packet(orig_node_src, *skb, &head))
+ goto out_err;
+
+ /* Leave if more fragments are needed to merge. */
+ if (hlist_empty(&head))
+ goto out;
+
+ skb_out = batadv_frag_merge_packets(&head);
+ if (!skb_out)
+ goto out_err;
+
+out:
+ ret = true;
+out_err:
+ *skb = skb_out;
+ return ret;
+}
+
+/**
+ * batadv_frag_skb_fwd() - forward fragments that would exceed MTU when merged
+ * @skb: skb to forward
+ * @recv_if: interface that the skb is received on
+ * @orig_node_src: originator that the skb is received from
+ *
+ * Look up the next-hop of the fragment's payload and check if the merged
+ * packet will exceed the MTU towards the next-hop. If so, the fragment is
+ * forwarded without merging it.
+ *
+ * Return: true if the fragment is consumed/forwarded, false otherwise.
+ */
+bool batadv_frag_skb_fwd(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if,
+ struct batadv_orig_node *orig_node_src)
+{
+ struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+ struct batadv_orig_node *orig_node_dst;
+ struct batadv_neigh_node *neigh_node = NULL;
+ struct batadv_frag_packet *packet;
+ u16 total_size;
+ bool ret = false;
+
+ packet = (struct batadv_frag_packet *)skb->data;
+ orig_node_dst = batadv_orig_hash_find(bat_priv, packet->dest);
+ if (!orig_node_dst)
+ goto out;
+
+ neigh_node = batadv_find_router(bat_priv, orig_node_dst, recv_if);
+ if (!neigh_node)
+ goto out;
+
+ /* Forward the fragment, if the merged packet would be too big to
+ * be assembled.
+ */
+ total_size = ntohs(packet->total_size);
+ if (total_size > neigh_node->if_incoming->net_dev->mtu) {
+ batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_FWD);
+ batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES,
+ skb->len + ETH_HLEN);
+
+ packet->ttl--;
+ batadv_send_unicast_skb(skb, neigh_node);
+ ret = true;
+ }
+
+out:
+ batadv_orig_node_put(orig_node_dst);
+ batadv_neigh_node_put(neigh_node);
+ return ret;
+}
+
+/**
+ * batadv_frag_create() - create a fragment from skb
+ * @net_dev: outgoing device for fragment
+ * @skb: skb to create fragment from
+ * @frag_head: header to use in new fragment
+ * @fragment_size: size of new fragment
+ *
+ * Split the passed skb into two fragments: A new one with the size matching
+ * the passed fragment_size and the old one with the rest. The new skb
+ * contains data from the tail of the old skb.
+ *
+ * Return: the new fragment, NULL on error.
+ */
+static struct sk_buff *batadv_frag_create(struct net_device *net_dev,
+ struct sk_buff *skb,
+ struct batadv_frag_packet *frag_head,
+ unsigned int fragment_size)
+{
+ unsigned int ll_reserved = LL_RESERVED_SPACE(net_dev);
+ unsigned int tailroom = net_dev->needed_tailroom;
+ struct sk_buff *skb_fragment;
+ unsigned int header_size = sizeof(*frag_head);
+ unsigned int mtu = fragment_size + header_size;
+
+ skb_fragment = dev_alloc_skb(ll_reserved + mtu + tailroom);
+ if (!skb_fragment)
+ goto err;
+
+ skb_fragment->priority = skb->priority;
+
+	/* Eat the last fragment_size bytes of the skb */
+ skb_reserve(skb_fragment, ll_reserved + header_size);
+ skb_split(skb, skb_fragment, skb->len - fragment_size);
+
+ /* Add the header */
+ skb_push(skb_fragment, header_size);
+ memcpy(skb_fragment->data, frag_head, header_size);
+
+err:
+ return skb_fragment;
+}
+
+/**
+ * batadv_frag_send_packet() - create up to 16 fragments from the passed skb
+ * @skb: skb to create fragments from
+ * @orig_node: final destination of the created fragments
+ * @neigh_node: next-hop of the created fragments
+ *
+ * Return: the netdev tx status or a negative errno code on a failure
+ */
+int batadv_frag_send_packet(struct sk_buff *skb,
+ struct batadv_orig_node *orig_node,
+ struct batadv_neigh_node *neigh_node)
+{
+ struct net_device *net_dev = neigh_node->if_incoming->net_dev;
+ struct batadv_priv *bat_priv;
+ struct batadv_hard_iface *primary_if = NULL;
+ struct batadv_frag_packet frag_header;
+ struct sk_buff *skb_fragment;
+ unsigned int mtu = net_dev->mtu;
+ unsigned int header_size = sizeof(frag_header);
+ unsigned int max_fragment_size, num_fragments;
+ int ret;
+
+ /* To avoid merge and refragmentation at next-hops we never send
+ * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
+ */
+ mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
+ max_fragment_size = mtu - header_size;
+
+ if (skb->len == 0 || max_fragment_size == 0)
+ return -EINVAL;
+
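+	/* Spread the payload evenly over the required number of fragments:
+	 * e.g. a 3000 byte skb with max_fragment_size 1380 needs
+	 * ceil(3000 / 1380) = 3 fragments and is then cut into chunks of at
+	 * most ceil(3000 / 3) = 1000 bytes instead of 1380 + 1380 + 240.
+	 */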
+ num_fragments = (skb->len - 1) / max_fragment_size + 1;
+ max_fragment_size = (skb->len - 1) / num_fragments + 1;
+
+ /* Don't even try to fragment, if we need more than 16 fragments */
+ if (num_fragments > BATADV_FRAG_MAX_FRAGMENTS) {
+ ret = -EAGAIN;
+ goto free_skb;
+ }
+
+ bat_priv = orig_node->bat_priv;
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if) {
+ ret = -EINVAL;
+ goto free_skb;
+ }
+
+	/* GRO might have added fragments to the fragment list instead of
+	 * frags[]. But this is not handled by skb_split and the skb must be
+	 * linearized to avoid incorrect length information after all
+	 * batman-adv fragments were created and submitted to the
+	 * hard-interface
+	 */
+ if (skb_has_frag_list(skb) && __skb_linearize(skb)) {
+ ret = -ENOMEM;
+ goto free_skb;
+ }
+
+ /* Create one header to be copied to all fragments */
+ frag_header.packet_type = BATADV_UNICAST_FRAG;
+ frag_header.version = BATADV_COMPAT_VERSION;
+ frag_header.ttl = BATADV_TTL;
+ frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
+ frag_header.reserved = 0;
+ frag_header.no = 0;
+ frag_header.total_size = htons(skb->len);
+
+ /* skb->priority values from 256->263 are magic values to
+ * directly indicate a specific 802.1d priority. This is used
+ * to allow 802.1d priority to be passed directly in from VLAN
+ * tags, etc.
+ */
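+	/* e.g. skb->priority 261 is mapped to 802.1d priority 5 below */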
+ if (skb->priority >= 256 && skb->priority <= 263)
+ frag_header.priority = skb->priority - 256;
+ else
+ frag_header.priority = 0;
+
+ ether_addr_copy(frag_header.orig, primary_if->net_dev->dev_addr);
+ ether_addr_copy(frag_header.dest, orig_node->orig);
+
+ /* Eat and send fragments from the tail of skb */
+ while (skb->len > max_fragment_size) {
+ /* The initial check in this function should cover this case */
+ if (unlikely(frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1)) {
+ ret = -EINVAL;
+ goto put_primary_if;
+ }
+
+ skb_fragment = batadv_frag_create(net_dev, skb, &frag_header,
+ max_fragment_size);
+ if (!skb_fragment) {
+ ret = -ENOMEM;
+ goto put_primary_if;
+ }
+
+ batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
+ batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
+ skb_fragment->len + ETH_HLEN);
+ ret = batadv_send_unicast_skb(skb_fragment, neigh_node);
+ if (ret != NET_XMIT_SUCCESS) {
+ ret = NET_XMIT_DROP;
+ goto put_primary_if;
+ }
+
+ frag_header.no++;
+ }
+
+ /* make sure that there is at least enough head for the fragmentation
+ * and ethernet headers
+ */
+ ret = skb_cow_head(skb, ETH_HLEN + header_size);
+ if (ret < 0)
+ goto put_primary_if;
+
+ skb_push(skb, header_size);
+ memcpy(skb->data, &frag_header, header_size);
+
+ /* Send the last fragment */
+ batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
+ batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
+ skb->len + ETH_HLEN);
+ ret = batadv_send_unicast_skb(skb, neigh_node);
+ /* skb was consumed */
+ skb = NULL;
+
+put_primary_if:
+ batadv_hardif_put(primary_if);
+free_skb:
+ kfree_skb(skb);
+
+ return ret;
+}
diff --git a/net/batman-adv/fragmentation.h b/net/batman-adv/fragmentation.h
new file mode 100644
index 0000000000..dbf0871f87
--- /dev/null
+++ b/net/batman-adv/fragmentation.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Martin Hundebøll <martin@hundeboll.net>
+ */
+
+#ifndef _NET_BATMAN_ADV_FRAGMENTATION_H_
+#define _NET_BATMAN_ADV_FRAGMENTATION_H_
+
+#include "main.h"
+
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+void batadv_frag_purge_orig(struct batadv_orig_node *orig,
+ bool (*check_cb)(struct batadv_frag_table_entry *));
+bool batadv_frag_skb_fwd(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if,
+ struct batadv_orig_node *orig_node_src);
+bool batadv_frag_skb_buffer(struct sk_buff **skb,
+ struct batadv_orig_node *orig_node);
+int batadv_frag_send_packet(struct sk_buff *skb,
+ struct batadv_orig_node *orig_node,
+ struct batadv_neigh_node *neigh_node);
+
+/**
+ * batadv_frag_check_entry() - check if a list of fragments has timed out
+ * @frags_entry: table entry to check
+ *
+ * Return: true if the frags entry has timed out, false otherwise.
+ */
+static inline bool
+batadv_frag_check_entry(struct batadv_frag_table_entry *frags_entry)
+{
+ if (!hlist_empty(&frags_entry->fragment_list) &&
+ batadv_has_timed_out(frags_entry->timestamp, BATADV_FRAG_TIMEOUT))
+ return true;
+ return false;
+}
+
+#endif /* _NET_BATMAN_ADV_FRAGMENTATION_H_ */
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
new file mode 100644
index 0000000000..d26124bc27
--- /dev/null
+++ b/net/batman-adv/gateway_client.c
@@ -0,0 +1,769 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner
+ */
+
+#include "gateway_client.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/byteorder/generic.h>
+#include <linux/container_of.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/gfp.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/netdevice.h>
+#include <linux/netlink.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/udp.h>
+#include <net/sock.h>
+#include <uapi/linux/batadv_packet.h>
+#include <uapi/linux/batman_adv.h>
+
+#include "hard-interface.h"
+#include "log.h"
+#include "netlink.h"
+#include "originator.h"
+#include "routing.h"
+#include "soft-interface.h"
+#include "translation-table.h"
+
+/* These are the offsets of the "hw type" and "hw address length" in the dhcp
+ * packet starting at the beginning of the dhcp header
+ */
+#define BATADV_DHCP_HTYPE_OFFSET 1
+#define BATADV_DHCP_HLEN_OFFSET 2
+/* Value of htype representing Ethernet */
+#define BATADV_DHCP_HTYPE_ETHERNET 0x01
+/* This is the offset of the "chaddr" field in the dhcp packet starting at the
+ * beginning of the dhcp header
+ */
+#define BATADV_DHCP_CHADDR_OFFSET 28
+
+/**
+ * batadv_gw_node_release() - release gw_node from lists and queue for free
+ * after rcu grace period
+ * @ref: kref pointer of the gw_node
+ */
+void batadv_gw_node_release(struct kref *ref)
+{
+ struct batadv_gw_node *gw_node;
+
+ gw_node = container_of(ref, struct batadv_gw_node, refcount);
+
+ batadv_orig_node_put(gw_node->orig_node);
+ kfree_rcu(gw_node, rcu);
+}
+
+/**
+ * batadv_gw_get_selected_gw_node() - Get currently selected gateway
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: selected gateway (with increased refcnt), NULL on errors
+ */
+struct batadv_gw_node *
+batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv)
+{
+ struct batadv_gw_node *gw_node;
+
+ rcu_read_lock();
+ gw_node = rcu_dereference(bat_priv->gw.curr_gw);
+ if (!gw_node)
+ goto out;
+
+ if (!kref_get_unless_zero(&gw_node->refcount))
+ gw_node = NULL;
+
+out:
+ rcu_read_unlock();
+ return gw_node;
+}
+
+/**
+ * batadv_gw_get_selected_orig() - Get originator of currently selected gateway
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: orig_node of selected gateway (with increased refcnt), NULL on errors
+ */
+struct batadv_orig_node *
+batadv_gw_get_selected_orig(struct batadv_priv *bat_priv)
+{
+ struct batadv_gw_node *gw_node;
+ struct batadv_orig_node *orig_node = NULL;
+
+ gw_node = batadv_gw_get_selected_gw_node(bat_priv);
+ if (!gw_node)
+ goto out;
+
+ rcu_read_lock();
+ orig_node = gw_node->orig_node;
+ if (!orig_node)
+ goto unlock;
+
+ if (!kref_get_unless_zero(&orig_node->refcount))
+ orig_node = NULL;
+
+unlock:
+ rcu_read_unlock();
+out:
+ batadv_gw_node_put(gw_node);
+ return orig_node;
+}
+
+static void batadv_gw_select(struct batadv_priv *bat_priv,
+ struct batadv_gw_node *new_gw_node)
+{
+ struct batadv_gw_node *curr_gw_node;
+
+ spin_lock_bh(&bat_priv->gw.list_lock);
+
+ if (new_gw_node)
+ kref_get(&new_gw_node->refcount);
+
+ curr_gw_node = rcu_replace_pointer(bat_priv->gw.curr_gw, new_gw_node,
+ true);
+
+ batadv_gw_node_put(curr_gw_node);
+
+ spin_unlock_bh(&bat_priv->gw.list_lock);
+}
+
+/**
+ * batadv_gw_reselect() - force a gateway reselection
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Set a flag to remind the GW component to perform a new gateway reselection.
+ * However, this function does not ensure that the current gateway is going to
+ * be deselected. The reselection mechanism may elect the same gateway once
+ * again.
+ *
+ * This means that invoking batadv_gw_reselect() does not guarantee a gateway
+ * change and therefore a uevent is not necessarily expected.
+ */
+void batadv_gw_reselect(struct batadv_priv *bat_priv)
+{
+ atomic_set(&bat_priv->gw.reselect, 1);
+}
+
+/**
+ * batadv_gw_check_client_stop() - check if client mode has been switched off
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * This function assumes the caller has checked that the gw state *is actually
+ * changing*. This function is not supposed to be called when there is no state
+ * change.
+ */
+void batadv_gw_check_client_stop(struct batadv_priv *bat_priv)
+{
+ struct batadv_gw_node *curr_gw;
+
+ if (atomic_read(&bat_priv->gw.mode) != BATADV_GW_MODE_CLIENT)
+ return;
+
+ curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
+ if (!curr_gw)
+ return;
+
+ /* deselect the current gateway so that next time that client mode is
+ * enabled a proper GW_ADD event can be sent
+ */
+ batadv_gw_select(bat_priv, NULL);
+
+ /* if batman-adv is switching the gw client mode off and a gateway was
+ * already selected, send a DEL uevent
+ */
+ batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_DEL, NULL);
+
+ batadv_gw_node_put(curr_gw);
+}
+
+/**
+ * batadv_gw_election() - Elect the best gateway
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_gw_election(struct batadv_priv *bat_priv)
+{
+ struct batadv_gw_node *curr_gw = NULL;
+ struct batadv_gw_node *next_gw = NULL;
+ struct batadv_neigh_node *router = NULL;
+ struct batadv_neigh_ifinfo *router_ifinfo = NULL;
+ char gw_addr[18] = { '\0' };
+
+ if (atomic_read(&bat_priv->gw.mode) != BATADV_GW_MODE_CLIENT)
+ goto out;
+
+ if (!bat_priv->algo_ops->gw.get_best_gw_node)
+ goto out;
+
+ curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
+
+ if (!batadv_atomic_dec_not_zero(&bat_priv->gw.reselect) && curr_gw)
+ goto out;
+
+ /* if gw.reselect is set to 1 it means that a previous call to
+ * gw.is_eligible() said that we have a new best GW, therefore it can
+ * now be picked from the list and selected
+ */
+ next_gw = bat_priv->algo_ops->gw.get_best_gw_node(bat_priv);
+
+ if (curr_gw == next_gw)
+ goto out;
+
+ if (next_gw) {
+ sprintf(gw_addr, "%pM", next_gw->orig_node->orig);
+
+ router = batadv_orig_router_get(next_gw->orig_node,
+ BATADV_IF_DEFAULT);
+ if (!router) {
+ batadv_gw_reselect(bat_priv);
+ goto out;
+ }
+
+ router_ifinfo = batadv_neigh_ifinfo_get(router,
+ BATADV_IF_DEFAULT);
+ if (!router_ifinfo) {
+ batadv_gw_reselect(bat_priv);
+ goto out;
+ }
+ }
+
+ if (curr_gw && !next_gw) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Removing selected gateway - no gateway in range\n");
+ batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_DEL,
+ NULL);
+ } else if (!curr_gw && next_gw) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Adding route to gateway %pM (bandwidth: %u.%u/%u.%u MBit, tq: %i)\n",
+ next_gw->orig_node->orig,
+ next_gw->bandwidth_down / 10,
+ next_gw->bandwidth_down % 10,
+ next_gw->bandwidth_up / 10,
+ next_gw->bandwidth_up % 10,
+ router_ifinfo->bat_iv.tq_avg);
+ batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_ADD,
+ gw_addr);
+ } else {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Changing route to gateway %pM (bandwidth: %u.%u/%u.%u MBit, tq: %i)\n",
+ next_gw->orig_node->orig,
+ next_gw->bandwidth_down / 10,
+ next_gw->bandwidth_down % 10,
+ next_gw->bandwidth_up / 10,
+ next_gw->bandwidth_up % 10,
+ router_ifinfo->bat_iv.tq_avg);
+ batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_CHANGE,
+ gw_addr);
+ }
+
+ batadv_gw_select(bat_priv, next_gw);
+
+out:
+ batadv_gw_node_put(curr_gw);
+ batadv_gw_node_put(next_gw);
+ batadv_neigh_node_put(router);
+ batadv_neigh_ifinfo_put(router_ifinfo);
+}
+
+/**
+ * batadv_gw_check_election() - Elect orig node as best gateway when eligible
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: orig node which is to be checked
+ */
+void batadv_gw_check_election(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node)
+{
+ struct batadv_orig_node *curr_gw_orig;
+
+ /* abort immediately if the routing algorithm does not support gateway
+ * election
+ */
+ if (!bat_priv->algo_ops->gw.is_eligible)
+ return;
+
+ curr_gw_orig = batadv_gw_get_selected_orig(bat_priv);
+ if (!curr_gw_orig)
+ goto reselect;
+
+ /* this node already is the gateway */
+ if (curr_gw_orig == orig_node)
+ goto out;
+
+ if (!bat_priv->algo_ops->gw.is_eligible(bat_priv, curr_gw_orig,
+ orig_node))
+ goto out;
+
+reselect:
+ batadv_gw_reselect(bat_priv);
+out:
+ batadv_orig_node_put(curr_gw_orig);
+}
+
+/**
+ * batadv_gw_node_add() - add gateway node to list of available gateways
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: originator announcing gateway capabilities
+ * @gateway: announced bandwidth information
+ *
+ * Has to be called with the appropriate locks being acquired
+ * (gw.list_lock).
+ */
+static void batadv_gw_node_add(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ struct batadv_tvlv_gateway_data *gateway)
+{
+ struct batadv_gw_node *gw_node;
+
+ lockdep_assert_held(&bat_priv->gw.list_lock);
+
+ if (gateway->bandwidth_down == 0)
+ return;
+
+ gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
+ if (!gw_node)
+ return;
+
+ kref_init(&gw_node->refcount);
+ INIT_HLIST_NODE(&gw_node->list);
+ kref_get(&orig_node->refcount);
+ gw_node->orig_node = orig_node;
+ gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
+ gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
+
+ kref_get(&gw_node->refcount);
+ hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.gateway_list);
+ bat_priv->gw.generation++;
+
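+
+	/* the gateway TVLV announces bandwidth in 100 kbit/s units, hence
+	 * value / 10 and value % 10 print it as MBit below
+	 */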
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Found new gateway %pM -> gw bandwidth: %u.%u/%u.%u MBit\n",
+ orig_node->orig,
+ ntohl(gateway->bandwidth_down) / 10,
+ ntohl(gateway->bandwidth_down) % 10,
+ ntohl(gateway->bandwidth_up) / 10,
+ ntohl(gateway->bandwidth_up) % 10);
+
+ /* don't return reference to new gw_node */
+ batadv_gw_node_put(gw_node);
+}
+
+/**
+ * batadv_gw_node_get() - retrieve gateway node from list of available gateways
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: originator announcing gateway capabilities
+ *
+ * Return: gateway node if found or NULL otherwise.
+ */
+struct batadv_gw_node *batadv_gw_node_get(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node)
+{
+ struct batadv_gw_node *gw_node_tmp, *gw_node = NULL;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(gw_node_tmp, &bat_priv->gw.gateway_list,
+ list) {
+ if (gw_node_tmp->orig_node != orig_node)
+ continue;
+
+ if (!kref_get_unless_zero(&gw_node_tmp->refcount))
+ continue;
+
+ gw_node = gw_node_tmp;
+ break;
+ }
+ rcu_read_unlock();
+
+ return gw_node;
+}
+
+/**
+ * batadv_gw_node_update() - update list of available gateways with changed
+ * bandwidth information
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: originator announcing gateway capabilities
+ * @gateway: announced bandwidth information
+ */
+void batadv_gw_node_update(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ struct batadv_tvlv_gateway_data *gateway)
+{
+ struct batadv_gw_node *gw_node, *curr_gw = NULL;
+
+ spin_lock_bh(&bat_priv->gw.list_lock);
+ gw_node = batadv_gw_node_get(bat_priv, orig_node);
+ if (!gw_node) {
+ batadv_gw_node_add(bat_priv, orig_node, gateway);
+ spin_unlock_bh(&bat_priv->gw.list_lock);
+ goto out;
+ }
+ spin_unlock_bh(&bat_priv->gw.list_lock);
+
+ if (gw_node->bandwidth_down == ntohl(gateway->bandwidth_down) &&
+ gw_node->bandwidth_up == ntohl(gateway->bandwidth_up))
+ goto out;
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Gateway bandwidth of originator %pM changed from %u.%u/%u.%u MBit to %u.%u/%u.%u MBit\n",
+ orig_node->orig,
+ gw_node->bandwidth_down / 10,
+ gw_node->bandwidth_down % 10,
+ gw_node->bandwidth_up / 10,
+ gw_node->bandwidth_up % 10,
+ ntohl(gateway->bandwidth_down) / 10,
+ ntohl(gateway->bandwidth_down) % 10,
+ ntohl(gateway->bandwidth_up) / 10,
+ ntohl(gateway->bandwidth_up) % 10);
+
+ gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
+ gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
+
+ if (ntohl(gateway->bandwidth_down) == 0) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Gateway %pM removed from gateway list\n",
+ orig_node->orig);
+
+ /* Note: We don't need a NULL check here, since curr_gw never
+ * gets dereferenced.
+ */
+ spin_lock_bh(&bat_priv->gw.list_lock);
+ if (!hlist_unhashed(&gw_node->list)) {
+ hlist_del_init_rcu(&gw_node->list);
+ batadv_gw_node_put(gw_node);
+ bat_priv->gw.generation++;
+ }
+ spin_unlock_bh(&bat_priv->gw.list_lock);
+
+ curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
+ if (gw_node == curr_gw)
+ batadv_gw_reselect(bat_priv);
+
+ batadv_gw_node_put(curr_gw);
+ }
+
+out:
+ batadv_gw_node_put(gw_node);
+}
+
+/**
+ * batadv_gw_node_delete() - Remove orig_node from gateway list
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: orig node which is currently in process of being removed
+ */
+void batadv_gw_node_delete(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node)
+{
+ struct batadv_tvlv_gateway_data gateway;
+
+ gateway.bandwidth_down = 0;
+ gateway.bandwidth_up = 0;
+
+ batadv_gw_node_update(bat_priv, orig_node, &gateway);
+}
+
+/**
+ * batadv_gw_node_free() - Free gateway information from soft interface
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_gw_node_free(struct batadv_priv *bat_priv)
+{
+ struct batadv_gw_node *gw_node;
+ struct hlist_node *node_tmp;
+
+ spin_lock_bh(&bat_priv->gw.list_lock);
+ hlist_for_each_entry_safe(gw_node, node_tmp,
+ &bat_priv->gw.gateway_list, list) {
+ hlist_del_init_rcu(&gw_node->list);
+ batadv_gw_node_put(gw_node);
+ bat_priv->gw.generation++;
+ }
+ spin_unlock_bh(&bat_priv->gw.list_lock);
+}
+
+/**
+ * batadv_gw_dump() - Dump gateways into a message
+ * @msg: Netlink message to dump into
+ * @cb: Control block containing additional options
+ *
+ * Return: Error code, or length of message
+ */
+int batadv_gw_dump(struct sk_buff *msg, struct netlink_callback *cb)
+{
+ struct batadv_hard_iface *primary_if = NULL;
+ struct net *net = sock_net(cb->skb->sk);
+ struct net_device *soft_iface;
+ struct batadv_priv *bat_priv;
+ int ifindex;
+ int ret;
+
+ ifindex = batadv_netlink_get_ifindex(cb->nlh,
+ BATADV_ATTR_MESH_IFINDEX);
+ if (!ifindex)
+ return -EINVAL;
+
+ soft_iface = dev_get_by_index(net, ifindex);
+ if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ bat_priv = netdev_priv(soft_iface);
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ if (!bat_priv->algo_ops->gw.dump) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ bat_priv->algo_ops->gw.dump(msg, cb, bat_priv);
+
+ ret = msg->len;
+
+out:
+ batadv_hardif_put(primary_if);
+ dev_put(soft_iface);
+
+ return ret;
+}
+
+/**
+ * batadv_gw_dhcp_recipient_get() - check if a packet is a DHCP message
+ * @skb: the packet to check
+ * @header_len: a pointer to the batman-adv header size
+ * @chaddr: buffer where the client address will be stored. Valid
+ * only if the function returns BATADV_DHCP_TO_CLIENT
+ *
+ * This function may re-allocate the data buffer of the skb passed as argument.
+ *
+ * Return:
+ * - BATADV_DHCP_NO if the packet is not a dhcp message or if there was an error
+ * while parsing it
+ * - BATADV_DHCP_TO_SERVER if this is a message going to the DHCP server
+ * - BATADV_DHCP_TO_CLIENT if this is a message going to a DHCP client
+ */
+enum batadv_dhcp_recipient
+batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
+ u8 *chaddr)
+{
+ enum batadv_dhcp_recipient ret = BATADV_DHCP_NO;
+ struct ethhdr *ethhdr;
+ struct iphdr *iphdr;
+ struct ipv6hdr *ipv6hdr;
+ struct udphdr *udphdr;
+ struct vlan_ethhdr *vhdr;
+ int chaddr_offset;
+ __be16 proto;
+ u8 *p;
+
+ /* check for ethernet header */
+ if (!pskb_may_pull(skb, *header_len + ETH_HLEN))
+ return BATADV_DHCP_NO;
+
+ ethhdr = eth_hdr(skb);
+ proto = ethhdr->h_proto;
+ *header_len += ETH_HLEN;
+
+ /* check for initial vlan header */
+ if (proto == htons(ETH_P_8021Q)) {
+ if (!pskb_may_pull(skb, *header_len + VLAN_HLEN))
+ return BATADV_DHCP_NO;
+
+ vhdr = vlan_eth_hdr(skb);
+ proto = vhdr->h_vlan_encapsulated_proto;
+ *header_len += VLAN_HLEN;
+ }
+
+ /* check for ip header */
+ switch (proto) {
+ case htons(ETH_P_IP):
+ if (!pskb_may_pull(skb, *header_len + sizeof(*iphdr)))
+ return BATADV_DHCP_NO;
+
+ iphdr = (struct iphdr *)(skb->data + *header_len);
+ *header_len += iphdr->ihl * 4;
+
+ /* check for udp header */
+ if (iphdr->protocol != IPPROTO_UDP)
+ return BATADV_DHCP_NO;
+
+ break;
+ case htons(ETH_P_IPV6):
+ if (!pskb_may_pull(skb, *header_len + sizeof(*ipv6hdr)))
+ return BATADV_DHCP_NO;
+
+ ipv6hdr = (struct ipv6hdr *)(skb->data + *header_len);
+ *header_len += sizeof(*ipv6hdr);
+
+ /* check for udp header */
+ if (ipv6hdr->nexthdr != IPPROTO_UDP)
+ return BATADV_DHCP_NO;
+
+ break;
+ default:
+ return BATADV_DHCP_NO;
+ }
+
+ if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr)))
+ return BATADV_DHCP_NO;
+
+ udphdr = (struct udphdr *)(skb->data + *header_len);
+ *header_len += sizeof(*udphdr);
+
+ /* check for bootp port */
+ switch (proto) {
+ case htons(ETH_P_IP):
+ if (udphdr->dest == htons(67))
+ ret = BATADV_DHCP_TO_SERVER;
+ else if (udphdr->source == htons(67))
+ ret = BATADV_DHCP_TO_CLIENT;
+ break;
+ case htons(ETH_P_IPV6):
+ if (udphdr->dest == htons(547))
+ ret = BATADV_DHCP_TO_SERVER;
+ else if (udphdr->source == htons(547))
+ ret = BATADV_DHCP_TO_CLIENT;
+ break;
+ }
+
+ chaddr_offset = *header_len + BATADV_DHCP_CHADDR_OFFSET;
+ /* store the client address if the message is going to a client */
+ if (ret == BATADV_DHCP_TO_CLIENT) {
+ if (!pskb_may_pull(skb, chaddr_offset + ETH_ALEN))
+ return BATADV_DHCP_NO;
+
+ /* check if the DHCP packet carries an Ethernet DHCP */
+ p = skb->data + *header_len + BATADV_DHCP_HTYPE_OFFSET;
+ if (*p != BATADV_DHCP_HTYPE_ETHERNET)
+ return BATADV_DHCP_NO;
+
+ /* check if the DHCP packet carries a valid Ethernet address */
+ p = skb->data + *header_len + BATADV_DHCP_HLEN_OFFSET;
+ if (*p != ETH_ALEN)
+ return BATADV_DHCP_NO;
+
+ ether_addr_copy(chaddr, skb->data + chaddr_offset);
+ }
+
+ return ret;
+}
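+
+/* Illustrative walk-through (numbers assume a plain untagged IPv4 DHCP
+ * reply): after the Ethernet header *header_len is 14, after a 20 byte IPv4
+ * header it is 34 and after the UDP header 42. A UDP source port of 67
+ * classifies the packet as BATADV_DHCP_TO_CLIENT, so the client address is
+ * read at offset 42 + BATADV_DHCP_CHADDR_OFFSET once the htype/hlen fields
+ * confirm an Ethernet hardware address.
+ */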
+
+/**
+ * batadv_gw_out_of_range() - check if the DHCP request destination is the best
+ * gateway
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the outgoing packet
+ *
+ * Check if the skb is a DHCP request and if it is sent to the current best GW
+ * server. Due to topology changes it may be the case that the GW server
+ * previously selected is not the best one anymore.
+ *
+ * This call might reallocate skb data.
+ * Must be invoked only when the DHCP packet is going TO a DHCP SERVER.
+ *
+ * Return: true if the packet destination is unicast and it is not the best gw,
+ * false otherwise.
+ */
+bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
+ struct sk_buff *skb)
+{
+ struct batadv_neigh_node *neigh_curr = NULL;
+ struct batadv_neigh_node *neigh_old = NULL;
+ struct batadv_orig_node *orig_dst_node = NULL;
+ struct batadv_gw_node *gw_node = NULL;
+ struct batadv_gw_node *curr_gw = NULL;
+ struct batadv_neigh_ifinfo *curr_ifinfo, *old_ifinfo;
+ struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
+ bool out_of_range = false;
+ u8 curr_tq_avg;
+ unsigned short vid;
+
+ vid = batadv_get_vid(skb, 0);
+
+ if (is_multicast_ether_addr(ethhdr->h_dest))
+ goto out;
+
+ orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
+ ethhdr->h_dest, vid);
+ if (!orig_dst_node)
+ goto out;
+
+ gw_node = batadv_gw_node_get(bat_priv, orig_dst_node);
+ if (!gw_node)
+ goto out;
+
+ switch (atomic_read(&bat_priv->gw.mode)) {
+ case BATADV_GW_MODE_SERVER:
+ /* If we are a GW then we are our own best GW. We can artificially
+ * set the TQ towards ourselves to the maximum value
+ */
+ curr_tq_avg = BATADV_TQ_MAX_VALUE;
+ break;
+ case BATADV_GW_MODE_CLIENT:
+ curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
+ if (!curr_gw)
+ goto out;
+
+ /* packet is going to our gateway */
+ if (curr_gw->orig_node == orig_dst_node)
+ goto out;
+
+ /* If the dhcp packet has been sent to a different gw,
+ * we have to evaluate whether the old gw is still
+ * reliable enough
+ */
+ neigh_curr = batadv_find_router(bat_priv, curr_gw->orig_node,
+ NULL);
+ if (!neigh_curr)
+ goto out;
+
+ curr_ifinfo = batadv_neigh_ifinfo_get(neigh_curr,
+ BATADV_IF_DEFAULT);
+ if (!curr_ifinfo)
+ goto out;
+
+ curr_tq_avg = curr_ifinfo->bat_iv.tq_avg;
+ batadv_neigh_ifinfo_put(curr_ifinfo);
+
+ break;
+ case BATADV_GW_MODE_OFF:
+ default:
+ goto out;
+ }
+
+ neigh_old = batadv_find_router(bat_priv, orig_dst_node, NULL);
+ if (!neigh_old)
+ goto out;
+
+ old_ifinfo = batadv_neigh_ifinfo_get(neigh_old, BATADV_IF_DEFAULT);
+ if (!old_ifinfo)
+ goto out;
+
+ if ((curr_tq_avg - old_ifinfo->bat_iv.tq_avg) > BATADV_GW_THRESHOLD)
+ out_of_range = true;
+ batadv_neigh_ifinfo_put(old_ifinfo);
+
+out:
+ batadv_orig_node_put(orig_dst_node);
+ batadv_gw_node_put(curr_gw);
+ batadv_gw_node_put(gw_node);
+ batadv_neigh_node_put(neigh_old);
+ batadv_neigh_node_put(neigh_curr);
+ return out_of_range;
+}
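+
+/* Numeric sketch, assuming the default BATADV_GW_THRESHOLD of 50: if the
+ * currently selected gateway is reachable with a tq_avg of 230 while the
+ * gateway the DHCP request is addressed to only reaches a tq_avg of 150,
+ * the difference of 80 exceeds the threshold and the request is declared
+ * out of range.
+ */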
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
new file mode 100644
index 0000000000..95c2ccdaa5
--- /dev/null
+++ b/net/batman-adv/gateway_client.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner
+ */
+
+#ifndef _NET_BATMAN_ADV_GATEWAY_CLIENT_H_
+#define _NET_BATMAN_ADV_GATEWAY_CLIENT_H_
+
+#include "main.h"
+
+#include <linux/kref.h>
+#include <linux/netlink.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <uapi/linux/batadv_packet.h>
+
+void batadv_gw_check_client_stop(struct batadv_priv *bat_priv);
+void batadv_gw_reselect(struct batadv_priv *bat_priv);
+void batadv_gw_election(struct batadv_priv *bat_priv);
+struct batadv_orig_node *
+batadv_gw_get_selected_orig(struct batadv_priv *bat_priv);
+void batadv_gw_check_election(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node);
+void batadv_gw_node_update(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ struct batadv_tvlv_gateway_data *gateway);
+void batadv_gw_node_delete(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node);
+void batadv_gw_node_free(struct batadv_priv *bat_priv);
+void batadv_gw_node_release(struct kref *ref);
+struct batadv_gw_node *
+batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv);
+int batadv_gw_dump(struct sk_buff *msg, struct netlink_callback *cb);
+bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb);
+enum batadv_dhcp_recipient
+batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
+ u8 *chaddr);
+struct batadv_gw_node *batadv_gw_node_get(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node);
+
+/**
+ * batadv_gw_node_put() - decrement the gw_node refcounter and possibly release
+ * it
+ * @gw_node: gateway node to free
+ */
+static inline void batadv_gw_node_put(struct batadv_gw_node *gw_node)
+{
+ if (!gw_node)
+ return;
+
+ kref_put(&gw_node->refcount, batadv_gw_node_release);
+}
+
+#endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
new file mode 100644
index 0000000000..2dd36ef03c
--- /dev/null
+++ b/net/batman-adv/gateway_common.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner
+ */
+
+#include "gateway_common.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/byteorder/generic.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <uapi/linux/batadv_packet.h>
+#include <uapi/linux/batman_adv.h>
+
+#include "gateway_client.h"
+#include "tvlv.h"
+
+/**
+ * batadv_gw_tvlv_container_update() - update the gw tvlv container after
+ * gateway setting change
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_gw_tvlv_container_update(struct batadv_priv *bat_priv)
+{
+ struct batadv_tvlv_gateway_data gw;
+ u32 down, up;
+ char gw_mode;
+
+ gw_mode = atomic_read(&bat_priv->gw.mode);
+
+ switch (gw_mode) {
+ case BATADV_GW_MODE_OFF:
+ case BATADV_GW_MODE_CLIENT:
+ batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_GW, 1);
+ break;
+ case BATADV_GW_MODE_SERVER:
+ down = atomic_read(&bat_priv->gw.bandwidth_down);
+ up = atomic_read(&bat_priv->gw.bandwidth_up);
+ gw.bandwidth_down = htonl(down);
+ gw.bandwidth_up = htonl(up);
+ batadv_tvlv_container_register(bat_priv, BATADV_TVLV_GW, 1,
+ &gw, sizeof(gw));
+ break;
+ }
+}
+
+/**
+ * batadv_gw_tvlv_ogm_handler_v1() - process incoming gateway tvlv container
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the orig_node of the ogm
+ * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
+ * @tvlv_value: tvlv buffer containing the gateway data
+ * @tvlv_value_len: tvlv buffer length
+ */
+static void batadv_gw_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig,
+ u8 flags,
+ void *tvlv_value, u16 tvlv_value_len)
+{
+ struct batadv_tvlv_gateway_data gateway, *gateway_ptr;
+
+ /* only fetch the tvlv value if the handler wasn't called via the
+ * CIFNOTFND flag and if there is data to fetch
+ */
+ if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND ||
+ tvlv_value_len < sizeof(gateway)) {
+ gateway.bandwidth_down = 0;
+ gateway.bandwidth_up = 0;
+ } else {
+ gateway_ptr = tvlv_value;
+ gateway.bandwidth_down = gateway_ptr->bandwidth_down;
+ gateway.bandwidth_up = gateway_ptr->bandwidth_up;
+ if (gateway.bandwidth_down == 0 ||
+ gateway.bandwidth_up == 0) {
+ gateway.bandwidth_down = 0;
+ gateway.bandwidth_up = 0;
+ }
+ }
+
+ batadv_gw_node_update(bat_priv, orig, &gateway);
+
+ /* restart gateway selection */
+ if (gateway.bandwidth_down != 0 &&
+ atomic_read(&bat_priv->gw.mode) == BATADV_GW_MODE_CLIENT)
+ batadv_gw_check_election(bat_priv, orig);
+}
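+
+/* Encoding sketch: the advertised bandwidth values travel in network byte
+ * order and, as elsewhere in batman-adv, in units of 100 kbit/s. A gateway
+ * announcing 10 Mbit/s down and 2 Mbit/s up would therefore carry
+ * bandwidth_down = htonl(100) and bandwidth_up = htonl(20) in its TVLV.
+ */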
+
+/**
+ * batadv_gw_init() - initialise the gateway handling internals
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_gw_init(struct batadv_priv *bat_priv)
+{
+ if (bat_priv->algo_ops->gw.init_sel_class)
+ bat_priv->algo_ops->gw.init_sel_class(bat_priv);
+ else
+ atomic_set(&bat_priv->gw.sel_class, 1);
+
+ batadv_tvlv_handler_register(bat_priv, batadv_gw_tvlv_ogm_handler_v1,
+ NULL, NULL, BATADV_TVLV_GW, 1,
+ BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
+}
+
+/**
+ * batadv_gw_free() - free the gateway handling internals
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_gw_free(struct batadv_priv *bat_priv)
+{
+ batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_GW, 1);
+ batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_GW, 1);
+}
diff --git a/net/batman-adv/gateway_common.h b/net/batman-adv/gateway_common.h
new file mode 100644
index 0000000000..5d097d6a1d
--- /dev/null
+++ b/net/batman-adv/gateway_common.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner
+ */
+
+#ifndef _NET_BATMAN_ADV_GATEWAY_COMMON_H_
+#define _NET_BATMAN_ADV_GATEWAY_COMMON_H_
+
+#include "main.h"
+
+/**
+ * enum batadv_bandwidth_units - bandwidth unit types
+ */
+enum batadv_bandwidth_units {
+ /** @BATADV_BW_UNIT_KBIT: unit type kbit */
+ BATADV_BW_UNIT_KBIT,
+
+ /** @BATADV_BW_UNIT_MBIT: unit type mbit */
+ BATADV_BW_UNIT_MBIT,
+};
+
+#define BATADV_GW_MODE_OFF_NAME "off"
+#define BATADV_GW_MODE_CLIENT_NAME "client"
+#define BATADV_GW_MODE_SERVER_NAME "server"
+
+void batadv_gw_tvlv_container_update(struct batadv_priv *bat_priv);
+void batadv_gw_init(struct batadv_priv *bat_priv);
+void batadv_gw_free(struct batadv_priv *bat_priv);
+
+#endif /* _NET_BATMAN_ADV_GATEWAY_COMMON_H_ */
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
new file mode 100644
index 0000000000..96a412beab
--- /dev/null
+++ b/net/batman-adv/hard-interface.c
@@ -0,0 +1,1028 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Simon Wunderlich
+ */
+
+#include "hard-interface.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/byteorder/generic.h>
+#include <linux/compiler.h>
+#include <linux/container_of.h>
+#include <linux/errno.h>
+#include <linux/gfp.h>
+#include <linux/if.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/kref.h>
+#include <linux/limits.h>
+#include <linux/list.h>
+#include <linux/minmax.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/printk.h>
+#include <linux/rculist.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <net/net_namespace.h>
+#include <net/rtnetlink.h>
+#include <uapi/linux/batadv_packet.h>
+
+#include "bat_v.h"
+#include "bridge_loop_avoidance.h"
+#include "distributed-arp-table.h"
+#include "gateway_client.h"
+#include "log.h"
+#include "originator.h"
+#include "send.h"
+#include "soft-interface.h"
+#include "translation-table.h"
+
+/**
+ * batadv_hardif_release() - release hard interface from lists and queue for
+ * free after rcu grace period
+ * @ref: kref pointer of the hard interface
+ */
+void batadv_hardif_release(struct kref *ref)
+{
+ struct batadv_hard_iface *hard_iface;
+
+ hard_iface = container_of(ref, struct batadv_hard_iface, refcount);
+ dev_put(hard_iface->net_dev);
+
+ kfree_rcu(hard_iface, rcu);
+}
+
+/**
+ * batadv_hardif_get_by_netdev() - Get hard interface object of a net_device
+ * @net_dev: net_device to search for
+ *
+ * Return: batadv_hard_iface of net_dev (with increased refcnt), NULL on errors
+ */
+struct batadv_hard_iface *
+batadv_hardif_get_by_netdev(const struct net_device *net_dev)
+{
+ struct batadv_hard_iface *hard_iface;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ if (hard_iface->net_dev == net_dev &&
+ kref_get_unless_zero(&hard_iface->refcount))
+ goto out;
+ }
+
+ hard_iface = NULL;
+
+out:
+ rcu_read_unlock();
+ return hard_iface;
+}
+
+/**
+ * batadv_getlink_net() - return link net namespace (or use fallback)
+ * @netdev: net_device to check
+ * @fallback_net: return in case get_link_net is not available for @netdev
+ *
+ * Return: result of rtnl_link_ops->get_link_net or @fallback_net
+ */
+static struct net *batadv_getlink_net(const struct net_device *netdev,
+ struct net *fallback_net)
+{
+ if (!netdev->rtnl_link_ops)
+ return fallback_net;
+
+ if (!netdev->rtnl_link_ops->get_link_net)
+ return fallback_net;
+
+ return netdev->rtnl_link_ops->get_link_net(netdev);
+}
+
+/**
+ * batadv_mutual_parents() - check if two devices are each others parent
+ * @dev1: 1st net dev
+ * @net1: 1st devices netns
+ * @dev2: 2nd net dev
+ * @net2: 2nd devices netns
+ *
+ * veth devices come in pairs and each is the parent of the other!
+ *
+ * Return: true if the devices are each others parent, otherwise false
+ */
+static bool batadv_mutual_parents(const struct net_device *dev1,
+ struct net *net1,
+ const struct net_device *dev2,
+ struct net *net2)
+{
+ int dev1_parent_iflink = dev_get_iflink(dev1);
+ int dev2_parent_iflink = dev_get_iflink(dev2);
+ const struct net *dev1_parent_net;
+ const struct net *dev2_parent_net;
+
+ dev1_parent_net = batadv_getlink_net(dev1, net1);
+ dev2_parent_net = batadv_getlink_net(dev2, net2);
+
+ if (!dev1_parent_iflink || !dev2_parent_iflink)
+ return false;
+
+ return (dev1_parent_iflink == dev2->ifindex) &&
+ (dev2_parent_iflink == dev1->ifindex) &&
+ net_eq(dev1_parent_net, net2) &&
+ net_eq(dev2_parent_net, net1);
+}
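+
+/* Example: a veth pair veth0/veth1 in the same netns. dev_get_iflink(veth0)
+ * returns veth1->ifindex and vice versa, so both iflink checks and both
+ * net_eq() checks succeed, the pair is recognised as mutual parents and the
+ * recursion in batadv_is_on_batman_iface() terminates.
+ */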
+
+/**
+ * batadv_is_on_batman_iface() - check if a device is a batman iface descendant
+ * @net_dev: the device to check
+ *
+ * If the user creates any virtual device on top of a batman-adv interface, it
+ * is important to prevent this new interface from being used to create a new
+ * mesh network (this behaviour would lead to a batman-over-batman
+ * configuration). This function recursively checks all the parents of the
+ * device passed as argument, looking for a batman-adv soft interface.
+ *
+ * Return: true if the device is descendant of a batman-adv mesh interface (or
+ * if it is a batman-adv interface itself), false otherwise
+ */
+static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
+{
+ struct net *net = dev_net(net_dev);
+ struct net_device *parent_dev;
+ struct net *parent_net;
+ int iflink;
+ bool ret;
+
+ /* check if this is a batman-adv mesh interface */
+ if (batadv_softif_is_valid(net_dev))
+ return true;
+
+ iflink = dev_get_iflink(net_dev);
+ if (iflink == 0)
+ return false;
+
+ parent_net = batadv_getlink_net(net_dev, net);
+
+ /* iflink to itself, most likely physical device */
+ if (net == parent_net && iflink == net_dev->ifindex)
+ return false;
+
+ /* recurse over the parent device */
+ parent_dev = __dev_get_by_index((struct net *)parent_net, iflink);
+ if (!parent_dev) {
+ pr_warn("Cannot find parent device. Skipping batadv-on-batadv check for %s\n",
+ net_dev->name);
+ return false;
+ }
+
+ if (batadv_mutual_parents(net_dev, net, parent_dev, parent_net))
+ return false;
+
+ ret = batadv_is_on_batman_iface(parent_dev);
+
+ return ret;
+}
+
+static bool batadv_is_valid_iface(const struct net_device *net_dev)
+{
+ if (net_dev->flags & IFF_LOOPBACK)
+ return false;
+
+ if (net_dev->type != ARPHRD_ETHER)
+ return false;
+
+ if (net_dev->addr_len != ETH_ALEN)
+ return false;
+
+ /* no batman over batman */
+ if (batadv_is_on_batman_iface(net_dev))
+ return false;
+
+ return true;
+}
+
+/**
+ * batadv_get_real_netdevice() - check if the given netdev struct is a virtual
+ * interface on top of another 'real' interface
+ * @netdev: the device to check
+ *
+ * Callers must hold the rtnl semaphore. You may want batadv_get_real_netdev()
+ * instead of this.
+ *
+ * Return: the 'real' net device (or the original net device itself when it
+ * is not stacked on another device), or NULL in case of an error.
+ */
+static struct net_device *batadv_get_real_netdevice(struct net_device *netdev)
+{
+ struct batadv_hard_iface *hard_iface = NULL;
+ struct net_device *real_netdev = NULL;
+ struct net *real_net;
+ struct net *net;
+ int iflink;
+
+ ASSERT_RTNL();
+
+ if (!netdev)
+ return NULL;
+
+ iflink = dev_get_iflink(netdev);
+ if (iflink == 0) {
+ dev_hold(netdev);
+ return netdev;
+ }
+
+ hard_iface = batadv_hardif_get_by_netdev(netdev);
+ if (!hard_iface || !hard_iface->soft_iface)
+ goto out;
+
+ net = dev_net(hard_iface->soft_iface);
+ real_net = batadv_getlink_net(netdev, net);
+
+ /* iflink to itself, most likely physical device */
+ if (net == real_net && netdev->ifindex == iflink) {
+ real_netdev = netdev;
+ dev_hold(real_netdev);
+ goto out;
+ }
+
+ real_netdev = dev_get_by_index(real_net, iflink);
+
+out:
+ batadv_hardif_put(hard_iface);
+ return real_netdev;
+}
+
+/**
+ * batadv_get_real_netdev() - check if the given net_device struct is a virtual
+ * interface on top of another 'real' interface
+ * @net_device: the device to check
+ *
+ * Return: the 'real' net device (or the original net device itself when it
+ * is not stacked on another device), or NULL in case of an error.
+ */
+struct net_device *batadv_get_real_netdev(struct net_device *net_device)
+{
+ struct net_device *real_netdev;
+
+ rtnl_lock();
+ real_netdev = batadv_get_real_netdevice(net_device);
+ rtnl_unlock();
+
+ return real_netdev;
+}
+
+/**
+ * batadv_is_wext_netdev() - check if the given net_device struct is a
+ * wext wifi interface
+ * @net_device: the device to check
+ *
+ * Return: true if the net device is a wext wireless device, false
+ * otherwise.
+ */
+static bool batadv_is_wext_netdev(struct net_device *net_device)
+{
+ if (!net_device)
+ return false;
+
+#ifdef CONFIG_WIRELESS_EXT
+ /* pre-cfg80211 drivers have to implement WEXT, so it is possible to
+ * check for wireless_handlers != NULL
+ */
+ if (net_device->wireless_handlers)
+ return true;
+#endif
+
+ return false;
+}
+
+/**
+ * batadv_is_cfg80211_netdev() - check if the given net_device struct is a
+ * cfg80211 wifi interface
+ * @net_device: the device to check
+ *
+ * Return: true if the net device is a cfg80211 wireless device, false
+ * otherwise.
+ */
+static bool batadv_is_cfg80211_netdev(struct net_device *net_device)
+{
+ if (!net_device)
+ return false;
+
+#if IS_ENABLED(CONFIG_CFG80211)
+ /* cfg80211 drivers have to set ieee80211_ptr */
+ if (net_device->ieee80211_ptr)
+ return true;
+#endif
+
+ return false;
+}
+
+/**
+ * batadv_wifi_flags_evaluate() - calculate wifi flags for net_device
+ * @net_device: the device to check
+ *
+ * Return: batadv_hard_iface_wifi_flags flags of the device
+ */
+static u32 batadv_wifi_flags_evaluate(struct net_device *net_device)
+{
+ u32 wifi_flags = 0;
+ struct net_device *real_netdev;
+
+ if (batadv_is_wext_netdev(net_device))
+ wifi_flags |= BATADV_HARDIF_WIFI_WEXT_DIRECT;
+
+ if (batadv_is_cfg80211_netdev(net_device))
+ wifi_flags |= BATADV_HARDIF_WIFI_CFG80211_DIRECT;
+
+ real_netdev = batadv_get_real_netdevice(net_device);
+ if (!real_netdev)
+ return wifi_flags;
+
+ if (real_netdev == net_device)
+ goto out;
+
+ if (batadv_is_wext_netdev(real_netdev))
+ wifi_flags |= BATADV_HARDIF_WIFI_WEXT_INDIRECT;
+
+ if (batadv_is_cfg80211_netdev(real_netdev))
+ wifi_flags |= BATADV_HARDIF_WIFI_CFG80211_INDIRECT;
+
+out:
+ dev_put(real_netdev);
+ return wifi_flags;
+}
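+
+/* Example: for a VLAN device stacked on top of a cfg80211 interface, the
+ * VLAN netdev itself carries no ieee80211_ptr, but its resolved 'real'
+ * device does, so only BATADV_HARDIF_WIFI_CFG80211_INDIRECT is set. A bare
+ * wlan0 would get the _DIRECT flag instead.
+ */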
+
+/**
+ * batadv_is_cfg80211_hardif() - check if the given hardif is a cfg80211 wifi
+ * interface
+ * @hard_iface: the device to check
+ *
+ * Return: true if the net device is a cfg80211 wireless device, false
+ * otherwise.
+ */
+bool batadv_is_cfg80211_hardif(struct batadv_hard_iface *hard_iface)
+{
+ u32 allowed_flags = 0;
+
+ allowed_flags |= BATADV_HARDIF_WIFI_CFG80211_DIRECT;
+ allowed_flags |= BATADV_HARDIF_WIFI_CFG80211_INDIRECT;
+
+ return !!(hard_iface->wifi_flags & allowed_flags);
+}
+
+/**
+ * batadv_is_wifi_hardif() - check if the given hardif is a wifi interface
+ * @hard_iface: the device to check
+ *
+ * Return: true if the net device is a 802.11 wireless device, false otherwise.
+ */
+bool batadv_is_wifi_hardif(struct batadv_hard_iface *hard_iface)
+{
+ if (!hard_iface)
+ return false;
+
+ return hard_iface->wifi_flags != 0;
+}
+
+/**
+ * batadv_hardif_no_broadcast() - check whether (re)broadcast is necessary
+ * @if_outgoing: the outgoing interface checked and considered for (re)broadcast
+ * @orig_addr: the originator of this packet
+ * @orig_neigh: originator address of the forwarder we just got the packet from
+ * (NULL if we originated)
+ *
+ * Checks whether a packet needs to be (re)broadcasted on the given interface.
+ *
+ * Return:
+ * BATADV_HARDIF_BCAST_NORECIPIENT: No neighbor on interface
+ * BATADV_HARDIF_BCAST_DUPFWD: Just one neighbor, but it is the forwarder
+ * BATADV_HARDIF_BCAST_DUPORIG: Just one neighbor, but it is the originator
+ * BATADV_HARDIF_BCAST_OK: Several neighbors, must broadcast
+ */
+int batadv_hardif_no_broadcast(struct batadv_hard_iface *if_outgoing,
+ u8 *orig_addr, u8 *orig_neigh)
+{
+ struct batadv_hardif_neigh_node *hardif_neigh;
+ struct hlist_node *first;
+ int ret = BATADV_HARDIF_BCAST_OK;
+
+ rcu_read_lock();
+
+ /* 0 neighbors -> no (re)broadcast */
+ first = rcu_dereference(hlist_first_rcu(&if_outgoing->neigh_list));
+ if (!first) {
+ ret = BATADV_HARDIF_BCAST_NORECIPIENT;
+ goto out;
+ }
+
+ /* >1 neighbors -> (re)broadcast */
+ if (rcu_dereference(hlist_next_rcu(first)))
+ goto out;
+
+ hardif_neigh = hlist_entry(first, struct batadv_hardif_neigh_node,
+ list);
+
+ /* 1 neighbor, is the originator -> no rebroadcast */
+ if (orig_addr && batadv_compare_eth(hardif_neigh->orig, orig_addr)) {
+ ret = BATADV_HARDIF_BCAST_DUPORIG;
+ /* 1 neighbor, is the one we received from -> no rebroadcast */
+ } else if (orig_neigh &&
+ batadv_compare_eth(hardif_neigh->orig, orig_neigh)) {
+ ret = BATADV_HARDIF_BCAST_DUPFWD;
+ }
+
+out:
+ rcu_read_unlock();
+ return ret;
+}
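+
+/* Example: on a point-to-point link with exactly one neighbor, a packet
+ * received from that very neighbor yields BATADV_HARDIF_BCAST_DUPFWD;
+ * rebroadcasting would only echo the frame back to its forwarder, so the
+ * caller can skip the transmission entirely.
+ */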
+
+static struct batadv_hard_iface *
+batadv_hardif_get_active(const struct net_device *soft_iface)
+{
+ struct batadv_hard_iface *hard_iface;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ if (hard_iface->soft_iface != soft_iface)
+ continue;
+
+ if (hard_iface->if_status == BATADV_IF_ACTIVE &&
+ kref_get_unless_zero(&hard_iface->refcount))
+ goto out;
+ }
+
+ hard_iface = NULL;
+
+out:
+ rcu_read_unlock();
+ return hard_iface;
+}
+
+static void batadv_primary_if_update_addr(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *oldif)
+{
+ struct batadv_hard_iface *primary_if;
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ goto out;
+
+ batadv_dat_init_own_addr(bat_priv, primary_if);
+ batadv_bla_update_orig_address(bat_priv, primary_if, oldif);
+out:
+ batadv_hardif_put(primary_if);
+}
+
+static void batadv_primary_if_select(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *new_hard_iface)
+{
+ struct batadv_hard_iface *curr_hard_iface;
+
+ ASSERT_RTNL();
+
+ if (new_hard_iface)
+ kref_get(&new_hard_iface->refcount);
+
+ curr_hard_iface = rcu_replace_pointer(bat_priv->primary_if,
+ new_hard_iface, 1);
+
+ if (!new_hard_iface)
+ goto out;
+
+ bat_priv->algo_ops->iface.primary_set(new_hard_iface);
+ batadv_primary_if_update_addr(bat_priv, curr_hard_iface);
+
+out:
+ batadv_hardif_put(curr_hard_iface);
+}
+
+static bool
+batadv_hardif_is_iface_up(const struct batadv_hard_iface *hard_iface)
+{
+ if (hard_iface->net_dev->flags & IFF_UP)
+ return true;
+
+ return false;
+}
+
+static void batadv_check_known_mac_addr(const struct net_device *net_dev)
+{
+ const struct batadv_hard_iface *hard_iface;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ if (hard_iface->if_status != BATADV_IF_ACTIVE &&
+ hard_iface->if_status != BATADV_IF_TO_BE_ACTIVATED)
+ continue;
+
+ if (hard_iface->net_dev == net_dev)
+ continue;
+
+ if (!batadv_compare_eth(hard_iface->net_dev->dev_addr,
+ net_dev->dev_addr))
+ continue;
+
+ pr_warn("The newly added mac address (%pM) already exists on: %s\n",
+ net_dev->dev_addr, hard_iface->net_dev->name);
+ pr_warn("It is strongly recommended to keep mac addresses unique to avoid problems!\n");
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * batadv_hardif_recalc_extra_skbroom() - Recalculate skbuff extra head/tailroom
+ * @soft_iface: netdev struct of the mesh interface
+ */
+static void batadv_hardif_recalc_extra_skbroom(struct net_device *soft_iface)
+{
+ const struct batadv_hard_iface *hard_iface;
+ unsigned short lower_header_len = ETH_HLEN;
+ unsigned short lower_headroom = 0;
+ unsigned short lower_tailroom = 0;
+ unsigned short needed_headroom;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ if (hard_iface->if_status == BATADV_IF_NOT_IN_USE)
+ continue;
+
+ if (hard_iface->soft_iface != soft_iface)
+ continue;
+
+ lower_header_len = max_t(unsigned short, lower_header_len,
+ hard_iface->net_dev->hard_header_len);
+
+ lower_headroom = max_t(unsigned short, lower_headroom,
+ hard_iface->net_dev->needed_headroom);
+
+ lower_tailroom = max_t(unsigned short, lower_tailroom,
+ hard_iface->net_dev->needed_tailroom);
+ }
+ rcu_read_unlock();
+
+ needed_headroom = lower_headroom + (lower_header_len - ETH_HLEN);
+ needed_headroom += batadv_max_header_len();
+
+ /* fragmentation headers don't strip the unicast/... header */
+ needed_headroom += sizeof(struct batadv_frag_packet);
+
+ soft_iface->needed_headroom = needed_headroom;
+ soft_iface->needed_tailroom = lower_tailroom;
+}
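+
+/* Example: with a single slave whose hard_header_len is 18 and
+ * needed_headroom is 8, the soft interface reserves
+ * 8 + (18 - ETH_HLEN) + batadv_max_header_len() +
+ * sizeof(struct batadv_frag_packet) bytes of headroom, so pushing the
+ * batman-adv and fragmentation headers later never forces a reallocation.
+ */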
+
+/**
+ * batadv_hardif_min_mtu() - Calculate maximum MTU for soft interface
+ * @soft_iface: netdev struct of the soft interface
+ *
+ * Return: MTU for the soft-interface (limited by the minimal MTU of all active
+ * slave interfaces)
+ */
+int batadv_hardif_min_mtu(struct net_device *soft_iface)
+{
+ struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+ const struct batadv_hard_iface *hard_iface;
+ int min_mtu = INT_MAX;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ if (hard_iface->if_status != BATADV_IF_ACTIVE &&
+ hard_iface->if_status != BATADV_IF_TO_BE_ACTIVATED)
+ continue;
+
+ if (hard_iface->soft_iface != soft_iface)
+ continue;
+
+ min_mtu = min_t(int, hard_iface->net_dev->mtu, min_mtu);
+ }
+ rcu_read_unlock();
+
+ if (atomic_read(&bat_priv->fragmentation) == 0)
+ goto out;
+
+ /* with fragmentation enabled the maximum size of internally generated
+ * packets such as translation table exchanges or tvlv containers, etc
+ * has to be calculated
+ */
+ min_mtu = min_t(int, min_mtu, BATADV_FRAG_MAX_FRAG_SIZE);
+ min_mtu -= sizeof(struct batadv_frag_packet);
+ min_mtu *= BATADV_FRAG_MAX_FRAGMENTS;
+
+out:
+ /* report to the other components the maximum amount of bytes that
+ * batman-adv can send over the wire (without considering the payload
+ * overhead). For example, this value is used by TT to compute the
+ * maximum local table size
+ */
+ atomic_set(&bat_priv->packet_size_max, min_mtu);
+
+ /* the real soft-interface MTU is computed by removing the payload
+ * overhead from the maximum amount of bytes that was just computed.
+ *
+ * However, batman-adv does not support MTUs bigger than ETH_DATA_LEN
+ */
+ return min_t(int, min_mtu - batadv_max_header_len(), ETH_DATA_LEN);
+}
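+
+/* Worked example, assuming the in-tree defaults BATADV_FRAG_MAX_FRAG_SIZE
+ * (1400) and BATADV_FRAG_MAX_FRAGMENTS (16) plus slaves with a 1500 byte
+ * MTU: with fragmentation enabled min_mtu becomes
+ * (1400 - sizeof(struct batadv_frag_packet)) * 16 = 22080, which is stored
+ * in packet_size_max, while the returned soft-interface MTU remains capped
+ * at ETH_DATA_LEN (1500).
+ */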
+
+/**
+ * batadv_update_min_mtu() - Adjusts the MTU if a new interface with a smaller
+ * MTU appeared
+ * @soft_iface: netdev struct of the soft interface
+ */
+void batadv_update_min_mtu(struct net_device *soft_iface)
+{
+ struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+ int limit_mtu;
+ int mtu;
+
+ mtu = batadv_hardif_min_mtu(soft_iface);
+
+ if (bat_priv->mtu_set_by_user)
+ limit_mtu = bat_priv->mtu_set_by_user;
+ else
+ limit_mtu = ETH_DATA_LEN;
+
+ mtu = min(mtu, limit_mtu);
+ dev_set_mtu(soft_iface, mtu);
+
+ /* Check if the local translate table should be cleaned up to match a
+ * new (and smaller) MTU.
+ */
+ batadv_tt_local_resize_to_mtu(soft_iface);
+}
+
+static void
+batadv_hardif_activate_interface(struct batadv_hard_iface *hard_iface)
+{
+ struct batadv_priv *bat_priv;
+ struct batadv_hard_iface *primary_if = NULL;
+
+ if (hard_iface->if_status != BATADV_IF_INACTIVE)
+ goto out;
+
+ bat_priv = netdev_priv(hard_iface->soft_iface);
+
+ bat_priv->algo_ops->iface.update_mac(hard_iface);
+ hard_iface->if_status = BATADV_IF_TO_BE_ACTIVATED;
+
+ /* the first active interface becomes our primary interface, or
+ * the next active interface after the old primary interface was removed
+ */
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ batadv_primary_if_select(bat_priv, hard_iface);
+
+ batadv_info(hard_iface->soft_iface, "Interface activated: %s\n",
+ hard_iface->net_dev->name);
+
+ batadv_update_min_mtu(hard_iface->soft_iface);
+
+ if (bat_priv->algo_ops->iface.activate)
+ bat_priv->algo_ops->iface.activate(hard_iface);
+
+out:
+ batadv_hardif_put(primary_if);
+}
+
+static void
+batadv_hardif_deactivate_interface(struct batadv_hard_iface *hard_iface)
+{
+ if (hard_iface->if_status != BATADV_IF_ACTIVE &&
+ hard_iface->if_status != BATADV_IF_TO_BE_ACTIVATED)
+ return;
+
+ hard_iface->if_status = BATADV_IF_INACTIVE;
+
+ batadv_info(hard_iface->soft_iface, "Interface deactivated: %s\n",
+ hard_iface->net_dev->name);
+
+ batadv_update_min_mtu(hard_iface->soft_iface);
+}
+
+/**
+ * batadv_hardif_enable_interface() - Enslave hard interface to soft interface
+ * @hard_iface: hard interface to add to soft interface
+ * @soft_iface: netdev struct of the mesh interface
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
+ struct net_device *soft_iface)
+{
+ struct batadv_priv *bat_priv;
+ __be16 ethertype = htons(ETH_P_BATMAN);
+ int max_header_len = batadv_max_header_len();
+ unsigned int required_mtu;
+ unsigned int hardif_mtu;
+ int ret;
+
+ hardif_mtu = READ_ONCE(hard_iface->net_dev->mtu);
+ required_mtu = READ_ONCE(soft_iface->mtu) + max_header_len;
+
+ if (hardif_mtu < ETH_MIN_MTU + max_header_len)
+ return -EINVAL;
+
+ if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
+ goto out;
+
+ kref_get(&hard_iface->refcount);
+
+ dev_hold(soft_iface);
+ hard_iface->soft_iface = soft_iface;
+ bat_priv = netdev_priv(hard_iface->soft_iface);
+
+ ret = netdev_master_upper_dev_link(hard_iface->net_dev,
+ soft_iface, NULL, NULL, NULL);
+ if (ret)
+ goto err_dev;
+
+ ret = bat_priv->algo_ops->iface.enable(hard_iface);
+ if (ret < 0)
+ goto err_upper;
+
+ hard_iface->if_status = BATADV_IF_INACTIVE;
+
+ kref_get(&hard_iface->refcount);
+ hard_iface->batman_adv_ptype.type = ethertype;
+ hard_iface->batman_adv_ptype.func = batadv_batman_skb_recv;
+ hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
+ dev_add_pack(&hard_iface->batman_adv_ptype);
+
+ batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
+ hard_iface->net_dev->name);
+
+ if (atomic_read(&bat_priv->fragmentation) &&
+ hardif_mtu < required_mtu)
+ batadv_info(hard_iface->soft_iface,
+ "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to %i would solve the problem.\n",
+ hard_iface->net_dev->name, hardif_mtu,
+ required_mtu);
+
+ if (!atomic_read(&bat_priv->fragmentation) &&
+ hardif_mtu < required_mtu)
+ batadv_info(hard_iface->soft_iface,
+ "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. If you experience problems getting traffic through try increasing the MTU to %i.\n",
+ hard_iface->net_dev->name, hardif_mtu,
+ required_mtu);
+
+ if (batadv_hardif_is_iface_up(hard_iface))
+ batadv_hardif_activate_interface(hard_iface);
+ else
+ batadv_err(hard_iface->soft_iface,
+ "Not using interface %s (retrying later): interface not active\n",
+ hard_iface->net_dev->name);
+
+ batadv_hardif_recalc_extra_skbroom(soft_iface);
+
+ if (bat_priv->algo_ops->iface.enabled)
+ bat_priv->algo_ops->iface.enabled(hard_iface);
+
+out:
+ return 0;
+
+err_upper:
+ netdev_upper_dev_unlink(hard_iface->net_dev, soft_iface);
+err_dev:
+ hard_iface->soft_iface = NULL;
+ dev_put(soft_iface);
+ batadv_hardif_put(hard_iface);
+ return ret;
+}
+
+/**
+ * batadv_hardif_cnt() - get number of interfaces enslaved to soft interface
+ * @soft_iface: soft interface to check
+ *
+ * This function only uses RCU for locking; the result can therefore be
+ * stale when another function modifies the list at the same time. The
+ * caller can take the rtnl_lock to make sure that the count is accurate.
+ *
+ * Return: number of connected/enslaved hard interfaces
+ */
+static size_t batadv_hardif_cnt(const struct net_device *soft_iface)
+{
+ struct batadv_hard_iface *hard_iface;
+ size_t count = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ if (hard_iface->soft_iface != soft_iface)
+ continue;
+
+ count++;
+ }
+ rcu_read_unlock();
+
+ return count;
+}
+
+/**
+ * batadv_hardif_disable_interface() - Remove hard interface from soft interface
+ * @hard_iface: hard interface to be removed
+ */
+void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface)
+{
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ struct batadv_hard_iface *primary_if = NULL;
+
+ batadv_hardif_deactivate_interface(hard_iface);
+
+ if (hard_iface->if_status != BATADV_IF_INACTIVE)
+ goto out;
+
+ batadv_info(hard_iface->soft_iface, "Removing interface: %s\n",
+ hard_iface->net_dev->name);
+ dev_remove_pack(&hard_iface->batman_adv_ptype);
+ batadv_hardif_put(hard_iface);
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (hard_iface == primary_if) {
+ struct batadv_hard_iface *new_if;
+
+ new_if = batadv_hardif_get_active(hard_iface->soft_iface);
+ batadv_primary_if_select(bat_priv, new_if);
+
+ batadv_hardif_put(new_if);
+ }
+
+ bat_priv->algo_ops->iface.disable(hard_iface);
+ hard_iface->if_status = BATADV_IF_NOT_IN_USE;
+
+ /* delete all references to this hard_iface */
+ batadv_purge_orig_ref(bat_priv);
+ batadv_purge_outstanding_packets(bat_priv, hard_iface);
+ dev_put(hard_iface->soft_iface);
+
+ netdev_upper_dev_unlink(hard_iface->net_dev, hard_iface->soft_iface);
+ batadv_hardif_recalc_extra_skbroom(hard_iface->soft_iface);
+
+ /* nobody uses this interface anymore */
+ if (batadv_hardif_cnt(hard_iface->soft_iface) <= 1)
+ batadv_gw_check_client_stop(bat_priv);
+
+ hard_iface->soft_iface = NULL;
+ batadv_hardif_put(hard_iface);
+
+out:
+ batadv_hardif_put(primary_if);
+}
+
+static struct batadv_hard_iface *
+batadv_hardif_add_interface(struct net_device *net_dev)
+{
+ struct batadv_hard_iface *hard_iface;
+
+ ASSERT_RTNL();
+
+ if (!batadv_is_valid_iface(net_dev))
+ goto out;
+
+ dev_hold(net_dev);
+
+ hard_iface = kzalloc(sizeof(*hard_iface), GFP_ATOMIC);
+ if (!hard_iface)
+ goto release_dev;
+
+ hard_iface->net_dev = net_dev;
+ hard_iface->soft_iface = NULL;
+ hard_iface->if_status = BATADV_IF_NOT_IN_USE;
+
+ INIT_LIST_HEAD(&hard_iface->list);
+ INIT_HLIST_HEAD(&hard_iface->neigh_list);
+
+ mutex_init(&hard_iface->bat_iv.ogm_buff_mutex);
+ spin_lock_init(&hard_iface->neigh_list_lock);
+ kref_init(&hard_iface->refcount);
+
+ hard_iface->num_bcasts = BATADV_NUM_BCASTS_DEFAULT;
+ hard_iface->wifi_flags = batadv_wifi_flags_evaluate(net_dev);
+ if (batadv_is_wifi_hardif(hard_iface))
+ hard_iface->num_bcasts = BATADV_NUM_BCASTS_WIRELESS;
+
+ atomic_set(&hard_iface->hop_penalty, 0);
+
+ batadv_v_hardif_init(hard_iface);
+
+ batadv_check_known_mac_addr(hard_iface->net_dev);
+ kref_get(&hard_iface->refcount);
+ list_add_tail_rcu(&hard_iface->list, &batadv_hardif_list);
+ batadv_hardif_generation++;
+
+ return hard_iface;
+
+release_dev:
+ dev_put(net_dev);
+out:
+ return NULL;
+}
+
+static void batadv_hardif_remove_interface(struct batadv_hard_iface *hard_iface)
+{
+ ASSERT_RTNL();
+
+ /* first deactivate interface */
+ if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
+ batadv_hardif_disable_interface(hard_iface);
+
+ if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
+ return;
+
+ hard_iface->if_status = BATADV_IF_TO_BE_REMOVED;
+ batadv_hardif_put(hard_iface);
+}
+
+/**
+ * batadv_hard_if_event_softif() - Handle events for soft interfaces
+ * @event: NETDEV_* event to handle
+ * @net_dev: net_device which generated an event
+ *
+ * Return: NOTIFY_* result
+ */
+static int batadv_hard_if_event_softif(unsigned long event,
+ struct net_device *net_dev)
+{
+ struct batadv_priv *bat_priv;
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ bat_priv = netdev_priv(net_dev);
+ batadv_softif_create_vlan(bat_priv, BATADV_NO_FLAGS);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int batadv_hard_if_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
+ struct batadv_hard_iface *hard_iface;
+ struct batadv_hard_iface *primary_if = NULL;
+ struct batadv_priv *bat_priv;
+
+ if (batadv_softif_is_valid(net_dev))
+ return batadv_hard_if_event_softif(event, net_dev);
+
+ hard_iface = batadv_hardif_get_by_netdev(net_dev);
+ if (!hard_iface && (event == NETDEV_REGISTER ||
+ event == NETDEV_POST_TYPE_CHANGE))
+ hard_iface = batadv_hardif_add_interface(net_dev);
+
+ if (!hard_iface)
+ goto out;
+
+ switch (event) {
+ case NETDEV_UP:
+ batadv_hardif_activate_interface(hard_iface);
+ break;
+ case NETDEV_GOING_DOWN:
+ case NETDEV_DOWN:
+ batadv_hardif_deactivate_interface(hard_iface);
+ break;
+ case NETDEV_UNREGISTER:
+ case NETDEV_PRE_TYPE_CHANGE:
+ list_del_rcu(&hard_iface->list);
+ batadv_hardif_generation++;
+
+ batadv_hardif_remove_interface(hard_iface);
+ break;
+ case NETDEV_CHANGEMTU:
+ if (hard_iface->soft_iface)
+ batadv_update_min_mtu(hard_iface->soft_iface);
+ break;
+ case NETDEV_CHANGEADDR:
+ if (hard_iface->if_status == BATADV_IF_NOT_IN_USE)
+ goto hardif_put;
+
+ batadv_check_known_mac_addr(hard_iface->net_dev);
+
+ bat_priv = netdev_priv(hard_iface->soft_iface);
+ bat_priv->algo_ops->iface.update_mac(hard_iface);
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ goto hardif_put;
+
+ if (hard_iface == primary_if)
+ batadv_primary_if_update_addr(bat_priv, NULL);
+ break;
+ case NETDEV_CHANGEUPPER:
+ hard_iface->wifi_flags = batadv_wifi_flags_evaluate(net_dev);
+ if (batadv_is_wifi_hardif(hard_iface))
+ hard_iface->num_bcasts = BATADV_NUM_BCASTS_WIRELESS;
+ break;
+ default:
+ break;
+ }
+
+hardif_put:
+ batadv_hardif_put(hard_iface);
+out:
+ batadv_hardif_put(primary_if);
+ return NOTIFY_DONE;
+}
+
+struct notifier_block batadv_hard_if_notifier = {
+ .notifier_call = batadv_hard_if_event,
+};
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
new file mode 100644
index 0000000000..64f660dbbe
--- /dev/null
+++ b/net/batman-adv/hard-interface.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Simon Wunderlich
+ */
+
+#ifndef _NET_BATMAN_ADV_HARD_INTERFACE_H_
+#define _NET_BATMAN_ADV_HARD_INTERFACE_H_
+
+#include "main.h"
+
+#include <linux/compiler.h>
+#include <linux/kref.h>
+#include <linux/netdevice.h>
+#include <linux/notifier.h>
+#include <linux/rcupdate.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+/**
+ * enum batadv_hard_if_state - State of a hard interface
+ */
+enum batadv_hard_if_state {
+ /**
+ * @BATADV_IF_NOT_IN_USE: interface is not used as slave interface of a
+ * batman-adv soft interface
+ */
+ BATADV_IF_NOT_IN_USE,
+
+ /**
+ * @BATADV_IF_TO_BE_REMOVED: interface will be removed from soft
+ * interface
+ */
+ BATADV_IF_TO_BE_REMOVED,
+
+ /** @BATADV_IF_INACTIVE: interface is deactivated */
+ BATADV_IF_INACTIVE,
+
+ /** @BATADV_IF_ACTIVE: interface is used */
+ BATADV_IF_ACTIVE,
+
+ /** @BATADV_IF_TO_BE_ACTIVATED: interface is getting activated */
+ BATADV_IF_TO_BE_ACTIVATED,
+};
+
+/**
+ * enum batadv_hard_if_bcast - broadcast avoidance options
+ */
+enum batadv_hard_if_bcast {
+ /** @BATADV_HARDIF_BCAST_OK: Do broadcast on according hard interface */
+ BATADV_HARDIF_BCAST_OK = 0,
+
+ /**
+ * @BATADV_HARDIF_BCAST_NORECIPIENT: Broadcast not needed, there is no
+ * recipient
+ */
+ BATADV_HARDIF_BCAST_NORECIPIENT,
+
+ /**
+ * @BATADV_HARDIF_BCAST_DUPFWD: There is just the neighbor we got it
+ * from
+ */
+ BATADV_HARDIF_BCAST_DUPFWD,
+
+ /** @BATADV_HARDIF_BCAST_DUPORIG: There is just the originator */
+ BATADV_HARDIF_BCAST_DUPORIG,
+};
+
+extern struct notifier_block batadv_hard_if_notifier;
+
+struct net_device *batadv_get_real_netdev(struct net_device *net_device);
+bool batadv_is_cfg80211_hardif(struct batadv_hard_iface *hard_iface);
+bool batadv_is_wifi_hardif(struct batadv_hard_iface *hard_iface);
+struct batadv_hard_iface*
+batadv_hardif_get_by_netdev(const struct net_device *net_dev);
+int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
+ struct net_device *soft_iface);
+void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface);
+int batadv_hardif_min_mtu(struct net_device *soft_iface);
+void batadv_update_min_mtu(struct net_device *soft_iface);
+void batadv_hardif_release(struct kref *ref);
+int batadv_hardif_no_broadcast(struct batadv_hard_iface *if_outgoing,
+ u8 *orig_addr, u8 *orig_neigh);
+
+/**
+ * batadv_hardif_put() - decrement the hard interface refcounter and possibly
+ * release it
+ * @hard_iface: the hard interface to free
+ */
+static inline void batadv_hardif_put(struct batadv_hard_iface *hard_iface)
+{
+ if (!hard_iface)
+ return;
+
+ kref_put(&hard_iface->refcount, batadv_hardif_release);
+}
+
+/**
+ * batadv_primary_if_get_selected() - Get reference to primary interface
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: primary interface (with increased refcnt), otherwise NULL
+ */
+static inline struct batadv_hard_iface *
+batadv_primary_if_get_selected(struct batadv_priv *bat_priv)
+{
+ struct batadv_hard_iface *hard_iface;
+
+ rcu_read_lock();
+ hard_iface = rcu_dereference(bat_priv->primary_if);
+ if (!hard_iface)
+ goto out;
+
+ if (!kref_get_unless_zero(&hard_iface->refcount))
+ hard_iface = NULL;
+
+out:
+ rcu_read_unlock();
+ return hard_iface;
+}
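+
+/* Typical usage sketch: the returned interface carries a reference the
+ * caller has to drop again, e.g.:
+ *
+ *	primary_if = batadv_primary_if_get_selected(bat_priv);
+ *	if (primary_if)
+ *		... use primary_if ...
+ *	batadv_hardif_put(primary_if);
+ *
+ * batadv_hardif_put() accepts NULL, so the cleanup needs no extra check.
+ */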
+
+#endif /* _NET_BATMAN_ADV_HARD_INTERFACE_H_ */
diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c
new file mode 100644
index 0000000000..8016e61978
--- /dev/null
+++ b/net/batman-adv/hash.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Simon Wunderlich, Marek Lindner
+ */
+
+#include "hash.h"
+#include "main.h"
+
+#include <linux/gfp.h>
+#include <linux/lockdep.h>
+#include <linux/slab.h>
+
+/* clears the hash */
+static void batadv_hash_init(struct batadv_hashtable *hash)
+{
+ u32 i;
+
+ for (i = 0; i < hash->size; i++) {
+ INIT_HLIST_HEAD(&hash->table[i]);
+ spin_lock_init(&hash->list_locks[i]);
+ }
+
+ atomic_set(&hash->generation, 0);
+}
+
+/**
+ * batadv_hash_destroy() - Free only the hashtable and the hash itself
+ * @hash: hash object to destroy
+ */
+void batadv_hash_destroy(struct batadv_hashtable *hash)
+{
+ kfree(hash->list_locks);
+ kfree(hash->table);
+ kfree(hash);
+}
+
+/**
+ * batadv_hash_new() - Allocates and clears the hashtable
+ * @size: number of hash buckets to allocate
+ *
+ * Return: newly allocated hashtable, NULL on errors
+ */
+struct batadv_hashtable *batadv_hash_new(u32 size)
+{
+ struct batadv_hashtable *hash;
+
+ hash = kmalloc(sizeof(*hash), GFP_ATOMIC);
+ if (!hash)
+ return NULL;
+
+ hash->table = kmalloc_array(size, sizeof(*hash->table), GFP_ATOMIC);
+ if (!hash->table)
+ goto free_hash;
+
+ hash->list_locks = kmalloc_array(size, sizeof(*hash->list_locks),
+ GFP_ATOMIC);
+ if (!hash->list_locks)
+ goto free_table;
+
+ hash->size = size;
+ batadv_hash_init(hash);
+ return hash;
+
+free_table:
+ kfree(hash->table);
+free_hash:
+ kfree(hash);
+ return NULL;
+}
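+
+/* Usage sketch: a table with 1024 buckets; note that batadv_hash_destroy()
+ * frees only the table, the stored elements remain the caller's problem:
+ *
+ *	hash = batadv_hash_new(1024);
+ *	if (!hash)
+ *		return -ENOMEM;
+ *	...
+ *	batadv_hash_destroy(hash);
+ */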
+
+/**
+ * batadv_hash_set_lock_class() - Set specific lockdep class for hash spinlocks
+ * @hash: hash object to modify
+ * @key: lockdep class key address
+ */
+void batadv_hash_set_lock_class(struct batadv_hashtable *hash,
+ struct lock_class_key *key)
+{
+ u32 i;
+
+ for (i = 0; i < hash->size; i++)
+ lockdep_set_class(&hash->list_locks[i], key);
+}
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
new file mode 100644
index 0000000000..fb251c385a
--- /dev/null
+++ b/net/batman-adv/hash.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Simon Wunderlich, Marek Lindner
+ */
+
+#ifndef _NET_BATMAN_ADV_HASH_H_
+#define _NET_BATMAN_ADV_HASH_H_
+
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/rculist.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+/* callback to a compare function. Should compare the data of two elements
+ * by their keys
+ *
+ * Return: true if the keys match, false otherwise
+ */
+typedef bool (*batadv_hashdata_compare_cb)(const struct hlist_node *,
+ const void *);
+
+/* the hash function
+ *
+ * Return: an index based on the key in the data of the first argument and
+ * the table size given as the second argument
+ */
+typedef u32 (*batadv_hashdata_choose_cb)(const void *, u32);
+typedef void (*batadv_hashdata_free_cb)(struct hlist_node *, void *);
+
+/**
+ * struct batadv_hashtable - Wrapper of simple hlist based hashtable
+ */
+struct batadv_hashtable {
+ /** @table: the hashtable itself with the buckets */
+ struct hlist_head *table;
+
+ /** @list_locks: spinlock for each hash list entry */
+ spinlock_t *list_locks;
+
+ /** @size: size of hashtable */
+ u32 size;
+
+ /** @generation: current (generation) sequence number */
+ atomic_t generation;
+};
+
+/* allocates and clears the hash */
+struct batadv_hashtable *batadv_hash_new(u32 size);
+
+/* set class key for all locks */
+void batadv_hash_set_lock_class(struct batadv_hashtable *hash,
+ struct lock_class_key *key);
+
+/* free only the hashtable and the hash itself. */
+void batadv_hash_destroy(struct batadv_hashtable *hash);
+
+/**
+ * batadv_hash_add() - adds data to the hashtable
+ * @hash: storage hash table
+ * @compare: callback to determine if 2 hash elements are identical
+ * @choose: callback calculating the hash index
+ * @data: data passed to the aforementioned callbacks as argument
+ * @data_node: to be added element
+ *
+ * Return: 0 on success, 1 if the element is already in the hash,
+ * and -1 on error.
+ */
+static inline int batadv_hash_add(struct batadv_hashtable *hash,
+ batadv_hashdata_compare_cb compare,
+ batadv_hashdata_choose_cb choose,
+ const void *data,
+ struct hlist_node *data_node)
+{
+ u32 index;
+ int ret = -1;
+ struct hlist_head *head;
+ struct hlist_node *node;
+ spinlock_t *list_lock; /* spinlock to protect write access */
+
+ if (!hash)
+ goto out;
+
+ index = choose(data, hash->size);
+ head = &hash->table[index];
+ list_lock = &hash->list_locks[index];
+
+ spin_lock_bh(list_lock);
+
+ hlist_for_each(node, head) {
+ if (!compare(node, data))
+ continue;
+
+ ret = 1;
+ goto unlock;
+ }
+
+ /* no duplicate found in list, add new element */
+ hlist_add_head_rcu(data_node, head);
+ atomic_inc(&hash->generation);
+
+ ret = 0;
+
+unlock:
+ spin_unlock_bh(list_lock);
+out:
+ return ret;
+}
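+
+/* Usage sketch with hypothetical callbacks my_compare()/my_choose() (any
+ * pair matching the typedefs above will do; the originator table for
+ * instance uses batadv_compare_orig/batadv_choose_orig):
+ *
+ *	ret = batadv_hash_add(hash, my_compare, my_choose, my_elem,
+ *			      &my_elem->hash_entry);
+ *	if (ret == 1)
+ *		... duplicate key, element was not inserted ...
+ */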
+
+/**
+ * batadv_hash_remove() - Removes data from hash, if found
+ * @hash: hash table
+ * @compare: callback to determine if 2 hash elements are identical
+ * @choose: callback calculating the hash index
+ * @data: data passed to the aforementioned callbacks as argument
+ *
+ * @data could be the structure you use with just the key filled; we just
+ * need the key for comparing.
+ *
+ * Return: pointer to the data on success, so you can remove the used
+ * structure yourself, or NULL on error
+ */
+static inline void *batadv_hash_remove(struct batadv_hashtable *hash,
+ batadv_hashdata_compare_cb compare,
+ batadv_hashdata_choose_cb choose,
+ void *data)
+{
+ u32 index;
+ struct hlist_node *node;
+ struct hlist_head *head;
+ void *data_save = NULL;
+
+ index = choose(data, hash->size);
+ head = &hash->table[index];
+
+ spin_lock_bh(&hash->list_locks[index]);
+ hlist_for_each(node, head) {
+ if (!compare(node, data))
+ continue;
+
+ data_save = node;
+ hlist_del_rcu(node);
+ atomic_inc(&hash->generation);
+ break;
+ }
+ spin_unlock_bh(&hash->list_locks[index]);
+
+ return data_save;
+}
+
+#endif /* _NET_BATMAN_ADV_HASH_H_ */
diff --git a/net/batman-adv/log.c b/net/batman-adv/log.c
new file mode 100644
index 0000000000..7a93a1e94c
--- /dev/null
+++ b/net/batman-adv/log.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner
+ */
+
+#include "log.h"
+#include "main.h"
+
+#include <linux/stdarg.h>
+
+#include "trace.h"
+
+/**
+ * batadv_debug_log() - Add debug log entry
+ * @bat_priv: the bat priv with all the soft interface information
+ * @fmt: format string
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ trace_batadv_dbg(bat_priv, &vaf);
+
+ va_end(args);
+
+ return 0;
+}
diff --git a/net/batman-adv/log.h b/net/batman-adv/log.h
new file mode 100644
index 0000000000..6717c965f0
--- /dev/null
+++ b/net/batman-adv/log.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Simon Wunderlich
+ */
+
+#ifndef _NET_BATMAN_ADV_LOG_H_
+#define _NET_BATMAN_ADV_LOG_H_
+
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/printk.h>
+
+#ifdef CONFIG_BATMAN_ADV_DEBUG
+
+int batadv_debug_log_setup(struct batadv_priv *bat_priv);
+void batadv_debug_log_cleanup(struct batadv_priv *bat_priv);
+
+#else
+
+static inline int batadv_debug_log_setup(struct batadv_priv *bat_priv)
+{
+ return 0;
+}
+
+static inline void batadv_debug_log_cleanup(struct batadv_priv *bat_priv)
+{
+}
+
+#endif
+
+/**
+ * enum batadv_dbg_level - available log levels
+ */
+enum batadv_dbg_level {
+ /** @BATADV_DBG_BATMAN: OGM and TQ computations related messages */
+ BATADV_DBG_BATMAN = BIT(0),
+
+ /** @BATADV_DBG_ROUTES: route added / changed / deleted */
+ BATADV_DBG_ROUTES = BIT(1),
+
+ /** @BATADV_DBG_TT: translation table messages */
+ BATADV_DBG_TT = BIT(2),
+
+ /** @BATADV_DBG_BLA: bridge loop avoidance messages */
+ BATADV_DBG_BLA = BIT(3),
+
+ /** @BATADV_DBG_DAT: ARP snooping and DAT related messages */
+ BATADV_DBG_DAT = BIT(4),
+
+ /** @BATADV_DBG_NC: network coding related messages */
+ BATADV_DBG_NC = BIT(5),
+
+ /** @BATADV_DBG_MCAST: multicast related messages */
+ BATADV_DBG_MCAST = BIT(6),
+
+ /** @BATADV_DBG_TP_METER: throughput meter messages */
+ BATADV_DBG_TP_METER = BIT(7),
+
+ /** @BATADV_DBG_ALL: the union of all the above log levels */
+ BATADV_DBG_ALL = 255,
+};
+
+#ifdef CONFIG_BATMAN_ADV_DEBUG
+int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
+__printf(2, 3);
+
+/**
+ * _batadv_dbg() - Store debug output with(out) rate limiting
+ * @type: type of debug message
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ratelimited: whether output should be rate limited
+ * @fmt: format string
+ * @arg: variable arguments
+ */
+#define _batadv_dbg(type, bat_priv, ratelimited, fmt, arg...) \
+ do { \
+ struct batadv_priv *__batpriv = (bat_priv); \
+ if (atomic_read(&__batpriv->log_level) & (type) && \
+ (!(ratelimited) || net_ratelimit())) \
+ batadv_debug_log(__batpriv, fmt, ## arg); \
+ } \
+ while (0)
+#else /* !CONFIG_BATMAN_ADV_DEBUG */
+__printf(4, 5)
+static inline void _batadv_dbg(int type __always_unused,
+ struct batadv_priv *bat_priv __always_unused,
+ int ratelimited __always_unused,
+ const char *fmt __always_unused, ...)
+{
+}
+#endif
+
+/**
+ * batadv_dbg() - Store debug output without rate limiting
+ * @type: type of debug message
+ * @bat_priv: the bat priv with all the soft interface information
+ * @arg: format string and variable arguments
+ */
+#define batadv_dbg(type, bat_priv, arg...) \
+ _batadv_dbg(type, bat_priv, 0, ## arg)
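+
+/* Usage sketch: a translation-table event could be logged as
+ *
+ *	batadv_dbg(BATADV_DBG_TT, bat_priv, "client %pM added\n", addr);
+ *
+ * which only produces output when BATADV_DBG_TT is set in this mesh's
+ * log_level and CONFIG_BATMAN_ADV_DEBUG is enabled.
+ */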
+
+/**
+ * batadv_dbg_ratelimited() - Store debug output with rate limiting
+ * @type: type of debug message
+ * @bat_priv: the bat priv with all the soft interface information
+ * @arg: format string and variable arguments
+ */
+#define batadv_dbg_ratelimited(type, bat_priv, arg...) \
+ _batadv_dbg(type, bat_priv, 1, ## arg)
+
+/**
+ * batadv_info() - Store message in debug buffer and print it to kmsg buffer
+ * @net_dev: the soft interface net device
+ * @fmt: format string
+ * @arg: variable arguments
+ */
+#define batadv_info(net_dev, fmt, arg...) \
+ do { \
+ struct net_device *_netdev = (net_dev); \
+ struct batadv_priv *_batpriv = netdev_priv(_netdev); \
+ batadv_dbg(BATADV_DBG_ALL, _batpriv, fmt, ## arg); \
+ pr_info("%s: " fmt, _netdev->name, ## arg); \
+ } while (0)
+
+/**
+ * batadv_err() - Store error in debug buffer and print it to kmsg buffer
+ * @net_dev: the soft interface net device
+ * @fmt: format string
+ * @arg: variable arguments
+ */
+#define batadv_err(net_dev, fmt, arg...) \
+ do { \
+ struct net_device *_netdev = (net_dev); \
+ struct batadv_priv *_batpriv = netdev_priv(_netdev); \
+ batadv_dbg(BATADV_DBG_ALL, _batpriv, fmt, ## arg); \
+ pr_err("%s: " fmt, _netdev->name, ## arg); \
+ } while (0)
+
+#endif /* _NET_BATMAN_ADV_LOG_H_ */
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
new file mode 100644
index 0000000000..e8a4499155
--- /dev/null
+++ b/net/batman-adv/main.c
@@ -0,0 +1,731 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Simon Wunderlich
+ */
+
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/build_bug.h>
+#include <linux/byteorder/generic.h>
+#include <linux/container_of.h>
+#include <linux/crc32c.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/genetlink.h>
+#include <linux/gfp.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/init.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/printk.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <net/dsfield.h>
+#include <net/rtnetlink.h>
+#include <uapi/linux/batadv_packet.h>
+#include <uapi/linux/batman_adv.h>
+
+#include "bat_algo.h"
+#include "bat_iv_ogm.h"
+#include "bat_v.h"
+#include "bridge_loop_avoidance.h"
+#include "distributed-arp-table.h"
+#include "gateway_client.h"
+#include "gateway_common.h"
+#include "hard-interface.h"
+#include "log.h"
+#include "multicast.h"
+#include "netlink.h"
+#include "network-coding.h"
+#include "originator.h"
+#include "routing.h"
+#include "send.h"
+#include "soft-interface.h"
+#include "tp_meter.h"
+#include "translation-table.h"
+
+/* List manipulations on hardif_list have to be rtnl_lock()'ed,
+ * list traversals just rcu-locked
+ */
+struct list_head batadv_hardif_list;
+unsigned int batadv_hardif_generation;
+static int (*batadv_rx_handler[256])(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if);
+
+unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+struct workqueue_struct *batadv_event_workqueue;
+
+static void batadv_recv_handler_init(void);
+
+#define BATADV_UEV_TYPE_VAR "BATTYPE="
+#define BATADV_UEV_ACTION_VAR "BATACTION="
+#define BATADV_UEV_DATA_VAR "BATDATA="
+
+static char *batadv_uev_action_str[] = {
+ "add",
+ "del",
+ "change",
+ "loopdetect",
+};
+
+static char *batadv_uev_type_str[] = {
+ "gw",
+ "bla",
+};
+
+static int __init batadv_init(void)
+{
+ int ret;
+
+ ret = batadv_tt_cache_init();
+ if (ret < 0)
+ return ret;
+
+ INIT_LIST_HEAD(&batadv_hardif_list);
+ batadv_algo_init();
+
+ batadv_recv_handler_init();
+
+ batadv_v_init();
+ batadv_iv_init();
+ batadv_nc_init();
+ batadv_tp_meter_init();
+
+ batadv_event_workqueue = create_singlethread_workqueue("bat_events");
+ if (!batadv_event_workqueue)
+ goto err_create_wq;
+
+ register_netdevice_notifier(&batadv_hard_if_notifier);
+ rtnl_link_register(&batadv_link_ops);
+ batadv_netlink_register();
+
+ pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
+ BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);
+
+ return 0;
+
+err_create_wq:
+ batadv_tt_cache_destroy();
+
+ return -ENOMEM;
+}
+
+static void __exit batadv_exit(void)
+{
+ batadv_netlink_unregister();
+ rtnl_link_unregister(&batadv_link_ops);
+ unregister_netdevice_notifier(&batadv_hard_if_notifier);
+
+ destroy_workqueue(batadv_event_workqueue);
+ batadv_event_workqueue = NULL;
+
+ rcu_barrier();
+
+ batadv_tt_cache_destroy();
+}
+
+/**
+ * batadv_mesh_init() - Initialize soft interface
+ * @soft_iface: netdev struct of the soft interface
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+int batadv_mesh_init(struct net_device *soft_iface)
+{
+ struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+ int ret;
+
+ spin_lock_init(&bat_priv->forw_bat_list_lock);
+ spin_lock_init(&bat_priv->forw_bcast_list_lock);
+ spin_lock_init(&bat_priv->tt.changes_list_lock);
+ spin_lock_init(&bat_priv->tt.req_list_lock);
+ spin_lock_init(&bat_priv->tt.roam_list_lock);
+ spin_lock_init(&bat_priv->tt.last_changeset_lock);
+ spin_lock_init(&bat_priv->tt.commit_lock);
+ spin_lock_init(&bat_priv->gw.list_lock);
+#ifdef CONFIG_BATMAN_ADV_MCAST
+ spin_lock_init(&bat_priv->mcast.mla_lock);
+ spin_lock_init(&bat_priv->mcast.want_lists_lock);
+#endif
+ spin_lock_init(&bat_priv->tvlv.container_list_lock);
+ spin_lock_init(&bat_priv->tvlv.handler_list_lock);
+ spin_lock_init(&bat_priv->softif_vlan_list_lock);
+ spin_lock_init(&bat_priv->tp_list_lock);
+
+ INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
+ INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
+ INIT_HLIST_HEAD(&bat_priv->gw.gateway_list);
+#ifdef CONFIG_BATMAN_ADV_MCAST
+ INIT_HLIST_HEAD(&bat_priv->mcast.want_all_unsnoopables_list);
+ INIT_HLIST_HEAD(&bat_priv->mcast.want_all_ipv4_list);
+ INIT_HLIST_HEAD(&bat_priv->mcast.want_all_ipv6_list);
+#endif
+ INIT_LIST_HEAD(&bat_priv->tt.changes_list);
+ INIT_HLIST_HEAD(&bat_priv->tt.req_list);
+ INIT_LIST_HEAD(&bat_priv->tt.roam_list);
+#ifdef CONFIG_BATMAN_ADV_MCAST
+ INIT_HLIST_HEAD(&bat_priv->mcast.mla_list);
+#endif
+ INIT_HLIST_HEAD(&bat_priv->tvlv.container_list);
+ INIT_HLIST_HEAD(&bat_priv->tvlv.handler_list);
+ INIT_HLIST_HEAD(&bat_priv->softif_vlan_list);
+ INIT_HLIST_HEAD(&bat_priv->tp_list);
+
+ bat_priv->gw.generation = 0;
+
+ ret = batadv_originator_init(bat_priv);
+ if (ret < 0) {
+ atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
+ goto err_orig;
+ }
+
+ ret = batadv_tt_init(bat_priv);
+ if (ret < 0) {
+ atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
+ goto err_tt;
+ }
+
+ ret = batadv_v_mesh_init(bat_priv);
+ if (ret < 0) {
+ atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
+ goto err_v;
+ }
+
+ ret = batadv_bla_init(bat_priv);
+ if (ret < 0) {
+ atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
+ goto err_bla;
+ }
+
+ ret = batadv_dat_init(bat_priv);
+ if (ret < 0) {
+ atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
+ goto err_dat;
+ }
+
+ ret = batadv_nc_mesh_init(bat_priv);
+ if (ret < 0) {
+ atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
+ goto err_nc;
+ }
+
+ batadv_gw_init(bat_priv);
+ batadv_mcast_init(bat_priv);
+
+ atomic_set(&bat_priv->gw.reselect, 0);
+ atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);
+
+ return 0;
+
+err_nc:
+ batadv_dat_free(bat_priv);
+err_dat:
+ batadv_bla_free(bat_priv);
+err_bla:
+ batadv_v_mesh_free(bat_priv);
+err_v:
+ batadv_tt_free(bat_priv);
+err_tt:
+ batadv_originator_free(bat_priv);
+err_orig:
+ batadv_purge_outstanding_packets(bat_priv, NULL);
+ atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
+
+ return ret;
+}
+
+/**
+ * batadv_mesh_free() - Deinitialize soft interface
+ * @soft_iface: netdev struct of the soft interface
+ */
+void batadv_mesh_free(struct net_device *soft_iface)
+{
+ struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+
+ atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
+
+ batadv_purge_outstanding_packets(bat_priv, NULL);
+
+ batadv_gw_node_free(bat_priv);
+
+ batadv_v_mesh_free(bat_priv);
+ batadv_nc_mesh_free(bat_priv);
+ batadv_dat_free(bat_priv);
+ batadv_bla_free(bat_priv);
+
+ batadv_mcast_free(bat_priv);
+
+ /* Free the TT and the originator tables only after having terminated
+	 * all the other dependent components which may use these structures for
+ * their purposes.
+ */
+ batadv_tt_free(bat_priv);
+
+ /* Since the originator table clean up routine is accessing the TT
+ * tables as well, it has to be invoked after the TT tables have been
+ * freed and marked as empty. This ensures that no cleanup RCU callbacks
+ * accessing the TT data are scheduled for later execution.
+ */
+ batadv_originator_free(bat_priv);
+
+ batadv_gw_free(bat_priv);
+
+ free_percpu(bat_priv->bat_counters);
+ bat_priv->bat_counters = NULL;
+
+ atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
+}
+
+/**
+ * batadv_is_my_mac() - check if the given mac address belongs to any of the
+ * real interfaces in the current mesh
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the address to check
+ *
+ * Return: 'true' if the mac address was found, false otherwise.
+ */
+bool batadv_is_my_mac(struct batadv_priv *bat_priv, const u8 *addr)
+{
+ const struct batadv_hard_iface *hard_iface;
+ bool is_my_mac = false;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ if (hard_iface->if_status != BATADV_IF_ACTIVE)
+ continue;
+
+ if (hard_iface->soft_iface != bat_priv->soft_iface)
+ continue;
+
+ if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
+ is_my_mac = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return is_my_mac;
+}
+
+/**
+ * batadv_max_header_len() - calculate maximum encapsulation overhead for a
+ * payload packet
+ *
+ * Return: the maximum encapsulation overhead in bytes.
+ */
+int batadv_max_header_len(void)
+{
+ int header_len = 0;
+
+ header_len = max_t(int, header_len,
+ sizeof(struct batadv_unicast_packet));
+ header_len = max_t(int, header_len,
+ sizeof(struct batadv_unicast_4addr_packet));
+ header_len = max_t(int, header_len,
+ sizeof(struct batadv_bcast_packet));
+
+#ifdef CONFIG_BATMAN_ADV_NC
+ header_len = max_t(int, header_len,
+ sizeof(struct batadv_coded_packet));
+#endif
+
+ return header_len + ETH_HLEN;
+}
+
+/**
+ * batadv_skb_set_priority() - sets skb priority according to packet content
+ * @skb: the packet to be sent
+ * @offset: offset to the packet content
+ *
+ * This function sets a value between 256 and 263 (802.1d priority), which
+ * can be interpreted by cfg80211 or other drivers.
+ */
+void batadv_skb_set_priority(struct sk_buff *skb, int offset)
+{
+ struct iphdr ip_hdr_tmp, *ip_hdr;
+ struct ipv6hdr ip6_hdr_tmp, *ip6_hdr;
+ struct ethhdr ethhdr_tmp, *ethhdr;
+ struct vlan_ethhdr *vhdr, vhdr_tmp;
+ u32 prio;
+
+ /* already set, do nothing */
+ if (skb->priority >= 256 && skb->priority <= 263)
+ return;
+
+ ethhdr = skb_header_pointer(skb, offset, sizeof(*ethhdr), &ethhdr_tmp);
+ if (!ethhdr)
+ return;
+
+ switch (ethhdr->h_proto) {
+ case htons(ETH_P_8021Q):
+ vhdr = skb_header_pointer(skb, offset + sizeof(*vhdr),
+ sizeof(*vhdr), &vhdr_tmp);
+ if (!vhdr)
+ return;
+ prio = ntohs(vhdr->h_vlan_TCI) & VLAN_PRIO_MASK;
+ prio = prio >> VLAN_PRIO_SHIFT;
+ break;
+ case htons(ETH_P_IP):
+ ip_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
+ sizeof(*ip_hdr), &ip_hdr_tmp);
+ if (!ip_hdr)
+ return;
+ prio = (ipv4_get_dsfield(ip_hdr) & 0xfc) >> 5;
+ break;
+ case htons(ETH_P_IPV6):
+ ip6_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
+ sizeof(*ip6_hdr), &ip6_hdr_tmp);
+ if (!ip6_hdr)
+ return;
+ prio = (ipv6_get_dsfield(ip6_hdr) & 0xfc) >> 5;
+ break;
+ default:
+ return;
+ }
+
+ skb->priority = prio + 256;
+}
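+
+/* Worked example for the mapping above (illustrative): an IPv4 packet with
+ * DSCP CS6 carries a DS field of 0xc0; (0xc0 & 0xfc) >> 5 = 6, so the skb
+ * priority becomes 6 + 256 = 262, i.e. 802.1d priority 6.
+ */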
+
+static int batadv_recv_unhandled_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if)
+{
+ kfree_skb(skb);
+
+ return NET_RX_DROP;
+}
+
+/* incoming packets with the batman ethertype received on any active hard
+ * interface
+ */
+
+/**
+ * batadv_batman_skb_recv() - Handle incoming message from a hard interface
+ * @skb: the received packet
+ * @dev: the net device that the packet was received on
+ * @ptype: packet type of incoming packet (ETH_P_BATMAN)
+ * @orig_dev: the original receive net device (e.g. bonded device)
+ *
+ * Return: NET_RX_SUCCESS on success or NET_RX_DROP in case of failure
+ */
+int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype,
+ struct net_device *orig_dev)
+{
+ struct batadv_priv *bat_priv;
+ struct batadv_ogm_packet *batadv_ogm_packet;
+ struct batadv_hard_iface *hard_iface;
+ u8 idx;
+
+ hard_iface = container_of(ptype, struct batadv_hard_iface,
+ batman_adv_ptype);
+
+ /* Prevent processing a packet received on an interface which is getting
+ * shut down otherwise the packet may trigger de-reference errors
+ * further down in the receive path.
+ */
+ if (!kref_get_unless_zero(&hard_iface->refcount))
+ goto err_out;
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+
+ /* skb was released by skb_share_check() */
+ if (!skb)
+ goto err_put;
+
+ /* packet should hold at least type and version */
+ if (unlikely(!pskb_may_pull(skb, 2)))
+ goto err_free;
+
+ /* expect a valid ethernet header here. */
+ if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
+ goto err_free;
+
+ if (!hard_iface->soft_iface)
+ goto err_free;
+
+ bat_priv = netdev_priv(hard_iface->soft_iface);
+
+ if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
+ goto err_free;
+
+ /* discard frames on not active interfaces */
+ if (hard_iface->if_status != BATADV_IF_ACTIVE)
+ goto err_free;
+
+ batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;
+
+ if (batadv_ogm_packet->version != BATADV_COMPAT_VERSION) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: incompatible batman version (%i)\n",
+ batadv_ogm_packet->version);
+ goto err_free;
+ }
+
+ /* reset control block to avoid left overs from previous users */
+ memset(skb->cb, 0, sizeof(struct batadv_skb_cb));
+
+ idx = batadv_ogm_packet->packet_type;
+ (*batadv_rx_handler[idx])(skb, hard_iface);
+
+ batadv_hardif_put(hard_iface);
+
+ /* return NET_RX_SUCCESS in any case as we
+ * most probably dropped the packet for
+ * routing-logical reasons.
+ */
+ return NET_RX_SUCCESS;
+
+err_free:
+ kfree_skb(skb);
+err_put:
+ batadv_hardif_put(hard_iface);
+err_out:
+ return NET_RX_DROP;
+}
+
+static void batadv_recv_handler_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
+ batadv_rx_handler[i] = batadv_recv_unhandled_packet;
+
+ for (i = BATADV_UNICAST_MIN; i <= BATADV_UNICAST_MAX; i++)
+ batadv_rx_handler[i] = batadv_recv_unhandled_unicast_packet;
+
+ /* compile time checks for sizes */
+ BUILD_BUG_ON(sizeof(struct batadv_bla_claim_dst) != 6);
+ BUILD_BUG_ON(sizeof(struct batadv_ogm_packet) != 24);
+ BUILD_BUG_ON(sizeof(struct batadv_icmp_header) != 20);
+ BUILD_BUG_ON(sizeof(struct batadv_icmp_packet) != 20);
+ BUILD_BUG_ON(sizeof(struct batadv_icmp_packet_rr) != 116);
+ BUILD_BUG_ON(sizeof(struct batadv_unicast_packet) != 10);
+ BUILD_BUG_ON(sizeof(struct batadv_unicast_4addr_packet) != 18);
+ BUILD_BUG_ON(sizeof(struct batadv_frag_packet) != 20);
+ BUILD_BUG_ON(sizeof(struct batadv_bcast_packet) != 14);
+ BUILD_BUG_ON(sizeof(struct batadv_coded_packet) != 46);
+ BUILD_BUG_ON(sizeof(struct batadv_unicast_tvlv_packet) != 20);
+ BUILD_BUG_ON(sizeof(struct batadv_tvlv_hdr) != 4);
+ BUILD_BUG_ON(sizeof(struct batadv_tvlv_gateway_data) != 8);
+ BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_vlan_data) != 8);
+ BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_change) != 12);
+ BUILD_BUG_ON(sizeof(struct batadv_tvlv_roam_adv) != 8);
+
+ i = sizeof_field(struct sk_buff, cb);
+ BUILD_BUG_ON(sizeof(struct batadv_skb_cb) > i);
+
+ /* broadcast packet */
+ batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
+
+ /* unicast packets ... */
+ /* unicast with 4 addresses packet */
+ batadv_rx_handler[BATADV_UNICAST_4ADDR] = batadv_recv_unicast_packet;
+ /* unicast packet */
+ batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
+ /* unicast tvlv packet */
+ batadv_rx_handler[BATADV_UNICAST_TVLV] = batadv_recv_unicast_tvlv;
+ /* batman icmp packet */
+ batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
+ /* Fragmented packets */
+ batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_frag_packet;
+}
+
+/**
+ * batadv_recv_handler_register() - Register handler for batman-adv packet type
+ * @packet_type: batadv_packettype which should be handled
+ * @recv_handler: receive handler for the packet type
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+int
+batadv_recv_handler_register(u8 packet_type,
+ int (*recv_handler)(struct sk_buff *,
+ struct batadv_hard_iface *))
+{
+ int (*curr)(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if);
+ curr = batadv_rx_handler[packet_type];
+
+ if (curr != batadv_recv_unhandled_packet &&
+ curr != batadv_recv_unhandled_unicast_packet)
+ return -EBUSY;
+
+ batadv_rx_handler[packet_type] = recv_handler;
+ return 0;
+}
+
+/**
+ * batadv_recv_handler_unregister() - Unregister handler for packet type
+ * @packet_type: batadv_packettype which should no longer be handled
+ */
+void batadv_recv_handler_unregister(u8 packet_type)
+{
+ batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
+}
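+
+/* Usage sketch (illustrative; mirrors how the routing algorithms in this
+ * tree hook in): a packet type is claimed once at init time and released
+ * again on teardown:
+ *
+ *	ret = batadv_recv_handler_register(BATADV_ELP,
+ *					   batadv_v_elp_packet_recv);
+ *	if (ret < 0)
+ *		return ret;
+ *	...
+ *	batadv_recv_handler_unregister(BATADV_ELP);
+ */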
+
+/**
+ * batadv_skb_crc32() - calculate the CRC32 of a packet, skipping the header
+ * bytes before @payload_ptr
+ * @skb: skb pointing to fragmented socket buffers
+ * @payload_ptr: Pointer to position inside the head buffer of the skb
+ * marking the start of the data to be CRC'ed
+ *
+ * payload_ptr must always point to an address in the skb head buffer and not to
+ * a fragment.
+ *
+ * Return: big endian crc32c of the checksummed data
+ */
+__be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
+{
+ u32 crc = 0;
+ unsigned int from;
+ unsigned int to = skb->len;
+ struct skb_seq_state st;
+ const u8 *data;
+ unsigned int len;
+ unsigned int consumed = 0;
+
+ from = (unsigned int)(payload_ptr - skb->data);
+
+ skb_prepare_seq_read(skb, from, to, &st);
+ while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
+ crc = crc32c(crc, data, len);
+ consumed += len;
+ }
+
+ return htonl(crc);
+}
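+
+/* Usage sketch (illustrative): checksum everything following a header of
+ * hdr_len bytes, where the header is known to sit in the skb head buffer:
+ *
+ *	__be32 crc = batadv_skb_crc32(skb, skb->data + hdr_len);
+ */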
+
+/**
+ * batadv_get_vid() - extract the VLAN identifier from skb if any
+ * @skb: the buffer containing the packet
+ * @header_len: length of the batman header preceding the ethernet header
+ *
+ * Return: VID with the BATADV_VLAN_HAS_TAG flag when the packet embedded in the
+ * skb is vlan tagged. Otherwise BATADV_NO_FLAGS.
+ */
+unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len)
+{
+ struct ethhdr *ethhdr = (struct ethhdr *)(skb->data + header_len);
+ struct vlan_ethhdr *vhdr;
+ unsigned short vid;
+
+ if (ethhdr->h_proto != htons(ETH_P_8021Q))
+ return BATADV_NO_FLAGS;
+
+ if (!pskb_may_pull(skb, header_len + VLAN_ETH_HLEN))
+ return BATADV_NO_FLAGS;
+
+ vhdr = (struct vlan_ethhdr *)(skb->data + header_len);
+ vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
+ vid |= BATADV_VLAN_HAS_TAG;
+
+ return vid;
+}
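+
+/* Worked example (illustrative): a frame tagged with VLAN id 5 yields
+ * 5 | BATADV_VLAN_HAS_TAG, an untagged frame yields BATADV_NO_FLAGS;
+ * batadv_print_vid() in main.h turns the former back into a plain "5".
+ */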
+
+/**
+ * batadv_vlan_ap_isola_get() - return AP isolation status for the given vlan
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: the VLAN identifier for which the AP isolation attribute has to be
+ * looked up
+ *
+ * Return: true if AP isolation is on for the VLAN identified by vid, false
+ * otherwise
+ */
+bool batadv_vlan_ap_isola_get(struct batadv_priv *bat_priv, unsigned short vid)
+{
+ bool ap_isolation_enabled = false;
+ struct batadv_softif_vlan *vlan;
+
+ /* if the AP isolation is requested on a VLAN, then check for its
+ * setting in the proper VLAN private data structure
+ */
+ vlan = batadv_softif_vlan_get(bat_priv, vid);
+ if (vlan) {
+ ap_isolation_enabled = atomic_read(&vlan->ap_isolation);
+ batadv_softif_vlan_put(vlan);
+ }
+
+ return ap_isolation_enabled;
+}
+
+/**
+ * batadv_throw_uevent() - Send an uevent with batman-adv specific env data
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: subsystem type of event. Stored in uevent's BATTYPE
+ * @action: action type of event. Stored in uevent's BATACTION
+ * @data: string with additional information to the event (ignored for
+ * BATADV_UEV_DEL). Stored in uevent's BATDATA
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type,
+ enum batadv_uev_action action, const char *data)
+{
+ int ret = -ENOMEM;
+ struct kobject *bat_kobj;
+ char *uevent_env[4] = { NULL, NULL, NULL, NULL };
+
+ bat_kobj = &bat_priv->soft_iface->dev.kobj;
+
+ uevent_env[0] = kasprintf(GFP_ATOMIC,
+ "%s%s", BATADV_UEV_TYPE_VAR,
+ batadv_uev_type_str[type]);
+ if (!uevent_env[0])
+ goto out;
+
+ uevent_env[1] = kasprintf(GFP_ATOMIC,
+ "%s%s", BATADV_UEV_ACTION_VAR,
+ batadv_uev_action_str[action]);
+ if (!uevent_env[1])
+ goto out;
+
+ /* If the event is DEL, ignore the data field */
+ if (action != BATADV_UEV_DEL) {
+ uevent_env[2] = kasprintf(GFP_ATOMIC,
+ "%s%s", BATADV_UEV_DATA_VAR, data);
+ if (!uevent_env[2])
+ goto out;
+ }
+
+ ret = kobject_uevent_env(bat_kobj, KOBJ_CHANGE, uevent_env);
+out:
+ kfree(uevent_env[0]);
+ kfree(uevent_env[1]);
+ kfree(uevent_env[2]);
+
+ if (ret)
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Impossible to send uevent for (%s,%s,%s) event (err: %d)\n",
+ batadv_uev_type_str[type],
+ batadv_uev_action_str[action],
+ (action == BATADV_UEV_DEL ? "NULL" : data), ret);
+ return ret;
+}
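+
+/* Usage sketch (illustrative): the gateway code announces a newly selected
+ * gateway roughly like this, with gw_addr being a printable address string:
+ *
+ *	batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_ADD, gw_addr);
+ */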
+
+module_init(batadv_init);
+module_exit(batadv_exit);
+
+MODULE_LICENSE("GPL");
+
+MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
+MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
+MODULE_VERSION(BATADV_SOURCE_VERSION);
+MODULE_ALIAS_RTNL_LINK("batadv");
+MODULE_ALIAS_GENL_FAMILY(BATADV_NL_NAME);
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
new file mode 100644
index 0000000000..10007c5894
--- /dev/null
+++ b/net/batman-adv/main.h
@@ -0,0 +1,384 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Simon Wunderlich
+ */
+
+#ifndef _NET_BATMAN_ADV_MAIN_H_
+#define _NET_BATMAN_ADV_MAIN_H_
+
+#define BATADV_DRIVER_AUTHOR "Marek Lindner <mareklindner@neomailbox.ch>, " \
+ "Simon Wunderlich <sw@simonwunderlich.de>"
+#define BATADV_DRIVER_DESC "B.A.T.M.A.N. advanced"
+#define BATADV_DRIVER_DEVICE "batman-adv"
+
+#ifndef BATADV_SOURCE_VERSION
+#define BATADV_SOURCE_VERSION "2023.3"
+#endif
+
+/* B.A.T.M.A.N. parameters */
+
+#define BATADV_TQ_MAX_VALUE 255
+#define BATADV_THROUGHPUT_MAX_VALUE 0xFFFFFFFF
+#define BATADV_JITTER 20
+
+/* Time To Live of broadcast messages */
+#define BATADV_TTL 50
+
+/* maximum sequence number age of broadcast messages */
+#define BATADV_BCAST_MAX_AGE 64
+
+/* purge originators after time in seconds if no valid packet comes in
+ * -> TODO: check influence on BATADV_TQ_LOCAL_WINDOW_SIZE
+ */
+#define BATADV_PURGE_TIMEOUT 200000 /* 200 seconds */
+#define BATADV_TT_LOCAL_TIMEOUT 600000 /* in milliseconds */
+#define BATADV_TT_CLIENT_ROAM_TIMEOUT 600000 /* in milliseconds */
+#define BATADV_TT_CLIENT_TEMP_TIMEOUT 600000 /* in milliseconds */
+#define BATADV_TT_WORK_PERIOD 5000 /* 5 seconds */
+#define BATADV_ORIG_WORK_PERIOD 1000 /* 1 second */
+#define BATADV_MCAST_WORK_PERIOD 500 /* 0.5 seconds */
+#define BATADV_DAT_ENTRY_TIMEOUT (5 * 60000) /* 5 mins in milliseconds */
+/* sliding packet range of received originator messages in sequence numbers
+ * (should be a multiple of our word size)
+ */
+#define BATADV_TQ_LOCAL_WINDOW_SIZE 64
+/* milliseconds we have to keep pending tt_req */
+#define BATADV_TT_REQUEST_TIMEOUT 3000
+
+#define BATADV_TQ_GLOBAL_WINDOW_SIZE 5
+#define BATADV_TQ_LOCAL_BIDRECT_SEND_MINIMUM 1
+#define BATADV_TQ_LOCAL_BIDRECT_RECV_MINIMUM 1
+#define BATADV_TQ_TOTAL_BIDRECT_LIMIT 1
+
+/* B.A.T.M.A.N. V */
+#define BATADV_THROUGHPUT_DEFAULT_VALUE 10 /* 1 Mbps */
+#define BATADV_ELP_PROBES_PER_NODE 2
+#define BATADV_ELP_MIN_PROBE_SIZE 200 /* bytes */
+#define BATADV_ELP_PROBE_MAX_TX_DIFF 100 /* milliseconds */
+#define BATADV_ELP_MAX_AGE 64
+#define BATADV_OGM_MAX_ORIGDIFF 5
+#define BATADV_OGM_MAX_AGE 64
+
+/* number of OGMs sent with the last tt diff */
+#define BATADV_TT_OGM_APPEND_MAX 3
+
+/* Time in which a client can roam at most ROAMING_MAX_COUNT times in
+ * milliseconds
+ */
+#define BATADV_ROAMING_MAX_TIME 20000
+#define BATADV_ROAMING_MAX_COUNT 5
+
+#define BATADV_NO_FLAGS 0
+
+#define BATADV_NULL_IFINDEX 0 /* dummy ifindex used to avoid iface checks */
+
+#define BATADV_NO_MARK 0
+
+/* default interface for multi interface operation. The default interface is
+ * used for communication which originated locally (i.e. is not forwarded)
+ * or where special forwarding is not desired/necessary.
+ */
+#define BATADV_IF_DEFAULT ((struct batadv_hard_iface *)NULL)
+
+#define BATADV_NUM_WORDS BITS_TO_LONGS(BATADV_TQ_LOCAL_WINDOW_SIZE)
+
+#define BATADV_LOG_BUF_LEN 8192 /* has to be a power of 2 */
+
+/* number of packets to send for broadcasts on different interface types */
+#define BATADV_NUM_BCASTS_DEFAULT 1
+#define BATADV_NUM_BCASTS_WIRELESS 3
+
+/* length of the single packet used by the TP meter */
+#define BATADV_TP_PACKET_LEN ETH_DATA_LEN
+
+/* msecs after which an ARP_REQUEST is sent in broadcast as fallback */
+#define ARP_REQ_DELAY 250
+/* numbers of originator to contact for any PUT/GET DHT operation */
+#define BATADV_DAT_CANDIDATES_NUM 3
+
+/* BATADV_TQ_SIMILARITY_THRESHOLD - TQ points that a secondary metric can differ
+ * at most from the primary one in order to be still considered acceptable
+ */
+#define BATADV_TQ_SIMILARITY_THRESHOLD 50
+
+/* should not be bigger than 512 bytes or change the size of
+ * forw_packet->direct_link_flags
+ */
+#define BATADV_MAX_AGGREGATION_BYTES 512
+#define BATADV_MAX_AGGREGATION_MS 100
+
+#define BATADV_BLA_PERIOD_LENGTH 10000 /* 10 seconds */
+#define BATADV_BLA_BACKBONE_TIMEOUT (BATADV_BLA_PERIOD_LENGTH * 6)
+#define BATADV_BLA_CLAIM_TIMEOUT (BATADV_BLA_PERIOD_LENGTH * 10)
+#define BATADV_BLA_WAIT_PERIODS 3
+#define BATADV_BLA_LOOPDETECT_PERIODS 6
+#define BATADV_BLA_LOOPDETECT_TIMEOUT 3000 /* 3 seconds */
+
+#define BATADV_DUPLIST_SIZE 16
+#define BATADV_DUPLIST_TIMEOUT 500 /* 500 ms */
+/* don't reset again within 30 seconds */
+#define BATADV_RESET_PROTECTION_MS 30000
+#define BATADV_EXPECTED_SEQNO_RANGE 65536
+
+#define BATADV_NC_NODE_TIMEOUT 10000 /* Milliseconds */
+
+/**
+ * BATADV_TP_MAX_NUM - maximum number of simultaneously active tp sessions
+ */
+#define BATADV_TP_MAX_NUM 5
+
+/**
+ * enum batadv_mesh_state - State of a soft interface
+ */
+enum batadv_mesh_state {
+ /** @BATADV_MESH_INACTIVE: soft interface is not yet running */
+ BATADV_MESH_INACTIVE,
+
+ /** @BATADV_MESH_ACTIVE: interface is up and running */
+ BATADV_MESH_ACTIVE,
+
+ /** @BATADV_MESH_DEACTIVATING: interface is getting shut down */
+ BATADV_MESH_DEACTIVATING,
+};
+
+#define BATADV_BCAST_QUEUE_LEN 256
+#define BATADV_BATMAN_QUEUE_LEN 256
+
+/**
+ * enum batadv_uev_action - action type of uevent
+ */
+enum batadv_uev_action {
+ /** @BATADV_UEV_ADD: gateway was selected (after none was selected) */
+ BATADV_UEV_ADD = 0,
+
+ /**
+ * @BATADV_UEV_DEL: selected gateway was removed and none is selected
+ * anymore
+ */
+ BATADV_UEV_DEL,
+
+ /**
+	 * @BATADV_UEV_CHANGE: a different gateway was selected as the best gateway
+ */
+ BATADV_UEV_CHANGE,
+
+ /**
+ * @BATADV_UEV_LOOPDETECT: loop was detected which cannot be handled by
+ * bridge loop avoidance
+ */
+ BATADV_UEV_LOOPDETECT,
+};
+
+/**
+ * enum batadv_uev_type - Type of uevent
+ */
+enum batadv_uev_type {
+ /** @BATADV_UEV_GW: selected gateway was modified */
+ BATADV_UEV_GW = 0,
+
+ /** @BATADV_UEV_BLA: bridge loop avoidance event */
+ BATADV_UEV_BLA,
+};
+
+#define BATADV_GW_THRESHOLD 50
+
+/* Number of fragment chains for each orig_node */
+#define BATADV_FRAG_BUFFER_COUNT 8
+/* Maximum number of fragments for one packet */
+#define BATADV_FRAG_MAX_FRAGMENTS 16
+/* Maximum size of each fragment */
+#define BATADV_FRAG_MAX_FRAG_SIZE 1280
+/* Time to keep fragments while waiting for rest of the fragments */
+#define BATADV_FRAG_TIMEOUT 10000
+
+#define BATADV_DAT_CANDIDATE_NOT_FOUND 0
+#define BATADV_DAT_CANDIDATE_ORIG 1
+
+/* Debug Messages */
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+/* Append 'batman-adv: ' before kernel messages */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+/* Kernel headers */
+
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/jiffies.h>
+#include <linux/netdevice.h>
+#include <linux/percpu.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <uapi/linux/batadv_packet.h>
+
+#include "types.h"
+#include "main.h"
+
+/**
+ * batadv_print_vid() - return printable version of vid information
+ * @vid: the VLAN identifier
+ *
+ * Return: -1 when no VLAN is used, VLAN id otherwise
+ */
+static inline int batadv_print_vid(unsigned short vid)
+{
+ if (vid & BATADV_VLAN_HAS_TAG)
+ return (int)(vid & VLAN_VID_MASK);
+ else
+ return -1;
+}
+
+extern struct list_head batadv_hardif_list;
+extern unsigned int batadv_hardif_generation;
+
+extern unsigned char batadv_broadcast_addr[];
+extern struct workqueue_struct *batadv_event_workqueue;
+
+int batadv_mesh_init(struct net_device *soft_iface);
+void batadv_mesh_free(struct net_device *soft_iface);
+bool batadv_is_my_mac(struct batadv_priv *bat_priv, const u8 *addr);
+int batadv_max_header_len(void);
+void batadv_skb_set_priority(struct sk_buff *skb, int offset);
+int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype,
+ struct net_device *orig_dev);
+int
+batadv_recv_handler_register(u8 packet_type,
+ int (*recv_handler)(struct sk_buff *,
+ struct batadv_hard_iface *));
+void batadv_recv_handler_unregister(u8 packet_type);
+__be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr);
+
+/**
+ * batadv_compare_eth() - Compare two not necessarily u16-aligned Ethernet addresses
+ * @data1: Pointer to a six-byte array containing the Ethernet address
+ * @data2: Pointer to another six-byte array containing the Ethernet address
+ *
+ * note: can't use ether_addr_equal() as it requires aligned memory
+ *
+ * Return: true if they are the same ethernet addr
+ */
+static inline bool batadv_compare_eth(const void *data1, const void *data2)
+{
+ return ether_addr_equal_unaligned(data1, data2);
+}
+
+/**
+ * batadv_has_timed_out() - compares current time (jiffies) and timestamp +
+ * timeout
+ * @timestamp: base value to compare with (in jiffies)
+ * @timeout: added to base value before comparing (in milliseconds)
+ *
+ * Return: true if current time is after timestamp + timeout
+ */
+static inline bool batadv_has_timed_out(unsigned long timestamp,
+ unsigned int timeout)
+{
+ return time_is_before_jiffies(timestamp + msecs_to_jiffies(timeout));
+}
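+
+/* Usage sketch (illustrative): purge logic typically checks a jiffies
+ * timestamp against one of the timeouts defined above, e.g.:
+ *
+ *	if (batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT))
+ *		...purge the stale entry...
+ */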
+
+/**
+ * batadv_atomic_dec_not_zero() - Decrease unless the number is 0
+ * @v: pointer of type atomic_t
+ *
+ * Return: non-zero if v was not 0, and zero otherwise.
+ */
+#define batadv_atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
+
+/**
+ * batadv_smallest_signed_int() - Returns the smallest signed integer in two's
+ * complement with the sizeof x
+ * @x: type of integer
+ *
+ * Return: smallest signed integer of type
+ */
+#define batadv_smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
+
+/**
+ * batadv_seq_before() - Checks if a sequence number x is a predecessor of y
+ * @x: potential predecessor of @y
+ * @y: value to compare @x against
+ *
+ * It handles overflows/underflows and can correctly check for a predecessor
+ * unless the variable sequence number has grown by more than
+ * 2**(bitwidth(x)-1)-1.
+ *
+ * This means that for a u8 with the maximum value 255, it would think:
+ *
+ * * when adding nothing - it is neither a predecessor nor a successor
+ * * before adding more than 127 to the starting value - it is a predecessor,
+ * * when adding 128 - it is neither a predecessor nor a successor,
+ * * after adding more than 127 to the starting value - it is a successor
+ *
+ * Return: true when x is a predecessor of y, false otherwise
+ */
+#define batadv_seq_before(x, y) ({ \
+ typeof(x)_d1 = (x); \
+ typeof(y)_d2 = (y); \
+ typeof(x)_dummy = (_d1 - _d2); \
+ (void)(&_d1 == &_d2); \
+ _dummy > batadv_smallest_signed_int(_dummy); \
+})
+
+/**
+ * batadv_seq_after() - Checks if a sequence number x is a successor of y
+ * @x: potential successor of @y
+ * @y: value to compare @x against
+ *
+ * It handles overflows/underflows and can correctly check for a successor
+ * unless the variable sequence number has grown by more than
+ * 2**(bitwidth(x)-1)-1.
+ *
+ * This means that for a u8 with the maximum value 255, it would think:
+ *
+ * * when adding nothing - it is neither a predecessor nor a successor
+ * * before adding more than 127 to the starting value - it is a predecessor,
+ * * when adding 128 - it is neither a predecessor nor a successor,
+ * * after adding more than 127 to the starting value - it is a successor
+ *
+ * Return: true when x is a successor of y, false otherwise
+ */
+#define batadv_seq_after(x, y) batadv_seq_before(y, x)
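+
+/* Worked example (illustrative): with u8 sequence numbers,
+ * batadv_seq_before(250, 2) computes (u8)(250 - 2) = 248, which is greater
+ * than 128 (the smallest signed integer for a u8), so 250 counts as a
+ * wrapped-around predecessor of 2.
+ */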
+
+/**
+ * batadv_add_counter() - Add to per cpu statistics counter of soft interface
+ * @bat_priv: the bat priv with all the soft interface information
+ * @idx: counter index which should be modified
+ * @count: value to increase counter by
+ *
+ * Stop preemption on local cpu while incrementing the counter
+ */
+static inline void batadv_add_counter(struct batadv_priv *bat_priv, size_t idx,
+ size_t count)
+{
+ this_cpu_add(bat_priv->bat_counters[idx], count);
+}
+
+/**
+ * batadv_inc_counter() - Increase per cpu statistics counter of soft interface
+ * @b: the bat priv with all the soft interface information
+ * @i: counter index which should be modified
+ */
+#define batadv_inc_counter(b, i) batadv_add_counter(b, i, 1)
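+
+/* Usage sketch (illustrative; the BATADV_CNT_* indices are defined in
+ * types.h):
+ *
+ *	batadv_inc_counter(bat_priv, BATADV_CNT_TX);
+ *	batadv_add_counter(bat_priv, BATADV_CNT_TX_BYTES, skb->len);
+ */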
+
+/**
+ * BATADV_SKB_CB() - Get batadv_skb_cb from skb control buffer
+ * @__skb: skb holding the control buffer
+ *
+ * The members of the control buffer are defined in struct batadv_skb_cb in
+ * types.h. The macro is inspired by the similar macro TCP_SKB_CB() in tcp.h.
+ *
+ * Return: pointer to the batadv_skb_cb of the skb
+ */
+#define BATADV_SKB_CB(__skb) ((struct batadv_skb_cb *)&((__skb)->cb[0]))
+
+unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len);
+bool batadv_vlan_ap_isola_get(struct batadv_priv *bat_priv, unsigned short vid);
+int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type,
+ enum batadv_uev_action action, const char *data);
+
+#endif /* _NET_BATMAN_ADV_MAIN_H_ */
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
new file mode 100644
index 0000000000..315394f12c
--- /dev/null
+++ b/net/batman-adv/multicast.c
@@ -0,0 +1,2096 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Linus Lüssing
+ */
+
+#include "multicast.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/byteorder/generic.h>
+#include <linux/container_of.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/gfp.h>
+#include <linux/icmpv6.h>
+#include <linux/if_bridge.h>
+#include <linux/if_ether.h>
+#include <linux/igmp.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/inetdevice.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/netdevice.h>
+#include <linux/netlink.h>
+#include <linux/printk.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <net/addrconf.h>
+#include <net/genetlink.h>
+#include <net/if_inet6.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+#include <net/sock.h>
+#include <uapi/linux/batadv_packet.h>
+#include <uapi/linux/batman_adv.h>
+
+#include "bridge_loop_avoidance.h"
+#include "hard-interface.h"
+#include "hash.h"
+#include "log.h"
+#include "netlink.h"
+#include "send.h"
+#include "soft-interface.h"
+#include "translation-table.h"
+#include "tvlv.h"
+
+static void batadv_mcast_mla_update(struct work_struct *work);
+
+/**
+ * batadv_mcast_start_timer() - schedule the multicast periodic worker
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_mcast_start_timer(struct batadv_priv *bat_priv)
+{
+ queue_delayed_work(batadv_event_workqueue, &bat_priv->mcast.work,
+ msecs_to_jiffies(BATADV_MCAST_WORK_PERIOD));
+}
+
+/**
+ * batadv_mcast_get_bridge() - get the bridge on top of the softif if it exists
+ * @soft_iface: netdev struct of the mesh interface
+ *
+ * If the given soft interface has a bridge on top then the refcount
+ * of the corresponding net device is increased.
+ *
+ * Return: NULL if no such bridge exists. Otherwise the net device of the
+ * bridge.
+ */
+static struct net_device *batadv_mcast_get_bridge(struct net_device *soft_iface)
+{
+ struct net_device *upper = soft_iface;
+
+ rcu_read_lock();
+ do {
+ upper = netdev_master_upper_dev_get_rcu(upper);
+ } while (upper && !netif_is_bridge_master(upper));
+
+ dev_hold(upper);
+ rcu_read_unlock();
+
+ return upper;
+}
+
+/**
+ * batadv_mcast_mla_rtr_flags_softif_get_ipv4() - get mcast router flags from
+ * node for IPv4
+ * @dev: the interface to check
+ *
+ * Checks the presence of an IPv4 multicast router on this node.
+ *
+ * Caller needs to hold rcu read lock.
+ *
+ * Return: BATADV_NO_FLAGS if present, BATADV_MCAST_WANT_NO_RTR4 otherwise.
+ */
+static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv4(struct net_device *dev)
+{
+ struct in_device *in_dev = __in_dev_get_rcu(dev);
+
+ if (in_dev && IN_DEV_MFORWARD(in_dev))
+ return BATADV_NO_FLAGS;
+ else
+ return BATADV_MCAST_WANT_NO_RTR4;
+}
+
+/**
+ * batadv_mcast_mla_rtr_flags_softif_get_ipv6() - get mcast router flags from
+ * node for IPv6
+ * @dev: the interface to check
+ *
+ * Checks the presence of an IPv6 multicast router on this node.
+ *
+ * Caller needs to hold rcu read lock.
+ *
+ * Return: BATADV_NO_FLAGS if present, BATADV_MCAST_WANT_NO_RTR6 otherwise.
+ */
+#if IS_ENABLED(CONFIG_IPV6_MROUTE)
+static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev)
+{
+ struct inet6_dev *in6_dev = __in6_dev_get(dev);
+
+ if (in6_dev && atomic_read(&in6_dev->cnf.mc_forwarding))
+ return BATADV_NO_FLAGS;
+ else
+ return BATADV_MCAST_WANT_NO_RTR6;
+}
+#else
+static inline u8
+batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev)
+{
+ return BATADV_MCAST_WANT_NO_RTR6;
+}
+#endif
+
+/**
+ * batadv_mcast_mla_rtr_flags_softif_get() - get mcast router flags from node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @bridge: bridge interface on top of the soft_iface if present,
+ * otherwise pass NULL
+ *
+ * Checks the presence of IPv4 and IPv6 multicast routers on this
+ * node.
+ *
+ * Return:
+ * BATADV_NO_FLAGS: Both an IPv4 and an IPv6 multicast router are present
+ * BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present
+ * BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
+ * The former two OR'd: no multicast router is present
+ */
+static u8 batadv_mcast_mla_rtr_flags_softif_get(struct batadv_priv *bat_priv,
+ struct net_device *bridge)
+{
+ struct net_device *dev = bridge ? bridge : bat_priv->soft_iface;
+ u8 flags = BATADV_NO_FLAGS;
+
+ rcu_read_lock();
+
+ flags |= batadv_mcast_mla_rtr_flags_softif_get_ipv4(dev);
+ flags |= batadv_mcast_mla_rtr_flags_softif_get_ipv6(dev);
+
+ rcu_read_unlock();
+
+ return flags;
+}
+
+/**
+ * batadv_mcast_mla_rtr_flags_bridge_get() - get mcast router flags from bridge
+ * @bat_priv: the bat priv with all the soft interface information
+ * @bridge: bridge interface on top of the soft_iface if present,
+ * otherwise pass NULL
+ *
+ * Checks the presence of IPv4 and IPv6 multicast routers behind a bridge.
+ *
+ * Return:
+ * BATADV_NO_FLAGS: Both an IPv4 and an IPv6 multicast router are present
+ * BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present
+ * BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
+ * The former two OR'd: no multicast router is present
+ */
+static u8 batadv_mcast_mla_rtr_flags_bridge_get(struct batadv_priv *bat_priv,
+ struct net_device *bridge)
+{
+ struct net_device *dev = bat_priv->soft_iface;
+ u8 flags = BATADV_NO_FLAGS;
+
+ if (!bridge)
+ return BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6;
+
+ if (!br_multicast_has_router_adjacent(dev, ETH_P_IP))
+ flags |= BATADV_MCAST_WANT_NO_RTR4;
+ if (!br_multicast_has_router_adjacent(dev, ETH_P_IPV6))
+ flags |= BATADV_MCAST_WANT_NO_RTR6;
+
+ return flags;
+}
+
+/**
+ * batadv_mcast_mla_rtr_flags_get() - get multicast router flags
+ * @bat_priv: the bat priv with all the soft interface information
+ * @bridge: bridge interface on top of the soft_iface if present,
+ * otherwise pass NULL
+ *
+ * Checks the presence of IPv4 and IPv6 multicast routers on this
+ * node or behind its bridge.
+ *
+ * Return:
+ * BATADV_NO_FLAGS: Both an IPv4 and an IPv6 multicast router are present
+ * BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present
+ * BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
+ * The former two OR'd: no multicast router is present
+ */
+static u8 batadv_mcast_mla_rtr_flags_get(struct batadv_priv *bat_priv,
+ struct net_device *bridge)
+{
+ u8 flags = BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6;
+
+ flags &= batadv_mcast_mla_rtr_flags_softif_get(bat_priv, bridge);
+ flags &= batadv_mcast_mla_rtr_flags_bridge_get(bat_priv, bridge);
+
+ return flags;
+}
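+
+/* Worked example (illustrative): if the soft interface reports only
+ * BATADV_MCAST_WANT_NO_RTR4 (an IPv6 router runs locally) while the bridge
+ * reports both WANT_NO_RTR flags, the two AND operations above leave just
+ * BATADV_MCAST_WANT_NO_RTR4 set: an IPv6 multicast router is reachable.
+ */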
+
+/**
+ * batadv_mcast_mla_flags_get() - get the new multicast flags
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: A set of flags for the current/next TVLV, querier and
+ * bridge state.
+ */
+static struct batadv_mcast_mla_flags
+batadv_mcast_mla_flags_get(struct batadv_priv *bat_priv)
+{
+ struct net_device *dev = bat_priv->soft_iface;
+ struct batadv_mcast_querier_state *qr4, *qr6;
+ struct batadv_mcast_mla_flags mla_flags;
+ struct net_device *bridge;
+
+ bridge = batadv_mcast_get_bridge(dev);
+
+ memset(&mla_flags, 0, sizeof(mla_flags));
+ mla_flags.enabled = 1;
+ mla_flags.tvlv_flags |= batadv_mcast_mla_rtr_flags_get(bat_priv,
+ bridge);
+
+ if (!bridge)
+ return mla_flags;
+
+ dev_put(bridge);
+
+ mla_flags.bridged = 1;
+ qr4 = &mla_flags.querier_ipv4;
+ qr6 = &mla_flags.querier_ipv6;
+
+ if (!IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING))
+ pr_warn_once("No bridge IGMP snooping compiled - multicast optimizations disabled\n");
+
+ qr4->exists = br_multicast_has_querier_anywhere(dev, ETH_P_IP);
+ qr4->shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IP);
+
+ qr6->exists = br_multicast_has_querier_anywhere(dev, ETH_P_IPV6);
+ qr6->shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IPV6);
+
+ mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_UNSNOOPABLES;
+
+ /* 1) If no querier exists at all, then multicast listeners on
+ * our local TT clients behind the bridge will keep silent.
+ * 2) If the selected querier is on one of our local TT clients,
+ * behind the bridge, then this querier might shadow multicast
+ * listeners on our local TT clients, behind this bridge.
+ *
+ * In both cases, we will signal to other batman nodes that
+ * we need all multicast traffic of the corresponding protocol.
+ */
+ if (!qr4->exists || qr4->shadowing) {
+ mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_IPV4;
+ mla_flags.tvlv_flags &= ~BATADV_MCAST_WANT_NO_RTR4;
+ }
+
+ if (!qr6->exists || qr6->shadowing) {
+ mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_IPV6;
+ mla_flags.tvlv_flags &= ~BATADV_MCAST_WANT_NO_RTR6;
+ }
+
+ return mla_flags;
+}
+
+/**
+ * batadv_mcast_mla_is_duplicate() - check whether an address is in a list
+ * @mcast_addr: the multicast address to check
+ * @mcast_list: the list with multicast addresses to search in
+ *
+ * Return: true if the given address is already in the given list.
+ * Otherwise returns false.
+ */
+static bool batadv_mcast_mla_is_duplicate(u8 *mcast_addr,
+ struct hlist_head *mcast_list)
+{
+ struct batadv_hw_addr *mcast_entry;
+
+ hlist_for_each_entry(mcast_entry, mcast_list, list)
+ if (batadv_compare_eth(mcast_entry->addr, mcast_addr))
+ return true;
+
+ return false;
+}
+
+/**
+ * batadv_mcast_mla_softif_get_ipv4() - get softif IPv4 multicast listeners
+ * @dev: the device to collect multicast addresses from
+ * @mcast_list: a list to put found addresses into
+ * @flags: flags indicating the new multicast state
+ *
+ * Collects multicast addresses of IPv4 multicast listeners residing
+ * on this kernel on the given soft interface, dev, in
+ * the given mcast_list. In general, multicast listeners provided by
+ * your multicast receiving applications run directly on this node.
+ *
+ * Return: -ENOMEM on memory allocation error or the number of
+ * items added to the mcast_list otherwise.
+ */
+static int
+batadv_mcast_mla_softif_get_ipv4(struct net_device *dev,
+ struct hlist_head *mcast_list,
+ struct batadv_mcast_mla_flags *flags)
+{
+ struct batadv_hw_addr *new;
+ struct in_device *in_dev;
+ u8 mcast_addr[ETH_ALEN];
+ struct ip_mc_list *pmc;
+ int ret = 0;
+
+ if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_IPV4)
+ return 0;
+
+ rcu_read_lock();
+
+ in_dev = __in_dev_get_rcu(dev);
+ if (!in_dev) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ for (pmc = rcu_dereference(in_dev->mc_list); pmc;
+ pmc = rcu_dereference(pmc->next_rcu)) {
+ if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
+ ipv4_is_local_multicast(pmc->multiaddr))
+ continue;
+
+ if (!(flags->tvlv_flags & BATADV_MCAST_WANT_NO_RTR4) &&
+ !ipv4_is_local_multicast(pmc->multiaddr))
+ continue;
+
+ ip_eth_mc_map(pmc->multiaddr, mcast_addr);
+
+ if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
+ continue;
+
+ new = kmalloc(sizeof(*new), GFP_ATOMIC);
+ if (!new) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ ether_addr_copy(new->addr, mcast_addr);
+ hlist_add_head(&new->list, mcast_list);
+ ret++;
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
+/**
+ * batadv_mcast_mla_softif_get_ipv6() - get softif IPv6 multicast listeners
+ * @dev: the device to collect multicast addresses from
+ * @mcast_list: a list to put found addresses into
+ * @flags: flags indicating the new multicast state
+ *
+ * Collects multicast addresses of IPv6 multicast listeners residing
+ * on this kernel on the given soft interface, dev, in
+ * the given mcast_list. In general, multicast listeners provided by
+ * your multicast receiving applications run directly on this node.
+ *
+ * Return: -ENOMEM on memory allocation error or the number of
+ * items added to the mcast_list otherwise.
+ */
+#if IS_ENABLED(CONFIG_IPV6)
+static int
+batadv_mcast_mla_softif_get_ipv6(struct net_device *dev,
+ struct hlist_head *mcast_list,
+ struct batadv_mcast_mla_flags *flags)
+{
+ struct batadv_hw_addr *new;
+ struct inet6_dev *in6_dev;
+ u8 mcast_addr[ETH_ALEN];
+ struct ifmcaddr6 *pmc6;
+ int ret = 0;
+
+ if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_IPV6)
+ return 0;
+
+ rcu_read_lock();
+
+ in6_dev = __in6_dev_get(dev);
+ if (!in6_dev) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ for (pmc6 = rcu_dereference(in6_dev->mc_list);
+ pmc6;
+ pmc6 = rcu_dereference(pmc6->next)) {
+ if (IPV6_ADDR_MC_SCOPE(&pmc6->mca_addr) <
+ IPV6_ADDR_SCOPE_LINKLOCAL)
+ continue;
+
+ if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
+ ipv6_addr_is_ll_all_nodes(&pmc6->mca_addr))
+ continue;
+
+ if (!(flags->tvlv_flags & BATADV_MCAST_WANT_NO_RTR6) &&
+ IPV6_ADDR_MC_SCOPE(&pmc6->mca_addr) >
+ IPV6_ADDR_SCOPE_LINKLOCAL)
+ continue;
+
+ ipv6_eth_mc_map(&pmc6->mca_addr, mcast_addr);
+
+ if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
+ continue;
+
+ new = kmalloc(sizeof(*new), GFP_ATOMIC);
+ if (!new) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ ether_addr_copy(new->addr, mcast_addr);
+ hlist_add_head(&new->list, mcast_list);
+ ret++;
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+#else
+static inline int
+batadv_mcast_mla_softif_get_ipv6(struct net_device *dev,
+ struct hlist_head *mcast_list,
+ struct batadv_mcast_mla_flags *flags)
+{
+ return 0;
+}
+#endif
+
+/**
+ * batadv_mcast_mla_softif_get() - get softif multicast listeners
+ * @dev: the device to collect multicast addresses from
+ * @mcast_list: a list to put found addresses into
+ * @flags: flags indicating the new multicast state
+ *
+ * Collects multicast addresses of multicast listeners residing
+ * on this kernel on the given soft interface, dev, in
+ * the given mcast_list. In general, multicast listeners provided by
+ * your multicast receiving applications run directly on this node.
+ *
+ * If there is a bridge interface on top of dev, collect from that one
+ * instead. Just like with IP addresses and routes, multicast listeners
+ * will(/should) register to the bridge interface instead of an
+ * enslaved bat0.
+ *
+ * Return: -ENOMEM on memory allocation error or the number of
+ * items added to the mcast_list otherwise.
+ */
+static int
+batadv_mcast_mla_softif_get(struct net_device *dev,
+ struct hlist_head *mcast_list,
+ struct batadv_mcast_mla_flags *flags)
+{
+ struct net_device *bridge = batadv_mcast_get_bridge(dev);
+ int ret4, ret6 = 0;
+
+ if (bridge)
+ dev = bridge;
+
+ ret4 = batadv_mcast_mla_softif_get_ipv4(dev, mcast_list, flags);
+ if (ret4 < 0)
+ goto out;
+
+ ret6 = batadv_mcast_mla_softif_get_ipv6(dev, mcast_list, flags);
+ if (ret6 < 0) {
+ ret4 = 0;
+ goto out;
+ }
+
+out:
+ dev_put(bridge);
+
+ return ret4 + ret6;
+}
+
+/**
+ * batadv_mcast_mla_br_addr_cpy() - copy a bridge multicast address
+ * @dst: destination to write to - a multicast MAC address
+ * @src: source to read from - a multicast IP address
+ *
+ * Converts a given multicast IPv4/IPv6 address from a bridge
+ * to its matching multicast MAC address and copies it into the given
+ * destination buffer.
+ *
+ * Caller needs to make sure the destination buffer can hold
+ * at least ETH_ALEN bytes.
+ */
+static void batadv_mcast_mla_br_addr_cpy(char *dst, const struct br_ip *src)
+{
+ if (src->proto == htons(ETH_P_IP))
+ ip_eth_mc_map(src->dst.ip4, dst);
+#if IS_ENABLED(CONFIG_IPV6)
+ else if (src->proto == htons(ETH_P_IPV6))
+ ipv6_eth_mc_map(&src->dst.ip6, dst);
+#endif
+ else
+ eth_zero_addr(dst);
+}
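+
+/* Worked example (illustrative): the IPv4 group 224.0.0.1 maps to
+ * 01:00:5e:00:00:01 (01:00:5e plus the lower 23 address bits, RFC 1112),
+ * while IPv6 groups map to 33:33 followed by the lower 32 address bits.
+ */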
+
+/**
+ * batadv_mcast_mla_bridge_get() - get bridged-in multicast listeners
+ * @dev: a bridge slave whose bridge to collect multicast addresses from
+ * @mcast_list: a list to put found addresses into
+ * @flags: flags indicating the new multicast state
+ *
+ * Collects multicast addresses of multicast listeners residing
+ * on foreign, non-mesh devices which we gave access to our mesh via
+ * a bridge on top of the given soft interface, dev, in the given
+ * mcast_list.
+ *
+ * Return: -ENOMEM on memory allocation error or the number of
+ * items added to the mcast_list otherwise.
+ */
+static int batadv_mcast_mla_bridge_get(struct net_device *dev,
+ struct hlist_head *mcast_list,
+ struct batadv_mcast_mla_flags *flags)
+{
+ struct list_head bridge_mcast_list = LIST_HEAD_INIT(bridge_mcast_list);
+ struct br_ip_list *br_ip_entry, *tmp;
+ u8 tvlv_flags = flags->tvlv_flags;
+ struct batadv_hw_addr *new;
+ u8 mcast_addr[ETH_ALEN];
+ int ret;
+
+ /* we don't need to detect these devices/listeners, the IGMP/MLD
+ * snooping code of the Linux bridge already does that for us
+ */
+ ret = br_multicast_list_adjacent(dev, &bridge_mcast_list);
+ if (ret < 0)
+ goto out;
+
+ list_for_each_entry(br_ip_entry, &bridge_mcast_list, list) {
+ if (br_ip_entry->addr.proto == htons(ETH_P_IP)) {
+ if (tvlv_flags & BATADV_MCAST_WANT_ALL_IPV4)
+ continue;
+
+ if (tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
+ ipv4_is_local_multicast(br_ip_entry->addr.dst.ip4))
+ continue;
+
+ if (!(tvlv_flags & BATADV_MCAST_WANT_NO_RTR4) &&
+ !ipv4_is_local_multicast(br_ip_entry->addr.dst.ip4))
+ continue;
+ }
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (br_ip_entry->addr.proto == htons(ETH_P_IPV6)) {
+ if (tvlv_flags & BATADV_MCAST_WANT_ALL_IPV6)
+ continue;
+
+ if (tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
+ ipv6_addr_is_ll_all_nodes(&br_ip_entry->addr.dst.ip6))
+ continue;
+
+ if (!(tvlv_flags & BATADV_MCAST_WANT_NO_RTR6) &&
+ IPV6_ADDR_MC_SCOPE(&br_ip_entry->addr.dst.ip6) >
+ IPV6_ADDR_SCOPE_LINKLOCAL)
+ continue;
+ }
+#endif
+
+ batadv_mcast_mla_br_addr_cpy(mcast_addr, &br_ip_entry->addr);
+ if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
+ continue;
+
+ new = kmalloc(sizeof(*new), GFP_ATOMIC);
+ if (!new) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ ether_addr_copy(new->addr, mcast_addr);
+ hlist_add_head(&new->list, mcast_list);
+ }
+
+out:
+ list_for_each_entry_safe(br_ip_entry, tmp, &bridge_mcast_list, list) {
+ list_del(&br_ip_entry->list);
+ kfree(br_ip_entry);
+ }
+
+ return ret;
+}
+
+/**
+ * batadv_mcast_mla_list_free() - free a list of multicast addresses
+ * @mcast_list: the list to free
+ *
+ * Removes and frees all items in the given mcast_list.
+ */
+static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list)
+{
+ struct batadv_hw_addr *mcast_entry;
+ struct hlist_node *tmp;
+
+ hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
+ hlist_del(&mcast_entry->list);
+ kfree(mcast_entry);
+ }
+}
+
+/**
+ * batadv_mcast_mla_tt_retract() - clean up multicast listener announcements
+ * @bat_priv: the bat priv with all the soft interface information
+ * @mcast_list: a list of addresses which should _not_ be removed
+ *
+ * Retracts the announcement of any multicast listener from the
+ * translation table except the ones listed in the given mcast_list.
+ *
+ * If mcast_list is NULL then all are retracted.
+ */
+static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
+ struct hlist_head *mcast_list)
+{
+ struct batadv_hw_addr *mcast_entry;
+ struct hlist_node *tmp;
+
+ hlist_for_each_entry_safe(mcast_entry, tmp, &bat_priv->mcast.mla_list,
+ list) {
+ if (mcast_list &&
+ batadv_mcast_mla_is_duplicate(mcast_entry->addr,
+ mcast_list))
+ continue;
+
+ batadv_tt_local_remove(bat_priv, mcast_entry->addr,
+ BATADV_NO_FLAGS,
+ "mcast TT outdated", false);
+
+ hlist_del(&mcast_entry->list);
+ kfree(mcast_entry);
+ }
+}
+
+/**
+ * batadv_mcast_mla_tt_add() - add multicast listener announcements
+ * @bat_priv: the bat priv with all the soft interface information
+ * @mcast_list: a list of addresses which are going to get added
+ *
+ * Adds multicast listener announcements from the given mcast_list to the
+ * translation table if they have not been added yet.
+ */
+static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
+ struct hlist_head *mcast_list)
+{
+ struct batadv_hw_addr *mcast_entry;
+ struct hlist_node *tmp;
+
+ if (!mcast_list)
+ return;
+
+ hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
+ if (batadv_mcast_mla_is_duplicate(mcast_entry->addr,
+ &bat_priv->mcast.mla_list))
+ continue;
+
+ if (!batadv_tt_local_add(bat_priv->soft_iface,
+ mcast_entry->addr, BATADV_NO_FLAGS,
+ BATADV_NULL_IFINDEX, BATADV_NO_MARK))
+ continue;
+
+ hlist_del(&mcast_entry->list);
+ hlist_add_head(&mcast_entry->list, &bat_priv->mcast.mla_list);
+ }
+}
+
+/**
+ * batadv_mcast_querier_log() - debug output regarding the querier status on
+ * link
+ * @bat_priv: the bat priv with all the soft interface information
+ * @str_proto: a string for the querier protocol (e.g. "IGMP" or "MLD")
+ * @old_state: the previous querier state on our link
+ * @new_state: the new querier state on our link
+ *
+ * Outputs debug messages to the logging facility with log level 'mcast'
+ * regarding changes to the querier status on the link which are relevant
+ * to our multicast optimizations.
+ *
+ * Usually this is about whether a querier appeared or vanished in
+ * our mesh or whether the querier is in the suboptimal position of being
+ * behind our local bridge segment: Snooping switches will directly
+ * forward listener reports to the querier, therefore batman-adv and
+ * the bridge will potentially not see these listeners - the querier may
+ * then be shadowing listeners from us.
+ *
+ * This is only interesting for nodes with a bridge on top of their
+ * soft interface.
+ */
+static void
+batadv_mcast_querier_log(struct batadv_priv *bat_priv, char *str_proto,
+ struct batadv_mcast_querier_state *old_state,
+ struct batadv_mcast_querier_state *new_state)
+{
+ if (!old_state->exists && new_state->exists)
+ batadv_info(bat_priv->soft_iface, "%s Querier appeared\n",
+ str_proto);
+ else if (old_state->exists && !new_state->exists)
+ batadv_info(bat_priv->soft_iface,
+ "%s Querier disappeared - multicast optimizations disabled\n",
+ str_proto);
+ else if (!bat_priv->mcast.mla_flags.bridged && !new_state->exists)
+ batadv_info(bat_priv->soft_iface,
+ "No %s Querier present - multicast optimizations disabled\n",
+ str_proto);
+
+ if (new_state->exists) {
+ if ((!old_state->shadowing && new_state->shadowing) ||
+ (!old_state->exists && new_state->shadowing))
+ batadv_dbg(BATADV_DBG_MCAST, bat_priv,
+ "%s Querier is behind our bridged segment: Might shadow listeners\n",
+ str_proto);
+ else if (old_state->shadowing && !new_state->shadowing)
+ batadv_dbg(BATADV_DBG_MCAST, bat_priv,
+ "%s Querier is not behind our bridged segment\n",
+ str_proto);
+ }
+}
+
+/**
+ * batadv_mcast_bridge_log() - debug output for topology changes in bridged
+ * setups
+ * @bat_priv: the bat priv with all the soft interface information
+ * @new_flags: flags indicating the new multicast state
+ *
+ * If no bridges are ever used on this node, then this function does nothing.
+ *
+ * Otherwise this function outputs debug information to the 'mcast' log level
+ * which might be relevant to our multicast optimizations.
+ *
+ * More precisely, it outputs information when a bridge interface is added or
+ * removed from a soft interface. And when a bridge is present, it further
+ * outputs information about the querier state which is relevant for the
+ * multicast flags this node is going to set.
+ */
+static void
+batadv_mcast_bridge_log(struct batadv_priv *bat_priv,
+ struct batadv_mcast_mla_flags *new_flags)
+{
+ struct batadv_mcast_mla_flags *old_flags = &bat_priv->mcast.mla_flags;
+
+ if (!old_flags->bridged && new_flags->bridged)
+ batadv_dbg(BATADV_DBG_MCAST, bat_priv,
+ "Bridge added: Setting Unsnoopables(U)-flag\n");
+ else if (old_flags->bridged && !new_flags->bridged)
+ batadv_dbg(BATADV_DBG_MCAST, bat_priv,
+ "Bridge removed: Unsetting Unsnoopables(U)-flag\n");
+
+ if (new_flags->bridged) {
+ batadv_mcast_querier_log(bat_priv, "IGMP",
+ &old_flags->querier_ipv4,
+ &new_flags->querier_ipv4);
+ batadv_mcast_querier_log(bat_priv, "MLD",
+ &old_flags->querier_ipv6,
+ &new_flags->querier_ipv6);
+ }
+}
+
+/**
+ * batadv_mcast_flags_log() - output debug information about mcast flag changes
+ * @bat_priv: the bat priv with all the soft interface information
+ * @flags: TVLV flags indicating the new multicast state
+ *
+ * Whenever the multicast TVLV flags this node announces change, this function
+ * should be used to notify userspace about the change.
+ */
+static void batadv_mcast_flags_log(struct batadv_priv *bat_priv, u8 flags)
+{
+ bool old_enabled = bat_priv->mcast.mla_flags.enabled;
+ u8 old_flags = bat_priv->mcast.mla_flags.tvlv_flags;
+ char str_old_flags[] = "[.... . ]";
+
+ sprintf(str_old_flags, "[%c%c%c%s%s]",
+ (old_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
+ (old_flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
+ (old_flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.',
+ !(old_flags & BATADV_MCAST_WANT_NO_RTR4) ? "R4" : ". ",
+ !(old_flags & BATADV_MCAST_WANT_NO_RTR6) ? "R6" : ". ");
+
+ batadv_dbg(BATADV_DBG_MCAST, bat_priv,
+ "Changing multicast flags from '%s' to '[%c%c%c%s%s]'\n",
+ old_enabled ? str_old_flags : "<undefined>",
+ (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
+ (flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
+ (flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.',
+ !(flags & BATADV_MCAST_WANT_NO_RTR4) ? "R4" : ". ",
+ !(flags & BATADV_MCAST_WANT_NO_RTR6) ? "R6" : ". ");
+}
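+
+/* Example (illustrative only, kept out of the build): the same bracket
+ * encoding as above in self-contained form. The bit values are assumed
+ * to match enum batadv_mcast_flags in uapi/linux/batadv_packet.h.
+ */
+#if 0
+#include <stdio.h>
+
+#define WANT_ALL_UNSNOOPABLES	(1 << 0)
+#define WANT_ALL_IPV4		(1 << 1)
+#define WANT_ALL_IPV6		(1 << 2)
+#define WANT_NO_RTR4		(1 << 3)
+#define WANT_NO_RTR6		(1 << 4)
+
+static void print_flags(unsigned char flags)
+{
+	printf("[%c%c%c%s%s]\n",
+	       (flags & WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
+	       (flags & WANT_ALL_IPV4) ? '4' : '.',
+	       (flags & WANT_ALL_IPV6) ? '6' : '.',
+	       !(flags & WANT_NO_RTR4) ? "R4" : ". ",
+	       !(flags & WANT_NO_RTR6) ? "R6" : ". ");
+}
+
+int main(void)
+{
+	/* a node wanting unsnoopables and all IPv4, with both
+	 * WANT_NO_RTR flags set, prints "[U4.. . ]"
+	 */
+	print_flags(WANT_ALL_UNSNOOPABLES | WANT_ALL_IPV4 |
+		    WANT_NO_RTR4 | WANT_NO_RTR6);
+	return 0;
+}
+#endif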
+
+/**
+ * batadv_mcast_mla_flags_update() - update multicast flags
+ * @bat_priv: the bat priv with all the soft interface information
+ * @flags: flags indicating the new multicast state
+ *
+ * Updates the own multicast tvlv with our current multicast related settings,
+ * capabilities and inabilities.
+ */
+static void
+batadv_mcast_mla_flags_update(struct batadv_priv *bat_priv,
+ struct batadv_mcast_mla_flags *flags)
+{
+ struct batadv_tvlv_mcast_data mcast_data;
+
+ if (!memcmp(flags, &bat_priv->mcast.mla_flags, sizeof(*flags)))
+ return;
+
+ batadv_mcast_bridge_log(bat_priv, flags);
+ batadv_mcast_flags_log(bat_priv, flags->tvlv_flags);
+
+ mcast_data.flags = flags->tvlv_flags;
+ memset(mcast_data.reserved, 0, sizeof(mcast_data.reserved));
+
+ batadv_tvlv_container_register(bat_priv, BATADV_TVLV_MCAST, 2,
+ &mcast_data, sizeof(mcast_data));
+
+ bat_priv->mcast.mla_flags = *flags;
+}
+
+/**
+ * __batadv_mcast_mla_update() - update the own MLAs
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Updates the own multicast listener announcements in the translation
+ * table as well as the own, announced multicast tvlv container.
+ *
+ * Note that non-conflicting reads and writes to bat_priv->mcast.mla_list
+ * in batadv_mcast_mla_tt_retract() and batadv_mcast_mla_tt_add() are
+ * ensured by the non-parallel execution of the worker this function
+ * belongs to.
+ */
+static void __batadv_mcast_mla_update(struct batadv_priv *bat_priv)
+{
+ struct net_device *soft_iface = bat_priv->soft_iface;
+ struct hlist_head mcast_list = HLIST_HEAD_INIT;
+ struct batadv_mcast_mla_flags flags;
+ int ret;
+
+ flags = batadv_mcast_mla_flags_get(bat_priv);
+
+ ret = batadv_mcast_mla_softif_get(soft_iface, &mcast_list, &flags);
+ if (ret < 0)
+ goto out;
+
+ ret = batadv_mcast_mla_bridge_get(soft_iface, &mcast_list, &flags);
+ if (ret < 0)
+ goto out;
+
+ spin_lock(&bat_priv->mcast.mla_lock);
+ batadv_mcast_mla_tt_retract(bat_priv, &mcast_list);
+ batadv_mcast_mla_tt_add(bat_priv, &mcast_list);
+ batadv_mcast_mla_flags_update(bat_priv, &flags);
+ spin_unlock(&bat_priv->mcast.mla_lock);
+
+out:
+ batadv_mcast_mla_list_free(&mcast_list);
+}
+
+/**
+ * batadv_mcast_mla_update() - update the own MLAs
+ * @work: kernel work struct
+ *
+ * Updates the own multicast listener announcements in the translation
+ * table as well as the own, announced multicast tvlv container.
+ *
+ * In the end, reschedules the work timer.
+ */
+static void batadv_mcast_mla_update(struct work_struct *work)
+{
+ struct delayed_work *delayed_work;
+ struct batadv_priv_mcast *priv_mcast;
+ struct batadv_priv *bat_priv;
+
+ delayed_work = to_delayed_work(work);
+ priv_mcast = container_of(delayed_work, struct batadv_priv_mcast, work);
+ bat_priv = container_of(priv_mcast, struct batadv_priv, mcast);
+
+ __batadv_mcast_mla_update(bat_priv);
+ batadv_mcast_start_timer(bat_priv);
+}
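+
+/* The shape of this self-rescheduling pattern in isolation (sketch;
+ * "my_priv", "my_worker" and "my_update" are placeholder names, not
+ * batman-adv symbols). batman-adv requeues mcast.work on its
+ * single-threaded event workqueue, which is what provides the
+ * non-parallel execution guarantee relied upon by
+ * __batadv_mcast_mla_update() above.
+ */
+#if 0
+struct my_priv {
+	struct delayed_work work;
+};
+
+static void my_worker(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct my_priv *priv = container_of(dwork, struct my_priv, work);
+
+	my_update(priv);
+
+	/* re-arm: one period from now, same worker again */
+	queue_delayed_work(system_wq, &priv->work,
+			   msecs_to_jiffies(1000));
+}
+#endif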
+
+/**
+ * batadv_mcast_is_report_ipv4() - check for IGMP reports
+ * @skb: the ethernet frame destined for the mesh
+ *
+ * This call might reallocate skb data.
+ *
+ * Checks whether the given frame is a valid IGMP report.
+ *
+ * Return: If so then true, otherwise false.
+ */
+static bool batadv_mcast_is_report_ipv4(struct sk_buff *skb)
+{
+ if (ip_mc_check_igmp(skb) < 0)
+ return false;
+
+ switch (igmp_hdr(skb)->type) {
+ case IGMP_HOST_MEMBERSHIP_REPORT:
+ case IGMPV2_HOST_MEMBERSHIP_REPORT:
+ case IGMPV3_HOST_MEMBERSHIP_REPORT:
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * batadv_mcast_forw_mode_check_ipv4() - check for optimized forwarding
+ * potential
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the IPv4 packet to check
+ * @is_unsnoopable: stores whether the destination is unsnoopable
+ * @is_routable: stores whether the destination is routable
+ *
+ * Checks whether the given IPv4 packet has the potential to be forwarded with a
+ * mode more optimal than classic flooding.
+ *
+ * Return: If so then 0. Otherwise -EINVAL or -ENOMEM in case of memory
+ * allocation failure.
+ */
+static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ bool *is_unsnoopable,
+ int *is_routable)
+{
+ struct iphdr *iphdr;
+
+ /* We might fail due to out-of-memory -> drop it */
+ if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*iphdr)))
+ return -ENOMEM;
+
+ if (batadv_mcast_is_report_ipv4(skb))
+ return -EINVAL;
+
+ iphdr = ip_hdr(skb);
+
+ /* link-local multicast listeners behind a bridge are
+ * not snoopable (see RFC4541, section 2.1.2.2)
+ */
+ if (ipv4_is_local_multicast(iphdr->daddr))
+ *is_unsnoopable = true;
+ else
+ *is_routable = ETH_P_IP;
+
+ return 0;
+}
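+
+/* For reference: ipv4_is_local_multicast() (include/linux/in.h) is
+ * effectively a 224.0.0.0/24 prefix match, i.e. the IPv4 link-local
+ * multicast block that snooping switches must always flood.
+ */
+#if 0
+static inline bool is_local_mcast(__be32 addr)
+{
+	return (addr & htonl(0xffffff00)) == htonl(0xe0000000);
+}
+#endif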
+
+/**
+ * batadv_mcast_is_report_ipv6() - check for MLD reports
+ * @skb: the ethernet frame destined for the mesh
+ *
+ * This call might reallocate skb data.
+ *
+ * Checks whether the given frame is a valid MLD report.
+ *
+ * Return: If so then true, otherwise false.
+ */
+static bool batadv_mcast_is_report_ipv6(struct sk_buff *skb)
+{
+ if (ipv6_mc_check_mld(skb) < 0)
+ return false;
+
+ switch (icmp6_hdr(skb)->icmp6_type) {
+ case ICMPV6_MGM_REPORT:
+ case ICMPV6_MLD2_REPORT:
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * batadv_mcast_forw_mode_check_ipv6() - check for optimized forwarding
+ * potential
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the IPv6 packet to check
+ * @is_unsnoopable: stores whether the destination is unsnoopable
+ * @is_routable: stores whether the destination is routable
+ *
+ * Checks whether the given IPv6 packet has the potential to be forwarded with a
+ * mode more optimal than classic flooding.
+ *
+ * Return: If so then 0. Otherwise -EINVAL, or -ENOMEM if we are out of memory
+ */
+static int batadv_mcast_forw_mode_check_ipv6(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ bool *is_unsnoopable,
+ int *is_routable)
+{
+ struct ipv6hdr *ip6hdr;
+
+ /* We might fail due to out-of-memory -> drop it */
+ if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*ip6hdr)))
+ return -ENOMEM;
+
+ if (batadv_mcast_is_report_ipv6(skb))
+ return -EINVAL;
+
+ ip6hdr = ipv6_hdr(skb);
+
+ if (IPV6_ADDR_MC_SCOPE(&ip6hdr->daddr) < IPV6_ADDR_SCOPE_LINKLOCAL)
+ return -EINVAL;
+
+ /* link-local-all-nodes multicast listeners behind a bridge are
+ * not snoopable (see RFC4541, section 3, paragraph 3)
+ */
+ if (ipv6_addr_is_ll_all_nodes(&ip6hdr->daddr))
+ *is_unsnoopable = true;
+ else if (IPV6_ADDR_MC_SCOPE(&ip6hdr->daddr) > IPV6_ADDR_SCOPE_LINKLOCAL)
+ *is_routable = ETH_P_IPV6;
+
+ return 0;
+}
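+
+/* The scope checks above read the low nibble of byte 1 of the
+ * destination address (IPV6_ADDR_MC_SCOPE()): e.g. ff02::1 has scope
+ * 0x2 (link-local, never routable), ff05::2 scope 0x5 (site-local)
+ * and ff0e::1 scope 0xe (global) - the latter two set *is_routable.
+ * Equivalent sketch:
+ */
+#if 0
+static inline int mc_scope(const struct in6_addr *addr)
+{
+	return addr->s6_addr[1] & 0x0f;	/* 0x2 == link-local */
+}
+#endif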
+
+/**
+ * batadv_mcast_forw_mode_check() - check for optimized forwarding potential
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the multicast frame to check
+ * @is_unsnoopable: stores whether the destination is unsnoopable
+ * @is_routable: stores whether the destination is routable
+ *
+ * Checks whether the given multicast ethernet frame has the potential to be
+ * forwarded with a mode more optimal than classic flooding.
+ *
+ * Return: If so then 0. Otherwise -EINVAL, or -ENOMEM if we are out of memory
+ */
+static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ bool *is_unsnoopable,
+ int *is_routable)
+{
+ struct ethhdr *ethhdr = eth_hdr(skb);
+
+ if (!atomic_read(&bat_priv->multicast_mode))
+ return -EINVAL;
+
+ switch (ntohs(ethhdr->h_proto)) {
+ case ETH_P_IP:
+ return batadv_mcast_forw_mode_check_ipv4(bat_priv, skb,
+ is_unsnoopable,
+ is_routable);
+ case ETH_P_IPV6:
+ if (!IS_ENABLED(CONFIG_IPV6))
+ return -EINVAL;
+
+ return batadv_mcast_forw_mode_check_ipv6(bat_priv, skb,
+ is_unsnoopable,
+ is_routable);
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * batadv_mcast_forw_want_all_ip_count() - count nodes with unspecific mcast
+ * interest
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ethhdr: ethernet header of a packet
+ *
+ * Return: the number of nodes which want all IPv4 multicast traffic if the
+ * given ethhdr is from an IPv4 packet or the number of nodes which want all
+ * IPv6 traffic if it matches an IPv6 packet.
+ */
+static int batadv_mcast_forw_want_all_ip_count(struct batadv_priv *bat_priv,
+ struct ethhdr *ethhdr)
+{
+ switch (ntohs(ethhdr->h_proto)) {
+ case ETH_P_IP:
+ return atomic_read(&bat_priv->mcast.num_want_all_ipv4);
+ case ETH_P_IPV6:
+ return atomic_read(&bat_priv->mcast.num_want_all_ipv6);
+ default:
+ /* we shouldn't be here... */
+ return 0;
+ }
+}
+
+/**
+ * batadv_mcast_forw_rtr_count() - count nodes with a multicast router
+ * @bat_priv: the bat priv with all the soft interface information
+ * @protocol: the ethernet protocol type to count multicast routers for
+ *
+ * Return: the number of nodes which want all routable IPv4 multicast traffic
+ * if the protocol is ETH_P_IP or the number of nodes which want all routable
+ * IPv6 traffic if the protocol is ETH_P_IPV6. Otherwise returns 0.
+ */
+static int batadv_mcast_forw_rtr_count(struct batadv_priv *bat_priv,
+ int protocol)
+{
+ switch (protocol) {
+ case ETH_P_IP:
+ return atomic_read(&bat_priv->mcast.num_want_all_rtr4);
+ case ETH_P_IPV6:
+ return atomic_read(&bat_priv->mcast.num_want_all_rtr6);
+ default:
+ return 0;
+ }
+}
+
+/**
+ * batadv_mcast_forw_mode() - check on how to forward a multicast packet
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the multicast packet to check
+ * @is_routable: stores whether the destination is routable
+ *
+ * Return: The forwarding mode as enum batadv_forw_mode.
+ */
+enum batadv_forw_mode
+batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ int *is_routable)
+{
+ int ret, tt_count, ip_count, unsnoop_count, total_count;
+ bool is_unsnoopable = false;
+ struct ethhdr *ethhdr;
+ int rtr_count = 0;
+
+ ret = batadv_mcast_forw_mode_check(bat_priv, skb, &is_unsnoopable,
+ is_routable);
+ if (ret == -ENOMEM)
+ return BATADV_FORW_NONE;
+ else if (ret < 0)
+ return BATADV_FORW_BCAST;
+
+ ethhdr = eth_hdr(skb);
+
+ tt_count = batadv_tt_global_hash_count(bat_priv, ethhdr->h_dest,
+ BATADV_NO_FLAGS);
+ ip_count = batadv_mcast_forw_want_all_ip_count(bat_priv, ethhdr);
+ unsnoop_count = !is_unsnoopable ? 0 :
+ atomic_read(&bat_priv->mcast.num_want_all_unsnoopables);
+ rtr_count = batadv_mcast_forw_rtr_count(bat_priv, *is_routable);
+
+ total_count = tt_count + ip_count + unsnoop_count + rtr_count;
+
+ if (!total_count)
+ return BATADV_FORW_NONE;
+ else if (unsnoop_count)
+ return BATADV_FORW_BCAST;
+
+ if (total_count <= atomic_read(&bat_priv->multicast_fanout))
+ return BATADV_FORW_UCASTS;
+
+ return BATADV_FORW_BCAST;
+}
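+
+/* The decision above reduced to its inputs (illustrative,
+ * self-contained sketch; "decide" is a placeholder name):
+ */
+#if 0
+#include <stdio.h>
+
+enum fwd_mode { FWD_NONE, FWD_UCASTS, FWD_BCAST };
+
+static enum fwd_mode decide(int tt, int ip, int unsnoop, int rtr,
+			    int fanout)
+{
+	int total = tt + ip + unsnoop + rtr;
+
+	if (!total)
+		return FWD_NONE;	/* nobody is interested */
+	if (unsnoop)
+		return FWD_BCAST;	/* unsnoopables force a flood */
+	if (total <= fanout)
+		return FWD_UCASTS;	/* cheap enough as unicasts */
+	return FWD_BCAST;
+}
+
+int main(void)
+{
+	/* 3 TT listeners, nothing else, fanout 16: 3 <= 16, so the
+	 * frame goes out as 3 unicasts (prints 1 == FWD_UCASTS)
+	 */
+	printf("%d\n", decide(3, 0, 0, 0, 16));
+	return 0;
+}
+#endif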
+
+/**
+ * batadv_mcast_forw_send_orig() - send a multicast packet to an originator
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the multicast packet to send
+ * @vid: the vlan identifier
+ * @orig_node: the originator to send the packet to
+ *
+ * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ */
+static int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ unsigned short vid,
+ struct batadv_orig_node *orig_node)
+{
+ /* Avoid sending multicast-in-unicast packets to other BLA
+ * gateways - they already got the frame from the LAN side
+ * we share with them.
+ * TODO: Refactor to take BLA into account earlier, to avoid
+ * reducing the mcast_fanout count.
+ */
+ if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig, vid)) {
+ dev_kfree_skb(skb);
+ return NET_XMIT_SUCCESS;
+ }
+
+ return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
+ orig_node, vid);
+}
+
+/**
+ * batadv_mcast_forw_tt() - forwards a packet to multicast listeners
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the multicast packet to transmit
+ * @vid: the vlan identifier
+ *
+ * Sends copies of a frame with multicast destination to any multicast
+ * listener registered in the translation table. A transmission is performed
+ * via a batman-adv unicast packet for each such destination node.
+ *
+ * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
+ * otherwise.
+ */
+static int
+batadv_mcast_forw_tt(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ unsigned short vid)
+{
+	int ret = NET_XMIT_SUCCESS;
+	struct sk_buff *newskb;
+	struct batadv_tt_orig_list_entry *orig_entry;
+	struct batadv_tt_global_entry *tt_global;
+	const u8 *addr = eth_hdr(skb)->h_dest;
+
+ tt_global = batadv_tt_global_hash_find(bat_priv, addr, vid);
+ if (!tt_global)
+ goto out;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(orig_entry, &tt_global->orig_list, list) {
+ newskb = skb_copy(skb, GFP_ATOMIC);
+ if (!newskb) {
+ ret = NET_XMIT_DROP;
+ break;
+ }
+
+ batadv_mcast_forw_send_orig(bat_priv, newskb, vid,
+ orig_entry->orig_node);
+ }
+ rcu_read_unlock();
+
+ batadv_tt_global_entry_put(tt_global);
+
+out:
+ return ret;
+}
+
+/**
+ * batadv_mcast_forw_want_all_ipv4() - forward to nodes with want-all-ipv4
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the multicast packet to transmit
+ * @vid: the vlan identifier
+ *
+ * Sends copies of a frame with multicast destination to any node with a
+ * BATADV_MCAST_WANT_ALL_IPV4 flag set. A transmission is performed via a
+ * batman-adv unicast packet for each such destination node.
+ *
+ * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
+ * otherwise.
+ */
+static int
+batadv_mcast_forw_want_all_ipv4(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, unsigned short vid)
+{
+ struct batadv_orig_node *orig_node;
+ int ret = NET_XMIT_SUCCESS;
+ struct sk_buff *newskb;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(orig_node,
+ &bat_priv->mcast.want_all_ipv4_list,
+ mcast_want_all_ipv4_node) {
+ newskb = skb_copy(skb, GFP_ATOMIC);
+ if (!newskb) {
+ ret = NET_XMIT_DROP;
+ break;
+ }
+
+ batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
+/**
+ * batadv_mcast_forw_want_all_ipv6() - forward to nodes with want-all-ipv6
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the multicast packet to transmit
+ * @vid: the vlan identifier
+ *
+ * Sends copies of a frame with multicast destination to any node with a
+ * BATADV_MCAST_WANT_ALL_IPV6 flag set. A transmission is performed via a
+ * batman-adv unicast packet for each such destination node.
+ *
+ * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
+ * otherwise.
+ */
+static int
+batadv_mcast_forw_want_all_ipv6(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, unsigned short vid)
+{
+ struct batadv_orig_node *orig_node;
+ int ret = NET_XMIT_SUCCESS;
+ struct sk_buff *newskb;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(orig_node,
+ &bat_priv->mcast.want_all_ipv6_list,
+ mcast_want_all_ipv6_node) {
+ newskb = skb_copy(skb, GFP_ATOMIC);
+ if (!newskb) {
+ ret = NET_XMIT_DROP;
+ break;
+ }
+
+ batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
+/**
+ * batadv_mcast_forw_want_all() - forward packet to nodes in a want-all list
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the multicast packet to transmit
+ * @vid: the vlan identifier
+ *
+ * Sends copies of a frame with multicast destination to any node with a
+ * BATADV_MCAST_WANT_ALL_IPV4 or BATADV_MCAST_WANT_ALL_IPV6 flag set. A
+ * transmission is performed via a batman-adv unicast packet for each such
+ * destination node.
+ *
+ * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
+ * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
+ */
+static int
+batadv_mcast_forw_want_all(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, unsigned short vid)
+{
+ switch (ntohs(eth_hdr(skb)->h_proto)) {
+ case ETH_P_IP:
+ return batadv_mcast_forw_want_all_ipv4(bat_priv, skb, vid);
+ case ETH_P_IPV6:
+ return batadv_mcast_forw_want_all_ipv6(bat_priv, skb, vid);
+ default:
+ /* we shouldn't be here... */
+ return NET_XMIT_DROP;
+ }
+}
+
+/**
+ * batadv_mcast_forw_want_all_rtr4() - forward to nodes with want-all-rtr4
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the multicast packet to transmit
+ * @vid: the vlan identifier
+ *
+ * Sends copies of a frame with multicast destination to any node with a
+ * BATADV_MCAST_WANT_NO_RTR4 flag unset. A transmission is performed via a
+ * batman-adv unicast packet for each such destination node.
+ *
+ * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
+ * otherwise.
+ */
+static int
+batadv_mcast_forw_want_all_rtr4(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, unsigned short vid)
+{
+ struct batadv_orig_node *orig_node;
+ int ret = NET_XMIT_SUCCESS;
+ struct sk_buff *newskb;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(orig_node,
+ &bat_priv->mcast.want_all_rtr4_list,
+ mcast_want_all_rtr4_node) {
+ newskb = skb_copy(skb, GFP_ATOMIC);
+ if (!newskb) {
+ ret = NET_XMIT_DROP;
+ break;
+ }
+
+ batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
+/**
+ * batadv_mcast_forw_want_all_rtr6() - forward to nodes with want-all-rtr6
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the multicast packet to transmit
+ * @vid: the vlan identifier
+ *
+ * Sends copies of a frame with multicast destination to any node with a
+ * BATADV_MCAST_WANT_NO_RTR6 flag unset. A transmission is performed via a
+ * batman-adv unicast packet for each such destination node.
+ *
+ * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
+ * otherwise.
+ */
+static int
+batadv_mcast_forw_want_all_rtr6(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, unsigned short vid)
+{
+ struct batadv_orig_node *orig_node;
+ int ret = NET_XMIT_SUCCESS;
+ struct sk_buff *newskb;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(orig_node,
+ &bat_priv->mcast.want_all_rtr6_list,
+ mcast_want_all_rtr6_node) {
+ newskb = skb_copy(skb, GFP_ATOMIC);
+ if (!newskb) {
+ ret = NET_XMIT_DROP;
+ break;
+ }
+
+ batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
+/**
+ * batadv_mcast_forw_want_rtr() - forward packet to nodes in a want-all-rtr list
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the multicast packet to transmit
+ * @vid: the vlan identifier
+ *
+ * Sends copies of a frame with multicast destination to any node with a
+ * BATADV_MCAST_WANT_NO_RTR4 or BATADV_MCAST_WANT_NO_RTR6 flag unset. A
+ * transmission is performed via a batman-adv unicast packet for each such
+ * destination node.
+ *
+ * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
+ * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
+ */
+static int
+batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, unsigned short vid)
+{
+ switch (ntohs(eth_hdr(skb)->h_proto)) {
+ case ETH_P_IP:
+ return batadv_mcast_forw_want_all_rtr4(bat_priv, skb, vid);
+ case ETH_P_IPV6:
+ return batadv_mcast_forw_want_all_rtr6(bat_priv, skb, vid);
+ default:
+ /* we shouldn't be here... */
+ return NET_XMIT_DROP;
+ }
+}
+
+/**
+ * batadv_mcast_forw_send() - send packet to any detected multicast recipient
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the multicast packet to transmit
+ * @vid: the vlan identifier
+ * @is_routable: stores whether the destination is routable
+ *
+ * Sends copies of a frame with multicast destination to any node that signaled
+ * interest in it, either via the translation table or the corresponding
+ * want-all flags. A transmission is performed via a batman-adv unicast packet
+ * for each such destination node.
+ *
+ * The given skb is consumed/freed.
+ *
+ * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
+ * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
+ */
+int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ unsigned short vid, int is_routable)
+{
+ int ret;
+
+ ret = batadv_mcast_forw_tt(bat_priv, skb, vid);
+ if (ret != NET_XMIT_SUCCESS) {
+ kfree_skb(skb);
+ return ret;
+ }
+
+ ret = batadv_mcast_forw_want_all(bat_priv, skb, vid);
+ if (ret != NET_XMIT_SUCCESS) {
+ kfree_skb(skb);
+ return ret;
+ }
+
+ if (!is_routable)
+ goto skip_mc_router;
+
+ ret = batadv_mcast_forw_want_rtr(bat_priv, skb, vid);
+ if (ret != NET_XMIT_SUCCESS) {
+ kfree_skb(skb);
+ return ret;
+ }
+
+skip_mc_router:
+ consume_skb(skb);
+ return ret;
+}
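+
+/* Simplified caller-side shape (sketch; the real caller is the soft
+ * interface transmit path, and the broadcast fallback plus error
+ * handling are elided here):
+ */
+#if 0
+static void tx_mcast_sketch(struct batadv_priv *bat_priv,
+			    struct sk_buff *skb, unsigned short vid)
+{
+	int is_routable = 0;
+
+	switch (batadv_mcast_forw_mode(bat_priv, skb, &is_routable)) {
+	case BATADV_FORW_UCASTS:
+		/* consumes/frees skb in all cases */
+		batadv_mcast_forw_send(bat_priv, skb, vid, is_routable);
+		break;
+	case BATADV_FORW_BCAST:
+		/* hand skb to the classic broadcast path instead */
+		break;
+	case BATADV_FORW_NONE:
+		kfree_skb(skb);
+		break;
+	}
+}
+#endif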
+
+/**
+ * batadv_mcast_want_unsnoop_update() - update unsnoop counter and list
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the orig_node whose multicast state might have changed
+ * @mcast_flags: flags indicating the new multicast state
+ *
+ * If the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag of this originator,
+ * orig, has toggled then this method updates the counter and the list
+ * accordingly.
+ *
+ * Caller needs to hold orig->mcast_handler_lock.
+ */
+static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig,
+ u8 mcast_flags)
+{
+ struct hlist_node *node = &orig->mcast_want_all_unsnoopables_node;
+ struct hlist_head *head = &bat_priv->mcast.want_all_unsnoopables_list;
+
+ lockdep_assert_held(&orig->mcast_handler_lock);
+
+ /* switched from flag unset to set */
+ if (mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
+ !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)) {
+ atomic_inc(&bat_priv->mcast.num_want_all_unsnoopables);
+
+ spin_lock_bh(&bat_priv->mcast.want_lists_lock);
+ /* flag checks above + mcast_handler_lock prevents this */
+ WARN_ON(!hlist_unhashed(node));
+
+ hlist_add_head_rcu(node, head);
+ spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
+ /* switched from flag set to unset */
+ } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) &&
+ orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) {
+ atomic_dec(&bat_priv->mcast.num_want_all_unsnoopables);
+
+ spin_lock_bh(&bat_priv->mcast.want_lists_lock);
+ /* flag checks above + mcast_handler_lock prevents this */
+ WARN_ON(hlist_unhashed(node));
+
+ hlist_del_init_rcu(node);
+ spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
+ }
+}
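+
+/* Condensed shape shared by this handler and its four siblings below
+ * (sketch): a 0->1 flag transition bumps the counter and RCU-adds the
+ * node, a 1->0 transition reverses both. The rtr4/rtr6 variants invert
+ * the sense, since for those list membership means the
+ * BATADV_MCAST_WANT_NO_RTR* flag is absent.
+ */
+#if 0
+bool was_set = orig->mcast_flags & FLAG;
+bool now_set = mcast_flags & FLAG;
+
+if (now_set && !was_set) {
+	atomic_inc(counter);
+	spin_lock_bh(list_lock);
+	hlist_add_head_rcu(node, head);
+	spin_unlock_bh(list_lock);
+} else if (!now_set && was_set) {
+	atomic_dec(counter);
+	spin_lock_bh(list_lock);
+	hlist_del_init_rcu(node);
+	spin_unlock_bh(list_lock);
+}
+#endif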
+
+/**
+ * batadv_mcast_want_ipv4_update() - update want-all-ipv4 counter and list
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the orig_node whose multicast state might have changed
+ * @mcast_flags: flags indicating the new multicast state
+ *
+ * If the BATADV_MCAST_WANT_ALL_IPV4 flag of this originator, orig, has
+ * toggled then this method updates the counter and the list accordingly.
+ *
+ * Caller needs to hold orig->mcast_handler_lock.
+ */
+static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig,
+ u8 mcast_flags)
+{
+ struct hlist_node *node = &orig->mcast_want_all_ipv4_node;
+ struct hlist_head *head = &bat_priv->mcast.want_all_ipv4_list;
+
+ lockdep_assert_held(&orig->mcast_handler_lock);
+
+ /* switched from flag unset to set */
+ if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4 &&
+ !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)) {
+ atomic_inc(&bat_priv->mcast.num_want_all_ipv4);
+
+ spin_lock_bh(&bat_priv->mcast.want_lists_lock);
+ /* flag checks above + mcast_handler_lock prevents this */
+ WARN_ON(!hlist_unhashed(node));
+
+ hlist_add_head_rcu(node, head);
+ spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
+ /* switched from flag set to unset */
+ } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) &&
+ orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) {
+ atomic_dec(&bat_priv->mcast.num_want_all_ipv4);
+
+ spin_lock_bh(&bat_priv->mcast.want_lists_lock);
+ /* flag checks above + mcast_handler_lock prevents this */
+ WARN_ON(hlist_unhashed(node));
+
+ hlist_del_init_rcu(node);
+ spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
+ }
+}
+
+/**
+ * batadv_mcast_want_ipv6_update() - update want-all-ipv6 counter and list
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the orig_node whose multicast state might have changed
+ * @mcast_flags: flags indicating the new multicast state
+ *
+ * If the BATADV_MCAST_WANT_ALL_IPV6 flag of this originator, orig, has
+ * toggled then this method updates the counter and the list accordingly.
+ *
+ * Caller needs to hold orig->mcast_handler_lock.
+ */
+static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig,
+ u8 mcast_flags)
+{
+ struct hlist_node *node = &orig->mcast_want_all_ipv6_node;
+ struct hlist_head *head = &bat_priv->mcast.want_all_ipv6_list;
+
+ lockdep_assert_held(&orig->mcast_handler_lock);
+
+ /* switched from flag unset to set */
+ if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6 &&
+ !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)) {
+ atomic_inc(&bat_priv->mcast.num_want_all_ipv6);
+
+ spin_lock_bh(&bat_priv->mcast.want_lists_lock);
+ /* flag checks above + mcast_handler_lock prevents this */
+ WARN_ON(!hlist_unhashed(node));
+
+ hlist_add_head_rcu(node, head);
+ spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
+ /* switched from flag set to unset */
+ } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) &&
+ orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) {
+ atomic_dec(&bat_priv->mcast.num_want_all_ipv6);
+
+ spin_lock_bh(&bat_priv->mcast.want_lists_lock);
+ /* flag checks above + mcast_handler_lock prevents this */
+ WARN_ON(hlist_unhashed(node));
+
+ hlist_del_init_rcu(node);
+ spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
+ }
+}
+
+/**
+ * batadv_mcast_want_rtr4_update() - update want-all-rtr4 counter and list
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the orig_node whose multicast state might have changed
+ * @mcast_flags: flags indicating the new multicast state
+ *
+ * If the BATADV_MCAST_WANT_NO_RTR4 flag of this originator, orig, has
+ * toggled then this method updates the counter and the list accordingly.
+ *
+ * Caller needs to hold orig->mcast_handler_lock.
+ */
+static void batadv_mcast_want_rtr4_update(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig,
+ u8 mcast_flags)
+{
+ struct hlist_node *node = &orig->mcast_want_all_rtr4_node;
+ struct hlist_head *head = &bat_priv->mcast.want_all_rtr4_list;
+
+ lockdep_assert_held(&orig->mcast_handler_lock);
+
+ /* switched from flag set to unset */
+ if (!(mcast_flags & BATADV_MCAST_WANT_NO_RTR4) &&
+ orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR4) {
+ atomic_inc(&bat_priv->mcast.num_want_all_rtr4);
+
+ spin_lock_bh(&bat_priv->mcast.want_lists_lock);
+ /* flag checks above + mcast_handler_lock prevents this */
+ WARN_ON(!hlist_unhashed(node));
+
+ hlist_add_head_rcu(node, head);
+ spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
+ /* switched from flag unset to set */
+ } else if (mcast_flags & BATADV_MCAST_WANT_NO_RTR4 &&
+ !(orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR4)) {
+ atomic_dec(&bat_priv->mcast.num_want_all_rtr4);
+
+ spin_lock_bh(&bat_priv->mcast.want_lists_lock);
+ /* flag checks above + mcast_handler_lock prevents this */
+ WARN_ON(hlist_unhashed(node));
+
+ hlist_del_init_rcu(node);
+ spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
+ }
+}
+
+/**
+ * batadv_mcast_want_rtr6_update() - update want-all-rtr6 counter and list
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the orig_node whose multicast state might have changed
+ * @mcast_flags: flags indicating the new multicast state
+ *
+ * If the BATADV_MCAST_WANT_NO_RTR6 flag of this originator, orig, has
+ * toggled then this method updates the counter and the list accordingly.
+ *
+ * Caller needs to hold orig->mcast_handler_lock.
+ */
+static void batadv_mcast_want_rtr6_update(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig,
+ u8 mcast_flags)
+{
+ struct hlist_node *node = &orig->mcast_want_all_rtr6_node;
+ struct hlist_head *head = &bat_priv->mcast.want_all_rtr6_list;
+
+ lockdep_assert_held(&orig->mcast_handler_lock);
+
+ /* switched from flag set to unset */
+ if (!(mcast_flags & BATADV_MCAST_WANT_NO_RTR6) &&
+ orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR6) {
+ atomic_inc(&bat_priv->mcast.num_want_all_rtr6);
+
+ spin_lock_bh(&bat_priv->mcast.want_lists_lock);
+ /* flag checks above + mcast_handler_lock prevents this */
+ WARN_ON(!hlist_unhashed(node));
+
+ hlist_add_head_rcu(node, head);
+ spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
+ /* switched from flag unset to set */
+ } else if (mcast_flags & BATADV_MCAST_WANT_NO_RTR6 &&
+ !(orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR6)) {
+ atomic_dec(&bat_priv->mcast.num_want_all_rtr6);
+
+ spin_lock_bh(&bat_priv->mcast.want_lists_lock);
+ /* flag checks above + mcast_handler_lock prevents this */
+ WARN_ON(hlist_unhashed(node));
+
+ hlist_del_init_rcu(node);
+ spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
+ }
+}
+
+/**
+ * batadv_mcast_tvlv_flags_get() - get multicast flags from an OGM TVLV
+ * @enabled: whether the originator has multicast TVLV support enabled
+ * @tvlv_value: tvlv buffer containing the multicast flags
+ * @tvlv_value_len: tvlv buffer length
+ *
+ * Return: multicast flags for the given tvlv buffer
+ */
+static u8
+batadv_mcast_tvlv_flags_get(bool enabled, void *tvlv_value, u16 tvlv_value_len)
+{
+ u8 mcast_flags = BATADV_NO_FLAGS;
+
+ if (enabled && tvlv_value && tvlv_value_len >= sizeof(mcast_flags))
+ mcast_flags = *(u8 *)tvlv_value;
+
+ if (!enabled) {
+ mcast_flags |= BATADV_MCAST_WANT_ALL_IPV4;
+ mcast_flags |= BATADV_MCAST_WANT_ALL_IPV6;
+ }
+
+ /* remove redundant flags to avoid sending duplicate packets later */
+ if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)
+ mcast_flags |= BATADV_MCAST_WANT_NO_RTR4;
+
+ if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)
+ mcast_flags |= BATADV_MCAST_WANT_NO_RTR6;
+
+ return mcast_flags;
+}
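+
+/* Worked example: an originator without multicast TVLV support
+ * (enabled == false) starts out as WANT_ALL_IPV4 | WANT_ALL_IPV6; the
+ * redundancy rules then add WANT_NO_RTR4 | WANT_NO_RTR6. Such a node
+ * therefore ends up on the want-all-ipv4/ipv6 lists (it receives
+ * everything anyway) but never additionally on the rtr lists.
+ */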
+
+/**
+ * batadv_mcast_tvlv_ogm_handler() - process incoming multicast tvlv container
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the orig_node of the ogm
+ * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
+ * @tvlv_value: tvlv buffer containing the multicast data
+ * @tvlv_value_len: tvlv buffer length
+ */
+static void batadv_mcast_tvlv_ogm_handler(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig,
+ u8 flags,
+ void *tvlv_value,
+ u16 tvlv_value_len)
+{
+ bool orig_mcast_enabled = !(flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
+ u8 mcast_flags;
+
+ mcast_flags = batadv_mcast_tvlv_flags_get(orig_mcast_enabled,
+ tvlv_value, tvlv_value_len);
+
+ spin_lock_bh(&orig->mcast_handler_lock);
+
+ if (orig_mcast_enabled &&
+ !test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
+ set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
+ } else if (!orig_mcast_enabled &&
+ test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
+ clear_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
+ }
+
+ set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized);
+
+ batadv_mcast_want_unsnoop_update(bat_priv, orig, mcast_flags);
+ batadv_mcast_want_ipv4_update(bat_priv, orig, mcast_flags);
+ batadv_mcast_want_ipv6_update(bat_priv, orig, mcast_flags);
+ batadv_mcast_want_rtr4_update(bat_priv, orig, mcast_flags);
+ batadv_mcast_want_rtr6_update(bat_priv, orig, mcast_flags);
+
+ orig->mcast_flags = mcast_flags;
+ spin_unlock_bh(&orig->mcast_handler_lock);
+}
+
+/**
+ * batadv_mcast_init() - initialize the multicast optimizations structures
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_mcast_init(struct batadv_priv *bat_priv)
+{
+ batadv_tvlv_handler_register(bat_priv, batadv_mcast_tvlv_ogm_handler,
+ NULL, NULL, BATADV_TVLV_MCAST, 2,
+ BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
+
+ INIT_DELAYED_WORK(&bat_priv->mcast.work, batadv_mcast_mla_update);
+ batadv_mcast_start_timer(bat_priv);
+}
+
+/**
+ * batadv_mcast_mesh_info_put() - put multicast info into a netlink message
+ * @msg: buffer for the message
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 or error code.
+ */
+int batadv_mcast_mesh_info_put(struct sk_buff *msg,
+ struct batadv_priv *bat_priv)
+{
+ u32 flags = bat_priv->mcast.mla_flags.tvlv_flags;
+ u32 flags_priv = BATADV_NO_FLAGS;
+
+ if (bat_priv->mcast.mla_flags.bridged) {
+ flags_priv |= BATADV_MCAST_FLAGS_BRIDGED;
+
+ if (bat_priv->mcast.mla_flags.querier_ipv4.exists)
+ flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_EXISTS;
+ if (bat_priv->mcast.mla_flags.querier_ipv6.exists)
+ flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_EXISTS;
+ if (bat_priv->mcast.mla_flags.querier_ipv4.shadowing)
+ flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_SHADOWING;
+ if (bat_priv->mcast.mla_flags.querier_ipv6.shadowing)
+ flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_SHADOWING;
+ }
+
+ if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS, flags) ||
+ nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS_PRIV, flags_priv))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+/**
+ * batadv_mcast_flags_dump_entry() - dump one entry of the multicast flags table
+ * to a netlink socket
+ * @msg: buffer for the message
+ * @portid: netlink port
+ * @cb: Control block containing additional options
+ * @orig_node: originator to dump the multicast flags of
+ *
+ * Return: 0 or error code.
+ */
+static int
+batadv_mcast_flags_dump_entry(struct sk_buff *msg, u32 portid,
+ struct netlink_callback *cb,
+ struct batadv_orig_node *orig_node)
+{
+ void *hdr;
+
+ hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
+ &batadv_netlink_family, NLM_F_MULTI,
+ BATADV_CMD_GET_MCAST_FLAGS);
+ if (!hdr)
+ return -ENOBUFS;
+
+ genl_dump_check_consistent(cb, hdr);
+
+ if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN,
+ orig_node->orig)) {
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+ }
+
+ if (test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
+ &orig_node->capabilities)) {
+ if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS,
+ orig_node->mcast_flags)) {
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+ }
+ }
+
+ genlmsg_end(msg, hdr);
+ return 0;
+}
+
+/**
+ * batadv_mcast_flags_dump_bucket() - dump one bucket of the multicast flags
+ * table to a netlink socket
+ * @msg: buffer for the message
+ * @portid: netlink port
+ * @cb: Control block containing additional options
+ * @hash: hash to dump
+ * @bucket: bucket index to dump
+ * @idx_skip: How many entries to skip
+ *
+ * Return: 0 or error code.
+ */
+static int
+batadv_mcast_flags_dump_bucket(struct sk_buff *msg, u32 portid,
+ struct netlink_callback *cb,
+ struct batadv_hashtable *hash,
+ unsigned int bucket, long *idx_skip)
+{
+ struct batadv_orig_node *orig_node;
+ long idx = 0;
+
+ spin_lock_bh(&hash->list_locks[bucket]);
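+	/* encode the table generation as the dump sequence number so
+	 * that genl_dump_check_consistent() can flag the message with
+	 * NLM_F_DUMP_INTR if the table changes mid-dump
+	 */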
+ cb->seq = atomic_read(&hash->generation) << 1 | 1;
+
+ hlist_for_each_entry(orig_node, &hash->table[bucket], hash_entry) {
+ if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
+ &orig_node->capa_initialized))
+ continue;
+
+ if (idx < *idx_skip)
+ goto skip;
+
+ if (batadv_mcast_flags_dump_entry(msg, portid, cb, orig_node)) {
+ spin_unlock_bh(&hash->list_locks[bucket]);
+ *idx_skip = idx;
+
+ return -EMSGSIZE;
+ }
+
+skip:
+ idx++;
+ }
+ spin_unlock_bh(&hash->list_locks[bucket]);
+
+ return 0;
+}
+
+/**
+ * __batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket
+ * @msg: buffer for the message
+ * @portid: netlink port
+ * @cb: Control block containing additional options
+ * @bat_priv: the bat priv with all the soft interface information
+ * @bucket: current bucket to dump
+ * @idx: index in current bucket to the next entry to dump
+ *
+ * Return: 0 or error code.
+ */
+static int
+__batadv_mcast_flags_dump(struct sk_buff *msg, u32 portid,
+ struct netlink_callback *cb,
+ struct batadv_priv *bat_priv, long *bucket, long *idx)
+{
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
+ long bucket_tmp = *bucket;
+ long idx_tmp = *idx;
+
+ while (bucket_tmp < hash->size) {
+ if (batadv_mcast_flags_dump_bucket(msg, portid, cb, hash,
+ bucket_tmp, &idx_tmp))
+ break;
+
+ bucket_tmp++;
+ idx_tmp = 0;
+ }
+
+ *bucket = bucket_tmp;
+ *idx = idx_tmp;
+
+ return msg->len;
+}
+
+/**
+ * batadv_mcast_netlink_get_primary() - get primary interface from netlink
+ * callback
+ * @cb: netlink callback structure
+ * @primary_if: the primary interface pointer to return the result in
+ *
+ * Return: 0 or error code.
+ */
+static int
+batadv_mcast_netlink_get_primary(struct netlink_callback *cb,
+ struct batadv_hard_iface **primary_if)
+{
+ struct batadv_hard_iface *hard_iface = NULL;
+ struct net *net = sock_net(cb->skb->sk);
+ struct net_device *soft_iface;
+ struct batadv_priv *bat_priv;
+ int ifindex;
+ int ret = 0;
+
+ ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX);
+ if (!ifindex)
+ return -EINVAL;
+
+ soft_iface = dev_get_by_index(net, ifindex);
+ if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ bat_priv = netdev_priv(soft_iface);
+
+ hard_iface = batadv_primary_if_get_selected(bat_priv);
+ if (!hard_iface || hard_iface->if_status != BATADV_IF_ACTIVE) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+out:
+ dev_put(soft_iface);
+
+ if (!ret && primary_if)
+ *primary_if = hard_iface;
+ else
+ batadv_hardif_put(hard_iface);
+
+ return ret;
+}
+
+/**
+ * batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket
+ * @msg: buffer for the message
+ * @cb: callback structure containing arguments
+ *
+ * Return: message length.
+ */
+int batadv_mcast_flags_dump(struct sk_buff *msg, struct netlink_callback *cb)
+{
+ struct batadv_hard_iface *primary_if = NULL;
+ int portid = NETLINK_CB(cb->skb).portid;
+ struct batadv_priv *bat_priv;
+ long *bucket = &cb->args[0];
+ long *idx = &cb->args[1];
+ int ret;
+
+ ret = batadv_mcast_netlink_get_primary(cb, &primary_if);
+ if (ret)
+ return ret;
+
+ bat_priv = netdev_priv(primary_if->soft_iface);
+ ret = __batadv_mcast_flags_dump(msg, portid, cb, bat_priv, bucket, idx);
+
+ batadv_hardif_put(primary_if);
+ return ret;
+}
+
+/**
+ * batadv_mcast_free() - free the multicast optimizations structures
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_mcast_free(struct batadv_priv *bat_priv)
+{
+ cancel_delayed_work_sync(&bat_priv->mcast.work);
+
+ batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_MCAST, 2);
+ batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_MCAST, 2);
+
+ /* safely calling outside of worker, as worker was canceled above */
+ batadv_mcast_mla_tt_retract(bat_priv, NULL);
+}
+
+/**
+ * batadv_mcast_purge_orig() - reset originator global mcast state modifications
+ * @orig: the originator which is going to get purged
+ */
+void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
+{
+ struct batadv_priv *bat_priv = orig->bat_priv;
+
+ spin_lock_bh(&orig->mcast_handler_lock);
+
+ batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
+ batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS);
+ batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS);
+ batadv_mcast_want_rtr4_update(bat_priv, orig,
+ BATADV_MCAST_WANT_NO_RTR4);
+ batadv_mcast_want_rtr6_update(bat_priv, orig,
+ BATADV_MCAST_WANT_NO_RTR6);
+
+ spin_unlock_bh(&orig->mcast_handler_lock);
+}
diff --git a/net/batman-adv/multicast.h b/net/batman-adv/multicast.h
new file mode 100644
index 0000000000..a9770d8d6d
--- /dev/null
+++ b/net/batman-adv/multicast.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Linus Lüssing
+ */
+
+#ifndef _NET_BATMAN_ADV_MULTICAST_H_
+#define _NET_BATMAN_ADV_MULTICAST_H_
+
+#include "main.h"
+
+#include <linux/netlink.h>
+#include <linux/skbuff.h>
+
+/**
+ * enum batadv_forw_mode - the way a packet should be forwarded
+ */
+enum batadv_forw_mode {
+ /**
+ * @BATADV_FORW_BCAST: forward the packet to all nodes via a batman-adv
+ * broadcast packet
+ */
+ BATADV_FORW_BCAST,
+
+ /**
+ * @BATADV_FORW_UCASTS: forward the packet to some nodes via one
+ * or more batman-adv unicast packets
+ */
+ BATADV_FORW_UCASTS,
+
+ /** @BATADV_FORW_NONE: don't forward, drop it */
+ BATADV_FORW_NONE,
+};
+
+#ifdef CONFIG_BATMAN_ADV_MCAST
+
+enum batadv_forw_mode
+batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ int *is_routable);
+
+int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ unsigned short vid, int is_routable);
+
+void batadv_mcast_init(struct batadv_priv *bat_priv);
+
+int batadv_mcast_mesh_info_put(struct sk_buff *msg,
+ struct batadv_priv *bat_priv);
+
+int batadv_mcast_flags_dump(struct sk_buff *msg, struct netlink_callback *cb);
+
+void batadv_mcast_free(struct batadv_priv *bat_priv);
+
+void batadv_mcast_purge_orig(struct batadv_orig_node *orig_node);
+
+#else
+
+static inline enum batadv_forw_mode
+batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ int *is_routable)
+{
+ return BATADV_FORW_BCAST;
+}
+
+static inline int
+batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ unsigned short vid, int is_routable)
+{
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+}
+
+static inline int batadv_mcast_init(struct batadv_priv *bat_priv)
+{
+ return 0;
+}
+
+static inline int
+batadv_mcast_mesh_info_put(struct sk_buff *msg, struct batadv_priv *bat_priv)
+{
+ return 0;
+}
+
+static inline int batadv_mcast_flags_dump(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void batadv_mcast_free(struct batadv_priv *bat_priv)
+{
+}
+
+static inline void batadv_mcast_purge_orig(struct batadv_orig_node *orig_node)
+{
+}
+
+#endif /* CONFIG_BATMAN_ADV_MCAST */
+
+#endif /* _NET_BATMAN_ADV_MULTICAST_H_ */
diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
new file mode 100644
index 0000000000..0c64d81a77
--- /dev/null
+++ b/net/batman-adv/netlink.c
@@ -0,0 +1,1521 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Matthias Schiffer
+ */
+
+#include "netlink.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/byteorder/generic.h>
+#include <linux/cache.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/genetlink.h>
+#include <linux/gfp.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/list.h>
+#include <linux/minmax.h>
+#include <linux/netdevice.h>
+#include <linux/netlink.h>
+#include <linux/printk.h>
+#include <linux/rtnetlink.h>
+#include <linux/skbuff.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <net/genetlink.h>
+#include <net/net_namespace.h>
+#include <net/netlink.h>
+#include <net/sock.h>
+#include <uapi/linux/batadv_packet.h>
+#include <uapi/linux/batman_adv.h>
+
+#include "bat_algo.h"
+#include "bridge_loop_avoidance.h"
+#include "distributed-arp-table.h"
+#include "gateway_client.h"
+#include "gateway_common.h"
+#include "hard-interface.h"
+#include "log.h"
+#include "multicast.h"
+#include "network-coding.h"
+#include "originator.h"
+#include "soft-interface.h"
+#include "tp_meter.h"
+#include "translation-table.h"
+
+struct genl_family batadv_netlink_family;
+
+/* multicast groups */
+enum batadv_netlink_multicast_groups {
+ BATADV_NL_MCGRP_CONFIG,
+ BATADV_NL_MCGRP_TPMETER,
+};
+
+/**
+ * enum batadv_genl_ops_flags - flags for genl_ops's internal_flags
+ */
+enum batadv_genl_ops_flags {
+ /**
+ * @BATADV_FLAG_NEED_MESH: request requires valid soft interface in
+ * attribute BATADV_ATTR_MESH_IFINDEX and expects a pointer to it to be
+ * saved in info->user_ptr[0]
+ */
+ BATADV_FLAG_NEED_MESH = BIT(0),
+
+ /**
+ * @BATADV_FLAG_NEED_HARDIF: request requires valid hard interface in
+ * attribute BATADV_ATTR_HARD_IFINDEX and expects a pointer to it to be
+ * saved in info->user_ptr[1]
+ */
+ BATADV_FLAG_NEED_HARDIF = BIT(1),
+
+ /**
+ * @BATADV_FLAG_NEED_VLAN: request requires valid vlan in
+ * attribute BATADV_ATTR_VLANID and expects a pointer to it to be
+ * saved in info->user_ptr[1]
+ */
+ BATADV_FLAG_NEED_VLAN = BIT(2),
+};
+
+static const struct genl_multicast_group batadv_netlink_mcgrps[] = {
+ [BATADV_NL_MCGRP_CONFIG] = { .name = BATADV_NL_MCAST_GROUP_CONFIG },
+ [BATADV_NL_MCGRP_TPMETER] = { .name = BATADV_NL_MCAST_GROUP_TPMETER },
+};
+
+static const struct nla_policy batadv_netlink_policy[NUM_BATADV_ATTR] = {
+ [BATADV_ATTR_VERSION] = { .type = NLA_STRING },
+ [BATADV_ATTR_ALGO_NAME] = { .type = NLA_STRING },
+ [BATADV_ATTR_MESH_IFINDEX] = { .type = NLA_U32 },
+ [BATADV_ATTR_MESH_IFNAME] = { .type = NLA_STRING },
+ [BATADV_ATTR_MESH_ADDRESS] = { .len = ETH_ALEN },
+ [BATADV_ATTR_HARD_IFINDEX] = { .type = NLA_U32 },
+ [BATADV_ATTR_HARD_IFNAME] = { .type = NLA_STRING },
+ [BATADV_ATTR_HARD_ADDRESS] = { .len = ETH_ALEN },
+ [BATADV_ATTR_ORIG_ADDRESS] = { .len = ETH_ALEN },
+ [BATADV_ATTR_TPMETER_RESULT] = { .type = NLA_U8 },
+ [BATADV_ATTR_TPMETER_TEST_TIME] = { .type = NLA_U32 },
+ [BATADV_ATTR_TPMETER_BYTES] = { .type = NLA_U64 },
+ [BATADV_ATTR_TPMETER_COOKIE] = { .type = NLA_U32 },
+ [BATADV_ATTR_ACTIVE] = { .type = NLA_FLAG },
+ [BATADV_ATTR_TT_ADDRESS] = { .len = ETH_ALEN },
+ [BATADV_ATTR_TT_TTVN] = { .type = NLA_U8 },
+ [BATADV_ATTR_TT_LAST_TTVN] = { .type = NLA_U8 },
+ [BATADV_ATTR_TT_CRC32] = { .type = NLA_U32 },
+ [BATADV_ATTR_TT_VID] = { .type = NLA_U16 },
+ [BATADV_ATTR_TT_FLAGS] = { .type = NLA_U32 },
+ [BATADV_ATTR_FLAG_BEST] = { .type = NLA_FLAG },
+ [BATADV_ATTR_LAST_SEEN_MSECS] = { .type = NLA_U32 },
+ [BATADV_ATTR_NEIGH_ADDRESS] = { .len = ETH_ALEN },
+ [BATADV_ATTR_TQ] = { .type = NLA_U8 },
+ [BATADV_ATTR_THROUGHPUT] = { .type = NLA_U32 },
+ [BATADV_ATTR_BANDWIDTH_UP] = { .type = NLA_U32 },
+ [BATADV_ATTR_BANDWIDTH_DOWN] = { .type = NLA_U32 },
+ [BATADV_ATTR_ROUTER] = { .len = ETH_ALEN },
+ [BATADV_ATTR_BLA_OWN] = { .type = NLA_FLAG },
+ [BATADV_ATTR_BLA_ADDRESS] = { .len = ETH_ALEN },
+ [BATADV_ATTR_BLA_VID] = { .type = NLA_U16 },
+ [BATADV_ATTR_BLA_BACKBONE] = { .len = ETH_ALEN },
+ [BATADV_ATTR_BLA_CRC] = { .type = NLA_U16 },
+ [BATADV_ATTR_DAT_CACHE_IP4ADDRESS] = { .type = NLA_U32 },
+ [BATADV_ATTR_DAT_CACHE_HWADDRESS] = { .len = ETH_ALEN },
+ [BATADV_ATTR_DAT_CACHE_VID] = { .type = NLA_U16 },
+ [BATADV_ATTR_MCAST_FLAGS] = { .type = NLA_U32 },
+ [BATADV_ATTR_MCAST_FLAGS_PRIV] = { .type = NLA_U32 },
+ [BATADV_ATTR_VLANID] = { .type = NLA_U16 },
+ [BATADV_ATTR_AGGREGATED_OGMS_ENABLED] = { .type = NLA_U8 },
+ [BATADV_ATTR_AP_ISOLATION_ENABLED] = { .type = NLA_U8 },
+ [BATADV_ATTR_ISOLATION_MARK] = { .type = NLA_U32 },
+ [BATADV_ATTR_ISOLATION_MASK] = { .type = NLA_U32 },
+ [BATADV_ATTR_BONDING_ENABLED] = { .type = NLA_U8 },
+ [BATADV_ATTR_BRIDGE_LOOP_AVOIDANCE_ENABLED] = { .type = NLA_U8 },
+ [BATADV_ATTR_DISTRIBUTED_ARP_TABLE_ENABLED] = { .type = NLA_U8 },
+ [BATADV_ATTR_FRAGMENTATION_ENABLED] = { .type = NLA_U8 },
+ [BATADV_ATTR_GW_BANDWIDTH_DOWN] = { .type = NLA_U32 },
+ [BATADV_ATTR_GW_BANDWIDTH_UP] = { .type = NLA_U32 },
+ [BATADV_ATTR_GW_MODE] = { .type = NLA_U8 },
+ [BATADV_ATTR_GW_SEL_CLASS] = { .type = NLA_U32 },
+ [BATADV_ATTR_HOP_PENALTY] = { .type = NLA_U8 },
+ [BATADV_ATTR_LOG_LEVEL] = { .type = NLA_U32 },
+ [BATADV_ATTR_MULTICAST_FORCEFLOOD_ENABLED] = { .type = NLA_U8 },
+ [BATADV_ATTR_MULTICAST_FANOUT] = { .type = NLA_U32 },
+ [BATADV_ATTR_NETWORK_CODING_ENABLED] = { .type = NLA_U8 },
+ [BATADV_ATTR_ORIG_INTERVAL] = { .type = NLA_U32 },
+ [BATADV_ATTR_ELP_INTERVAL] = { .type = NLA_U32 },
+ [BATADV_ATTR_THROUGHPUT_OVERRIDE] = { .type = NLA_U32 },
+};
+
+/**
+ * batadv_netlink_get_ifindex() - Extract an interface index from a message
+ * @nlh: Message header
+ * @attrtype: Attribute which holds an interface index
+ *
+ * Return: interface index, or 0.
+ */
+int
+batadv_netlink_get_ifindex(const struct nlmsghdr *nlh, int attrtype)
+{
+ struct nlattr *attr = nlmsg_find_attr(nlh, GENL_HDRLEN, attrtype);
+
+ return (attr && nla_len(attr) == sizeof(u32)) ? nla_get_u32(attr) : 0;
+}
+
+/**
+ * batadv_netlink_mesh_fill_ap_isolation() - Add ap_isolation softif attribute
+ * @msg: Netlink message to dump into
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+static int batadv_netlink_mesh_fill_ap_isolation(struct sk_buff *msg,
+ struct batadv_priv *bat_priv)
+{
+ struct batadv_softif_vlan *vlan;
+ u8 ap_isolation;
+
+ vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
+ if (!vlan)
+ return 0;
+
+ ap_isolation = atomic_read(&vlan->ap_isolation);
+ batadv_softif_vlan_put(vlan);
+
+ return nla_put_u8(msg, BATADV_ATTR_AP_ISOLATION_ENABLED,
+ !!ap_isolation);
+}
+
+/**
+ * batadv_netlink_set_mesh_ap_isolation() - Set ap_isolation from genl msg
+ * @attr: parsed BATADV_ATTR_AP_ISOLATION_ENABLED attribute
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+static int batadv_netlink_set_mesh_ap_isolation(struct nlattr *attr,
+ struct batadv_priv *bat_priv)
+{
+ struct batadv_softif_vlan *vlan;
+
+ vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
+ if (!vlan)
+ return -ENOENT;
+
+ atomic_set(&vlan->ap_isolation, !!nla_get_u8(attr));
+ batadv_softif_vlan_put(vlan);
+
+ return 0;
+}
+
+/**
+ * batadv_netlink_mesh_fill() - Fill message with mesh attributes
+ * @msg: Netlink message to dump into
+ * @bat_priv: the bat priv with all the soft interface information
+ * @cmd: type of message to generate
+ * @portid: Port making netlink request
+ * @seq: sequence number for message
+ * @flags: Additional flags for message
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+static int batadv_netlink_mesh_fill(struct sk_buff *msg,
+ struct batadv_priv *bat_priv,
+ enum batadv_nl_commands cmd,
+ u32 portid, u32 seq, int flags)
+{
+ struct net_device *soft_iface = bat_priv->soft_iface;
+ struct batadv_hard_iface *primary_if = NULL;
+ struct net_device *hard_iface;
+ void *hdr;
+
+ hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, flags, cmd);
+ if (!hdr)
+ return -ENOBUFS;
+
+ if (nla_put_string(msg, BATADV_ATTR_VERSION, BATADV_SOURCE_VERSION) ||
+ nla_put_string(msg, BATADV_ATTR_ALGO_NAME,
+ bat_priv->algo_ops->name) ||
+ nla_put_u32(msg, BATADV_ATTR_MESH_IFINDEX, soft_iface->ifindex) ||
+ nla_put_string(msg, BATADV_ATTR_MESH_IFNAME, soft_iface->name) ||
+ nla_put(msg, BATADV_ATTR_MESH_ADDRESS, ETH_ALEN,
+ soft_iface->dev_addr) ||
+ nla_put_u8(msg, BATADV_ATTR_TT_TTVN,
+ (u8)atomic_read(&bat_priv->tt.vn)))
+ goto nla_put_failure;
+
+#ifdef CONFIG_BATMAN_ADV_BLA
+ if (nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
+ ntohs(bat_priv->bla.claim_dest.group)))
+ goto nla_put_failure;
+#endif
+
+ if (batadv_mcast_mesh_info_put(msg, bat_priv))
+ goto nla_put_failure;
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (primary_if && primary_if->if_status == BATADV_IF_ACTIVE) {
+ hard_iface = primary_if->net_dev;
+
+ if (nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
+ hard_iface->ifindex) ||
+ nla_put_string(msg, BATADV_ATTR_HARD_IFNAME,
+ hard_iface->name) ||
+ nla_put(msg, BATADV_ATTR_HARD_ADDRESS, ETH_ALEN,
+ hard_iface->dev_addr))
+ goto nla_put_failure;
+ }
+
+ if (nla_put_u8(msg, BATADV_ATTR_AGGREGATED_OGMS_ENABLED,
+ !!atomic_read(&bat_priv->aggregated_ogms)))
+ goto nla_put_failure;
+
+ if (batadv_netlink_mesh_fill_ap_isolation(msg, bat_priv))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, BATADV_ATTR_ISOLATION_MARK,
+ bat_priv->isolation_mark))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, BATADV_ATTR_ISOLATION_MASK,
+ bat_priv->isolation_mark_mask))
+ goto nla_put_failure;
+
+ if (nla_put_u8(msg, BATADV_ATTR_BONDING_ENABLED,
+ !!atomic_read(&bat_priv->bonding)))
+ goto nla_put_failure;
+
+#ifdef CONFIG_BATMAN_ADV_BLA
+ if (nla_put_u8(msg, BATADV_ATTR_BRIDGE_LOOP_AVOIDANCE_ENABLED,
+ !!atomic_read(&bat_priv->bridge_loop_avoidance)))
+ goto nla_put_failure;
+#endif /* CONFIG_BATMAN_ADV_BLA */
+
+#ifdef CONFIG_BATMAN_ADV_DAT
+ if (nla_put_u8(msg, BATADV_ATTR_DISTRIBUTED_ARP_TABLE_ENABLED,
+ !!atomic_read(&bat_priv->distributed_arp_table)))
+ goto nla_put_failure;
+#endif /* CONFIG_BATMAN_ADV_DAT */
+
+ if (nla_put_u8(msg, BATADV_ATTR_FRAGMENTATION_ENABLED,
+ !!atomic_read(&bat_priv->fragmentation)))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, BATADV_ATTR_GW_BANDWIDTH_DOWN,
+ atomic_read(&bat_priv->gw.bandwidth_down)))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, BATADV_ATTR_GW_BANDWIDTH_UP,
+ atomic_read(&bat_priv->gw.bandwidth_up)))
+ goto nla_put_failure;
+
+ if (nla_put_u8(msg, BATADV_ATTR_GW_MODE,
+ atomic_read(&bat_priv->gw.mode)))
+ goto nla_put_failure;
+
+ if (bat_priv->algo_ops->gw.get_best_gw_node &&
+ bat_priv->algo_ops->gw.is_eligible) {
+ /* GW selection class is not available if the routing algorithm
+ * in use does not implement the GW API
+ */
+ if (nla_put_u32(msg, BATADV_ATTR_GW_SEL_CLASS,
+ atomic_read(&bat_priv->gw.sel_class)))
+ goto nla_put_failure;
+ }
+
+ if (nla_put_u8(msg, BATADV_ATTR_HOP_PENALTY,
+ atomic_read(&bat_priv->hop_penalty)))
+ goto nla_put_failure;
+
+#ifdef CONFIG_BATMAN_ADV_DEBUG
+ if (nla_put_u32(msg, BATADV_ATTR_LOG_LEVEL,
+ atomic_read(&bat_priv->log_level)))
+ goto nla_put_failure;
+#endif /* CONFIG_BATMAN_ADV_DEBUG */
+
+#ifdef CONFIG_BATMAN_ADV_MCAST
+ if (nla_put_u8(msg, BATADV_ATTR_MULTICAST_FORCEFLOOD_ENABLED,
+ !atomic_read(&bat_priv->multicast_mode)))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, BATADV_ATTR_MULTICAST_FANOUT,
+ atomic_read(&bat_priv->multicast_fanout)))
+ goto nla_put_failure;
+#endif /* CONFIG_BATMAN_ADV_MCAST */
+
+#ifdef CONFIG_BATMAN_ADV_NC
+ if (nla_put_u8(msg, BATADV_ATTR_NETWORK_CODING_ENABLED,
+ !!atomic_read(&bat_priv->network_coding)))
+ goto nla_put_failure;
+#endif /* CONFIG_BATMAN_ADV_NC */
+
+ if (nla_put_u32(msg, BATADV_ATTR_ORIG_INTERVAL,
+ atomic_read(&bat_priv->orig_interval)))
+ goto nla_put_failure;
+
+ batadv_hardif_put(primary_if);
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+nla_put_failure:
+ batadv_hardif_put(primary_if);
+
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+/**
+ * batadv_netlink_notify_mesh() - send softif attributes to listener
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 on success, < 0 on error
+ */
+static int batadv_netlink_notify_mesh(struct batadv_priv *bat_priv)
+{
+ struct sk_buff *msg;
+ int ret;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ ret = batadv_netlink_mesh_fill(msg, bat_priv, BATADV_CMD_SET_MESH,
+ 0, 0, 0);
+ if (ret < 0) {
+ nlmsg_free(msg);
+ return ret;
+ }
+
+ genlmsg_multicast_netns(&batadv_netlink_family,
+ dev_net(bat_priv->soft_iface), msg, 0,
+ BATADV_NL_MCGRP_CONFIG, GFP_KERNEL);
+
+ return 0;
+}
+
+/**
+ * batadv_netlink_get_mesh() - Get softif attributes
+ * @skb: Netlink message with request data
+ * @info: receiver information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+static int batadv_netlink_get_mesh(struct sk_buff *skb, struct genl_info *info)
+{
+ struct batadv_priv *bat_priv = info->user_ptr[0];
+ struct sk_buff *msg;
+ int ret;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ ret = batadv_netlink_mesh_fill(msg, bat_priv, BATADV_CMD_GET_MESH,
+ info->snd_portid, info->snd_seq, 0);
+ if (ret < 0) {
+ nlmsg_free(msg);
+ return ret;
+ }
+
+ ret = genlmsg_reply(msg, info);
+
+ return ret;
+}
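+
+/* Userspace counterpart (illustrative only, kept out of the build): a
+ * minimal libnl-3 sketch issuing BATADV_CMD_GET_MESH for "bat0".
+ * Error handling and the reply callback are elided.
+ */
+#if 0
+#include <net/if.h>
+#include <netlink/genl/genl.h>
+#include <netlink/genl/ctrl.h>
+#include <linux/batman_adv.h>
+
+int main(void)
+{
+	struct nl_sock *sk = nl_socket_alloc();
+	struct nl_msg *msg = nlmsg_alloc();
+	int family;
+
+	genl_connect(sk);
+	family = genl_ctrl_resolve(sk, BATADV_NL_NAME);
+
+	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
+		    BATADV_CMD_GET_MESH, 1);
+	nla_put_u32(msg, BATADV_ATTR_MESH_IFINDEX,
+		    if_nametoindex("bat0"));
+
+	nl_send_auto(sk, msg);
+	/* ... nl_recvmsgs_default(sk) with a parse callback here */
+
+	nlmsg_free(msg);
+	nl_socket_free(sk);
+	return 0;
+}
+#endif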
+
+/**
+ * batadv_netlink_set_mesh() - Set softif attributes
+ * @skb: Netlink message with request data
+ * @info: receiver information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+static int batadv_netlink_set_mesh(struct sk_buff *skb, struct genl_info *info)
+{
+ struct batadv_priv *bat_priv = info->user_ptr[0];
+ struct nlattr *attr;
+
+ if (info->attrs[BATADV_ATTR_AGGREGATED_OGMS_ENABLED]) {
+ attr = info->attrs[BATADV_ATTR_AGGREGATED_OGMS_ENABLED];
+
+ atomic_set(&bat_priv->aggregated_ogms, !!nla_get_u8(attr));
+ }
+
+ if (info->attrs[BATADV_ATTR_AP_ISOLATION_ENABLED]) {
+ attr = info->attrs[BATADV_ATTR_AP_ISOLATION_ENABLED];
+
+ batadv_netlink_set_mesh_ap_isolation(attr, bat_priv);
+ }
+
+ if (info->attrs[BATADV_ATTR_ISOLATION_MARK]) {
+ attr = info->attrs[BATADV_ATTR_ISOLATION_MARK];
+
+ bat_priv->isolation_mark = nla_get_u32(attr);
+ }
+
+ if (info->attrs[BATADV_ATTR_ISOLATION_MASK]) {
+ attr = info->attrs[BATADV_ATTR_ISOLATION_MASK];
+
+ bat_priv->isolation_mark_mask = nla_get_u32(attr);
+ }
+
+ if (info->attrs[BATADV_ATTR_BONDING_ENABLED]) {
+ attr = info->attrs[BATADV_ATTR_BONDING_ENABLED];
+
+ atomic_set(&bat_priv->bonding, !!nla_get_u8(attr));
+ }
+
+#ifdef CONFIG_BATMAN_ADV_BLA
+ if (info->attrs[BATADV_ATTR_BRIDGE_LOOP_AVOIDANCE_ENABLED]) {
+ attr = info->attrs[BATADV_ATTR_BRIDGE_LOOP_AVOIDANCE_ENABLED];
+
+ atomic_set(&bat_priv->bridge_loop_avoidance,
+ !!nla_get_u8(attr));
+ batadv_bla_status_update(bat_priv->soft_iface);
+ }
+#endif /* CONFIG_BATMAN_ADV_BLA */
+
+#ifdef CONFIG_BATMAN_ADV_DAT
+ if (info->attrs[BATADV_ATTR_DISTRIBUTED_ARP_TABLE_ENABLED]) {
+ attr = info->attrs[BATADV_ATTR_DISTRIBUTED_ARP_TABLE_ENABLED];
+
+ atomic_set(&bat_priv->distributed_arp_table,
+ !!nla_get_u8(attr));
+ batadv_dat_status_update(bat_priv->soft_iface);
+ }
+#endif /* CONFIG_BATMAN_ADV_DAT */
+
+ if (info->attrs[BATADV_ATTR_FRAGMENTATION_ENABLED]) {
+ attr = info->attrs[BATADV_ATTR_FRAGMENTATION_ENABLED];
+
+ atomic_set(&bat_priv->fragmentation, !!nla_get_u8(attr));
+
+ rtnl_lock();
+ batadv_update_min_mtu(bat_priv->soft_iface);
+ rtnl_unlock();
+ }
+
+ if (info->attrs[BATADV_ATTR_GW_BANDWIDTH_DOWN]) {
+ attr = info->attrs[BATADV_ATTR_GW_BANDWIDTH_DOWN];
+
+ atomic_set(&bat_priv->gw.bandwidth_down, nla_get_u32(attr));
+ batadv_gw_tvlv_container_update(bat_priv);
+ }
+
+ if (info->attrs[BATADV_ATTR_GW_BANDWIDTH_UP]) {
+ attr = info->attrs[BATADV_ATTR_GW_BANDWIDTH_UP];
+
+ atomic_set(&bat_priv->gw.bandwidth_up, nla_get_u32(attr));
+ batadv_gw_tvlv_container_update(bat_priv);
+ }
+
+ if (info->attrs[BATADV_ATTR_GW_MODE]) {
+ u8 gw_mode;
+
+ attr = info->attrs[BATADV_ATTR_GW_MODE];
+ gw_mode = nla_get_u8(attr);
+
+ if (gw_mode <= BATADV_GW_MODE_SERVER) {
+ /* Invoking batadv_gw_reselect() is not enough to really
+ * de-select the current GW. It will only instruct the
+ * gateway client code to perform a re-election the next
+ * time that this is needed.
+ *
+			 * When gw client mode is being switched off, the
+			 * current GW must be de-selected explicitly; otherwise
+			 * no GW_ADD uevent is thrown on client mode
+			 * re-activation. This operation is performed in
+			 * batadv_gw_check_client_stop().
+ */
+ batadv_gw_reselect(bat_priv);
+
+ /* always call batadv_gw_check_client_stop() before
+ * changing the gateway state
+ */
+ batadv_gw_check_client_stop(bat_priv);
+ atomic_set(&bat_priv->gw.mode, gw_mode);
+ batadv_gw_tvlv_container_update(bat_priv);
+ }
+ }
+
+ if (info->attrs[BATADV_ATTR_GW_SEL_CLASS] &&
+ bat_priv->algo_ops->gw.get_best_gw_node &&
+ bat_priv->algo_ops->gw.is_eligible) {
+ /* setting the GW selection class is allowed only if the routing
+ * algorithm in use implements the GW API
+ */
+
+ u32 sel_class_max = bat_priv->algo_ops->gw.sel_class_max;
+ u32 sel_class;
+
+ attr = info->attrs[BATADV_ATTR_GW_SEL_CLASS];
+ sel_class = nla_get_u32(attr);
+
+ if (sel_class >= 1 && sel_class <= sel_class_max) {
+ atomic_set(&bat_priv->gw.sel_class, sel_class);
+ batadv_gw_reselect(bat_priv);
+ }
+ }
+
+ if (info->attrs[BATADV_ATTR_HOP_PENALTY]) {
+ attr = info->attrs[BATADV_ATTR_HOP_PENALTY];
+
+ atomic_set(&bat_priv->hop_penalty, nla_get_u8(attr));
+ }
+
+#ifdef CONFIG_BATMAN_ADV_DEBUG
+ if (info->attrs[BATADV_ATTR_LOG_LEVEL]) {
+ attr = info->attrs[BATADV_ATTR_LOG_LEVEL];
+
+ atomic_set(&bat_priv->log_level,
+ nla_get_u32(attr) & BATADV_DBG_ALL);
+ }
+#endif /* CONFIG_BATMAN_ADV_DEBUG */
+
+#ifdef CONFIG_BATMAN_ADV_MCAST
+ if (info->attrs[BATADV_ATTR_MULTICAST_FORCEFLOOD_ENABLED]) {
+ attr = info->attrs[BATADV_ATTR_MULTICAST_FORCEFLOOD_ENABLED];
+
+ atomic_set(&bat_priv->multicast_mode, !nla_get_u8(attr));
+ }
+
+ if (info->attrs[BATADV_ATTR_MULTICAST_FANOUT]) {
+ attr = info->attrs[BATADV_ATTR_MULTICAST_FANOUT];
+
+ atomic_set(&bat_priv->multicast_fanout, nla_get_u32(attr));
+ }
+#endif /* CONFIG_BATMAN_ADV_MCAST */
+
+#ifdef CONFIG_BATMAN_ADV_NC
+ if (info->attrs[BATADV_ATTR_NETWORK_CODING_ENABLED]) {
+ attr = info->attrs[BATADV_ATTR_NETWORK_CODING_ENABLED];
+
+ atomic_set(&bat_priv->network_coding, !!nla_get_u8(attr));
+ batadv_nc_status_update(bat_priv->soft_iface);
+ }
+#endif /* CONFIG_BATMAN_ADV_NC */
+
+ if (info->attrs[BATADV_ATTR_ORIG_INTERVAL]) {
+ u32 orig_interval;
+
+ attr = info->attrs[BATADV_ATTR_ORIG_INTERVAL];
+ orig_interval = nla_get_u32(attr);
+
+ orig_interval = min_t(u32, orig_interval, INT_MAX);
+ orig_interval = max_t(u32, orig_interval, 2 * BATADV_JITTER);
+
+ atomic_set(&bat_priv->orig_interval, orig_interval);
+ }
+
+ batadv_netlink_notify_mesh(bat_priv);
+
+ return 0;
+}
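+
+/* Illustrative sketch (not part of the sources): the clamping above bounds
+ * the originator interval to [2 * BATADV_JITTER, INT_MAX] milliseconds.
+ * Assuming the default BATADV_JITTER of 20, an undersized request is raised
+ * to the 40 ms minimum while oversized values are capped:
+ *
+ *	u32 iv = 5;
+ *	iv = min_t(u32, iv, INT_MAX);		 // iv == 5
+ *	iv = max_t(u32, iv, 2 * BATADV_JITTER);	 // iv == 40
+ */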
+
+/**
+ * batadv_netlink_tp_meter_put() - Fill message with the cookie of a started tp_meter session
+ * @msg: netlink message to be sent back
+ * @cookie: tp meter session cookie
+ *
+ * Return: 0 on success, < 0 on error
+ */
+static int
+batadv_netlink_tp_meter_put(struct sk_buff *msg, u32 cookie)
+{
+ if (nla_put_u32(msg, BATADV_ATTR_TPMETER_COOKIE, cookie))
+ return -ENOBUFS;
+
+ return 0;
+}
+
+/**
+ * batadv_netlink_tpmeter_notify() - send tp_meter result via netlink to client
+ * @bat_priv: the bat priv with all the soft interface information
+ * @dst: destination of tp_meter session
+ * @result: reason for tp meter session stop
+ * @test_time: total time of the tp_meter session
+ * @total_bytes: bytes acked to the receiver
+ * @cookie: cookie of tp_meter session
+ *
+ * Return: 0 on success, < 0 on error
+ */
+int batadv_netlink_tpmeter_notify(struct batadv_priv *bat_priv, const u8 *dst,
+ u8 result, u32 test_time, u64 total_bytes,
+ u32 cookie)
+{
+ struct sk_buff *msg;
+ void *hdr;
+ int ret;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = genlmsg_put(msg, 0, 0, &batadv_netlink_family, 0,
+ BATADV_CMD_TP_METER);
+ if (!hdr) {
+ ret = -ENOBUFS;
+ goto err_genlmsg;
+ }
+
+ if (nla_put_u32(msg, BATADV_ATTR_TPMETER_COOKIE, cookie))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, BATADV_ATTR_TPMETER_TEST_TIME, test_time))
+ goto nla_put_failure;
+
+ if (nla_put_u64_64bit(msg, BATADV_ATTR_TPMETER_BYTES, total_bytes,
+ BATADV_ATTR_PAD))
+ goto nla_put_failure;
+
+ if (nla_put_u8(msg, BATADV_ATTR_TPMETER_RESULT, result))
+ goto nla_put_failure;
+
+ if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN, dst))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+
+ genlmsg_multicast_netns(&batadv_netlink_family,
+ dev_net(bat_priv->soft_iface), msg, 0,
+ BATADV_NL_MCGRP_TPMETER, GFP_KERNEL);
+
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ ret = -EMSGSIZE;
+
+err_genlmsg:
+ nlmsg_free(msg);
+ return ret;
+}
+
+/**
+ * batadv_netlink_tp_meter_start() - Start a new tp_meter session
+ * @skb: received netlink message
+ * @info: receiver information
+ *
+ * Return: 0 on success, < 0 on error
+ */
+static int
+batadv_netlink_tp_meter_start(struct sk_buff *skb, struct genl_info *info)
+{
+ struct batadv_priv *bat_priv = info->user_ptr[0];
+ struct sk_buff *msg = NULL;
+ u32 test_length;
+ void *msg_head;
+ u32 cookie;
+ u8 *dst;
+ int ret;
+
+ if (!info->attrs[BATADV_ATTR_ORIG_ADDRESS])
+ return -EINVAL;
+
+ if (!info->attrs[BATADV_ATTR_TPMETER_TEST_TIME])
+ return -EINVAL;
+
+ dst = nla_data(info->attrs[BATADV_ATTR_ORIG_ADDRESS]);
+
+ test_length = nla_get_u32(info->attrs[BATADV_ATTR_TPMETER_TEST_TIME]);
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ msg_head = genlmsg_put(msg, info->snd_portid, info->snd_seq,
+ &batadv_netlink_family, 0,
+ BATADV_CMD_TP_METER);
+ if (!msg_head) {
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ batadv_tp_start(bat_priv, dst, test_length, &cookie);
+
+ ret = batadv_netlink_tp_meter_put(msg, cookie);
+
+ out:
+ if (ret) {
+ if (msg)
+ nlmsg_free(msg);
+ return ret;
+ }
+
+ genlmsg_end(msg, msg_head);
+ return genlmsg_reply(msg, info);
+}
+
+/**
+ * batadv_netlink_tp_meter_cancel() - Cancel a running tp_meter session
+ * @skb: received netlink message
+ * @info: receiver information
+ *
+ * Return: 0 on success, < 0 on error
+ */
+static int
+batadv_netlink_tp_meter_cancel(struct sk_buff *skb, struct genl_info *info)
+{
+ struct batadv_priv *bat_priv = info->user_ptr[0];
+ u8 *dst;
+ int ret = 0;
+
+ if (!info->attrs[BATADV_ATTR_ORIG_ADDRESS])
+ return -EINVAL;
+
+ dst = nla_data(info->attrs[BATADV_ATTR_ORIG_ADDRESS]);
+
+ batadv_tp_stop(bat_priv, dst, BATADV_TP_REASON_CANCEL);
+
+ return ret;
+}
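+
+/* Illustrative sketch of a tp_meter session as driven by the handlers above
+ * (ordering taken from this file, timing purely hypothetical):
+ *
+ *	BATADV_CMD_TP_METER with ORIG_ADDRESS + TPMETER_TEST_TIME
+ *	  -> batadv_tp_start() assigns a cookie, the unicast reply carries it
+ *	... measurement runs for the requested test time ...
+ *	  -> batadv_netlink_tpmeter_notify() multicasts the result on the
+ *	     BATADV_NL_MCGRP_TPMETER group, matched to the session by cookie
+ *
+ *	BATADV_CMD_TP_METER_CANCEL with ORIG_ADDRESS
+ *	  -> batadv_tp_stop(..., BATADV_TP_REASON_CANCEL) aborts the session
+ */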
+
+/**
+ * batadv_netlink_hardif_fill() - Fill message with hardif attributes
+ * @msg: Netlink message to dump into
+ * @bat_priv: the bat priv with all the soft interface information
+ * @hard_iface: hard interface which was modified
+ * @cmd: type of message to generate
+ * @portid: Port making netlink request
+ * @seq: sequence number for message
+ * @flags: Additional flags for message
+ * @cb: Control block containing additional options
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+static int batadv_netlink_hardif_fill(struct sk_buff *msg,
+ struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *hard_iface,
+ enum batadv_nl_commands cmd,
+ u32 portid, u32 seq, int flags,
+ struct netlink_callback *cb)
+{
+ struct net_device *net_dev = hard_iface->net_dev;
+ void *hdr;
+
+ hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, flags, cmd);
+ if (!hdr)
+ return -ENOBUFS;
+
+ if (cb)
+ genl_dump_check_consistent(cb, hdr);
+
+ if (nla_put_u32(msg, BATADV_ATTR_MESH_IFINDEX,
+ bat_priv->soft_iface->ifindex))
+ goto nla_put_failure;
+
+ if (nla_put_string(msg, BATADV_ATTR_MESH_IFNAME,
+ bat_priv->soft_iface->name))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
+ net_dev->ifindex) ||
+ nla_put_string(msg, BATADV_ATTR_HARD_IFNAME,
+ net_dev->name) ||
+ nla_put(msg, BATADV_ATTR_HARD_ADDRESS, ETH_ALEN,
+ net_dev->dev_addr))
+ goto nla_put_failure;
+
+ if (hard_iface->if_status == BATADV_IF_ACTIVE) {
+ if (nla_put_flag(msg, BATADV_ATTR_ACTIVE))
+ goto nla_put_failure;
+ }
+
+ if (nla_put_u8(msg, BATADV_ATTR_HOP_PENALTY,
+ atomic_read(&hard_iface->hop_penalty)))
+ goto nla_put_failure;
+
+#ifdef CONFIG_BATMAN_ADV_BATMAN_V
+ if (nla_put_u32(msg, BATADV_ATTR_ELP_INTERVAL,
+ atomic_read(&hard_iface->bat_v.elp_interval)))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, BATADV_ATTR_THROUGHPUT_OVERRIDE,
+ atomic_read(&hard_iface->bat_v.throughput_override)))
+ goto nla_put_failure;
+#endif /* CONFIG_BATMAN_ADV_BATMAN_V */
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+/**
+ * batadv_netlink_notify_hardif() - send hardif attributes to listener
+ * @bat_priv: the bat priv with all the soft interface information
+ * @hard_iface: hard interface which was modified
+ *
+ * Return: 0 on success, < 0 on error
+ */
+static int batadv_netlink_notify_hardif(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *hard_iface)
+{
+ struct sk_buff *msg;
+ int ret;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ ret = batadv_netlink_hardif_fill(msg, bat_priv, hard_iface,
+ BATADV_CMD_SET_HARDIF, 0, 0, 0, NULL);
+ if (ret < 0) {
+ nlmsg_free(msg);
+ return ret;
+ }
+
+ genlmsg_multicast_netns(&batadv_netlink_family,
+ dev_net(bat_priv->soft_iface), msg, 0,
+ BATADV_NL_MCGRP_CONFIG, GFP_KERNEL);
+
+ return 0;
+}
+
+/**
+ * batadv_netlink_get_hardif() - Get hardif attributes
+ * @skb: Netlink message with request data
+ * @info: receiver information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+static int batadv_netlink_get_hardif(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct batadv_hard_iface *hard_iface = info->user_ptr[1];
+ struct batadv_priv *bat_priv = info->user_ptr[0];
+ struct sk_buff *msg;
+ int ret;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ ret = batadv_netlink_hardif_fill(msg, bat_priv, hard_iface,
+ BATADV_CMD_GET_HARDIF,
+ info->snd_portid, info->snd_seq, 0,
+ NULL);
+ if (ret < 0) {
+ nlmsg_free(msg);
+ return ret;
+ }
+
+ ret = genlmsg_reply(msg, info);
+
+ return ret;
+}
+
+/**
+ * batadv_netlink_set_hardif() - Set hardif attributes
+ * @skb: Netlink message with request data
+ * @info: receiver information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+static int batadv_netlink_set_hardif(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct batadv_hard_iface *hard_iface = info->user_ptr[1];
+ struct batadv_priv *bat_priv = info->user_ptr[0];
+ struct nlattr *attr;
+
+ if (info->attrs[BATADV_ATTR_HOP_PENALTY]) {
+ attr = info->attrs[BATADV_ATTR_HOP_PENALTY];
+
+ atomic_set(&hard_iface->hop_penalty, nla_get_u8(attr));
+ }
+
+#ifdef CONFIG_BATMAN_ADV_BATMAN_V
+
+ if (info->attrs[BATADV_ATTR_ELP_INTERVAL]) {
+ attr = info->attrs[BATADV_ATTR_ELP_INTERVAL];
+
+ atomic_set(&hard_iface->bat_v.elp_interval, nla_get_u32(attr));
+ }
+
+ if (info->attrs[BATADV_ATTR_THROUGHPUT_OVERRIDE]) {
+ attr = info->attrs[BATADV_ATTR_THROUGHPUT_OVERRIDE];
+
+ atomic_set(&hard_iface->bat_v.throughput_override,
+ nla_get_u32(attr));
+ }
+#endif /* CONFIG_BATMAN_ADV_BATMAN_V */
+
+ batadv_netlink_notify_hardif(bat_priv, hard_iface);
+
+ return 0;
+}
+
+/**
+ * batadv_netlink_dump_hardif() - Dump all hard interfaces into a message
+ * @msg: Netlink message to dump into
+ * @cb: Parameters from query
+ *
+ * Return: error code, or length of reply message on success
+ */
+static int
+batadv_netlink_dump_hardif(struct sk_buff *msg, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(cb->skb->sk);
+ struct net_device *soft_iface;
+ struct batadv_hard_iface *hard_iface;
+ struct batadv_priv *bat_priv;
+ int ifindex;
+ int portid = NETLINK_CB(cb->skb).portid;
+ int skip = cb->args[0];
+ int i = 0;
+
+ ifindex = batadv_netlink_get_ifindex(cb->nlh,
+ BATADV_ATTR_MESH_IFINDEX);
+ if (!ifindex)
+ return -EINVAL;
+
+ soft_iface = dev_get_by_index(net, ifindex);
+ if (!soft_iface)
+ return -ENODEV;
+
+ if (!batadv_softif_is_valid(soft_iface)) {
+ dev_put(soft_iface);
+ return -ENODEV;
+ }
+
+ bat_priv = netdev_priv(soft_iface);
+
+ rtnl_lock();
+ cb->seq = batadv_hardif_generation << 1 | 1;
+
+ list_for_each_entry(hard_iface, &batadv_hardif_list, list) {
+ if (hard_iface->soft_iface != soft_iface)
+ continue;
+
+ if (i++ < skip)
+ continue;
+
+ if (batadv_netlink_hardif_fill(msg, bat_priv, hard_iface,
+ BATADV_CMD_GET_HARDIF,
+ portid, cb->nlh->nlmsg_seq,
+ NLM_F_MULTI, cb)) {
+ i--;
+ break;
+ }
+ }
+
+ rtnl_unlock();
+
+ dev_put(soft_iface);
+
+ cb->args[0] = i;
+
+ return msg->len;
+}
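+
+/* Illustrative sketch (hypothetical numbers): the dump callback above is
+ * invoked repeatedly until it returns 0, with cb->args[0] persisting between
+ * passes. With four hard interfaces and room for only two entries per
+ * message:
+ *
+ *	pass 1: skip == 0, fills entries 0..1, cb->args[0] = 2
+ *	pass 2: skip == 2, fills entries 2..3, cb->args[0] = 4
+ *	pass 3: nothing left to add, msg->len == 0 terminates the dump
+ */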
+
+/**
+ * batadv_netlink_vlan_fill() - Fill message with vlan attributes
+ * @msg: Netlink message to dump into
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vlan: vlan which was modified
+ * @cmd: type of message to generate
+ * @portid: Port making netlink request
+ * @seq: sequence number for message
+ * @flags: Additional flags for message
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+static int batadv_netlink_vlan_fill(struct sk_buff *msg,
+ struct batadv_priv *bat_priv,
+ struct batadv_softif_vlan *vlan,
+ enum batadv_nl_commands cmd,
+ u32 portid, u32 seq, int flags)
+{
+ void *hdr;
+
+ hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, flags, cmd);
+ if (!hdr)
+ return -ENOBUFS;
+
+ if (nla_put_u32(msg, BATADV_ATTR_MESH_IFINDEX,
+ bat_priv->soft_iface->ifindex))
+ goto nla_put_failure;
+
+ if (nla_put_string(msg, BATADV_ATTR_MESH_IFNAME,
+ bat_priv->soft_iface->name))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, BATADV_ATTR_VLANID, vlan->vid & VLAN_VID_MASK))
+ goto nla_put_failure;
+
+ if (nla_put_u8(msg, BATADV_ATTR_AP_ISOLATION_ENABLED,
+ !!atomic_read(&vlan->ap_isolation)))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+/**
+ * batadv_netlink_notify_vlan() - send vlan attributes to listener
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vlan: vlan which was modified
+ *
+ * Return: 0 on success, < 0 on error
+ */
+static int batadv_netlink_notify_vlan(struct batadv_priv *bat_priv,
+ struct batadv_softif_vlan *vlan)
+{
+ struct sk_buff *msg;
+ int ret;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ ret = batadv_netlink_vlan_fill(msg, bat_priv, vlan,
+ BATADV_CMD_SET_VLAN, 0, 0, 0);
+ if (ret < 0) {
+ nlmsg_free(msg);
+ return ret;
+ }
+
+ genlmsg_multicast_netns(&batadv_netlink_family,
+ dev_net(bat_priv->soft_iface), msg, 0,
+ BATADV_NL_MCGRP_CONFIG, GFP_KERNEL);
+
+ return 0;
+}
+
+/**
+ * batadv_netlink_get_vlan() - Get vlan attributes
+ * @skb: Netlink message with request data
+ * @info: receiver information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+static int batadv_netlink_get_vlan(struct sk_buff *skb, struct genl_info *info)
+{
+ struct batadv_softif_vlan *vlan = info->user_ptr[1];
+ struct batadv_priv *bat_priv = info->user_ptr[0];
+ struct sk_buff *msg;
+ int ret;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ ret = batadv_netlink_vlan_fill(msg, bat_priv, vlan, BATADV_CMD_GET_VLAN,
+ info->snd_portid, info->snd_seq, 0);
+ if (ret < 0) {
+ nlmsg_free(msg);
+ return ret;
+ }
+
+ ret = genlmsg_reply(msg, info);
+
+ return ret;
+}
+
+/**
+ * batadv_netlink_set_vlan() - Set vlan attributes
+ * @skb: Netlink message with request data
+ * @info: receiver information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+static int batadv_netlink_set_vlan(struct sk_buff *skb, struct genl_info *info)
+{
+ struct batadv_softif_vlan *vlan = info->user_ptr[1];
+ struct batadv_priv *bat_priv = info->user_ptr[0];
+ struct nlattr *attr;
+
+ if (info->attrs[BATADV_ATTR_AP_ISOLATION_ENABLED]) {
+ attr = info->attrs[BATADV_ATTR_AP_ISOLATION_ENABLED];
+
+ atomic_set(&vlan->ap_isolation, !!nla_get_u8(attr));
+ }
+
+ batadv_netlink_notify_vlan(bat_priv, vlan);
+
+ return 0;
+}
+
+/**
+ * batadv_get_softif_from_info() - Retrieve soft interface from genl attributes
+ * @net: the applicable net namespace
+ * @info: receiver information
+ *
+ * Return: Pointer to soft interface (with increased refcnt) on success, error
+ * pointer on error
+ */
+static struct net_device *
+batadv_get_softif_from_info(struct net *net, struct genl_info *info)
+{
+ struct net_device *soft_iface;
+ int ifindex;
+
+ if (!info->attrs[BATADV_ATTR_MESH_IFINDEX])
+ return ERR_PTR(-EINVAL);
+
+ ifindex = nla_get_u32(info->attrs[BATADV_ATTR_MESH_IFINDEX]);
+
+ soft_iface = dev_get_by_index(net, ifindex);
+ if (!soft_iface)
+ return ERR_PTR(-ENODEV);
+
+ if (!batadv_softif_is_valid(soft_iface))
+ goto err_put_softif;
+
+ return soft_iface;
+
+err_put_softif:
+ dev_put(soft_iface);
+
+ return ERR_PTR(-EINVAL);
+}
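+
+/* Illustrative sketch: callers follow the usual ERR_PTR() convention, as
+ * batadv_pre_doit() below does:
+ *
+ *	soft_iface = batadv_get_softif_from_info(net, info);
+ *	if (IS_ERR(soft_iface))
+ *		return PTR_ERR(soft_iface);
+ *	...
+ *	dev_put(soft_iface);	// drop the reference taken above
+ */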
+
+/**
+ * batadv_get_hardif_from_info() - Retrieve hardif from genl attributes
+ * @bat_priv: the bat priv with all the soft interface information
+ * @net: the applicable net namespace
+ * @info: receiver information
+ *
+ * Return: Pointer to hard interface (with increased refcnt) on success, error
+ * pointer on error
+ */
+static struct batadv_hard_iface *
+batadv_get_hardif_from_info(struct batadv_priv *bat_priv, struct net *net,
+ struct genl_info *info)
+{
+ struct batadv_hard_iface *hard_iface;
+ struct net_device *hard_dev;
+ unsigned int hardif_index;
+
+ if (!info->attrs[BATADV_ATTR_HARD_IFINDEX])
+ return ERR_PTR(-EINVAL);
+
+ hardif_index = nla_get_u32(info->attrs[BATADV_ATTR_HARD_IFINDEX]);
+
+ hard_dev = dev_get_by_index(net, hardif_index);
+ if (!hard_dev)
+ return ERR_PTR(-ENODEV);
+
+ hard_iface = batadv_hardif_get_by_netdev(hard_dev);
+ if (!hard_iface)
+ goto err_put_harddev;
+
+ if (hard_iface->soft_iface != bat_priv->soft_iface)
+ goto err_put_hardif;
+
+ /* hard_dev is referenced by hard_iface and not needed here */
+ dev_put(hard_dev);
+
+ return hard_iface;
+
+err_put_hardif:
+ batadv_hardif_put(hard_iface);
+err_put_harddev:
+ dev_put(hard_dev);
+
+ return ERR_PTR(-EINVAL);
+}
+
+/**
+ * batadv_get_vlan_from_info() - Retrieve vlan from genl attributes
+ * @bat_priv: the bat priv with all the soft interface information
+ * @net: the applicable net namespace
+ * @info: receiver information
+ *
+ * Return: Pointer to vlan on success (with increased refcnt), error pointer
+ * on error
+ */
+static struct batadv_softif_vlan *
+batadv_get_vlan_from_info(struct batadv_priv *bat_priv, struct net *net,
+ struct genl_info *info)
+{
+ struct batadv_softif_vlan *vlan;
+ u16 vid;
+
+ if (!info->attrs[BATADV_ATTR_VLANID])
+ return ERR_PTR(-EINVAL);
+
+ vid = nla_get_u16(info->attrs[BATADV_ATTR_VLANID]);
+
+ vlan = batadv_softif_vlan_get(bat_priv, vid | BATADV_VLAN_HAS_TAG);
+ if (!vlan)
+ return ERR_PTR(-ENOENT);
+
+ return vlan;
+}
+
+/**
+ * batadv_pre_doit() - Prepare batman-adv genl doit request
+ * @ops: requested netlink operation
+ * @skb: Netlink message with request data
+ * @info: receiver information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+static int batadv_pre_doit(const struct genl_split_ops *ops,
+ struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct net *net = genl_info_net(info);
+ struct batadv_hard_iface *hard_iface;
+ struct batadv_priv *bat_priv = NULL;
+ struct batadv_softif_vlan *vlan;
+ struct net_device *soft_iface;
+ u8 user_ptr1_flags;
+ u8 mesh_dep_flags;
+ int ret;
+
+ user_ptr1_flags = BATADV_FLAG_NEED_HARDIF | BATADV_FLAG_NEED_VLAN;
+ if (WARN_ON(hweight8(ops->internal_flags & user_ptr1_flags) > 1))
+ return -EINVAL;
+
+ mesh_dep_flags = BATADV_FLAG_NEED_HARDIF | BATADV_FLAG_NEED_VLAN;
+ if (WARN_ON((ops->internal_flags & mesh_dep_flags) &&
+ (~ops->internal_flags & BATADV_FLAG_NEED_MESH)))
+ return -EINVAL;
+
+ if (ops->internal_flags & BATADV_FLAG_NEED_MESH) {
+ soft_iface = batadv_get_softif_from_info(net, info);
+ if (IS_ERR(soft_iface))
+ return PTR_ERR(soft_iface);
+
+ bat_priv = netdev_priv(soft_iface);
+ info->user_ptr[0] = bat_priv;
+ }
+
+ if (ops->internal_flags & BATADV_FLAG_NEED_HARDIF) {
+ hard_iface = batadv_get_hardif_from_info(bat_priv, net, info);
+ if (IS_ERR(hard_iface)) {
+ ret = PTR_ERR(hard_iface);
+ goto err_put_softif;
+ }
+
+ info->user_ptr[1] = hard_iface;
+ }
+
+ if (ops->internal_flags & BATADV_FLAG_NEED_VLAN) {
+ vlan = batadv_get_vlan_from_info(bat_priv, net, info);
+ if (IS_ERR(vlan)) {
+ ret = PTR_ERR(vlan);
+ goto err_put_softif;
+ }
+
+ info->user_ptr[1] = vlan;
+ }
+
+ return 0;
+
+err_put_softif:
+ if (bat_priv)
+ dev_put(bat_priv->soft_iface);
+
+ return ret;
+}
+
+/**
+ * batadv_post_doit() - End batman-adv genl doit request
+ * @ops: requested netlink operation
+ * @skb: Netlink message with request data
+ * @info: receiver information
+ */
+static void batadv_post_doit(const struct genl_split_ops *ops,
+ struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct batadv_hard_iface *hard_iface;
+ struct batadv_softif_vlan *vlan;
+ struct batadv_priv *bat_priv;
+
+ if (ops->internal_flags & BATADV_FLAG_NEED_HARDIF &&
+ info->user_ptr[1]) {
+ hard_iface = info->user_ptr[1];
+
+ batadv_hardif_put(hard_iface);
+ }
+
+ if (ops->internal_flags & BATADV_FLAG_NEED_VLAN && info->user_ptr[1]) {
+ vlan = info->user_ptr[1];
+ batadv_softif_vlan_put(vlan);
+ }
+
+ if (ops->internal_flags & BATADV_FLAG_NEED_MESH && info->user_ptr[0]) {
+ bat_priv = info->user_ptr[0];
+ dev_put(bat_priv->soft_iface);
+ }
+}
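+
+/* Illustrative sketch (hypothetical request): for a BATADV_CMD_SET_HARDIF
+ * doit request the generic netlink core runs, in order,
+ *
+ *	batadv_pre_doit()		// takes softif + hardif references
+ *	batadv_netlink_set_hardif()	// may use user_ptr[0]/user_ptr[1]
+ *	batadv_post_doit()		// drops the references again
+ *
+ * so the doit handlers above need no reference counting of their own.
+ */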
+
+static const struct genl_small_ops batadv_netlink_ops[] = {
+ {
+ .cmd = BATADV_CMD_GET_MESH,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ /* can be retrieved by unprivileged users */
+ .doit = batadv_netlink_get_mesh,
+ .internal_flags = BATADV_FLAG_NEED_MESH,
+ },
+ {
+ .cmd = BATADV_CMD_TP_METER,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .doit = batadv_netlink_tp_meter_start,
+ .internal_flags = BATADV_FLAG_NEED_MESH,
+ },
+ {
+ .cmd = BATADV_CMD_TP_METER_CANCEL,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .doit = batadv_netlink_tp_meter_cancel,
+ .internal_flags = BATADV_FLAG_NEED_MESH,
+ },
+ {
+ .cmd = BATADV_CMD_GET_ROUTING_ALGOS,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .dumpit = batadv_algo_dump,
+ },
+ {
+ .cmd = BATADV_CMD_GET_HARDIF,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ /* can be retrieved by unprivileged users */
+ .dumpit = batadv_netlink_dump_hardif,
+ .doit = batadv_netlink_get_hardif,
+ .internal_flags = BATADV_FLAG_NEED_MESH |
+ BATADV_FLAG_NEED_HARDIF,
+ },
+ {
+ .cmd = BATADV_CMD_GET_TRANSTABLE_LOCAL,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .dumpit = batadv_tt_local_dump,
+ },
+ {
+ .cmd = BATADV_CMD_GET_TRANSTABLE_GLOBAL,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .dumpit = batadv_tt_global_dump,
+ },
+ {
+ .cmd = BATADV_CMD_GET_ORIGINATORS,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .dumpit = batadv_orig_dump,
+ },
+ {
+ .cmd = BATADV_CMD_GET_NEIGHBORS,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .dumpit = batadv_hardif_neigh_dump,
+ },
+ {
+ .cmd = BATADV_CMD_GET_GATEWAYS,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .dumpit = batadv_gw_dump,
+ },
+ {
+ .cmd = BATADV_CMD_GET_BLA_CLAIM,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .dumpit = batadv_bla_claim_dump,
+ },
+ {
+ .cmd = BATADV_CMD_GET_BLA_BACKBONE,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .dumpit = batadv_bla_backbone_dump,
+ },
+ {
+ .cmd = BATADV_CMD_GET_DAT_CACHE,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .dumpit = batadv_dat_cache_dump,
+ },
+ {
+ .cmd = BATADV_CMD_GET_MCAST_FLAGS,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .dumpit = batadv_mcast_flags_dump,
+ },
+ {
+ .cmd = BATADV_CMD_SET_MESH,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .doit = batadv_netlink_set_mesh,
+ .internal_flags = BATADV_FLAG_NEED_MESH,
+ },
+ {
+ .cmd = BATADV_CMD_SET_HARDIF,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .doit = batadv_netlink_set_hardif,
+ .internal_flags = BATADV_FLAG_NEED_MESH |
+ BATADV_FLAG_NEED_HARDIF,
+ },
+ {
+ .cmd = BATADV_CMD_GET_VLAN,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ /* can be retrieved by unprivileged users */
+ .doit = batadv_netlink_get_vlan,
+ .internal_flags = BATADV_FLAG_NEED_MESH |
+ BATADV_FLAG_NEED_VLAN,
+ },
+ {
+ .cmd = BATADV_CMD_SET_VLAN,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .doit = batadv_netlink_set_vlan,
+ .internal_flags = BATADV_FLAG_NEED_MESH |
+ BATADV_FLAG_NEED_VLAN,
+ },
+};
+
+struct genl_family batadv_netlink_family __ro_after_init = {
+ .hdrsize = 0,
+ .name = BATADV_NL_NAME,
+ .version = 1,
+ .maxattr = BATADV_ATTR_MAX,
+ .policy = batadv_netlink_policy,
+ .netnsok = true,
+ .pre_doit = batadv_pre_doit,
+ .post_doit = batadv_post_doit,
+ .module = THIS_MODULE,
+ .small_ops = batadv_netlink_ops,
+ .n_small_ops = ARRAY_SIZE(batadv_netlink_ops),
+ .resv_start_op = BATADV_CMD_SET_VLAN + 1,
+ .mcgrps = batadv_netlink_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(batadv_netlink_mcgrps),
+};
+
+/**
+ * batadv_netlink_register() - register batadv genl netlink family
+ */
+void __init batadv_netlink_register(void)
+{
+ int ret;
+
+ ret = genl_register_family(&batadv_netlink_family);
+ if (ret)
+		pr_warn("unable to register netlink family\n");
+}
+
+/**
+ * batadv_netlink_unregister() - unregister batadv genl netlink family
+ */
+void batadv_netlink_unregister(void)
+{
+ genl_unregister_family(&batadv_netlink_family);
+}
diff --git a/net/batman-adv/netlink.h b/net/batman-adv/netlink.h
new file mode 100644
index 0000000000..876d2806a6
--- /dev/null
+++ b/net/batman-adv/netlink.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Matthias Schiffer
+ */
+
+#ifndef _NET_BATMAN_ADV_NETLINK_H_
+#define _NET_BATMAN_ADV_NETLINK_H_
+
+#include "main.h"
+
+#include <linux/netlink.h>
+#include <linux/types.h>
+#include <net/genetlink.h>
+
+void batadv_netlink_register(void);
+void batadv_netlink_unregister(void);
+int batadv_netlink_get_ifindex(const struct nlmsghdr *nlh, int attrtype);
+
+int batadv_netlink_tpmeter_notify(struct batadv_priv *bat_priv, const u8 *dst,
+ u8 result, u32 test_time, u64 total_bytes,
+ u32 cookie);
+
+extern struct genl_family batadv_netlink_family;
+
+#endif /* _NET_BATMAN_ADV_NETLINK_H_ */
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
new file mode 100644
index 0000000000..71ebd0284f
--- /dev/null
+++ b/net/batman-adv/network-coding.c
@@ -0,0 +1,1873 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Martin Hundebøll, Jeppe Ledet-Pedersen
+ */
+
+#include "network-coding.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/byteorder/generic.h>
+#include <linux/compiler.h>
+#include <linux/container_of.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/gfp.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/init.h>
+#include <linux/jhash.h>
+#include <linux/jiffies.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/printk.h>
+#include <linux/random.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <uapi/linux/batadv_packet.h>
+
+#include "hash.h"
+#include "log.h"
+#include "originator.h"
+#include "routing.h"
+#include "send.h"
+#include "tvlv.h"
+
+static struct lock_class_key batadv_nc_coding_hash_lock_class_key;
+static struct lock_class_key batadv_nc_decoding_hash_lock_class_key;
+
+static void batadv_nc_worker(struct work_struct *work);
+static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if);
+
+/**
+ * batadv_nc_init() - one-time initialization for network coding
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+int __init batadv_nc_init(void)
+{
+ /* Register our packet type */
+ return batadv_recv_handler_register(BATADV_CODED,
+ batadv_nc_recv_coded_packet);
+}
+
+/**
+ * batadv_nc_start_timer() - initialise the nc periodic worker
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_nc_start_timer(struct batadv_priv *bat_priv)
+{
+ queue_delayed_work(batadv_event_workqueue, &bat_priv->nc.work,
+ msecs_to_jiffies(10));
+}
+
+/**
+ * batadv_nc_tvlv_container_update() - update the network coding tvlv container
+ * after network coding setting change
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_nc_tvlv_container_update(struct batadv_priv *bat_priv)
+{
+ char nc_mode;
+
+ nc_mode = atomic_read(&bat_priv->network_coding);
+
+ switch (nc_mode) {
+ case 0:
+ batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_NC, 1);
+ break;
+ case 1:
+ batadv_tvlv_container_register(bat_priv, BATADV_TVLV_NC, 1,
+ NULL, 0);
+ break;
+ }
+}
+
+/**
+ * batadv_nc_status_update() - update the network coding tvlv container after
+ * network coding setting change
+ * @net_dev: the soft interface net device
+ */
+void batadv_nc_status_update(struct net_device *net_dev)
+{
+ struct batadv_priv *bat_priv = netdev_priv(net_dev);
+
+ batadv_nc_tvlv_container_update(bat_priv);
+}
+
+/**
+ * batadv_nc_tvlv_ogm_handler_v1() - process incoming nc tvlv container
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the orig_node of the ogm
+ * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
+ * @tvlv_value: tvlv buffer containing the gateway data
+ * @tvlv_value_len: tvlv buffer length
+ */
+static void batadv_nc_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig,
+ u8 flags,
+ void *tvlv_value, u16 tvlv_value_len)
+{
+ if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
+ clear_bit(BATADV_ORIG_CAPA_HAS_NC, &orig->capabilities);
+ else
+ set_bit(BATADV_ORIG_CAPA_HAS_NC, &orig->capabilities);
+}
+
+/**
+ * batadv_nc_mesh_init() - initialise coding hash table and start housekeeping
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
+{
+ bat_priv->nc.timestamp_fwd_flush = jiffies;
+ bat_priv->nc.timestamp_sniffed_purge = jiffies;
+
+ if (bat_priv->nc.coding_hash || bat_priv->nc.decoding_hash)
+ return 0;
+
+ bat_priv->nc.coding_hash = batadv_hash_new(128);
+ if (!bat_priv->nc.coding_hash)
+ goto err;
+
+ batadv_hash_set_lock_class(bat_priv->nc.coding_hash,
+ &batadv_nc_coding_hash_lock_class_key);
+
+ bat_priv->nc.decoding_hash = batadv_hash_new(128);
+ if (!bat_priv->nc.decoding_hash) {
+ batadv_hash_destroy(bat_priv->nc.coding_hash);
+ goto err;
+ }
+
+ batadv_hash_set_lock_class(bat_priv->nc.decoding_hash,
+ &batadv_nc_decoding_hash_lock_class_key);
+
+ INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker);
+ batadv_nc_start_timer(bat_priv);
+
+ batadv_tvlv_handler_register(bat_priv, batadv_nc_tvlv_ogm_handler_v1,
+ NULL, NULL, BATADV_TVLV_NC, 1,
+ BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
+ batadv_nc_tvlv_container_update(bat_priv);
+ return 0;
+
+err:
+ return -ENOMEM;
+}
+
+/**
+ * batadv_nc_init_bat_priv() - initialise the nc specific bat_priv variables
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_nc_init_bat_priv(struct batadv_priv *bat_priv)
+{
+ atomic_set(&bat_priv->network_coding, 0);
+ bat_priv->nc.min_tq = 200;
+ bat_priv->nc.max_fwd_delay = 10;
+ bat_priv->nc.max_buffer_time = 200;
+}
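+
+/* Illustrative note (derived from the batadv_has_timed_out() usage below):
+ * with the defaults above, coding candidates need a TQ of at least 200 (of
+ * 255), coded packets are forwarded after at most 10 ms, and overheard
+ * packets are buffered for decoding for up to 200 ms.
+ */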
+
+/**
+ * batadv_nc_init_orig() - initialise the nc fields of an orig_node
+ * @orig_node: the orig_node which is going to be initialised
+ */
+void batadv_nc_init_orig(struct batadv_orig_node *orig_node)
+{
+ INIT_LIST_HEAD(&orig_node->in_coding_list);
+ INIT_LIST_HEAD(&orig_node->out_coding_list);
+ spin_lock_init(&orig_node->in_coding_list_lock);
+ spin_lock_init(&orig_node->out_coding_list_lock);
+}
+
+/**
+ * batadv_nc_node_release() - release nc_node from lists and queue for free
+ * after rcu grace period
+ * @ref: kref pointer of the nc_node
+ */
+static void batadv_nc_node_release(struct kref *ref)
+{
+ struct batadv_nc_node *nc_node;
+
+ nc_node = container_of(ref, struct batadv_nc_node, refcount);
+
+ batadv_orig_node_put(nc_node->orig_node);
+ kfree_rcu(nc_node, rcu);
+}
+
+/**
+ * batadv_nc_node_put() - decrement the nc_node refcounter and possibly
+ * release it
+ * @nc_node: nc_node to be free'd
+ */
+static void batadv_nc_node_put(struct batadv_nc_node *nc_node)
+{
+ if (!nc_node)
+ return;
+
+ kref_put(&nc_node->refcount, batadv_nc_node_release);
+}
+
+/**
+ * batadv_nc_path_release() - release nc_path from lists and queue for free
+ * after rcu grace period
+ * @ref: kref pointer of the nc_path
+ */
+static void batadv_nc_path_release(struct kref *ref)
+{
+ struct batadv_nc_path *nc_path;
+
+ nc_path = container_of(ref, struct batadv_nc_path, refcount);
+
+ kfree_rcu(nc_path, rcu);
+}
+
+/**
+ * batadv_nc_path_put() - decrement the nc_path refcounter and possibly
+ * release it
+ * @nc_path: nc_path to be free'd
+ */
+static void batadv_nc_path_put(struct batadv_nc_path *nc_path)
+{
+ if (!nc_path)
+ return;
+
+ kref_put(&nc_path->refcount, batadv_nc_path_release);
+}
+
+/**
+ * batadv_nc_packet_free() - frees nc packet
+ * @nc_packet: the nc packet to free
+ * @dropped: whether the packet is freed because it is dropped
+ */
+static void batadv_nc_packet_free(struct batadv_nc_packet *nc_packet,
+ bool dropped)
+{
+ if (dropped)
+ kfree_skb(nc_packet->skb);
+ else
+ consume_skb(nc_packet->skb);
+
+ batadv_nc_path_put(nc_packet->nc_path);
+ kfree(nc_packet);
+}
+
+/**
+ * batadv_nc_to_purge_nc_node() - checks whether an nc node has to be purged
+ * @bat_priv: the bat priv with all the soft interface information
+ * @nc_node: the nc node to check
+ *
+ * Return: true if the entry has to be purged now, false otherwise
+ */
+static bool batadv_nc_to_purge_nc_node(struct batadv_priv *bat_priv,
+ struct batadv_nc_node *nc_node)
+{
+ if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
+ return true;
+
+ return batadv_has_timed_out(nc_node->last_seen, BATADV_NC_NODE_TIMEOUT);
+}
+
+/**
+ * batadv_nc_to_purge_nc_path_coding() - checks whether an nc path has timed out
+ * @bat_priv: the bat priv with all the soft interface information
+ * @nc_path: the nc path to check
+ *
+ * Return: true if the entry has to be purged now, false otherwise
+ */
+static bool batadv_nc_to_purge_nc_path_coding(struct batadv_priv *bat_priv,
+ struct batadv_nc_path *nc_path)
+{
+ if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
+ return true;
+
+	/* purge the path when no packet has been added for 10 times the
+ * max_fwd_delay time
+ */
+ return batadv_has_timed_out(nc_path->last_valid,
+ bat_priv->nc.max_fwd_delay * 10);
+}
+
+/**
+ * batadv_nc_to_purge_nc_path_decoding() - checks whether an nc path has timed
+ * out
+ * @bat_priv: the bat priv with all the soft interface information
+ * @nc_path: the nc path to check
+ *
+ * Return: true if the entry has to be purged now, false otherwise
+ */
+static bool batadv_nc_to_purge_nc_path_decoding(struct batadv_priv *bat_priv,
+ struct batadv_nc_path *nc_path)
+{
+ if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
+ return true;
+
+	/* purge the path when no packet has been added for 10 times the
+ * max_buffer time
+ */
+ return batadv_has_timed_out(nc_path->last_valid,
+ bat_priv->nc.max_buffer_time * 10);
+}
+
+/**
+ * batadv_nc_purge_orig_nc_nodes() - go through list of nc nodes and purge stale
+ * entries
+ * @bat_priv: the bat priv with all the soft interface information
+ * @list: list of nc nodes
+ * @lock: nc node list lock
+ * @to_purge: function in charge to decide whether an entry has to be purged or
+ * not. This function takes the nc node as argument and has to return
+ * a boolean value: true if the entry has to be deleted, false
+ * otherwise
+ */
+static void
+batadv_nc_purge_orig_nc_nodes(struct batadv_priv *bat_priv,
+ struct list_head *list,
+ spinlock_t *lock,
+ bool (*to_purge)(struct batadv_priv *,
+ struct batadv_nc_node *))
+{
+ struct batadv_nc_node *nc_node, *nc_node_tmp;
+
+ /* For each nc_node in list */
+ spin_lock_bh(lock);
+ list_for_each_entry_safe(nc_node, nc_node_tmp, list, list) {
+		/* if a helper function has been passed as parameter,
+ * ask it if the entry has to be purged or not
+ */
+ if (to_purge && !to_purge(bat_priv, nc_node))
+ continue;
+
+ batadv_dbg(BATADV_DBG_NC, bat_priv,
+ "Removing nc_node %pM -> %pM\n",
+ nc_node->addr, nc_node->orig_node->orig);
+ list_del_rcu(&nc_node->list);
+ batadv_nc_node_put(nc_node);
+ }
+ spin_unlock_bh(lock);
+}
+
+/**
+ * batadv_nc_purge_orig() - purges all nc node data attached to the given
+ * originator
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: orig_node with the nc node entries to be purged
+ * @to_purge: function in charge to decide whether an entry has to be purged or
+ * not. This function takes the nc node as argument and has to return
+ *	      a boolean value: true if the entry has to be deleted, false
+ * otherwise
+ */
+void batadv_nc_purge_orig(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ bool (*to_purge)(struct batadv_priv *,
+ struct batadv_nc_node *))
+{
+	/* Check incoming nc_nodes of this orig_node */
+ batadv_nc_purge_orig_nc_nodes(bat_priv, &orig_node->in_coding_list,
+ &orig_node->in_coding_list_lock,
+ to_purge);
+
+	/* Check outgoing nc_nodes of this orig_node */
+ batadv_nc_purge_orig_nc_nodes(bat_priv, &orig_node->out_coding_list,
+ &orig_node->out_coding_list_lock,
+ to_purge);
+}
+
+/**
+ * batadv_nc_purge_orig_hash() - traverse entire originator hash to check if
+ *  any originator has timed out nc nodes
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_nc_purge_orig_hash(struct batadv_priv *bat_priv)
+{
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
+ struct hlist_head *head;
+ struct batadv_orig_node *orig_node;
+ u32 i;
+
+ if (!hash)
+ return;
+
+ /* For each orig_node */
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(orig_node, head, hash_entry)
+ batadv_nc_purge_orig(bat_priv, orig_node,
+ batadv_nc_to_purge_nc_node);
+ rcu_read_unlock();
+ }
+}
+
+/**
+ * batadv_nc_purge_paths() - traverse all nc paths in the hash and remove
+ * unused ones
+ * @bat_priv: the bat priv with all the soft interface information
+ * @hash: hash table containing the nc paths to check
+ * @to_purge: function in charge to decide whether an entry has to be purged or
+ * not. This function takes the nc node as argument and has to return
+ *	      a boolean value: true if the entry has to be deleted, false
+ * otherwise
+ */
+static void batadv_nc_purge_paths(struct batadv_priv *bat_priv,
+ struct batadv_hashtable *hash,
+ bool (*to_purge)(struct batadv_priv *,
+ struct batadv_nc_path *))
+{
+ struct hlist_head *head;
+ struct hlist_node *node_tmp;
+ struct batadv_nc_path *nc_path;
+ spinlock_t *lock; /* Protects lists in hash */
+ u32 i;
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+ lock = &hash->list_locks[i];
+
+ /* For each nc_path in this bin */
+ spin_lock_bh(lock);
+ hlist_for_each_entry_safe(nc_path, node_tmp, head, hash_entry) {
+			/* if a helper function has been passed as parameter,
+ * ask it if the entry has to be purged or not
+ */
+ if (to_purge && !to_purge(bat_priv, nc_path))
+ continue;
+
+			/* purging a non-empty nc_path should never happen, but
+			 * it has been observed under high CPU load. Delay the
+			 * purging until the next iteration to allow the
+			 * packet_list to be emptied first.
+			 */
+ if (!unlikely(list_empty(&nc_path->packet_list))) {
+ net_ratelimited_function(printk,
+ KERN_WARNING
+ "Skipping free of non-empty nc_path (%pM -> %pM)!\n",
+ nc_path->prev_hop,
+ nc_path->next_hop);
+ continue;
+ }
+
+ /* nc_path is unused, so remove it */
+ batadv_dbg(BATADV_DBG_NC, bat_priv,
+ "Remove nc_path %pM -> %pM\n",
+ nc_path->prev_hop, nc_path->next_hop);
+ hlist_del_rcu(&nc_path->hash_entry);
+ batadv_nc_path_put(nc_path);
+ }
+ spin_unlock_bh(lock);
+ }
+}
+
+/**
+ * batadv_nc_hash_key_gen() - computes the nc_path hash key
+ * @key: buffer to hold the final hash key
+ * @src: source ethernet mac address going into the hash key
+ * @dst: destination ethernet mac address going into the hash key
+ */
+static void batadv_nc_hash_key_gen(struct batadv_nc_path *key, const char *src,
+ const char *dst)
+{
+ memcpy(key->prev_hop, src, sizeof(key->prev_hop));
+ memcpy(key->next_hop, dst, sizeof(key->next_hop));
+}
+
+/**
+ * batadv_nc_hash_choose() - compute the hash value for an nc path
+ * @data: data to hash
+ * @size: size of the hash table
+ *
+ * Return: the selected index in the hash table for the given data.
+ */
+static u32 batadv_nc_hash_choose(const void *data, u32 size)
+{
+ const struct batadv_nc_path *nc_path = data;
+ u32 hash = 0;
+
+ hash = jhash(&nc_path->prev_hop, sizeof(nc_path->prev_hop), hash);
+ hash = jhash(&nc_path->next_hop, sizeof(nc_path->next_hop), hash);
+
+ return hash % size;
+}
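+
+/* Illustrative sketch: the bucket only depends on the directed
+ * (prev_hop, next_hop) pair, so repeated lookups for the same path are
+ * stable while the reverse path hashes independently:
+ *
+ *	batadv_nc_hash_choose(path_a_to_b, 128);  // always the same index
+ *	batadv_nc_hash_choose(path_b_to_a, 128);  // usually a different one
+ */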
+
+/**
+ * batadv_nc_hash_compare() - comparison function used in the network coding
+ *  hash tables
+ * @node: node in the local table
+ * @data2: second object to compare the node to
+ *
+ * Return: true if the two entry are the same, false otherwise
+ */
+static bool batadv_nc_hash_compare(const struct hlist_node *node,
+ const void *data2)
+{
+ const struct batadv_nc_path *nc_path1, *nc_path2;
+
+ nc_path1 = container_of(node, struct batadv_nc_path, hash_entry);
+ nc_path2 = data2;
+
+	/* Return true if the two keys are identical */
+ if (!batadv_compare_eth(nc_path1->prev_hop, nc_path2->prev_hop))
+ return false;
+
+ if (!batadv_compare_eth(nc_path1->next_hop, nc_path2->next_hop))
+ return false;
+
+ return true;
+}
+
+/**
+ * batadv_nc_hash_find() - search for an existing nc path and return it
+ * @hash: hash table containing the nc path
+ * @data: search key
+ *
+ * Return: the nc_path if found, NULL otherwise.
+ */
+static struct batadv_nc_path *
+batadv_nc_hash_find(struct batadv_hashtable *hash,
+ void *data)
+{
+ struct hlist_head *head;
+ struct batadv_nc_path *nc_path, *nc_path_tmp = NULL;
+ int index;
+
+ if (!hash)
+ return NULL;
+
+ index = batadv_nc_hash_choose(data, hash->size);
+ head = &hash->table[index];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(nc_path, head, hash_entry) {
+ if (!batadv_nc_hash_compare(&nc_path->hash_entry, data))
+ continue;
+
+ if (!kref_get_unless_zero(&nc_path->refcount))
+ continue;
+
+ nc_path_tmp = nc_path;
+ break;
+ }
+ rcu_read_unlock();
+
+ return nc_path_tmp;
+}
+
+/**
+ * batadv_nc_send_packet() - send non-coded packet and free nc_packet struct
+ * @nc_packet: the nc packet to send
+ */
+static void batadv_nc_send_packet(struct batadv_nc_packet *nc_packet)
+{
+ batadv_send_unicast_skb(nc_packet->skb, nc_packet->neigh_node);
+ nc_packet->skb = NULL;
+ batadv_nc_packet_free(nc_packet, false);
+}
+
+/**
+ * batadv_nc_sniffed_purge() - Checks timestamp of given sniffed nc_packet.
+ * @bat_priv: the bat priv with all the soft interface information
+ * @nc_path: the nc path the packet belongs to
+ * @nc_packet: the nc packet to be checked
+ *
+ * Checks whether the given sniffed (overheard) nc_packet has hit its buffering
+ * timeout. If so, the packet is no longer kept and the entry deleted from the
+ * queue. Has to be called with the appropriate locks.
+ *
+ * Return: false if the entry in the fifo queue has not yet timed out (the
+ *	    remaining entries can then be skipped), true otherwise.
+ */
+static bool batadv_nc_sniffed_purge(struct batadv_priv *bat_priv,
+ struct batadv_nc_path *nc_path,
+ struct batadv_nc_packet *nc_packet)
+{
+ unsigned long timeout = bat_priv->nc.max_buffer_time;
+ bool res = false;
+
+ lockdep_assert_held(&nc_path->packet_list_lock);
+
+ /* Packets are added to tail, so the remaining packets did not time
+ * out and we can stop processing the current queue
+ */
+ if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE &&
+ !batadv_has_timed_out(nc_packet->timestamp, timeout))
+ goto out;
+
+ /* purge nc packet */
+ list_del(&nc_packet->list);
+ batadv_nc_packet_free(nc_packet, true);
+
+ res = true;
+
+out:
+ return res;
+}
+
+/**
+ * batadv_nc_fwd_flush() - Checks the timestamp of the given nc packet.
+ * @bat_priv: the bat priv with all the soft interface information
+ * @nc_path: the nc path the packet belongs to
+ * @nc_packet: the nc packet to be checked
+ *
+ * Checks whether the given nc packet has hit its forward timeout. If so, the
+ * packet is no longer delayed, immediately sent and the entry deleted from the
+ * queue. Has to be called with the appropriate locks.
+ *
+ * Return: false if the entry in the fifo queue has not yet timed out (the
+ *	    remaining entries can then be skipped), true otherwise.
+ */
+static bool batadv_nc_fwd_flush(struct batadv_priv *bat_priv,
+ struct batadv_nc_path *nc_path,
+ struct batadv_nc_packet *nc_packet)
+{
+ unsigned long timeout = bat_priv->nc.max_fwd_delay;
+
+ lockdep_assert_held(&nc_path->packet_list_lock);
+
+ /* Packets are added to tail, so the remaining packets did not time
+ * out and we can stop processing the current queue
+ */
+ if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE &&
+ !batadv_has_timed_out(nc_packet->timestamp, timeout))
+ return false;
+
+ /* Send packet */
+ batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD);
+ batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES,
+ nc_packet->skb->len + ETH_HLEN);
+ list_del(&nc_packet->list);
+ batadv_nc_send_packet(nc_packet);
+
+ return true;
+}
+
+/**
+ * batadv_nc_process_nc_paths() - traverse given nc packet pool and free timed
+ * out nc packets
+ * @bat_priv: the bat priv with all the soft interface information
+ * @hash: to be processed hash table
+ * @process_fn: Function called to process given nc packet. Should return true
+ * to encourage this function to proceed with the next packet.
+ * Otherwise the rest of the current queue is skipped.
+ */
+static void
+batadv_nc_process_nc_paths(struct batadv_priv *bat_priv,
+ struct batadv_hashtable *hash,
+ bool (*process_fn)(struct batadv_priv *,
+ struct batadv_nc_path *,
+ struct batadv_nc_packet *))
+{
+ struct hlist_head *head;
+ struct batadv_nc_packet *nc_packet, *nc_packet_tmp;
+ struct batadv_nc_path *nc_path;
+ bool ret;
+ int i;
+
+ if (!hash)
+ return;
+
+ /* Loop hash table bins */
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+
+ /* Loop coding paths */
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(nc_path, head, hash_entry) {
+ /* Loop packets */
+ spin_lock_bh(&nc_path->packet_list_lock);
+ list_for_each_entry_safe(nc_packet, nc_packet_tmp,
+ &nc_path->packet_list, list) {
+ ret = process_fn(bat_priv, nc_path, nc_packet);
+ if (!ret)
+ break;
+ }
+ spin_unlock_bh(&nc_path->packet_list_lock);
+ }
+ rcu_read_unlock();
+ }
+}
+
+/**
+ * batadv_nc_worker() - periodic task for housekeeping related to network
+ * coding
+ * @work: kernel work struct
+ */
+static void batadv_nc_worker(struct work_struct *work)
+{
+ struct delayed_work *delayed_work;
+ struct batadv_priv_nc *priv_nc;
+ struct batadv_priv *bat_priv;
+ unsigned long timeout;
+
+ delayed_work = to_delayed_work(work);
+ priv_nc = container_of(delayed_work, struct batadv_priv_nc, work);
+ bat_priv = container_of(priv_nc, struct batadv_priv, nc);
+
+ batadv_nc_purge_orig_hash(bat_priv);
+ batadv_nc_purge_paths(bat_priv, bat_priv->nc.coding_hash,
+ batadv_nc_to_purge_nc_path_coding);
+ batadv_nc_purge_paths(bat_priv, bat_priv->nc.decoding_hash,
+ batadv_nc_to_purge_nc_path_decoding);
+
+ timeout = bat_priv->nc.max_fwd_delay;
+
+ if (batadv_has_timed_out(bat_priv->nc.timestamp_fwd_flush, timeout)) {
+ batadv_nc_process_nc_paths(bat_priv, bat_priv->nc.coding_hash,
+ batadv_nc_fwd_flush);
+ bat_priv->nc.timestamp_fwd_flush = jiffies;
+ }
+
+ if (batadv_has_timed_out(bat_priv->nc.timestamp_sniffed_purge,
+ bat_priv->nc.max_buffer_time)) {
+ batadv_nc_process_nc_paths(bat_priv, bat_priv->nc.decoding_hash,
+ batadv_nc_sniffed_purge);
+ bat_priv->nc.timestamp_sniffed_purge = jiffies;
+ }
+
+ /* Schedule a new check */
+ batadv_nc_start_timer(bat_priv);
+}
+
+/**
+ * batadv_can_nc_with_orig() - checks whether the given orig node is suitable
+ * for coding or not
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: neighboring orig node which may be used as nc candidate
+ * @ogm_packet: incoming ogm packet also used for the checks
+ *
+ * Return: true if all of the following conditions are met:
+ * 1) The OGM has the most recent sequence number.
+ * 2) The TTL has been decremented by exactly one.
+ * 3) The OGM was received from the first hop from orig_node.
+ * 4) The TQ value of the OGM is above bat_priv->nc.min_tq.
+ */
+static bool batadv_can_nc_with_orig(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ struct batadv_ogm_packet *ogm_packet)
+{
+ struct batadv_orig_ifinfo *orig_ifinfo;
+ u32 last_real_seqno;
+ u8 last_ttl;
+
+ orig_ifinfo = batadv_orig_ifinfo_get(orig_node, BATADV_IF_DEFAULT);
+ if (!orig_ifinfo)
+ return false;
+
+ last_ttl = orig_ifinfo->last_ttl;
+ last_real_seqno = orig_ifinfo->last_real_seqno;
+ batadv_orig_ifinfo_put(orig_ifinfo);
+
+ if (last_real_seqno != ntohl(ogm_packet->seqno))
+ return false;
+ if (last_ttl != ogm_packet->ttl + 1)
+ return false;
+ if (!batadv_compare_eth(ogm_packet->orig, ogm_packet->prev_sender))
+ return false;
+ if (ogm_packet->tq < bat_priv->nc.min_tq)
+ return false;
+
+ return true;
+}
+
+/**
+ * batadv_nc_find_nc_node() - search for an existing nc node and return it
+ * @orig_node: orig node originating the ogm packet
+ * @orig_neigh_node: neighboring orig node from which we received the ogm packet
+ * (can be equal to orig_node)
+ * @in_coding: traverse incoming or outgoing network coding list
+ *
+ * Return: the nc_node if found, NULL otherwise.
+ */
+static struct batadv_nc_node *
+batadv_nc_find_nc_node(struct batadv_orig_node *orig_node,
+ struct batadv_orig_node *orig_neigh_node,
+ bool in_coding)
+{
+ struct batadv_nc_node *nc_node, *nc_node_out = NULL;
+ struct list_head *list;
+
+ if (in_coding)
+ list = &orig_neigh_node->in_coding_list;
+ else
+ list = &orig_neigh_node->out_coding_list;
+
+ /* Traverse list of nc_nodes to orig_node */
+ rcu_read_lock();
+ list_for_each_entry_rcu(nc_node, list, list) {
+ if (!batadv_compare_eth(nc_node->addr, orig_node->orig))
+ continue;
+
+ if (!kref_get_unless_zero(&nc_node->refcount))
+ continue;
+
+ /* Found a match */
+ nc_node_out = nc_node;
+ break;
+ }
+ rcu_read_unlock();
+
+ return nc_node_out;
+}
+
+/**
+ * batadv_nc_get_nc_node() - retrieves an nc node or creates the entry if it was
+ * not found
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: orig node originating the ogm packet
+ * @orig_neigh_node: neighboring orig node from which we received the ogm packet
+ * (can be equal to orig_node)
+ * @in_coding: traverse incoming or outgoing network coding list
+ *
+ * Return: the nc_node if found or created, NULL in case of an error.
+ */
+static struct batadv_nc_node *
+batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ struct batadv_orig_node *orig_neigh_node,
+ bool in_coding)
+{
+ struct batadv_nc_node *nc_node;
+	spinlock_t *lock; /* Used to lock list selected by "bool in_coding" */
+ struct list_head *list;
+
+ /* Select ingoing or outgoing coding node */
+ if (in_coding) {
+ lock = &orig_neigh_node->in_coding_list_lock;
+ list = &orig_neigh_node->in_coding_list;
+ } else {
+ lock = &orig_neigh_node->out_coding_list_lock;
+ list = &orig_neigh_node->out_coding_list;
+ }
+
+ spin_lock_bh(lock);
+
+ /* Check if nc_node is already added */
+ nc_node = batadv_nc_find_nc_node(orig_node, orig_neigh_node, in_coding);
+
+ /* Node found */
+ if (nc_node)
+ goto unlock;
+
+ nc_node = kzalloc(sizeof(*nc_node), GFP_ATOMIC);
+ if (!nc_node)
+ goto unlock;
+
+ /* Initialize nc_node */
+ INIT_LIST_HEAD(&nc_node->list);
+ kref_init(&nc_node->refcount);
+ ether_addr_copy(nc_node->addr, orig_node->orig);
+ kref_get(&orig_neigh_node->refcount);
+ nc_node->orig_node = orig_neigh_node;
+
+ batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_node %pM -> %pM\n",
+ nc_node->addr, nc_node->orig_node->orig);
+
+ /* Add nc_node to orig_node */
+ kref_get(&nc_node->refcount);
+ list_add_tail_rcu(&nc_node->list, list);
+
+unlock:
+ spin_unlock_bh(lock);
+
+ return nc_node;
+}
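+
+/* Illustrative note on the reference counting above: kref_init() creates the
+ * reference handed back to the caller, and the extra kref_get() accounts for
+ * the reference held by the coding list itself. Caller and list then drop
+ * their references independently via batadv_nc_node_put().
+ */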
+
+/**
+ * batadv_nc_update_nc_node() - updates stored incoming and outgoing nc node
+ * structs (best called on incoming OGMs)
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: orig node originating the ogm packet
+ * @orig_neigh_node: neighboring orig node from which we received the ogm packet
+ * (can be equal to orig_node)
+ * @ogm_packet: incoming ogm packet
+ * @is_single_hop_neigh: orig_node is a single hop neighbor
+ */
+void batadv_nc_update_nc_node(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ struct batadv_orig_node *orig_neigh_node,
+ struct batadv_ogm_packet *ogm_packet,
+ int is_single_hop_neigh)
+{
+ struct batadv_nc_node *in_nc_node = NULL;
+ struct batadv_nc_node *out_nc_node = NULL;
+
+ /* Check if network coding is enabled */
+ if (!atomic_read(&bat_priv->network_coding))
+ goto out;
+
+ /* check if orig node is network coding enabled */
+ if (!test_bit(BATADV_ORIG_CAPA_HAS_NC, &orig_node->capabilities))
+ goto out;
+
+ /* accept ogms from 'good' neighbors and single hop neighbors */
+ if (!batadv_can_nc_with_orig(bat_priv, orig_node, ogm_packet) &&
+ !is_single_hop_neigh)
+ goto out;
+
+ /* Add orig_node as in_nc_node on hop */
+ in_nc_node = batadv_nc_get_nc_node(bat_priv, orig_node,
+ orig_neigh_node, true);
+ if (!in_nc_node)
+ goto out;
+
+ in_nc_node->last_seen = jiffies;
+
+ /* Add hop as out_nc_node on orig_node */
+ out_nc_node = batadv_nc_get_nc_node(bat_priv, orig_neigh_node,
+ orig_node, false);
+ if (!out_nc_node)
+ goto out;
+
+ out_nc_node->last_seen = jiffies;
+
+out:
+ batadv_nc_node_put(in_nc_node);
+ batadv_nc_node_put(out_nc_node);
+}
+
+/**
+ * batadv_nc_get_path() - get existing nc_path or allocate a new one
+ * @bat_priv: the bat priv with all the soft interface information
+ * @hash: hash table containing the nc path
+ * @src: ethernet source address - first half of the nc path search key
+ * @dst: ethernet destination address - second half of the nc path search key
+ *
+ * Return: pointer to nc_path if the path was found or created, returns NULL
+ * on error.
+ */
+static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv,
+ struct batadv_hashtable *hash,
+ u8 *src,
+ u8 *dst)
+{
+ int hash_added;
+ struct batadv_nc_path *nc_path, nc_path_key;
+
+ batadv_nc_hash_key_gen(&nc_path_key, src, dst);
+
+ /* Search for existing nc_path */
+ nc_path = batadv_nc_hash_find(hash, (void *)&nc_path_key);
+
+ if (nc_path) {
+ /* Set timestamp to delay removal of nc_path */
+ nc_path->last_valid = jiffies;
+ return nc_path;
+ }
+
+ /* No existing nc_path was found; create a new one */
+ nc_path = kzalloc(sizeof(*nc_path), GFP_ATOMIC);
+
+ if (!nc_path)
+ return NULL;
+
+ /* Initialize nc_path */
+ INIT_LIST_HEAD(&nc_path->packet_list);
+ spin_lock_init(&nc_path->packet_list_lock);
+ kref_init(&nc_path->refcount);
+ nc_path->last_valid = jiffies;
+ ether_addr_copy(nc_path->next_hop, dst);
+ ether_addr_copy(nc_path->prev_hop, src);
+
+ batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_path %pM -> %pM\n",
+ nc_path->prev_hop,
+ nc_path->next_hop);
+
+ /* Add nc_path to hash table */
+ kref_get(&nc_path->refcount);
+ hash_added = batadv_hash_add(hash, batadv_nc_hash_compare,
+ batadv_nc_hash_choose, &nc_path_key,
+ &nc_path->hash_entry);
+
+ if (hash_added < 0) {
+ kfree(nc_path);
+ return NULL;
+ }
+
+ return nc_path;
+}
+
+/**
+ * batadv_nc_random_weight_tq() - scale the receiver's TQ-value to avoid
+ * unfair selection of a receiver with a slightly lower TQ than the other
+ * @tq: tq value to be weighted
+ *
+ * Return: scaled tq value
+ */
+static u8 batadv_nc_random_weight_tq(u8 tq)
+{
+ /* randomize the estimated packet loss (max TQ - estimated TQ) */
+ u8 rand_tq = get_random_u32_below(BATADV_TQ_MAX_VALUE + 1 - tq);
+
+ /* convert to (randomized) estimated tq again */
+ return BATADV_TQ_MAX_VALUE - rand_tq;
+}
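+
+/* Worked example (hypothetical numbers): with BATADV_TQ_MAX_VALUE = 255 and
+ * tq = 200 the estimated packet loss is 55, so rand_tq is drawn uniformly
+ * from [0, 55] and the weighted result lies in [200, 255]. A receiver with
+ * a slightly lower TQ can therefore still win the comparison in
+ * batadv_nc_code_packets() with non-zero probability.
+ */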
+
+/**
+ * batadv_nc_memxor() - XOR destination with source
+ * @dst: byte array to XOR into
+ * @src: byte array to XOR from
+ * @len: number of bytes to XOR
+ */
+static void batadv_nc_memxor(char *dst, const char *src, unsigned int len)
+{
+ unsigned int i;
+
+ for (i = 0; i < len; ++i)
+ dst[i] ^= src[i];
+}
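+
+/* Illustrative sketch (kept out of the build): XOR coding is its own
+ * inverse, i.e. (a ^ b) ^ b == a, which is what makes decoding with a
+ * buffered plaintext packet possible. The byte values are hypothetical.
+ */
+#if 0
+char a[4] = { 0x11, 0x22, 0x33, 0x44 }; /* payload of packet 1 */
+char b[4] = { 0x0f, 0x0e, 0x0d, 0x0c }; /* payload of packet 2 */
+
+batadv_nc_memxor(a, b, sizeof(a)); /* a now holds the coded payload */
+batadv_nc_memxor(a, b, sizeof(a)); /* XORing b again restores packet 1 */
+#endif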
+
+/**
+ * batadv_nc_code_packets() - code a received unicast_packet with an nc packet
+ * into a coded_packet and send it
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: data skb to forward
+ * @ethhdr: pointer to the ethernet header inside the skb
+ * @nc_packet: structure containing the packet the skb can be coded with
+ * @neigh_node: next hop to forward packet to
+ *
+ * Return: true if both packets are consumed, false otherwise.
+ */
+static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ struct ethhdr *ethhdr,
+ struct batadv_nc_packet *nc_packet,
+ struct batadv_neigh_node *neigh_node)
+{
+ u8 tq_weighted_neigh, tq_weighted_coding, tq_tmp;
+ struct sk_buff *skb_dest, *skb_src;
+ struct batadv_unicast_packet *packet1;
+ struct batadv_unicast_packet *packet2;
+ struct batadv_coded_packet *coded_packet;
+ struct batadv_neigh_node *neigh_tmp, *router_neigh, *first_dest;
+ struct batadv_neigh_node *router_coding = NULL, *second_dest;
+ struct batadv_neigh_ifinfo *router_neigh_ifinfo = NULL;
+ struct batadv_neigh_ifinfo *router_coding_ifinfo = NULL;
+ u8 *first_source, *second_source;
+ __be32 packet_id1, packet_id2;
+ size_t count;
+ bool res = false;
+ int coding_len;
+ int unicast_size = sizeof(*packet1);
+ int coded_size = sizeof(*coded_packet);
+ int header_add = coded_size - unicast_size;
+
+ /* TODO: do we need to consider the outgoing interface for
+ * coded packets?
+ */
+ router_neigh = batadv_orig_router_get(neigh_node->orig_node,
+ BATADV_IF_DEFAULT);
+ if (!router_neigh)
+ goto out;
+
+ router_neigh_ifinfo = batadv_neigh_ifinfo_get(router_neigh,
+ BATADV_IF_DEFAULT);
+ if (!router_neigh_ifinfo)
+ goto out;
+
+ neigh_tmp = nc_packet->neigh_node;
+ router_coding = batadv_orig_router_get(neigh_tmp->orig_node,
+ BATADV_IF_DEFAULT);
+ if (!router_coding)
+ goto out;
+
+ router_coding_ifinfo = batadv_neigh_ifinfo_get(router_coding,
+ BATADV_IF_DEFAULT);
+ if (!router_coding_ifinfo)
+ goto out;
+
+ tq_tmp = router_neigh_ifinfo->bat_iv.tq_avg;
+ tq_weighted_neigh = batadv_nc_random_weight_tq(tq_tmp);
+ tq_tmp = router_coding_ifinfo->bat_iv.tq_avg;
+ tq_weighted_coding = batadv_nc_random_weight_tq(tq_tmp);
+
+ /* Select one destination for the MAC-header dst-field based on
+ * weighted TQ-values.
+ */
+ if (tq_weighted_neigh >= tq_weighted_coding) {
+ /* Destination from nc_packet is selected for MAC-header */
+ first_dest = nc_packet->neigh_node;
+ first_source = nc_packet->nc_path->prev_hop;
+ second_dest = neigh_node;
+ second_source = ethhdr->h_source;
+ packet1 = (struct batadv_unicast_packet *)nc_packet->skb->data;
+ packet2 = (struct batadv_unicast_packet *)skb->data;
+ packet_id1 = nc_packet->packet_id;
+ packet_id2 = batadv_skb_crc32(skb,
+ skb->data + sizeof(*packet2));
+ } else {
+ /* Destination for skb is selected for MAC-header */
+ first_dest = neigh_node;
+ first_source = ethhdr->h_source;
+ second_dest = nc_packet->neigh_node;
+ second_source = nc_packet->nc_path->prev_hop;
+ packet1 = (struct batadv_unicast_packet *)skb->data;
+ packet2 = (struct batadv_unicast_packet *)nc_packet->skb->data;
+ packet_id1 = batadv_skb_crc32(skb,
+ skb->data + sizeof(*packet1));
+ packet_id2 = nc_packet->packet_id;
+ }
+
+ /* Instead of zero padding the smallest data buffer, we
+ * code into the largest.
+ */
+ if (skb->len <= nc_packet->skb->len) {
+ skb_dest = nc_packet->skb;
+ skb_src = skb;
+ } else {
+ skb_dest = skb;
+ skb_src = nc_packet->skb;
+ }
+
+ /* coding_len is used when decoding the shorter packet */
+ coding_len = skb_src->len - unicast_size;
+
+ if (skb_linearize(skb_dest) < 0 || skb_linearize(skb_src) < 0)
+ goto out;
+
+ skb_push(skb_dest, header_add);
+
+ coded_packet = (struct batadv_coded_packet *)skb_dest->data;
+ skb_reset_mac_header(skb_dest);
+
+ coded_packet->packet_type = BATADV_CODED;
+ coded_packet->version = BATADV_COMPAT_VERSION;
+ coded_packet->ttl = packet1->ttl;
+
+ /* Info about first unicast packet */
+ ether_addr_copy(coded_packet->first_source, first_source);
+ ether_addr_copy(coded_packet->first_orig_dest, packet1->dest);
+ coded_packet->first_crc = packet_id1;
+ coded_packet->first_ttvn = packet1->ttvn;
+
+ /* Info about second unicast packet */
+ ether_addr_copy(coded_packet->second_dest, second_dest->addr);
+ ether_addr_copy(coded_packet->second_source, second_source);
+ ether_addr_copy(coded_packet->second_orig_dest, packet2->dest);
+ coded_packet->second_crc = packet_id2;
+ coded_packet->second_ttl = packet2->ttl;
+ coded_packet->second_ttvn = packet2->ttvn;
+ coded_packet->coded_len = htons(coding_len);
+
+ /* This is where the magic happens: Code skb_src into skb_dest */
+ batadv_nc_memxor(skb_dest->data + coded_size,
+ skb_src->data + unicast_size, coding_len);
+
+ /* Update counters accordingly */
+ if (BATADV_SKB_CB(skb_src)->decoded &&
+ BATADV_SKB_CB(skb_dest)->decoded) {
+ /* Both packets are recoded */
+ count = skb_src->len + ETH_HLEN;
+ count += skb_dest->len + ETH_HLEN;
+ batadv_add_counter(bat_priv, BATADV_CNT_NC_RECODE, 2);
+ batadv_add_counter(bat_priv, BATADV_CNT_NC_RECODE_BYTES, count);
+ } else if (!BATADV_SKB_CB(skb_src)->decoded &&
+ !BATADV_SKB_CB(skb_dest)->decoded) {
+ /* Both packets are newly coded */
+ count = skb_src->len + ETH_HLEN;
+ count += skb_dest->len + ETH_HLEN;
+ batadv_add_counter(bat_priv, BATADV_CNT_NC_CODE, 2);
+ batadv_add_counter(bat_priv, BATADV_CNT_NC_CODE_BYTES, count);
+ } else if (BATADV_SKB_CB(skb_src)->decoded &&
+ !BATADV_SKB_CB(skb_dest)->decoded) {
+ /* skb_src recoded and skb_dest is newly coded */
+ batadv_inc_counter(bat_priv, BATADV_CNT_NC_RECODE);
+ batadv_add_counter(bat_priv, BATADV_CNT_NC_RECODE_BYTES,
+ skb_src->len + ETH_HLEN);
+ batadv_inc_counter(bat_priv, BATADV_CNT_NC_CODE);
+ batadv_add_counter(bat_priv, BATADV_CNT_NC_CODE_BYTES,
+ skb_dest->len + ETH_HLEN);
+ } else if (!BATADV_SKB_CB(skb_src)->decoded &&
+ BATADV_SKB_CB(skb_dest)->decoded) {
+ /* skb_src is newly coded and skb_dest is recoded */
+ batadv_inc_counter(bat_priv, BATADV_CNT_NC_CODE);
+ batadv_add_counter(bat_priv, BATADV_CNT_NC_CODE_BYTES,
+ skb_src->len + ETH_HLEN);
+ batadv_inc_counter(bat_priv, BATADV_CNT_NC_RECODE);
+ batadv_add_counter(bat_priv, BATADV_CNT_NC_RECODE_BYTES,
+ skb_dest->len + ETH_HLEN);
+ }
+
+ /* skb_src is now coded into skb_dest, so free it */
+ consume_skb(skb_src);
+
+ /* avoid duplicate free of skb from nc_packet */
+ nc_packet->skb = NULL;
+ batadv_nc_packet_free(nc_packet, false);
+
+ /* Send the coded packet and return true */
+ batadv_send_unicast_skb(skb_dest, first_dest);
+ res = true;
+out:
+ batadv_neigh_node_put(router_neigh);
+ batadv_neigh_node_put(router_coding);
+ batadv_neigh_ifinfo_put(router_neigh_ifinfo);
+ batadv_neigh_ifinfo_put(router_coding_ifinfo);
+ return res;
+}
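+
+/* Resulting wire format (sketch): the larger skb is reused and its header
+ * grown in place by header_add bytes:
+ *
+ * before: | unicast hdr | payload_dest ................ |
+ * after: | coded hdr | payload_dest ^ payload_src .. |
+ *
+ * Only coding_len bytes are XORed; any tail of the longer payload is left
+ * untouched, which is equivalent to zero padding the shorter packet. This
+ * is why coded_len must travel in the header for correct decoding.
+ */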
+
+/**
+ * batadv_nc_skb_coding_possible() - true if a decoded skb is available at dst.
+ * @skb: data skb to forward
+ * @dst: destination mac address of the other skb to code with
+ * @src: source mac address of skb
+ *
+ * Whenever we network code a packet we have to check whether we received it in
+ * a network coded form. If so, we may not be able to use it for coding because
+ * some neighbors may also have received (overheard) the packet in the network
+ * coded form without being able to decode it. It is hard to know which of the
+ * neighboring nodes was able to decode the packet, therefore we can only
+ * re-code the packet if the source of the previous encoded packet is involved.
+ * Since the source encoded the packet we can be certain it has all necessary
+ * decode information.
+ *
+ * Return: true if coding of a decoded packet is allowed.
+ */
+static bool batadv_nc_skb_coding_possible(struct sk_buff *skb, u8 *dst, u8 *src)
+{
+ if (BATADV_SKB_CB(skb)->decoded && !batadv_compare_eth(dst, src))
+ return false;
+ return true;
+}
+
+/**
+ * batadv_nc_path_search() - Find the coding path matching in_nc_node and
+ * out_nc_node to retrieve a buffered packet that can be used for coding.
+ * @bat_priv: the bat priv with all the soft interface information
+ * @in_nc_node: pointer to skb next hop's neighbor nc node
+ * @out_nc_node: pointer to skb source's neighbor nc node
+ * @skb: data skb to forward
+ * @eth_dst: next hop mac address of skb
+ *
+ * Return: an nc packet from the found coding path that the skb can be
+ * coded with, or NULL if no coding opportunity was found.
+ */
+static struct batadv_nc_packet *
+batadv_nc_path_search(struct batadv_priv *bat_priv,
+ struct batadv_nc_node *in_nc_node,
+ struct batadv_nc_node *out_nc_node,
+ struct sk_buff *skb,
+ u8 *eth_dst)
+{
+ struct batadv_nc_path *nc_path, nc_path_key;
+ struct batadv_nc_packet *nc_packet_out = NULL;
+ struct batadv_nc_packet *nc_packet, *nc_packet_tmp;
+ struct batadv_hashtable *hash = bat_priv->nc.coding_hash;
+ int idx;
+
+ if (!hash)
+ return NULL;
+
+ /* Create a provisional path key (only prev_hop/next_hop are set) */
+ batadv_nc_hash_key_gen(&nc_path_key, in_nc_node->addr,
+ out_nc_node->addr);
+ idx = batadv_nc_hash_choose(&nc_path_key, hash->size);
+
+ /* Check for coding opportunities in this nc_path */
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(nc_path, &hash->table[idx], hash_entry) {
+ if (!batadv_compare_eth(nc_path->prev_hop, in_nc_node->addr))
+ continue;
+
+ if (!batadv_compare_eth(nc_path->next_hop, out_nc_node->addr))
+ continue;
+
+ spin_lock_bh(&nc_path->packet_list_lock);
+ if (list_empty(&nc_path->packet_list)) {
+ spin_unlock_bh(&nc_path->packet_list_lock);
+ continue;
+ }
+
+ list_for_each_entry_safe(nc_packet, nc_packet_tmp,
+ &nc_path->packet_list, list) {
+ if (!batadv_nc_skb_coding_possible(nc_packet->skb,
+ eth_dst,
+ in_nc_node->addr))
+ continue;
+
+ /* Coding opportunity is found! */
+ list_del(&nc_packet->list);
+ nc_packet_out = nc_packet;
+ break;
+ }
+
+ spin_unlock_bh(&nc_path->packet_list_lock);
+ break;
+ }
+ rcu_read_unlock();
+
+ return nc_packet_out;
+}
+
+/**
+ * batadv_nc_skb_src_search() - Loops through the list of neighboring nodes of
+ * the skb's sender (may be equal to the originator).
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: data skb to forward
+ * @eth_dst: next hop mac address of skb
+ * @eth_src: source mac address of skb
+ * @in_nc_node: pointer to skb next hop's neighbor nc node
+ *
+ * Return: an nc packet if a suitable coding packet was found, NULL otherwise.
+ */
+static struct batadv_nc_packet *
+batadv_nc_skb_src_search(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ u8 *eth_dst,
+ u8 *eth_src,
+ struct batadv_nc_node *in_nc_node)
+{
+ struct batadv_orig_node *orig_node;
+ struct batadv_nc_node *out_nc_node;
+ struct batadv_nc_packet *nc_packet = NULL;
+
+ orig_node = batadv_orig_hash_find(bat_priv, eth_src);
+ if (!orig_node)
+ return NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(out_nc_node,
+ &orig_node->out_coding_list, list) {
+ /* Check if the skb is decoded and if recoding is possible */
+ if (!batadv_nc_skb_coding_possible(skb,
+ out_nc_node->addr, eth_src))
+ continue;
+
+ /* Search for an opportunity in this nc_path */
+ nc_packet = batadv_nc_path_search(bat_priv, in_nc_node,
+ out_nc_node, skb, eth_dst);
+ if (nc_packet)
+ break;
+ }
+ rcu_read_unlock();
+
+ batadv_orig_node_put(orig_node);
+ return nc_packet;
+}
+
+/**
+ * batadv_nc_skb_store_before_coding() - set the ethernet src and dst of the
+ * unicast skb before it is stored for use in later decoding
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: data skb to store
+ * @eth_dst_new: new destination mac address of skb
+ */
+static void batadv_nc_skb_store_before_coding(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ u8 *eth_dst_new)
+{
+ struct ethhdr *ethhdr;
+
+ /* Copy skb header to change the mac header */
+ skb = pskb_copy_for_clone(skb, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ /* Set the mac header as if we actually sent the packet uncoded */
+ ethhdr = eth_hdr(skb);
+ ether_addr_copy(ethhdr->h_source, ethhdr->h_dest);
+ ether_addr_copy(ethhdr->h_dest, eth_dst_new);
+
+ /* Set data pointer to MAC header to mimic packets from our tx path */
+ skb_push(skb, ETH_HLEN);
+
+ /* Add the packet to the decoding packet pool */
+ batadv_nc_skb_store_for_decoding(bat_priv, skb);
+
+ /* batadv_nc_skb_store_for_decoding() clones the skb, so we must free
+ * our ref
+ */
+ consume_skb(skb);
+}
+
+/**
+ * batadv_nc_skb_dst_search() - Loops through list of neighboring nodes to dst.
+ * @skb: data skb to forward
+ * @neigh_node: next hop to forward packet to
+ * @ethhdr: pointer to the ethernet header inside the skb
+ *
+ * Loops through the list of neighboring nodes the next hop has a good
+ * connection to (receives OGMs with a sufficient quality). We need to find a
+ * neighbor of our next hop that potentially sent a packet which our next hop
+ * also received (overheard) and has stored for later decoding.
+ *
+ * Return: true if the skb was consumed (encoded packet sent) or false otherwise
+ */
+static bool batadv_nc_skb_dst_search(struct sk_buff *skb,
+ struct batadv_neigh_node *neigh_node,
+ struct ethhdr *ethhdr)
+{
+ struct net_device *netdev = neigh_node->if_incoming->soft_iface;
+ struct batadv_priv *bat_priv = netdev_priv(netdev);
+ struct batadv_orig_node *orig_node = neigh_node->orig_node;
+ struct batadv_nc_node *nc_node;
+ struct batadv_nc_packet *nc_packet = NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(nc_node, &orig_node->in_coding_list, list) {
+ /* Search for coding opportunity with this in_nc_node */
+ nc_packet = batadv_nc_skb_src_search(bat_priv, skb,
+ neigh_node->addr,
+ ethhdr->h_source, nc_node);
+
+ /* Opportunity was found, so stop searching */
+ if (nc_packet)
+ break;
+ }
+ rcu_read_unlock();
+
+ if (!nc_packet)
+ return false;
+
+ /* Save packets for later decoding */
+ batadv_nc_skb_store_before_coding(bat_priv, skb,
+ neigh_node->addr);
+ batadv_nc_skb_store_before_coding(bat_priv, nc_packet->skb,
+ nc_packet->neigh_node->addr);
+
+ /* Code and send packets */
+ if (batadv_nc_code_packets(bat_priv, skb, ethhdr, nc_packet,
+ neigh_node))
+ return true;
+
+ /* Coding failed (e.g. out of memory): send the buffered packet on its
+ * own to avoid leaking it. The skb passed as argument will be dealt
+ * with by the calling function.
+ */
+ batadv_nc_send_packet(nc_packet);
+ return false;
+}
+
+/**
+ * batadv_nc_skb_add_to_path() - buffer skb for later encoding / decoding
+ * @skb: skb to add to path
+ * @nc_path: path to add skb to
+ * @neigh_node: next hop to forward packet to
+ * @packet_id: checksum to identify packet
+ *
+ * Return: true if the packet was buffered or false in case of an error.
+ */
+static bool batadv_nc_skb_add_to_path(struct sk_buff *skb,
+ struct batadv_nc_path *nc_path,
+ struct batadv_neigh_node *neigh_node,
+ __be32 packet_id)
+{
+ struct batadv_nc_packet *nc_packet;
+
+ nc_packet = kzalloc(sizeof(*nc_packet), GFP_ATOMIC);
+ if (!nc_packet)
+ return false;
+
+ /* Initialize nc_packet */
+ nc_packet->timestamp = jiffies;
+ nc_packet->packet_id = packet_id;
+ nc_packet->skb = skb;
+ nc_packet->neigh_node = neigh_node;
+ nc_packet->nc_path = nc_path;
+
+ /* Add coding packet to list */
+ spin_lock_bh(&nc_path->packet_list_lock);
+ list_add_tail(&nc_packet->list, &nc_path->packet_list);
+ spin_unlock_bh(&nc_path->packet_list_lock);
+
+ return true;
+}
+
+/**
+ * batadv_nc_skb_forward() - try to code a packet or add it to the coding packet
+ * buffer
+ * @skb: data skb to forward
+ * @neigh_node: next hop to forward packet to
+ *
+ * Return: true if the skb was consumed (encoded packet sent) or false otherwise
+ */
+bool batadv_nc_skb_forward(struct sk_buff *skb,
+ struct batadv_neigh_node *neigh_node)
+{
+ const struct net_device *netdev = neigh_node->if_incoming->soft_iface;
+ struct batadv_priv *bat_priv = netdev_priv(netdev);
+ struct batadv_unicast_packet *packet;
+ struct batadv_nc_path *nc_path;
+ struct ethhdr *ethhdr = eth_hdr(skb);
+ __be32 packet_id;
+ u8 *payload;
+
+ /* Check if network coding is enabled */
+ if (!atomic_read(&bat_priv->network_coding))
+ goto out;
+
+ /* We only handle unicast packets */
+ payload = skb_network_header(skb);
+ packet = (struct batadv_unicast_packet *)payload;
+ if (packet->packet_type != BATADV_UNICAST)
+ goto out;
+
+ /* Try to find a coding opportunity and send the skb if one is found */
+ if (batadv_nc_skb_dst_search(skb, neigh_node, ethhdr))
+ return true;
+
+ /* Find or create a nc_path for this src-dst pair */
+ nc_path = batadv_nc_get_path(bat_priv,
+ bat_priv->nc.coding_hash,
+ ethhdr->h_source,
+ neigh_node->addr);
+
+ if (!nc_path)
+ goto out;
+
+ /* Add skb to nc_path */
+ packet_id = batadv_skb_crc32(skb, payload + sizeof(*packet));
+ if (!batadv_nc_skb_add_to_path(skb, nc_path, neigh_node, packet_id))
+ goto free_nc_path;
+
+ /* Packet is consumed */
+ return true;
+
+free_nc_path:
+ batadv_nc_path_put(nc_path);
+out:
+ /* Packet is not consumed */
+ return false;
+}
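+
+/* Sketch of the caller contract (hypothetical caller, loosely modelled on
+ * the forwarding path): on true the skb is owned by the coding engine -
+ * either sent coded or buffered - and must not be touched again; on false
+ * the caller keeps ownership and transmits it uncoded.
+ */
+#if 0
+if (batadv_nc_skb_forward(skb, neigh_node))
+ return NET_XMIT_SUCCESS; /* consumed: coded or buffered */
+
+return batadv_send_unicast_skb(skb, neigh_node); /* plain forward */
+#endif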
+
+/**
+ * batadv_nc_skb_store_for_decoding() - save a clone of the skb which can be
+ * used when decoding coded packets
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: data skb to store
+ */
+void batadv_nc_skb_store_for_decoding(struct batadv_priv *bat_priv,
+ struct sk_buff *skb)
+{
+ struct batadv_unicast_packet *packet;
+ struct batadv_nc_path *nc_path;
+ struct ethhdr *ethhdr = eth_hdr(skb);
+ __be32 packet_id;
+ u8 *payload;
+
+ /* Check if network coding is enabled */
+ if (!atomic_read(&bat_priv->network_coding))
+ goto out;
+
+ /* Check for supported packet type */
+ payload = skb_network_header(skb);
+ packet = (struct batadv_unicast_packet *)payload;
+ if (packet->packet_type != BATADV_UNICAST)
+ goto out;
+
+ /* Find existing nc_path or create a new one */
+ nc_path = batadv_nc_get_path(bat_priv,
+ bat_priv->nc.decoding_hash,
+ ethhdr->h_source,
+ ethhdr->h_dest);
+
+ if (!nc_path)
+ goto out;
+
+ /* Clone skb and adjust skb->data to point at batman header */
+ skb = skb_clone(skb, GFP_ATOMIC);
+ if (unlikely(!skb))
+ goto free_nc_path;
+
+ if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
+ goto free_skb;
+
+ if (unlikely(!skb_pull_rcsum(skb, ETH_HLEN)))
+ goto free_skb;
+
+ /* Add skb to nc_path */
+ packet_id = batadv_skb_crc32(skb, payload + sizeof(*packet));
+ if (!batadv_nc_skb_add_to_path(skb, nc_path, NULL, packet_id))
+ goto free_skb;
+
+ batadv_inc_counter(bat_priv, BATADV_CNT_NC_BUFFER);
+ return;
+
+free_skb:
+ kfree_skb(skb);
+free_nc_path:
+ batadv_nc_path_put(nc_path);
+out:
+ return;
+}
+
+/**
+ * batadv_nc_skb_store_sniffed_unicast() - check if a received unicast packet
+ * should be saved in the decoding buffer and, if so, store it there
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: unicast skb to store
+ */
+void batadv_nc_skb_store_sniffed_unicast(struct batadv_priv *bat_priv,
+ struct sk_buff *skb)
+{
+ struct ethhdr *ethhdr = eth_hdr(skb);
+
+ if (batadv_is_my_mac(bat_priv, ethhdr->h_dest))
+ return;
+
+ /* Set data pointer to MAC header to mimic packets from our tx path */
+ skb_push(skb, ETH_HLEN);
+
+ batadv_nc_skb_store_for_decoding(bat_priv, skb);
+}
+
+/**
+ * batadv_nc_skb_decode_packet() - decode given skb using the decode data stored
+ * in nc_packet
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: unicast skb to decode
+ * @nc_packet: decode data needed to decode the skb
+ *
+ * Return: pointer to decoded unicast packet if the packet was decoded or NULL
+ * in case of an error.
+ */
+static struct batadv_unicast_packet *
+batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ struct batadv_nc_packet *nc_packet)
+{
+ const int h_size = sizeof(struct batadv_unicast_packet);
+ const int h_diff = sizeof(struct batadv_coded_packet) - h_size;
+ struct batadv_unicast_packet *unicast_packet;
+ struct batadv_coded_packet coded_packet_tmp;
+ struct ethhdr *ethhdr, ethhdr_tmp;
+ u8 *orig_dest, ttl, ttvn;
+ unsigned int coding_len;
+ int err;
+
+ /* Save headers temporarily */
+ memcpy(&coded_packet_tmp, skb->data, sizeof(coded_packet_tmp));
+ memcpy(&ethhdr_tmp, skb_mac_header(skb), sizeof(ethhdr_tmp));
+
+ if (skb_cow(skb, 0) < 0)
+ return NULL;
+
+ if (unlikely(!skb_pull_rcsum(skb, h_diff)))
+ return NULL;
+
+ /* Data points to the batman header, so set the mac header 14 bytes
+ * before it and the network header to data
+ */
+ skb_set_mac_header(skb, -ETH_HLEN);
+ skb_reset_network_header(skb);
+
+ /* Reconstruct original mac header */
+ ethhdr = eth_hdr(skb);
+ *ethhdr = ethhdr_tmp;
+
+ /* Select the correct unicast header information based on the location
+ * of our mac address in the coded_packet header
+ */
+ if (batadv_is_my_mac(bat_priv, coded_packet_tmp.second_dest)) {
+ /* If we are the second destination the packet was overheard,
+ * so the Ethernet address must be copied to h_dest and
+ * pkt_type changed from PACKET_OTHERHOST to PACKET_HOST
+ */
+ ether_addr_copy(ethhdr->h_dest, coded_packet_tmp.second_dest);
+ skb->pkt_type = PACKET_HOST;
+
+ orig_dest = coded_packet_tmp.second_orig_dest;
+ ttl = coded_packet_tmp.second_ttl;
+ ttvn = coded_packet_tmp.second_ttvn;
+ } else {
+ orig_dest = coded_packet_tmp.first_orig_dest;
+ ttl = coded_packet_tmp.ttl;
+ ttvn = coded_packet_tmp.first_ttvn;
+ }
+
+ coding_len = ntohs(coded_packet_tmp.coded_len);
+
+ if (coding_len > skb->len)
+ return NULL;
+
+ /* Here the magic is reversed:
+ * extract the missing packet from the received coded packet
+ */
+ batadv_nc_memxor(skb->data + h_size,
+ nc_packet->skb->data + h_size,
+ coding_len);
+
+ /* Resize decoded skb if decoded with larger packet */
+ if (nc_packet->skb->len > coding_len + h_size) {
+ err = pskb_trim_rcsum(skb, coding_len + h_size);
+ if (err)
+ return NULL;
+ }
+
+ /* Create decoded unicast packet */
+ unicast_packet = (struct batadv_unicast_packet *)skb->data;
+ unicast_packet->packet_type = BATADV_UNICAST;
+ unicast_packet->version = BATADV_COMPAT_VERSION;
+ unicast_packet->ttl = ttl;
+ ether_addr_copy(unicast_packet->dest, orig_dest);
+ unicast_packet->ttvn = ttvn;
+
+ batadv_nc_packet_free(nc_packet, false);
+ return unicast_packet;
+}
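+
+/* Decoding mirrors the arithmetic of batadv_nc_code_packets() (sketch):
+ * skb_pull_rcsum() shrinks the coded header back by h_diff bytes to a
+ * unicast header, and XORing the buffered plaintext payload over
+ * skb->data + h_size cancels it out of the coded payload, leaving only the
+ * packet destined for this node.
+ */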
+
+/**
+ * batadv_nc_find_decoding_packet() - search through buffered decoding data to
+ * find the data needed to decode the coded packet
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ethhdr: pointer to the ethernet header inside the coded packet
+ * @coded: coded packet we try to find decode data for
+ *
+ * Return: pointer to nc packet if the needed data was found or NULL otherwise.
+ */
+static struct batadv_nc_packet *
+batadv_nc_find_decoding_packet(struct batadv_priv *bat_priv,
+ struct ethhdr *ethhdr,
+ struct batadv_coded_packet *coded)
+{
+ struct batadv_hashtable *hash = bat_priv->nc.decoding_hash;
+ struct batadv_nc_packet *tmp_nc_packet, *nc_packet = NULL;
+ struct batadv_nc_path *nc_path, nc_path_key;
+ u8 *dest, *source;
+ __be32 packet_id;
+ int index;
+
+ if (!hash)
+ return NULL;
+
+ /* Select the correct packet id based on the location of our mac-addr */
+ dest = ethhdr->h_source;
+ if (!batadv_is_my_mac(bat_priv, coded->second_dest)) {
+ source = coded->second_source;
+ packet_id = coded->second_crc;
+ } else {
+ source = coded->first_source;
+ packet_id = coded->first_crc;
+ }
+
+ batadv_nc_hash_key_gen(&nc_path_key, source, dest);
+ index = batadv_nc_hash_choose(&nc_path_key, hash->size);
+
+ /* Search for matching coding path */
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(nc_path, &hash->table[index], hash_entry) {
+ /* Find matching nc_packet */
+ spin_lock_bh(&nc_path->packet_list_lock);
+ list_for_each_entry(tmp_nc_packet,
+ &nc_path->packet_list, list) {
+ if (packet_id == tmp_nc_packet->packet_id) {
+ list_del(&tmp_nc_packet->list);
+
+ nc_packet = tmp_nc_packet;
+ break;
+ }
+ }
+ spin_unlock_bh(&nc_path->packet_list_lock);
+
+ if (nc_packet)
+ break;
+ }
+ rcu_read_unlock();
+
+ if (!nc_packet)
+ batadv_dbg(BATADV_DBG_NC, bat_priv,
+ "No decoding packet found for %u\n", packet_id);
+
+ return nc_packet;
+}
+
+/**
+ * batadv_nc_recv_coded_packet() - try to decode coded packet and enqueue the
+ * resulting unicast packet
+ * @skb: incoming coded packet
+ * @recv_if: pointer to interface this packet was received on
+ *
+ * Return: NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
+ * otherwise.
+ */
+static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if)
+{
+ struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+ struct batadv_unicast_packet *unicast_packet;
+ struct batadv_coded_packet *coded_packet;
+ struct batadv_nc_packet *nc_packet;
+ struct ethhdr *ethhdr;
+ int hdr_size = sizeof(*coded_packet);
+
+ /* Check if network coding is enabled */
+ if (!atomic_read(&bat_priv->network_coding))
+ goto free_skb;
+
+ /* Make sure we can access (and remove) header */
+ if (unlikely(!pskb_may_pull(skb, hdr_size)))
+ goto free_skb;
+
+ coded_packet = (struct batadv_coded_packet *)skb->data;
+ ethhdr = eth_hdr(skb);
+
+ /* Verify frame is destined for us */
+ if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest) &&
+ !batadv_is_my_mac(bat_priv, coded_packet->second_dest))
+ goto free_skb;
+
+ /* Update stat counter */
+ if (batadv_is_my_mac(bat_priv, coded_packet->second_dest))
+ batadv_inc_counter(bat_priv, BATADV_CNT_NC_SNIFFED);
+
+ nc_packet = batadv_nc_find_decoding_packet(bat_priv, ethhdr,
+ coded_packet);
+ if (!nc_packet) {
+ batadv_inc_counter(bat_priv, BATADV_CNT_NC_DECODE_FAILED);
+ goto free_skb;
+ }
+
+ /* Make both skbs linear, because decoding accesses the entire buffer */
+ if (skb_linearize(skb) < 0)
+ goto free_nc_packet;
+
+ if (skb_linearize(nc_packet->skb) < 0)
+ goto free_nc_packet;
+
+ /* Decode the packet */
+ unicast_packet = batadv_nc_skb_decode_packet(bat_priv, skb, nc_packet);
+ if (!unicast_packet) {
+ batadv_inc_counter(bat_priv, BATADV_CNT_NC_DECODE_FAILED);
+ goto free_nc_packet;
+ }
+
+ /* Mark packet as decoded to do correct recoding when forwarding */
+ BATADV_SKB_CB(skb)->decoded = true;
+ batadv_inc_counter(bat_priv, BATADV_CNT_NC_DECODE);
+ batadv_add_counter(bat_priv, BATADV_CNT_NC_DECODE_BYTES,
+ skb->len + ETH_HLEN);
+ return batadv_recv_unicast_packet(skb, recv_if);
+
+free_nc_packet:
+ batadv_nc_packet_free(nc_packet, true);
+free_skb:
+ kfree_skb(skb);
+
+ return NET_RX_DROP;
+}
+
+/**
+ * batadv_nc_mesh_free() - clean up network coding memory
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
+{
+ batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_NC, 1);
+ batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_NC, 1);
+ cancel_delayed_work_sync(&bat_priv->nc.work);
+
+ batadv_nc_purge_paths(bat_priv, bat_priv->nc.coding_hash, NULL);
+ batadv_hash_destroy(bat_priv->nc.coding_hash);
+ batadv_nc_purge_paths(bat_priv, bat_priv->nc.decoding_hash, NULL);
+ batadv_hash_destroy(bat_priv->nc.decoding_hash);
+}
diff --git a/net/batman-adv/network-coding.h b/net/batman-adv/network-coding.h
new file mode 100644
index 0000000000..368cc3130e
--- /dev/null
+++ b/net/batman-adv/network-coding.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Martin Hundebøll, Jeppe Ledet-Pedersen
+ */
+
+#ifndef _NET_BATMAN_ADV_NETWORK_CODING_H_
+#define _NET_BATMAN_ADV_NETWORK_CODING_H_
+
+#include "main.h"
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <uapi/linux/batadv_packet.h>
+
+#ifdef CONFIG_BATMAN_ADV_NC
+
+void batadv_nc_status_update(struct net_device *net_dev);
+int batadv_nc_init(void);
+int batadv_nc_mesh_init(struct batadv_priv *bat_priv);
+void batadv_nc_mesh_free(struct batadv_priv *bat_priv);
+void batadv_nc_update_nc_node(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ struct batadv_orig_node *orig_neigh_node,
+ struct batadv_ogm_packet *ogm_packet,
+ int is_single_hop_neigh);
+void batadv_nc_purge_orig(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ bool (*to_purge)(struct batadv_priv *,
+ struct batadv_nc_node *));
+void batadv_nc_init_bat_priv(struct batadv_priv *bat_priv);
+void batadv_nc_init_orig(struct batadv_orig_node *orig_node);
+bool batadv_nc_skb_forward(struct sk_buff *skb,
+ struct batadv_neigh_node *neigh_node);
+void batadv_nc_skb_store_for_decoding(struct batadv_priv *bat_priv,
+ struct sk_buff *skb);
+void batadv_nc_skb_store_sniffed_unicast(struct batadv_priv *bat_priv,
+ struct sk_buff *skb);
+
+#else /* ifdef CONFIG_BATMAN_ADV_NC */
+
+static inline void batadv_nc_status_update(struct net_device *net_dev)
+{
+}
+
+static inline int batadv_nc_init(void)
+{
+ return 0;
+}
+
+static inline int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
+{
+ return 0;
+}
+
+static inline void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
+{
+}
+
+static inline void
+batadv_nc_update_nc_node(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ struct batadv_orig_node *orig_neigh_node,
+ struct batadv_ogm_packet *ogm_packet,
+ int is_single_hop_neigh)
+{
+}
+
+static inline void
+batadv_nc_purge_orig(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ bool (*to_purge)(struct batadv_priv *,
+ struct batadv_nc_node *))
+{
+}
+
+static inline void batadv_nc_init_bat_priv(struct batadv_priv *bat_priv)
+{
+}
+
+static inline void batadv_nc_init_orig(struct batadv_orig_node *orig_node)
+{
+}
+
+static inline bool batadv_nc_skb_forward(struct sk_buff *skb,
+ struct batadv_neigh_node *neigh_node)
+{
+ return false;
+}
+
+static inline void
+batadv_nc_skb_store_for_decoding(struct batadv_priv *bat_priv,
+ struct sk_buff *skb)
+{
+}
+
+static inline void
+batadv_nc_skb_store_sniffed_unicast(struct batadv_priv *bat_priv,
+ struct sk_buff *skb)
+{
+}
+
+#endif /* ifdef CONFIG_BATMAN_ADV_NC */
+
+#endif /* _NET_BATMAN_ADV_NETWORK_CODING_H_ */
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
new file mode 100644
index 0000000000..34903df4fe
--- /dev/null
+++ b/net/batman-adv/originator.c
@@ -0,0 +1,1349 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Simon Wunderlich
+ */
+
+#include "originator.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/container_of.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/gfp.h>
+#include <linux/jiffies.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/netdevice.h>
+#include <linux/netlink.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/workqueue.h>
+#include <net/sock.h>
+#include <uapi/linux/batadv_packet.h>
+#include <uapi/linux/batman_adv.h>
+
+#include "bat_algo.h"
+#include "distributed-arp-table.h"
+#include "fragmentation.h"
+#include "gateway_client.h"
+#include "hard-interface.h"
+#include "hash.h"
+#include "log.h"
+#include "multicast.h"
+#include "netlink.h"
+#include "network-coding.h"
+#include "routing.h"
+#include "soft-interface.h"
+#include "translation-table.h"
+
+/* hash class keys */
+static struct lock_class_key batadv_orig_hash_lock_class_key;
+
+/**
+ * batadv_orig_hash_find() - Find and return originator from orig_hash
+ * @bat_priv: the bat priv with all the soft interface information
+ * @data: mac address of the originator
+ *
+ * Return: orig_node (with increased refcnt), NULL on errors
+ */
+struct batadv_orig_node *
+batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data)
+{
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
+ struct hlist_head *head;
+ struct batadv_orig_node *orig_node, *orig_node_tmp = NULL;
+ int index;
+
+ if (!hash)
+ return NULL;
+
+ index = batadv_choose_orig(data, hash->size);
+ head = &hash->table[index];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
+ if (!batadv_compare_eth(orig_node, data))
+ continue;
+
+ if (!kref_get_unless_zero(&orig_node->refcount))
+ continue;
+
+ orig_node_tmp = orig_node;
+ break;
+ }
+ rcu_read_unlock();
+
+ return orig_node_tmp;
+}
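+
+/* Note: batadv_compare_eth() above is handed the orig_node pointer itself.
+ * This works because "orig" is the first member of struct batadv_orig_node,
+ * so the pointer doubles as a pointer to the mac address. A compile-time
+ * check (sketch) would look like this:
+ */
+#if 0
+BUILD_BUG_ON(offsetof(struct batadv_orig_node, orig) != 0);
+#endif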
+
+static void batadv_purge_orig(struct work_struct *work);
+
+/**
+ * batadv_compare_orig() - comparing function used in the originator hash table
+ * @node: node in the local table
+ * @data2: second object to compare the node to
+ *
+ * Return: true if they are the same originator
+ */
+bool batadv_compare_orig(const struct hlist_node *node, const void *data2)
+{
+ const void *data1 = container_of(node, struct batadv_orig_node,
+ hash_entry);
+
+ return batadv_compare_eth(data1, data2);
+}
+
+/**
+ * batadv_orig_node_vlan_get() - get an orig_node_vlan object
+ * @orig_node: the originator serving the VLAN
+ * @vid: the VLAN identifier
+ *
+ * Return: the vlan object identified by vid and belonging to orig_node or NULL
+ * if it does not exist.
+ */
+struct batadv_orig_node_vlan *
+batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
+ unsigned short vid)
+{
+ struct batadv_orig_node_vlan *vlan = NULL, *tmp;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
+ if (tmp->vid != vid)
+ continue;
+
+ if (!kref_get_unless_zero(&tmp->refcount))
+ continue;
+
+ vlan = tmp;
+
+ break;
+ }
+ rcu_read_unlock();
+
+ return vlan;
+}
+
+/**
+ * batadv_orig_node_vlan_new() - search and possibly create an orig_node_vlan
+ * object
+ * @orig_node: the originator serving the VLAN
+ * @vid: the VLAN identifier
+ *
+ * Return: NULL in case of failure or the vlan object identified by vid and
+ * belonging to orig_node otherwise. The object is created and added to the list
+ * if it does not exist.
+ *
+ * The object is returned with refcounter increased by 1.
+ */
+struct batadv_orig_node_vlan *
+batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
+ unsigned short vid)
+{
+ struct batadv_orig_node_vlan *vlan;
+
+ spin_lock_bh(&orig_node->vlan_list_lock);
+
+ /* first look if an object for this vid already exists */
+ vlan = batadv_orig_node_vlan_get(orig_node, vid);
+ if (vlan)
+ goto out;
+
+ vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
+ if (!vlan)
+ goto out;
+
+ kref_init(&vlan->refcount);
+ vlan->vid = vid;
+
+ kref_get(&vlan->refcount);
+ hlist_add_head_rcu(&vlan->list, &orig_node->vlan_list);
+
+out:
+ spin_unlock_bh(&orig_node->vlan_list_lock);
+
+ return vlan;
+}
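+
+/* Typical usage (hypothetical caller): look up or create the per-vlan
+ * object, use it, then drop the returned reference. The list keeps its own
+ * reference until the entry is unlinked again.
+ */
+#if 0
+vlan = batadv_orig_node_vlan_new(orig_node, vid);
+if (!vlan)
+ return -ENOMEM;
+
+/* ... read or update per-vlan state here ... */
+
+batadv_orig_node_vlan_put(vlan);
+#endif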
+
+/**
+ * batadv_orig_node_vlan_release() - release originator-vlan object from lists
+ * and queue for free after rcu grace period
+ * @ref: kref pointer of the originator-vlan object
+ */
+void batadv_orig_node_vlan_release(struct kref *ref)
+{
+ struct batadv_orig_node_vlan *orig_vlan;
+
+ orig_vlan = container_of(ref, struct batadv_orig_node_vlan, refcount);
+
+ kfree_rcu(orig_vlan, rcu);
+}
+
+/**
+ * batadv_originator_init() - Initialize all originator structures
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+int batadv_originator_init(struct batadv_priv *bat_priv)
+{
+ if (bat_priv->orig_hash)
+ return 0;
+
+ bat_priv->orig_hash = batadv_hash_new(1024);
+
+ if (!bat_priv->orig_hash)
+ goto err;
+
+ batadv_hash_set_lock_class(bat_priv->orig_hash,
+ &batadv_orig_hash_lock_class_key);
+
+ INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
+ queue_delayed_work(batadv_event_workqueue,
+ &bat_priv->orig_work,
+ msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
+
+ return 0;
+
+err:
+ return -ENOMEM;
+}
+
+/**
+ * batadv_neigh_ifinfo_release() - release neigh_ifinfo from lists and queue for
+ * free after rcu grace period
+ * @ref: kref pointer of the neigh_ifinfo
+ */
+void batadv_neigh_ifinfo_release(struct kref *ref)
+{
+ struct batadv_neigh_ifinfo *neigh_ifinfo;
+
+ neigh_ifinfo = container_of(ref, struct batadv_neigh_ifinfo, refcount);
+
+ if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
+ batadv_hardif_put(neigh_ifinfo->if_outgoing);
+
+ kfree_rcu(neigh_ifinfo, rcu);
+}
+
+/**
+ * batadv_hardif_neigh_release() - release hardif neigh node from lists and
+ * queue for free after rcu grace period
+ * @ref: kref pointer of the neigh_node
+ */
+void batadv_hardif_neigh_release(struct kref *ref)
+{
+ struct batadv_hardif_neigh_node *hardif_neigh;
+
+ hardif_neigh = container_of(ref, struct batadv_hardif_neigh_node,
+ refcount);
+
+ spin_lock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
+ hlist_del_init_rcu(&hardif_neigh->list);
+ spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
+
+ batadv_hardif_put(hardif_neigh->if_incoming);
+ kfree_rcu(hardif_neigh, rcu);
+}
+
+/**
+ * batadv_neigh_node_release() - release neigh_node from lists and queue for
+ * free after rcu grace period
+ * @ref: kref pointer of the neigh_node
+ */
+void batadv_neigh_node_release(struct kref *ref)
+{
+ struct hlist_node *node_tmp;
+ struct batadv_neigh_node *neigh_node;
+ struct batadv_neigh_ifinfo *neigh_ifinfo;
+
+ neigh_node = container_of(ref, struct batadv_neigh_node, refcount);
+
+ hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
+ &neigh_node->ifinfo_list, list) {
+ batadv_neigh_ifinfo_put(neigh_ifinfo);
+ }
+
+ batadv_hardif_neigh_put(neigh_node->hardif_neigh);
+
+ batadv_hardif_put(neigh_node->if_incoming);
+
+ kfree_rcu(neigh_node, rcu);
+}
+
+/**
+ * batadv_orig_router_get() - router to the originator depending on iface
+ * @orig_node: the orig node for the router
+ * @if_outgoing: the interface where the payload packet has been received or
+ * the OGM should be sent to
+ *
+ * Return: the neighbor which should be the router for this orig_node/iface.
+ *
+ * The object is returned with refcounter increased by 1.
+ */
+struct batadv_neigh_node *
+batadv_orig_router_get(struct batadv_orig_node *orig_node,
+ const struct batadv_hard_iface *if_outgoing)
+{
+ struct batadv_orig_ifinfo *orig_ifinfo;
+ struct batadv_neigh_node *router = NULL;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(orig_ifinfo, &orig_node->ifinfo_list, list) {
+ if (orig_ifinfo->if_outgoing != if_outgoing)
+ continue;
+
+ router = rcu_dereference(orig_ifinfo->router);
+ break;
+ }
+
+ if (router && !kref_get_unless_zero(&router->refcount))
+ router = NULL;
+
+ rcu_read_unlock();
+ return router;
+}
+
+/**
+ * batadv_orig_ifinfo_get() - find the ifinfo from an orig_node
+ * @orig_node: the orig node to be queried
+ * @if_outgoing: the interface for which the ifinfo should be acquired
+ *
+ * Return: the requested orig_ifinfo or NULL if not found.
+ *
+ * The object is returned with refcounter increased by 1.
+ */
+struct batadv_orig_ifinfo *
+batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
+ struct batadv_hard_iface *if_outgoing)
+{
+ struct batadv_orig_ifinfo *tmp, *orig_ifinfo = NULL;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(tmp, &orig_node->ifinfo_list,
+ list) {
+ if (tmp->if_outgoing != if_outgoing)
+ continue;
+
+ if (!kref_get_unless_zero(&tmp->refcount))
+ continue;
+
+ orig_ifinfo = tmp;
+ break;
+ }
+ rcu_read_unlock();
+
+ return orig_ifinfo;
+}
+
+/**
+ * batadv_orig_ifinfo_new() - search and possibly create an orig_ifinfo object
+ * @orig_node: the orig node to be queried
+ * @if_outgoing: the interface for which the ifinfo should be acquired
+ *
+ * Return: NULL in case of failure or the orig_ifinfo object for the if_outgoing
+ * interface otherwise. The object is created and added to the list
+ * if it does not exist.
+ *
+ * The object is returned with refcounter increased by 1.
+ */
+struct batadv_orig_ifinfo *
+batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
+ struct batadv_hard_iface *if_outgoing)
+{
+ struct batadv_orig_ifinfo *orig_ifinfo;
+ unsigned long reset_time;
+
+ spin_lock_bh(&orig_node->neigh_list_lock);
+
+ orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing);
+ if (orig_ifinfo)
+ goto out;
+
+ orig_ifinfo = kzalloc(sizeof(*orig_ifinfo), GFP_ATOMIC);
+ if (!orig_ifinfo)
+ goto out;
+
+ if (if_outgoing != BATADV_IF_DEFAULT)
+ kref_get(&if_outgoing->refcount);
+
+ reset_time = jiffies - 1;
+ reset_time -= msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
+ orig_ifinfo->batman_seqno_reset = reset_time;
+ orig_ifinfo->if_outgoing = if_outgoing;
+ INIT_HLIST_NODE(&orig_ifinfo->list);
+ kref_init(&orig_ifinfo->refcount);
+
+ kref_get(&orig_ifinfo->refcount);
+ hlist_add_head_rcu(&orig_ifinfo->list,
+ &orig_node->ifinfo_list);
+out:
+ spin_unlock_bh(&orig_node->neigh_list_lock);
+ return orig_ifinfo;
+}
+
+/**
+ * batadv_neigh_ifinfo_get() - find the ifinfo from a neigh_node
+ * @neigh: the neigh node to be queried
+ * @if_outgoing: the interface for which the ifinfo should be acquired
+ *
+ * The object is returned with refcounter increased by 1.
+ *
+ * Return: the requested neigh_ifinfo or NULL if not found
+ */
+struct batadv_neigh_ifinfo *
+batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
+ struct batadv_hard_iface *if_outgoing)
+{
+ struct batadv_neigh_ifinfo *neigh_ifinfo = NULL,
+ *tmp_neigh_ifinfo;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(tmp_neigh_ifinfo, &neigh->ifinfo_list,
+ list) {
+ if (tmp_neigh_ifinfo->if_outgoing != if_outgoing)
+ continue;
+
+ if (!kref_get_unless_zero(&tmp_neigh_ifinfo->refcount))
+ continue;
+
+ neigh_ifinfo = tmp_neigh_ifinfo;
+ break;
+ }
+ rcu_read_unlock();
+
+ return neigh_ifinfo;
+}
+
+/**
+ * batadv_neigh_ifinfo_new() - search and possibly create a neigh_ifinfo object
+ * @neigh: the neigh node to be queried
+ * @if_outgoing: the interface for which the ifinfo should be acquired
+ *
+ * Return: NULL in case of failure or the neigh_ifinfo object for the
+ * if_outgoing interface otherwise. The object is created and added to the list
+ * if it does not exist.
+ *
+ * The object is returned with refcounter increased by 1.
+ */
+struct batadv_neigh_ifinfo *
+batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
+ struct batadv_hard_iface *if_outgoing)
+{
+ struct batadv_neigh_ifinfo *neigh_ifinfo;
+
+ spin_lock_bh(&neigh->ifinfo_lock);
+
+ neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing);
+ if (neigh_ifinfo)
+ goto out;
+
+ neigh_ifinfo = kzalloc(sizeof(*neigh_ifinfo), GFP_ATOMIC);
+ if (!neigh_ifinfo)
+ goto out;
+
+ if (if_outgoing)
+ kref_get(&if_outgoing->refcount);
+
+ INIT_HLIST_NODE(&neigh_ifinfo->list);
+ kref_init(&neigh_ifinfo->refcount);
+ neigh_ifinfo->if_outgoing = if_outgoing;
+
+ kref_get(&neigh_ifinfo->refcount);
+ hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list);
+
+out:
+ spin_unlock_bh(&neigh->ifinfo_lock);
+
+ return neigh_ifinfo;
+}
+
+/**
+ * batadv_neigh_node_get() - retrieve a neighbour from the list
+ * @orig_node: originator which the neighbour belongs to
+ * @hard_iface: the interface where this neighbour is connected to
+ * @addr: the address of the neighbour
+ *
+ * Looks for and possibly returns a neighbour belonging to this originator list
+ * which is connected through the provided hard interface.
+ *
+ * Return: neighbor when found. Otherwise NULL
+ */
+static struct batadv_neigh_node *
+batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
+ const struct batadv_hard_iface *hard_iface,
+ const u8 *addr)
+{
+ struct batadv_neigh_node *tmp_neigh_node, *res = NULL;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
+ if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
+ continue;
+
+ if (tmp_neigh_node->if_incoming != hard_iface)
+ continue;
+
+ if (!kref_get_unless_zero(&tmp_neigh_node->refcount))
+ continue;
+
+ res = tmp_neigh_node;
+ break;
+ }
+ rcu_read_unlock();
+
+ return res;
+}
+
+/**
+ * batadv_hardif_neigh_create() - create a hardif neighbour node
+ * @hard_iface: the interface this neighbour is connected to
+ * @neigh_addr: the interface address of the neighbour to retrieve
+ * @orig_node: originator object representing the neighbour
+ *
+ * Return: the hardif neighbour node if found or created or NULL otherwise.
+ */
+static struct batadv_hardif_neigh_node *
+batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface,
+ const u8 *neigh_addr,
+ struct batadv_orig_node *orig_node)
+{
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ struct batadv_hardif_neigh_node *hardif_neigh;
+
+ spin_lock_bh(&hard_iface->neigh_list_lock);
+
+ /* check if neighbor hasn't been added in the meantime */
+ hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr);
+ if (hardif_neigh)
+ goto out;
+
+ hardif_neigh = kzalloc(sizeof(*hardif_neigh), GFP_ATOMIC);
+ if (!hardif_neigh)
+ goto out;
+
+ kref_get(&hard_iface->refcount);
+ INIT_HLIST_NODE(&hardif_neigh->list);
+ ether_addr_copy(hardif_neigh->addr, neigh_addr);
+ ether_addr_copy(hardif_neigh->orig, orig_node->orig);
+ hardif_neigh->if_incoming = hard_iface;
+ hardif_neigh->last_seen = jiffies;
+
+ kref_init(&hardif_neigh->refcount);
+
+ if (bat_priv->algo_ops->neigh.hardif_init)
+ bat_priv->algo_ops->neigh.hardif_init(hardif_neigh);
+
+ hlist_add_head_rcu(&hardif_neigh->list, &hard_iface->neigh_list);
+
+out:
+ spin_unlock_bh(&hard_iface->neigh_list_lock);
+ return hardif_neigh;
+}
+
+/**
+ * batadv_hardif_neigh_get_or_create() - retrieve or create a hardif neighbour
+ * node
+ * @hard_iface: the interface this neighbour is connected to
+ * @neigh_addr: the interface address of the neighbour to retrieve
+ * @orig_node: originator object representing the neighbour
+ *
+ * Return: the hardif neighbour node if found or created or NULL otherwise.
+ */
+static struct batadv_hardif_neigh_node *
+batadv_hardif_neigh_get_or_create(struct batadv_hard_iface *hard_iface,
+ const u8 *neigh_addr,
+ struct batadv_orig_node *orig_node)
+{
+ struct batadv_hardif_neigh_node *hardif_neigh;
+
+ /* first check without locking to avoid the overhead */
+ hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr);
+ if (hardif_neigh)
+ return hardif_neigh;
+
+ return batadv_hardif_neigh_create(hard_iface, neigh_addr, orig_node);
+}
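+
+/* The function above is a classic double check: the unlocked lookup handles
+ * the common "already exists" case without taking neigh_list_lock, while
+ * batadv_hardif_neigh_create() repeats the lookup under the lock so that
+ * two racing callers cannot both insert the same neighbour.
+ */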
+
+/**
+ * batadv_hardif_neigh_get() - retrieve a hardif neighbour from the list
+ * @hard_iface: the interface where this neighbour is connected to
+ * @neigh_addr: the address of the neighbour
+ *
+ * Looks for and possibly returns a neighbour belonging to this hard interface.
+ *
+ * Return: neighbor when found. Otherwise NULL
+ */
+struct batadv_hardif_neigh_node *
+batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface,
+ const u8 *neigh_addr)
+{
+ struct batadv_hardif_neigh_node *tmp_hardif_neigh, *hardif_neigh = NULL;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(tmp_hardif_neigh,
+ &hard_iface->neigh_list, list) {
+ if (!batadv_compare_eth(tmp_hardif_neigh->addr, neigh_addr))
+ continue;
+
+ if (!kref_get_unless_zero(&tmp_hardif_neigh->refcount))
+ continue;
+
+ hardif_neigh = tmp_hardif_neigh;
+ break;
+ }
+ rcu_read_unlock();
+
+ return hardif_neigh;
+}
+
+/**
+ * batadv_neigh_node_create() - create a neigh node object
+ * @orig_node: originator object representing the neighbour
+ * @hard_iface: the interface where the neighbour is connected to
+ * @neigh_addr: the mac address of the neighbour interface
+ *
+ * Allocates a new neigh_node object and initialises all the generic fields.
+ *
+ * Return: the neighbour node if found or created or NULL otherwise.
+ */
+static struct batadv_neigh_node *
+batadv_neigh_node_create(struct batadv_orig_node *orig_node,
+ struct batadv_hard_iface *hard_iface,
+ const u8 *neigh_addr)
+{
+ struct batadv_neigh_node *neigh_node;
+ struct batadv_hardif_neigh_node *hardif_neigh = NULL;
+
+ spin_lock_bh(&orig_node->neigh_list_lock);
+
+ neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr);
+ if (neigh_node)
+ goto out;
+
+ hardif_neigh = batadv_hardif_neigh_get_or_create(hard_iface,
+ neigh_addr, orig_node);
+ if (!hardif_neigh)
+ goto out;
+
+ neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
+ if (!neigh_node)
+ goto out;
+
+ INIT_HLIST_NODE(&neigh_node->list);
+ INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
+ spin_lock_init(&neigh_node->ifinfo_lock);
+
+ kref_get(&hard_iface->refcount);
+ ether_addr_copy(neigh_node->addr, neigh_addr);
+ neigh_node->if_incoming = hard_iface;
+ neigh_node->orig_node = orig_node;
+ neigh_node->last_seen = jiffies;
+
+ /* increment unique neighbor refcount */
+ kref_get(&hardif_neigh->refcount);
+ neigh_node->hardif_neigh = hardif_neigh;
+
+ /* extra reference for return */
+ kref_init(&neigh_node->refcount);
+
+ kref_get(&neigh_node->refcount);
+ hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
+
+ batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
+ "Creating new neighbor %pM for orig_node %pM on interface %s\n",
+ neigh_addr, orig_node->orig, hard_iface->net_dev->name);
+
+out:
+ spin_unlock_bh(&orig_node->neigh_list_lock);
+
+ batadv_hardif_neigh_put(hardif_neigh);
+ return neigh_node;
+}
+
+/**
+ * batadv_neigh_node_get_or_create() - retrieve or create a neigh node object
+ * @orig_node: originator object representing the neighbour
+ * @hard_iface: the interface where the neighbour is connected to
+ * @neigh_addr: the mac address of the neighbour interface
+ *
+ * Return: the neighbour node if found or created or NULL otherwise.
+ */
+struct batadv_neigh_node *
+batadv_neigh_node_get_or_create(struct batadv_orig_node *orig_node,
+ struct batadv_hard_iface *hard_iface,
+ const u8 *neigh_addr)
+{
+ struct batadv_neigh_node *neigh_node;
+
+ /* first check without locking to avoid the overhead */
+ neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr);
+ if (neigh_node)
+ return neigh_node;
+
+ return batadv_neigh_node_create(orig_node, hard_iface, neigh_addr);
+}
+
+/**
+ * batadv_hardif_neigh_dump() - Dump to netlink the neighbor infos for a
+ * specific outgoing interface
+ * @msg: message to dump into
+ * @cb: parameters for the dump
+ *
+ * Return: 0 or error value
+ */
+int batadv_hardif_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(cb->skb->sk);
+ struct net_device *soft_iface;
+ struct net_device *hard_iface = NULL;
+ struct batadv_hard_iface *hardif = BATADV_IF_DEFAULT;
+ struct batadv_priv *bat_priv;
+ struct batadv_hard_iface *primary_if = NULL;
+ int ret;
+ int ifindex, hard_ifindex;
+
+ ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX);
+ if (!ifindex)
+ return -EINVAL;
+
+ soft_iface = dev_get_by_index(net, ifindex);
+ if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ bat_priv = netdev_priv(soft_iface);
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ hard_ifindex = batadv_netlink_get_ifindex(cb->nlh,
+ BATADV_ATTR_HARD_IFINDEX);
+ if (hard_ifindex) {
+ hard_iface = dev_get_by_index(net, hard_ifindex);
+ if (hard_iface)
+ hardif = batadv_hardif_get_by_netdev(hard_iface);
+
+ if (!hardif) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (hardif->soft_iface != soft_iface) {
+ ret = -ENOENT;
+ goto out;
+ }
+ }
+
+ if (!bat_priv->algo_ops->neigh.dump) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ bat_priv->algo_ops->neigh.dump(msg, cb, bat_priv, hardif);
+
+ ret = msg->len;
+
+ out:
+ batadv_hardif_put(hardif);
+ dev_put(hard_iface);
+ batadv_hardif_put(primary_if);
+ dev_put(soft_iface);
+
+ return ret;
+}
+
+/**
+ * batadv_orig_ifinfo_release() - release orig_ifinfo from lists and queue for
+ * free after rcu grace period
+ * @ref: kref pointer of the orig_ifinfo
+ */
+void batadv_orig_ifinfo_release(struct kref *ref)
+{
+ struct batadv_orig_ifinfo *orig_ifinfo;
+ struct batadv_neigh_node *router;
+
+ orig_ifinfo = container_of(ref, struct batadv_orig_ifinfo, refcount);
+
+ if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
+ batadv_hardif_put(orig_ifinfo->if_outgoing);
+
+ /* this is the last reference to this object */
+ router = rcu_dereference_protected(orig_ifinfo->router, true);
+ batadv_neigh_node_put(router);
+
+ kfree_rcu(orig_ifinfo, rcu);
+}
+
+/**
+ * batadv_orig_node_free_rcu() - free the orig_node
+ * @rcu: rcu pointer of the orig_node
+ */
+static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
+{
+ struct batadv_orig_node *orig_node;
+
+ orig_node = container_of(rcu, struct batadv_orig_node, rcu);
+
+ batadv_mcast_purge_orig(orig_node);
+
+ batadv_frag_purge_orig(orig_node, NULL);
+
+ kfree(orig_node->tt_buff);
+ kfree(orig_node);
+}
+
+/**
+ * batadv_orig_node_release() - release orig_node from lists and queue for
+ * free after rcu grace period
+ * @ref: kref pointer of the orig_node
+ */
+void batadv_orig_node_release(struct kref *ref)
+{
+ struct hlist_node *node_tmp;
+ struct batadv_neigh_node *neigh_node;
+ struct batadv_orig_node *orig_node;
+ struct batadv_orig_ifinfo *orig_ifinfo;
+ struct batadv_orig_node_vlan *vlan;
+ struct batadv_orig_ifinfo *last_candidate;
+
+ orig_node = container_of(ref, struct batadv_orig_node, refcount);
+
+ spin_lock_bh(&orig_node->neigh_list_lock);
+
+ /* for all neighbors towards this originator ... */
+ hlist_for_each_entry_safe(neigh_node, node_tmp,
+ &orig_node->neigh_list, list) {
+ hlist_del_rcu(&neigh_node->list);
+ batadv_neigh_node_put(neigh_node);
+ }
+
+ hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
+ &orig_node->ifinfo_list, list) {
+ hlist_del_rcu(&orig_ifinfo->list);
+ batadv_orig_ifinfo_put(orig_ifinfo);
+ }
+
+ last_candidate = orig_node->last_bonding_candidate;
+ orig_node->last_bonding_candidate = NULL;
+ spin_unlock_bh(&orig_node->neigh_list_lock);
+
+ batadv_orig_ifinfo_put(last_candidate);
+
+ spin_lock_bh(&orig_node->vlan_list_lock);
+ hlist_for_each_entry_safe(vlan, node_tmp, &orig_node->vlan_list, list) {
+ hlist_del_rcu(&vlan->list);
+ batadv_orig_node_vlan_put(vlan);
+ }
+ spin_unlock_bh(&orig_node->vlan_list_lock);
+
+ /* Free nc_nodes */
+ batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
+
+ call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
+}
+
+/**
+ * batadv_originator_free() - Free all originator structures
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_originator_free(struct batadv_priv *bat_priv)
+{
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
+ struct hlist_node *node_tmp;
+ struct hlist_head *head;
+ spinlock_t *list_lock; /* spinlock to protect write access */
+ struct batadv_orig_node *orig_node;
+ u32 i;
+
+ if (!hash)
+ return;
+
+ cancel_delayed_work_sync(&bat_priv->orig_work);
+
+ bat_priv->orig_hash = NULL;
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+ list_lock = &hash->list_locks[i];
+
+ spin_lock_bh(list_lock);
+ hlist_for_each_entry_safe(orig_node, node_tmp,
+ head, hash_entry) {
+ hlist_del_rcu(&orig_node->hash_entry);
+ batadv_orig_node_put(orig_node);
+ }
+ spin_unlock_bh(list_lock);
+ }
+
+ batadv_hash_destroy(hash);
+}
+
+/**
+ * batadv_orig_node_new() - creates a new orig_node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the mac address of the originator
+ *
+ * Creates a new originator object and initialises all the generic fields.
+ * The new object is not added to the originator list.
+ *
+ * Return: the newly created object or NULL on failure.
+ */
+struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
+ const u8 *addr)
+{
+ struct batadv_orig_node *orig_node;
+ struct batadv_orig_node_vlan *vlan;
+ unsigned long reset_time;
+ int i;
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Creating new originator: %pM\n", addr);
+
+ orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
+ if (!orig_node)
+ return NULL;
+
+ INIT_HLIST_HEAD(&orig_node->neigh_list);
+ INIT_HLIST_HEAD(&orig_node->vlan_list);
+ INIT_HLIST_HEAD(&orig_node->ifinfo_list);
+ spin_lock_init(&orig_node->bcast_seqno_lock);
+ spin_lock_init(&orig_node->neigh_list_lock);
+ spin_lock_init(&orig_node->tt_buff_lock);
+ spin_lock_init(&orig_node->tt_lock);
+ spin_lock_init(&orig_node->vlan_list_lock);
+
+ batadv_nc_init_orig(orig_node);
+
+ /* extra reference for return */
+ kref_init(&orig_node->refcount);
+
+ orig_node->bat_priv = bat_priv;
+ ether_addr_copy(orig_node->orig, addr);
+ batadv_dat_init_orig_node_addr(orig_node);
+ atomic_set(&orig_node->last_ttvn, 0);
+ orig_node->tt_buff = NULL;
+ orig_node->tt_buff_len = 0;
+ orig_node->last_seen = jiffies;
+ reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
+ orig_node->bcast_seqno_reset = reset_time;
+
+#ifdef CONFIG_BATMAN_ADV_MCAST
+ orig_node->mcast_flags = BATADV_MCAST_WANT_NO_RTR4;
+ orig_node->mcast_flags |= BATADV_MCAST_WANT_NO_RTR6;
+ INIT_HLIST_NODE(&orig_node->mcast_want_all_unsnoopables_node);
+ INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv4_node);
+ INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv6_node);
+ spin_lock_init(&orig_node->mcast_handler_lock);
+#endif
+
+ /* create a vlan object for the "untagged" LAN */
+ vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
+ if (!vlan)
+ goto free_orig_node;
+ /* batadv_orig_node_vlan_new() increases the refcounter.
+ * Immediately release vlan since it is not needed anymore in this
+ * context
+ */
+ batadv_orig_node_vlan_put(vlan);
+
+ for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
+ INIT_HLIST_HEAD(&orig_node->fragments[i].fragment_list);
+ spin_lock_init(&orig_node->fragments[i].lock);
+ orig_node->fragments[i].size = 0;
+ }
+
+ return orig_node;
+free_orig_node:
+ kfree(orig_node);
+ return NULL;
+}
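+
+/* Usage sketch (illustrative, with a hypothetical caller): the function
+ * above returns its extra reference to the caller, which is expected to
+ * drop it with batadv_orig_node_put() when done:
+ *
+ *   orig_node = batadv_orig_node_new(bat_priv, addr);
+ *   if (!orig_node)
+ *           return NULL;
+ *   ... hash the node into bat_priv->orig_hash, use it ...
+ *   batadv_orig_node_put(orig_node);
+ */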
+
+/**
+ * batadv_purge_neigh_ifinfo() - purge obsolete ifinfo entries from neighbor
+ * @bat_priv: the bat priv with all the soft interface information
+ * @neigh: neighbor node which is to be checked
+ */
+static void
+batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
+ struct batadv_neigh_node *neigh)
+{
+ struct batadv_neigh_ifinfo *neigh_ifinfo;
+ struct batadv_hard_iface *if_outgoing;
+ struct hlist_node *node_tmp;
+
+ spin_lock_bh(&neigh->ifinfo_lock);
+
+ /* for all ifinfo objects for this neighbor */
+ hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
+ &neigh->ifinfo_list, list) {
+ if_outgoing = neigh_ifinfo->if_outgoing;
+
+ /* always keep the default interface */
+ if (if_outgoing == BATADV_IF_DEFAULT)
+ continue;
+
+ /* don't purge if the interface is not (going) down */
+ if (if_outgoing->if_status != BATADV_IF_INACTIVE &&
+ if_outgoing->if_status != BATADV_IF_NOT_IN_USE &&
+ if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED)
+ continue;
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
+ neigh->addr, if_outgoing->net_dev->name);
+
+ hlist_del_rcu(&neigh_ifinfo->list);
+ batadv_neigh_ifinfo_put(neigh_ifinfo);
+ }
+
+ spin_unlock_bh(&neigh->ifinfo_lock);
+}
+
+/**
+ * batadv_purge_orig_ifinfo() - purge obsolete ifinfo entries from originator
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: orig node which is to be checked
+ *
+ * Return: true if any ifinfo entry was purged, false otherwise.
+ */
+static bool
+batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node)
+{
+ struct batadv_orig_ifinfo *orig_ifinfo;
+ struct batadv_hard_iface *if_outgoing;
+ struct hlist_node *node_tmp;
+ bool ifinfo_purged = false;
+
+ spin_lock_bh(&orig_node->neigh_list_lock);
+
+ /* for all ifinfo objects for this originator */
+ hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
+ &orig_node->ifinfo_list, list) {
+ if_outgoing = orig_ifinfo->if_outgoing;
+
+ /* always keep the default interface */
+ if (if_outgoing == BATADV_IF_DEFAULT)
+ continue;
+
+ /* don't purge if the interface is not (going) down */
+ if (if_outgoing->if_status != BATADV_IF_INACTIVE &&
+ if_outgoing->if_status != BATADV_IF_NOT_IN_USE &&
+ if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED)
+ continue;
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "router/ifinfo purge: originator %pM, iface: %s\n",
+ orig_node->orig, if_outgoing->net_dev->name);
+
+ ifinfo_purged = true;
+
+ hlist_del_rcu(&orig_ifinfo->list);
+ batadv_orig_ifinfo_put(orig_ifinfo);
+ if (orig_node->last_bonding_candidate == orig_ifinfo) {
+ orig_node->last_bonding_candidate = NULL;
+ batadv_orig_ifinfo_put(orig_ifinfo);
+ }
+ }
+
+ spin_unlock_bh(&orig_node->neigh_list_lock);
+
+ return ifinfo_purged;
+}
+
+/**
+ * batadv_purge_orig_neighbors() - purges neighbors from originator
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: orig node which is to be checked
+ *
+ * Return: true if any neighbor was purged, false otherwise
+ */
+static bool
+batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node)
+{
+ struct hlist_node *node_tmp;
+ struct batadv_neigh_node *neigh_node;
+ bool neigh_purged = false;
+ unsigned long last_seen;
+ struct batadv_hard_iface *if_incoming;
+
+ spin_lock_bh(&orig_node->neigh_list_lock);
+
+ /* for all neighbors towards this originator ... */
+ hlist_for_each_entry_safe(neigh_node, node_tmp,
+ &orig_node->neigh_list, list) {
+ last_seen = neigh_node->last_seen;
+ if_incoming = neigh_node->if_incoming;
+
+ if (batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT) ||
+ if_incoming->if_status == BATADV_IF_INACTIVE ||
+ if_incoming->if_status == BATADV_IF_NOT_IN_USE ||
+ if_incoming->if_status == BATADV_IF_TO_BE_REMOVED) {
+ if (if_incoming->if_status == BATADV_IF_INACTIVE ||
+ if_incoming->if_status == BATADV_IF_NOT_IN_USE ||
+ if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
+ orig_node->orig, neigh_node->addr,
+ if_incoming->net_dev->name);
+ else
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
+ orig_node->orig, neigh_node->addr,
+ jiffies_to_msecs(last_seen));
+
+ neigh_purged = true;
+
+ hlist_del_rcu(&neigh_node->list);
+ batadv_neigh_node_put(neigh_node);
+ } else {
+ /* only necessary if the whole neighbor is not being
+ * deleted, but one of its interfaces has been removed.
+ */
+ batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
+ }
+ }
+
+ spin_unlock_bh(&orig_node->neigh_list_lock);
+ return neigh_purged;
+}
+
+/**
+ * batadv_find_best_neighbor() - finds the best neighbor after purging
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: orig node which is to be checked
+ * @if_outgoing: the interface for which the metric should be compared
+ *
+ * Return: the current best neighbor, with refcount increased, or NULL if
+ * no neighbor exists.
+ */
+static struct batadv_neigh_node *
+batadv_find_best_neighbor(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ struct batadv_hard_iface *if_outgoing)
+{
+ struct batadv_neigh_node *best = NULL, *neigh;
+ struct batadv_algo_ops *bao = bat_priv->algo_ops;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) {
+ if (best && (bao->neigh.cmp(neigh, if_outgoing, best,
+ if_outgoing) <= 0))
+ continue;
+
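+ /* the neighbor list is RCU protected, so entries may be
+ * released concurrently; only pick up neighbors whose
+ * refcount has not already dropped to zero
+ */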
+ if (!kref_get_unless_zero(&neigh->refcount))
+ continue;
+
+ batadv_neigh_node_put(best);
+
+ best = neigh;
+ }
+ rcu_read_unlock();
+
+ return best;
+}
+
+/**
+ * batadv_purge_orig_node() - purges obsolete information from an orig_node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: orig node which is to be checked
+ *
+ * This function checks if the orig_node or substructures of it have become
+ * obsolete, and purges this information if that's the case.
+ *
+ * Return: true if the orig_node is to be removed, false otherwise.
+ */
+static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node)
+{
+ struct batadv_neigh_node *best_neigh_node;
+ struct batadv_hard_iface *hard_iface;
+ bool changed_ifinfo, changed_neigh;
+
+ if (batadv_has_timed_out(orig_node->last_seen,
+ 2 * BATADV_PURGE_TIMEOUT)) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Originator timeout: originator %pM, last_seen %u\n",
+ orig_node->orig,
+ jiffies_to_msecs(orig_node->last_seen));
+ return true;
+ }
+ changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
+ changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);
+
+ if (!changed_ifinfo && !changed_neigh)
+ return false;
+
+ /* first for the default interface (BATADV_IF_DEFAULT) ... */
+ best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node,
+ BATADV_IF_DEFAULT);
+ batadv_update_route(bat_priv, orig_node, BATADV_IF_DEFAULT,
+ best_neigh_node);
+ batadv_neigh_node_put(best_neigh_node);
+
+ /* ... then for all other interfaces. */
+ rcu_read_lock();
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ if (hard_iface->if_status != BATADV_IF_ACTIVE)
+ continue;
+
+ if (hard_iface->soft_iface != bat_priv->soft_iface)
+ continue;
+
+ if (!kref_get_unless_zero(&hard_iface->refcount))
+ continue;
+
+ best_neigh_node = batadv_find_best_neighbor(bat_priv,
+ orig_node,
+ hard_iface);
+ batadv_update_route(bat_priv, orig_node, hard_iface,
+ best_neigh_node);
+ batadv_neigh_node_put(best_neigh_node);
+
+ batadv_hardif_put(hard_iface);
+ }
+ rcu_read_unlock();
+
+ return false;
+}
+
+/**
+ * batadv_purge_orig_ref() - Purge all outdated originators
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
+{
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
+ struct hlist_node *node_tmp;
+ struct hlist_head *head;
+ spinlock_t *list_lock; /* spinlock to protect write access */
+ struct batadv_orig_node *orig_node;
+ u32 i;
+
+ if (!hash)
+ return;
+
+ /* for all origins... */
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+ list_lock = &hash->list_locks[i];
+
+ spin_lock_bh(list_lock);
+ hlist_for_each_entry_safe(orig_node, node_tmp,
+ head, hash_entry) {
+ if (batadv_purge_orig_node(bat_priv, orig_node)) {
+ batadv_gw_node_delete(bat_priv, orig_node);
+ hlist_del_rcu(&orig_node->hash_entry);
+ batadv_tt_global_del_orig(orig_node->bat_priv,
+ orig_node, -1,
+ "originator timed out");
+ batadv_orig_node_put(orig_node);
+ continue;
+ }
+
+ batadv_frag_purge_orig(orig_node,
+ batadv_frag_check_entry);
+ }
+ spin_unlock_bh(list_lock);
+ }
+
+ batadv_gw_election(bat_priv);
+}
+
+static void batadv_purge_orig(struct work_struct *work)
+{
+ struct delayed_work *delayed_work;
+ struct batadv_priv *bat_priv;
+
+ delayed_work = to_delayed_work(work);
+ bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
+ batadv_purge_orig_ref(bat_priv);
+ queue_delayed_work(batadv_event_workqueue,
+ &bat_priv->orig_work,
+ msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
+}
+
+/**
+ * batadv_orig_dump() - Dump to netlink the originator information for a
+ * specific outgoing interface
+ * @msg: message to dump into
+ * @cb: parameters for the dump
+ *
+ * Return: 0 or error value
+ */
+int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(cb->skb->sk);
+ struct net_device *soft_iface;
+ struct net_device *hard_iface = NULL;
+ struct batadv_hard_iface *hardif = BATADV_IF_DEFAULT;
+ struct batadv_priv *bat_priv;
+ struct batadv_hard_iface *primary_if = NULL;
+ int ret;
+ int ifindex, hard_ifindex;
+
+ ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX);
+ if (!ifindex)
+ return -EINVAL;
+
+ soft_iface = dev_get_by_index(net, ifindex);
+ if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ bat_priv = netdev_priv(soft_iface);
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ hard_ifindex = batadv_netlink_get_ifindex(cb->nlh,
+ BATADV_ATTR_HARD_IFINDEX);
+ if (hard_ifindex) {
+ hard_iface = dev_get_by_index(net, hard_ifindex);
+ if (hard_iface)
+ hardif = batadv_hardif_get_by_netdev(hard_iface);
+
+ if (!hardif) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (hardif->soft_iface != soft_iface) {
+ ret = -ENOENT;
+ goto out;
+ }
+ }
+
+ if (!bat_priv->algo_ops->orig.dump) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ bat_priv->algo_ops->orig.dump(msg, cb, bat_priv, hardif);
+
+ ret = msg->len;
+
+ out:
+ batadv_hardif_put(hardif);
+ dev_put(hard_iface);
+ batadv_hardif_put(primary_if);
+ dev_put(soft_iface);
+
+ return ret;
+}
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
new file mode 100644
index 0000000000..ea3d69e4e6
--- /dev/null
+++ b/net/batman-adv/originator.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Simon Wunderlich
+ */
+
+#ifndef _NET_BATMAN_ADV_ORIGINATOR_H_
+#define _NET_BATMAN_ADV_ORIGINATOR_H_
+
+#include "main.h"
+
+#include <linux/compiler.h>
+#include <linux/if_ether.h>
+#include <linux/jhash.h>
+#include <linux/kref.h>
+#include <linux/netlink.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
+bool batadv_compare_orig(const struct hlist_node *node, const void *data2);
+int batadv_originator_init(struct batadv_priv *bat_priv);
+void batadv_originator_free(struct batadv_priv *bat_priv);
+void batadv_purge_orig_ref(struct batadv_priv *bat_priv);
+void batadv_orig_node_release(struct kref *ref);
+struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
+ const u8 *addr);
+struct batadv_hardif_neigh_node *
+batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface,
+ const u8 *neigh_addr);
+void batadv_hardif_neigh_release(struct kref *ref);
+struct batadv_neigh_node *
+batadv_neigh_node_get_or_create(struct batadv_orig_node *orig_node,
+ struct batadv_hard_iface *hard_iface,
+ const u8 *neigh_addr);
+void batadv_neigh_node_release(struct kref *ref);
+struct batadv_neigh_node *
+batadv_orig_router_get(struct batadv_orig_node *orig_node,
+ const struct batadv_hard_iface *if_outgoing);
+struct batadv_neigh_ifinfo *
+batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
+ struct batadv_hard_iface *if_outgoing);
+struct batadv_neigh_ifinfo *
+batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
+ struct batadv_hard_iface *if_outgoing);
+void batadv_neigh_ifinfo_release(struct kref *ref);
+
+int batadv_hardif_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb);
+
+struct batadv_orig_ifinfo *
+batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
+ struct batadv_hard_iface *if_outgoing);
+struct batadv_orig_ifinfo *
+batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
+ struct batadv_hard_iface *if_outgoing);
+void batadv_orig_ifinfo_release(struct kref *ref);
+
+int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb);
+struct batadv_orig_node_vlan *
+batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
+ unsigned short vid);
+struct batadv_orig_node_vlan *
+batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
+ unsigned short vid);
+void batadv_orig_node_vlan_release(struct kref *ref);
+
+/**
+ * batadv_choose_orig() - Return the index of the orig entry in the hash table
+ * @data: mac address of the originator node
+ * @size: the size of the hash table
+ *
+ * Return: the hash index where the object represented by @data should be
+ * stored.
+ */
+static inline u32 batadv_choose_orig(const void *data, u32 size)
+{
+ u32 hash = 0;
+
+ hash = jhash(data, ETH_ALEN, hash);
+ return hash % size;
+}
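+
+/* Worked example (hypothetical values): @data is always an ETH_ALEN byte
+ * MAC address here, so for a table of size 1024 the bucket index is
+ *
+ *   u8 addr[ETH_ALEN] = { 0x02, 0xba, 0x7e, 0x57, 0x00, 0x01 };
+ *   u32 idx = batadv_choose_orig(addr, 1024);   (0 <= idx < 1024)
+ */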
+
+struct batadv_orig_node *
+batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data);
+
+/**
+ * batadv_orig_node_vlan_put() - decrement the refcounter and possibly release
+ * the originator-vlan object
+ * @orig_vlan: the originator-vlan object to release
+ */
+static inline void
+batadv_orig_node_vlan_put(struct batadv_orig_node_vlan *orig_vlan)
+{
+ if (!orig_vlan)
+ return;
+
+ kref_put(&orig_vlan->refcount, batadv_orig_node_vlan_release);
+}
+
+/**
+ * batadv_neigh_ifinfo_put() - decrement the refcounter and possibly release
+ * the neigh_ifinfo
+ * @neigh_ifinfo: the neigh_ifinfo object to release
+ */
+static inline void
+batadv_neigh_ifinfo_put(struct batadv_neigh_ifinfo *neigh_ifinfo)
+{
+ if (!neigh_ifinfo)
+ return;
+
+ kref_put(&neigh_ifinfo->refcount, batadv_neigh_ifinfo_release);
+}
+
+/**
+ * batadv_hardif_neigh_put() - decrement the hardif neighbors refcounter
+ * and possibly release it
+ * @hardif_neigh: hardif neigh neighbor to free
+ */
+static inline void
+batadv_hardif_neigh_put(struct batadv_hardif_neigh_node *hardif_neigh)
+{
+ if (!hardif_neigh)
+ return;
+
+ kref_put(&hardif_neigh->refcount, batadv_hardif_neigh_release);
+}
+
+/**
+ * batadv_neigh_node_put() - decrement the neighbors refcounter and possibly
+ * release it
+ * @neigh_node: neigh neighbor to free
+ */
+static inline void batadv_neigh_node_put(struct batadv_neigh_node *neigh_node)
+{
+ if (!neigh_node)
+ return;
+
+ kref_put(&neigh_node->refcount, batadv_neigh_node_release);
+}
+
+/**
+ * batadv_orig_ifinfo_put() - decrement the refcounter and possibly release
+ * the orig_ifinfo
+ * @orig_ifinfo: the orig_ifinfo object to release
+ */
+static inline void
+batadv_orig_ifinfo_put(struct batadv_orig_ifinfo *orig_ifinfo)
+{
+ if (!orig_ifinfo)
+ return;
+
+ kref_put(&orig_ifinfo->refcount, batadv_orig_ifinfo_release);
+}
+
+/**
+ * batadv_orig_node_put() - decrement the orig node refcounter and possibly
+ * release it
+ * @orig_node: the orig node to free
+ */
+static inline void batadv_orig_node_put(struct batadv_orig_node *orig_node)
+{
+ if (!orig_node)
+ return;
+
+ kref_put(&orig_node->refcount, batadv_orig_node_release);
+}
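+
+/* All of the *_put() helpers above accept NULL, mirroring kfree(); error
+ * paths such as the "out:" labels in batadv_orig_dump() can therefore
+ * drop whatever subset of references was actually acquired without any
+ * extra NULL checks.
+ */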
+
+#endif /* _NET_BATMAN_ADV_ORIGINATOR_H_ */
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
new file mode 100644
index 0000000000..163cd43c48
--- /dev/null
+++ b/net/batman-adv/routing.c
@@ -0,0 +1,1272 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Simon Wunderlich
+ */
+
+#include "routing.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/byteorder/generic.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/jiffies.h>
+#include <linux/kref.h>
+#include <linux/netdevice.h>
+#include <linux/printk.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <uapi/linux/batadv_packet.h>
+
+#include "bitarray.h"
+#include "bridge_loop_avoidance.h"
+#include "distributed-arp-table.h"
+#include "fragmentation.h"
+#include "hard-interface.h"
+#include "log.h"
+#include "network-coding.h"
+#include "originator.h"
+#include "send.h"
+#include "soft-interface.h"
+#include "tp_meter.h"
+#include "translation-table.h"
+#include "tvlv.h"
+
+static int batadv_route_unicast_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if);
+
+/**
+ * _batadv_update_route() - set the router for this originator
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: orig node which is to be configured
+ * @recv_if: the receive interface for which this route is set
+ * @neigh_node: neighbor which should be the next router
+ *
+ * This function does not perform any error checks
+ */
+static void _batadv_update_route(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ struct batadv_hard_iface *recv_if,
+ struct batadv_neigh_node *neigh_node)
+{
+ struct batadv_orig_ifinfo *orig_ifinfo;
+ struct batadv_neigh_node *curr_router;
+
+ orig_ifinfo = batadv_orig_ifinfo_get(orig_node, recv_if);
+ if (!orig_ifinfo)
+ return;
+
+ spin_lock_bh(&orig_node->neigh_list_lock);
+ /* curr_router used earlier may not be the current orig_ifinfo->router
+ * anymore because it was dereferenced outside of the neigh_list_lock
+ * protected region. After the new best neighbor has replaced the current
+ * best neighbor, the reference counter needs to be decreased. Consequently,
+ * the code needs to ensure the curr_router variable contains a pointer
+ * to the replaced best neighbor.
+ */
+
+ /* increase refcount of new best neighbor */
+ if (neigh_node)
+ kref_get(&neigh_node->refcount);
+
+ curr_router = rcu_replace_pointer(orig_ifinfo->router, neigh_node,
+ true);
+ spin_unlock_bh(&orig_node->neigh_list_lock);
+ batadv_orig_ifinfo_put(orig_ifinfo);
+
+ /* route deleted */
+ if (curr_router && !neigh_node) {
+ batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
+ "Deleting route towards: %pM\n", orig_node->orig);
+ batadv_tt_global_del_orig(bat_priv, orig_node, -1,
+ "Deleted route towards originator");
+
+ /* route added */
+ } else if (!curr_router && neigh_node) {
+ batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
+ "Adding route towards: %pM (via %pM)\n",
+ orig_node->orig, neigh_node->addr);
+ /* route changed */
+ } else if (neigh_node && curr_router) {
+ batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
+ "Changing route towards: %pM (now via %pM - was via %pM)\n",
+ orig_node->orig, neigh_node->addr,
+ curr_router->addr);
+ }
+
+ /* decrease refcount of previous best neighbor */
+ batadv_neigh_node_put(curr_router);
+}
+
+/**
+ * batadv_update_route() - set the router for this originator
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: orig node which is to be configured
+ * @recv_if: the receive interface for which this route is set
+ * @neigh_node: neighbor which should be the next router
+ */
+void batadv_update_route(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ struct batadv_hard_iface *recv_if,
+ struct batadv_neigh_node *neigh_node)
+{
+ struct batadv_neigh_node *router = NULL;
+
+ if (!orig_node)
+ goto out;
+
+ router = batadv_orig_router_get(orig_node, recv_if);
+
+ if (router != neigh_node)
+ _batadv_update_route(bat_priv, orig_node, recv_if, neigh_node);
+
+out:
+ batadv_neigh_node_put(router);
+}
+
+/**
+ * batadv_window_protected() - checks whether the host restarted and is within
+ * the protection time.
+ * @bat_priv: the bat priv with all the soft interface information
+ * @seq_num_diff: difference between the current/received sequence number and
+ * the last sequence number
+ * @seq_old_max_diff: maximum age of sequence number not considered as restart
+ * @last_reset: jiffies timestamp of the last reset, will be updated when reset
+ * is detected
+ * @protection_started: is set to true if the protection window was started,
+ * doesn't change otherwise.
+ *
+ * Return:
+ * false if the packet is to be accepted.
+ * true if the packet is to be ignored.
+ */
+bool batadv_window_protected(struct batadv_priv *bat_priv, s32 seq_num_diff,
+ s32 seq_old_max_diff, unsigned long *last_reset,
+ bool *protection_started)
+{
+ if (seq_num_diff <= -seq_old_max_diff ||
+ seq_num_diff >= BATADV_EXPECTED_SEQNO_RANGE) {
+ if (!batadv_has_timed_out(*last_reset,
+ BATADV_RESET_PROTECTION_MS))
+ return true;
+
+ *last_reset = jiffies;
+ if (protection_started)
+ *protection_started = true;
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "old packet received, start protection\n");
+ }
+
+ return false;
+}
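+
+/* Worked example (hypothetical numbers): with seq_old_max_diff = 64 and
+ * BATADV_EXPECTED_SEQNO_RANGE = 65536, a packet whose seq_num_diff is
+ * -100 (far in the past) or +70000 (far in the future) falls outside the
+ * window. The first such packet after BATADV_RESET_PROTECTION_MS resets
+ * *last_reset and is accepted (return false); further out-of-window
+ * packets within the protection time are ignored (return true).
+ */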
+
+/**
+ * batadv_check_management_packet() - Check preconditions for management packets
+ * @skb: incoming packet buffer
+ * @hard_iface: incoming hard interface
+ * @header_len: minimal header length of packet type
+ *
+ * Return: true when management preconditions are met, false otherwise
+ */
+bool batadv_check_management_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *hard_iface,
+ int header_len)
+{
+ struct ethhdr *ethhdr;
+
+ /* drop packet if it does not have the necessary minimum size */
+ if (unlikely(!pskb_may_pull(skb, header_len)))
+ return false;
+
+ ethhdr = eth_hdr(skb);
+
+ /* packet with broadcast indication but unicast recipient */
+ if (!is_broadcast_ether_addr(ethhdr->h_dest))
+ return false;
+
+ /* packet with invalid sender address */
+ if (!is_valid_ether_addr(ethhdr->h_source))
+ return false;
+
+ /* create a copy of the skb, if needed, to modify it. */
+ if (skb_cow(skb, 0) < 0)
+ return false;
+
+ /* keep skb linear */
+ if (skb_linearize(skb) < 0)
+ return false;
+
+ return true;
+}
+
+/**
+ * batadv_recv_my_icmp_packet() - receive an icmp packet locally
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: icmp packet to process
+ *
+ * Return: NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
+ * otherwise.
+ */
+static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv,
+ struct sk_buff *skb)
+{
+ struct batadv_hard_iface *primary_if = NULL;
+ struct batadv_orig_node *orig_node = NULL;
+ struct batadv_icmp_header *icmph;
+ int res, ret = NET_RX_DROP;
+
+ icmph = (struct batadv_icmp_header *)skb->data;
+
+ switch (icmph->msg_type) {
+ case BATADV_ECHO_REQUEST:
+ /* answer echo request (ping) */
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ goto out;
+
+ /* get routing information */
+ orig_node = batadv_orig_hash_find(bat_priv, icmph->orig);
+ if (!orig_node)
+ goto out;
+
+ /* create a copy of the skb, if needed, to modify it. */
+ if (skb_cow(skb, ETH_HLEN) < 0)
+ goto out;
+
+ icmph = (struct batadv_icmp_header *)skb->data;
+
+ ether_addr_copy(icmph->dst, icmph->orig);
+ ether_addr_copy(icmph->orig, primary_if->net_dev->dev_addr);
+ icmph->msg_type = BATADV_ECHO_REPLY;
+ icmph->ttl = BATADV_TTL;
+
+ res = batadv_send_skb_to_orig(skb, orig_node, NULL);
+ if (res == NET_XMIT_SUCCESS)
+ ret = NET_RX_SUCCESS;
+
+ /* skb was consumed */
+ skb = NULL;
+ break;
+ case BATADV_TP:
+ if (!pskb_may_pull(skb, sizeof(struct batadv_icmp_tp_packet)))
+ goto out;
+
+ batadv_tp_meter_recv(bat_priv, skb);
+ ret = NET_RX_SUCCESS;
+ /* skb was consumed */
+ skb = NULL;
+ goto out;
+ default:
+ /* drop unknown type */
+ goto out;
+ }
+out:
+ batadv_hardif_put(primary_if);
+ batadv_orig_node_put(orig_node);
+
+ kfree_skb(skb);
+
+ return ret;
+}
+
+static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
+ struct sk_buff *skb)
+{
+ struct batadv_hard_iface *primary_if = NULL;
+ struct batadv_orig_node *orig_node = NULL;
+ struct batadv_icmp_packet *icmp_packet;
+ int res, ret = NET_RX_DROP;
+
+ icmp_packet = (struct batadv_icmp_packet *)skb->data;
+
+ /* send TTL exceeded if packet is an echo request (traceroute) */
+ if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
+ pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n",
+ icmp_packet->orig, icmp_packet->dst);
+ goto out;
+ }
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ goto out;
+
+ /* get routing information */
+ orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->orig);
+ if (!orig_node)
+ goto out;
+
+ /* create a copy of the skb, if needed, to modify it. */
+ if (skb_cow(skb, ETH_HLEN) < 0)
+ goto out;
+
+ icmp_packet = (struct batadv_icmp_packet *)skb->data;
+
+ ether_addr_copy(icmp_packet->dst, icmp_packet->orig);
+ ether_addr_copy(icmp_packet->orig, primary_if->net_dev->dev_addr);
+ icmp_packet->msg_type = BATADV_TTL_EXCEEDED;
+ icmp_packet->ttl = BATADV_TTL;
+
+ res = batadv_send_skb_to_orig(skb, orig_node, NULL);
+ if (res == NET_XMIT_SUCCESS)
+ ret = NET_RX_SUCCESS;
+
+ /* skb was consumed */
+ skb = NULL;
+
+out:
+ batadv_hardif_put(primary_if);
+ batadv_orig_node_put(orig_node);
+
+ kfree_skb(skb);
+
+ return ret;
+}
+
+/**
+ * batadv_recv_icmp_packet() - Process incoming icmp packet
+ * @skb: incoming packet buffer
+ * @recv_if: incoming hard interface
+ *
+ * Return: NET_RX_SUCCESS on success or NET_RX_DROP in case of failure
+ */
+int batadv_recv_icmp_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if)
+{
+ struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+ struct batadv_icmp_header *icmph;
+ struct batadv_icmp_packet_rr *icmp_packet_rr;
+ struct ethhdr *ethhdr;
+ struct batadv_orig_node *orig_node = NULL;
+ int hdr_size = sizeof(struct batadv_icmp_header);
+ int res, ret = NET_RX_DROP;
+
+ /* drop packet if it does not have the necessary minimum size */
+ if (unlikely(!pskb_may_pull(skb, hdr_size)))
+ goto free_skb;
+
+ ethhdr = eth_hdr(skb);
+
+ /* packet with unicast indication but non-unicast recipient */
+ if (!is_valid_ether_addr(ethhdr->h_dest))
+ goto free_skb;
+
+ /* packet with broadcast/multicast sender address */
+ if (is_multicast_ether_addr(ethhdr->h_source))
+ goto free_skb;
+
+ /* not for me */
+ if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))
+ goto free_skb;
+
+ icmph = (struct batadv_icmp_header *)skb->data;
+
+ /* add record route information if not full */
+ if ((icmph->msg_type == BATADV_ECHO_REPLY ||
+ icmph->msg_type == BATADV_ECHO_REQUEST) &&
+ skb->len >= sizeof(struct batadv_icmp_packet_rr)) {
+ if (skb_linearize(skb) < 0)
+ goto free_skb;
+
+ /* create a copy of the skb, if needed, to modify it. */
+ if (skb_cow(skb, ETH_HLEN) < 0)
+ goto free_skb;
+
+ ethhdr = eth_hdr(skb);
+ icmph = (struct batadv_icmp_header *)skb->data;
+ icmp_packet_rr = (struct batadv_icmp_packet_rr *)icmph;
+ if (icmp_packet_rr->rr_cur >= BATADV_RR_LEN)
+ goto free_skb;
+
+ ether_addr_copy(icmp_packet_rr->rr[icmp_packet_rr->rr_cur],
+ ethhdr->h_dest);
+ icmp_packet_rr->rr_cur++;
+ }
+
+ /* packet for me */
+ if (batadv_is_my_mac(bat_priv, icmph->dst))
+ return batadv_recv_my_icmp_packet(bat_priv, skb);
+
+ /* TTL exceeded */
+ if (icmph->ttl < 2)
+ return batadv_recv_icmp_ttl_exceeded(bat_priv, skb);
+
+ /* get routing information */
+ orig_node = batadv_orig_hash_find(bat_priv, icmph->dst);
+ if (!orig_node)
+ goto free_skb;
+
+ /* create a copy of the skb, if needed, to modify it. */
+ if (skb_cow(skb, ETH_HLEN) < 0)
+ goto put_orig_node;
+
+ icmph = (struct batadv_icmp_header *)skb->data;
+
+ /* decrement ttl */
+ icmph->ttl--;
+
+ /* route it */
+ res = batadv_send_skb_to_orig(skb, orig_node, recv_if);
+ if (res == NET_XMIT_SUCCESS)
+ ret = NET_RX_SUCCESS;
+
+ /* skb was consumed */
+ skb = NULL;
+
+put_orig_node:
+ batadv_orig_node_put(orig_node);
+free_skb:
+ kfree_skb(skb);
+
+ return ret;
+}
+
+/**
+ * batadv_check_unicast_packet() - Check for malformed unicast packets
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: packet to check
+ * @hdr_size: size of header to pull
+ *
+ * Checks for short header and bad addresses in the given packet.
+ *
+ * Return: negative value when check fails and 0 otherwise. The negative value
+ * depends on the reason: -ENODATA for bad header, -EBADR for broadcast
+ * destination or source, and -EREMOTE for non-local (other host) destination.
+ */
+static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, int hdr_size)
+{
+ struct ethhdr *ethhdr;
+
+ /* drop packet if it does not have the necessary minimum size */
+ if (unlikely(!pskb_may_pull(skb, hdr_size)))
+ return -ENODATA;
+
+ ethhdr = eth_hdr(skb);
+
+ /* packet with unicast indication but non-unicast recipient */
+ if (!is_valid_ether_addr(ethhdr->h_dest))
+ return -EBADR;
+
+ /* packet with broadcast/multicast sender address */
+ if (is_multicast_ether_addr(ethhdr->h_source))
+ return -EBADR;
+
+ /* not for me */
+ if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))
+ return -EREMOTE;
+
+ return 0;
+}
+
+/**
+ * batadv_last_bonding_get() - Get last_bonding_candidate of orig_node
+ * @orig_node: originator node whose last bonding candidate should be retrieved
+ *
+ * Return: last bonding candidate of router or NULL if not found
+ *
+ * The object is returned with refcounter increased by 1.
+ */
+static struct batadv_orig_ifinfo *
+batadv_last_bonding_get(struct batadv_orig_node *orig_node)
+{
+ struct batadv_orig_ifinfo *last_bonding_candidate;
+
+ spin_lock_bh(&orig_node->neigh_list_lock);
+ last_bonding_candidate = orig_node->last_bonding_candidate;
+
+ if (last_bonding_candidate)
+ kref_get(&last_bonding_candidate->refcount);
+ spin_unlock_bh(&orig_node->neigh_list_lock);
+
+ return last_bonding_candidate;
+}
+
+/**
+ * batadv_last_bonding_replace() - Replace last_bonding_candidate of orig_node
+ * @orig_node: originator node whose bonding candidates should be replaced
+ * @new_candidate: new bonding candidate or NULL
+ */
+static void
+batadv_last_bonding_replace(struct batadv_orig_node *orig_node,
+ struct batadv_orig_ifinfo *new_candidate)
+{
+ struct batadv_orig_ifinfo *old_candidate;
+
+ spin_lock_bh(&orig_node->neigh_list_lock);
+ old_candidate = orig_node->last_bonding_candidate;
+
+ if (new_candidate)
+ kref_get(&new_candidate->refcount);
+ orig_node->last_bonding_candidate = new_candidate;
+ spin_unlock_bh(&orig_node->neigh_list_lock);
+
+ batadv_orig_ifinfo_put(old_candidate);
+}
+
+/**
+ * batadv_find_router() - find a suitable router for this originator
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: the destination node
+ * @recv_if: pointer to interface this packet was received on
+ *
+ * Return: the router which should be used for this orig_node on
+ * this interface, or NULL if not available.
+ */
+struct batadv_neigh_node *
+batadv_find_router(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ struct batadv_hard_iface *recv_if)
+{
+ struct batadv_algo_ops *bao = bat_priv->algo_ops;
+ struct batadv_neigh_node *first_candidate_router = NULL;
+ struct batadv_neigh_node *next_candidate_router = NULL;
+ struct batadv_neigh_node *router, *cand_router = NULL;
+ struct batadv_neigh_node *last_cand_router = NULL;
+ struct batadv_orig_ifinfo *cand, *first_candidate = NULL;
+ struct batadv_orig_ifinfo *next_candidate = NULL;
+ struct batadv_orig_ifinfo *last_candidate;
+ bool last_candidate_found = false;
+
+ if (!orig_node)
+ return NULL;
+
+ router = batadv_orig_router_get(orig_node, recv_if);
+
+ if (!router)
+ return router;
+
+ /* only consider bonding for recv_if == BATADV_IF_DEFAULT (first hop)
+ * and if activated.
+ */
+ if (!(recv_if == BATADV_IF_DEFAULT && atomic_read(&bat_priv->bonding)))
+ return router;
+
+ /* bonding: loop through the list of possible routers found
+ * for the various outgoing interfaces and find a candidate after
+ * the last chosen bonding candidate (next_candidate). If no such
+ * router is found, use the first candidate found (the previously
+ * chosen bonding candidate might have been the last one in the list).
+ * If this can't be found either, return the previously chosen
+ * router - obviously there are no other candidates.
+ */
+ rcu_read_lock();
+ last_candidate = batadv_last_bonding_get(orig_node);
+ if (last_candidate)
+ last_cand_router = rcu_dereference(last_candidate->router);
+
+ hlist_for_each_entry_rcu(cand, &orig_node->ifinfo_list, list) {
+ /* acquire some structures and references ... */
+ if (!kref_get_unless_zero(&cand->refcount))
+ continue;
+
+ cand_router = rcu_dereference(cand->router);
+ if (!cand_router)
+ goto next;
+
+ if (!kref_get_unless_zero(&cand_router->refcount)) {
+ cand_router = NULL;
+ goto next;
+ }
+
+ /* alternative candidate should be good enough to be
+ * considered
+ */
+ if (!bao->neigh.is_similar_or_better(cand_router,
+ cand->if_outgoing, router,
+ recv_if))
+ goto next;
+
+ /* don't use the same router twice */
+ if (last_cand_router == cand_router)
+ goto next;
+
+ /* mark the first possible candidate */
+ if (!first_candidate) {
+ kref_get(&cand_router->refcount);
+ kref_get(&cand->refcount);
+ first_candidate = cand;
+ first_candidate_router = cand_router;
+ }
+
+ /* check if the loop has already passed the previously selected
+ * candidate ... this function should select the next candidate
+ * AFTER the previously used bonding candidate.
+ */
+ if (!last_candidate || last_candidate_found) {
+ next_candidate = cand;
+ next_candidate_router = cand_router;
+ break;
+ }
+
+ if (last_candidate == cand)
+ last_candidate_found = true;
+next:
+ /* free references */
+ if (cand_router) {
+ batadv_neigh_node_put(cand_router);
+ cand_router = NULL;
+ }
+ batadv_orig_ifinfo_put(cand);
+ }
+ rcu_read_unlock();
+
+ /* After finding candidates, handle the three cases:
+ * 1) there is a next candidate, use that
+ * 2) there is no next candidate, use the first of the list
+ * 3) there is no candidate at all, return the default router
+ */
+ if (next_candidate) {
+ batadv_neigh_node_put(router);
+
+ kref_get(&next_candidate_router->refcount);
+ router = next_candidate_router;
+ batadv_last_bonding_replace(orig_node, next_candidate);
+ } else if (first_candidate) {
+ batadv_neigh_node_put(router);
+
+ kref_get(&first_candidate_router->refcount);
+ router = first_candidate_router;
+ batadv_last_bonding_replace(orig_node, first_candidate);
+ } else {
+ batadv_last_bonding_replace(orig_node, NULL);
+ }
+
+ /* cleanup of candidates */
+ if (first_candidate) {
+ batadv_neigh_node_put(first_candidate_router);
+ batadv_orig_ifinfo_put(first_candidate);
+ }
+
+ if (next_candidate) {
+ batadv_neigh_node_put(next_candidate_router);
+ batadv_orig_ifinfo_put(next_candidate);
+ }
+
+ batadv_orig_ifinfo_put(last_candidate);
+
+ return router;
+}
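+
+/* Example of the resulting round robin (hypothetical topology): with
+ * bonding enabled and three equally good candidates A, B and C in
+ * orig_node->ifinfo_list, a first-hop transmit that last used A will
+ * select B, the next one C, and the one after that wraps around to A
+ * again, spreading the traffic across the outgoing interfaces.
+ */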
+
+static int batadv_route_unicast_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if)
+{
+ struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+ struct batadv_orig_node *orig_node = NULL;
+ struct batadv_unicast_packet *unicast_packet;
+ struct ethhdr *ethhdr = eth_hdr(skb);
+ int res, hdr_len, ret = NET_RX_DROP;
+ unsigned int len;
+
+ unicast_packet = (struct batadv_unicast_packet *)skb->data;
+
+ /* TTL exceeded */
+ if (unicast_packet->ttl < 2) {
+ pr_debug("Warning - can't forward unicast packet from %pM to %pM: ttl exceeded\n",
+ ethhdr->h_source, unicast_packet->dest);
+ goto free_skb;
+ }
+
+ /* get routing information */
+ orig_node = batadv_orig_hash_find(bat_priv, unicast_packet->dest);
+
+ if (!orig_node)
+ goto free_skb;
+
+ /* create a copy of the skb, if needed, to modify it. */
+ if (skb_cow(skb, ETH_HLEN) < 0)
+ goto put_orig_node;
+
+ /* decrement ttl */
+ unicast_packet = (struct batadv_unicast_packet *)skb->data;
+ unicast_packet->ttl--;
+
+ switch (unicast_packet->packet_type) {
+ case BATADV_UNICAST_4ADDR:
+ hdr_len = sizeof(struct batadv_unicast_4addr_packet);
+ break;
+ case BATADV_UNICAST:
+ hdr_len = sizeof(struct batadv_unicast_packet);
+ break;
+ default:
+ /* other packet types not supported - yet */
+ hdr_len = -1;
+ break;
+ }
+
+ if (hdr_len > 0)
+ batadv_skb_set_priority(skb, hdr_len);
+
+ len = skb->len;
+ res = batadv_send_skb_to_orig(skb, orig_node, recv_if);
+
+ /* translate transmit result into receive result */
+ if (res == NET_XMIT_SUCCESS) {
+ ret = NET_RX_SUCCESS;
+ /* skb was transmitted and consumed */
+ batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD);
+ batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES,
+ len + ETH_HLEN);
+ }
+
+ /* skb was consumed */
+ skb = NULL;
+
+put_orig_node:
+ batadv_orig_node_put(orig_node);
+free_skb:
+ kfree_skb(skb);
+
+ return ret;
+}
+
+/**
+ * batadv_reroute_unicast_packet() - update the unicast header for re-routing
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: unicast packet to process
+ * @unicast_packet: the unicast header to be updated
+ * @dst_addr: the payload destination
+ * @vid: VLAN identifier
+ *
+ * Search the translation table for dst_addr and update the unicast header with
+ * the new corresponding information (originator address where the destination
+ * client currently is and its known TTVN)
+ *
+ * Return: true if the packet header has been updated, false otherwise
+ */
+static bool
+batadv_reroute_unicast_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ struct batadv_unicast_packet *unicast_packet,
+ u8 *dst_addr, unsigned short vid)
+{
+ struct batadv_orig_node *orig_node = NULL;
+ struct batadv_hard_iface *primary_if = NULL;
+ bool ret = false;
+ const u8 *orig_addr;
+ u8 orig_ttvn;
+
+ if (batadv_is_my_client(bat_priv, dst_addr, vid)) {
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ goto out;
+ orig_addr = primary_if->net_dev->dev_addr;
+ orig_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
+ } else {
+ orig_node = batadv_transtable_search(bat_priv, NULL, dst_addr,
+ vid);
+ if (!orig_node)
+ goto out;
+
+ if (batadv_compare_eth(orig_node->orig, unicast_packet->dest))
+ goto out;
+
+ orig_addr = orig_node->orig;
+ orig_ttvn = (u8)atomic_read(&orig_node->last_ttvn);
+ }
+
+ /* update the packet header */
+ skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
+ ether_addr_copy(unicast_packet->dest, orig_addr);
+ unicast_packet->ttvn = orig_ttvn;
+ skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
+
+ ret = true;
+out:
+ batadv_hardif_put(primary_if);
+ batadv_orig_node_put(orig_node);
+
+ return ret;
+}
+
+static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, int hdr_len)
+{
+ struct batadv_unicast_packet *unicast_packet;
+ struct batadv_hard_iface *primary_if;
+ struct batadv_orig_node *orig_node;
+ u8 curr_ttvn, old_ttvn;
+ struct ethhdr *ethhdr;
+ unsigned short vid;
+ int is_old_ttvn;
+
+ /* check if there is enough data before accessing it */
+ if (!pskb_may_pull(skb, hdr_len + ETH_HLEN))
+ return false;
+
+ /* create a copy of the skb (in case of re-routing) to modify it. */
+ if (skb_cow(skb, sizeof(*unicast_packet)) < 0)
+ return false;
+
+ unicast_packet = (struct batadv_unicast_packet *)skb->data;
+ vid = batadv_get_vid(skb, hdr_len);
+ ethhdr = (struct ethhdr *)(skb->data + hdr_len);
+
+ /* do not reroute multicast frames in a unicast header */
+ if (is_multicast_ether_addr(ethhdr->h_dest))
+ return true;
+
+ /* check if the destination client was served by this node and it is now
+ * roaming. In this case, it means that the node has got a ROAM_ADV
+ * message and that it knows the new destination in the mesh to re-route
+ * the packet to
+ */
+ if (batadv_tt_local_client_is_roaming(bat_priv, ethhdr->h_dest, vid)) {
+ if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet,
+ ethhdr->h_dest, vid))
+ batadv_dbg_ratelimited(BATADV_DBG_TT,
+ bat_priv,
+ "Rerouting unicast packet to %pM (dst=%pM): Local Roaming\n",
+ unicast_packet->dest,
+ ethhdr->h_dest);
+ /* at this point the mesh destination should have been
+ * substituted with the originator address found in the global
+ * table. If not, let the packet go untouched anyway because
+ * there is nothing the node can do
+ */
+ return true;
+ }
+
+ /* retrieve the TTVN known by this node for the packet destination. This
+ * value is used later to check if the node which sent (or re-routed
+ * last time) the packet had an updated information or not
+ */
+ curr_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
+ if (!batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
+ orig_node = batadv_orig_hash_find(bat_priv,
+ unicast_packet->dest);
+ /* if it is not possible to find the orig_node representing the
+ * destination, the packet can immediately be dropped as it will
+ * not be possible to deliver it
+ */
+ if (!orig_node)
+ return false;
+
+ curr_ttvn = (u8)atomic_read(&orig_node->last_ttvn);
+ batadv_orig_node_put(orig_node);
+ }
+
+ /* check if the TTVN contained in the packet is fresher than what the
+ * node knows
+ */
+ is_old_ttvn = batadv_seq_before(unicast_packet->ttvn, curr_ttvn);
+ if (!is_old_ttvn)
+ return true;
+
+ old_ttvn = unicast_packet->ttvn;
+ /* the packet was forged based on outdated network information. Its
+ * destination can possibly be updated and forwarded towards the new
+ * target host
+ */
+ if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet,
+ ethhdr->h_dest, vid)) {
+ batadv_dbg_ratelimited(BATADV_DBG_TT, bat_priv,
+ "Rerouting unicast packet to %pM (dst=%pM): TTVN mismatch old_ttvn=%u new_ttvn=%u\n",
+ unicast_packet->dest, ethhdr->h_dest,
+ old_ttvn, curr_ttvn);
+ return true;
+ }
+
+ /* the packet has not been re-routed: either the destination is
+ * currently served by this node or there is no destination at all and
+ * it is possible to drop the packet
+ */
+ if (!batadv_is_my_client(bat_priv, ethhdr->h_dest, vid))
+ return false;
+
+ /* update the header in order to let the packet be delivered to this
+ * node's soft interface
+ */
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ return false;
+
+ /* update the packet header */
+ skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
+ ether_addr_copy(unicast_packet->dest, primary_if->net_dev->dev_addr);
+ unicast_packet->ttvn = curr_ttvn;
+ skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
+
+ batadv_hardif_put(primary_if);
+
+ return true;
+}
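+
+/* Worked example (hypothetical values): a packet arrives with ttvn 5 for
+ * destination D while this node knows D's translation table at ttvn 7.
+ * batadv_seq_before(5, 7) is true, so the sender worked with outdated
+ * information: the client is looked up again and, if it has roamed, the
+ * unicast header is rewritten towards the new originator by
+ * batadv_reroute_unicast_packet() above.
+ */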
+
+/**
+ * batadv_recv_unhandled_unicast_packet() - receive and process packets which
+ * are in the unicast number space but not yet known to the implementation
+ * @skb: unicast tvlv packet to process
+ * @recv_if: pointer to interface this packet was received on
+ *
+ * Return: NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
+ * otherwise.
+ */
+int batadv_recv_unhandled_unicast_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if)
+{
+ struct batadv_unicast_packet *unicast_packet;
+ struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+ int check, hdr_size = sizeof(*unicast_packet);
+
+ check = batadv_check_unicast_packet(bat_priv, skb, hdr_size);
+ if (check < 0)
+ goto free_skb;
+
+ /* we don't know about this type, drop it. */
+ unicast_packet = (struct batadv_unicast_packet *)skb->data;
+ if (batadv_is_my_mac(bat_priv, unicast_packet->dest))
+ goto free_skb;
+
+ return batadv_route_unicast_packet(skb, recv_if);
+
+free_skb:
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
+
+/**
+ * batadv_recv_unicast_packet() - Process incoming unicast packet
+ * @skb: incoming packet buffer
+ * @recv_if: incoming hard interface
+ *
+ * Return: NET_RX_SUCCESS on success or NET_RX_DROP in case of failure
+ */
+int batadv_recv_unicast_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if)
+{
+ struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+ struct batadv_unicast_packet *unicast_packet;
+ struct batadv_unicast_4addr_packet *unicast_4addr_packet;
+ u8 *orig_addr, *orig_addr_gw;
+ struct batadv_orig_node *orig_node = NULL, *orig_node_gw = NULL;
+ int check, hdr_size = sizeof(*unicast_packet);
+ enum batadv_subtype subtype;
+ int ret = NET_RX_DROP;
+ bool is4addr, is_gw;
+
+ unicast_packet = (struct batadv_unicast_packet *)skb->data;
+ is4addr = unicast_packet->packet_type == BATADV_UNICAST_4ADDR;
+ /* the caller function should have already pulled 2 bytes */
+ if (is4addr)
+ hdr_size = sizeof(*unicast_4addr_packet);
+
+ /* function returns -EREMOTE for promiscuous packets */
+ check = batadv_check_unicast_packet(bat_priv, skb, hdr_size);
+
+ /* Even though the packet is not for us, we might save it to use for
+ * decoding a later received coded packet
+ */
+ if (check == -EREMOTE)
+ batadv_nc_skb_store_sniffed_unicast(bat_priv, skb);
+
+ if (check < 0)
+ goto free_skb;
+ if (!batadv_check_unicast_ttvn(bat_priv, skb, hdr_size))
+ goto free_skb;
+
+ unicast_packet = (struct batadv_unicast_packet *)skb->data;
+
+ /* packet for me */
+ if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
+ /* If this is a unicast packet from another backbone gw,
+ * drop it.
+ */
+ orig_addr_gw = eth_hdr(skb)->h_source;
+ orig_node_gw = batadv_orig_hash_find(bat_priv, orig_addr_gw);
+ if (orig_node_gw) {
+ is_gw = batadv_bla_is_backbone_gw(skb, orig_node_gw,
+ hdr_size);
+ batadv_orig_node_put(orig_node_gw);
+ if (is_gw) {
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "%s(): Dropped unicast pkt received from another backbone gw %pM.\n",
+ __func__, orig_addr_gw);
+ goto free_skb;
+ }
+ }
+
+ if (is4addr) {
+ unicast_4addr_packet =
+ (struct batadv_unicast_4addr_packet *)skb->data;
+ subtype = unicast_4addr_packet->subtype;
+ batadv_dat_inc_counter(bat_priv, subtype);
+
+ /* Only payload data should be considered for speedy
+ * join. For example, DAT also uses unicast 4addr
+ * types, but those packets should not be considered
+ * for speedy join, since the clients do not actually
+ * reside at the sending originator.
+ */
+ if (subtype == BATADV_P_DATA) {
+ orig_addr = unicast_4addr_packet->src;
+ orig_node = batadv_orig_hash_find(bat_priv,
+ orig_addr);
+ }
+ }
+
+ if (batadv_dat_snoop_incoming_arp_request(bat_priv, skb,
+ hdr_size))
+ goto rx_success;
+ if (batadv_dat_snoop_incoming_arp_reply(bat_priv, skb,
+ hdr_size))
+ goto rx_success;
+
+ batadv_dat_snoop_incoming_dhcp_ack(bat_priv, skb, hdr_size);
+
+ batadv_interface_rx(recv_if->soft_iface, skb, hdr_size,
+ orig_node);
+
+rx_success:
+ batadv_orig_node_put(orig_node);
+
+ return NET_RX_SUCCESS;
+ }
+
+ ret = batadv_route_unicast_packet(skb, recv_if);
+ /* skb was consumed */
+ skb = NULL;
+
+free_skb:
+ kfree_skb(skb);
+
+ return ret;
+}
+
+/**
+ * batadv_recv_unicast_tvlv() - receive and process unicast tvlv packets
+ * @skb: unicast tvlv packet to process
+ * @recv_if: pointer to interface this packet was received on
+ *
+ * Return: NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
+ * otherwise.
+ */
+int batadv_recv_unicast_tvlv(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if)
+{
+ struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+ struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
+ unsigned char *tvlv_buff;
+ u16 tvlv_buff_len;
+ int hdr_size = sizeof(*unicast_tvlv_packet);
+ int ret = NET_RX_DROP;
+
+ if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
+ goto free_skb;
+
+ /* the header is likely to be modified while forwarding */
+ if (skb_cow(skb, hdr_size) < 0)
+ goto free_skb;
+
+ /* packet needs to be linearized to access the tvlv content */
+ if (skb_linearize(skb) < 0)
+ goto free_skb;
+
+ unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)skb->data;
+
+ tvlv_buff = (unsigned char *)(skb->data + hdr_size);
+ tvlv_buff_len = ntohs(unicast_tvlv_packet->tvlv_len);
+
+ if (tvlv_buff_len > skb->len - hdr_size)
+ goto free_skb;
+
+ ret = batadv_tvlv_containers_process(bat_priv, BATADV_UNICAST_TVLV,
+ NULL, skb, tvlv_buff,
+ tvlv_buff_len);
+
+ if (ret != NET_RX_SUCCESS) {
+ ret = batadv_route_unicast_packet(skb, recv_if);
+ /* skb was consumed */
+ skb = NULL;
+ }
+
+free_skb:
+ kfree_skb(skb);
+
+ return ret;
+}
+
+/**
+ * batadv_recv_frag_packet() - process received fragment
+ * @skb: the received fragment
+ * @recv_if: interface that the skb is received on
+ *
+ * This function does one of the three following things: 1) Forward fragment, if
+ * the assembled packet will exceed our MTU; 2) Buffer fragment, if we still
+ * lack further fragments; 3) Merge fragments, if we have all needed parts.
+ *
+ * Return: NET_RX_DROP if the skb is not consumed, NET_RX_SUCCESS otherwise.
+ */
+int batadv_recv_frag_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if)
+{
+ struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+ struct batadv_orig_node *orig_node_src = NULL;
+ struct batadv_frag_packet *frag_packet;
+ int ret = NET_RX_DROP;
+
+ if (batadv_check_unicast_packet(bat_priv, skb,
+ sizeof(*frag_packet)) < 0)
+ goto free_skb;
+
+ frag_packet = (struct batadv_frag_packet *)skb->data;
+ orig_node_src = batadv_orig_hash_find(bat_priv, frag_packet->orig);
+ if (!orig_node_src)
+ goto free_skb;
+
+ skb->priority = frag_packet->priority + 256;
+
+ /* Route the fragment if it is not for us and too big to be merged. */
+ if (!batadv_is_my_mac(bat_priv, frag_packet->dest) &&
+ batadv_frag_skb_fwd(skb, recv_if, orig_node_src)) {
+ /* skb was consumed */
+ skb = NULL;
+ ret = NET_RX_SUCCESS;
+ goto put_orig_node;
+ }
+
+ batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_RX);
+ batadv_add_counter(bat_priv, BATADV_CNT_FRAG_RX_BYTES, skb->len);
+
+ /* Add fragment to buffer and merge if possible. */
+ if (!batadv_frag_skb_buffer(&skb, orig_node_src))
+ goto put_orig_node;
+
+ /* Deliver merged packet to the appropriate handler, if it was
+ * merged
+ */
+ if (skb) {
+ batadv_batman_skb_recv(skb, recv_if->net_dev,
+ &recv_if->batman_adv_ptype, NULL);
+ /* skb was consumed */
+ skb = NULL;
+ }
+
+ ret = NET_RX_SUCCESS;
+
+put_orig_node:
+ batadv_orig_node_put(orig_node_src);
+free_skb:
+ kfree_skb(skb);
+
+ return ret;
+}
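+
+/* Worked example (hypothetical sizes): a 2800 byte unicast frame sent
+ * over a path with a 1500 byte MTU arrives here as two fragments. The
+ * first one is buffered in orig_node_src's fragment table; once the
+ * second arrives, batadv_frag_skb_buffer() merges them and the rebuilt
+ * packet is handed back to batadv_batman_skb_recv() as if it had been
+ * received in one piece.
+ */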
+
+/**
+ * batadv_recv_bcast_packet() - Process incoming broadcast packet
+ * @skb: incoming packet buffer
+ * @recv_if: incoming hard interface
+ *
+ * Return: NET_RX_SUCCESS on success or NET_RX_DROP in case of failure
+ */
+int batadv_recv_bcast_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if)
+{
+ struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+ struct batadv_orig_node *orig_node = NULL;
+ struct batadv_bcast_packet *bcast_packet;
+ struct ethhdr *ethhdr;
+ int hdr_size = sizeof(*bcast_packet);
+ s32 seq_diff;
+ u32 seqno;
+ int ret;
+
+ /* drop packet if it does not have the necessary minimum size */
+ if (unlikely(!pskb_may_pull(skb, hdr_size)))
+ goto free_skb;
+
+ ethhdr = eth_hdr(skb);
+
+ /* packet with broadcast indication but unicast recipient */
+ if (!is_broadcast_ether_addr(ethhdr->h_dest))
+ goto free_skb;
+
+ /* packet with broadcast/multicast sender address */
+ if (is_multicast_ether_addr(ethhdr->h_source))
+ goto free_skb;
+
+ /* ignore broadcasts sent by myself */
+ if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
+ goto free_skb;
+
+ bcast_packet = (struct batadv_bcast_packet *)skb->data;
+
+ /* ignore broadcasts originated by myself */
+ if (batadv_is_my_mac(bat_priv, bcast_packet->orig))
+ goto free_skb;
+
+ if (bcast_packet->ttl-- < 2)
+ goto free_skb;
+
+ orig_node = batadv_orig_hash_find(bat_priv, bcast_packet->orig);
+
+ if (!orig_node)
+ goto free_skb;
+
+ spin_lock_bh(&orig_node->bcast_seqno_lock);
+
+ seqno = ntohl(bcast_packet->seqno);
+ /* check whether the packet is a duplicate */
+ if (batadv_test_bit(orig_node->bcast_bits, orig_node->last_bcast_seqno,
+ seqno))
+ goto spin_unlock;
+
+ seq_diff = seqno - orig_node->last_bcast_seqno;
+
+ /* check whether the packet is old and the host just restarted. */
+ if (batadv_window_protected(bat_priv, seq_diff,
+ BATADV_BCAST_MAX_AGE,
+ &orig_node->bcast_seqno_reset, NULL))
+ goto spin_unlock;
+
+ /* mark broadcast in flood history, update window position
+ * if required.
+ */
+ if (batadv_bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
+ orig_node->last_bcast_seqno = seqno;
+
+ spin_unlock_bh(&orig_node->bcast_seqno_lock);
+
+ /* check whether this has been sent by another originator before */
+ if (batadv_bla_check_bcast_duplist(bat_priv, skb))
+ goto free_skb;
+
+ batadv_skb_set_priority(skb, sizeof(struct batadv_bcast_packet));
+
+ /* rebroadcast packet */
+ ret = batadv_forw_bcast_packet(bat_priv, skb, 0, false);
+ if (ret == NETDEV_TX_BUSY)
+ goto free_skb;
+
+ /* don't hand the broadcast up if it is from an originator
+ * from the same backbone.
+ */
+ if (batadv_bla_is_backbone_gw(skb, orig_node, hdr_size))
+ goto free_skb;
+
+ if (batadv_dat_snoop_incoming_arp_request(bat_priv, skb, hdr_size))
+ goto rx_success;
+ if (batadv_dat_snoop_incoming_arp_reply(bat_priv, skb, hdr_size))
+ goto rx_success;
+
+ batadv_dat_snoop_incoming_dhcp_ack(bat_priv, skb, hdr_size);
+
+ /* broadcast for me */
+ batadv_interface_rx(recv_if->soft_iface, skb, hdr_size, orig_node);
+
+rx_success:
+ ret = NET_RX_SUCCESS;
+ goto out;
+
+spin_unlock:
+ spin_unlock_bh(&orig_node->bcast_seqno_lock);
+free_skb:
+ kfree_skb(skb);
+ ret = NET_RX_DROP;
+out:
+ batadv_orig_node_put(orig_node);
+ return ret;
+}
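+
+/* Worked example for the sequence number window above (illustrative
+ * values, not from the original sources). Assume
+ * orig_node->last_bcast_seqno == 100:
+ *
+ *	seqno == 98  -> seq_diff == -2; if bit 98 is set in bcast_bits
+ *	                the packet is a duplicate and is dropped
+ *	seqno == 101 -> seq_diff == 1; the bit is marked, the window
+ *	                slides and last_bcast_seqno becomes 101
+ *	seqno far outside the window -> batadv_window_protected() treats
+ *	                the sender as restarted and resets the window
+ */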
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
new file mode 100644
index 0000000000..afd15b3879
--- /dev/null
+++ b/net/batman-adv/routing.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Simon Wunderlich
+ */
+
+#ifndef _NET_BATMAN_ADV_ROUTING_H_
+#define _NET_BATMAN_ADV_ROUTING_H_
+
+#include "main.h"
+
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
+bool batadv_check_management_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *hard_iface,
+ int header_len);
+void batadv_update_route(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ struct batadv_hard_iface *recv_if,
+ struct batadv_neigh_node *neigh_node);
+int batadv_recv_icmp_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if);
+int batadv_recv_unicast_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if);
+int batadv_recv_frag_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *iface);
+int batadv_recv_bcast_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if);
+int batadv_recv_unicast_tvlv(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if);
+int batadv_recv_unhandled_unicast_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if);
+struct batadv_neigh_node *
+batadv_find_router(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ struct batadv_hard_iface *recv_if);
+bool batadv_window_protected(struct batadv_priv *bat_priv, s32 seq_num_diff,
+ s32 seq_old_max_diff, unsigned long *last_reset,
+ bool *protection_started);
+
+#endif /* _NET_BATMAN_ADV_ROUTING_H_ */
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
new file mode 100644
index 0000000000..0379b12686
--- /dev/null
+++ b/net/batman-adv/send.c
@@ -0,0 +1,1135 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Simon Wunderlich
+ */
+
+#include "send.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/byteorder/generic.h>
+#include <linux/container_of.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/gfp.h>
+#include <linux/if.h>
+#include <linux/if_ether.h>
+#include <linux/jiffies.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/printk.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/workqueue.h>
+
+#include "distributed-arp-table.h"
+#include "fragmentation.h"
+#include "gateway_client.h"
+#include "hard-interface.h"
+#include "log.h"
+#include "network-coding.h"
+#include "originator.h"
+#include "routing.h"
+#include "soft-interface.h"
+#include "translation-table.h"
+
+static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
+
+/**
+ * batadv_send_skb_packet() - send an already prepared packet
+ * @skb: the packet to send
+ * @hard_iface: the interface to use to send the broadcast packet
+ * @dst_addr: the payload destination
+ *
+ * Send out an already prepared packet to the given destination address via
+ * the specified interface. The outer ethernet header is prepended here; if
+ * dst_addr is the broadcast address, the frame is broadcasted on hard_iface,
+ * otherwise it is sent as unicast to that address.
+ *
+ * Regardless of the return value, the skb is consumed.
+ *
+ * Return: A negative errno code is returned on a failure. A success does not
+ * guarantee the frame will be transmitted as it may be dropped due
+ * to congestion or traffic shaping.
+ */
+int batadv_send_skb_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *hard_iface,
+ const u8 *dst_addr)
+{
+ struct batadv_priv *bat_priv;
+ struct ethhdr *ethhdr;
+ int ret;
+
+ bat_priv = netdev_priv(hard_iface->soft_iface);
+
+ if (hard_iface->if_status != BATADV_IF_ACTIVE)
+ goto send_skb_err;
+
+ if (unlikely(!hard_iface->net_dev))
+ goto send_skb_err;
+
+ if (!(hard_iface->net_dev->flags & IFF_UP)) {
+ pr_warn("Interface %s is not up - can't send packet via that interface!\n",
+ hard_iface->net_dev->name);
+ goto send_skb_err;
+ }
+
+ /* push to the ethernet header. */
+ if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
+ goto send_skb_err;
+
+ skb_reset_mac_header(skb);
+
+ ethhdr = eth_hdr(skb);
+ ether_addr_copy(ethhdr->h_source, hard_iface->net_dev->dev_addr);
+ ether_addr_copy(ethhdr->h_dest, dst_addr);
+ ethhdr->h_proto = htons(ETH_P_BATMAN);
+
+ skb_set_network_header(skb, ETH_HLEN);
+ skb->protocol = htons(ETH_P_BATMAN);
+
+ skb->dev = hard_iface->net_dev;
+
+ /* Save a clone of the skb to use when decoding coded packets */
+ batadv_nc_skb_store_for_decoding(bat_priv, skb);
+
+ /* dev_queue_xmit() returns a negative result on error. However on
+ * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
+ * (which is > 0). This will not be treated as an error.
+ */
+ ret = dev_queue_xmit(skb);
+ return net_xmit_eval(ret);
+send_skb_err:
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+}
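+
+/* Hypothetical caller sketch (illustrative only): the function above
+ * prepends the outer ethernet header itself, so a caller merely
+ * reserves headroom and fills in the batman-adv payload:
+ *
+ *	skb = dev_alloc_skb(ETH_HLEN + NET_IP_ALIGN + packet_len);
+ *	if (!skb)
+ *		return;
+ *	skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN);
+ *	skb_put_data(skb, packet_buff, packet_len);
+ *	batadv_send_skb_packet(skb, hard_iface, neigh_addr);
+ *
+ * packet_buff, packet_len and neigh_addr are assumed caller state.
+ */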
+
+/**
+ * batadv_send_broadcast_skb() - Send broadcast packet via hard interface
+ * @skb: packet to be transmitted (with batadv header and no outer eth header)
+ * @hard_iface: outgoing interface
+ *
+ * Return: A negative errno code is returned on a failure. A success does not
+ * guarantee the frame will be transmitted as it may be dropped due
+ * to congestion or traffic shaping.
+ */
+int batadv_send_broadcast_skb(struct sk_buff *skb,
+ struct batadv_hard_iface *hard_iface)
+{
+ return batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr);
+}
+
+/**
+ * batadv_send_unicast_skb() - Send unicast packet to neighbor
+ * @skb: packet to be transmitted (with batadv header and no outer eth header)
+ * @neigh: neighbor which is used as next hop to destination
+ *
+ * Return: A negative errno code is returned on a failure. A success does not
+ * guarantee the frame will be transmitted as it may be dropped due
+ * to congestion or traffic shaping.
+ */
+int batadv_send_unicast_skb(struct sk_buff *skb,
+ struct batadv_neigh_node *neigh)
+{
+#ifdef CONFIG_BATMAN_ADV_BATMAN_V
+ struct batadv_hardif_neigh_node *hardif_neigh;
+#endif
+ int ret;
+
+ ret = batadv_send_skb_packet(skb, neigh->if_incoming, neigh->addr);
+
+#ifdef CONFIG_BATMAN_ADV_BATMAN_V
+ hardif_neigh = batadv_hardif_neigh_get(neigh->if_incoming, neigh->addr);
+
+ if (hardif_neigh && ret != NET_XMIT_DROP)
+ hardif_neigh->bat_v.last_unicast_tx = jiffies;
+
+ batadv_hardif_neigh_put(hardif_neigh);
+#endif
+
+ return ret;
+}
+
+/**
+ * batadv_send_skb_to_orig() - Lookup next-hop and transmit skb.
+ * @skb: Packet to be transmitted.
+ * @orig_node: Final destination of the packet.
+ * @recv_if: Interface used when receiving the packet (can be NULL).
+ *
+ * Looks up the best next-hop towards the passed originator and passes the
+ * skb on for preparation of MAC header. If the packet originated from this
+ * host, NULL can be passed as recv_if and no interface alternating is
+ * attempted.
+ *
+ * Return: negative errno code on a failure, -EINPROGRESS if the skb is
+ * buffered for later transmit or the NET_XMIT status returned by the
+ * lower routine if the packet has been passed down.
+ */
+int batadv_send_skb_to_orig(struct sk_buff *skb,
+ struct batadv_orig_node *orig_node,
+ struct batadv_hard_iface *recv_if)
+{
+ struct batadv_priv *bat_priv = orig_node->bat_priv;
+ struct batadv_neigh_node *neigh_node;
+ int ret;
+
+ /* batadv_find_router() increases neigh_node's refcount if found. */
+ neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
+ if (!neigh_node) {
+ ret = -EINVAL;
+ goto free_skb;
+ }
+
+ /* Check if the skb is too large to send in one piece and fragment
+ * it if needed.
+ */
+ if (atomic_read(&bat_priv->fragmentation) &&
+ skb->len > neigh_node->if_incoming->net_dev->mtu) {
+ /* Fragment and send packet. */
+ ret = batadv_frag_send_packet(skb, orig_node, neigh_node);
+ /* skb was consumed */
+ skb = NULL;
+
+ goto put_neigh_node;
+ }
+
+ /* try to network code the packet, if it is received on an interface
+ * (i.e. being forwarded). If the packet originates from this node or if
+ * network coding fails, then send the packet as usual.
+ */
+ if (recv_if && batadv_nc_skb_forward(skb, neigh_node))
+ ret = -EINPROGRESS;
+ else
+ ret = batadv_send_unicast_skb(skb, neigh_node);
+
+ /* skb was consumed */
+ skb = NULL;
+
+put_neigh_node:
+ batadv_neigh_node_put(neigh_node);
+free_skb:
+ kfree_skb(skb);
+
+ return ret;
+}
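+
+/* Worked example for the fragmentation branch above (assumed numbers):
+ * with bat_priv->fragmentation enabled and a next hop whose hard
+ * interface has an MTU of 1500, a 1532 byte skb takes the
+ * batadv_frag_send_packet() path, while a 1400 byte skb is sent in one
+ * piece, either network coded (when forwarded) or via
+ * batadv_send_unicast_skb().
+ */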
+
+/**
+ * batadv_send_skb_push_fill_unicast() - extend the buffer and initialize the
+ * common fields for unicast packets
+ * @skb: the skb carrying the unicast header to initialize
+ * @hdr_size: amount of bytes to push at the beginning of the skb
+ * @orig_node: the destination node
+ *
+ * Return: false if the buffer extension was not possible or true otherwise.
+ */
+static bool
+batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
+ struct batadv_orig_node *orig_node)
+{
+ struct batadv_unicast_packet *unicast_packet;
+ u8 ttvn = (u8)atomic_read(&orig_node->last_ttvn);
+
+ if (batadv_skb_head_push(skb, hdr_size) < 0)
+ return false;
+
+ unicast_packet = (struct batadv_unicast_packet *)skb->data;
+ unicast_packet->version = BATADV_COMPAT_VERSION;
+ /* batman packet type: unicast */
+ unicast_packet->packet_type = BATADV_UNICAST;
+ /* set unicast ttl */
+ unicast_packet->ttl = BATADV_TTL;
+ /* copy the destination for faster routing */
+ ether_addr_copy(unicast_packet->dest, orig_node->orig);
+ /* set the destination tt version number */
+ unicast_packet->ttvn = ttvn;
+
+ return true;
+}
+
+/**
+ * batadv_send_skb_prepare_unicast() - encapsulate an skb with a unicast header
+ * @skb: the skb containing the payload to encapsulate
+ * @orig_node: the destination node
+ *
+ * Return: false if the payload could not be encapsulated or true otherwise.
+ */
+static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
+ struct batadv_orig_node *orig_node)
+{
+ size_t uni_size = sizeof(struct batadv_unicast_packet);
+
+ return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
+}
+
+/**
+ * batadv_send_skb_prepare_unicast_4addr() - encapsulate an skb with a
+ * unicast 4addr header
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the skb containing the payload to encapsulate
+ * @orig: the destination node
+ * @packet_subtype: the unicast 4addr packet subtype to use
+ *
+ * Return: false if the payload could not be encapsulated or true otherwise.
+ */
+bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ struct batadv_orig_node *orig,
+ int packet_subtype)
+{
+ struct batadv_hard_iface *primary_if;
+ struct batadv_unicast_4addr_packet *uc_4addr_packet;
+ bool ret = false;
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ goto out;
+
+ /* Pull the header space and fill the unicast_packet substructure.
+ * We can do that because the first member of the uc_4addr_packet
+ * is of type struct batadv_unicast_packet
+ */
+ if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
+ orig))
+ goto out;
+
+ uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
+ uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
+ ether_addr_copy(uc_4addr_packet->src, primary_if->net_dev->dev_addr);
+ uc_4addr_packet->subtype = packet_subtype;
+ uc_4addr_packet->reserved = 0;
+
+ ret = true;
+out:
+ batadv_hardif_put(primary_if);
+ return ret;
+}
+
+/**
+ * batadv_send_skb_unicast() - encapsulate and send an skb via unicast
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: payload to send
+ * @packet_type: the batman unicast packet type to use
+ * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
+ * 4addr packets)
+ * @orig_node: the originator to send the packet to
+ * @vid: the vid to be used to search the translation table
+ *
+ * Wrap the given skb into a batman-adv unicast or unicast-4addr header
+ * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
+ * as packet_type. Then send this frame to the given orig_node.
+ *
+ * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ */
+int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, int packet_type,
+ int packet_subtype,
+ struct batadv_orig_node *orig_node,
+ unsigned short vid)
+{
+ struct batadv_unicast_packet *unicast_packet;
+ struct ethhdr *ethhdr;
+ int ret = NET_XMIT_DROP;
+
+ if (!orig_node)
+ goto out;
+
+ switch (packet_type) {
+ case BATADV_UNICAST:
+ if (!batadv_send_skb_prepare_unicast(skb, orig_node))
+ goto out;
+ break;
+ case BATADV_UNICAST_4ADDR:
+ if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
+ orig_node,
+ packet_subtype))
+ goto out;
+ break;
+ default:
+ /* this function supports UNICAST and UNICAST_4ADDR only. It
+ * should never be invoked with any other packet type
+ */
+ goto out;
+ }
+
+ /* skb->data might have been reallocated by
+ * batadv_send_skb_prepare_unicast{,_4addr}()
+ */
+ ethhdr = eth_hdr(skb);
+ unicast_packet = (struct batadv_unicast_packet *)skb->data;
+
+ /* inform the destination node that we are still missing a correct route
+ * for this client. The destination will receive this packet and will
+ * try to reroute it because the ttvn contained in the header is less
+ * than the current one
+ */
+ if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
+ unicast_packet->ttvn = unicast_packet->ttvn - 1;
+
+ ret = batadv_send_skb_to_orig(skb, orig_node, NULL);
+ /* skb was consumed */
+ skb = NULL;
+
+out:
+ kfree_skb(skb);
+ return ret;
+}
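+
+/* Illustrative sketch of the roaming hint above (hypothetical values):
+ * if the destination runs translation table version 42 and the client
+ * is marked as roaming, the header is sent with ttvn 41. The receiver
+ * sees a ttvn older than its own and therefore tries to reroute the
+ * frame based on its current translation table:
+ *
+ *	orig_node->last_ttvn == 42  ->  unicast_packet->ttvn == 41
+ */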
+
+/**
+ * batadv_send_skb_via_tt_generic() - send an skb via TT lookup
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: payload to send
+ * @packet_type: the batman unicast packet type to use
+ * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
+ * 4addr packets)
+ * @dst_hint: can be used to override the destination contained in the skb
+ * @vid: the vid to be used to search the translation table
+ *
+ * Look up the recipient node for the destination address in the ethernet
+ * header via the translation table. Wrap the given skb into a batman-adv
+ * unicast or unicast-4addr header depending on whether BATADV_UNICAST or
+ * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
+ * to the according destination node.
+ *
+ * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ */
+int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, int packet_type,
+ int packet_subtype, u8 *dst_hint,
+ unsigned short vid)
+{
+ struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
+ struct batadv_orig_node *orig_node;
+ u8 *src, *dst;
+ int ret;
+
+ src = ethhdr->h_source;
+ dst = ethhdr->h_dest;
+
+ /* if we got a hint, let's send the packet to this client (if any) */
+ if (dst_hint) {
+ src = NULL;
+ dst = dst_hint;
+ }
+ orig_node = batadv_transtable_search(bat_priv, src, dst, vid);
+
+ ret = batadv_send_skb_unicast(bat_priv, skb, packet_type,
+ packet_subtype, orig_node, vid);
+
+ batadv_orig_node_put(orig_node);
+
+ return ret;
+}
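+
+/* Usage sketch (hypothetical caller, illustrative only): a subsystem
+ * that already resolved the real destination MAC can override the one
+ * in the ethernet header via dst_hint:
+ *
+ *	ret = batadv_send_skb_via_tt_generic(bat_priv, skb,
+ *					     BATADV_UNICAST, 0,
+ *					     resolved_mac, vid);
+ *
+ * resolved_mac is an assumed, externally resolved address.
+ */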
+
+/**
+ * batadv_send_skb_via_gw() - send an skb via gateway lookup
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: payload to send
+ * @vid: the vid to be used to search the translation table
+ *
+ * Look up the currently selected gateway. Wrap the given skb into a batman-adv
+ * unicast header and send this frame to this gateway node.
+ *
+ * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ */
+int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ unsigned short vid)
+{
+ struct batadv_orig_node *orig_node;
+ int ret;
+
+ orig_node = batadv_gw_get_selected_orig(bat_priv);
+ ret = batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR,
+ BATADV_P_DATA, orig_node, vid);
+
+ batadv_orig_node_put(orig_node);
+
+ return ret;
+}
+
+/**
+ * batadv_forw_packet_free() - free a forwarding packet
+ * @forw_packet: The packet to free
+ * @dropped: whether the packet is freed because is dropped
+ *
+ * This frees a forwarding packet and releases any resources it might
+ * have claimed.
+ */
+void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet,
+ bool dropped)
+{
+ if (dropped)
+ kfree_skb(forw_packet->skb);
+ else
+ consume_skb(forw_packet->skb);
+
+ batadv_hardif_put(forw_packet->if_incoming);
+ batadv_hardif_put(forw_packet->if_outgoing);
+ if (forw_packet->queue_left)
+ atomic_inc(forw_packet->queue_left);
+ kfree(forw_packet);
+}
+
+/**
+ * batadv_forw_packet_alloc() - allocate a forwarding packet
+ * @if_incoming: The (optional) if_incoming to be grabbed
+ * @if_outgoing: The (optional) if_outgoing to be grabbed
+ * @queue_left: The (optional) queue counter to decrease
+ * @bat_priv: The bat_priv for the mesh of this forw_packet
+ * @skb: The raw packet this forwarding packet shall contain
+ *
+ * Allocates a forwarding packet and tries to get a reference to the
+ * (optional) if_incoming, if_outgoing and queue_left. If queue_left
+ * is NULL then bat_priv is optional, too.
+ *
+ * Return: An allocated forwarding packet on success, NULL otherwise.
+ */
+struct batadv_forw_packet *
+batadv_forw_packet_alloc(struct batadv_hard_iface *if_incoming,
+ struct batadv_hard_iface *if_outgoing,
+ atomic_t *queue_left,
+ struct batadv_priv *bat_priv,
+ struct sk_buff *skb)
+{
+ struct batadv_forw_packet *forw_packet;
+ const char *qname;
+
+ if (queue_left && !batadv_atomic_dec_not_zero(queue_left)) {
+ qname = "unknown";
+
+ if (queue_left == &bat_priv->bcast_queue_left)
+ qname = "bcast";
+
+ if (queue_left == &bat_priv->batman_queue_left)
+ qname = "batman";
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "%s queue is full\n", qname);
+
+ return NULL;
+ }
+
+ forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
+ if (!forw_packet)
+ goto err;
+
+ if (if_incoming)
+ kref_get(&if_incoming->refcount);
+
+ if (if_outgoing)
+ kref_get(&if_outgoing->refcount);
+
+ INIT_HLIST_NODE(&forw_packet->list);
+ INIT_HLIST_NODE(&forw_packet->cleanup_list);
+ forw_packet->skb = skb;
+ forw_packet->queue_left = queue_left;
+ forw_packet->if_incoming = if_incoming;
+ forw_packet->if_outgoing = if_outgoing;
+ forw_packet->num_packets = 0;
+
+ return forw_packet;
+
+err:
+ if (queue_left)
+ atomic_inc(queue_left);
+
+ return NULL;
+}
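+
+/* Lifecycle sketch (illustrative, mirroring the broadcast path below):
+ *
+ *	forw_packet = batadv_forw_packet_alloc(if_in, if_out,
+ *					       &bat_priv->bcast_queue_left,
+ *					       bat_priv, newskb);
+ *	if (!forw_packet)
+ *		goto err;	// queue full or OOM, newskb still ours
+ *	...
+ *	batadv_forw_packet_free(forw_packet, dropped);
+ *	// releases the skb, both interface refs and the queue slot
+ */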
+
+/**
+ * batadv_forw_packet_was_stolen() - check whether someone stole this packet
+ * @forw_packet: the forwarding packet to check
+ *
+ * This function checks whether the given forwarding packet was claimed by
+ * someone else for free().
+ *
+ * Return: True if someone stole it, false otherwise.
+ */
+static bool
+batadv_forw_packet_was_stolen(struct batadv_forw_packet *forw_packet)
+{
+ return !hlist_unhashed(&forw_packet->cleanup_list);
+}
+
+/**
+ * batadv_forw_packet_steal() - claim a forw_packet for free()
+ * @forw_packet: the forwarding packet to steal
+ * @lock: a key to the store to steal from (e.g. forw_{bat,bcast}_list_lock)
+ *
+ * This function tries to steal a specific forw_packet from global
+ * visibility for the purpose of getting it for free(). That means
+ * the caller is *not* allowed to requeue it afterwards.
+ *
+ * Return: True if stealing was successful. False if someone else stole it
+ * before us.
+ */
+bool batadv_forw_packet_steal(struct batadv_forw_packet *forw_packet,
+ spinlock_t *lock)
+{
+ /* did purging routine steal it earlier? */
+ spin_lock_bh(lock);
+ if (batadv_forw_packet_was_stolen(forw_packet)) {
+ spin_unlock_bh(lock);
+ return false;
+ }
+
+ hlist_del_init(&forw_packet->list);
+
+ /* Just to spot misuse of this function */
+ hlist_add_fake(&forw_packet->cleanup_list);
+
+ spin_unlock_bh(lock);
+ return true;
+}
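+
+/* Illustrative ownership protocol: a worker that wants to dispose of a
+ * forw_packet must win the steal; the loser must not touch the packet
+ * again (compare batadv_send_outstanding_bcast_packet() below):
+ *
+ *	if (batadv_forw_packet_steal(forw_packet, lock))
+ *		batadv_forw_packet_free(forw_packet, dropped);
+ *	// else: the purge routine claimed it and will free it
+ */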
+
+/**
+ * batadv_forw_packet_list_steal() - claim a list of forward packets for free()
+ * @forw_list: the to be stolen forward packets
+ * @cleanup_list: a backup pointer, to be able to dispose the packet later
+ * @hard_iface: the interface to steal forward packets from
+ *
+ * This function claims responsibility to free any forw_packet queued on the
+ * given hard_iface. If hard_iface is NULL forwarding packets on all hard
+ * interfaces will be claimed.
+ *
+ * The packets are being moved from the forw_list to the cleanup_list. This
+ * makes it possible for already running threads to notice the claim.
+ */
+static void
+batadv_forw_packet_list_steal(struct hlist_head *forw_list,
+ struct hlist_head *cleanup_list,
+ const struct batadv_hard_iface *hard_iface)
+{
+ struct batadv_forw_packet *forw_packet;
+ struct hlist_node *safe_tmp_node;
+
+ hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
+ forw_list, list) {
+ /* if purge_outstanding_packets() was called with an argument
+ * we delete only packets belonging to the given interface
+ */
+ if (hard_iface &&
+ forw_packet->if_incoming != hard_iface &&
+ forw_packet->if_outgoing != hard_iface)
+ continue;
+
+ hlist_del(&forw_packet->list);
+ hlist_add_head(&forw_packet->cleanup_list, cleanup_list);
+ }
+}
+
+/**
+ * batadv_forw_packet_list_free() - free a list of forward packets
+ * @head: a list of to be freed forw_packets
+ *
+ * This function cancels the scheduling of any packet in the provided list,
+ * waits for any possibly running packet forwarding thread to finish and
+ * finally, safely frees this forward packet.
+ *
+ * This function might sleep.
+ */
+static void batadv_forw_packet_list_free(struct hlist_head *head)
+{
+ struct batadv_forw_packet *forw_packet;
+ struct hlist_node *safe_tmp_node;
+
+ hlist_for_each_entry_safe(forw_packet, safe_tmp_node, head,
+ cleanup_list) {
+ cancel_delayed_work_sync(&forw_packet->delayed_work);
+
+ hlist_del(&forw_packet->cleanup_list);
+ batadv_forw_packet_free(forw_packet, true);
+ }
+}
+
+/**
+ * batadv_forw_packet_queue() - try to queue a forwarding packet
+ * @forw_packet: the forwarding packet to queue
+ * @lock: a key to the store (e.g. forw_{bat,bcast}_list_lock)
+ * @head: the shelf to queue it on (e.g. forw_{bat,bcast}_list)
+ * @send_time: timestamp (jiffies) when the packet is to be sent
+ *
+ * This function tries to (re)queue a forwarding packet. Requeuing
+ * is prevented if the according interface is shutting down
+ * (e.g. if batadv_forw_packet_list_steal() was called for this
+ * packet earlier).
+ *
+ * Calling batadv_forw_packet_queue() after a call to
+ * batadv_forw_packet_steal() is forbidden!
+ *
+ * Caller needs to ensure that forw_packet->delayed_work was initialized.
+ */
+static void batadv_forw_packet_queue(struct batadv_forw_packet *forw_packet,
+ spinlock_t *lock, struct hlist_head *head,
+ unsigned long send_time)
+{
+ spin_lock_bh(lock);
+
+ /* did purging routine steal it from us? */
+ if (batadv_forw_packet_was_stolen(forw_packet)) {
+ /* If you got it for free() without trouble, then
+ * don't get back into the queue after stealing...
+ */
+ WARN_ONCE(hlist_fake(&forw_packet->cleanup_list),
+ "Requeuing after batadv_forw_packet_steal() not allowed!\n");
+
+ spin_unlock_bh(lock);
+ return;
+ }
+
+ hlist_del_init(&forw_packet->list);
+ hlist_add_head(&forw_packet->list, head);
+
+ queue_delayed_work(batadv_event_workqueue,
+ &forw_packet->delayed_work,
+ send_time - jiffies);
+ spin_unlock_bh(lock);
+}
+
+/**
+ * batadv_forw_packet_bcast_queue() - try to queue a broadcast packet
+ * @bat_priv: the bat priv with all the soft interface information
+ * @forw_packet: the forwarding packet to queue
+ * @send_time: timestamp (jiffies) when the packet is to be sent
+ *
+ * This function tries to (re)queue a broadcast packet.
+ *
+ * Caller needs to ensure that forw_packet->delayed_work was initialized.
+ */
+static void
+batadv_forw_packet_bcast_queue(struct batadv_priv *bat_priv,
+ struct batadv_forw_packet *forw_packet,
+ unsigned long send_time)
+{
+ batadv_forw_packet_queue(forw_packet, &bat_priv->forw_bcast_list_lock,
+ &bat_priv->forw_bcast_list, send_time);
+}
+
+/**
+ * batadv_forw_packet_ogmv1_queue() - try to queue an OGMv1 packet
+ * @bat_priv: the bat priv with all the soft interface information
+ * @forw_packet: the forwarding packet to queue
+ * @send_time: timestamp (jiffies) when the packet is to be sent
+ *
+ * This function tries to (re)queue an OGMv1 packet.
+ *
+ * Caller needs to ensure that forw_packet->delayed_work was initialized.
+ */
+void batadv_forw_packet_ogmv1_queue(struct batadv_priv *bat_priv,
+ struct batadv_forw_packet *forw_packet,
+ unsigned long send_time)
+{
+ batadv_forw_packet_queue(forw_packet, &bat_priv->forw_bat_list_lock,
+ &bat_priv->forw_bat_list, send_time);
+}
+
+/**
+ * batadv_forw_bcast_packet_to_list() - queue broadcast packet for transmissions
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: broadcast packet to add
+ * @delay: number of jiffies to wait before sending
+ * @own_packet: true if it is a self-generated broadcast packet
+ * @if_in: the interface where the packet was received on
+ * @if_out: the outgoing interface to queue on
+ *
+ * Adds a broadcast packet to the queue and sets up timers. Broadcast packets
+ * are sent multiple times to increase the probability of being received.
+ *
+ * This call clones the given skb, hence the caller needs to take into
+ * account that the data segment of the original skb might not be
+ * modifiable anymore.
+ *
+ * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
+ */
+static int batadv_forw_bcast_packet_to_list(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ unsigned long delay,
+ bool own_packet,
+ struct batadv_hard_iface *if_in,
+ struct batadv_hard_iface *if_out)
+{
+ struct batadv_forw_packet *forw_packet;
+ unsigned long send_time = jiffies;
+ struct sk_buff *newskb;
+
+ newskb = skb_clone(skb, GFP_ATOMIC);
+ if (!newskb)
+ goto err;
+
+ forw_packet = batadv_forw_packet_alloc(if_in, if_out,
+ &bat_priv->bcast_queue_left,
+ bat_priv, newskb);
+ if (!forw_packet)
+ goto err_packet_free;
+
+ forw_packet->own = own_packet;
+
+ INIT_DELAYED_WORK(&forw_packet->delayed_work,
+ batadv_send_outstanding_bcast_packet);
+
+ send_time += delay ? delay : msecs_to_jiffies(5);
+
+ batadv_forw_packet_bcast_queue(bat_priv, forw_packet, send_time);
+ return NETDEV_TX_OK;
+
+err_packet_free:
+ kfree_skb(newskb);
+err:
+ return NETDEV_TX_BUSY;
+}
+
+/**
+ * batadv_forw_bcast_packet_if() - forward and queue a broadcast packet
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: broadcast packet to add
+ * @delay: number of jiffies to wait before sending
+ * @own_packet: true if it is a self-generated broadcast packet
+ * @if_in: the interface where the packet was received on
+ * @if_out: the outgoing interface to forward to
+ *
+ * Transmits a broadcast packet on the specified interface either immediately
+ * or, if a delay is given, after that delay. Furthermore, queues additional
+ * retransmissions if this interface is a wireless one.
+ *
+ * This call clones the given skb, hence the caller needs to take into
+ * account that the data segment of the original skb might not be
+ * modifiable anymore.
+ *
+ * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
+ */
+static int batadv_forw_bcast_packet_if(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ unsigned long delay,
+ bool own_packet,
+ struct batadv_hard_iface *if_in,
+ struct batadv_hard_iface *if_out)
+{
+ unsigned int num_bcasts = if_out->num_bcasts;
+ struct sk_buff *newskb;
+ int ret = NETDEV_TX_OK;
+
+ if (!delay) {
+ newskb = skb_clone(skb, GFP_ATOMIC);
+ if (!newskb)
+ return NETDEV_TX_BUSY;
+
+ batadv_send_broadcast_skb(newskb, if_out);
+ num_bcasts--;
+ }
+
+ /* delayed broadcast or rebroadcasts? */
+ if (num_bcasts >= 1) {
+ BATADV_SKB_CB(skb)->num_bcasts = num_bcasts;
+
+ ret = batadv_forw_bcast_packet_to_list(bat_priv, skb, delay,
+ own_packet, if_in,
+ if_out);
+ }
+
+ return ret;
+}
+
+/**
+ * batadv_send_no_broadcast() - check whether (re)broadcast is necessary
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: broadcast packet to check
+ * @own_packet: true if it is a self-generated broadcast packet
+ * @if_out: the outgoing interface checked and considered for (re)broadcast
+ *
+ * Return: False if a packet needs to be (re)broadcasted on the given interface,
+ * true otherwise.
+ */
+static bool batadv_send_no_broadcast(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, bool own_packet,
+ struct batadv_hard_iface *if_out)
+{
+ struct batadv_hardif_neigh_node *neigh_node = NULL;
+ struct batadv_bcast_packet *bcast_packet;
+ u8 *orig_neigh;
+ u8 *neigh_addr;
+ char *type;
+ int ret;
+
+ if (!own_packet) {
+ neigh_addr = eth_hdr(skb)->h_source;
+ neigh_node = batadv_hardif_neigh_get(if_out,
+ neigh_addr);
+ }
+
+ bcast_packet = (struct batadv_bcast_packet *)skb->data;
+ orig_neigh = neigh_node ? neigh_node->orig : NULL;
+
+ ret = batadv_hardif_no_broadcast(if_out, bcast_packet->orig,
+ orig_neigh);
+
+ batadv_hardif_neigh_put(neigh_node);
+
+ /* ok, may broadcast */
+ if (!ret)
+ return false;
+
+ /* no broadcast */
+ switch (ret) {
+ case BATADV_HARDIF_BCAST_NORECIPIENT:
+ type = "no neighbor";
+ break;
+ case BATADV_HARDIF_BCAST_DUPFWD:
+ type = "single neighbor is source";
+ break;
+ case BATADV_HARDIF_BCAST_DUPORIG:
+ type = "single neighbor is originator";
+ break;
+ default:
+ type = "unknown";
+ }
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "BCAST packet from orig %pM on %s suppressed: %s\n",
+ bcast_packet->orig,
+ if_out->net_dev->name, type);
+
+ return true;
+}
+
+/**
+ * __batadv_forw_bcast_packet() - forward and queue a broadcast packet
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: broadcast packet to add
+ * @delay: number of jiffies to wait before sending
+ * @own_packet: true if it is a self-generated broadcast packet
+ *
+ * Transmits a broadcast packet either immediately or, if a delay is given,
+ * after that delay. Furthermore, queues additional retransmissions on wireless
+ * interfaces.
+ *
+ * This call clones the given skb, hence the caller needs to take into
+ * account that the data segment of the given skb might not be
+ * modifiable anymore.
+ *
+ * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
+ */
+static int __batadv_forw_bcast_packet(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ unsigned long delay,
+ bool own_packet)
+{
+ struct batadv_hard_iface *hard_iface;
+ struct batadv_hard_iface *primary_if;
+ int ret = NETDEV_TX_OK;
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ return NETDEV_TX_BUSY;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ if (hard_iface->soft_iface != bat_priv->soft_iface)
+ continue;
+
+ if (!kref_get_unless_zero(&hard_iface->refcount))
+ continue;
+
+ if (batadv_send_no_broadcast(bat_priv, skb, own_packet,
+ hard_iface)) {
+ batadv_hardif_put(hard_iface);
+ continue;
+ }
+
+ ret = batadv_forw_bcast_packet_if(bat_priv, skb, delay,
+ own_packet, primary_if,
+ hard_iface);
+ batadv_hardif_put(hard_iface);
+
+ if (ret == NETDEV_TX_BUSY)
+ break;
+ }
+ rcu_read_unlock();
+
+ batadv_hardif_put(primary_if);
+ return ret;
+}
+
+/**
+ * batadv_forw_bcast_packet() - forward and queue a broadcast packet
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: broadcast packet to add
+ * @delay: number of jiffies to wait before sending
+ * @own_packet: true if it is a self-generated broadcast packet
+ *
+ * Transmits a broadcast packet either immediately or, if a delay is given,
+ * after that delay. Furthermore, queues additional retransmissions on wireless
+ * interfaces.
+ *
+ * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
+ */
+int batadv_forw_bcast_packet(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ unsigned long delay,
+ bool own_packet)
+{
+ return __batadv_forw_bcast_packet(bat_priv, skb, delay, own_packet);
+}
+
+/**
+ * batadv_send_bcast_packet() - send and queue a broadcast packet
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: broadcast packet to add
+ * @delay: number of jiffies to wait before sending
+ * @own_packet: true if it is a self-generated broadcast packet
+ *
+ * Transmits a broadcast packet either immediately or, if a delay is given,
+ * after that delay. Furthermore, queues additional retransmissions on wireless
+ * interfaces.
+ *
+ * Consumes the provided skb.
+ */
+void batadv_send_bcast_packet(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ unsigned long delay,
+ bool own_packet)
+{
+ __batadv_forw_bcast_packet(bat_priv, skb, delay, own_packet);
+ consume_skb(skb);
+}
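+
+/* Usage note (sketch): unlike batadv_forw_bcast_packet(), this variant
+ * always consumes the provided skb, so callers do not check a return
+ * value:
+ *
+ *	batadv_send_bcast_packet(bat_priv, skb, brd_delay, true);
+ *	// skb must not be touched afterwards
+ */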
+
+/**
+ * batadv_forw_packet_bcasts_left() - check if a retransmission is necessary
+ * @forw_packet: the forwarding packet to check
+ *
+ * Checks whether the given forwarding packet has any (re)transmissions left,
+ * i.e. whether the per-skb broadcast counter set up for its outgoing
+ * interface has not been used up yet.
+ *
+ * Return: True if (re)transmissions are left, false otherwise.
+ */
+static bool
+batadv_forw_packet_bcasts_left(struct batadv_forw_packet *forw_packet)
+{
+ return BATADV_SKB_CB(forw_packet->skb)->num_bcasts;
+}
+
+/**
+ * batadv_forw_packet_bcasts_dec() - decrement retransmission counter of a
+ * packet
+ * @forw_packet: the packet to decrease the counter for
+ */
+static void
+batadv_forw_packet_bcasts_dec(struct batadv_forw_packet *forw_packet)
+{
+ BATADV_SKB_CB(forw_packet->skb)->num_bcasts--;
+}
+
+/**
+ * batadv_forw_packet_is_rebroadcast() - check packet for previous transmissions
+ * @forw_packet: the packet to check
+ *
+ * Return: True if this packet was transmitted before, false otherwise.
+ */
+bool batadv_forw_packet_is_rebroadcast(struct batadv_forw_packet *forw_packet)
+{
+ unsigned char num_bcasts = BATADV_SKB_CB(forw_packet->skb)->num_bcasts;
+
+ return num_bcasts != forw_packet->if_outgoing->num_bcasts;
+}
+
+/**
+ * batadv_send_outstanding_bcast_packet() - transmit a queued broadcast packet
+ * @work: work queue item
+ *
+ * Transmits a queued broadcast packet and if necessary reschedules it.
+ */
+static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
+{
+ unsigned long send_time = jiffies + msecs_to_jiffies(5);
+ struct batadv_forw_packet *forw_packet;
+ struct delayed_work *delayed_work;
+ struct batadv_priv *bat_priv;
+ struct sk_buff *skb1;
+ bool dropped = false;
+
+ delayed_work = to_delayed_work(work);
+ forw_packet = container_of(delayed_work, struct batadv_forw_packet,
+ delayed_work);
+ bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
+
+ if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING) {
+ dropped = true;
+ goto out;
+ }
+
+ if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet)) {
+ dropped = true;
+ goto out;
+ }
+
+ /* send a copy of the saved skb */
+ skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
+ if (!skb1)
+ goto out;
+
+ batadv_send_broadcast_skb(skb1, forw_packet->if_outgoing);
+ batadv_forw_packet_bcasts_dec(forw_packet);
+
+ if (batadv_forw_packet_bcasts_left(forw_packet)) {
+ batadv_forw_packet_bcast_queue(bat_priv, forw_packet,
+ send_time);
+ return;
+ }
+
+out:
+ /* do we get something for free()? */
+ if (batadv_forw_packet_steal(forw_packet,
+ &bat_priv->forw_bcast_list_lock))
+ batadv_forw_packet_free(forw_packet, dropped);
+}
+
+/**
+ * batadv_purge_outstanding_packets() - stop/purge scheduled bcast/OGMv1 packets
+ * @bat_priv: the bat priv with all the soft interface information
+ * @hard_iface: the hard interface to cancel and purge bcast/ogm packets on
+ *
+ * This method cancels and purges any broadcast and OGMv1 packet on the given
+ * hard_iface. If hard_iface is NULL, broadcast and OGMv1 packets on all hard
+ * interfaces will be canceled and purged.
+ *
+ * This function might sleep.
+ */
+void
+batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
+ const struct batadv_hard_iface *hard_iface)
+{
+ struct hlist_head head = HLIST_HEAD_INIT;
+
+ if (hard_iface)
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "%s(): %s\n",
+ __func__, hard_iface->net_dev->name);
+ else
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "%s()\n", __func__);
+
+ /* claim bcast list for free() */
+ spin_lock_bh(&bat_priv->forw_bcast_list_lock);
+ batadv_forw_packet_list_steal(&bat_priv->forw_bcast_list, &head,
+ hard_iface);
+ spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
+
+ /* claim batman packet list for free() */
+ spin_lock_bh(&bat_priv->forw_bat_list_lock);
+ batadv_forw_packet_list_steal(&bat_priv->forw_bat_list, &head,
+ hard_iface);
+ spin_unlock_bh(&bat_priv->forw_bat_list_lock);
+
+ /* then cancel or wait for packet workers to finish and free */
+ batadv_forw_packet_list_free(&head);
+}
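+
+/* Teardown sketch (illustrative): since this function sleeps in
+ * cancel_delayed_work_sync(), a typical caller purges from process
+ * context when an interface goes away:
+ *
+ *	batadv_purge_outstanding_packets(bat_priv, hard_iface);
+ *	// all queued bcast/OGMv1 work for hard_iface is now gone
+ *
+ *	batadv_purge_outstanding_packets(bat_priv, NULL);
+ *	// full purge across all hard interfaces, e.g. on mesh shutdown
+ */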
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
new file mode 100644
index 0000000000..08af251b76
--- /dev/null
+++ b/net/batman-adv/send.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Simon Wunderlich
+ */
+
+#ifndef _NET_BATMAN_ADV_SEND_H_
+#define _NET_BATMAN_ADV_SEND_H_
+
+#include "main.h"
+
+#include <linux/compiler.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <uapi/linux/batadv_packet.h>
+
+void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet,
+ bool dropped);
+struct batadv_forw_packet *
+batadv_forw_packet_alloc(struct batadv_hard_iface *if_incoming,
+ struct batadv_hard_iface *if_outgoing,
+ atomic_t *queue_left,
+ struct batadv_priv *bat_priv,
+ struct sk_buff *skb);
+bool batadv_forw_packet_steal(struct batadv_forw_packet *packet, spinlock_t *l);
+void batadv_forw_packet_ogmv1_queue(struct batadv_priv *bat_priv,
+ struct batadv_forw_packet *forw_packet,
+ unsigned long send_time);
+bool batadv_forw_packet_is_rebroadcast(struct batadv_forw_packet *forw_packet);
+
+int batadv_send_skb_to_orig(struct sk_buff *skb,
+ struct batadv_orig_node *orig_node,
+ struct batadv_hard_iface *recv_if);
+int batadv_send_skb_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *hard_iface,
+ const u8 *dst_addr);
+int batadv_send_broadcast_skb(struct sk_buff *skb,
+ struct batadv_hard_iface *hard_iface);
+int batadv_send_unicast_skb(struct sk_buff *skb,
+ struct batadv_neigh_node *neigh_node);
+int batadv_forw_bcast_packet(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ unsigned long delay,
+ bool own_packet);
+void batadv_send_bcast_packet(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ unsigned long delay,
+ bool own_packet);
+void
+batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
+ const struct batadv_hard_iface *hard_iface);
+bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ struct batadv_orig_node *orig_node,
+ int packet_subtype);
+int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, int packet_type,
+ int packet_subtype,
+ struct batadv_orig_node *orig_node,
+ unsigned short vid);
+int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, int packet_type,
+ int packet_subtype, u8 *dst_hint,
+ unsigned short vid);
+int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ unsigned short vid);
+
+/**
+ * batadv_send_skb_via_tt() - send an skb via TT lookup
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the payload to send
+ * @dst_hint: can be used to override the destination contained in the skb
+ * @vid: the vid to be used to search the translation table
+ *
+ * Look up the recipient node for the destination address in the ethernet
+ * header via the translation table. Wrap the given skb into a batman-adv
+ * unicast header. Then send this frame to the according destination node.
+ *
+ * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ */
+static inline int batadv_send_skb_via_tt(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, u8 *dst_hint,
+ unsigned short vid)
+{
+ return batadv_send_skb_via_tt_generic(bat_priv, skb, BATADV_UNICAST, 0,
+ dst_hint, vid);
+}
+
+/**
+ * batadv_send_skb_via_tt_4addr() - send an skb via TT lookup
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the payload to send
+ * @packet_subtype: the unicast 4addr packet subtype to use
+ * @dst_hint: can be used to override the destination contained in the skb
+ * @vid: the vid to be used to search the translation table
+ *
+ * Look up the recipient node for the destination address in the ethernet
+ * header via the translation table. Wrap the given skb into a batman-adv
+ * unicast-4addr header. Then send this frame to the according destination
+ * node.
+ *
+ * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ */
+static inline int batadv_send_skb_via_tt_4addr(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ int packet_subtype,
+ u8 *dst_hint,
+ unsigned short vid)
+{
+ return batadv_send_skb_via_tt_generic(bat_priv, skb,
+ BATADV_UNICAST_4ADDR,
+ packet_subtype, dst_hint, vid);
+}
+
+#endif /* _NET_BATMAN_ADV_SEND_H_ */
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
new file mode 100644
index 0000000000..1bf1232a4f
--- /dev/null
+++ b/net/batman-adv/soft-interface.c
@@ -0,0 +1,1130 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Simon Wunderlich
+ */
+
+#include "soft-interface.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/byteorder/generic.h>
+#include <linux/cache.h>
+#include <linux/compiler.h>
+#include <linux/container_of.h>
+#include <linux/cpumask.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/gfp.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/jiffies.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/netdevice.h>
+#include <linux/netlink.h>
+#include <linux/percpu.h>
+#include <linux/random.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/socket.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <net/net_namespace.h>
+#include <net/netlink.h>
+#include <uapi/linux/batadv_packet.h>
+#include <uapi/linux/batman_adv.h>
+
+#include "bat_algo.h"
+#include "bridge_loop_avoidance.h"
+#include "distributed-arp-table.h"
+#include "gateway_client.h"
+#include "hard-interface.h"
+#include "multicast.h"
+#include "network-coding.h"
+#include "send.h"
+#include "translation-table.h"
+
+/**
+ * batadv_skb_head_push() - Increase header size and move (push) head pointer
+ * @skb: packet buffer which should be modified
+ * @len: number of bytes to add
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+int batadv_skb_head_push(struct sk_buff *skb, unsigned int len)
+{
+ int result;
+
+ /* TODO: We must check if we can release all references to non-payload
+ * data using __skb_header_release in our skbs to allow skb_cow_header
+ * to work optimally. This means that those skbs are not allowed to read
+ * or write any data which is before the current position of skb->data
+ * after that call and thus allow other skbs with the same data buffer
+ * to write freely in that area.
+ */
+ result = skb_cow_head(skb, len);
+ if (result < 0)
+ return result;
+
+ skb_push(skb, len);
+ return 0;
+}
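+
+/* Illustrative use (mirroring the send path): growing an skb by a
+ * batman-adv unicast header before filling it:
+ *
+ *	if (batadv_skb_head_push(skb, sizeof(*unicast_packet)) < 0)
+ *		return false;	// no headroom could be made available
+ *	unicast_packet = (struct batadv_unicast_packet *)skb->data;
+ */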
+
+static int batadv_interface_open(struct net_device *dev)
+{
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int batadv_interface_release(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ return 0;
+}
+
+/**
+ * batadv_sum_counter() - Sum the cpu-local counters for index 'idx'
+ * @bat_priv: the bat priv with all the soft interface information
+ * @idx: index of counter to sum up
+ *
+ * Return: sum of all cpu-local counters
+ */
+static u64 batadv_sum_counter(struct batadv_priv *bat_priv, size_t idx)
+{
+ u64 *counters, sum = 0;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ counters = per_cpu_ptr(bat_priv->bat_counters, cpu);
+ sum += counters[idx];
+ }
+
+ return sum;
+}
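+
+/* Worked example (illustrative values): each CPU bumps its own slot
+ * locklessly via batadv_inc_counter(); readers fold the slots, e.g.
+ * with CPU0 holding 10 and CPU1 holding 32 for BATADV_CNT_TX,
+ * batadv_sum_counter(bat_priv, BATADV_CNT_TX) returns 42.
+ */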
+
+static struct net_device_stats *batadv_interface_stats(struct net_device *dev)
+{
+ struct batadv_priv *bat_priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+
+ stats->tx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_TX);
+ stats->tx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_TX_BYTES);
+ stats->tx_dropped = batadv_sum_counter(bat_priv, BATADV_CNT_TX_DROPPED);
+ stats->rx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_RX);
+ stats->rx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_RX_BYTES);
+ return stats;
+}
+
+static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
+{
+ struct batadv_priv *bat_priv = netdev_priv(dev);
+ struct batadv_softif_vlan *vlan;
+ struct sockaddr *addr = p;
+ u8 old_addr[ETH_ALEN];
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ ether_addr_copy(old_addr, dev->dev_addr);
+ eth_hw_addr_set(dev, addr->sa_data);
+
+ /* only modify transtable if it has been initialized before */
+ if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
+ return 0;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
+ batadv_tt_local_remove(bat_priv, old_addr, vlan->vid,
+ "mac address changed", false);
+ batadv_tt_local_add(dev, addr->sa_data, vlan->vid,
+ BATADV_NULL_IFINDEX, BATADV_NO_MARK);
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct batadv_priv *bat_priv = netdev_priv(dev);
+
+ /* check ranges */
+ if (new_mtu < ETH_MIN_MTU || new_mtu > batadv_hardif_min_mtu(dev))
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+ bat_priv->mtu_set_by_user = new_mtu;
+
+ return 0;
+}
+
+/**
+ * batadv_interface_set_rx_mode() - set the rx mode of a device
+ * @dev: registered network device to modify
+ *
+ * We do not actually need to set any rx filters for the virtual batman
+ * soft interface. However, a dummy handler enables a user to, for
+ * instance, set static multicast listeners.
+ */
+static void batadv_interface_set_rx_mode(struct net_device *dev)
+{
+}
+
+static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
+ struct net_device *soft_iface)
+{
+ struct ethhdr *ethhdr;
+ struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+ struct batadv_hard_iface *primary_if = NULL;
+ struct batadv_bcast_packet *bcast_packet;
+ static const u8 stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00,
+ 0x00, 0x00};
+ static const u8 ectp_addr[ETH_ALEN] = {0xCF, 0x00, 0x00, 0x00,
+ 0x00, 0x00};
+ enum batadv_dhcp_recipient dhcp_rcp = BATADV_DHCP_NO;
+ u8 *dst_hint = NULL, chaddr[ETH_ALEN];
+ struct vlan_ethhdr *vhdr;
+ unsigned int header_len = 0;
+ int data_len = skb->len, ret;
+ unsigned long brd_delay = 0;
+ bool do_bcast = false, client_added;
+ unsigned short vid;
+ u32 seqno;
+ int gw_mode;
+ enum batadv_forw_mode forw_mode = BATADV_FORW_BCAST;
+ int mcast_is_routable = 0;
+ int network_offset = ETH_HLEN;
+ __be16 proto;
+
+ if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
+ goto dropped;
+
+ /* reset control block to avoid left overs from previous users */
+ memset(skb->cb, 0, sizeof(struct batadv_skb_cb));
+
+ netif_trans_update(soft_iface);
+ vid = batadv_get_vid(skb, 0);
+
+ skb_reset_mac_header(skb);
+ ethhdr = eth_hdr(skb);
+
+ proto = ethhdr->h_proto;
+
+ switch (ntohs(proto)) {
+ case ETH_P_8021Q:
+ if (!pskb_may_pull(skb, sizeof(*vhdr)))
+ goto dropped;
+ vhdr = vlan_eth_hdr(skb);
+ proto = vhdr->h_vlan_encapsulated_proto;
+
+ /* drop batman-in-batman packets to prevent loops */
+ if (proto != htons(ETH_P_BATMAN)) {
+ network_offset += VLAN_HLEN;
+ break;
+ }
+
+ fallthrough;
+ case ETH_P_BATMAN:
+ goto dropped;
+ }
+
+ skb_set_network_header(skb, network_offset);
+
+ if (batadv_bla_tx(bat_priv, skb, vid))
+ goto dropped;
+
+ /* skb->data might have been reallocated by batadv_bla_tx() */
+ ethhdr = eth_hdr(skb);
+
+ /* Register the client MAC in the transtable */
+ if (!is_multicast_ether_addr(ethhdr->h_source) &&
+ !batadv_bla_is_loopdetect_mac(ethhdr->h_source)) {
+ client_added = batadv_tt_local_add(soft_iface, ethhdr->h_source,
+ vid, skb->skb_iif,
+ skb->mark);
+ if (!client_added)
+ goto dropped;
+ }
+
+ /* Snoop address candidates from DHCPACKs for early DAT filling */
+ batadv_dat_snoop_outgoing_dhcp_ack(bat_priv, skb, proto, vid);
+
+ /* don't accept stp packets. STP does not help in meshes.
+ * better use the bridge loop avoidance ...
+ *
+ * The same goes for ECTP sent at least by some Cisco Switches,
+ * it might confuse the mesh when used with bridge loop avoidance.
+ */
+ if (batadv_compare_eth(ethhdr->h_dest, stp_addr))
+ goto dropped;
+
+ if (batadv_compare_eth(ethhdr->h_dest, ectp_addr))
+ goto dropped;
+
+ gw_mode = atomic_read(&bat_priv->gw.mode);
+ if (is_multicast_ether_addr(ethhdr->h_dest)) {
+ /* if gw mode is off, broadcast every packet */
+ if (gw_mode == BATADV_GW_MODE_OFF) {
+ do_bcast = true;
+ goto send;
+ }
+
+ dhcp_rcp = batadv_gw_dhcp_recipient_get(skb, &header_len,
+ chaddr);
+ /* skb->data may have been modified by
+ * batadv_gw_dhcp_recipient_get()
+ */
+ ethhdr = eth_hdr(skb);
+ /* if gw_mode is on, broadcast any non-DHCP message.
+ * All the DHCP packets are going to be sent as unicast
+ */
+ if (dhcp_rcp == BATADV_DHCP_NO) {
+ do_bcast = true;
+ goto send;
+ }
+
+ if (dhcp_rcp == BATADV_DHCP_TO_CLIENT)
+ dst_hint = chaddr;
+ else if ((gw_mode == BATADV_GW_MODE_SERVER) &&
+ (dhcp_rcp == BATADV_DHCP_TO_SERVER))
+ /* gateways should not forward any DHCP message if
+ * directed to a DHCP server
+ */
+ goto dropped;
+
+send:
+ if (do_bcast && !is_broadcast_ether_addr(ethhdr->h_dest)) {
+ forw_mode = batadv_mcast_forw_mode(bat_priv, skb,
+ &mcast_is_routable);
+ switch (forw_mode) {
+ case BATADV_FORW_BCAST:
+ break;
+ case BATADV_FORW_UCASTS:
+ do_bcast = false;
+ break;
+ case BATADV_FORW_NONE:
+ fallthrough;
+ default:
+ goto dropped;
+ }
+ }
+ }
+
+ batadv_skb_set_priority(skb, 0);
+
+ /* ethernet packet should be broadcasted */
+ if (do_bcast) {
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ goto dropped;
+
+ /* in case of ARP request, we do not immediately broadcast the
+ * packet, instead we first wait for DAT to try to retrieve the
+ * correct ARP entry
+ */
+ if (batadv_dat_snoop_outgoing_arp_request(bat_priv, skb))
+ brd_delay = msecs_to_jiffies(ARP_REQ_DELAY);
+
+ if (batadv_skb_head_push(skb, sizeof(*bcast_packet)) < 0)
+ goto dropped;
+
+ bcast_packet = (struct batadv_bcast_packet *)skb->data;
+ bcast_packet->version = BATADV_COMPAT_VERSION;
+ bcast_packet->ttl = BATADV_TTL - 1;
+
+ /* batman packet type: broadcast */
+ bcast_packet->packet_type = BATADV_BCAST;
+ bcast_packet->reserved = 0;
+
+ /* hw address of first interface is the orig mac because only
+ * this mac is known throughout the mesh
+ */
+ ether_addr_copy(bcast_packet->orig,
+ primary_if->net_dev->dev_addr);
+
+ /* set broadcast sequence number */
+ seqno = atomic_inc_return(&bat_priv->bcast_seqno);
+ bcast_packet->seqno = htonl(seqno);
+
+ batadv_send_bcast_packet(bat_priv, skb, brd_delay, true);
+ /* unicast packet */
+ } else {
+ /* DHCP packets going to a server will use the GW feature */
+ if (dhcp_rcp == BATADV_DHCP_TO_SERVER) {
+ ret = batadv_gw_out_of_range(bat_priv, skb);
+ if (ret)
+ goto dropped;
+ ret = batadv_send_skb_via_gw(bat_priv, skb, vid);
+ } else if (forw_mode == BATADV_FORW_UCASTS) {
+ ret = batadv_mcast_forw_send(bat_priv, skb, vid,
+ mcast_is_routable);
+ } else {
+ if (batadv_dat_snoop_outgoing_arp_request(bat_priv,
+ skb))
+ goto dropped;
+
+ batadv_dat_snoop_outgoing_arp_reply(bat_priv, skb);
+
+ ret = batadv_send_skb_via_tt(bat_priv, skb, dst_hint,
+ vid);
+ }
+ if (ret != NET_XMIT_SUCCESS)
+ goto dropped_freed;
+ }
+
+ batadv_inc_counter(bat_priv, BATADV_CNT_TX);
+ batadv_add_counter(bat_priv, BATADV_CNT_TX_BYTES, data_len);
+ goto end;
+
+dropped:
+ kfree_skb(skb);
+dropped_freed:
+ batadv_inc_counter(bat_priv, BATADV_CNT_TX_DROPPED);
+end:
+ batadv_hardif_put(primary_if);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * batadv_interface_rx() - receive ethernet frame on local batman-adv interface
+ * @soft_iface: local interface which will receive the ethernet frame
+ * @skb: ethernet frame for @soft_iface
+ * @hdr_size: size of already parsed batman-adv header
+ * @orig_node: originator from which the batman-adv packet was sent
+ *
+ * Sends an ethernet frame to the receive path of the local @soft_iface.
+ * skb->data still points to the batman-adv header with the size @hdr_size.
+ * The caller has to have parsed this header already and made sure that at least
+ * @hdr_size bytes are still available for pull in @skb.
+ *
+ * The packet may still get dropped. This can happen when the encapsulated
+ * ethernet frame is invalid or again contains a batman-adv packet. Also,
+ * unicast packets are dropped directly when they were sent between two
+ * isolated clients.
+ */
+void batadv_interface_rx(struct net_device *soft_iface,
+ struct sk_buff *skb, int hdr_size,
+ struct batadv_orig_node *orig_node)
+{
+ struct batadv_bcast_packet *batadv_bcast_packet;
+ struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+ struct vlan_ethhdr *vhdr;
+ struct ethhdr *ethhdr;
+ unsigned short vid;
+ int packet_type;
+
+ batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data;
+ packet_type = batadv_bcast_packet->packet_type;
+
+ skb_pull_rcsum(skb, hdr_size);
+ skb_reset_mac_header(skb);
+
+ /* clean the netfilter state now that the batman-adv header has been
+ * removed
+ */
+ nf_reset_ct(skb);
+
+ if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
+ goto dropped;
+
+ vid = batadv_get_vid(skb, 0);
+ ethhdr = eth_hdr(skb);
+
+ switch (ntohs(ethhdr->h_proto)) {
+ case ETH_P_8021Q:
+ if (!pskb_may_pull(skb, VLAN_ETH_HLEN))
+ goto dropped;
+
+ vhdr = skb_vlan_eth_hdr(skb);
+
+ /* drop batman-in-batman packets to prevent loops */
+ if (vhdr->h_vlan_encapsulated_proto != htons(ETH_P_BATMAN))
+ break;
+
+ fallthrough;
+ case ETH_P_BATMAN:
+ goto dropped;
+ }
+
+ /* skb->dev & skb->pkt_type are set here */
+ skb->protocol = eth_type_trans(skb, soft_iface);
+ skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+
+ batadv_inc_counter(bat_priv, BATADV_CNT_RX);
+ batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
+ skb->len + ETH_HLEN);
+
+ /* Let the bridge loop avoidance check the packet. If it does
+ * not handle it, we can safely push it up.
+ */
+ if (batadv_bla_rx(bat_priv, skb, vid, packet_type))
+ goto out;
+
+ if (orig_node)
+ batadv_tt_add_temporary_global_entry(bat_priv, orig_node,
+ ethhdr->h_source, vid);
+
+ if (is_multicast_ether_addr(ethhdr->h_dest)) {
+ /* set the mark on broadcast packets if AP isolation is ON and
+ * the packet is coming from an "isolated" client
+ */
+ if (batadv_vlan_ap_isola_get(bat_priv, vid) &&
+ batadv_tt_global_is_isolated(bat_priv, ethhdr->h_source,
+ vid)) {
+ /* save bits in skb->mark not covered by the mask and
+ * apply the mark on the rest
+ */
+ skb->mark &= ~bat_priv->isolation_mark_mask;
+ skb->mark |= bat_priv->isolation_mark;
+ }
+ } else if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source,
+ ethhdr->h_dest, vid)) {
+ goto dropped;
+ }
+
+ netif_rx(skb);
+ goto out;
+
+dropped:
+ kfree_skb(skb);
+out:
+ return;
+}
+
+/**
+ * batadv_softif_vlan_release() - release vlan from lists and queue for free
+ * after rcu grace period
+ * @ref: kref pointer of the vlan object
+ */
+void batadv_softif_vlan_release(struct kref *ref)
+{
+ struct batadv_softif_vlan *vlan;
+
+ vlan = container_of(ref, struct batadv_softif_vlan, refcount);
+
+ spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
+ hlist_del_rcu(&vlan->list);
+ spin_unlock_bh(&vlan->bat_priv->softif_vlan_list_lock);
+
+ kfree_rcu(vlan, rcu);
+}
+
+/**
+ * batadv_softif_vlan_get() - get the vlan object for a specific vid
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: the identifier of the vlan object to retrieve
+ *
+ * Return: the private data of the vlan matching the vid passed as argument or
+ * NULL otherwise. The refcounter of the returned object is incremented by 1.
+ */
+struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv,
+ unsigned short vid)
+{
+ struct batadv_softif_vlan *vlan_tmp, *vlan = NULL;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(vlan_tmp, &bat_priv->softif_vlan_list, list) {
+ if (vlan_tmp->vid != vid)
+ continue;
+
+ if (!kref_get_unless_zero(&vlan_tmp->refcount))
+ continue;
+
+ vlan = vlan_tmp;
+ break;
+ }
+ rcu_read_unlock();
+
+ return vlan;
+}
+
+/**
+ * batadv_softif_create_vlan() - allocate the needed resources for a new vlan
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: the VLAN identifier
+ *
+ * Return: 0 on success, a negative error otherwise.
+ */
+int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
+{
+ struct batadv_softif_vlan *vlan;
+
+ spin_lock_bh(&bat_priv->softif_vlan_list_lock);
+
+ vlan = batadv_softif_vlan_get(bat_priv, vid);
+ if (vlan) {
+ batadv_softif_vlan_put(vlan);
+ spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
+ return -EEXIST;
+ }
+
+ vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
+ if (!vlan) {
+ spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
+ return -ENOMEM;
+ }
+
+ vlan->bat_priv = bat_priv;
+ vlan->vid = vid;
+ kref_init(&vlan->refcount);
+
+ atomic_set(&vlan->ap_isolation, 0);
+
+ kref_get(&vlan->refcount);
+ hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
+ spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
+
+ /* add a new TT local entry. This one will be marked with the NOPURGE
+ * flag
+ */
+ batadv_tt_local_add(bat_priv->soft_iface,
+ bat_priv->soft_iface->dev_addr, vid,
+ BATADV_NULL_IFINDEX, BATADV_NO_MARK);
+
+ /* don't return reference to new softif_vlan */
+ batadv_softif_vlan_put(vlan);
+
+ return 0;
+}
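+
+/* A short sketch of the reference counting in batadv_softif_create_vlan():
+ * kref_init() accounts for the reference held by the function itself,
+ * kref_get() for the one taken by the softif_vlan_list; the final
+ * batadv_softif_vlan_put() therefore leaves exactly the list reference
+ * behind.
+ */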
+
+/**
+ * batadv_softif_destroy_vlan() - remove and destroy a softif_vlan object
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vlan: the object to remove
+ */
+static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
+ struct batadv_softif_vlan *vlan)
+{
+ /* explicitly remove the associated TT local entry because it is marked
+ * with the NOPURGE flag
+ */
+ batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr,
+ vlan->vid, "vlan interface destroyed", false);
+
+ batadv_softif_vlan_put(vlan);
+}
+
+/**
+ * batadv_interface_add_vid() - ndo_add_vid API implementation
+ * @dev: the netdev of the mesh interface
+ * @proto: protocol of the vlan id
+ * @vid: identifier of the new vlan
+ *
+ * Set up all the internal structures for handling the new vlan on top of the
+ * mesh interface
+ *
+ * Return: 0 on success or a negative error code in case of failure.
+ */
+static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
+ unsigned short vid)
+{
+ struct batadv_priv *bat_priv = netdev_priv(dev);
+ struct batadv_softif_vlan *vlan;
+
+ /* only 802.1Q vlans are supported.
+ * batman-adv does not know how to handle other types
+ */
+ if (proto != htons(ETH_P_8021Q))
+ return -EINVAL;
+
+ vid |= BATADV_VLAN_HAS_TAG;
+
+ /* if a new vlan is getting created and it already exists, it means that
+ * it was not deleted yet. batadv_softif_vlan_get() increases the
+ * refcount in order to revive the object.
+ *
+ * if it does not exist then create it.
+ */
+ vlan = batadv_softif_vlan_get(bat_priv, vid);
+ if (!vlan)
+ return batadv_softif_create_vlan(bat_priv, vid);
+
+ /* add a new TT local entry. This one will be marked with the NOPURGE
+ * flag. This must be added again, even if the vlan object already
+ * exists, because the entry was deleted by kill_vid()
+ */
+ batadv_tt_local_add(bat_priv->soft_iface,
+ bat_priv->soft_iface->dev_addr, vid,
+ BATADV_NULL_IFINDEX, BATADV_NO_MARK);
+
+ return 0;
+}
+
+/**
+ * batadv_interface_kill_vid() - ndo_kill_vid API implementation
+ * @dev: the netdev of the mesh interface
+ * @proto: protocol of the vlan id
+ * @vid: identifier of the deleted vlan
+ *
+ * Destroy all the internal structures used to handle the vlan identified by vid
+ * on top of the mesh interface
+ *
+ * Return: 0 on success, -EINVAL if the specified protocol is not ETH_P_8021Q
+ * or -ENOENT if the specified vlan id wasn't registered.
+ */
+static int batadv_interface_kill_vid(struct net_device *dev, __be16 proto,
+ unsigned short vid)
+{
+ struct batadv_priv *bat_priv = netdev_priv(dev);
+ struct batadv_softif_vlan *vlan;
+
+ /* only 802.1Q vlans are supported. batman-adv does not know how to
+ * handle other types
+ */
+ if (proto != htons(ETH_P_8021Q))
+ return -EINVAL;
+
+ vlan = batadv_softif_vlan_get(bat_priv, vid | BATADV_VLAN_HAS_TAG);
+ if (!vlan)
+ return -ENOENT;
+
+ batadv_softif_destroy_vlan(bat_priv, vlan);
+
+ /* finally free the vlan object */
+ batadv_softif_vlan_put(vlan);
+
+ return 0;
+}
+
+/* batman-adv network devices have devices nesting below them and are a special
+ * "super class" of normal network devices; split their locks off into a
+ * separate class since they always nest.
+ */
+static struct lock_class_key batadv_netdev_xmit_lock_key;
+static struct lock_class_key batadv_netdev_addr_lock_key;
+
+/**
+ * batadv_set_lockdep_class_one() - Set lockdep class for a single tx queue
+ * @dev: device which owns the tx queue
+ * @txq: tx queue to modify
+ * @_unused: always NULL
+ */
+static void batadv_set_lockdep_class_one(struct net_device *dev,
+ struct netdev_queue *txq,
+ void *_unused)
+{
+ lockdep_set_class(&txq->_xmit_lock, &batadv_netdev_xmit_lock_key);
+}
+
+/**
+ * batadv_set_lockdep_class() - Set txq and addr_list lockdep class
+ * @dev: network device to modify
+ */
+static void batadv_set_lockdep_class(struct net_device *dev)
+{
+ lockdep_set_class(&dev->addr_list_lock, &batadv_netdev_addr_lock_key);
+ netdev_for_each_tx_queue(dev, batadv_set_lockdep_class_one, NULL);
+}
+
+/**
+ * batadv_softif_init_late() - late stage initialization of soft interface
+ * @dev: registered network device to modify
+ *
+ * Return: error code on failures
+ */
+static int batadv_softif_init_late(struct net_device *dev)
+{
+ struct batadv_priv *bat_priv;
+ u32 random_seqno;
+ int ret;
+ size_t cnt_len = sizeof(u64) * BATADV_CNT_NUM;
+
+ batadv_set_lockdep_class(dev);
+
+ bat_priv = netdev_priv(dev);
+ bat_priv->soft_iface = dev;
+
+ /* batadv_interface_stats() needs to be available as soon as
+ * register_netdevice() has been called
+ */
+ bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(u64));
+ if (!bat_priv->bat_counters)
+ return -ENOMEM;
+
+ atomic_set(&bat_priv->aggregated_ogms, 1);
+ atomic_set(&bat_priv->bonding, 0);
+#ifdef CONFIG_BATMAN_ADV_BLA
+ atomic_set(&bat_priv->bridge_loop_avoidance, 1);
+#endif
+#ifdef CONFIG_BATMAN_ADV_DAT
+ atomic_set(&bat_priv->distributed_arp_table, 1);
+#endif
+#ifdef CONFIG_BATMAN_ADV_MCAST
+ atomic_set(&bat_priv->multicast_mode, 1);
+ atomic_set(&bat_priv->multicast_fanout, 16);
+ atomic_set(&bat_priv->mcast.num_want_all_unsnoopables, 0);
+ atomic_set(&bat_priv->mcast.num_want_all_ipv4, 0);
+ atomic_set(&bat_priv->mcast.num_want_all_ipv6, 0);
+#endif
+ atomic_set(&bat_priv->gw.mode, BATADV_GW_MODE_OFF);
+ atomic_set(&bat_priv->gw.bandwidth_down, 100);
+ atomic_set(&bat_priv->gw.bandwidth_up, 20);
+ atomic_set(&bat_priv->orig_interval, 1000);
+ atomic_set(&bat_priv->hop_penalty, 30);
+#ifdef CONFIG_BATMAN_ADV_DEBUG
+ atomic_set(&bat_priv->log_level, 0);
+#endif
+ atomic_set(&bat_priv->fragmentation, 1);
+ atomic_set(&bat_priv->packet_size_max, ETH_DATA_LEN);
+ atomic_set(&bat_priv->bcast_queue_left, BATADV_BCAST_QUEUE_LEN);
+ atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
+
+ atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
+ atomic_set(&bat_priv->bcast_seqno, 1);
+ atomic_set(&bat_priv->tt.vn, 0);
+ atomic_set(&bat_priv->tt.local_changes, 0);
+ atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
+#ifdef CONFIG_BATMAN_ADV_BLA
+ atomic_set(&bat_priv->bla.num_requests, 0);
+#endif
+ atomic_set(&bat_priv->tp_num, 0);
+
+ bat_priv->tt.last_changeset = NULL;
+ bat_priv->tt.last_changeset_len = 0;
+ bat_priv->isolation_mark = 0;
+ bat_priv->isolation_mark_mask = 0;
+
+ /* randomize initial seqno to avoid collision */
+ get_random_bytes(&random_seqno, sizeof(random_seqno));
+ atomic_set(&bat_priv->frag_seqno, random_seqno);
+
+ bat_priv->primary_if = NULL;
+
+ batadv_nc_init_bat_priv(bat_priv);
+
+ if (!bat_priv->algo_ops) {
+ ret = batadv_algo_select(bat_priv, batadv_routing_algo);
+ if (ret < 0)
+ goto free_bat_counters;
+ }
+
+ ret = batadv_mesh_init(dev);
+ if (ret < 0)
+ goto free_bat_counters;
+
+ return 0;
+
+free_bat_counters:
+ free_percpu(bat_priv->bat_counters);
+ bat_priv->bat_counters = NULL;
+
+ return ret;
+}
+
+/**
+ * batadv_softif_slave_add() - Add a slave interface to a batadv_soft_interface
+ * @dev: batadv_soft_interface used as master interface
+ * @slave_dev: net_device which should become the slave interface
+ * @extack: extended ACK report struct
+ *
+ * Return: 0 if successful or error otherwise.
+ */
+static int batadv_softif_slave_add(struct net_device *dev,
+ struct net_device *slave_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct batadv_hard_iface *hard_iface;
+ int ret = -EINVAL;
+
+ hard_iface = batadv_hardif_get_by_netdev(slave_dev);
+ if (!hard_iface || hard_iface->soft_iface)
+ goto out;
+
+ ret = batadv_hardif_enable_interface(hard_iface, dev);
+
+out:
+ batadv_hardif_put(hard_iface);
+ return ret;
+}
+
+/**
+ * batadv_softif_slave_del() - Delete a slave iface from a batadv_soft_interface
+ * @dev: batadv_soft_interface used as master interface
+ * @slave_dev: net_device which should be removed from the master interface
+ *
+ * Return: 0 if successful or error otherwise.
+ */
+static int batadv_softif_slave_del(struct net_device *dev,
+ struct net_device *slave_dev)
+{
+ struct batadv_hard_iface *hard_iface;
+ int ret = -EINVAL;
+
+ hard_iface = batadv_hardif_get_by_netdev(slave_dev);
+
+ if (!hard_iface || hard_iface->soft_iface != dev)
+ goto out;
+
+ batadv_hardif_disable_interface(hard_iface);
+ ret = 0;
+
+out:
+ batadv_hardif_put(hard_iface);
+ return ret;
+}
+
+static const struct net_device_ops batadv_netdev_ops = {
+ .ndo_init = batadv_softif_init_late,
+ .ndo_open = batadv_interface_open,
+ .ndo_stop = batadv_interface_release,
+ .ndo_get_stats = batadv_interface_stats,
+ .ndo_vlan_rx_add_vid = batadv_interface_add_vid,
+ .ndo_vlan_rx_kill_vid = batadv_interface_kill_vid,
+ .ndo_set_mac_address = batadv_interface_set_mac_addr,
+ .ndo_change_mtu = batadv_interface_change_mtu,
+ .ndo_set_rx_mode = batadv_interface_set_rx_mode,
+ .ndo_start_xmit = batadv_interface_tx,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_add_slave = batadv_softif_slave_add,
+ .ndo_del_slave = batadv_softif_slave_del,
+};
+
+static void batadv_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strscpy(info->driver, "B.A.T.M.A.N. advanced", sizeof(info->driver));
+ strscpy(info->version, BATADV_SOURCE_VERSION, sizeof(info->version));
+ strscpy(info->fw_version, "N/A", sizeof(info->fw_version));
+ strscpy(info->bus_info, "batman", sizeof(info->bus_info));
+}
+
+/* Inspired by drivers/net/ethernet/dlink/sundance.c:1702
+ * Declare each description string in struct.name[] to get a fixed-size buffer
+ * and compile-time checking for strings longer than ETH_GSTRING_LEN.
+ */
+static const struct {
+ const char name[ETH_GSTRING_LEN];
+} batadv_counters_strings[] = {
+ { "tx" },
+ { "tx_bytes" },
+ { "tx_dropped" },
+ { "rx" },
+ { "rx_bytes" },
+ { "forward" },
+ { "forward_bytes" },
+ { "mgmt_tx" },
+ { "mgmt_tx_bytes" },
+ { "mgmt_rx" },
+ { "mgmt_rx_bytes" },
+ { "frag_tx" },
+ { "frag_tx_bytes" },
+ { "frag_rx" },
+ { "frag_rx_bytes" },
+ { "frag_fwd" },
+ { "frag_fwd_bytes" },
+ { "tt_request_tx" },
+ { "tt_request_rx" },
+ { "tt_response_tx" },
+ { "tt_response_rx" },
+ { "tt_roam_adv_tx" },
+ { "tt_roam_adv_rx" },
+#ifdef CONFIG_BATMAN_ADV_DAT
+ { "dat_get_tx" },
+ { "dat_get_rx" },
+ { "dat_put_tx" },
+ { "dat_put_rx" },
+ { "dat_cached_reply_tx" },
+#endif
+#ifdef CONFIG_BATMAN_ADV_NC
+ { "nc_code" },
+ { "nc_code_bytes" },
+ { "nc_recode" },
+ { "nc_recode_bytes" },
+ { "nc_buffer" },
+ { "nc_decode" },
+ { "nc_decode_bytes" },
+ { "nc_decode_failed" },
+ { "nc_sniffed" },
+#endif
+};
+
+static void batadv_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ if (stringset == ETH_SS_STATS)
+ memcpy(data, batadv_counters_strings,
+ sizeof(batadv_counters_strings));
+}
+
+static void batadv_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct batadv_priv *bat_priv = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < BATADV_CNT_NUM; i++)
+ data[i] = batadv_sum_counter(bat_priv, i);
+}
+
+static int batadv_get_sset_count(struct net_device *dev, int stringset)
+{
+ if (stringset == ETH_SS_STATS)
+ return BATADV_CNT_NUM;
+
+ return -EOPNOTSUPP;
+}
+
+static const struct ethtool_ops batadv_ethtool_ops = {
+ .get_drvinfo = batadv_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = batadv_get_strings,
+ .get_ethtool_stats = batadv_get_ethtool_stats,
+ .get_sset_count = batadv_get_sset_count,
+};
+
+/**
+ * batadv_softif_free() - Destructor of batadv_soft_interface
+ * @dev: Device to cleanup and remove
+ */
+static void batadv_softif_free(struct net_device *dev)
+{
+ batadv_mesh_free(dev);
+
+ /* some scheduled RCU callbacks need the bat_priv struct to accomplish
+ * their tasks. Wait for them all to be finished before freeing the
+ * netdev and its private data (bat_priv)
+ */
+ rcu_barrier();
+}
+
+/**
+ * batadv_softif_init_early() - early stage initialization of soft interface
+ * @dev: registered network device to modify
+ */
+static void batadv_softif_init_early(struct net_device *dev)
+{
+ ether_setup(dev);
+
+ dev->netdev_ops = &batadv_netdev_ops;
+ dev->needs_free_netdev = true;
+ dev->priv_destructor = batadv_softif_free;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_NETNS_LOCAL;
+ dev->features |= NETIF_F_LLTX;
+ dev->priv_flags |= IFF_NO_QUEUE;
+
+ /* can't call min_mtu, because the needed variables
+ * have not been initialized yet
+ */
+ dev->mtu = ETH_DATA_LEN;
+
+ /* generate random address */
+ eth_hw_addr_random(dev);
+
+ dev->ethtool_ops = &batadv_ethtool_ops;
+}
+
+/**
+ * batadv_softif_validate() - validate configuration of new batadv link
+ * @tb: IFLA_INFO_DATA netlink attributes
+ * @data: enum batadv_ifla_attrs attributes
+ * @extack: extended ACK report struct
+ *
+ * Return: 0 if successful or error otherwise.
+ */
+static int batadv_softif_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct batadv_algo_ops *algo_ops;
+
+ if (!data)
+ return 0;
+
+ if (data[IFLA_BATADV_ALGO_NAME]) {
+ algo_ops = batadv_algo_get(nla_data(data[IFLA_BATADV_ALGO_NAME]));
+ if (!algo_ops)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * batadv_softif_newlink() - pre-initialize and register new batadv link
+ * @src_net: the applicable net namespace
+ * @dev: network device to register
+ * @tb: IFLA_INFO_DATA netlink attributes
+ * @data: enum batadv_ifla_attrs attributes
+ * @extack: extended ACK report struct
+ *
+ * Return: 0 if successful or error otherwise.
+ */
+static int batadv_softif_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct batadv_priv *bat_priv = netdev_priv(dev);
+ const char *algo_name;
+ int err;
+
+ if (data && data[IFLA_BATADV_ALGO_NAME]) {
+ algo_name = nla_data(data[IFLA_BATADV_ALGO_NAME]);
+ err = batadv_algo_select(bat_priv, algo_name);
+ if (err)
+ return -EINVAL;
+ }
+
+ return register_netdevice(dev);
+}
+
+/**
+ * batadv_softif_destroy_netlink() - deletion of batadv_soft_interface via
+ * netlink
+ * @soft_iface: the to-be-removed batman-adv interface
+ * @head: list pointer
+ */
+static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
+ struct list_head *head)
+{
+ struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+ struct batadv_hard_iface *hard_iface;
+ struct batadv_softif_vlan *vlan;
+
+ list_for_each_entry(hard_iface, &batadv_hardif_list, list) {
+ if (hard_iface->soft_iface == soft_iface)
+ batadv_hardif_disable_interface(hard_iface);
+ }
+
+ /* destroy the "untagged" VLAN */
+ vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
+ if (vlan) {
+ batadv_softif_destroy_vlan(bat_priv, vlan);
+ batadv_softif_vlan_put(vlan);
+ }
+
+ unregister_netdevice_queue(soft_iface, head);
+}
+
+/**
+ * batadv_softif_is_valid() - Check whether device is a batadv soft interface
+ * @net_dev: device which should be checked
+ *
+ * Return: true when net_dev is a batman-adv interface, false otherwise
+ */
+bool batadv_softif_is_valid(const struct net_device *net_dev)
+{
+ if (net_dev->netdev_ops->ndo_start_xmit == batadv_interface_tx)
+ return true;
+
+ return false;
+}
+
+static const struct nla_policy batadv_ifla_policy[IFLA_BATADV_MAX + 1] = {
+ [IFLA_BATADV_ALGO_NAME] = { .type = NLA_NUL_STRING },
+};
+
+struct rtnl_link_ops batadv_link_ops __read_mostly = {
+ .kind = "batadv",
+ .priv_size = sizeof(struct batadv_priv),
+ .setup = batadv_softif_init_early,
+ .maxtype = IFLA_BATADV_MAX,
+ .policy = batadv_ifla_policy,
+ .validate = batadv_softif_validate,
+ .newlink = batadv_softif_newlink,
+ .dellink = batadv_softif_destroy_netlink,
+};
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
new file mode 100644
index 0000000000..9f2003f1a4
--- /dev/null
+++ b/net/batman-adv/soft-interface.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner
+ */
+
+#ifndef _NET_BATMAN_ADV_SOFT_INTERFACE_H_
+#define _NET_BATMAN_ADV_SOFT_INTERFACE_H_
+
+#include "main.h"
+
+#include <linux/kref.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <net/rtnetlink.h>
+
+int batadv_skb_head_push(struct sk_buff *skb, unsigned int len);
+void batadv_interface_rx(struct net_device *soft_iface,
+ struct sk_buff *skb, int hdr_size,
+ struct batadv_orig_node *orig_node);
+bool batadv_softif_is_valid(const struct net_device *net_dev);
+extern struct rtnl_link_ops batadv_link_ops;
+int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid);
+void batadv_softif_vlan_release(struct kref *ref);
+struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv,
+ unsigned short vid);
+
+/**
+ * batadv_softif_vlan_put() - decrease the vlan object refcounter and
+ * possibly release it
+ * @vlan: the vlan object to release
+ */
+static inline void batadv_softif_vlan_put(struct batadv_softif_vlan *vlan)
+{
+ if (!vlan)
+ return;
+
+ kref_put(&vlan->refcount, batadv_softif_vlan_release);
+}
+
+#endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */
diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c
new file mode 100644
index 0000000000..7f3dd3c393
--- /dev/null
+++ b/net/batman-adv/tp_meter.c
@@ -0,0 +1,1490 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Edo Monticelli, Antonio Quartulli
+ */
+
+#include "tp_meter.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/build_bug.h>
+#include <linux/byteorder/generic.h>
+#include <linux/cache.h>
+#include <linux/compiler.h>
+#include <linux/container_of.h>
+#include <linux/err.h>
+#include <linux/etherdevice.h>
+#include <linux/gfp.h>
+#include <linux/if_ether.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/kref.h>
+#include <linux/kthread.h>
+#include <linux/limits.h>
+#include <linux/list.h>
+#include <linux/minmax.h>
+#include <linux/netdevice.h>
+#include <linux/param.h>
+#include <linux/printk.h>
+#include <linux/random.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <uapi/linux/batadv_packet.h>
+#include <uapi/linux/batman_adv.h>
+
+#include "hard-interface.h"
+#include "log.h"
+#include "netlink.h"
+#include "originator.h"
+#include "send.h"
+
+/**
+ * BATADV_TP_DEF_TEST_LENGTH - Default test length in milliseconds, used if
+ * no length is specified by the user
+ */
+#define BATADV_TP_DEF_TEST_LENGTH 10000
+
+/**
+ * BATADV_TP_AWND - Advertised window by the receiver (in bytes)
+ */
+#define BATADV_TP_AWND 0x20000000
+
+/**
+ * BATADV_TP_RECV_TIMEOUT - Receiver activity timeout. If the receiver does not
+ * get anything for this amount of milliseconds, the connection is killed
+ */
+#define BATADV_TP_RECV_TIMEOUT 1000
+
+/**
+ * BATADV_TP_MAX_RTO - Maximum sender timeout. If the sender RTO gets beyond
+ * this amount of milliseconds, the receiver is considered unreachable and the
+ * connection is killed
+ */
+#define BATADV_TP_MAX_RTO 30000
+
+/**
+ * BATADV_TP_FIRST_SEQ - First seqno of each session. The number is rather high
+ * in order to immediately trigger a wrap around (test purposes)
+ */
+#define BATADV_TP_FIRST_SEQ ((u32)-1 - 2000)
+
+/**
+ * BATADV_TP_PLEN - length of the payload (data after the batadv_unicast header)
+ * to simulate
+ */
+#define BATADV_TP_PLEN (BATADV_TP_PACKET_LEN - ETH_HLEN - \
+ sizeof(struct batadv_unicast_packet))
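+
+/* Assuming BATADV_TP_PACKET_LEN equals ETH_DATA_LEN (1500 bytes) and a
+ * 10 byte batadv_unicast header, BATADV_TP_PLEN works out to
+ * 1500 - 14 - 10 = 1476 bytes of simulated payload per message (the exact
+ * figure depends on the packet definitions in batadv_packet.h)
+ */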
+
+static u8 batadv_tp_prerandom[4096] __read_mostly;
+
+/**
+ * batadv_tp_session_cookie() - generate session cookie based on session ids
+ * @session: TP session identifier
+ * @icmp_uid: icmp pseudo uid of the tp session
+ *
+ * Return: 32 bit tp_meter session cookie
+ */
+static u32 batadv_tp_session_cookie(const u8 session[2], u8 icmp_uid)
+{
+ u32 cookie;
+
+ cookie = icmp_uid << 16;
+ cookie |= session[0] << 8;
+ cookie |= session[1];
+
+ return cookie;
+}
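+
+/* As an illustration: icmp_uid = 0x12 with session = {0x34, 0x56} packs to
+ * cookie = (0x12 << 16) | (0x34 << 8) | 0x56 = 0x00123456
+ */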
+
+/**
+ * batadv_tp_cwnd() - compute the new cwnd size
+ * @base: base cwnd size value
+ * @increment: the value to add to base to get the new size
+ * @min: minimum cwnd value (usually MSS)
+ *
+ * Return the new cwnd size and ensure it does not exceed the Advertised
+ * Receiver Window size. Wrap-arounds are handled safely.
+ * For details refer to Section 3.1 of RFC5681
+ *
+ * Return: new congestion window size in bytes
+ */
+static u32 batadv_tp_cwnd(u32 base, u32 increment, u32 min)
+{
+ u32 new_size = base + increment;
+
+ /* check for wrap-around */
+ if (new_size < base)
+ new_size = (u32)ULONG_MAX;
+
+ new_size = min_t(u32, new_size, BATADV_TP_AWND);
+
+ return max_t(u32, new_size, min);
+}
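+
+/* Example of the clamping above (illustrative values): base = 0xfffff000
+ * with increment = 0x2000 wraps around and is saturated to 0xffffffff,
+ * then capped to BATADV_TP_AWND (0x20000000) and finally raised to @min
+ * if it ended up below it
+ */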
+
+/**
+ * batadv_tp_update_cwnd() - update the Congestion Window
+ * @tp_vars: the private data of the current TP meter session
+ * @mss: maximum segment size of transmission
+ *
+ * 1) if the session is in Slow Start, the CWND has to be increased by 1
+ * MSS every unique received ACK
+ * 2) if the session is in Congestion Avoidance, the CWND has to be
+ * increased by MSS * MSS / CWND for every unique received ACK
+ */
+static void batadv_tp_update_cwnd(struct batadv_tp_vars *tp_vars, u32 mss)
+{
+ spin_lock_bh(&tp_vars->cwnd_lock);
+
+ /* slow start... */
+ if (tp_vars->cwnd <= tp_vars->ss_threshold) {
+ tp_vars->dec_cwnd = 0;
+ tp_vars->cwnd = batadv_tp_cwnd(tp_vars->cwnd, mss, mss);
+ spin_unlock_bh(&tp_vars->cwnd_lock);
+ return;
+ }
+
+ /* increment CWND by at least 1 (section 3.1 of RFC5681) */
+ tp_vars->dec_cwnd += max_t(u32, 1U << 3,
+ ((mss * mss) << 6) / (tp_vars->cwnd << 3));
+ if (tp_vars->dec_cwnd < (mss << 3)) {
+ spin_unlock_bh(&tp_vars->cwnd_lock);
+ return;
+ }
+
+ tp_vars->cwnd = batadv_tp_cwnd(tp_vars->cwnd, mss, mss);
+ tp_vars->dec_cwnd = 0;
+
+ spin_unlock_bh(&tp_vars->cwnd_lock);
+}
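+
+/* The congestion avoidance branch above keeps dec_cwnd as a fixed-point
+ * accumulator in 1/8 byte units: every unique ACK adds roughly
+ * 8 * mss * mss / cwnd, so dec_cwnd reaches the mss << 3 threshold after
+ * about cwnd / mss ACKs and the window grows by one MSS, i.e. once per
+ * round trip as prescribed by RFC5681
+ */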
+
+/**
+ * batadv_tp_update_rto() - calculate new retransmission timeout
+ * @tp_vars: the private data of the current TP meter session
+ * @new_rtt: new roundtrip time in msec
+ */
+static void batadv_tp_update_rto(struct batadv_tp_vars *tp_vars,
+ u32 new_rtt)
+{
+ long m = new_rtt;
+
+ /* RTT update
+ * Details in Section 2.2 and 2.3 of RFC6298
+ *
+ * It's tricky to understand. Don't lose hair please.
+ * Inspired by tcp_rtt_estimator() tcp_input.c
+ */
+ if (tp_vars->srtt != 0) {
+ m -= (tp_vars->srtt >> 3); /* m is now error in rtt est */
+ tp_vars->srtt += m; /* rtt = 7/8 srtt + 1/8 new */
+ if (m < 0)
+ m = -m;
+
+ m -= (tp_vars->rttvar >> 2);
+ tp_vars->rttvar += m; /* mdev ~= 3/4 rttvar + 1/4 new */
+ } else {
+ /* first measure getting in */
+ tp_vars->srtt = m << 3; /* take the measured time to be srtt */
+ tp_vars->rttvar = m << 1; /* new_rtt / 2 */
+ }
+
+ /* rto = srtt + 4 * rttvar.
+ * rttvar is scaled by 4, therefore doesn't need to be multiplied
+ */
+ tp_vars->rto = (tp_vars->srtt >> 3) + tp_vars->rttvar;
+}
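+
+/* Numerical sketch of the scaling above: a first RTT sample of 100ms sets
+ * srtt = 100 << 3 = 800 and rttvar = 100 << 1 = 200, hence
+ * rto = (800 >> 3) + 200 = 300ms. Later samples blend in with the usual
+ * 7/8 (srtt) and 3/4 (rttvar) weights from RFC6298
+ */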
+
+/**
+ * batadv_tp_batctl_notify() - send client status result to client
+ * @reason: reason for tp meter session stop
+ * @dst: destination of tp_meter session
+ * @bat_priv: the bat priv with all the soft interface information
+ * @start_time: start of transmission in jiffies
+ * @total_sent: bytes acked to the receiver
+ * @cookie: cookie of tp_meter session
+ */
+static void batadv_tp_batctl_notify(enum batadv_tp_meter_reason reason,
+ const u8 *dst, struct batadv_priv *bat_priv,
+ unsigned long start_time, u64 total_sent,
+ u32 cookie)
+{
+ u32 test_time;
+ u8 result;
+ u32 total_bytes;
+
+ if (!batadv_tp_is_error(reason)) {
+ result = BATADV_TP_REASON_COMPLETE;
+ test_time = jiffies_to_msecs(jiffies - start_time);
+ total_bytes = total_sent;
+ } else {
+ result = reason;
+ test_time = 0;
+ total_bytes = 0;
+ }
+
+ batadv_netlink_tpmeter_notify(bat_priv, dst, result, test_time,
+ total_bytes, cookie);
+}
+
+/**
+ * batadv_tp_batctl_error_notify() - send client error result to client
+ * @reason: reason for tp meter session stop
+ * @dst: destination of tp_meter session
+ * @bat_priv: the bat priv with all the soft interface information
+ * @cookie: cookie of tp_meter session
+ */
+static void batadv_tp_batctl_error_notify(enum batadv_tp_meter_reason reason,
+ const u8 *dst,
+ struct batadv_priv *bat_priv,
+ u32 cookie)
+{
+ batadv_tp_batctl_notify(reason, dst, bat_priv, 0, 0, cookie);
+}
+
+/**
+ * batadv_tp_list_find() - find a tp_vars object in the global list
+ * @bat_priv: the bat priv with all the soft interface information
+ * @dst: the other endpoint MAC address to look for
+ *
+ * Look for a tp_vars object matching dst as end_point and return it after
+ * having incremented the refcounter. Return NULL if not found
+ *
+ * Return: matching tp_vars or NULL when no tp_vars with @dst was found
+ */
+static struct batadv_tp_vars *batadv_tp_list_find(struct batadv_priv *bat_priv,
+ const u8 *dst)
+{
+ struct batadv_tp_vars *pos, *tp_vars = NULL;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(pos, &bat_priv->tp_list, list) {
+ if (!batadv_compare_eth(pos->other_end, dst))
+ continue;
+
+ /* most of the time this function is invoked during the normal
+ * process..it makes sense to pay more when the session is
+ * finished and to speed the process up during the measurement
+ */
+ if (unlikely(!kref_get_unless_zero(&pos->refcount)))
+ continue;
+
+ tp_vars = pos;
+ break;
+ }
+ rcu_read_unlock();
+
+ return tp_vars;
+}
+
+/**
+ * batadv_tp_list_find_session() - find tp_vars session object in the global
+ * list
+ * @bat_priv: the bat priv with all the soft interface information
+ * @dst: the other endpoint MAC address to look for
+ * @session: session identifier
+ *
+ * Look for a tp_vars object matching dst as end_point, session as tp meter
+ * session and return it after having incremented the refcounter. Return NULL
+ * if not found
+ *
+ * Return: matching tp_vars or NULL when no tp_vars was found
+ */
+static struct batadv_tp_vars *
+batadv_tp_list_find_session(struct batadv_priv *bat_priv, const u8 *dst,
+ const u8 *session)
+{
+ struct batadv_tp_vars *pos, *tp_vars = NULL;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(pos, &bat_priv->tp_list, list) {
+ if (!batadv_compare_eth(pos->other_end, dst))
+ continue;
+
+ if (memcmp(pos->session, session, sizeof(pos->session)) != 0)
+ continue;
+
+ /* most of the time this function is invoked during the normal
+ * process..it makes sense to pay more when the session is
+ * finished and to speed the process up during the measurement
+ */
+ if (unlikely(!kref_get_unless_zero(&pos->refcount)))
+ continue;
+
+ tp_vars = pos;
+ break;
+ }
+ rcu_read_unlock();
+
+ return tp_vars;
+}
+
+/**
+ * batadv_tp_vars_release() - release batadv_tp_vars from lists and queue for
+ * free after rcu grace period
+ * @ref: kref pointer of the batadv_tp_vars
+ */
+static void batadv_tp_vars_release(struct kref *ref)
+{
+ struct batadv_tp_vars *tp_vars;
+ struct batadv_tp_unacked *un, *safe;
+
+ tp_vars = container_of(ref, struct batadv_tp_vars, refcount);
+
+ /* lock should not be needed because this object is now out of any
+ * context!
+ */
+ spin_lock_bh(&tp_vars->unacked_lock);
+ list_for_each_entry_safe(un, safe, &tp_vars->unacked_list, list) {
+ list_del(&un->list);
+ kfree(un);
+ }
+ spin_unlock_bh(&tp_vars->unacked_lock);
+
+ kfree_rcu(tp_vars, rcu);
+}
+
+/**
+ * batadv_tp_vars_put() - decrement the batadv_tp_vars refcounter and possibly
+ * release it
+ * @tp_vars: the private data of the current TP meter session to be free'd
+ */
+static void batadv_tp_vars_put(struct batadv_tp_vars *tp_vars)
+{
+ if (!tp_vars)
+ return;
+
+ kref_put(&tp_vars->refcount, batadv_tp_vars_release);
+}
+
+/**
+ * batadv_tp_sender_cleanup() - clean up sender data and drop list and timer
+ * references
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tp_vars: the private data of the current TP meter session to cleanup
+ */
+static void batadv_tp_sender_cleanup(struct batadv_priv *bat_priv,
+ struct batadv_tp_vars *tp_vars)
+{
+ cancel_delayed_work(&tp_vars->finish_work);
+
+ spin_lock_bh(&tp_vars->bat_priv->tp_list_lock);
+ hlist_del_rcu(&tp_vars->list);
+ spin_unlock_bh(&tp_vars->bat_priv->tp_list_lock);
+
+ /* drop list reference */
+ batadv_tp_vars_put(tp_vars);
+
+ atomic_dec(&tp_vars->bat_priv->tp_num);
+
+ /* kill the timer and remove its reference */
+ del_timer_sync(&tp_vars->timer);
+ /* the worker might have rearmed itself therefore we kill it again. Note
+ * that if the worker should run again before invoking the following
+ * del_timer(), it would not re-arm itself once again because the status
+ * is OFF now
+ */
+ del_timer(&tp_vars->timer);
+ batadv_tp_vars_put(tp_vars);
+}
+
+/**
+ * batadv_tp_sender_end() - print info about ended session and inform client
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tp_vars: the private data of the current TP meter session
+ */
+static void batadv_tp_sender_end(struct batadv_priv *bat_priv,
+ struct batadv_tp_vars *tp_vars)
+{
+ u32 session_cookie;
+
+ batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
+ "Test towards %pM finished..shutting down (reason=%d)\n",
+ tp_vars->other_end, tp_vars->reason);
+
+ batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
+ "Last timing stats: SRTT=%ums RTTVAR=%ums RTO=%ums\n",
+ tp_vars->srtt >> 3, tp_vars->rttvar >> 2, tp_vars->rto);
+
+ batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
+ "Final values: cwnd=%u ss_threshold=%u\n",
+ tp_vars->cwnd, tp_vars->ss_threshold);
+
+ session_cookie = batadv_tp_session_cookie(tp_vars->session,
+ tp_vars->icmp_uid);
+
+ batadv_tp_batctl_notify(tp_vars->reason,
+ tp_vars->other_end,
+ bat_priv,
+ tp_vars->start_time,
+ atomic64_read(&tp_vars->tot_sent),
+ session_cookie);
+}
+
+/**
+ * batadv_tp_sender_shutdown() - let sender thread/timer stop gracefully
+ * @tp_vars: the private data of the current TP meter session
+ * @reason: reason for tp meter session stop
+ */
+static void batadv_tp_sender_shutdown(struct batadv_tp_vars *tp_vars,
+ enum batadv_tp_meter_reason reason)
+{
+ if (!atomic_dec_and_test(&tp_vars->sending))
+ return;
+
+ tp_vars->reason = reason;
+}
+
+/**
+ * batadv_tp_sender_finish() - stop sender session after test_length was reached
+ * @work: delayed work reference of the related tp_vars
+ */
+static void batadv_tp_sender_finish(struct work_struct *work)
+{
+ struct delayed_work *delayed_work;
+ struct batadv_tp_vars *tp_vars;
+
+ delayed_work = to_delayed_work(work);
+ tp_vars = container_of(delayed_work, struct batadv_tp_vars,
+ finish_work);
+
+ batadv_tp_sender_shutdown(tp_vars, BATADV_TP_REASON_COMPLETE);
+}
+
+/**
+ * batadv_tp_reset_sender_timer() - reschedule the sender timer
+ * @tp_vars: the private TP meter data for this session
+ *
+ * Reschedule the timer using tp_vars->rto as delay
+ */
+static void batadv_tp_reset_sender_timer(struct batadv_tp_vars *tp_vars)
+{
+ /* most of the time this function is invoked while normal packet
+ * reception...
+ */
+ if (unlikely(atomic_read(&tp_vars->sending) == 0))
+ /* timer ref will be dropped in batadv_tp_sender_cleanup */
+ return;
+
+ mod_timer(&tp_vars->timer, jiffies + msecs_to_jiffies(tp_vars->rto));
+}
+
+/**
+ * batadv_tp_sender_timeout() - timer that fires in case of packet loss
+ * @t: address to timer_list inside tp_vars
+ *
+ * If fired it means that there was packet loss.
+ * Switch to Slow Start, set the ss_threshold to half of the current cwnd and
+ * reset the cwnd to 3*MSS
+ */
+static void batadv_tp_sender_timeout(struct timer_list *t)
+{
+ struct batadv_tp_vars *tp_vars = from_timer(tp_vars, t, timer);
+ struct batadv_priv *bat_priv = tp_vars->bat_priv;
+
+ if (atomic_read(&tp_vars->sending) == 0)
+ return;
+
+ /* if the user waited long enough...shut down the test */
+ if (unlikely(tp_vars->rto >= BATADV_TP_MAX_RTO)) {
+ batadv_tp_sender_shutdown(tp_vars,
+ BATADV_TP_REASON_DST_UNREACHABLE);
+ return;
+ }
+
+ /* RTO exponential backoff
+ * Details in Section 5.5 of RFC6298
+ */
+ tp_vars->rto <<= 1;
+
+ spin_lock_bh(&tp_vars->cwnd_lock);
+
+ tp_vars->ss_threshold = tp_vars->cwnd >> 1;
+ if (tp_vars->ss_threshold < BATADV_TP_PLEN * 2)
+ tp_vars->ss_threshold = BATADV_TP_PLEN * 2;
+
+ batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
+ "Meter: RTO fired during test towards %pM! cwnd=%u new ss_thr=%u, resetting last_sent to %u\n",
+ tp_vars->other_end, tp_vars->cwnd, tp_vars->ss_threshold,
+ atomic_read(&tp_vars->last_acked));
+
+ tp_vars->cwnd = BATADV_TP_PLEN * 3;
+
+ spin_unlock_bh(&tp_vars->cwnd_lock);
+
+ /* resend the non-ACKed packets.. */
+ tp_vars->last_sent = atomic_read(&tp_vars->last_acked);
+ wake_up(&tp_vars->more_bytes);
+
+ batadv_tp_reset_sender_timer(tp_vars);
+}
+
+/**
+ * batadv_tp_fill_prerandom() - Fill buffer with prefetched random bytes
+ * @tp_vars: the private TP meter data for this session
+ * @buf: Buffer to fill with bytes
+ * @nbytes: amount of pseudorandom bytes
+ */
+static void batadv_tp_fill_prerandom(struct batadv_tp_vars *tp_vars,
+ u8 *buf, size_t nbytes)
+{
+ u32 local_offset;
+ size_t bytes_inbuf;
+ size_t to_copy;
+ size_t pos = 0;
+
+ spin_lock_bh(&tp_vars->prerandom_lock);
+ local_offset = tp_vars->prerandom_offset;
+ tp_vars->prerandom_offset += nbytes;
+ tp_vars->prerandom_offset %= sizeof(batadv_tp_prerandom);
+ spin_unlock_bh(&tp_vars->prerandom_lock);
+
+ while (nbytes) {
+ local_offset %= sizeof(batadv_tp_prerandom);
+ bytes_inbuf = sizeof(batadv_tp_prerandom) - local_offset;
+ to_copy = min(nbytes, bytes_inbuf);
+
+ memcpy(&buf[pos], &batadv_tp_prerandom[local_offset], to_copy);
+ pos += to_copy;
+ nbytes -= to_copy;
+ local_offset = 0;
+ }
+}
+
+/**
+ * batadv_tp_send_msg() - send a single message
+ * @tp_vars: the private TP meter data for this session
+ * @src: source mac address
+ * @orig_node: the originator of the destination
+ * @seqno: sequence number of this packet
+ * @len: length of the entire packet
+ * @session: session identifier
+ * @uid: local ICMP "socket" index
+ * @timestamp: timestamp in jiffies which is replied in ack
+ *
+ * Create and send a single TP Meter message.
+ *
+ * Return: 0 on success, BATADV_TP_REASON_MEMORY_ERROR if the packet couldn't
+ * be allocated or BATADV_TP_REASON_CANT_SEND if the packet couldn't be sent
+ */
+static int batadv_tp_send_msg(struct batadv_tp_vars *tp_vars, const u8 *src,
+ struct batadv_orig_node *orig_node,
+ u32 seqno, size_t len, const u8 *session,
+ int uid, u32 timestamp)
+{
+ struct batadv_icmp_tp_packet *icmp;
+ struct sk_buff *skb;
+ int r;
+ u8 *data;
+ size_t data_len;
+
+ skb = netdev_alloc_skb_ip_align(NULL, len + ETH_HLEN);
+ if (unlikely(!skb))
+ return BATADV_TP_REASON_MEMORY_ERROR;
+
+ skb_reserve(skb, ETH_HLEN);
+ icmp = skb_put(skb, sizeof(*icmp));
+
+ /* fill the icmp header */
+ ether_addr_copy(icmp->dst, orig_node->orig);
+ ether_addr_copy(icmp->orig, src);
+ icmp->version = BATADV_COMPAT_VERSION;
+ icmp->packet_type = BATADV_ICMP;
+ icmp->ttl = BATADV_TTL;
+ icmp->msg_type = BATADV_TP;
+ icmp->uid = uid;
+
+ icmp->subtype = BATADV_TP_MSG;
+ memcpy(icmp->session, session, sizeof(icmp->session));
+ icmp->seqno = htonl(seqno);
+ icmp->timestamp = htonl(timestamp);
+
+ data_len = len - sizeof(*icmp);
+ data = skb_put(skb, data_len);
+ batadv_tp_fill_prerandom(tp_vars, data, data_len);
+
+ r = batadv_send_skb_to_orig(skb, orig_node, NULL);
+ if (r == NET_XMIT_SUCCESS)
+ return 0;
+
+ return BATADV_TP_REASON_CANT_SEND;
+}
+
+/**
+ * batadv_tp_recv_ack() - ACK receiving function
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the buffer containing the received packet
+ *
+ * Process a received TP ACK packet
+ */
+static void batadv_tp_recv_ack(struct batadv_priv *bat_priv,
+ const struct sk_buff *skb)
+{
+ struct batadv_hard_iface *primary_if = NULL;
+ struct batadv_orig_node *orig_node = NULL;
+ const struct batadv_icmp_tp_packet *icmp;
+ struct batadv_tp_vars *tp_vars;
+ const unsigned char *dev_addr;
+ size_t packet_len, mss;
+ u32 rtt, recv_ack, cwnd;
+
+ packet_len = BATADV_TP_PLEN;
+ mss = BATADV_TP_PLEN;
+ packet_len += sizeof(struct batadv_unicast_packet);
+
+ icmp = (struct batadv_icmp_tp_packet *)skb->data;
+
+ /* find the tp_vars */
+ tp_vars = batadv_tp_list_find_session(bat_priv, icmp->orig,
+ icmp->session);
+ if (unlikely(!tp_vars))
+ return;
+
+ if (unlikely(atomic_read(&tp_vars->sending) == 0))
+ goto out;
+
+ /* old ACK? silently drop it.. */
+ if (batadv_seq_before(ntohl(icmp->seqno),
+ (u32)atomic_read(&tp_vars->last_acked)))
+ goto out;
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (unlikely(!primary_if))
+ goto out;
+
+ orig_node = batadv_orig_hash_find(bat_priv, icmp->orig);
+ if (unlikely(!orig_node))
+ goto out;
+
+ /* update RTO with the new sampled RTT, if any */
+ rtt = jiffies_to_msecs(jiffies) - ntohl(icmp->timestamp);
+ if (icmp->timestamp && rtt)
+ batadv_tp_update_rto(tp_vars, rtt);
+
+ /* ACK for new data... reset the timer */
+ batadv_tp_reset_sender_timer(tp_vars);
+
+ recv_ack = ntohl(icmp->seqno);
+
+ /* check if this ACK is a duplicate */
+ if (atomic_read(&tp_vars->last_acked) == recv_ack) {
+ atomic_inc(&tp_vars->dup_acks);
+ if (atomic_read(&tp_vars->dup_acks) != 3)
+ goto out;
+
+ if (recv_ack >= tp_vars->recover)
+ goto out;
+
+ /* if this is the third duplicate ACK do Fast Retransmit */
+ batadv_tp_send_msg(tp_vars, primary_if->net_dev->dev_addr,
+ orig_node, recv_ack, packet_len,
+ icmp->session, icmp->uid,
+ jiffies_to_msecs(jiffies));
+
+ spin_lock_bh(&tp_vars->cwnd_lock);
+
+ /* Fast Recovery */
+ tp_vars->fast_recovery = true;
+ /* Set recover to the last outstanding seqno when Fast Recovery
+ * is entered. RFC6582, Section 3.2, step 1
+ */
+ tp_vars->recover = tp_vars->last_sent;
+ tp_vars->ss_threshold = tp_vars->cwnd >> 1;
+ batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
+ "Meter: Fast Recovery, (cur cwnd=%u) ss_thr=%u last_sent=%u recv_ack=%u\n",
+ tp_vars->cwnd, tp_vars->ss_threshold,
+ tp_vars->last_sent, recv_ack);
+ tp_vars->cwnd = batadv_tp_cwnd(tp_vars->ss_threshold, 3 * mss,
+ mss);
+ tp_vars->dec_cwnd = 0;
+ tp_vars->last_sent = recv_ack;
+
+ spin_unlock_bh(&tp_vars->cwnd_lock);
+ } else {
+ /* count the acked data */
+ atomic64_add(recv_ack - atomic_read(&tp_vars->last_acked),
+ &tp_vars->tot_sent);
+ /* reset the duplicate ACKs counter */
+ atomic_set(&tp_vars->dup_acks, 0);
+
+ if (tp_vars->fast_recovery) {
+ /* partial ACK */
+ if (batadv_seq_before(recv_ack, tp_vars->recover)) {
+ /* this is another hole in the window. React
+ * immediately as specified by NewReno (see
+ * Section 3.2 of RFC6582 for details)
+ */
+ dev_addr = primary_if->net_dev->dev_addr;
+ batadv_tp_send_msg(tp_vars, dev_addr,
+ orig_node, recv_ack,
+ packet_len, icmp->session,
+ icmp->uid,
+ jiffies_to_msecs(jiffies));
+ tp_vars->cwnd = batadv_tp_cwnd(tp_vars->cwnd,
+ mss, mss);
+ } else {
+ tp_vars->fast_recovery = false;
+ /* set cwnd to the value of ss_threshold at the
+ * moment that Fast Recovery was entered.
+ * RFC6582, Section 3.2, step 3
+ */
+ cwnd = batadv_tp_cwnd(tp_vars->ss_threshold, 0,
+ mss);
+ tp_vars->cwnd = cwnd;
+ }
+ goto move_twnd;
+ }
+
+ if (recv_ack - atomic_read(&tp_vars->last_acked) >= mss)
+ batadv_tp_update_cwnd(tp_vars, mss);
+move_twnd:
+ /* move the Transmit Window */
+ atomic_set(&tp_vars->last_acked, recv_ack);
+ }
+
+ wake_up(&tp_vars->more_bytes);
+out:
+ batadv_hardif_put(primary_if);
+ batadv_orig_node_put(orig_node);
+ batadv_tp_vars_put(tp_vars);
+}
+
+/**
+ * batadv_tp_avail() - check if congestion window is not full
+ * @tp_vars: the private data of the current TP meter session
+ * @payload_len: size of the payload of a single message
+ *
+ * Return: true when congestion window is not full, false otherwise
+ */
+static bool batadv_tp_avail(struct batadv_tp_vars *tp_vars,
+ size_t payload_len)
+{
+ u32 win_left, win_limit;
+
+ win_limit = atomic_read(&tp_vars->last_acked) + tp_vars->cwnd;
+ win_left = win_limit - tp_vars->last_sent;
+
+ return win_left >= payload_len;
+}
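+
+/* The window arithmetic above relies on unsigned 32 bit wrap-around: e.g.
+ * with last_acked = 0xfffffff0, cwnd = 0x100 and last_sent = 0x00000010,
+ * win_limit wraps to 0xf0 and win_left still evaluates to the expected
+ * 0xe0, so the availability check survives a seqno wrap (values purely
+ * illustrative)
+ */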
+
+/**
+ * batadv_tp_wait_available() - wait until congestion window becomes free or
+ * timeout is reached
+ * @tp_vars: the private data of the current TP meter session
+ * @plen: size of the payload of a single message
+ *
+ * Return: 0 if the condition evaluated to false after the timeout elapsed,
+ * 1 if the condition evaluated to true after the timeout elapsed, the
+ * remaining jiffies (at least 1) if the condition evaluated to true before
+ * the timeout elapsed, or -ERESTARTSYS if it was interrupted by a signal.
+ */
+static int batadv_tp_wait_available(struct batadv_tp_vars *tp_vars, size_t plen)
+{
+ int ret;
+
+ ret = wait_event_interruptible_timeout(tp_vars->more_bytes,
+ batadv_tp_avail(tp_vars, plen),
+ HZ / 10);
+
+ return ret;
+}
+
+/**
+ * batadv_tp_send() - main sending thread of a tp meter session
+ * @arg: address of the related tp_vars
+ *
+ * Return: 0 once the test has ended and the sending thread terminates
+ */
+static int batadv_tp_send(void *arg)
+{
+ struct batadv_tp_vars *tp_vars = arg;
+ struct batadv_priv *bat_priv = tp_vars->bat_priv;
+ struct batadv_hard_iface *primary_if = NULL;
+ struct batadv_orig_node *orig_node = NULL;
+ size_t payload_len, packet_len;
+ int err = 0;
+
+ if (unlikely(tp_vars->role != BATADV_TP_SENDER)) {
+ err = BATADV_TP_REASON_DST_UNREACHABLE;
+ tp_vars->reason = err;
+ goto out;
+ }
+
+ orig_node = batadv_orig_hash_find(bat_priv, tp_vars->other_end);
+ if (unlikely(!orig_node)) {
+ err = BATADV_TP_REASON_DST_UNREACHABLE;
+ tp_vars->reason = err;
+ goto out;
+ }
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (unlikely(!primary_if)) {
+ err = BATADV_TP_REASON_DST_UNREACHABLE;
+ tp_vars->reason = err;
+ goto out;
+ }
+
+ /* assume that all the hard_interfaces have a correctly
+ * configured MTU, so use the soft_iface MTU as MSS.
+ * This might not be true and in that case the fragmentation
+ * should be used.
+ * Now, try to send the packet as it is
+ */
+ payload_len = BATADV_TP_PLEN;
+ BUILD_BUG_ON(sizeof(struct batadv_icmp_tp_packet) > BATADV_TP_PLEN);
+
+ batadv_tp_reset_sender_timer(tp_vars);
+
+ /* queue the worker in charge of terminating the test */
+ queue_delayed_work(batadv_event_workqueue, &tp_vars->finish_work,
+ msecs_to_jiffies(tp_vars->test_length));
+
+ while (atomic_read(&tp_vars->sending) != 0) {
+ if (unlikely(!batadv_tp_avail(tp_vars, payload_len))) {
+ batadv_tp_wait_available(tp_vars, payload_len);
+ continue;
+ }
+
+ /* to emulate normal unicast traffic, add to the payload len
+ * the size of the unicast header
+ */
+ packet_len = payload_len + sizeof(struct batadv_unicast_packet);
+
+ err = batadv_tp_send_msg(tp_vars, primary_if->net_dev->dev_addr,
+ orig_node, tp_vars->last_sent,
+ packet_len,
+ tp_vars->session, tp_vars->icmp_uid,
+ jiffies_to_msecs(jiffies));
+
+ /* something went wrong during the preparation/transmission */
+ if (unlikely(err && err != BATADV_TP_REASON_CANT_SEND)) {
+ batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
+ "Meter: %s() cannot send packets (%d)\n",
+ __func__, err);
+ /* ensure nobody else tries to stop the thread now */
+ if (atomic_dec_and_test(&tp_vars->sending))
+ tp_vars->reason = err;
+ break;
+ }
+
+ /* right-shift the TWND */
+ if (!err)
+ tp_vars->last_sent += payload_len;
+
+ cond_resched();
+ }
+
+out:
+ batadv_hardif_put(primary_if);
+ batadv_orig_node_put(orig_node);
+
+ batadv_tp_sender_end(bat_priv, tp_vars);
+ batadv_tp_sender_cleanup(bat_priv, tp_vars);
+
+ batadv_tp_vars_put(tp_vars);
+
+ return 0;
+}
+
+/**
+ * batadv_tp_start_kthread() - start new thread which manages the tp meter
+ * sender
+ * @tp_vars: the private data of the current TP meter session
+ */
+static void batadv_tp_start_kthread(struct batadv_tp_vars *tp_vars)
+{
+ struct task_struct *kthread;
+ struct batadv_priv *bat_priv = tp_vars->bat_priv;
+ u32 session_cookie;
+
+ kref_get(&tp_vars->refcount);
+ kthread = kthread_create(batadv_tp_send, tp_vars, "kbatadv_tp_meter");
+ if (IS_ERR(kthread)) {
+ session_cookie = batadv_tp_session_cookie(tp_vars->session,
+ tp_vars->icmp_uid);
+ pr_err("batadv: cannot create tp meter kthread\n");
+ batadv_tp_batctl_error_notify(BATADV_TP_REASON_MEMORY_ERROR,
+ tp_vars->other_end,
+ bat_priv, session_cookie);
+
+ /* drop reserved reference for kthread */
+ batadv_tp_vars_put(tp_vars);
+
+ /* cleanup of failed tp meter variables */
+ batadv_tp_sender_cleanup(bat_priv, tp_vars);
+ return;
+ }
+
+ wake_up_process(kthread);
+}
+
+/**
+ * batadv_tp_start() - start a new tp meter session
+ * @bat_priv: the bat priv with all the soft interface information
+ * @dst: the receiver MAC address
+ * @test_length: test length in milliseconds
+ * @cookie: session cookie
+ */
+void batadv_tp_start(struct batadv_priv *bat_priv, const u8 *dst,
+ u32 test_length, u32 *cookie)
+{
+ struct batadv_tp_vars *tp_vars;
+ u8 session_id[2];
+ u8 icmp_uid;
+ u32 session_cookie;
+
+ get_random_bytes(session_id, sizeof(session_id));
+ get_random_bytes(&icmp_uid, 1);
+ session_cookie = batadv_tp_session_cookie(session_id, icmp_uid);
+ *cookie = session_cookie;
+
+ /* look for an already existing test towards this node */
+ spin_lock_bh(&bat_priv->tp_list_lock);
+ tp_vars = batadv_tp_list_find(bat_priv, dst);
+ if (tp_vars) {
+ spin_unlock_bh(&bat_priv->tp_list_lock);
+ batadv_tp_vars_put(tp_vars);
+ batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
+ "Meter: test to or from the same node already ongoing, aborting\n");
+ batadv_tp_batctl_error_notify(BATADV_TP_REASON_ALREADY_ONGOING,
+ dst, bat_priv, session_cookie);
+ return;
+ }
+
+ if (!atomic_add_unless(&bat_priv->tp_num, 1, BATADV_TP_MAX_NUM)) {
+ spin_unlock_bh(&bat_priv->tp_list_lock);
+ batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
+ "Meter: too many ongoing sessions, aborting (SEND)\n");
+ batadv_tp_batctl_error_notify(BATADV_TP_REASON_TOO_MANY, dst,
+ bat_priv, session_cookie);
+ return;
+ }
+
+ tp_vars = kmalloc(sizeof(*tp_vars), GFP_ATOMIC);
+ if (!tp_vars) {
+ spin_unlock_bh(&bat_priv->tp_list_lock);
+ batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
+ "Meter: %s cannot allocate list elements\n",
+ __func__);
+ batadv_tp_batctl_error_notify(BATADV_TP_REASON_MEMORY_ERROR,
+ dst, bat_priv, session_cookie);
+ return;
+ }
+
+ /* initialize tp_vars */
+ ether_addr_copy(tp_vars->other_end, dst);
+ kref_init(&tp_vars->refcount);
+ tp_vars->role = BATADV_TP_SENDER;
+ atomic_set(&tp_vars->sending, 1);
+ memcpy(tp_vars->session, session_id, sizeof(session_id));
+ tp_vars->icmp_uid = icmp_uid;
+
+ tp_vars->last_sent = BATADV_TP_FIRST_SEQ;
+ atomic_set(&tp_vars->last_acked, BATADV_TP_FIRST_SEQ);
+ tp_vars->fast_recovery = false;
+ tp_vars->recover = BATADV_TP_FIRST_SEQ;
+
+ /* initialise the CWND to 3*MSS (Section 3.1 in RFC5681).
+ * For batman-adv the MSS is the size of the payload received by the
+ * soft_interface, hence its MTU
+ */
+ tp_vars->cwnd = BATADV_TP_PLEN * 3;
+ /* at the beginning initialise the SS threshold to the biggest possible
+ * window size, hence the AWND size
+ */
+ tp_vars->ss_threshold = BATADV_TP_AWND;
+
+ /* RTO initial value is 1 second.
+ * Details in Section 2.1 of RFC6298
+ */
+ tp_vars->rto = 1000;
+ tp_vars->srtt = 0;
+ tp_vars->rttvar = 0;
+
+ atomic64_set(&tp_vars->tot_sent, 0);
+
+ kref_get(&tp_vars->refcount);
+ timer_setup(&tp_vars->timer, batadv_tp_sender_timeout, 0);
+
+ tp_vars->bat_priv = bat_priv;
+ tp_vars->start_time = jiffies;
+
+ init_waitqueue_head(&tp_vars->more_bytes);
+
+ spin_lock_init(&tp_vars->unacked_lock);
+ INIT_LIST_HEAD(&tp_vars->unacked_list);
+
+ spin_lock_init(&tp_vars->cwnd_lock);
+
+ tp_vars->prerandom_offset = 0;
+ spin_lock_init(&tp_vars->prerandom_lock);
+
+ kref_get(&tp_vars->refcount);
+ hlist_add_head_rcu(&tp_vars->list, &bat_priv->tp_list);
+ spin_unlock_bh(&bat_priv->tp_list_lock);
+
+ tp_vars->test_length = test_length;
+ if (!tp_vars->test_length)
+ tp_vars->test_length = BATADV_TP_DEF_TEST_LENGTH;
+
+ batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
+ "Meter: starting throughput meter towards %pM (length=%ums)\n",
+ dst, test_length);
+
+ /* init work item for finished tp tests */
+ INIT_DELAYED_WORK(&tp_vars->finish_work, batadv_tp_sender_finish);
+
+ /* start tp kthread. This way the write() call issued from userspace can
+ * happily return and avoid blocking
+ */
+ batadv_tp_start_kthread(tp_vars);
+
+ /* don't return reference to new tp_vars */
+ batadv_tp_vars_put(tp_vars);
+}
+
+/**
+ * batadv_tp_stop() - stop currently running tp meter session
+ * @bat_priv: the bat priv with all the soft interface information
+ * @dst: the receiver MAC address
+ * @return_value: reason for tp meter session stop
+ */
+void batadv_tp_stop(struct batadv_priv *bat_priv, const u8 *dst,
+ u8 return_value)
+{
+ struct batadv_orig_node *orig_node;
+ struct batadv_tp_vars *tp_vars;
+
+ batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
+ "Meter: stopping test towards %pM\n", dst);
+
+ orig_node = batadv_orig_hash_find(bat_priv, dst);
+ if (!orig_node)
+ return;
+
+ tp_vars = batadv_tp_list_find(bat_priv, orig_node->orig);
+ if (!tp_vars) {
+ batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
+ "Meter: trying to interrupt an already over connection\n");
+ goto out;
+ }
+
+ batadv_tp_sender_shutdown(tp_vars, return_value);
+ batadv_tp_vars_put(tp_vars);
+out:
+ batadv_orig_node_put(orig_node);
+}
+
+/**
+ * batadv_tp_reset_receiver_timer() - reset the receiver shutdown timer
+ * @tp_vars: the private data of the current TP meter session
+ *
+ * start the receiver shutdown timer or reset it if already started
+ */
+static void batadv_tp_reset_receiver_timer(struct batadv_tp_vars *tp_vars)
+{
+ mod_timer(&tp_vars->timer,
+ jiffies + msecs_to_jiffies(BATADV_TP_RECV_TIMEOUT));
+}
+
+/**
+ * batadv_tp_receiver_shutdown() - stop a tp meter receiver when the timeout
+ * is reached without a received ack
+ * @t: address to timer_list inside tp_vars
+ */
+static void batadv_tp_receiver_shutdown(struct timer_list *t)
+{
+ struct batadv_tp_vars *tp_vars = from_timer(tp_vars, t, timer);
+ struct batadv_tp_unacked *un, *safe;
+ struct batadv_priv *bat_priv;
+
+ bat_priv = tp_vars->bat_priv;
+
+ /* if there is recent activity rearm the timer */
+ if (!batadv_has_timed_out(tp_vars->last_recv_time,
+ BATADV_TP_RECV_TIMEOUT)) {
+ /* reset the receiver shutdown timer */
+ batadv_tp_reset_receiver_timer(tp_vars);
+ return;
+ }
+
+ batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
+ "Shutting down for inactivity (more than %dms) from %pM\n",
+ BATADV_TP_RECV_TIMEOUT, tp_vars->other_end);
+
+ spin_lock_bh(&tp_vars->bat_priv->tp_list_lock);
+ hlist_del_rcu(&tp_vars->list);
+ spin_unlock_bh(&tp_vars->bat_priv->tp_list_lock);
+
+ /* drop list reference */
+ batadv_tp_vars_put(tp_vars);
+
+ atomic_dec(&bat_priv->tp_num);
+
+ spin_lock_bh(&tp_vars->unacked_lock);
+ list_for_each_entry_safe(un, safe, &tp_vars->unacked_list, list) {
+ list_del(&un->list);
+ kfree(un);
+ }
+ spin_unlock_bh(&tp_vars->unacked_lock);
+
+ /* drop reference of timer */
+ batadv_tp_vars_put(tp_vars);
+}
+
+/**
+ * batadv_tp_send_ack() - send an ACK packet
+ * @bat_priv: the bat priv with all the soft interface information
+ * @dst: the mac address of the destination originator
+ * @seq: the sequence number to ACK
+ * @timestamp: the timestamp to echo back in the ACK
+ * @session: session identifier
+ * @socket_index: local ICMP socket identifier
+ *
+ * Return: 0 on success, a positive integer representing the reason for the
+ * failure otherwise
+ */
+static int batadv_tp_send_ack(struct batadv_priv *bat_priv, const u8 *dst,
+ u32 seq, __be32 timestamp, const u8 *session,
+ int socket_index)
+{
+ struct batadv_hard_iface *primary_if = NULL;
+ struct batadv_orig_node *orig_node;
+ struct batadv_icmp_tp_packet *icmp;
+ struct sk_buff *skb;
+ int r, ret;
+
+ orig_node = batadv_orig_hash_find(bat_priv, dst);
+ if (unlikely(!orig_node)) {
+ ret = BATADV_TP_REASON_DST_UNREACHABLE;
+ goto out;
+ }
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (unlikely(!primary_if)) {
+ ret = BATADV_TP_REASON_DST_UNREACHABLE;
+ goto out;
+ }
+
+ skb = netdev_alloc_skb_ip_align(NULL, sizeof(*icmp) + ETH_HLEN);
+ if (unlikely(!skb)) {
+ ret = BATADV_TP_REASON_MEMORY_ERROR;
+ goto out;
+ }
+
+ skb_reserve(skb, ETH_HLEN);
+ icmp = skb_put(skb, sizeof(*icmp));
+ icmp->packet_type = BATADV_ICMP;
+ icmp->version = BATADV_COMPAT_VERSION;
+ icmp->ttl = BATADV_TTL;
+ icmp->msg_type = BATADV_TP;
+ ether_addr_copy(icmp->dst, orig_node->orig);
+ ether_addr_copy(icmp->orig, primary_if->net_dev->dev_addr);
+ icmp->uid = socket_index;
+
+ icmp->subtype = BATADV_TP_ACK;
+ memcpy(icmp->session, session, sizeof(icmp->session));
+ icmp->seqno = htonl(seq);
+ icmp->timestamp = timestamp;
+
+ /* send the ack */
+ r = batadv_send_skb_to_orig(skb, orig_node, NULL);
+ if (unlikely(r < 0) || r == NET_XMIT_DROP) {
+ ret = BATADV_TP_REASON_DST_UNREACHABLE;
+ goto out;
+ }
+ ret = 0;
+
+out:
+ batadv_orig_node_put(orig_node);
+ batadv_hardif_put(primary_if);
+
+ return ret;
+}
+
+/**
+ * batadv_tp_handle_out_of_order() - store an out of order packet
+ * @tp_vars: the private data of the current TP meter session
+ * @skb: the buffer containing the received packet
+ *
+ * Store the out of order packet in the unacked list for later processing.
+ * These packets are kept in this list so that they can be ACKed at once as
+ * soon as all the previous packets have been received
+ *
+ * Return: true if the packet has been successfully processed, false otherwise
+ */
+static bool batadv_tp_handle_out_of_order(struct batadv_tp_vars *tp_vars,
+ const struct sk_buff *skb)
+{
+ const struct batadv_icmp_tp_packet *icmp;
+ struct batadv_tp_unacked *un, *new;
+ u32 payload_len;
+ bool added = false;
+
+ new = kmalloc(sizeof(*new), GFP_ATOMIC);
+ if (unlikely(!new))
+ return false;
+
+ icmp = (struct batadv_icmp_tp_packet *)skb->data;
+
+ new->seqno = ntohl(icmp->seqno);
+ payload_len = skb->len - sizeof(struct batadv_unicast_packet);
+ new->len = payload_len;
+
+ spin_lock_bh(&tp_vars->unacked_lock);
+ /* if the list is empty immediately attach this new object */
+ if (list_empty(&tp_vars->unacked_list)) {
+ list_add(&new->list, &tp_vars->unacked_list);
+ goto out;
+ }
+
+ /* otherwise loop over the list and either drop the packet because this
+ * is a duplicate or store it at the right position.
+ *
+ * The iteration is done in the reverse way because it is likely that
+ * the last received packet (the one being processed now) has a bigger
+ * seqno than all the others already stored.
+ */
+ list_for_each_entry_reverse(un, &tp_vars->unacked_list, list) {
+ /* check for duplicates */
+ if (new->seqno == un->seqno) {
+ if (new->len > un->len)
+ un->len = new->len;
+ kfree(new);
+ added = true;
+ break;
+ }
+
+ /* look for the right position */
+ if (batadv_seq_before(new->seqno, un->seqno))
+ continue;
+
+ /* as soon as an entry with a smaller seqno is found, the new
+ * one is attached right after it. In this way the list is kept
+ * in ascending order
+ */
+ list_add(&new->list, &un->list);
+ added = true;
+ break;
+ }
+
+ /* received packet with smallest seqno out of order; add it to front */
+ if (!added)
+ list_add(&new->list, &tp_vars->unacked_list);
+
+out:
+ spin_unlock_bh(&tp_vars->unacked_lock);
+
+ return true;
+}
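+
+/* Worked example for the insertion above (illustrative): with an unacked
+ * list holding seqnos {100, 300} and a new packet with seqno 200, the
+ * reverse walk skips 300 (bigger than 200), stops at 100 (smaller) and
+ * links the new node right after it, yielding {100, 200, 300}.
+ */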
+
+/**
+ * batadv_tp_ack_unordered() - update the number of received bytes in the
+ * current stream without gaps
+ * @tp_vars: the private data of the current TP meter session
+ */
+static void batadv_tp_ack_unordered(struct batadv_tp_vars *tp_vars)
+{
+ struct batadv_tp_unacked *un, *safe;
+ u32 to_ack;
+
+ /* go through the unacked packet list and possibly ACK them as
+ * well
+ */
+ spin_lock_bh(&tp_vars->unacked_lock);
+ list_for_each_entry_safe(un, safe, &tp_vars->unacked_list, list) {
+ /* the list is ordered, therefore it is possible to stop as soon
+ * as there is a gap between the last acked seqno and the seqno of
+ * the packet under inspection
+ */
+ if (batadv_seq_before(tp_vars->last_recv, un->seqno))
+ break;
+
+ to_ack = un->seqno + un->len - tp_vars->last_recv;
+
+ if (batadv_seq_before(tp_vars->last_recv, un->seqno + un->len))
+ tp_vars->last_recv += to_ack;
+
+ list_del(&un->list);
+ kfree(un);
+ }
+ spin_unlock_bh(&tp_vars->unacked_lock);
+}
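+
+/* Worked example for the coalescing above (illustrative): with last_recv at
+ * 1000 and an unacked list of {seqno 1000/len 100, seqno 1200/len 100}, the
+ * first entry is contiguous and advances last_recv to 1100; the second one
+ * starts beyond 1100, so the walk stops there and 1200 stays queued.
+ */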
+
+/**
+ * batadv_tp_init_recv() - return matching or create new receiver tp_vars
+ * @bat_priv: the bat priv with all the soft interface information
+ * @icmp: received icmp tp msg
+ *
+ * Return: corresponding tp_vars or NULL on errors
+ */
+static struct batadv_tp_vars *
+batadv_tp_init_recv(struct batadv_priv *bat_priv,
+ const struct batadv_icmp_tp_packet *icmp)
+{
+ struct batadv_tp_vars *tp_vars;
+
+ spin_lock_bh(&bat_priv->tp_list_lock);
+ tp_vars = batadv_tp_list_find_session(bat_priv, icmp->orig,
+ icmp->session);
+ if (tp_vars)
+ goto out_unlock;
+
+ if (!atomic_add_unless(&bat_priv->tp_num, 1, BATADV_TP_MAX_NUM)) {
+ batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
+ "Meter: too many ongoing sessions, aborting (RECV)\n");
+ goto out_unlock;
+ }
+
+ tp_vars = kmalloc(sizeof(*tp_vars), GFP_ATOMIC);
+ if (!tp_vars)
+ goto out_unlock;
+
+ ether_addr_copy(tp_vars->other_end, icmp->orig);
+ tp_vars->role = BATADV_TP_RECEIVER;
+ memcpy(tp_vars->session, icmp->session, sizeof(tp_vars->session));
+ tp_vars->last_recv = BATADV_TP_FIRST_SEQ;
+ tp_vars->bat_priv = bat_priv;
+ kref_init(&tp_vars->refcount);
+
+ spin_lock_init(&tp_vars->unacked_lock);
+ INIT_LIST_HEAD(&tp_vars->unacked_list);
+
+ kref_get(&tp_vars->refcount);
+ hlist_add_head_rcu(&tp_vars->list, &bat_priv->tp_list);
+
+ kref_get(&tp_vars->refcount);
+ timer_setup(&tp_vars->timer, batadv_tp_receiver_shutdown, 0);
+
+ batadv_tp_reset_receiver_timer(tp_vars);
+
+out_unlock:
+ spin_unlock_bh(&bat_priv->tp_list_lock);
+
+ return tp_vars;
+}
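+
+/* Reference note for the function above: the two kref_get() calls hand one
+ * reference to the tp_list and one to the shutdown timer; the reference
+ * returned to the caller is the initial one taken by kref_init().
+ */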
+
+/**
+ * batadv_tp_recv_msg() - process a single data message
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the buffer containing the received packet
+ *
+ * Process a received TP MSG packet
+ */
+static void batadv_tp_recv_msg(struct batadv_priv *bat_priv,
+ const struct sk_buff *skb)
+{
+ const struct batadv_icmp_tp_packet *icmp;
+ struct batadv_tp_vars *tp_vars;
+ size_t packet_size;
+ u32 seqno;
+
+ icmp = (struct batadv_icmp_tp_packet *)skb->data;
+
+ seqno = ntohl(icmp->seqno);
+ /* check if this is the first seqno. This means that if the
+ * first packet is lost, the tp meter does not work anymore!
+ */
+ if (seqno == BATADV_TP_FIRST_SEQ) {
+ tp_vars = batadv_tp_init_recv(bat_priv, icmp);
+ if (!tp_vars) {
+ batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
+ "Meter: seqno != BATADV_TP_FIRST_SEQ cannot initiate connection\n");
+ goto out;
+ }
+ } else {
+ tp_vars = batadv_tp_list_find_session(bat_priv, icmp->orig,
+ icmp->session);
+ if (!tp_vars) {
+ batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
+ "Unexpected packet from %pM!\n",
+ icmp->orig);
+ goto out;
+ }
+ }
+
+ if (unlikely(tp_vars->role != BATADV_TP_RECEIVER)) {
+ batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
+ "Meter: dropping packet: not expected (role=%u)\n",
+ tp_vars->role);
+ goto out;
+ }
+
+ tp_vars->last_recv_time = jiffies;
+
+ /* if the packet is a duplicate, it may be the case that an ACK has been
+ * lost. Resend the ACK
+ */
+ if (batadv_seq_before(seqno, tp_vars->last_recv))
+ goto send_ack;
+
+ /* if the packet is out of order, enqueue it */
+ if (ntohl(icmp->seqno) != tp_vars->last_recv) {
+ /* exit immediately (and do not send any ACK) if the packet has
+ * not been enqueued correctly
+ */
+ if (!batadv_tp_handle_out_of_order(tp_vars, skb))
+ goto out;
+
+ /* send a duplicate ACK */
+ goto send_ack;
+ }
+
+ /* if everything was fine count the ACKed bytes */
+ packet_size = skb->len - sizeof(struct batadv_unicast_packet);
+ tp_vars->last_recv += packet_size;
+
+ /* check if this ordered message filled a gap.... */
+ batadv_tp_ack_unordered(tp_vars);
+
+send_ack:
+ /* send the ACK. If the received packet was out of order, the ACK that
+ * is going to be sent is a duplicate (the sender will count them and
+ * possibly enter Fast Retransmit as soon as it has reached 3)
+ */
+ batadv_tp_send_ack(bat_priv, icmp->orig, tp_vars->last_recv,
+ icmp->timestamp, icmp->session, icmp->uid);
+out:
+ batadv_tp_vars_put(tp_vars);
+}
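+
+/* Receiver path in short (summary): in-order data advances last_recv and
+ * may drain the unacked list; out-of-order data is queued and answered
+ * with a duplicate ACK, which the sender counts towards its fast
+ * retransmit threshold of three duplicates.
+ */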
+
+/**
+ * batadv_tp_meter_recv() - main TP Meter receiving function
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the buffer containing the received packet
+ */
+void batadv_tp_meter_recv(struct batadv_priv *bat_priv, struct sk_buff *skb)
+{
+ struct batadv_icmp_tp_packet *icmp;
+
+ icmp = (struct batadv_icmp_tp_packet *)skb->data;
+
+ switch (icmp->subtype) {
+ case BATADV_TP_MSG:
+ batadv_tp_recv_msg(bat_priv, skb);
+ break;
+ case BATADV_TP_ACK:
+ batadv_tp_recv_ack(bat_priv, skb);
+ break;
+ default:
+ batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
+ "Received unknown TP Metric packet type %u\n",
+ icmp->subtype);
+ }
+ consume_skb(skb);
+}
+
+/**
+ * batadv_tp_meter_init() - initialize global tp_meter structures
+ */
+void __init batadv_tp_meter_init(void)
+{
+ get_random_bytes(batadv_tp_prerandom, sizeof(batadv_tp_prerandom));
+}
diff --git a/net/batman-adv/tp_meter.h b/net/batman-adv/tp_meter.h
new file mode 100644
index 0000000000..f0046d366e
--- /dev/null
+++ b/net/batman-adv/tp_meter.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Edo Monticelli, Antonio Quartulli
+ */
+
+#ifndef _NET_BATMAN_ADV_TP_METER_H_
+#define _NET_BATMAN_ADV_TP_METER_H_
+
+#include "main.h"
+
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
+void batadv_tp_meter_init(void);
+void batadv_tp_start(struct batadv_priv *bat_priv, const u8 *dst,
+ u32 test_length, u32 *cookie);
+void batadv_tp_stop(struct batadv_priv *bat_priv, const u8 *dst,
+ u8 return_value);
+void batadv_tp_meter_recv(struct batadv_priv *bat_priv, struct sk_buff *skb);
+
+#endif /* _NET_BATMAN_ADV_TP_METER_H_ */
diff --git a/net/batman-adv/trace.c b/net/batman-adv/trace.c
new file mode 100644
index 0000000000..ec8b951907
--- /dev/null
+++ b/net/batman-adv/trace.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Sven Eckelmann
+ */
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/net/batman-adv/trace.h b/net/batman-adv/trace.h
new file mode 100644
index 0000000000..5dd52bc5ca
--- /dev/null
+++ b/net/batman-adv/trace.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Sven Eckelmann
+ */
+
+#if !defined(_NET_BATMAN_ADV_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _NET_BATMAN_ADV_TRACE_H_
+
+#include "main.h"
+
+#include <linux/netdevice.h>
+#include <linux/percpu.h>
+#include <linux/printk.h>
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM batadv
+
+/* provide dummy function when tracing is disabled */
+#if !defined(CONFIG_BATMAN_ADV_TRACING)
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+ static inline void trace_ ## name(proto) {}
+
+#endif /* CONFIG_BATMAN_ADV_TRACING */
+
+TRACE_EVENT(batadv_dbg,
+
+ TP_PROTO(struct batadv_priv *bat_priv,
+ struct va_format *vaf),
+
+ TP_ARGS(bat_priv, vaf),
+
+ TP_STRUCT__entry(
+ __string(device, bat_priv->soft_iface->name)
+ __string(driver, KBUILD_MODNAME)
+ __vstring(msg, vaf->fmt, vaf->va)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, bat_priv->soft_iface->name);
+ __assign_str(driver, KBUILD_MODNAME);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
+ ),
+
+ TP_printk(
+ "%s %s %s",
+ __get_str(driver),
+ __get_str(device),
+ __get_str(msg)
+ )
+);
+
+#endif /* _NET_BATMAN_ADV_TRACE_H_ || TRACE_HEADER_MULTI_READ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
new file mode 100644
index 0000000000..b95c36765d
--- /dev/null
+++ b/net/batman-adv/translation-table.c
@@ -0,0 +1,4290 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Simon Wunderlich, Antonio Quartulli
+ */
+
+#include "translation-table.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/build_bug.h>
+#include <linux/byteorder/generic.h>
+#include <linux/cache.h>
+#include <linux/compiler.h>
+#include <linux/container_of.h>
+#include <linux/crc32c.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/gfp.h>
+#include <linux/if_ether.h>
+#include <linux/init.h>
+#include <linux/jhash.h>
+#include <linux/jiffies.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/netlink.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <net/genetlink.h>
+#include <net/netlink.h>
+#include <net/sock.h>
+#include <uapi/linux/batadv_packet.h>
+#include <uapi/linux/batman_adv.h>
+
+#include "bridge_loop_avoidance.h"
+#include "hard-interface.h"
+#include "hash.h"
+#include "log.h"
+#include "netlink.h"
+#include "originator.h"
+#include "soft-interface.h"
+#include "tvlv.h"
+
+static struct kmem_cache *batadv_tl_cache __read_mostly;
+static struct kmem_cache *batadv_tg_cache __read_mostly;
+static struct kmem_cache *batadv_tt_orig_cache __read_mostly;
+static struct kmem_cache *batadv_tt_change_cache __read_mostly;
+static struct kmem_cache *batadv_tt_req_cache __read_mostly;
+static struct kmem_cache *batadv_tt_roam_cache __read_mostly;
+
+/* hash class keys */
+static struct lock_class_key batadv_tt_local_hash_lock_class_key;
+static struct lock_class_key batadv_tt_global_hash_lock_class_key;
+
+static void batadv_send_roam_adv(struct batadv_priv *bat_priv, u8 *client,
+ unsigned short vid,
+ struct batadv_orig_node *orig_node);
+static void batadv_tt_purge(struct work_struct *work);
+static void
+batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry);
+static void batadv_tt_global_del(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const unsigned char *addr,
+ unsigned short vid, const char *message,
+ bool roaming);
+
+/**
+ * batadv_compare_tt() - check if two TT entries are the same
+ * @node: the list element pointer of the first TT entry
+ * @data2: pointer to the tt_common_entry of the second TT entry
+ *
+ * Compare the MAC address and the VLAN ID of the two TT entries and check if
+ * they are the same TT client.
+ * Return: true if the two TT clients are the same, false otherwise
+ */
+static bool batadv_compare_tt(const struct hlist_node *node, const void *data2)
+{
+ const void *data1 = container_of(node, struct batadv_tt_common_entry,
+ hash_entry);
+ const struct batadv_tt_common_entry *tt1 = data1;
+ const struct batadv_tt_common_entry *tt2 = data2;
+
+ return (tt1->vid == tt2->vid) && batadv_compare_eth(data1, data2);
+}
+
+/**
+ * batadv_choose_tt() - return the index of the tt entry in the hash table
+ * @data: pointer to the tt_common_entry object to map
+ * @size: the size of the hash table
+ *
+ * Return: the hash index where the object represented by 'data' should be
+ * stored.
+ */
+static inline u32 batadv_choose_tt(const void *data, u32 size)
+{
+ const struct batadv_tt_common_entry *tt;
+ u32 hash = 0;
+
+ tt = data;
+ hash = jhash(&tt->addr, ETH_ALEN, hash);
+ hash = jhash(&tt->vid, sizeof(tt->vid), hash);
+
+ return hash % size;
+}
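+
+/* Mapping sketch for the hash above (illustrative): jhash() folds the
+ * 6-byte MAC first, then mixes the VID in a second round seeded with the
+ * first result, and the outcome is reduced modulo the table size. The same
+ * MAC on two different VLANs therefore normally lands in different buckets.
+ */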
+
+/**
+ * batadv_tt_hash_find() - look for a client in the given hash table
+ * @hash: the hash table to search
+ * @addr: the mac address of the client to look for
+ * @vid: VLAN identifier
+ *
+ * Return: a pointer to the tt_common struct belonging to the searched client if
+ * found, NULL otherwise.
+ */
+static struct batadv_tt_common_entry *
+batadv_tt_hash_find(struct batadv_hashtable *hash, const u8 *addr,
+ unsigned short vid)
+{
+ struct hlist_head *head;
+ struct batadv_tt_common_entry to_search, *tt, *tt_tmp = NULL;
+ u32 index;
+
+ if (!hash)
+ return NULL;
+
+ ether_addr_copy(to_search.addr, addr);
+ to_search.vid = vid;
+
+ index = batadv_choose_tt(&to_search, hash->size);
+ head = &hash->table[index];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(tt, head, hash_entry) {
+ if (!batadv_compare_eth(tt, addr))
+ continue;
+
+ if (tt->vid != vid)
+ continue;
+
+ if (!kref_get_unless_zero(&tt->refcount))
+ continue;
+
+ tt_tmp = tt;
+ break;
+ }
+ rcu_read_unlock();
+
+ return tt_tmp;
+}
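+
+/* The lookup above follows the common RCU pattern: walk the bucket under
+ * rcu_read_lock() and only hand out an entry once kref_get_unless_zero()
+ * succeeded, so an entry whose refcount already dropped to zero is skipped
+ * instead of being resurrected.
+ */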
+
+/**
+ * batadv_tt_local_hash_find() - search the local table for a given client
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the mac address of the client to look for
+ * @vid: VLAN identifier
+ *
+ * Return: a pointer to the corresponding tt_local_entry struct if the client is
+ * found, NULL otherwise.
+ */
+static struct batadv_tt_local_entry *
+batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
+ unsigned short vid)
+{
+ struct batadv_tt_common_entry *tt_common_entry;
+ struct batadv_tt_local_entry *tt_local_entry = NULL;
+
+ tt_common_entry = batadv_tt_hash_find(bat_priv->tt.local_hash, addr,
+ vid);
+ if (tt_common_entry)
+ tt_local_entry = container_of(tt_common_entry,
+ struct batadv_tt_local_entry,
+ common);
+ return tt_local_entry;
+}
+
+/**
+ * batadv_tt_global_hash_find() - search the global table for a given client
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the mac address of the client to look for
+ * @vid: VLAN identifier
+ *
+ * Return: a pointer to the corresponding tt_global_entry struct if the client
+ * is found, NULL otherwise.
+ */
+struct batadv_tt_global_entry *
+batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
+ unsigned short vid)
+{
+ struct batadv_tt_common_entry *tt_common_entry;
+ struct batadv_tt_global_entry *tt_global_entry = NULL;
+
+ tt_common_entry = batadv_tt_hash_find(bat_priv->tt.global_hash, addr,
+ vid);
+ if (tt_common_entry)
+ tt_global_entry = container_of(tt_common_entry,
+ struct batadv_tt_global_entry,
+ common);
+ return tt_global_entry;
+}
+
+/**
+ * batadv_tt_local_entry_free_rcu() - free the tt_local_entry
+ * @rcu: rcu pointer of the tt_local_entry
+ */
+static void batadv_tt_local_entry_free_rcu(struct rcu_head *rcu)
+{
+ struct batadv_tt_local_entry *tt_local_entry;
+
+ tt_local_entry = container_of(rcu, struct batadv_tt_local_entry,
+ common.rcu);
+
+ kmem_cache_free(batadv_tl_cache, tt_local_entry);
+}
+
+/**
+ * batadv_tt_local_entry_release() - release tt_local_entry from lists and queue
+ * for free after rcu grace period
+ * @ref: kref pointer of the tt_local_entry
+ */
+static void batadv_tt_local_entry_release(struct kref *ref)
+{
+ struct batadv_tt_local_entry *tt_local_entry;
+
+ tt_local_entry = container_of(ref, struct batadv_tt_local_entry,
+ common.refcount);
+
+ batadv_softif_vlan_put(tt_local_entry->vlan);
+
+ call_rcu(&tt_local_entry->common.rcu, batadv_tt_local_entry_free_rcu);
+}
+
+/**
+ * batadv_tt_local_entry_put() - decrement the tt_local_entry refcounter and
+ * possibly release it
+ * @tt_local_entry: tt_local_entry to be free'd
+ */
+static void
+batadv_tt_local_entry_put(struct batadv_tt_local_entry *tt_local_entry)
+{
+ if (!tt_local_entry)
+ return;
+
+ kref_put(&tt_local_entry->common.refcount,
+ batadv_tt_local_entry_release);
+}
+
+/**
+ * batadv_tt_global_entry_free_rcu() - free the tt_global_entry
+ * @rcu: rcu pointer of the tt_global_entry
+ */
+static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu)
+{
+ struct batadv_tt_global_entry *tt_global_entry;
+
+ tt_global_entry = container_of(rcu, struct batadv_tt_global_entry,
+ common.rcu);
+
+ kmem_cache_free(batadv_tg_cache, tt_global_entry);
+}
+
+/**
+ * batadv_tt_global_entry_release() - release tt_global_entry from lists and
+ * queue for free after rcu grace period
+ * @ref: kref pointer of the tt_global_entry
+ */
+void batadv_tt_global_entry_release(struct kref *ref)
+{
+ struct batadv_tt_global_entry *tt_global_entry;
+
+ tt_global_entry = container_of(ref, struct batadv_tt_global_entry,
+ common.refcount);
+
+ batadv_tt_global_del_orig_list(tt_global_entry);
+
+ call_rcu(&tt_global_entry->common.rcu, batadv_tt_global_entry_free_rcu);
+}
+
+/**
+ * batadv_tt_global_hash_count() - count the number of orig entries
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the mac address of the client to count entries for
+ * @vid: VLAN identifier
+ *
+ * Return: the number of originators advertising the given address/data
+ * (excluding ourselves).
+ */
+int batadv_tt_global_hash_count(struct batadv_priv *bat_priv,
+ const u8 *addr, unsigned short vid)
+{
+ struct batadv_tt_global_entry *tt_global_entry;
+ int count;
+
+ tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr, vid);
+ if (!tt_global_entry)
+ return 0;
+
+ count = atomic_read(&tt_global_entry->orig_list_count);
+ batadv_tt_global_entry_put(tt_global_entry);
+
+ return count;
+}
+
+/**
+ * batadv_tt_local_size_mod() - change the size by v of the local table
+ * identified by vid
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: the VLAN identifier of the sub-table to change
+ * @v: the amount to sum to the local table size
+ */
+static void batadv_tt_local_size_mod(struct batadv_priv *bat_priv,
+ unsigned short vid, int v)
+{
+ struct batadv_softif_vlan *vlan;
+
+ vlan = batadv_softif_vlan_get(bat_priv, vid);
+ if (!vlan)
+ return;
+
+ atomic_add(v, &vlan->tt.num_entries);
+
+ batadv_softif_vlan_put(vlan);
+}
+
+/**
+ * batadv_tt_local_size_inc() - increase by one the local table size for the
+ * given vid
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: the VLAN identifier
+ */
+static void batadv_tt_local_size_inc(struct batadv_priv *bat_priv,
+ unsigned short vid)
+{
+ batadv_tt_local_size_mod(bat_priv, vid, 1);
+}
+
+/**
+ * batadv_tt_local_size_dec() - decrease by one the local table size for the
+ * given vid
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: the VLAN identifier
+ */
+static void batadv_tt_local_size_dec(struct batadv_priv *bat_priv,
+ unsigned short vid)
+{
+ batadv_tt_local_size_mod(bat_priv, vid, -1);
+}
+
+/**
+ * batadv_tt_global_size_mod() - change the size by v of the global table
+ * for orig_node identified by vid
+ * @orig_node: the originator for which the table has to be modified
+ * @vid: the VLAN identifier
+ * @v: the amount to sum to the global table size
+ */
+static void batadv_tt_global_size_mod(struct batadv_orig_node *orig_node,
+ unsigned short vid, int v)
+{
+ struct batadv_orig_node_vlan *vlan;
+
+ vlan = batadv_orig_node_vlan_new(orig_node, vid);
+ if (!vlan)
+ return;
+
+ if (atomic_add_return(v, &vlan->tt.num_entries) == 0) {
+ spin_lock_bh(&orig_node->vlan_list_lock);
+ if (!hlist_unhashed(&vlan->list)) {
+ hlist_del_init_rcu(&vlan->list);
+ batadv_orig_node_vlan_put(vlan);
+ }
+ spin_unlock_bh(&orig_node->vlan_list_lock);
+ }
+
+ batadv_orig_node_vlan_put(vlan);
+}
+
+/**
+ * batadv_tt_global_size_inc() - increase by one the global table size for the
+ * given vid
+ * @orig_node: the originator whose global table size has to be increased
+ * @vid: the vlan identifier
+ */
+static void batadv_tt_global_size_inc(struct batadv_orig_node *orig_node,
+ unsigned short vid)
+{
+ batadv_tt_global_size_mod(orig_node, vid, 1);
+}
+
+/**
+ * batadv_tt_global_size_dec() - decrease by one the global table size for the
+ * given vid
+ * @orig_node: the originator whose global table size has to be decreased
+ * @vid: the vlan identifier
+ */
+static void batadv_tt_global_size_dec(struct batadv_orig_node *orig_node,
+ unsigned short vid)
+{
+ batadv_tt_global_size_mod(orig_node, vid, -1);
+}
+
+/**
+ * batadv_tt_orig_list_entry_free_rcu() - free the orig_entry
+ * @rcu: rcu pointer of the orig_entry
+ */
+static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
+{
+ struct batadv_tt_orig_list_entry *orig_entry;
+
+ orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu);
+
+ kmem_cache_free(batadv_tt_orig_cache, orig_entry);
+}
+
+/**
+ * batadv_tt_orig_list_entry_release() - release tt orig entry from lists and
+ * queue for free after rcu grace period
+ * @ref: kref pointer of the tt orig entry
+ */
+static void batadv_tt_orig_list_entry_release(struct kref *ref)
+{
+ struct batadv_tt_orig_list_entry *orig_entry;
+
+ orig_entry = container_of(ref, struct batadv_tt_orig_list_entry,
+ refcount);
+
+ batadv_orig_node_put(orig_entry->orig_node);
+ call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
+}
+
+/**
+ * batadv_tt_orig_list_entry_put() - decrement the tt orig entry refcounter and
+ * possibly release it
+ * @orig_entry: tt orig entry to be free'd
+ */
+static void
+batadv_tt_orig_list_entry_put(struct batadv_tt_orig_list_entry *orig_entry)
+{
+ if (!orig_entry)
+ return;
+
+ kref_put(&orig_entry->refcount, batadv_tt_orig_list_entry_release);
+}
+
+/**
+ * batadv_tt_local_event() - store a local TT event (ADD/DEL)
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tt_local_entry: the TT entry involved in the event
+ * @event_flags: flags to store in the event structure
+ */
+static void batadv_tt_local_event(struct batadv_priv *bat_priv,
+ struct batadv_tt_local_entry *tt_local_entry,
+ u8 event_flags)
+{
+ struct batadv_tt_change_node *tt_change_node, *entry, *safe;
+ struct batadv_tt_common_entry *common = &tt_local_entry->common;
+ u8 flags = common->flags | event_flags;
+ bool event_removed = false;
+ bool del_op_requested, del_op_entry;
+
+ tt_change_node = kmem_cache_alloc(batadv_tt_change_cache, GFP_ATOMIC);
+ if (!tt_change_node)
+ return;
+
+ tt_change_node->change.flags = flags;
+ memset(tt_change_node->change.reserved, 0,
+ sizeof(tt_change_node->change.reserved));
+ ether_addr_copy(tt_change_node->change.addr, common->addr);
+ tt_change_node->change.vid = htons(common->vid);
+
+ del_op_requested = flags & BATADV_TT_CLIENT_DEL;
+
+ /* check for ADD+DEL or DEL+ADD events */
+ spin_lock_bh(&bat_priv->tt.changes_list_lock);
+ list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
+ list) {
+ if (!batadv_compare_eth(entry->change.addr, common->addr))
+ continue;
+
+ /* DEL+ADD in the same orig interval have no effect and can be
+ * removed to avoid silly behaviour on the receiver side. The
+ * other way around (ADD+DEL) can happen in case of roaming of
+ * a client still in the NEW state. Roaming of NEW clients is
+ * now possible due to the automatic recognition of "temporary"
+ * clients
+ */
+ del_op_entry = entry->change.flags & BATADV_TT_CLIENT_DEL;
+ if (!del_op_requested && del_op_entry)
+ goto del;
+ if (del_op_requested && !del_op_entry)
+ goto del;
+
+ /* this is a second add in the same originator interval. It
+ * means that flags have been changed: update them!
+ */
+ if (!del_op_requested && !del_op_entry)
+ entry->change.flags = flags;
+
+ continue;
+del:
+ list_del(&entry->list);
+ kmem_cache_free(batadv_tt_change_cache, entry);
+ kmem_cache_free(batadv_tt_change_cache, tt_change_node);
+ event_removed = true;
+ goto unlock;
+ }
+
+ /* track the change in the OGM interval list */
+ list_add_tail(&tt_change_node->list, &bat_priv->tt.changes_list);
+
+unlock:
+ spin_unlock_bh(&bat_priv->tt.changes_list_lock);
+
+ if (event_removed)
+ atomic_dec(&bat_priv->tt.local_changes);
+ else
+ atomic_inc(&bat_priv->tt.local_changes);
+}
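+
+/* Net effect of the scan above (summary): an ADD queued on top of a DEL, or
+ * a DEL queued on top of an ADD, cancels out within the same OGM interval,
+ * so both events are freed and the receiver never sees them; a repeated ADD
+ * updates the flags of the already queued event before the new one is
+ * tracked.
+ */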
+
+/**
+ * batadv_tt_len() - compute length in bytes of given number of tt changes
+ * @changes_num: number of tt changes
+ *
+ * Return: computed length in bytes.
+ */
+static int batadv_tt_len(int changes_num)
+{
+ return changes_num * sizeof(struct batadv_tvlv_tt_change);
+}
+
+/**
+ * batadv_tt_entries() - compute the number of entries fitting in tt_len bytes
+ * @tt_len: available space
+ *
+ * Return: the number of entries.
+ */
+static u16 batadv_tt_entries(u16 tt_len)
+{
+ return tt_len / batadv_tt_len(1);
+}
+
+/**
+ * batadv_tt_local_table_transmit_size() - calculates the local translation
+ * table size when transmitted over the air
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: local translation table size in bytes.
+ */
+static int batadv_tt_local_table_transmit_size(struct batadv_priv *bat_priv)
+{
+ u16 num_vlan = 0;
+ u16 tt_local_entries = 0;
+ struct batadv_softif_vlan *vlan;
+ int hdr_size;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
+ num_vlan++;
+ tt_local_entries += atomic_read(&vlan->tt.num_entries);
+ }
+ rcu_read_unlock();
+
+ /* header size of tvlv encapsulated tt response payload */
+ hdr_size = sizeof(struct batadv_unicast_tvlv_packet);
+ hdr_size += sizeof(struct batadv_tvlv_hdr);
+ hdr_size += sizeof(struct batadv_tvlv_tt_data);
+ hdr_size += num_vlan * sizeof(struct batadv_tvlv_tt_vlan_data);
+
+ return hdr_size + batadv_tt_len(tt_local_entries);
+}
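+
+/* Size formula implemented above (illustrative):
+ *
+ *   size = sizeof(unicast_tvlv_packet) + sizeof(tvlv_hdr)
+ *          + sizeof(tvlv_tt_data) + num_vlan * sizeof(tvlv_tt_vlan_data)
+ *          + tt_local_entries * sizeof(tvlv_tt_change)
+ *
+ * i.e. the full-table TT response as it would go over the air.
+ */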
+
+static int batadv_tt_local_init(struct batadv_priv *bat_priv)
+{
+ if (bat_priv->tt.local_hash)
+ return 0;
+
+ bat_priv->tt.local_hash = batadv_hash_new(1024);
+
+ if (!bat_priv->tt.local_hash)
+ return -ENOMEM;
+
+ batadv_hash_set_lock_class(bat_priv->tt.local_hash,
+ &batadv_tt_local_hash_lock_class_key);
+
+ return 0;
+}
+
+static void batadv_tt_global_free(struct batadv_priv *bat_priv,
+ struct batadv_tt_global_entry *tt_global,
+ const char *message)
+{
+ struct batadv_tt_global_entry *tt_removed_entry;
+ struct hlist_node *tt_removed_node;
+
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Deleting global tt entry %pM (vid: %d): %s\n",
+ tt_global->common.addr,
+ batadv_print_vid(tt_global->common.vid), message);
+
+ tt_removed_node = batadv_hash_remove(bat_priv->tt.global_hash,
+ batadv_compare_tt,
+ batadv_choose_tt,
+ &tt_global->common);
+ if (!tt_removed_node)
+ return;
+
+ /* drop reference of remove hash entry */
+ tt_removed_entry = hlist_entry(tt_removed_node,
+ struct batadv_tt_global_entry,
+ common.hash_entry);
+ batadv_tt_global_entry_put(tt_removed_entry);
+}
+
+/**
+ * batadv_tt_local_add() - add a new client to the local table or update an
+ * existing client
+ * @soft_iface: netdev struct of the mesh interface
+ * @addr: the mac address of the client to add
+ * @vid: VLAN identifier
+ * @ifindex: index of the interface where the client is connected to (useful to
+ * identify wireless clients)
+ * @mark: the value contained in the skb->mark field of the received packet (if
+ * any)
+ *
+ * Return: true if the client was successfully added, false otherwise.
+ */
+bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
+ unsigned short vid, int ifindex, u32 mark)
+{
+ struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+ struct batadv_tt_local_entry *tt_local;
+ struct batadv_tt_global_entry *tt_global = NULL;
+ struct net *net = dev_net(soft_iface);
+ struct batadv_softif_vlan *vlan;
+ struct net_device *in_dev = NULL;
+ struct batadv_hard_iface *in_hardif = NULL;
+ struct hlist_head *head;
+ struct batadv_tt_orig_list_entry *orig_entry;
+ int hash_added, table_size, packet_size_max;
+ bool ret = false;
+ bool roamed_back = false;
+ u8 remote_flags;
+ u32 match_mark;
+
+ if (ifindex != BATADV_NULL_IFINDEX)
+ in_dev = dev_get_by_index(net, ifindex);
+
+ if (in_dev)
+ in_hardif = batadv_hardif_get_by_netdev(in_dev);
+
+ tt_local = batadv_tt_local_hash_find(bat_priv, addr, vid);
+
+ if (!is_multicast_ether_addr(addr))
+ tt_global = batadv_tt_global_hash_find(bat_priv, addr, vid);
+
+ if (tt_local) {
+ tt_local->last_seen = jiffies;
+ if (tt_local->common.flags & BATADV_TT_CLIENT_PENDING) {
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Re-adding pending client %pM (vid: %d)\n",
+ addr, batadv_print_vid(vid));
+ /* whatever the reason why the PENDING flag was set,
+ * this is a client which was enqueued to be removed in
+ * this orig_interval. Since it popped up again, the
+ * flag can be reset like it was never enqueued
+ */
+ tt_local->common.flags &= ~BATADV_TT_CLIENT_PENDING;
+ goto add_event;
+ }
+
+ if (tt_local->common.flags & BATADV_TT_CLIENT_ROAM) {
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Roaming client %pM (vid: %d) came back to its original location\n",
+ addr, batadv_print_vid(vid));
+ /* the ROAM flag is set because this client roamed away
+ * and the node got a roaming_advertisement message. Now
+ * that the client popped up again at its original
+ * location such flag can be unset
+ */
+ tt_local->common.flags &= ~BATADV_TT_CLIENT_ROAM;
+ roamed_back = true;
+ }
+ goto check_roaming;
+ }
+
+ /* Ignore the client if we cannot send it in a full table response. */
+ table_size = batadv_tt_local_table_transmit_size(bat_priv);
+ table_size += batadv_tt_len(1);
+ packet_size_max = atomic_read(&bat_priv->packet_size_max);
+ if (table_size > packet_size_max) {
+ net_ratelimited_function(batadv_info, soft_iface,
+ "Local translation table size (%i) exceeds maximum packet size (%i); Ignoring new local tt entry: %pM\n",
+ table_size, packet_size_max, addr);
+ goto out;
+ }
+
+ tt_local = kmem_cache_alloc(batadv_tl_cache, GFP_ATOMIC);
+ if (!tt_local)
+ goto out;
+
+ /* increase the refcounter of the related vlan */
+ vlan = batadv_softif_vlan_get(bat_priv, vid);
+ if (!vlan) {
+ net_ratelimited_function(batadv_info, soft_iface,
+ "adding TT local entry %pM to non-existent VLAN %d\n",
+ addr, batadv_print_vid(vid));
+ kmem_cache_free(batadv_tl_cache, tt_local);
+ tt_local = NULL;
+ goto out;
+ }
+
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
+ addr, batadv_print_vid(vid),
+ (u8)atomic_read(&bat_priv->tt.vn));
+
+ ether_addr_copy(tt_local->common.addr, addr);
+ /* The local entry has to be marked as NEW to avoid sending it in
+ * a full table response going out before the next ttvn increment
+ * (consistency check)
+ */
+ tt_local->common.flags = BATADV_TT_CLIENT_NEW;
+ tt_local->common.vid = vid;
+ if (batadv_is_wifi_hardif(in_hardif))
+ tt_local->common.flags |= BATADV_TT_CLIENT_WIFI;
+ kref_init(&tt_local->common.refcount);
+ tt_local->last_seen = jiffies;
+ tt_local->common.added_at = tt_local->last_seen;
+ tt_local->vlan = vlan;
+
+ /* the batman interface mac and multicast addresses should never be
+ * purged
+ */
+ if (batadv_compare_eth(addr, soft_iface->dev_addr) ||
+ is_multicast_ether_addr(addr))
+ tt_local->common.flags |= BATADV_TT_CLIENT_NOPURGE;
+
+ kref_get(&tt_local->common.refcount);
+ hash_added = batadv_hash_add(bat_priv->tt.local_hash, batadv_compare_tt,
+ batadv_choose_tt, &tt_local->common,
+ &tt_local->common.hash_entry);
+
+ if (unlikely(hash_added != 0)) {
+ /* remove the reference for the hash */
+ batadv_tt_local_entry_put(tt_local);
+ goto out;
+ }
+
+add_event:
+ batadv_tt_local_event(bat_priv, tt_local, BATADV_NO_FLAGS);
+
+check_roaming:
+ /* Check whether this is a roaming event, but don't do anything if
+ * the roaming process has already been handled
+ */
+ if (tt_global && !(tt_global->common.flags & BATADV_TT_CLIENT_ROAM)) {
+ /* These nodes are probably going to update their tt tables */
+ head = &tt_global->orig_list;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(orig_entry, head, list) {
+ batadv_send_roam_adv(bat_priv, tt_global->common.addr,
+ tt_global->common.vid,
+ orig_entry->orig_node);
+ }
+ rcu_read_unlock();
+ if (roamed_back) {
+ batadv_tt_global_free(bat_priv, tt_global,
+ "Roaming canceled");
+ } else {
+ /* The global entry has to be marked as ROAMING and
+ * has to be kept for consistency purpose
+ */
+ tt_global->common.flags |= BATADV_TT_CLIENT_ROAM;
+ tt_global->roam_at = jiffies;
+ }
+ }
+
+ /* store the current remote flags before altering them. This helps
+ * to understand whether the flags are changing or not
+ */
+ remote_flags = tt_local->common.flags & BATADV_TT_REMOTE_MASK;
+
+ if (batadv_is_wifi_hardif(in_hardif))
+ tt_local->common.flags |= BATADV_TT_CLIENT_WIFI;
+ else
+ tt_local->common.flags &= ~BATADV_TT_CLIENT_WIFI;
+
+ /* check the mark in the skb: if it's equal to the configured
+ * isolation_mark, it means the packet is coming from an isolated
+ * non-mesh client
+ */
+ match_mark = (mark & bat_priv->isolation_mark_mask);
+ if (bat_priv->isolation_mark_mask &&
+ match_mark == bat_priv->isolation_mark)
+ tt_local->common.flags |= BATADV_TT_CLIENT_ISOLA;
+ else
+ tt_local->common.flags &= ~BATADV_TT_CLIENT_ISOLA;
+
+ /* if any "dynamic" flag has been modified, resend an ADD event for this
+ * entry so that all the nodes can get the new flags
+ */
+ if (remote_flags ^ (tt_local->common.flags & BATADV_TT_REMOTE_MASK))
+ batadv_tt_local_event(bat_priv, tt_local, BATADV_NO_FLAGS);
+
+ ret = true;
+out:
+ batadv_hardif_put(in_hardif);
+ dev_put(in_dev);
+ batadv_tt_local_entry_put(tt_local);
+ batadv_tt_global_entry_put(tt_global);
+ return ret;
+}
+
+/**
+ * batadv_tt_prepare_tvlv_global_data() - prepare the TVLV TT header to send
+ * within a TT Response directed to another node
+ * @orig_node: originator for which the TT data has to be prepared
+ * @tt_data: uninitialised pointer to the address of the TVLV buffer
+ * @tt_change: uninitialised pointer to the address of the area where the TT
+ * changes can be stored
+ * @tt_len: pointer to the length to reserve to the tt_change. if -1 this
+ * function reserves the amount of space needed to send the entire global TT
+ * table. In case of success the value is updated with the real amount of
+ * reserved bytes
+ *
+ * Allocate the needed amount of memory for the entire TT TVLV and write its
+ * header made up of one tvlv_tt_data object and a series of tvlv_tt_vlan_data
+ * objects, one per active VLAN served by the originator node.
+ *
+ * Return: the size of the allocated buffer or 0 in case of failure.
+ */
+static u16
+batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
+ struct batadv_tvlv_tt_data **tt_data,
+ struct batadv_tvlv_tt_change **tt_change,
+ s32 *tt_len)
+{
+ u16 num_vlan = 0;
+ u16 num_entries = 0;
+ u16 change_offset;
+ u16 tvlv_len;
+ struct batadv_tvlv_tt_vlan_data *tt_vlan;
+ struct batadv_orig_node_vlan *vlan;
+ u8 *tt_change_ptr;
+
+ spin_lock_bh(&orig_node->vlan_list_lock);
+ hlist_for_each_entry(vlan, &orig_node->vlan_list, list) {
+ num_vlan++;
+ num_entries += atomic_read(&vlan->tt.num_entries);
+ }
+
+ change_offset = sizeof(**tt_data);
+ change_offset += num_vlan * sizeof(*tt_vlan);
+
+ /* if tt_len is negative, allocate the space needed by the full table */
+ if (*tt_len < 0)
+ *tt_len = batadv_tt_len(num_entries);
+
+ tvlv_len = *tt_len;
+ tvlv_len += change_offset;
+
+ *tt_data = kmalloc(tvlv_len, GFP_ATOMIC);
+ if (!*tt_data) {
+ *tt_len = 0;
+ goto out;
+ }
+
+ (*tt_data)->flags = BATADV_NO_FLAGS;
+ (*tt_data)->ttvn = atomic_read(&orig_node->last_ttvn);
+ (*tt_data)->num_vlan = htons(num_vlan);
+
+ tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1);
+ hlist_for_each_entry(vlan, &orig_node->vlan_list, list) {
+ tt_vlan->vid = htons(vlan->vid);
+ tt_vlan->crc = htonl(vlan->tt.crc);
+ tt_vlan->reserved = 0;
+
+ tt_vlan++;
+ }
+
+ tt_change_ptr = (u8 *)*tt_data + change_offset;
+ *tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr;
+
+out:
+ spin_unlock_bh(&orig_node->vlan_list_lock);
+ return tvlv_len;
+}
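+
+/* Resulting buffer layout (illustrative), as filled in above:
+ *
+ *   [ tvlv_tt_data | tvlv_tt_vlan_data * num_vlan | tt_change area ]
+ *                                                   ^ *tt_change
+ *
+ * The caller is expected to write up to *tt_len bytes of
+ * batadv_tvlv_tt_change records at *tt_change.
+ */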
+
+/**
+ * batadv_tt_prepare_tvlv_local_data() - allocate and prepare the TT TVLV for
+ * this node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tt_data: uninitialised pointer to the address of the TVLV buffer
+ * @tt_change: uninitialised pointer to the address of the area where the TT
+ * changes can be stored
+ * @tt_len: pointer to the length to reserve to the tt_change. if -1 this
+ * function reserves the amount of space needed to send the entire local TT
+ * table. In case of success the value is updated with the real amount of
+ * reserved bytes
+ *
+ * Allocate the needed amount of memory for the entire TT TVLV and write its
+ * header made up by one tvlv_tt_data object and a series of tvlv_tt_vlan_data
+ * objects, one per active VLAN.
+ *
+ * Return: the size of the allocated buffer or 0 in case of failure.
+ */
+static u16
+batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
+ struct batadv_tvlv_tt_data **tt_data,
+ struct batadv_tvlv_tt_change **tt_change,
+ s32 *tt_len)
+{
+ struct batadv_tvlv_tt_vlan_data *tt_vlan;
+ struct batadv_softif_vlan *vlan;
+ u16 num_vlan = 0;
+ u16 vlan_entries = 0;
+ u16 total_entries = 0;
+ u16 tvlv_len;
+ u8 *tt_change_ptr;
+ int change_offset;
+
+ spin_lock_bh(&bat_priv->softif_vlan_list_lock);
+ hlist_for_each_entry(vlan, &bat_priv->softif_vlan_list, list) {
+ vlan_entries = atomic_read(&vlan->tt.num_entries);
+ if (vlan_entries < 1)
+ continue;
+
+ num_vlan++;
+ total_entries += vlan_entries;
+ }
+
+ change_offset = sizeof(**tt_data);
+ change_offset += num_vlan * sizeof(*tt_vlan);
+
+ /* if tt_len is negative, allocate the space needed by the full table */
+ if (*tt_len < 0)
+ *tt_len = batadv_tt_len(total_entries);
+
+ tvlv_len = *tt_len;
+ tvlv_len += change_offset;
+
+ *tt_data = kmalloc(tvlv_len, GFP_ATOMIC);
+ if (!*tt_data) {
+ tvlv_len = 0;
+ goto out;
+ }
+
+ (*tt_data)->flags = BATADV_NO_FLAGS;
+ (*tt_data)->ttvn = atomic_read(&bat_priv->tt.vn);
+ (*tt_data)->num_vlan = htons(num_vlan);
+
+ tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1);
+ hlist_for_each_entry(vlan, &bat_priv->softif_vlan_list, list) {
+ vlan_entries = atomic_read(&vlan->tt.num_entries);
+ if (vlan_entries < 1)
+ continue;
+
+ tt_vlan->vid = htons(vlan->vid);
+ tt_vlan->crc = htonl(vlan->tt.crc);
+ tt_vlan->reserved = 0;
+
+ tt_vlan++;
+ }
+
+ tt_change_ptr = (u8 *)*tt_data + change_offset;
+ *tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr;
+
+out:
+ spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
+ return tvlv_len;
+}
+
+/**
+ * batadv_tt_tvlv_container_update() - update the translation table tvlv
+ * container after local tt changes have been committed
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
+{
+ struct batadv_tt_change_node *entry, *safe;
+ struct batadv_tvlv_tt_data *tt_data;
+ struct batadv_tvlv_tt_change *tt_change;
+ int tt_diff_len, tt_change_len = 0;
+ int tt_diff_entries_num = 0;
+ int tt_diff_entries_count = 0;
+ u16 tvlv_len;
+
+ tt_diff_entries_num = atomic_read(&bat_priv->tt.local_changes);
+ tt_diff_len = batadv_tt_len(tt_diff_entries_num);
+
+ /* if we have too many changes for one packet don't send any
+ * and wait for the tt table request which will be fragmented
+ */
+ if (tt_diff_len > bat_priv->soft_iface->mtu)
+ tt_diff_len = 0;
+
+ tvlv_len = batadv_tt_prepare_tvlv_local_data(bat_priv, &tt_data,
+ &tt_change, &tt_diff_len);
+ if (!tvlv_len)
+ return;
+
+ tt_data->flags = BATADV_TT_OGM_DIFF;
+
+ if (tt_diff_len == 0)
+ goto container_register;
+
+ spin_lock_bh(&bat_priv->tt.changes_list_lock);
+ atomic_set(&bat_priv->tt.local_changes, 0);
+
+ list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
+ list) {
+ if (tt_diff_entries_count < tt_diff_entries_num) {
+ memcpy(tt_change + tt_diff_entries_count,
+ &entry->change,
+ sizeof(struct batadv_tvlv_tt_change));
+ tt_diff_entries_count++;
+ }
+ list_del(&entry->list);
+ kmem_cache_free(batadv_tt_change_cache, entry);
+ }
+ spin_unlock_bh(&bat_priv->tt.changes_list_lock);
+
+ /* Keep the buffer for possible tt_request */
+ spin_lock_bh(&bat_priv->tt.last_changeset_lock);
+ kfree(bat_priv->tt.last_changeset);
+ bat_priv->tt.last_changeset_len = 0;
+ bat_priv->tt.last_changeset = NULL;
+ tt_change_len = batadv_tt_len(tt_diff_entries_count);
+ /* check whether this new OGM has no changes due to size problems */
+ if (tt_diff_entries_count > 0) {
+ /* if kmalloc() fails we will reply with the full table
+ * instead of providing the diff
+ */
+ bat_priv->tt.last_changeset = kzalloc(tt_diff_len, GFP_ATOMIC);
+ if (bat_priv->tt.last_changeset) {
+ memcpy(bat_priv->tt.last_changeset,
+ tt_change, tt_change_len);
+ bat_priv->tt.last_changeset_len = tt_diff_len;
+ }
+ }
+ spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
+
+container_register:
+ batadv_tvlv_container_register(bat_priv, BATADV_TVLV_TT, 1, tt_data,
+ tvlv_len);
+ kfree(tt_data);
+}
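+
+/* The buffer kept in tt.last_changeset above lets a later TT request be
+ * answered with the same diff instead of the full table; if the kzalloc()
+ * fails, last_changeset stays NULL and the full-table reply is used as
+ * fallback.
+ */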
+
+/**
+ * batadv_tt_local_dump_entry() - Dump one TT local entry into a message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @cb: Control block containing additional options
+ * @bat_priv: The bat priv with all the soft interface information
+ * @common: tt local & tt global common data
+ *
+ * Return: Error code, or 0 on success
+ */
+static int
+batadv_tt_local_dump_entry(struct sk_buff *msg, u32 portid,
+ struct netlink_callback *cb,
+ struct batadv_priv *bat_priv,
+ struct batadv_tt_common_entry *common)
+{
+ void *hdr;
+ struct batadv_softif_vlan *vlan;
+ struct batadv_tt_local_entry *local;
+ unsigned int last_seen_msecs;
+ u32 crc;
+
+ local = container_of(common, struct batadv_tt_local_entry, common);
+ last_seen_msecs = jiffies_to_msecs(jiffies - local->last_seen);
+
+ vlan = batadv_softif_vlan_get(bat_priv, common->vid);
+ if (!vlan)
+ return 0;
+
+ crc = vlan->tt.crc;
+
+ batadv_softif_vlan_put(vlan);
+
+ hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
+ &batadv_netlink_family, NLM_F_MULTI,
+ BATADV_CMD_GET_TRANSTABLE_LOCAL);
+ if (!hdr)
+ return -ENOBUFS;
+
+ genl_dump_check_consistent(cb, hdr);
+
+ if (nla_put(msg, BATADV_ATTR_TT_ADDRESS, ETH_ALEN, common->addr) ||
+ nla_put_u32(msg, BATADV_ATTR_TT_CRC32, crc) ||
+ nla_put_u16(msg, BATADV_ATTR_TT_VID, common->vid) ||
+ nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, common->flags))
+ goto nla_put_failure;
+
+ if (!(common->flags & BATADV_TT_CLIENT_NOPURGE) &&
+ nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, last_seen_msecs))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+/**
+ * batadv_tt_local_dump_bucket() - Dump one TT local bucket into a message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @cb: Control block containing additional options
+ * @bat_priv: The bat priv with all the soft interface information
+ * @hash: hash to dump
+ * @bucket: bucket index to dump
+ * @idx_s: Number of entries to skip
+ *
+ * Return: Error code, or 0 on success
+ */
+static int
+batadv_tt_local_dump_bucket(struct sk_buff *msg, u32 portid,
+ struct netlink_callback *cb,
+ struct batadv_priv *bat_priv,
+ struct batadv_hashtable *hash, unsigned int bucket,
+ int *idx_s)
+{
+ struct batadv_tt_common_entry *common;
+ int idx = 0;
+
+ spin_lock_bh(&hash->list_locks[bucket]);
+ cb->seq = atomic_read(&hash->generation) << 1 | 1;
+
+ hlist_for_each_entry(common, &hash->table[bucket], hash_entry) {
+ if (idx++ < *idx_s)
+ continue;
+
+ if (batadv_tt_local_dump_entry(msg, portid, cb, bat_priv,
+ common)) {
+ spin_unlock_bh(&hash->list_locks[bucket]);
+ *idx_s = idx - 1;
+ return -EMSGSIZE;
+ }
+ }
+ spin_unlock_bh(&hash->list_locks[bucket]);
+
+ *idx_s = 0;
+ return 0;
+}
+
+/**
+ * batadv_tt_local_dump() - Dump TT local entries into a message
+ * @msg: Netlink message to dump into
+ * @cb: Parameters from query
+ *
+ * Return: Error code, or 0 on success
+ */
+int batadv_tt_local_dump(struct sk_buff *msg, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(cb->skb->sk);
+ struct net_device *soft_iface;
+ struct batadv_priv *bat_priv;
+ struct batadv_hard_iface *primary_if = NULL;
+ struct batadv_hashtable *hash;
+ int ret;
+ int ifindex;
+ int bucket = cb->args[0];
+ int idx = cb->args[1];
+ int portid = NETLINK_CB(cb->skb).portid;
+
+ ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX);
+ if (!ifindex)
+ return -EINVAL;
+
+ soft_iface = dev_get_by_index(net, ifindex);
+ if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ bat_priv = netdev_priv(soft_iface);
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ hash = bat_priv->tt.local_hash;
+
+ while (bucket < hash->size) {
+ if (batadv_tt_local_dump_bucket(msg, portid, cb, bat_priv,
+ hash, bucket, &idx))
+ break;
+
+ bucket++;
+ }
+
+ ret = msg->len;
+
+ out:
+ batadv_hardif_put(primary_if);
+ dev_put(soft_iface);
+
+ cb->args[0] = bucket;
+ cb->args[1] = idx;
+
+ return ret;
+}
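+
+/* Resume sketch for the dump above: cb->args[0] and cb->args[1] persist
+ * between netlink dump calls, so a dump interrupted by a full skb restarts
+ * at the same hash bucket and skips the entries already sent (see the
+ * *idx_s handling in batadv_tt_local_dump_bucket()).
+ */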
+
+static void
+batadv_tt_local_set_pending(struct batadv_priv *bat_priv,
+ struct batadv_tt_local_entry *tt_local_entry,
+ u16 flags, const char *message)
+{
+ batadv_tt_local_event(bat_priv, tt_local_entry, flags);
+
+ /* The local client has to be marked as "pending to be removed" but has
+ * to be kept in the table in order to send it in a full table
+ * response issued before the next ttvn increment (consistency check)
+ */
+ tt_local_entry->common.flags |= BATADV_TT_CLIENT_PENDING;
+
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Local tt entry (%pM, vid: %d) pending to be removed: %s\n",
+ tt_local_entry->common.addr,
+ batadv_print_vid(tt_local_entry->common.vid), message);
+}
+
+/**
+ * batadv_tt_local_remove() - logically remove an entry from the local table
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the MAC address of the client to remove
+ * @vid: VLAN identifier
+ * @message: message to append to the log on deletion
+ * @roaming: true if the deletion is due to a roaming event
+ *
+ * Return: the flags assigned to the local entry before being deleted
+ */
+u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
+ unsigned short vid, const char *message,
+ bool roaming)
+{
+ struct batadv_tt_local_entry *tt_removed_entry;
+ struct batadv_tt_local_entry *tt_local_entry;
+ u16 flags, curr_flags = BATADV_NO_FLAGS;
+ struct hlist_node *tt_removed_node;
+
+ tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
+ if (!tt_local_entry)
+ goto out;
+
+ curr_flags = tt_local_entry->common.flags;
+
+ flags = BATADV_TT_CLIENT_DEL;
+ /* if this global entry addition is due to a roaming, the node has to
+ * mark the local entry as "roamed" in order to correctly reroute
+ * packets later
+ */
+ if (roaming) {
+ flags |= BATADV_TT_CLIENT_ROAM;
+ /* mark the local client as ROAMed */
+ tt_local_entry->common.flags |= BATADV_TT_CLIENT_ROAM;
+ }
+
+ if (!(tt_local_entry->common.flags & BATADV_TT_CLIENT_NEW)) {
+ batadv_tt_local_set_pending(bat_priv, tt_local_entry, flags,
+ message);
+ goto out;
+ }
+ /* if this client has been added right now, it is possible to
+ * immediately purge it
+ */
+ batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL);
+
+ tt_removed_node = batadv_hash_remove(bat_priv->tt.local_hash,
+ batadv_compare_tt,
+ batadv_choose_tt,
+ &tt_local_entry->common);
+ if (!tt_removed_node)
+ goto out;
+
+ /* drop reference of remove hash entry */
+ tt_removed_entry = hlist_entry(tt_removed_node,
+ struct batadv_tt_local_entry,
+ common.hash_entry);
+ batadv_tt_local_entry_put(tt_removed_entry);
+
+out:
+ batadv_tt_local_entry_put(tt_local_entry);
+
+ return curr_flags;
+}
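+
+/* Deletion above is two-staged for clients already announced to the mesh
+ * (mark PENDING now, purge after the next ttvn increment) but immediate for
+ * clients still flagged NEW, since those were never advertised in the first
+ * place.
+ */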
+
+/**
+ * batadv_tt_local_purge_list() - purge inactive tt local entries
+ * @bat_priv: the bat priv with all the soft interface information
+ * @head: pointer to the list containing the local tt entries
+ * @timeout: parameter deciding whether a given tt local entry is considered
+ * inactive or not
+ */
+static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
+ struct hlist_head *head,
+ int timeout)
+{
+ struct batadv_tt_local_entry *tt_local_entry;
+ struct batadv_tt_common_entry *tt_common_entry;
+ struct hlist_node *node_tmp;
+
+ hlist_for_each_entry_safe(tt_common_entry, node_tmp, head,
+ hash_entry) {
+ tt_local_entry = container_of(tt_common_entry,
+ struct batadv_tt_local_entry,
+ common);
+ if (tt_local_entry->common.flags & BATADV_TT_CLIENT_NOPURGE)
+ continue;
+
+ /* entry already marked for deletion */
+ if (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING)
+ continue;
+
+ if (!batadv_has_timed_out(tt_local_entry->last_seen, timeout))
+ continue;
+
+ batadv_tt_local_set_pending(bat_priv, tt_local_entry,
+ BATADV_TT_CLIENT_DEL, "timed out");
+ }
+}
+
+/**
+ * batadv_tt_local_purge() - purge inactive tt local entries
+ * @bat_priv: the bat priv with all the soft interface information
+ * @timeout: parameter deciding whether a given tt local entry is considered
+ * inactive or not
+ */
+static void batadv_tt_local_purge(struct batadv_priv *bat_priv,
+ int timeout)
+{
+ struct batadv_hashtable *hash = bat_priv->tt.local_hash;
+ struct hlist_head *head;
+ spinlock_t *list_lock; /* protects write access to the hash lists */
+ u32 i;
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+ list_lock = &hash->list_locks[i];
+
+ spin_lock_bh(list_lock);
+ batadv_tt_local_purge_list(bat_priv, head, timeout);
+ spin_unlock_bh(list_lock);
+ }
+}
+
+static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
+{
+ struct batadv_hashtable *hash;
+ spinlock_t *list_lock; /* protects write access to the hash lists */
+ struct batadv_tt_common_entry *tt_common_entry;
+ struct batadv_tt_local_entry *tt_local;
+ struct hlist_node *node_tmp;
+ struct hlist_head *head;
+ u32 i;
+
+ if (!bat_priv->tt.local_hash)
+ return;
+
+ hash = bat_priv->tt.local_hash;
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+ list_lock = &hash->list_locks[i];
+
+ spin_lock_bh(list_lock);
+ hlist_for_each_entry_safe(tt_common_entry, node_tmp,
+ head, hash_entry) {
+ hlist_del_rcu(&tt_common_entry->hash_entry);
+ tt_local = container_of(tt_common_entry,
+ struct batadv_tt_local_entry,
+ common);
+
+ batadv_tt_local_entry_put(tt_local);
+ }
+ spin_unlock_bh(list_lock);
+ }
+
+ batadv_hash_destroy(hash);
+
+ bat_priv->tt.local_hash = NULL;
+}
+
+static int batadv_tt_global_init(struct batadv_priv *bat_priv)
+{
+ if (bat_priv->tt.global_hash)
+ return 0;
+
+ bat_priv->tt.global_hash = batadv_hash_new(1024);
+
+ if (!bat_priv->tt.global_hash)
+ return -ENOMEM;
+
+ batadv_hash_set_lock_class(bat_priv->tt.global_hash,
+ &batadv_tt_global_hash_lock_class_key);
+
+ return 0;
+}
+
+static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv)
+{
+ struct batadv_tt_change_node *entry, *safe;
+
+ spin_lock_bh(&bat_priv->tt.changes_list_lock);
+
+ list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
+ list) {
+ list_del(&entry->list);
+ kmem_cache_free(batadv_tt_change_cache, entry);
+ }
+
+ atomic_set(&bat_priv->tt.local_changes, 0);
+ spin_unlock_bh(&bat_priv->tt.changes_list_lock);
+}
+
+/**
+ * batadv_tt_global_orig_entry_find() - find a TT orig_list_entry
+ * @entry: the TT global entry where the orig_list_entry has to be
+ * extracted from
+ * @orig_node: the originator for which the orig_list_entry has to be found
+ *
+ * retrieve the orig_tt_list_entry belonging to orig_node from the
+ * batadv_tt_global_entry list
+ *
+ * Return: the orig_list_entry with an increased refcounter, or NULL if not
+ * found
+ */
+static struct batadv_tt_orig_list_entry *
+batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
+ const struct batadv_orig_node *orig_node)
+{
+ struct batadv_tt_orig_list_entry *tmp_orig_entry, *orig_entry = NULL;
+ const struct hlist_head *head;
+
+ rcu_read_lock();
+ head = &entry->orig_list;
+ hlist_for_each_entry_rcu(tmp_orig_entry, head, list) {
+ if (tmp_orig_entry->orig_node != orig_node)
+ continue;
+ if (!kref_get_unless_zero(&tmp_orig_entry->refcount))
+ continue;
+
+ orig_entry = tmp_orig_entry;
+ break;
+ }
+ rcu_read_unlock();
+
+ return orig_entry;
+}
+
+/**
+ * batadv_tt_global_entry_has_orig() - check if a TT global entry is also
+ * handled by a given originator
+ * @entry: the TT global entry to check
+ * @orig_node: the originator to search in the list
+ * @flags: a pointer to store TT flags for the given @entry received
+ * from @orig_node
+ *
+ * find out if an orig_node is already in the list of a tt_global_entry.
+ *
+ * Return: true if found, false otherwise
+ */
+static bool
+batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
+ const struct batadv_orig_node *orig_node,
+ u8 *flags)
+{
+ struct batadv_tt_orig_list_entry *orig_entry;
+ bool found = false;
+
+ orig_entry = batadv_tt_global_orig_entry_find(entry, orig_node);
+ if (orig_entry) {
+ found = true;
+
+ if (flags)
+ *flags = orig_entry->flags;
+
+ batadv_tt_orig_list_entry_put(orig_entry);
+ }
+
+ return found;
+}
+
+/**
+ * batadv_tt_global_sync_flags() - update TT sync flags
+ * @tt_global: the TT global entry to update sync flags in
+ *
+ * Updates the sync flag bits in the tt_global flag attribute with a logical
+ * OR of all sync flags from any of its TT orig entries.
+ */
+static void
+batadv_tt_global_sync_flags(struct batadv_tt_global_entry *tt_global)
+{
+ struct batadv_tt_orig_list_entry *orig_entry;
+ const struct hlist_head *head;
+ u16 flags = BATADV_NO_FLAGS;
+
+ rcu_read_lock();
+ head = &tt_global->orig_list;
+ hlist_for_each_entry_rcu(orig_entry, head, list)
+ flags |= orig_entry->flags;
+ rcu_read_unlock();
+
+ flags |= tt_global->common.flags & (~BATADV_TT_SYNC_MASK);
+ tt_global->common.flags = flags;
+}
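+
+/* Example for the OR above (illustrative): if one originator announces the
+ * client with BATADV_TT_CLIENT_WIFI and another one without it, the global
+ * entry keeps the WIFI bit set; bits outside BATADV_TT_SYNC_MASK are taken
+ * over from common.flags unchanged.
+ */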
+
+/**
+ * batadv_tt_global_orig_entry_add() - add or update a TT orig entry
+ * @tt_global: the TT global entry to add an orig entry in
+ * @orig_node: the originator to add an orig entry for
+ * @ttvn: translation table version number of this changeset
+ * @flags: TT sync flags
+ */
+static void
+batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
+ struct batadv_orig_node *orig_node, int ttvn,
+ u8 flags)
+{
+ struct batadv_tt_orig_list_entry *orig_entry;
+
+ spin_lock_bh(&tt_global->list_lock);
+
+ orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node);
+ if (orig_entry) {
+ /* refresh the ttvn: the current value could be a bogus one that
+ * was added during a "temporary client detection"
+ */
+ orig_entry->ttvn = ttvn;
+ orig_entry->flags = flags;
+ goto sync_flags;
+ }
+
+ orig_entry = kmem_cache_zalloc(batadv_tt_orig_cache, GFP_ATOMIC);
+ if (!orig_entry)
+ goto out;
+
+ INIT_HLIST_NODE(&orig_entry->list);
+ kref_get(&orig_node->refcount);
+ batadv_tt_global_size_inc(orig_node, tt_global->common.vid);
+ orig_entry->orig_node = orig_node;
+ orig_entry->ttvn = ttvn;
+ orig_entry->flags = flags;
+ kref_init(&orig_entry->refcount);
+
+ kref_get(&orig_entry->refcount);
+ hlist_add_head_rcu(&orig_entry->list,
+ &tt_global->orig_list);
+ atomic_inc(&tt_global->orig_list_count);
+
+sync_flags:
+ batadv_tt_global_sync_flags(tt_global);
+out:
+ batadv_tt_orig_list_entry_put(orig_entry);
+
+ spin_unlock_bh(&tt_global->list_lock);
+}
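+
+/* Reference note for the function above: the final put balances either the
+ * reference taken by batadv_tt_global_orig_entry_find() (update path) or
+ * the initial kref_init() reference (creation path); the extra kref_get()
+ * reference stays with the orig_list.
+ */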
+
+/**
+ * batadv_tt_global_add() - add a new TT global entry or update an existing one
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: the originator announcing the client
+ * @tt_addr: the mac address of the non-mesh client
+ * @vid: VLAN identifier
+ * @flags: TT flags that have to be set for this non-mesh client
+ * @ttvn: the tt version number that announced this non-mesh client
+ *
+ * Add a new TT global entry for the given originator. If the entry already
+ * exists add a new reference to the given originator (a global entry can have
+ * references to multiple originators) and adjust the flags attribute to reflect
+ * the function argument.
+ * If a TT local entry exists for this non-mesh client remove it.
+ *
+ * The caller must hold the orig_node refcount.
+ *
+ * Return: true if the new entry has been added, false otherwise
+ */
+static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const unsigned char *tt_addr,
+ unsigned short vid, u16 flags, u8 ttvn)
+{
+ struct batadv_tt_global_entry *tt_global_entry;
+ struct batadv_tt_local_entry *tt_local_entry;
+ bool ret = false;
+ int hash_added;
+ struct batadv_tt_common_entry *common;
+ u16 local_flags;
+
+ /* ignore global entries from backbone nodes */
+ if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig, vid))
+ return true;
+
+ tt_global_entry = batadv_tt_global_hash_find(bat_priv, tt_addr, vid);
+ tt_local_entry = batadv_tt_local_hash_find(bat_priv, tt_addr, vid);
+
+ /* if the node already has a local client for this entry, it has to wait
+ * for a roaming advertisement instead of manually messing up the global
+ * table
+ */
+ if ((flags & BATADV_TT_CLIENT_TEMP) && tt_local_entry &&
+ !(tt_local_entry->common.flags & BATADV_TT_CLIENT_NEW))
+ goto out;
+
+ if (!tt_global_entry) {
+ tt_global_entry = kmem_cache_zalloc(batadv_tg_cache,
+ GFP_ATOMIC);
+ if (!tt_global_entry)
+ goto out;
+
+ common = &tt_global_entry->common;
+ ether_addr_copy(common->addr, tt_addr);
+ common->vid = vid;
+
+ if (!is_multicast_ether_addr(common->addr))
+ common->flags = flags & (~BATADV_TT_SYNC_MASK);
+
+ tt_global_entry->roam_at = 0;
+ /* node must store current time in case of roaming. This is
+ * needed to purge this entry out on timeout (if nobody claims
+ * it)
+ */
+ if (flags & BATADV_TT_CLIENT_ROAM)
+ tt_global_entry->roam_at = jiffies;
+ kref_init(&common->refcount);
+ common->added_at = jiffies;
+
+ INIT_HLIST_HEAD(&tt_global_entry->orig_list);
+ atomic_set(&tt_global_entry->orig_list_count, 0);
+ spin_lock_init(&tt_global_entry->list_lock);
+
+ kref_get(&common->refcount);
+ hash_added = batadv_hash_add(bat_priv->tt.global_hash,
+ batadv_compare_tt,
+ batadv_choose_tt, common,
+ &common->hash_entry);
+
+ if (unlikely(hash_added != 0)) {
+ /* remove the reference for the hash */
+ batadv_tt_global_entry_put(tt_global_entry);
+ goto out_remove;
+ }
+ } else {
+ common = &tt_global_entry->common;
+ /* If there is already a global entry, we can use this one for
+ * our processing.
+ * But if we are trying to add a temporary client, there are
+ * two options at this point:
+ * 1) the global client is not a temporary client: the global
+ * client has to be left as it is, temporary information
+ * should never override any already known client state
+ * 2) the global client is a temporary client: purge the
+ * originator list and add the new orig_entry
+ */
+ if (flags & BATADV_TT_CLIENT_TEMP) {
+ if (!(common->flags & BATADV_TT_CLIENT_TEMP))
+ goto out;
+ if (batadv_tt_global_entry_has_orig(tt_global_entry,
+ orig_node, NULL))
+ goto out_remove;
+ batadv_tt_global_del_orig_list(tt_global_entry);
+ goto add_orig_entry;
+ }
+
+ /* if the client was temporarily added before receiving the first
+ * OGM announcing it, we have to clear the TEMP flag. Also,
+ * remove the previous temporary orig node and re-add it
+ * if required. If the orig entry changed, the new
+ * non-temporary entry is preferred.
+ */
+ if (common->flags & BATADV_TT_CLIENT_TEMP) {
+ batadv_tt_global_del_orig_list(tt_global_entry);
+ common->flags &= ~BATADV_TT_CLIENT_TEMP;
+ }
+
+ /* the change can carry possible "attribute" flags like the
+ * TT_CLIENT_TEMP, therefore they have to be copied into the
+ * client entry
+ */
+ if (!is_multicast_ether_addr(common->addr))
+ common->flags |= flags & (~BATADV_TT_SYNC_MASK);
+
+ /* If the BATADV_TT_CLIENT_ROAM flag is set, there is only
+ * one originator left in the list and we previously received a
+ * delete + roaming change for this originator.
+ *
+ * We should first delete the old originator before adding the
+ * new one.
+ */
+ if (common->flags & BATADV_TT_CLIENT_ROAM) {
+ batadv_tt_global_del_orig_list(tt_global_entry);
+ common->flags &= ~BATADV_TT_CLIENT_ROAM;
+ tt_global_entry->roam_at = 0;
+ }
+ }
+add_orig_entry:
+ /* add the new orig_entry (if needed) or update it */
+ batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn,
+ flags & BATADV_TT_SYNC_MASK);
+
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Creating new global tt entry: %pM (vid: %d, via %pM)\n",
+ common->addr, batadv_print_vid(common->vid),
+ orig_node->orig);
+ ret = true;
+
+out_remove:
+ /* Do not remove multicast addresses from the local hash on
+ * global additions
+ */
+ if (is_multicast_ether_addr(tt_addr))
+ goto out;
+
+ /* remove address from local hash if present */
+ local_flags = batadv_tt_local_remove(bat_priv, tt_addr, vid,
+ "global tt received",
+ flags & BATADV_TT_CLIENT_ROAM);
+ tt_global_entry->common.flags |= local_flags & BATADV_TT_CLIENT_WIFI;
+
+ if (!(flags & BATADV_TT_CLIENT_ROAM))
+ /* this is a normal global add. Therefore the client is not in a
+ * roaming state anymore.
+ */
+ tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_ROAM;
+
+out:
+ batadv_tt_global_entry_put(tt_global_entry);
+ batadv_tt_local_entry_put(tt_local_entry);
+ return ret;
+}
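+
+/* Minimal usage sketch for batadv_tt_global_add() (variable names are
+ * hypothetical; this mirrors the caller in _batadv_tt_update_changes()
+ * further below):
+ *
+ *   if (!batadv_tt_global_add(bat_priv, orig_node, tt_change->addr,
+ *                             ntohs(tt_change->vid), tt_change->flags,
+ *                             ttvn))
+ *           return;
+ *
+ * A false return value aborts the update without committing the ttvn change.
+ */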
+
+/**
+ * batadv_transtable_best_orig() - Get best originator list entry from tt entry
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tt_global_entry: global translation table entry to be analyzed
+ *
+ * This function assumes the caller holds rcu_read_lock().
+ * Return: best originator list entry or NULL on errors.
+ */
+static struct batadv_tt_orig_list_entry *
+batadv_transtable_best_orig(struct batadv_priv *bat_priv,
+ struct batadv_tt_global_entry *tt_global_entry)
+{
+ struct batadv_neigh_node *router, *best_router = NULL;
+ struct batadv_algo_ops *bao = bat_priv->algo_ops;
+ struct hlist_head *head;
+ struct batadv_tt_orig_list_entry *orig_entry, *best_entry = NULL;
+
+ head = &tt_global_entry->orig_list;
+ hlist_for_each_entry_rcu(orig_entry, head, list) {
+ router = batadv_orig_router_get(orig_entry->orig_node,
+ BATADV_IF_DEFAULT);
+ if (!router)
+ continue;
+
+ if (best_router &&
+ bao->neigh.cmp(router, BATADV_IF_DEFAULT, best_router,
+ BATADV_IF_DEFAULT) <= 0) {
+ batadv_neigh_node_put(router);
+ continue;
+ }
+
+ /* release the refcount for the "old" best */
+ batadv_neigh_node_put(best_router);
+
+ best_entry = orig_entry;
+ best_router = router;
+ }
+
+ batadv_neigh_node_put(best_router);
+
+ return best_entry;
+}
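+
+/* Caller sketch (illustrative): the returned entry is only guaranteed to stay
+ * valid under the RCU read lock, so a caller that needs the orig_node beyond
+ * the critical section must take its own reference, as
+ * batadv_transtable_search() below does:
+ *
+ *   rcu_read_lock();
+ *   best_entry = batadv_transtable_best_orig(bat_priv, tt_global_entry);
+ *   if (best_entry)
+ *           orig_node = best_entry->orig_node;
+ *   if (orig_node && !kref_get_unless_zero(&orig_node->refcount))
+ *           orig_node = NULL;
+ *   rcu_read_unlock();
+ */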
+
+/**
+ * batadv_tt_global_dump_subentry() - Dump one TT global sub-entry into a message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @seq: Sequence number of netlink message
+ * @common: tt local & tt global common data
+ * @orig: Originator node announcing a non-mesh client
+ * @best: Is the best originator for the TT entry
+ *
+ * Return: Error code, or 0 on success
+ */
+static int
+batadv_tt_global_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_tt_common_entry *common,
+ struct batadv_tt_orig_list_entry *orig,
+ bool best)
+{
+ u16 flags = (common->flags & (~BATADV_TT_SYNC_MASK)) | orig->flags;
+ void *hdr;
+ struct batadv_orig_node_vlan *vlan;
+ u8 last_ttvn;
+ u32 crc;
+
+ vlan = batadv_orig_node_vlan_get(orig->orig_node,
+ common->vid);
+ if (!vlan)
+ return 0;
+
+ crc = vlan->tt.crc;
+
+ batadv_orig_node_vlan_put(vlan);
+
+ hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
+ NLM_F_MULTI,
+ BATADV_CMD_GET_TRANSTABLE_GLOBAL);
+ if (!hdr)
+ return -ENOBUFS;
+
+ last_ttvn = atomic_read(&orig->orig_node->last_ttvn);
+
+ if (nla_put(msg, BATADV_ATTR_TT_ADDRESS, ETH_ALEN, common->addr) ||
+ nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN,
+ orig->orig_node->orig) ||
+ nla_put_u8(msg, BATADV_ATTR_TT_TTVN, orig->ttvn) ||
+ nla_put_u8(msg, BATADV_ATTR_TT_LAST_TTVN, last_ttvn) ||
+ nla_put_u32(msg, BATADV_ATTR_TT_CRC32, crc) ||
+ nla_put_u16(msg, BATADV_ATTR_TT_VID, common->vid) ||
+ nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, flags))
+ goto nla_put_failure;
+
+ if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+/**
+ * batadv_tt_global_dump_entry() - Dump one TT global entry into a message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @seq: Sequence number of netlink message
+ * @bat_priv: The bat priv with all the soft interface information
+ * @common: tt local & tt global common data
+ * @sub_s: Number of entries to skip
+ *
+ * This function assumes the caller holds rcu_read_lock().
+ *
+ * Return: Error code, or 0 on success
+ */
+static int
+batadv_tt_global_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_priv *bat_priv,
+ struct batadv_tt_common_entry *common, int *sub_s)
+{
+ struct batadv_tt_orig_list_entry *orig_entry, *best_entry;
+ struct batadv_tt_global_entry *global;
+ struct hlist_head *head;
+ int sub = 0;
+ bool best;
+
+ global = container_of(common, struct batadv_tt_global_entry, common);
+ best_entry = batadv_transtable_best_orig(bat_priv, global);
+ head = &global->orig_list;
+
+ hlist_for_each_entry_rcu(orig_entry, head, list) {
+ if (sub++ < *sub_s)
+ continue;
+
+ best = (orig_entry == best_entry);
+
+ if (batadv_tt_global_dump_subentry(msg, portid, seq, common,
+ orig_entry, best)) {
+ *sub_s = sub - 1;
+ return -EMSGSIZE;
+ }
+ }
+
+ *sub_s = 0;
+ return 0;
+}
+
+/**
+ * batadv_tt_global_dump_bucket() - Dump one TT global bucket into a message
+ * @msg: Netlink message to dump into
+ * @portid: Port making netlink request
+ * @seq: Sequence number of netlink message
+ * @bat_priv: The bat priv with all the soft interface information
+ * @head: Pointer to the list containing the global tt entries
+ * @idx_s: Number of entries to skip in this bucket
+ * @sub: Number of sub-entries to skip within the current entry
+ *
+ * Return: Error code, or 0 on success
+ */
+static int
+batadv_tt_global_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
+ struct batadv_priv *bat_priv,
+ struct hlist_head *head, int *idx_s, int *sub)
+{
+ struct batadv_tt_common_entry *common;
+ int idx = 0;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(common, head, hash_entry) {
+ if (idx++ < *idx_s)
+ continue;
+
+ if (batadv_tt_global_dump_entry(msg, portid, seq, bat_priv,
+ common, sub)) {
+ rcu_read_unlock();
+ *idx_s = idx - 1;
+ return -EMSGSIZE;
+ }
+ }
+ rcu_read_unlock();
+
+ *idx_s = 0;
+ *sub = 0;
+ return 0;
+}
+
+/**
+ * batadv_tt_global_dump() - Dump TT global entries into a message
+ * @msg: Netlink message to dump into
+ * @cb: Parameters from query
+ *
+ * Return: Error code, or length of message on success
+ */
+int batadv_tt_global_dump(struct sk_buff *msg, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(cb->skb->sk);
+ struct net_device *soft_iface;
+ struct batadv_priv *bat_priv;
+ struct batadv_hard_iface *primary_if = NULL;
+ struct batadv_hashtable *hash;
+ struct hlist_head *head;
+ int ret;
+ int ifindex;
+ int bucket = cb->args[0];
+ int idx = cb->args[1];
+ int sub = cb->args[2];
+ int portid = NETLINK_CB(cb->skb).portid;
+
+ ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX);
+ if (!ifindex)
+ return -EINVAL;
+
+ soft_iface = dev_get_by_index(net, ifindex);
+ if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ bat_priv = netdev_priv(soft_iface);
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ hash = bat_priv->tt.global_hash;
+
+ while (bucket < hash->size) {
+ head = &hash->table[bucket];
+
+ if (batadv_tt_global_dump_bucket(msg, portid,
+ cb->nlh->nlmsg_seq, bat_priv,
+ head, &idx, &sub))
+ break;
+
+ bucket++;
+ }
+
+ ret = msg->len;
+
+ out:
+ batadv_hardif_put(primary_if);
+ dev_put(soft_iface);
+
+ cb->args[0] = bucket;
+ cb->args[1] = idx;
+ cb->args[2] = sub;
+
+ return ret;
+}
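+
+/* Dump resume sketch (illustrative): netlink dumps are delivered in chunks,
+ * so the current position is parked in cb->args[] between invocations. When
+ * the callback runs again with cb->args[0..2] = { bucket, idx, sub }, the
+ * loop above re-enters at the saved hash bucket, and
+ * batadv_tt_global_dump_bucket()/batadv_tt_global_dump_entry() skip idx
+ * entries and sub sub-entries before emitting the next batch.
+ */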
+
+/**
+ * _batadv_tt_global_del_orig_entry() - remove and free an orig_entry
+ * @tt_global_entry: the global entry to remove the orig_entry from
+ * @orig_entry: the orig entry to remove and free
+ *
+ * Remove an orig_entry from its list in the given tt_global_entry and
+ * free this orig_entry afterwards.
+ *
+ * Caller must hold tt_global_entry->list_lock and ensure orig_entry->list is
+ * part of a list.
+ */
+static void
+_batadv_tt_global_del_orig_entry(struct batadv_tt_global_entry *tt_global_entry,
+ struct batadv_tt_orig_list_entry *orig_entry)
+{
+ lockdep_assert_held(&tt_global_entry->list_lock);
+
+ batadv_tt_global_size_dec(orig_entry->orig_node,
+ tt_global_entry->common.vid);
+ atomic_dec(&tt_global_entry->orig_list_count);
+ /* requires holding tt_global_entry->list_lock and orig_entry->list
+ * being part of a list
+ */
+ hlist_del_rcu(&orig_entry->list);
+ batadv_tt_orig_list_entry_put(orig_entry);
+}
+
+/* deletes the orig list of a tt_global_entry */
+static void
+batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry)
+{
+ struct hlist_head *head;
+ struct hlist_node *safe;
+ struct batadv_tt_orig_list_entry *orig_entry;
+
+ spin_lock_bh(&tt_global_entry->list_lock);
+ head = &tt_global_entry->orig_list;
+ hlist_for_each_entry_safe(orig_entry, safe, head, list)
+ _batadv_tt_global_del_orig_entry(tt_global_entry, orig_entry);
+ spin_unlock_bh(&tt_global_entry->list_lock);
+}
+
+/**
+ * batadv_tt_global_del_orig_node() - remove orig_node from a global tt entry
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tt_global_entry: the global entry to remove the orig_node from
+ * @orig_node: the originator announcing the client
+ * @message: message to append to the log on deletion
+ *
+ * Remove the given orig_node and its corresponding orig_entry from the given
+ * global tt entry.
+ */
+static void
+batadv_tt_global_del_orig_node(struct batadv_priv *bat_priv,
+ struct batadv_tt_global_entry *tt_global_entry,
+ struct batadv_orig_node *orig_node,
+ const char *message)
+{
+ struct hlist_head *head;
+ struct hlist_node *safe;
+ struct batadv_tt_orig_list_entry *orig_entry;
+ unsigned short vid;
+
+ spin_lock_bh(&tt_global_entry->list_lock);
+ head = &tt_global_entry->orig_list;
+ hlist_for_each_entry_safe(orig_entry, safe, head, list) {
+ if (orig_entry->orig_node == orig_node) {
+ vid = tt_global_entry->common.vid;
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Deleting %pM from global tt entry %pM (vid: %d): %s\n",
+ orig_node->orig,
+ tt_global_entry->common.addr,
+ batadv_print_vid(vid), message);
+ _batadv_tt_global_del_orig_entry(tt_global_entry,
+ orig_entry);
+ }
+ }
+ spin_unlock_bh(&tt_global_entry->list_lock);
+}
+
+/* If the client is to be deleted, we check if it is the last originator entry
+ * within the tt_global entry. If yes, we set the BATADV_TT_CLIENT_ROAM flag
+ * and the timer, otherwise we simply remove the originator scheduled for
+ * deletion.
+ */
+static void
+batadv_tt_global_del_roaming(struct batadv_priv *bat_priv,
+ struct batadv_tt_global_entry *tt_global_entry,
+ struct batadv_orig_node *orig_node,
+ const char *message)
+{
+ bool last_entry = true;
+ struct hlist_head *head;
+ struct batadv_tt_orig_list_entry *orig_entry;
+
+ /* no local entry exists, case 1:
+ * Check if this is the last one or if other entries exist.
+ */
+
+ rcu_read_lock();
+ head = &tt_global_entry->orig_list;
+ hlist_for_each_entry_rcu(orig_entry, head, list) {
+ if (orig_entry->orig_node != orig_node) {
+ last_entry = false;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ if (last_entry) {
+ /* it's the last one, mark it for roaming. */
+ tt_global_entry->common.flags |= BATADV_TT_CLIENT_ROAM;
+ tt_global_entry->roam_at = jiffies;
+ } else {
+ /* there is another entry, we can simply delete this
+ * one and can still use the other one.
+ */
+ batadv_tt_global_del_orig_node(bat_priv, tt_global_entry,
+ orig_node, message);
+ }
+}
+
+/**
+ * batadv_tt_global_del() - remove a client from the global table
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: an originator serving this client
+ * @addr: the mac address of the client
+ * @vid: VLAN identifier
+ * @message: a message explaining the reason for deleting the client to print
+ * for debugging purpose
+ * @roaming: true if the deletion has been triggered by a roaming event
+ */
+static void batadv_tt_global_del(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const unsigned char *addr, unsigned short vid,
+ const char *message, bool roaming)
+{
+ struct batadv_tt_global_entry *tt_global_entry;
+ struct batadv_tt_local_entry *local_entry = NULL;
+
+ tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr, vid);
+ if (!tt_global_entry)
+ goto out;
+
+ if (!roaming) {
+ batadv_tt_global_del_orig_node(bat_priv, tt_global_entry,
+ orig_node, message);
+
+ if (hlist_empty(&tt_global_entry->orig_list))
+ batadv_tt_global_free(bat_priv, tt_global_entry,
+ message);
+
+ goto out;
+ }
+
+ /* if we are deleting a global entry due to a roam
+ * event, there are two possibilities:
+ * 1) the client roamed from node A to node B => if there
+ * is only one originator left for this client, we mark
+ * it with BATADV_TT_CLIENT_ROAM, we start a timer and we
+ * wait for node B to claim it. In case of timeout
+ * the entry is purged.
+ *
+ * If there are other originators left, we directly delete
+ * the originator.
+ * 2) the client roamed to us => we can directly delete
+ * the global entry, since it is useless now.
+ */
+ local_entry = batadv_tt_local_hash_find(bat_priv,
+ tt_global_entry->common.addr,
+ vid);
+ if (local_entry) {
+ /* local entry exists, case 2: client roamed to us. */
+ batadv_tt_global_del_orig_list(tt_global_entry);
+ batadv_tt_global_free(bat_priv, tt_global_entry, message);
+ } else {
+ /* no local entry exists, case 1: check for roaming */
+ batadv_tt_global_del_roaming(bat_priv, tt_global_entry,
+ orig_node, message);
+ }
+
+out:
+ batadv_tt_global_entry_put(tt_global_entry);
+ batadv_tt_local_entry_put(local_entry);
+}
+
+/**
+ * batadv_tt_global_del_orig() - remove all the TT global entries belonging to
+ * the given originator matching the provided vid
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: the originator owning the entries to remove
+ * @match_vid: the VLAN identifier to match. If negative, all entries will be
+ * removed
+ * @message: debug message to print as "reason"
+ */
+void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ s32 match_vid,
+ const char *message)
+{
+ struct batadv_tt_global_entry *tt_global;
+ struct batadv_tt_common_entry *tt_common_entry;
+ u32 i;
+ struct batadv_hashtable *hash = bat_priv->tt.global_hash;
+ struct hlist_node *safe;
+ struct hlist_head *head;
+ spinlock_t *list_lock; /* protects write access to the hash lists */
+ unsigned short vid;
+
+ if (!hash)
+ return;
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+ list_lock = &hash->list_locks[i];
+
+ spin_lock_bh(list_lock);
+ hlist_for_each_entry_safe(tt_common_entry, safe,
+ head, hash_entry) {
+ /* remove only matching entries */
+ if (match_vid >= 0 && tt_common_entry->vid != match_vid)
+ continue;
+
+ tt_global = container_of(tt_common_entry,
+ struct batadv_tt_global_entry,
+ common);
+
+ batadv_tt_global_del_orig_node(bat_priv, tt_global,
+ orig_node, message);
+
+ if (hlist_empty(&tt_global->orig_list)) {
+ vid = tt_global->common.vid;
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Deleting global tt entry %pM (vid: %d): %s\n",
+ tt_global->common.addr,
+ batadv_print_vid(vid), message);
+ hlist_del_rcu(&tt_common_entry->hash_entry);
+ batadv_tt_global_entry_put(tt_global);
+ }
+ }
+ spin_unlock_bh(list_lock);
+ }
+ clear_bit(BATADV_ORIG_CAPA_HAS_TT, &orig_node->capa_initialized);
+}
+
+static bool batadv_tt_global_to_purge(struct batadv_tt_global_entry *tt_global,
+ char **msg)
+{
+ bool purge = false;
+ unsigned long roam_timeout = BATADV_TT_CLIENT_ROAM_TIMEOUT;
+ unsigned long temp_timeout = BATADV_TT_CLIENT_TEMP_TIMEOUT;
+
+ if ((tt_global->common.flags & BATADV_TT_CLIENT_ROAM) &&
+ batadv_has_timed_out(tt_global->roam_at, roam_timeout)) {
+ purge = true;
+ *msg = "Roaming timeout\n";
+ }
+
+ if ((tt_global->common.flags & BATADV_TT_CLIENT_TEMP) &&
+ batadv_has_timed_out(tt_global->common.added_at, temp_timeout)) {
+ purge = true;
+ *msg = "Temporary client timeout\n";
+ }
+
+ return purge;
+}
+
+static void batadv_tt_global_purge(struct batadv_priv *bat_priv)
+{
+ struct batadv_hashtable *hash = bat_priv->tt.global_hash;
+ struct hlist_head *head;
+ struct hlist_node *node_tmp;
+ spinlock_t *list_lock; /* protects write access to the hash lists */
+ u32 i;
+ char *msg = NULL;
+ struct batadv_tt_common_entry *tt_common;
+ struct batadv_tt_global_entry *tt_global;
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+ list_lock = &hash->list_locks[i];
+
+ spin_lock_bh(list_lock);
+ hlist_for_each_entry_safe(tt_common, node_tmp, head,
+ hash_entry) {
+ tt_global = container_of(tt_common,
+ struct batadv_tt_global_entry,
+ common);
+
+ if (!batadv_tt_global_to_purge(tt_global, &msg))
+ continue;
+
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Deleting global tt entry %pM (vid: %d): %s\n",
+ tt_global->common.addr,
+ batadv_print_vid(tt_global->common.vid),
+ msg);
+
+ hlist_del_rcu(&tt_common->hash_entry);
+
+ batadv_tt_global_entry_put(tt_global);
+ }
+ spin_unlock_bh(list_lock);
+ }
+}
+
+static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
+{
+ struct batadv_hashtable *hash;
+ spinlock_t *list_lock; /* protects write access to the hash lists */
+ struct batadv_tt_common_entry *tt_common_entry;
+ struct batadv_tt_global_entry *tt_global;
+ struct hlist_node *node_tmp;
+ struct hlist_head *head;
+ u32 i;
+
+ if (!bat_priv->tt.global_hash)
+ return;
+
+ hash = bat_priv->tt.global_hash;
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+ list_lock = &hash->list_locks[i];
+
+ spin_lock_bh(list_lock);
+ hlist_for_each_entry_safe(tt_common_entry, node_tmp,
+ head, hash_entry) {
+ hlist_del_rcu(&tt_common_entry->hash_entry);
+ tt_global = container_of(tt_common_entry,
+ struct batadv_tt_global_entry,
+ common);
+ batadv_tt_global_entry_put(tt_global);
+ }
+ spin_unlock_bh(list_lock);
+ }
+
+ batadv_hash_destroy(hash);
+
+ bat_priv->tt.global_hash = NULL;
+}
+
+static bool
+_batadv_is_ap_isolated(struct batadv_tt_local_entry *tt_local_entry,
+ struct batadv_tt_global_entry *tt_global_entry)
+{
+ if (tt_local_entry->common.flags & BATADV_TT_CLIENT_WIFI &&
+ tt_global_entry->common.flags & BATADV_TT_CLIENT_WIFI)
+ return true;
+
+ /* check if the two clients are marked as isolated */
+ if (tt_local_entry->common.flags & BATADV_TT_CLIENT_ISOLA &&
+ tt_global_entry->common.flags & BATADV_TT_CLIENT_ISOLA)
+ return true;
+
+ return false;
+}
+
+/**
+ * batadv_transtable_search() - get the mesh destination for a given client
+ * @bat_priv: the bat priv with all the soft interface information
+ * @src: mac address of the source client
+ * @addr: mac address of the destination client
+ * @vid: VLAN identifier
+ *
+ * Return: a pointer to the originator that was selected as destination in the
+ * mesh for contacting the client 'addr', NULL otherwise.
+ * In case of multiple originators serving the same client, the function returns
+ * the best one (best in terms of metric towards the destination node).
+ *
+ * If the two clients are AP isolated the function returns NULL.
+ */
+struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
+ const u8 *src,
+ const u8 *addr,
+ unsigned short vid)
+{
+ struct batadv_tt_local_entry *tt_local_entry = NULL;
+ struct batadv_tt_global_entry *tt_global_entry = NULL;
+ struct batadv_orig_node *orig_node = NULL;
+ struct batadv_tt_orig_list_entry *best_entry;
+
+ if (src && batadv_vlan_ap_isola_get(bat_priv, vid)) {
+ tt_local_entry = batadv_tt_local_hash_find(bat_priv, src, vid);
+ if (!tt_local_entry ||
+ (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING))
+ goto out;
+ }
+
+ tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr, vid);
+ if (!tt_global_entry)
+ goto out;
+
+ /* check whether the clients should not communicate due to AP
+ * isolation
+ */
+ if (tt_local_entry &&
+ _batadv_is_ap_isolated(tt_local_entry, tt_global_entry))
+ goto out;
+
+ rcu_read_lock();
+ best_entry = batadv_transtable_best_orig(bat_priv, tt_global_entry);
+ /* found anything? */
+ if (best_entry)
+ orig_node = best_entry->orig_node;
+ if (orig_node && !kref_get_unless_zero(&orig_node->refcount))
+ orig_node = NULL;
+ rcu_read_unlock();
+
+out:
+ batadv_tt_global_entry_put(tt_global_entry);
+ batadv_tt_local_entry_put(tt_local_entry);
+
+ return orig_node;
+}
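+
+/* Usage sketch (illustrative, with hypothetical local variables): a typical
+ * caller resolves the mesh destination for a unicast frame and drops the
+ * reference once the packet has been handed off:
+ *
+ *   orig_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
+ *                                        ethhdr->h_dest, vid);
+ *   if (orig_node) {
+ *           (send the payload towards orig_node)
+ *           batadv_orig_node_put(orig_node);
+ *   }
+ */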
+
+/**
+ * batadv_tt_global_crc() - calculates the checksum of the global table entries
+ * announced by the given orig_node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: originator for which the CRC should be computed
+ * @vid: VLAN identifier for which the CRC32 has to be computed
+ *
+ * This function computes the checksum for the global table corresponding to a
+ * specific originator. In particular, the checksum is computed as follows: For
+ * each client connected to the originator the CRC32C of the MAC address and the
+ * VID is computed and then all the CRC32Cs of the various clients are xor'ed
+ * together.
+ *
+ * The idea behind this is that CRC32C should be used as much as possible in
+ * order to produce a unique hash of the table, but since the order in which
+ * the data is fed to the CRC32C function affects the result and since every
+ * node in the network probably sorts the clients differently, the hash cannot
+ * be computed over the entire table directly. Hence the CRC32C is computed
+ * only over each single client entry, and all the per-client results are then
+ * xor'ed together, because the XOR operation can combine them all regardless
+ * of their order while introducing as little noise as possible.
+ *
+ * Return: the checksum of the global table of a given originator.
+ */
+static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ unsigned short vid)
+{
+ struct batadv_hashtable *hash = bat_priv->tt.global_hash;
+ struct batadv_tt_orig_list_entry *tt_orig;
+ struct batadv_tt_common_entry *tt_common;
+ struct batadv_tt_global_entry *tt_global;
+ struct hlist_head *head;
+ u32 i, crc_tmp, crc = 0;
+ u8 flags;
+ __be16 tmp_vid;
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(tt_common, head, hash_entry) {
+ tt_global = container_of(tt_common,
+ struct batadv_tt_global_entry,
+ common);
+ /* compute the CRC only for entries belonging to the
+ * VLAN identified by the vid passed as parameter
+ */
+ if (tt_common->vid != vid)
+ continue;
+
+ /* Roaming clients are in the global table for
+ * consistency only. They don't have to be
+ * taken into account while computing the
+ * global crc
+ */
+ if (tt_common->flags & BATADV_TT_CLIENT_ROAM)
+ continue;
+ /* Temporary clients have not been announced yet, so
+ * they have to be skipped while computing the global
+ * crc
+ */
+ if (tt_common->flags & BATADV_TT_CLIENT_TEMP)
+ continue;
+
+ /* find out if this global entry is announced by this
+ * originator
+ */
+ tt_orig = batadv_tt_global_orig_entry_find(tt_global,
+ orig_node);
+ if (!tt_orig)
+ continue;
+
+ /* use network order to read the VID: this ensures that
+ * every node reads the bytes in the same order.
+ */
+ tmp_vid = htons(tt_common->vid);
+ crc_tmp = crc32c(0, &tmp_vid, sizeof(tmp_vid));
+
+ /* compute the CRC on flags that have to be kept in sync
+ * among nodes
+ */
+ flags = tt_orig->flags;
+ crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags));
+
+ crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN);
+
+ batadv_tt_orig_list_entry_put(tt_orig);
+ }
+ rcu_read_unlock();
+ }
+
+ return crc;
+}
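+
+/* Worked example of the per-client checksum above (illustrative): for a
+ * client on VLAN 5 whose synced flags are BATADV_TT_CLIENT_WIFI and whose MAC
+ * address is mac[ETH_ALEN], the contribution folded into the table CRC is:
+ *
+ *   tmp_vid = htons(5);
+ *   crc_tmp = crc32c(0, &tmp_vid, sizeof(tmp_vid));
+ *   crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags));
+ *   crc ^= crc32c(crc_tmp, mac, ETH_ALEN);
+ *
+ * XOR-combining the per-client CRCs makes the final value independent of the
+ * iteration order, which differs from node to node.
+ */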
+
+/**
+ * batadv_tt_local_crc() - calculates the checksum of the local table
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: VLAN identifier for which the CRC32 has to be computed
+ *
+ * For details about the computation, please refer to the documentation for
+ * batadv_tt_global_crc().
+ *
+ * Return: the checksum of the local table
+ */
+static u32 batadv_tt_local_crc(struct batadv_priv *bat_priv,
+ unsigned short vid)
+{
+ struct batadv_hashtable *hash = bat_priv->tt.local_hash;
+ struct batadv_tt_common_entry *tt_common;
+ struct hlist_head *head;
+ u32 i, crc_tmp, crc = 0;
+ u8 flags;
+ __be16 tmp_vid;
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(tt_common, head, hash_entry) {
+ /* compute the CRC only for entries belonging to the
+ * VLAN identified by vid
+ */
+ if (tt_common->vid != vid)
+ continue;
+
+ /* not yet committed clients have not to be taken into
+ * account while computing the CRC
+ */
+ if (tt_common->flags & BATADV_TT_CLIENT_NEW)
+ continue;
+
+ /* use network order to read the VID: this ensures that
+ * every node reads the bytes in the same order.
+ */
+ tmp_vid = htons(tt_common->vid);
+ crc_tmp = crc32c(0, &tmp_vid, sizeof(tmp_vid));
+
+ /* compute the CRC on flags that have to be kept in sync
+ * among nodes
+ */
+ flags = tt_common->flags & BATADV_TT_SYNC_MASK;
+ crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags));
+
+ crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN);
+ }
+ rcu_read_unlock();
+ }
+
+ return crc;
+}
+
+/**
+ * batadv_tt_req_node_release() - free tt_req_node entry
+ * @ref: kref pointer of the tt_req_node entry
+ */
+static void batadv_tt_req_node_release(struct kref *ref)
+{
+ struct batadv_tt_req_node *tt_req_node;
+
+ tt_req_node = container_of(ref, struct batadv_tt_req_node, refcount);
+
+ kmem_cache_free(batadv_tt_req_cache, tt_req_node);
+}
+
+/**
+ * batadv_tt_req_node_put() - decrement the tt_req_node refcounter and
+ * possibly release it
+ * @tt_req_node: tt_req_node to be freed
+ */
+static void batadv_tt_req_node_put(struct batadv_tt_req_node *tt_req_node)
+{
+ if (!tt_req_node)
+ return;
+
+ kref_put(&tt_req_node->refcount, batadv_tt_req_node_release);
+}
+
+static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
+{
+ struct batadv_tt_req_node *node;
+ struct hlist_node *safe;
+
+ spin_lock_bh(&bat_priv->tt.req_list_lock);
+
+ hlist_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
+ hlist_del_init(&node->list);
+ batadv_tt_req_node_put(node);
+ }
+
+ spin_unlock_bh(&bat_priv->tt.req_list_lock);
+}
+
+static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const void *tt_buff,
+ u16 tt_buff_len)
+{
+ /* Replace the old buffer only if I received something in the
+ * last OGM (the OGM could carry no changes)
+ */
+ spin_lock_bh(&orig_node->tt_buff_lock);
+ if (tt_buff_len > 0) {
+ kfree(orig_node->tt_buff);
+ orig_node->tt_buff_len = 0;
+ orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
+ if (orig_node->tt_buff) {
+ memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
+ orig_node->tt_buff_len = tt_buff_len;
+ }
+ }
+ spin_unlock_bh(&orig_node->tt_buff_lock);
+}
+
+static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
+{
+ struct batadv_tt_req_node *node;
+ struct hlist_node *safe;
+
+ spin_lock_bh(&bat_priv->tt.req_list_lock);
+ hlist_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
+ if (batadv_has_timed_out(node->issued_at,
+ BATADV_TT_REQUEST_TIMEOUT)) {
+ hlist_del_init(&node->list);
+ batadv_tt_req_node_put(node);
+ }
+ }
+ spin_unlock_bh(&bat_priv->tt.req_list_lock);
+}
+
+/**
+ * batadv_tt_req_node_new() - search and possibly create a tt_req_node object
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: orig node this request is being issued for
+ *
+ * Return: the pointer to the new tt_req_node struct if no request
+ * has already been issued for this orig_node, NULL otherwise.
+ */
+static struct batadv_tt_req_node *
+batadv_tt_req_node_new(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node)
+{
+ struct batadv_tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
+
+ spin_lock_bh(&bat_priv->tt.req_list_lock);
+ hlist_for_each_entry(tt_req_node_tmp, &bat_priv->tt.req_list, list) {
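+ /* batadv_compare_eth() only looks at the first ETH_ALEN bytes
+ * behind each pointer, i.e. the addr/orig members placed at the
+ * start of both structures
+ */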
+ if (batadv_compare_eth(tt_req_node_tmp, orig_node) &&
+ !batadv_has_timed_out(tt_req_node_tmp->issued_at,
+ BATADV_TT_REQUEST_TIMEOUT))
+ goto unlock;
+ }
+
+ tt_req_node = kmem_cache_alloc(batadv_tt_req_cache, GFP_ATOMIC);
+ if (!tt_req_node)
+ goto unlock;
+
+ kref_init(&tt_req_node->refcount);
+ ether_addr_copy(tt_req_node->addr, orig_node->orig);
+ tt_req_node->issued_at = jiffies;
+
+ kref_get(&tt_req_node->refcount);
+ hlist_add_head(&tt_req_node->list, &bat_priv->tt.req_list);
+unlock:
+ spin_unlock_bh(&bat_priv->tt.req_list_lock);
+ return tt_req_node;
+}
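+
+/* Usage sketch (illustrative, mirroring batadv_send_tt_request() below): the
+ * returned node carries one reference for the caller plus one held by
+ * bat_priv->tt.req_list. Once the request has been sent, the caller drops
+ * its own reference with batadv_tt_req_node_put(); the list reference is
+ * released when the reply arrives or by batadv_tt_req_purge() on timeout.
+ */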
+
+/**
+ * batadv_tt_local_valid() - verify local tt entry and get flags
+ * @entry_ptr: the local tt entry to be checked
+ * @data_ptr: not used but definition required to satisfy the callback prototype
+ * @flags: a pointer to store the TT flags for this client in
+ *
+ * Checks the validity of the given local TT entry. If it is valid, the
+ * provided flags pointer is updated.
+ *
+ * Return: true if the entry is valid, false otherwise.
+ */
+static bool batadv_tt_local_valid(const void *entry_ptr,
+ const void *data_ptr,
+ u8 *flags)
+{
+ const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
+
+ if (tt_common_entry->flags & BATADV_TT_CLIENT_NEW)
+ return false;
+
+ if (flags)
+ *flags = tt_common_entry->flags;
+
+ return true;
+}
+
+/**
+ * batadv_tt_global_valid() - verify global tt entry and get flags
+ * @entry_ptr: the global tt entry to be checked
+ * @data_ptr: an orig_node object (may be NULL)
+ * @flags: a pointer to store the TT flags for this client in
+ *
+ * Checks the validity of the given global TT entry. If it is valid, the
+ * provided flags pointer is updated either with the common (OR-combined) TT
+ * flags if data_ptr is NULL, or with the specific per-originator TT flags
+ * otherwise.
+ *
+ * Return: true if the entry is valid, false otherwise.
+ */
+static bool batadv_tt_global_valid(const void *entry_ptr,
+ const void *data_ptr,
+ u8 *flags)
+{
+ const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
+ const struct batadv_tt_global_entry *tt_global_entry;
+ const struct batadv_orig_node *orig_node = data_ptr;
+
+ if (tt_common_entry->flags & BATADV_TT_CLIENT_ROAM ||
+ tt_common_entry->flags & BATADV_TT_CLIENT_TEMP)
+ return false;
+
+ tt_global_entry = container_of(tt_common_entry,
+ struct batadv_tt_global_entry,
+ common);
+
+ return batadv_tt_global_entry_has_orig(tt_global_entry, orig_node,
+ flags);
+}
+
+/**
+ * batadv_tt_tvlv_generate() - fill the tvlv buff with the tt entries from the
+ * specified tt hash
+ * @bat_priv: the bat priv with all the soft interface information
+ * @hash: hash table containing the tt entries
+ * @tvlv_buff: pointer to the buffer to fill with the TT data
+ * @tt_len: expected tvlv tt data buffer length in number of bytes
+ * @valid_cb: function to filter tt change entries and to return TT flags
+ * @cb_data: data passed to the filter function as argument
+ *
+ * Fills the tvlv buff with the tt entries from the specified hash. If valid_cb
+ * is not provided, this function becomes a no-op.
+ */
+static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
+ struct batadv_hashtable *hash,
+ void *tvlv_buff, u16 tt_len,
+ bool (*valid_cb)(const void *,
+ const void *,
+ u8 *flags),
+ void *cb_data)
+{
+ struct batadv_tt_common_entry *tt_common_entry;
+ struct batadv_tvlv_tt_change *tt_change;
+ struct hlist_head *head;
+ u16 tt_tot, tt_num_entries = 0;
+ u8 flags;
+ bool ret;
+ u32 i;
+
+ tt_tot = batadv_tt_entries(tt_len);
+ tt_change = tvlv_buff;
+
+ if (!valid_cb)
+ return;
+
+ rcu_read_lock();
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+
+ hlist_for_each_entry_rcu(tt_common_entry,
+ head, hash_entry) {
+ if (tt_tot == tt_num_entries)
+ break;
+
+ ret = valid_cb(tt_common_entry, cb_data, &flags);
+ if (!ret)
+ continue;
+
+ ether_addr_copy(tt_change->addr, tt_common_entry->addr);
+ tt_change->flags = flags;
+ tt_change->vid = htons(tt_common_entry->vid);
+ memset(tt_change->reserved, 0,
+ sizeof(tt_change->reserved));
+
+ tt_num_entries++;
+ tt_change++;
+ }
+ }
+ rcu_read_unlock();
+}
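+
+/* Buffer layout sketch (illustrative): tvlv_buff is treated as a flat array
+ * of struct batadv_tvlv_tt_change records, one per client accepted by
+ * valid_cb(). Each record carries the client MAC, its TT flags and its VID
+ * in network byte order, with the reserved bytes zeroed. tt_tot, derived
+ * from tt_len via batadv_tt_entries(), bounds the loop so the buffer cannot
+ * be overrun even if the hash grew after tt_len was computed.
+ */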
+
+/**
+ * batadv_tt_global_check_crc() - check if all the CRCs are correct
+ * @orig_node: originator for which the CRCs have to be checked
+ * @tt_vlan: pointer to the first tvlv VLAN entry
+ * @num_vlan: number of tvlv VLAN entries
+ *
+ * Return: true if all the received CRCs match the locally stored ones, false
+ * otherwise
+ */
+static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node,
+ struct batadv_tvlv_tt_vlan_data *tt_vlan,
+ u16 num_vlan)
+{
+ struct batadv_tvlv_tt_vlan_data *tt_vlan_tmp;
+ struct batadv_orig_node_vlan *vlan;
+ int i, orig_num_vlan;
+ u32 crc;
+
+ /* check if each received CRC matches the locally stored one */
+ for (i = 0; i < num_vlan; i++) {
+ tt_vlan_tmp = tt_vlan + i;
+
+ /* if orig_node is a backbone node for this VLAN, don't check
+ * the CRC as we ignore all the global entries over it
+ */
+ if (batadv_bla_is_backbone_gw_orig(orig_node->bat_priv,
+ orig_node->orig,
+ ntohs(tt_vlan_tmp->vid)))
+ continue;
+
+ vlan = batadv_orig_node_vlan_get(orig_node,
+ ntohs(tt_vlan_tmp->vid));
+ if (!vlan)
+ return false;
+
+ crc = vlan->tt.crc;
+ batadv_orig_node_vlan_put(vlan);
+
+ if (crc != ntohl(tt_vlan_tmp->crc))
+ return false;
+ }
+
+ /* check if any excess VLANs exist locally for the originator
+ * which are not mentioned in the TVLV from the originator.
+ */
+ rcu_read_lock();
+ orig_num_vlan = 0;
+ hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list)
+ orig_num_vlan++;
+ rcu_read_unlock();
+
+ if (orig_num_vlan > num_vlan)
+ return false;
+
+ return true;
+}
+
+/**
+ * batadv_tt_local_update_crc() - update all the local CRCs
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_tt_local_update_crc(struct batadv_priv *bat_priv)
+{
+ struct batadv_softif_vlan *vlan;
+
+ /* recompute the global CRC for each VLAN */
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
+ vlan->tt.crc = batadv_tt_local_crc(bat_priv, vlan->vid);
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * batadv_tt_global_update_crc() - update all the global CRCs for this orig_node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: the orig_node for which the CRCs have to be updated
+ */
+static void batadv_tt_global_update_crc(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node)
+{
+ struct batadv_orig_node_vlan *vlan;
+ u32 crc;
+
+ /* recompute the global CRC for each VLAN */
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
+ /* if orig_node is a backbone node for this VLAN, don't compute
+ * the CRC as we ignore all the global entries over it
+ */
+ if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig,
+ vlan->vid))
+ continue;
+
+ crc = batadv_tt_global_crc(bat_priv, orig_node, vlan->vid);
+ vlan->tt.crc = crc;
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * batadv_send_tt_request() - send a TT Request message to a given node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @dst_orig_node: the destination of the message
+ * @ttvn: the version number that the source of the message is looking for
+ * @tt_vlan: pointer to the first tvlv VLAN object to request
+ * @num_vlan: number of tvlv VLAN entries
+ * @full_table: if true, ask for the entire translation table, otherwise only
+ * for the last TT diff
+ *
+ * Return: true if the TT Request was sent, false otherwise
+ */
+static bool batadv_send_tt_request(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *dst_orig_node,
+ u8 ttvn,
+ struct batadv_tvlv_tt_vlan_data *tt_vlan,
+ u16 num_vlan, bool full_table)
+{
+ struct batadv_tvlv_tt_data *tvlv_tt_data = NULL;
+ struct batadv_tt_req_node *tt_req_node = NULL;
+ struct batadv_tvlv_tt_vlan_data *tt_vlan_req;
+ struct batadv_hard_iface *primary_if;
+ bool ret = false;
+ int i, size;
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ goto out;
+
+ /* The new tt_req will be issued only if I'm not waiting for a
+ * reply from the same orig_node yet
+ */
+ tt_req_node = batadv_tt_req_node_new(bat_priv, dst_orig_node);
+ if (!tt_req_node)
+ goto out;
+
+ size = sizeof(*tvlv_tt_data) + sizeof(*tt_vlan_req) * num_vlan;
+ tvlv_tt_data = kzalloc(size, GFP_ATOMIC);
+ if (!tvlv_tt_data)
+ goto out;
+
+ tvlv_tt_data->flags = BATADV_TT_REQUEST;
+ tvlv_tt_data->ttvn = ttvn;
+ tvlv_tt_data->num_vlan = htons(num_vlan);
+
+ /* send all the CRCs within the request. This is needed by intermediate
+ * nodes to ensure they have the correct table before replying
+ */
+ tt_vlan_req = (struct batadv_tvlv_tt_vlan_data *)(tvlv_tt_data + 1);
+ for (i = 0; i < num_vlan; i++) {
+ tt_vlan_req->vid = tt_vlan->vid;
+ tt_vlan_req->crc = tt_vlan->crc;
+
+ tt_vlan_req++;
+ tt_vlan++;
+ }
+
+ if (full_table)
+ tvlv_tt_data->flags |= BATADV_TT_FULL_TABLE;
+
+ batadv_dbg(BATADV_DBG_TT, bat_priv, "Sending TT_REQUEST to %pM [%c]\n",
+ dst_orig_node->orig, full_table ? 'F' : '.');
+
+ batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_TX);
+ batadv_tvlv_unicast_send(bat_priv, primary_if->net_dev->dev_addr,
+ dst_orig_node->orig, BATADV_TVLV_TT, 1,
+ tvlv_tt_data, size);
+ ret = true;
+
+out:
+ batadv_hardif_put(primary_if);
+
+ if (ret && tt_req_node) {
+ spin_lock_bh(&bat_priv->tt.req_list_lock);
+ if (!hlist_unhashed(&tt_req_node->list)) {
+ hlist_del_init(&tt_req_node->list);
+ batadv_tt_req_node_put(tt_req_node);
+ }
+ spin_unlock_bh(&bat_priv->tt.req_list_lock);
+ }
+
+ batadv_tt_req_node_put(tt_req_node);
+
+ kfree(tvlv_tt_data);
+ return ret;
+}
+
+/**
+ * batadv_send_other_tt_response() - send reply to tt request concerning another
+ * node's translation table
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tt_data: tt data containing the tt request information
+ * @req_src: mac address of tt request sender
+ * @req_dst: mac address of tt request recipient
+ *
+ * Return: true if tt request reply was sent, false otherwise.
+ */
+static bool batadv_send_other_tt_response(struct batadv_priv *bat_priv,
+ struct batadv_tvlv_tt_data *tt_data,
+ u8 *req_src, u8 *req_dst)
+{
+ struct batadv_orig_node *req_dst_orig_node;
+ struct batadv_orig_node *res_dst_orig_node = NULL;
+ struct batadv_tvlv_tt_change *tt_change;
+ struct batadv_tvlv_tt_data *tvlv_tt_data = NULL;
+ struct batadv_tvlv_tt_vlan_data *tt_vlan;
+ bool ret = false, full_table;
+ u8 orig_ttvn, req_ttvn;
+ u16 tvlv_len;
+ s32 tt_len;
+
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
+ req_src, tt_data->ttvn, req_dst,
+ ((tt_data->flags & BATADV_TT_FULL_TABLE) ? 'F' : '.'));
+
+ /* Let's get the orig node of the REAL destination */
+ req_dst_orig_node = batadv_orig_hash_find(bat_priv, req_dst);
+ if (!req_dst_orig_node)
+ goto out;
+
+ res_dst_orig_node = batadv_orig_hash_find(bat_priv, req_src);
+ if (!res_dst_orig_node)
+ goto out;
+
+ orig_ttvn = (u8)atomic_read(&req_dst_orig_node->last_ttvn);
+ req_ttvn = tt_data->ttvn;
+
+ tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(tt_data + 1);
+ /* this node doesn't have the requested data */
+ if (orig_ttvn != req_ttvn ||
+ !batadv_tt_global_check_crc(req_dst_orig_node, tt_vlan,
+ ntohs(tt_data->num_vlan)))
+ goto out;
+
+ /* If the full table has been explicitly requested */
+ if (tt_data->flags & BATADV_TT_FULL_TABLE ||
+ !req_dst_orig_node->tt_buff)
+ full_table = true;
+ else
+ full_table = false;
+
+ /* TT fragmentation hasn't been implemented yet, so send only as
+ * many TT entries as fit into a single packet
+ */
+ if (!full_table) {
+ spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
+ tt_len = req_dst_orig_node->tt_buff_len;
+
+ tvlv_len = batadv_tt_prepare_tvlv_global_data(req_dst_orig_node,
+ &tvlv_tt_data,
+ &tt_change,
+ &tt_len);
+ if (!tt_len)
+ goto unlock;
+
+ /* Copy the last orig_node's OGM buffer */
+ memcpy(tt_change, req_dst_orig_node->tt_buff,
+ req_dst_orig_node->tt_buff_len);
+ spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
+ } else {
+ /* allocate the tvlv, put the tt_data and all the tt_vlan_data
+ * in the initial part
+ */
+ tt_len = -1;
+ tvlv_len = batadv_tt_prepare_tvlv_global_data(req_dst_orig_node,
+ &tvlv_tt_data,
+ &tt_change,
+ &tt_len);
+ if (!tt_len)
+ goto out;
+
+ /* fill the rest of the tvlv with the real TT entries */
+ batadv_tt_tvlv_generate(bat_priv, bat_priv->tt.global_hash,
+ tt_change, tt_len,
+ batadv_tt_global_valid,
+ req_dst_orig_node);
+ }
+
+ /* Don't send the response if it is larger than the maximum
+ * fragmented packet size.
+ */
+ tt_len = sizeof(struct batadv_unicast_tvlv_packet) + tvlv_len;
+ if (tt_len > atomic_read(&bat_priv->packet_size_max)) {
+ net_ratelimited_function(batadv_info, bat_priv->soft_iface,
+ "Ignoring TT_REQUEST from %pM; Response size exceeds max packet size.\n",
+ res_dst_orig_node->orig);
+ goto out;
+ }
+
+ tvlv_tt_data->flags = BATADV_TT_RESPONSE;
+ tvlv_tt_data->ttvn = req_ttvn;
+
+ if (full_table)
+ tvlv_tt_data->flags |= BATADV_TT_FULL_TABLE;
+
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Sending TT_RESPONSE %pM for %pM [%c] (ttvn: %u)\n",
+ res_dst_orig_node->orig, req_dst_orig_node->orig,
+ full_table ? 'F' : '.', req_ttvn);
+
+ batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);
+
+ batadv_tvlv_unicast_send(bat_priv, req_dst_orig_node->orig,
+ req_src, BATADV_TVLV_TT, 1, tvlv_tt_data,
+ tvlv_len);
+
+ ret = true;
+ goto out;
+
+unlock:
+ spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
+
+out:
+ batadv_orig_node_put(res_dst_orig_node);
+ batadv_orig_node_put(req_dst_orig_node);
+ kfree(tvlv_tt_data);
+ return ret;
+}
+
+/**
+ * batadv_send_my_tt_response() - send reply to tt request concerning this
+ * node's translation table
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tt_data: tt data containing the tt request information
+ * @req_src: mac address of tt request sender
+ *
+ * Return: true if tt request reply was sent, false otherwise.
+ */
+static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv,
+ struct batadv_tvlv_tt_data *tt_data,
+ u8 *req_src)
+{
+ struct batadv_tvlv_tt_data *tvlv_tt_data = NULL;
+ struct batadv_hard_iface *primary_if = NULL;
+ struct batadv_tvlv_tt_change *tt_change;
+ struct batadv_orig_node *orig_node;
+ u8 my_ttvn, req_ttvn;
+ u16 tvlv_len;
+ bool full_table;
+ s32 tt_len;
+
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
+ req_src, tt_data->ttvn,
+ ((tt_data->flags & BATADV_TT_FULL_TABLE) ? 'F' : '.'));
+
+ spin_lock_bh(&bat_priv->tt.commit_lock);
+
+ my_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
+ req_ttvn = tt_data->ttvn;
+
+ orig_node = batadv_orig_hash_find(bat_priv, req_src);
+ if (!orig_node)
+ goto out;
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ goto out;
+
+ /* If the full table has been explicitly requested or the ttvn
+ * gap is too big, send the whole local translation table
+ */
+ if (tt_data->flags & BATADV_TT_FULL_TABLE || my_ttvn != req_ttvn ||
+ !bat_priv->tt.last_changeset)
+ full_table = true;
+ else
+ full_table = false;
+
+ /* TT fragmentation hasn't been implemented yet, so send only as
+ * many TT entries as fit into a single packet
+ */
+ if (!full_table) {
+ spin_lock_bh(&bat_priv->tt.last_changeset_lock);
+
+ tt_len = bat_priv->tt.last_changeset_len;
+ tvlv_len = batadv_tt_prepare_tvlv_local_data(bat_priv,
+ &tvlv_tt_data,
+ &tt_change,
+ &tt_len);
+ if (!tt_len || !tvlv_len)
+ goto unlock;
+
+ /* Copy the last orig_node's OGM buffer */
+ memcpy(tt_change, bat_priv->tt.last_changeset,
+ bat_priv->tt.last_changeset_len);
+ spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
+ } else {
+ req_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
+
+ /* allocate the tvlv, put the tt_data and all the tt_vlan_data
+ * in the initial part
+ */
+ tt_len = -1;
+ tvlv_len = batadv_tt_prepare_tvlv_local_data(bat_priv,
+ &tvlv_tt_data,
+ &tt_change,
+ &tt_len);
+ if (!tt_len || !tvlv_len)
+ goto out;
+
+ /* fill the rest of the tvlv with the real TT entries */
+ batadv_tt_tvlv_generate(bat_priv, bat_priv->tt.local_hash,
+ tt_change, tt_len,
+ batadv_tt_local_valid, NULL);
+ }
+
+ tvlv_tt_data->flags = BATADV_TT_RESPONSE;
+ tvlv_tt_data->ttvn = req_ttvn;
+
+ if (full_table)
+ tvlv_tt_data->flags |= BATADV_TT_FULL_TABLE;
+
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Sending TT_RESPONSE to %pM [%c] (ttvn: %u)\n",
+ orig_node->orig, full_table ? 'F' : '.', req_ttvn);
+
+ batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);
+
+ batadv_tvlv_unicast_send(bat_priv, primary_if->net_dev->dev_addr,
+ req_src, BATADV_TVLV_TT, 1, tvlv_tt_data,
+ tvlv_len);
+
+ goto out;
+
+unlock:
+ spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
+out:
+ spin_unlock_bh(&bat_priv->tt.commit_lock);
+ batadv_orig_node_put(orig_node);
+ batadv_hardif_put(primary_if);
+ kfree(tvlv_tt_data);
+ /* The packet was for this host, so it doesn't need to be re-routed */
+ return true;
+}
+
+/**
+ * batadv_send_tt_response() - send reply to tt request
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tt_data: tt data containing the tt request information
+ * @req_src: mac address of tt request sender
+ * @req_dst: mac address of tt request recipient
+ *
+ * Return: true if tt request reply was sent, false otherwise.
+ */
+static bool batadv_send_tt_response(struct batadv_priv *bat_priv,
+ struct batadv_tvlv_tt_data *tt_data,
+ u8 *req_src, u8 *req_dst)
+{
+ if (batadv_is_my_mac(bat_priv, req_dst))
+ return batadv_send_my_tt_response(bat_priv, tt_data, req_src);
+ return batadv_send_other_tt_response(bat_priv, tt_data, req_src,
+ req_dst);
+}
+
+static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ struct batadv_tvlv_tt_change *tt_change,
+ u16 tt_num_changes, u8 ttvn)
+{
+ int i;
+ int roams;
+
+ for (i = 0; i < tt_num_changes; i++) {
+ if ((tt_change + i)->flags & BATADV_TT_CLIENT_DEL) {
+ roams = (tt_change + i)->flags & BATADV_TT_CLIENT_ROAM;
+ batadv_tt_global_del(bat_priv, orig_node,
+ (tt_change + i)->addr,
+ ntohs((tt_change + i)->vid),
+ "tt removed by changes",
+ roams);
+ } else {
+ if (!batadv_tt_global_add(bat_priv, orig_node,
+ (tt_change + i)->addr,
+ ntohs((tt_change + i)->vid),
+ (tt_change + i)->flags, ttvn))
+ /* If storing a global_entry fails, we
+ * stop the update procedure without
+ * committing the ttvn change. This
+ * avoids sending corrupted data in
+ * reply to a tt_request
+ */
+ return;
+ }
+ }
+ set_bit(BATADV_ORIG_CAPA_HAS_TT, &orig_node->capa_initialized);
+}
+
+static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv,
+ struct batadv_tvlv_tt_change *tt_change,
+ u8 ttvn, u8 *resp_src,
+ u16 num_entries)
+{
+ struct batadv_orig_node *orig_node;
+
+ orig_node = batadv_orig_hash_find(bat_priv, resp_src);
+ if (!orig_node)
+ goto out;
+
+ /* Purge the old table first. */
+ batadv_tt_global_del_orig(bat_priv, orig_node, -1,
+ "Received full table");
+
+ _batadv_tt_update_changes(bat_priv, orig_node, tt_change, num_entries,
+ ttvn);
+
+ spin_lock_bh(&orig_node->tt_buff_lock);
+ kfree(orig_node->tt_buff);
+ orig_node->tt_buff_len = 0;
+ orig_node->tt_buff = NULL;
+ spin_unlock_bh(&orig_node->tt_buff_lock);
+
+ atomic_set(&orig_node->last_ttvn, ttvn);
+
+out:
+ batadv_orig_node_put(orig_node);
+}
+
+static void batadv_tt_update_changes(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ u16 tt_num_changes, u8 ttvn,
+ struct batadv_tvlv_tt_change *tt_change)
+{
+ _batadv_tt_update_changes(bat_priv, orig_node, tt_change,
+ tt_num_changes, ttvn);
+
+ batadv_tt_save_orig_buffer(bat_priv, orig_node, tt_change,
+ batadv_tt_len(tt_num_changes));
+ atomic_set(&orig_node->last_ttvn, ttvn);
+}
+
+/**
+ * batadv_is_my_client() - check if a client is served by the local node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the mac address of the client to check
+ * @vid: VLAN identifier
+ *
+ * Return: true if the client is served by this node, false otherwise.
+ */
+bool batadv_is_my_client(struct batadv_priv *bat_priv, const u8 *addr,
+ unsigned short vid)
+{
+ struct batadv_tt_local_entry *tt_local_entry;
+ bool ret = false;
+
+ tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
+ if (!tt_local_entry)
+ goto out;
+ /* Check if the client has been logically deleted (but is kept for
+ * consistency purposes)
+ */
+ if ((tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING) ||
+ (tt_local_entry->common.flags & BATADV_TT_CLIENT_ROAM))
+ goto out;
+ ret = true;
+out:
+ batadv_tt_local_entry_put(tt_local_entry);
+ return ret;
+}
+
+/**
+ * batadv_handle_tt_response() - process incoming tt reply
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tt_data: tt data containing the tt request information
+ * @resp_src: mac address of tt reply sender
+ * @num_entries: number of tt change entries appended to the tt data
+ */
+static void batadv_handle_tt_response(struct batadv_priv *bat_priv,
+ struct batadv_tvlv_tt_data *tt_data,
+ u8 *resp_src, u16 num_entries)
+{
+ struct batadv_tt_req_node *node;
+ struct hlist_node *safe;
+ struct batadv_orig_node *orig_node = NULL;
+ struct batadv_tvlv_tt_change *tt_change;
+ u8 *tvlv_ptr = (u8 *)tt_data;
+ u16 change_offset;
+
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
+ resp_src, tt_data->ttvn, num_entries,
+ ((tt_data->flags & BATADV_TT_FULL_TABLE) ? 'F' : '.'));
+
+ orig_node = batadv_orig_hash_find(bat_priv, resp_src);
+ if (!orig_node)
+ goto out;
+
+ spin_lock_bh(&orig_node->tt_lock);
+
+ change_offset = sizeof(struct batadv_tvlv_tt_vlan_data);
+ change_offset *= ntohs(tt_data->num_vlan);
+ change_offset += sizeof(*tt_data);
+ tvlv_ptr += change_offset;
+
+ tt_change = (struct batadv_tvlv_tt_change *)tvlv_ptr;
+ if (tt_data->flags & BATADV_TT_FULL_TABLE) {
+ batadv_tt_fill_gtable(bat_priv, tt_change, tt_data->ttvn,
+ resp_src, num_entries);
+ } else {
+ batadv_tt_update_changes(bat_priv, orig_node, num_entries,
+ tt_data->ttvn, tt_change);
+ }
+
+ /* Recalculate the CRC for this orig_node and store it */
+ batadv_tt_global_update_crc(bat_priv, orig_node);
+
+ spin_unlock_bh(&orig_node->tt_lock);
+
+ /* Delete the tt_req_node from pending tt_requests list */
+ spin_lock_bh(&bat_priv->tt.req_list_lock);
+ hlist_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
+ if (!batadv_compare_eth(node->addr, resp_src))
+ continue;
+ hlist_del_init(&node->list);
+ batadv_tt_req_node_put(node);
+ }
+
+ spin_unlock_bh(&bat_priv->tt.req_list_lock);
+out:
+ batadv_orig_node_put(orig_node);
+}
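+
+/* Wire layout sketch for the parsed response (illustrative): the change
+ * records start right after the per-VLAN CRC list, which is exactly what the
+ * change_offset arithmetic above skips over:
+ *
+ *   struct batadv_tvlv_tt_data
+ *   struct batadv_tvlv_tt_vlan_data[ntohs(tt_data->num_vlan)]
+ *   struct batadv_tvlv_tt_change[num_entries]
+ */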
+
+static void batadv_tt_roam_list_free(struct batadv_priv *bat_priv)
+{
+ struct batadv_tt_roam_node *node, *safe;
+
+ spin_lock_bh(&bat_priv->tt.roam_list_lock);
+
+ list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) {
+ list_del(&node->list);
+ kmem_cache_free(batadv_tt_roam_cache, node);
+ }
+
+ spin_unlock_bh(&bat_priv->tt.roam_list_lock);
+}
+
+static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
+{
+ struct batadv_tt_roam_node *node, *safe;
+
+ spin_lock_bh(&bat_priv->tt.roam_list_lock);
+ list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) {
+ if (!batadv_has_timed_out(node->first_time,
+ BATADV_ROAMING_MAX_TIME))
+ continue;
+
+ list_del(&node->list);
+ kmem_cache_free(batadv_tt_roam_cache, node);
+ }
+ spin_unlock_bh(&bat_priv->tt.roam_list_lock);
+}
+
+/**
+ * batadv_tt_check_roam_count() - check if a client has roamed too frequently
+ * @bat_priv: the bat priv with all the soft interface information
+ * @client: mac address of the roaming client
+ *
+ * This function checks whether the client has already reached the
+ * maximum number of possible roaming phases. In this case the ROAMING_ADV
+ * will not be sent.
+ *
+ * Return: true if the ROAMING_ADV can be sent, false otherwise
+ */
+static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv, u8 *client)
+{
+ struct batadv_tt_roam_node *tt_roam_node;
+ bool ret = false;
+
+ spin_lock_bh(&bat_priv->tt.roam_list_lock);
+ /* The ROAMING_ADV is sent only if the client still has some
+ * roaming budget left within the current time window
+ */
+ list_for_each_entry(tt_roam_node, &bat_priv->tt.roam_list, list) {
+ if (!batadv_compare_eth(tt_roam_node->addr, client))
+ continue;
+
+ if (batadv_has_timed_out(tt_roam_node->first_time,
+ BATADV_ROAMING_MAX_TIME))
+ continue;
+
+ if (!batadv_atomic_dec_not_zero(&tt_roam_node->counter))
+ /* Sorry, you roamed too many times! */
+ goto unlock;
+ ret = true;
+ break;
+ }
+
+ if (!ret) {
+ tt_roam_node = kmem_cache_alloc(batadv_tt_roam_cache,
+ GFP_ATOMIC);
+ if (!tt_roam_node)
+ goto unlock;
+
+ tt_roam_node->first_time = jiffies;
+ atomic_set(&tt_roam_node->counter,
+ BATADV_ROAMING_MAX_COUNT - 1);
+ ether_addr_copy(tt_roam_node->addr, client);
+
+ list_add(&tt_roam_node->list, &bat_priv->tt.roam_list);
+ ret = true;
+ }
+
+unlock:
+ spin_unlock_bh(&bat_priv->tt.roam_list_lock);
+ return ret;
+}
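+
+/* Illustration of the rate limiting above: the per-client counter starts
+ * at BATADV_ROAMING_MAX_COUNT - 1 since the roaming event which created
+ * the node already counts as the first one. Once the counter hits zero
+ * within a BATADV_ROAMING_MAX_TIME window, further ROAMING_ADVs for this
+ * client are suppressed until the node times out and is purged by
+ * batadv_tt_roam_purge().
+ */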
+
+/**
+ * batadv_send_roam_adv() - send a roaming advertisement message
+ * @bat_priv: the bat priv with all the soft interface information
+ * @client: mac address of the roaming client
+ * @vid: VLAN identifier
+ * @orig_node: message destination
+ *
+ * Send a ROAMING_ADV message to the node which was previously serving this
+ * client. This is done to inform the node that from now on all traffic destined
+ * for this particular roamed client has to be forwarded to the sender of the
+ * roaming message.
+ */
+static void batadv_send_roam_adv(struct batadv_priv *bat_priv, u8 *client,
+ unsigned short vid,
+ struct batadv_orig_node *orig_node)
+{
+ struct batadv_hard_iface *primary_if;
+ struct batadv_tvlv_roam_adv tvlv_roam;
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ goto out;
+
+ /* before going on we have to check whether the client has
+ * already roamed to us too many times
+ */
+ if (!batadv_tt_check_roam_count(bat_priv, client))
+ goto out;
+
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Sending ROAMING_ADV to %pM (client %pM, vid: %d)\n",
+ orig_node->orig, client, batadv_print_vid(vid));
+
+ batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_TX);
+
+ memcpy(tvlv_roam.client, client, sizeof(tvlv_roam.client));
+ tvlv_roam.vid = htons(vid);
+
+ batadv_tvlv_unicast_send(bat_priv, primary_if->net_dev->dev_addr,
+ orig_node->orig, BATADV_TVLV_ROAM, 1,
+ &tvlv_roam, sizeof(tvlv_roam));
+
+out:
+ batadv_hardif_put(primary_if);
+}
+
+static void batadv_tt_purge(struct work_struct *work)
+{
+ struct delayed_work *delayed_work;
+ struct batadv_priv_tt *priv_tt;
+ struct batadv_priv *bat_priv;
+
+ delayed_work = to_delayed_work(work);
+ priv_tt = container_of(delayed_work, struct batadv_priv_tt, work);
+ bat_priv = container_of(priv_tt, struct batadv_priv, tt);
+
+ batadv_tt_local_purge(bat_priv, BATADV_TT_LOCAL_TIMEOUT);
+ batadv_tt_global_purge(bat_priv);
+ batadv_tt_req_purge(bat_priv);
+ batadv_tt_roam_purge(bat_priv);
+
+ queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work,
+ msecs_to_jiffies(BATADV_TT_WORK_PERIOD));
+}
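+
+/* Note on the pattern above: batadv_tt_purge() is a self-rearming
+ * delayed work item - it requeues itself every BATADV_TT_WORK_PERIOD
+ * milliseconds. The cycle is started in batadv_tt_init() and stopped
+ * via cancel_delayed_work_sync() in batadv_tt_free().
+ */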
+
+/**
+ * batadv_tt_free() - Free translation table of soft interface
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_tt_free(struct batadv_priv *bat_priv)
+{
+ batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_ROAM, 1);
+
+ batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_TT, 1);
+ batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_TT, 1);
+
+ cancel_delayed_work_sync(&bat_priv->tt.work);
+
+ batadv_tt_local_table_free(bat_priv);
+ batadv_tt_global_table_free(bat_priv);
+ batadv_tt_req_list_free(bat_priv);
+ batadv_tt_changes_list_free(bat_priv);
+ batadv_tt_roam_list_free(bat_priv);
+
+ kfree(bat_priv->tt.last_changeset);
+}
+
+/**
+ * batadv_tt_local_set_flags() - set or unset the specified flags on the local
+ * table and possibly count them in the TT size
+ * @bat_priv: the bat priv with all the soft interface information
+ * @flags: the flags to switch
+ * @enable: whether to set or unset the flag
+ * @count: whether to increase the TT size by the number of changed entries
+ */
+static void batadv_tt_local_set_flags(struct batadv_priv *bat_priv, u16 flags,
+ bool enable, bool count)
+{
+ struct batadv_hashtable *hash = bat_priv->tt.local_hash;
+ struct batadv_tt_common_entry *tt_common_entry;
+ struct hlist_head *head;
+ u32 i;
+
+ if (!hash)
+ return;
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(tt_common_entry,
+ head, hash_entry) {
+ if (enable) {
+ if ((tt_common_entry->flags & flags) == flags)
+ continue;
+ tt_common_entry->flags |= flags;
+ } else {
+ if (!(tt_common_entry->flags & flags))
+ continue;
+ tt_common_entry->flags &= ~flags;
+ }
+
+ if (!count)
+ continue;
+
+ batadv_tt_local_size_inc(bat_priv,
+ tt_common_entry->vid);
+ }
+ rcu_read_unlock();
+ }
+}
+
+/* Purge out all the tt local entries marked with BATADV_TT_CLIENT_PENDING */
+static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
+{
+ struct batadv_hashtable *hash = bat_priv->tt.local_hash;
+ struct batadv_tt_common_entry *tt_common;
+ struct batadv_tt_local_entry *tt_local;
+ struct hlist_node *node_tmp;
+ struct hlist_head *head;
+ spinlock_t *list_lock; /* protects write access to the hash lists */
+ u32 i;
+
+ if (!hash)
+ return;
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+ list_lock = &hash->list_locks[i];
+
+ spin_lock_bh(list_lock);
+ hlist_for_each_entry_safe(tt_common, node_tmp, head,
+ hash_entry) {
+ if (!(tt_common->flags & BATADV_TT_CLIENT_PENDING))
+ continue;
+
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Deleting local tt entry (%pM, vid: %d): pending\n",
+ tt_common->addr,
+ batadv_print_vid(tt_common->vid));
+
+ batadv_tt_local_size_dec(bat_priv, tt_common->vid);
+ hlist_del_rcu(&tt_common->hash_entry);
+ tt_local = container_of(tt_common,
+ struct batadv_tt_local_entry,
+ common);
+
+ batadv_tt_local_entry_put(tt_local);
+ }
+ spin_unlock_bh(list_lock);
+ }
+}
+
+/**
+ * batadv_tt_local_commit_changes_nolock() - commit all pending local tt changes
+ * which have been queued in the time since the last commit
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Caller must hold tt->commit_lock.
+ */
+static void batadv_tt_local_commit_changes_nolock(struct batadv_priv *bat_priv)
+{
+ lockdep_assert_held(&bat_priv->tt.commit_lock);
+
+ if (atomic_read(&bat_priv->tt.local_changes) < 1) {
+ if (!batadv_atomic_dec_not_zero(&bat_priv->tt.ogm_append_cnt))
+ batadv_tt_tvlv_container_update(bat_priv);
+ return;
+ }
+
+ batadv_tt_local_set_flags(bat_priv, BATADV_TT_CLIENT_NEW, false, true);
+
+ batadv_tt_local_purge_pending_clients(bat_priv);
+ batadv_tt_local_update_crc(bat_priv);
+
+ /* Increment the TTVN only once per OGM interval */
+ atomic_inc(&bat_priv->tt.vn);
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Local changes committed, updating to ttvn %u\n",
+ (u8)atomic_read(&bat_priv->tt.vn));
+
+ /* reset the sending counter */
+ atomic_set(&bat_priv->tt.ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
+ batadv_tt_tvlv_container_update(bat_priv);
+}
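+
+/* Sketch of the append budget handled above: a committed changeset is
+ * attached to at most BATADV_TT_OGM_APPEND_MAX consecutive OGMs. Each
+ * commit without pending changes decrements ogm_append_cnt; once it is
+ * exhausted the TVLV container is rebuilt without the stale diff, while
+ * a real commit bumps the TTVN and resets the budget.
+ */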
+
+/**
+ * batadv_tt_local_commit_changes() - commit all pending local tt changes which
+ * have been queued in the time since the last commit
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_tt_local_commit_changes(struct batadv_priv *bat_priv)
+{
+ spin_lock_bh(&bat_priv->tt.commit_lock);
+ batadv_tt_local_commit_changes_nolock(bat_priv);
+ spin_unlock_bh(&bat_priv->tt.commit_lock);
+}
+
+/**
+ * batadv_is_ap_isolated() - Check if packet from upper layer should be dropped
+ * @bat_priv: the bat priv with all the soft interface information
+ * @src: source mac address of packet
+ * @dst: destination mac address of packet
+ * @vid: vlan id of packet
+ *
+ * Return: true when src+dst(+vid) pair should be isolated, false otherwise
+ */
+bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, u8 *src, u8 *dst,
+ unsigned short vid)
+{
+ struct batadv_tt_local_entry *tt_local_entry;
+ struct batadv_tt_global_entry *tt_global_entry;
+ struct batadv_softif_vlan *vlan;
+ bool ret = false;
+
+ vlan = batadv_softif_vlan_get(bat_priv, vid);
+ if (!vlan)
+ return false;
+
+ if (!atomic_read(&vlan->ap_isolation))
+ goto vlan_put;
+
+ tt_local_entry = batadv_tt_local_hash_find(bat_priv, dst, vid);
+ if (!tt_local_entry)
+ goto vlan_put;
+
+ tt_global_entry = batadv_tt_global_hash_find(bat_priv, src, vid);
+ if (!tt_global_entry)
+ goto local_entry_put;
+
+ if (_batadv_is_ap_isolated(tt_local_entry, tt_global_entry))
+ ret = true;
+
+ batadv_tt_global_entry_put(tt_global_entry);
+local_entry_put:
+ batadv_tt_local_entry_put(tt_local_entry);
+vlan_put:
+ batadv_softif_vlan_put(vlan);
+ return ret;
+}
+
+/**
+ * batadv_tt_update_orig() - update global translation table with new tt
+ * information received via ogms
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: the orig_node of the ogm
+ * @tt_buff: pointer to the first tvlv VLAN entry
+ * @tt_num_vlan: number of tvlv VLAN entries
+ * @tt_change: pointer to the first entry in the TT buffer
+ * @tt_num_changes: number of tt changes inside the tt buffer
+ * @ttvn: translation table version number of this changeset
+ */
+static void batadv_tt_update_orig(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const void *tt_buff, u16 tt_num_vlan,
+ struct batadv_tvlv_tt_change *tt_change,
+ u16 tt_num_changes, u8 ttvn)
+{
+ u8 orig_ttvn = (u8)atomic_read(&orig_node->last_ttvn);
+ struct batadv_tvlv_tt_vlan_data *tt_vlan;
+ bool full_table = true;
+ bool has_tt_init;
+
+ tt_vlan = (struct batadv_tvlv_tt_vlan_data *)tt_buff;
+ has_tt_init = test_bit(BATADV_ORIG_CAPA_HAS_TT,
+ &orig_node->capa_initialized);
+
+ /* orig table not initialised AND first diff is in the OGM OR the ttvn
+ * increased by one -> we can apply the attached changes
+ */
+ if ((!has_tt_init && ttvn == 1) || ttvn - orig_ttvn == 1) {
+ /* the OGM could not contain the changes due to their size or
+ * because they have already been sent BATADV_TT_OGM_APPEND_MAX
+ * times.
+ * In this case send a tt request
+ */
+ if (!tt_num_changes) {
+ full_table = false;
+ goto request_table;
+ }
+
+ spin_lock_bh(&orig_node->tt_lock);
+
+ batadv_tt_update_changes(bat_priv, orig_node, tt_num_changes,
+ ttvn, tt_change);
+
+ /* Even if we received the precomputed crc with the OGM, we
+ * prefer to recompute it to spot any possible inconsistency
+ * in the global table
+ */
+ batadv_tt_global_update_crc(bat_priv, orig_node);
+
+ spin_unlock_bh(&orig_node->tt_lock);
+
+ /* The ttvn alone is not enough to guarantee consistency
+ * because a single value could represent different states
+ * (due to the wrap around). Thus a node has to check whether
+ * the resulting table (after applying the changes) is still
+ * consistent or not. E.g. a node could disconnect while its
+ * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
+ * checking the CRC value is mandatory to detect the
+ * inconsistency
+ */
+ if (!batadv_tt_global_check_crc(orig_node, tt_vlan,
+ tt_num_vlan))
+ goto request_table;
+ } else {
+ /* if we missed more than one change or our tables are not
+ * in sync anymore -> request fresh tt data
+ */
+ if (!has_tt_init || ttvn != orig_ttvn ||
+ !batadv_tt_global_check_crc(orig_node, tt_vlan,
+ tt_num_vlan)) {
+request_table:
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u num_changes: %u)\n",
+ orig_node->orig, ttvn, orig_ttvn,
+ tt_num_changes);
+ batadv_send_tt_request(bat_priv, orig_node, ttvn,
+ tt_vlan, tt_num_vlan,
+ full_table);
+ return;
+ }
+ }
+}
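+
+/* Decision summary for the logic above (illustrative):
+ *   table uninitialised with ttvn == 1, or ttvn advanced by exactly one:
+ *     - diff attached    -> apply it, then recompute and verify the CRCs;
+ *                           a mismatch triggers a full table request
+ *     - no diff attached -> request only the missing diff
+ *   any other ttvn jump or CRC mismatch -> request the full table
+ *   in-sync ttvn with matching CRCs -> nothing to do
+ */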
+
+/**
+ * batadv_tt_global_client_is_roaming() - check if a client is marked as roaming
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the mac address of the client to check
+ * @vid: VLAN identifier
+ *
+ * Return: true if we know that the client has moved from its old originator
+ * to another one. The entry is still kept for consistency purposes and will
+ * be deleted later by a DEL event or a timeout
+ */
+bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
+ u8 *addr, unsigned short vid)
+{
+ struct batadv_tt_global_entry *tt_global_entry;
+ bool ret = false;
+
+ tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr, vid);
+ if (!tt_global_entry)
+ goto out;
+
+ ret = tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM;
+ batadv_tt_global_entry_put(tt_global_entry);
+out:
+ return ret;
+}
+
+/**
+ * batadv_tt_local_client_is_roaming() - tells whether the client is roaming
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the mac address of the local client to query
+ * @vid: VLAN identifier
+ *
+ * Return: true if the local client is known to be roaming (it is no longer
+ * served by this node), false otherwise. A roaming client is still kept in
+ * the table to keep the latter consistent with the node TTVN
+ */
+bool batadv_tt_local_client_is_roaming(struct batadv_priv *bat_priv,
+ u8 *addr, unsigned short vid)
+{
+ struct batadv_tt_local_entry *tt_local_entry;
+ bool ret = false;
+
+ tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
+ if (!tt_local_entry)
+ goto out;
+
+ ret = tt_local_entry->common.flags & BATADV_TT_CLIENT_ROAM;
+ batadv_tt_local_entry_put(tt_local_entry);
+out:
+ return ret;
+}
+
+/**
+ * batadv_tt_add_temporary_global_entry() - Add temporary entry to global TT
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: orig node which the temporary entry should be associated with
+ * @addr: mac address of the client
+ * @vid: VLAN id of the new temporary global translation table entry
+ *
+ * Return: true when temporary tt entry could be added, false otherwise
+ */
+bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const unsigned char *addr,
+ unsigned short vid)
+{
+	/* ignore loop detect macs, they are not supposed to be in the tt
+	 * local data either
+	 */
+ if (batadv_bla_is_loopdetect_mac(addr))
+ return false;
+
+ if (!batadv_tt_global_add(bat_priv, orig_node, addr, vid,
+ BATADV_TT_CLIENT_TEMP,
+ atomic_read(&orig_node->last_ttvn)))
+ return false;
+
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Added temporary global client (addr: %pM, vid: %d, orig: %pM)\n",
+ addr, batadv_print_vid(vid), orig_node->orig);
+
+ return true;
+}
+
+/**
+ * batadv_tt_local_resize_to_mtu() - resize the local translation table to fit
+ *  the maximum packet size that can be transported through the mesh
+ * @soft_iface: netdev struct of the mesh interface
+ *
+ * Remove entries older than 'timeout' and halve the timeout if more entries
+ * need to be removed.
+ */
+void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface)
+{
+ struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+ int packet_size_max = atomic_read(&bat_priv->packet_size_max);
+ int table_size, timeout = BATADV_TT_LOCAL_TIMEOUT / 2;
+ bool reduced = false;
+
+ spin_lock_bh(&bat_priv->tt.commit_lock);
+
+ while (true) {
+ table_size = batadv_tt_local_table_transmit_size(bat_priv);
+ if (packet_size_max >= table_size)
+ break;
+
+ batadv_tt_local_purge(bat_priv, timeout);
+ batadv_tt_local_purge_pending_clients(bat_priv);
+
+ timeout /= 2;
+ reduced = true;
+ net_ratelimited_function(batadv_info, soft_iface,
+ "Forced to purge local tt entries to fit new maximum fragment MTU (%i)\n",
+ packet_size_max);
+ }
+
+ /* commit these changes immediately, to avoid synchronization problem
+ * with the TTVN
+ */
+ if (reduced)
+ batadv_tt_local_commit_changes_nolock(bat_priv);
+
+ spin_unlock_bh(&bat_priv->tt.commit_lock);
+}
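+
+/* Worked example for the loop above: with BATADV_TT_LOCAL_TIMEOUT as the
+ * regular client timeout, the purge threshold starts at half of it and is
+ * halved again on every pass (1/2, 1/4, 1/8, ...) until the serialized
+ * table fits into packet_size_max.
+ */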
+
+/**
+ * batadv_tt_tvlv_ogm_handler_v1() - process incoming tt tvlv container
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the orig_node of the ogm
+ * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
+ * @tvlv_value: tvlv buffer containing the tt data
+ * @tvlv_value_len: tvlv buffer length
+ */
+static void batadv_tt_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig,
+ u8 flags, void *tvlv_value,
+ u16 tvlv_value_len)
+{
+ struct batadv_tvlv_tt_vlan_data *tt_vlan;
+ struct batadv_tvlv_tt_change *tt_change;
+ struct batadv_tvlv_tt_data *tt_data;
+ u16 num_entries, num_vlan;
+
+ if (tvlv_value_len < sizeof(*tt_data))
+ return;
+
+ tt_data = tvlv_value;
+ tvlv_value_len -= sizeof(*tt_data);
+
+ num_vlan = ntohs(tt_data->num_vlan);
+
+ if (tvlv_value_len < sizeof(*tt_vlan) * num_vlan)
+ return;
+
+ tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(tt_data + 1);
+ tt_change = (struct batadv_tvlv_tt_change *)(tt_vlan + num_vlan);
+ tvlv_value_len -= sizeof(*tt_vlan) * num_vlan;
+
+ num_entries = batadv_tt_entries(tvlv_value_len);
+
+ batadv_tt_update_orig(bat_priv, orig, tt_vlan, num_vlan, tt_change,
+ num_entries, tt_data->ttvn);
+}
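+
+/* Layout of the TT TVLV parsed above (illustrative):
+ *
+ *   tvlv_value:
+ *   [ batadv_tvlv_tt_data ][ num_vlan x batadv_tvlv_tt_vlan_data ]
+ *   [ num_entries x batadv_tvlv_tt_change ]
+ */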
+
+/**
+ * batadv_tt_tvlv_unicast_handler_v1() - process incoming (unicast) tt tvlv
+ * container
+ * @bat_priv: the bat priv with all the soft interface information
+ * @src: mac address of tt tvlv sender
+ * @dst: mac address of tt tvlv recipient
+ * @tvlv_value: tvlv buffer containing the tt data
+ * @tvlv_value_len: tvlv buffer length
+ *
+ * Return: NET_RX_DROP if the tt tvlv is to be re-routed, NET_RX_SUCCESS
+ * otherwise.
+ */
+static int batadv_tt_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv,
+ u8 *src, u8 *dst,
+ void *tvlv_value,
+ u16 tvlv_value_len)
+{
+ struct batadv_tvlv_tt_data *tt_data;
+ u16 tt_vlan_len, tt_num_entries;
+ char tt_flag;
+ bool ret;
+
+ if (tvlv_value_len < sizeof(*tt_data))
+ return NET_RX_SUCCESS;
+
+ tt_data = tvlv_value;
+ tvlv_value_len -= sizeof(*tt_data);
+
+ tt_vlan_len = sizeof(struct batadv_tvlv_tt_vlan_data);
+ tt_vlan_len *= ntohs(tt_data->num_vlan);
+
+ if (tvlv_value_len < tt_vlan_len)
+ return NET_RX_SUCCESS;
+
+ tvlv_value_len -= tt_vlan_len;
+ tt_num_entries = batadv_tt_entries(tvlv_value_len);
+
+ switch (tt_data->flags & BATADV_TT_DATA_TYPE_MASK) {
+ case BATADV_TT_REQUEST:
+ batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_RX);
+
+ /* If this node cannot provide a TT response the tt_request is
+ * forwarded
+ */
+ ret = batadv_send_tt_response(bat_priv, tt_data, src, dst);
+ if (!ret) {
+ if (tt_data->flags & BATADV_TT_FULL_TABLE)
+ tt_flag = 'F';
+ else
+ tt_flag = '.';
+
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Routing TT_REQUEST to %pM [%c]\n",
+ dst, tt_flag);
+ /* tvlv API will re-route the packet */
+ return NET_RX_DROP;
+ }
+ break;
+ case BATADV_TT_RESPONSE:
+ batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_RX);
+
+ if (batadv_is_my_mac(bat_priv, dst)) {
+ batadv_handle_tt_response(bat_priv, tt_data,
+ src, tt_num_entries);
+ return NET_RX_SUCCESS;
+ }
+
+ if (tt_data->flags & BATADV_TT_FULL_TABLE)
+ tt_flag = 'F';
+ else
+ tt_flag = '.';
+
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Routing TT_RESPONSE to %pM [%c]\n", dst, tt_flag);
+
+ /* tvlv API will re-route the packet */
+ return NET_RX_DROP;
+ }
+
+ return NET_RX_SUCCESS;
+}
+
+/**
+ * batadv_roam_tvlv_unicast_handler_v1() - process incoming tt roam tvlv
+ * container
+ * @bat_priv: the bat priv with all the soft interface information
+ * @src: mac address of tt tvlv sender
+ * @dst: mac address of tt tvlv recipient
+ * @tvlv_value: tvlv buffer containing the tt data
+ * @tvlv_value_len: tvlv buffer length
+ *
+ * Return: NET_RX_DROP if the tt roam tvlv is to be re-routed, NET_RX_SUCCESS
+ * otherwise.
+ */
+static int batadv_roam_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv,
+ u8 *src, u8 *dst,
+ void *tvlv_value,
+ u16 tvlv_value_len)
+{
+ struct batadv_tvlv_roam_adv *roaming_adv;
+ struct batadv_orig_node *orig_node = NULL;
+
+ /* If this node is not the intended recipient of the
+ * roaming advertisement the packet is forwarded
+ * (the tvlv API will re-route the packet).
+ */
+ if (!batadv_is_my_mac(bat_priv, dst))
+ return NET_RX_DROP;
+
+ if (tvlv_value_len < sizeof(*roaming_adv))
+ goto out;
+
+ orig_node = batadv_orig_hash_find(bat_priv, src);
+ if (!orig_node)
+ goto out;
+
+ batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_RX);
+ roaming_adv = tvlv_value;
+
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Received ROAMING_ADV from %pM (client %pM)\n",
+ src, roaming_adv->client);
+
+ batadv_tt_global_add(bat_priv, orig_node, roaming_adv->client,
+ ntohs(roaming_adv->vid), BATADV_TT_CLIENT_ROAM,
+ atomic_read(&orig_node->last_ttvn) + 1);
+
+out:
+ batadv_orig_node_put(orig_node);
+ return NET_RX_SUCCESS;
+}
+
+/**
+ * batadv_tt_init() - initialise the translation table internals
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 1 on success or negative error number in case of failure.
+ */
+int batadv_tt_init(struct batadv_priv *bat_priv)
+{
+ int ret;
+
+ /* synchronized flags must be remote */
+ BUILD_BUG_ON(!(BATADV_TT_SYNC_MASK & BATADV_TT_REMOTE_MASK));
+
+ ret = batadv_tt_local_init(bat_priv);
+ if (ret < 0)
+ return ret;
+
+ ret = batadv_tt_global_init(bat_priv);
+ if (ret < 0) {
+ batadv_tt_local_table_free(bat_priv);
+ return ret;
+ }
+
+ batadv_tvlv_handler_register(bat_priv, batadv_tt_tvlv_ogm_handler_v1,
+ batadv_tt_tvlv_unicast_handler_v1, NULL,
+ BATADV_TVLV_TT, 1, BATADV_NO_FLAGS);
+
+ batadv_tvlv_handler_register(bat_priv, NULL,
+ batadv_roam_tvlv_unicast_handler_v1, NULL,
+ BATADV_TVLV_ROAM, 1, BATADV_NO_FLAGS);
+
+ INIT_DELAYED_WORK(&bat_priv->tt.work, batadv_tt_purge);
+ queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work,
+ msecs_to_jiffies(BATADV_TT_WORK_PERIOD));
+
+ return 1;
+}
+
+/**
+ * batadv_tt_global_is_isolated() - check if a client is marked as isolated
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the mac address of the client
+ * @vid: the identifier of the VLAN where this client is connected
+ *
+ * Return: true if the client is marked with the BATADV_TT_CLIENT_ISOLA flag,
+ * false otherwise
+ */
+bool batadv_tt_global_is_isolated(struct batadv_priv *bat_priv,
+ const u8 *addr, unsigned short vid)
+{
+ struct batadv_tt_global_entry *tt;
+ bool ret;
+
+ tt = batadv_tt_global_hash_find(bat_priv, addr, vid);
+ if (!tt)
+ return false;
+
+ ret = tt->common.flags & BATADV_TT_CLIENT_ISOLA;
+
+ batadv_tt_global_entry_put(tt);
+
+ return ret;
+}
+
+/**
+ * batadv_tt_cache_init() - Initialize tt memory object cache
+ *
+ * Return: 0 on success or negative error number in case of failure.
+ */
+int __init batadv_tt_cache_init(void)
+{
+ size_t tl_size = sizeof(struct batadv_tt_local_entry);
+ size_t tg_size = sizeof(struct batadv_tt_global_entry);
+ size_t tt_orig_size = sizeof(struct batadv_tt_orig_list_entry);
+ size_t tt_change_size = sizeof(struct batadv_tt_change_node);
+ size_t tt_req_size = sizeof(struct batadv_tt_req_node);
+ size_t tt_roam_size = sizeof(struct batadv_tt_roam_node);
+
+ batadv_tl_cache = kmem_cache_create("batadv_tl_cache", tl_size, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!batadv_tl_cache)
+ return -ENOMEM;
+
+ batadv_tg_cache = kmem_cache_create("batadv_tg_cache", tg_size, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!batadv_tg_cache)
+ goto err_tt_tl_destroy;
+
+ batadv_tt_orig_cache = kmem_cache_create("batadv_tt_orig_cache",
+ tt_orig_size, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!batadv_tt_orig_cache)
+ goto err_tt_tg_destroy;
+
+ batadv_tt_change_cache = kmem_cache_create("batadv_tt_change_cache",
+ tt_change_size, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!batadv_tt_change_cache)
+ goto err_tt_orig_destroy;
+
+ batadv_tt_req_cache = kmem_cache_create("batadv_tt_req_cache",
+ tt_req_size, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!batadv_tt_req_cache)
+ goto err_tt_change_destroy;
+
+ batadv_tt_roam_cache = kmem_cache_create("batadv_tt_roam_cache",
+ tt_roam_size, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!batadv_tt_roam_cache)
+ goto err_tt_req_destroy;
+
+ return 0;
+
+err_tt_req_destroy:
+ kmem_cache_destroy(batadv_tt_req_cache);
+ batadv_tt_req_cache = NULL;
+err_tt_change_destroy:
+ kmem_cache_destroy(batadv_tt_change_cache);
+ batadv_tt_change_cache = NULL;
+err_tt_orig_destroy:
+ kmem_cache_destroy(batadv_tt_orig_cache);
+ batadv_tt_orig_cache = NULL;
+err_tt_tg_destroy:
+ kmem_cache_destroy(batadv_tg_cache);
+ batadv_tg_cache = NULL;
+err_tt_tl_destroy:
+ kmem_cache_destroy(batadv_tl_cache);
+ batadv_tl_cache = NULL;
+
+ return -ENOMEM;
+}
+
+/**
+ * batadv_tt_cache_destroy() - Destroy tt memory object cache
+ */
+void batadv_tt_cache_destroy(void)
+{
+ kmem_cache_destroy(batadv_tl_cache);
+ kmem_cache_destroy(batadv_tg_cache);
+ kmem_cache_destroy(batadv_tt_orig_cache);
+ kmem_cache_destroy(batadv_tt_change_cache);
+ kmem_cache_destroy(batadv_tt_req_cache);
+ kmem_cache_destroy(batadv_tt_roam_cache);
+}
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
new file mode 100644
index 0000000000..d18740d9a2
--- /dev/null
+++ b/net/batman-adv/translation-table.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Simon Wunderlich, Antonio Quartulli
+ */
+
+#ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
+#define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
+
+#include "main.h"
+
+#include <linux/kref.h>
+#include <linux/netdevice.h>
+#include <linux/netlink.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
+int batadv_tt_init(struct batadv_priv *bat_priv);
+bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
+ unsigned short vid, int ifindex, u32 mark);
+u16 batadv_tt_local_remove(struct batadv_priv *bat_priv,
+ const u8 *addr, unsigned short vid,
+ const char *message, bool roaming);
+int batadv_tt_local_dump(struct sk_buff *msg, struct netlink_callback *cb);
+int batadv_tt_global_dump(struct sk_buff *msg, struct netlink_callback *cb);
+void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ s32 match_vid, const char *message);
+struct batadv_tt_global_entry *
+batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
+ unsigned short vid);
+void batadv_tt_global_entry_release(struct kref *ref);
+int batadv_tt_global_hash_count(struct batadv_priv *bat_priv,
+ const u8 *addr, unsigned short vid);
+struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
+ const u8 *src, const u8 *addr,
+ unsigned short vid);
+void batadv_tt_free(struct batadv_priv *bat_priv);
+bool batadv_is_my_client(struct batadv_priv *bat_priv, const u8 *addr,
+ unsigned short vid);
+bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, u8 *src, u8 *dst,
+ unsigned short vid);
+void batadv_tt_local_commit_changes(struct batadv_priv *bat_priv);
+bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
+ u8 *addr, unsigned short vid);
+bool batadv_tt_local_client_is_roaming(struct batadv_priv *bat_priv,
+ u8 *addr, unsigned short vid);
+void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface);
+bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const unsigned char *addr,
+ unsigned short vid);
+bool batadv_tt_global_is_isolated(struct batadv_priv *bat_priv,
+ const u8 *addr, unsigned short vid);
+
+int batadv_tt_cache_init(void);
+void batadv_tt_cache_destroy(void);
+
+/**
+ * batadv_tt_global_entry_put() - decrement the tt_global_entry refcounter and
+ * possibly release it
+ * @tt_global_entry: tt_global_entry to be free'd
+ */
+static inline void
+batadv_tt_global_entry_put(struct batadv_tt_global_entry *tt_global_entry)
+{
+ if (!tt_global_entry)
+ return;
+
+ kref_put(&tt_global_entry->common.refcount,
+ batadv_tt_global_entry_release);
+}
+
+#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --git a/net/batman-adv/tvlv.c b/net/batman-adv/tvlv.c
new file mode 100644
index 0000000000..2a583215d4
--- /dev/null
+++ b/net/batman-adv/tvlv.c
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Simon Wunderlich
+ */
+
+#include "main.h"
+
+#include <linux/byteorder/generic.h>
+#include <linux/container_of.h>
+#include <linux/etherdevice.h>
+#include <linux/gfp.h>
+#include <linux/if_ether.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/netdevice.h>
+#include <linux/pkt_sched.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <uapi/linux/batadv_packet.h>
+
+#include "originator.h"
+#include "send.h"
+#include "tvlv.h"
+
+/**
+ * batadv_tvlv_handler_release() - queue the tvlv handler for freeing after
+ *  the rcu grace period
+ * @ref: kref pointer of the tvlv
+ */
+static void batadv_tvlv_handler_release(struct kref *ref)
+{
+ struct batadv_tvlv_handler *tvlv_handler;
+
+ tvlv_handler = container_of(ref, struct batadv_tvlv_handler, refcount);
+ kfree_rcu(tvlv_handler, rcu);
+}
+
+/**
+ * batadv_tvlv_handler_put() - decrement the tvlv handler refcounter and
+ * possibly release it
+ * @tvlv_handler: the tvlv handler to free
+ */
+static void batadv_tvlv_handler_put(struct batadv_tvlv_handler *tvlv_handler)
+{
+ if (!tvlv_handler)
+ return;
+
+ kref_put(&tvlv_handler->refcount, batadv_tvlv_handler_release);
+}
+
+/**
+ * batadv_tvlv_handler_get() - retrieve tvlv handler from the tvlv handler list
+ * based on the provided type and version (both need to match)
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: tvlv handler type to look for
+ * @version: tvlv handler version to look for
+ *
+ * Return: tvlv handler if found or NULL otherwise.
+ */
+static struct batadv_tvlv_handler *
+batadv_tvlv_handler_get(struct batadv_priv *bat_priv, u8 type, u8 version)
+{
+ struct batadv_tvlv_handler *tvlv_handler_tmp, *tvlv_handler = NULL;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(tvlv_handler_tmp,
+ &bat_priv->tvlv.handler_list, list) {
+ if (tvlv_handler_tmp->type != type)
+ continue;
+
+ if (tvlv_handler_tmp->version != version)
+ continue;
+
+ if (!kref_get_unless_zero(&tvlv_handler_tmp->refcount))
+ continue;
+
+ tvlv_handler = tvlv_handler_tmp;
+ break;
+ }
+ rcu_read_unlock();
+
+ return tvlv_handler;
+}
+
+/**
+ * batadv_tvlv_container_release() - free the tvlv container
+ * @ref: kref pointer of the tvlv
+ */
+static void batadv_tvlv_container_release(struct kref *ref)
+{
+ struct batadv_tvlv_container *tvlv;
+
+ tvlv = container_of(ref, struct batadv_tvlv_container, refcount);
+ kfree(tvlv);
+}
+
+/**
+ * batadv_tvlv_container_put() - decrement the tvlv container refcounter and
+ * possibly release it
+ * @tvlv: the tvlv container to free
+ */
+static void batadv_tvlv_container_put(struct batadv_tvlv_container *tvlv)
+{
+ if (!tvlv)
+ return;
+
+ kref_put(&tvlv->refcount, batadv_tvlv_container_release);
+}
+
+/**
+ * batadv_tvlv_container_get() - retrieve tvlv container from the tvlv container
+ * list based on the provided type and version (both need to match)
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: tvlv container type to look for
+ * @version: tvlv container version to look for
+ *
+ * Has to be called with the appropriate locks being acquired
+ * (tvlv.container_list_lock).
+ *
+ * Return: tvlv container if found or NULL otherwise.
+ */
+static struct batadv_tvlv_container *
+batadv_tvlv_container_get(struct batadv_priv *bat_priv, u8 type, u8 version)
+{
+ struct batadv_tvlv_container *tvlv_tmp, *tvlv = NULL;
+
+ lockdep_assert_held(&bat_priv->tvlv.container_list_lock);
+
+ hlist_for_each_entry(tvlv_tmp, &bat_priv->tvlv.container_list, list) {
+ if (tvlv_tmp->tvlv_hdr.type != type)
+ continue;
+
+ if (tvlv_tmp->tvlv_hdr.version != version)
+ continue;
+
+ kref_get(&tvlv_tmp->refcount);
+ tvlv = tvlv_tmp;
+ break;
+ }
+
+ return tvlv;
+}
+
+/**
+ * batadv_tvlv_container_list_size() - calculate the size of the tvlv container
+ * list entries
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Has to be called with the appropriate locks being acquired
+ * (tvlv.container_list_lock).
+ *
+ * Return: size of all currently registered tvlv containers in bytes.
+ */
+static u16 batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
+{
+ struct batadv_tvlv_container *tvlv;
+ u16 tvlv_len = 0;
+
+ lockdep_assert_held(&bat_priv->tvlv.container_list_lock);
+
+ hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
+ tvlv_len += sizeof(struct batadv_tvlv_hdr);
+ tvlv_len += ntohs(tvlv->tvlv_hdr.len);
+ }
+
+ return tvlv_len;
+}
+
+/**
+ * batadv_tvlv_container_remove() - remove tvlv container from the tvlv
+ * container list
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tvlv: the tvlv container to be removed
+ *
+ * Has to be called with the appropriate locks being acquired
+ * (tvlv.container_list_lock).
+ */
+static void batadv_tvlv_container_remove(struct batadv_priv *bat_priv,
+ struct batadv_tvlv_container *tvlv)
+{
+ lockdep_assert_held(&bat_priv->tvlv.container_list_lock);
+
+ if (!tvlv)
+ return;
+
+ hlist_del(&tvlv->list);
+
+ /* first call to decrement the counter, second call to free */
+ batadv_tvlv_container_put(tvlv);
+ batadv_tvlv_container_put(tvlv);
+}
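+
+/* Note on the double put above: callers obtain @tvlv via
+ * batadv_tvlv_container_get(), which takes its own reference. The first
+ * put drops that lookup reference, the second one drops the reference
+ * held by the container list itself.
+ */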
+
+/**
+ * batadv_tvlv_container_unregister() - unregister tvlv container based on the
+ * provided type and version (both need to match)
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: tvlv container type to unregister
+ * @version: tvlv container version to unregister
+ */
+void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
+ u8 type, u8 version)
+{
+ struct batadv_tvlv_container *tvlv;
+
+ spin_lock_bh(&bat_priv->tvlv.container_list_lock);
+ tvlv = batadv_tvlv_container_get(bat_priv, type, version);
+ batadv_tvlv_container_remove(bat_priv, tvlv);
+ spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
+}
+
+/**
+ * batadv_tvlv_container_register() - register tvlv type, version and content
+ * to be propagated with each (primary interface) OGM
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: tvlv container type
+ * @version: tvlv container version
+ * @tvlv_value: tvlv container content
+ * @tvlv_value_len: tvlv container content length
+ *
+ * If a container of the same type and version was already registered the new
+ * content is going to replace the old one.
+ */
+void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
+ u8 type, u8 version,
+ void *tvlv_value, u16 tvlv_value_len)
+{
+ struct batadv_tvlv_container *tvlv_old, *tvlv_new;
+
+ if (!tvlv_value)
+ tvlv_value_len = 0;
+
+ tvlv_new = kzalloc(sizeof(*tvlv_new) + tvlv_value_len, GFP_ATOMIC);
+ if (!tvlv_new)
+ return;
+
+ tvlv_new->tvlv_hdr.version = version;
+ tvlv_new->tvlv_hdr.type = type;
+ tvlv_new->tvlv_hdr.len = htons(tvlv_value_len);
+
+ memcpy(tvlv_new + 1, tvlv_value, ntohs(tvlv_new->tvlv_hdr.len));
+ INIT_HLIST_NODE(&tvlv_new->list);
+ kref_init(&tvlv_new->refcount);
+
+ spin_lock_bh(&bat_priv->tvlv.container_list_lock);
+ tvlv_old = batadv_tvlv_container_get(bat_priv, type, version);
+ batadv_tvlv_container_remove(bat_priv, tvlv_old);
+
+ kref_get(&tvlv_new->refcount);
+ hlist_add_head(&tvlv_new->list, &bat_priv->tvlv.container_list);
+ spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
+
+ /* don't return reference to new tvlv_container */
+ batadv_tvlv_container_put(tvlv_new);
+}
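+
+/* Reference choreography of the function above: kref_init() starts the
+ * refcount at 1 for the creator, kref_get() adds the reference owned by
+ * container_list, and the final put drops the creator's reference -
+ * leaving the list as the only holder until lookup or removal.
+ */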
+
+/**
+ * batadv_tvlv_realloc_packet_buff() - reallocate packet buffer to accommodate
+ * requested packet size
+ * @packet_buff: packet buffer
+ * @packet_buff_len: packet buffer size
+ * @min_packet_len: requested packet minimum size
+ * @additional_packet_len: requested additional packet size on top of minimum
+ * size
+ *
+ * Return: true if the packet buffer could be changed to the requested size,
+ * false otherwise.
+ */
+static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff,
+ int *packet_buff_len,
+ int min_packet_len,
+ int additional_packet_len)
+{
+ unsigned char *new_buff;
+
+ new_buff = kmalloc(min_packet_len + additional_packet_len, GFP_ATOMIC);
+
+	/* keep the old buffer if kmalloc fails */
+ if (!new_buff)
+ return false;
+
+ memcpy(new_buff, *packet_buff, min_packet_len);
+ kfree(*packet_buff);
+ *packet_buff = new_buff;
+ *packet_buff_len = min_packet_len + additional_packet_len;
+
+ return true;
+}
+
+/**
+ * batadv_tvlv_container_ogm_append() - append tvlv container content to given
+ * OGM packet buffer
+ * @bat_priv: the bat priv with all the soft interface information
+ * @packet_buff: ogm packet buffer
+ * @packet_buff_len: ogm packet buffer size including ogm header and tvlv
+ * content
+ * @packet_min_len: ogm header size to be preserved for the OGM itself
+ *
+ * The ogm packet might be enlarged or shrunk depending on the current size
+ * and the size of the to-be-appended tvlv containers.
+ *
+ * Return: size of all appended tvlv containers in bytes.
+ */
+u16 batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
+ unsigned char **packet_buff,
+ int *packet_buff_len, int packet_min_len)
+{
+ struct batadv_tvlv_container *tvlv;
+ struct batadv_tvlv_hdr *tvlv_hdr;
+ u16 tvlv_value_len;
+ void *tvlv_value;
+ bool ret;
+
+ spin_lock_bh(&bat_priv->tvlv.container_list_lock);
+ tvlv_value_len = batadv_tvlv_container_list_size(bat_priv);
+
+ ret = batadv_tvlv_realloc_packet_buff(packet_buff, packet_buff_len,
+ packet_min_len, tvlv_value_len);
+
+ if (!ret)
+ goto end;
+
+ if (!tvlv_value_len)
+ goto end;
+
+ tvlv_value = (*packet_buff) + packet_min_len;
+
+ hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
+ tvlv_hdr = tvlv_value;
+ tvlv_hdr->type = tvlv->tvlv_hdr.type;
+ tvlv_hdr->version = tvlv->tvlv_hdr.version;
+ tvlv_hdr->len = tvlv->tvlv_hdr.len;
+ tvlv_value = tvlv_hdr + 1;
+ memcpy(tvlv_value, tvlv + 1, ntohs(tvlv->tvlv_hdr.len));
+ tvlv_value = (u8 *)tvlv_value + ntohs(tvlv->tvlv_hdr.len);
+ }
+
+end:
+ spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
+ return tvlv_value_len;
+}
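+
+/* Resulting OGM buffer layout (illustrative):
+ *
+ *   packet_buff:
+ *   [ OGM header (packet_min_len) ][ tvlv_hdr | value ][ tvlv_hdr | value ]...
+ *
+ * The returned length is the total size of all registered containers
+ * including their headers.
+ */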
+
+/**
+ * batadv_tvlv_call_handler() - parse the given tvlv buffer to call the
+ * appropriate handlers
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tvlv_handler: tvlv callback function handling the tvlv content
+ * @packet_type: indicates for which packet type the TVLV handler is called
+ * @orig_node: orig node emitting the ogm packet
+ * @skb: the skb the TVLV handler is called for
+ * @tvlv_value: tvlv content
+ * @tvlv_value_len: tvlv content length
+ *
+ * Return: NET_RX_SUCCESS if no matching handler was found, or the return
+ * value of the handler callback otherwise.
+ */
+static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
+ struct batadv_tvlv_handler *tvlv_handler,
+ u8 packet_type,
+ struct batadv_orig_node *orig_node,
+ struct sk_buff *skb, void *tvlv_value,
+ u16 tvlv_value_len)
+{
+ unsigned int tvlv_offset;
+ u8 *src, *dst;
+
+ if (!tvlv_handler)
+ return NET_RX_SUCCESS;
+
+ switch (packet_type) {
+ case BATADV_IV_OGM:
+ case BATADV_OGM2:
+ if (!tvlv_handler->ogm_handler)
+ return NET_RX_SUCCESS;
+
+ if (!orig_node)
+ return NET_RX_SUCCESS;
+
+ tvlv_handler->ogm_handler(bat_priv, orig_node,
+ BATADV_NO_FLAGS,
+ tvlv_value, tvlv_value_len);
+ tvlv_handler->flags |= BATADV_TVLV_HANDLER_OGM_CALLED;
+ break;
+ case BATADV_UNICAST_TVLV:
+ if (!skb)
+ return NET_RX_SUCCESS;
+
+ if (!tvlv_handler->unicast_handler)
+ return NET_RX_SUCCESS;
+
+ src = ((struct batadv_unicast_tvlv_packet *)skb->data)->src;
+ dst = ((struct batadv_unicast_tvlv_packet *)skb->data)->dst;
+
+ return tvlv_handler->unicast_handler(bat_priv, src,
+ dst, tvlv_value,
+ tvlv_value_len);
+ case BATADV_MCAST:
+ if (!skb)
+ return NET_RX_SUCCESS;
+
+ if (!tvlv_handler->mcast_handler)
+ return NET_RX_SUCCESS;
+
+ tvlv_offset = (unsigned char *)tvlv_value - skb->data;
+ skb_set_network_header(skb, tvlv_offset);
+ skb_set_transport_header(skb, tvlv_offset + tvlv_value_len);
+
+ return tvlv_handler->mcast_handler(bat_priv, skb);
+ }
+
+ return NET_RX_SUCCESS;
+}
+
+/**
+ * batadv_tvlv_containers_process() - parse the given tvlv buffer to call the
+ * appropriate handlers
+ * @bat_priv: the bat priv with all the soft interface information
+ * @packet_type: indicates for which packet type the TVLV handler is called
+ * @orig_node: orig node emitting the ogm packet
+ * @skb: the skb the TVLV handler is called for
+ * @tvlv_value: tvlv content
+ * @tvlv_value_len: tvlv content length
+ *
+ * Return: NET_RX_SUCCESS when processing an OGM, or the OR'ed return values
+ * of all called handler callbacks otherwise.
+ */
+int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
+ u8 packet_type,
+ struct batadv_orig_node *orig_node,
+ struct sk_buff *skb, void *tvlv_value,
+ u16 tvlv_value_len)
+{
+ struct batadv_tvlv_handler *tvlv_handler;
+ struct batadv_tvlv_hdr *tvlv_hdr;
+ u16 tvlv_value_cont_len;
+ u8 cifnotfound = BATADV_TVLV_HANDLER_OGM_CIFNOTFND;
+ int ret = NET_RX_SUCCESS;
+
+ while (tvlv_value_len >= sizeof(*tvlv_hdr)) {
+ tvlv_hdr = tvlv_value;
+ tvlv_value_cont_len = ntohs(tvlv_hdr->len);
+ tvlv_value = tvlv_hdr + 1;
+ tvlv_value_len -= sizeof(*tvlv_hdr);
+
+ if (tvlv_value_cont_len > tvlv_value_len)
+ break;
+
+ tvlv_handler = batadv_tvlv_handler_get(bat_priv,
+ tvlv_hdr->type,
+ tvlv_hdr->version);
+
+ ret |= batadv_tvlv_call_handler(bat_priv, tvlv_handler,
+ packet_type, orig_node, skb,
+ tvlv_value,
+ tvlv_value_cont_len);
+ batadv_tvlv_handler_put(tvlv_handler);
+ tvlv_value = (u8 *)tvlv_value + tvlv_value_cont_len;
+ tvlv_value_len -= tvlv_value_cont_len;
+ }
+
+ if (packet_type != BATADV_IV_OGM &&
+ packet_type != BATADV_OGM2)
+ return ret;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(tvlv_handler,
+ &bat_priv->tvlv.handler_list, list) {
+ if (!tvlv_handler->ogm_handler)
+ continue;
+
+ if ((tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND) &&
+ !(tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CALLED))
+ tvlv_handler->ogm_handler(bat_priv, orig_node,
+ cifnotfound, NULL, 0);
+
+ tvlv_handler->flags &= ~BATADV_TVLV_HANDLER_OGM_CALLED;
+ }
+ rcu_read_unlock();
+
+ return NET_RX_SUCCESS;
+}
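+
+/* Note on the final loop above: handlers registered with the
+ * BATADV_TVLV_HANDLER_OGM_CIFNOTFND flag are invoked once per OGM even
+ * when no matching container was found (with a NULL value and the
+ * CIFNOTFND flag set) - typically used to detect that an originator
+ * stopped announcing a capability.
+ */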
+
+/**
+ * batadv_tvlv_ogm_receive() - process an incoming ogm and call the appropriate
+ * handlers
+ * @bat_priv: the bat priv with all the soft interface information
+ * @batadv_ogm_packet: ogm packet containing the tvlv containers
+ * @orig_node: orig node emitting the ogm packet
+ */
+void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
+ struct batadv_ogm_packet *batadv_ogm_packet,
+ struct batadv_orig_node *orig_node)
+{
+ void *tvlv_value;
+ u16 tvlv_value_len;
+
+ if (!batadv_ogm_packet)
+ return;
+
+ tvlv_value_len = ntohs(batadv_ogm_packet->tvlv_len);
+ if (!tvlv_value_len)
+ return;
+
+ tvlv_value = batadv_ogm_packet + 1;
+
+ batadv_tvlv_containers_process(bat_priv, BATADV_IV_OGM, orig_node, NULL,
+ tvlv_value, tvlv_value_len);
+}
+
+/**
+ * batadv_tvlv_handler_register() - register tvlv handler based on the provided
+ * type and version (both need to match) for ogm tvlv payload and/or unicast
+ * payload
+ * @bat_priv: the bat priv with all the soft interface information
+ * @optr: ogm tvlv handler callback function. This function receives the orig
+ * node, flags and the tvlv content as argument to process.
+ * @uptr: unicast tvlv handler callback function. This function receives the
+ * source & destination of the unicast packet as well as the tvlv content
+ * to process.
+ * @mptr: multicast packet tvlv handler callback function. This function
+ * receives the full skb to process, with the skb network header pointing
+ * to the current tvlv and the skb transport header pointing to the first
+ * byte after the current tvlv.
+ * @type: tvlv handler type to be registered
+ * @version: tvlv handler version to be registered
+ * @flags: flags to enable or disable TVLV API behavior
+ */
+void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
+ void (*optr)(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig,
+ u8 flags,
+ void *tvlv_value,
+ u16 tvlv_value_len),
+ int (*uptr)(struct batadv_priv *bat_priv,
+ u8 *src, u8 *dst,
+ void *tvlv_value,
+ u16 tvlv_value_len),
+ int (*mptr)(struct batadv_priv *bat_priv,
+ struct sk_buff *skb),
+ u8 type, u8 version, u8 flags)
+{
+ struct batadv_tvlv_handler *tvlv_handler;
+
+ spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
+
+ tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
+ if (tvlv_handler) {
+ spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
+ batadv_tvlv_handler_put(tvlv_handler);
+ return;
+ }
+
+ tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
+ if (!tvlv_handler) {
+ spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
+ return;
+ }
+
+ tvlv_handler->ogm_handler = optr;
+ tvlv_handler->unicast_handler = uptr;
+ tvlv_handler->mcast_handler = mptr;
+ tvlv_handler->type = type;
+ tvlv_handler->version = version;
+ tvlv_handler->flags = flags;
+ kref_init(&tvlv_handler->refcount);
+ INIT_HLIST_NODE(&tvlv_handler->list);
+
+ kref_get(&tvlv_handler->refcount);
+ hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
+ spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
+
+ /* don't return reference to new tvlv_handler */
+ batadv_tvlv_handler_put(tvlv_handler);
+}
+
+/**
+ * batadv_tvlv_handler_unregister() - unregister tvlv handler based on the
+ * provided type and version (both need to match)
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: tvlv handler type to be unregistered
+ * @version: tvlv handler version to be unregistered
+ */
+void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
+ u8 type, u8 version)
+{
+ struct batadv_tvlv_handler *tvlv_handler;
+
+ tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
+ if (!tvlv_handler)
+ return;
+
+ batadv_tvlv_handler_put(tvlv_handler);
+ spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
+ hlist_del_rcu(&tvlv_handler->list);
+ spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
+ batadv_tvlv_handler_put(tvlv_handler);
+}
+
+/**
+ * batadv_tvlv_unicast_send() - send a unicast packet with tvlv payload to the
+ * specified host
+ * @bat_priv: the bat priv with all the soft interface information
+ * @src: source mac address of the unicast packet
+ * @dst: destination mac address of the unicast packet
+ * @type: tvlv type
+ * @version: tvlv version
+ * @tvlv_value: tvlv content
+ * @tvlv_value_len: tvlv content length
+ */
+void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, const u8 *src,
+ const u8 *dst, u8 type, u8 version,
+ void *tvlv_value, u16 tvlv_value_len)
+{
+ struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
+ struct batadv_tvlv_hdr *tvlv_hdr;
+ struct batadv_orig_node *orig_node;
+ struct sk_buff *skb;
+ unsigned char *tvlv_buff;
+ unsigned int tvlv_len;
+ ssize_t hdr_len = sizeof(*unicast_tvlv_packet);
+
+ orig_node = batadv_orig_hash_find(bat_priv, dst);
+ if (!orig_node)
+ return;
+
+ tvlv_len = sizeof(*tvlv_hdr) + tvlv_value_len;
+
+ skb = netdev_alloc_skb_ip_align(NULL, ETH_HLEN + hdr_len + tvlv_len);
+ if (!skb)
+ goto out;
+
+ skb->priority = TC_PRIO_CONTROL;
+ skb_reserve(skb, ETH_HLEN);
+ tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len);
+ unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff;
+ unicast_tvlv_packet->packet_type = BATADV_UNICAST_TVLV;
+ unicast_tvlv_packet->version = BATADV_COMPAT_VERSION;
+ unicast_tvlv_packet->ttl = BATADV_TTL;
+ unicast_tvlv_packet->reserved = 0;
+ unicast_tvlv_packet->tvlv_len = htons(tvlv_len);
+ unicast_tvlv_packet->align = 0;
+ ether_addr_copy(unicast_tvlv_packet->src, src);
+ ether_addr_copy(unicast_tvlv_packet->dst, dst);
+
+ tvlv_buff = (unsigned char *)(unicast_tvlv_packet + 1);
+ tvlv_hdr = (struct batadv_tvlv_hdr *)tvlv_buff;
+ tvlv_hdr->version = version;
+ tvlv_hdr->type = type;
+ tvlv_hdr->len = htons(tvlv_value_len);
+ tvlv_buff += sizeof(*tvlv_hdr);
+ memcpy(tvlv_buff, tvlv_value, tvlv_value_len);
+
+ batadv_send_skb_to_orig(skb, orig_node, NULL);
+out:
+ batadv_orig_node_put(orig_node);
+}
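+
+/* Wire layout of the packet built above (illustrative):
+ *
+ *   [ batadv_unicast_tvlv_packet ][ batadv_tvlv_hdr ][ tvlv_value ]
+ *
+ * tvlv_len in the packet header covers the tvlv header plus its value,
+ * i.e. sizeof(*tvlv_hdr) + tvlv_value_len.
+ */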
diff --git a/net/batman-adv/tvlv.h b/net/batman-adv/tvlv.h
new file mode 100644
index 0000000000..e5697230d9
--- /dev/null
+++ b/net/batman-adv/tvlv.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Simon Wunderlich
+ */
+
+#ifndef _NET_BATMAN_ADV_TVLV_H_
+#define _NET_BATMAN_ADV_TVLV_H_
+
+#include "main.h"
+
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <uapi/linux/batadv_packet.h>
+
+void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
+ u8 type, u8 version,
+ void *tvlv_value, u16 tvlv_value_len);
+u16 batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
+ unsigned char **packet_buff,
+ int *packet_buff_len, int packet_min_len);
+void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
+ struct batadv_ogm_packet *batadv_ogm_packet,
+ struct batadv_orig_node *orig_node);
+void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
+ u8 type, u8 version);
+
+void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
+ void (*optr)(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig,
+ u8 flags,
+ void *tvlv_value,
+ u16 tvlv_value_len),
+ int (*uptr)(struct batadv_priv *bat_priv,
+ u8 *src, u8 *dst,
+ void *tvlv_value,
+ u16 tvlv_value_len),
+ int (*mptr)(struct batadv_priv *bat_priv,
+ struct sk_buff *skb),
+ u8 type, u8 version, u8 flags);
+void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
+ u8 type, u8 version);
+int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
+ u8 packet_type,
+ struct batadv_orig_node *orig_node,
+ struct sk_buff *skb, void *tvlv_buff,
+ u16 tvlv_buff_len);
+void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, const u8 *src,
+ const u8 *dst, u8 type, u8 version,
+ void *tvlv_value, u16 tvlv_value_len);
+
+#endif /* _NET_BATMAN_ADV_TVLV_H_ */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
new file mode 100644
index 0000000000..17d5ea1d8e
--- /dev/null
+++ b/net/batman-adv/types.h
@@ -0,0 +1,2383 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Simon Wunderlich
+ */
+
+#ifndef _NET_BATMAN_ADV_TYPES_H_
+#define _NET_BATMAN_ADV_TYPES_H_
+
+#ifndef _NET_BATMAN_ADV_MAIN_H_
+#error only "main.h" can be included directly
+#endif
+
+#include <linux/average.h>
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/if.h>
+#include <linux/if_ether.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/netlink.h>
+#include <linux/sched.h> /* for linux/wait.h */
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <uapi/linux/batadv_packet.h>
+#include <uapi/linux/batman_adv.h>
+
+#ifdef CONFIG_BATMAN_ADV_DAT
+
+/**
+ * typedef batadv_dat_addr_t - type used for all DHT addresses
+ *
+ * If it is changed, BATADV_DAT_ADDR_MAX is changed as well.
+ *
+ * *Please be careful: batadv_dat_addr_t must be UNSIGNED*
+ */
+typedef u16 batadv_dat_addr_t;
+
+#endif /* CONFIG_BATMAN_ADV_DAT */
+
+/**
+ * enum batadv_dhcp_recipient - dhcp destination
+ */
+enum batadv_dhcp_recipient {
+ /** @BATADV_DHCP_NO: packet is not a dhcp message */
+ BATADV_DHCP_NO = 0,
+
+ /** @BATADV_DHCP_TO_SERVER: dhcp message is directed to a server */
+ BATADV_DHCP_TO_SERVER,
+
+ /** @BATADV_DHCP_TO_CLIENT: dhcp message is directed to a client */
+ BATADV_DHCP_TO_CLIENT,
+};
+
+/**
+ * BATADV_TT_REMOTE_MASK - bitmask selecting the flags that are sent over the
+ * wire only
+ */
+#define BATADV_TT_REMOTE_MASK 0x00FF
+
+/**
+ * BATADV_TT_SYNC_MASK - bitmask of the flags that need to be kept in sync
+ * among the nodes. These flags are used to compute the global/local CRC
+ */
+#define BATADV_TT_SYNC_MASK 0x00F0
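+
+/* Note: BATADV_TT_SYNC_MASK (0x00F0) lies entirely within
+ * BATADV_TT_REMOTE_MASK (0x00FF), i.e. every CRC-relevant flag is also
+ * transmitted over the wire; batadv_tt_init() asserts the overlap at
+ * build time via BUILD_BUG_ON().
+ */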
+
+/**
+ * struct batadv_hard_iface_bat_iv - per hard-interface B.A.T.M.A.N. IV data
+ */
+struct batadv_hard_iface_bat_iv {
+ /** @ogm_buff: buffer holding the OGM packet */
+ unsigned char *ogm_buff;
+
+ /** @ogm_buff_len: length of the OGM packet buffer */
+ int ogm_buff_len;
+
+ /** @ogm_seqno: OGM sequence number - used to identify each OGM */
+ atomic_t ogm_seqno;
+
+ /** @ogm_buff_mutex: lock protecting ogm_buff and ogm_buff_len */
+ struct mutex ogm_buff_mutex;
+};
+
+/**
+ * enum batadv_v_hard_iface_flags - interface flags useful to B.A.T.M.A.N. V
+ */
+enum batadv_v_hard_iface_flags {
+ /**
+ * @BATADV_FULL_DUPLEX: tells if the connection over this link is
+ * full-duplex
+ */
+ BATADV_FULL_DUPLEX = BIT(0),
+
+ /**
+ * @BATADV_WARNING_DEFAULT: tells whether we have warned the user that
+ * no throughput data is available for this interface and that default
+ * values are assumed.
+ */
+ BATADV_WARNING_DEFAULT = BIT(1),
+};
+
+/**
+ * struct batadv_hard_iface_bat_v - per hard-interface B.A.T.M.A.N. V data
+ */
+struct batadv_hard_iface_bat_v {
+ /** @elp_interval: time interval between two ELP transmissions */
+ atomic_t elp_interval;
+
+ /** @elp_seqno: current ELP sequence number */
+ atomic_t elp_seqno;
+
+ /** @elp_skb: base skb containing the ELP message to send */
+ struct sk_buff *elp_skb;
+
+ /** @elp_wq: workqueue used to schedule ELP transmissions */
+ struct delayed_work elp_wq;
+
+ /** @aggr_wq: workqueue used to transmit queued OGM packets */
+ struct delayed_work aggr_wq;
+
+	/** @aggr_list: queue of OGM packets waiting to be aggregated */
+ struct sk_buff_head aggr_list;
+
+ /** @aggr_len: size of the OGM aggregate (excluding ethernet header) */
+ unsigned int aggr_len;
+
+ /**
+ * @throughput_override: throughput override to disable link
+ * auto-detection
+ */
+ atomic_t throughput_override;
+
+ /** @flags: interface specific flags */
+ u8 flags;
+};
+
+/**
+ * enum batadv_hard_iface_wifi_flags - Flags describing the wifi configuration
+ * of a batadv_hard_iface
+ */
+enum batadv_hard_iface_wifi_flags {
+ /** @BATADV_HARDIF_WIFI_WEXT_DIRECT: it is a wext wifi device */
+ BATADV_HARDIF_WIFI_WEXT_DIRECT = BIT(0),
+
+ /** @BATADV_HARDIF_WIFI_CFG80211_DIRECT: it is a cfg80211 wifi device */
+ BATADV_HARDIF_WIFI_CFG80211_DIRECT = BIT(1),
+
+ /**
+ * @BATADV_HARDIF_WIFI_WEXT_INDIRECT: link device is a wext wifi device
+ */
+ BATADV_HARDIF_WIFI_WEXT_INDIRECT = BIT(2),
+
+ /**
+ * @BATADV_HARDIF_WIFI_CFG80211_INDIRECT: link device is a cfg80211 wifi
+ * device
+ */
+ BATADV_HARDIF_WIFI_CFG80211_INDIRECT = BIT(3),
+};
+
+/**
+ * struct batadv_hard_iface - network device known to batman-adv
+ */
+struct batadv_hard_iface {
+ /** @list: list node for batadv_hardif_list */
+ struct list_head list;
+
+ /** @if_status: status of the interface for batman-adv */
+ char if_status;
+
+ /**
+ * @num_bcasts: number of payload re-broadcasts on this interface (ARQ)
+ */
+ u8 num_bcasts;
+
+ /**
+ * @wifi_flags: flags whether this is (directly or indirectly) a wifi
+ * interface
+ */
+ u32 wifi_flags;
+
+ /** @net_dev: pointer to the net_device */
+ struct net_device *net_dev;
+
+	/** @refcount: number of contexts the object is used in */
+ struct kref refcount;
+
+ /**
+ * @batman_adv_ptype: packet type describing packets that should be
+ * processed by batman-adv for this interface
+ */
+ struct packet_type batman_adv_ptype;
+
+ /**
+ * @soft_iface: the batman-adv interface which uses this network
+ * interface
+ */
+ struct net_device *soft_iface;
+
+ /** @rcu: struct used for freeing in an RCU-safe manner */
+ struct rcu_head rcu;
+
+ /**
+ * @hop_penalty: penalty which will be applied to the tq-field
+ * of an OGM received via this interface
+ */
+ atomic_t hop_penalty;
+
+ /** @bat_iv: per hard-interface B.A.T.M.A.N. IV data */
+ struct batadv_hard_iface_bat_iv bat_iv;
+
+#ifdef CONFIG_BATMAN_ADV_BATMAN_V
+ /** @bat_v: per hard-interface B.A.T.M.A.N. V data */
+ struct batadv_hard_iface_bat_v bat_v;
+#endif
+
+ /**
+ * @neigh_list: list of unique single hop neighbors via this interface
+ */
+ struct hlist_head neigh_list;
+
+ /** @neigh_list_lock: lock protecting neigh_list */
+ spinlock_t neigh_list_lock;
+};
+
+/**
+ * struct batadv_orig_ifinfo_bat_iv - B.A.T.M.A.N. IV private orig_ifinfo
+ * members
+ */
+struct batadv_orig_ifinfo_bat_iv {
+ /**
+ * @bcast_own: bitfield which counts the number of our OGMs this
+ * orig_node rebroadcasted "back" to us (relative to last_real_seqno)
+ */
+ DECLARE_BITMAP(bcast_own, BATADV_TQ_LOCAL_WINDOW_SIZE);
+
+ /** @bcast_own_sum: sum of bcast_own */
+ u8 bcast_own_sum;
+};
+
+/**
+ * struct batadv_orig_ifinfo - originator info per outgoing interface
+ */
+struct batadv_orig_ifinfo {
+ /** @list: list node for &batadv_orig_node.ifinfo_list */
+ struct hlist_node list;
+
+ /** @if_outgoing: pointer to outgoing hard-interface */
+ struct batadv_hard_iface *if_outgoing;
+
+ /** @router: router that should be used to reach this originator */
+ struct batadv_neigh_node __rcu *router;
+
+ /** @last_real_seqno: last and best known sequence number */
+ u32 last_real_seqno;
+
+ /** @last_ttl: ttl of last received packet */
+ u8 last_ttl;
+
+ /** @last_seqno_forwarded: seqno of the OGM which was forwarded last */
+ u32 last_seqno_forwarded;
+
+ /** @batman_seqno_reset: time when the batman seqno window was reset */
+ unsigned long batman_seqno_reset;
+
+ /** @bat_iv: B.A.T.M.A.N. IV private structure */
+ struct batadv_orig_ifinfo_bat_iv bat_iv;
+
+	/** @refcount: number of contexts the object is used in */
+ struct kref refcount;
+
+ /** @rcu: struct used for freeing in an RCU-safe manner */
+ struct rcu_head rcu;
+};
+
+/**
+ * struct batadv_frag_table_entry - head in the fragment buffer table
+ */
+struct batadv_frag_table_entry {
+ /** @fragment_list: head of list with fragments */
+ struct hlist_head fragment_list;
+
+ /** @lock: lock to protect the list of fragments */
+ spinlock_t lock;
+
+	/** @timestamp: time (in jiffies) of the last received fragment */
+ unsigned long timestamp;
+
+ /** @seqno: sequence number of the fragments in the list */
+ u16 seqno;
+
+ /** @size: accumulated size of packets in list */
+ u16 size;
+
+ /** @total_size: expected size of the assembled packet */
+ u16 total_size;
+};
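+
+/*
+ * Reassembly sketch (hypothetical helper illustrating the size fields):
+ * fragments are buffered until the accumulated payload size matches the
+ * advertised size of the original packet.
+ */
+static inline bool example_frag_chain_complete(const struct batadv_frag_table_entry *entry)
+{
+	return entry->size == entry->total_size;
+}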
+
+/**
+ * struct batadv_frag_list_entry - entry in a list of fragments
+ */
+struct batadv_frag_list_entry {
+ /** @list: list node information */
+ struct hlist_node list;
+
+ /** @skb: fragment */
+ struct sk_buff *skb;
+
+ /** @no: fragment number in the set */
+ u8 no;
+};
+
+/**
+ * struct batadv_vlan_tt - VLAN specific TT attributes
+ */
+struct batadv_vlan_tt {
+ /** @crc: CRC32 checksum of the entries belonging to this vlan */
+ u32 crc;
+
+ /** @num_entries: number of TT entries for this VLAN */
+ atomic_t num_entries;
+};
+
+/**
+ * struct batadv_orig_node_vlan - VLAN specific data per orig_node
+ */
+struct batadv_orig_node_vlan {
+ /** @vid: the VLAN identifier */
+ unsigned short vid;
+
+ /** @tt: VLAN specific TT attributes */
+ struct batadv_vlan_tt tt;
+
+ /** @list: list node for &batadv_orig_node.vlan_list */
+ struct hlist_node list;
+
+ /**
+	 * @refcount: number of contexts where this object is currently in use
+ */
+ struct kref refcount;
+
+	/** @rcu: struct used for freeing in an RCU-safe manner */
+ struct rcu_head rcu;
+};
+
+/**
+ * struct batadv_orig_bat_iv - B.A.T.M.A.N. IV private orig_node members
+ */
+struct batadv_orig_bat_iv {
+ /**
+ * @ogm_cnt_lock: lock protecting &batadv_orig_ifinfo_bat_iv.bcast_own,
+ * &batadv_orig_ifinfo_bat_iv.bcast_own_sum,
+ * &batadv_neigh_ifinfo_bat_iv.bat_iv.real_bits and
+ * &batadv_neigh_ifinfo_bat_iv.real_packet_count
+ */
+ spinlock_t ogm_cnt_lock;
+};
+
+/**
+ * struct batadv_orig_node - structure for orig_list maintaining nodes of mesh
+ */
+struct batadv_orig_node {
+ /** @orig: originator ethernet address */
+ u8 orig[ETH_ALEN];
+
+ /** @ifinfo_list: list for routers per outgoing interface */
+ struct hlist_head ifinfo_list;
+
+ /**
+ * @last_bonding_candidate: pointer to last ifinfo of last used router
+ */
+ struct batadv_orig_ifinfo *last_bonding_candidate;
+
+#ifdef CONFIG_BATMAN_ADV_DAT
+ /** @dat_addr: address of the orig node in the distributed hash */
+ batadv_dat_addr_t dat_addr;
+#endif
+
+ /** @last_seen: time when last packet from this node was received */
+ unsigned long last_seen;
+
+ /**
+ * @bcast_seqno_reset: time when the broadcast seqno window was reset
+ */
+ unsigned long bcast_seqno_reset;
+
+#ifdef CONFIG_BATMAN_ADV_MCAST
+ /**
+ * @mcast_handler_lock: synchronizes mcast-capability and -flag changes
+ */
+ spinlock_t mcast_handler_lock;
+
+ /** @mcast_flags: multicast flags announced by the orig node */
+ u8 mcast_flags;
+
+ /**
+ * @mcast_want_all_unsnoopables_node: a list node for the
+ * mcast.want_all_unsnoopables list
+ */
+ struct hlist_node mcast_want_all_unsnoopables_node;
+
+ /**
+ * @mcast_want_all_ipv4_node: a list node for the mcast.want_all_ipv4
+ * list
+ */
+ struct hlist_node mcast_want_all_ipv4_node;
+ /**
+ * @mcast_want_all_ipv6_node: a list node for the mcast.want_all_ipv6
+ * list
+ */
+ struct hlist_node mcast_want_all_ipv6_node;
+
+ /**
+ * @mcast_want_all_rtr4_node: a list node for the mcast.want_all_rtr4
+ * list
+ */
+ struct hlist_node mcast_want_all_rtr4_node;
+ /**
+ * @mcast_want_all_rtr6_node: a list node for the mcast.want_all_rtr6
+ * list
+ */
+ struct hlist_node mcast_want_all_rtr6_node;
+#endif
+
+ /** @capabilities: announced capabilities of this originator */
+ unsigned long capabilities;
+
+ /**
+ * @capa_initialized: bitfield to remember whether a capability was
+ * initialized
+ */
+ unsigned long capa_initialized;
+
+ /** @last_ttvn: last seen translation table version number */
+ atomic_t last_ttvn;
+
+ /** @tt_buff: last tt changeset this node received from the orig node */
+ unsigned char *tt_buff;
+
+ /**
+ * @tt_buff_len: length of the last tt changeset this node received
+ * from the orig node
+ */
+ s16 tt_buff_len;
+
+ /** @tt_buff_lock: lock that protects tt_buff and tt_buff_len */
+ spinlock_t tt_buff_lock;
+
+ /**
+	 * @tt_lock: avoids concurrent reads from and writes to the table. A
+	 * table update is made up of two operations (data structure update
+	 * and metadata (CRC/TTVN) recalculation) which have to be executed
+	 * atomically to prevent another thread from reading the
+	 * table/metadata in between.
+ */
+ spinlock_t tt_lock;
+
+ /**
+	 * @bcast_bits: bitfield indicating which payload broadcasts
+	 * originating from this orig node this host has already seen
+	 * (relative to last_bcast_seqno)
+ */
+ DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
+
+ /**
+ * @last_bcast_seqno: last broadcast sequence number received by this
+ * host
+ */
+ u32 last_bcast_seqno;
+
+ /**
+ * @neigh_list: list of potential next hop neighbor towards this orig
+ * node
+ */
+ struct hlist_head neigh_list;
+
+ /**
+ * @neigh_list_lock: lock protecting neigh_list, ifinfo_list,
+ * last_bonding_candidate and router
+ */
+ spinlock_t neigh_list_lock;
+
+ /** @hash_entry: hlist node for &batadv_priv.orig_hash */
+ struct hlist_node hash_entry;
+
+ /** @bat_priv: pointer to soft_iface this orig node belongs to */
+ struct batadv_priv *bat_priv;
+
+ /** @bcast_seqno_lock: lock protecting bcast_bits & last_bcast_seqno */
+ spinlock_t bcast_seqno_lock;
+
+	/** @refcount: number of contexts the object is used in */
+ struct kref refcount;
+
+ /** @rcu: struct used for freeing in an RCU-safe manner */
+ struct rcu_head rcu;
+
+#ifdef CONFIG_BATMAN_ADV_NC
+ /** @in_coding_list: list of nodes this orig can hear */
+ struct list_head in_coding_list;
+
+ /** @out_coding_list: list of nodes that can hear this orig */
+ struct list_head out_coding_list;
+
+ /** @in_coding_list_lock: protects in_coding_list */
+ spinlock_t in_coding_list_lock;
+
+ /** @out_coding_list_lock: protects out_coding_list */
+ spinlock_t out_coding_list_lock;
+#endif
+
+ /** @fragments: array with heads for fragment chains */
+ struct batadv_frag_table_entry fragments[BATADV_FRAG_BUFFER_COUNT];
+
+ /**
+ * @vlan_list: a list of orig_node_vlan structs, one per VLAN served by
+ * the originator represented by this object
+ */
+ struct hlist_head vlan_list;
+
+ /** @vlan_list_lock: lock protecting vlan_list */
+ spinlock_t vlan_list_lock;
+
+ /** @bat_iv: B.A.T.M.A.N. IV private structure */
+ struct batadv_orig_bat_iv bat_iv;
+};
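+
+/*
+ * Lifetime sketch for the recurring refcount/rcu pair (illustrative only;
+ * the real release path also drops nested references and frees sub-lists):
+ * the final kref_put() defers the actual free via RCU, so readers under
+ * rcu_read_lock() never see the object vanish beneath them.
+ */
+static void example_orig_node_release(struct kref *ref)
+{
+	struct batadv_orig_node *orig_node;
+
+	orig_node = container_of(ref, struct batadv_orig_node, refcount);
+	kfree_rcu(orig_node, rcu);
+}
+
+static inline void example_orig_node_put(struct batadv_orig_node *orig_node)
+{
+	kref_put(&orig_node->refcount, example_orig_node_release);
+}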
+
+/**
+ * enum batadv_orig_capabilities - orig node capabilities
+ */
+enum batadv_orig_capabilities {
+ /**
+ * @BATADV_ORIG_CAPA_HAS_DAT: orig node has distributed arp table
+ * enabled
+ */
+ BATADV_ORIG_CAPA_HAS_DAT,
+
+ /** @BATADV_ORIG_CAPA_HAS_NC: orig node has network coding enabled */
+ BATADV_ORIG_CAPA_HAS_NC,
+
+ /** @BATADV_ORIG_CAPA_HAS_TT: orig node has tt capability */
+ BATADV_ORIG_CAPA_HAS_TT,
+
+ /**
+ * @BATADV_ORIG_CAPA_HAS_MCAST: orig node has some multicast capability
+ * (= orig node announces a tvlv of type BATADV_TVLV_MCAST)
+ */
+ BATADV_ORIG_CAPA_HAS_MCAST,
+};
+
+/**
+ * struct batadv_gw_node - structure for orig nodes announcing gw capabilities
+ */
+struct batadv_gw_node {
+ /** @list: list node for &batadv_priv_gw.list */
+ struct hlist_node list;
+
+ /** @orig_node: pointer to corresponding orig node */
+ struct batadv_orig_node *orig_node;
+
+ /** @bandwidth_down: advertised uplink download bandwidth */
+ u32 bandwidth_down;
+
+ /** @bandwidth_up: advertised uplink upload bandwidth */
+ u32 bandwidth_up;
+
+	/** @refcount: number of contexts the object is used in */
+ struct kref refcount;
+
+ /** @rcu: struct used for freeing in an RCU-safe manner */
+ struct rcu_head rcu;
+};
+
+DECLARE_EWMA(throughput, 10, 8)
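+
+/*
+ * DECLARE_EWMA() (include/linux/average.h) expands to struct
+ * ewma_throughput plus ewma_throughput_init/_add/_read helpers: 10 bits of
+ * fixed-point precision, each new sample weighted 1/8 against the old
+ * average. A minimal usage sketch:
+ */
+static inline unsigned long example_ewma_usage(void)
+{
+	struct ewma_throughput tp;
+
+	ewma_throughput_init(&tp);
+	ewma_throughput_add(&tp, 100);	/* feed throughput samples */
+	ewma_throughput_add(&tp, 300);
+
+	return ewma_throughput_read(&tp);	/* smoothed value */
+}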
+
+/**
+ * struct batadv_hardif_neigh_node_bat_v - B.A.T.M.A.N. V private neighbor
+ * information
+ */
+struct batadv_hardif_neigh_node_bat_v {
+ /** @throughput: ewma link throughput towards this neighbor */
+ struct ewma_throughput throughput;
+
+ /** @elp_interval: time interval between two ELP transmissions */
+ u32 elp_interval;
+
+ /** @elp_latest_seqno: latest and best known ELP sequence number */
+ u32 elp_latest_seqno;
+
+ /**
+ * @last_unicast_tx: when the last unicast packet has been sent to this
+ * neighbor
+ */
+ unsigned long last_unicast_tx;
+
+ /** @metric_work: work queue callback item for metric update */
+ struct work_struct metric_work;
+};
+
+/**
+ * struct batadv_hardif_neigh_node - unique neighbor per hard-interface
+ */
+struct batadv_hardif_neigh_node {
+ /** @list: list node for &batadv_hard_iface.neigh_list */
+ struct hlist_node list;
+
+ /** @addr: the MAC address of the neighboring interface */
+ u8 addr[ETH_ALEN];
+
+ /**
+ * @orig: the address of the originator this neighbor node belongs to
+ */
+ u8 orig[ETH_ALEN];
+
+ /** @if_incoming: pointer to incoming hard-interface */
+ struct batadv_hard_iface *if_incoming;
+
+ /** @last_seen: when last packet via this neighbor was received */
+ unsigned long last_seen;
+
+#ifdef CONFIG_BATMAN_ADV_BATMAN_V
+ /** @bat_v: B.A.T.M.A.N. V private data */
+ struct batadv_hardif_neigh_node_bat_v bat_v;
+#endif
+
+	/** @refcount: number of contexts the object is used in */
+ struct kref refcount;
+
+	/** @rcu: struct used for freeing in an RCU-safe manner */
+ struct rcu_head rcu;
+};
+
+/**
+ * struct batadv_neigh_node - structure for single hops neighbors
+ */
+struct batadv_neigh_node {
+ /** @list: list node for &batadv_orig_node.neigh_list */
+ struct hlist_node list;
+
+ /** @orig_node: pointer to corresponding orig_node */
+ struct batadv_orig_node *orig_node;
+
+ /** @addr: the MAC address of the neighboring interface */
+ u8 addr[ETH_ALEN];
+
+ /** @ifinfo_list: list for routing metrics per outgoing interface */
+ struct hlist_head ifinfo_list;
+
+ /** @ifinfo_lock: lock protecting ifinfo_list and its members */
+ spinlock_t ifinfo_lock;
+
+ /** @if_incoming: pointer to incoming hard-interface */
+ struct batadv_hard_iface *if_incoming;
+
+ /** @last_seen: when last packet via this neighbor was received */
+ unsigned long last_seen;
+
+ /** @hardif_neigh: hardif_neigh of this neighbor */
+ struct batadv_hardif_neigh_node *hardif_neigh;
+
+	/** @refcount: number of contexts the object is used in */
+ struct kref refcount;
+
+ /** @rcu: struct used for freeing in an RCU-safe manner */
+ struct rcu_head rcu;
+};
+
+/**
+ * struct batadv_neigh_ifinfo_bat_iv - neighbor information per outgoing
+ * interface for B.A.T.M.A.N. IV
+ */
+struct batadv_neigh_ifinfo_bat_iv {
+ /** @tq_recv: ring buffer of received TQ values from this neigh node */
+ u8 tq_recv[BATADV_TQ_GLOBAL_WINDOW_SIZE];
+
+ /** @tq_index: ring buffer index */
+ u8 tq_index;
+
+ /**
+ * @tq_avg: averaged tq of all tq values in the ring buffer (tq_recv)
+ */
+ u8 tq_avg;
+
+ /**
+	 * @real_bits: bitfield marking which OGMs were received from this
+	 * neigh node (relative to orig_node->last_real_seqno)
+ */
+ DECLARE_BITMAP(real_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
+
+ /** @real_packet_count: counted result of real_bits */
+ u8 real_packet_count;
+};
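+
+/*
+ * Ring-buffer sketch for the fields above (the in-tree helpers in
+ * bat_iv_ogm.c follow the same idea but average only over samples actually
+ * received): store the newest TQ value at tq_index, advance the index
+ * modulo the window size, then recompute tq_avg.
+ */
+static inline void example_tq_ring_buffer_add(struct batadv_neigh_ifinfo_bat_iv *bat_iv,
+					      u8 tq_value)
+{
+	unsigned int sum = 0;
+	int i;
+
+	bat_iv->tq_recv[bat_iv->tq_index] = tq_value;
+	bat_iv->tq_index = (bat_iv->tq_index + 1) % BATADV_TQ_GLOBAL_WINDOW_SIZE;
+
+	for (i = 0; i < BATADV_TQ_GLOBAL_WINDOW_SIZE; i++)
+		sum += bat_iv->tq_recv[i];
+
+	bat_iv->tq_avg = sum / BATADV_TQ_GLOBAL_WINDOW_SIZE;
+}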
+
+/**
+ * struct batadv_neigh_ifinfo_bat_v - neighbor information per outgoing
+ * interface for B.A.T.M.A.N. V
+ */
+struct batadv_neigh_ifinfo_bat_v {
+ /**
+ * @throughput: last throughput metric received from originator via this
+ * neigh
+ */
+ u32 throughput;
+
+ /** @last_seqno: last sequence number known for this neighbor */
+ u32 last_seqno;
+};
+
+/**
+ * struct batadv_neigh_ifinfo - neighbor information per outgoing interface
+ */
+struct batadv_neigh_ifinfo {
+ /** @list: list node for &batadv_neigh_node.ifinfo_list */
+ struct hlist_node list;
+
+ /** @if_outgoing: pointer to outgoing hard-interface */
+ struct batadv_hard_iface *if_outgoing;
+
+ /** @bat_iv: B.A.T.M.A.N. IV private structure */
+ struct batadv_neigh_ifinfo_bat_iv bat_iv;
+
+#ifdef CONFIG_BATMAN_ADV_BATMAN_V
+ /** @bat_v: B.A.T.M.A.N. V private data */
+ struct batadv_neigh_ifinfo_bat_v bat_v;
+#endif
+
+ /** @last_ttl: last received ttl from this neigh node */
+ u8 last_ttl;
+
+	/** @refcount: number of contexts the object is used in */
+ struct kref refcount;
+
+	/** @rcu: struct used for freeing in an RCU-safe manner */
+ struct rcu_head rcu;
+};
+
+#ifdef CONFIG_BATMAN_ADV_BLA
+
+/**
+ * struct batadv_bcast_duplist_entry - structure for LAN broadcast suppression
+ */
+struct batadv_bcast_duplist_entry {
+ /** @orig: mac address of orig node originating the broadcast */
+ u8 orig[ETH_ALEN];
+
+ /** @crc: crc32 checksum of broadcast payload */
+ __be32 crc;
+
+ /** @entrytime: time when the broadcast packet was received */
+ unsigned long entrytime;
+};
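+
+/*
+ * Duplicate-check sketch (hypothetical helper; the in-tree logic in
+ * bridge_loop_avoidance.c additionally manages the ring index and entry
+ * refresh): a broadcast counts as a duplicate if a still-fresh entry
+ * carries the same payload checksum but a different originator.
+ */
+static inline bool example_bcast_is_dup(const struct batadv_bcast_duplist_entry *entry,
+					const u8 *orig, __be32 payload_crc)
+{
+	if (batadv_has_timed_out(entry->entrytime, BATADV_DUPLIST_TIMEOUT))
+		return false;
+
+	if (entry->crc != payload_crc)
+		return false;
+
+	return !batadv_compare_eth(entry->orig, orig);
+}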
+#endif
+
+/**
+ * enum batadv_counters - indices for traffic counters
+ */
+enum batadv_counters {
+ /** @BATADV_CNT_TX: transmitted payload traffic packet counter */
+ BATADV_CNT_TX,
+
+ /** @BATADV_CNT_TX_BYTES: transmitted payload traffic bytes counter */
+ BATADV_CNT_TX_BYTES,
+
+ /**
+ * @BATADV_CNT_TX_DROPPED: dropped transmission payload traffic packet
+ * counter
+ */
+ BATADV_CNT_TX_DROPPED,
+
+ /** @BATADV_CNT_RX: received payload traffic packet counter */
+ BATADV_CNT_RX,
+
+ /** @BATADV_CNT_RX_BYTES: received payload traffic bytes counter */
+ BATADV_CNT_RX_BYTES,
+
+ /** @BATADV_CNT_FORWARD: forwarded payload traffic packet counter */
+ BATADV_CNT_FORWARD,
+
+ /**
+ * @BATADV_CNT_FORWARD_BYTES: forwarded payload traffic bytes counter
+ */
+ BATADV_CNT_FORWARD_BYTES,
+
+ /**
+ * @BATADV_CNT_MGMT_TX: transmitted routing protocol traffic packet
+ * counter
+ */
+ BATADV_CNT_MGMT_TX,
+
+ /**
+ * @BATADV_CNT_MGMT_TX_BYTES: transmitted routing protocol traffic bytes
+ * counter
+ */
+ BATADV_CNT_MGMT_TX_BYTES,
+
+ /**
+ * @BATADV_CNT_MGMT_RX: received routing protocol traffic packet counter
+ */
+ BATADV_CNT_MGMT_RX,
+
+ /**
+ * @BATADV_CNT_MGMT_RX_BYTES: received routing protocol traffic bytes
+ * counter
+ */
+ BATADV_CNT_MGMT_RX_BYTES,
+
+ /** @BATADV_CNT_FRAG_TX: transmitted fragment traffic packet counter */
+ BATADV_CNT_FRAG_TX,
+
+ /**
+ * @BATADV_CNT_FRAG_TX_BYTES: transmitted fragment traffic bytes counter
+ */
+ BATADV_CNT_FRAG_TX_BYTES,
+
+ /** @BATADV_CNT_FRAG_RX: received fragment traffic packet counter */
+ BATADV_CNT_FRAG_RX,
+
+ /**
+ * @BATADV_CNT_FRAG_RX_BYTES: received fragment traffic bytes counter
+ */
+ BATADV_CNT_FRAG_RX_BYTES,
+
+ /** @BATADV_CNT_FRAG_FWD: forwarded fragment traffic packet counter */
+ BATADV_CNT_FRAG_FWD,
+
+ /**
+ * @BATADV_CNT_FRAG_FWD_BYTES: forwarded fragment traffic bytes counter
+ */
+ BATADV_CNT_FRAG_FWD_BYTES,
+
+ /**
+ * @BATADV_CNT_TT_REQUEST_TX: transmitted tt req traffic packet counter
+ */
+ BATADV_CNT_TT_REQUEST_TX,
+
+ /** @BATADV_CNT_TT_REQUEST_RX: received tt req traffic packet counter */
+ BATADV_CNT_TT_REQUEST_RX,
+
+ /**
+ * @BATADV_CNT_TT_RESPONSE_TX: transmitted tt resp traffic packet
+ * counter
+ */
+ BATADV_CNT_TT_RESPONSE_TX,
+
+ /**
+ * @BATADV_CNT_TT_RESPONSE_RX: received tt resp traffic packet counter
+ */
+ BATADV_CNT_TT_RESPONSE_RX,
+
+ /**
+ * @BATADV_CNT_TT_ROAM_ADV_TX: transmitted tt roam traffic packet
+ * counter
+ */
+ BATADV_CNT_TT_ROAM_ADV_TX,
+
+ /**
+ * @BATADV_CNT_TT_ROAM_ADV_RX: received tt roam traffic packet counter
+ */
+ BATADV_CNT_TT_ROAM_ADV_RX,
+
+#ifdef CONFIG_BATMAN_ADV_DAT
+ /**
+ * @BATADV_CNT_DAT_GET_TX: transmitted dht GET traffic packet counter
+ */
+ BATADV_CNT_DAT_GET_TX,
+
+ /** @BATADV_CNT_DAT_GET_RX: received dht GET traffic packet counter */
+ BATADV_CNT_DAT_GET_RX,
+
+ /**
+ * @BATADV_CNT_DAT_PUT_TX: transmitted dht PUT traffic packet counter
+ */
+ BATADV_CNT_DAT_PUT_TX,
+
+ /** @BATADV_CNT_DAT_PUT_RX: received dht PUT traffic packet counter */
+ BATADV_CNT_DAT_PUT_RX,
+
+ /**
+ * @BATADV_CNT_DAT_CACHED_REPLY_TX: transmitted dat cache reply traffic
+ * packet counter
+ */
+ BATADV_CNT_DAT_CACHED_REPLY_TX,
+#endif
+
+#ifdef CONFIG_BATMAN_ADV_NC
+ /**
+ * @BATADV_CNT_NC_CODE: transmitted nc-combined traffic packet counter
+ */
+ BATADV_CNT_NC_CODE,
+
+ /**
+ * @BATADV_CNT_NC_CODE_BYTES: transmitted nc-combined traffic bytes
+ * counter
+ */
+ BATADV_CNT_NC_CODE_BYTES,
+
+ /**
+ * @BATADV_CNT_NC_RECODE: transmitted nc-recombined traffic packet
+ * counter
+ */
+ BATADV_CNT_NC_RECODE,
+
+ /**
+ * @BATADV_CNT_NC_RECODE_BYTES: transmitted nc-recombined traffic bytes
+ * counter
+ */
+ BATADV_CNT_NC_RECODE_BYTES,
+
+ /**
+ * @BATADV_CNT_NC_BUFFER: counter for packets buffered for later nc
+ * decoding
+ */
+ BATADV_CNT_NC_BUFFER,
+
+ /**
+ * @BATADV_CNT_NC_DECODE: received and nc-decoded traffic packet counter
+ */
+ BATADV_CNT_NC_DECODE,
+
+ /**
+ * @BATADV_CNT_NC_DECODE_BYTES: received and nc-decoded traffic bytes
+ * counter
+ */
+ BATADV_CNT_NC_DECODE_BYTES,
+
+ /**
+ * @BATADV_CNT_NC_DECODE_FAILED: received and decode-failed traffic
+ * packet counter
+ */
+ BATADV_CNT_NC_DECODE_FAILED,
+
+ /**
+ * @BATADV_CNT_NC_SNIFFED: counter for nc-decoded packets received in
+ * promisc mode.
+ */
+ BATADV_CNT_NC_SNIFFED,
+#endif
+
+ /** @BATADV_CNT_NUM: number of traffic counters */
+ BATADV_CNT_NUM,
+};
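+
+/*
+ * Counter-update sketch: &batadv_priv.bat_counters is a per-cpu u64 array
+ * indexed by this enum, so bumping a statistic is a single per-cpu add
+ * (the in-tree helpers in main.h look essentially like this):
+ */
+static inline void example_add_counter(u64 __percpu *counters, size_t idx,
+				       size_t count)
+{
+	this_cpu_add(counters[idx], count);
+}
+
+/* typical call: example_add_counter(bat_priv->bat_counters, BATADV_CNT_TX, 1); */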
+
+/**
+ * struct batadv_priv_tt - per mesh interface translation table data
+ */
+struct batadv_priv_tt {
+ /** @vn: translation table version number */
+ atomic_t vn;
+
+ /**
+	 * @ogm_append_cnt: counter for the number of OGMs containing the
+	 * local tt diff
+ */
+ atomic_t ogm_append_cnt;
+
+ /** @local_changes: changes registered in an originator interval */
+ atomic_t local_changes;
+
+ /**
+ * @changes_list: tracks tt local changes within an originator interval
+ */
+ struct list_head changes_list;
+
+ /** @local_hash: local translation table hash table */
+ struct batadv_hashtable *local_hash;
+
+ /** @global_hash: global translation table hash table */
+ struct batadv_hashtable *global_hash;
+
+ /** @req_list: list of pending & unanswered tt_requests */
+ struct hlist_head req_list;
+
+ /**
+ * @roam_list: list of the last roaming events of each client limiting
+ * the number of roaming events to avoid route flapping
+ */
+ struct list_head roam_list;
+
+ /** @changes_list_lock: lock protecting changes_list */
+ spinlock_t changes_list_lock;
+
+ /** @req_list_lock: lock protecting req_list */
+ spinlock_t req_list_lock;
+
+ /** @roam_list_lock: lock protecting roam_list */
+ spinlock_t roam_list_lock;
+
+ /** @last_changeset: last tt changeset this host has generated */
+ unsigned char *last_changeset;
+
+ /**
+ * @last_changeset_len: length of last tt changeset this host has
+ * generated
+ */
+ s16 last_changeset_len;
+
+ /**
+ * @last_changeset_lock: lock protecting last_changeset &
+ * last_changeset_len
+ */
+ spinlock_t last_changeset_lock;
+
+ /**
+	 * @commit_lock: prevents a local TT commit from being executed while
+	 * reading the local table. The local TT commit is made up of two
+	 * operations (data structure update and metadata (CRC/TTVN)
+	 * recalculation) which have to be executed atomically to prevent
+	 * another thread from reading the table/metadata in between.
+ */
+ spinlock_t commit_lock;
+
+ /** @work: work queue callback item for translation table purging */
+ struct delayed_work work;
+};
+
+#ifdef CONFIG_BATMAN_ADV_BLA
+
+/**
+ * struct batadv_priv_bla - per mesh interface bridge loop avoidance data
+ */
+struct batadv_priv_bla {
+ /** @num_requests: number of bla requests in flight */
+ atomic_t num_requests;
+
+ /**
+ * @claim_hash: hash table containing mesh nodes this host has claimed
+ */
+ struct batadv_hashtable *claim_hash;
+
+ /**
+ * @backbone_hash: hash table containing all detected backbone gateways
+ */
+ struct batadv_hashtable *backbone_hash;
+
+ /** @loopdetect_addr: MAC address used for own loopdetection frames */
+ u8 loopdetect_addr[ETH_ALEN];
+
+ /**
+ * @loopdetect_lasttime: time when the loopdetection frames were sent
+ */
+ unsigned long loopdetect_lasttime;
+
+ /**
+ * @loopdetect_next: how many periods to wait for the next loopdetect
+ * process
+ */
+ atomic_t loopdetect_next;
+
+ /**
+ * @bcast_duplist: recently received broadcast packets array (for
+ * broadcast duplicate suppression)
+ */
+ struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
+
+ /**
+ * @bcast_duplist_curr: index of last broadcast packet added to
+ * bcast_duplist
+ */
+ int bcast_duplist_curr;
+
+ /**
+ * @bcast_duplist_lock: lock protecting bcast_duplist &
+ * bcast_duplist_curr
+ */
+ spinlock_t bcast_duplist_lock;
+
+ /** @claim_dest: local claim data (e.g. claim group) */
+ struct batadv_bla_claim_dst claim_dest;
+
+ /** @work: work queue callback item for cleanups & bla announcements */
+ struct delayed_work work;
+};
+#endif
+
+#ifdef CONFIG_BATMAN_ADV_DEBUG
+
+/**
+ * struct batadv_priv_debug_log - debug logging data
+ */
+struct batadv_priv_debug_log {
+ /** @log_buff: buffer holding the logs (ring buffer) */
+ char log_buff[BATADV_LOG_BUF_LEN];
+
+ /** @log_start: index of next character to read */
+ unsigned long log_start;
+
+ /** @log_end: index of next character to write */
+ unsigned long log_end;
+
+ /** @lock: lock protecting log_buff, log_start & log_end */
+ spinlock_t lock;
+
+ /** @queue_wait: log reader's wait queue */
+ wait_queue_head_t queue_wait;
+};
+#endif
+
+/**
+ * struct batadv_priv_gw - per mesh interface gateway data
+ */
+struct batadv_priv_gw {
+ /** @gateway_list: list of available gateway nodes */
+ struct hlist_head gateway_list;
+
+ /** @list_lock: lock protecting gateway_list, curr_gw, generation */
+ spinlock_t list_lock;
+
+ /** @curr_gw: pointer to currently selected gateway node */
+ struct batadv_gw_node __rcu *curr_gw;
+
+ /** @generation: current (generation) sequence number */
+ unsigned int generation;
+
+ /**
+ * @mode: gateway operation: off, client or server (see batadv_gw_modes)
+ */
+ atomic_t mode;
+
+ /** @sel_class: gateway selection class (applies if gw_mode client) */
+ atomic_t sel_class;
+
+ /**
+ * @bandwidth_down: advertised uplink download bandwidth (if gw_mode
+ * server)
+ */
+ atomic_t bandwidth_down;
+
+ /**
+ * @bandwidth_up: advertised uplink upload bandwidth (if gw_mode server)
+ */
+ atomic_t bandwidth_up;
+
+ /** @reselect: bool indicating a gateway re-selection is in progress */
+ atomic_t reselect;
+};
+
+/**
+ * struct batadv_priv_tvlv - per mesh interface tvlv data
+ */
+struct batadv_priv_tvlv {
+ /**
+ * @container_list: list of registered tvlv containers to be sent with
+ * each OGM
+ */
+ struct hlist_head container_list;
+
+ /** @handler_list: list of the various tvlv content handlers */
+ struct hlist_head handler_list;
+
+ /** @container_list_lock: protects tvlv container list access */
+ spinlock_t container_list_lock;
+
+ /** @handler_list_lock: protects handler list access */
+ spinlock_t handler_list_lock;
+};
+
+#ifdef CONFIG_BATMAN_ADV_DAT
+
+/**
+ * struct batadv_priv_dat - per mesh interface DAT private data
+ */
+struct batadv_priv_dat {
+ /** @addr: node DAT address */
+ batadv_dat_addr_t addr;
+
+ /** @hash: hashtable representing the local ARP cache */
+ struct batadv_hashtable *hash;
+
+ /** @work: work queue callback item for cache purging */
+ struct delayed_work work;
+};
+#endif
+
+#ifdef CONFIG_BATMAN_ADV_MCAST
+/**
+ * struct batadv_mcast_querier_state - IGMP/MLD querier state when bridged
+ */
+struct batadv_mcast_querier_state {
+ /** @exists: whether a querier exists in the mesh */
+ unsigned char exists:1;
+
+ /**
+ * @shadowing: if a querier exists, whether it is potentially shadowing
+ * multicast listeners (i.e. querier is behind our own bridge segment)
+ */
+ unsigned char shadowing:1;
+};
+
+/**
+ * struct batadv_mcast_mla_flags - flags for the querier, bridge and tvlv state
+ */
+struct batadv_mcast_mla_flags {
+ /** @querier_ipv4: the current state of an IGMP querier in the mesh */
+ struct batadv_mcast_querier_state querier_ipv4;
+
+ /** @querier_ipv6: the current state of an MLD querier in the mesh */
+ struct batadv_mcast_querier_state querier_ipv6;
+
+ /** @enabled: whether the multicast tvlv is currently enabled */
+ unsigned char enabled:1;
+
+ /** @bridged: whether the soft interface has a bridge on top */
+ unsigned char bridged:1;
+
+ /** @tvlv_flags: the flags we have last sent in our mcast tvlv */
+ u8 tvlv_flags;
+};
+
+/**
+ * struct batadv_priv_mcast - per mesh interface mcast data
+ */
+struct batadv_priv_mcast {
+ /**
+ * @mla_list: list of multicast addresses we are currently announcing
+ * via TT
+ */
+ struct hlist_head mla_list; /* see __batadv_mcast_mla_update() */
+
+ /**
+ * @want_all_unsnoopables_list: a list of orig_nodes wanting all
+ * unsnoopable multicast traffic
+ */
+ struct hlist_head want_all_unsnoopables_list;
+
+ /**
+ * @want_all_ipv4_list: a list of orig_nodes wanting all IPv4 multicast
+ * traffic
+ */
+ struct hlist_head want_all_ipv4_list;
+
+ /**
+ * @want_all_ipv6_list: a list of orig_nodes wanting all IPv6 multicast
+ * traffic
+ */
+ struct hlist_head want_all_ipv6_list;
+
+ /**
+ * @want_all_rtr4_list: a list of orig_nodes wanting all routable IPv4
+ * multicast traffic
+ */
+ struct hlist_head want_all_rtr4_list;
+
+ /**
+ * @want_all_rtr6_list: a list of orig_nodes wanting all routable IPv6
+ * multicast traffic
+ */
+ struct hlist_head want_all_rtr6_list;
+
+ /**
+ * @mla_flags: flags for the querier, bridge and tvlv state
+ */
+ struct batadv_mcast_mla_flags mla_flags;
+
+ /**
+ * @mla_lock: a lock protecting mla_list and mla_flags
+ */
+ spinlock_t mla_lock;
+
+ /**
+ * @num_want_all_unsnoopables: number of nodes wanting unsnoopable IP
+ * traffic
+ */
+ atomic_t num_want_all_unsnoopables;
+
+ /** @num_want_all_ipv4: counter for items in want_all_ipv4_list */
+ atomic_t num_want_all_ipv4;
+
+ /** @num_want_all_ipv6: counter for items in want_all_ipv6_list */
+ atomic_t num_want_all_ipv6;
+
+ /** @num_want_all_rtr4: counter for items in want_all_rtr4_list */
+ atomic_t num_want_all_rtr4;
+
+ /** @num_want_all_rtr6: counter for items in want_all_rtr6_list */
+ atomic_t num_want_all_rtr6;
+
+ /**
+	 * @want_lists_lock: lock protecting modifications to the mcast
+	 * want_all_{unsnoopables,ipv4,ipv6}_list (traversals are rcu-locked)
+ */
+ spinlock_t want_lists_lock;
+
+ /** @work: work queue callback item for multicast TT and TVLV updates */
+ struct delayed_work work;
+};
+#endif
+
+/**
+ * struct batadv_priv_nc - per mesh interface network coding private data
+ */
+struct batadv_priv_nc {
+ /** @work: work queue callback item for cleanup */
+ struct delayed_work work;
+
+ /**
+ * @min_tq: only consider neighbors for encoding if neigh_tq > min_tq
+ */
+ u8 min_tq;
+
+ /**
+ * @max_fwd_delay: maximum packet forward delay to allow coding of
+ * packets
+ */
+ u32 max_fwd_delay;
+
+ /**
+	 * @max_buffer_time: buffer time for sniffed packets used for decoding
+ */
+ u32 max_buffer_time;
+
+ /**
+ * @timestamp_fwd_flush: timestamp of last forward packet queue flush
+ */
+ unsigned long timestamp_fwd_flush;
+
+ /**
+ * @timestamp_sniffed_purge: timestamp of last sniffed packet queue
+ * purge
+ */
+ unsigned long timestamp_sniffed_purge;
+
+ /**
+ * @coding_hash: Hash table used to buffer skbs while waiting for
+ * another incoming skb to code it with. Skbs are added to the buffer
+ * just before being forwarded in routing.c
+ */
+ struct batadv_hashtable *coding_hash;
+
+ /**
+ * @decoding_hash: Hash table used to buffer skbs that might be needed
+ * to decode a received coded skb. The buffer is used for 1) skbs
+ * arriving on the soft-interface; 2) skbs overheard on the
+ * hard-interface; and 3) skbs forwarded by batman-adv.
+ */
+ struct batadv_hashtable *decoding_hash;
+};
+
+/**
+ * struct batadv_tp_unacked - unacked packet meta-information
+ *
+ * This struct represents a buffered unacked packet. However, since the
+ * purpose of the TP meter is only to count the traffic, there is no need to
+ * store the entire sk_buff: the starting offset and the length are enough.
+ */
+struct batadv_tp_unacked {
+ /** @seqno: seqno of the unacked packet */
+ u32 seqno;
+
+ /** @len: length of the packet */
+ u16 len;
+
+ /** @list: list node for &batadv_tp_vars.unacked_list */
+ struct list_head list;
+};
+
+/**
+ * enum batadv_tp_meter_role - Modus in tp meter session
+ */
+enum batadv_tp_meter_role {
+ /** @BATADV_TP_RECEIVER: Initialized as receiver */
+ BATADV_TP_RECEIVER,
+
+ /** @BATADV_TP_SENDER: Initialized as sender */
+ BATADV_TP_SENDER
+};
+
+/**
+ * struct batadv_tp_vars - tp meter private variables per session
+ */
+struct batadv_tp_vars {
+ /** @list: list node for &bat_priv.tp_list */
+ struct hlist_node list;
+
+ /** @timer: timer for ack (receiver) and retry (sender) */
+ struct timer_list timer;
+
+ /** @bat_priv: pointer to the mesh object */
+ struct batadv_priv *bat_priv;
+
+ /** @start_time: start time in jiffies */
+ unsigned long start_time;
+
+ /** @other_end: mac address of remote */
+ u8 other_end[ETH_ALEN];
+
+	/** @role: receiver/sender mode */
+ enum batadv_tp_meter_role role;
+
+	/** @sending: sending binary semaphore: 1 if sending, 0 if not */
+ atomic_t sending;
+
+ /** @reason: reason for a stopped session */
+ enum batadv_tp_meter_reason reason;
+
+ /** @finish_work: work item for the finishing procedure */
+ struct delayed_work finish_work;
+
+ /** @test_length: test length in milliseconds */
+ u32 test_length;
+
+ /** @session: TP session identifier */
+ u8 session[2];
+
+ /** @icmp_uid: local ICMP "socket" index */
+ u8 icmp_uid;
+
+ /* sender variables */
+
+ /** @dec_cwnd: decimal part of the cwnd used during linear growth */
+ u16 dec_cwnd;
+
+ /** @cwnd: current size of the congestion window */
+ u32 cwnd;
+
+	/** @cwnd_lock: lock to protect @cwnd & @dec_cwnd */
+ spinlock_t cwnd_lock;
+
+ /**
+ * @ss_threshold: Slow Start threshold. Once cwnd exceeds this value the
+ * connection switches to the Congestion Avoidance state
+ */
+ u32 ss_threshold;
+
+ /** @last_acked: last acked byte */
+ atomic_t last_acked;
+
+ /** @last_sent: last sent byte, not yet acked */
+ u32 last_sent;
+
+ /** @tot_sent: amount of data sent/ACKed so far */
+ atomic64_t tot_sent;
+
+ /** @dup_acks: duplicate ACKs counter */
+ atomic_t dup_acks;
+
+ /** @fast_recovery: true if in Fast Recovery mode */
+ unsigned char fast_recovery:1;
+
+ /** @recover: last sent seqno when entering Fast Recovery */
+ u32 recover;
+
+ /** @rto: sender timeout */
+ u32 rto;
+
+ /** @srtt: smoothed RTT scaled by 2^3 */
+ u32 srtt;
+
+ /** @rttvar: RTT variation scaled by 2^2 */
+ u32 rttvar;
+
+ /**
+	 * @more_bytes: wait queue anchor used when waiting for more ACKs or a
+	 * retry timeout
+ */
+ wait_queue_head_t more_bytes;
+
+ /** @prerandom_offset: offset inside the prerandom buffer */
+ u32 prerandom_offset;
+
+ /** @prerandom_lock: spinlock protecting access to prerandom_offset */
+ spinlock_t prerandom_lock;
+
+ /* receiver variables */
+
+ /** @last_recv: last in-order received packet */
+ u32 last_recv;
+
+ /** @unacked_list: list of unacked packets (meta-info only) */
+ struct list_head unacked_list;
+
+ /** @unacked_lock: protect unacked_list */
+ spinlock_t unacked_lock;
+
+ /** @last_recv_time: time (jiffies) a msg was received */
+ unsigned long last_recv_time;
+
+	/** @refcount: number of contexts where the object is used */
+ struct kref refcount;
+
+ /** @rcu: struct used for freeing in an RCU-safe manner */
+ struct rcu_head rcu;
+};
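+
+/*
+ * RTT-smoothing sketch for the scaled fields above (same fixed-point trick
+ * as TCP, cf. RFC 6298: srtt stores 8*SRTT and rttvar stores 4*RTTVAR, so
+ * the 1/8 and 1/4 gains become integer shifts):
+ */
+static inline void example_tp_update_rto(struct batadv_tp_vars *tp_vars,
+					 u32 new_rtt)
+{
+	long m = new_rtt;
+
+	if (tp_vars->srtt != 0) {
+		m -= (tp_vars->srtt >> 3);	/* m is now the error term */
+		tp_vars->srtt += m;		/* srtt += error/8, scaled */
+		if (m < 0)
+			m = -m;
+		m -= (tp_vars->rttvar >> 2);
+		tp_vars->rttvar += m;		/* rttvar += |error|/4, scaled */
+	} else {
+		tp_vars->srtt = m << 3;		/* first sample seeds SRTT */
+		tp_vars->rttvar = m << 1;	/* RTTVAR starts at RTT/2 */
+	}
+
+	tp_vars->rto = (tp_vars->srtt >> 3) + tp_vars->rttvar;
+}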
+
+/**
+ * struct batadv_softif_vlan - per VLAN attributes set
+ */
+struct batadv_softif_vlan {
+ /** @bat_priv: pointer to the mesh object */
+ struct batadv_priv *bat_priv;
+
+ /** @vid: VLAN identifier */
+ unsigned short vid;
+
+ /** @ap_isolation: AP isolation state */
+ atomic_t ap_isolation; /* boolean */
+
+ /** @tt: TT private attributes (VLAN specific) */
+ struct batadv_vlan_tt tt;
+
+ /** @list: list node for &bat_priv.softif_vlan_list */
+ struct hlist_node list;
+
+ /**
+	 * @refcount: number of contexts where this object is currently in use
+ */
+ struct kref refcount;
+
+	/** @rcu: struct used for freeing in an RCU-safe manner */
+ struct rcu_head rcu;
+};
+
+/**
+ * struct batadv_priv_bat_v - B.A.T.M.A.N. V per soft-interface private data
+ */
+struct batadv_priv_bat_v {
+ /** @ogm_buff: buffer holding the OGM packet */
+ unsigned char *ogm_buff;
+
+ /** @ogm_buff_len: length of the OGM packet buffer */
+ int ogm_buff_len;
+
+ /** @ogm_seqno: OGM sequence number - used to identify each OGM */
+ atomic_t ogm_seqno;
+
+ /** @ogm_buff_mutex: lock protecting ogm_buff and ogm_buff_len */
+ struct mutex ogm_buff_mutex;
+
+ /** @ogm_wq: workqueue used to schedule OGM transmissions */
+ struct delayed_work ogm_wq;
+};
+
+/**
+ * struct batadv_priv - per mesh interface data
+ */
+struct batadv_priv {
+ /**
+ * @mesh_state: current status of the mesh
+ * (inactive/active/deactivating)
+ */
+ atomic_t mesh_state;
+
+ /** @soft_iface: net device which holds this struct as private data */
+ struct net_device *soft_iface;
+
+ /**
+	 * @mtu_set_by_user: whether the MTU was set once by the user;
+	 * protected by rtnl_lock
+ */
+ int mtu_set_by_user;
+
+ /**
+ * @bat_counters: mesh internal traffic statistic counters (see
+ * batadv_counters)
+ */
+ u64 __percpu *bat_counters; /* Per cpu counters */
+
+ /**
+ * @aggregated_ogms: bool indicating whether OGM aggregation is enabled
+ */
+ atomic_t aggregated_ogms;
+
+ /** @bonding: bool indicating whether traffic bonding is enabled */
+ atomic_t bonding;
+
+ /**
+ * @fragmentation: bool indicating whether traffic fragmentation is
+ * enabled
+ */
+ atomic_t fragmentation;
+
+ /**
+ * @packet_size_max: max packet size that can be transmitted via
+ * multiple fragmented skbs or a single frame if fragmentation is
+ * disabled
+ */
+ atomic_t packet_size_max;
+
+ /**
+ * @frag_seqno: incremental counter to identify chains of egress
+ * fragments
+ */
+ atomic_t frag_seqno;
+
+#ifdef CONFIG_BATMAN_ADV_BLA
+ /**
+ * @bridge_loop_avoidance: bool indicating whether bridge loop
+ * avoidance is enabled
+ */
+ atomic_t bridge_loop_avoidance;
+#endif
+
+#ifdef CONFIG_BATMAN_ADV_DAT
+ /**
+ * @distributed_arp_table: bool indicating whether distributed ARP table
+ * is enabled
+ */
+ atomic_t distributed_arp_table;
+#endif
+
+#ifdef CONFIG_BATMAN_ADV_MCAST
+ /**
+ * @multicast_mode: Enable or disable multicast optimizations on this
+ * node's sender/originating side
+ */
+ atomic_t multicast_mode;
+
+ /**
+ * @multicast_fanout: Maximum number of packet copies to generate for a
+ * multicast-to-unicast conversion
+ */
+ atomic_t multicast_fanout;
+#endif
+
+ /** @orig_interval: OGM broadcast interval in milliseconds */
+ atomic_t orig_interval;
+
+ /**
+ * @hop_penalty: penalty which will be applied to an OGM's tq-field on
+ * every hop
+ */
+ atomic_t hop_penalty;
+
+#ifdef CONFIG_BATMAN_ADV_DEBUG
+ /** @log_level: configured log level (see batadv_dbg_level) */
+ atomic_t log_level;
+#endif
+
+ /**
+ * @isolation_mark: the skb->mark value used to match packets for AP
+ * isolation
+ */
+ u32 isolation_mark;
+
+ /**
+ * @isolation_mark_mask: bitmask identifying the bits in skb->mark to be
+ * used for the isolation mark
+ */
+ u32 isolation_mark_mask;
+
+ /** @bcast_seqno: last sent broadcast packet sequence number */
+ atomic_t bcast_seqno;
+
+ /**
+ * @bcast_queue_left: number of remaining buffered broadcast packet
+ * slots
+ */
+ atomic_t bcast_queue_left;
+
+ /** @batman_queue_left: number of remaining OGM packet slots */
+ atomic_t batman_queue_left;
+
+ /** @forw_bat_list: list of aggregated OGMs that will be forwarded */
+ struct hlist_head forw_bat_list;
+
+ /**
+	 * @forw_bcast_list: list of broadcast packets that will be
+	 * rebroadcast
+ */
+ struct hlist_head forw_bcast_list;
+
+ /** @tp_list: list of tp sessions */
+ struct hlist_head tp_list;
+
+ /** @orig_hash: hash table containing mesh participants (orig nodes) */
+ struct batadv_hashtable *orig_hash;
+
+ /** @forw_bat_list_lock: lock protecting forw_bat_list */
+ spinlock_t forw_bat_list_lock;
+
+ /** @forw_bcast_list_lock: lock protecting forw_bcast_list */
+ spinlock_t forw_bcast_list_lock;
+
+ /** @tp_list_lock: spinlock protecting @tp_list */
+ spinlock_t tp_list_lock;
+
+ /** @tp_num: number of currently active tp sessions */
+ atomic_t tp_num;
+
+ /** @orig_work: work queue callback item for orig node purging */
+ struct delayed_work orig_work;
+
+ /**
+ * @primary_if: one of the hard-interfaces assigned to this mesh
+ * interface becomes the primary interface
+ */
+ struct batadv_hard_iface __rcu *primary_if; /* rcu protected pointer */
+
+ /** @algo_ops: routing algorithm used by this mesh interface */
+ struct batadv_algo_ops *algo_ops;
+
+ /**
+ * @softif_vlan_list: a list of softif_vlan structs, one per VLAN
+ * created on top of the mesh interface represented by this object
+ */
+ struct hlist_head softif_vlan_list;
+
+ /** @softif_vlan_list_lock: lock protecting softif_vlan_list */
+ spinlock_t softif_vlan_list_lock;
+
+#ifdef CONFIG_BATMAN_ADV_BLA
+ /** @bla: bridge loop avoidance data */
+ struct batadv_priv_bla bla;
+#endif
+
+#ifdef CONFIG_BATMAN_ADV_DEBUG
+ /** @debug_log: holding debug logging relevant data */
+ struct batadv_priv_debug_log *debug_log;
+#endif
+
+ /** @gw: gateway data */
+ struct batadv_priv_gw gw;
+
+ /** @tt: translation table data */
+ struct batadv_priv_tt tt;
+
+ /** @tvlv: type-version-length-value data */
+ struct batadv_priv_tvlv tvlv;
+
+#ifdef CONFIG_BATMAN_ADV_DAT
+ /** @dat: distributed arp table data */
+ struct batadv_priv_dat dat;
+#endif
+
+#ifdef CONFIG_BATMAN_ADV_MCAST
+ /** @mcast: multicast data */
+ struct batadv_priv_mcast mcast;
+#endif
+
+#ifdef CONFIG_BATMAN_ADV_NC
+ /**
+ * @network_coding: bool indicating whether network coding is enabled
+ */
+ atomic_t network_coding;
+
+ /** @nc: network coding data */
+ struct batadv_priv_nc nc;
+#endif /* CONFIG_BATMAN_ADV_NC */
+
+#ifdef CONFIG_BATMAN_ADV_BATMAN_V
+ /** @bat_v: B.A.T.M.A.N. V per soft-interface private data */
+ struct batadv_priv_bat_v bat_v;
+#endif
+};
+
+#ifdef CONFIG_BATMAN_ADV_BLA
+
+/**
+ * struct batadv_bla_backbone_gw - batman-adv gateway bridged into the LAN
+ */
+struct batadv_bla_backbone_gw {
+ /**
+ * @orig: originator address of backbone node (mac address of primary
+ * iface)
+ */
+ u8 orig[ETH_ALEN];
+
+ /** @vid: vlan id this gateway was detected on */
+ unsigned short vid;
+
+ /** @hash_entry: hlist node for &batadv_priv_bla.backbone_hash */
+ struct hlist_node hash_entry;
+
+ /** @bat_priv: pointer to soft_iface this backbone gateway belongs to */
+ struct batadv_priv *bat_priv;
+
+ /** @lasttime: last time we heard of this backbone gw */
+ unsigned long lasttime;
+
+ /**
+	 * @wait_periods: grace time for bridge forward delays and bla group
+	 * forming during the bootup phase - no bcast traffic is forwarded
+	 * until it has elapsed
+ */
+ atomic_t wait_periods;
+
+ /**
+ * @request_sent: if this bool is set to true we are out of sync with
+	 * this backbone gateway - no bcast traffic is forwarded until the
+	 * situation is resolved
+ */
+ atomic_t request_sent;
+
+ /** @crc: crc16 checksum over all claims */
+ u16 crc;
+
+ /** @crc_lock: lock protecting crc */
+ spinlock_t crc_lock;
+
+ /** @report_work: work struct for reporting detected loops */
+ struct work_struct report_work;
+
+	/** @refcount: number of contexts the object is used in */
+ struct kref refcount;
+
+ /** @rcu: struct used for freeing in an RCU-safe manner */
+ struct rcu_head rcu;
+};
+
+/**
+ * struct batadv_bla_claim - claimed non-mesh client structure
+ */
+struct batadv_bla_claim {
+ /** @addr: mac address of claimed non-mesh client */
+ u8 addr[ETH_ALEN];
+
+ /** @vid: vlan id this client was detected on */
+ unsigned short vid;
+
+ /** @backbone_gw: pointer to backbone gw claiming this client */
+ struct batadv_bla_backbone_gw *backbone_gw;
+
+ /** @backbone_lock: lock protecting backbone_gw pointer */
+ spinlock_t backbone_lock;
+
+ /** @lasttime: last time we heard of claim (locals only) */
+ unsigned long lasttime;
+
+ /** @hash_entry: hlist node for &batadv_priv_bla.claim_hash */
+ struct hlist_node hash_entry;
+
+	/** @rcu: struct used for freeing in an RCU-safe manner */
+	struct rcu_head rcu;
+
+	/** @refcount: number of contexts the object is used in */
+	struct kref refcount;
+};
+#endif
+
+/**
+ * struct batadv_tt_common_entry - tt local & tt global common data
+ */
+struct batadv_tt_common_entry {
+ /** @addr: mac address of non-mesh client */
+ u8 addr[ETH_ALEN];
+
+ /** @vid: VLAN identifier */
+ unsigned short vid;
+
+ /**
+ * @hash_entry: hlist node for &batadv_priv_tt.local_hash or for
+ * &batadv_priv_tt.global_hash
+ */
+ struct hlist_node hash_entry;
+
+ /** @flags: various state handling flags (see batadv_tt_client_flags) */
+ u16 flags;
+
+ /** @added_at: timestamp used for purging stale tt common entries */
+ unsigned long added_at;
+
+	/** @refcount: number of contexts the object is used in */
+ struct kref refcount;
+
+ /** @rcu: struct used for freeing in an RCU-safe manner */
+ struct rcu_head rcu;
+};
+
+/**
+ * struct batadv_tt_local_entry - translation table local entry data
+ */
+struct batadv_tt_local_entry {
+ /** @common: general translation table data */
+ struct batadv_tt_common_entry common;
+
+ /** @last_seen: timestamp used for purging stale tt local entries */
+ unsigned long last_seen;
+
+ /** @vlan: soft-interface vlan of the entry */
+ struct batadv_softif_vlan *vlan;
+};
+
+/**
+ * struct batadv_tt_global_entry - translation table global entry data
+ */
+struct batadv_tt_global_entry {
+ /** @common: general translation table data */
+ struct batadv_tt_common_entry common;
+
+ /** @orig_list: list of orig nodes announcing this non-mesh client */
+ struct hlist_head orig_list;
+
+ /** @orig_list_count: number of items in the orig_list */
+ atomic_t orig_list_count;
+
+ /** @list_lock: lock protecting orig_list */
+ spinlock_t list_lock;
+
+ /** @roam_at: time at which TT_GLOBAL_ROAM was set */
+ unsigned long roam_at;
+};
+
+/**
+ * struct batadv_tt_orig_list_entry - orig node announcing a non-mesh client
+ */
+struct batadv_tt_orig_list_entry {
+ /** @orig_node: pointer to orig node announcing this non-mesh client */
+ struct batadv_orig_node *orig_node;
+
+ /**
+ * @ttvn: translation table version number which added the non-mesh
+ * client
+ */
+ u8 ttvn;
+
+ /** @flags: per orig entry TT sync flags */
+ u8 flags;
+
+ /** @list: list node for &batadv_tt_global_entry.orig_list */
+ struct hlist_node list;
+
+	/** @refcount: number of contexts the object is used in */
+ struct kref refcount;
+
+ /** @rcu: struct used for freeing in an RCU-safe manner */
+ struct rcu_head rcu;
+};
+
+/**
+ * struct batadv_tt_change_node - structure for tt changes that occurred
+ */
+struct batadv_tt_change_node {
+ /** @list: list node for &batadv_priv_tt.changes_list */
+ struct list_head list;
+
+ /** @change: holds the actual translation table diff data */
+ struct batadv_tvlv_tt_change change;
+};
+
+/**
+ * struct batadv_tt_req_node - data to keep track of the tt requests in flight
+ */
+struct batadv_tt_req_node {
+ /**
+ * @addr: mac address of the originator this request was sent to
+ */
+ u8 addr[ETH_ALEN];
+
+ /** @issued_at: timestamp used for purging stale tt requests */
+ unsigned long issued_at;
+
+ /** @refcount: number of contexts the object is used by */
+ struct kref refcount;
+
+ /** @list: list node for &batadv_priv_tt.req_list */
+ struct hlist_node list;
+};
+
+/**
+ * struct batadv_tt_roam_node - roaming client data
+ */
+struct batadv_tt_roam_node {
+ /** @addr: mac address of the client in the roaming phase */
+ u8 addr[ETH_ALEN];
+
+ /**
+ * @counter: number of allowed roaming events per client within a single
+ * OGM interval (changes are committed with each OGM)
+ */
+ atomic_t counter;
+
+ /**
+ * @first_time: timestamp used for purging stale roaming node entries
+ */
+ unsigned long first_time;
+
+ /** @list: list node for &batadv_priv_tt.roam_list */
+ struct list_head list;
+};
+
+/**
+ * struct batadv_nc_node - network coding node
+ */
+struct batadv_nc_node {
+ /** @list: next and prev pointer for the list handling */
+ struct list_head list;
+
+ /** @addr: the node's mac address */
+ u8 addr[ETH_ALEN];
+
+ /** @refcount: number of contexts the object is used by */
+ struct kref refcount;
+
+ /** @rcu: struct used for freeing in an RCU-safe manner */
+ struct rcu_head rcu;
+
+ /** @orig_node: pointer to corresponding orig node struct */
+ struct batadv_orig_node *orig_node;
+
+ /** @last_seen: timestamp of last ogm received from this node */
+ unsigned long last_seen;
+};
+
+/**
+ * struct batadv_nc_path - network coding path
+ */
+struct batadv_nc_path {
+ /** @hash_entry: next and prev pointer for the list handling */
+ struct hlist_node hash_entry;
+
+ /** @rcu: struct used for freeing in an RCU-safe manner */
+ struct rcu_head rcu;
+
+ /** @refcount: number of contexts the object is used by */
+ struct kref refcount;
+
+ /** @packet_list: list of buffered packets for this path */
+ struct list_head packet_list;
+
+ /** @packet_list_lock: access lock for packet list */
+ spinlock_t packet_list_lock;
+
+ /** @next_hop: next hop (destination) of path */
+ u8 next_hop[ETH_ALEN];
+
+ /** @prev_hop: previous hop (source) of path */
+ u8 prev_hop[ETH_ALEN];
+
+ /** @last_valid: timestamp for last validation of path */
+ unsigned long last_valid;
+};
+
+/**
+ * struct batadv_nc_packet - network coding packet used when coding and
+ * decoding packets
+ */
+struct batadv_nc_packet {
+ /** @list: next and prev pointer for the list handling */
+ struct list_head list;
+
+ /** @packet_id: crc32 checksum of skb data */
+ __be32 packet_id;
+
+	/** @timestamp: time when the packet was added to the path */
+ unsigned long timestamp;
+
+ /** @neigh_node: pointer to original next hop neighbor of skb */
+ struct batadv_neigh_node *neigh_node;
+
+ /** @skb: skb which can be encoded or used for decoding */
+ struct sk_buff *skb;
+
+ /** @nc_path: pointer to path this nc packet is attached to */
+ struct batadv_nc_path *nc_path;
+};
+
+/**
+ * struct batadv_skb_cb - control buffer structure used to store private data
+ * relevant to batman-adv in the skb->cb buffer of an skb.
+ */
+struct batadv_skb_cb {
+ /**
+ * @decoded: Marks a skb as decoded, which is checked when searching for
+ * coding opportunities in network-coding.c
+ */
+ unsigned char decoded:1;
+
+ /** @num_bcasts: Counter for broadcast packet retransmissions */
+ unsigned char num_bcasts;
+};
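+
+/*
+ * Access sketch: skb->cb is a small scratch area inside every sk_buff, so
+ * the usual pattern (the in-tree BATADV_SKB_CB macro does the same) is a
+ * size-checked cast:
+ */
+static inline struct batadv_skb_cb *example_skb_cb(struct sk_buff *skb)
+{
+	BUILD_BUG_ON(sizeof(struct batadv_skb_cb) > sizeof(skb->cb));
+	return (struct batadv_skb_cb *)skb->cb;
+}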
+
+/**
+ * struct batadv_forw_packet - structure for bcast packets to be sent/forwarded
+ */
+struct batadv_forw_packet {
+ /**
+	 * @list: list node for &batadv_priv.forw_bcast_list and
+	 * &batadv_priv.forw_bat_list
+ */
+ struct hlist_node list;
+
+ /** @cleanup_list: list node for purging functions */
+ struct hlist_node cleanup_list;
+
+ /** @send_time: execution time for delayed_work (packet sending) */
+ unsigned long send_time;
+
+ /**
+ * @own: bool for locally generated packets (local OGMs are re-scheduled
+ * after sending)
+ */
+ u8 own;
+
+ /** @skb: bcast packet's skb buffer */
+ struct sk_buff *skb;
+
+ /** @packet_len: size of aggregated OGM packet inside the skb buffer */
+ u16 packet_len;
+
+ /** @direct_link_flags: direct link flags for aggregated OGM packets */
+ u32 direct_link_flags;
+
+ /** @num_packets: counter for aggregated OGMv1 packets */
+ u8 num_packets;
+
+ /** @delayed_work: work queue callback item for packet sending */
+ struct delayed_work delayed_work;
+
+ /**
+ * @if_incoming: pointer to incoming hard-iface or primary iface if
+ * locally generated packet
+ */
+ struct batadv_hard_iface *if_incoming;
+
+ /**
+	 * @if_outgoing: interface the packet should be sent to, or NULL if
+	 * unspecified
+ */
+ struct batadv_hard_iface *if_outgoing;
+
+ /** @queue_left: The queue (counter) this packet was applied to */
+ atomic_t *queue_left;
+};
+
+/**
+ * struct batadv_algo_iface_ops - mesh algorithm callbacks (interface specific)
+ */
+struct batadv_algo_iface_ops {
+ /**
+ * @activate: start routing mechanisms when hard-interface is brought up
+ * (optional)
+ */
+ void (*activate)(struct batadv_hard_iface *hard_iface);
+
+ /** @enable: init routing info when hard-interface is enabled */
+ int (*enable)(struct batadv_hard_iface *hard_iface);
+
+ /** @enabled: notification when hard-interface was enabled (optional) */
+ void (*enabled)(struct batadv_hard_iface *hard_iface);
+
+ /** @disable: de-init routing info when hard-interface is disabled */
+ void (*disable)(struct batadv_hard_iface *hard_iface);
+
+ /**
+ * @update_mac: (re-)init mac addresses of the protocol information
+ * belonging to this hard-interface
+ */
+ void (*update_mac)(struct batadv_hard_iface *hard_iface);
+
+ /** @primary_set: called when primary interface is selected / changed */
+ void (*primary_set)(struct batadv_hard_iface *hard_iface);
+};
+
+/**
+ * struct batadv_algo_neigh_ops - mesh algorithm callbacks (neighbour specific)
+ */
+struct batadv_algo_neigh_ops {
+ /** @hardif_init: called on creation of single hop entry (optional) */
+ void (*hardif_init)(struct batadv_hardif_neigh_node *neigh);
+
+ /**
+ * @cmp: compare the metrics of two neighbors for their respective
+ * outgoing interfaces
+ */
+ int (*cmp)(struct batadv_neigh_node *neigh1,
+ struct batadv_hard_iface *if_outgoing1,
+ struct batadv_neigh_node *neigh2,
+ struct batadv_hard_iface *if_outgoing2);
+
+ /**
+	 * @is_similar_or_better: check if neigh1 is as good as or better than
+	 * neigh2 for their respective outgoing interfaces from the metric
+	 * perspective
+ */
+ bool (*is_similar_or_better)(struct batadv_neigh_node *neigh1,
+ struct batadv_hard_iface *if_outgoing1,
+ struct batadv_neigh_node *neigh2,
+ struct batadv_hard_iface *if_outgoing2);
+
+ /** @dump: dump neighbors to a netlink socket (optional) */
+ void (*dump)(struct sk_buff *msg, struct netlink_callback *cb,
+ struct batadv_priv *priv,
+ struct batadv_hard_iface *hard_iface);
+};
+
+/**
+ * struct batadv_algo_orig_ops - mesh algorithm callbacks (originator specific)
+ */
+struct batadv_algo_orig_ops {
+ /** @dump: dump originators to a netlink socket (optional) */
+ void (*dump)(struct sk_buff *msg, struct netlink_callback *cb,
+ struct batadv_priv *priv,
+ struct batadv_hard_iface *hard_iface);
+};
+
+/**
+ * struct batadv_algo_gw_ops - mesh algorithm callbacks (GW specific)
+ */
+struct batadv_algo_gw_ops {
+ /** @init_sel_class: initialize GW selection class (optional) */
+ void (*init_sel_class)(struct batadv_priv *bat_priv);
+
+ /**
+ * @sel_class_max: maximum allowed GW selection class
+ */
+ u32 sel_class_max;
+
+ /**
+ * @get_best_gw_node: select the best GW from the list of available
+ * nodes (optional)
+ */
+ struct batadv_gw_node *(*get_best_gw_node)
+ (struct batadv_priv *bat_priv);
+
+ /**
+ * @is_eligible: check if a newly discovered GW is a potential candidate
+ * for the election as best GW (optional)
+ */
+ bool (*is_eligible)(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *curr_gw_orig,
+ struct batadv_orig_node *orig_node);
+
+ /** @dump: dump gateways to a netlink socket (optional) */
+ void (*dump)(struct sk_buff *msg, struct netlink_callback *cb,
+ struct batadv_priv *priv);
+};
+
+/**
+ * struct batadv_algo_ops - mesh algorithm callbacks
+ */
+struct batadv_algo_ops {
+ /** @list: list node for the batadv_algo_list */
+ struct hlist_node list;
+
+ /** @name: name of the algorithm */
+ char *name;
+
+ /** @iface: callbacks related to interface handling */
+ struct batadv_algo_iface_ops iface;
+
+ /** @neigh: callbacks related to neighbors handling */
+ struct batadv_algo_neigh_ops neigh;
+
+ /** @orig: callbacks related to originators handling */
+ struct batadv_algo_orig_ops orig;
+
+ /** @gw: callbacks related to GW mode */
+ struct batadv_algo_gw_ops gw;
+};
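+
+/*
+ * Registration sketch (shape inferred from the ops structs above; all
+ * example_* names are placeholders): a routing algorithm fills in its
+ * mandatory callbacks and registers the ops struct once at module init
+ * via batadv_algo_register().
+ */
+static int example_iface_enable(struct batadv_hard_iface *hard_iface)
+{
+	return 0;	/* placeholder: set up per-interface state here */
+}
+
+static void example_iface_noop(struct batadv_hard_iface *hard_iface)
+{
+}
+
+static int example_neigh_cmp(struct batadv_neigh_node *neigh1,
+			     struct batadv_hard_iface *if_outgoing1,
+			     struct batadv_neigh_node *neigh2,
+			     struct batadv_hard_iface *if_outgoing2)
+{
+	return 0;	/* placeholder: >0 if neigh1 has the better metric */
+}
+
+static bool example_neigh_is_sob(struct batadv_neigh_node *neigh1,
+				 struct batadv_hard_iface *if_outgoing1,
+				 struct batadv_neigh_node *neigh2,
+				 struct batadv_hard_iface *if_outgoing2)
+{
+	return true;	/* placeholder */
+}
+
+static struct batadv_algo_ops example_algo_ops = {
+	.name = "EXAMPLE",
+	.iface = {
+		.enable = example_iface_enable,
+		.disable = example_iface_noop,
+		.update_mac = example_iface_noop,
+		.primary_set = example_iface_noop,
+	},
+	.neigh = {
+		.cmp = example_neigh_cmp,
+		.is_similar_or_better = example_neigh_is_sob,
+	},
+};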
+
+/**
+ * struct batadv_dat_entry - a single entry of the batman-adv ARP backend. It
+ * is used to store ARP entries needed for the global DAT cache
+ */
+struct batadv_dat_entry {
+	/** @ip: the IPv4 address corresponding to this DAT/ARP entry */
+	__be32 ip;
+
+	/** @mac_addr: the MAC address associated with the stored IPv4 address */
+	u8 mac_addr[ETH_ALEN];
+
+	/** @vid: the vlan ID associated with this entry */
+	unsigned short vid;
+
+ /**
+ * @last_update: time in jiffies when this entry was refreshed last time
+ */
+ unsigned long last_update;
+
+ /** @hash_entry: hlist node for &batadv_priv_dat.hash */
+ struct hlist_node hash_entry;
+
+	/** @refcount: number of contexts the object is used in */
+ struct kref refcount;
+
+ /** @rcu: struct used for freeing in an RCU-safe manner */
+ struct rcu_head rcu;
+};
+
+/**
+ * struct batadv_hw_addr - a list entry for a MAC address
+ */
+struct batadv_hw_addr {
+ /** @list: list node for the linking of entries */
+ struct hlist_node list;
+
+ /** @addr: the MAC address of this list entry */
+ unsigned char addr[ETH_ALEN];
+};
+
+/**
+ * struct batadv_dat_candidate - candidate destination for DAT operations
+ */
+struct batadv_dat_candidate {
+ /**
+	 * @type: the type of the selected candidate. It can be one of the
+ * following:
+ * - BATADV_DAT_CANDIDATE_NOT_FOUND
+ * - BATADV_DAT_CANDIDATE_ORIG
+ */
+ int type;
+
+ /**
+ * @orig_node: if type is BATADV_DAT_CANDIDATE_ORIG this field points to
+ * the corresponding originator node structure
+ */
+ struct batadv_orig_node *orig_node;
+};
+
+/**
+ * struct batadv_tvlv_container - container for tvlv appended to OGMs
+ */
+struct batadv_tvlv_container {
+ /** @list: hlist node for &batadv_priv_tvlv.container_list */
+ struct hlist_node list;
+
+ /** @tvlv_hdr: tvlv header information needed to construct the tvlv */
+ struct batadv_tvlv_hdr tvlv_hdr;
+
+	/** @refcount: number of contexts the object is used in */
+ struct kref refcount;
+};
+
+/**
+ * struct batadv_tvlv_handler - handler for specific tvlv type and version
+ */
+struct batadv_tvlv_handler {
+ /** @list: hlist node for &batadv_priv_tvlv.handler_list */
+ struct hlist_node list;
+
+ /**
+ * @ogm_handler: handler callback which is given the tvlv payload to
+ * process on incoming OGM packets
+ */
+ void (*ogm_handler)(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig,
+ u8 flags, void *tvlv_value, u16 tvlv_value_len);
+
+ /**
+ * @unicast_handler: handler callback which is given the tvlv payload to
+ * process on incoming unicast tvlv packets
+ */
+ int (*unicast_handler)(struct batadv_priv *bat_priv,
+ u8 *src, u8 *dst,
+ void *tvlv_value, u16 tvlv_value_len);
+
+ /**
+ * @mcast_handler: handler callback which is given the tvlv payload to
+ * process on incoming mcast packet
+ */
+ int (*mcast_handler)(struct batadv_priv *bat_priv, struct sk_buff *skb);
+
+ /** @type: tvlv type this handler feels responsible for */
+ u8 type;
+
+ /** @version: tvlv version this handler feels responsible for */
+ u8 version;
+
+ /** @flags: tvlv handler flags */
+ u8 flags;
+
+	/** @refcount: number of contexts the object is used in */
+ struct kref refcount;
+
+ /** @rcu: struct used for freeing in an RCU-safe manner */
+ struct rcu_head rcu;
+};
+
+/**
+ * enum batadv_tvlv_handler_flags - tvlv handler flags definitions
+ */
+enum batadv_tvlv_handler_flags {
+ /**
+ * @BATADV_TVLV_HANDLER_OGM_CIFNOTFND: tvlv ogm processing function
+ * will call this handler even if its type was not found (with no data)
+ */
+ BATADV_TVLV_HANDLER_OGM_CIFNOTFND = BIT(1),
+
+ /**
+	 * @BATADV_TVLV_HANDLER_OGM_CALLED: internal tvlv handling flag - the
+ * API marks a handler as being called, so it won't be called if the
+ * BATADV_TVLV_HANDLER_OGM_CIFNOTFND flag was set
+ */
+ BATADV_TVLV_HANDLER_OGM_CALLED = BIT(2),
+};
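+
+/*
+ * Dispatch sketch for the two flags above (logic paraphrased from tvlv.c;
+ * the first pass and the flag reset are elided): while walking the TVLVs
+ * of an OGM each matching handler is run and marked CALLED; afterwards,
+ * CIFNOTFND handlers that were never marked still get one call with an
+ * empty payload.
+ */
+static void example_tvlv_ogm_dispatch_unfound(struct batadv_priv *bat_priv,
+					      struct batadv_orig_node *orig)
+{
+	struct batadv_tvlv_handler *handler;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(handler, &bat_priv->tvlv.handler_list, list) {
+		if (!(handler->flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND))
+			continue;
+
+		if (handler->flags & BATADV_TVLV_HANDLER_OGM_CALLED)
+			continue;
+
+		if (handler->ogm_handler)
+			handler->ogm_handler(bat_priv, orig, 0, NULL, 0);
+	}
+	rcu_read_unlock();
+}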
+
+#endif /* _NET_BATMAN_ADV_TYPES_H_ */