Diffstat (limited to 'net/l2tp')
-rw-r--r--  net/l2tp/Kconfig          110
-rw-r--r--  net/l2tp/Makefile          18
-rw-r--r--  net/l2tp/l2tp_core.c     1723
-rw-r--r--  net/l2tp/l2tp_core.h      346
-rw-r--r--  net/l2tp/l2tp_debugfs.c   348
-rw-r--r--  net/l2tp/l2tp_eth.c       370
-rw-r--r--  net/l2tp/l2tp_ip.c        682
-rw-r--r--  net/l2tp/l2tp_ip6.c       810
-rw-r--r--  net/l2tp/l2tp_netlink.c  1048
-rw-r--r--  net/l2tp/l2tp_ppp.c      1743
-rw-r--r--  net/l2tp/trace.h          211
11 files changed, 7409 insertions, 0 deletions
diff --git a/net/l2tp/Kconfig b/net/l2tp/Kconfig
new file mode 100644
index 0000000000..b7856748e9
--- /dev/null
+++ b/net/l2tp/Kconfig
@@ -0,0 +1,110 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Layer Two Tunneling Protocol (L2TP)
+#
+
+menuconfig L2TP
+ tristate "Layer Two Tunneling Protocol (L2TP)"
+ depends on (IPV6 || IPV6=n)
+ depends on INET
+ select NET_UDP_TUNNEL
+ help
+ Layer Two Tunneling Protocol
+
+ From RFC 2661 <http://www.ietf.org/rfc/rfc2661.txt>.
+
+ L2TP facilitates the tunneling of packets across an
+ intervening network in a way that is as transparent as
+ possible to both end-users and applications.
+
+ L2TP is often used to tunnel PPP traffic over IP
+ tunnels. One IP tunnel may carry thousands of individual PPP
+ connections. L2TP is also used as a VPN protocol, popular
+ with home workers to connect to their offices.
+
+ L2TPv3 allows other protocols as well as PPP to be carried
+ over L2TP tunnels. L2TPv3 is defined in RFC 3931
+ <http://www.ietf.org/rfc/rfc3931.txt>.
+
+ The kernel component handles only L2TP data packets: a
+ userland daemon handles the L2TP control protocol (tunnel
+ and session setup). One such daemon is OpenL2TP
+ (http://openl2tp.org/).
+
+ If you don't need L2TP, say N. To compile all L2TP code as
+ modules, choose M here.
+
+config L2TP_DEBUGFS
+ tristate "L2TP debugfs support"
+ depends on L2TP && DEBUG_FS
+ help
+ Support for l2tp directory in debugfs filesystem. This may be
+ used to dump internal state of the l2tp drivers for problem
+ analysis.
+
+ If unsure, say 'Y'.
+
+ To compile this driver as a module, choose M here. The module
+ will be called l2tp_debugfs.
+
+config L2TP_V3
+ bool "L2TPv3 support"
+ depends on L2TP
+ help
+ Layer Two Tunneling Protocol Version 3
+
+ From RFC 3931 <http://www.ietf.org/rfc/rfc3931.txt>.
+
+ The Layer Two Tunneling Protocol (L2TP) provides a dynamic
+ mechanism for tunneling Layer 2 (L2) "circuits" across a
+ packet-oriented data network (e.g., over IP). L2TP, as
+ originally defined in RFC 2661, is a standard method for
+ tunneling Point-to-Point Protocol (PPP) [RFC1661] sessions.
+ L2TP has since been adopted for tunneling a number of other
+ L2 protocols, including ATM, Frame Relay, HDLC and even raw
+ ethernet frames.
+
+ If you are connecting to L2TPv3 equipment, or you want to
+ tunnel raw ethernet frames using L2TP, say Y here. If
+ unsure, say N.
+
+config L2TP_IP
+ tristate "L2TP IP encapsulation for L2TPv3"
+ depends on L2TP_V3
+ help
+ Support for L2TP-over-IP socket family.
+
+ The L2TPv3 protocol defines two possible encapsulations for
+ L2TP frames, namely UDP and plain IP (without UDP). This
+ driver provides a new L2TPIP socket family with which
+ userspace L2TPv3 daemons may create L2TP/IP tunnel sockets
+ when UDP encapsulation is not required. When L2TP is carried
+ in IP packets, it uses IP protocol number 115, so this protocol
+ must be allowed through firewalls.
+
+ To compile this driver as a module, choose M here. The module
+ will be called l2tp_ip.
+
+config L2TP_ETH
+ tristate "L2TP ethernet pseudowire support for L2TPv3"
+ depends on L2TP_V3
+ help
+ Support for carrying raw ethernet frames over L2TPv3.
+
+ From RFC 4719 <http://www.ietf.org/rfc/rfc4719.txt>.
+
+ The Layer 2 Tunneling Protocol, Version 3 (L2TPv3) can be
+ used as a control protocol and for data encapsulation to set
+ up Pseudowires for transporting layer 2 Packet Data Units
+ across an IP network [RFC3931].
+
+ This driver provides an ethernet virtual interface for each
+ L2TP ethernet pseudowire instance. Standard Linux tools may
+ be used to assign an IP address to the local virtual
+ interface, or add the interface to a bridge.
+
+ If you are using L2TPv3, you will almost certainly want to
+ enable this option.
+
+ To compile this driver as a module, choose M here. The module
+ will be called l2tp_eth.
diff --git a/net/l2tp/Makefile b/net/l2tp/Makefile
new file mode 100644
index 0000000000..cf8f27071d
--- /dev/null
+++ b/net/l2tp/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the L2TP subsystem.
+#
+
+obj-$(CONFIG_L2TP) += l2tp_core.o
+
+CFLAGS_l2tp_core.o += -I$(src)
+
+# Build l2tp as modules if L2TP is M
+obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_PPPOL2TP)) += l2tp_ppp.o
+obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_IP)) += l2tp_ip.o
+obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_V3)) += l2tp_netlink.o
+obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_ETH)) += l2tp_eth.o
+obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_DEBUGFS)) += l2tp_debugfs.o
+ifneq ($(CONFIG_IPV6),)
+obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_IP)) += l2tp_ip6.o
+endif
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
new file mode 100644
index 0000000000..8d21ff25f1
--- /dev/null
+++ b/net/l2tp/l2tp_core.c
@@ -0,0 +1,1723 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* L2TP core.
+ *
+ * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
+ *
+ * This file contains some code of the original L2TPv2 pppol2tp
+ * driver, which has the following copyright:
+ *
+ * Authors: Martijn van Oosterhout <kleptog@svana.org>
+ * James Chapman (jchapman@katalix.com)
+ * Contributors:
+ * Michal Ostrowski <mostrows@speakeasy.net>
+ * Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
+ * David S. Miller (davem@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/rculist.h>
+#include <linux/uaccess.h>
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/inetdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/l2tp.h>
+#include <linux/hash.h>
+#include <linux/sort.h>
+#include <linux/file.h>
+#include <linux/nsproxy.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <net/dst.h>
+#include <net/ip.h>
+#include <net/udp.h>
+#include <net/udp_tunnel.h>
+#include <net/inet_common.h>
+#include <net/xfrm.h>
+#include <net/protocol.h>
+#include <net/inet6_connection_sock.h>
+#include <net/inet_ecn.h>
+#include <net/ip6_route.h>
+#include <net/ip6_checksum.h>
+
+#include <asm/byteorder.h>
+#include <linux/atomic.h>
+
+#include "l2tp_core.h"
+#include "trace.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#define L2TP_DRV_VERSION "V2.0"
+
+/* L2TP header constants */
+#define L2TP_HDRFLAG_T 0x8000
+#define L2TP_HDRFLAG_L 0x4000
+#define L2TP_HDRFLAG_S 0x0800
+#define L2TP_HDRFLAG_O 0x0200
+#define L2TP_HDRFLAG_P 0x0100
+
+#define L2TP_HDR_VER_MASK 0x000F
+#define L2TP_HDR_VER_2 0x0002
+#define L2TP_HDR_VER_3 0x0003
+
+/* L2TPv3 default L2-specific sublayer */
+#define L2TP_SLFLAG_S 0x40000000
+#define L2TP_SL_SEQ_MASK 0x00ffffff
+
+#define L2TP_HDR_SIZE_MAX 14
+
+/* Default trace flags */
+#define L2TP_DEFAULT_DEBUG_FLAGS 0
+
+/* Private data stored for received packets in the skb.
+ */
+struct l2tp_skb_cb {
+ u32 ns;
+ u16 has_seq;
+ u16 length;
+ unsigned long expires;
+};
+
+#define L2TP_SKB_CB(skb) ((struct l2tp_skb_cb *)&(skb)->cb[sizeof(struct inet_skb_parm)])
+
+static struct workqueue_struct *l2tp_wq;
+
+/* per-net private data for this module */
+static unsigned int l2tp_net_id;
+struct l2tp_net {
+ /* Lock for write access to l2tp_tunnel_idr */
+ spinlock_t l2tp_tunnel_idr_lock;
+ struct idr l2tp_tunnel_idr;
+ struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2];
+ /* Lock for write access to l2tp_session_hlist */
+ spinlock_t l2tp_session_hlist_lock;
+};
+
+#if IS_ENABLED(CONFIG_IPV6)
+static bool l2tp_sk_is_v6(struct sock *sk)
+{
+ return sk->sk_family == PF_INET6 &&
+ !ipv6_addr_v4mapped(&sk->sk_v6_daddr);
+}
+#endif
+
+static inline struct l2tp_net *l2tp_pernet(const struct net *net)
+{
+ return net_generic(net, l2tp_net_id);
+}
+
+/* Session hash global list for L2TPv3.
+ * The session_id SHOULD be random according to RFC3931, but several
+ * L2TP implementations use incrementing session_ids. So we do a real
+ * hash on the session_id, rather than a simple bitmask.
+ */
+static inline struct hlist_head *
+l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
+{
+ return &pn->l2tp_session_hlist[hash_32(session_id, L2TP_HASH_BITS_2)];
+}
+
+/* Session hash list.
+ * The session_id SHOULD be random according to RFC2661, but several
+ * L2TP implementations (Cisco and Microsoft) use incrementing
+ * session_ids. So we do a real hash on the session_id, rather than a
+ * simple bitmask.
+ */
+static inline struct hlist_head *
+l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
+{
+ return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
+}
+
+static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
+{
+ trace_free_tunnel(tunnel);
+ sock_put(tunnel->sock);
+ /* the tunnel is freed in the socket destructor */
+}
+
+static void l2tp_session_free(struct l2tp_session *session)
+{
+ trace_free_session(session);
+ if (session->tunnel)
+ l2tp_tunnel_dec_refcount(session->tunnel);
+ kfree(session);
+}
+
+struct l2tp_tunnel *l2tp_sk_to_tunnel(struct sock *sk)
+{
+ struct l2tp_tunnel *tunnel = sk->sk_user_data;
+
+ if (tunnel)
+ if (WARN_ON(tunnel->magic != L2TP_TUNNEL_MAGIC))
+ return NULL;
+
+ return tunnel;
+}
+EXPORT_SYMBOL_GPL(l2tp_sk_to_tunnel);
+
+void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel)
+{
+ refcount_inc(&tunnel->ref_count);
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_inc_refcount);
+
+void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel)
+{
+ if (refcount_dec_and_test(&tunnel->ref_count))
+ l2tp_tunnel_free(tunnel);
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_dec_refcount);
+
+void l2tp_session_inc_refcount(struct l2tp_session *session)
+{
+ refcount_inc(&session->ref_count);
+}
+EXPORT_SYMBOL_GPL(l2tp_session_inc_refcount);
+
+void l2tp_session_dec_refcount(struct l2tp_session *session)
+{
+ if (refcount_dec_and_test(&session->ref_count))
+ l2tp_session_free(session);
+}
+EXPORT_SYMBOL_GPL(l2tp_session_dec_refcount);
+
+/* Lookup a tunnel. A new reference is held on the returned tunnel. */
+struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
+{
+ const struct l2tp_net *pn = l2tp_pernet(net);
+ struct l2tp_tunnel *tunnel;
+
+ rcu_read_lock_bh();
+ tunnel = idr_find(&pn->l2tp_tunnel_idr, tunnel_id);
+ if (tunnel && refcount_inc_not_zero(&tunnel->ref_count)) {
+ rcu_read_unlock_bh();
+ return tunnel;
+ }
+ rcu_read_unlock_bh();
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_get);
+
+struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)
+{
+ struct l2tp_net *pn = l2tp_pernet(net);
+ unsigned long tunnel_id, tmp;
+ struct l2tp_tunnel *tunnel;
+ int count = 0;
+
+ rcu_read_lock_bh();
+ idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
+ if (tunnel && ++count > nth &&
+ refcount_inc_not_zero(&tunnel->ref_count)) {
+ rcu_read_unlock_bh();
+ return tunnel;
+ }
+ }
+ rcu_read_unlock_bh();
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_get_nth);
+
+struct l2tp_session *l2tp_tunnel_get_session(struct l2tp_tunnel *tunnel,
+ u32 session_id)
+{
+ struct hlist_head *session_list;
+ struct l2tp_session *session;
+
+ session_list = l2tp_session_id_hash(tunnel, session_id);
+
+ rcu_read_lock_bh();
+ hlist_for_each_entry_rcu(session, session_list, hlist)
+ if (session->session_id == session_id) {
+ l2tp_session_inc_refcount(session);
+ rcu_read_unlock_bh();
+
+ return session;
+ }
+ rcu_read_unlock_bh();
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_get_session);
+
+struct l2tp_session *l2tp_session_get(const struct net *net, u32 session_id)
+{
+ struct hlist_head *session_list;
+ struct l2tp_session *session;
+
+ session_list = l2tp_session_id_hash_2(l2tp_pernet(net), session_id);
+
+ rcu_read_lock_bh();
+ hlist_for_each_entry_rcu(session, session_list, global_hlist)
+ if (session->session_id == session_id) {
+ l2tp_session_inc_refcount(session);
+ rcu_read_unlock_bh();
+
+ return session;
+ }
+ rcu_read_unlock_bh();
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(l2tp_session_get);
+
+struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth)
+{
+ int hash;
+ struct l2tp_session *session;
+ int count = 0;
+
+ rcu_read_lock_bh();
+ for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
+ hlist_for_each_entry_rcu(session, &tunnel->session_hlist[hash], hlist) {
+ if (++count > nth) {
+ l2tp_session_inc_refcount(session);
+ rcu_read_unlock_bh();
+ return session;
+ }
+ }
+ }
+
+ rcu_read_unlock_bh();
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
+
+/* Lookup a session by interface name.
+ * This is very inefficient but is only used by management interfaces.
+ */
+struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
+ const char *ifname)
+{
+ struct l2tp_net *pn = l2tp_pernet(net);
+ int hash;
+ struct l2tp_session *session;
+
+ rcu_read_lock_bh();
+ for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
+ hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
+ if (!strcmp(session->ifname, ifname)) {
+ l2tp_session_inc_refcount(session);
+ rcu_read_unlock_bh();
+
+ return session;
+ }
+ }
+ }
+
+ rcu_read_unlock_bh();
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);
+
+int l2tp_session_register(struct l2tp_session *session,
+ struct l2tp_tunnel *tunnel)
+{
+ struct l2tp_session *session_walk;
+ struct hlist_head *g_head;
+ struct hlist_head *head;
+ struct l2tp_net *pn;
+ int err;
+
+ head = l2tp_session_id_hash(tunnel, session->session_id);
+
+ spin_lock_bh(&tunnel->hlist_lock);
+ if (!tunnel->acpt_newsess) {
+ err = -ENODEV;
+ goto err_tlock;
+ }
+
+ hlist_for_each_entry(session_walk, head, hlist)
+ if (session_walk->session_id == session->session_id) {
+ err = -EEXIST;
+ goto err_tlock;
+ }
+
+ if (tunnel->version == L2TP_HDR_VER_3) {
+ pn = l2tp_pernet(tunnel->l2tp_net);
+ g_head = l2tp_session_id_hash_2(pn, session->session_id);
+
+ spin_lock_bh(&pn->l2tp_session_hlist_lock);
+
+ /* IP encap expects session IDs to be globally unique, while
+ * UDP encap doesn't.
+ */
+ hlist_for_each_entry(session_walk, g_head, global_hlist)
+ if (session_walk->session_id == session->session_id &&
+ (session_walk->tunnel->encap == L2TP_ENCAPTYPE_IP ||
+ tunnel->encap == L2TP_ENCAPTYPE_IP)) {
+ err = -EEXIST;
+ goto err_tlock_pnlock;
+ }
+
+ l2tp_tunnel_inc_refcount(tunnel);
+ hlist_add_head_rcu(&session->global_hlist, g_head);
+
+ spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+ } else {
+ l2tp_tunnel_inc_refcount(tunnel);
+ }
+
+ hlist_add_head_rcu(&session->hlist, head);
+ spin_unlock_bh(&tunnel->hlist_lock);
+
+ trace_register_session(session);
+
+ return 0;
+
+err_tlock_pnlock:
+ spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+err_tlock:
+ spin_unlock_bh(&tunnel->hlist_lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(l2tp_session_register);
+
+/*****************************************************************************
+ * Receive data handling
+ *****************************************************************************/
+
+/* Queue a skb in order. We come here only if the skb has an L2TP sequence
+ * number.
+ */
+static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb)
+{
+ struct sk_buff *skbp;
+ struct sk_buff *tmp;
+ u32 ns = L2TP_SKB_CB(skb)->ns;
+
+ spin_lock_bh(&session->reorder_q.lock);
+ skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
+ if (L2TP_SKB_CB(skbp)->ns > ns) {
+ __skb_queue_before(&session->reorder_q, skbp, skb);
+ atomic_long_inc(&session->stats.rx_oos_packets);
+ goto out;
+ }
+ }
+
+ __skb_queue_tail(&session->reorder_q, skb);
+
+out:
+ spin_unlock_bh(&session->reorder_q.lock);
+}
+
+/* Dequeue a single skb.
+ */
+static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb)
+{
+ struct l2tp_tunnel *tunnel = session->tunnel;
+ int length = L2TP_SKB_CB(skb)->length;
+
+ /* We're about to requeue the skb, so return resources
+ * to its current owner (a socket receive buffer).
+ */
+ skb_orphan(skb);
+
+ atomic_long_inc(&tunnel->stats.rx_packets);
+ atomic_long_add(length, &tunnel->stats.rx_bytes);
+ atomic_long_inc(&session->stats.rx_packets);
+ atomic_long_add(length, &session->stats.rx_bytes);
+
+ if (L2TP_SKB_CB(skb)->has_seq) {
+ /* Bump our Nr */
+ session->nr++;
+ session->nr &= session->nr_max;
+ trace_session_seqnum_update(session);
+ }
+
+ /* call private receive handler */
+ if (session->recv_skb)
+ (*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length);
+ else
+ kfree_skb(skb);
+}
+
+/* Dequeue skbs from the session's reorder_q, subject to packet order.
+ * Skbs that have been in the queue for too long are simply discarded.
+ */
+static void l2tp_recv_dequeue(struct l2tp_session *session)
+{
+ struct sk_buff *skb;
+ struct sk_buff *tmp;
+
+ /* If the pkt at the head of the queue has the nr that we
+ * expect to send up next, dequeue it and any other
+ * in-sequence packets behind it.
+ */
+start:
+ spin_lock_bh(&session->reorder_q.lock);
+ skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
+ struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);
+
+ /* If the packet has been pending on the queue for too long, discard it */
+ if (time_after(jiffies, cb->expires)) {
+ atomic_long_inc(&session->stats.rx_seq_discards);
+ atomic_long_inc(&session->stats.rx_errors);
+ trace_session_pkt_expired(session, cb->ns);
+ session->reorder_skip = 1;
+ __skb_unlink(skb, &session->reorder_q);
+ kfree_skb(skb);
+ continue;
+ }
+
+ if (cb->has_seq) {
+ if (session->reorder_skip) {
+ session->reorder_skip = 0;
+ session->nr = cb->ns;
+ trace_session_seqnum_reset(session);
+ }
+ if (cb->ns != session->nr)
+ goto out;
+ }
+ __skb_unlink(skb, &session->reorder_q);
+
+ /* Process the skb. We release the queue lock while we
+ * do so to let other contexts process the queue.
+ */
+ spin_unlock_bh(&session->reorder_q.lock);
+ l2tp_recv_dequeue_skb(session, skb);
+ goto start;
+ }
+
+out:
+ spin_unlock_bh(&session->reorder_q.lock);
+}
+
+static int l2tp_seq_check_rx_window(struct l2tp_session *session, u32 nr)
+{
+ u32 nws;
+
+ if (nr >= session->nr)
+ nws = nr - session->nr;
+ else
+ nws = (session->nr_max + 1) - (session->nr - nr);
+
+ return nws < session->nr_window_size;
+}
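As a worked illustration of the wraparound arithmetic above (an editorial sketch using hypothetical L2TPv2 values, not part of the patch), assume session->nr = 0xfffe, nr_max = 0xffff and nr_window_size = 0x7fff:

/* Incoming nr = 0x0001 is numerically behind session->nr, so the wrapped
 * distance applies:
 *   nws = (0xffff + 1) - (0xfffe - 0x0001) = 0x10000 - 0xfffd = 3
 * 3 < 0x7fff, so the packet is inside the receive window and accepted.
 *
 * Incoming nr = 0x7ffe gives
 *   nws = 0x10000 - (0xfffe - 0x7ffe) = 0x8000
 * which is not below 0x7fff, so the packet is rejected as outside the window.
 */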
+
+/* If packet has sequence numbers, queue it if acceptable. Returns 0 if
+ * acceptable, else non-zero.
+ */
+static int l2tp_recv_data_seq(struct l2tp_session *session, struct sk_buff *skb)
+{
+ struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);
+
+ if (!l2tp_seq_check_rx_window(session, cb->ns)) {
+ /* Packet sequence number is outside allowed window.
+ * Discard it.
+ */
+ trace_session_pkt_outside_rx_window(session, cb->ns);
+ goto discard;
+ }
+
+ if (session->reorder_timeout != 0) {
+ /* Packet reordering enabled. Add skb to session's
+ * reorder queue, in order of ns.
+ */
+ l2tp_recv_queue_skb(session, skb);
+ goto out;
+ }
+
+ /* Packet reordering disabled. Discard out-of-sequence packets, while
+ * tracking the number of in-sequence packets after the first OOS packet
+ * is seen. After nr_oos_count_max in-sequence packets, reset the
+ * sequence number to re-enable packet reception.
+ */
+ if (cb->ns == session->nr) {
+ skb_queue_tail(&session->reorder_q, skb);
+ } else {
+ u32 nr_oos = cb->ns;
+ u32 nr_next = (session->nr_oos + 1) & session->nr_max;
+
+ if (nr_oos == nr_next)
+ session->nr_oos_count++;
+ else
+ session->nr_oos_count = 0;
+
+ session->nr_oos = nr_oos;
+ if (session->nr_oos_count > session->nr_oos_count_max) {
+ session->reorder_skip = 1;
+ }
+ if (!session->reorder_skip) {
+ atomic_long_inc(&session->stats.rx_seq_discards);
+ trace_session_pkt_oos(session, cb->ns);
+ goto discard;
+ }
+ skb_queue_tail(&session->reorder_q, skb);
+ }
+
+out:
+ return 0;
+
+discard:
+ return 1;
+}
+
+/* Do receive processing of L2TP data frames. We handle both L2TPv2
+ * and L2TPv3 data frames here.
+ *
+ * L2TPv2 Data Message Header
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |T|L|x|x|S|x|O|P|x|x|x|x| Ver | Length (opt) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Tunnel ID | Session ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Ns (opt) | Nr (opt) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Offset Size (opt) | Offset pad... (opt)
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Data frames are marked by T=0. All other fields are the same as
+ * those in L2TP control frames.
+ *
+ * L2TPv3 Data Message Header
+ *
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | L2TP Session Header |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | L2-Specific Sublayer |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Tunnel Payload ...
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * L2TPv3 Session Header Over IP
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Session ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Cookie (optional, maximum 64 bits)...
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * L2TPv3 L2-Specific Sublayer Format
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |x|S|x|x|x|x|x|x| Sequence Number |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Cookie value and sublayer format are negotiated with the peer when
+ * the session is set up. Unlike L2TPv2, we do not need to parse the
+ * packet header to determine if optional fields are present.
+ *
+ * Caller must already have parsed the frame and determined that it is
+ * a data (not control) frame before coming here. Fields up to the
+ * session-id have already been parsed and ptr points to the data
+ * after the session-id.
+ */
+void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
+ unsigned char *ptr, unsigned char *optr, u16 hdrflags,
+ int length)
+{
+ struct l2tp_tunnel *tunnel = session->tunnel;
+ int offset;
+
+ /* Parse and check optional cookie */
+ if (session->peer_cookie_len > 0) {
+ if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
+ pr_debug_ratelimited("%s: cookie mismatch (%u/%u). Discarding.\n",
+ tunnel->name, tunnel->tunnel_id,
+ session->session_id);
+ atomic_long_inc(&session->stats.rx_cookie_discards);
+ goto discard;
+ }
+ ptr += session->peer_cookie_len;
+ }
+
+ /* Handle the optional sequence numbers. Sequence numbers are
+ * in different places for L2TPv2 and L2TPv3.
+ *
+ * If we are the LAC, enable/disable sequence numbers under
+ * the control of the LNS. If no sequence numbers present but
+ * we were expecting them, discard frame.
+ */
+ L2TP_SKB_CB(skb)->has_seq = 0;
+ if (tunnel->version == L2TP_HDR_VER_2) {
+ if (hdrflags & L2TP_HDRFLAG_S) {
+ /* Store L2TP info in the skb */
+ L2TP_SKB_CB(skb)->ns = ntohs(*(__be16 *)ptr);
+ L2TP_SKB_CB(skb)->has_seq = 1;
+ ptr += 2;
+ /* Skip past nr in the header */
+ ptr += 2;
+
+ }
+ } else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
+ u32 l2h = ntohl(*(__be32 *)ptr);
+
+ if (l2h & 0x40000000) {
+ /* Store L2TP info in the skb */
+ L2TP_SKB_CB(skb)->ns = l2h & 0x00ffffff;
+ L2TP_SKB_CB(skb)->has_seq = 1;
+ }
+ ptr += 4;
+ }
+
+ if (L2TP_SKB_CB(skb)->has_seq) {
+ /* Received a packet with sequence numbers. If we're the LAC,
+ * check if we are sending sequence numbers and if not,
+ * configure it so.
+ */
+ if (!session->lns_mode && !session->send_seq) {
+ trace_session_seqnum_lns_enable(session);
+ session->send_seq = 1;
+ l2tp_session_set_header_len(session, tunnel->version);
+ }
+ } else {
+ /* No sequence numbers.
+ * If user has configured mandatory sequence numbers, discard.
+ */
+ if (session->recv_seq) {
+ pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
+ session->name);
+ atomic_long_inc(&session->stats.rx_seq_discards);
+ goto discard;
+ }
+
+ /* If we're the LAC and we're sending sequence numbers, the
+ * LNS has requested that we no longer send sequence numbers.
+ * If we're the LNS and we're sending sequence numbers, the
+ * LAC is broken. Discard the frame.
+ */
+ if (!session->lns_mode && session->send_seq) {
+ trace_session_seqnum_lns_disable(session);
+ session->send_seq = 0;
+ l2tp_session_set_header_len(session, tunnel->version);
+ } else if (session->send_seq) {
+ pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
+ session->name);
+ atomic_long_inc(&session->stats.rx_seq_discards);
+ goto discard;
+ }
+ }
+
+ /* Session data offset is defined only for L2TPv2 and is
+ * indicated by an optional 16-bit value in the header.
+ */
+ if (tunnel->version == L2TP_HDR_VER_2) {
+ /* If offset bit set, skip it. */
+ if (hdrflags & L2TP_HDRFLAG_O) {
+ offset = ntohs(*(__be16 *)ptr);
+ ptr += 2 + offset;
+ }
+ }
+
+ offset = ptr - optr;
+ if (!pskb_may_pull(skb, offset))
+ goto discard;
+
+ __skb_pull(skb, offset);
+
+ /* Prepare skb for adding to the session's reorder_q. Hold
+ * packets for max reorder_timeout or 1 second if not
+ * reordering.
+ */
+ L2TP_SKB_CB(skb)->length = length;
+ L2TP_SKB_CB(skb)->expires = jiffies +
+ (session->reorder_timeout ? session->reorder_timeout : HZ);
+
+ /* Add packet to the session's receive queue. Reordering is done here, if
+ * enabled. Saved L2TP protocol info is stored in skb->cb[].
+ */
+ if (L2TP_SKB_CB(skb)->has_seq) {
+ if (l2tp_recv_data_seq(session, skb))
+ goto discard;
+ } else {
+ /* No sequence numbers. Add the skb to the tail of the
+ * reorder queue. This ensures that it will be
+ * delivered after all previous sequenced skbs.
+ */
+ skb_queue_tail(&session->reorder_q, skb);
+ }
+
+ /* Try to dequeue as many skbs from reorder_q as we can. */
+ l2tp_recv_dequeue(session);
+
+ return;
+
+discard:
+ atomic_long_inc(&session->stats.rx_errors);
+ kfree_skb(skb);
+}
+EXPORT_SYMBOL_GPL(l2tp_recv_common);
+
+/* Drop skbs from the session's reorder_q
+ */
+static void l2tp_session_queue_purge(struct l2tp_session *session)
+{
+ struct sk_buff *skb = NULL;
+
+ while ((skb = skb_dequeue(&session->reorder_q))) {
+ atomic_long_inc(&session->stats.rx_errors);
+ kfree_skb(skb);
+ }
+}
+
+/* Internal UDP receive frame. Do the real work of receiving an L2TP data frame
+ * here. The skb is not on a list when we get here.
+ * Returns 0 if the packet was a data packet and was successfully passed on.
+ * Returns 1 if the packet was not a good data packet and could not be
+ * forwarded. All such packets are passed up to userspace to deal with.
+ */
+static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
+{
+ struct l2tp_session *session = NULL;
+ unsigned char *ptr, *optr;
+ u16 hdrflags;
+ u32 tunnel_id, session_id;
+ u16 version;
+ int length;
+
+ /* UDP has verified checksum */
+
+ /* UDP always verifies the packet length. */
+ __skb_pull(skb, sizeof(struct udphdr));
+
+ /* Short packet? */
+ if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) {
+ pr_debug_ratelimited("%s: recv short packet (len=%d)\n",
+ tunnel->name, skb->len);
+ goto invalid;
+ }
+
+ /* Point to L2TP header */
+ optr = skb->data;
+ ptr = skb->data;
+
+ /* Get L2TP header flags */
+ hdrflags = ntohs(*(__be16 *)ptr);
+
+ /* Check protocol version */
+ version = hdrflags & L2TP_HDR_VER_MASK;
+ if (version != tunnel->version) {
+ pr_debug_ratelimited("%s: recv protocol version mismatch: got %d expected %d\n",
+ tunnel->name, version, tunnel->version);
+ goto invalid;
+ }
+
+ /* Get length of L2TP packet */
+ length = skb->len;
+
+ /* If type is control packet, it is handled by userspace. */
+ if (hdrflags & L2TP_HDRFLAG_T)
+ goto pass;
+
+ /* Skip flags */
+ ptr += 2;
+
+ if (tunnel->version == L2TP_HDR_VER_2) {
+ /* If length is present, skip it */
+ if (hdrflags & L2TP_HDRFLAG_L)
+ ptr += 2;
+
+ /* Extract tunnel and session ID */
+ tunnel_id = ntohs(*(__be16 *)ptr);
+ ptr += 2;
+ session_id = ntohs(*(__be16 *)ptr);
+ ptr += 2;
+ } else {
+ ptr += 2; /* skip reserved bits */
+ tunnel_id = tunnel->tunnel_id;
+ session_id = ntohl(*(__be32 *)ptr);
+ ptr += 4;
+ }
+
+ /* Find the session context */
+ session = l2tp_tunnel_get_session(tunnel, session_id);
+ if (!session || !session->recv_skb) {
+ if (session)
+ l2tp_session_dec_refcount(session);
+
+ /* Not found? Pass to userspace to deal with */
+ pr_debug_ratelimited("%s: no session found (%u/%u). Passing up.\n",
+ tunnel->name, tunnel_id, session_id);
+ goto pass;
+ }
+
+ if (tunnel->version == L2TP_HDR_VER_3 &&
+ l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) {
+ l2tp_session_dec_refcount(session);
+ goto invalid;
+ }
+
+ l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
+ l2tp_session_dec_refcount(session);
+
+ return 0;
+
+invalid:
+ atomic_long_inc(&tunnel->stats.rx_invalid);
+
+pass:
+ /* Put UDP header back */
+ __skb_push(skb, sizeof(struct udphdr));
+
+ return 1;
+}
+
+/* UDP encapsulation receive handler. See net/ipv4/udp.c.
+ * Return codes:
+ * 0 : success.
+ * <0: error
+ * >0: skb should be passed up to userspace as UDP.
+ */
+int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+{
+ struct l2tp_tunnel *tunnel;
+
+ /* Note that this is called from the encap_rcv hook inside an
+ * RCU-protected region, but without the socket being locked.
+ * Hence we use rcu_dereference_sk_user_data to access the
+ * tunnel data structure rather the usual l2tp_sk_to_tunnel
+ * accessor function.
+ */
+ tunnel = rcu_dereference_sk_user_data(sk);
+ if (!tunnel)
+ goto pass_up;
+ if (WARN_ON(tunnel->magic != L2TP_TUNNEL_MAGIC))
+ goto pass_up;
+
+ if (l2tp_udp_recv_core(tunnel, skb))
+ goto pass_up;
+
+ return 0;
+
+pass_up:
+ return 1;
+}
+EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);
+
+/************************************************************************
+ * Transmit handling
+ ***********************************************************************/
+
+/* Build an L2TP header for the session into the buffer provided.
+ */
+static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
+{
+ struct l2tp_tunnel *tunnel = session->tunnel;
+ __be16 *bufp = buf;
+ __be16 *optr = buf;
+ u16 flags = L2TP_HDR_VER_2;
+ u32 tunnel_id = tunnel->peer_tunnel_id;
+ u32 session_id = session->peer_session_id;
+
+ if (session->send_seq)
+ flags |= L2TP_HDRFLAG_S;
+
+ /* Setup L2TP header. */
+ *bufp++ = htons(flags);
+ *bufp++ = htons(tunnel_id);
+ *bufp++ = htons(session_id);
+ if (session->send_seq) {
+ *bufp++ = htons(session->ns);
+ *bufp++ = 0;
+ session->ns++;
+ session->ns &= 0xffff;
+ trace_session_seqnum_update(session);
+ }
+
+ return bufp - optr;
+}
+
+static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
+{
+ struct l2tp_tunnel *tunnel = session->tunnel;
+ char *bufp = buf;
+ char *optr = bufp;
+
+ /* Setup L2TP header. The header differs slightly for UDP and
+ * IP encapsulations. For UDP, there is 4 bytes of flags.
+ */
+ if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
+ u16 flags = L2TP_HDR_VER_3;
+ *((__be16 *)bufp) = htons(flags);
+ bufp += 2;
+ *((__be16 *)bufp) = 0;
+ bufp += 2;
+ }
+
+ *((__be32 *)bufp) = htonl(session->peer_session_id);
+ bufp += 4;
+ if (session->cookie_len) {
+ memcpy(bufp, &session->cookie[0], session->cookie_len);
+ bufp += session->cookie_len;
+ }
+ if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
+ u32 l2h = 0;
+
+ if (session->send_seq) {
+ l2h = 0x40000000 | session->ns;
+ session->ns++;
+ session->ns &= 0xffffff;
+ trace_session_seqnum_update(session);
+ }
+
+ *((__be32 *)bufp) = htonl(l2h);
+ bufp += 4;
+ }
+
+ return bufp - optr;
+}
+
+/* Queue the packet to IP for output: tunnel socket lock must be held */
+static int l2tp_xmit_queue(struct l2tp_tunnel *tunnel, struct sk_buff *skb, struct flowi *fl)
+{
+ int err;
+
+ skb->ignore_df = 1;
+ skb_dst_drop(skb);
+#if IS_ENABLED(CONFIG_IPV6)
+ if (l2tp_sk_is_v6(tunnel->sock))
+ err = inet6_csk_xmit(tunnel->sock, skb, NULL);
+ else
+#endif
+ err = ip_queue_xmit(tunnel->sock, skb, fl);
+
+ return err >= 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
+}
+
+static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, unsigned int *len)
+{
+ struct l2tp_tunnel *tunnel = session->tunnel;
+ unsigned int data_len = skb->len;
+ struct sock *sk = tunnel->sock;
+ int headroom, uhlen, udp_len;
+ int ret = NET_XMIT_SUCCESS;
+ struct inet_sock *inet;
+ struct udphdr *uh;
+
+ /* Check that there's enough headroom in the skb to insert IP,
+ * UDP and L2TP headers. If not enough, expand it to
+ * make room. Adjust truesize.
+ */
+ uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(*uh) : 0;
+ headroom = NET_SKB_PAD + sizeof(struct iphdr) + uhlen + session->hdr_len;
+ if (skb_cow_head(skb, headroom)) {
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+ }
+
+ /* Setup L2TP header */
+ if (tunnel->version == L2TP_HDR_VER_2)
+ l2tp_build_l2tpv2_header(session, __skb_push(skb, session->hdr_len));
+ else
+ l2tp_build_l2tpv3_header(session, __skb_push(skb, session->hdr_len));
+
+ /* Reset skb netfilter state */
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+ IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);
+ nf_reset_ct(skb);
+
+ bh_lock_sock_nested(sk);
+ if (sock_owned_by_user(sk)) {
+ kfree_skb(skb);
+ ret = NET_XMIT_DROP;
+ goto out_unlock;
+ }
+
+ /* Userspace may change the connection status of a userspace-provided
+ * socket at run time: we must check it under the socket lock
+ */
+ if (tunnel->fd >= 0 && sk->sk_state != TCP_ESTABLISHED) {
+ kfree_skb(skb);
+ ret = NET_XMIT_DROP;
+ goto out_unlock;
+ }
+
+ /* Report transmitted length before we add encap header, which keeps
+ * statistics consistent for both UDP and IP encap tx/rx paths.
+ */
+ *len = skb->len;
+
+ inet = inet_sk(sk);
+ switch (tunnel->encap) {
+ case L2TP_ENCAPTYPE_UDP:
+ /* Setup UDP header */
+ __skb_push(skb, sizeof(*uh));
+ skb_reset_transport_header(skb);
+ uh = udp_hdr(skb);
+ uh->source = inet->inet_sport;
+ uh->dest = inet->inet_dport;
+ udp_len = uhlen + session->hdr_len + data_len;
+ uh->len = htons(udp_len);
+
+ /* Calculate UDP checksum if configured to do so */
+#if IS_ENABLED(CONFIG_IPV6)
+ if (l2tp_sk_is_v6(sk))
+ udp6_set_csum(udp_get_no_check6_tx(sk),
+ skb, &inet6_sk(sk)->saddr,
+ &sk->sk_v6_daddr, udp_len);
+ else
+#endif
+ udp_set_csum(sk->sk_no_check_tx, skb, inet->inet_saddr,
+ inet->inet_daddr, udp_len);
+ break;
+
+ case L2TP_ENCAPTYPE_IP:
+ break;
+ }
+
+ ret = l2tp_xmit_queue(tunnel, skb, &inet->cork.fl);
+
+out_unlock:
+ bh_unlock_sock(sk);
+
+ return ret;
+}
+
+/* If caller requires the skb to have a ppp header, the header must be
+ * inserted in the skb data before calling this function.
+ */
+int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb)
+{
+ unsigned int len = 0;
+ int ret;
+
+ ret = l2tp_xmit_core(session, skb, &len);
+ if (ret == NET_XMIT_SUCCESS) {
+ atomic_long_inc(&session->tunnel->stats.tx_packets);
+ atomic_long_add(len, &session->tunnel->stats.tx_bytes);
+ atomic_long_inc(&session->stats.tx_packets);
+ atomic_long_add(len, &session->stats.tx_bytes);
+ } else {
+ atomic_long_inc(&session->tunnel->stats.tx_errors);
+ atomic_long_inc(&session->stats.tx_errors);
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
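As the comment before l2tp_xmit_skb() notes, any pseudowire framing (for example a PPP protocol field) must already be in the skb when the function is called. A minimal editorial sketch of such a caller, not part of the patch and using hypothetical names, could look like this:

/* Hypothetical pseudowire transmit helper: prepend a 2-byte PPP protocol ID,
 * then hand the skb to the L2TP core, which builds the L2TP header and
 * queues the packet on the tunnel socket.
 */
static int example_ppp_pw_xmit(struct l2tp_session *session,
			       struct sk_buff *skb, u16 ppp_proto)
{
	u8 *p;

	/* Make room for the PPP protocol field; drop the frame on failure. */
	if (skb_cow_head(skb, 2)) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	p = __skb_push(skb, 2);
	p[0] = ppp_proto >> 8;
	p[1] = ppp_proto & 0xff;

	return l2tp_xmit_skb(session, skb);
}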
+
+/*****************************************************************************
+ * Tunnel and session create/destroy.
+ *****************************************************************************/
+
+/* Tunnel socket destruct hook.
+ * The tunnel context is deleted only when all session sockets have been
+ * closed.
+ */
+static void l2tp_tunnel_destruct(struct sock *sk)
+{
+ struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);
+
+ if (!tunnel)
+ goto end;
+
+ /* Disable udp encapsulation */
+ switch (tunnel->encap) {
+ case L2TP_ENCAPTYPE_UDP:
+ /* No longer an encapsulation socket. See net/ipv4/udp.c */
+ WRITE_ONCE(udp_sk(sk)->encap_type, 0);
+ udp_sk(sk)->encap_rcv = NULL;
+ udp_sk(sk)->encap_destroy = NULL;
+ break;
+ case L2TP_ENCAPTYPE_IP:
+ break;
+ }
+
+ /* Remove hooks into tunnel socket */
+ write_lock_bh(&sk->sk_callback_lock);
+ sk->sk_destruct = tunnel->old_sk_destruct;
+ sk->sk_user_data = NULL;
+ write_unlock_bh(&sk->sk_callback_lock);
+
+ /* Call the original destructor */
+ if (sk->sk_destruct)
+ (*sk->sk_destruct)(sk);
+
+ kfree_rcu(tunnel, rcu);
+end:
+ return;
+}
+
+/* Remove an l2tp session from l2tp_core's hash lists. */
+static void l2tp_session_unhash(struct l2tp_session *session)
+{
+ struct l2tp_tunnel *tunnel = session->tunnel;
+
+ /* Remove the session from core hashes */
+ if (tunnel) {
+ /* Remove from the per-tunnel hash */
+ spin_lock_bh(&tunnel->hlist_lock);
+ hlist_del_init_rcu(&session->hlist);
+ spin_unlock_bh(&tunnel->hlist_lock);
+
+ /* For L2TPv3 we have a per-net hash: remove from there, too */
+ if (tunnel->version != L2TP_HDR_VER_2) {
+ struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
+
+ spin_lock_bh(&pn->l2tp_session_hlist_lock);
+ hlist_del_init_rcu(&session->global_hlist);
+ spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+ }
+
+ synchronize_rcu();
+ }
+}
+
+/* When the tunnel is closed, all the attached sessions need to go too.
+ */
+static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
+{
+ struct l2tp_session *session;
+ int hash;
+
+ spin_lock_bh(&tunnel->hlist_lock);
+ tunnel->acpt_newsess = false;
+ for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
+again:
+ hlist_for_each_entry_rcu(session, &tunnel->session_hlist[hash], hlist) {
+ hlist_del_init_rcu(&session->hlist);
+
+ spin_unlock_bh(&tunnel->hlist_lock);
+ l2tp_session_delete(session);
+ spin_lock_bh(&tunnel->hlist_lock);
+
+ /* Now restart from the beginning of this hash
+ * chain. We always remove a session from the
+ * list so we are guaranteed to make forward
+ * progress.
+ */
+ goto again;
+ }
+ }
+ spin_unlock_bh(&tunnel->hlist_lock);
+}
+
+/* Tunnel socket destroy hook for UDP encapsulation */
+static void l2tp_udp_encap_destroy(struct sock *sk)
+{
+ struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);
+
+ if (tunnel)
+ l2tp_tunnel_delete(tunnel);
+}
+
+static void l2tp_tunnel_remove(struct net *net, struct l2tp_tunnel *tunnel)
+{
+ struct l2tp_net *pn = l2tp_pernet(net);
+
+ spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
+ idr_remove(&pn->l2tp_tunnel_idr, tunnel->tunnel_id);
+ spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
+}
+
+/* Workqueue tunnel deletion function */
+static void l2tp_tunnel_del_work(struct work_struct *work)
+{
+ struct l2tp_tunnel *tunnel = container_of(work, struct l2tp_tunnel,
+ del_work);
+ struct sock *sk = tunnel->sock;
+ struct socket *sock = sk->sk_socket;
+
+ l2tp_tunnel_closeall(tunnel);
+
+ /* If the tunnel socket was created within the kernel, use
+ * the sk API to release it here.
+ */
+ if (tunnel->fd < 0) {
+ if (sock) {
+ kernel_sock_shutdown(sock, SHUT_RDWR);
+ sock_release(sock);
+ }
+ }
+
+ l2tp_tunnel_remove(tunnel->l2tp_net, tunnel);
+ /* drop initial ref */
+ l2tp_tunnel_dec_refcount(tunnel);
+
+ /* drop workqueue ref */
+ l2tp_tunnel_dec_refcount(tunnel);
+}
+
+/* Create a socket for the tunnel, if one isn't set up by
+ * userspace. This is used for static tunnels where there is no
+ * managing L2TP daemon.
+ *
+ * Since we don't want these sockets to keep a namespace alive by
+ * themselves, we drop the socket's namespace refcount after creation.
+ * These sockets are freed when the namespace exits using the pernet
+ * exit hook.
+ */
+static int l2tp_tunnel_sock_create(struct net *net,
+ u32 tunnel_id,
+ u32 peer_tunnel_id,
+ struct l2tp_tunnel_cfg *cfg,
+ struct socket **sockp)
+{
+ int err = -EINVAL;
+ struct socket *sock = NULL;
+ struct udp_port_cfg udp_conf;
+
+ switch (cfg->encap) {
+ case L2TP_ENCAPTYPE_UDP:
+ memset(&udp_conf, 0, sizeof(udp_conf));
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (cfg->local_ip6 && cfg->peer_ip6) {
+ udp_conf.family = AF_INET6;
+ memcpy(&udp_conf.local_ip6, cfg->local_ip6,
+ sizeof(udp_conf.local_ip6));
+ memcpy(&udp_conf.peer_ip6, cfg->peer_ip6,
+ sizeof(udp_conf.peer_ip6));
+ udp_conf.use_udp6_tx_checksums =
+ !cfg->udp6_zero_tx_checksums;
+ udp_conf.use_udp6_rx_checksums =
+ !cfg->udp6_zero_rx_checksums;
+ } else
+#endif
+ {
+ udp_conf.family = AF_INET;
+ udp_conf.local_ip = cfg->local_ip;
+ udp_conf.peer_ip = cfg->peer_ip;
+ udp_conf.use_udp_checksums = cfg->use_udp_checksums;
+ }
+
+ udp_conf.local_udp_port = htons(cfg->local_udp_port);
+ udp_conf.peer_udp_port = htons(cfg->peer_udp_port);
+
+ err = udp_sock_create(net, &udp_conf, &sock);
+ if (err < 0)
+ goto out;
+
+ break;
+
+ case L2TP_ENCAPTYPE_IP:
+#if IS_ENABLED(CONFIG_IPV6)
+ if (cfg->local_ip6 && cfg->peer_ip6) {
+ struct sockaddr_l2tpip6 ip6_addr = {0};
+
+ err = sock_create_kern(net, AF_INET6, SOCK_DGRAM,
+ IPPROTO_L2TP, &sock);
+ if (err < 0)
+ goto out;
+
+ ip6_addr.l2tp_family = AF_INET6;
+ memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
+ sizeof(ip6_addr.l2tp_addr));
+ ip6_addr.l2tp_conn_id = tunnel_id;
+ err = kernel_bind(sock, (struct sockaddr *)&ip6_addr,
+ sizeof(ip6_addr));
+ if (err < 0)
+ goto out;
+
+ ip6_addr.l2tp_family = AF_INET6;
+ memcpy(&ip6_addr.l2tp_addr, cfg->peer_ip6,
+ sizeof(ip6_addr.l2tp_addr));
+ ip6_addr.l2tp_conn_id = peer_tunnel_id;
+ err = kernel_connect(sock,
+ (struct sockaddr *)&ip6_addr,
+ sizeof(ip6_addr), 0);
+ if (err < 0)
+ goto out;
+ } else
+#endif
+ {
+ struct sockaddr_l2tpip ip_addr = {0};
+
+ err = sock_create_kern(net, AF_INET, SOCK_DGRAM,
+ IPPROTO_L2TP, &sock);
+ if (err < 0)
+ goto out;
+
+ ip_addr.l2tp_family = AF_INET;
+ ip_addr.l2tp_addr = cfg->local_ip;
+ ip_addr.l2tp_conn_id = tunnel_id;
+ err = kernel_bind(sock, (struct sockaddr *)&ip_addr,
+ sizeof(ip_addr));
+ if (err < 0)
+ goto out;
+
+ ip_addr.l2tp_family = AF_INET;
+ ip_addr.l2tp_addr = cfg->peer_ip;
+ ip_addr.l2tp_conn_id = peer_tunnel_id;
+ err = kernel_connect(sock, (struct sockaddr *)&ip_addr,
+ sizeof(ip_addr), 0);
+ if (err < 0)
+ goto out;
+ }
+ break;
+
+ default:
+ goto out;
+ }
+
+out:
+ *sockp = sock;
+ if (err < 0 && sock) {
+ kernel_sock_shutdown(sock, SHUT_RDWR);
+ sock_release(sock);
+ *sockp = NULL;
+ }
+
+ return err;
+}
+
+int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id,
+ struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
+{
+ struct l2tp_tunnel *tunnel = NULL;
+ int err;
+ enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;
+
+ if (cfg)
+ encap = cfg->encap;
+
+ tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
+ if (!tunnel) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ tunnel->version = version;
+ tunnel->tunnel_id = tunnel_id;
+ tunnel->peer_tunnel_id = peer_tunnel_id;
+
+ tunnel->magic = L2TP_TUNNEL_MAGIC;
+ sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
+ spin_lock_init(&tunnel->hlist_lock);
+ tunnel->acpt_newsess = true;
+
+ tunnel->encap = encap;
+
+ refcount_set(&tunnel->ref_count, 1);
+ tunnel->fd = fd;
+
+ /* Init delete workqueue struct */
+ INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);
+
+ INIT_LIST_HEAD(&tunnel->list);
+
+ err = 0;
+err:
+ if (tunnelp)
+ *tunnelp = tunnel;
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
+
+static int l2tp_validate_socket(const struct sock *sk, const struct net *net,
+ enum l2tp_encap_type encap)
+{
+ if (!net_eq(sock_net(sk), net))
+ return -EINVAL;
+
+ if (sk->sk_type != SOCK_DGRAM)
+ return -EPROTONOSUPPORT;
+
+ if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
+ return -EPROTONOSUPPORT;
+
+ if ((encap == L2TP_ENCAPTYPE_UDP && sk->sk_protocol != IPPROTO_UDP) ||
+ (encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP))
+ return -EPROTONOSUPPORT;
+
+ if (sk->sk_user_data)
+ return -EBUSY;
+
+ return 0;
+}
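l2tp_validate_socket() above spells out what the kernel expects of a tunnel socket supplied by userspace. As an editorial illustration only (not part of the patch; the helper name, addresses and error handling are hypothetical), a daemon managing a UDP-encapsulated tunnel would typically hand over the fd of a bound, connected UDP datagram socket:

#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>

/* Create the kind of socket l2tp_validate_socket() accepts for UDP encap:
 * AF_INET, SOCK_DGRAM, IPPROTO_UDP, bound locally and connected to the peer.
 */
static int example_make_tunnel_fd(const char *local_ip, const char *peer_ip)
{
	struct sockaddr_in local = { .sin_family = AF_INET, .sin_port = htons(1701) };
	struct sockaddr_in peer = { .sin_family = AF_INET, .sin_port = htons(1701) };
	int fd;

	inet_pton(AF_INET, local_ip, &local.sin_addr);
	inet_pton(AF_INET, peer_ip, &peer.sin_addr);

	fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
	if (fd < 0)
		return -1;

	if (bind(fd, (struct sockaddr *)&local, sizeof(local)) < 0 ||
	    connect(fd, (struct sockaddr *)&peer, sizeof(peer)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}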
+
+int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
+ struct l2tp_tunnel_cfg *cfg)
+{
+ struct l2tp_net *pn = l2tp_pernet(net);
+ u32 tunnel_id = tunnel->tunnel_id;
+ struct socket *sock;
+ struct sock *sk;
+ int ret;
+
+ spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
+ ret = idr_alloc_u32(&pn->l2tp_tunnel_idr, NULL, &tunnel_id, tunnel_id,
+ GFP_ATOMIC);
+ spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
+ if (ret)
+ return ret == -ENOSPC ? -EEXIST : ret;
+
+ if (tunnel->fd < 0) {
+ ret = l2tp_tunnel_sock_create(net, tunnel->tunnel_id,
+ tunnel->peer_tunnel_id, cfg,
+ &sock);
+ if (ret < 0)
+ goto err;
+ } else {
+ sock = sockfd_lookup(tunnel->fd, &ret);
+ if (!sock)
+ goto err;
+ }
+
+ sk = sock->sk;
+ lock_sock(sk);
+ write_lock_bh(&sk->sk_callback_lock);
+ ret = l2tp_validate_socket(sk, net, tunnel->encap);
+ if (ret < 0)
+ goto err_inval_sock;
+ rcu_assign_sk_user_data(sk, tunnel);
+ write_unlock_bh(&sk->sk_callback_lock);
+
+ if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
+ struct udp_tunnel_sock_cfg udp_cfg = {
+ .sk_user_data = tunnel,
+ .encap_type = UDP_ENCAP_L2TPINUDP,
+ .encap_rcv = l2tp_udp_encap_recv,
+ .encap_destroy = l2tp_udp_encap_destroy,
+ };
+
+ setup_udp_tunnel_sock(net, sock, &udp_cfg);
+ }
+
+ tunnel->old_sk_destruct = sk->sk_destruct;
+ sk->sk_destruct = &l2tp_tunnel_destruct;
+ sk->sk_allocation = GFP_ATOMIC;
+ release_sock(sk);
+
+ sock_hold(sk);
+ tunnel->sock = sk;
+ tunnel->l2tp_net = net;
+
+ spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
+ idr_replace(&pn->l2tp_tunnel_idr, tunnel, tunnel->tunnel_id);
+ spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
+
+ trace_register_tunnel(tunnel);
+
+ if (tunnel->fd >= 0)
+ sockfd_put(sock);
+
+ return 0;
+
+err_inval_sock:
+ write_unlock_bh(&sk->sk_callback_lock);
+ release_sock(sk);
+
+ if (tunnel->fd < 0)
+ sock_release(sock);
+ else
+ sockfd_put(sock);
+err:
+ l2tp_tunnel_remove(net, tunnel);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_register);
+
+/* This function is used by the netlink TUNNEL_DELETE command.
+ */
+void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
+{
+ if (!test_and_set_bit(0, &tunnel->dead)) {
+ trace_delete_tunnel(tunnel);
+ l2tp_tunnel_inc_refcount(tunnel);
+ queue_work(l2tp_wq, &tunnel->del_work);
+ }
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
+
+void l2tp_session_delete(struct l2tp_session *session)
+{
+ if (test_and_set_bit(0, &session->dead))
+ return;
+
+ trace_delete_session(session);
+ l2tp_session_unhash(session);
+ l2tp_session_queue_purge(session);
+ if (session->session_close)
+ (*session->session_close)(session);
+
+ l2tp_session_dec_refcount(session);
+}
+EXPORT_SYMBOL_GPL(l2tp_session_delete);
+
+/* We come here whenever a session's send_seq, cookie_len or
+ * l2specific_type parameters are set.
+ */
+void l2tp_session_set_header_len(struct l2tp_session *session, int version)
+{
+ if (version == L2TP_HDR_VER_2) {
+ session->hdr_len = 6;
+ if (session->send_seq)
+ session->hdr_len += 4;
+ } else {
+ session->hdr_len = 4 + session->cookie_len;
+ session->hdr_len += l2tp_get_l2specific_len(session);
+ if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
+ session->hdr_len += 4;
+ }
+}
+EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
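To make the header sizing above concrete, a small editorial illustration (not part of the patch) with hypothetical session parameters:

/* L2TPv2, send_seq enabled:        hdr_len = 6 + 4 (Ns/Nr)             = 10 bytes
 * L2TPv3 over UDP, 4-byte cookie,
 * default L2-specific sublayer:    hdr_len = 4 + 4 + 4 + 4 (UDP flags) = 16 bytes
 * L2TPv3 over IP, no cookie,
 * no L2-specific sublayer:         hdr_len = 4                         =  4 bytes
 */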
+
+struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id,
+ u32 peer_session_id, struct l2tp_session_cfg *cfg)
+{
+ struct l2tp_session *session;
+
+ session = kzalloc(sizeof(*session) + priv_size, GFP_KERNEL);
+ if (session) {
+ session->magic = L2TP_SESSION_MAGIC;
+ session->tunnel = tunnel;
+
+ session->session_id = session_id;
+ session->peer_session_id = peer_session_id;
+ session->nr = 0;
+ if (tunnel->version == L2TP_HDR_VER_2)
+ session->nr_max = 0xffff;
+ else
+ session->nr_max = 0xffffff;
+ session->nr_window_size = session->nr_max / 2;
+ session->nr_oos_count_max = 4;
+
+ /* Use NR of first received packet */
+ session->reorder_skip = 1;
+
+ sprintf(&session->name[0], "sess %u/%u",
+ tunnel->tunnel_id, session->session_id);
+
+ skb_queue_head_init(&session->reorder_q);
+
+ INIT_HLIST_NODE(&session->hlist);
+ INIT_HLIST_NODE(&session->global_hlist);
+
+ if (cfg) {
+ session->pwtype = cfg->pw_type;
+ session->send_seq = cfg->send_seq;
+ session->recv_seq = cfg->recv_seq;
+ session->lns_mode = cfg->lns_mode;
+ session->reorder_timeout = cfg->reorder_timeout;
+ session->l2specific_type = cfg->l2specific_type;
+ session->cookie_len = cfg->cookie_len;
+ memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len);
+ session->peer_cookie_len = cfg->peer_cookie_len;
+ memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len);
+ }
+
+ l2tp_session_set_header_len(session, tunnel->version);
+
+ refcount_set(&session->ref_count, 1);
+
+ return session;
+ }
+
+ return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL_GPL(l2tp_session_create);
+
+/*****************************************************************************
+ * Init and cleanup
+ *****************************************************************************/
+
+static __net_init int l2tp_init_net(struct net *net)
+{
+ struct l2tp_net *pn = net_generic(net, l2tp_net_id);
+ int hash;
+
+ idr_init(&pn->l2tp_tunnel_idr);
+ spin_lock_init(&pn->l2tp_tunnel_idr_lock);
+
+ for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
+ INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]);
+
+ spin_lock_init(&pn->l2tp_session_hlist_lock);
+
+ return 0;
+}
+
+static __net_exit void l2tp_exit_net(struct net *net)
+{
+ struct l2tp_net *pn = l2tp_pernet(net);
+ struct l2tp_tunnel *tunnel = NULL;
+ unsigned long tunnel_id, tmp;
+ int hash;
+
+ rcu_read_lock_bh();
+ idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
+ if (tunnel)
+ l2tp_tunnel_delete(tunnel);
+ }
+ rcu_read_unlock_bh();
+
+ if (l2tp_wq)
+ flush_workqueue(l2tp_wq);
+ rcu_barrier();
+
+ for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
+ WARN_ON_ONCE(!hlist_empty(&pn->l2tp_session_hlist[hash]));
+ idr_destroy(&pn->l2tp_tunnel_idr);
+}
+
+static struct pernet_operations l2tp_net_ops = {
+ .init = l2tp_init_net,
+ .exit = l2tp_exit_net,
+ .id = &l2tp_net_id,
+ .size = sizeof(struct l2tp_net),
+};
+
+static int __init l2tp_init(void)
+{
+ int rc = 0;
+
+ rc = register_pernet_device(&l2tp_net_ops);
+ if (rc)
+ goto out;
+
+ l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0);
+ if (!l2tp_wq) {
+ pr_err("alloc_workqueue failed\n");
+ unregister_pernet_device(&l2tp_net_ops);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION);
+
+out:
+ return rc;
+}
+
+static void __exit l2tp_exit(void)
+{
+ unregister_pernet_device(&l2tp_net_ops);
+ if (l2tp_wq) {
+ destroy_workqueue(l2tp_wq);
+ l2tp_wq = NULL;
+ }
+}
+
+module_init(l2tp_init);
+module_exit(l2tp_exit);
+
+MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
+MODULE_DESCRIPTION("L2TP core");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(L2TP_DRV_VERSION);
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
new file mode 100644
index 0000000000..91ebf0a3f4
--- /dev/null
+++ b/net/l2tp/l2tp_core.h
@@ -0,0 +1,346 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* L2TP internal definitions.
+ *
+ * Copyright (c) 2008,2009 Katalix Systems Ltd
+ */
+#include <linux/refcount.h>
+
+#ifndef _L2TP_CORE_H_
+#define _L2TP_CORE_H_
+
+#include <net/dst.h>
+#include <net/sock.h>
+
+#ifdef CONFIG_XFRM
+#include <net/xfrm.h>
+#endif
+
+/* Random numbers used for internal consistency checks of tunnel and session structures */
+#define L2TP_TUNNEL_MAGIC 0x42114DDA
+#define L2TP_SESSION_MAGIC 0x0C04EB7D
+
+/* Per tunnel session hash table size */
+#define L2TP_HASH_BITS 4
+#define L2TP_HASH_SIZE BIT(L2TP_HASH_BITS)
+
+/* System-wide session hash table size */
+#define L2TP_HASH_BITS_2 8
+#define L2TP_HASH_SIZE_2 BIT(L2TP_HASH_BITS_2)
+
+struct sk_buff;
+
+struct l2tp_stats {
+ atomic_long_t tx_packets;
+ atomic_long_t tx_bytes;
+ atomic_long_t tx_errors;
+ atomic_long_t rx_packets;
+ atomic_long_t rx_bytes;
+ atomic_long_t rx_seq_discards;
+ atomic_long_t rx_oos_packets;
+ atomic_long_t rx_errors;
+ atomic_long_t rx_cookie_discards;
+ atomic_long_t rx_invalid;
+};
+
+struct l2tp_tunnel;
+
+/* L2TP session configuration */
+struct l2tp_session_cfg {
+ enum l2tp_pwtype pw_type;
+ unsigned int recv_seq:1; /* expect receive packets with sequence numbers? */
+ unsigned int send_seq:1; /* send packets with sequence numbers? */
+ unsigned int lns_mode:1; /* behave as LNS?
+ * LAC enables sequence numbers under LNS control.
+ */
+ u16 l2specific_type; /* Layer 2 specific type */
+ u8 cookie[8]; /* optional cookie */
+ int cookie_len; /* 0, 4 or 8 bytes */
+ u8 peer_cookie[8]; /* peer's cookie */
+ int peer_cookie_len; /* 0, 4 or 8 bytes */
+ int reorder_timeout; /* configured reorder timeout (in jiffies) */
+ char *ifname;
+};
+
+/* Represents a session (pseudowire) instance.
+ * Tracks runtime state including cookies, dataplane packet sequencing, and IO statistics.
+ * Is linked into a per-tunnel session hashlist; and in the case of an L2TPv3 session into
+ * an additional per-net ("global") hashlist.
+ */
+#define L2TP_SESSION_NAME_MAX 32
+struct l2tp_session {
+ int magic; /* should be L2TP_SESSION_MAGIC */
+ long dead;
+
+ struct l2tp_tunnel *tunnel; /* back pointer to tunnel context */
+ u32 session_id;
+ u32 peer_session_id;
+ u8 cookie[8];
+ int cookie_len;
+ u8 peer_cookie[8];
+ int peer_cookie_len;
+ u16 l2specific_type;
+ u16 hdr_len;
+ u32 nr; /* session NR state (receive) */
+ u32 ns; /* session NR state (send) */
+ struct sk_buff_head reorder_q; /* receive reorder queue */
+ u32 nr_max; /* max NR. Depends on tunnel */
+ u32 nr_window_size; /* NR window size */
+ u32 nr_oos; /* NR of last OOS packet */
+ int nr_oos_count; /* for OOS recovery */
+ int nr_oos_count_max;
+ struct hlist_node hlist; /* hash list node */
+ refcount_t ref_count;
+
+ char name[L2TP_SESSION_NAME_MAX]; /* for logging */
+ char ifname[IFNAMSIZ];
+ unsigned int recv_seq:1; /* expect receive packets with sequence numbers? */
+ unsigned int send_seq:1; /* send packets with sequence numbers? */
+ unsigned int lns_mode:1; /* behave as LNS?
+ * LAC enables sequence numbers under LNS control.
+ */
+ int reorder_timeout; /* configured reorder timeout (in jiffies) */
+ int reorder_skip; /* set if skip to next nr */
+ enum l2tp_pwtype pwtype;
+ struct l2tp_stats stats;
+ struct hlist_node global_hlist; /* global hash list node */
+
+ /* Session receive handler for data packets.
+ * Each pseudowire implementation should implement this callback in order to
+ * handle incoming packets. Packets are passed to the pseudowire handler after
+ * reordering, if data sequence numbers are enabled for the session.
+ */
+ void (*recv_skb)(struct l2tp_session *session, struct sk_buff *skb, int data_len);
+
+ /* Session close handler.
+ * Each pseudowire implementation may implement this callback in order to carry
+ * out pseudowire-specific shutdown actions.
+ * The callback is called by core after unhashing the session and purging its
+ * reorder queue.
+ */
+ void (*session_close)(struct l2tp_session *session);
+
+ /* Session show handler.
+ * Pseudowire-specific implementation of debugfs session rendering.
+ * The callback is called by l2tp_debugfs.c after rendering core session
+ * information.
+ */
+ void (*show)(struct seq_file *m, void *priv);
+
+ u8 priv[]; /* private data */
+};
+
+/* L2TP tunnel configuration */
+struct l2tp_tunnel_cfg {
+ enum l2tp_encap_type encap;
+
+ /* Used only for kernel-created sockets */
+ struct in_addr local_ip;
+ struct in_addr peer_ip;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr *local_ip6;
+ struct in6_addr *peer_ip6;
+#endif
+ u16 local_udp_port;
+ u16 peer_udp_port;
+ unsigned int use_udp_checksums:1,
+ udp6_zero_tx_checksums:1,
+ udp6_zero_rx_checksums:1;
+};
+
+/* Represents a tunnel instance.
+ * Tracks runtime state including IO statistics.
+ * Holds the tunnel socket (either passed from userspace or directly created by the kernel).
+ * Maintains a hashlist of sessions belonging to the tunnel instance.
+ * Is linked into a per-net list of tunnels.
+ */
+#define L2TP_TUNNEL_NAME_MAX 20
+struct l2tp_tunnel {
+ int magic; /* Should be L2TP_TUNNEL_MAGIC */
+
+ unsigned long dead;
+
+ struct rcu_head rcu;
+ spinlock_t hlist_lock; /* write-protection for session_hlist */
+ bool acpt_newsess; /* indicates whether this tunnel accepts
+ * new sessions. Protected by hlist_lock.
+ */
+ struct hlist_head session_hlist[L2TP_HASH_SIZE];
+ /* list of sessions, hashed by session id */
+ u32 tunnel_id;
+ u32 peer_tunnel_id;
+ int version; /* 2=>L2TPv2, 3=>L2TPv3 */
+
+ char name[L2TP_TUNNEL_NAME_MAX]; /* for logging */
+ enum l2tp_encap_type encap;
+ struct l2tp_stats stats;
+
+ struct list_head list; /* list node on per-namespace list of tunnels */
+ struct net *l2tp_net; /* the net we belong to */
+
+ refcount_t ref_count;
+ void (*old_sk_destruct)(struct sock *sk);
+ struct sock *sock; /* parent socket */
+ int fd; /* parent fd, if tunnel socket was created
+ * by userspace
+ */
+
+ struct work_struct del_work;
+};
+
+/* Pseudowire ops callbacks for use with the l2tp genetlink interface */
+struct l2tp_nl_cmd_ops {
+ /* The pseudowire session create callback is responsible for creating a session
+ * instance for a specific pseudowire type.
+ * It must call l2tp_session_create and l2tp_session_register to register the
+ * session instance, as well as carry out any pseudowire-specific initialisation.
+ * It must return >= 0 on success, or an appropriate negative errno value on failure.
+ */
+ int (*session_create)(struct net *net, struct l2tp_tunnel *tunnel,
+ u32 session_id, u32 peer_session_id,
+ struct l2tp_session_cfg *cfg);
+
+ /* The pseudowire session delete callback is responsible for initiating the deletion
+ * of a session instance.
+ * It must call l2tp_session_delete, as well as carry out any pseudowire-specific
+ * teardown actions.
+ */
+ void (*session_delete)(struct l2tp_session *session);
+};
+
+static inline void *l2tp_session_priv(struct l2tp_session *session)
+{
+ return &session->priv[0];
+}
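+
+/* Usage sketch (illustrative; "struct foo_sess" and "foo_recv" are
+ * hypothetical names): a pseudowire reserves private storage by passing
+ * its size to l2tp_session_create() and retrieves it again with
+ * l2tp_session_priv(), typically from its recv_skb handler.
+ *
+ *    struct foo_sess {
+ *        struct net_device __rcu *dev;
+ *    };
+ *
+ *    static void foo_recv(struct l2tp_session *session,
+ *                         struct sk_buff *skb, int data_len)
+ *    {
+ *        struct foo_sess *fs = l2tp_session_priv(session);
+ *
+ *        ... deliver skb via fs->dev ...
+ *    }
+ */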
+
+/* Tunnel and session refcounts */
+void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel);
+void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel);
+void l2tp_session_inc_refcount(struct l2tp_session *session);
+void l2tp_session_dec_refcount(struct l2tp_session *session);
+
+/* Tunnel and session lookup.
+ * These functions take a reference on the instances they return, so
+ * the caller must ensure that the reference is dropped appropriately.
+ */
+struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id);
+struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth);
+struct l2tp_session *l2tp_tunnel_get_session(struct l2tp_tunnel *tunnel,
+ u32 session_id);
+
+struct l2tp_session *l2tp_session_get(const struct net *net, u32 session_id);
+struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth);
+struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
+ const char *ifname);
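+
+/* Lookup usage sketch (illustrative): every successful lookup must be
+ * paired with a refcount drop once the caller is done with the instance.
+ *
+ *    struct l2tp_session *session;
+ *
+ *    session = l2tp_session_get(net, session_id);
+ *    if (session) {
+ *        ... use session ...
+ *        l2tp_session_dec_refcount(session);
+ *    }
+ */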
+
+/* Tunnel and session lifetime management.
+ * Creation of a new instance is a two-step process: create, then register.
+ * Destruction is triggered using the *_delete functions, and completes asynchronously.
+ */
+int l2tp_tunnel_create(int fd, int version, u32 tunnel_id,
+ u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,
+ struct l2tp_tunnel **tunnelp);
+int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
+ struct l2tp_tunnel_cfg *cfg);
+void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
+
+struct l2tp_session *l2tp_session_create(int priv_size,
+ struct l2tp_tunnel *tunnel,
+ u32 session_id, u32 peer_session_id,
+ struct l2tp_session_cfg *cfg);
+int l2tp_session_register(struct l2tp_session *session,
+ struct l2tp_tunnel *tunnel);
+void l2tp_session_delete(struct l2tp_session *session);
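+
+/* Tunnel setup sketch (illustrative; the fd, version and id values are
+ * arbitrary examples and error unwinding is only hinted at): create the
+ * instance first, then register it.
+ *
+ *    struct l2tp_tunnel_cfg cfg = { .encap = L2TP_ENCAPTYPE_UDP };
+ *    struct l2tp_tunnel *tunnel;
+ *    int err;
+ *
+ *    err = l2tp_tunnel_create(fd, 3, 1, 2, &cfg, &tunnel);
+ *    if (err < 0)
+ *        return err;
+ *    err = l2tp_tunnel_register(tunnel, net, &cfg);
+ *    if (err < 0)
+ *        ... free the unregistered tunnel and return err ...
+ *
+ * Teardown is later requested with l2tp_tunnel_delete() and completes
+ * asynchronously.
+ */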
+
+/* Receive path helpers. If data sequencing is enabled for the session, these
+ * functions handle queuing and reordering before handing packets to the
+ * pseudowire code for onward delivery to userspace.
+ */
+void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
+ unsigned char *ptr, unsigned char *optr, u16 hdrflags,
+ int length);
+int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
+
+/* Transmit path helpers for sending packets over the tunnel socket. */
+void l2tp_session_set_header_len(struct l2tp_session *session, int version);
+int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb);
+
+/* Pseudowire management.
+ * Pseudowires should register with l2tp core on module init, and unregister
+ * on module exit.
+ */
+int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops);
+void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
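+
+/* Registration sketch (illustrative; the "foo" names are hypothetical and
+ * the pseudowire type is just an example): register the ops from module
+ * init and unregister them from module exit, as l2tp_eth.c does.
+ *
+ *    static const struct l2tp_nl_cmd_ops foo_nl_cmd_ops = {
+ *        .session_create = foo_session_create,
+ *        .session_delete = l2tp_session_delete,
+ *    };
+ *
+ *    err = l2tp_nl_register_ops(L2TP_PWTYPE_ETH, &foo_nl_cmd_ops);
+ */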
+
+/* IOCTL helper for IP encap modules. */
+int l2tp_ioctl(struct sock *sk, int cmd, int *karg);
+
+/* Extract the tunnel structure from a socket's sk_user_data pointer,
+ * validating the tunnel magic feather.
+ */
+struct l2tp_tunnel *l2tp_sk_to_tunnel(struct sock *sk);
+
+static inline int l2tp_get_l2specific_len(struct l2tp_session *session)
+{
+ switch (session->l2specific_type) {
+ case L2TP_L2SPECTYPE_DEFAULT:
+ return 4;
+ case L2TP_L2SPECTYPE_NONE:
+ default:
+ return 0;
+ }
+}
+
+static inline u32 l2tp_tunnel_dst_mtu(const struct l2tp_tunnel *tunnel)
+{
+ struct dst_entry *dst;
+ u32 mtu;
+
+ dst = sk_dst_get(tunnel->sock);
+ if (!dst)
+ return 0;
+
+ mtu = dst_mtu(dst);
+ dst_release(dst);
+
+ return mtu;
+}
+
+#ifdef CONFIG_XFRM
+static inline bool l2tp_tunnel_uses_xfrm(const struct l2tp_tunnel *tunnel)
+{
+ struct sock *sk = tunnel->sock;
+
+ return sk && (rcu_access_pointer(sk->sk_policy[0]) ||
+ rcu_access_pointer(sk->sk_policy[1]));
+}
+#else
+static inline bool l2tp_tunnel_uses_xfrm(const struct l2tp_tunnel *tunnel)
+{
+ return false;
+}
+#endif
+
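+/* Ensure that the L2TPv3 peer cookie and L2-specific sublayer (if any) are
+ * present in the skb's linear data area before they are parsed, and fix up
+ * the caller's header pointers if pskb_may_pull() moved the skb head.
+ */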
+static inline int l2tp_v3_ensure_opt_in_linear(struct l2tp_session *session, struct sk_buff *skb,
+ unsigned char **ptr, unsigned char **optr)
+{
+ int opt_len = session->peer_cookie_len + l2tp_get_l2specific_len(session);
+
+ if (opt_len > 0) {
+ int off = *ptr - *optr;
+
+ if (!pskb_may_pull(skb, off + opt_len))
+ return -1;
+
+ if (skb->data != *optr) {
+ *optr = skb->data;
+ *ptr = skb->data + off;
+ }
+ }
+
+ return 0;
+}
+
+#define MODULE_ALIAS_L2TP_PWTYPE(type) \
+ MODULE_ALIAS("net-l2tp-type-" __stringify(type))
+
+#endif /* _L2TP_CORE_H_ */
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
new file mode 100644
index 0000000000..4595b56d17
--- /dev/null
+++ b/net/l2tp/l2tp_debugfs.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* L2TP subsystem debugfs
+ *
+ * Copyright (c) 2010 Katalix Systems Ltd
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/hash.h>
+#include <linux/l2tp.h>
+#include <linux/in.h>
+#include <linux/etherdevice.h>
+#include <linux/spinlock.h>
+#include <linux/debugfs.h>
+#include <net/sock.h>
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/udp.h>
+#include <net/inet_common.h>
+#include <net/inet_hashtables.h>
+#include <net/tcp_states.h>
+#include <net/protocol.h>
+#include <net/xfrm.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+
+#include "l2tp_core.h"
+
+static struct dentry *rootdir;
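+
+/* The "l2tp" directory created on module init is typically visible as
+ * /sys/kernel/debug/l2tp once debugfs is mounted. Reading its "tunnels"
+ * file dumps every tunnel and session in the network namespace of the
+ * process that opened the file.
+ */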
+
+struct l2tp_dfs_seq_data {
+ struct net *net;
+ netns_tracker ns_tracker;
+ int tunnel_idx; /* current tunnel */
+ int session_idx; /* index of session within current tunnel */
+ struct l2tp_tunnel *tunnel;
+ struct l2tp_session *session; /* NULL means get next tunnel */
+};
+
+static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd)
+{
+ /* Drop reference taken during previous invocation */
+ if (pd->tunnel)
+ l2tp_tunnel_dec_refcount(pd->tunnel);
+
+ pd->tunnel = l2tp_tunnel_get_nth(pd->net, pd->tunnel_idx);
+ pd->tunnel_idx++;
+}
+
+static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd)
+{
+ /* Drop reference taken during previous invocation */
+ if (pd->session)
+ l2tp_session_dec_refcount(pd->session);
+
+ pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx);
+ pd->session_idx++;
+
+ if (!pd->session) {
+ pd->session_idx = 0;
+ l2tp_dfs_next_tunnel(pd);
+ }
+}
+
+static void *l2tp_dfs_seq_start(struct seq_file *m, loff_t *offs)
+{
+ struct l2tp_dfs_seq_data *pd = SEQ_START_TOKEN;
+ loff_t pos = *offs;
+
+ if (!pos)
+ goto out;
+
+ if (WARN_ON(!m->private)) {
+ pd = NULL;
+ goto out;
+ }
+ pd = m->private;
+
+ if (!pd->tunnel)
+ l2tp_dfs_next_tunnel(pd);
+ else
+ l2tp_dfs_next_session(pd);
+
+ /* NULL tunnel and session indicates end of list */
+ if (!pd->tunnel && !pd->session)
+ pd = NULL;
+
+out:
+ return pd;
+}
+
+static void *l2tp_dfs_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return NULL;
+}
+
+static void l2tp_dfs_seq_stop(struct seq_file *p, void *v)
+{
+ struct l2tp_dfs_seq_data *pd = v;
+
+ if (!pd || pd == SEQ_START_TOKEN)
+ return;
+
+ /* Drop reference taken by last invocation of l2tp_dfs_next_session()
+ * or l2tp_dfs_next_tunnel().
+ */
+ if (pd->session) {
+ l2tp_session_dec_refcount(pd->session);
+ pd->session = NULL;
+ }
+ if (pd->tunnel) {
+ l2tp_tunnel_dec_refcount(pd->tunnel);
+ pd->tunnel = NULL;
+ }
+}
+
+static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
+{
+ struct l2tp_tunnel *tunnel = v;
+ struct l2tp_session *session;
+ int session_count = 0;
+ int hash;
+
+ rcu_read_lock_bh();
+ for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
+ hlist_for_each_entry_rcu(session, &tunnel->session_hlist[hash], hlist) {
+ /* Session ID of zero is a dummy/reserved value used by pppol2tp */
+ if (session->session_id == 0)
+ continue;
+
+ session_count++;
+ }
+ }
+ rcu_read_unlock_bh();
+
+ seq_printf(m, "\nTUNNEL %u peer %u", tunnel->tunnel_id, tunnel->peer_tunnel_id);
+ if (tunnel->sock) {
+ struct inet_sock *inet = inet_sk(tunnel->sock);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (tunnel->sock->sk_family == AF_INET6) {
+ const struct ipv6_pinfo *np = inet6_sk(tunnel->sock);
+
+ seq_printf(m, " from %pI6c to %pI6c\n",
+ &np->saddr, &tunnel->sock->sk_v6_daddr);
+ }
+#endif
+ if (tunnel->sock->sk_family == AF_INET)
+ seq_printf(m, " from %pI4 to %pI4\n",
+ &inet->inet_saddr, &inet->inet_daddr);
+
+ if (tunnel->encap == L2TP_ENCAPTYPE_UDP)
+ seq_printf(m, " source port %hu, dest port %hu\n",
+ ntohs(inet->inet_sport), ntohs(inet->inet_dport));
+ }
+ seq_printf(m, " L2TPv%d, %s\n", tunnel->version,
+ tunnel->encap == L2TP_ENCAPTYPE_UDP ? "UDP" :
+ tunnel->encap == L2TP_ENCAPTYPE_IP ? "IP" :
+ "");
+ seq_printf(m, " %d sessions, refcnt %d/%d\n", session_count,
+ tunnel->sock ? refcount_read(&tunnel->sock->sk_refcnt) : 0,
+ refcount_read(&tunnel->ref_count));
+ seq_printf(m, " %08x rx %ld/%ld/%ld rx %ld/%ld/%ld\n",
+ 0,
+ atomic_long_read(&tunnel->stats.tx_packets),
+ atomic_long_read(&tunnel->stats.tx_bytes),
+ atomic_long_read(&tunnel->stats.tx_errors),
+ atomic_long_read(&tunnel->stats.rx_packets),
+ atomic_long_read(&tunnel->stats.rx_bytes),
+ atomic_long_read(&tunnel->stats.rx_errors));
+}
+
+static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v)
+{
+ struct l2tp_session *session = v;
+
+ seq_printf(m, " SESSION %u, peer %u, %s\n", session->session_id,
+ session->peer_session_id,
+ session->pwtype == L2TP_PWTYPE_ETH ? "ETH" :
+ session->pwtype == L2TP_PWTYPE_PPP ? "PPP" :
+ "");
+ if (session->send_seq || session->recv_seq)
+ seq_printf(m, " nr %u, ns %u\n", session->nr, session->ns);
+ seq_printf(m, " refcnt %d\n", refcount_read(&session->ref_count));
+ seq_printf(m, " config 0/0/%c/%c/-/%s %08x %u\n",
+ session->recv_seq ? 'R' : '-',
+ session->send_seq ? 'S' : '-',
+ session->lns_mode ? "LNS" : "LAC",
+ 0,
+ jiffies_to_msecs(session->reorder_timeout));
+ seq_printf(m, " offset 0 l2specific %hu/%d\n",
+ session->l2specific_type, l2tp_get_l2specific_len(session));
+ if (session->cookie_len) {
+ seq_printf(m, " cookie %02x%02x%02x%02x",
+ session->cookie[0], session->cookie[1],
+ session->cookie[2], session->cookie[3]);
+ if (session->cookie_len == 8)
+ seq_printf(m, "%02x%02x%02x%02x",
+ session->cookie[4], session->cookie[5],
+ session->cookie[6], session->cookie[7]);
+ seq_puts(m, "\n");
+ }
+ if (session->peer_cookie_len) {
+ seq_printf(m, " peer cookie %02x%02x%02x%02x",
+ session->peer_cookie[0], session->peer_cookie[1],
+ session->peer_cookie[2], session->peer_cookie[3]);
+ if (session->peer_cookie_len == 8)
+ seq_printf(m, "%02x%02x%02x%02x",
+ session->peer_cookie[4], session->peer_cookie[5],
+ session->peer_cookie[6], session->peer_cookie[7]);
+ seq_puts(m, "\n");
+ }
+
+ seq_printf(m, " %u/%u tx %ld/%ld/%ld rx %ld/%ld/%ld\n",
+ session->nr, session->ns,
+ atomic_long_read(&session->stats.tx_packets),
+ atomic_long_read(&session->stats.tx_bytes),
+ atomic_long_read(&session->stats.tx_errors),
+ atomic_long_read(&session->stats.rx_packets),
+ atomic_long_read(&session->stats.rx_bytes),
+ atomic_long_read(&session->stats.rx_errors));
+
+ if (session->show)
+ session->show(m, session);
+}
+
+static int l2tp_dfs_seq_show(struct seq_file *m, void *v)
+{
+ struct l2tp_dfs_seq_data *pd = v;
+
+ /* display header on line 1 */
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(m, "TUNNEL ID, peer ID from IP to IP\n");
+ seq_puts(m, " L2TPv2/L2TPv3, UDP/IP\n");
+ seq_puts(m, " sessions session-count, refcnt refcnt/sk->refcnt\n");
+ seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
+ seq_puts(m, " SESSION ID, peer ID, PWTYPE\n");
+ seq_puts(m, " refcnt cnt\n");
+ seq_puts(m, " offset OFFSET l2specific TYPE/LEN\n");
+ seq_puts(m, " [ cookie ]\n");
+ seq_puts(m, " [ peer cookie ]\n");
+ seq_puts(m, " config mtu/mru/rcvseq/sendseq/dataseq/lns debug reorderto\n");
+ seq_puts(m, " nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
+ goto out;
+ }
+
+ if (!pd->session)
+ l2tp_dfs_seq_tunnel_show(m, pd->tunnel);
+ else
+ l2tp_dfs_seq_session_show(m, pd->session);
+
+out:
+ return 0;
+}
+
+static const struct seq_operations l2tp_dfs_seq_ops = {
+ .start = l2tp_dfs_seq_start,
+ .next = l2tp_dfs_seq_next,
+ .stop = l2tp_dfs_seq_stop,
+ .show = l2tp_dfs_seq_show,
+};
+
+static int l2tp_dfs_seq_open(struct inode *inode, struct file *file)
+{
+ struct l2tp_dfs_seq_data *pd;
+ struct seq_file *seq;
+ int rc = -ENOMEM;
+
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ goto out;
+
+ /* Derive the network namespace from the pid opening the
+ * file.
+ */
+ pd->net = get_net_ns_by_pid(current->pid);
+ if (IS_ERR(pd->net)) {
+ rc = PTR_ERR(pd->net);
+ goto err_free_pd;
+ }
+ netns_tracker_alloc(pd->net, &pd->ns_tracker, GFP_KERNEL);
+ rc = seq_open(file, &l2tp_dfs_seq_ops);
+ if (rc)
+ goto err_free_net;
+
+ seq = file->private_data;
+ seq->private = pd;
+
+out:
+ return rc;
+
+err_free_net:
+ put_net_track(pd->net, &pd->ns_tracker);
+err_free_pd:
+ kfree(pd);
+ goto out;
+}
+
+static int l2tp_dfs_seq_release(struct inode *inode, struct file *file)
+{
+ struct l2tp_dfs_seq_data *pd;
+ struct seq_file *seq;
+
+ seq = file->private_data;
+ pd = seq->private;
+ if (pd->net)
+ put_net_track(pd->net, &pd->ns_tracker);
+ kfree(pd);
+ seq_release(inode, file);
+
+ return 0;
+}
+
+static const struct file_operations l2tp_dfs_fops = {
+ .owner = THIS_MODULE,
+ .open = l2tp_dfs_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = l2tp_dfs_seq_release,
+};
+
+static int __init l2tp_debugfs_init(void)
+{
+ rootdir = debugfs_create_dir("l2tp", NULL);
+
+ debugfs_create_file("tunnels", 0600, rootdir, NULL, &l2tp_dfs_fops);
+
+ pr_info("L2TP debugfs support\n");
+
+ return 0;
+}
+
+static void __exit l2tp_debugfs_exit(void)
+{
+ debugfs_remove_recursive(rootdir);
+}
+
+module_init(l2tp_debugfs_init);
+module_exit(l2tp_debugfs_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
+MODULE_DESCRIPTION("L2TP debugfs driver");
+MODULE_VERSION("1.0");
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
new file mode 100644
index 0000000000..f2ae03c404
--- /dev/null
+++ b/net/l2tp/l2tp_eth.c
@@ -0,0 +1,370 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* L2TPv3 ethernet pseudowire driver
+ *
+ * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/hash.h>
+#include <linux/l2tp.h>
+#include <linux/in.h>
+#include <linux/etherdevice.h>
+#include <linux/spinlock.h>
+#include <net/sock.h>
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/udp.h>
+#include <net/inet_common.h>
+#include <net/inet_hashtables.h>
+#include <net/tcp_states.h>
+#include <net/protocol.h>
+#include <net/xfrm.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/udp.h>
+
+#include "l2tp_core.h"
+
+/* Default device name. May be overridden by name specified by user */
+#define L2TP_ETH_DEV_NAME "l2tpeth%d"
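+
+/* l2tpeth devices are created through the L2TP netlink ops registered
+ * below; in practice this is driven from userspace, e.g. by an L2TPv3
+ * daemon or with iproute2's "ip l2tp add tunnel" and "ip l2tp add session"
+ * commands.
+ */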
+
+/* via netdev_priv() */
+struct l2tp_eth {
+ struct l2tp_session *session;
+ atomic_long_t tx_bytes;
+ atomic_long_t tx_packets;
+ atomic_long_t tx_dropped;
+ atomic_long_t rx_bytes;
+ atomic_long_t rx_packets;
+ atomic_long_t rx_errors;
+};
+
+/* via l2tp_session_priv() */
+struct l2tp_eth_sess {
+ struct net_device __rcu *dev;
+};
+
+static int l2tp_eth_dev_init(struct net_device *dev)
+{
+ eth_hw_addr_random(dev);
+ eth_broadcast_addr(dev->broadcast);
+ netdev_lockdep_set_classes(dev);
+
+ return 0;
+}
+
+static void l2tp_eth_dev_uninit(struct net_device *dev)
+{
+ struct l2tp_eth *priv = netdev_priv(dev);
+ struct l2tp_eth_sess *spriv;
+
+ spriv = l2tp_session_priv(priv->session);
+ RCU_INIT_POINTER(spriv->dev, NULL);
+ /* No need for synchronize_net() here. We're called by
+ * unregister_netdev*(), which does the synchronisation for us.
+ */
+}
+
+static netdev_tx_t l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct l2tp_eth *priv = netdev_priv(dev);
+ struct l2tp_session *session = priv->session;
+ unsigned int len = skb->len;
+ int ret = l2tp_xmit_skb(session, skb);
+
+ if (likely(ret == NET_XMIT_SUCCESS)) {
+ atomic_long_add(len, &priv->tx_bytes);
+ atomic_long_inc(&priv->tx_packets);
+ } else {
+ atomic_long_inc(&priv->tx_dropped);
+ }
+ return NETDEV_TX_OK;
+}
+
+static void l2tp_eth_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct l2tp_eth *priv = netdev_priv(dev);
+
+ stats->tx_bytes = (unsigned long)atomic_long_read(&priv->tx_bytes);
+ stats->tx_packets = (unsigned long)atomic_long_read(&priv->tx_packets);
+ stats->tx_dropped = (unsigned long)atomic_long_read(&priv->tx_dropped);
+ stats->rx_bytes = (unsigned long)atomic_long_read(&priv->rx_bytes);
+ stats->rx_packets = (unsigned long)atomic_long_read(&priv->rx_packets);
+ stats->rx_errors = (unsigned long)atomic_long_read(&priv->rx_errors);
+}
+
+static const struct net_device_ops l2tp_eth_netdev_ops = {
+ .ndo_init = l2tp_eth_dev_init,
+ .ndo_uninit = l2tp_eth_dev_uninit,
+ .ndo_start_xmit = l2tp_eth_dev_xmit,
+ .ndo_get_stats64 = l2tp_eth_get_stats64,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+static struct device_type l2tpeth_type = {
+ .name = "l2tpeth",
+};
+
+static void l2tp_eth_dev_setup(struct net_device *dev)
+{
+ SET_NETDEV_DEVTYPE(dev, &l2tpeth_type);
+ ether_setup(dev);
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->features |= NETIF_F_LLTX;
+ dev->netdev_ops = &l2tp_eth_netdev_ops;
+ dev->needs_free_netdev = true;
+}
+
+static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
+{
+ struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
+ struct net_device *dev;
+ struct l2tp_eth *priv;
+
+ if (!pskb_may_pull(skb, ETH_HLEN))
+ goto error;
+
+ secpath_reset(skb);
+
+ /* checksums verified by L2TP */
+ skb->ip_summed = CHECKSUM_NONE;
+
+ skb_dst_drop(skb);
+ nf_reset_ct(skb);
+
+ rcu_read_lock();
+ dev = rcu_dereference(spriv->dev);
+ if (!dev)
+ goto error_rcu;
+
+ priv = netdev_priv(dev);
+ if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
+ atomic_long_inc(&priv->rx_packets);
+ atomic_long_add(data_len, &priv->rx_bytes);
+ } else {
+ atomic_long_inc(&priv->rx_errors);
+ }
+ rcu_read_unlock();
+
+ return;
+
+error_rcu:
+ rcu_read_unlock();
+error:
+ kfree_skb(skb);
+}
+
+static void l2tp_eth_delete(struct l2tp_session *session)
+{
+ struct l2tp_eth_sess *spriv;
+ struct net_device *dev;
+
+ if (session) {
+ spriv = l2tp_session_priv(session);
+
+ rtnl_lock();
+ dev = rtnl_dereference(spriv->dev);
+ if (dev) {
+ unregister_netdevice(dev);
+ rtnl_unlock();
+ module_put(THIS_MODULE);
+ } else {
+ rtnl_unlock();
+ }
+ }
+}
+
+static void l2tp_eth_show(struct seq_file *m, void *arg)
+{
+ struct l2tp_session *session = arg;
+ struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
+ struct net_device *dev;
+
+ rcu_read_lock();
+ dev = rcu_dereference(spriv->dev);
+ if (!dev) {
+ rcu_read_unlock();
+ return;
+ }
+ dev_hold(dev);
+ rcu_read_unlock();
+
+ seq_printf(m, " interface %s\n", dev->name);
+
+ dev_put(dev);
+}
+
+static void l2tp_eth_adjust_mtu(struct l2tp_tunnel *tunnel,
+ struct l2tp_session *session,
+ struct net_device *dev)
+{
+ unsigned int overhead = 0;
+ u32 l3_overhead = 0;
+ u32 mtu;
+
+ /* if the encap is UDP, account for UDP header size */
+ if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
+ overhead += sizeof(struct udphdr);
+ dev->needed_headroom += sizeof(struct udphdr);
+ }
+
+ lock_sock(tunnel->sock);
+ l3_overhead = kernel_sock_ip_overhead(tunnel->sock);
+ release_sock(tunnel->sock);
+
+ if (l3_overhead == 0) {
+ /* The L3 overhead couldn't be identified. This can happen
+ * when tunnel->sock is NULL or the socket's address family
+ * is neither IPv4 nor IPv6; in that case the dev MTU is
+ * left at its default of 1500.
+ */
+ return;
+ }
+ /* Adjust MTU, factor overhead - underlay L3, overlay L2 hdr
+ * UDP overhead, if any, was already factored in above.
+ */
+ overhead += session->hdr_len + ETH_HLEN + l3_overhead;
+
+ mtu = l2tp_tunnel_dst_mtu(tunnel) - overhead;
+ if (mtu < dev->min_mtu || mtu > dev->max_mtu)
+ dev->mtu = ETH_DATA_LEN - overhead;
+ else
+ dev->mtu = mtu;
+
+ dev->needed_headroom += session->hdr_len;
+}
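+
+/* Worked example (assuming a 1500-byte underlay MTU and IPv4/UDP
+ * encapsulation with no IP options): the IPv4 header (20 bytes), UDP
+ * header (8 bytes), L2TP session header (session->hdr_len bytes) and
+ * inner Ethernet header (ETH_HLEN, 14 bytes) are subtracted from the
+ * underlay MTU, leaving 1458 - hdr_len bytes for the l2tpeth device MTU.
+ */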
+
+static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel,
+ u32 session_id, u32 peer_session_id,
+ struct l2tp_session_cfg *cfg)
+{
+ unsigned char name_assign_type;
+ struct net_device *dev;
+ char name[IFNAMSIZ];
+ struct l2tp_session *session;
+ struct l2tp_eth *priv;
+ struct l2tp_eth_sess *spriv;
+ int rc;
+
+ if (cfg->ifname) {
+ strscpy(name, cfg->ifname, IFNAMSIZ);
+ name_assign_type = NET_NAME_USER;
+ } else {
+ strcpy(name, L2TP_ETH_DEV_NAME);
+ name_assign_type = NET_NAME_ENUM;
+ }
+
+ session = l2tp_session_create(sizeof(*spriv), tunnel, session_id,
+ peer_session_id, cfg);
+ if (IS_ERR(session)) {
+ rc = PTR_ERR(session);
+ goto err;
+ }
+
+ dev = alloc_netdev(sizeof(*priv), name, name_assign_type,
+ l2tp_eth_dev_setup);
+ if (!dev) {
+ rc = -ENOMEM;
+ goto err_sess;
+ }
+
+ dev_net_set(dev, net);
+ dev->min_mtu = 0;
+ dev->max_mtu = ETH_MAX_MTU;
+ l2tp_eth_adjust_mtu(tunnel, session, dev);
+
+ priv = netdev_priv(dev);
+ priv->session = session;
+
+ session->recv_skb = l2tp_eth_dev_recv;
+ session->session_close = l2tp_eth_delete;
+ if (IS_ENABLED(CONFIG_L2TP_DEBUGFS))
+ session->show = l2tp_eth_show;
+
+ spriv = l2tp_session_priv(session);
+
+ l2tp_session_inc_refcount(session);
+
+ rtnl_lock();
+
+ /* Register both device and session while holding the rtnl lock. This
+ * ensures that l2tp_eth_delete() will see that there's a device to
+ * unregister, even if it happened to run before we assign spriv->dev.
+ */
+ rc = l2tp_session_register(session, tunnel);
+ if (rc < 0) {
+ rtnl_unlock();
+ goto err_sess_dev;
+ }
+
+ rc = register_netdevice(dev);
+ if (rc < 0) {
+ rtnl_unlock();
+ l2tp_session_delete(session);
+ l2tp_session_dec_refcount(session);
+ free_netdev(dev);
+
+ return rc;
+ }
+
+ strscpy(session->ifname, dev->name, IFNAMSIZ);
+ rcu_assign_pointer(spriv->dev, dev);
+
+ rtnl_unlock();
+
+ l2tp_session_dec_refcount(session);
+
+ __module_get(THIS_MODULE);
+
+ return 0;
+
+err_sess_dev:
+ l2tp_session_dec_refcount(session);
+ free_netdev(dev);
+err_sess:
+ kfree(session);
+err:
+ return rc;
+}
+
+static const struct l2tp_nl_cmd_ops l2tp_eth_nl_cmd_ops = {
+ .session_create = l2tp_eth_create,
+ .session_delete = l2tp_session_delete,
+};
+
+static int __init l2tp_eth_init(void)
+{
+ int err = 0;
+
+ err = l2tp_nl_register_ops(L2TP_PWTYPE_ETH, &l2tp_eth_nl_cmd_ops);
+ if (err)
+ goto err;
+
+ pr_info("L2TP ethernet pseudowire support (L2TPv3)\n");
+
+ return 0;
+
+err:
+ return err;
+}
+
+static void __exit l2tp_eth_exit(void)
+{
+ l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
+}
+
+module_init(l2tp_eth_init);
+module_exit(l2tp_eth_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
+MODULE_DESCRIPTION("L2TP ethernet pseudowire driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS_L2TP_PWTYPE(5);
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
new file mode 100644
index 0000000000..9a2a9ed3ba
--- /dev/null
+++ b/net/l2tp/l2tp_ip.c
@@ -0,0 +1,682 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* L2TPv3 IP encapsulation support
+ *
+ * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/ioctls.h>
+#include <linux/icmp.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/random.h>
+#include <linux/socket.h>
+#include <linux/l2tp.h>
+#include <linux/in.h>
+#include <net/sock.h>
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/udp.h>
+#include <net/inet_common.h>
+#include <net/tcp_states.h>
+#include <net/protocol.h>
+#include <net/xfrm.h>
+
+#include "l2tp_core.h"
+
+struct l2tp_ip_sock {
+ /* inet_sock has to be the first member of l2tp_ip_sock */
+ struct inet_sock inet;
+
+ u32 conn_id;
+ u32 peer_conn_id;
+};
+
+static DEFINE_RWLOCK(l2tp_ip_lock);
+static struct hlist_head l2tp_ip_table;
+static struct hlist_head l2tp_ip_bind_table;
+
+static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
+{
+ return (struct l2tp_ip_sock *)sk;
+}
+
+static struct sock *__l2tp_ip_bind_lookup(const struct net *net, __be32 laddr,
+ __be32 raddr, int dif, u32 tunnel_id)
+{
+ struct sock *sk;
+
+ sk_for_each_bound(sk, &l2tp_ip_bind_table) {
+ const struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk);
+ const struct inet_sock *inet = inet_sk(sk);
+ int bound_dev_if;
+
+ if (!net_eq(sock_net(sk), net))
+ continue;
+
+ bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
+ if (bound_dev_if && dif && bound_dev_if != dif)
+ continue;
+
+ if (inet->inet_rcv_saddr && laddr &&
+ inet->inet_rcv_saddr != laddr)
+ continue;
+
+ if (inet->inet_daddr && raddr && inet->inet_daddr != raddr)
+ continue;
+
+ if (l2tp->conn_id != tunnel_id)
+ continue;
+
+ goto found;
+ }
+
+ sk = NULL;
+found:
+ return sk;
+}
+
+/* When processing receive frames, there are two cases to
+ * consider. Data frames consist of a non-zero session-id and an
+ * optional cookie. Control frames consist of a regular L2TP header
+ * preceded by 32-bits of zeros.
+ *
+ * L2TPv3 Session Header Over IP
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Session ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Cookie (optional, maximum 64 bits)...
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * L2TPv3 Control Message Header Over IP
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | (32 bits of zeros) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |T|L|x|x|S|x|x|x|x|x|x|x| Ver | Length |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Control Connection ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Ns | Nr |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * All control frames are passed to userspace.
+ */
+static int l2tp_ip_recv(struct sk_buff *skb)
+{
+ struct net *net = dev_net(skb->dev);
+ struct sock *sk;
+ u32 session_id;
+ u32 tunnel_id;
+ unsigned char *ptr, *optr;
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel = NULL;
+ struct iphdr *iph;
+
+ if (!pskb_may_pull(skb, 4))
+ goto discard;
+
+ /* Point to L2TP header */
+ optr = skb->data;
+ ptr = skb->data;
+ session_id = ntohl(*((__be32 *)ptr));
+ ptr += 4;
+
+ /* RFC3931: L2TP/IP packets have the first 4 bytes containing
+ * the session_id. If it is 0, the packet is an L2TP control
+ * frame and the session_id value can be discarded.
+ */
+ if (session_id == 0) {
+ __skb_pull(skb, 4);
+ goto pass_up;
+ }
+
+ /* Ok, this is a data packet. Lookup the session. */
+ session = l2tp_session_get(net, session_id);
+ if (!session)
+ goto discard;
+
+ tunnel = session->tunnel;
+ if (!tunnel)
+ goto discard_sess;
+
+ if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+ goto discard_sess;
+
+ l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
+ l2tp_session_dec_refcount(session);
+
+ return 0;
+
+pass_up:
+ /* Get the tunnel_id from the L2TP header */
+ if (!pskb_may_pull(skb, 12))
+ goto discard;
+
+ if ((skb->data[0] & 0xc0) != 0xc0)
+ goto discard;
+
+ tunnel_id = ntohl(*(__be32 *)&skb->data[4]);
+ iph = (struct iphdr *)skb_network_header(skb);
+
+ read_lock_bh(&l2tp_ip_lock);
+ sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr, inet_iif(skb),
+ tunnel_id);
+ if (!sk) {
+ read_unlock_bh(&l2tp_ip_lock);
+ goto discard;
+ }
+ sock_hold(sk);
+ read_unlock_bh(&l2tp_ip_lock);
+
+ if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
+ goto discard_put;
+
+ nf_reset_ct(skb);
+
+ return sk_receive_skb(sk, skb, 1);
+
+discard_sess:
+ l2tp_session_dec_refcount(session);
+ goto discard;
+
+discard_put:
+ sock_put(sk);
+
+discard:
+ kfree_skb(skb);
+ return 0;
+}
+
+static int l2tp_ip_hash(struct sock *sk)
+{
+ if (sk_unhashed(sk)) {
+ write_lock_bh(&l2tp_ip_lock);
+ sk_add_node(sk, &l2tp_ip_table);
+ write_unlock_bh(&l2tp_ip_lock);
+ }
+ return 0;
+}
+
+static void l2tp_ip_unhash(struct sock *sk)
+{
+ if (sk_unhashed(sk))
+ return;
+ write_lock_bh(&l2tp_ip_lock);
+ sk_del_node_init(sk);
+ write_unlock_bh(&l2tp_ip_lock);
+}
+
+static int l2tp_ip_open(struct sock *sk)
+{
+ /* Prevent autobind. We don't have ports. */
+ inet_sk(sk)->inet_num = IPPROTO_L2TP;
+
+ l2tp_ip_hash(sk);
+ return 0;
+}
+
+static void l2tp_ip_close(struct sock *sk, long timeout)
+{
+ write_lock_bh(&l2tp_ip_lock);
+ hlist_del_init(&sk->sk_bind_node);
+ sk_del_node_init(sk);
+ write_unlock_bh(&l2tp_ip_lock);
+ sk_common_release(sk);
+}
+
+static void l2tp_ip_destroy_sock(struct sock *sk)
+{
+ struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
+ kfree_skb(skb);
+
+ if (tunnel)
+ l2tp_tunnel_delete(tunnel);
+}
+
+static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+{
+ struct inet_sock *inet = inet_sk(sk);
+ struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *)uaddr;
+ struct net *net = sock_net(sk);
+ int ret;
+ int chk_addr_ret;
+
+ if (addr_len < sizeof(struct sockaddr_l2tpip))
+ return -EINVAL;
+ if (addr->l2tp_family != AF_INET)
+ return -EINVAL;
+
+ lock_sock(sk);
+
+ ret = -EINVAL;
+ if (!sock_flag(sk, SOCK_ZAPPED))
+ goto out;
+
+ if (sk->sk_state != TCP_CLOSE)
+ goto out;
+
+ chk_addr_ret = inet_addr_type(net, addr->l2tp_addr.s_addr);
+ ret = -EADDRNOTAVAIL;
+ if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
+ chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
+ goto out;
+
+ if (addr->l2tp_addr.s_addr) {
+ inet->inet_rcv_saddr = addr->l2tp_addr.s_addr;
+ inet->inet_saddr = addr->l2tp_addr.s_addr;
+ }
+ if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
+ inet->inet_saddr = 0; /* Use device */
+
+ write_lock_bh(&l2tp_ip_lock);
+ if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, 0,
+ sk->sk_bound_dev_if, addr->l2tp_conn_id)) {
+ write_unlock_bh(&l2tp_ip_lock);
+ ret = -EADDRINUSE;
+ goto out;
+ }
+
+ sk_dst_reset(sk);
+ l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;
+
+ sk_add_bind_node(sk, &l2tp_ip_bind_table);
+ sk_del_node_init(sk);
+ write_unlock_bh(&l2tp_ip_lock);
+
+ ret = 0;
+ sock_reset_flag(sk, SOCK_ZAPPED);
+
+out:
+ release_sock(sk);
+
+ return ret;
+}
+
+static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+{
+ struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;
+ int rc;
+
+ if (addr_len < sizeof(*lsa))
+ return -EINVAL;
+
+ if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
+ return -EINVAL;
+
+ lock_sock(sk);
+
+ /* Must bind first - autobinding does not work */
+ if (sock_flag(sk, SOCK_ZAPPED)) {
+ rc = -EINVAL;
+ goto out_sk;
+ }
+
+ rc = __ip4_datagram_connect(sk, uaddr, addr_len);
+ if (rc < 0)
+ goto out_sk;
+
+ l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
+
+ write_lock_bh(&l2tp_ip_lock);
+ hlist_del_init(&sk->sk_bind_node);
+ sk_add_bind_node(sk, &l2tp_ip_bind_table);
+ write_unlock_bh(&l2tp_ip_lock);
+
+out_sk:
+ release_sock(sk);
+
+ return rc;
+}
+
+static int l2tp_ip_disconnect(struct sock *sk, int flags)
+{
+ if (sock_flag(sk, SOCK_ZAPPED))
+ return 0;
+
+ return __udp_disconnect(sk, flags);
+}
+
+static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
+ int peer)
+{
+ struct sock *sk = sock->sk;
+ struct inet_sock *inet = inet_sk(sk);
+ struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
+ struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;
+
+ memset(lsa, 0, sizeof(*lsa));
+ lsa->l2tp_family = AF_INET;
+ if (peer) {
+ if (!inet->inet_dport)
+ return -ENOTCONN;
+ lsa->l2tp_conn_id = lsk->peer_conn_id;
+ lsa->l2tp_addr.s_addr = inet->inet_daddr;
+ } else {
+ __be32 addr = inet->inet_rcv_saddr;
+
+ if (!addr)
+ addr = inet->inet_saddr;
+ lsa->l2tp_conn_id = lsk->conn_id;
+ lsa->l2tp_addr.s_addr = addr;
+ }
+ return sizeof(*lsa);
+}
+
+static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
+{
+ int rc;
+
+ /* Charge it to the socket, dropping if the queue is full. */
+ rc = sock_queue_rcv_skb(sk, skb);
+ if (rc < 0)
+ goto drop;
+
+ return 0;
+
+drop:
+ IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
+ kfree_skb(skb);
+ return 0;
+}
+
+/* Userspace will call sendmsg() on the tunnel socket to send L2TP
+ * control frames.
+ */
+static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+{
+ struct sk_buff *skb;
+ int rc;
+ struct inet_sock *inet = inet_sk(sk);
+ struct rtable *rt = NULL;
+ struct flowi4 *fl4;
+ int connected = 0;
+ __be32 daddr;
+
+ lock_sock(sk);
+
+ rc = -ENOTCONN;
+ if (sock_flag(sk, SOCK_DEAD))
+ goto out;
+
+ /* Get and verify the address. */
+ if (msg->msg_name) {
+ DECLARE_SOCKADDR(struct sockaddr_l2tpip *, lip, msg->msg_name);
+
+ rc = -EINVAL;
+ if (msg->msg_namelen < sizeof(*lip))
+ goto out;
+
+ if (lip->l2tp_family != AF_INET) {
+ rc = -EAFNOSUPPORT;
+ if (lip->l2tp_family != AF_UNSPEC)
+ goto out;
+ }
+
+ daddr = lip->l2tp_addr.s_addr;
+ } else {
+ rc = -EDESTADDRREQ;
+ if (sk->sk_state != TCP_ESTABLISHED)
+ goto out;
+
+ daddr = inet->inet_daddr;
+ connected = 1;
+ }
+
+ /* Allocate a socket buffer */
+ rc = -ENOMEM;
+ skb = sock_wmalloc(sk, 2 + NET_SKB_PAD + sizeof(struct iphdr) +
+ 4 + len, 0, GFP_KERNEL);
+ if (!skb)
+ goto error;
+
+ /* Reserve space for headers, putting IP header on 4-byte boundary. */
+ skb_reserve(skb, 2 + NET_SKB_PAD);
+ skb_reset_network_header(skb);
+ skb_reserve(skb, sizeof(struct iphdr));
+ skb_reset_transport_header(skb);
+
+ /* Insert 0 session_id */
+ *((__be32 *)skb_put(skb, 4)) = 0;
+
+ /* Copy user data into skb */
+ rc = memcpy_from_msg(skb_put(skb, len), msg, len);
+ if (rc < 0) {
+ kfree_skb(skb);
+ goto error;
+ }
+
+ fl4 = &inet->cork.fl.u.ip4;
+ if (connected)
+ rt = (struct rtable *)__sk_dst_check(sk, 0);
+
+ rcu_read_lock();
+ if (!rt) {
+ const struct ip_options_rcu *inet_opt;
+
+ inet_opt = rcu_dereference(inet->inet_opt);
+
+ /* Use correct destination address if we have options. */
+ if (inet_opt && inet_opt->opt.srr)
+ daddr = inet_opt->opt.faddr;
+
+ /* If this fails, the transport layer's retransmit mechanism
+ * will keep trying until a route appears or the connection
+ * times out.
+ */
+ rt = ip_route_output_ports(sock_net(sk), fl4, sk,
+ daddr, inet->inet_saddr,
+ inet->inet_dport, inet->inet_sport,
+ sk->sk_protocol, RT_CONN_FLAGS(sk),
+ sk->sk_bound_dev_if);
+ if (IS_ERR(rt))
+ goto no_route;
+ if (connected) {
+ sk_setup_caps(sk, &rt->dst);
+ } else {
+ skb_dst_set(skb, &rt->dst);
+ goto xmit;
+ }
+ }
+
+ /* We don't need to clone dst here, it is guaranteed to not disappear.
+ * __dev_xmit_skb() might force a refcount if needed.
+ */
+ skb_dst_set_noref(skb, &rt->dst);
+
+xmit:
+ /* Queue the packet to IP for output */
+ rc = ip_queue_xmit(sk, skb, &inet->cork.fl);
+ rcu_read_unlock();
+
+error:
+ if (rc >= 0)
+ rc = len;
+
+out:
+ release_sock(sk);
+ return rc;
+
+no_route:
+ rcu_read_unlock();
+ IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
+ kfree_skb(skb);
+ rc = -EHOSTUNREACH;
+ goto out;
+}
+
+static int l2tp_ip_recvmsg(struct sock *sk, struct msghdr *msg,
+ size_t len, int flags, int *addr_len)
+{
+ struct inet_sock *inet = inet_sk(sk);
+ size_t copied = 0;
+ int err = -EOPNOTSUPP;
+ DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
+ struct sk_buff *skb;
+
+ if (flags & MSG_OOB)
+ goto out;
+
+ skb = skb_recv_datagram(sk, flags, &err);
+ if (!skb)
+ goto out;
+
+ copied = skb->len;
+ if (len < copied) {
+ msg->msg_flags |= MSG_TRUNC;
+ copied = len;
+ }
+
+ err = skb_copy_datagram_msg(skb, 0, msg, copied);
+ if (err)
+ goto done;
+
+ sock_recv_timestamp(msg, sk, skb);
+
+ /* Copy the address. */
+ if (sin) {
+ sin->sin_family = AF_INET;
+ sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+ sin->sin_port = 0;
+ memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
+ *addr_len = sizeof(*sin);
+ }
+ if (inet_cmsg_flags(inet))
+ ip_cmsg_recv(msg, skb);
+ if (flags & MSG_TRUNC)
+ copied = skb->len;
+done:
+ skb_free_datagram(sk, skb);
+out:
+ return err ? err : copied;
+}
+
+int l2tp_ioctl(struct sock *sk, int cmd, int *karg)
+{
+ struct sk_buff *skb;
+
+ switch (cmd) {
+ case SIOCOUTQ:
+ *karg = sk_wmem_alloc_get(sk);
+ break;
+ case SIOCINQ:
+ spin_lock_bh(&sk->sk_receive_queue.lock);
+ skb = skb_peek(&sk->sk_receive_queue);
+ *karg = skb ? skb->len : 0;
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
+ break;
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(l2tp_ioctl);
+
+static struct proto l2tp_ip_prot = {
+ .name = "L2TP/IP",
+ .owner = THIS_MODULE,
+ .init = l2tp_ip_open,
+ .close = l2tp_ip_close,
+ .bind = l2tp_ip_bind,
+ .connect = l2tp_ip_connect,
+ .disconnect = l2tp_ip_disconnect,
+ .ioctl = l2tp_ioctl,
+ .destroy = l2tp_ip_destroy_sock,
+ .setsockopt = ip_setsockopt,
+ .getsockopt = ip_getsockopt,
+ .sendmsg = l2tp_ip_sendmsg,
+ .recvmsg = l2tp_ip_recvmsg,
+ .backlog_rcv = l2tp_ip_backlog_recv,
+ .hash = l2tp_ip_hash,
+ .unhash = l2tp_ip_unhash,
+ .obj_size = sizeof(struct l2tp_ip_sock),
+};
+
+static const struct proto_ops l2tp_ip_ops = {
+ .family = PF_INET,
+ .owner = THIS_MODULE,
+ .release = inet_release,
+ .bind = inet_bind,
+ .connect = inet_dgram_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = l2tp_ip_getname,
+ .poll = datagram_poll,
+ .ioctl = inet_ioctl,
+ .gettstamp = sock_gettstamp,
+ .listen = sock_no_listen,
+ .shutdown = inet_shutdown,
+ .setsockopt = sock_common_setsockopt,
+ .getsockopt = sock_common_getsockopt,
+ .sendmsg = inet_sendmsg,
+ .recvmsg = sock_common_recvmsg,
+ .mmap = sock_no_mmap,
+};
+
+static struct inet_protosw l2tp_ip_protosw = {
+ .type = SOCK_DGRAM,
+ .protocol = IPPROTO_L2TP,
+ .prot = &l2tp_ip_prot,
+ .ops = &l2tp_ip_ops,
+};
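+
+/* Userspace sketch (illustrative; the connection id is an arbitrary
+ * example value): an L2TPv3 daemon creates an L2TP/IP tunnel socket of
+ * this family and binds it to its local tunnel id before exchanging
+ * control frames with sendmsg()/recvmsg().
+ *
+ *    int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_L2TP);
+ *    struct sockaddr_l2tpip sa = {
+ *        .l2tp_family = AF_INET,
+ *        .l2tp_addr.s_addr = htonl(INADDR_ANY),
+ *        .l2tp_conn_id = 1,
+ *    };
+ *
+ *    bind(fd, (struct sockaddr *)&sa, sizeof(sa));
+ */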
+
+static struct net_protocol l2tp_ip_protocol __read_mostly = {
+ .handler = l2tp_ip_recv,
+};
+
+static int __init l2tp_ip_init(void)
+{
+ int err;
+
+ pr_info("L2TP IP encapsulation support (L2TPv3)\n");
+
+ err = proto_register(&l2tp_ip_prot, 1);
+ if (err != 0)
+ goto out;
+
+ err = inet_add_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
+ if (err)
+ goto out1;
+
+ inet_register_protosw(&l2tp_ip_protosw);
+ return 0;
+
+out1:
+ proto_unregister(&l2tp_ip_prot);
+out:
+ return err;
+}
+
+static void __exit l2tp_ip_exit(void)
+{
+ inet_unregister_protosw(&l2tp_ip_protosw);
+ inet_del_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
+ proto_unregister(&l2tp_ip_prot);
+}
+
+module_init(l2tp_ip_init);
+module_exit(l2tp_ip_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
+MODULE_DESCRIPTION("L2TP over IP");
+MODULE_VERSION("1.0");
+
+/* Use the values of SOCK_DGRAM (2) as type and IPPROTO_L2TP (115) as protocol,
+ * because __stringify doesn't like enums
+ */
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 115, 2);
+MODULE_ALIAS_NET_PF_PROTO(PF_INET, 115);
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
new file mode 100644
index 0000000000..11f3d375ce
--- /dev/null
+++ b/net/l2tp/l2tp_ip6.c
@@ -0,0 +1,810 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* L2TPv3 IP encapsulation support for IPv6
+ *
+ * Copyright (c) 2012 Katalix Systems Ltd
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/icmp.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/random.h>
+#include <linux/socket.h>
+#include <linux/l2tp.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <net/sock.h>
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/udp.h>
+#include <net/inet_common.h>
+#include <net/tcp_states.h>
+#include <net/protocol.h>
+#include <net/xfrm.h>
+
+#include <net/transp_v6.h>
+#include <net/addrconf.h>
+#include <net/ip6_route.h>
+
+#include "l2tp_core.h"
+
+struct l2tp_ip6_sock {
+ /* inet_sock has to be the first member of l2tp_ip6_sock */
+ struct inet_sock inet;
+
+ u32 conn_id;
+ u32 peer_conn_id;
+
+ struct ipv6_pinfo inet6;
+};
+
+static DEFINE_RWLOCK(l2tp_ip6_lock);
+static struct hlist_head l2tp_ip6_table;
+static struct hlist_head l2tp_ip6_bind_table;
+
+static inline struct l2tp_ip6_sock *l2tp_ip6_sk(const struct sock *sk)
+{
+ return (struct l2tp_ip6_sock *)sk;
+}
+
+static struct sock *__l2tp_ip6_bind_lookup(const struct net *net,
+ const struct in6_addr *laddr,
+ const struct in6_addr *raddr,
+ int dif, u32 tunnel_id)
+{
+ struct sock *sk;
+
+ sk_for_each_bound(sk, &l2tp_ip6_bind_table) {
+ const struct in6_addr *sk_laddr = inet6_rcv_saddr(sk);
+ const struct in6_addr *sk_raddr = &sk->sk_v6_daddr;
+ const struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
+ int bound_dev_if;
+
+ if (!net_eq(sock_net(sk), net))
+ continue;
+
+ bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
+ if (bound_dev_if && dif && bound_dev_if != dif)
+ continue;
+
+ if (sk_laddr && !ipv6_addr_any(sk_laddr) &&
+ !ipv6_addr_any(laddr) && !ipv6_addr_equal(sk_laddr, laddr))
+ continue;
+
+ if (!ipv6_addr_any(sk_raddr) && raddr &&
+ !ipv6_addr_any(raddr) && !ipv6_addr_equal(sk_raddr, raddr))
+ continue;
+
+ if (l2tp->conn_id != tunnel_id)
+ continue;
+
+ goto found;
+ }
+
+ sk = NULL;
+found:
+ return sk;
+}
+
+/* When processing receive frames, there are two cases to
+ * consider. Data frames consist of a non-zero session-id and an
+ * optional cookie. Control frames consist of a regular L2TP header
+ * preceded by 32-bits of zeros.
+ *
+ * L2TPv3 Session Header Over IP
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Session ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Cookie (optional, maximum 64 bits)...
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * L2TPv3 Control Message Header Over IP
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | (32 bits of zeros) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |T|L|x|x|S|x|x|x|x|x|x|x| Ver | Length |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Control Connection ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Ns | Nr |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * All control frames are passed to userspace.
+ */
+static int l2tp_ip6_recv(struct sk_buff *skb)
+{
+ struct net *net = dev_net(skb->dev);
+ struct sock *sk;
+ u32 session_id;
+ u32 tunnel_id;
+ unsigned char *ptr, *optr;
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel = NULL;
+ struct ipv6hdr *iph;
+
+ if (!pskb_may_pull(skb, 4))
+ goto discard;
+
+ /* Point to L2TP header */
+ optr = skb->data;
+ ptr = skb->data;
+ session_id = ntohl(*((__be32 *)ptr));
+ ptr += 4;
+
+ /* RFC3931: L2TP/IP packets have the first 4 bytes containing
+ * the session_id. If it is 0, the packet is an L2TP control
+ * frame and the session_id value can be discarded.
+ */
+ if (session_id == 0) {
+ __skb_pull(skb, 4);
+ goto pass_up;
+ }
+
+ /* Ok, this is a data packet. Lookup the session. */
+ session = l2tp_session_get(net, session_id);
+ if (!session)
+ goto discard;
+
+ tunnel = session->tunnel;
+ if (!tunnel)
+ goto discard_sess;
+
+ if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+ goto discard_sess;
+
+ l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
+ l2tp_session_dec_refcount(session);
+
+ return 0;
+
+pass_up:
+ /* Get the tunnel_id from the L2TP header */
+ if (!pskb_may_pull(skb, 12))
+ goto discard;
+
+ if ((skb->data[0] & 0xc0) != 0xc0)
+ goto discard;
+
+ tunnel_id = ntohl(*(__be32 *)&skb->data[4]);
+ iph = ipv6_hdr(skb);
+
+ read_lock_bh(&l2tp_ip6_lock);
+ sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr,
+ inet6_iif(skb), tunnel_id);
+ if (!sk) {
+ read_unlock_bh(&l2tp_ip6_lock);
+ goto discard;
+ }
+ sock_hold(sk);
+ read_unlock_bh(&l2tp_ip6_lock);
+
+ if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
+ goto discard_put;
+
+ nf_reset_ct(skb);
+
+ return sk_receive_skb(sk, skb, 1);
+
+discard_sess:
+ l2tp_session_dec_refcount(session);
+ goto discard;
+
+discard_put:
+ sock_put(sk);
+
+discard:
+ kfree_skb(skb);
+ return 0;
+}
+
+static int l2tp_ip6_hash(struct sock *sk)
+{
+ if (sk_unhashed(sk)) {
+ write_lock_bh(&l2tp_ip6_lock);
+ sk_add_node(sk, &l2tp_ip6_table);
+ write_unlock_bh(&l2tp_ip6_lock);
+ }
+ return 0;
+}
+
+static void l2tp_ip6_unhash(struct sock *sk)
+{
+ if (sk_unhashed(sk))
+ return;
+ write_lock_bh(&l2tp_ip6_lock);
+ sk_del_node_init(sk);
+ write_unlock_bh(&l2tp_ip6_lock);
+}
+
+static int l2tp_ip6_open(struct sock *sk)
+{
+ /* Prevent autobind. We don't have ports. */
+ inet_sk(sk)->inet_num = IPPROTO_L2TP;
+
+ l2tp_ip6_hash(sk);
+ return 0;
+}
+
+static void l2tp_ip6_close(struct sock *sk, long timeout)
+{
+ write_lock_bh(&l2tp_ip6_lock);
+ hlist_del_init(&sk->sk_bind_node);
+ sk_del_node_init(sk);
+ write_unlock_bh(&l2tp_ip6_lock);
+
+ sk_common_release(sk);
+}
+
+static void l2tp_ip6_destroy_sock(struct sock *sk)
+{
+ struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);
+
+ lock_sock(sk);
+ ip6_flush_pending_frames(sk);
+ release_sock(sk);
+
+ if (tunnel)
+ l2tp_tunnel_delete(tunnel);
+}
+
+static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+{
+ struct inet_sock *inet = inet_sk(sk);
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *)uaddr;
+ struct net *net = sock_net(sk);
+ __be32 v4addr = 0;
+ int bound_dev_if;
+ int addr_type;
+ int err;
+
+ if (addr->l2tp_family != AF_INET6)
+ return -EINVAL;
+ if (addr_len < sizeof(*addr))
+ return -EINVAL;
+
+ addr_type = ipv6_addr_type(&addr->l2tp_addr);
+
+ /* l2tp_ip6 sockets are IPv6 only */
+ if (addr_type == IPV6_ADDR_MAPPED)
+ return -EADDRNOTAVAIL;
+
+ /* L2TP is point-to-point, not multicast */
+ if (addr_type & IPV6_ADDR_MULTICAST)
+ return -EADDRNOTAVAIL;
+
+ lock_sock(sk);
+
+ err = -EINVAL;
+ if (!sock_flag(sk, SOCK_ZAPPED))
+ goto out_unlock;
+
+ if (sk->sk_state != TCP_CLOSE)
+ goto out_unlock;
+
+ bound_dev_if = sk->sk_bound_dev_if;
+
+ /* Check if the address belongs to the host. */
+ rcu_read_lock();
+ if (addr_type != IPV6_ADDR_ANY) {
+ struct net_device *dev = NULL;
+
+ if (addr_type & IPV6_ADDR_LINKLOCAL) {
+ if (addr->l2tp_scope_id)
+ bound_dev_if = addr->l2tp_scope_id;
+
+ /* Binding to link-local address requires an
+ * interface.
+ */
+ if (!bound_dev_if)
+ goto out_unlock_rcu;
+
+ err = -ENODEV;
+ dev = dev_get_by_index_rcu(sock_net(sk), bound_dev_if);
+ if (!dev)
+ goto out_unlock_rcu;
+ }
+
+ /* The socket's IPv4 address is invalid; only the
+ * unspecified and mapped addresses have a v4 equivalent.
+ */
+ v4addr = LOOPBACK4_IPV6;
+ err = -EADDRNOTAVAIL;
+ if (!ipv6_chk_addr(sock_net(sk), &addr->l2tp_addr, dev, 0))
+ goto out_unlock_rcu;
+ }
+ rcu_read_unlock();
+
+ write_lock_bh(&l2tp_ip6_lock);
+ if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, NULL, bound_dev_if,
+ addr->l2tp_conn_id)) {
+ write_unlock_bh(&l2tp_ip6_lock);
+ err = -EADDRINUSE;
+ goto out_unlock;
+ }
+
+ inet->inet_saddr = v4addr;
+ inet->inet_rcv_saddr = v4addr;
+ sk->sk_bound_dev_if = bound_dev_if;
+ sk->sk_v6_rcv_saddr = addr->l2tp_addr;
+ np->saddr = addr->l2tp_addr;
+
+ l2tp_ip6_sk(sk)->conn_id = addr->l2tp_conn_id;
+
+ sk_add_bind_node(sk, &l2tp_ip6_bind_table);
+ sk_del_node_init(sk);
+ write_unlock_bh(&l2tp_ip6_lock);
+
+ sock_reset_flag(sk, SOCK_ZAPPED);
+ release_sock(sk);
+ return 0;
+
+out_unlock_rcu:
+ rcu_read_unlock();
+out_unlock:
+ release_sock(sk);
+
+ return err;
+}
+
+static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
+ int addr_len)
+{
+ struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)uaddr;
+ struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
+ struct in6_addr *daddr;
+ int addr_type;
+ int rc;
+
+ if (addr_len < sizeof(*lsa))
+ return -EINVAL;
+
+ if (usin->sin6_family != AF_INET6)
+ return -EINVAL;
+
+ addr_type = ipv6_addr_type(&usin->sin6_addr);
+ if (addr_type & IPV6_ADDR_MULTICAST)
+ return -EINVAL;
+
+ if (addr_type & IPV6_ADDR_MAPPED) {
+ daddr = &usin->sin6_addr;
+ if (ipv4_is_multicast(daddr->s6_addr32[3]))
+ return -EINVAL;
+ }
+
+ lock_sock(sk);
+
+ /* Must bind first - autobinding does not work */
+ if (sock_flag(sk, SOCK_ZAPPED)) {
+ rc = -EINVAL;
+ goto out_sk;
+ }
+
+ rc = __ip6_datagram_connect(sk, uaddr, addr_len);
+ if (rc < 0)
+ goto out_sk;
+
+ l2tp_ip6_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
+
+ write_lock_bh(&l2tp_ip6_lock);
+ hlist_del_init(&sk->sk_bind_node);
+ sk_add_bind_node(sk, &l2tp_ip6_bind_table);
+ write_unlock_bh(&l2tp_ip6_lock);
+
+out_sk:
+ release_sock(sk);
+
+ return rc;
+}
+
+static int l2tp_ip6_disconnect(struct sock *sk, int flags)
+{
+ if (sock_flag(sk, SOCK_ZAPPED))
+ return 0;
+
+ return __udp_disconnect(sk, flags);
+}
+
+static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
+ int peer)
+{
+ struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)uaddr;
+ struct sock *sk = sock->sk;
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct l2tp_ip6_sock *lsk = l2tp_ip6_sk(sk);
+
+ lsa->l2tp_family = AF_INET6;
+ lsa->l2tp_flowinfo = 0;
+ lsa->l2tp_scope_id = 0;
+ lsa->l2tp_unused = 0;
+ if (peer) {
+ if (!lsk->peer_conn_id)
+ return -ENOTCONN;
+ lsa->l2tp_conn_id = lsk->peer_conn_id;
+ lsa->l2tp_addr = sk->sk_v6_daddr;
+ if (np->sndflow)
+ lsa->l2tp_flowinfo = np->flow_label;
+ } else {
+ if (ipv6_addr_any(&sk->sk_v6_rcv_saddr))
+ lsa->l2tp_addr = np->saddr;
+ else
+ lsa->l2tp_addr = sk->sk_v6_rcv_saddr;
+
+ lsa->l2tp_conn_id = lsk->conn_id;
+ }
+ if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
+ lsa->l2tp_scope_id = READ_ONCE(sk->sk_bound_dev_if);
+ return sizeof(*lsa);
+}
+
+static int l2tp_ip6_backlog_recv(struct sock *sk, struct sk_buff *skb)
+{
+ int rc;
+
+ /* Charge it to the socket, dropping if the queue is full. */
+ rc = sock_queue_rcv_skb(sk, skb);
+ if (rc < 0)
+ goto drop;
+
+ return 0;
+
+drop:
+ IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
+ kfree_skb(skb);
+ return -1;
+}
+
+static int l2tp_ip6_push_pending_frames(struct sock *sk)
+{
+ struct sk_buff *skb;
+ __be32 *transhdr = NULL;
+ int err = 0;
+
+ skb = skb_peek(&sk->sk_write_queue);
+ if (!skb)
+ goto out;
+
+ transhdr = (__be32 *)skb_transport_header(skb);
+ *transhdr = 0;
+
+ err = ip6_push_pending_frames(sk);
+
+out:
+ return err;
+}
+
+/* Userspace will call sendmsg() on the tunnel socket to send L2TP
+ * control frames.
+ */
+static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+{
+ struct ipv6_txoptions opt_space;
+ DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name);
+ struct in6_addr *daddr, *final_p, final;
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct ipv6_txoptions *opt_to_free = NULL;
+ struct ipv6_txoptions *opt = NULL;
+ struct ip6_flowlabel *flowlabel = NULL;
+ struct dst_entry *dst = NULL;
+ struct flowi6 fl6;
+ struct ipcm6_cookie ipc6;
+ int addr_len = msg->msg_namelen;
+ int transhdrlen = 4; /* zero session-id */
+ int ulen;
+ int err;
+
+ /* Rough check on arithmetic overflow;
+ * a better check is made in ip6_append_data().
+ */
+ if (len > INT_MAX - transhdrlen)
+ return -EMSGSIZE;
+
+ /* Mirror BSD error message compatibility */
+ if (msg->msg_flags & MSG_OOB)
+ return -EOPNOTSUPP;
+
+ /* Get and verify the address */
+ memset(&fl6, 0, sizeof(fl6));
+
+ fl6.flowi6_mark = READ_ONCE(sk->sk_mark);
+ fl6.flowi6_uid = sk->sk_uid;
+
+ ipcm6_init(&ipc6);
+
+ if (lsa) {
+ if (addr_len < SIN6_LEN_RFC2133)
+ return -EINVAL;
+
+ if (lsa->l2tp_family && lsa->l2tp_family != AF_INET6)
+ return -EAFNOSUPPORT;
+
+ daddr = &lsa->l2tp_addr;
+ if (np->sndflow) {
+ fl6.flowlabel = lsa->l2tp_flowinfo & IPV6_FLOWINFO_MASK;
+ if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
+ flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
+ if (IS_ERR(flowlabel))
+ return -EINVAL;
+ }
+ }
+
+ /* Otherwise it will be difficult to maintain
+ * sk->sk_dst_cache.
+ */
+ if (sk->sk_state == TCP_ESTABLISHED &&
+ ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
+ daddr = &sk->sk_v6_daddr;
+
+ if (addr_len >= sizeof(struct sockaddr_in6) &&
+ lsa->l2tp_scope_id &&
+ ipv6_addr_type(daddr) & IPV6_ADDR_LINKLOCAL)
+ fl6.flowi6_oif = lsa->l2tp_scope_id;
+ } else {
+ if (sk->sk_state != TCP_ESTABLISHED)
+ return -EDESTADDRREQ;
+
+ daddr = &sk->sk_v6_daddr;
+ fl6.flowlabel = np->flow_label;
+ }
+
+ if (fl6.flowi6_oif == 0)
+ fl6.flowi6_oif = READ_ONCE(sk->sk_bound_dev_if);
+
+ if (msg->msg_controllen) {
+ opt = &opt_space;
+ memset(opt, 0, sizeof(struct ipv6_txoptions));
+ opt->tot_len = sizeof(struct ipv6_txoptions);
+ ipc6.opt = opt;
+
+ err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6);
+ if (err < 0) {
+ fl6_sock_release(flowlabel);
+ return err;
+ }
+ if ((fl6.flowlabel & IPV6_FLOWLABEL_MASK) && !flowlabel) {
+ flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
+ if (IS_ERR(flowlabel))
+ return -EINVAL;
+ }
+ if (!(opt->opt_nflen | opt->opt_flen))
+ opt = NULL;
+ }
+
+ if (!opt) {
+ opt = txopt_get(np);
+ opt_to_free = opt;
+ }
+ if (flowlabel)
+ opt = fl6_merge_options(&opt_space, flowlabel, opt);
+ opt = ipv6_fixup_options(&opt_space, opt);
+ ipc6.opt = opt;
+
+ fl6.flowi6_proto = sk->sk_protocol;
+ if (!ipv6_addr_any(daddr))
+ fl6.daddr = *daddr;
+ else
+ fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
+ if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
+ fl6.saddr = np->saddr;
+
+ final_p = fl6_update_dst(&fl6, opt, &final);
+
+ if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
+ fl6.flowi6_oif = np->mcast_oif;
+ else if (!fl6.flowi6_oif)
+ fl6.flowi6_oif = np->ucast_oif;
+
+ security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
+
+ if (ipc6.tclass < 0)
+ ipc6.tclass = np->tclass;
+
+ fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
+
+ dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
+ if (IS_ERR(dst)) {
+ err = PTR_ERR(dst);
+ goto out;
+ }
+
+ if (ipc6.hlimit < 0)
+ ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
+
+ if (ipc6.dontfrag < 0)
+ ipc6.dontfrag = np->dontfrag;
+
+ if (msg->msg_flags & MSG_CONFIRM)
+ goto do_confirm;
+
+back_from_confirm:
+ lock_sock(sk);
+ /* Add the 4-byte zero session-id only for the first chunk queued */
+ ulen = len + (skb_queue_empty(&sk->sk_write_queue) ? transhdrlen : 0);
+ err = ip6_append_data(sk, ip_generic_getfrag, msg,
+ ulen, transhdrlen, &ipc6,
+ &fl6, (struct rt6_info *)dst,
+ msg->msg_flags);
+ if (err)
+ ip6_flush_pending_frames(sk);
+ else if (!(msg->msg_flags & MSG_MORE))
+ err = l2tp_ip6_push_pending_frames(sk);
+ release_sock(sk);
+done:
+ dst_release(dst);
+out:
+ fl6_sock_release(flowlabel);
+ txopt_put(opt_to_free);
+
+ return err < 0 ? err : len;
+
+do_confirm:
+ if (msg->msg_flags & MSG_PROBE)
+ dst_confirm_neigh(dst, &fl6.daddr);
+ if (!(msg->msg_flags & MSG_PROBE) || len)
+ goto back_from_confirm;
+ err = 0;
+ goto done;
+}
+
+static int l2tp_ip6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ int flags, int *addr_len)
+{
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name);
+ size_t copied = 0;
+ int err = -EOPNOTSUPP;
+ struct sk_buff *skb;
+
+ if (flags & MSG_OOB)
+ goto out;
+
+ if (flags & MSG_ERRQUEUE)
+ return ipv6_recv_error(sk, msg, len, addr_len);
+
+ skb = skb_recv_datagram(sk, flags, &err);
+ if (!skb)
+ goto out;
+
+ copied = skb->len;
+ if (len < copied) {
+ msg->msg_flags |= MSG_TRUNC;
+ copied = len;
+ }
+
+ err = skb_copy_datagram_msg(skb, 0, msg, copied);
+ if (err)
+ goto done;
+
+ sock_recv_timestamp(msg, sk, skb);
+
+ /* Copy the address. */
+ if (lsa) {
+ lsa->l2tp_family = AF_INET6;
+ lsa->l2tp_unused = 0;
+ lsa->l2tp_addr = ipv6_hdr(skb)->saddr;
+ lsa->l2tp_flowinfo = 0;
+ lsa->l2tp_scope_id = 0;
+ lsa->l2tp_conn_id = 0;
+ if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
+ lsa->l2tp_scope_id = inet6_iif(skb);
+ *addr_len = sizeof(*lsa);
+ }
+
+ if (np->rxopt.all)
+ ip6_datagram_recv_ctl(sk, msg, skb);
+
+ if (flags & MSG_TRUNC)
+ copied = skb->len;
+done:
+ skb_free_datagram(sk, skb);
+out:
+ return err ? err : copied;
+}
+
+static struct proto l2tp_ip6_prot = {
+ .name = "L2TP/IPv6",
+ .owner = THIS_MODULE,
+ .init = l2tp_ip6_open,
+ .close = l2tp_ip6_close,
+ .bind = l2tp_ip6_bind,
+ .connect = l2tp_ip6_connect,
+ .disconnect = l2tp_ip6_disconnect,
+ .ioctl = l2tp_ioctl,
+ .destroy = l2tp_ip6_destroy_sock,
+ .setsockopt = ipv6_setsockopt,
+ .getsockopt = ipv6_getsockopt,
+ .sendmsg = l2tp_ip6_sendmsg,
+ .recvmsg = l2tp_ip6_recvmsg,
+ .backlog_rcv = l2tp_ip6_backlog_recv,
+ .hash = l2tp_ip6_hash,
+ .unhash = l2tp_ip6_unhash,
+ .obj_size = sizeof(struct l2tp_ip6_sock),
+ .ipv6_pinfo_offset = offsetof(struct l2tp_ip6_sock, inet6),
+};
+
+static const struct proto_ops l2tp_ip6_ops = {
+ .family = PF_INET6,
+ .owner = THIS_MODULE,
+ .release = inet6_release,
+ .bind = inet6_bind,
+ .connect = inet_dgram_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = l2tp_ip6_getname,
+ .poll = datagram_poll,
+ .ioctl = inet6_ioctl,
+ .gettstamp = sock_gettstamp,
+ .listen = sock_no_listen,
+ .shutdown = inet_shutdown,
+ .setsockopt = sock_common_setsockopt,
+ .getsockopt = sock_common_getsockopt,
+ .sendmsg = inet_sendmsg,
+ .recvmsg = sock_common_recvmsg,
+ .mmap = sock_no_mmap,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = inet6_compat_ioctl,
+#endif
+};
+
+static struct inet_protosw l2tp_ip6_protosw = {
+ .type = SOCK_DGRAM,
+ .protocol = IPPROTO_L2TP,
+ .prot = &l2tp_ip6_prot,
+ .ops = &l2tp_ip6_ops,
+};
+
+static struct inet6_protocol l2tp_ip6_protocol __read_mostly = {
+ .handler = l2tp_ip6_recv,
+};
+
+static int __init l2tp_ip6_init(void)
+{
+ int err;
+
+ pr_info("L2TP IP encapsulation support for IPv6 (L2TPv3)\n");
+
+ err = proto_register(&l2tp_ip6_prot, 1);
+ if (err != 0)
+ goto out;
+
+ err = inet6_add_protocol(&l2tp_ip6_protocol, IPPROTO_L2TP);
+ if (err)
+ goto out1;
+
+ inet6_register_protosw(&l2tp_ip6_protosw);
+ return 0;
+
+out1:
+ proto_unregister(&l2tp_ip6_prot);
+out:
+ return err;
+}
+
+static void __exit l2tp_ip6_exit(void)
+{
+ inet6_unregister_protosw(&l2tp_ip6_protosw);
+ inet6_del_protocol(&l2tp_ip6_protocol, IPPROTO_L2TP);
+ proto_unregister(&l2tp_ip6_prot);
+}
+
+module_init(l2tp_ip6_init);
+module_exit(l2tp_ip6_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Chris Elston <celston@katalix.com>");
+MODULE_DESCRIPTION("L2TP IP encapsulation for IPv6");
+MODULE_VERSION("1.0");
+
+/* Use the values of SOCK_DGRAM (2) as type and IPPROTO_L2TP (115) as protocol,
+ * because __stringify doesn't like enums
+ */
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 115, 2);
+MODULE_ALIAS_NET_PF_PROTO(PF_INET6, 115);
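+
+/* Here's a minimal sketch (error handling omitted, constants and struct
+ * sockaddr_l2tpip6 from <linux/in.h> and <linux/l2tp.h>) of how a userspace
+ * L2TPv3 daemon might open and bind an L2TP/IPv6 socket matching the
+ * protosw registered above:
+ *
+ *	struct sockaddr_l2tpip6 addr;
+ *	int fd;
+ *
+ *	fd = socket(AF_INET6, SOCK_DGRAM, IPPROTO_L2TP);
+ *
+ *	memset(&addr, 0, sizeof(addr));
+ *	addr.l2tp_family = AF_INET6;
+ *	addr.l2tp_addr = in6addr_any;	// local address to bind
+ *	addr.l2tp_conn_id = tunnel_id;	// local L2TPv3 tunnel id
+ *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
+ */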
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
new file mode 100644
index 0000000000..a901fd14fe
--- /dev/null
+++ b/net/l2tp/l2tp_netlink.c
@@ -0,0 +1,1048 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* L2TP netlink layer, for management
+ *
+ * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
+ *
+ * Partly based on the IrDA netlink implementation
+ * (see net/irda/irnetlink.c) which is:
+ * Copyright (c) 2007 Samuel Ortiz <samuel@sortiz.org>
+ * which is in turn partly based on the wireless netlink code:
+ * Copyright 2006 Johannes Berg <johannes@sipsolutions.net>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <net/sock.h>
+#include <net/genetlink.h>
+#include <net/udp.h>
+#include <linux/in.h>
+#include <linux/udp.h>
+#include <linux/socket.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <net/net_namespace.h>
+
+#include <linux/l2tp.h>
+
+#include "l2tp_core.h"
+
+static struct genl_family l2tp_nl_family;
+
+static const struct genl_multicast_group l2tp_multicast_group[] = {
+ {
+ .name = L2TP_GENL_MCGROUP,
+ },
+};
+
+static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq,
+ int flags, struct l2tp_tunnel *tunnel, u8 cmd);
+static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq,
+ int flags, struct l2tp_session *session,
+ u8 cmd);
+
+/* Accessed under genl lock */
+static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX];
+
+static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info)
+{
+ u32 tunnel_id;
+ u32 session_id;
+ char *ifname;
+ struct l2tp_tunnel *tunnel;
+ struct l2tp_session *session = NULL;
+ struct net *net = genl_info_net(info);
+
+ if (info->attrs[L2TP_ATTR_IFNAME]) {
+ ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]);
+ session = l2tp_session_get_by_ifname(net, ifname);
+ } else if ((info->attrs[L2TP_ATTR_SESSION_ID]) &&
+ (info->attrs[L2TP_ATTR_CONN_ID])) {
+ tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
+ session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
+ tunnel = l2tp_tunnel_get(net, tunnel_id);
+ if (tunnel) {
+ session = l2tp_tunnel_get_session(tunnel, session_id);
+ l2tp_tunnel_dec_refcount(tunnel);
+ }
+ }
+
+ return session;
+}
+
+static int l2tp_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
+{
+ struct sk_buff *msg;
+ void *hdr;
+ int ret = -ENOBUFS;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
+ &l2tp_nl_family, 0, L2TP_CMD_NOOP);
+ if (!hdr) {
+ ret = -EMSGSIZE;
+ goto err_out;
+ }
+
+ genlmsg_end(msg, hdr);
+
+ return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
+
+err_out:
+ nlmsg_free(msg);
+
+out:
+ return ret;
+}
+
+static int l2tp_tunnel_notify(struct genl_family *family,
+ struct genl_info *info,
+ struct l2tp_tunnel *tunnel,
+ u8 cmd)
+{
+ struct sk_buff *msg;
+ int ret;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq,
+ NLM_F_ACK, tunnel, cmd);
+
+ if (ret >= 0) {
+ ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
+ /* We don't care if no one is listening */
+ if (ret == -ESRCH)
+ ret = 0;
+ return ret;
+ }
+
+ nlmsg_free(msg);
+
+ return ret;
+}
+
+static int l2tp_session_notify(struct genl_family *family,
+ struct genl_info *info,
+ struct l2tp_session *session,
+ u8 cmd)
+{
+ struct sk_buff *msg;
+ int ret;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
+ NLM_F_ACK, session, cmd);
+
+ if (ret >= 0) {
+ ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
+ /* We don't care if no one is listening */
+ if (ret == -ESRCH)
+ ret = 0;
+ return ret;
+ }
+
+ nlmsg_free(msg);
+
+ return ret;
+}
+
+static int l2tp_nl_cmd_tunnel_create_get_addr(struct nlattr **attrs, struct l2tp_tunnel_cfg *cfg)
+{
+ if (attrs[L2TP_ATTR_UDP_SPORT])
+ cfg->local_udp_port = nla_get_u16(attrs[L2TP_ATTR_UDP_SPORT]);
+ if (attrs[L2TP_ATTR_UDP_DPORT])
+ cfg->peer_udp_port = nla_get_u16(attrs[L2TP_ATTR_UDP_DPORT]);
+ cfg->use_udp_checksums = nla_get_flag(attrs[L2TP_ATTR_UDP_CSUM]);
+
+ /* Must have either AF_INET or AF_INET6 address for source and destination */
+#if IS_ENABLED(CONFIG_IPV6)
+ if (attrs[L2TP_ATTR_IP6_SADDR] && attrs[L2TP_ATTR_IP6_DADDR]) {
+ cfg->local_ip6 = nla_data(attrs[L2TP_ATTR_IP6_SADDR]);
+ cfg->peer_ip6 = nla_data(attrs[L2TP_ATTR_IP6_DADDR]);
+ cfg->udp6_zero_tx_checksums = nla_get_flag(attrs[L2TP_ATTR_UDP_ZERO_CSUM6_TX]);
+ cfg->udp6_zero_rx_checksums = nla_get_flag(attrs[L2TP_ATTR_UDP_ZERO_CSUM6_RX]);
+ return 0;
+ }
+#endif
+ if (attrs[L2TP_ATTR_IP_SADDR] && attrs[L2TP_ATTR_IP_DADDR]) {
+ cfg->local_ip.s_addr = nla_get_in_addr(attrs[L2TP_ATTR_IP_SADDR]);
+ cfg->peer_ip.s_addr = nla_get_in_addr(attrs[L2TP_ATTR_IP_DADDR]);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info)
+{
+ u32 tunnel_id;
+ u32 peer_tunnel_id;
+ int proto_version;
+ int fd = -1;
+ int ret = 0;
+ struct l2tp_tunnel_cfg cfg = { 0, };
+ struct l2tp_tunnel *tunnel;
+ struct net *net = genl_info_net(info);
+ struct nlattr **attrs = info->attrs;
+
+ if (!attrs[L2TP_ATTR_CONN_ID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+ tunnel_id = nla_get_u32(attrs[L2TP_ATTR_CONN_ID]);
+
+ if (!attrs[L2TP_ATTR_PEER_CONN_ID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+ peer_tunnel_id = nla_get_u32(attrs[L2TP_ATTR_PEER_CONN_ID]);
+
+ if (!attrs[L2TP_ATTR_PROTO_VERSION]) {
+ ret = -EINVAL;
+ goto out;
+ }
+ proto_version = nla_get_u8(attrs[L2TP_ATTR_PROTO_VERSION]);
+
+ if (!attrs[L2TP_ATTR_ENCAP_TYPE]) {
+ ret = -EINVAL;
+ goto out;
+ }
+ cfg.encap = nla_get_u16(attrs[L2TP_ATTR_ENCAP_TYPE]);
+
+ /* Managed tunnels take the tunnel socket from userspace.
+ * Unmanaged tunnels must call out the source and destination addresses
+ * for the kernel to create the tunnel socket itself.
+ */
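+	/* In attribute terms: a managed tunnel supplies L2TP_ATTR_FD (an
+	 * already-bound tunnel socket), while an unmanaged tunnel instead
+	 * supplies L2TP_ATTR_IP_SADDR/L2TP_ATTR_IP_DADDR (or their IP6
+	 * equivalents) and, for UDP encapsulation, the UDP port attributes
+	 * handled by l2tp_nl_cmd_tunnel_create_get_addr() above.
+	 */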
+ if (attrs[L2TP_ATTR_FD]) {
+ fd = nla_get_u32(attrs[L2TP_ATTR_FD]);
+ } else {
+ ret = l2tp_nl_cmd_tunnel_create_get_addr(attrs, &cfg);
+ if (ret < 0)
+ goto out;
+ }
+
+ ret = -EINVAL;
+ switch (cfg.encap) {
+ case L2TP_ENCAPTYPE_UDP:
+ case L2TP_ENCAPTYPE_IP:
+ ret = l2tp_tunnel_create(fd, proto_version, tunnel_id,
+ peer_tunnel_id, &cfg, &tunnel);
+ break;
+ }
+
+ if (ret < 0)
+ goto out;
+
+ l2tp_tunnel_inc_refcount(tunnel);
+ ret = l2tp_tunnel_register(tunnel, net, &cfg);
+ if (ret < 0) {
+ kfree(tunnel);
+ goto out;
+ }
+ ret = l2tp_tunnel_notify(&l2tp_nl_family, info, tunnel,
+ L2TP_CMD_TUNNEL_CREATE);
+ l2tp_tunnel_dec_refcount(tunnel);
+
+out:
+ return ret;
+}
+
+static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info)
+{
+ struct l2tp_tunnel *tunnel;
+ u32 tunnel_id;
+ int ret = 0;
+ struct net *net = genl_info_net(info);
+
+ if (!info->attrs[L2TP_ATTR_CONN_ID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+ tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
+
+ tunnel = l2tp_tunnel_get(net, tunnel_id);
+ if (!tunnel) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ l2tp_tunnel_notify(&l2tp_nl_family, info,
+ tunnel, L2TP_CMD_TUNNEL_DELETE);
+
+ l2tp_tunnel_delete(tunnel);
+
+ l2tp_tunnel_dec_refcount(tunnel);
+
+out:
+ return ret;
+}
+
+static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info)
+{
+ struct l2tp_tunnel *tunnel;
+ u32 tunnel_id;
+ int ret = 0;
+ struct net *net = genl_info_net(info);
+
+ if (!info->attrs[L2TP_ATTR_CONN_ID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+ tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
+
+ tunnel = l2tp_tunnel_get(net, tunnel_id);
+ if (!tunnel) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ret = l2tp_tunnel_notify(&l2tp_nl_family, info,
+ tunnel, L2TP_CMD_TUNNEL_MODIFY);
+
+ l2tp_tunnel_dec_refcount(tunnel);
+
+out:
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static int l2tp_nl_tunnel_send_addr6(struct sk_buff *skb, struct sock *sk,
+ enum l2tp_encap_type encap)
+{
+ struct inet_sock *inet = inet_sk(sk);
+ struct ipv6_pinfo *np = inet6_sk(sk);
+
+ switch (encap) {
+ case L2TP_ENCAPTYPE_UDP:
+ if (udp_get_no_check6_tx(sk) &&
+ nla_put_flag(skb, L2TP_ATTR_UDP_ZERO_CSUM6_TX))
+ return -1;
+ if (udp_get_no_check6_rx(sk) &&
+ nla_put_flag(skb, L2TP_ATTR_UDP_ZERO_CSUM6_RX))
+ return -1;
+ if (nla_put_u16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)) ||
+ nla_put_u16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport)))
+ return -1;
+ fallthrough;
+ case L2TP_ENCAPTYPE_IP:
+ if (nla_put_in6_addr(skb, L2TP_ATTR_IP6_SADDR, &np->saddr) ||
+ nla_put_in6_addr(skb, L2TP_ATTR_IP6_DADDR, &sk->sk_v6_daddr))
+ return -1;
+ break;
+ }
+ return 0;
+}
+#endif
+
+static int l2tp_nl_tunnel_send_addr4(struct sk_buff *skb, struct sock *sk,
+ enum l2tp_encap_type encap)
+{
+ struct inet_sock *inet = inet_sk(sk);
+
+ switch (encap) {
+ case L2TP_ENCAPTYPE_UDP:
+ if (nla_put_u8(skb, L2TP_ATTR_UDP_CSUM, !sk->sk_no_check_tx) ||
+ nla_put_u16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)) ||
+ nla_put_u16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport)))
+ return -1;
+ fallthrough;
+ case L2TP_ENCAPTYPE_IP:
+ if (nla_put_in_addr(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr) ||
+ nla_put_in_addr(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr))
+ return -1;
+ break;
+ }
+
+ return 0;
+}
+
+/* Append attributes for the tunnel address, handling the different attribute types
+ * used for different tunnel encapsulation and AF_INET vs. AF_INET6.
+ */
+static int l2tp_nl_tunnel_send_addr(struct sk_buff *skb, struct l2tp_tunnel *tunnel)
+{
+ struct sock *sk = tunnel->sock;
+
+ if (!sk)
+ return 0;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6)
+ return l2tp_nl_tunnel_send_addr6(skb, sk, tunnel->encap);
+#endif
+ return l2tp_nl_tunnel_send_addr4(skb, sk, tunnel->encap);
+}
+
+static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int flags,
+ struct l2tp_tunnel *tunnel, u8 cmd)
+{
+ void *hdr;
+ struct nlattr *nest;
+
+ hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (nla_put_u8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version) ||
+ nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
+ nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) ||
+ nla_put_u32(skb, L2TP_ATTR_DEBUG, 0) ||
+ nla_put_u16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap))
+ goto nla_put_failure;
+
+ nest = nla_nest_start_noflag(skb, L2TP_ATTR_STATS);
+ if (!nest)
+ goto nla_put_failure;
+
+ if (nla_put_u64_64bit(skb, L2TP_ATTR_TX_PACKETS,
+ atomic_long_read(&tunnel->stats.tx_packets),
+ L2TP_ATTR_STATS_PAD) ||
+ nla_put_u64_64bit(skb, L2TP_ATTR_TX_BYTES,
+ atomic_long_read(&tunnel->stats.tx_bytes),
+ L2TP_ATTR_STATS_PAD) ||
+ nla_put_u64_64bit(skb, L2TP_ATTR_TX_ERRORS,
+ atomic_long_read(&tunnel->stats.tx_errors),
+ L2TP_ATTR_STATS_PAD) ||
+ nla_put_u64_64bit(skb, L2TP_ATTR_RX_PACKETS,
+ atomic_long_read(&tunnel->stats.rx_packets),
+ L2TP_ATTR_STATS_PAD) ||
+ nla_put_u64_64bit(skb, L2TP_ATTR_RX_BYTES,
+ atomic_long_read(&tunnel->stats.rx_bytes),
+ L2TP_ATTR_STATS_PAD) ||
+ nla_put_u64_64bit(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
+ atomic_long_read(&tunnel->stats.rx_seq_discards),
+ L2TP_ATTR_STATS_PAD) ||
+ nla_put_u64_64bit(skb, L2TP_ATTR_RX_COOKIE_DISCARDS,
+ atomic_long_read(&tunnel->stats.rx_cookie_discards),
+ L2TP_ATTR_STATS_PAD) ||
+ nla_put_u64_64bit(skb, L2TP_ATTR_RX_OOS_PACKETS,
+ atomic_long_read(&tunnel->stats.rx_oos_packets),
+ L2TP_ATTR_STATS_PAD) ||
+ nla_put_u64_64bit(skb, L2TP_ATTR_RX_ERRORS,
+ atomic_long_read(&tunnel->stats.rx_errors),
+ L2TP_ATTR_STATS_PAD) ||
+ nla_put_u64_64bit(skb, L2TP_ATTR_RX_INVALID,
+ atomic_long_read(&tunnel->stats.rx_invalid),
+ L2TP_ATTR_STATS_PAD))
+ goto nla_put_failure;
+ nla_nest_end(skb, nest);
+
+ if (l2tp_nl_tunnel_send_addr(skb, tunnel))
+ goto nla_put_failure;
+
+ genlmsg_end(skb, hdr);
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(skb, hdr);
+ return -1;
+}
+
+static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info)
+{
+ struct l2tp_tunnel *tunnel;
+ struct sk_buff *msg;
+ u32 tunnel_id;
+ int ret = -ENOBUFS;
+ struct net *net = genl_info_net(info);
+
+ if (!info->attrs[L2TP_ATTR_CONN_ID]) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ tunnel = l2tp_tunnel_get(net, tunnel_id);
+ if (!tunnel) {
+ ret = -ENODEV;
+ goto err_nlmsg;
+ }
+
+ ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq,
+ NLM_F_ACK, tunnel, L2TP_CMD_TUNNEL_GET);
+ if (ret < 0)
+ goto err_nlmsg_tunnel;
+
+ l2tp_tunnel_dec_refcount(tunnel);
+
+ return genlmsg_unicast(net, msg, info->snd_portid);
+
+err_nlmsg_tunnel:
+ l2tp_tunnel_dec_refcount(tunnel);
+err_nlmsg:
+ nlmsg_free(msg);
+err:
+ return ret;
+}
+
+static int l2tp_nl_cmd_tunnel_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ int ti = cb->args[0];
+ struct l2tp_tunnel *tunnel;
+ struct net *net = sock_net(skb->sk);
+
+ for (;;) {
+ tunnel = l2tp_tunnel_get_nth(net, ti);
+ if (!tunnel)
+ goto out;
+
+ if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ tunnel, L2TP_CMD_TUNNEL_GET) < 0) {
+ l2tp_tunnel_dec_refcount(tunnel);
+ goto out;
+ }
+ l2tp_tunnel_dec_refcount(tunnel);
+
+ ti++;
+ }
+
+out:
+ cb->args[0] = ti;
+
+ return skb->len;
+}
+
+static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *info)
+{
+ u32 tunnel_id = 0;
+ u32 session_id;
+ u32 peer_session_id;
+ int ret = 0;
+ struct l2tp_tunnel *tunnel;
+ struct l2tp_session *session;
+ struct l2tp_session_cfg cfg = { 0, };
+ struct net *net = genl_info_net(info);
+
+ if (!info->attrs[L2TP_ATTR_CONN_ID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
+ tunnel = l2tp_tunnel_get(net, tunnel_id);
+ if (!tunnel) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (!info->attrs[L2TP_ATTR_SESSION_ID]) {
+ ret = -EINVAL;
+ goto out_tunnel;
+ }
+ session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
+
+ if (!info->attrs[L2TP_ATTR_PEER_SESSION_ID]) {
+ ret = -EINVAL;
+ goto out_tunnel;
+ }
+ peer_session_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_SESSION_ID]);
+
+ if (!info->attrs[L2TP_ATTR_PW_TYPE]) {
+ ret = -EINVAL;
+ goto out_tunnel;
+ }
+ cfg.pw_type = nla_get_u16(info->attrs[L2TP_ATTR_PW_TYPE]);
+ if (cfg.pw_type >= __L2TP_PWTYPE_MAX) {
+ ret = -EINVAL;
+ goto out_tunnel;
+ }
+
+ /* L2TPv2 only accepts PPP pseudo-wires */
+ if (tunnel->version == 2 && cfg.pw_type != L2TP_PWTYPE_PPP) {
+ ret = -EPROTONOSUPPORT;
+ goto out_tunnel;
+ }
+
+ if (tunnel->version > 2) {
+ if (info->attrs[L2TP_ATTR_L2SPEC_TYPE]) {
+ cfg.l2specific_type = nla_get_u8(info->attrs[L2TP_ATTR_L2SPEC_TYPE]);
+ if (cfg.l2specific_type != L2TP_L2SPECTYPE_DEFAULT &&
+ cfg.l2specific_type != L2TP_L2SPECTYPE_NONE) {
+ ret = -EINVAL;
+ goto out_tunnel;
+ }
+ } else {
+ cfg.l2specific_type = L2TP_L2SPECTYPE_DEFAULT;
+ }
+
+ if (info->attrs[L2TP_ATTR_COOKIE]) {
+ u16 len = nla_len(info->attrs[L2TP_ATTR_COOKIE]);
+
+ if (len > 8) {
+ ret = -EINVAL;
+ goto out_tunnel;
+ }
+ cfg.cookie_len = len;
+ memcpy(&cfg.cookie[0], nla_data(info->attrs[L2TP_ATTR_COOKIE]), len);
+ }
+ if (info->attrs[L2TP_ATTR_PEER_COOKIE]) {
+ u16 len = nla_len(info->attrs[L2TP_ATTR_PEER_COOKIE]);
+
+ if (len > 8) {
+ ret = -EINVAL;
+ goto out_tunnel;
+ }
+ cfg.peer_cookie_len = len;
+ memcpy(&cfg.peer_cookie[0], nla_data(info->attrs[L2TP_ATTR_PEER_COOKIE]), len);
+ }
+ if (info->attrs[L2TP_ATTR_IFNAME])
+ cfg.ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]);
+ }
+
+ if (info->attrs[L2TP_ATTR_RECV_SEQ])
+ cfg.recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]);
+
+ if (info->attrs[L2TP_ATTR_SEND_SEQ])
+ cfg.send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]);
+
+ if (info->attrs[L2TP_ATTR_LNS_MODE])
+ cfg.lns_mode = nla_get_u8(info->attrs[L2TP_ATTR_LNS_MODE]);
+
+ if (info->attrs[L2TP_ATTR_RECV_TIMEOUT])
+ cfg.reorder_timeout = nla_get_msecs(info->attrs[L2TP_ATTR_RECV_TIMEOUT]);
+
+#ifdef CONFIG_MODULES
+ if (!l2tp_nl_cmd_ops[cfg.pw_type]) {
+ genl_unlock();
+ request_module("net-l2tp-type-%u", cfg.pw_type);
+ genl_lock();
+ }
+#endif
+ if (!l2tp_nl_cmd_ops[cfg.pw_type] || !l2tp_nl_cmd_ops[cfg.pw_type]->session_create) {
+ ret = -EPROTONOSUPPORT;
+ goto out_tunnel;
+ }
+
+ ret = l2tp_nl_cmd_ops[cfg.pw_type]->session_create(net, tunnel,
+ session_id,
+ peer_session_id,
+ &cfg);
+
+ if (ret >= 0) {
+ session = l2tp_tunnel_get_session(tunnel, session_id);
+ if (session) {
+ ret = l2tp_session_notify(&l2tp_nl_family, info, session,
+ L2TP_CMD_SESSION_CREATE);
+ l2tp_session_dec_refcount(session);
+ }
+ }
+
+out_tunnel:
+ l2tp_tunnel_dec_refcount(tunnel);
+out:
+ return ret;
+}
+
+static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *info)
+{
+ int ret = 0;
+ struct l2tp_session *session;
+ u16 pw_type;
+
+ session = l2tp_nl_session_get(info);
+ if (!session) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ l2tp_session_notify(&l2tp_nl_family, info,
+ session, L2TP_CMD_SESSION_DELETE);
+
+ pw_type = session->pwtype;
+ if (pw_type < __L2TP_PWTYPE_MAX)
+ if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete)
+ l2tp_nl_cmd_ops[pw_type]->session_delete(session);
+
+ l2tp_session_dec_refcount(session);
+
+out:
+ return ret;
+}
+
+static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *info)
+{
+ int ret = 0;
+ struct l2tp_session *session;
+
+ session = l2tp_nl_session_get(info);
+ if (!session) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (info->attrs[L2TP_ATTR_RECV_SEQ])
+ session->recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]);
+
+ if (info->attrs[L2TP_ATTR_SEND_SEQ]) {
+ session->send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]);
+ l2tp_session_set_header_len(session, session->tunnel->version);
+ }
+
+ if (info->attrs[L2TP_ATTR_LNS_MODE])
+ session->lns_mode = nla_get_u8(info->attrs[L2TP_ATTR_LNS_MODE]);
+
+ if (info->attrs[L2TP_ATTR_RECV_TIMEOUT])
+ session->reorder_timeout = nla_get_msecs(info->attrs[L2TP_ATTR_RECV_TIMEOUT]);
+
+ ret = l2tp_session_notify(&l2tp_nl_family, info,
+ session, L2TP_CMD_SESSION_MODIFY);
+
+ l2tp_session_dec_refcount(session);
+
+out:
+ return ret;
+}
+
+static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int flags,
+ struct l2tp_session *session, u8 cmd)
+{
+ void *hdr;
+ struct nlattr *nest;
+ struct l2tp_tunnel *tunnel = session->tunnel;
+
+ hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
+ nla_put_u32(skb, L2TP_ATTR_SESSION_ID, session->session_id) ||
+ nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) ||
+ nla_put_u32(skb, L2TP_ATTR_PEER_SESSION_ID, session->peer_session_id) ||
+ nla_put_u32(skb, L2TP_ATTR_DEBUG, 0) ||
+ nla_put_u16(skb, L2TP_ATTR_PW_TYPE, session->pwtype))
+ goto nla_put_failure;
+
+ if ((session->ifname[0] &&
+ nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) ||
+ (session->cookie_len &&
+ nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len, session->cookie)) ||
+ (session->peer_cookie_len &&
+ nla_put(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len, session->peer_cookie)) ||
+ nla_put_u8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq) ||
+ nla_put_u8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq) ||
+ nla_put_u8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode) ||
+ (l2tp_tunnel_uses_xfrm(tunnel) &&
+ nla_put_u8(skb, L2TP_ATTR_USING_IPSEC, 1)) ||
+ (session->reorder_timeout &&
+ nla_put_msecs(skb, L2TP_ATTR_RECV_TIMEOUT,
+ session->reorder_timeout, L2TP_ATTR_PAD)))
+ goto nla_put_failure;
+
+ nest = nla_nest_start_noflag(skb, L2TP_ATTR_STATS);
+ if (!nest)
+ goto nla_put_failure;
+
+ if (nla_put_u64_64bit(skb, L2TP_ATTR_TX_PACKETS,
+ atomic_long_read(&session->stats.tx_packets),
+ L2TP_ATTR_STATS_PAD) ||
+ nla_put_u64_64bit(skb, L2TP_ATTR_TX_BYTES,
+ atomic_long_read(&session->stats.tx_bytes),
+ L2TP_ATTR_STATS_PAD) ||
+ nla_put_u64_64bit(skb, L2TP_ATTR_TX_ERRORS,
+ atomic_long_read(&session->stats.tx_errors),
+ L2TP_ATTR_STATS_PAD) ||
+ nla_put_u64_64bit(skb, L2TP_ATTR_RX_PACKETS,
+ atomic_long_read(&session->stats.rx_packets),
+ L2TP_ATTR_STATS_PAD) ||
+ nla_put_u64_64bit(skb, L2TP_ATTR_RX_BYTES,
+ atomic_long_read(&session->stats.rx_bytes),
+ L2TP_ATTR_STATS_PAD) ||
+ nla_put_u64_64bit(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
+ atomic_long_read(&session->stats.rx_seq_discards),
+ L2TP_ATTR_STATS_PAD) ||
+ nla_put_u64_64bit(skb, L2TP_ATTR_RX_COOKIE_DISCARDS,
+ atomic_long_read(&session->stats.rx_cookie_discards),
+ L2TP_ATTR_STATS_PAD) ||
+ nla_put_u64_64bit(skb, L2TP_ATTR_RX_OOS_PACKETS,
+ atomic_long_read(&session->stats.rx_oos_packets),
+ L2TP_ATTR_STATS_PAD) ||
+ nla_put_u64_64bit(skb, L2TP_ATTR_RX_ERRORS,
+ atomic_long_read(&session->stats.rx_errors),
+ L2TP_ATTR_STATS_PAD) ||
+ nla_put_u64_64bit(skb, L2TP_ATTR_RX_INVALID,
+ atomic_long_read(&session->stats.rx_invalid),
+ L2TP_ATTR_STATS_PAD))
+ goto nla_put_failure;
+ nla_nest_end(skb, nest);
+
+ genlmsg_end(skb, hdr);
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(skb, hdr);
+ return -1;
+}
+
+static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info)
+{
+ struct l2tp_session *session;
+ struct sk_buff *msg;
+ int ret;
+
+ session = l2tp_nl_session_get(info);
+ if (!session) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto err_ref;
+ }
+
+ ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
+ 0, session, L2TP_CMD_SESSION_GET);
+ if (ret < 0)
+ goto err_ref_msg;
+
+ ret = genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
+
+ l2tp_session_dec_refcount(session);
+
+ return ret;
+
+err_ref_msg:
+ nlmsg_free(msg);
+err_ref:
+ l2tp_session_dec_refcount(session);
+err:
+ return ret;
+}
+
+static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(skb->sk);
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel = NULL;
+ int ti = cb->args[0];
+ int si = cb->args[1];
+
+ for (;;) {
+ if (!tunnel) {
+ tunnel = l2tp_tunnel_get_nth(net, ti);
+ if (!tunnel)
+ goto out;
+ }
+
+ session = l2tp_session_get_nth(tunnel, si);
+ if (!session) {
+ ti++;
+ l2tp_tunnel_dec_refcount(tunnel);
+ tunnel = NULL;
+ si = 0;
+ continue;
+ }
+
+ if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ session, L2TP_CMD_SESSION_GET) < 0) {
+ l2tp_session_dec_refcount(session);
+ l2tp_tunnel_dec_refcount(tunnel);
+ break;
+ }
+ l2tp_session_dec_refcount(session);
+
+ si++;
+ }
+
+out:
+ cb->args[0] = ti;
+ cb->args[1] = si;
+
+ return skb->len;
+}
+
+static const struct nla_policy l2tp_nl_policy[L2TP_ATTR_MAX + 1] = {
+ [L2TP_ATTR_NONE] = { .type = NLA_UNSPEC, },
+ [L2TP_ATTR_PW_TYPE] = { .type = NLA_U16, },
+ [L2TP_ATTR_ENCAP_TYPE] = { .type = NLA_U16, },
+ [L2TP_ATTR_OFFSET] = { .type = NLA_U16, },
+ [L2TP_ATTR_DATA_SEQ] = { .type = NLA_U8, },
+ [L2TP_ATTR_L2SPEC_TYPE] = { .type = NLA_U8, },
+ [L2TP_ATTR_L2SPEC_LEN] = { .type = NLA_U8, },
+ [L2TP_ATTR_PROTO_VERSION] = { .type = NLA_U8, },
+ [L2TP_ATTR_CONN_ID] = { .type = NLA_U32, },
+ [L2TP_ATTR_PEER_CONN_ID] = { .type = NLA_U32, },
+ [L2TP_ATTR_SESSION_ID] = { .type = NLA_U32, },
+ [L2TP_ATTR_PEER_SESSION_ID] = { .type = NLA_U32, },
+ [L2TP_ATTR_UDP_CSUM] = { .type = NLA_U8, },
+ [L2TP_ATTR_VLAN_ID] = { .type = NLA_U16, },
+ [L2TP_ATTR_DEBUG] = { .type = NLA_U32, },
+ [L2TP_ATTR_RECV_SEQ] = { .type = NLA_U8, },
+ [L2TP_ATTR_SEND_SEQ] = { .type = NLA_U8, },
+ [L2TP_ATTR_LNS_MODE] = { .type = NLA_U8, },
+ [L2TP_ATTR_USING_IPSEC] = { .type = NLA_U8, },
+ [L2TP_ATTR_RECV_TIMEOUT] = { .type = NLA_MSECS, },
+ [L2TP_ATTR_FD] = { .type = NLA_U32, },
+ [L2TP_ATTR_IP_SADDR] = { .type = NLA_U32, },
+ [L2TP_ATTR_IP_DADDR] = { .type = NLA_U32, },
+ [L2TP_ATTR_UDP_SPORT] = { .type = NLA_U16, },
+ [L2TP_ATTR_UDP_DPORT] = { .type = NLA_U16, },
+ [L2TP_ATTR_MTU] = { .type = NLA_U16, },
+ [L2TP_ATTR_MRU] = { .type = NLA_U16, },
+ [L2TP_ATTR_STATS] = { .type = NLA_NESTED, },
+ [L2TP_ATTR_IP6_SADDR] = {
+ .type = NLA_BINARY,
+ .len = sizeof(struct in6_addr),
+ },
+ [L2TP_ATTR_IP6_DADDR] = {
+ .type = NLA_BINARY,
+ .len = sizeof(struct in6_addr),
+ },
+ [L2TP_ATTR_IFNAME] = {
+ .type = NLA_NUL_STRING,
+ .len = IFNAMSIZ - 1,
+ },
+ [L2TP_ATTR_COOKIE] = {
+ .type = NLA_BINARY,
+ .len = 8,
+ },
+ [L2TP_ATTR_PEER_COOKIE] = {
+ .type = NLA_BINARY,
+ .len = 8,
+ },
+};
+
+static const struct genl_small_ops l2tp_nl_ops[] = {
+ {
+ .cmd = L2TP_CMD_NOOP,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = l2tp_nl_cmd_noop,
+ /* can be retrieved by unprivileged users */
+ },
+ {
+ .cmd = L2TP_CMD_TUNNEL_CREATE,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = l2tp_nl_cmd_tunnel_create,
+ .flags = GENL_UNS_ADMIN_PERM,
+ },
+ {
+ .cmd = L2TP_CMD_TUNNEL_DELETE,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = l2tp_nl_cmd_tunnel_delete,
+ .flags = GENL_UNS_ADMIN_PERM,
+ },
+ {
+ .cmd = L2TP_CMD_TUNNEL_MODIFY,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = l2tp_nl_cmd_tunnel_modify,
+ .flags = GENL_UNS_ADMIN_PERM,
+ },
+ {
+ .cmd = L2TP_CMD_TUNNEL_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = l2tp_nl_cmd_tunnel_get,
+ .dumpit = l2tp_nl_cmd_tunnel_dump,
+ .flags = GENL_UNS_ADMIN_PERM,
+ },
+ {
+ .cmd = L2TP_CMD_SESSION_CREATE,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = l2tp_nl_cmd_session_create,
+ .flags = GENL_UNS_ADMIN_PERM,
+ },
+ {
+ .cmd = L2TP_CMD_SESSION_DELETE,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = l2tp_nl_cmd_session_delete,
+ .flags = GENL_UNS_ADMIN_PERM,
+ },
+ {
+ .cmd = L2TP_CMD_SESSION_MODIFY,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = l2tp_nl_cmd_session_modify,
+ .flags = GENL_UNS_ADMIN_PERM,
+ },
+ {
+ .cmd = L2TP_CMD_SESSION_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = l2tp_nl_cmd_session_get,
+ .dumpit = l2tp_nl_cmd_session_dump,
+ .flags = GENL_UNS_ADMIN_PERM,
+ },
+};
+
+static struct genl_family l2tp_nl_family __ro_after_init = {
+ .name = L2TP_GENL_NAME,
+ .version = L2TP_GENL_VERSION,
+ .hdrsize = 0,
+ .maxattr = L2TP_ATTR_MAX,
+ .policy = l2tp_nl_policy,
+ .netnsok = true,
+ .module = THIS_MODULE,
+ .small_ops = l2tp_nl_ops,
+ .n_small_ops = ARRAY_SIZE(l2tp_nl_ops),
+ .resv_start_op = L2TP_CMD_SESSION_GET + 1,
+ .mcgrps = l2tp_multicast_group,
+ .n_mcgrps = ARRAY_SIZE(l2tp_multicast_group),
+};
+
+int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops)
+{
+ int ret;
+
+ ret = -EINVAL;
+ if (pw_type >= __L2TP_PWTYPE_MAX)
+ goto err;
+
+ genl_lock();
+ ret = -EBUSY;
+ if (l2tp_nl_cmd_ops[pw_type])
+ goto out;
+
+ l2tp_nl_cmd_ops[pw_type] = ops;
+ ret = 0;
+
+out:
+ genl_unlock();
+err:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(l2tp_nl_register_ops);
+
+void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type)
+{
+ if (pw_type < __L2TP_PWTYPE_MAX) {
+ genl_lock();
+ l2tp_nl_cmd_ops[pw_type] = NULL;
+ genl_unlock();
+ }
+}
+EXPORT_SYMBOL_GPL(l2tp_nl_unregister_ops);
+
+static int __init l2tp_nl_init(void)
+{
+ pr_info("L2TP netlink interface\n");
+ return genl_register_family(&l2tp_nl_family);
+}
+
+static void l2tp_nl_cleanup(void)
+{
+ genl_unregister_family(&l2tp_nl_family);
+}
+
+module_init(l2tp_nl_init);
+module_exit(l2tp_nl_cleanup);
+
+MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
+MODULE_DESCRIPTION("L2TP netlink");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+MODULE_ALIAS_GENL_FAMILY("l2tp");
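+
+/* In practice this family is usually driven from userspace over generic
+ * netlink, for example via iproute2's "ip l2tp" front-end. A typical
+ * unmanaged-tunnel setup might look like the following (exact options
+ * depend on the iproute2 version):
+ *
+ *	ip l2tp add tunnel tunnel_id 10 peer_tunnel_id 10 encap udp \
+ *		local 192.0.2.1 remote 192.0.2.2 udp_sport 5000 udp_dport 5000
+ *	ip l2tp add session tunnel_id 10 session_id 1 peer_session_id 1
+ *	ip l2tp show tunnel
+ */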
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
new file mode 100644
index 0000000000..f011af6601
--- /dev/null
+++ b/net/l2tp/l2tp_ppp.c
@@ -0,0 +1,1743 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*****************************************************************************
+ * Linux PPP over L2TP (PPPoX/PPPoL2TP) Sockets
+ *
+ * PPPoX --- Generic PPP encapsulation socket family
+ * PPPoL2TP --- PPP over L2TP (RFC 2661)
+ *
+ * Version: 2.0.0
+ *
+ * Authors: James Chapman (jchapman@katalix.com)
+ *
+ * Based on original work by Martijn van Oosterhout <kleptog@svana.org>
+ *
+ * License:
+ */
+
+/* This driver handles only L2TP data frames; control frames are handled by a
+ * userspace application.
+ *
+ * To send data in an L2TP session, userspace opens a PPPoL2TP socket and
+ * attaches it to a bound UDP socket with local tunnel_id / session_id and
+ * peer tunnel_id / session_id set. Data can then be sent or received using
+ * regular socket sendmsg() / recvmsg() calls. Kernel parameters of the socket
+ * can be read or modified using ioctl() or [gs]etsockopt() calls.
+ *
+ * When a PPPoL2TP socket is connected with local and peer session_id values
+ * zero, the socket is treated as a special tunnel management socket.
+ *
+ * Here's example userspace code to create a socket for sending/receiving data
+ * over an L2TP session:-
+ *
+ * struct sockaddr_pppol2tp sax;
+ * int fd;
+ * int session_fd;
+ *
+ * fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);
+ *
+ * sax.sa_family = AF_PPPOX;
+ * sax.sa_protocol = PX_PROTO_OL2TP;
+ * sax.pppol2tp.fd = tunnel_fd; // bound UDP socket
+ * sax.pppol2tp.addr.sin_addr.s_addr = addr->sin_addr.s_addr;
+ * sax.pppol2tp.addr.sin_port = addr->sin_port;
+ * sax.pppol2tp.addr.sin_family = AF_INET;
+ * sax.pppol2tp.s_tunnel = tunnel_id;
+ * sax.pppol2tp.s_session = session_id;
+ * sax.pppol2tp.d_tunnel = peer_tunnel_id;
+ * sax.pppol2tp.d_session = peer_session_id;
+ *
+ * session_fd = connect(fd, (struct sockaddr *)&sax, sizeof(sax));
+ *
+ * A pppd plugin that allows PPP traffic to be carried over L2TP using
+ * this driver is available from the OpenL2TP project at
+ * http://openl2tp.sourceforge.net.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/uaccess.h>
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/inetdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/if_pppox.h>
+#include <linux/if_pppol2tp.h>
+#include <net/sock.h>
+#include <linux/ppp_channel.h>
+#include <linux/ppp_defs.h>
+#include <linux/ppp-ioctl.h>
+#include <linux/file.h>
+#include <linux/hash.h>
+#include <linux/sort.h>
+#include <linux/proc_fs.h>
+#include <linux/l2tp.h>
+#include <linux/nsproxy.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <net/ip.h>
+#include <net/udp.h>
+#include <net/inet_common.h>
+
+#include <asm/byteorder.h>
+#include <linux/atomic.h>
+
+#include "l2tp_core.h"
+
+#define PPPOL2TP_DRV_VERSION "V2.0"
+
+/* Space for UDP, L2TP and PPP headers */
+#define PPPOL2TP_HEADER_OVERHEAD 40
+
+/* Number of bytes to build transmit L2TP headers.
+ * Unfortunately the size is different depending on whether sequence numbers
+ * are enabled.
+ */
+#define PPPOL2TP_L2TP_HDR_SIZE_SEQ 10
+#define PPPOL2TP_L2TP_HDR_SIZE_NOSEQ 6
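+
+/* For an L2TPv2 data header (RFC 2661) this works out as flags/version (2) +
+ * tunnel id (2) + session id (2) = 6 bytes, plus Ns (2) + Nr (2) = 10 bytes
+ * when sequence numbers are enabled; the sizes above assume the optional
+ * Length and Offset fields are not present.
+ */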
+
+/* Private data of each session. This data lives at the end of struct
+ * l2tp_session, referenced via session->priv[].
+ */
+struct pppol2tp_session {
+ int owner; /* pid that opened the socket */
+
+ struct mutex sk_lock; /* Protects .sk */
+ struct sock __rcu *sk; /* Pointer to the session PPPoX socket */
+ struct sock *__sk; /* Copy of .sk, for cleanup */
+ struct rcu_head rcu; /* For asynchronous release */
+};
+
+static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb);
+
+static const struct ppp_channel_ops pppol2tp_chan_ops = {
+ .start_xmit = pppol2tp_xmit,
+};
+
+static const struct proto_ops pppol2tp_ops;
+
+/* Retrieves the pppol2tp socket associated with a session.
+ * A reference is held on the returned socket, so this function must be paired
+ * with sock_put().
+ */
+static struct sock *pppol2tp_session_get_sock(struct l2tp_session *session)
+{
+ struct pppol2tp_session *ps = l2tp_session_priv(session);
+ struct sock *sk;
+
+ rcu_read_lock();
+ sk = rcu_dereference(ps->sk);
+ if (sk)
+ sock_hold(sk);
+ rcu_read_unlock();
+
+ return sk;
+}
+
+/* Helpers to obtain tunnel/session contexts from sockets.
+ */
+static inline struct l2tp_session *pppol2tp_sock_to_session(struct sock *sk)
+{
+ struct l2tp_session *session;
+
+ if (!sk)
+ return NULL;
+
+ sock_hold(sk);
+ session = (struct l2tp_session *)(sk->sk_user_data);
+ if (!session) {
+ sock_put(sk);
+ goto out;
+ }
+ if (WARN_ON(session->magic != L2TP_SESSION_MAGIC)) {
+ session = NULL;
+ sock_put(sk);
+ goto out;
+ }
+
+out:
+ return session;
+}
+
+/*****************************************************************************
+ * Receive data handling
+ *****************************************************************************/
+
+/* Receive message. This is the recvmsg for the PPPoL2TP socket.
+ */
+static int pppol2tp_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t len, int flags)
+{
+ int err;
+ struct sk_buff *skb;
+ struct sock *sk = sock->sk;
+
+ err = -EIO;
+ if (sk->sk_state & PPPOX_BOUND)
+ goto end;
+
+ err = 0;
+ skb = skb_recv_datagram(sk, flags, &err);
+ if (!skb)
+ goto end;
+
+ if (len > skb->len)
+ len = skb->len;
+ else if (len < skb->len)
+ msg->msg_flags |= MSG_TRUNC;
+
+ err = skb_copy_datagram_msg(skb, 0, msg, len);
+ if (likely(err == 0))
+ err = len;
+
+ kfree_skb(skb);
+end:
+ return err;
+}
+
+static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
+{
+ struct pppol2tp_session *ps = l2tp_session_priv(session);
+ struct sock *sk = NULL;
+
+ /* If the socket is bound, send it in to PPP's input queue. Otherwise
+ * queue it on the session socket.
+ */
+ rcu_read_lock();
+ sk = rcu_dereference(ps->sk);
+ if (!sk)
+ goto no_sock;
+
+ /* If the first two bytes are 0xFF03, consider that it is the PPP's
+ * Address and Control fields and skip them. The L2TP module has always
+ * worked this way, although, in theory, the use of these fields should
+ * be negotiated and handled at the PPP layer. These fields are
+ * constant: 0xFF is the All-Stations Address and 0x03 the Unnumbered
+ * Information command with Poll/Final bit set to zero (RFC 1662).
+ */
+ if (pskb_may_pull(skb, 2) && skb->data[0] == PPP_ALLSTATIONS &&
+ skb->data[1] == PPP_UI)
+ skb_pull(skb, 2);
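+	/* For example, an LCP frame arriving as ff 03 c0 21 ... is passed on
+	 * as c0 21 ... once the Address and Control bytes have been stripped.
+	 */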
+
+ if (sk->sk_state & PPPOX_BOUND) {
+ struct pppox_sock *po;
+
+ po = pppox_sk(sk);
+ ppp_input(&po->chan, skb);
+ } else {
+ if (sock_queue_rcv_skb(sk, skb) < 0) {
+ atomic_long_inc(&session->stats.rx_errors);
+ kfree_skb(skb);
+ }
+ }
+ rcu_read_unlock();
+
+ return;
+
+no_sock:
+ rcu_read_unlock();
+ pr_warn_ratelimited("%s: no socket in recv\n", session->name);
+ kfree_skb(skb);
+}
+
+/************************************************************************
+ * Transmit handling
+ ***********************************************************************/
+
+/* This is the sendmsg for the PPPoL2TP pppol2tp_session socket. We come here
+ * when a user application does a sendmsg() on the session socket. L2TP and
+ * PPP headers must be inserted into the user's data.
+ */
+static int pppol2tp_sendmsg(struct socket *sock, struct msghdr *m,
+ size_t total_len)
+{
+ struct sock *sk = sock->sk;
+ struct sk_buff *skb;
+ int error;
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel;
+ int uhlen;
+
+ error = -ENOTCONN;
+ if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
+ goto error;
+
+ /* Get session and tunnel contexts */
+ error = -EBADF;
+ session = pppol2tp_sock_to_session(sk);
+ if (!session)
+ goto error;
+
+ tunnel = session->tunnel;
+
+ uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
+
+ /* Allocate a socket buffer */
+ error = -ENOMEM;
+ skb = sock_wmalloc(sk, NET_SKB_PAD + sizeof(struct iphdr) +
+ uhlen + session->hdr_len +
+ 2 + total_len, /* 2 bytes for PPP_ALLSTATIONS & PPP_UI */
+ 0, GFP_KERNEL);
+ if (!skb)
+ goto error_put_sess;
+
+ /* Reserve space for headers. */
+ skb_reserve(skb, NET_SKB_PAD);
+ skb_reset_network_header(skb);
+ skb_reserve(skb, sizeof(struct iphdr));
+ skb_reset_transport_header(skb);
+ skb_reserve(skb, uhlen);
+
+ /* Add PPP header */
+ skb->data[0] = PPP_ALLSTATIONS;
+ skb->data[1] = PPP_UI;
+ skb_put(skb, 2);
+
+ /* Copy user data into skb */
+ error = memcpy_from_msg(skb_put(skb, total_len), m, total_len);
+ if (error < 0) {
+ kfree_skb(skb);
+ goto error_put_sess;
+ }
+
+ local_bh_disable();
+ l2tp_xmit_skb(session, skb);
+ local_bh_enable();
+
+ sock_put(sk);
+
+ return total_len;
+
+error_put_sess:
+ sock_put(sk);
+error:
+ return error;
+}
+
+/* Transmit function called by generic PPP driver. Sends PPP frame
+ * over PPPoL2TP socket.
+ *
+ * This is almost the same as pppol2tp_sendmsg(), but rather than
+ * being called with a msghdr from userspace, it is called with a skb
+ * from the kernel.
+ *
+ * The supplied skb from ppp doesn't have enough headroom for the
+ * insertion of L2TP, UDP and IP headers so we need to allocate more
+ * headroom in the skb. This will create a cloned skb. But we must be
+ * careful in the error case because the caller will expect to free
+ * the skb it supplied, not our cloned skb. So we take care to always
+ * leave the original skb unfreed if we return an error.
+ */
+static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+{
+ struct sock *sk = (struct sock *)chan->private;
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel;
+ int uhlen, headroom;
+
+ if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
+ goto abort;
+
+ /* Get session and tunnel contexts from the socket */
+ session = pppol2tp_sock_to_session(sk);
+ if (!session)
+ goto abort;
+
+ tunnel = session->tunnel;
+
+ uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
+ headroom = NET_SKB_PAD +
+ sizeof(struct iphdr) + /* IP header */
+ uhlen + /* UDP header (if L2TP_ENCAPTYPE_UDP) */
+ session->hdr_len + /* L2TP header */
+ 2; /* 2 bytes for PPP_ALLSTATIONS & PPP_UI */
+ if (skb_cow_head(skb, headroom))
+ goto abort_put_sess;
+
+ /* Setup PPP header */
+ __skb_push(skb, 2);
+ skb->data[0] = PPP_ALLSTATIONS;
+ skb->data[1] = PPP_UI;
+
+ local_bh_disable();
+ l2tp_xmit_skb(session, skb);
+ local_bh_enable();
+
+ sock_put(sk);
+
+ return 1;
+
+abort_put_sess:
+ sock_put(sk);
+abort:
+ /* Free the original skb */
+ kfree_skb(skb);
+ return 1;
+}
+
+/*****************************************************************************
+ * Session (and tunnel control) socket create/destroy.
+ *****************************************************************************/
+
+static void pppol2tp_put_sk(struct rcu_head *head)
+{
+ struct pppol2tp_session *ps;
+
+ ps = container_of(head, typeof(*ps), rcu);
+ sock_put(ps->__sk);
+}
+
+/* Really kill the session socket. (Called from sock_put() if
+ * refcnt == 0.)
+ */
+static void pppol2tp_session_destruct(struct sock *sk)
+{
+ struct l2tp_session *session = sk->sk_user_data;
+
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
+
+ if (session) {
+ sk->sk_user_data = NULL;
+ if (WARN_ON(session->magic != L2TP_SESSION_MAGIC))
+ return;
+ l2tp_session_dec_refcount(session);
+ }
+}
+
+/* Called when the PPPoX socket (session) is closed.
+ */
+static int pppol2tp_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+ struct l2tp_session *session;
+ int error;
+
+ if (!sk)
+ return 0;
+
+ error = -EBADF;
+ lock_sock(sk);
+ if (sock_flag(sk, SOCK_DEAD) != 0)
+ goto error;
+
+ pppox_unbind_sock(sk);
+
+ /* Signal the death of the socket. */
+ sk->sk_state = PPPOX_DEAD;
+ sock_orphan(sk);
+ sock->sk = NULL;
+
+ session = pppol2tp_sock_to_session(sk);
+ if (session) {
+ struct pppol2tp_session *ps;
+
+ l2tp_session_delete(session);
+
+ ps = l2tp_session_priv(session);
+ mutex_lock(&ps->sk_lock);
+ ps->__sk = rcu_dereference_protected(ps->sk,
+ lockdep_is_held(&ps->sk_lock));
+ RCU_INIT_POINTER(ps->sk, NULL);
+ mutex_unlock(&ps->sk_lock);
+ call_rcu(&ps->rcu, pppol2tp_put_sk);
+
+ /* Rely on the sock_put() call at the end of the function for
+ * dropping the reference held by pppol2tp_sock_to_session().
+ * The last reference will be dropped by pppol2tp_put_sk().
+ */
+ }
+
+ release_sock(sk);
+
+ /* This will delete the session context via
+ * pppol2tp_session_destruct() if the socket's refcnt drops to
+ * zero.
+ */
+ sock_put(sk);
+
+ return 0;
+
+error:
+ release_sock(sk);
+ return error;
+}
+
+static struct proto pppol2tp_sk_proto = {
+ .name = "PPPOL2TP",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct pppox_sock),
+};
+
+static int pppol2tp_backlog_recv(struct sock *sk, struct sk_buff *skb)
+{
+ int rc;
+
+ rc = l2tp_udp_encap_recv(sk, skb);
+ if (rc)
+ kfree_skb(skb);
+
+ return NET_RX_SUCCESS;
+}
+
+/* socket() handler. Initialize a new struct sock.
+ */
+static int pppol2tp_create(struct net *net, struct socket *sock, int kern)
+{
+ int error = -ENOMEM;
+ struct sock *sk;
+
+ sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto, kern);
+ if (!sk)
+ goto out;
+
+ sock_init_data(sock, sk);
+
+ sock->state = SS_UNCONNECTED;
+ sock->ops = &pppol2tp_ops;
+
+ sk->sk_backlog_rcv = pppol2tp_backlog_recv;
+ sk->sk_protocol = PX_PROTO_OL2TP;
+ sk->sk_family = PF_PPPOX;
+ sk->sk_state = PPPOX_NONE;
+ sk->sk_type = SOCK_STREAM;
+ sk->sk_destruct = pppol2tp_session_destruct;
+
+ error = 0;
+
+out:
+ return error;
+}
+
+static void pppol2tp_show(struct seq_file *m, void *arg)
+{
+ struct l2tp_session *session = arg;
+ struct sock *sk;
+
+ sk = pppol2tp_session_get_sock(session);
+ if (sk) {
+ struct pppox_sock *po = pppox_sk(sk);
+
+ seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan));
+ sock_put(sk);
+ }
+}
+
+static void pppol2tp_session_init(struct l2tp_session *session)
+{
+ struct pppol2tp_session *ps;
+
+ session->recv_skb = pppol2tp_recv;
+ if (IS_ENABLED(CONFIG_L2TP_DEBUGFS))
+ session->show = pppol2tp_show;
+
+ ps = l2tp_session_priv(session);
+ mutex_init(&ps->sk_lock);
+ ps->owner = current->pid;
+}
+
+struct l2tp_connect_info {
+ u8 version;
+ int fd;
+ u32 tunnel_id;
+ u32 peer_tunnel_id;
+ u32 session_id;
+ u32 peer_session_id;
+};
+
+static int pppol2tp_sockaddr_get_info(const void *sa, int sa_len,
+ struct l2tp_connect_info *info)
+{
+ switch (sa_len) {
+ case sizeof(struct sockaddr_pppol2tp):
+ {
+ const struct sockaddr_pppol2tp *sa_v2in4 = sa;
+
+ if (sa_v2in4->sa_protocol != PX_PROTO_OL2TP)
+ return -EINVAL;
+
+ info->version = 2;
+ info->fd = sa_v2in4->pppol2tp.fd;
+ info->tunnel_id = sa_v2in4->pppol2tp.s_tunnel;
+ info->peer_tunnel_id = sa_v2in4->pppol2tp.d_tunnel;
+ info->session_id = sa_v2in4->pppol2tp.s_session;
+ info->peer_session_id = sa_v2in4->pppol2tp.d_session;
+
+ break;
+ }
+ case sizeof(struct sockaddr_pppol2tpv3):
+ {
+ const struct sockaddr_pppol2tpv3 *sa_v3in4 = sa;
+
+ if (sa_v3in4->sa_protocol != PX_PROTO_OL2TP)
+ return -EINVAL;
+
+ info->version = 3;
+ info->fd = sa_v3in4->pppol2tp.fd;
+ info->tunnel_id = sa_v3in4->pppol2tp.s_tunnel;
+ info->peer_tunnel_id = sa_v3in4->pppol2tp.d_tunnel;
+ info->session_id = sa_v3in4->pppol2tp.s_session;
+ info->peer_session_id = sa_v3in4->pppol2tp.d_session;
+
+ break;
+ }
+ case sizeof(struct sockaddr_pppol2tpin6):
+ {
+ const struct sockaddr_pppol2tpin6 *sa_v2in6 = sa;
+
+ if (sa_v2in6->sa_protocol != PX_PROTO_OL2TP)
+ return -EINVAL;
+
+ info->version = 2;
+ info->fd = sa_v2in6->pppol2tp.fd;
+ info->tunnel_id = sa_v2in6->pppol2tp.s_tunnel;
+ info->peer_tunnel_id = sa_v2in6->pppol2tp.d_tunnel;
+ info->session_id = sa_v2in6->pppol2tp.s_session;
+ info->peer_session_id = sa_v2in6->pppol2tp.d_session;
+
+ break;
+ }
+ case sizeof(struct sockaddr_pppol2tpv3in6):
+ {
+ const struct sockaddr_pppol2tpv3in6 *sa_v3in6 = sa;
+
+ if (sa_v3in6->sa_protocol != PX_PROTO_OL2TP)
+ return -EINVAL;
+
+ info->version = 3;
+ info->fd = sa_v3in6->pppol2tp.fd;
+ info->tunnel_id = sa_v3in6->pppol2tp.s_tunnel;
+ info->peer_tunnel_id = sa_v3in6->pppol2tp.d_tunnel;
+ info->session_id = sa_v3in6->pppol2tp.s_session;
+ info->peer_session_id = sa_v3in6->pppol2tp.d_session;
+
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Rough estimation of the maximum payload size a tunnel can transmit without
+ * fragmenting at the lower IP layer. Assumes L2TPv2 with sequence
+ * numbers and no IP option. Not quite accurate, but the result is mostly
+ * unused anyway.
+ */
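+/* For example, with a 1500-byte path MTU this gives 1500 - 40 = 1460: the
+ * 40-byte PPPOL2TP_HEADER_OVERHEAD roughly covers IPv4 (20) + UDP (8) +
+ * L2TPv2 header with sequence numbers (10) + PPP address/control (2).
+ */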
+static int pppol2tp_tunnel_mtu(const struct l2tp_tunnel *tunnel)
+{
+ int mtu;
+
+ mtu = l2tp_tunnel_dst_mtu(tunnel);
+ if (mtu <= PPPOL2TP_HEADER_OVERHEAD)
+ return 1500 - PPPOL2TP_HEADER_OVERHEAD;
+
+ return mtu - PPPOL2TP_HEADER_OVERHEAD;
+}
+
+static struct l2tp_tunnel *pppol2tp_tunnel_get(struct net *net,
+ const struct l2tp_connect_info *info,
+ bool *new_tunnel)
+{
+ struct l2tp_tunnel *tunnel;
+ int error;
+
+ *new_tunnel = false;
+
+ tunnel = l2tp_tunnel_get(net, info->tunnel_id);
+
+ /* Special case: create tunnel context if session_id and
+	 * peer_session_id are 0. Otherwise look up tunnel using supplied
+ * tunnel id.
+ */
+ if (!info->session_id && !info->peer_session_id) {
+ if (!tunnel) {
+ struct l2tp_tunnel_cfg tcfg = {
+ .encap = L2TP_ENCAPTYPE_UDP,
+ };
+
+ /* Prevent l2tp_tunnel_register() from trying to set up
+ * a kernel socket.
+ */
+ if (info->fd < 0)
+ return ERR_PTR(-EBADF);
+
+ error = l2tp_tunnel_create(info->fd,
+ info->version,
+ info->tunnel_id,
+ info->peer_tunnel_id, &tcfg,
+ &tunnel);
+ if (error < 0)
+ return ERR_PTR(error);
+
+ l2tp_tunnel_inc_refcount(tunnel);
+ error = l2tp_tunnel_register(tunnel, net, &tcfg);
+ if (error < 0) {
+ kfree(tunnel);
+ return ERR_PTR(error);
+ }
+
+ *new_tunnel = true;
+ }
+ } else {
+ /* Error if we can't find the tunnel */
+ if (!tunnel)
+ return ERR_PTR(-ENOENT);
+
+ /* Error if socket is not prepped */
+ if (!tunnel->sock) {
+ l2tp_tunnel_dec_refcount(tunnel);
+ return ERR_PTR(-ENOENT);
+ }
+ }
+
+ return tunnel;
+}
+
+/* connect() handler. Attach a PPPoX socket to a tunnel UDP socket
+ */
+static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ int sockaddr_len, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct pppox_sock *po = pppox_sk(sk);
+ struct l2tp_session *session = NULL;
+ struct l2tp_connect_info info;
+ struct l2tp_tunnel *tunnel;
+ struct pppol2tp_session *ps;
+ struct l2tp_session_cfg cfg = { 0, };
+ bool drop_refcnt = false;
+ bool new_session = false;
+ bool new_tunnel = false;
+ int error;
+
+ error = pppol2tp_sockaddr_get_info(uservaddr, sockaddr_len, &info);
+ if (error < 0)
+ return error;
+
+ /* Don't bind if tunnel_id is 0 */
+ if (!info.tunnel_id)
+ return -EINVAL;
+
+ tunnel = pppol2tp_tunnel_get(sock_net(sk), &info, &new_tunnel);
+ if (IS_ERR(tunnel))
+ return PTR_ERR(tunnel);
+
+ lock_sock(sk);
+
+ /* Check for already bound sockets */
+ error = -EBUSY;
+ if (sk->sk_state & PPPOX_CONNECTED)
+ goto end;
+
+	/* We don't support rebinding anyway */
+ error = -EALREADY;
+ if (sk->sk_user_data)
+ goto end; /* socket is already attached */
+
+ if (tunnel->peer_tunnel_id == 0)
+ tunnel->peer_tunnel_id = info.peer_tunnel_id;
+
+ session = l2tp_tunnel_get_session(tunnel, info.session_id);
+ if (session) {
+ drop_refcnt = true;
+
+ if (session->pwtype != L2TP_PWTYPE_PPP) {
+ error = -EPROTOTYPE;
+ goto end;
+ }
+
+ ps = l2tp_session_priv(session);
+
+ /* Using a pre-existing session is fine as long as it hasn't
+ * been connected yet.
+ */
+ mutex_lock(&ps->sk_lock);
+ if (rcu_dereference_protected(ps->sk,
+ lockdep_is_held(&ps->sk_lock)) ||
+ ps->__sk) {
+ mutex_unlock(&ps->sk_lock);
+ error = -EEXIST;
+ goto end;
+ }
+ } else {
+ cfg.pw_type = L2TP_PWTYPE_PPP;
+
+ session = l2tp_session_create(sizeof(struct pppol2tp_session),
+ tunnel, info.session_id,
+ info.peer_session_id, &cfg);
+ if (IS_ERR(session)) {
+ error = PTR_ERR(session);
+ goto end;
+ }
+
+ pppol2tp_session_init(session);
+ ps = l2tp_session_priv(session);
+ l2tp_session_inc_refcount(session);
+
+ mutex_lock(&ps->sk_lock);
+ error = l2tp_session_register(session, tunnel);
+ if (error < 0) {
+ mutex_unlock(&ps->sk_lock);
+ kfree(session);
+ goto end;
+ }
+ drop_refcnt = true;
+ new_session = true;
+ }
+
+ /* Special case: if source & dest session_id == 0x0000, this
+ * socket is being created to manage the tunnel. Just set up
+ * the internal context for use by ioctl() and sockopt()
+ * handlers.
+ */
+ if (session->session_id == 0 && session->peer_session_id == 0) {
+ error = 0;
+ goto out_no_ppp;
+ }
+
+ /* The only header we need to worry about is the L2TP
+ * header. This size is different depending on whether
+ * sequence numbers are enabled for the data channel.
+ */
+ po->chan.hdrlen = PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
+
+ po->chan.private = sk;
+ po->chan.ops = &pppol2tp_chan_ops;
+ po->chan.mtu = pppol2tp_tunnel_mtu(tunnel);
+
+ error = ppp_register_net_channel(sock_net(sk), &po->chan);
+ if (error) {
+ mutex_unlock(&ps->sk_lock);
+ goto end;
+ }
+
+out_no_ppp:
+ /* This is how we get the session context from the socket. */
+ sk->sk_user_data = session;
+ rcu_assign_pointer(ps->sk, sk);
+ mutex_unlock(&ps->sk_lock);
+
+ /* Keep the reference we've grabbed on the session: sk doesn't expect
+ * the session to disappear. pppol2tp_session_destruct() is responsible
+ * for dropping it.
+ */
+ drop_refcnt = false;
+
+ sk->sk_state = PPPOX_CONNECTED;
+
+end:
+ if (error) {
+ if (new_session)
+ l2tp_session_delete(session);
+ if (new_tunnel)
+ l2tp_tunnel_delete(tunnel);
+ }
+ if (drop_refcnt)
+ l2tp_session_dec_refcount(session);
+ l2tp_tunnel_dec_refcount(tunnel);
+ release_sock(sk);
+
+ return error;
+}
+
+#ifdef CONFIG_L2TP_V3
+
+/* Called when creating sessions via the netlink interface. */
+static int pppol2tp_session_create(struct net *net, struct l2tp_tunnel *tunnel,
+ u32 session_id, u32 peer_session_id,
+ struct l2tp_session_cfg *cfg)
+{
+ int error;
+ struct l2tp_session *session;
+
+ /* Error if tunnel socket is not prepped */
+ if (!tunnel->sock) {
+ error = -ENOENT;
+ goto err;
+ }
+
+ /* Allocate and initialize a new session context. */
+ session = l2tp_session_create(sizeof(struct pppol2tp_session),
+ tunnel, session_id,
+ peer_session_id, cfg);
+ if (IS_ERR(session)) {
+ error = PTR_ERR(session);
+ goto err;
+ }
+
+ pppol2tp_session_init(session);
+
+ error = l2tp_session_register(session, tunnel);
+ if (error < 0)
+ goto err_sess;
+
+ return 0;
+
+err_sess:
+ kfree(session);
+err:
+ return error;
+}
+
+#endif /* CONFIG_L2TP_V3 */
+
+/* getname() support.
+ */
+static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
+ int peer)
+{
+ int len = 0;
+ int error = 0;
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel;
+ struct sock *sk = sock->sk;
+ struct inet_sock *inet;
+ struct pppol2tp_session *pls;
+
+ error = -ENOTCONN;
+ if (!sk)
+ goto end;
+ if (!(sk->sk_state & PPPOX_CONNECTED))
+ goto end;
+
+ error = -EBADF;
+ session = pppol2tp_sock_to_session(sk);
+ if (!session)
+ goto end;
+
+ pls = l2tp_session_priv(session);
+ tunnel = session->tunnel;
+
+ inet = inet_sk(tunnel->sock);
+ if (tunnel->version == 2 && tunnel->sock->sk_family == AF_INET) {
+ struct sockaddr_pppol2tp sp;
+
+ len = sizeof(sp);
+ memset(&sp, 0, len);
+ sp.sa_family = AF_PPPOX;
+ sp.sa_protocol = PX_PROTO_OL2TP;
+ sp.pppol2tp.fd = tunnel->fd;
+ sp.pppol2tp.pid = pls->owner;
+ sp.pppol2tp.s_tunnel = tunnel->tunnel_id;
+ sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id;
+ sp.pppol2tp.s_session = session->session_id;
+ sp.pppol2tp.d_session = session->peer_session_id;
+ sp.pppol2tp.addr.sin_family = AF_INET;
+ sp.pppol2tp.addr.sin_port = inet->inet_dport;
+ sp.pppol2tp.addr.sin_addr.s_addr = inet->inet_daddr;
+ memcpy(uaddr, &sp, len);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (tunnel->version == 2 && tunnel->sock->sk_family == AF_INET6) {
+ struct sockaddr_pppol2tpin6 sp;
+
+ len = sizeof(sp);
+ memset(&sp, 0, len);
+ sp.sa_family = AF_PPPOX;
+ sp.sa_protocol = PX_PROTO_OL2TP;
+ sp.pppol2tp.fd = tunnel->fd;
+ sp.pppol2tp.pid = pls->owner;
+ sp.pppol2tp.s_tunnel = tunnel->tunnel_id;
+ sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id;
+ sp.pppol2tp.s_session = session->session_id;
+ sp.pppol2tp.d_session = session->peer_session_id;
+ sp.pppol2tp.addr.sin6_family = AF_INET6;
+ sp.pppol2tp.addr.sin6_port = inet->inet_dport;
+ memcpy(&sp.pppol2tp.addr.sin6_addr, &tunnel->sock->sk_v6_daddr,
+ sizeof(tunnel->sock->sk_v6_daddr));
+ memcpy(uaddr, &sp, len);
+ } else if (tunnel->version == 3 && tunnel->sock->sk_family == AF_INET6) {
+ struct sockaddr_pppol2tpv3in6 sp;
+
+ len = sizeof(sp);
+ memset(&sp, 0, len);
+ sp.sa_family = AF_PPPOX;
+ sp.sa_protocol = PX_PROTO_OL2TP;
+ sp.pppol2tp.fd = tunnel->fd;
+ sp.pppol2tp.pid = pls->owner;
+ sp.pppol2tp.s_tunnel = tunnel->tunnel_id;
+ sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id;
+ sp.pppol2tp.s_session = session->session_id;
+ sp.pppol2tp.d_session = session->peer_session_id;
+ sp.pppol2tp.addr.sin6_family = AF_INET6;
+ sp.pppol2tp.addr.sin6_port = inet->inet_dport;
+ memcpy(&sp.pppol2tp.addr.sin6_addr, &tunnel->sock->sk_v6_daddr,
+ sizeof(tunnel->sock->sk_v6_daddr));
+ memcpy(uaddr, &sp, len);
+#endif
+ } else if (tunnel->version == 3) {
+ struct sockaddr_pppol2tpv3 sp;
+
+ len = sizeof(sp);
+ memset(&sp, 0, len);
+ sp.sa_family = AF_PPPOX;
+ sp.sa_protocol = PX_PROTO_OL2TP;
+ sp.pppol2tp.fd = tunnel->fd;
+ sp.pppol2tp.pid = pls->owner;
+ sp.pppol2tp.s_tunnel = tunnel->tunnel_id;
+ sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id;
+ sp.pppol2tp.s_session = session->session_id;
+ sp.pppol2tp.d_session = session->peer_session_id;
+ sp.pppol2tp.addr.sin_family = AF_INET;
+ sp.pppol2tp.addr.sin_port = inet->inet_dport;
+ sp.pppol2tp.addr.sin_addr.s_addr = inet->inet_daddr;
+ memcpy(uaddr, &sp, len);
+ }
+
+ error = len;
+
+ sock_put(sk);
+end:
+ return error;
+}
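+
+/* Illustrative userspace sketch (not part of this driver): the address
+ * structure returned by getsockname()/getpeername() on a PPPoL2TP socket
+ * depends on the tunnel version and address family, as handled above.
+ * For an L2TPv2 tunnel over IPv4 (sess_fd is an assumed, already-connected
+ * AF_PPPOX/PX_PROTO_OL2TP session socket):
+ *
+ *	#include <sys/socket.h>
+ *	#include <linux/if_pppol2tp.h>	// struct sockaddr_pppol2tp
+ *
+ *	struct sockaddr_pppol2tp sp;
+ *	socklen_t len = sizeof(sp);
+ *
+ *	if (getpeername(sess_fd, (struct sockaddr *)&sp, &len) == 0) {
+ *		// sp.pppol2tp.s_tunnel/s_session identify the local tunnel
+ *		// and session; sp.pppol2tp.addr holds the peer's IP address.
+ *	}
+ */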
+
+/****************************************************************************
+ * ioctl() handlers.
+ *
+ * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
+ * sockets. However, in order to control kernel tunnel features, we allow
+ * userspace to create a special "tunnel" PPPoX socket which is used for
+ * control only. Tunnel PPPoX sockets have session_id == 0 and simply allow
+ * the user application to issue L2TP setsockopt(), getsockopt() and ioctl()
+ * calls.
+ ****************************************************************************/
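+
+/* Illustrative userspace sketch (not part of this driver): reading
+ * tunnel-wide statistics through the special "tunnel" PPPoX socket
+ * described above. tunnel_fd is an assumed descriptor for such a socket
+ * (connected with session_id == 0).
+ *
+ *	#include <sys/ioctl.h>
+ *	#include <linux/ppp-ioctl.h>	// PPPIOCGL2TPSTATS
+ *	#include <linux/if_pppol2tp.h>	// struct pppol2tp_ioc_stats
+ *
+ *	struct pppol2tp_ioc_stats stats = { .session_id = 0 };
+ *
+ *	// session_id == 0 selects the parent tunnel's counters; a non-zero
+ *	// value selects one session within that tunnel.
+ *	if (ioctl(tunnel_fd, PPPIOCGL2TPSTATS, &stats) == 0) {
+ *		// stats.tx_packets, stats.rx_packets, ... now hold the
+ *		// counters copied by pppol2tp_tunnel_copy_stats() below.
+ *	}
+ */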
+
+static void pppol2tp_copy_stats(struct pppol2tp_ioc_stats *dest,
+ const struct l2tp_stats *stats)
+{
+ memset(dest, 0, sizeof(*dest));
+
+ dest->tx_packets = atomic_long_read(&stats->tx_packets);
+ dest->tx_bytes = atomic_long_read(&stats->tx_bytes);
+ dest->tx_errors = atomic_long_read(&stats->tx_errors);
+ dest->rx_packets = atomic_long_read(&stats->rx_packets);
+ dest->rx_bytes = atomic_long_read(&stats->rx_bytes);
+ dest->rx_seq_discards = atomic_long_read(&stats->rx_seq_discards);
+ dest->rx_oos_packets = atomic_long_read(&stats->rx_oos_packets);
+ dest->rx_errors = atomic_long_read(&stats->rx_errors);
+}
+
+static int pppol2tp_tunnel_copy_stats(struct pppol2tp_ioc_stats *stats,
+ struct l2tp_tunnel *tunnel)
+{
+ struct l2tp_session *session;
+
+ if (!stats->session_id) {
+ pppol2tp_copy_stats(stats, &tunnel->stats);
+ return 0;
+ }
+
+ /* If session_id is set, look up the corresponding session within
+ * this tunnel and report that session's statistics.
+ */
+ session = l2tp_tunnel_get_session(tunnel, stats->session_id);
+ if (!session)
+ return -EBADR;
+
+ if (session->pwtype != L2TP_PWTYPE_PPP) {
+ l2tp_session_dec_refcount(session);
+ return -EBADR;
+ }
+
+ pppol2tp_copy_stats(stats, &session->stats);
+ l2tp_session_dec_refcount(session);
+
+ return 0;
+}
+
+static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd,
+ unsigned long arg)
+{
+ struct pppol2tp_ioc_stats stats;
+ struct l2tp_session *session;
+
+ switch (cmd) {
+ case PPPIOCGMRU:
+ case PPPIOCGFLAGS:
+ session = sock->sk->sk_user_data;
+ if (!session)
+ return -ENOTCONN;
+
+ if (WARN_ON(session->magic != L2TP_SESSION_MAGIC))
+ return -EBADF;
+
+ /* Not defined for tunnels */
+ if (!session->session_id && !session->peer_session_id)
+ return -ENOSYS;
+
+ if (put_user(0, (int __user *)arg))
+ return -EFAULT;
+ break;
+
+ case PPPIOCSMRU:
+ case PPPIOCSFLAGS:
+ session = sock->sk->sk_user_data;
+ if (!session)
+ return -ENOTCONN;
+
+ if (WARN_ON(session->magic != L2TP_SESSION_MAGIC))
+ return -EBADF;
+
+ /* Not defined for tunnels */
+ if (!session->session_id && !session->peer_session_id)
+ return -ENOSYS;
+
+ if (!access_ok((int __user *)arg, sizeof(int)))
+ return -EFAULT;
+ break;
+
+ case PPPIOCGL2TPSTATS:
+ session = sock->sk->sk_user_data;
+ if (!session)
+ return -ENOTCONN;
+
+ if (WARN_ON(session->magic != L2TP_SESSION_MAGIC))
+ return -EBADF;
+
+ /* Session 0 represents the parent tunnel */
+ if (!session->session_id && !session->peer_session_id) {
+ u32 session_id;
+ int err;
+
+ if (copy_from_user(&stats, (void __user *)arg,
+ sizeof(stats)))
+ return -EFAULT;
+
+ session_id = stats.session_id;
+ err = pppol2tp_tunnel_copy_stats(&stats,
+ session->tunnel);
+ if (err < 0)
+ return err;
+
+ stats.session_id = session_id;
+ } else {
+ pppol2tp_copy_stats(&stats, &session->stats);
+ stats.session_id = session->session_id;
+ }
+ stats.tunnel_id = session->tunnel->tunnel_id;
+ stats.using_ipsec = l2tp_tunnel_uses_xfrm(session->tunnel);
+
+ if (copy_to_user((void __user *)arg, &stats, sizeof(stats)))
+ return -EFAULT;
+ break;
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ return 0;
+}
+
+/*****************************************************************************
+ * setsockopt() / getsockopt() support.
+ *
+ * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
+ * sockets. In order to control kernel tunnel features, we allow userspace to
+ * create a special "tunnel" PPPoX socket which is used for control only.
+ * Tunnel PPPoX sockets have session_id == 0 and simply allow the user
+ * application to issue L2TP setsockopt(), getsockopt() and ioctl() calls.
+ *****************************************************************************/
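+
+/* Illustrative userspace sketch (not part of this driver): setting and
+ * getting per-session options on a connected PPPoL2TP session socket.
+ * sess_fd is an assumption; note that SOL_PPPOL2TP (273) may need to be
+ * defined locally if the libc headers in use do not expose it.
+ *
+ *	#include <sys/socket.h>
+ *	#include <linux/if_pppol2tp.h>	// PPPOL2TP_SO_* option names
+ *
+ *	int on = 1;
+ *	int ms;
+ *	socklen_t len = sizeof(ms);
+ *
+ *	// ask the kernel to add sequence numbers to transmitted data packets
+ *	setsockopt(sess_fd, SOL_PPPOL2TP, PPPOL2TP_SO_SENDSEQ, &on, sizeof(on));
+ *
+ *	// read back the reorder timeout, reported in milliseconds
+ *	getsockopt(sess_fd, SOL_PPPOL2TP, PPPOL2TP_SO_REORDERTO, &ms, &len);
+ */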
+
+/* Tunnel setsockopt() helper.
+ */
+static int pppol2tp_tunnel_setsockopt(struct sock *sk,
+ struct l2tp_tunnel *tunnel,
+ int optname, int val)
+{
+ int err = 0;
+
+ switch (optname) {
+ case PPPOL2TP_SO_DEBUG:
+ /* Tunnel debug flags option is deprecated */
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ return err;
+}
+
+/* Session setsockopt helper.
+ */
+static int pppol2tp_session_setsockopt(struct sock *sk,
+ struct l2tp_session *session,
+ int optname, int val)
+{
+ int err = 0;
+
+ switch (optname) {
+ case PPPOL2TP_SO_RECVSEQ:
+ if (val != 0 && val != 1) {
+ err = -EINVAL;
+ break;
+ }
+ session->recv_seq = !!val;
+ break;
+
+ case PPPOL2TP_SO_SENDSEQ:
+ if (val != 0 && val != 1) {
+ err = -EINVAL;
+ break;
+ }
+ session->send_seq = !!val;
+ {
+ struct pppox_sock *po = pppox_sk(sk);
+
+ po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ :
+ PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
+ }
+ l2tp_session_set_header_len(session, session->tunnel->version);
+ break;
+
+ case PPPOL2TP_SO_LNSMODE:
+ if (val != 0 && val != 1) {
+ err = -EINVAL;
+ break;
+ }
+ session->lns_mode = !!val;
+ break;
+
+ case PPPOL2TP_SO_DEBUG:
+ /* Session debug flags option is deprecated */
+ break;
+
+ case PPPOL2TP_SO_REORDERTO:
+ session->reorder_timeout = msecs_to_jiffies(val);
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ return err;
+}
+
+/* Main setsockopt() entry point.
+ * Does API checks, then calls either the tunnel or session setsockopt
+ * handler, according to whether the PPPoL2TP socket is for a regular
+ * session or the special tunnel type.
+ */
+static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
+ sockptr_t optval, unsigned int optlen)
+{
+ struct sock *sk = sock->sk;
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel;
+ int val;
+ int err;
+
+ if (level != SOL_PPPOL2TP)
+ return -EINVAL;
+
+ if (optlen < sizeof(int))
+ return -EINVAL;
+
+ if (copy_from_sockptr(&val, optval, sizeof(int)))
+ return -EFAULT;
+
+ err = -ENOTCONN;
+ if (!sk->sk_user_data)
+ goto end;
+
+ /* Get session context from the socket */
+ err = -EBADF;
+ session = pppol2tp_sock_to_session(sk);
+ if (!session)
+ goto end;
+
+ /* Special case: if session_id == 0x0000, treat as operation on tunnel */
+ if (session->session_id == 0 && session->peer_session_id == 0) {
+ tunnel = session->tunnel;
+ err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val);
+ } else {
+ err = pppol2tp_session_setsockopt(sk, session, optname, val);
+ }
+
+ sock_put(sk);
+end:
+ return err;
+}
+
+/* Tunnel getsockopt helper. Called with sock locked.
+ */
+static int pppol2tp_tunnel_getsockopt(struct sock *sk,
+ struct l2tp_tunnel *tunnel,
+ int optname, int *val)
+{
+ int err = 0;
+
+ switch (optname) {
+ case PPPOL2TP_SO_DEBUG:
+ /* Tunnel debug flags option is deprecated */
+ *val = 0;
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ return err;
+}
+
+/* Session getsockopt helper. Called with sock locked.
+ */
+static int pppol2tp_session_getsockopt(struct sock *sk,
+ struct l2tp_session *session,
+ int optname, int *val)
+{
+ int err = 0;
+
+ switch (optname) {
+ case PPPOL2TP_SO_RECVSEQ:
+ *val = session->recv_seq;
+ break;
+
+ case PPPOL2TP_SO_SENDSEQ:
+ *val = session->send_seq;
+ break;
+
+ case PPPOL2TP_SO_LNSMODE:
+ *val = session->lns_mode;
+ break;
+
+ case PPPOL2TP_SO_DEBUG:
+ /* Session debug flags option is deprecated */
+ *val = 0;
+ break;
+
+ case PPPOL2TP_SO_REORDERTO:
+ *val = (int)jiffies_to_msecs(session->reorder_timeout);
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ }
+
+ return err;
+}
+
+/* Main getsockopt() entry point.
+ * Does API checks, then calls either the tunnel or session getsockopt
+ * handler, according to whether the PPPoX socket is for a regular session
+ * or the special tunnel type.
+ */
+static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
+{
+ struct sock *sk = sock->sk;
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel;
+ int val, len;
+ int err;
+
+ if (level != SOL_PPPOL2TP)
+ return -EINVAL;
+
+ if (get_user(len, optlen))
+ return -EFAULT;
+
+ if (len < 0)
+ return -EINVAL;
+
+ len = min_t(unsigned int, len, sizeof(int));
+
+ err = -ENOTCONN;
+ if (!sk->sk_user_data)
+ goto end;
+
+ /* Get the session context */
+ err = -EBADF;
+ session = pppol2tp_sock_to_session(sk);
+ if (!session)
+ goto end;
+
+ /* Special case: if session_id == 0x0000, treat as operation on tunnel */
+ if (session->session_id == 0 && session->peer_session_id == 0) {
+ tunnel = session->tunnel;
+ err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
+ if (err)
+ goto end_put_sess;
+ } else {
+ err = pppol2tp_session_getsockopt(sk, session, optname, &val);
+ if (err)
+ goto end_put_sess;
+ }
+
+ err = -EFAULT;
+ if (put_user(len, optlen))
+ goto end_put_sess;
+
+ if (copy_to_user((void __user *)optval, &val, len))
+ goto end_put_sess;
+
+ err = 0;
+
+end_put_sess:
+ sock_put(sk);
+end:
+ return err;
+}
+
+/*****************************************************************************
+ * /proc filesystem for debug
+ * Since the original pppol2tp driver provided /proc/net/pppol2tp for
+ * L2TPv2, we dump only L2TPv2 tunnels and sessions here.
+ *****************************************************************************/
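+
+/* The table built below is exposed as /proc/net/pppol2tp (one instance per
+ * network namespace) and can be inspected directly, e.g.:
+ *
+ *	cat /proc/net/pppol2tp
+ *
+ * The column layout is the one emitted by pppol2tp_seq_show() below.
+ */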
+
+static unsigned int pppol2tp_net_id;
+
+#ifdef CONFIG_PROC_FS
+
+struct pppol2tp_seq_data {
+ struct seq_net_private p;
+ int tunnel_idx; /* current tunnel */
+ int session_idx; /* index of session within current tunnel */
+ struct l2tp_tunnel *tunnel;
+ struct l2tp_session *session; /* NULL means get next tunnel */
+};
+
+static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd)
+{
+ /* Drop reference taken during previous invocation */
+ if (pd->tunnel)
+ l2tp_tunnel_dec_refcount(pd->tunnel);
+
+ for (;;) {
+ pd->tunnel = l2tp_tunnel_get_nth(net, pd->tunnel_idx);
+ pd->tunnel_idx++;
+
+ /* Only accept L2TPv2 tunnels */
+ if (!pd->tunnel || pd->tunnel->version == 2)
+ return;
+
+ l2tp_tunnel_dec_refcount(pd->tunnel);
+ }
+}
+
+static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd)
+{
+ /* Drop reference taken during previous invocation */
+ if (pd->session)
+ l2tp_session_dec_refcount(pd->session);
+
+ pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx);
+ pd->session_idx++;
+
+ if (!pd->session) {
+ pd->session_idx = 0;
+ pppol2tp_next_tunnel(net, pd);
+ }
+}
+
+static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs)
+{
+ struct pppol2tp_seq_data *pd = SEQ_START_TOKEN;
+ loff_t pos = *offs;
+ struct net *net;
+
+ if (!pos)
+ goto out;
+
+ if (WARN_ON(!m->private)) {
+ pd = NULL;
+ goto out;
+ }
+
+ pd = m->private;
+ net = seq_file_net(m);
+
+ if (!pd->tunnel)
+ pppol2tp_next_tunnel(net, pd);
+ else
+ pppol2tp_next_session(net, pd);
+
+ /* NULL tunnel and session indicate the end of the list */
+ if (!pd->tunnel && !pd->session)
+ pd = NULL;
+
+out:
+ return pd;
+}
+
+static void *pppol2tp_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return NULL;
+}
+
+static void pppol2tp_seq_stop(struct seq_file *p, void *v)
+{
+ struct pppol2tp_seq_data *pd = v;
+
+ if (!pd || pd == SEQ_START_TOKEN)
+ return;
+
+ /* Drop reference taken by last invocation of pppol2tp_next_session()
+ * or pppol2tp_next_tunnel().
+ */
+ if (pd->session) {
+ l2tp_session_dec_refcount(pd->session);
+ pd->session = NULL;
+ }
+ if (pd->tunnel) {
+ l2tp_tunnel_dec_refcount(pd->tunnel);
+ pd->tunnel = NULL;
+ }
+}
+
+static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
+{
+ struct l2tp_tunnel *tunnel = v;
+
+ seq_printf(m, "\nTUNNEL '%s', %c %d\n",
+ tunnel->name,
+ (tunnel == tunnel->sock->sk_user_data) ? 'Y' : 'N',
+ refcount_read(&tunnel->ref_count) - 1);
+ seq_printf(m, " %08x %ld/%ld/%ld %ld/%ld/%ld\n",
+ 0,
+ atomic_long_read(&tunnel->stats.tx_packets),
+ atomic_long_read(&tunnel->stats.tx_bytes),
+ atomic_long_read(&tunnel->stats.tx_errors),
+ atomic_long_read(&tunnel->stats.rx_packets),
+ atomic_long_read(&tunnel->stats.rx_bytes),
+ atomic_long_read(&tunnel->stats.rx_errors));
+}
+
+static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
+{
+ struct l2tp_session *session = v;
+ struct l2tp_tunnel *tunnel = session->tunnel;
+ unsigned char state;
+ char user_data_ok;
+ struct sock *sk;
+ u32 ip = 0;
+ u16 port = 0;
+
+ if (tunnel->sock) {
+ struct inet_sock *inet = inet_sk(tunnel->sock);
+
+ ip = ntohl(inet->inet_saddr);
+ port = ntohs(inet->inet_sport);
+ }
+
+ sk = pppol2tp_session_get_sock(session);
+ if (sk) {
+ state = sk->sk_state;
+ user_data_ok = (session == sk->sk_user_data) ? 'Y' : 'N';
+ } else {
+ state = 0;
+ user_data_ok = 'N';
+ }
+
+ seq_printf(m, " SESSION '%s' %08X/%d %04X/%04X -> %04X/%04X %d %c\n",
+ session->name, ip, port,
+ tunnel->tunnel_id,
+ session->session_id,
+ tunnel->peer_tunnel_id,
+ session->peer_session_id,
+ state, user_data_ok);
+ seq_printf(m, " 0/0/%c/%c/%s %08x %u\n",
+ session->recv_seq ? 'R' : '-',
+ session->send_seq ? 'S' : '-',
+ session->lns_mode ? "LNS" : "LAC",
+ 0,
+ jiffies_to_msecs(session->reorder_timeout));
+ seq_printf(m, " %u/%u %ld/%ld/%ld %ld/%ld/%ld\n",
+ session->nr, session->ns,
+ atomic_long_read(&session->stats.tx_packets),
+ atomic_long_read(&session->stats.tx_bytes),
+ atomic_long_read(&session->stats.tx_errors),
+ atomic_long_read(&session->stats.rx_packets),
+ atomic_long_read(&session->stats.rx_bytes),
+ atomic_long_read(&session->stats.rx_errors));
+
+ if (sk) {
+ struct pppox_sock *po = pppox_sk(sk);
+
+ seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan));
+ sock_put(sk);
+ }
+}
+
+static int pppol2tp_seq_show(struct seq_file *m, void *v)
+{
+ struct pppol2tp_seq_data *pd = v;
+
+ /* display header on line 1 */
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(m, "PPPoL2TP driver info, " PPPOL2TP_DRV_VERSION "\n");
+ seq_puts(m, "TUNNEL name, user-data-ok session-count\n");
+ seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
+ seq_puts(m, " SESSION name, addr/port src-tid/sid dest-tid/sid state user-data-ok\n");
+ seq_puts(m, " mtu/mru/rcvseq/sendseq/lns debug reorderto\n");
+ seq_puts(m, " nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
+ goto out;
+ }
+
+ if (!pd->session)
+ pppol2tp_seq_tunnel_show(m, pd->tunnel);
+ else
+ pppol2tp_seq_session_show(m, pd->session);
+
+out:
+ return 0;
+}
+
+static const struct seq_operations pppol2tp_seq_ops = {
+ .start = pppol2tp_seq_start,
+ .next = pppol2tp_seq_next,
+ .stop = pppol2tp_seq_stop,
+ .show = pppol2tp_seq_show,
+};
+#endif /* CONFIG_PROC_FS */
+
+/*****************************************************************************
+ * Network namespace
+ *****************************************************************************/
+
+static __net_init int pppol2tp_init_net(struct net *net)
+{
+ struct proc_dir_entry *pde;
+ int err = 0;
+
+ pde = proc_create_net("pppol2tp", 0444, net->proc_net,
+ &pppol2tp_seq_ops, sizeof(struct pppol2tp_seq_data));
+ if (!pde) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+out:
+ return err;
+}
+
+static __net_exit void pppol2tp_exit_net(struct net *net)
+{
+ remove_proc_entry("pppol2tp", net->proc_net);
+}
+
+static struct pernet_operations pppol2tp_net_ops = {
+ .init = pppol2tp_init_net,
+ .exit = pppol2tp_exit_net,
+ .id = &pppol2tp_net_id,
+};
+
+/*****************************************************************************
+ * Init and cleanup
+ *****************************************************************************/
+
+static const struct proto_ops pppol2tp_ops = {
+ .family = AF_PPPOX,
+ .owner = THIS_MODULE,
+ .release = pppol2tp_release,
+ .bind = sock_no_bind,
+ .connect = pppol2tp_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = pppol2tp_getname,
+ .poll = datagram_poll,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .setsockopt = pppol2tp_setsockopt,
+ .getsockopt = pppol2tp_getsockopt,
+ .sendmsg = pppol2tp_sendmsg,
+ .recvmsg = pppol2tp_recvmsg,
+ .mmap = sock_no_mmap,
+ .ioctl = pppox_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = pppox_compat_ioctl,
+#endif
+};
+
+static const struct pppox_proto pppol2tp_proto = {
+ .create = pppol2tp_create,
+ .ioctl = pppol2tp_ioctl,
+ .owner = THIS_MODULE,
+};
+
+#ifdef CONFIG_L2TP_V3
+
+static const struct l2tp_nl_cmd_ops pppol2tp_nl_cmd_ops = {
+ .session_create = pppol2tp_session_create,
+ .session_delete = l2tp_session_delete,
+};
+
+#endif /* CONFIG_L2TP_V3 */
+
+static int __init pppol2tp_init(void)
+{
+ int err;
+
+ err = register_pernet_device(&pppol2tp_net_ops);
+ if (err)
+ goto out;
+
+ err = proto_register(&pppol2tp_sk_proto, 0);
+ if (err)
+ goto out_unregister_pppol2tp_pernet;
+
+ err = register_pppox_proto(PX_PROTO_OL2TP, &pppol2tp_proto);
+ if (err)
+ goto out_unregister_pppol2tp_proto;
+
+#ifdef CONFIG_L2TP_V3
+ err = l2tp_nl_register_ops(L2TP_PWTYPE_PPP, &pppol2tp_nl_cmd_ops);
+ if (err)
+ goto out_unregister_pppox;
+#endif
+
+ pr_info("PPPoL2TP kernel driver, %s\n", PPPOL2TP_DRV_VERSION);
+
+out:
+ return err;
+
+#ifdef CONFIG_L2TP_V3
+out_unregister_pppox:
+ unregister_pppox_proto(PX_PROTO_OL2TP);
+#endif
+out_unregister_pppol2tp_proto:
+ proto_unregister(&pppol2tp_sk_proto);
+out_unregister_pppol2tp_pernet:
+ unregister_pernet_device(&pppol2tp_net_ops);
+ goto out;
+}
+
+static void __exit pppol2tp_exit(void)
+{
+#ifdef CONFIG_L2TP_V3
+ l2tp_nl_unregister_ops(L2TP_PWTYPE_PPP);
+#endif
+ unregister_pppox_proto(PX_PROTO_OL2TP);
+ proto_unregister(&pppol2tp_sk_proto);
+ unregister_pernet_device(&pppol2tp_net_ops);
+}
+
+module_init(pppol2tp_init);
+module_exit(pppol2tp_exit);
+
+MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
+MODULE_DESCRIPTION("PPP over L2TP over UDP");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(PPPOL2TP_DRV_VERSION);
+MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_OL2TP);
+MODULE_ALIAS_L2TP_PWTYPE(7);
diff --git a/net/l2tp/trace.h b/net/l2tp/trace.h
new file mode 100644
index 0000000000..8596eaa12a
--- /dev/null
+++ b/net/l2tp/trace.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM l2tp
+
+#if !defined(_TRACE_L2TP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_L2TP_H
+
+#include <linux/tracepoint.h>
+#include <linux/l2tp.h>
+#include "l2tp_core.h"
+
+#define encap_type_name(e) { L2TP_ENCAPTYPE_##e, #e }
+#define show_encap_type_name(val) \
+ __print_symbolic(val, \
+ encap_type_name(UDP), \
+ encap_type_name(IP))
+
+#define pw_type_name(p) { L2TP_PWTYPE_##p, #p }
+#define show_pw_type_name(val) \
+ __print_symbolic(val, \
+ pw_type_name(ETH_VLAN), \
+ pw_type_name(ETH), \
+ pw_type_name(PPP), \
+ pw_type_name(PPP_AC), \
+ pw_type_name(IP))
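+
+/* Illustrative usage sketch (not part of this header): the tracepoints
+ * defined below can be enabled at runtime through tracefs, for example
+ * (the mount point may be /sys/kernel/tracing on newer systems):
+ *
+ *	echo 1 > /sys/kernel/debug/tracing/events/l2tp/enable
+ *	cat /sys/kernel/debug/tracing/trace_pipe
+ */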
+
+DECLARE_EVENT_CLASS(tunnel_only_evt,
+ TP_PROTO(struct l2tp_tunnel *tunnel),
+ TP_ARGS(tunnel),
+ TP_STRUCT__entry(
+ __array(char, name, L2TP_TUNNEL_NAME_MAX)
+ ),
+ TP_fast_assign(
+ memcpy(__entry->name, tunnel->name, L2TP_TUNNEL_NAME_MAX);
+ ),
+ TP_printk("%s", __entry->name)
+);
+
+DECLARE_EVENT_CLASS(session_only_evt,
+ TP_PROTO(struct l2tp_session *session),
+ TP_ARGS(session),
+ TP_STRUCT__entry(
+ __array(char, name, L2TP_SESSION_NAME_MAX)
+ ),
+ TP_fast_assign(
+ memcpy(__entry->name, session->name, L2TP_SESSION_NAME_MAX);
+ ),
+ TP_printk("%s", __entry->name)
+);
+
+TRACE_EVENT(register_tunnel,
+ TP_PROTO(struct l2tp_tunnel *tunnel),
+ TP_ARGS(tunnel),
+ TP_STRUCT__entry(
+ __array(char, name, L2TP_TUNNEL_NAME_MAX)
+ __field(int, fd)
+ __field(u32, tid)
+ __field(u32, ptid)
+ __field(int, version)
+ __field(enum l2tp_encap_type, encap)
+ ),
+ TP_fast_assign(
+ memcpy(__entry->name, tunnel->name, L2TP_TUNNEL_NAME_MAX);
+ __entry->fd = tunnel->fd;
+ __entry->tid = tunnel->tunnel_id;
+ __entry->ptid = tunnel->peer_tunnel_id;
+ __entry->version = tunnel->version;
+ __entry->encap = tunnel->encap;
+ ),
+ TP_printk("%s: type=%s encap=%s version=L2TPv%d tid=%u ptid=%u fd=%d",
+ __entry->name,
+ __entry->fd > 0 ? "managed" : "unmanaged",
+ show_encap_type_name(__entry->encap),
+ __entry->version,
+ __entry->tid,
+ __entry->ptid,
+ __entry->fd)
+);
+
+DEFINE_EVENT(tunnel_only_evt, delete_tunnel,
+ TP_PROTO(struct l2tp_tunnel *tunnel),
+ TP_ARGS(tunnel)
+);
+
+DEFINE_EVENT(tunnel_only_evt, free_tunnel,
+ TP_PROTO(struct l2tp_tunnel *tunnel),
+ TP_ARGS(tunnel)
+);
+
+TRACE_EVENT(register_session,
+ TP_PROTO(struct l2tp_session *session),
+ TP_ARGS(session),
+ TP_STRUCT__entry(
+ __array(char, name, L2TP_SESSION_NAME_MAX)
+ __field(u32, tid)
+ __field(u32, ptid)
+ __field(u32, sid)
+ __field(u32, psid)
+ __field(enum l2tp_pwtype, pwtype)
+ ),
+ TP_fast_assign(
+ memcpy(__entry->name, session->name, L2TP_SESSION_NAME_MAX);
+ __entry->tid = session->tunnel ? session->tunnel->tunnel_id : 0;
+ __entry->ptid = session->tunnel ? session->tunnel->peer_tunnel_id : 0;
+ __entry->sid = session->session_id;
+ __entry->psid = session->peer_session_id;
+ __entry->pwtype = session->pwtype;
+ ),
+ TP_printk("%s: pseudowire=%s sid=%u psid=%u tid=%u ptid=%u",
+ __entry->name,
+ show_pw_type_name(__entry->pwtype),
+ __entry->sid,
+ __entry->psid,
+ __entry->tid,
+ __entry->ptid)
+);
+
+DEFINE_EVENT(session_only_evt, delete_session,
+ TP_PROTO(struct l2tp_session *session),
+ TP_ARGS(session)
+);
+
+DEFINE_EVENT(session_only_evt, free_session,
+ TP_PROTO(struct l2tp_session *session),
+ TP_ARGS(session)
+);
+
+DEFINE_EVENT(session_only_evt, session_seqnum_lns_enable,
+ TP_PROTO(struct l2tp_session *session),
+ TP_ARGS(session)
+);
+
+DEFINE_EVENT(session_only_evt, session_seqnum_lns_disable,
+ TP_PROTO(struct l2tp_session *session),
+ TP_ARGS(session)
+);
+
+DECLARE_EVENT_CLASS(session_seqnum_evt,
+ TP_PROTO(struct l2tp_session *session),
+ TP_ARGS(session),
+ TP_STRUCT__entry(
+ __array(char, name, L2TP_SESSION_NAME_MAX)
+ __field(u32, ns)
+ __field(u32, nr)
+ ),
+ TP_fast_assign(
+ memcpy(__entry->name, session->name, L2TP_SESSION_NAME_MAX);
+ __entry->ns = session->ns;
+ __entry->nr = session->nr;
+ ),
+ TP_printk("%s: ns=%u nr=%u",
+ __entry->name,
+ __entry->ns,
+ __entry->nr)
+);
+
+DEFINE_EVENT(session_seqnum_evt, session_seqnum_update,
+ TP_PROTO(struct l2tp_session *session),
+ TP_ARGS(session)
+);
+
+DEFINE_EVENT(session_seqnum_evt, session_seqnum_reset,
+ TP_PROTO(struct l2tp_session *session),
+ TP_ARGS(session)
+);
+
+DECLARE_EVENT_CLASS(session_pkt_discard_evt,
+ TP_PROTO(struct l2tp_session *session, u32 pkt_ns),
+ TP_ARGS(session, pkt_ns),
+ TP_STRUCT__entry(
+ __array(char, name, L2TP_SESSION_NAME_MAX)
+ __field(u32, pkt_ns)
+ __field(u32, my_nr)
+ __field(u32, reorder_q_len)
+ ),
+ TP_fast_assign(
+ memcpy(__entry->name, session->name, L2TP_SESSION_NAME_MAX);
+ __entry->pkt_ns = pkt_ns;
+ __entry->my_nr = session->nr;
+ __entry->reorder_q_len = skb_queue_len(&session->reorder_q);
+ ),
+ TP_printk("%s: pkt_ns=%u my_nr=%u reorder_q_len=%u",
+ __entry->name,
+ __entry->pkt_ns,
+ __entry->my_nr,
+ __entry->reorder_q_len)
+);
+
+DEFINE_EVENT(session_pkt_discard_evt, session_pkt_expired,
+ TP_PROTO(struct l2tp_session *session, u32 pkt_ns),
+ TP_ARGS(session, pkt_ns)
+);
+
+DEFINE_EVENT(session_pkt_discard_evt, session_pkt_outside_rx_window,
+ TP_PROTO(struct l2tp_session *session, u32 pkt_ns),
+ TP_ARGS(session, pkt_ns)
+);
+
+DEFINE_EVENT(session_pkt_discard_evt, session_pkt_oos,
+ TP_PROTO(struct l2tp_session *session, u32 pkt_ns),
+ TP_ARGS(session, pkt_ns)
+);
+
+#endif /* _TRACE_L2TP_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>