author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /net/x25
parent     Initial commit. (diff)
Adding upstream version 6.1.76. (upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'net/x25')
-rw-r--r--   net/x25/Kconfig              34
-rw-r--r--   net/x25/Makefile             11
-rw-r--r--   net/x25/af_x25.c           1856
-rw-r--r--   net/x25/sysctl_net_x25.c     88
-rw-r--r--   net/x25/x25_dev.c           216
-rw-r--r--   net/x25/x25_facilities.c    350
-rw-r--r--   net/x25/x25_forward.c       157
-rw-r--r--   net/x25/x25_in.c            456
-rw-r--r--   net/x25/x25_link.c          421
-rw-r--r--   net/x25/x25_out.c           226
-rw-r--r--   net/x25/x25_proc.c          206
-rw-r--r--   net/x25/x25_route.c         204
-rw-r--r--   net/x25/x25_subr.c          384
-rw-r--r--   net/x25/x25_timer.c         169
14 files changed, 4778 insertions, 0 deletions
diff --git a/net/x25/Kconfig b/net/x25/Kconfig
new file mode 100644
index 000000000..68729aa3a
--- /dev/null
+++ b/net/x25/Kconfig
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# CCITT X.25 Packet Layer
+#
+
+config X25
+ tristate "CCITT X.25 Packet Layer"
+ help
+ X.25 is a set of standardized network protocols, similar in scope to
+ frame relay; the one physical line from your box to the X.25 network
+ entry point can carry several logical point-to-point connections
+ (called "virtual circuits") to other computers connected to the X.25
+ network. Governments, banks, and other organizations tend to use it
+ to connect to each other or to form Wide Area Networks (WANs). Many
+ countries have public X.25 networks. X.25 consists of two
+ protocols: the higher level Packet Layer Protocol (PLP) (say Y here
+ if you want that) and the lower level data link layer protocol LAPB
+ (say Y to "LAPB Data Link Driver" below if you want that).
+
+ You can read more about X.25 at <https://www.sangoma.com/tutorials/x25/> and
+ <http://docwiki.cisco.com/wiki/X.25>.
+ Information about X.25 for Linux is contained in the files
+ <file:Documentation/networking/x25.rst> and
+ <file:Documentation/networking/x25-iface.rst>.
+
+ You can connect to an X.25 network either with a dedicated network
+ card using the X.21 protocol (not yet supported by Linux), over a
+ standard telephone line using an ordinary modem (say Y to "X.25
+ async driver" below), or over Ethernet using an ordinary Ethernet
+ card and LAPB over Ethernet (say Y to "LAPB Data Link Driver" and
+ "LAPB over Ethernet driver" below).
+
+ To compile this driver as a module, choose M here: the module
+ will be called x25. If unsure, say N.
diff --git a/net/x25/Makefile b/net/x25/Makefile
new file mode 100644
index 000000000..5dd544a23
--- /dev/null
+++ b/net/x25/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Linux X.25 Packet layer.
+#
+
+obj-$(CONFIG_X25) += x25.o
+
+x25-y := af_x25.o x25_dev.o x25_facilities.o x25_in.o \
+ x25_link.o x25_out.o x25_route.o x25_subr.o \
+ x25_timer.o x25_proc.o x25_forward.o
+x25-$(CONFIG_SYSCTL) += sysctl_net_x25.o
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
new file mode 100644
index 000000000..5c7ad301d
--- /dev/null
+++ b/net/x25/af_x25.c
@@ -0,0 +1,1856 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * X.25 Packet Layer release 002
+ *
+ * This is ALPHA test software. This code may break your machine,
+ * randomly fail to work with new releases, misbehave and/or generally
+ * screw up. It might even work.
+ *
+ * This code REQUIRES 2.1.15 or higher
+ *
+ * History
+ * X.25 001 Jonathan Naylor Started coding.
+ * X.25 002 Jonathan Naylor Centralised disconnect handling.
+ * New timer architecture.
+ * 2000-03-11 Henner Eisen MSG_EOR handling more POSIX compliant.
+ * 2000-03-22 Daniela Squassoni Allowed disabling/enabling of
+ * facilities negotiation and increased
+ * the throughput upper limit.
+ * 2000-08-27 Arnaldo C. Melo s/suser/capable/ + micro cleanups
+ * 2000-09-04 Henner Eisen Set sock->state in x25_accept().
+ * Fixed x25_output() related skb leakage.
+ * 2000-10-02 Henner Eisen Made x25_kick() single threaded per socket.
+ * 2000-10-27 Henner Eisen MSG_DONTWAIT for fragment allocation.
+ * 2000-11-14 Henner Eisen Closing datalink from NETDEV_GOING_DOWN
+ * 2002-10-06 Arnaldo C. Melo Get rid of cli/sti, move proc stuff to
+ * x25_proc.c, using seq_file
+ * 2005-04-02 Shaun Pereira Selective sub address matching
+ * with call user data
+ * 2005-04-15 Shaun Pereira Fast select with no restriction on
+ * response
+ */
+
+#define pr_fmt(fmt) "X25: " fmt
+
+#include <linux/module.h>
+#include <linux/capability.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/sched/signal.h>
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <net/sock.h>
+#include <net/tcp_states.h>
+#include <linux/uaccess.h>
+#include <linux/fcntl.h>
+#include <linux/termios.h> /* For TIOCINQ/OUTQ */
+#include <linux/notifier.h>
+#include <linux/init.h>
+#include <linux/compat.h>
+#include <linux/ctype.h>
+
+#include <net/x25.h>
+#include <net/compat.h>
+
+int sysctl_x25_restart_request_timeout = X25_DEFAULT_T20;
+int sysctl_x25_call_request_timeout = X25_DEFAULT_T21;
+int sysctl_x25_reset_request_timeout = X25_DEFAULT_T22;
+int sysctl_x25_clear_request_timeout = X25_DEFAULT_T23;
+int sysctl_x25_ack_holdback_timeout = X25_DEFAULT_T2;
+int sysctl_x25_forward = 0;
+
+HLIST_HEAD(x25_list);
+DEFINE_RWLOCK(x25_list_lock);
+
+static const struct proto_ops x25_proto_ops;
+
+static const struct x25_address null_x25_address = {" "};
+
+#ifdef CONFIG_COMPAT
+struct compat_x25_subscrip_struct {
+ char device[200-sizeof(compat_ulong_t)];
+ compat_ulong_t global_facil_mask;
+ compat_uint_t extended;
+};
+#endif
+
+
+int x25_parse_address_block(struct sk_buff *skb,
+ struct x25_address *called_addr,
+ struct x25_address *calling_addr)
+{
+ unsigned char len;
+ int needed;
+ int rc;
+
+ if (!pskb_may_pull(skb, 1)) {
+ /* packet has no address block */
+ rc = 0;
+ goto empty;
+ }
+
+ len = *skb->data;
+ needed = 1 + ((len >> 4) + (len & 0x0f) + 1) / 2;
+
+ if (!pskb_may_pull(skb, needed)) {
+ /* packet is too short to hold the addresses it claims
+ to hold */
+ rc = -1;
+ goto empty;
+ }
+
+ return x25_addr_ntoa(skb->data, called_addr, calling_addr);
+
+empty:
+ *called_addr->x25_addr = 0;
+ *calling_addr->x25_addr = 0;
+
+ return rc;
+}
+
+
+int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr,
+ struct x25_address *calling_addr)
+{
+ unsigned int called_len, calling_len;
+ char *called, *calling;
+ unsigned int i;
+
+ called_len = (*p >> 0) & 0x0F;
+ calling_len = (*p >> 4) & 0x0F;
+
+ called = called_addr->x25_addr;
+ calling = calling_addr->x25_addr;
+ p++;
+
+ for (i = 0; i < (called_len + calling_len); i++) {
+ if (i < called_len) {
+ if (i % 2 != 0) {
+ *called++ = ((*p >> 0) & 0x0F) + '0';
+ p++;
+ } else {
+ *called++ = ((*p >> 4) & 0x0F) + '0';
+ }
+ } else {
+ if (i % 2 != 0) {
+ *calling++ = ((*p >> 0) & 0x0F) + '0';
+ p++;
+ } else {
+ *calling++ = ((*p >> 4) & 0x0F) + '0';
+ }
+ }
+ }
+
+ *called = *calling = '\0';
+
+ return 1 + (called_len + calling_len + 1) / 2;
+}
+
+int x25_addr_aton(unsigned char *p, struct x25_address *called_addr,
+ struct x25_address *calling_addr)
+{
+ unsigned int called_len, calling_len;
+ char *called, *calling;
+ int i;
+
+ called = called_addr->x25_addr;
+ calling = calling_addr->x25_addr;
+
+ called_len = strlen(called);
+ calling_len = strlen(calling);
+
+ *p++ = (calling_len << 4) | (called_len << 0);
+
+ for (i = 0; i < (called_len + calling_len); i++) {
+ if (i < called_len) {
+ if (i % 2 != 0) {
+ *p |= (*called++ - '0') << 0;
+ p++;
+ } else {
+ *p = 0x00;
+ *p |= (*called++ - '0') << 4;
+ }
+ } else {
+ if (i % 2 != 0) {
+ *p |= (*calling++ - '0') << 0;
+ p++;
+ } else {
+ *p = 0x00;
+ *p |= (*calling++ - '0') << 4;
+ }
+ }
+ }
+
+ return 1 + (called_len + calling_len + 1) / 2;
+}
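
A note on the encoding handled by x25_addr_ntoa()/x25_addr_aton() above: the address block starts with one length byte (calling length in the high nibble, called length in the low nibble), followed by the called digits and then the calling digits, packed two BCD digits per byte, high nibble first. Below is a minimal user-space sketch of the same packing, useful for checking encodings by hand; the function name and sample digits are illustrative only, not part of the kernel API.

#include <stdio.h>
#include <string.h>

/* Mirror of the kernel's x25_addr_aton() nibble packing, for illustration
 * only: called digits first, then calling digits, two digits per byte,
 * high nibble first. Returns the encoded length in bytes.
 */
static int pack_x25_addresses(unsigned char *p, const char *called,
			      const char *calling)
{
	size_t called_len = strlen(called), calling_len = strlen(calling);
	size_t i;

	*p++ = (calling_len << 4) | called_len;		/* length byte */

	for (i = 0; i < called_len + calling_len; i++) {
		const char *src = i < called_len ? called + i
						 : calling + (i - called_len);
		if (i % 2 == 0)
			*p = (*src - '0') << 4;		/* high nibble */
		else
			*p++ |= (*src - '0');		/* low nibble, advance */
	}
	return 1 + (called_len + calling_len + 1) / 2;
}

int main(void)
{
	unsigned char buf[32];
	int i, len = pack_x25_addresses(buf, "12345", "678");

	/* Prints: 35 12 34 56 78  (length byte 0x35, then packed digits),
	 * matching what x25_addr_aton() emits for the same addresses. */
	for (i = 0; i < len; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}
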
+
+/*
+ * Socket removal during an interrupt is now safe.
+ */
+static void x25_remove_socket(struct sock *sk)
+{
+ write_lock_bh(&x25_list_lock);
+ sk_del_node_init(sk);
+ write_unlock_bh(&x25_list_lock);
+}
+
+/*
+ * Handle device status changes.
+ */
+static int x25_device_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct x25_neigh *nb;
+
+ if (!net_eq(dev_net(dev), &init_net))
+ return NOTIFY_DONE;
+
+ if (dev->type == ARPHRD_X25) {
+ switch (event) {
+ case NETDEV_REGISTER:
+ case NETDEV_POST_TYPE_CHANGE:
+ x25_link_device_up(dev);
+ break;
+ case NETDEV_DOWN:
+ nb = x25_get_neigh(dev);
+ if (nb) {
+ x25_link_terminated(nb);
+ x25_neigh_put(nb);
+ }
+ x25_route_device_down(dev);
+ break;
+ case NETDEV_PRE_TYPE_CHANGE:
+ case NETDEV_UNREGISTER:
+ x25_link_device_down(dev);
+ break;
+ case NETDEV_CHANGE:
+ if (!netif_carrier_ok(dev)) {
+ nb = x25_get_neigh(dev);
+ if (nb) {
+ x25_link_terminated(nb);
+ x25_neigh_put(nb);
+ }
+ }
+ break;
+ }
+ }
+
+ return NOTIFY_DONE;
+}
+
+/*
+ * Add a socket to the bound sockets list.
+ */
+static void x25_insert_socket(struct sock *sk)
+{
+ write_lock_bh(&x25_list_lock);
+ sk_add_node(sk, &x25_list);
+ write_unlock_bh(&x25_list_lock);
+}
+
+/*
+ * Find a socket that wants to accept the Call Request we just
+ * received. Check the full list for an address/cud match.
+ * If no cud matches, return the next best thing: an address match.
+ * Note: if a listening socket has cud set it must only get calls
+ * with matching cud.
+ */
+static struct sock *x25_find_listener(struct x25_address *addr,
+ struct sk_buff *skb)
+{
+ struct sock *s;
+ struct sock *next_best;
+
+ read_lock_bh(&x25_list_lock);
+ next_best = NULL;
+
+ sk_for_each(s, &x25_list)
+ if ((!strcmp(addr->x25_addr,
+ x25_sk(s)->source_addr.x25_addr) ||
+ !strcmp(x25_sk(s)->source_addr.x25_addr,
+ null_x25_address.x25_addr)) &&
+ s->sk_state == TCP_LISTEN) {
+ /*
+ * Found a listening socket, now check the incoming
+ * call user data vs this socket's call user data
+ */
+ if (x25_sk(s)->cudmatchlength > 0 &&
+ skb->len >= x25_sk(s)->cudmatchlength) {
+ if((memcmp(x25_sk(s)->calluserdata.cuddata,
+ skb->data,
+ x25_sk(s)->cudmatchlength)) == 0) {
+ sock_hold(s);
+ goto found;
+ }
+ } else
+ next_best = s;
+ }
+ if (next_best) {
+ s = next_best;
+ sock_hold(s);
+ goto found;
+ }
+ s = NULL;
+found:
+ read_unlock_bh(&x25_list_lock);
+ return s;
+}
+
+/*
+ * Find a connected X.25 socket given my LCI and neighbour.
+ */
+static struct sock *__x25_find_socket(unsigned int lci, struct x25_neigh *nb)
+{
+ struct sock *s;
+
+ sk_for_each(s, &x25_list)
+ if (x25_sk(s)->lci == lci && x25_sk(s)->neighbour == nb) {
+ sock_hold(s);
+ goto found;
+ }
+ s = NULL;
+found:
+ return s;
+}
+
+struct sock *x25_find_socket(unsigned int lci, struct x25_neigh *nb)
+{
+ struct sock *s;
+
+ read_lock_bh(&x25_list_lock);
+ s = __x25_find_socket(lci, nb);
+ read_unlock_bh(&x25_list_lock);
+ return s;
+}
+
+/*
+ * Find a unique LCI for a given device.
+ */
+static unsigned int x25_new_lci(struct x25_neigh *nb)
+{
+ unsigned int lci = 1;
+ struct sock *sk;
+
+ while ((sk = x25_find_socket(lci, nb)) != NULL) {
+ sock_put(sk);
+ if (++lci == 4096) {
+ lci = 0;
+ break;
+ }
+ cond_resched();
+ }
+
+ return lci;
+}
+
+/*
+ * Deferred destroy.
+ */
+static void __x25_destroy_socket(struct sock *);
+
+/*
+ * handler for deferred kills.
+ */
+static void x25_destroy_timer(struct timer_list *t)
+{
+ struct sock *sk = from_timer(sk, t, sk_timer);
+
+ x25_destroy_socket_from_timer(sk);
+}
+
+/*
+ * This is called from user mode and the timers. Thus it protects itself
+ * against interrupting users but doesn't worry about being called during
+ * work. Once it is removed from the queue no interrupt or bottom half
+ * will touch it and we are (fairly 8-) ) safe.
+ * Not static as it's used by the timer
+ */
+static void __x25_destroy_socket(struct sock *sk)
+{
+ struct sk_buff *skb;
+
+ x25_stop_heartbeat(sk);
+ x25_stop_timer(sk);
+
+ x25_remove_socket(sk);
+ x25_clear_queues(sk); /* Flush the queues */
+
+ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
+ if (skb->sk != sk) { /* A pending connection */
+ /*
+ * Queue the unaccepted socket for death
+ */
+ skb->sk->sk_state = TCP_LISTEN;
+ sock_set_flag(skb->sk, SOCK_DEAD);
+ x25_start_heartbeat(skb->sk);
+ x25_sk(skb->sk)->state = X25_STATE_0;
+ }
+
+ kfree_skb(skb);
+ }
+
+ if (sk_has_allocations(sk)) {
+ /* Defer: outstanding buffers */
+ sk->sk_timer.expires = jiffies + 10 * HZ;
+ sk->sk_timer.function = x25_destroy_timer;
+ add_timer(&sk->sk_timer);
+ } else {
+ /* drop last reference so sock_put will free */
+ __sock_put(sk);
+ }
+}
+
+void x25_destroy_socket_from_timer(struct sock *sk)
+{
+ sock_hold(sk);
+ bh_lock_sock(sk);
+ __x25_destroy_socket(sk);
+ bh_unlock_sock(sk);
+ sock_put(sk);
+}
+
+/*
+ * Handling for system calls applied via the various interfaces to an
+ * X.25 socket object.
+ */
+
+static int x25_setsockopt(struct socket *sock, int level, int optname,
+ sockptr_t optval, unsigned int optlen)
+{
+ int opt;
+ struct sock *sk = sock->sk;
+ int rc = -ENOPROTOOPT;
+
+ if (level != SOL_X25 || optname != X25_QBITINCL)
+ goto out;
+
+ rc = -EINVAL;
+ if (optlen < sizeof(int))
+ goto out;
+
+ rc = -EFAULT;
+ if (copy_from_sockptr(&opt, optval, sizeof(int)))
+ goto out;
+
+ if (opt)
+ set_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags);
+ else
+ clear_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags);
+ rc = 0;
+out:
+ return rc;
+}
+
+static int x25_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
+{
+ struct sock *sk = sock->sk;
+ int val, len, rc = -ENOPROTOOPT;
+
+ if (level != SOL_X25 || optname != X25_QBITINCL)
+ goto out;
+
+ rc = -EFAULT;
+ if (get_user(len, optlen))
+ goto out;
+
+ len = min_t(unsigned int, len, sizeof(int));
+
+ rc = -EINVAL;
+ if (len < 0)
+ goto out;
+
+ rc = -EFAULT;
+ if (put_user(len, optlen))
+ goto out;
+
+ val = test_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags);
+ rc = copy_to_user(optval, &val, len) ? -EFAULT : 0;
+out:
+ return rc;
+}
+
+static int x25_listen(struct socket *sock, int backlog)
+{
+ struct sock *sk = sock->sk;
+ int rc = -EOPNOTSUPP;
+
+ lock_sock(sk);
+ if (sock->state != SS_UNCONNECTED) {
+ rc = -EINVAL;
+ release_sock(sk);
+ return rc;
+ }
+
+ if (sk->sk_state != TCP_LISTEN) {
+ memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN);
+ sk->sk_max_ack_backlog = backlog;
+ sk->sk_state = TCP_LISTEN;
+ rc = 0;
+ }
+ release_sock(sk);
+
+ return rc;
+}
+
+static struct proto x25_proto = {
+ .name = "X25",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct x25_sock),
+};
+
+static struct sock *x25_alloc_socket(struct net *net, int kern)
+{
+ struct x25_sock *x25;
+ struct sock *sk = sk_alloc(net, AF_X25, GFP_ATOMIC, &x25_proto, kern);
+
+ if (!sk)
+ goto out;
+
+ sock_init_data(NULL, sk);
+
+ x25 = x25_sk(sk);
+ skb_queue_head_init(&x25->ack_queue);
+ skb_queue_head_init(&x25->fragment_queue);
+ skb_queue_head_init(&x25->interrupt_in_queue);
+ skb_queue_head_init(&x25->interrupt_out_queue);
+out:
+ return sk;
+}
+
+static int x25_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
+{
+ struct sock *sk;
+ struct x25_sock *x25;
+ int rc = -EAFNOSUPPORT;
+
+ if (!net_eq(net, &init_net))
+ goto out;
+
+ rc = -ESOCKTNOSUPPORT;
+ if (sock->type != SOCK_SEQPACKET)
+ goto out;
+
+ rc = -EINVAL;
+ if (protocol)
+ goto out;
+
+ rc = -ENOMEM;
+ if ((sk = x25_alloc_socket(net, kern)) == NULL)
+ goto out;
+
+ x25 = x25_sk(sk);
+
+ sock_init_data(sock, sk);
+
+ x25_init_timers(sk);
+
+ sock->ops = &x25_proto_ops;
+ sk->sk_protocol = protocol;
+ sk->sk_backlog_rcv = x25_backlog_rcv;
+
+ x25->t21 = sysctl_x25_call_request_timeout;
+ x25->t22 = sysctl_x25_reset_request_timeout;
+ x25->t23 = sysctl_x25_clear_request_timeout;
+ x25->t2 = sysctl_x25_ack_holdback_timeout;
+ x25->state = X25_STATE_0;
+ x25->cudmatchlength = 0;
+ set_bit(X25_ACCPT_APPRV_FLAG, &x25->flags); /* normally no cud */
+ /* on call accept */
+
+ x25->facilities.winsize_in = X25_DEFAULT_WINDOW_SIZE;
+ x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE;
+ x25->facilities.pacsize_in = X25_DEFAULT_PACKET_SIZE;
+ x25->facilities.pacsize_out = X25_DEFAULT_PACKET_SIZE;
+ x25->facilities.throughput = 0; /* by default don't negotiate
+ throughput */
+ x25->facilities.reverse = X25_DEFAULT_REVERSE;
+ x25->dte_facilities.calling_len = 0;
+ x25->dte_facilities.called_len = 0;
+ memset(x25->dte_facilities.called_ae, '\0',
+ sizeof(x25->dte_facilities.called_ae));
+ memset(x25->dte_facilities.calling_ae, '\0',
+ sizeof(x25->dte_facilities.calling_ae));
+
+ rc = 0;
+out:
+ return rc;
+}
+
+static struct sock *x25_make_new(struct sock *osk)
+{
+ struct sock *sk = NULL;
+ struct x25_sock *x25, *ox25;
+
+ if (osk->sk_type != SOCK_SEQPACKET)
+ goto out;
+
+ if ((sk = x25_alloc_socket(sock_net(osk), 0)) == NULL)
+ goto out;
+
+ x25 = x25_sk(sk);
+
+ sk->sk_type = osk->sk_type;
+ sk->sk_priority = osk->sk_priority;
+ sk->sk_protocol = osk->sk_protocol;
+ sk->sk_rcvbuf = osk->sk_rcvbuf;
+ sk->sk_sndbuf = osk->sk_sndbuf;
+ sk->sk_state = TCP_ESTABLISHED;
+ sk->sk_backlog_rcv = osk->sk_backlog_rcv;
+ sock_copy_flags(sk, osk);
+
+ ox25 = x25_sk(osk);
+ x25->t21 = ox25->t21;
+ x25->t22 = ox25->t22;
+ x25->t23 = ox25->t23;
+ x25->t2 = ox25->t2;
+ x25->flags = ox25->flags;
+ x25->facilities = ox25->facilities;
+ x25->dte_facilities = ox25->dte_facilities;
+ x25->cudmatchlength = ox25->cudmatchlength;
+
+ clear_bit(X25_INTERRUPT_FLAG, &x25->flags);
+ x25_init_timers(sk);
+out:
+ return sk;
+}
+
+static int x25_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+ struct x25_sock *x25;
+
+ if (!sk)
+ return 0;
+
+ x25 = x25_sk(sk);
+
+ sock_hold(sk);
+ lock_sock(sk);
+ switch (x25->state) {
+
+ case X25_STATE_0:
+ case X25_STATE_2:
+ x25_disconnect(sk, 0, 0, 0);
+ __x25_destroy_socket(sk);
+ goto out;
+
+ case X25_STATE_1:
+ case X25_STATE_3:
+ case X25_STATE_4:
+ x25_clear_queues(sk);
+ x25_write_internal(sk, X25_CLEAR_REQUEST);
+ x25_start_t23timer(sk);
+ x25->state = X25_STATE_2;
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
+ sk->sk_state_change(sk);
+ sock_set_flag(sk, SOCK_DEAD);
+ sock_set_flag(sk, SOCK_DESTROY);
+ break;
+
+ case X25_STATE_5:
+ x25_write_internal(sk, X25_CLEAR_REQUEST);
+ x25_disconnect(sk, 0, 0, 0);
+ __x25_destroy_socket(sk);
+ goto out;
+ }
+
+ sock_orphan(sk);
+out:
+ release_sock(sk);
+ sock_put(sk);
+ return 0;
+}
+
+static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+{
+ struct sock *sk = sock->sk;
+ struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
+ int len, i, rc = 0;
+
+ if (addr_len != sizeof(struct sockaddr_x25) ||
+ addr->sx25_family != AF_X25 ||
+ strnlen(addr->sx25_addr.x25_addr, X25_ADDR_LEN) == X25_ADDR_LEN) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* check for the null_x25_address */
+ if (strcmp(addr->sx25_addr.x25_addr, null_x25_address.x25_addr)) {
+
+ len = strlen(addr->sx25_addr.x25_addr);
+ for (i = 0; i < len; i++) {
+ if (!isdigit(addr->sx25_addr.x25_addr[i])) {
+ rc = -EINVAL;
+ goto out;
+ }
+ }
+ }
+
+ lock_sock(sk);
+ if (sock_flag(sk, SOCK_ZAPPED)) {
+ x25_sk(sk)->source_addr = addr->sx25_addr;
+ x25_insert_socket(sk);
+ sock_reset_flag(sk, SOCK_ZAPPED);
+ } else {
+ rc = -EINVAL;
+ }
+ release_sock(sk);
+ SOCK_DEBUG(sk, "x25_bind: socket is bound\n");
+out:
+ return rc;
+}
+
+static int x25_wait_for_connection_establishment(struct sock *sk)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ int rc;
+
+ add_wait_queue_exclusive(sk_sleep(sk), &wait);
+ for (;;) {
+ __set_current_state(TASK_INTERRUPTIBLE);
+ rc = -ERESTARTSYS;
+ if (signal_pending(current))
+ break;
+ rc = sock_error(sk);
+ if (rc) {
+ sk->sk_socket->state = SS_UNCONNECTED;
+ break;
+ }
+ rc = -ENOTCONN;
+ if (sk->sk_state == TCP_CLOSE) {
+ sk->sk_socket->state = SS_UNCONNECTED;
+ break;
+ }
+ rc = 0;
+ if (sk->sk_state != TCP_ESTABLISHED) {
+ release_sock(sk);
+ schedule();
+ lock_sock(sk);
+ } else
+ break;
+ }
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(sk_sleep(sk), &wait);
+ return rc;
+}
+
+static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct x25_sock *x25 = x25_sk(sk);
+ struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
+ struct x25_route *rt;
+ int rc = 0;
+
+ lock_sock(sk);
+ if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
+ sock->state = SS_CONNECTED;
+ goto out; /* Connect completed during a ERESTARTSYS event */
+ }
+
+ rc = -ECONNREFUSED;
+ if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
+ sock->state = SS_UNCONNECTED;
+ goto out;
+ }
+
+ rc = -EISCONN; /* No reconnect on a seqpacket socket */
+ if (sk->sk_state == TCP_ESTABLISHED)
+ goto out;
+
+ rc = -EALREADY; /* Do nothing if call is already in progress */
+ if (sk->sk_state == TCP_SYN_SENT)
+ goto out;
+
+ sk->sk_state = TCP_CLOSE;
+ sock->state = SS_UNCONNECTED;
+
+ rc = -EINVAL;
+ if (addr_len != sizeof(struct sockaddr_x25) ||
+ addr->sx25_family != AF_X25 ||
+ strnlen(addr->sx25_addr.x25_addr, X25_ADDR_LEN) == X25_ADDR_LEN)
+ goto out;
+
+ rc = -ENETUNREACH;
+ rt = x25_get_route(&addr->sx25_addr);
+ if (!rt)
+ goto out;
+
+ x25->neighbour = x25_get_neigh(rt->dev);
+ if (!x25->neighbour)
+ goto out_put_route;
+
+ x25_limit_facilities(&x25->facilities, x25->neighbour);
+
+ x25->lci = x25_new_lci(x25->neighbour);
+ if (!x25->lci)
+ goto out_put_neigh;
+
+ rc = -EINVAL;
+ if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
+ goto out_put_neigh;
+
+ if (!strcmp(x25->source_addr.x25_addr, null_x25_address.x25_addr))
+ memset(&x25->source_addr, '\0', X25_ADDR_LEN);
+
+ x25->dest_addr = addr->sx25_addr;
+
+ /* Move to connecting socket, start sending Connect Requests */
+ sock->state = SS_CONNECTING;
+ sk->sk_state = TCP_SYN_SENT;
+
+ x25->state = X25_STATE_1;
+
+ x25_write_internal(sk, X25_CALL_REQUEST);
+
+ x25_start_heartbeat(sk);
+ x25_start_t21timer(sk);
+
+ /* Now the loop */
+ rc = -EINPROGRESS;
+ if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
+ goto out;
+
+ rc = x25_wait_for_connection_establishment(sk);
+ if (rc)
+ goto out_put_neigh;
+
+ sock->state = SS_CONNECTED;
+ rc = 0;
+out_put_neigh:
+ if (rc && x25->neighbour) {
+ read_lock_bh(&x25_list_lock);
+ x25_neigh_put(x25->neighbour);
+ x25->neighbour = NULL;
+ read_unlock_bh(&x25_list_lock);
+ x25->state = X25_STATE_0;
+ }
+out_put_route:
+ x25_route_put(rt);
+out:
+ release_sock(sk);
+ return rc;
+}
+
+static int x25_wait_for_data(struct sock *sk, long timeout)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ int rc = 0;
+
+ add_wait_queue_exclusive(sk_sleep(sk), &wait);
+ for (;;) {
+ __set_current_state(TASK_INTERRUPTIBLE);
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ break;
+ rc = -ERESTARTSYS;
+ if (signal_pending(current))
+ break;
+ rc = -EAGAIN;
+ if (!timeout)
+ break;
+ rc = 0;
+ if (skb_queue_empty(&sk->sk_receive_queue)) {
+ release_sock(sk);
+ timeout = schedule_timeout(timeout);
+ lock_sock(sk);
+ } else
+ break;
+ }
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(sk_sleep(sk), &wait);
+ return rc;
+}
+
+static int x25_accept(struct socket *sock, struct socket *newsock, int flags,
+ bool kern)
+{
+ struct sock *sk = sock->sk;
+ struct sock *newsk;
+ struct sk_buff *skb;
+ int rc = -EINVAL;
+
+ if (!sk)
+ goto out;
+
+ rc = -EOPNOTSUPP;
+ if (sk->sk_type != SOCK_SEQPACKET)
+ goto out;
+
+ lock_sock(sk);
+ rc = -EINVAL;
+ if (sk->sk_state != TCP_LISTEN)
+ goto out2;
+
+ rc = x25_wait_for_data(sk, sk->sk_rcvtimeo);
+ if (rc)
+ goto out2;
+ skb = skb_dequeue(&sk->sk_receive_queue);
+ rc = -EINVAL;
+ if (!skb->sk)
+ goto out2;
+ newsk = skb->sk;
+ sock_graft(newsk, newsock);
+
+ /* Now attach up the new socket */
+ skb->sk = NULL;
+ kfree_skb(skb);
+ sk_acceptq_removed(sk);
+ newsock->state = SS_CONNECTED;
+ rc = 0;
+out2:
+ release_sock(sk);
+out:
+ return rc;
+}
+
+static int x25_getname(struct socket *sock, struct sockaddr *uaddr,
+ int peer)
+{
+ struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)uaddr;
+ struct sock *sk = sock->sk;
+ struct x25_sock *x25 = x25_sk(sk);
+ int rc = 0;
+
+ if (peer) {
+ if (sk->sk_state != TCP_ESTABLISHED) {
+ rc = -ENOTCONN;
+ goto out;
+ }
+ sx25->sx25_addr = x25->dest_addr;
+ } else
+ sx25->sx25_addr = x25->source_addr;
+
+ sx25->sx25_family = AF_X25;
+ rc = sizeof(*sx25);
+
+out:
+ return rc;
+}
+
+int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
+ unsigned int lci)
+{
+ struct sock *sk;
+ struct sock *make;
+ struct x25_sock *makex25;
+ struct x25_address source_addr, dest_addr;
+ struct x25_facilities facilities;
+ struct x25_dte_facilities dte_facilities;
+ int len, addr_len, rc;
+
+ /*
+ * Remove the LCI and frame type.
+ */
+ skb_pull(skb, X25_STD_MIN_LEN);
+
+ /*
+ * Extract the X.25 addresses and convert them to ASCII strings,
+ * and remove them.
+ *
+ * Address block is mandatory in call request packets
+ */
+ addr_len = x25_parse_address_block(skb, &source_addr, &dest_addr);
+ if (addr_len <= 0)
+ goto out_clear_request;
+ skb_pull(skb, addr_len);
+
+ /*
+ * Get the length of the facilities, skip past them for the moment
+ * get the call user data because this is needed to determine
+ * the correct listener
+ *
+ * Facilities length is mandatory in call request packets
+ */
+ if (!pskb_may_pull(skb, 1))
+ goto out_clear_request;
+ len = skb->data[0] + 1;
+ if (!pskb_may_pull(skb, len))
+ goto out_clear_request;
+ skb_pull(skb,len);
+
+ /*
+ * Ensure that the amount of call user data is valid.
+ */
+ if (skb->len > X25_MAX_CUD_LEN)
+ goto out_clear_request;
+
+ /*
+ * Get all the call user data so it can be used in
+ * x25_find_listener and skb_copy_from_linear_data up ahead.
+ */
+ if (!pskb_may_pull(skb, skb->len))
+ goto out_clear_request;
+
+ /*
+ * Find a listener for the particular address/cud pair.
+ */
+ sk = x25_find_listener(&source_addr,skb);
+ skb_push(skb,len);
+
+ if (sk != NULL && sk_acceptq_is_full(sk)) {
+ goto out_sock_put;
+ }
+
+ /*
+ * We don't have any listeners for this incoming call.
+ * Try forwarding it.
+ */
+ if (sk == NULL) {
+ skb_push(skb, addr_len + X25_STD_MIN_LEN);
+ if (sysctl_x25_forward &&
+ x25_forward_call(&dest_addr, nb, skb, lci) > 0)
+ {
+ /* Call was forwarded, don't process it any more */
+ kfree_skb(skb);
+ rc = 1;
+ goto out;
+ } else {
+ /* No listeners, can't forward, clear the call */
+ goto out_clear_request;
+ }
+ }
+
+ /*
+ * Try to reach a compromise on the requested facilities.
+ */
+ len = x25_negotiate_facilities(skb, sk, &facilities, &dte_facilities);
+ if (len == -1)
+ goto out_sock_put;
+
+ /*
+ * current neighbour/link might impose additional limits
+ * on certain facilities
+ */
+
+ x25_limit_facilities(&facilities, nb);
+
+ /*
+ * Try to create a new socket.
+ */
+ make = x25_make_new(sk);
+ if (!make)
+ goto out_sock_put;
+
+ /*
+ * Remove the facilities
+ */
+ skb_pull(skb, len);
+
+ skb->sk = make;
+ make->sk_state = TCP_ESTABLISHED;
+
+ makex25 = x25_sk(make);
+ makex25->lci = lci;
+ makex25->dest_addr = dest_addr;
+ makex25->source_addr = source_addr;
+ x25_neigh_hold(nb);
+ makex25->neighbour = nb;
+ makex25->facilities = facilities;
+ makex25->dte_facilities= dte_facilities;
+ makex25->vc_facil_mask = x25_sk(sk)->vc_facil_mask;
+ /* ensure no reverse facil on accept */
+ makex25->vc_facil_mask &= ~X25_MASK_REVERSE;
+ /* ensure no calling address extension on accept */
+ makex25->vc_facil_mask &= ~X25_MASK_CALLING_AE;
+ makex25->cudmatchlength = x25_sk(sk)->cudmatchlength;
+
+ /* Normally all calls are accepted immediately */
+ if (test_bit(X25_ACCPT_APPRV_FLAG, &makex25->flags)) {
+ x25_write_internal(make, X25_CALL_ACCEPTED);
+ makex25->state = X25_STATE_3;
+ } else {
+ makex25->state = X25_STATE_5;
+ }
+
+ /*
+ * Incoming Call User Data.
+ */
+ skb_copy_from_linear_data(skb, makex25->calluserdata.cuddata, skb->len);
+ makex25->calluserdata.cudlength = skb->len;
+
+ sk_acceptq_added(sk);
+
+ x25_insert_socket(make);
+
+ skb_queue_head(&sk->sk_receive_queue, skb);
+
+ x25_start_heartbeat(make);
+
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_data_ready(sk);
+ rc = 1;
+ sock_put(sk);
+out:
+ return rc;
+out_sock_put:
+ sock_put(sk);
+out_clear_request:
+ rc = 0;
+ x25_transmit_clear_request(nb, lci, 0x01);
+ goto out;
+}
+
+static int x25_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+{
+ struct sock *sk = sock->sk;
+ struct x25_sock *x25 = x25_sk(sk);
+ DECLARE_SOCKADDR(struct sockaddr_x25 *, usx25, msg->msg_name);
+ struct sockaddr_x25 sx25;
+ struct sk_buff *skb;
+ unsigned char *asmptr;
+ int noblock = msg->msg_flags & MSG_DONTWAIT;
+ size_t size;
+ int qbit = 0, rc = -EINVAL;
+
+ lock_sock(sk);
+ if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_OOB|MSG_EOR|MSG_CMSG_COMPAT))
+ goto out;
+
+ /* we currently don't support segmented records at the user interface */
+ if (!(msg->msg_flags & (MSG_EOR|MSG_OOB)))
+ goto out;
+
+ rc = -EADDRNOTAVAIL;
+ if (sock_flag(sk, SOCK_ZAPPED))
+ goto out;
+
+ rc = -EPIPE;
+ if (sk->sk_shutdown & SEND_SHUTDOWN) {
+ send_sig(SIGPIPE, current, 0);
+ goto out;
+ }
+
+ rc = -ENETUNREACH;
+ if (!x25->neighbour)
+ goto out;
+
+ if (usx25) {
+ rc = -EINVAL;
+ if (msg->msg_namelen < sizeof(sx25))
+ goto out;
+ memcpy(&sx25, usx25, sizeof(sx25));
+ rc = -EISCONN;
+ if (strcmp(x25->dest_addr.x25_addr, sx25.sx25_addr.x25_addr))
+ goto out;
+ rc = -EINVAL;
+ if (sx25.sx25_family != AF_X25)
+ goto out;
+ } else {
+ /*
+ * FIXME 1003.1g - if the socket is like this because
+ * it has become closed (not started closed) we ought
+ * to SIGPIPE, EPIPE;
+ */
+ rc = -ENOTCONN;
+ if (sk->sk_state != TCP_ESTABLISHED)
+ goto out;
+
+ sx25.sx25_family = AF_X25;
+ sx25.sx25_addr = x25->dest_addr;
+ }
+
+ /* Sanity check the packet size */
+ if (len > 65535) {
+ rc = -EMSGSIZE;
+ goto out;
+ }
+
+ SOCK_DEBUG(sk, "x25_sendmsg: sendto: Addresses built.\n");
+
+ /* Build a packet */
+ SOCK_DEBUG(sk, "x25_sendmsg: sendto: building packet.\n");
+
+ if ((msg->msg_flags & MSG_OOB) && len > 32)
+ len = 32;
+
+ size = len + X25_MAX_L2_LEN + X25_EXT_MIN_LEN;
+
+ release_sock(sk);
+ skb = sock_alloc_send_skb(sk, size, noblock, &rc);
+ lock_sock(sk);
+ if (!skb)
+ goto out;
+ X25_SKB_CB(skb)->flags = msg->msg_flags;
+
+ skb_reserve(skb, X25_MAX_L2_LEN + X25_EXT_MIN_LEN);
+
+ /*
+ * Put the data on the end
+ */
+ SOCK_DEBUG(sk, "x25_sendmsg: Copying user data\n");
+
+ skb_reset_transport_header(skb);
+ skb_put(skb, len);
+
+ rc = memcpy_from_msg(skb_transport_header(skb), msg, len);
+ if (rc)
+ goto out_kfree_skb;
+
+ /*
+ * If the Q BIT Include socket option is in force, the first
+ * byte of the user data is the logical value of the Q Bit.
+ */
+ if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
+ if (!pskb_may_pull(skb, 1))
+ goto out_kfree_skb;
+
+ qbit = skb->data[0];
+ skb_pull(skb, 1);
+ }
+
+ /*
+ * Push down the X.25 header
+ */
+ SOCK_DEBUG(sk, "x25_sendmsg: Building X.25 Header.\n");
+
+ if (msg->msg_flags & MSG_OOB) {
+ if (x25->neighbour->extended) {
+ asmptr = skb_push(skb, X25_STD_MIN_LEN);
+ *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_EXTSEQ;
+ *asmptr++ = (x25->lci >> 0) & 0xFF;
+ *asmptr++ = X25_INTERRUPT;
+ } else {
+ asmptr = skb_push(skb, X25_STD_MIN_LEN);
+ *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_STDSEQ;
+ *asmptr++ = (x25->lci >> 0) & 0xFF;
+ *asmptr++ = X25_INTERRUPT;
+ }
+ } else {
+ if (x25->neighbour->extended) {
+ /* Build an Extended X.25 header */
+ asmptr = skb_push(skb, X25_EXT_MIN_LEN);
+ *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_EXTSEQ;
+ *asmptr++ = (x25->lci >> 0) & 0xFF;
+ *asmptr++ = X25_DATA;
+ *asmptr++ = X25_DATA;
+ } else {
+ /* Build a Standard X.25 header */
+ asmptr = skb_push(skb, X25_STD_MIN_LEN);
+ *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_STDSEQ;
+ *asmptr++ = (x25->lci >> 0) & 0xFF;
+ *asmptr++ = X25_DATA;
+ }
+
+ if (qbit)
+ skb->data[0] |= X25_Q_BIT;
+ }
+
+ SOCK_DEBUG(sk, "x25_sendmsg: Built header.\n");
+ SOCK_DEBUG(sk, "x25_sendmsg: Transmitting buffer\n");
+
+ rc = -ENOTCONN;
+ if (sk->sk_state != TCP_ESTABLISHED)
+ goto out_kfree_skb;
+
+ if (msg->msg_flags & MSG_OOB)
+ skb_queue_tail(&x25->interrupt_out_queue, skb);
+ else {
+ rc = x25_output(sk, skb);
+ len = rc;
+ if (rc < 0)
+ kfree_skb(skb);
+ else if (test_bit(X25_Q_BIT_FLAG, &x25->flags))
+ len++;
+ }
+
+ x25_kick(sk);
+ rc = len;
+out:
+ release_sock(sk);
+ return rc;
+out_kfree_skb:
+ kfree_skb(skb);
+ goto out;
+}
+
+
+static int x25_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int flags)
+{
+ struct sock *sk = sock->sk;
+ struct x25_sock *x25 = x25_sk(sk);
+ DECLARE_SOCKADDR(struct sockaddr_x25 *, sx25, msg->msg_name);
+ size_t copied;
+ int qbit, header_len;
+ struct sk_buff *skb;
+ unsigned char *asmptr;
+ int rc = -ENOTCONN;
+
+ lock_sock(sk);
+
+ if (x25->neighbour == NULL)
+ goto out;
+
+ header_len = x25->neighbour->extended ?
+ X25_EXT_MIN_LEN : X25_STD_MIN_LEN;
+
+ /*
+ * This works for seqpacket too. The receiver has ordered the queue for
+ * us! We do one quick check first though
+ */
+ if (sk->sk_state != TCP_ESTABLISHED)
+ goto out;
+
+ if (flags & MSG_OOB) {
+ rc = -EINVAL;
+ if (sock_flag(sk, SOCK_URGINLINE) ||
+ !skb_peek(&x25->interrupt_in_queue))
+ goto out;
+
+ skb = skb_dequeue(&x25->interrupt_in_queue);
+
+ if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
+ goto out_free_dgram;
+
+ skb_pull(skb, X25_STD_MIN_LEN);
+
+ /*
+ * No Q bit information on Interrupt data.
+ */
+ if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
+ asmptr = skb_push(skb, 1);
+ *asmptr = 0x00;
+ }
+
+ msg->msg_flags |= MSG_OOB;
+ } else {
+ /* Now we can treat all alike */
+ release_sock(sk);
+ skb = skb_recv_datagram(sk, flags, &rc);
+ lock_sock(sk);
+ if (!skb)
+ goto out;
+
+ if (!pskb_may_pull(skb, header_len))
+ goto out_free_dgram;
+
+ qbit = (skb->data[0] & X25_Q_BIT) == X25_Q_BIT;
+
+ skb_pull(skb, header_len);
+
+ if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
+ asmptr = skb_push(skb, 1);
+ *asmptr = qbit;
+ }
+ }
+
+ skb_reset_transport_header(skb);
+ copied = skb->len;
+
+ if (copied > size) {
+ copied = size;
+ msg->msg_flags |= MSG_TRUNC;
+ }
+
+ /* Currently, each datagram always contains a complete record */
+ msg->msg_flags |= MSG_EOR;
+
+ rc = skb_copy_datagram_msg(skb, 0, msg, copied);
+ if (rc)
+ goto out_free_dgram;
+
+ if (sx25) {
+ sx25->sx25_family = AF_X25;
+ sx25->sx25_addr = x25->dest_addr;
+ msg->msg_namelen = sizeof(*sx25);
+ }
+
+ x25_check_rbuf(sk);
+ rc = copied;
+out_free_dgram:
+ skb_free_datagram(sk, skb);
+out:
+ release_sock(sk);
+ return rc;
+}
+
+
+static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+ struct sock *sk = sock->sk;
+ struct x25_sock *x25 = x25_sk(sk);
+ void __user *argp = (void __user *)arg;
+ int rc;
+
+ switch (cmd) {
+ case TIOCOUTQ: {
+ int amount;
+
+ amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
+ if (amount < 0)
+ amount = 0;
+ rc = put_user(amount, (unsigned int __user *)argp);
+ break;
+ }
+
+ case TIOCINQ: {
+ struct sk_buff *skb;
+ int amount = 0;
+ /*
+ * These two are safe on a single CPU system as
+ * only user tasks fiddle here
+ */
+ lock_sock(sk);
+ if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
+ amount = skb->len;
+ release_sock(sk);
+ rc = put_user(amount, (unsigned int __user *)argp);
+ break;
+ }
+
+ case SIOCGIFADDR:
+ case SIOCSIFADDR:
+ case SIOCGIFDSTADDR:
+ case SIOCSIFDSTADDR:
+ case SIOCGIFBRDADDR:
+ case SIOCSIFBRDADDR:
+ case SIOCGIFNETMASK:
+ case SIOCSIFNETMASK:
+ case SIOCGIFMETRIC:
+ case SIOCSIFMETRIC:
+ rc = -EINVAL;
+ break;
+ case SIOCADDRT:
+ case SIOCDELRT:
+ rc = -EPERM;
+ if (!capable(CAP_NET_ADMIN))
+ break;
+ rc = x25_route_ioctl(cmd, argp);
+ break;
+ case SIOCX25GSUBSCRIP:
+ rc = x25_subscr_ioctl(cmd, argp);
+ break;
+ case SIOCX25SSUBSCRIP:
+ rc = -EPERM;
+ if (!capable(CAP_NET_ADMIN))
+ break;
+ rc = x25_subscr_ioctl(cmd, argp);
+ break;
+ case SIOCX25GFACILITIES: {
+ lock_sock(sk);
+ rc = copy_to_user(argp, &x25->facilities,
+ sizeof(x25->facilities))
+ ? -EFAULT : 0;
+ release_sock(sk);
+ break;
+ }
+
+ case SIOCX25SFACILITIES: {
+ struct x25_facilities facilities;
+ rc = -EFAULT;
+ if (copy_from_user(&facilities, argp, sizeof(facilities)))
+ break;
+ rc = -EINVAL;
+ lock_sock(sk);
+ if (sk->sk_state != TCP_LISTEN &&
+ sk->sk_state != TCP_CLOSE)
+ goto out_fac_release;
+ if (facilities.pacsize_in < X25_PS16 ||
+ facilities.pacsize_in > X25_PS4096)
+ goto out_fac_release;
+ if (facilities.pacsize_out < X25_PS16 ||
+ facilities.pacsize_out > X25_PS4096)
+ goto out_fac_release;
+ if (facilities.winsize_in < 1 ||
+ facilities.winsize_in > 127)
+ goto out_fac_release;
+ if (facilities.throughput) {
+ int out = facilities.throughput & 0xf0;
+ int in = facilities.throughput & 0x0f;
+ if (!out)
+ facilities.throughput |=
+ X25_DEFAULT_THROUGHPUT << 4;
+ else if (out < 0x30 || out > 0xD0)
+ goto out_fac_release;
+ if (!in)
+ facilities.throughput |=
+ X25_DEFAULT_THROUGHPUT;
+ else if (in < 0x03 || in > 0x0D)
+ goto out_fac_release;
+ }
+ if (facilities.reverse &&
+ (facilities.reverse & 0x81) != 0x81)
+ goto out_fac_release;
+ x25->facilities = facilities;
+ rc = 0;
+out_fac_release:
+ release_sock(sk);
+ break;
+ }
+
+ case SIOCX25GDTEFACILITIES: {
+ lock_sock(sk);
+ rc = copy_to_user(argp, &x25->dte_facilities,
+ sizeof(x25->dte_facilities));
+ release_sock(sk);
+ if (rc)
+ rc = -EFAULT;
+ break;
+ }
+
+ case SIOCX25SDTEFACILITIES: {
+ struct x25_dte_facilities dtefacs;
+ rc = -EFAULT;
+ if (copy_from_user(&dtefacs, argp, sizeof(dtefacs)))
+ break;
+ rc = -EINVAL;
+ lock_sock(sk);
+ if (sk->sk_state != TCP_LISTEN &&
+ sk->sk_state != TCP_CLOSE)
+ goto out_dtefac_release;
+ if (dtefacs.calling_len > X25_MAX_AE_LEN)
+ goto out_dtefac_release;
+ if (dtefacs.called_len > X25_MAX_AE_LEN)
+ goto out_dtefac_release;
+ x25->dte_facilities = dtefacs;
+ rc = 0;
+out_dtefac_release:
+ release_sock(sk);
+ break;
+ }
+
+ case SIOCX25GCALLUSERDATA: {
+ lock_sock(sk);
+ rc = copy_to_user(argp, &x25->calluserdata,
+ sizeof(x25->calluserdata))
+ ? -EFAULT : 0;
+ release_sock(sk);
+ break;
+ }
+
+ case SIOCX25SCALLUSERDATA: {
+ struct x25_calluserdata calluserdata;
+
+ rc = -EFAULT;
+ if (copy_from_user(&calluserdata, argp, sizeof(calluserdata)))
+ break;
+ rc = -EINVAL;
+ if (calluserdata.cudlength > X25_MAX_CUD_LEN)
+ break;
+ lock_sock(sk);
+ x25->calluserdata = calluserdata;
+ release_sock(sk);
+ rc = 0;
+ break;
+ }
+
+ case SIOCX25GCAUSEDIAG: {
+ lock_sock(sk);
+ rc = copy_to_user(argp, &x25->causediag, sizeof(x25->causediag))
+ ? -EFAULT : 0;
+ release_sock(sk);
+ break;
+ }
+
+ case SIOCX25SCAUSEDIAG: {
+ struct x25_causediag causediag;
+ rc = -EFAULT;
+ if (copy_from_user(&causediag, argp, sizeof(causediag)))
+ break;
+ lock_sock(sk);
+ x25->causediag = causediag;
+ release_sock(sk);
+ rc = 0;
+ break;
+
+ }
+
+ case SIOCX25SCUDMATCHLEN: {
+ struct x25_subaddr sub_addr;
+ rc = -EINVAL;
+ lock_sock(sk);
+ if(sk->sk_state != TCP_CLOSE)
+ goto out_cud_release;
+ rc = -EFAULT;
+ if (copy_from_user(&sub_addr, argp,
+ sizeof(sub_addr)))
+ goto out_cud_release;
+ rc = -EINVAL;
+ if (sub_addr.cudmatchlength > X25_MAX_CUD_LEN)
+ goto out_cud_release;
+ x25->cudmatchlength = sub_addr.cudmatchlength;
+ rc = 0;
+out_cud_release:
+ release_sock(sk);
+ break;
+ }
+
+ case SIOCX25CALLACCPTAPPRV: {
+ rc = -EINVAL;
+ lock_sock(sk);
+ if (sk->sk_state == TCP_CLOSE) {
+ clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags);
+ rc = 0;
+ }
+ release_sock(sk);
+ break;
+ }
+
+ case SIOCX25SENDCALLACCPT: {
+ rc = -EINVAL;
+ lock_sock(sk);
+ if (sk->sk_state != TCP_ESTABLISHED)
+ goto out_sendcallaccpt_release;
+ /* must call accptapprv above */
+ if (test_bit(X25_ACCPT_APPRV_FLAG, &x25->flags))
+ goto out_sendcallaccpt_release;
+ x25_write_internal(sk, X25_CALL_ACCEPTED);
+ x25->state = X25_STATE_3;
+ rc = 0;
+out_sendcallaccpt_release:
+ release_sock(sk);
+ break;
+ }
+
+ default:
+ rc = -ENOIOCTLCMD;
+ break;
+ }
+
+ return rc;
+}
+
+static const struct net_proto_family x25_family_ops = {
+ .family = AF_X25,
+ .create = x25_create,
+ .owner = THIS_MODULE,
+};
+
+#ifdef CONFIG_COMPAT
+static int compat_x25_subscr_ioctl(unsigned int cmd,
+ struct compat_x25_subscrip_struct __user *x25_subscr32)
+{
+ struct compat_x25_subscrip_struct x25_subscr;
+ struct x25_neigh *nb;
+ struct net_device *dev;
+ int rc = -EINVAL;
+
+ rc = -EFAULT;
+ if (copy_from_user(&x25_subscr, x25_subscr32, sizeof(*x25_subscr32)))
+ goto out;
+
+ rc = -EINVAL;
+ dev = x25_dev_get(x25_subscr.device);
+ if (dev == NULL)
+ goto out;
+
+ nb = x25_get_neigh(dev);
+ if (nb == NULL)
+ goto out_dev_put;
+
+ dev_put(dev);
+
+ if (cmd == SIOCX25GSUBSCRIP) {
+ read_lock_bh(&x25_neigh_list_lock);
+ x25_subscr.extended = nb->extended;
+ x25_subscr.global_facil_mask = nb->global_facil_mask;
+ read_unlock_bh(&x25_neigh_list_lock);
+ rc = copy_to_user(x25_subscr32, &x25_subscr,
+ sizeof(*x25_subscr32)) ? -EFAULT : 0;
+ } else {
+ rc = -EINVAL;
+ if (x25_subscr.extended == 0 || x25_subscr.extended == 1) {
+ rc = 0;
+ write_lock_bh(&x25_neigh_list_lock);
+ nb->extended = x25_subscr.extended;
+ nb->global_facil_mask = x25_subscr.global_facil_mask;
+ write_unlock_bh(&x25_neigh_list_lock);
+ }
+ }
+ x25_neigh_put(nb);
+out:
+ return rc;
+out_dev_put:
+ dev_put(dev);
+ goto out;
+}
+
+static int compat_x25_ioctl(struct socket *sock, unsigned int cmd,
+ unsigned long arg)
+{
+ void __user *argp = compat_ptr(arg);
+ int rc = -ENOIOCTLCMD;
+
+ switch(cmd) {
+ case TIOCOUTQ:
+ case TIOCINQ:
+ rc = x25_ioctl(sock, cmd, (unsigned long)argp);
+ break;
+ case SIOCGIFADDR:
+ case SIOCSIFADDR:
+ case SIOCGIFDSTADDR:
+ case SIOCSIFDSTADDR:
+ case SIOCGIFBRDADDR:
+ case SIOCSIFBRDADDR:
+ case SIOCGIFNETMASK:
+ case SIOCSIFNETMASK:
+ case SIOCGIFMETRIC:
+ case SIOCSIFMETRIC:
+ rc = -EINVAL;
+ break;
+ case SIOCADDRT:
+ case SIOCDELRT:
+ rc = -EPERM;
+ if (!capable(CAP_NET_ADMIN))
+ break;
+ rc = x25_route_ioctl(cmd, argp);
+ break;
+ case SIOCX25GSUBSCRIP:
+ rc = compat_x25_subscr_ioctl(cmd, argp);
+ break;
+ case SIOCX25SSUBSCRIP:
+ rc = -EPERM;
+ if (!capable(CAP_NET_ADMIN))
+ break;
+ rc = compat_x25_subscr_ioctl(cmd, argp);
+ break;
+ case SIOCX25GFACILITIES:
+ case SIOCX25SFACILITIES:
+ case SIOCX25GDTEFACILITIES:
+ case SIOCX25SDTEFACILITIES:
+ case SIOCX25GCALLUSERDATA:
+ case SIOCX25SCALLUSERDATA:
+ case SIOCX25GCAUSEDIAG:
+ case SIOCX25SCAUSEDIAG:
+ case SIOCX25SCUDMATCHLEN:
+ case SIOCX25CALLACCPTAPPRV:
+ case SIOCX25SENDCALLACCPT:
+ rc = x25_ioctl(sock, cmd, (unsigned long)argp);
+ break;
+ default:
+ rc = -ENOIOCTLCMD;
+ break;
+ }
+ return rc;
+}
+#endif
+
+static const struct proto_ops x25_proto_ops = {
+ .family = AF_X25,
+ .owner = THIS_MODULE,
+ .release = x25_release,
+ .bind = x25_bind,
+ .connect = x25_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = x25_accept,
+ .getname = x25_getname,
+ .poll = datagram_poll,
+ .ioctl = x25_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = compat_x25_ioctl,
+#endif
+ .gettstamp = sock_gettstamp,
+ .listen = x25_listen,
+ .shutdown = sock_no_shutdown,
+ .setsockopt = x25_setsockopt,
+ .getsockopt = x25_getsockopt,
+ .sendmsg = x25_sendmsg,
+ .recvmsg = x25_recvmsg,
+ .mmap = sock_no_mmap,
+ .sendpage = sock_no_sendpage,
+};
+
+static struct packet_type x25_packet_type __read_mostly = {
+ .type = cpu_to_be16(ETH_P_X25),
+ .func = x25_lapb_receive_frame,
+};
+
+static struct notifier_block x25_dev_notifier = {
+ .notifier_call = x25_device_event,
+};
+
+void x25_kill_by_neigh(struct x25_neigh *nb)
+{
+ struct sock *s;
+
+ write_lock_bh(&x25_list_lock);
+
+ sk_for_each(s, &x25_list) {
+ if (x25_sk(s)->neighbour == nb) {
+ write_unlock_bh(&x25_list_lock);
+ lock_sock(s);
+ x25_disconnect(s, ENETUNREACH, 0, 0);
+ release_sock(s);
+ write_lock_bh(&x25_list_lock);
+ }
+ }
+ write_unlock_bh(&x25_list_lock);
+
+ /* Remove any related forwards */
+ x25_clear_forward_by_dev(nb->dev);
+}
+
+static int __init x25_init(void)
+{
+ int rc;
+
+ rc = proto_register(&x25_proto, 0);
+ if (rc)
+ goto out;
+
+ rc = sock_register(&x25_family_ops);
+ if (rc)
+ goto out_proto;
+
+ dev_add_pack(&x25_packet_type);
+
+ rc = register_netdevice_notifier(&x25_dev_notifier);
+ if (rc)
+ goto out_sock;
+
+ rc = x25_register_sysctl();
+ if (rc)
+ goto out_dev;
+
+ rc = x25_proc_init();
+ if (rc)
+ goto out_sysctl;
+
+ pr_info("Linux Version 0.2\n");
+
+out:
+ return rc;
+out_sysctl:
+ x25_unregister_sysctl();
+out_dev:
+ unregister_netdevice_notifier(&x25_dev_notifier);
+out_sock:
+ dev_remove_pack(&x25_packet_type);
+ sock_unregister(AF_X25);
+out_proto:
+ proto_unregister(&x25_proto);
+ goto out;
+}
+module_init(x25_init);
+
+static void __exit x25_exit(void)
+{
+ x25_proc_exit();
+ x25_link_free();
+ x25_route_free();
+
+ x25_unregister_sysctl();
+
+ unregister_netdevice_notifier(&x25_dev_notifier);
+
+ dev_remove_pack(&x25_packet_type);
+
+ sock_unregister(AF_X25);
+ proto_unregister(&x25_proto);
+}
+module_exit(x25_exit);
+
+MODULE_AUTHOR("Jonathan Naylor <g4klx@g4klx.demon.co.uk>");
+MODULE_DESCRIPTION("The X.25 Packet Layer network layer protocol");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NETPROTO(PF_X25);
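
af_x25.c above is the user-visible half of the stack: it registers the AF_X25, SOCK_SEQPACKET socket family whose bind/connect/accept/sendmsg paths are implemented by the functions in this file. The following is only a rough user-space sketch of that API under stated assumptions: the X.121 addresses and payload are made up, SOL_X25 is defined locally in case the libc headers lack it, and a LAPB-capable interface plus an X.25 route must already be configured for connect() to get anywhere.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/x25.h>			/* struct sockaddr_x25, X25_QBITINCL */

#ifndef SOL_X25
#define SOL_X25 262			/* socket option level for X.25 */
#endif

int main(void)
{
	struct sockaddr_x25 local = { .sx25_family = AF_X25 };
	struct sockaddr_x25 peer  = { .sx25_family = AF_X25 };
	char buf[64];
	int s, one = 1;

	s = socket(AF_X25, SOCK_SEQPACKET, 0);	/* handled by x25_create() */
	if (s < 0) {
		perror("socket(AF_X25)");	/* e.g. CONFIG_X25 not built */
		return 1;
	}

	/* Example X.121 addresses only - real values depend on the network. */
	strcpy(local.sx25_addr.x25_addr, "23456");
	strcpy(peer.sx25_addr.x25_addr, "12345678");

	/* x25_connect() rejects unbound sockets, so bind() first. */
	if (bind(s, (struct sockaddr *)&local, sizeof(local)) < 0 ||
	    connect(s, (struct sockaddr *)&peer, sizeof(peer)) < 0) {
		perror("bind/connect");
		close(s);
		return 1;
	}

	/* With X25_QBITINCL set, the first byte of every record written is
	 * consumed as the Q-bit value (x25_sendmsg() strips it again). */
	setsockopt(s, SOL_X25, X25_QBITINCL, &one, sizeof(one));

	buf[0] = 0;				/* Q bit clear */
	memcpy(buf + 1, "hello over X.25", 15);
	if (send(s, buf, 1 + 15, MSG_EOR) < 0)	/* MSG_EOR is mandatory here */
		perror("send");

	close(s);
	return 0;
}

On the receiving side a listener would bind() to its own address, then call listen() and accept(), which map onto x25_listen() and x25_accept() above.
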
diff --git a/net/x25/sysctl_net_x25.c b/net/x25/sysctl_net_x25.c
new file mode 100644
index 000000000..e9802afa4
--- /dev/null
+++ b/net/x25/sysctl_net_x25.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/* -*- linux-c -*-
+ * sysctl_net_x25.c: sysctl interface to net X.25 subsystem.
+ *
+ * Begun April 1, 1996, Mike Shaver.
+ * Added /proc/sys/net/x25 directory entry (empty =) ). [MS]
+ */
+
+#include <linux/sysctl.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/netdevice.h>
+#include <linux/init.h>
+#include <net/x25.h>
+
+static int min_timer[] = { 1 * HZ };
+static int max_timer[] = { 300 * HZ };
+
+static struct ctl_table_header *x25_table_header;
+
+static struct ctl_table x25_table[] = {
+ {
+ .procname = "restart_request_timeout",
+ .data = &sysctl_x25_restart_request_timeout,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &min_timer,
+ .extra2 = &max_timer,
+ },
+ {
+ .procname = "call_request_timeout",
+ .data = &sysctl_x25_call_request_timeout,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &min_timer,
+ .extra2 = &max_timer,
+ },
+ {
+ .procname = "reset_request_timeout",
+ .data = &sysctl_x25_reset_request_timeout,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &min_timer,
+ .extra2 = &max_timer,
+ },
+ {
+ .procname = "clear_request_timeout",
+ .data = &sysctl_x25_clear_request_timeout,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &min_timer,
+ .extra2 = &max_timer,
+ },
+ {
+ .procname = "acknowledgement_hold_back_timeout",
+ .data = &sysctl_x25_ack_holdback_timeout,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &min_timer,
+ .extra2 = &max_timer,
+ },
+ {
+ .procname = "x25_forward",
+ .data = &sysctl_x25_forward,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ { },
+};
+
+int __init x25_register_sysctl(void)
+{
+ x25_table_header = register_net_sysctl(&init_net, "net/x25", x25_table);
+ if (!x25_table_header)
+ return -ENOMEM;
+ return 0;
+}
+
+void x25_unregister_sysctl(void)
+{
+ unregister_net_sysctl_table(x25_table_header);
+}
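
The table above appears under /proc/sys/net/x25/ once the module is loaded; the five timer entries hold raw jiffies (clamped to the 1*HZ..300*HZ range by min_timer/max_timer) and x25_forward toggles call forwarding between neighbours. Below is a small hedged sketch of tuning them from user space; the particular values written here are arbitrary examples.

#include <stdio.h>

/* Write a value into one of the /proc/sys/net/x25/ entries created by
 * x25_register_sysctl() above. Returns 0 on success, -1 on error.
 */
static int x25_sysctl_write(const char *name, long value)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/sys/net/x25/%s", name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%ld\n", value);
	return fclose(f) ? -1 : 0;
}

int main(void)
{
	/* Enable call forwarding between neighbours (sysctl_x25_forward). */
	if (x25_sysctl_write("x25_forward", 1))
		perror("x25_forward");

	/* The timer entries hold raw jiffies and the table clamps them to
	 * 1*HZ..300*HZ; with HZ=100 this sets T21, the Call Request
	 * timeout, to 60 seconds. */
	if (x25_sysctl_write("call_request_timeout", 6000))
		perror("call_request_timeout");

	return 0;
}
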
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
new file mode 100644
index 000000000..748d8630a
--- /dev/null
+++ b/net/x25/x25_dev.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * X.25 Packet Layer release 002
+ *
+ * This is ALPHA test software. This code may break your machine, randomly fail to work with new
+ * releases, misbehave and/or generally screw up. It might even work.
+ *
+ * This code REQUIRES 2.1.15 or higher
+ *
+ * History
+ * X.25 001 Jonathan Naylor Started coding.
+ * 2000-09-04 Henner Eisen Prevent freeing a dangling skb.
+ */
+
+#define pr_fmt(fmt) "X25: " fmt
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <net/sock.h>
+#include <linux/if_arp.h>
+#include <net/x25.h>
+#include <net/x25device.h>
+
+static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
+{
+ struct sock *sk;
+ unsigned short frametype;
+ unsigned int lci;
+
+ if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
+ return 0;
+
+ frametype = skb->data[2];
+ lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
+
+ /*
+ * LCI of zero is always for us, and it's always a link control
+ * frame.
+ */
+ if (lci == 0) {
+ x25_link_control(skb, nb, frametype);
+ return 0;
+ }
+
+ /*
+ * Find an existing socket.
+ */
+ if ((sk = x25_find_socket(lci, nb)) != NULL) {
+ int queued = 1;
+
+ skb_reset_transport_header(skb);
+ bh_lock_sock(sk);
+ if (!sock_owned_by_user(sk)) {
+ queued = x25_process_rx_frame(sk, skb);
+ } else {
+ queued = !sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));
+ }
+ bh_unlock_sock(sk);
+ sock_put(sk);
+ return queued;
+ }
+
+ /*
+ * Is it a Call Request? If so, process it.
+ */
+ if (frametype == X25_CALL_REQUEST)
+ return x25_rx_call_request(skb, nb, lci);
+
+ /*
+ * It's not a Call Request, nor is it a control frame.
+ * Can we forward it?
+ */
+
+ if (x25_forward_data(lci, nb, skb)) {
+ if (frametype == X25_CLEAR_CONFIRMATION) {
+ x25_clear_forward_by_lci(lci);
+ }
+ kfree_skb(skb);
+ return 1;
+ }
+
+/*
+ x25_transmit_clear_request(nb, lci, 0x0D);
+*/
+
+ if (frametype != X25_CLEAR_CONFIRMATION)
+ pr_debug("x25_receive_data(): unknown frame type %2x\n",frametype);
+
+ return 0;
+}
+
+int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype, struct net_device *orig_dev)
+{
+ struct sk_buff *nskb;
+ struct x25_neigh *nb;
+
+ if (!net_eq(dev_net(dev), &init_net))
+ goto drop;
+
+ nskb = skb_copy(skb, GFP_ATOMIC);
+ if (!nskb)
+ goto drop;
+ kfree_skb(skb);
+ skb = nskb;
+
+ /*
+ * Packet received from unrecognised device, throw it away.
+ */
+ nb = x25_get_neigh(dev);
+ if (!nb) {
+ pr_debug("unknown neighbour - %s\n", dev->name);
+ goto drop;
+ }
+
+ if (!pskb_may_pull(skb, 1)) {
+ x25_neigh_put(nb);
+ goto drop;
+ }
+
+ switch (skb->data[0]) {
+
+ case X25_IFACE_DATA:
+ skb_pull(skb, 1);
+ if (x25_receive_data(skb, nb)) {
+ x25_neigh_put(nb);
+ goto out;
+ }
+ break;
+
+ case X25_IFACE_CONNECT:
+ x25_link_established(nb);
+ break;
+
+ case X25_IFACE_DISCONNECT:
+ x25_link_terminated(nb);
+ break;
+ }
+ x25_neigh_put(nb);
+drop:
+ kfree_skb(skb);
+out:
+ return 0;
+}
+
+void x25_establish_link(struct x25_neigh *nb)
+{
+ struct sk_buff *skb;
+ unsigned char *ptr;
+
+ switch (nb->dev->type) {
+ case ARPHRD_X25:
+ if ((skb = alloc_skb(1, GFP_ATOMIC)) == NULL) {
+ pr_err("x25_dev: out of memory\n");
+ return;
+ }
+ ptr = skb_put(skb, 1);
+ *ptr = X25_IFACE_CONNECT;
+ break;
+
+ default:
+ return;
+ }
+
+ skb->protocol = htons(ETH_P_X25);
+ skb->dev = nb->dev;
+
+ dev_queue_xmit(skb);
+}
+
+void x25_terminate_link(struct x25_neigh *nb)
+{
+ struct sk_buff *skb;
+ unsigned char *ptr;
+
+ if (nb->dev->type != ARPHRD_X25)
+ return;
+
+ skb = alloc_skb(1, GFP_ATOMIC);
+ if (!skb) {
+ pr_err("x25_dev: out of memory\n");
+ return;
+ }
+
+ ptr = skb_put(skb, 1);
+ *ptr = X25_IFACE_DISCONNECT;
+
+ skb->protocol = htons(ETH_P_X25);
+ skb->dev = nb->dev;
+ dev_queue_xmit(skb);
+}
+
+void x25_send_frame(struct sk_buff *skb, struct x25_neigh *nb)
+{
+ unsigned char *dptr;
+
+ skb_reset_network_header(skb);
+
+ switch (nb->dev->type) {
+ case ARPHRD_X25:
+ dptr = skb_push(skb, 1);
+ *dptr = X25_IFACE_DATA;
+ break;
+
+ default:
+ kfree_skb(skb);
+ return;
+ }
+
+ skb->protocol = htons(ETH_P_X25);
+ skb->dev = nb->dev;
+
+ dev_queue_xmit(skb);
+}
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
new file mode 100644
index 000000000..8e1a49b0c
--- /dev/null
+++ b/net/x25/x25_facilities.c
@@ -0,0 +1,350 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * X.25 Packet Layer release 002
+ *
+ * This is ALPHA test software. This code may break your machine,
+ * randomly fail to work with new releases, misbehave and/or generally
+ * screw up. It might even work.
+ *
+ * This code REQUIRES 2.1.15 or higher
+ *
+ * History
+ * X.25 001 Split from x25_subr.c
+ * mar/20/00 Daniela Squassoni Disabling/enabling of facilities
+ * negotiation.
+ * apr/14/05 Shaun Pereira - Allow fast select with no restriction
+ * on response.
+ */
+
+#define pr_fmt(fmt) "X25: " fmt
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <net/x25.h>
+
+/**
+ * x25_parse_facilities - Parse facilities from skb into the facilities structs
+ *
+ * @skb: sk_buff to parse
+ * @facilities: Regular facilities, updated as facilities are found
+ * @dte_facs: ITU DTE facilities, updated as DTE facilities are found
+ * @vc_fac_mask: mask is updated with all facilities found
+ *
+ * Return codes:
+ * -1 - Parsing error, caller should drop call and clean up
+ * 0 - Parse OK, this skb has no facilities
+ * >0 - Parse OK, returns the length of the facilities header
+ *
+ */
+int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
+ struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask)
+{
+ unsigned char *p;
+ unsigned int len;
+
+ *vc_fac_mask = 0;
+
+ /*
+ * The kernel knows which facilities were set on an incoming call but
+ * currently this information is not available to userspace. Here we
+ * report a length of 0 to userspace readers of incoming call facilities
+ * to indicate that none were set.
+ */
+ dte_facs->calling_len = 0;
+ dte_facs->called_len = 0;
+ memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae));
+ memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae));
+
+ if (!pskb_may_pull(skb, 1))
+ return 0;
+
+ len = skb->data[0];
+
+ if (!pskb_may_pull(skb, 1 + len))
+ return -1;
+
+ p = skb->data + 1;
+
+ while (len > 0) {
+ switch (*p & X25_FAC_CLASS_MASK) {
+ case X25_FAC_CLASS_A:
+ if (len < 2)
+ return -1;
+ switch (*p) {
+ case X25_FAC_REVERSE:
+ if((p[1] & 0x81) == 0x81) {
+ facilities->reverse = p[1] & 0x81;
+ *vc_fac_mask |= X25_MASK_REVERSE;
+ break;
+ }
+
+ if((p[1] & 0x01) == 0x01) {
+ facilities->reverse = p[1] & 0x01;
+ *vc_fac_mask |= X25_MASK_REVERSE;
+ break;
+ }
+
+ if((p[1] & 0x80) == 0x80) {
+ facilities->reverse = p[1] & 0x80;
+ *vc_fac_mask |= X25_MASK_REVERSE;
+ break;
+ }
+
+ if(p[1] == 0x00) {
+ facilities->reverse
+ = X25_DEFAULT_REVERSE;
+ *vc_fac_mask |= X25_MASK_REVERSE;
+ break;
+ }
+ fallthrough;
+ case X25_FAC_THROUGHPUT:
+ facilities->throughput = p[1];
+ *vc_fac_mask |= X25_MASK_THROUGHPUT;
+ break;
+ case X25_MARKER:
+ break;
+ default:
+ pr_debug("unknown facility "
+ "%02X, value %02X\n",
+ p[0], p[1]);
+ break;
+ }
+ p += 2;
+ len -= 2;
+ break;
+ case X25_FAC_CLASS_B:
+ if (len < 3)
+ return -1;
+ switch (*p) {
+ case X25_FAC_PACKET_SIZE:
+ facilities->pacsize_in = p[1];
+ facilities->pacsize_out = p[2];
+ *vc_fac_mask |= X25_MASK_PACKET_SIZE;
+ break;
+ case X25_FAC_WINDOW_SIZE:
+ facilities->winsize_in = p[1];
+ facilities->winsize_out = p[2];
+ *vc_fac_mask |= X25_MASK_WINDOW_SIZE;
+ break;
+ default:
+ pr_debug("unknown facility "
+ "%02X, values %02X, %02X\n",
+ p[0], p[1], p[2]);
+ break;
+ }
+ p += 3;
+ len -= 3;
+ break;
+ case X25_FAC_CLASS_C:
+ if (len < 4)
+ return -1;
+ pr_debug("unknown facility %02X, "
+ "values %02X, %02X, %02X\n",
+ p[0], p[1], p[2], p[3]);
+ p += 4;
+ len -= 4;
+ break;
+ case X25_FAC_CLASS_D:
+ if (len < p[1] + 2)
+ return -1;
+ switch (*p) {
+ case X25_FAC_CALLING_AE:
+ if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
+ return -1;
+ if (p[2] > X25_MAX_AE_LEN)
+ return -1;
+ dte_facs->calling_len = p[2];
+ memcpy(dte_facs->calling_ae, &p[3], p[1] - 1);
+ *vc_fac_mask |= X25_MASK_CALLING_AE;
+ break;
+ case X25_FAC_CALLED_AE:
+ if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
+ return -1;
+ if (p[2] > X25_MAX_AE_LEN)
+ return -1;
+ dte_facs->called_len = p[2];
+ memcpy(dte_facs->called_ae, &p[3], p[1] - 1);
+ *vc_fac_mask |= X25_MASK_CALLED_AE;
+ break;
+ default:
+ pr_debug("unknown facility %02X,"
+ "length %d\n", p[0], p[1]);
+ break;
+ }
+ len -= p[1] + 2;
+ p += p[1] + 2;
+ break;
+ }
+ }
+
+ return p - skb->data;
+}
+
+/*
+ * Create a set of facilities.
+ */
+int x25_create_facilities(unsigned char *buffer,
+ struct x25_facilities *facilities,
+ struct x25_dte_facilities *dte_facs, unsigned long facil_mask)
+{
+ unsigned char *p = buffer + 1;
+ int len;
+
+ if (!facil_mask) {
+ /*
+ * Length of the facilities field in call_req or
+ * call_accept packets
+ */
+ buffer[0] = 0;
+ len = 1; /* 1 byte for the length field */
+ return len;
+ }
+
+ if (facilities->reverse && (facil_mask & X25_MASK_REVERSE)) {
+ *p++ = X25_FAC_REVERSE;
+ *p++ = facilities->reverse;
+ }
+
+ if (facilities->throughput && (facil_mask & X25_MASK_THROUGHPUT)) {
+ *p++ = X25_FAC_THROUGHPUT;
+ *p++ = facilities->throughput;
+ }
+
+ if ((facilities->pacsize_in || facilities->pacsize_out) &&
+ (facil_mask & X25_MASK_PACKET_SIZE)) {
+ *p++ = X25_FAC_PACKET_SIZE;
+ *p++ = facilities->pacsize_in ? : facilities->pacsize_out;
+ *p++ = facilities->pacsize_out ? : facilities->pacsize_in;
+ }
+
+ if ((facilities->winsize_in || facilities->winsize_out) &&
+ (facil_mask & X25_MASK_WINDOW_SIZE)) {
+ *p++ = X25_FAC_WINDOW_SIZE;
+ *p++ = facilities->winsize_in ? : facilities->winsize_out;
+ *p++ = facilities->winsize_out ? : facilities->winsize_in;
+ }
+
+ if (facil_mask & (X25_MASK_CALLING_AE|X25_MASK_CALLED_AE)) {
+ *p++ = X25_MARKER;
+ *p++ = X25_DTE_SERVICES;
+ }
+
+ if (dte_facs->calling_len && (facil_mask & X25_MASK_CALLING_AE)) {
+ unsigned int bytecount = (dte_facs->calling_len + 1) >> 1;
+ *p++ = X25_FAC_CALLING_AE;
+ *p++ = 1 + bytecount;
+ *p++ = dte_facs->calling_len;
+ memcpy(p, dte_facs->calling_ae, bytecount);
+ p += bytecount;
+ }
+
+ if (dte_facs->called_len && (facil_mask & X25_MASK_CALLED_AE)) {
+ unsigned int bytecount = (dte_facs->called_len % 2) ?
+ dte_facs->called_len / 2 + 1 :
+ dte_facs->called_len / 2;
+ *p++ = X25_FAC_CALLED_AE;
+ *p++ = 1 + bytecount;
+ *p++ = dte_facs->called_len;
+ memcpy(p, dte_facs->called_ae, bytecount);
+ p+=bytecount;
+ }
+
+ len = p - buffer;
+ buffer[0] = len - 1;
+
+ return len;
+}
+
+/*
+ * Try to reach a compromise on a set of facilities.
+ *
+ * The only real problem is with reverse charging.
+ */
+int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
+ struct x25_facilities *new, struct x25_dte_facilities *dte)
+{
+ struct x25_sock *x25 = x25_sk(sk);
+ struct x25_facilities *ours = &x25->facilities;
+ struct x25_facilities theirs;
+ int len;
+
+ memset(&theirs, 0, sizeof(theirs));
+ memcpy(new, ours, sizeof(*new));
+ memset(dte, 0, sizeof(*dte));
+
+ len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask);
+ if (len < 0)
+ return len;
+
+ /*
+ * They want reverse charging, we won't accept it.
+ */
+ if ((theirs.reverse & 0x01 ) && (ours->reverse & 0x01)) {
+ SOCK_DEBUG(sk, "X.25: rejecting reverse charging request\n");
+ return -1;
+ }
+
+ new->reverse = theirs.reverse;
+
+ if (theirs.throughput) {
+ int theirs_in = theirs.throughput & 0x0f;
+ int theirs_out = theirs.throughput & 0xf0;
+ int ours_in = ours->throughput & 0x0f;
+ int ours_out = ours->throughput & 0xf0;
+ if (!ours_in || theirs_in < ours_in) {
+ SOCK_DEBUG(sk, "X.25: inbound throughput negotiated\n");
+ new->throughput = (new->throughput & 0xf0) | theirs_in;
+ }
+ if (!ours_out || theirs_out < ours_out) {
+ SOCK_DEBUG(sk,
+ "X.25: outbound throughput negotiated\n");
+ new->throughput = (new->throughput & 0x0f) | theirs_out;
+ }
+ }
+
+ if (theirs.pacsize_in && theirs.pacsize_out) {
+ if (theirs.pacsize_in < ours->pacsize_in) {
+ SOCK_DEBUG(sk, "X.25: packet size inwards negotiated down\n");
+ new->pacsize_in = theirs.pacsize_in;
+ }
+ if (theirs.pacsize_out < ours->pacsize_out) {
+ SOCK_DEBUG(sk, "X.25: packet size outwards negotiated down\n");
+ new->pacsize_out = theirs.pacsize_out;
+ }
+ }
+
+ if (theirs.winsize_in && theirs.winsize_out) {
+ if (theirs.winsize_in < ours->winsize_in) {
+ SOCK_DEBUG(sk, "X.25: window size inwards negotiated down\n");
+ new->winsize_in = theirs.winsize_in;
+ }
+ if (theirs.winsize_out < ours->winsize_out) {
+ SOCK_DEBUG(sk, "X.25: window size outwards negotiated down\n");
+ new->winsize_out = theirs.winsize_out;
+ }
+ }
+
+ return len;
+}
+
+/*
+ * Limit values of certain facilities according to the capability of the
+ * currently attached x25 link.
+ */
+void x25_limit_facilities(struct x25_facilities *facilities,
+ struct x25_neigh *nb)
+{
+
+ if (!nb->extended) {
+ if (facilities->winsize_in > 7) {
+ pr_debug("incoming winsize limited to 7\n");
+ facilities->winsize_in = 7;
+ }
+ if (facilities->winsize_out > 7) {
+ facilities->winsize_out = 7;
+ pr_debug("outgoing winsize limited to 7\n");
+ }
+ }
+}
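
A note on the facilities layout handled by x25_parse_facilities() and x25_create_facilities() above: the block is a length octet followed by per-class entries, where class A codes carry one parameter byte and class B codes carry two. The standalone sketch below illustrates only that framing; the facility codes and values are placeholders, not taken from a live call or from x25.h.

    /* Hedged illustration of the facilities framing used above: a length
     * octet, then class A (code + 1 value byte) and class B (code + 2
     * value bytes) entries.  Codes and values here are example numbers.
     */
    #include <stdio.h>

    #define FAC_THROUGHPUT  0x02    /* class A in this sketch */
    #define FAC_PACKET_SIZE 0x42    /* class B in this sketch */

    static int build_facilities(unsigned char *buf, unsigned char throughput,
                                unsigned char pac_in, unsigned char pac_out)
    {
            unsigned char *p = buf + 1;     /* buf[0] holds the length */

            *p++ = FAC_THROUGHPUT;          /* class A: one parameter byte */
            *p++ = throughput;

            *p++ = FAC_PACKET_SIZE;         /* class B: two parameter bytes */
            *p++ = pac_in;
            *p++ = pac_out;

            buf[0] = (unsigned char)(p - buf - 1);
            return (int)(p - buf);          /* total length incl. buf[0] */
    }

    int main(void)
    {
            unsigned char buf[16];
            int len = build_facilities(buf, 0x44, 0x07, 0x07);

            for (int i = 0; i < len; i++)
                    printf("%02X ", buf[i]);
            printf("\n");                   /* prints: 05 02 44 42 07 07 */
            return 0;
    }
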
diff --git a/net/x25/x25_forward.c b/net/x25/x25_forward.c
new file mode 100644
index 000000000..21b30b56e
--- /dev/null
+++ b/net/x25/x25_forward.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * History
+ * 03-01-2007 Added forwarding for x.25 Andrew Hendry
+ */
+
+#define pr_fmt(fmt) "X25: " fmt
+
+#include <linux/if_arp.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <net/x25.h>
+
+LIST_HEAD(x25_forward_list);
+DEFINE_RWLOCK(x25_forward_list_lock);
+
+int x25_forward_call(struct x25_address *dest_addr, struct x25_neigh *from,
+ struct sk_buff *skb, int lci)
+{
+ struct x25_route *rt;
+ struct x25_neigh *neigh_new = NULL;
+ struct x25_forward *x25_frwd, *new_frwd;
+ struct sk_buff *skbn;
+ short same_lci = 0;
+ int rc = 0;
+
+ if ((rt = x25_get_route(dest_addr)) == NULL)
+ goto out_no_route;
+
+ if ((neigh_new = x25_get_neigh(rt->dev)) == NULL) {
+		/* This shouldn't happen; if it somehow occurs,
+		 * do something sensible
+ */
+ goto out_put_route;
+ }
+
+ /* Avoid a loop. This is the normal exit path for a
+ * system with only one x.25 iface and default route
+ */
+ if (rt->dev == from->dev) {
+ goto out_put_nb;
+ }
+
+ /* Remote end sending a call request on an already
+	 * established LCI? It shouldn't happen, but handle it just in case.
+ */
+ read_lock_bh(&x25_forward_list_lock);
+ list_for_each_entry(x25_frwd, &x25_forward_list, node) {
+ if (x25_frwd->lci == lci) {
+ pr_warn("call request for lci which is already registered!, transmitting but not registering new pair\n");
+ same_lci = 1;
+ }
+ }
+ read_unlock_bh(&x25_forward_list_lock);
+
+ /* Save the forwarding details for future traffic */
+ if (!same_lci){
+ if ((new_frwd = kmalloc(sizeof(struct x25_forward),
+ GFP_ATOMIC)) == NULL){
+ rc = -ENOMEM;
+ goto out_put_nb;
+ }
+ new_frwd->lci = lci;
+ new_frwd->dev1 = rt->dev;
+ new_frwd->dev2 = from->dev;
+ write_lock_bh(&x25_forward_list_lock);
+ list_add(&new_frwd->node, &x25_forward_list);
+ write_unlock_bh(&x25_forward_list_lock);
+ }
+
+ /* Forward the call request */
+ if ( (skbn = skb_clone(skb, GFP_ATOMIC)) == NULL){
+ goto out_put_nb;
+ }
+ x25_transmit_link(skbn, neigh_new);
+ rc = 1;
+
+
+out_put_nb:
+ x25_neigh_put(neigh_new);
+
+out_put_route:
+ x25_route_put(rt);
+
+out_no_route:
+ return rc;
+}
+
+
+int x25_forward_data(int lci, struct x25_neigh *from, struct sk_buff *skb) {
+
+ struct x25_forward *frwd;
+ struct net_device *peer = NULL;
+ struct x25_neigh *nb;
+ struct sk_buff *skbn;
+ int rc = 0;
+
+ read_lock_bh(&x25_forward_list_lock);
+ list_for_each_entry(frwd, &x25_forward_list, node) {
+ if (frwd->lci == lci) {
+ /* The call is established, either side can send */
+ if (from->dev == frwd->dev1) {
+ peer = frwd->dev2;
+ } else {
+ peer = frwd->dev1;
+ }
+ break;
+ }
+ }
+ read_unlock_bh(&x25_forward_list_lock);
+
+ if ( (nb = x25_get_neigh(peer)) == NULL)
+ goto out;
+
+ if ( (skbn = pskb_copy(skb, GFP_ATOMIC)) == NULL){
+ goto output;
+
+ }
+ x25_transmit_link(skbn, nb);
+
+ rc = 1;
+output:
+ x25_neigh_put(nb);
+out:
+ return rc;
+}
+
+void x25_clear_forward_by_lci(unsigned int lci)
+{
+ struct x25_forward *fwd, *tmp;
+
+ write_lock_bh(&x25_forward_list_lock);
+
+ list_for_each_entry_safe(fwd, tmp, &x25_forward_list, node) {
+ if (fwd->lci == lci) {
+ list_del(&fwd->node);
+ kfree(fwd);
+ }
+ }
+ write_unlock_bh(&x25_forward_list_lock);
+}
+
+
+void x25_clear_forward_by_dev(struct net_device *dev)
+{
+ struct x25_forward *fwd, *tmp;
+
+ write_lock_bh(&x25_forward_list_lock);
+
+ list_for_each_entry_safe(fwd, tmp, &x25_forward_list, node) {
+ if ((fwd->dev1 == dev) || (fwd->dev2 == dev)){
+ list_del(&fwd->node);
+ kfree(fwd);
+ }
+ }
+ write_unlock_bh(&x25_forward_list_lock);
+}
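
The forwarding code above keeps one entry per forwarded call, pairing an LCI with the two interfaces it crosses; data arriving on one interface of the pair is relayed out the other. A minimal userspace restatement of that lookup, with invented device names and no kernel types, might look like this:

    /* Sketch of the lci -> (dev1, dev2) pairing used by x25_forward_data():
     * traffic arriving on one endpoint of the pair is relayed to the other.
     */
    #include <stdio.h>
    #include <string.h>

    struct fwd_entry {
            int lci;
            const char *dev1;
            const char *dev2;
    };

    /* Return the peer device for (lci, from), or NULL if no pair is known. */
    static const char *fwd_peer(const struct fwd_entry *tbl, size_t n,
                                int lci, const char *from)
    {
            for (size_t i = 0; i < n; i++) {
                    if (tbl[i].lci != lci)
                            continue;
                    if (strcmp(from, tbl[i].dev1) == 0)
                            return tbl[i].dev2;
                    return tbl[i].dev1;
            }
            return NULL;
    }

    int main(void)
    {
            struct fwd_entry tbl[] = { { 0x001, "x25a", "x25b" } };

            printf("%s\n", fwd_peer(tbl, 1, 0x001, "x25a"));   /* x25b */
            printf("%s\n", fwd_peer(tbl, 1, 0x001, "x25b"));   /* x25a */
            return 0;
    }
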
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
new file mode 100644
index 000000000..b981a4828
--- /dev/null
+++ b/net/x25/x25_in.c
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * X.25 Packet Layer release 002
+ *
+ * This is ALPHA test software. This code may break your machine,
+ * randomly fail to work with new releases, misbehave and/or generally
+ * screw up. It might even work.
+ *
+ * This code REQUIRES 2.1.15 or higher
+ *
+ * History
+ * X.25 001 Jonathan Naylor Started coding.
+ * X.25 002 Jonathan Naylor Centralised disconnection code.
+ * New timer architecture.
+ * 2000-03-20 Daniela Squassoni Disabling/enabling of facilities
+ * negotiation.
+ * 2000-11-10 Henner Eisen Check and reset for out-of-sequence
+ * i-frames.
+ */
+
+#define pr_fmt(fmt) "X25: " fmt
+
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <net/tcp_states.h>
+#include <net/x25.h>
+
+static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more)
+{
+ struct sk_buff *skbo, *skbn = skb;
+ struct x25_sock *x25 = x25_sk(sk);
+
+ if (more) {
+ x25->fraglen += skb->len;
+ skb_queue_tail(&x25->fragment_queue, skb);
+ skb_set_owner_r(skb, sk);
+ return 0;
+ }
+
+ if (x25->fraglen > 0) { /* End of fragment */
+ int len = x25->fraglen + skb->len;
+
+ if ((skbn = alloc_skb(len, GFP_ATOMIC)) == NULL){
+ kfree_skb(skb);
+ return 1;
+ }
+
+ skb_queue_tail(&x25->fragment_queue, skb);
+
+ skb_reset_transport_header(skbn);
+
+ skbo = skb_dequeue(&x25->fragment_queue);
+ skb_copy_from_linear_data(skbo, skb_put(skbn, skbo->len),
+ skbo->len);
+ kfree_skb(skbo);
+
+ while ((skbo =
+ skb_dequeue(&x25->fragment_queue)) != NULL) {
+ skb_pull(skbo, (x25->neighbour->extended) ?
+ X25_EXT_MIN_LEN : X25_STD_MIN_LEN);
+ skb_copy_from_linear_data(skbo,
+ skb_put(skbn, skbo->len),
+ skbo->len);
+ kfree_skb(skbo);
+ }
+
+ x25->fraglen = 0;
+ }
+
+ skb_set_owner_r(skbn, sk);
+ skb_queue_tail(&sk->sk_receive_queue, skbn);
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_data_ready(sk);
+
+ return 0;
+}
+
+/*
+ * State machine for state 1, Awaiting Call Accepted State.
+ * The handling of the timer(s) is in file x25_timer.c.
+ * Handling of state 0 and connection release is in af_x25.c.
+ */
+static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype)
+{
+ struct x25_address source_addr, dest_addr;
+ int len;
+ struct x25_sock *x25 = x25_sk(sk);
+
+ switch (frametype) {
+ case X25_CALL_ACCEPTED: {
+
+ x25_stop_timer(sk);
+ x25->condition = 0x00;
+ x25->vs = 0;
+ x25->va = 0;
+ x25->vr = 0;
+ x25->vl = 0;
+ x25->state = X25_STATE_3;
+ sk->sk_state = TCP_ESTABLISHED;
+ /*
+ * Parse the data in the frame.
+ */
+ if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
+ goto out_clear;
+ skb_pull(skb, X25_STD_MIN_LEN);
+
+ len = x25_parse_address_block(skb, &source_addr,
+ &dest_addr);
+ if (len > 0)
+ skb_pull(skb, len);
+ else if (len < 0)
+ goto out_clear;
+
+ len = x25_parse_facilities(skb, &x25->facilities,
+ &x25->dte_facilities,
+ &x25->vc_facil_mask);
+ if (len > 0)
+ skb_pull(skb, len);
+ else if (len < 0)
+ goto out_clear;
+ /*
+ * Copy any Call User Data.
+ */
+ if (skb->len > 0) {
+ if (skb->len > X25_MAX_CUD_LEN)
+ goto out_clear;
+
+ skb_copy_bits(skb, 0, x25->calluserdata.cuddata,
+ skb->len);
+ x25->calluserdata.cudlength = skb->len;
+ }
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_state_change(sk);
+ break;
+ }
+ case X25_CALL_REQUEST:
+ /* call collision */
+ x25->causediag.cause = 0x01;
+ x25->causediag.diagnostic = 0x48;
+
+ x25_write_internal(sk, X25_CLEAR_REQUEST);
+ x25_disconnect(sk, EISCONN, 0x01, 0x48);
+ break;
+
+ case X25_CLEAR_REQUEST:
+ if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
+ goto out_clear;
+
+ x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
+ x25_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]);
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+
+out_clear:
+ x25_write_internal(sk, X25_CLEAR_REQUEST);
+ x25->state = X25_STATE_2;
+ x25_start_t23timer(sk);
+ return 0;
+}
+
+/*
+ * State machine for state 2, Awaiting Clear Confirmation State.
+ * The handling of the timer(s) is in file x25_timer.c
+ * Handling of state 0 and connection release is in af_x25.c.
+ */
+static int x25_state2_machine(struct sock *sk, struct sk_buff *skb, int frametype)
+{
+ switch (frametype) {
+
+ case X25_CLEAR_REQUEST:
+ if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
+ goto out_clear;
+
+ x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
+ x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
+ break;
+
+ case X25_CLEAR_CONFIRMATION:
+ x25_disconnect(sk, 0, 0, 0);
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+
+out_clear:
+ x25_write_internal(sk, X25_CLEAR_REQUEST);
+ x25_start_t23timer(sk);
+ return 0;
+}
+
+/*
+ * State machine for state 3, Connected State.
+ * The handling of the timer(s) is in file x25_timer.c
+ * Handling of state 0 and connection release is in af_x25.c.
+ */
+static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype, int ns, int nr, int q, int d, int m)
+{
+ int queued = 0;
+ int modulus;
+ struct x25_sock *x25 = x25_sk(sk);
+
+ modulus = (x25->neighbour->extended) ? X25_EMODULUS : X25_SMODULUS;
+
+ switch (frametype) {
+
+ case X25_RESET_REQUEST:
+ x25_write_internal(sk, X25_RESET_CONFIRMATION);
+ x25_stop_timer(sk);
+ x25->condition = 0x00;
+ x25->vs = 0;
+ x25->vr = 0;
+ x25->va = 0;
+ x25->vl = 0;
+ x25_requeue_frames(sk);
+ break;
+
+ case X25_CLEAR_REQUEST:
+ if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
+ goto out_clear;
+
+ x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
+ x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
+ break;
+
+ case X25_RR:
+ case X25_RNR:
+ if (!x25_validate_nr(sk, nr)) {
+ x25_clear_queues(sk);
+ x25_write_internal(sk, X25_RESET_REQUEST);
+ x25_start_t22timer(sk);
+ x25->condition = 0x00;
+ x25->vs = 0;
+ x25->vr = 0;
+ x25->va = 0;
+ x25->vl = 0;
+ x25->state = X25_STATE_4;
+ } else {
+ x25_frames_acked(sk, nr);
+ if (frametype == X25_RNR) {
+ x25->condition |= X25_COND_PEER_RX_BUSY;
+ } else {
+ x25->condition &= ~X25_COND_PEER_RX_BUSY;
+ }
+ }
+ break;
+
+ case X25_DATA: /* XXX */
+ x25->condition &= ~X25_COND_PEER_RX_BUSY;
+ if ((ns != x25->vr) || !x25_validate_nr(sk, nr)) {
+ x25_clear_queues(sk);
+ x25_write_internal(sk, X25_RESET_REQUEST);
+ x25_start_t22timer(sk);
+ x25->condition = 0x00;
+ x25->vs = 0;
+ x25->vr = 0;
+ x25->va = 0;
+ x25->vl = 0;
+ x25->state = X25_STATE_4;
+ break;
+ }
+ x25_frames_acked(sk, nr);
+ if (ns == x25->vr) {
+ if (x25_queue_rx_frame(sk, skb, m) == 0) {
+ x25->vr = (x25->vr + 1) % modulus;
+ queued = 1;
+ } else {
+ /* Should never happen */
+ x25_clear_queues(sk);
+ x25_write_internal(sk, X25_RESET_REQUEST);
+ x25_start_t22timer(sk);
+ x25->condition = 0x00;
+ x25->vs = 0;
+ x25->vr = 0;
+ x25->va = 0;
+ x25->vl = 0;
+ x25->state = X25_STATE_4;
+ break;
+ }
+ if (atomic_read(&sk->sk_rmem_alloc) >
+ (sk->sk_rcvbuf >> 1))
+ x25->condition |= X25_COND_OWN_RX_BUSY;
+ }
+ /*
+ * If the window is full Ack it immediately, else
+ * start the holdback timer.
+ */
+ if (((x25->vl + x25->facilities.winsize_in) % modulus) == x25->vr) {
+ x25->condition &= ~X25_COND_ACK_PENDING;
+ x25_stop_timer(sk);
+ x25_enquiry_response(sk);
+ } else {
+ x25->condition |= X25_COND_ACK_PENDING;
+ x25_start_t2timer(sk);
+ }
+ break;
+
+ case X25_INTERRUPT_CONFIRMATION:
+ clear_bit(X25_INTERRUPT_FLAG, &x25->flags);
+ break;
+
+ case X25_INTERRUPT:
+ if (sock_flag(sk, SOCK_URGINLINE))
+ queued = !sock_queue_rcv_skb(sk, skb);
+ else {
+ skb_set_owner_r(skb, sk);
+ skb_queue_tail(&x25->interrupt_in_queue, skb);
+ queued = 1;
+ }
+ sk_send_sigurg(sk);
+ x25_write_internal(sk, X25_INTERRUPT_CONFIRMATION);
+ break;
+
+ default:
+ pr_warn("unknown %02X in state 3\n", frametype);
+ break;
+ }
+
+ return queued;
+
+out_clear:
+ x25_write_internal(sk, X25_CLEAR_REQUEST);
+ x25->state = X25_STATE_2;
+ x25_start_t23timer(sk);
+ return 0;
+}
+
+/*
+ * State machine for state 4, Awaiting Reset Confirmation State.
+ * The handling of the timer(s) is in file x25_timer.c
+ * Handling of state 0 and connection release is in af_x25.c.
+ */
+static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametype)
+{
+ struct x25_sock *x25 = x25_sk(sk);
+
+ switch (frametype) {
+
+ case X25_RESET_REQUEST:
+ x25_write_internal(sk, X25_RESET_CONFIRMATION);
+ fallthrough;
+ case X25_RESET_CONFIRMATION: {
+ x25_stop_timer(sk);
+ x25->condition = 0x00;
+ x25->va = 0;
+ x25->vr = 0;
+ x25->vs = 0;
+ x25->vl = 0;
+ x25->state = X25_STATE_3;
+ x25_requeue_frames(sk);
+ break;
+ }
+ case X25_CLEAR_REQUEST:
+ if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
+ goto out_clear;
+
+ x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
+ x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+
+out_clear:
+ x25_write_internal(sk, X25_CLEAR_REQUEST);
+ x25->state = X25_STATE_2;
+ x25_start_t23timer(sk);
+ return 0;
+}
+
+/*
+ * State machine for state 5, Call Accepted / Call Connected pending (X25_ACCPT_APPRV_FLAG).
+ * The handling of the timer(s) is in file x25_timer.c
+ * Handling of state 0 and connection release is in af_x25.c.
+ */
+static int x25_state5_machine(struct sock *sk, struct sk_buff *skb, int frametype)
+{
+ struct x25_sock *x25 = x25_sk(sk);
+
+ switch (frametype) {
+ case X25_CLEAR_REQUEST:
+ if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2)) {
+ x25_write_internal(sk, X25_CLEAR_REQUEST);
+ x25->state = X25_STATE_2;
+ x25_start_t23timer(sk);
+ return 0;
+ }
+
+ x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
+ x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/* Higher level upcall for a LAPB frame */
+int x25_process_rx_frame(struct sock *sk, struct sk_buff *skb)
+{
+ struct x25_sock *x25 = x25_sk(sk);
+ int queued = 0, frametype, ns, nr, q, d, m;
+
+ if (x25->state == X25_STATE_0)
+ return 0;
+
+ frametype = x25_decode(sk, skb, &ns, &nr, &q, &d, &m);
+
+ switch (x25->state) {
+ case X25_STATE_1:
+ queued = x25_state1_machine(sk, skb, frametype);
+ break;
+ case X25_STATE_2:
+ queued = x25_state2_machine(sk, skb, frametype);
+ break;
+ case X25_STATE_3:
+ queued = x25_state3_machine(sk, skb, frametype, ns, nr, q, d, m);
+ break;
+ case X25_STATE_4:
+ queued = x25_state4_machine(sk, skb, frametype);
+ break;
+ case X25_STATE_5:
+ queued = x25_state5_machine(sk, skb, frametype);
+ break;
+ }
+
+ x25_kick(sk);
+
+ return queued;
+}
+
+int x25_backlog_rcv(struct sock *sk, struct sk_buff *skb)
+{
+ int queued = x25_process_rx_frame(sk, skb);
+
+ if (!queued)
+ kfree_skb(skb);
+
+ return 0;
+}
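
In the X25_DATA branch of x25_state3_machine() above, the receiver acknowledges immediately once ((vl + winsize_in) % modulus) == vr, i.e. the receive window is full; otherwise it marks an ack as pending and starts the T2 holdback timer. A standalone restatement of that decision with plain integers:

    /* Standalone restatement of the DATA acknowledgement decision in
     * x25_state3_machine(): once the receive window fills, acknowledge at
     * once; otherwise the T2 holdback timer is (re)started.
     */
    #include <stdio.h>

    /* vl: last sequence number acknowledged to the peer, vr: next expected
     * sequence number, win: receive window size, modulus: 8 (standard) or
     * 128 (extended) sequencing.
     */
    static int rx_window_full(int vl, int vr, int win, int modulus)
    {
            return ((vl + win) % modulus) == vr;
    }

    int main(void)
    {
            /* Standard sequencing, window of 2: after two unacked frames
             * the window is full and an RR/RNR must go out immediately. */
            printf("%d\n", rx_window_full(0, 1, 2, 8));   /* 0: start T2 */
            printf("%d\n", rx_window_full(0, 2, 2, 8));   /* 1: ack now  */
            return 0;
    }
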
diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c
new file mode 100644
index 000000000..5460b9146
--- /dev/null
+++ b/net/x25/x25_link.c
@@ -0,0 +1,421 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * X.25 Packet Layer release 002
+ *
+ * This is ALPHA test software. This code may break your machine,
+ * randomly fail to work with new releases, misbehave and/or generally
+ * screw up. It might even work.
+ *
+ * This code REQUIRES 2.1.15 or higher
+ *
+ * History
+ * X.25 001 Jonathan Naylor Started coding.
+ * X.25 002 Jonathan Naylor New timer architecture.
+ * mar/20/00 Daniela Squassoni Disabling/enabling of facilities
+ * negotiation.
+ * 2000-09-04 Henner Eisen dev_hold() / dev_put() for x25_neigh.
+ */
+
+#define pr_fmt(fmt) "X25: " fmt
+
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/uaccess.h>
+#include <linux/init.h>
+#include <net/x25.h>
+
+LIST_HEAD(x25_neigh_list);
+DEFINE_RWLOCK(x25_neigh_list_lock);
+
+static void x25_t20timer_expiry(struct timer_list *);
+
+static void x25_transmit_restart_confirmation(struct x25_neigh *nb);
+static void x25_transmit_restart_request(struct x25_neigh *nb);
+
+/*
+ * Linux set/reset timer routines
+ */
+static inline void x25_start_t20timer(struct x25_neigh *nb)
+{
+ mod_timer(&nb->t20timer, jiffies + nb->t20);
+}
+
+static void x25_t20timer_expiry(struct timer_list *t)
+{
+ struct x25_neigh *nb = from_timer(nb, t, t20timer);
+
+ x25_transmit_restart_request(nb);
+
+ x25_start_t20timer(nb);
+}
+
+static inline void x25_stop_t20timer(struct x25_neigh *nb)
+{
+ del_timer(&nb->t20timer);
+}
+
+/*
+ * This handles all restart and diagnostic frames.
+ */
+void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb,
+ unsigned short frametype)
+{
+ struct sk_buff *skbn;
+
+ switch (frametype) {
+ case X25_RESTART_REQUEST:
+ switch (nb->state) {
+ case X25_LINK_STATE_0:
+ /* This can happen when the x25 module just gets loaded
+ * and doesn't know layer 2 has already connected
+ */
+ nb->state = X25_LINK_STATE_3;
+ x25_transmit_restart_confirmation(nb);
+ break;
+ case X25_LINK_STATE_2:
+ x25_stop_t20timer(nb);
+ nb->state = X25_LINK_STATE_3;
+ break;
+ case X25_LINK_STATE_3:
+ /* clear existing virtual calls */
+ x25_kill_by_neigh(nb);
+
+ x25_transmit_restart_confirmation(nb);
+ break;
+ }
+ break;
+
+ case X25_RESTART_CONFIRMATION:
+ switch (nb->state) {
+ case X25_LINK_STATE_2:
+ x25_stop_t20timer(nb);
+ nb->state = X25_LINK_STATE_3;
+ break;
+ case X25_LINK_STATE_3:
+ /* clear existing virtual calls */
+ x25_kill_by_neigh(nb);
+
+ x25_transmit_restart_request(nb);
+ nb->state = X25_LINK_STATE_2;
+ x25_start_t20timer(nb);
+ break;
+ }
+ break;
+
+ case X25_DIAGNOSTIC:
+ if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 4))
+ break;
+
+ pr_warn("diagnostic #%d - %02X %02X %02X\n",
+ skb->data[3], skb->data[4],
+ skb->data[5], skb->data[6]);
+ break;
+
+ default:
+ pr_warn("received unknown %02X with LCI 000\n",
+ frametype);
+ break;
+ }
+
+ if (nb->state == X25_LINK_STATE_3)
+ while ((skbn = skb_dequeue(&nb->queue)) != NULL)
+ x25_send_frame(skbn, nb);
+}
+
+/*
+ * This routine is called when a Restart Request is needed
+ */
+static void x25_transmit_restart_request(struct x25_neigh *nb)
+{
+ unsigned char *dptr;
+ int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
+ struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
+
+ if (!skb)
+ return;
+
+ skb_reserve(skb, X25_MAX_L2_LEN);
+
+ dptr = skb_put(skb, X25_STD_MIN_LEN + 2);
+
+ *dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
+ *dptr++ = 0x00;
+ *dptr++ = X25_RESTART_REQUEST;
+ *dptr++ = 0x00;
+ *dptr++ = 0;
+
+ skb->sk = NULL;
+
+ x25_send_frame(skb, nb);
+}
+
+/*
+ * This routine is called when a Restart Confirmation is needed
+ */
+static void x25_transmit_restart_confirmation(struct x25_neigh *nb)
+{
+ unsigned char *dptr;
+ int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN;
+ struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
+
+ if (!skb)
+ return;
+
+ skb_reserve(skb, X25_MAX_L2_LEN);
+
+ dptr = skb_put(skb, X25_STD_MIN_LEN);
+
+ *dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
+ *dptr++ = 0x00;
+ *dptr++ = X25_RESTART_CONFIRMATION;
+
+ skb->sk = NULL;
+
+ x25_send_frame(skb, nb);
+}
+
+/*
+ * This routine is called when a Clear Request is needed outside of the context
+ * of a connected socket.
+ */
+void x25_transmit_clear_request(struct x25_neigh *nb, unsigned int lci,
+ unsigned char cause)
+{
+ unsigned char *dptr;
+ int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
+ struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
+
+ if (!skb)
+ return;
+
+ skb_reserve(skb, X25_MAX_L2_LEN);
+
+ dptr = skb_put(skb, X25_STD_MIN_LEN + 2);
+
+ *dptr++ = ((lci >> 8) & 0x0F) | (nb->extended ?
+ X25_GFI_EXTSEQ :
+ X25_GFI_STDSEQ);
+ *dptr++ = (lci >> 0) & 0xFF;
+ *dptr++ = X25_CLEAR_REQUEST;
+ *dptr++ = cause;
+ *dptr++ = 0x00;
+
+ skb->sk = NULL;
+
+ x25_send_frame(skb, nb);
+}
+
+void x25_transmit_link(struct sk_buff *skb, struct x25_neigh *nb)
+{
+ switch (nb->state) {
+ case X25_LINK_STATE_0:
+ skb_queue_tail(&nb->queue, skb);
+ nb->state = X25_LINK_STATE_1;
+ x25_establish_link(nb);
+ break;
+ case X25_LINK_STATE_1:
+ case X25_LINK_STATE_2:
+ skb_queue_tail(&nb->queue, skb);
+ break;
+ case X25_LINK_STATE_3:
+ x25_send_frame(skb, nb);
+ break;
+ }
+}
+
+/*
+ * Called when the link layer has become established.
+ */
+void x25_link_established(struct x25_neigh *nb)
+{
+ switch (nb->state) {
+ case X25_LINK_STATE_0:
+ case X25_LINK_STATE_1:
+ x25_transmit_restart_request(nb);
+ nb->state = X25_LINK_STATE_2;
+ x25_start_t20timer(nb);
+ break;
+ }
+}
+
+/*
+ * Called when the link layer has terminated, or an establishment
+ * request has failed.
+ */
+
+void x25_link_terminated(struct x25_neigh *nb)
+{
+ nb->state = X25_LINK_STATE_0;
+ skb_queue_purge(&nb->queue);
+ x25_stop_t20timer(nb);
+
+ /* Out of order: clear existing virtual calls (X.25 03/93 4.6.3) */
+ x25_kill_by_neigh(nb);
+}
+
+/*
+ * Add a new device.
+ */
+void x25_link_device_up(struct net_device *dev)
+{
+ struct x25_neigh *nb = kmalloc(sizeof(*nb), GFP_ATOMIC);
+
+ if (!nb)
+ return;
+
+ skb_queue_head_init(&nb->queue);
+ timer_setup(&nb->t20timer, x25_t20timer_expiry, 0);
+
+ dev_hold(dev);
+ nb->dev = dev;
+ nb->state = X25_LINK_STATE_0;
+ nb->extended = 0;
+ /*
+ * Enables negotiation
+ */
+ nb->global_facil_mask = X25_MASK_REVERSE |
+ X25_MASK_THROUGHPUT |
+ X25_MASK_PACKET_SIZE |
+ X25_MASK_WINDOW_SIZE;
+ nb->t20 = sysctl_x25_restart_request_timeout;
+ refcount_set(&nb->refcnt, 1);
+
+ write_lock_bh(&x25_neigh_list_lock);
+ list_add(&nb->node, &x25_neigh_list);
+ write_unlock_bh(&x25_neigh_list_lock);
+}
+
+/**
+ * __x25_remove_neigh - remove neighbour from x25_neigh_list
+ * @nb: - neigh to remove
+ *
+ * Remove neighbour from x25_neigh_list, if it was there.
+ * Caller must hold x25_neigh_list_lock.
+ */
+static void __x25_remove_neigh(struct x25_neigh *nb)
+{
+ if (nb->node.next) {
+ list_del(&nb->node);
+ x25_neigh_put(nb);
+ }
+}
+
+/*
+ * A device has been removed, remove its links.
+ */
+void x25_link_device_down(struct net_device *dev)
+{
+ struct x25_neigh *nb;
+ struct list_head *entry, *tmp;
+
+ write_lock_bh(&x25_neigh_list_lock);
+
+ list_for_each_safe(entry, tmp, &x25_neigh_list) {
+ nb = list_entry(entry, struct x25_neigh, node);
+
+ if (nb->dev == dev) {
+ __x25_remove_neigh(nb);
+ dev_put(dev);
+ }
+ }
+
+ write_unlock_bh(&x25_neigh_list_lock);
+}
+
+/*
+ * Given a device, return its neighbour structure.
+ */
+struct x25_neigh *x25_get_neigh(struct net_device *dev)
+{
+ struct x25_neigh *nb, *use = NULL;
+
+ read_lock_bh(&x25_neigh_list_lock);
+ list_for_each_entry(nb, &x25_neigh_list, node) {
+ if (nb->dev == dev) {
+ use = nb;
+ break;
+ }
+ }
+
+ if (use)
+ x25_neigh_hold(use);
+ read_unlock_bh(&x25_neigh_list_lock);
+ return use;
+}
+
+/*
+ * Handle the ioctls that control the subscription functions.
+ */
+int x25_subscr_ioctl(unsigned int cmd, void __user *arg)
+{
+ struct x25_subscrip_struct x25_subscr;
+ struct x25_neigh *nb;
+ struct net_device *dev;
+ int rc = -EINVAL;
+
+ if (cmd != SIOCX25GSUBSCRIP && cmd != SIOCX25SSUBSCRIP)
+ goto out;
+
+ rc = -EFAULT;
+ if (copy_from_user(&x25_subscr, arg, sizeof(x25_subscr)))
+ goto out;
+
+ rc = -EINVAL;
+ if ((dev = x25_dev_get(x25_subscr.device)) == NULL)
+ goto out;
+
+ if ((nb = x25_get_neigh(dev)) == NULL)
+ goto out_dev_put;
+
+ dev_put(dev);
+
+ if (cmd == SIOCX25GSUBSCRIP) {
+ read_lock_bh(&x25_neigh_list_lock);
+ x25_subscr.extended = nb->extended;
+ x25_subscr.global_facil_mask = nb->global_facil_mask;
+ read_unlock_bh(&x25_neigh_list_lock);
+ rc = copy_to_user(arg, &x25_subscr,
+ sizeof(x25_subscr)) ? -EFAULT : 0;
+ } else {
+ rc = -EINVAL;
+ if (!(x25_subscr.extended && x25_subscr.extended != 1)) {
+ rc = 0;
+ write_lock_bh(&x25_neigh_list_lock);
+ nb->extended = x25_subscr.extended;
+ nb->global_facil_mask = x25_subscr.global_facil_mask;
+ write_unlock_bh(&x25_neigh_list_lock);
+ }
+ }
+ x25_neigh_put(nb);
+out:
+ return rc;
+out_dev_put:
+ dev_put(dev);
+ goto out;
+}
+
+
+/*
+ * Release all memory associated with X.25 neighbour structures.
+ */
+void __exit x25_link_free(void)
+{
+ struct x25_neigh *nb;
+ struct list_head *entry, *tmp;
+
+ write_lock_bh(&x25_neigh_list_lock);
+
+ list_for_each_safe(entry, tmp, &x25_neigh_list) {
+ struct net_device *dev;
+
+ nb = list_entry(entry, struct x25_neigh, node);
+ dev = nb->dev;
+ __x25_remove_neigh(nb);
+ dev_put(dev);
+ }
+ write_unlock_bh(&x25_neigh_list_lock);
+}
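
x25_transmit_restart_request(), x25_transmit_clear_request() and the per-socket x25_write_internal() all start a packet with the same two-octet header: the upper four bits of the LCI share the first octet with the General Format Identifier, and the lower eight LCI bits fill the second octet. A throwaway sketch of just that encoding, with illustrative GFI constants rather than the kernel's definitions:

    /* Sketch of the two-octet GFI/LCI header built above.  The GFI values
     * are example numbers standing in for the modulo-8/modulo-128 markers.
     */
    #include <stdio.h>

    #define GFI_STDSEQ 0x10   /* modulo-8 sequencing   (example value) */
    #define GFI_EXTSEQ 0x20   /* modulo-128 sequencing (example value) */

    static void encode_lci(unsigned char hdr[2], unsigned int lci, int extended)
    {
            hdr[0] = (unsigned char)(((lci >> 8) & 0x0F) |
                                     (extended ? GFI_EXTSEQ : GFI_STDSEQ));
            hdr[1] = (unsigned char)(lci & 0xFF);
    }

    int main(void)
    {
            unsigned char hdr[2];

            encode_lci(hdr, 0x123, 0);
            printf("%02X %02X\n", hdr[0], hdr[1]);   /* prints: 11 23 */
            return 0;
    }
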
diff --git a/net/x25/x25_out.c b/net/x25/x25_out.c
new file mode 100644
index 000000000..dbc0940bf
--- /dev/null
+++ b/net/x25/x25_out.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * X.25 Packet Layer release 002
+ *
+ * This is ALPHA test software. This code may break your machine,
+ * randomly fail to work with new releases, misbehave and/or generally
+ * screw up. It might even work.
+ *
+ * This code REQUIRES 2.1.15 or higher
+ *
+ * History
+ * X.25 001 Jonathan Naylor Started coding.
+ * X.25 002 Jonathan Naylor New timer architecture.
+ * 2000-09-04 Henner Eisen Prevented x25_output() skb leakage.
+ * 2000-10-27 Henner Eisen MSG_DONTWAIT for fragment allocation.
+ * 2000-11-10 Henner Eisen x25_send_iframe(): re-queued frames
+ * needed cleaned seq-number fields.
+ */
+
+#include <linux/slab.h>
+#include <linux/socket.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <net/x25.h>
+
+static int x25_pacsize_to_bytes(unsigned int pacsize)
+{
+ int bytes = 1;
+
+ if (!pacsize)
+ return 128;
+
+ while (pacsize-- > 0)
+ bytes *= 2;
+
+ return bytes;
+}
+
+/*
+ * This is where all X.25 information frames pass.
+ *
+ * Returns the amount of user data bytes sent on success
+ * or a negative error code on failure.
+ */
+int x25_output(struct sock *sk, struct sk_buff *skb)
+{
+ struct sk_buff *skbn;
+ unsigned char header[X25_EXT_MIN_LEN];
+ int err, frontlen, len;
+ int sent=0, noblock = X25_SKB_CB(skb)->flags & MSG_DONTWAIT;
+ struct x25_sock *x25 = x25_sk(sk);
+ int header_len = x25->neighbour->extended ? X25_EXT_MIN_LEN :
+ X25_STD_MIN_LEN;
+ int max_len = x25_pacsize_to_bytes(x25->facilities.pacsize_out);
+
+ if (skb->len - header_len > max_len) {
+ /* Save a copy of the Header */
+ skb_copy_from_linear_data(skb, header, header_len);
+ skb_pull(skb, header_len);
+
+ frontlen = skb_headroom(skb);
+
+ while (skb->len > 0) {
+ release_sock(sk);
+ skbn = sock_alloc_send_skb(sk, frontlen + max_len,
+ noblock, &err);
+ lock_sock(sk);
+ if (!skbn) {
+ if (err == -EWOULDBLOCK && noblock){
+ kfree_skb(skb);
+ return sent;
+ }
+ SOCK_DEBUG(sk, "x25_output: fragment alloc"
+ " failed, err=%d, %d bytes "
+ "sent\n", err, sent);
+ return err;
+ }
+
+ skb_reserve(skbn, frontlen);
+
+ len = max_len > skb->len ? skb->len : max_len;
+
+ /* Copy the user data */
+ skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
+ skb_pull(skb, len);
+
+ /* Duplicate the Header */
+ skb_push(skbn, header_len);
+ skb_copy_to_linear_data(skbn, header, header_len);
+
+ if (skb->len > 0) {
+ if (x25->neighbour->extended)
+ skbn->data[3] |= X25_EXT_M_BIT;
+ else
+ skbn->data[2] |= X25_STD_M_BIT;
+ }
+
+ skb_queue_tail(&sk->sk_write_queue, skbn);
+ sent += len;
+ }
+
+ kfree_skb(skb);
+ } else {
+ skb_queue_tail(&sk->sk_write_queue, skb);
+ sent = skb->len - header_len;
+ }
+ return sent;
+}
+
+/*
+ * This procedure is passed a buffer descriptor for an iframe. It builds
+ * the rest of the control part of the frame and then writes it out.
+ */
+static void x25_send_iframe(struct sock *sk, struct sk_buff *skb)
+{
+ struct x25_sock *x25 = x25_sk(sk);
+
+ if (!skb)
+ return;
+
+ if (x25->neighbour->extended) {
+ skb->data[2] = (x25->vs << 1) & 0xFE;
+ skb->data[3] &= X25_EXT_M_BIT;
+ skb->data[3] |= (x25->vr << 1) & 0xFE;
+ } else {
+ skb->data[2] &= X25_STD_M_BIT;
+ skb->data[2] |= (x25->vs << 1) & 0x0E;
+ skb->data[2] |= (x25->vr << 5) & 0xE0;
+ }
+
+ x25_transmit_link(skb, x25->neighbour);
+}
+
+void x25_kick(struct sock *sk)
+{
+ struct sk_buff *skb, *skbn;
+ unsigned short start, end;
+ int modulus;
+ struct x25_sock *x25 = x25_sk(sk);
+
+ if (x25->state != X25_STATE_3)
+ return;
+
+ /*
+ * Transmit interrupt data.
+ */
+ if (skb_peek(&x25->interrupt_out_queue) != NULL &&
+ !test_and_set_bit(X25_INTERRUPT_FLAG, &x25->flags)) {
+
+ skb = skb_dequeue(&x25->interrupt_out_queue);
+ x25_transmit_link(skb, x25->neighbour);
+ }
+
+ if (x25->condition & X25_COND_PEER_RX_BUSY)
+ return;
+
+ if (!skb_peek(&sk->sk_write_queue))
+ return;
+
+ modulus = x25->neighbour->extended ? X25_EMODULUS : X25_SMODULUS;
+
+ start = skb_peek(&x25->ack_queue) ? x25->vs : x25->va;
+ end = (x25->va + x25->facilities.winsize_out) % modulus;
+
+ if (start == end)
+ return;
+
+ x25->vs = start;
+
+ /*
+ * Transmit data until either we're out of data to send or
+ * the window is full.
+ */
+
+ skb = skb_dequeue(&sk->sk_write_queue);
+
+ do {
+ if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
+ skb_queue_head(&sk->sk_write_queue, skb);
+ break;
+ }
+
+ skb_set_owner_w(skbn, sk);
+
+ /*
+ * Transmit the frame copy.
+ */
+ x25_send_iframe(sk, skbn);
+
+ x25->vs = (x25->vs + 1) % modulus;
+
+ /*
+ * Requeue the original data frame.
+ */
+ skb_queue_tail(&x25->ack_queue, skb);
+
+ } while (x25->vs != end &&
+ (skb = skb_dequeue(&sk->sk_write_queue)) != NULL);
+
+ x25->vl = x25->vr;
+ x25->condition &= ~X25_COND_ACK_PENDING;
+
+ x25_stop_timer(sk);
+}
+
+/*
+ * The following routines are taken from page 170 of the 7th ARRL Computer
+ * Networking Conference paper, as is the whole state machine.
+ */
+
+void x25_enquiry_response(struct sock *sk)
+{
+ struct x25_sock *x25 = x25_sk(sk);
+
+ if (x25->condition & X25_COND_OWN_RX_BUSY)
+ x25_write_internal(sk, X25_RNR);
+ else
+ x25_write_internal(sk, X25_RR);
+
+ x25->vl = x25->vr;
+ x25->condition &= ~X25_COND_ACK_PENDING;
+
+ x25_stop_timer(sk);
+}
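
x25_pacsize_to_bytes() above treats the negotiated packet-size facility as a base-2 exponent, with 0 falling back to 128 bytes; x25_output() then splits anything longer than that into fragments and sets the More bit on every fragment except the last. The same arithmetic in a throwaway form, showing how many fragments a given payload would need:

    /* Same arithmetic as x25_pacsize_to_bytes(): the facility value is a
     * base-2 exponent, with 0 treated as the 128-byte default.
     */
    #include <stdio.h>

    static int pacsize_to_bytes(unsigned int pacsize)
    {
            int bytes = 1;

            if (!pacsize)
                    return 128;
            while (pacsize-- > 0)
                    bytes *= 2;
            return bytes;
    }

    int main(void)
    {
            int payload = 1000;

            for (unsigned int p = 4; p <= 10; p++) {
                    int max = pacsize_to_bytes(p);
                    int frags = (payload + max - 1) / max;

                    printf("pacsize %2u -> %4d bytes, %d fragments for %d bytes\n",
                           p, max, frags, payload);
            }
            return 0;
    }
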
diff --git a/net/x25/x25_proc.c b/net/x25/x25_proc.c
new file mode 100644
index 000000000..0412814a2
--- /dev/null
+++ b/net/x25/x25_proc.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * X.25 Packet Layer release 002
+ *
+ * This is ALPHA test software. This code may break your machine,
+ * randomly fail to work with new releases, misbehave and/or generally
+ * screw up. It might even work.
+ *
+ * This code REQUIRES 2.4 with seq_file support
+ *
+ * History
+ * 2002/10/06 Arnaldo Carvalho de Melo seq_file support
+ */
+
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/export.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+#include <net/x25.h>
+
+#ifdef CONFIG_PROC_FS
+
+static void *x25_seq_route_start(struct seq_file *seq, loff_t *pos)
+ __acquires(x25_route_list_lock)
+{
+ read_lock_bh(&x25_route_list_lock);
+ return seq_list_start_head(&x25_route_list, *pos);
+}
+
+static void *x25_seq_route_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ return seq_list_next(v, &x25_route_list, pos);
+}
+
+static void x25_seq_route_stop(struct seq_file *seq, void *v)
+ __releases(x25_route_list_lock)
+{
+ read_unlock_bh(&x25_route_list_lock);
+}
+
+static int x25_seq_route_show(struct seq_file *seq, void *v)
+{
+ struct x25_route *rt = list_entry(v, struct x25_route, node);
+
+ if (v == &x25_route_list) {
+ seq_puts(seq, "Address Digits Device\n");
+ goto out;
+ }
+
+ rt = v;
+ seq_printf(seq, "%-15s %-6d %-5s\n",
+ rt->address.x25_addr, rt->sigdigits,
+ rt->dev ? rt->dev->name : "???");
+out:
+ return 0;
+}
+
+static void *x25_seq_socket_start(struct seq_file *seq, loff_t *pos)
+ __acquires(x25_list_lock)
+{
+ read_lock_bh(&x25_list_lock);
+ return seq_hlist_start_head(&x25_list, *pos);
+}
+
+static void *x25_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ return seq_hlist_next(v, &x25_list, pos);
+}
+
+static void x25_seq_socket_stop(struct seq_file *seq, void *v)
+ __releases(x25_list_lock)
+{
+ read_unlock_bh(&x25_list_lock);
+}
+
+static int x25_seq_socket_show(struct seq_file *seq, void *v)
+{
+ struct sock *s;
+ struct x25_sock *x25;
+ const char *devname;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_printf(seq, "dest_addr src_addr dev lci st vs vr "
+ "va t t2 t21 t22 t23 Snd-Q Rcv-Q inode\n");
+ goto out;
+ }
+
+ s = sk_entry(v);
+ x25 = x25_sk(s);
+
+ if (!x25->neighbour || !x25->neighbour->dev)
+ devname = "???";
+ else
+ devname = x25->neighbour->dev->name;
+
+ seq_printf(seq, "%-10s %-10s %-5s %3.3X %d %d %d %d %3lu %3lu "
+ "%3lu %3lu %3lu %5d %5d %ld\n",
+ !x25->dest_addr.x25_addr[0] ? "*" : x25->dest_addr.x25_addr,
+ !x25->source_addr.x25_addr[0] ? "*" : x25->source_addr.x25_addr,
+ devname, x25->lci & 0x0FFF, x25->state, x25->vs, x25->vr,
+ x25->va, x25_display_timer(s) / HZ, x25->t2 / HZ,
+ x25->t21 / HZ, x25->t22 / HZ, x25->t23 / HZ,
+ sk_wmem_alloc_get(s),
+ sk_rmem_alloc_get(s),
+ s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
+out:
+ return 0;
+}
+
+static void *x25_seq_forward_start(struct seq_file *seq, loff_t *pos)
+ __acquires(x25_forward_list_lock)
+{
+ read_lock_bh(&x25_forward_list_lock);
+ return seq_list_start_head(&x25_forward_list, *pos);
+}
+
+static void *x25_seq_forward_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ return seq_list_next(v, &x25_forward_list, pos);
+}
+
+static void x25_seq_forward_stop(struct seq_file *seq, void *v)
+ __releases(x25_forward_list_lock)
+{
+ read_unlock_bh(&x25_forward_list_lock);
+}
+
+static int x25_seq_forward_show(struct seq_file *seq, void *v)
+{
+ struct x25_forward *f = list_entry(v, struct x25_forward, node);
+
+ if (v == &x25_forward_list) {
+ seq_printf(seq, "lci dev1 dev2\n");
+ goto out;
+ }
+
+ f = v;
+
+ seq_printf(seq, "%d %-10s %-10s\n",
+ f->lci, f->dev1->name, f->dev2->name);
+out:
+ return 0;
+}
+
+static const struct seq_operations x25_seq_route_ops = {
+ .start = x25_seq_route_start,
+ .next = x25_seq_route_next,
+ .stop = x25_seq_route_stop,
+ .show = x25_seq_route_show,
+};
+
+static const struct seq_operations x25_seq_socket_ops = {
+ .start = x25_seq_socket_start,
+ .next = x25_seq_socket_next,
+ .stop = x25_seq_socket_stop,
+ .show = x25_seq_socket_show,
+};
+
+static const struct seq_operations x25_seq_forward_ops = {
+ .start = x25_seq_forward_start,
+ .next = x25_seq_forward_next,
+ .stop = x25_seq_forward_stop,
+ .show = x25_seq_forward_show,
+};
+
+int __init x25_proc_init(void)
+{
+ if (!proc_mkdir("x25", init_net.proc_net))
+ return -ENOMEM;
+
+ if (!proc_create_seq("x25/route", 0444, init_net.proc_net,
+ &x25_seq_route_ops))
+ goto out;
+
+ if (!proc_create_seq("x25/socket", 0444, init_net.proc_net,
+ &x25_seq_socket_ops))
+ goto out;
+
+ if (!proc_create_seq("x25/forward", 0444, init_net.proc_net,
+ &x25_seq_forward_ops))
+ goto out;
+ return 0;
+
+out:
+ remove_proc_subtree("x25", init_net.proc_net);
+ return -ENOMEM;
+}
+
+void __exit x25_proc_exit(void)
+{
+ remove_proc_subtree("x25", init_net.proc_net);
+}
+
+#else /* CONFIG_PROC_FS */
+
+int __init x25_proc_init(void)
+{
+ return 0;
+}
+
+void __exit x25_proc_exit(void)
+{
+}
+#endif /* CONFIG_PROC_FS */
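
Since x25_proc_init() above registers "x25/route", "x25/socket" and "x25/forward" under the init_net proc_net directory, the tables should appear as /proc/net/x25/* on an X.25-enabled kernel. A minimal userspace reader for one of them, assuming that path exists:

    /* Minimal reader for one of the seq files registered above.  Assumes a
     * kernel with CONFIG_X25 and CONFIG_PROC_FS, which exposes
     * /proc/net/x25/route via proc_create_seq() in x25_proc_init().
     */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/net/x25/route", "r");
            char line[256];

            if (!f) {
                    perror("/proc/net/x25/route");
                    return 1;
            }
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);  /* "Address Digits Device" + rows */
            fclose(f);
            return 0;
    }
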
diff --git a/net/x25/x25_route.c b/net/x25/x25_route.c
new file mode 100644
index 000000000..647f325ed
--- /dev/null
+++ b/net/x25/x25_route.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * X.25 Packet Layer release 002
+ *
+ * This is ALPHA test software. This code may break your machine,
+ * randomly fail to work with new releases, misbehave and/or generally
+ * screw up. It might even work.
+ *
+ * This code REQUIRES 2.1.15 or higher
+ *
+ * History
+ * X.25 001 Jonathan Naylor Started coding.
+ */
+
+#include <linux/if_arp.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <net/x25.h>
+
+LIST_HEAD(x25_route_list);
+DEFINE_RWLOCK(x25_route_list_lock);
+
+/*
+ * Add a new route.
+ */
+static int x25_add_route(struct x25_address *address, unsigned int sigdigits,
+ struct net_device *dev)
+{
+ struct x25_route *rt;
+ int rc = -EINVAL;
+
+ write_lock_bh(&x25_route_list_lock);
+
+ list_for_each_entry(rt, &x25_route_list, node) {
+ if (!memcmp(&rt->address, address, sigdigits) &&
+ rt->sigdigits == sigdigits)
+ goto out;
+ }
+
+ rt = kmalloc(sizeof(*rt), GFP_ATOMIC);
+ rc = -ENOMEM;
+ if (!rt)
+ goto out;
+
+ strcpy(rt->address.x25_addr, "000000000000000");
+ memcpy(rt->address.x25_addr, address->x25_addr, sigdigits);
+
+ rt->sigdigits = sigdigits;
+ rt->dev = dev;
+ refcount_set(&rt->refcnt, 1);
+
+ list_add(&rt->node, &x25_route_list);
+ rc = 0;
+out:
+ write_unlock_bh(&x25_route_list_lock);
+ return rc;
+}
+
+/**
+ * __x25_remove_route - remove route from x25_route_list
+ * @rt: route to remove
+ *
+ * Remove route from x25_route_list, if it was there.
+ * Caller must hold x25_route_list_lock.
+ */
+static void __x25_remove_route(struct x25_route *rt)
+{
+ if (rt->node.next) {
+ list_del(&rt->node);
+ x25_route_put(rt);
+ }
+}
+
+static int x25_del_route(struct x25_address *address, unsigned int sigdigits,
+ struct net_device *dev)
+{
+ struct x25_route *rt;
+ int rc = -EINVAL;
+
+ write_lock_bh(&x25_route_list_lock);
+
+ list_for_each_entry(rt, &x25_route_list, node) {
+ if (!memcmp(&rt->address, address, sigdigits) &&
+ rt->sigdigits == sigdigits && rt->dev == dev) {
+ __x25_remove_route(rt);
+ rc = 0;
+ break;
+ }
+ }
+
+ write_unlock_bh(&x25_route_list_lock);
+ return rc;
+}
+
+/*
+ * A device has been removed, remove its routes.
+ */
+void x25_route_device_down(struct net_device *dev)
+{
+ struct x25_route *rt;
+ struct list_head *entry, *tmp;
+
+ write_lock_bh(&x25_route_list_lock);
+
+ list_for_each_safe(entry, tmp, &x25_route_list) {
+ rt = list_entry(entry, struct x25_route, node);
+
+ if (rt->dev == dev)
+ __x25_remove_route(rt);
+ }
+ write_unlock_bh(&x25_route_list_lock);
+}
+
+/*
+ * Check that the device given is a valid X.25 interface that is "up".
+ */
+struct net_device *x25_dev_get(char *devname)
+{
+ struct net_device *dev = dev_get_by_name(&init_net, devname);
+
+ if (dev && (!(dev->flags & IFF_UP) || dev->type != ARPHRD_X25)) {
+ dev_put(dev);
+ dev = NULL;
+ }
+
+ return dev;
+}
+
+/**
+ * x25_get_route - Find a route given an X.25 address.
+ * @addr: - address to find a route for
+ *
+ * Find a route given an X.25 address.
+ */
+struct x25_route *x25_get_route(struct x25_address *addr)
+{
+ struct x25_route *rt, *use = NULL;
+
+ read_lock_bh(&x25_route_list_lock);
+
+ list_for_each_entry(rt, &x25_route_list, node) {
+ if (!memcmp(&rt->address, addr, rt->sigdigits)) {
+ if (!use)
+ use = rt;
+ else if (rt->sigdigits > use->sigdigits)
+ use = rt;
+ }
+ }
+
+ if (use)
+ x25_route_hold(use);
+
+ read_unlock_bh(&x25_route_list_lock);
+ return use;
+}
+
+/*
+ * Handle the ioctls that control the routing functions.
+ */
+int x25_route_ioctl(unsigned int cmd, void __user *arg)
+{
+ struct x25_route_struct rt;
+ struct net_device *dev;
+ int rc = -EINVAL;
+
+ if (cmd != SIOCADDRT && cmd != SIOCDELRT)
+ goto out;
+
+ rc = -EFAULT;
+ if (copy_from_user(&rt, arg, sizeof(rt)))
+ goto out;
+
+ rc = -EINVAL;
+ if (rt.sigdigits > 15)
+ goto out;
+
+ dev = x25_dev_get(rt.device);
+ if (!dev)
+ goto out;
+
+ if (cmd == SIOCADDRT)
+ rc = x25_add_route(&rt.address, rt.sigdigits, dev);
+ else
+ rc = x25_del_route(&rt.address, rt.sigdigits, dev);
+ dev_put(dev);
+out:
+ return rc;
+}
+
+/*
+ * Release all memory associated with X.25 routing structures.
+ */
+void __exit x25_route_free(void)
+{
+ struct x25_route *rt;
+ struct list_head *entry, *tmp;
+
+ write_lock_bh(&x25_route_list_lock);
+ list_for_each_safe(entry, tmp, &x25_route_list) {
+ rt = list_entry(entry, struct x25_route, node);
+ __x25_remove_route(rt);
+ }
+ write_unlock_bh(&x25_route_list_lock);
+}
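
x25_get_route() above compares only the significant digits of each route against the destination address and, when several match, keeps the route with the most significant digits. A userspace sketch of the same selection rule, using plain digit strings instead of struct x25_address:

    /* Sketch of the selection rule in x25_get_route(): among the routes
     * whose significant digits prefix-match the destination, the one with
     * the most significant digits wins.  Device names are invented.
     */
    #include <stdio.h>
    #include <string.h>

    struct route {
            const char *prefix;     /* significant digits only */
            const char *dev;
    };

    static const char *pick_route(const struct route *tbl, size_t n,
                                  const char *addr)
    {
            const struct route *best = NULL;

            for (size_t i = 0; i < n; i++) {
                    size_t len = strlen(tbl[i].prefix);

                    if (strncmp(addr, tbl[i].prefix, len) != 0)
                            continue;
                    if (!best || len > strlen(best->prefix))
                            best = &tbl[i];
            }
            return best ? best->dev : NULL;
    }

    int main(void)
    {
            struct route tbl[] = {
                    { "2342",   "x25a" },   /* 4 significant digits */
                    { "234219", "x25b" },   /* 6 significant digits, preferred */
            };

            printf("%s\n", pick_route(tbl, 2, "234219200204"));  /* x25b */
            return 0;
    }
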
diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c
new file mode 100644
index 000000000..0285aaa1e
--- /dev/null
+++ b/net/x25/x25_subr.c
@@ -0,0 +1,384 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * X.25 Packet Layer release 002
+ *
+ * This is ALPHA test software. This code may break your machine,
+ * randomly fail to work with new releases, misbehave and/or generally
+ * screw up. It might even work.
+ *
+ * This code REQUIRES 2.1.15 or higher
+ *
+ * History
+ * X.25 001 Jonathan Naylor Started coding.
+ * X.25 002 Jonathan Naylor Centralised disconnection processing.
+ * mar/20/00 Daniela Squassoni Disabling/enabling of facilities
+ * negotiation.
+ * jun/24/01 Arnaldo C. Melo use skb_queue_purge, cleanups
+ * apr/04/15 Shaun Pereira Fast select with no
+ * restriction on response.
+ */
+
+#define pr_fmt(fmt) "X25: " fmt
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <net/tcp_states.h>
+#include <net/x25.h>
+
+/*
+ * This routine purges all of the queues of frames.
+ */
+void x25_clear_queues(struct sock *sk)
+{
+ struct x25_sock *x25 = x25_sk(sk);
+
+ skb_queue_purge(&sk->sk_write_queue);
+ skb_queue_purge(&x25->ack_queue);
+ skb_queue_purge(&x25->interrupt_in_queue);
+ skb_queue_purge(&x25->interrupt_out_queue);
+ skb_queue_purge(&x25->fragment_queue);
+}
+
+
+/*
+ * This routine purges the ack queue of those frames that have been
+ * acknowledged. This replaces the boxes labelled "V(a) <- N(r)" on the
+ * SDL diagram.
+ */
+void x25_frames_acked(struct sock *sk, unsigned short nr)
+{
+ struct sk_buff *skb;
+ struct x25_sock *x25 = x25_sk(sk);
+ int modulus = x25->neighbour->extended ? X25_EMODULUS : X25_SMODULUS;
+
+ /*
+ * Remove all the ack-ed frames from the ack queue.
+ */
+ if (x25->va != nr)
+ while (skb_peek(&x25->ack_queue) && x25->va != nr) {
+ skb = skb_dequeue(&x25->ack_queue);
+ kfree_skb(skb);
+ x25->va = (x25->va + 1) % modulus;
+ }
+}
+
+void x25_requeue_frames(struct sock *sk)
+{
+ struct sk_buff *skb, *skb_prev = NULL;
+
+ /*
+ * Requeue all the un-ack-ed frames on the output queue to be picked
+ * up by x25_kick. This arrangement handles the possibility of an empty
+ * output queue.
+ */
+ while ((skb = skb_dequeue(&x25_sk(sk)->ack_queue)) != NULL) {
+ if (!skb_prev)
+ skb_queue_head(&sk->sk_write_queue, skb);
+ else
+ skb_append(skb_prev, skb, &sk->sk_write_queue);
+ skb_prev = skb;
+ }
+}
+
+/*
+ * Validate that the value of nr is between va and vs. Returns true if
+ * it is, false otherwise.
+ */
+int x25_validate_nr(struct sock *sk, unsigned short nr)
+{
+ struct x25_sock *x25 = x25_sk(sk);
+ unsigned short vc = x25->va;
+ int modulus = x25->neighbour->extended ? X25_EMODULUS : X25_SMODULUS;
+
+ while (vc != x25->vs) {
+ if (nr == vc)
+ return 1;
+ vc = (vc + 1) % modulus;
+ }
+
+ return nr == x25->vs ? 1 : 0;
+}
+
+/*
+ * This routine is called when the packet layer internally generates a
+ * control frame.
+ */
+void x25_write_internal(struct sock *sk, int frametype)
+{
+ struct x25_sock *x25 = x25_sk(sk);
+ struct sk_buff *skb;
+ unsigned char *dptr;
+ unsigned char facilities[X25_MAX_FAC_LEN];
+ unsigned char addresses[1 + X25_ADDR_LEN];
+ unsigned char lci1, lci2;
+ /*
+ * Default safe frame size.
+ */
+ int len = X25_MAX_L2_LEN + X25_EXT_MIN_LEN;
+
+ /*
+ * Adjust frame size.
+ */
+ switch (frametype) {
+ case X25_CALL_REQUEST:
+ len += 1 + X25_ADDR_LEN + X25_MAX_FAC_LEN + X25_MAX_CUD_LEN;
+ break;
+ case X25_CALL_ACCEPTED: /* fast sel with no restr on resp */
+ if (x25->facilities.reverse & 0x80) {
+ len += 1 + X25_MAX_FAC_LEN + X25_MAX_CUD_LEN;
+ } else {
+ len += 1 + X25_MAX_FAC_LEN;
+ }
+ break;
+ case X25_CLEAR_REQUEST:
+ case X25_RESET_REQUEST:
+ len += 2;
+ break;
+ case X25_RR:
+ case X25_RNR:
+ case X25_REJ:
+ case X25_CLEAR_CONFIRMATION:
+ case X25_INTERRUPT_CONFIRMATION:
+ case X25_RESET_CONFIRMATION:
+ break;
+ default:
+ pr_err("invalid frame type %02X\n", frametype);
+ return;
+ }
+
+ if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
+ return;
+
+ /*
+ * Space for Ethernet and 802.2 LLC headers.
+ */
+ skb_reserve(skb, X25_MAX_L2_LEN);
+
+ /*
+ * Make space for the GFI and LCI, and fill them in.
+ */
+ dptr = skb_put(skb, 2);
+
+ lci1 = (x25->lci >> 8) & 0x0F;
+ lci2 = (x25->lci >> 0) & 0xFF;
+
+ if (x25->neighbour->extended) {
+ *dptr++ = lci1 | X25_GFI_EXTSEQ;
+ *dptr++ = lci2;
+ } else {
+ *dptr++ = lci1 | X25_GFI_STDSEQ;
+ *dptr++ = lci2;
+ }
+
+ /*
+ * Now fill in the frame type specific information.
+ */
+ switch (frametype) {
+
+ case X25_CALL_REQUEST:
+ dptr = skb_put(skb, 1);
+ *dptr++ = X25_CALL_REQUEST;
+ len = x25_addr_aton(addresses, &x25->dest_addr,
+ &x25->source_addr);
+ skb_put_data(skb, addresses, len);
+ len = x25_create_facilities(facilities,
+ &x25->facilities,
+ &x25->dte_facilities,
+ x25->neighbour->global_facil_mask);
+ skb_put_data(skb, facilities, len);
+ skb_put_data(skb, x25->calluserdata.cuddata,
+ x25->calluserdata.cudlength);
+ x25->calluserdata.cudlength = 0;
+ break;
+
+ case X25_CALL_ACCEPTED:
+ dptr = skb_put(skb, 2);
+ *dptr++ = X25_CALL_ACCEPTED;
+ *dptr++ = 0x00; /* Address lengths */
+ len = x25_create_facilities(facilities,
+ &x25->facilities,
+ &x25->dte_facilities,
+ x25->vc_facil_mask);
+ skb_put_data(skb, facilities, len);
+
+		/* Fast select with no restriction on response
+		 * allows call user data. Userland must
+		 * ensure it is ours and not theirs.
+		 */
+ if(x25->facilities.reverse & 0x80) {
+ skb_put_data(skb,
+ x25->calluserdata.cuddata,
+ x25->calluserdata.cudlength);
+ }
+ x25->calluserdata.cudlength = 0;
+ break;
+
+ case X25_CLEAR_REQUEST:
+ dptr = skb_put(skb, 3);
+ *dptr++ = frametype;
+ *dptr++ = x25->causediag.cause;
+ *dptr++ = x25->causediag.diagnostic;
+ break;
+
+ case X25_RESET_REQUEST:
+ dptr = skb_put(skb, 3);
+ *dptr++ = frametype;
+ *dptr++ = 0x00; /* XXX */
+ *dptr++ = 0x00; /* XXX */
+ break;
+
+ case X25_RR:
+ case X25_RNR:
+ case X25_REJ:
+ if (x25->neighbour->extended) {
+ dptr = skb_put(skb, 2);
+ *dptr++ = frametype;
+ *dptr++ = (x25->vr << 1) & 0xFE;
+ } else {
+ dptr = skb_put(skb, 1);
+ *dptr = frametype;
+ *dptr++ |= (x25->vr << 5) & 0xE0;
+ }
+ break;
+
+ case X25_CLEAR_CONFIRMATION:
+ case X25_INTERRUPT_CONFIRMATION:
+ case X25_RESET_CONFIRMATION:
+ dptr = skb_put(skb, 1);
+ *dptr = frametype;
+ break;
+ }
+
+ x25_transmit_link(skb, x25->neighbour);
+}
+
+/*
+ * Unpick the contents of the passed X.25 Packet Layer frame.
+ */
+int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
+ int *d, int *m)
+{
+ struct x25_sock *x25 = x25_sk(sk);
+ unsigned char *frame;
+
+ if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
+ return X25_ILLEGAL;
+ frame = skb->data;
+
+ *ns = *nr = *q = *d = *m = 0;
+
+ switch (frame[2]) {
+ case X25_CALL_REQUEST:
+ case X25_CALL_ACCEPTED:
+ case X25_CLEAR_REQUEST:
+ case X25_CLEAR_CONFIRMATION:
+ case X25_INTERRUPT:
+ case X25_INTERRUPT_CONFIRMATION:
+ case X25_RESET_REQUEST:
+ case X25_RESET_CONFIRMATION:
+ case X25_RESTART_REQUEST:
+ case X25_RESTART_CONFIRMATION:
+ case X25_REGISTRATION_REQUEST:
+ case X25_REGISTRATION_CONFIRMATION:
+ case X25_DIAGNOSTIC:
+ return frame[2];
+ }
+
+ if (x25->neighbour->extended) {
+ if (frame[2] == X25_RR ||
+ frame[2] == X25_RNR ||
+ frame[2] == X25_REJ) {
+ if (!pskb_may_pull(skb, X25_EXT_MIN_LEN))
+ return X25_ILLEGAL;
+ frame = skb->data;
+
+ *nr = (frame[3] >> 1) & 0x7F;
+ return frame[2];
+ }
+ } else {
+ if ((frame[2] & 0x1F) == X25_RR ||
+ (frame[2] & 0x1F) == X25_RNR ||
+ (frame[2] & 0x1F) == X25_REJ) {
+ *nr = (frame[2] >> 5) & 0x07;
+ return frame[2] & 0x1F;
+ }
+ }
+
+ if (x25->neighbour->extended) {
+ if ((frame[2] & 0x01) == X25_DATA) {
+ if (!pskb_may_pull(skb, X25_EXT_MIN_LEN))
+ return X25_ILLEGAL;
+ frame = skb->data;
+
+ *q = (frame[0] & X25_Q_BIT) == X25_Q_BIT;
+ *d = (frame[0] & X25_D_BIT) == X25_D_BIT;
+ *m = (frame[3] & X25_EXT_M_BIT) == X25_EXT_M_BIT;
+ *nr = (frame[3] >> 1) & 0x7F;
+ *ns = (frame[2] >> 1) & 0x7F;
+ return X25_DATA;
+ }
+ } else {
+ if ((frame[2] & 0x01) == X25_DATA) {
+ *q = (frame[0] & X25_Q_BIT) == X25_Q_BIT;
+ *d = (frame[0] & X25_D_BIT) == X25_D_BIT;
+ *m = (frame[2] & X25_STD_M_BIT) == X25_STD_M_BIT;
+ *nr = (frame[2] >> 5) & 0x07;
+ *ns = (frame[2] >> 1) & 0x07;
+ return X25_DATA;
+ }
+ }
+
+ pr_debug("invalid PLP frame %3ph\n", frame);
+
+ return X25_ILLEGAL;
+}
+
+void x25_disconnect(struct sock *sk, int reason, unsigned char cause,
+ unsigned char diagnostic)
+{
+ struct x25_sock *x25 = x25_sk(sk);
+
+ x25_clear_queues(sk);
+ x25_stop_timer(sk);
+
+ x25->lci = 0;
+ x25->state = X25_STATE_0;
+
+ x25->causediag.cause = cause;
+ x25->causediag.diagnostic = diagnostic;
+
+ sk->sk_state = TCP_CLOSE;
+ sk->sk_err = reason;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
+
+ if (!sock_flag(sk, SOCK_DEAD)) {
+ sk->sk_state_change(sk);
+ sock_set_flag(sk, SOCK_DEAD);
+ }
+ if (x25->neighbour) {
+ read_lock_bh(&x25_list_lock);
+ x25_neigh_put(x25->neighbour);
+ x25->neighbour = NULL;
+ read_unlock_bh(&x25_list_lock);
+ }
+}
+
+/*
+ * Clear an own-rx-busy condition and tell the peer about this, provided
+ * that there is a significant amount of free receive buffer space available.
+ */
+void x25_check_rbuf(struct sock *sk)
+{
+ struct x25_sock *x25 = x25_sk(sk);
+
+ if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf >> 1) &&
+ (x25->condition & X25_COND_OWN_RX_BUSY)) {
+ x25->condition &= ~X25_COND_OWN_RX_BUSY;
+ x25->condition &= ~X25_COND_ACK_PENDING;
+ x25->vl = x25->vr;
+ x25_write_internal(sk, X25_RR);
+ x25_stop_timer(sk);
+ }
+}
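
x25_validate_nr() above accepts an incoming N(R) only if it lies between V(A) and V(S) inclusive, walking forward modulo the sequence-number modulus. The same check as a standalone function, with a wrap-around example:

    /* Standalone copy of the x25_validate_nr() window check: N(R) must fall
     * between V(A) and V(S) inclusive, modulo 8 (standard) or 128 (extended).
     */
    #include <stdio.h>

    static int validate_nr(int va, int vs, int nr, int modulus)
    {
            int vc = va;

            while (vc != vs) {
                    if (nr == vc)
                            return 1;
                    vc = (vc + 1) % modulus;
            }
            return nr == vs;
    }

    int main(void)
    {
            /* va=6, vs=1 wraps the modulo-8 window: 6, 7, 0 and 1 are valid. */
            printf("%d\n", validate_nr(6, 1, 7, 8));   /* 1 */
            printf("%d\n", validate_nr(6, 1, 3, 8));   /* 0 */
            return 0;
    }
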
diff --git a/net/x25/x25_timer.c b/net/x25/x25_timer.c
new file mode 100644
index 000000000..9376365cd
--- /dev/null
+++ b/net/x25/x25_timer.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * X.25 Packet Layer release 002
+ *
+ * This is ALPHA test software. This code may break your machine,
+ * randomly fail to work with new releases, misbehave and/or generally
+ * screw up. It might even work.
+ *
+ * This code REQUIRES 2.1.15 or higher
+ *
+ * History
+ * X.25 001 Jonathan Naylor Started coding.
+ * X.25 002 Jonathan Naylor New timer architecture.
+ * Centralised disconnection processing.
+ */
+
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <net/sock.h>
+#include <net/tcp_states.h>
+#include <net/x25.h>
+
+static void x25_heartbeat_expiry(struct timer_list *t);
+static void x25_timer_expiry(struct timer_list *t);
+
+void x25_init_timers(struct sock *sk)
+{
+ struct x25_sock *x25 = x25_sk(sk);
+
+ timer_setup(&x25->timer, x25_timer_expiry, 0);
+
+ /* initialized by sock_init_data */
+ sk->sk_timer.function = x25_heartbeat_expiry;
+}
+
+void x25_start_heartbeat(struct sock *sk)
+{
+ mod_timer(&sk->sk_timer, jiffies + 5 * HZ);
+}
+
+void x25_stop_heartbeat(struct sock *sk)
+{
+ del_timer(&sk->sk_timer);
+}
+
+void x25_start_t2timer(struct sock *sk)
+{
+ struct x25_sock *x25 = x25_sk(sk);
+
+ mod_timer(&x25->timer, jiffies + x25->t2);
+}
+
+void x25_start_t21timer(struct sock *sk)
+{
+ struct x25_sock *x25 = x25_sk(sk);
+
+ mod_timer(&x25->timer, jiffies + x25->t21);
+}
+
+void x25_start_t22timer(struct sock *sk)
+{
+ struct x25_sock *x25 = x25_sk(sk);
+
+ mod_timer(&x25->timer, jiffies + x25->t22);
+}
+
+void x25_start_t23timer(struct sock *sk)
+{
+ struct x25_sock *x25 = x25_sk(sk);
+
+ mod_timer(&x25->timer, jiffies + x25->t23);
+}
+
+void x25_stop_timer(struct sock *sk)
+{
+ del_timer(&x25_sk(sk)->timer);
+}
+
+unsigned long x25_display_timer(struct sock *sk)
+{
+ struct x25_sock *x25 = x25_sk(sk);
+
+ if (!timer_pending(&x25->timer))
+ return 0;
+
+ return x25->timer.expires - jiffies;
+}
+
+static void x25_heartbeat_expiry(struct timer_list *t)
+{
+ struct sock *sk = from_timer(sk, t, sk_timer);
+
+ bh_lock_sock(sk);
+ if (sock_owned_by_user(sk)) /* can currently only occur in state 3 */
+ goto restart_heartbeat;
+
+ switch (x25_sk(sk)->state) {
+
+ case X25_STATE_0:
+ /*
+ * Magic here: If we listen() and a new link dies
+ * before it is accepted() it isn't 'dead' so doesn't
+ * get removed.
+ */
+ if (sock_flag(sk, SOCK_DESTROY) ||
+ (sk->sk_state == TCP_LISTEN &&
+ sock_flag(sk, SOCK_DEAD))) {
+ bh_unlock_sock(sk);
+ x25_destroy_socket_from_timer(sk);
+ return;
+ }
+ break;
+
+ case X25_STATE_3:
+ /*
+ * Check for the state of the receive buffer.
+ */
+ x25_check_rbuf(sk);
+ break;
+ }
+restart_heartbeat:
+ x25_start_heartbeat(sk);
+ bh_unlock_sock(sk);
+}
+
+/*
+ * Timer has expired, it may have been T2, T21, T22, or T23. We can tell
+ * by the state machine state.
+ */
+static inline void x25_do_timer_expiry(struct sock * sk)
+{
+ struct x25_sock *x25 = x25_sk(sk);
+
+ switch (x25->state) {
+
+ case X25_STATE_3: /* T2 */
+ if (x25->condition & X25_COND_ACK_PENDING) {
+ x25->condition &= ~X25_COND_ACK_PENDING;
+ x25_enquiry_response(sk);
+ }
+ break;
+
+ case X25_STATE_1: /* T21 */
+ case X25_STATE_4: /* T22 */
+ x25_write_internal(sk, X25_CLEAR_REQUEST);
+ x25->state = X25_STATE_2;
+ x25_start_t23timer(sk);
+ break;
+
+ case X25_STATE_2: /* T23 */
+ x25_disconnect(sk, ETIMEDOUT, 0, 0);
+ break;
+ }
+}
+
+static void x25_timer_expiry(struct timer_list *t)
+{
+ struct x25_sock *x25 = from_timer(x25, t, timer);
+ struct sock *sk = &x25->sk;
+
+ bh_lock_sock(sk);
+ if (sock_owned_by_user(sk)) { /* can currently only occur in state 3 */
+ if (x25_sk(sk)->state == X25_STATE_3)
+ x25_start_t2timer(sk);
+ } else
+ x25_do_timer_expiry(sk);
+ bh_unlock_sock(sk);
+}
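
x25_do_timer_expiry() above reuses a single per-socket timer and lets the socket state decide which protocol timer it stands for and what expiry means: T2 in state 3 flushes a pending acknowledgement, T21/T22 in states 1 and 4 send a Clear Request and enter state 2, and T23 in state 2 disconnects with ETIMEDOUT. A quick-reference sketch of that mapping, pure lookup code with no kernel interaction:

    /* Quick reference for x25_do_timer_expiry(): which protocol timer the
     * shared timer represents in each state, and what its expiry does.
     */
    #include <stdio.h>

    enum x25_state { STATE_1 = 1, STATE_2, STATE_3, STATE_4 };

    static const char *timer_for_state(enum x25_state s)
    {
            switch (s) {
            case STATE_1: return "T21 (Call Request)  -> send Clear Request, enter state 2";
            case STATE_2: return "T23 (Clear Request) -> disconnect with ETIMEDOUT";
            case STATE_3: return "T2  (ack holdback)  -> send pending RR/RNR";
            case STATE_4: return "T22 (Reset Request) -> send Clear Request, enter state 2";
            }
            return "no timer";
    }

    int main(void)
    {
            for (int s = STATE_1; s <= STATE_4; s++)
                    printf("state %d: %s\n", s, timer_for_state((enum x25_state)s));
            return 0;
    }
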