author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /net/sctp
parent     Initial commit.
Adding upstream version 6.1.76.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'net/sctp')
-rw-r--r--  net/sctp/Kconfig                  97
-rw-r--r--  net/sctp/Makefile                 24
-rw-r--r--  net/sctp/associola.c            1729
-rw-r--r--  net/sctp/auth.c                 1089
-rw-r--r--  net/sctp/bind_addr.c             575
-rw-r--r--  net/sctp/chunk.c                 353
-rw-r--r--  net/sctp/debug.c                 170
-rw-r--r--  net/sctp/diag.c                  529
-rw-r--r--  net/sctp/endpointola.c           417
-rw-r--r--  net/sctp/input.c                1349
-rw-r--r--  net/sctp/inqueue.c               237
-rw-r--r--  net/sctp/ipv6.c                 1217
-rw-r--r--  net/sctp/objcnt.c                105
-rw-r--r--  net/sctp/offload.c               120
-rw-r--r--  net/sctp/output.c                865
-rw-r--r--  net/sctp/outqueue.c             1922
-rw-r--r--  net/sctp/primitive.c             201
-rw-r--r--  net/sctp/proc.c                  399
-rw-r--r--  net/sctp/protocol.c             1728
-rw-r--r--  net/sctp/sm_make_chunk.c        3937
-rw-r--r--  net/sctp/sm_sideeffect.c        1825
-rw-r--r--  net/sctp/sm_statefuns.c         6677
-rw-r--r--  net/sctp/sm_statetable.c        1041
-rw-r--r--  net/sctp/socket.c               9745
-rw-r--r--  net/sctp/stream.c               1087
-rw-r--r--  net/sctp/stream_interleave.c    1359
-rw-r--r--  net/sctp/stream_sched.c          275
-rw-r--r--  net/sctp/stream_sched_prio.c     346
-rw-r--r--  net/sctp/stream_sched_rr.c       196
-rw-r--r--  net/sctp/sysctl.c                633
-rw-r--r--  net/sctp/transport.c             854
-rw-r--r--  net/sctp/tsnmap.c                364
-rw-r--r--  net/sctp/ulpevent.c             1190
-rw-r--r--  net/sctp/ulpqueue.c             1132
34 files changed, 43787 insertions, 0 deletions
diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig
new file mode 100644
index 000000000..5da599ff8
--- /dev/null
+++ b/net/sctp/Kconfig
@@ -0,0 +1,97 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# SCTP configuration
+#
+
+menuconfig IP_SCTP
+ tristate "The SCTP Protocol"
+ depends on INET
+ depends on IPV6 || IPV6=n
+ select CRYPTO
+ select CRYPTO_HMAC
+ select CRYPTO_SHA1
+ select LIBCRC32C
+ select NET_UDP_TUNNEL
+ help
+ Stream Control Transmission Protocol
+
+ From RFC 2960 <http://www.ietf.org/rfc/rfc2960.txt>.
+
+ "SCTP is a reliable transport protocol operating on top of a
+ connectionless packet network such as IP. It offers the following
+ services to its users:
+
+ -- acknowledged error-free non-duplicated transfer of user data,
+ -- data fragmentation to conform to discovered path MTU size,
+ -- sequenced delivery of user messages within multiple streams,
+ with an option for order-of-arrival delivery of individual user
+ messages,
+ -- optional bundling of multiple user messages into a single SCTP
+ packet, and
+ -- network-level fault tolerance through supporting of multi-
+ homing at either or both ends of an association."
+
+ To compile this protocol support as a module, choose M here: the
+ module will be called sctp. Debug messages are handled by the
+ kernel's dynamic debugging framework.
+
+ If in doubt, say N.
+
+if IP_SCTP
+
+config SCTP_DBG_OBJCNT
+ bool "SCTP: Debug object counts"
+ depends on PROC_FS
+ help
+ If you say Y, this will enable debugging support for counting the
+ type of objects that are currently allocated. This is useful for
+ identifying memory leaks. This debug information can be viewed by
+ 'cat /proc/net/sctp/sctp_dbg_objcnt'
+
+ If unsure, say N
+
+choice
+ prompt "Default SCTP cookie HMAC encoding"
+ default SCTP_DEFAULT_COOKIE_HMAC_MD5
+ help
+ This option sets the default SCTP cookie HMAC algorithm.
+ When in doubt, select 'md5'.
+
+config SCTP_DEFAULT_COOKIE_HMAC_MD5
+ bool "Enable optional MD5 hmac cookie generation"
+ help
+ Enable optional MD5 hmac based SCTP cookie generation
+ select SCTP_COOKIE_HMAC_MD5
+
+config SCTP_DEFAULT_COOKIE_HMAC_SHA1
+ bool "Enable optional SHA1 hmac cookie generation"
+ help
+ Enable optional SHA1 hmac based SCTP cookie generation
+ select SCTP_COOKIE_HMAC_SHA1
+
+config SCTP_DEFAULT_COOKIE_HMAC_NONE
+ bool "Use no hmac alg in SCTP cookie generation"
+ help
+ Use no hmac algorithm in SCTP cookie generation
+
+endchoice
+
+config SCTP_COOKIE_HMAC_MD5
+ bool "Enable optional MD5 hmac cookie generation"
+ help
+ Enable optional MD5 hmac based SCTP cookie generation
+ select CRYPTO_HMAC if SCTP_COOKIE_HMAC_MD5
+ select CRYPTO_MD5 if SCTP_COOKIE_HMAC_MD5
+
+config SCTP_COOKIE_HMAC_SHA1
+ bool "Enable optional SHA1 hmac cookie generation"
+ help
+ Enable optional SHA1 hmac based SCTP cookie generation
+ select CRYPTO_HMAC if SCTP_COOKIE_HMAC_SHA1
+ select CRYPTO_SHA1 if SCTP_COOKIE_HMAC_SHA1
+
+config INET_SCTP_DIAG
+ depends on INET_DIAG
+ def_tristate INET_DIAG
+
+
+endif # IP_SCTP
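
The IP_SCTP help text above describes the protocol services this option enables; once it is built in (or the sctp module is loaded), those services are reached through the ordinary sockets API. The following hypothetical user-space sketch (not part of this patch) simply probes for SCTP support by opening a one-to-one (TCP-style) SCTP socket; with IP_SCTP disabled and no loadable module available, socket() is expected to fail with EPROTONOSUPPORT. The cookie HMAC default chosen by the choice block above is normally adjustable at runtime via the net.sctp.cookie_hmac_alg sysctl.

/* sctp_probe_example.c - illustrative sketch only, not from this patch */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
	/* One-to-one (TCP-style) SCTP socket, exercising "The SCTP Protocol" above. */
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);

	if (fd < 0) {
		/* Typically EPROTONOSUPPORT when IP_SCTP is off and sctp.ko is absent. */
		fprintf(stderr, "SCTP unavailable: %s\n", strerror(errno));
		return 1;
	}
	printf("SCTP socket created (fd=%d)\n", fd);
	close(fd);
	return 0;
}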
diff --git a/net/sctp/Makefile b/net/sctp/Makefile
new file mode 100644
index 000000000..e845e4588
--- /dev/null
+++ b/net/sctp/Makefile
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for SCTP support code.
+#
+
+obj-$(CONFIG_IP_SCTP) += sctp.o
+obj-$(CONFIG_INET_SCTP_DIAG) += sctp_diag.o
+
+sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \
+ protocol.o endpointola.o associola.o \
+ transport.o chunk.o sm_make_chunk.o ulpevent.o \
+ inqueue.o outqueue.o ulpqueue.o \
+ tsnmap.o bind_addr.o socket.o primitive.o \
+ output.o input.o debug.o stream.o auth.o \
+ offload.o stream_sched.o stream_sched_prio.o \
+ stream_sched_rr.o stream_interleave.o
+
+sctp_diag-y := diag.o
+
+sctp-$(CONFIG_SCTP_DBG_OBJCNT) += objcnt.o
+sctp-$(CONFIG_PROC_FS) += proc.o
+sctp-$(CONFIG_SYSCTL) += sysctl.o
+
+sctp-$(subst m,y,$(CONFIG_IPV6)) += ipv6.o
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
new file mode 100644
index 000000000..2965a12fe
--- /dev/null
+++ b/net/sctp/associola.c
@@ -0,0 +1,1729 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001 Intel Corp.
+ * Copyright (c) 2001 La Monte H.P. Yarroll
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * This module provides the abstraction for an SCTP association.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * Xingang Guo <xingang.guo@intel.com>
+ * Hui Huang <hui.huang@nokia.com>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ * Daisy Chang <daisyc@us.ibm.com>
+ * Ryan Layer <rmlayer@us.ibm.com>
+ * Kevin Gao <kevin.gao@intel.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/poll.h>
+#include <linux/init.h>
+
+#include <linux/slab.h>
+#include <linux/in.h>
+#include <net/ipv6.h>
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+
+/* Forward declarations for internal functions. */
+static void sctp_select_active_and_retran_path(struct sctp_association *asoc);
+static void sctp_assoc_bh_rcv(struct work_struct *work);
+static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
+static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);
+
+/* 1st Level Abstractions. */
+
+/* Initialize a new association from provided memory. */
+static struct sctp_association *sctp_association_init(
+ struct sctp_association *asoc,
+ const struct sctp_endpoint *ep,
+ const struct sock *sk,
+ enum sctp_scope scope, gfp_t gfp)
+{
+ struct sctp_sock *sp;
+ struct sctp_paramhdr *p;
+ int i;
+
+ /* Retrieve the SCTP per socket area. */
+ sp = sctp_sk((struct sock *)sk);
+
+ /* Discarding const is appropriate here. */
+ asoc->ep = (struct sctp_endpoint *)ep;
+ asoc->base.sk = (struct sock *)sk;
+ asoc->base.net = sock_net(sk);
+
+ sctp_endpoint_hold(asoc->ep);
+ sock_hold(asoc->base.sk);
+
+ /* Initialize the common base substructure. */
+ asoc->base.type = SCTP_EP_TYPE_ASSOCIATION;
+
+ /* Initialize the object handling fields. */
+ refcount_set(&asoc->base.refcnt, 1);
+
+ /* Initialize the bind addr area. */
+ sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);
+
+ asoc->state = SCTP_STATE_CLOSED;
+ asoc->cookie_life = ms_to_ktime(sp->assocparams.sasoc_cookie_life);
+ asoc->user_frag = sp->user_frag;
+
+ /* Set the association max_retrans and RTO values from the
+ * socket values.
+ */
+ asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
+ asoc->pf_retrans = sp->pf_retrans;
+ asoc->ps_retrans = sp->ps_retrans;
+ asoc->pf_expose = sp->pf_expose;
+
+ asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
+ asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
+ asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);
+
+ /* Initialize the association's heartbeat interval based on the
+ * sock configured value.
+ */
+ asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);
+ asoc->probe_interval = msecs_to_jiffies(sp->probe_interval);
+
+ asoc->encap_port = sp->encap_port;
+
+ /* Initialize path max retrans value. */
+ asoc->pathmaxrxt = sp->pathmaxrxt;
+
+ asoc->flowlabel = sp->flowlabel;
+ asoc->dscp = sp->dscp;
+
+ /* Set association default SACK delay */
+ asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
+ asoc->sackfreq = sp->sackfreq;
+
+ /* Set the association default flags controlling
+ * Heartbeat, SACK delay, and Path MTU Discovery.
+ */
+ asoc->param_flags = sp->param_flags;
+
+ /* Initialize the maximum number of new data packets that can be sent
+ * in a burst.
+ */
+ asoc->max_burst = sp->max_burst;
+
+ asoc->subscribe = sp->subscribe;
+
+ /* initialize association timers */
+ asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
+ asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
+ asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;
+
+ /* sctpimpguide Section 2.12.2
+ * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
+ * recommended value of 5 times 'RTO.Max'.
+ */
+ asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
+ = 5 * asoc->rto_max;
+
+ asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
+ asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;
+
+ /* Initializes the timers */
+ for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
+ timer_setup(&asoc->timers[i], sctp_timer_events[i], 0);
+
+ /* Pull default initialization values from the sock options.
+ * Note: This assumes that the values have already been
+ * validated in the sock.
+ */
+ asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams;
+ asoc->c.sinit_num_ostreams = sp->initmsg.sinit_num_ostreams;
+ asoc->max_init_attempts = sp->initmsg.sinit_max_attempts;
+
+ asoc->max_init_timeo =
+ msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);
+
+ /* Set the local window size for receive.
+ * This is also the rcvbuf space per association.
+ * RFC 2960, Section 6.1 - A SCTP receiver MUST be able to receive a minimum of
+ * 1500 bytes in one SCTP packet.
+ */
+ if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
+ asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
+ else
+ asoc->rwnd = sk->sk_rcvbuf/2;
+
+ asoc->a_rwnd = asoc->rwnd;
+
+ /* Use my own max window until I learn something better. */
+ asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;
+
+ /* Initialize the receive memory counter */
+ atomic_set(&asoc->rmem_alloc, 0);
+
+ init_waitqueue_head(&asoc->wait);
+
+ asoc->c.my_vtag = sctp_generate_tag(ep);
+ asoc->c.my_port = ep->base.bind_addr.port;
+
+ asoc->c.initial_tsn = sctp_generate_tsn(ep);
+
+ asoc->next_tsn = asoc->c.initial_tsn;
+
+ asoc->ctsn_ack_point = asoc->next_tsn - 1;
+ asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
+ asoc->highest_sacked = asoc->ctsn_ack_point;
+ asoc->last_cwr_tsn = asoc->ctsn_ack_point;
+
+ /* ADDIP Section 4.1 Asconf Chunk Procedures
+ *
+ * When an endpoint has an ASCONF signaled change to be sent to the
+ * remote endpoint it should do the following:
+ * ...
+ * A2) a serial number should be assigned to the chunk. The serial
+ * number SHOULD be a monotonically increasing number. The serial
+ * numbers SHOULD be initialized at the start of the
+ * association to the same value as the initial TSN.
+ */
+ asoc->addip_serial = asoc->c.initial_tsn;
+ asoc->strreset_outseq = asoc->c.initial_tsn;
+
+ INIT_LIST_HEAD(&asoc->addip_chunk_list);
+ INIT_LIST_HEAD(&asoc->asconf_ack_list);
+
+ /* Make an empty list of remote transport addresses. */
+ INIT_LIST_HEAD(&asoc->peer.transport_addr_list);
+
+ /* RFC 2960 5.1 Normal Establishment of an Association
+ *
+ * After the reception of the first data chunk in an
+ * association the endpoint must immediately respond with a
+ * sack to acknowledge the data chunk. Subsequent
+ * acknowledgements should be done as described in Section
+ * 6.2.
+ *
+ * [We implement this by telling a new association that it
+ * already received one packet.]
+ */
+ asoc->peer.sack_needed = 1;
+ asoc->peer.sack_generation = 1;
+
+ /* Create an input queue. */
+ sctp_inq_init(&asoc->base.inqueue);
+ sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);
+
+ /* Create an output queue. */
+ sctp_outq_init(asoc, &asoc->outqueue);
+
+ if (!sctp_ulpq_init(&asoc->ulpq, asoc))
+ goto fail_init;
+
+ if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams, 0, gfp))
+ goto stream_free;
+
+ /* Initialize default path MTU. */
+ asoc->pathmtu = sp->pathmtu;
+ sctp_assoc_update_frag_point(asoc);
+
+ /* Assume that peer would support both address types unless we are
+ * told otherwise.
+ */
+ asoc->peer.ipv4_address = 1;
+ if (asoc->base.sk->sk_family == PF_INET6)
+ asoc->peer.ipv6_address = 1;
+ INIT_LIST_HEAD(&asoc->asocs);
+
+ asoc->default_stream = sp->default_stream;
+ asoc->default_ppid = sp->default_ppid;
+ asoc->default_flags = sp->default_flags;
+ asoc->default_context = sp->default_context;
+ asoc->default_timetolive = sp->default_timetolive;
+ asoc->default_rcv_context = sp->default_rcv_context;
+
+ /* AUTH related initializations */
+ INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
+ if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp))
+ goto stream_free;
+
+ asoc->active_key_id = ep->active_key_id;
+ asoc->strreset_enable = ep->strreset_enable;
+
+ /* Save the hmacs and chunks list into this association */
+ if (ep->auth_hmacs_list)
+ memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
+ ntohs(ep->auth_hmacs_list->param_hdr.length));
+ if (ep->auth_chunk_list)
+ memcpy(asoc->c.auth_chunks, ep->auth_chunk_list,
+ ntohs(ep->auth_chunk_list->param_hdr.length));
+
+ /* Get the AUTH random number for this association */
+ p = (struct sctp_paramhdr *)asoc->c.auth_random;
+ p->type = SCTP_PARAM_RANDOM;
+ p->length = htons(sizeof(*p) + SCTP_AUTH_RANDOM_LENGTH);
+ get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH);
+
+ return asoc;
+
+stream_free:
+ sctp_stream_free(&asoc->stream);
+fail_init:
+ sock_put(asoc->base.sk);
+ sctp_endpoint_put(asoc->ep);
+ return NULL;
+}
+
+/* Allocate and initialize a new association */
+struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
+ const struct sock *sk,
+ enum sctp_scope scope, gfp_t gfp)
+{
+ struct sctp_association *asoc;
+
+ asoc = kzalloc(sizeof(*asoc), gfp);
+ if (!asoc)
+ goto fail;
+
+ if (!sctp_association_init(asoc, ep, sk, scope, gfp))
+ goto fail_init;
+
+ SCTP_DBG_OBJCNT_INC(assoc);
+
+ pr_debug("Created asoc %p\n", asoc);
+
+ return asoc;
+
+fail_init:
+ kfree(asoc);
+fail:
+ return NULL;
+}
+
+/* Free this association if possible. There may still be users, so
+ * the actual deallocation may be delayed.
+ */
+void sctp_association_free(struct sctp_association *asoc)
+{
+ struct sock *sk = asoc->base.sk;
+ struct sctp_transport *transport;
+ struct list_head *pos, *temp;
+ int i;
+
+ /* Only real associations count against the endpoint, so
+ * don't bother if this is a temporary association.
+ */
+ if (!list_empty(&asoc->asocs)) {
+ list_del(&asoc->asocs);
+
+ /* Decrement the backlog value for a TCP-style listening
+ * socket.
+ */
+ if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
+ sk_acceptq_removed(sk);
+ }
+
+ /* Mark as dead, so other users can know this structure is
+ * going away.
+ */
+ asoc->base.dead = true;
+
+ /* Dispose of any data lying around in the outqueue. */
+ sctp_outq_free(&asoc->outqueue);
+
+ /* Dispose of any pending messages for the upper layer. */
+ sctp_ulpq_free(&asoc->ulpq);
+
+ /* Dispose of any pending chunks on the inqueue. */
+ sctp_inq_free(&asoc->base.inqueue);
+
+ sctp_tsnmap_free(&asoc->peer.tsn_map);
+
+ /* Free stream information. */
+ sctp_stream_free(&asoc->stream);
+
+ if (asoc->strreset_chunk)
+ sctp_chunk_free(asoc->strreset_chunk);
+
+ /* Clean up the bound address list. */
+ sctp_bind_addr_free(&asoc->base.bind_addr);
+
+ /* Do we need to go through all of our timers and
+ * delete them? To be safe we will try to delete all, but we
+ * should be able to go through and make a guess based
+ * on our state.
+ */
+ for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
+ if (del_timer(&asoc->timers[i]))
+ sctp_association_put(asoc);
+ }
+
+ /* Free peer's cached cookie. */
+ kfree(asoc->peer.cookie);
+ kfree(asoc->peer.peer_random);
+ kfree(asoc->peer.peer_chunks);
+ kfree(asoc->peer.peer_hmacs);
+
+ /* Release the transport structures. */
+ list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
+ transport = list_entry(pos, struct sctp_transport, transports);
+ list_del_rcu(pos);
+ sctp_unhash_transport(transport);
+ sctp_transport_free(transport);
+ }
+
+ asoc->peer.transport_count = 0;
+
+ sctp_asconf_queue_teardown(asoc);
+
+ /* Free pending address space being deleted */
+ kfree(asoc->asconf_addr_del_pending);
+
+ /* AUTH - Free the endpoint shared keys */
+ sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);
+
+ /* AUTH - Free the association shared key */
+ sctp_auth_key_put(asoc->asoc_shared_key);
+
+ sctp_association_put(asoc);
+}
+
+/* Cleanup and free up an association. */
+static void sctp_association_destroy(struct sctp_association *asoc)
+{
+ if (unlikely(!asoc->base.dead)) {
+ WARN(1, "Attempt to destroy undead association %p!\n", asoc);
+ return;
+ }
+
+ sctp_endpoint_put(asoc->ep);
+ sock_put(asoc->base.sk);
+
+ if (asoc->assoc_id != 0) {
+ spin_lock_bh(&sctp_assocs_id_lock);
+ idr_remove(&sctp_assocs_id, asoc->assoc_id);
+ spin_unlock_bh(&sctp_assocs_id_lock);
+ }
+
+ WARN_ON(atomic_read(&asoc->rmem_alloc));
+
+ kfree_rcu(asoc, rcu);
+ SCTP_DBG_OBJCNT_DEC(assoc);
+}
+
+/* Change the primary destination address for the peer. */
+void sctp_assoc_set_primary(struct sctp_association *asoc,
+ struct sctp_transport *transport)
+{
+ int changeover = 0;
+
+ /* it's a changeover only if we already have a primary path
+ * that we are changing
+ */
+ if (asoc->peer.primary_path != NULL &&
+ asoc->peer.primary_path != transport)
+ changeover = 1;
+
+ asoc->peer.primary_path = transport;
+ sctp_ulpevent_notify_peer_addr_change(transport,
+ SCTP_ADDR_MADE_PRIM, 0);
+
+ /* Set a default msg_name for events. */
+ memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
+ sizeof(union sctp_addr));
+
+ /* If the primary path is changing, assume that the
+ * user wants to use this new path.
+ */
+ if ((transport->state == SCTP_ACTIVE) ||
+ (transport->state == SCTP_UNKNOWN))
+ asoc->peer.active_path = transport;
+
+ /*
+ * SFR-CACC algorithm:
+ * Upon the receipt of a request to change the primary
+ * destination address, on the data structure for the new
+ * primary destination, the sender MUST do the following:
+ *
+ * 1) If CHANGEOVER_ACTIVE is set, then there was a switch
+ * to this destination address earlier. The sender MUST set
+ * CYCLING_CHANGEOVER to indicate that this switch is a
+ * double switch to the same destination address.
+ *
+ * Really, only bother if we have data queued or outstanding on
+ * the association.
+ */
+ if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen)
+ return;
+
+ if (transport->cacc.changeover_active)
+ transport->cacc.cycling_changeover = changeover;
+
+ /* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that
+ * a changeover has occurred.
+ */
+ transport->cacc.changeover_active = changeover;
+
+ /* 3) The sender MUST store the next TSN to be sent in
+ * next_tsn_at_change.
+ */
+ transport->cacc.next_tsn_at_change = asoc->next_tsn;
+}
+
+/* Remove a transport from an association. */
+void sctp_assoc_rm_peer(struct sctp_association *asoc,
+ struct sctp_transport *peer)
+{
+ struct sctp_transport *transport;
+ struct list_head *pos;
+ struct sctp_chunk *ch;
+
+ pr_debug("%s: association:%p addr:%pISpc\n",
+ __func__, asoc, &peer->ipaddr.sa);
+
+ /* If we are to remove the current retran_path, update it
+ * to the next peer before removing this peer from the list.
+ */
+ if (asoc->peer.retran_path == peer)
+ sctp_assoc_update_retran_path(asoc);
+
+ /* Remove this peer from the list. */
+ list_del_rcu(&peer->transports);
+ /* Remove this peer from the transport hashtable */
+ sctp_unhash_transport(peer);
+
+ /* Get the first transport of asoc. */
+ pos = asoc->peer.transport_addr_list.next;
+ transport = list_entry(pos, struct sctp_transport, transports);
+
+ /* Update any entries that match the peer to be deleted. */
+ if (asoc->peer.primary_path == peer)
+ sctp_assoc_set_primary(asoc, transport);
+ if (asoc->peer.active_path == peer)
+ asoc->peer.active_path = transport;
+ if (asoc->peer.retran_path == peer)
+ asoc->peer.retran_path = transport;
+ if (asoc->peer.last_data_from == peer)
+ asoc->peer.last_data_from = transport;
+
+ if (asoc->strreset_chunk &&
+ asoc->strreset_chunk->transport == peer) {
+ asoc->strreset_chunk->transport = transport;
+ sctp_transport_reset_reconf_timer(transport);
+ }
+
+ /* If we remove the transport an INIT was last sent to, set it to
+ * NULL. Combined with the update of the retran path above, this
+ * will cause the next INIT to be sent to the next available
+ * transport, maintaining the cycle.
+ */
+ if (asoc->init_last_sent_to == peer)
+ asoc->init_last_sent_to = NULL;
+
+ /* If we remove the transport an SHUTDOWN was last sent to, set it
+ * to NULL. Combined with the update of the retran path above, this
+ * will cause the next SHUTDOWN to be sent to the next available
+ * transport, maintaining the cycle.
+ */
+ if (asoc->shutdown_last_sent_to == peer)
+ asoc->shutdown_last_sent_to = NULL;
+
+ /* If we remove the transport an ASCONF was last sent to, set it to
+ * NULL.
+ */
+ if (asoc->addip_last_asconf &&
+ asoc->addip_last_asconf->transport == peer)
+ asoc->addip_last_asconf->transport = NULL;
+
+ /* If we have something on the transmitted list, we have to
+ * save it off. The best place is the active path.
+ */
+ if (!list_empty(&peer->transmitted)) {
+ struct sctp_transport *active = asoc->peer.active_path;
+
+ /* Reset the transport of each chunk on this list */
+ list_for_each_entry(ch, &peer->transmitted,
+ transmitted_list) {
+ ch->transport = NULL;
+ ch->rtt_in_progress = 0;
+ }
+
+ list_splice_tail_init(&peer->transmitted,
+ &active->transmitted);
+
+ /* Start a T3 timer here in case it wasn't running so
+ * that these migrated packets have a chance to get
+ * retransmitted.
+ */
+ if (!timer_pending(&active->T3_rtx_timer))
+ if (!mod_timer(&active->T3_rtx_timer,
+ jiffies + active->rto))
+ sctp_transport_hold(active);
+ }
+
+ list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list)
+ if (ch->transport == peer)
+ ch->transport = NULL;
+
+ asoc->peer.transport_count--;
+
+ sctp_ulpevent_notify_peer_addr_change(peer, SCTP_ADDR_REMOVED, 0);
+ sctp_transport_free(peer);
+}
+
+/* Add a transport address to an association. */
+struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
+ const union sctp_addr *addr,
+ const gfp_t gfp,
+ const int peer_state)
+{
+ struct sctp_transport *peer;
+ struct sctp_sock *sp;
+ unsigned short port;
+
+ sp = sctp_sk(asoc->base.sk);
+
+ /* AF_INET and AF_INET6 share common port field. */
+ port = ntohs(addr->v4.sin_port);
+
+ pr_debug("%s: association:%p addr:%pISpc state:%d\n", __func__,
+ asoc, &addr->sa, peer_state);
+
+ /* Set the port if it has not been set yet. */
+ if (0 == asoc->peer.port)
+ asoc->peer.port = port;
+
+ /* Check to see if this is a duplicate. */
+ peer = sctp_assoc_lookup_paddr(asoc, addr);
+ if (peer) {
+ /* An UNKNOWN state is only set on transports added by
+ * user in sctp_connectx() call. Such transports should be
+ * considered CONFIRMED per RFC 4960, Section 5.4.
+ */
+ if (peer->state == SCTP_UNKNOWN) {
+ peer->state = SCTP_ACTIVE;
+ }
+ return peer;
+ }
+
+ peer = sctp_transport_new(asoc->base.net, addr, gfp);
+ if (!peer)
+ return NULL;
+
+ sctp_transport_set_owner(peer, asoc);
+
+ /* Initialize the peer's heartbeat interval based on the
+ * association configured value.
+ */
+ peer->hbinterval = asoc->hbinterval;
+ peer->probe_interval = asoc->probe_interval;
+
+ peer->encap_port = asoc->encap_port;
+
+ /* Set the path max_retrans. */
+ peer->pathmaxrxt = asoc->pathmaxrxt;
+
+ /* And the partial failure retrans threshold */
+ peer->pf_retrans = asoc->pf_retrans;
+ /* And the primary path switchover retrans threshold */
+ peer->ps_retrans = asoc->ps_retrans;
+
+ /* Initialize the peer's SACK delay timeout based on the
+ * association configured value.
+ */
+ peer->sackdelay = asoc->sackdelay;
+ peer->sackfreq = asoc->sackfreq;
+
+ if (addr->sa.sa_family == AF_INET6) {
+ __be32 info = addr->v6.sin6_flowinfo;
+
+ if (info) {
+ peer->flowlabel = ntohl(info & IPV6_FLOWLABEL_MASK);
+ peer->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
+ } else {
+ peer->flowlabel = asoc->flowlabel;
+ }
+ }
+ peer->dscp = asoc->dscp;
+
+ /* Enable/disable heartbeat, SACK delay, and path MTU discovery
+ * based on association setting.
+ */
+ peer->param_flags = asoc->param_flags;
+
+ /* Initialize the pmtu of the transport. */
+ sctp_transport_route(peer, NULL, sp);
+
+ /* If this is the first transport addr on this association,
+ * initialize the association PMTU to the peer's PMTU.
+ * If not and the current association PMTU is higher than the new
+ * peer's PMTU, reset the association PMTU to the new peer's PMTU.
+ */
+ sctp_assoc_set_pmtu(asoc, asoc->pathmtu ?
+ min_t(int, peer->pathmtu, asoc->pathmtu) :
+ peer->pathmtu);
+
+ peer->pmtu_pending = 0;
+
+ /* The asoc->peer.port might not be meaningful yet, but
+ * initialize the packet structure anyway.
+ */
+ sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
+ asoc->peer.port);
+
+ /* 7.2.1 Slow-Start
+ *
+ * o The initial cwnd before DATA transmission or after a sufficiently
+ * long idle period MUST be set to
+ * min(4*MTU, max(2*MTU, 4380 bytes))
+ *
+ * o The initial value of ssthresh MAY be arbitrarily high
+ * (for example, implementations MAY use the size of the
+ * receiver advertised window).
+ */
+ peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
+
+ /* At this point, we may not have the receiver's advertised window,
+ * so initialize ssthresh to the default value and it will be set
+ * later when we process the INIT.
+ */
+ peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;
+
+ peer->partial_bytes_acked = 0;
+ peer->flight_size = 0;
+ peer->burst_limited = 0;
+
+ /* Set the transport's RTO.initial value */
+ peer->rto = asoc->rto_initial;
+ sctp_max_rto(asoc, peer);
+
+ /* Set the peer's active state. */
+ peer->state = peer_state;
+
+ /* Add this peer into the transport hashtable */
+ if (sctp_hash_transport(peer)) {
+ sctp_transport_free(peer);
+ return NULL;
+ }
+
+ sctp_transport_pl_reset(peer);
+
+ /* Attach the remote transport to our asoc. */
+ list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
+ asoc->peer.transport_count++;
+
+ sctp_ulpevent_notify_peer_addr_change(peer, SCTP_ADDR_ADDED, 0);
+
+ /* If we do not yet have a primary path, set one. */
+ if (!asoc->peer.primary_path) {
+ sctp_assoc_set_primary(asoc, peer);
+ asoc->peer.retran_path = peer;
+ }
+
+ if (asoc->peer.active_path == asoc->peer.retran_path &&
+ peer->state != SCTP_UNCONFIRMED) {
+ asoc->peer.retran_path = peer;
+ }
+
+ return peer;
+}
+
+/* Delete a transport address from an association. */
+void sctp_assoc_del_peer(struct sctp_association *asoc,
+ const union sctp_addr *addr)
+{
+ struct list_head *pos;
+ struct list_head *temp;
+ struct sctp_transport *transport;
+
+ list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
+ transport = list_entry(pos, struct sctp_transport, transports);
+ if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) {
+ /* Do book keeping for removing the peer and free it. */
+ sctp_assoc_rm_peer(asoc, transport);
+ break;
+ }
+ }
+}
+
+/* Lookup a transport by address. */
+struct sctp_transport *sctp_assoc_lookup_paddr(
+ const struct sctp_association *asoc,
+ const union sctp_addr *address)
+{
+ struct sctp_transport *t;
+
+ /* Cycle through all transports searching for a peer address. */
+
+ list_for_each_entry(t, &asoc->peer.transport_addr_list,
+ transports) {
+ if (sctp_cmp_addr_exact(address, &t->ipaddr))
+ return t;
+ }
+
+ return NULL;
+}
+
+/* Remove all transports except a given one */
+void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc,
+ struct sctp_transport *primary)
+{
+ struct sctp_transport *temp;
+ struct sctp_transport *t;
+
+ list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list,
+ transports) {
+ /* if the current transport is not the primary one, delete it */
+ if (t != primary)
+ sctp_assoc_rm_peer(asoc, t);
+ }
+}
+
+/* Engage in transport control operations.
+ * Mark the transport up or down and send a notification to the user.
+ * Select and update the new active and retran paths.
+ */
+void sctp_assoc_control_transport(struct sctp_association *asoc,
+ struct sctp_transport *transport,
+ enum sctp_transport_cmd command,
+ sctp_sn_error_t error)
+{
+ int spc_state = SCTP_ADDR_AVAILABLE;
+ bool ulp_notify = true;
+
+ /* Record the transition on the transport. */
+ switch (command) {
+ case SCTP_TRANSPORT_UP:
+ /* If we are moving from UNCONFIRMED state due
+ * to heartbeat success, report the SCTP_ADDR_CONFIRMED
+ * state to the user, otherwise report SCTP_ADDR_AVAILABLE.
+ */
+ if (transport->state == SCTP_PF &&
+ asoc->pf_expose != SCTP_PF_EXPOSE_ENABLE)
+ ulp_notify = false;
+ else if (transport->state == SCTP_UNCONFIRMED &&
+ error == SCTP_HEARTBEAT_SUCCESS)
+ spc_state = SCTP_ADDR_CONFIRMED;
+
+ transport->state = SCTP_ACTIVE;
+ sctp_transport_pl_reset(transport);
+ break;
+
+ case SCTP_TRANSPORT_DOWN:
+ /* If the transport was never confirmed, do not transition it
+ * to inactive state. Also, release the cached route since
+ * there may be a better route next time.
+ */
+ if (transport->state != SCTP_UNCONFIRMED) {
+ transport->state = SCTP_INACTIVE;
+ sctp_transport_pl_reset(transport);
+ spc_state = SCTP_ADDR_UNREACHABLE;
+ } else {
+ sctp_transport_dst_release(transport);
+ ulp_notify = false;
+ }
+ break;
+
+ case SCTP_TRANSPORT_PF:
+ transport->state = SCTP_PF;
+ if (asoc->pf_expose != SCTP_PF_EXPOSE_ENABLE)
+ ulp_notify = false;
+ else
+ spc_state = SCTP_ADDR_POTENTIALLY_FAILED;
+ break;
+
+ default:
+ return;
+ }
+
+ /* Generate and send a SCTP_PEER_ADDR_CHANGE notification
+ * to the user.
+ */
+ if (ulp_notify)
+ sctp_ulpevent_notify_peer_addr_change(transport,
+ spc_state, error);
+
+ /* Select new active and retran paths. */
+ sctp_select_active_and_retran_path(asoc);
+}
+
+/* Hold a reference to an association. */
+void sctp_association_hold(struct sctp_association *asoc)
+{
+ refcount_inc(&asoc->base.refcnt);
+}
+
+/* Release a reference to an association and cleanup
+ * if there are no more references.
+ */
+void sctp_association_put(struct sctp_association *asoc)
+{
+ if (refcount_dec_and_test(&asoc->base.refcnt))
+ sctp_association_destroy(asoc);
+}
+
+/* Allocate the next TSN, Transmission Sequence Number, for the given
+ * association.
+ */
+__u32 sctp_association_get_next_tsn(struct sctp_association *asoc)
+{
+ /* From Section 1.6 Serial Number Arithmetic:
+ * Transmission Sequence Numbers wrap around when they reach
+ * 2**32 - 1. That is, the next TSN a DATA chunk MUST use
+ * after transmitting TSN = 2**32 - 1 is TSN = 0.
+ */
+ __u32 retval = asoc->next_tsn;
+ asoc->next_tsn++;
+ asoc->unack_data++;
+
+ return retval;
+}
+
+/* Compare two addresses to see if they match. Wildcard addresses
+ * only match themselves.
+ */
+int sctp_cmp_addr_exact(const union sctp_addr *ss1,
+ const union sctp_addr *ss2)
+{
+ struct sctp_af *af;
+
+ af = sctp_get_af_specific(ss1->sa.sa_family);
+ if (unlikely(!af))
+ return 0;
+
+ return af->cmp_addr(ss1, ss2);
+}
+
+/* Return an ecne chunk to get prepended to a packet.
+ * Note: We are sly and return a shared, prealloced chunk. FIXME:
+ * No we don't, but we could/should.
+ */
+struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc)
+{
+ if (!asoc->need_ecne)
+ return NULL;
+
+ /* Send ECNE if needed.
+ * Not being able to allocate a chunk here is not deadly.
+ */
+ return sctp_make_ecne(asoc, asoc->last_ecne_tsn);
+}
+
+/*
+ * Find which transport this TSN was sent on.
+ */
+struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
+ __u32 tsn)
+{
+ struct sctp_transport *active;
+ struct sctp_transport *match;
+ struct sctp_transport *transport;
+ struct sctp_chunk *chunk;
+ __be32 key = htonl(tsn);
+
+ match = NULL;
+
+ /*
+ * FIXME: In general, find a more efficient data structure for
+ * searching.
+ */
+
+ /*
+ * The general strategy is to search each transport's transmitted
+ * list. Return which transport this TSN lives on.
+ *
+ * Let's be hopeful and check the active_path first.
+ * Another optimization would be to know if there is only one
+ * outbound path and not have to look for the TSN at all.
+ *
+ */
+
+ active = asoc->peer.active_path;
+
+ list_for_each_entry(chunk, &active->transmitted,
+ transmitted_list) {
+
+ if (key == chunk->subh.data_hdr->tsn) {
+ match = active;
+ goto out;
+ }
+ }
+
+ /* If not found, go search all the other transports. */
+ list_for_each_entry(transport, &asoc->peer.transport_addr_list,
+ transports) {
+
+ if (transport == active)
+ continue;
+ list_for_each_entry(chunk, &transport->transmitted,
+ transmitted_list) {
+ if (key == chunk->subh.data_hdr->tsn) {
+ match = transport;
+ goto out;
+ }
+ }
+ }
+out:
+ return match;
+}
+
+/* Do delayed input processing. This is scheduled by sctp_rcv(). */
+static void sctp_assoc_bh_rcv(struct work_struct *work)
+{
+ struct sctp_association *asoc =
+ container_of(work, struct sctp_association,
+ base.inqueue.immediate);
+ struct net *net = asoc->base.net;
+ union sctp_subtype subtype;
+ struct sctp_endpoint *ep;
+ struct sctp_chunk *chunk;
+ struct sctp_inq *inqueue;
+ int first_time = 1; /* is this the first time through the loop */
+ int error = 0;
+ int state;
+
+ /* The association should be held so we should be safe. */
+ ep = asoc->ep;
+
+ inqueue = &asoc->base.inqueue;
+ sctp_association_hold(asoc);
+ while (NULL != (chunk = sctp_inq_pop(inqueue))) {
+ state = asoc->state;
+ subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
+
+ /* If the first chunk in the packet is AUTH, do special
+ * processing specified in Section 6.3 of SCTP-AUTH spec
+ */
+ if (first_time && subtype.chunk == SCTP_CID_AUTH) {
+ struct sctp_chunkhdr *next_hdr;
+
+ next_hdr = sctp_inq_peek(inqueue);
+ if (!next_hdr)
+ goto normal;
+
+ /* If the next chunk is COOKIE-ECHO, skip the AUTH
+ * chunk while saving a pointer to it so we can do
+ * Authentication later (during cookie-echo
+ * processing).
+ */
+ if (next_hdr->type == SCTP_CID_COOKIE_ECHO) {
+ chunk->auth_chunk = skb_clone(chunk->skb,
+ GFP_ATOMIC);
+ chunk->auth = 1;
+ continue;
+ }
+ }
+
+normal:
+ /* SCTP-AUTH, Section 6.3:
+ * The receiver has a list of chunk types which it expects
+ * to be received only after an AUTH-chunk. This list has
+ * been sent to the peer during the association setup. It
+ * MUST silently discard these chunks if they are not placed
+ * after an AUTH chunk in the packet.
+ */
+ if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
+ continue;
+
+ /* Remember where the last DATA chunk came from so we
+ * know where to send the SACK.
+ */
+ if (sctp_chunk_is_data(chunk))
+ asoc->peer.last_data_from = chunk->transport;
+ else {
+ SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS);
+ asoc->stats.ictrlchunks++;
+ if (chunk->chunk_hdr->type == SCTP_CID_SACK)
+ asoc->stats.isacks++;
+ }
+
+ if (chunk->transport)
+ chunk->transport->last_time_heard = ktime_get();
+
+ /* Run through the state machine. */
+ error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
+ state, ep, asoc, chunk, GFP_ATOMIC);
+
+ /* Check to see if the association is freed in response to
+ * the incoming chunk. If so, get out of the while loop.
+ */
+ if (asoc->base.dead)
+ break;
+
+ /* If there is an error on chunk, discard this packet. */
+ if (error && chunk)
+ chunk->pdiscard = 1;
+
+ if (first_time)
+ first_time = 0;
+ }
+ sctp_association_put(asoc);
+}
+
+/* This routine moves an association from its old sk to a new sk. */
+void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
+{
+ struct sctp_sock *newsp = sctp_sk(newsk);
+ struct sock *oldsk = assoc->base.sk;
+
+ /* Delete the association from the old endpoint's list of
+ * associations.
+ */
+ list_del_init(&assoc->asocs);
+
+ /* Decrement the backlog value for a TCP-style socket. */
+ if (sctp_style(oldsk, TCP))
+ sk_acceptq_removed(oldsk);
+
+ /* Release references to the old endpoint and the sock. */
+ sctp_endpoint_put(assoc->ep);
+ sock_put(assoc->base.sk);
+
+ /* Get a reference to the new endpoint. */
+ assoc->ep = newsp->ep;
+ sctp_endpoint_hold(assoc->ep);
+
+ /* Get a reference to the new sock. */
+ assoc->base.sk = newsk;
+ sock_hold(assoc->base.sk);
+
+ /* Add the association to the new endpoint's list of associations. */
+ sctp_endpoint_add_asoc(newsp->ep, assoc);
+}
+
+/* Update an association (possibly from unexpected COOKIE-ECHO processing). */
+int sctp_assoc_update(struct sctp_association *asoc,
+ struct sctp_association *new)
+{
+ struct sctp_transport *trans;
+ struct list_head *pos, *temp;
+
+ /* Copy in new parameters of peer. */
+ asoc->c = new->c;
+ asoc->peer.rwnd = new->peer.rwnd;
+ asoc->peer.sack_needed = new->peer.sack_needed;
+ asoc->peer.auth_capable = new->peer.auth_capable;
+ asoc->peer.i = new->peer.i;
+
+ if (!sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
+ asoc->peer.i.initial_tsn, GFP_ATOMIC))
+ return -ENOMEM;
+
+ /* Remove any peer addresses not present in the new association. */
+ list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
+ trans = list_entry(pos, struct sctp_transport, transports);
+ if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
+ sctp_assoc_rm_peer(asoc, trans);
+ continue;
+ }
+
+ if (asoc->state >= SCTP_STATE_ESTABLISHED)
+ sctp_transport_reset(trans);
+ }
+
+ /* If the case is A (association restart), use
+ * initial_tsn as next_tsn. If the case is B, use
+ * current next_tsn in case data sent to peer
+ * has been discarded and needs retransmission.
+ */
+ if (asoc->state >= SCTP_STATE_ESTABLISHED) {
+ asoc->next_tsn = new->next_tsn;
+ asoc->ctsn_ack_point = new->ctsn_ack_point;
+ asoc->adv_peer_ack_point = new->adv_peer_ack_point;
+
+ /* Reinitialize SSN for both local streams
+ * and peer's streams.
+ */
+ sctp_stream_clear(&asoc->stream);
+
+ /* Flush the ULP reassembly and ordered queue.
+ * Any data there will now be stale and will
+ * cause problems.
+ */
+ sctp_ulpq_flush(&asoc->ulpq);
+
+ /* reset the overall association error count so
+ * that the restarted association doesn't get torn
+ * down on the next retransmission timer.
+ */
+ asoc->overall_error_count = 0;
+
+ } else {
+ /* Add any peer addresses from the new association. */
+ list_for_each_entry(trans, &new->peer.transport_addr_list,
+ transports)
+ if (!sctp_assoc_add_peer(asoc, &trans->ipaddr,
+ GFP_ATOMIC, trans->state))
+ return -ENOMEM;
+
+ asoc->ctsn_ack_point = asoc->next_tsn - 1;
+ asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
+
+ if (sctp_state(asoc, COOKIE_WAIT))
+ sctp_stream_update(&asoc->stream, &new->stream);
+
+ /* get a new assoc id if we don't have one yet. */
+ if (sctp_assoc_set_id(asoc, GFP_ATOMIC))
+ return -ENOMEM;
+ }
+
+ /* SCTP-AUTH: Save the peer parameters from the new associations
+ * and also move the association shared keys over
+ */
+ kfree(asoc->peer.peer_random);
+ asoc->peer.peer_random = new->peer.peer_random;
+ new->peer.peer_random = NULL;
+
+ kfree(asoc->peer.peer_chunks);
+ asoc->peer.peer_chunks = new->peer.peer_chunks;
+ new->peer.peer_chunks = NULL;
+
+ kfree(asoc->peer.peer_hmacs);
+ asoc->peer.peer_hmacs = new->peer.peer_hmacs;
+ new->peer.peer_hmacs = NULL;
+
+ return sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
+}
+
+/* Update the retran path for sending a retransmitted packet.
+ * See also RFC4960, 6.4. Multi-Homed SCTP Endpoints:
+ *
+ * When there is outbound data to send and the primary path
+ * becomes inactive (e.g., due to failures), or where the
+ * SCTP user explicitly requests to send data to an
+ * inactive destination transport address, before reporting
+ * an error to its ULP, the SCTP endpoint should try to send
+ * the data to an alternate active destination transport
+ * address if one exists.
+ *
+ * When retransmitting data that timed out, if the endpoint
+ * is multihomed, it should consider each source-destination
+ * address pair in its retransmission selection policy.
+ * When retransmitting timed-out data, the endpoint should
+ * attempt to pick the most divergent source-destination
+ * pair from the original source-destination pair to which
+ * the packet was transmitted.
+ *
+ * Note: Rules for picking the most divergent source-destination
+ * pair are an implementation decision and are not specified
+ * within this document.
+ *
+ * Our basic strategy is to round-robin transports in priorities
+ * according to sctp_trans_score() e.g., if no such
+ * transport with state SCTP_ACTIVE exists, round-robin through
+ * SCTP_UNKNOWN, etc. You get the picture.
+ */
+static u8 sctp_trans_score(const struct sctp_transport *trans)
+{
+ switch (trans->state) {
+ case SCTP_ACTIVE:
+ return 3; /* best case */
+ case SCTP_UNKNOWN:
+ return 2;
+ case SCTP_PF:
+ return 1;
+ default: /* case SCTP_INACTIVE */
+ return 0; /* worst case */
+ }
+}
+
+static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1,
+ struct sctp_transport *trans2)
+{
+ if (trans1->error_count > trans2->error_count) {
+ return trans2;
+ } else if (trans1->error_count == trans2->error_count &&
+ ktime_after(trans2->last_time_heard,
+ trans1->last_time_heard)) {
+ return trans2;
+ } else {
+ return trans1;
+ }
+}
+
+static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
+ struct sctp_transport *best)
+{
+ u8 score_curr, score_best;
+
+ if (best == NULL || curr == best)
+ return curr;
+
+ score_curr = sctp_trans_score(curr);
+ score_best = sctp_trans_score(best);
+
+ /* First, try a score-based selection if both transport states
+ * differ. If we're in a tie, let's try to make a more clever
+ * decision here based on error counts and last time heard.
+ */
+ if (score_curr > score_best)
+ return curr;
+ else if (score_curr == score_best)
+ return sctp_trans_elect_tie(best, curr);
+ else
+ return best;
+}
+
+void sctp_assoc_update_retran_path(struct sctp_association *asoc)
+{
+ struct sctp_transport *trans = asoc->peer.retran_path;
+ struct sctp_transport *trans_next = NULL;
+
+ /* We're done as we only have the one and only path. */
+ if (asoc->peer.transport_count == 1)
+ return;
+ /* If active_path and retran_path are the same and active,
+ * then this is the only active path. Use it.
+ */
+ if (asoc->peer.active_path == asoc->peer.retran_path &&
+ asoc->peer.active_path->state == SCTP_ACTIVE)
+ return;
+
+ /* Iterate from retran_path's successor back to retran_path. */
+ for (trans = list_next_entry(trans, transports); 1;
+ trans = list_next_entry(trans, transports)) {
+ /* Manually skip the head element. */
+ if (&trans->transports == &asoc->peer.transport_addr_list)
+ continue;
+ if (trans->state == SCTP_UNCONFIRMED)
+ continue;
+ trans_next = sctp_trans_elect_best(trans, trans_next);
+ /* Active is good enough for immediate return. */
+ if (trans_next->state == SCTP_ACTIVE)
+ break;
+ /* We've reached the end, time to update path. */
+ if (trans == asoc->peer.retran_path)
+ break;
+ }
+
+ asoc->peer.retran_path = trans_next;
+
+ pr_debug("%s: association:%p updated new path to addr:%pISpc\n",
+ __func__, asoc, &asoc->peer.retran_path->ipaddr.sa);
+}
+
+static void sctp_select_active_and_retran_path(struct sctp_association *asoc)
+{
+ struct sctp_transport *trans, *trans_pri = NULL, *trans_sec = NULL;
+ struct sctp_transport *trans_pf = NULL;
+
+ /* Look for the two most recently used active transports. */
+ list_for_each_entry(trans, &asoc->peer.transport_addr_list,
+ transports) {
+ /* Skip uninteresting transports. */
+ if (trans->state == SCTP_INACTIVE ||
+ trans->state == SCTP_UNCONFIRMED)
+ continue;
+ /* Keep track of the best PF transport from our
+ * list in case we don't find an active one.
+ */
+ if (trans->state == SCTP_PF) {
+ trans_pf = sctp_trans_elect_best(trans, trans_pf);
+ continue;
+ }
+ /* For active transports, pick the most recent ones. */
+ if (trans_pri == NULL ||
+ ktime_after(trans->last_time_heard,
+ trans_pri->last_time_heard)) {
+ trans_sec = trans_pri;
+ trans_pri = trans;
+ } else if (trans_sec == NULL ||
+ ktime_after(trans->last_time_heard,
+ trans_sec->last_time_heard)) {
+ trans_sec = trans;
+ }
+ }
+
+ /* RFC 2960 6.4 Multi-Homed SCTP Endpoints
+ *
+ * By default, an endpoint should always transmit to the primary
+ * path, unless the SCTP user explicitly specifies the
+ * destination transport address (and possibly source transport
+ * address) to use. [If the primary is active but not most recent,
+ * bump the most recently used transport.]
+ */
+ if ((asoc->peer.primary_path->state == SCTP_ACTIVE ||
+ asoc->peer.primary_path->state == SCTP_UNKNOWN) &&
+ asoc->peer.primary_path != trans_pri) {
+ trans_sec = trans_pri;
+ trans_pri = asoc->peer.primary_path;
+ }
+
+ /* We did not find anything useful for a possible retransmission
+ * path; either primary path that we found is the same as
+ * the current one, or we didn't generally find an active one.
+ */
+ if (trans_sec == NULL)
+ trans_sec = trans_pri;
+
+ /* If we failed to find a usable transport, just camp on the
+ * active or pick a PF iff it's the better choice.
+ */
+ if (trans_pri == NULL) {
+ trans_pri = sctp_trans_elect_best(asoc->peer.active_path, trans_pf);
+ trans_sec = trans_pri;
+ }
+
+ /* Set the active and retran transports. */
+ asoc->peer.active_path = trans_pri;
+ asoc->peer.retran_path = trans_sec;
+}
+
+struct sctp_transport *
+sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
+ struct sctp_transport *last_sent_to)
+{
+ /* If this is the first time packet is sent, use the active path,
+ * else use the retran path. If the last packet was sent over the
+ * retran path, update the retran path and use it.
+ */
+ if (last_sent_to == NULL) {
+ return asoc->peer.active_path;
+ } else {
+ if (last_sent_to == asoc->peer.retran_path)
+ sctp_assoc_update_retran_path(asoc);
+
+ return asoc->peer.retran_path;
+ }
+}
+
+void sctp_assoc_update_frag_point(struct sctp_association *asoc)
+{
+ int frag = sctp_mtu_payload(sctp_sk(asoc->base.sk), asoc->pathmtu,
+ sctp_datachk_len(&asoc->stream));
+
+ if (asoc->user_frag)
+ frag = min_t(int, frag, asoc->user_frag);
+
+ frag = min_t(int, frag, SCTP_MAX_CHUNK_LEN -
+ sctp_datachk_len(&asoc->stream));
+
+ asoc->frag_point = SCTP_TRUNC4(frag);
+}
+
+void sctp_assoc_set_pmtu(struct sctp_association *asoc, __u32 pmtu)
+{
+ if (asoc->pathmtu != pmtu) {
+ asoc->pathmtu = pmtu;
+ sctp_assoc_update_frag_point(asoc);
+ }
+
+ pr_debug("%s: asoc:%p, pmtu:%d, frag_point:%d\n", __func__, asoc,
+ asoc->pathmtu, asoc->frag_point);
+}
+
+/* Update the association's pmtu and frag_point by going through all the
+ * transports. This routine is called when a transport's PMTU has changed.
+ */
+void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
+{
+ struct sctp_transport *t;
+ __u32 pmtu = 0;
+
+ if (!asoc)
+ return;
+
+ /* Get the lowest pmtu of all the transports. */
+ list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
+ if (t->pmtu_pending && t->dst) {
+ sctp_transport_update_pmtu(t,
+ atomic_read(&t->mtu_info));
+ t->pmtu_pending = 0;
+ }
+ if (!pmtu || (t->pathmtu < pmtu))
+ pmtu = t->pathmtu;
+ }
+
+ sctp_assoc_set_pmtu(asoc, pmtu);
+}
+
+/* Should we send a SACK to update our peer? */
+static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
+{
+ struct net *net = asoc->base.net;
+
+ switch (asoc->state) {
+ case SCTP_STATE_ESTABLISHED:
+ case SCTP_STATE_SHUTDOWN_PENDING:
+ case SCTP_STATE_SHUTDOWN_RECEIVED:
+ case SCTP_STATE_SHUTDOWN_SENT:
+ if ((asoc->rwnd > asoc->a_rwnd) &&
+ ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
+ (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
+ asoc->pathmtu)))
+ return true;
+ break;
+ default:
+ break;
+ }
+ return false;
+}
+
+/* Increase asoc's rwnd by len and send any window update SACK if needed. */
+void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
+{
+ struct sctp_chunk *sack;
+ struct timer_list *timer;
+
+ if (asoc->rwnd_over) {
+ if (asoc->rwnd_over >= len) {
+ asoc->rwnd_over -= len;
+ } else {
+ asoc->rwnd += (len - asoc->rwnd_over);
+ asoc->rwnd_over = 0;
+ }
+ } else {
+ asoc->rwnd += len;
+ }
+
+ /* If we had window pressure, start recovering it
+ * once our rwnd had reached the accumulated pressure
+ * threshold. The idea is to recover slowly, but up
+ * to the initial advertised window.
+ */
+ if (asoc->rwnd_press) {
+ int change = min(asoc->pathmtu, asoc->rwnd_press);
+ asoc->rwnd += change;
+ asoc->rwnd_press -= change;
+ }
+
+ pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n",
+ __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
+ asoc->a_rwnd);
+
+ /* Send a window update SACK if the rwnd has increased by at least the
+ * minimum of the association's PMTU and half of the receive buffer.
+ * The algorithm used is similar to the one described in
+ * Section 4.2.3.3 of RFC 1122.
+ */
+ if (sctp_peer_needs_update(asoc)) {
+ asoc->a_rwnd = asoc->rwnd;
+
+ pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
+ "a_rwnd:%u\n", __func__, asoc, asoc->rwnd,
+ asoc->a_rwnd);
+
+ sack = sctp_make_sack(asoc);
+ if (!sack)
+ return;
+
+ asoc->peer.sack_needed = 0;
+
+ sctp_outq_tail(&asoc->outqueue, sack, GFP_ATOMIC);
+
+ /* Stop the SACK timer. */
+ timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
+ if (del_timer(timer))
+ sctp_association_put(asoc);
+ }
+}
+
+/* Decrease asoc's rwnd by len. */
+void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
+{
+ int rx_count;
+ int over = 0;
+
+ if (unlikely(!asoc->rwnd || asoc->rwnd_over))
+ pr_debug("%s: association:%p has asoc->rwnd:%u, "
+ "asoc->rwnd_over:%u!\n", __func__, asoc,
+ asoc->rwnd, asoc->rwnd_over);
+
+ if (asoc->ep->rcvbuf_policy)
+ rx_count = atomic_read(&asoc->rmem_alloc);
+ else
+ rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
+
+ /* If we've reached or overflowed our receive buffer, announce
+ * a 0 rwnd if rwnd would still be positive. Store the
+ * potential pressure overflow so that the window can be restored
+ * back to original value.
+ */
+ if (rx_count >= asoc->base.sk->sk_rcvbuf)
+ over = 1;
+
+ if (asoc->rwnd >= len) {
+ asoc->rwnd -= len;
+ if (over) {
+ asoc->rwnd_press += asoc->rwnd;
+ asoc->rwnd = 0;
+ }
+ } else {
+ asoc->rwnd_over += len - asoc->rwnd;
+ asoc->rwnd = 0;
+ }
+
+ pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
+ __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
+ asoc->rwnd_press);
+}
+
+/* Build the bind address list for the association based on info from the
+ * local endpoint and the remote peer.
+ */
+int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
+ enum sctp_scope scope, gfp_t gfp)
+{
+ struct sock *sk = asoc->base.sk;
+ int flags;
+
+ /* Use scoping rules to determine the subset of addresses from
+ * the endpoint.
+ */
+ flags = (PF_INET6 == sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
+ if (!inet_v6_ipv6only(sk))
+ flags |= SCTP_ADDR4_ALLOWED;
+ if (asoc->peer.ipv4_address)
+ flags |= SCTP_ADDR4_PEERSUPP;
+ if (asoc->peer.ipv6_address)
+ flags |= SCTP_ADDR6_PEERSUPP;
+
+ return sctp_bind_addr_copy(asoc->base.net,
+ &asoc->base.bind_addr,
+ &asoc->ep->base.bind_addr,
+ scope, gfp, flags);
+}
+
+/* Build the association's bind address list from the cookie. */
+int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
+ struct sctp_cookie *cookie,
+ gfp_t gfp)
+{
+ int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length);
+ int var_size3 = cookie->raw_addr_list_len;
+ __u8 *raw = (__u8 *)cookie->peer_init + var_size2;
+
+ return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3,
+ asoc->ep->base.bind_addr.port, gfp);
+}
+
+/* Lookup laddr in the bind address list of an association. */
+int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
+ const union sctp_addr *laddr)
+{
+ int found = 0;
+
+ if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
+ sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
+ sctp_sk(asoc->base.sk)))
+ found = 1;
+
+ return found;
+}
+
+/* Set an association id for a given association */
+int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
+{
+ bool preload = gfpflags_allow_blocking(gfp);
+ int ret;
+
+ /* If the id is already assigned, keep it. */
+ if (asoc->assoc_id)
+ return 0;
+
+ if (preload)
+ idr_preload(gfp);
+ spin_lock_bh(&sctp_assocs_id_lock);
+ /* 0, 1, 2 are used as SCTP_FUTURE_ASSOC, SCTP_CURRENT_ASSOC and
+ * SCTP_ALL_ASSOC, so an available id must be > SCTP_ALL_ASSOC.
+ */
+ ret = idr_alloc_cyclic(&sctp_assocs_id, asoc, SCTP_ALL_ASSOC + 1, 0,
+ GFP_NOWAIT);
+ spin_unlock_bh(&sctp_assocs_id_lock);
+ if (preload)
+ idr_preload_end();
+ if (ret < 0)
+ return ret;
+
+ asoc->assoc_id = (sctp_assoc_t)ret;
+ return 0;
+}
+
+/* Free the ASCONF queue */
+static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
+{
+ struct sctp_chunk *asconf;
+ struct sctp_chunk *tmp;
+
+ list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
+ list_del_init(&asconf->list);
+ sctp_chunk_free(asconf);
+ }
+}
+
+/* Free asconf_ack cache */
+static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
+{
+ struct sctp_chunk *ack;
+ struct sctp_chunk *tmp;
+
+ list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
+ transmitted_list) {
+ list_del_init(&ack->transmitted_list);
+ sctp_chunk_free(ack);
+ }
+}
+
+/* Clean up the ASCONF_ACK queue */
+void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc)
+{
+ struct sctp_chunk *ack;
+ struct sctp_chunk *tmp;
+
+ /* We can remove all the entries from the queue up to
+ * the "Peer-Sequence-Number".
+ */
+ list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
+ transmitted_list) {
+ if (ack->subh.addip_hdr->serial ==
+ htonl(asoc->peer.addip_serial))
+ break;
+
+ list_del_init(&ack->transmitted_list);
+ sctp_chunk_free(ack);
+ }
+}
+
+/* Find the ASCONF_ACK whose serial number matches ASCONF */
+struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
+ const struct sctp_association *asoc,
+ __be32 serial)
+{
+ struct sctp_chunk *ack;
+
+ /* Walk through the list of cached ASCONF-ACKs and find the
+ * ack chunk whose serial number matches that of the request.
+ */
+ list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
+ if (sctp_chunk_pending(ack))
+ continue;
+ if (ack->subh.addip_hdr->serial == serial) {
+ sctp_chunk_hold(ack);
+ return ack;
+ }
+ }
+
+ return NULL;
+}
+
+void sctp_asconf_queue_teardown(struct sctp_association *asoc)
+{
+ /* Free any cached ASCONF_ACK chunk. */
+ sctp_assoc_free_asconf_acks(asoc);
+
+ /* Free the ASCONF queue. */
+ sctp_assoc_free_asconf_queue(asoc);
+
+ /* Free any cached ASCONF chunk. */
+ if (asoc->addip_last_asconf)
+ sctp_chunk_free(asoc->addip_last_asconf);
+}
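
associola.c above is where the kernel tracks an association's peer transports, elects the active and retransmission paths, and raises SCTP_PEER_ADDR_CHANGE events through sctp_ulpevent_notify_peer_addr_change(), with sctp_assoc_set_primary() handling an explicit primary-path change. From user space those mechanisms surface as socket options. The sketch below is illustrative only (it assumes the lksctp-tools <netinet/sctp.h> header and an already connected one-to-one SCTP socket); it subscribes to peer address change notifications and asks the kernel to switch the primary path, a request that lands in sctp_assoc_set_primary() above.

/* Illustrative sketch, not part of this patch. */
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>	/* provided by lksctp-tools */

/* Enable SCTP_PEER_ADDR_CHANGE notifications and set a new primary path.
 * 'fd' is a connected one-to-one SCTP socket; 'addr' is one of the peer's
 * transport addresses.
 */
int sctp_steer_primary(int fd, const struct sockaddr_in *addr)
{
	struct sctp_event_subscribe events;
	struct sctp_prim prim;

	memset(&events, 0, sizeof(events));
	events.sctp_address_event = 1;	/* deliver struct sctp_paddr_change events */
	if (setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS, &events, sizeof(events)) < 0)
		return -1;

	memset(&prim, 0, sizeof(prim));
	prim.ssp_assoc_id = 0;		/* ignored on a one-to-one socket */
	memcpy(&prim.ssp_addr, addr, sizeof(*addr));
	return setsockopt(fd, IPPROTO_SCTP, SCTP_PRIMARY_ADDR, &prim, sizeof(prim));
}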
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
new file mode 100644
index 000000000..349641455
--- /dev/null
+++ b/net/sctp/auth.c
@@ -0,0 +1,1089 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* SCTP kernel implementation
+ * (C) Copyright 2007 Hewlett-Packard Development Company, L.P.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Vlad Yasevich <vladislav.yasevich@hp.com>
+ */
+
+#include <crypto/hash.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/scatterlist.h>
+#include <net/sctp/sctp.h>
+#include <net/sctp/auth.h>
+
+static struct sctp_hmac sctp_hmac_list[SCTP_AUTH_NUM_HMACS] = {
+ {
+ /* id 0 is reserved; this entry stays all zero */
+ .hmac_id = SCTP_AUTH_HMAC_ID_RESERVED_0,
+ },
+ {
+ .hmac_id = SCTP_AUTH_HMAC_ID_SHA1,
+ .hmac_name = "hmac(sha1)",
+ .hmac_len = SCTP_SHA1_SIG_SIZE,
+ },
+ {
+ /* id 2 is reserved as well */
+ .hmac_id = SCTP_AUTH_HMAC_ID_RESERVED_2,
+ },
+#if IS_ENABLED(CONFIG_CRYPTO_SHA256)
+ {
+ .hmac_id = SCTP_AUTH_HMAC_ID_SHA256,
+ .hmac_name = "hmac(sha256)",
+ .hmac_len = SCTP_SHA256_SIG_SIZE,
+ }
+#endif
+};
+
+
+void sctp_auth_key_put(struct sctp_auth_bytes *key)
+{
+ if (!key)
+ return;
+
+ if (refcount_dec_and_test(&key->refcnt)) {
+ kfree_sensitive(key);
+ SCTP_DBG_OBJCNT_DEC(keys);
+ }
+}
+
+/* Create a new key structure of a given length */
+static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
+{
+ struct sctp_auth_bytes *key;
+
+ /* Verify that we are not going to overflow INT_MAX */
+ if (key_len > (INT_MAX - sizeof(struct sctp_auth_bytes)))
+ return NULL;
+
+ /* Allocate the shared key */
+ key = kmalloc(sizeof(struct sctp_auth_bytes) + key_len, gfp);
+ if (!key)
+ return NULL;
+
+ key->len = key_len;
+ refcount_set(&key->refcnt, 1);
+ SCTP_DBG_OBJCNT_INC(keys);
+
+ return key;
+}
+
+/* Create a new shared key container with a given key id */
+struct sctp_shared_key *sctp_auth_shkey_create(__u16 key_id, gfp_t gfp)
+{
+ struct sctp_shared_key *new;
+
+ /* Allocate the shared key container */
+ new = kzalloc(sizeof(struct sctp_shared_key), gfp);
+ if (!new)
+ return NULL;
+
+ INIT_LIST_HEAD(&new->key_list);
+ refcount_set(&new->refcnt, 1);
+ new->key_id = key_id;
+
+ return new;
+}
+
+/* Free the shared key structure */
+static void sctp_auth_shkey_destroy(struct sctp_shared_key *sh_key)
+{
+ BUG_ON(!list_empty(&sh_key->key_list));
+ sctp_auth_key_put(sh_key->key);
+ sh_key->key = NULL;
+ kfree(sh_key);
+}
+
+void sctp_auth_shkey_release(struct sctp_shared_key *sh_key)
+{
+ if (refcount_dec_and_test(&sh_key->refcnt))
+ sctp_auth_shkey_destroy(sh_key);
+}
+
+void sctp_auth_shkey_hold(struct sctp_shared_key *sh_key)
+{
+ refcount_inc(&sh_key->refcnt);
+}
+
+/* Destroy the entire key list. This is done during the
+ * association and endpoint free process.
+ */
+void sctp_auth_destroy_keys(struct list_head *keys)
+{
+ struct sctp_shared_key *ep_key;
+ struct sctp_shared_key *tmp;
+
+ if (list_empty(keys))
+ return;
+
+ key_for_each_safe(ep_key, tmp, keys) {
+ list_del_init(&ep_key->key_list);
+ sctp_auth_shkey_release(ep_key);
+ }
+}
+
+/* Compare two byte vectors as numbers. Return values
+ * are:
+ * 0 - vectors are equal
+ * < 0 - vector 1 is smaller than vector2
+ * > 0 - vector 1 is greater than vector2
+ *
+ * Algorithm is:
+ * This is performed by selecting the numerically smaller key vector...
+ * If the key vectors are equal as numbers but differ in length ...
+ * the shorter vector is considered smaller
+ *
+ * Examples (with small values):
+ * 000123456789 > 123456789 (first number is longer)
+ * 000123456789 < 234567891 (second number is larger numerically)
+ * 123456789 > 2345678 (first number is both larger & longer)
+ */
+static int sctp_auth_compare_vectors(struct sctp_auth_bytes *vector1,
+ struct sctp_auth_bytes *vector2)
+{
+ int diff;
+ int i;
+ const __u8 *longer;
+
+ diff = vector1->len - vector2->len;
+ if (diff) {
+ longer = (diff > 0) ? vector1->data : vector2->data;
+
+ /* Check to see if the longer number is
+ * lead-zero padded. If it is not, it
+ * is automatically larger numerically.
+ */
+ for (i = 0; i < abs(diff); i++) {
+ if (longer[i] != 0)
+ return diff;
+ }
+ }
+
+ /* lengths are the same, compare numbers */
+ return memcmp(vector1->data, vector2->data, vector1->len);
+}
+
+/*
+ * Create a key vector as described in SCTP-AUTH, Section 6.1
+ * The RANDOM parameter, the CHUNKS parameter and the HMAC-ALGO
+ * parameter sent by each endpoint are concatenated as byte vectors.
+ * These parameters include the parameter type, parameter length, and
+ * the parameter value, but padding is omitted; all padding MUST be
+ * removed from this concatenation before proceeding with further
+ * computation of keys. Parameters which were not sent are simply
+ * omitted from the concatenation process. The resulting two vectors
+ * are called the two key vectors.
+ */
+static struct sctp_auth_bytes *sctp_auth_make_key_vector(
+ struct sctp_random_param *random,
+ struct sctp_chunks_param *chunks,
+ struct sctp_hmac_algo_param *hmacs,
+ gfp_t gfp)
+{
+ struct sctp_auth_bytes *new;
+ __u32 len;
+ __u32 offset = 0;
+ __u16 random_len, hmacs_len, chunks_len = 0;
+
+ random_len = ntohs(random->param_hdr.length);
+ hmacs_len = ntohs(hmacs->param_hdr.length);
+ if (chunks)
+ chunks_len = ntohs(chunks->param_hdr.length);
+
+ len = random_len + hmacs_len + chunks_len;
+
+ new = sctp_auth_create_key(len, gfp);
+ if (!new)
+ return NULL;
+
+ memcpy(new->data, random, random_len);
+ offset += random_len;
+
+ if (chunks) {
+ memcpy(new->data + offset, chunks, chunks_len);
+ offset += chunks_len;
+ }
+
+ memcpy(new->data + offset, hmacs, hmacs_len);
+
+ return new;
+}
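+
+/* Illustrative layout of the key vector built above, assuming the
+ * usual 32-byte RANDOM value:
+ *
+ *   new->data: [RANDOM param: 4-byte header + 32 bytes]
+ *              [CHUNKS param, only if one was sent]
+ *              [HMAC-ALGO param]
+ *
+ * Each parameter is copied with its header and its unpadded length, as
+ * Section 6.1 requires.
+ */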
+
+
+/* Make a key vector based on our local parameters */
+static struct sctp_auth_bytes *sctp_auth_make_local_vector(
+ const struct sctp_association *asoc,
+ gfp_t gfp)
+{
+ return sctp_auth_make_key_vector(
+ (struct sctp_random_param *)asoc->c.auth_random,
+ (struct sctp_chunks_param *)asoc->c.auth_chunks,
+ (struct sctp_hmac_algo_param *)asoc->c.auth_hmacs, gfp);
+}
+
+/* Make a key vector based on peer's parameters */
+static struct sctp_auth_bytes *sctp_auth_make_peer_vector(
+ const struct sctp_association *asoc,
+ gfp_t gfp)
+{
+ return sctp_auth_make_key_vector(asoc->peer.peer_random,
+ asoc->peer.peer_chunks,
+ asoc->peer.peer_hmacs,
+ gfp);
+}
+
+
+/* Set the value of the association shared key based on the parameters
+ * given. The algorithm is:
+ * From the endpoint pair shared keys and the key vectors the
+ * association shared keys are computed. This is performed by selecting
+ * the numerically smaller key vector and concatenating it to the
+ * endpoint pair shared key, and then concatenating the numerically
+ * larger key vector to that. The result of the concatenation is the
+ * association shared key.
+ */
+static struct sctp_auth_bytes *sctp_auth_asoc_set_secret(
+ struct sctp_shared_key *ep_key,
+ struct sctp_auth_bytes *first_vector,
+ struct sctp_auth_bytes *last_vector,
+ gfp_t gfp)
+{
+ struct sctp_auth_bytes *secret;
+ __u32 offset = 0;
+ __u32 auth_len;
+
+ auth_len = first_vector->len + last_vector->len;
+ if (ep_key->key)
+ auth_len += ep_key->key->len;
+
+ secret = sctp_auth_create_key(auth_len, gfp);
+ if (!secret)
+ return NULL;
+
+ if (ep_key->key) {
+ memcpy(secret->data, ep_key->key->data, ep_key->key->len);
+ offset += ep_key->key->len;
+ }
+
+ memcpy(secret->data + offset, first_vector->data, first_vector->len);
+ offset += first_vector->len;
+
+ memcpy(secret->data + offset, last_vector->data, last_vector->len);
+
+ return secret;
+}
+
+/* Create an association shared key. Follow the algorithm
+ * described in SCTP-AUTH, Section 6.1
+ */
+static struct sctp_auth_bytes *sctp_auth_asoc_create_secret(
+ const struct sctp_association *asoc,
+ struct sctp_shared_key *ep_key,
+ gfp_t gfp)
+{
+ struct sctp_auth_bytes *local_key_vector;
+ struct sctp_auth_bytes *peer_key_vector;
+ struct sctp_auth_bytes *first_vector,
+ *last_vector;
+ struct sctp_auth_bytes *secret = NULL;
+ int cmp;
+
+
+ /* Now we need to build the key vectors
+ * SCTP-AUTH, Section 6.1
+ * The RANDOM parameter, the CHUNKS parameter and the HMAC-ALGO
+ * parameter sent by each endpoint are concatenated as byte vectors.
+ * These parameters include the parameter type, parameter length, and
+ * the parameter value, but padding is omitted; all padding MUST be
+ * removed from this concatenation before proceeding with further
+ * computation of keys. Parameters which were not sent are simply
+ * omitted from the concatenation process. The resulting two vectors
+ * are called the two key vectors.
+ */
+
+ local_key_vector = sctp_auth_make_local_vector(asoc, gfp);
+ peer_key_vector = sctp_auth_make_peer_vector(asoc, gfp);
+
+ if (!peer_key_vector || !local_key_vector)
+ goto out;
+
+ /* Figure out the order in which the key_vectors will be
+ * added to the endpoint shared key.
+ * SCTP-AUTH, Section 6.1:
+ * This is performed by selecting the numerically smaller key
+ * vector and concatenating it to the endpoint pair shared
+ * key, and then concatenating the numerically larger key
+ * vector to that. If the key vectors are equal as numbers
+ * but differ in length, then the concatenation order is the
+ * endpoint shared key, followed by the shorter key vector,
+ * followed by the longer key vector. Otherwise, the key
+ * vectors are identical, and may be concatenated to the
+ * endpoint pair key in any order.
+ */
+ cmp = sctp_auth_compare_vectors(local_key_vector,
+ peer_key_vector);
+ if (cmp < 0) {
+ first_vector = local_key_vector;
+ last_vector = peer_key_vector;
+ } else {
+ first_vector = peer_key_vector;
+ last_vector = local_key_vector;
+ }
+
+ secret = sctp_auth_asoc_set_secret(ep_key, first_vector, last_vector,
+ gfp);
+out:
+ sctp_auth_key_put(local_key_vector);
+ sctp_auth_key_put(peer_key_vector);
+
+ return secret;
+}
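+
+/* Example of the resulting ordering: with endpoint pair shared key K,
+ * local key vector L and peer key vector P, the association shared key
+ * is K || L || P when L compares numerically smaller than P, and
+ * K || P || L otherwise.  For key id 0, K is the NULL (zero-length)
+ * key, so the secret is simply the two key vectors in numerical order.
+ */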
+
+/*
+ * Populate the association overlay list with the list
+ * from the endpoint.
+ */
+int sctp_auth_asoc_copy_shkeys(const struct sctp_endpoint *ep,
+ struct sctp_association *asoc,
+ gfp_t gfp)
+{
+ struct sctp_shared_key *sh_key;
+ struct sctp_shared_key *new;
+
+ BUG_ON(!list_empty(&asoc->endpoint_shared_keys));
+
+ key_for_each(sh_key, &ep->endpoint_shared_keys) {
+ new = sctp_auth_shkey_create(sh_key->key_id, gfp);
+ if (!new)
+ goto nomem;
+
+ new->key = sh_key->key;
+ sctp_auth_key_hold(new->key);
+ list_add(&new->key_list, &asoc->endpoint_shared_keys);
+ }
+
+ return 0;
+
+nomem:
+ sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);
+ return -ENOMEM;
+}
+
+
+/* Public interface to create the association shared key.
+ * See code above for the algorithm.
+ */
+int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp)
+{
+ struct sctp_auth_bytes *secret;
+ struct sctp_shared_key *ep_key;
+ struct sctp_chunk *chunk;
+
+ /* If we don't support AUTH, or peer is not capable
+ * we don't need to do anything.
+ */
+ if (!asoc->peer.auth_capable)
+ return 0;
+
+ /* If the key_id is non-zero and we couldn't find an
+ * endpoint pair shared key, we can't compute the
+ * secret.
+ * For key_id 0, endpoint pair shared key is a NULL key.
+ */
+ ep_key = sctp_auth_get_shkey(asoc, asoc->active_key_id);
+ BUG_ON(!ep_key);
+
+ secret = sctp_auth_asoc_create_secret(asoc, ep_key, gfp);
+ if (!secret)
+ return -ENOMEM;
+
+ sctp_auth_key_put(asoc->asoc_shared_key);
+ asoc->asoc_shared_key = secret;
+ asoc->shkey = ep_key;
+
+ /* Update send queue in case any chunk already in there now
+ * needs authenticating
+ */
+ list_for_each_entry(chunk, &asoc->outqueue.out_chunk_list, list) {
+ if (sctp_auth_send_cid(chunk->chunk_hdr->type, asoc)) {
+ chunk->auth = 1;
+ if (!chunk->shkey) {
+ chunk->shkey = asoc->shkey;
+ sctp_auth_shkey_hold(chunk->shkey);
+ }
+ }
+ }
+
+ return 0;
+}
+
+
+/* Find the endpoint pair shared key based on the key_id */
+struct sctp_shared_key *sctp_auth_get_shkey(
+ const struct sctp_association *asoc,
+ __u16 key_id)
+{
+ struct sctp_shared_key *key;
+
+ /* First search associations set of endpoint pair shared keys */
+ key_for_each(key, &asoc->endpoint_shared_keys) {
+ if (key->key_id == key_id) {
+ if (!key->deactivated)
+ return key;
+ break;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * Initialize all the possible digest transforms that we can use. Right
+ * now, the supported digests are SHA1 and SHA256. We do this here once
+ * because of the restriction that transforms may only be allocated in
+ * user context. This forces us to pre-allocate all possible transforms
+ * at the endpoint init time.
+ */
+int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp)
+{
+ struct crypto_shash *tfm = NULL;
+ __u16 id;
+
+ /* If the transforms are already allocated, we are done */
+ if (ep->auth_hmacs)
+ return 0;
+
+ /* Allocate the array of pointers to transforms */
+ ep->auth_hmacs = kcalloc(SCTP_AUTH_NUM_HMACS,
+ sizeof(struct crypto_shash *),
+ gfp);
+ if (!ep->auth_hmacs)
+ return -ENOMEM;
+
+ for (id = 0; id < SCTP_AUTH_NUM_HMACS; id++) {
+
+ /* See if we support the id. Supported IDs have name and
+ * length fields set, so that we can allocate and use
+ * them. We can safely just check for name, for without the
+ * name, we can't allocate the TFM.
+ */
+ if (!sctp_hmac_list[id].hmac_name)
+ continue;
+
+ /* If this TFM has been allocated, we are all set */
+ if (ep->auth_hmacs[id])
+ continue;
+
+ /* Allocate the transform for this HMAC id */
+ tfm = crypto_alloc_shash(sctp_hmac_list[id].hmac_name, 0, 0);
+ if (IS_ERR(tfm))
+ goto out_err;
+
+ ep->auth_hmacs[id] = tfm;
+ }
+
+ return 0;
+
+out_err:
+ /* Clean up any successful allocations */
+ sctp_auth_destroy_hmacs(ep->auth_hmacs);
+ ep->auth_hmacs = NULL;
+ return -ENOMEM;
+}
+
+/* Destroy the hmac tfm array */
+void sctp_auth_destroy_hmacs(struct crypto_shash *auth_hmacs[])
+{
+ int i;
+
+ if (!auth_hmacs)
+ return;
+
+ for (i = 0; i < SCTP_AUTH_NUM_HMACS; i++) {
+ crypto_free_shash(auth_hmacs[i]);
+ }
+ kfree(auth_hmacs);
+}
+
+
+struct sctp_hmac *sctp_auth_get_hmac(__u16 hmac_id)
+{
+ return &sctp_hmac_list[hmac_id];
+}
+
+/* Get the hmac description that we can use to build
+ * the AUTH chunk
+ */
+struct sctp_hmac *sctp_auth_asoc_get_hmac(const struct sctp_association *asoc)
+{
+ struct sctp_hmac_algo_param *hmacs;
+ __u16 n_elt;
+ __u16 id = 0;
+ int i;
+
+ /* If we have a default entry, use it */
+ if (asoc->default_hmac_id)
+ return &sctp_hmac_list[asoc->default_hmac_id];
+
+ /* Since we do not have a default entry, find the first entry
+ * we support and return that. Do not cache that id.
+ */
+ hmacs = asoc->peer.peer_hmacs;
+ if (!hmacs)
+ return NULL;
+
+ n_elt = (ntohs(hmacs->param_hdr.length) -
+ sizeof(struct sctp_paramhdr)) >> 1;
+ for (i = 0; i < n_elt; i++) {
+ id = ntohs(hmacs->hmac_ids[i]);
+
+ /* Check the id is in the supported range. And
+ * see if we support the id. Supported IDs have name and
+ * length fields set, so that we can allocate and use
+ * them. We can safely just check for name, for without the
+ * name, we can't allocate the TFM.
+ */
+ if (id > SCTP_AUTH_HMAC_ID_MAX ||
+ !sctp_hmac_list[id].hmac_name) {
+ id = 0;
+ continue;
+ }
+
+ break;
+ }
+
+ if (id == 0)
+ return NULL;
+
+ return &sctp_hmac_list[id];
+}
+
+static int __sctp_auth_find_hmacid(__be16 *hmacs, int n_elts, __be16 hmac_id)
+{
+ int found = 0;
+ int i;
+
+ for (i = 0; i < n_elts; i++) {
+ if (hmac_id == hmacs[i]) {
+ found = 1;
+ break;
+ }
+ }
+
+ return found;
+}
+
+/* See if the HMAC_ID is one that we claim as supported */
+int sctp_auth_asoc_verify_hmac_id(const struct sctp_association *asoc,
+ __be16 hmac_id)
+{
+ struct sctp_hmac_algo_param *hmacs;
+ __u16 n_elt;
+
+ if (!asoc)
+ return 0;
+
+ hmacs = (struct sctp_hmac_algo_param *)asoc->c.auth_hmacs;
+ n_elt = (ntohs(hmacs->param_hdr.length) -
+ sizeof(struct sctp_paramhdr)) >> 1;
+
+ return __sctp_auth_find_hmacid(hmacs->hmac_ids, n_elt, hmac_id);
+}
+
+
+/* Cache the default HMAC id. This is to follow the text from SCTP-AUTH:
+ * Section 6.1:
+ * The receiver of a HMAC-ALGO parameter SHOULD use the first listed
+ * algorithm it supports.
+ */
+void sctp_auth_asoc_set_default_hmac(struct sctp_association *asoc,
+ struct sctp_hmac_algo_param *hmacs)
+{
+ struct sctp_endpoint *ep;
+ __u16 id;
+ int i;
+ int n_params;
+
+ /* if the default id is already set, use it */
+ if (asoc->default_hmac_id)
+ return;
+
+ n_params = (ntohs(hmacs->param_hdr.length) -
+ sizeof(struct sctp_paramhdr)) >> 1;
+ ep = asoc->ep;
+ for (i = 0; i < n_params; i++) {
+ id = ntohs(hmacs->hmac_ids[i]);
+
+ /* Check the id is in the supported range */
+ if (id > SCTP_AUTH_HMAC_ID_MAX)
+ continue;
+
+ /* If this TFM has been allocated, use this id */
+ if (ep->auth_hmacs[id]) {
+ asoc->default_hmac_id = id;
+ break;
+ }
+ }
+}
+
+
+/* Check to see if the given chunk is supposed to be authenticated */
+static int __sctp_auth_cid(enum sctp_cid chunk, struct sctp_chunks_param *param)
+{
+ unsigned short len;
+ int found = 0;
+ int i;
+
+ if (!param || param->param_hdr.length == 0)
+ return 0;
+
+ len = ntohs(param->param_hdr.length) - sizeof(struct sctp_paramhdr);
+
+ /* SCTP-AUTH, Section 3.2
+ * The chunk types for INIT, INIT-ACK, SHUTDOWN-COMPLETE and AUTH
+ * chunks MUST NOT be listed in the CHUNKS parameter. However, if
+ * a CHUNKS parameter is received then the types for INIT, INIT-ACK,
+ * SHUTDOWN-COMPLETE and AUTH chunks MUST be ignored.
+ */
+ for (i = 0; !found && i < len; i++) {
+ switch (param->chunks[i]) {
+ case SCTP_CID_INIT:
+ case SCTP_CID_INIT_ACK:
+ case SCTP_CID_SHUTDOWN_COMPLETE:
+ case SCTP_CID_AUTH:
+ break;
+
+ default:
+ if (param->chunks[i] == chunk)
+ found = 1;
+ break;
+ }
+ }
+
+ return found;
+}
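+
+/* Example: if the CHUNKS parameter lists ASCONF and ASCONF-ACK, then
+ * __sctp_auth_cid(SCTP_CID_ASCONF, param) returns 1, while queries for
+ * INIT, INIT-ACK, SHUTDOWN-COMPLETE or AUTH always return 0, even if a
+ * misbehaving peer happened to list them.
+ */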
+
+/* Check if peer requested that this chunk is authenticated */
+int sctp_auth_send_cid(enum sctp_cid chunk, const struct sctp_association *asoc)
+{
+ if (!asoc)
+ return 0;
+
+ if (!asoc->peer.auth_capable)
+ return 0;
+
+ return __sctp_auth_cid(chunk, asoc->peer.peer_chunks);
+}
+
+/* Check if we requested that peer authenticate this chunk. */
+int sctp_auth_recv_cid(enum sctp_cid chunk, const struct sctp_association *asoc)
+{
+ if (!asoc)
+ return 0;
+
+ if (!asoc->peer.auth_capable)
+ return 0;
+
+ return __sctp_auth_cid(chunk,
+ (struct sctp_chunks_param *)asoc->c.auth_chunks);
+}
+
+/* SCTP-AUTH: Section 6.2:
+ * The sender MUST calculate the MAC as described in RFC2104 [2] using
+ * the hash function H as described by the MAC Identifier and the shared
+ * association key K based on the endpoint pair shared key described by
+ * the shared key identifier. The 'data' used for the computation of
+ * the AUTH-chunk is given by the AUTH chunk with its HMAC field set to
+ * zero (as shown in Figure 6) followed by all chunks that are placed
+ * after the AUTH chunk in the SCTP packet.
+ */
+void sctp_auth_calculate_hmac(const struct sctp_association *asoc,
+ struct sk_buff *skb, struct sctp_auth_chunk *auth,
+ struct sctp_shared_key *ep_key, gfp_t gfp)
+{
+ struct sctp_auth_bytes *asoc_key;
+ struct crypto_shash *tfm;
+ __u16 key_id, hmac_id;
+ unsigned char *end;
+ int free_key = 0;
+ __u8 *digest;
+
+ /* Extract the info we need:
+ * - hmac id
+ * - key id
+ */
+ key_id = ntohs(auth->auth_hdr.shkey_id);
+ hmac_id = ntohs(auth->auth_hdr.hmac_id);
+
+ if (key_id == asoc->active_key_id)
+ asoc_key = asoc->asoc_shared_key;
+ else {
+ /* ep_key can't be NULL here */
+ asoc_key = sctp_auth_asoc_create_secret(asoc, ep_key, gfp);
+ if (!asoc_key)
+ return;
+
+ free_key = 1;
+ }
+
+ /* set up scatter list */
+ end = skb_tail_pointer(skb);
+
+ tfm = asoc->ep->auth_hmacs[hmac_id];
+
+ digest = auth->auth_hdr.hmac;
+ if (crypto_shash_setkey(tfm, &asoc_key->data[0], asoc_key->len))
+ goto free;
+
+ crypto_shash_tfm_digest(tfm, (u8 *)auth, end - (unsigned char *)auth,
+ digest);
+
+free:
+ if (free_key)
+ sctp_auth_key_put(asoc_key);
+}
+
+/* API Helpers */
+
+/* Add a chunk to the endpoint authenticated chunk list */
+int sctp_auth_ep_add_chunkid(struct sctp_endpoint *ep, __u8 chunk_id)
+{
+ struct sctp_chunks_param *p = ep->auth_chunk_list;
+ __u16 nchunks;
+ __u16 param_len;
+
+ /* If this chunk is already specified, we are done */
+ if (__sctp_auth_cid(chunk_id, p))
+ return 0;
+
+ /* Check if we can add this chunk to the array */
+ param_len = ntohs(p->param_hdr.length);
+ nchunks = param_len - sizeof(struct sctp_paramhdr);
+ if (nchunks == SCTP_NUM_CHUNK_TYPES)
+ return -EINVAL;
+
+ p->chunks[nchunks] = chunk_id;
+ p->param_hdr.length = htons(param_len + 1);
+ return 0;
+}
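+
+/* Worked example of the length arithmetic above: right after
+ * sctp_auth_init() the CHUNKS parameter holds only its 4-byte header
+ * (length == 4), so nchunks == 0; adding ASCONF then stores it in
+ * chunks[0] and advertises a length of 5.  As everywhere in SCTP, the
+ * parameter length excludes padding.
+ */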
+
+/* Add hmac identifiers to the endpoint list of supported hmac ids */
+int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep,
+ struct sctp_hmacalgo *hmacs)
+{
+ int has_sha1 = 0;
+ __u16 id;
+ int i;
+
+ /* Scan the list looking for unsupported id. Also make sure that
+ * SHA1 is specified.
+ */
+ for (i = 0; i < hmacs->shmac_num_idents; i++) {
+ id = hmacs->shmac_idents[i];
+
+ if (id > SCTP_AUTH_HMAC_ID_MAX)
+ return -EOPNOTSUPP;
+
+ if (SCTP_AUTH_HMAC_ID_SHA1 == id)
+ has_sha1 = 1;
+
+ if (!sctp_hmac_list[id].hmac_name)
+ return -EOPNOTSUPP;
+ }
+
+ if (!has_sha1)
+ return -EINVAL;
+
+ for (i = 0; i < hmacs->shmac_num_idents; i++)
+ ep->auth_hmacs_list->hmac_ids[i] =
+ htons(hmacs->shmac_idents[i]);
+ ep->auth_hmacs_list->param_hdr.length =
+ htons(sizeof(struct sctp_paramhdr) +
+ hmacs->shmac_num_idents * sizeof(__u16));
+ return 0;
+}
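+
+/* Example: idents { SCTP_AUTH_HMAC_ID_SHA1, SCTP_AUTH_HMAC_ID_SHA256 }
+ * with shmac_num_idents == 2 passes the checks (SHA-1 is present) and
+ * leaves the HMAC-ALGO parameter with a length of
+ * 4 + 2 * sizeof(__u16) == 8 bytes.  A list without SHA-1 fails with
+ * -EINVAL; an unknown id fails with -EOPNOTSUPP.
+ */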
+
+/* Set a new shared key on either endpoint or association. If the
+ * key with a same ID already exists, replace the key (remove the
+ * old key and add a new one).
+ */
+int sctp_auth_set_key(struct sctp_endpoint *ep,
+ struct sctp_association *asoc,
+ struct sctp_authkey *auth_key)
+{
+ struct sctp_shared_key *cur_key, *shkey;
+ struct sctp_auth_bytes *key;
+ struct list_head *sh_keys;
+ int replace = 0;
+
+ /* Try to find the given key id to see if
+ * we are doing a replace, or adding a new key
+ */
+ if (asoc) {
+ if (!asoc->peer.auth_capable)
+ return -EACCES;
+ sh_keys = &asoc->endpoint_shared_keys;
+ } else {
+ if (!ep->auth_enable)
+ return -EACCES;
+ sh_keys = &ep->endpoint_shared_keys;
+ }
+
+ key_for_each(shkey, sh_keys) {
+ if (shkey->key_id == auth_key->sca_keynumber) {
+ replace = 1;
+ break;
+ }
+ }
+
+ cur_key = sctp_auth_shkey_create(auth_key->sca_keynumber, GFP_KERNEL);
+ if (!cur_key)
+ return -ENOMEM;
+
+ /* Create a new key data based on the info passed in */
+ key = sctp_auth_create_key(auth_key->sca_keylength, GFP_KERNEL);
+ if (!key) {
+ kfree(cur_key);
+ return -ENOMEM;
+ }
+
+ memcpy(key->data, &auth_key->sca_key[0], auth_key->sca_keylength);
+ cur_key->key = key;
+
+ if (!replace) {
+ list_add(&cur_key->key_list, sh_keys);
+ return 0;
+ }
+
+ list_del_init(&shkey->key_list);
+ list_add(&cur_key->key_list, sh_keys);
+
+ if (asoc && asoc->active_key_id == auth_key->sca_keynumber &&
+ sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL)) {
+ list_del_init(&cur_key->key_list);
+ sctp_auth_shkey_release(cur_key);
+ list_add(&shkey->key_list, sh_keys);
+ return -ENOMEM;
+ }
+
+ sctp_auth_shkey_release(shkey);
+ return 0;
+}
+
+int sctp_auth_set_active_key(struct sctp_endpoint *ep,
+ struct sctp_association *asoc,
+ __u16 key_id)
+{
+ struct sctp_shared_key *key;
+ struct list_head *sh_keys;
+ int found = 0;
+
+ /* The key identifier MUST correspond to an existing key */
+ if (asoc) {
+ if (!asoc->peer.auth_capable)
+ return -EACCES;
+ sh_keys = &asoc->endpoint_shared_keys;
+ } else {
+ if (!ep->auth_enable)
+ return -EACCES;
+ sh_keys = &ep->endpoint_shared_keys;
+ }
+
+ key_for_each(key, sh_keys) {
+ if (key->key_id == key_id) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found || key->deactivated)
+ return -EINVAL;
+
+ if (asoc) {
+ __u16 active_key_id = asoc->active_key_id;
+
+ asoc->active_key_id = key_id;
+ if (sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL)) {
+ asoc->active_key_id = active_key_id;
+ return -ENOMEM;
+ }
+ } else
+ ep->active_key_id = key_id;
+
+ return 0;
+}
+
+int sctp_auth_del_key_id(struct sctp_endpoint *ep,
+ struct sctp_association *asoc,
+ __u16 key_id)
+{
+ struct sctp_shared_key *key;
+ struct list_head *sh_keys;
+ int found = 0;
+
+ /* The key identifier MUST NOT be the current active key
+ * The key identifier MUST correspond to an existing key
+ */
+ if (asoc) {
+ if (!asoc->peer.auth_capable)
+ return -EACCES;
+ if (asoc->active_key_id == key_id)
+ return -EINVAL;
+
+ sh_keys = &asoc->endpoint_shared_keys;
+ } else {
+ if (!ep->auth_enable)
+ return -EACCES;
+ if (ep->active_key_id == key_id)
+ return -EINVAL;
+
+ sh_keys = &ep->endpoint_shared_keys;
+ }
+
+ key_for_each(key, sh_keys) {
+ if (key->key_id == key_id) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found)
+ return -EINVAL;
+
+ /* Delete the shared key */
+ list_del_init(&key->key_list);
+ sctp_auth_shkey_release(key);
+
+ return 0;
+}
+
+int sctp_auth_deact_key_id(struct sctp_endpoint *ep,
+ struct sctp_association *asoc, __u16 key_id)
+{
+ struct sctp_shared_key *key;
+ struct list_head *sh_keys;
+ int found = 0;
+
+ /* The key identifier MUST NOT be the current active key
+ * The key identifier MUST correspond to an existing key
+ */
+ if (asoc) {
+ if (!asoc->peer.auth_capable)
+ return -EACCES;
+ if (asoc->active_key_id == key_id)
+ return -EINVAL;
+
+ sh_keys = &asoc->endpoint_shared_keys;
+ } else {
+ if (!ep->auth_enable)
+ return -EACCES;
+ if (ep->active_key_id == key_id)
+ return -EINVAL;
+
+ sh_keys = &ep->endpoint_shared_keys;
+ }
+
+ key_for_each(key, sh_keys) {
+ if (key->key_id == key_id) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found)
+ return -EINVAL;
+
+ /* refcnt == 1 and !list_empty mean it's not being used anywhere
+ * and deactivated will be set, so it's time to notify userland
+ * that this shkey can be freed.
+ */
+ if (asoc && !list_empty(&key->key_list) &&
+ refcount_read(&key->refcnt) == 1) {
+ struct sctp_ulpevent *ev;
+
+ ev = sctp_ulpevent_make_authkey(asoc, key->key_id,
+ SCTP_AUTH_FREE_KEY, GFP_KERNEL);
+ if (ev)
+ asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
+ }
+
+ key->deactivated = 1;
+
+ return 0;
+}
+
+int sctp_auth_init(struct sctp_endpoint *ep, gfp_t gfp)
+{
+ int err = -ENOMEM;
+
+ /* Allocate space for HMACS and CHUNKS authentication
+ * variables. These are arrays that we encode directly
+ * into parameters to make the rest of the operations easier.
+ */
+ if (!ep->auth_hmacs_list) {
+ struct sctp_hmac_algo_param *auth_hmacs;
+
+ auth_hmacs = kzalloc(struct_size(auth_hmacs, hmac_ids,
+ SCTP_AUTH_NUM_HMACS), gfp);
+ if (!auth_hmacs)
+ goto nomem;
+ /* Initialize the HMACS parameter.
+ * SCTP-AUTH: Section 3.3
+ * Every endpoint supporting SCTP chunk authentication MUST
+ * support the HMAC based on the SHA-1 algorithm.
+ */
+ auth_hmacs->param_hdr.type = SCTP_PARAM_HMAC_ALGO;
+ auth_hmacs->param_hdr.length =
+ htons(sizeof(struct sctp_paramhdr) + 2);
+ auth_hmacs->hmac_ids[0] = htons(SCTP_AUTH_HMAC_ID_SHA1);
+ ep->auth_hmacs_list = auth_hmacs;
+ }
+
+ if (!ep->auth_chunk_list) {
+ struct sctp_chunks_param *auth_chunks;
+
+ auth_chunks = kzalloc(sizeof(*auth_chunks) +
+ SCTP_NUM_CHUNK_TYPES, gfp);
+ if (!auth_chunks)
+ goto nomem;
+ /* Initialize the CHUNKS parameter */
+ auth_chunks->param_hdr.type = SCTP_PARAM_CHUNKS;
+ auth_chunks->param_hdr.length =
+ htons(sizeof(struct sctp_paramhdr));
+ ep->auth_chunk_list = auth_chunks;
+ }
+
+ /* Allocate and initialize transform arrays for supported
+ * HMACs.
+ */
+ err = sctp_auth_init_hmacs(ep, gfp);
+ if (err)
+ goto nomem;
+
+ return 0;
+
+nomem:
+ /* Free all allocations */
+ kfree(ep->auth_hmacs_list);
+ kfree(ep->auth_chunk_list);
+ ep->auth_hmacs_list = NULL;
+ ep->auth_chunk_list = NULL;
+ return err;
+}
+
+void sctp_auth_free(struct sctp_endpoint *ep)
+{
+ kfree(ep->auth_hmacs_list);
+ kfree(ep->auth_chunk_list);
+ ep->auth_hmacs_list = NULL;
+ ep->auth_chunk_list = NULL;
+ sctp_auth_destroy_hmacs(ep->auth_hmacs);
+ ep->auth_hmacs = NULL;
+}
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
new file mode 100644
index 000000000..6b95d3ba8
--- /dev/null
+++ b/net/sctp/bind_addr.c
@@ -0,0 +1,575 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2003
+ * Copyright (c) Cisco 1999,2000
+ * Copyright (c) Motorola 1999,2000,2001
+ * Copyright (c) La Monte H.P. Yarroll 2001
+ *
+ * This file is part of the SCTP kernel implementation.
+ *
+ * A collection class to handle the storage of transport addresses.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * Daisy Chang <daisyc@us.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/in.h>
+#include <net/sock.h>
+#include <net/ipv6.h>
+#include <net/if_inet6.h>
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+
+/* Forward declarations for internal helpers. */
+static int sctp_copy_one_addr(struct net *net, struct sctp_bind_addr *dest,
+ union sctp_addr *addr, enum sctp_scope scope,
+ gfp_t gfp, int flags);
+static void sctp_bind_addr_clean(struct sctp_bind_addr *);
+
+/* First Level Abstractions. */
+
+/* Copy 'src' to 'dest' taking 'scope' into account. Omit addresses
+ * in 'src' which have a broader scope than 'scope'.
+ */
+int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest,
+ const struct sctp_bind_addr *src,
+ enum sctp_scope scope, gfp_t gfp,
+ int flags)
+{
+ struct sctp_sockaddr_entry *addr;
+ int error = 0;
+
+ /* All addresses share the same port. */
+ dest->port = src->port;
+
+ /* Extract the addresses which are relevant for this scope. */
+ list_for_each_entry(addr, &src->address_list, list) {
+ error = sctp_copy_one_addr(net, dest, &addr->a, scope,
+ gfp, flags);
+ if (error < 0)
+ goto out;
+ }
+
+ /* If there are no addresses matching the scope and
+ * this is global scope, try to get a link scope address, with
+ * the assumption that we must be sitting behind a NAT.
+ */
+ if (list_empty(&dest->address_list) && (SCTP_SCOPE_GLOBAL == scope)) {
+ list_for_each_entry(addr, &src->address_list, list) {
+ error = sctp_copy_one_addr(net, dest, &addr->a,
+ SCTP_SCOPE_LINK, gfp,
+ flags);
+ if (error < 0)
+ goto out;
+ }
+ }
+
+ /* If somehow no addresses were found that can be used with this
+ * scope, it's an error.
+ */
+ if (list_empty(&dest->address_list))
+ error = -ENETUNREACH;
+
+out:
+ if (error)
+ sctp_bind_addr_clean(dest);
+
+ return error;
+}
+
+/* Exactly duplicate the address lists. This is necessary when doing
+ * peel-offs and accepts. We don't want to put all the current system
+ * addresses into the endpoint. That's useless. But we do want to
+ * duplicate the list of bound addresses that the older endpoint used.
+ */
+int sctp_bind_addr_dup(struct sctp_bind_addr *dest,
+ const struct sctp_bind_addr *src,
+ gfp_t gfp)
+{
+ struct sctp_sockaddr_entry *addr;
+ int error = 0;
+
+ /* All addresses share the same port. */
+ dest->port = src->port;
+
+ list_for_each_entry(addr, &src->address_list, list) {
+ error = sctp_add_bind_addr(dest, &addr->a, sizeof(addr->a),
+ 1, gfp);
+ if (error < 0)
+ break;
+ }
+
+ return error;
+}
+
+/* Initialize the SCTP_bind_addr structure for either an endpoint or
+ * an association.
+ */
+void sctp_bind_addr_init(struct sctp_bind_addr *bp, __u16 port)
+{
+ INIT_LIST_HEAD(&bp->address_list);
+ bp->port = port;
+}
+
+/* Dispose of the address list. */
+static void sctp_bind_addr_clean(struct sctp_bind_addr *bp)
+{
+ struct sctp_sockaddr_entry *addr, *temp;
+
+ /* Empty the bind address list. */
+ list_for_each_entry_safe(addr, temp, &bp->address_list, list) {
+ list_del_rcu(&addr->list);
+ kfree_rcu(addr, rcu);
+ SCTP_DBG_OBJCNT_DEC(addr);
+ }
+}
+
+/* Dispose of an SCTP_bind_addr structure */
+void sctp_bind_addr_free(struct sctp_bind_addr *bp)
+{
+ /* Empty the bind address list. */
+ sctp_bind_addr_clean(bp);
+}
+
+/* Add an address to the bind address list in the SCTP_bind_addr structure. */
+int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
+ int new_size, __u8 addr_state, gfp_t gfp)
+{
+ struct sctp_sockaddr_entry *addr;
+
+ /* Add the address to the bind address list. */
+ addr = kzalloc(sizeof(*addr), gfp);
+ if (!addr)
+ return -ENOMEM;
+
+ memcpy(&addr->a, new, min_t(size_t, sizeof(*new), new_size));
+
+ /* Fix up the port if it has not yet been set.
+ * Both v4 and v6 have the port at the same offset.
+ */
+ if (!addr->a.v4.sin_port)
+ addr->a.v4.sin_port = htons(bp->port);
+
+ addr->state = addr_state;
+ addr->valid = 1;
+
+ INIT_LIST_HEAD(&addr->list);
+
+ /* We always hold a socket lock when calling this function,
+ * and that acts as a writer synchronizing lock.
+ */
+ list_add_tail_rcu(&addr->list, &bp->address_list);
+ SCTP_DBG_OBJCNT_INC(addr);
+
+ return 0;
+}
+
+/* Delete an address from the bind address list in the SCTP_bind_addr
+ * structure.
+ */
+int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr)
+{
+ struct sctp_sockaddr_entry *addr, *temp;
+ int found = 0;
+
+ /* We hold the socket lock when calling this function,
+ * and that acts as a writer synchronizing lock.
+ */
+ list_for_each_entry_safe(addr, temp, &bp->address_list, list) {
+ if (sctp_cmp_addr_exact(&addr->a, del_addr)) {
+ /* Found the exact match. */
+ found = 1;
+ addr->valid = 0;
+ list_del_rcu(&addr->list);
+ break;
+ }
+ }
+
+ if (found) {
+ kfree_rcu(addr, rcu);
+ SCTP_DBG_OBJCNT_DEC(addr);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/* Create a network byte-order representation of all the addresses
+ * formatted as SCTP parameters.
+ *
+ * The second argument is the return value for the length.
+ */
+union sctp_params sctp_bind_addrs_to_raw(const struct sctp_bind_addr *bp,
+ int *addrs_len,
+ gfp_t gfp)
+{
+ union sctp_params addrparms;
+ union sctp_params retval;
+ int addrparms_len;
+ union sctp_addr_param rawaddr;
+ int len;
+ struct sctp_sockaddr_entry *addr;
+ struct list_head *pos;
+ struct sctp_af *af;
+
+ addrparms_len = 0;
+ len = 0;
+
+ /* Allocate enough memory at once. */
+ list_for_each(pos, &bp->address_list) {
+ len += sizeof(union sctp_addr_param);
+ }
+
+ /* Don't even bother embedding an address if there
+ * is only one.
+ */
+ if (len == sizeof(union sctp_addr_param)) {
+ retval.v = NULL;
+ goto end_raw;
+ }
+
+ retval.v = kmalloc(len, gfp);
+ if (!retval.v)
+ goto end_raw;
+
+ addrparms = retval;
+
+ list_for_each_entry(addr, &bp->address_list, list) {
+ af = sctp_get_af_specific(addr->a.v4.sin_family);
+ len = af->to_addr_param(&addr->a, &rawaddr);
+ memcpy(addrparms.v, &rawaddr, len);
+ addrparms.v += len;
+ addrparms_len += len;
+ }
+
+end_raw:
+ *addrs_len = addrparms_len;
+ return retval;
+}
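+
+/* Example: a bind list with a single address yields retval.v == NULL
+ * and *addrs_len == 0, as a lone address need not be embedded.  For
+ * two IPv4 addresses the buffer is sized for two full
+ * union sctp_addr_param entries, but *addrs_len reports only what
+ * to_addr_param() wrote: two 8-byte IPv4 address parameters, 16 bytes
+ * in total.
+ */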
+
+/*
+ * Create an address list out of the raw address list format (IPv4 and IPv6
+ * address parameters).
+ */
+int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw_addr_list,
+ int addrs_len, __u16 port, gfp_t gfp)
+{
+ union sctp_addr_param *rawaddr;
+ struct sctp_paramhdr *param;
+ union sctp_addr addr;
+ int retval = 0;
+ int len;
+ struct sctp_af *af;
+
+ /* Convert the raw address to standard address format */
+ while (addrs_len) {
+ param = (struct sctp_paramhdr *)raw_addr_list;
+ rawaddr = (union sctp_addr_param *)raw_addr_list;
+
+ af = sctp_get_af_specific(param_type2af(param->type));
+ if (unlikely(!af) ||
+ !af->from_addr_param(&addr, rawaddr, htons(port), 0)) {
+ retval = -EINVAL;
+ goto out_err;
+ }
+
+ if (sctp_bind_addr_state(bp, &addr) != -1)
+ goto next;
+ retval = sctp_add_bind_addr(bp, &addr, sizeof(addr),
+ SCTP_ADDR_SRC, gfp);
+ if (retval)
+ /* Can't finish building the list, clean up. */
+ goto out_err;
+
+next:
+ len = ntohs(param->length);
+ addrs_len -= len;
+ raw_addr_list += len;
+ }
+
+ return retval;
+
+out_err:
+ if (retval)
+ sctp_bind_addr_clean(bp);
+
+ return retval;
+}
+
+/********************************************************************
+ * 2nd Level Abstractions
+ ********************************************************************/
+
+/* Does this contain a specified address? Allow wildcarding. */
+int sctp_bind_addr_match(struct sctp_bind_addr *bp,
+ const union sctp_addr *addr,
+ struct sctp_sock *opt)
+{
+ struct sctp_sockaddr_entry *laddr;
+ int match = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(laddr, &bp->address_list, list) {
+ if (!laddr->valid)
+ continue;
+ if (opt->pf->cmp_addr(&laddr->a, addr, opt)) {
+ match = 1;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return match;
+}
+
+int sctp_bind_addrs_check(struct sctp_sock *sp,
+ struct sctp_sock *sp2, int cnt2)
+{
+ struct sctp_bind_addr *bp2 = &sp2->ep->base.bind_addr;
+ struct sctp_bind_addr *bp = &sp->ep->base.bind_addr;
+ struct sctp_sockaddr_entry *laddr, *laddr2;
+ bool exist = false;
+ int cnt = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(laddr, &bp->address_list, list) {
+ list_for_each_entry_rcu(laddr2, &bp2->address_list, list) {
+ if (sp->pf->af->cmp_addr(&laddr->a, &laddr2->a) &&
+ laddr->valid && laddr2->valid) {
+ exist = true;
+ goto next;
+ }
+ }
+ cnt = 0;
+ break;
+next:
+ cnt++;
+ }
+ rcu_read_unlock();
+
+ return (cnt == cnt2) ? 0 : (exist ? -EEXIST : 1);
+}
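+
+/* Return value examples: 0 when every address bound to sp is also
+ * bound to sp2 and the match count equals cnt2; -EEXIST when a shared
+ * address was seen but the two sets do not fully match; 1 otherwise.
+ */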
+
+/* Does the address 'addr' conflict with any addresses in
+ * the bind address list 'bp'?
+ */
+int sctp_bind_addr_conflict(struct sctp_bind_addr *bp,
+ const union sctp_addr *addr,
+ struct sctp_sock *bp_sp,
+ struct sctp_sock *addr_sp)
+{
+ struct sctp_sockaddr_entry *laddr;
+ int conflict = 0;
+ struct sctp_sock *sp;
+
+ /* Pick the IPv6 socket as the basis of comparison
+ * since it's usually a superset of the IPv4.
+ * If there is no IPv6 socket, then default to bind_addr.
+ */
+ if (sctp_opt2sk(bp_sp)->sk_family == AF_INET6)
+ sp = bp_sp;
+ else if (sctp_opt2sk(addr_sp)->sk_family == AF_INET6)
+ sp = addr_sp;
+ else
+ sp = bp_sp;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(laddr, &bp->address_list, list) {
+ if (!laddr->valid)
+ continue;
+
+ conflict = sp->pf->cmp_addr(&laddr->a, addr, sp);
+ if (conflict)
+ break;
+ }
+ rcu_read_unlock();
+
+ return conflict;
+}
+
+/* Get the state of the entry in the bind_addr_list */
+int sctp_bind_addr_state(const struct sctp_bind_addr *bp,
+ const union sctp_addr *addr)
+{
+ struct sctp_sockaddr_entry *laddr;
+ struct sctp_af *af;
+
+ af = sctp_get_af_specific(addr->sa.sa_family);
+ if (unlikely(!af))
+ return -1;
+
+ list_for_each_entry_rcu(laddr, &bp->address_list, list) {
+ if (!laddr->valid)
+ continue;
+ if (af->cmp_addr(&laddr->a, addr))
+ return laddr->state;
+ }
+
+ return -1;
+}
+
+/* Find the first address in the bind address list that is not present in
+ * the addrs packed array.
+ */
+union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp,
+ const union sctp_addr *addrs,
+ int addrcnt,
+ struct sctp_sock *opt)
+{
+ struct sctp_sockaddr_entry *laddr;
+ union sctp_addr *addr;
+ void *addr_buf;
+ struct sctp_af *af;
+ int i;
+
+ /* This is only called by sctp_send_asconf_del_ip() and we hold
+ * the socket lock in that code path, so that the address list
+ * can't change.
+ */
+ list_for_each_entry(laddr, &bp->address_list, list) {
+ addr_buf = (union sctp_addr *)addrs;
+ for (i = 0; i < addrcnt; i++) {
+ addr = addr_buf;
+ af = sctp_get_af_specific(addr->v4.sin_family);
+ if (!af)
+ break;
+
+ if (opt->pf->cmp_addr(&laddr->a, addr, opt))
+ break;
+
+ addr_buf += af->sockaddr_len;
+ }
+ if (i == addrcnt)
+ return &laddr->a;
+ }
+
+ return NULL;
+}
+
+/* Copy out addresses from the global local address list. */
+static int sctp_copy_one_addr(struct net *net, struct sctp_bind_addr *dest,
+ union sctp_addr *addr, enum sctp_scope scope,
+ gfp_t gfp, int flags)
+{
+ int error = 0;
+
+ if (sctp_is_any(NULL, addr)) {
+ error = sctp_copy_local_addr_list(net, dest, scope, gfp, flags);
+ } else if (sctp_in_scope(net, addr, scope)) {
+ /* Now that the address is in scope, check to see if
+ * the address type is supported by local sock as
+ * well as the remote peer.
+ */
+ if ((((AF_INET == addr->sa.sa_family) &&
+ (flags & SCTP_ADDR4_ALLOWED) &&
+ (flags & SCTP_ADDR4_PEERSUPP))) ||
+ (((AF_INET6 == addr->sa.sa_family) &&
+ (flags & SCTP_ADDR6_ALLOWED) &&
+ (flags & SCTP_ADDR6_PEERSUPP))))
+ error = sctp_add_bind_addr(dest, addr, sizeof(*addr),
+ SCTP_ADDR_SRC, gfp);
+ }
+
+ return error;
+}
+
+/* Is this a wildcard address? */
+int sctp_is_any(struct sock *sk, const union sctp_addr *addr)
+{
+ unsigned short fam = 0;
+ struct sctp_af *af;
+
+ /* Try to get the right address family */
+ if (addr->sa.sa_family != AF_UNSPEC)
+ fam = addr->sa.sa_family;
+ else if (sk)
+ fam = sk->sk_family;
+
+ af = sctp_get_af_specific(fam);
+ if (!af)
+ return 0;
+
+ return af->is_any(addr);
+}
+
+/* Is 'addr' valid for 'scope'? */
+int sctp_in_scope(struct net *net, const union sctp_addr *addr,
+ enum sctp_scope scope)
+{
+ enum sctp_scope addr_scope = sctp_scope(addr);
+
+ /* The unusable SCTP addresses will not be considered with
+ * any defined scopes.
+ */
+ if (SCTP_SCOPE_UNUSABLE == addr_scope)
+ return 0;
+ /*
+ * For the INIT and INIT-ACK address lists, let L be the level of
+ * the requested destination address; the sender and receiver
+ * SHOULD include all of their addresses with level greater
+ * than or equal to L.
+ *
+ * Address scoping can be selectively controlled via sysctl
+ * option
+ */
+ switch (net->sctp.scope_policy) {
+ case SCTP_SCOPE_POLICY_DISABLE:
+ return 1;
+ case SCTP_SCOPE_POLICY_ENABLE:
+ if (addr_scope <= scope)
+ return 1;
+ break;
+ case SCTP_SCOPE_POLICY_PRIVATE:
+ if (addr_scope <= scope || SCTP_SCOPE_PRIVATE == addr_scope)
+ return 1;
+ break;
+ case SCTP_SCOPE_POLICY_LINK:
+ if (addr_scope <= scope || SCTP_SCOPE_LINK == addr_scope)
+ return 1;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
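+
+/* Example: with SCTP_SCOPE_POLICY_ENABLE (the usual default) and a
+ * requested scope of SCTP_SCOPE_GLOBAL, an RFC 1918 private address is
+ * rejected because its scope is narrower than global; switching the
+ * sysctl to SCTP_SCOPE_POLICY_PRIVATE (or disabling scoping entirely)
+ * lets such an address be advertised anyway.
+ */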
+
+int sctp_is_ep_boundall(struct sock *sk)
+{
+ struct sctp_bind_addr *bp;
+ struct sctp_sockaddr_entry *addr;
+
+ bp = &sctp_sk(sk)->ep->base.bind_addr;
+ if (sctp_list_single_entry(&bp->address_list)) {
+ addr = list_entry(bp->address_list.next,
+ struct sctp_sockaddr_entry, list);
+ if (sctp_is_any(sk, &addr->a))
+ return 1;
+ }
+ return 0;
+}
+
+/********************************************************************
+ * 3rd Level Abstractions
+ ********************************************************************/
+
+/* What is the scope of 'addr'? */
+enum sctp_scope sctp_scope(const union sctp_addr *addr)
+{
+ struct sctp_af *af;
+
+ af = sctp_get_af_specific(addr->sa.sa_family);
+ if (!af)
+ return SCTP_SCOPE_UNUSABLE;
+
+ return af->scope((union sctp_addr *)addr);
+}
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
new file mode 100644
index 000000000..fd4f8243c
--- /dev/null
+++ b/net/sctp/chunk.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2003, 2004
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * This file contains the code relating the chunk abstraction.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/net.h>
+#include <linux/inet.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <net/sock.h>
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+
+/* This file is mostly in anticipation of future work, but initially
+ * populated with fragment tracking for an outbound message.
+ */
+
+/* Initialize datamsg from memory. */
+static void sctp_datamsg_init(struct sctp_datamsg *msg)
+{
+ refcount_set(&msg->refcnt, 1);
+ msg->send_failed = 0;
+ msg->send_error = 0;
+ msg->can_delay = 1;
+ msg->abandoned = 0;
+ msg->expires_at = 0;
+ INIT_LIST_HEAD(&msg->chunks);
+}
+
+/* Allocate and initialize datamsg. */
+static struct sctp_datamsg *sctp_datamsg_new(gfp_t gfp)
+{
+ struct sctp_datamsg *msg;
+ msg = kmalloc(sizeof(struct sctp_datamsg), gfp);
+ if (msg) {
+ sctp_datamsg_init(msg);
+ SCTP_DBG_OBJCNT_INC(datamsg);
+ }
+ return msg;
+}
+
+void sctp_datamsg_free(struct sctp_datamsg *msg)
+{
+ struct sctp_chunk *chunk;
+
+ /* This doesn't have to be a _safe variant because
+ * sctp_chunk_free() only drops the refs.
+ */
+ list_for_each_entry(chunk, &msg->chunks, frag_list)
+ sctp_chunk_free(chunk);
+
+ sctp_datamsg_put(msg);
+}
+
+/* Final destruction of datamsg memory. */
+static void sctp_datamsg_destroy(struct sctp_datamsg *msg)
+{
+ struct sctp_association *asoc = NULL;
+ struct list_head *pos, *temp;
+ struct sctp_chunk *chunk;
+ struct sctp_ulpevent *ev;
+ int error, sent;
+
+ /* Release all references. */
+ list_for_each_safe(pos, temp, &msg->chunks) {
+ list_del_init(pos);
+ chunk = list_entry(pos, struct sctp_chunk, frag_list);
+
+ if (!msg->send_failed) {
+ sctp_chunk_put(chunk);
+ continue;
+ }
+
+ asoc = chunk->asoc;
+ error = msg->send_error ?: asoc->outqueue.error;
+ sent = chunk->has_tsn ? SCTP_DATA_SENT : SCTP_DATA_UNSENT;
+
+ if (sctp_ulpevent_type_enabled(asoc->subscribe,
+ SCTP_SEND_FAILED)) {
+ ev = sctp_ulpevent_make_send_failed(asoc, chunk, sent,
+ error, GFP_ATOMIC);
+ if (ev)
+ asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
+ }
+
+ if (sctp_ulpevent_type_enabled(asoc->subscribe,
+ SCTP_SEND_FAILED_EVENT)) {
+ ev = sctp_ulpevent_make_send_failed_event(asoc, chunk,
+ sent, error,
+ GFP_ATOMIC);
+ if (ev)
+ asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
+ }
+
+ sctp_chunk_put(chunk);
+ }
+
+ SCTP_DBG_OBJCNT_DEC(datamsg);
+ kfree(msg);
+}
+
+/* Hold a reference. */
+static void sctp_datamsg_hold(struct sctp_datamsg *msg)
+{
+ refcount_inc(&msg->refcnt);
+}
+
+/* Release a reference. */
+void sctp_datamsg_put(struct sctp_datamsg *msg)
+{
+ if (refcount_dec_and_test(&msg->refcnt))
+ sctp_datamsg_destroy(msg);
+}
+
+/* Assign a chunk to this datamsg. */
+static void sctp_datamsg_assign(struct sctp_datamsg *msg, struct sctp_chunk *chunk)
+{
+ sctp_datamsg_hold(msg);
+ chunk->msg = msg;
+}
+
+
+/* A data chunk can have a maximum payload of (2^16 - 20). Break
+ * down any such message into smaller chunks. Opportunistically, fragment
+ * the chunks down to the current MTU constraints. We may get refragmented
+ * later if the PMTU changes, but it is _much better_ to fragment immediately
+ * with a reasonable guess than always doing our fragmentation on the
+ * soft-interrupt.
+ */
+struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
+ struct sctp_sndrcvinfo *sinfo,
+ struct iov_iter *from)
+{
+ size_t len, first_len, max_data, remaining;
+ size_t msg_len = iov_iter_count(from);
+ struct sctp_shared_key *shkey = NULL;
+ struct list_head *pos, *temp;
+ struct sctp_chunk *chunk;
+ struct sctp_datamsg *msg;
+ int err;
+
+ msg = sctp_datamsg_new(GFP_KERNEL);
+ if (!msg)
+ return ERR_PTR(-ENOMEM);
+
+ /* Note: Calculate this outside of the loop, so that all fragments
+ * have the same expiration.
+ */
+ if (asoc->peer.prsctp_capable && sinfo->sinfo_timetolive &&
+ (SCTP_PR_TTL_ENABLED(sinfo->sinfo_flags) ||
+ !SCTP_PR_POLICY(sinfo->sinfo_flags)))
+ msg->expires_at = jiffies +
+ msecs_to_jiffies(sinfo->sinfo_timetolive);
+
+ /* This is the biggest possible DATA chunk that can fit into
+ * the packet
+ */
+ max_data = asoc->frag_point;
+ if (unlikely(!max_data)) {
+ max_data = sctp_min_frag_point(sctp_sk(asoc->base.sk),
+ sctp_datachk_len(&asoc->stream));
+ pr_warn_ratelimited("%s: asoc:%p frag_point is zero, forcing max_data to default minimum (%zu)",
+ __func__, asoc, max_data);
+ }
+
+ /* If the peer requested that we authenticate DATA chunks
+ * we need to account for bundling of the AUTH chunks along with
+ * DATA.
+ */
+ if (sctp_auth_send_cid(SCTP_CID_DATA, asoc)) {
+ struct sctp_hmac *hmac_desc = sctp_auth_asoc_get_hmac(asoc);
+
+ if (hmac_desc)
+ max_data -= SCTP_PAD4(sizeof(struct sctp_auth_chunk) +
+ hmac_desc->hmac_len);
+
+ if (sinfo->sinfo_tsn &&
+ sinfo->sinfo_ssn != asoc->active_key_id) {
+ shkey = sctp_auth_get_shkey(asoc, sinfo->sinfo_ssn);
+ if (!shkey) {
+ err = -EINVAL;
+ goto errout;
+ }
+ } else {
+ shkey = asoc->shkey;
+ }
+ }
+
+ /* Set first_len and then account for possible bundles on first frag */
+ first_len = max_data;
+
+ /* Check to see if we have a pending SACK and try to let it be bundled
+ * with this message. Do this if we don't have any data queued already.
+ * To check that, look at out_qlen and retransmit list.
+ * NOTE: we will not reduce to account for SACK, if the message would
+ * not have been fragmented.
+ */
+ if (timer_pending(&asoc->timers[SCTP_EVENT_TIMEOUT_SACK]) &&
+ asoc->outqueue.out_qlen == 0 &&
+ list_empty(&asoc->outqueue.retransmit) &&
+ msg_len > max_data)
+ first_len -= SCTP_PAD4(sizeof(struct sctp_sack_chunk));
+
+ /* Encourage Cookie-ECHO bundling. */
+ if (asoc->state < SCTP_STATE_COOKIE_ECHOED)
+ first_len -= SCTP_ARBITRARY_COOKIE_ECHO_LEN;
+
+ /* Account for a different sized first fragment */
+ if (msg_len >= first_len) {
+ msg->can_delay = 0;
+ if (msg_len > first_len)
+ SCTP_INC_STATS(asoc->base.net,
+ SCTP_MIB_FRAGUSRMSGS);
+ } else {
+ /* Which may be the only one... */
+ first_len = msg_len;
+ }
+
+ /* Create chunks for all DATA chunks. */
+ for (remaining = msg_len; remaining; remaining -= len) {
+ u8 frag = SCTP_DATA_MIDDLE_FRAG;
+
+ if (remaining == msg_len) {
+ /* First frag, which may also be the last */
+ frag |= SCTP_DATA_FIRST_FRAG;
+ len = first_len;
+ } else {
+ /* Middle frags */
+ len = max_data;
+ }
+
+ if (len >= remaining) {
+ /* Last frag, which may also be the first */
+ len = remaining;
+ frag |= SCTP_DATA_LAST_FRAG;
+
+ /* The application requests to set the I-bit of the
+ * last DATA chunk of a user message when providing
+ * the user message to the SCTP implementation.
+ */
+ if ((sinfo->sinfo_flags & SCTP_EOF) ||
+ (sinfo->sinfo_flags & SCTP_SACK_IMMEDIATELY))
+ frag |= SCTP_DATA_SACK_IMM;
+ }
+
+ chunk = asoc->stream.si->make_datafrag(asoc, sinfo, len, frag,
+ GFP_KERNEL);
+ if (!chunk) {
+ err = -ENOMEM;
+ goto errout;
+ }
+
+ err = sctp_user_addto_chunk(chunk, len, from);
+ if (err < 0)
+ goto errout_chunk_free;
+
+ chunk->shkey = shkey;
+
+ /* Put the chunk->skb back into the form expected by send. */
+ __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr -
+ chunk->skb->data);
+
+ sctp_datamsg_assign(msg, chunk);
+ list_add_tail(&chunk->frag_list, &msg->chunks);
+ }
+
+ return msg;
+
+errout_chunk_free:
+ sctp_chunk_free(chunk);
+
+errout:
+ list_for_each_safe(pos, temp, &msg->chunks) {
+ list_del_init(pos);
+ chunk = list_entry(pos, struct sctp_chunk, frag_list);
+ sctp_chunk_free(chunk);
+ }
+ sctp_datamsg_put(msg);
+
+ return ERR_PTR(err);
+}
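+
+/* Fragmentation example (ignoring the SACK and COOKIE-ECHO bundling
+ * reductions above): a 3000 byte message with a frag_point of 1200
+ * becomes three DATA chunks of 1200 (FIRST_FRAG), 1200 (MIDDLE_FRAG)
+ * and 600 bytes (LAST_FRAG).  A 1000 byte message stays a single chunk
+ * carrying both FIRST_FRAG and LAST_FRAG and keeps msg->can_delay set,
+ * so it may still be held back for bundling.
+ */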
+
+/* Check whether this message has expired. */
+int sctp_chunk_abandoned(struct sctp_chunk *chunk)
+{
+ if (!chunk->asoc->peer.prsctp_capable)
+ return 0;
+
+ if (chunk->msg->abandoned)
+ return 1;
+
+ if (!chunk->has_tsn &&
+ !(chunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG))
+ return 0;
+
+ if (SCTP_PR_TTL_ENABLED(chunk->sinfo.sinfo_flags) &&
+ time_after(jiffies, chunk->msg->expires_at)) {
+ struct sctp_stream_out *streamout =
+ SCTP_SO(&chunk->asoc->stream,
+ chunk->sinfo.sinfo_stream);
+
+ if (chunk->sent_count) {
+ chunk->asoc->abandoned_sent[SCTP_PR_INDEX(TTL)]++;
+ streamout->ext->abandoned_sent[SCTP_PR_INDEX(TTL)]++;
+ } else {
+ chunk->asoc->abandoned_unsent[SCTP_PR_INDEX(TTL)]++;
+ streamout->ext->abandoned_unsent[SCTP_PR_INDEX(TTL)]++;
+ }
+ chunk->msg->abandoned = 1;
+ return 1;
+ } else if (SCTP_PR_RTX_ENABLED(chunk->sinfo.sinfo_flags) &&
+ chunk->sent_count > chunk->sinfo.sinfo_timetolive) {
+ struct sctp_stream_out *streamout =
+ SCTP_SO(&chunk->asoc->stream,
+ chunk->sinfo.sinfo_stream);
+
+ chunk->asoc->abandoned_sent[SCTP_PR_INDEX(RTX)]++;
+ streamout->ext->abandoned_sent[SCTP_PR_INDEX(RTX)]++;
+ chunk->msg->abandoned = 1;
+ return 1;
+ } else if (!SCTP_PR_POLICY(chunk->sinfo.sinfo_flags) &&
+ chunk->msg->expires_at &&
+ time_after(jiffies, chunk->msg->expires_at)) {
+ chunk->msg->abandoned = 1;
+ return 1;
+ }
+ /* PRIO policy is processed by sendmsg, not here */
+
+ return 0;
+}
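+
+/* Example of the RTX policy branch above: with SCTP_PR_SCTP_RTX and
+ * sinfo_timetolive == 3, the chunk is abandoned once it has been sent
+ * more than three times (sent_count > 3), whereas the TTL policy
+ * abandons it as soon as jiffies passes msg->expires_at, however often
+ * it was sent.
+ */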
+
+/* This chunk (and consequently entire message) has failed in its sending. */
+void sctp_chunk_fail(struct sctp_chunk *chunk, int error)
+{
+ chunk->msg->send_failed = 1;
+ chunk->msg->send_error = error;
+}
diff --git a/net/sctp/debug.c b/net/sctp/debug.c
new file mode 100644
index 000000000..ccd773e4c
--- /dev/null
+++ b/net/sctp/debug.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001 Intel Corp.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * This file converts numerical ID values to alphabetical names for SCTP
+ * terms such as chunk type, parameter type, event type, etc.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Xingang Guo <xingang.guo@intel.com>
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * Daisy Chang <daisyc@us.ibm.com>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ */
+
+#include <net/sctp/sctp.h>
+
+/* These are printable forms of Chunk ID's from section 3.1. */
+static const char *const sctp_cid_tbl[SCTP_NUM_BASE_CHUNK_TYPES] = {
+ "DATA",
+ "INIT",
+ "INIT_ACK",
+ "SACK",
+ "HEARTBEAT",
+ "HEARTBEAT_ACK",
+ "ABORT",
+ "SHUTDOWN",
+ "SHUTDOWN_ACK",
+ "ERROR",
+ "COOKIE_ECHO",
+ "COOKIE_ACK",
+ "ECN_ECNE",
+ "ECN_CWR",
+ "SHUTDOWN_COMPLETE",
+};
+
+/* Lookup "chunk type" debug name. */
+const char *sctp_cname(const union sctp_subtype cid)
+{
+ if (cid.chunk <= SCTP_CID_BASE_MAX)
+ return sctp_cid_tbl[cid.chunk];
+
+ switch (cid.chunk) {
+ case SCTP_CID_ASCONF:
+ return "ASCONF";
+
+ case SCTP_CID_ASCONF_ACK:
+ return "ASCONF_ACK";
+
+ case SCTP_CID_FWD_TSN:
+ return "FWD_TSN";
+
+ case SCTP_CID_AUTH:
+ return "AUTH";
+
+ case SCTP_CID_RECONF:
+ return "RECONF";
+
+ case SCTP_CID_I_DATA:
+ return "I_DATA";
+
+ case SCTP_CID_I_FWD_TSN:
+ return "I_FWD_TSN";
+
+ default:
+ break;
+ }
+
+ return "unknown chunk";
+}
+
+/* These are printable forms of the states. */
+const char *const sctp_state_tbl[SCTP_STATE_NUM_STATES] = {
+ "STATE_CLOSED",
+ "STATE_COOKIE_WAIT",
+ "STATE_COOKIE_ECHOED",
+ "STATE_ESTABLISHED",
+ "STATE_SHUTDOWN_PENDING",
+ "STATE_SHUTDOWN_SENT",
+ "STATE_SHUTDOWN_RECEIVED",
+ "STATE_SHUTDOWN_ACK_SENT",
+};
+
+/* Events that could change the state of an association. */
+const char *const sctp_evttype_tbl[] = {
+ "EVENT_T_unknown",
+ "EVENT_T_CHUNK",
+ "EVENT_T_TIMEOUT",
+ "EVENT_T_OTHER",
+ "EVENT_T_PRIMITIVE"
+};
+
+/* Return value of a state function */
+const char *const sctp_status_tbl[] = {
+ "DISPOSITION_DISCARD",
+ "DISPOSITION_CONSUME",
+ "DISPOSITION_NOMEM",
+ "DISPOSITION_DELETE_TCB",
+ "DISPOSITION_ABORT",
+ "DISPOSITION_VIOLATION",
+ "DISPOSITION_NOT_IMPL",
+ "DISPOSITION_ERROR",
+ "DISPOSITION_BUG"
+};
+
+/* Printable forms of primitives */
+static const char *const sctp_primitive_tbl[SCTP_NUM_PRIMITIVE_TYPES] = {
+ "PRIMITIVE_ASSOCIATE",
+ "PRIMITIVE_SHUTDOWN",
+ "PRIMITIVE_ABORT",
+ "PRIMITIVE_SEND",
+ "PRIMITIVE_REQUESTHEARTBEAT",
+ "PRIMITIVE_ASCONF",
+};
+
+/* Lookup primitive debug name. */
+const char *sctp_pname(const union sctp_subtype id)
+{
+ if (id.primitive <= SCTP_EVENT_PRIMITIVE_MAX)
+ return sctp_primitive_tbl[id.primitive];
+ return "unknown_primitive";
+}
+
+static const char *const sctp_other_tbl[] = {
+ "NO_PENDING_TSN",
+ "ICMP_PROTO_UNREACH",
+};
+
+/* Lookup "other" debug name. */
+const char *sctp_oname(const union sctp_subtype id)
+{
+ if (id.other <= SCTP_EVENT_OTHER_MAX)
+ return sctp_other_tbl[id.other];
+ return "unknown 'other' event";
+}
+
+static const char *const sctp_timer_tbl[] = {
+ "TIMEOUT_NONE",
+ "TIMEOUT_T1_COOKIE",
+ "TIMEOUT_T1_INIT",
+ "TIMEOUT_T2_SHUTDOWN",
+ "TIMEOUT_T3_RTX",
+ "TIMEOUT_T4_RTO",
+ "TIMEOUT_T5_SHUTDOWN_GUARD",
+ "TIMEOUT_HEARTBEAT",
+ "TIMEOUT_RECONF",
+ "TIMEOUT_PROBE",
+ "TIMEOUT_SACK",
+ "TIMEOUT_AUTOCLOSE",
+};
+
+/* Lookup timer debug name. */
+const char *sctp_tname(const union sctp_subtype id)
+{
+ BUILD_BUG_ON(SCTP_EVENT_TIMEOUT_MAX + 1 != ARRAY_SIZE(sctp_timer_tbl));
+
+ if (id.timeout < ARRAY_SIZE(sctp_timer_tbl))
+ return sctp_timer_tbl[id.timeout];
+ return "unknown_timer";
+}
diff --git a/net/sctp/diag.c b/net/sctp/diag.c
new file mode 100644
index 000000000..b0ce10808
--- /dev/null
+++ b/net/sctp/diag.c
@@ -0,0 +1,529 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* SCTP kernel implementation
+ * (C) Copyright Red Hat Inc. 2017
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * These functions implement sctp diag support.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Xin Long <lucien.xin@gmail.com>
+ */
+
+#include <linux/module.h>
+#include <linux/inet_diag.h>
+#include <linux/sock_diag.h>
+#include <net/sctp/sctp.h>
+
+static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
+ void *info);
+
+/* define some functions to make asoc/ep fill look clean */
+static void inet_diag_msg_sctpasoc_fill(struct inet_diag_msg *r,
+ struct sock *sk,
+ struct sctp_association *asoc)
+{
+ union sctp_addr laddr, paddr;
+ struct dst_entry *dst;
+ struct timer_list *t3_rtx = &asoc->peer.primary_path->T3_rtx_timer;
+
+ laddr = list_entry(asoc->base.bind_addr.address_list.next,
+ struct sctp_sockaddr_entry, list)->a;
+ paddr = asoc->peer.primary_path->ipaddr;
+ dst = asoc->peer.primary_path->dst;
+
+ r->idiag_family = sk->sk_family;
+ r->id.idiag_sport = htons(asoc->base.bind_addr.port);
+ r->id.idiag_dport = htons(asoc->peer.port);
+ r->id.idiag_if = dst ? dst->dev->ifindex : 0;
+ sock_diag_save_cookie(sk, r->id.idiag_cookie);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6) {
+ *(struct in6_addr *)r->id.idiag_src = laddr.v6.sin6_addr;
+ *(struct in6_addr *)r->id.idiag_dst = paddr.v6.sin6_addr;
+ } else
+#endif
+ {
+ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
+ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
+
+ r->id.idiag_src[0] = laddr.v4.sin_addr.s_addr;
+ r->id.idiag_dst[0] = paddr.v4.sin_addr.s_addr;
+ }
+
+ r->idiag_state = asoc->state;
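+ /* If the T3-rtx timer on the primary path is running, report it along
+ * with the retransmit count and the remaining time in milliseconds.
+ */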
+ if (timer_pending(t3_rtx)) {
+ r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX;
+ r->idiag_retrans = asoc->rtx_data_chunks;
+ r->idiag_expires = jiffies_to_msecs(t3_rtx->expires - jiffies);
+ }
+}
+
+static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb,
+ struct list_head *address_list)
+{
+ struct sctp_sockaddr_entry *laddr;
+ int addrlen = sizeof(struct sockaddr_storage);
+ int addrcnt = 0;
+ struct nlattr *attr;
+ void *info = NULL;
+
+ list_for_each_entry_rcu(laddr, address_list, list)
+ addrcnt++;
+
+ attr = nla_reserve(skb, INET_DIAG_LOCALS, addrlen * addrcnt);
+ if (!attr)
+ return -EMSGSIZE;
+
+ info = nla_data(attr);
+ list_for_each_entry_rcu(laddr, address_list, list) {
+ memcpy(info, &laddr->a, sizeof(laddr->a));
+ memset(info + sizeof(laddr->a), 0, addrlen - sizeof(laddr->a));
+ info += addrlen;
+ }
+
+ return 0;
+}
+
+static int inet_diag_msg_sctpaddrs_fill(struct sk_buff *skb,
+ struct sctp_association *asoc)
+{
+ int addrlen = sizeof(struct sockaddr_storage);
+ struct sctp_transport *from;
+ struct nlattr *attr;
+ void *info = NULL;
+
+ attr = nla_reserve(skb, INET_DIAG_PEERS,
+ addrlen * asoc->peer.transport_count);
+ if (!attr)
+ return -EMSGSIZE;
+
+ info = nla_data(attr);
+ list_for_each_entry(from, &asoc->peer.transport_addr_list,
+ transports) {
+ memcpy(info, &from->ipaddr, sizeof(from->ipaddr));
+ memset(info + sizeof(from->ipaddr), 0,
+ addrlen - sizeof(from->ipaddr));
+ info += addrlen;
+ }
+
+ return 0;
+}
+
+/* sctp asoc/ep fill */
+static int inet_sctp_diag_fill(struct sock *sk, struct sctp_association *asoc,
+ struct sk_buff *skb,
+ const struct inet_diag_req_v2 *req,
+ struct user_namespace *user_ns,
+ int portid, u32 seq, u16 nlmsg_flags,
+ const struct nlmsghdr *unlh,
+ bool net_admin)
+{
+ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
+ struct list_head *addr_list;
+ struct inet_diag_msg *r;
+ struct nlmsghdr *nlh;
+ int ext = req->idiag_ext;
+ struct sctp_infox infox;
+ void *info = NULL;
+
+ nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
+ nlmsg_flags);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ r = nlmsg_data(nlh);
+ BUG_ON(!sk_fullsock(sk));
+
+ r->idiag_timer = 0;
+ r->idiag_retrans = 0;
+ r->idiag_expires = 0;
+ if (asoc) {
+ inet_diag_msg_sctpasoc_fill(r, sk, asoc);
+ } else {
+ inet_diag_msg_common_fill(r, sk);
+ r->idiag_state = sk->sk_state;
+ }
+
+ if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns, net_admin))
+ goto errout;
+
+ if (ext & (1 << (INET_DIAG_SKMEMINFO - 1))) {
+ u32 mem[SK_MEMINFO_VARS];
+ int amt;
+
+ if (asoc && asoc->ep->sndbuf_policy)
+ amt = asoc->sndbuf_used;
+ else
+ amt = sk_wmem_alloc_get(sk);
+ mem[SK_MEMINFO_WMEM_ALLOC] = amt;
+ if (asoc && asoc->ep->rcvbuf_policy)
+ amt = atomic_read(&asoc->rmem_alloc);
+ else
+ amt = sk_rmem_alloc_get(sk);
+ mem[SK_MEMINFO_RMEM_ALLOC] = amt;
+ mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
+ mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
+ mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
+ mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
+ mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
+ mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
+ mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
+
+ if (nla_put(skb, INET_DIAG_SKMEMINFO, sizeof(mem), &mem) < 0)
+ goto errout;
+ }
+
+ if (ext & (1 << (INET_DIAG_INFO - 1))) {
+ struct nlattr *attr;
+
+ attr = nla_reserve_64bit(skb, INET_DIAG_INFO,
+ sizeof(struct sctp_info),
+ INET_DIAG_PAD);
+ if (!attr)
+ goto errout;
+
+ info = nla_data(attr);
+ }
+ infox.sctpinfo = (struct sctp_info *)info;
+ infox.asoc = asoc;
+ sctp_diag_get_info(sk, r, &infox);
+
+ addr_list = asoc ? &asoc->base.bind_addr.address_list
+ : &ep->base.bind_addr.address_list;
+ if (inet_diag_msg_sctpladdrs_fill(skb, addr_list))
+ goto errout;
+
+ if (asoc && (ext & (1 << (INET_DIAG_CONG - 1))))
+ if (nla_put_string(skb, INET_DIAG_CONG, "reno") < 0)
+ goto errout;
+
+ if (asoc && inet_diag_msg_sctpaddrs_fill(skb, asoc))
+ goto errout;
+
+ nlmsg_end(skb, nlh);
+ return 0;
+
+errout:
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+}
+
+/* callback and param */
+struct sctp_comm_param {
+ struct sk_buff *skb;
+ struct netlink_callback *cb;
+ const struct inet_diag_req_v2 *r;
+ const struct nlmsghdr *nlh;
+ bool net_admin;
+};
+
+static size_t inet_assoc_attr_size(struct sctp_association *asoc)
+{
+ int addrlen = sizeof(struct sockaddr_storage);
+ int addrcnt = 0;
+ struct sctp_sockaddr_entry *laddr;
+
+ list_for_each_entry_rcu(laddr, &asoc->base.bind_addr.address_list,
+ list)
+ addrcnt++;
+
+ return nla_total_size(sizeof(struct sctp_info))
+ + nla_total_size(addrlen * asoc->peer.transport_count)
+ + nla_total_size(addrlen * addrcnt)
+ + nla_total_size(sizeof(struct inet_diag_msg))
+ + inet_diag_msg_attrs_size()
+ + nla_total_size(sizeof(struct inet_diag_meminfo))
+ + 64;
+}
+
+static int sctp_sock_dump_one(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
+{
+ struct sctp_association *assoc = tsp->asoc;
+ struct sctp_comm_param *commp = p;
+ struct sock *sk = ep->base.sk;
+ const struct inet_diag_req_v2 *req = commp->r;
+ struct sk_buff *skb = commp->skb;
+ struct sk_buff *rep;
+ int err;
+
+ err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
+ if (err)
+ return err;
+
+ rep = nlmsg_new(inet_assoc_attr_size(assoc), GFP_KERNEL);
+ if (!rep)
+ return -ENOMEM;
+
+ lock_sock(sk);
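+ /* The association may have moved to a different endpoint (e.g. been
+ * peeled off) since the transport lookup; bail out if so.
+ */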
+ if (ep != assoc->ep) {
+ err = -EAGAIN;
+ goto out;
+ }
+
+ err = inet_sctp_diag_fill(sk, assoc, rep, req, sk_user_ns(NETLINK_CB(skb).sk),
+ NETLINK_CB(skb).portid, commp->nlh->nlmsg_seq, 0,
+ commp->nlh, commp->net_admin);
+ if (err < 0) {
+ WARN_ON(err == -EMSGSIZE);
+ goto out;
+ }
+ release_sock(sk);
+
+ return nlmsg_unicast(sock_net(skb->sk)->diag_nlsk, rep, NETLINK_CB(skb).portid);
+
+out:
+ release_sock(sk);
+ kfree_skb(rep);
+ return err;
+}
+
+static int sctp_sock_dump(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
+{
+ struct sctp_comm_param *commp = p;
+ struct sock *sk = ep->base.sk;
+ struct sk_buff *skb = commp->skb;
+ struct netlink_callback *cb = commp->cb;
+ const struct inet_diag_req_v2 *r = commp->r;
+ struct sctp_association *assoc;
+ int err = 0;
+
+ lock_sock(sk);
+ if (ep != tsp->asoc->ep)
+ goto release;
+ list_for_each_entry(assoc, &ep->asocs, asocs) {
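+ /* Skip associations already dumped in a previous pass;
+ * cb->args[1] records the resume position.
+ */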
+ if (cb->args[4] < cb->args[1])
+ goto next;
+
+ if (r->id.idiag_sport != htons(assoc->base.bind_addr.port) &&
+ r->id.idiag_sport)
+ goto next;
+ if (r->id.idiag_dport != htons(assoc->peer.port) &&
+ r->id.idiag_dport)
+ goto next;
+
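+ /* Dump the socket-level info once per socket (marked by cb->args[3])
+ * before dumping its associations.
+ */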
+ if (!cb->args[3] &&
+ inet_sctp_diag_fill(sk, NULL, skb, r,
+ sk_user_ns(NETLINK_CB(cb->skb).sk),
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI, cb->nlh,
+ commp->net_admin) < 0) {
+ err = 1;
+ goto release;
+ }
+ cb->args[3] = 1;
+
+ if (inet_sctp_diag_fill(sk, assoc, skb, r,
+ sk_user_ns(NETLINK_CB(cb->skb).sk),
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, 0, cb->nlh,
+ commp->net_admin) < 0) {
+ err = 1;
+ goto release;
+ }
+next:
+ cb->args[4]++;
+ }
+ cb->args[1] = 0;
+ cb->args[3] = 0;
+ cb->args[4] = 0;
+release:
+ release_sock(sk);
+ return err;
+}
+
+static int sctp_sock_filter(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
+{
+ struct sctp_comm_param *commp = p;
+ struct sock *sk = ep->base.sk;
+ const struct inet_diag_req_v2 *r = commp->r;
+
+ /* process the ep only once while walking the transports: only
+ * transports of the endpoint's first association pass this check
+ */
+ if (!list_is_first(&tsp->asoc->asocs, &ep->asocs))
+ return 0;
+
+ if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family)
+ return 0;
+
+ return 1;
+}
+
+static int sctp_ep_dump(struct sctp_endpoint *ep, void *p)
+{
+ struct sctp_comm_param *commp = p;
+ struct sock *sk = ep->base.sk;
+ struct sk_buff *skb = commp->skb;
+ struct netlink_callback *cb = commp->cb;
+ const struct inet_diag_req_v2 *r = commp->r;
+ struct net *net = sock_net(skb->sk);
+ struct inet_sock *inet = inet_sk(sk);
+ int err = 0;
+
+ if (!net_eq(sock_net(sk), net))
+ goto out;
+
+ if (cb->args[4] < cb->args[1])
+ goto next;
+
+ if (!(r->idiag_states & TCPF_LISTEN) && !list_empty(&ep->asocs))
+ goto next;
+
+ if (r->sdiag_family != AF_UNSPEC &&
+ sk->sk_family != r->sdiag_family)
+ goto next;
+
+ if (r->id.idiag_sport != inet->inet_sport &&
+ r->id.idiag_sport)
+ goto next;
+
+ if (r->id.idiag_dport != inet->inet_dport &&
+ r->id.idiag_dport)
+ goto next;
+
+ if (inet_sctp_diag_fill(sk, NULL, skb, r,
+ sk_user_ns(NETLINK_CB(cb->skb).sk),
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ cb->nlh, commp->net_admin) < 0) {
+ err = 2;
+ goto out;
+ }
+next:
+ cb->args[4]++;
+out:
+ return err;
+}
+
+/* define the functions for sctp_diag_handler */
+static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
+ void *info)
+{
+ struct sctp_infox *infox = (struct sctp_infox *)info;
+
+ if (infox->asoc) {
+ r->idiag_rqueue = atomic_read(&infox->asoc->rmem_alloc);
+ r->idiag_wqueue = infox->asoc->sndbuf_used;
+ } else {
+ r->idiag_rqueue = READ_ONCE(sk->sk_ack_backlog);
+ r->idiag_wqueue = READ_ONCE(sk->sk_max_ack_backlog);
+ }
+ if (infox->sctpinfo)
+ sctp_get_sctp_info(sk, infox->asoc, infox->sctpinfo);
+}
+
+static int sctp_diag_dump_one(struct netlink_callback *cb,
+ const struct inet_diag_req_v2 *req)
+{
+ struct sk_buff *skb = cb->skb;
+ struct net *net = sock_net(skb->sk);
+ const struct nlmsghdr *nlh = cb->nlh;
+ union sctp_addr laddr, paddr;
+ struct sctp_comm_param commp = {
+ .skb = skb,
+ .r = req,
+ .nlh = nlh,
+ .net_admin = netlink_net_capable(skb, CAP_NET_ADMIN),
+ };
+
+ if (req->sdiag_family == AF_INET) {
+ laddr.v4.sin_port = req->id.idiag_sport;
+ laddr.v4.sin_addr.s_addr = req->id.idiag_src[0];
+ laddr.v4.sin_family = AF_INET;
+
+ paddr.v4.sin_port = req->id.idiag_dport;
+ paddr.v4.sin_addr.s_addr = req->id.idiag_dst[0];
+ paddr.v4.sin_family = AF_INET;
+ } else {
+ laddr.v6.sin6_port = req->id.idiag_sport;
+ memcpy(&laddr.v6.sin6_addr, req->id.idiag_src,
+ sizeof(laddr.v6.sin6_addr));
+ laddr.v6.sin6_family = AF_INET6;
+
+ paddr.v6.sin6_port = req->id.idiag_dport;
+ memcpy(&paddr.v6.sin6_addr, req->id.idiag_dst,
+ sizeof(paddr.v6.sin6_addr));
+ paddr.v6.sin6_family = AF_INET6;
+ }
+
+ return sctp_transport_lookup_process(sctp_sock_dump_one,
+ net, &laddr, &paddr, &commp);
+}
+
+static void sctp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ const struct inet_diag_req_v2 *r)
+{
+ u32 idiag_states = r->idiag_states;
+ struct net *net = sock_net(skb->sk);
+ struct sctp_comm_param commp = {
+ .skb = skb,
+ .cb = cb,
+ .r = r,
+ .net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN),
+ };
+ int pos = cb->args[2];
+
+ /* eps hashtable dump
+ * args:
+ * 0 : whether the listening sockets still need to be traversed
+ * 1 : position of the last dumped socket, used to resume the traversal
+ * 4 : running counter while traversing the list
+ */
+ if (cb->args[0] == 0) {
+ if (!(idiag_states & TCPF_LISTEN))
+ goto skip;
+ if (sctp_for_each_endpoint(sctp_ep_dump, &commp))
+ goto done;
+skip:
+ cb->args[0] = 1;
+ cb->args[1] = 0;
+ cb->args[4] = 0;
+ }
+
+ /* asocs dump via the transport hashtable
+ * args:
+ * 1 : position of the last dumped association, used to resume the traversal
+ * 2 : position of the last visited transport, used to resume the traversal
+ * 3 : whether the ep info of the current asoc has already been dumped
+ * 4 : running counter while traversing the list
+ * 5 : the sk obtained while traversing the tsp list
+ */
+ if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE)))
+ goto done;
+
+ sctp_transport_traverse_process(sctp_sock_filter, sctp_sock_dump,
+ net, &pos, &commp);
+ cb->args[2] = pos;
+
+done:
+ cb->args[1] = cb->args[4];
+ cb->args[4] = 0;
+}
+
+static const struct inet_diag_handler sctp_diag_handler = {
+ .dump = sctp_diag_dump,
+ .dump_one = sctp_diag_dump_one,
+ .idiag_get_info = sctp_diag_get_info,
+ .idiag_type = IPPROTO_SCTP,
+ .idiag_info_size = sizeof(struct sctp_info),
+};
+
+static int __init sctp_diag_init(void)
+{
+ return inet_diag_register(&sctp_diag_handler);
+}
+
+static void __exit sctp_diag_exit(void)
+{
+ inet_diag_unregister(&sctp_diag_handler);
+}
+
+module_init(sctp_diag_init);
+module_exit(sctp_diag_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-132);
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
new file mode 100644
index 000000000..efffde7f2
--- /dev/null
+++ b/net/sctp/endpointola.c
@@ -0,0 +1,417 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* SCTP kernel implementation
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001-2002 International Business Machines, Corp.
+ * Copyright (c) 2001 Intel Corp.
+ * Copyright (c) 2001 Nokia, Inc.
+ * Copyright (c) 2001 La Monte H.P. Yarroll
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * This abstraction represents an SCTP endpoint.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Jon Grimm <jgrimm@austin.ibm.com>
+ * Daisy Chang <daisyc@us.ibm.com>
+ * Dajiang Zhang <dajiang.zhang@nokia.com>
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/in.h>
+#include <linux/random.h> /* get_random_bytes() */
+#include <net/sock.h>
+#include <net/ipv6.h>
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+
+/* Forward declarations for internal helpers. */
+static void sctp_endpoint_bh_rcv(struct work_struct *work);
+
+/*
+ * Initialize the base fields of the endpoint structure.
+ */
+static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
+ struct sock *sk,
+ gfp_t gfp)
+{
+ struct net *net = sock_net(sk);
+ struct sctp_shared_key *null_key;
+
+ ep->digest = kzalloc(SCTP_SIGNATURE_SIZE, gfp);
+ if (!ep->digest)
+ return NULL;
+
+ ep->asconf_enable = net->sctp.addip_enable;
+ ep->auth_enable = net->sctp.auth_enable;
+ if (ep->auth_enable) {
+ if (sctp_auth_init(ep, gfp))
+ goto nomem;
+ if (ep->asconf_enable) {
+ sctp_auth_ep_add_chunkid(ep, SCTP_CID_ASCONF);
+ sctp_auth_ep_add_chunkid(ep, SCTP_CID_ASCONF_ACK);
+ }
+ }
+
+ /* Initialize the base structure. */
+ /* What type of endpoint are we? */
+ ep->base.type = SCTP_EP_TYPE_SOCKET;
+
+ /* Initialize the basic object fields. */
+ refcount_set(&ep->base.refcnt, 1);
+ ep->base.dead = false;
+
+ /* Create an input queue. */
+ sctp_inq_init(&ep->base.inqueue);
+
+ /* Set its top-half handler */
+ sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv);
+
+ /* Initialize the bind addr area */
+ sctp_bind_addr_init(&ep->base.bind_addr, 0);
+
+ /* Create the lists of associations. */
+ INIT_LIST_HEAD(&ep->asocs);
+
+ /* Use SCTP specific send buffer space queues. */
+ ep->sndbuf_policy = net->sctp.sndbuf_policy;
+
+ sk->sk_data_ready = sctp_data_ready;
+ sk->sk_write_space = sctp_write_space;
+ sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
+
+ /* Get the receive buffer policy for this endpoint */
+ ep->rcvbuf_policy = net->sctp.rcvbuf_policy;
+
+ /* Initialize the secret key used with cookie. */
+ get_random_bytes(ep->secret_key, sizeof(ep->secret_key));
+
+ /* SCTP-AUTH extensions */
+ INIT_LIST_HEAD(&ep->endpoint_shared_keys);
+ null_key = sctp_auth_shkey_create(0, gfp);
+ if (!null_key)
+ goto nomem_shkey;
+
+ list_add(&null_key->key_list, &ep->endpoint_shared_keys);
+
+ /* Add the null key to the endpoint shared keys list and
+ * set the hmacs and chunks pointers.
+ */
+ ep->prsctp_enable = net->sctp.prsctp_enable;
+ ep->reconf_enable = net->sctp.reconf_enable;
+ ep->ecn_enable = net->sctp.ecn_enable;
+
+ /* Remember who we are attached to. */
+ ep->base.sk = sk;
+ ep->base.net = sock_net(sk);
+ sock_hold(ep->base.sk);
+
+ return ep;
+
+nomem_shkey:
+ sctp_auth_free(ep);
+nomem:
+ kfree(ep->digest);
+ return NULL;
+
+}
+
+/* Create a sctp_endpoint with all that boring stuff initialized.
+ * Returns NULL if there isn't enough memory.
+ */
+struct sctp_endpoint *sctp_endpoint_new(struct sock *sk, gfp_t gfp)
+{
+ struct sctp_endpoint *ep;
+
+ /* Build a local endpoint. */
+ ep = kzalloc(sizeof(*ep), gfp);
+ if (!ep)
+ goto fail;
+
+ if (!sctp_endpoint_init(ep, sk, gfp))
+ goto fail_init;
+
+ SCTP_DBG_OBJCNT_INC(ep);
+ return ep;
+
+fail_init:
+ kfree(ep);
+fail:
+ return NULL;
+}
+
+/* Add an association to an endpoint. */
+void sctp_endpoint_add_asoc(struct sctp_endpoint *ep,
+ struct sctp_association *asoc)
+{
+ struct sock *sk = ep->base.sk;
+
+ /* If this is a temporary association, don't bother
+ * since we'll be removing it shortly and don't
+ * want anyone to find it anyway.
+ */
+ if (asoc->temp)
+ return;
+
+ /* Now just add it to our list of asocs */
+ list_add_tail(&asoc->asocs, &ep->asocs);
+
+ /* Increment the backlog value for a TCP-style listening socket. */
+ if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
+ sk_acceptq_added(sk);
+}
+
+/* Free the endpoint structure. Delay cleanup until
+ * all users have released their reference count on this structure.
+ */
+void sctp_endpoint_free(struct sctp_endpoint *ep)
+{
+ ep->base.dead = true;
+
+ inet_sk_set_state(ep->base.sk, SCTP_SS_CLOSED);
+
+ /* Unlink this endpoint, so we can't find it again! */
+ sctp_unhash_endpoint(ep);
+
+ sctp_endpoint_put(ep);
+}
+
+/* Final destructor for endpoint. */
+static void sctp_endpoint_destroy_rcu(struct rcu_head *head)
+{
+ struct sctp_endpoint *ep = container_of(head, struct sctp_endpoint, rcu);
+ struct sock *sk = ep->base.sk;
+
+ sctp_sk(sk)->ep = NULL;
+ sock_put(sk);
+
+ kfree(ep);
+ SCTP_DBG_OBJCNT_DEC(ep);
+}
+
+static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
+{
+ struct sock *sk;
+
+ if (unlikely(!ep->base.dead)) {
+ WARN(1, "Attempt to destroy undead endpoint %p!\n", ep);
+ return;
+ }
+
+ /* Free the digest buffer */
+ kfree(ep->digest);
+
+ /* SCTP-AUTH: Free up AUTH related data such as shared keys,
+ * chunks and hmacs arrays that were allocated
+ */
+ sctp_auth_destroy_keys(&ep->endpoint_shared_keys);
+ sctp_auth_free(ep);
+
+ /* Cleanup. */
+ sctp_inq_free(&ep->base.inqueue);
+ sctp_bind_addr_free(&ep->base.bind_addr);
+
+ memset(ep->secret_key, 0, sizeof(ep->secret_key));
+
+ sk = ep->base.sk;
+ /* Remove and free the port */
+ if (sctp_sk(sk)->bind_hash)
+ sctp_put_port(sk);
+
+ call_rcu(&ep->rcu, sctp_endpoint_destroy_rcu);
+}
+
+/* Hold a reference to an endpoint. Returns 0 if the refcount has already
+ * dropped to zero, i.e. the endpoint is being destroyed.
+ */
+int sctp_endpoint_hold(struct sctp_endpoint *ep)
+{
+ return refcount_inc_not_zero(&ep->base.refcnt);
+}
+
+/* Release a reference to an endpoint and clean up if there are
+ * no more references.
+ */
+void sctp_endpoint_put(struct sctp_endpoint *ep)
+{
+ if (refcount_dec_and_test(&ep->base.refcnt))
+ sctp_endpoint_destroy(ep);
+}
+
+/* Is this the endpoint we are looking for? */
+struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *ep,
+ struct net *net,
+ const union sctp_addr *laddr)
+{
+ struct sctp_endpoint *retval = NULL;
+
+ if ((htons(ep->base.bind_addr.port) == laddr->v4.sin_port) &&
+ net_eq(ep->base.net, net)) {
+ if (sctp_bind_addr_match(&ep->base.bind_addr, laddr,
+ sctp_sk(ep->base.sk)))
+ retval = ep;
+ }
+
+ return retval;
+}
+
+/* Find the association that goes with this chunk.
+ * We lookup the transport from hashtable at first, then get association
+ * through t->assoc.
+ */
+struct sctp_association *sctp_endpoint_lookup_assoc(
+ const struct sctp_endpoint *ep,
+ const union sctp_addr *paddr,
+ struct sctp_transport **transport)
+{
+ struct sctp_association *asoc = NULL;
+ struct sctp_transport *t;
+
+ *transport = NULL;
+
+ /* If the local port is not set, there can't be any associations
+ * on this endpoint.
+ */
+ if (!ep->base.bind_addr.port)
+ return NULL;
+
+ rcu_read_lock();
+ t = sctp_epaddr_lookup_transport(ep, paddr);
+ if (!t)
+ goto out;
+
+ *transport = t;
+ asoc = t->asoc;
+out:
+ rcu_read_unlock();
+ return asoc;
+}
+
+/* Look for any peeled off association from the endpoint that matches the
+ * given peer address.
+ */
+bool sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
+ const union sctp_addr *paddr)
+{
+ struct sctp_sockaddr_entry *addr;
+ struct net *net = ep->base.net;
+ struct sctp_bind_addr *bp;
+
+ bp = &ep->base.bind_addr;
+ /* This function is called with the socket lock held,
+ * so the address_list can not change.
+ */
+ list_for_each_entry(addr, &bp->address_list, list) {
+ if (sctp_has_association(net, &addr->a, paddr))
+ return true;
+ }
+
+ return false;
+}
+
+/* Do delayed input processing. This is scheduled by sctp_rcv().
+ * This may be called on BH or task time.
+ */
+static void sctp_endpoint_bh_rcv(struct work_struct *work)
+{
+ struct sctp_endpoint *ep =
+ container_of(work, struct sctp_endpoint,
+ base.inqueue.immediate);
+ struct sctp_association *asoc;
+ struct sock *sk;
+ struct net *net;
+ struct sctp_transport *transport;
+ struct sctp_chunk *chunk;
+ struct sctp_inq *inqueue;
+ union sctp_subtype subtype;
+ enum sctp_state state;
+ int error = 0;
+ int first_time = 1; /* is this the first time through the loop */
+
+ if (ep->base.dead)
+ return;
+
+ asoc = NULL;
+ inqueue = &ep->base.inqueue;
+ sk = ep->base.sk;
+ net = sock_net(sk);
+
+ while (NULL != (chunk = sctp_inq_pop(inqueue))) {
+ subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
+
+ /* If the first chunk in the packet is AUTH, do special
+ * processing specified in Section 6.3 of SCTP-AUTH spec
+ */
+ if (first_time && (subtype.chunk == SCTP_CID_AUTH)) {
+ struct sctp_chunkhdr *next_hdr;
+
+ next_hdr = sctp_inq_peek(inqueue);
+ if (!next_hdr)
+ goto normal;
+
+ /* If the next chunk is COOKIE-ECHO, skip the AUTH
+ * chunk while saving a pointer to it so we can do
+ * Authentication later (during cookie-echo
+ * processing).
+ */
+ if (next_hdr->type == SCTP_CID_COOKIE_ECHO) {
+ chunk->auth_chunk = skb_clone(chunk->skb,
+ GFP_ATOMIC);
+ chunk->auth = 1;
+ continue;
+ }
+ }
+normal:
+ /* We might have grown an association since last we
+ * looked, so try again.
+ *
+ * This happens when we've just processed our
+ * COOKIE-ECHO chunk.
+ */
+ if (NULL == chunk->asoc) {
+ asoc = sctp_endpoint_lookup_assoc(ep,
+ sctp_source(chunk),
+ &transport);
+ chunk->asoc = asoc;
+ chunk->transport = transport;
+ }
+
+ state = asoc ? asoc->state : SCTP_STATE_CLOSED;
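+ /* Per SCTP-AUTH, drop chunks that the association requires to be
+ * authenticated when the packet carried no AUTH chunk.
+ */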
+ if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
+ continue;
+
+ /* Remember where the last DATA chunk came from so we
+ * know where to send the SACK.
+ */
+ if (asoc && sctp_chunk_is_data(chunk))
+ asoc->peer.last_data_from = chunk->transport;
+ else {
+ SCTP_INC_STATS(ep->base.net, SCTP_MIB_INCTRLCHUNKS);
+ if (asoc)
+ asoc->stats.ictrlchunks++;
+ }
+
+ if (chunk->transport)
+ chunk->transport->last_time_heard = ktime_get();
+
+ error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype, state,
+ ep, asoc, chunk, GFP_ATOMIC);
+
+ if (error && chunk)
+ chunk->pdiscard = 1;
+
+ /* Check to see if the endpoint is freed in response to
+ * the incoming chunk. If so, get out of the while loop.
+ */
+ if (!sctp_sk(sk)->ep)
+ break;
+
+ if (first_time)
+ first_time = 0;
+ }
+}
diff --git a/net/sctp/input.c b/net/sctp/input.c
new file mode 100644
index 000000000..4f43afa86
--- /dev/null
+++ b/net/sctp/input.c
@@ -0,0 +1,1349 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* SCTP kernel implementation
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001-2003 International Business Machines, Corp.
+ * Copyright (c) 2001 Intel Corp.
+ * Copyright (c) 2001 Nokia, Inc.
+ * Copyright (c) 2001 La Monte H.P. Yarroll
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * These functions handle all input from the IP layer into SCTP.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Xingang Guo <xingang.guo@intel.com>
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * Hui Huang <hui.huang@nokia.com>
+ * Daisy Chang <daisyc@us.ibm.com>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ * Ardelle Fan <ardelle.fan@intel.com>
+ */
+
+#include <linux/types.h>
+#include <linux/list.h> /* For struct list_head */
+#include <linux/socket.h>
+#include <linux/ip.h>
+#include <linux/time.h> /* For struct timeval */
+#include <linux/slab.h>
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/snmp.h>
+#include <net/sock.h>
+#include <net/xfrm.h>
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+#include <net/sctp/checksum.h>
+#include <net/net_namespace.h>
+#include <linux/rhashtable.h>
+#include <net/sock_reuseport.h>
+
+/* Forward declarations for internal helpers. */
+static int sctp_rcv_ootb(struct sk_buff *);
+static struct sctp_association *__sctp_rcv_lookup(struct net *net,
+ struct sk_buff *skb,
+ const union sctp_addr *paddr,
+ const union sctp_addr *laddr,
+ struct sctp_transport **transportp);
+static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(
+ struct net *net, struct sk_buff *skb,
+ const union sctp_addr *laddr,
+ const union sctp_addr *daddr);
+static struct sctp_association *__sctp_lookup_association(
+ struct net *net,
+ const union sctp_addr *local,
+ const union sctp_addr *peer,
+ struct sctp_transport **pt);
+
+static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
+
+
+/* Calculate the SCTP checksum of an SCTP packet. */
+static inline int sctp_rcv_checksum(struct net *net, struct sk_buff *skb)
+{
+ struct sctphdr *sh = sctp_hdr(skb);
+ __le32 cmp = sh->checksum;
+ __le32 val = sctp_compute_cksum(skb, 0);
+
+ if (val != cmp) {
+ /* CRC failure, dump it. */
+ __SCTP_INC_STATS(net, SCTP_MIB_CHECKSUMERRORS);
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * This is the routine which IP calls when receiving an SCTP packet.
+ */
+int sctp_rcv(struct sk_buff *skb)
+{
+ struct sock *sk;
+ struct sctp_association *asoc;
+ struct sctp_endpoint *ep = NULL;
+ struct sctp_ep_common *rcvr;
+ struct sctp_transport *transport = NULL;
+ struct sctp_chunk *chunk;
+ union sctp_addr src;
+ union sctp_addr dest;
+ int bound_dev_if;
+ int family;
+ struct sctp_af *af;
+ struct net *net = dev_net(skb->dev);
+ bool is_gso = skb_is_gso(skb) && skb_is_gso_sctp(skb);
+
+ if (skb->pkt_type != PACKET_HOST)
+ goto discard_it;
+
+ __SCTP_INC_STATS(net, SCTP_MIB_INSCTPPACKS);
+
+ /* If packet is too small to contain a single chunk, let's not
+ * waste time on it anymore.
+ */
+ if (skb->len < sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr) +
+ skb_transport_offset(skb))
+ goto discard_it;
+
+ /* If the packet is fragmented and we need to do crc checking,
+ * it's better to just linearize it, otherwise crc computation
+ * takes longer.
+ */
+ if ((!is_gso && skb_linearize(skb)) ||
+ !pskb_may_pull(skb, sizeof(struct sctphdr)))
+ goto discard_it;
+
+ /* Pull up the IP header. */
+ __skb_pull(skb, skb_transport_offset(skb));
+
+ skb->csum_valid = 0; /* Previous value not applicable */
+ if (skb_csum_unnecessary(skb))
+ __skb_decr_checksum_unnecessary(skb);
+ else if (!sctp_checksum_disable &&
+ !is_gso &&
+ sctp_rcv_checksum(net, skb) < 0)
+ goto discard_it;
+ skb->csum_valid = 1;
+
+ __skb_pull(skb, sizeof(struct sctphdr));
+
+ family = ipver2af(ip_hdr(skb)->version);
+ af = sctp_get_af_specific(family);
+ if (unlikely(!af))
+ goto discard_it;
+ SCTP_INPUT_CB(skb)->af = af;
+
+ /* Initialize local addresses for lookups. */
+ af->from_skb(&src, skb, 1);
+ af->from_skb(&dest, skb, 0);
+
+ /* If the packet is to or from a non-unicast address,
+ * silently discard the packet.
+ *
+ * This is not clearly defined in the RFC except in section
+ * 8.4 - OOTB handling. However, based on the book "Stream Control
+ * Transmission Protocol" 2.1, "It is important to note that the
+ * IP address of an SCTP transport address must be a routable
+ * unicast address. In other words, IP multicast addresses and
+ * IP broadcast addresses cannot be used in an SCTP transport
+ * address."
+ */
+ if (!af->addr_valid(&src, NULL, skb) ||
+ !af->addr_valid(&dest, NULL, skb))
+ goto discard_it;
+
+ asoc = __sctp_rcv_lookup(net, skb, &src, &dest, &transport);
+
+ if (!asoc)
+ ep = __sctp_rcv_lookup_endpoint(net, skb, &dest, &src);
+
+ /* Retrieve the common input handling substructure. */
+ rcvr = asoc ? &asoc->base : &ep->base;
+ sk = rcvr->sk;
+
+ /*
+ * If a frame arrives on an interface and the receiving socket is
+ * bound to another interface, via SO_BINDTODEVICE, treat it as OOTB
+ */
+ bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
+ if (bound_dev_if && (bound_dev_if != af->skb_iif(skb))) {
+ if (transport) {
+ sctp_transport_put(transport);
+ asoc = NULL;
+ transport = NULL;
+ } else {
+ sctp_endpoint_put(ep);
+ ep = NULL;
+ }
+ sk = net->sctp.ctl_sock;
+ ep = sctp_sk(sk)->ep;
+ sctp_endpoint_hold(ep);
+ rcvr = &ep->base;
+ }
+
+ /*
+ * RFC 2960, 8.4 - Handle "Out of the blue" Packets.
+ * An SCTP packet is called an "out of the blue" (OOTB)
+ * packet if it is correctly formed, i.e., passed the
+ * receiver's checksum check, but the receiver is not
+ * able to identify the association to which this
+ * packet belongs.
+ */
+ if (!asoc) {
+ if (sctp_rcv_ootb(skb)) {
+ __SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
+ goto discard_release;
+ }
+ }
+
+ if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family))
+ goto discard_release;
+ nf_reset_ct(skb);
+
+ if (sk_filter(sk, skb))
+ goto discard_release;
+
+ /* Create an SCTP packet structure. */
+ chunk = sctp_chunkify(skb, asoc, sk, GFP_ATOMIC);
+ if (!chunk)
+ goto discard_release;
+ SCTP_INPUT_CB(skb)->chunk = chunk;
+
+ /* Remember what endpoint is to handle this packet. */
+ chunk->rcvr = rcvr;
+
+ /* Remember the SCTP header. */
+ chunk->sctp_hdr = sctp_hdr(skb);
+
+ /* Set the source and destination addresses of the incoming chunk. */
+ sctp_init_addrs(chunk, &src, &dest);
+
+ /* Remember where we came from. */
+ chunk->transport = transport;
+
+ /* Acquire access to the sock lock. Note: We are safe from other
+ * bottom halves on this lock, but a user may be in the lock too,
+ * so check if it is busy.
+ */
+ bh_lock_sock(sk);
+
+ if (sk != rcvr->sk) {
+ /* Our cached sk is different from the rcvr->sk. This is
+ * because migrate()/accept() may have moved the association
+ * to a new socket and released all the sockets. So now we
+ * are holding a lock on the old socket while the user may
+ * be doing something with the new socket. Switch our view
+ * of the current sk.
+ */
+ bh_unlock_sock(sk);
+ sk = rcvr->sk;
+ bh_lock_sock(sk);
+ }
+
+ if (sock_owned_by_user(sk) || !sctp_newsk_ready(sk)) {
+ if (sctp_add_backlog(sk, skb)) {
+ bh_unlock_sock(sk);
+ sctp_chunk_free(chunk);
+ skb = NULL; /* sctp_chunk_free already freed the skb */
+ goto discard_release;
+ }
+ __SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_BACKLOG);
+ } else {
+ __SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_SOFTIRQ);
+ sctp_inq_push(&chunk->rcvr->inqueue, chunk);
+ }
+
+ bh_unlock_sock(sk);
+
+ /* Release the asoc/ep ref we took in the lookup calls. */
+ if (transport)
+ sctp_transport_put(transport);
+ else
+ sctp_endpoint_put(ep);
+
+ return 0;
+
+discard_it:
+ __SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_DISCARDS);
+ kfree_skb(skb);
+ return 0;
+
+discard_release:
+ /* Release the asoc/ep ref we took in the lookup calls. */
+ if (transport)
+ sctp_transport_put(transport);
+ else
+ sctp_endpoint_put(ep);
+
+ goto discard_it;
+}
+
+/* Process the backlog queue of the socket. Every skb on
+ * the backlog holds a ref on an association or endpoint.
+ * We hold this ref throughout the state machine to make
+ * sure that the structure we need is still around.
+ */
+int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
+{
+ struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
+ struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
+ struct sctp_transport *t = chunk->transport;
+ struct sctp_ep_common *rcvr = NULL;
+ int backloged = 0;
+
+ rcvr = chunk->rcvr;
+
+ /* If the rcvr is dead then the association or endpoint
+ * has been deleted and we can safely drop the chunk
+ * and refs that we are holding.
+ */
+ if (rcvr->dead) {
+ sctp_chunk_free(chunk);
+ goto done;
+ }
+
+ if (unlikely(rcvr->sk != sk)) {
+ /* In this case, the association moved from one socket to
+ * another. We are currently sitting on the backlog of the
+ * old socket, so we need to move.
+ * However, since we are here in the process context we
+ * need to make sure that the user doesn't own
+ * the new socket when we process the packet.
+ * If the new socket is user-owned, queue the chunk to the
+ * backlog of the new socket without dropping any refs.
+ * Otherwise, we can safely push the chunk on the inqueue.
+ */
+
+ sk = rcvr->sk;
+ local_bh_disable();
+ bh_lock_sock(sk);
+
+ if (sock_owned_by_user(sk) || !sctp_newsk_ready(sk)) {
+ if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
+ sctp_chunk_free(chunk);
+ else
+ backloged = 1;
+ } else
+ sctp_inq_push(inqueue, chunk);
+
+ bh_unlock_sock(sk);
+ local_bh_enable();
+
+ /* If the chunk was backloged again, don't drop refs */
+ if (backloged)
+ return 0;
+ } else {
+ if (!sctp_newsk_ready(sk)) {
+ if (!sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
+ return 0;
+ sctp_chunk_free(chunk);
+ } else {
+ sctp_inq_push(inqueue, chunk);
+ }
+ }
+
+done:
+ /* Release the refs we took in sctp_add_backlog */
+ if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
+ sctp_transport_put(t);
+ else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
+ sctp_endpoint_put(sctp_ep(rcvr));
+ else
+ BUG();
+
+ return 0;
+}
+
+static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
+{
+ struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
+ struct sctp_transport *t = chunk->transport;
+ struct sctp_ep_common *rcvr = chunk->rcvr;
+ int ret;
+
+ ret = sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));
+ if (!ret) {
+ /* Hold the assoc/ep while hanging on the backlog queue.
+ * This way, we know structures we need will not disappear
+ * from us
+ */
+ if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
+ sctp_transport_hold(t);
+ else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
+ sctp_endpoint_hold(sctp_ep(rcvr));
+ else
+ BUG();
+ }
+ return ret;
+
+}
+
+/* Handle icmp frag needed error. */
+void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
+ struct sctp_transport *t, __u32 pmtu)
+{
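+ /* Nothing to do without a transport, or if the reported MTU does not
+ * shrink the current path MTU or the PLPMTUD probe size.
+ */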
+ if (!t ||
+ (t->pathmtu <= pmtu &&
+ t->pl.probe_size + sctp_transport_pl_hlen(t) <= pmtu))
+ return;
+
+ if (sock_owned_by_user(sk)) {
+ atomic_set(&t->mtu_info, pmtu);
+ asoc->pmtu_pending = 1;
+ t->pmtu_pending = 1;
+ return;
+ }
+
+ if (!(t->param_flags & SPP_PMTUD_ENABLE))
+ /* We can't allow retransmitting in this case, as the
+ * retransmission would be sized just as before, and thus we
+ * would get another icmp, and retransmit again.
+ */
+ return;
+
+ /* Update transports view of the MTU. Return if no update was needed.
+ * If an update wasn't needed/possible, it also doesn't make sense to
+ * try to retransmit now.
+ */
+ if (!sctp_transport_update_pmtu(t, pmtu))
+ return;
+
+ /* Update association pmtu. */
+ sctp_assoc_sync_pmtu(asoc);
+
+ /* Retransmit with the new pmtu setting. */
+ sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD);
+}
+
+void sctp_icmp_redirect(struct sock *sk, struct sctp_transport *t,
+ struct sk_buff *skb)
+{
+ struct dst_entry *dst;
+
+ if (sock_owned_by_user(sk) || !t)
+ return;
+ dst = sctp_transport_dst_check(t);
+ if (dst)
+ dst->ops->redirect(dst, sk, skb);
+}
+
+/*
+ * SCTP Implementer's Guide, 2.37 ICMP handling procedures
+ *
+ * ICMP8) If the ICMP code is a "Unrecognized next header type encountered"
+ * or a "Protocol Unreachable" treat this message as an abort
+ * with the T bit set.
+ *
+ * This function sends an event to the state machine, which will abort the
+ * association.
+ *
+ */
+void sctp_icmp_proto_unreachable(struct sock *sk,
+ struct sctp_association *asoc,
+ struct sctp_transport *t)
+{
+ if (sock_owned_by_user(sk)) {
+ if (timer_pending(&t->proto_unreach_timer))
+ return;
+ else {
+ if (!mod_timer(&t->proto_unreach_timer,
+ jiffies + (HZ/20)))
+ sctp_transport_hold(t);
+ }
+ } else {
+ struct net *net = sock_net(sk);
+
+ pr_debug("%s: unrecognized next header type "
+ "encountered!\n", __func__);
+
+ if (del_timer(&t->proto_unreach_timer))
+ sctp_transport_put(t);
+
+ sctp_do_sm(net, SCTP_EVENT_T_OTHER,
+ SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
+ asoc->state, asoc->ep, asoc, t,
+ GFP_ATOMIC);
+ }
+}
+
+/* Common lookup code for icmp/icmpv6 error handler. */
+struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
+ struct sctphdr *sctphdr,
+ struct sctp_association **app,
+ struct sctp_transport **tpp)
+{
+ struct sctp_init_chunk *chunkhdr, _chunkhdr;
+ union sctp_addr saddr;
+ union sctp_addr daddr;
+ struct sctp_af *af;
+ struct sock *sk = NULL;
+ struct sctp_association *asoc;
+ struct sctp_transport *transport = NULL;
+ __u32 vtag = ntohl(sctphdr->vtag);
+
+ *app = NULL; *tpp = NULL;
+
+ af = sctp_get_af_specific(family);
+ if (unlikely(!af)) {
+ return NULL;
+ }
+
+ /* Initialize local addresses for lookups. */
+ af->from_skb(&saddr, skb, 1);
+ af->from_skb(&daddr, skb, 0);
+
+ /* Look for an association that matches the incoming ICMP error
+ * packet.
+ */
+ asoc = __sctp_lookup_association(net, &saddr, &daddr, &transport);
+ if (!asoc)
+ return NULL;
+
+ sk = asoc->base.sk;
+
+ /* RFC 4960, Appendix C. ICMP Handling
+ *
+ * ICMP6) An implementation MUST validate that the Verification Tag
+ * contained in the ICMP message matches the Verification Tag of
+ * the peer. If the Verification Tag is not 0 and does NOT
+ * match, discard the ICMP message. If it is 0 and the ICMP
+ * message contains enough bytes to verify that the chunk type is
+ * an INIT chunk and that the Initiate Tag matches the tag of the
+ * peer, continue with ICMP7. If the ICMP message is too short
+ * or the chunk type or the Initiate Tag does not match, silently
+ * discard the packet.
+ */
+ if (vtag == 0) {
+ /* chunk header + first 4 octets of init header */
+ chunkhdr = skb_header_pointer(skb, skb_transport_offset(skb) +
+ sizeof(struct sctphdr),
+ sizeof(struct sctp_chunkhdr) +
+ sizeof(__be32), &_chunkhdr);
+ if (!chunkhdr ||
+ chunkhdr->chunk_hdr.type != SCTP_CID_INIT ||
+ ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag)
+ goto out;
+
+ } else if (vtag != asoc->c.peer_vtag) {
+ goto out;
+ }
+
+ bh_lock_sock(sk);
+
+ /* If too many ICMPs get dropped on busy
+ * servers this needs to be solved differently.
+ */
+ if (sock_owned_by_user(sk))
+ __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
+
+ *app = asoc;
+ *tpp = transport;
+ return sk;
+
+out:
+ sctp_transport_put(transport);
+ return NULL;
+}
+
+/* Common cleanup code for icmp/icmpv6 error handler. */
+void sctp_err_finish(struct sock *sk, struct sctp_transport *t)
+ __releases(&((__sk)->sk_lock.slock))
+{
+ bh_unlock_sock(sk);
+ sctp_transport_put(t);
+}
+
+static void sctp_v4_err_handle(struct sctp_transport *t, struct sk_buff *skb,
+ __u8 type, __u8 code, __u32 info)
+{
+ struct sctp_association *asoc = t->asoc;
+ struct sock *sk = asoc->base.sk;
+ int err = 0;
+
+ switch (type) {
+ case ICMP_PARAMETERPROB:
+ err = EPROTO;
+ break;
+ case ICMP_DEST_UNREACH:
+ if (code > NR_ICMP_UNREACH)
+ return;
+ if (code == ICMP_FRAG_NEEDED) {
+ sctp_icmp_frag_needed(sk, asoc, t, SCTP_TRUNC4(info));
+ return;
+ }
+ if (code == ICMP_PROT_UNREACH) {
+ sctp_icmp_proto_unreachable(sk, asoc, t);
+ return;
+ }
+ err = icmp_err_convert[code].errno;
+ break;
+ case ICMP_TIME_EXCEEDED:
+ if (code == ICMP_EXC_FRAGTIME)
+ return;
+
+ err = EHOSTUNREACH;
+ break;
+ case ICMP_REDIRECT:
+ sctp_icmp_redirect(sk, t, skb);
+ return;
+ default:
+ return;
+ }
+ if (!sock_owned_by_user(sk) && inet_sk(sk)->recverr) {
+ sk->sk_err = err;
+ sk_error_report(sk);
+ } else { /* Only an error on timeout */
+ sk->sk_err_soft = err;
+ }
+}
+
+/*
+ * This routine is called by the ICMP module when it gets some
+ * sort of error condition. If err < 0 then the socket should
+ * be closed and the error returned to the user. If err > 0
+ * it's just the icmp type << 8 | icmp code. After adjustment
+ * header points to the first 8 bytes of the sctp header. We need
+ * to find the appropriate port.
+ *
+ * The locking strategy used here is very "optimistic". When
+ * someone else accesses the socket the ICMP is just dropped
+ * and for some paths there is no check at all.
+ * A more general error queue to queue errors for later handling
+ * is probably better.
+ *
+ */
+int sctp_v4_err(struct sk_buff *skb, __u32 info)
+{
+ const struct iphdr *iph = (const struct iphdr *)skb->data;
+ const int type = icmp_hdr(skb)->type;
+ const int code = icmp_hdr(skb)->code;
+ struct net *net = dev_net(skb->dev);
+ struct sctp_transport *transport;
+ struct sctp_association *asoc;
+ __u16 saveip, savesctp;
+ struct sock *sk;
+
+ /* Fix up skb to look at the embedded net header. */
+ saveip = skb->network_header;
+ savesctp = skb->transport_header;
+ skb_reset_network_header(skb);
+ skb_set_transport_header(skb, iph->ihl * 4);
+ sk = sctp_err_lookup(net, AF_INET, skb, sctp_hdr(skb), &asoc, &transport);
+ /* Put back, the original values. */
+ skb->network_header = saveip;
+ skb->transport_header = savesctp;
+ if (!sk) {
+ __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
+ return -ENOENT;
+ }
+
+ sctp_v4_err_handle(transport, skb, type, code, info);
+ sctp_err_finish(sk, transport);
+
+ return 0;
+}
+
+int sctp_udp_v4_err(struct sock *sk, struct sk_buff *skb)
+{
+ struct net *net = dev_net(skb->dev);
+ struct sctp_association *asoc;
+ struct sctp_transport *t;
+ struct icmphdr *hdr;
+ __u32 info = 0;
+
+ skb->transport_header += sizeof(struct udphdr);
+ sk = sctp_err_lookup(net, AF_INET, skb, sctp_hdr(skb), &asoc, &t);
+ if (!sk) {
+ __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
+ return -ENOENT;
+ }
+
+ skb->transport_header -= sizeof(struct udphdr);
+ hdr = (struct icmphdr *)(skb_network_header(skb) - sizeof(struct icmphdr));
+ if (hdr->type == ICMP_REDIRECT) {
+ /* can't be handled without outer iphdr known, leave it to udp_err */
+ sctp_err_finish(sk, t);
+ return 0;
+ }
+ if (hdr->type == ICMP_DEST_UNREACH && hdr->code == ICMP_FRAG_NEEDED)
+ info = ntohs(hdr->un.frag.mtu);
+ sctp_v4_err_handle(t, skb, hdr->type, hdr->code, info);
+
+ sctp_err_finish(sk, t);
+ return 1;
+}
+
+/*
+ * RFC 2960, 8.4 - Handle "Out of the blue" Packets.
+ *
+ * This function scans all the chunks in the OOTB packet to determine if
+ * the packet should be discarded right away. If a response might be needed
+ * for this packet, or, if further processing is possible, the packet will
+ * be queued to a proper inqueue for the next phase of handling.
+ *
+ * Output:
+ * Return 0 - If further processing is needed.
+ * Return 1 - If the packet can be discarded right away.
+ */
+static int sctp_rcv_ootb(struct sk_buff *skb)
+{
+ struct sctp_chunkhdr *ch, _ch;
+ int ch_end, offset = 0;
+
+ /* Scan through all the chunks in the packet. */
+ do {
+ /* Make sure we have at least the header there */
+ if (offset + sizeof(_ch) > skb->len)
+ break;
+
+ ch = skb_header_pointer(skb, offset, sizeof(*ch), &_ch);
+
+ /* Break out if the chunk length is less than the minimum. */
+ if (!ch || ntohs(ch->length) < sizeof(_ch))
+ break;
+
+ ch_end = offset + SCTP_PAD4(ntohs(ch->length));
+ if (ch_end > skb->len)
+ break;
+
+ /* RFC 8.4, 2) If the OOTB packet contains an ABORT chunk, the
+ * receiver MUST silently discard the OOTB packet and take no
+ * further action.
+ */
+ if (SCTP_CID_ABORT == ch->type)
+ goto discard;
+
+ /* RFC 8.4, 6) If the packet contains a SHUTDOWN COMPLETE
+ * chunk, the receiver should silently discard the packet
+ * and take no further action.
+ */
+ if (SCTP_CID_SHUTDOWN_COMPLETE == ch->type)
+ goto discard;
+
+ /* RFC 4460, 2.11.2
+ * This will discard packets with INIT chunk bundled as
+ * subsequent chunks in the packet. When INIT is first,
+ * the normal INIT processing will discard the chunk.
+ */
+ if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data)
+ goto discard;
+
+ offset = ch_end;
+ } while (ch_end < skb->len);
+
+ return 0;
+
+discard:
+ return 1;
+}
+
+/* Insert endpoint into the hash table. */
+static int __sctp_hash_endpoint(struct sctp_endpoint *ep)
+{
+ struct sock *sk = ep->base.sk;
+ struct net *net = sock_net(sk);
+ struct sctp_hashbucket *head;
+
+ ep->hashent = sctp_ep_hashfn(net, ep->base.bind_addr.port);
+ head = &sctp_ep_hashtable[ep->hashent];
+
+ if (sk->sk_reuseport) {
+ bool any = sctp_is_ep_boundall(sk);
+ struct sctp_endpoint *ep2;
+ struct list_head *list;
+ int cnt = 0, err = 1;
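+ /* err stays non-zero if no compatible reuseport socket is found,
+ * in which case a new reuseport group is allocated below.
+ */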
+
+ list_for_each(list, &ep->base.bind_addr.address_list)
+ cnt++;
+
+ sctp_for_each_hentry(ep2, &head->chain) {
+ struct sock *sk2 = ep2->base.sk;
+
+ if (!net_eq(sock_net(sk2), net) || sk2 == sk ||
+ !uid_eq(sock_i_uid(sk2), sock_i_uid(sk)) ||
+ !sk2->sk_reuseport)
+ continue;
+
+ err = sctp_bind_addrs_check(sctp_sk(sk2),
+ sctp_sk(sk), cnt);
+ if (!err) {
+ err = reuseport_add_sock(sk, sk2, any);
+ if (err)
+ return err;
+ break;
+ } else if (err < 0) {
+ return err;
+ }
+ }
+
+ if (err) {
+ err = reuseport_alloc(sk, any);
+ if (err)
+ return err;
+ }
+ }
+
+ write_lock(&head->lock);
+ hlist_add_head(&ep->node, &head->chain);
+ write_unlock(&head->lock);
+ return 0;
+}
+
+/* Add an endpoint to the hash. Local BH-safe. */
+int sctp_hash_endpoint(struct sctp_endpoint *ep)
+{
+ int err;
+
+ local_bh_disable();
+ err = __sctp_hash_endpoint(ep);
+ local_bh_enable();
+
+ return err;
+}
+
+/* Remove endpoint from the hash table. */
+static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
+{
+ struct sock *sk = ep->base.sk;
+ struct sctp_hashbucket *head;
+
+ ep->hashent = sctp_ep_hashfn(sock_net(sk), ep->base.bind_addr.port);
+
+ head = &sctp_ep_hashtable[ep->hashent];
+
+ if (rcu_access_pointer(sk->sk_reuseport_cb))
+ reuseport_detach_sock(sk);
+
+ write_lock(&head->lock);
+ hlist_del_init(&ep->node);
+ write_unlock(&head->lock);
+}
+
+/* Remove endpoint from the hash. Local BH-safe. */
+void sctp_unhash_endpoint(struct sctp_endpoint *ep)
+{
+ local_bh_disable();
+ __sctp_unhash_endpoint(ep);
+ local_bh_enable();
+}
+
+static inline __u32 sctp_hashfn(const struct net *net, __be16 lport,
+ const union sctp_addr *paddr, __u32 seed)
+{
+ __u32 addr;
+
+ if (paddr->sa.sa_family == AF_INET6)
+ addr = jhash(&paddr->v6.sin6_addr, 16, seed);
+ else
+ addr = (__force __u32)paddr->v4.sin_addr.s_addr;
+
+ return jhash_3words(addr, ((__force __u32)paddr->v4.sin_port) << 16 |
+ (__force __u32)lport, net_hash_mix(net), seed);
+}
+
+/* Look up an endpoint. */
+static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(
+ struct net *net, struct sk_buff *skb,
+ const union sctp_addr *laddr,
+ const union sctp_addr *paddr)
+{
+ struct sctp_hashbucket *head;
+ struct sctp_endpoint *ep;
+ struct sock *sk;
+ __be16 lport;
+ int hash;
+
+ lport = laddr->v4.sin_port;
+ hash = sctp_ep_hashfn(net, ntohs(lport));
+ head = &sctp_ep_hashtable[hash];
+ read_lock(&head->lock);
+ sctp_for_each_hentry(ep, &head->chain) {
+ if (sctp_endpoint_is_match(ep, net, laddr))
+ goto hit;
+ }
+
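+ /* No bound endpoint matched: fall back to the control socket's
+ * endpoint so the packet is handled as OOTB.
+ */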
+ ep = sctp_sk(net->sctp.ctl_sock)->ep;
+
+hit:
+ sk = ep->base.sk;
+ if (sk->sk_reuseport) {
+ __u32 phash = sctp_hashfn(net, lport, paddr, 0);
+
+ sk = reuseport_select_sock(sk, phash, skb,
+ sizeof(struct sctphdr));
+ if (sk)
+ ep = sctp_sk(sk)->ep;
+ }
+ sctp_endpoint_hold(ep);
+ read_unlock(&head->lock);
+ return ep;
+}
+
+/* rhashtable for transport */
+struct sctp_hash_cmp_arg {
+ const union sctp_addr *paddr;
+ const struct net *net;
+ __be16 lport;
+};
+
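+/* rhashtable compare function: returns 0 on a match, non-zero otherwise. */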
+static inline int sctp_hash_cmp(struct rhashtable_compare_arg *arg,
+ const void *ptr)
+{
+ struct sctp_transport *t = (struct sctp_transport *)ptr;
+ const struct sctp_hash_cmp_arg *x = arg->key;
+ int err = 1;
+
+ if (!sctp_cmp_addr_exact(&t->ipaddr, x->paddr))
+ return err;
+ if (!sctp_transport_hold(t))
+ return err;
+
+ if (!net_eq(t->asoc->base.net, x->net))
+ goto out;
+ if (x->lport != htons(t->asoc->base.bind_addr.port))
+ goto out;
+
+ err = 0;
+out:
+ sctp_transport_put(t);
+ return err;
+}
+
+static inline __u32 sctp_hash_obj(const void *data, u32 len, u32 seed)
+{
+ const struct sctp_transport *t = data;
+
+ return sctp_hashfn(t->asoc->base.net,
+ htons(t->asoc->base.bind_addr.port),
+ &t->ipaddr, seed);
+}
+
+static inline __u32 sctp_hash_key(const void *data, u32 len, u32 seed)
+{
+ const struct sctp_hash_cmp_arg *x = data;
+
+ return sctp_hashfn(x->net, x->lport, x->paddr, seed);
+}
+
+static const struct rhashtable_params sctp_hash_params = {
+ .head_offset = offsetof(struct sctp_transport, node),
+ .hashfn = sctp_hash_key,
+ .obj_hashfn = sctp_hash_obj,
+ .obj_cmpfn = sctp_hash_cmp,
+ .automatic_shrinking = true,
+};
+
+int sctp_transport_hashtable_init(void)
+{
+ return rhltable_init(&sctp_transport_hashtable, &sctp_hash_params);
+}
+
+void sctp_transport_hashtable_destroy(void)
+{
+ rhltable_destroy(&sctp_transport_hashtable);
+}
+
+int sctp_hash_transport(struct sctp_transport *t)
+{
+ struct sctp_transport *transport;
+ struct rhlist_head *tmp, *list;
+ struct sctp_hash_cmp_arg arg;
+ int err;
+
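+ /* Transports of temporary associations are never hashed. */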
+ if (t->asoc->temp)
+ return 0;
+
+ arg.net = t->asoc->base.net;
+ arg.paddr = &t->ipaddr;
+ arg.lport = htons(t->asoc->base.bind_addr.port);
+
+ rcu_read_lock();
+ list = rhltable_lookup(&sctp_transport_hashtable, &arg,
+ sctp_hash_params);
+
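+ /* Only one transport per peer address may be hashed for a given
+ * endpoint; reject duplicates.
+ */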
+ rhl_for_each_entry_rcu(transport, tmp, list, node)
+ if (transport->asoc->ep == t->asoc->ep) {
+ rcu_read_unlock();
+ return -EEXIST;
+ }
+ rcu_read_unlock();
+
+ err = rhltable_insert_key(&sctp_transport_hashtable, &arg,
+ &t->node, sctp_hash_params);
+ if (err)
+ pr_err_once("insert transport fail, errno %d\n", err);
+
+ return err;
+}
+
+void sctp_unhash_transport(struct sctp_transport *t)
+{
+ if (t->asoc->temp)
+ return;
+
+ rhltable_remove(&sctp_transport_hashtable, &t->node,
+ sctp_hash_params);
+}
+
+/* return a transport while holding a reference to it */
+struct sctp_transport *sctp_addrs_lookup_transport(
+ struct net *net,
+ const union sctp_addr *laddr,
+ const union sctp_addr *paddr)
+{
+ struct rhlist_head *tmp, *list;
+ struct sctp_transport *t;
+ struct sctp_hash_cmp_arg arg = {
+ .paddr = paddr,
+ .net = net,
+ .lport = laddr->v4.sin_port,
+ };
+
+ list = rhltable_lookup(&sctp_transport_hashtable, &arg,
+ sctp_hash_params);
+
+ rhl_for_each_entry_rcu(t, tmp, list, node) {
+ if (!sctp_transport_hold(t))
+ continue;
+
+ if (sctp_bind_addr_match(&t->asoc->base.bind_addr,
+ laddr, sctp_sk(t->asoc->base.sk)))
+ return t;
+ sctp_transport_put(t);
+ }
+
+ return NULL;
+}
+
+/* return a transport without holding it, as it's only used under sock lock */
+struct sctp_transport *sctp_epaddr_lookup_transport(
+ const struct sctp_endpoint *ep,
+ const union sctp_addr *paddr)
+{
+ struct rhlist_head *tmp, *list;
+ struct sctp_transport *t;
+ struct sctp_hash_cmp_arg arg = {
+ .paddr = paddr,
+ .net = ep->base.net,
+ .lport = htons(ep->base.bind_addr.port),
+ };
+
+ list = rhltable_lookup(&sctp_transport_hashtable, &arg,
+ sctp_hash_params);
+
+ rhl_for_each_entry_rcu(t, tmp, list, node)
+ if (ep == t->asoc->ep)
+ return t;
+
+ return NULL;
+}
+
+/* Look up an association. */
+static struct sctp_association *__sctp_lookup_association(
+ struct net *net,
+ const union sctp_addr *local,
+ const union sctp_addr *peer,
+ struct sctp_transport **pt)
+{
+ struct sctp_transport *t;
+ struct sctp_association *asoc = NULL;
+
+ t = sctp_addrs_lookup_transport(net, local, peer);
+ if (!t)
+ goto out;
+
+ asoc = t->asoc;
+ *pt = t;
+
+out:
+ return asoc;
+}
+
+/* Look up an association. protected by RCU read lock */
+static
+struct sctp_association *sctp_lookup_association(struct net *net,
+ const union sctp_addr *laddr,
+ const union sctp_addr *paddr,
+ struct sctp_transport **transportp)
+{
+ struct sctp_association *asoc;
+
+ rcu_read_lock();
+ asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
+ rcu_read_unlock();
+
+ return asoc;
+}
+
+/* Is there an association matching the given local and peer addresses? */
+bool sctp_has_association(struct net *net,
+ const union sctp_addr *laddr,
+ const union sctp_addr *paddr)
+{
+ struct sctp_transport *transport;
+
+ if (sctp_lookup_association(net, laddr, paddr, &transport)) {
+ sctp_transport_put(transport);
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * SCTP Implementors Guide, 2.18 Handling of address
+ * parameters within the INIT or INIT-ACK.
+ *
+ * D) When searching for a matching TCB upon reception of an INIT
+ * or INIT-ACK chunk the receiver SHOULD use not only the
+ * source address of the packet (containing the INIT or
+ * INIT-ACK) but the receiver SHOULD also use all valid
+ * address parameters contained within the chunk.
+ *
+ * 2.18.3 Solution description
+ *
+ * This new text clearly specifies to an implementor the need
+ * to look within the INIT or INIT-ACK. Any implementation that
+ * does not do this, may not be able to establish associations
+ * in certain circumstances.
+ *
+ */
+static struct sctp_association *__sctp_rcv_init_lookup(struct net *net,
+ struct sk_buff *skb,
+ const union sctp_addr *laddr, struct sctp_transport **transportp)
+{
+ struct sctp_association *asoc;
+ union sctp_addr addr;
+ union sctp_addr *paddr = &addr;
+ struct sctphdr *sh = sctp_hdr(skb);
+ union sctp_params params;
+ struct sctp_init_chunk *init;
+ struct sctp_af *af;
+
+ /*
+ * This code will NOT touch anything inside the chunk--it is
+ * strictly READ-ONLY.
+ *
+ * RFC 2960 3 SCTP packet Format
+ *
+ * Multiple chunks can be bundled into one SCTP packet up to
+ * the MTU size, except for the INIT, INIT ACK, and SHUTDOWN
+ * COMPLETE chunks. These chunks MUST NOT be bundled with any
+ * other chunk in a packet. See Section 6.10 for more details
+ * on chunk bundling.
+ */
+
+ /* Find the start of the TLVs and the end of the chunk. This is
+ * the region we search for address parameters.
+ */
+ init = (struct sctp_init_chunk *)skb->data;
+
+ /* Walk the parameters looking for embedded addresses. */
+ sctp_walk_params(params, init, init_hdr.params) {
+
+ /* Note: Ignoring hostname addresses. */
+ af = sctp_get_af_specific(param_type2af(params.p->type));
+ if (!af)
+ continue;
+
+ if (!af->from_addr_param(paddr, params.addr, sh->source, 0))
+ continue;
+
+ asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
+ if (asoc)
+ return asoc;
+ }
+
+ return NULL;
+}
+
+/* ADD-IP, Section 5.2
+ * When an endpoint receives an ASCONF Chunk from the remote peer
+ * special procedures may be needed to identify the association the
+ * ASCONF Chunk is associated with. To properly find the association
+ * the following procedures SHOULD be followed:
+ *
+ * D2) If the association is not found, use the address found in the
+ * Address Parameter TLV combined with the port number found in the
+ * SCTP common header. If found proceed to rule D4.
+ *
+ * D2-ext) If more than one ASCONF Chunks are packed together, use the
+ * address found in the ASCONF Address Parameter TLV of each of the
+ * subsequent ASCONF Chunks. If found, proceed to rule D4.
+ */
+static struct sctp_association *__sctp_rcv_asconf_lookup(
+ struct net *net,
+ struct sctp_chunkhdr *ch,
+ const union sctp_addr *laddr,
+ __be16 peer_port,
+ struct sctp_transport **transportp)
+{
+ struct sctp_addip_chunk *asconf = (struct sctp_addip_chunk *)ch;
+ struct sctp_af *af;
+ union sctp_addr_param *param;
+ union sctp_addr paddr;
+
+ if (ntohs(ch->length) < sizeof(*asconf) + sizeof(struct sctp_paramhdr))
+ return NULL;
+
+ /* Skip over the ADDIP header and find the Address parameter */
+ param = (union sctp_addr_param *)(asconf + 1);
+
+ af = sctp_get_af_specific(param_type2af(param->p.type));
+ if (unlikely(!af))
+ return NULL;
+
+ if (!af->from_addr_param(&paddr, param, peer_port, 0))
+ return NULL;
+
+ return __sctp_lookup_association(net, laddr, &paddr, transportp);
+}
+
+
+/* SCTP-AUTH, Section 6.3:
+ * If the receiver does not find a STCB for a packet containing an AUTH
+ * chunk as the first chunk and not a COOKIE-ECHO chunk as the second
+ * chunk, it MUST use the chunks after the AUTH chunk to look up an existing
+ * association.
+ *
+ * This means that any chunks that can help us identify the association need
+ * to be looked at to find this association.
+ */
+static struct sctp_association *__sctp_rcv_walk_lookup(struct net *net,
+ struct sk_buff *skb,
+ const union sctp_addr *laddr,
+ struct sctp_transport **transportp)
+{
+ struct sctp_association *asoc = NULL;
+ struct sctp_chunkhdr *ch;
+ int have_auth = 0;
+ unsigned int chunk_num = 1;
+ __u8 *ch_end;
+
+ /* Walk through the chunks looking for AUTH or ASCONF chunks
+ * to help us find the association.
+ */
+ ch = (struct sctp_chunkhdr *)skb->data;
+ do {
+ /* Break out if the chunk length is less than the minimum. */
+ if (ntohs(ch->length) < sizeof(*ch))
+ break;
+
+ ch_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length));
+ if (ch_end > skb_tail_pointer(skb))
+ break;
+
+ switch (ch->type) {
+ case SCTP_CID_AUTH:
+ have_auth = chunk_num;
+ break;
+
+ case SCTP_CID_COOKIE_ECHO:
+ /* If a packet arrives containing an AUTH chunk as
+ * a first chunk, a COOKIE-ECHO chunk as the second
+ * chunk, and possibly more chunks after them, and
+ * the receiver does not have an STCB for that
+ * packet, then authentication is based on
+ * the contents of the COOKIE-ECHO chunk.
+ */
+ if (have_auth == 1 && chunk_num == 2)
+ return NULL;
+ break;
+
+ case SCTP_CID_ASCONF:
+ if (have_auth || net->sctp.addip_noauth)
+ asoc = __sctp_rcv_asconf_lookup(
+ net, ch, laddr,
+ sctp_hdr(skb)->source,
+ transportp);
+ break;
+ default:
+ break;
+ }
+
+ if (asoc)
+ break;
+
+ ch = (struct sctp_chunkhdr *)ch_end;
+ chunk_num++;
+ } while (ch_end + sizeof(*ch) < skb_tail_pointer(skb));
+
+ return asoc;
+}
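+
+/* Two example bundles illustrate what the walk above does:
+ *
+ *	AUTH | COOKIE-ECHO | ...  -> return NULL; the COOKIE-ECHO path
+ *	                             handles authentication and lookup.
+ *	AUTH | ASCONF | ...       -> look up the association via the address
+ *	                             in the ASCONF chunk (also allowed
+ *	                             without AUTH when addip_noauth is set).
+ */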
+
+/*
+ * There are circumstances when we need to look inside the SCTP packet
+ * for information to help us find the association. Examples
+ * include looking inside of INIT/INIT-ACK chunks or after the AUTH
+ * chunks.
+ */
+static struct sctp_association *__sctp_rcv_lookup_harder(struct net *net,
+ struct sk_buff *skb,
+ const union sctp_addr *laddr,
+ struct sctp_transport **transportp)
+{
+ struct sctp_chunkhdr *ch;
+
+ /* We do not allow GSO frames here as we need to linearize and
+ * then cannot guarantee frame boundaries. This shouldn't be an
+ * issue as packets hitting this are mostly INIT or INIT-ACK and
+ * those cannot be GSO-style frames anyway.
+ */
+ if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
+ return NULL;
+
+ ch = (struct sctp_chunkhdr *)skb->data;
+
+ /* The code below will attempt to walk the chunk and extract
+ * parameter information. Before we do that, we need to verify
+ * that the chunk length doesn't cause overflow. Otherwise, we'll
+ * walk off the end.
+ */
+ if (SCTP_PAD4(ntohs(ch->length)) > skb->len)
+ return NULL;
+
+ /* If this is INIT/INIT-ACK look inside the chunk too. */
+ if (ch->type == SCTP_CID_INIT || ch->type == SCTP_CID_INIT_ACK)
+ return __sctp_rcv_init_lookup(net, skb, laddr, transportp);
+
+ return __sctp_rcv_walk_lookup(net, skb, laddr, transportp);
+}
+
+/* Lookup an association for an inbound skb. */
+static struct sctp_association *__sctp_rcv_lookup(struct net *net,
+ struct sk_buff *skb,
+ const union sctp_addr *paddr,
+ const union sctp_addr *laddr,
+ struct sctp_transport **transportp)
+{
+ struct sctp_association *asoc;
+
+ asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
+ if (asoc)
+ goto out;
+
+ /* Further lookup for INIT/INIT-ACK packets.
+ * SCTP Implementors Guide, 2.18 Handling of address
+ * parameters within the INIT or INIT-ACK.
+ */
+ asoc = __sctp_rcv_lookup_harder(net, skb, laddr, transportp);
+ if (asoc)
+ goto out;
+
+ if (paddr->sa.sa_family == AF_INET)
+ pr_debug("sctp: asoc not found for src:%pI4:%d dst:%pI4:%d\n",
+ &laddr->v4.sin_addr, ntohs(laddr->v4.sin_port),
+ &paddr->v4.sin_addr, ntohs(paddr->v4.sin_port));
+ else
+ pr_debug("sctp: asoc not found for src:%pI6:%d dst:%pI6:%d\n",
+ &laddr->v6.sin6_addr, ntohs(laddr->v6.sin6_port),
+ &paddr->v6.sin6_addr, ntohs(paddr->v6.sin6_port));
+
+out:
+ return asoc;
+}
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
new file mode 100644
index 000000000..7182c5a45
--- /dev/null
+++ b/net/sctp/inqueue.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* SCTP kernel implementation
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2002 International Business Machines, Corp.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * These functions are the methods for accessing the SCTP inqueue.
+ *
+ * An SCTP inqueue is a queue into which you push SCTP packets
+ * (which might be bundles or fragments of chunks) and out of which you
+ * pop SCTP whole chunks.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ */
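+
+/* A minimal usage sketch of this API (my_worker is a hypothetical
+ * work_func_t; error handling omitted):
+ *
+ *	struct sctp_inq q;
+ *
+ *	sctp_inq_init(&q);
+ *	sctp_inq_set_th_handler(&q, my_worker);
+ *
+ *	sctp_inq_push(&q, chunk);	<- queue a received packet; this
+ *	                        	   invokes my_worker() directly
+ *
+ *	inside my_worker() (which recovers its inqueue, e.g. via
+ *	container_of() on the work struct):
+ *
+ *	while ((chunk = sctp_inq_pop(&q)) != NULL)
+ *	        ... process one whole chunk ...
+ *
+ *	sctp_inq_free(&q);		<- drop anything still queued
+ */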
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+
+/* Initialize an SCTP inqueue. */
+void sctp_inq_init(struct sctp_inq *queue)
+{
+ INIT_LIST_HEAD(&queue->in_chunk_list);
+ queue->in_progress = NULL;
+
+ /* Create a task for delivering data. */
+ INIT_WORK(&queue->immediate, NULL);
+}
+
+/* Release the memory associated with an SCTP inqueue. */
+void sctp_inq_free(struct sctp_inq *queue)
+{
+ struct sctp_chunk *chunk, *tmp;
+
+ /* Empty the queue. */
+ list_for_each_entry_safe(chunk, tmp, &queue->in_chunk_list, list) {
+ list_del_init(&chunk->list);
+ sctp_chunk_free(chunk);
+ }
+
+ /* If there is a packet which is currently being worked on,
+ * free it as well.
+ */
+ if (queue->in_progress) {
+ sctp_chunk_free(queue->in_progress);
+ queue->in_progress = NULL;
+ }
+}
+
+/* Put a new packet in an SCTP inqueue.
+ * We assume that packet->sctp_hdr is set and in host byte order.
+ */
+void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
+{
+ /* Directly call the packet handling routine. */
+ if (chunk->rcvr->dead) {
+ sctp_chunk_free(chunk);
+ return;
+ }
+
+ /* We are now calling this either from the soft interrupt
+ * or from the backlog processing.
+ * Eventually, we should clean up inqueue to not rely
+ * on the BH related data structures.
+ */
+ list_add_tail(&chunk->list, &q->in_chunk_list);
+ if (chunk->asoc)
+ chunk->asoc->stats.ipackets++;
+ q->immediate.func(&q->immediate);
+}
+
+/* Peek at the next chunk on the inqueue. */
+struct sctp_chunkhdr *sctp_inq_peek(struct sctp_inq *queue)
+{
+ struct sctp_chunk *chunk;
+ struct sctp_chunkhdr *ch = NULL;
+
+ chunk = queue->in_progress;
+ /* If there are no more chunks in this packet, say so */
+ if (chunk->singleton ||
+ chunk->end_of_packet ||
+ chunk->pdiscard)
+ return NULL;
+
+ ch = (struct sctp_chunkhdr *)chunk->chunk_end;
+
+ return ch;
+}
+
+
+/* Extract a chunk from an SCTP inqueue.
+ *
+ * WARNING: If you need to put the chunk on another queue, you need to
+ * make a shallow copy (clone) of it.
+ */
+struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
+{
+ struct sctp_chunk *chunk;
+ struct sctp_chunkhdr *ch = NULL;
+
+ /* The assumption is that we are safe to process the chunks
+ * at this time.
+ */
+
+ chunk = queue->in_progress;
+ if (chunk) {
+ /* There is a packet that we have been working on.
+ * Any post processing work to do before we move on?
+ */
+ if (chunk->singleton ||
+ chunk->end_of_packet ||
+ chunk->pdiscard) {
+ if (chunk->head_skb == chunk->skb) {
+ chunk->skb = skb_shinfo(chunk->skb)->frag_list;
+ goto new_skb;
+ }
+ if (chunk->skb->next) {
+ chunk->skb = chunk->skb->next;
+ goto new_skb;
+ }
+
+ if (chunk->head_skb)
+ chunk->skb = chunk->head_skb;
+ sctp_chunk_free(chunk);
+ chunk = queue->in_progress = NULL;
+ } else {
+ /* Nothing to do. Next chunk in the packet, please. */
+ ch = (struct sctp_chunkhdr *)chunk->chunk_end;
+ /* Force chunk->skb->data to chunk->chunk_end. */
+ skb_pull(chunk->skb, chunk->chunk_end - chunk->skb->data);
+ /* We are guaranteed to pull an SCTP header. */
+ }
+ }
+
+ /* Do we need to take the next packet out of the queue to process? */
+ if (!chunk) {
+ struct list_head *entry;
+
+next_chunk:
+ /* Is the queue empty? */
+ entry = sctp_list_dequeue(&queue->in_chunk_list);
+ if (!entry)
+ return NULL;
+
+ chunk = list_entry(entry, struct sctp_chunk, list);
+
+ if (skb_is_gso(chunk->skb) && skb_is_gso_sctp(chunk->skb)) {
+ /* GSO-marked skbs without frags are handled normally,
+ * so only remember the head skb when a frag_list is
+ * present.
+ */
+ if (skb_shinfo(chunk->skb)->frag_list)
+ chunk->head_skb = chunk->skb;
+
+ /* skbs with only a "cover letter" (no data in the linear head) */
+ if (chunk->head_skb && chunk->skb->data_len == chunk->skb->len)
+ chunk->skb = skb_shinfo(chunk->skb)->frag_list;
+
+ if (WARN_ON(!chunk->skb)) {
+ __SCTP_INC_STATS(dev_net(chunk->skb->dev), SCTP_MIB_IN_PKT_DISCARDS);
+ sctp_chunk_free(chunk);
+ goto next_chunk;
+ }
+ }
+
+ if (chunk->asoc)
+ sock_rps_save_rxhash(chunk->asoc->base.sk, chunk->skb);
+
+ queue->in_progress = chunk;
+
+new_skb:
+ /* This is the first chunk in the packet. */
+ ch = (struct sctp_chunkhdr *)chunk->skb->data;
+ chunk->singleton = 1;
+ chunk->data_accepted = 0;
+ chunk->pdiscard = 0;
+ chunk->auth = 0;
+ chunk->has_asconf = 0;
+ chunk->end_of_packet = 0;
+ if (chunk->head_skb) {
+ struct sctp_input_cb
+ *cb = SCTP_INPUT_CB(chunk->skb),
+ *head_cb = SCTP_INPUT_CB(chunk->head_skb);
+
+ cb->chunk = head_cb->chunk;
+ cb->af = head_cb->af;
+ }
+ }
+
+ chunk->chunk_hdr = ch;
+ chunk->chunk_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length));
+ skb_pull(chunk->skb, sizeof(*ch));
+ chunk->subh.v = NULL; /* Subheader is no longer valid. */
+
+ if (chunk->chunk_end + sizeof(*ch) <= skb_tail_pointer(chunk->skb)) {
+ /* This is not a singleton */
+ chunk->singleton = 0;
+ } else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
+ /* Discard inside state machine. */
+ chunk->pdiscard = 1;
+ chunk->chunk_end = skb_tail_pointer(chunk->skb);
+ } else {
+ /* We are at the end of the packet, so mark the chunk
+ * in case we need to send a SACK.
+ */
+ chunk->end_of_packet = 1;
+ }
+
+ pr_debug("+++sctp_inq_pop+++ chunk:%p[%s], length:%d, skb->len:%d\n",
+ chunk, sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
+ ntohs(chunk->chunk_hdr->length), chunk->skb->len);
+
+ return chunk;
+}
+
+/* Set a top-half handler.
+ *
+ * Originally, the top-half handler was scheduled as a BH. We now
+ * call the handler directly in sctp_inq_push() at a time when
+ * we know we are lock safe.
+ * The intent is that this routine will pull stuff out of the
+ * inqueue and process it.
+ */
+void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback)
+{
+ INIT_WORK(&q->immediate, callback);
+}
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
new file mode 100644
index 000000000..d081858c2
--- /dev/null
+++ b/net/sctp/ipv6.c
@@ -0,0 +1,1217 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2002, 2004
+ * Copyright (c) 2001 Nokia, Inc.
+ * Copyright (c) 2001 La Monte H.P. Yarroll
+ * Copyright (c) 2002-2003 Intel Corp.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * SCTP over IPv6.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Le Yanqun <yanqun.le@nokia.com>
+ * Hui Huang <hui.huang@nokia.com>
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * Ardelle Fan <ardelle.fan@intel.com>
+ *
+ * Based on:
+ * linux/net/ipv6/tcp_ipv6.c
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/net.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/netdevice.h>
+#include <linux/init.h>
+#include <linux/ipsec.h>
+#include <linux/slab.h>
+
+#include <linux/ipv6.h>
+#include <linux/icmpv6.h>
+#include <linux/random.h>
+#include <linux/seq_file.h>
+
+#include <net/protocol.h>
+#include <net/ndisc.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/transp_v6.h>
+#include <net/addrconf.h>
+#include <net/ip6_route.h>
+#include <net/inet_common.h>
+#include <net/inet_ecn.h>
+#include <net/sctp/sctp.h>
+#include <net/udp_tunnel.h>
+
+#include <linux/uaccess.h>
+
+static inline int sctp_v6_addr_match_len(union sctp_addr *s1,
+ union sctp_addr *s2);
+static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
+ __be16 port);
+static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
+ const union sctp_addr *addr2);
+
+/* Event handler for inet6 address addition/deletion events.
+ * The sctp_local_addr_list needs to be protected by a spin lock since
+ * multiple notifiers (say IPv4 and IPv6) may be running at the same
+ * time and thus corrupt the list.
+ * The reader side is protected with RCU.
+ */
+static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
+ void *ptr)
+{
+ struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
+ struct sctp_sockaddr_entry *addr = NULL;
+ struct sctp_sockaddr_entry *temp;
+ struct net *net = dev_net(ifa->idev->dev);
+ int found = 0;
+
+ switch (ev) {
+ case NETDEV_UP:
+ addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
+ if (addr) {
+ addr->a.v6.sin6_family = AF_INET6;
+ addr->a.v6.sin6_addr = ifa->addr;
+ addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
+ addr->valid = 1;
+ spin_lock_bh(&net->sctp.local_addr_lock);
+ list_add_tail_rcu(&addr->list, &net->sctp.local_addr_list);
+ sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_NEW);
+ spin_unlock_bh(&net->sctp.local_addr_lock);
+ }
+ break;
+ case NETDEV_DOWN:
+ spin_lock_bh(&net->sctp.local_addr_lock);
+ list_for_each_entry_safe(addr, temp,
+ &net->sctp.local_addr_list, list) {
+ if (addr->a.sa.sa_family == AF_INET6 &&
+ ipv6_addr_equal(&addr->a.v6.sin6_addr,
+ &ifa->addr) &&
+ addr->a.v6.sin6_scope_id == ifa->idev->dev->ifindex) {
+ sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);
+ found = 1;
+ addr->valid = 0;
+ list_del_rcu(&addr->list);
+ break;
+ }
+ }
+ spin_unlock_bh(&net->sctp.local_addr_lock);
+ if (found)
+ kfree_rcu(addr, rcu);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block sctp_inet6addr_notifier = {
+ .notifier_call = sctp_inet6addr_event,
+};
+
+static void sctp_v6_err_handle(struct sctp_transport *t, struct sk_buff *skb,
+ __u8 type, __u8 code, __u32 info)
+{
+ struct sctp_association *asoc = t->asoc;
+ struct sock *sk = asoc->base.sk;
+ struct ipv6_pinfo *np;
+ int err = 0;
+
+ switch (type) {
+ case ICMPV6_PKT_TOOBIG:
+ if (ip6_sk_accept_pmtu(sk))
+ sctp_icmp_frag_needed(sk, asoc, t, info);
+ return;
+ case ICMPV6_PARAMPROB:
+ if (ICMPV6_UNK_NEXTHDR == code) {
+ sctp_icmp_proto_unreachable(sk, asoc, t);
+ return;
+ }
+ break;
+ case NDISC_REDIRECT:
+ sctp_icmp_redirect(sk, t, skb);
+ return;
+ default:
+ break;
+ }
+
+ np = inet6_sk(sk);
+ icmpv6_err_convert(type, code, &err);
+ if (!sock_owned_by_user(sk) && np->recverr) {
+ sk->sk_err = err;
+ sk_error_report(sk);
+ } else {
+ sk->sk_err_soft = err;
+ }
+}
+
+/* ICMP error handler. */
+static int sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ u8 type, u8 code, int offset, __be32 info)
+{
+ struct net *net = dev_net(skb->dev);
+ struct sctp_transport *transport;
+ struct sctp_association *asoc;
+ __u16 saveip, savesctp;
+ struct sock *sk;
+
+ /* Fix up skb to look at the embedded net header. */
+ saveip = skb->network_header;
+ savesctp = skb->transport_header;
+ skb_reset_network_header(skb);
+ skb_set_transport_header(skb, offset);
+ sk = sctp_err_lookup(net, AF_INET6, skb, sctp_hdr(skb), &asoc, &transport);
+ /* Put back the original pointers. */
+ skb->network_header = saveip;
+ skb->transport_header = savesctp;
+ if (!sk) {
+ __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
+ return -ENOENT;
+ }
+
+ sctp_v6_err_handle(transport, skb, type, code, ntohl(info));
+ sctp_err_finish(sk, transport);
+
+ return 0;
+}
+
+int sctp_udp_v6_err(struct sock *sk, struct sk_buff *skb)
+{
+ struct net *net = dev_net(skb->dev);
+ struct sctp_association *asoc;
+ struct sctp_transport *t;
+ struct icmp6hdr *hdr;
+ __u32 info = 0;
+
+ skb->transport_header += sizeof(struct udphdr);
+ sk = sctp_err_lookup(net, AF_INET6, skb, sctp_hdr(skb), &asoc, &t);
+ if (!sk) {
+ __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
+ return -ENOENT;
+ }
+
+ skb->transport_header -= sizeof(struct udphdr);
+ hdr = (struct icmp6hdr *)(skb_network_header(skb) - sizeof(struct icmp6hdr));
+ if (hdr->icmp6_type == NDISC_REDIRECT) {
+ /* can't be handled without outer ip6hdr known, leave it to udpv6_err */
+ sctp_err_finish(sk, t);
+ return 0;
+ }
+ if (hdr->icmp6_type == ICMPV6_PKT_TOOBIG)
+ info = ntohl(hdr->icmp6_mtu);
+ sctp_v6_err_handle(t, skb, hdr->icmp6_type, hdr->icmp6_code, info);
+
+ sctp_err_finish(sk, t);
+ return 1;
+}
+
+static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *t)
+{
+ struct dst_entry *dst = dst_clone(t->dst);
+ struct flowi6 *fl6 = &t->fl.u.ip6;
+ struct sock *sk = skb->sk;
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ __u8 tclass = np->tclass;
+ __be32 label;
+
+ pr_debug("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n", __func__, skb,
+ skb->len, &fl6->saddr, &fl6->daddr);
+
+ if (t->dscp & SCTP_DSCP_SET_MASK)
+ tclass = t->dscp & SCTP_DSCP_VAL_MASK;
+
+ if (INET_ECN_is_capable(tclass))
+ IP6_ECN_flow_xmit(sk, fl6->flowlabel);
+
+ if (!(t->param_flags & SPP_PMTUD_ENABLE))
+ skb->ignore_df = 1;
+
+ SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
+
+ if (!t->encap_port || !sctp_sk(sk)->udp_port) {
+ int res;
+
+ skb_dst_set(skb, dst);
+ rcu_read_lock();
+ res = ip6_xmit(sk, skb, fl6, sk->sk_mark,
+ rcu_dereference(np->opt),
+ tclass, sk->sk_priority);
+ rcu_read_unlock();
+ return res;
+ }
+
+ if (skb_is_gso(skb))
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
+
+ skb->encapsulation = 1;
+ skb_reset_inner_mac_header(skb);
+ skb_reset_inner_transport_header(skb);
+ skb_set_inner_ipproto(skb, IPPROTO_SCTP);
+ label = ip6_make_flowlabel(sock_net(sk), skb, fl6->flowlabel, true, fl6);
+
+ return udp_tunnel6_xmit_skb(dst, sk, skb, NULL, &fl6->saddr,
+ &fl6->daddr, tclass, ip6_dst_hoplimit(dst),
+ label, sctp_sk(sk)->udp_port, t->encap_port, false);
+}
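+
+/* Two transmit paths are used above: when no UDP encapsulation port is
+ * configured on either the socket or the transport, the packet goes out
+ * as plain IPv6/SCTP via ip6_xmit(); otherwise it is wrapped in a UDP
+ * header (SCTP over UDP, RFC 6951) via udp_tunnel6_xmit_skb().
+ */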
+
+/* Returns the dst cache entry for the given source and destination ip
+ * addresses.
+ */
+static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
+ struct flowi *fl, struct sock *sk)
+{
+ struct sctp_association *asoc = t->asoc;
+ struct dst_entry *dst = NULL;
+ struct flowi _fl;
+ struct flowi6 *fl6 = &_fl.u.ip6;
+ struct sctp_bind_addr *bp;
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct sctp_sockaddr_entry *laddr;
+ union sctp_addr *daddr = &t->ipaddr;
+ union sctp_addr dst_saddr;
+ struct in6_addr *final_p, final;
+ enum sctp_scope scope;
+ __u8 matchlen = 0;
+
+ memset(&_fl, 0, sizeof(_fl));
+ fl6->daddr = daddr->v6.sin6_addr;
+ fl6->fl6_dport = daddr->v6.sin6_port;
+ fl6->flowi6_proto = IPPROTO_SCTP;
+ if (ipv6_addr_type(&daddr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL)
+ fl6->flowi6_oif = daddr->v6.sin6_scope_id;
+ else if (asoc)
+ fl6->flowi6_oif = asoc->base.sk->sk_bound_dev_if;
+ if (t->flowlabel & SCTP_FLOWLABEL_SET_MASK)
+ fl6->flowlabel = htonl(t->flowlabel & SCTP_FLOWLABEL_VAL_MASK);
+
+ if (np->sndflow && (fl6->flowlabel & IPV6_FLOWLABEL_MASK)) {
+ struct ip6_flowlabel *flowlabel;
+
+ flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
+ if (IS_ERR(flowlabel))
+ goto out;
+ fl6_sock_release(flowlabel);
+ }
+
+ pr_debug("%s: dst=%pI6 ", __func__, &fl6->daddr);
+
+ if (asoc)
+ fl6->fl6_sport = htons(asoc->base.bind_addr.port);
+
+ if (saddr) {
+ fl6->saddr = saddr->v6.sin6_addr;
+ if (!fl6->fl6_sport)
+ fl6->fl6_sport = saddr->v6.sin6_port;
+
+ pr_debug("src=%pI6 - ", &fl6->saddr);
+ }
+
+ rcu_read_lock();
+ final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
+ rcu_read_unlock();
+
+ dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
+ if (!asoc || saddr) {
+ t->dst = dst;
+ memcpy(fl, &_fl, sizeof(_fl));
+ goto out;
+ }
+
+ bp = &asoc->base.bind_addr;
+ scope = sctp_scope(daddr);
+ /* ip6_dst_lookup has filled in the fl6->saddr for us. Check
+ * to see if we can use it.
+ */
+ if (!IS_ERR(dst)) {
+ /* Walk through the bind address list and look for a bind
+ * address that matches the source address of the returned dst.
+ */
+ sctp_v6_to_addr(&dst_saddr, &fl6->saddr, htons(bp->port));
+ rcu_read_lock();
+ list_for_each_entry_rcu(laddr, &bp->address_list, list) {
+ if (!laddr->valid || laddr->state == SCTP_ADDR_DEL ||
+ (laddr->state != SCTP_ADDR_SRC &&
+ !asoc->src_out_of_asoc_ok))
+ continue;
+
+ /* Do not compare against v4 addrs */
+ if ((laddr->a.sa.sa_family == AF_INET6) &&
+ (sctp_v6_cmp_addr(&dst_saddr, &laddr->a))) {
+ rcu_read_unlock();
+ t->dst = dst;
+ memcpy(fl, &_fl, sizeof(_fl));
+ goto out;
+ }
+ }
+ rcu_read_unlock();
+ /* None of the bound addresses match the source address of the
+ * dst. So release it.
+ */
+ dst_release(dst);
+ dst = NULL;
+ }
+
+ /* Walk through the bind address list and try to get the
+ * best source address for a given destination.
+ */
+ rcu_read_lock();
+ list_for_each_entry_rcu(laddr, &bp->address_list, list) {
+ struct dst_entry *bdst;
+ __u8 bmatchlen;
+
+ if (!laddr->valid ||
+ laddr->state != SCTP_ADDR_SRC ||
+ laddr->a.sa.sa_family != AF_INET6 ||
+ scope > sctp_scope(&laddr->a))
+ continue;
+
+ fl6->saddr = laddr->a.v6.sin6_addr;
+ fl6->fl6_sport = laddr->a.v6.sin6_port;
+ final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
+ bdst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
+
+ if (IS_ERR(bdst))
+ continue;
+
+ if (ipv6_chk_addr(dev_net(bdst->dev),
+ &laddr->a.v6.sin6_addr, bdst->dev, 1)) {
+ if (!IS_ERR_OR_NULL(dst))
+ dst_release(dst);
+ dst = bdst;
+ t->dst = dst;
+ memcpy(fl, &_fl, sizeof(_fl));
+ break;
+ }
+
+ bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
+ if (matchlen > bmatchlen) {
+ dst_release(bdst);
+ continue;
+ }
+
+ if (!IS_ERR_OR_NULL(dst))
+ dst_release(dst);
+ dst = bdst;
+ matchlen = bmatchlen;
+ t->dst = dst;
+ memcpy(fl, &_fl, sizeof(_fl));
+ }
+ rcu_read_unlock();
+
+out:
+ if (!IS_ERR_OR_NULL(dst)) {
+ struct rt6_info *rt;
+
+ rt = (struct rt6_info *)dst;
+ t->dst_cookie = rt6_get_cookie(rt);
+ pr_debug("rt6_dst:%pI6/%d rt6_src:%pI6\n",
+ &rt->rt6i_dst.addr, rt->rt6i_dst.plen,
+ &fl->u.ip6.saddr);
+ } else {
+ t->dst = NULL;
+ pr_debug("no route\n");
+ }
+}
+
+/* Returns the number of consecutive initial bits that match in the two IPv6
+ * addresses.
+ */
+static inline int sctp_v6_addr_match_len(union sctp_addr *s1,
+ union sctp_addr *s2)
+{
+ return ipv6_addr_diff(&s1->v6.sin6_addr, &s2->v6.sin6_addr);
+}
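+
+/* For example, 2001:db8::1 and 2001:db8::ffff share their first 112 bits,
+ * so this helper reports a match length of 112.
+ */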
+
+/* Fills in the source address (saddr) based on the destination address
+ * (daddr) and the asoc's bind address list.
+ */
+static void sctp_v6_get_saddr(struct sctp_sock *sk,
+ struct sctp_transport *t,
+ struct flowi *fl)
+{
+ struct flowi6 *fl6 = &fl->u.ip6;
+ union sctp_addr *saddr = &t->saddr;
+
+ pr_debug("%s: asoc:%p dst:%p\n", __func__, t->asoc, t->dst);
+
+ if (t->dst) {
+ saddr->v6.sin6_family = AF_INET6;
+ saddr->v6.sin6_addr = fl6->saddr;
+ }
+}
+
+/* Make a copy of all potential local addresses. */
+static void sctp_v6_copy_addrlist(struct list_head *addrlist,
+ struct net_device *dev)
+{
+ struct inet6_dev *in6_dev;
+ struct inet6_ifaddr *ifp;
+ struct sctp_sockaddr_entry *addr;
+
+ rcu_read_lock();
+ if ((in6_dev = __in6_dev_get(dev)) == NULL) {
+ rcu_read_unlock();
+ return;
+ }
+
+ read_lock_bh(&in6_dev->lock);
+ list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
+ /* Add the address to the local list. */
+ addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
+ if (addr) {
+ addr->a.v6.sin6_family = AF_INET6;
+ addr->a.v6.sin6_addr = ifp->addr;
+ addr->a.v6.sin6_scope_id = dev->ifindex;
+ addr->valid = 1;
+ INIT_LIST_HEAD(&addr->list);
+ list_add_tail(&addr->list, addrlist);
+ }
+ }
+
+ read_unlock_bh(&in6_dev->lock);
+ rcu_read_unlock();
+}
+
+/* Copy over any ip options */
+static void sctp_v6_copy_ip_options(struct sock *sk, struct sock *newsk)
+{
+ struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
+ struct ipv6_txoptions *opt;
+
+ newnp = inet6_sk(newsk);
+
+ rcu_read_lock();
+ opt = rcu_dereference(np->opt);
+ if (opt) {
+ opt = ipv6_dup_options(newsk, opt);
+ if (!opt)
+ pr_err("%s: Failed to copy ip options\n", __func__);
+ }
+ RCU_INIT_POINTER(newnp->opt, opt);
+ rcu_read_unlock();
+}
+
+/* Account for the IP options */
+static int sctp_v6_ip_options_len(struct sock *sk)
+{
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct ipv6_txoptions *opt;
+ int len = 0;
+
+ rcu_read_lock();
+ opt = rcu_dereference(np->opt);
+ if (opt)
+ len = opt->opt_flen + opt->opt_nflen;
+
+ rcu_read_unlock();
+ return len;
+}
+
+/* Initialize a sockaddr_storage from an incoming skb. */
+static void sctp_v6_from_skb(union sctp_addr *addr, struct sk_buff *skb,
+ int is_saddr)
+{
+ /* Always called on head skb, so this is safe */
+ struct sctphdr *sh = sctp_hdr(skb);
+ struct sockaddr_in6 *sa = &addr->v6;
+
+ addr->v6.sin6_family = AF_INET6;
+ addr->v6.sin6_flowinfo = 0; /* FIXME */
+ addr->v6.sin6_scope_id = ((struct inet6_skb_parm *)skb->cb)->iif;
+
+ if (is_saddr) {
+ sa->sin6_port = sh->source;
+ sa->sin6_addr = ipv6_hdr(skb)->saddr;
+ } else {
+ sa->sin6_port = sh->dest;
+ sa->sin6_addr = ipv6_hdr(skb)->daddr;
+ }
+}
+
+/* Initialize an sctp_addr from a socket. */
+static void sctp_v6_from_sk(union sctp_addr *addr, struct sock *sk)
+{
+ addr->v6.sin6_family = AF_INET6;
+ addr->v6.sin6_port = 0;
+ addr->v6.sin6_addr = sk->sk_v6_rcv_saddr;
+}
+
+/* Initialize sk->sk_rcv_saddr from sctp_addr. */
+static void sctp_v6_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
+{
+ if (addr->sa.sa_family == AF_INET) {
+ sk->sk_v6_rcv_saddr.s6_addr32[0] = 0;
+ sk->sk_v6_rcv_saddr.s6_addr32[1] = 0;
+ sk->sk_v6_rcv_saddr.s6_addr32[2] = htonl(0x0000ffff);
+ sk->sk_v6_rcv_saddr.s6_addr32[3] =
+ addr->v4.sin_addr.s_addr;
+ } else {
+ sk->sk_v6_rcv_saddr = addr->v6.sin6_addr;
+ }
+}
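+
+/* For example, the IPv4 address 192.0.2.1 ends up stored as the v4-mapped
+ * IPv6 address ::ffff:192.0.2.1: words 0 and 1 are zero, word 2 is
+ * htonl(0x0000ffff) and word 3 carries the IPv4 address unchanged (it is
+ * already in network byte order).
+ */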
+
+/* Initialize sk->sk_daddr from sctp_addr. */
+static void sctp_v6_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
+{
+ if (addr->sa.sa_family == AF_INET) {
+ sk->sk_v6_daddr.s6_addr32[0] = 0;
+ sk->sk_v6_daddr.s6_addr32[1] = 0;
+ sk->sk_v6_daddr.s6_addr32[2] = htonl(0x0000ffff);
+ sk->sk_v6_daddr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
+ } else {
+ sk->sk_v6_daddr = addr->v6.sin6_addr;
+ }
+}
+
+/* Initialize a sctp_addr from an address parameter. */
+static bool sctp_v6_from_addr_param(union sctp_addr *addr,
+ union sctp_addr_param *param,
+ __be16 port, int iif)
+{
+ if (ntohs(param->v6.param_hdr.length) < sizeof(struct sctp_ipv6addr_param))
+ return false;
+
+ addr->v6.sin6_family = AF_INET6;
+ addr->v6.sin6_port = port;
+ addr->v6.sin6_flowinfo = 0; /* BUG */
+ addr->v6.sin6_addr = param->v6.addr;
+ addr->v6.sin6_scope_id = iif;
+
+ return true;
+}
+
+/* Initialize an address parameter from a sctp_addr and return the length
+ * of the address parameter.
+ */
+static int sctp_v6_to_addr_param(const union sctp_addr *addr,
+ union sctp_addr_param *param)
+{
+ int length = sizeof(struct sctp_ipv6addr_param);
+
+ param->v6.param_hdr.type = SCTP_PARAM_IPV6_ADDRESS;
+ param->v6.param_hdr.length = htons(length);
+ param->v6.addr = addr->v6.sin6_addr;
+
+ return length;
+}
+
+/* Initialize a sctp_addr from struct in6_addr. */
+static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
+ __be16 port)
+{
+ addr->sa.sa_family = AF_INET6;
+ addr->v6.sin6_port = port;
+ addr->v6.sin6_flowinfo = 0;
+ addr->v6.sin6_addr = *saddr;
+ addr->v6.sin6_scope_id = 0;
+}
+
+static int __sctp_v6_cmp_addr(const union sctp_addr *addr1,
+ const union sctp_addr *addr2)
+{
+ if (addr1->sa.sa_family != addr2->sa.sa_family) {
+ if (addr1->sa.sa_family == AF_INET &&
+ addr2->sa.sa_family == AF_INET6 &&
+ ipv6_addr_v4mapped(&addr2->v6.sin6_addr) &&
+ addr2->v6.sin6_addr.s6_addr32[3] ==
+ addr1->v4.sin_addr.s_addr)
+ return 1;
+
+ if (addr2->sa.sa_family == AF_INET &&
+ addr1->sa.sa_family == AF_INET6 &&
+ ipv6_addr_v4mapped(&addr1->v6.sin6_addr) &&
+ addr1->v6.sin6_addr.s6_addr32[3] ==
+ addr2->v4.sin_addr.s_addr)
+ return 1;
+
+ return 0;
+ }
+
+ if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr))
+ return 0;
+
+ /* If this is a linklocal address, compare the scope_id. */
+ if ((ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
+ addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
+ addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)
+ return 0;
+
+ return 1;
+}
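+
+/* So, for instance, an AF_INET address of 192.0.2.1 and an AF_INET6
+ * address of ::ffff:192.0.2.1 compare as equal here, while two
+ * link-local addresses only match when their scope IDs agree (or at
+ * least one scope ID is unset).
+ */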
+
+/* Compare addresses exactly.
+ * v4-mapped-v6 addresses are also taken into consideration.
+ */
+static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
+ const union sctp_addr *addr2)
+{
+ return __sctp_v6_cmp_addr(addr1, addr2) &&
+ addr1->v6.sin6_port == addr2->v6.sin6_port;
+}
+
+/* Initialize addr struct to the IPv6 wildcard (unspecified) address. */
+static void sctp_v6_inaddr_any(union sctp_addr *addr, __be16 port)
+{
+ memset(addr, 0x00, sizeof(union sctp_addr));
+ addr->v6.sin6_family = AF_INET6;
+ addr->v6.sin6_port = port;
+}
+
+/* Is this a wildcard address? */
+static int sctp_v6_is_any(const union sctp_addr *addr)
+{
+ return ipv6_addr_any(&addr->v6.sin6_addr);
+}
+
+/* Should this be available for binding? */
+static int sctp_v6_available(union sctp_addr *addr, struct sctp_sock *sp)
+{
+ int type;
+ struct net *net = sock_net(&sp->inet.sk);
+ const struct in6_addr *in6 = (const struct in6_addr *)&addr->v6.sin6_addr;
+
+ type = ipv6_addr_type(in6);
+ if (IPV6_ADDR_ANY == type)
+ return 1;
+ if (type == IPV6_ADDR_MAPPED) {
+ if (sp && ipv6_only_sock(sctp_opt2sk(sp)))
+ return 0;
+ sctp_v6_map_v4(addr);
+ return sctp_get_af_specific(AF_INET)->available(addr, sp);
+ }
+ if (!(type & IPV6_ADDR_UNICAST))
+ return 0;
+
+ return ipv6_can_nonlocal_bind(net, &sp->inet) ||
+ ipv6_chk_addr(net, in6, NULL, 0);
+}
+
+/* This function checks if the address is a valid address to be used for
+ * SCTP.
+ *
+ * Output:
+ * Return 0 - If the address is a non-unicast or an illegal address.
+ * Return 1 - If the address is a unicast address.
+ */
+static int sctp_v6_addr_valid(union sctp_addr *addr,
+ struct sctp_sock *sp,
+ const struct sk_buff *skb)
+{
+ int ret = ipv6_addr_type(&addr->v6.sin6_addr);
+
+ /* Support v4-mapped-v6 address. */
+ if (ret == IPV6_ADDR_MAPPED) {
+ /* Note: This routine is used in input, so v4-mapped-v6
+ * addresses are disallowed here when there is no sctp_sock.
+ */
+ if (sp && ipv6_only_sock(sctp_opt2sk(sp)))
+ return 0;
+ sctp_v6_map_v4(addr);
+ return sctp_get_af_specific(AF_INET)->addr_valid(addr, sp, skb);
+ }
+
+ /* Is this a non-unicast address? */
+ if (!(ret & IPV6_ADDR_UNICAST))
+ return 0;
+
+ return 1;
+}
+
+/* What is the scope of 'addr'? */
+static enum sctp_scope sctp_v6_scope(union sctp_addr *addr)
+{
+ enum sctp_scope retval;
+ int v6scope;
+
+ /* The IPv6 scope is really a set of bit fields.
+ * See IFA_* in <net/if_inet6.h>. Map to a generic SCTP scope.
+ */
+
+ v6scope = ipv6_addr_scope(&addr->v6.sin6_addr);
+ switch (v6scope) {
+ case IFA_HOST:
+ retval = SCTP_SCOPE_LOOPBACK;
+ break;
+ case IFA_LINK:
+ retval = SCTP_SCOPE_LINK;
+ break;
+ case IFA_SITE:
+ retval = SCTP_SCOPE_PRIVATE;
+ break;
+ default:
+ retval = SCTP_SCOPE_GLOBAL;
+ break;
+ }
+
+ return retval;
+}
+
+/* Create and initialize a new sk for the socket to be returned by accept(). */
+static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
+ struct sctp_association *asoc,
+ bool kern)
+{
+ struct sock *newsk;
+ struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
+ struct sctp6_sock *newsctp6sk;
+
+ newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, kern);
+ if (!newsk)
+ goto out;
+
+ sock_init_data(NULL, newsk);
+
+ sctp_copy_sock(newsk, sk, asoc);
+ sock_reset_flag(sk, SOCK_ZAPPED);
+
+ newsctp6sk = (struct sctp6_sock *)newsk;
+ inet_sk(newsk)->pinet6 = &newsctp6sk->inet6;
+
+ sctp_sk(newsk)->v4mapped = sctp_sk(sk)->v4mapped;
+
+ newnp = inet6_sk(newsk);
+
+ memcpy(newnp, np, sizeof(struct ipv6_pinfo));
+ newnp->ipv6_mc_list = NULL;
+ newnp->ipv6_ac_list = NULL;
+ newnp->ipv6_fl_list = NULL;
+
+ sctp_v6_copy_ip_options(sk, newsk);
+
+ /* Initialize sk's sport, dport, rcv_saddr and daddr for getsockname()
+ * and getpeername().
+ */
+ sctp_v6_to_sk_daddr(&asoc->peer.primary_addr, newsk);
+
+ newsk->sk_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
+
+ sk_refcnt_debug_inc(newsk);
+
+ if (newsk->sk_prot->init(newsk)) {
+ sk_common_release(newsk);
+ newsk = NULL;
+ }
+
+out:
+ return newsk;
+}
+
+/* Format a sockaddr for return to user space. This makes sure the return is
+ * AF_INET or AF_INET6 depending on the SCTP_I_WANT_MAPPED_V4_ADDR option.
+ */
+static int sctp_v6_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
+{
+ if (sp->v4mapped) {
+ if (addr->sa.sa_family == AF_INET)
+ sctp_v4_map_v6(addr);
+ } else {
+ if (addr->sa.sa_family == AF_INET6 &&
+ ipv6_addr_v4mapped(&addr->v6.sin6_addr))
+ sctp_v6_map_v4(addr);
+ }
+
+ if (addr->sa.sa_family == AF_INET) {
+ memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
+ return sizeof(struct sockaddr_in);
+ }
+ return sizeof(struct sockaddr_in6);
+}
+
+/* Where did this skb come from? */
+static int sctp_v6_skb_iif(const struct sk_buff *skb)
+{
+ return IP6CB(skb)->iif;
+}
+
+/* Was this packet marked by Explicit Congestion Notification? */
+static int sctp_v6_is_ce(const struct sk_buff *skb)
+{
+ return *((__u32 *)(ipv6_hdr(skb))) & (__force __u32)htonl(1 << 20);
+}
+
+/* Dump the v6 addr to the seq file. */
+static void sctp_v6_seq_dump_addr(struct seq_file *seq, union sctp_addr *addr)
+{
+ seq_printf(seq, "%pI6 ", &addr->v6.sin6_addr);
+}
+
+static void sctp_v6_ecn_capable(struct sock *sk)
+{
+ inet6_sk(sk)->tclass |= INET_ECN_ECT_0;
+}
+
+/* Initialize a PF_INET6 msgname from a ulpevent. */
+static void sctp_inet6_event_msgname(struct sctp_ulpevent *event,
+ char *msgname, int *addrlen)
+{
+ union sctp_addr *addr;
+ struct sctp_association *asoc;
+ union sctp_addr *paddr;
+
+ if (!msgname)
+ return;
+
+ addr = (union sctp_addr *)msgname;
+ asoc = event->asoc;
+ paddr = &asoc->peer.primary_addr;
+
+ if (paddr->sa.sa_family == AF_INET) {
+ addr->v4.sin_family = AF_INET;
+ addr->v4.sin_port = htons(asoc->peer.port);
+ addr->v4.sin_addr = paddr->v4.sin_addr;
+ } else {
+ addr->v6.sin6_family = AF_INET6;
+ addr->v6.sin6_flowinfo = 0;
+ if (ipv6_addr_type(&paddr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL)
+ addr->v6.sin6_scope_id = paddr->v6.sin6_scope_id;
+ else
+ addr->v6.sin6_scope_id = 0;
+ addr->v6.sin6_port = htons(asoc->peer.port);
+ addr->v6.sin6_addr = paddr->v6.sin6_addr;
+ }
+
+ *addrlen = sctp_v6_addr_to_user(sctp_sk(asoc->base.sk), addr);
+}
+
+/* Initialize a msg_name from an inbound skb. */
+static void sctp_inet6_skb_msgname(struct sk_buff *skb, char *msgname,
+ int *addr_len)
+{
+ union sctp_addr *addr;
+ struct sctphdr *sh;
+
+ if (!msgname)
+ return;
+
+ addr = (union sctp_addr *)msgname;
+ sh = sctp_hdr(skb);
+
+ if (ip_hdr(skb)->version == 4) {
+ addr->v4.sin_family = AF_INET;
+ addr->v4.sin_port = sh->source;
+ addr->v4.sin_addr.s_addr = ip_hdr(skb)->saddr;
+ } else {
+ addr->v6.sin6_family = AF_INET6;
+ addr->v6.sin6_flowinfo = 0;
+ addr->v6.sin6_port = sh->source;
+ addr->v6.sin6_addr = ipv6_hdr(skb)->saddr;
+ if (ipv6_addr_type(&addr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL)
+ addr->v6.sin6_scope_id = sctp_v6_skb_iif(skb);
+ else
+ addr->v6.sin6_scope_id = 0;
+ }
+
+ *addr_len = sctp_v6_addr_to_user(sctp_sk(skb->sk), addr);
+}
+
+/* Do we support this AF? */
+static int sctp_inet6_af_supported(sa_family_t family, struct sctp_sock *sp)
+{
+ switch (family) {
+ case AF_INET6:
+ return 1;
+ /* v4-mapped-v6 addresses */
+ case AF_INET:
+ if (!ipv6_only_sock(sctp_opt2sk(sp)))
+ return 1;
+ fallthrough;
+ default:
+ return 0;
+ }
+}
+
+/* Address matching with wildcards allowed. This extra level
+ * of indirection lets us choose whether a PF_INET6 socket should
+ * disallow any v4 addresses.
+ */
+static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
+ const union sctp_addr *addr2,
+ struct sctp_sock *opt)
+{
+ struct sock *sk = sctp_opt2sk(opt);
+ struct sctp_af *af1, *af2;
+
+ af1 = sctp_get_af_specific(addr1->sa.sa_family);
+ af2 = sctp_get_af_specific(addr2->sa.sa_family);
+
+ if (!af1 || !af2)
+ return 0;
+
+ /* If the socket is IPv6 only, v4 addrs will not match */
+ if (ipv6_only_sock(sk) && af1 != af2)
+ return 0;
+
+ /* Today, wildcard AF_INET/AF_INET6. */
+ if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2))
+ return 1;
+
+ if (addr1->sa.sa_family == AF_INET && addr2->sa.sa_family == AF_INET)
+ return addr1->v4.sin_addr.s_addr == addr2->v4.sin_addr.s_addr;
+
+ return __sctp_v6_cmp_addr(addr1, addr2);
+}
+
+/* Verify that the provided sockaddr looks bindable. Common verification
+ * has already been taken care of.
+ */
+static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr)
+{
+ struct sctp_af *af;
+
+ /* ASSERT: address family has already been verified. */
+ if (addr->sa.sa_family != AF_INET6)
+ af = sctp_get_af_specific(addr->sa.sa_family);
+ else {
+ int type = ipv6_addr_type(&addr->v6.sin6_addr);
+ struct net_device *dev;
+
+ if (type & IPV6_ADDR_LINKLOCAL) {
+ struct net *net;
+ if (!addr->v6.sin6_scope_id)
+ return 0;
+ net = sock_net(&opt->inet.sk);
+ rcu_read_lock();
+ dev = dev_get_by_index_rcu(net, addr->v6.sin6_scope_id);
+ if (!dev || !(ipv6_can_nonlocal_bind(net, &opt->inet) ||
+ ipv6_chk_addr(net, &addr->v6.sin6_addr,
+ dev, 0))) {
+ rcu_read_unlock();
+ return 0;
+ }
+ rcu_read_unlock();
+ }
+
+ af = opt->pf->af;
+ }
+ return af->available(addr, opt);
+}
+
+/* Verify that the provided sockaddr looks sendable. Common verification
+ * has already been taken care of.
+ */
+static int sctp_inet6_send_verify(struct sctp_sock *opt, union sctp_addr *addr)
+{
+ struct sctp_af *af = NULL;
+
+ /* ASSERT: address family has already been verified. */
+ if (addr->sa.sa_family != AF_INET6)
+ af = sctp_get_af_specific(addr->sa.sa_family);
+ else {
+ int type = ipv6_addr_type(&addr->v6.sin6_addr);
+ struct net_device *dev;
+
+ if (type & IPV6_ADDR_LINKLOCAL) {
+ if (!addr->v6.sin6_scope_id)
+ return 0;
+ rcu_read_lock();
+ dev = dev_get_by_index_rcu(sock_net(&opt->inet.sk),
+ addr->v6.sin6_scope_id);
+ rcu_read_unlock();
+ if (!dev)
+ return 0;
+ }
+ af = opt->pf->af;
+ }
+
+ return af != NULL;
+}
+
+/* Fill in Supported Address Type information for INIT and INIT-ACK
+ * chunks. Note: In the future, we may want to look at sock options
+ * to determine whether a PF_INET6 socket really wants to have IPV4
+ * addresses.
+ * Returns number of addresses supported.
+ */
+static int sctp_inet6_supported_addrs(const struct sctp_sock *opt,
+ __be16 *types)
+{
+ types[0] = SCTP_PARAM_IPV6_ADDRESS;
+ if (!opt || !ipv6_only_sock(sctp_opt2sk(opt))) {
+ types[1] = SCTP_PARAM_IPV4_ADDRESS;
+ return 2;
+ }
+ return 1;
+}
+
+/* Handle SCTP_I_WANT_MAPPED_V4_ADDR for getpeername() and getsockname() */
+static int sctp_getname(struct socket *sock, struct sockaddr *uaddr,
+ int peer)
+{
+ int rc;
+
+ rc = inet6_getname(sock, uaddr, peer);
+
+ if (rc < 0)
+ return rc;
+
+ rc = sctp_v6_addr_to_user(sctp_sk(sock->sk),
+ (union sctp_addr *)uaddr);
+
+ return rc;
+}
+
+static const struct proto_ops inet6_seqpacket_ops = {
+ .family = PF_INET6,
+ .owner = THIS_MODULE,
+ .release = inet6_release,
+ .bind = inet6_bind,
+ .connect = sctp_inet_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = inet_accept,
+ .getname = sctp_getname,
+ .poll = sctp_poll,
+ .ioctl = inet6_ioctl,
+ .gettstamp = sock_gettstamp,
+ .listen = sctp_inet_listen,
+ .shutdown = inet_shutdown,
+ .setsockopt = sock_common_setsockopt,
+ .getsockopt = sock_common_getsockopt,
+ .sendmsg = inet_sendmsg,
+ .recvmsg = inet_recvmsg,
+ .mmap = sock_no_mmap,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = inet6_compat_ioctl,
+#endif
+};
+
+static struct inet_protosw sctpv6_seqpacket_protosw = {
+ .type = SOCK_SEQPACKET,
+ .protocol = IPPROTO_SCTP,
+ .prot = &sctpv6_prot,
+ .ops = &inet6_seqpacket_ops,
+ .flags = SCTP_PROTOSW_FLAG
+};
+static struct inet_protosw sctpv6_stream_protosw = {
+ .type = SOCK_STREAM,
+ .protocol = IPPROTO_SCTP,
+ .prot = &sctpv6_prot,
+ .ops = &inet6_seqpacket_ops,
+ .flags = SCTP_PROTOSW_FLAG,
+};
+
+static int sctp6_rcv(struct sk_buff *skb)
+{
+ SCTP_INPUT_CB(skb)->encap_port = 0;
+ return sctp_rcv(skb) ? -1 : 0;
+}
+
+static const struct inet6_protocol sctpv6_protocol = {
+ .handler = sctp6_rcv,
+ .err_handler = sctp_v6_err,
+ .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
+};
+
+static struct sctp_af sctp_af_inet6 = {
+ .sa_family = AF_INET6,
+ .sctp_xmit = sctp_v6_xmit,
+ .setsockopt = ipv6_setsockopt,
+ .getsockopt = ipv6_getsockopt,
+ .get_dst = sctp_v6_get_dst,
+ .get_saddr = sctp_v6_get_saddr,
+ .copy_addrlist = sctp_v6_copy_addrlist,
+ .from_skb = sctp_v6_from_skb,
+ .from_sk = sctp_v6_from_sk,
+ .from_addr_param = sctp_v6_from_addr_param,
+ .to_addr_param = sctp_v6_to_addr_param,
+ .cmp_addr = sctp_v6_cmp_addr,
+ .scope = sctp_v6_scope,
+ .addr_valid = sctp_v6_addr_valid,
+ .inaddr_any = sctp_v6_inaddr_any,
+ .is_any = sctp_v6_is_any,
+ .available = sctp_v6_available,
+ .skb_iif = sctp_v6_skb_iif,
+ .is_ce = sctp_v6_is_ce,
+ .seq_dump_addr = sctp_v6_seq_dump_addr,
+ .ecn_capable = sctp_v6_ecn_capable,
+ .net_header_len = sizeof(struct ipv6hdr),
+ .sockaddr_len = sizeof(struct sockaddr_in6),
+ .ip_options_len = sctp_v6_ip_options_len,
+};
+
+static struct sctp_pf sctp_pf_inet6 = {
+ .event_msgname = sctp_inet6_event_msgname,
+ .skb_msgname = sctp_inet6_skb_msgname,
+ .af_supported = sctp_inet6_af_supported,
+ .cmp_addr = sctp_inet6_cmp_addr,
+ .bind_verify = sctp_inet6_bind_verify,
+ .send_verify = sctp_inet6_send_verify,
+ .supported_addrs = sctp_inet6_supported_addrs,
+ .create_accept_sk = sctp_v6_create_accept_sk,
+ .addr_to_user = sctp_v6_addr_to_user,
+ .to_sk_saddr = sctp_v6_to_sk_saddr,
+ .to_sk_daddr = sctp_v6_to_sk_daddr,
+ .copy_ip_options = sctp_v6_copy_ip_options,
+ .af = &sctp_af_inet6,
+};
+
+/* Initialize IPv6 support and register with socket layer. */
+void sctp_v6_pf_init(void)
+{
+ /* Register the SCTP specific PF_INET6 functions. */
+ sctp_register_pf(&sctp_pf_inet6, PF_INET6);
+
+ /* Register the SCTP specific AF_INET6 functions. */
+ sctp_register_af(&sctp_af_inet6);
+}
+
+void sctp_v6_pf_exit(void)
+{
+ list_del(&sctp_af_inet6.list);
+}
+
+/* Initialize IPv6 support and register with socket layer. */
+int sctp_v6_protosw_init(void)
+{
+ int rc;
+
+ rc = proto_register(&sctpv6_prot, 1);
+ if (rc)
+ return rc;
+
+ /* Add SCTPv6(UDP and TCP style) to inetsw6 linked list. */
+ inet6_register_protosw(&sctpv6_seqpacket_protosw);
+ inet6_register_protosw(&sctpv6_stream_protosw);
+
+ return 0;
+}
+
+void sctp_v6_protosw_exit(void)
+{
+ inet6_unregister_protosw(&sctpv6_seqpacket_protosw);
+ inet6_unregister_protosw(&sctpv6_stream_protosw);
+ proto_unregister(&sctpv6_prot);
+}
+
+
+/* Register with inet6 layer. */
+int sctp_v6_add_protocol(void)
+{
+ /* Register notifier for inet6 address additions/deletions. */
+ register_inet6addr_notifier(&sctp_inet6addr_notifier);
+
+ if (inet6_add_protocol(&sctpv6_protocol, IPPROTO_SCTP) < 0)
+ return -EAGAIN;
+
+ return 0;
+}
+
+/* Unregister with inet6 layer. */
+void sctp_v6_del_protocol(void)
+{
+ inet6_del_protocol(&sctpv6_protocol, IPPROTO_SCTP);
+ unregister_inet6addr_notifier(&sctp_inet6addr_notifier);
+}
diff --git a/net/sctp/objcnt.c b/net/sctp/objcnt.c
new file mode 100644
index 000000000..0400c964e
--- /dev/null
+++ b/net/sctp/objcnt.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * Support for memory object debugging. This allows one to monitor, via
+ * the proc fs, the object allocations/deallocations for the types
+ * instrumented for it.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Jon Grimm <jgrimm@us.ibm.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <net/sctp/sctp.h>
+
+/*
+ * Global counters tracking raw object allocation counts.
+ * To add new counters, choose a unique suffix for the variable
+ * name as the helper macros key off this suffix to make
+ * life easier for the programmer.
+ */
+
+SCTP_DBG_OBJCNT(sock);
+SCTP_DBG_OBJCNT(ep);
+SCTP_DBG_OBJCNT(transport);
+SCTP_DBG_OBJCNT(assoc);
+SCTP_DBG_OBJCNT(bind_addr);
+SCTP_DBG_OBJCNT(bind_bucket);
+SCTP_DBG_OBJCNT(chunk);
+SCTP_DBG_OBJCNT(addr);
+SCTP_DBG_OBJCNT(datamsg);
+SCTP_DBG_OBJCNT(keys);
+
+/* An array to make it easy to pretty print the debug information
+ * to the proc fs.
+ */
+static struct sctp_dbg_objcnt_entry sctp_dbg_objcnt[] = {
+ SCTP_DBG_OBJCNT_ENTRY(sock),
+ SCTP_DBG_OBJCNT_ENTRY(ep),
+ SCTP_DBG_OBJCNT_ENTRY(assoc),
+ SCTP_DBG_OBJCNT_ENTRY(transport),
+ SCTP_DBG_OBJCNT_ENTRY(chunk),
+ SCTP_DBG_OBJCNT_ENTRY(bind_addr),
+ SCTP_DBG_OBJCNT_ENTRY(bind_bucket),
+ SCTP_DBG_OBJCNT_ENTRY(addr),
+ SCTP_DBG_OBJCNT_ENTRY(datamsg),
+ SCTP_DBG_OBJCNT_ENTRY(keys),
+};
+
+/* Callback from procfs to read out objcount information.
+ * Walk through the entries in the sctp_dbg_objcnt array, dumping
+ * the raw object counts for each monitored type.
+ */
+static int sctp_objcnt_seq_show(struct seq_file *seq, void *v)
+{
+ int i;
+
+ i = (int)*(loff_t *)v;
+ seq_setwidth(seq, 127);
+ seq_printf(seq, "%s: %d", sctp_dbg_objcnt[i].label,
+ atomic_read(sctp_dbg_objcnt[i].counter));
+ seq_pad(seq, '\n');
+ return 0;
+}
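+
+/* Reading the file yields one "label: count" line per entry in the
+ * array above, e.g. (illustrative numbers only):
+ *
+ *	sock: 2
+ *	ep: 2
+ *	assoc: 1
+ *	transport: 2
+ *	chunk: 4
+ *	...
+ */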
+
+static void *sctp_objcnt_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ return (*pos >= ARRAY_SIZE(sctp_dbg_objcnt)) ? NULL : (void *)pos;
+}
+
+static void sctp_objcnt_seq_stop(struct seq_file *seq, void *v)
+{
+}
+
+static void *sctp_objcnt_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ ++*pos;
+ return (*pos >= ARRAY_SIZE(sctp_dbg_objcnt)) ? NULL : (void *)pos;
+}
+
+static const struct seq_operations sctp_objcnt_seq_ops = {
+ .start = sctp_objcnt_seq_start,
+ .next = sctp_objcnt_seq_next,
+ .stop = sctp_objcnt_seq_stop,
+ .show = sctp_objcnt_seq_show,
+};
+
+/* Initialize the objcount in the proc filesystem. */
+void sctp_dbg_objcnt_init(struct net *net)
+{
+ struct proc_dir_entry *ent;
+
+ ent = proc_create_seq("sctp_dbg_objcnt", 0,
+ net->sctp.proc_net_sctp, &sctp_objcnt_seq_ops);
+ if (!ent)
+ pr_warn("sctp_dbg_objcnt: Unable to create /proc entry.\n");
+}
diff --git a/net/sctp/offload.c b/net/sctp/offload.c
new file mode 100644
index 000000000..eb874e3c3
--- /dev/null
+++ b/net/sctp/offload.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * sctp_offload - GRO/GSO Offloading for SCTP
+ *
+ * Copyright (C) 2015, Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/socket.h>
+#include <linux/sctp.h>
+#include <linux/proc_fs.h>
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+#include <linux/kfifo.h>
+#include <linux/time.h>
+#include <net/net_namespace.h>
+
+#include <linux/skbuff.h>
+#include <net/sctp/sctp.h>
+#include <net/sctp/checksum.h>
+#include <net/protocol.h>
+
+static __le32 sctp_gso_make_checksum(struct sk_buff *skb)
+{
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->csum_not_inet = 0;
+ /* csum and csum_start in GSO CB may be needed to do the UDP
+ * checksum when it's a UDP tunneling packet.
+ */
+ SKB_GSO_CB(skb)->csum = (__force __wsum)~0;
+ SKB_GSO_CB(skb)->csum_start = skb_headroom(skb) + skb->len;
+ return sctp_compute_cksum(skb, skb_transport_offset(skb));
+}
+
+static struct sk_buff *sctp_gso_segment(struct sk_buff *skb,
+ netdev_features_t features)
+{
+ struct sk_buff *segs = ERR_PTR(-EINVAL);
+ struct sctphdr *sh;
+
+ if (!skb_is_gso_sctp(skb))
+ goto out;
+
+ sh = sctp_hdr(skb);
+ if (!pskb_may_pull(skb, sizeof(*sh)))
+ goto out;
+
+ __skb_pull(skb, sizeof(*sh));
+
+ if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
+ /* Packet is from an untrusted source, reset gso_segs. */
+ struct skb_shared_info *pinfo = skb_shinfo(skb);
+ struct sk_buff *frag_iter;
+
+ pinfo->gso_segs = 0;
+ if (skb->len != skb->data_len) {
+ /* Means we have chunks in here too */
+ pinfo->gso_segs++;
+ }
+
+ skb_walk_frags(skb, frag_iter)
+ pinfo->gso_segs++;
+
+ segs = NULL;
+ goto out;
+ }
+
+ segs = skb_segment(skb, (features | NETIF_F_HW_CSUM) & ~NETIF_F_SG);
+ if (IS_ERR(segs))
+ goto out;
+
+ /* All that is left is to update the SCTP CRC if necessary */
+ if (!(features & NETIF_F_SCTP_CRC)) {
+ for (skb = segs; skb; skb = skb->next) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ sh = sctp_hdr(skb);
+ sh->checksum = sctp_gso_make_checksum(skb);
+ }
+ }
+ }
+
+out:
+ return segs;
+}
+
+static const struct net_offload sctp_offload = {
+ .callbacks = {
+ .gso_segment = sctp_gso_segment,
+ },
+};
+
+static const struct net_offload sctp6_offload = {
+ .callbacks = {
+ .gso_segment = sctp_gso_segment,
+ },
+};
+
+int __init sctp_offload_init(void)
+{
+ int ret;
+
+ ret = inet_add_offload(&sctp_offload, IPPROTO_SCTP);
+ if (ret)
+ goto out;
+
+ ret = inet6_add_offload(&sctp6_offload, IPPROTO_SCTP);
+ if (ret)
+ goto ipv4;
+
+ crc32c_csum_stub = &sctp_csum_ops;
+ return ret;
+
+ipv4:
+ inet_del_offload(&sctp_offload, IPPROTO_SCTP);
+out:
+ return ret;
+}
diff --git a/net/sctp/output.c b/net/sctp/output.c
new file mode 100644
index 000000000..a63df055a
--- /dev/null
+++ b/net/sctp/output.c
@@ -0,0 +1,865 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * These functions handle output processing.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Jon Grimm <jgrimm@austin.ibm.com>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/time.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <net/inet_ecn.h>
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/net_namespace.h>
+
+#include <linux/socket.h> /* for sa_family_t */
+#include <net/sock.h>
+
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+#include <net/sctp/checksum.h>
+
+/* Forward declarations for private helpers. */
+static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
+ struct sctp_chunk *chunk);
+static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
+ struct sctp_chunk *chunk);
+static void sctp_packet_append_data(struct sctp_packet *packet,
+ struct sctp_chunk *chunk);
+static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
+ struct sctp_chunk *chunk,
+ u16 chunk_len);
+
+static void sctp_packet_reset(struct sctp_packet *packet)
+{
+ /* sctp_packet_transmit() relies on this to reset size to the
+ * current overhead after sending packets.
+ */
+ packet->size = packet->overhead;
+
+ packet->has_cookie_echo = 0;
+ packet->has_sack = 0;
+ packet->has_data = 0;
+ packet->has_auth = 0;
+ packet->ipfragok = 0;
+ packet->auth = NULL;
+}
+
+/* Configure a packet.
+ * This is a follow-up set of initializations, applied once per flush.
+ */
+void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
+ int ecn_capable)
+{
+ struct sctp_transport *tp = packet->transport;
+ struct sctp_association *asoc = tp->asoc;
+ struct sctp_sock *sp = NULL;
+ struct sock *sk;
+
+ pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);
+ packet->vtag = vtag;
+
+ /* do the following jobs only once for a flush schedule */
+ if (!sctp_packet_empty(packet))
+ return;
+
+ /* set packet max_size with pathmtu, then calculate overhead */
+ packet->max_size = tp->pathmtu;
+
+ if (asoc) {
+ sk = asoc->base.sk;
+ sp = sctp_sk(sk);
+ }
+ packet->overhead = sctp_mtu_payload(sp, 0, 0);
+ packet->size = packet->overhead;
+
+ if (!asoc)
+ return;
+
+ /* update dst or transport pathmtu if in need */
+ if (!sctp_transport_dst_check(tp)) {
+ sctp_transport_route(tp, NULL, sp);
+ if (asoc->param_flags & SPP_PMTUD_ENABLE)
+ sctp_assoc_sync_pmtu(asoc);
+ } else if (!sctp_transport_pl_enabled(tp) &&
+ asoc->param_flags & SPP_PMTUD_ENABLE) {
+ if (!sctp_transport_pmtu_check(tp))
+ sctp_assoc_sync_pmtu(asoc);
+ }
+
+ if (asoc->pmtu_pending) {
+ if (asoc->param_flags & SPP_PMTUD_ENABLE)
+ sctp_assoc_sync_pmtu(asoc);
+ asoc->pmtu_pending = 0;
+ }
+
+ /* If there is a prepend chunk, stick it on the list before
+ * any other chunks get appended.
+ */
+ if (ecn_capable) {
+ struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc);
+
+ if (chunk)
+ sctp_packet_append_chunk(packet, chunk);
+ }
+
+ if (!tp->dst)
+ return;
+
+ /* set packet max_size with gso_max_size if gso is enabled */
+ rcu_read_lock();
+ if (__sk_dst_get(sk) != tp->dst) {
+ dst_hold(tp->dst);
+ sk_setup_caps(sk, tp->dst);
+ }
+ packet->max_size = sk_can_gso(sk) ? min(READ_ONCE(tp->dst->dev->gso_max_size),
+ GSO_LEGACY_MAX_SIZE)
+ : asoc->pathmtu;
+ rcu_read_unlock();
+}
+
+/* Initialize the packet structure. */
+void sctp_packet_init(struct sctp_packet *packet,
+ struct sctp_transport *transport,
+ __u16 sport, __u16 dport)
+{
+ pr_debug("%s: packet:%p transport:%p\n", __func__, packet, transport);
+
+ packet->transport = transport;
+ packet->source_port = sport;
+ packet->destination_port = dport;
+ INIT_LIST_HEAD(&packet->chunk_list);
+ /* The overhead will be calculated by sctp_packet_config() */
+ packet->overhead = 0;
+ sctp_packet_reset(packet);
+ packet->vtag = 0;
+}
+
+/* Free a packet. */
+void sctp_packet_free(struct sctp_packet *packet)
+{
+ struct sctp_chunk *chunk, *tmp;
+
+ pr_debug("%s: packet:%p\n", __func__, packet);
+
+ list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
+ list_del_init(&chunk->list);
+ sctp_chunk_free(chunk);
+ }
+}
+
+/* This routine tries to append the chunk to the offered packet. If adding
+ * the chunk causes the packet to exceed the path MTU and a COOKIE_ECHO chunk
+ * is not present in the packet, it transmits the input packet.
+ * Data can be bundled with a packet containing a COOKIE_ECHO chunk as long
+ * as it can fit in the packet, but any more data that does not fit in this
+ * packet can be sent only after receiving the COOKIE_ACK.
+ */
+enum sctp_xmit sctp_packet_transmit_chunk(struct sctp_packet *packet,
+ struct sctp_chunk *chunk,
+ int one_packet, gfp_t gfp)
+{
+ enum sctp_xmit retval;
+
+ pr_debug("%s: packet:%p size:%zu chunk:%p size:%d\n", __func__,
+ packet, packet->size, chunk, chunk->skb ? chunk->skb->len : -1);
+
+ switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
+ case SCTP_XMIT_PMTU_FULL:
+ if (!packet->has_cookie_echo) {
+ int error = 0;
+
+ error = sctp_packet_transmit(packet, gfp);
+ if (error < 0)
+ chunk->skb->sk->sk_err = -error;
+
+ /* If we have an empty packet, then we can NOT ever
+ * return PMTU_FULL.
+ */
+ if (!one_packet)
+ retval = sctp_packet_append_chunk(packet,
+ chunk);
+ }
+ break;
+
+ case SCTP_XMIT_RWND_FULL:
+ case SCTP_XMIT_OK:
+ case SCTP_XMIT_DELAY:
+ break;
+ }
+
+ return retval;
+}
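+
+/* A minimal sketch of how a caller is expected to react to the result
+ * (the real callers live in the outqueue code; this is only illustrative):
+ *
+ *	switch (sctp_packet_transmit_chunk(pkt, chunk, 0, gfp)) {
+ *	case SCTP_XMIT_OK:         the chunk is now part of the packet
+ *	case SCTP_XMIT_PMTU_FULL:  the chunk still does not fit; keep it queued
+ *	case SCTP_XMIT_RWND_FULL:
+ *	case SCTP_XMIT_DELAY:      hold the chunk back and try again later
+ *	}
+ */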
+
+/* Try to bundle a pad chunk into a packet with a heartbeat chunk for a PLPMTUD probe */
+static enum sctp_xmit sctp_packet_bundle_pad(struct sctp_packet *pkt, struct sctp_chunk *chunk)
+{
+ struct sctp_transport *t = pkt->transport;
+ struct sctp_chunk *pad;
+ int overhead = 0;
+
+ if (!chunk->pmtu_probe)
+ return SCTP_XMIT_OK;
+
+ /* calculate the Padding Data size for the pad chunk */
+ overhead += sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
+ overhead += sizeof(struct sctp_sender_hb_info) + sizeof(struct sctp_pad_chunk);
+ pad = sctp_make_pad(t->asoc, t->pl.probe_size - overhead);
+ if (!pad)
+ return SCTP_XMIT_DELAY;
+
+ list_add_tail(&pad->list, &pkt->chunk_list);
+ pkt->size += SCTP_PAD4(ntohs(pad->chunk_hdr->length));
+ chunk->transport = t;
+
+ return SCTP_XMIT_OK;
+}
+
+/* Try to bundle an auth chunk into the packet. */
+static enum sctp_xmit sctp_packet_bundle_auth(struct sctp_packet *pkt,
+ struct sctp_chunk *chunk)
+{
+ struct sctp_association *asoc = pkt->transport->asoc;
+ enum sctp_xmit retval = SCTP_XMIT_OK;
+ struct sctp_chunk *auth;
+
+ /* if we don't have an association, we can't do authentication */
+ if (!asoc)
+ return retval;
+
+ /* See if this is an auth chunk we are bundling or if
+ * auth is already bundled.
+ */
+ if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth)
+ return retval;
+
+ /* if the peer did not request this chunk to be authenticated,
+ * don't do it
+ */
+ if (!chunk->auth)
+ return retval;
+
+ auth = sctp_make_auth(asoc, chunk->shkey->key_id);
+ if (!auth)
+ return retval;
+
+ auth->shkey = chunk->shkey;
+ sctp_auth_shkey_hold(auth->shkey);
+
+ retval = __sctp_packet_append_chunk(pkt, auth);
+
+ if (retval != SCTP_XMIT_OK)
+ sctp_chunk_free(auth);
+
+ return retval;
+}
+
+/* Try to bundle a SACK with the packet. */
+static enum sctp_xmit sctp_packet_bundle_sack(struct sctp_packet *pkt,
+ struct sctp_chunk *chunk)
+{
+ enum sctp_xmit retval = SCTP_XMIT_OK;
+
+ /* If sending DATA and we haven't already bundled a SACK, try to
+ * bundle one into the packet.
+ */
+ if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
+ !pkt->has_cookie_echo) {
+ struct sctp_association *asoc;
+ struct timer_list *timer;
+ asoc = pkt->transport->asoc;
+ timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
+
+ /* If the SACK timer is running, we have a pending SACK */
+ if (timer_pending(timer)) {
+ struct sctp_chunk *sack;
+
+ if (pkt->transport->sack_generation !=
+ pkt->transport->asoc->peer.sack_generation)
+ return retval;
+
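+ /* Refresh the advertised receive window before building the
+ * SACK, so the peer sees our current rwnd.
+ */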
+ asoc->a_rwnd = asoc->rwnd;
+ sack = sctp_make_sack(asoc);
+ if (sack) {
+ retval = __sctp_packet_append_chunk(pkt, sack);
+ if (retval != SCTP_XMIT_OK) {
+ sctp_chunk_free(sack);
+ goto out;
+ }
+ SCTP_INC_STATS(asoc->base.net,
+ SCTP_MIB_OUTCTRLCHUNKS);
+ asoc->stats.octrlchunks++;
+ asoc->peer.sack_needed = 0;
+ if (del_timer(timer))
+ sctp_association_put(asoc);
+ }
+ }
+ }
+out:
+ return retval;
+}
+
+
+/* Append a chunk to the offered packet reporting back any inability to do
+ * so.
+ */
+static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
+ struct sctp_chunk *chunk)
+{
+ __u16 chunk_len = SCTP_PAD4(ntohs(chunk->chunk_hdr->length));
+ enum sctp_xmit retval = SCTP_XMIT_OK;
+
+ /* Check to see if this chunk will fit into the packet */
+ retval = sctp_packet_will_fit(packet, chunk, chunk_len);
+ if (retval != SCTP_XMIT_OK)
+ goto finish;
+
+ /* We believe that this chunk is OK to add to the packet */
+ switch (chunk->chunk_hdr->type) {
+ case SCTP_CID_DATA:
+ case SCTP_CID_I_DATA:
+ /* Account for the data being in the packet */
+ sctp_packet_append_data(packet, chunk);
+ /* Disallow SACK bundling after DATA. */
+ packet->has_sack = 1;
+ /* Disallow AUTH bundling after DATA */
+ packet->has_auth = 1;
+ /* Let it be known that this packet has DATA in it */
+ packet->has_data = 1;
+ /* timestamp the chunk for rtx purposes */
+ chunk->sent_at = jiffies;
+ /* Mainly used for prsctp RTX policy */
+ chunk->sent_count++;
+ break;
+ case SCTP_CID_COOKIE_ECHO:
+ packet->has_cookie_echo = 1;
+ break;
+
+ case SCTP_CID_SACK:
+ packet->has_sack = 1;
+ if (chunk->asoc)
+ chunk->asoc->stats.osacks++;
+ break;
+
+ case SCTP_CID_AUTH:
+ packet->has_auth = 1;
+ packet->auth = chunk;
+ break;
+ }
+
+ /* It is OK to send this chunk. */
+ list_add_tail(&chunk->list, &packet->chunk_list);
+ packet->size += chunk_len;
+ chunk->transport = packet->transport;
+finish:
+ return retval;
+}
+
+/* Append a chunk to the offered packet reporting back any inability to do
+ * so.
+ */
+enum sctp_xmit sctp_packet_append_chunk(struct sctp_packet *packet,
+ struct sctp_chunk *chunk)
+{
+ enum sctp_xmit retval = SCTP_XMIT_OK;
+
+ pr_debug("%s: packet:%p chunk:%p\n", __func__, packet, chunk);
+
+ /* Data chunks are special. Before seeing what else we can
+ * bundle into this packet, check to see if we are allowed to
+ * send this DATA.
+ */
+ if (sctp_chunk_is_data(chunk)) {
+ retval = sctp_packet_can_append_data(packet, chunk);
+ if (retval != SCTP_XMIT_OK)
+ goto finish;
+ }
+
+ /* Try to bundle AUTH chunk */
+ retval = sctp_packet_bundle_auth(packet, chunk);
+ if (retval != SCTP_XMIT_OK)
+ goto finish;
+
+ /* Try to bundle SACK chunk */
+ retval = sctp_packet_bundle_sack(packet, chunk);
+ if (retval != SCTP_XMIT_OK)
+ goto finish;
+
+ retval = __sctp_packet_append_chunk(packet, chunk);
+ if (retval != SCTP_XMIT_OK)
+ goto finish;
+
+ retval = sctp_packet_bundle_pad(packet, chunk);
+
+finish:
+ return retval;
+}
+
+static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb)
+{
+ if (SCTP_OUTPUT_CB(head)->last == head)
+ skb_shinfo(head)->frag_list = skb;
+ else
+ SCTP_OUTPUT_CB(head)->last->next = skb;
+ SCTP_OUTPUT_CB(head)->last = skb;
+
+ head->truesize += skb->truesize;
+ head->data_len += skb->len;
+ head->len += skb->len;
+ refcount_add(skb->truesize, &head->sk->sk_wmem_alloc);
+
+ __skb_header_release(skb);
+}
+
+static int sctp_packet_pack(struct sctp_packet *packet,
+ struct sk_buff *head, int gso, gfp_t gfp)
+{
+ struct sctp_transport *tp = packet->transport;
+ struct sctp_auth_chunk *auth = NULL;
+ struct sctp_chunk *chunk, *tmp;
+ int pkt_count = 0, pkt_size;
+ struct sock *sk = head->sk;
+ struct sk_buff *nskb;
+ int auth_len = 0;
+
+ if (gso) {
+ skb_shinfo(head)->gso_type = sk->sk_gso_type;
+ SCTP_OUTPUT_CB(head)->last = head;
+ } else {
+ nskb = head;
+ pkt_size = packet->size;
+ goto merge;
+ }
+
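+ /* Each iteration builds one PMTU-sized skb from the queued chunks;
+ * with GSO the skbs are chained onto head via its frag_list.
+ */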
+ do {
+ /* calculate the pkt_size and alloc nskb */
+ pkt_size = packet->overhead;
+ list_for_each_entry_safe(chunk, tmp, &packet->chunk_list,
+ list) {
+ int padded = SCTP_PAD4(chunk->skb->len);
+
+ if (chunk == packet->auth)
+ auth_len = padded;
+ else if (auth_len + padded + packet->overhead >
+ tp->pathmtu)
+ return 0;
+ else if (pkt_size + padded > tp->pathmtu)
+ break;
+ pkt_size += padded;
+ }
+ nskb = alloc_skb(pkt_size + MAX_HEADER, gfp);
+ if (!nskb)
+ return 0;
+ skb_reserve(nskb, packet->overhead + MAX_HEADER);
+
+merge:
+ /* merge chunks into nskb and append nskb into head list */
+ pkt_size -= packet->overhead;
+ list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
+ int padding;
+
+ list_del_init(&chunk->list);
+ if (sctp_chunk_is_data(chunk)) {
+ if (!sctp_chunk_retransmitted(chunk) &&
+ !tp->rto_pending) {
+ chunk->rtt_in_progress = 1;
+ tp->rto_pending = 1;
+ }
+ }
+
+ padding = SCTP_PAD4(chunk->skb->len) - chunk->skb->len;
+ if (padding)
+ skb_put_zero(chunk->skb, padding);
+
+ if (chunk == packet->auth)
+ auth = (struct sctp_auth_chunk *)
+ skb_tail_pointer(nskb);
+
+ skb_put_data(nskb, chunk->skb->data, chunk->skb->len);
+
+ pr_debug("*** Chunk:%p[%s] %s 0x%x, length:%d, chunk->skb->len:%d, rtt_in_progress:%d\n",
+ chunk,
+ sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
+ chunk->has_tsn ? "TSN" : "No TSN",
+ chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0,
+ ntohs(chunk->chunk_hdr->length), chunk->skb->len,
+ chunk->rtt_in_progress);
+
+ pkt_size -= SCTP_PAD4(chunk->skb->len);
+
+ if (!sctp_chunk_is_data(chunk) && chunk != packet->auth)
+ sctp_chunk_free(chunk);
+
+ if (!pkt_size)
+ break;
+ }
+
+ if (auth) {
+ sctp_auth_calculate_hmac(tp->asoc, nskb, auth,
+ packet->auth->shkey, gfp);
+ /* free auth if no more chunks, or add it back */
+ if (list_empty(&packet->chunk_list))
+ sctp_chunk_free(packet->auth);
+ else
+ list_add(&packet->auth->list,
+ &packet->chunk_list);
+ }
+
+ if (gso)
+ sctp_packet_gso_append(head, nskb);
+
+ pkt_count++;
+ } while (!list_empty(&packet->chunk_list));
+
+ if (gso) {
+ memset(head->cb, 0, max(sizeof(struct inet_skb_parm),
+ sizeof(struct inet6_skb_parm)));
+ skb_shinfo(head)->gso_segs = pkt_count;
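+ /* GSO_BY_FRAGS tells the stack to segment along the frag_list
+ * boundaries built above.
+ */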
+ skb_shinfo(head)->gso_size = GSO_BY_FRAGS;
+ goto chksum;
+ }
+
+ if (sctp_checksum_disable)
+ return 1;
+
+ if (!(tp->dst->dev->features & NETIF_F_SCTP_CRC) ||
+ dst_xfrm(tp->dst) || packet->ipfragok || tp->encap_port) {
+ struct sctphdr *sh =
+ (struct sctphdr *)skb_transport_header(head);
+
+ sh->checksum = sctp_compute_cksum(head, 0);
+ } else {
+chksum:
+ head->ip_summed = CHECKSUM_PARTIAL;
+ head->csum_not_inet = 1;
+ head->csum_start = skb_transport_header(head) - head->head;
+ head->csum_offset = offsetof(struct sctphdr, checksum);
+ }
+
+ return pkt_count;
+}
+
+/* All packets are sent to the network through this function from
+ * sctp_outq_tail().
+ *
+ * The return value is always 0 for now.
+ */
+int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
+{
+ struct sctp_transport *tp = packet->transport;
+ struct sctp_association *asoc = tp->asoc;
+ struct sctp_chunk *chunk, *tmp;
+ int pkt_count, gso = 0;
+ struct sk_buff *head;
+ struct sctphdr *sh;
+ struct sock *sk;
+
+ pr_debug("%s: packet:%p\n", __func__, packet);
+ if (list_empty(&packet->chunk_list))
+ return 0;
+ chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
+ sk = chunk->skb->sk;
+
+ if (packet->size > tp->pathmtu && !packet->ipfragok && !chunk->pmtu_probe) {
+ if (tp->pl.state == SCTP_PL_ERROR) { /* do IP fragmentation if in Error state */
+ packet->ipfragok = 1;
+ } else {
+ if (!sk_can_gso(sk)) { /* check gso */
+ pr_err_once("Trying to GSO but underlying device doesn't support it.");
+ goto out;
+ }
+ gso = 1;
+ }
+ }
+
+ /* alloc head skb */
+ head = alloc_skb((gso ? packet->overhead : packet->size) +
+ MAX_HEADER, gfp);
+ if (!head)
+ goto out;
+ skb_reserve(head, packet->overhead + MAX_HEADER);
+ skb_set_owner_w(head, sk);
+
+ /* set sctp header */
+ sh = skb_push(head, sizeof(struct sctphdr));
+ skb_reset_transport_header(head);
+ sh->source = htons(packet->source_port);
+ sh->dest = htons(packet->destination_port);
+ sh->vtag = htonl(packet->vtag);
+ sh->checksum = 0;
+
+ /* drop packet if no dst */
+ if (!tp->dst) {
+ IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
+ kfree_skb(head);
+ goto out;
+ }
+
+ /* pack up chunks */
+ pkt_count = sctp_packet_pack(packet, head, gso, gfp);
+ if (!pkt_count) {
+ kfree_skb(head);
+ goto out;
+ }
+ pr_debug("***sctp_transmit_packet*** skb->len:%d\n", head->len);
+
+ /* start autoclose timer */
+ if (packet->has_data && sctp_state(asoc, ESTABLISHED) &&
+ asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
+ struct timer_list *timer =
+ &asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
+ unsigned long timeout =
+ asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
+
+ if (!mod_timer(timer, jiffies + timeout))
+ sctp_association_hold(asoc);
+ }
+
+ /* sctp xmit */
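+ /* Let the address family mark outgoing packets as ECN-capable (ECT). */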
+ tp->af_specific->ecn_capable(sk);
+ if (asoc) {
+ asoc->stats.opackets += pkt_count;
+ if (asoc->peer.last_sent_to != tp)
+ asoc->peer.last_sent_to = tp;
+ }
+ head->ignore_df = packet->ipfragok;
+ if (tp->dst_pending_confirm)
+ skb_set_dst_pending_confirm(head, 1);
+ /* neighbour should be confirmed on successful transmission or
+ * positive error
+ */
+ if (tp->af_specific->sctp_xmit(head, tp) >= 0 &&
+ tp->dst_pending_confirm)
+ tp->dst_pending_confirm = 0;
+
+out:
+ list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
+ list_del_init(&chunk->list);
+ if (!sctp_chunk_is_data(chunk))
+ sctp_chunk_free(chunk);
+ }
+ sctp_packet_reset(packet);
+ return 0;
+}
+
+/********************************************************************
+ * 2nd Level Abstractions
+ ********************************************************************/
+
+/* This private function checks to see if a DATA chunk can be added */
+static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
+ struct sctp_chunk *chunk)
+{
+ size_t datasize, rwnd, inflight, flight_size;
+ struct sctp_transport *transport = packet->transport;
+ struct sctp_association *asoc = transport->asoc;
+ struct sctp_outq *q = &asoc->outqueue;
+
+ /* RFC 2960 6.1 Transmission of DATA Chunks
+ *
+ * A) At any given time, the data sender MUST NOT transmit new data to
+ * any destination transport address if its peer's rwnd indicates
+ * that the peer has no buffer space (i.e. rwnd is 0, see Section
+ * 6.2.1). However, regardless of the value of rwnd (including if it
+ * is 0), the data sender can always have one DATA chunk in flight to
+ * the receiver if allowed by cwnd (see rule B below). This rule
+ * allows the sender to probe for a change in rwnd that the sender
+ * missed due to the SACK having been lost in transit from the data
+ * receiver to the data sender.
+ */
+
+ rwnd = asoc->peer.rwnd;
+ inflight = q->outstanding_bytes;
+ flight_size = transport->flight_size;
+
+ datasize = sctp_data_size(chunk);
+
+ if (datasize > rwnd && inflight > 0)
+ /* We have (at least) one data chunk in flight,
+ * so we can't fall back to rule 6.1 B).
+ */
+ return SCTP_XMIT_RWND_FULL;
+
+ /* RFC 2960 6.1 Transmission of DATA Chunks
+ *
+ * B) At any given time, the sender MUST NOT transmit new data
+ * to a given transport address if it has cwnd or more bytes
+ * of data outstanding to that transport address.
+ */
+ /* RFC 7.2.4 & the Implementers Guide 2.8.
+ *
+ * 3) ...
+ * When a Fast Retransmit is being performed the sender SHOULD
+ * ignore the value of cwnd and SHOULD NOT delay retransmission.
+ */
+ if (chunk->fast_retransmit != SCTP_NEED_FRTX &&
+ flight_size >= transport->cwnd)
+ return SCTP_XMIT_RWND_FULL;
+
+ /* Nagle's algorithm to solve small-packet problem:
+ * Inhibit the sending of new chunks when new outgoing data arrives
+ * if any previously transmitted data on the connection remains
+ * unacknowledged.
+ */
+
+ if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) &&
+ !asoc->force_delay)
+ /* Nothing unacked */
+ return SCTP_XMIT_OK;
+
+ if (!sctp_packet_empty(packet))
+ /* Append to packet */
+ return SCTP_XMIT_OK;
+
+ if (!sctp_state(asoc, ESTABLISHED))
+ return SCTP_XMIT_OK;
+
+ /* Check whether this chunk and all the rest of pending data will fit
+ * or delay in hopes of bundling a full sized packet.
+ */
+ if (chunk->skb->len + q->out_qlen > transport->pathmtu -
+ packet->overhead - sctp_datachk_len(&chunk->asoc->stream) - 4)
+ /* Enough data queued to fill a packet */
+ return SCTP_XMIT_OK;
+
+ /* Don't delay large message writes that may have been fragmented */
+ if (!chunk->msg->can_delay)
+ return SCTP_XMIT_OK;
+
+ /* Defer until all data acked or packet full */
+ return SCTP_XMIT_DELAY;
+}
+
+/* This private function does management things when adding DATA chunk */
+static void sctp_packet_append_data(struct sctp_packet *packet,
+ struct sctp_chunk *chunk)
+{
+ struct sctp_transport *transport = packet->transport;
+ size_t datasize = sctp_data_size(chunk);
+ struct sctp_association *asoc = transport->asoc;
+ u32 rwnd = asoc->peer.rwnd;
+
+ /* Keep track of how many bytes are in flight over this transport. */
+ transport->flight_size += datasize;
+
+ /* Keep track of how many bytes are in flight to the receiver. */
+ asoc->outqueue.outstanding_bytes += datasize;
+
+ /* Update our view of the receiver's rwnd. */
+ if (datasize < rwnd)
+ rwnd -= datasize;
+ else
+ rwnd = 0;
+
+ asoc->peer.rwnd = rwnd;
+ sctp_chunk_assign_tsn(chunk);
+ asoc->stream.si->assign_number(chunk);
+}
+
+static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
+ struct sctp_chunk *chunk,
+ u16 chunk_len)
+{
+ enum sctp_xmit retval = SCTP_XMIT_OK;
+ size_t psize, pmtu, maxsize;
+
+ /* Don't bundle in this packet if this chunk's auth key doesn't
+ * match other chunks already enqueued on this packet. Also,
+ * don't bundle the chunk with auth key if other chunks in this
+ * packet don't have auth key.
+ */
+ if ((packet->auth && chunk->shkey != packet->auth->shkey) ||
+ (!packet->auth && chunk->shkey &&
+ chunk->chunk_hdr->type != SCTP_CID_AUTH))
+ return SCTP_XMIT_PMTU_FULL;
+
+ psize = packet->size;
+ if (packet->transport->asoc)
+ pmtu = packet->transport->asoc->pathmtu;
+ else
+ pmtu = packet->transport->pathmtu;
+
+ /* Decide if we need to fragment or resubmit later. */
+ if (psize + chunk_len > pmtu) {
+ /* It's OK to fragment at IP level if any one of the following
+ * is true:
+ * 1. The packet is empty (meaning this chunk is greater
+ * than the MTU)
+ * 2. The packet doesn't have any data in it yet and data
+ * requires authentication.
+ */
+ if (sctp_packet_empty(packet) ||
+ (!packet->has_data && chunk->auth)) {
+ /* We no longer do re-fragmentation.
+ * Just fragment at the IP layer, if we
+ * actually hit this condition
+ */
+ packet->ipfragok = 1;
+ goto out;
+ }
+
+ /* Similarly, if this chunk was built before a PMTU
+ * reduction, we have to fragment it at IP level now. So
+ * if the packet already contains something, we need to
+ * flush.
+ */
+ maxsize = pmtu - packet->overhead;
+ if (packet->auth)
+ maxsize -= SCTP_PAD4(packet->auth->skb->len);
+ if (chunk_len > maxsize)
+ retval = SCTP_XMIT_PMTU_FULL;
+
+ /* It is also OK to fragment if the chunk we are
+ * adding is a control chunk, but only if the current
+ * packet is not a GSO one, as that would otherwise cause
+ * fragmentation of a large frame. So in this case we
+ * allow the fragmentation by forcing it into a new packet.
+ */
+ if (!sctp_chunk_is_data(chunk) && packet->has_data)
+ retval = SCTP_XMIT_PMTU_FULL;
+
+ if (psize + chunk_len > packet->max_size)
+ /* Hit GSO/PMTU limit, gotta flush */
+ retval = SCTP_XMIT_PMTU_FULL;
+
+ if (!packet->transport->burst_limited &&
+ psize + chunk_len > (packet->transport->cwnd >> 1))
+ /* Do not allow a single GSO packet to use more
+ * than half of cwnd.
+ */
+ retval = SCTP_XMIT_PMTU_FULL;
+
+ if (packet->transport->burst_limited &&
+ psize + chunk_len > (packet->transport->burst_limited >> 1))
+ /* Do not allow a single GSO packet to use more
+ * than half of original cwnd.
+ */
+ retval = SCTP_XMIT_PMTU_FULL;
+ /* Otherwise it will fit in the GSO packet */
+ }
+
+out:
+ return retval;
+}
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
new file mode 100644
index 000000000..20831079f
--- /dev/null
+++ b/net/sctp/outqueue.c
@@ -0,0 +1,1922 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001-2003 Intel Corp.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * These functions implement the sctp_outq class. The outqueue handles
+ * bundling and queueing of outgoing SCTP chunks.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Perry Melange <pmelange@null.cc.uic.edu>
+ * Xingang Guo <xingang.guo@intel.com>
+ * Hui Huang <hui.huang@nokia.com>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ * Jon Grimm <jgrimm@us.ibm.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/list.h> /* For struct list_head */
+#include <linux/socket.h>
+#include <linux/ip.h>
+#include <linux/slab.h>
+#include <net/sock.h> /* For skb_set_owner_w */
+
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+#include <net/sctp/stream_sched.h>
+#include <trace/events/sctp.h>
+
+/* Declare internal functions here. */
+static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
+static void sctp_check_transmitted(struct sctp_outq *q,
+ struct list_head *transmitted_queue,
+ struct sctp_transport *transport,
+ union sctp_addr *saddr,
+ struct sctp_sackhdr *sack,
+ __u32 *highest_new_tsn);
+
+static void sctp_mark_missing(struct sctp_outq *q,
+ struct list_head *transmitted_queue,
+ struct sctp_transport *transport,
+ __u32 highest_new_tsn,
+ int count_of_newacks);
+
+static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp);
+
+/* Add data to the front of the queue. */
+static inline void sctp_outq_head_data(struct sctp_outq *q,
+ struct sctp_chunk *ch)
+{
+ struct sctp_stream_out_ext *oute;
+ __u16 stream;
+
+ list_add(&ch->list, &q->out_chunk_list);
+ q->out_qlen += ch->skb->len;
+
+ stream = sctp_chunk_stream_no(ch);
+ oute = SCTP_SO(&q->asoc->stream, stream)->ext;
+ list_add(&ch->stream_list, &oute->outq);
+}
+
+/* Take data from the front of the queue. */
+static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
+{
+ return q->sched->dequeue(q);
+}
+
+/* Add data chunk to the end of the queue. */
+static inline void sctp_outq_tail_data(struct sctp_outq *q,
+ struct sctp_chunk *ch)
+{
+ struct sctp_stream_out_ext *oute;
+ __u16 stream;
+
+ list_add_tail(&ch->list, &q->out_chunk_list);
+ q->out_qlen += ch->skb->len;
+
+ stream = sctp_chunk_stream_no(ch);
+ oute = SCTP_SO(&q->asoc->stream, stream)->ext;
+ list_add_tail(&ch->stream_list, &oute->outq);
+}
+
+/*
+ * SFR-CACC algorithm:
+ * D) If count_of_newacks is greater than or equal to 2
+ * and t was not sent to the current primary then the
+ * sender MUST NOT increment missing report count for t.
+ */
+static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary,
+ struct sctp_transport *transport,
+ int count_of_newacks)
+{
+ if (count_of_newacks >= 2 && transport != primary)
+ return 1;
+ return 0;
+}
+
+/*
+ * SFR-CACC algorithm:
+ * F) If count_of_newacks is less than 2, let d be the
+ * destination to which t was sent. If cacc_saw_newack
+ * is 0 for destination d, then the sender MUST NOT
+ * increment missing report count for t.
+ */
+static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport,
+ int count_of_newacks)
+{
+ if (count_of_newacks < 2 &&
+ (transport && !transport->cacc.cacc_saw_newack))
+ return 1;
+ return 0;
+}
+
+/*
+ * SFR-CACC algorithm:
+ * 3.1) If CYCLING_CHANGEOVER is 0, the sender SHOULD
+ * execute steps C, D, F.
+ *
+ * C has been implemented in sctp_outq_sack
+ */
+static inline int sctp_cacc_skip_3_1(struct sctp_transport *primary,
+ struct sctp_transport *transport,
+ int count_of_newacks)
+{
+ if (!primary->cacc.cycling_changeover) {
+ if (sctp_cacc_skip_3_1_d(primary, transport, count_of_newacks))
+ return 1;
+ if (sctp_cacc_skip_3_1_f(transport, count_of_newacks))
+ return 1;
+ return 0;
+ }
+ return 0;
+}
+
+/*
+ * SFR-CACC algorithm:
+ * 3.2) Else if CYCLING_CHANGEOVER is 1, and t is less
+ * than next_tsn_at_change of the current primary, then
+ * the sender MUST NOT increment missing report count
+ * for t.
+ */
+static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn)
+{
+ if (primary->cacc.cycling_changeover &&
+ TSN_lt(tsn, primary->cacc.next_tsn_at_change))
+ return 1;
+ return 0;
+}
+
+/*
+ * SFR-CACC algorithm:
+ * 3) If the missing report count for TSN t is to be
+ * incremented according to [RFC2960] and
+ * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set,
+ * then the sender MUST further execute steps 3.1 and
+ * 3.2 to determine if the missing report count for
+ * TSN t SHOULD NOT be incremented.
+ *
+ * 3.3) If 3.1 and 3.2 do not dictate that the missing
+ * report count for t should not be incremented, then
+ * the sender SHOULD increment missing report count for
+ * t (according to [RFC2960] and [SCTP_STEWART_2002]).
+ */
+static inline int sctp_cacc_skip(struct sctp_transport *primary,
+ struct sctp_transport *transport,
+ int count_of_newacks,
+ __u32 tsn)
+{
+ if (primary->cacc.changeover_active &&
+ (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) ||
+ sctp_cacc_skip_3_2(primary, tsn)))
+ return 1;
+ return 0;
+}
+
+/* Initialize an existing sctp_outq. This does the boring stuff.
+ * You still need to define handlers if you really want to DO
+ * something with this structure...
+ */
+void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
+{
+ memset(q, 0, sizeof(struct sctp_outq));
+
+ q->asoc = asoc;
+ INIT_LIST_HEAD(&q->out_chunk_list);
+ INIT_LIST_HEAD(&q->control_chunk_list);
+ INIT_LIST_HEAD(&q->retransmit);
+ INIT_LIST_HEAD(&q->sacked);
+ INIT_LIST_HEAD(&q->abandoned);
+ sctp_sched_set_sched(asoc, sctp_sk(asoc->base.sk)->default_ss);
+}
+
+/* Free the outqueue structure and any related pending chunks.
+ */
+static void __sctp_outq_teardown(struct sctp_outq *q)
+{
+ struct sctp_transport *transport;
+ struct list_head *lchunk, *temp;
+ struct sctp_chunk *chunk, *tmp;
+
+ /* Throw away unacknowledged chunks. */
+ list_for_each_entry(transport, &q->asoc->peer.transport_addr_list,
+ transports) {
+ while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) {
+ chunk = list_entry(lchunk, struct sctp_chunk,
+ transmitted_list);
+ /* Mark as part of a failed message. */
+ sctp_chunk_fail(chunk, q->error);
+ sctp_chunk_free(chunk);
+ }
+ }
+
+ /* Throw away chunks that have been gap ACKed. */
+ list_for_each_safe(lchunk, temp, &q->sacked) {
+ list_del_init(lchunk);
+ chunk = list_entry(lchunk, struct sctp_chunk,
+ transmitted_list);
+ sctp_chunk_fail(chunk, q->error);
+ sctp_chunk_free(chunk);
+ }
+
+ /* Throw away any chunks in the retransmit queue. */
+ list_for_each_safe(lchunk, temp, &q->retransmit) {
+ list_del_init(lchunk);
+ chunk = list_entry(lchunk, struct sctp_chunk,
+ transmitted_list);
+ sctp_chunk_fail(chunk, q->error);
+ sctp_chunk_free(chunk);
+ }
+
+ /* Throw away any chunks that are in the abandoned queue. */
+ list_for_each_safe(lchunk, temp, &q->abandoned) {
+ list_del_init(lchunk);
+ chunk = list_entry(lchunk, struct sctp_chunk,
+ transmitted_list);
+ sctp_chunk_fail(chunk, q->error);
+ sctp_chunk_free(chunk);
+ }
+
+ /* Throw away any leftover data chunks. */
+ while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
+ sctp_sched_dequeue_done(q, chunk);
+
+ /* Mark as send failure. */
+ sctp_chunk_fail(chunk, q->error);
+ sctp_chunk_free(chunk);
+ }
+
+ /* Throw away any leftover control chunks. */
+ list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
+ list_del_init(&chunk->list);
+ sctp_chunk_free(chunk);
+ }
+}
+
+void sctp_outq_teardown(struct sctp_outq *q)
+{
+ __sctp_outq_teardown(q);
+ sctp_outq_init(q->asoc, q);
+}
+
+/* Free the outqueue structure and any related pending chunks. */
+void sctp_outq_free(struct sctp_outq *q)
+{
+ /* Throw away leftover chunks. */
+ __sctp_outq_teardown(q);
+}
+
+/* Put a new chunk in an sctp_outq. */
+void sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
+{
+ struct net *net = q->asoc->base.net;
+
+ pr_debug("%s: outq:%p, chunk:%p[%s]\n", __func__, q, chunk,
+ chunk && chunk->chunk_hdr ?
+ sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
+ "illegal chunk");
+
+ /* If it is data, queue it up, otherwise, send it
+ * immediately.
+ */
+ if (sctp_chunk_is_data(chunk)) {
+ pr_debug("%s: outqueueing: outq:%p, chunk:%p[%s])\n",
+ __func__, q, chunk, chunk && chunk->chunk_hdr ?
+ sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
+ "illegal chunk");
+
+ sctp_outq_tail_data(q, chunk);
+ if (chunk->asoc->peer.prsctp_capable &&
+ SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
+ chunk->asoc->sent_cnt_removable++;
+ if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+ SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
+ else
+ SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
+ } else {
+ list_add_tail(&chunk->list, &q->control_chunk_list);
+ SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
+ }
+
+ if (!q->cork)
+ sctp_outq_flush(q, 0, gfp);
+}
+
+/* Insert a chunk into the sorted list based on the TSNs. The retransmit list
+ * and the abandoned list are in ascending order.
+ */
+static void sctp_insert_list(struct list_head *head, struct list_head *new)
+{
+ struct list_head *pos;
+ struct sctp_chunk *nchunk, *lchunk;
+ __u32 ntsn, ltsn;
+ int done = 0;
+
+ nchunk = list_entry(new, struct sctp_chunk, transmitted_list);
+ ntsn = ntohl(nchunk->subh.data_hdr->tsn);
+
+ list_for_each(pos, head) {
+ lchunk = list_entry(pos, struct sctp_chunk, transmitted_list);
+ ltsn = ntohl(lchunk->subh.data_hdr->tsn);
+ if (TSN_lt(ntsn, ltsn)) {
+ list_add(new, pos->prev);
+ done = 1;
+ break;
+ }
+ }
+ if (!done)
+ list_add_tail(new, head);
+}
+
+static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
+ struct sctp_sndrcvinfo *sinfo,
+ struct list_head *queue, int msg_len)
+{
+ struct sctp_chunk *chk, *temp;
+
+ list_for_each_entry_safe(chk, temp, queue, transmitted_list) {
+ struct sctp_stream_out *streamout;
+
+ if (!chk->msg->abandoned &&
+ (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
+ chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive))
+ continue;
+
+ chk->msg->abandoned = 1;
+ list_del_init(&chk->transmitted_list);
+ sctp_insert_list(&asoc->outqueue.abandoned,
+ &chk->transmitted_list);
+
+ streamout = SCTP_SO(&asoc->stream, chk->sinfo.sinfo_stream);
+ asoc->sent_cnt_removable--;
+ asoc->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
+ streamout->ext->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
+
+ if (queue != &asoc->outqueue.retransmit &&
+ !chk->tsn_gap_acked) {
+ if (chk->transport)
+ chk->transport->flight_size -=
+ sctp_data_size(chk);
+ asoc->outqueue.outstanding_bytes -= sctp_data_size(chk);
+ }
+
+ msg_len -= chk->skb->truesize + sizeof(struct sctp_chunk);
+ if (msg_len <= 0)
+ break;
+ }
+
+ return msg_len;
+}
+
+static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
+ struct sctp_sndrcvinfo *sinfo, int msg_len)
+{
+ struct sctp_outq *q = &asoc->outqueue;
+ struct sctp_chunk *chk, *temp;
+ struct sctp_stream_out *sout;
+
+ q->sched->unsched_all(&asoc->stream);
+
+ list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) {
+ if (!chk->msg->abandoned &&
+ (!(chk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG) ||
+ !SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
+ chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive))
+ continue;
+
+ chk->msg->abandoned = 1;
+ sctp_sched_dequeue_common(q, chk);
+ asoc->sent_cnt_removable--;
+ asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
+
+ sout = SCTP_SO(&asoc->stream, chk->sinfo.sinfo_stream);
+ sout->ext->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
+
+ /* clear out_curr if all frag chunks are pruned */
+ if (asoc->stream.out_curr == sout &&
+ list_is_last(&chk->frag_list, &chk->msg->chunks))
+ asoc->stream.out_curr = NULL;
+
+ msg_len -= chk->skb->truesize + sizeof(struct sctp_chunk);
+ sctp_chunk_free(chk);
+ if (msg_len <= 0)
+ break;
+ }
+
+ q->sched->sched_all(&asoc->stream);
+
+ return msg_len;
+}
+
+/* Abandon the chunks according to their priorities */
+void sctp_prsctp_prune(struct sctp_association *asoc,
+ struct sctp_sndrcvinfo *sinfo, int msg_len)
+{
+ struct sctp_transport *transport;
+
+ if (!asoc->peer.prsctp_capable || !asoc->sent_cnt_removable)
+ return;
+
+ msg_len = sctp_prsctp_prune_sent(asoc, sinfo,
+ &asoc->outqueue.retransmit,
+ msg_len);
+ if (msg_len <= 0)
+ return;
+
+ list_for_each_entry(transport, &asoc->peer.transport_addr_list,
+ transports) {
+ msg_len = sctp_prsctp_prune_sent(asoc, sinfo,
+ &transport->transmitted,
+ msg_len);
+ if (msg_len <= 0)
+ return;
+ }
+
+ sctp_prsctp_prune_unsent(asoc, sinfo, msg_len);
+}
+
+/* Mark all the eligible packets on a transport for retransmission. */
+void sctp_retransmit_mark(struct sctp_outq *q,
+ struct sctp_transport *transport,
+ __u8 reason)
+{
+ struct list_head *lchunk, *ltemp;
+ struct sctp_chunk *chunk;
+
+ /* Walk through the specified transmitted queue. */
+ list_for_each_safe(lchunk, ltemp, &transport->transmitted) {
+ chunk = list_entry(lchunk, struct sctp_chunk,
+ transmitted_list);
+
+ /* If the chunk is abandoned, move it to abandoned list. */
+ if (sctp_chunk_abandoned(chunk)) {
+ list_del_init(lchunk);
+ sctp_insert_list(&q->abandoned, lchunk);
+
+ /* If this chunk has not been previously acked,
+ * stop considering it 'outstanding'. Our peer
+ * will most likely never see it since it will
+ * not be retransmitted
+ */
+ if (!chunk->tsn_gap_acked) {
+ if (chunk->transport)
+ chunk->transport->flight_size -=
+ sctp_data_size(chunk);
+ q->outstanding_bytes -= sctp_data_size(chunk);
+ q->asoc->peer.rwnd += sctp_data_size(chunk);
+ }
+ continue;
+ }
+
+ /* If we are doing retransmission due to a timeout or pmtu
+ * discovery, only the chunks that are not yet acked should
+ * be added to the retransmit queue.
+ */
+ if ((reason == SCTP_RTXR_FAST_RTX &&
+ (chunk->fast_retransmit == SCTP_NEED_FRTX)) ||
+ (reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) {
+ /* RFC 2960 6.2.1 Processing a Received SACK
+ *
+ * C) Any time a DATA chunk is marked for
+ * retransmission (via either T3-rtx timer expiration
+ * (Section 6.3.3) or via fast retransmit
+ * (Section 7.2.4)), add the data size of those
+ * chunks to the rwnd.
+ */
+ q->asoc->peer.rwnd += sctp_data_size(chunk);
+ q->outstanding_bytes -= sctp_data_size(chunk);
+ if (chunk->transport)
+ transport->flight_size -= sctp_data_size(chunk);
+
+ /* sctpimpguide-05 Section 2.8.2
+ * M5) If a T3-rtx timer expires, the
+ * 'TSN.Missing.Report' of all affected TSNs is set
+ * to 0.
+ */
+ chunk->tsn_missing_report = 0;
+
+ /* If a chunk that is being used for RTT measurement
+ * has to be retransmitted, we cannot use this chunk
+ * anymore for RTT measurements. Reset rto_pending so
+ * that a new RTT measurement is started when a new
+ * data chunk is sent.
+ */
+ if (chunk->rtt_in_progress) {
+ chunk->rtt_in_progress = 0;
+ transport->rto_pending = 0;
+ }
+
+ /* Move the chunk to the retransmit queue. The chunks
+ * on the retransmit queue are always kept in order.
+ */
+ list_del_init(lchunk);
+ sctp_insert_list(&q->retransmit, lchunk);
+ }
+ }
+
+ pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d, "
+ "flight_size:%d, pba:%d\n", __func__, transport, reason,
+ transport->cwnd, transport->ssthresh, transport->flight_size,
+ transport->partial_bytes_acked);
+}
+
+/* Mark all the eligible packets on a transport for retransmission and force
+ * one packet out.
+ */
+void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
+ enum sctp_retransmit_reason reason)
+{
+ struct net *net = q->asoc->base.net;
+
+ switch (reason) {
+ case SCTP_RTXR_T3_RTX:
+ SCTP_INC_STATS(net, SCTP_MIB_T3_RETRANSMITS);
+ sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
+ /* Update the retran path if the T3-rtx timer has expired for
+ * the current retran path.
+ */
+ if (transport == transport->asoc->peer.retran_path)
+ sctp_assoc_update_retran_path(transport->asoc);
+ transport->asoc->rtx_data_chunks +=
+ transport->asoc->unack_data;
+ if (transport->pl.state == SCTP_PL_COMPLETE &&
+ transport->asoc->unack_data)
+ sctp_transport_reset_probe_timer(transport);
+ break;
+ case SCTP_RTXR_FAST_RTX:
+ SCTP_INC_STATS(net, SCTP_MIB_FAST_RETRANSMITS);
+ sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
+ q->fast_rtx = 1;
+ break;
+ case SCTP_RTXR_PMTUD:
+ SCTP_INC_STATS(net, SCTP_MIB_PMTUD_RETRANSMITS);
+ break;
+ case SCTP_RTXR_T1_RTX:
+ SCTP_INC_STATS(net, SCTP_MIB_T1_RETRANSMITS);
+ transport->asoc->init_retries++;
+ break;
+ default:
+ BUG();
+ }
+
+ sctp_retransmit_mark(q, transport, reason);
+
+ /* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination,
+ * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
+ * following the procedures outlined in C1 - C5.
+ */
+ if (reason == SCTP_RTXR_T3_RTX)
+ q->asoc->stream.si->generate_ftsn(q, q->asoc->ctsn_ack_point);
+
+ /* Flush the queues only on timeout, since fast_rtx is only
+ * triggered during sack processing and the queue
+ * will be flushed at the end.
+ */
+ if (reason != SCTP_RTXR_FAST_RTX)
+ sctp_outq_flush(q, /* rtx_timeout */ 1, GFP_ATOMIC);
+}
+
+/*
+ * Transmit DATA chunks on the retransmit queue. Upon return from
+ * __sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which
+ * need to be transmitted by the caller.
+ * We assume that pkt->transport has already been set.
+ *
+ * The return value is a normal kernel error return value.
+ */
+static int __sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
+ int rtx_timeout, int *start_timer, gfp_t gfp)
+{
+ struct sctp_transport *transport = pkt->transport;
+ struct sctp_chunk *chunk, *chunk1;
+ struct list_head *lqueue;
+ enum sctp_xmit status;
+ int error = 0;
+ int timer = 0;
+ int done = 0;
+ int fast_rtx;
+
+ lqueue = &q->retransmit;
+ fast_rtx = q->fast_rtx;
+
+ /* This loop handles time-out retransmissions, fast retransmissions,
+ * and retransmissions due to the opening of the window.
+ *
+ * RFC 2960 6.3.3 Handle T3-rtx Expiration
+ *
+ * E3) Determine how many of the earliest (i.e., lowest TSN)
+ * outstanding DATA chunks for the address for which the
+ * T3-rtx has expired will fit into a single packet, subject
+ * to the MTU constraint for the path corresponding to the
+ * destination transport address to which the retransmission
+ * is being sent (this may be different from the address for
+ * which the timer expires [see Section 6.4]). Call this value
+ * K. Bundle and retransmit those K DATA chunks in a single
+ * packet to the destination endpoint.
+ *
+ * [Just to be painfully clear, if we are retransmitting
+ * because a timeout just happened, we should send only ONE
+ * packet of retransmitted data.]
+ *
+ * For fast retransmissions we also send only ONE packet. However,
+ * if we are just flushing the queue due to open window, we'll
+ * try to send as much as possible.
+ */
+ list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) {
+ /* If the chunk is abandoned, move it to abandoned list. */
+ if (sctp_chunk_abandoned(chunk)) {
+ list_del_init(&chunk->transmitted_list);
+ sctp_insert_list(&q->abandoned,
+ &chunk->transmitted_list);
+ continue;
+ }
+
+ /* Make sure that Gap Acked TSNs are not retransmitted. A
+ * simple approach is just to move such TSNs out of the
+ * way and into a 'transmitted' queue and skip to the
+ * next chunk.
+ */
+ if (chunk->tsn_gap_acked) {
+ list_move_tail(&chunk->transmitted_list,
+ &transport->transmitted);
+ continue;
+ }
+
+ /* If we are doing fast retransmit, ignore non-fast_retransmit
+ * chunks
+ */
+ if (fast_rtx && !chunk->fast_retransmit)
+ continue;
+
+redo:
+ /* Attempt to append this chunk to the packet. */
+ status = sctp_packet_append_chunk(pkt, chunk);
+
+ switch (status) {
+ case SCTP_XMIT_PMTU_FULL:
+ if (!pkt->has_data && !pkt->has_cookie_echo) {
+ /* If this packet did not contain DATA then
+ * retransmission did not happen, so do it
+ * again. We'll ignore the error here since
+ * control chunks are already freed so there
+ * is nothing we can do.
+ */
+ sctp_packet_transmit(pkt, gfp);
+ goto redo;
+ }
+
+ /* Send this packet. */
+ error = sctp_packet_transmit(pkt, gfp);
+
+ /* If we are retransmitting, we should only
+ * send a single packet.
+ * Otherwise, try appending this chunk again.
+ */
+ if (rtx_timeout || fast_rtx)
+ done = 1;
+ else
+ goto redo;
+
+ /* Bundle next chunk in the next round. */
+ break;
+
+ case SCTP_XMIT_RWND_FULL:
+ /* Send this packet. */
+ error = sctp_packet_transmit(pkt, gfp);
+
+ /* Stop sending DATA as there is no more room
+ * at the receiver.
+ */
+ done = 1;
+ break;
+
+ case SCTP_XMIT_DELAY:
+ /* Send this packet. */
+ error = sctp_packet_transmit(pkt, gfp);
+
+ /* Stop sending DATA because of nagle delay. */
+ done = 1;
+ break;
+
+ default:
+ /* The append was successful, so add this chunk to
+ * the transmitted list.
+ */
+ list_move_tail(&chunk->transmitted_list,
+ &transport->transmitted);
+
+ /* Mark the chunk as ineligible for fast retransmit
+ * after it is retransmitted.
+ */
+ if (chunk->fast_retransmit == SCTP_NEED_FRTX)
+ chunk->fast_retransmit = SCTP_DONT_FRTX;
+
+ q->asoc->stats.rtxchunks++;
+ break;
+ }
+
+ /* Set the timer if there were no errors */
+ if (!error && !timer)
+ timer = 1;
+
+ if (done)
+ break;
+ }
+
+ /* If we are here due to a retransmit timeout or a fast
+ * retransmit and if there are any chunks left in the retransmit
+ * queue that could not fit in the PMTU sized packet, they need
+ * to be marked as ineligible for a subsequent fast retransmit.
+ */
+ if (rtx_timeout || fast_rtx) {
+ list_for_each_entry(chunk1, lqueue, transmitted_list) {
+ if (chunk1->fast_retransmit == SCTP_NEED_FRTX)
+ chunk1->fast_retransmit = SCTP_DONT_FRTX;
+ }
+ }
+
+ *start_timer = timer;
+
+ /* Clear fast retransmit hint */
+ if (fast_rtx)
+ q->fast_rtx = 0;
+
+ return error;
+}
+
+/* Uncork the outqueue: clear the cork flag and flush the queued chunks. */
+void sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp)
+{
+ if (q->cork)
+ q->cork = 0;
+
+ sctp_outq_flush(q, 0, gfp);
+}
+
+static int sctp_packet_singleton(struct sctp_transport *transport,
+ struct sctp_chunk *chunk, gfp_t gfp)
+{
+ const struct sctp_association *asoc = transport->asoc;
+ const __u16 sport = asoc->base.bind_addr.port;
+ const __u16 dport = asoc->peer.port;
+ const __u32 vtag = asoc->peer.i.init_tag;
+ struct sctp_packet singleton;
+
+ sctp_packet_init(&singleton, transport, sport, dport);
+ sctp_packet_config(&singleton, vtag, 0);
+ if (sctp_packet_append_chunk(&singleton, chunk) != SCTP_XMIT_OK) {
+ list_del_init(&chunk->list);
+ sctp_chunk_free(chunk);
+ return -ENOMEM;
+ }
+ return sctp_packet_transmit(&singleton, gfp);
+}
+
+/* Struct to hold the context during sctp outq flush */
+struct sctp_flush_ctx {
+ struct sctp_outq *q;
+ /* Current transport being used. It's NOT necessarily the current active one */
+ struct sctp_transport *transport;
+ /* These transports have chunks to send. */
+ struct list_head transport_list;
+ struct sctp_association *asoc;
+ /* Packet on the current transport above */
+ struct sctp_packet *packet;
+ gfp_t gfp;
+};
+
+/* Select the transport to send this chunk on; updates ctx->transport and ctx->packet. */
+static void sctp_outq_select_transport(struct sctp_flush_ctx *ctx,
+ struct sctp_chunk *chunk)
+{
+ struct sctp_transport *new_transport = chunk->transport;
+
+ if (!new_transport) {
+ if (!sctp_chunk_is_data(chunk)) {
+ /* If we have a prior transport pointer, see if
+ * the destination address of the chunk
+ * matches the destination address of the
+ * current transport. If not a match, then
+ * try to look up the transport with a given
+ * destination address. We do this because
+ * after processing ASCONFs, we may have new
+ * transports created.
+ */
+ if (ctx->transport && sctp_cmp_addr_exact(&chunk->dest,
+ &ctx->transport->ipaddr))
+ new_transport = ctx->transport;
+ else
+ new_transport = sctp_assoc_lookup_paddr(ctx->asoc,
+ &chunk->dest);
+ }
+
+ /* if we still don't have a new transport, then
+ * use the current active path.
+ */
+ if (!new_transport)
+ new_transport = ctx->asoc->peer.active_path;
+ } else {
+ __u8 type;
+
+ switch (new_transport->state) {
+ case SCTP_INACTIVE:
+ case SCTP_UNCONFIRMED:
+ case SCTP_PF:
+ /* If the chunk is Heartbeat or Heartbeat Ack,
+ * send it to chunk->transport, even if it's
+ * inactive.
+ *
+ * 3.3.6 Heartbeat Acknowledgement:
+ * ...
+ * A HEARTBEAT ACK is always sent to the source IP
+ * address of the IP datagram containing the
+ * HEARTBEAT chunk to which this ack is responding.
+ * ...
+ *
+ * ASCONF_ACKs also must be sent to the source.
+ */
+ type = chunk->chunk_hdr->type;
+ if (type != SCTP_CID_HEARTBEAT &&
+ type != SCTP_CID_HEARTBEAT_ACK &&
+ type != SCTP_CID_ASCONF_ACK)
+ new_transport = ctx->asoc->peer.active_path;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Are we switching transports? Take care of transport locks. */
+ if (new_transport != ctx->transport) {
+ ctx->transport = new_transport;
+ ctx->packet = &ctx->transport->packet;
+
+ if (list_empty(&ctx->transport->send_ready))
+ list_add_tail(&ctx->transport->send_ready,
+ &ctx->transport_list);
+
+ sctp_packet_config(ctx->packet,
+ ctx->asoc->peer.i.init_tag,
+ ctx->asoc->peer.ecn_capable);
+ /* We've switched transports, so apply the
+ * Burst limit to the new transport.
+ */
+ sctp_transport_burst_limited(ctx->transport);
+ }
+}
+
+static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
+{
+ struct sctp_chunk *chunk, *tmp;
+ enum sctp_xmit status;
+ int one_packet, error;
+
+ list_for_each_entry_safe(chunk, tmp, &ctx->q->control_chunk_list, list) {
+ one_packet = 0;
+
+ /* RFC 5061, 5.3
+ * F1) This means that until such time as the ASCONF
+ * containing the add is acknowledged, the sender MUST
+ * NOT use the new IP address as a source for ANY SCTP
+ * packet except on carrying an ASCONF Chunk.
+ */
+ if (ctx->asoc->src_out_of_asoc_ok &&
+ chunk->chunk_hdr->type != SCTP_CID_ASCONF)
+ continue;
+
+ list_del_init(&chunk->list);
+
+ /* Pick the right transport to use. Should always be true for
+ * the first chunk as we don't have a transport by then.
+ */
+ sctp_outq_select_transport(ctx, chunk);
+
+ switch (chunk->chunk_hdr->type) {
+ /* 6.10 Bundling
+ * ...
+ * An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN
+ * COMPLETE with any other chunks. [Send them immediately.]
+ */
+ case SCTP_CID_INIT:
+ case SCTP_CID_INIT_ACK:
+ case SCTP_CID_SHUTDOWN_COMPLETE:
+ error = sctp_packet_singleton(ctx->transport, chunk,
+ ctx->gfp);
+ if (error < 0) {
+ ctx->asoc->base.sk->sk_err = -error;
+ return;
+ }
+ ctx->asoc->stats.octrlchunks++;
+ break;
+
+ case SCTP_CID_ABORT:
+ if (sctp_test_T_bit(chunk))
+ ctx->packet->vtag = ctx->asoc->c.my_vtag;
+ fallthrough;
+
+ /* The following chunks are "response" chunks, i.e.
+ * they are generated in response to something we
+ * received. If we are sending these, then we can
+ * send only 1 packet containing these chunks.
+ */
+ case SCTP_CID_HEARTBEAT_ACK:
+ case SCTP_CID_SHUTDOWN_ACK:
+ case SCTP_CID_COOKIE_ACK:
+ case SCTP_CID_COOKIE_ECHO:
+ case SCTP_CID_ERROR:
+ case SCTP_CID_ECN_CWR:
+ case SCTP_CID_ASCONF_ACK:
+ one_packet = 1;
+ fallthrough;
+
+ case SCTP_CID_HEARTBEAT:
+ if (chunk->pmtu_probe) {
+ error = sctp_packet_singleton(ctx->transport,
+ chunk, ctx->gfp);
+ if (!error)
+ ctx->asoc->stats.octrlchunks++;
+ break;
+ }
+ fallthrough;
+ case SCTP_CID_SACK:
+ case SCTP_CID_SHUTDOWN:
+ case SCTP_CID_ECN_ECNE:
+ case SCTP_CID_ASCONF:
+ case SCTP_CID_FWD_TSN:
+ case SCTP_CID_I_FWD_TSN:
+ case SCTP_CID_RECONF:
+ status = sctp_packet_transmit_chunk(ctx->packet, chunk,
+ one_packet, ctx->gfp);
+ if (status != SCTP_XMIT_OK) {
+ /* put the chunk back */
+ list_add(&chunk->list, &ctx->q->control_chunk_list);
+ break;
+ }
+
+ ctx->asoc->stats.octrlchunks++;
+ /* PR-SCTP C5) If a FORWARD TSN is sent, the
+ * sender MUST assure that at least one T3-rtx
+ * timer is running.
+ */
+ if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN ||
+ chunk->chunk_hdr->type == SCTP_CID_I_FWD_TSN) {
+ sctp_transport_reset_t3_rtx(ctx->transport);
+ ctx->transport->last_time_sent = jiffies;
+ }
+
+ if (chunk == ctx->asoc->strreset_chunk)
+ sctp_transport_reset_reconf_timer(ctx->transport);
+
+ break;
+
+ default:
+ /* We built a chunk with an illegal type! */
+ BUG();
+ }
+ }
+}
+
+/* Returns false if new data shouldn't be sent */
+static bool sctp_outq_flush_rtx(struct sctp_flush_ctx *ctx,
+ int rtx_timeout)
+{
+ int error, start_timer = 0;
+
+ if (ctx->asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
+ return false;
+
+ if (ctx->transport != ctx->asoc->peer.retran_path) {
+ /* Switch transports & prepare the packet. */
+ ctx->transport = ctx->asoc->peer.retran_path;
+ ctx->packet = &ctx->transport->packet;
+
+ if (list_empty(&ctx->transport->send_ready))
+ list_add_tail(&ctx->transport->send_ready,
+ &ctx->transport_list);
+
+ sctp_packet_config(ctx->packet, ctx->asoc->peer.i.init_tag,
+ ctx->asoc->peer.ecn_capable);
+ }
+
+ error = __sctp_outq_flush_rtx(ctx->q, ctx->packet, rtx_timeout,
+ &start_timer, ctx->gfp);
+ if (error < 0)
+ ctx->asoc->base.sk->sk_err = -error;
+
+ if (start_timer) {
+ sctp_transport_reset_t3_rtx(ctx->transport);
+ ctx->transport->last_time_sent = jiffies;
+ }
+
+ /* This can happen on COOKIE-ECHO resend. Only
+ * one chunk can get bundled with a COOKIE-ECHO.
+ */
+ if (ctx->packet->has_cookie_echo)
+ return false;
+
+ /* Don't send new data if there is still data
+ * waiting to retransmit.
+ */
+ if (!list_empty(&ctx->q->retransmit))
+ return false;
+
+ return true;
+}
+
+static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
+ int rtx_timeout)
+{
+ struct sctp_chunk *chunk;
+ enum sctp_xmit status;
+
+ /* Is it OK to send data chunks? */
+ switch (ctx->asoc->state) {
+ case SCTP_STATE_COOKIE_ECHOED:
+ /* Only allow bundling when this packet has a COOKIE-ECHO
+ * chunk.
+ */
+ if (!ctx->packet || !ctx->packet->has_cookie_echo)
+ return;
+
+ fallthrough;
+ case SCTP_STATE_ESTABLISHED:
+ case SCTP_STATE_SHUTDOWN_PENDING:
+ case SCTP_STATE_SHUTDOWN_RECEIVED:
+ break;
+
+ default:
+ /* Do nothing. */
+ return;
+ }
+
+ /* RFC 2960 6.1 Transmission of DATA Chunks
+ *
+ * C) When the time comes for the sender to transmit,
+ * before sending new DATA chunks, the sender MUST
+ * first transmit any outstanding DATA chunks which
+ * are marked for retransmission (limited by the
+ * current cwnd).
+ */
+ if (!list_empty(&ctx->q->retransmit) &&
+ !sctp_outq_flush_rtx(ctx, rtx_timeout))
+ return;
+
+ /* Apply Max.Burst limitation to the current transport in
+ * case it will be used for new data. We are going to
+ * reset it before we return, but we want to apply the limit
+ * to the currently queued data.
+ */
+ if (ctx->transport)
+ sctp_transport_burst_limited(ctx->transport);
+
+ /* Finally, transmit new packets. */
+ while ((chunk = sctp_outq_dequeue_data(ctx->q)) != NULL) {
+ __u32 sid = ntohs(chunk->subh.data_hdr->stream);
+ __u8 stream_state = SCTP_SO(&ctx->asoc->stream, sid)->state;
+
+ /* Has this chunk expired? */
+ if (sctp_chunk_abandoned(chunk)) {
+ sctp_sched_dequeue_done(ctx->q, chunk);
+ sctp_chunk_fail(chunk, 0);
+ sctp_chunk_free(chunk);
+ continue;
+ }
+
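+ /* Don't send on a stream that is currently closed (e.g. while
+ * a stream reset is in progress); put the chunk back and stop.
+ */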
+ if (stream_state == SCTP_STREAM_CLOSED) {
+ sctp_outq_head_data(ctx->q, chunk);
+ break;
+ }
+
+ sctp_outq_select_transport(ctx, chunk);
+
+ pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p skb->users:%d\n",
+ __func__, ctx->q, chunk, chunk && chunk->chunk_hdr ?
+ sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
+ "illegal chunk", ntohl(chunk->subh.data_hdr->tsn),
+ chunk->skb ? chunk->skb->head : NULL, chunk->skb ?
+ refcount_read(&chunk->skb->users) : -1);
+
+ /* Add the chunk to the packet. */
+ status = sctp_packet_transmit_chunk(ctx->packet, chunk, 0,
+ ctx->gfp);
+ if (status != SCTP_XMIT_OK) {
+ /* We could not append this chunk, so put
+ * the chunk back on the output queue.
+ */
+ pr_debug("%s: could not transmit tsn:0x%x, status:%d\n",
+ __func__, ntohl(chunk->subh.data_hdr->tsn),
+ status);
+
+ sctp_outq_head_data(ctx->q, chunk);
+ break;
+ }
+
+ /* If the sender is in the SHUTDOWN-PENDING state,
+ * it MAY set the I-bit in the DATA
+ * chunk header.
+ */
+ if (ctx->asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
+ chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM;
+ if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+ ctx->asoc->stats.ouodchunks++;
+ else
+ ctx->asoc->stats.oodchunks++;
+
+ /* Only now it's safe to consider this
+ * chunk as sent, sched-wise.
+ */
+ sctp_sched_dequeue_done(ctx->q, chunk);
+
+ list_add_tail(&chunk->transmitted_list,
+ &ctx->transport->transmitted);
+
+ sctp_transport_reset_t3_rtx(ctx->transport);
+ ctx->transport->last_time_sent = jiffies;
+
+ /* Only let one DATA chunk get bundled with a
+ * COOKIE-ECHO chunk.
+ */
+ if (ctx->packet->has_cookie_echo)
+ break;
+ }
+}
+
+static void sctp_outq_flush_transports(struct sctp_flush_ctx *ctx)
+{
+ struct sock *sk = ctx->asoc->base.sk;
+ struct list_head *ltransport;
+ struct sctp_packet *packet;
+ struct sctp_transport *t;
+ int error = 0;
+
+ while ((ltransport = sctp_list_dequeue(&ctx->transport_list)) != NULL) {
+ t = list_entry(ltransport, struct sctp_transport, send_ready);
+ packet = &t->packet;
+ if (!sctp_packet_empty(packet)) {
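+ /* Cache this transport's route on the socket so sk_route_caps
+ * match the outgoing dst.
+ */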
+ rcu_read_lock();
+ if (t->dst && __sk_dst_get(sk) != t->dst) {
+ dst_hold(t->dst);
+ sk_setup_caps(sk, t->dst);
+ }
+ rcu_read_unlock();
+ error = sctp_packet_transmit(packet, ctx->gfp);
+ if (error < 0)
+ ctx->q->asoc->base.sk->sk_err = -error;
+ }
+
+ /* Clear the burst limited state, if any */
+ sctp_transport_burst_reset(t);
+ }
+}
+
+/* Try to flush an outqueue.
+ *
+ * Description: Send everything in q which we legally can, subject to
+ * congestion limitations.
+ * Note: This function can be called from multiple contexts, so appropriate
+ * locking must be taken care of. Today we use the sock lock to protect
+ * this function.
+ */
+
+static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
+{
+ struct sctp_flush_ctx ctx = {
+ .q = q,
+ .transport = NULL,
+ .transport_list = LIST_HEAD_INIT(ctx.transport_list),
+ .asoc = q->asoc,
+ .packet = NULL,
+ .gfp = gfp,
+ };
+
+ /* 6.10 Bundling
+ * ...
+ * When bundling control chunks with DATA chunks, an
+ * endpoint MUST place control chunks first in the outbound
+ * SCTP packet. The transmitter MUST transmit DATA chunks
+ * within a SCTP packet in increasing order of TSN.
+ * ...
+ */
+
+ sctp_outq_flush_ctrl(&ctx);
+
+ if (q->asoc->src_out_of_asoc_ok)
+ goto sctp_flush_out;
+
+ sctp_outq_flush_data(&ctx, rtx_timeout);
+
+sctp_flush_out:
+
+ sctp_outq_flush_transports(&ctx);
+}
+
+/* Update unack_data based on the incoming SACK chunk */
+static void sctp_sack_update_unack_data(struct sctp_association *assoc,
+ struct sctp_sackhdr *sack)
+{
+ union sctp_sack_variable *frags;
+ __u16 unack_data;
+ int i;
+
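+ /* Start with every TSN past the Cumulative TSN Ack, then subtract
+ * the TSNs covered by each Gap Ack Block.
+ */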
+ unack_data = assoc->next_tsn - assoc->ctsn_ack_point - 1;
+
+ frags = sack->variable;
+ for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++) {
+ unack_data -= ((ntohs(frags[i].gab.end) -
+ ntohs(frags[i].gab.start) + 1));
+ }
+
+ assoc->unack_data = unack_data;
+}
+
+/* This is where we REALLY process a SACK.
+ *
+ * Process the SACK against the outqueue. Mostly, this just frees
+ * things off the transmitted queue.
+ */
+int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
+{
+ struct sctp_association *asoc = q->asoc;
+ struct sctp_sackhdr *sack = chunk->subh.sack_hdr;
+ struct sctp_transport *transport;
+ struct sctp_chunk *tchunk = NULL;
+ struct list_head *lchunk, *transport_list, *temp;
+ union sctp_sack_variable *frags = sack->variable;
+ __u32 sack_ctsn, ctsn, tsn;
+ __u32 highest_tsn, highest_new_tsn;
+ __u32 sack_a_rwnd;
+ unsigned int outstanding;
+ struct sctp_transport *primary = asoc->peer.primary_path;
+ int count_of_newacks = 0;
+ int gap_ack_blocks;
+ u8 accum_moved = 0;
+
+ /* Grab the association's destination address list. */
+ transport_list = &asoc->peer.transport_addr_list;
+
+ /* SCTP path tracepoint for congestion control debugging. */
+ if (trace_sctp_probe_path_enabled()) {
+ list_for_each_entry(transport, transport_list, transports)
+ trace_sctp_probe_path(transport, asoc);
+ }
+
+ sack_ctsn = ntohl(sack->cum_tsn_ack);
+ gap_ack_blocks = ntohs(sack->num_gap_ack_blocks);
+ asoc->stats.gapcnt += gap_ack_blocks;
+ /*
+ * SFR-CACC algorithm:
+ * On receipt of a SACK the sender SHOULD execute the
+ * following statements.
+ *
+ * 1) If the cumulative ack in the SACK passes next tsn_at_change
+ * on the current primary, the CHANGEOVER_ACTIVE flag SHOULD be
+ * cleared. The CYCLING_CHANGEOVER flag SHOULD also be cleared for
+ * all destinations.
+ * 2) If the SACK contains gap acks and the flag CHANGEOVER_ACTIVE
+ * is set the receiver of the SACK MUST take the following actions:
+ *
+ * A) Initialize the cacc_saw_newack to 0 for all destination
+ * addresses.
+ *
+ * Only bother if changeover_active is set. Otherwise, this is
+ * totally suboptimal to do on every SACK.
+ */
+ if (primary->cacc.changeover_active) {
+ u8 clear_cycling = 0;
+
+ if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) {
+ primary->cacc.changeover_active = 0;
+ clear_cycling = 1;
+ }
+
+ if (clear_cycling || gap_ack_blocks) {
+ list_for_each_entry(transport, transport_list,
+ transports) {
+ if (clear_cycling)
+ transport->cacc.cycling_changeover = 0;
+ if (gap_ack_blocks)
+ transport->cacc.cacc_saw_newack = 0;
+ }
+ }
+ }
+
+ /* Get the highest TSN in the sack. */
+ highest_tsn = sack_ctsn;
+ if (gap_ack_blocks)
+ highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end);
+
+ if (TSN_lt(asoc->highest_sacked, highest_tsn))
+ asoc->highest_sacked = highest_tsn;
+
+ highest_new_tsn = sack_ctsn;
+
+ /* Run through the retransmit queue. Credit bytes received
+ * and free those chunks that we can.
+ */
+ sctp_check_transmitted(q, &q->retransmit, NULL, NULL, sack, &highest_new_tsn);
+
+ /* Run through the transmitted queue.
+ * Credit bytes received and free those chunks which we can.
+ *
+ * This is a MASSIVE candidate for optimization.
+ */
+ list_for_each_entry(transport, transport_list, transports) {
+ sctp_check_transmitted(q, &transport->transmitted,
+ transport, &chunk->source, sack,
+ &highest_new_tsn);
+ /*
+ * SFR-CACC algorithm:
+ * C) Let count_of_newacks be the number of
+ * destinations for which cacc_saw_newack is set.
+ */
+ if (transport->cacc.cacc_saw_newack)
+ count_of_newacks++;
+ }
+
+ /* Move the Cumulative TSN Ack Point if appropriate. */
+ if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) {
+ asoc->ctsn_ack_point = sack_ctsn;
+ accum_moved = 1;
+ }
+
+ if (gap_ack_blocks) {
+
+ if (asoc->fast_recovery && accum_moved)
+ highest_new_tsn = highest_tsn;
+
+ list_for_each_entry(transport, transport_list, transports)
+ sctp_mark_missing(q, &transport->transmitted, transport,
+ highest_new_tsn, count_of_newacks);
+ }
+
+ /* Update unack_data field in the assoc. */
+ sctp_sack_update_unack_data(asoc, sack);
+
+ ctsn = asoc->ctsn_ack_point;
+
+ /* Throw away stuff rotting on the sack queue. */
+ list_for_each_safe(lchunk, temp, &q->sacked) {
+ tchunk = list_entry(lchunk, struct sctp_chunk,
+ transmitted_list);
+ tsn = ntohl(tchunk->subh.data_hdr->tsn);
+ if (TSN_lte(tsn, ctsn)) {
+ list_del_init(&tchunk->transmitted_list);
+ if (asoc->peer.prsctp_capable &&
+ SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
+ asoc->sent_cnt_removable--;
+ sctp_chunk_free(tchunk);
+ }
+ }
+
+ /* ii) Set rwnd equal to the newly received a_rwnd minus the
+ * number of bytes still outstanding after processing the
+ * Cumulative TSN Ack and the Gap Ack Blocks.
+ */
+
+ sack_a_rwnd = ntohl(sack->a_rwnd);
+ asoc->peer.zero_window_announced = !sack_a_rwnd;
+ outstanding = q->outstanding_bytes;
+
+ if (outstanding < sack_a_rwnd)
+ sack_a_rwnd -= outstanding;
+ else
+ sack_a_rwnd = 0;
+
+ asoc->peer.rwnd = sack_a_rwnd;
+
+ asoc->stream.si->generate_ftsn(q, sack_ctsn);
+
+ pr_debug("%s: sack cumulative tsn ack:0x%x\n", __func__, sack_ctsn);
+ pr_debug("%s: cumulative tsn ack of assoc:%p is 0x%x, "
+ "advertised peer ack point:0x%x\n", __func__, asoc, ctsn,
+ asoc->adv_peer_ack_point);
+
+ return sctp_outq_is_empty(q);
+}
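A quick standalone sketch of the clamped rwnd update performed just above
(illustrative only, not part of the kernel sources; the helper name is made up
for this example):

	/* Illustrative only: peer rwnd = a_rwnd minus bytes still in flight,
	 * never below zero.
	 */
	static unsigned int usable_peer_rwnd(unsigned int a_rwnd,
					     unsigned int outstanding)
	{
		return outstanding < a_rwnd ? a_rwnd - outstanding : 0;
	}

	/* usable_peer_rwnd(64000, 24000) == 40000, usable_peer_rwnd(1000, 4000) == 0 */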
+
+/* Is the outqueue empty?
+ * The queue is empty when there is no pending data, no in-flight data
+ * and no pending retransmissions.
+ */
+int sctp_outq_is_empty(const struct sctp_outq *q)
+{
+ return q->out_qlen == 0 && q->outstanding_bytes == 0 &&
+ list_empty(&q->retransmit);
+}
+
+/********************************************************************
+ * 2nd Level Abstractions
+ ********************************************************************/
+
+/* Go through a transport's transmitted list or the association's retransmit
+ * list and move chunks that are acked by the Cumulative TSN Ack to q->sacked.
+ * The retransmit list will not have an associated transport.
+ *
+ * I added coherent debug information output. --xguo
+ *
+ * Instead of printing 'sacked' or 'kept' for each TSN on the
+ * transmitted_queue, we print a range: SACKED: TSN1-TSN2, TSN3, TSN4-TSN5.
+ * KEPT TSN6-TSN7, etc.
+ */
+static void sctp_check_transmitted(struct sctp_outq *q,
+ struct list_head *transmitted_queue,
+ struct sctp_transport *transport,
+ union sctp_addr *saddr,
+ struct sctp_sackhdr *sack,
+ __u32 *highest_new_tsn_in_sack)
+{
+ struct list_head *lchunk;
+ struct sctp_chunk *tchunk;
+ struct list_head tlist;
+ __u32 tsn;
+ __u32 sack_ctsn;
+ __u32 rtt;
+ __u8 restart_timer = 0;
+ int bytes_acked = 0;
+ int migrate_bytes = 0;
+ bool forward_progress = false;
+
+ sack_ctsn = ntohl(sack->cum_tsn_ack);
+
+ INIT_LIST_HEAD(&tlist);
+
+ /* The while loop will skip empty transmitted queues. */
+ while (NULL != (lchunk = sctp_list_dequeue(transmitted_queue))) {
+ tchunk = list_entry(lchunk, struct sctp_chunk,
+ transmitted_list);
+
+ if (sctp_chunk_abandoned(tchunk)) {
+ /* Move the chunk to abandoned list. */
+ sctp_insert_list(&q->abandoned, lchunk);
+
+ /* If this chunk has not been acked, stop
+ * considering it as 'outstanding'.
+ */
+ if (transmitted_queue != &q->retransmit &&
+ !tchunk->tsn_gap_acked) {
+ if (tchunk->transport)
+ tchunk->transport->flight_size -=
+ sctp_data_size(tchunk);
+ q->outstanding_bytes -= sctp_data_size(tchunk);
+ }
+ continue;
+ }
+
+ tsn = ntohl(tchunk->subh.data_hdr->tsn);
+ if (sctp_acked(sack, tsn)) {
+ /* If this queue is the retransmit queue, the
+ * retransmit timer has already reclaimed
+ * the outstanding bytes for this chunk, so only
+ * count bytes associated with a transport.
+ */
+ if (transport && !tchunk->tsn_gap_acked) {
+ /* If this chunk is being used for RTT
+ * measurement, calculate the RTT and update
+ * the RTO using this value.
+ *
+ * 6.3.1 C5) Karn's algorithm: RTT measurements
+ * MUST NOT be made using packets that were
+ * retransmitted (and thus for which it is
+ * ambiguous whether the reply was for the
+ * first instance of the packet or a later
+ * instance).
+ */
+ if (!sctp_chunk_retransmitted(tchunk) &&
+ tchunk->rtt_in_progress) {
+ tchunk->rtt_in_progress = 0;
+ rtt = jiffies - tchunk->sent_at;
+ sctp_transport_update_rto(transport,
+ rtt);
+ }
+
+ if (TSN_lte(tsn, sack_ctsn)) {
+ /*
+ * SFR-CACC algorithm:
+ * 2) If the SACK contains gap acks
+ * and the flag CHANGEOVER_ACTIVE is
+ * set the receiver of the SACK MUST
+ * take the following action:
+ *
+ * B) For each TSN t being acked that
+ * has not been acked in any SACK so
+ * far, set cacc_saw_newack to 1 for
+ * the destination that the TSN was
+ * sent to.
+ */
+ if (sack->num_gap_ack_blocks &&
+ q->asoc->peer.primary_path->cacc.
+ changeover_active)
+ transport->cacc.cacc_saw_newack
+ = 1;
+ }
+ }
+
+ /* If the chunk hasn't been marked as ACKED,
+ * mark it and account bytes_acked if the
+ * chunk had a valid transport (it will not
+ * have a transport if ASCONF had deleted it
+ * while DATA was outstanding).
+ */
+ if (!tchunk->tsn_gap_acked) {
+ tchunk->tsn_gap_acked = 1;
+ if (TSN_lt(*highest_new_tsn_in_sack, tsn))
+ *highest_new_tsn_in_sack = tsn;
+ bytes_acked += sctp_data_size(tchunk);
+ if (!tchunk->transport)
+ migrate_bytes += sctp_data_size(tchunk);
+ forward_progress = true;
+ }
+
+ if (TSN_lte(tsn, sack_ctsn)) {
+ /* RFC 2960 6.3.2 Retransmission Timer Rules
+ *
+ * R3) Whenever a SACK is received
+ * that acknowledges the DATA chunk
+ * with the earliest outstanding TSN
+ * for that address, restart T3-rtx
+ * timer for that address with its
+ * current RTO.
+ */
+ restart_timer = 1;
+ forward_progress = true;
+
+ list_add_tail(&tchunk->transmitted_list,
+ &q->sacked);
+ } else {
+ /* RFC2960 7.2.4, sctpimpguide-05 2.8.2
+ * M2) Each time a SACK arrives reporting
+ * 'Stray DATA chunk(s)' record the highest TSN
+ * reported as newly acknowledged, call this
+ * value 'HighestTSNinSack'. A newly
+ * acknowledged DATA chunk is one not
+ * previously acknowledged in a SACK.
+ *
+ * When the SCTP sender of data receives a SACK
+ * chunk that acknowledges, for the first time,
+ * the receipt of a DATA chunk, all the still
+ * unacknowledged DATA chunks whose TSN is
+ * older than that newly acknowledged DATA
+ * chunk, are qualified as 'Stray DATA chunks'.
+ */
+ list_add_tail(lchunk, &tlist);
+ }
+ } else {
+ if (tchunk->tsn_gap_acked) {
+ pr_debug("%s: receiver reneged on data TSN:0x%x\n",
+ __func__, tsn);
+
+ tchunk->tsn_gap_acked = 0;
+
+ if (tchunk->transport)
+ bytes_acked -= sctp_data_size(tchunk);
+
+ /* RFC 2960 6.3.2 Retransmission Timer Rules
+ *
+ * R4) Whenever a SACK is received missing a
+ * TSN that was previously acknowledged via a
+ * Gap Ack Block, start T3-rtx for the
+ * destination address to which the DATA
+ * chunk was originally
+ * transmitted if it is not already running.
+ */
+ restart_timer = 1;
+ }
+
+ list_add_tail(lchunk, &tlist);
+ }
+ }
+
+ if (transport) {
+ if (bytes_acked) {
+ struct sctp_association *asoc = transport->asoc;
+
+ /* We may have counted DATA that was migrated
+ * to this transport due to DEL-IP operation.
+ * Subtract those bytes, since the were never
+ * send on this transport and shouldn't be
+ * credited to this transport.
+ */
+ bytes_acked -= migrate_bytes;
+
+ /* 8.2. When an outstanding TSN is acknowledged,
+ * the endpoint shall clear the error counter of
+ * the destination transport address to which the
+ * DATA chunk was last sent.
+ * The association's overall error counter is
+ * also cleared.
+ */
+ transport->error_count = 0;
+ transport->asoc->overall_error_count = 0;
+ forward_progress = true;
+
+ /*
+ * While in SHUTDOWN PENDING, we may have started
+ * the T5 shutdown guard timer after reaching the
+ * retransmission limit. Stop that timer as soon
+ * as the receiver acknowledged any data.
+ */
+ if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING &&
+ del_timer(&asoc->timers
+ [SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]))
+ sctp_association_put(asoc);
+
+ /* Mark the destination transport address as
+ * active if it is not so marked.
+ */
+ if ((transport->state == SCTP_INACTIVE ||
+ transport->state == SCTP_UNCONFIRMED) &&
+ sctp_cmp_addr_exact(&transport->ipaddr, saddr)) {
+ sctp_assoc_control_transport(
+ transport->asoc,
+ transport,
+ SCTP_TRANSPORT_UP,
+ SCTP_RECEIVED_SACK);
+ }
+
+ sctp_transport_raise_cwnd(transport, sack_ctsn,
+ bytes_acked);
+
+ transport->flight_size -= bytes_acked;
+ if (transport->flight_size == 0)
+ transport->partial_bytes_acked = 0;
+ q->outstanding_bytes -= bytes_acked + migrate_bytes;
+ } else {
+ /* RFC 2960 6.1, sctpimpguide-06 2.15.2
+ * When a sender is doing zero window probing, it
+ * should not timeout the association if it continues
+ * to receive new packets from the receiver. The
+ * reason is that the receiver MAY keep its window
+ * closed for an indefinite time.
+ * A sender is doing zero window probing when the
+ * receiver's advertised window is zero, and there is
+ * only one data chunk in flight to the receiver.
+ *
+ * Allow the association to timeout while in SHUTDOWN
+ * PENDING or SHUTDOWN RECEIVED in case the receiver
+ * stays in zero window mode forever.
+ */
+ if (!q->asoc->peer.rwnd &&
+ !list_empty(&tlist) &&
+ (sack_ctsn+2 == q->asoc->next_tsn) &&
+ q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) {
+ pr_debug("%s: sack received for zero window "
+ "probe:%u\n", __func__, sack_ctsn);
+
+ q->asoc->overall_error_count = 0;
+ transport->error_count = 0;
+ }
+ }
+
+ /* RFC 2960 6.3.2 Retransmission Timer Rules
+ *
+ * R2) Whenever all outstanding data sent to an address have
+ * been acknowledged, turn off the T3-rtx timer of that
+ * address.
+ */
+ if (!transport->flight_size) {
+ if (del_timer(&transport->T3_rtx_timer))
+ sctp_transport_put(transport);
+ } else if (restart_timer) {
+ if (!mod_timer(&transport->T3_rtx_timer,
+ jiffies + transport->rto))
+ sctp_transport_hold(transport);
+ }
+
+ if (forward_progress) {
+ if (transport->dst)
+ sctp_transport_dst_confirm(transport);
+ }
+ }
+
+ list_splice(&tlist, transmitted_queue);
+}
+
+/* Mark chunks as missing; they may consequently get retransmitted. */
+static void sctp_mark_missing(struct sctp_outq *q,
+ struct list_head *transmitted_queue,
+ struct sctp_transport *transport,
+ __u32 highest_new_tsn_in_sack,
+ int count_of_newacks)
+{
+ struct sctp_chunk *chunk;
+ __u32 tsn;
+ char do_fast_retransmit = 0;
+ struct sctp_association *asoc = q->asoc;
+ struct sctp_transport *primary = asoc->peer.primary_path;
+
+ list_for_each_entry(chunk, transmitted_queue, transmitted_list) {
+
+ tsn = ntohl(chunk->subh.data_hdr->tsn);
+
+ /* RFC 2960 7.2.4, sctpimpguide-05 2.8.2 M3) Examine all
+ * 'Unacknowledged TSN's', if the TSN number of an
+ * 'Unacknowledged TSN' is smaller than the 'HighestTSNinSack'
+ * value, increment the 'TSN.Missing.Report' count on that
+ * chunk if it has NOT been fast retransmitted or marked for
+ * fast retransmit already.
+ */
+ if (chunk->fast_retransmit == SCTP_CAN_FRTX &&
+ !chunk->tsn_gap_acked &&
+ TSN_lt(tsn, highest_new_tsn_in_sack)) {
+
+ /* SFR-CACC may require us to skip marking
+ * this chunk as missing.
+ */
+ if (!transport || !sctp_cacc_skip(primary,
+ chunk->transport,
+ count_of_newacks, tsn)) {
+ chunk->tsn_missing_report++;
+
+ pr_debug("%s: tsn:0x%x missing counter:%d\n",
+ __func__, tsn, chunk->tsn_missing_report);
+ }
+ }
+ /*
+ * M4) If any DATA chunk is found to have a
+ * 'TSN.Missing.Report'
+ * value larger than or equal to 3, mark that chunk for
+ * retransmission and start the fast retransmit procedure.
+ */
+
+ if (chunk->tsn_missing_report >= 3) {
+ chunk->fast_retransmit = SCTP_NEED_FRTX;
+ do_fast_retransmit = 1;
+ }
+ }
+
+ if (transport) {
+ if (do_fast_retransmit)
+ sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX);
+
+ pr_debug("%s: transport:%p, cwnd:%d, ssthresh:%d, "
+ "flight_size:%d, pba:%d\n", __func__, transport,
+ transport->cwnd, transport->ssthresh,
+ transport->flight_size, transport->partial_bytes_acked);
+ }
+}
+
+/* Is the given TSN acked by this packet? */
+static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
+{
+ __u32 ctsn = ntohl(sack->cum_tsn_ack);
+ union sctp_sack_variable *frags;
+ __u16 tsn_offset, blocks;
+ int i;
+
+ if (TSN_lte(tsn, ctsn))
+ goto pass;
+
+ /* 3.3.4 Selective Acknowledgment (SACK) (3):
+ *
+ * Gap Ack Blocks:
+ * These fields contain the Gap Ack Blocks. They are repeated
+ * for each Gap Ack Block up to the number of Gap Ack Blocks
+ * defined in the Number of Gap Ack Blocks field. All DATA
+ * chunks with TSNs greater than or equal to (Cumulative TSN
+ * Ack + Gap Ack Block Start) and less than or equal to
+ * (Cumulative TSN Ack + Gap Ack Block End) of each Gap Ack
+ * Block are assumed to have been received correctly.
+ */
+
+ frags = sack->variable;
+ blocks = ntohs(sack->num_gap_ack_blocks);
+ tsn_offset = tsn - ctsn;
+ for (i = 0; i < blocks; ++i) {
+ if (tsn_offset >= ntohs(frags[i].gab.start) &&
+ tsn_offset <= ntohs(frags[i].gab.end))
+ goto pass;
+ }
+
+ return 0;
+pass:
+ return 1;
+}
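For reference, a minimal standalone version of the same membership test
(illustrative only, not part of the kernel sources): offsets are taken
relative to the cumulative TSN and both block edges are inclusive, per the Gap
Ack Block definition quoted in the comment above.

	/* Illustrative only: mirrors the offset comparison in sctp_acked(). */
	static int tsn_in_gap_block(unsigned int tsn, unsigned int ctsn,
				    unsigned int start, unsigned int end)
	{
		unsigned int off = tsn - ctsn;	/* offset relative to the CTSN */

		return off >= start && off <= end;	/* inclusive on both ends */
	}

	/* With ctsn = 100 and a block covering offsets 2..5, TSNs 102..105 are
	 * acked: tsn_in_gap_block(103, 100, 2, 5) == 1,
	 *        tsn_in_gap_block(101, 100, 2, 5) == 0.
	 */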
+
+static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist,
+ int nskips, __be16 stream)
+{
+ int i;
+
+ for (i = 0; i < nskips; i++) {
+ if (skiplist[i].stream == stream)
+ return i;
+ }
+ return i;
+}
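A usage sketch for the helper above (illustrative only; 'stream' and 'ssn'
stand in for the fields of the chunk being abandoned): a return value equal to
nskips means the stream has no entry yet, so the caller fills a fresh slot and
grows the list, otherwise the existing slot is simply overwritten with the
newer SSN. This is how sctp_generate_fwdtsn() below keeps at most one skip
entry per stream.

	/* Illustrative only: per-stream dedup of FORWARD TSN skip entries. */
	struct sctp_fwdtsn_skip skips[10];
	int nskips = 0;

	int pos = sctp_get_skip_pos(skips, nskips, stream);

	skips[pos].stream = stream;	/* overwrite existing slot or fill a new one */
	skips[pos].ssn = ssn;
	if (pos == nskips)		/* stream was not in the list yet */
		nskips++;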
+
+/* Create and add a fwdtsn chunk to the outq's control queue if needed. */
+void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
+{
+ struct sctp_association *asoc = q->asoc;
+ struct sctp_chunk *ftsn_chunk = NULL;
+ struct sctp_fwdtsn_skip ftsn_skip_arr[10];
+ int nskips = 0;
+ int skip_pos = 0;
+ __u32 tsn;
+ struct sctp_chunk *chunk;
+ struct list_head *lchunk, *temp;
+
+ if (!asoc->peer.prsctp_capable)
+ return;
+
+ /* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the
+ * received SACK.
+ *
+ * If (Advanced.Peer.Ack.Point < SackCumAck), then update
+ * Advanced.Peer.Ack.Point to be equal to SackCumAck.
+ */
+ if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
+ asoc->adv_peer_ack_point = ctsn;
+
+ /* PR-SCTP C2) Try to further advance the "Advanced.Peer.Ack.Point"
+ * locally, that is, to move "Advanced.Peer.Ack.Point" up as long as
+ * the chunk next in the out-queue space is marked as "abandoned" as
+ * shown in the following example:
+ *
+ * Assuming that a SACK arrived with the Cumulative TSN ACK 102
+ * and the Advanced.Peer.Ack.Point is updated to this value:
+ *
+ * out-queue at the end of ==> out-queue after Adv.Ack.Point
+ * normal SACK processing local advancement
+ * ... ...
+ * Adv.Ack.Pt-> 102 acked 102 acked
+ * 103 abandoned 103 abandoned
+ * 104 abandoned Adv.Ack.P-> 104 abandoned
+ * 105 105
+ * 106 acked 106 acked
+ * ... ...
+ *
+ * In this example, the data sender successfully advanced the
+ * "Advanced.Peer.Ack.Point" from 102 to 104 locally.
+ */
+ list_for_each_safe(lchunk, temp, &q->abandoned) {
+ chunk = list_entry(lchunk, struct sctp_chunk,
+ transmitted_list);
+ tsn = ntohl(chunk->subh.data_hdr->tsn);
+
+ /* Remove any chunks in the abandoned queue that are acked by
+ * the ctsn.
+ */
+ if (TSN_lte(tsn, ctsn)) {
+ list_del_init(lchunk);
+ sctp_chunk_free(chunk);
+ } else {
+ if (TSN_lte(tsn, asoc->adv_peer_ack_point+1)) {
+ asoc->adv_peer_ack_point = tsn;
+ if (chunk->chunk_hdr->flags &
+ SCTP_DATA_UNORDERED)
+ continue;
+ skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0],
+ nskips,
+ chunk->subh.data_hdr->stream);
+ ftsn_skip_arr[skip_pos].stream =
+ chunk->subh.data_hdr->stream;
+ ftsn_skip_arr[skip_pos].ssn =
+ chunk->subh.data_hdr->ssn;
+ if (skip_pos == nskips)
+ nskips++;
+ if (nskips == 10)
+ break;
+ } else
+ break;
+ }
+ }
+
+ /* PR-SCTP C3) If, after step C1 and C2, the "Advanced.Peer.Ack.Point"
+ * is greater than the Cumulative TSN ACK carried in the received
+ * SACK, the data sender MUST send the data receiver a FORWARD TSN
+ * chunk containing the latest value of the
+ * "Advanced.Peer.Ack.Point".
+ *
+ * C4) For each "abandoned" TSN the sender of the FORWARD TSN SHOULD
+ * list each stream and sequence number in the forwarded TSN. This
+ * information will enable the receiver to easily find any
+ * stranded TSN's waiting on stream reorder queues. Each stream
+ * SHOULD only be reported once; this means that if multiple
+ * abandoned messages occur in the same stream then only the
+ * highest abandoned stream sequence number is reported. If the
+ * total size of the FORWARD TSN does NOT fit in a single MTU then
+ * the sender of the FORWARD TSN SHOULD lower the
+ * Advanced.Peer.Ack.Point to the last TSN that will fit in a
+ * single MTU.
+ */
+ if (asoc->adv_peer_ack_point > ctsn)
+ ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point,
+ nskips, &ftsn_skip_arr[0]);
+
+ if (ftsn_chunk) {
+ list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
+ SCTP_INC_STATS(asoc->base.net, SCTP_MIB_OUTCTRLCHUNKS);
+ }
+}
diff --git a/net/sctp/primitive.c b/net/sctp/primitive.c
new file mode 100644
index 000000000..782d673c3
--- /dev/null
+++ b/net/sctp/primitive.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* SCTP kernel implementation
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * These functions implement the SCTP primitive functions from Section 10.
+ *
+ * Note that the descriptions from the specification are USER level
+ * functions--this file contains the functions that populate the struct proto
+ * for SCTP, which is the BOTTOM of the sockets interface.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Narasimha Budihal <narasimha@refcode.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Ardelle Fan <ardelle.fan@intel.com>
+ * Kevin Gao <kevin.gao@intel.com>
+ */
+
+#include <linux/types.h>
+#include <linux/list.h> /* For struct list_head */
+#include <linux/socket.h>
+#include <linux/ip.h>
+#include <linux/time.h> /* For struct timeval */
+#include <linux/gfp.h>
+#include <net/sock.h>
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+
+#define DECLARE_PRIMITIVE(name) \
+/* This is called in the code as sctp_primitive_ ## name. */ \
+int sctp_primitive_ ## name(struct net *net, struct sctp_association *asoc, \
+ void *arg) { \
+ int error = 0; \
+ enum sctp_event_type event_type; union sctp_subtype subtype; \
+ enum sctp_state state; \
+ struct sctp_endpoint *ep; \
+ \
+ event_type = SCTP_EVENT_T_PRIMITIVE; \
+ subtype = SCTP_ST_PRIMITIVE(SCTP_PRIMITIVE_ ## name); \
+ state = asoc ? asoc->state : SCTP_STATE_CLOSED; \
+ ep = asoc ? asoc->ep : NULL; \
+ \
+ error = sctp_do_sm(net, event_type, subtype, state, ep, asoc, \
+ arg, GFP_KERNEL); \
+ return error; \
+}
+
+/* 10.1 ULP-to-SCTP
+ * B) Associate
+ *
+ * Format: ASSOCIATE(local SCTP instance name, destination transport addr,
+ * outbound stream count)
+ * -> association id [,destination transport addr list] [,outbound stream
+ * count]
+ *
+ * This primitive allows the upper layer to initiate an association to a
+ * specific peer endpoint.
+ *
+ * This version assumes that asoc is fully populated with the initial
+ * parameters. We then return a traditional kernel indicator of
+ * success or failure.
+ */
+
+/* This is called in the code as sctp_primitive_ASSOCIATE. */
+
+DECLARE_PRIMITIVE(ASSOCIATE)
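Roughly, the declaration above expands to the following (a hand-written
approximation of the preprocessor output, shown only for illustration): every
primitive becomes a thin wrapper that feeds one event into the SCTP state
machine via sctp_do_sm().

	/* Approximate expansion of DECLARE_PRIMITIVE(ASSOCIATE), illustrative only. */
	int sctp_primitive_ASSOCIATE(struct net *net,
				     struct sctp_association *asoc, void *arg)
	{
		enum sctp_event_type event_type = SCTP_EVENT_T_PRIMITIVE;
		union sctp_subtype subtype =
			SCTP_ST_PRIMITIVE(SCTP_PRIMITIVE_ASSOCIATE);
		enum sctp_state state = asoc ? asoc->state : SCTP_STATE_CLOSED;
		struct sctp_endpoint *ep = asoc ? asoc->ep : NULL;

		return sctp_do_sm(net, event_type, subtype, state, ep, asoc,
				  arg, GFP_KERNEL);
	}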
+
+/* 10.1 ULP-to-SCTP
+ * C) Shutdown
+ *
+ * Format: SHUTDOWN(association id)
+ * -> result
+ *
+ * Gracefully closes an association. Any locally queued user data
+ * will be delivered to the peer. The association will be terminated only
+ * after the peer acknowledges all the SCTP packets sent. A success code
+ * will be returned on successful termination of the association. If
+ * attempting to terminate the association results in a failure, an error
+ * code shall be returned.
+ */
+
+DECLARE_PRIMITIVE(SHUTDOWN);
+
+/* 10.1 ULP-to-SCTP
+ * C) Abort
+ *
+ * Format: Abort(association id [, cause code])
+ * -> result
+ *
+ * Ungracefully closes an association. Any locally queued user data
+ * will be discarded and an ABORT chunk is sent to the peer. A success
+ * code will be returned on successful abortion of the association. If
+ * attempting to abort the association results in a failure, an error
+ * code shall be returned.
+ */
+
+DECLARE_PRIMITIVE(ABORT);
+
+/* 10.1 ULP-to-SCTP
+ * E) Send
+ *
+ * Format: SEND(association id, buffer address, byte count [,context]
+ * [,stream id] [,life time] [,destination transport address]
+ * [,unorder flag] [,no-bundle flag] [,payload protocol-id] )
+ * -> result
+ *
+ * This is the main method to send user data via SCTP.
+ *
+ * Mandatory attributes:
+ *
+ * o association id - local handle to the SCTP association
+ *
+ * o buffer address - the location where the user message to be
+ * transmitted is stored;
+ *
+ * o byte count - The size of the user data in number of bytes;
+ *
+ * Optional attributes:
+ *
+ * o context - an optional 32 bit integer that will be carried in the
+ * sending failure notification to the ULP if the transportation of
+ * this User Message fails.
+ *
+ * o stream id - to indicate which stream to send the data on. If not
+ * specified, stream 0 will be used.
+ *
+ * o life time - specifies the life time of the user data. The user data
+ * will not be sent by SCTP after the life time expires. This
+ * parameter can be used to avoid efforts to transmit stale
+ * user messages. SCTP notifies the ULP if the data cannot be
+ * initiated to transport (i.e. sent to the destination via SCTP's
+ * send primitive) within the life time variable. However, the
+ * user data will be transmitted if SCTP has attempted to transmit a
+ * chunk before the life time expired.
+ *
+ * o destination transport address - specified as one of the destination
+ * transport addresses of the peer endpoint to which this packet
+ * should be sent. Whenever possible, SCTP should use this destination
+ * transport address for sending the packets, instead of the current
+ * primary path.
+ *
+ * o unorder flag - this flag, if present, indicates that the user
+ * would like the data delivered in an unordered fashion to the peer
+ * (i.e., the U flag is set to 1 on all DATA chunks carrying this
+ * message).
+ *
+ * o no-bundle flag - instructs SCTP not to bundle this user data with
+ * other outbound DATA chunks. SCTP MAY still bundle even when
+ * this flag is present, when faced with network congestion.
+ *
+ * o payload protocol-id - A 32 bit unsigned integer that is to be
+ * passed to the peer indicating the type of payload protocol data
+ * being transmitted. This value is passed as opaque data by SCTP.
+ */
+
+DECLARE_PRIMITIVE(SEND);
+
+/* 10.1 ULP-to-SCTP
+ * J) Request Heartbeat
+ *
+ * Format: REQUESTHEARTBEAT(association id, destination transport address)
+ *
+ * -> result
+ *
+ * Instructs the local endpoint to perform a HeartBeat on the specified
+ * destination transport address of the given association. The returned
+ * result should indicate whether the transmission of the HEARTBEAT
+ * chunk to the destination address is successful.
+ *
+ * Mandatory attributes:
+ *
+ * o association id - local handle to the SCTP association
+ *
+ * o destination transport address - the transport address of the
+ * association on which a heartbeat should be issued.
+ */
+
+DECLARE_PRIMITIVE(REQUESTHEARTBEAT);
+
+/* ADDIP
+* 3.1.1 Address Configuration Change Chunk (ASCONF)
+*
+* This chunk is used to communicate to the remote endpoint one of the
+* configuration change requests that MUST be acknowledged. The
+* information carried in the ASCONF Chunk uses the form of a
+* Type-Length-Value (TLV), as described in "3.2.1 Optional/
+* Variable-length Parameter Format" in RFC2960 [5], for all variable
+* parameters.
+*/
+
+DECLARE_PRIMITIVE(ASCONF);
+
+/* RE-CONFIG 5.1 */
+DECLARE_PRIMITIVE(RECONF);
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
new file mode 100644
index 000000000..ec00ee75d
--- /dev/null
+++ b/net/sctp/proc.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* SCTP kernel implementation
+ * Copyright (c) 2003 International Business Machines, Corp.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Sridhar Samudrala <sri@us.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/seq_file.h>
+#include <linux/init.h>
+#include <linux/export.h>
+#include <net/sctp/sctp.h>
+#include <net/ip.h> /* for snmp_fold_field */
+
+static const struct snmp_mib sctp_snmp_list[] = {
+ SNMP_MIB_ITEM("SctpCurrEstab", SCTP_MIB_CURRESTAB),
+ SNMP_MIB_ITEM("SctpActiveEstabs", SCTP_MIB_ACTIVEESTABS),
+ SNMP_MIB_ITEM("SctpPassiveEstabs", SCTP_MIB_PASSIVEESTABS),
+ SNMP_MIB_ITEM("SctpAborteds", SCTP_MIB_ABORTEDS),
+ SNMP_MIB_ITEM("SctpShutdowns", SCTP_MIB_SHUTDOWNS),
+ SNMP_MIB_ITEM("SctpOutOfBlues", SCTP_MIB_OUTOFBLUES),
+ SNMP_MIB_ITEM("SctpChecksumErrors", SCTP_MIB_CHECKSUMERRORS),
+ SNMP_MIB_ITEM("SctpOutCtrlChunks", SCTP_MIB_OUTCTRLCHUNKS),
+ SNMP_MIB_ITEM("SctpOutOrderChunks", SCTP_MIB_OUTORDERCHUNKS),
+ SNMP_MIB_ITEM("SctpOutUnorderChunks", SCTP_MIB_OUTUNORDERCHUNKS),
+ SNMP_MIB_ITEM("SctpInCtrlChunks", SCTP_MIB_INCTRLCHUNKS),
+ SNMP_MIB_ITEM("SctpInOrderChunks", SCTP_MIB_INORDERCHUNKS),
+ SNMP_MIB_ITEM("SctpInUnorderChunks", SCTP_MIB_INUNORDERCHUNKS),
+ SNMP_MIB_ITEM("SctpFragUsrMsgs", SCTP_MIB_FRAGUSRMSGS),
+ SNMP_MIB_ITEM("SctpReasmUsrMsgs", SCTP_MIB_REASMUSRMSGS),
+ SNMP_MIB_ITEM("SctpOutSCTPPacks", SCTP_MIB_OUTSCTPPACKS),
+ SNMP_MIB_ITEM("SctpInSCTPPacks", SCTP_MIB_INSCTPPACKS),
+ SNMP_MIB_ITEM("SctpT1InitExpireds", SCTP_MIB_T1_INIT_EXPIREDS),
+ SNMP_MIB_ITEM("SctpT1CookieExpireds", SCTP_MIB_T1_COOKIE_EXPIREDS),
+ SNMP_MIB_ITEM("SctpT2ShutdownExpireds", SCTP_MIB_T2_SHUTDOWN_EXPIREDS),
+ SNMP_MIB_ITEM("SctpT3RtxExpireds", SCTP_MIB_T3_RTX_EXPIREDS),
+ SNMP_MIB_ITEM("SctpT4RtoExpireds", SCTP_MIB_T4_RTO_EXPIREDS),
+ SNMP_MIB_ITEM("SctpT5ShutdownGuardExpireds", SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS),
+ SNMP_MIB_ITEM("SctpDelaySackExpireds", SCTP_MIB_DELAY_SACK_EXPIREDS),
+ SNMP_MIB_ITEM("SctpAutocloseExpireds", SCTP_MIB_AUTOCLOSE_EXPIREDS),
+ SNMP_MIB_ITEM("SctpT3Retransmits", SCTP_MIB_T3_RETRANSMITS),
+ SNMP_MIB_ITEM("SctpPmtudRetransmits", SCTP_MIB_PMTUD_RETRANSMITS),
+ SNMP_MIB_ITEM("SctpFastRetransmits", SCTP_MIB_FAST_RETRANSMITS),
+ SNMP_MIB_ITEM("SctpInPktSoftirq", SCTP_MIB_IN_PKT_SOFTIRQ),
+ SNMP_MIB_ITEM("SctpInPktBacklog", SCTP_MIB_IN_PKT_BACKLOG),
+ SNMP_MIB_ITEM("SctpInPktDiscards", SCTP_MIB_IN_PKT_DISCARDS),
+ SNMP_MIB_ITEM("SctpInDataChunkDiscards", SCTP_MIB_IN_DATA_CHUNK_DISCARDS),
+ SNMP_MIB_SENTINEL
+};
+
+/* Display sctp snmp mib statistics(/proc/net/sctp/snmp). */
+static int sctp_snmp_seq_show(struct seq_file *seq, void *v)
+{
+ unsigned long buff[SCTP_MIB_MAX];
+ struct net *net = seq->private;
+ int i;
+
+ memset(buff, 0, sizeof(unsigned long) * SCTP_MIB_MAX);
+
+ snmp_get_cpu_field_batch(buff, sctp_snmp_list,
+ net->sctp.sctp_statistics);
+ for (i = 0; sctp_snmp_list[i].name; i++)
+ seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name,
+ buff[i]);
+
+ return 0;
+}
+
+/* Dump local addresses of an association/endpoint. */
+static void sctp_seq_dump_local_addrs(struct seq_file *seq, struct sctp_ep_common *epb)
+{
+ struct sctp_association *asoc;
+ struct sctp_sockaddr_entry *laddr;
+ struct sctp_transport *peer;
+ union sctp_addr *addr, *primary = NULL;
+ struct sctp_af *af;
+
+ if (epb->type == SCTP_EP_TYPE_ASSOCIATION) {
+ asoc = sctp_assoc(epb);
+
+ peer = asoc->peer.primary_path;
+ if (unlikely(peer == NULL)) {
+ WARN(1, "Association %p with NULL primary path!\n", asoc);
+ return;
+ }
+
+ primary = &peer->saddr;
+ }
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(laddr, &epb->bind_addr.address_list, list) {
+ if (!laddr->valid)
+ continue;
+
+ addr = &laddr->a;
+ af = sctp_get_af_specific(addr->sa.sa_family);
+ if (primary && af->cmp_addr(addr, primary)) {
+ seq_printf(seq, "*");
+ }
+ af->seq_dump_addr(seq, addr);
+ }
+ rcu_read_unlock();
+}
+
+/* Dump remote addresses of an association. */
+static void sctp_seq_dump_remote_addrs(struct seq_file *seq, struct sctp_association *assoc)
+{
+ struct sctp_transport *transport;
+ union sctp_addr *addr, *primary;
+ struct sctp_af *af;
+
+ primary = &assoc->peer.primary_addr;
+ list_for_each_entry_rcu(transport, &assoc->peer.transport_addr_list,
+ transports) {
+ addr = &transport->ipaddr;
+
+ af = sctp_get_af_specific(addr->sa.sa_family);
+ if (af->cmp_addr(addr, primary)) {
+ seq_printf(seq, "*");
+ }
+ af->seq_dump_addr(seq, addr);
+ }
+}
+
+static void *sctp_eps_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ if (*pos >= sctp_ep_hashsize)
+ return NULL;
+
+ if (*pos < 0)
+ *pos = 0;
+
+ if (*pos == 0)
+ seq_printf(seq, " ENDPT SOCK STY SST HBKT LPORT UID INODE LADDRS\n");
+
+ return (void *)pos;
+}
+
+static void sctp_eps_seq_stop(struct seq_file *seq, void *v)
+{
+}
+
+
+static void *sctp_eps_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ if (++*pos >= sctp_ep_hashsize)
+ return NULL;
+
+ return pos;
+}
+
+
+/* Display sctp endpoints (/proc/net/sctp/eps). */
+static int sctp_eps_seq_show(struct seq_file *seq, void *v)
+{
+ struct sctp_hashbucket *head;
+ struct sctp_endpoint *ep;
+ struct sock *sk;
+ int hash = *(loff_t *)v;
+
+ if (hash >= sctp_ep_hashsize)
+ return -ENOMEM;
+
+ head = &sctp_ep_hashtable[hash];
+ read_lock_bh(&head->lock);
+ sctp_for_each_hentry(ep, &head->chain) {
+ sk = ep->base.sk;
+ if (!net_eq(sock_net(sk), seq_file_net(seq)))
+ continue;
+ seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5u %5lu ", ep, sk,
+ sctp_sk(sk)->type, sk->sk_state, hash,
+ ep->base.bind_addr.port,
+ from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
+ sock_i_ino(sk));
+
+ sctp_seq_dump_local_addrs(seq, &ep->base);
+ seq_printf(seq, "\n");
+ }
+ read_unlock_bh(&head->lock);
+
+ return 0;
+}
+
+static const struct seq_operations sctp_eps_ops = {
+ .start = sctp_eps_seq_start,
+ .next = sctp_eps_seq_next,
+ .stop = sctp_eps_seq_stop,
+ .show = sctp_eps_seq_show,
+};
+
+struct sctp_ht_iter {
+ struct seq_net_private p;
+ struct rhashtable_iter hti;
+};
+
+static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ struct sctp_ht_iter *iter = seq->private;
+
+ sctp_transport_walk_start(&iter->hti);
+
+ return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos);
+}
+
+static void sctp_transport_seq_stop(struct seq_file *seq, void *v)
+{
+ struct sctp_ht_iter *iter = seq->private;
+
+ if (v && v != SEQ_START_TOKEN) {
+ struct sctp_transport *transport = v;
+
+ sctp_transport_put(transport);
+ }
+
+ sctp_transport_walk_stop(&iter->hti);
+}
+
+static void *sctp_transport_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct sctp_ht_iter *iter = seq->private;
+
+ if (v && v != SEQ_START_TOKEN) {
+ struct sctp_transport *transport = v;
+
+ sctp_transport_put(transport);
+ }
+
+ ++*pos;
+
+ return sctp_transport_get_next(seq_file_net(seq), &iter->hti);
+}
+
+/* Display sctp associations (/proc/net/sctp/assocs). */
+static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
+{
+ struct sctp_transport *transport;
+ struct sctp_association *assoc;
+ struct sctp_ep_common *epb;
+ struct sock *sk;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_printf(seq, " ASSOC SOCK STY SST ST HBKT "
+ "ASSOC-ID TX_QUEUE RX_QUEUE UID INODE LPORT "
+ "RPORT LADDRS <-> RADDRS "
+ "HBINT INS OUTS MAXRT T1X T2X RTXC "
+ "wmema wmemq sndbuf rcvbuf\n");
+ return 0;
+ }
+
+ transport = (struct sctp_transport *)v;
+ assoc = transport->asoc;
+ epb = &assoc->base;
+ sk = epb->sk;
+
+ seq_printf(seq,
+ "%8pK %8pK %-3d %-3d %-2d %-4d "
+ "%4d %8d %8d %7u %5lu %-5d %5d ",
+ assoc, sk, sctp_sk(sk)->type, sk->sk_state,
+ assoc->state, 0,
+ assoc->assoc_id,
+ assoc->sndbuf_used,
+ atomic_read(&assoc->rmem_alloc),
+ from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
+ sock_i_ino(sk),
+ epb->bind_addr.port,
+ assoc->peer.port);
+ seq_printf(seq, " ");
+ sctp_seq_dump_local_addrs(seq, epb);
+ seq_printf(seq, "<-> ");
+ sctp_seq_dump_remote_addrs(seq, assoc);
+ seq_printf(seq, "\t%8lu %5d %5d %4d %4d %4d %8d "
+ "%8d %8d %8d %8d",
+ assoc->hbinterval, assoc->stream.incnt,
+ assoc->stream.outcnt, assoc->max_retrans,
+ assoc->init_retries, assoc->shutdown_retries,
+ assoc->rtx_data_chunks,
+ refcount_read(&sk->sk_wmem_alloc),
+ READ_ONCE(sk->sk_wmem_queued),
+ sk->sk_sndbuf,
+ sk->sk_rcvbuf);
+ seq_printf(seq, "\n");
+
+ return 0;
+}
+
+static const struct seq_operations sctp_assoc_ops = {
+ .start = sctp_transport_seq_start,
+ .next = sctp_transport_seq_next,
+ .stop = sctp_transport_seq_stop,
+ .show = sctp_assocs_seq_show,
+};
+
+static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
+{
+ struct sctp_association *assoc;
+ struct sctp_transport *transport, *tsp;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_printf(seq, "ADDR ASSOC_ID HB_ACT RTO MAX_PATH_RTX "
+ "REM_ADDR_RTX START STATE\n");
+ return 0;
+ }
+
+ transport = (struct sctp_transport *)v;
+ assoc = transport->asoc;
+
+ list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list,
+ transports) {
+ /*
+ * The remote address (ADDR)
+ */
+ tsp->af_specific->seq_dump_addr(seq, &tsp->ipaddr);
+ seq_printf(seq, " ");
+ /*
+ * The association ID (ASSOC_ID)
+ */
+ seq_printf(seq, "%d ", tsp->asoc->assoc_id);
+
+ /*
+ * If the Heartbeat is active (HB_ACT)
+ * Note: 1 = Active, 0 = Inactive
+ */
+ seq_printf(seq, "%d ", timer_pending(&tsp->hb_timer));
+
+ /*
+ * Retransmit time out (RTO)
+ */
+ seq_printf(seq, "%lu ", tsp->rto);
+
+ /*
+ * Maximum path retransmit count (PATH_MAX_RTX)
+ */
+ seq_printf(seq, "%d ", tsp->pathmaxrxt);
+
+ /*
+ * remote address retransmit count (REM_ADDR_RTX)
+ * Note: We don't have a way to tally this at the moment
+ * so let's just leave it as zero for now
+ */
+ seq_puts(seq, "0 ");
+
+ /*
+ * remote address start time (START). This is also not
+ * currently implemented, but we can record it with a
+ * jiffies marker in a subsequent patch
+ */
+ seq_puts(seq, "0 ");
+
+ /*
+ * The current state of this destination. I.e.
+ * SCTP_ACTIVE, SCTP_INACTIVE, ...
+ */
+ seq_printf(seq, "%d", tsp->state);
+
+ seq_printf(seq, "\n");
+ }
+
+ return 0;
+}
+
+static const struct seq_operations sctp_remaddr_ops = {
+ .start = sctp_transport_seq_start,
+ .next = sctp_transport_seq_next,
+ .stop = sctp_transport_seq_stop,
+ .show = sctp_remaddr_seq_show,
+};
+
+/* Set up the proc fs entry for the SCTP protocol. */
+int __net_init sctp_proc_init(struct net *net)
+{
+ net->sctp.proc_net_sctp = proc_net_mkdir(net, "sctp", net->proc_net);
+ if (!net->sctp.proc_net_sctp)
+ return -ENOMEM;
+ if (!proc_create_net_single("snmp", 0444, net->sctp.proc_net_sctp,
+ sctp_snmp_seq_show, NULL))
+ goto cleanup;
+ if (!proc_create_net("eps", 0444, net->sctp.proc_net_sctp,
+ &sctp_eps_ops, sizeof(struct seq_net_private)))
+ goto cleanup;
+ if (!proc_create_net("assocs", 0444, net->sctp.proc_net_sctp,
+ &sctp_assoc_ops, sizeof(struct sctp_ht_iter)))
+ goto cleanup;
+ if (!proc_create_net("remaddr", 0444, net->sctp.proc_net_sctp,
+ &sctp_remaddr_ops, sizeof(struct sctp_ht_iter)))
+ goto cleanup;
+ return 0;
+
+cleanup:
+ remove_proc_subtree("sctp", net->proc_net);
+ net->sctp.proc_net_sctp = NULL;
+ return -ENOMEM;
+}
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
new file mode 100644
index 000000000..bcd3384ab
--- /dev/null
+++ b/net/sctp/protocol.c
@@ -0,0 +1,1728 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001 Intel Corp.
+ * Copyright (c) 2001 Nokia, Inc.
+ * Copyright (c) 2001 La Monte H.P. Yarroll
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * Initialization/cleanup for SCTP protocol support.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ * Daisy Chang <daisyc@us.ibm.com>
+ * Ardelle Fan <ardelle.fan@intel.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/seq_file.h>
+#include <linux/memblock.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <net/net_namespace.h>
+#include <net/protocol.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/route.h>
+#include <net/sctp/sctp.h>
+#include <net/addrconf.h>
+#include <net/inet_common.h>
+#include <net/inet_ecn.h>
+#include <net/udp_tunnel.h>
+
+#define MAX_SCTP_PORT_HASH_ENTRIES (64 * 1024)
+
+/* Global data structures. */
+struct sctp_globals sctp_globals __read_mostly;
+
+struct idr sctp_assocs_id;
+DEFINE_SPINLOCK(sctp_assocs_id_lock);
+
+static struct sctp_pf *sctp_pf_inet6_specific;
+static struct sctp_pf *sctp_pf_inet_specific;
+static struct sctp_af *sctp_af_v4_specific;
+static struct sctp_af *sctp_af_v6_specific;
+
+struct kmem_cache *sctp_chunk_cachep __read_mostly;
+struct kmem_cache *sctp_bucket_cachep __read_mostly;
+
+long sysctl_sctp_mem[3];
+int sysctl_sctp_rmem[3];
+int sysctl_sctp_wmem[3];
+
+/* Private helper to extract ipv4 addresses and stash them in
+ * the protocol structure.
+ */
+static void sctp_v4_copy_addrlist(struct list_head *addrlist,
+ struct net_device *dev)
+{
+ struct in_device *in_dev;
+ struct in_ifaddr *ifa;
+ struct sctp_sockaddr_entry *addr;
+
+ rcu_read_lock();
+ if ((in_dev = __in_dev_get_rcu(dev)) == NULL) {
+ rcu_read_unlock();
+ return;
+ }
+
+ in_dev_for_each_ifa_rcu(ifa, in_dev) {
+ /* Add the address to the local list. */
+ addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
+ if (addr) {
+ addr->a.v4.sin_family = AF_INET;
+ addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
+ addr->valid = 1;
+ INIT_LIST_HEAD(&addr->list);
+ list_add_tail(&addr->list, addrlist);
+ }
+ }
+
+ rcu_read_unlock();
+}
+
+/* Extract our IP addresses from the system and stash them in the
+ * protocol structure.
+ */
+static void sctp_get_local_addr_list(struct net *net)
+{
+ struct net_device *dev;
+ struct list_head *pos;
+ struct sctp_af *af;
+
+ rcu_read_lock();
+ for_each_netdev_rcu(net, dev) {
+ list_for_each(pos, &sctp_address_families) {
+ af = list_entry(pos, struct sctp_af, list);
+ af->copy_addrlist(&net->sctp.local_addr_list, dev);
+ }
+ }
+ rcu_read_unlock();
+}
+
+/* Free the existing local addresses. */
+static void sctp_free_local_addr_list(struct net *net)
+{
+ struct sctp_sockaddr_entry *addr;
+ struct list_head *pos, *temp;
+
+ list_for_each_safe(pos, temp, &net->sctp.local_addr_list) {
+ addr = list_entry(pos, struct sctp_sockaddr_entry, list);
+ list_del(pos);
+ kfree(addr);
+ }
+}
+
+/* Copy the local addresses which are valid for 'scope' into 'bp'. */
+int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp,
+ enum sctp_scope scope, gfp_t gfp, int copy_flags)
+{
+ struct sctp_sockaddr_entry *addr;
+ union sctp_addr laddr;
+ int error = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
+ if (!addr->valid)
+ continue;
+ if (!sctp_in_scope(net, &addr->a, scope))
+ continue;
+
+ /* Now that the address is in scope, check to see if
+ * the address type is really supported by the local
+ * sock as well as the remote peer.
+ */
+ if (addr->a.sa.sa_family == AF_INET &&
+ (!(copy_flags & SCTP_ADDR4_ALLOWED) ||
+ !(copy_flags & SCTP_ADDR4_PEERSUPP)))
+ continue;
+ if (addr->a.sa.sa_family == AF_INET6 &&
+ (!(copy_flags & SCTP_ADDR6_ALLOWED) ||
+ !(copy_flags & SCTP_ADDR6_PEERSUPP)))
+ continue;
+
+ laddr = addr->a;
+ /* also works for setting ipv6 address port */
+ laddr.v4.sin_port = htons(bp->port);
+ if (sctp_bind_addr_state(bp, &laddr) != -1)
+ continue;
+
+ error = sctp_add_bind_addr(bp, &addr->a, sizeof(addr->a),
+ SCTP_ADDR_SRC, GFP_ATOMIC);
+ if (error)
+ break;
+ }
+
+ rcu_read_unlock();
+ return error;
+}
+
+/* Copy over any ip options */
+static void sctp_v4_copy_ip_options(struct sock *sk, struct sock *newsk)
+{
+ struct inet_sock *newinet, *inet = inet_sk(sk);
+ struct ip_options_rcu *inet_opt, *newopt = NULL;
+
+ newinet = inet_sk(newsk);
+
+ rcu_read_lock();
+ inet_opt = rcu_dereference(inet->inet_opt);
+ if (inet_opt) {
+ newopt = sock_kmalloc(newsk, sizeof(*inet_opt) +
+ inet_opt->opt.optlen, GFP_ATOMIC);
+ if (newopt)
+ memcpy(newopt, inet_opt, sizeof(*inet_opt) +
+ inet_opt->opt.optlen);
+ else
+ pr_err("%s: Failed to copy ip options\n", __func__);
+ }
+ RCU_INIT_POINTER(newinet->inet_opt, newopt);
+ rcu_read_unlock();
+}
+
+/* Account for the IP options */
+static int sctp_v4_ip_options_len(struct sock *sk)
+{
+ struct inet_sock *inet = inet_sk(sk);
+ struct ip_options_rcu *inet_opt;
+ int len = 0;
+
+ rcu_read_lock();
+ inet_opt = rcu_dereference(inet->inet_opt);
+ if (inet_opt)
+ len = inet_opt->opt.optlen;
+
+ rcu_read_unlock();
+ return len;
+}
+
+/* Initialize a sctp_addr from an incoming skb. */
+static void sctp_v4_from_skb(union sctp_addr *addr, struct sk_buff *skb,
+ int is_saddr)
+{
+ /* Always called on head skb, so this is safe */
+ struct sctphdr *sh = sctp_hdr(skb);
+ struct sockaddr_in *sa = &addr->v4;
+
+ addr->v4.sin_family = AF_INET;
+
+ if (is_saddr) {
+ sa->sin_port = sh->source;
+ sa->sin_addr.s_addr = ip_hdr(skb)->saddr;
+ } else {
+ sa->sin_port = sh->dest;
+ sa->sin_addr.s_addr = ip_hdr(skb)->daddr;
+ }
+ memset(sa->sin_zero, 0, sizeof(sa->sin_zero));
+}
+
+/* Initialize an sctp_addr from a socket. */
+static void sctp_v4_from_sk(union sctp_addr *addr, struct sock *sk)
+{
+ addr->v4.sin_family = AF_INET;
+ addr->v4.sin_port = 0;
+ addr->v4.sin_addr.s_addr = inet_sk(sk)->inet_rcv_saddr;
+ memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
+}
+
+/* Initialize sk->sk_rcv_saddr from sctp_addr. */
+static void sctp_v4_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
+{
+ inet_sk(sk)->inet_rcv_saddr = addr->v4.sin_addr.s_addr;
+}
+
+/* Initialize sk->sk_daddr from sctp_addr. */
+static void sctp_v4_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
+{
+ inet_sk(sk)->inet_daddr = addr->v4.sin_addr.s_addr;
+}
+
+/* Initialize a sctp_addr from an address parameter. */
+static bool sctp_v4_from_addr_param(union sctp_addr *addr,
+ union sctp_addr_param *param,
+ __be16 port, int iif)
+{
+ if (ntohs(param->v4.param_hdr.length) < sizeof(struct sctp_ipv4addr_param))
+ return false;
+
+ addr->v4.sin_family = AF_INET;
+ addr->v4.sin_port = port;
+ addr->v4.sin_addr.s_addr = param->v4.addr.s_addr;
+ memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
+
+ return true;
+}
+
+/* Initialize an address parameter from a sctp_addr and return the length
+ * of the address parameter.
+ */
+static int sctp_v4_to_addr_param(const union sctp_addr *addr,
+ union sctp_addr_param *param)
+{
+ int length = sizeof(struct sctp_ipv4addr_param);
+
+ param->v4.param_hdr.type = SCTP_PARAM_IPV4_ADDRESS;
+ param->v4.param_hdr.length = htons(length);
+ param->v4.addr.s_addr = addr->v4.sin_addr.s_addr;
+
+ return length;
+}
+
+/* Initialize a sctp_addr from a dst_entry. */
+static void sctp_v4_dst_saddr(union sctp_addr *saddr, struct flowi4 *fl4,
+ __be16 port)
+{
+ saddr->v4.sin_family = AF_INET;
+ saddr->v4.sin_port = port;
+ saddr->v4.sin_addr.s_addr = fl4->saddr;
+ memset(saddr->v4.sin_zero, 0, sizeof(saddr->v4.sin_zero));
+}
+
+/* Compare two addresses exactly. */
+static int sctp_v4_cmp_addr(const union sctp_addr *addr1,
+ const union sctp_addr *addr2)
+{
+ if (addr1->sa.sa_family != addr2->sa.sa_family)
+ return 0;
+ if (addr1->v4.sin_port != addr2->v4.sin_port)
+ return 0;
+ if (addr1->v4.sin_addr.s_addr != addr2->v4.sin_addr.s_addr)
+ return 0;
+
+ return 1;
+}
+
+/* Initialize addr struct to INADDR_ANY. */
+static void sctp_v4_inaddr_any(union sctp_addr *addr, __be16 port)
+{
+ addr->v4.sin_family = AF_INET;
+ addr->v4.sin_addr.s_addr = htonl(INADDR_ANY);
+ addr->v4.sin_port = port;
+ memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
+}
+
+/* Is this a wildcard address? */
+static int sctp_v4_is_any(const union sctp_addr *addr)
+{
+ return htonl(INADDR_ANY) == addr->v4.sin_addr.s_addr;
+}
+
+/* This function checks if the address is a valid address to be used for
+ * SCTP binding.
+ *
+ * Output:
+ * Return 0 - If the address is a non-unicast or an illegal address.
+ * Return 1 - If the address is a unicast.
+ */
+static int sctp_v4_addr_valid(union sctp_addr *addr,
+ struct sctp_sock *sp,
+ const struct sk_buff *skb)
+{
+ /* IPv4 addresses not allowed */
+ if (sp && ipv6_only_sock(sctp_opt2sk(sp)))
+ return 0;
+
+ /* Is this a non-unicast address or an unusable SCTP address? */
+ if (IS_IPV4_UNUSABLE_ADDRESS(addr->v4.sin_addr.s_addr))
+ return 0;
+
+ /* Is this a broadcast address? */
+ if (skb && skb_rtable(skb)->rt_flags & RTCF_BROADCAST)
+ return 0;
+
+ return 1;
+}
+
+/* Should this be available for binding? */
+static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp)
+{
+ struct net *net = sock_net(&sp->inet.sk);
+ int ret = inet_addr_type(net, addr->v4.sin_addr.s_addr);
+
+
+ if (addr->v4.sin_addr.s_addr != htonl(INADDR_ANY) &&
+ ret != RTN_LOCAL &&
+ !sp->inet.freebind &&
+ !READ_ONCE(net->ipv4.sysctl_ip_nonlocal_bind))
+ return 0;
+
+ if (ipv6_only_sock(sctp_opt2sk(sp)))
+ return 0;
+
+ return 1;
+}
+
+/* Checking the loopback, private and other address scopes as defined in
+ * RFC 1918. The IPv4 scoping is based on the draft for SCTP IPv4
+ * scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>.
+ *
+ * Level 0 - unusable SCTP addresses
+ * Level 1 - loopback address
+ * Level 2 - link-local addresses
+ * Level 3 - private addresses.
+ * Level 4 - global addresses
+ * For INIT and INIT-ACK address list, let L be the level of
+ * requested destination address, sender and receiver
+ * SHOULD include all of its addresses with level greater
+ * than or equal to L.
+ *
+ * IPv4 scoping can be controlled through sysctl option
+ * net.sctp.addr_scope_policy
+ */
+static enum sctp_scope sctp_v4_scope(union sctp_addr *addr)
+{
+ enum sctp_scope retval;
+
+ /* Check for unusable SCTP addresses. */
+ if (IS_IPV4_UNUSABLE_ADDRESS(addr->v4.sin_addr.s_addr)) {
+ retval = SCTP_SCOPE_UNUSABLE;
+ } else if (ipv4_is_loopback(addr->v4.sin_addr.s_addr)) {
+ retval = SCTP_SCOPE_LOOPBACK;
+ } else if (ipv4_is_linklocal_169(addr->v4.sin_addr.s_addr)) {
+ retval = SCTP_SCOPE_LINK;
+ } else if (ipv4_is_private_10(addr->v4.sin_addr.s_addr) ||
+ ipv4_is_private_172(addr->v4.sin_addr.s_addr) ||
+ ipv4_is_private_192(addr->v4.sin_addr.s_addr) ||
+ ipv4_is_test_198(addr->v4.sin_addr.s_addr)) {
+ retval = SCTP_SCOPE_PRIVATE;
+ } else {
+ retval = SCTP_SCOPE_GLOBAL;
+ }
+
+ return retval;
+}
+
+/* Returns a valid dst cache entry for the given source and destination ip
+ * addresses. If an association is passed, tries to get a dst entry with a
+ * source address that matches an address in the bind address list.
+ */
+static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
+ struct flowi *fl, struct sock *sk)
+{
+ struct sctp_association *asoc = t->asoc;
+ struct rtable *rt;
+ struct flowi _fl;
+ struct flowi4 *fl4 = &_fl.u.ip4;
+ struct sctp_bind_addr *bp;
+ struct sctp_sockaddr_entry *laddr;
+ struct dst_entry *dst = NULL;
+ union sctp_addr *daddr = &t->ipaddr;
+ union sctp_addr dst_saddr;
+ __u8 tos = inet_sk(sk)->tos;
+
+ if (t->dscp & SCTP_DSCP_SET_MASK)
+ tos = t->dscp & SCTP_DSCP_VAL_MASK;
+ memset(&_fl, 0x0, sizeof(_fl));
+ fl4->daddr = daddr->v4.sin_addr.s_addr;
+ fl4->fl4_dport = daddr->v4.sin_port;
+ fl4->flowi4_proto = IPPROTO_SCTP;
+ if (asoc) {
+ fl4->flowi4_tos = RT_CONN_FLAGS_TOS(asoc->base.sk, tos);
+ fl4->flowi4_oif = asoc->base.sk->sk_bound_dev_if;
+ fl4->fl4_sport = htons(asoc->base.bind_addr.port);
+ }
+ if (saddr) {
+ fl4->saddr = saddr->v4.sin_addr.s_addr;
+ if (!fl4->fl4_sport)
+ fl4->fl4_sport = saddr->v4.sin_port;
+ }
+
+ pr_debug("%s: dst:%pI4, src:%pI4 - ", __func__, &fl4->daddr,
+ &fl4->saddr);
+
+ rt = ip_route_output_key(sock_net(sk), fl4);
+ if (!IS_ERR(rt)) {
+ dst = &rt->dst;
+ t->dst = dst;
+ memcpy(fl, &_fl, sizeof(_fl));
+ }
+
+ /* If there is no association or if a source address is passed, no
+ * more validation is required.
+ */
+ if (!asoc || saddr)
+ goto out;
+
+ bp = &asoc->base.bind_addr;
+
+ if (dst) {
+ /* Walk through the bind address list and look for a bind
+ * address that matches the source address of the returned dst.
+ */
+ sctp_v4_dst_saddr(&dst_saddr, fl4, htons(bp->port));
+ rcu_read_lock();
+ list_for_each_entry_rcu(laddr, &bp->address_list, list) {
+ if (!laddr->valid || (laddr->state == SCTP_ADDR_DEL) ||
+ (laddr->state != SCTP_ADDR_SRC &&
+ !asoc->src_out_of_asoc_ok))
+ continue;
+ if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a))
+ goto out_unlock;
+ }
+ rcu_read_unlock();
+
+ /* None of the bound addresses match the source address of the
+ * dst. So release it.
+ */
+ dst_release(dst);
+ dst = NULL;
+ }
+
+ /* Walk through the bind address list and try to get a dst that
+ * matches a bind address as the source address.
+ */
+ rcu_read_lock();
+ list_for_each_entry_rcu(laddr, &bp->address_list, list) {
+ struct net_device *odev;
+
+ if (!laddr->valid)
+ continue;
+ if (laddr->state != SCTP_ADDR_SRC ||
+ AF_INET != laddr->a.sa.sa_family)
+ continue;
+
+ fl4->fl4_sport = laddr->a.v4.sin_port;
+ flowi4_update_output(fl4,
+ asoc->base.sk->sk_bound_dev_if,
+ RT_CONN_FLAGS_TOS(asoc->base.sk, tos),
+ daddr->v4.sin_addr.s_addr,
+ laddr->a.v4.sin_addr.s_addr);
+
+ rt = ip_route_output_key(sock_net(sk), fl4);
+ if (IS_ERR(rt))
+ continue;
+
+ /* Ensure the src address belongs to the output
+ * interface.
+ */
+ odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr,
+ false);
+ if (!odev || odev->ifindex != fl4->flowi4_oif) {
+ if (!dst) {
+ dst = &rt->dst;
+ t->dst = dst;
+ memcpy(fl, &_fl, sizeof(_fl));
+ } else {
+ dst_release(&rt->dst);
+ }
+ continue;
+ }
+
+ dst_release(dst);
+ dst = &rt->dst;
+ t->dst = dst;
+ memcpy(fl, &_fl, sizeof(_fl));
+ break;
+ }
+
+out_unlock:
+ rcu_read_unlock();
+out:
+ if (dst) {
+ pr_debug("rt_dst:%pI4, rt_src:%pI4\n",
+ &fl->u.ip4.daddr, &fl->u.ip4.saddr);
+ } else {
+ t->dst = NULL;
+ pr_debug("no route\n");
+ }
+}
+
+/* For v4, the source address is cached in the route entry (dst). So no need
+ * to cache it separately and hence this is an empty routine.
+ */
+static void sctp_v4_get_saddr(struct sctp_sock *sk,
+ struct sctp_transport *t,
+ struct flowi *fl)
+{
+ union sctp_addr *saddr = &t->saddr;
+ struct rtable *rt = (struct rtable *)t->dst;
+
+ if (rt) {
+ saddr->v4.sin_family = AF_INET;
+ saddr->v4.sin_addr.s_addr = fl->u.ip4.saddr;
+ }
+}
+
+/* What interface did this skb arrive on? */
+static int sctp_v4_skb_iif(const struct sk_buff *skb)
+{
+ return inet_iif(skb);
+}
+
+/* Was this packet marked by Explicit Congestion Notification? */
+static int sctp_v4_is_ce(const struct sk_buff *skb)
+{
+ return INET_ECN_is_ce(ip_hdr(skb)->tos);
+}
+
+/* Create and initialize a new sk for the socket returned by accept(). */
+static struct sock *sctp_v4_create_accept_sk(struct sock *sk,
+ struct sctp_association *asoc,
+ bool kern)
+{
+ struct sock *newsk = sk_alloc(sock_net(sk), PF_INET, GFP_KERNEL,
+ sk->sk_prot, kern);
+ struct inet_sock *newinet;
+
+ if (!newsk)
+ goto out;
+
+ sock_init_data(NULL, newsk);
+
+ sctp_copy_sock(newsk, sk, asoc);
+ sock_reset_flag(newsk, SOCK_ZAPPED);
+
+ sctp_v4_copy_ip_options(sk, newsk);
+
+ newinet = inet_sk(newsk);
+
+ newinet->inet_daddr = asoc->peer.primary_addr.v4.sin_addr.s_addr;
+
+ sk_refcnt_debug_inc(newsk);
+
+ if (newsk->sk_prot->init(newsk)) {
+ sk_common_release(newsk);
+ newsk = NULL;
+ }
+
+out:
+ return newsk;
+}
+
+static int sctp_v4_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
+{
+ /* No address mapping for V4 sockets */
+ memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
+ return sizeof(struct sockaddr_in);
+}
+
+/* Dump the v4 addr to the seq file. */
+static void sctp_v4_seq_dump_addr(struct seq_file *seq, union sctp_addr *addr)
+{
+ seq_printf(seq, "%pI4 ", &addr->v4.sin_addr);
+}
+
+static void sctp_v4_ecn_capable(struct sock *sk)
+{
+ INET_ECN_xmit(sk);
+}
+
+static void sctp_addr_wq_timeout_handler(struct timer_list *t)
+{
+ struct net *net = from_timer(net, t, sctp.addr_wq_timer);
+ struct sctp_sockaddr_entry *addrw, *temp;
+ struct sctp_sock *sp;
+
+ spin_lock_bh(&net->sctp.addr_wq_lock);
+
+ list_for_each_entry_safe(addrw, temp, &net->sctp.addr_waitq, list) {
+ pr_debug("%s: the first ent in wq:%p is addr:%pISc for cmd:%d at "
+ "entry:%p\n", __func__, &net->sctp.addr_waitq, &addrw->a.sa,
+ addrw->state, addrw);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ /* Now we send an ASCONF for each association */
+ /* Note: we currently don't handle link-local IPv6 addresses */
+ if (addrw->a.sa.sa_family == AF_INET6) {
+ struct in6_addr *in6;
+
+ if (ipv6_addr_type(&addrw->a.v6.sin6_addr) &
+ IPV6_ADDR_LINKLOCAL)
+ goto free_next;
+
+ in6 = (struct in6_addr *)&addrw->a.v6.sin6_addr;
+ if (ipv6_chk_addr(net, in6, NULL, 0) == 0 &&
+ addrw->state == SCTP_ADDR_NEW) {
+ unsigned long timeo_val;
+
+ pr_debug("%s: this is on DAD, trying %d sec "
+ "later\n", __func__,
+ SCTP_ADDRESS_TICK_DELAY);
+
+ timeo_val = jiffies;
+ timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY);
+ mod_timer(&net->sctp.addr_wq_timer, timeo_val);
+ break;
+ }
+ }
+#endif
+ list_for_each_entry(sp, &net->sctp.auto_asconf_splist, auto_asconf_list) {
+ struct sock *sk;
+
+ sk = sctp_opt2sk(sp);
+ /* ignore bound-specific endpoints */
+ if (!sctp_is_ep_boundall(sk))
+ continue;
+ bh_lock_sock(sk);
+ if (sctp_asconf_mgmt(sp, addrw) < 0)
+ pr_debug("%s: sctp_asconf_mgmt failed\n", __func__);
+ bh_unlock_sock(sk);
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+free_next:
+#endif
+ list_del(&addrw->list);
+ kfree(addrw);
+ }
+ spin_unlock_bh(&net->sctp.addr_wq_lock);
+}
+
+static void sctp_free_addr_wq(struct net *net)
+{
+ struct sctp_sockaddr_entry *addrw;
+ struct sctp_sockaddr_entry *temp;
+
+ spin_lock_bh(&net->sctp.addr_wq_lock);
+ del_timer(&net->sctp.addr_wq_timer);
+ list_for_each_entry_safe(addrw, temp, &net->sctp.addr_waitq, list) {
+ list_del(&addrw->list);
+ kfree(addrw);
+ }
+ spin_unlock_bh(&net->sctp.addr_wq_lock);
+}
+
+/* Look up the entry for the same address in the addr_waitq.
+ * sctp_addr_wq MUST be locked.
+ */
+static struct sctp_sockaddr_entry *sctp_addr_wq_lookup(struct net *net,
+ struct sctp_sockaddr_entry *addr)
+{
+ struct sctp_sockaddr_entry *addrw;
+
+ list_for_each_entry(addrw, &net->sctp.addr_waitq, list) {
+ if (addrw->a.sa.sa_family != addr->a.sa.sa_family)
+ continue;
+ if (addrw->a.sa.sa_family == AF_INET) {
+ if (addrw->a.v4.sin_addr.s_addr ==
+ addr->a.v4.sin_addr.s_addr)
+ return addrw;
+ } else if (addrw->a.sa.sa_family == AF_INET6) {
+ if (ipv6_addr_equal(&addrw->a.v6.sin6_addr,
+ &addr->a.v6.sin6_addr))
+ return addrw;
+ }
+ }
+ return NULL;
+}
+
+void sctp_addr_wq_mgmt(struct net *net, struct sctp_sockaddr_entry *addr, int cmd)
+{
+ struct sctp_sockaddr_entry *addrw;
+ unsigned long timeo_val;
+
+ /* First, we check if an opposite message already exists in the queue.
+ * If we find such a message, it is removed.
+ * This operation is a bit stupid, but the DHCP client attaches the
+ * new address after a couple of additions and deletions of that address
+ */
+
+ spin_lock_bh(&net->sctp.addr_wq_lock);
+ /* Offsets existing events in addr_wq */
+ addrw = sctp_addr_wq_lookup(net, addr);
+ if (addrw) {
+ if (addrw->state != cmd) {
+ pr_debug("%s: offsets existing entry for %d, addr:%pISc "
+ "in wq:%p\n", __func__, addrw->state, &addrw->a.sa,
+ &net->sctp.addr_waitq);
+
+ list_del(&addrw->list);
+ kfree(addrw);
+ }
+ spin_unlock_bh(&net->sctp.addr_wq_lock);
+ return;
+ }
+
+ /* OK, we have to add the new address to the wait queue */
+ addrw = kmemdup(addr, sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
+ if (addrw == NULL) {
+ spin_unlock_bh(&net->sctp.addr_wq_lock);
+ return;
+ }
+ addrw->state = cmd;
+ list_add_tail(&addrw->list, &net->sctp.addr_waitq);
+
+ pr_debug("%s: add new entry for cmd:%d, addr:%pISc in wq:%p\n",
+ __func__, addrw->state, &addrw->a.sa, &net->sctp.addr_waitq);
+
+ if (!timer_pending(&net->sctp.addr_wq_timer)) {
+ timeo_val = jiffies;
+ timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY);
+ mod_timer(&net->sctp.addr_wq_timer, timeo_val);
+ }
+ spin_unlock_bh(&net->sctp.addr_wq_lock);
+}
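A short illustration of the cancellation behaviour implemented above (not part
of the kernel sources): opposite events queued for the same address annihilate
each other, which absorbs the add/delete/add churn that some DHCP clients
generate.

	/* Illustrative only: assumes net and addr refer to the same address. */
	sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);	/* queues a DEL entry */
	sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_NEW);	/* finds the pending DEL,
							 * removes it, queues nothing */
	sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_NEW);	/* nothing pending any more,
							 * so a NEW entry is queued */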
+
+/* Event handler for inet address addition/deletion events.
+ * The sctp_local_addr_list needs to be protected by a spin lock since
+ * multiple notifiers (say IPv4 and IPv6) may be running at the same
+ * time and thus corrupt the list.
+ * The reader side is protected with RCU.
+ */
+static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
+ void *ptr)
+{
+ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+ struct sctp_sockaddr_entry *addr = NULL;
+ struct sctp_sockaddr_entry *temp;
+ struct net *net = dev_net(ifa->ifa_dev->dev);
+ int found = 0;
+
+ switch (ev) {
+ case NETDEV_UP:
+ addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
+ if (addr) {
+ addr->a.v4.sin_family = AF_INET;
+ addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
+ addr->valid = 1;
+ spin_lock_bh(&net->sctp.local_addr_lock);
+ list_add_tail_rcu(&addr->list, &net->sctp.local_addr_list);
+ sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_NEW);
+ spin_unlock_bh(&net->sctp.local_addr_lock);
+ }
+ break;
+ case NETDEV_DOWN:
+ spin_lock_bh(&net->sctp.local_addr_lock);
+ list_for_each_entry_safe(addr, temp,
+ &net->sctp.local_addr_list, list) {
+ if (addr->a.sa.sa_family == AF_INET &&
+ addr->a.v4.sin_addr.s_addr ==
+ ifa->ifa_local) {
+ sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);
+ found = 1;
+ addr->valid = 0;
+ list_del_rcu(&addr->list);
+ break;
+ }
+ }
+ spin_unlock_bh(&net->sctp.local_addr_lock);
+ if (found)
+ kfree_rcu(addr, rcu);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+/*
+ * Initialize the control inode/socket with a control endpoint data
+ * structure. This endpoint is reserved exclusively for OOTB (out of the blue) processing.
+ */
+static int sctp_ctl_sock_init(struct net *net)
+{
+ int err;
+ sa_family_t family = PF_INET;
+
+ if (sctp_get_pf_specific(PF_INET6))
+ family = PF_INET6;
+
+ err = inet_ctl_sock_create(&net->sctp.ctl_sock, family,
+ SOCK_SEQPACKET, IPPROTO_SCTP, net);
+
+ /* If IPv6 socket could not be created, try the IPv4 socket */
+ if (err < 0 && family == PF_INET6)
+ err = inet_ctl_sock_create(&net->sctp.ctl_sock, AF_INET,
+ SOCK_SEQPACKET, IPPROTO_SCTP,
+ net);
+
+ if (err < 0) {
+ pr_err("Failed to create the SCTP control socket\n");
+ return err;
+ }
+ return 0;
+}
+
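+/* encap_rcv callback for the UDP tunneling sockets: remember the peer's UDP
+ * source port and hand the inner SCTP packet to sctp_rcv().
+ */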
+static int sctp_udp_rcv(struct sock *sk, struct sk_buff *skb)
+{
+ SCTP_INPUT_CB(skb)->encap_port = udp_hdr(skb)->source;
+
+ skb_set_transport_header(skb, sizeof(struct udphdr));
+ sctp_rcv(skb);
+ return 0;
+}
+
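+/* Create the per-net UDP tunneling listening sockets on net->sctp.udp_port
+ * (SCTP over UDP encapsulation). The IPv4 socket is mandatory; if the
+ * optional IPv6 socket cannot be created, the IPv4 one is released again.
+ */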
+int sctp_udp_sock_start(struct net *net)
+{
+ struct udp_tunnel_sock_cfg tuncfg = {NULL};
+ struct udp_port_cfg udp_conf = {0};
+ struct socket *sock;
+ int err;
+
+ udp_conf.family = AF_INET;
+ udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
+ udp_conf.local_udp_port = htons(net->sctp.udp_port);
+ err = udp_sock_create(net, &udp_conf, &sock);
+ if (err) {
+ pr_err("Failed to create the SCTP UDP tunneling v4 sock\n");
+ return err;
+ }
+
+ tuncfg.encap_type = 1;
+ tuncfg.encap_rcv = sctp_udp_rcv;
+ tuncfg.encap_err_lookup = sctp_udp_v4_err;
+ setup_udp_tunnel_sock(net, sock, &tuncfg);
+ net->sctp.udp4_sock = sock->sk;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ memset(&udp_conf, 0, sizeof(udp_conf));
+
+ udp_conf.family = AF_INET6;
+ udp_conf.local_ip6 = in6addr_any;
+ udp_conf.local_udp_port = htons(net->sctp.udp_port);
+ udp_conf.use_udp6_rx_checksums = true;
+ udp_conf.ipv6_v6only = true;
+ err = udp_sock_create(net, &udp_conf, &sock);
+ if (err) {
+ pr_err("Failed to create the SCTP UDP tunneling v6 sock\n");
+ udp_tunnel_sock_release(net->sctp.udp4_sock->sk_socket);
+ net->sctp.udp4_sock = NULL;
+ return err;
+ }
+
+ tuncfg.encap_type = 1;
+ tuncfg.encap_rcv = sctp_udp_rcv;
+ tuncfg.encap_err_lookup = sctp_udp_v6_err;
+ setup_udp_tunnel_sock(net, sock, &tuncfg);
+ net->sctp.udp6_sock = sock->sk;
+#endif
+
+ return 0;
+}
+
+void sctp_udp_sock_stop(struct net *net)
+{
+ if (net->sctp.udp4_sock) {
+ udp_tunnel_sock_release(net->sctp.udp4_sock->sk_socket);
+ net->sctp.udp4_sock = NULL;
+ }
+ if (net->sctp.udp6_sock) {
+ udp_tunnel_sock_release(net->sctp.udp6_sock->sk_socket);
+ net->sctp.udp6_sock = NULL;
+ }
+}
+
+/* Register address family specific functions. */
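+/* Returns 1 on success and 0 if the family is unsupported or was already
+ * registered; only one sctp_af may be registered per address family.
+ */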
+int sctp_register_af(struct sctp_af *af)
+{
+ switch (af->sa_family) {
+ case AF_INET:
+ if (sctp_af_v4_specific)
+ return 0;
+ sctp_af_v4_specific = af;
+ break;
+ case AF_INET6:
+ if (sctp_af_v6_specific)
+ return 0;
+ sctp_af_v6_specific = af;
+ break;
+ default:
+ return 0;
+ }
+
+ INIT_LIST_HEAD(&af->list);
+ list_add_tail(&af->list, &sctp_address_families);
+ return 1;
+}
+
+/* Get the table of functions for manipulating a particular address
+ * family.
+ */
+struct sctp_af *sctp_get_af_specific(sa_family_t family)
+{
+ switch (family) {
+ case AF_INET:
+ return sctp_af_v4_specific;
+ case AF_INET6:
+ return sctp_af_v6_specific;
+ default:
+ return NULL;
+ }
+}
+
+/* Common code to initialize an AF_INET msg_name. */
+static void sctp_inet_msgname(char *msgname, int *addr_len)
+{
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)msgname;
+ *addr_len = sizeof(struct sockaddr_in);
+ sin->sin_family = AF_INET;
+ memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
+}
+
+/* Copy the peer's primary address as the msg_name. */
+static void sctp_inet_event_msgname(struct sctp_ulpevent *event, char *msgname,
+ int *addr_len)
+{
+ struct sockaddr_in *sin, *sinfrom;
+
+ if (msgname) {
+ struct sctp_association *asoc;
+
+ asoc = event->asoc;
+ sctp_inet_msgname(msgname, addr_len);
+ sin = (struct sockaddr_in *)msgname;
+ sinfrom = &asoc->peer.primary_addr.v4;
+ sin->sin_port = htons(asoc->peer.port);
+ sin->sin_addr.s_addr = sinfrom->sin_addr.s_addr;
+ }
+}
+
+/* Initialize and copy out a msgname from an inbound skb. */
+static void sctp_inet_skb_msgname(struct sk_buff *skb, char *msgname, int *len)
+{
+ if (msgname) {
+ struct sctphdr *sh = sctp_hdr(skb);
+ struct sockaddr_in *sin = (struct sockaddr_in *)msgname;
+
+ sctp_inet_msgname(msgname, len);
+ sin->sin_port = sh->source;
+ sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+ }
+}
+
+/* Do we support this AF? */
+static int sctp_inet_af_supported(sa_family_t family, struct sctp_sock *sp)
+{
+ /* PF_INET only supports AF_INET addresses. */
+ return AF_INET == family;
+}
+
+/* Address matching with wildcards allowed. */
+static int sctp_inet_cmp_addr(const union sctp_addr *addr1,
+ const union sctp_addr *addr2,
+ struct sctp_sock *opt)
+{
+ /* PF_INET only supports AF_INET addresses. */
+ if (addr1->sa.sa_family != addr2->sa.sa_family)
+ return 0;
+ if (htonl(INADDR_ANY) == addr1->v4.sin_addr.s_addr ||
+ htonl(INADDR_ANY) == addr2->v4.sin_addr.s_addr)
+ return 1;
+ if (addr1->v4.sin_addr.s_addr == addr2->v4.sin_addr.s_addr)
+ return 1;
+
+ return 0;
+}
+
+/* Verify that the provided sockaddr looks bindable. Common verification has
+ * already been taken care of.
+ */
+static int sctp_inet_bind_verify(struct sctp_sock *opt, union sctp_addr *addr)
+{
+ return sctp_v4_available(addr, opt);
+}
+
+/* Verify that sockaddr looks sendable. Common verification has already
+ * been taken care of.
+ */
+static int sctp_inet_send_verify(struct sctp_sock *opt, union sctp_addr *addr)
+{
+ return 1;
+}
+
+/* Fill in Supported Address Type information for INIT and INIT-ACK
+ * chunks. Returns number of addresses supported.
+ */
+static int sctp_inet_supported_addrs(const struct sctp_sock *opt,
+ __be16 *types)
+{
+ types[0] = SCTP_PARAM_IPV4_ADDRESS;
+ return 1;
+}
+
+/* Wrapper routine that calls the ip transmit routine. */
+static inline int sctp_v4_xmit(struct sk_buff *skb, struct sctp_transport *t)
+{
+ struct dst_entry *dst = dst_clone(t->dst);
+ struct flowi4 *fl4 = &t->fl.u.ip4;
+ struct sock *sk = skb->sk;
+ struct inet_sock *inet = inet_sk(sk);
+ __u8 dscp = inet->tos;
+ __be16 df = 0;
+
+ pr_debug("%s: skb:%p, len:%d, src:%pI4, dst:%pI4\n", __func__, skb,
+ skb->len, &fl4->saddr, &fl4->daddr);
+
+ if (t->dscp & SCTP_DSCP_SET_MASK)
+ dscp = t->dscp & SCTP_DSCP_VAL_MASK;
+
+ inet->pmtudisc = t->param_flags & SPP_PMTUD_ENABLE ? IP_PMTUDISC_DO
+ : IP_PMTUDISC_DONT;
+ SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
+
+ if (!t->encap_port || !sctp_sk(sk)->udp_port) {
+ skb_dst_set(skb, dst);
+ return __ip_queue_xmit(sk, skb, &t->fl, dscp);
+ }
+
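+	/* UDP encapsulation path: tunnel the SCTP packet inside a UDP header
+	 * from the local udp_port to the transport's encap_port.
+	 */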
+ if (skb_is_gso(skb))
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
+
+ if (ip_dont_fragment(sk, dst) && !skb->ignore_df)
+ df = htons(IP_DF);
+
+ skb->encapsulation = 1;
+ skb_reset_inner_mac_header(skb);
+ skb_reset_inner_transport_header(skb);
+ skb_set_inner_ipproto(skb, IPPROTO_SCTP);
+ udp_tunnel_xmit_skb((struct rtable *)dst, sk, skb, fl4->saddr,
+ fl4->daddr, dscp, ip4_dst_hoplimit(dst), df,
+ sctp_sk(sk)->udp_port, t->encap_port, false, false);
+ return 0;
+}
+
+static struct sctp_af sctp_af_inet;
+
+static struct sctp_pf sctp_pf_inet = {
+ .event_msgname = sctp_inet_event_msgname,
+ .skb_msgname = sctp_inet_skb_msgname,
+ .af_supported = sctp_inet_af_supported,
+ .cmp_addr = sctp_inet_cmp_addr,
+ .bind_verify = sctp_inet_bind_verify,
+ .send_verify = sctp_inet_send_verify,
+ .supported_addrs = sctp_inet_supported_addrs,
+ .create_accept_sk = sctp_v4_create_accept_sk,
+ .addr_to_user = sctp_v4_addr_to_user,
+ .to_sk_saddr = sctp_v4_to_sk_saddr,
+ .to_sk_daddr = sctp_v4_to_sk_daddr,
+ .copy_ip_options = sctp_v4_copy_ip_options,
+ .af = &sctp_af_inet
+};
+
+/* Notifier for inetaddr addition/deletion events. */
+static struct notifier_block sctp_inetaddr_notifier = {
+ .notifier_call = sctp_inetaddr_event,
+};
+
+/* Socket operations. */
+static const struct proto_ops inet_seqpacket_ops = {
+ .family = PF_INET,
+ .owner = THIS_MODULE,
+ .release = inet_release, /* Needs to be wrapped... */
+ .bind = inet_bind,
+ .connect = sctp_inet_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = inet_accept,
+ .getname = inet_getname, /* Semantics are different. */
+ .poll = sctp_poll,
+ .ioctl = inet_ioctl,
+ .gettstamp = sock_gettstamp,
+ .listen = sctp_inet_listen,
+ .shutdown = inet_shutdown, /* Looks harmless. */
+ .setsockopt = sock_common_setsockopt, /* IP_SOL IP_OPTION is a problem */
+ .getsockopt = sock_common_getsockopt,
+ .sendmsg = inet_sendmsg,
+ .recvmsg = inet_recvmsg,
+ .mmap = sock_no_mmap,
+ .sendpage = sock_no_sendpage,
+};
+
+/* Registration with AF_INET family. */
+static struct inet_protosw sctp_seqpacket_protosw = {
+ .type = SOCK_SEQPACKET,
+ .protocol = IPPROTO_SCTP,
+ .prot = &sctp_prot,
+ .ops = &inet_seqpacket_ops,
+ .flags = SCTP_PROTOSW_FLAG
+};
+static struct inet_protosw sctp_stream_protosw = {
+ .type = SOCK_STREAM,
+ .protocol = IPPROTO_SCTP,
+ .prot = &sctp_prot,
+ .ops = &inet_seqpacket_ops,
+ .flags = SCTP_PROTOSW_FLAG
+};
+
+static int sctp4_rcv(struct sk_buff *skb)
+{
+ SCTP_INPUT_CB(skb)->encap_port = 0;
+ return sctp_rcv(skb);
+}
+
+/* Register with IP layer. */
+static const struct net_protocol sctp_protocol = {
+ .handler = sctp4_rcv,
+ .err_handler = sctp_v4_err,
+ .no_policy = 1,
+ .icmp_strict_tag_validation = 1,
+};
+
+/* IPv4 address related functions. */
+static struct sctp_af sctp_af_inet = {
+ .sa_family = AF_INET,
+ .sctp_xmit = sctp_v4_xmit,
+ .setsockopt = ip_setsockopt,
+ .getsockopt = ip_getsockopt,
+ .get_dst = sctp_v4_get_dst,
+ .get_saddr = sctp_v4_get_saddr,
+ .copy_addrlist = sctp_v4_copy_addrlist,
+ .from_skb = sctp_v4_from_skb,
+ .from_sk = sctp_v4_from_sk,
+ .from_addr_param = sctp_v4_from_addr_param,
+ .to_addr_param = sctp_v4_to_addr_param,
+ .cmp_addr = sctp_v4_cmp_addr,
+ .addr_valid = sctp_v4_addr_valid,
+ .inaddr_any = sctp_v4_inaddr_any,
+ .is_any = sctp_v4_is_any,
+ .available = sctp_v4_available,
+ .scope = sctp_v4_scope,
+ .skb_iif = sctp_v4_skb_iif,
+ .is_ce = sctp_v4_is_ce,
+ .seq_dump_addr = sctp_v4_seq_dump_addr,
+ .ecn_capable = sctp_v4_ecn_capable,
+ .net_header_len = sizeof(struct iphdr),
+ .sockaddr_len = sizeof(struct sockaddr_in),
+ .ip_options_len = sctp_v4_ip_options_len,
+};
+
+struct sctp_pf *sctp_get_pf_specific(sa_family_t family)
+{
+ switch (family) {
+ case PF_INET:
+ return sctp_pf_inet_specific;
+ case PF_INET6:
+ return sctp_pf_inet6_specific;
+ default:
+ return NULL;
+ }
+}
+
+/* Register the PF specific function table. */
+int sctp_register_pf(struct sctp_pf *pf, sa_family_t family)
+{
+ switch (family) {
+ case PF_INET:
+ if (sctp_pf_inet_specific)
+ return 0;
+ sctp_pf_inet_specific = pf;
+ break;
+ case PF_INET6:
+ if (sctp_pf_inet6_specific)
+ return 0;
+ sctp_pf_inet6_specific = pf;
+ break;
+ default:
+ return 0;
+ }
+ return 1;
+}
+
+static inline int init_sctp_mibs(struct net *net)
+{
+ net->sctp.sctp_statistics = alloc_percpu(struct sctp_mib);
+ if (!net->sctp.sctp_statistics)
+ return -ENOMEM;
+ return 0;
+}
+
+static inline void cleanup_sctp_mibs(struct net *net)
+{
+ free_percpu(net->sctp.sctp_statistics);
+}
+
+static void sctp_v4_pf_init(void)
+{
+ /* Initialize the SCTP specific PF functions. */
+ sctp_register_pf(&sctp_pf_inet, PF_INET);
+ sctp_register_af(&sctp_af_inet);
+}
+
+static void sctp_v4_pf_exit(void)
+{
+ list_del(&sctp_af_inet.list);
+}
+
+static int sctp_v4_protosw_init(void)
+{
+ int rc;
+
+ rc = proto_register(&sctp_prot, 1);
+ if (rc)
+ return rc;
+
+ /* Register SCTP(UDP and TCP style) with socket layer. */
+ inet_register_protosw(&sctp_seqpacket_protosw);
+ inet_register_protosw(&sctp_stream_protosw);
+
+ return 0;
+}
+
+static void sctp_v4_protosw_exit(void)
+{
+ inet_unregister_protosw(&sctp_stream_protosw);
+ inet_unregister_protosw(&sctp_seqpacket_protosw);
+ proto_unregister(&sctp_prot);
+}
+
+static int sctp_v4_add_protocol(void)
+{
+ /* Register notifier for inet address additions/deletions. */
+ register_inetaddr_notifier(&sctp_inetaddr_notifier);
+
+ /* Register SCTP with inet layer. */
+ if (inet_add_protocol(&sctp_protocol, IPPROTO_SCTP) < 0)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static void sctp_v4_del_protocol(void)
+{
+ inet_del_protocol(&sctp_protocol, IPPROTO_SCTP);
+ unregister_inetaddr_notifier(&sctp_inetaddr_notifier);
+}
+
+static int __net_init sctp_defaults_init(struct net *net)
+{
+ int status;
+
+ /*
+ * 14. Suggested SCTP Protocol Parameter Values
+ */
+ /* The following protocol parameters are RECOMMENDED: */
+ /* RTO.Initial - 3 seconds */
+ net->sctp.rto_initial = SCTP_RTO_INITIAL;
+ /* RTO.Min - 1 second */
+ net->sctp.rto_min = SCTP_RTO_MIN;
+ /* RTO.Max - 60 seconds */
+ net->sctp.rto_max = SCTP_RTO_MAX;
+ /* RTO.Alpha - 1/8 */
+ net->sctp.rto_alpha = SCTP_RTO_ALPHA;
+ /* RTO.Beta - 1/4 */
+ net->sctp.rto_beta = SCTP_RTO_BETA;
+
+ /* Valid.Cookie.Life - 60 seconds */
+ net->sctp.valid_cookie_life = SCTP_DEFAULT_COOKIE_LIFE;
+
+ /* Whether Cookie Preservative is enabled(1) or not(0) */
+ net->sctp.cookie_preserve_enable = 1;
+
+ /* Default sctp sockets to use md5 as their hmac alg */
+#if defined (CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5)
+ net->sctp.sctp_hmac_alg = "md5";
+#elif defined (CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1)
+ net->sctp.sctp_hmac_alg = "sha1";
+#else
+ net->sctp.sctp_hmac_alg = NULL;
+#endif
+
+ /* Max.Burst - 4 */
+ net->sctp.max_burst = SCTP_DEFAULT_MAX_BURST;
+
+	/* Disable Primary Path Switchover by default */
+ net->sctp.ps_retrans = SCTP_PS_RETRANS_MAX;
+
+ /* Enable pf state by default */
+ net->sctp.pf_enable = 1;
+
+ /* Ignore pf exposure feature by default */
+ net->sctp.pf_expose = SCTP_PF_EXPOSE_UNSET;
+
+ /* Association.Max.Retrans - 10 attempts
+ * Path.Max.Retrans - 5 attempts (per destination address)
+ * Max.Init.Retransmits - 8 attempts
+ */
+ net->sctp.max_retrans_association = 10;
+ net->sctp.max_retrans_path = 5;
+ net->sctp.max_retrans_init = 8;
+
+ /* Sendbuffer growth - do per-socket accounting */
+ net->sctp.sndbuf_policy = 0;
+
+ /* Rcvbuffer growth - do per-socket accounting */
+ net->sctp.rcvbuf_policy = 0;
+
+ /* HB.interval - 30 seconds */
+ net->sctp.hb_interval = SCTP_DEFAULT_TIMEOUT_HEARTBEAT;
+
+ /* delayed SACK timeout */
+ net->sctp.sack_timeout = SCTP_DEFAULT_TIMEOUT_SACK;
+
+ /* Disable ADDIP by default. */
+ net->sctp.addip_enable = 0;
+ net->sctp.addip_noauth = 0;
+ net->sctp.default_auto_asconf = 0;
+
+ /* Enable PR-SCTP by default. */
+ net->sctp.prsctp_enable = 1;
+
+ /* Disable RECONF by default. */
+ net->sctp.reconf_enable = 0;
+
+ /* Disable AUTH by default. */
+ net->sctp.auth_enable = 0;
+
+ /* Enable ECN by default. */
+ net->sctp.ecn_enable = 1;
+
+ /* Set UDP tunneling listening port to 0 by default */
+ net->sctp.udp_port = 0;
+
+ /* Set remote encap port to 0 by default */
+ net->sctp.encap_port = 0;
+
+ /* Set SCOPE policy to enabled */
+ net->sctp.scope_policy = SCTP_SCOPE_POLICY_ENABLE;
+
+ /* Set the default rwnd update threshold */
+ net->sctp.rwnd_upd_shift = SCTP_DEFAULT_RWND_SHIFT;
+
+ /* Initialize maximum autoclose timeout. */
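+	/* The value is in seconds; capping it at INT_MAX / HZ keeps the
+	 * later conversion of autoclose to jiffies within integer range.
+	 */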
+ net->sctp.max_autoclose = INT_MAX / HZ;
+
+ status = sctp_sysctl_net_register(net);
+ if (status)
+ goto err_sysctl_register;
+
+ /* Allocate and initialise sctp mibs. */
+ status = init_sctp_mibs(net);
+ if (status)
+ goto err_init_mibs;
+
+#ifdef CONFIG_PROC_FS
+ /* Initialize proc fs directory. */
+ status = sctp_proc_init(net);
+ if (status)
+ goto err_init_proc;
+#endif
+
+ sctp_dbg_objcnt_init(net);
+
+ /* Initialize the local address list. */
+ INIT_LIST_HEAD(&net->sctp.local_addr_list);
+ spin_lock_init(&net->sctp.local_addr_lock);
+ sctp_get_local_addr_list(net);
+
+ /* Initialize the address event list */
+ INIT_LIST_HEAD(&net->sctp.addr_waitq);
+ INIT_LIST_HEAD(&net->sctp.auto_asconf_splist);
+ spin_lock_init(&net->sctp.addr_wq_lock);
+ net->sctp.addr_wq_timer.expires = 0;
+ timer_setup(&net->sctp.addr_wq_timer, sctp_addr_wq_timeout_handler, 0);
+
+ return 0;
+
+#ifdef CONFIG_PROC_FS
+err_init_proc:
+ cleanup_sctp_mibs(net);
+#endif
+err_init_mibs:
+ sctp_sysctl_net_unregister(net);
+err_sysctl_register:
+ return status;
+}
+
+static void __net_exit sctp_defaults_exit(struct net *net)
+{
+ /* Free the local address list */
+ sctp_free_addr_wq(net);
+ sctp_free_local_addr_list(net);
+
+#ifdef CONFIG_PROC_FS
+ remove_proc_subtree("sctp", net->proc_net);
+ net->sctp.proc_net_sctp = NULL;
+#endif
+ cleanup_sctp_mibs(net);
+ sctp_sysctl_net_unregister(net);
+}
+
+static struct pernet_operations sctp_defaults_ops = {
+ .init = sctp_defaults_init,
+ .exit = sctp_defaults_exit,
+};
+
+static int __net_init sctp_ctrlsock_init(struct net *net)
+{
+ int status;
+
+ /* Initialize the control inode/socket for handling OOTB packets. */
+ status = sctp_ctl_sock_init(net);
+ if (status)
+ pr_err("Failed to initialize the SCTP control sock\n");
+
+ return status;
+}
+
+static void __net_exit sctp_ctrlsock_exit(struct net *net)
+{
+ /* Free the control endpoint. */
+ inet_ctl_sock_destroy(net->sctp.ctl_sock);
+}
+
+static struct pernet_operations sctp_ctrlsock_ops = {
+ .init = sctp_ctrlsock_init,
+ .exit = sctp_ctrlsock_exit,
+};
+
+/* Initialize the universe into something sensible. */
+static __init int sctp_init(void)
+{
+ unsigned long nr_pages = totalram_pages();
+ unsigned long limit;
+ unsigned long goal;
+ int max_entry_order;
+ int num_entries;
+ int max_share;
+ int status;
+ int order;
+ int i;
+
+ sock_skb_cb_check_size(sizeof(struct sctp_ulpevent));
+
+ /* Allocate bind_bucket and chunk caches. */
+ status = -ENOBUFS;
+ sctp_bucket_cachep = kmem_cache_create("sctp_bind_bucket",
+ sizeof(struct sctp_bind_bucket),
+ 0, SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (!sctp_bucket_cachep)
+ goto out;
+
+ sctp_chunk_cachep = kmem_cache_create("sctp_chunk",
+ sizeof(struct sctp_chunk),
+ 0, SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (!sctp_chunk_cachep)
+ goto err_chunk_cachep;
+
+ status = percpu_counter_init(&sctp_sockets_allocated, 0, GFP_KERNEL);
+ if (status)
+ goto err_percpu_counter_init;
+
+ /* Implementation specific variables. */
+
+ /* Initialize default stream count setup information. */
+ sctp_max_instreams = SCTP_DEFAULT_INSTREAMS;
+ sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS;
+
+ /* Initialize handle used for association ids. */
+ idr_init(&sctp_assocs_id);
+
+ limit = nr_free_buffer_pages() / 8;
+ limit = max(limit, 128UL);
+ sysctl_sctp_mem[0] = limit / 4 * 3;
+ sysctl_sctp_mem[1] = limit;
+ sysctl_sctp_mem[2] = sysctl_sctp_mem[0] * 2;
+
+	/* Set per-socket limits to no more than 1/128 of the pressure threshold */
+ limit = (sysctl_sctp_mem[1]) << (PAGE_SHIFT - 7);
+ max_share = min(4UL*1024*1024, limit);
+
+ sysctl_sctp_rmem[0] = PAGE_SIZE; /* give each asoc 1 page min */
+ sysctl_sctp_rmem[1] = 1500 * SKB_TRUESIZE(1);
+ sysctl_sctp_rmem[2] = max(sysctl_sctp_rmem[1], max_share);
+
+ sysctl_sctp_wmem[0] = PAGE_SIZE;
+ sysctl_sctp_wmem[1] = 16*1024;
+ sysctl_sctp_wmem[2] = max(64*1024, max_share);
+
+ /* Size and allocate the association hash table.
+	 * The methodology is similar, though not identical, to that of the
+	 * TCP hash tables. Start by computing a goal size.
+ */
+ if (nr_pages >= (128 * 1024))
+ goal = nr_pages >> (22 - PAGE_SHIFT);
+ else
+ goal = nr_pages >> (24 - PAGE_SHIFT);
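+	/* goal is a table size in bytes: roughly one byte per 4MB of RAM on
+	 * larger systems (>= 128K pages) and one byte per 16MB otherwise.
+	 */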
+
+ /* Then compute the page order for said goal */
+ order = get_order(goal);
+
+ /* Now compute the required page order for the maximum sized table we
+ * want to create
+ */
+ max_entry_order = get_order(MAX_SCTP_PORT_HASH_ENTRIES *
+ sizeof(struct sctp_bind_hashbucket));
+
+ /* Limit the page order by that maximum hash table size */
+ order = min(order, max_entry_order);
+
+ /* Allocate and initialize the endpoint hash table. */
+ sctp_ep_hashsize = 64;
+ sctp_ep_hashtable =
+ kmalloc_array(64, sizeof(struct sctp_hashbucket), GFP_KERNEL);
+ if (!sctp_ep_hashtable) {
+ pr_err("Failed endpoint_hash alloc\n");
+ status = -ENOMEM;
+ goto err_ehash_alloc;
+ }
+ for (i = 0; i < sctp_ep_hashsize; i++) {
+ rwlock_init(&sctp_ep_hashtable[i].lock);
+ INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain);
+ }
+
+ /* Allocate and initialize the SCTP port hash table.
+	 * Note that order is initialized to start at the max sized
+ * table we want to support. If we can't get that many pages
+ * reduce the order and try again
+ */
+ do {
+ sctp_port_hashtable = (struct sctp_bind_hashbucket *)
+ __get_free_pages(GFP_KERNEL | __GFP_NOWARN, order);
+ } while (!sctp_port_hashtable && --order > 0);
+
+ if (!sctp_port_hashtable) {
+ pr_err("Failed bind hash alloc\n");
+ status = -ENOMEM;
+ goto err_bhash_alloc;
+ }
+
+ /* Now compute the number of entries that will fit in the
+ * port hash space we allocated
+ */
+ num_entries = (1UL << order) * PAGE_SIZE /
+ sizeof(struct sctp_bind_hashbucket);
+
+ /* And finish by rounding it down to the nearest power of two.
+ * This wastes some memory of course, but it's needed because
+ * the hash function operates based on the assumption that
+ * the number of entries is a power of two.
+ */
+ sctp_port_hashsize = rounddown_pow_of_two(num_entries);
+
+ for (i = 0; i < sctp_port_hashsize; i++) {
+ spin_lock_init(&sctp_port_hashtable[i].lock);
+ INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain);
+ }
+
+ status = sctp_transport_hashtable_init();
+ if (status)
+ goto err_thash_alloc;
+
+ pr_info("Hash tables configured (bind %d/%d)\n", sctp_port_hashsize,
+ num_entries);
+
+ sctp_sysctl_register();
+
+ INIT_LIST_HEAD(&sctp_address_families);
+ sctp_v4_pf_init();
+ sctp_v6_pf_init();
+ sctp_sched_ops_init();
+
+ status = register_pernet_subsys(&sctp_defaults_ops);
+ if (status)
+ goto err_register_defaults;
+
+ status = sctp_v4_protosw_init();
+ if (status)
+ goto err_protosw_init;
+
+ status = sctp_v6_protosw_init();
+ if (status)
+ goto err_v6_protosw_init;
+
+ status = register_pernet_subsys(&sctp_ctrlsock_ops);
+ if (status)
+ goto err_register_ctrlsock;
+
+ status = sctp_v4_add_protocol();
+ if (status)
+ goto err_add_protocol;
+
+ /* Register SCTP with inet6 layer. */
+ status = sctp_v6_add_protocol();
+ if (status)
+ goto err_v6_add_protocol;
+
+ if (sctp_offload_init() < 0)
+ pr_crit("%s: Cannot add SCTP protocol offload\n", __func__);
+
+out:
+ return status;
+err_v6_add_protocol:
+ sctp_v4_del_protocol();
+err_add_protocol:
+ unregister_pernet_subsys(&sctp_ctrlsock_ops);
+err_register_ctrlsock:
+ sctp_v6_protosw_exit();
+err_v6_protosw_init:
+ sctp_v4_protosw_exit();
+err_protosw_init:
+ unregister_pernet_subsys(&sctp_defaults_ops);
+err_register_defaults:
+ sctp_v4_pf_exit();
+ sctp_v6_pf_exit();
+ sctp_sysctl_unregister();
+ free_pages((unsigned long)sctp_port_hashtable,
+ get_order(sctp_port_hashsize *
+ sizeof(struct sctp_bind_hashbucket)));
+err_bhash_alloc:
+ sctp_transport_hashtable_destroy();
+err_thash_alloc:
+ kfree(sctp_ep_hashtable);
+err_ehash_alloc:
+ percpu_counter_destroy(&sctp_sockets_allocated);
+err_percpu_counter_init:
+ kmem_cache_destroy(sctp_chunk_cachep);
+err_chunk_cachep:
+ kmem_cache_destroy(sctp_bucket_cachep);
+ goto out;
+}
+
+/* Exit handler for the SCTP protocol. */
+static __exit void sctp_exit(void)
+{
+ /* BUG. This should probably do something useful like clean
+ * up all the remaining associations and all that memory.
+ */
+
+ /* Unregister with inet6/inet layers. */
+ sctp_v6_del_protocol();
+ sctp_v4_del_protocol();
+
+ unregister_pernet_subsys(&sctp_ctrlsock_ops);
+
+ /* Free protosw registrations */
+ sctp_v6_protosw_exit();
+ sctp_v4_protosw_exit();
+
+ unregister_pernet_subsys(&sctp_defaults_ops);
+
+ /* Unregister with socket layer. */
+ sctp_v6_pf_exit();
+ sctp_v4_pf_exit();
+
+ sctp_sysctl_unregister();
+
+ free_pages((unsigned long)sctp_port_hashtable,
+ get_order(sctp_port_hashsize *
+ sizeof(struct sctp_bind_hashbucket)));
+ kfree(sctp_ep_hashtable);
+ sctp_transport_hashtable_destroy();
+
+ percpu_counter_destroy(&sctp_sockets_allocated);
+
+ rcu_barrier(); /* Wait for completion of call_rcu()'s */
+
+ kmem_cache_destroy(sctp_chunk_cachep);
+ kmem_cache_destroy(sctp_bucket_cachep);
+}
+
+module_init(sctp_init);
+module_exit(sctp_exit);
+
+/*
+ * __stringify doesn't like enums, so use the IPPROTO_SCTP value (132) directly.
+ */
+MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-132");
+MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-132");
+MODULE_AUTHOR("Linux Kernel SCTP developers <linux-sctp@vger.kernel.org>");
+MODULE_DESCRIPTION("Support for the SCTP protocol (RFC2960)");
+module_param_named(no_checksums, sctp_checksum_disable, bool, 0644);
+MODULE_PARM_DESC(no_checksums, "Disable checksums computing and verification");
+MODULE_LICENSE("GPL");
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
new file mode 100644
index 000000000..c7503fd64
--- /dev/null
+++ b/net/sctp/sm_make_chunk.c
@@ -0,0 +1,3937 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001-2002 Intel Corp.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * These functions work with the state functions in sctp_sm_statefuns.c
+ * to implement the state operations. These functions implement the
+ * steps which require modifying existing data structures.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * C. Robin <chris@hundredacre.ac.uk>
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * Xingang Guo <xingang.guo@intel.com>
+ * Dajiang Zhang <dajiang.zhang@nokia.com>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ * Daisy Chang <daisyc@us.ibm.com>
+ * Ardelle Fan <ardelle.fan@intel.com>
+ * Kevin Gao <kevin.gao@intel.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <crypto/hash.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/net.h>
+#include <linux/inet.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <net/sock.h>
+
+#include <linux/skbuff.h>
+#include <linux/random.h> /* for get_random_bytes */
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+
+static struct sctp_chunk *sctp_make_control(const struct sctp_association *asoc,
+ __u8 type, __u8 flags, int paylen,
+ gfp_t gfp);
+static struct sctp_chunk *sctp_make_data(const struct sctp_association *asoc,
+ __u8 flags, int paylen, gfp_t gfp);
+static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc,
+ __u8 type, __u8 flags, int paylen,
+ gfp_t gfp);
+static struct sctp_cookie_param *sctp_pack_cookie(
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const struct sctp_chunk *init_chunk,
+ int *cookie_len,
+ const __u8 *raw_addrs, int addrs_len);
+static int sctp_process_param(struct sctp_association *asoc,
+ union sctp_params param,
+ const union sctp_addr *peer_addr,
+ gfp_t gfp);
+static void *sctp_addto_param(struct sctp_chunk *chunk, int len,
+ const void *data);
+
+/* Control chunk destructor */
+static void sctp_control_release_owner(struct sk_buff *skb)
+{
+ struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg;
+
+ if (chunk->shkey) {
+ struct sctp_shared_key *shkey = chunk->shkey;
+ struct sctp_association *asoc = chunk->asoc;
+
+ /* refcnt == 2 and !list_empty mean after this release, it's
+ * not being used anywhere, and it's time to notify userland
+ * that this shkey can be freed if it's been deactivated.
+ */
+ if (shkey->deactivated && !list_empty(&shkey->key_list) &&
+ refcount_read(&shkey->refcnt) == 2) {
+ struct sctp_ulpevent *ev;
+
+ ev = sctp_ulpevent_make_authkey(asoc, shkey->key_id,
+ SCTP_AUTH_FREE_KEY,
+ GFP_KERNEL);
+ if (ev)
+ asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
+ }
+ sctp_auth_shkey_release(chunk->shkey);
+ }
+}
+
+static void sctp_control_set_owner_w(struct sctp_chunk *chunk)
+{
+ struct sctp_association *asoc = chunk->asoc;
+ struct sk_buff *skb = chunk->skb;
+
+ /* TODO: properly account for control chunks.
+ * To do it right we'll need:
+ * 1) endpoint if association isn't known.
+ * 2) proper memory accounting.
+ *
+	 * For now, do nothing.
+ */
+ if (chunk->auth) {
+ chunk->shkey = asoc->shkey;
+ sctp_auth_shkey_hold(chunk->shkey);
+ }
+ skb->sk = asoc ? asoc->base.sk : NULL;
+ skb_shinfo(skb)->destructor_arg = chunk;
+ skb->destructor = sctp_control_release_owner;
+}
+
+/* What was the inbound interface for this chunk? */
+int sctp_chunk_iif(const struct sctp_chunk *chunk)
+{
+ struct sk_buff *skb = chunk->skb;
+
+ return SCTP_INPUT_CB(skb)->af->skb_iif(skb);
+}
+
+/* RFC 2960 3.3.2 Initiation (INIT) (1)
+ *
+ * Note 2: The ECN capable field is reserved for future use of
+ * Explicit Congestion Notification.
+ */
+static const struct sctp_paramhdr ecap_param = {
+ SCTP_PARAM_ECN_CAPABLE,
+ cpu_to_be16(sizeof(struct sctp_paramhdr)),
+};
+static const struct sctp_paramhdr prsctp_param = {
+ SCTP_PARAM_FWD_TSN_SUPPORT,
+ cpu_to_be16(sizeof(struct sctp_paramhdr)),
+};
+
+/* A helper to initialize an op error inside a provided chunk, as most
+ * cause codes will be embedded inside an abort chunk.
+ */
+int sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
+ size_t paylen)
+{
+ struct sctp_errhdr err;
+ __u16 len;
+
+ /* Cause code constants are now defined in network order. */
+ err.cause = cause_code;
+ len = sizeof(err) + paylen;
+ err.length = htons(len);
+
+ if (skb_tailroom(chunk->skb) < len)
+ return -ENOSPC;
+
+ chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(err), &err);
+
+ return 0;
+}
+
+/* 3.3.2 Initiation (INIT) (1)
+ *
+ * This chunk is used to initiate a SCTP association between two
+ * endpoints. The format of the INIT chunk is shown below:
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Type = 1 | Chunk Flags | Chunk Length |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Initiate Tag |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Advertised Receiver Window Credit (a_rwnd) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Number of Outbound Streams | Number of Inbound Streams |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Initial TSN |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * \ \
+ * / Optional/Variable-Length Parameters /
+ * \ \
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ *
+ * The INIT chunk contains the following parameters. Unless otherwise
+ * noted, each parameter MUST only be included once in the INIT chunk.
+ *
+ * Fixed Parameters Status
+ * ----------------------------------------------
+ * Initiate Tag Mandatory
+ * Advertised Receiver Window Credit Mandatory
+ * Number of Outbound Streams Mandatory
+ * Number of Inbound Streams Mandatory
+ * Initial TSN Mandatory
+ *
+ * Variable Parameters Status Type Value
+ * -------------------------------------------------------------
+ * IPv4 Address (Note 1) Optional 5
+ * IPv6 Address (Note 1) Optional 6
+ * Cookie Preservative Optional 9
+ * Reserved for ECN Capable (Note 2) Optional 32768 (0x8000)
+ * Host Name Address (Note 3) Optional 11
+ * Supported Address Types (Note 4) Optional 12
+ */
+struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
+ const struct sctp_bind_addr *bp,
+ gfp_t gfp, int vparam_len)
+{
+ struct sctp_supported_ext_param ext_param;
+ struct sctp_adaptation_ind_param aiparam;
+ struct sctp_paramhdr *auth_chunks = NULL;
+ struct sctp_paramhdr *auth_hmacs = NULL;
+ struct sctp_supported_addrs_param sat;
+ struct sctp_endpoint *ep = asoc->ep;
+ struct sctp_chunk *retval = NULL;
+ int num_types, addrs_len = 0;
+ struct sctp_inithdr init;
+ union sctp_params addrs;
+ struct sctp_sock *sp;
+ __u8 extensions[5];
+ size_t chunksize;
+ __be16 types[2];
+ int num_ext = 0;
+
+ /* RFC 2960 3.3.2 Initiation (INIT) (1)
+ *
+ * Note 1: The INIT chunks can contain multiple addresses that
+ * can be IPv4 and/or IPv6 in any combination.
+ */
+
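+	/* The chunk is built in two passes: the sizing code below computes
+	 * chunksize for every parameter that will be included, and the code
+	 * after sctp_make_control() appends those parameters in order.
+	 */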
+ /* Convert the provided bind address list to raw format. */
+ addrs = sctp_bind_addrs_to_raw(bp, &addrs_len, gfp);
+
+ init.init_tag = htonl(asoc->c.my_vtag);
+ init.a_rwnd = htonl(asoc->rwnd);
+ init.num_outbound_streams = htons(asoc->c.sinit_num_ostreams);
+ init.num_inbound_streams = htons(asoc->c.sinit_max_instreams);
+ init.initial_tsn = htonl(asoc->c.initial_tsn);
+
+ /* How many address types are needed? */
+ sp = sctp_sk(asoc->base.sk);
+ num_types = sp->pf->supported_addrs(sp, types);
+
+ chunksize = sizeof(init) + addrs_len;
+ chunksize += SCTP_PAD4(SCTP_SAT_LEN(num_types));
+
+ if (asoc->ep->ecn_enable)
+ chunksize += sizeof(ecap_param);
+
+ if (asoc->ep->prsctp_enable)
+ chunksize += sizeof(prsctp_param);
+
+ /* ADDIP: Section 4.2.7:
+ * An implementation supporting this extension [ADDIP] MUST list
+ * the ASCONF,the ASCONF-ACK, and the AUTH chunks in its INIT and
+ * INIT-ACK parameters.
+ */
+ if (asoc->ep->asconf_enable) {
+ extensions[num_ext] = SCTP_CID_ASCONF;
+ extensions[num_ext+1] = SCTP_CID_ASCONF_ACK;
+ num_ext += 2;
+ }
+
+ if (asoc->ep->reconf_enable) {
+ extensions[num_ext] = SCTP_CID_RECONF;
+ num_ext += 1;
+ }
+
+ if (sp->adaptation_ind)
+ chunksize += sizeof(aiparam);
+
+ if (asoc->ep->intl_enable) {
+ extensions[num_ext] = SCTP_CID_I_DATA;
+ num_ext += 1;
+ }
+
+ chunksize += vparam_len;
+
+ /* Account for AUTH related parameters */
+ if (ep->auth_enable) {
+		/* Add random parameter length */
+ chunksize += sizeof(asoc->c.auth_random);
+
+ /* Add HMACS parameter length if any were defined */
+ auth_hmacs = (struct sctp_paramhdr *)asoc->c.auth_hmacs;
+ if (auth_hmacs->length)
+ chunksize += SCTP_PAD4(ntohs(auth_hmacs->length));
+ else
+ auth_hmacs = NULL;
+
+ /* Add CHUNKS parameter length */
+ auth_chunks = (struct sctp_paramhdr *)asoc->c.auth_chunks;
+ if (auth_chunks->length)
+ chunksize += SCTP_PAD4(ntohs(auth_chunks->length));
+ else
+ auth_chunks = NULL;
+
+ extensions[num_ext] = SCTP_CID_AUTH;
+ num_ext += 1;
+ }
+
+ /* If we have any extensions to report, account for that */
+ if (num_ext)
+ chunksize += SCTP_PAD4(sizeof(ext_param) + num_ext);
+
+ /* RFC 2960 3.3.2 Initiation (INIT) (1)
+ *
+ * Note 3: An INIT chunk MUST NOT contain more than one Host
+ * Name address parameter. Moreover, the sender of the INIT
+ * MUST NOT combine any other address types with the Host Name
+ * address in the INIT. The receiver of INIT MUST ignore any
+ * other address types if the Host Name address parameter is
+ * present in the received INIT chunk.
+ *
+ * PLEASE DO NOT FIXME [This version does not support Host Name.]
+ */
+
+ retval = sctp_make_control(asoc, SCTP_CID_INIT, 0, chunksize, gfp);
+ if (!retval)
+ goto nodata;
+
+ retval->subh.init_hdr =
+ sctp_addto_chunk(retval, sizeof(init), &init);
+ retval->param_hdr.v =
+ sctp_addto_chunk(retval, addrs_len, addrs.v);
+
+ /* RFC 2960 3.3.2 Initiation (INIT) (1)
+ *
+ * Note 4: This parameter, when present, specifies all the
+ * address types the sending endpoint can support. The absence
+ * of this parameter indicates that the sending endpoint can
+ * support any address type.
+ */
+ sat.param_hdr.type = SCTP_PARAM_SUPPORTED_ADDRESS_TYPES;
+ sat.param_hdr.length = htons(SCTP_SAT_LEN(num_types));
+ sctp_addto_chunk(retval, sizeof(sat), &sat);
+ sctp_addto_chunk(retval, num_types * sizeof(__u16), &types);
+
+ if (asoc->ep->ecn_enable)
+ sctp_addto_chunk(retval, sizeof(ecap_param), &ecap_param);
+
+ /* Add the supported extensions parameter. Be nice and add this
+	 * first before adding the parameters for the extensions themselves.
+ */
+ if (num_ext) {
+ ext_param.param_hdr.type = SCTP_PARAM_SUPPORTED_EXT;
+ ext_param.param_hdr.length = htons(sizeof(ext_param) + num_ext);
+ sctp_addto_chunk(retval, sizeof(ext_param), &ext_param);
+ sctp_addto_param(retval, num_ext, extensions);
+ }
+
+ if (asoc->ep->prsctp_enable)
+ sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param);
+
+ if (sp->adaptation_ind) {
+ aiparam.param_hdr.type = SCTP_PARAM_ADAPTATION_LAYER_IND;
+ aiparam.param_hdr.length = htons(sizeof(aiparam));
+ aiparam.adaptation_ind = htonl(sp->adaptation_ind);
+ sctp_addto_chunk(retval, sizeof(aiparam), &aiparam);
+ }
+
+ /* Add SCTP-AUTH chunks to the parameter list */
+ if (ep->auth_enable) {
+ sctp_addto_chunk(retval, sizeof(asoc->c.auth_random),
+ asoc->c.auth_random);
+ if (auth_hmacs)
+ sctp_addto_chunk(retval, ntohs(auth_hmacs->length),
+ auth_hmacs);
+ if (auth_chunks)
+ sctp_addto_chunk(retval, ntohs(auth_chunks->length),
+ auth_chunks);
+ }
+nodata:
+ kfree(addrs.v);
+ return retval;
+}
+
+struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk,
+ gfp_t gfp, int unkparam_len)
+{
+ struct sctp_supported_ext_param ext_param;
+ struct sctp_adaptation_ind_param aiparam;
+ struct sctp_paramhdr *auth_chunks = NULL;
+ struct sctp_paramhdr *auth_random = NULL;
+ struct sctp_paramhdr *auth_hmacs = NULL;
+ struct sctp_chunk *retval = NULL;
+ struct sctp_cookie_param *cookie;
+ struct sctp_inithdr initack;
+ union sctp_params addrs;
+ struct sctp_sock *sp;
+ __u8 extensions[5];
+ size_t chunksize;
+ int num_ext = 0;
+ int cookie_len;
+ int addrs_len;
+
+ /* Note: there may be no addresses to embed. */
+ addrs = sctp_bind_addrs_to_raw(&asoc->base.bind_addr, &addrs_len, gfp);
+
+ initack.init_tag = htonl(asoc->c.my_vtag);
+ initack.a_rwnd = htonl(asoc->rwnd);
+ initack.num_outbound_streams = htons(asoc->c.sinit_num_ostreams);
+ initack.num_inbound_streams = htons(asoc->c.sinit_max_instreams);
+ initack.initial_tsn = htonl(asoc->c.initial_tsn);
+
+ /* FIXME: We really ought to build the cookie right
+ * into the packet instead of allocating more fresh memory.
+ */
+ cookie = sctp_pack_cookie(asoc->ep, asoc, chunk, &cookie_len,
+ addrs.v, addrs_len);
+ if (!cookie)
+ goto nomem_cookie;
+
+ /* Calculate the total size of allocation, include the reserved
+ * space for reporting unknown parameters if it is specified.
+ */
+ sp = sctp_sk(asoc->base.sk);
+ chunksize = sizeof(initack) + addrs_len + cookie_len + unkparam_len;
+
+ /* Tell peer that we'll do ECN only if peer advertised such cap. */
+ if (asoc->peer.ecn_capable)
+ chunksize += sizeof(ecap_param);
+
+ if (asoc->peer.prsctp_capable)
+ chunksize += sizeof(prsctp_param);
+
+ if (asoc->peer.asconf_capable) {
+ extensions[num_ext] = SCTP_CID_ASCONF;
+ extensions[num_ext+1] = SCTP_CID_ASCONF_ACK;
+ num_ext += 2;
+ }
+
+ if (asoc->peer.reconf_capable) {
+ extensions[num_ext] = SCTP_CID_RECONF;
+ num_ext += 1;
+ }
+
+ if (sp->adaptation_ind)
+ chunksize += sizeof(aiparam);
+
+ if (asoc->peer.intl_capable) {
+ extensions[num_ext] = SCTP_CID_I_DATA;
+ num_ext += 1;
+ }
+
+ if (asoc->peer.auth_capable) {
+ auth_random = (struct sctp_paramhdr *)asoc->c.auth_random;
+ chunksize += ntohs(auth_random->length);
+
+ auth_hmacs = (struct sctp_paramhdr *)asoc->c.auth_hmacs;
+ if (auth_hmacs->length)
+ chunksize += SCTP_PAD4(ntohs(auth_hmacs->length));
+ else
+ auth_hmacs = NULL;
+
+ auth_chunks = (struct sctp_paramhdr *)asoc->c.auth_chunks;
+ if (auth_chunks->length)
+ chunksize += SCTP_PAD4(ntohs(auth_chunks->length));
+ else
+ auth_chunks = NULL;
+
+ extensions[num_ext] = SCTP_CID_AUTH;
+ num_ext += 1;
+ }
+
+ if (num_ext)
+ chunksize += SCTP_PAD4(sizeof(ext_param) + num_ext);
+
+ /* Now allocate and fill out the chunk. */
+ retval = sctp_make_control(asoc, SCTP_CID_INIT_ACK, 0, chunksize, gfp);
+ if (!retval)
+ goto nomem_chunk;
+
+ /* RFC 2960 6.4 Multi-homed SCTP Endpoints
+ *
+ * An endpoint SHOULD transmit reply chunks (e.g., SACK,
+	 * HEARTBEAT ACK, etc.) to the same destination transport
+ * address from which it received the DATA or control chunk
+ * to which it is replying.
+ *
+ * [INIT ACK back to where the INIT came from.]
+ */
+ if (chunk->transport)
+ retval->transport =
+ sctp_assoc_lookup_paddr(asoc,
+ &chunk->transport->ipaddr);
+
+ retval->subh.init_hdr =
+ sctp_addto_chunk(retval, sizeof(initack), &initack);
+ retval->param_hdr.v = sctp_addto_chunk(retval, addrs_len, addrs.v);
+ sctp_addto_chunk(retval, cookie_len, cookie);
+ if (asoc->peer.ecn_capable)
+ sctp_addto_chunk(retval, sizeof(ecap_param), &ecap_param);
+ if (num_ext) {
+ ext_param.param_hdr.type = SCTP_PARAM_SUPPORTED_EXT;
+ ext_param.param_hdr.length = htons(sizeof(ext_param) + num_ext);
+ sctp_addto_chunk(retval, sizeof(ext_param), &ext_param);
+ sctp_addto_param(retval, num_ext, extensions);
+ }
+ if (asoc->peer.prsctp_capable)
+ sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param);
+
+ if (sp->adaptation_ind) {
+ aiparam.param_hdr.type = SCTP_PARAM_ADAPTATION_LAYER_IND;
+ aiparam.param_hdr.length = htons(sizeof(aiparam));
+ aiparam.adaptation_ind = htonl(sp->adaptation_ind);
+ sctp_addto_chunk(retval, sizeof(aiparam), &aiparam);
+ }
+
+ if (asoc->peer.auth_capable) {
+ sctp_addto_chunk(retval, ntohs(auth_random->length),
+ auth_random);
+ if (auth_hmacs)
+ sctp_addto_chunk(retval, ntohs(auth_hmacs->length),
+ auth_hmacs);
+ if (auth_chunks)
+ sctp_addto_chunk(retval, ntohs(auth_chunks->length),
+ auth_chunks);
+ }
+
+ /* We need to remove the const qualifier at this point. */
+ retval->asoc = (struct sctp_association *) asoc;
+
+nomem_chunk:
+ kfree(cookie);
+nomem_cookie:
+ kfree(addrs.v);
+ return retval;
+}
+
+/* 3.3.11 Cookie Echo (COOKIE ECHO) (10):
+ *
+ * This chunk is used only during the initialization of an association.
+ * It is sent by the initiator of an association to its peer to complete
+ * the initialization process. This chunk MUST precede any DATA chunk
+ * sent within the association, but MAY be bundled with one or more DATA
+ * chunks in the same packet.
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Type = 10 |Chunk Flags | Length |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * / Cookie /
+ * \ \
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Chunk Flags: 8 bit
+ *
+ * Set to zero on transmit and ignored on receipt.
+ *
+ * Length: 16 bits (unsigned integer)
+ *
+ * Set to the size of the chunk in bytes, including the 4 bytes of
+ * the chunk header and the size of the Cookie.
+ *
+ * Cookie: variable size
+ *
+ * This field must contain the exact cookie received in the
+ * State Cookie parameter from the previous INIT ACK.
+ *
+ * An implementation SHOULD make the cookie as small as possible
+ * to insure interoperability.
+ */
+struct sctp_chunk *sctp_make_cookie_echo(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk)
+{
+ struct sctp_chunk *retval;
+ int cookie_len;
+ void *cookie;
+
+ cookie = asoc->peer.cookie;
+ cookie_len = asoc->peer.cookie_len;
+
+ /* Build a cookie echo chunk. */
+ retval = sctp_make_control(asoc, SCTP_CID_COOKIE_ECHO, 0,
+ cookie_len, GFP_ATOMIC);
+ if (!retval)
+ goto nodata;
+ retval->subh.cookie_hdr =
+ sctp_addto_chunk(retval, cookie_len, cookie);
+
+ /* RFC 2960 6.4 Multi-homed SCTP Endpoints
+ *
+ * An endpoint SHOULD transmit reply chunks (e.g., SACK,
+	 * HEARTBEAT ACK, etc.) to the same destination transport
+	 * address from which it received the DATA or control chunk
+ * to which it is replying.
+ *
+ * [COOKIE ECHO back to where the INIT ACK came from.]
+ */
+ if (chunk)
+ retval->transport = chunk->transport;
+
+nodata:
+ return retval;
+}
+
+/* 3.3.12 Cookie Acknowledgement (COOKIE ACK) (11):
+ *
+ * This chunk is used only during the initialization of an
+ * association. It is used to acknowledge the receipt of a COOKIE
+ * ECHO chunk. This chunk MUST precede any DATA or SACK chunk sent
+ * within the association, but MAY be bundled with one or more DATA
+ * chunks or SACK chunk in the same SCTP packet.
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Type = 11 |Chunk Flags | Length = 4 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Chunk Flags: 8 bits
+ *
+ * Set to zero on transmit and ignored on receipt.
+ */
+struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk)
+{
+ struct sctp_chunk *retval;
+
+ retval = sctp_make_control(asoc, SCTP_CID_COOKIE_ACK, 0, 0, GFP_ATOMIC);
+
+ /* RFC 2960 6.4 Multi-homed SCTP Endpoints
+ *
+ * An endpoint SHOULD transmit reply chunks (e.g., SACK,
+	 * HEARTBEAT ACK, etc.) to the same destination transport
+	 * address from which it received the DATA or control chunk
+ * to which it is replying.
+ *
+ * [COOKIE ACK back to where the COOKIE ECHO came from.]
+ */
+ if (retval && chunk && chunk->transport)
+ retval->transport =
+ sctp_assoc_lookup_paddr(asoc,
+ &chunk->transport->ipaddr);
+
+ return retval;
+}
+
+/*
+ * Appendix A: Explicit Congestion Notification:
+ * CWR:
+ *
+ * RFC 2481 details a specific bit for a sender to send in the header of
+ * its next outbound TCP segment to indicate to its peer that it has
+ * reduced its congestion window. This is termed the CWR bit. For
+ * SCTP the same indication is made by including the CWR chunk.
+ * This chunk contains one data element, i.e. the TSN number that
+ * was sent in the ECNE chunk. This element represents the lowest
+ * TSN number in the datagram that was originally marked with the
+ * CE bit.
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Chunk Type=13 | Flags=00000000| Chunk Length = 8 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Lowest TSN Number |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Note: The CWR is considered a Control chunk.
+ */
+struct sctp_chunk *sctp_make_cwr(const struct sctp_association *asoc,
+ const __u32 lowest_tsn,
+ const struct sctp_chunk *chunk)
+{
+ struct sctp_chunk *retval;
+ struct sctp_cwrhdr cwr;
+
+ cwr.lowest_tsn = htonl(lowest_tsn);
+ retval = sctp_make_control(asoc, SCTP_CID_ECN_CWR, 0,
+ sizeof(cwr), GFP_ATOMIC);
+
+ if (!retval)
+ goto nodata;
+
+ retval->subh.ecn_cwr_hdr =
+ sctp_addto_chunk(retval, sizeof(cwr), &cwr);
+
+ /* RFC 2960 6.4 Multi-homed SCTP Endpoints
+ *
+ * An endpoint SHOULD transmit reply chunks (e.g., SACK,
+	 * HEARTBEAT ACK, etc.) to the same destination transport
+	 * address from which it received the DATA or control chunk
+ * to which it is replying.
+ *
+ * [Report a reduced congestion window back to where the ECNE
+ * came from.]
+ */
+ if (chunk)
+ retval->transport = chunk->transport;
+
+nodata:
+ return retval;
+}
+
+/* Make an ECNE chunk. This is a congestion experienced report. */
+struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc,
+ const __u32 lowest_tsn)
+{
+ struct sctp_chunk *retval;
+ struct sctp_ecnehdr ecne;
+
+ ecne.lowest_tsn = htonl(lowest_tsn);
+ retval = sctp_make_control(asoc, SCTP_CID_ECN_ECNE, 0,
+ sizeof(ecne), GFP_ATOMIC);
+ if (!retval)
+ goto nodata;
+ retval->subh.ecne_hdr =
+ sctp_addto_chunk(retval, sizeof(ecne), &ecne);
+
+nodata:
+ return retval;
+}
+
+/* Make a DATA chunk for the given association from the provided
+ * parameters. However, do not populate the data payload.
+ */
+struct sctp_chunk *sctp_make_datafrag_empty(const struct sctp_association *asoc,
+ const struct sctp_sndrcvinfo *sinfo,
+ int len, __u8 flags, gfp_t gfp)
+{
+ struct sctp_chunk *retval;
+ struct sctp_datahdr dp;
+
+ /* We assign the TSN as LATE as possible, not here when
+ * creating the chunk.
+ */
+ memset(&dp, 0, sizeof(dp));
+ dp.ppid = sinfo->sinfo_ppid;
+ dp.stream = htons(sinfo->sinfo_stream);
+
+ /* Set the flags for an unordered send. */
+ if (sinfo->sinfo_flags & SCTP_UNORDERED)
+ flags |= SCTP_DATA_UNORDERED;
+
+ retval = sctp_make_data(asoc, flags, sizeof(dp) + len, gfp);
+ if (!retval)
+ return NULL;
+
+ retval->subh.data_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
+ memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));
+
+ return retval;
+}
+
+/* Create a selective acknowledgement (SACK) for the given
+ * association. This reports on which TSNs we've seen to date,
+ * including duplicates and gaps.
+ */
+struct sctp_chunk *sctp_make_sack(struct sctp_association *asoc)
+{
+ struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
+ struct sctp_gap_ack_block gabs[SCTP_MAX_GABS];
+ __u16 num_gabs, num_dup_tsns;
+ struct sctp_transport *trans;
+ struct sctp_chunk *retval;
+ struct sctp_sackhdr sack;
+ __u32 ctsn;
+ int len;
+
+ memset(gabs, 0, sizeof(gabs));
+ ctsn = sctp_tsnmap_get_ctsn(map);
+
+ pr_debug("%s: sackCTSNAck sent:0x%x\n", __func__, ctsn);
+
+ /* How much room is needed in the chunk? */
+ num_gabs = sctp_tsnmap_num_gabs(map, gabs);
+ num_dup_tsns = sctp_tsnmap_num_dups(map);
+
+ /* Initialize the SACK header. */
+ sack.cum_tsn_ack = htonl(ctsn);
+ sack.a_rwnd = htonl(asoc->a_rwnd);
+ sack.num_gap_ack_blocks = htons(num_gabs);
+ sack.num_dup_tsns = htons(num_dup_tsns);
+
+ len = sizeof(sack)
+ + sizeof(struct sctp_gap_ack_block) * num_gabs
+ + sizeof(__u32) * num_dup_tsns;
+
+ /* Create the chunk. */
+ retval = sctp_make_control(asoc, SCTP_CID_SACK, 0, len, GFP_ATOMIC);
+ if (!retval)
+ goto nodata;
+
+ /* RFC 2960 6.4 Multi-homed SCTP Endpoints
+ *
+ * An endpoint SHOULD transmit reply chunks (e.g., SACK,
+ * HEARTBEAT ACK, etc.) to the same destination transport
+ * address from which it received the DATA or control chunk to
+ * which it is replying. This rule should also be followed if
+ * the endpoint is bundling DATA chunks together with the
+ * reply chunk.
+ *
+ * However, when acknowledging multiple DATA chunks received
+ * in packets from different source addresses in a single
+ * SACK, the SACK chunk may be transmitted to one of the
+ * destination transport addresses from which the DATA or
+ * control chunks being acknowledged were received.
+ *
+ * [BUG: We do not implement the following paragraph.
+ * Perhaps we should remember the last transport we used for a
+ * SACK and avoid that (if possible) if we have seen any
+ * duplicates. --piggy]
+ *
+ * When a receiver of a duplicate DATA chunk sends a SACK to a
+	 * multi-homed endpoint it MAY be beneficial to vary the
+ * destination address and not use the source address of the
+ * DATA chunk. The reason being that receiving a duplicate
+ * from a multi-homed endpoint might indicate that the return
+ * path (as specified in the source address of the DATA chunk)
+ * for the SACK is broken.
+ *
+ * [Send to the address from which we last received a DATA chunk.]
+ */
+ retval->transport = asoc->peer.last_data_from;
+
+ retval->subh.sack_hdr =
+ sctp_addto_chunk(retval, sizeof(sack), &sack);
+
+ /* Add the gap ack block information. */
+ if (num_gabs)
+ sctp_addto_chunk(retval, sizeof(__u32) * num_gabs,
+ gabs);
+
+ /* Add the duplicate TSN information. */
+ if (num_dup_tsns) {
+ asoc->stats.idupchunks += num_dup_tsns;
+ sctp_addto_chunk(retval, sizeof(__u32) * num_dup_tsns,
+ sctp_tsnmap_get_dups(map));
+ }
+	/* Once we have a SACK generated, check our SACK generation; if it
+	 * has wrapped to 0, reset every transport's generation to 0 and set
+	 * the association's generation to 1.
+	 *
+	 * The idea is that zero is never used as a valid generation for the
+	 * association, so no transport will match after a wrap event like
+	 * this until the next SACK.
+ */
+ if (++asoc->peer.sack_generation == 0) {
+ list_for_each_entry(trans, &asoc->peer.transport_addr_list,
+ transports)
+ trans->sack_generation = 0;
+ asoc->peer.sack_generation = 1;
+ }
+nodata:
+ return retval;
+}
+
+/* Make a SHUTDOWN chunk. */
+struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk)
+{
+ struct sctp_shutdownhdr shut;
+ struct sctp_chunk *retval;
+ __u32 ctsn;
+
+ ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
+ shut.cum_tsn_ack = htonl(ctsn);
+
+ retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN, 0,
+ sizeof(shut), GFP_ATOMIC);
+ if (!retval)
+ goto nodata;
+
+ retval->subh.shutdown_hdr =
+ sctp_addto_chunk(retval, sizeof(shut), &shut);
+
+ if (chunk)
+ retval->transport = chunk->transport;
+nodata:
+ return retval;
+}
+
+struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk)
+{
+ struct sctp_chunk *retval;
+
+ retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN_ACK, 0, 0,
+ GFP_ATOMIC);
+
+ /* RFC 2960 6.4 Multi-homed SCTP Endpoints
+ *
+ * An endpoint SHOULD transmit reply chunks (e.g., SACK,
+	 * HEARTBEAT ACK, etc.) to the same destination transport
+	 * address from which it received the DATA or control chunk
+ * to which it is replying.
+ *
+ * [ACK back to where the SHUTDOWN came from.]
+ */
+ if (retval && chunk)
+ retval->transport = chunk->transport;
+
+ return retval;
+}
+
+struct sctp_chunk *sctp_make_shutdown_complete(
+ const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk)
+{
+ struct sctp_chunk *retval;
+ __u8 flags = 0;
+
+ /* Set the T-bit if we have no association (vtag will be
+ * reflected)
+ */
+ flags |= asoc ? 0 : SCTP_CHUNK_FLAG_T;
+
+ retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN_COMPLETE, flags,
+ 0, GFP_ATOMIC);
+
+ /* RFC 2960 6.4 Multi-homed SCTP Endpoints
+ *
+ * An endpoint SHOULD transmit reply chunks (e.g., SACK,
+	 * HEARTBEAT ACK, etc.) to the same destination transport
+	 * address from which it received the DATA or control chunk
+ * to which it is replying.
+ *
+ * [Report SHUTDOWN COMPLETE back to where the SHUTDOWN ACK
+ * came from.]
+ */
+ if (retval && chunk)
+ retval->transport = chunk->transport;
+
+ return retval;
+}
+
+/* Create an ABORT. Note that we set the T bit if we have no
+ * association, except when responding to an INIT (sctpimpguide 2.41).
+ */
+struct sctp_chunk *sctp_make_abort(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk,
+ const size_t hint)
+{
+ struct sctp_chunk *retval;
+ __u8 flags = 0;
+
+ /* Set the T-bit if we have no association and 'chunk' is not
+ * an INIT (vtag will be reflected).
+ */
+ if (!asoc) {
+ if (chunk && chunk->chunk_hdr &&
+ chunk->chunk_hdr->type == SCTP_CID_INIT)
+ flags = 0;
+ else
+ flags = SCTP_CHUNK_FLAG_T;
+ }
+
+ retval = sctp_make_control(asoc, SCTP_CID_ABORT, flags, hint,
+ GFP_ATOMIC);
+
+ /* RFC 2960 6.4 Multi-homed SCTP Endpoints
+ *
+ * An endpoint SHOULD transmit reply chunks (e.g., SACK,
+	 * HEARTBEAT ACK, etc.) to the same destination transport
+	 * address from which it received the DATA or control chunk
+ * to which it is replying.
+ *
+ * [ABORT back to where the offender came from.]
+ */
+ if (retval && chunk)
+ retval->transport = chunk->transport;
+
+ return retval;
+}
+
+/* Helper to create ABORT with a NO_USER_DATA error. */
+struct sctp_chunk *sctp_make_abort_no_data(
+ const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk,
+ __u32 tsn)
+{
+ struct sctp_chunk *retval;
+ __be32 payload;
+
+ retval = sctp_make_abort(asoc, chunk,
+ sizeof(struct sctp_errhdr) + sizeof(tsn));
+
+ if (!retval)
+ goto no_mem;
+
+ /* Put the tsn back into network byte order. */
+ payload = htonl(tsn);
+ sctp_init_cause(retval, SCTP_ERROR_NO_DATA, sizeof(payload));
+ sctp_addto_chunk(retval, sizeof(payload), (const void *)&payload);
+
+ /* RFC 2960 6.4 Multi-homed SCTP Endpoints
+ *
+ * An endpoint SHOULD transmit reply chunks (e.g., SACK,
+	 * HEARTBEAT ACK, etc.) to the same destination transport
+	 * address from which it received the DATA or control chunk
+ * to which it is replying.
+ *
+ * [ABORT back to where the offender came from.]
+ */
+ if (chunk)
+ retval->transport = chunk->transport;
+
+no_mem:
+ return retval;
+}
+
+/* Helper to create ABORT with a SCTP_ERROR_USER_ABORT error. */
+struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc,
+ struct msghdr *msg,
+ size_t paylen)
+{
+ struct sctp_chunk *retval;
+ void *payload = NULL;
+ int err;
+
+ retval = sctp_make_abort(asoc, NULL,
+ sizeof(struct sctp_errhdr) + paylen);
+ if (!retval)
+ goto err_chunk;
+
+ if (paylen) {
+ /* Put the msg_iov together into payload. */
+ payload = kmalloc(paylen, GFP_KERNEL);
+ if (!payload)
+ goto err_payload;
+
+ err = memcpy_from_msg(payload, msg, paylen);
+ if (err < 0)
+ goto err_copy;
+ }
+
+ sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, paylen);
+ sctp_addto_chunk(retval, paylen, payload);
+
+ if (paylen)
+ kfree(payload);
+
+ return retval;
+
+err_copy:
+ kfree(payload);
+err_payload:
+ sctp_chunk_free(retval);
+ retval = NULL;
+err_chunk:
+ return retval;
+}
+
+/* Append bytes to the end of a parameter. Will panic if chunk is not big
+ * enough.
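+ * Unlike sctp_addto_chunk(), this does not pad the existing data to a
+ * 4-byte boundary before appending.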
+ */
+static void *sctp_addto_param(struct sctp_chunk *chunk, int len,
+ const void *data)
+{
+ int chunklen = ntohs(chunk->chunk_hdr->length);
+ void *target;
+
+ target = skb_put(chunk->skb, len);
+
+ if (data)
+ memcpy(target, data, len);
+ else
+ memset(target, 0, len);
+
+ /* Adjust the chunk length field. */
+ chunk->chunk_hdr->length = htons(chunklen + len);
+ chunk->chunk_end = skb_tail_pointer(chunk->skb);
+
+ return target;
+}
+
+/* Make an ABORT chunk with a PROTOCOL VIOLATION cause code. */
+struct sctp_chunk *sctp_make_abort_violation(
+ const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk,
+ const __u8 *payload,
+ const size_t paylen)
+{
+ struct sctp_chunk *retval;
+ struct sctp_paramhdr phdr;
+
+ retval = sctp_make_abort(asoc, chunk, sizeof(struct sctp_errhdr) +
+ paylen + sizeof(phdr));
+ if (!retval)
+ goto end;
+
+ sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, paylen +
+ sizeof(phdr));
+
+ phdr.type = htons(chunk->chunk_hdr->type);
+ phdr.length = chunk->chunk_hdr->length;
+ sctp_addto_chunk(retval, paylen, payload);
+ sctp_addto_param(retval, sizeof(phdr), &phdr);
+
+end:
+ return retval;
+}
+
+struct sctp_chunk *sctp_make_violation_paramlen(
+ const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk,
+ struct sctp_paramhdr *param)
+{
+ static const char error[] = "The following parameter had invalid length:";
+ size_t payload_len = sizeof(error) + sizeof(struct sctp_errhdr) +
+ sizeof(*param);
+ struct sctp_chunk *retval;
+
+ retval = sctp_make_abort(asoc, chunk, payload_len);
+ if (!retval)
+ goto nodata;
+
+ sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION,
+ sizeof(error) + sizeof(*param));
+ sctp_addto_chunk(retval, sizeof(error), error);
+ sctp_addto_param(retval, sizeof(*param), param);
+
+nodata:
+ return retval;
+}
+
+struct sctp_chunk *sctp_make_violation_max_retrans(
+ const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk)
+{
+ static const char error[] = "Association exceeded its max_retrans count";
+ size_t payload_len = sizeof(error) + sizeof(struct sctp_errhdr);
+ struct sctp_chunk *retval;
+
+ retval = sctp_make_abort(asoc, chunk, payload_len);
+ if (!retval)
+ goto nodata;
+
+ sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, sizeof(error));
+ sctp_addto_chunk(retval, sizeof(error), error);
+
+nodata:
+ return retval;
+}
+
+struct sctp_chunk *sctp_make_new_encap_port(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk)
+{
+ struct sctp_new_encap_port_hdr nep;
+ struct sctp_chunk *retval;
+
+ retval = sctp_make_abort(asoc, chunk,
+ sizeof(struct sctp_errhdr) + sizeof(nep));
+ if (!retval)
+ goto nodata;
+
+ sctp_init_cause(retval, SCTP_ERROR_NEW_ENCAP_PORT, sizeof(nep));
+ nep.cur_port = SCTP_INPUT_CB(chunk->skb)->encap_port;
+ nep.new_port = chunk->transport->encap_port;
+ sctp_addto_chunk(retval, sizeof(nep), &nep);
+
+nodata:
+ return retval;
+}
+
+/* Make a HEARTBEAT chunk. */
+struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
+ const struct sctp_transport *transport,
+ __u32 probe_size)
+{
+ struct sctp_sender_hb_info hbinfo = {};
+ struct sctp_chunk *retval;
+
+ retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT, 0,
+ sizeof(hbinfo), GFP_ATOMIC);
+
+ if (!retval)
+ goto nodata;
+
+ hbinfo.param_hdr.type = SCTP_PARAM_HEARTBEAT_INFO;
+ hbinfo.param_hdr.length = htons(sizeof(hbinfo));
+ hbinfo.daddr = transport->ipaddr;
+ hbinfo.sent_at = jiffies;
+ hbinfo.hb_nonce = transport->hb_nonce;
+ hbinfo.probe_size = probe_size;
+
+ /* Cast away the 'const', as this is just telling the chunk
+ * what transport it belongs to.
+ */
+ retval->transport = (struct sctp_transport *) transport;
+ retval->subh.hbs_hdr = sctp_addto_chunk(retval, sizeof(hbinfo),
+ &hbinfo);
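+ /* A non-zero probe_size marks this HEARTBEAT as a PMTU probe. */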
+ retval->pmtu_probe = !!probe_size;
+
+nodata:
+ return retval;
+}
+
+struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk,
+ const void *payload,
+ const size_t paylen)
+{
+ struct sctp_chunk *retval;
+
+ retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT_ACK, 0, paylen,
+ GFP_ATOMIC);
+ if (!retval)
+ goto nodata;
+
+ retval->subh.hbs_hdr = sctp_addto_chunk(retval, paylen, payload);
+
+ /* RFC 2960 6.4 Multi-homed SCTP Endpoints
+ *
+ * An endpoint SHOULD transmit reply chunks (e.g., SACK,
+ * HEARTBEAT ACK, etc.) to the same destination transport
+ * address from which it received the DATA or control chunk
+ * to which it is replying.
+ *
+ * [HBACK back to where the HEARTBEAT came from.]
+ */
+ if (chunk)
+ retval->transport = chunk->transport;
+
+nodata:
+ return retval;
+}
+
+/* RFC4820 3. Padding Chunk (PAD)
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Type = 0x84 | Flags=0 | Length |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | |
+ * \ Padding Data /
+ * / \
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct sctp_chunk *sctp_make_pad(const struct sctp_association *asoc, int len)
+{
+ struct sctp_chunk *retval;
+
+ retval = sctp_make_control(asoc, SCTP_CID_PAD, 0, len, GFP_ATOMIC);
+ if (!retval)
+ return NULL;
+
+ skb_put_zero(retval->skb, len);
+ retval->chunk_hdr->length = htons(ntohs(retval->chunk_hdr->length) + len);
+ retval->chunk_end = skb_tail_pointer(retval->skb);
+
+ return retval;
+}
+
+/* Create an Operation Error chunk with the specified space reserved.
+ * This routine can be used for containing multiple causes in the chunk.
+ */
+static struct sctp_chunk *sctp_make_op_error_space(
+ const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk,
+ size_t size)
+{
+ struct sctp_chunk *retval;
+
+ retval = sctp_make_control(asoc, SCTP_CID_ERROR, 0,
+ sizeof(struct sctp_errhdr) + size,
+ GFP_ATOMIC);
+ if (!retval)
+ goto nodata;
+
+ /* RFC 2960 6.4 Multi-homed SCTP Endpoints
+ *
+ * An endpoint SHOULD transmit reply chunks (e.g., SACK,
+ * HEARTBEAT ACK, etc.) to the same destination transport
+ * address from which it received the DATA or control chunk
+ * to which it is replying.
+ *
+ */
+ if (chunk)
+ retval->transport = chunk->transport;
+
+nodata:
+ return retval;
+}
+
+/* Create an Operation Error chunk of a fixed size, specifically,
+ * min(asoc->pathmtu, SCTP_DEFAULT_MAXSEGMENT) - overheads.
+ * This is a helper function to allocate an error chunk for those
+ * invalid parameter codes in which we may not want to report all the
+ * errors, if the incoming chunk is large. If it can't fit in a single
+ * packet, we ignore it.
+ */
+static inline struct sctp_chunk *sctp_make_op_error_limited(
+ const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk)
+{
+ size_t size = SCTP_DEFAULT_MAXSEGMENT;
+ struct sctp_sock *sp = NULL;
+
+ if (asoc) {
+ size = min_t(size_t, size, asoc->pathmtu);
+ sp = sctp_sk(asoc->base.sk);
+ }
+
+ size = sctp_mtu_payload(sp, size, sizeof(struct sctp_errhdr));
+
+ return sctp_make_op_error_space(asoc, chunk, size);
+}
+
+/* Create an Operation Error chunk. */
+struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk,
+ __be16 cause_code, const void *payload,
+ size_t paylen, size_t reserve_tail)
+{
+ struct sctp_chunk *retval;
+
+ retval = sctp_make_op_error_space(asoc, chunk, paylen + reserve_tail);
+ if (!retval)
+ goto nodata;
+
+ sctp_init_cause(retval, cause_code, paylen + reserve_tail);
+ sctp_addto_chunk(retval, paylen, payload);
+ if (reserve_tail)
+ sctp_addto_param(retval, reserve_tail, NULL);
+
+nodata:
+ return retval;
+}
+
+struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc,
+ __u16 key_id)
+{
+ struct sctp_authhdr auth_hdr;
+ struct sctp_hmac *hmac_desc;
+ struct sctp_chunk *retval;
+
+ /* Get the first hmac that the peer told us to use */
+ hmac_desc = sctp_auth_asoc_get_hmac(asoc);
+ if (unlikely(!hmac_desc))
+ return NULL;
+
+ retval = sctp_make_control(asoc, SCTP_CID_AUTH, 0,
+ hmac_desc->hmac_len + sizeof(auth_hdr),
+ GFP_ATOMIC);
+ if (!retval)
+ return NULL;
+
+ auth_hdr.hmac_id = htons(hmac_desc->hmac_id);
+ auth_hdr.shkey_id = htons(key_id);
+
+ retval->subh.auth_hdr = sctp_addto_chunk(retval, sizeof(auth_hdr),
+ &auth_hdr);
+
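+ /* Reserve zeroed space for the digest; the HMAC itself is computed
+ * later, when the packet carrying this chunk is transmitted.
+ */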
+ skb_put_zero(retval->skb, hmac_desc->hmac_len);
+
+ /* Adjust the chunk header to include the empty MAC */
+ retval->chunk_hdr->length =
+ htons(ntohs(retval->chunk_hdr->length) + hmac_desc->hmac_len);
+ retval->chunk_end = skb_tail_pointer(retval->skb);
+
+ return retval;
+}
+
+
+/********************************************************************
+ * 2nd Level Abstractions
+ ********************************************************************/
+
+/* Turn an skb into a chunk.
+ * FIXME: Eventually move the structure directly inside the skb->cb[].
+ *
+ * sctpimpguide-05.txt Section 2.8.2
+ * M1) Each time a new DATA chunk is transmitted
+ * set the 'TSN.Missing.Report' count for that TSN to 0. The
+ * 'TSN.Missing.Report' count will be used to determine missing chunks
+ * and when to fast retransmit.
+ *
+ */
+struct sctp_chunk *sctp_chunkify(struct sk_buff *skb,
+ const struct sctp_association *asoc,
+ struct sock *sk, gfp_t gfp)
+{
+ struct sctp_chunk *retval;
+
+ retval = kmem_cache_zalloc(sctp_chunk_cachep, gfp);
+
+ if (!retval)
+ goto nodata;
+ if (!sk)
+ pr_debug("%s: chunkifying skb:%p w/o an sk\n", __func__, skb);
+
+ INIT_LIST_HEAD(&retval->list);
+ retval->skb = skb;
+ retval->asoc = (struct sctp_association *)asoc;
+ retval->singleton = 1;
+
+ retval->fast_retransmit = SCTP_CAN_FRTX;
+
+ /* Initialize the remaining list heads and the reference count. */
+ INIT_LIST_HEAD(&retval->transmitted_list);
+ INIT_LIST_HEAD(&retval->frag_list);
+ SCTP_DBG_OBJCNT_INC(chunk);
+ refcount_set(&retval->refcnt, 1);
+
+nodata:
+ return retval;
+}
+
+/* Set chunk->source and dest based on the IP header in chunk->skb. */
+void sctp_init_addrs(struct sctp_chunk *chunk, union sctp_addr *src,
+ union sctp_addr *dest)
+{
+ memcpy(&chunk->source, src, sizeof(union sctp_addr));
+ memcpy(&chunk->dest, dest, sizeof(union sctp_addr));
+}
+
+/* Extract the source address from a chunk. */
+const union sctp_addr *sctp_source(const struct sctp_chunk *chunk)
+{
+ /* If we have a known transport, use that. */
+ if (chunk->transport) {
+ return &chunk->transport->ipaddr;
+ } else {
+ /* Otherwise, extract it from the IP header. */
+ return &chunk->source;
+ }
+}
+
+/* Create a new chunk, setting the type and flags headers from the
+ * arguments, reserving enough space for a 'paylen' byte payload.
+ */
+static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc,
+ __u8 type, __u8 flags, int paylen,
+ gfp_t gfp)
+{
+ struct sctp_chunkhdr *chunk_hdr;
+ struct sctp_chunk *retval;
+ struct sk_buff *skb;
+ struct sock *sk;
+ int chunklen;
+
+ chunklen = SCTP_PAD4(sizeof(*chunk_hdr) + paylen);
+ if (chunklen > SCTP_MAX_CHUNK_LEN)
+ goto nodata;
+
+ /* No need to allocate link-layer (LL) headroom here, as this is only a chunk. */
+ skb = alloc_skb(chunklen, gfp);
+ if (!skb)
+ goto nodata;
+
+ /* Make room for the chunk header. */
+ chunk_hdr = (struct sctp_chunkhdr *)skb_put(skb, sizeof(*chunk_hdr));
+ chunk_hdr->type = type;
+ chunk_hdr->flags = flags;
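+ /* The length covers only the header for now; it grows as the
+ * payload is appended via sctp_addto_chunk().
+ */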
+ chunk_hdr->length = htons(sizeof(*chunk_hdr));
+
+ sk = asoc ? asoc->base.sk : NULL;
+ retval = sctp_chunkify(skb, asoc, sk, gfp);
+ if (!retval) {
+ kfree_skb(skb);
+ goto nodata;
+ }
+
+ retval->chunk_hdr = chunk_hdr;
+ retval->chunk_end = ((__u8 *)chunk_hdr) + sizeof(*chunk_hdr);
+
+ /* Determine if the chunk needs to be authenticated */
+ if (sctp_auth_send_cid(type, asoc))
+ retval->auth = 1;
+
+ return retval;
+nodata:
+ return NULL;
+}
+
+static struct sctp_chunk *sctp_make_data(const struct sctp_association *asoc,
+ __u8 flags, int paylen, gfp_t gfp)
+{
+ return _sctp_make_chunk(asoc, SCTP_CID_DATA, flags, paylen, gfp);
+}
+
+struct sctp_chunk *sctp_make_idata(const struct sctp_association *asoc,
+ __u8 flags, int paylen, gfp_t gfp)
+{
+ return _sctp_make_chunk(asoc, SCTP_CID_I_DATA, flags, paylen, gfp);
+}
+
+static struct sctp_chunk *sctp_make_control(const struct sctp_association *asoc,
+ __u8 type, __u8 flags, int paylen,
+ gfp_t gfp)
+{
+ struct sctp_chunk *chunk;
+
+ chunk = _sctp_make_chunk(asoc, type, flags, paylen, gfp);
+ if (chunk)
+ sctp_control_set_owner_w(chunk);
+
+ return chunk;
+}
+
+/* Release the memory occupied by a chunk. */
+static void sctp_chunk_destroy(struct sctp_chunk *chunk)
+{
+ BUG_ON(!list_empty(&chunk->list));
+ list_del_init(&chunk->transmitted_list);
+
+ consume_skb(chunk->skb);
+ consume_skb(chunk->auth_chunk);
+
+ SCTP_DBG_OBJCNT_DEC(chunk);
+ kmem_cache_free(sctp_chunk_cachep, chunk);
+}
+
+/* Possibly, free the chunk. */
+void sctp_chunk_free(struct sctp_chunk *chunk)
+{
+ /* Release our reference on the message tracker. */
+ if (chunk->msg)
+ sctp_datamsg_put(chunk->msg);
+
+ sctp_chunk_put(chunk);
+}
+
+/* Grab a reference to the chunk. */
+void sctp_chunk_hold(struct sctp_chunk *ch)
+{
+ refcount_inc(&ch->refcnt);
+}
+
+/* Release a reference to the chunk. */
+void sctp_chunk_put(struct sctp_chunk *ch)
+{
+ if (refcount_dec_and_test(&ch->refcnt))
+ sctp_chunk_destroy(ch);
+}
+
+/* Append bytes to the end of a chunk. Will panic if chunk is not big
+ * enough.
+ */
+void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
+{
+ int chunklen = ntohs(chunk->chunk_hdr->length);
+ int padlen = SCTP_PAD4(chunklen) - chunklen;
+ void *target;
+
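+ /* Pad the existing data out to a 4-byte boundary before appending,
+ * as SCTP chunks and parameters are always padded to a multiple of
+ * four bytes.
+ */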
+ skb_put_zero(chunk->skb, padlen);
+ target = skb_put_data(chunk->skb, data, len);
+
+ /* Adjust the chunk length field. */
+ chunk->chunk_hdr->length = htons(chunklen + padlen + len);
+ chunk->chunk_end = skb_tail_pointer(chunk->skb);
+
+ return target;
+}
+
+/* Append bytes from user space to the end of a chunk. Will panic if
+ * chunk is not big enough.
+ * Returns a kernel err value.
+ */
+int sctp_user_addto_chunk(struct sctp_chunk *chunk, int len,
+ struct iov_iter *from)
+{
+ void *target;
+
+ /* Make room in chunk for data. */
+ target = skb_put(chunk->skb, len);
+
+ /* Copy data (whole iovec) into chunk */
+ if (!copy_from_iter_full(target, len, from))
+ return -EFAULT;
+
+ /* Adjust the chunk length field. */
+ chunk->chunk_hdr->length =
+ htons(ntohs(chunk->chunk_hdr->length) + len);
+ chunk->chunk_end = skb_tail_pointer(chunk->skb);
+
+ return 0;
+}
+
+/* Helper function to assign an SSN if needed. This assumes that both
+ * the data_hdr and association have already been assigned.
+ */
+void sctp_chunk_assign_ssn(struct sctp_chunk *chunk)
+{
+ struct sctp_stream *stream;
+ struct sctp_chunk *lchunk;
+ struct sctp_datamsg *msg;
+ __u16 ssn, sid;
+
+ if (chunk->has_ssn)
+ return;
+
+ /* All fragments will be on the same stream */
+ sid = ntohs(chunk->subh.data_hdr->stream);
+ stream = &chunk->asoc->stream;
+
+ /* Now assign the sequence number to the entire message.
+ * All fragments must have the same stream sequence number.
+ */
+ msg = chunk->msg;
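+ /* For ordered delivery, only the last fragment advances the stream's
+ * SSN (sctp_ssn_next); earlier fragments merely peek at it, so every
+ * fragment of the message carries the same value.
+ */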
+ list_for_each_entry(lchunk, &msg->chunks, frag_list) {
+ if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
+ ssn = 0;
+ } else {
+ if (lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
+ ssn = sctp_ssn_next(stream, out, sid);
+ else
+ ssn = sctp_ssn_peek(stream, out, sid);
+ }
+
+ lchunk->subh.data_hdr->ssn = htons(ssn);
+ lchunk->has_ssn = 1;
+ }
+}
+
+/* Helper function to assign a TSN if needed. This assumes that both
+ * the data_hdr and association have already been assigned.
+ */
+void sctp_chunk_assign_tsn(struct sctp_chunk *chunk)
+{
+ if (!chunk->has_tsn) {
+ /* This is the last possible instant to
+ * assign a TSN.
+ */
+ chunk->subh.data_hdr->tsn =
+ htonl(sctp_association_get_next_tsn(chunk->asoc));
+ chunk->has_tsn = 1;
+ }
+}
+
+/* Create a CLOSED association to use with an incoming packet. */
+struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *ep,
+ struct sctp_chunk *chunk,
+ gfp_t gfp)
+{
+ struct sctp_association *asoc;
+ enum sctp_scope scope;
+ struct sk_buff *skb;
+
+ /* Create the bare association. */
+ scope = sctp_scope(sctp_source(chunk));
+ asoc = sctp_association_new(ep, ep->base.sk, scope, gfp);
+ if (!asoc)
+ goto nodata;
+ asoc->temp = 1;
+ skb = chunk->skb;
+ /* Create an entry for the source address of the packet. */
+ SCTP_INPUT_CB(skb)->af->from_skb(&asoc->c.peer_addr, skb, 1);
+
+nodata:
+ return asoc;
+}
+
+/* Build a cookie representing asoc.
+ * This INCLUDES the param header needed to put the cookie in the INIT ACK.
+ */
+static struct sctp_cookie_param *sctp_pack_cookie(
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const struct sctp_chunk *init_chunk,
+ int *cookie_len, const __u8 *raw_addrs,
+ int addrs_len)
+{
+ struct sctp_signed_cookie *cookie;
+ struct sctp_cookie_param *retval;
+ int headersize, bodysize;
+
+ /* The header size covers the static data prior to the actual cookie, including
+ * any padding.
+ */
+ headersize = sizeof(struct sctp_paramhdr) +
+ (sizeof(struct sctp_signed_cookie) -
+ sizeof(struct sctp_cookie));
+ bodysize = sizeof(struct sctp_cookie)
+ + ntohs(init_chunk->chunk_hdr->length) + addrs_len;
+
+ /* Pad out the cookie to a multiple of SCTP_COOKIE_MULTIPLE to make
+ * the signature functions simpler to write.
+ */
+ if (bodysize % SCTP_COOKIE_MULTIPLE)
+ bodysize += SCTP_COOKIE_MULTIPLE
+ - (bodysize % SCTP_COOKIE_MULTIPLE);
+ *cookie_len = headersize + bodysize;
+
+ /* Clear this memory since we are sending this data structure
+ * out on the network.
+ */
+ retval = kzalloc(*cookie_len, GFP_ATOMIC);
+ if (!retval)
+ goto nodata;
+
+ cookie = (struct sctp_signed_cookie *) retval->body;
+
+ /* Set up the parameter header. */
+ retval->p.type = SCTP_PARAM_STATE_COOKIE;
+ retval->p.length = htons(*cookie_len);
+
+ /* Copy the cookie part of the association itself. */
+ cookie->c = asoc->c;
+ /* Save the raw address list length in the cookie. */
+ cookie->c.raw_addr_list_len = addrs_len;
+
+ /* Remember PR-SCTP capability. */
+ cookie->c.prsctp_capable = asoc->peer.prsctp_capable;
+
+ /* Save adaptation indication in the cookie. */
+ cookie->c.adaptation_ind = asoc->peer.adaptation_ind;
+
+ /* Set an expiration time for the cookie. */
+ cookie->c.expiration = ktime_add(asoc->cookie_life,
+ ktime_get_real());
+
+ /* Copy the peer's init packet. */
+ memcpy(&cookie->c.peer_init[0], init_chunk->chunk_hdr,
+ ntohs(init_chunk->chunk_hdr->length));
+
+ /* Copy the raw local address list of the association. */
+ memcpy((__u8 *)&cookie->c.peer_init[0] +
+ ntohs(init_chunk->chunk_hdr->length), raw_addrs, addrs_len);
+
+ if (sctp_sk(ep->base.sk)->hmac) {
+ struct crypto_shash *tfm = sctp_sk(ep->base.sk)->hmac;
+ int err;
+
+ /* Sign the message. */
+ err = crypto_shash_setkey(tfm, ep->secret_key,
+ sizeof(ep->secret_key)) ?:
+ crypto_shash_tfm_digest(tfm, (u8 *)&cookie->c, bodysize,
+ cookie->signature);
+ if (err)
+ goto free_cookie;
+ }
+
+ return retval;
+
+free_cookie:
+ kfree(retval);
+nodata:
+ *cookie_len = 0;
+ return NULL;
+}
+
+/* Unpack the cookie from COOKIE ECHO chunk, recreating the association. */
+struct sctp_association *sctp_unpack_cookie(
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ struct sctp_chunk *chunk, gfp_t gfp,
+ int *error, struct sctp_chunk **errp)
+{
+ struct sctp_association *retval = NULL;
+ int headersize, bodysize, fixed_size;
+ struct sctp_signed_cookie *cookie;
+ struct sk_buff *skb = chunk->skb;
+ struct sctp_cookie *bear_cookie;
+ __u8 *digest = ep->digest;
+ enum sctp_scope scope;
+ unsigned int len;
+ ktime_t kt;
+
+ /* The header size covers the static data prior to the actual cookie, including
+ * any padding.
+ */
+ headersize = sizeof(struct sctp_chunkhdr) +
+ (sizeof(struct sctp_signed_cookie) -
+ sizeof(struct sctp_cookie));
+ bodysize = ntohs(chunk->chunk_hdr->length) - headersize;
+ fixed_size = headersize + sizeof(struct sctp_cookie);
+
+ /* Verify that the chunk looks like it even has a cookie.
+ * There must be enough room for our cookie and our peer's
+ * INIT chunk.
+ */
+ len = ntohs(chunk->chunk_hdr->length);
+ if (len < fixed_size + sizeof(struct sctp_chunkhdr))
+ goto malformed;
+
+ /* Verify that the cookie has been padded out. */
+ if (bodysize % SCTP_COOKIE_MULTIPLE)
+ goto malformed;
+
+ /* Process the cookie. */
+ cookie = chunk->subh.cookie_hdr;
+ bear_cookie = &cookie->c;
+
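+ /* The endpoint may have cookie authentication disabled (e.g. via the
+ * cookie_hmac_alg sysctl); skip the signature check in that case.
+ */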
+ if (!sctp_sk(ep->base.sk)->hmac)
+ goto no_hmac;
+
+ /* Check the signature. */
+ {
+ struct crypto_shash *tfm = sctp_sk(ep->base.sk)->hmac;
+ int err;
+
+ err = crypto_shash_setkey(tfm, ep->secret_key,
+ sizeof(ep->secret_key)) ?:
+ crypto_shash_tfm_digest(tfm, (u8 *)bear_cookie, bodysize,
+ digest);
+ if (err) {
+ *error = -SCTP_IERROR_NOMEM;
+ goto fail;
+ }
+ }
+
+ if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) {
+ *error = -SCTP_IERROR_BAD_SIG;
+ goto fail;
+ }
+
+no_hmac:
+ /* IG Section 2.35.2:
+ * 3) Compare the port numbers and the verification tag contained
+ * within the COOKIE ECHO chunk to the actual port numbers and the
+ * verification tag within the SCTP common header of the received
+ * packet. If these values do not match the packet MUST be silently
+ * discarded,
+ */
+ if (ntohl(chunk->sctp_hdr->vtag) != bear_cookie->my_vtag) {
+ *error = -SCTP_IERROR_BAD_TAG;
+ goto fail;
+ }
+
+ if (chunk->sctp_hdr->source != bear_cookie->peer_addr.v4.sin_port ||
+ ntohs(chunk->sctp_hdr->dest) != bear_cookie->my_port) {
+ *error = -SCTP_IERROR_BAD_PORTS;
+ goto fail;
+ }
+
+ /* Check to see if the cookie is stale. If there is already
+ * an association, there is no need to check cookie's expiration
+ * for init collision case of lost COOKIE ACK.
+ * If skb has been timestamped, then use the stamp, otherwise
+ * use current time. This introduces a small possibility that
+ * a cookie may be considered expired, but this only slows down
+ * establishing the new association rather than affecting every packet.
+ */
+ if (sock_flag(ep->base.sk, SOCK_TIMESTAMP))
+ kt = skb_get_ktime(skb);
+ else
+ kt = ktime_get_real();
+
+ if (!asoc && ktime_before(bear_cookie->expiration, kt)) {
+ suseconds_t usecs = ktime_to_us(ktime_sub(kt, bear_cookie->expiration));
+ __be32 n = htonl(usecs);
+
+ /*
+ * Section 3.3.10.3 Stale Cookie Error (3)
+ *
+ * Cause of error
+ * ---------------
+ * Stale Cookie Error: Indicates the receipt of a valid State
+ * Cookie that has expired.
+ */
+ *errp = sctp_make_op_error(asoc, chunk,
+ SCTP_ERROR_STALE_COOKIE, &n,
+ sizeof(n), 0);
+ if (*errp)
+ *error = -SCTP_IERROR_STALE_COOKIE;
+ else
+ *error = -SCTP_IERROR_NOMEM;
+
+ goto fail;
+ }
+
+ /* Make a new base association. */
+ scope = sctp_scope(sctp_source(chunk));
+ retval = sctp_association_new(ep, ep->base.sk, scope, gfp);
+ if (!retval) {
+ *error = -SCTP_IERROR_NOMEM;
+ goto fail;
+ }
+
+ /* Set up our peer's port number. */
+ retval->peer.port = ntohs(chunk->sctp_hdr->source);
+
+ /* Populate the association from the cookie. */
+ memcpy(&retval->c, bear_cookie, sizeof(*bear_cookie));
+
+ if (sctp_assoc_set_bind_addr_from_cookie(retval, bear_cookie,
+ GFP_ATOMIC) < 0) {
+ *error = -SCTP_IERROR_NOMEM;
+ goto fail;
+ }
+
+ /* Also, add the destination address. */
+ if (list_empty(&retval->base.bind_addr.address_list)) {
+ sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest,
+ sizeof(chunk->dest), SCTP_ADDR_SRC,
+ GFP_ATOMIC);
+ }
+
+ retval->next_tsn = retval->c.initial_tsn;
+ retval->ctsn_ack_point = retval->next_tsn - 1;
+ retval->addip_serial = retval->c.initial_tsn;
+ retval->strreset_outseq = retval->c.initial_tsn;
+ retval->adv_peer_ack_point = retval->ctsn_ack_point;
+ retval->peer.prsctp_capable = retval->c.prsctp_capable;
+ retval->peer.adaptation_ind = retval->c.adaptation_ind;
+
+ /* The INIT stuff will be done by the side effects. */
+ return retval;
+
+fail:
+ if (retval)
+ sctp_association_free(retval);
+
+ return NULL;
+
+malformed:
+ /* Yikes! The packet is either corrupt or deliberately
+ * malformed.
+ */
+ *error = -SCTP_IERROR_MALFORMED;
+ goto fail;
+}
+
+/********************************************************************
+ * 3rd Level Abstractions
+ ********************************************************************/
+
+struct __sctp_missing {
+ __be32 num_missing;
+ __be16 type;
+} __packed;
+
+/*
+ * Report a missing mandatory parameter.
+ */
+static int sctp_process_missing_param(const struct sctp_association *asoc,
+ enum sctp_param paramtype,
+ struct sctp_chunk *chunk,
+ struct sctp_chunk **errp)
+{
+ struct __sctp_missing report;
+ __u16 len;
+
+ len = SCTP_PAD4(sizeof(report));
+
+ /* Make an ERROR chunk, preparing enough room for
+ * returning multiple unknown parameters.
+ */
+ if (!*errp)
+ *errp = sctp_make_op_error_space(asoc, chunk, len);
+
+ if (*errp) {
+ report.num_missing = htonl(1);
+ report.type = paramtype;
+ sctp_init_cause(*errp, SCTP_ERROR_MISS_PARAM,
+ sizeof(report));
+ sctp_addto_chunk(*errp, sizeof(report), &report);
+ }
+
+ /* Stop processing this chunk. */
+ return 0;
+}
+
+/* Report an Invalid Mandatory Parameter. */
+static int sctp_process_inv_mandatory(const struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ struct sctp_chunk **errp)
+{
+ /* Invalid Mandatory Parameter Error has no payload. */
+
+ if (!*errp)
+ *errp = sctp_make_op_error_space(asoc, chunk, 0);
+
+ if (*errp)
+ sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, 0);
+
+ /* Stop processing this chunk. */
+ return 0;
+}
+
+static int sctp_process_inv_paramlength(const struct sctp_association *asoc,
+ struct sctp_paramhdr *param,
+ const struct sctp_chunk *chunk,
+ struct sctp_chunk **errp)
+{
+ /* This is a fatal error. Any accumulated non-fatal errors are
+ * not reported.
+ */
+ if (*errp)
+ sctp_chunk_free(*errp);
+
+ /* Create an error chunk and fill it in with our payload. */
+ *errp = sctp_make_violation_paramlen(asoc, chunk, param);
+
+ return 0;
+}
+
+
+/* Do not attempt to handle the HOST_NAME parameter. However, do
+ * send back an indicator to the peer.
+ */
+static int sctp_process_hn_param(const struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_chunk *chunk,
+ struct sctp_chunk **errp)
+{
+ __u16 len = ntohs(param.p->length);
+
+ /* Processing of the HOST_NAME parameter will generate an
+ * ABORT. If we've accumulated any non-fatal errors, they
+ * would be unrecognized parameters and we should not include
+ * them in the ABORT.
+ */
+ if (*errp)
+ sctp_chunk_free(*errp);
+
+ *errp = sctp_make_op_error(asoc, chunk, SCTP_ERROR_DNS_FAILED,
+ param.v, len, 0);
+
+ /* Stop processing this chunk. */
+ return 0;
+}
+
+static int sctp_verify_ext_param(struct net *net,
+ const struct sctp_endpoint *ep,
+ union sctp_params param)
+{
+ __u16 num_ext = ntohs(param.p->length) - sizeof(struct sctp_paramhdr);
+ int have_asconf = 0;
+ int have_auth = 0;
+ int i;
+
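+ /* Each entry in param.ext->chunks[] is a one-byte chunk type, so the
+ * count of extensions equals the parameter body length in bytes.
+ */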
+ for (i = 0; i < num_ext; i++) {
+ switch (param.ext->chunks[i]) {
+ case SCTP_CID_AUTH:
+ have_auth = 1;
+ break;
+ case SCTP_CID_ASCONF:
+ case SCTP_CID_ASCONF_ACK:
+ have_asconf = 1;
+ break;
+ }
+ }
+
+ /* ADD-IP Security: The draft requires us to ABORT or ignore the
+ * INIT/INIT-ACK if ADD-IP is listed, but AUTH is not. Do this
+ * only if ADD-IP is turned on and we are not in backward-compatible
+ * mode.
+ */
+ if (net->sctp.addip_noauth)
+ return 1;
+
+ if (ep->asconf_enable && !have_auth && have_asconf)
+ return 0;
+
+ return 1;
+}
+
+static void sctp_process_ext_param(struct sctp_association *asoc,
+ union sctp_params param)
+{
+ __u16 num_ext = ntohs(param.p->length) - sizeof(struct sctp_paramhdr);
+ int i;
+
+ for (i = 0; i < num_ext; i++) {
+ switch (param.ext->chunks[i]) {
+ case SCTP_CID_RECONF:
+ if (asoc->ep->reconf_enable)
+ asoc->peer.reconf_capable = 1;
+ break;
+ case SCTP_CID_FWD_TSN:
+ if (asoc->ep->prsctp_enable)
+ asoc->peer.prsctp_capable = 1;
+ break;
+ case SCTP_CID_AUTH:
+ /* If the peer reports AUTH, assume that it
+ * supports AUTH.
+ */
+ if (asoc->ep->auth_enable)
+ asoc->peer.auth_capable = 1;
+ break;
+ case SCTP_CID_ASCONF:
+ case SCTP_CID_ASCONF_ACK:
+ if (asoc->ep->asconf_enable)
+ asoc->peer.asconf_capable = 1;
+ break;
+ case SCTP_CID_I_DATA:
+ if (asoc->ep->intl_enable)
+ asoc->peer.intl_capable = 1;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+/* RFC 2960 3.2.1 & the Implementers Guide 2.2.
+ *
+ * The Parameter Types are encoded such that the
+ * highest-order two bits specify the action that must be
+ * taken if the processing endpoint does not recognize the
+ * Parameter Type.
+ *
+ * 00 - Stop processing this parameter; do not process any further
+ * parameters within this chunk
+ *
+ * 01 - Stop processing this parameter, do not process any further
+ * parameters within this chunk, and report the unrecognized
+ * parameter in an 'Unrecognized Parameter' ERROR chunk.
+ *
+ * 10 - Skip this parameter and continue processing.
+ *
+ * 11 - Skip this parameter and continue processing but
+ * report the unrecognized parameter in an
+ * 'Unrecognized Parameter' ERROR chunk.
+ *
+ * Return value:
+ * SCTP_IERROR_NO_ERROR - continue with the chunk
+ * SCTP_IERROR_ERROR - stop and report an error.
+ * SCTP_IERROR_NOMEM - out of memory.
+ */
+static enum sctp_ierror sctp_process_unk_param(
+ const struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_chunk *chunk,
+ struct sctp_chunk **errp)
+{
+ int retval = SCTP_IERROR_NO_ERROR;
+
+ switch (param.p->type & SCTP_PARAM_ACTION_MASK) {
+ case SCTP_PARAM_ACTION_DISCARD:
+ retval = SCTP_IERROR_ERROR;
+ break;
+ case SCTP_PARAM_ACTION_SKIP:
+ break;
+ case SCTP_PARAM_ACTION_DISCARD_ERR:
+ retval = SCTP_IERROR_ERROR;
+ fallthrough;
+ case SCTP_PARAM_ACTION_SKIP_ERR:
+ /* Make an ERROR chunk, preparing enough room for
+ * returning multiple unknown parameters.
+ */
+ if (!*errp) {
+ *errp = sctp_make_op_error_limited(asoc, chunk);
+ if (!*errp) {
+ /* If there is no memory for generating the
+ * ERROR report as specified, an ABORT will be
+ * triggered to the peer and the association
+ * won't be established.
+ */
+ retval = SCTP_IERROR_NOMEM;
+ break;
+ }
+ }
+
+ if (!sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
+ ntohs(param.p->length)))
+ sctp_addto_chunk(*errp, ntohs(param.p->length),
+ param.v);
+ break;
+ default:
+ break;
+ }
+
+ return retval;
+}
+
+/* Verify variable length parameters
+ * Return values:
+ * SCTP_IERROR_ABORT - trigger an ABORT
+ * SCTP_IERROR_NOMEM - out of memory (abort)
+ * SCTP_IERROR_ERROR - stop processing, trigger an ERROR
+ * SCTP_IERROR_NO_ERROR - continue with the chunk
+ */
+static enum sctp_ierror sctp_verify_param(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ union sctp_params param,
+ enum sctp_cid cid,
+ struct sctp_chunk *chunk,
+ struct sctp_chunk **err_chunk)
+{
+ struct sctp_hmac_algo_param *hmacs;
+ int retval = SCTP_IERROR_NO_ERROR;
+ __u16 n_elt, id = 0;
+ int i;
+
+ /* FIXME - This routine is not looking at each parameter per the
+ * chunk type, i.e., unrecognized parameters should be further
+ * identified based on the chunk id.
+ */
+
+ switch (param.p->type) {
+ case SCTP_PARAM_IPV4_ADDRESS:
+ case SCTP_PARAM_IPV6_ADDRESS:
+ case SCTP_PARAM_COOKIE_PRESERVATIVE:
+ case SCTP_PARAM_SUPPORTED_ADDRESS_TYPES:
+ case SCTP_PARAM_STATE_COOKIE:
+ case SCTP_PARAM_HEARTBEAT_INFO:
+ case SCTP_PARAM_UNRECOGNIZED_PARAMETERS:
+ case SCTP_PARAM_ECN_CAPABLE:
+ case SCTP_PARAM_ADAPTATION_LAYER_IND:
+ break;
+
+ case SCTP_PARAM_SUPPORTED_EXT:
+ if (!sctp_verify_ext_param(net, ep, param))
+ return SCTP_IERROR_ABORT;
+ break;
+
+ case SCTP_PARAM_SET_PRIMARY:
+ if (!ep->asconf_enable)
+ goto unhandled;
+
+ if (ntohs(param.p->length) < sizeof(struct sctp_addip_param) +
+ sizeof(struct sctp_paramhdr)) {
+ sctp_process_inv_paramlength(asoc, param.p,
+ chunk, err_chunk);
+ retval = SCTP_IERROR_ABORT;
+ }
+ break;
+
+ case SCTP_PARAM_HOST_NAME_ADDRESS:
+ /* Tell the peer, we won't support this param. */
+ sctp_process_hn_param(asoc, param, chunk, err_chunk);
+ retval = SCTP_IERROR_ABORT;
+ break;
+
+ case SCTP_PARAM_FWD_TSN_SUPPORT:
+ if (ep->prsctp_enable)
+ break;
+ goto unhandled;
+
+ case SCTP_PARAM_RANDOM:
+ if (!ep->auth_enable)
+ goto unhandled;
+
+ /* SCTP-AUTH: Section 6.1
+ * If the random number is not 32 bytes long, the association
+ * MUST be aborted. The ABORT chunk SHOULD contain the error
+ * cause 'Protocol Violation'.
+ */
+ if (SCTP_AUTH_RANDOM_LENGTH != ntohs(param.p->length) -
+ sizeof(struct sctp_paramhdr)) {
+ sctp_process_inv_paramlength(asoc, param.p,
+ chunk, err_chunk);
+ retval = SCTP_IERROR_ABORT;
+ }
+ break;
+
+ case SCTP_PARAM_CHUNKS:
+ if (!ep->auth_enable)
+ goto unhandled;
+
+ /* SCTP-AUTH: Section 3.2
+ * The CHUNKS parameter MUST be included once in the INIT or
+ * INIT-ACK chunk if the sender wants to receive authenticated
+ * chunks. Its maximum length is 260 bytes.
+ */
+ if (260 < ntohs(param.p->length)) {
+ sctp_process_inv_paramlength(asoc, param.p,
+ chunk, err_chunk);
+ retval = SCTP_IERROR_ABORT;
+ }
+ break;
+
+ case SCTP_PARAM_HMAC_ALGO:
+ if (!ep->auth_enable)
+ goto unhandled;
+
+ hmacs = (struct sctp_hmac_algo_param *)param.p;
+ n_elt = (ntohs(param.p->length) -
+ sizeof(struct sctp_paramhdr)) >> 1;
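+ /* Each HMAC identifier is two bytes, hence the shift by one. */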
+
+ /* SCTP-AUTH: Section 6.1
+ * The HMAC algorithm based on SHA-1 MUST be supported and
+ * included in the HMAC-ALGO parameter.
+ */
+ for (i = 0; i < n_elt; i++) {
+ id = ntohs(hmacs->hmac_ids[i]);
+
+ if (id == SCTP_AUTH_HMAC_ID_SHA1)
+ break;
+ }
+
+ if (id != SCTP_AUTH_HMAC_ID_SHA1) {
+ sctp_process_inv_paramlength(asoc, param.p, chunk,
+ err_chunk);
+ retval = SCTP_IERROR_ABORT;
+ }
+ break;
+unhandled:
+ default:
+ pr_debug("%s: unrecognized param:%d for chunk:%d\n",
+ __func__, ntohs(param.p->type), cid);
+
+ retval = sctp_process_unk_param(asoc, param, chunk, err_chunk);
+ break;
+ }
+ return retval;
+}
+
+/* Verify the INIT packet before we process it. */
+int sctp_verify_init(struct net *net, const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc, enum sctp_cid cid,
+ struct sctp_init_chunk *peer_init,
+ struct sctp_chunk *chunk, struct sctp_chunk **errp)
+{
+ union sctp_params param;
+ bool has_cookie = false;
+ int result;
+
+ /* Check for missing mandatory parameters. Note: Initial TSN is
+ * also mandatory, but is not checked here since the valid range
+ * is 0..2**32-1. RFC4960, section 3.3.3.
+ */
+ if (peer_init->init_hdr.num_outbound_streams == 0 ||
+ peer_init->init_hdr.num_inbound_streams == 0 ||
+ peer_init->init_hdr.init_tag == 0 ||
+ ntohl(peer_init->init_hdr.a_rwnd) < SCTP_DEFAULT_MINWINDOW)
+ return sctp_process_inv_mandatory(asoc, chunk, errp);
+
+ sctp_walk_params(param, peer_init, init_hdr.params) {
+ if (param.p->type == SCTP_PARAM_STATE_COOKIE)
+ has_cookie = true;
+ }
+
+ /* There is a possibility that a parameter length was bad and
+ * in that case we would have stopped walking the parameters.
+ * The current param.p would point at the bad one.
+ * Current consensus on the mailing list is to generate a PROTOCOL
+ * VIOLATION error. We build the ERROR chunk here and let the normal
+ * error handling code build and send the packet.
+ */
+ if (param.v != (void *)chunk->chunk_end)
+ return sctp_process_inv_paramlength(asoc, param.p, chunk, errp);
+
+ /* The only missing mandatory param possible today is
+ * the state cookie for an INIT-ACK chunk.
+ */
+ if ((SCTP_CID_INIT_ACK == cid) && !has_cookie)
+ return sctp_process_missing_param(asoc, SCTP_PARAM_STATE_COOKIE,
+ chunk, errp);
+
+ /* Verify all the variable length parameters */
+ sctp_walk_params(param, peer_init, init_hdr.params) {
+ result = sctp_verify_param(net, ep, asoc, param, cid,
+ chunk, errp);
+ switch (result) {
+ case SCTP_IERROR_ABORT:
+ case SCTP_IERROR_NOMEM:
+ return 0;
+ case SCTP_IERROR_ERROR:
+ return 1;
+ case SCTP_IERROR_NO_ERROR:
+ default:
+ break;
+ }
+
+ } /* for (loop through all parameters) */
+
+ return 1;
+}
+
+/* Unpack the parameters in an INIT packet into an association.
+ * Returns 0 on failure, else success.
+ * FIXME: This is an association method.
+ */
+int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
+ const union sctp_addr *peer_addr,
+ struct sctp_init_chunk *peer_init, gfp_t gfp)
+{
+ struct sctp_transport *transport;
+ struct list_head *pos, *temp;
+ union sctp_params param;
+ union sctp_addr addr;
+ struct sctp_af *af;
+ int src_match = 0;
+
+ /* We must include the address that the INIT packet came from.
+ * This is the only address that matters for an INIT packet.
+ * When processing a COOKIE ECHO, we retrieve the from address
+ * of the INIT from the cookie.
+ */
+
+ /* This implementation defaults to making the first transport
+ * added as the primary transport. The source address seems to
+ * be a better choice than any of the embedded addresses.
+ */
+ asoc->encap_port = SCTP_INPUT_CB(chunk->skb)->encap_port;
+ if (!sctp_assoc_add_peer(asoc, peer_addr, gfp, SCTP_ACTIVE))
+ goto nomem;
+
+ if (sctp_cmp_addr_exact(sctp_source(chunk), peer_addr))
+ src_match = 1;
+
+ /* Process the initialization parameters. */
+ sctp_walk_params(param, peer_init, init_hdr.params) {
+ if (!src_match &&
+ (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
+ param.p->type == SCTP_PARAM_IPV6_ADDRESS)) {
+ af = sctp_get_af_specific(param_type2af(param.p->type));
+ if (!af->from_addr_param(&addr, param.addr,
+ chunk->sctp_hdr->source, 0))
+ continue;
+ if (sctp_cmp_addr_exact(sctp_source(chunk), &addr))
+ src_match = 1;
+ }
+
+ if (!sctp_process_param(asoc, param, peer_addr, gfp))
+ goto clean_up;
+ }
+
+ /* source address of chunk may not match any valid address */
+ if (!src_match)
+ goto clean_up;
+
+ /* AUTH: After processing the parameters, make sure that we
+ * have all the required info to potentially do authentications.
+ */
+ if (asoc->peer.auth_capable && (!asoc->peer.peer_random ||
+ !asoc->peer.peer_hmacs))
+ asoc->peer.auth_capable = 0;
+
+ /* In a non-backward compatible mode, if the peer claims
+ * support for ADD-IP but not AUTH, the ADD-IP spec states
+ * that we MUST ABORT the association. Section 6. The section
+ * also gives us the option to silently ignore the packet, which
+ * is what we'll do here.
+ */
+ if (!asoc->base.net->sctp.addip_noauth &&
+ (asoc->peer.asconf_capable && !asoc->peer.auth_capable)) {
+ asoc->peer.addip_disabled_mask |= (SCTP_PARAM_ADD_IP |
+ SCTP_PARAM_DEL_IP |
+ SCTP_PARAM_SET_PRIMARY);
+ asoc->peer.asconf_capable = 0;
+ goto clean_up;
+ }
+
+ /* Walk list of transports, removing transports in the UNKNOWN state. */
+ list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
+ transport = list_entry(pos, struct sctp_transport, transports);
+ if (transport->state == SCTP_UNKNOWN) {
+ sctp_assoc_rm_peer(asoc, transport);
+ }
+ }
+
+ /* The fixed INIT headers are always in network byte
+ * order.
+ */
+ asoc->peer.i.init_tag =
+ ntohl(peer_init->init_hdr.init_tag);
+ asoc->peer.i.a_rwnd =
+ ntohl(peer_init->init_hdr.a_rwnd);
+ asoc->peer.i.num_outbound_streams =
+ ntohs(peer_init->init_hdr.num_outbound_streams);
+ asoc->peer.i.num_inbound_streams =
+ ntohs(peer_init->init_hdr.num_inbound_streams);
+ asoc->peer.i.initial_tsn =
+ ntohl(peer_init->init_hdr.initial_tsn);
+
+ asoc->strreset_inseq = asoc->peer.i.initial_tsn;
+
+ /* Apply the upper bounds for output streams based on peer's
+ * number of inbound streams.
+ */
+ if (asoc->c.sinit_num_ostreams >
+ ntohs(peer_init->init_hdr.num_inbound_streams)) {
+ asoc->c.sinit_num_ostreams =
+ ntohs(peer_init->init_hdr.num_inbound_streams);
+ }
+
+ if (asoc->c.sinit_max_instreams >
+ ntohs(peer_init->init_hdr.num_outbound_streams)) {
+ asoc->c.sinit_max_instreams =
+ ntohs(peer_init->init_hdr.num_outbound_streams);
+ }
+
+ /* Copy Initiation tag from INIT to VT_peer in cookie. */
+ asoc->c.peer_vtag = asoc->peer.i.init_tag;
+
+ /* Peer Rwnd : Current calculated value of the peer's rwnd. */
+ asoc->peer.rwnd = asoc->peer.i.a_rwnd;
+
+ /* RFC 2960 7.2.1 The initial value of ssthresh MAY be arbitrarily
+ * high (for example, implementations MAY use the size of the receiver
+ * advertised window).
+ */
+ list_for_each_entry(transport, &asoc->peer.transport_addr_list,
+ transports) {
+ transport->ssthresh = asoc->peer.i.a_rwnd;
+ }
+
+ /* Set up the TSN tracking pieces. */
+ if (!sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
+ asoc->peer.i.initial_tsn, gfp))
+ goto clean_up;
+
+ /* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
+ *
+ * The stream sequence number in all the streams shall start
+ * from 0 when the association is established. Also, when the
+ * stream sequence number reaches the value 65535 the next
+ * stream sequence number shall be set to 0.
+ */
+
+ if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams,
+ asoc->c.sinit_max_instreams, gfp))
+ goto clean_up;
+
+ /* Update frag_point, since stream_interleave may have changed. */
+ sctp_assoc_update_frag_point(asoc);
+
+ if (!asoc->temp && sctp_assoc_set_id(asoc, gfp))
+ goto clean_up;
+
+ /* ADDIP Section 4.1 ASCONF Chunk Procedures
+ *
+ * When an endpoint has an ASCONF signaled change to be sent to the
+ * remote endpoint it should do the following:
+ * ...
+ * A2) A serial number should be assigned to the Chunk. The serial
+ * number should be a monotonically increasing number. All serial
+ * numbers are defined to be initialized at the start of the
+ * association to the same value as the Initial TSN.
+ */
+ asoc->peer.addip_serial = asoc->peer.i.initial_tsn - 1;
+ return 1;
+
+clean_up:
+ /* Release the transport structures. */
+ list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
+ transport = list_entry(pos, struct sctp_transport, transports);
+ if (transport->state != SCTP_ACTIVE)
+ sctp_assoc_rm_peer(asoc, transport);
+ }
+
+nomem:
+ return 0;
+}
+
+
+/* Update asoc with the option described in param.
+ *
+ * RFC2960 3.3.2.1 Optional/Variable Length Parameters in INIT
+ *
+ * asoc is the association to update.
+ * param is the variable length parameter to use for update.
+ * cid tells us if this is an INIT, INIT ACK or COOKIE ECHO.
+ * If the current packet is an INIT we want to minimize the amount of
+ * work we do. In particular, we should not build transport
+ * structures for the addresses.
+ */
+static int sctp_process_param(struct sctp_association *asoc,
+ union sctp_params param,
+ const union sctp_addr *peer_addr,
+ gfp_t gfp)
+{
+ struct sctp_endpoint *ep = asoc->ep;
+ union sctp_addr_param *addr_param;
+ struct net *net = asoc->base.net;
+ struct sctp_transport *t;
+ enum sctp_scope scope;
+ union sctp_addr addr;
+ struct sctp_af *af;
+ int retval = 1, i;
+ u32 stale;
+ __u16 sat;
+
+ /* We maintain all INIT parameters in network byte order all the
+ * time. This allows us to not worry about whether the parameters
+ * came from a fresh INIT, an INIT ACK, or were stored in a cookie.
+ */
+ switch (param.p->type) {
+ case SCTP_PARAM_IPV6_ADDRESS:
+ if (PF_INET6 != asoc->base.sk->sk_family)
+ break;
+ goto do_addr_param;
+
+ case SCTP_PARAM_IPV4_ADDRESS:
+ /* v4 addresses are not allowed on v6-only socket */
+ if (ipv6_only_sock(asoc->base.sk))
+ break;
+do_addr_param:
+ af = sctp_get_af_specific(param_type2af(param.p->type));
+ if (!af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0))
+ break;
+ scope = sctp_scope(peer_addr);
+ if (sctp_in_scope(net, &addr, scope))
+ if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED))
+ return 0;
+ break;
+
+ case SCTP_PARAM_COOKIE_PRESERVATIVE:
+ if (!net->sctp.cookie_preserve_enable)
+ break;
+
+ stale = ntohl(param.life->lifespan_increment);
+
+ /* The Suggested Cookie Life-Span Increment is expressed in
+ * milliseconds (1/1000 sec).
+ */
+ asoc->cookie_life = ktime_add_ms(asoc->cookie_life, stale);
+ break;
+
+ case SCTP_PARAM_HOST_NAME_ADDRESS:
+ pr_debug("%s: unimplemented SCTP_HOST_NAME_ADDRESS\n", __func__);
+ break;
+
+ case SCTP_PARAM_SUPPORTED_ADDRESS_TYPES:
+ /* Turn off the default values first so we'll know which
+ * ones are really set by the peer.
+ */
+ asoc->peer.ipv4_address = 0;
+ asoc->peer.ipv6_address = 0;
+
+ /* Assume that the peer supports the address family
+ * from which it sent this packet.
+ */
+ if (peer_addr->sa.sa_family == AF_INET6)
+ asoc->peer.ipv6_address = 1;
+ else if (peer_addr->sa.sa_family == AF_INET)
+ asoc->peer.ipv4_address = 1;
+
+ /* Cycle through address types; avoid divide by 0. */
+ sat = ntohs(param.p->length) - sizeof(struct sctp_paramhdr);
+ if (sat)
+ sat /= sizeof(__u16);
+
+ for (i = 0; i < sat; ++i) {
+ switch (param.sat->types[i]) {
+ case SCTP_PARAM_IPV4_ADDRESS:
+ asoc->peer.ipv4_address = 1;
+ break;
+
+ case SCTP_PARAM_IPV6_ADDRESS:
+ if (PF_INET6 == asoc->base.sk->sk_family)
+ asoc->peer.ipv6_address = 1;
+ break;
+
+ case SCTP_PARAM_HOST_NAME_ADDRESS:
+ asoc->peer.hostname_address = 1;
+ break;
+
+ default: /* Just ignore anything else. */
+ break;
+ }
+ }
+ break;
+
+ case SCTP_PARAM_STATE_COOKIE:
+ asoc->peer.cookie_len =
+ ntohs(param.p->length) - sizeof(struct sctp_paramhdr);
+ kfree(asoc->peer.cookie);
+ asoc->peer.cookie = kmemdup(param.cookie->body, asoc->peer.cookie_len, gfp);
+ if (!asoc->peer.cookie)
+ retval = 0;
+ break;
+
+ case SCTP_PARAM_HEARTBEAT_INFO:
+ /* Would be odd to receive, but it causes no problems. */
+ break;
+
+ case SCTP_PARAM_UNRECOGNIZED_PARAMETERS:
+ /* Rejected during verify stage. */
+ break;
+
+ case SCTP_PARAM_ECN_CAPABLE:
+ if (asoc->ep->ecn_enable) {
+ asoc->peer.ecn_capable = 1;
+ break;
+ }
+ /* Fall Through */
+ goto fall_through;
+
+
+ case SCTP_PARAM_ADAPTATION_LAYER_IND:
+ asoc->peer.adaptation_ind = ntohl(param.aind->adaptation_ind);
+ break;
+
+ case SCTP_PARAM_SET_PRIMARY:
+ if (!ep->asconf_enable)
+ goto fall_through;
+
+ addr_param = param.v + sizeof(struct sctp_addip_param);
+
+ af = sctp_get_af_specific(param_type2af(addr_param->p.type));
+ if (!af)
+ break;
+
+ if (!af->from_addr_param(&addr, addr_param,
+ htons(asoc->peer.port), 0))
+ break;
+
+ if (!af->addr_valid(&addr, NULL, NULL))
+ break;
+
+ t = sctp_assoc_lookup_paddr(asoc, &addr);
+ if (!t)
+ break;
+
+ sctp_assoc_set_primary(asoc, t);
+ break;
+
+ case SCTP_PARAM_SUPPORTED_EXT:
+ sctp_process_ext_param(asoc, param);
+ break;
+
+ case SCTP_PARAM_FWD_TSN_SUPPORT:
+ if (asoc->ep->prsctp_enable) {
+ asoc->peer.prsctp_capable = 1;
+ break;
+ }
+ /* Fall Through */
+ goto fall_through;
+
+ case SCTP_PARAM_RANDOM:
+ if (!ep->auth_enable)
+ goto fall_through;
+
+ /* Save peer's random parameter */
+ kfree(asoc->peer.peer_random);
+ asoc->peer.peer_random = kmemdup(param.p,
+ ntohs(param.p->length), gfp);
+ if (!asoc->peer.peer_random) {
+ retval = 0;
+ break;
+ }
+ break;
+
+ case SCTP_PARAM_HMAC_ALGO:
+ if (!ep->auth_enable)
+ goto fall_through;
+
+ /* Save peer's HMAC list */
+ kfree(asoc->peer.peer_hmacs);
+ asoc->peer.peer_hmacs = kmemdup(param.p,
+ ntohs(param.p->length), gfp);
+ if (!asoc->peer.peer_hmacs) {
+ retval = 0;
+ break;
+ }
+
+ /* Set the default HMAC the peer requested. */
+ sctp_auth_asoc_set_default_hmac(asoc, param.hmac_algo);
+ break;
+
+ case SCTP_PARAM_CHUNKS:
+ if (!ep->auth_enable)
+ goto fall_through;
+
+ kfree(asoc->peer.peer_chunks);
+ asoc->peer.peer_chunks = kmemdup(param.p,
+ ntohs(param.p->length), gfp);
+ if (!asoc->peer.peer_chunks)
+ retval = 0;
+ break;
+fall_through:
+ default:
+ /* Any unrecognized parameters should have been caught
+ * and handled by sctp_verify_param() which should be
+ * called prior to this routine. Simply log the error
+ * here.
+ */
+ pr_debug("%s: ignoring param:%d for association:%p.\n",
+ __func__, ntohs(param.p->type), asoc);
+ break;
+ }
+
+ return retval;
+}
+
+/* Select a new verification tag. */
+__u32 sctp_generate_tag(const struct sctp_endpoint *ep)
+{
+ /* I believe that this random number generator complies with RFC1750.
+ * A tag of 0 is reserved for special cases (e.g. INIT).
+ */
+ __u32 x;
+
+ do {
+ get_random_bytes(&x, sizeof(__u32));
+ } while (x == 0);
+
+ return x;
+}
+
+/* Select an initial TSN to send during startup. */
+__u32 sctp_generate_tsn(const struct sctp_endpoint *ep)
+{
+ __u32 retval;
+
+ get_random_bytes(&retval, sizeof(__u32));
+ return retval;
+}
+
+/*
+ * ADDIP 3.1.1 Address Configuration Change Chunk (ASCONF)
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Type = 0xC1 | Chunk Flags | Chunk Length |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Serial Number |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Address Parameter |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ASCONF Parameter #1 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * \ \
+ * / .... /
+ * \ \
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ASCONF Parameter #N |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * The Address Parameter and other parameters will not be wrapped in this function.
+ */
+static struct sctp_chunk *sctp_make_asconf(struct sctp_association *asoc,
+ union sctp_addr *addr,
+ int vparam_len)
+{
+ struct sctp_addiphdr asconf;
+ struct sctp_chunk *retval;
+ int length = sizeof(asconf) + vparam_len;
+ union sctp_addr_param addrparam;
+ int addrlen;
+ struct sctp_af *af = sctp_get_af_specific(addr->v4.sin_family);
+
+ addrlen = af->to_addr_param(addr, &addrparam);
+ if (!addrlen)
+ return NULL;
+ length += addrlen;
+
+ /* Create the chunk. */
+ retval = sctp_make_control(asoc, SCTP_CID_ASCONF, 0, length,
+ GFP_ATOMIC);
+ if (!retval)
+ return NULL;
+
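+ /* Each ASCONF consumes the next serial number, which per ADDIP must
+ * increase monotonically from the Initial TSN.
+ */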
+ asconf.serial = htonl(asoc->addip_serial++);
+
+ retval->subh.addip_hdr =
+ sctp_addto_chunk(retval, sizeof(asconf), &asconf);
+ retval->param_hdr.v =
+ sctp_addto_chunk(retval, addrlen, &addrparam);
+
+ return retval;
+}
+
+/* ADDIP
+ * 3.2.1 Add IP Address
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Type = 0xC001 | Length = Variable |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ASCONF-Request Correlation ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Address Parameter |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * 3.2.2 Delete IP Address
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Type = 0xC002 | Length = Variable |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ASCONF-Request Correlation ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Address Parameter |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ */
+struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc,
+ union sctp_addr *laddr,
+ struct sockaddr *addrs,
+ int addrcnt, __be16 flags)
+{
+ union sctp_addr_param addr_param;
+ struct sctp_addip_param param;
+ int paramlen = sizeof(param);
+ struct sctp_chunk *retval;
+ int addr_param_len = 0;
+ union sctp_addr *addr;
+ int totallen = 0, i;
+ int del_pickup = 0;
+ struct sctp_af *af;
+ void *addr_buf;
+
+ /* Get total length of all the address parameters. */
+ addr_buf = addrs;
+ for (i = 0; i < addrcnt; i++) {
+ addr = addr_buf;
+ af = sctp_get_af_specific(addr->v4.sin_family);
+ addr_param_len = af->to_addr_param(addr, &addr_param);
+
+ totallen += paramlen;
+ totallen += addr_param_len;
+
+ addr_buf += af->sockaddr_len;
+ if (asoc->asconf_addr_del_pending && !del_pickup) {
+ /* reuse the parameter lengths computed for this same-scope address */
+ totallen += paramlen;
+ totallen += addr_param_len;
+ del_pickup = 1;
+
+ pr_debug("%s: picked same-scope del_pending addr, "
+ "totallen for all addresses is %d\n",
+ __func__, totallen);
+ }
+ }
+
+ /* Create an asconf chunk with the required length. */
+ retval = sctp_make_asconf(asoc, laddr, totallen);
+ if (!retval)
+ return NULL;
+
+ /* Add the address parameters to the asconf chunk. */
+ addr_buf = addrs;
+ for (i = 0; i < addrcnt; i++) {
+ addr = addr_buf;
+ af = sctp_get_af_specific(addr->v4.sin_family);
+ addr_param_len = af->to_addr_param(addr, &addr_param);
+ param.param_hdr.type = flags;
+ param.param_hdr.length = htons(paramlen + addr_param_len);
+ param.crr_id = htonl(i);
+
+ sctp_addto_chunk(retval, paramlen, &param);
+ sctp_addto_chunk(retval, addr_param_len, &addr_param);
+
+ addr_buf += af->sockaddr_len;
+ }
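+ /* If an address deletion was deferred (asconf_addr_del_pending),
+ * piggyback the corresponding DEL_IP parameter on this ADD_IP ASCONF.
+ */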
+ if (flags == SCTP_PARAM_ADD_IP && del_pickup) {
+ addr = asoc->asconf_addr_del_pending;
+ af = sctp_get_af_specific(addr->v4.sin_family);
+ addr_param_len = af->to_addr_param(addr, &addr_param);
+ param.param_hdr.type = SCTP_PARAM_DEL_IP;
+ param.param_hdr.length = htons(paramlen + addr_param_len);
+ param.crr_id = htonl(i);
+
+ sctp_addto_chunk(retval, paramlen, &param);
+ sctp_addto_chunk(retval, addr_param_len, &addr_param);
+ }
+ return retval;
+}
+
+/* ADDIP
+ * 3.2.4 Set Primary IP Address
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Type =0xC004 | Length = Variable |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ASCONF-Request Correlation ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Address Parameter |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Create an ASCONF chunk with Set Primary IP address parameter.
+ */
+struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc,
+ union sctp_addr *addr)
+{
+ struct sctp_af *af = sctp_get_af_specific(addr->v4.sin_family);
+ union sctp_addr_param addrparam;
+ struct sctp_addip_param param;
+ struct sctp_chunk *retval;
+ int len = sizeof(param);
+ int addrlen;
+
+ addrlen = af->to_addr_param(addr, &addrparam);
+ if (!addrlen)
+ return NULL;
+ len += addrlen;
+
+ /* Create the chunk and make asconf header. */
+ retval = sctp_make_asconf(asoc, addr, len);
+ if (!retval)
+ return NULL;
+
+ param.param_hdr.type = SCTP_PARAM_SET_PRIMARY;
+ param.param_hdr.length = htons(len);
+ param.crr_id = 0;
+
+ sctp_addto_chunk(retval, sizeof(param), &param);
+ sctp_addto_chunk(retval, addrlen, &addrparam);
+
+ return retval;
+}
+
+/* ADDIP 3.1.2 Address Configuration Acknowledgement Chunk (ASCONF-ACK)
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Type = 0x80 | Chunk Flags | Chunk Length |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Serial Number |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ASCONF Parameter Response#1 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * \ \
+ * / .... /
+ * \ \
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ASCONF Parameter Response#N |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Create an ASCONF_ACK chunk with enough space for the parameter responses.
+ */
+static struct sctp_chunk *sctp_make_asconf_ack(const struct sctp_association *asoc,
+ __u32 serial, int vparam_len)
+{
+ struct sctp_addiphdr asconf;
+ struct sctp_chunk *retval;
+ int length = sizeof(asconf) + vparam_len;
+
+ /* Create the chunk. */
+ retval = sctp_make_control(asoc, SCTP_CID_ASCONF_ACK, 0, length,
+ GFP_ATOMIC);
+ if (!retval)
+ return NULL;
+
+ asconf.serial = htonl(serial);
+
+ retval->subh.addip_hdr =
+ sctp_addto_chunk(retval, sizeof(asconf), &asconf);
+
+ return retval;
+}
+
+/* Add response parameters to an ASCONF_ACK chunk. */
+static void sctp_add_asconf_response(struct sctp_chunk *chunk, __be32 crr_id,
+ __be16 err_code,
+ struct sctp_addip_param *asconf_param)
+{
+ struct sctp_addip_param ack_param;
+ struct sctp_errhdr err_param;
+ int asconf_param_len = 0;
+ int err_param_len = 0;
+ __be16 response_type;
+
+ if (SCTP_ERROR_NO_ERROR == err_code) {
+ response_type = SCTP_PARAM_SUCCESS_REPORT;
+ } else {
+ response_type = SCTP_PARAM_ERR_CAUSE;
+ err_param_len = sizeof(err_param);
+ if (asconf_param)
+ asconf_param_len =
+ ntohs(asconf_param->param_hdr.length);
+ }
+
+ /* Add Success Indication or Error Cause Indication parameter. */
+ ack_param.param_hdr.type = response_type;
+ ack_param.param_hdr.length = htons(sizeof(ack_param) +
+ err_param_len +
+ asconf_param_len);
+ ack_param.crr_id = crr_id;
+ sctp_addto_chunk(chunk, sizeof(ack_param), &ack_param);
+
+ if (SCTP_ERROR_NO_ERROR == err_code)
+ return;
+
+ /* Add Error Cause parameter. */
+ err_param.cause = err_code;
+ err_param.length = htons(err_param_len + asconf_param_len);
+ sctp_addto_chunk(chunk, err_param_len, &err_param);
+
+ /* Add the failed TLV copied from ASCONF chunk. */
+ if (asconf_param)
+ sctp_addto_chunk(chunk, asconf_param_len, asconf_param);
+}
+
+/* Process an ASCONF parameter. */
+static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
+ struct sctp_chunk *asconf,
+ struct sctp_addip_param *asconf_param)
+{
+ union sctp_addr_param *addr_param;
+ struct sctp_transport *peer;
+ union sctp_addr addr;
+ struct sctp_af *af;
+
+ addr_param = (void *)asconf_param + sizeof(*asconf_param);
+
+ if (asconf_param->param_hdr.type != SCTP_PARAM_ADD_IP &&
+ asconf_param->param_hdr.type != SCTP_PARAM_DEL_IP &&
+ asconf_param->param_hdr.type != SCTP_PARAM_SET_PRIMARY)
+ return SCTP_ERROR_UNKNOWN_PARAM;
+
+ switch (addr_param->p.type) {
+ case SCTP_PARAM_IPV6_ADDRESS:
+ if (!asoc->peer.ipv6_address)
+ return SCTP_ERROR_DNS_FAILED;
+ break;
+ case SCTP_PARAM_IPV4_ADDRESS:
+ if (!asoc->peer.ipv4_address)
+ return SCTP_ERROR_DNS_FAILED;
+ break;
+ default:
+ return SCTP_ERROR_DNS_FAILED;
+ }
+
+ af = sctp_get_af_specific(param_type2af(addr_param->p.type));
+ if (unlikely(!af))
+ return SCTP_ERROR_DNS_FAILED;
+
+ if (!af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0))
+ return SCTP_ERROR_DNS_FAILED;
+
+ /* ADDIP 4.2.1 This parameter MUST NOT contain a broadcast
+ * or multicast address.
+ * (note: wildcard is permitted and requires special handling so
+ * make sure we check for that)
+ */
+ if (!af->is_any(&addr) && !af->addr_valid(&addr, NULL, asconf->skb))
+ return SCTP_ERROR_DNS_FAILED;
+
+ switch (asconf_param->param_hdr.type) {
+ case SCTP_PARAM_ADD_IP:
+ /* Section 4.2.1:
+ * If the address 0.0.0.0 or ::0 is provided, the source
+ * address of the packet MUST be added.
+ */
+ if (af->is_any(&addr))
+ memcpy(&addr, &asconf->source, sizeof(addr));
+
+ if (security_sctp_bind_connect(asoc->ep->base.sk,
+ SCTP_PARAM_ADD_IP,
+ (struct sockaddr *)&addr,
+ af->sockaddr_len))
+ return SCTP_ERROR_REQ_REFUSED;
+
+ /* ADDIP 4.3 D9) If an endpoint receives an ADD IP address
+ * request and does not have the local resources to add this
+ * new address to the association, it MUST return an Error
+ * Cause TLV set to the new error code 'Operation Refused
+ * Due to Resource Shortage'.
+ */
+
+ peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_UNCONFIRMED);
+ if (!peer)
+ return SCTP_ERROR_RSRC_LOW;
+
+ /* Start the heartbeat timer. */
+ sctp_transport_reset_hb_timer(peer);
+ asoc->new_transport = peer;
+ break;
+ case SCTP_PARAM_DEL_IP:
+ /* ADDIP 4.3 D7) If a request is received to delete the
+ * last remaining IP address of a peer endpoint, the receiver
+ * MUST send an Error Cause TLV with the error cause set to the
+ * new error code 'Request to Delete Last Remaining IP Address'.
+ */
+ if (asoc->peer.transport_count == 1)
+ return SCTP_ERROR_DEL_LAST_IP;
+
+ /* ADDIP 4.3 D8) If a request is received to delete an IP
+ * address which is also the source address of the IP packet
+ * which contained the ASCONF chunk, the receiver MUST reject
+ * this request. To reject the request the receiver MUST send
+ * an Error Cause TLV set to the new error code 'Request to
+ * Delete Source IP Address'
+ */
+ if (sctp_cmp_addr_exact(&asconf->source, &addr))
+ return SCTP_ERROR_DEL_SRC_IP;
+
+ /* Section 4.2.2
+ * If the address 0.0.0.0 or ::0 is provided, all
+ * addresses of the peer except the source address of the
+ * packet MUST be deleted.
+ */
+ if (af->is_any(&addr)) {
+ sctp_assoc_set_primary(asoc, asconf->transport);
+ sctp_assoc_del_nonprimary_peers(asoc,
+ asconf->transport);
+ return SCTP_ERROR_NO_ERROR;
+ }
+
+ /* If the address is not part of the association, an
+ * ASCONF-ACK with an Error Cause Indication Parameter
+ * carrying the cause 'Unresolvable Address' should
+ * be sent.
+ */
+ peer = sctp_assoc_lookup_paddr(asoc, &addr);
+ if (!peer)
+ return SCTP_ERROR_DNS_FAILED;
+
+ sctp_assoc_rm_peer(asoc, peer);
+ break;
+ case SCTP_PARAM_SET_PRIMARY:
+ /* ADDIP Section 4.2.4
+ * If the address 0.0.0.0 or ::0 is provided, the receiver
+ * MAY mark the source address of the packet as its
+ * primary.
+ */
+ if (af->is_any(&addr))
+ memcpy(&addr, sctp_source(asconf), sizeof(addr));
+
+ if (security_sctp_bind_connect(asoc->ep->base.sk,
+ SCTP_PARAM_SET_PRIMARY,
+ (struct sockaddr *)&addr,
+ af->sockaddr_len))
+ return SCTP_ERROR_REQ_REFUSED;
+
+ peer = sctp_assoc_lookup_paddr(asoc, &addr);
+ if (!peer)
+ return SCTP_ERROR_DNS_FAILED;
+
+ sctp_assoc_set_primary(asoc, peer);
+ break;
+ }
+
+ return SCTP_ERROR_NO_ERROR;
+}
+
+/* Verify the ASCONF packet before we process it. */
+bool sctp_verify_asconf(const struct sctp_association *asoc,
+ struct sctp_chunk *chunk, bool addr_param_needed,
+ struct sctp_paramhdr **errp)
+{
+ struct sctp_addip_chunk *addip;
+ bool addr_param_seen = false;
+ union sctp_params param;
+
+ addip = (struct sctp_addip_chunk *)chunk->chunk_hdr;
+ sctp_walk_params(param, addip, addip_hdr.params) {
+ size_t length = ntohs(param.p->length);
+
+ *errp = param.p;
+ switch (param.p->type) {
+ case SCTP_PARAM_ERR_CAUSE:
+ break;
+ case SCTP_PARAM_IPV4_ADDRESS:
+ if (length != sizeof(struct sctp_ipv4addr_param))
+ return false;
+ /* Ensure there is only one addr param and that it is at the
+ * beginning of the addip_hdr params, or we reject it.
+ */
+ if (param.v != addip->addip_hdr.params)
+ return false;
+ addr_param_seen = true;
+ break;
+ case SCTP_PARAM_IPV6_ADDRESS:
+ if (length != sizeof(struct sctp_ipv6addr_param))
+ return false;
+ if (param.v != addip->addip_hdr.params)
+ return false;
+ addr_param_seen = true;
+ break;
+ case SCTP_PARAM_ADD_IP:
+ case SCTP_PARAM_DEL_IP:
+ case SCTP_PARAM_SET_PRIMARY:
+ /* In ASCONF chunks, these need to be first. */
+ if (addr_param_needed && !addr_param_seen)
+ return false;
+ length = ntohs(param.addip->param_hdr.length);
+ if (length < sizeof(struct sctp_addip_param) +
+ sizeof(**errp))
+ return false;
+ break;
+ case SCTP_PARAM_SUCCESS_REPORT:
+ case SCTP_PARAM_ADAPTATION_LAYER_IND:
+ if (length != sizeof(struct sctp_addip_param))
+ return false;
+ break;
+ default:
+ /* This is unknown to us, reject! */
+ return false;
+ }
+ }
+
+ /* Remaining sanity checks. */
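+ /* In particular, sctp_walk_params() must have consumed the chunk
+ * exactly: param.v == chunk->chunk_end means there is no trailing
+ * garbage or truncated parameter left over.
+ */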
+ if (addr_param_needed && !addr_param_seen)
+ return false;
+ if (!addr_param_needed && addr_param_seen)
+ return false;
+ if (param.v != chunk->chunk_end)
+ return false;
+
+ return true;
+}
+
+/* Process an incoming ASCONF chunk with the next expected serial no. and
+ * return an ASCONF_ACK chunk to be sent in response.
+ */
+struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
+ struct sctp_chunk *asconf)
+{
+ union sctp_addr_param *addr_param;
+ struct sctp_addip_chunk *addip;
+ struct sctp_chunk *asconf_ack;
+ bool all_param_pass = true;
+ struct sctp_addiphdr *hdr;
+ int length = 0, chunk_len;
+ union sctp_params param;
+ __be16 err_code;
+ __u32 serial;
+
+ addip = (struct sctp_addip_chunk *)asconf->chunk_hdr;
+ chunk_len = ntohs(asconf->chunk_hdr->length) -
+ sizeof(struct sctp_chunkhdr);
+ hdr = (struct sctp_addiphdr *)asconf->skb->data;
+ serial = ntohl(hdr->serial);
+
+ /* Skip the addiphdr and store a pointer to the address parameter. */
+ length = sizeof(*hdr);
+ addr_param = (union sctp_addr_param *)(asconf->skb->data + length);
+ chunk_len -= length;
+
+ /* Skip the address parameter and store a pointer to the first
+ * asconf parameter.
+ */
+ length = ntohs(addr_param->p.length);
+ chunk_len -= length;
+
+ /* Create an ASCONF_ACK chunk.
+ * Based on the parameter definitions, the total size of the
+ * ASCONF_ACK parameters is at most four times the size of the
+ * ASCONF parameters.
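+ *
+ * For example, an ADD_IP parameter wrapping an IPv4 address is
+ * 8 + 8 = 16 bytes, and its worst-case response is an 8-byte
+ * response parameter, a 4-byte error cause header and a copy of
+ * the 16-byte TLV, i.e. 28 bytes, well within 4 * 16 = 64 bytes.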
+ */
+ asconf_ack = sctp_make_asconf_ack(asoc, serial, chunk_len * 4);
+ if (!asconf_ack)
+ goto done;
+
+ /* Process the TLVs contained within the ASCONF chunk. */
+ sctp_walk_params(param, addip, addip_hdr.params) {
+ /* Skip preceding address parameters. */
+ if (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
+ param.p->type == SCTP_PARAM_IPV6_ADDRESS)
+ continue;
+
+ err_code = sctp_process_asconf_param(asoc, asconf,
+ param.addip);
+ /* ADDIP 4.1 A7)
+ * If an error response is received for a TLV parameter,
+ * all TLVs with no response before the failed TLV are
+ * considered successful if not reported. All TLVs after
+ * the failed response are considered unsuccessful unless
+ * a specific success indication is present for the parameter.
+ */
+ if (err_code != SCTP_ERROR_NO_ERROR)
+ all_param_pass = false;
+ if (!all_param_pass)
+ sctp_add_asconf_response(asconf_ack, param.addip->crr_id,
+ err_code, param.addip);
+
+ /* ADDIP 4.3 D11) When an endpoint receiving an ASCONF to add
+ * an IP address sends an 'Out of Resource' in its response, it
+ * MUST also fail any subsequent add or delete requests bundled
+ * in the ASCONF.
+ */
+ if (err_code == SCTP_ERROR_RSRC_LOW)
+ goto done;
+ }
+done:
+ asoc->peer.addip_serial++;
+
+ /* If we are sending a new ASCONF_ACK, hold a reference to it in the
+ * assoc after freeing the reference to the old asconf ack, if any.
+ */
+ if (asconf_ack) {
+ sctp_chunk_hold(asconf_ack);
+ list_add_tail(&asconf_ack->transmitted_list,
+ &asoc->asconf_ack_list);
+ }
+
+ return asconf_ack;
+}
+
+/* Process an asconf parameter that was successfully acked. */
+static void sctp_asconf_param_success(struct sctp_association *asoc,
+ struct sctp_addip_param *asconf_param)
+{
+ struct sctp_bind_addr *bp = &asoc->base.bind_addr;
+ union sctp_addr_param *addr_param;
+ struct sctp_sockaddr_entry *saddr;
+ struct sctp_transport *transport;
+ union sctp_addr addr;
+ struct sctp_af *af;
+
+ addr_param = (void *)asconf_param + sizeof(*asconf_param);
+
+ /* We have checked the packet before, so we do not check again. */
+ af = sctp_get_af_specific(param_type2af(addr_param->p.type));
+ if (!af->from_addr_param(&addr, addr_param, htons(bp->port), 0))
+ return;
+
+ switch (asconf_param->param_hdr.type) {
+ case SCTP_PARAM_ADD_IP:
+ /* This is always done in BH context with a socket lock
+ * held, so the list can not change.
+ */
+ local_bh_disable();
+ list_for_each_entry(saddr, &bp->address_list, list) {
+ if (sctp_cmp_addr_exact(&saddr->a, &addr))
+ saddr->state = SCTP_ADDR_SRC;
+ }
+ local_bh_enable();
+ list_for_each_entry(transport, &asoc->peer.transport_addr_list,
+ transports) {
+ sctp_transport_dst_release(transport);
+ }
+ break;
+ case SCTP_PARAM_DEL_IP:
+ local_bh_disable();
+ sctp_del_bind_addr(bp, &addr);
+ if (asoc->asconf_addr_del_pending != NULL &&
+ sctp_cmp_addr_exact(asoc->asconf_addr_del_pending, &addr)) {
+ kfree(asoc->asconf_addr_del_pending);
+ asoc->asconf_addr_del_pending = NULL;
+ }
+ local_bh_enable();
+ list_for_each_entry(transport, &asoc->peer.transport_addr_list,
+ transports) {
+ sctp_transport_dst_release(transport);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/* Get the corresponding ASCONF response error code from the ASCONF_ACK chunk
+ * for the given asconf parameter. If there is no response for this parameter,
+ * return the error code based on the third argument 'no_err'.
+ * ADDIP 4.1
+ * A7) If an error response is received for a TLV parameter, all TLVs with no
+ * response before the failed TLV are considered successful if not reported.
+ * All TLVs after the failed response are considered unsuccessful unless a
+ * specific success indication is present for the parameter.
+ */
+static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack,
+ struct sctp_addip_param *asconf_param,
+ int no_err)
+{
+ struct sctp_addip_param *asconf_ack_param;
+ struct sctp_errhdr *err_param;
+ int asconf_ack_len;
+ __be16 err_code;
+ int length;
+
+ if (no_err)
+ err_code = SCTP_ERROR_NO_ERROR;
+ else
+ err_code = SCTP_ERROR_REQ_REFUSED;
+
+ asconf_ack_len = ntohs(asconf_ack->chunk_hdr->length) -
+ sizeof(struct sctp_chunkhdr);
+
+ /* Skip the addiphdr from the asconf_ack chunk and store a pointer to
+ * the first asconf_ack parameter.
+ */
+ length = sizeof(struct sctp_addiphdr);
+ asconf_ack_param = (struct sctp_addip_param *)(asconf_ack->skb->data +
+ length);
+ asconf_ack_len -= length;
+
+ while (asconf_ack_len > 0) {
+ if (asconf_ack_param->crr_id == asconf_param->crr_id) {
+ switch (asconf_ack_param->param_hdr.type) {
+ case SCTP_PARAM_SUCCESS_REPORT:
+ return SCTP_ERROR_NO_ERROR;
+ case SCTP_PARAM_ERR_CAUSE:
+ length = sizeof(*asconf_ack_param);
+ err_param = (void *)asconf_ack_param + length;
+ asconf_ack_len -= length;
+ if (asconf_ack_len > 0)
+ return err_param->cause;
+ else
+ return SCTP_ERROR_INV_PARAM;
+ break;
+ default:
+ return SCTP_ERROR_INV_PARAM;
+ }
+ }
+
+ length = ntohs(asconf_ack_param->param_hdr.length);
+ asconf_ack_param = (void *)asconf_ack_param + length;
+ asconf_ack_len -= length;
+ }
+
+ return err_code;
+}
+
+/* Process an incoming ASCONF_ACK chunk against the cached last ASCONF chunk. */
+int sctp_process_asconf_ack(struct sctp_association *asoc,
+ struct sctp_chunk *asconf_ack)
+{
+ struct sctp_chunk *asconf = asoc->addip_last_asconf;
+ struct sctp_addip_param *asconf_param;
+ __be16 err_code = SCTP_ERROR_NO_ERROR;
+ union sctp_addr_param *addr_param;
+ int asconf_len = asconf->skb->len;
+ int all_param_pass = 0;
+ int length = 0;
+ int no_err = 1;
+ int retval = 0;
+
+ /* Skip the chunkhdr and addiphdr from the last asconf sent and store
+ * a pointer to the address parameter.
+ */
+ length = sizeof(struct sctp_addip_chunk);
+ addr_param = (union sctp_addr_param *)(asconf->skb->data + length);
+ asconf_len -= length;
+
+ /* Skip the address parameter in the last asconf sent and store a
+ * pointer to the first asconf parameter.
+ */
+ length = ntohs(addr_param->p.length);
+ asconf_param = (void *)addr_param + length;
+ asconf_len -= length;
+
+ /* ADDIP 4.1
+ * A8) If there is no response(s) to specific TLV parameter(s), and no
+ * failures are indicated, then all request(s) are considered
+ * successful.
+ */
+ if (asconf_ack->skb->len == sizeof(struct sctp_addiphdr))
+ all_param_pass = 1;
+
+ /* Process the TLVs contained in the last sent ASCONF chunk. */
+ while (asconf_len > 0) {
+ if (all_param_pass)
+ err_code = SCTP_ERROR_NO_ERROR;
+ else {
+ err_code = sctp_get_asconf_response(asconf_ack,
+ asconf_param,
+ no_err);
+ if (no_err && (SCTP_ERROR_NO_ERROR != err_code))
+ no_err = 0;
+ }
+
+ switch (err_code) {
+ case SCTP_ERROR_NO_ERROR:
+ sctp_asconf_param_success(asoc, asconf_param);
+ break;
+
+ case SCTP_ERROR_RSRC_LOW:
+ retval = 1;
+ break;
+
+ case SCTP_ERROR_UNKNOWN_PARAM:
+ /* Disable sending this type of asconf parameter in
+ * the future.
+ */
+ asoc->peer.addip_disabled_mask |=
+ asconf_param->param_hdr.type;
+ break;
+
+ case SCTP_ERROR_REQ_REFUSED:
+ case SCTP_ERROR_DEL_LAST_IP:
+ case SCTP_ERROR_DEL_SRC_IP:
+ default:
+ break;
+ }
+
+ /* Skip the processed asconf parameter and move to the next
+ * one.
+ */
+ length = ntohs(asconf_param->param_hdr.length);
+ asconf_param = (void *)asconf_param + length;
+ asconf_len -= length;
+ }
+
+ if (no_err && asoc->src_out_of_asoc_ok) {
+ asoc->src_out_of_asoc_ok = 0;
+ sctp_transport_immediate_rtx(asoc->peer.primary_path);
+ }
+
+ /* Free the cached last sent asconf chunk. */
+ list_del_init(&asconf->transmitted_list);
+ sctp_chunk_free(asconf);
+ asoc->addip_last_asconf = NULL;
+
+ return retval;
+}
+
+/* Make a FWD TSN chunk. */
+struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
+ __u32 new_cum_tsn, size_t nstreams,
+ struct sctp_fwdtsn_skip *skiplist)
+{
+ struct sctp_chunk *retval = NULL;
+ struct sctp_fwdtsn_hdr ftsn_hdr;
+ struct sctp_fwdtsn_skip skip;
+ size_t hint;
+ int i;
+
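+	/* The FWD TSN header is a 4-byte cumulative TSN and each skip
+	 * entry is a 4-byte (stream, SSN) pair, hence the hint of
+	 * (nstreams + 1) 32-bit words.
+	 */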
+ hint = (nstreams + 1) * sizeof(__u32);
+
+ retval = sctp_make_control(asoc, SCTP_CID_FWD_TSN, 0, hint, GFP_ATOMIC);
+
+ if (!retval)
+ return NULL;
+
+ ftsn_hdr.new_cum_tsn = htonl(new_cum_tsn);
+ retval->subh.fwdtsn_hdr =
+ sctp_addto_chunk(retval, sizeof(ftsn_hdr), &ftsn_hdr);
+
+ for (i = 0; i < nstreams; i++) {
+ skip.stream = skiplist[i].stream;
+ skip.ssn = skiplist[i].ssn;
+ sctp_addto_chunk(retval, sizeof(skip), &skip);
+ }
+
+ return retval;
+}
+
+struct sctp_chunk *sctp_make_ifwdtsn(const struct sctp_association *asoc,
+ __u32 new_cum_tsn, size_t nstreams,
+ struct sctp_ifwdtsn_skip *skiplist)
+{
+ struct sctp_chunk *retval = NULL;
+ struct sctp_ifwdtsn_hdr ftsn_hdr;
+ size_t hint;
+
+ hint = (nstreams + 1) * sizeof(__u32);
+
+ retval = sctp_make_control(asoc, SCTP_CID_I_FWD_TSN, 0, hint,
+ GFP_ATOMIC);
+ if (!retval)
+ return NULL;
+
+ ftsn_hdr.new_cum_tsn = htonl(new_cum_tsn);
+ retval->subh.ifwdtsn_hdr =
+ sctp_addto_chunk(retval, sizeof(ftsn_hdr), &ftsn_hdr);
+
+ sctp_addto_chunk(retval, nstreams * sizeof(skiplist[0]), skiplist);
+
+ return retval;
+}
+
+/* RE-CONFIG 3.1 (RE-CONFIG chunk)
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Type = 130 | Chunk Flags | Chunk Length |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * \ \
+ * / Re-configuration Parameter /
+ * \ \
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * \ \
+ * / Re-configuration Parameter (optional) /
+ * \ \
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+static struct sctp_chunk *sctp_make_reconf(const struct sctp_association *asoc,
+ int length)
+{
+ struct sctp_reconf_chunk *reconf;
+ struct sctp_chunk *retval;
+
+ retval = sctp_make_control(asoc, SCTP_CID_RECONF, 0, length,
+ GFP_ATOMIC);
+ if (!retval)
+ return NULL;
+
+ reconf = (struct sctp_reconf_chunk *)retval->chunk_hdr;
+ retval->param_hdr.v = reconf->params;
+
+ return retval;
+}
+
+/* RE-CONFIG 4.1 (STREAM OUT RESET)
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Parameter Type = 13 | Parameter Length = 16 + 2 * N |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Re-configuration Request Sequence Number |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Re-configuration Response Sequence Number |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Sender's Last Assigned TSN |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Stream Number 1 (optional) | Stream Number 2 (optional) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * / ...... /
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Stream Number N-1 (optional) | Stream Number N (optional) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * RE-CONFIG 4.2 (STREAM IN RESET)
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Parameter Type = 14 | Parameter Length = 8 + 2 * N |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Re-configuration Request Sequence Number |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Stream Number 1 (optional) | Stream Number 2 (optional) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * / ...... /
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Stream Number N-1 (optional) | Stream Number N (optional) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct sctp_chunk *sctp_make_strreset_req(
+ const struct sctp_association *asoc,
+ __u16 stream_num, __be16 *stream_list,
+ bool out, bool in)
+{
+ __u16 stream_len = stream_num * sizeof(__u16);
+ struct sctp_strreset_outreq outreq;
+ struct sctp_strreset_inreq inreq;
+ struct sctp_chunk *retval;
+ __u16 outlen, inlen;
+
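+	/* 'out' and 'in' are booleans, so multiplying by them yields zero
+	 * for a request type that is not being built.
+	 */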
+ outlen = (sizeof(outreq) + stream_len) * out;
+ inlen = (sizeof(inreq) + stream_len) * in;
+
+ retval = sctp_make_reconf(asoc, SCTP_PAD4(outlen) + SCTP_PAD4(inlen));
+ if (!retval)
+ return NULL;
+
+ if (outlen) {
+ outreq.param_hdr.type = SCTP_PARAM_RESET_OUT_REQUEST;
+ outreq.param_hdr.length = htons(outlen);
+ outreq.request_seq = htonl(asoc->strreset_outseq);
+ outreq.response_seq = htonl(asoc->strreset_inseq - 1);
+ outreq.send_reset_at_tsn = htonl(asoc->next_tsn - 1);
+
+ sctp_addto_chunk(retval, sizeof(outreq), &outreq);
+
+ if (stream_len)
+ sctp_addto_chunk(retval, stream_len, stream_list);
+ }
+
+ if (inlen) {
+ inreq.param_hdr.type = SCTP_PARAM_RESET_IN_REQUEST;
+ inreq.param_hdr.length = htons(inlen);
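+		/* If an OUT request is bundled in the same chunk it already
+		 * consumed strreset_outseq, so the IN request takes the next
+		 * request sequence number ('out' is 0 or 1 here).
+		 */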
+ inreq.request_seq = htonl(asoc->strreset_outseq + out);
+
+ sctp_addto_chunk(retval, sizeof(inreq), &inreq);
+
+ if (stream_len)
+ sctp_addto_chunk(retval, stream_len, stream_list);
+ }
+
+ return retval;
+}
+
+/* RE-CONFIG 4.3 (SSN/TSN RESET ALL)
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Parameter Type = 15 | Parameter Length = 8 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Re-configuration Request Sequence Number |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct sctp_chunk *sctp_make_strreset_tsnreq(
+ const struct sctp_association *asoc)
+{
+ struct sctp_strreset_tsnreq tsnreq;
+ __u16 length = sizeof(tsnreq);
+ struct sctp_chunk *retval;
+
+ retval = sctp_make_reconf(asoc, length);
+ if (!retval)
+ return NULL;
+
+ tsnreq.param_hdr.type = SCTP_PARAM_RESET_TSN_REQUEST;
+ tsnreq.param_hdr.length = htons(length);
+ tsnreq.request_seq = htonl(asoc->strreset_outseq);
+
+ sctp_addto_chunk(retval, sizeof(tsnreq), &tsnreq);
+
+ return retval;
+}
+
+/* RE-CONFIG 4.5/4.6 (ADD STREAM)
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Parameter Type = 17 | Parameter Length = 12 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Re-configuration Request Sequence Number |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Number of new streams | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct sctp_chunk *sctp_make_strreset_addstrm(
+ const struct sctp_association *asoc,
+ __u16 out, __u16 in)
+{
+ struct sctp_strreset_addstrm addstrm;
+ __u16 size = sizeof(addstrm);
+ struct sctp_chunk *retval;
+
+ retval = sctp_make_reconf(asoc, (!!out + !!in) * size);
+ if (!retval)
+ return NULL;
+
+ if (out) {
+ addstrm.param_hdr.type = SCTP_PARAM_RESET_ADD_OUT_STREAMS;
+ addstrm.param_hdr.length = htons(size);
+ addstrm.number_of_streams = htons(out);
+ addstrm.request_seq = htonl(asoc->strreset_outseq);
+ addstrm.reserved = 0;
+
+ sctp_addto_chunk(retval, size, &addstrm);
+ }
+
+ if (in) {
+ addstrm.param_hdr.type = SCTP_PARAM_RESET_ADD_IN_STREAMS;
+ addstrm.param_hdr.length = htons(size);
+ addstrm.number_of_streams = htons(in);
+ addstrm.request_seq = htonl(asoc->strreset_outseq + !!out);
+ addstrm.reserved = 0;
+
+ sctp_addto_chunk(retval, size, &addstrm);
+ }
+
+ return retval;
+}
+
+/* RE-CONFIG 4.4 (RESP)
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Parameter Type = 16 | Parameter Length |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Re-configuration Response Sequence Number |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Result |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct sctp_chunk *sctp_make_strreset_resp(const struct sctp_association *asoc,
+ __u32 result, __u32 sn)
+{
+ struct sctp_strreset_resp resp;
+ __u16 length = sizeof(resp);
+ struct sctp_chunk *retval;
+
+ retval = sctp_make_reconf(asoc, length);
+ if (!retval)
+ return NULL;
+
+ resp.param_hdr.type = SCTP_PARAM_RESET_RESPONSE;
+ resp.param_hdr.length = htons(length);
+ resp.response_seq = htonl(sn);
+ resp.result = htonl(result);
+
+ sctp_addto_chunk(retval, sizeof(resp), &resp);
+
+ return retval;
+}
+
+/* RE-CONFIG 4.4 OPTIONAL (TSNRESP)
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Parameter Type = 16 | Parameter Length |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Re-configuration Response Sequence Number |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Result |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Sender's Next TSN (optional) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Receiver's Next TSN (optional) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct sctp_chunk *sctp_make_strreset_tsnresp(struct sctp_association *asoc,
+ __u32 result, __u32 sn,
+ __u32 sender_tsn,
+ __u32 receiver_tsn)
+{
+ struct sctp_strreset_resptsn tsnresp;
+ __u16 length = sizeof(tsnresp);
+ struct sctp_chunk *retval;
+
+ retval = sctp_make_reconf(asoc, length);
+ if (!retval)
+ return NULL;
+
+ tsnresp.param_hdr.type = SCTP_PARAM_RESET_RESPONSE;
+ tsnresp.param_hdr.length = htons(length);
+
+ tsnresp.response_seq = htonl(sn);
+ tsnresp.result = htonl(result);
+ tsnresp.senders_next_tsn = htonl(sender_tsn);
+ tsnresp.receivers_next_tsn = htonl(receiver_tsn);
+
+ sctp_addto_chunk(retval, sizeof(tsnresp), &tsnresp);
+
+ return retval;
+}
+
+bool sctp_verify_reconf(const struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ struct sctp_paramhdr **errp)
+{
+ struct sctp_reconf_chunk *hdr;
+ union sctp_params param;
+ __be16 last = 0;
+ __u16 cnt = 0;
+
+ hdr = (struct sctp_reconf_chunk *)chunk->chunk_hdr;
+ sctp_walk_params(param, hdr, params) {
+ __u16 length = ntohs(param.p->length);
+
+ *errp = param.p;
+ if (cnt++ > 2)
+ return false;
+ switch (param.p->type) {
+ case SCTP_PARAM_RESET_OUT_REQUEST:
+ if (length < sizeof(struct sctp_strreset_outreq) ||
+ (last && last != SCTP_PARAM_RESET_RESPONSE &&
+ last != SCTP_PARAM_RESET_IN_REQUEST))
+ return false;
+ break;
+ case SCTP_PARAM_RESET_IN_REQUEST:
+ if (length < sizeof(struct sctp_strreset_inreq) ||
+ (last && last != SCTP_PARAM_RESET_OUT_REQUEST))
+ return false;
+ break;
+ case SCTP_PARAM_RESET_RESPONSE:
+ if ((length != sizeof(struct sctp_strreset_resp) &&
+ length != sizeof(struct sctp_strreset_resptsn)) ||
+ (last && last != SCTP_PARAM_RESET_RESPONSE &&
+ last != SCTP_PARAM_RESET_OUT_REQUEST))
+ return false;
+ break;
+ case SCTP_PARAM_RESET_TSN_REQUEST:
+ if (length !=
+ sizeof(struct sctp_strreset_tsnreq) || last)
+ return false;
+ break;
+ case SCTP_PARAM_RESET_ADD_IN_STREAMS:
+ if (length != sizeof(struct sctp_strreset_addstrm) ||
+ (last && last != SCTP_PARAM_RESET_ADD_OUT_STREAMS))
+ return false;
+ break;
+ case SCTP_PARAM_RESET_ADD_OUT_STREAMS:
+ if (length != sizeof(struct sctp_strreset_addstrm) ||
+ (last && last != SCTP_PARAM_RESET_ADD_IN_STREAMS))
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ last = param.p->type;
+ }
+
+ return true;
+}
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
new file mode 100644
index 000000000..970c6a486
--- /dev/null
+++ b/net/sctp/sm_sideeffect.c
@@ -0,0 +1,1825 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * These functions work with the state functions in sctp_sm_statefuns.c
+ * to implement the state operations. These functions implement the
+ * steps which require modifying existing data structures.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Jon Grimm <jgrimm@austin.ibm.com>
+ * Hui Huang <hui.huang@nokia.com>
+ * Dajiang Zhang <dajiang.zhang@nokia.com>
+ * Daisy Chang <daisyc@us.ibm.com>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ * Ardelle Fan <ardelle.fan@intel.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/ip.h>
+#include <linux/gfp.h>
+#include <net/sock.h>
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+#include <net/sctp/stream_sched.h>
+
+static int sctp_cmd_interpreter(enum sctp_event_type event_type,
+ union sctp_subtype subtype,
+ enum sctp_state state,
+ struct sctp_endpoint *ep,
+ struct sctp_association *asoc,
+ void *event_arg,
+ enum sctp_disposition status,
+ struct sctp_cmd_seq *commands,
+ gfp_t gfp);
+static int sctp_side_effects(enum sctp_event_type event_type,
+ union sctp_subtype subtype,
+ enum sctp_state state,
+ struct sctp_endpoint *ep,
+ struct sctp_association **asoc,
+ void *event_arg,
+ enum sctp_disposition status,
+ struct sctp_cmd_seq *commands,
+ gfp_t gfp);
+
+/********************************************************************
+ * Helper functions
+ ********************************************************************/
+
+/* A helper function for delayed processing of INET ECN CE bit. */
+static void sctp_do_ecn_ce_work(struct sctp_association *asoc,
+ __u32 lowest_tsn)
+{
+ /* Save the TSN away for comparison when we receive CWR */
+
+ asoc->last_ecne_tsn = lowest_tsn;
+ asoc->need_ecne = 1;
+}
+
+/* Helper function for delayed processing of SCTP ECNE chunk. */
+/* RFC 2960 Appendix A
+ *
+ * RFC 2481 details a specific bit for a sender to send in
+ * the header of its next outbound TCP segment to indicate to
+ * its peer that it has reduced its congestion window. This
+ * is termed the CWR bit. For SCTP the same indication is made
+ * by including the CWR chunk. This chunk contains one data
+ * element, i.e. the TSN number that was sent in the ECNE chunk.
+ * This element represents the lowest TSN number in the datagram
+ * that was originally marked with the CE bit.
+ */
+static struct sctp_chunk *sctp_do_ecn_ecne_work(struct sctp_association *asoc,
+ __u32 lowest_tsn,
+ struct sctp_chunk *chunk)
+{
+ struct sctp_chunk *repl;
+
+ /* Our previously transmitted packet ran into some congestion
+ * so we should take action by reducing cwnd and ssthresh
+ * and then ACK our peer that we've done so by
+ * sending a CWR.
+ */
+
+ /* First, try to determine if we want to actually lower
+ * our cwnd variables. Only lower them if the ECNE looks more
+ * recent than the last response.
+ */
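+	/* TSN_lt() compares TSNs using serial number arithmetic, so the
+	 * test stays correct across 32-bit TSN wrap-around.
+	 */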
+ if (TSN_lt(asoc->last_cwr_tsn, lowest_tsn)) {
+ struct sctp_transport *transport;
+
+ /* Find which transport's congestion variables
+ * need to be adjusted.
+ */
+ transport = sctp_assoc_lookup_tsn(asoc, lowest_tsn);
+
+ /* Update the congestion variables. */
+ if (transport)
+ sctp_transport_lower_cwnd(transport,
+ SCTP_LOWER_CWND_ECNE);
+ asoc->last_cwr_tsn = lowest_tsn;
+ }
+
+ /* Always try to quiet the other end. In case of lost CWR,
+ * resend last_cwr_tsn.
+ */
+ repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk);
+
+ /* If we run out of memory, it will look like a lost CWR. We'll
+ * get back in sync eventually.
+ */
+ return repl;
+}
+
+/* Helper function to do delayed processing of ECN CWR chunk. */
+static void sctp_do_ecn_cwr_work(struct sctp_association *asoc,
+ __u32 lowest_tsn)
+{
+ /* Turn off ECNE getting auto-prepended to every outgoing
+ * packet
+ */
+ asoc->need_ecne = 0;
+}
+
+/* Generate SACK if necessary. We call this at the end of a packet. */
+static int sctp_gen_sack(struct sctp_association *asoc, int force,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_transport *trans = asoc->peer.last_data_from;
+ __u32 ctsn, max_tsn_seen;
+ struct sctp_chunk *sack;
+ int error = 0;
+
+ if (force ||
+ (!trans && (asoc->param_flags & SPP_SACKDELAY_DISABLE)) ||
+ (trans && (trans->param_flags & SPP_SACKDELAY_DISABLE)))
+ asoc->peer.sack_needed = 1;
+
+ ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
+ max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);
+
+ /* From 12.2 Parameters necessary per association (i.e. the TCB):
+ *
+ * Ack State : This flag indicates if the next received packet
+ * : is to be responded to with a SACK. ...
+ * : When DATA chunks are out of order, SACK's
+ * : are not delayed (see Section 6).
+ *
+ * [This is actually not mentioned in Section 6, but we
+ * implement it here anyway. --piggy]
+ */
+ if (max_tsn_seen != ctsn)
+ asoc->peer.sack_needed = 1;
+
+ /* From 6.2 Acknowledgement on Reception of DATA Chunks:
+ *
+ * Section 4.2 of [RFC2581] SHOULD be followed. Specifically,
+ * an acknowledgement SHOULD be generated for at least every
+ * second packet (not every second DATA chunk) received, and
+ * SHOULD be generated within 200 ms of the arrival of any
+ * unacknowledged DATA chunk. ...
+ */
+ if (!asoc->peer.sack_needed) {
+ asoc->peer.sack_cnt++;
+
+ /* Set the SACK delay timeout based on the
+ * SACK delay for the last transport
+ * data was received from, or the default
+ * for the association.
+ */
+ if (trans) {
+ /* We will need a SACK for the next packet. */
+ if (asoc->peer.sack_cnt >= trans->sackfreq - 1)
+ asoc->peer.sack_needed = 1;
+
+ asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
+ trans->sackdelay;
+ } else {
+ /* We will need a SACK for the next packet. */
+ if (asoc->peer.sack_cnt >= asoc->sackfreq - 1)
+ asoc->peer.sack_needed = 1;
+
+ asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
+ asoc->sackdelay;
+ }
+
+ /* Restart the SACK timer. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
+ } else {
+ __u32 old_a_rwnd = asoc->a_rwnd;
+
+ asoc->a_rwnd = asoc->rwnd;
+ sack = sctp_make_sack(asoc);
+ if (!sack) {
+ asoc->a_rwnd = old_a_rwnd;
+ goto nomem;
+ }
+
+ asoc->peer.sack_needed = 0;
+ asoc->peer.sack_cnt = 0;
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(sack));
+
+ /* Stop the SACK timer. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
+ }
+
+ return error;
+nomem:
+ error = -ENOMEM;
+ return error;
+}
+
+/* When the T3-RTX timer expires, it calls this function to create the
+ * relevant state machine event.
+ */
+void sctp_generate_t3_rtx_event(struct timer_list *t)
+{
+ struct sctp_transport *transport =
+ from_timer(transport, t, T3_rtx_timer);
+ struct sctp_association *asoc = transport->asoc;
+ struct sock *sk = asoc->base.sk;
+ struct net *net = sock_net(sk);
+ int error;
+
+ /* Check whether a user task currently owns the sock. */
+
+ bh_lock_sock(sk);
+ if (sock_owned_by_user(sk)) {
+ pr_debug("%s: sock is busy\n", __func__);
+
+ /* Try again later. */
+ if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20)))
+ sctp_transport_hold(transport);
+ goto out_unlock;
+ }
+
+ /* Run through the state machine. */
+ error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
+ SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX),
+ asoc->state,
+ asoc->ep, asoc,
+ transport, GFP_ATOMIC);
+
+ if (error)
+ sk->sk_err = -error;
+
+out_unlock:
+ bh_unlock_sock(sk);
+ sctp_transport_put(transport);
+}
+
+/* This is an interface for producing timeout events. It works
+ * for timeouts which use the association as their parameter.
+ */
+static void sctp_generate_timeout_event(struct sctp_association *asoc,
+ enum sctp_event_timeout timeout_type)
+{
+ struct sock *sk = asoc->base.sk;
+ struct net *net = sock_net(sk);
+ int error = 0;
+
+ bh_lock_sock(sk);
+ if (sock_owned_by_user(sk)) {
+ pr_debug("%s: sock is busy: timer %d\n", __func__,
+ timeout_type);
+
+ /* Try again later. */
+ if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20)))
+ sctp_association_hold(asoc);
+ goto out_unlock;
+ }
+
+ /* Is this association really dead and just waiting around for
+ * the timer to let go of the reference?
+ */
+ if (asoc->base.dead)
+ goto out_unlock;
+
+ /* Run through the state machine. */
+ error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
+ SCTP_ST_TIMEOUT(timeout_type),
+ asoc->state, asoc->ep, asoc,
+ (void *)timeout_type, GFP_ATOMIC);
+
+ if (error)
+ sk->sk_err = -error;
+
+out_unlock:
+ bh_unlock_sock(sk);
+ sctp_association_put(asoc);
+}
+
+static void sctp_generate_t1_cookie_event(struct timer_list *t)
+{
+ struct sctp_association *asoc =
+ from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T1_COOKIE]);
+
+ sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE);
+}
+
+static void sctp_generate_t1_init_event(struct timer_list *t)
+{
+ struct sctp_association *asoc =
+ from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T1_INIT]);
+
+ sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT);
+}
+
+static void sctp_generate_t2_shutdown_event(struct timer_list *t)
+{
+ struct sctp_association *asoc =
+ from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN]);
+
+ sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN);
+}
+
+static void sctp_generate_t4_rto_event(struct timer_list *t)
+{
+ struct sctp_association *asoc =
+ from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T4_RTO]);
+
+ sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T4_RTO);
+}
+
+static void sctp_generate_t5_shutdown_guard_event(struct timer_list *t)
+{
+ struct sctp_association *asoc =
+ from_timer(asoc, t,
+ timers[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]);
+
+ sctp_generate_timeout_event(asoc,
+ SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD);
+
+} /* sctp_generate_t5_shutdown_guard_event() */
+
+static void sctp_generate_autoclose_event(struct timer_list *t)
+{
+ struct sctp_association *asoc =
+ from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE]);
+
+ sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE);
+}
+
+/* Generate a heartbeat event. If the sock is busy, reschedule. Make
+ * sure that the transport is still valid.
+ */
+void sctp_generate_heartbeat_event(struct timer_list *t)
+{
+ struct sctp_transport *transport = from_timer(transport, t, hb_timer);
+ struct sctp_association *asoc = transport->asoc;
+ struct sock *sk = asoc->base.sk;
+ struct net *net = sock_net(sk);
+ u32 elapsed, timeout;
+ int error = 0;
+
+ bh_lock_sock(sk);
+ if (sock_owned_by_user(sk)) {
+ pr_debug("%s: sock is busy\n", __func__);
+
+ /* Try again later. */
+ if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20)))
+ sctp_transport_hold(transport);
+ goto out_unlock;
+ }
+
+ /* Check if we should still send the heartbeat or reschedule */
+ elapsed = jiffies - transport->last_time_sent;
+ timeout = sctp_transport_timeout(transport);
+ if (elapsed < timeout) {
+ elapsed = timeout - elapsed;
+ if (!mod_timer(&transport->hb_timer, jiffies + elapsed))
+ sctp_transport_hold(transport);
+ goto out_unlock;
+ }
+
+ error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
+ SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
+ asoc->state, asoc->ep, asoc,
+ transport, GFP_ATOMIC);
+
+ if (error)
+ sk->sk_err = -error;
+
+out_unlock:
+ bh_unlock_sock(sk);
+ sctp_transport_put(transport);
+}
+
+/* Handle the timeout of the ICMP protocol unreachable timer. Trigger
+ * the correct state machine transition that will close the association.
+ */
+void sctp_generate_proto_unreach_event(struct timer_list *t)
+{
+ struct sctp_transport *transport =
+ from_timer(transport, t, proto_unreach_timer);
+ struct sctp_association *asoc = transport->asoc;
+ struct sock *sk = asoc->base.sk;
+ struct net *net = sock_net(sk);
+
+ bh_lock_sock(sk);
+ if (sock_owned_by_user(sk)) {
+ pr_debug("%s: sock is busy\n", __func__);
+
+ /* Try again later. */
+ if (!mod_timer(&transport->proto_unreach_timer,
+ jiffies + (HZ/20)))
+ sctp_transport_hold(transport);
+ goto out_unlock;
+ }
+
+ /* Is this structure just waiting around for us to actually
+ * get destroyed?
+ */
+ if (asoc->base.dead)
+ goto out_unlock;
+
+ sctp_do_sm(net, SCTP_EVENT_T_OTHER,
+ SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
+ asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
+
+out_unlock:
+ bh_unlock_sock(sk);
+ sctp_transport_put(transport);
+}
+
+/* Handle the timeout of the RE-CONFIG timer. */
+void sctp_generate_reconf_event(struct timer_list *t)
+{
+ struct sctp_transport *transport =
+ from_timer(transport, t, reconf_timer);
+ struct sctp_association *asoc = transport->asoc;
+ struct sock *sk = asoc->base.sk;
+ struct net *net = sock_net(sk);
+ int error = 0;
+
+ bh_lock_sock(sk);
+ if (sock_owned_by_user(sk)) {
+ pr_debug("%s: sock is busy\n", __func__);
+
+ /* Try again later. */
+ if (!mod_timer(&transport->reconf_timer, jiffies + (HZ / 20)))
+ sctp_transport_hold(transport);
+ goto out_unlock;
+ }
+
+ /* This happens when the response arrives after the timer is triggered. */
+ if (!asoc->strreset_chunk)
+ goto out_unlock;
+
+ error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
+ SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_RECONF),
+ asoc->state, asoc->ep, asoc,
+ transport, GFP_ATOMIC);
+
+ if (error)
+ sk->sk_err = -error;
+
+out_unlock:
+ bh_unlock_sock(sk);
+ sctp_transport_put(transport);
+}
+
+/* Handle the timeout of the probe timer. */
+void sctp_generate_probe_event(struct timer_list *t)
+{
+ struct sctp_transport *transport = from_timer(transport, t, probe_timer);
+ struct sctp_association *asoc = transport->asoc;
+ struct sock *sk = asoc->base.sk;
+ struct net *net = sock_net(sk);
+ int error = 0;
+
+ bh_lock_sock(sk);
+ if (sock_owned_by_user(sk)) {
+ pr_debug("%s: sock is busy\n", __func__);
+
+ /* Try again later. */
+ if (!mod_timer(&transport->probe_timer, jiffies + (HZ / 20)))
+ sctp_transport_hold(transport);
+ goto out_unlock;
+ }
+
+ error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
+ SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_PROBE),
+ asoc->state, asoc->ep, asoc,
+ transport, GFP_ATOMIC);
+
+ if (error)
+ sk->sk_err = -error;
+
+out_unlock:
+ bh_unlock_sock(sk);
+ sctp_transport_put(transport);
+}
+
+/* Inject a SACK Timeout event into the state machine. */
+static void sctp_generate_sack_event(struct timer_list *t)
+{
+ struct sctp_association *asoc =
+ from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_SACK]);
+
+ sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK);
+}
+
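+/* Timeouts left NULL below (T3-RTX, HEARTBEAT, RECONF) and the probe timeout
+ * are armed per transport and use the dedicated transport timer callbacks
+ * above rather than the association-level generator.
+ */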
+sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
+ [SCTP_EVENT_TIMEOUT_NONE] = NULL,
+ [SCTP_EVENT_TIMEOUT_T1_COOKIE] = sctp_generate_t1_cookie_event,
+ [SCTP_EVENT_TIMEOUT_T1_INIT] = sctp_generate_t1_init_event,
+ [SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = sctp_generate_t2_shutdown_event,
+ [SCTP_EVENT_TIMEOUT_T3_RTX] = NULL,
+ [SCTP_EVENT_TIMEOUT_T4_RTO] = sctp_generate_t4_rto_event,
+ [SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD] =
+ sctp_generate_t5_shutdown_guard_event,
+ [SCTP_EVENT_TIMEOUT_HEARTBEAT] = NULL,
+ [SCTP_EVENT_TIMEOUT_RECONF] = NULL,
+ [SCTP_EVENT_TIMEOUT_SACK] = sctp_generate_sack_event,
+ [SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sctp_generate_autoclose_event,
+};
+
+
+/* RFC 2960 8.2 Path Failure Detection
+ *
+ * When its peer endpoint is multi-homed, an endpoint should keep an
+ * error counter for each of the destination transport addresses of the
+ * peer endpoint.
+ *
+ * Each time the T3-rtx timer expires on any address, or when a
+ * HEARTBEAT sent to an idle address is not acknowledged within a RTO,
+ * the error counter of that destination address will be incremented.
+ * When the value in the error counter exceeds the protocol parameter
+ * 'Path.Max.Retrans' of that destination address, the endpoint should
+ * mark the destination transport address as inactive, and a
+ * notification SHOULD be sent to the upper layer.
+ *
+ */
+static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
+ struct sctp_association *asoc,
+ struct sctp_transport *transport,
+ int is_hb)
+{
+ /* The check for association's overall error counter exceeding the
+ * threshold is done in the state function.
+ */
+ /* We are here due to a timer expiration. If the timer was
+ * not a HEARTBEAT, then normal error tracking is done.
+ * If the timer was a heartbeat, we only increment error counts
+ * when we already have an outstanding HEARTBEAT that has not
+ * been acknowledged.
+ * Additionally, some transport states inhibit error increments.
+ */
+ if (!is_hb) {
+ asoc->overall_error_count++;
+ if (transport->state != SCTP_INACTIVE)
+ transport->error_count++;
+ } else if (transport->hb_sent) {
+ if (transport->state != SCTP_UNCONFIRMED)
+ asoc->overall_error_count++;
+ if (transport->state != SCTP_INACTIVE)
+ transport->error_count++;
+ }
+
+ /* If the transport error count is greater than the pf_retrans
+ * threshold, and less than pathmaxrxt, and if the current state
+ * is SCTP_ACTIVE, then mark this transport as Partially Failed,
+ * see SCTP Quick Failover Draft, section 5.1
+ */
+ if (asoc->base.net->sctp.pf_enable &&
+ transport->state == SCTP_ACTIVE &&
+ transport->error_count < transport->pathmaxrxt &&
+ transport->error_count > transport->pf_retrans) {
+
+ sctp_assoc_control_transport(asoc, transport,
+ SCTP_TRANSPORT_PF,
+ 0);
+
+ /* Update the hb timer to resend a heartbeat every rto */
+ sctp_transport_reset_hb_timer(transport);
+ }
+
+ if (transport->state != SCTP_INACTIVE &&
+ (transport->error_count > transport->pathmaxrxt)) {
+ pr_debug("%s: association:%p transport addr:%pISpc failed\n",
+ __func__, asoc, &transport->ipaddr.sa);
+
+ sctp_assoc_control_transport(asoc, transport,
+ SCTP_TRANSPORT_DOWN,
+ SCTP_FAILED_THRESHOLD);
+ }
+
+ if (transport->error_count > transport->ps_retrans &&
+ asoc->peer.primary_path == transport &&
+ asoc->peer.active_path != transport)
+ sctp_assoc_set_primary(asoc, asoc->peer.active_path);
+
+ /* E2) For the destination address for which the timer
+ * expires, set RTO <- RTO * 2 ("back off the timer"). The
+ * maximum value discussed in rule C7 above (RTO.max) may be
+ * used to provide an upper bound to this doubling operation.
+ *
+ * Special Case: the first HB doesn't trigger exponential backoff.
+ * The first unacknowledged HB triggers it. We do this with a flag
+ * that indicates that we have an outstanding HB.
+ */
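+	/* For example, an RTO of 3s (the default rto_initial) backs off to
+	 * 6s, 12s, 24s, 48s and is then clamped at the default rto_max of
+	 * 60s.
+	 */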
+ if (!is_hb || transport->hb_sent) {
+ transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
+ sctp_max_rto(asoc, transport);
+ }
+}
+
+/* Worker routine to handle INIT command failure. */
+static void sctp_cmd_init_failed(struct sctp_cmd_seq *commands,
+ struct sctp_association *asoc,
+ unsigned int error)
+{
+ struct sctp_ulpevent *event;
+
+ event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_CANT_STR_ASSOC,
+ (__u16)error, 0, 0, NULL,
+ GFP_ATOMIC);
+
+ if (event)
+ sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
+ SCTP_ULPEVENT(event));
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+ SCTP_STATE(SCTP_STATE_CLOSED));
+
+ /* SEND_FAILED sent later when cleaning up the association. */
+ asoc->outqueue.error = error;
+ sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
+}
+
+/* Worker routine to handle SCTP_CMD_ASSOC_FAILED. */
+static void sctp_cmd_assoc_failed(struct sctp_cmd_seq *commands,
+ struct sctp_association *asoc,
+ enum sctp_event_type event_type,
+ union sctp_subtype subtype,
+ struct sctp_chunk *chunk,
+ unsigned int error)
+{
+ struct sctp_ulpevent *event;
+ struct sctp_chunk *abort;
+
+ /* Cancel any partial delivery in progress. */
+ asoc->stream.si->abort_pd(&asoc->ulpq, GFP_ATOMIC);
+
+ if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT)
+ event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
+ (__u16)error, 0, 0, chunk,
+ GFP_ATOMIC);
+ else
+ event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
+ (__u16)error, 0, 0, NULL,
+ GFP_ATOMIC);
+ if (event)
+ sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
+ SCTP_ULPEVENT(event));
+
+ if (asoc->overall_error_count >= asoc->max_retrans) {
+ abort = sctp_make_violation_max_retrans(asoc, chunk);
+ if (abort)
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+ SCTP_CHUNK(abort));
+ }
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+ SCTP_STATE(SCTP_STATE_CLOSED));
+
+ /* SEND_FAILED sent later when cleaning up the association. */
+ asoc->outqueue.error = error;
+ sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
+}
+
+/* Process an init chunk (may be real INIT/INIT-ACK or an embedded INIT
+ * inside the cookie). In reality, this is only used for INIT-ACK processing
+ * since all other cases use "temporary" associations and can do all
+ * their work in statefuns directly.
+ */
+static int sctp_cmd_process_init(struct sctp_cmd_seq *commands,
+ struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ struct sctp_init_chunk *peer_init,
+ gfp_t gfp)
+{
+ int error;
+
+ /* We only process the init as a sideeffect in a single
+ * case. This is when we process the INIT-ACK. If we
+ * fail during INIT processing (due to malloc problems),
+ * just return the error and stop processing the stack.
+ */
+ if (!sctp_process_init(asoc, chunk, sctp_source(chunk), peer_init, gfp))
+ error = -ENOMEM;
+ else
+ error = 0;
+
+ return error;
+}
+
+/* Helper function to start the heartbeat timers. */
+static void sctp_cmd_hb_timers_start(struct sctp_cmd_seq *cmds,
+ struct sctp_association *asoc)
+{
+ struct sctp_transport *t;
+
+ /* Start a heartbeat timer for each transport on the association.
+ * hold a reference on the transport to make sure none of
+ * the needed data structures go away.
+ */
+ list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
+ sctp_transport_reset_hb_timer(t);
+}
+
+static void sctp_cmd_hb_timers_stop(struct sctp_cmd_seq *cmds,
+ struct sctp_association *asoc)
+{
+ struct sctp_transport *t;
+
+ /* Stop all heartbeat timers. */
+
+ list_for_each_entry(t, &asoc->peer.transport_addr_list,
+ transports) {
+ if (del_timer(&t->hb_timer))
+ sctp_transport_put(t);
+ }
+}
+
+/* Helper function to stop any pending T3-RTX timers */
+static void sctp_cmd_t3_rtx_timers_stop(struct sctp_cmd_seq *cmds,
+ struct sctp_association *asoc)
+{
+ struct sctp_transport *t;
+
+ list_for_each_entry(t, &asoc->peer.transport_addr_list,
+ transports) {
+ if (del_timer(&t->T3_rtx_timer))
+ sctp_transport_put(t);
+ }
+}
+
+
+/* Helper function to handle the reception of a HEARTBEAT ACK. */
+static void sctp_cmd_transport_on(struct sctp_cmd_seq *cmds,
+ struct sctp_association *asoc,
+ struct sctp_transport *t,
+ struct sctp_chunk *chunk)
+{
+ struct sctp_sender_hb_info *hbinfo;
+ int was_unconfirmed = 0;
+
+ /* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
+ * HEARTBEAT should clear the error counter of the destination
+ * transport address to which the HEARTBEAT was sent.
+ */
+ t->error_count = 0;
+
+ /*
+ * Although RFC4960 specifies that the overall error count must
+ * be cleared when a HEARTBEAT ACK is received, we make an
+ * exception while in SHUTDOWN PENDING. If the peer keeps its
+ * window shut forever, we may never be able to transmit our
+ * outstanding data and rely on the retransmission limit being reached
+ * to shut down the association.
+ */
+ if (t->asoc->state < SCTP_STATE_SHUTDOWN_PENDING)
+ t->asoc->overall_error_count = 0;
+
+ /* Clear the hb_sent flag to signal that we had a good
+ * acknowledgement.
+ */
+ t->hb_sent = 0;
+
+ /* Mark the destination transport address as active if it is not so
+ * marked.
+ */
+ if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) {
+ was_unconfirmed = 1;
+ sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
+ SCTP_HEARTBEAT_SUCCESS);
+ }
+
+ if (t->state == SCTP_PF)
+ sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
+ SCTP_HEARTBEAT_SUCCESS);
+
+ /* HB-ACK was received for the proper HB. Consider this
+ * forward progress.
+ */
+ if (t->dst)
+ sctp_transport_dst_confirm(t);
+
+ /* The receiver of the HEARTBEAT ACK should also perform an
+ * RTT measurement for that destination transport address
+ * using the time value carried in the HEARTBEAT ACK chunk.
+ * If the transport's rto_pending variable has been cleared,
+ * it was most likely due to a retransmit. However, we want
+ * to re-enable it to properly update the rto.
+ */
+ if (t->rto_pending == 0)
+ t->rto_pending = 1;
+
+ hbinfo = (struct sctp_sender_hb_info *)chunk->skb->data;
+ sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));
+
+ /* Update the heartbeat timer. */
+ sctp_transport_reset_hb_timer(t);
+
+ if (was_unconfirmed && asoc->peer.transport_count == 1)
+ sctp_transport_immediate_rtx(t);
+}
+
+
+/* Helper function to process the SCTP_CMD_PROCESS_SACK command. */
+static int sctp_cmd_process_sack(struct sctp_cmd_seq *cmds,
+ struct sctp_association *asoc,
+ struct sctp_chunk *chunk)
+{
+ int err = 0;
+
+ if (sctp_outq_sack(&asoc->outqueue, chunk)) {
+ /* There are no more TSNs awaiting SACK. */
+ err = sctp_do_sm(asoc->base.net, SCTP_EVENT_T_OTHER,
+ SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
+ asoc->state, asoc->ep, asoc, NULL,
+ GFP_ATOMIC);
+ }
+
+ return err;
+}
+
+/* Helper function to set the timeout value for T2-SHUTDOWN timer and to set
+ * the transport for a shutdown chunk.
+ */
+static void sctp_cmd_setup_t2(struct sctp_cmd_seq *cmds,
+ struct sctp_association *asoc,
+ struct sctp_chunk *chunk)
+{
+ struct sctp_transport *t;
+
+ if (chunk->transport)
+ t = chunk->transport;
+ else {
+ t = sctp_assoc_choose_alter_transport(asoc,
+ asoc->shutdown_last_sent_to);
+ chunk->transport = t;
+ }
+ asoc->shutdown_last_sent_to = t;
+ asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto;
+}
+
+/* Helper function to change the state of an association. */
+static void sctp_cmd_new_state(struct sctp_cmd_seq *cmds,
+ struct sctp_association *asoc,
+ enum sctp_state state)
+{
+ struct sock *sk = asoc->base.sk;
+
+ asoc->state = state;
+
+ pr_debug("%s: asoc:%p[%s]\n", __func__, asoc, sctp_state_tbl[state]);
+
+ if (sctp_style(sk, TCP)) {
+ /* Change the sk->sk_state of a TCP-style socket that has
+ * successfully completed a connect() call.
+ */
+ if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
+ inet_sk_set_state(sk, SCTP_SS_ESTABLISHED);
+
+ /* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */
+ if (sctp_state(asoc, SHUTDOWN_RECEIVED) &&
+ sctp_sstate(sk, ESTABLISHED)) {
+ inet_sk_set_state(sk, SCTP_SS_CLOSING);
+ sk->sk_shutdown |= RCV_SHUTDOWN;
+ }
+ }
+
+ if (sctp_state(asoc, COOKIE_WAIT)) {
+ /* Reset init timeouts since they may have been
+ * increased due to timer expirations.
+ */
+ asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] =
+ asoc->rto_initial;
+ asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] =
+ asoc->rto_initial;
+ }
+
+ if (sctp_state(asoc, ESTABLISHED)) {
+ kfree(asoc->peer.cookie);
+ asoc->peer.cookie = NULL;
+ }
+
+ if (sctp_state(asoc, ESTABLISHED) ||
+ sctp_state(asoc, CLOSED) ||
+ sctp_state(asoc, SHUTDOWN_RECEIVED)) {
+ /* Wake up any processes waiting in the asoc's wait queue in
+ * sctp_wait_for_connect() or sctp_wait_for_sndbuf().
+ */
+ if (waitqueue_active(&asoc->wait))
+ wake_up_interruptible(&asoc->wait);
+
+ /* Wake up any processes waiting in the sk's sleep queue of
+ * a TCP-style or UDP-style peeled-off socket in
+ * sctp_wait_for_accept() or sctp_wait_for_packet().
+ * For a UDP-style socket, the waiters are woken up by the
+ * notifications.
+ */
+ if (!sctp_style(sk, UDP))
+ sk->sk_state_change(sk);
+ }
+
+ if (sctp_state(asoc, SHUTDOWN_PENDING) &&
+ !sctp_outq_is_empty(&asoc->outqueue))
+ sctp_outq_uncork(&asoc->outqueue, GFP_ATOMIC);
+}
+
+/* Helper function to delete an association. */
+static void sctp_cmd_delete_tcb(struct sctp_cmd_seq *cmds,
+ struct sctp_association *asoc)
+{
+ struct sock *sk = asoc->base.sk;
+
+ /* If it is a non-temporary association belonging to a TCP-style
+ * listening socket that is not closed, do not free it so that accept()
+ * can pick it up later.
+ */
+ if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING) &&
+ (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK))
+ return;
+
+ sctp_association_free(asoc);
+}
+
+/*
+ * ADDIP Section 4.1 ASCONF Chunk Procedures
+ * A4) Start a T-4 RTO timer, using the RTO value of the selected
+ * destination address (we use active path instead of primary path just
+ * because the primary path may be inactive).
+ */
+static void sctp_cmd_setup_t4(struct sctp_cmd_seq *cmds,
+ struct sctp_association *asoc,
+ struct sctp_chunk *chunk)
+{
+ struct sctp_transport *t;
+
+ t = sctp_assoc_choose_alter_transport(asoc, chunk->transport);
+ asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = t->rto;
+ chunk->transport = t;
+}
+
+/* Process an incoming Operation Error Chunk. */
+static void sctp_cmd_process_operr(struct sctp_cmd_seq *cmds,
+ struct sctp_association *asoc,
+ struct sctp_chunk *chunk)
+{
+ struct sctp_errhdr *err_hdr;
+ struct sctp_ulpevent *ev;
+
+ while (chunk->chunk_end > chunk->skb->data) {
+ err_hdr = (struct sctp_errhdr *)(chunk->skb->data);
+
+ ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0,
+ GFP_ATOMIC);
+ if (!ev)
+ return;
+
+ asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
+
+ switch (err_hdr->cause) {
+ case SCTP_ERROR_UNKNOWN_CHUNK:
+ {
+ struct sctp_chunkhdr *unk_chunk_hdr;
+
+ unk_chunk_hdr = (struct sctp_chunkhdr *)
+ err_hdr->variable;
+ switch (unk_chunk_hdr->type) {
+ /* ADDIP 4.1 A9) If the peer responds to an ASCONF with
+ * an ERROR chunk reporting that it did not recognize
+ * the ASCONF chunk type, the sender of the ASCONF MUST
+ * NOT send any further ASCONF chunks and MUST stop its
+ * T-4 timer.
+ */
+ case SCTP_CID_ASCONF:
+ if (asoc->peer.asconf_capable == 0)
+ break;
+
+ asoc->peer.asconf_capable = 0;
+ sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
+}
+
+/* Helper function to remove the association non-primary peer
+ * transports.
+ */
+static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
+{
+ struct sctp_transport *t;
+ struct list_head *temp;
+ struct list_head *pos;
+
+ list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
+ t = list_entry(pos, struct sctp_transport, transports);
+ if (!sctp_cmp_addr_exact(&t->ipaddr,
+ &asoc->peer.primary_addr)) {
+ sctp_assoc_rm_peer(asoc, t);
+ }
+ }
+}
+
+/* Helper function to set sk_err on a 1-1 style socket. */
+static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error)
+{
+ struct sock *sk = asoc->base.sk;
+
+ if (!sctp_style(sk, UDP))
+ sk->sk_err = error;
+}
+
+/* Helper function to generate an association change event */
+static void sctp_cmd_assoc_change(struct sctp_cmd_seq *commands,
+ struct sctp_association *asoc,
+ u8 state)
+{
+ struct sctp_ulpevent *ev;
+
+ ev = sctp_ulpevent_make_assoc_change(asoc, 0, state, 0,
+ asoc->c.sinit_num_ostreams,
+ asoc->c.sinit_max_instreams,
+ NULL, GFP_ATOMIC);
+ if (ev)
+ asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
+}
+
+static void sctp_cmd_peer_no_auth(struct sctp_cmd_seq *commands,
+ struct sctp_association *asoc)
+{
+ struct sctp_ulpevent *ev;
+
+ ev = sctp_ulpevent_make_authkey(asoc, 0, SCTP_AUTH_NO_AUTH, GFP_ATOMIC);
+ if (ev)
+ asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
+}
+
+/* Helper function to generate an adaptation indication event */
+static void sctp_cmd_adaptation_ind(struct sctp_cmd_seq *commands,
+ struct sctp_association *asoc)
+{
+ struct sctp_ulpevent *ev;
+
+ ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);
+
+ if (ev)
+ asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
+}
+
+
+static void sctp_cmd_t1_timer_update(struct sctp_association *asoc,
+ enum sctp_event_timeout timer,
+ char *name)
+{
+ struct sctp_transport *t;
+
+ t = asoc->init_last_sent_to;
+ asoc->init_err_counter++;
+
+ if (t->init_sent_count > (asoc->init_cycle + 1)) {
+ asoc->timeouts[timer] *= 2;
+ if (asoc->timeouts[timer] > asoc->max_init_timeo) {
+ asoc->timeouts[timer] = asoc->max_init_timeo;
+ }
+ asoc->init_cycle++;
+
+ pr_debug("%s: T1[%s] timeout adjustment init_err_counter:%d"
+ " cycle:%d timeout:%ld\n", __func__, name,
+ asoc->init_err_counter, asoc->init_cycle,
+ asoc->timeouts[timer]);
+ }
+
+}
+
+/* Send the whole message, chunk by chunk, to the outqueue.
+ * This way the whole message is queued up and bundling is
+ * encouraged for small fragments.
+ */
+static void sctp_cmd_send_msg(struct sctp_association *asoc,
+ struct sctp_datamsg *msg, gfp_t gfp)
+{
+ struct sctp_chunk *chunk;
+
+ list_for_each_entry(chunk, &msg->chunks, frag_list)
+ sctp_outq_tail(&asoc->outqueue, chunk, gfp);
+
+ asoc->outqueue.sched->enqueue(&asoc->outqueue, msg);
+}
+
+
+/* These three macros allow us to pull the debugging code out of the
+ * main flow of sctp_do_sm() to keep attention focused on the real
+ * functionality there.
+ */
+#define debug_pre_sfn() \
+ pr_debug("%s[pre-fn]: ep:%p, %s, %s, asoc:%p[%s], %s\n", __func__, \
+ ep, sctp_evttype_tbl[event_type], (*debug_fn)(subtype), \
+ asoc, sctp_state_tbl[state], state_fn->name)
+
+#define debug_post_sfn() \
+ pr_debug("%s[post-fn]: asoc:%p, status:%s\n", __func__, asoc, \
+ sctp_status_tbl[status])
+
+#define debug_post_sfx() \
+ pr_debug("%s[post-sfx]: error:%d, asoc:%p[%s]\n", __func__, error, \
+ asoc, sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \
+ sctp_assoc2id(asoc))) ? asoc->state : SCTP_STATE_CLOSED])
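+
+/* Note: these macros expand to pr_debug(), so on a kernel built with
+ * CONFIG_DYNAMIC_DEBUG the messages can typically be enabled at runtime
+ * through the dynamic debug control file, e.g. something like
+ * "echo 'func sctp_do_sm +p' > /sys/kernel/debug/dynamic_debug/control".
+ */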
+
+/*
+ * This is the master state machine processing function.
+ *
+ * If you want to understand all of lksctp, this is a
+ * good place to start.
+ */
+int sctp_do_sm(struct net *net, enum sctp_event_type event_type,
+ union sctp_subtype subtype, enum sctp_state state,
+ struct sctp_endpoint *ep, struct sctp_association *asoc,
+ void *event_arg, gfp_t gfp)
+{
+ typedef const char *(printfn_t)(union sctp_subtype);
+ static printfn_t *table[] = {
+ NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname,
+ };
+ printfn_t *debug_fn __attribute__ ((unused)) = table[event_type];
+ const struct sctp_sm_table_entry *state_fn;
+ struct sctp_cmd_seq commands;
+ enum sctp_disposition status;
+ int error = 0;
+
+ /* Look up the state function, run it, and then process the
+ * side effects. These three steps are the heart of lksctp.
+ */
+ state_fn = sctp_sm_lookup_event(net, event_type, state, subtype);
+
+ sctp_init_cmd_seq(&commands);
+
+ debug_pre_sfn();
+ status = state_fn->fn(net, ep, asoc, subtype, event_arg, &commands);
+ debug_post_sfn();
+
+ error = sctp_side_effects(event_type, subtype, state,
+ ep, &asoc, event_arg, status,
+ &commands, gfp);
+ debug_post_sfx();
+
+ return error;
+}
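+
+/* Illustrative sketch only: the chunk-receive path typically drives the
+ * state machine roughly as
+ *
+ *	subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
+ *	error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype, asoc->state,
+ *			   ep, asoc, chunk, GFP_ATOMIC);
+ *
+ * while timer expirations enter with SCTP_EVENT_T_TIMEOUT and the timeout
+ * type as the subtype.
+ */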
+
+/*****************************************************************
+ * This is the master state function side-effect processing function.
+ *****************************************************************/
+static int sctp_side_effects(enum sctp_event_type event_type,
+ union sctp_subtype subtype,
+ enum sctp_state state,
+ struct sctp_endpoint *ep,
+ struct sctp_association **asoc,
+ void *event_arg,
+ enum sctp_disposition status,
+ struct sctp_cmd_seq *commands,
+ gfp_t gfp)
+{
+ int error;
+
+ /* FIXME - Most of the dispositions left today would be categorized
+ * as "exceptional" dispositions. For those dispositions, it
+ * may not be proper to run through any of the commands at all.
+ * For example, the command interpreter might be run only with
+ * disposition SCTP_DISPOSITION_CONSUME.
+ */
+ if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state,
+ ep, *asoc,
+ event_arg, status,
+ commands, gfp)))
+ goto bail;
+
+ switch (status) {
+ case SCTP_DISPOSITION_DISCARD:
+ pr_debug("%s: ignored sctp protocol event - state:%d, "
+ "event_type:%d, event_id:%d\n", __func__, state,
+ event_type, subtype.chunk);
+ break;
+
+ case SCTP_DISPOSITION_NOMEM:
+ /* We ran out of memory, so we need to discard this
+ * packet.
+ */
+ /* BUG--we should now recover some memory, probably by
+ * reneging...
+ */
+ error = -ENOMEM;
+ break;
+
+ case SCTP_DISPOSITION_DELETE_TCB:
+ case SCTP_DISPOSITION_ABORT:
+ /* This should now be a command. */
+ *asoc = NULL;
+ break;
+
+ case SCTP_DISPOSITION_CONSUME:
+ /*
+ * We should no longer have much work to do here as the
+ * real work has been done as explicit commands above.
+ */
+ break;
+
+ case SCTP_DISPOSITION_VIOLATION:
+ net_err_ratelimited("protocol violation state %d chunkid %d\n",
+ state, subtype.chunk);
+ break;
+
+ case SCTP_DISPOSITION_NOT_IMPL:
+ pr_warn("unimplemented feature in state %d, event_type %d, event_id %d\n",
+ state, event_type, subtype.chunk);
+ break;
+
+ case SCTP_DISPOSITION_BUG:
+ pr_err("bug in state %d, event_type %d, event_id %d\n",
+ state, event_type, subtype.chunk);
+ BUG();
+ break;
+
+ default:
+ pr_err("impossible disposition %d in state %d, event_type %d, event_id %d\n",
+ status, state, event_type, subtype.chunk);
+ error = status;
+ if (error >= 0)
+ error = -EINVAL;
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+bail:
+ return error;
+}
+
+/********************************************************************
+ * 2nd Level Abstractions
+ ********************************************************************/
+
+/* This is the side-effect interpreter. */
+static int sctp_cmd_interpreter(enum sctp_event_type event_type,
+ union sctp_subtype subtype,
+ enum sctp_state state,
+ struct sctp_endpoint *ep,
+ struct sctp_association *asoc,
+ void *event_arg,
+ enum sctp_disposition status,
+ struct sctp_cmd_seq *commands,
+ gfp_t gfp)
+{
+ struct sctp_sock *sp = sctp_sk(ep->base.sk);
+ struct sctp_chunk *chunk = NULL, *new_obj;
+ struct sctp_packet *packet;
+ struct sctp_sackhdr sackh;
+ struct timer_list *timer;
+ struct sctp_transport *t;
+ unsigned long timeout;
+ struct sctp_cmd *cmd;
+ int local_cork = 0;
+ int error = 0;
+ int force;
+
+ if (SCTP_EVENT_T_TIMEOUT != event_type)
+ chunk = event_arg;
+
+ /* Note: This whole file is a huge candidate for rework.
+ * For example, each command could either have its own handler, so
+ * the loop would look like:
+ * while (cmds)
+ * cmd->handle(x, y, z)
+ * --jgrimm
+ */
+ while (NULL != (cmd = sctp_next_cmd(commands))) {
+ switch (cmd->verb) {
+ case SCTP_CMD_NOP:
+ /* Do nothing. */
+ break;
+
+ case SCTP_CMD_NEW_ASOC:
+ /* Register a new association. */
+ if (local_cork) {
+ sctp_outq_uncork(&asoc->outqueue, gfp);
+ local_cork = 0;
+ }
+
+ /* Register with the endpoint. */
+ asoc = cmd->obj.asoc;
+ BUG_ON(asoc->peer.primary_path == NULL);
+ sctp_endpoint_add_asoc(ep, asoc);
+ break;
+
+ case SCTP_CMD_PURGE_OUTQUEUE:
+ sctp_outq_teardown(&asoc->outqueue);
+ break;
+
+ case SCTP_CMD_DELETE_TCB:
+ if (local_cork) {
+ sctp_outq_uncork(&asoc->outqueue, gfp);
+ local_cork = 0;
+ }
+ /* Delete the current association. */
+ sctp_cmd_delete_tcb(commands, asoc);
+ asoc = NULL;
+ break;
+
+ case SCTP_CMD_NEW_STATE:
+ /* Enter a new state. */
+ sctp_cmd_new_state(commands, asoc, cmd->obj.state);
+ break;
+
+ case SCTP_CMD_REPORT_TSN:
+ /* Record the arrival of a TSN. */
+ error = sctp_tsnmap_mark(&asoc->peer.tsn_map,
+ cmd->obj.u32, NULL);
+ break;
+
+ case SCTP_CMD_REPORT_FWDTSN:
+ asoc->stream.si->report_ftsn(&asoc->ulpq, cmd->obj.u32);
+ break;
+
+ case SCTP_CMD_PROCESS_FWDTSN:
+ asoc->stream.si->handle_ftsn(&asoc->ulpq,
+ cmd->obj.chunk);
+ break;
+
+ case SCTP_CMD_GEN_SACK:
+ /* Generate a Selective ACK.
+ * The argument tells us whether to just count
+ * the packet and MAYBE generate a SACK, or
+ * force a SACK out.
+ */
+ force = cmd->obj.i32;
+ error = sctp_gen_sack(asoc, force, commands);
+ break;
+
+ case SCTP_CMD_PROCESS_SACK:
+ /* Process an inbound SACK. */
+ error = sctp_cmd_process_sack(commands, asoc,
+ cmd->obj.chunk);
+ break;
+
+ case SCTP_CMD_GEN_INIT_ACK:
+ /* Generate an INIT ACK chunk. */
+ new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
+ 0);
+ if (!new_obj) {
+ error = -ENOMEM;
+ break;
+ }
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+ SCTP_CHUNK(new_obj));
+ break;
+
+ case SCTP_CMD_PEER_INIT:
+ /* Process a unified INIT from the peer.
+ * Note: Only used during INIT-ACK processing. If
+ * there is an error, just return to the outer
+ * layer, which will bail.
+ */
+ error = sctp_cmd_process_init(commands, asoc, chunk,
+ cmd->obj.init, gfp);
+ break;
+
+ case SCTP_CMD_GEN_COOKIE_ECHO:
+ /* Generate a COOKIE ECHO chunk. */
+ new_obj = sctp_make_cookie_echo(asoc, chunk);
+ if (!new_obj) {
+ if (cmd->obj.chunk)
+ sctp_chunk_free(cmd->obj.chunk);
+ error = -ENOMEM;
+ break;
+ }
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+ SCTP_CHUNK(new_obj));
+
+ /* If there is an ERROR chunk to be sent along with
+ * the COOKIE_ECHO, send it, too.
+ */
+ if (cmd->obj.chunk)
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+ SCTP_CHUNK(cmd->obj.chunk));
+
+ if (new_obj->transport) {
+ new_obj->transport->init_sent_count++;
+ asoc->init_last_sent_to = new_obj->transport;
+ }
+
+ /* FIXME - Eventually come up with a cleaner way to
+ * enable COOKIE-ECHO + DATA bundling during
+ * multihoming stale cookie scenarios. The following
+ * command plays with asoc->peer.retran_path to
+ * avoid the problem of sending the COOKIE-ECHO and
+ * DATA on different paths, which could result
+ * in the association being ABORTed if the DATA chunk
+ * is processed first by the server. Checking the
+ * init error counter simply causes this command
+ * to be executed only during failed attempts at
+ * association establishment.
+ */
+ if ((asoc->peer.retran_path !=
+ asoc->peer.primary_path) &&
+ (asoc->init_err_counter > 0)) {
+ sctp_add_cmd_sf(commands,
+ SCTP_CMD_FORCE_PRIM_RETRAN,
+ SCTP_NULL());
+ }
+
+ break;
+
+ case SCTP_CMD_GEN_SHUTDOWN:
+ /* Generate SHUTDOWN when in SHUTDOWN_SENT state.
+ * Reset error counts.
+ */
+ asoc->overall_error_count = 0;
+
+ /* Generate a SHUTDOWN chunk. */
+ new_obj = sctp_make_shutdown(asoc, chunk);
+ if (!new_obj) {
+ error = -ENOMEM;
+ break;
+ }
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+ SCTP_CHUNK(new_obj));
+ break;
+
+ case SCTP_CMD_CHUNK_ULP:
+ /* Send a chunk to the sockets layer. */
+ pr_debug("%s: sm_sideff: chunk_up:%p, ulpq:%p\n",
+ __func__, cmd->obj.chunk, &asoc->ulpq);
+
+ asoc->stream.si->ulpevent_data(&asoc->ulpq,
+ cmd->obj.chunk,
+ GFP_ATOMIC);
+ break;
+
+ case SCTP_CMD_EVENT_ULP:
+ /* Send a notification to the sockets layer. */
+ pr_debug("%s: sm_sideff: event_up:%p, ulpq:%p\n",
+ __func__, cmd->obj.ulpevent, &asoc->ulpq);
+
+ asoc->stream.si->enqueue_event(&asoc->ulpq,
+ cmd->obj.ulpevent);
+ break;
+
+ case SCTP_CMD_REPLY:
+ /* If the caller has not already corked, do so now. */
+ if (!asoc->outqueue.cork) {
+ sctp_outq_cork(&asoc->outqueue);
+ local_cork = 1;
+ }
+ /* Send a chunk to our peer. */
+ sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk, gfp);
+ break;
+
+ case SCTP_CMD_SEND_PKT:
+ /* Send a full packet to our peer. */
+ packet = cmd->obj.packet;
+ sctp_packet_transmit(packet, gfp);
+ sctp_ootb_pkt_free(packet);
+ break;
+
+ case SCTP_CMD_T1_RETRAN:
+ /* Mark a transport for retransmission. */
+ sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
+ SCTP_RTXR_T1_RTX);
+ break;
+
+ case SCTP_CMD_RETRAN:
+ /* Mark a transport for retransmission. */
+ sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
+ SCTP_RTXR_T3_RTX);
+ break;
+
+ case SCTP_CMD_ECN_CE:
+ /* Do delayed CE processing. */
+ sctp_do_ecn_ce_work(asoc, cmd->obj.u32);
+ break;
+
+ case SCTP_CMD_ECN_ECNE:
+ /* Do delayed ECNE processing. */
+ new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32,
+ chunk);
+ if (new_obj)
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+ SCTP_CHUNK(new_obj));
+ break;
+
+ case SCTP_CMD_ECN_CWR:
+ /* Do delayed CWR processing. */
+ sctp_do_ecn_cwr_work(asoc, cmd->obj.u32);
+ break;
+
+ case SCTP_CMD_SETUP_T2:
+ sctp_cmd_setup_t2(commands, asoc, cmd->obj.chunk);
+ break;
+
+ case SCTP_CMD_TIMER_START_ONCE:
+ timer = &asoc->timers[cmd->obj.to];
+
+ if (timer_pending(timer))
+ break;
+ fallthrough;
+
+ case SCTP_CMD_TIMER_START:
+ timer = &asoc->timers[cmd->obj.to];
+ timeout = asoc->timeouts[cmd->obj.to];
+ BUG_ON(!timeout);
+
+ /*
+ * SCTP has a hard time with timer starts. Because we process
+ * timer starts as side effects, it can be hard to tell if we
+ * have already started a timer or not, which leads to BUG
+ * halts when we call add_timer. So here, instead of blindly starting
+ * the timer, we take a reference only if it is not already pending and
+ * then reduce its expiry to the shorter of the two expiration times.
+ */
+ if (!timer_pending(timer))
+ sctp_association_hold(asoc);
+ timer_reduce(timer, jiffies + timeout);
+ break;
+
+ case SCTP_CMD_TIMER_RESTART:
+ timer = &asoc->timers[cmd->obj.to];
+ timeout = asoc->timeouts[cmd->obj.to];
+ if (!mod_timer(timer, jiffies + timeout))
+ sctp_association_hold(asoc);
+ break;
+
+ case SCTP_CMD_TIMER_STOP:
+ timer = &asoc->timers[cmd->obj.to];
+ if (del_timer(timer))
+ sctp_association_put(asoc);
+ break;
+
+ case SCTP_CMD_INIT_CHOOSE_TRANSPORT:
+ chunk = cmd->obj.chunk;
+ t = sctp_assoc_choose_alter_transport(asoc,
+ asoc->init_last_sent_to);
+ asoc->init_last_sent_to = t;
+ chunk->transport = t;
+ t->init_sent_count++;
+ /* Set the new transport as primary */
+ sctp_assoc_set_primary(asoc, t);
+ break;
+
+ case SCTP_CMD_INIT_RESTART:
+ /* Do the needed accounting and updates
+ * associated with restarting an initialization
+ * timer. Only multiply the timeout by two if
+ * all transports have been tried at the current
+ * timeout.
+ */
+ sctp_cmd_t1_timer_update(asoc,
+ SCTP_EVENT_TIMEOUT_T1_INIT,
+ "INIT");
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+ break;
+
+ case SCTP_CMD_COOKIEECHO_RESTART:
+ /* Do the needed accounting and updates
+ * associated with restarting an initialization
+ * timer. Only multiply the timeout by two if
+ * all transports have been tried at the current
+ * timeout.
+ */
+ sctp_cmd_t1_timer_update(asoc,
+ SCTP_EVENT_TIMEOUT_T1_COOKIE,
+ "COOKIE");
+
+ /* If we've sent any data bundled with
+ * COOKIE-ECHO we need to resend.
+ */
+ list_for_each_entry(t, &asoc->peer.transport_addr_list,
+ transports) {
+ sctp_retransmit_mark(&asoc->outqueue, t,
+ SCTP_RTXR_T1_RTX);
+ }
+
+ sctp_add_cmd_sf(commands,
+ SCTP_CMD_TIMER_RESTART,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
+ break;
+
+ case SCTP_CMD_INIT_FAILED:
+ sctp_cmd_init_failed(commands, asoc, cmd->obj.u16);
+ break;
+
+ case SCTP_CMD_ASSOC_FAILED:
+ sctp_cmd_assoc_failed(commands, asoc, event_type,
+ subtype, chunk, cmd->obj.u16);
+ break;
+
+ case SCTP_CMD_INIT_COUNTER_INC:
+ asoc->init_err_counter++;
+ break;
+
+ case SCTP_CMD_INIT_COUNTER_RESET:
+ asoc->init_err_counter = 0;
+ asoc->init_cycle = 0;
+ list_for_each_entry(t, &asoc->peer.transport_addr_list,
+ transports) {
+ t->init_sent_count = 0;
+ }
+ break;
+
+ case SCTP_CMD_REPORT_DUP:
+ sctp_tsnmap_mark_dup(&asoc->peer.tsn_map,
+ cmd->obj.u32);
+ break;
+
+ case SCTP_CMD_REPORT_BAD_TAG:
+ pr_debug("%s: vtag mismatch!\n", __func__);
+ break;
+
+ case SCTP_CMD_STRIKE:
+ /* Mark one strike against a transport. */
+ sctp_do_8_2_transport_strike(commands, asoc,
+ cmd->obj.transport, 0);
+ break;
+
+ case SCTP_CMD_TRANSPORT_IDLE:
+ t = cmd->obj.transport;
+ sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE);
+ break;
+
+ case SCTP_CMD_TRANSPORT_HB_SENT:
+ t = cmd->obj.transport;
+ sctp_do_8_2_transport_strike(commands, asoc,
+ t, 1);
+ t->hb_sent = 1;
+ break;
+
+ case SCTP_CMD_TRANSPORT_ON:
+ t = cmd->obj.transport;
+ sctp_cmd_transport_on(commands, asoc, t, chunk);
+ break;
+
+ case SCTP_CMD_HB_TIMERS_START:
+ sctp_cmd_hb_timers_start(commands, asoc);
+ break;
+
+ case SCTP_CMD_HB_TIMER_UPDATE:
+ t = cmd->obj.transport;
+ sctp_transport_reset_hb_timer(t);
+ break;
+
+ case SCTP_CMD_HB_TIMERS_STOP:
+ sctp_cmd_hb_timers_stop(commands, asoc);
+ break;
+
+ case SCTP_CMD_PROBE_TIMER_UPDATE:
+ t = cmd->obj.transport;
+ sctp_transport_reset_probe_timer(t);
+ break;
+
+ case SCTP_CMD_REPORT_ERROR:
+ error = cmd->obj.error;
+ break;
+
+ case SCTP_CMD_PROCESS_CTSN:
+ /* Dummy up a SACK for processing. */
+ sackh.cum_tsn_ack = cmd->obj.be32;
+ sackh.a_rwnd = htonl(asoc->peer.rwnd +
+ asoc->outqueue.outstanding_bytes);
+ sackh.num_gap_ack_blocks = 0;
+ sackh.num_dup_tsns = 0;
+ chunk->subh.sack_hdr = &sackh;
+ sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
+ SCTP_CHUNK(chunk));
+ break;
+
+ case SCTP_CMD_DISCARD_PACKET:
+ /* We need to discard the whole packet.
+ * Uncork the queue since there might be
+ * responses pending.
+ */
+ chunk->pdiscard = 1;
+ if (asoc) {
+ sctp_outq_uncork(&asoc->outqueue, gfp);
+ local_cork = 0;
+ }
+ break;
+
+ case SCTP_CMD_RTO_PENDING:
+ t = cmd->obj.transport;
+ t->rto_pending = 1;
+ break;
+
+ case SCTP_CMD_PART_DELIVER:
+ asoc->stream.si->start_pd(&asoc->ulpq, GFP_ATOMIC);
+ break;
+
+ case SCTP_CMD_RENEGE:
+ asoc->stream.si->renege_events(&asoc->ulpq,
+ cmd->obj.chunk,
+ GFP_ATOMIC);
+ break;
+
+ case SCTP_CMD_SETUP_T4:
+ sctp_cmd_setup_t4(commands, asoc, cmd->obj.chunk);
+ break;
+
+ case SCTP_CMD_PROCESS_OPERR:
+ sctp_cmd_process_operr(commands, asoc, chunk);
+ break;
+ case SCTP_CMD_CLEAR_INIT_TAG:
+ asoc->peer.i.init_tag = 0;
+ break;
+ case SCTP_CMD_DEL_NON_PRIMARY:
+ sctp_cmd_del_non_primary(asoc);
+ break;
+ case SCTP_CMD_T3_RTX_TIMERS_STOP:
+ sctp_cmd_t3_rtx_timers_stop(commands, asoc);
+ break;
+ case SCTP_CMD_FORCE_PRIM_RETRAN:
+ t = asoc->peer.retran_path;
+ asoc->peer.retran_path = asoc->peer.primary_path;
+ sctp_outq_uncork(&asoc->outqueue, gfp);
+ local_cork = 0;
+ asoc->peer.retran_path = t;
+ break;
+ case SCTP_CMD_SET_SK_ERR:
+ sctp_cmd_set_sk_err(asoc, cmd->obj.error);
+ break;
+ case SCTP_CMD_ASSOC_CHANGE:
+ sctp_cmd_assoc_change(commands, asoc,
+ cmd->obj.u8);
+ break;
+ case SCTP_CMD_ADAPTATION_IND:
+ sctp_cmd_adaptation_ind(commands, asoc);
+ break;
+ case SCTP_CMD_PEER_NO_AUTH:
+ sctp_cmd_peer_no_auth(commands, asoc);
+ break;
+
+ case SCTP_CMD_ASSOC_SHKEY:
+ error = sctp_auth_asoc_init_active_key(asoc,
+ GFP_ATOMIC);
+ break;
+ case SCTP_CMD_UPDATE_INITTAG:
+ asoc->peer.i.init_tag = cmd->obj.u32;
+ break;
+ case SCTP_CMD_SEND_MSG:
+ if (!asoc->outqueue.cork) {
+ sctp_outq_cork(&asoc->outqueue);
+ local_cork = 1;
+ }
+ sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp);
+ break;
+ case SCTP_CMD_PURGE_ASCONF_QUEUE:
+ sctp_asconf_queue_teardown(asoc);
+ break;
+
+ case SCTP_CMD_SET_ASOC:
+ if (asoc && local_cork) {
+ sctp_outq_uncork(&asoc->outqueue, gfp);
+ local_cork = 0;
+ }
+ asoc = cmd->obj.asoc;
+ break;
+
+ default:
+ pr_warn("Impossible command: %u\n",
+ cmd->verb);
+ break;
+ }
+
+ if (error) {
+ cmd = sctp_next_cmd(commands);
+ while (cmd) {
+ if (cmd->verb == SCTP_CMD_REPLY)
+ sctp_chunk_free(cmd->obj.chunk);
+ cmd = sctp_next_cmd(commands);
+ }
+ break;
+ }
+ }
+
+ /* If this is in response to a received chunk, wait until
+ * we are done with the packet to open the queue so that we don't
+ * send multiple packets in response to a single request.
+ */
+ if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) {
+ if (chunk->end_of_packet || chunk->singleton)
+ sctp_outq_uncork(&asoc->outqueue, gfp);
+ } else if (local_cork)
+ sctp_outq_uncork(&asoc->outqueue, gfp);
+
+ if (sp->data_ready_signalled)
+ sp->data_ready_signalled = 0;
+
+ return error;
+}
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
new file mode 100644
index 000000000..5383b6a9d
--- /dev/null
+++ b/net/sctp/sm_statefuns.c
@@ -0,0 +1,6677 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001-2002 Intel Corp.
+ * Copyright (c) 2002 Nokia Corp.
+ *
+ * This is part of the SCTP Linux Kernel Implementation.
+ *
+ * These are the state functions for the state machine.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Mathew Kotowsky <kotowsky@sctp.org>
+ * Sridhar Samudrala <samudrala@us.ibm.com>
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * Hui Huang <hui.huang@nokia.com>
+ * Dajiang Zhang <dajiang.zhang@nokia.com>
+ * Daisy Chang <daisyc@us.ibm.com>
+ * Ardelle Fan <ardelle.fan@intel.com>
+ * Ryan Layer <rmlayer@us.ibm.com>
+ * Kevin Gao <kevin.gao@intel.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/net.h>
+#include <linux/inet.h>
+#include <linux/slab.h>
+#include <net/sock.h>
+#include <net/inet_ecn.h>
+#include <linux/skbuff.h>
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+#include <net/sctp/structs.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/sctp.h>
+
+static struct sctp_packet *sctp_abort_pkt_new(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ const void *payload, size_t paylen);
+static int sctp_eat_data(const struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ struct sctp_cmd_seq *commands);
+static struct sctp_packet *sctp_ootb_pkt_new(
+ struct net *net,
+ const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk);
+static void sctp_send_stale_cookie_err(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk,
+ struct sctp_cmd_seq *commands,
+ struct sctp_chunk *err_chunk);
+static enum sctp_disposition sctp_sf_do_5_2_6_stale(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands);
+static enum sctp_disposition sctp_sf_shut_8_4_5(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands);
+static enum sctp_disposition sctp_sf_tabort_8_4_8(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands);
+static enum sctp_disposition sctp_sf_new_encap_port(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands);
+static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);
+
+static enum sctp_disposition sctp_stop_t1_and_abort(
+ struct net *net,
+ struct sctp_cmd_seq *commands,
+ __be16 error, int sk_err,
+ const struct sctp_association *asoc,
+ struct sctp_transport *transport);
+
+static enum sctp_disposition sctp_sf_abort_violation(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ void *arg,
+ struct sctp_cmd_seq *commands,
+ const __u8 *payload,
+ const size_t paylen);
+
+static enum sctp_disposition sctp_sf_violation_chunklen(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands);
+
+static enum sctp_disposition sctp_sf_violation_paramlen(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg, void *ext,
+ struct sctp_cmd_seq *commands);
+
+static enum sctp_disposition sctp_sf_violation_ctsn(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands);
+
+static enum sctp_disposition sctp_sf_violation_chunk(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands);
+
+static enum sctp_ierror sctp_sf_authenticate(
+ const struct sctp_association *asoc,
+ struct sctp_chunk *chunk);
+
+static enum sctp_disposition __sctp_sf_do_9_1_abort(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands);
+
+static enum sctp_disposition
+__sctp_sf_do_9_2_reshutack(struct net *net, const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type, void *arg,
+ struct sctp_cmd_seq *commands);
+
+/* Small helper function that checks whether the chunk is of the
+ * appropriate length. The 'required_length' argument is set to the
+ * size of the specific chunk type we are testing.
+ * Return Values: true = Valid length
+ * false = Invalid length
+ *
+ */
+static inline bool sctp_chunk_length_valid(struct sctp_chunk *chunk,
+ __u16 required_length)
+{
+ __u16 chunk_length = ntohs(chunk->chunk_hdr->length);
+
+ /* Previously already marked? */
+ if (unlikely(chunk->pdiscard))
+ return false;
+ if (unlikely(chunk_length < required_length))
+ return false;
+
+ return true;
+}
+
+/* Check for format error in an ABORT chunk */
+static inline bool sctp_err_chunk_valid(struct sctp_chunk *chunk)
+{
+ struct sctp_errhdr *err;
+
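+	/* sctp_walk_errors() stops early on a malformed error cause, so the
+	 * cursor lands exactly on chunk_end only when every cause in the
+	 * chunk is well-formed.
+	 */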
+ sctp_walk_errors(err, chunk->chunk_hdr);
+
+ return (void *)err == (void *)chunk->chunk_end;
+}
+
+/**********************************************************
+ * These are the state functions for handling chunk events.
+ **********************************************************/
+
+/*
+ * Process the final SHUTDOWN COMPLETE.
+ *
+ * Section: 4 (C) (diagram), 9.2
+ * Upon reception of the SHUTDOWN COMPLETE chunk the endpoint will verify
+ * that it is in SHUTDOWN-ACK-SENT state, if it is not the chunk should be
+ * discarded. If the endpoint is in the SHUTDOWN-ACK-SENT state the endpoint
+ * should stop the T2-shutdown timer and remove all knowledge of the
+ * association (and thus the association enters the CLOSED state).
+ *
+ * Verification Tag: 8.5.1(C), sctpimpguide 2.41.
+ * C) Rules for packet carrying SHUTDOWN COMPLETE:
+ * ...
+ * - The receiver of a SHUTDOWN COMPLETE shall accept the packet
+ * if the Verification Tag field of the packet matches its own tag and
+ * the T bit is not set
+ * OR
+ * it is set to its peer's tag and the T bit is set in the Chunk
+ * Flags.
+ * Otherwise, the receiver MUST silently discard the packet
+ * and take no further action. An endpoint MUST ignore the
+ * SHUTDOWN COMPLETE if it is not in the SHUTDOWN-ACK-SENT state.
+ *
+ * Inputs
+ * (endpoint, asoc, chunk)
+ *
+ * Outputs
+ * (asoc, reply_msg, msg_up, timers, counters)
+ *
+ * The return value is the disposition of the chunk.
+ */
+enum sctp_disposition sctp_sf_do_4_C(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg, struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *chunk = arg;
+ struct sctp_ulpevent *ev;
+
+ if (!sctp_vtag_verify_either(chunk, asoc))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* RFC 2960 6.10 Bundling
+ *
+ * An endpoint MUST NOT bundle INIT, INIT ACK or
+ * SHUTDOWN COMPLETE with any other chunks.
+ */
+ if (!chunk->singleton)
+ return sctp_sf_violation_chunk(net, ep, asoc, type, arg, commands);
+
+ /* Make sure that the SHUTDOWN_COMPLETE chunk has a valid length. */
+ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
+ /* RFC 2960 10.2 SCTP-to-ULP
+ *
+ * H) SHUTDOWN COMPLETE notification
+ *
+ * When SCTP completes the shutdown procedures (section 9.2) this
+ * notification is passed to the upper layer.
+ */
+ ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_SHUTDOWN_COMP,
+ 0, 0, 0, NULL, GFP_ATOMIC);
+ if (ev)
+ sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
+ SCTP_ULPEVENT(ev));
+
+ /* Upon reception of the SHUTDOWN COMPLETE chunk the endpoint
+ * will verify that it is in SHUTDOWN-ACK-SENT state, if it is
+ * not the chunk should be discarded. If the endpoint is in
+ * the SHUTDOWN-ACK-SENT state the endpoint should stop the
+ * T2-shutdown timer and remove all knowledge of the
+ * association (and thus the association enters the CLOSED
+ * state).
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+ SCTP_STATE(SCTP_STATE_CLOSED));
+
+ SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS);
+ SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
+
+ return SCTP_DISPOSITION_DELETE_TCB;
+}
+
+/*
+ * Respond to a normal INIT chunk.
+ * We are the side that is being asked for an association.
+ *
+ * Section: 5.1 Normal Establishment of an Association, B
+ * B) "Z" shall respond immediately with an INIT ACK chunk. The
+ * destination IP address of the INIT ACK MUST be set to the source
+ * IP address of the INIT to which this INIT ACK is responding. In
+ * the response, besides filling in other parameters, "Z" must set the
+ * Verification Tag field to Tag_A, and also provide its own
+ * Verification Tag (Tag_Z) in the Initiate Tag field.
+ *
+ * Verification Tag: Must be 0.
+ *
+ * Inputs
+ * (endpoint, asoc, chunk)
+ *
+ * Outputs
+ * (asoc, reply_msg, msg_up, timers, counters)
+ *
+ * The return value is the disposition of the chunk.
+ */
+enum sctp_disposition sctp_sf_do_5_1B_init(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *chunk = arg, *repl, *err_chunk;
+ struct sctp_unrecognized_param *unk_param;
+ struct sctp_association *new_asoc;
+ struct sctp_packet *packet;
+ int len;
+
+ /* 6.10 Bundling
+ * An endpoint MUST NOT bundle INIT, INIT ACK or
+ * SHUTDOWN COMPLETE with any other chunks.
+ *
+ * IG Section 2.11.2
+ * Furthermore, we require that the receiver of an INIT chunk MUST
+ * enforce these rules by silently discarding an arriving packet
+ * with an INIT chunk that is bundled with other chunks.
+ */
+ if (!chunk->singleton)
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* Make sure that the INIT chunk has a valid length.
+ * Normally, this would cause an ABORT with a Protocol Violation
+ * error, but since we don't have an association, we'll
+ * just discard the packet.
+ */
+ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* If the packet is an OOTB packet which is temporarily on the
+ * control endpoint, respond with an ABORT.
+ */
+ if (ep == sctp_sk(net->sctp.ctl_sock)->ep) {
+ SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
+ return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
+ }
+
+ /* 3.1 A packet containing an INIT chunk MUST have a zero Verification
+ * Tag.
+ */
+ if (chunk->sctp_hdr->vtag != 0)
+ return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
+
+ /* If the INIT is coming toward a closing socket, we'll send back
+ * an ABORT. Essentially, this catches the race of an INIT being
+ * backlogged to the socket at the same time as the user issues close().
+ * Since the socket and all its associations are going away, we
+ * can treat this as OOTB.
+ */
+ if (sctp_sstate(ep->base.sk, CLOSING))
+ return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
+
+ /* Verify the INIT chunk before processing it. */
+ err_chunk = NULL;
+ if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type,
+ (struct sctp_init_chunk *)chunk->chunk_hdr, chunk,
+ &err_chunk)) {
+ /* This chunk contains a fatal error. It is to be discarded.
+ * Send an ABORT, with causes if there are any.
+ */
+ if (err_chunk) {
+ packet = sctp_abort_pkt_new(net, ep, asoc, arg,
+ (__u8 *)(err_chunk->chunk_hdr) +
+ sizeof(struct sctp_chunkhdr),
+ ntohs(err_chunk->chunk_hdr->length) -
+ sizeof(struct sctp_chunkhdr));
+
+ sctp_chunk_free(err_chunk);
+
+ if (packet) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
+ SCTP_PACKET(packet));
+ SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
+ return SCTP_DISPOSITION_CONSUME;
+ } else {
+ return SCTP_DISPOSITION_NOMEM;
+ }
+ } else {
+ return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg,
+ commands);
+ }
+ }
+
+ /* Grab the INIT header. */
+ chunk->subh.init_hdr = (struct sctp_inithdr *)chunk->skb->data;
+
+ /* Tag the variable length parameters. */
+ chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(struct sctp_inithdr));
+
+ new_asoc = sctp_make_temp_asoc(ep, chunk, GFP_ATOMIC);
+ if (!new_asoc)
+ goto nomem;
+
+ /* Update socket peer label if first association. */
+ if (security_sctp_assoc_request(new_asoc, chunk->skb)) {
+ sctp_association_free(new_asoc);
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ }
+
+ if (sctp_assoc_set_bind_addr_from_ep(new_asoc,
+ sctp_scope(sctp_source(chunk)),
+ GFP_ATOMIC) < 0)
+ goto nomem_init;
+
+ /* The call, sctp_process_init(), can fail on memory allocation. */
+ if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk),
+ (struct sctp_init_chunk *)chunk->chunk_hdr,
+ GFP_ATOMIC))
+ goto nomem_init;
+
+ /* B) "Z" shall respond immediately with an INIT ACK chunk. */
+
+ /* If there are errors that need to be reported for unknown parameters,
+ * make sure to reserve enough room in the INIT ACK for them.
+ */
+ len = 0;
+ if (err_chunk)
+ len = ntohs(err_chunk->chunk_hdr->length) -
+ sizeof(struct sctp_chunkhdr);
+
+ repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
+ if (!repl)
+ goto nomem_init;
+
+ /* If there are errors that need to be reported for unknown parameters,
+ * include them in the outgoing INIT ACK as "Unrecognized parameter"
+ * parameter.
+ */
+ if (err_chunk) {
+ /* Get the "Unrecognized parameter" parameter(s) out of the
+ * ERROR chunk generated by sctp_verify_init(). Since the
+ * error cause code for "unknown parameter" and the
+ * "Unrecognized parameter" type is the same, we can
+ * construct the parameters in INIT ACK by copying the
+ * ERROR causes over.
+ */
+ unk_param = (struct sctp_unrecognized_param *)
+ ((__u8 *)(err_chunk->chunk_hdr) +
+ sizeof(struct sctp_chunkhdr));
+ /* Replace the cause code with the "Unrecognized parameter"
+ * parameter type.
+ */
+ sctp_addto_chunk(repl, len, unk_param);
+ sctp_chunk_free(err_chunk);
+ }
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
+
+ /*
+ * Note: After sending out INIT ACK with the State Cookie parameter,
+ * "Z" MUST NOT allocate any resources, nor keep any states for the
+ * new association. Otherwise, "Z" will be vulnerable to resource
+ * attacks.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
+
+ return SCTP_DISPOSITION_DELETE_TCB;
+
+nomem_init:
+ sctp_association_free(new_asoc);
+nomem:
+ if (err_chunk)
+ sctp_chunk_free(err_chunk);
+ return SCTP_DISPOSITION_NOMEM;
+}
+
+/*
+ * Respond to a normal INIT ACK chunk.
+ * We are the side that is initiating the association.
+ *
+ * Section: 5.1 Normal Establishment of an Association, C
+ * C) Upon reception of the INIT ACK from "Z", "A" shall stop the T1-init
+ * timer and leave COOKIE-WAIT state. "A" shall then send the State
+ * Cookie received in the INIT ACK chunk in a COOKIE ECHO chunk, start
+ * the T1-cookie timer, and enter the COOKIE-ECHOED state.
+ *
+ * Note: The COOKIE ECHO chunk can be bundled with any pending outbound
+ * DATA chunks, but it MUST be the first chunk in the packet and
+ * until the COOKIE ACK is returned the sender MUST NOT send any
+ * other packets to the peer.
+ *
+ * Verification Tag: 3.3.3
+ * If the value of the Initiate Tag in a received INIT ACK chunk is
+ * found to be 0, the receiver MUST treat it as an error and close the
+ * association by transmitting an ABORT.
+ *
+ * Inputs
+ * (endpoint, asoc, chunk)
+ *
+ * Outputs
+ * (asoc, reply_msg, msg_up, timers, counters)
+ *
+ * The return value is the disposition of the chunk.
+ */
+enum sctp_disposition sctp_sf_do_5_1C_ack(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_init_chunk *initchunk;
+ struct sctp_chunk *chunk = arg;
+ struct sctp_chunk *err_chunk;
+ struct sctp_packet *packet;
+
+ if (!sctp_vtag_verify(chunk, asoc))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* 6.10 Bundling
+ * An endpoint MUST NOT bundle INIT, INIT ACK or
+ * SHUTDOWN COMPLETE with any other chunks.
+ */
+ if (!chunk->singleton)
+ return sctp_sf_violation_chunk(net, ep, asoc, type, arg, commands);
+
+ /* Make sure that the INIT-ACK chunk has a valid length */
+ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_initack_chunk)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+ /* Grab the INIT header. */
+ chunk->subh.init_hdr = (struct sctp_inithdr *)chunk->skb->data;
+
+ /* Verify the INIT chunk before processing it. */
+ err_chunk = NULL;
+ if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type,
+ (struct sctp_init_chunk *)chunk->chunk_hdr, chunk,
+ &err_chunk)) {
+
+ enum sctp_error error = SCTP_ERROR_NO_RESOURCE;
+
+ /* This chunk contains a fatal error. It is to be discarded.
+ * Send an ABORT, with causes. If there are no causes,
+ * then there wasn't enough memory. Just terminate
+ * the association.
+ */
+ if (err_chunk) {
+ packet = sctp_abort_pkt_new(net, ep, asoc, arg,
+ (__u8 *)(err_chunk->chunk_hdr) +
+ sizeof(struct sctp_chunkhdr),
+ ntohs(err_chunk->chunk_hdr->length) -
+ sizeof(struct sctp_chunkhdr));
+
+ sctp_chunk_free(err_chunk);
+
+ if (packet) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
+ SCTP_PACKET(packet));
+ SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
+ error = SCTP_ERROR_INV_PARAM;
+ }
+ }
+
+ /* SCTP-AUTH, Section 6.3:
+ * It should be noted that if the receiver wants to tear
+ * down an association in an authenticated way only, the
+ * handling of malformed packets should not result in
+ * tearing down the association.
+ *
+ * This means that if we only want to abort associations
+ * in an authenticated way (i.e AUTH+ABORT), then we
+ * can't destroy this association just because the packet
+ * was malformed.
+ */
+ if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+ return sctp_stop_t1_and_abort(net, commands, error, ECONNREFUSED,
+ asoc, chunk->transport);
+ }
+
+ /* Tag the variable length parameters. Note that we never
+ * convert the parameters in an INIT chunk.
+ */
+ chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(struct sctp_inithdr));
+
+ initchunk = (struct sctp_init_chunk *)chunk->chunk_hdr;
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_PEER_INIT,
+ SCTP_PEER_INIT(initchunk));
+
+ /* Reset init error count upon receipt of INIT-ACK. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_INIT_COUNTER_RESET, SCTP_NULL());
+
+ /* 5.1 C) "A" shall stop the T1-init timer and leave
+ * COOKIE-WAIT state. "A" shall then ... start the T1-cookie
+ * timer, and enter the COOKIE-ECHOED state.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+ SCTP_STATE(SCTP_STATE_COOKIE_ECHOED));
+
+ /* SCTP-AUTH: generate the association shared keys so that
+ * we can potentially sign the COOKIE-ECHO.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_SHKEY, SCTP_NULL());
+
+ /* 5.1 C) "A" shall then send the State Cookie received in the
+ * INIT ACK chunk in a COOKIE ECHO chunk, ...
+ */
+ /* If there are any errors to report, send the ERROR chunk generated
+ * for unknown parameters as well.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_GEN_COOKIE_ECHO,
+ SCTP_CHUNK(err_chunk));
+
+ return SCTP_DISPOSITION_CONSUME;
+}
+
+static bool sctp_auth_chunk_verify(struct net *net, struct sctp_chunk *chunk,
+ const struct sctp_association *asoc)
+{
+ struct sctp_chunk auth;
+
+ if (!chunk->auth_chunk)
+ return true;
+
+ /* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo
+ * is supposed to be authenticated and we have to do delayed
+ * authentication. We've just recreated the association using
+ * the information in the cookie and now it's much easier to
+ * do the authentication.
+ */
+
+ /* Make sure that we and the peer are AUTH capable */
+ if (!net->sctp.auth_enable || !asoc->peer.auth_capable)
+ return false;
+
+ /* set-up our fake chunk so that we can process it */
+ auth.skb = chunk->auth_chunk;
+ auth.asoc = chunk->asoc;
+ auth.sctp_hdr = chunk->sctp_hdr;
+ auth.chunk_hdr = (struct sctp_chunkhdr *)
+ skb_push(chunk->auth_chunk,
+ sizeof(struct sctp_chunkhdr));
+ skb_pull(chunk->auth_chunk, sizeof(struct sctp_chunkhdr));
+ auth.transport = chunk->transport;
+
+ return sctp_sf_authenticate(asoc, &auth) == SCTP_IERROR_NO_ERROR;
+}
+
+/*
+ * Respond to a normal COOKIE ECHO chunk.
+ * We are the side that is being asked for an association.
+ *
+ * Section: 5.1 Normal Establishment of an Association, D
+ * D) Upon reception of the COOKIE ECHO chunk, Endpoint "Z" will reply
+ * with a COOKIE ACK chunk after building a TCB and moving to
+ * the ESTABLISHED state. A COOKIE ACK chunk may be bundled with
+ * any pending DATA chunks (and/or SACK chunks), but the COOKIE ACK
+ * chunk MUST be the first chunk in the packet.
+ *
+ * IMPLEMENTATION NOTE: An implementation may choose to send the
+ * Communication Up notification to the SCTP user upon reception
+ * of a valid COOKIE ECHO chunk.
+ *
+ * Verification Tag: 8.5.1 Exceptions in Verification Tag Rules
+ * D) Rules for packet carrying a COOKIE ECHO
+ *
+ * - When sending a COOKIE ECHO, the endpoint MUST use the value of the
+ * Initial Tag received in the INIT ACK.
+ *
+ * - The receiver of a COOKIE ECHO follows the procedures in Section 5.
+ *
+ * Inputs
+ * (endpoint, asoc, chunk)
+ *
+ * Outputs
+ * (asoc, reply_msg, msg_up, timers, counters)
+ *
+ * The return value is the disposition of the chunk.
+ */
+enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_ulpevent *ev, *ai_ev = NULL, *auth_ev = NULL;
+ struct sctp_association *new_asoc;
+ struct sctp_init_chunk *peer_init;
+ struct sctp_chunk *chunk = arg;
+ struct sctp_chunk *err_chk_p;
+ struct sctp_chunk *repl;
+ struct sock *sk;
+ int error = 0;
+
+ if (asoc && !sctp_vtag_verify(chunk, asoc))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* If the packet is an OOTB packet which is temporarily on the
+ * control endpoint, respond with an ABORT.
+ */
+ if (ep == sctp_sk(net->sctp.ctl_sock)->ep) {
+ SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
+ return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
+ }
+
+ /* Make sure that the COOKIE_ECHO chunk has a valid length.
+ * In this case, we check that we have enough for at least a
+ * chunk header. More detailed verification is done
+ * in sctp_unpack_cookie().
+ */
+ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
+ /* If the endpoint is not listening or if the number of associations
+ * on the TCP-style socket exceed the max backlog, respond with an
+ * ABORT.
+ */
+ sk = ep->base.sk;
+ if (!sctp_sstate(sk, LISTENING) ||
+ (sctp_style(sk, TCP) && sk_acceptq_is_full(sk)))
+ return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
+
+ /* "Decode" the chunk. We have no optional parameters so we
+ * are in good shape.
+ */
+ chunk->subh.cookie_hdr =
+ (struct sctp_signed_cookie *)chunk->skb->data;
+ if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) -
+ sizeof(struct sctp_chunkhdr)))
+ goto nomem;
+
+ /* 5.1 D) Upon reception of the COOKIE ECHO chunk, Endpoint
+ * "Z" will reply with a COOKIE ACK chunk after building a TCB
+ * and moving to the ESTABLISHED state.
+ */
+ new_asoc = sctp_unpack_cookie(ep, asoc, chunk, GFP_ATOMIC, &error,
+ &err_chk_p);
+
+ /* FIXME:
+ * If the re-build failed, what is the proper error path
+ * from here?
+ *
+ * [We should abort the association. --piggy]
+ */
+ if (!new_asoc) {
+ /* FIXME: Several errors are possible. A bad cookie should
+ * be silently discarded, but think about logging it too.
+ */
+ switch (error) {
+ case -SCTP_IERROR_NOMEM:
+ goto nomem;
+
+ case -SCTP_IERROR_STALE_COOKIE:
+ sctp_send_stale_cookie_err(net, ep, asoc, chunk, commands,
+ err_chk_p);
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ case -SCTP_IERROR_BAD_SIG:
+ default:
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ }
+ }
+
+ if (security_sctp_assoc_request(new_asoc, chunk->head_skb ?: chunk->skb)) {
+ sctp_association_free(new_asoc);
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ }
+
+ /* Delay state machine commands until later.
+ *
+ * Re-building the bind address for the association is already
+ * done in sctp_unpack_cookie().
+ */
+ /* This is a brand-new association, so these are not yet side
+ * effects--it is safe to run them here.
+ */
+ peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
+
+ if (!sctp_process_init(new_asoc, chunk,
+ &chunk->subh.cookie_hdr->c.peer_addr,
+ peer_init, GFP_ATOMIC))
+ goto nomem_init;
+
+ /* SCTP-AUTH: Now that we've populated the required fields in
+ * sctp_process_init, set up the association shared keys as
+ * necessary so that we can potentially authenticate the ACK.
+ */
+ error = sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC);
+ if (error)
+ goto nomem_init;
+
+ if (!sctp_auth_chunk_verify(net, chunk, new_asoc)) {
+ sctp_association_free(new_asoc);
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ }
+
+ repl = sctp_make_cookie_ack(new_asoc, chunk);
+ if (!repl)
+ goto nomem_init;
+
+ /* RFC 2960 5.1 Normal Establishment of an Association
+ *
+ * D) IMPLEMENTATION NOTE: An implementation may choose to
+ * send the Communication Up notification to the SCTP user
+ * upon reception of a valid COOKIE ECHO chunk.
+ */
+ ev = sctp_ulpevent_make_assoc_change(new_asoc, 0, SCTP_COMM_UP, 0,
+ new_asoc->c.sinit_num_ostreams,
+ new_asoc->c.sinit_max_instreams,
+ NULL, GFP_ATOMIC);
+ if (!ev)
+ goto nomem_ev;
+
+ /* Sockets API Draft Section 5.3.1.6
+ * When a peer sends an Adaptation Layer Indication parameter, SCTP
+ * delivers this notification to inform the application of the
+ * peer's requested adaptation layer.
+ */
+ if (new_asoc->peer.adaptation_ind) {
+ ai_ev = sctp_ulpevent_make_adaptation_indication(new_asoc,
+ GFP_ATOMIC);
+ if (!ai_ev)
+ goto nomem_aiev;
+ }
+
+ if (!new_asoc->peer.auth_capable) {
+ auth_ev = sctp_ulpevent_make_authkey(new_asoc, 0,
+ SCTP_AUTH_NO_AUTH,
+ GFP_ATOMIC);
+ if (!auth_ev)
+ goto nomem_authev;
+ }
+
+ /* Add all the state machine commands now since we've created
+ * everything. This way we don't introduce memory corruptions
+ * during side-effect processing and correctly count established
+ * associations.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+ SCTP_STATE(SCTP_STATE_ESTABLISHED));
+ SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
+ SCTP_INC_STATS(net, SCTP_MIB_PASSIVEESTABS);
+ sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
+
+ if (new_asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
+
+ /* This will send the COOKIE ACK */
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
+
+ /* Queue the ASSOC_CHANGE event */
+ sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
+
+ /* Send up the Adaptation Layer Indication event */
+ if (ai_ev)
+ sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
+ SCTP_ULPEVENT(ai_ev));
+
+ if (auth_ev)
+ sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
+ SCTP_ULPEVENT(auth_ev));
+
+ return SCTP_DISPOSITION_CONSUME;
+
+nomem_authev:
+ sctp_ulpevent_free(ai_ev);
+nomem_aiev:
+ sctp_ulpevent_free(ev);
+nomem_ev:
+ sctp_chunk_free(repl);
+nomem_init:
+ sctp_association_free(new_asoc);
+nomem:
+ return SCTP_DISPOSITION_NOMEM;
+}
+
+/*
+ * Respond to a normal COOKIE ACK chunk.
+ * We are the side that is asking for an association.
+ *
+ * RFC 2960 5.1 Normal Establishment of an Association
+ *
+ * E) Upon reception of the COOKIE ACK, endpoint "A" will move from the
+ * COOKIE-ECHOED state to the ESTABLISHED state, stopping the T1-cookie
+ * timer. It may also notify its ULP about the successful
+ * establishment of the association with a Communication Up
+ * notification (see Section 10).
+ *
+ * Verification Tag:
+ * Inputs
+ * (endpoint, asoc, chunk)
+ *
+ * Outputs
+ * (asoc, reply_msg, msg_up, timers, counters)
+ *
+ * The return value is the disposition of the chunk.
+ */
+enum sctp_disposition sctp_sf_do_5_1E_ca(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *chunk = arg;
+ struct sctp_ulpevent *ev;
+
+ if (!sctp_vtag_verify(chunk, asoc))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* Set peer label for connection. */
+ if (security_sctp_assoc_established((struct sctp_association *)asoc,
+ chunk->head_skb ?: chunk->skb))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* Verify that the chunk length for the COOKIE-ACK is OK.
+ * If we don't do this, any bundled chunks may be junked.
+ */
+ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
+ /* Reset init error count upon receipt of COOKIE-ACK,
+ * to avoid problems with the management of this
+ * counter in stale cookie situations when a transition back
+ * from the COOKIE-ECHOED state to the COOKIE-WAIT
+ * state is performed.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_INIT_COUNTER_RESET, SCTP_NULL());
+
+ /* RFC 2960 5.1 Normal Establishment of an Association
+ *
+ * E) Upon reception of the COOKIE ACK, endpoint "A" will move
+ * from the COOKIE-ECHOED state to the ESTABLISHED state,
+ * stopping the T1-cookie timer.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+ SCTP_STATE(SCTP_STATE_ESTABLISHED));
+ SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
+ SCTP_INC_STATS(net, SCTP_MIB_ACTIVEESTABS);
+ sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
+ if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
+
+ /* It may also notify its ULP about the successful
+ * establishment of the association with a Communication Up
+ * notification (see Section 10).
+ */
+ ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_UP,
+ 0, asoc->c.sinit_num_ostreams,
+ asoc->c.sinit_max_instreams,
+ NULL, GFP_ATOMIC);
+
+ if (!ev)
+ goto nomem;
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
+
+ /* Sockets API Draft Section 5.3.1.6
+ * When a peer sends an Adaptation Layer Indication parameter, SCTP
+ * delivers this notification to inform the application of the
+ * peer's requested adaptation layer.
+ */
+ if (asoc->peer.adaptation_ind) {
+ ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);
+ if (!ev)
+ goto nomem;
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
+ SCTP_ULPEVENT(ev));
+ }
+
+ if (!asoc->peer.auth_capable) {
+ ev = sctp_ulpevent_make_authkey(asoc, 0, SCTP_AUTH_NO_AUTH,
+ GFP_ATOMIC);
+ if (!ev)
+ goto nomem;
+ sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
+ SCTP_ULPEVENT(ev));
+ }
+
+ return SCTP_DISPOSITION_CONSUME;
+nomem:
+ return SCTP_DISPOSITION_NOMEM;
+}
+
+/* Generate and send out a heartbeat packet. */
+static enum sctp_disposition sctp_sf_heartbeat(
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_transport *transport = (struct sctp_transport *) arg;
+ struct sctp_chunk *reply;
+
+ /* Send a heartbeat to our peer. */
+ reply = sctp_make_heartbeat(asoc, transport, 0);
+ if (!reply)
+ return SCTP_DISPOSITION_NOMEM;
+
+ /* Set rto_pending indicating that an RTT measurement
+ * is started with this heartbeat chunk.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_RTO_PENDING,
+ SCTP_TRANSPORT(transport));
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
+ return SCTP_DISPOSITION_CONSUME;
+}
+
+/* Generate a HEARTBEAT packet on the given transport. */
+enum sctp_disposition sctp_sf_sendbeat_8_3(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_transport *transport = (struct sctp_transport *) arg;
+
+ if (asoc->overall_error_count >= asoc->max_retrans) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
+ /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+ SCTP_PERR(SCTP_ERROR_NO_ERROR));
+ SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+ SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
+ return SCTP_DISPOSITION_DELETE_TCB;
+ }
+
+ /* Section 3.3.5.
+ * The Sender-specific Heartbeat Info field should normally include
+ * information about the sender's current time when this HEARTBEAT
+ * chunk is sent and the destination transport address to which this
+ * HEARTBEAT is sent (see Section 8.3).
+ */
+
+ if (transport->param_flags & SPP_HB_ENABLE) {
+ if (SCTP_DISPOSITION_NOMEM ==
+ sctp_sf_heartbeat(ep, asoc, type, arg,
+ commands))
+ return SCTP_DISPOSITION_NOMEM;
+
+ /* Set transport error counter and association error counter
+ * when sending heartbeat.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_HB_SENT,
+ SCTP_TRANSPORT(transport));
+ }
+ sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_IDLE,
+ SCTP_TRANSPORT(transport));
+ sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMER_UPDATE,
+ SCTP_TRANSPORT(transport));
+
+ return SCTP_DISPOSITION_CONSUME;
+}
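+
+/* The overall_error_count / max_retrans test above (repeated in
+ * sctp_sf_send_reconf() below) implements the Association.Max.Retrans
+ * protocol parameter; on Linux it is tunable via the
+ * net.sctp.association_max_retrans sysctl or the SCTP_ASSOCINFO socket
+ * option.
+ */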
+
+/* Resend the association's strreset_chunk. */
+enum sctp_disposition sctp_sf_send_reconf(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_transport *transport = arg;
+
+ if (asoc->overall_error_count >= asoc->max_retrans) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
+ /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+ SCTP_PERR(SCTP_ERROR_NO_ERROR));
+ SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+ SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
+ return SCTP_DISPOSITION_DELETE_TCB;
+ }
+
+ sctp_chunk_hold(asoc->strreset_chunk);
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+ SCTP_CHUNK(asoc->strreset_chunk));
+ sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE, SCTP_TRANSPORT(transport));
+
+ return SCTP_DISPOSITION_CONSUME;
+}
+
+/* Send a HEARTBEAT chunk with padding for PLPMTUD probing. */
+enum sctp_disposition sctp_sf_send_probe(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_transport *transport = (struct sctp_transport *)arg;
+ struct sctp_chunk *reply;
+
+ if (!sctp_transport_pl_enabled(transport))
+ return SCTP_DISPOSITION_CONSUME;
+
+ sctp_transport_pl_send(transport);
+ reply = sctp_make_heartbeat(asoc, transport, transport->pl.probe_size);
+ if (!reply)
+ return SCTP_DISPOSITION_NOMEM;
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
+ sctp_add_cmd_sf(commands, SCTP_CMD_PROBE_TIMER_UPDATE,
+ SCTP_TRANSPORT(transport));
+
+ return SCTP_DISPOSITION_CONSUME;
+}
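+
+/* The probe is an ordinary HEARTBEAT chunk padded out to the transport's
+ * current pl.probe_size, following the DPLPMTUD scheme of RFC 8899.
+ * Probing only runs while sctp_transport_pl_enabled() is true, which in
+ * this implementation is governed by the net.sctp.plpmtud_probe_interval
+ * sysctl (and the matching SCTP_PLPMTUD_PROBE_INTERVAL socket option).
+ */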
+
+/*
+ * Process a heartbeat request.
+ *
+ * Section: 8.3 Path Heartbeat
+ * The receiver of the HEARTBEAT should immediately respond with a
+ * HEARTBEAT ACK that contains the Heartbeat Information field copied
+ * from the received HEARTBEAT chunk.
+ *
+ * Verification Tag: 8.5 Verification Tag [Normal verification]
+ * When receiving an SCTP packet, the endpoint MUST ensure that the
+ * value in the Verification Tag field of the received SCTP packet
+ * matches its own Tag. If the received Verification Tag value does not
+ * match the receiver's own tag value, the receiver shall silently
+ * discard the packet and shall not process it any further except for
+ * those cases listed in Section 8.5.1 below.
+ *
+ * Inputs
+ * (endpoint, asoc, chunk)
+ *
+ * Outputs
+ * (asoc, reply_msg, msg_up, timers, counters)
+ *
+ * The return value is the disposition of the chunk.
+ */
+enum sctp_disposition sctp_sf_beat_8_3(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg, struct sctp_cmd_seq *commands)
+{
+ struct sctp_paramhdr *param_hdr;
+ struct sctp_chunk *chunk = arg;
+ struct sctp_chunk *reply;
+ size_t paylen = 0;
+
+ if (!sctp_vtag_verify(chunk, asoc))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* Make sure that the HEARTBEAT chunk has a valid length. */
+ if (!sctp_chunk_length_valid(chunk,
+ sizeof(struct sctp_heartbeat_chunk)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
+ /* 8.3 The receiver of the HEARTBEAT should immediately
+ * respond with a HEARTBEAT ACK that contains the Heartbeat
+ * Information field copied from the received HEARTBEAT chunk.
+ */
+ chunk->subh.hb_hdr = (struct sctp_heartbeathdr *)chunk->skb->data;
+ param_hdr = (struct sctp_paramhdr *)chunk->subh.hb_hdr;
+ paylen = ntohs(chunk->chunk_hdr->length) - sizeof(struct sctp_chunkhdr);
+
+ if (ntohs(param_hdr->length) > paylen)
+ return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
+ param_hdr, commands);
+
+ if (!pskb_pull(chunk->skb, paylen))
+ goto nomem;
+
+ reply = sctp_make_heartbeat_ack(asoc, chunk, param_hdr, paylen);
+ if (!reply)
+ goto nomem;
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
+ return SCTP_DISPOSITION_CONSUME;
+
+nomem:
+ return SCTP_DISPOSITION_NOMEM;
+}
+
+/*
+ * Process the returning HEARTBEAT ACK.
+ *
+ * Section: 8.3 Path Heartbeat
+ * Upon the receipt of the HEARTBEAT ACK, the sender of the HEARTBEAT
+ * should clear the error counter of the destination transport
+ * address to which the HEARTBEAT was sent, and mark the destination
+ * transport address as active if it is not so marked. The endpoint may
+ * optionally report to the upper layer when an inactive destination
+ * address is marked as active due to the reception of the latest
+ * HEARTBEAT ACK. The receiver of the HEARTBEAT ACK must also
+ * clear the association overall error count as well (as defined
+ * in section 8.1).
+ *
+ * The receiver of the HEARTBEAT ACK should also perform an RTT
+ * measurement for that destination transport address using the time
+ * value carried in the HEARTBEAT ACK chunk.
+ *
+ * Verification Tag: 8.5 Verification Tag [Normal verification]
+ *
+ * Inputs
+ * (endpoint, asoc, chunk)
+ *
+ * Outputs
+ * (asoc, reply_msg, msg_up, timers, counters)
+ *
+ * The return value is the disposition of the chunk.
+ */
+enum sctp_disposition sctp_sf_backbeat_8_3(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_sender_hb_info *hbinfo;
+ struct sctp_chunk *chunk = arg;
+ struct sctp_transport *link;
+ unsigned long max_interval;
+ union sctp_addr from_addr;
+
+ if (!sctp_vtag_verify(chunk, asoc))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* Make sure that the HEARTBEAT-ACK chunk has a valid length. */
+ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr) +
+ sizeof(*hbinfo)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
+ hbinfo = (struct sctp_sender_hb_info *)chunk->skb->data;
+ /* Make sure that the length of the parameter is what we expect */
+ if (ntohs(hbinfo->param_hdr.length) != sizeof(*hbinfo))
+ return SCTP_DISPOSITION_DISCARD;
+
+ from_addr = hbinfo->daddr;
+ link = sctp_assoc_lookup_paddr(asoc, &from_addr);
+
+ /* This should never happen, but let's log it if so. */
+ if (unlikely(!link)) {
+ if (from_addr.sa.sa_family == AF_INET6) {
+ net_warn_ratelimited("%s association %p could not find address %pI6\n",
+ __func__,
+ asoc,
+ &from_addr.v6.sin6_addr);
+ } else {
+ net_warn_ratelimited("%s association %p could not find address %pI4\n",
+ __func__,
+ asoc,
+ &from_addr.v4.sin_addr.s_addr);
+ }
+ return SCTP_DISPOSITION_DISCARD;
+ }
+
+ /* Validate the 64-bit random nonce. */
+ if (hbinfo->hb_nonce != link->hb_nonce)
+ return SCTP_DISPOSITION_DISCARD;
+
+ if (hbinfo->probe_size) {
+ if (hbinfo->probe_size != link->pl.probe_size ||
+ !sctp_transport_pl_enabled(link))
+ return SCTP_DISPOSITION_DISCARD;
+
+ if (sctp_transport_pl_recv(link))
+ return SCTP_DISPOSITION_CONSUME;
+
+ return sctp_sf_send_probe(net, ep, asoc, type, link, commands);
+ }
+
+ max_interval = link->hbinterval + link->rto;
+
+ /* Check if the timestamp looks valid. */
+ if (time_after(hbinfo->sent_at, jiffies) ||
+ time_after(jiffies, hbinfo->sent_at + max_interval)) {
+ pr_debug("%s: HEARTBEAT ACK with invalid timestamp received "
+ "for transport:%p\n", __func__, link);
+
+ return SCTP_DISPOSITION_DISCARD;
+ }
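+
+ /* E.g. with the default 30 second heartbeat interval and a 1 second
+ * RTO, an ACK whose embedded timestamp is more than ~31 seconds old,
+ * or lies in the future, is dropped by the check above.
+ */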
+
+ /* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of
+ * the HEARTBEAT should clear the error counter of the
+ * destination transport address to which the HEARTBEAT was
+ * sent and mark the destination transport address as active if
+ * it is not so marked.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_ON, SCTP_TRANSPORT(link));
+
+ return SCTP_DISPOSITION_CONSUME;
+}
+
+/* Helper function to send out an abort for the restart
+ * condition.
+ */
+static int sctp_sf_send_restart_abort(struct net *net, union sctp_addr *ssa,
+ struct sctp_chunk *init,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_af *af = sctp_get_af_specific(ssa->v4.sin_family);
+ union sctp_addr_param *addrparm;
+ struct sctp_errhdr *errhdr;
+ char buffer[sizeof(*errhdr) + sizeof(*addrparm)];
+ struct sctp_endpoint *ep;
+ struct sctp_packet *pkt;
+ int len;
+
+ /* Build the error on the stack. We are way too malloc-crazy
+ * throughout the code today.
+ */
+ errhdr = (struct sctp_errhdr *)buffer;
+ addrparm = (union sctp_addr_param *)errhdr->variable;
+
+ /* Copy into a parm format. */
+ len = af->to_addr_param(ssa, addrparm);
+ len += sizeof(*errhdr);
+
+ errhdr->cause = SCTP_ERROR_RESTART;
+ errhdr->length = htons(len);
+
+ /* Assign to the control socket. */
+ ep = sctp_sk(net->sctp.ctl_sock)->ep;
+
+ /* Association is NULL since this may be a restart attack and we
+ * want to send back the attacker's vtag.
+ */
+ pkt = sctp_abort_pkt_new(net, ep, NULL, init, errhdr, len);
+
+ if (!pkt)
+ goto out;
+ sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(pkt));
+
+ SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
+
+ /* Discard the rest of the inbound packet. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
+
+out:
+ /* Even if there is no memory, treat as a failure so
+ * the packet will get dropped.
+ */
+ return 0;
+}
+
+static bool list_has_sctp_addr(const struct list_head *list,
+ union sctp_addr *ipaddr)
+{
+ struct sctp_transport *addr;
+
+ list_for_each_entry(addr, list, transports) {
+ if (sctp_cmp_addr_exact(ipaddr, &addr->ipaddr))
+ return true;
+ }
+
+ return false;
+}
+/* A restart is occurring; check to make sure no new addresses
+ * are being added, as we may be under a takeover attack.
+ */
+static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc,
+ const struct sctp_association *asoc,
+ struct sctp_chunk *init,
+ struct sctp_cmd_seq *commands)
+{
+ struct net *net = new_asoc->base.net;
+ struct sctp_transport *new_addr;
+ int ret = 1;
+
+ /* Implementor's Guide - Section 5.2.2
+ * ...
+ * Before responding the endpoint MUST check to see if the
+ * unexpected INIT adds new addresses to the association. If new
+ * addresses are added to the association, the endpoint MUST respond
+ * with an ABORT.
+ */
+
+ /* Search through all current addresses and make sure
+ * we aren't adding any new ones.
+ */
+ list_for_each_entry(new_addr, &new_asoc->peer.transport_addr_list,
+ transports) {
+ if (!list_has_sctp_addr(&asoc->peer.transport_addr_list,
+ &new_addr->ipaddr)) {
+ sctp_sf_send_restart_abort(net, &new_addr->ipaddr, init,
+ commands);
+ ret = 0;
+ break;
+ }
+ }
+
+ /* Return success if all addresses were found. */
+ return ret;
+}
+
+/* Populate the verification/tie tags based on overlapping INIT
+ * scenario.
+ *
+ * Note: Do not use in CLOSED or SHUTDOWN-ACK-SENT state.
+ */
+static void sctp_tietags_populate(struct sctp_association *new_asoc,
+ const struct sctp_association *asoc)
+{
+ switch (asoc->state) {
+
+ /* 5.2.1 INIT received in COOKIE-WAIT or COOKIE-ECHOED State */
+
+ case SCTP_STATE_COOKIE_WAIT:
+ new_asoc->c.my_vtag = asoc->c.my_vtag;
+ new_asoc->c.my_ttag = asoc->c.my_vtag;
+ new_asoc->c.peer_ttag = 0;
+ break;
+
+ case SCTP_STATE_COOKIE_ECHOED:
+ new_asoc->c.my_vtag = asoc->c.my_vtag;
+ new_asoc->c.my_ttag = asoc->c.my_vtag;
+ new_asoc->c.peer_ttag = asoc->c.peer_vtag;
+ break;
+
+ /* 5.2.2 Unexpected INIT in States Other than CLOSED, COOKIE-ECHOED,
+ * COOKIE-WAIT and SHUTDOWN-ACK-SENT
+ */
+ default:
+ new_asoc->c.my_ttag = asoc->c.my_vtag;
+ new_asoc->c.peer_ttag = asoc->c.peer_vtag;
+ break;
+ }
+
+ /* Other parameters for the endpoint SHOULD be copied from the
+ * existing parameters of the association (e.g. number of
+ * outbound streams) into the INIT ACK and cookie.
+ */
+ new_asoc->rwnd = asoc->rwnd;
+ new_asoc->c.sinit_num_ostreams = asoc->c.sinit_num_ostreams;
+ new_asoc->c.sinit_max_instreams = asoc->c.sinit_max_instreams;
+ new_asoc->c.initial_tsn = asoc->c.initial_tsn;
+}
+
+/*
+ * Compare vtag/tietag values to determine unexpected COOKIE-ECHO
+ * handling action.
+ *
+ * RFC 2960 5.2.4 Handle a COOKIE ECHO when a TCB exists.
+ *
+ * Returns value representing action to be taken. These action values
+ * correspond to Action/Description values in RFC 2960, Table 2.
+ */
+static char sctp_tietags_compare(struct sctp_association *new_asoc,
+ const struct sctp_association *asoc)
+{
+ /* In this case, the peer may have restarted. */
+ if ((asoc->c.my_vtag != new_asoc->c.my_vtag) &&
+ (asoc->c.peer_vtag != new_asoc->c.peer_vtag) &&
+ (asoc->c.my_vtag == new_asoc->c.my_ttag) &&
+ (asoc->c.peer_vtag == new_asoc->c.peer_ttag))
+ return 'A';
+
+ /* Collision case B. */
+ if ((asoc->c.my_vtag == new_asoc->c.my_vtag) &&
+ ((asoc->c.peer_vtag != new_asoc->c.peer_vtag) ||
+ (0 == asoc->c.peer_vtag))) {
+ return 'B';
+ }
+
+ /* Collision case D. */
+ if ((asoc->c.my_vtag == new_asoc->c.my_vtag) &&
+ (asoc->c.peer_vtag == new_asoc->c.peer_vtag))
+ return 'D';
+
+ /* Collision case C. */
+ if ((asoc->c.my_vtag != new_asoc->c.my_vtag) &&
+ (asoc->c.peer_vtag == new_asoc->c.peer_vtag) &&
+ (0 == new_asoc->c.my_ttag) &&
+ (0 == new_asoc->c.peer_ttag))
+ return 'C';
+
+ /* No match to any of the special cases; discard this packet. */
+ return 'E';
+}
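+
+/* For reference, the actions above follow RFC 2960 Table 2 and map onto the
+ * sctp_sf_do_dupcook_{a,b,c,d}() handlers further down: 'A' is a peer
+ * restart, 'B' and 'C' are initialization collisions, 'D' is a duplicate
+ * cookie on an association whose tags already match, and 'E' covers
+ * everything else, which is discarded.
+ */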
+
+/* Common helper routine for both duplicate and simultaneous INIT
+ * chunk handling.
+ */
+static enum sctp_disposition sctp_sf_do_unexpected_init(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *chunk = arg, *repl, *err_chunk;
+ struct sctp_unrecognized_param *unk_param;
+ struct sctp_association *new_asoc;
+ enum sctp_disposition retval;
+ struct sctp_packet *packet;
+ int len;
+
+ /* 6.10 Bundling
+ * An endpoint MUST NOT bundle INIT, INIT ACK or
+ * SHUTDOWN COMPLETE with any other chunks.
+ *
+ * IG Section 2.11.2
+ * Furthermore, we require that the receiver of an INIT chunk MUST
+ * enforce these rules by silently discarding an arriving packet
+ * with an INIT chunk that is bundled with other chunks.
+ */
+ if (!chunk->singleton)
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* Make sure that the INIT chunk has a valid length. */
+ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* 3.1 A packet containing an INIT chunk MUST have a zero Verification
+ * Tag.
+ */
+ if (chunk->sctp_hdr->vtag != 0)
+ return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
+
+ if (SCTP_INPUT_CB(chunk->skb)->encap_port != chunk->transport->encap_port)
+ return sctp_sf_new_encap_port(net, ep, asoc, type, arg, commands);
+
+ /* Grab the INIT header. */
+ chunk->subh.init_hdr = (struct sctp_inithdr *)chunk->skb->data;
+
+ /* Tag the variable length parameters. */
+ chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(struct sctp_inithdr));
+
+ /* Verify the INIT chunk before processing it. */
+ err_chunk = NULL;
+ if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type,
+ (struct sctp_init_chunk *)chunk->chunk_hdr, chunk,
+ &err_chunk)) {
+ /* This chunk contains fatal error. It is to be discarded.
+ * Send an ABORT, with causes if there is any.
+ */
+ if (err_chunk) {
+ packet = sctp_abort_pkt_new(net, ep, asoc, arg,
+ (__u8 *)(err_chunk->chunk_hdr) +
+ sizeof(struct sctp_chunkhdr),
+ ntohs(err_chunk->chunk_hdr->length) -
+ sizeof(struct sctp_chunkhdr));
+
+ if (packet) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
+ SCTP_PACKET(packet));
+ SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
+ retval = SCTP_DISPOSITION_CONSUME;
+ } else {
+ retval = SCTP_DISPOSITION_NOMEM;
+ }
+ goto cleanup;
+ } else {
+ return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg,
+ commands);
+ }
+ }
+
+ /*
+ * Other parameters for the endpoint SHOULD be copied from the
+ * existing parameters of the association (e.g. number of
+ * outbound streams) into the INIT ACK and cookie.
+ * FIXME: We are copying parameters from the endpoint not the
+ * association.
+ */
+ new_asoc = sctp_make_temp_asoc(ep, chunk, GFP_ATOMIC);
+ if (!new_asoc)
+ goto nomem;
+
+ /* Update socket peer label if first association. */
+ if (security_sctp_assoc_request(new_asoc, chunk->skb)) {
+ sctp_association_free(new_asoc);
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ }
+
+ if (sctp_assoc_set_bind_addr_from_ep(new_asoc,
+ sctp_scope(sctp_source(chunk)), GFP_ATOMIC) < 0)
+ goto nomem;
+
+ /* In the outbound INIT ACK the endpoint MUST copy its current
+ * Verification Tag and the peer's Verification Tag into a reserved
+ * place (local tie-tag and peer tie-tag) within the state cookie.
+ */
+ if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk),
+ (struct sctp_init_chunk *)chunk->chunk_hdr,
+ GFP_ATOMIC))
+ goto nomem;
+
+ /* Make sure no new addresses are being added during the
+ * restart. Do not do this check for COOKIE-WAIT state,
+ * since there are no peer addresses to check against.
+ * Upon return an ABORT will have been sent if needed.
+ */
+ if (!sctp_state(asoc, COOKIE_WAIT)) {
+ if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk,
+ commands)) {
+ retval = SCTP_DISPOSITION_CONSUME;
+ goto nomem_retval;
+ }
+ }
+
+ sctp_tietags_populate(new_asoc, asoc);
+
+ /* B) "Z" shall respond immediately with an INIT ACK chunk. */
+
+ /* If there are errors that need to be reported for unknown parameters,
+ * make sure to reserve enough room in the INIT ACK for them.
+ */
+ len = 0;
+ if (err_chunk) {
+ len = ntohs(err_chunk->chunk_hdr->length) -
+ sizeof(struct sctp_chunkhdr);
+ }
+
+ repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
+ if (!repl)
+ goto nomem;
+
+ /* If there are errors that need to be reported for unknown parameters,
+ * include them in the outgoing INIT ACK as "Unrecognized parameter"
+ * parameter.
+ */
+ if (err_chunk) {
+ /* Get the "Unrecognized parameter" parameter(s) out of the
+ * ERROR chunk generated by sctp_verify_init(). Since the
+ * error cause code for "unknown parameter" and the
+ * "Unrecognized parameter" type is the same, we can
+ * construct the parameters in INIT ACK by copying the
+ * ERROR causes over.
+ */
+ unk_param = (struct sctp_unrecognized_param *)
+ ((__u8 *)(err_chunk->chunk_hdr) +
+ sizeof(struct sctp_chunkhdr));
+ /* Replace the cause code with the "Unrecognized parameter"
+ * parameter type.
+ */
+ sctp_addto_chunk(repl, len, unk_param);
+ }
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
+
+ /*
+ * Note: After sending out INIT ACK with the State Cookie parameter,
+ * "Z" MUST NOT allocate any resources for this new association.
+ * Otherwise, "Z" will be vulnerable to resource attacks.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
+ retval = SCTP_DISPOSITION_CONSUME;
+
+ return retval;
+
+nomem:
+ retval = SCTP_DISPOSITION_NOMEM;
+nomem_retval:
+ if (new_asoc)
+ sctp_association_free(new_asoc);
+cleanup:
+ if (err_chunk)
+ sctp_chunk_free(err_chunk);
+ return retval;
+}
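+
+/* Note that only a temporary association (sctp_make_temp_asoc()) is built
+ * while answering an unexpected INIT, and the SCTP_CMD_DELETE_TCB queued
+ * above releases it again once the INIT ACK is out; a full TCB is only set
+ * up when the peer returns the State Cookie, which keeps this path
+ * resistant to the resource attacks mentioned above.
+ */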
+
+/*
+ * Handle simultaneous INIT.
+ * This means we started an INIT and then we got an INIT request from
+ * our peer.
+ *
+ * Section: 5.2.1 INIT received in COOKIE-WAIT or COOKIE-ECHOED State (Item B)
+ * This usually indicates an initialization collision, i.e., each
+ * endpoint is attempting, at about the same time, to establish an
+ * association with the other endpoint.
+ *
+ * Upon receipt of an INIT in the COOKIE-WAIT or COOKIE-ECHOED state, an
+ * endpoint MUST respond with an INIT ACK using the same parameters it
+ * sent in its original INIT chunk (including its Verification Tag,
+ * unchanged). These original parameters are combined with those from the
+ * newly received INIT chunk. The endpoint shall also generate a State
+ * Cookie with the INIT ACK. The endpoint uses the parameters sent in its
+ * INIT to calculate the State Cookie.
+ *
+ * After that, the endpoint MUST NOT change its state, the T1-init
+ * timer shall be left running and the corresponding TCB MUST NOT be
+ * destroyed. The normal procedures for handling State Cookies when
+ * a TCB exists will resolve the duplicate INITs to a single association.
+ *
+ * For an endpoint that is in the COOKIE-ECHOED state it MUST populate
+ * its Tie-Tags with the Tag information of itself and its peer (see
+ * section 5.2.2 for a description of the Tie-Tags).
+ *
+ * Verification Tag: Not explicit, but an INIT can not have a valid
+ * verification tag, so we skip the check.
+ *
+ * Inputs
+ * (endpoint, asoc, chunk)
+ *
+ * Outputs
+ * (asoc, reply_msg, msg_up, timers, counters)
+ *
+ * The return value is the disposition of the chunk.
+ */
+enum sctp_disposition sctp_sf_do_5_2_1_siminit(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ /* Call helper to do the real work for both simultaneous and
+ * duplicate INIT chunk handling.
+ */
+ return sctp_sf_do_unexpected_init(net, ep, asoc, type, arg, commands);
+}
+
+/*
+ * Handle duplicated INIT messages. These are usually delayed
+ * retransmissions.
+ *
+ * Section: 5.2.2 Unexpected INIT in States Other than CLOSED,
+ * COOKIE-ECHOED and COOKIE-WAIT
+ *
+ * Unless otherwise stated, upon reception of an unexpected INIT for
+ * this association, the endpoint shall generate an INIT ACK with a
+ * State Cookie. In the outbound INIT ACK the endpoint MUST copy its
+ * current Verification Tag and peer's Verification Tag into a reserved
+ * place within the state cookie. We shall refer to these locations as
+ * the Peer's-Tie-Tag and the Local-Tie-Tag. The outbound SCTP packet
+ * containing this INIT ACK MUST carry a Verification Tag value equal to
+ * the Initiation Tag found in the unexpected INIT. And the INIT ACK
+ * MUST contain a new Initiation Tag (randomly generated see Section
+ * 5.3.1). Other parameters for the endpoint SHOULD be copied from the
+ * existing parameters of the association (e.g. number of outbound
+ * streams) into the INIT ACK and cookie.
+ *
+ * After sending out the INIT ACK, the endpoint shall take no further
+ * actions, i.e., the existing association, including its current state,
+ * and the corresponding TCB MUST NOT be changed.
+ *
+ * Note: Only when a TCB exists and the association is not in a COOKIE-
+ * WAIT state are the Tie-Tags populated. For a normal association INIT
+ * (i.e. the endpoint is in a COOKIE-WAIT state), the Tie-Tags MUST be
+ * set to 0 (indicating that no previous TCB existed). The INIT ACK and
+ * State Cookie are populated as specified in section 5.2.1.
+ *
+ * Verification Tag: Not specified, but an INIT has no way of knowing
+ * what the verification tag could be, so we ignore it.
+ *
+ * Inputs
+ * (endpoint, asoc, chunk)
+ *
+ * Outputs
+ * (asoc, reply_msg, msg_up, timers, counters)
+ *
+ * The return value is the disposition of the chunk.
+ */
+enum sctp_disposition sctp_sf_do_5_2_2_dupinit(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ /* Call helper to do the real work for both simultaneous and
+ * duplicate INIT chunk handling.
+ */
+ return sctp_sf_do_unexpected_init(net, ep, asoc, type, arg, commands);
+}
+
+
+/*
+ * Unexpected INIT-ACK handler.
+ *
+ * Section 5.2.3
+ * If an INIT ACK is received by an endpoint in any state other than the
+ * COOKIE-WAIT state, the endpoint should discard the INIT ACK chunk.
+ * An unexpected INIT ACK usually indicates the processing of an old or
+ * duplicated INIT chunk.
+ */
+enum sctp_disposition sctp_sf_do_5_2_3_initack(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ /* Per the above section, we'll discard the chunk if we have an
+ * endpoint. If this is an OOTB INIT-ACK, treat it as such.
+ */
+ if (ep == sctp_sk(net->sctp.ctl_sock)->ep)
+ return sctp_sf_ootb(net, ep, asoc, type, arg, commands);
+ else
+ return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
+}
+
+static int sctp_sf_do_assoc_update(struct sctp_association *asoc,
+ struct sctp_association *new,
+ struct sctp_cmd_seq *cmds)
+{
+ struct net *net = asoc->base.net;
+ struct sctp_chunk *abort;
+
+ if (!sctp_assoc_update(asoc, new))
+ return 0;
+
+ abort = sctp_make_abort(asoc, NULL, sizeof(struct sctp_errhdr));
+ if (abort) {
+ sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
+ sctp_add_cmd_sf(cmds, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
+ }
+ sctp_add_cmd_sf(cmds, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNABORTED));
+ sctp_add_cmd_sf(cmds, SCTP_CMD_ASSOC_FAILED,
+ SCTP_PERR(SCTP_ERROR_RSRC_LOW));
+ SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+ SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
+
+ return -ENOMEM;
+}
+
+/* Unexpected COOKIE-ECHO handler for peer restart (Table 2, action 'A')
+ *
+ * Section 5.2.4
+ * A) In this case, the peer may have restarted.
+ */
+static enum sctp_disposition sctp_sf_do_dupcook_a(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ struct sctp_cmd_seq *commands,
+ struct sctp_association *new_asoc)
+{
+ struct sctp_init_chunk *peer_init;
+ enum sctp_disposition disposition;
+ struct sctp_ulpevent *ev;
+ struct sctp_chunk *repl;
+ struct sctp_chunk *err;
+
+ /* new_asoc is a brand-new association, so these are not yet
+ * side effects--it is safe to run them here.
+ */
+ peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
+
+ if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk), peer_init,
+ GFP_ATOMIC))
+ goto nomem;
+
+ if (sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC))
+ goto nomem;
+
+ if (!sctp_auth_chunk_verify(net, chunk, new_asoc))
+ return SCTP_DISPOSITION_DISCARD;
+
+ /* Make sure no new addresses are being added during the
+ * restart. Though this is a pretty complicated attack
+ * since you'd have to get inside the cookie.
+ */
+ if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands))
+ return SCTP_DISPOSITION_CONSUME;
+
+ /* If the endpoint is in the SHUTDOWN-ACK-SENT state and recognizes
+ * the peer has restarted (Action A), it MUST NOT setup a new
+ * association but instead resend the SHUTDOWN ACK and send an ERROR
+ * chunk with a "Cookie Received while Shutting Down" error cause to
+ * its peer.
+ */
+ if (sctp_state(asoc, SHUTDOWN_ACK_SENT)) {
+ disposition = __sctp_sf_do_9_2_reshutack(net, ep, asoc,
+ SCTP_ST_CHUNK(chunk->chunk_hdr->type),
+ chunk, commands);
+ if (SCTP_DISPOSITION_NOMEM == disposition)
+ goto nomem;
+
+ err = sctp_make_op_error(asoc, chunk,
+ SCTP_ERROR_COOKIE_IN_SHUTDOWN,
+ NULL, 0, 0);
+ if (err)
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+ SCTP_CHUNK(err));
+
+ return SCTP_DISPOSITION_CONSUME;
+ }
+
+ /* For now, stop pending T3-rtx and SACK timers, fail any unsent/unacked
+ * data. Consider the optional choice of resending this data.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_T3_RTX_TIMERS_STOP, SCTP_NULL());
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
+ sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_OUTQUEUE, SCTP_NULL());
+
+ /* Stop pending T4-rto timer, teardown ASCONF queue, ASCONF-ACK queue
+ * and ASCONF-ACK cache.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
+ sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_ASCONF_QUEUE, SCTP_NULL());
+
+ /* Update the content of current association. */
+ if (sctp_sf_do_assoc_update((struct sctp_association *)asoc, new_asoc, commands))
+ goto nomem;
+
+ repl = sctp_make_cookie_ack(asoc, chunk);
+ if (!repl)
+ goto nomem;
+
+ /* Report association restart to upper layer. */
+ ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_RESTART, 0,
+ asoc->c.sinit_num_ostreams,
+ asoc->c.sinit_max_instreams,
+ NULL, GFP_ATOMIC);
+ if (!ev)
+ goto nomem_ev;
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
+ if ((sctp_state(asoc, SHUTDOWN_PENDING) ||
+ sctp_state(asoc, SHUTDOWN_SENT)) &&
+ (sctp_sstate(asoc->base.sk, CLOSING) ||
+ sock_flag(asoc->base.sk, SOCK_DEAD))) {
+ /* If the socket has been closed by user, don't
+ * transition to ESTABLISHED. Instead trigger SHUTDOWN
+ * bundled with COOKIE_ACK.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
+ return sctp_sf_do_9_2_start_shutdown(net, ep, asoc,
+ SCTP_ST_CHUNK(0), repl,
+ commands);
+ } else {
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+ SCTP_STATE(SCTP_STATE_ESTABLISHED));
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
+ }
+ return SCTP_DISPOSITION_CONSUME;
+
+nomem_ev:
+ sctp_chunk_free(repl);
+nomem:
+ return SCTP_DISPOSITION_NOMEM;
+}
+
+/* Unexpected COOKIE-ECHO handler for setup collision (Table 2, action 'B')
+ *
+ * Section 5.2.4
+ * B) In this case, both sides may be attempting to start an association
+ * at about the same time but the peer endpoint started its INIT
+ * after responding to the local endpoint's INIT
+ */
+/* This case represents an initialization collision. */
+static enum sctp_disposition sctp_sf_do_dupcook_b(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ struct sctp_cmd_seq *commands,
+ struct sctp_association *new_asoc)
+{
+ struct sctp_init_chunk *peer_init;
+ struct sctp_chunk *repl;
+
+ /* new_asoc is a brand-new association, so these are not yet
+ * side effects--it is safe to run them here.
+ */
+ peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
+ if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk), peer_init,
+ GFP_ATOMIC))
+ goto nomem;
+
+ if (sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC))
+ goto nomem;
+
+ if (!sctp_auth_chunk_verify(net, chunk, new_asoc))
+ return SCTP_DISPOSITION_DISCARD;
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+ SCTP_STATE(SCTP_STATE_ESTABLISHED));
+ if (asoc->state < SCTP_STATE_ESTABLISHED)
+ SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
+ sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
+
+ /* Update the content of current association. */
+ if (sctp_sf_do_assoc_update((struct sctp_association *)asoc, new_asoc, commands))
+ goto nomem;
+
+ repl = sctp_make_cookie_ack(asoc, chunk);
+ if (!repl)
+ goto nomem;
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
+
+ /* RFC 2960 5.1 Normal Establishment of an Association
+ *
+ * D) IMPLEMENTATION NOTE: An implementation may choose to
+ * send the Communication Up notification to the SCTP user
+ * upon reception of a valid COOKIE ECHO chunk.
+ *
+ * Sadly, this needs to be implemented as a side-effect, because
+ * we are not guaranteed to have set the association id of the real
+ * association and so these notifications need to be delayed until
+ * the association id is allocated.
+ */
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_CHANGE, SCTP_U8(SCTP_COMM_UP));
+
+ /* Sockets API Draft Section 5.3.1.6
+ * When a peer sends an Adaptation Layer Indication parameter, SCTP
+ * delivers this notification to inform the application of the
+ * peer's requested adaptation layer.
+ *
+ * This also needs to be done as a side effect for the same reason as
+ * above.
+ */
+ if (asoc->peer.adaptation_ind)
+ sctp_add_cmd_sf(commands, SCTP_CMD_ADAPTATION_IND, SCTP_NULL());
+
+ if (!asoc->peer.auth_capable)
+ sctp_add_cmd_sf(commands, SCTP_CMD_PEER_NO_AUTH, SCTP_NULL());
+
+ return SCTP_DISPOSITION_CONSUME;
+
+nomem:
+ return SCTP_DISPOSITION_NOMEM;
+}
+
+/* Unexpected COOKIE-ECHO handler for setup collision (Table 2, action 'C')
+ *
+ * Section 5.2.4
+ * C) In this case, the local endpoint's cookie has arrived late.
+ * Before it arrived, the local endpoint sent an INIT and received an
+ * INIT-ACK and finally sent a COOKIE ECHO with the peer's same tag
+ * but a new tag of its own.
+ */
+/* This case represents an initialization collision. */
+static enum sctp_disposition sctp_sf_do_dupcook_c(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ struct sctp_cmd_seq *commands,
+ struct sctp_association *new_asoc)
+{
+ /* The cookie should be silently discarded.
+ * The endpoint SHOULD NOT change states and should leave
+ * any timers running.
+ */
+ return SCTP_DISPOSITION_DISCARD;
+}
+
+/* Unexpected COOKIE-ECHO handler lost chunk (Table 2, action 'D')
+ *
+ * Section 5.2.4
+ *
+ * D) When both local and remote tags match the endpoint should always
+ * enter the ESTABLISHED state, if it has not already done so.
+ */
+/* This case represents an initialization collision. */
+static enum sctp_disposition sctp_sf_do_dupcook_d(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ struct sctp_cmd_seq *commands,
+ struct sctp_association *new_asoc)
+{
+ struct sctp_ulpevent *ev = NULL, *ai_ev = NULL, *auth_ev = NULL;
+ struct sctp_chunk *repl;
+
+ /* Clarification from Implementor's Guide:
+ * D) When both local and remote tags match the endpoint should
+ * enter the ESTABLISHED state, if it is in the COOKIE-ECHOED state.
+ * It should stop any cookie timer that may be running and send
+ * a COOKIE ACK.
+ */
+
+ if (!sctp_auth_chunk_verify(net, chunk, asoc))
+ return SCTP_DISPOSITION_DISCARD;
+
+ /* Don't accidentally move back into established state. */
+ if (asoc->state < SCTP_STATE_ESTABLISHED) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+ SCTP_STATE(SCTP_STATE_ESTABLISHED));
+ SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
+ sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START,
+ SCTP_NULL());
+
+ /* RFC 2960 5.1 Normal Establishment of an Association
+ *
+ * D) IMPLEMENTATION NOTE: An implementation may choose
+ * to send the Communication Up notification to the
+ * SCTP user upon reception of a valid COOKIE
+ * ECHO chunk.
+ */
+ ev = sctp_ulpevent_make_assoc_change(asoc, 0,
+ SCTP_COMM_UP, 0,
+ asoc->c.sinit_num_ostreams,
+ asoc->c.sinit_max_instreams,
+ NULL, GFP_ATOMIC);
+ if (!ev)
+ goto nomem;
+
+ /* Sockets API Draft Section 5.3.1.6
+ * When a peer sends an Adaptation Layer Indication parameter,
+ * SCTP delivers this notification to inform the application
+ * of the peer's requested adaptation layer.
+ */
+ if (asoc->peer.adaptation_ind) {
+ ai_ev = sctp_ulpevent_make_adaptation_indication(asoc,
+ GFP_ATOMIC);
+ if (!ai_ev)
+ goto nomem;
+
+ }
+
+ if (!asoc->peer.auth_capable) {
+ auth_ev = sctp_ulpevent_make_authkey(asoc, 0,
+ SCTP_AUTH_NO_AUTH,
+ GFP_ATOMIC);
+ if (!auth_ev)
+ goto nomem;
+ }
+ }
+
+ repl = sctp_make_cookie_ack(asoc, chunk);
+ if (!repl)
+ goto nomem;
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
+
+ if (ev)
+ sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
+ SCTP_ULPEVENT(ev));
+ if (ai_ev)
+ sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
+ SCTP_ULPEVENT(ai_ev));
+ if (auth_ev)
+ sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
+ SCTP_ULPEVENT(auth_ev));
+
+ return SCTP_DISPOSITION_CONSUME;
+
+nomem:
+ if (auth_ev)
+ sctp_ulpevent_free(auth_ev);
+ if (ai_ev)
+ sctp_ulpevent_free(ai_ev);
+ if (ev)
+ sctp_ulpevent_free(ev);
+ return SCTP_DISPOSITION_NOMEM;
+}
+
+/*
+ * Handle a duplicate COOKIE-ECHO. This usually means a cookie-carrying
+ * chunk was retransmitted and then delayed in the network.
+ *
+ * Section: 5.2.4 Handle a COOKIE ECHO when a TCB exists
+ *
+ * Verification Tag: None. Do cookie validation.
+ *
+ * Inputs
+ * (endpoint, asoc, chunk)
+ *
+ * Outputs
+ * (asoc, reply_msg, msg_up, timers, counters)
+ *
+ * The return value is the disposition of the chunk.
+ */
+enum sctp_disposition sctp_sf_do_5_2_4_dupcook(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_association *new_asoc;
+ struct sctp_chunk *chunk = arg;
+ enum sctp_disposition retval;
+ struct sctp_chunk *err_chk_p;
+ int error = 0;
+ char action;
+
+ /* Make sure that the chunk has a valid length from the protocol
+ * perspective. In this case check to make sure we have at least
+ * enough for the chunk header. Cookie length verification is
+ * done later.
+ */
+ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr))) {
+ if (!sctp_vtag_verify(chunk, asoc))
+ asoc = NULL;
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands);
+ }
+
+ /* "Decode" the chunk. We have no optional parameters so we
+ * are in good shape.
+ */
+ chunk->subh.cookie_hdr = (struct sctp_signed_cookie *)chunk->skb->data;
+ if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) -
+ sizeof(struct sctp_chunkhdr)))
+ goto nomem;
+
+ /* In RFC 2960 5.2.4 3, if both Verification Tags in the State Cookie
+ * of a duplicate COOKIE ECHO match the Verification Tags of the
+ * current association, consider the State Cookie valid even if
+ * the lifespan is exceeded.
+ */
+ new_asoc = sctp_unpack_cookie(ep, asoc, chunk, GFP_ATOMIC, &error,
+ &err_chk_p);
+
+ /* FIXME:
+ * If the re-build failed, what is the proper error path
+ * from here?
+ *
+ * [We should abort the association. --piggy]
+ */
+ if (!new_asoc) {
+ /* FIXME: Several errors are possible. A bad cookie should
+ * be silently discarded, but think about logging it too.
+ */
+ switch (error) {
+ case -SCTP_IERROR_NOMEM:
+ goto nomem;
+
+ case -SCTP_IERROR_STALE_COOKIE:
+ sctp_send_stale_cookie_err(net, ep, asoc, chunk, commands,
+ err_chk_p);
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ case -SCTP_IERROR_BAD_SIG:
+ default:
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ }
+ }
+
+ /* Update socket peer label if first association. */
+ if (security_sctp_assoc_request(new_asoc, chunk->head_skb ?: chunk->skb)) {
+ sctp_association_free(new_asoc);
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ }
+
+ /* Set temp so that it won't be added into hashtable */
+ new_asoc->temp = 1;
+
+ /* Compare the tie_tag in cookie with the verification tag of
+ * current association.
+ */
+ action = sctp_tietags_compare(new_asoc, asoc);
+
+ switch (action) {
+ case 'A': /* Association restart. */
+ retval = sctp_sf_do_dupcook_a(net, ep, asoc, chunk, commands,
+ new_asoc);
+ break;
+
+ case 'B': /* Collision case B. */
+ retval = sctp_sf_do_dupcook_b(net, ep, asoc, chunk, commands,
+ new_asoc);
+ break;
+
+ case 'C': /* Collision case C. */
+ retval = sctp_sf_do_dupcook_c(net, ep, asoc, chunk, commands,
+ new_asoc);
+ break;
+
+ case 'D': /* Collision case D. */
+ retval = sctp_sf_do_dupcook_d(net, ep, asoc, chunk, commands,
+ new_asoc);
+ break;
+
+ default: /* Discard packet for all others. */
+ retval = sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ break;
+ }
+
+ /* Delete the temporary new association. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC, SCTP_ASOC(new_asoc));
+ sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
+
+ /* Restore association pointer to provide SCTP command interpreter
+ * with a valid context in case it needs to manipulate
+ * the queues */
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC,
+ SCTP_ASOC((struct sctp_association *)asoc));
+
+ return retval;
+
+nomem:
+ return SCTP_DISPOSITION_NOMEM;
+}
+
+/*
+ * Process an ABORT. (SHUTDOWN-PENDING state)
+ *
+ * See sctp_sf_do_9_1_abort().
+ */
+enum sctp_disposition sctp_sf_shutdown_pending_abort(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *chunk = arg;
+
+ if (!sctp_vtag_verify_either(chunk, asoc))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* Make sure that the ABORT chunk has a valid length.
+ * Since this is an ABORT chunk, we have to discard it
+ * because of the following text:
+ * RFC 2960, Section 3.3.7
+ * If an endpoint receives an ABORT with a format error or for an
+ * association that doesn't exist, it MUST silently discard it.
+ * Because the length is "invalid", we can't really discard just
+ * as we do not know its true length. So, to be safe, discard the
+ * packet.
+ */
+ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_abort_chunk)))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* ADD-IP: Special case for ABORT chunks
+ * F4) One special consideration is that ABORT Chunks arriving
+ * destined to the IP address being deleted MUST be
+ * ignored (see Section 5.3.1 for further details).
+ */
+ if (SCTP_ADDR_DEL ==
+ sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ if (!sctp_err_chunk_valid(chunk))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
+}
+
+/*
+ * Process an ABORT. (SHUTDOWN-SENT state)
+ *
+ * See sctp_sf_do_9_1_abort().
+ */
+enum sctp_disposition sctp_sf_shutdown_sent_abort(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *chunk = arg;
+
+ if (!sctp_vtag_verify_either(chunk, asoc))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* Make sure that the ABORT chunk has a valid length.
+ * Since this is an ABORT chunk, we have to discard it
+ * because of the following text:
+ * RFC 2960, Section 3.3.7
+ * If an endpoint receives an ABORT with a format error or for an
+ * association that doesn't exist, it MUST silently discard it.
+ * Because the length is "invalid", we can't really discard just
+ * as we do not know its true length. So, to be safe, discard the
+ * packet.
+ */
+ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_abort_chunk)))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* ADD-IP: Special case for ABORT chunks
+ * F4) One special consideration is that ABORT Chunks arriving
+ * destined to the IP address being deleted MUST be
+ * ignored (see Section 5.3.1 for further details).
+ */
+ if (SCTP_ADDR_DEL ==
+ sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ if (!sctp_err_chunk_valid(chunk))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* Stop the T2-shutdown timer. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
+
+ /* Stop the T5-shutdown guard timer. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
+
+ return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
+}
+
+/*
+ * Process an ABORT. (SHUTDOWN-ACK-SENT state)
+ *
+ * See sctp_sf_do_9_1_abort().
+ */
+enum sctp_disposition sctp_sf_shutdown_ack_sent_abort(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ /* The same T2 timer is used, so we should be able to use a
+ * common function with the SHUTDOWN-SENT state.
+ */
+ return sctp_sf_shutdown_sent_abort(net, ep, asoc, type, arg, commands);
+}
+
+/*
+ * Handle an Error received in COOKIE_ECHOED state.
+ *
+ * Only handle the error type of stale COOKIE Error; the other errors will
+ * be ignored.
+ *
+ * Inputs
+ * (endpoint, asoc, chunk)
+ *
+ * Outputs
+ * (asoc, reply_msg, msg_up, timers, counters)
+ *
+ * The return value is the disposition of the chunk.
+ */
+enum sctp_disposition sctp_sf_cookie_echoed_err(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *chunk = arg;
+ struct sctp_errhdr *err;
+
+ if (!sctp_vtag_verify(chunk, asoc))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* Make sure that the ERROR chunk has a valid length.
+ * The parameter walking depends on this as well.
+ */
+ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_operr_chunk)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
+ /* Process the error here */
+ /* FUTURE FIXME: When PR-SCTP related and other optional
+ * parms are emitted, this will have to change to handle multiple
+ * errors.
+ */
+ sctp_walk_errors(err, chunk->chunk_hdr) {
+ if (SCTP_ERROR_STALE_COOKIE == err->cause)
+ return sctp_sf_do_5_2_6_stale(net, ep, asoc, type,
+ arg, commands);
+ }
+
+ /* It is possible to have malformed error causes, and that
+ * will cause us to end the walk early. However, since
+ * we are discarding the packet, there should be no adverse
+ * effects.
+ */
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+}
+
+/*
+ * Handle a Stale COOKIE Error
+ *
+ * Section: 5.2.6 Handle Stale COOKIE Error
+ * If the association is in the COOKIE-ECHOED state, the endpoint may elect
+ * one of the following three alternatives.
+ * ...
+ * 3) Send a new INIT chunk to the endpoint, adding a Cookie
+ * Preservative parameter requesting an extension to the lifetime of
+ * the State Cookie. When calculating the time extension, an
+ * implementation SHOULD use the RTT information measured based on the
+ * previous COOKIE ECHO / ERROR exchange, and should add no more
+ * than 1 second beyond the measured RTT, due to long State Cookie
+ * lifetimes making the endpoint more subject to a replay attack.
+ *
+ * Verification Tag: Not explicit, but safe to ignore.
+ *
+ * Inputs
+ * (endpoint, asoc, chunk)
+ *
+ * Outputs
+ * (asoc, reply_msg, msg_up, timers, counters)
+ *
+ * The return value is the disposition of the chunk.
+ */
+static enum sctp_disposition sctp_sf_do_5_2_6_stale(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ int attempts = asoc->init_err_counter + 1;
+ struct sctp_chunk *chunk = arg, *reply;
+ struct sctp_cookie_preserve_param bht;
+ struct sctp_bind_addr *bp;
+ struct sctp_errhdr *err;
+ u32 stale;
+
+ if (attempts > asoc->max_init_attempts) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
+ sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
+ SCTP_PERR(SCTP_ERROR_STALE_COOKIE));
+ return SCTP_DISPOSITION_DELETE_TCB;
+ }
+
+ err = (struct sctp_errhdr *)(chunk->skb->data);
+
+ /* When calculating the time extension, an implementation
+ * SHOULD use the RTT information measured based on the
+ * previous COOKIE ECHO / ERROR exchange, and should add no
+ * more than 1 second beyond the measured RTT, due to long
+ * State Cookie lifetimes making the endpoint more subject to
+ * a replay attack.
+ * Measure of Staleness's unit is usec. (1/1000000 sec)
+ * Suggested Cookie Life-span Increment's unit is msec.
+ * (1/1000 sec)
+ * In general, if you use the suggested cookie life, the value
+ * found in the field of measure of staleness should be doubled
+ * to give ample time to retransmit the new cookie and thus
+ * yield a higher probability of success on the reattempt.
+ */
+ stale = ntohl(*(__be32 *)((u8 *)err + sizeof(*err)));
+ stale = (stale * 2) / 1000;
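+ /* Example: a reported staleness of 250000 usec becomes a suggested
+ * lifespan increment of 500 msec.
+ */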
+
+ bht.param_hdr.type = SCTP_PARAM_COOKIE_PRESERVATIVE;
+ bht.param_hdr.length = htons(sizeof(bht));
+ bht.lifespan_increment = htonl(stale);
+
+ /* Build that new INIT chunk. */
+ bp = (struct sctp_bind_addr *) &asoc->base.bind_addr;
+ reply = sctp_make_init(asoc, bp, GFP_ATOMIC, sizeof(bht));
+ if (!reply)
+ goto nomem;
+
+ sctp_addto_chunk(reply, sizeof(bht), &bht);
+
+ /* Clear peer's init_tag cached in assoc as we are sending a new INIT */
+ sctp_add_cmd_sf(commands, SCTP_CMD_CLEAR_INIT_TAG, SCTP_NULL());
+
+ /* Stop pending T3-rtx and heartbeat timers */
+ sctp_add_cmd_sf(commands, SCTP_CMD_T3_RTX_TIMERS_STOP, SCTP_NULL());
+ sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL());
+
+ /* Delete non-primary peer ip addresses since we are transitioning
+ * back to the COOKIE-WAIT state
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_DEL_NON_PRIMARY, SCTP_NULL());
+
+ /* If we've sent any data bundled with COOKIE-ECHO we will need to
+ * resend it.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_T1_RETRAN,
+ SCTP_TRANSPORT(asoc->peer.primary_path));
+
+ /* Cast away the const modifier, as we want to just
+ * rerun it through as a side effect.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_INIT_COUNTER_INC, SCTP_NULL());
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+ SCTP_STATE(SCTP_STATE_COOKIE_WAIT));
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
+
+ return SCTP_DISPOSITION_CONSUME;
+
+nomem:
+ return SCTP_DISPOSITION_NOMEM;
+}
+
+/*
+ * Process an ABORT.
+ *
+ * Section: 9.1
+ * After checking the Verification Tag, the receiving endpoint shall
+ * remove the association from its record, and shall report the
+ * termination to its upper layer.
+ *
+ * Verification Tag: 8.5.1 Exceptions in Verification Tag Rules
+ * B) Rules for packet carrying ABORT:
+ *
+ * - The endpoint shall always fill in the Verification Tag field of the
+ * outbound packet with the destination endpoint's tag value if it
+ * is known.
+ *
+ * - If the ABORT is sent in response to an OOTB packet, the endpoint
+ * MUST follow the procedure described in Section 8.4.
+ *
+ * - The receiver MUST accept the packet if the Verification Tag
+ * matches either its own tag, OR the tag of its peer. Otherwise, the
+ * receiver MUST silently discard the packet and take no further
+ * action.
+ *
+ * Inputs
+ * (endpoint, asoc, chunk)
+ *
+ * Outputs
+ * (asoc, reply_msg, msg_up, timers, counters)
+ *
+ * The return value is the disposition of the chunk.
+ */
+enum sctp_disposition sctp_sf_do_9_1_abort(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *chunk = arg;
+
+ if (!sctp_vtag_verify_either(chunk, asoc))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* Make sure that the ABORT chunk has a valid length.
+ * Since this is an ABORT chunk, we have to discard it
+ * because of the following text:
+ * RFC 2960, Section 3.3.7
+ * If an endpoint receives an ABORT with a format error or for an
+ * association that doesn't exist, it MUST silently discard it.
+ * Because the length is "invalid", we can't really discard just
+ * as we do not know its true length. So, to be safe, discard the
+ * packet.
+ */
+ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_abort_chunk)))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* ADD-IP: Special case for ABORT chunks
+ * F4) One special consideration is that ABORT Chunks arriving
+ * destined to the IP address being deleted MUST be
+ * ignored (see Section 5.3.1 for further details).
+ */
+ if (SCTP_ADDR_DEL ==
+ sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ if (!sctp_err_chunk_valid(chunk))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
+}
+
+static enum sctp_disposition __sctp_sf_do_9_1_abort(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ __be16 error = SCTP_ERROR_NO_ERROR;
+ struct sctp_chunk *chunk = arg;
+ unsigned int len;
+
+ /* See if we have an error cause code in the chunk. */
+ len = ntohs(chunk->chunk_hdr->length);
+ if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
+ error = ((struct sctp_errhdr *)chunk->skb->data)->cause;
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
+ /* ASSOC_FAILED will DELETE_TCB. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(error));
+ SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+ SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
+
+ return SCTP_DISPOSITION_ABORT;
+}
+
+/*
+ * Process an ABORT. (COOKIE-WAIT state)
+ *
+ * See sctp_sf_do_9_1_abort() above.
+ */
+enum sctp_disposition sctp_sf_cookie_wait_abort(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ __be16 error = SCTP_ERROR_NO_ERROR;
+ struct sctp_chunk *chunk = arg;
+ unsigned int len;
+
+ if (!sctp_vtag_verify_either(chunk, asoc))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* Make sure that the ABORT chunk has a valid length.
+ * Since this is an ABORT chunk, we have to discard it
+ * because of the following text:
+ * RFC 2960, Section 3.3.7
+ * If an endpoint receives an ABORT with a format error or for an
+ * association that doesn't exist, it MUST silently discard it.
+ * Because the length is "invalid", we can't really discard just
+ * as we do not know its true length. So, to be safe, discard the
+ * packet.
+ */
+ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_abort_chunk)))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* See if we have an error cause code in the chunk. */
+ len = ntohs(chunk->chunk_hdr->length);
+ if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
+ error = ((struct sctp_errhdr *)chunk->skb->data)->cause;
+
+ return sctp_stop_t1_and_abort(net, commands, error, ECONNREFUSED, asoc,
+ chunk->transport);
+}
+
+/*
+ * Process an incoming ICMP as an ABORT. (COOKIE-WAIT state)
+ */
+enum sctp_disposition sctp_sf_cookie_wait_icmp_abort(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ return sctp_stop_t1_and_abort(net, commands, SCTP_ERROR_NO_ERROR,
+ ENOPROTOOPT, asoc,
+ (struct sctp_transport *)arg);
+}
+
+/*
+ * Process an ABORT. (COOKIE-ECHOED state)
+ */
+enum sctp_disposition sctp_sf_cookie_echoed_abort(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ /* There is a single T1 timer, so we should be able to use
+ * a common function with the COOKIE-WAIT state.
+ */
+ return sctp_sf_cookie_wait_abort(net, ep, asoc, type, arg, commands);
+}
+
+/*
+ * Stop T1 timer and abort association with "INIT failed".
+ *
+ * This is common code called by several sctp_sf_*_abort() functions above.
+ */
+static enum sctp_disposition sctp_stop_t1_and_abort(
+ struct net *net,
+ struct sctp_cmd_seq *commands,
+ __be16 error, int sk_err,
+ const struct sctp_association *asoc,
+ struct sctp_transport *transport)
+{
+ pr_debug("%s: ABORT received (INIT)\n", __func__);
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+ SCTP_STATE(SCTP_STATE_CLOSED));
+ SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(sk_err));
+ /* CMD_INIT_FAILED will DELETE_TCB. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
+ SCTP_PERR(error));
+
+ return SCTP_DISPOSITION_ABORT;
+}
+
+/*
+ * sctp_sf_do_9_2_shut
+ *
+ * Section: 9.2
+ * Upon the reception of the SHUTDOWN, the peer endpoint shall
+ * - enter the SHUTDOWN-RECEIVED state,
+ *
+ * - stop accepting new data from its SCTP user
+ *
+ * - verify, by checking the Cumulative TSN Ack field of the chunk,
+ * that all its outstanding DATA chunks have been received by the
+ * SHUTDOWN sender.
+ *
+ * Once an endpoint has reached the SHUTDOWN-RECEIVED state it MUST NOT
+ * send a SHUTDOWN in response to a ULP request, and should discard
+ * subsequent SHUTDOWN chunks.
+ *
+ * If there are still outstanding DATA chunks left, the SHUTDOWN
+ * receiver shall continue to follow normal data transmission
+ * procedures defined in Section 6 until all outstanding DATA chunks
+ * are acknowledged; however, the SHUTDOWN receiver MUST NOT accept
+ * new data from its SCTP user.
+ *
+ * Verification Tag: 8.5 Verification Tag [Normal verification]
+ *
+ * Inputs
+ * (endpoint, asoc, chunk)
+ *
+ * Outputs
+ * (asoc, reply_msg, msg_up, timers, counters)
+ *
+ * The return value is the disposition of the chunk.
+ */
+enum sctp_disposition sctp_sf_do_9_2_shutdown(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ enum sctp_disposition disposition;
+ struct sctp_chunk *chunk = arg;
+ struct sctp_shutdownhdr *sdh;
+ struct sctp_ulpevent *ev;
+ __u32 ctsn;
+
+ if (!sctp_vtag_verify(chunk, asoc))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* Make sure that the SHUTDOWN chunk has a valid length. */
+ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_shutdown_chunk)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
+ /* Convert the elaborate header. */
+ sdh = (struct sctp_shutdownhdr *)chunk->skb->data;
+ skb_pull(chunk->skb, sizeof(*sdh));
+ chunk->subh.shutdown_hdr = sdh;
+ ctsn = ntohl(sdh->cum_tsn_ack);
+
+ if (TSN_lt(ctsn, asoc->ctsn_ack_point)) {
+ pr_debug("%s: ctsn:%x, ctsn_ack_point:%x\n", __func__, ctsn,
+ asoc->ctsn_ack_point);
+
+ return SCTP_DISPOSITION_DISCARD;
+ }
+
+	/* If the Cumulative TSN Ack is beyond the max TSN currently
+	 * sent, terminate the association and respond to the
+	 * sender with an ABORT.
+ */
+ if (!TSN_lt(ctsn, asoc->next_tsn))
+ return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands);
+
+ /* API 5.3.1.5 SCTP_SHUTDOWN_EVENT
+ * When a peer sends a SHUTDOWN, SCTP delivers this notification to
+ * inform the application that it should cease sending data.
+ */
+ ev = sctp_ulpevent_make_shutdown_event(asoc, 0, GFP_ATOMIC);
+ if (!ev) {
+ disposition = SCTP_DISPOSITION_NOMEM;
+ goto out;
+ }
+ sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
+
+ /* Upon the reception of the SHUTDOWN, the peer endpoint shall
+ * - enter the SHUTDOWN-RECEIVED state,
+ * - stop accepting new data from its SCTP user
+ *
+ * [This is implicit in the new state.]
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+ SCTP_STATE(SCTP_STATE_SHUTDOWN_RECEIVED));
+ disposition = SCTP_DISPOSITION_CONSUME;
+
+ if (sctp_outq_is_empty(&asoc->outqueue)) {
+ disposition = sctp_sf_do_9_2_shutdown_ack(net, ep, asoc, type,
+ arg, commands);
+ }
+
+ if (SCTP_DISPOSITION_NOMEM == disposition)
+ goto out;
+
+ /* - verify, by checking the Cumulative TSN Ack field of the
+ * chunk, that all its outstanding DATA chunks have been
+ * received by the SHUTDOWN sender.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_CTSN,
+ SCTP_BE32(chunk->subh.shutdown_hdr->cum_tsn_ack));
+
+out:
+ return disposition;
+}
+
+/*
+ * sctp_sf_do_9_2_shut_ctsn
+ *
+ * Once an endpoint has reached the SHUTDOWN-RECEIVED state,
+ * it MUST NOT send a SHUTDOWN in response to a ULP request.
+ * The Cumulative TSN Ack of the received SHUTDOWN chunk
+ * MUST be processed.
+ */
+enum sctp_disposition sctp_sf_do_9_2_shut_ctsn(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *chunk = arg;
+ struct sctp_shutdownhdr *sdh;
+ __u32 ctsn;
+
+ if (!sctp_vtag_verify(chunk, asoc))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* Make sure that the SHUTDOWN chunk has a valid length. */
+ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_shutdown_chunk)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
+ sdh = (struct sctp_shutdownhdr *)chunk->skb->data;
+ ctsn = ntohl(sdh->cum_tsn_ack);
+
+ if (TSN_lt(ctsn, asoc->ctsn_ack_point)) {
+ pr_debug("%s: ctsn:%x, ctsn_ack_point:%x\n", __func__, ctsn,
+ asoc->ctsn_ack_point);
+
+ return SCTP_DISPOSITION_DISCARD;
+ }
+
+	/* If the Cumulative TSN Ack is beyond the max TSN currently
+	 * sent, terminate the association and respond to the
+	 * sender with an ABORT.
+ */
+ if (!TSN_lt(ctsn, asoc->next_tsn))
+ return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands);
+
+ /* verify, by checking the Cumulative TSN Ack field of the
+ * chunk, that all its outstanding DATA chunks have been
+ * received by the SHUTDOWN sender.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_CTSN,
+ SCTP_BE32(sdh->cum_tsn_ack));
+
+ return SCTP_DISPOSITION_CONSUME;
+}
+
+/* RFC 2960 9.2
+ * If an endpoint is in SHUTDOWN-ACK-SENT state and receives an INIT chunk
+ * (e.g., if the SHUTDOWN COMPLETE was lost) with source and destination
+ * transport addresses (either in the IP addresses or in the INIT chunk)
+ * that belong to this association, it should discard the INIT chunk and
+ * retransmit the SHUTDOWN ACK chunk.
+ */
+static enum sctp_disposition
+__sctp_sf_do_9_2_reshutack(struct net *net, const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type, void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *chunk = arg;
+ struct sctp_chunk *reply;
+
+ /* Make sure that the chunk has a valid length */
+ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
+ /* Since we are not going to really process this INIT, there
+ * is no point in verifying chunk boundaries. Just generate
+ * the SHUTDOWN ACK.
+ */
+ reply = sctp_make_shutdown_ack(asoc, chunk);
+ if (NULL == reply)
+ goto nomem;
+
+ /* Set the transport for the SHUTDOWN ACK chunk and the timeout for
+ * the T2-SHUTDOWN timer.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply));
+
+ /* and restart the T2-shutdown timer. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
+
+ return SCTP_DISPOSITION_CONSUME;
+nomem:
+ return SCTP_DISPOSITION_NOMEM;
+}
+
+enum sctp_disposition
+sctp_sf_do_9_2_reshutack(struct net *net, const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type, void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *chunk = arg;
+
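+	/* An INIT MUST be the only chunk in the packet that carries it
+	 * (RFC 2960, Section 6.10); anything else is silently discarded.
+	 */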
+ if (!chunk->singleton)
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
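+	/* 8.5.1 A) Rules for a packet carrying INIT: the sender MUST set
+	 * the Verification Tag of the packet to 0.  A packet carrying an
+	 * INIT with a non-zero tag is therefore answered with an ABORT.
+	 */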
+ if (chunk->sctp_hdr->vtag != 0)
+ return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
+
+ return __sctp_sf_do_9_2_reshutack(net, ep, asoc, type, arg, commands);
+}
+
+/*
+ * sctp_sf_do_ecn_cwr
+ *
+ * Section: Appendix A: Explicit Congestion Notification
+ *
+ * CWR:
+ *
+ * RFC 2481 details a specific bit for a sender to send in the header of
+ * its next outbound TCP segment to indicate to its peer that it has
+ * reduced its congestion window. This is termed the CWR bit. For
+ * SCTP the same indication is made by including the CWR chunk.
+ * This chunk contains one data element, i.e. the TSN number that
+ * was sent in the ECNE chunk. This element represents the lowest
+ * TSN number in the datagram that was originally marked with the
+ * CE bit.
+ *
+ * Verification Tag: 8.5 Verification Tag [Normal verification]
+ * Inputs
+ * (endpoint, asoc, chunk)
+ *
+ * Outputs
+ * (asoc, reply_msg, msg_up, timers, counters)
+ *
+ * The return value is the disposition of the chunk.
+ */
+enum sctp_disposition sctp_sf_do_ecn_cwr(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *chunk = arg;
+ struct sctp_cwrhdr *cwr;
+ u32 lowest_tsn;
+
+ if (!sctp_vtag_verify(chunk, asoc))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_ecne_chunk)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
+ cwr = (struct sctp_cwrhdr *)chunk->skb->data;
+ skb_pull(chunk->skb, sizeof(*cwr));
+
+ lowest_tsn = ntohl(cwr->lowest_tsn);
+
+ /* Does this CWR ack the last sent congestion notification? */
+ if (TSN_lte(asoc->last_ecne_tsn, lowest_tsn)) {
+ /* Stop sending ECNE. */
+ sctp_add_cmd_sf(commands,
+ SCTP_CMD_ECN_CWR,
+ SCTP_U32(lowest_tsn));
+ }
+ return SCTP_DISPOSITION_CONSUME;
+}
+
+/*
+ * sctp_sf_do_ecne
+ *
+ * Section: Appendix A: Explicit Congestion Notification
+ *
+ * ECN-Echo
+ *
+ * RFC 2481 details a specific bit for a receiver to send back in its
+ * TCP acknowledgements to notify the sender of the Congestion
+ * Experienced (CE) bit having arrived from the network. For SCTP this
+ * same indication is made by including the ECNE chunk. This chunk
+ * contains one data element, i.e. the lowest TSN associated with the IP
+ * datagram marked with the CE bit.....
+ *
+ * Verification Tag: 8.5 Verification Tag [Normal verification]
+ * Inputs
+ * (endpoint, asoc, chunk)
+ *
+ * Outputs
+ * (asoc, reply_msg, msg_up, timers, counters)
+ *
+ * The return value is the disposition of the chunk.
+ */
+enum sctp_disposition sctp_sf_do_ecne(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg, struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *chunk = arg;
+ struct sctp_ecnehdr *ecne;
+
+ if (!sctp_vtag_verify(chunk, asoc))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_ecne_chunk)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
+ ecne = (struct sctp_ecnehdr *)chunk->skb->data;
+ skb_pull(chunk->skb, sizeof(*ecne));
+
+ /* If this is a newer ECNE than the last CWR packet we sent out */
+ sctp_add_cmd_sf(commands, SCTP_CMD_ECN_ECNE,
+ SCTP_U32(ntohl(ecne->lowest_tsn)));
+
+ return SCTP_DISPOSITION_CONSUME;
+}
+
+/*
+ * Section: 6.2 Acknowledgement on Reception of DATA Chunks
+ *
+ * The SCTP endpoint MUST always acknowledge the reception of each valid
+ * DATA chunk.
+ *
+ * The guidelines on delayed acknowledgement algorithm specified in
+ * Section 4.2 of [RFC2581] SHOULD be followed. Specifically, an
+ * acknowledgement SHOULD be generated for at least every second packet
+ * (not every second DATA chunk) received, and SHOULD be generated within
+ * 200 ms of the arrival of any unacknowledged DATA chunk. In some
+ * situations it may be beneficial for an SCTP transmitter to be more
+ * conservative than the algorithms detailed in this document allow.
+ * However, an SCTP transmitter MUST NOT be more aggressive than the
+ * following algorithms allow.
+ *
+ * An SCTP receiver MUST NOT generate more than one SACK for every
+ * incoming packet, other than to update the offered window as the
+ * receiving application consumes new data.
+ *
+ * Verification Tag: 8.5 Verification Tag [Normal verification]
+ *
+ * Inputs
+ * (endpoint, asoc, chunk)
+ *
+ * Outputs
+ * (asoc, reply_msg, msg_up, timers, counters)
+ *
+ * The return value is the disposition of the chunk.
+ */
+enum sctp_disposition sctp_sf_eat_data_6_2(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ union sctp_arg force = SCTP_NOFORCE();
+ struct sctp_chunk *chunk = arg;
+ int error;
+
+ if (!sctp_vtag_verify(chunk, asoc)) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
+ SCTP_NULL());
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ }
+
+ if (!sctp_chunk_length_valid(chunk, sctp_datachk_len(&asoc->stream)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
+ error = sctp_eat_data(asoc, chunk, commands);
+ switch (error) {
+ case SCTP_IERROR_NO_ERROR:
+ break;
+ case SCTP_IERROR_HIGH_TSN:
+ case SCTP_IERROR_BAD_STREAM:
+ SCTP_INC_STATS(net, SCTP_MIB_IN_DATA_CHUNK_DISCARDS);
+ goto discard_noforce;
+ case SCTP_IERROR_DUP_TSN:
+ case SCTP_IERROR_IGNORE_TSN:
+ SCTP_INC_STATS(net, SCTP_MIB_IN_DATA_CHUNK_DISCARDS);
+ goto discard_force;
+ case SCTP_IERROR_NO_DATA:
+ return SCTP_DISPOSITION_ABORT;
+ case SCTP_IERROR_PROTO_VIOLATION:
+ return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
+ (u8 *)chunk->subh.data_hdr,
+ sctp_datahdr_len(&asoc->stream));
+ default:
+ BUG();
+ }
+
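+	/* The sender set the I bit (SACK-IMMEDIATELY, RFC 7053), so the
+	 * SACK generated below must not be delayed.
+	 */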
+ if (chunk->chunk_hdr->flags & SCTP_DATA_SACK_IMM)
+ force = SCTP_FORCE();
+
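+	/* Incoming DATA counts as activity, so restart the autoclose
+	 * timer if one is in use for this association.
+	 */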
+ if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
+ }
+
+ /* If this is the last chunk in a packet, we need to count it
+ * toward sack generation. Note that we need to SACK every
+ * OTHER packet containing data chunks, EVEN IF WE DISCARD
+ * THEM. We elect to NOT generate SACK's if the chunk fails
+ * the verification tag test.
+ *
+ * RFC 2960 6.2 Acknowledgement on Reception of DATA Chunks
+ *
+ * The SCTP endpoint MUST always acknowledge the reception of
+ * each valid DATA chunk.
+ *
+ * The guidelines on delayed acknowledgement algorithm
+ * specified in Section 4.2 of [RFC2581] SHOULD be followed.
+ * Specifically, an acknowledgement SHOULD be generated for at
+ * least every second packet (not every second DATA chunk)
+ * received, and SHOULD be generated within 200 ms of the
+ * arrival of any unacknowledged DATA chunk. In some
+ * situations it may be beneficial for an SCTP transmitter to
+ * be more conservative than the algorithms detailed in this
+ * document allow. However, an SCTP transmitter MUST NOT be
+ * more aggressive than the following algorithms allow.
+ */
+ if (chunk->end_of_packet)
+ sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, force);
+
+ return SCTP_DISPOSITION_CONSUME;
+
+discard_force:
+ /* RFC 2960 6.2 Acknowledgement on Reception of DATA Chunks
+ *
+ * When a packet arrives with duplicate DATA chunk(s) and with
+ * no new DATA chunk(s), the endpoint MUST immediately send a
+ * SACK with no delay. If a packet arrives with duplicate
+ * DATA chunk(s) bundled with new DATA chunks, the endpoint
+ * MAY immediately send a SACK. Normally receipt of duplicate
+ * DATA chunks will occur when the original SACK chunk was lost
+ * and the peer's RTO has expired. The duplicate TSN number(s)
+ * SHOULD be reported in the SACK as duplicate.
+ */
+	/* In our case, we split the MAY SACK advice based on whether or
+	 * not the last chunk is a duplicate.
+ */
+ if (chunk->end_of_packet)
+ sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
+ return SCTP_DISPOSITION_DISCARD;
+
+discard_noforce:
+ if (chunk->end_of_packet)
+ sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, force);
+
+ return SCTP_DISPOSITION_DISCARD;
+}
+
+/*
+ * sctp_sf_eat_data_fast_4_4
+ *
+ * Section: 4 (4)
+ * (4) In SHUTDOWN-SENT state the endpoint MUST acknowledge any received
+ * DATA chunks without delay.
+ *
+ * Verification Tag: 8.5 Verification Tag [Normal verification]
+ * Inputs
+ * (endpoint, asoc, chunk)
+ *
+ * Outputs
+ * (asoc, reply_msg, msg_up, timers, counters)
+ *
+ * The return value is the disposition of the chunk.
+ */
+enum sctp_disposition sctp_sf_eat_data_fast_4_4(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *chunk = arg;
+ int error;
+
+ if (!sctp_vtag_verify(chunk, asoc)) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
+ SCTP_NULL());
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ }
+
+ if (!sctp_chunk_length_valid(chunk, sctp_datachk_len(&asoc->stream)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
+ error = sctp_eat_data(asoc, chunk, commands);
+ switch (error) {
+ case SCTP_IERROR_NO_ERROR:
+ case SCTP_IERROR_HIGH_TSN:
+ case SCTP_IERROR_DUP_TSN:
+ case SCTP_IERROR_IGNORE_TSN:
+ case SCTP_IERROR_BAD_STREAM:
+ break;
+ case SCTP_IERROR_NO_DATA:
+ return SCTP_DISPOSITION_ABORT;
+ case SCTP_IERROR_PROTO_VIOLATION:
+ return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
+ (u8 *)chunk->subh.data_hdr,
+ sctp_datahdr_len(&asoc->stream));
+ default:
+ BUG();
+ }
+
+	/* Go ahead and force a SACK, since we are shutting down. */
+
+ /* Implementor's Guide.
+ *
+ * While in SHUTDOWN-SENT state, the SHUTDOWN sender MUST immediately
+ * respond to each received packet containing one or more DATA chunk(s)
+ * with a SACK, a SHUTDOWN chunk, and restart the T2-shutdown timer
+ */
+ if (chunk->end_of_packet) {
+ /* We must delay the chunk creation since the cumulative
+ * TSN has not been updated yet.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SHUTDOWN, SCTP_NULL());
+ sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
+ }
+
+ return SCTP_DISPOSITION_CONSUME;
+}
+
+/*
+ * Section: 6.2 Processing a Received SACK
+ * D) Any time a SACK arrives, the endpoint performs the following:
+ *
+ * i) If Cumulative TSN Ack is less than the Cumulative TSN Ack Point,
+ * then drop the SACK. Since Cumulative TSN Ack is monotonically
+ * increasing, a SACK whose Cumulative TSN Ack is less than the
+ * Cumulative TSN Ack Point indicates an out-of-order SACK.
+ *
+ * ii) Set rwnd equal to the newly received a_rwnd minus the number
+ * of bytes still outstanding after processing the Cumulative TSN Ack
+ * and the Gap Ack Blocks.
+ *
+ * iii) If the SACK is missing a TSN that was previously
+ * acknowledged via a Gap Ack Block (e.g., the data receiver
+ * reneged on the data), then mark the corresponding DATA chunk
+ * as available for retransmit: Mark it as missing for fast
+ * retransmit as described in Section 7.2.4 and if no retransmit
+ * timer is running for the destination address to which the DATA
+ * chunk was originally transmitted, then T3-rtx is started for
+ * that destination address.
+ *
+ * Verification Tag: 8.5 Verification Tag [Normal verification]
+ *
+ * Inputs
+ * (endpoint, asoc, chunk)
+ *
+ * Outputs
+ * (asoc, reply_msg, msg_up, timers, counters)
+ *
+ * The return value is the disposition of the chunk.
+ */
+enum sctp_disposition sctp_sf_eat_sack_6_2(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *chunk = arg;
+ struct sctp_sackhdr *sackh;
+ __u32 ctsn;
+
+ if (!sctp_vtag_verify(chunk, asoc))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* Make sure that the SACK chunk has a valid length. */
+ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_sack_chunk)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
+ /* Pull the SACK chunk from the data buffer */
+ sackh = sctp_sm_pull_sack(chunk);
+ /* Was this a bogus SACK? */
+ if (!sackh)
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ chunk->subh.sack_hdr = sackh;
+ ctsn = ntohl(sackh->cum_tsn_ack);
+
+	/* If the Cumulative TSN Ack is beyond the max TSN currently
+	 * sent, terminate the association and respond to the
+	 * sender with an ABORT.
+ */
+ if (TSN_lte(asoc->next_tsn, ctsn))
+ return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands);
+
+ trace_sctp_probe(ep, asoc, chunk);
+
+ /* i) If Cumulative TSN Ack is less than the Cumulative TSN
+ * Ack Point, then drop the SACK. Since Cumulative TSN
+ * Ack is monotonically increasing, a SACK whose
+ * Cumulative TSN Ack is less than the Cumulative TSN Ack
+ * Point indicates an out-of-order SACK.
+ */
+ if (TSN_lt(ctsn, asoc->ctsn_ack_point)) {
+ pr_debug("%s: ctsn:%x, ctsn_ack_point:%x\n", __func__, ctsn,
+ asoc->ctsn_ack_point);
+
+ return SCTP_DISPOSITION_DISCARD;
+ }
+
+ /* Return this SACK for further processing. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, SCTP_CHUNK(chunk));
+
+ /* Note: We do the rest of the work on the PROCESS_SACK
+ * sideeffect.
+ */
+ return SCTP_DISPOSITION_CONSUME;
+}
+
+/*
+ * Generate an ABORT in response to a packet.
+ *
+ * Section: 8.4 Handle "Out of the blue" Packets, sctpimpguide 2.41
+ *
+ * 8) The receiver should respond to the sender of the OOTB packet with
+ * an ABORT. When sending the ABORT, the receiver of the OOTB packet
+ * MUST fill in the Verification Tag field of the outbound packet
+ * with the value found in the Verification Tag field of the OOTB
+ * packet and set the T-bit in the Chunk Flags to indicate that the
+ * Verification Tag is reflected. After sending this ABORT, the
+ * receiver of the OOTB packet shall discard the OOTB packet and take
+ * no further action.
+ *
+ * Verification Tag:
+ *
+ * The return value is the disposition of the chunk.
+*/
+static enum sctp_disposition sctp_sf_tabort_8_4_8(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_packet *packet = NULL;
+ struct sctp_chunk *chunk = arg;
+ struct sctp_chunk *abort;
+
+ packet = sctp_ootb_pkt_new(net, asoc, chunk);
+ if (!packet)
+ return SCTP_DISPOSITION_NOMEM;
+
+ /* Make an ABORT. The T bit will be set if the asoc
+ * is NULL.
+ */
+ abort = sctp_make_abort(asoc, chunk, 0);
+ if (!abort) {
+ sctp_ootb_pkt_free(packet);
+ return SCTP_DISPOSITION_NOMEM;
+ }
+
+ /* Reflect vtag if T-Bit is set */
+ if (sctp_test_T_bit(abort))
+ packet->vtag = ntohl(chunk->sctp_hdr->vtag);
+
+ /* Set the skb to the belonging sock for accounting. */
+ abort->skb->sk = ep->base.sk;
+
+ sctp_packet_append_chunk(packet, abort);
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(packet));
+
+ SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
+
+ sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ return SCTP_DISPOSITION_CONSUME;
+}
+
+/* Handling of SCTP Packets Containing an INIT Chunk Matching an
+ * Existing Association when the UDP encap port is incorrect.
+ *
+ * From Section 4 of draft-tuexen-tsvwg-sctp-udp-encaps-cons-03.
+ */
+static enum sctp_disposition sctp_sf_new_encap_port(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_packet *packet = NULL;
+ struct sctp_chunk *chunk = arg;
+ struct sctp_chunk *abort;
+
+ packet = sctp_ootb_pkt_new(net, asoc, chunk);
+ if (!packet)
+ return SCTP_DISPOSITION_NOMEM;
+
+ abort = sctp_make_new_encap_port(asoc, chunk);
+ if (!abort) {
+ sctp_ootb_pkt_free(packet);
+ return SCTP_DISPOSITION_NOMEM;
+ }
+
+ abort->skb->sk = ep->base.sk;
+
+ sctp_packet_append_chunk(packet, abort);
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
+ SCTP_PACKET(packet));
+
+ SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
+
+ sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ return SCTP_DISPOSITION_CONSUME;
+}
+
+/*
+ * Received an ERROR chunk from peer. Generate SCTP_REMOTE_ERROR
+ * event as ULP notification for each cause included in the chunk.
+ *
+ * API 5.3.1.3 - SCTP_REMOTE_ERROR
+ *
+ * The return value is the disposition of the chunk.
+*/
+enum sctp_disposition sctp_sf_operr_notify(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *chunk = arg;
+ struct sctp_errhdr *err;
+
+ if (!sctp_vtag_verify(chunk, asoc))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* Make sure that the ERROR chunk has a valid length. */
+ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_operr_chunk)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
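+
+	/* Walk the error causes only to validate their lengths; 'err'
+	 * ends up at chunk_end exactly when every cause is well formed.
+	 */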
+ sctp_walk_errors(err, chunk->chunk_hdr);
+ if ((void *)err != (void *)chunk->chunk_end)
+ return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
+ (void *)err, commands);
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR,
+ SCTP_CHUNK(chunk));
+
+ return SCTP_DISPOSITION_CONSUME;
+}
+
+/*
+ * Process an inbound SHUTDOWN ACK.
+ *
+ * From Section 9.2:
+ * Upon the receipt of the SHUTDOWN ACK, the SHUTDOWN sender shall
+ * stop the T2-shutdown timer, send a SHUTDOWN COMPLETE chunk to its
+ * peer, and remove all record of the association.
+ *
+ * The return value is the disposition.
+ */
+enum sctp_disposition sctp_sf_do_9_2_final(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *chunk = arg;
+ struct sctp_chunk *reply;
+ struct sctp_ulpevent *ev;
+
+ if (!sctp_vtag_verify(chunk, asoc))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* Make sure that the SHUTDOWN_ACK chunk has a valid length. */
+ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+ /* 10.2 H) SHUTDOWN COMPLETE notification
+ *
+ * When SCTP completes the shutdown procedures (section 9.2) this
+ * notification is passed to the upper layer.
+ */
+ ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_SHUTDOWN_COMP,
+ 0, 0, 0, NULL, GFP_ATOMIC);
+ if (!ev)
+ goto nomem;
+
+ /* ...send a SHUTDOWN COMPLETE chunk to its peer, */
+ reply = sctp_make_shutdown_complete(asoc, chunk);
+ if (!reply)
+ goto nomem_chunk;
+
+ /* Do all the commands now (after allocation), so that we
+ * have consistent state if memory allocation fails
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
+
+ /* Upon the receipt of the SHUTDOWN ACK, the SHUTDOWN sender shall
+ * stop the T2-shutdown timer,
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+ SCTP_STATE(SCTP_STATE_CLOSED));
+ SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS);
+ SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
+
+ /* ...and remove all record of the association. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
+ return SCTP_DISPOSITION_DELETE_TCB;
+
+nomem_chunk:
+ sctp_ulpevent_free(ev);
+nomem:
+ return SCTP_DISPOSITION_NOMEM;
+}
+
+/*
+ * RFC 2960, 8.4 - Handle "Out of the blue" Packets, sctpimpguide 2.41.
+ *
+ * 5) If the packet contains a SHUTDOWN ACK chunk, the receiver should
+ * respond to the sender of the OOTB packet with a SHUTDOWN COMPLETE.
+ * When sending the SHUTDOWN COMPLETE, the receiver of the OOTB
+ * packet must fill in the Verification Tag field of the outbound
+ * packet with the Verification Tag received in the SHUTDOWN ACK and
+ * set the T-bit in the Chunk Flags to indicate that the Verification
+ * Tag is reflected.
+ *
+ * 8) The receiver should respond to the sender of the OOTB packet with
+ * an ABORT. When sending the ABORT, the receiver of the OOTB packet
+ * MUST fill in the Verification Tag field of the outbound packet
+ * with the value found in the Verification Tag field of the OOTB
+ * packet and set the T-bit in the Chunk Flags to indicate that the
+ * Verification Tag is reflected. After sending this ABORT, the
+ * receiver of the OOTB packet shall discard the OOTB packet and take
+ * no further action.
+ */
+enum sctp_disposition sctp_sf_ootb(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg, struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *chunk = arg;
+ struct sk_buff *skb = chunk->skb;
+ struct sctp_chunkhdr *ch;
+ struct sctp_errhdr *err;
+ int ootb_cookie_ack = 0;
+ int ootb_shut_ack = 0;
+ __u8 *ch_end;
+
+ SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
+
+ if (asoc && !sctp_vtag_verify(chunk, asoc))
+ asoc = NULL;
+
+ ch = (struct sctp_chunkhdr *)chunk->chunk_hdr;
+ do {
+		/* Report a violation if the chunk is less than the minimal size */
+ if (ntohs(ch->length) < sizeof(*ch))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
+		/* Report a violation if the chunk length overflows the packet */
+ ch_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length));
+ if (ch_end > skb_tail_pointer(skb))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
+ /* Now that we know we at least have a chunk header,
+ * do things that are type appropriate.
+ */
+ if (SCTP_CID_SHUTDOWN_ACK == ch->type)
+ ootb_shut_ack = 1;
+
+ /* RFC 2960, Section 3.3.7
+ * Moreover, under any circumstances, an endpoint that
+ * receives an ABORT MUST NOT respond to that ABORT by
+ * sending an ABORT of its own.
+ */
+ if (SCTP_CID_ABORT == ch->type)
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+		/* RFC 2960 8.4, 7) If the packet contains a "Stale cookie" ERROR
+ * or a COOKIE ACK the SCTP Packet should be silently
+ * discarded.
+ */
+
+ if (SCTP_CID_COOKIE_ACK == ch->type)
+ ootb_cookie_ack = 1;
+
+ if (SCTP_CID_ERROR == ch->type) {
+ sctp_walk_errors(err, ch) {
+ if (SCTP_ERROR_STALE_COOKIE == err->cause) {
+ ootb_cookie_ack = 1;
+ break;
+ }
+ }
+ }
+
+ ch = (struct sctp_chunkhdr *)ch_end;
+ } while (ch_end < skb_tail_pointer(skb));
+
+ if (ootb_shut_ack)
+ return sctp_sf_shut_8_4_5(net, ep, asoc, type, arg, commands);
+ else if (ootb_cookie_ack)
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ else
+ return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
+}
+
+/*
+ * Handle an "Out of the blue" SHUTDOWN ACK.
+ *
+ * Section: 8.4 5, sctpimpguide 2.41.
+ *
+ * 5) If the packet contains a SHUTDOWN ACK chunk, the receiver should
+ * respond to the sender of the OOTB packet with a SHUTDOWN COMPLETE.
+ * When sending the SHUTDOWN COMPLETE, the receiver of the OOTB
+ * packet must fill in the Verification Tag field of the outbound
+ * packet with the Verification Tag received in the SHUTDOWN ACK and
+ * set the T-bit in the Chunk Flags to indicate that the Verification
+ * Tag is reflected.
+ *
+ * Inputs
+ * (endpoint, asoc, type, arg, commands)
+ *
+ * Outputs
+ * (enum sctp_disposition)
+ *
+ * The return value is the disposition of the chunk.
+ */
+static enum sctp_disposition sctp_sf_shut_8_4_5(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_packet *packet = NULL;
+ struct sctp_chunk *chunk = arg;
+ struct sctp_chunk *shut;
+
+ packet = sctp_ootb_pkt_new(net, asoc, chunk);
+ if (!packet)
+ return SCTP_DISPOSITION_NOMEM;
+
+	/* Make a SHUTDOWN_COMPLETE.
+ * The T bit will be set if the asoc is NULL.
+ */
+ shut = sctp_make_shutdown_complete(asoc, chunk);
+ if (!shut) {
+ sctp_ootb_pkt_free(packet);
+ return SCTP_DISPOSITION_NOMEM;
+ }
+
+ /* Reflect vtag if T-Bit is set */
+ if (sctp_test_T_bit(shut))
+ packet->vtag = ntohl(chunk->sctp_hdr->vtag);
+
+ /* Set the skb to the belonging sock for accounting. */
+ shut->skb->sk = ep->base.sk;
+
+ sctp_packet_append_chunk(packet, shut);
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
+ SCTP_PACKET(packet));
+
+ SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
+
+	/* We need to discard the rest of the packet to prevent
+	 * potential bombing attacks from additional bundled chunks.
+	 * This is documented in the SCTP Threats Internet-Draft.
+ */
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+}
+
+/*
+ * Handle SHUTDOWN ACK in COOKIE_ECHOED or COOKIE_WAIT state.
+ *
+ * Verification Tag: 8.5.1 E) Rules for packet carrying a SHUTDOWN ACK
+ * If the receiver is in COOKIE-ECHOED or COOKIE-WAIT state the
+ * procedures in section 8.4 SHOULD be followed, in other words it
+ * should be treated as an Out Of The Blue packet.
+ * [This means that we do NOT check the Verification Tag on these
+ * chunks. --piggy ]
+ *
+ */
+enum sctp_disposition sctp_sf_do_8_5_1_E_sa(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *chunk = arg;
+
+ if (!sctp_vtag_verify(chunk, asoc))
+ asoc = NULL;
+
+ /* Make sure that the SHUTDOWN_ACK chunk has a valid length. */
+ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
+ /* Although we do have an association in this case, it corresponds
+ * to a restarted association. So the packet is treated as an OOTB
+ * packet and the state function that handles OOTB SHUTDOWN_ACK is
+ * called with a NULL association.
+ */
+ SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
+
+ return sctp_sf_shut_8_4_5(net, ep, NULL, type, arg, commands);
+}
+
+/* ADDIP Section 4.2 Upon reception of an ASCONF Chunk. */
+enum sctp_disposition sctp_sf_do_asconf(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_paramhdr *err_param = NULL;
+ struct sctp_chunk *asconf_ack = NULL;
+ struct sctp_chunk *chunk = arg;
+ struct sctp_addiphdr *hdr;
+ __u32 serial;
+
+ if (!sctp_vtag_verify(chunk, asoc)) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
+ SCTP_NULL());
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ }
+
+ /* Make sure that the ASCONF ADDIP chunk has a valid length. */
+ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_addip_chunk)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
+ /* ADD-IP: Section 4.1.1
+ * This chunk MUST be sent in an authenticated way by using
+ * the mechanism defined in [I-D.ietf-tsvwg-sctp-auth]. If this chunk
+ * is received unauthenticated it MUST be silently discarded as
+ * described in [I-D.ietf-tsvwg-sctp-auth].
+ */
+ if (!asoc->peer.asconf_capable ||
+ (!net->sctp.addip_noauth && !chunk->auth))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ hdr = (struct sctp_addiphdr *)chunk->skb->data;
+ serial = ntohl(hdr->serial);
+
+ /* Verify the ASCONF chunk before processing it. */
+ if (!sctp_verify_asconf(asoc, chunk, true, &err_param))
+ return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
+ (void *)err_param, commands);
+
+ /* ADDIP 5.2 E1) Compare the value of the serial number to the value
+ * the endpoint stored in a new association variable
+ * 'Peer-Serial-Number'.
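+	/* If no DATA is outstanding any more, we can reply with our own
+	 * SHUTDOWN ACK straight away (Section 9.2).
+	 */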
+ */
+ if (serial == asoc->peer.addip_serial + 1) {
+ /* If this is the first instance of ASCONF in the packet,
+ * we can clean our old ASCONF-ACKs.
+ */
+ if (!chunk->has_asconf)
+ sctp_assoc_clean_asconf_ack_cache(asoc);
+
+ /* ADDIP 5.2 E4) When the Sequence Number matches the next one
+ * expected, process the ASCONF as described below and after
+ * processing the ASCONF Chunk, append an ASCONF-ACK Chunk to
+ * the response packet and cache a copy of it (in the event it
+ * later needs to be retransmitted).
+ *
+ * Essentially, do V1-V5.
+ */
+ asconf_ack = sctp_process_asconf((struct sctp_association *)
+ asoc, chunk);
+ if (!asconf_ack)
+ return SCTP_DISPOSITION_NOMEM;
+ } else if (serial < asoc->peer.addip_serial + 1) {
+ /* ADDIP 5.2 E2)
+ * If the value found in the Sequence Number is less than the
+	 * ('Peer-Sequence-Number' + 1), simply skip to the next
+ * ASCONF, and include in the outbound response packet
+ * any previously cached ASCONF-ACK response that was
+ * sent and saved that matches the Sequence Number of the
+ * ASCONF. Note: It is possible that no cached ASCONF-ACK
+ * Chunk exists. This will occur when an older ASCONF
+ * arrives out of order. In such a case, the receiver
+	 * should skip the ASCONF Chunk and not include an ASCONF-ACK
+ * Chunk for that chunk.
+ */
+ asconf_ack = sctp_assoc_lookup_asconf_ack(asoc, hdr->serial);
+ if (!asconf_ack)
+ return SCTP_DISPOSITION_DISCARD;
+
+ /* Reset the transport so that we select the correct one
+ * this time around. This is to make sure that we don't
+ * accidentally use a stale transport that's been removed.
+ */
+ asconf_ack->transport = NULL;
+ } else {
+ /* ADDIP 5.2 E5) Otherwise, the ASCONF Chunk is discarded since
+ * it must be either a stale packet or from an attacker.
+ */
+ return SCTP_DISPOSITION_DISCARD;
+ }
+
+ /* ADDIP 5.2 E6) The destination address of the SCTP packet
+ * containing the ASCONF-ACK Chunks MUST be the source address of
+ * the SCTP packet that held the ASCONF Chunks.
+ *
+ * To do this properly, we'll set the destination address of the chunk
+	 * and at transmit time will try to look up the transport to use.
+ * Since ASCONFs may be bundled, the correct transport may not be
+ * created until we process the entire packet, thus this workaround.
+ */
+ asconf_ack->dest = chunk->source;
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(asconf_ack));
+ if (asoc->new_transport) {
+ sctp_sf_heartbeat(ep, asoc, type, asoc->new_transport, commands);
+ ((struct sctp_association *)asoc)->new_transport = NULL;
+ }
+
+ return SCTP_DISPOSITION_CONSUME;
+}
+
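+/* Dequeue the next queued-up ASCONF chunk, if any, and hand it to the
+ * ASCONF primitive handler so that pending address changes keep being
+ * sent one at a time.
+ */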
+static enum sctp_disposition sctp_send_next_asconf(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ struct sctp_association *asoc,
+ const union sctp_subtype type,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *asconf;
+ struct list_head *entry;
+
+ if (list_empty(&asoc->addip_chunk_list))
+ return SCTP_DISPOSITION_CONSUME;
+
+ entry = asoc->addip_chunk_list.next;
+ asconf = list_entry(entry, struct sctp_chunk, list);
+
+ list_del_init(entry);
+ sctp_chunk_hold(asconf);
+ asoc->addip_last_asconf = asconf;
+
+ return sctp_sf_do_prm_asconf(net, ep, asoc, type, asconf, commands);
+}
+
+/*
+ * ADDIP Section 4.3 General rules for address manipulation
+ * When building TLV parameters for the ASCONF Chunk that will add or
+ * delete IP addresses the D0 to D13 rules should be applied:
+ */
+enum sctp_disposition sctp_sf_do_asconf_ack(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *last_asconf = asoc->addip_last_asconf;
+ struct sctp_paramhdr *err_param = NULL;
+ struct sctp_chunk *asconf_ack = arg;
+ struct sctp_addiphdr *addip_hdr;
+ __u32 sent_serial, rcvd_serial;
+ struct sctp_chunk *abort;
+
+ if (!sctp_vtag_verify(asconf_ack, asoc)) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
+ SCTP_NULL());
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ }
+
+ /* Make sure that the ADDIP chunk has a valid length. */
+ if (!sctp_chunk_length_valid(asconf_ack,
+ sizeof(struct sctp_addip_chunk)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
+ /* ADD-IP, Section 4.1.2:
+ * This chunk MUST be sent in an authenticated way by using
+ * the mechanism defined in [I-D.ietf-tsvwg-sctp-auth]. If this chunk
+ * is received unauthenticated it MUST be silently discarded as
+ * described in [I-D.ietf-tsvwg-sctp-auth].
+ */
+ if (!asoc->peer.asconf_capable ||
+ (!net->sctp.addip_noauth && !asconf_ack->auth))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ addip_hdr = (struct sctp_addiphdr *)asconf_ack->skb->data;
+ rcvd_serial = ntohl(addip_hdr->serial);
+
+ /* Verify the ASCONF-ACK chunk before processing it. */
+ if (!sctp_verify_asconf(asoc, asconf_ack, false, &err_param))
+ return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
+ (void *)err_param, commands);
+
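+	/* Work out the serial number of the last ASCONF we actually sent;
+	 * if none is outstanding, that is simply addip_serial - 1.
+	 */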
+ if (last_asconf) {
+ addip_hdr = (struct sctp_addiphdr *)last_asconf->subh.addip_hdr;
+ sent_serial = ntohl(addip_hdr->serial);
+ } else {
+ sent_serial = asoc->addip_serial - 1;
+ }
+
+ /* D0) If an endpoint receives an ASCONF-ACK that is greater than or
+ * equal to the next serial number to be used but no ASCONF chunk is
+ * outstanding the endpoint MUST ABORT the association. Note that a
+	 * sequence number is "greater than" if it is no more than 2^31-1
+	 * larger than the current sequence number (using serial arithmetic).
+ */
+ if (ADDIP_SERIAL_gte(rcvd_serial, sent_serial + 1) &&
+ !(asoc->addip_last_asconf)) {
+ abort = sctp_make_abort(asoc, asconf_ack,
+ sizeof(struct sctp_errhdr));
+ if (abort) {
+ sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, 0);
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+ SCTP_CHUNK(abort));
+ }
+ /* We are going to ABORT, so we might as well stop
+ * processing the rest of the chunks in the packet.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
+ sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ECONNABORTED));
+ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+ SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
+ SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+ SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
+ return SCTP_DISPOSITION_ABORT;
+ }
+
+ if ((rcvd_serial == sent_serial) && asoc->addip_last_asconf) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
+
+ if (!sctp_process_asconf_ack((struct sctp_association *)asoc,
+ asconf_ack))
+ return sctp_send_next_asconf(net, ep,
+ (struct sctp_association *)asoc,
+ type, commands);
+
+ abort = sctp_make_abort(asoc, asconf_ack,
+ sizeof(struct sctp_errhdr));
+ if (abort) {
+ sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+ SCTP_CHUNK(abort));
+ }
+ /* We are going to ABORT, so we might as well stop
+ * processing the rest of the chunks in the packet.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ECONNABORTED));
+ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+ SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
+ SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+ SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
+ return SCTP_DISPOSITION_ABORT;
+ }
+
+ return SCTP_DISPOSITION_DISCARD;
+}
+
+/* RE-CONFIG Section 5.2 Upon reception of an RECONF Chunk. */
+enum sctp_disposition sctp_sf_do_reconf(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_paramhdr *err_param = NULL;
+ struct sctp_chunk *chunk = arg;
+ struct sctp_reconf_chunk *hdr;
+ union sctp_params param;
+
+ if (!sctp_vtag_verify(chunk, asoc)) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
+ SCTP_NULL());
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ }
+
+ /* Make sure that the RECONF chunk has a valid length. */
+ if (!sctp_chunk_length_valid(chunk, sizeof(*hdr)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
+ if (!sctp_verify_reconf(asoc, chunk, &err_param))
+ return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
+ (void *)err_param, commands);
+
+ hdr = (struct sctp_reconf_chunk *)chunk->chunk_hdr;
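+	/* A RECONF chunk may bundle several reconfiguration request and
+	 * response parameters; handle each one and queue any resulting
+	 * reply chunk and ULP event.
+	 */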
+ sctp_walk_params(param, hdr, params) {
+ struct sctp_chunk *reply = NULL;
+ struct sctp_ulpevent *ev = NULL;
+
+ if (param.p->type == SCTP_PARAM_RESET_OUT_REQUEST)
+ reply = sctp_process_strreset_outreq(
+ (struct sctp_association *)asoc, param, &ev);
+ else if (param.p->type == SCTP_PARAM_RESET_IN_REQUEST)
+ reply = sctp_process_strreset_inreq(
+ (struct sctp_association *)asoc, param, &ev);
+ else if (param.p->type == SCTP_PARAM_RESET_TSN_REQUEST)
+ reply = sctp_process_strreset_tsnreq(
+ (struct sctp_association *)asoc, param, &ev);
+ else if (param.p->type == SCTP_PARAM_RESET_ADD_OUT_STREAMS)
+ reply = sctp_process_strreset_addstrm_out(
+ (struct sctp_association *)asoc, param, &ev);
+ else if (param.p->type == SCTP_PARAM_RESET_ADD_IN_STREAMS)
+ reply = sctp_process_strreset_addstrm_in(
+ (struct sctp_association *)asoc, param, &ev);
+ else if (param.p->type == SCTP_PARAM_RESET_RESPONSE)
+ reply = sctp_process_strreset_resp(
+ (struct sctp_association *)asoc, param, &ev);
+
+ if (ev)
+ sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
+ SCTP_ULPEVENT(ev));
+
+ if (reply)
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+ SCTP_CHUNK(reply));
+ }
+
+ return SCTP_DISPOSITION_CONSUME;
+}
+
+/*
+ * PR-SCTP Section 3.6 Receiver Side Implementation of PR-SCTP
+ *
+ * When a FORWARD TSN chunk arrives, the data receiver MUST first update
+ * its cumulative TSN point to the value carried in the FORWARD TSN
+ * chunk, and then MUST further advance its cumulative TSN point locally
+ * if possible.
+ * After the above processing, the data receiver MUST stop reporting any
+ * missing TSNs earlier than or equal to the new cumulative TSN point.
+ *
+ * Verification Tag: 8.5 Verification Tag [Normal verification]
+ *
+ * The return value is the disposition of the chunk.
+ */
+enum sctp_disposition sctp_sf_eat_fwd_tsn(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_fwdtsn_hdr *fwdtsn_hdr;
+ struct sctp_chunk *chunk = arg;
+ __u16 len;
+ __u32 tsn;
+
+ if (!sctp_vtag_verify(chunk, asoc)) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
+ SCTP_NULL());
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ }
+
+ if (!asoc->peer.prsctp_capable)
+ return sctp_sf_unk_chunk(net, ep, asoc, type, arg, commands);
+
+ /* Make sure that the FORWARD_TSN chunk has valid length. */
+ if (!sctp_chunk_length_valid(chunk, sctp_ftsnchk_len(&asoc->stream)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
+ fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
+ chunk->subh.fwdtsn_hdr = fwdtsn_hdr;
+ len = ntohs(chunk->chunk_hdr->length);
+ len -= sizeof(struct sctp_chunkhdr);
+ skb_pull(chunk->skb, len);
+
+ tsn = ntohl(fwdtsn_hdr->new_cum_tsn);
+ pr_debug("%s: TSN 0x%x\n", __func__, tsn);
+
+ /* The TSN is too high--silently discard the chunk and count on it
+ * getting retransmitted later.
+ */
+ if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
+ goto discard_noforce;
+
+ if (!asoc->stream.si->validate_ftsn(chunk))
+ goto discard_noforce;
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
+ if (len > sctp_ftsnhdr_len(&asoc->stream))
+ sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
+ SCTP_CHUNK(chunk));
+
+ /* Count this as receiving DATA. */
+ if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
+ }
+
+ /* FIXME: For now send a SACK, but DATA processing may
+ * send another.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_NOFORCE());
+
+ return SCTP_DISPOSITION_CONSUME;
+
+discard_noforce:
+ return SCTP_DISPOSITION_DISCARD;
+}
+
+enum sctp_disposition sctp_sf_eat_fwd_tsn_fast(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_fwdtsn_hdr *fwdtsn_hdr;
+ struct sctp_chunk *chunk = arg;
+ __u16 len;
+ __u32 tsn;
+
+ if (!sctp_vtag_verify(chunk, asoc)) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
+ SCTP_NULL());
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ }
+
+ if (!asoc->peer.prsctp_capable)
+ return sctp_sf_unk_chunk(net, ep, asoc, type, arg, commands);
+
+ /* Make sure that the FORWARD_TSN chunk has a valid length. */
+ if (!sctp_chunk_length_valid(chunk, sctp_ftsnchk_len(&asoc->stream)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
+ fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
+ chunk->subh.fwdtsn_hdr = fwdtsn_hdr;
+ len = ntohs(chunk->chunk_hdr->length);
+ len -= sizeof(struct sctp_chunkhdr);
+ skb_pull(chunk->skb, len);
+
+ tsn = ntohl(fwdtsn_hdr->new_cum_tsn);
+ pr_debug("%s: TSN 0x%x\n", __func__, tsn);
+
+ /* The TSN is too high--silently discard the chunk and count on it
+ * getting retransmitted later.
+ */
+ if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
+ goto gen_shutdown;
+
+ if (!asoc->stream.si->validate_ftsn(chunk))
+ goto gen_shutdown;
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
+ if (len > sctp_ftsnhdr_len(&asoc->stream))
+ sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
+ SCTP_CHUNK(chunk));
+
+	/* Go ahead and force a SACK, since we are shutting down. */
+gen_shutdown:
+ /* Implementor's Guide.
+ *
+ * While in SHUTDOWN-SENT state, the SHUTDOWN sender MUST immediately
+ * respond to each received packet containing one or more DATA chunk(s)
+ * with a SACK, a SHUTDOWN chunk, and restart the T2-shutdown timer
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SHUTDOWN, SCTP_NULL());
+ sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
+
+ return SCTP_DISPOSITION_CONSUME;
+}
+
+/*
+ * SCTP-AUTH Section 6.3 Receiving authenticated chunks
+ *
+ * The receiver MUST use the HMAC algorithm indicated in the HMAC
+ * Identifier field. If this algorithm was not specified by the
+ * receiver in the HMAC-ALGO parameter in the INIT or INIT-ACK chunk
+ * during association setup, the AUTH chunk and all chunks after it MUST
+ * be discarded and an ERROR chunk SHOULD be sent with the error cause
+ * defined in Section 4.1.
+ *
+ * If an endpoint with no shared key receives a Shared Key Identifier
+ * other than 0, it MUST silently discard all authenticated chunks. If
+ * the endpoint has at least one endpoint pair shared key for the peer,
+ * it MUST use the key specified by the Shared Key Identifier if a
+ * key has been configured for that Shared Key Identifier. If no
+ * endpoint pair shared key has been configured for that Shared Key
+ * Identifier, all authenticated chunks MUST be silently discarded.
+ *
+ * Verification Tag: 8.5 Verification Tag [Normal verification]
+ *
+ * The return value is the disposition of the chunk.
+ */
+static enum sctp_ierror sctp_sf_authenticate(
+ const struct sctp_association *asoc,
+ struct sctp_chunk *chunk)
+{
+ struct sctp_shared_key *sh_key = NULL;
+ struct sctp_authhdr *auth_hdr;
+ __u8 *save_digest, *digest;
+ struct sctp_hmac *hmac;
+ unsigned int sig_len;
+ __u16 key_id;
+
+ /* Pull in the auth header, so we can do some more verification */
+ auth_hdr = (struct sctp_authhdr *)chunk->skb->data;
+ chunk->subh.auth_hdr = auth_hdr;
+ skb_pull(chunk->skb, sizeof(*auth_hdr));
+
+ /* Make sure that we support the HMAC algorithm from the auth
+ * chunk.
+ */
+ if (!sctp_auth_asoc_verify_hmac_id(asoc, auth_hdr->hmac_id))
+ return SCTP_IERROR_AUTH_BAD_HMAC;
+
+ /* Make sure that the provided shared key identifier has been
+ * configured
+ */
+ key_id = ntohs(auth_hdr->shkey_id);
+ if (key_id != asoc->active_key_id) {
+ sh_key = sctp_auth_get_shkey(asoc, key_id);
+ if (!sh_key)
+ return SCTP_IERROR_AUTH_BAD_KEYID;
+ }
+
+ /* Make sure that the length of the signature matches what
+ * we expect.
+ */
+ sig_len = ntohs(chunk->chunk_hdr->length) -
+ sizeof(struct sctp_auth_chunk);
+ hmac = sctp_auth_get_hmac(ntohs(auth_hdr->hmac_id));
+ if (sig_len != hmac->hmac_len)
+ return SCTP_IERROR_PROTO_VIOLATION;
+
+ /* Now that we've done validation checks, we can compute and
+ * verify the hmac. The steps involved are:
+ * 1. Save the digest from the chunk.
+ * 2. Zero out the digest in the chunk.
+ * 3. Compute the new digest
+ * 4. Compare saved and new digests.
+ */
+ digest = auth_hdr->hmac;
+ skb_pull(chunk->skb, sig_len);
+
+ save_digest = kmemdup(digest, sig_len, GFP_ATOMIC);
+ if (!save_digest)
+ goto nomem;
+
+ memset(digest, 0, sig_len);
+
+ sctp_auth_calculate_hmac(asoc, chunk->skb,
+ (struct sctp_auth_chunk *)chunk->chunk_hdr,
+ sh_key, GFP_ATOMIC);
+
+ /* Discard the packet if the digests do not match */
+ if (memcmp(save_digest, digest, sig_len)) {
+ kfree(save_digest);
+ return SCTP_IERROR_BAD_SIG;
+ }
+
+ kfree(save_digest);
+ chunk->auth = 1;
+
+ return SCTP_IERROR_NO_ERROR;
+nomem:
+ return SCTP_IERROR_NOMEM;
+}
+
+enum sctp_disposition sctp_sf_eat_auth(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg, struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *chunk = arg;
+ struct sctp_authhdr *auth_hdr;
+ struct sctp_chunk *err_chunk;
+ enum sctp_ierror error;
+
+	/* Make sure that the peer is AUTH capable */
+ if (!asoc->peer.auth_capable)
+ return sctp_sf_unk_chunk(net, ep, asoc, type, arg, commands);
+
+ if (!sctp_vtag_verify(chunk, asoc)) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
+ SCTP_NULL());
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ }
+
+ /* Make sure that the AUTH chunk has valid length. */
+ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_auth_chunk)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
+ auth_hdr = (struct sctp_authhdr *)chunk->skb->data;
+ error = sctp_sf_authenticate(asoc, chunk);
+ switch (error) {
+ case SCTP_IERROR_AUTH_BAD_HMAC:
+ /* Generate the ERROR chunk and discard the rest
+ * of the packet
+ */
+ err_chunk = sctp_make_op_error(asoc, chunk,
+ SCTP_ERROR_UNSUP_HMAC,
+ &auth_hdr->hmac_id,
+ sizeof(__u16), 0);
+ if (err_chunk) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+ SCTP_CHUNK(err_chunk));
+ }
+ fallthrough;
+ case SCTP_IERROR_AUTH_BAD_KEYID:
+ case SCTP_IERROR_BAD_SIG:
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ case SCTP_IERROR_PROTO_VIOLATION:
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
+ case SCTP_IERROR_NOMEM:
+ return SCTP_DISPOSITION_NOMEM;
+
+ default: /* Prevent gcc warnings */
+ break;
+ }
+
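+	/* The packet was authenticated with a shared key other than the
+	 * currently active one; tell the ULP about the newly active key.
+	 */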
+ if (asoc->active_key_id != ntohs(auth_hdr->shkey_id)) {
+ struct sctp_ulpevent *ev;
+
+ ev = sctp_ulpevent_make_authkey(asoc, ntohs(auth_hdr->shkey_id),
+ SCTP_AUTH_NEW_KEY, GFP_ATOMIC);
+
+ if (!ev)
+ return SCTP_DISPOSITION_NOMEM;
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
+ SCTP_ULPEVENT(ev));
+ }
+
+ return SCTP_DISPOSITION_CONSUME;
+}
+
+/*
+ * Process an unknown chunk.
+ *
+ * Section: 3.2. Also, 2.1 in the implementor's guide.
+ *
+ * Chunk Types are encoded such that the highest-order two bits specify
+ * the action that must be taken if the processing endpoint does not
+ * recognize the Chunk Type.
+ *
+ * 00 - Stop processing this SCTP packet and discard it, do not process
+ * any further chunks within it.
+ *
+ * 01 - Stop processing this SCTP packet and discard it, do not process
+ * any further chunks within it, and report the unrecognized
+ * chunk in an 'Unrecognized Chunk Type'.
+ *
+ * 10 - Skip this chunk and continue processing.
+ *
+ * 11 - Skip this chunk and continue processing, but report in an ERROR
+ * Chunk using the 'Unrecognized Chunk Type' cause of error.
+ *
+ * The return value is the disposition of the chunk.
+ */
+enum sctp_disposition sctp_sf_unk_chunk(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *unk_chunk = arg;
+ struct sctp_chunk *err_chunk;
+ struct sctp_chunkhdr *hdr;
+
+ pr_debug("%s: processing unknown chunk id:%d\n", __func__, type.chunk);
+
+ if (!sctp_vtag_verify(unk_chunk, asoc))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* Make sure that the chunk has a valid length.
+ * Since we don't know the chunk type, we use a general
+ * chunkhdr structure to make a comparison.
+ */
+ if (!sctp_chunk_length_valid(unk_chunk, sizeof(*hdr)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
+ switch (type.chunk & SCTP_CID_ACTION_MASK) {
+ case SCTP_CID_ACTION_DISCARD:
+ /* Discard the packet. */
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ case SCTP_CID_ACTION_DISCARD_ERR:
+ /* Generate an ERROR chunk as response. */
+ hdr = unk_chunk->chunk_hdr;
+ err_chunk = sctp_make_op_error(asoc, unk_chunk,
+ SCTP_ERROR_UNKNOWN_CHUNK, hdr,
+ SCTP_PAD4(ntohs(hdr->length)),
+ 0);
+ if (err_chunk) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+ SCTP_CHUNK(err_chunk));
+ }
+
+ /* Discard the packet. */
+ sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ return SCTP_DISPOSITION_CONSUME;
+ case SCTP_CID_ACTION_SKIP:
+ /* Skip the chunk. */
+ return SCTP_DISPOSITION_DISCARD;
+ case SCTP_CID_ACTION_SKIP_ERR:
+ /* Generate an ERROR chunk as response. */
+ hdr = unk_chunk->chunk_hdr;
+ err_chunk = sctp_make_op_error(asoc, unk_chunk,
+ SCTP_ERROR_UNKNOWN_CHUNK, hdr,
+ SCTP_PAD4(ntohs(hdr->length)),
+ 0);
+ if (err_chunk) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+ SCTP_CHUNK(err_chunk));
+ }
+ /* Skip the chunk. */
+ return SCTP_DISPOSITION_CONSUME;
+ default:
+ break;
+ }
+
+ return SCTP_DISPOSITION_DISCARD;
+}
+
+/*
+ * Discard the chunk.
+ *
+ * Section: 0.2, 5.2.3, 5.2.5, 5.2.6, 6.0, 8.4.6, 8.5.1c, 9.2
+ * [Too numerous to mention...]
+ * Verification Tag: No verification needed.
+ * Inputs
+ * (endpoint, asoc, chunk)
+ *
+ * Outputs
+ * (asoc, reply_msg, msg_up, timers, counters)
+ *
+ * The return value is the disposition of the chunk.
+ */
+enum sctp_disposition sctp_sf_discard_chunk(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *chunk = arg;
+
+ if (asoc && !sctp_vtag_verify(chunk, asoc))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* Make sure that the chunk has a valid length.
+ * Since we don't know the chunk type, we use a general
+ * chunkhdr structure to make a comparison.
+ */
+ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
+ pr_debug("%s: chunk:%d is discarded\n", __func__, type.chunk);
+
+ return SCTP_DISPOSITION_DISCARD;
+}
+
+/*
+ * Discard the whole packet.
+ *
+ * Section: 8.4 2)
+ *
+ * 2) If the OOTB packet contains an ABORT chunk, the receiver MUST
+ * silently discard the OOTB packet and take no further action.
+ *
+ * Verification Tag: No verification necessary
+ *
+ * Inputs
+ * (endpoint, asoc, chunk)
+ *
+ * Outputs
+ * (asoc, reply_msg, msg_up, timers, counters)
+ *
+ * The return value is the disposition of the chunk.
+ */
+enum sctp_disposition sctp_sf_pdiscard(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg, struct sctp_cmd_seq *commands)
+{
+ SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_DISCARDS);
+ sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
+
+ return SCTP_DISPOSITION_CONSUME;
+}
+
+
+/*
+ * The other end is violating protocol.
+ *
+ * Section: Not specified
+ * Verification Tag: Not specified
+ * Inputs
+ * (endpoint, asoc, chunk)
+ *
+ * Outputs
+ * (asoc, reply_msg, msg_up, timers, counters)
+ *
+ * We simply tag the chunk as a violation. The state machine will log
+ * the violation and continue.
+ */
+enum sctp_disposition sctp_sf_violation(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *chunk = arg;
+
+ if (!sctp_vtag_verify(chunk, asoc))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* Make sure that the chunk has a valid length. */
+ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
+ return SCTP_DISPOSITION_VIOLATION;
+}
+
+/*
+ * Common function to handle a protocol violation.
+ */
+static enum sctp_disposition sctp_sf_abort_violation(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ void *arg,
+ struct sctp_cmd_seq *commands,
+ const __u8 *payload,
+ const size_t paylen)
+{
+ struct sctp_packet *packet = NULL;
+ struct sctp_chunk *chunk = arg;
+ struct sctp_chunk *abort = NULL;
+
+ /* SCTP-AUTH, Section 6.3:
+ * It should be noted that if the receiver wants to tear
+ * down an association in an authenticated way only, the
+ * handling of malformed packets should not result in
+ * tearing down the association.
+ *
+ * This means that if we only want to abort associations
+ * in an authenticated way (i.e AUTH+ABORT), then we
+ * can't destroy this association just because the packet
+ * was malformed.
+ */
+ if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
+ goto discard;
+
+ /* Make the abort chunk. */
+ abort = sctp_make_abort_violation(asoc, chunk, payload, paylen);
+ if (!abort)
+ goto nomem;
+
+ if (asoc) {
+ /* Treat INIT-ACK as a special case during COOKIE-WAIT. */
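+		/* In COOKIE-WAIT the peer's Initiate Tag has not been
+		 * recorded yet (init_tag is still 0), so take it from the
+		 * bundled INIT ACK when the chunk is long enough, or fall
+		 * back to a reflected verification tag (T bit) so the peer
+		 * will still accept the ABORT.
+		 */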
+ if (chunk->chunk_hdr->type == SCTP_CID_INIT_ACK &&
+ !asoc->peer.i.init_tag) {
+ struct sctp_initack_chunk *initack;
+
+ initack = (struct sctp_initack_chunk *)chunk->chunk_hdr;
+ if (!sctp_chunk_length_valid(chunk, sizeof(*initack)))
+ abort->chunk_hdr->flags |= SCTP_CHUNK_FLAG_T;
+ else {
+ unsigned int inittag;
+
+ inittag = ntohl(initack->init_hdr.init_tag);
+ sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_INITTAG,
+ SCTP_U32(inittag));
+ }
+ }
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
+ SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
+
+ if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ECONNREFUSED));
+ sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
+ SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
+ } else {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ECONNABORTED));
+ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+ SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
+ SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
+ }
+ } else {
+ packet = sctp_ootb_pkt_new(net, asoc, chunk);
+
+ if (!packet)
+ goto nomem_pkt;
+
+ if (sctp_test_T_bit(abort))
+ packet->vtag = ntohl(chunk->sctp_hdr->vtag);
+
+ abort->skb->sk = ep->base.sk;
+
+ sctp_packet_append_chunk(packet, abort);
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
+ SCTP_PACKET(packet));
+
+ SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
+ }
+
+ SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+
+discard:
+ sctp_sf_pdiscard(net, ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
+ return SCTP_DISPOSITION_ABORT;
+
+nomem_pkt:
+ sctp_chunk_free(abort);
+nomem:
+ return SCTP_DISPOSITION_NOMEM;
+}
+
+/*
+ * Handle a protocol violation when the chunk length is invalid.
+ * "Invalid" length is identified as smaller than the minimal length a
+ * given chunk can be. For example, a SACK chunk has invalid length
+ * if its length is set to be smaller than the size of struct sctp_sack_chunk.
+ *
+ * We inform the other end by sending an ABORT with a Protocol Violation
+ * error code.
+ *
+ * Section: Not specified
+ * Verification Tag: Nothing to do
+ * Inputs
+ * (endpoint, asoc, chunk)
+ *
+ * Outputs
+ * (reply_msg, msg_up, counters)
+ *
+ * Generate an ABORT chunk and terminate the association.
+ */
+static enum sctp_disposition sctp_sf_violation_chunklen(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ static const char err_str[] = "The following chunk had invalid length:";
+
+ return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
+ sizeof(err_str));
+}
+
+/*
+ * Handle a protocol violation when the parameter length is invalid.
+ * If the length is smaller than the minimum length of a given parameter,
+ * or the accumulated length of multiple parameters exceeds the end of the
+ * chunk, the length is considered invalid.
+ */
+static enum sctp_disposition sctp_sf_violation_paramlen(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg, void *ext,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_paramhdr *param = ext;
+ struct sctp_chunk *abort = NULL;
+ struct sctp_chunk *chunk = arg;
+
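+	/* Same SCTP-AUTH consideration as in sctp_sf_abort_violation():
+	 * if ABORT chunks are only accepted in an authenticated way, a
+	 * malformed parameter must not tear down the association, so the
+	 * packet is only discarded.
+	 */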
+ if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
+ goto discard;
+
+ /* Make the abort chunk. */
+ abort = sctp_make_violation_paramlen(asoc, chunk, param);
+ if (!abort)
+ goto nomem;
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
+ SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ECONNABORTED));
+ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+ SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
+ SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
+ SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+
+discard:
+ sctp_sf_pdiscard(net, ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
+ return SCTP_DISPOSITION_ABORT;
+nomem:
+ return SCTP_DISPOSITION_NOMEM;
+}
+
+/* Handle a protocol violation when the peer tries to advance the
+ * cumulative TSN ACK to a point beyond the max TSN currently sent.
+ *
+ * We inform the other end by sending an ABORT with a Protocol Violation
+ * error code.
+ */
+static enum sctp_disposition sctp_sf_violation_ctsn(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ static const char err_str[] = "The cumulative tsn ack beyond the max tsn currently sent:";
+
+ return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
+ sizeof(err_str));
+}
+
+/* Handle protocol violation of an invalid chunk bundling. For example,
+ * when we have an association and we receive bundled INIT-ACK, or
+ * SHUTDOWN-COMPLETE, our peer is clearly violating the "MUST NOT bundle"
+ * statement from the specs. Additionally, there might be an attacker
+ * on the path and we may not want to continue this communication.
+ */
+static enum sctp_disposition sctp_sf_violation_chunk(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ static const char err_str[] = "The following chunk violates protocol:";
+
+ return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
+ sizeof(err_str));
+}
+/***************************************************************************
+ * These are the state functions for handling primitive (Section 10) events.
+ ***************************************************************************/
+/*
+ * sctp_sf_do_prm_asoc
+ *
+ * Section: 10.1 ULP-to-SCTP
+ * B) Associate
+ *
+ * Format: ASSOCIATE(local SCTP instance name, destination transport addr,
+ * outbound stream count)
+ * -> association id [,destination transport addr list] [,outbound stream
+ * count]
+ *
+ * This primitive allows the upper layer to initiate an association to a
+ * specific peer endpoint.
+ *
+ * The peer endpoint shall be specified by one of the transport addresses
+ * which defines the endpoint (see Section 1.4). If the local SCTP
+ * instance has not been initialized, the ASSOCIATE is considered an
+ * error.
+ * [This is not relevant for the kernel implementation since we do all
+ * initialization at boot time. If we hadn't initialized we wouldn't
+ * get anywhere near this code.]
+ *
+ * An association id, which is a local handle to the SCTP association,
+ * will be returned on successful establishment of the association. If
+ * SCTP is not able to open an SCTP association with the peer endpoint,
+ * an error is returned.
+ * [In the kernel implementation, the struct sctp_association needs to
+ * be created BEFORE causing this primitive to run.]
+ *
+ * Other association parameters may be returned, including the
+ * complete destination transport addresses of the peer as well as the
+ * outbound stream count of the local endpoint. One of the transport
+ * address from the returned destination addresses will be selected by
+ * the local endpoint as default primary path for sending SCTP packets
+ * to this peer. The returned "destination transport addr list" can
+ * be used by the ULP to change the default primary path or to force
+ * sending a packet to a specific transport address. [All of this
+ * stuff happens when the INIT ACK arrives. This is a NON-BLOCKING
+ * function.]
+ *
+ * Mandatory attributes:
+ *
+ * o local SCTP instance name - obtained from the INITIALIZE operation.
+ * [This is the argument asoc.]
+ * o destination transport addr - specified as one of the transport
+ * addresses of the peer endpoint with which the association is to be
+ * established.
+ * [This is asoc->peer.active_path.]
+ * o outbound stream count - the number of outbound streams the ULP
+ * would like to open towards this peer endpoint.
+ * [BUG: This is not currently implemented.]
+ * Optional attributes:
+ *
+ * None.
+ *
+ * The return value is a disposition.
+ */
+enum sctp_disposition sctp_sf_do_prm_asoc(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_association *my_asoc;
+ struct sctp_chunk *repl;
+
+ /* The comment below says that we enter COOKIE-WAIT AFTER
+ * sending the INIT, but that doesn't actually work in our
+ * implementation...
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+ SCTP_STATE(SCTP_STATE_COOKIE_WAIT));
+
+ /* RFC 2960 5.1 Normal Establishment of an Association
+ *
+ * A) "A" first sends an INIT chunk to "Z". In the INIT, "A"
+ * must provide its Verification Tag (Tag_A) in the Initiate
+ * Tag field. Tag_A SHOULD be a random number in the range of
+ * 1 to 4294967295 (see 5.3.1 for Tag value selection). ...
+ */
+
+ repl = sctp_make_init(asoc, &asoc->base.bind_addr, GFP_ATOMIC, 0);
+ if (!repl)
+ goto nomem;
+
+ /* Choose transport for INIT. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_INIT_CHOOSE_TRANSPORT,
+ SCTP_CHUNK(repl));
+
+ /* Cast away the const modifier, as we want to just
+	 * rerun it through as a side effect.
+ */
+ my_asoc = (struct sctp_association *)asoc;
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(my_asoc));
+
+ /* After sending the INIT, "A" starts the T1-init timer and
+ * enters the COOKIE-WAIT state.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
+ return SCTP_DISPOSITION_CONSUME;
+
+nomem:
+ return SCTP_DISPOSITION_NOMEM;
+}
+
+/*
+ * Process the SEND primitive.
+ *
+ * Section: 10.1 ULP-to-SCTP
+ * E) Send
+ *
+ * Format: SEND(association id, buffer address, byte count [,context]
+ * [,stream id] [,life time] [,destination transport address]
+ * [,unorder flag] [,no-bundle flag] [,payload protocol-id] )
+ * -> result
+ *
+ * This is the main method to send user data via SCTP.
+ *
+ * Mandatory attributes:
+ *
+ * o association id - local handle to the SCTP association
+ *
+ * o buffer address - the location where the user message to be
+ * transmitted is stored;
+ *
+ * o byte count - The size of the user data in number of bytes;
+ *
+ * Optional attributes:
+ *
+ * o context - an optional 32 bit integer that will be carried in the
+ * sending failure notification to the ULP if the transportation of
+ * this User Message fails.
+ *
+ * o stream id - to indicate which stream to send the data on. If not
+ * specified, stream 0 will be used.
+ *
+ * o life time - specifies the life time of the user data. The user data
+ * will not be sent by SCTP after the life time expires. This
+ * parameter can be used to avoid efforts to transmit stale
+ * user messages. SCTP notifies the ULP if the data cannot be
+ * initiated to transport (i.e. sent to the destination via SCTP's
+ * send primitive) within the life time variable. However, the
+ * user data will be transmitted if SCTP has attempted to transmit a
+ * chunk before the life time expired.
+ *
+ * o destination transport address - specified as one of the destination
+ * transport addresses of the peer endpoint to which this packet
+ * should be sent. Whenever possible, SCTP should use this destination
+ * transport address for sending the packets, instead of the current
+ * primary path.
+ *
+ * o unorder flag - this flag, if present, indicates that the user
+ * would like the data delivered in an unordered fashion to the peer
+ * (i.e., the U flag is set to 1 on all DATA chunks carrying this
+ * message).
+ *
+ * o no-bundle flag - instructs SCTP not to bundle this user data with
+ * other outbound DATA chunks. SCTP MAY still bundle even when
+ * this flag is present, when faced with network congestion.
+ *
+ * o payload protocol-id - A 32 bit unsigned integer that is to be
+ * passed to the peer indicating the type of payload protocol data
+ * being transmitted. This value is passed as opaque data by SCTP.
+ *
+ * The return value is the disposition.
+ */
+enum sctp_disposition sctp_sf_do_prm_send(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_datamsg *msg = arg;
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_SEND_MSG, SCTP_DATAMSG(msg));
+ return SCTP_DISPOSITION_CONSUME;
+}
+
+/*
+ * Process the SHUTDOWN primitive.
+ *
+ * Section: 10.1:
+ * C) Shutdown
+ *
+ * Format: SHUTDOWN(association id)
+ * -> result
+ *
+ * Gracefully closes an association. Any locally queued user data
+ * will be delivered to the peer. The association will be terminated only
+ * after the peer acknowledges all the SCTP packets sent. A success code
+ * will be returned on successful termination of the association. If
+ * attempting to terminate the association results in a failure, an error
+ * code shall be returned.
+ *
+ * Mandatory attributes:
+ *
+ * o association id - local handle to the SCTP association
+ *
+ * Optional attributes:
+ *
+ * None.
+ *
+ * The return value is the disposition.
+ */
+enum sctp_disposition sctp_sf_do_9_2_prm_shutdown(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ enum sctp_disposition disposition;
+
+ /* From 9.2 Shutdown of an Association
+ * Upon receipt of the SHUTDOWN primitive from its upper
+ * layer, the endpoint enters SHUTDOWN-PENDING state and
+ * remains there until all outstanding data has been
+ * acknowledged by its peer. The endpoint accepts no new data
+ * from its upper layer, but retransmits data to the far end
+ * if necessary to fill gaps.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+ SCTP_STATE(SCTP_STATE_SHUTDOWN_PENDING));
+
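+	/* If nothing is left in the outqueue, start the SHUTDOWN sequence
+	 * right away; otherwise stay in SHUTDOWN-PENDING and let the
+	 * SCTP_EVENT_NO_PENDING_TSN event start it once the last
+	 * outstanding TSN has been acknowledged.
+	 */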
+ disposition = SCTP_DISPOSITION_CONSUME;
+ if (sctp_outq_is_empty(&asoc->outqueue)) {
+ disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type,
+ arg, commands);
+ }
+
+ return disposition;
+}
+
+/*
+ * Process the ABORT primitive.
+ *
+ * Section: 10.1:
+ * C) Abort
+ *
+ * Format: Abort(association id [, cause code])
+ * -> result
+ *
+ * Ungracefully closes an association. Any locally queued user data
+ * will be discarded and an ABORT chunk is sent to the peer. A success code
+ * will be returned on successful abortion of the association. If
+ * attempting to abort the association results in a failure, an error
+ * code shall be returned.
+ *
+ * Mandatory attributes:
+ *
+ * o association id - local handle to the SCTP association
+ *
+ * Optional attributes:
+ *
+ * o cause code - reason of the abort to be passed to the peer
+ *
+ *
+ * The return value is the disposition.
+ */
+enum sctp_disposition sctp_sf_do_9_1_prm_abort(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ /* From 9.1 Abort of an Association
+	 * Upon receipt of the ABORT primitive from its upper
+	 * layer, the endpoint enters the CLOSED state, discards
+	 * all outstanding and queued data, and sends an ABORT
+	 * chunk to its peer.  The endpoint accepts no new data
+	 * from its upper layer.
+ */
+ struct sctp_chunk *abort = arg;
+
+ if (abort)
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
+
+	/* Even if we can't send the ABORT due to low memory, delete the
+ * TCB. This is a departure from our typical NOMEM handling.
+ */
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ECONNABORTED));
+ /* Delete the established association. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+ SCTP_PERR(SCTP_ERROR_USER_ABORT));
+
+ SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+ SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
+
+ return SCTP_DISPOSITION_ABORT;
+}
+
+/* We tried an illegal operation on an association which is closed. */
+enum sctp_disposition sctp_sf_error_closed(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_ERROR, SCTP_ERROR(-EINVAL));
+ return SCTP_DISPOSITION_CONSUME;
+}
+
+/* We tried an illegal operation on an association which is shutting
+ * down.
+ */
+enum sctp_disposition sctp_sf_error_shutdown(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_ERROR,
+ SCTP_ERROR(-ESHUTDOWN));
+ return SCTP_DISPOSITION_CONSUME;
+}
+
+/*
+ * sctp_cookie_wait_prm_shutdown
+ *
+ * Section: 4 Note: 2
+ * Verification Tag:
+ * Inputs
+ * (endpoint, asoc)
+ *
+ * The RFC does not explicitly address this issue, but this is the route through the
+ * state table when someone issues a shutdown while in COOKIE_WAIT state.
+ *
+ * Outputs
+ * (timers)
+ */
+enum sctp_disposition sctp_sf_cookie_wait_prm_shutdown(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+ SCTP_STATE(SCTP_STATE_CLOSED));
+
+ SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS);
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
+
+ return SCTP_DISPOSITION_DELETE_TCB;
+}
+
+/*
+ * sctp_cookie_echoed_prm_shutdown
+ *
+ * Section: 4 Note: 2
+ * Verification Tag:
+ * Inputs
+ * (endpoint, asoc)
+ *
+ * The RFC does not explicitly address this issue, but this is the route through the
+ * state table when someone issues a shutdown while in COOKIE_ECHOED state.
+ *
+ * Outputs
+ * (timers)
+ */
+enum sctp_disposition sctp_sf_cookie_echoed_prm_shutdown(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ /* There is a single T1 timer, so we should be able to use
+	 * a common function with the COOKIE-WAIT state.
+ */
+ return sctp_sf_cookie_wait_prm_shutdown(net, ep, asoc, type, arg, commands);
+}
+
+/*
+ * sctp_sf_cookie_wait_prm_abort
+ *
+ * Section: 4 Note: 2
+ * Verification Tag:
+ * Inputs
+ * (endpoint, asoc)
+ *
+ * The RFC does not explicitly address this issue, but this is the route through the
+ * state table when someone issues an abort while in COOKIE_WAIT state.
+ *
+ * Outputs
+ * (timers)
+ */
+enum sctp_disposition sctp_sf_cookie_wait_prm_abort(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *abort = arg;
+
+ /* Stop T1-init timer */
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+
+ if (abort)
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+ SCTP_STATE(SCTP_STATE_CLOSED));
+
+ SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+
+	/* Even if we can't send the ABORT due to low memory, delete the
+ * TCB. This is a departure from our typical NOMEM handling.
+ */
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ECONNREFUSED));
+	/* Abort the association being established. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
+ SCTP_PERR(SCTP_ERROR_USER_ABORT));
+
+ return SCTP_DISPOSITION_ABORT;
+}
+
+/*
+ * sctp_sf_cookie_echoed_prm_abort
+ *
+ * Section: 4 Note: 3
+ * Verification Tag:
+ * Inputs
+ * (endpoint, asoc)
+ *
+ * The RFC does not explicitly address this issue, but this is the route through the
+ * state table when someone issues an abort while in COOKIE_ECHOED state.
+ *
+ * Outputs
+ * (timers)
+ */
+enum sctp_disposition sctp_sf_cookie_echoed_prm_abort(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ /* There is a single T1 timer, so we should be able to use
+	 * a common function with the COOKIE-WAIT state.
+ */
+ return sctp_sf_cookie_wait_prm_abort(net, ep, asoc, type, arg, commands);
+}
+
+/*
+ * sctp_sf_shutdown_pending_prm_abort
+ *
+ * Inputs
+ * (endpoint, asoc)
+ *
+ * The RFC does not explicitly address this issue, but this is the route through the
+ * state table when someone issues an abort while in SHUTDOWN-PENDING state.
+ *
+ * Outputs
+ * (timers)
+ */
+enum sctp_disposition sctp_sf_shutdown_pending_prm_abort(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ /* Stop the T5-shutdown guard timer. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
+
+ return sctp_sf_do_9_1_prm_abort(net, ep, asoc, type, arg, commands);
+}
+
+/*
+ * sctp_sf_shutdown_sent_prm_abort
+ *
+ * Inputs
+ * (endpoint, asoc)
+ *
+ * The RFC does not explicitly address this issue, but this is the route through the
+ * state table when someone issues an abort while in SHUTDOWN-SENT state.
+ *
+ * Outputs
+ * (timers)
+ */
+enum sctp_disposition sctp_sf_shutdown_sent_prm_abort(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ /* Stop the T2-shutdown timer. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
+
+ /* Stop the T5-shutdown guard timer. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
+
+ return sctp_sf_do_9_1_prm_abort(net, ep, asoc, type, arg, commands);
+}
+
+/*
+ * sctp_sf_shutdown_ack_sent_prm_abort
+ *
+ * Inputs
+ * (endpoint, asoc)
+ *
+ * The RFC does not explicitly address this issue, but this is the route through the
+ * state table when someone issues an abort while in SHUTDOWN-ACK-SENT state.
+ *
+ * Outputs
+ * (timers)
+ */
+enum sctp_disposition sctp_sf_shutdown_ack_sent_prm_abort(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+	/* The same T2 timer is used, so we should be able to use a
+	 * common function with the SHUTDOWN-SENT state.
+ */
+ return sctp_sf_shutdown_sent_prm_abort(net, ep, asoc, type, arg, commands);
+}
+
+/*
+ * Process the REQUESTHEARTBEAT primitive
+ *
+ * 10.1 ULP-to-SCTP
+ * J) Request Heartbeat
+ *
+ * Format: REQUESTHEARTBEAT(association id, destination transport address)
+ *
+ * -> result
+ *
+ * Instructs the local endpoint to perform a HeartBeat on the specified
+ * destination transport address of the given association. The returned
+ * result should indicate whether the transmission of the HEARTBEAT
+ * chunk to the destination address is successful.
+ *
+ * Mandatory attributes:
+ *
+ * o association id - local handle to the SCTP association
+ *
+ * o destination transport address - the transport address of the
+ * association on which a heartbeat should be issued.
+ */
+enum sctp_disposition sctp_sf_do_prm_requestheartbeat(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ if (SCTP_DISPOSITION_NOMEM == sctp_sf_heartbeat(ep, asoc, type,
+ (struct sctp_transport *)arg, commands))
+ return SCTP_DISPOSITION_NOMEM;
+
+ /*
+ * RFC 2960 (bis), section 8.3
+ *
+ * D) Request an on-demand HEARTBEAT on a specific destination
+ * transport address of a given association.
+ *
+ * The endpoint should increment the respective error counter of
+ * the destination transport address each time a HEARTBEAT is sent
+ * to that address and not acknowledged within one RTO.
+ *
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_HB_SENT,
+ SCTP_TRANSPORT(arg));
+ return SCTP_DISPOSITION_CONSUME;
+}
+
+/*
+ * ADDIP Section 4.1 ASCONF Chunk Procedures
+ * When an endpoint has an ASCONF signaled change to be sent to the
+ * remote endpoint, it should do A1 to A9
+ */
+enum sctp_disposition sctp_sf_do_prm_asconf(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *chunk = arg;
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T4, SCTP_CHUNK(chunk));
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(chunk));
+ return SCTP_DISPOSITION_CONSUME;
+}
+
+/* RE-CONFIG Section 5.1 RECONF Chunk Procedures */
+enum sctp_disposition sctp_sf_do_prm_reconf(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *chunk = arg;
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(chunk));
+ return SCTP_DISPOSITION_CONSUME;
+}
+
+/*
+ * Ignore the primitive event
+ *
+ * The return value is the disposition of the primitive.
+ */
+enum sctp_disposition sctp_sf_ignore_primitive(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ pr_debug("%s: primitive type:%d is ignored\n", __func__,
+ type.primitive);
+
+ return SCTP_DISPOSITION_DISCARD;
+}
+
+/***************************************************************************
+ * These are the state functions for the OTHER events.
+ ***************************************************************************/
+
+/*
+ * When the SCTP stack has no more user data to send or retransmit, this
+ * notification is given to the user. Also, at the time when a user app
+ * subscribes to this event, if there is no data to be sent or
+ * retransmitted, the stack will immediately send up this notification.
+ */
+enum sctp_disposition sctp_sf_do_no_pending_tsn(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_ulpevent *event;
+
+ event = sctp_ulpevent_make_sender_dry_event(asoc, GFP_ATOMIC);
+ if (!event)
+ return SCTP_DISPOSITION_NOMEM;
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(event));
+
+ return SCTP_DISPOSITION_CONSUME;
+}
+
+/*
+ * Start the shutdown negotiation.
+ *
+ * From Section 9.2:
+ * Once all its outstanding data has been acknowledged, the endpoint
+ * shall send a SHUTDOWN chunk to its peer including in the Cumulative
+ * TSN Ack field the last sequential TSN it has received from the peer.
+ * It shall then start the T2-shutdown timer and enter the SHUTDOWN-SENT
+ * state. If the timer expires, the endpoint must re-send the SHUTDOWN
+ * with the updated last sequential TSN received from its peer.
+ *
+ * The return value is the disposition.
+ */
+enum sctp_disposition sctp_sf_do_9_2_start_shutdown(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *reply;
+
+ /* Once all its outstanding data has been acknowledged, the
+ * endpoint shall send a SHUTDOWN chunk to its peer including
+ * in the Cumulative TSN Ack field the last sequential TSN it
+ * has received from the peer.
+ */
+ reply = sctp_make_shutdown(asoc, arg);
+ if (!reply)
+ goto nomem;
+
+ /* Set the transport for the SHUTDOWN chunk and the timeout for the
+ * T2-shutdown timer.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply));
+
+ /* It shall then start the T2-shutdown timer */
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
+
+ /* RFC 4960 Section 9.2
+ * The sender of the SHUTDOWN MAY also start an overall guard timer
+ * 'T5-shutdown-guard' to bound the overall time for shutdown sequence.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
+
+ if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
+
+ /* and enter the SHUTDOWN-SENT state. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+ SCTP_STATE(SCTP_STATE_SHUTDOWN_SENT));
+
+ /* sctp-implguide 2.10 Issues with Heartbeating and failover
+ *
+ * HEARTBEAT ... is discontinued after sending either SHUTDOWN
+ * or SHUTDOWN-ACK.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL());
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
+
+ return SCTP_DISPOSITION_CONSUME;
+
+nomem:
+ return SCTP_DISPOSITION_NOMEM;
+}
+
+/*
+ * Generate a SHUTDOWN ACK now that everything is SACK'd.
+ *
+ * From Section 9.2:
+ *
+ * If it has no more outstanding DATA chunks, the SHUTDOWN receiver
+ * shall send a SHUTDOWN ACK and start a T2-shutdown timer of its own,
+ * entering the SHUTDOWN-ACK-SENT state. If the timer expires, the
+ * endpoint must re-send the SHUTDOWN ACK.
+ *
+ * The return value is the disposition.
+ */
+enum sctp_disposition sctp_sf_do_9_2_shutdown_ack(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *chunk = arg;
+ struct sctp_chunk *reply;
+
+ /* There are 2 ways of getting here:
+ * 1) called in response to a SHUTDOWN chunk
+ * 2) called when SCTP_EVENT_NO_PENDING_TSN event is issued.
+ *
+ * For the case (2), the arg parameter is set to NULL. We need
+	 * to check that we have a chunk before accessing its fields.
+ */
+ if (chunk) {
+ if (!sctp_vtag_verify(chunk, asoc))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg,
+ commands);
+
+ /* Make sure that the SHUTDOWN chunk has a valid length. */
+ if (!sctp_chunk_length_valid(
+ chunk, sizeof(struct sctp_shutdown_chunk)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type,
+ arg, commands);
+ }
+
+ /* If it has no more outstanding DATA chunks, the SHUTDOWN receiver
+ * shall send a SHUTDOWN ACK ...
+ */
+ reply = sctp_make_shutdown_ack(asoc, chunk);
+ if (!reply)
+ goto nomem;
+
+ /* Set the transport for the SHUTDOWN ACK chunk and the timeout for
+ * the T2-shutdown timer.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply));
+
+ /* and start/restart a T2-shutdown timer of its own, */
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
+
+ if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
+
+ /* Enter the SHUTDOWN-ACK-SENT state. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+ SCTP_STATE(SCTP_STATE_SHUTDOWN_ACK_SENT));
+
+ /* sctp-implguide 2.10 Issues with Heartbeating and failover
+ *
+ * HEARTBEAT ... is discontinued after sending either SHUTDOWN
+ * or SHUTDOWN-ACK.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL());
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
+
+ return SCTP_DISPOSITION_CONSUME;
+
+nomem:
+ return SCTP_DISPOSITION_NOMEM;
+}
+
+/*
+ * Ignore the event defined as other
+ *
+ * The return value is the disposition of the event.
+ */
+enum sctp_disposition sctp_sf_ignore_other(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ pr_debug("%s: the event other type:%d is ignored\n",
+ __func__, type.other);
+
+ return SCTP_DISPOSITION_DISCARD;
+}
+
+/************************************************************
+ * These are the state functions for handling timeout events.
+ ************************************************************/
+
+/*
+ * RTX Timeout
+ *
+ * Section: 6.3.3 Handle T3-rtx Expiration
+ *
+ * Whenever the retransmission timer T3-rtx expires for a destination
+ * address, do the following:
+ * [See below]
+ *
+ * The return value is the disposition of the chunk.
+ */
+enum sctp_disposition sctp_sf_do_6_3_3_rtx(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_transport *transport = arg;
+
+ SCTP_INC_STATS(net, SCTP_MIB_T3_RTX_EXPIREDS);
+
+ if (asoc->overall_error_count >= asoc->max_retrans) {
+ if (asoc->peer.zero_window_announced &&
+ asoc->state == SCTP_STATE_SHUTDOWN_PENDING) {
+ /*
+ * We are here likely because the receiver had its rwnd
+ * closed for a while and we have not been able to
+ * transmit the locally queued data within the maximum
+ * retransmission attempts limit. Start the T5
+ * shutdown guard timer to give the receiver one last
+ * chance and some additional time to recover before
+ * aborting.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START_ONCE,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
+ } else {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
+ /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+ SCTP_PERR(SCTP_ERROR_NO_ERROR));
+ SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+ SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
+ return SCTP_DISPOSITION_DELETE_TCB;
+ }
+ }
+
+ /* E1) For the destination address for which the timer
+ * expires, adjust its ssthresh with rules defined in Section
+ * 7.2.3 and set the cwnd <- MTU.
+ */
+
+ /* E2) For the destination address for which the timer
+ * expires, set RTO <- RTO * 2 ("back off the timer"). The
+ * maximum value discussed in rule C7 above (RTO.max) may be
+ * used to provide an upper bound to this doubling operation.
+ */
+
+ /* E3) Determine how many of the earliest (i.e., lowest TSN)
+ * outstanding DATA chunks for the address for which the
+ * T3-rtx has expired will fit into a single packet, subject
+ * to the MTU constraint for the path corresponding to the
+ * destination transport address to which the retransmission
+ * is being sent (this may be different from the address for
+ * which the timer expires [see Section 6.4]). Call this
+ * value K. Bundle and retransmit those K DATA chunks in a
+ * single packet to the destination endpoint.
+ *
+ * Note: Any DATA chunks that were sent to the address for
+ * which the T3-rtx timer expired but did not fit in one MTU
+ * (rule E3 above), should be marked for retransmission and
+ * sent as soon as cwnd allows (normally when a SACK arrives).
+ */
+
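+	/* The cwnd/ssthresh adjustment, RTO back-off and retransmission
+	 * described in E1-E3 above are carried out by the side-effect
+	 * handlers of the STRIKE and RETRAN commands added below.
+	 */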
+ /* Do some failure management (Section 8.2). */
+ sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE, SCTP_TRANSPORT(transport));
+
+ /* NB: Rules E4 and F1 are implicit in R1. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_RETRAN, SCTP_TRANSPORT(transport));
+
+ return SCTP_DISPOSITION_CONSUME;
+}
+
+/*
+ * Generate delayed SACK on timeout
+ *
+ * Section: 6.2 Acknowledgement on Reception of DATA Chunks
+ *
+ * The guidelines on delayed acknowledgement algorithm specified in
+ * Section 4.2 of [RFC2581] SHOULD be followed. Specifically, an
+ * acknowledgement SHOULD be generated for at least every second packet
+ * (not every second DATA chunk) received, and SHOULD be generated
+ * within 200 ms of the arrival of any unacknowledged DATA chunk. In
+ * some situations it may be beneficial for an SCTP transmitter to be
+ * more conservative than the algorithms detailed in this document
+ * allow. However, an SCTP transmitter MUST NOT be more aggressive than
+ * the following algorithms allow.
+ */
+enum sctp_disposition sctp_sf_do_6_2_sack(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ SCTP_INC_STATS(net, SCTP_MIB_DELAY_SACK_EXPIREDS);
+ sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
+ return SCTP_DISPOSITION_CONSUME;
+}
+
+/*
+ * sctp_sf_t1_init_timer_expire
+ *
+ * Section: 4 Note: 2
+ * Verification Tag:
+ * Inputs
+ * (endpoint, asoc)
+ *
+ * RFC 2960 Section 4 Notes
+ * 2) If the T1-init timer expires, the endpoint MUST retransmit INIT
+ * and re-start the T1-init timer without changing state. This MUST
+ * be repeated up to 'Max.Init.Retransmits' times. After that, the
+ * endpoint MUST abort the initialization process and report the
+ * error to SCTP user.
+ *
+ * Outputs
+ * (timers, events)
+ *
+ */
+enum sctp_disposition sctp_sf_t1_init_timer_expire(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ int attempts = asoc->init_err_counter + 1;
+ struct sctp_chunk *repl = NULL;
+ struct sctp_bind_addr *bp;
+
+ pr_debug("%s: timer T1 expired (INIT)\n", __func__);
+
+ SCTP_INC_STATS(net, SCTP_MIB_T1_INIT_EXPIREDS);
+
+ if (attempts <= asoc->max_init_attempts) {
+ bp = (struct sctp_bind_addr *) &asoc->base.bind_addr;
+ repl = sctp_make_init(asoc, bp, GFP_ATOMIC, 0);
+ if (!repl)
+ return SCTP_DISPOSITION_NOMEM;
+
+ /* Choose transport for INIT. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_INIT_CHOOSE_TRANSPORT,
+ SCTP_CHUNK(repl));
+
+		/* Issue a side effect to do the needed accounting. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_INIT_RESTART,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
+ } else {
+ pr_debug("%s: giving up on INIT, attempts:%d "
+ "max_init_attempts:%d\n", __func__, attempts,
+ asoc->max_init_attempts);
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
+ sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
+ SCTP_PERR(SCTP_ERROR_NO_ERROR));
+ return SCTP_DISPOSITION_DELETE_TCB;
+ }
+
+ return SCTP_DISPOSITION_CONSUME;
+}
+
+/*
+ * sctp_sf_t1_cookie_timer_expire
+ *
+ * Section: 4 Note: 2
+ * Verification Tag:
+ * Inputs
+ * (endpoint, asoc)
+ *
+ * RFC 2960 Section 4 Notes
+ * 3) If the T1-cookie timer expires, the endpoint MUST retransmit
+ * COOKIE ECHO and re-start the T1-cookie timer without changing
+ * state. This MUST be repeated up to 'Max.Init.Retransmits' times.
+ * After that, the endpoint MUST abort the initialization process and
+ * report the error to SCTP user.
+ *
+ * Outputs
+ * (timers, events)
+ *
+ */
+enum sctp_disposition sctp_sf_t1_cookie_timer_expire(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ int attempts = asoc->init_err_counter + 1;
+ struct sctp_chunk *repl = NULL;
+
+ pr_debug("%s: timer T1 expired (COOKIE-ECHO)\n", __func__);
+
+ SCTP_INC_STATS(net, SCTP_MIB_T1_COOKIE_EXPIREDS);
+
+ if (attempts <= asoc->max_init_attempts) {
+ repl = sctp_make_cookie_echo(asoc, NULL);
+ if (!repl)
+ return SCTP_DISPOSITION_NOMEM;
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_INIT_CHOOSE_TRANSPORT,
+ SCTP_CHUNK(repl));
+		/* Issue a side effect to do the needed accounting. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_COOKIEECHO_RESTART,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
+ } else {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
+ sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
+ SCTP_PERR(SCTP_ERROR_NO_ERROR));
+ return SCTP_DISPOSITION_DELETE_TCB;
+ }
+
+ return SCTP_DISPOSITION_CONSUME;
+}
+
+/* RFC2960 9.2 If the timer expires, the endpoint must re-send the SHUTDOWN
+ * with the updated last sequential TSN received from its peer.
+ *
+ * An endpoint should limit the number of retransmission of the
+ * SHUTDOWN chunk to the protocol parameter 'Association.Max.Retrans'.
+ * If this threshold is exceeded the endpoint should destroy the TCB and
+ * MUST report the peer endpoint unreachable to the upper layer (and
+ * thus the association enters the CLOSED state). The reception of any
+ * packet from its peer (i.e. as the peer sends all of its queued DATA
+ * chunks) should clear the endpoint's retransmission count and restart
+ * the T2-Shutdown timer, giving its peer ample opportunity to transmit
+ * all of its queued DATA chunks that have not yet been sent.
+ */
+enum sctp_disposition sctp_sf_t2_timer_expire(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *reply = NULL;
+
+ pr_debug("%s: timer T2 expired\n", __func__);
+
+ SCTP_INC_STATS(net, SCTP_MIB_T2_SHUTDOWN_EXPIREDS);
+
+ ((struct sctp_association *)asoc)->shutdown_retries++;
+
+ if (asoc->overall_error_count >= asoc->max_retrans) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
+ /* Note: CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+ SCTP_PERR(SCTP_ERROR_NO_ERROR));
+ SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+ SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
+ return SCTP_DISPOSITION_DELETE_TCB;
+ }
+
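+	/* The T2 timer only runs while a SHUTDOWN or SHUTDOWN ACK is
+	 * outstanding, so reaching this point in any other state is a
+	 * state machine bug.
+	 */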
+ switch (asoc->state) {
+ case SCTP_STATE_SHUTDOWN_SENT:
+ reply = sctp_make_shutdown(asoc, NULL);
+ break;
+
+ case SCTP_STATE_SHUTDOWN_ACK_SENT:
+ reply = sctp_make_shutdown_ack(asoc, NULL);
+ break;
+
+ default:
+ BUG();
+ break;
+ }
+
+ if (!reply)
+ goto nomem;
+
+ /* Do some failure management (Section 8.2).
+	 * Skip it if the transport the SHUTDOWN was last sent to has been
+	 * removed.
+ */
+ if (asoc->shutdown_last_sent_to)
+ sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE,
+ SCTP_TRANSPORT(asoc->shutdown_last_sent_to));
+
+ /* Set the transport for the SHUTDOWN/ACK chunk and the timeout for
+ * the T2-shutdown timer.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply));
+
+ /* Restart the T2-shutdown timer. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
+ return SCTP_DISPOSITION_CONSUME;
+
+nomem:
+ return SCTP_DISPOSITION_NOMEM;
+}
+
+/*
+ * ADDIP Section 4.1 ASCONF Chunk Procedures
+ * If the T4 RTO timer expires, the endpoint should do B1 to B5
+ */
+enum sctp_disposition sctp_sf_t4_timer_expire(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *chunk = asoc->addip_last_asconf;
+ struct sctp_transport *transport = chunk->transport;
+
+ SCTP_INC_STATS(net, SCTP_MIB_T4_RTO_EXPIREDS);
+
+ /* ADDIP 4.1 B1) Increment the error counters and perform path failure
+ * detection on the appropriate destination address as defined in
+ * RFC2960 [5] section 8.1 and 8.2.
+ */
+ if (transport)
+ sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE,
+ SCTP_TRANSPORT(transport));
+
+ /* Reconfig T4 timer and transport. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T4, SCTP_CHUNK(chunk));
+
+ /* ADDIP 4.1 B2) Increment the association error counters and perform
+ * endpoint failure detection on the association as defined in
+ * RFC2960 [5] section 8.1 and 8.2.
+	 * The association error counter is incremented in SCTP_CMD_STRIKE.
+ */
+ if (asoc->overall_error_count >= asoc->max_retrans) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
+ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+ SCTP_PERR(SCTP_ERROR_NO_ERROR));
+ SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+ SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
+ return SCTP_DISPOSITION_ABORT;
+ }
+
+ /* ADDIP 4.1 B3) Back-off the destination address RTO value to which
+ * the ASCONF chunk was sent by doubling the RTO timer value.
+ * This is done in SCTP_CMD_STRIKE.
+ */
+
+ /* ADDIP 4.1 B4) Re-transmit the ASCONF Chunk last sent and if possible
+ * choose an alternate destination address (please refer to RFC2960
+ * [5] section 6.4.1). An endpoint MUST NOT add new parameters to this
+ * chunk, it MUST be the same (including its serial number) as the last
+ * ASCONF sent.
+ */
+ sctp_chunk_hold(asoc->addip_last_asconf);
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+ SCTP_CHUNK(asoc->addip_last_asconf));
+
+ /* ADDIP 4.1 B5) Restart the T-4 RTO timer. Note that if a different
+ * destination is selected, then the RTO used will be that of the new
+ * destination address.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
+
+ return SCTP_DISPOSITION_CONSUME;
+}
+
+/* sctpimpguide-05 Section 2.12.2
+ * The sender of the SHUTDOWN MAY also start an overall guard timer
+ * 'T5-shutdown-guard' to bound the overall time for shutdown sequence.
+ * At the expiration of this timer the sender SHOULD abort the association
+ * by sending an ABORT chunk.
+ */
+enum sctp_disposition sctp_sf_t5_timer_expire(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *reply = NULL;
+
+ pr_debug("%s: timer T5 expired\n", __func__);
+
+ SCTP_INC_STATS(net, SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS);
+
+ reply = sctp_make_abort(asoc, NULL, 0);
+ if (!reply)
+ goto nomem;
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
+ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+ SCTP_PERR(SCTP_ERROR_NO_ERROR));
+
+ SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+ SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
+
+ return SCTP_DISPOSITION_DELETE_TCB;
+nomem:
+ return SCTP_DISPOSITION_NOMEM;
+}
+
+/* Handle expiration of AUTOCLOSE timer. When the autoclose timer expires,
+ * the association is automatically closed by starting the shutdown process.
+ * The work that needs to be done is the same as when SHUTDOWN is initiated by
+ * the user, so this routine looks the same as sctp_sf_do_9_2_prm_shutdown().
+ */
+enum sctp_disposition sctp_sf_autoclose_timer_expire(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ enum sctp_disposition disposition;
+
+ SCTP_INC_STATS(net, SCTP_MIB_AUTOCLOSE_EXPIREDS);
+
+ /* From 9.2 Shutdown of an Association
+ * Upon receipt of the SHUTDOWN primitive from its upper
+ * layer, the endpoint enters SHUTDOWN-PENDING state and
+ * remains there until all outstanding data has been
+ * acknowledged by its peer. The endpoint accepts no new data
+ * from its upper layer, but retransmits data to the far end
+ * if necessary to fill gaps.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+ SCTP_STATE(SCTP_STATE_SHUTDOWN_PENDING));
+
+ disposition = SCTP_DISPOSITION_CONSUME;
+ if (sctp_outq_is_empty(&asoc->outqueue)) {
+ disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type,
+ NULL, commands);
+ }
+
+ return disposition;
+}
+
+/*****************************************************************************
+ * These are state functions which could apply to all types of events.
+ ****************************************************************************/
+
+/*
+ * This table entry is not implemented.
+ *
+ * Inputs
+ * (endpoint, asoc, chunk)
+ *
+ * The return value is the disposition of the chunk.
+ */
+enum sctp_disposition sctp_sf_not_impl(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg, struct sctp_cmd_seq *commands)
+{
+ return SCTP_DISPOSITION_NOT_IMPL;
+}
+
+/*
+ * This table entry represents a bug.
+ *
+ * Inputs
+ * (endpoint, asoc, chunk)
+ *
+ * The return value is the disposition of the chunk.
+ */
+enum sctp_disposition sctp_sf_bug(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg, struct sctp_cmd_seq *commands)
+{
+ return SCTP_DISPOSITION_BUG;
+}
+
+/*
+ * This table entry represents the firing of a timer in the wrong state.
+ * Since timer deletion cannot be guaranteed, a timer 'may' end up firing
+ * when the association is in the wrong state. This event should
+ * be ignored, so as to prevent any rearming of the timer.
+ *
+ * Inputs
+ * (endpoint, asoc, chunk)
+ *
+ * The return value is the disposition of the chunk.
+ */
+enum sctp_disposition sctp_sf_timer_ignore(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands)
+{
+ pr_debug("%s: timer %d ignored\n", __func__, type.chunk);
+
+ return SCTP_DISPOSITION_CONSUME;
+}
+
+/********************************************************************
+ * 2nd Level Abstractions
+ ********************************************************************/
+
+/* Pull the SACK chunk based on the SACK header. */
+static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk)
+{
+ struct sctp_sackhdr *sack;
+ __u16 num_dup_tsns;
+ unsigned int len;
+ __u16 num_blocks;
+
+ /* Protect ourselves from reading too far into
+ * the skb from a bogus sender.
+ */
+ sack = (struct sctp_sackhdr *) chunk->skb->data;
+
+ num_blocks = ntohs(sack->num_gap_ack_blocks);
+ num_dup_tsns = ntohs(sack->num_dup_tsns);
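+
+	/* The chunk must be long enough to hold the advertised Gap Ack
+	 * Blocks and duplicate TSNs: each Gap Ack Block is two 16-bit
+	 * offsets and each duplicate TSN is 32 bits, i.e. sizeof(__u32)
+	 * per entry.
+	 */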
+ len = sizeof(struct sctp_sackhdr);
+ len += (num_blocks + num_dup_tsns) * sizeof(__u32);
+ if (len > chunk->skb->len)
+ return NULL;
+
+ skb_pull(chunk->skb, len);
+
+ return sack;
+}
+
+/* Create an ABORT packet to be sent as a response, with the specified
+ * error causes.
+ */
+static struct sctp_packet *sctp_abort_pkt_new(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ const void *payload, size_t paylen)
+{
+ struct sctp_packet *packet;
+ struct sctp_chunk *abort;
+
+ packet = sctp_ootb_pkt_new(net, asoc, chunk);
+
+ if (packet) {
+ /* Make an ABORT.
+ * The T bit will be set if the asoc is NULL.
+ */
+ abort = sctp_make_abort(asoc, chunk, paylen);
+ if (!abort) {
+ sctp_ootb_pkt_free(packet);
+ return NULL;
+ }
+
+ /* Reflect vtag if T-Bit is set */
+ if (sctp_test_T_bit(abort))
+ packet->vtag = ntohl(chunk->sctp_hdr->vtag);
+
+ /* Add specified error causes, i.e., payload, to the
+ * end of the chunk.
+ */
+ sctp_addto_chunk(abort, paylen, payload);
+
+ /* Set the skb to the belonging sock for accounting. */
+ abort->skb->sk = ep->base.sk;
+
+ sctp_packet_append_chunk(packet, abort);
+
+ }
+
+ return packet;
+}
+
+/* Allocate a packet for responding in the OOTB conditions. */
+static struct sctp_packet *sctp_ootb_pkt_new(
+ struct net *net,
+ const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk)
+{
+ struct sctp_transport *transport;
+ struct sctp_packet *packet;
+ __u16 sport, dport;
+ __u32 vtag;
+
+ /* Get the source and destination port from the inbound packet. */
+ sport = ntohs(chunk->sctp_hdr->dest);
+ dport = ntohs(chunk->sctp_hdr->source);
+
+ /* The V-tag is going to be the same as the inbound packet if no
+ * association exists, otherwise, use the peer's vtag.
+ */
+ if (asoc) {
+ /* Special case the INIT-ACK as there is no peer's vtag
+ * yet.
+ */
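+		/* INIT and INIT ACK share the same leading layout (chunk
+		 * header followed by an init header), so the
+		 * sctp_initack_chunk cast below covers both chunk types.
+		 */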
+ switch (chunk->chunk_hdr->type) {
+ case SCTP_CID_INIT:
+ case SCTP_CID_INIT_ACK:
+ {
+ struct sctp_initack_chunk *initack;
+
+ initack = (struct sctp_initack_chunk *)chunk->chunk_hdr;
+ vtag = ntohl(initack->init_hdr.init_tag);
+ break;
+ }
+ default:
+ vtag = asoc->peer.i.init_tag;
+ break;
+ }
+ } else {
+ /* Special case the INIT and stale COOKIE_ECHO as there is no
+ * vtag yet.
+ */
+ switch (chunk->chunk_hdr->type) {
+ case SCTP_CID_INIT:
+ {
+ struct sctp_init_chunk *init;
+
+ init = (struct sctp_init_chunk *)chunk->chunk_hdr;
+ vtag = ntohl(init->init_hdr.init_tag);
+ break;
+ }
+ default:
+ vtag = ntohl(chunk->sctp_hdr->vtag);
+ break;
+ }
+ }
+
+ /* Make a transport for the bucket, Eliza... */
+ transport = sctp_transport_new(net, sctp_source(chunk), GFP_ATOMIC);
+ if (!transport)
+ goto nomem;
+
+ transport->encap_port = SCTP_INPUT_CB(chunk->skb)->encap_port;
+
+ /* Cache a route for the transport with the chunk's destination as
+ * the source address.
+ */
+ sctp_transport_route(transport, (union sctp_addr *)&chunk->dest,
+ sctp_sk(net->sctp.ctl_sock));
+
+ packet = &transport->packet;
+ sctp_packet_init(packet, transport, sport, dport);
+ sctp_packet_config(packet, vtag, 0);
+
+ return packet;
+
+nomem:
+ return NULL;
+}
+
+/* Free the packet allocated earlier for responding in the OOTB condition. */
+void sctp_ootb_pkt_free(struct sctp_packet *packet)
+{
+ sctp_transport_free(packet->transport);
+}
+
+/* Send a stale cookie error when an invalid COOKIE ECHO chunk is found */
+static void sctp_send_stale_cookie_err(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk,
+ struct sctp_cmd_seq *commands,
+ struct sctp_chunk *err_chunk)
+{
+ struct sctp_packet *packet;
+
+ if (err_chunk) {
+ packet = sctp_ootb_pkt_new(net, asoc, chunk);
+ if (packet) {
+ struct sctp_signed_cookie *cookie;
+
+ /* Override the OOTB vtag from the cookie. */
+ cookie = chunk->subh.cookie_hdr;
+ packet->vtag = cookie->c.peer_vtag;
+
+			/* Associate the skb with its owning sock for accounting. */
+ err_chunk->skb->sk = ep->base.sk;
+ sctp_packet_append_chunk(packet, err_chunk);
+ sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
+ SCTP_PACKET(packet));
+ SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
+ } else
+			sctp_chunk_free(err_chunk);
+ }
+}
+
+
+/* Process a data chunk */
+static int sctp_eat_data(const struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
+ struct sock *sk = asoc->base.sk;
+ struct net *net = sock_net(sk);
+ struct sctp_datahdr *data_hdr;
+ struct sctp_chunk *err;
+ enum sctp_verb deliver;
+ size_t datalen;
+ __u32 tsn;
+ int tmp;
+
+ data_hdr = (struct sctp_datahdr *)chunk->skb->data;
+ chunk->subh.data_hdr = data_hdr;
+ skb_pull(chunk->skb, sctp_datahdr_len(&asoc->stream));
+
+ tsn = ntohl(data_hdr->tsn);
+ pr_debug("%s: TSN 0x%x\n", __func__, tsn);
+
+ /* ASSERT: Now skb->data is really the user data. */
+
+ /* Process ECN based congestion.
+ *
+ * Since the chunk structure is reused for all chunks within
+ * a packet, we use ecn_ce_done to track if we've already
+ * done CE processing for this packet.
+ *
+ * We need to do ECN processing even if we plan to discard the
+ * chunk later.
+ */
+
+ if (asoc->peer.ecn_capable && !chunk->ecn_ce_done) {
+ struct sctp_af *af = SCTP_INPUT_CB(chunk->skb)->af;
+ chunk->ecn_ce_done = 1;
+
+ if (af->is_ce(sctp_gso_headskb(chunk->skb))) {
+ /* Do real work as side effect. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_ECN_CE,
+ SCTP_U32(tsn));
+ }
+ }
+
+ tmp = sctp_tsnmap_check(&asoc->peer.tsn_map, tsn);
+ if (tmp < 0) {
+ /* The TSN is too high--silently discard the chunk and
+ * count on it getting retransmitted later.
+ */
+ if (chunk->asoc)
+ chunk->asoc->stats.outofseqtsns++;
+ return SCTP_IERROR_HIGH_TSN;
+ } else if (tmp > 0) {
+ /* This is a duplicate. Record it. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_DUP, SCTP_U32(tsn));
+ return SCTP_IERROR_DUP_TSN;
+ }
+
+ /* This is a new TSN. */
+
+ /* Discard if there is no room in the receive window.
+ * Actually, allow a little bit of overflow (up to a MTU).
+ */
+ datalen = ntohs(chunk->chunk_hdr->length);
+ datalen -= sctp_datachk_len(&asoc->stream);
+
+ deliver = SCTP_CMD_CHUNK_ULP;
+
+ /* Think about partial delivery. */
+ if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) {
+
+ /* Even if we don't accept this chunk there is
+ * memory pressure.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_PART_DELIVER, SCTP_NULL());
+ }
+
+ /* Spill over rwnd a little bit. Note: While allowed, this spill over
+ * seems a bit troublesome in that frag_point varies based on
+	 * PMTU. In some cases, such as loopback, this might be a rather
+ * large spill over.
+ */
+ if ((!chunk->data_accepted) && (!asoc->rwnd || asoc->rwnd_over ||
+ (datalen > asoc->rwnd + asoc->frag_point))) {
+
+ /* If this is the next TSN, consider reneging to make
+ * room. Note: Playing nice with a confused sender. A
+ * malicious sender can still eat up all our buffer
+ * space and in the future we may want to detect and
+ * do more drastic reneging.
+ */
+ if (sctp_tsnmap_has_gap(map) &&
+ (sctp_tsnmap_get_ctsn(map) + 1) == tsn) {
+ pr_debug("%s: reneging for tsn:%u\n", __func__, tsn);
+ deliver = SCTP_CMD_RENEGE;
+ } else {
+ pr_debug("%s: discard tsn:%u len:%zu, rwnd:%d\n",
+ __func__, tsn, datalen, asoc->rwnd);
+
+ return SCTP_IERROR_IGNORE_TSN;
+ }
+ }
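+
+	/* Illustrative note (hypothetical numbers, not mandated by the
+	 * spec): with rwnd = 1000, rwnd_over = 0 and frag_point = 1452, a
+	 * 2000-byte chunk still passes the check above because
+	 * 2000 <= rwnd + frag_point, whereas a 3000-byte chunk is either
+	 * reneged for (if it is the next expected TSN and the map has a
+	 * gap) or ignored.
+	 */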
+
+ /*
+ * Also try to renege to limit our memory usage in the event that
+	 * we are under memory pressure.
+	 * If we can't renege, don't worry about it: the sk_rmem_schedule
+	 * in sctp_ulpevent_make_rcvmsg will drop the frame if we grow our
+	 * memory usage too much.
+ */
+ if (sk_under_memory_pressure(sk)) {
+ if (sctp_tsnmap_has_gap(map) &&
+ (sctp_tsnmap_get_ctsn(map) + 1) == tsn) {
+ pr_debug("%s: under pressure, reneging for tsn:%u\n",
+ __func__, tsn);
+ deliver = SCTP_CMD_RENEGE;
+ }
+ }
+
+ /*
+ * Section 3.3.10.9 No User Data (9)
+ *
+ * Cause of error
+ * ---------------
+ * No User Data: This error cause is returned to the originator of a
+ * DATA chunk if a received DATA chunk has no user data.
+ */
+ if (unlikely(0 == datalen)) {
+ err = sctp_make_abort_no_data(asoc, chunk, tsn);
+ if (err) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+ SCTP_CHUNK(err));
+ }
+ /* We are going to ABORT, so we might as well stop
+ * processing the rest of the chunks in the packet.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ECONNABORTED));
+ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+ SCTP_PERR(SCTP_ERROR_NO_DATA));
+ SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+ SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
+ return SCTP_IERROR_NO_DATA;
+ }
+
+ chunk->data_accepted = 1;
+
+	/* Note: Some chunks may get undercounted (if we drop) or overcounted
+	 * (if we renege and the chunk arrives again).
+ */
+ if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
+ SCTP_INC_STATS(net, SCTP_MIB_INUNORDERCHUNKS);
+ if (chunk->asoc)
+ chunk->asoc->stats.iuodchunks++;
+ } else {
+ SCTP_INC_STATS(net, SCTP_MIB_INORDERCHUNKS);
+ if (chunk->asoc)
+ chunk->asoc->stats.iodchunks++;
+ }
+
+ /* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
+ *
+	 * If an endpoint receives a DATA chunk with an invalid stream
+ * identifier, it shall acknowledge the reception of the DATA chunk
+ * following the normal procedure, immediately send an ERROR chunk
+ * with cause set to "Invalid Stream Identifier" (See Section 3.3.10)
+ * and discard the DATA chunk.
+ */
+ if (ntohs(data_hdr->stream) >= asoc->stream.incnt) {
+ /* Mark tsn as received even though we drop it */
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
+
+ err = sctp_make_op_error(asoc, chunk, SCTP_ERROR_INV_STRM,
+ &data_hdr->stream,
+ sizeof(data_hdr->stream),
+ sizeof(u16));
+ if (err)
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+ SCTP_CHUNK(err));
+ return SCTP_IERROR_BAD_STREAM;
+ }
+
+ /* Check to see if the SSN is possible for this TSN.
+ * The biggest gap we can record is 4K wide. Since SSNs wrap
+ * at an unsigned short, there is no way that an SSN can
+	 * wrap within a valid TSN window. We can simply check whether the
+	 * current SSN is smaller than the next expected one. If it is, it
+	 * wrapped and is invalid.
+ */
+ if (!asoc->stream.si->validate_data(chunk))
+ return SCTP_IERROR_PROTO_VIOLATION;
+
+ /* Send the data up to the user. Note: Schedule the
+ * SCTP_CMD_CHUNK_ULP cmd before the SCTP_CMD_GEN_SACK, as the SACK
+ * chunk needs the updated rwnd.
+ */
+ sctp_add_cmd_sf(commands, deliver, SCTP_CHUNK(chunk));
+
+ return SCTP_IERROR_NO_ERROR;
+}
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
new file mode 100644
index 000000000..1816a4410
--- /dev/null
+++ b/net/sctp/sm_statetable.c
@@ -0,0 +1,1041 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001 Intel Corp.
+ * Copyright (c) 2001 Nokia, Inc.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * These are the state tables for the SCTP state machine.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * Hui Huang <hui.huang@nokia.com>
+ * Daisy Chang <daisyc@us.ibm.com>
+ * Ardelle Fan <ardelle.fan@intel.com>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/skbuff.h>
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+
+static const struct sctp_sm_table_entry
+primitive_event_table[SCTP_NUM_PRIMITIVE_TYPES][SCTP_STATE_NUM_STATES];
+static const struct sctp_sm_table_entry
+other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_STATE_NUM_STATES];
+static const struct sctp_sm_table_entry
+timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][SCTP_STATE_NUM_STATES];
+
+static const struct sctp_sm_table_entry *sctp_chunk_event_lookup(
+ struct net *net,
+ enum sctp_cid cid,
+ enum sctp_state state);
+
+
+static const struct sctp_sm_table_entry bug = {
+ .fn = sctp_sf_bug,
+ .name = "sctp_sf_bug"
+};
+
+#define DO_LOOKUP(_max, _type, _table) \
+({ \
+ const struct sctp_sm_table_entry *rtn; \
+ \
+ if ((event_subtype._type > (_max))) { \
+ pr_warn("table %p possible attack: event %d exceeds max %d\n", \
+ _table, event_subtype._type, _max); \
+ rtn = &bug; \
+ } else \
+ rtn = &_table[event_subtype._type][(int)state]; \
+ \
+ rtn; \
+})
+
+const struct sctp_sm_table_entry *sctp_sm_lookup_event(
+ struct net *net,
+ enum sctp_event_type event_type,
+ enum sctp_state state,
+ union sctp_subtype event_subtype)
+{
+ switch (event_type) {
+ case SCTP_EVENT_T_CHUNK:
+ return sctp_chunk_event_lookup(net, event_subtype.chunk, state);
+ case SCTP_EVENT_T_TIMEOUT:
+ return DO_LOOKUP(SCTP_EVENT_TIMEOUT_MAX, timeout,
+ timeout_event_table);
+ case SCTP_EVENT_T_OTHER:
+ return DO_LOOKUP(SCTP_EVENT_OTHER_MAX, other,
+ other_event_table);
+ case SCTP_EVENT_T_PRIMITIVE:
+ return DO_LOOKUP(SCTP_EVENT_PRIMITIVE_MAX, primitive,
+ primitive_event_table);
+ default:
+ /* Yikes! We got an illegal event type. */
+ return &bug;
+ }
+}
+
+#define TYPE_SCTP_FUNC(func) {.fn = func, .name = #func}
+
+#define TYPE_SCTP_DATA { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_ootb), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_eat_data_6_2), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_eat_data_6_2), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_eat_data_fast_4_4), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+} /* TYPE_SCTP_DATA */
+
+#define TYPE_SCTP_INIT { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_5_1B_init), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_5_2_1_siminit), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_5_2_1_siminit), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_5_2_2_dupinit), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_5_2_2_dupinit), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_5_2_2_dupinit), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_5_2_2_dupinit), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_9_2_reshutack), \
+} /* TYPE_SCTP_INIT */
+
+#define TYPE_SCTP_INIT_ACK { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_5_2_3_initack), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_5_1C_ack), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+} /* TYPE_SCTP_INIT_ACK */
+
+#define TYPE_SCTP_SACK { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_ootb), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_eat_sack_6_2), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_eat_sack_6_2), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_eat_sack_6_2), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_eat_sack_6_2), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+} /* TYPE_SCTP_SACK */
+
+#define TYPE_SCTP_HEARTBEAT { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_ootb), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_beat_8_3), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_beat_8_3), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_beat_8_3), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_beat_8_3), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_beat_8_3), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ /* This should not happen, but we are nice. */ \
+ TYPE_SCTP_FUNC(sctp_sf_beat_8_3), \
+} /* TYPE_SCTP_HEARTBEAT */
+
+#define TYPE_SCTP_HEARTBEAT_ACK { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_ootb), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_violation), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_backbeat_8_3), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_backbeat_8_3), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_backbeat_8_3), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_backbeat_8_3), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+} /* TYPE_SCTP_HEARTBEAT_ACK */
+
+#define TYPE_SCTP_ABORT { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_pdiscard), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_cookie_wait_abort), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_cookie_echoed_abort), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_9_1_abort), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_shutdown_pending_abort), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_shutdown_sent_abort), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_9_1_abort), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_shutdown_ack_sent_abort), \
+} /* TYPE_SCTP_ABORT */
+
+#define TYPE_SCTP_SHUTDOWN { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_ootb), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_9_2_shutdown), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_9_2_shutdown), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_9_2_shutdown_ack), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_9_2_shut_ctsn), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+} /* TYPE_SCTP_SHUTDOWN */
+
+#define TYPE_SCTP_SHUTDOWN_ACK { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_ootb), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_8_5_1_E_sa), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_8_5_1_E_sa), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_violation), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_violation), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_9_2_final), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_violation), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_9_2_final), \
+} /* TYPE_SCTP_SHUTDOWN_ACK */
+
+#define TYPE_SCTP_ERROR { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_ootb), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_cookie_echoed_err), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_operr_notify), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_operr_notify), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_operr_notify), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+} /* TYPE_SCTP_ERROR */
+
+#define TYPE_SCTP_COOKIE_ECHO { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_5_1D_ce), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \
+} /* TYPE_SCTP_COOKIE_ECHO */
+
+#define TYPE_SCTP_COOKIE_ACK { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_5_1E_ca), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+} /* TYPE_SCTP_COOKIE_ACK */
+
+#define TYPE_SCTP_ECN_ECNE { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_ecne), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_ecne), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_ecne), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_ecne), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_ecne), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+} /* TYPE_SCTP_ECN_ECNE */
+
+#define TYPE_SCTP_ECN_CWR { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_ecn_cwr), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_ecn_cwr), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_ecn_cwr), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+} /* TYPE_SCTP_ECN_CWR */
+
+#define TYPE_SCTP_SHUTDOWN_COMPLETE { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_4_C), \
+} /* TYPE_SCTP_SHUTDOWN_COMPLETE */
+
+/* The primary index for this table is the chunk type.
+ * The secondary index for this table is the state.
+ *
+ * For base protocol (RFC 2960).
+ */
+static const struct sctp_sm_table_entry
+chunk_event_table[SCTP_NUM_BASE_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = {
+ TYPE_SCTP_DATA,
+ TYPE_SCTP_INIT,
+ TYPE_SCTP_INIT_ACK,
+ TYPE_SCTP_SACK,
+ TYPE_SCTP_HEARTBEAT,
+ TYPE_SCTP_HEARTBEAT_ACK,
+ TYPE_SCTP_ABORT,
+ TYPE_SCTP_SHUTDOWN,
+ TYPE_SCTP_SHUTDOWN_ACK,
+ TYPE_SCTP_ERROR,
+ TYPE_SCTP_COOKIE_ECHO,
+ TYPE_SCTP_COOKIE_ACK,
+ TYPE_SCTP_ECN_ECNE,
+ TYPE_SCTP_ECN_CWR,
+ TYPE_SCTP_SHUTDOWN_COMPLETE,
+}; /* state_fn_t chunk_event_table[][] */
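+
+/* Illustrative lookup (follows from the table layout above): a DATA chunk
+ * (SCTP_CID_DATA) arriving while the association is in
+ * SCTP_STATE_ESTABLISHED indexes the TYPE_SCTP_DATA row and yields
+ * sctp_sf_eat_data_6_2, while the same chunk in SCTP_STATE_CLOSED maps to
+ * sctp_sf_ootb.
+ */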
+
+#define TYPE_SCTP_ASCONF { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_asconf), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_asconf), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_asconf), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_asconf), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+} /* TYPE_SCTP_ASCONF */
+
+#define TYPE_SCTP_ASCONF_ACK { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_asconf_ack), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_asconf_ack), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_asconf_ack), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_asconf_ack), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+} /* TYPE_SCTP_ASCONF_ACK */
+
+/* The primary index for this table is the chunk type.
+ * The secondary index for this table is the state.
+ */
+static const struct sctp_sm_table_entry
+addip_chunk_event_table[SCTP_NUM_ADDIP_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = {
+ TYPE_SCTP_ASCONF,
+ TYPE_SCTP_ASCONF_ACK,
+}; /*state_fn_t addip_chunk_event_table[][] */
+
+#define TYPE_SCTP_FWD_TSN { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_ootb), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_eat_fwd_tsn), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_eat_fwd_tsn), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_eat_fwd_tsn_fast), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+} /* TYPE_SCTP_FWD_TSN */
+
+/* The primary index for this table is the chunk type.
+ * The secondary index for this table is the state.
+ */
+static const struct sctp_sm_table_entry
+prsctp_chunk_event_table[SCTP_NUM_PRSCTP_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = {
+ TYPE_SCTP_FWD_TSN,
+}; /*state_fn_t prsctp_chunk_event_table[][] */
+
+#define TYPE_SCTP_RECONF { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_reconf), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_reconf), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+} /* TYPE_SCTP_RECONF */
+
+/* The primary index for this table is the chunk type.
+ * The secondary index for this table is the state.
+ */
+static const struct sctp_sm_table_entry
+reconf_chunk_event_table[SCTP_NUM_RECONF_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = {
+ TYPE_SCTP_RECONF,
+}; /*state_fn_t reconf_chunk_event_table[][] */
+
+#define TYPE_SCTP_AUTH { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_ootb), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_eat_auth), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_eat_auth), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_eat_auth), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_eat_auth), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_eat_auth), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_eat_auth), \
+} /* TYPE_SCTP_AUTH */
+
+/* The primary index for this table is the chunk type.
+ * The secondary index for this table is the state.
+ */
+static const struct sctp_sm_table_entry
+auth_chunk_event_table[SCTP_NUM_AUTH_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = {
+ TYPE_SCTP_AUTH,
+}; /*state_fn_t auth_chunk_event_table[][] */
+
+static const struct sctp_sm_table_entry
+pad_chunk_event_table[SCTP_STATE_NUM_STATES] = {
+ /* SCTP_STATE_CLOSED */
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk),
+ /* SCTP_STATE_COOKIE_WAIT */
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk),
+ /* SCTP_STATE_COOKIE_ECHOED */
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk),
+ /* SCTP_STATE_ESTABLISHED */
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk),
+ /* SCTP_STATE_SHUTDOWN_PENDING */
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk),
+ /* SCTP_STATE_SHUTDOWN_SENT */
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk),
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk),
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */
+ TYPE_SCTP_FUNC(sctp_sf_discard_chunk),
+}; /* chunk pad */
+
+static const struct sctp_sm_table_entry
+chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
+ /* SCTP_STATE_CLOSED */
+ TYPE_SCTP_FUNC(sctp_sf_ootb),
+ /* SCTP_STATE_COOKIE_WAIT */
+ TYPE_SCTP_FUNC(sctp_sf_unk_chunk),
+ /* SCTP_STATE_COOKIE_ECHOED */
+ TYPE_SCTP_FUNC(sctp_sf_unk_chunk),
+ /* SCTP_STATE_ESTABLISHED */
+ TYPE_SCTP_FUNC(sctp_sf_unk_chunk),
+ /* SCTP_STATE_SHUTDOWN_PENDING */
+ TYPE_SCTP_FUNC(sctp_sf_unk_chunk),
+ /* SCTP_STATE_SHUTDOWN_SENT */
+ TYPE_SCTP_FUNC(sctp_sf_unk_chunk),
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */
+ TYPE_SCTP_FUNC(sctp_sf_unk_chunk),
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */
+ TYPE_SCTP_FUNC(sctp_sf_unk_chunk),
+}; /* chunk unknown */
+
+
+#define TYPE_SCTP_PRIMITIVE_ASSOCIATE { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_prm_asoc), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_not_impl), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_not_impl), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_not_impl), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_not_impl), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_not_impl), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_not_impl), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_not_impl), \
+} /* TYPE_SCTP_PRIMITIVE_ASSOCIATE */
+
+#define TYPE_SCTP_PRIMITIVE_SHUTDOWN { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_error_closed), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_cookie_wait_prm_shutdown), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_cookie_echoed_prm_shutdown),\
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_9_2_prm_shutdown), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_ignore_primitive), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_ignore_primitive), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_ignore_primitive), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_ignore_primitive), \
+} /* TYPE_SCTP_PRIMITIVE_SHUTDOWN */
+
+#define TYPE_SCTP_PRIMITIVE_ABORT { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_error_closed), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_cookie_wait_prm_abort), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_cookie_echoed_prm_abort), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_9_1_prm_abort), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_shutdown_pending_prm_abort), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_shutdown_sent_prm_abort), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_9_1_prm_abort), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_shutdown_ack_sent_prm_abort), \
+} /* TYPE_SCTP_PRIMITIVE_ABORT */
+
+#define TYPE_SCTP_PRIMITIVE_SEND { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_error_closed), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_prm_send), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_prm_send), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_prm_send), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \
+} /* TYPE_SCTP_PRIMITIVE_SEND */
+
+#define TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_error_closed), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat), \
+} /* TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT */
+
+#define TYPE_SCTP_PRIMITIVE_ASCONF { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_error_closed), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_error_closed), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_error_closed), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_prm_asconf), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_prm_asconf), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_prm_asconf), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_prm_asconf), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \
+} /* TYPE_SCTP_PRIMITIVE_ASCONF */
+
+#define TYPE_SCTP_PRIMITIVE_RECONF { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_error_closed), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_error_closed), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_error_closed), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_prm_reconf), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_prm_reconf), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_prm_reconf), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_prm_reconf), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \
+} /* TYPE_SCTP_PRIMITIVE_RECONF */
+
+/* The primary index for this table is the primitive type.
+ * The secondary index for this table is the state.
+ */
+static const struct sctp_sm_table_entry
+primitive_event_table[SCTP_NUM_PRIMITIVE_TYPES][SCTP_STATE_NUM_STATES] = {
+ TYPE_SCTP_PRIMITIVE_ASSOCIATE,
+ TYPE_SCTP_PRIMITIVE_SHUTDOWN,
+ TYPE_SCTP_PRIMITIVE_ABORT,
+ TYPE_SCTP_PRIMITIVE_SEND,
+ TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT,
+ TYPE_SCTP_PRIMITIVE_ASCONF,
+ TYPE_SCTP_PRIMITIVE_RECONF,
+};
+
+#define TYPE_SCTP_OTHER_NO_PENDING_TSN { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_no_pending_tsn), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_9_2_start_shutdown), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_9_2_shutdown_ack), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
+}
+
+#define TYPE_SCTP_OTHER_ICMP_PROTO_UNREACH { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_cookie_wait_icmp_abort), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
+}
+
+static const struct sctp_sm_table_entry
+other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_STATE_NUM_STATES] = {
+ TYPE_SCTP_OTHER_NO_PENDING_TSN,
+ TYPE_SCTP_OTHER_ICMP_PROTO_UNREACH,
+};
+
+#define TYPE_SCTP_EVENT_TIMEOUT_NONE { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_bug), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_bug), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_bug), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_bug), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_bug), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_bug), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_bug), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_bug), \
+}
+
+#define TYPE_SCTP_EVENT_TIMEOUT_T1_COOKIE { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_bug), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_t1_cookie_timer_expire), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+}
+
+#define TYPE_SCTP_EVENT_TIMEOUT_T1_INIT { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_t1_init_timer_expire), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+}
+
+#define TYPE_SCTP_EVENT_TIMEOUT_T2_SHUTDOWN { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_t2_timer_expire), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_t2_timer_expire), \
+}
+
+#define TYPE_SCTP_EVENT_TIMEOUT_T3_RTX { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_6_3_3_rtx), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_6_3_3_rtx), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_6_3_3_rtx), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_6_3_3_rtx), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+}
+
+#define TYPE_SCTP_EVENT_TIMEOUT_T4_RTO { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_t4_timer_expire), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+}
+
+#define TYPE_SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+}
+
+#define TYPE_SCTP_EVENT_TIMEOUT_HEARTBEAT { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_sendbeat_8_3), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_sendbeat_8_3), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_sendbeat_8_3), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+}
+
+#define TYPE_SCTP_EVENT_TIMEOUT_SACK { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_6_2_sack), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_6_2_sack), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_6_2_sack), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+}
+
+#define TYPE_SCTP_EVENT_TIMEOUT_AUTOCLOSE { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_autoclose_timer_expire), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+}
+
+#define TYPE_SCTP_EVENT_TIMEOUT_RECONF { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_send_reconf), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+}
+
+#define TYPE_SCTP_EVENT_TIMEOUT_PROBE { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_send_probe), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+}
+
+static const struct sctp_sm_table_entry
+timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][SCTP_STATE_NUM_STATES] = {
+ TYPE_SCTP_EVENT_TIMEOUT_NONE,
+ TYPE_SCTP_EVENT_TIMEOUT_T1_COOKIE,
+ TYPE_SCTP_EVENT_TIMEOUT_T1_INIT,
+ TYPE_SCTP_EVENT_TIMEOUT_T2_SHUTDOWN,
+ TYPE_SCTP_EVENT_TIMEOUT_T3_RTX,
+ TYPE_SCTP_EVENT_TIMEOUT_T4_RTO,
+ TYPE_SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD,
+ TYPE_SCTP_EVENT_TIMEOUT_HEARTBEAT,
+ TYPE_SCTP_EVENT_TIMEOUT_RECONF,
+ TYPE_SCTP_EVENT_TIMEOUT_PROBE,
+ TYPE_SCTP_EVENT_TIMEOUT_SACK,
+ TYPE_SCTP_EVENT_TIMEOUT_AUTOCLOSE,
+};
+
+static const struct sctp_sm_table_entry *sctp_chunk_event_lookup(
+ struct net *net,
+ enum sctp_cid cid,
+ enum sctp_state state)
+{
+ if (state > SCTP_STATE_MAX)
+ return &bug;
+
+ if (cid == SCTP_CID_I_DATA)
+ cid = SCTP_CID_DATA;
+
+ if (cid <= SCTP_CID_BASE_MAX)
+ return &chunk_event_table[cid][state];
+
+ switch ((u16)cid) {
+ case SCTP_CID_FWD_TSN:
+ case SCTP_CID_I_FWD_TSN:
+ return &prsctp_chunk_event_table[0][state];
+
+ case SCTP_CID_ASCONF:
+ return &addip_chunk_event_table[0][state];
+
+ case SCTP_CID_ASCONF_ACK:
+ return &addip_chunk_event_table[1][state];
+
+ case SCTP_CID_RECONF:
+ return &reconf_chunk_event_table[0][state];
+
+ case SCTP_CID_AUTH:
+ return &auth_chunk_event_table[0][state];
+
+ case SCTP_CID_PAD:
+ return &pad_chunk_event_table[state];
+ }
+
+ return &chunk_event_table_unknown[state];
+}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
new file mode 100644
index 000000000..237a6b04a
--- /dev/null
+++ b/net/sctp/socket.c
@@ -0,0 +1,9745 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001-2003 Intel Corp.
+ * Copyright (c) 2001-2002 Nokia, Inc.
+ * Copyright (c) 2001 La Monte H.P. Yarroll
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * These functions interface with the sockets layer to implement the
+ * SCTP Extensions for the Sockets API.
+ *
+ * Note that the descriptions from the specification are USER level
+ * functions--this file contains the functions which populate the struct
+ * proto for SCTP, which is the BOTTOM of the sockets interface.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Narasimha Budihal <narsi@refcode.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * Xingang Guo <xingang.guo@intel.com>
+ * Daisy Chang <daisyc@us.ibm.com>
+ * Sridhar Samudrala <samudrala@us.ibm.com>
+ * Inaky Perez-Gonzalez <inaky.gonzalez@intel.com>
+ * Ardelle Fan <ardelle.fan@intel.com>
+ * Ryan Layer <rmlayer@us.ibm.com>
+ * Anup Pemmaiah <pemmaiah@cc.usu.edu>
+ * Kevin Gao <kevin.gao@intel.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <crypto/hash.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/time.h>
+#include <linux/sched/signal.h>
+#include <linux/ip.h>
+#include <linux/capability.h>
+#include <linux/fcntl.h>
+#include <linux/poll.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/compat.h>
+#include <linux/rhashtable.h>
+
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/route.h>
+#include <net/ipv6.h>
+#include <net/inet_common.h>
+#include <net/busy_poll.h>
+
+#include <linux/socket.h> /* for sa_family_t */
+#include <linux/export.h>
+#include <net/sock.h>
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+#include <net/sctp/stream_sched.h>
+
+/* Forward declarations for internal helper functions. */
+static bool sctp_writeable(const struct sock *sk);
+static void sctp_wfree(struct sk_buff *skb);
+static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
+ size_t msg_len);
+static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
+static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
+static int sctp_wait_for_accept(struct sock *sk, long timeo);
+static void sctp_wait_for_close(struct sock *sk, long timeo);
+static void sctp_destruct_sock(struct sock *sk);
+static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
+ union sctp_addr *addr, int len);
+static int sctp_bindx_add(struct sock *, struct sockaddr *, int);
+static int sctp_bindx_rem(struct sock *, struct sockaddr *, int);
+static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int);
+static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int);
+static int sctp_send_asconf(struct sctp_association *asoc,
+ struct sctp_chunk *chunk);
+static int sctp_do_bind(struct sock *, union sctp_addr *, int);
+static int sctp_autobind(struct sock *sk);
+static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
+ struct sctp_association *assoc,
+ enum sctp_socket_type type);
+
+static unsigned long sctp_memory_pressure;
+static atomic_long_t sctp_memory_allocated;
+static DEFINE_PER_CPU(int, sctp_memory_per_cpu_fw_alloc);
+struct percpu_counter sctp_sockets_allocated;
+
+static void sctp_enter_memory_pressure(struct sock *sk)
+{
+ WRITE_ONCE(sctp_memory_pressure, 1);
+}
+
+
+/* Get the sndbuf space available at the time on the association. */
+static inline int sctp_wspace(struct sctp_association *asoc)
+{
+ struct sock *sk = asoc->base.sk;
+
+ return asoc->ep->sndbuf_policy ? sk->sk_sndbuf - asoc->sndbuf_used
+ : sk_stream_wspace(sk);
+}
+
+/* Increment the used sndbuf space count of the corresponding association by
+ * the size of the outgoing data chunk.
+ * Also, set the skb destructor for sndbuf accounting later.
+ *
+ * Since it is always 1-1 between chunk and skb, and also a new skb is always
+ * allocated for chunk bundling in sctp_packet_transmit(), we can use the
+ * destructor in the data chunk skb for the purpose of the sndbuf space
+ * tracking.
+ */
+static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
+{
+ struct sctp_association *asoc = chunk->asoc;
+ struct sock *sk = asoc->base.sk;
+
+ /* The sndbuf space is tracked per association. */
+ sctp_association_hold(asoc);
+
+ if (chunk->shkey)
+ sctp_auth_shkey_hold(chunk->shkey);
+
+ skb_set_owner_w(chunk->skb, sk);
+
+ chunk->skb->destructor = sctp_wfree;
+ /* Save the chunk pointer in skb for sctp_wfree to use later. */
+ skb_shinfo(chunk->skb)->destructor_arg = chunk;
+
+ refcount_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
+ asoc->sndbuf_used += chunk->skb->truesize + sizeof(struct sctp_chunk);
+ sk_wmem_queued_add(sk, chunk->skb->truesize + sizeof(struct sctp_chunk));
+ sk_mem_charge(sk, chunk->skb->truesize);
+}
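+
+/* Illustrative note (hypothetical sizes): for a data chunk whose skb has a
+ * truesize of 2048 bytes, the accounting above adds
+ *
+ *	2048 + sizeof(struct sctp_chunk)
+ *
+ * to both asoc->sndbuf_used and the socket's write-memory counters;
+ * sctp_wfree() is expected to release the same amount when the skb is
+ * freed, keeping the per-association send-buffer accounting balanced.
+ */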
+
+static void sctp_clear_owner_w(struct sctp_chunk *chunk)
+{
+ skb_orphan(chunk->skb);
+}
+
+#define traverse_and_process() \
+do { \
+ msg = chunk->msg; \
+ if (msg == prev_msg) \
+ continue; \
+ list_for_each_entry(c, &msg->chunks, frag_list) { \
+ if ((clear && asoc->base.sk == c->skb->sk) || \
+ (!clear && asoc->base.sk != c->skb->sk)) \
+ cb(c); \
+ } \
+ prev_msg = msg; \
+} while (0)
+
+static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
+ bool clear,
+ void (*cb)(struct sctp_chunk *))
+
+{
+ struct sctp_datamsg *msg, *prev_msg = NULL;
+ struct sctp_outq *q = &asoc->outqueue;
+ struct sctp_chunk *chunk, *c;
+ struct sctp_transport *t;
+
+ list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
+ list_for_each_entry(chunk, &t->transmitted, transmitted_list)
+ traverse_and_process();
+
+ list_for_each_entry(chunk, &q->retransmit, transmitted_list)
+ traverse_and_process();
+
+ list_for_each_entry(chunk, &q->sacked, transmitted_list)
+ traverse_and_process();
+
+ list_for_each_entry(chunk, &q->abandoned, transmitted_list)
+ traverse_and_process();
+
+ list_for_each_entry(chunk, &q->out_chunk_list, list)
+ traverse_and_process();
+}
+
+static void sctp_for_each_rx_skb(struct sctp_association *asoc, struct sock *sk,
+ void (*cb)(struct sk_buff *, struct sock *))
+
+{
+ struct sk_buff *skb, *tmp;
+
+ sctp_skb_for_each(skb, &asoc->ulpq.lobby, tmp)
+ cb(skb, sk);
+
+ sctp_skb_for_each(skb, &asoc->ulpq.reasm, tmp)
+ cb(skb, sk);
+
+ sctp_skb_for_each(skb, &asoc->ulpq.reasm_uo, tmp)
+ cb(skb, sk);
+}
+
+/* Verify that this is a valid address. */
+static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
+ int len)
+{
+ struct sctp_af *af;
+
+ /* Verify basic sockaddr. */
+ af = sctp_sockaddr_af(sctp_sk(sk), addr, len);
+ if (!af)
+ return -EINVAL;
+
+ /* Is this a valid SCTP address? */
+ if (!af->addr_valid(addr, sctp_sk(sk), NULL))
+ return -EINVAL;
+
+ if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr)))
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Look up the association by its id. If this is not a UDP-style
+ * socket, the ID field is always ignored.
+ */
+struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
+{
+ struct sctp_association *asoc = NULL;
+
+ /* If this is not a UDP-style socket, assoc id should be ignored. */
+ if (!sctp_style(sk, UDP)) {
+ /* Return NULL if the socket state is not ESTABLISHED. It
+ * could be a TCP-style listening socket or a socket which
+ * hasn't yet called connect() to establish an association.
+ */
+ if (!sctp_sstate(sk, ESTABLISHED) && !sctp_sstate(sk, CLOSING))
+ return NULL;
+
+ /* Get the first and the only association from the list. */
+ if (!list_empty(&sctp_sk(sk)->ep->asocs))
+ asoc = list_entry(sctp_sk(sk)->ep->asocs.next,
+ struct sctp_association, asocs);
+ return asoc;
+ }
+
+ /* Otherwise this is a UDP-style socket. */
+ if (id <= SCTP_ALL_ASSOC)
+ return NULL;
+
+ spin_lock_bh(&sctp_assocs_id_lock);
+ asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
+ if (asoc && (asoc->base.sk != sk || asoc->base.dead))
+ asoc = NULL;
+ spin_unlock_bh(&sctp_assocs_id_lock);
+
+ return asoc;
+}
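+
+/* Illustrative usage (hypothetical caller): on a TCP-style (one-to-one)
+ * socket the id argument is ignored, so sctp_id2assoc(sk, 0) simply returns
+ * the single association once the socket is established or closing; on a
+ * UDP-style socket the same call returns NULL, since any id at or below
+ * SCTP_ALL_ASSOC is rejected.
+ */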
+
+/* Look up the transport from an address and an assoc id. If both address and
+ * id are specified, the associations matching the address and the id should be
+ * the same.
+ */
+static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
+ struct sockaddr_storage *addr,
+ sctp_assoc_t id)
+{
+ struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
+ struct sctp_af *af = sctp_get_af_specific(addr->ss_family);
+ union sctp_addr *laddr = (union sctp_addr *)addr;
+ struct sctp_transport *transport;
+
+ if (!af || sctp_verify_addr(sk, laddr, af->sockaddr_len))
+ return NULL;
+
+ addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
+ laddr,
+ &transport);
+
+ if (!addr_asoc)
+ return NULL;
+
+ id_asoc = sctp_id2assoc(sk, id);
+ if (id_asoc && (id_asoc != addr_asoc))
+ return NULL;
+
+ sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk),
+ (union sctp_addr *)addr);
+
+ return transport;
+}
+
+/* API 3.1.2 bind() - UDP Style Syntax
+ * The syntax of bind() is,
+ *
+ * ret = bind(int sd, struct sockaddr *addr, int addrlen);
+ *
+ * sd - the socket descriptor returned by socket().
+ * addr - the address structure (struct sockaddr_in or struct
+ * sockaddr_in6 [RFC 2553]),
+ * addr_len - the size of the address structure.
+ */
+static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
+{
+ int retval = 0;
+
+ lock_sock(sk);
+
+ pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk,
+ addr, addr_len);
+
+ /* Disallow binding twice. */
+ if (!sctp_sk(sk)->ep->base.bind_addr.port)
+ retval = sctp_do_bind(sk, (union sctp_addr *)addr,
+ addr_len);
+ else
+ retval = -EINVAL;
+
+ release_sock(sk);
+
+ return retval;
+}
+
+static int sctp_get_port_local(struct sock *, union sctp_addr *);
+
+/* Verify this is a valid sockaddr. */
+static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
+ union sctp_addr *addr, int len)
+{
+ struct sctp_af *af;
+
+ /* Check minimum size. */
+	if (len < sizeof(struct sockaddr))
+ return NULL;
+
+ if (!opt->pf->af_supported(addr->sa.sa_family, opt))
+ return NULL;
+
+ if (addr->sa.sa_family == AF_INET6) {
+ if (len < SIN6_LEN_RFC2133)
+ return NULL;
+		/* V4-mapped addresses are really of AF_INET family */
+ if (ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
+ !opt->pf->af_supported(AF_INET, opt))
+ return NULL;
+ }
+
+ /* If we get this far, af is valid. */
+ af = sctp_get_af_specific(addr->sa.sa_family);
+
+ if (len < af->sockaddr_len)
+ return NULL;
+
+ return af;
+}
+
+static void sctp_auto_asconf_init(struct sctp_sock *sp)
+{
+ struct net *net = sock_net(&sp->inet.sk);
+
+ if (net->sctp.default_auto_asconf) {
+ spin_lock_bh(&net->sctp.addr_wq_lock);
+ list_add_tail(&sp->auto_asconf_list, &net->sctp.auto_asconf_splist);
+ spin_unlock_bh(&net->sctp.addr_wq_lock);
+ sp->do_auto_asconf = 1;
+ }
+}
+
+/* Bind a local address either to an endpoint or to an association. */
+static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
+{
+ struct net *net = sock_net(sk);
+ struct sctp_sock *sp = sctp_sk(sk);
+ struct sctp_endpoint *ep = sp->ep;
+ struct sctp_bind_addr *bp = &ep->base.bind_addr;
+ struct sctp_af *af;
+ unsigned short snum;
+ int ret = 0;
+
+ /* Common sockaddr verification. */
+ af = sctp_sockaddr_af(sp, addr, len);
+ if (!af) {
+ pr_debug("%s: sk:%p, newaddr:%p, len:%d EINVAL\n",
+ __func__, sk, addr, len);
+ return -EINVAL;
+ }
+
+ snum = ntohs(addr->v4.sin_port);
+
+ pr_debug("%s: sk:%p, new addr:%pISc, port:%d, new port:%d, len:%d\n",
+ __func__, sk, &addr->sa, bp->port, snum, len);
+
+ /* PF specific bind() address verification. */
+ if (!sp->pf->bind_verify(sp, addr))
+ return -EADDRNOTAVAIL;
+
+ /* We must either be unbound, or bind to the same port.
+ * It's OK to allow 0 ports if we are already bound.
+	 * We'll just inherit an already bound port in this case.
+ */
+ if (bp->port) {
+ if (!snum)
+ snum = bp->port;
+ else if (snum != bp->port) {
+ pr_debug("%s: new port %d doesn't match existing port "
+ "%d\n", __func__, snum, bp->port);
+ return -EINVAL;
+ }
+ }
+
+ if (snum && inet_port_requires_bind_service(net, snum) &&
+ !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
+ return -EACCES;
+
+ /* See if the address matches any of the addresses we may have
+ * already bound before checking against other endpoints.
+ */
+ if (sctp_bind_addr_match(bp, addr, sp))
+ return -EINVAL;
+
+ /* Make sure we are allowed to bind here.
+ * The function sctp_get_port_local() does duplicate address
+ * detection.
+ */
+ addr->v4.sin_port = htons(snum);
+ if (sctp_get_port_local(sk, addr))
+ return -EADDRINUSE;
+
+ /* Refresh ephemeral port. */
+ if (!bp->port) {
+ bp->port = inet_sk(sk)->inet_num;
+ sctp_auto_asconf_init(sp);
+ }
+
+ /* Add the address to the bind address list.
+ * Use GFP_ATOMIC since BHs will be disabled.
+ */
+ ret = sctp_add_bind_addr(bp, addr, af->sockaddr_len,
+ SCTP_ADDR_SRC, GFP_ATOMIC);
+
+ if (ret) {
+ sctp_put_port(sk);
+ return ret;
+ }
+ /* Copy back into socket for getsockname() use. */
+ inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num);
+ sp->pf->to_sk_saddr(addr, sk);
+
+ return ret;
+}
+
+ /* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks
+ *
+ * R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged
+ * at any one time. If a sender, after sending an ASCONF chunk, decides
+ * it needs to transfer another ASCONF Chunk, it MUST wait until the
+ * ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a
+ * subsequent ASCONF. Note this restriction binds each side, so at any
+ * time two ASCONF may be in-transit on any given association (one sent
+ * from each endpoint).
+ */
+static int sctp_send_asconf(struct sctp_association *asoc,
+ struct sctp_chunk *chunk)
+{
+ int retval = 0;
+
+ /* If there is an outstanding ASCONF chunk, queue it for later
+ * transmission.
+ */
+ if (asoc->addip_last_asconf) {
+ list_add_tail(&chunk->list, &asoc->addip_chunk_list);
+ goto out;
+ }
+
+ /* Hold the chunk until an ASCONF_ACK is received. */
+ sctp_chunk_hold(chunk);
+ retval = sctp_primitive_ASCONF(asoc->base.net, asoc, chunk);
+ if (retval)
+ sctp_chunk_free(chunk);
+ else
+ asoc->addip_last_asconf = chunk;
+
+out:
+ return retval;
+}
+
+/* Add a list of addresses as bind addresses to local endpoint or
+ * association.
+ *
+ * Basically run through each address specified in the addrs/addrcnt
+ * array/length pair, determine if it is IPv6 or IPv4 and call
+ * sctp_do_bind() on it.
+ *
+ * If any of them fails, then the operation will be reversed and the
+ * ones that were added will be removed.
+ *
+ * Only sctp_setsockopt_bindx() is supposed to call this function.
+ */
+static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
+{
+ int cnt;
+ int retval = 0;
+ void *addr_buf;
+ struct sockaddr *sa_addr;
+ struct sctp_af *af;
+
+ pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk,
+ addrs, addrcnt);
+
+ addr_buf = addrs;
+ for (cnt = 0; cnt < addrcnt; cnt++) {
+		/* The list may contain either IPv4 or IPv6 addresses;
+		 * determine the address length for walking through the list.
+		 */
+ sa_addr = addr_buf;
+ af = sctp_get_af_specific(sa_addr->sa_family);
+ if (!af) {
+ retval = -EINVAL;
+ goto err_bindx_add;
+ }
+
+ retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr,
+ af->sockaddr_len);
+
+ addr_buf += af->sockaddr_len;
+
+err_bindx_add:
+ if (retval < 0) {
+ /* Failed. Cleanup the ones that have been added */
+ if (cnt > 0)
+ sctp_bindx_rem(sk, addrs, cnt);
+ return retval;
+ }
+ }
+
+ return retval;
+}
+
+/* Send an ASCONF chunk with Add IP address parameters to all the peers of the
+ * associations that are part of the endpoint, indicating that a list of local
+ * addresses is being added to the endpoint.
+ *
+ * If any of the addresses is already in the bind address list of the
+ * association, we do not send the chunk for that association. But it will not
+ * affect other associations.
+ *
+ * Only sctp_setsockopt_bindx() is supposed to call this function.
+ */
+static int sctp_send_asconf_add_ip(struct sock *sk,
+ struct sockaddr *addrs,
+ int addrcnt)
+{
+ struct sctp_sock *sp;
+ struct sctp_endpoint *ep;
+ struct sctp_association *asoc;
+ struct sctp_bind_addr *bp;
+ struct sctp_chunk *chunk;
+ struct sctp_sockaddr_entry *laddr;
+ union sctp_addr *addr;
+ union sctp_addr saveaddr;
+ void *addr_buf;
+ struct sctp_af *af;
+ struct list_head *p;
+ int i;
+ int retval = 0;
+
+ sp = sctp_sk(sk);
+ ep = sp->ep;
+
+ if (!ep->asconf_enable)
+ return retval;
+
+ pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
+ __func__, sk, addrs, addrcnt);
+
+ list_for_each_entry(asoc, &ep->asocs, asocs) {
+ if (!asoc->peer.asconf_capable)
+ continue;
+
+ if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP)
+ continue;
+
+ if (!sctp_state(asoc, ESTABLISHED))
+ continue;
+
+ /* Check if any address in the packed array of addresses is
+ * in the bind address list of the association. If so,
+ * do not send the asconf chunk to its peer, but continue with
+ * other associations.
+ */
+ addr_buf = addrs;
+ for (i = 0; i < addrcnt; i++) {
+ addr = addr_buf;
+ af = sctp_get_af_specific(addr->v4.sin_family);
+ if (!af) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ if (sctp_assoc_lookup_laddr(asoc, addr))
+ break;
+
+ addr_buf += af->sockaddr_len;
+ }
+ if (i < addrcnt)
+ continue;
+
+ /* Use the first valid address in bind addr list of
+ * association as Address Parameter of ASCONF CHUNK.
+ */
+ bp = &asoc->base.bind_addr;
+ p = bp->address_list.next;
+ laddr = list_entry(p, struct sctp_sockaddr_entry, list);
+ chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs,
+ addrcnt, SCTP_PARAM_ADD_IP);
+ if (!chunk) {
+ retval = -ENOMEM;
+ goto out;
+ }
+
+ /* Add the new addresses to the bind address list with
+ * use_as_src set to 0.
+ */
+ addr_buf = addrs;
+ for (i = 0; i < addrcnt; i++) {
+ addr = addr_buf;
+ af = sctp_get_af_specific(addr->v4.sin_family);
+ memcpy(&saveaddr, addr, af->sockaddr_len);
+ retval = sctp_add_bind_addr(bp, &saveaddr,
+ sizeof(saveaddr),
+ SCTP_ADDR_NEW, GFP_ATOMIC);
+ addr_buf += af->sockaddr_len;
+ }
+ if (asoc->src_out_of_asoc_ok) {
+ struct sctp_transport *trans;
+
+ list_for_each_entry(trans,
+ &asoc->peer.transport_addr_list, transports) {
+ trans->cwnd = min(4*asoc->pathmtu, max_t(__u32,
+ 2*asoc->pathmtu, 4380));
+ trans->ssthresh = asoc->peer.i.a_rwnd;
+ trans->rto = asoc->rto_initial;
+ sctp_max_rto(asoc, trans);
+ trans->rtt = trans->srtt = trans->rttvar = 0;
+ /* Clear the source and route cache */
+ sctp_transport_route(trans, NULL,
+ sctp_sk(asoc->base.sk));
+ }
+ }
+ retval = sctp_send_asconf(asoc, chunk);
+ }
+
+out:
+ return retval;
+}
+
+/* Remove a list of addresses from the bind address list. Do not remove the
+ * last address.
+ *
+ * Basically run through each address specified in the addrs/addrcnt
+ * array/length pair, determine if it is IPv6 or IPv4 and call
+ * sctp_del_bind_addr() on it.
+ *
+ * If any of them fails, then the operation will be reversed and the
+ * ones that were removed will be added back.
+ *
+ * At least one address has to be left; if only one address is
+ * available, the operation will return -EBUSY.
+ *
+ * Only sctp_setsockopt_bindx() is supposed to call this function.
+ */
+static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
+{
+ struct sctp_sock *sp = sctp_sk(sk);
+ struct sctp_endpoint *ep = sp->ep;
+ int cnt;
+ struct sctp_bind_addr *bp = &ep->base.bind_addr;
+ int retval = 0;
+ void *addr_buf;
+ union sctp_addr *sa_addr;
+ struct sctp_af *af;
+
+ pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
+ __func__, sk, addrs, addrcnt);
+
+ addr_buf = addrs;
+ for (cnt = 0; cnt < addrcnt; cnt++) {
+ /* If the bind address list is empty or if there is only one
+ * bind address, there is nothing more to be removed (we need
+ * at least one address here).
+ */
+ if (list_empty(&bp->address_list) ||
+ (sctp_list_single_entry(&bp->address_list))) {
+ retval = -EBUSY;
+ goto err_bindx_rem;
+ }
+
+ sa_addr = addr_buf;
+ af = sctp_get_af_specific(sa_addr->sa.sa_family);
+ if (!af) {
+ retval = -EINVAL;
+ goto err_bindx_rem;
+ }
+
+ if (!af->addr_valid(sa_addr, sp, NULL)) {
+ retval = -EADDRNOTAVAIL;
+ goto err_bindx_rem;
+ }
+
+ if (sa_addr->v4.sin_port &&
+ sa_addr->v4.sin_port != htons(bp->port)) {
+ retval = -EINVAL;
+ goto err_bindx_rem;
+ }
+
+ if (!sa_addr->v4.sin_port)
+ sa_addr->v4.sin_port = htons(bp->port);
+
+ /* FIXME - There is probably a need to check if sk->sk_saddr and
+ * sk->sk_rcv_addr are currently set to one of the addresses to
+ * be removed. This is something which needs to be looked into
+ * when we are fixing the outstanding issues with multi-homing
+ * socket routing and failover schemes. Refer to comments in
+ * sctp_do_bind(). -daisy
+ */
+ retval = sctp_del_bind_addr(bp, sa_addr);
+
+ addr_buf += af->sockaddr_len;
+err_bindx_rem:
+ if (retval < 0) {
+			/* Failed. Add the ones that have been removed back */
+ if (cnt > 0)
+ sctp_bindx_add(sk, addrs, cnt);
+ return retval;
+ }
+ }
+
+ return retval;
+}
+
+/* Send an ASCONF chunk with Delete IP address parameters to all the peers of
+ * the associations that are part of the endpoint, indicating that a list of
+ * local addresses is being removed from the endpoint.
+ *
+ * If any of the addresses is not in the bind address list of the
+ * association, we do not send the chunk for that association. But it will not
+ * affect other associations.
+ *
+ * Only sctp_setsockopt_bindx() is supposed to call this function.
+ */
+static int sctp_send_asconf_del_ip(struct sock *sk,
+ struct sockaddr *addrs,
+ int addrcnt)
+{
+ struct sctp_sock *sp;
+ struct sctp_endpoint *ep;
+ struct sctp_association *asoc;
+ struct sctp_transport *transport;
+ struct sctp_bind_addr *bp;
+ struct sctp_chunk *chunk;
+ union sctp_addr *laddr;
+ void *addr_buf;
+ struct sctp_af *af;
+ struct sctp_sockaddr_entry *saddr;
+ int i;
+ int retval = 0;
+ int stored = 0;
+
+ chunk = NULL;
+ sp = sctp_sk(sk);
+ ep = sp->ep;
+
+ if (!ep->asconf_enable)
+ return retval;
+
+ pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
+ __func__, sk, addrs, addrcnt);
+
+ list_for_each_entry(asoc, &ep->asocs, asocs) {
+
+ if (!asoc->peer.asconf_capable)
+ continue;
+
+ if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP)
+ continue;
+
+ if (!sctp_state(asoc, ESTABLISHED))
+ continue;
+
+ /* Check if any address in the packed array of addresses is
+ * not present in the bind address list of the association.
+ * If so, do not send the asconf chunk to its peer, but
+ * continue with other associations.
+ */
+ addr_buf = addrs;
+ for (i = 0; i < addrcnt; i++) {
+ laddr = addr_buf;
+ af = sctp_get_af_specific(laddr->v4.sin_family);
+ if (!af) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ if (!sctp_assoc_lookup_laddr(asoc, laddr))
+ break;
+
+ addr_buf += af->sockaddr_len;
+ }
+ if (i < addrcnt)
+ continue;
+
+ /* Find one address in the association's bind address list
+ * that is not in the packed array of addresses. This is to
+ * make sure that we do not delete all the addresses in the
+ * association.
+ */
+ bp = &asoc->base.bind_addr;
+ laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs,
+ addrcnt, sp);
+ if ((laddr == NULL) && (addrcnt == 1)) {
+ if (asoc->asconf_addr_del_pending)
+ continue;
+ asoc->asconf_addr_del_pending =
+ kzalloc(sizeof(union sctp_addr), GFP_ATOMIC);
+ if (asoc->asconf_addr_del_pending == NULL) {
+ retval = -ENOMEM;
+ goto out;
+ }
+ asoc->asconf_addr_del_pending->sa.sa_family =
+ addrs->sa_family;
+ asoc->asconf_addr_del_pending->v4.sin_port =
+ htons(bp->port);
+ if (addrs->sa_family == AF_INET) {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)addrs;
+ asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr;
+ } else if (addrs->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)addrs;
+ asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr;
+ }
+
+ pr_debug("%s: keep the last address asoc:%p %pISc at %p\n",
+ __func__, asoc, &asoc->asconf_addr_del_pending->sa,
+ asoc->asconf_addr_del_pending);
+
+ asoc->src_out_of_asoc_ok = 1;
+ stored = 1;
+ goto skip_mkasconf;
+ }
+
+ if (laddr == NULL)
+ return -EINVAL;
+
+ /* We do not need RCU protection throughout this loop
+ * because this is done under a socket lock from the
+ * setsockopt call.
+ */
+ chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt,
+ SCTP_PARAM_DEL_IP);
+ if (!chunk) {
+ retval = -ENOMEM;
+ goto out;
+ }
+
+skip_mkasconf:
+ /* Reset use_as_src flag for the addresses in the bind address
+ * list that are to be deleted.
+ */
+ addr_buf = addrs;
+ for (i = 0; i < addrcnt; i++) {
+ laddr = addr_buf;
+ af = sctp_get_af_specific(laddr->v4.sin_family);
+ list_for_each_entry(saddr, &bp->address_list, list) {
+ if (sctp_cmp_addr_exact(&saddr->a, laddr))
+ saddr->state = SCTP_ADDR_DEL;
+ }
+ addr_buf += af->sockaddr_len;
+ }
+
+ /* Update the route and saddr entries for all the transports
+ * as some of the addresses in the bind address list are
+ * about to be deleted and cannot be used as source addresses.
+ */
+ list_for_each_entry(transport, &asoc->peer.transport_addr_list,
+ transports) {
+ sctp_transport_route(transport, NULL,
+ sctp_sk(asoc->base.sk));
+ }
+
+ if (stored)
+ /* We don't need to transmit ASCONF */
+ continue;
+ retval = sctp_send_asconf(asoc, chunk);
+ }
+out:
+ return retval;
+}
+
+/* Apply addr add/del events to the assocs in the endpoint. ep and addr_wq must be locked */
+int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
+{
+ struct sock *sk = sctp_opt2sk(sp);
+ union sctp_addr *addr;
+ struct sctp_af *af;
+
+	/* It is safe to write the port here; the caller holds the needed locks. */
+ addr = &addrw->a;
+ addr->v4.sin_port = htons(sp->ep->base.bind_addr.port);
+ af = sctp_get_af_specific(addr->sa.sa_family);
+ if (!af)
+ return -EINVAL;
+ if (sctp_verify_addr(sk, addr, af->sockaddr_len))
+ return -EINVAL;
+
+ if (addrw->state == SCTP_ADDR_NEW)
+ return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1);
+ else
+ return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1);
+}
+
+/* Helper for tunneling sctp_bindx() requests through sctp_setsockopt()
+ *
+ * API 8.1
+ * int sctp_bindx(int sd, struct sockaddr *addrs, int addrcnt,
+ * int flags);
+ *
+ * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
+ * If the sd is an IPv6 socket, the addresses passed can either be IPv4
+ * or IPv6 addresses.
+ *
+ * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
+ * Section 3.1.2 for this usage.
+ *
+ * addrs is a pointer to an array of one or more socket addresses. Each
+ * address is contained in its appropriate structure (i.e. struct
+ * sockaddr_in or struct sockaddr_in6); the family of the address type
+ * must be used to distinguish the address length (note that this
+ * representation is termed a "packed array" of addresses). The caller
+ * specifies the number of addresses in the array with addrcnt.
+ *
+ * On success, sctp_bindx() returns 0. On failure, sctp_bindx() returns
+ * -1, and sets errno to the appropriate error code.
+ *
+ * For SCTP, the port given in each socket address must be the same, or
+ * sctp_bindx() will fail, setting errno to EINVAL.
+ *
+ * The flags parameter is formed from the bitwise OR of zero or more of
+ * the following currently defined flags:
+ *
+ * SCTP_BINDX_ADD_ADDR
+ *
+ * SCTP_BINDX_REM_ADDR
+ *
+ * SCTP_BINDX_ADD_ADDR directs SCTP to add the given addresses to the
+ * association, and SCTP_BINDX_REM_ADDR directs SCTP to remove the given
+ * addresses from the association. The two flags are mutually exclusive;
+ * if both are given, sctp_bindx() will fail with EINVAL. A caller may
+ * not remove all addresses from an association; sctp_bindx() will
+ * reject such an attempt with EINVAL.
+ *
+ * An application can use sctp_bindx(SCTP_BINDX_ADD_ADDR) to associate
+ * additional addresses with an endpoint after calling bind(), or use
+ * sctp_bindx(SCTP_BINDX_REM_ADDR) to remove some addresses a listening
+ * socket is associated with, so that no newly accepted association will be
+ * associated with those addresses. If the endpoint supports dynamic
+ * address reconfiguration, an SCTP_BINDX_REM_ADDR or SCTP_BINDX_ADD_ADDR
+ * may cause the endpoint to send the appropriate message to the peer to
+ * change the peer's address lists.
+ *
+ * Adding and removing addresses from a connected association is
+ * optional functionality. Implementations that do not support this
+ * functionality should return EOPNOTSUPP.
+ *
+ * Basically do nothing but copy the addresses from user to kernel
+ * land and invoke either sctp_bindx_add() or sctp_bindx_rem() on the sk.
+ * This is used for tunneling the sctp_bindx() request through sctp_setsockopt()
+ * from userspace.
+ *
+ * On exit there is no need to do sockfd_put(), sys_setsockopt() does
+ * it.
+ *
+ * sk The sk of the socket
+ * addrs The pointer to the addresses
+ * addrssize Size of the addrs buffer
+ * op Operation to perform (add or remove, see the flags of
+ * sctp_bindx)
+ *
+ * Returns 0 if ok, <0 errno code on error.
+ */
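+/* Illustrative user-space sketch (not part of the kernel code): assuming the
+ * lksctp-tools sctp_bindx() wrapper declared in <netinet/sctp.h> and a
+ * one-to-many socket sd already bound to port 5000, adding one more local
+ * address could look like this:
+ *
+ *	struct sockaddr_in extra = {
+ *		.sin_family = AF_INET,
+ *		.sin_port   = htons(5000),
+ *	};
+ *
+ *	inet_pton(AF_INET, "192.0.2.2", &extra.sin_addr);
+ *	if (sctp_bindx(sd, (struct sockaddr *)&extra, 1,
+ *		       SCTP_BINDX_ADD_ADDR) < 0)
+ *		perror("sctp_bindx");
+ */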
+static int sctp_setsockopt_bindx(struct sock *sk, struct sockaddr *addrs,
+ int addrs_size, int op)
+{
+ int err;
+ int addrcnt = 0;
+ int walk_size = 0;
+ struct sockaddr *sa_addr;
+ void *addr_buf = addrs;
+ struct sctp_af *af;
+
+ pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n",
+ __func__, sk, addr_buf, addrs_size, op);
+
+ if (unlikely(addrs_size <= 0))
+ return -EINVAL;
+
+ /* Walk through the addrs buffer and count the number of addresses. */
+ while (walk_size < addrs_size) {
+ if (walk_size + sizeof(sa_family_t) > addrs_size)
+ return -EINVAL;
+
+ sa_addr = addr_buf;
+ af = sctp_get_af_specific(sa_addr->sa_family);
+
+ /* If the address family is not supported or if this address
+ * causes the address buffer to overflow return EINVAL.
+ */
+ if (!af || (walk_size + af->sockaddr_len) > addrs_size)
+ return -EINVAL;
+ addrcnt++;
+ addr_buf += af->sockaddr_len;
+ walk_size += af->sockaddr_len;
+ }
+
+ /* Do the work. */
+ switch (op) {
+ case SCTP_BINDX_ADD_ADDR:
+ /* Allow security module to validate bindx addresses. */
+ err = security_sctp_bind_connect(sk, SCTP_SOCKOPT_BINDX_ADD,
+ addrs, addrs_size);
+ if (err)
+ return err;
+ err = sctp_bindx_add(sk, addrs, addrcnt);
+ if (err)
+ return err;
+ return sctp_send_asconf_add_ip(sk, addrs, addrcnt);
+ case SCTP_BINDX_REM_ADDR:
+ err = sctp_bindx_rem(sk, addrs, addrcnt);
+ if (err)
+ return err;
+ return sctp_send_asconf_del_ip(sk, addrs, addrcnt);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int sctp_bind_add(struct sock *sk, struct sockaddr *addrs,
+ int addrlen)
+{
+ int err;
+
+ lock_sock(sk);
+ err = sctp_setsockopt_bindx(sk, addrs, addrlen, SCTP_BINDX_ADD_ADDR);
+ release_sock(sk);
+ return err;
+}
+
+static int sctp_connect_new_asoc(struct sctp_endpoint *ep,
+ const union sctp_addr *daddr,
+ const struct sctp_initmsg *init,
+ struct sctp_transport **tp)
+{
+ struct sctp_association *asoc;
+ struct sock *sk = ep->base.sk;
+ struct net *net = sock_net(sk);
+ enum sctp_scope scope;
+ int err;
+
+ if (sctp_endpoint_is_peeled_off(ep, daddr))
+ return -EADDRNOTAVAIL;
+
+ if (!ep->base.bind_addr.port) {
+ if (sctp_autobind(sk))
+ return -EAGAIN;
+ } else {
+ if (inet_port_requires_bind_service(net, ep->base.bind_addr.port) &&
+ !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
+ return -EACCES;
+ }
+
+ scope = sctp_scope(daddr);
+ asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
+ if (!asoc)
+ return -ENOMEM;
+
+ err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL);
+ if (err < 0)
+ goto free;
+
+ *tp = sctp_assoc_add_peer(asoc, daddr, GFP_KERNEL, SCTP_UNKNOWN);
+ if (!*tp) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ if (!init)
+ return 0;
+
+ if (init->sinit_num_ostreams) {
+ __u16 outcnt = init->sinit_num_ostreams;
+
+ asoc->c.sinit_num_ostreams = outcnt;
+ /* outcnt has been changed, need to re-init stream */
+ err = sctp_stream_init(&asoc->stream, outcnt, 0, GFP_KERNEL);
+ if (err)
+ goto free;
+ }
+
+ if (init->sinit_max_instreams)
+ asoc->c.sinit_max_instreams = init->sinit_max_instreams;
+
+ if (init->sinit_max_attempts)
+ asoc->max_init_attempts = init->sinit_max_attempts;
+
+ if (init->sinit_max_init_timeo)
+ asoc->max_init_timeo =
+ msecs_to_jiffies(init->sinit_max_init_timeo);
+
+ return 0;
+free:
+ sctp_association_free(asoc);
+ return err;
+}
+
+static int sctp_connect_add_peer(struct sctp_association *asoc,
+ union sctp_addr *daddr, int addr_len)
+{
+ struct sctp_endpoint *ep = asoc->ep;
+ struct sctp_association *old;
+ struct sctp_transport *t;
+ int err;
+
+ err = sctp_verify_addr(ep->base.sk, daddr, addr_len);
+ if (err)
+ return err;
+
+ old = sctp_endpoint_lookup_assoc(ep, daddr, &t);
+ if (old && old != asoc)
+ return old->state >= SCTP_STATE_ESTABLISHED ? -EISCONN
+ : -EALREADY;
+
+ if (sctp_endpoint_is_peeled_off(ep, daddr))
+ return -EADDRNOTAVAIL;
+
+ t = sctp_assoc_add_peer(asoc, daddr, GFP_KERNEL, SCTP_UNKNOWN);
+ if (!t)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/* __sctp_connect(struct sock *sk, struct sockaddr *kaddrs, int addrs_size,
+ *		  int flags, sctp_assoc_t *assoc_id)
+ *
+ * Common routine for handling connect() and sctp_connectx().
+ * Connect will come in with just a single address.
+ */
+static int __sctp_connect(struct sock *sk, struct sockaddr *kaddrs,
+ int addrs_size, int flags, sctp_assoc_t *assoc_id)
+{
+ struct sctp_sock *sp = sctp_sk(sk);
+ struct sctp_endpoint *ep = sp->ep;
+ struct sctp_transport *transport;
+ struct sctp_association *asoc;
+ void *addr_buf = kaddrs;
+ union sctp_addr *daddr;
+ struct sctp_af *af;
+ int walk_size, err;
+ long timeo;
+
+ if (sctp_sstate(sk, ESTABLISHED) || sctp_sstate(sk, CLOSING) ||
+ (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)))
+ return -EISCONN;
+
+ daddr = addr_buf;
+ af = sctp_get_af_specific(daddr->sa.sa_family);
+ if (!af || af->sockaddr_len > addrs_size)
+ return -EINVAL;
+
+ err = sctp_verify_addr(sk, daddr, af->sockaddr_len);
+ if (err)
+ return err;
+
+ asoc = sctp_endpoint_lookup_assoc(ep, daddr, &transport);
+ if (asoc)
+ return asoc->state >= SCTP_STATE_ESTABLISHED ? -EISCONN
+ : -EALREADY;
+
+ err = sctp_connect_new_asoc(ep, daddr, NULL, &transport);
+ if (err)
+ return err;
+ asoc = transport->asoc;
+
+ addr_buf += af->sockaddr_len;
+ walk_size = af->sockaddr_len;
+ while (walk_size < addrs_size) {
+ err = -EINVAL;
+ if (walk_size + sizeof(sa_family_t) > addrs_size)
+ goto out_free;
+
+ daddr = addr_buf;
+ af = sctp_get_af_specific(daddr->sa.sa_family);
+ if (!af || af->sockaddr_len + walk_size > addrs_size)
+ goto out_free;
+
+ if (asoc->peer.port != ntohs(daddr->v4.sin_port))
+ goto out_free;
+
+ err = sctp_connect_add_peer(asoc, daddr, af->sockaddr_len);
+ if (err)
+ goto out_free;
+
+ addr_buf += af->sockaddr_len;
+ walk_size += af->sockaddr_len;
+ }
+
+ /* In case the user of sctp_connectx() wants an association
+ * id back, assign one now.
+ */
+ if (assoc_id) {
+ err = sctp_assoc_set_id(asoc, GFP_KERNEL);
+ if (err < 0)
+ goto out_free;
+ }
+
+ err = sctp_primitive_ASSOCIATE(sock_net(sk), asoc, NULL);
+ if (err < 0)
+ goto out_free;
+
+ /* Initialize sk's dport and daddr for getpeername() */
+ inet_sk(sk)->inet_dport = htons(asoc->peer.port);
+ sp->pf->to_sk_daddr(daddr, sk);
+ sk->sk_err = 0;
+
+ if (assoc_id)
+ *assoc_id = asoc->assoc_id;
+
+ timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
+ return sctp_wait_for_connect(asoc, &timeo);
+
+out_free:
+ pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n",
+ __func__, asoc, kaddrs, err);
+ sctp_association_free(asoc);
+ return err;
+}
+
+/* Helper for tunneling sctp_connectx() requests through sctp_setsockopt()
+ *
+ * API 8.9
+ * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt,
+ * sctp_assoc_t *asoc);
+ *
+ * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
+ * If the sd is an IPv6 socket, the addresses passed can either be IPv4
+ * or IPv6 addresses.
+ *
+ * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
+ * Section 3.1.2 for this usage.
+ *
+ * addrs is a pointer to an array of one or more socket addresses. Each
+ * address is contained in its appropriate structure (i.e. struct
+ * sockaddr_in or struct sockaddr_in6); the family of the address type
+ * must be used to distinguish the address length (note that this
+ * representation is termed a "packed array" of addresses). The caller
+ * specifies the number of addresses in the array with addrcnt.
+ *
+ * On success, sctp_connectx() returns 0. It also sets the assoc_id to
+ * the association id of the new association. On failure, sctp_connectx()
+ * returns -1, and sets errno to the appropriate error code. The assoc_id
+ * is not touched by the kernel.
+ *
+ * For SCTP, the port given in each socket address must be the same, or
+ * sctp_connectx() will fail, setting errno to EINVAL.
+ *
+ * An application can use sctp_connectx to initiate an association with
+ * an endpoint that is multi-homed. Much like sctp_bindx() this call
+ * allows a caller to specify multiple addresses at which a peer can be
+ * reached. The way the SCTP stack uses the list of addresses to set up
+ * the association is implementation dependent. This function only
+ * specifies that the stack will try to make use of all the addresses in
+ * the list when needed.
+ *
+ * Note that the list of addresses passed in is only used for setting up
+ * the association. It does not necessarily equal the set of addresses
+ * the peer uses for the resulting association. If the caller wants to
+ * find out the set of peer addresses, it must use sctp_getpaddrs() to
+ * retrieve them after the association has been set up.
+ *
+ * Basically do nothing but copy the addresses from user to kernel
+ * land and invoke __sctp_connect(). This is used for tunneling
+ * the sctp_connectx() request through sctp_setsockopt() from userspace.
+ *
+ * On exit there is no need to do sockfd_put(), sys_setsockopt() does
+ * it.
+ *
+ * sk The sk of the socket
+ * addrs The pointer to the addresses
+ * addrssize Size of the addrs buffer
+ *
+ * Returns >=0 if ok, <0 errno code on error.
+ */
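+/* Illustrative user-space sketch (not part of the kernel code): a minimal use
+ * of the lksctp-tools sctp_connectx() wrapper with two IPv4 peer addresses
+ * packed back to back; sd and the documentation addresses are assumptions:
+ *
+ *	struct sockaddr_in peers[2] = {
+ *		{ .sin_family = AF_INET, .sin_port = htons(5000) },
+ *		{ .sin_family = AF_INET, .sin_port = htons(5000) },
+ *	};
+ *	sctp_assoc_t aid;
+ *
+ *	inet_pton(AF_INET, "192.0.2.10", &peers[0].sin_addr);
+ *	inet_pton(AF_INET, "198.51.100.10", &peers[1].sin_addr);
+ *	if (sctp_connectx(sd, (struct sockaddr *)peers, 2, &aid) < 0)
+ *		perror("sctp_connectx");
+ */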
+static int __sctp_setsockopt_connectx(struct sock *sk, struct sockaddr *kaddrs,
+ int addrs_size, sctp_assoc_t *assoc_id)
+{
+ int err = 0, flags = 0;
+
+ pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
+ __func__, sk, kaddrs, addrs_size);
+
+ /* make sure the 1st addr's sa_family is accessible later */
+ if (unlikely(addrs_size < sizeof(sa_family_t)))
+ return -EINVAL;
+
+ /* Allow security module to validate connectx addresses. */
+ err = security_sctp_bind_connect(sk, SCTP_SOCKOPT_CONNECTX,
+ (struct sockaddr *)kaddrs,
+ addrs_size);
+ if (err)
+ return err;
+
+ /* in-kernel sockets don't generally have a file allocated to them
+ * if all they do is call sock_create_kern().
+ */
+ if (sk->sk_socket->file)
+ flags = sk->sk_socket->file->f_flags;
+
+ return __sctp_connect(sk, kaddrs, addrs_size, flags, assoc_id);
+}
+
+/*
+ * This is an older interface. It's kept for backward compatibility
+ * with the option that doesn't provide an association id.
+ */
+static int sctp_setsockopt_connectx_old(struct sock *sk,
+ struct sockaddr *kaddrs,
+ int addrs_size)
+{
+ return __sctp_setsockopt_connectx(sk, kaddrs, addrs_size, NULL);
+}
+
+/*
+ * New interface for the API. Since the API is done with a socket
+ * option, to make it simple we feed the association id back as the return
+ * indication of the call. Errors are always negative and association ids are
+ * always positive.
+ */
+static int sctp_setsockopt_connectx(struct sock *sk,
+ struct sockaddr *kaddrs,
+ int addrs_size)
+{
+ sctp_assoc_t assoc_id = 0;
+ int err = 0;
+
+ err = __sctp_setsockopt_connectx(sk, kaddrs, addrs_size, &assoc_id);
+
+ if (err)
+ return err;
+ else
+ return assoc_id;
+}
+
+/*
+ * New (hopefully final) interface for the API.
+ * We use the sctp_getaddrs_old structure so that the user-space library
+ * can avoid any unnecessary allocations. The only difference
+ * is that we store the actual length of the address buffer in the
+ * addr_num structure member. That way we can re-use the existing
+ * code.
+ */
+#ifdef CONFIG_COMPAT
+struct compat_sctp_getaddrs_old {
+ sctp_assoc_t assoc_id;
+ s32 addr_num;
+ compat_uptr_t addrs; /* struct sockaddr * */
+};
+#endif
+
+static int sctp_getsockopt_connectx3(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_getaddrs_old param;
+ sctp_assoc_t assoc_id = 0;
+ struct sockaddr *kaddrs;
+ int err = 0;
+
+#ifdef CONFIG_COMPAT
+ if (in_compat_syscall()) {
+ struct compat_sctp_getaddrs_old param32;
+
+ if (len < sizeof(param32))
+ return -EINVAL;
+ if (copy_from_user(&param32, optval, sizeof(param32)))
+ return -EFAULT;
+
+ param.assoc_id = param32.assoc_id;
+ param.addr_num = param32.addr_num;
+ param.addrs = compat_ptr(param32.addrs);
+ } else
+#endif
+ {
+ if (len < sizeof(param))
+ return -EINVAL;
+ if (copy_from_user(&param, optval, sizeof(param)))
+ return -EFAULT;
+ }
+
+ kaddrs = memdup_user(param.addrs, param.addr_num);
+ if (IS_ERR(kaddrs))
+ return PTR_ERR(kaddrs);
+
+ err = __sctp_setsockopt_connectx(sk, kaddrs, param.addr_num, &assoc_id);
+ kfree(kaddrs);
+ if (err == 0 || err == -EINPROGRESS) {
+ if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
+ return -EFAULT;
+ if (put_user(sizeof(assoc_id), optlen))
+ return -EFAULT;
+ }
+
+ return err;
+}
+
+/* API 3.1.4 close() - UDP Style Syntax
+ * Applications use close() to perform graceful shutdown (as described in
+ * Section 10.1 of [SCTP]) on ALL the associations currently represented
+ * by a UDP-style socket.
+ *
+ * The syntax is
+ *
+ * ret = close(int sd);
+ *
+ * sd - the socket descriptor of the associations to be closed.
+ *
+ * To gracefully shutdown a specific association represented by the
+ * UDP-style socket, an application should use the sendmsg() call,
+ * passing no user data, but including the appropriate flag in the
+ * ancillary data (see Section xxxx).
+ *
+ * If sd in the close() call is a branched-off socket representing only
+ * one association, the shutdown is performed on that association only.
+ *
+ * 4.1.6 close() - TCP Style Syntax
+ *
+ * Applications use close() to gracefully close down an association.
+ *
+ * The syntax is:
+ *
+ * int close(int sd);
+ *
+ * sd - the socket descriptor of the association to be closed.
+ *
+ * After an application calls close() on a socket descriptor, no further
+ * socket operations will succeed on that descriptor.
+ *
+ * API 7.1.4 SO_LINGER
+ *
+ * An application using the TCP-style socket can use this option to
+ * perform the SCTP ABORT primitive. The linger option structure is:
+ *
+ * struct linger {
+ * int l_onoff; // option on/off
+ * int l_linger; // linger time
+ * };
+ *
+ * To enable the option, set l_onoff to 1. If the l_linger value is set
+ * to 0, calling close() is the same as the ABORT primitive. If the
+ * value is set to a negative value, the setsockopt() call will return
+ * an error. If the value is set to a positive value linger_time, the
+ * close() can be blocked for at most linger_time ms. If the graceful
+ * shutdown phase does not finish during this period, close() will
+ * return but the graceful shutdown phase continues in the system.
+ */
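+/* Illustrative user-space sketch (not part of the kernel code): using the
+ * SO_LINGER semantics above to make close() abort a TCP-style association;
+ * sd is an assumed connected SCTP socket:
+ *
+ *	struct linger l = { .l_onoff = 1, .l_linger = 0 };
+ *
+ *	if (setsockopt(sd, SOL_SOCKET, SO_LINGER, &l, sizeof(l)) < 0)
+ *		perror("setsockopt(SO_LINGER)");
+ *	close(sd);
+ *
+ * With l_onoff set to 1 and l_linger set to 0, the close() above acts as the
+ * ABORT primitive rather than a graceful SHUTDOWN.
+ */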
+static void sctp_close(struct sock *sk, long timeout)
+{
+ struct net *net = sock_net(sk);
+ struct sctp_endpoint *ep;
+ struct sctp_association *asoc;
+ struct list_head *pos, *temp;
+ unsigned int data_was_unread;
+
+ pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);
+
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+ sk->sk_shutdown = SHUTDOWN_MASK;
+ inet_sk_set_state(sk, SCTP_SS_CLOSING);
+
+ ep = sctp_sk(sk)->ep;
+
+ /* Clean up any skbs sitting on the receive queue. */
+ data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
+ data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
+
+ /* Walk all associations on an endpoint. */
+ list_for_each_safe(pos, temp, &ep->asocs) {
+ asoc = list_entry(pos, struct sctp_association, asocs);
+
+ if (sctp_style(sk, TCP)) {
+ /* A closed association can still be in the list if
+ * it belongs to a TCP-style listening socket that is
+ * not yet accepted. If so, free it. If not, send an
+ * ABORT or SHUTDOWN based on the linger options.
+ */
+ if (sctp_state(asoc, CLOSED)) {
+ sctp_association_free(asoc);
+ continue;
+ }
+ }
+
+ if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
+ !skb_queue_empty(&asoc->ulpq.reasm) ||
+ !skb_queue_empty(&asoc->ulpq.reasm_uo) ||
+ (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
+ struct sctp_chunk *chunk;
+
+ chunk = sctp_make_abort_user(asoc, NULL, 0);
+ sctp_primitive_ABORT(net, asoc, chunk);
+ } else
+ sctp_primitive_SHUTDOWN(net, asoc, NULL);
+ }
+
+ /* On a TCP-style socket, block for at most linger_time if set. */
+ if (sctp_style(sk, TCP) && timeout)
+ sctp_wait_for_close(sk, timeout);
+
+ /* This will run the backlog queue. */
+ release_sock(sk);
+
+ /* Supposedly, no process has access to the socket, but
+ * the net layers still may.
+ * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
+ * held and that should be grabbed before socket lock.
+ */
+ spin_lock_bh(&net->sctp.addr_wq_lock);
+ bh_lock_sock_nested(sk);
+
+	/* Hold the sock, since sk_common_release() will call sock_put()
+ * and we have just a little more cleanup.
+ */
+ sock_hold(sk);
+ sk_common_release(sk);
+
+ bh_unlock_sock(sk);
+ spin_unlock_bh(&net->sctp.addr_wq_lock);
+
+ sock_put(sk);
+
+ SCTP_DBG_OBJCNT_DEC(sock);
+}
+
+/* Handle EPIPE error. */
+static int sctp_error(struct sock *sk, int flags, int err)
+{
+ if (err == -EPIPE)
+ err = sock_error(sk) ? : -EPIPE;
+ if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
+ send_sig(SIGPIPE, current, 0);
+ return err;
+}
+
+/* API 3.1.3 sendmsg() - UDP Style Syntax
+ *
+ * An application uses sendmsg() and recvmsg() calls to transmit data to
+ * and receive data from its peer.
+ *
+ * ssize_t sendmsg(int socket, const struct msghdr *message,
+ * int flags);
+ *
+ * socket - the socket descriptor of the endpoint.
+ * message - pointer to the msghdr structure which contains a single
+ * user message and possibly some ancillary data.
+ *
+ * See Section 5 for complete description of the data
+ * structures.
+ *
+ * flags - flags sent or received with the user message, see Section
+ * 5 for complete description of the flags.
+ *
+ * Note: This function could use a rewrite especially when explicit
+ * connect support comes in.
+ */
+/* BUG: We do not implement the equivalent of sk_stream_wait_memory(). */
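+/* Illustrative user-space sketch (not part of the kernel code): sending one
+ * message on stream 0 with the lksctp-tools sctp_sendmsg() wrapper, which
+ * builds the msghdr and ancillary data described above; sd and the peer
+ * address are assumptions:
+ *
+ *	struct sockaddr_in to = { .sin_family = AF_INET,
+ *				  .sin_port   = htons(5000) };
+ *	const char buf[] = "hello";
+ *
+ *	inet_pton(AF_INET, "192.0.2.10", &to.sin_addr);
+ *	if (sctp_sendmsg(sd, buf, sizeof(buf), (struct sockaddr *)&to,
+ *			 sizeof(to), 0, 0, 0, 0, 0) < 0)
+ *		perror("sctp_sendmsg");
+ */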
+
+static int sctp_msghdr_parse(const struct msghdr *msg,
+ struct sctp_cmsgs *cmsgs);
+
+static int sctp_sendmsg_parse(struct sock *sk, struct sctp_cmsgs *cmsgs,
+ struct sctp_sndrcvinfo *srinfo,
+ const struct msghdr *msg, size_t msg_len)
+{
+ __u16 sflags;
+ int err;
+
+ if (sctp_sstate(sk, LISTENING) && sctp_style(sk, TCP))
+ return -EPIPE;
+
+ if (msg_len > sk->sk_sndbuf)
+ return -EMSGSIZE;
+
+ memset(cmsgs, 0, sizeof(*cmsgs));
+ err = sctp_msghdr_parse(msg, cmsgs);
+ if (err) {
+ pr_debug("%s: msghdr parse err:%x\n", __func__, err);
+ return err;
+ }
+
+ memset(srinfo, 0, sizeof(*srinfo));
+ if (cmsgs->srinfo) {
+ srinfo->sinfo_stream = cmsgs->srinfo->sinfo_stream;
+ srinfo->sinfo_flags = cmsgs->srinfo->sinfo_flags;
+ srinfo->sinfo_ppid = cmsgs->srinfo->sinfo_ppid;
+ srinfo->sinfo_context = cmsgs->srinfo->sinfo_context;
+ srinfo->sinfo_assoc_id = cmsgs->srinfo->sinfo_assoc_id;
+ srinfo->sinfo_timetolive = cmsgs->srinfo->sinfo_timetolive;
+ }
+
+ if (cmsgs->sinfo) {
+ srinfo->sinfo_stream = cmsgs->sinfo->snd_sid;
+ srinfo->sinfo_flags = cmsgs->sinfo->snd_flags;
+ srinfo->sinfo_ppid = cmsgs->sinfo->snd_ppid;
+ srinfo->sinfo_context = cmsgs->sinfo->snd_context;
+ srinfo->sinfo_assoc_id = cmsgs->sinfo->snd_assoc_id;
+ }
+
+ if (cmsgs->prinfo) {
+ srinfo->sinfo_timetolive = cmsgs->prinfo->pr_value;
+ SCTP_PR_SET_POLICY(srinfo->sinfo_flags,
+ cmsgs->prinfo->pr_policy);
+ }
+
+ sflags = srinfo->sinfo_flags;
+ if (!sflags && msg_len)
+ return 0;
+
+ if (sctp_style(sk, TCP) && (sflags & (SCTP_EOF | SCTP_ABORT)))
+ return -EINVAL;
+
+ if (((sflags & SCTP_EOF) && msg_len > 0) ||
+ (!(sflags & (SCTP_EOF | SCTP_ABORT)) && msg_len == 0))
+ return -EINVAL;
+
+ if ((sflags & SCTP_ADDR_OVER) && !msg->msg_name)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int sctp_sendmsg_new_asoc(struct sock *sk, __u16 sflags,
+ struct sctp_cmsgs *cmsgs,
+ union sctp_addr *daddr,
+ struct sctp_transport **tp)
+{
+ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
+ struct sctp_association *asoc;
+ struct cmsghdr *cmsg;
+ __be32 flowinfo = 0;
+ struct sctp_af *af;
+ int err;
+
+ *tp = NULL;
+
+ if (sflags & (SCTP_EOF | SCTP_ABORT))
+ return -EINVAL;
+
+ if (sctp_style(sk, TCP) && (sctp_sstate(sk, ESTABLISHED) ||
+ sctp_sstate(sk, CLOSING)))
+ return -EADDRNOTAVAIL;
+
+	/* Label the connection socket for the first association of a
+	 * 1-to-many style socket (client sequence socket()->sendmsg()). This
+ * needs to be done before sctp_assoc_add_peer() as that will
+ * set up the initial packet that needs to account for any
+ * security ip options (CIPSO/CALIPSO) added to the packet.
+ */
+ af = sctp_get_af_specific(daddr->sa.sa_family);
+ if (!af)
+ return -EINVAL;
+ err = security_sctp_bind_connect(sk, SCTP_SENDMSG_CONNECT,
+ (struct sockaddr *)daddr,
+ af->sockaddr_len);
+ if (err < 0)
+ return err;
+
+ err = sctp_connect_new_asoc(ep, daddr, cmsgs->init, tp);
+ if (err)
+ return err;
+ asoc = (*tp)->asoc;
+
+ if (!cmsgs->addrs_msg)
+ return 0;
+
+ if (daddr->sa.sa_family == AF_INET6)
+ flowinfo = daddr->v6.sin6_flowinfo;
+
+ /* sendv addr list parse */
+ for_each_cmsghdr(cmsg, cmsgs->addrs_msg) {
+ union sctp_addr _daddr;
+ int dlen;
+
+ if (cmsg->cmsg_level != IPPROTO_SCTP ||
+ (cmsg->cmsg_type != SCTP_DSTADDRV4 &&
+ cmsg->cmsg_type != SCTP_DSTADDRV6))
+ continue;
+
+ daddr = &_daddr;
+ memset(daddr, 0, sizeof(*daddr));
+ dlen = cmsg->cmsg_len - sizeof(struct cmsghdr);
+ if (cmsg->cmsg_type == SCTP_DSTADDRV4) {
+ if (dlen < sizeof(struct in_addr)) {
+ err = -EINVAL;
+ goto free;
+ }
+
+ dlen = sizeof(struct in_addr);
+ daddr->v4.sin_family = AF_INET;
+ daddr->v4.sin_port = htons(asoc->peer.port);
+ memcpy(&daddr->v4.sin_addr, CMSG_DATA(cmsg), dlen);
+ } else {
+ if (dlen < sizeof(struct in6_addr)) {
+ err = -EINVAL;
+ goto free;
+ }
+
+ dlen = sizeof(struct in6_addr);
+ daddr->v6.sin6_flowinfo = flowinfo;
+ daddr->v6.sin6_family = AF_INET6;
+ daddr->v6.sin6_port = htons(asoc->peer.port);
+ memcpy(&daddr->v6.sin6_addr, CMSG_DATA(cmsg), dlen);
+ }
+
+ err = sctp_connect_add_peer(asoc, daddr, sizeof(*daddr));
+ if (err)
+ goto free;
+ }
+
+ return 0;
+
+free:
+ sctp_association_free(asoc);
+ return err;
+}
+
+static int sctp_sendmsg_check_sflags(struct sctp_association *asoc,
+ __u16 sflags, struct msghdr *msg,
+ size_t msg_len)
+{
+ struct sock *sk = asoc->base.sk;
+ struct net *net = sock_net(sk);
+
+ if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP))
+ return -EPIPE;
+
+ if ((sflags & SCTP_SENDALL) && sctp_style(sk, UDP) &&
+ !sctp_state(asoc, ESTABLISHED))
+ return 0;
+
+ if (sflags & SCTP_EOF) {
+ pr_debug("%s: shutting down association:%p\n", __func__, asoc);
+ sctp_primitive_SHUTDOWN(net, asoc, NULL);
+
+ return 0;
+ }
+
+ if (sflags & SCTP_ABORT) {
+ struct sctp_chunk *chunk;
+
+ chunk = sctp_make_abort_user(asoc, msg, msg_len);
+ if (!chunk)
+ return -ENOMEM;
+
+ pr_debug("%s: aborting association:%p\n", __func__, asoc);
+ sctp_primitive_ABORT(net, asoc, chunk);
+ iov_iter_revert(&msg->msg_iter, msg_len);
+
+ return 0;
+ }
+
+ return 1;
+}
+
+static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
+ struct msghdr *msg, size_t msg_len,
+ struct sctp_transport *transport,
+ struct sctp_sndrcvinfo *sinfo)
+{
+ struct sock *sk = asoc->base.sk;
+ struct sctp_sock *sp = sctp_sk(sk);
+ struct net *net = sock_net(sk);
+ struct sctp_datamsg *datamsg;
+ bool wait_connect = false;
+ struct sctp_chunk *chunk;
+ long timeo;
+ int err;
+
+ if (sinfo->sinfo_stream >= asoc->stream.outcnt) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ if (unlikely(!SCTP_SO(&asoc->stream, sinfo->sinfo_stream)->ext)) {
+ err = sctp_stream_init_ext(&asoc->stream, sinfo->sinfo_stream);
+ if (err)
+ goto err;
+ }
+
+ if (sp->disable_fragments && msg_len > asoc->frag_point) {
+ err = -EMSGSIZE;
+ goto err;
+ }
+
+ if (asoc->pmtu_pending) {
+ if (sp->param_flags & SPP_PMTUD_ENABLE)
+ sctp_assoc_sync_pmtu(asoc);
+ asoc->pmtu_pending = 0;
+ }
+
+ if (sctp_wspace(asoc) < (int)msg_len)
+ sctp_prsctp_prune(asoc, sinfo, msg_len - sctp_wspace(asoc));
+
+ if (sctp_wspace(asoc) <= 0 || !sk_wmem_schedule(sk, msg_len)) {
+ timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
+ err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
+ if (err)
+ goto err;
+ if (unlikely(sinfo->sinfo_stream >= asoc->stream.outcnt)) {
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+ if (sctp_state(asoc, CLOSED)) {
+ err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
+ if (err)
+ goto err;
+
+ if (asoc->ep->intl_enable) {
+ timeo = sock_sndtimeo(sk, 0);
+ err = sctp_wait_for_connect(asoc, &timeo);
+ if (err) {
+ err = -ESRCH;
+ goto err;
+ }
+ } else {
+ wait_connect = true;
+ }
+
+ pr_debug("%s: we associated primitively\n", __func__);
+ }
+
+ datamsg = sctp_datamsg_from_user(asoc, sinfo, &msg->msg_iter);
+ if (IS_ERR(datamsg)) {
+ err = PTR_ERR(datamsg);
+ goto err;
+ }
+
+ asoc->force_delay = !!(msg->msg_flags & MSG_MORE);
+
+ list_for_each_entry(chunk, &datamsg->chunks, frag_list) {
+ sctp_chunk_hold(chunk);
+ sctp_set_owner_w(chunk);
+ chunk->transport = transport;
+ }
+
+ err = sctp_primitive_SEND(net, asoc, datamsg);
+ if (err) {
+ sctp_datamsg_free(datamsg);
+ goto err;
+ }
+
+ pr_debug("%s: we sent primitively\n", __func__);
+
+ sctp_datamsg_put(datamsg);
+
+ if (unlikely(wait_connect)) {
+ timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
+ sctp_wait_for_connect(asoc, &timeo);
+ }
+
+ err = msg_len;
+
+err:
+ return err;
+}
+
+static union sctp_addr *sctp_sendmsg_get_daddr(struct sock *sk,
+ const struct msghdr *msg,
+ struct sctp_cmsgs *cmsgs)
+{
+ union sctp_addr *daddr = NULL;
+ int err;
+
+ if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) {
+ int len = msg->msg_namelen;
+
+ if (len > sizeof(*daddr))
+ len = sizeof(*daddr);
+
+ daddr = (union sctp_addr *)msg->msg_name;
+
+ err = sctp_verify_addr(sk, daddr, len);
+ if (err)
+ return ERR_PTR(err);
+ }
+
+ return daddr;
+}
+
+static void sctp_sendmsg_update_sinfo(struct sctp_association *asoc,
+ struct sctp_sndrcvinfo *sinfo,
+ struct sctp_cmsgs *cmsgs)
+{
+ if (!cmsgs->srinfo && !cmsgs->sinfo) {
+ sinfo->sinfo_stream = asoc->default_stream;
+ sinfo->sinfo_ppid = asoc->default_ppid;
+ sinfo->sinfo_context = asoc->default_context;
+ sinfo->sinfo_assoc_id = sctp_assoc2id(asoc);
+
+ if (!cmsgs->prinfo)
+ sinfo->sinfo_flags = asoc->default_flags;
+ }
+
+ if (!cmsgs->srinfo && !cmsgs->prinfo)
+ sinfo->sinfo_timetolive = asoc->default_timetolive;
+
+ if (cmsgs->authinfo) {
+ /* Reuse sinfo_tsn to indicate that authinfo was set and
+ * sinfo_ssn to save the keyid on tx path.
+ */
+ sinfo->sinfo_tsn = 1;
+ sinfo->sinfo_ssn = cmsgs->authinfo->auth_keynumber;
+ }
+}
+
+static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
+{
+ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
+ struct sctp_transport *transport = NULL;
+ struct sctp_sndrcvinfo _sinfo, *sinfo;
+ struct sctp_association *asoc, *tmp;
+ struct sctp_cmsgs cmsgs;
+ union sctp_addr *daddr;
+ bool new = false;
+ __u16 sflags;
+ int err;
+
+ /* Parse and get snd_info */
+ err = sctp_sendmsg_parse(sk, &cmsgs, &_sinfo, msg, msg_len);
+ if (err)
+ goto out;
+
+ sinfo = &_sinfo;
+ sflags = sinfo->sinfo_flags;
+
+ /* Get daddr from msg */
+ daddr = sctp_sendmsg_get_daddr(sk, msg, &cmsgs);
+ if (IS_ERR(daddr)) {
+ err = PTR_ERR(daddr);
+ goto out;
+ }
+
+ lock_sock(sk);
+
+ /* SCTP_SENDALL process */
+ if ((sflags & SCTP_SENDALL) && sctp_style(sk, UDP)) {
+ list_for_each_entry_safe(asoc, tmp, &ep->asocs, asocs) {
+ err = sctp_sendmsg_check_sflags(asoc, sflags, msg,
+ msg_len);
+ if (err == 0)
+ continue;
+ if (err < 0)
+ goto out_unlock;
+
+ sctp_sendmsg_update_sinfo(asoc, sinfo, &cmsgs);
+
+ err = sctp_sendmsg_to_asoc(asoc, msg, msg_len,
+ NULL, sinfo);
+ if (err < 0)
+ goto out_unlock;
+
+ iov_iter_revert(&msg->msg_iter, err);
+ }
+
+ goto out_unlock;
+ }
+
+ /* Get and check or create asoc */
+ if (daddr) {
+ asoc = sctp_endpoint_lookup_assoc(ep, daddr, &transport);
+ if (asoc) {
+ err = sctp_sendmsg_check_sflags(asoc, sflags, msg,
+ msg_len);
+ if (err <= 0)
+ goto out_unlock;
+ } else {
+ err = sctp_sendmsg_new_asoc(sk, sflags, &cmsgs, daddr,
+ &transport);
+ if (err)
+ goto out_unlock;
+
+ asoc = transport->asoc;
+ new = true;
+ }
+
+ if (!sctp_style(sk, TCP) && !(sflags & SCTP_ADDR_OVER))
+ transport = NULL;
+ } else {
+ asoc = sctp_id2assoc(sk, sinfo->sinfo_assoc_id);
+ if (!asoc) {
+ err = -EPIPE;
+ goto out_unlock;
+ }
+
+ err = sctp_sendmsg_check_sflags(asoc, sflags, msg, msg_len);
+ if (err <= 0)
+ goto out_unlock;
+ }
+
+ /* Update snd_info with the asoc */
+ sctp_sendmsg_update_sinfo(asoc, sinfo, &cmsgs);
+
+ /* Send msg to the asoc */
+ err = sctp_sendmsg_to_asoc(asoc, msg, msg_len, transport, sinfo);
+ if (err < 0 && err != -ESRCH && new)
+ sctp_association_free(asoc);
+
+out_unlock:
+ release_sock(sk);
+out:
+ return sctp_error(sk, msg->msg_flags, err);
+}
+
+/* This is an extended version of skb_pull() that removes the data from the
+ * start of an skb even when data is spread across the list of skb's in the
+ * frag_list. len specifies the total amount of data that needs to be removed.
+ * When 'len' bytes could be removed from the skb, it returns 0.
+ * If 'len' exceeds the total skb length, it returns the number of bytes that
+ * could not be removed.
+ */
+static int sctp_skb_pull(struct sk_buff *skb, int len)
+{
+ struct sk_buff *list;
+ int skb_len = skb_headlen(skb);
+ int rlen;
+
+ if (len <= skb_len) {
+ __skb_pull(skb, len);
+ return 0;
+ }
+ len -= skb_len;
+ __skb_pull(skb, skb_len);
+
+ skb_walk_frags(skb, list) {
+ rlen = sctp_skb_pull(list, len);
+ skb->len -= (len-rlen);
+ skb->data_len -= (len-rlen);
+
+ if (!rlen)
+ return 0;
+
+ len = rlen;
+ }
+
+ return len;
+}
+
+/* API 3.1.3 recvmsg() - UDP Style Syntax
+ *
+ * ssize_t recvmsg(int socket, struct msghdr *message,
+ * int flags);
+ *
+ * socket - the socket descriptor of the endpoint.
+ * message - pointer to the msghdr structure which contains a single
+ * user message and possibly some ancillary data.
+ *
+ * See Section 5 for complete description of the data
+ * structures.
+ *
+ * flags - flags sent or received with the user message, see Section
+ * 5 for complete description of the flags.
+ */
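+/* Illustrative user-space sketch (not part of the kernel code): receiving
+ * either user data or a notification with the lksctp-tools sctp_recvmsg()
+ * wrapper; sd is assumed, and handle_notification() and handle_data() are
+ * hypothetical application helpers:
+ *
+ *	char buf[2048];
+ *	struct sctp_sndrcvinfo sinfo;
+ *	int flags = 0;
+ *	int n;
+ *
+ *	n = sctp_recvmsg(sd, buf, sizeof(buf), NULL, NULL, &sinfo, &flags);
+ *	if (n < 0)
+ *		perror("sctp_recvmsg");
+ *	else if (flags & MSG_NOTIFICATION)
+ *		handle_notification(buf, n);
+ *	else
+ *		handle_data(buf, n, sinfo.sinfo_stream);
+ */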
+static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ int flags, int *addr_len)
+{
+ struct sctp_ulpevent *event = NULL;
+ struct sctp_sock *sp = sctp_sk(sk);
+ struct sk_buff *skb, *head_skb;
+ int copied;
+ int err = 0;
+ int skb_len;
+
+ pr_debug("%s: sk:%p, msghdr:%p, len:%zd, flags:0x%x, addr_len:%p)\n",
+ __func__, sk, msg, len, flags, addr_len);
+
+ if (unlikely(flags & MSG_ERRQUEUE))
+ return inet_recv_error(sk, msg, len, addr_len);
+
+ if (sk_can_busy_loop(sk) &&
+ skb_queue_empty_lockless(&sk->sk_receive_queue))
+ sk_busy_loop(sk, flags & MSG_DONTWAIT);
+
+ lock_sock(sk);
+
+ if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED) &&
+ !sctp_sstate(sk, CLOSING) && !sctp_sstate(sk, CLOSED)) {
+ err = -ENOTCONN;
+ goto out;
+ }
+
+ skb = sctp_skb_recv_datagram(sk, flags, &err);
+ if (!skb)
+ goto out;
+
+ /* Get the total length of the skb including any skb's in the
+ * frag_list.
+ */
+ skb_len = skb->len;
+
+ copied = skb_len;
+ if (copied > len)
+ copied = len;
+
+ err = skb_copy_datagram_msg(skb, 0, msg, copied);
+
+ event = sctp_skb2event(skb);
+
+ if (err)
+ goto out_free;
+
+ if (event->chunk && event->chunk->head_skb)
+ head_skb = event->chunk->head_skb;
+ else
+ head_skb = skb;
+ sock_recv_cmsgs(msg, sk, head_skb);
+ if (sctp_ulpevent_is_notification(event)) {
+ msg->msg_flags |= MSG_NOTIFICATION;
+ sp->pf->event_msgname(event, msg->msg_name, addr_len);
+ } else {
+ sp->pf->skb_msgname(head_skb, msg->msg_name, addr_len);
+ }
+
+ /* Check if we allow SCTP_NXTINFO. */
+ if (sp->recvnxtinfo)
+ sctp_ulpevent_read_nxtinfo(event, msg, sk);
+ /* Check if we allow SCTP_RCVINFO. */
+ if (sp->recvrcvinfo)
+ sctp_ulpevent_read_rcvinfo(event, msg);
+ /* Check if we allow SCTP_SNDRCVINFO. */
+ if (sctp_ulpevent_type_enabled(sp->subscribe, SCTP_DATA_IO_EVENT))
+ sctp_ulpevent_read_sndrcvinfo(event, msg);
+
+ err = copied;
+
+ /* If skb's length exceeds the user's buffer, update the skb and
+ * push it back to the receive_queue so that the next call to
+ * recvmsg() will return the remaining data. Don't set MSG_EOR.
+ */
+ if (skb_len > copied) {
+ msg->msg_flags &= ~MSG_EOR;
+ if (flags & MSG_PEEK)
+ goto out_free;
+ sctp_skb_pull(skb, copied);
+ skb_queue_head(&sk->sk_receive_queue, skb);
+
+		/* When only a partial message is copied to the user, increase
+ * rwnd by that amount. If all the data in the skb is read,
+ * rwnd is updated when the event is freed.
+ */
+ if (!sctp_ulpevent_is_notification(event))
+ sctp_assoc_rwnd_increase(event->asoc, copied);
+ goto out;
+ } else if ((event->msg_flags & MSG_NOTIFICATION) ||
+ (event->msg_flags & MSG_EOR))
+ msg->msg_flags |= MSG_EOR;
+ else
+ msg->msg_flags &= ~MSG_EOR;
+
+out_free:
+ if (flags & MSG_PEEK) {
+ /* Release the skb reference acquired after peeking the skb in
+ * sctp_skb_recv_datagram().
+ */
+ kfree_skb(skb);
+ } else {
+ /* Free the event which includes releasing the reference to
+ * the owner of the skb, freeing the skb and updating the
+ * rwnd.
+ */
+ sctp_ulpevent_free(event);
+ }
+out:
+ release_sock(sk);
+ return err;
+}
+
+/* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
+ *
+ * This option is an on/off flag. If enabled, no SCTP message
+ * fragmentation will be performed. Instead, if a message being sent
+ * exceeds the current PMTU size, the message will NOT be sent and
+ * an error will be indicated to the user.
+ */
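+/* Illustrative user-space sketch (not part of the kernel code): turning the
+ * flag on with setsockopt() on an assumed SCTP socket sd:
+ *
+ *	int on = 1;
+ *
+ *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_DISABLE_FRAGMENTS,
+ *		       &on, sizeof(on)) < 0)
+ *		perror("setsockopt(SCTP_DISABLE_FRAGMENTS)");
+ */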
+static int sctp_setsockopt_disable_fragments(struct sock *sk, int *val,
+ unsigned int optlen)
+{
+ if (optlen < sizeof(int))
+ return -EINVAL;
+ sctp_sk(sk)->disable_fragments = (*val == 0) ? 0 : 1;
+ return 0;
+}
+
+static int sctp_setsockopt_events(struct sock *sk, __u8 *sn_type,
+ unsigned int optlen)
+{
+ struct sctp_sock *sp = sctp_sk(sk);
+ struct sctp_association *asoc;
+ int i;
+
+ if (optlen > sizeof(struct sctp_event_subscribe))
+ return -EINVAL;
+
+ for (i = 0; i < optlen; i++)
+ sctp_ulpevent_type_set(&sp->subscribe, SCTP_SN_TYPE_BASE + i,
+ sn_type[i]);
+
+ list_for_each_entry(asoc, &sp->ep->asocs, asocs)
+ asoc->subscribe = sctp_sk(sk)->subscribe;
+
+ /* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
+	 * if there is no data to be sent or retransmitted, the stack will
+ * immediately send up this notification.
+ */
+ if (sctp_ulpevent_type_enabled(sp->subscribe, SCTP_SENDER_DRY_EVENT)) {
+ struct sctp_ulpevent *event;
+
+ asoc = sctp_id2assoc(sk, 0);
+ if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
+ event = sctp_ulpevent_make_sender_dry_event(asoc,
+ GFP_USER | __GFP_NOWARN);
+ if (!event)
+ return -ENOMEM;
+
+ asoc->stream.si->enqueue_event(&asoc->ulpq, event);
+ }
+ }
+
+ return 0;
+}
+
+/* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
+ *
+ * This socket option is applicable to the UDP-style socket only. When
+ * set it will cause associations that are idle for more than the
+ * specified number of seconds to automatically close. An association
+ * being idle is defined an association that has NOT sent or received
+ * user data. The special value of '0' indicates that no automatic
+ * close of any associations should be performed. The option expects an
+ * integer defining the number of seconds of idle time before an
+ * association is closed.
+ */
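+/* Illustrative user-space sketch (not part of the kernel code): closing idle
+ * associations after 60 seconds on an assumed one-to-many (UDP-style)
+ * socket sd:
+ *
+ *	int secs = 60;
+ *
+ *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_AUTOCLOSE,
+ *		       &secs, sizeof(secs)) < 0)
+ *		perror("setsockopt(SCTP_AUTOCLOSE)");
+ */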
+static int sctp_setsockopt_autoclose(struct sock *sk, u32 *optval,
+ unsigned int optlen)
+{
+ struct sctp_sock *sp = sctp_sk(sk);
+ struct net *net = sock_net(sk);
+
+ /* Applicable to UDP-style socket only */
+ if (sctp_style(sk, TCP))
+ return -EOPNOTSUPP;
+ if (optlen != sizeof(int))
+ return -EINVAL;
+
+ sp->autoclose = *optval;
+ if (sp->autoclose > net->sctp.max_autoclose)
+ sp->autoclose = net->sctp.max_autoclose;
+
+ return 0;
+}
+
+/* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
+ *
+ * Applications can enable or disable heartbeats for any peer address of
+ * an association, modify an address's heartbeat interval, force a
+ * heartbeat to be sent immediately, and adjust the address's maximum
+ * number of retransmissions sent before an address is considered
+ * unreachable. The following structure is used to access and modify an
+ * address's parameters:
+ *
+ * struct sctp_paddrparams {
+ * sctp_assoc_t spp_assoc_id;
+ * struct sockaddr_storage spp_address;
+ * uint32_t spp_hbinterval;
+ * uint16_t spp_pathmaxrxt;
+ * uint32_t spp_pathmtu;
+ * uint32_t spp_sackdelay;
+ * uint32_t spp_flags;
+ * uint32_t spp_ipv6_flowlabel;
+ * uint8_t spp_dscp;
+ * };
+ *
+ * spp_assoc_id - (one-to-many style socket) This is filled in by the
+ * application, and identifies the association for
+ * this query.
+ * spp_address - This specifies which address is of interest.
+ * spp_hbinterval - This contains the value of the heartbeat interval,
+ * in milliseconds. If a value of zero
+ * is present in this field then no changes are to
+ * be made to this parameter.
+ * spp_pathmaxrxt - This contains the maximum number of
+ * retransmissions before this address shall be
+ * considered unreachable. If a value of zero
+ * is present in this field then no changes are to
+ * be made to this parameter.
+ * spp_pathmtu - When Path MTU discovery is disabled the value
+ * specified here will be the "fixed" path mtu.
+ * Note that if the spp_address field is empty
+ * then all associations on this address will
+ * have this fixed path mtu set upon them.
+ *
+ * spp_sackdelay - When delayed sack is enabled, this value specifies
+ * the number of milliseconds that sacks will be delayed
+ * for. This value will apply to all addresses of an
+ * association if the spp_address field is empty. Note
+ * also, that if delayed sack is enabled and this
+ * value is set to 0, no change is made to the last
+ * recorded delayed sack timer value.
+ *
+ * spp_flags - These flags are used to control various features
+ * on an association. The flag field may contain
+ * zero or more of the following options.
+ *
+ * SPP_HB_ENABLE - Enable heartbeats on the
+ * specified address. Note that if the address
+ * field is empty all addresses for the association
+ * have heartbeats enabled upon them.
+ *
+ * SPP_HB_DISABLE - Disable heartbeats on the
+ * specified address. Note that if the address
+ * field is empty all addresses for the association
+ * will have their heartbeats disabled. Note also
+ * that SPP_HB_ENABLE and SPP_HB_DISABLE are
+ * mutually exclusive, only one of these two should
+ * be specified. Enabling both fields will have
+ * undetermined results.
+ *
+ * SPP_HB_DEMAND - Request a user initiated heartbeat
+ * to be made immediately.
+ *
+ * SPP_HB_TIME_IS_ZERO - Specifies that the time for
+ * heartbeat delay is to be set to the value of 0
+ * milliseconds.
+ *
+ * SPP_PMTUD_ENABLE - This field will enable PMTU
+ * discovery upon the specified address. Note that
+ * if the address field is empty then all addresses
+ * on the association are affected.
+ *
+ * SPP_PMTUD_DISABLE - This field will disable PMTU
+ * discovery upon the specified address. Note that
+ * if the address field is empty then all addresses
+ * on the association are affected. Note also that
+ * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
+ * exclusive. Enabling both will have undetermined
+ * results.
+ *
+ * SPP_SACKDELAY_ENABLE - Setting this flag turns
+ * on delayed sack. The time specified in spp_sackdelay
+ * is used to specify the sack delay for this address. Note
+ * that if spp_address is empty then all addresses will
+ * enable delayed sack and take on the sack delay
+ * value specified in spp_sackdelay.
+ * SPP_SACKDELAY_DISABLE - Setting this flag turns
+ * off delayed sack. If the spp_address field is blank then
+ * delayed sack is disabled for the entire association. Note
+ * also that this field is mutually exclusive to
+ * SPP_SACKDELAY_ENABLE, setting both will have undefined
+ * results.
+ *
+ * SPP_IPV6_FLOWLABEL: Setting this flag enables the
+ * setting of the IPV6 flow label value. The value is
+ * contained in the spp_ipv6_flowlabel field.
+ * Upon retrieval, this flag will be set to indicate that
+ * the spp_ipv6_flowlabel field has a valid value returned.
+ * If a specific destination address is set (in the
+ * spp_address field), then the value returned is that of
+ * the address. If just an association is specified (and
+ * no address), then the association's default flow label
+ * is returned. If neither an association nor a destination
+ * is specified, then the socket's default flow label is
+ * returned. For non-IPv6 sockets, this flag will be left
+ * cleared.
+ *
+ * SPP_DSCP: Setting this flag enables the setting of the
+ * Differentiated Services Code Point (DSCP) value
+ * associated with either the association or a specific
+ * address. The value is obtained in the spp_dscp field.
+ * Upon retrieval, this flag will be set to indicate that
+ * the spp_dscp field has a valid value returned. If a
+ * specific destination address is set when called (in the
+ * spp_address field), then that specific destination
+ * address's DSCP value is returned. If just an association
+ * is specified, then the association's default DSCP is
+ * returned. If neither an association nor a destination is
+ * specified, then the socket's default DSCP is returned.
+ *
+ * spp_ipv6_flowlabel
+ * - This field is used in conjunction with the
+ * SPP_IPV6_FLOWLABEL flag and contains the IPv6 flow label.
+ * The 20 least significant bits are used for the flow
+ * label. This setting has precedence over any IPv6-layer
+ * setting.
+ *
+ * spp_dscp - This field is used in conjunction with the SPP_DSCP flag
+ * and contains the DSCP. The 6 most significant bits are
+ * used for the DSCP. This setting has precedence over any
+ * IPv4- or IPv6- layer setting.
+ */
+static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
+ struct sctp_transport *trans,
+ struct sctp_association *asoc,
+ struct sctp_sock *sp,
+ int hb_change,
+ int pmtud_change,
+ int sackdelay_change)
+{
+ int error;
+
+ if (params->spp_flags & SPP_HB_DEMAND && trans) {
+ error = sctp_primitive_REQUESTHEARTBEAT(trans->asoc->base.net,
+ trans->asoc, trans);
+ if (error)
+ return error;
+ }
+
+ /* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of
+ * this field is ignored. Note also that a value of zero indicates
+ * the current setting should be left unchanged.
+ */
+ if (params->spp_flags & SPP_HB_ENABLE) {
+
+ /* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is
+ * set. This lets us use 0 value when this flag
+ * is set.
+ */
+ if (params->spp_flags & SPP_HB_TIME_IS_ZERO)
+ params->spp_hbinterval = 0;
+
+ if (params->spp_hbinterval ||
+ (params->spp_flags & SPP_HB_TIME_IS_ZERO)) {
+ if (trans) {
+ trans->hbinterval =
+ msecs_to_jiffies(params->spp_hbinterval);
+ sctp_transport_reset_hb_timer(trans);
+ } else if (asoc) {
+ asoc->hbinterval =
+ msecs_to_jiffies(params->spp_hbinterval);
+ } else {
+ sp->hbinterval = params->spp_hbinterval;
+ }
+ }
+ }
+
+ if (hb_change) {
+ if (trans) {
+ trans->param_flags =
+ (trans->param_flags & ~SPP_HB) | hb_change;
+ } else if (asoc) {
+ asoc->param_flags =
+ (asoc->param_flags & ~SPP_HB) | hb_change;
+ } else {
+ sp->param_flags =
+ (sp->param_flags & ~SPP_HB) | hb_change;
+ }
+ }
+
+ /* When Path MTU discovery is disabled the value specified here will
+ * be the "fixed" path mtu (i.e. the value of the spp_flags field must
+ * include the flag SPP_PMTUD_DISABLE for this field to have any
+ * effect).
+ */
+ if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) {
+ if (trans) {
+ trans->pathmtu = params->spp_pathmtu;
+ sctp_assoc_sync_pmtu(asoc);
+ } else if (asoc) {
+ sctp_assoc_set_pmtu(asoc, params->spp_pathmtu);
+ } else {
+ sp->pathmtu = params->spp_pathmtu;
+ }
+ }
+
+ if (pmtud_change) {
+ if (trans) {
+ int update = (trans->param_flags & SPP_PMTUD_DISABLE) &&
+ (params->spp_flags & SPP_PMTUD_ENABLE);
+ trans->param_flags =
+ (trans->param_flags & ~SPP_PMTUD) | pmtud_change;
+ if (update) {
+ sctp_transport_pmtu(trans, sctp_opt2sk(sp));
+ sctp_assoc_sync_pmtu(asoc);
+ }
+ sctp_transport_pl_reset(trans);
+ } else if (asoc) {
+ asoc->param_flags =
+ (asoc->param_flags & ~SPP_PMTUD) | pmtud_change;
+ } else {
+ sp->param_flags =
+ (sp->param_flags & ~SPP_PMTUD) | pmtud_change;
+ }
+ }
+
+ /* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the
+ * value of this field is ignored. Note also that a value of zero
+ * indicates the current setting should be left unchanged.
+ */
+ if ((params->spp_flags & SPP_SACKDELAY_ENABLE) && params->spp_sackdelay) {
+ if (trans) {
+ trans->sackdelay =
+ msecs_to_jiffies(params->spp_sackdelay);
+ } else if (asoc) {
+ asoc->sackdelay =
+ msecs_to_jiffies(params->spp_sackdelay);
+ } else {
+ sp->sackdelay = params->spp_sackdelay;
+ }
+ }
+
+ if (sackdelay_change) {
+ if (trans) {
+ trans->param_flags =
+ (trans->param_flags & ~SPP_SACKDELAY) |
+ sackdelay_change;
+ } else if (asoc) {
+ asoc->param_flags =
+ (asoc->param_flags & ~SPP_SACKDELAY) |
+ sackdelay_change;
+ } else {
+ sp->param_flags =
+ (sp->param_flags & ~SPP_SACKDELAY) |
+ sackdelay_change;
+ }
+ }
+
+ /* Note that a value of zero indicates the current setting should be
+ left unchanged.
+ */
+ if (params->spp_pathmaxrxt) {
+ if (trans) {
+ trans->pathmaxrxt = params->spp_pathmaxrxt;
+ } else if (asoc) {
+ asoc->pathmaxrxt = params->spp_pathmaxrxt;
+ } else {
+ sp->pathmaxrxt = params->spp_pathmaxrxt;
+ }
+ }
+
+ if (params->spp_flags & SPP_IPV6_FLOWLABEL) {
+ if (trans) {
+ if (trans->ipaddr.sa.sa_family == AF_INET6) {
+ trans->flowlabel = params->spp_ipv6_flowlabel &
+ SCTP_FLOWLABEL_VAL_MASK;
+ trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
+ }
+ } else if (asoc) {
+ struct sctp_transport *t;
+
+ list_for_each_entry(t, &asoc->peer.transport_addr_list,
+ transports) {
+ if (t->ipaddr.sa.sa_family != AF_INET6)
+ continue;
+ t->flowlabel = params->spp_ipv6_flowlabel &
+ SCTP_FLOWLABEL_VAL_MASK;
+ t->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
+ }
+ asoc->flowlabel = params->spp_ipv6_flowlabel &
+ SCTP_FLOWLABEL_VAL_MASK;
+ asoc->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
+ } else if (sctp_opt2sk(sp)->sk_family == AF_INET6) {
+ sp->flowlabel = params->spp_ipv6_flowlabel &
+ SCTP_FLOWLABEL_VAL_MASK;
+ sp->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
+ }
+ }
+
+ if (params->spp_flags & SPP_DSCP) {
+ if (trans) {
+ trans->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
+ trans->dscp |= SCTP_DSCP_SET_MASK;
+ } else if (asoc) {
+ struct sctp_transport *t;
+
+ list_for_each_entry(t, &asoc->peer.transport_addr_list,
+ transports) {
+ t->dscp = params->spp_dscp &
+ SCTP_DSCP_VAL_MASK;
+ t->dscp |= SCTP_DSCP_SET_MASK;
+ }
+ asoc->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
+ asoc->dscp |= SCTP_DSCP_SET_MASK;
+ } else {
+ sp->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
+ sp->dscp |= SCTP_DSCP_SET_MASK;
+ }
+ }
+
+ return 0;
+}
+
+static int sctp_setsockopt_peer_addr_params(struct sock *sk,
+ struct sctp_paddrparams *params,
+ unsigned int optlen)
+{
+ struct sctp_transport *trans = NULL;
+ struct sctp_association *asoc = NULL;
+ struct sctp_sock *sp = sctp_sk(sk);
+ int error;
+ int hb_change, pmtud_change, sackdelay_change;
+
+ if (optlen == ALIGN(offsetof(struct sctp_paddrparams,
+ spp_ipv6_flowlabel), 4)) {
+ if (params->spp_flags & (SPP_DSCP | SPP_IPV6_FLOWLABEL))
+ return -EINVAL;
+ } else if (optlen != sizeof(*params)) {
+ return -EINVAL;
+ }
+
+ /* Validate flags and value parameters. */
+ hb_change = params->spp_flags & SPP_HB;
+ pmtud_change = params->spp_flags & SPP_PMTUD;
+ sackdelay_change = params->spp_flags & SPP_SACKDELAY;
+
+ if (hb_change == SPP_HB ||
+ pmtud_change == SPP_PMTUD ||
+ sackdelay_change == SPP_SACKDELAY ||
+ params->spp_sackdelay > 500 ||
+ (params->spp_pathmtu &&
+ params->spp_pathmtu < SCTP_DEFAULT_MINSEGMENT))
+ return -EINVAL;
+
+ /* If an address other than INADDR_ANY is specified, and
+ * no transport is found, then the request is invalid.
+ */
+ if (!sctp_is_any(sk, (union sctp_addr *)&params->spp_address)) {
+ trans = sctp_addr_id2transport(sk, &params->spp_address,
+ params->spp_assoc_id);
+ if (!trans)
+ return -EINVAL;
+ }
+
+ /* Get association, if assoc_id != SCTP_FUTURE_ASSOC and the
+ * socket is a one to many style socket, and an association
+ * was not found, then the id was invalid.
+ */
+ asoc = sctp_id2assoc(sk, params->spp_assoc_id);
+ if (!asoc && params->spp_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
+ /* Heartbeat demand can only be sent on a transport or
+ * association, but not a socket.
+ */
+ if (params->spp_flags & SPP_HB_DEMAND && !trans && !asoc)
+ return -EINVAL;
+
+ /* Process parameters. */
+ error = sctp_apply_peer_addr_params(params, trans, asoc, sp,
+ hb_change, pmtud_change,
+ sackdelay_change);
+
+ if (error)
+ return error;
+
+ /* If changes are for association, also apply parameters to each
+ * transport.
+ */
+ if (!trans && asoc) {
+ list_for_each_entry(trans, &asoc->peer.transport_addr_list,
+ transports) {
+ sctp_apply_peer_addr_params(params, trans, asoc, sp,
+ hb_change, pmtud_change,
+ sackdelay_change);
+ }
+ }
+
+ return 0;
+}
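+
+/* A minimal userspace sketch (not part of the kernel sources; "sd" is a
+ * hypothetical SCTP socket descriptor) of driving the handler above:
+ * enable heartbeats with a 5000 ms interval on all peer addresses of
+ * future associations, leaving the other fields untouched:
+ *
+ *   struct sctp_paddrparams spp = { 0 };
+ *
+ *   spp.spp_assoc_id = SCTP_FUTURE_ASSOC;
+ *   spp.spp_flags = SPP_HB_ENABLE;
+ *   spp.spp_hbinterval = 5000;
+ *   setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS, &spp, sizeof(spp));
+ */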
+
+static inline __u32 sctp_spp_sackdelay_enable(__u32 param_flags)
+{
+ return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_ENABLE;
+}
+
+static inline __u32 sctp_spp_sackdelay_disable(__u32 param_flags)
+{
+ return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_DISABLE;
+}
+
+static void sctp_apply_asoc_delayed_ack(struct sctp_sack_info *params,
+ struct sctp_association *asoc)
+{
+ struct sctp_transport *trans;
+
+ if (params->sack_delay) {
+ asoc->sackdelay = msecs_to_jiffies(params->sack_delay);
+ asoc->param_flags =
+ sctp_spp_sackdelay_enable(asoc->param_flags);
+ }
+ if (params->sack_freq == 1) {
+ asoc->param_flags =
+ sctp_spp_sackdelay_disable(asoc->param_flags);
+ } else if (params->sack_freq > 1) {
+ asoc->sackfreq = params->sack_freq;
+ asoc->param_flags =
+ sctp_spp_sackdelay_enable(asoc->param_flags);
+ }
+
+ list_for_each_entry(trans, &asoc->peer.transport_addr_list,
+ transports) {
+ if (params->sack_delay) {
+ trans->sackdelay = msecs_to_jiffies(params->sack_delay);
+ trans->param_flags =
+ sctp_spp_sackdelay_enable(trans->param_flags);
+ }
+ if (params->sack_freq == 1) {
+ trans->param_flags =
+ sctp_spp_sackdelay_disable(trans->param_flags);
+ } else if (params->sack_freq > 1) {
+ trans->sackfreq = params->sack_freq;
+ trans->param_flags =
+ sctp_spp_sackdelay_enable(trans->param_flags);
+ }
+ }
+}
+
+/*
+ * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
+ *
+ * This option will affect the way delayed acks are performed. This
+ * option allows you to get or set the delayed ack time, in
+ * milliseconds. It also allows changing the delayed ack frequency.
+ * Changing the frequency to 1 disables the delayed sack algorithm. If
+ * the assoc_id is 0, then this sets or gets the endpoints default
+ * values. If the assoc_id field is non-zero, then the set or get
+ * affects the specified association for the one to many model (the
+ * assoc_id field is ignored by the one to one model). Note that if
+ * sack_delay or sack_freq are 0 when setting this option, then the
+ * current values will remain unchanged.
+ *
+ * struct sctp_sack_info {
+ * sctp_assoc_t sack_assoc_id;
+ * uint32_t sack_delay;
+ * uint32_t sack_freq;
+ * };
+ *
+ * sack_assoc_id - This parameter indicates which association the user
+ * is performing an action upon. Note that if this field's value is
+ * zero then the endpoint's default value is changed (affecting future
+ * associations only).
+ *
+ * sack_delay - This parameter contains the number of milliseconds that
+ * the user is requesting the delayed ACK timer be set to. Note that
+ * this value is defined in the standard to be between 200 and 500
+ * milliseconds.
+ *
+ * sack_freq - This parameter contains the number of packets that must
+ * be received before a sack is sent without waiting for the delay
+ * timer to expire. The default value for this is 2, setting this
+ * value to 1 will disable the delayed sack algorithm.
+ */
+static int __sctp_setsockopt_delayed_ack(struct sock *sk,
+ struct sctp_sack_info *params)
+{
+ struct sctp_sock *sp = sctp_sk(sk);
+ struct sctp_association *asoc;
+
+ /* Validate value parameter. */
+ if (params->sack_delay > 500)
+ return -EINVAL;
+
+ /* Get association, if sack_assoc_id != SCTP_FUTURE_ASSOC and the
+ * socket is a one to many style socket, and an association
+ * was not found, then the id was invalid.
+ */
+ asoc = sctp_id2assoc(sk, params->sack_assoc_id);
+ if (!asoc && params->sack_assoc_id > SCTP_ALL_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
+ if (asoc) {
+ sctp_apply_asoc_delayed_ack(params, asoc);
+
+ return 0;
+ }
+
+ if (sctp_style(sk, TCP))
+ params->sack_assoc_id = SCTP_FUTURE_ASSOC;
+
+ if (params->sack_assoc_id == SCTP_FUTURE_ASSOC ||
+ params->sack_assoc_id == SCTP_ALL_ASSOC) {
+ if (params->sack_delay) {
+ sp->sackdelay = params->sack_delay;
+ sp->param_flags =
+ sctp_spp_sackdelay_enable(sp->param_flags);
+ }
+ if (params->sack_freq == 1) {
+ sp->param_flags =
+ sctp_spp_sackdelay_disable(sp->param_flags);
+ } else if (params->sack_freq > 1) {
+ sp->sackfreq = params->sack_freq;
+ sp->param_flags =
+ sctp_spp_sackdelay_enable(sp->param_flags);
+ }
+ }
+
+ if (params->sack_assoc_id == SCTP_CURRENT_ASSOC ||
+ params->sack_assoc_id == SCTP_ALL_ASSOC)
+ list_for_each_entry(asoc, &sp->ep->asocs, asocs)
+ sctp_apply_asoc_delayed_ack(params, asoc);
+
+ return 0;
+}
+
+static int sctp_setsockopt_delayed_ack(struct sock *sk,
+ struct sctp_sack_info *params,
+ unsigned int optlen)
+{
+ if (optlen == sizeof(struct sctp_assoc_value)) {
+ struct sctp_assoc_value *v = (struct sctp_assoc_value *)params;
+ struct sctp_sack_info p;
+
+ pr_warn_ratelimited(DEPRECATED
+ "%s (pid %d) "
+ "Use of struct sctp_assoc_value in delayed_ack socket option.\n"
+ "Use struct sctp_sack_info instead\n",
+ current->comm, task_pid_nr(current));
+
+ p.sack_assoc_id = v->assoc_id;
+ p.sack_delay = v->assoc_value;
+ p.sack_freq = v->assoc_value ? 0 : 1;
+ return __sctp_setsockopt_delayed_ack(sk, &p);
+ }
+
+ if (optlen != sizeof(struct sctp_sack_info))
+ return -EINVAL;
+ if (params->sack_delay == 0 && params->sack_freq == 0)
+ return 0;
+ return __sctp_setsockopt_delayed_ack(sk, params);
+}
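+
+/* A minimal userspace sketch (not kernel code; "sd" is a hypothetical
+ * SCTP socket descriptor) of tuning the delayed SACK parameters parsed
+ * above: a 300 ms delay and a frequency of 2 packets as the endpoint
+ * default:
+ *
+ *   struct sctp_sack_info sack = { 0 };
+ *
+ *   sack.sack_assoc_id = SCTP_FUTURE_ASSOC;
+ *   sack.sack_delay = 300;
+ *   sack.sack_freq = 2;
+ *   setsockopt(sd, IPPROTO_SCTP, SCTP_DELAYED_SACK, &sack, sizeof(sack));
+ */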
+
+/* 7.1.3 Initialization Parameters (SCTP_INITMSG)
+ *
+ * Applications can specify protocol parameters for the default association
+ * initialization. The option name argument to setsockopt() and getsockopt()
+ * is SCTP_INITMSG.
+ *
+ * Setting initialization parameters is effective only on an unconnected
+ * socket (for UDP-style sockets only future associations are affected
+ * by the change). With TCP-style sockets, this option is inherited by
+ * sockets derived from a listener socket.
+ */
+static int sctp_setsockopt_initmsg(struct sock *sk, struct sctp_initmsg *sinit,
+ unsigned int optlen)
+{
+ struct sctp_sock *sp = sctp_sk(sk);
+
+ if (optlen != sizeof(struct sctp_initmsg))
+ return -EINVAL;
+
+ if (sinit->sinit_num_ostreams)
+ sp->initmsg.sinit_num_ostreams = sinit->sinit_num_ostreams;
+ if (sinit->sinit_max_instreams)
+ sp->initmsg.sinit_max_instreams = sinit->sinit_max_instreams;
+ if (sinit->sinit_max_attempts)
+ sp->initmsg.sinit_max_attempts = sinit->sinit_max_attempts;
+ if (sinit->sinit_max_init_timeo)
+ sp->initmsg.sinit_max_init_timeo = sinit->sinit_max_init_timeo;
+
+ return 0;
+}
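+
+/* A minimal userspace sketch (not kernel code; "sd" is a hypothetical,
+ * not yet connected SCTP socket) of setting the default INIT parameters
+ * consumed above, requesting 10 outbound streams and allowing up to 10
+ * inbound streams on future associations:
+ *
+ *   struct sctp_initmsg init = { 0 };
+ *
+ *   init.sinit_num_ostreams = 10;
+ *   init.sinit_max_instreams = 10;
+ *   setsockopt(sd, IPPROTO_SCTP, SCTP_INITMSG, &init, sizeof(init));
+ */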
+
+/*
+ * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM)
+ *
+ * Applications that wish to use the sendto() system call may wish to
+ * specify a default set of parameters that would normally be supplied
+ * through the inclusion of ancillary data. This socket option allows
+ * such an application to set the default sctp_sndrcvinfo structure.
+ * The application that wishes to use this socket option simply passes
+ * in to this call the sctp_sndrcvinfo structure defined in Section
+ * 5.2.2) The input parameters accepted by this call include
+ * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
+ * sinfo_timetolive. The user must provide the sinfo_assoc_id field in
+ * to this call if the caller is using the UDP model.
+ */
+static int sctp_setsockopt_default_send_param(struct sock *sk,
+ struct sctp_sndrcvinfo *info,
+ unsigned int optlen)
+{
+ struct sctp_sock *sp = sctp_sk(sk);
+ struct sctp_association *asoc;
+
+ if (optlen != sizeof(*info))
+ return -EINVAL;
+ if (info->sinfo_flags &
+ ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
+ SCTP_ABORT | SCTP_EOF))
+ return -EINVAL;
+
+ asoc = sctp_id2assoc(sk, info->sinfo_assoc_id);
+ if (!asoc && info->sinfo_assoc_id > SCTP_ALL_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
+ if (asoc) {
+ asoc->default_stream = info->sinfo_stream;
+ asoc->default_flags = info->sinfo_flags;
+ asoc->default_ppid = info->sinfo_ppid;
+ asoc->default_context = info->sinfo_context;
+ asoc->default_timetolive = info->sinfo_timetolive;
+
+ return 0;
+ }
+
+ if (sctp_style(sk, TCP))
+ info->sinfo_assoc_id = SCTP_FUTURE_ASSOC;
+
+ if (info->sinfo_assoc_id == SCTP_FUTURE_ASSOC ||
+ info->sinfo_assoc_id == SCTP_ALL_ASSOC) {
+ sp->default_stream = info->sinfo_stream;
+ sp->default_flags = info->sinfo_flags;
+ sp->default_ppid = info->sinfo_ppid;
+ sp->default_context = info->sinfo_context;
+ sp->default_timetolive = info->sinfo_timetolive;
+ }
+
+ if (info->sinfo_assoc_id == SCTP_CURRENT_ASSOC ||
+ info->sinfo_assoc_id == SCTP_ALL_ASSOC) {
+ list_for_each_entry(asoc, &sp->ep->asocs, asocs) {
+ asoc->default_stream = info->sinfo_stream;
+ asoc->default_flags = info->sinfo_flags;
+ asoc->default_ppid = info->sinfo_ppid;
+ asoc->default_context = info->sinfo_context;
+ asoc->default_timetolive = info->sinfo_timetolive;
+ }
+ }
+
+ return 0;
+}
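+
+/* A minimal userspace sketch (not kernel code; "sd" is a hypothetical
+ * one-to-many SCTP socket) of installing endpoint-wide defaults with the
+ * option handled above, so that plain send()/sendto() calls use stream 1
+ * with unordered delivery:
+ *
+ *   struct sctp_sndrcvinfo info = { 0 };
+ *
+ *   info.sinfo_assoc_id = SCTP_FUTURE_ASSOC;
+ *   info.sinfo_stream = 1;
+ *   info.sinfo_flags = SCTP_UNORDERED;
+ *   setsockopt(sd, IPPROTO_SCTP, SCTP_DEFAULT_SEND_PARAM, &info, sizeof(info));
+ */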
+
+/* RFC6458, Section 8.1.31. Set/get Default Send Parameters
+ * (SCTP_DEFAULT_SNDINFO)
+ */
+static int sctp_setsockopt_default_sndinfo(struct sock *sk,
+ struct sctp_sndinfo *info,
+ unsigned int optlen)
+{
+ struct sctp_sock *sp = sctp_sk(sk);
+ struct sctp_association *asoc;
+
+ if (optlen != sizeof(*info))
+ return -EINVAL;
+ if (info->snd_flags &
+ ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
+ SCTP_ABORT | SCTP_EOF))
+ return -EINVAL;
+
+ asoc = sctp_id2assoc(sk, info->snd_assoc_id);
+ if (!asoc && info->snd_assoc_id > SCTP_ALL_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
+ if (asoc) {
+ asoc->default_stream = info->snd_sid;
+ asoc->default_flags = info->snd_flags;
+ asoc->default_ppid = info->snd_ppid;
+ asoc->default_context = info->snd_context;
+
+ return 0;
+ }
+
+ if (sctp_style(sk, TCP))
+ info->snd_assoc_id = SCTP_FUTURE_ASSOC;
+
+ if (info->snd_assoc_id == SCTP_FUTURE_ASSOC ||
+ info->snd_assoc_id == SCTP_ALL_ASSOC) {
+ sp->default_stream = info->snd_sid;
+ sp->default_flags = info->snd_flags;
+ sp->default_ppid = info->snd_ppid;
+ sp->default_context = info->snd_context;
+ }
+
+ if (info->snd_assoc_id == SCTP_CURRENT_ASSOC ||
+ info->snd_assoc_id == SCTP_ALL_ASSOC) {
+ list_for_each_entry(asoc, &sp->ep->asocs, asocs) {
+ asoc->default_stream = info->snd_sid;
+ asoc->default_flags = info->snd_flags;
+ asoc->default_ppid = info->snd_ppid;
+ asoc->default_context = info->snd_context;
+ }
+ }
+
+ return 0;
+}
+
+/* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
+ *
+ * Requests that the local SCTP stack use the enclosed peer address as
+ * the association primary. The enclosed address must be one of the
+ * association peer's addresses.
+ */
+static int sctp_setsockopt_primary_addr(struct sock *sk, struct sctp_prim *prim,
+ unsigned int optlen)
+{
+ struct sctp_transport *trans;
+ struct sctp_af *af;
+ int err;
+
+ if (optlen != sizeof(struct sctp_prim))
+ return -EINVAL;
+
+ /* Allow security module to validate address but need address len. */
+ af = sctp_get_af_specific(prim->ssp_addr.ss_family);
+ if (!af)
+ return -EINVAL;
+
+ err = security_sctp_bind_connect(sk, SCTP_PRIMARY_ADDR,
+ (struct sockaddr *)&prim->ssp_addr,
+ af->sockaddr_len);
+ if (err)
+ return err;
+
+ trans = sctp_addr_id2transport(sk, &prim->ssp_addr, prim->ssp_assoc_id);
+ if (!trans)
+ return -EINVAL;
+
+ sctp_assoc_set_primary(trans->asoc, trans);
+
+ return 0;
+}
+
+/*
+ * 7.1.5 SCTP_NODELAY
+ *
+ * Turn on/off any Nagle-like algorithm. This means that packets are
+ * generally sent as soon as possible and no unnecessary delays are
+ * introduced, at the cost of more packets in the network. Expects an
+ * integer boolean flag.
+ */
+static int sctp_setsockopt_nodelay(struct sock *sk, int *val,
+ unsigned int optlen)
+{
+ if (optlen < sizeof(int))
+ return -EINVAL;
+ sctp_sk(sk)->nodelay = (*val == 0) ? 0 : 1;
+ return 0;
+}
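+
+/* A minimal userspace sketch (not kernel code; "sd" is a hypothetical
+ * SCTP socket descriptor) of turning the Nagle-like delay off via the
+ * integer boolean handled above:
+ *
+ *   int on = 1;
+ *
+ *   setsockopt(sd, IPPROTO_SCTP, SCTP_NODELAY, &on, sizeof(on));
+ */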
+
+/*
+ *
+ * 7.1.1 SCTP_RTOINFO
+ *
+ * The protocol parameters used to initialize and bound retransmission
+ * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access
+ * and modify these parameters.
+ * All parameters are time values, in milliseconds. A value of 0, when
+ * modifying the parameters, indicates that the current value should not
+ * be changed.
+ *
+ */
+static int sctp_setsockopt_rtoinfo(struct sock *sk,
+ struct sctp_rtoinfo *rtoinfo,
+ unsigned int optlen)
+{
+ struct sctp_association *asoc;
+ unsigned long rto_min, rto_max;
+ struct sctp_sock *sp = sctp_sk(sk);
+
+ if (optlen != sizeof (struct sctp_rtoinfo))
+ return -EINVAL;
+
+ asoc = sctp_id2assoc(sk, rtoinfo->srto_assoc_id);
+
+ /* Set the values to the specific association */
+ if (!asoc && rtoinfo->srto_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
+ rto_max = rtoinfo->srto_max;
+ rto_min = rtoinfo->srto_min;
+
+ if (rto_max)
+ rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max;
+ else
+ rto_max = asoc ? asoc->rto_max : sp->rtoinfo.srto_max;
+
+ if (rto_min)
+ rto_min = asoc ? msecs_to_jiffies(rto_min) : rto_min;
+ else
+ rto_min = asoc ? asoc->rto_min : sp->rtoinfo.srto_min;
+
+ if (rto_min > rto_max)
+ return -EINVAL;
+
+ if (asoc) {
+ if (rtoinfo->srto_initial != 0)
+ asoc->rto_initial =
+ msecs_to_jiffies(rtoinfo->srto_initial);
+ asoc->rto_max = rto_max;
+ asoc->rto_min = rto_min;
+ } else {
+ /* If there is no association or the association-id = 0
+ * set the values to the endpoint.
+ */
+ if (rtoinfo->srto_initial != 0)
+ sp->rtoinfo.srto_initial = rtoinfo->srto_initial;
+ sp->rtoinfo.srto_max = rto_max;
+ sp->rtoinfo.srto_min = rto_min;
+ }
+
+ return 0;
+}
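+
+/* A minimal userspace sketch (not kernel code; "sd" is a hypothetical
+ * SCTP socket descriptor) of bounding the RTO as handled above: an
+ * initial RTO of 1000 ms clamped between 500 ms and 4000 ms; fields left
+ * at zero keep their current values:
+ *
+ *   struct sctp_rtoinfo rto = { 0 };
+ *
+ *   rto.srto_assoc_id = SCTP_FUTURE_ASSOC;
+ *   rto.srto_initial = 1000;
+ *   rto.srto_min = 500;
+ *   rto.srto_max = 4000;
+ *   setsockopt(sd, IPPROTO_SCTP, SCTP_RTOINFO, &rto, sizeof(rto));
+ */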
+
+/*
+ *
+ * 7.1.2 SCTP_ASSOCINFO
+ *
+ * This option is used to tune the maximum retransmission attempts
+ * of the association.
+ * Returns an error if the new association retransmission value is
+ * greater than the sum of the retransmission value of the peer.
+ * See [SCTP] for more information.
+ *
+ */
+static int sctp_setsockopt_associnfo(struct sock *sk,
+ struct sctp_assocparams *assocparams,
+ unsigned int optlen)
+{
+
+ struct sctp_association *asoc;
+
+ if (optlen != sizeof(struct sctp_assocparams))
+ return -EINVAL;
+
+ asoc = sctp_id2assoc(sk, assocparams->sasoc_assoc_id);
+
+ if (!asoc && assocparams->sasoc_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
+ /* Set the values to the specific association */
+ if (asoc) {
+ if (assocparams->sasoc_asocmaxrxt != 0) {
+ __u32 path_sum = 0;
+ int paths = 0;
+ struct sctp_transport *peer_addr;
+
+ list_for_each_entry(peer_addr, &asoc->peer.transport_addr_list,
+ transports) {
+ path_sum += peer_addr->pathmaxrxt;
+ paths++;
+ }
+
+ /* Only validate asocmaxrxt if we have more than
+ * one path/transport. We do this because path
+ * retransmissions are only counted when we have more
+ * than one path.
+ */
+ if (paths > 1 &&
+ assocparams->sasoc_asocmaxrxt > path_sum)
+ return -EINVAL;
+
+ asoc->max_retrans = assocparams->sasoc_asocmaxrxt;
+ }
+
+ if (assocparams->sasoc_cookie_life != 0)
+ asoc->cookie_life =
+ ms_to_ktime(assocparams->sasoc_cookie_life);
+ } else {
+ /* Set the values to the endpoint */
+ struct sctp_sock *sp = sctp_sk(sk);
+
+ if (assocparams->sasoc_asocmaxrxt != 0)
+ sp->assocparams.sasoc_asocmaxrxt =
+ assocparams->sasoc_asocmaxrxt;
+ if (assocparams->sasoc_cookie_life != 0)
+ sp->assocparams.sasoc_cookie_life =
+ assocparams->sasoc_cookie_life;
+ }
+ return 0;
+}
+
+/*
+ * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
+ *
+ * This socket option is a boolean flag which turns on or off mapped V4
+ * addresses. If this option is turned on and the socket is type
+ * PF_INET6, then IPv4 addresses will be mapped to V6 representation.
+ * If this option is turned off, then no mapping will be done of V4
+ * addresses and a user will receive both PF_INET6 and PF_INET type
+ * addresses on the socket.
+ */
+static int sctp_setsockopt_mappedv4(struct sock *sk, int *val,
+ unsigned int optlen)
+{
+ struct sctp_sock *sp = sctp_sk(sk);
+
+ if (optlen < sizeof(int))
+ return -EINVAL;
+ if (*val)
+ sp->v4mapped = 1;
+ else
+ sp->v4mapped = 0;
+
+ return 0;
+}
+
+/*
+ * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG)
+ * This option will get or set the maximum size to put in any outgoing
+ * SCTP DATA chunk. If a message is larger than this size it will be
+ * fragmented by SCTP into the specified size. Note that the underlying
+ * SCTP implementation may fragment into smaller sized chunks when the
+ * PMTU of the underlying association is smaller than the value set by
+ * the user. The default value for this option is '0' which indicates
+ * the user is NOT limiting fragmentation and only the PMTU will affect
+ * SCTP's choice of DATA chunk size. Note also that values set larger
+ * than the maximum size of an IP datagram will effectively let SCTP
+ * control fragmentation (i.e. the same as setting this option to 0).
+ *
+ * The following structure is used to access and modify this parameter:
+ *
+ * struct sctp_assoc_value {
+ * sctp_assoc_t assoc_id;
+ * uint32_t assoc_value;
+ * };
+ *
+ * assoc_id: This parameter is ignored for one-to-one style sockets.
+ * For one-to-many style sockets this parameter indicates which
+ * association the user is performing an action upon. Note that if
+ * this field's value is zero then the endpoint's default value is
+ * changed (affecting future associations only).
+ * assoc_value: This parameter specifies the maximum size in bytes.
+ */
+static int sctp_setsockopt_maxseg(struct sock *sk,
+ struct sctp_assoc_value *params,
+ unsigned int optlen)
+{
+ struct sctp_sock *sp = sctp_sk(sk);
+ struct sctp_association *asoc;
+ sctp_assoc_t assoc_id;
+ int val;
+
+ if (optlen == sizeof(int)) {
+ pr_warn_ratelimited(DEPRECATED
+ "%s (pid %d) "
+ "Use of int in maxseg socket option.\n"
+ "Use struct sctp_assoc_value instead\n",
+ current->comm, task_pid_nr(current));
+ assoc_id = SCTP_FUTURE_ASSOC;
+ val = *(int *)params;
+ } else if (optlen == sizeof(struct sctp_assoc_value)) {
+ assoc_id = params->assoc_id;
+ val = params->assoc_value;
+ } else {
+ return -EINVAL;
+ }
+
+ asoc = sctp_id2assoc(sk, assoc_id);
+ if (!asoc && assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
+ if (val) {
+ int min_len, max_len;
+ __u16 datasize = asoc ? sctp_datachk_len(&asoc->stream) :
+ sizeof(struct sctp_data_chunk);
+
+ min_len = sctp_min_frag_point(sp, datasize);
+ max_len = SCTP_MAX_CHUNK_LEN - datasize;
+
+ if (val < min_len || val > max_len)
+ return -EINVAL;
+ }
+
+ if (asoc) {
+ asoc->user_frag = val;
+ sctp_assoc_update_frag_point(asoc);
+ } else {
+ sp->user_frag = val;
+ }
+
+ return 0;
+}
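+
+/* A minimal userspace sketch (not kernel code; "sd" is a hypothetical
+ * SCTP socket descriptor) of capping the DATA chunk payload at 1200
+ * bytes for future associations, using the sctp_assoc_value form the
+ * handler above prefers over the deprecated plain int:
+ *
+ *   struct sctp_assoc_value av = { 0 };
+ *
+ *   av.assoc_id = SCTP_FUTURE_ASSOC;
+ *   av.assoc_value = 1200;
+ *   setsockopt(sd, IPPROTO_SCTP, SCTP_MAXSEG, &av, sizeof(av));
+ */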
+
+
+/*
+ * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR)
+ *
+ * Requests that the peer mark the enclosed address as the association
+ * primary. The enclosed address must be one of the association's
+ * locally bound addresses. The following structure is used to make a
+ * set primary request:
+ */
+static int sctp_setsockopt_peer_primary_addr(struct sock *sk,
+ struct sctp_setpeerprim *prim,
+ unsigned int optlen)
+{
+ struct sctp_sock *sp;
+ struct sctp_association *asoc = NULL;
+ struct sctp_chunk *chunk;
+ struct sctp_af *af;
+ int err;
+
+ sp = sctp_sk(sk);
+
+ if (!sp->ep->asconf_enable)
+ return -EPERM;
+
+ if (optlen != sizeof(struct sctp_setpeerprim))
+ return -EINVAL;
+
+ asoc = sctp_id2assoc(sk, prim->sspp_assoc_id);
+ if (!asoc)
+ return -EINVAL;
+
+ if (!asoc->peer.asconf_capable)
+ return -EPERM;
+
+ if (asoc->peer.addip_disabled_mask & SCTP_PARAM_SET_PRIMARY)
+ return -EPERM;
+
+ if (!sctp_state(asoc, ESTABLISHED))
+ return -ENOTCONN;
+
+ af = sctp_get_af_specific(prim->sspp_addr.ss_family);
+ if (!af)
+ return -EINVAL;
+
+ if (!af->addr_valid((union sctp_addr *)&prim->sspp_addr, sp, NULL))
+ return -EADDRNOTAVAIL;
+
+ if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim->sspp_addr))
+ return -EADDRNOTAVAIL;
+
+ /* Allow security module to validate address. */
+ err = security_sctp_bind_connect(sk, SCTP_SET_PEER_PRIMARY_ADDR,
+ (struct sockaddr *)&prim->sspp_addr,
+ af->sockaddr_len);
+ if (err)
+ return err;
+
+ /* Create an ASCONF chunk with SET_PRIMARY parameter */
+ chunk = sctp_make_asconf_set_prim(asoc,
+ (union sctp_addr *)&prim->sspp_addr);
+ if (!chunk)
+ return -ENOMEM;
+
+ err = sctp_send_asconf(asoc, chunk);
+
+ pr_debug("%s: we set peer primary addr primitively\n", __func__);
+
+ return err;
+}
+
+static int sctp_setsockopt_adaptation_layer(struct sock *sk,
+ struct sctp_setadaptation *adapt,
+ unsigned int optlen)
+{
+ if (optlen != sizeof(struct sctp_setadaptation))
+ return -EINVAL;
+
+ sctp_sk(sk)->adaptation_ind = adapt->ssb_adaptation_ind;
+
+ return 0;
+}
+
+/*
+ * 7.1.29. Set or Get the default context (SCTP_CONTEXT)
+ *
+ * The context field in the sctp_sndrcvinfo structure is normally only
+ * used when a failed message is retrieved holding the value that was
+ * sent down on the actual send call. This option allows the setting of
+ * a default context on an association basis that will be received on
+ * reading messages from the peer. This is especially helpful in the
+ * one-2-many model for an application to keep some reference to an
+ * internal state machine that is processing messages on the
+ * association. Note that the setting of this value only affects
+ * received messages from the peer and does not affect the value that is
+ * saved with outbound messages.
+ */
+static int sctp_setsockopt_context(struct sock *sk,
+ struct sctp_assoc_value *params,
+ unsigned int optlen)
+{
+ struct sctp_sock *sp = sctp_sk(sk);
+ struct sctp_association *asoc;
+
+ if (optlen != sizeof(struct sctp_assoc_value))
+ return -EINVAL;
+
+ asoc = sctp_id2assoc(sk, params->assoc_id);
+ if (!asoc && params->assoc_id > SCTP_ALL_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
+ if (asoc) {
+ asoc->default_rcv_context = params->assoc_value;
+
+ return 0;
+ }
+
+ if (sctp_style(sk, TCP))
+ params->assoc_id = SCTP_FUTURE_ASSOC;
+
+ if (params->assoc_id == SCTP_FUTURE_ASSOC ||
+ params->assoc_id == SCTP_ALL_ASSOC)
+ sp->default_rcv_context = params->assoc_value;
+
+ if (params->assoc_id == SCTP_CURRENT_ASSOC ||
+ params->assoc_id == SCTP_ALL_ASSOC)
+ list_for_each_entry(asoc, &sp->ep->asocs, asocs)
+ asoc->default_rcv_context = params->assoc_value;
+
+ return 0;
+}
+
+/*
+ * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
+ *
+ * This option will at a minimum specify if the implementation is doing
+ * fragmented interleave. Fragmented interleave, for a one to many
+ * socket, is when subsequent calls to receive a message may return
+ * parts of messages from different associations. Some implementations
+ * may allow you to turn this value on or off. If so, when turned off,
+ * no fragment interleave will occur (which will cause a head of line
+ * blocking amongst multiple associations sharing the same one to many
+ * socket). When this option is turned on, then each receive call may
+ * come from a different association (thus the user must receive data
+ * with the extended calls (e.g. sctp_recvmsg) to keep track of which
+ * association each receive belongs to).
+ *
+ * This option takes a boolean value. A non-zero value indicates that
+ * fragmented interleave is on. A value of zero indicates that
+ * fragmented interleave is off.
+ *
+ * Note that it is important that an implementation that allows this
+ * option to be turned on, have it off by default. Otherwise an unaware
+ * application using the one to many model may become confused and act
+ * incorrectly.
+ */
+static int sctp_setsockopt_fragment_interleave(struct sock *sk, int *val,
+ unsigned int optlen)
+{
+ if (optlen != sizeof(int))
+ return -EINVAL;
+
+ sctp_sk(sk)->frag_interleave = !!*val;
+
+ if (!sctp_sk(sk)->frag_interleave)
+ sctp_sk(sk)->ep->intl_enable = 0;
+
+ return 0;
+}
+
+/*
+ * 8.1.21. Set or Get the SCTP Partial Delivery Point
+ * (SCTP_PARTIAL_DELIVERY_POINT)
+ *
+ * This option will set or get the SCTP partial delivery point. This
+ * point is the size of a message where the partial delivery API will be
+ * invoked to help free up rwnd space for the peer. Setting this to a
+ * lower value will cause partial deliveries to happen more often. The
+ * calls argument is an integer that sets or gets the partial delivery
+ * point. Note also that the call will fail if the user attempts to set
+ * this value larger than the socket receive buffer size.
+ *
+ * Note that any single message having a length smaller than or equal to
+ * the SCTP partial delivery point will be delivered in one single read
+ * call as long as the user provided buffer is large enough to hold the
+ * message.
+ */
+static int sctp_setsockopt_partial_delivery_point(struct sock *sk, u32 *val,
+ unsigned int optlen)
+{
+ if (optlen != sizeof(u32))
+ return -EINVAL;
+
+ /* Note: We double the receive buffer from what the user sets
+ * it to be, also initial rwnd is based on rcvbuf/2.
+ */
+ if (*val > (sk->sk_rcvbuf >> 1))
+ return -EINVAL;
+
+ sctp_sk(sk)->pd_point = *val;
+
+ return 0; /* is this the right error code? */
+}
+
+/*
+ * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST)
+ *
+ * This option will allow a user to change the maximum burst of packets
+ * that can be emitted by this association. Note that the default value
+ * is 4, and some implementations may restrict this setting so that it
+ * can only be lowered.
+ *
+ * NOTE: This text doesn't seem right. Do this on a socket basis with
+ * future associations inheriting the socket value.
+ */
+static int sctp_setsockopt_maxburst(struct sock *sk,
+ struct sctp_assoc_value *params,
+ unsigned int optlen)
+{
+ struct sctp_sock *sp = sctp_sk(sk);
+ struct sctp_association *asoc;
+ sctp_assoc_t assoc_id;
+ u32 assoc_value;
+
+ if (optlen == sizeof(int)) {
+ pr_warn_ratelimited(DEPRECATED
+ "%s (pid %d) "
+ "Use of int in max_burst socket option deprecated.\n"
+ "Use struct sctp_assoc_value instead\n",
+ current->comm, task_pid_nr(current));
+ assoc_id = SCTP_FUTURE_ASSOC;
+ assoc_value = *((int *)params);
+ } else if (optlen == sizeof(struct sctp_assoc_value)) {
+ assoc_id = params->assoc_id;
+ assoc_value = params->assoc_value;
+ } else
+ return -EINVAL;
+
+ asoc = sctp_id2assoc(sk, assoc_id);
+ if (!asoc && assoc_id > SCTP_ALL_ASSOC && sctp_style(sk, UDP))
+ return -EINVAL;
+
+ if (asoc) {
+ asoc->max_burst = assoc_value;
+
+ return 0;
+ }
+
+ if (sctp_style(sk, TCP))
+ assoc_id = SCTP_FUTURE_ASSOC;
+
+ if (assoc_id == SCTP_FUTURE_ASSOC || assoc_id == SCTP_ALL_ASSOC)
+ sp->max_burst = assoc_value;
+
+ if (assoc_id == SCTP_CURRENT_ASSOC || assoc_id == SCTP_ALL_ASSOC)
+ list_for_each_entry(asoc, &sp->ep->asocs, asocs)
+ asoc->max_burst = assoc_value;
+
+ return 0;
+}
+
+/*
+ * 7.1.18. Add a chunk that must be authenticated (SCTP_AUTH_CHUNK)
+ *
+ * This set option adds a chunk type that the user is requesting to be
+ * received only in an authenticated way. Changes to the list of chunks
+ * will only affect future associations on the socket.
+ */
+static int sctp_setsockopt_auth_chunk(struct sock *sk,
+ struct sctp_authchunk *val,
+ unsigned int optlen)
+{
+ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
+
+ if (!ep->auth_enable)
+ return -EACCES;
+
+ if (optlen != sizeof(struct sctp_authchunk))
+ return -EINVAL;
+
+ switch (val->sauth_chunk) {
+ case SCTP_CID_INIT:
+ case SCTP_CID_INIT_ACK:
+ case SCTP_CID_SHUTDOWN_COMPLETE:
+ case SCTP_CID_AUTH:
+ return -EINVAL;
+ }
+
+ /* add this chunk id to the endpoint */
+ return sctp_auth_ep_add_chunkid(ep, val->sauth_chunk);
+}
+
+/*
+ * 7.1.19. Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT)
+ *
+ * This option gets or sets the list of HMAC algorithms that the local
+ * endpoint requires the peer to use.
+ */
+static int sctp_setsockopt_hmac_ident(struct sock *sk,
+ struct sctp_hmacalgo *hmacs,
+ unsigned int optlen)
+{
+ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
+ u32 idents;
+
+ if (!ep->auth_enable)
+ return -EACCES;
+
+ if (optlen < sizeof(struct sctp_hmacalgo))
+ return -EINVAL;
+ optlen = min_t(unsigned int, optlen, sizeof(struct sctp_hmacalgo) +
+ SCTP_AUTH_NUM_HMACS * sizeof(u16));
+
+ idents = hmacs->shmac_num_idents;
+ if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS ||
+ (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo)))
+ return -EINVAL;
+
+ return sctp_auth_ep_set_hmacs(ep, hmacs);
+}
+
+/*
+ * 7.1.20. Set a shared key (SCTP_AUTH_KEY)
+ *
+ * This option will set a shared secret key which is used to build an
+ * association shared key.
+ */
+static int sctp_setsockopt_auth_key(struct sock *sk,
+ struct sctp_authkey *authkey,
+ unsigned int optlen)
+{
+ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
+ struct sctp_association *asoc;
+ int ret = -EINVAL;
+
+ if (optlen <= sizeof(struct sctp_authkey))
+ return -EINVAL;
+ /* authkey->sca_keylength is u16, so optlen can't be bigger than
+ * this.
+ */
+ optlen = min_t(unsigned int, optlen, USHRT_MAX + sizeof(*authkey));
+
+ if (authkey->sca_keylength > optlen - sizeof(*authkey))
+ goto out;
+
+ asoc = sctp_id2assoc(sk, authkey->sca_assoc_id);
+ if (!asoc && authkey->sca_assoc_id > SCTP_ALL_ASSOC &&
+ sctp_style(sk, UDP))
+ goto out;
+
+ if (asoc) {
+ ret = sctp_auth_set_key(ep, asoc, authkey);
+ goto out;
+ }
+
+ if (sctp_style(sk, TCP))
+ authkey->sca_assoc_id = SCTP_FUTURE_ASSOC;
+
+ if (authkey->sca_assoc_id == SCTP_FUTURE_ASSOC ||
+ authkey->sca_assoc_id == SCTP_ALL_ASSOC) {
+ ret = sctp_auth_set_key(ep, asoc, authkey);
+ if (ret)
+ goto out;
+ }
+
+ ret = 0;
+
+ if (authkey->sca_assoc_id == SCTP_CURRENT_ASSOC ||
+ authkey->sca_assoc_id == SCTP_ALL_ASSOC) {
+ list_for_each_entry(asoc, &ep->asocs, asocs) {
+ int res = sctp_auth_set_key(ep, asoc, authkey);
+
+ if (res && !ret)
+ ret = res;
+ }
+ }
+
+out:
+ memzero_explicit(authkey, optlen);
+ return ret;
+}
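+
+/* A minimal userspace sketch (not kernel code; "sd" is a hypothetical
+ * SCTP socket descriptor and "secret" a caller-supplied byte array) of
+ * installing a shared key the way the handler above expects, with the
+ * key bytes appended after the fixed-size header:
+ *
+ *   size_t klen = sizeof(secret);
+ *   struct sctp_authkey *ak = calloc(1, sizeof(*ak) + klen);
+ *
+ *   ak->sca_assoc_id = SCTP_FUTURE_ASSOC;
+ *   ak->sca_keynumber = 1;
+ *   ak->sca_keylength = klen;
+ *   memcpy(ak->sca_key, secret, klen);
+ *   setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_KEY, ak, sizeof(*ak) + klen);
+ *   free(ak);
+ */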
+
+/*
+ * 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY)
+ *
+ * This option will get or set the active shared key to be used to build
+ * the association shared key.
+ */
+static int sctp_setsockopt_active_key(struct sock *sk,
+ struct sctp_authkeyid *val,
+ unsigned int optlen)
+{
+ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
+ struct sctp_association *asoc;
+ int ret = 0;
+
+ if (optlen != sizeof(struct sctp_authkeyid))
+ return -EINVAL;
+
+ asoc = sctp_id2assoc(sk, val->scact_assoc_id);
+ if (!asoc && val->scact_assoc_id > SCTP_ALL_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
+ if (asoc)
+ return sctp_auth_set_active_key(ep, asoc, val->scact_keynumber);
+
+ if (sctp_style(sk, TCP))
+ val->scact_assoc_id = SCTP_FUTURE_ASSOC;
+
+ if (val->scact_assoc_id == SCTP_FUTURE_ASSOC ||
+ val->scact_assoc_id == SCTP_ALL_ASSOC) {
+ ret = sctp_auth_set_active_key(ep, asoc, val->scact_keynumber);
+ if (ret)
+ return ret;
+ }
+
+ if (val->scact_assoc_id == SCTP_CURRENT_ASSOC ||
+ val->scact_assoc_id == SCTP_ALL_ASSOC) {
+ list_for_each_entry(asoc, &ep->asocs, asocs) {
+ int res = sctp_auth_set_active_key(ep, asoc,
+ val->scact_keynumber);
+
+ if (res && !ret)
+ ret = res;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * 7.1.22. Delete a shared key (SCTP_AUTH_DELETE_KEY)
+ *
+ * This set option will delete a shared secret key from use.
+ */
+static int sctp_setsockopt_del_key(struct sock *sk,
+ struct sctp_authkeyid *val,
+ unsigned int optlen)
+{
+ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
+ struct sctp_association *asoc;
+ int ret = 0;
+
+ if (optlen != sizeof(struct sctp_authkeyid))
+ return -EINVAL;
+
+ asoc = sctp_id2assoc(sk, val->scact_assoc_id);
+ if (!asoc && val->scact_assoc_id > SCTP_ALL_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
+ if (asoc)
+ return sctp_auth_del_key_id(ep, asoc, val->scact_keynumber);
+
+ if (sctp_style(sk, TCP))
+ val->scact_assoc_id = SCTP_FUTURE_ASSOC;
+
+ if (val->scact_assoc_id == SCTP_FUTURE_ASSOC ||
+ val->scact_assoc_id == SCTP_ALL_ASSOC) {
+ ret = sctp_auth_del_key_id(ep, asoc, val->scact_keynumber);
+ if (ret)
+ return ret;
+ }
+
+ if (val->scact_assoc_id == SCTP_CURRENT_ASSOC ||
+ val->scact_assoc_id == SCTP_ALL_ASSOC) {
+ list_for_each_entry(asoc, &ep->asocs, asocs) {
+ int res = sctp_auth_del_key_id(ep, asoc,
+ val->scact_keynumber);
+
+ if (res && !ret)
+ ret = res;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * 8.3.4 Deactivate a Shared Key (SCTP_AUTH_DEACTIVATE_KEY)
+ *
+ * This set option will deactivate a shared secret key.
+ */
+static int sctp_setsockopt_deactivate_key(struct sock *sk,
+ struct sctp_authkeyid *val,
+ unsigned int optlen)
+{
+ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
+ struct sctp_association *asoc;
+ int ret = 0;
+
+ if (optlen != sizeof(struct sctp_authkeyid))
+ return -EINVAL;
+
+ asoc = sctp_id2assoc(sk, val->scact_assoc_id);
+ if (!asoc && val->scact_assoc_id > SCTP_ALL_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
+ if (asoc)
+ return sctp_auth_deact_key_id(ep, asoc, val->scact_keynumber);
+
+ if (sctp_style(sk, TCP))
+ val->scact_assoc_id = SCTP_FUTURE_ASSOC;
+
+ if (val->scact_assoc_id == SCTP_FUTURE_ASSOC ||
+ val->scact_assoc_id == SCTP_ALL_ASSOC) {
+ ret = sctp_auth_deact_key_id(ep, asoc, val->scact_keynumber);
+ if (ret)
+ return ret;
+ }
+
+ if (val->scact_assoc_id == SCTP_CURRENT_ASSOC ||
+ val->scact_assoc_id == SCTP_ALL_ASSOC) {
+ list_for_each_entry(asoc, &ep->asocs, asocs) {
+ int res = sctp_auth_deact_key_id(ep, asoc,
+ val->scact_keynumber);
+
+ if (res && !ret)
+ ret = res;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * 8.1.23 SCTP_AUTO_ASCONF
+ *
+ * This option will enable or disable the use of the automatic generation of
+ * ASCONF chunks to add and delete addresses to an existing association. Note
+ * that this option has two caveats namely: a) it only affects sockets that
+ * are bound to all addresses available to the SCTP stack, and b) the system
+ * administrator may have an overriding control that turns the ASCONF feature
+ * off no matter what setting the socket option may have.
+ * This option expects an integer boolean flag, where a non-zero value turns on
+ * the option, and a zero value turns off the option.
+ * Note: in this implementation the socket option overrides the default
+ * parameter set by sysctl, as the FreeBSD implementation does.
+ */
+static int sctp_setsockopt_auto_asconf(struct sock *sk, int *val,
+ unsigned int optlen)
+{
+ struct sctp_sock *sp = sctp_sk(sk);
+
+ if (optlen < sizeof(int))
+ return -EINVAL;
+ if (!sctp_is_ep_boundall(sk) && *val)
+ return -EINVAL;
+ if ((*val && sp->do_auto_asconf) || (!*val && !sp->do_auto_asconf))
+ return 0;
+
+ spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
+ if (*val == 0 && sp->do_auto_asconf) {
+ list_del(&sp->auto_asconf_list);
+ sp->do_auto_asconf = 0;
+ } else if (*val && !sp->do_auto_asconf) {
+ list_add_tail(&sp->auto_asconf_list,
+ &sock_net(sk)->sctp.auto_asconf_splist);
+ sp->do_auto_asconf = 1;
+ }
+ spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
+ return 0;
+}
+
+/*
+ * SCTP_PEER_ADDR_THLDS
+ *
+ * This option allows us to alter the partially failed threshold for one or all
+ * transports in an association. See Section 6.1 of:
+ * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
+ */
+static int sctp_setsockopt_paddr_thresholds(struct sock *sk,
+ struct sctp_paddrthlds_v2 *val,
+ unsigned int optlen, bool v2)
+{
+ struct sctp_transport *trans;
+ struct sctp_association *asoc;
+ int len;
+
+ len = v2 ? sizeof(*val) : sizeof(struct sctp_paddrthlds);
+ if (optlen < len)
+ return -EINVAL;
+
+ if (v2 && val->spt_pathpfthld > val->spt_pathcpthld)
+ return -EINVAL;
+
+ if (!sctp_is_any(sk, (const union sctp_addr *)&val->spt_address)) {
+ trans = sctp_addr_id2transport(sk, &val->spt_address,
+ val->spt_assoc_id);
+ if (!trans)
+ return -ENOENT;
+
+ if (val->spt_pathmaxrxt)
+ trans->pathmaxrxt = val->spt_pathmaxrxt;
+ if (v2)
+ trans->ps_retrans = val->spt_pathcpthld;
+ trans->pf_retrans = val->spt_pathpfthld;
+
+ return 0;
+ }
+
+ asoc = sctp_id2assoc(sk, val->spt_assoc_id);
+ if (!asoc && val->spt_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
+ if (asoc) {
+ list_for_each_entry(trans, &asoc->peer.transport_addr_list,
+ transports) {
+ if (val->spt_pathmaxrxt)
+ trans->pathmaxrxt = val->spt_pathmaxrxt;
+ if (v2)
+ trans->ps_retrans = val->spt_pathcpthld;
+ trans->pf_retrans = val->spt_pathpfthld;
+ }
+
+ if (val->spt_pathmaxrxt)
+ asoc->pathmaxrxt = val->spt_pathmaxrxt;
+ if (v2)
+ asoc->ps_retrans = val->spt_pathcpthld;
+ asoc->pf_retrans = val->spt_pathpfthld;
+ } else {
+ struct sctp_sock *sp = sctp_sk(sk);
+
+ if (val->spt_pathmaxrxt)
+ sp->pathmaxrxt = val->spt_pathmaxrxt;
+ if (v2)
+ sp->ps_retrans = val->spt_pathcpthld;
+ sp->pf_retrans = val->spt_pathpfthld;
+ }
+
+ return 0;
+}
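+
+/* A minimal userspace sketch (not kernel code; "sd" is a hypothetical
+ * SCTP socket descriptor) of the original (non-_v2) form handled above,
+ * marking a path potentially failed after 2 retransmissions and failed
+ * after 5, for all peer addresses:
+ *
+ *   struct sctp_paddrthlds th = { 0 };
+ *
+ *   th.spt_assoc_id = SCTP_FUTURE_ASSOC;
+ *   th.spt_pathpfthld = 2;
+ *   th.spt_pathmaxrxt = 5;
+ *   setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_THLDS, &th, sizeof(th));
+ */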
+
+static int sctp_setsockopt_recvrcvinfo(struct sock *sk, int *val,
+ unsigned int optlen)
+{
+ if (optlen < sizeof(int))
+ return -EINVAL;
+
+ sctp_sk(sk)->recvrcvinfo = (*val == 0) ? 0 : 1;
+
+ return 0;
+}
+
+static int sctp_setsockopt_recvnxtinfo(struct sock *sk, int *val,
+ unsigned int optlen)
+{
+ if (optlen < sizeof(int))
+ return -EINVAL;
+
+ sctp_sk(sk)->recvnxtinfo = (*val == 0) ? 0 : 1;
+
+ return 0;
+}
+
+static int sctp_setsockopt_pr_supported(struct sock *sk,
+ struct sctp_assoc_value *params,
+ unsigned int optlen)
+{
+ struct sctp_association *asoc;
+
+ if (optlen != sizeof(*params))
+ return -EINVAL;
+
+ asoc = sctp_id2assoc(sk, params->assoc_id);
+ if (!asoc && params->assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
+ sctp_sk(sk)->ep->prsctp_enable = !!params->assoc_value;
+
+ return 0;
+}
+
+static int sctp_setsockopt_default_prinfo(struct sock *sk,
+ struct sctp_default_prinfo *info,
+ unsigned int optlen)
+{
+ struct sctp_sock *sp = sctp_sk(sk);
+ struct sctp_association *asoc;
+ int retval = -EINVAL;
+
+ if (optlen != sizeof(*info))
+ goto out;
+
+ if (info->pr_policy & ~SCTP_PR_SCTP_MASK)
+ goto out;
+
+ if (info->pr_policy == SCTP_PR_SCTP_NONE)
+ info->pr_value = 0;
+
+ asoc = sctp_id2assoc(sk, info->pr_assoc_id);
+ if (!asoc && info->pr_assoc_id > SCTP_ALL_ASSOC &&
+ sctp_style(sk, UDP))
+ goto out;
+
+ retval = 0;
+
+ if (asoc) {
+ SCTP_PR_SET_POLICY(asoc->default_flags, info->pr_policy);
+ asoc->default_timetolive = info->pr_value;
+ goto out;
+ }
+
+ if (sctp_style(sk, TCP))
+ info->pr_assoc_id = SCTP_FUTURE_ASSOC;
+
+ if (info->pr_assoc_id == SCTP_FUTURE_ASSOC ||
+ info->pr_assoc_id == SCTP_ALL_ASSOC) {
+ SCTP_PR_SET_POLICY(sp->default_flags, info->pr_policy);
+ sp->default_timetolive = info->pr_value;
+ }
+
+ if (info->pr_assoc_id == SCTP_CURRENT_ASSOC ||
+ info->pr_assoc_id == SCTP_ALL_ASSOC) {
+ list_for_each_entry(asoc, &sp->ep->asocs, asocs) {
+ SCTP_PR_SET_POLICY(asoc->default_flags,
+ info->pr_policy);
+ asoc->default_timetolive = info->pr_value;
+ }
+ }
+
+out:
+ return retval;
+}
+
+static int sctp_setsockopt_reconfig_supported(struct sock *sk,
+ struct sctp_assoc_value *params,
+ unsigned int optlen)
+{
+ struct sctp_association *asoc;
+ int retval = -EINVAL;
+
+ if (optlen != sizeof(*params))
+ goto out;
+
+ asoc = sctp_id2assoc(sk, params->assoc_id);
+ if (!asoc && params->assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ goto out;
+
+ sctp_sk(sk)->ep->reconf_enable = !!params->assoc_value;
+
+ retval = 0;
+
+out:
+ return retval;
+}
+
+static int sctp_setsockopt_enable_strreset(struct sock *sk,
+ struct sctp_assoc_value *params,
+ unsigned int optlen)
+{
+ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
+ struct sctp_association *asoc;
+ int retval = -EINVAL;
+
+ if (optlen != sizeof(*params))
+ goto out;
+
+ if (params->assoc_value & (~SCTP_ENABLE_STRRESET_MASK))
+ goto out;
+
+ asoc = sctp_id2assoc(sk, params->assoc_id);
+ if (!asoc && params->assoc_id > SCTP_ALL_ASSOC &&
+ sctp_style(sk, UDP))
+ goto out;
+
+ retval = 0;
+
+ if (asoc) {
+ asoc->strreset_enable = params->assoc_value;
+ goto out;
+ }
+
+ if (sctp_style(sk, TCP))
+ params->assoc_id = SCTP_FUTURE_ASSOC;
+
+ if (params->assoc_id == SCTP_FUTURE_ASSOC ||
+ params->assoc_id == SCTP_ALL_ASSOC)
+ ep->strreset_enable = params->assoc_value;
+
+ if (params->assoc_id == SCTP_CURRENT_ASSOC ||
+ params->assoc_id == SCTP_ALL_ASSOC)
+ list_for_each_entry(asoc, &ep->asocs, asocs)
+ asoc->strreset_enable = params->assoc_value;
+
+out:
+ return retval;
+}
+
+static int sctp_setsockopt_reset_streams(struct sock *sk,
+ struct sctp_reset_streams *params,
+ unsigned int optlen)
+{
+ struct sctp_association *asoc;
+
+ if (optlen < sizeof(*params))
+ return -EINVAL;
+ /* srs_number_streams is u16, so optlen can't be bigger than this. */
+ optlen = min_t(unsigned int, optlen, USHRT_MAX +
+ sizeof(__u16) * sizeof(*params));
+
+ if (params->srs_number_streams * sizeof(__u16) >
+ optlen - sizeof(*params))
+ return -EINVAL;
+
+ asoc = sctp_id2assoc(sk, params->srs_assoc_id);
+ if (!asoc)
+ return -EINVAL;
+
+ return sctp_send_reset_streams(asoc, params);
+}
+
+static int sctp_setsockopt_reset_assoc(struct sock *sk, sctp_assoc_t *associd,
+ unsigned int optlen)
+{
+ struct sctp_association *asoc;
+
+ if (optlen != sizeof(*associd))
+ return -EINVAL;
+
+ asoc = sctp_id2assoc(sk, *associd);
+ if (!asoc)
+ return -EINVAL;
+
+ return sctp_send_reset_assoc(asoc);
+}
+
+static int sctp_setsockopt_add_streams(struct sock *sk,
+ struct sctp_add_streams *params,
+ unsigned int optlen)
+{
+ struct sctp_association *asoc;
+
+ if (optlen != sizeof(*params))
+ return -EINVAL;
+
+ asoc = sctp_id2assoc(sk, params->sas_assoc_id);
+ if (!asoc)
+ return -EINVAL;
+
+ return sctp_send_add_streams(asoc, params);
+}
+
+static int sctp_setsockopt_scheduler(struct sock *sk,
+ struct sctp_assoc_value *params,
+ unsigned int optlen)
+{
+ struct sctp_sock *sp = sctp_sk(sk);
+ struct sctp_association *asoc;
+ int retval = 0;
+
+ if (optlen < sizeof(*params))
+ return -EINVAL;
+
+ if (params->assoc_value > SCTP_SS_MAX)
+ return -EINVAL;
+
+ asoc = sctp_id2assoc(sk, params->assoc_id);
+ if (!asoc && params->assoc_id > SCTP_ALL_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
+ if (asoc)
+ return sctp_sched_set_sched(asoc, params->assoc_value);
+
+ if (sctp_style(sk, TCP))
+ params->assoc_id = SCTP_FUTURE_ASSOC;
+
+ if (params->assoc_id == SCTP_FUTURE_ASSOC ||
+ params->assoc_id == SCTP_ALL_ASSOC)
+ sp->default_ss = params->assoc_value;
+
+ if (params->assoc_id == SCTP_CURRENT_ASSOC ||
+ params->assoc_id == SCTP_ALL_ASSOC) {
+ list_for_each_entry(asoc, &sp->ep->asocs, asocs) {
+ int ret = sctp_sched_set_sched(asoc,
+ params->assoc_value);
+
+ if (ret && !retval)
+ retval = ret;
+ }
+ }
+
+ return retval;
+}
+
+static int sctp_setsockopt_scheduler_value(struct sock *sk,
+ struct sctp_stream_value *params,
+ unsigned int optlen)
+{
+ struct sctp_association *asoc;
+ int retval = -EINVAL;
+
+ if (optlen < sizeof(*params))
+ goto out;
+
+ asoc = sctp_id2assoc(sk, params->assoc_id);
+ if (!asoc && params->assoc_id != SCTP_CURRENT_ASSOC &&
+ sctp_style(sk, UDP))
+ goto out;
+
+ if (asoc) {
+ retval = sctp_sched_set_value(asoc, params->stream_id,
+ params->stream_value, GFP_KERNEL);
+ goto out;
+ }
+
+ retval = 0;
+
+ list_for_each_entry(asoc, &sctp_sk(sk)->ep->asocs, asocs) {
+ int ret = sctp_sched_set_value(asoc, params->stream_id,
+ params->stream_value,
+ GFP_KERNEL);
+ if (ret && !retval) /* try to return the 1st error. */
+ retval = ret;
+ }
+
+out:
+ return retval;
+}
+
+static int sctp_setsockopt_interleaving_supported(struct sock *sk,
+ struct sctp_assoc_value *p,
+ unsigned int optlen)
+{
+ struct sctp_sock *sp = sctp_sk(sk);
+ struct sctp_association *asoc;
+
+ if (optlen < sizeof(*p))
+ return -EINVAL;
+
+ asoc = sctp_id2assoc(sk, p->assoc_id);
+ if (!asoc && p->assoc_id != SCTP_FUTURE_ASSOC && sctp_style(sk, UDP))
+ return -EINVAL;
+
+ if (!sock_net(sk)->sctp.intl_enable || !sp->frag_interleave) {
+ return -EPERM;
+ }
+
+ sp->ep->intl_enable = !!p->assoc_value;
+ return 0;
+}
+
+static int sctp_setsockopt_reuse_port(struct sock *sk, int *val,
+ unsigned int optlen)
+{
+ if (!sctp_style(sk, TCP))
+ return -EOPNOTSUPP;
+
+ if (sctp_sk(sk)->ep->base.bind_addr.port)
+ return -EFAULT;
+
+ if (optlen < sizeof(int))
+ return -EINVAL;
+
+ sctp_sk(sk)->reuse = !!*val;
+
+ return 0;
+}
+
+static int sctp_assoc_ulpevent_type_set(struct sctp_event *param,
+ struct sctp_association *asoc)
+{
+ struct sctp_ulpevent *event;
+
+ sctp_ulpevent_type_set(&asoc->subscribe, param->se_type, param->se_on);
+
+ if (param->se_type == SCTP_SENDER_DRY_EVENT && param->se_on) {
+ if (sctp_outq_is_empty(&asoc->outqueue)) {
+ event = sctp_ulpevent_make_sender_dry_event(asoc,
+ GFP_USER | __GFP_NOWARN);
+ if (!event)
+ return -ENOMEM;
+
+ asoc->stream.si->enqueue_event(&asoc->ulpq, event);
+ }
+ }
+
+ return 0;
+}
+
+static int sctp_setsockopt_event(struct sock *sk, struct sctp_event *param,
+ unsigned int optlen)
+{
+ struct sctp_sock *sp = sctp_sk(sk);
+ struct sctp_association *asoc;
+ int retval = 0;
+
+ if (optlen < sizeof(*param))
+ return -EINVAL;
+
+ if (param->se_type < SCTP_SN_TYPE_BASE ||
+ param->se_type > SCTP_SN_TYPE_MAX)
+ return -EINVAL;
+
+ asoc = sctp_id2assoc(sk, param->se_assoc_id);
+ if (!asoc && param->se_assoc_id > SCTP_ALL_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
+ if (asoc)
+ return sctp_assoc_ulpevent_type_set(param, asoc);
+
+ if (sctp_style(sk, TCP))
+ param->se_assoc_id = SCTP_FUTURE_ASSOC;
+
+ if (param->se_assoc_id == SCTP_FUTURE_ASSOC ||
+ param->se_assoc_id == SCTP_ALL_ASSOC)
+ sctp_ulpevent_type_set(&sp->subscribe,
+ param->se_type, param->se_on);
+
+ if (param->se_assoc_id == SCTP_CURRENT_ASSOC ||
+ param->se_assoc_id == SCTP_ALL_ASSOC) {
+ list_for_each_entry(asoc, &sp->ep->asocs, asocs) {
+ int ret = sctp_assoc_ulpevent_type_set(param, asoc);
+
+ if (ret && !retval)
+ retval = ret;
+ }
+ }
+
+ return retval;
+}
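+
+/* A minimal userspace sketch (not kernel code; "sd" is a hypothetical
+ * SCTP socket descriptor) of subscribing to association change
+ * notifications with the per-event interface handled above:
+ *
+ *   struct sctp_event ev = { 0 };
+ *
+ *   ev.se_assoc_id = SCTP_FUTURE_ASSOC;
+ *   ev.se_type = SCTP_ASSOC_CHANGE;
+ *   ev.se_on = 1;
+ *   setsockopt(sd, IPPROTO_SCTP, SCTP_EVENT, &ev, sizeof(ev));
+ */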
+
+static int sctp_setsockopt_asconf_supported(struct sock *sk,
+ struct sctp_assoc_value *params,
+ unsigned int optlen)
+{
+ struct sctp_association *asoc;
+ struct sctp_endpoint *ep;
+ int retval = -EINVAL;
+
+ if (optlen != sizeof(*params))
+ goto out;
+
+ asoc = sctp_id2assoc(sk, params->assoc_id);
+ if (!asoc && params->assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ goto out;
+
+ ep = sctp_sk(sk)->ep;
+ ep->asconf_enable = !!params->assoc_value;
+
+ if (ep->asconf_enable && ep->auth_enable) {
+ sctp_auth_ep_add_chunkid(ep, SCTP_CID_ASCONF);
+ sctp_auth_ep_add_chunkid(ep, SCTP_CID_ASCONF_ACK);
+ }
+
+ retval = 0;
+
+out:
+ return retval;
+}
+
+static int sctp_setsockopt_auth_supported(struct sock *sk,
+ struct sctp_assoc_value *params,
+ unsigned int optlen)
+{
+ struct sctp_association *asoc;
+ struct sctp_endpoint *ep;
+ int retval = -EINVAL;
+
+ if (optlen != sizeof(*params))
+ goto out;
+
+ asoc = sctp_id2assoc(sk, params->assoc_id);
+ if (!asoc && params->assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ goto out;
+
+ ep = sctp_sk(sk)->ep;
+ if (params->assoc_value) {
+ retval = sctp_auth_init(ep, GFP_KERNEL);
+ if (retval)
+ goto out;
+ if (ep->asconf_enable) {
+ sctp_auth_ep_add_chunkid(ep, SCTP_CID_ASCONF);
+ sctp_auth_ep_add_chunkid(ep, SCTP_CID_ASCONF_ACK);
+ }
+ }
+
+ ep->auth_enable = !!params->assoc_value;
+ retval = 0;
+
+out:
+ return retval;
+}
+
+static int sctp_setsockopt_ecn_supported(struct sock *sk,
+ struct sctp_assoc_value *params,
+ unsigned int optlen)
+{
+ struct sctp_association *asoc;
+ int retval = -EINVAL;
+
+ if (optlen != sizeof(*params))
+ goto out;
+
+ asoc = sctp_id2assoc(sk, params->assoc_id);
+ if (!asoc && params->assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ goto out;
+
+ sctp_sk(sk)->ep->ecn_enable = !!params->assoc_value;
+ retval = 0;
+
+out:
+ return retval;
+}
+
+static int sctp_setsockopt_pf_expose(struct sock *sk,
+ struct sctp_assoc_value *params,
+ unsigned int optlen)
+{
+ struct sctp_association *asoc;
+ int retval = -EINVAL;
+
+ if (optlen != sizeof(*params))
+ goto out;
+
+ if (params->assoc_value > SCTP_PF_EXPOSE_MAX)
+ goto out;
+
+ asoc = sctp_id2assoc(sk, params->assoc_id);
+ if (!asoc && params->assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ goto out;
+
+ if (asoc)
+ asoc->pf_expose = params->assoc_value;
+ else
+ sctp_sk(sk)->pf_expose = params->assoc_value;
+ retval = 0;
+
+out:
+ return retval;
+}
+
+static int sctp_setsockopt_encap_port(struct sock *sk,
+ struct sctp_udpencaps *encap,
+ unsigned int optlen)
+{
+ struct sctp_association *asoc;
+ struct sctp_transport *t;
+ __be16 encap_port;
+
+ if (optlen != sizeof(*encap))
+ return -EINVAL;
+
+ /* If an address other than INADDR_ANY is specified, and
+ * no transport is found, then the request is invalid.
+ */
+ encap_port = (__force __be16)encap->sue_port;
+ if (!sctp_is_any(sk, (union sctp_addr *)&encap->sue_address)) {
+ t = sctp_addr_id2transport(sk, &encap->sue_address,
+ encap->sue_assoc_id);
+ if (!t)
+ return -EINVAL;
+
+ t->encap_port = encap_port;
+ return 0;
+ }
+
+ /* Get association, if assoc_id != SCTP_FUTURE_ASSOC and the
+ * socket is a one to many style socket, and an association
+ * was not found, then the id was invalid.
+ */
+ asoc = sctp_id2assoc(sk, encap->sue_assoc_id);
+ if (!asoc && encap->sue_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
+ /* If changes are for association, also apply encap_port to
+ * each transport.
+ */
+ if (asoc) {
+ list_for_each_entry(t, &asoc->peer.transport_addr_list,
+ transports)
+ t->encap_port = encap_port;
+
+ asoc->encap_port = encap_port;
+ return 0;
+ }
+
+ sctp_sk(sk)->encap_port = encap_port;
+ return 0;
+}
+
+static int sctp_setsockopt_probe_interval(struct sock *sk,
+ struct sctp_probeinterval *params,
+ unsigned int optlen)
+{
+ struct sctp_association *asoc;
+ struct sctp_transport *t;
+ __u32 probe_interval;
+
+ if (optlen != sizeof(*params))
+ return -EINVAL;
+
+ probe_interval = params->spi_interval;
+ if (probe_interval && probe_interval < SCTP_PROBE_TIMER_MIN)
+ return -EINVAL;
+
+ /* If an address other than INADDR_ANY is specified, and
+ * no transport is found, then the request is invalid.
+ */
+ if (!sctp_is_any(sk, (union sctp_addr *)&params->spi_address)) {
+ t = sctp_addr_id2transport(sk, &params->spi_address,
+ params->spi_assoc_id);
+ if (!t)
+ return -EINVAL;
+
+ t->probe_interval = msecs_to_jiffies(probe_interval);
+ sctp_transport_pl_reset(t);
+ return 0;
+ }
+
+	/* Get the association. If assoc_id != SCTP_FUTURE_ASSOC, the
+	 * socket is a one-to-many style socket, and no association
+	 * was found, then the id was invalid.
+ */
+ asoc = sctp_id2assoc(sk, params->spi_assoc_id);
+ if (!asoc && params->spi_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
+ /* If changes are for association, also apply probe_interval to
+ * each transport.
+ */
+ if (asoc) {
+ list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
+ t->probe_interval = msecs_to_jiffies(probe_interval);
+ sctp_transport_pl_reset(t);
+ }
+
+ asoc->probe_interval = msecs_to_jiffies(probe_interval);
+ return 0;
+ }
+
+ sctp_sk(sk)->probe_interval = probe_interval;
+ return 0;
+}
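+
+/* Illustrative user-space sketch (not part of this file): enabling PLPMTUD
+ * probing with a 60000 ms probe interval for future associations of a
+ * socket 'sd'.
+ *
+ *	struct sctp_probeinterval pi;
+ *
+ *	memset(&pi, 0, sizeof(pi));
+ *	pi.spi_assoc_id = 0;
+ *	pi.spi_interval = 60000;
+ *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_PLPMTUD_PROBE_INTERVAL,
+ *		       &pi, sizeof(pi)) < 0)
+ *		perror("setsockopt(SCTP_PLPMTUD_PROBE_INTERVAL)");
+ */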
+
+/* API 6.2 setsockopt(), getsockopt()
+ *
+ * Applications use setsockopt() and getsockopt() to set or retrieve
+ * socket options. Socket options are used to change the default
+ * behavior of sockets calls. They are described in Section 7.
+ *
+ * The syntax is:
+ *
+ * ret = getsockopt(int sd, int level, int optname, void __user *optval,
+ * int __user *optlen);
+ * ret = setsockopt(int sd, int level, int optname, const void __user *optval,
+ * int optlen);
+ *
+ * sd - the socket descriptor.
+ * level - set to IPPROTO_SCTP for all SCTP options.
+ * optname - the option name.
+ * optval - the buffer to store the value of the option.
+ * optlen - the size of the buffer.
+ */
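+/* Illustrative user-space sketch (not part of this file): a minimal
+ * setsockopt()/getsockopt() round trip on an SCTP socket 'sd', assuming
+ * <netinet/sctp.h> from lksctp-tools.
+ *
+ *	int on = 1, val = 0;
+ *	socklen_t len = sizeof(val);
+ *
+ *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_NODELAY, &on, sizeof(on)) < 0)
+ *		perror("setsockopt(SCTP_NODELAY)");
+ *	if (getsockopt(sd, IPPROTO_SCTP, SCTP_NODELAY, &val, &len) < 0)
+ *		perror("getsockopt(SCTP_NODELAY)");
+ */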
+static int sctp_setsockopt(struct sock *sk, int level, int optname,
+ sockptr_t optval, unsigned int optlen)
+{
+ void *kopt = NULL;
+ int retval = 0;
+
+ pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname);
+
+ /* I can hardly begin to describe how wrong this is. This is
+ * so broken as to be worse than useless. The API draft
+ * REALLY is NOT helpful here... I am not convinced that the
+ * semantics of setsockopt() with a level OTHER THAN SOL_SCTP
+ * are at all well-founded.
+ */
+ if (level != SOL_SCTP) {
+ struct sctp_af *af = sctp_sk(sk)->pf->af;
+
+ return af->setsockopt(sk, level, optname, optval, optlen);
+ }
+
+ if (optlen > 0) {
+ /* Trim it to the biggest size sctp sockopt may need if necessary */
+ optlen = min_t(unsigned int, optlen,
+ PAGE_ALIGN(USHRT_MAX +
+ sizeof(__u16) * sizeof(struct sctp_reset_streams)));
+ kopt = memdup_sockptr(optval, optlen);
+ if (IS_ERR(kopt))
+ return PTR_ERR(kopt);
+ }
+
+ lock_sock(sk);
+
+ switch (optname) {
+ case SCTP_SOCKOPT_BINDX_ADD:
+ /* 'optlen' is the size of the addresses buffer. */
+ retval = sctp_setsockopt_bindx(sk, kopt, optlen,
+ SCTP_BINDX_ADD_ADDR);
+ break;
+
+ case SCTP_SOCKOPT_BINDX_REM:
+ /* 'optlen' is the size of the addresses buffer. */
+ retval = sctp_setsockopt_bindx(sk, kopt, optlen,
+ SCTP_BINDX_REM_ADDR);
+ break;
+
+ case SCTP_SOCKOPT_CONNECTX_OLD:
+ /* 'optlen' is the size of the addresses buffer. */
+ retval = sctp_setsockopt_connectx_old(sk, kopt, optlen);
+ break;
+
+ case SCTP_SOCKOPT_CONNECTX:
+ /* 'optlen' is the size of the addresses buffer. */
+ retval = sctp_setsockopt_connectx(sk, kopt, optlen);
+ break;
+
+ case SCTP_DISABLE_FRAGMENTS:
+ retval = sctp_setsockopt_disable_fragments(sk, kopt, optlen);
+ break;
+
+ case SCTP_EVENTS:
+ retval = sctp_setsockopt_events(sk, kopt, optlen);
+ break;
+
+ case SCTP_AUTOCLOSE:
+ retval = sctp_setsockopt_autoclose(sk, kopt, optlen);
+ break;
+
+ case SCTP_PEER_ADDR_PARAMS:
+ retval = sctp_setsockopt_peer_addr_params(sk, kopt, optlen);
+ break;
+
+ case SCTP_DELAYED_SACK:
+ retval = sctp_setsockopt_delayed_ack(sk, kopt, optlen);
+ break;
+ case SCTP_PARTIAL_DELIVERY_POINT:
+ retval = sctp_setsockopt_partial_delivery_point(sk, kopt, optlen);
+ break;
+
+ case SCTP_INITMSG:
+ retval = sctp_setsockopt_initmsg(sk, kopt, optlen);
+ break;
+ case SCTP_DEFAULT_SEND_PARAM:
+ retval = sctp_setsockopt_default_send_param(sk, kopt, optlen);
+ break;
+ case SCTP_DEFAULT_SNDINFO:
+ retval = sctp_setsockopt_default_sndinfo(sk, kopt, optlen);
+ break;
+ case SCTP_PRIMARY_ADDR:
+ retval = sctp_setsockopt_primary_addr(sk, kopt, optlen);
+ break;
+ case SCTP_SET_PEER_PRIMARY_ADDR:
+ retval = sctp_setsockopt_peer_primary_addr(sk, kopt, optlen);
+ break;
+ case SCTP_NODELAY:
+ retval = sctp_setsockopt_nodelay(sk, kopt, optlen);
+ break;
+ case SCTP_RTOINFO:
+ retval = sctp_setsockopt_rtoinfo(sk, kopt, optlen);
+ break;
+ case SCTP_ASSOCINFO:
+ retval = sctp_setsockopt_associnfo(sk, kopt, optlen);
+ break;
+ case SCTP_I_WANT_MAPPED_V4_ADDR:
+ retval = sctp_setsockopt_mappedv4(sk, kopt, optlen);
+ break;
+ case SCTP_MAXSEG:
+ retval = sctp_setsockopt_maxseg(sk, kopt, optlen);
+ break;
+ case SCTP_ADAPTATION_LAYER:
+ retval = sctp_setsockopt_adaptation_layer(sk, kopt, optlen);
+ break;
+ case SCTP_CONTEXT:
+ retval = sctp_setsockopt_context(sk, kopt, optlen);
+ break;
+ case SCTP_FRAGMENT_INTERLEAVE:
+ retval = sctp_setsockopt_fragment_interleave(sk, kopt, optlen);
+ break;
+ case SCTP_MAX_BURST:
+ retval = sctp_setsockopt_maxburst(sk, kopt, optlen);
+ break;
+ case SCTP_AUTH_CHUNK:
+ retval = sctp_setsockopt_auth_chunk(sk, kopt, optlen);
+ break;
+ case SCTP_HMAC_IDENT:
+ retval = sctp_setsockopt_hmac_ident(sk, kopt, optlen);
+ break;
+ case SCTP_AUTH_KEY:
+ retval = sctp_setsockopt_auth_key(sk, kopt, optlen);
+ break;
+ case SCTP_AUTH_ACTIVE_KEY:
+ retval = sctp_setsockopt_active_key(sk, kopt, optlen);
+ break;
+ case SCTP_AUTH_DELETE_KEY:
+ retval = sctp_setsockopt_del_key(sk, kopt, optlen);
+ break;
+ case SCTP_AUTH_DEACTIVATE_KEY:
+ retval = sctp_setsockopt_deactivate_key(sk, kopt, optlen);
+ break;
+ case SCTP_AUTO_ASCONF:
+ retval = sctp_setsockopt_auto_asconf(sk, kopt, optlen);
+ break;
+ case SCTP_PEER_ADDR_THLDS:
+ retval = sctp_setsockopt_paddr_thresholds(sk, kopt, optlen,
+ false);
+ break;
+ case SCTP_PEER_ADDR_THLDS_V2:
+ retval = sctp_setsockopt_paddr_thresholds(sk, kopt, optlen,
+ true);
+ break;
+ case SCTP_RECVRCVINFO:
+ retval = sctp_setsockopt_recvrcvinfo(sk, kopt, optlen);
+ break;
+ case SCTP_RECVNXTINFO:
+ retval = sctp_setsockopt_recvnxtinfo(sk, kopt, optlen);
+ break;
+ case SCTP_PR_SUPPORTED:
+ retval = sctp_setsockopt_pr_supported(sk, kopt, optlen);
+ break;
+ case SCTP_DEFAULT_PRINFO:
+ retval = sctp_setsockopt_default_prinfo(sk, kopt, optlen);
+ break;
+ case SCTP_RECONFIG_SUPPORTED:
+ retval = sctp_setsockopt_reconfig_supported(sk, kopt, optlen);
+ break;
+ case SCTP_ENABLE_STREAM_RESET:
+ retval = sctp_setsockopt_enable_strreset(sk, kopt, optlen);
+ break;
+ case SCTP_RESET_STREAMS:
+ retval = sctp_setsockopt_reset_streams(sk, kopt, optlen);
+ break;
+ case SCTP_RESET_ASSOC:
+ retval = sctp_setsockopt_reset_assoc(sk, kopt, optlen);
+ break;
+ case SCTP_ADD_STREAMS:
+ retval = sctp_setsockopt_add_streams(sk, kopt, optlen);
+ break;
+ case SCTP_STREAM_SCHEDULER:
+ retval = sctp_setsockopt_scheduler(sk, kopt, optlen);
+ break;
+ case SCTP_STREAM_SCHEDULER_VALUE:
+ retval = sctp_setsockopt_scheduler_value(sk, kopt, optlen);
+ break;
+ case SCTP_INTERLEAVING_SUPPORTED:
+ retval = sctp_setsockopt_interleaving_supported(sk, kopt,
+ optlen);
+ break;
+ case SCTP_REUSE_PORT:
+ retval = sctp_setsockopt_reuse_port(sk, kopt, optlen);
+ break;
+ case SCTP_EVENT:
+ retval = sctp_setsockopt_event(sk, kopt, optlen);
+ break;
+ case SCTP_ASCONF_SUPPORTED:
+ retval = sctp_setsockopt_asconf_supported(sk, kopt, optlen);
+ break;
+ case SCTP_AUTH_SUPPORTED:
+ retval = sctp_setsockopt_auth_supported(sk, kopt, optlen);
+ break;
+ case SCTP_ECN_SUPPORTED:
+ retval = sctp_setsockopt_ecn_supported(sk, kopt, optlen);
+ break;
+ case SCTP_EXPOSE_POTENTIALLY_FAILED_STATE:
+ retval = sctp_setsockopt_pf_expose(sk, kopt, optlen);
+ break;
+ case SCTP_REMOTE_UDP_ENCAPS_PORT:
+ retval = sctp_setsockopt_encap_port(sk, kopt, optlen);
+ break;
+ case SCTP_PLPMTUD_PROBE_INTERVAL:
+ retval = sctp_setsockopt_probe_interval(sk, kopt, optlen);
+ break;
+ default:
+ retval = -ENOPROTOOPT;
+ break;
+ }
+
+ release_sock(sk);
+ kfree(kopt);
+ return retval;
+}
+
+/* API 3.1.6 connect() - UDP Style Syntax
+ *
+ * An application may use the connect() call in the UDP model to initiate an
+ * association without sending data.
+ *
+ * The syntax is:
+ *
+ * ret = connect(int sd, const struct sockaddr *nam, socklen_t len);
+ *
+ * sd: the socket descriptor to have a new association added to.
+ *
+ * nam: the address structure (either struct sockaddr_in or struct
+ * sockaddr_in6 defined in RFC2553 [7]).
+ *
+ * len: the size of the address.
+ */
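+/* Illustrative user-space sketch (not part of this file): initiating an
+ * association on a one-to-many style socket with connect(), using a
+ * hypothetical IPv4 peer address and port.
+ *
+ *	int sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
+ *	struct sockaddr_in peer = {
+ *		.sin_family = AF_INET,
+ *		.sin_port   = htons(5000),
+ *	};
+ *
+ *	inet_pton(AF_INET, "192.0.2.1", &peer.sin_addr);
+ *	if (connect(sd, (struct sockaddr *)&peer, sizeof(peer)) < 0)
+ *		perror("connect");
+ */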
+static int sctp_connect(struct sock *sk, struct sockaddr *addr,
+ int addr_len, int flags)
+{
+ struct sctp_af *af;
+ int err = -EINVAL;
+
+ lock_sock(sk);
+ pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk,
+ addr, addr_len);
+
+ /* Validate addr_len before calling common connect/connectx routine. */
+ af = sctp_get_af_specific(addr->sa_family);
+ if (af && addr_len >= af->sockaddr_len)
+ err = __sctp_connect(sk, addr, af->sockaddr_len, flags, NULL);
+
+ release_sock(sk);
+ return err;
+}
+
+int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len, int flags)
+{
+ if (addr_len < sizeof(uaddr->sa_family))
+ return -EINVAL;
+
+ if (uaddr->sa_family == AF_UNSPEC)
+ return -EOPNOTSUPP;
+
+ return sctp_connect(sock->sk, uaddr, addr_len, flags);
+}
+
+/* FIXME: Write comments. */
+static int sctp_disconnect(struct sock *sk, int flags)
+{
+ return -EOPNOTSUPP; /* STUB */
+}
+
+/* 4.1.4 accept() - TCP Style Syntax
+ *
+ * Applications use the accept() call to remove an established SCTP
+ * association from the accept queue of the endpoint. A new socket
+ * descriptor will be returned from accept() to represent the newly
+ * formed association.
+ */
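+/* Illustrative user-space sketch (not part of this file): accepting an
+ * association on a one-to-one (SOCK_STREAM) style SCTP socket 'sd' that is
+ * already bound to a local address.
+ *
+ *	int new_sd;
+ *
+ *	listen(sd, 5);
+ *	new_sd = accept(sd, NULL, NULL);
+ *	if (new_sd < 0)
+ *		perror("accept");
+ */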
+static struct sock *sctp_accept(struct sock *sk, int flags, int *err, bool kern)
+{
+ struct sctp_sock *sp;
+ struct sctp_endpoint *ep;
+ struct sock *newsk = NULL;
+ struct sctp_association *asoc;
+ long timeo;
+ int error = 0;
+
+ lock_sock(sk);
+
+ sp = sctp_sk(sk);
+ ep = sp->ep;
+
+ if (!sctp_style(sk, TCP)) {
+ error = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (!sctp_sstate(sk, LISTENING)) {
+ error = -EINVAL;
+ goto out;
+ }
+
+ timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
+
+ error = sctp_wait_for_accept(sk, timeo);
+ if (error)
+ goto out;
+
+ /* We treat the list of associations on the endpoint as the accept
+ * queue and pick the first association on the list.
+ */
+ asoc = list_entry(ep->asocs.next, struct sctp_association, asocs);
+
+ newsk = sp->pf->create_accept_sk(sk, asoc, kern);
+ if (!newsk) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ /* Populate the fields of the newsk from the oldsk and migrate the
+ * asoc to the newsk.
+ */
+ error = sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP);
+ if (error) {
+ sk_common_release(newsk);
+ newsk = NULL;
+ }
+
+out:
+ release_sock(sk);
+ *err = error;
+ return newsk;
+}
+
+/* The SCTP ioctl handler. */
+static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
+{
+ int rc = -ENOTCONN;
+
+ lock_sock(sk);
+
+ /*
+ * SEQPACKET-style sockets in LISTENING state are valid, for
+ * SCTP, so only discard TCP-style sockets in LISTENING state.
+ */
+ if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
+ goto out;
+
+ switch (cmd) {
+ case SIOCINQ: {
+ struct sk_buff *skb;
+ unsigned int amount = 0;
+
+ skb = skb_peek(&sk->sk_receive_queue);
+ if (skb != NULL) {
+ /*
+ * We will only return the amount of this packet since
+ * that is all that will be read.
+ */
+ amount = skb->len;
+ }
+ rc = put_user(amount, (int __user *)arg);
+ break;
+ }
+ default:
+ rc = -ENOIOCTLCMD;
+ break;
+ }
+out:
+ release_sock(sk);
+ return rc;
+}
+
+/* This is the function which gets called during socket creation to
+ * initialize the SCTP-specific portion of the sock.
+ * The sock structure should already be zero-filled memory.
+ */
+static int sctp_init_sock(struct sock *sk)
+{
+ struct net *net = sock_net(sk);
+ struct sctp_sock *sp;
+
+ pr_debug("%s: sk:%p\n", __func__, sk);
+
+ sp = sctp_sk(sk);
+
+ /* Initialize the SCTP per socket area. */
+ switch (sk->sk_type) {
+ case SOCK_SEQPACKET:
+ sp->type = SCTP_SOCKET_UDP;
+ break;
+ case SOCK_STREAM:
+ sp->type = SCTP_SOCKET_TCP;
+ break;
+ default:
+ return -ESOCKTNOSUPPORT;
+ }
+
+ sk->sk_gso_type = SKB_GSO_SCTP;
+
+ /* Initialize default send parameters. These parameters can be
+ * modified with the SCTP_DEFAULT_SEND_PARAM socket option.
+ */
+ sp->default_stream = 0;
+ sp->default_ppid = 0;
+ sp->default_flags = 0;
+ sp->default_context = 0;
+ sp->default_timetolive = 0;
+
+ sp->default_rcv_context = 0;
+ sp->max_burst = net->sctp.max_burst;
+
+ sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg;
+
+ /* Initialize default setup parameters. These parameters
+ * can be modified with the SCTP_INITMSG socket option or
+ * overridden by the SCTP_INIT CMSG.
+ */
+ sp->initmsg.sinit_num_ostreams = sctp_max_outstreams;
+ sp->initmsg.sinit_max_instreams = sctp_max_instreams;
+ sp->initmsg.sinit_max_attempts = net->sctp.max_retrans_init;
+ sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max;
+
+ /* Initialize default RTO related parameters. These parameters can
+	 * be modified with the SCTP_RTOINFO socket option.
+ */
+ sp->rtoinfo.srto_initial = net->sctp.rto_initial;
+ sp->rtoinfo.srto_max = net->sctp.rto_max;
+ sp->rtoinfo.srto_min = net->sctp.rto_min;
+
+ /* Initialize default association related parameters. These parameters
+ * can be modified with the SCTP_ASSOCINFO socket option.
+ */
+ sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association;
+ sp->assocparams.sasoc_number_peer_destinations = 0;
+ sp->assocparams.sasoc_peer_rwnd = 0;
+ sp->assocparams.sasoc_local_rwnd = 0;
+ sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life;
+
+ /* Initialize default event subscriptions. By default, all the
+ * options are off.
+ */
+ sp->subscribe = 0;
+
+ /* Default Peer Address Parameters. These defaults can
+ * be modified via SCTP_PEER_ADDR_PARAMS
+ */
+ sp->hbinterval = net->sctp.hb_interval;
+ sp->udp_port = htons(net->sctp.udp_port);
+ sp->encap_port = htons(net->sctp.encap_port);
+ sp->pathmaxrxt = net->sctp.max_retrans_path;
+ sp->pf_retrans = net->sctp.pf_retrans;
+ sp->ps_retrans = net->sctp.ps_retrans;
+ sp->pf_expose = net->sctp.pf_expose;
+ sp->pathmtu = 0; /* allow default discovery */
+ sp->sackdelay = net->sctp.sack_timeout;
+ sp->sackfreq = 2;
+ sp->param_flags = SPP_HB_ENABLE |
+ SPP_PMTUD_ENABLE |
+ SPP_SACKDELAY_ENABLE;
+ sp->default_ss = SCTP_SS_DEFAULT;
+
+ /* If enabled no SCTP message fragmentation will be performed.
+ * Configure through SCTP_DISABLE_FRAGMENTS socket option.
+ */
+ sp->disable_fragments = 0;
+
+ /* Enable Nagle algorithm by default. */
+ sp->nodelay = 0;
+
+ sp->recvrcvinfo = 0;
+ sp->recvnxtinfo = 0;
+
+ /* Enable by default. */
+ sp->v4mapped = 1;
+
+ /* Auto-close idle associations after the configured
+ * number of seconds. A value of 0 disables this
+ * feature. Configure through the SCTP_AUTOCLOSE socket option,
+ * for UDP-style sockets only.
+ */
+ sp->autoclose = 0;
+
+ /* User specified fragmentation limit. */
+ sp->user_frag = 0;
+
+ sp->adaptation_ind = 0;
+
+ sp->pf = sctp_get_pf_specific(sk->sk_family);
+
+ /* Control variables for partial data delivery. */
+ atomic_set(&sp->pd_mode, 0);
+ skb_queue_head_init(&sp->pd_lobby);
+ sp->frag_interleave = 0;
+ sp->probe_interval = net->sctp.probe_interval;
+
+ /* Create a per socket endpoint structure. Even if we
+ * change the data structure relationships, this may still
+ * be useful for storing pre-connect address information.
+ */
+ sp->ep = sctp_endpoint_new(sk, GFP_KERNEL);
+ if (!sp->ep)
+ return -ENOMEM;
+
+ sp->hmac = NULL;
+
+ sk->sk_destruct = sctp_destruct_sock;
+
+ SCTP_DBG_OBJCNT_INC(sock);
+
+ sk_sockets_allocated_inc(sk);
+ sock_prot_inuse_add(net, sk->sk_prot, 1);
+
+ return 0;
+}
+
+/* Cleanup any SCTP per socket resources. Must be called with
+ * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true
+ */
+static void sctp_destroy_sock(struct sock *sk)
+{
+ struct sctp_sock *sp;
+
+ pr_debug("%s: sk:%p\n", __func__, sk);
+
+ /* Release our hold on the endpoint. */
+ sp = sctp_sk(sk);
+ /* This could happen during socket init, thus we bail out
+ * early, since the rest of the below is not setup either.
+ */
+ if (sp->ep == NULL)
+ return;
+
+ if (sp->do_auto_asconf) {
+ sp->do_auto_asconf = 0;
+ list_del(&sp->auto_asconf_list);
+ }
+ sctp_endpoint_free(sp->ep);
+ sk_sockets_allocated_dec(sk);
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+}
+
+/* Triggered when there are no references on the socket anymore */
+static void sctp_destruct_common(struct sock *sk)
+{
+ struct sctp_sock *sp = sctp_sk(sk);
+
+ /* Free up the HMAC transform. */
+ crypto_free_shash(sp->hmac);
+}
+
+static void sctp_destruct_sock(struct sock *sk)
+{
+ sctp_destruct_common(sk);
+ inet_sock_destruct(sk);
+}
+
+/* API 4.1.7 shutdown() - TCP Style Syntax
+ * int shutdown(int socket, int how);
+ *
+ * sd - the socket descriptor of the association to be closed.
+ * how - Specifies the type of shutdown. The values are
+ * as follows:
+ * SHUT_RD
+ * Disables further receive operations. No SCTP
+ * protocol action is taken.
+ * SHUT_WR
+ * Disables further send operations, and initiates
+ * the SCTP shutdown sequence.
+ * SHUT_RDWR
+ * Disables further send and receive operations
+ * and initiates the SCTP shutdown sequence.
+ */
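+/* Illustrative user-space sketch (not part of this file): starting a graceful
+ * SCTP shutdown on a one-to-one style socket 'sd'.
+ *
+ *	if (shutdown(sd, SHUT_WR) < 0)
+ *		perror("shutdown");
+ */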
+static void sctp_shutdown(struct sock *sk, int how)
+{
+ struct net *net = sock_net(sk);
+ struct sctp_endpoint *ep;
+
+ if (!sctp_style(sk, TCP))
+ return;
+
+ ep = sctp_sk(sk)->ep;
+ if (how & SEND_SHUTDOWN && !list_empty(&ep->asocs)) {
+ struct sctp_association *asoc;
+
+ inet_sk_set_state(sk, SCTP_SS_CLOSING);
+ asoc = list_entry(ep->asocs.next,
+ struct sctp_association, asocs);
+ sctp_primitive_SHUTDOWN(net, asoc, NULL);
+ }
+}
+
+int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
+ struct sctp_info *info)
+{
+ struct sctp_transport *prim;
+ struct list_head *pos;
+ int mask;
+
+ memset(info, 0, sizeof(*info));
+ if (!asoc) {
+ struct sctp_sock *sp = sctp_sk(sk);
+
+ info->sctpi_s_autoclose = sp->autoclose;
+ info->sctpi_s_adaptation_ind = sp->adaptation_ind;
+ info->sctpi_s_pd_point = sp->pd_point;
+ info->sctpi_s_nodelay = sp->nodelay;
+ info->sctpi_s_disable_fragments = sp->disable_fragments;
+ info->sctpi_s_v4mapped = sp->v4mapped;
+ info->sctpi_s_frag_interleave = sp->frag_interleave;
+ info->sctpi_s_type = sp->type;
+
+ return 0;
+ }
+
+ info->sctpi_tag = asoc->c.my_vtag;
+ info->sctpi_state = asoc->state;
+ info->sctpi_rwnd = asoc->a_rwnd;
+ info->sctpi_unackdata = asoc->unack_data;
+ info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
+ info->sctpi_instrms = asoc->stream.incnt;
+ info->sctpi_outstrms = asoc->stream.outcnt;
+ list_for_each(pos, &asoc->base.inqueue.in_chunk_list)
+ info->sctpi_inqueue++;
+ list_for_each(pos, &asoc->outqueue.out_chunk_list)
+ info->sctpi_outqueue++;
+ info->sctpi_overall_error = asoc->overall_error_count;
+ info->sctpi_max_burst = asoc->max_burst;
+ info->sctpi_maxseg = asoc->frag_point;
+ info->sctpi_peer_rwnd = asoc->peer.rwnd;
+ info->sctpi_peer_tag = asoc->c.peer_vtag;
+
+ mask = asoc->peer.ecn_capable << 1;
+ mask = (mask | asoc->peer.ipv4_address) << 1;
+ mask = (mask | asoc->peer.ipv6_address) << 1;
+ mask = (mask | asoc->peer.hostname_address) << 1;
+ mask = (mask | asoc->peer.asconf_capable) << 1;
+ mask = (mask | asoc->peer.prsctp_capable) << 1;
+ mask = (mask | asoc->peer.auth_capable);
+ info->sctpi_peer_capable = mask;
+ mask = asoc->peer.sack_needed << 1;
+ mask = (mask | asoc->peer.sack_generation) << 1;
+ mask = (mask | asoc->peer.zero_window_announced);
+ info->sctpi_peer_sack = mask;
+
+ info->sctpi_isacks = asoc->stats.isacks;
+ info->sctpi_osacks = asoc->stats.osacks;
+ info->sctpi_opackets = asoc->stats.opackets;
+ info->sctpi_ipackets = asoc->stats.ipackets;
+ info->sctpi_rtxchunks = asoc->stats.rtxchunks;
+ info->sctpi_outofseqtsns = asoc->stats.outofseqtsns;
+ info->sctpi_idupchunks = asoc->stats.idupchunks;
+ info->sctpi_gapcnt = asoc->stats.gapcnt;
+ info->sctpi_ouodchunks = asoc->stats.ouodchunks;
+ info->sctpi_iuodchunks = asoc->stats.iuodchunks;
+ info->sctpi_oodchunks = asoc->stats.oodchunks;
+ info->sctpi_iodchunks = asoc->stats.iodchunks;
+ info->sctpi_octrlchunks = asoc->stats.octrlchunks;
+ info->sctpi_ictrlchunks = asoc->stats.ictrlchunks;
+
+ prim = asoc->peer.primary_path;
+ memcpy(&info->sctpi_p_address, &prim->ipaddr, sizeof(prim->ipaddr));
+ info->sctpi_p_state = prim->state;
+ info->sctpi_p_cwnd = prim->cwnd;
+ info->sctpi_p_srtt = prim->srtt;
+ info->sctpi_p_rto = jiffies_to_msecs(prim->rto);
+ info->sctpi_p_hbinterval = prim->hbinterval;
+ info->sctpi_p_pathmaxrxt = prim->pathmaxrxt;
+ info->sctpi_p_sackdelay = jiffies_to_msecs(prim->sackdelay);
+ info->sctpi_p_ssthresh = prim->ssthresh;
+ info->sctpi_p_partial_bytes_acked = prim->partial_bytes_acked;
+ info->sctpi_p_flight_size = prim->flight_size;
+ info->sctpi_p_error = prim->error_count;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sctp_get_sctp_info);
+
+/* use callback to avoid exporting the core structure */
+void sctp_transport_walk_start(struct rhashtable_iter *iter) __acquires(RCU)
+{
+ rhltable_walk_enter(&sctp_transport_hashtable, iter);
+
+ rhashtable_walk_start(iter);
+}
+
+void sctp_transport_walk_stop(struct rhashtable_iter *iter) __releases(RCU)
+{
+ rhashtable_walk_stop(iter);
+ rhashtable_walk_exit(iter);
+}
+
+struct sctp_transport *sctp_transport_get_next(struct net *net,
+ struct rhashtable_iter *iter)
+{
+ struct sctp_transport *t;
+
+ t = rhashtable_walk_next(iter);
+ for (; t; t = rhashtable_walk_next(iter)) {
+ if (IS_ERR(t)) {
+ if (PTR_ERR(t) == -EAGAIN)
+ continue;
+ break;
+ }
+
+ if (!sctp_transport_hold(t))
+ continue;
+
+ if (net_eq(t->asoc->base.net, net) &&
+ t->asoc->peer.primary_path == t)
+ break;
+
+ sctp_transport_put(t);
+ }
+
+ return t;
+}
+
+struct sctp_transport *sctp_transport_get_idx(struct net *net,
+ struct rhashtable_iter *iter,
+ int pos)
+{
+ struct sctp_transport *t;
+
+ if (!pos)
+ return SEQ_START_TOKEN;
+
+ while ((t = sctp_transport_get_next(net, iter)) && !IS_ERR(t)) {
+ if (!--pos)
+ break;
+ sctp_transport_put(t);
+ }
+
+ return t;
+}
+
+int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
+ void *p) {
+ int err = 0;
+ int hash = 0;
+ struct sctp_endpoint *ep;
+ struct sctp_hashbucket *head;
+
+ for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize;
+ hash++, head++) {
+ read_lock_bh(&head->lock);
+ sctp_for_each_hentry(ep, &head->chain) {
+ err = cb(ep, p);
+ if (err)
+ break;
+ }
+ read_unlock_bh(&head->lock);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(sctp_for_each_endpoint);
+
+int sctp_transport_lookup_process(sctp_callback_t cb, struct net *net,
+ const union sctp_addr *laddr,
+ const union sctp_addr *paddr, void *p)
+{
+ struct sctp_transport *transport;
+ struct sctp_endpoint *ep;
+ int err = -ENOENT;
+
+ rcu_read_lock();
+ transport = sctp_addrs_lookup_transport(net, laddr, paddr);
+ if (!transport) {
+ rcu_read_unlock();
+ return err;
+ }
+ ep = transport->asoc->ep;
+ if (!sctp_endpoint_hold(ep)) { /* asoc can be peeled off */
+ sctp_transport_put(transport);
+ rcu_read_unlock();
+ return err;
+ }
+ rcu_read_unlock();
+
+ err = cb(ep, transport, p);
+ sctp_endpoint_put(ep);
+ sctp_transport_put(transport);
+ return err;
+}
+EXPORT_SYMBOL_GPL(sctp_transport_lookup_process);
+
+int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done,
+ struct net *net, int *pos, void *p)
+{
+ struct rhashtable_iter hti;
+ struct sctp_transport *tsp;
+ struct sctp_endpoint *ep;
+ int ret;
+
+again:
+ ret = 0;
+ sctp_transport_walk_start(&hti);
+
+ tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
+ for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
+ ep = tsp->asoc->ep;
+ if (sctp_endpoint_hold(ep)) { /* asoc can be peeled off */
+ ret = cb(ep, tsp, p);
+ if (ret)
+ break;
+ sctp_endpoint_put(ep);
+ }
+ (*pos)++;
+ sctp_transport_put(tsp);
+ }
+ sctp_transport_walk_stop(&hti);
+
+ if (ret) {
+ if (cb_done && !cb_done(ep, tsp, p)) {
+ (*pos)++;
+ sctp_endpoint_put(ep);
+ sctp_transport_put(tsp);
+ goto again;
+ }
+ sctp_endpoint_put(ep);
+ sctp_transport_put(tsp);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sctp_transport_traverse_process);
+
+/* 7.2.1 Association Status (SCTP_STATUS)
+ *
+ * Applications can retrieve current status information about an
+ * association, including association state, peer receiver window size,
+ * number of unacked data chunks, and number of data chunks pending
+ * receipt. This information is read-only.
+ */
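+/* Illustrative user-space sketch (not part of this file): reading SCTP_STATUS
+ * for the single association of a one-to-one style socket 'sd'.
+ *
+ *	struct sctp_status status = { .sstat_assoc_id = 0 };
+ *	socklen_t len = sizeof(status);
+ *
+ *	if (getsockopt(sd, IPPROTO_SCTP, SCTP_STATUS, &status, &len) == 0)
+ *		printf("state=%d rwnd=%u unacked=%u\n", status.sstat_state,
+ *		       status.sstat_rwnd, status.sstat_unackdata);
+ */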
+static int sctp_getsockopt_sctp_status(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_status status;
+ struct sctp_association *asoc = NULL;
+ struct sctp_transport *transport;
+ sctp_assoc_t associd;
+ int retval = 0;
+
+ if (len < sizeof(status)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ len = sizeof(status);
+ if (copy_from_user(&status, optval, len)) {
+ retval = -EFAULT;
+ goto out;
+ }
+
+ associd = status.sstat_assoc_id;
+ asoc = sctp_id2assoc(sk, associd);
+ if (!asoc) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ transport = asoc->peer.primary_path;
+
+ status.sstat_assoc_id = sctp_assoc2id(asoc);
+ status.sstat_state = sctp_assoc_to_state(asoc);
+ status.sstat_rwnd = asoc->peer.rwnd;
+ status.sstat_unackdata = asoc->unack_data;
+
+ status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
+ status.sstat_instrms = asoc->stream.incnt;
+ status.sstat_outstrms = asoc->stream.outcnt;
+ status.sstat_fragmentation_point = asoc->frag_point;
+ status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
+ memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr,
+ transport->af_specific->sockaddr_len);
+ /* Map ipv4 address into v4-mapped-on-v6 address. */
+ sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk),
+ (union sctp_addr *)&status.sstat_primary.spinfo_address);
+ status.sstat_primary.spinfo_state = transport->state;
+ status.sstat_primary.spinfo_cwnd = transport->cwnd;
+ status.sstat_primary.spinfo_srtt = transport->srtt;
+ status.sstat_primary.spinfo_rto = jiffies_to_msecs(transport->rto);
+ status.sstat_primary.spinfo_mtu = transport->pathmtu;
+
+ if (status.sstat_primary.spinfo_state == SCTP_UNKNOWN)
+ status.sstat_primary.spinfo_state = SCTP_ACTIVE;
+
+ if (put_user(len, optlen)) {
+ retval = -EFAULT;
+ goto out;
+ }
+
+ pr_debug("%s: len:%d, state:%d, rwnd:%d, assoc_id:%d\n",
+ __func__, len, status.sstat_state, status.sstat_rwnd,
+ status.sstat_assoc_id);
+
+ if (copy_to_user(optval, &status, len)) {
+ retval = -EFAULT;
+ goto out;
+ }
+
+out:
+ return retval;
+}
+
+
+/* 7.2.2 Peer Address Information (SCTP_GET_PEER_ADDR_INFO)
+ *
+ * Applications can retrieve information about a specific peer address
+ * of an association, including its reachability state, congestion
+ * window, and retransmission timer values. This information is
+ * read-only.
+ */
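+/* Illustrative user-space sketch (not part of this file): querying peer
+ * address information, assuming 'sd' is a one-to-one style socket and 'peer'
+ * holds the peer's address as a struct sockaddr_in.
+ *
+ *	struct sctp_paddrinfo pinfo;
+ *	socklen_t len = sizeof(pinfo);
+ *
+ *	memset(&pinfo, 0, sizeof(pinfo));
+ *	memcpy(&pinfo.spinfo_address, &peer, sizeof(peer));
+ *	if (getsockopt(sd, IPPROTO_SCTP, SCTP_GET_PEER_ADDR_INFO,
+ *		       &pinfo, &len) == 0)
+ *		printf("cwnd=%u srtt=%u rto=%u\n", pinfo.spinfo_cwnd,
+ *		       pinfo.spinfo_srtt, pinfo.spinfo_rto);
+ */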
+static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_paddrinfo pinfo;
+ struct sctp_transport *transport;
+ int retval = 0;
+
+ if (len < sizeof(pinfo)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ len = sizeof(pinfo);
+ if (copy_from_user(&pinfo, optval, len)) {
+ retval = -EFAULT;
+ goto out;
+ }
+
+ transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address,
+ pinfo.spinfo_assoc_id);
+ if (!transport) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ if (transport->state == SCTP_PF &&
+ transport->asoc->pf_expose == SCTP_PF_EXPOSE_DISABLE) {
+ retval = -EACCES;
+ goto out;
+ }
+
+ pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
+ pinfo.spinfo_state = transport->state;
+ pinfo.spinfo_cwnd = transport->cwnd;
+ pinfo.spinfo_srtt = transport->srtt;
+ pinfo.spinfo_rto = jiffies_to_msecs(transport->rto);
+ pinfo.spinfo_mtu = transport->pathmtu;
+
+ if (pinfo.spinfo_state == SCTP_UNKNOWN)
+ pinfo.spinfo_state = SCTP_ACTIVE;
+
+ if (put_user(len, optlen)) {
+ retval = -EFAULT;
+ goto out;
+ }
+
+ if (copy_to_user(optval, &pinfo, len)) {
+ retval = -EFAULT;
+ goto out;
+ }
+
+out:
+ return retval;
+}
+
+/* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
+ *
+ * This option is an on/off flag. If enabled, no SCTP message
+ * fragmentation will be performed. Instead if a message being sent
+ * exceeds the current PMTU size, the message will NOT be sent and
+ * instead an error will be indicated to the user.
+ */
+static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
+ char __user *optval, int __user *optlen)
+{
+ int val;
+
+ if (len < sizeof(int))
+ return -EINVAL;
+
+ len = sizeof(int);
+ val = (sctp_sk(sk)->disable_fragments == 1);
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &val, len))
+ return -EFAULT;
+ return 0;
+}
+
+/* 7.1.15 Set notification and ancillary events (SCTP_EVENTS)
+ *
+ * This socket option is used to specify various notifications and
+ * ancillary data the user wishes to receive.
+ */
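+/* Illustrative user-space sketch (not part of this file): subscribing to data
+ * I/O and association change notifications on socket 'sd' through the older
+ * struct sctp_event_subscribe interface.
+ *
+ *	struct sctp_event_subscribe events;
+ *
+ *	memset(&events, 0, sizeof(events));
+ *	events.sctp_data_io_event = 1;
+ *	events.sctp_association_event = 1;
+ *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_EVENTS, &events,
+ *		       sizeof(events)) < 0)
+ *		perror("setsockopt(SCTP_EVENTS)");
+ */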
+static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_event_subscribe subscribe;
+ __u8 *sn_type = (__u8 *)&subscribe;
+ int i;
+
+ if (len == 0)
+ return -EINVAL;
+ if (len > sizeof(struct sctp_event_subscribe))
+ len = sizeof(struct sctp_event_subscribe);
+ if (put_user(len, optlen))
+ return -EFAULT;
+
+ for (i = 0; i < len; i++)
+ sn_type[i] = sctp_ulpevent_type_enabled(sctp_sk(sk)->subscribe,
+ SCTP_SN_TYPE_BASE + i);
+
+ if (copy_to_user(optval, &subscribe, len))
+ return -EFAULT;
+
+ return 0;
+}
+
+/* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
+ *
+ * This socket option is applicable to the UDP-style socket only. When
+ * set it will cause associations that are idle for more than the
+ * specified number of seconds to automatically close. An association
+ * being idle is defined as an association that has NOT sent or received
+ * user data. The special value of '0' indicates that no automatic
+ * close of any associations should be performed. The option expects an
+ * integer defining the number of seconds of idle time before an
+ * association is closed.
+ */
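+/* Illustrative user-space sketch (not part of this file): auto-closing idle
+ * associations after 120 seconds on a one-to-many style socket 'sd'.
+ *
+ *	int secs = 120;
+ *
+ *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_AUTOCLOSE, &secs,
+ *		       sizeof(secs)) < 0)
+ *		perror("setsockopt(SCTP_AUTOCLOSE)");
+ */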
+static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen)
+{
+ /* Applicable to UDP-style socket only */
+ if (sctp_style(sk, TCP))
+ return -EOPNOTSUPP;
+ if (len < sizeof(int))
+ return -EINVAL;
+ len = sizeof(int);
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (put_user(sctp_sk(sk)->autoclose, (int __user *)optval))
+ return -EFAULT;
+ return 0;
+}
+
+/* Helper routine to branch off an association to a new socket. */
+int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
+{
+ struct sctp_association *asoc = sctp_id2assoc(sk, id);
+ struct sctp_sock *sp = sctp_sk(sk);
+ struct socket *sock;
+ int err = 0;
+
+ /* Do not peel off from one netns to another one. */
+ if (!net_eq(current->nsproxy->net_ns, sock_net(sk)))
+ return -EINVAL;
+
+ if (!asoc)
+ return -EINVAL;
+
+ /* An association cannot be branched off from an already peeled-off
+ * socket, nor is this supported for tcp style sockets.
+ */
+ if (!sctp_style(sk, UDP))
+ return -EINVAL;
+
+ /* Create a new socket. */
+ err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock);
+ if (err < 0)
+ return err;
+
+ sctp_copy_sock(sock->sk, sk, asoc);
+
+ /* Make peeled-off sockets more like 1-1 accepted sockets.
+ * Set the daddr and initialize id to something more random and also
+ * copy over any ip options.
+ */
+ sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sock->sk);
+ sp->pf->copy_ip_options(sk, sock->sk);
+
+ /* Populate the fields of the newsk from the oldsk and migrate the
+ * asoc to the newsk.
+ */
+ err = sctp_sock_migrate(sk, sock->sk, asoc,
+ SCTP_SOCKET_UDP_HIGH_BANDWIDTH);
+ if (err) {
+ sock_release(sock);
+ sock = NULL;
+ }
+
+ *sockp = sock;
+
+ return err;
+}
+EXPORT_SYMBOL(sctp_do_peeloff);
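+
+/* Illustrative user-space sketch (not part of this file): branching an
+ * association off a one-to-many style socket 'sd', assuming the
+ * sctp_peeloff() wrapper from lksctp-tools (which issues the
+ * SCTP_SOCKOPT_PEELOFF getsockopt underneath) and a valid 'assoc_id'.
+ *
+ *	int new_sd = sctp_peeloff(sd, assoc_id);
+ *	if (new_sd < 0)
+ *		perror("sctp_peeloff");
+ */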
+
+static int sctp_getsockopt_peeloff_common(struct sock *sk, sctp_peeloff_arg_t *peeloff,
+ struct file **newfile, unsigned flags)
+{
+ struct socket *newsock;
+ int retval;
+
+ retval = sctp_do_peeloff(sk, peeloff->associd, &newsock);
+ if (retval < 0)
+ goto out;
+
+ /* Map the socket to an unused fd that can be returned to the user. */
+ retval = get_unused_fd_flags(flags & SOCK_CLOEXEC);
+ if (retval < 0) {
+ sock_release(newsock);
+ goto out;
+ }
+
+ *newfile = sock_alloc_file(newsock, 0, NULL);
+ if (IS_ERR(*newfile)) {
+ put_unused_fd(retval);
+ retval = PTR_ERR(*newfile);
+ *newfile = NULL;
+ return retval;
+ }
+
+ pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk,
+ retval);
+
+ peeloff->sd = retval;
+
+ if (flags & SOCK_NONBLOCK)
+ (*newfile)->f_flags |= O_NONBLOCK;
+out:
+ return retval;
+}
+
+static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen)
+{
+ sctp_peeloff_arg_t peeloff;
+ struct file *newfile = NULL;
+ int retval = 0;
+
+ if (len < sizeof(sctp_peeloff_arg_t))
+ return -EINVAL;
+ len = sizeof(sctp_peeloff_arg_t);
+ if (copy_from_user(&peeloff, optval, len))
+ return -EFAULT;
+
+ retval = sctp_getsockopt_peeloff_common(sk, &peeloff, &newfile, 0);
+ if (retval < 0)
+ goto out;
+
+ /* Return the fd mapped to the new socket. */
+ if (put_user(len, optlen)) {
+ fput(newfile);
+ put_unused_fd(retval);
+ return -EFAULT;
+ }
+
+ if (copy_to_user(optval, &peeloff, len)) {
+ fput(newfile);
+ put_unused_fd(retval);
+ return -EFAULT;
+ }
+ fd_install(retval, newfile);
+out:
+ return retval;
+}
+
+static int sctp_getsockopt_peeloff_flags(struct sock *sk, int len,
+ char __user *optval, int __user *optlen)
+{
+ sctp_peeloff_flags_arg_t peeloff;
+ struct file *newfile = NULL;
+ int retval = 0;
+
+ if (len < sizeof(sctp_peeloff_flags_arg_t))
+ return -EINVAL;
+ len = sizeof(sctp_peeloff_flags_arg_t);
+ if (copy_from_user(&peeloff, optval, len))
+ return -EFAULT;
+
+ retval = sctp_getsockopt_peeloff_common(sk, &peeloff.p_arg,
+ &newfile, peeloff.flags);
+ if (retval < 0)
+ goto out;
+
+ /* Return the fd mapped to the new socket. */
+ if (put_user(len, optlen)) {
+ fput(newfile);
+ put_unused_fd(retval);
+ return -EFAULT;
+ }
+
+ if (copy_to_user(optval, &peeloff, len)) {
+ fput(newfile);
+ put_unused_fd(retval);
+ return -EFAULT;
+ }
+ fd_install(retval, newfile);
+out:
+ return retval;
+}
+
+/* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
+ *
+ * Applications can enable or disable heartbeats for any peer address of
+ * an association, modify an address's heartbeat interval, force a
+ * heartbeat to be sent immediately, and adjust the address's maximum
+ * number of retransmissions sent before an address is considered
+ * unreachable. The following structure is used to access and modify an
+ * address's parameters:
+ *
+ * struct sctp_paddrparams {
+ * sctp_assoc_t spp_assoc_id;
+ * struct sockaddr_storage spp_address;
+ * uint32_t spp_hbinterval;
+ * uint16_t spp_pathmaxrxt;
+ * uint32_t spp_pathmtu;
+ * uint32_t spp_sackdelay;
+ * uint32_t spp_flags;
+ * };
+ *
+ * spp_assoc_id - (one-to-many style socket) This is filled in by the
+ * application, and identifies the association for
+ * this query.
+ * spp_address - This specifies which address is of interest.
+ * spp_hbinterval - This contains the value of the heartbeat interval,
+ * in milliseconds. If a value of zero
+ * is present in this field then no changes are to
+ * be made to this parameter.
+ * spp_pathmaxrxt - This contains the maximum number of
+ * retransmissions before this address shall be
+ * considered unreachable. If a value of zero
+ * is present in this field then no changes are to
+ * be made to this parameter.
+ * spp_pathmtu - When Path MTU discovery is disabled the value
+ * specified here will be the "fixed" path mtu.
+ * Note that if the spp_address field is empty
+ * then all associations on this address will
+ * have this fixed path mtu set upon them.
+ *
+ * spp_sackdelay - When delayed sack is enabled, this value specifies
+ * the number of milliseconds that sacks will be delayed
+ * for. This value will apply to all addresses of an
+ * association if the spp_address field is empty. Note
+ * also, that if delayed sack is enabled and this
+ * value is set to 0, no change is made to the last
+ * recorded delayed sack timer value.
+ *
+ * spp_flags - These flags are used to control various features
+ * on an association. The flag field may contain
+ * zero or more of the following options.
+ *
+ * SPP_HB_ENABLE - Enable heartbeats on the
+ * specified address. Note that if the address
+ * field is empty all addresses for the association
+ * have heartbeats enabled upon them.
+ *
+ * SPP_HB_DISABLE - Disable heartbeats on the
+ * specified address. Note that if the address
+ * field is empty all addresses for the association
+ * will have their heartbeats disabled. Note also
+ * that SPP_HB_ENABLE and SPP_HB_DISABLE are
+ * mutually exclusive, only one of these two should
+ * be specified. Enabling both fields will have
+ * undetermined results.
+ *
+ * SPP_HB_DEMAND - Request a user initiated heartbeat
+ * to be made immediately.
+ *
+ * SPP_PMTUD_ENABLE - This field will enable PMTU
+ * discovery upon the specified address. Note that
+ * if the address field is empty then all addresses
+ * on the association are affected.
+ *
+ * SPP_PMTUD_DISABLE - This field will disable PMTU
+ * discovery upon the specified address. Note that
+ * if the address field is empty then all addresses
+ * on the association are affected. Note also that
+ * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
+ * exclusive. Enabling both will have undetermined
+ * results.
+ *
+ * SPP_SACKDELAY_ENABLE - Setting this flag turns
+ * on delayed sack. The time specified in spp_sackdelay
+ * is used to specify the sack delay for this address. Note
+ * that if spp_address is empty then all addresses will
+ * enable delayed sack and take on the sack delay
+ * value specified in spp_sackdelay.
+ * SPP_SACKDELAY_DISABLE - Setting this flag turns
+ * off delayed sack. If the spp_address field is blank then
+ * delayed sack is disabled for the entire association. Note
+ * also that this field is mutually exclusive to
+ * SPP_SACKDELAY_ENABLE, setting both will have undefined
+ * results.
+ *
+ * SPP_IPV6_FLOWLABEL: Setting this flag enables the
+ * setting of the IPV6 flow label value. The value is
+ * contained in the spp_ipv6_flowlabel field.
+ * Upon retrieval, this flag will be set to indicate that
+ * the spp_ipv6_flowlabel field has a valid value returned.
+ * If a specific destination address is set (in the
+ * spp_address field), then the value returned is that of
+ * the address. If just an association is specified (and
+ * no address), then the association's default flow label
+ * is returned. If neither an association nor a destination
+ * is specified, then the socket's default flow label is
+ * returned. For non-IPv6 sockets, this flag will be left
+ * cleared.
+ *
+ * SPP_DSCP: Setting this flag enables the setting of the
+ * Differentiated Services Code Point (DSCP) value
+ * associated with either the association or a specific
+ * address. The value is obtained in the spp_dscp field.
+ * Upon retrieval, this flag will be set to indicate that
+ * the spp_dscp field has a valid value returned. If a
+ * specific destination address is set when called (in the
+ * spp_address field), then that specific destination
+ * address's DSCP value is returned. If just an association
+ * is specified, then the association's default DSCP is
+ * returned. If neither an association nor a destination is
+ * specified, then the socket's default DSCP is returned.
+ *
+ * spp_ipv6_flowlabel
+ * - This field is used in conjunction with the
+ * SPP_IPV6_FLOWLABEL flag and contains the IPv6 flow label.
+ * The 20 least significant bits are used for the flow
+ * label. This setting has precedence over any IPv6-layer
+ * setting.
+ *
+ * spp_dscp - This field is used in conjunction with the SPP_DSCP flag
+ * and contains the DSCP. The 6 most significant bits are
+ * used for the DSCP. This setting has precedence over any
+ * IPv4- or IPv6- layer setting.
+ */
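+/* Illustrative user-space sketch (not part of this file): setting a 5 second
+ * heartbeat interval on all peer addresses for future associations of a
+ * one-to-many style socket 'sd'.
+ *
+ *	struct sctp_paddrparams params;
+ *
+ *	memset(&params, 0, sizeof(params));
+ *	params.spp_assoc_id = 0;
+ *	params.spp_hbinterval = 5000;
+ *	params.spp_flags = SPP_HB_ENABLE;
+ *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
+ *		       &params, sizeof(params)) < 0)
+ *		perror("setsockopt(SCTP_PEER_ADDR_PARAMS)");
+ */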
+static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
+ char __user *optval, int __user *optlen)
+{
+ struct sctp_paddrparams params;
+ struct sctp_transport *trans = NULL;
+ struct sctp_association *asoc = NULL;
+ struct sctp_sock *sp = sctp_sk(sk);
+
+ if (len >= sizeof(params))
+ len = sizeof(params);
+ else if (len >= ALIGN(offsetof(struct sctp_paddrparams,
+ spp_ipv6_flowlabel), 4))
+ len = ALIGN(offsetof(struct sctp_paddrparams,
+ spp_ipv6_flowlabel), 4);
+ else
+ return -EINVAL;
+
+ if (copy_from_user(&params, optval, len))
+ return -EFAULT;
+
+ /* If an address other than INADDR_ANY is specified, and
+ * no transport is found, then the request is invalid.
+ */
+ if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
+ trans = sctp_addr_id2transport(sk, &params.spp_address,
+ params.spp_assoc_id);
+ if (!trans) {
+ pr_debug("%s: failed no transport\n", __func__);
+ return -EINVAL;
+ }
+ }
+
+	/* Get the association. If assoc_id != SCTP_FUTURE_ASSOC, the
+	 * socket is a one-to-many style socket, and no association
+	 * was found, then the id was invalid.
+ */
+ asoc = sctp_id2assoc(sk, params.spp_assoc_id);
+ if (!asoc && params.spp_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP)) {
+ pr_debug("%s: failed no association\n", __func__);
+ return -EINVAL;
+ }
+
+ if (trans) {
+ /* Fetch transport values. */
+ params.spp_hbinterval = jiffies_to_msecs(trans->hbinterval);
+ params.spp_pathmtu = trans->pathmtu;
+ params.spp_pathmaxrxt = trans->pathmaxrxt;
+ params.spp_sackdelay = jiffies_to_msecs(trans->sackdelay);
+
+ /*draft-11 doesn't say what to return in spp_flags*/
+ params.spp_flags = trans->param_flags;
+ if (trans->flowlabel & SCTP_FLOWLABEL_SET_MASK) {
+ params.spp_ipv6_flowlabel = trans->flowlabel &
+ SCTP_FLOWLABEL_VAL_MASK;
+ params.spp_flags |= SPP_IPV6_FLOWLABEL;
+ }
+ if (trans->dscp & SCTP_DSCP_SET_MASK) {
+ params.spp_dscp = trans->dscp & SCTP_DSCP_VAL_MASK;
+ params.spp_flags |= SPP_DSCP;
+ }
+ } else if (asoc) {
+ /* Fetch association values. */
+ params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval);
+ params.spp_pathmtu = asoc->pathmtu;
+ params.spp_pathmaxrxt = asoc->pathmaxrxt;
+ params.spp_sackdelay = jiffies_to_msecs(asoc->sackdelay);
+
+ /*draft-11 doesn't say what to return in spp_flags*/
+ params.spp_flags = asoc->param_flags;
+ if (asoc->flowlabel & SCTP_FLOWLABEL_SET_MASK) {
+ params.spp_ipv6_flowlabel = asoc->flowlabel &
+ SCTP_FLOWLABEL_VAL_MASK;
+ params.spp_flags |= SPP_IPV6_FLOWLABEL;
+ }
+ if (asoc->dscp & SCTP_DSCP_SET_MASK) {
+ params.spp_dscp = asoc->dscp & SCTP_DSCP_VAL_MASK;
+ params.spp_flags |= SPP_DSCP;
+ }
+ } else {
+ /* Fetch socket values. */
+ params.spp_hbinterval = sp->hbinterval;
+ params.spp_pathmtu = sp->pathmtu;
+ params.spp_sackdelay = sp->sackdelay;
+ params.spp_pathmaxrxt = sp->pathmaxrxt;
+
+ /*draft-11 doesn't say what to return in spp_flags*/
+ params.spp_flags = sp->param_flags;
+ if (sp->flowlabel & SCTP_FLOWLABEL_SET_MASK) {
+ params.spp_ipv6_flowlabel = sp->flowlabel &
+ SCTP_FLOWLABEL_VAL_MASK;
+ params.spp_flags |= SPP_IPV6_FLOWLABEL;
+ }
+ if (sp->dscp & SCTP_DSCP_SET_MASK) {
+ params.spp_dscp = sp->dscp & SCTP_DSCP_VAL_MASK;
+ params.spp_flags |= SPP_DSCP;
+ }
+ }
+
+ if (copy_to_user(optval, &params, len))
+ return -EFAULT;
+
+ if (put_user(len, optlen))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
+ *
+ * This option will affect the way delayed acks are performed. This
+ * option allows you to get or set the delayed ack time, in
+ * milliseconds. It also allows changing the delayed ack frequency.
+ * Changing the frequency to 1 disables the delayed sack algorithm. If
+ * the assoc_id is 0, then this sets or gets the endpoint's default
+ * values. If the assoc_id field is non-zero, then the set or get
+ * affects the specified association for the one to many model (the
+ * assoc_id field is ignored by the one to one model). Note that if
+ * sack_delay or sack_freq are 0 when setting this option, then the
+ * current values will remain unchanged.
+ *
+ * struct sctp_sack_info {
+ * sctp_assoc_t sack_assoc_id;
+ * uint32_t sack_delay;
+ * uint32_t sack_freq;
+ * };
+ *
+ * sack_assoc_id - This parameter indicates which association the user
+ * is performing an action upon. Note that if this field's value is
+ * zero then the endpoint's default value is changed (affecting future
+ * associations only).
+ *
+ * sack_delay - This parameter contains the number of milliseconds that
+ * the user is requesting the delayed ACK timer be set to. Note that
+ * this value is defined in the standard to be between 200 and 500
+ * milliseconds.
+ *
+ * sack_freq - This parameter contains the number of packets that must
+ * be received before a sack is sent without waiting for the delay
+ * timer to expire. The default value for this is 2, setting this
+ * value to 1 will disable the delayed sack algorithm.
+ */
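+/* Illustrative user-space sketch (not part of this file): requesting a 100 ms
+ * delayed SACK with a frequency of 2 packets for future associations of
+ * socket 'sd'.
+ *
+ *	struct sctp_sack_info sack = {
+ *		.sack_assoc_id = 0,
+ *		.sack_delay    = 100,
+ *		.sack_freq     = 2,
+ *	};
+ *
+ *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_DELAYED_SACK, &sack,
+ *		       sizeof(sack)) < 0)
+ *		perror("setsockopt(SCTP_DELAYED_SACK)");
+ */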
+static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_sack_info params;
+ struct sctp_association *asoc = NULL;
+ struct sctp_sock *sp = sctp_sk(sk);
+
+ if (len >= sizeof(struct sctp_sack_info)) {
+ len = sizeof(struct sctp_sack_info);
+
+ if (copy_from_user(&params, optval, len))
+ return -EFAULT;
+ } else if (len == sizeof(struct sctp_assoc_value)) {
+ pr_warn_ratelimited(DEPRECATED
+ "%s (pid %d) "
+ "Use of struct sctp_assoc_value in delayed_ack socket option.\n"
+ "Use struct sctp_sack_info instead\n",
+ current->comm, task_pid_nr(current));
+ if (copy_from_user(&params, optval, len))
+ return -EFAULT;
+ } else
+ return -EINVAL;
+
+	/* Get the association. If sack_assoc_id != SCTP_FUTURE_ASSOC, the
+	 * socket is a one-to-many style socket, and no association
+	 * was found, then the id was invalid.
+ */
+ asoc = sctp_id2assoc(sk, params.sack_assoc_id);
+ if (!asoc && params.sack_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
+ if (asoc) {
+ /* Fetch association values. */
+ if (asoc->param_flags & SPP_SACKDELAY_ENABLE) {
+ params.sack_delay = jiffies_to_msecs(asoc->sackdelay);
+ params.sack_freq = asoc->sackfreq;
+
+ } else {
+ params.sack_delay = 0;
+ params.sack_freq = 1;
+ }
+ } else {
+ /* Fetch socket values. */
+ if (sp->param_flags & SPP_SACKDELAY_ENABLE) {
+ params.sack_delay = sp->sackdelay;
+ params.sack_freq = sp->sackfreq;
+ } else {
+ params.sack_delay = 0;
+ params.sack_freq = 1;
+ }
+ }
+
+ if (copy_to_user(optval, &params, len))
+ return -EFAULT;
+
+ if (put_user(len, optlen))
+ return -EFAULT;
+
+ return 0;
+}
+
+/* 7.1.3 Initialization Parameters (SCTP_INITMSG)
+ *
+ * Applications can specify protocol parameters for the default association
+ * initialization. The option name argument to setsockopt() and getsockopt()
+ * is SCTP_INITMSG.
+ *
+ * Setting initialization parameters is effective only on an unconnected
+ * socket (for UDP-style sockets only future associations are affected
+ * by the change). With TCP-style sockets, this option is inherited by
+ * sockets derived from a listener socket.
+ */
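+/* Illustrative user-space sketch (not part of this file): requesting up to 10
+ * outbound streams and accepting up to 10 inbound streams for future
+ * associations of socket 'sd'.
+ *
+ *	struct sctp_initmsg init = {
+ *		.sinit_num_ostreams  = 10,
+ *		.sinit_max_instreams = 10,
+ *	};
+ *
+ *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_INITMSG, &init,
+ *		       sizeof(init)) < 0)
+ *		perror("setsockopt(SCTP_INITMSG)");
+ */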
+static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen)
+{
+ if (len < sizeof(struct sctp_initmsg))
+ return -EINVAL;
+ len = sizeof(struct sctp_initmsg);
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len))
+ return -EFAULT;
+ return 0;
+}
+
+
+static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
+ char __user *optval, int __user *optlen)
+{
+ struct sctp_association *asoc;
+ int cnt = 0;
+ struct sctp_getaddrs getaddrs;
+ struct sctp_transport *from;
+ void __user *to;
+ union sctp_addr temp;
+ struct sctp_sock *sp = sctp_sk(sk);
+ int addrlen;
+ size_t space_left;
+ int bytes_copied;
+
+ if (len < sizeof(struct sctp_getaddrs))
+ return -EINVAL;
+
+ if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
+ return -EFAULT;
+
+ /* For UDP-style sockets, id specifies the association to query. */
+ asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
+ if (!asoc)
+ return -EINVAL;
+
+ to = optval + offsetof(struct sctp_getaddrs, addrs);
+ space_left = len - offsetof(struct sctp_getaddrs, addrs);
+
+ list_for_each_entry(from, &asoc->peer.transport_addr_list,
+ transports) {
+ memcpy(&temp, &from->ipaddr, sizeof(temp));
+ addrlen = sctp_get_pf_specific(sk->sk_family)
+ ->addr_to_user(sp, &temp);
+ if (space_left < addrlen)
+ return -ENOMEM;
+ if (copy_to_user(to, &temp, addrlen))
+ return -EFAULT;
+ to += addrlen;
+ cnt++;
+ space_left -= addrlen;
+ }
+
+ if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num))
+ return -EFAULT;
+ bytes_copied = ((char __user *)to) - optval;
+ if (put_user(bytes_copied, optlen))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
+ size_t space_left, int *bytes_copied)
+{
+ struct sctp_sockaddr_entry *addr;
+ union sctp_addr temp;
+ int cnt = 0;
+ int addrlen;
+ struct net *net = sock_net(sk);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
+ if (!addr->valid)
+ continue;
+
+ if ((PF_INET == sk->sk_family) &&
+ (AF_INET6 == addr->a.sa.sa_family))
+ continue;
+ if ((PF_INET6 == sk->sk_family) &&
+ inet_v6_ipv6only(sk) &&
+ (AF_INET == addr->a.sa.sa_family))
+ continue;
+ memcpy(&temp, &addr->a, sizeof(temp));
+ if (!temp.v4.sin_port)
+ temp.v4.sin_port = htons(port);
+
+ addrlen = sctp_get_pf_specific(sk->sk_family)
+ ->addr_to_user(sctp_sk(sk), &temp);
+
+ if (space_left < addrlen) {
+ cnt = -ENOMEM;
+ break;
+ }
+ memcpy(to, &temp, addrlen);
+
+ to += addrlen;
+ cnt++;
+ space_left -= addrlen;
+ *bytes_copied += addrlen;
+ }
+ rcu_read_unlock();
+
+ return cnt;
+}
+
+
+static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
+ char __user *optval, int __user *optlen)
+{
+ struct sctp_bind_addr *bp;
+ struct sctp_association *asoc;
+ int cnt = 0;
+ struct sctp_getaddrs getaddrs;
+ struct sctp_sockaddr_entry *addr;
+ void __user *to;
+ union sctp_addr temp;
+ struct sctp_sock *sp = sctp_sk(sk);
+ int addrlen;
+ int err = 0;
+ size_t space_left;
+ int bytes_copied = 0;
+ void *addrs;
+ void *buf;
+
+ if (len < sizeof(struct sctp_getaddrs))
+ return -EINVAL;
+
+ if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
+ return -EFAULT;
+
+ /*
+ * For UDP-style sockets, id specifies the association to query.
+ * If the id field is set to the value '0' then the locally bound
+ * addresses are returned without regard to any particular
+ * association.
+ */
+ if (0 == getaddrs.assoc_id) {
+ bp = &sctp_sk(sk)->ep->base.bind_addr;
+ } else {
+ asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
+ if (!asoc)
+ return -EINVAL;
+ bp = &asoc->base.bind_addr;
+ }
+
+ to = optval + offsetof(struct sctp_getaddrs, addrs);
+ space_left = len - offsetof(struct sctp_getaddrs, addrs);
+
+ addrs = kmalloc(space_left, GFP_USER | __GFP_NOWARN);
+ if (!addrs)
+ return -ENOMEM;
+
+ /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid
+ * addresses from the global local address list.
+ */
+ if (sctp_list_single_entry(&bp->address_list)) {
+ addr = list_entry(bp->address_list.next,
+ struct sctp_sockaddr_entry, list);
+ if (sctp_is_any(sk, &addr->a)) {
+ cnt = sctp_copy_laddrs(sk, bp->port, addrs,
+ space_left, &bytes_copied);
+ if (cnt < 0) {
+ err = cnt;
+ goto out;
+ }
+ goto copy_getaddrs;
+ }
+ }
+
+ buf = addrs;
+ /* Protection on the bound address list is not needed since
+ * in the socket option context we hold a socket lock and
+ * thus the bound address list can't change.
+ */
+ list_for_each_entry(addr, &bp->address_list, list) {
+ memcpy(&temp, &addr->a, sizeof(temp));
+ addrlen = sctp_get_pf_specific(sk->sk_family)
+ ->addr_to_user(sp, &temp);
+ if (space_left < addrlen) {
+ err = -ENOMEM; /*fixme: right error?*/
+ goto out;
+ }
+ memcpy(buf, &temp, addrlen);
+ buf += addrlen;
+ bytes_copied += addrlen;
+ cnt++;
+ space_left -= addrlen;
+ }
+
+copy_getaddrs:
+ if (copy_to_user(to, addrs, bytes_copied)) {
+ err = -EFAULT;
+ goto out;
+ }
+ if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) {
+ err = -EFAULT;
+ goto out;
+ }
+ /* XXX: We should have accounted for sizeof(struct sctp_getaddrs) too,
+ * but we can't change it anymore.
+ */
+ if (put_user(bytes_copied, optlen))
+ err = -EFAULT;
+out:
+ kfree(addrs);
+ return err;
+}
+
+/* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
+ *
+ * Requests that the local SCTP stack use the enclosed peer address as
+ * the association primary. The enclosed address must be one of the
+ * association peer's addresses.
+ */
+static int sctp_getsockopt_primary_addr(struct sock *sk, int len,
+ char __user *optval, int __user *optlen)
+{
+ struct sctp_prim prim;
+ struct sctp_association *asoc;
+ struct sctp_sock *sp = sctp_sk(sk);
+
+ if (len < sizeof(struct sctp_prim))
+ return -EINVAL;
+
+ len = sizeof(struct sctp_prim);
+
+ if (copy_from_user(&prim, optval, len))
+ return -EFAULT;
+
+ asoc = sctp_id2assoc(sk, prim.ssp_assoc_id);
+ if (!asoc)
+ return -EINVAL;
+
+ if (!asoc->peer.primary_path)
+ return -ENOTCONN;
+
+ memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr,
+ asoc->peer.primary_path->af_specific->sockaddr_len);
+
+ sctp_get_pf_specific(sk->sk_family)->addr_to_user(sp,
+ (union sctp_addr *)&prim.ssp_addr);
+
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &prim, len))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER)
+ *
+ * Requests that the local endpoint set the specified Adaptation Layer
+ * Indication parameter for all future INIT and INIT-ACK exchanges.
+ */
+static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len,
+ char __user *optval, int __user *optlen)
+{
+ struct sctp_setadaptation adaptation;
+
+ if (len < sizeof(struct sctp_setadaptation))
+ return -EINVAL;
+
+ len = sizeof(struct sctp_setadaptation);
+
+ adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind;
+
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &adaptation, len))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ *
+ * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM)
+ *
+ * Applications that wish to use the sendto() system call may wish to
+ * specify a default set of parameters that would normally be supplied
+ * through the inclusion of ancillary data. This socket option allows
+ * such an application to set the default sctp_sndrcvinfo structure.
+ *
+ * The application that wishes to use this socket option simply passes
+ * in to this call the sctp_sndrcvinfo structure defined in Section
+ * 5.2.2). The input parameters accepted by this call include
+ * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
+ * sinfo_timetolive. The user must provide the sinfo_assoc_id field in
+ * to this call if the caller is using the UDP model.
+ *
+ * For getsockopt, it gets the default sctp_sndrcvinfo structure.
+ */
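+/* Illustrative user-space sketch (not part of this file): setting a default
+ * stream and (hypothetical) payload protocol id for future sends on
+ * socket 'sd'.
+ *
+ *	struct sctp_sndrcvinfo info;
+ *
+ *	memset(&info, 0, sizeof(info));
+ *	info.sinfo_stream = 1;
+ *	info.sinfo_ppid = htonl(42);
+ *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_DEFAULT_SEND_PARAM,
+ *		       &info, sizeof(info)) < 0)
+ *		perror("setsockopt(SCTP_DEFAULT_SEND_PARAM)");
+ */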
+static int sctp_getsockopt_default_send_param(struct sock *sk,
+ int len, char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_sock *sp = sctp_sk(sk);
+ struct sctp_association *asoc;
+ struct sctp_sndrcvinfo info;
+
+ if (len < sizeof(info))
+ return -EINVAL;
+
+ len = sizeof(info);
+
+ if (copy_from_user(&info, optval, len))
+ return -EFAULT;
+
+ asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
+ if (!asoc && info.sinfo_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
+ if (asoc) {
+ info.sinfo_stream = asoc->default_stream;
+ info.sinfo_flags = asoc->default_flags;
+ info.sinfo_ppid = asoc->default_ppid;
+ info.sinfo_context = asoc->default_context;
+ info.sinfo_timetolive = asoc->default_timetolive;
+ } else {
+ info.sinfo_stream = sp->default_stream;
+ info.sinfo_flags = sp->default_flags;
+ info.sinfo_ppid = sp->default_ppid;
+ info.sinfo_context = sp->default_context;
+ info.sinfo_timetolive = sp->default_timetolive;
+ }
+
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &info, len))
+ return -EFAULT;
+
+ return 0;
+}
+
+/* RFC6458, Section 8.1.31. Set/get Default Send Parameters
+ * (SCTP_DEFAULT_SNDINFO)
+ */
+static int sctp_getsockopt_default_sndinfo(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_sock *sp = sctp_sk(sk);
+ struct sctp_association *asoc;
+ struct sctp_sndinfo info;
+
+ if (len < sizeof(info))
+ return -EINVAL;
+
+ len = sizeof(info);
+
+ if (copy_from_user(&info, optval, len))
+ return -EFAULT;
+
+ asoc = sctp_id2assoc(sk, info.snd_assoc_id);
+ if (!asoc && info.snd_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
+ if (asoc) {
+ info.snd_sid = asoc->default_stream;
+ info.snd_flags = asoc->default_flags;
+ info.snd_ppid = asoc->default_ppid;
+ info.snd_context = asoc->default_context;
+ } else {
+ info.snd_sid = sp->default_stream;
+ info.snd_flags = sp->default_flags;
+ info.snd_ppid = sp->default_ppid;
+ info.snd_context = sp->default_context;
+ }
+
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &info, len))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ *
+ * 7.1.5 SCTP_NODELAY
+ *
+ * Turn on/off any Nagle-like algorithm. This means that packets are
+ * generally sent as soon as possible and no unnecessary delays are
+ * introduced, at the cost of more packets in the network. Expects an
+ * integer boolean flag.
+ */
+
+static int sctp_getsockopt_nodelay(struct sock *sk, int len,
+ char __user *optval, int __user *optlen)
+{
+ int val;
+
+ if (len < sizeof(int))
+ return -EINVAL;
+
+ len = sizeof(int);
+ val = (sctp_sk(sk)->nodelay == 1);
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &val, len))
+ return -EFAULT;
+ return 0;
+}
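+
+/* Userspace usage sketch (example only, not kernel code): SCTP_NODELAY
+ * takes a plain int boolean; 'fd' is an assumed SCTP socket:
+ *
+ *	int on;
+ *	socklen_t optlen = sizeof(on);
+ *
+ *	if (!getsockopt(fd, IPPROTO_SCTP, SCTP_NODELAY, &on, &optlen))
+ *		printf("nodelay is %s\n", on ? "on" : "off");
+ */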
+
+/*
+ *
+ * 7.1.1 SCTP_RTOINFO
+ *
+ * The protocol parameters used to initialize and bound retransmission
+ * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access
+ * and modify these parameters.
+ * All parameters are time values, in milliseconds. A value of 0, when
+ * modifying the parameters, indicates that the current value should not
+ * be changed.
+ *
+ */
+static int sctp_getsockopt_rtoinfo(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_rtoinfo rtoinfo;
+ struct sctp_association *asoc;
+
+ if (len < sizeof(struct sctp_rtoinfo))
+ return -EINVAL;
+
+ len = sizeof(struct sctp_rtoinfo);
+
+ if (copy_from_user(&rtoinfo, optval, len))
+ return -EFAULT;
+
+ asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);
+
+ if (!asoc && rtoinfo.srto_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
+ /* Values corresponding to the specific association. */
+ if (asoc) {
+ rtoinfo.srto_initial = jiffies_to_msecs(asoc->rto_initial);
+ rtoinfo.srto_max = jiffies_to_msecs(asoc->rto_max);
+ rtoinfo.srto_min = jiffies_to_msecs(asoc->rto_min);
+ } else {
+ /* Values corresponding to the endpoint. */
+ struct sctp_sock *sp = sctp_sk(sk);
+
+ rtoinfo.srto_initial = sp->rtoinfo.srto_initial;
+ rtoinfo.srto_max = sp->rtoinfo.srto_max;
+ rtoinfo.srto_min = sp->rtoinfo.srto_min;
+ }
+
+ if (put_user(len, optlen))
+ return -EFAULT;
+
+ if (copy_to_user(optval, &rtoinfo, len))
+ return -EFAULT;
+
+ return 0;
+}
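+
+/* Userspace usage sketch (example only, not kernel code): reading the RTO
+ * parameters; 'fd' is an assumed SCTP socket, SCTP_FUTURE_ASSOC (0)
+ * selects the endpoint values on a one-to-many socket:
+ *
+ *	struct sctp_rtoinfo rto = { .srto_assoc_id = SCTP_FUTURE_ASSOC };
+ *	socklen_t optlen = sizeof(rto);
+ *
+ *	if (!getsockopt(fd, IPPROTO_SCTP, SCTP_RTOINFO, &rto, &optlen))
+ *		printf("rto initial=%u min=%u max=%u (ms)\n",
+ *		       rto.srto_initial, rto.srto_min, rto.srto_max);
+ */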
+
+/*
+ *
+ * 7.1.2 SCTP_ASSOCINFO
+ *
+ * This option is used to tune the maximum retransmission attempts
+ * of the association.
+ * Returns an error if the new association retransmission value is
+ * greater than the sum of the retransmission value of the peer.
+ * See [SCTP] for more information.
+ *
+ */
+static int sctp_getsockopt_associnfo(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_assocparams assocparams;
+ struct sctp_association *asoc;
+ struct list_head *pos;
+ int cnt = 0;
+
+ if (len < sizeof(struct sctp_assocparams))
+ return -EINVAL;
+
+ len = sizeof(struct sctp_assocparams);
+
+ if (copy_from_user(&assocparams, optval, len))
+ return -EFAULT;
+
+ asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);
+
+ if (!asoc && assocparams.sasoc_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
+ /* Values corresponding to the specific association */
+ if (asoc) {
+ assocparams.sasoc_asocmaxrxt = asoc->max_retrans;
+ assocparams.sasoc_peer_rwnd = asoc->peer.rwnd;
+ assocparams.sasoc_local_rwnd = asoc->a_rwnd;
+ assocparams.sasoc_cookie_life = ktime_to_ms(asoc->cookie_life);
+
+ list_for_each(pos, &asoc->peer.transport_addr_list) {
+ cnt++;
+ }
+
+ assocparams.sasoc_number_peer_destinations = cnt;
+ } else {
+ /* Values corresponding to the endpoint */
+ struct sctp_sock *sp = sctp_sk(sk);
+
+ assocparams.sasoc_asocmaxrxt = sp->assocparams.sasoc_asocmaxrxt;
+ assocparams.sasoc_peer_rwnd = sp->assocparams.sasoc_peer_rwnd;
+ assocparams.sasoc_local_rwnd = sp->assocparams.sasoc_local_rwnd;
+ assocparams.sasoc_cookie_life =
+ sp->assocparams.sasoc_cookie_life;
+ assocparams.sasoc_number_peer_destinations =
+ sp->assocparams.
+ sasoc_number_peer_destinations;
+ }
+
+ if (put_user(len, optlen))
+ return -EFAULT;
+
+ if (copy_to_user(optval, &assocparams, len))
+ return -EFAULT;
+
+ return 0;
+}
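+
+/* Userspace usage sketch (example only, not kernel code): fetching the
+ * association parameters; 'fd' is an assumed SCTP socket and, on a
+ * one-to-many socket, 'id' is an assumed valid association id:
+ *
+ *	struct sctp_assocparams ap = { .sasoc_assoc_id = id };
+ *	socklen_t optlen = sizeof(ap);
+ *
+ *	if (!getsockopt(fd, IPPROTO_SCTP, SCTP_ASSOCINFO, &ap, &optlen))
+ *		printf("max_rxt=%u peers=%u cookie_life=%u ms\n",
+ *		       ap.sasoc_asocmaxrxt,
+ *		       ap.sasoc_number_peer_destinations,
+ *		       ap.sasoc_cookie_life);
+ */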
+
+/*
+ * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
+ *
+ * This socket option is a boolean flag which turns on or off mapped V4
+ * addresses. If this option is turned on and the socket is type
+ * PF_INET6, then IPv4 addresses will be mapped to V6 representation.
+ * If this option is turned off, then no mapping will be done of V4
+ * addresses and a user will receive both PF_INET6 and PF_INET type
+ * addresses on the socket.
+ */
+static int sctp_getsockopt_mappedv4(struct sock *sk, int len,
+ char __user *optval, int __user *optlen)
+{
+ int val;
+ struct sctp_sock *sp = sctp_sk(sk);
+
+ if (len < sizeof(int))
+ return -EINVAL;
+
+ len = sizeof(int);
+ val = sp->v4mapped;
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &val, len))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * 7.1.29. Set or Get the default context (SCTP_CONTEXT)
+ * (chapter and verse is quoted at sctp_setsockopt_context())
+ */
+static int sctp_getsockopt_context(struct sock *sk, int len,
+ char __user *optval, int __user *optlen)
+{
+ struct sctp_assoc_value params;
+ struct sctp_association *asoc;
+
+ if (len < sizeof(struct sctp_assoc_value))
+ return -EINVAL;
+
+ len = sizeof(struct sctp_assoc_value);
+
+ if (copy_from_user(&params, optval, len))
+ return -EFAULT;
+
+ asoc = sctp_id2assoc(sk, params.assoc_id);
+ if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
+ params.assoc_value = asoc ? asoc->default_rcv_context
+ : sctp_sk(sk)->default_rcv_context;
+
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &params, len))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG)
+ * This option will get or set the maximum size to put in any outgoing
+ * SCTP DATA chunk. If a message is larger than this size it will be
+ * fragmented by SCTP into the specified size. Note that the underlying
+ * SCTP implementation may fragment into smaller sized chunks when the
+ * PMTU of the underlying association is smaller than the value set by
+ * the user. The default value for this option is '0' which indicates
+ * the user is NOT limiting fragmentation and only the PMTU will effect
+ * SCTP's choice of DATA chunk size. Note also that values set larger
+ * than the maximum size of an IP datagram will effectively let SCTP
+ * control fragmentation (i.e. the same as setting this option to 0).
+ *
+ * The following structure is used to access and modify this parameter:
+ *
+ * struct sctp_assoc_value {
+ * sctp_assoc_t assoc_id;
+ * uint32_t assoc_value;
+ * };
+ *
+ * assoc_id: This parameter is ignored for one-to-one style sockets.
+ * For one-to-many style sockets this parameter indicates which
+ * association the user is performing an action upon. Note that if
+ * this field's value is zero then the endpoint's default value is
+ * changed (affecting future associations only).
+ * assoc_value: This parameter specifies the maximum size in bytes.
+ */
+static int sctp_getsockopt_maxseg(struct sock *sk, int len,
+ char __user *optval, int __user *optlen)
+{
+ struct sctp_assoc_value params;
+ struct sctp_association *asoc;
+
+ if (len == sizeof(int)) {
+ pr_warn_ratelimited(DEPRECATED
+ "%s (pid %d) "
+ "Use of int in maxseg socket option.\n"
+ "Use struct sctp_assoc_value instead\n",
+ current->comm, task_pid_nr(current));
+ params.assoc_id = SCTP_FUTURE_ASSOC;
+ } else if (len >= sizeof(struct sctp_assoc_value)) {
+ len = sizeof(struct sctp_assoc_value);
+ if (copy_from_user(&params, optval, len))
+ return -EFAULT;
+ } else
+ return -EINVAL;
+
+ asoc = sctp_id2assoc(sk, params.assoc_id);
+ if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
+ if (asoc)
+ params.assoc_value = asoc->frag_point;
+ else
+ params.assoc_value = sctp_sk(sk)->user_frag;
+
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (len == sizeof(int)) {
+ if (copy_to_user(optval, &params.assoc_value, len))
+ return -EFAULT;
+ } else {
+ if (copy_to_user(optval, &params, len))
+ return -EFAULT;
+ }
+
+ return 0;
+}
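+
+/* Userspace usage sketch (example only, not kernel code): reading
+ * SCTP_MAXSEG with the preferred struct sctp_assoc_value form (the
+ * bare-int form is deprecated, as warned above); 'fd' is an assumed SCTP
+ * socket:
+ *
+ *	struct sctp_assoc_value av = { .assoc_id = SCTP_FUTURE_ASSOC };
+ *	socklen_t optlen = sizeof(av);
+ *
+ *	if (!getsockopt(fd, IPPROTO_SCTP, SCTP_MAXSEG, &av, &optlen))
+ *		printf("max fragment size: %u bytes (0 = PMTU only)\n",
+ *		       av.assoc_value);
+ */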
+
+/*
+ * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
+ * (chapter and verse is quoted at sctp_setsockopt_fragment_interleave())
+ */
+static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len,
+ char __user *optval, int __user *optlen)
+{
+ int val;
+
+ if (len < sizeof(int))
+ return -EINVAL;
+
+ len = sizeof(int);
+
+ val = sctp_sk(sk)->frag_interleave;
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &val, len))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * 7.1.25. Set or Get the sctp partial delivery point
+ * (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point())
+ */
+static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ u32 val;
+
+ if (len < sizeof(u32))
+ return -EINVAL;
+
+ len = sizeof(u32);
+
+ val = sctp_sk(sk)->pd_point;
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &val, len))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST)
+ * (chapter and verse is quoted at sctp_setsockopt_maxburst())
+ */
+static int sctp_getsockopt_maxburst(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_assoc_value params;
+ struct sctp_association *asoc;
+
+ if (len == sizeof(int)) {
+ pr_warn_ratelimited(DEPRECATED
+ "%s (pid %d) "
+ "Use of int in max_burst socket option.\n"
+ "Use struct sctp_assoc_value instead\n",
+ current->comm, task_pid_nr(current));
+ params.assoc_id = SCTP_FUTURE_ASSOC;
+ } else if (len >= sizeof(struct sctp_assoc_value)) {
+ len = sizeof(struct sctp_assoc_value);
+ if (copy_from_user(&params, optval, len))
+ return -EFAULT;
+ } else
+ return -EINVAL;
+
+ asoc = sctp_id2assoc(sk, params.assoc_id);
+ if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
+ params.assoc_value = asoc ? asoc->max_burst : sctp_sk(sk)->max_burst;
+
+ if (len == sizeof(int)) {
+ if (copy_to_user(optval, &params.assoc_value, len))
+ return -EFAULT;
+ } else {
+ if (copy_to_user(optval, &params, len))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
+ char __user *optval, int __user *optlen)
+{
+ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
+ struct sctp_hmacalgo __user *p = (void __user *)optval;
+ struct sctp_hmac_algo_param *hmacs;
+ __u16 data_len = 0;
+ u32 num_idents;
+ int i;
+
+ if (!ep->auth_enable)
+ return -EACCES;
+
+ hmacs = ep->auth_hmacs_list;
+ data_len = ntohs(hmacs->param_hdr.length) -
+ sizeof(struct sctp_paramhdr);
+
+ if (len < sizeof(struct sctp_hmacalgo) + data_len)
+ return -EINVAL;
+
+ len = sizeof(struct sctp_hmacalgo) + data_len;
+ num_idents = data_len / sizeof(u16);
+
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (put_user(num_idents, &p->shmac_num_idents))
+ return -EFAULT;
+ for (i = 0; i < num_idents; i++) {
+ __u16 hmacid = ntohs(hmacs->hmac_ids[i]);
+
+ if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16)))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int sctp_getsockopt_active_key(struct sock *sk, int len,
+ char __user *optval, int __user *optlen)
+{
+ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
+ struct sctp_authkeyid val;
+ struct sctp_association *asoc;
+
+ if (len < sizeof(struct sctp_authkeyid))
+ return -EINVAL;
+
+ len = sizeof(struct sctp_authkeyid);
+ if (copy_from_user(&val, optval, len))
+ return -EFAULT;
+
+ asoc = sctp_id2assoc(sk, val.scact_assoc_id);
+ if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
+ return -EINVAL;
+
+ if (asoc) {
+ if (!asoc->peer.auth_capable)
+ return -EACCES;
+ val.scact_keynumber = asoc->active_key_id;
+ } else {
+ if (!ep->auth_enable)
+ return -EACCES;
+ val.scact_keynumber = ep->active_key_id;
+ }
+
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &val, len))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
+ char __user *optval, int __user *optlen)
+{
+ struct sctp_authchunks __user *p = (void __user *)optval;
+ struct sctp_authchunks val;
+ struct sctp_association *asoc;
+ struct sctp_chunks_param *ch;
+ u32 num_chunks = 0;
+ char __user *to;
+
+ if (len < sizeof(struct sctp_authchunks))
+ return -EINVAL;
+
+ if (copy_from_user(&val, optval, sizeof(val)))
+ return -EFAULT;
+
+ to = p->gauth_chunks;
+ asoc = sctp_id2assoc(sk, val.gauth_assoc_id);
+ if (!asoc)
+ return -EINVAL;
+
+ if (!asoc->peer.auth_capable)
+ return -EACCES;
+
+ ch = asoc->peer.peer_chunks;
+ if (!ch)
+ goto num;
+
+ /* See if the user provided enough room for all the data */
+ num_chunks = ntohs(ch->param_hdr.length) - sizeof(struct sctp_paramhdr);
+ if (len < num_chunks)
+ return -EINVAL;
+
+ if (copy_to_user(to, ch->chunks, num_chunks))
+ return -EFAULT;
+num:
+ len = sizeof(struct sctp_authchunks) + num_chunks;
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (put_user(num_chunks, &p->gauth_number_of_chunks))
+ return -EFAULT;
+ return 0;
+}
+
+static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
+ char __user *optval, int __user *optlen)
+{
+ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
+ struct sctp_authchunks __user *p = (void __user *)optval;
+ struct sctp_authchunks val;
+ struct sctp_association *asoc;
+ struct sctp_chunks_param *ch;
+ u32 num_chunks = 0;
+ char __user *to;
+
+ if (len < sizeof(struct sctp_authchunks))
+ return -EINVAL;
+
+ if (copy_from_user(&val, optval, sizeof(val)))
+ return -EFAULT;
+
+ to = p->gauth_chunks;
+ asoc = sctp_id2assoc(sk, val.gauth_assoc_id);
+ if (!asoc && val.gauth_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
+ if (asoc) {
+ if (!asoc->peer.auth_capable)
+ return -EACCES;
+ ch = (struct sctp_chunks_param *)asoc->c.auth_chunks;
+ } else {
+ if (!ep->auth_enable)
+ return -EACCES;
+ ch = ep->auth_chunk_list;
+ }
+ if (!ch)
+ goto num;
+
+ num_chunks = ntohs(ch->param_hdr.length) - sizeof(struct sctp_paramhdr);
+ if (len < sizeof(struct sctp_authchunks) + num_chunks)
+ return -EINVAL;
+
+ if (copy_to_user(to, ch->chunks, num_chunks))
+ return -EFAULT;
+num:
+ len = sizeof(struct sctp_authchunks) + num_chunks;
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (put_user(num_chunks, &p->gauth_number_of_chunks))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * 8.2.5. Get the Current Number of Associations (SCTP_GET_ASSOC_NUMBER)
+ * This option gets the current number of associations that are attached
+ * to a one-to-many style socket. The option value is a uint32_t.
+ */
+static int sctp_getsockopt_assoc_number(struct sock *sk, int len,
+ char __user *optval, int __user *optlen)
+{
+ struct sctp_sock *sp = sctp_sk(sk);
+ struct sctp_association *asoc;
+ u32 val = 0;
+
+ if (sctp_style(sk, TCP))
+ return -EOPNOTSUPP;
+
+ if (len < sizeof(u32))
+ return -EINVAL;
+
+ len = sizeof(u32);
+
+ list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
+ val++;
+ }
+
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &val, len))
+ return -EFAULT;
+
+ return 0;
+}
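+
+/* Userspace usage sketch (example only, not kernel code): counting the
+ * associations on an assumed one-to-many SCTP socket 'fd':
+ *
+ *	uint32_t n;
+ *	socklen_t optlen = sizeof(n);
+ *
+ *	if (!getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_NUMBER, &n, &optlen))
+ *		printf("%u associations\n", n);
+ */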
+
+/*
+ * 8.1.23 SCTP_AUTO_ASCONF
+ * See the corresponding setsockopt entry as description
+ */
+static int sctp_getsockopt_auto_asconf(struct sock *sk, int len,
+ char __user *optval, int __user *optlen)
+{
+ int val = 0;
+
+ if (len < sizeof(int))
+ return -EINVAL;
+
+ len = sizeof(int);
+ if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk))
+ val = 1;
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &val, len))
+ return -EFAULT;
+ return 0;
+}
+
+/*
+ * 8.2.6. Get the Current Identifiers of Associations
+ * (SCTP_GET_ASSOC_ID_LIST)
+ *
+ * This option gets the current list of SCTP association identifiers of
+ * the SCTP associations handled by a one-to-many style socket.
+ */
+static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
+ char __user *optval, int __user *optlen)
+{
+ struct sctp_sock *sp = sctp_sk(sk);
+ struct sctp_association *asoc;
+ struct sctp_assoc_ids *ids;
+ u32 num = 0;
+
+ if (sctp_style(sk, TCP))
+ return -EOPNOTSUPP;
+
+ if (len < sizeof(struct sctp_assoc_ids))
+ return -EINVAL;
+
+ list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
+ num++;
+ }
+
+ if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num)
+ return -EINVAL;
+
+ len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num;
+
+ ids = kmalloc(len, GFP_USER | __GFP_NOWARN);
+ if (unlikely(!ids))
+ return -ENOMEM;
+
+ ids->gaids_number_of_ids = num;
+ num = 0;
+ list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
+ ids->gaids_assoc_id[num++] = asoc->assoc_id;
+ }
+
+ if (put_user(len, optlen) || copy_to_user(optval, ids, len)) {
+ kfree(ids);
+ return -EFAULT;
+ }
+
+ kfree(ids);
+ return 0;
+}
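+
+/* Userspace usage sketch (example only, not kernel code): retrieving the
+ * association id list from an assumed one-to-many SCTP socket 'fd';
+ * 'max_ids' is an assumed upper bound chosen by the caller:
+ *
+ *	size_t size = sizeof(struct sctp_assoc_ids) +
+ *		      max_ids * sizeof(sctp_assoc_t);
+ *	struct sctp_assoc_ids *ids = malloc(size);
+ *	socklen_t optlen = size;
+ *
+ *	if (ids && !getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_ID_LIST,
+ *			       ids, &optlen))
+ *		for (uint32_t i = 0; i < ids->gaids_number_of_ids; i++)
+ *			printf("assoc id %d\n", ids->gaids_assoc_id[i]);
+ *	free(ids);
+ */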
+
+/*
+ * SCTP_PEER_ADDR_THLDS
+ *
+ * This option allows us to fetch the partially failed threshold for one or all
+ * transports in an association. See Section 6.1 of:
+ * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
+ */
+static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
+ char __user *optval, int len,
+ int __user *optlen, bool v2)
+{
+ struct sctp_paddrthlds_v2 val;
+ struct sctp_transport *trans;
+ struct sctp_association *asoc;
+ int min;
+
+ min = v2 ? sizeof(val) : sizeof(struct sctp_paddrthlds);
+ if (len < min)
+ return -EINVAL;
+ len = min;
+ if (copy_from_user(&val, optval, len))
+ return -EFAULT;
+
+ if (!sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
+ trans = sctp_addr_id2transport(sk, &val.spt_address,
+ val.spt_assoc_id);
+ if (!trans)
+ return -ENOENT;
+
+ val.spt_pathmaxrxt = trans->pathmaxrxt;
+ val.spt_pathpfthld = trans->pf_retrans;
+ val.spt_pathcpthld = trans->ps_retrans;
+
+ goto out;
+ }
+
+ asoc = sctp_id2assoc(sk, val.spt_assoc_id);
+ if (!asoc && val.spt_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
+ if (asoc) {
+ val.spt_pathpfthld = asoc->pf_retrans;
+ val.spt_pathmaxrxt = asoc->pathmaxrxt;
+ val.spt_pathcpthld = asoc->ps_retrans;
+ } else {
+ struct sctp_sock *sp = sctp_sk(sk);
+
+ val.spt_pathpfthld = sp->pf_retrans;
+ val.spt_pathmaxrxt = sp->pathmaxrxt;
+ val.spt_pathcpthld = sp->ps_retrans;
+ }
+
+out:
+ if (put_user(len, optlen) || copy_to_user(optval, &val, len))
+ return -EFAULT;
+
+ return 0;
+}
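+
+/* Userspace usage sketch (example only, not kernel code): reading the
+ * path failure thresholds for a whole association (spt_address left as
+ * the wildcard); 'fd' is an assumed SCTP socket:
+ *
+ *	struct sctp_paddrthlds th = { .spt_assoc_id = SCTP_FUTURE_ASSOC };
+ *	socklen_t optlen = sizeof(th);
+ *
+ *	if (!getsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_THLDS, &th, &optlen))
+ *		printf("pathmaxrxt=%u pf threshold=%u\n",
+ *		       th.spt_pathmaxrxt, th.spt_pathpfthld);
+ */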
+
+/*
+ * SCTP_GET_ASSOC_STATS
+ *
+ * This option retrieves local per-endpoint statistics. It is modeled
+ * after OpenSolaris' implementation.
+ */
+static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_assoc_stats sas;
+ struct sctp_association *asoc = NULL;
+
+ /* User must provide at least the assoc id */
+ if (len < sizeof(sctp_assoc_t))
+ return -EINVAL;
+
+ /* Allow the struct to grow and fill in as much as possible */
+ len = min_t(size_t, len, sizeof(sas));
+
+ if (copy_from_user(&sas, optval, len))
+ return -EFAULT;
+
+ asoc = sctp_id2assoc(sk, sas.sas_assoc_id);
+ if (!asoc)
+ return -EINVAL;
+
+ sas.sas_rtxchunks = asoc->stats.rtxchunks;
+ sas.sas_gapcnt = asoc->stats.gapcnt;
+ sas.sas_outofseqtsns = asoc->stats.outofseqtsns;
+ sas.sas_osacks = asoc->stats.osacks;
+ sas.sas_isacks = asoc->stats.isacks;
+ sas.sas_octrlchunks = asoc->stats.octrlchunks;
+ sas.sas_ictrlchunks = asoc->stats.ictrlchunks;
+ sas.sas_oodchunks = asoc->stats.oodchunks;
+ sas.sas_iodchunks = asoc->stats.iodchunks;
+ sas.sas_ouodchunks = asoc->stats.ouodchunks;
+ sas.sas_iuodchunks = asoc->stats.iuodchunks;
+ sas.sas_idupchunks = asoc->stats.idupchunks;
+ sas.sas_opackets = asoc->stats.opackets;
+ sas.sas_ipackets = asoc->stats.ipackets;
+
+ /* Newly observed highest RTO; this will be 0 if not a single
+ * RTO update took place, in which case obs_rto_ipaddr is bogus.
+ */
+ sas.sas_maxrto = asoc->stats.max_obs_rto;
+ memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr,
+ sizeof(struct sockaddr_storage));
+
+ /* Mark beginning of a new observation period */
+ asoc->stats.max_obs_rto = asoc->rto_min;
+
+ if (put_user(len, optlen))
+ return -EFAULT;
+
+ pr_debug("%s: len:%d, assoc_id:%d\n", __func__, len, sas.sas_assoc_id);
+
+ if (copy_to_user(optval, &sas, len))
+ return -EFAULT;
+
+ return 0;
+}
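+
+/* Userspace usage sketch (example only, not kernel code): sampling the
+ * per-association statistics (note that the call itself resets the
+ * max-RTO observation window, as above); 'fd' is an assumed SCTP socket
+ * and 'id' an assumed valid association id (ignored on one-to-one
+ * sockets):
+ *
+ *	struct sctp_assoc_stats st = { .sas_assoc_id = id };
+ *	socklen_t optlen = sizeof(st);
+ *
+ *	if (!getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_STATS, &st, &optlen))
+ *		printf("opackets=%llu ipackets=%llu max_rto=%llu ms\n",
+ *		       (unsigned long long)st.sas_opackets,
+ *		       (unsigned long long)st.sas_ipackets,
+ *		       (unsigned long long)st.sas_maxrto);
+ */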
+
+static int sctp_getsockopt_recvrcvinfo(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ int val = 0;
+
+ if (len < sizeof(int))
+ return -EINVAL;
+
+ len = sizeof(int);
+ if (sctp_sk(sk)->recvrcvinfo)
+ val = 1;
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &val, len))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int sctp_getsockopt_recvnxtinfo(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ int val = 0;
+
+ if (len < sizeof(int))
+ return -EINVAL;
+
+ len = sizeof(int);
+ if (sctp_sk(sk)->recvnxtinfo)
+ val = 1;
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &val, len))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int sctp_getsockopt_pr_supported(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_assoc_value params;
+ struct sctp_association *asoc;
+ int retval = -EFAULT;
+
+ if (len < sizeof(params)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ len = sizeof(params);
+ if (copy_from_user(&params, optval, len))
+ goto out;
+
+ asoc = sctp_id2assoc(sk, params.assoc_id);
+ if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ params.assoc_value = asoc ? asoc->peer.prsctp_capable
+ : sctp_sk(sk)->ep->prsctp_enable;
+
+ if (put_user(len, optlen))
+ goto out;
+
+ if (copy_to_user(optval, &params, len))
+ goto out;
+
+ retval = 0;
+
+out:
+ return retval;
+}
+
+static int sctp_getsockopt_default_prinfo(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_default_prinfo info;
+ struct sctp_association *asoc;
+ int retval = -EFAULT;
+
+ if (len < sizeof(info)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ len = sizeof(info);
+ if (copy_from_user(&info, optval, len))
+ goto out;
+
+ asoc = sctp_id2assoc(sk, info.pr_assoc_id);
+ if (!asoc && info.pr_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ if (asoc) {
+ info.pr_policy = SCTP_PR_POLICY(asoc->default_flags);
+ info.pr_value = asoc->default_timetolive;
+ } else {
+ struct sctp_sock *sp = sctp_sk(sk);
+
+ info.pr_policy = SCTP_PR_POLICY(sp->default_flags);
+ info.pr_value = sp->default_timetolive;
+ }
+
+ if (put_user(len, optlen))
+ goto out;
+
+ if (copy_to_user(optval, &info, len))
+ goto out;
+
+ retval = 0;
+
+out:
+ return retval;
+}
+
+static int sctp_getsockopt_pr_assocstatus(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_prstatus params;
+ struct sctp_association *asoc;
+ int policy;
+ int retval = -EINVAL;
+
+ if (len < sizeof(params))
+ goto out;
+
+ len = sizeof(params);
+ if (copy_from_user(&params, optval, len)) {
+ retval = -EFAULT;
+ goto out;
+ }
+
+ policy = params.sprstat_policy;
+ if (!policy || (policy & ~(SCTP_PR_SCTP_MASK | SCTP_PR_SCTP_ALL)) ||
+ ((policy & SCTP_PR_SCTP_ALL) && (policy & SCTP_PR_SCTP_MASK)))
+ goto out;
+
+ asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
+ if (!asoc)
+ goto out;
+
+ if (policy == SCTP_PR_SCTP_ALL) {
+ params.sprstat_abandoned_unsent = 0;
+ params.sprstat_abandoned_sent = 0;
+ for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) {
+ params.sprstat_abandoned_unsent +=
+ asoc->abandoned_unsent[policy];
+ params.sprstat_abandoned_sent +=
+ asoc->abandoned_sent[policy];
+ }
+ } else {
+ params.sprstat_abandoned_unsent =
+ asoc->abandoned_unsent[__SCTP_PR_INDEX(policy)];
+ params.sprstat_abandoned_sent =
+ asoc->abandoned_sent[__SCTP_PR_INDEX(policy)];
+ }
+
+ if (put_user(len, optlen)) {
+ retval = -EFAULT;
+ goto out;
+ }
+
+ if (copy_to_user(optval, &params, len)) {
+ retval = -EFAULT;
+ goto out;
+ }
+
+ retval = 0;
+
+out:
+ return retval;
+}
+
+static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_stream_out_ext *streamoute;
+ struct sctp_association *asoc;
+ struct sctp_prstatus params;
+ int retval = -EINVAL;
+ int policy;
+
+ if (len < sizeof(params))
+ goto out;
+
+ len = sizeof(params);
+ if (copy_from_user(&params, optval, len)) {
+ retval = -EFAULT;
+ goto out;
+ }
+
+ policy = params.sprstat_policy;
+ if (!policy || (policy & ~(SCTP_PR_SCTP_MASK | SCTP_PR_SCTP_ALL)) ||
+ ((policy & SCTP_PR_SCTP_ALL) && (policy & SCTP_PR_SCTP_MASK)))
+ goto out;
+
+ asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
+ if (!asoc || params.sprstat_sid >= asoc->stream.outcnt)
+ goto out;
+
+ streamoute = SCTP_SO(&asoc->stream, params.sprstat_sid)->ext;
+ if (!streamoute) {
+ /* Not allocated yet, means all stats are 0 */
+ params.sprstat_abandoned_unsent = 0;
+ params.sprstat_abandoned_sent = 0;
+ retval = 0;
+ goto out;
+ }
+
+ if (policy == SCTP_PR_SCTP_ALL) {
+ params.sprstat_abandoned_unsent = 0;
+ params.sprstat_abandoned_sent = 0;
+ for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) {
+ params.sprstat_abandoned_unsent +=
+ streamoute->abandoned_unsent[policy];
+ params.sprstat_abandoned_sent +=
+ streamoute->abandoned_sent[policy];
+ }
+ } else {
+ params.sprstat_abandoned_unsent =
+ streamoute->abandoned_unsent[__SCTP_PR_INDEX(policy)];
+ params.sprstat_abandoned_sent =
+ streamoute->abandoned_sent[__SCTP_PR_INDEX(policy)];
+ }
+
+ if (put_user(len, optlen) || copy_to_user(optval, &params, len)) {
+ retval = -EFAULT;
+ goto out;
+ }
+
+ retval = 0;
+
+out:
+ return retval;
+}
+
+static int sctp_getsockopt_reconfig_supported(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_assoc_value params;
+ struct sctp_association *asoc;
+ int retval = -EFAULT;
+
+ if (len < sizeof(params)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ len = sizeof(params);
+ if (copy_from_user(&params, optval, len))
+ goto out;
+
+ asoc = sctp_id2assoc(sk, params.assoc_id);
+ if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ params.assoc_value = asoc ? asoc->peer.reconf_capable
+ : sctp_sk(sk)->ep->reconf_enable;
+
+ if (put_user(len, optlen))
+ goto out;
+
+ if (copy_to_user(optval, &params, len))
+ goto out;
+
+ retval = 0;
+
+out:
+ return retval;
+}
+
+static int sctp_getsockopt_enable_strreset(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_assoc_value params;
+ struct sctp_association *asoc;
+ int retval = -EFAULT;
+
+ if (len < sizeof(params)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ len = sizeof(params);
+ if (copy_from_user(&params, optval, len))
+ goto out;
+
+ asoc = sctp_id2assoc(sk, params.assoc_id);
+ if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ params.assoc_value = asoc ? asoc->strreset_enable
+ : sctp_sk(sk)->ep->strreset_enable;
+
+ if (put_user(len, optlen))
+ goto out;
+
+ if (copy_to_user(optval, &params, len))
+ goto out;
+
+ retval = 0;
+
+out:
+ return retval;
+}
+
+static int sctp_getsockopt_scheduler(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_assoc_value params;
+ struct sctp_association *asoc;
+ int retval = -EFAULT;
+
+ if (len < sizeof(params)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ len = sizeof(params);
+ if (copy_from_user(&params, optval, len))
+ goto out;
+
+ asoc = sctp_id2assoc(sk, params.assoc_id);
+ if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ params.assoc_value = asoc ? sctp_sched_get_sched(asoc)
+ : sctp_sk(sk)->default_ss;
+
+ if (put_user(len, optlen))
+ goto out;
+
+ if (copy_to_user(optval, &params, len))
+ goto out;
+
+ retval = 0;
+
+out:
+ return retval;
+}
+
+static int sctp_getsockopt_scheduler_value(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_stream_value params;
+ struct sctp_association *asoc;
+ int retval = -EFAULT;
+
+ if (len < sizeof(params)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ len = sizeof(params);
+ if (copy_from_user(&params, optval, len))
+ goto out;
+
+ asoc = sctp_id2assoc(sk, params.assoc_id);
+ if (!asoc) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ retval = sctp_sched_get_value(asoc, params.stream_id,
+ &params.stream_value);
+ if (retval)
+ goto out;
+
+ if (put_user(len, optlen)) {
+ retval = -EFAULT;
+ goto out;
+ }
+
+ if (copy_to_user(optval, &params, len)) {
+ retval = -EFAULT;
+ goto out;
+ }
+
+out:
+ return retval;
+}
+
+static int sctp_getsockopt_interleaving_supported(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_assoc_value params;
+ struct sctp_association *asoc;
+ int retval = -EFAULT;
+
+ if (len < sizeof(params)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ len = sizeof(params);
+ if (copy_from_user(&params, optval, len))
+ goto out;
+
+ asoc = sctp_id2assoc(sk, params.assoc_id);
+ if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ params.assoc_value = asoc ? asoc->peer.intl_capable
+ : sctp_sk(sk)->ep->intl_enable;
+
+ if (put_user(len, optlen))
+ goto out;
+
+ if (copy_to_user(optval, &params, len))
+ goto out;
+
+ retval = 0;
+
+out:
+ return retval;
+}
+
+static int sctp_getsockopt_reuse_port(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ int val;
+
+ if (len < sizeof(int))
+ return -EINVAL;
+
+ len = sizeof(int);
+ val = sctp_sk(sk)->reuse;
+ if (put_user(len, optlen))
+ return -EFAULT;
+
+ if (copy_to_user(optval, &val, len))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int sctp_getsockopt_event(struct sock *sk, int len, char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_association *asoc;
+ struct sctp_event param;
+ __u16 subscribe;
+
+ if (len < sizeof(param))
+ return -EINVAL;
+
+ len = sizeof(param);
+ if (copy_from_user(&param, optval, len))
+ return -EFAULT;
+
+ if (param.se_type < SCTP_SN_TYPE_BASE ||
+ param.se_type > SCTP_SN_TYPE_MAX)
+ return -EINVAL;
+
+ asoc = sctp_id2assoc(sk, param.se_assoc_id);
+ if (!asoc && param.se_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP))
+ return -EINVAL;
+
+ subscribe = asoc ? asoc->subscribe : sctp_sk(sk)->subscribe;
+ param.se_on = sctp_ulpevent_type_enabled(subscribe, param.se_type);
+
+ if (put_user(len, optlen))
+ return -EFAULT;
+
+ if (copy_to_user(optval, &param, len))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int sctp_getsockopt_asconf_supported(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_assoc_value params;
+ struct sctp_association *asoc;
+ int retval = -EFAULT;
+
+ if (len < sizeof(params)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ len = sizeof(params);
+ if (copy_from_user(&params, optval, len))
+ goto out;
+
+ asoc = sctp_id2assoc(sk, params.assoc_id);
+ if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ params.assoc_value = asoc ? asoc->peer.asconf_capable
+ : sctp_sk(sk)->ep->asconf_enable;
+
+ if (put_user(len, optlen))
+ goto out;
+
+ if (copy_to_user(optval, &params, len))
+ goto out;
+
+ retval = 0;
+
+out:
+ return retval;
+}
+
+static int sctp_getsockopt_auth_supported(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_assoc_value params;
+ struct sctp_association *asoc;
+ int retval = -EFAULT;
+
+ if (len < sizeof(params)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ len = sizeof(params);
+ if (copy_from_user(&params, optval, len))
+ goto out;
+
+ asoc = sctp_id2assoc(sk, params.assoc_id);
+ if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ params.assoc_value = asoc ? asoc->peer.auth_capable
+ : sctp_sk(sk)->ep->auth_enable;
+
+ if (put_user(len, optlen))
+ goto out;
+
+ if (copy_to_user(optval, &params, len))
+ goto out;
+
+ retval = 0;
+
+out:
+ return retval;
+}
+
+static int sctp_getsockopt_ecn_supported(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_assoc_value params;
+ struct sctp_association *asoc;
+ int retval = -EFAULT;
+
+ if (len < sizeof(params)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ len = sizeof(params);
+ if (copy_from_user(&params, optval, len))
+ goto out;
+
+ asoc = sctp_id2assoc(sk, params.assoc_id);
+ if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ params.assoc_value = asoc ? asoc->peer.ecn_capable
+ : sctp_sk(sk)->ep->ecn_enable;
+
+ if (put_user(len, optlen))
+ goto out;
+
+ if (copy_to_user(optval, &params, len))
+ goto out;
+
+ retval = 0;
+
+out:
+ return retval;
+}
+
+static int sctp_getsockopt_pf_expose(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_assoc_value params;
+ struct sctp_association *asoc;
+ int retval = -EFAULT;
+
+ if (len < sizeof(params)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ len = sizeof(params);
+ if (copy_from_user(&params, optval, len))
+ goto out;
+
+ asoc = sctp_id2assoc(sk, params.assoc_id);
+ if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ params.assoc_value = asoc ? asoc->pf_expose
+ : sctp_sk(sk)->pf_expose;
+
+ if (put_user(len, optlen))
+ goto out;
+
+ if (copy_to_user(optval, &params, len))
+ goto out;
+
+ retval = 0;
+
+out:
+ return retval;
+}
+
+static int sctp_getsockopt_encap_port(struct sock *sk, int len,
+ char __user *optval, int __user *optlen)
+{
+ struct sctp_association *asoc;
+ struct sctp_udpencaps encap;
+ struct sctp_transport *t;
+ __be16 encap_port;
+
+ if (len < sizeof(encap))
+ return -EINVAL;
+
+ len = sizeof(encap);
+ if (copy_from_user(&encap, optval, len))
+ return -EFAULT;
+
+ /* If an address other than INADDR_ANY is specified, and
+ * no transport is found, then the request is invalid.
+ */
+ if (!sctp_is_any(sk, (union sctp_addr *)&encap.sue_address)) {
+ t = sctp_addr_id2transport(sk, &encap.sue_address,
+ encap.sue_assoc_id);
+ if (!t) {
+ pr_debug("%s: failed no transport\n", __func__);
+ return -EINVAL;
+ }
+
+ encap_port = t->encap_port;
+ goto out;
+ }
+
+ /* Get association, if assoc_id != SCTP_FUTURE_ASSOC and the
+ * socket is a one to many style socket, and an association
+ * was not found, then the id was invalid.
+ */
+ asoc = sctp_id2assoc(sk, encap.sue_assoc_id);
+ if (!asoc && encap.sue_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP)) {
+ pr_debug("%s: failed no association\n", __func__);
+ return -EINVAL;
+ }
+
+ if (asoc) {
+ encap_port = asoc->encap_port;
+ goto out;
+ }
+
+ encap_port = sctp_sk(sk)->encap_port;
+
+out:
+ encap.sue_port = (__force uint16_t)encap_port;
+ if (copy_to_user(optval, &encap, len))
+ return -EFAULT;
+
+ if (put_user(len, optlen))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int sctp_getsockopt_probe_interval(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_probeinterval params;
+ struct sctp_association *asoc;
+ struct sctp_transport *t;
+ __u32 probe_interval;
+
+ if (len < sizeof(params))
+ return -EINVAL;
+
+ len = sizeof(params);
+ if (copy_from_user(&params, optval, len))
+ return -EFAULT;
+
+ /* If an address other than INADDR_ANY is specified, and
+ * no transport is found, then the request is invalid.
+ */
+ if (!sctp_is_any(sk, (union sctp_addr *)&params.spi_address)) {
+ t = sctp_addr_id2transport(sk, &params.spi_address,
+ params.spi_assoc_id);
+ if (!t) {
+ pr_debug("%s: failed no transport\n", __func__);
+ return -EINVAL;
+ }
+
+ probe_interval = jiffies_to_msecs(t->probe_interval);
+ goto out;
+ }
+
+ /* Get association, if assoc_id != SCTP_FUTURE_ASSOC and the
+ * socket is a one to many style socket, and an association
+ * was not found, then the id was invalid.
+ */
+ asoc = sctp_id2assoc(sk, params.spi_assoc_id);
+ if (!asoc && params.spi_assoc_id != SCTP_FUTURE_ASSOC &&
+ sctp_style(sk, UDP)) {
+ pr_debug("%s: failed no association\n", __func__);
+ return -EINVAL;
+ }
+
+ if (asoc) {
+ probe_interval = jiffies_to_msecs(asoc->probe_interval);
+ goto out;
+ }
+
+ probe_interval = sctp_sk(sk)->probe_interval;
+
+out:
+ params.spi_interval = probe_interval;
+ if (copy_to_user(optval, &params, len))
+ return -EFAULT;
+
+ if (put_user(len, optlen))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int sctp_getsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen)
+{
+ int retval = 0;
+ int len;
+
+ pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname);
+
+ /* I can hardly begin to describe how wrong this is. This is
+ * so broken as to be worse than useless. The API draft
+ * REALLY is NOT helpful here... I am not convinced that the
+ * semantics of getsockopt() with a level OTHER THAN SOL_SCTP
+ * are at all well-founded.
+ */
+ if (level != SOL_SCTP) {
+ struct sctp_af *af = sctp_sk(sk)->pf->af;
+
+ retval = af->getsockopt(sk, level, optname, optval, optlen);
+ return retval;
+ }
+
+ if (get_user(len, optlen))
+ return -EFAULT;
+
+ if (len < 0)
+ return -EINVAL;
+
+ lock_sock(sk);
+
+ switch (optname) {
+ case SCTP_STATUS:
+ retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen);
+ break;
+ case SCTP_DISABLE_FRAGMENTS:
+ retval = sctp_getsockopt_disable_fragments(sk, len, optval,
+ optlen);
+ break;
+ case SCTP_EVENTS:
+ retval = sctp_getsockopt_events(sk, len, optval, optlen);
+ break;
+ case SCTP_AUTOCLOSE:
+ retval = sctp_getsockopt_autoclose(sk, len, optval, optlen);
+ break;
+ case SCTP_SOCKOPT_PEELOFF:
+ retval = sctp_getsockopt_peeloff(sk, len, optval, optlen);
+ break;
+ case SCTP_SOCKOPT_PEELOFF_FLAGS:
+ retval = sctp_getsockopt_peeloff_flags(sk, len, optval, optlen);
+ break;
+ case SCTP_PEER_ADDR_PARAMS:
+ retval = sctp_getsockopt_peer_addr_params(sk, len, optval,
+ optlen);
+ break;
+ case SCTP_DELAYED_SACK:
+ retval = sctp_getsockopt_delayed_ack(sk, len, optval,
+ optlen);
+ break;
+ case SCTP_INITMSG:
+ retval = sctp_getsockopt_initmsg(sk, len, optval, optlen);
+ break;
+ case SCTP_GET_PEER_ADDRS:
+ retval = sctp_getsockopt_peer_addrs(sk, len, optval,
+ optlen);
+ break;
+ case SCTP_GET_LOCAL_ADDRS:
+ retval = sctp_getsockopt_local_addrs(sk, len, optval,
+ optlen);
+ break;
+ case SCTP_SOCKOPT_CONNECTX3:
+ retval = sctp_getsockopt_connectx3(sk, len, optval, optlen);
+ break;
+ case SCTP_DEFAULT_SEND_PARAM:
+ retval = sctp_getsockopt_default_send_param(sk, len,
+ optval, optlen);
+ break;
+ case SCTP_DEFAULT_SNDINFO:
+ retval = sctp_getsockopt_default_sndinfo(sk, len,
+ optval, optlen);
+ break;
+ case SCTP_PRIMARY_ADDR:
+ retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen);
+ break;
+ case SCTP_NODELAY:
+ retval = sctp_getsockopt_nodelay(sk, len, optval, optlen);
+ break;
+ case SCTP_RTOINFO:
+ retval = sctp_getsockopt_rtoinfo(sk, len, optval, optlen);
+ break;
+ case SCTP_ASSOCINFO:
+ retval = sctp_getsockopt_associnfo(sk, len, optval, optlen);
+ break;
+ case SCTP_I_WANT_MAPPED_V4_ADDR:
+ retval = sctp_getsockopt_mappedv4(sk, len, optval, optlen);
+ break;
+ case SCTP_MAXSEG:
+ retval = sctp_getsockopt_maxseg(sk, len, optval, optlen);
+ break;
+ case SCTP_GET_PEER_ADDR_INFO:
+ retval = sctp_getsockopt_peer_addr_info(sk, len, optval,
+ optlen);
+ break;
+ case SCTP_ADAPTATION_LAYER:
+ retval = sctp_getsockopt_adaptation_layer(sk, len, optval,
+ optlen);
+ break;
+ case SCTP_CONTEXT:
+ retval = sctp_getsockopt_context(sk, len, optval, optlen);
+ break;
+ case SCTP_FRAGMENT_INTERLEAVE:
+ retval = sctp_getsockopt_fragment_interleave(sk, len, optval,
+ optlen);
+ break;
+ case SCTP_PARTIAL_DELIVERY_POINT:
+ retval = sctp_getsockopt_partial_delivery_point(sk, len, optval,
+ optlen);
+ break;
+ case SCTP_MAX_BURST:
+ retval = sctp_getsockopt_maxburst(sk, len, optval, optlen);
+ break;
+ case SCTP_AUTH_KEY:
+ case SCTP_AUTH_CHUNK:
+ case SCTP_AUTH_DELETE_KEY:
+ case SCTP_AUTH_DEACTIVATE_KEY:
+ retval = -EOPNOTSUPP;
+ break;
+ case SCTP_HMAC_IDENT:
+ retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen);
+ break;
+ case SCTP_AUTH_ACTIVE_KEY:
+ retval = sctp_getsockopt_active_key(sk, len, optval, optlen);
+ break;
+ case SCTP_PEER_AUTH_CHUNKS:
+ retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval,
+ optlen);
+ break;
+ case SCTP_LOCAL_AUTH_CHUNKS:
+ retval = sctp_getsockopt_local_auth_chunks(sk, len, optval,
+ optlen);
+ break;
+ case SCTP_GET_ASSOC_NUMBER:
+ retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen);
+ break;
+ case SCTP_GET_ASSOC_ID_LIST:
+ retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen);
+ break;
+ case SCTP_AUTO_ASCONF:
+ retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen);
+ break;
+ case SCTP_PEER_ADDR_THLDS:
+ retval = sctp_getsockopt_paddr_thresholds(sk, optval, len,
+ optlen, false);
+ break;
+ case SCTP_PEER_ADDR_THLDS_V2:
+ retval = sctp_getsockopt_paddr_thresholds(sk, optval, len,
+ optlen, true);
+ break;
+ case SCTP_GET_ASSOC_STATS:
+ retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen);
+ break;
+ case SCTP_RECVRCVINFO:
+ retval = sctp_getsockopt_recvrcvinfo(sk, len, optval, optlen);
+ break;
+ case SCTP_RECVNXTINFO:
+ retval = sctp_getsockopt_recvnxtinfo(sk, len, optval, optlen);
+ break;
+ case SCTP_PR_SUPPORTED:
+ retval = sctp_getsockopt_pr_supported(sk, len, optval, optlen);
+ break;
+ case SCTP_DEFAULT_PRINFO:
+ retval = sctp_getsockopt_default_prinfo(sk, len, optval,
+ optlen);
+ break;
+ case SCTP_PR_ASSOC_STATUS:
+ retval = sctp_getsockopt_pr_assocstatus(sk, len, optval,
+ optlen);
+ break;
+ case SCTP_PR_STREAM_STATUS:
+ retval = sctp_getsockopt_pr_streamstatus(sk, len, optval,
+ optlen);
+ break;
+ case SCTP_RECONFIG_SUPPORTED:
+ retval = sctp_getsockopt_reconfig_supported(sk, len, optval,
+ optlen);
+ break;
+ case SCTP_ENABLE_STREAM_RESET:
+ retval = sctp_getsockopt_enable_strreset(sk, len, optval,
+ optlen);
+ break;
+ case SCTP_STREAM_SCHEDULER:
+ retval = sctp_getsockopt_scheduler(sk, len, optval,
+ optlen);
+ break;
+ case SCTP_STREAM_SCHEDULER_VALUE:
+ retval = sctp_getsockopt_scheduler_value(sk, len, optval,
+ optlen);
+ break;
+ case SCTP_INTERLEAVING_SUPPORTED:
+ retval = sctp_getsockopt_interleaving_supported(sk, len, optval,
+ optlen);
+ break;
+ case SCTP_REUSE_PORT:
+ retval = sctp_getsockopt_reuse_port(sk, len, optval, optlen);
+ break;
+ case SCTP_EVENT:
+ retval = sctp_getsockopt_event(sk, len, optval, optlen);
+ break;
+ case SCTP_ASCONF_SUPPORTED:
+ retval = sctp_getsockopt_asconf_supported(sk, len, optval,
+ optlen);
+ break;
+ case SCTP_AUTH_SUPPORTED:
+ retval = sctp_getsockopt_auth_supported(sk, len, optval,
+ optlen);
+ break;
+ case SCTP_ECN_SUPPORTED:
+ retval = sctp_getsockopt_ecn_supported(sk, len, optval, optlen);
+ break;
+ case SCTP_EXPOSE_POTENTIALLY_FAILED_STATE:
+ retval = sctp_getsockopt_pf_expose(sk, len, optval, optlen);
+ break;
+ case SCTP_REMOTE_UDP_ENCAPS_PORT:
+ retval = sctp_getsockopt_encap_port(sk, len, optval, optlen);
+ break;
+ case SCTP_PLPMTUD_PROBE_INTERVAL:
+ retval = sctp_getsockopt_probe_interval(sk, len, optval, optlen);
+ break;
+ default:
+ retval = -ENOPROTOOPT;
+ break;
+ }
+
+ release_sock(sk);
+ return retval;
+}
+
+static bool sctp_bpf_bypass_getsockopt(int level, int optname)
+{
+ if (level == SOL_SCTP) {
+ switch (optname) {
+ case SCTP_SOCKOPT_PEELOFF:
+ case SCTP_SOCKOPT_PEELOFF_FLAGS:
+ case SCTP_SOCKOPT_CONNECTX3:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ return false;
+}
+
+static int sctp_hash(struct sock *sk)
+{
+ /* STUB */
+ return 0;
+}
+
+static void sctp_unhash(struct sock *sk)
+{
+ /* STUB */
+}
+
+/* Check if port is acceptable. Possibly find first available port.
+ *
+ * The port hash table (contained in the 'global' SCTP protocol storage
+ * returned by struct sctp_protocol *sctp_get_protocol()) is an array of
+ * 4096 lists (sctp_bind_hashbucket). A port hashes to a list number, so
+ * all the ports in a given list share the same hash value. Each list
+ * entry therefore holds a set of ports, each with a link to the socket
+ * (struct sock) that uses it, the port number and a fastreuse flag
+ * (FIXME: NPI ipg).
+ */
+static struct sctp_bind_bucket *sctp_bucket_create(
+ struct sctp_bind_hashbucket *head, struct net *, unsigned short snum);
+
+static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
+{
+ struct sctp_sock *sp = sctp_sk(sk);
+ bool reuse = (sk->sk_reuse || sp->reuse);
+ struct sctp_bind_hashbucket *head; /* hash list */
+ struct net *net = sock_net(sk);
+ kuid_t uid = sock_i_uid(sk);
+ struct sctp_bind_bucket *pp;
+ unsigned short snum;
+ int ret;
+
+ snum = ntohs(addr->v4.sin_port);
+
+ pr_debug("%s: begins, snum:%d\n", __func__, snum);
+
+ if (snum == 0) {
+ /* Search for an available port. */
+ int low, high, remaining, index;
+ unsigned int rover;
+
+ inet_sk_get_local_port_range(sk, &low, &high);
+ remaining = (high - low) + 1;
+ rover = prandom_u32_max(remaining) + low;
+
+ do {
+ rover++;
+ if ((rover < low) || (rover > high))
+ rover = low;
+ if (inet_is_local_reserved_port(net, rover))
+ continue;
+ index = sctp_phashfn(net, rover);
+ head = &sctp_port_hashtable[index];
+ spin_lock_bh(&head->lock);
+ sctp_for_each_hentry(pp, &head->chain)
+ if ((pp->port == rover) &&
+ net_eq(net, pp->net))
+ goto next;
+ break;
+ next:
+ spin_unlock_bh(&head->lock);
+ cond_resched();
+ } while (--remaining > 0);
+
+ /* Exhausted local port range during search? */
+ ret = 1;
+ if (remaining <= 0)
+ return ret;
+
+ /* OK, here is the one we will use. HEAD (the port
+ * hash table list entry) is non-NULL and we hold its
+ * lock.
+ */
+ snum = rover;
+ } else {
+ /* We are given a specific port number; we verify
+ * that it is not being used. If it is used, we will
+ * exhaust the search in the hash list corresponding
+ * to the port number (snum) - we detect that with the
+ * port iterator, pp being NULL.
+ */
+ head = &sctp_port_hashtable[sctp_phashfn(net, snum)];
+ spin_lock_bh(&head->lock);
+ sctp_for_each_hentry(pp, &head->chain) {
+ if ((pp->port == snum) && net_eq(pp->net, net))
+ goto pp_found;
+ }
+ }
+ pp = NULL;
+ goto pp_not_found;
+pp_found:
+ if (!hlist_empty(&pp->owner)) {
+ /* We had a port hash table hit - there is an
+ * available port (pp != NULL) and it is being
+ * used by another socket (pp->owner not empty); that other
+ * socket is going to be sk2.
+ */
+ struct sock *sk2;
+
+ pr_debug("%s: found a possible match\n", __func__);
+
+ if ((pp->fastreuse && reuse &&
+ sk->sk_state != SCTP_SS_LISTENING) ||
+ (pp->fastreuseport && sk->sk_reuseport &&
+ uid_eq(pp->fastuid, uid)))
+ goto success;
+
+ /* Run through the list of sockets bound to the port
+ * (pp->port) [via the pointers bind_next and
+ * bind_pprev in the struct sock *sk2 (pp->sk)]. On each one,
+ * we get the endpoint they describe and run through
+ * the endpoint's list of IP (v4 or v6) addresses,
+ * comparing each of the addresses with the address of
+ * the socket sk. If we find a match, then that means
+ * that this port/socket (sk) combination is already
+ * in an endpoint.
+ */
+ sk_for_each_bound(sk2, &pp->owner) {
+ struct sctp_sock *sp2 = sctp_sk(sk2);
+ struct sctp_endpoint *ep2 = sp2->ep;
+
+ if (sk == sk2 ||
+ (reuse && (sk2->sk_reuse || sp2->reuse) &&
+ sk2->sk_state != SCTP_SS_LISTENING) ||
+ (sk->sk_reuseport && sk2->sk_reuseport &&
+ uid_eq(uid, sock_i_uid(sk2))))
+ continue;
+
+ if (sctp_bind_addr_conflict(&ep2->base.bind_addr,
+ addr, sp2, sp)) {
+ ret = 1;
+ goto fail_unlock;
+ }
+ }
+
+ pr_debug("%s: found a match\n", __func__);
+ }
+pp_not_found:
+ /* If there was a hash table miss, create a new port. */
+ ret = 1;
+ if (!pp && !(pp = sctp_bucket_create(head, net, snum)))
+ goto fail_unlock;
+
+ /* In either case (hit or miss), make sure fastreuse is 1 only
+ * if sk->sk_reuse is too (that is, if the caller requested
+ * SO_REUSEADDR on this socket -sk-).
+ */
+ if (hlist_empty(&pp->owner)) {
+ if (reuse && sk->sk_state != SCTP_SS_LISTENING)
+ pp->fastreuse = 1;
+ else
+ pp->fastreuse = 0;
+
+ if (sk->sk_reuseport) {
+ pp->fastreuseport = 1;
+ pp->fastuid = uid;
+ } else {
+ pp->fastreuseport = 0;
+ }
+ } else {
+ if (pp->fastreuse &&
+ (!reuse || sk->sk_state == SCTP_SS_LISTENING))
+ pp->fastreuse = 0;
+
+ if (pp->fastreuseport &&
+ (!sk->sk_reuseport || !uid_eq(pp->fastuid, uid)))
+ pp->fastreuseport = 0;
+ }
+
+ /* We are set, so fill up all the data in the hash table
+ * entry, tie the socket list information with the rest of the
+ * sockets FIXME: Blurry, NPI (ipg).
+ */
+success:
+ if (!sp->bind_hash) {
+ inet_sk(sk)->inet_num = snum;
+ sk_add_bind_node(sk, &pp->owner);
+ sp->bind_hash = pp;
+ }
+ ret = 0;
+
+fail_unlock:
+ spin_unlock_bh(&head->lock);
+ return ret;
+}
+
+/* Assign a 'snum' port to the socket. If snum == 0, an ephemeral
+ * port is requested.
+ */
+static int sctp_get_port(struct sock *sk, unsigned short snum)
+{
+ union sctp_addr addr;
+ struct sctp_af *af = sctp_sk(sk)->pf->af;
+
+ /* Set up a dummy address struct from the sk. */
+ af->from_sk(&addr, sk);
+ addr.v4.sin_port = htons(snum);
+
+ /* Note: sk->sk_num gets filled in if ephemeral port request. */
+ return sctp_get_port_local(sk, &addr);
+}
+
+/*
+ * Move a socket to LISTENING state.
+ */
+static int sctp_listen_start(struct sock *sk, int backlog)
+{
+ struct sctp_sock *sp = sctp_sk(sk);
+ struct sctp_endpoint *ep = sp->ep;
+ struct crypto_shash *tfm = NULL;
+ char alg[32];
+
+ /* Allocate HMAC for generating cookie. */
+ if (!sp->hmac && sp->sctp_hmac_alg) {
+ sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg);
+ tfm = crypto_alloc_shash(alg, 0, 0);
+ if (IS_ERR(tfm)) {
+ net_info_ratelimited("failed to load transform for %s: %ld\n",
+ sp->sctp_hmac_alg, PTR_ERR(tfm));
+ return -ENOSYS;
+ }
+ sctp_sk(sk)->hmac = tfm;
+ }
+
+ /*
+ * If a bind() or sctp_bindx() is not called prior to a listen()
+ * call that allows new associations to be accepted, the system
+ * picks an ephemeral port and will choose an address set equivalent
+ * to binding with a wildcard address.
+ *
+ * This is not currently spelled out in the SCTP sockets
+ * extensions draft, but follows the practice as seen in TCP
+ * sockets.
+ *
+ */
+ inet_sk_set_state(sk, SCTP_SS_LISTENING);
+ if (!ep->base.bind_addr.port) {
+ if (sctp_autobind(sk))
+ return -EAGAIN;
+ } else {
+ if (sctp_get_port(sk, inet_sk(sk)->inet_num)) {
+ inet_sk_set_state(sk, SCTP_SS_CLOSED);
+ return -EADDRINUSE;
+ }
+ }
+
+ WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
+ return sctp_hash_endpoint(ep);
+}
+
+/*
+ * 4.1.3 / 5.1.3 listen()
+ *
+ * By default, new associations are not accepted for UDP style sockets.
+ * An application uses listen() to mark a socket as being able to
+ * accept new associations.
+ *
+ * On TCP style sockets, applications use listen() to ready the SCTP
+ * endpoint for accepting inbound associations.
+ *
+ * On both types of endpoints a backlog of '0' disables listening.
+ *
+ * Move a socket to LISTENING state.
+ */
+int sctp_inet_listen(struct socket *sock, int backlog)
+{
+ struct sock *sk = sock->sk;
+ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
+ int err = -EINVAL;
+
+ if (unlikely(backlog < 0))
+ return err;
+
+ lock_sock(sk);
+
+ /* Peeled-off sockets are not allowed to listen(). */
+ if (sctp_style(sk, UDP_HIGH_BANDWIDTH))
+ goto out;
+
+ if (sock->state != SS_UNCONNECTED)
+ goto out;
+
+ if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED))
+ goto out;
+
+ /* If backlog is zero, disable listening. */
+ if (!backlog) {
+ if (sctp_sstate(sk, CLOSED))
+ goto out;
+
+ err = 0;
+ sctp_unhash_endpoint(ep);
+ sk->sk_state = SCTP_SS_CLOSED;
+ if (sk->sk_reuse || sctp_sk(sk)->reuse)
+ sctp_sk(sk)->bind_hash->fastreuse = 1;
+ goto out;
+ }
+
+ /* If we are already listening, just update the backlog */
+ if (sctp_sstate(sk, LISTENING))
+ WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
+ else {
+ err = sctp_listen_start(sk, backlog);
+ if (err)
+ goto out;
+ }
+
+ err = 0;
+out:
+ release_sock(sk);
+ return err;
+}
+
+/*
+ * This function is modeled on the current datagram_poll() and
+ * tcp_poll(). Note that, following those implementations, we don't
+ * lock the socket in this function, even though ideally locking or
+ * some other mechanism could be used to ensure the integrity of the
+ * counters (sndbuf and wmem_alloc) used here. We assume that we don't
+ * need locks until proven otherwise.
+ *
+ * Another thing to note is that we include the Async I/O support
+ * here, again, by modeling the current TCP/UDP code. We don't have
+ * a good way to test with it yet.
+ */
+__poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
+{
+ struct sock *sk = sock->sk;
+ struct sctp_sock *sp = sctp_sk(sk);
+ __poll_t mask;
+
+ poll_wait(file, sk_sleep(sk), wait);
+
+ sock_rps_record_flow(sk);
+
+ /* A TCP-style listening socket becomes readable when the accept queue
+ * is not empty.
+ */
+ if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
+ return (!list_empty(&sp->ep->asocs)) ?
+ (EPOLLIN | EPOLLRDNORM) : 0;
+
+ mask = 0;
+
+ /* Are there any exceptional events? */
+ if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
+ mask |= EPOLLERR |
+ (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
+ if (sk->sk_shutdown == SHUTDOWN_MASK)
+ mask |= EPOLLHUP;
+
+ /* Is it readable? Reconsider this code with TCP-style support. */
+ if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+ mask |= EPOLLIN | EPOLLRDNORM;
+
+ /* The association is either gone or not ready. */
+ if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED))
+ return mask;
+
+ /* Is it writable? */
+ if (sctp_writeable(sk)) {
+ mask |= EPOLLOUT | EPOLLWRNORM;
+ } else {
+ sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+ /*
+ * Since the socket is not locked, the buffer
+ * might be made available after the writeable check and
+ * before the bit is set. This could cause a lost I/O
+ * signal. tcp_poll() has a race breaker for this race
+ * condition. Based on their implementation, we put
+ * in the following code to cover it as well.
+ */
+ if (sctp_writeable(sk))
+ mask |= EPOLLOUT | EPOLLWRNORM;
+ }
+ return mask;
+}
+
+/********************************************************************
+ * 2nd Level Abstractions
+ ********************************************************************/
+
+static struct sctp_bind_bucket *sctp_bucket_create(
+ struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum)
+{
+ struct sctp_bind_bucket *pp;
+
+ pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC);
+ if (pp) {
+ SCTP_DBG_OBJCNT_INC(bind_bucket);
+ pp->port = snum;
+ pp->fastreuse = 0;
+ INIT_HLIST_HEAD(&pp->owner);
+ pp->net = net;
+ hlist_add_head(&pp->node, &head->chain);
+ }
+ return pp;
+}
+
+/* Caller must hold hashbucket lock for this tb with local BH disabled */
+static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
+{
+ if (pp && hlist_empty(&pp->owner)) {
+ __hlist_del(&pp->node);
+ kmem_cache_free(sctp_bucket_cachep, pp);
+ SCTP_DBG_OBJCNT_DEC(bind_bucket);
+ }
+}
+
+/* Release this socket's reference to a local port. */
+static inline void __sctp_put_port(struct sock *sk)
+{
+ struct sctp_bind_hashbucket *head =
+ &sctp_port_hashtable[sctp_phashfn(sock_net(sk),
+ inet_sk(sk)->inet_num)];
+ struct sctp_bind_bucket *pp;
+
+ spin_lock(&head->lock);
+ pp = sctp_sk(sk)->bind_hash;
+ __sk_del_bind_node(sk);
+ sctp_sk(sk)->bind_hash = NULL;
+ inet_sk(sk)->inet_num = 0;
+ sctp_bucket_destroy(pp);
+ spin_unlock(&head->lock);
+}
+
+void sctp_put_port(struct sock *sk)
+{
+ local_bh_disable();
+ __sctp_put_port(sk);
+ local_bh_enable();
+}
+
+/*
+ * The system picks an ephemeral port and chooses an address set equivalent
+ * to binding with a wildcard address.
+ * One of those addresses will be the primary address for the association.
+ * This automatically enables the multihoming capability of SCTP.
+ */
+static int sctp_autobind(struct sock *sk)
+{
+ union sctp_addr autoaddr;
+ struct sctp_af *af;
+ __be16 port;
+
+ /* Initialize a local sockaddr structure to INADDR_ANY. */
+ af = sctp_sk(sk)->pf->af;
+
+ port = htons(inet_sk(sk)->inet_num);
+ af->inaddr_any(&autoaddr, port);
+
+ return sctp_do_bind(sk, &autoaddr, af->sockaddr_len);
+}
+
+/* Parse out IPPROTO_SCTP CMSG headers. Perform only minimal validation.
+ *
+ * From RFC 2292
+ * 4.2 The cmsghdr Structure *
+ *
+ * When ancillary data is sent or received, any number of ancillary data
+ * objects can be specified by the msg_control and msg_controllen members of
+ * the msghdr structure, because each object is preceded by
+ * a cmsghdr structure defining the object's length (the cmsg_len member).
+ * Historically Berkeley-derived implementations have passed only one object
+ * at a time, but this API allows multiple objects to be
+ * passed in a single call to sendmsg() or recvmsg(). The following example
+ * shows two ancillary data objects in a control buffer.
+ *
+ * |<--------------------------- msg_controllen -------------------------->|
+ * |                                                                        |
+ *
+ * |<----- ancillary data object ----->|<----- ancillary data object ----->|
+ *
+ * |<---------- CMSG_SPACE() --------->|<---------- CMSG_SPACE() --------->|
+ * |                                   |                                   |
+ *
+ * |<---------- cmsg_len ---------->|  |<--------- cmsg_len ----------->|  |
+ *
+ * |<--------- CMSG_LEN() --------->|  |<-------- CMSG_LEN() ---------->|  |
+ * |                                |  |                                |  |
+ *
+ * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
+ * |cmsg_|cmsg_|cmsg_|XX|           |XX|cmsg_|cmsg_|cmsg_|XX|           |XX|
+ * |len  |level|type |XX|cmsg_data[]|XX|len  |level|type |XX|cmsg_data[]|XX|
+ * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
+ * ^
+ * |
+ *
+ * msg_control
+ * points here
+ */
+static int sctp_msghdr_parse(const struct msghdr *msg, struct sctp_cmsgs *cmsgs)
+{
+ struct msghdr *my_msg = (struct msghdr *)msg;
+ struct cmsghdr *cmsg;
+
+ for_each_cmsghdr(cmsg, my_msg) {
+ if (!CMSG_OK(my_msg, cmsg))
+ return -EINVAL;
+
+ /* Should we parse this header or ignore it? */
+ if (cmsg->cmsg_level != IPPROTO_SCTP)
+ continue;
+
+ /* Strictly check lengths following example in SCM code. */
+ switch (cmsg->cmsg_type) {
+ case SCTP_INIT:
+ /* SCTP Socket API Extension
+ * 5.3.1 SCTP Initiation Structure (SCTP_INIT)
+ *
+ * This cmsghdr structure provides information for
+ * initializing new SCTP associations with sendmsg().
+ * The SCTP_INITMSG socket option uses this same data
+ * structure. This structure is not used for
+ * recvmsg().
+ *
+ * cmsg_level cmsg_type cmsg_data[]
+ * ------------ ------------ ----------------------
+ * IPPROTO_SCTP SCTP_INIT struct sctp_initmsg
+ */
+ if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_initmsg)))
+ return -EINVAL;
+
+ cmsgs->init = CMSG_DATA(cmsg);
+ break;
+
+ case SCTP_SNDRCV:
+ /* SCTP Socket API Extension
+ * 5.3.2 SCTP Header Information Structure (SCTP_SNDRCV)
+ *
+ * This cmsghdr structure specifies SCTP options for
+ * sendmsg() and describes SCTP header information
+ * about a received message through recvmsg().
+ *
+ * cmsg_level cmsg_type cmsg_data[]
+ * ------------ ------------ ----------------------
+ * IPPROTO_SCTP SCTP_SNDRCV struct sctp_sndrcvinfo
+ */
+ if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndrcvinfo)))
+ return -EINVAL;
+
+ cmsgs->srinfo = CMSG_DATA(cmsg);
+
+ if (cmsgs->srinfo->sinfo_flags &
+ ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
+ SCTP_SACK_IMMEDIATELY | SCTP_SENDALL |
+ SCTP_PR_SCTP_MASK | SCTP_ABORT | SCTP_EOF))
+ return -EINVAL;
+ break;
+
+ case SCTP_SNDINFO:
+ /* SCTP Socket API Extension
+ * 5.3.4 SCTP Send Information Structure (SCTP_SNDINFO)
+ *
+ * This cmsghdr structure specifies SCTP options for
+ * sendmsg(). This structure and SCTP_RCVINFO replace
+ * SCTP_SNDRCV, which has been deprecated.
+ *
+ * cmsg_level cmsg_type cmsg_data[]
+ * ------------ ------------ ---------------------
+ * IPPROTO_SCTP SCTP_SNDINFO struct sctp_sndinfo
+ */
+ if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndinfo)))
+ return -EINVAL;
+
+ cmsgs->sinfo = CMSG_DATA(cmsg);
+
+ if (cmsgs->sinfo->snd_flags &
+ ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
+ SCTP_SACK_IMMEDIATELY | SCTP_SENDALL |
+ SCTP_PR_SCTP_MASK | SCTP_ABORT | SCTP_EOF))
+ return -EINVAL;
+ break;
+ case SCTP_PRINFO:
+ /* SCTP Socket API Extension
+ * 5.3.7 SCTP PR-SCTP Information Structure (SCTP_PRINFO)
+ *
+ * This cmsghdr structure specifies SCTP options for sendmsg().
+ *
+ * cmsg_level cmsg_type cmsg_data[]
+ * ------------ ------------ ---------------------
+ * IPPROTO_SCTP SCTP_PRINFO struct sctp_prinfo
+ */
+ if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_prinfo)))
+ return -EINVAL;
+
+ cmsgs->prinfo = CMSG_DATA(cmsg);
+ if (cmsgs->prinfo->pr_policy & ~SCTP_PR_SCTP_MASK)
+ return -EINVAL;
+
+ if (cmsgs->prinfo->pr_policy == SCTP_PR_SCTP_NONE)
+ cmsgs->prinfo->pr_value = 0;
+ break;
+ case SCTP_AUTHINFO:
+ /* SCTP Socket API Extension
+ * 5.3.8 SCTP AUTH Information Structure (SCTP_AUTHINFO)
+ *
+ * This cmsghdr structure specifies SCTP options for sendmsg().
+ *
+ * cmsg_level cmsg_type cmsg_data[]
+ * ------------ ------------ ---------------------
+ * IPPROTO_SCTP SCTP_AUTHINFO struct sctp_authinfo
+ */
+ if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_authinfo)))
+ return -EINVAL;
+
+ cmsgs->authinfo = CMSG_DATA(cmsg);
+ break;
+ case SCTP_DSTADDRV4:
+ case SCTP_DSTADDRV6:
+ /* SCTP Socket API Extension
+ * 5.3.9/10 SCTP Destination IPv4/6 Address Structure (SCTP_DSTADDRV4/6)
+ *
+ * This cmsghdr structure specifies SCTP options for sendmsg().
+ *
+ * cmsg_level cmsg_type cmsg_data[]
+ * ------------ ------------ ---------------------
+ * IPPROTO_SCTP SCTP_DSTADDRV4 struct in_addr
+ * ------------ ------------ ---------------------
+ * IPPROTO_SCTP SCTP_DSTADDRV6 struct in6_addr
+ */
+ cmsgs->addrs_msg = my_msg;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
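+/* An illustrative userspace sketch of a control buffer that the parser above
+ * accepts: a single SCTP_SNDINFO object selecting stream 1.  'fd', 'buf' and
+ * 'len' are hypothetical; the structure definitions are the usual ones from
+ * the SCTP socket API headers.
+ *
+ *	char cbuf[CMSG_SPACE(sizeof(struct sctp_sndinfo))] = { 0 };
+ *	struct iovec iov = { .iov_base = buf, .iov_len = len };
+ *	struct msghdr msg = {
+ *		.msg_iov = &iov, .msg_iovlen = 1,
+ *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
+ *	};
+ *	struct sctp_sndinfo snd = { .snd_sid = 1 };
+ *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
+ *
+ *	cmsg->cmsg_level = IPPROTO_SCTP;
+ *	cmsg->cmsg_type  = SCTP_SNDINFO;
+ *	cmsg->cmsg_len   = CMSG_LEN(sizeof(snd));
+ *	memcpy(CMSG_DATA(cmsg), &snd, sizeof(snd));
+ *
+ *	sendmsg(fd, &msg, 0);
+ */
+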
+/*
+ * Wait for a packet..
+ * Note: This function is the same function as in core/datagram.c
+ * with a few modifications to make lksctp work.
+ */
+static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p)
+{
+ int error;
+ DEFINE_WAIT(wait);
+
+ prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+
+ /* Socket errors? */
+ error = sock_error(sk);
+ if (error)
+ goto out;
+
+ if (!skb_queue_empty(&sk->sk_receive_queue))
+ goto ready;
+
+ /* Socket shut down? */
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ goto out;
+
+ /* Sequenced packets can come disconnected. If so, we report the
+ * problem.
+ */
+ error = -ENOTCONN;
+
+ /* Is there a good reason to think that we may receive some data? */
+ if (list_empty(&sctp_sk(sk)->ep->asocs) && !sctp_sstate(sk, LISTENING))
+ goto out;
+
+ /* Handle signals. */
+ if (signal_pending(current))
+ goto interrupted;
+
+ /* Let another process have a go, since we are going to sleep
+ * anyway. Note: this may cause odd behavior if the message
+ * does not fit in the user's buffer, but it seems to be the
+ * only way to honor MSG_DONTWAIT realistically.
+ */
+ release_sock(sk);
+ *timeo_p = schedule_timeout(*timeo_p);
+ lock_sock(sk);
+
+ready:
+ finish_wait(sk_sleep(sk), &wait);
+ return 0;
+
+interrupted:
+ error = sock_intr_errno(*timeo_p);
+
+out:
+ finish_wait(sk_sleep(sk), &wait);
+ *err = error;
+ return error;
+}
+
+/* Receive a datagram.
+ * Note: This is pretty much the same routine as in core/datagram.c
+ * with a few changes to make lksctp work.
+ */
+struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, int *err)
+{
+ int error;
+ struct sk_buff *skb;
+ long timeo;
+
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+
+ pr_debug("%s: timeo:%ld, max:%ld\n", __func__, timeo,
+ MAX_SCHEDULE_TIMEOUT);
+
+ do {
+ /* Again, only user-level code calls this function,
+ * so nothing running at interrupt level
+ * will suddenly eat the receive_queue.
+ *
+ * Look at the current nfs client, by the way...
+ * However, this function was correct in any case. 8)
+ */
+ if (flags & MSG_PEEK) {
+ skb = skb_peek(&sk->sk_receive_queue);
+ if (skb)
+ refcount_inc(&skb->users);
+ } else {
+ skb = __skb_dequeue(&sk->sk_receive_queue);
+ }
+
+ if (skb)
+ return skb;
+
+ /* Caller is allowed not to check sk->sk_err before calling. */
+ error = sock_error(sk);
+ if (error)
+ goto no_packet;
+
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ break;
+
+
+ /* User doesn't want to wait. */
+ error = -EAGAIN;
+ if (!timeo)
+ goto no_packet;
+ } while (sctp_wait_for_packet(sk, err, &timeo) == 0);
+
+ return NULL;
+
+no_packet:
+ *err = error;
+ return NULL;
+}
+
+/* If sndbuf has changed, wake up per association sndbuf waiters. */
+static void __sctp_write_space(struct sctp_association *asoc)
+{
+ struct sock *sk = asoc->base.sk;
+
+ if (sctp_wspace(asoc) <= 0)
+ return;
+
+ if (waitqueue_active(&asoc->wait))
+ wake_up_interruptible(&asoc->wait);
+
+ if (sctp_writeable(sk)) {
+ struct socket_wq *wq;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq) {
+ if (waitqueue_active(&wq->wait))
+ wake_up_interruptible(&wq->wait);
+
+ /* Note that we try to include the Async I/O support
+ * here by modeling from the current TCP/UDP code.
+ * We have not tested with it yet.
+ */
+ if (!(sk->sk_shutdown & SEND_SHUTDOWN))
+ sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
+ }
+ rcu_read_unlock();
+ }
+}
+
+static void sctp_wake_up_waiters(struct sock *sk,
+ struct sctp_association *asoc)
+{
+ struct sctp_association *tmp = asoc;
+
+ /* We do accounting for the sndbuf space per association,
+ * so we only need to wake our own association.
+ */
+ if (asoc->ep->sndbuf_policy)
+ return __sctp_write_space(asoc);
+
+ /* If the association is going down and is just flushing its
+ * outq, then just notify the others normally.
+ */
+ if (asoc->base.dead)
+ return sctp_write_space(sk);
+
+ /* Accounting for the sndbuf space is per socket, so we
+ * need to wake up the others. Try to be fair: if there are
+ * other associations, let them have a go first instead of
+ * just doing an sctp_write_space() call.
+ *
+ * Note that we reach sctp_wake_up_waiters() only when
+ * associations free up queued chunks; thus we are under the
+ * socket lock and the list of associations on a socket is
+ * guaranteed not to change.
+ */
+ for (tmp = list_next_entry(tmp, asocs); 1;
+ tmp = list_next_entry(tmp, asocs)) {
+ /* Manually skip the head element. */
+ if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs))
+ continue;
+ /* Wake up association. */
+ __sctp_write_space(tmp);
+ /* We've reached the end. */
+ if (tmp == asoc)
+ break;
+ }
+}
+
+/* Do accounting for the sndbuf space.
+ * Decrement the used sndbuf space of the corresponding association by the
+ * data size which was just transmitted (freed).
+ */
+static void sctp_wfree(struct sk_buff *skb)
+{
+ struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg;
+ struct sctp_association *asoc = chunk->asoc;
+ struct sock *sk = asoc->base.sk;
+
+ sk_mem_uncharge(sk, skb->truesize);
+ sk_wmem_queued_add(sk, -(skb->truesize + sizeof(struct sctp_chunk)));
+ asoc->sndbuf_used -= skb->truesize + sizeof(struct sctp_chunk);
+ WARN_ON(refcount_sub_and_test(sizeof(struct sctp_chunk),
+ &sk->sk_wmem_alloc));
+
+ if (chunk->shkey) {
+ struct sctp_shared_key *shkey = chunk->shkey;
+
+ /* refcnt == 2 and !list_empty mean that, after this release,
+ * the shkey is not being used anywhere, and it's time to notify
+ * userland that it can be freed if it has been deactivated.
+ */
+ if (shkey->deactivated && !list_empty(&shkey->key_list) &&
+ refcount_read(&shkey->refcnt) == 2) {
+ struct sctp_ulpevent *ev;
+
+ ev = sctp_ulpevent_make_authkey(asoc, shkey->key_id,
+ SCTP_AUTH_FREE_KEY,
+ GFP_KERNEL);
+ if (ev)
+ asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
+ }
+ sctp_auth_shkey_release(chunk->shkey);
+ }
+
+ sock_wfree(skb);
+ sctp_wake_up_waiters(sk, asoc);
+
+ sctp_association_put(asoc);
+}
+
+/* Do accounting for the receive space on the socket.
+ * Accounting for the association is done in ulpevent.c.
+ * We set this as a destructor for the cloned data skbs so that
+ * accounting is done at the correct time.
+ */
+void sctp_sock_rfree(struct sk_buff *skb)
+{
+ struct sock *sk = skb->sk;
+ struct sctp_ulpevent *event = sctp_skb2event(skb);
+
+ atomic_sub(event->rmem_len, &sk->sk_rmem_alloc);
+
+ /*
+ * Mimic the behavior of sock_rfree
+ */
+ sk_mem_uncharge(sk, event->rmem_len);
+}
+
+
+/* Helper function to wait for space in the sndbuf. */
+static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
+ size_t msg_len)
+{
+ struct sock *sk = asoc->base.sk;
+ long current_timeo = *timeo_p;
+ DEFINE_WAIT(wait);
+ int err = 0;
+
+ pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
+ *timeo_p, msg_len);
+
+ /* Increment the association's refcnt. */
+ sctp_association_hold(asoc);
+
+ /* Wait on the association specific sndbuf space. */
+ for (;;) {
+ prepare_to_wait_exclusive(&asoc->wait, &wait,
+ TASK_INTERRUPTIBLE);
+ if (asoc->base.dead)
+ goto do_dead;
+ if (!*timeo_p)
+ goto do_nonblock;
+ if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING)
+ goto do_error;
+ if (signal_pending(current))
+ goto do_interrupted;
+ if ((int)msg_len <= sctp_wspace(asoc) &&
+ sk_wmem_schedule(sk, msg_len))
+ break;
+
+ /* Let another process have a go, since we are going
+ * to sleep anyway.
+ */
+ release_sock(sk);
+ current_timeo = schedule_timeout(current_timeo);
+ lock_sock(sk);
+ if (sk != asoc->base.sk)
+ goto do_error;
+
+ *timeo_p = current_timeo;
+ }
+
+out:
+ finish_wait(&asoc->wait, &wait);
+
+ /* Release the association's refcnt. */
+ sctp_association_put(asoc);
+
+ return err;
+
+do_dead:
+ err = -ESRCH;
+ goto out;
+
+do_error:
+ err = -EPIPE;
+ goto out;
+
+do_interrupted:
+ err = sock_intr_errno(*timeo_p);
+ goto out;
+
+do_nonblock:
+ err = -EAGAIN;
+ goto out;
+}
+
+void sctp_data_ready(struct sock *sk)
+{
+ struct socket_wq *wq;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (skwq_has_sleeper(wq))
+ wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
+ EPOLLRDNORM | EPOLLRDBAND);
+ sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+ rcu_read_unlock();
+}
+
+/* If socket sndbuf has changed, wake up all per association waiters. */
+void sctp_write_space(struct sock *sk)
+{
+ struct sctp_association *asoc;
+
+ /* Wake up the tasks in each wait queue. */
+ list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) {
+ __sctp_write_space(asoc);
+ }
+}
+
+/* Is there any sndbuf space available on the socket?
+ *
+ * Note that sk_wmem_alloc is the sum of the send buffers on all of the
+ * associations on the same socket. For a UDP-style socket with
+ * multiple associations, it is possible for it to be "unwriteable"
+ * prematurely. I assume that this is acceptable because
+ * a premature "unwriteable" is better than an accidental "writeable" which
+ * would cause an unwanted block under certain circumstances. For the 1-1
+ * UDP-style sockets or TCP-style sockets, this code should work.
+ * - Daisy
+ */
+static bool sctp_writeable(const struct sock *sk)
+{
+ return READ_ONCE(sk->sk_sndbuf) > READ_ONCE(sk->sk_wmem_queued);
+}
+
+/* Wait for an association to go into ESTABLISHED state. If timeout is 0,
+ * returns immediately with EINPROGRESS.
+ */
+static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p)
+{
+ struct sock *sk = asoc->base.sk;
+ int err = 0;
+ long current_timeo = *timeo_p;
+ DEFINE_WAIT(wait);
+
+ pr_debug("%s: asoc:%p, timeo:%ld\n", __func__, asoc, *timeo_p);
+
+ /* Increment the association's refcnt. */
+ sctp_association_hold(asoc);
+
+ for (;;) {
+ prepare_to_wait_exclusive(&asoc->wait, &wait,
+ TASK_INTERRUPTIBLE);
+ if (!*timeo_p)
+ goto do_nonblock;
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ break;
+ if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
+ asoc->base.dead)
+ goto do_error;
+ if (signal_pending(current))
+ goto do_interrupted;
+
+ if (sctp_state(asoc, ESTABLISHED))
+ break;
+
+ /* Let another process have a go, since we are going
+ * to sleep anyway.
+ */
+ release_sock(sk);
+ current_timeo = schedule_timeout(current_timeo);
+ lock_sock(sk);
+
+ *timeo_p = current_timeo;
+ }
+
+out:
+ finish_wait(&asoc->wait, &wait);
+
+ /* Release the association's refcnt. */
+ sctp_association_put(asoc);
+
+ return err;
+
+do_error:
+ if (asoc->init_err_counter + 1 > asoc->max_init_attempts)
+ err = -ETIMEDOUT;
+ else
+ err = -ECONNREFUSED;
+ goto out;
+
+do_interrupted:
+ err = sock_intr_errno(*timeo_p);
+ goto out;
+
+do_nonblock:
+ err = -EINPROGRESS;
+ goto out;
+}
+
+static int sctp_wait_for_accept(struct sock *sk, long timeo)
+{
+ struct sctp_endpoint *ep;
+ int err = 0;
+ DEFINE_WAIT(wait);
+
+ ep = sctp_sk(sk)->ep;
+
+
+ for (;;) {
+ prepare_to_wait_exclusive(sk_sleep(sk), &wait,
+ TASK_INTERRUPTIBLE);
+
+ if (list_empty(&ep->asocs)) {
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock(sk);
+ }
+
+ err = -EINVAL;
+ if (!sctp_sstate(sk, LISTENING))
+ break;
+
+ err = 0;
+ if (!list_empty(&ep->asocs))
+ break;
+
+ err = sock_intr_errno(timeo);
+ if (signal_pending(current))
+ break;
+
+ err = -EAGAIN;
+ if (!timeo)
+ break;
+ }
+
+ finish_wait(sk_sleep(sk), &wait);
+
+ return err;
+}
+
+static void sctp_wait_for_close(struct sock *sk, long timeout)
+{
+ DEFINE_WAIT(wait);
+
+ do {
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+ if (list_empty(&sctp_sk(sk)->ep->asocs))
+ break;
+ release_sock(sk);
+ timeout = schedule_timeout(timeout);
+ lock_sock(sk);
+ } while (!signal_pending(current) && timeout);
+
+ finish_wait(sk_sleep(sk), &wait);
+}
+
+static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)
+{
+ struct sk_buff *frag;
+
+ if (!skb->data_len)
+ goto done;
+
+ /* Don't forget the fragments. */
+ skb_walk_frags(skb, frag)
+ sctp_skb_set_owner_r_frag(frag, sk);
+
+done:
+ sctp_skb_set_owner_r(skb, sk);
+}
+
+void sctp_copy_sock(struct sock *newsk, struct sock *sk,
+ struct sctp_association *asoc)
+{
+ struct inet_sock *inet = inet_sk(sk);
+ struct inet_sock *newinet;
+ struct sctp_sock *sp = sctp_sk(sk);
+
+ newsk->sk_type = sk->sk_type;
+ newsk->sk_bound_dev_if = sk->sk_bound_dev_if;
+ newsk->sk_flags = sk->sk_flags;
+ newsk->sk_tsflags = sk->sk_tsflags;
+ newsk->sk_no_check_tx = sk->sk_no_check_tx;
+ newsk->sk_no_check_rx = sk->sk_no_check_rx;
+ newsk->sk_reuse = sk->sk_reuse;
+ sctp_sk(newsk)->reuse = sp->reuse;
+
+ newsk->sk_shutdown = sk->sk_shutdown;
+ newsk->sk_destruct = sk->sk_destruct;
+ newsk->sk_family = sk->sk_family;
+ newsk->sk_protocol = IPPROTO_SCTP;
+ newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
+ newsk->sk_sndbuf = sk->sk_sndbuf;
+ newsk->sk_rcvbuf = sk->sk_rcvbuf;
+ newsk->sk_lingertime = sk->sk_lingertime;
+ newsk->sk_rcvtimeo = sk->sk_rcvtimeo;
+ newsk->sk_sndtimeo = sk->sk_sndtimeo;
+ newsk->sk_rxhash = sk->sk_rxhash;
+
+ newinet = inet_sk(newsk);
+
+ /* Initialize sk's sport, dport, rcv_saddr and daddr for
+ * getsockname() and getpeername()
+ */
+ newinet->inet_sport = inet->inet_sport;
+ newinet->inet_saddr = inet->inet_saddr;
+ newinet->inet_rcv_saddr = inet->inet_rcv_saddr;
+ newinet->inet_dport = htons(asoc->peer.port);
+ newinet->pmtudisc = inet->pmtudisc;
+ atomic_set(&newinet->inet_id, get_random_u16());
+
+ newinet->uc_ttl = inet->uc_ttl;
+ newinet->mc_loop = 1;
+ newinet->mc_ttl = 1;
+ newinet->mc_index = 0;
+ newinet->mc_list = NULL;
+
+ if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
+ net_enable_timestamp();
+
+ /* Set newsk security attributes from original sk and connection
+ * security attribute from asoc.
+ */
+ security_sctp_sk_clone(asoc, sk, newsk);
+}
+
+static inline void sctp_copy_descendant(struct sock *sk_to,
+ const struct sock *sk_from)
+{
+ size_t ancestor_size = sizeof(struct inet_sock);
+
+ ancestor_size += sk_from->sk_prot->obj_size;
+ ancestor_size -= offsetof(struct sctp_sock, pd_lobby);
+ __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
+}
+
+/* Populate the fields of the newsk from the oldsk and migrate the assoc
+ * and its messages to the newsk.
+ */
+static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
+ struct sctp_association *assoc,
+ enum sctp_socket_type type)
+{
+ struct sctp_sock *oldsp = sctp_sk(oldsk);
+ struct sctp_sock *newsp = sctp_sk(newsk);
+ struct sctp_bind_bucket *pp; /* hash list port iterator */
+ struct sctp_endpoint *newep = newsp->ep;
+ struct sk_buff *skb, *tmp;
+ struct sctp_ulpevent *event;
+ struct sctp_bind_hashbucket *head;
+ int err;
+
+ /* Migrate socket buffer sizes and all the socket level options to the
+ * new socket.
+ */
+ newsk->sk_sndbuf = oldsk->sk_sndbuf;
+ newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
+ /* Brute force copy old sctp opt. */
+ sctp_copy_descendant(newsk, oldsk);
+
+ /* Restore the ep value that was overwritten with the above structure
+ * copy.
+ */
+ newsp->ep = newep;
+ newsp->hmac = NULL;
+
+ /* Hook this new socket in to the bind_hash list. */
+ head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk),
+ inet_sk(oldsk)->inet_num)];
+ spin_lock_bh(&head->lock);
+ pp = sctp_sk(oldsk)->bind_hash;
+ sk_add_bind_node(newsk, &pp->owner);
+ sctp_sk(newsk)->bind_hash = pp;
+ inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num;
+ spin_unlock_bh(&head->lock);
+
+ /* Copy the bind_addr list from the original endpoint to the new
+ * endpoint so that we can handle restarts properly
+ */
+ err = sctp_bind_addr_dup(&newsp->ep->base.bind_addr,
+ &oldsp->ep->base.bind_addr, GFP_KERNEL);
+ if (err)
+ return err;
+
+ /* New ep's auth_hmacs should be set if old ep's is set, in case
+ * that net->sctp.auth_enable has been changed to 0 by users and
+ * new ep's auth_hmacs couldn't be set in sctp_endpoint_init().
+ */
+ if (oldsp->ep->auth_hmacs) {
+ err = sctp_auth_init_hmacs(newsp->ep, GFP_KERNEL);
+ if (err)
+ return err;
+ }
+
+ sctp_auto_asconf_init(newsp);
+
+ /* Move any messages in the old socket's receive queue that are for the
+ * peeled off association to the new socket's receive queue.
+ */
+ sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
+ event = sctp_skb2event(skb);
+ if (event->asoc == assoc) {
+ __skb_unlink(skb, &oldsk->sk_receive_queue);
+ __skb_queue_tail(&newsk->sk_receive_queue, skb);
+ sctp_skb_set_owner_r_frag(skb, newsk);
+ }
+ }
+
+ /* Clean up any messages pending delivery due to partial
+ * delivery. Three cases:
+ * 1) No partial deliver; no work.
+ * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby.
+ * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue.
+ */
+ atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode);
+
+ if (atomic_read(&sctp_sk(oldsk)->pd_mode)) {
+ struct sk_buff_head *queue;
+
+ /* Decide which queue to move pd_lobby skbs to. */
+ if (assoc->ulpq.pd_mode) {
+ queue = &newsp->pd_lobby;
+ } else
+ queue = &newsk->sk_receive_queue;
+
+ /* Walk through the pd_lobby, looking for skbs that
+ * need to be moved to the new socket.
+ */
+ sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
+ event = sctp_skb2event(skb);
+ if (event->asoc == assoc) {
+ __skb_unlink(skb, &oldsp->pd_lobby);
+ __skb_queue_tail(queue, skb);
+ sctp_skb_set_owner_r_frag(skb, newsk);
+ }
+ }
+
+ /* Clear up any skbs waiting for the partial
+ * delivery to finish.
+ */
+ if (assoc->ulpq.pd_mode)
+ sctp_clear_pd(oldsk, NULL);
+
+ }
+
+ sctp_for_each_rx_skb(assoc, newsk, sctp_skb_set_owner_r_frag);
+
+ /* Set the type of socket to indicate that it is peeled off from the
+ * original UDP-style socket or created with the accept() call on a
+ * TCP-style socket.
+ */
+ newsp->type = type;
+
+ /* Mark the new socket "in-use" by the user so that any packets
+ * that may arrive on the association after we've moved it are
+ * queued to the backlog. This prevents a potential race between
+ * backlog processing on the old socket and new-packet processing
+ * on the new socket.
+ *
+ * The caller has just allocated newsk so we can guarantee that other
+ * paths won't try to lock it and then oldsk.
+ */
+ lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
+ sctp_for_each_tx_datachunk(assoc, true, sctp_clear_owner_w);
+ sctp_assoc_migrate(assoc, newsk);
+ sctp_for_each_tx_datachunk(assoc, false, sctp_set_owner_w);
+
+ /* If the association on the newsk is already closed before accept()
+ * is called, set RCV_SHUTDOWN flag.
+ */
+ if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) {
+ inet_sk_set_state(newsk, SCTP_SS_CLOSED);
+ newsk->sk_shutdown |= RCV_SHUTDOWN;
+ } else {
+ inet_sk_set_state(newsk, SCTP_SS_ESTABLISHED);
+ }
+
+ release_sock(newsk);
+
+ return 0;
+}
+
+
+/* This proto struct describes the ULP interface for SCTP. */
+struct proto sctp_prot = {
+ .name = "SCTP",
+ .owner = THIS_MODULE,
+ .close = sctp_close,
+ .disconnect = sctp_disconnect,
+ .accept = sctp_accept,
+ .ioctl = sctp_ioctl,
+ .init = sctp_init_sock,
+ .destroy = sctp_destroy_sock,
+ .shutdown = sctp_shutdown,
+ .setsockopt = sctp_setsockopt,
+ .getsockopt = sctp_getsockopt,
+ .bpf_bypass_getsockopt = sctp_bpf_bypass_getsockopt,
+ .sendmsg = sctp_sendmsg,
+ .recvmsg = sctp_recvmsg,
+ .bind = sctp_bind,
+ .bind_add = sctp_bind_add,
+ .backlog_rcv = sctp_backlog_rcv,
+ .hash = sctp_hash,
+ .unhash = sctp_unhash,
+ .no_autobind = true,
+ .obj_size = sizeof(struct sctp_sock),
+ .useroffset = offsetof(struct sctp_sock, subscribe),
+ .usersize = offsetof(struct sctp_sock, initmsg) -
+ offsetof(struct sctp_sock, subscribe) +
+ sizeof_field(struct sctp_sock, initmsg),
+ .sysctl_mem = sysctl_sctp_mem,
+ .sysctl_rmem = sysctl_sctp_rmem,
+ .sysctl_wmem = sysctl_sctp_wmem,
+ .memory_pressure = &sctp_memory_pressure,
+ .enter_memory_pressure = sctp_enter_memory_pressure,
+
+ .memory_allocated = &sctp_memory_allocated,
+ .per_cpu_fw_alloc = &sctp_memory_per_cpu_fw_alloc,
+
+ .sockets_allocated = &sctp_sockets_allocated,
+};
+
+#if IS_ENABLED(CONFIG_IPV6)
+
+static void sctp_v6_destruct_sock(struct sock *sk)
+{
+ sctp_destruct_common(sk);
+ inet6_sock_destruct(sk);
+}
+
+static int sctp_v6_init_sock(struct sock *sk)
+{
+ int ret = sctp_init_sock(sk);
+
+ if (!ret)
+ sk->sk_destruct = sctp_v6_destruct_sock;
+
+ return ret;
+}
+
+struct proto sctpv6_prot = {
+ .name = "SCTPv6",
+ .owner = THIS_MODULE,
+ .close = sctp_close,
+ .disconnect = sctp_disconnect,
+ .accept = sctp_accept,
+ .ioctl = sctp_ioctl,
+ .init = sctp_v6_init_sock,
+ .destroy = sctp_destroy_sock,
+ .shutdown = sctp_shutdown,
+ .setsockopt = sctp_setsockopt,
+ .getsockopt = sctp_getsockopt,
+ .bpf_bypass_getsockopt = sctp_bpf_bypass_getsockopt,
+ .sendmsg = sctp_sendmsg,
+ .recvmsg = sctp_recvmsg,
+ .bind = sctp_bind,
+ .bind_add = sctp_bind_add,
+ .backlog_rcv = sctp_backlog_rcv,
+ .hash = sctp_hash,
+ .unhash = sctp_unhash,
+ .no_autobind = true,
+ .obj_size = sizeof(struct sctp6_sock),
+ .useroffset = offsetof(struct sctp6_sock, sctp.subscribe),
+ .usersize = offsetof(struct sctp6_sock, sctp.initmsg) -
+ offsetof(struct sctp6_sock, sctp.subscribe) +
+ sizeof_field(struct sctp6_sock, sctp.initmsg),
+ .sysctl_mem = sysctl_sctp_mem,
+ .sysctl_rmem = sysctl_sctp_rmem,
+ .sysctl_wmem = sysctl_sctp_wmem,
+ .memory_pressure = &sctp_memory_pressure,
+ .enter_memory_pressure = sctp_enter_memory_pressure,
+
+ .memory_allocated = &sctp_memory_allocated,
+ .per_cpu_fw_alloc = &sctp_memory_per_cpu_fw_alloc,
+
+ .sockets_allocated = &sctp_sockets_allocated,
+};
+#endif /* IS_ENABLED(CONFIG_IPV6) */
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
new file mode 100644
index 000000000..ee6514af8
--- /dev/null
+++ b/net/sctp/stream.c
@@ -0,0 +1,1087 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001 Intel Corp.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * This file contains sctp stream manipulation primitives and helpers.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Xin Long <lucien.xin@gmail.com>
+ */
+
+#include <linux/list.h>
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+#include <net/sctp/stream_sched.h>
+
+static void sctp_stream_shrink_out(struct sctp_stream *stream, __u16 outcnt)
+{
+ struct sctp_association *asoc;
+ struct sctp_chunk *ch, *temp;
+ struct sctp_outq *outq;
+
+ asoc = container_of(stream, struct sctp_association, stream);
+ outq = &asoc->outqueue;
+
+ list_for_each_entry_safe(ch, temp, &outq->out_chunk_list, list) {
+ __u16 sid = sctp_chunk_stream_no(ch);
+
+ if (sid < outcnt)
+ continue;
+
+ sctp_sched_dequeue_common(outq, ch);
+ /* No need to call dequeue_done here because
+ * the chunks have not been scheduled yet.
+ */
+
+ /* Mark as failed send. */
+ sctp_chunk_fail(ch, (__force __u32)SCTP_ERROR_INV_STRM);
+ if (asoc->peer.prsctp_capable &&
+ SCTP_PR_PRIO_ENABLED(ch->sinfo.sinfo_flags))
+ asoc->sent_cnt_removable--;
+
+ sctp_chunk_free(ch);
+ }
+}
+
+static void sctp_stream_free_ext(struct sctp_stream *stream, __u16 sid)
+{
+ struct sctp_sched_ops *sched;
+
+ if (!SCTP_SO(stream, sid)->ext)
+ return;
+
+ sched = sctp_sched_ops_from_stream(stream);
+ sched->free_sid(stream, sid);
+ kfree(SCTP_SO(stream, sid)->ext);
+ SCTP_SO(stream, sid)->ext = NULL;
+}
+
+/* Migrates chunks from stream queues to new stream queues if needed,
+ * but not across associations. Also, removes chunks destined for streams
+ * higher than the new max.
+ */
+static void sctp_stream_outq_migrate(struct sctp_stream *stream,
+ struct sctp_stream *new, __u16 outcnt)
+{
+ int i;
+
+ if (stream->outcnt > outcnt)
+ sctp_stream_shrink_out(stream, outcnt);
+
+ if (new) {
+ /* Here we actually move the old ext stuff into the new
+ * buffer, because we want to keep it. Then
+ * sctp_stream_update will swap ->out pointers.
+ */
+ for (i = 0; i < outcnt; i++) {
+ sctp_stream_free_ext(new, i);
+ SCTP_SO(new, i)->ext = SCTP_SO(stream, i)->ext;
+ SCTP_SO(stream, i)->ext = NULL;
+ }
+ }
+
+ for (i = outcnt; i < stream->outcnt; i++)
+ sctp_stream_free_ext(stream, i);
+}
+
+static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
+ gfp_t gfp)
+{
+ int ret;
+
+ if (outcnt <= stream->outcnt)
+ goto out;
+
+ ret = genradix_prealloc(&stream->out, outcnt, gfp);
+ if (ret)
+ return ret;
+
+out:
+ stream->outcnt = outcnt;
+ return 0;
+}
+
+static int sctp_stream_alloc_in(struct sctp_stream *stream, __u16 incnt,
+ gfp_t gfp)
+{
+ int ret;
+
+ if (incnt <= stream->incnt)
+ goto out;
+
+ ret = genradix_prealloc(&stream->in, incnt, gfp);
+ if (ret)
+ return ret;
+
+out:
+ stream->incnt = incnt;
+ return 0;
+}
+
+int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
+ gfp_t gfp)
+{
+ struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
+ int i, ret = 0;
+
+ gfp |= __GFP_NOWARN;
+
+ /* Initial stream->out size may be very big, so free it and alloc
+ * a new one with new outcnt to save memory if needed.
+ */
+ if (outcnt == stream->outcnt)
+ goto handle_in;
+
+ /* Filter out chunks queued on streams that won't exist anymore */
+ sched->unsched_all(stream);
+ sctp_stream_outq_migrate(stream, NULL, outcnt);
+ sched->sched_all(stream);
+
+ ret = sctp_stream_alloc_out(stream, outcnt, gfp);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < stream->outcnt; i++)
+ SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
+
+handle_in:
+ sctp_stream_interleave_init(stream);
+ if (!incnt)
+ return 0;
+
+ return sctp_stream_alloc_in(stream, incnt, gfp);
+}
+
+int sctp_stream_init_ext(struct sctp_stream *stream, __u16 sid)
+{
+ struct sctp_stream_out_ext *soute;
+ int ret;
+
+ soute = kzalloc(sizeof(*soute), GFP_KERNEL);
+ if (!soute)
+ return -ENOMEM;
+ SCTP_SO(stream, sid)->ext = soute;
+
+ ret = sctp_sched_init_sid(stream, sid, GFP_KERNEL);
+ if (ret) {
+ kfree(SCTP_SO(stream, sid)->ext);
+ SCTP_SO(stream, sid)->ext = NULL;
+ }
+
+ return ret;
+}
+
+void sctp_stream_free(struct sctp_stream *stream)
+{
+ struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
+ int i;
+
+ sched->unsched_all(stream);
+ for (i = 0; i < stream->outcnt; i++)
+ sctp_stream_free_ext(stream, i);
+ genradix_free(&stream->out);
+ genradix_free(&stream->in);
+}
+
+void sctp_stream_clear(struct sctp_stream *stream)
+{
+ int i;
+
+ for (i = 0; i < stream->outcnt; i++) {
+ SCTP_SO(stream, i)->mid = 0;
+ SCTP_SO(stream, i)->mid_uo = 0;
+ }
+
+ for (i = 0; i < stream->incnt; i++)
+ SCTP_SI(stream, i)->mid = 0;
+}
+
+void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new)
+{
+ struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
+
+ sched->unsched_all(stream);
+ sctp_stream_outq_migrate(stream, new, new->outcnt);
+ sctp_stream_free(stream);
+
+ stream->out = new->out;
+ stream->in = new->in;
+ stream->outcnt = new->outcnt;
+ stream->incnt = new->incnt;
+
+ sched->sched_all(stream);
+
+ new->out.tree.root = NULL;
+ new->in.tree.root = NULL;
+ new->outcnt = 0;
+ new->incnt = 0;
+}
+
+static int sctp_send_reconf(struct sctp_association *asoc,
+ struct sctp_chunk *chunk)
+{
+ int retval = 0;
+
+ retval = sctp_primitive_RECONF(asoc->base.net, asoc, chunk);
+ if (retval)
+ sctp_chunk_free(chunk);
+
+ return retval;
+}
+
+static bool sctp_stream_outq_is_empty(struct sctp_stream *stream,
+ __u16 str_nums, __be16 *str_list)
+{
+ struct sctp_association *asoc;
+ __u16 i;
+
+ asoc = container_of(stream, struct sctp_association, stream);
+ if (!asoc->outqueue.out_qlen)
+ return true;
+
+ if (!str_nums)
+ return false;
+
+ for (i = 0; i < str_nums; i++) {
+ __u16 sid = ntohs(str_list[i]);
+
+ if (SCTP_SO(stream, sid)->ext &&
+ !list_empty(&SCTP_SO(stream, sid)->ext->outq))
+ return false;
+ }
+
+ return true;
+}
+
+int sctp_send_reset_streams(struct sctp_association *asoc,
+ struct sctp_reset_streams *params)
+{
+ struct sctp_stream *stream = &asoc->stream;
+ __u16 i, str_nums, *str_list;
+ struct sctp_chunk *chunk;
+ int retval = -EINVAL;
+ __be16 *nstr_list;
+ bool out, in;
+
+ if (!asoc->peer.reconf_capable ||
+ !(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ)) {
+ retval = -ENOPROTOOPT;
+ goto out;
+ }
+
+ if (asoc->strreset_outstanding) {
+ retval = -EINPROGRESS;
+ goto out;
+ }
+
+ out = params->srs_flags & SCTP_STREAM_RESET_OUTGOING;
+ in = params->srs_flags & SCTP_STREAM_RESET_INCOMING;
+ if (!out && !in)
+ goto out;
+
+ str_nums = params->srs_number_streams;
+ str_list = params->srs_stream_list;
+ if (str_nums) {
+ int param_len = 0;
+
+ if (out) {
+ for (i = 0; i < str_nums; i++)
+ if (str_list[i] >= stream->outcnt)
+ goto out;
+
+ param_len = str_nums * sizeof(__u16) +
+ sizeof(struct sctp_strreset_outreq);
+ }
+
+ if (in) {
+ for (i = 0; i < str_nums; i++)
+ if (str_list[i] >= stream->incnt)
+ goto out;
+
+ param_len += str_nums * sizeof(__u16) +
+ sizeof(struct sctp_strreset_inreq);
+ }
+
+ if (param_len > SCTP_MAX_CHUNK_LEN -
+ sizeof(struct sctp_reconf_chunk))
+ goto out;
+ }
+
+ nstr_list = kcalloc(str_nums, sizeof(__be16), GFP_KERNEL);
+ if (!nstr_list) {
+ retval = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < str_nums; i++)
+ nstr_list[i] = htons(str_list[i]);
+
+ if (out && !sctp_stream_outq_is_empty(stream, str_nums, nstr_list)) {
+ kfree(nstr_list);
+ retval = -EAGAIN;
+ goto out;
+ }
+
+ chunk = sctp_make_strreset_req(asoc, str_nums, nstr_list, out, in);
+
+ kfree(nstr_list);
+
+ if (!chunk) {
+ retval = -ENOMEM;
+ goto out;
+ }
+
+ if (out) {
+ if (str_nums)
+ for (i = 0; i < str_nums; i++)
+ SCTP_SO(stream, str_list[i])->state =
+ SCTP_STREAM_CLOSED;
+ else
+ for (i = 0; i < stream->outcnt; i++)
+ SCTP_SO(stream, i)->state = SCTP_STREAM_CLOSED;
+ }
+
+ asoc->strreset_chunk = chunk;
+ sctp_chunk_hold(asoc->strreset_chunk);
+
+ retval = sctp_send_reconf(asoc, chunk);
+ if (retval) {
+ sctp_chunk_put(asoc->strreset_chunk);
+ asoc->strreset_chunk = NULL;
+ if (!out)
+ goto out;
+
+ if (str_nums)
+ for (i = 0; i < str_nums; i++)
+ SCTP_SO(stream, str_list[i])->state =
+ SCTP_STREAM_OPEN;
+ else
+ for (i = 0; i < stream->outcnt; i++)
+ SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
+
+ goto out;
+ }
+
+ asoc->strreset_outstanding = out + in;
+
+out:
+ return retval;
+}
+
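+/* An illustrative userspace sketch of the setsockopt() call that ends up
+ * here: resetting outgoing streams 0 and 1.  'fd' and the association id
+ * 'aid' are hypothetical, and stream reconfiguration must have been
+ * negotiated on the association for the request not to be rejected.
+ *
+ *	size_t optlen = sizeof(struct sctp_reset_streams) + 2 * sizeof(uint16_t);
+ *	struct sctp_reset_streams *srs = calloc(1, optlen);
+ *
+ *	srs->srs_assoc_id = aid;
+ *	srs->srs_flags = SCTP_STREAM_RESET_OUTGOING;
+ *	srs->srs_number_streams = 2;
+ *	srs->srs_stream_list[0] = 0;
+ *	srs->srs_stream_list[1] = 1;
+ *
+ *	setsockopt(fd, IPPROTO_SCTP, SCTP_RESET_STREAMS, srs, optlen);
+ *	free(srs);
+ */
+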
+int sctp_send_reset_assoc(struct sctp_association *asoc)
+{
+ struct sctp_stream *stream = &asoc->stream;
+ struct sctp_chunk *chunk = NULL;
+ int retval;
+ __u16 i;
+
+ if (!asoc->peer.reconf_capable ||
+ !(asoc->strreset_enable & SCTP_ENABLE_RESET_ASSOC_REQ))
+ return -ENOPROTOOPT;
+
+ if (asoc->strreset_outstanding)
+ return -EINPROGRESS;
+
+ if (!sctp_outq_is_empty(&asoc->outqueue))
+ return -EAGAIN;
+
+ chunk = sctp_make_strreset_tsnreq(asoc);
+ if (!chunk)
+ return -ENOMEM;
+
+ /* Block further xmit of data until this request is completed */
+ for (i = 0; i < stream->outcnt; i++)
+ SCTP_SO(stream, i)->state = SCTP_STREAM_CLOSED;
+
+ asoc->strreset_chunk = chunk;
+ sctp_chunk_hold(asoc->strreset_chunk);
+
+ retval = sctp_send_reconf(asoc, chunk);
+ if (retval) {
+ sctp_chunk_put(asoc->strreset_chunk);
+ asoc->strreset_chunk = NULL;
+
+ for (i = 0; i < stream->outcnt; i++)
+ SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
+
+ return retval;
+ }
+
+ asoc->strreset_outstanding = 1;
+
+ return 0;
+}
+
+int sctp_send_add_streams(struct sctp_association *asoc,
+ struct sctp_add_streams *params)
+{
+ struct sctp_stream *stream = &asoc->stream;
+ struct sctp_chunk *chunk = NULL;
+ int retval;
+ __u32 outcnt, incnt;
+ __u16 out, in;
+
+ if (!asoc->peer.reconf_capable ||
+ !(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
+ retval = -ENOPROTOOPT;
+ goto out;
+ }
+
+ if (asoc->strreset_outstanding) {
+ retval = -EINPROGRESS;
+ goto out;
+ }
+
+ out = params->sas_outstrms;
+ in = params->sas_instrms;
+ outcnt = stream->outcnt + out;
+ incnt = stream->incnt + in;
+ if (outcnt > SCTP_MAX_STREAM || incnt > SCTP_MAX_STREAM ||
+ (!out && !in)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ if (out) {
+ retval = sctp_stream_alloc_out(stream, outcnt, GFP_KERNEL);
+ if (retval)
+ goto out;
+ }
+
+ chunk = sctp_make_strreset_addstrm(asoc, out, in);
+ if (!chunk) {
+ retval = -ENOMEM;
+ goto out;
+ }
+
+ asoc->strreset_chunk = chunk;
+ sctp_chunk_hold(asoc->strreset_chunk);
+
+ retval = sctp_send_reconf(asoc, chunk);
+ if (retval) {
+ sctp_chunk_put(asoc->strreset_chunk);
+ asoc->strreset_chunk = NULL;
+ goto out;
+ }
+
+ asoc->strreset_outstanding = !!out + !!in;
+
+out:
+ return retval;
+}
+
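+/* An illustrative userspace sketch of the setsockopt() call handled above:
+ * asking the peer to accept two additional outgoing streams.  'fd' and the
+ * association id 'aid' are hypothetical.
+ *
+ *	struct sctp_add_streams sas = {
+ *		.sas_assoc_id = aid,
+ *		.sas_outstrms = 2,
+ *		.sas_instrms  = 0,
+ *	};
+ *
+ *	setsockopt(fd, IPPROTO_SCTP, SCTP_ADD_STREAMS, &sas, sizeof(sas));
+ */
+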
+static struct sctp_paramhdr *sctp_chunk_lookup_strreset_param(
+ struct sctp_association *asoc, __be32 resp_seq,
+ __be16 type)
+{
+ struct sctp_chunk *chunk = asoc->strreset_chunk;
+ struct sctp_reconf_chunk *hdr;
+ union sctp_params param;
+
+ if (!chunk)
+ return NULL;
+
+ hdr = (struct sctp_reconf_chunk *)chunk->chunk_hdr;
+ sctp_walk_params(param, hdr, params) {
+ /* sctp_strreset_tsnreq is actually the basic structure
+ * of all stream reconf params, so it's safe to use it
+ * to access request_seq.
+ */
+ struct sctp_strreset_tsnreq *req = param.v;
+
+ if ((!resp_seq || req->request_seq == resp_seq) &&
+ (!type || type == req->param_hdr.type))
+ return param.v;
+ }
+
+ return NULL;
+}
+
+static void sctp_update_strreset_result(struct sctp_association *asoc,
+ __u32 result)
+{
+ asoc->strreset_result[1] = asoc->strreset_result[0];
+ asoc->strreset_result[0] = result;
+}
+
+struct sctp_chunk *sctp_process_strreset_outreq(
+ struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_ulpevent **evp)
+{
+ struct sctp_strreset_outreq *outreq = param.v;
+ struct sctp_stream *stream = &asoc->stream;
+ __u32 result = SCTP_STRRESET_DENIED;
+ __be16 *str_p = NULL;
+ __u32 request_seq;
+ __u16 i, nums;
+
+ request_seq = ntohl(outreq->request_seq);
+
+ if (ntohl(outreq->send_reset_at_tsn) >
+ sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)) {
+ result = SCTP_STRRESET_IN_PROGRESS;
+ goto err;
+ }
+
+ if (TSN_lt(asoc->strreset_inseq, request_seq) ||
+ TSN_lt(request_seq, asoc->strreset_inseq - 2)) {
+ result = SCTP_STRRESET_ERR_BAD_SEQNO;
+ goto err;
+ } else if (TSN_lt(request_seq, asoc->strreset_inseq)) {
+ i = asoc->strreset_inseq - request_seq - 1;
+ result = asoc->strreset_result[i];
+ goto err;
+ }
+ asoc->strreset_inseq++;
+
+ /* Check strreset_enable after the inseq increment: the sender cannot
+ * tell that the peer has strreset disabled just from receiving a
+ * response with result denied. This also keeps us consistent with bsd.
+ */
+ if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ))
+ goto out;
+
+ nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16);
+ str_p = outreq->list_of_streams;
+ for (i = 0; i < nums; i++) {
+ if (ntohs(str_p[i]) >= stream->incnt) {
+ result = SCTP_STRRESET_ERR_WRONG_SSN;
+ goto out;
+ }
+ }
+
+ if (asoc->strreset_chunk) {
+ if (!sctp_chunk_lookup_strreset_param(
+ asoc, outreq->response_seq,
+ SCTP_PARAM_RESET_IN_REQUEST)) {
+ /* same processing as when outstanding isn't 0 */
+ result = SCTP_STRRESET_ERR_IN_PROGRESS;
+ goto out;
+ }
+
+ asoc->strreset_outstanding--;
+ asoc->strreset_outseq++;
+
+ if (!asoc->strreset_outstanding) {
+ struct sctp_transport *t;
+
+ t = asoc->strreset_chunk->transport;
+ if (del_timer(&t->reconf_timer))
+ sctp_transport_put(t);
+
+ sctp_chunk_put(asoc->strreset_chunk);
+ asoc->strreset_chunk = NULL;
+ }
+ }
+
+ if (nums)
+ for (i = 0; i < nums; i++)
+ SCTP_SI(stream, ntohs(str_p[i]))->mid = 0;
+ else
+ for (i = 0; i < stream->incnt; i++)
+ SCTP_SI(stream, i)->mid = 0;
+
+ result = SCTP_STRRESET_PERFORMED;
+
+ *evp = sctp_ulpevent_make_stream_reset_event(asoc,
+ SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC);
+
+out:
+ sctp_update_strreset_result(asoc, result);
+err:
+ return sctp_make_strreset_resp(asoc, result, request_seq);
+}
+
+struct sctp_chunk *sctp_process_strreset_inreq(
+ struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_ulpevent **evp)
+{
+ struct sctp_strreset_inreq *inreq = param.v;
+ struct sctp_stream *stream = &asoc->stream;
+ __u32 result = SCTP_STRRESET_DENIED;
+ struct sctp_chunk *chunk = NULL;
+ __u32 request_seq;
+ __u16 i, nums;
+ __be16 *str_p;
+
+ request_seq = ntohl(inreq->request_seq);
+ if (TSN_lt(asoc->strreset_inseq, request_seq) ||
+ TSN_lt(request_seq, asoc->strreset_inseq - 2)) {
+ result = SCTP_STRRESET_ERR_BAD_SEQNO;
+ goto err;
+ } else if (TSN_lt(request_seq, asoc->strreset_inseq)) {
+ i = asoc->strreset_inseq - request_seq - 1;
+ result = asoc->strreset_result[i];
+ if (result == SCTP_STRRESET_PERFORMED)
+ return NULL;
+ goto err;
+ }
+ asoc->strreset_inseq++;
+
+ if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ))
+ goto out;
+
+ if (asoc->strreset_outstanding) {
+ result = SCTP_STRRESET_ERR_IN_PROGRESS;
+ goto out;
+ }
+
+ nums = (ntohs(param.p->length) - sizeof(*inreq)) / sizeof(__u16);
+ str_p = inreq->list_of_streams;
+ for (i = 0; i < nums; i++) {
+ if (ntohs(str_p[i]) >= stream->outcnt) {
+ result = SCTP_STRRESET_ERR_WRONG_SSN;
+ goto out;
+ }
+ }
+
+ if (!sctp_stream_outq_is_empty(stream, nums, str_p)) {
+ result = SCTP_STRRESET_IN_PROGRESS;
+ asoc->strreset_inseq--;
+ goto err;
+ }
+
+ chunk = sctp_make_strreset_req(asoc, nums, str_p, 1, 0);
+ if (!chunk)
+ goto out;
+
+ if (nums)
+ for (i = 0; i < nums; i++)
+ SCTP_SO(stream, ntohs(str_p[i]))->state =
+ SCTP_STREAM_CLOSED;
+ else
+ for (i = 0; i < stream->outcnt; i++)
+ SCTP_SO(stream, i)->state = SCTP_STREAM_CLOSED;
+
+ asoc->strreset_chunk = chunk;
+ asoc->strreset_outstanding = 1;
+ sctp_chunk_hold(asoc->strreset_chunk);
+
+ result = SCTP_STRRESET_PERFORMED;
+
+out:
+ sctp_update_strreset_result(asoc, result);
+err:
+ if (!chunk)
+ chunk = sctp_make_strreset_resp(asoc, result, request_seq);
+
+ return chunk;
+}
+
+struct sctp_chunk *sctp_process_strreset_tsnreq(
+ struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_ulpevent **evp)
+{
+ __u32 init_tsn = 0, next_tsn = 0, max_tsn_seen;
+ struct sctp_strreset_tsnreq *tsnreq = param.v;
+ struct sctp_stream *stream = &asoc->stream;
+ __u32 result = SCTP_STRRESET_DENIED;
+ __u32 request_seq;
+ __u16 i;
+
+ request_seq = ntohl(tsnreq->request_seq);
+ if (TSN_lt(asoc->strreset_inseq, request_seq) ||
+ TSN_lt(request_seq, asoc->strreset_inseq - 2)) {
+ result = SCTP_STRRESET_ERR_BAD_SEQNO;
+ goto err;
+ } else if (TSN_lt(request_seq, asoc->strreset_inseq)) {
+ i = asoc->strreset_inseq - request_seq - 1;
+ result = asoc->strreset_result[i];
+ if (result == SCTP_STRRESET_PERFORMED) {
+ next_tsn = asoc->ctsn_ack_point + 1;
+ init_tsn =
+ sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1;
+ }
+ goto err;
+ }
+
+ if (!sctp_outq_is_empty(&asoc->outqueue)) {
+ result = SCTP_STRRESET_IN_PROGRESS;
+ goto err;
+ }
+
+ asoc->strreset_inseq++;
+
+ if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_ASSOC_REQ))
+ goto out;
+
+ if (asoc->strreset_outstanding) {
+ result = SCTP_STRRESET_ERR_IN_PROGRESS;
+ goto out;
+ }
+
+ /* G4: The same processing as though a FWD-TSN chunk (as defined in
+ * [RFC3758]) with all streams affected and a new cumulative TSN
+ * ACK of the Receiver's Next TSN minus 1 were received MUST be
+ * performed.
+ */
+ max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);
+ asoc->stream.si->report_ftsn(&asoc->ulpq, max_tsn_seen);
+
+ /* G1: Compute an appropriate value for the Receiver's Next TSN -- the
+ * TSN that the peer should use to send the next DATA chunk. The
+ * value SHOULD be the smallest TSN not acknowledged by the
+ * receiver of the request plus 2^31.
+ */
+ init_tsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + (1 << 31);
+ sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
+ init_tsn, GFP_ATOMIC);
+
+ /* G3: The same processing as though a SACK chunk with no gap report
+ * and a cumulative TSN ACK of the Sender's Next TSN minus 1 were
+ * received MUST be performed.
+ */
+ sctp_outq_free(&asoc->outqueue);
+
+ /* G2: Compute an appropriate value for the local endpoint's next TSN,
+ * i.e., the next TSN assigned by the receiver of the SSN/TSN reset
+ * chunk. The value SHOULD be the highest TSN sent by the receiver
+ * of the request plus 1.
+ */
+ next_tsn = asoc->next_tsn;
+ asoc->ctsn_ack_point = next_tsn - 1;
+ asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
+
+ /* G5: The next expected and outgoing SSNs MUST be reset to 0 for all
+ * incoming and outgoing streams.
+ */
+ for (i = 0; i < stream->outcnt; i++) {
+ SCTP_SO(stream, i)->mid = 0;
+ SCTP_SO(stream, i)->mid_uo = 0;
+ }
+ for (i = 0; i < stream->incnt; i++)
+ SCTP_SI(stream, i)->mid = 0;
+
+ result = SCTP_STRRESET_PERFORMED;
+
+ *evp = sctp_ulpevent_make_assoc_reset_event(asoc, 0, init_tsn,
+ next_tsn, GFP_ATOMIC);
+
+out:
+ sctp_update_strreset_result(asoc, result);
+err:
+ return sctp_make_strreset_tsnresp(asoc, result, request_seq,
+ next_tsn, init_tsn);
+}
+
+struct sctp_chunk *sctp_process_strreset_addstrm_out(
+ struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_ulpevent **evp)
+{
+ struct sctp_strreset_addstrm *addstrm = param.v;
+ struct sctp_stream *stream = &asoc->stream;
+ __u32 result = SCTP_STRRESET_DENIED;
+ __u32 request_seq, incnt;
+ __u16 in, i;
+
+ request_seq = ntohl(addstrm->request_seq);
+ if (TSN_lt(asoc->strreset_inseq, request_seq) ||
+ TSN_lt(request_seq, asoc->strreset_inseq - 2)) {
+ result = SCTP_STRRESET_ERR_BAD_SEQNO;
+ goto err;
+ } else if (TSN_lt(request_seq, asoc->strreset_inseq)) {
+ i = asoc->strreset_inseq - request_seq - 1;
+ result = asoc->strreset_result[i];
+ goto err;
+ }
+ asoc->strreset_inseq++;
+
+ if (!(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ))
+ goto out;
+
+ in = ntohs(addstrm->number_of_streams);
+ incnt = stream->incnt + in;
+ if (!in || incnt > SCTP_MAX_STREAM)
+ goto out;
+
+ if (sctp_stream_alloc_in(stream, incnt, GFP_ATOMIC))
+ goto out;
+
+ if (asoc->strreset_chunk) {
+ if (!sctp_chunk_lookup_strreset_param(
+ asoc, 0, SCTP_PARAM_RESET_ADD_IN_STREAMS)) {
+ /* same processing as when outstanding isn't 0 */
+ result = SCTP_STRRESET_ERR_IN_PROGRESS;
+ goto out;
+ }
+
+ asoc->strreset_outstanding--;
+ asoc->strreset_outseq++;
+
+ if (!asoc->strreset_outstanding) {
+ struct sctp_transport *t;
+
+ t = asoc->strreset_chunk->transport;
+ if (del_timer(&t->reconf_timer))
+ sctp_transport_put(t);
+
+ sctp_chunk_put(asoc->strreset_chunk);
+ asoc->strreset_chunk = NULL;
+ }
+ }
+
+ stream->incnt = incnt;
+
+ result = SCTP_STRRESET_PERFORMED;
+
+ *evp = sctp_ulpevent_make_stream_change_event(asoc,
+ 0, ntohs(addstrm->number_of_streams), 0, GFP_ATOMIC);
+
+out:
+ sctp_update_strreset_result(asoc, result);
+err:
+ return sctp_make_strreset_resp(asoc, result, request_seq);
+}
+
+struct sctp_chunk *sctp_process_strreset_addstrm_in(
+ struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_ulpevent **evp)
+{
+ struct sctp_strreset_addstrm *addstrm = param.v;
+ struct sctp_stream *stream = &asoc->stream;
+ __u32 result = SCTP_STRRESET_DENIED;
+ struct sctp_chunk *chunk = NULL;
+ __u32 request_seq, outcnt;
+ __u16 out, i;
+ int ret;
+
+ request_seq = ntohl(addstrm->request_seq);
+ if (TSN_lt(asoc->strreset_inseq, request_seq) ||
+ TSN_lt(request_seq, asoc->strreset_inseq - 2)) {
+ result = SCTP_STRRESET_ERR_BAD_SEQNO;
+ goto err;
+ } else if (TSN_lt(request_seq, asoc->strreset_inseq)) {
+ i = asoc->strreset_inseq - request_seq - 1;
+ result = asoc->strreset_result[i];
+ if (result == SCTP_STRRESET_PERFORMED)
+ return NULL;
+ goto err;
+ }
+ asoc->strreset_inseq++;
+
+ if (!(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ))
+ goto out;
+
+ if (asoc->strreset_outstanding) {
+ result = SCTP_STRRESET_ERR_IN_PROGRESS;
+ goto out;
+ }
+
+ out = ntohs(addstrm->number_of_streams);
+ outcnt = stream->outcnt + out;
+ if (!out || outcnt > SCTP_MAX_STREAM)
+ goto out;
+
+ ret = sctp_stream_alloc_out(stream, outcnt, GFP_ATOMIC);
+ if (ret)
+ goto out;
+
+ chunk = sctp_make_strreset_addstrm(asoc, out, 0);
+ if (!chunk)
+ goto out;
+
+ asoc->strreset_chunk = chunk;
+ asoc->strreset_outstanding = 1;
+ sctp_chunk_hold(asoc->strreset_chunk);
+
+ stream->outcnt = outcnt;
+
+ result = SCTP_STRRESET_PERFORMED;
+
+out:
+ sctp_update_strreset_result(asoc, result);
+err:
+ if (!chunk)
+ chunk = sctp_make_strreset_resp(asoc, result, request_seq);
+
+ return chunk;
+}
+
+struct sctp_chunk *sctp_process_strreset_resp(
+ struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_ulpevent **evp)
+{
+ struct sctp_stream *stream = &asoc->stream;
+ struct sctp_strreset_resp *resp = param.v;
+ struct sctp_transport *t;
+ __u16 i, nums, flags = 0;
+ struct sctp_paramhdr *req;
+ __u32 result;
+
+ req = sctp_chunk_lookup_strreset_param(asoc, resp->response_seq, 0);
+ if (!req)
+ return NULL;
+
+ result = ntohl(resp->result);
+ if (result != SCTP_STRRESET_PERFORMED) {
+ /* if in progress, do nothing but retransmit */
+ if (result == SCTP_STRRESET_IN_PROGRESS)
+ return NULL;
+ else if (result == SCTP_STRRESET_DENIED)
+ flags = SCTP_STREAM_RESET_DENIED;
+ else
+ flags = SCTP_STREAM_RESET_FAILED;
+ }
+
+ if (req->type == SCTP_PARAM_RESET_OUT_REQUEST) {
+ struct sctp_strreset_outreq *outreq;
+ __be16 *str_p;
+
+ outreq = (struct sctp_strreset_outreq *)req;
+ str_p = outreq->list_of_streams;
+ nums = (ntohs(outreq->param_hdr.length) - sizeof(*outreq)) /
+ sizeof(__u16);
+
+ if (result == SCTP_STRRESET_PERFORMED) {
+ struct sctp_stream_out *sout;
+ if (nums) {
+ for (i = 0; i < nums; i++) {
+ sout = SCTP_SO(stream, ntohs(str_p[i]));
+ sout->mid = 0;
+ sout->mid_uo = 0;
+ }
+ } else {
+ for (i = 0; i < stream->outcnt; i++) {
+ sout = SCTP_SO(stream, i);
+ sout->mid = 0;
+ sout->mid_uo = 0;
+ }
+ }
+ }
+
+ flags |= SCTP_STREAM_RESET_OUTGOING_SSN;
+
+ for (i = 0; i < stream->outcnt; i++)
+ SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
+
+ *evp = sctp_ulpevent_make_stream_reset_event(asoc, flags,
+ nums, str_p, GFP_ATOMIC);
+ } else if (req->type == SCTP_PARAM_RESET_IN_REQUEST) {
+ struct sctp_strreset_inreq *inreq;
+ __be16 *str_p;
+
+ /* if the result is performed, it's impossible for inreq */
+ if (result == SCTP_STRRESET_PERFORMED)
+ return NULL;
+
+ inreq = (struct sctp_strreset_inreq *)req;
+ str_p = inreq->list_of_streams;
+ nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) /
+ sizeof(__u16);
+
+ flags |= SCTP_STREAM_RESET_INCOMING_SSN;
+
+ *evp = sctp_ulpevent_make_stream_reset_event(asoc, flags,
+ nums, str_p, GFP_ATOMIC);
+ } else if (req->type == SCTP_PARAM_RESET_TSN_REQUEST) {
+ struct sctp_strreset_resptsn *resptsn;
+ __u32 stsn, rtsn;
+
+ /* check for resptsn, as sctp_verify_reconf didn't do it */
+ if (ntohs(param.p->length) != sizeof(*resptsn))
+ return NULL;
+
+ resptsn = (struct sctp_strreset_resptsn *)resp;
+ stsn = ntohl(resptsn->senders_next_tsn);
+ rtsn = ntohl(resptsn->receivers_next_tsn);
+
+ if (result == SCTP_STRRESET_PERFORMED) {
+ __u32 mtsn = sctp_tsnmap_get_max_tsn_seen(
+ &asoc->peer.tsn_map);
+ LIST_HEAD(temp);
+
+ asoc->stream.si->report_ftsn(&asoc->ulpq, mtsn);
+
+ sctp_tsnmap_init(&asoc->peer.tsn_map,
+ SCTP_TSN_MAP_INITIAL,
+ stsn, GFP_ATOMIC);
+
+ /* Clean up sacked and abandoned queues only. As the
+ * out_chunk_list may not be empty, splice it to temp,
+ * then get it back after sctp_outq_free is done.
+ */
+ list_splice_init(&asoc->outqueue.out_chunk_list, &temp);
+ sctp_outq_free(&asoc->outqueue);
+ list_splice_init(&temp, &asoc->outqueue.out_chunk_list);
+
+ asoc->next_tsn = rtsn;
+ asoc->ctsn_ack_point = asoc->next_tsn - 1;
+ asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
+
+ for (i = 0; i < stream->outcnt; i++) {
+ SCTP_SO(stream, i)->mid = 0;
+ SCTP_SO(stream, i)->mid_uo = 0;
+ }
+ for (i = 0; i < stream->incnt; i++)
+ SCTP_SI(stream, i)->mid = 0;
+ }
+
+ for (i = 0; i < stream->outcnt; i++)
+ SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
+
+ *evp = sctp_ulpevent_make_assoc_reset_event(asoc, flags,
+ stsn, rtsn, GFP_ATOMIC);
+ } else if (req->type == SCTP_PARAM_RESET_ADD_OUT_STREAMS) {
+ struct sctp_strreset_addstrm *addstrm;
+ __u16 number;
+
+ addstrm = (struct sctp_strreset_addstrm *)req;
+ nums = ntohs(addstrm->number_of_streams);
+ number = stream->outcnt - nums;
+
+ if (result == SCTP_STRRESET_PERFORMED) {
+ for (i = number; i < stream->outcnt; i++)
+ SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
+ } else {
+ sctp_stream_shrink_out(stream, number);
+ stream->outcnt = number;
+ }
+
+ *evp = sctp_ulpevent_make_stream_change_event(asoc, flags,
+ 0, nums, GFP_ATOMIC);
+ } else if (req->type == SCTP_PARAM_RESET_ADD_IN_STREAMS) {
+ struct sctp_strreset_addstrm *addstrm;
+
+ /* if the result is performed, it's impossible for addstrm in
+ * request.
+ */
+ if (result == SCTP_STRRESET_PERFORMED)
+ return NULL;
+
+ addstrm = (struct sctp_strreset_addstrm *)req;
+ nums = ntohs(addstrm->number_of_streams);
+
+ *evp = sctp_ulpevent_make_stream_change_event(asoc, flags,
+ nums, 0, GFP_ATOMIC);
+ }
+
+ asoc->strreset_outstanding--;
+ asoc->strreset_outseq++;
+
+ /* remove everything for this reconf request */
+ if (!asoc->strreset_outstanding) {
+ t = asoc->strreset_chunk->transport;
+ if (del_timer(&t->reconf_timer))
+ sctp_transport_put(t);
+
+ sctp_chunk_put(asoc->strreset_chunk);
+ asoc->strreset_chunk = NULL;
+ }
+
+ return NULL;
+}
diff --git a/net/sctp/stream_interleave.c b/net/sctp/stream_interleave.c
new file mode 100644
index 000000000..4fbd6d052
--- /dev/null
+++ b/net/sctp/stream_interleave.c
@@ -0,0 +1,1359 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* SCTP kernel implementation
+ * (C) Copyright Red Hat Inc. 2017
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * These functions implement sctp stream message interleaving, mostly
+ * the processing of I-DATA and I-FORWARD-TSN chunks.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Xin Long <lucien.xin@gmail.com>
+ */
+
+#include <net/busy_poll.h>
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+#include <net/sctp/ulpevent.h>
+#include <linux/sctp.h>
+
+static struct sctp_chunk *sctp_make_idatafrag_empty(
+ const struct sctp_association *asoc,
+ const struct sctp_sndrcvinfo *sinfo,
+ int len, __u8 flags, gfp_t gfp)
+{
+ struct sctp_chunk *retval;
+ struct sctp_idatahdr dp;
+
+ memset(&dp, 0, sizeof(dp));
+ dp.stream = htons(sinfo->sinfo_stream);
+
+ if (sinfo->sinfo_flags & SCTP_UNORDERED)
+ flags |= SCTP_DATA_UNORDERED;
+
+ retval = sctp_make_idata(asoc, flags, sizeof(dp) + len, gfp);
+ if (!retval)
+ return NULL;
+
+ retval->subh.idata_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
+ memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));
+
+ return retval;
+}
+
+static void sctp_chunk_assign_mid(struct sctp_chunk *chunk)
+{
+ struct sctp_stream *stream;
+ struct sctp_chunk *lchunk;
+ __u32 cfsn = 0;
+ __u16 sid;
+
+ if (chunk->has_mid)
+ return;
+
+ sid = sctp_chunk_stream_no(chunk);
+ stream = &chunk->asoc->stream;
+
+ list_for_each_entry(lchunk, &chunk->msg->chunks, frag_list) {
+ struct sctp_idatahdr *hdr;
+ __u32 mid;
+
+ lchunk->has_mid = 1;
+
+ hdr = lchunk->subh.idata_hdr;
+
+ if (lchunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG)
+ hdr->ppid = lchunk->sinfo.sinfo_ppid;
+ else
+ hdr->fsn = htonl(cfsn++);
+
+ if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
+ mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
+ sctp_mid_uo_next(stream, out, sid) :
+ sctp_mid_uo_peek(stream, out, sid);
+ } else {
+ mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
+ sctp_mid_next(stream, out, sid) :
+ sctp_mid_peek(stream, out, sid);
+ }
+ hdr->mid = htonl(mid);
+ }
+}
+
+static bool sctp_validate_data(struct sctp_chunk *chunk)
+{
+ struct sctp_stream *stream;
+ __u16 sid, ssn;
+
+ if (chunk->chunk_hdr->type != SCTP_CID_DATA)
+ return false;
+
+ if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+ return true;
+
+ stream = &chunk->asoc->stream;
+ sid = sctp_chunk_stream_no(chunk);
+ ssn = ntohs(chunk->subh.data_hdr->ssn);
+
+ return !SSN_lt(ssn, sctp_ssn_peek(stream, in, sid));
+}
+
+static bool sctp_validate_idata(struct sctp_chunk *chunk)
+{
+ struct sctp_stream *stream;
+ __u32 mid;
+ __u16 sid;
+
+ if (chunk->chunk_hdr->type != SCTP_CID_I_DATA)
+ return false;
+
+ if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+ return true;
+
+ stream = &chunk->asoc->stream;
+ sid = sctp_chunk_stream_no(chunk);
+ mid = ntohl(chunk->subh.idata_hdr->mid);
+
+ return !MID_lt(mid, sctp_mid_peek(stream, in, sid));
+}
+
+static void sctp_intl_store_reasm(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sctp_ulpevent *cevent;
+ struct sk_buff *pos, *loc;
+
+ pos = skb_peek_tail(&ulpq->reasm);
+ if (!pos) {
+ __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
+ return;
+ }
+
+ cevent = sctp_skb2event(pos);
+
+ if (event->stream == cevent->stream &&
+ event->mid == cevent->mid &&
+ (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
+ (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
+ event->fsn > cevent->fsn))) {
+ __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
+ return;
+ }
+
+ if ((event->stream == cevent->stream &&
+ MID_lt(cevent->mid, event->mid)) ||
+ event->stream > cevent->stream) {
+ __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
+ return;
+ }
+
+ loc = NULL;
+ skb_queue_walk(&ulpq->reasm, pos) {
+ cevent = sctp_skb2event(pos);
+
+ if (event->stream < cevent->stream ||
+ (event->stream == cevent->stream &&
+ MID_lt(event->mid, cevent->mid))) {
+ loc = pos;
+ break;
+ }
+ if (event->stream == cevent->stream &&
+ event->mid == cevent->mid &&
+ !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
+ (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
+ event->fsn < cevent->fsn)) {
+ loc = pos;
+ break;
+ }
+ }
+
+ if (!loc)
+ __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
+ else
+ __skb_queue_before(&ulpq->reasm, loc, sctp_event2skb(event));
+}
+
+static struct sctp_ulpevent *sctp_intl_retrieve_partial(
+ struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sk_buff *first_frag = NULL;
+ struct sk_buff *last_frag = NULL;
+ struct sctp_ulpevent *retval;
+ struct sctp_stream_in *sin;
+ struct sk_buff *pos;
+ __u32 next_fsn = 0;
+ int is_last = 0;
+
+ sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
+
+ skb_queue_walk(&ulpq->reasm, pos) {
+ struct sctp_ulpevent *cevent = sctp_skb2event(pos);
+
+ if (cevent->stream < event->stream)
+ continue;
+
+ if (cevent->stream > event->stream ||
+ cevent->mid != sin->mid)
+ break;
+
+ switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+ case SCTP_DATA_FIRST_FRAG:
+ goto out;
+ case SCTP_DATA_MIDDLE_FRAG:
+ if (!first_frag) {
+ if (cevent->fsn == sin->fsn) {
+ first_frag = pos;
+ last_frag = pos;
+ next_fsn = cevent->fsn + 1;
+ }
+ } else if (cevent->fsn == next_fsn) {
+ last_frag = pos;
+ next_fsn++;
+ } else {
+ goto out;
+ }
+ break;
+ case SCTP_DATA_LAST_FRAG:
+ if (!first_frag) {
+ if (cevent->fsn == sin->fsn) {
+ first_frag = pos;
+ last_frag = pos;
+ next_fsn = 0;
+ is_last = 1;
+ }
+ } else if (cevent->fsn == next_fsn) {
+ last_frag = pos;
+ next_fsn = 0;
+ is_last = 1;
+ }
+ goto out;
+ default:
+ goto out;
+ }
+ }
+
+out:
+ if (!first_frag)
+ return NULL;
+
+ retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
+ first_frag, last_frag);
+ if (retval) {
+ sin->fsn = next_fsn;
+ if (is_last) {
+ retval->msg_flags |= MSG_EOR;
+ sin->pd_mode = 0;
+ }
+ }
+
+ return retval;
+}
+
+static struct sctp_ulpevent *sctp_intl_retrieve_reassembled(
+ struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sctp_association *asoc = ulpq->asoc;
+ struct sk_buff *pos, *first_frag = NULL;
+ struct sctp_ulpevent *retval = NULL;
+ struct sk_buff *pd_first = NULL;
+ struct sk_buff *pd_last = NULL;
+ struct sctp_stream_in *sin;
+ __u32 next_fsn = 0;
+ __u32 pd_point = 0;
+ __u32 pd_len = 0;
+ __u32 mid = 0;
+
+ sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
+
+ skb_queue_walk(&ulpq->reasm, pos) {
+ struct sctp_ulpevent *cevent = sctp_skb2event(pos);
+
+ if (cevent->stream < event->stream)
+ continue;
+ if (cevent->stream > event->stream)
+ break;
+
+ if (MID_lt(cevent->mid, event->mid))
+ continue;
+ if (MID_lt(event->mid, cevent->mid))
+ break;
+
+ switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+ case SCTP_DATA_FIRST_FRAG:
+ if (cevent->mid == sin->mid) {
+ pd_first = pos;
+ pd_last = pos;
+ pd_len = pos->len;
+ }
+
+ first_frag = pos;
+ next_fsn = 0;
+ mid = cevent->mid;
+ break;
+
+ case SCTP_DATA_MIDDLE_FRAG:
+ if (first_frag && cevent->mid == mid &&
+ cevent->fsn == next_fsn) {
+ next_fsn++;
+ if (pd_first) {
+ pd_last = pos;
+ pd_len += pos->len;
+ }
+ } else {
+ first_frag = NULL;
+ }
+ break;
+
+ case SCTP_DATA_LAST_FRAG:
+ if (first_frag && cevent->mid == mid &&
+ cevent->fsn == next_fsn)
+ goto found;
+ else
+ first_frag = NULL;
+ break;
+ }
+ }
+
+ if (!pd_first)
+ goto out;
+
+ pd_point = sctp_sk(asoc->base.sk)->pd_point;
+ if (pd_point && pd_point <= pd_len) {
+ retval = sctp_make_reassembled_event(asoc->base.net,
+ &ulpq->reasm,
+ pd_first, pd_last);
+ if (retval) {
+ sin->fsn = next_fsn;
+ sin->pd_mode = 1;
+ }
+ }
+ goto out;
+
+found:
+ retval = sctp_make_reassembled_event(asoc->base.net, &ulpq->reasm,
+ first_frag, pos);
+ if (retval)
+ retval->msg_flags |= MSG_EOR;
+
+out:
+ return retval;
+}
+
+static struct sctp_ulpevent *sctp_intl_reasm(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sctp_ulpevent *retval = NULL;
+ struct sctp_stream_in *sin;
+
+ if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
+ event->msg_flags |= MSG_EOR;
+ return event;
+ }
+
+ sctp_intl_store_reasm(ulpq, event);
+
+ sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
+ if (sin->pd_mode && event->mid == sin->mid &&
+ event->fsn == sin->fsn)
+ retval = sctp_intl_retrieve_partial(ulpq, event);
+
+ if (!retval)
+ retval = sctp_intl_retrieve_reassembled(ulpq, event);
+
+ return retval;
+}
+
+static void sctp_intl_store_ordered(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sctp_ulpevent *cevent;
+ struct sk_buff *pos, *loc;
+
+ pos = skb_peek_tail(&ulpq->lobby);
+ if (!pos) {
+ __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
+ return;
+ }
+
+ cevent = (struct sctp_ulpevent *)pos->cb;
+ if (event->stream == cevent->stream &&
+ MID_lt(cevent->mid, event->mid)) {
+ __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
+ return;
+ }
+
+ if (event->stream > cevent->stream) {
+ __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
+ return;
+ }
+
+ loc = NULL;
+ skb_queue_walk(&ulpq->lobby, pos) {
+ cevent = (struct sctp_ulpevent *)pos->cb;
+
+ if (cevent->stream > event->stream) {
+ loc = pos;
+ break;
+ }
+ if (cevent->stream == event->stream &&
+ MID_lt(event->mid, cevent->mid)) {
+ loc = pos;
+ break;
+ }
+ }
+
+ if (!loc)
+ __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
+ else
+ __skb_queue_before(&ulpq->lobby, loc, sctp_event2skb(event));
+}
+
+static void sctp_intl_retrieve_ordered(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sk_buff_head *event_list;
+ struct sctp_stream *stream;
+ struct sk_buff *pos, *tmp;
+ __u16 sid = event->stream;
+
+ stream = &ulpq->asoc->stream;
+ event_list = (struct sk_buff_head *)sctp_event2skb(event)->prev;
+
+ sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
+ struct sctp_ulpevent *cevent = (struct sctp_ulpevent *)pos->cb;
+
+ if (cevent->stream > sid)
+ break;
+
+ if (cevent->stream < sid)
+ continue;
+
+ if (cevent->mid != sctp_mid_peek(stream, in, sid))
+ break;
+
+ sctp_mid_next(stream, in, sid);
+
+ __skb_unlink(pos, &ulpq->lobby);
+
+ __skb_queue_tail(event_list, pos);
+ }
+}
+
+static struct sctp_ulpevent *sctp_intl_order(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sctp_stream *stream;
+ __u16 sid;
+
+ stream = &ulpq->asoc->stream;
+ sid = event->stream;
+
+ if (event->mid != sctp_mid_peek(stream, in, sid)) {
+ sctp_intl_store_ordered(ulpq, event);
+ return NULL;
+ }
+
+ sctp_mid_next(stream, in, sid);
+
+ sctp_intl_retrieve_ordered(ulpq, event);
+
+ return event;
+}
+
+static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
+ struct sk_buff_head *skb_list)
+{
+ struct sock *sk = ulpq->asoc->base.sk;
+ struct sctp_sock *sp = sctp_sk(sk);
+ struct sctp_ulpevent *event;
+ struct sk_buff *skb;
+
+ skb = __skb_peek(skb_list);
+ event = sctp_skb2event(skb);
+
+ if (sk->sk_shutdown & RCV_SHUTDOWN &&
+ (sk->sk_shutdown & SEND_SHUTDOWN ||
+ !sctp_ulpevent_is_notification(event)))
+ goto out_free;
+
+ if (!sctp_ulpevent_is_notification(event)) {
+ sk_mark_napi_id(sk, skb);
+ sk_incoming_cpu_update(sk);
+ }
+
+ if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
+ goto out_free;
+
+ if (skb_list)
+ skb_queue_splice_tail_init(skb_list,
+ &sk->sk_receive_queue);
+ else
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
+
+ if (!sp->data_ready_signalled) {
+ sp->data_ready_signalled = 1;
+ sk->sk_data_ready(sk);
+ }
+
+ return 1;
+
+out_free:
+ if (skb_list)
+ sctp_queue_purge_ulpevents(skb_list);
+ else
+ sctp_ulpevent_free(event);
+
+ return 0;
+}
+
+static void sctp_intl_store_reasm_uo(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sctp_ulpevent *cevent;
+ struct sk_buff *pos;
+
+ pos = skb_peek_tail(&ulpq->reasm_uo);
+ if (!pos) {
+ __skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
+ return;
+ }
+
+ cevent = sctp_skb2event(pos);
+
+ if (event->stream == cevent->stream &&
+ event->mid == cevent->mid &&
+ (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
+ (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
+ event->fsn > cevent->fsn))) {
+ __skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
+ return;
+ }
+
+ if ((event->stream == cevent->stream &&
+ MID_lt(cevent->mid, event->mid)) ||
+ event->stream > cevent->stream) {
+ __skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
+ return;
+ }
+
+ skb_queue_walk(&ulpq->reasm_uo, pos) {
+ cevent = sctp_skb2event(pos);
+
+ if (event->stream < cevent->stream ||
+ (event->stream == cevent->stream &&
+ MID_lt(event->mid, cevent->mid)))
+ break;
+
+ if (event->stream == cevent->stream &&
+ event->mid == cevent->mid &&
+ !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
+ (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
+ event->fsn < cevent->fsn))
+ break;
+ }
+
+ __skb_queue_before(&ulpq->reasm_uo, pos, sctp_event2skb(event));
+}
+
+static struct sctp_ulpevent *sctp_intl_retrieve_partial_uo(
+ struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sk_buff *first_frag = NULL;
+ struct sk_buff *last_frag = NULL;
+ struct sctp_ulpevent *retval;
+ struct sctp_stream_in *sin;
+ struct sk_buff *pos;
+ __u32 next_fsn = 0;
+ int is_last = 0;
+
+ sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
+
+ skb_queue_walk(&ulpq->reasm_uo, pos) {
+ struct sctp_ulpevent *cevent = sctp_skb2event(pos);
+
+ if (cevent->stream < event->stream)
+ continue;
+ if (cevent->stream > event->stream)
+ break;
+
+ if (MID_lt(cevent->mid, sin->mid_uo))
+ continue;
+ if (MID_lt(sin->mid_uo, cevent->mid))
+ break;
+
+ switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+ case SCTP_DATA_FIRST_FRAG:
+ goto out;
+ case SCTP_DATA_MIDDLE_FRAG:
+ if (!first_frag) {
+ if (cevent->fsn == sin->fsn_uo) {
+ first_frag = pos;
+ last_frag = pos;
+ next_fsn = cevent->fsn + 1;
+ }
+ } else if (cevent->fsn == next_fsn) {
+ last_frag = pos;
+ next_fsn++;
+ } else {
+ goto out;
+ }
+ break;
+ case SCTP_DATA_LAST_FRAG:
+ if (!first_frag) {
+ if (cevent->fsn == sin->fsn_uo) {
+ first_frag = pos;
+ last_frag = pos;
+ next_fsn = 0;
+ is_last = 1;
+ }
+ } else if (cevent->fsn == next_fsn) {
+ last_frag = pos;
+ next_fsn = 0;
+ is_last = 1;
+ }
+ goto out;
+ default:
+ goto out;
+ }
+ }
+
+out:
+ if (!first_frag)
+ return NULL;
+
+ retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
+ &ulpq->reasm_uo, first_frag,
+ last_frag);
+ if (retval) {
+ sin->fsn_uo = next_fsn;
+ if (is_last) {
+ retval->msg_flags |= MSG_EOR;
+ sin->pd_mode_uo = 0;
+ }
+ }
+
+ return retval;
+}
+
+static struct sctp_ulpevent *sctp_intl_retrieve_reassembled_uo(
+ struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sctp_association *asoc = ulpq->asoc;
+ struct sk_buff *pos, *first_frag = NULL;
+ struct sctp_ulpevent *retval = NULL;
+ struct sk_buff *pd_first = NULL;
+ struct sk_buff *pd_last = NULL;
+ struct sctp_stream_in *sin;
+ __u32 next_fsn = 0;
+ __u32 pd_point = 0;
+ __u32 pd_len = 0;
+ __u32 mid = 0;
+
+ sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
+
+ skb_queue_walk(&ulpq->reasm_uo, pos) {
+ struct sctp_ulpevent *cevent = sctp_skb2event(pos);
+
+ if (cevent->stream < event->stream)
+ continue;
+ if (cevent->stream > event->stream)
+ break;
+
+ if (MID_lt(cevent->mid, event->mid))
+ continue;
+ if (MID_lt(event->mid, cevent->mid))
+ break;
+
+ switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+ case SCTP_DATA_FIRST_FRAG:
+ if (!sin->pd_mode_uo) {
+ sin->mid_uo = cevent->mid;
+ pd_first = pos;
+ pd_last = pos;
+ pd_len = pos->len;
+ }
+
+ first_frag = pos;
+ next_fsn = 0;
+ mid = cevent->mid;
+ break;
+
+ case SCTP_DATA_MIDDLE_FRAG:
+ if (first_frag && cevent->mid == mid &&
+ cevent->fsn == next_fsn) {
+ next_fsn++;
+ if (pd_first) {
+ pd_last = pos;
+ pd_len += pos->len;
+ }
+ } else {
+ first_frag = NULL;
+ }
+ break;
+
+ case SCTP_DATA_LAST_FRAG:
+ if (first_frag && cevent->mid == mid &&
+ cevent->fsn == next_fsn)
+ goto found;
+ else
+ first_frag = NULL;
+ break;
+ }
+ }
+
+ if (!pd_first)
+ goto out;
+
+ pd_point = sctp_sk(asoc->base.sk)->pd_point;
+ if (pd_point && pd_point <= pd_len) {
+ retval = sctp_make_reassembled_event(asoc->base.net,
+ &ulpq->reasm_uo,
+ pd_first, pd_last);
+ if (retval) {
+ sin->fsn_uo = next_fsn;
+ sin->pd_mode_uo = 1;
+ }
+ }
+ goto out;
+
+found:
+ retval = sctp_make_reassembled_event(asoc->base.net, &ulpq->reasm_uo,
+ first_frag, pos);
+ if (retval)
+ retval->msg_flags |= MSG_EOR;
+
+out:
+ return retval;
+}
+
+static struct sctp_ulpevent *sctp_intl_reasm_uo(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sctp_ulpevent *retval = NULL;
+ struct sctp_stream_in *sin;
+
+ if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
+ event->msg_flags |= MSG_EOR;
+ return event;
+ }
+
+ sctp_intl_store_reasm_uo(ulpq, event);
+
+ sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
+ if (sin->pd_mode_uo && event->mid == sin->mid_uo &&
+ event->fsn == sin->fsn_uo)
+ retval = sctp_intl_retrieve_partial_uo(ulpq, event);
+
+ if (!retval)
+ retval = sctp_intl_retrieve_reassembled_uo(ulpq, event);
+
+ return retval;
+}
+
+static struct sctp_ulpevent *sctp_intl_retrieve_first_uo(struct sctp_ulpq *ulpq)
+{
+ struct sctp_stream_in *csin, *sin = NULL;
+ struct sk_buff *first_frag = NULL;
+ struct sk_buff *last_frag = NULL;
+ struct sctp_ulpevent *retval;
+ struct sk_buff *pos;
+ __u32 next_fsn = 0;
+ __u16 sid = 0;
+
+ skb_queue_walk(&ulpq->reasm_uo, pos) {
+ struct sctp_ulpevent *cevent = sctp_skb2event(pos);
+
+ csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream);
+ if (csin->pd_mode_uo)
+ continue;
+
+ switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+ case SCTP_DATA_FIRST_FRAG:
+ if (first_frag)
+ goto out;
+ first_frag = pos;
+ last_frag = pos;
+ next_fsn = 0;
+ sin = csin;
+ sid = cevent->stream;
+ sin->mid_uo = cevent->mid;
+ break;
+ case SCTP_DATA_MIDDLE_FRAG:
+ if (!first_frag)
+ break;
+ if (cevent->stream == sid &&
+ cevent->mid == sin->mid_uo &&
+ cevent->fsn == next_fsn) {
+ next_fsn++;
+ last_frag = pos;
+ } else {
+ goto out;
+ }
+ break;
+ case SCTP_DATA_LAST_FRAG:
+ if (first_frag)
+ goto out;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (!first_frag)
+ return NULL;
+
+out:
+ retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
+ &ulpq->reasm_uo, first_frag,
+ last_frag);
+ if (retval) {
+ sin->fsn_uo = next_fsn;
+ sin->pd_mode_uo = 1;
+ }
+
+ return retval;
+}
+
+static int sctp_ulpevent_idata(struct sctp_ulpq *ulpq,
+ struct sctp_chunk *chunk, gfp_t gfp)
+{
+ struct sctp_ulpevent *event;
+ struct sk_buff_head temp;
+ int event_eor = 0;
+
+ event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
+ if (!event)
+ return -ENOMEM;
+
+ event->mid = ntohl(chunk->subh.idata_hdr->mid);
+ if (event->msg_flags & SCTP_DATA_FIRST_FRAG)
+ event->ppid = chunk->subh.idata_hdr->ppid;
+ else
+ event->fsn = ntohl(chunk->subh.idata_hdr->fsn);
+
+ if (!(event->msg_flags & SCTP_DATA_UNORDERED)) {
+ event = sctp_intl_reasm(ulpq, event);
+ if (event) {
+ skb_queue_head_init(&temp);
+ __skb_queue_tail(&temp, sctp_event2skb(event));
+
+ if (event->msg_flags & MSG_EOR)
+ event = sctp_intl_order(ulpq, event);
+ }
+ } else {
+ event = sctp_intl_reasm_uo(ulpq, event);
+ if (event) {
+ skb_queue_head_init(&temp);
+ __skb_queue_tail(&temp, sctp_event2skb(event));
+ }
+ }
+
+ if (event) {
+ event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
+ sctp_enqueue_event(ulpq, &temp);
+ }
+
+ return event_eor;
+}
+
+static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq)
+{
+ struct sctp_stream_in *csin, *sin = NULL;
+ struct sk_buff *first_frag = NULL;
+ struct sk_buff *last_frag = NULL;
+ struct sctp_ulpevent *retval;
+ struct sk_buff *pos;
+ __u32 next_fsn = 0;
+ __u16 sid = 0;
+
+ skb_queue_walk(&ulpq->reasm, pos) {
+ struct sctp_ulpevent *cevent = sctp_skb2event(pos);
+
+ csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream);
+ if (csin->pd_mode)
+ continue;
+
+ switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+ case SCTP_DATA_FIRST_FRAG:
+ if (first_frag)
+ goto out;
+ if (cevent->mid == csin->mid) {
+ first_frag = pos;
+ last_frag = pos;
+ next_fsn = 0;
+ sin = csin;
+ sid = cevent->stream;
+ }
+ break;
+ case SCTP_DATA_MIDDLE_FRAG:
+ if (!first_frag)
+ break;
+ if (cevent->stream == sid &&
+ cevent->mid == sin->mid &&
+ cevent->fsn == next_fsn) {
+ next_fsn++;
+ last_frag = pos;
+ } else {
+ goto out;
+ }
+ break;
+ case SCTP_DATA_LAST_FRAG:
+ if (first_frag)
+ goto out;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (!first_frag)
+ return NULL;
+
+out:
+ retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
+ &ulpq->reasm, first_frag,
+ last_frag);
+ if (retval) {
+ sin->fsn = next_fsn;
+ sin->pd_mode = 1;
+ }
+
+ return retval;
+}
+
+static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
+{
+ struct sctp_ulpevent *event;
+ struct sk_buff_head temp;
+
+ if (!skb_queue_empty(&ulpq->reasm)) {
+ do {
+ event = sctp_intl_retrieve_first(ulpq);
+ if (event) {
+ skb_queue_head_init(&temp);
+ __skb_queue_tail(&temp, sctp_event2skb(event));
+ sctp_enqueue_event(ulpq, &temp);
+ }
+ } while (event);
+ }
+
+ if (!skb_queue_empty(&ulpq->reasm_uo)) {
+ do {
+ event = sctp_intl_retrieve_first_uo(ulpq);
+ if (event) {
+ skb_queue_head_init(&temp);
+ __skb_queue_tail(&temp, sctp_event2skb(event));
+ sctp_enqueue_event(ulpq, &temp);
+ }
+ } while (event);
+ }
+}
+
+static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
+ gfp_t gfp)
+{
+ struct sctp_association *asoc = ulpq->asoc;
+ __u32 freed = 0;
+ __u16 needed;
+
+ needed = ntohs(chunk->chunk_hdr->length) -
+ sizeof(struct sctp_idata_chunk);
+
+ if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
+ freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
+ if (freed < needed)
+ freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm,
+ needed);
+ if (freed < needed)
+ freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm_uo,
+ needed);
+ }
+
+ if (freed >= needed && sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
+ sctp_intl_start_pd(ulpq, gfp);
+}
+
+static void sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid,
+ __u32 mid, __u16 flags, gfp_t gfp)
+{
+ struct sock *sk = ulpq->asoc->base.sk;
+ struct sctp_ulpevent *ev = NULL;
+
+ if (!sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
+ SCTP_PARTIAL_DELIVERY_EVENT))
+ return;
+
+ ev = sctp_ulpevent_make_pdapi(ulpq->asoc, SCTP_PARTIAL_DELIVERY_ABORTED,
+ sid, mid, flags, gfp);
+ if (ev) {
+ struct sctp_sock *sp = sctp_sk(sk);
+
+ __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
+
+ if (!sp->data_ready_signalled) {
+ sp->data_ready_signalled = 1;
+ sk->sk_data_ready(sk);
+ }
+ }
+}
+
+static void sctp_intl_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
+{
+ struct sctp_stream *stream = &ulpq->asoc->stream;
+ struct sctp_ulpevent *cevent, *event = NULL;
+ struct sk_buff_head *lobby = &ulpq->lobby;
+ struct sk_buff *pos, *tmp;
+ struct sk_buff_head temp;
+ __u16 csid;
+ __u32 cmid;
+
+ skb_queue_head_init(&temp);
+ sctp_skb_for_each(pos, lobby, tmp) {
+ cevent = (struct sctp_ulpevent *)pos->cb;
+ csid = cevent->stream;
+ cmid = cevent->mid;
+
+ if (csid > sid)
+ break;
+
+ if (csid < sid)
+ continue;
+
+ if (!MID_lt(cmid, sctp_mid_peek(stream, in, csid)))
+ break;
+
+ __skb_unlink(pos, lobby);
+ if (!event)
+ event = sctp_skb2event(pos);
+
+ __skb_queue_tail(&temp, pos);
+ }
+
+ if (!event && pos != (struct sk_buff *)lobby) {
+ cevent = (struct sctp_ulpevent *)pos->cb;
+ csid = cevent->stream;
+ cmid = cevent->mid;
+
+ if (csid == sid && cmid == sctp_mid_peek(stream, in, csid)) {
+ sctp_mid_next(stream, in, csid);
+ __skb_unlink(pos, lobby);
+ __skb_queue_tail(&temp, pos);
+ event = sctp_skb2event(pos);
+ }
+ }
+
+ if (event) {
+ sctp_intl_retrieve_ordered(ulpq, event);
+ sctp_enqueue_event(ulpq, &temp);
+ }
+}
+
+static void sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
+{
+ struct sctp_stream *stream = &ulpq->asoc->stream;
+ __u16 sid;
+
+ for (sid = 0; sid < stream->incnt; sid++) {
+ struct sctp_stream_in *sin = SCTP_SI(stream, sid);
+ __u32 mid;
+
+ if (sin->pd_mode_uo) {
+ sin->pd_mode_uo = 0;
+
+ mid = sin->mid_uo;
+ sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1, gfp);
+ }
+
+ if (sin->pd_mode) {
+ sin->pd_mode = 0;
+
+ mid = sin->mid;
+ sctp_intl_stream_abort_pd(ulpq, sid, mid, 0, gfp);
+ sctp_mid_skip(stream, in, sid, mid);
+
+ sctp_intl_reap_ordered(ulpq, sid);
+ }
+ }
+
+ /* intl abort pd happens only when all data needs to be cleaned */
+ sctp_ulpq_flush(ulpq);
+}
+
+static inline int sctp_get_skip_pos(struct sctp_ifwdtsn_skip *skiplist,
+ int nskips, __be16 stream, __u8 flags)
+{
+ int i;
+
+ for (i = 0; i < nskips; i++)
+ if (skiplist[i].stream == stream &&
+ skiplist[i].flags == flags)
+ return i;
+
+ return i;
+}
+
+#define SCTP_FTSN_U_BIT 0x1
+static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn)
+{
+ struct sctp_ifwdtsn_skip ftsn_skip_arr[10];
+ struct sctp_association *asoc = q->asoc;
+ struct sctp_chunk *ftsn_chunk = NULL;
+ struct list_head *lchunk, *temp;
+ int nskips = 0, skip_pos;
+ struct sctp_chunk *chunk;
+ __u32 tsn;
+
+ if (!asoc->peer.prsctp_capable)
+ return;
+
+ if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
+ asoc->adv_peer_ack_point = ctsn;
+
+ list_for_each_safe(lchunk, temp, &q->abandoned) {
+ chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list);
+ tsn = ntohl(chunk->subh.data_hdr->tsn);
+
+ if (TSN_lte(tsn, ctsn)) {
+ list_del_init(lchunk);
+ sctp_chunk_free(chunk);
+ } else if (TSN_lte(tsn, asoc->adv_peer_ack_point + 1)) {
+ __be16 sid = chunk->subh.idata_hdr->stream;
+ __be32 mid = chunk->subh.idata_hdr->mid;
+ __u8 flags = 0;
+
+ if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+ flags |= SCTP_FTSN_U_BIT;
+
+ asoc->adv_peer_ack_point = tsn;
+ skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0], nskips,
+ sid, flags);
+ ftsn_skip_arr[skip_pos].stream = sid;
+ ftsn_skip_arr[skip_pos].reserved = 0;
+ ftsn_skip_arr[skip_pos].flags = flags;
+ ftsn_skip_arr[skip_pos].mid = mid;
+ if (skip_pos == nskips)
+ nskips++;
+ if (nskips == 10)
+ break;
+ } else {
+ break;
+ }
+ }
+
+ if (asoc->adv_peer_ack_point > ctsn)
+ ftsn_chunk = sctp_make_ifwdtsn(asoc, asoc->adv_peer_ack_point,
+ nskips, &ftsn_skip_arr[0]);
+
+ if (ftsn_chunk) {
+ list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
+ SCTP_INC_STATS(asoc->base.net, SCTP_MIB_OUTCTRLCHUNKS);
+ }
+}
+
+#define _sctp_walk_ifwdtsn(pos, chunk, end) \
+ for (pos = chunk->subh.ifwdtsn_hdr->skip; \
+ (void *)pos <= (void *)chunk->subh.ifwdtsn_hdr->skip + (end) - \
+ sizeof(struct sctp_ifwdtsn_skip); pos++)
+
+#define sctp_walk_ifwdtsn(pos, ch) \
+ _sctp_walk_ifwdtsn((pos), (ch), ntohs((ch)->chunk_hdr->length) - \
+ sizeof(struct sctp_ifwdtsn_chunk))
+
+static bool sctp_validate_fwdtsn(struct sctp_chunk *chunk)
+{
+ struct sctp_fwdtsn_skip *skip;
+ __u16 incnt;
+
+ if (chunk->chunk_hdr->type != SCTP_CID_FWD_TSN)
+ return false;
+
+ incnt = chunk->asoc->stream.incnt;
+ sctp_walk_fwdtsn(skip, chunk)
+ if (ntohs(skip->stream) >= incnt)
+ return false;
+
+ return true;
+}
+
+static bool sctp_validate_iftsn(struct sctp_chunk *chunk)
+{
+ struct sctp_ifwdtsn_skip *skip;
+ __u16 incnt;
+
+ if (chunk->chunk_hdr->type != SCTP_CID_I_FWD_TSN)
+ return false;
+
+ incnt = chunk->asoc->stream.incnt;
+ sctp_walk_ifwdtsn(skip, chunk)
+ if (ntohs(skip->stream) >= incnt)
+ return false;
+
+ return true;
+}
+
+static void sctp_report_fwdtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
+{
+	/* Move the Cumulative TSN Ack ahead. */
+ sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
+ /* purge the fragmentation queue */
+ sctp_ulpq_reasm_flushtsn(ulpq, ftsn);
+ /* Abort any in progress partial delivery. */
+ sctp_ulpq_abort_pd(ulpq, GFP_ATOMIC);
+}
+
+static void sctp_intl_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
+{
+ struct sk_buff *pos, *tmp;
+
+ skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
+ struct sctp_ulpevent *event = sctp_skb2event(pos);
+ __u32 tsn = event->tsn;
+
+ if (TSN_lte(tsn, ftsn)) {
+ __skb_unlink(pos, &ulpq->reasm);
+ sctp_ulpevent_free(event);
+ }
+ }
+
+ skb_queue_walk_safe(&ulpq->reasm_uo, pos, tmp) {
+ struct sctp_ulpevent *event = sctp_skb2event(pos);
+ __u32 tsn = event->tsn;
+
+ if (TSN_lte(tsn, ftsn)) {
+ __skb_unlink(pos, &ulpq->reasm_uo);
+ sctp_ulpevent_free(event);
+ }
+ }
+}
+
+static void sctp_report_iftsn(struct sctp_ulpq *ulpq, __u32 ftsn)
+{
+	/* Move the Cumulative TSN Ack ahead. */
+ sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
+ /* purge the fragmentation queue */
+ sctp_intl_reasm_flushtsn(ulpq, ftsn);
+ /* abort only when it's for all data */
+ if (ftsn == sctp_tsnmap_get_max_tsn_seen(&ulpq->asoc->peer.tsn_map))
+ sctp_intl_abort_pd(ulpq, GFP_ATOMIC);
+}
+
+static void sctp_handle_fwdtsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
+{
+ struct sctp_fwdtsn_skip *skip;
+
+ /* Walk through all the skipped SSNs */
+ sctp_walk_fwdtsn(skip, chunk)
+ sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
+}
+
+static void sctp_intl_skip(struct sctp_ulpq *ulpq, __u16 sid, __u32 mid,
+ __u8 flags)
+{
+ struct sctp_stream_in *sin = sctp_stream_in(&ulpq->asoc->stream, sid);
+ struct sctp_stream *stream = &ulpq->asoc->stream;
+
+ if (flags & SCTP_FTSN_U_BIT) {
+ if (sin->pd_mode_uo && MID_lt(sin->mid_uo, mid)) {
+ sin->pd_mode_uo = 0;
+ sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1,
+ GFP_ATOMIC);
+ }
+ return;
+ }
+
+ if (MID_lt(mid, sctp_mid_peek(stream, in, sid)))
+ return;
+
+ if (sin->pd_mode) {
+ sin->pd_mode = 0;
+ sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x0, GFP_ATOMIC);
+ }
+
+ sctp_mid_skip(stream, in, sid, mid);
+
+ sctp_intl_reap_ordered(ulpq, sid);
+}
+
+static void sctp_handle_iftsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
+{
+ struct sctp_ifwdtsn_skip *skip;
+
+ /* Walk through all the skipped MIDs and abort stream pd if possible */
+ sctp_walk_ifwdtsn(skip, chunk)
+ sctp_intl_skip(ulpq, ntohs(skip->stream),
+ ntohl(skip->mid), skip->flags);
+}
+
+static int do_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
+{
+ struct sk_buff_head temp;
+
+ skb_queue_head_init(&temp);
+ __skb_queue_tail(&temp, sctp_event2skb(event));
+ return sctp_ulpq_tail_event(ulpq, &temp);
+}
+
+static struct sctp_stream_interleave sctp_stream_interleave_0 = {
+ .data_chunk_len = sizeof(struct sctp_data_chunk),
+ .ftsn_chunk_len = sizeof(struct sctp_fwdtsn_chunk),
+ /* DATA process functions */
+ .make_datafrag = sctp_make_datafrag_empty,
+ .assign_number = sctp_chunk_assign_ssn,
+ .validate_data = sctp_validate_data,
+ .ulpevent_data = sctp_ulpq_tail_data,
+ .enqueue_event = do_ulpq_tail_event,
+ .renege_events = sctp_ulpq_renege,
+ .start_pd = sctp_ulpq_partial_delivery,
+ .abort_pd = sctp_ulpq_abort_pd,
+ /* FORWARD-TSN process functions */
+ .generate_ftsn = sctp_generate_fwdtsn,
+ .validate_ftsn = sctp_validate_fwdtsn,
+ .report_ftsn = sctp_report_fwdtsn,
+ .handle_ftsn = sctp_handle_fwdtsn,
+};
+
+static int do_sctp_enqueue_event(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sk_buff_head temp;
+
+ skb_queue_head_init(&temp);
+ __skb_queue_tail(&temp, sctp_event2skb(event));
+ return sctp_enqueue_event(ulpq, &temp);
+}
+
+static struct sctp_stream_interleave sctp_stream_interleave_1 = {
+ .data_chunk_len = sizeof(struct sctp_idata_chunk),
+ .ftsn_chunk_len = sizeof(struct sctp_ifwdtsn_chunk),
+ /* I-DATA process functions */
+ .make_datafrag = sctp_make_idatafrag_empty,
+ .assign_number = sctp_chunk_assign_mid,
+ .validate_data = sctp_validate_idata,
+ .ulpevent_data = sctp_ulpevent_idata,
+ .enqueue_event = do_sctp_enqueue_event,
+ .renege_events = sctp_renege_events,
+ .start_pd = sctp_intl_start_pd,
+ .abort_pd = sctp_intl_abort_pd,
+ /* I-FORWARD-TSN process functions */
+ .generate_ftsn = sctp_generate_iftsn,
+ .validate_ftsn = sctp_validate_iftsn,
+ .report_ftsn = sctp_report_iftsn,
+ .handle_ftsn = sctp_handle_iftsn,
+};
+
+void sctp_stream_interleave_init(struct sctp_stream *stream)
+{
+ struct sctp_association *asoc;
+
+ asoc = container_of(stream, struct sctp_association, stream);
+ stream->si = asoc->peer.intl_capable ? &sctp_stream_interleave_1
+ : &sctp_stream_interleave_0;
+}
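
Whether an association uses the I-DATA paths above is settled at negotiation time: sctp_stream_interleave_init() selects sctp_stream_interleave_1 only when the peer advertised interleaving support (asoc->peer.intl_capable). From userspace this normally involves the net.sctp.intl_enable sysctl (see sysctl.c further down) plus two socket options. A minimal userspace sketch, under the assumption that the SCTP UAPI headers expose SCTP_FRAGMENT_INTERLEAVE, SCTP_INTERLEAVING_SUPPORTED and struct sctp_assoc_value:

    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/sctp.h>

    /* Sketch: enable I-DATA interleaving on a socket before any
     * association is set up.  The net.sctp.intl_enable sysctl must
     * also be 1 on this host.
     */
    static int enable_idata(int fd)
    {
            struct sctp_assoc_value val = { .assoc_id = 0, .assoc_value = 1 };
            int frag = 2;   /* full interleaving of partial messages */

            /* Fragment interleaving generally has to be allowed first. */
            if (setsockopt(fd, IPPROTO_SCTP, SCTP_FRAGMENT_INTERLEAVE,
                           &frag, sizeof(frag)) < 0)
                    return -1;

            /* Ask the stack to negotiate I-DATA support with the peer. */
            return setsockopt(fd, IPPROTO_SCTP, SCTP_INTERLEAVING_SUPPORTED,
                              &val, sizeof(val));
    }

Until both the sysctl and the socket option are set before association setup, the stack keeps using ordinary DATA chunks through sctp_stream_interleave_0.
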
diff --git a/net/sctp/stream_sched.c b/net/sctp/stream_sched.c
new file mode 100644
index 000000000..7c8f9d89e
--- /dev/null
+++ b/net/sctp/stream_sched.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* SCTP kernel implementation
+ * (C) Copyright Red Hat Inc. 2017
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * These functions manipulate sctp stream queue/scheduling.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+ */
+
+#include <linux/list.h>
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+#include <net/sctp/stream_sched.h>
+
+/* First Come First Serve (a.k.a. FIFO)
+ * RFC DRAFT ndata Section 3.1
+ */
+static int sctp_sched_fcfs_set(struct sctp_stream *stream, __u16 sid,
+ __u16 value, gfp_t gfp)
+{
+ return 0;
+}
+
+static int sctp_sched_fcfs_get(struct sctp_stream *stream, __u16 sid,
+ __u16 *value)
+{
+ *value = 0;
+ return 0;
+}
+
+static int sctp_sched_fcfs_init(struct sctp_stream *stream)
+{
+ return 0;
+}
+
+static int sctp_sched_fcfs_init_sid(struct sctp_stream *stream, __u16 sid,
+ gfp_t gfp)
+{
+ return 0;
+}
+
+static void sctp_sched_fcfs_free_sid(struct sctp_stream *stream, __u16 sid)
+{
+}
+
+static void sctp_sched_fcfs_free(struct sctp_stream *stream)
+{
+}
+
+static void sctp_sched_fcfs_enqueue(struct sctp_outq *q,
+ struct sctp_datamsg *msg)
+{
+}
+
+static struct sctp_chunk *sctp_sched_fcfs_dequeue(struct sctp_outq *q)
+{
+ struct sctp_stream *stream = &q->asoc->stream;
+ struct sctp_chunk *ch = NULL;
+ struct list_head *entry;
+
+ if (list_empty(&q->out_chunk_list))
+ goto out;
+
+ if (stream->out_curr) {
+ ch = list_entry(stream->out_curr->ext->outq.next,
+ struct sctp_chunk, stream_list);
+ } else {
+ entry = q->out_chunk_list.next;
+ ch = list_entry(entry, struct sctp_chunk, list);
+ }
+
+ sctp_sched_dequeue_common(q, ch);
+
+out:
+ return ch;
+}
+
+static void sctp_sched_fcfs_dequeue_done(struct sctp_outq *q,
+ struct sctp_chunk *chunk)
+{
+}
+
+static void sctp_sched_fcfs_sched_all(struct sctp_stream *stream)
+{
+}
+
+static void sctp_sched_fcfs_unsched_all(struct sctp_stream *stream)
+{
+}
+
+static struct sctp_sched_ops sctp_sched_fcfs = {
+ .set = sctp_sched_fcfs_set,
+ .get = sctp_sched_fcfs_get,
+ .init = sctp_sched_fcfs_init,
+ .init_sid = sctp_sched_fcfs_init_sid,
+ .free_sid = sctp_sched_fcfs_free_sid,
+ .free = sctp_sched_fcfs_free,
+ .enqueue = sctp_sched_fcfs_enqueue,
+ .dequeue = sctp_sched_fcfs_dequeue,
+ .dequeue_done = sctp_sched_fcfs_dequeue_done,
+ .sched_all = sctp_sched_fcfs_sched_all,
+ .unsched_all = sctp_sched_fcfs_unsched_all,
+};
+
+static void sctp_sched_ops_fcfs_init(void)
+{
+ sctp_sched_ops_register(SCTP_SS_FCFS, &sctp_sched_fcfs);
+}
+
+/* API to other parts of the stack */
+
+static struct sctp_sched_ops *sctp_sched_ops[SCTP_SS_MAX + 1];
+
+void sctp_sched_ops_register(enum sctp_sched_type sched,
+ struct sctp_sched_ops *sched_ops)
+{
+ sctp_sched_ops[sched] = sched_ops;
+}
+
+void sctp_sched_ops_init(void)
+{
+ sctp_sched_ops_fcfs_init();
+ sctp_sched_ops_prio_init();
+ sctp_sched_ops_rr_init();
+}
+
+int sctp_sched_set_sched(struct sctp_association *asoc,
+ enum sctp_sched_type sched)
+{
+ struct sctp_sched_ops *n = sctp_sched_ops[sched];
+ struct sctp_sched_ops *old = asoc->outqueue.sched;
+ struct sctp_datamsg *msg = NULL;
+ struct sctp_chunk *ch;
+ int i, ret = 0;
+
+ if (old == n)
+ return ret;
+
+ if (sched > SCTP_SS_MAX)
+ return -EINVAL;
+
+ if (old) {
+ old->free(&asoc->stream);
+
+ /* Give the next scheduler a clean slate. */
+ for (i = 0; i < asoc->stream.outcnt; i++) {
+ struct sctp_stream_out_ext *ext = SCTP_SO(&asoc->stream, i)->ext;
+
+ if (!ext)
+ continue;
+ memset_after(ext, 0, outq);
+ }
+ }
+
+ asoc->outqueue.sched = n;
+ n->init(&asoc->stream);
+ for (i = 0; i < asoc->stream.outcnt; i++) {
+ if (!SCTP_SO(&asoc->stream, i)->ext)
+ continue;
+
+ ret = n->init_sid(&asoc->stream, i, GFP_ATOMIC);
+ if (ret)
+ goto err;
+ }
+
+ /* We have to requeue all chunks already queued. */
+ list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list) {
+ if (ch->msg == msg)
+ continue;
+ msg = ch->msg;
+ n->enqueue(&asoc->outqueue, msg);
+ }
+
+ return ret;
+
+err:
+ n->free(&asoc->stream);
+ asoc->outqueue.sched = &sctp_sched_fcfs; /* Always safe */
+
+ return ret;
+}
+
+int sctp_sched_get_sched(struct sctp_association *asoc)
+{
+ int i;
+
+ for (i = 0; i <= SCTP_SS_MAX; i++)
+ if (asoc->outqueue.sched == sctp_sched_ops[i])
+ return i;
+
+ return 0;
+}
+
+int sctp_sched_set_value(struct sctp_association *asoc, __u16 sid,
+ __u16 value, gfp_t gfp)
+{
+ if (sid >= asoc->stream.outcnt)
+ return -EINVAL;
+
+ if (!SCTP_SO(&asoc->stream, sid)->ext) {
+ int ret;
+
+ ret = sctp_stream_init_ext(&asoc->stream, sid);
+ if (ret)
+ return ret;
+ }
+
+ return asoc->outqueue.sched->set(&asoc->stream, sid, value, gfp);
+}
+
+int sctp_sched_get_value(struct sctp_association *asoc, __u16 sid,
+ __u16 *value)
+{
+ if (sid >= asoc->stream.outcnt)
+ return -EINVAL;
+
+ if (!SCTP_SO(&asoc->stream, sid)->ext)
+ return 0;
+
+ return asoc->outqueue.sched->get(&asoc->stream, sid, value);
+}
+
+void sctp_sched_dequeue_done(struct sctp_outq *q, struct sctp_chunk *ch)
+{
+ if (!list_is_last(&ch->frag_list, &ch->msg->chunks) &&
+ !q->asoc->peer.intl_capable) {
+ struct sctp_stream_out *sout;
+ __u16 sid;
+
+		/* The datamsg is not finished, so save it as the current one,
+		 * in case the application switches schedulers or a
+		 * higher-priority stream comes in.
+ */
+ sid = sctp_chunk_stream_no(ch);
+ sout = SCTP_SO(&q->asoc->stream, sid);
+ q->asoc->stream.out_curr = sout;
+ return;
+ }
+
+ q->asoc->stream.out_curr = NULL;
+ q->sched->dequeue_done(q, ch);
+}
+
+/* Auxiliary functions for the schedulers */
+void sctp_sched_dequeue_common(struct sctp_outq *q, struct sctp_chunk *ch)
+{
+ list_del_init(&ch->list);
+ list_del_init(&ch->stream_list);
+ q->out_qlen -= ch->skb->len;
+}
+
+int sctp_sched_init_sid(struct sctp_stream *stream, __u16 sid, gfp_t gfp)
+{
+ struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
+ struct sctp_stream_out_ext *ext = SCTP_SO(stream, sid)->ext;
+
+ INIT_LIST_HEAD(&ext->outq);
+ return sched->init_sid(stream, sid, gfp);
+}
+
+struct sctp_sched_ops *sctp_sched_ops_from_stream(struct sctp_stream *stream)
+{
+ struct sctp_association *asoc;
+
+ asoc = container_of(stream, struct sctp_association, stream);
+
+ return asoc->outqueue.sched;
+}
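
sctp_sched_set_sched() and sctp_sched_set_value() above are the kernel side of the SCTP_STREAM_SCHEDULER and SCTP_STREAM_SCHEDULER_VALUE socket options. A minimal userspace sketch, assuming those options and struct sctp_assoc_value from the SCTP UAPI headers, for switching an association to the priority scheduler registered as SCTP_SS_PRIO:

    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/sctp.h>

    /* Sketch: select the priority-based stream scheduler for an association. */
    static int use_prio_scheduler(int fd, sctp_assoc_t assoc_id)
    {
            struct sctp_assoc_value av = {
                    .assoc_id    = assoc_id,
                    .assoc_value = SCTP_SS_PRIO, /* handled by sctp_sched_set_sched() */
            };

            return setsockopt(fd, IPPROTO_SCTP, SCTP_STREAM_SCHEDULER,
                              &av, sizeof(av));
    }

Passing SCTP_SS_FCFS or SCTP_SS_RR instead selects the first-come-first-served ops table registered in this file or the round-robin one from stream_sched_rr.c.
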
diff --git a/net/sctp/stream_sched_prio.c b/net/sctp/stream_sched_prio.c
new file mode 100644
index 000000000..7dd9f8b38
--- /dev/null
+++ b/net/sctp/stream_sched_prio.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* SCTP kernel implementation
+ * (C) Copyright Red Hat Inc. 2017
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * These functions manipulate sctp stream queue/scheduling.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+ */
+
+#include <linux/list.h>
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+#include <net/sctp/stream_sched.h>
+
+/* Priority handling
+ * RFC DRAFT ndata section 3.4
+ */
+
+static void sctp_sched_prio_unsched_all(struct sctp_stream *stream);
+
+static struct sctp_stream_priorities *sctp_sched_prio_head_get(struct sctp_stream_priorities *p)
+{
+ p->users++;
+ return p;
+}
+
+static void sctp_sched_prio_head_put(struct sctp_stream_priorities *p)
+{
+ if (p && --p->users == 0)
+ kfree(p);
+}
+
+static struct sctp_stream_priorities *sctp_sched_prio_new_head(
+ struct sctp_stream *stream, int prio, gfp_t gfp)
+{
+ struct sctp_stream_priorities *p;
+
+ p = kmalloc(sizeof(*p), gfp);
+ if (!p)
+ return NULL;
+
+ INIT_LIST_HEAD(&p->prio_sched);
+ INIT_LIST_HEAD(&p->active);
+ p->next = NULL;
+ p->prio = prio;
+ p->users = 1;
+
+ return p;
+}
+
+static struct sctp_stream_priorities *sctp_sched_prio_get_head(
+ struct sctp_stream *stream, int prio, gfp_t gfp)
+{
+ struct sctp_stream_priorities *p;
+ int i;
+
+ /* Look into scheduled priorities first, as they are sorted and
+ * we can find it fast IF it's scheduled.
+ */
+ list_for_each_entry(p, &stream->prio_list, prio_sched) {
+ if (p->prio == prio)
+ return sctp_sched_prio_head_get(p);
+ if (p->prio > prio)
+ break;
+ }
+
+ /* No luck. So we search on all streams now. */
+ for (i = 0; i < stream->outcnt; i++) {
+ if (!SCTP_SO(stream, i)->ext)
+ continue;
+
+ p = SCTP_SO(stream, i)->ext->prio_head;
+ if (!p)
+			/* Means the remaining streams are not initialized
+			 * either.
+ */
+ break;
+ if (p->prio == prio)
+ return sctp_sched_prio_head_get(p);
+ }
+
+ /* If not even there, allocate a new one. */
+ return sctp_sched_prio_new_head(stream, prio, gfp);
+}
+
+static void sctp_sched_prio_next_stream(struct sctp_stream_priorities *p)
+{
+ struct list_head *pos;
+
+ pos = p->next->prio_list.next;
+ if (pos == &p->active)
+ pos = pos->next;
+ p->next = list_entry(pos, struct sctp_stream_out_ext, prio_list);
+}
+
+static bool sctp_sched_prio_unsched(struct sctp_stream_out_ext *soute)
+{
+ bool scheduled = false;
+
+ if (!list_empty(&soute->prio_list)) {
+ struct sctp_stream_priorities *prio_head = soute->prio_head;
+
+ /* Scheduled */
+ scheduled = true;
+
+ if (prio_head->next == soute)
+ /* Try to move to the next stream */
+ sctp_sched_prio_next_stream(prio_head);
+
+ list_del_init(&soute->prio_list);
+
+ /* Also unsched the priority if this was the last stream */
+ if (list_empty(&prio_head->active)) {
+ list_del_init(&prio_head->prio_sched);
+ /* If there is no stream left, clear next */
+ prio_head->next = NULL;
+ }
+ }
+
+ return scheduled;
+}
+
+static void sctp_sched_prio_sched(struct sctp_stream *stream,
+ struct sctp_stream_out_ext *soute)
+{
+ struct sctp_stream_priorities *prio, *prio_head;
+
+ prio_head = soute->prio_head;
+
+ /* Nothing to do if already scheduled */
+ if (!list_empty(&soute->prio_list))
+ return;
+
+ /* Schedule the stream. If there is a next, we schedule the new
+ * one before it, so it's the last in round robin order.
+ * If there isn't, we also have to schedule the priority.
+ */
+ if (prio_head->next) {
+ list_add(&soute->prio_list, prio_head->next->prio_list.prev);
+ return;
+ }
+
+ list_add(&soute->prio_list, &prio_head->active);
+ prio_head->next = soute;
+
+ list_for_each_entry(prio, &stream->prio_list, prio_sched) {
+ if (prio->prio > prio_head->prio) {
+ list_add(&prio_head->prio_sched, prio->prio_sched.prev);
+ return;
+ }
+ }
+
+ list_add_tail(&prio_head->prio_sched, &stream->prio_list);
+}
+
+static int sctp_sched_prio_set(struct sctp_stream *stream, __u16 sid,
+ __u16 prio, gfp_t gfp)
+{
+ struct sctp_stream_out *sout = SCTP_SO(stream, sid);
+ struct sctp_stream_out_ext *soute = sout->ext;
+ struct sctp_stream_priorities *prio_head, *old;
+ bool reschedule = false;
+
+ old = soute->prio_head;
+ if (old && old->prio == prio)
+ return 0;
+
+ prio_head = sctp_sched_prio_get_head(stream, prio, gfp);
+ if (!prio_head)
+ return -ENOMEM;
+
+ reschedule = sctp_sched_prio_unsched(soute);
+ soute->prio_head = prio_head;
+ if (reschedule)
+ sctp_sched_prio_sched(stream, soute);
+
+ sctp_sched_prio_head_put(old);
+ return 0;
+}
+
+static int sctp_sched_prio_get(struct sctp_stream *stream, __u16 sid,
+ __u16 *value)
+{
+ *value = SCTP_SO(stream, sid)->ext->prio_head->prio;
+ return 0;
+}
+
+static int sctp_sched_prio_init(struct sctp_stream *stream)
+{
+ INIT_LIST_HEAD(&stream->prio_list);
+
+ return 0;
+}
+
+static int sctp_sched_prio_init_sid(struct sctp_stream *stream, __u16 sid,
+ gfp_t gfp)
+{
+ INIT_LIST_HEAD(&SCTP_SO(stream, sid)->ext->prio_list);
+ return sctp_sched_prio_set(stream, sid, 0, gfp);
+}
+
+static void sctp_sched_prio_free_sid(struct sctp_stream *stream, __u16 sid)
+{
+ sctp_sched_prio_head_put(SCTP_SO(stream, sid)->ext->prio_head);
+ SCTP_SO(stream, sid)->ext->prio_head = NULL;
+}
+
+static void sctp_sched_prio_free(struct sctp_stream *stream)
+{
+ struct sctp_stream_priorities *prio, *n;
+ LIST_HEAD(list);
+ int i;
+
+ /* As we don't keep a list of priorities, to avoid multiple
+ * frees we have to do it in 3 steps:
+ * 1. unsched everyone, so the lists are free to use in 2.
+ * 2. build the list of the priorities
+ * 3. free the list
+ */
+ sctp_sched_prio_unsched_all(stream);
+ for (i = 0; i < stream->outcnt; i++) {
+ if (!SCTP_SO(stream, i)->ext)
+ continue;
+ prio = SCTP_SO(stream, i)->ext->prio_head;
+ if (prio && list_empty(&prio->prio_sched))
+ list_add(&prio->prio_sched, &list);
+ }
+ list_for_each_entry_safe(prio, n, &list, prio_sched) {
+ list_del_init(&prio->prio_sched);
+ kfree(prio);
+ }
+}
+
+static void sctp_sched_prio_enqueue(struct sctp_outq *q,
+ struct sctp_datamsg *msg)
+{
+ struct sctp_stream *stream;
+ struct sctp_chunk *ch;
+ __u16 sid;
+
+ ch = list_first_entry(&msg->chunks, struct sctp_chunk, frag_list);
+ sid = sctp_chunk_stream_no(ch);
+ stream = &q->asoc->stream;
+ sctp_sched_prio_sched(stream, SCTP_SO(stream, sid)->ext);
+}
+
+static struct sctp_chunk *sctp_sched_prio_dequeue(struct sctp_outq *q)
+{
+ struct sctp_stream *stream = &q->asoc->stream;
+ struct sctp_stream_priorities *prio;
+ struct sctp_stream_out_ext *soute;
+ struct sctp_chunk *ch = NULL;
+
+ /* Bail out quickly if queue is empty */
+ if (list_empty(&q->out_chunk_list))
+ goto out;
+
+ /* Find which chunk is next. It's easy, it's either the current
+ * one or the first chunk on the next active stream.
+ */
+ if (stream->out_curr) {
+ soute = stream->out_curr->ext;
+ } else {
+ prio = list_entry(stream->prio_list.next,
+ struct sctp_stream_priorities, prio_sched);
+ soute = prio->next;
+ }
+ ch = list_entry(soute->outq.next, struct sctp_chunk, stream_list);
+ sctp_sched_dequeue_common(q, ch);
+
+out:
+ return ch;
+}
+
+static void sctp_sched_prio_dequeue_done(struct sctp_outq *q,
+ struct sctp_chunk *ch)
+{
+ struct sctp_stream_priorities *prio;
+ struct sctp_stream_out_ext *soute;
+ __u16 sid;
+
+ /* Last chunk on that msg, move to the next stream on
+ * this priority.
+ */
+ sid = sctp_chunk_stream_no(ch);
+ soute = SCTP_SO(&q->asoc->stream, sid)->ext;
+ prio = soute->prio_head;
+
+ sctp_sched_prio_next_stream(prio);
+
+ if (list_empty(&soute->outq))
+ sctp_sched_prio_unsched(soute);
+}
+
+static void sctp_sched_prio_sched_all(struct sctp_stream *stream)
+{
+ struct sctp_association *asoc;
+ struct sctp_stream_out *sout;
+ struct sctp_chunk *ch;
+
+ asoc = container_of(stream, struct sctp_association, stream);
+ list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list) {
+ __u16 sid;
+
+ sid = sctp_chunk_stream_no(ch);
+ sout = SCTP_SO(stream, sid);
+ if (sout->ext)
+ sctp_sched_prio_sched(stream, sout->ext);
+ }
+}
+
+static void sctp_sched_prio_unsched_all(struct sctp_stream *stream)
+{
+ struct sctp_stream_priorities *p, *tmp;
+ struct sctp_stream_out_ext *soute, *souttmp;
+
+ list_for_each_entry_safe(p, tmp, &stream->prio_list, prio_sched)
+ list_for_each_entry_safe(soute, souttmp, &p->active, prio_list)
+ sctp_sched_prio_unsched(soute);
+}
+
+static struct sctp_sched_ops sctp_sched_prio = {
+ .set = sctp_sched_prio_set,
+ .get = sctp_sched_prio_get,
+ .init = sctp_sched_prio_init,
+ .init_sid = sctp_sched_prio_init_sid,
+ .free_sid = sctp_sched_prio_free_sid,
+ .free = sctp_sched_prio_free,
+ .enqueue = sctp_sched_prio_enqueue,
+ .dequeue = sctp_sched_prio_dequeue,
+ .dequeue_done = sctp_sched_prio_dequeue_done,
+ .sched_all = sctp_sched_prio_sched_all,
+ .unsched_all = sctp_sched_prio_unsched_all,
+};
+
+void sctp_sched_ops_prio_init(void)
+{
+ sctp_sched_ops_register(SCTP_SS_PRIO, &sctp_sched_prio);
+}
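
Per-stream priority values for this scheduler reach sctp_sched_prio_set() above through the SCTP_STREAM_SCHEDULER_VALUE socket option. Because sctp_sched_prio_sched() keeps the priority heads sorted in ascending order and dequeue starts from the head of that list, streams with lower numeric values are served first. A hypothetical userspace sketch, assuming struct sctp_stream_value from the SCTP UAPI headers:

    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/sctp.h>

    /* Sketch: give stream 0 a more urgent priority than stream 1
     * once the SCTP_SS_PRIO scheduler is active.
     */
    static int set_stream_prios(int fd, sctp_assoc_t assoc_id)
    {
            struct sctp_stream_value sv = {
                    .assoc_id     = assoc_id,
                    .stream_id    = 0,
                    .stream_value = 1,      /* lower value: dequeued earlier */
            };

            if (setsockopt(fd, IPPROTO_SCTP, SCTP_STREAM_SCHEDULER_VALUE,
                           &sv, sizeof(sv)) < 0)
                    return -1;

            sv.stream_id = 1;
            sv.stream_value = 10;           /* lower-priority bulk stream */
            return setsockopt(fd, IPPROTO_SCTP, SCTP_STREAM_SCHEDULER_VALUE,
                              &sv, sizeof(sv));
    }
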
diff --git a/net/sctp/stream_sched_rr.c b/net/sctp/stream_sched_rr.c
new file mode 100644
index 000000000..cc444fe0d
--- /dev/null
+++ b/net/sctp/stream_sched_rr.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* SCTP kernel implementation
+ * (C) Copyright Red Hat Inc. 2017
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * These functions manipulate sctp stream queue/scheduling.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+ */
+
+#include <linux/list.h>
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+#include <net/sctp/stream_sched.h>
+
+/* Round-robin handling
+ * RFC DRAFT ndata section 3.2
+ */
+static void sctp_sched_rr_unsched_all(struct sctp_stream *stream);
+
+static void sctp_sched_rr_next_stream(struct sctp_stream *stream)
+{
+ struct list_head *pos;
+
+ pos = stream->rr_next->rr_list.next;
+ if (pos == &stream->rr_list)
+ pos = pos->next;
+ stream->rr_next = list_entry(pos, struct sctp_stream_out_ext, rr_list);
+}
+
+static void sctp_sched_rr_unsched(struct sctp_stream *stream,
+ struct sctp_stream_out_ext *soute)
+{
+ if (stream->rr_next == soute)
+ /* Try to move to the next stream */
+ sctp_sched_rr_next_stream(stream);
+
+ list_del_init(&soute->rr_list);
+
+ /* If we have no other stream queued, clear next */
+ if (list_empty(&stream->rr_list))
+ stream->rr_next = NULL;
+}
+
+static void sctp_sched_rr_sched(struct sctp_stream *stream,
+ struct sctp_stream_out_ext *soute)
+{
+ if (!list_empty(&soute->rr_list))
+ /* Already scheduled. */
+ return;
+
+ /* Schedule the stream */
+ list_add_tail(&soute->rr_list, &stream->rr_list);
+
+ if (!stream->rr_next)
+ stream->rr_next = soute;
+}
+
+static int sctp_sched_rr_set(struct sctp_stream *stream, __u16 sid,
+ __u16 prio, gfp_t gfp)
+{
+ return 0;
+}
+
+static int sctp_sched_rr_get(struct sctp_stream *stream, __u16 sid,
+ __u16 *value)
+{
+ return 0;
+}
+
+static int sctp_sched_rr_init(struct sctp_stream *stream)
+{
+ INIT_LIST_HEAD(&stream->rr_list);
+ stream->rr_next = NULL;
+
+ return 0;
+}
+
+static int sctp_sched_rr_init_sid(struct sctp_stream *stream, __u16 sid,
+ gfp_t gfp)
+{
+ INIT_LIST_HEAD(&SCTP_SO(stream, sid)->ext->rr_list);
+
+ return 0;
+}
+
+static void sctp_sched_rr_free_sid(struct sctp_stream *stream, __u16 sid)
+{
+}
+
+static void sctp_sched_rr_free(struct sctp_stream *stream)
+{
+ sctp_sched_rr_unsched_all(stream);
+}
+
+static void sctp_sched_rr_enqueue(struct sctp_outq *q,
+ struct sctp_datamsg *msg)
+{
+ struct sctp_stream *stream;
+ struct sctp_chunk *ch;
+ __u16 sid;
+
+ ch = list_first_entry(&msg->chunks, struct sctp_chunk, frag_list);
+ sid = sctp_chunk_stream_no(ch);
+ stream = &q->asoc->stream;
+ sctp_sched_rr_sched(stream, SCTP_SO(stream, sid)->ext);
+}
+
+static struct sctp_chunk *sctp_sched_rr_dequeue(struct sctp_outq *q)
+{
+ struct sctp_stream *stream = &q->asoc->stream;
+ struct sctp_stream_out_ext *soute;
+ struct sctp_chunk *ch = NULL;
+
+ /* Bail out quickly if queue is empty */
+ if (list_empty(&q->out_chunk_list))
+ goto out;
+
+ /* Find which chunk is next */
+ if (stream->out_curr)
+ soute = stream->out_curr->ext;
+ else
+ soute = stream->rr_next;
+ ch = list_entry(soute->outq.next, struct sctp_chunk, stream_list);
+
+ sctp_sched_dequeue_common(q, ch);
+
+out:
+ return ch;
+}
+
+static void sctp_sched_rr_dequeue_done(struct sctp_outq *q,
+ struct sctp_chunk *ch)
+{
+ struct sctp_stream_out_ext *soute;
+ __u16 sid;
+
+ /* Last chunk on that msg, move to the next stream */
+ sid = sctp_chunk_stream_no(ch);
+ soute = SCTP_SO(&q->asoc->stream, sid)->ext;
+
+ sctp_sched_rr_next_stream(&q->asoc->stream);
+
+ if (list_empty(&soute->outq))
+ sctp_sched_rr_unsched(&q->asoc->stream, soute);
+}
+
+static void sctp_sched_rr_sched_all(struct sctp_stream *stream)
+{
+ struct sctp_association *asoc;
+ struct sctp_stream_out_ext *soute;
+ struct sctp_chunk *ch;
+
+ asoc = container_of(stream, struct sctp_association, stream);
+ list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list) {
+ __u16 sid;
+
+ sid = sctp_chunk_stream_no(ch);
+ soute = SCTP_SO(stream, sid)->ext;
+ if (soute)
+ sctp_sched_rr_sched(stream, soute);
+ }
+}
+
+static void sctp_sched_rr_unsched_all(struct sctp_stream *stream)
+{
+ struct sctp_stream_out_ext *soute, *tmp;
+
+ list_for_each_entry_safe(soute, tmp, &stream->rr_list, rr_list)
+ sctp_sched_rr_unsched(stream, soute);
+}
+
+static struct sctp_sched_ops sctp_sched_rr = {
+ .set = sctp_sched_rr_set,
+ .get = sctp_sched_rr_get,
+ .init = sctp_sched_rr_init,
+ .init_sid = sctp_sched_rr_init_sid,
+ .free_sid = sctp_sched_rr_free_sid,
+ .free = sctp_sched_rr_free,
+ .enqueue = sctp_sched_rr_enqueue,
+ .dequeue = sctp_sched_rr_dequeue,
+ .dequeue_done = sctp_sched_rr_dequeue_done,
+ .sched_all = sctp_sched_rr_sched_all,
+ .unsched_all = sctp_sched_rr_unsched_all,
+};
+
+void sctp_sched_ops_rr_init(void)
+{
+ sctp_sched_ops_register(SCTP_SS_RR, &sctp_sched_rr);
+}
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
new file mode 100644
index 000000000..43ebf0900
--- /dev/null
+++ b/net/sctp/sysctl.c
@@ -0,0 +1,633 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2002, 2004
+ * Copyright (c) 2002 Intel Corp.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * Sysctl related interfaces for SCTP.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Mingqin Liu <liuming@us.ibm.com>
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * Ardelle Fan <ardelle.fan@intel.com>
+ * Ryan Layer <rmlayer@us.ibm.com>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <net/sctp/structs.h>
+#include <net/sctp/sctp.h>
+#include <linux/sysctl.h>
+
+static int timer_max = 86400000; /* ms in one day */
+static int sack_timer_min = 1;
+static int sack_timer_max = 500;
+static int addr_scope_max = SCTP_SCOPE_POLICY_MAX;
+static int rwnd_scale_max = 16;
+static int rto_alpha_min = 0;
+static int rto_beta_min = 0;
+static int rto_alpha_max = 1000;
+static int rto_beta_max = 1000;
+static int pf_expose_max = SCTP_PF_EXPOSE_MAX;
+static int ps_retrans_max = SCTP_PS_RETRANS_MAX;
+static int udp_port_max = 65535;
+
+static unsigned long max_autoclose_min = 0;
+static unsigned long max_autoclose_max =
+ (MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX)
+ ? UINT_MAX : MAX_SCHEDULE_TIMEOUT / HZ;
+
+static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *ppos);
+static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *ppos);
+static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write, void *buffer,
+ size_t *lenp, loff_t *ppos);
+static int proc_sctp_do_udp_port(struct ctl_table *ctl, int write, void *buffer,
+ size_t *lenp, loff_t *ppos);
+static int proc_sctp_do_alpha_beta(struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *ppos);
+static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *ppos);
+static int proc_sctp_do_probe_interval(struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *ppos);
+
+static struct ctl_table sctp_table[] = {
+ {
+ .procname = "sctp_mem",
+ .data = &sysctl_sctp_mem,
+ .maxlen = sizeof(sysctl_sctp_mem),
+ .mode = 0644,
+ .proc_handler = proc_doulongvec_minmax
+ },
+ {
+ .procname = "sctp_rmem",
+ .data = &sysctl_sctp_rmem,
+ .maxlen = sizeof(sysctl_sctp_rmem),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "sctp_wmem",
+ .data = &sysctl_sctp_wmem,
+ .maxlen = sizeof(sysctl_sctp_wmem),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+
+ { /* sentinel */ }
+};
+
+/* The following index defines are used in sctp_sysctl_net_register().
+ * If you add new items to the sctp_net_table, please ensure that
+ * the index values of these defines hold the same meaning indicated by
+ * their macro names when they appear in sctp_net_table.
+ */
+#define SCTP_RTO_MIN_IDX 0
+#define SCTP_RTO_MAX_IDX 1
+#define SCTP_PF_RETRANS_IDX 2
+#define SCTP_PS_RETRANS_IDX 3
+
+static struct ctl_table sctp_net_table[] = {
+ [SCTP_RTO_MIN_IDX] = {
+ .procname = "rto_min",
+ .data = &init_net.sctp.rto_min,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_sctp_do_rto_min,
+ .extra1 = SYSCTL_ONE,
+ .extra2 = &init_net.sctp.rto_max
+ },
+ [SCTP_RTO_MAX_IDX] = {
+ .procname = "rto_max",
+ .data = &init_net.sctp.rto_max,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_sctp_do_rto_max,
+ .extra1 = &init_net.sctp.rto_min,
+ .extra2 = &timer_max
+ },
+ [SCTP_PF_RETRANS_IDX] = {
+ .procname = "pf_retrans",
+ .data = &init_net.sctp.pf_retrans,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = &init_net.sctp.ps_retrans,
+ },
+ [SCTP_PS_RETRANS_IDX] = {
+ .procname = "ps_retrans",
+ .data = &init_net.sctp.ps_retrans,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &init_net.sctp.pf_retrans,
+ .extra2 = &ps_retrans_max,
+ },
+ {
+ .procname = "rto_initial",
+ .data = &init_net.sctp.rto_initial,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ONE,
+ .extra2 = &timer_max
+ },
+ {
+ .procname = "rto_alpha_exp_divisor",
+ .data = &init_net.sctp.rto_alpha,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_sctp_do_alpha_beta,
+ .extra1 = &rto_alpha_min,
+ .extra2 = &rto_alpha_max,
+ },
+ {
+ .procname = "rto_beta_exp_divisor",
+ .data = &init_net.sctp.rto_beta,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_sctp_do_alpha_beta,
+ .extra1 = &rto_beta_min,
+ .extra2 = &rto_beta_max,
+ },
+ {
+ .procname = "max_burst",
+ .data = &init_net.sctp.max_burst,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_INT_MAX,
+ },
+ {
+ .procname = "cookie_preserve_enable",
+ .data = &init_net.sctp.cookie_preserve_enable,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "cookie_hmac_alg",
+ .data = &init_net.sctp.sctp_hmac_alg,
+ .maxlen = 8,
+ .mode = 0644,
+ .proc_handler = proc_sctp_do_hmac_alg,
+ },
+ {
+ .procname = "valid_cookie_life",
+ .data = &init_net.sctp.valid_cookie_life,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ONE,
+ .extra2 = &timer_max
+ },
+ {
+ .procname = "sack_timeout",
+ .data = &init_net.sctp.sack_timeout,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &sack_timer_min,
+ .extra2 = &sack_timer_max,
+ },
+ {
+ .procname = "hb_interval",
+ .data = &init_net.sctp.hb_interval,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ONE,
+ .extra2 = &timer_max
+ },
+ {
+ .procname = "association_max_retrans",
+ .data = &init_net.sctp.max_retrans_association,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ONE,
+ .extra2 = SYSCTL_INT_MAX,
+ },
+ {
+ .procname = "path_max_retrans",
+ .data = &init_net.sctp.max_retrans_path,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ONE,
+ .extra2 = SYSCTL_INT_MAX,
+ },
+ {
+ .procname = "max_init_retransmits",
+ .data = &init_net.sctp.max_retrans_init,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ONE,
+ .extra2 = SYSCTL_INT_MAX,
+ },
+ {
+ .procname = "sndbuf_policy",
+ .data = &init_net.sctp.sndbuf_policy,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "rcvbuf_policy",
+ .data = &init_net.sctp.rcvbuf_policy,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "default_auto_asconf",
+ .data = &init_net.sctp.default_auto_asconf,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "addip_enable",
+ .data = &init_net.sctp.addip_enable,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "addip_noauth_enable",
+ .data = &init_net.sctp.addip_noauth,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "prsctp_enable",
+ .data = &init_net.sctp.prsctp_enable,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "reconf_enable",
+ .data = &init_net.sctp.reconf_enable,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "auth_enable",
+ .data = &init_net.sctp.auth_enable,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_sctp_do_auth,
+ },
+ {
+ .procname = "intl_enable",
+ .data = &init_net.sctp.intl_enable,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "ecn_enable",
+ .data = &init_net.sctp.ecn_enable,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "plpmtud_probe_interval",
+ .data = &init_net.sctp.probe_interval,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_sctp_do_probe_interval,
+ },
+ {
+ .procname = "udp_port",
+ .data = &init_net.sctp.udp_port,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_sctp_do_udp_port,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = &udp_port_max,
+ },
+ {
+ .procname = "encap_port",
+ .data = &init_net.sctp.encap_port,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = &udp_port_max,
+ },
+ {
+ .procname = "addr_scope_policy",
+ .data = &init_net.sctp.scope_policy,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = &addr_scope_max,
+ },
+ {
+ .procname = "rwnd_update_shift",
+ .data = &init_net.sctp.rwnd_upd_shift,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .extra1 = SYSCTL_ONE,
+ .extra2 = &rwnd_scale_max,
+ },
+ {
+ .procname = "max_autoclose",
+ .data = &init_net.sctp.max_autoclose,
+ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+ .proc_handler = &proc_doulongvec_minmax,
+ .extra1 = &max_autoclose_min,
+ .extra2 = &max_autoclose_max,
+ },
+ {
+ .procname = "pf_enable",
+ .data = &init_net.sctp.pf_enable,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "pf_expose",
+ .data = &init_net.sctp.pf_expose,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = &pf_expose_max,
+ },
+
+ { /* sentinel */ }
+};
+
+static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ struct net *net = current->nsproxy->net_ns;
+ struct ctl_table tbl;
+ bool changed = false;
+ char *none = "none";
+ char tmp[8] = {0};
+ int ret;
+
+ memset(&tbl, 0, sizeof(struct ctl_table));
+
+ if (write) {
+ tbl.data = tmp;
+ tbl.maxlen = sizeof(tmp);
+ } else {
+ tbl.data = net->sctp.sctp_hmac_alg ? : none;
+ tbl.maxlen = strlen(tbl.data);
+ }
+
+ ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
+ if (write && ret == 0) {
+#ifdef CONFIG_CRYPTO_MD5
+ if (!strncmp(tmp, "md5", 3)) {
+ net->sctp.sctp_hmac_alg = "md5";
+ changed = true;
+ }
+#endif
+#ifdef CONFIG_CRYPTO_SHA1
+ if (!strncmp(tmp, "sha1", 4)) {
+ net->sctp.sctp_hmac_alg = "sha1";
+ changed = true;
+ }
+#endif
+ if (!strncmp(tmp, "none", 4)) {
+ net->sctp.sctp_hmac_alg = NULL;
+ changed = true;
+ }
+ if (!changed)
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ struct net *net = current->nsproxy->net_ns;
+ unsigned int min = *(unsigned int *) ctl->extra1;
+ unsigned int max = *(unsigned int *) ctl->extra2;
+ struct ctl_table tbl;
+ int ret, new_value;
+
+ memset(&tbl, 0, sizeof(struct ctl_table));
+ tbl.maxlen = sizeof(unsigned int);
+
+ if (write)
+ tbl.data = &new_value;
+ else
+ tbl.data = &net->sctp.rto_min;
+
+ ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
+ if (write && ret == 0) {
+ if (new_value > max || new_value < min)
+ return -EINVAL;
+
+ net->sctp.rto_min = new_value;
+ }
+
+ return ret;
+}
+
+static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ struct net *net = current->nsproxy->net_ns;
+ unsigned int min = *(unsigned int *) ctl->extra1;
+ unsigned int max = *(unsigned int *) ctl->extra2;
+ struct ctl_table tbl;
+ int ret, new_value;
+
+ memset(&tbl, 0, sizeof(struct ctl_table));
+ tbl.maxlen = sizeof(unsigned int);
+
+ if (write)
+ tbl.data = &new_value;
+ else
+ tbl.data = &net->sctp.rto_max;
+
+ ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
+ if (write && ret == 0) {
+ if (new_value > max || new_value < min)
+ return -EINVAL;
+
+ net->sctp.rto_max = new_value;
+ }
+
+ return ret;
+}
+
+static int proc_sctp_do_alpha_beta(struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ if (write)
+ pr_warn_once("Changing rto_alpha or rto_beta may lead to "
+ "suboptimal rtt/srtt estimations!\n");
+
+ return proc_dointvec_minmax(ctl, write, buffer, lenp, ppos);
+}
+
+static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ struct net *net = current->nsproxy->net_ns;
+ struct ctl_table tbl;
+ int new_value, ret;
+
+ memset(&tbl, 0, sizeof(struct ctl_table));
+ tbl.maxlen = sizeof(unsigned int);
+
+ if (write)
+ tbl.data = &new_value;
+ else
+ tbl.data = &net->sctp.auth_enable;
+
+ ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
+ if (write && ret == 0) {
+ struct sock *sk = net->sctp.ctl_sock;
+
+ net->sctp.auth_enable = new_value;
+ /* Update the value in the control socket */
+ lock_sock(sk);
+ sctp_sk(sk)->ep->auth_enable = new_value;
+ release_sock(sk);
+ }
+
+ return ret;
+}
+
+static int proc_sctp_do_udp_port(struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ struct net *net = current->nsproxy->net_ns;
+ unsigned int min = *(unsigned int *)ctl->extra1;
+ unsigned int max = *(unsigned int *)ctl->extra2;
+ struct ctl_table tbl;
+ int ret, new_value;
+
+ memset(&tbl, 0, sizeof(struct ctl_table));
+ tbl.maxlen = sizeof(unsigned int);
+
+ if (write)
+ tbl.data = &new_value;
+ else
+ tbl.data = &net->sctp.udp_port;
+
+ ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
+ if (write && ret == 0) {
+ struct sock *sk = net->sctp.ctl_sock;
+
+ if (new_value > max || new_value < min)
+ return -EINVAL;
+
+ net->sctp.udp_port = new_value;
+ sctp_udp_sock_stop(net);
+ if (new_value) {
+ ret = sctp_udp_sock_start(net);
+ if (ret)
+ net->sctp.udp_port = 0;
+ }
+
+ /* Update the value in the control socket */
+ lock_sock(sk);
+ sctp_sk(sk)->udp_port = htons(net->sctp.udp_port);
+ release_sock(sk);
+ }
+
+ return ret;
+}
+
+static int proc_sctp_do_probe_interval(struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ struct net *net = current->nsproxy->net_ns;
+ struct ctl_table tbl;
+ int ret, new_value;
+
+ memset(&tbl, 0, sizeof(struct ctl_table));
+ tbl.maxlen = sizeof(unsigned int);
+
+ if (write)
+ tbl.data = &new_value;
+ else
+ tbl.data = &net->sctp.probe_interval;
+
+ ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
+ if (write && ret == 0) {
+ if (new_value && new_value < SCTP_PROBE_TIMER_MIN)
+ return -EINVAL;
+
+ net->sctp.probe_interval = new_value;
+ }
+
+ return ret;
+}
+
+int sctp_sysctl_net_register(struct net *net)
+{
+ struct ctl_table *table;
+ int i;
+
+ table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ for (i = 0; table[i].data; i++)
+ table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp;
+
+ table[SCTP_RTO_MIN_IDX].extra2 = &net->sctp.rto_max;
+ table[SCTP_RTO_MAX_IDX].extra1 = &net->sctp.rto_min;
+ table[SCTP_PF_RETRANS_IDX].extra2 = &net->sctp.ps_retrans;
+ table[SCTP_PS_RETRANS_IDX].extra1 = &net->sctp.pf_retrans;
+
+ net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table);
+ if (net->sctp.sysctl_header == NULL) {
+ kfree(table);
+ return -ENOMEM;
+ }
+ return 0;
+}
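These per-net tables surface as writable files under /proc/sys/net/sctp in each network namespace. As a minimal userspace sketch (assuming a standard procfs mount, millisecond units for rto_min, and sufficient privileges for the write-back), the handlers above can be exercised like this:

#include <stdio.h>

int main(void)
{
	const char *path = "/proc/sys/net/sctp/rto_min";
	unsigned int rto_min_ms;
	FILE *f = fopen(path, "r");

	if (!f || fscanf(f, "%u", &rto_min_ms) != 1) {
		perror("read rto_min");
		return 1;
	}
	fclose(f);
	printf("current rto_min: %u ms\n", rto_min_ms);

	f = fopen(path, "w");	/* the write path needs root privileges */
	if (!f || fprintf(f, "%u\n", rto_min_ms) < 0) {
		perror("write rto_min");
		return 1;
	}
	return fclose(f) ? 1 : 0;
}

A value outside the [extra1, extra2] bounds is rejected with -EINVAL by proc_sctp_do_rto_min(), just as a bad write through sysctl(8) would be.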
+
+void sctp_sysctl_net_unregister(struct net *net)
+{
+ struct ctl_table *table;
+
+ table = net->sctp.sysctl_header->ctl_table_arg;
+ unregister_net_sysctl_table(net->sctp.sysctl_header);
+ kfree(table);
+}
+
+static struct ctl_table_header *sctp_sysctl_header;
+
+/* Sysctl registration. */
+void sctp_sysctl_register(void)
+{
+ sctp_sysctl_header = register_net_sysctl(&init_net, "net/sctp", sctp_table);
+}
+
+/* Sysctl deregistration. */
+void sctp_sysctl_unregister(void)
+{
+ unregister_net_sysctl_table(sctp_sysctl_header);
+}
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
new file mode 100644
index 000000000..2990365c2
--- /dev/null
+++ b/net/sctp/transport.c
@@ -0,0 +1,854 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* SCTP kernel implementation
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001-2003 International Business Machines Corp.
+ * Copyright (c) 2001 Intel Corp.
+ * Copyright (c) 2001 La Monte H.P. Yarroll
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * This module provides the abstraction for an SCTP transport representing
+ * a remote transport address. For local transport addresses, we just use
+ * union sctp_addr.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * Xingang Guo <xingang.guo@intel.com>
+ * Hui Huang <hui.huang@nokia.com>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ * Ardelle Fan <ardelle.fan@intel.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/random.h>
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+
+/* 1st Level Abstractions. */
+
+/* Initialize a new transport from provided memory. */
+static struct sctp_transport *sctp_transport_init(struct net *net,
+ struct sctp_transport *peer,
+ const union sctp_addr *addr,
+ gfp_t gfp)
+{
+ /* Copy in the address. */
+ peer->af_specific = sctp_get_af_specific(addr->sa.sa_family);
+ memcpy(&peer->ipaddr, addr, peer->af_specific->sockaddr_len);
+ memset(&peer->saddr, 0, sizeof(union sctp_addr));
+
+ peer->sack_generation = 0;
+
+ /* From 6.3.1 RTO Calculation:
+ *
+ * C1) Until an RTT measurement has been made for a packet sent to the
+ * given destination transport address, set RTO to the protocol
+ * parameter 'RTO.Initial'.
+ */
+ peer->rto = msecs_to_jiffies(net->sctp.rto_initial);
+
+ peer->last_time_heard = 0;
+ peer->last_time_ecne_reduced = jiffies;
+
+ peer->param_flags = SPP_HB_DISABLE |
+ SPP_PMTUD_ENABLE |
+ SPP_SACKDELAY_ENABLE;
+
+ /* Initialize the default path max_retrans. */
+ peer->pathmaxrxt = net->sctp.max_retrans_path;
+ peer->pf_retrans = net->sctp.pf_retrans;
+
+ INIT_LIST_HEAD(&peer->transmitted);
+ INIT_LIST_HEAD(&peer->send_ready);
+ INIT_LIST_HEAD(&peer->transports);
+
+ timer_setup(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event, 0);
+ timer_setup(&peer->hb_timer, sctp_generate_heartbeat_event, 0);
+ timer_setup(&peer->reconf_timer, sctp_generate_reconf_event, 0);
+ timer_setup(&peer->probe_timer, sctp_generate_probe_event, 0);
+ timer_setup(&peer->proto_unreach_timer,
+ sctp_generate_proto_unreach_event, 0);
+
+ /* Initialize the 64-bit random nonce sent with heartbeat. */
+ get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));
+
+ refcount_set(&peer->refcnt, 1);
+
+ return peer;
+}
+
+/* Allocate and initialize a new transport. */
+struct sctp_transport *sctp_transport_new(struct net *net,
+ const union sctp_addr *addr,
+ gfp_t gfp)
+{
+ struct sctp_transport *transport;
+
+ transport = kzalloc(sizeof(*transport), gfp);
+ if (!transport)
+ goto fail;
+
+ if (!sctp_transport_init(net, transport, addr, gfp))
+ goto fail_init;
+
+ SCTP_DBG_OBJCNT_INC(transport);
+
+ return transport;
+
+fail_init:
+ kfree(transport);
+
+fail:
+ return NULL;
+}
+
+/* This transport is no longer needed. Free up if possible, or
+ * delay until its last reference is dropped.
+ */
+void sctp_transport_free(struct sctp_transport *transport)
+{
+ /* Try to delete the heartbeat timer. */
+ if (del_timer(&transport->hb_timer))
+ sctp_transport_put(transport);
+
+ /* Delete the T3_rtx timer if it's active.
+	 * There is no point in not doing this now and letting the
+	 * structure hang around in memory since we know
+	 * the transport is going away.
+ */
+ if (del_timer(&transport->T3_rtx_timer))
+ sctp_transport_put(transport);
+
+ if (del_timer(&transport->reconf_timer))
+ sctp_transport_put(transport);
+
+ if (del_timer(&transport->probe_timer))
+ sctp_transport_put(transport);
+
+ /* Delete the ICMP proto unreachable timer if it's active. */
+ if (del_timer(&transport->proto_unreach_timer))
+ sctp_transport_put(transport);
+
+ sctp_transport_put(transport);
+}
+
+static void sctp_transport_destroy_rcu(struct rcu_head *head)
+{
+ struct sctp_transport *transport;
+
+ transport = container_of(head, struct sctp_transport, rcu);
+
+ dst_release(transport->dst);
+ kfree(transport);
+ SCTP_DBG_OBJCNT_DEC(transport);
+}
+
+/* Destroy the transport data structure.
+ * Assumes there are no more users of this structure.
+ */
+static void sctp_transport_destroy(struct sctp_transport *transport)
+{
+ if (unlikely(refcount_read(&transport->refcnt))) {
+ WARN(1, "Attempt to destroy undead transport %p!\n", transport);
+ return;
+ }
+
+ sctp_packet_free(&transport->packet);
+
+ if (transport->asoc)
+ sctp_association_put(transport->asoc);
+
+ call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
+}
+
+/* Start T3_rtx timer if it is not already running and update the heartbeat
+ * timer. This routine is called every time a DATA chunk is sent.
+ */
+void sctp_transport_reset_t3_rtx(struct sctp_transport *transport)
+{
+ /* RFC 2960 6.3.2 Retransmission Timer Rules
+ *
+	 * R1) Every time a DATA chunk is sent to any address (including a
+	 * retransmission), if the T3-rtx timer of that address is not
+	 * running, start it running so that it will expire after the RTO
+	 * of that address.
+ */
+
+ if (!timer_pending(&transport->T3_rtx_timer))
+ if (!mod_timer(&transport->T3_rtx_timer,
+ jiffies + transport->rto))
+ sctp_transport_hold(transport);
+}
+
+void sctp_transport_reset_hb_timer(struct sctp_transport *transport)
+{
+ unsigned long expires;
+
+ /* When a data chunk is sent, reset the heartbeat interval. */
+ expires = jiffies + sctp_transport_timeout(transport);
+ if (!mod_timer(&transport->hb_timer,
+ expires + prandom_u32_max(transport->rto)))
+ sctp_transport_hold(transport);
+}
+
+void sctp_transport_reset_reconf_timer(struct sctp_transport *transport)
+{
+ if (!timer_pending(&transport->reconf_timer))
+ if (!mod_timer(&transport->reconf_timer,
+ jiffies + transport->rto))
+ sctp_transport_hold(transport);
+}
+
+void sctp_transport_reset_probe_timer(struct sctp_transport *transport)
+{
+ if (!mod_timer(&transport->probe_timer,
+ jiffies + transport->probe_interval))
+ sctp_transport_hold(transport);
+}
+
+void sctp_transport_reset_raise_timer(struct sctp_transport *transport)
+{
+ if (!mod_timer(&transport->probe_timer,
+ jiffies + transport->probe_interval * 30))
+ sctp_transport_hold(transport);
+}
+
+/* This transport has been assigned to an association.
+ * Initialize fields from the association or from the sock itself.
+ * Register the reference count in the association.
+ */
+void sctp_transport_set_owner(struct sctp_transport *transport,
+ struct sctp_association *asoc)
+{
+ transport->asoc = asoc;
+ sctp_association_hold(asoc);
+}
+
+/* Initialize the pmtu of a transport. */
+void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
+{
+ /* If we don't have a fresh route, look one up */
+ if (!transport->dst || transport->dst->obsolete) {
+ sctp_transport_dst_release(transport);
+ transport->af_specific->get_dst(transport, &transport->saddr,
+ &transport->fl, sk);
+ }
+
+ if (transport->param_flags & SPP_PMTUD_DISABLE) {
+ struct sctp_association *asoc = transport->asoc;
+
+ if (!transport->pathmtu && asoc && asoc->pathmtu)
+ transport->pathmtu = asoc->pathmtu;
+ if (transport->pathmtu)
+ return;
+ }
+
+ if (transport->dst)
+ transport->pathmtu = sctp_dst_mtu(transport->dst);
+ else
+ transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
+
+ sctp_transport_pl_update(transport);
+}
+
+void sctp_transport_pl_send(struct sctp_transport *t)
+{
+ if (t->pl.probe_count < SCTP_MAX_PROBES)
+ goto out;
+
+ t->pl.probe_count = 0;
+ if (t->pl.state == SCTP_PL_BASE) {
+ if (t->pl.probe_size == SCTP_BASE_PLPMTU) { /* BASE_PLPMTU Confirmation Failed */
+ t->pl.state = SCTP_PL_ERROR; /* Base -> Error */
+
+ t->pl.pmtu = SCTP_BASE_PLPMTU;
+ t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
+ sctp_assoc_sync_pmtu(t->asoc);
+ }
+ } else if (t->pl.state == SCTP_PL_SEARCH) {
+ if (t->pl.pmtu == t->pl.probe_size) { /* Black Hole Detected */
+ t->pl.state = SCTP_PL_BASE; /* Search -> Base */
+ t->pl.probe_size = SCTP_BASE_PLPMTU;
+ t->pl.probe_high = 0;
+
+ t->pl.pmtu = SCTP_BASE_PLPMTU;
+ t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
+ sctp_assoc_sync_pmtu(t->asoc);
+ } else { /* Normal probe failure. */
+ t->pl.probe_high = t->pl.probe_size;
+ t->pl.probe_size = t->pl.pmtu;
+ }
+ } else if (t->pl.state == SCTP_PL_COMPLETE) {
+ if (t->pl.pmtu == t->pl.probe_size) { /* Black Hole Detected */
+ t->pl.state = SCTP_PL_BASE; /* Search Complete -> Base */
+ t->pl.probe_size = SCTP_BASE_PLPMTU;
+
+ t->pl.pmtu = SCTP_BASE_PLPMTU;
+ t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
+ sctp_assoc_sync_pmtu(t->asoc);
+ }
+ }
+
+out:
+ pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
+ __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);
+ t->pl.probe_count++;
+}
+
+bool sctp_transport_pl_recv(struct sctp_transport *t)
+{
+ pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
+ __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);
+
+ t->pl.pmtu = t->pl.probe_size;
+ t->pl.probe_count = 0;
+ if (t->pl.state == SCTP_PL_BASE) {
+ t->pl.state = SCTP_PL_SEARCH; /* Base -> Search */
+ t->pl.probe_size += SCTP_PL_BIG_STEP;
+ } else if (t->pl.state == SCTP_PL_ERROR) {
+ t->pl.state = SCTP_PL_SEARCH; /* Error -> Search */
+
+ t->pl.pmtu = t->pl.probe_size;
+ t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
+ sctp_assoc_sync_pmtu(t->asoc);
+ t->pl.probe_size += SCTP_PL_BIG_STEP;
+ } else if (t->pl.state == SCTP_PL_SEARCH) {
+ if (!t->pl.probe_high) {
+ if (t->pl.probe_size < SCTP_MAX_PLPMTU) {
+ t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_BIG_STEP,
+ SCTP_MAX_PLPMTU);
+ return false;
+ }
+ t->pl.probe_high = SCTP_MAX_PLPMTU;
+ }
+ t->pl.probe_size += SCTP_PL_MIN_STEP;
+ if (t->pl.probe_size >= t->pl.probe_high) {
+ t->pl.probe_high = 0;
+ t->pl.state = SCTP_PL_COMPLETE; /* Search -> Search Complete */
+
+ t->pl.probe_size = t->pl.pmtu;
+ t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
+ sctp_assoc_sync_pmtu(t->asoc);
+ sctp_transport_reset_raise_timer(t);
+ }
+ } else if (t->pl.state == SCTP_PL_COMPLETE) {
+ /* Raise probe_size again after 30 * interval in Search Complete */
+ t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */
+ t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_MIN_STEP, SCTP_MAX_PLPMTU);
+ }
+
+ return t->pl.state == SCTP_PL_COMPLETE;
+}
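The probe size progression driven by sctp_transport_pl_send() and sctp_transport_pl_recv() can be pictured with a toy simulation. The sketch below is not the kernel's state machine; the step sizes, bounds and the link_mtu stand-in are illustrative assumptions, and the path is assumed to carry at least the base probe size:

#include <stdio.h>

int main(void)
{
	const unsigned int base = 1200, max = 9000;	/* illustrative bounds */
	const unsigned int big_step = 32, min_step = 4;	/* illustrative steps */
	unsigned int link_mtu = 1480;	/* assumed payload limit of the path */
	unsigned int probe = base, pmtu = base, high = 0;

	while (1) {
		if (probe <= link_mtu) {		/* probe acknowledged */
			pmtu = probe;
			if (!high && probe < max) {	/* coarse search upward */
				probe += big_step;
				if (probe > max)
					probe = max;
				continue;
			}
			if (!high)
				high = max;
			probe += min_step;		/* fine search upward */
			if (probe >= high)
				break;			/* search complete */
		} else {				/* probe went unanswered */
			high = probe;			/* remember the ceiling */
			probe = pmtu;			/* restart fine search */
		}
	}
	printf("search complete, plpmtu = %u\n", pmtu);
	return 0;
}

With these numbers the search settles on 1480, the largest probe the assumed path answered.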
+
+static bool sctp_transport_pl_toobig(struct sctp_transport *t, u32 pmtu)
+{
+ pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, ptb: %d\n",
+ __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, pmtu);
+
+ if (pmtu < SCTP_MIN_PLPMTU || pmtu >= t->pl.probe_size)
+ return false;
+
+ if (t->pl.state == SCTP_PL_BASE) {
+ if (pmtu >= SCTP_MIN_PLPMTU && pmtu < SCTP_BASE_PLPMTU) {
+ t->pl.state = SCTP_PL_ERROR; /* Base -> Error */
+
+ t->pl.pmtu = SCTP_BASE_PLPMTU;
+ t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
+ return true;
+ }
+ } else if (t->pl.state == SCTP_PL_SEARCH) {
+ if (pmtu >= SCTP_BASE_PLPMTU && pmtu < t->pl.pmtu) {
+ t->pl.state = SCTP_PL_BASE; /* Search -> Base */
+ t->pl.probe_size = SCTP_BASE_PLPMTU;
+ t->pl.probe_count = 0;
+
+ t->pl.probe_high = 0;
+ t->pl.pmtu = SCTP_BASE_PLPMTU;
+ t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
+ return true;
+ } else if (pmtu > t->pl.pmtu && pmtu < t->pl.probe_size) {
+ t->pl.probe_size = pmtu;
+ t->pl.probe_count = 0;
+ }
+ } else if (t->pl.state == SCTP_PL_COMPLETE) {
+ if (pmtu >= SCTP_BASE_PLPMTU && pmtu < t->pl.pmtu) {
+ t->pl.state = SCTP_PL_BASE; /* Complete -> Base */
+ t->pl.probe_size = SCTP_BASE_PLPMTU;
+ t->pl.probe_count = 0;
+
+ t->pl.probe_high = 0;
+ t->pl.pmtu = SCTP_BASE_PLPMTU;
+ t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
+ sctp_transport_reset_probe_timer(t);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
+{
+ struct sock *sk = t->asoc->base.sk;
+ struct dst_entry *dst;
+ bool change = true;
+
+ if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
+ pr_warn_ratelimited("%s: Reported pmtu %d too low, using default minimum of %d\n",
+ __func__, pmtu, SCTP_DEFAULT_MINSEGMENT);
+ /* Use default minimum segment instead */
+ pmtu = SCTP_DEFAULT_MINSEGMENT;
+ }
+ pmtu = SCTP_TRUNC4(pmtu);
+
+ if (sctp_transport_pl_enabled(t))
+ return sctp_transport_pl_toobig(t, pmtu - sctp_transport_pl_hlen(t));
+
+ dst = sctp_transport_dst_check(t);
+ if (dst) {
+ struct sctp_pf *pf = sctp_get_pf_specific(dst->ops->family);
+ union sctp_addr addr;
+
+ pf->af->from_sk(&addr, sk);
+ pf->to_sk_daddr(&t->ipaddr, sk);
+ dst->ops->update_pmtu(dst, sk, NULL, pmtu, true);
+ pf->to_sk_daddr(&addr, sk);
+
+ dst = sctp_transport_dst_check(t);
+ }
+
+ if (!dst) {
+ t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
+ dst = t->dst;
+ }
+
+ if (dst) {
+		/* Re-fetch, as underlying layers may have a higher minimum size */
+ pmtu = sctp_dst_mtu(dst);
+ change = t->pathmtu != pmtu;
+ }
+ t->pathmtu = pmtu;
+
+ return change;
+}
+
+/* Caches the dst entry and source address for a transport's destination
+ * address.
+ */
+void sctp_transport_route(struct sctp_transport *transport,
+ union sctp_addr *saddr, struct sctp_sock *opt)
+{
+ struct sctp_association *asoc = transport->asoc;
+ struct sctp_af *af = transport->af_specific;
+
+ sctp_transport_dst_release(transport);
+ af->get_dst(transport, saddr, &transport->fl, sctp_opt2sk(opt));
+
+ if (saddr)
+ memcpy(&transport->saddr, saddr, sizeof(union sctp_addr));
+ else
+ af->get_saddr(opt, transport, &transport->fl);
+
+ sctp_transport_pmtu(transport, sctp_opt2sk(opt));
+
+ /* Initialize sk->sk_rcv_saddr, if the transport is the
+ * association's active path for getsockname().
+ */
+ if (transport->dst && asoc &&
+ (!asoc->peer.primary_path || transport == asoc->peer.active_path))
+ opt->pf->to_sk_saddr(&transport->saddr, asoc->base.sk);
+}
+
+/* Hold a reference to a transport. */
+int sctp_transport_hold(struct sctp_transport *transport)
+{
+ return refcount_inc_not_zero(&transport->refcnt);
+}
+
+/* Release a reference to a transport and clean up
+ * if there are no more references.
+ */
+void sctp_transport_put(struct sctp_transport *transport)
+{
+ if (refcount_dec_and_test(&transport->refcnt))
+ sctp_transport_destroy(transport);
+}
+
+/* Update transport's RTO based on the newly calculated RTT. */
+void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
+{
+ if (unlikely(!tp->rto_pending))
+ /* We should not be doing any RTO updates unless rto_pending is set. */
+ pr_debug("%s: rto_pending not set on transport %p!\n", __func__, tp);
+
+ if (tp->rttvar || tp->srtt) {
+ struct net *net = tp->asoc->base.net;
+ /* 6.3.1 C3) When a new RTT measurement R' is made, set
+ * RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'|
+ * SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R'
+ */
+
+ /* Note: The above algorithm has been rewritten to
+ * express rto_beta and rto_alpha as inverse powers
+ * of two.
+ * For example, assuming the default value of RTO.Alpha of
+ * 1/8, rto_alpha would be expressed as 3.
+ */
+ tp->rttvar = tp->rttvar - (tp->rttvar >> net->sctp.rto_beta)
+ + (((__u32)abs((__s64)tp->srtt - (__s64)rtt)) >> net->sctp.rto_beta);
+ tp->srtt = tp->srtt - (tp->srtt >> net->sctp.rto_alpha)
+ + (rtt >> net->sctp.rto_alpha);
+ } else {
+ /* 6.3.1 C2) When the first RTT measurement R is made, set
+ * SRTT <- R, RTTVAR <- R/2.
+ */
+ tp->srtt = rtt;
+ tp->rttvar = rtt >> 1;
+ }
+
+ /* 6.3.1 G1) Whenever RTTVAR is computed, if RTTVAR = 0, then
+ * adjust RTTVAR <- G, where G is the CLOCK GRANULARITY.
+ */
+ if (tp->rttvar == 0)
+ tp->rttvar = SCTP_CLOCK_GRANULARITY;
+
+ /* 6.3.1 C3) After the computation, update RTO <- SRTT + 4 * RTTVAR. */
+ tp->rto = tp->srtt + (tp->rttvar << 2);
+
+ /* 6.3.1 C6) Whenever RTO is computed, if it is less than RTO.Min
+ * seconds then it is rounded up to RTO.Min seconds.
+ */
+ if (tp->rto < tp->asoc->rto_min)
+ tp->rto = tp->asoc->rto_min;
+
+ /* 6.3.1 C7) A maximum value may be placed on RTO provided it is
+ * at least RTO.max seconds.
+ */
+ if (tp->rto > tp->asoc->rto_max)
+ tp->rto = tp->asoc->rto_max;
+
+ sctp_max_rto(tp->asoc, tp);
+ tp->rtt = rtt;
+
+ /* Reset rto_pending so that a new RTT measurement is started when a
+ * new data chunk is sent.
+ */
+ tp->rto_pending = 0;
+
+ pr_debug("%s: transport:%p, rtt:%d, srtt:%d rttvar:%d, rto:%ld\n",
+ __func__, tp, rtt, tp->srtt, tp->rttvar, tp->rto);
+}
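A standalone sketch of the shift-based estimator above, assuming the default exponents rto_alpha = 3 (1/8) and rto_beta = 2 (1/4) and plain millisecond values instead of jiffies:

#include <stdio.h>
#include <stdlib.h>

static unsigned int srtt, rttvar, rto;

static void update_rto(unsigned int rtt)
{
	const unsigned int alpha = 3, beta = 2;	/* inverse powers of two */
	const unsigned int rto_min = 1000, rto_max = 60000;

	if (srtt || rttvar) {
		/* RTTVAR <- (1 - Beta) * RTTVAR + Beta * |SRTT - R'| */
		rttvar = rttvar - (rttvar >> beta) +
			 ((unsigned int)abs((int)srtt - (int)rtt) >> beta);
		/* SRTT <- (1 - Alpha) * SRTT + Alpha * R' */
		srtt = srtt - (srtt >> alpha) + (rtt >> alpha);
	} else {
		srtt = rtt;		/* first measurement: SRTT <- R */
		rttvar = rtt >> 1;	/* RTTVAR <- R/2 */
	}
	if (!rttvar)
		rttvar = 1;		/* stand-in for the clock granularity */
	rto = srtt + (rttvar << 2);	/* RTO <- SRTT + 4 * RTTVAR */
	if (rto < rto_min)
		rto = rto_min;
	if (rto > rto_max)
		rto = rto_max;
}

int main(void)
{
	unsigned int samples[] = { 120, 100, 180, 90 };

	for (unsigned int i = 0; i < 4; i++) {
		update_rto(samples[i]);
		printf("rtt=%u srtt=%u rttvar=%u rto=%u\n",
		       samples[i], srtt, rttvar, rto);
	}
	return 0;
}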
+
+/* This routine updates the transport's cwnd and partial_bytes_acked
+ * parameters based on the bytes acked in the received SACK.
+ */
+void sctp_transport_raise_cwnd(struct sctp_transport *transport,
+ __u32 sack_ctsn, __u32 bytes_acked)
+{
+ struct sctp_association *asoc = transport->asoc;
+ __u32 cwnd, ssthresh, flight_size, pba, pmtu;
+
+ cwnd = transport->cwnd;
+ flight_size = transport->flight_size;
+
+ /* See if we need to exit Fast Recovery first */
+ if (asoc->fast_recovery &&
+ TSN_lte(asoc->fast_recovery_exit, sack_ctsn))
+ asoc->fast_recovery = 0;
+
+ ssthresh = transport->ssthresh;
+ pba = transport->partial_bytes_acked;
+ pmtu = transport->asoc->pathmtu;
+
+ if (cwnd <= ssthresh) {
+ /* RFC 4960 7.2.1
+ * o When cwnd is less than or equal to ssthresh, an SCTP
+ * endpoint MUST use the slow-start algorithm to increase
+ * cwnd only if the current congestion window is being fully
+ * utilized, an incoming SACK advances the Cumulative TSN
+ * Ack Point, and the data sender is not in Fast Recovery.
+ * Only when these three conditions are met can the cwnd be
+ * increased; otherwise, the cwnd MUST not be increased.
+ * If these conditions are met, then cwnd MUST be increased
+ * by, at most, the lesser of 1) the total size of the
+ * previously outstanding DATA chunk(s) acknowledged, and
+ * 2) the destination's path MTU. This upper bound protects
+ * against the ACK-Splitting attack outlined in [SAVAGE99].
+ */
+ if (asoc->fast_recovery)
+ return;
+
+ /* The appropriate cwnd increase algorithm is performed
+ * if, and only if the congestion window is being fully
+ * utilized. Note that RFC4960 Errata 3.22 removed the
+ * other condition on ctsn moving.
+ */
+ if (flight_size < cwnd)
+ return;
+
+ if (bytes_acked > pmtu)
+ cwnd += pmtu;
+ else
+ cwnd += bytes_acked;
+
+ pr_debug("%s: slow start: transport:%p, bytes_acked:%d, "
+ "cwnd:%d, ssthresh:%d, flight_size:%d, pba:%d\n",
+ __func__, transport, bytes_acked, cwnd, ssthresh,
+ flight_size, pba);
+ } else {
+ /* RFC 2960 7.2.2 Whenever cwnd is greater than ssthresh,
+ * upon each SACK arrival, increase partial_bytes_acked
+ * by the total number of bytes of all new chunks
+ * acknowledged in that SACK including chunks
+ * acknowledged by the new Cumulative TSN Ack and by Gap
+ * Ack Blocks. (updated by RFC4960 Errata 3.22)
+ *
+ * When partial_bytes_acked is greater than cwnd and
+ * before the arrival of the SACK the sender had less
+ * bytes of data outstanding than cwnd (i.e., before
+ * arrival of the SACK, flightsize was less than cwnd),
+ * reset partial_bytes_acked to cwnd. (RFC 4960 Errata
+ * 3.26)
+ *
+ * When partial_bytes_acked is equal to or greater than
+ * cwnd and before the arrival of the SACK the sender
+ * had cwnd or more bytes of data outstanding (i.e.,
+ * before arrival of the SACK, flightsize was greater
+ * than or equal to cwnd), partial_bytes_acked is reset
+ * to (partial_bytes_acked - cwnd). Next, cwnd is
+ * increased by MTU. (RFC 4960 Errata 3.12)
+ */
+ pba += bytes_acked;
+ if (pba > cwnd && flight_size < cwnd)
+ pba = cwnd;
+ if (pba >= cwnd && flight_size >= cwnd) {
+ pba = pba - cwnd;
+ cwnd += pmtu;
+ }
+
+ pr_debug("%s: congestion avoidance: transport:%p, "
+ "bytes_acked:%d, cwnd:%d, ssthresh:%d, "
+ "flight_size:%d, pba:%d\n", __func__,
+ transport, bytes_acked, cwnd, ssthresh,
+ flight_size, pba);
+ }
+
+ transport->cwnd = cwnd;
+ transport->partial_bytes_acked = pba;
+}
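Stripped of SACK bookkeeping and fast recovery, the growth rules above reduce to the following sketch (all values in bytes; a simplification, not the kernel routine):

/* Slow start grows cwnd by at most one MTU per SACK while the window is
 * fully used; congestion avoidance accumulates acked bytes and adds one
 * MTU once a full window's worth has been acknowledged.
 */
static void raise_cwnd(unsigned int *cwnd, unsigned int *pba,
		       unsigned int ssthresh, unsigned int flight_size,
		       unsigned int bytes_acked, unsigned int pmtu)
{
	if (*cwnd <= ssthresh) {
		if (flight_size < *cwnd)
			return;		/* window not fully utilized */
		*cwnd += bytes_acked > pmtu ? pmtu : bytes_acked;
	} else {
		*pba += bytes_acked;
		if (*pba > *cwnd && flight_size < *cwnd)
			*pba = *cwnd;
		if (*pba >= *cwnd && flight_size >= *cwnd) {
			*pba -= *cwnd;
			*cwnd += pmtu;
		}
	}
}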
+
+/* This routine is used to lower the transport's cwnd when congestion is
+ * detected.
+ */
+void sctp_transport_lower_cwnd(struct sctp_transport *transport,
+ enum sctp_lower_cwnd reason)
+{
+ struct sctp_association *asoc = transport->asoc;
+
+ switch (reason) {
+ case SCTP_LOWER_CWND_T3_RTX:
+ /* RFC 2960 Section 7.2.3, sctpimpguide
+ * When the T3-rtx timer expires on an address, SCTP should
+ * perform slow start by:
+ * ssthresh = max(cwnd/2, 4*MTU)
+ * cwnd = 1*MTU
+ * partial_bytes_acked = 0
+ */
+ transport->ssthresh = max(transport->cwnd/2,
+ 4*asoc->pathmtu);
+ transport->cwnd = asoc->pathmtu;
+
+ /* T3-rtx also clears fast recovery */
+ asoc->fast_recovery = 0;
+ break;
+
+ case SCTP_LOWER_CWND_FAST_RTX:
+ /* RFC 2960 7.2.4 Adjust the ssthresh and cwnd of the
+ * destination address(es) to which the missing DATA chunks
+ * were last sent, according to the formula described in
+ * Section 7.2.3.
+ *
+ * RFC 2960 7.2.3, sctpimpguide Upon detection of packet
+ * losses from SACK (see Section 7.2.4), An endpoint
+ * should do the following:
+ * ssthresh = max(cwnd/2, 4*MTU)
+ * cwnd = ssthresh
+ * partial_bytes_acked = 0
+ */
+ if (asoc->fast_recovery)
+ return;
+
+ /* Mark Fast recovery */
+ asoc->fast_recovery = 1;
+ asoc->fast_recovery_exit = asoc->next_tsn - 1;
+
+ transport->ssthresh = max(transport->cwnd/2,
+ 4*asoc->pathmtu);
+ transport->cwnd = transport->ssthresh;
+ break;
+
+ case SCTP_LOWER_CWND_ECNE:
+ /* RFC 2481 Section 6.1.2.
+ * If the sender receives an ECN-Echo ACK packet
+ * then the sender knows that congestion was encountered in the
+ * network on the path from the sender to the receiver. The
+ * indication of congestion should be treated just as a
+ * congestion loss in non-ECN Capable TCP. That is, the TCP
+ * source halves the congestion window "cwnd" and reduces the
+ * slow start threshold "ssthresh".
+ * A critical condition is that TCP does not react to
+ * congestion indications more than once every window of
+ * data (or more loosely more than once every round-trip time).
+ */
+ if (time_after(jiffies, transport->last_time_ecne_reduced +
+ transport->rtt)) {
+ transport->ssthresh = max(transport->cwnd/2,
+ 4*asoc->pathmtu);
+ transport->cwnd = transport->ssthresh;
+ transport->last_time_ecne_reduced = jiffies;
+ }
+ break;
+
+ case SCTP_LOWER_CWND_INACTIVE:
+ /* RFC 2960 Section 7.2.1, sctpimpguide
+ * When the endpoint does not transmit data on a given
+ * transport address, the cwnd of the transport address
+ * should be adjusted to max(cwnd/2, 4*MTU) per RTO.
+ * NOTE: Although the draft recommends that this check needs
+	 * to be done every RTO interval, we do it every heartbeat
+ * interval.
+ */
+ transport->cwnd = max(transport->cwnd/2,
+ 4*asoc->pathmtu);
+ /* RFC 4960 Errata 3.27.2: also adjust sshthresh */
+ transport->ssthresh = transport->cwnd;
+ break;
+ }
+
+ transport->partial_bytes_acked = 0;
+
+ pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d\n",
+ __func__, transport, reason, transport->cwnd,
+ transport->ssthresh);
+}
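The four reduction cases can likewise be condensed into a small standalone helper; this is a simplification of the switch above, with timers, fast-recovery entry and the ECNE rate limiting omitted:

enum lower_reason { LOWER_T3_RTX, LOWER_FAST_RTX, LOWER_ECNE, LOWER_INACTIVE };

static void lower_cwnd(unsigned int *cwnd, unsigned int *ssthresh,
		       unsigned int pmtu, enum lower_reason reason)
{
	unsigned int half = *cwnd / 2;
	unsigned int floor = 4 * pmtu;
	unsigned int new_ssthresh = half > floor ? half : floor;

	switch (reason) {
	case LOWER_T3_RTX:		/* back to slow start from one MTU */
		*ssthresh = new_ssthresh;
		*cwnd = pmtu;
		break;
	case LOWER_FAST_RTX:		/* halve, continue from ssthresh */
	case LOWER_ECNE:
		*ssthresh = new_ssthresh;
		*cwnd = *ssthresh;
		break;
	case LOWER_INACTIVE:		/* decay an idle path's window */
		*cwnd = new_ssthresh;
		*ssthresh = *cwnd;
		break;
	}
}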
+
+/* Apply Max.Burst limit to the congestion window:
+ * sctpimpguide-05 2.14.2
+ * D) When the time comes for the sender to
+ * transmit new DATA chunks, the protocol parameter Max.Burst MUST
+ * first be applied to limit how many new DATA chunks may be sent.
+ * The limit is applied by adjusting cwnd as follows:
+ * if ((flightsize+ Max.Burst * MTU) < cwnd)
+ * cwnd = flightsize + Max.Burst * MTU
+ */
+
+void sctp_transport_burst_limited(struct sctp_transport *t)
+{
+ struct sctp_association *asoc = t->asoc;
+ u32 old_cwnd = t->cwnd;
+ u32 max_burst_bytes;
+
+ if (t->burst_limited || asoc->max_burst == 0)
+ return;
+
+ max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu);
+ if (max_burst_bytes < old_cwnd) {
+ t->cwnd = max_burst_bytes;
+ t->burst_limited = old_cwnd;
+ }
+}
+
+/* Restore the old cwnd congestion window, after the burst has had its
+ * desired effect.
+ */
+void sctp_transport_burst_reset(struct sctp_transport *t)
+{
+ if (t->burst_limited) {
+ t->cwnd = t->burst_limited;
+ t->burst_limited = 0;
+ }
+}
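A compact way to see the Max.Burst clamp and its restore together, using hypothetical standalone helpers with byte values:

struct burst_state { unsigned int cwnd, saved_cwnd; };

/* Temporarily cap cwnd to flightsize + Max.Burst * MTU before sending. */
static void burst_limit(struct burst_state *s, unsigned int flight_size,
			unsigned int max_burst, unsigned int pmtu)
{
	unsigned int cap = flight_size + max_burst * pmtu;

	if (!s->saved_cwnd && max_burst && cap < s->cwnd) {
		s->saved_cwnd = s->cwnd;
		s->cwnd = cap;
	}
}

/* Put the original cwnd back once the burst has been sent. */
static void burst_reset(struct burst_state *s)
{
	if (s->saved_cwnd) {
		s->cwnd = s->saved_cwnd;
		s->saved_cwnd = 0;
	}
}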
+
+/* What is the next timeout value for this transport? */
+unsigned long sctp_transport_timeout(struct sctp_transport *trans)
+{
+ /* RTO + timer slack +/- 50% of RTO */
+ unsigned long timeout = trans->rto >> 1;
+
+ if (trans->state != SCTP_UNCONFIRMED &&
+ trans->state != SCTP_PF)
+ timeout += trans->hbinterval;
+
+ return max_t(unsigned long, timeout, HZ / 5);
+}
+
+/* Reset transport variables to their initial values */
+void sctp_transport_reset(struct sctp_transport *t)
+{
+ struct sctp_association *asoc = t->asoc;
+
+ /* RFC 2960 (bis), Section 5.2.4
+ * All the congestion control parameters (e.g., cwnd, ssthresh)
+ * related to this peer MUST be reset to their initial values
+ * (see Section 6.2.1)
+ */
+ t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
+ t->burst_limited = 0;
+ t->ssthresh = asoc->peer.i.a_rwnd;
+ t->rto = asoc->rto_initial;
+ sctp_max_rto(asoc, t);
+ t->rtt = 0;
+ t->srtt = 0;
+ t->rttvar = 0;
+
+ /* Reset these additional variables so that we have a clean slate. */
+ t->partial_bytes_acked = 0;
+ t->flight_size = 0;
+ t->error_count = 0;
+ t->rto_pending = 0;
+ t->hb_sent = 0;
+
+ /* Initialize the state information for SFR-CACC */
+ t->cacc.changeover_active = 0;
+ t->cacc.cycling_changeover = 0;
+ t->cacc.next_tsn_at_change = 0;
+ t->cacc.cacc_saw_newack = 0;
+}
+
+/* Schedule retransmission on the given transport */
+void sctp_transport_immediate_rtx(struct sctp_transport *t)
+{
+ /* Stop pending T3_rtx_timer */
+ if (del_timer(&t->T3_rtx_timer))
+ sctp_transport_put(t);
+
+ sctp_retransmit(&t->asoc->outqueue, t, SCTP_RTXR_T3_RTX);
+ if (!timer_pending(&t->T3_rtx_timer)) {
+ if (!mod_timer(&t->T3_rtx_timer, jiffies + t->rto))
+ sctp_transport_hold(t);
+ }
+}
+
+/* Drop dst */
+void sctp_transport_dst_release(struct sctp_transport *t)
+{
+ dst_release(t->dst);
+ t->dst = NULL;
+ t->dst_pending_confirm = 0;
+}
+
+/* Schedule neighbour confirm */
+void sctp_transport_dst_confirm(struct sctp_transport *t)
+{
+ t->dst_pending_confirm = 1;
+}
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c
new file mode 100644
index 000000000..5ba456727
--- /dev/null
+++ b/net/sctp/tsnmap.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001 Intel Corp.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * These functions manipulate sctp tsn mapping array.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/bitmap.h>
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+
+static void sctp_tsnmap_update(struct sctp_tsnmap *map);
+static void sctp_tsnmap_find_gap_ack(unsigned long *map, __u16 off,
+ __u16 len, __u16 *start, __u16 *end);
+static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size);
+
+/* Initialize a block of memory as a tsnmap. */
+struct sctp_tsnmap *sctp_tsnmap_init(struct sctp_tsnmap *map, __u16 len,
+ __u32 initial_tsn, gfp_t gfp)
+{
+ if (!map->tsn_map) {
+ map->tsn_map = kzalloc(len>>3, gfp);
+ if (map->tsn_map == NULL)
+ return NULL;
+
+ map->len = len;
+ } else {
+ bitmap_zero(map->tsn_map, map->len);
+ }
+
+ /* Keep track of TSNs represented by tsn_map. */
+ map->base_tsn = initial_tsn;
+ map->cumulative_tsn_ack_point = initial_tsn - 1;
+ map->max_tsn_seen = map->cumulative_tsn_ack_point;
+ map->num_dup_tsns = 0;
+
+ return map;
+}
+
+void sctp_tsnmap_free(struct sctp_tsnmap *map)
+{
+ map->len = 0;
+ kfree(map->tsn_map);
+}
+
+/* Test the tracking state of this TSN.
+ * Returns:
+ * 0 if the TSN has not yet been seen
+ * >0 if the TSN has been seen (duplicate)
+ * <0 if the TSN is invalid (too large to track)
+ */
+int sctp_tsnmap_check(const struct sctp_tsnmap *map, __u32 tsn)
+{
+ u32 gap;
+
+ /* Check to see if this is an old TSN */
+ if (TSN_lte(tsn, map->cumulative_tsn_ack_point))
+ return 1;
+
+ /* Verify that we can hold this TSN and that it will not
+ * overflow our map
+ */
+ if (!TSN_lt(tsn, map->base_tsn + SCTP_TSN_MAP_SIZE))
+ return -1;
+
+ /* Calculate the index into the mapping arrays. */
+ gap = tsn - map->base_tsn;
+
+ /* Check to see if TSN has already been recorded. */
+ if (gap < map->len && test_bit(gap, map->tsn_map))
+ return 1;
+ else
+ return 0;
+}
+
+
+/* Mark this TSN as seen. */
+int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn,
+ struct sctp_transport *trans)
+{
+ u16 gap;
+
+ if (TSN_lt(tsn, map->base_tsn))
+ return 0;
+
+ gap = tsn - map->base_tsn;
+
+ if (gap >= map->len && !sctp_tsnmap_grow(map, gap + 1))
+ return -ENOMEM;
+
+ if (!sctp_tsnmap_has_gap(map) && gap == 0) {
+ /* In this case the map has no gaps and the tsn we are
+ * recording is the next expected tsn. We don't touch
+ * the map but simply bump the values.
+ */
+ map->max_tsn_seen++;
+ map->cumulative_tsn_ack_point++;
+ if (trans)
+ trans->sack_generation =
+ trans->asoc->peer.sack_generation;
+ map->base_tsn++;
+ } else {
+		/* Either we already have a gap, or we are about to record a
+		 * gap, so we have work to do.
+ *
+ * Bump the max.
+ */
+ if (TSN_lt(map->max_tsn_seen, tsn))
+ map->max_tsn_seen = tsn;
+
+ /* Mark the TSN as received. */
+ set_bit(gap, map->tsn_map);
+
+ /* Go fixup any internal TSN mapping variables including
+ * cumulative_tsn_ack_point.
+ */
+ sctp_tsnmap_update(map);
+ }
+
+ return 0;
+}
+
+
+/* Initialize a Gap Ack Block iterator from memory being provided. */
+static void sctp_tsnmap_iter_init(const struct sctp_tsnmap *map,
+ struct sctp_tsnmap_iter *iter)
+{
+ /* Only start looking one past the Cumulative TSN Ack Point. */
+ iter->start = map->cumulative_tsn_ack_point + 1;
+}
+
+/* Get the next Gap Ack Block. Returns 0 if there is no further block
+ * to get.
+ */
+static int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *map,
+ struct sctp_tsnmap_iter *iter,
+ __u16 *start, __u16 *end)
+{
+ int ended = 0;
+ __u16 start_ = 0, end_ = 0, offset;
+
+ /* If there are no more gap acks possible, get out fast. */
+ if (TSN_lte(map->max_tsn_seen, iter->start))
+ return 0;
+
+ offset = iter->start - map->base_tsn;
+ sctp_tsnmap_find_gap_ack(map->tsn_map, offset, map->len,
+ &start_, &end_);
+
+ /* The Gap Ack Block happens to end at the end of the map. */
+ if (start_ && !end_)
+ end_ = map->len - 1;
+
+ /* If we found a Gap Ack Block, return the start and end and
+ * bump the iterator forward.
+ */
+ if (end_) {
+ /* Fix up the start and end based on the
+ * Cumulative TSN Ack which is always 1 behind base.
+ */
+ *start = start_ + 1;
+ *end = end_ + 1;
+
+ /* Move the iterator forward. */
+ iter->start = map->cumulative_tsn_ack_point + *end + 1;
+ ended = 1;
+ }
+
+ return ended;
+}
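The same scan can be illustrated without the kernel bitmap helpers; a simplified sketch using a plain bool array, where index 0 corresponds to the TSN one past the Cumulative TSN Ack Point:

#include <stdio.h>
#include <stdbool.h>

/* Report each run of received TSNs as offsets past the cumulative ack,
 * which is how Gap Ack Blocks are expressed in a SACK.
 */
static void print_gap_acks(const bool *seen, unsigned int len)
{
	unsigned int i = 0;

	while (i < len) {
		while (i < len && !seen[i])
			i++;			/* skip the gap itself */
		if (i == len)
			break;
		unsigned int start = i;
		while (i < len && seen[i])
			i++;			/* run of received TSNs */
		/* +1 converts map offset to "TSNs past the cum-ack point" */
		printf("gap ack block: start=%u end=%u\n", start + 1, i);
	}
}

int main(void)
{
	/* cum-ack+1 .. cum-ack+8: TSNs at offsets 2,3 and 6,7,8 were seen */
	bool seen[8] = { false, true, true, false, false, true, true, true };

	print_gap_acks(seen, 8);	/* prints blocks 2-3 and 6-8 */
	return 0;
}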
+
+/* Mark this and any lower TSN as seen. */
+void sctp_tsnmap_skip(struct sctp_tsnmap *map, __u32 tsn)
+{
+ u32 gap;
+
+ if (TSN_lt(tsn, map->base_tsn))
+ return;
+ if (!TSN_lt(tsn, map->base_tsn + SCTP_TSN_MAP_SIZE))
+ return;
+
+ /* Bump the max. */
+ if (TSN_lt(map->max_tsn_seen, tsn))
+ map->max_tsn_seen = tsn;
+
+ gap = tsn - map->base_tsn + 1;
+
+ map->base_tsn += gap;
+ map->cumulative_tsn_ack_point += gap;
+ if (gap >= map->len) {
+		/* If our gap is larger than the map size, just
+ * zero out the map.
+ */
+ bitmap_zero(map->tsn_map, map->len);
+ } else {
+ /* If the gap is smaller than the map size,
+ * shift the map by 'gap' bits and update further.
+ */
+ bitmap_shift_right(map->tsn_map, map->tsn_map, gap, map->len);
+ sctp_tsnmap_update(map);
+ }
+}
+
+/********************************************************************
+ * 2nd Level Abstractions
+ ********************************************************************/
+
+/* This private helper function updates the tsnmap buffers and
+ * the Cumulative TSN Ack Point.
+ */
+static void sctp_tsnmap_update(struct sctp_tsnmap *map)
+{
+ u16 len;
+ unsigned long zero_bit;
+
+
+ len = map->max_tsn_seen - map->cumulative_tsn_ack_point;
+ zero_bit = find_first_zero_bit(map->tsn_map, len);
+ if (!zero_bit)
+		return;		/* The first 0-bit is bit 0; nothing to do. */
+
+ map->base_tsn += zero_bit;
+ map->cumulative_tsn_ack_point += zero_bit;
+
+ bitmap_shift_right(map->tsn_map, map->tsn_map, zero_bit, map->len);
+}
+
+/* How many data chunks are we missing from our peer?
+ */
+__u16 sctp_tsnmap_pending(struct sctp_tsnmap *map)
+{
+ __u32 cum_tsn = map->cumulative_tsn_ack_point;
+ __u32 max_tsn = map->max_tsn_seen;
+ __u32 base_tsn = map->base_tsn;
+ __u16 pending_data;
+ u32 gap;
+
+ pending_data = max_tsn - cum_tsn;
+ gap = max_tsn - base_tsn;
+
+ if (gap == 0 || gap >= map->len)
+ goto out;
+
+ pending_data -= bitmap_weight(map->tsn_map, gap + 1);
+out:
+ return pending_data;
+}
+
+/* This is a private helper for finding Gap Ack Blocks. It searches a
+ * single array for the start and end of a Gap Ack Block.
+ *
+ * The flags "started" and "ended" tell us whether we found the beginning
+ * or (respectively) the end of a Gap Ack Block.
+ */
+static void sctp_tsnmap_find_gap_ack(unsigned long *map, __u16 off,
+ __u16 len, __u16 *start, __u16 *end)
+{
+ int i = off;
+
+ /* Look through the entire array, but break out
+ * early if we have found the end of the Gap Ack Block.
+ */
+
+ /* Also, stop looking past the maximum TSN seen. */
+
+ /* Look for the start. */
+ i = find_next_bit(map, len, off);
+ if (i < len)
+ *start = i;
+
+ /* Look for the end. */
+ if (*start) {
+ /* We have found the start, let's find the
+ * end. If we find the end, break out.
+ */
+ i = find_next_zero_bit(map, len, i);
+ if (i < len)
+ *end = i - 1;
+ }
+}
+
+/* Renege that we have seen a TSN. */
+void sctp_tsnmap_renege(struct sctp_tsnmap *map, __u32 tsn)
+{
+ u32 gap;
+
+ if (TSN_lt(tsn, map->base_tsn))
+ return;
+ /* Assert: TSN is in range. */
+ if (!TSN_lt(tsn, map->base_tsn + map->len))
+ return;
+
+ gap = tsn - map->base_tsn;
+
+ /* Pretend we never saw the TSN. */
+ clear_bit(gap, map->tsn_map);
+}
+
+/* How many gap ack blocks do we have recorded? */
+__u16 sctp_tsnmap_num_gabs(struct sctp_tsnmap *map,
+ struct sctp_gap_ack_block *gabs)
+{
+ struct sctp_tsnmap_iter iter;
+ int ngaps = 0;
+
+ /* Refresh the gap ack information. */
+ if (sctp_tsnmap_has_gap(map)) {
+ __u16 start = 0, end = 0;
+ sctp_tsnmap_iter_init(map, &iter);
+ while (sctp_tsnmap_next_gap_ack(map, &iter,
+ &start,
+ &end)) {
+
+ gabs[ngaps].start = htons(start);
+ gabs[ngaps].end = htons(end);
+ ngaps++;
+ if (ngaps >= SCTP_MAX_GABS)
+ break;
+ }
+ }
+ return ngaps;
+}
+
+static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size)
+{
+ unsigned long *new;
+ unsigned long inc;
+ u16 len;
+
+ if (size > SCTP_TSN_MAP_SIZE)
+ return 0;
+
+ inc = ALIGN((size - map->len), BITS_PER_LONG) + SCTP_TSN_MAP_INCREMENT;
+ len = min_t(u16, map->len + inc, SCTP_TSN_MAP_SIZE);
+
+ new = kzalloc(len>>3, GFP_ATOMIC);
+ if (!new)
+ return 0;
+
+ bitmap_copy(new, map->tsn_map,
+ map->max_tsn_seen - map->cumulative_tsn_ack_point);
+ kfree(map->tsn_map);
+ map->tsn_map = new;
+ map->len = len;
+
+ return 1;
+}
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
new file mode 100644
index 000000000..8920ca92a
--- /dev/null
+++ b/net/sctp/ulpevent.c
@@ -0,0 +1,1190 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001 Intel Corp.
+ * Copyright (c) 2001 Nokia, Inc.
+ * Copyright (c) 2001 La Monte H.P. Yarroll
+ *
+ * These functions manipulate an sctp event. The struct ulpevent is used
+ * to carry notifications and data to the ULP (sockets).
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Ardelle Fan <ardelle.fan@intel.com>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <net/sctp/structs.h>
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+
+static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event,
+ struct sctp_association *asoc);
+static void sctp_ulpevent_release_data(struct sctp_ulpevent *event);
+static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event);
+
+
+/* Initialize an ULP event from a given skb. */
+static void sctp_ulpevent_init(struct sctp_ulpevent *event,
+ __u16 msg_flags,
+ unsigned int len)
+{
+ memset(event, 0, sizeof(struct sctp_ulpevent));
+ event->msg_flags = msg_flags;
+ event->rmem_len = len;
+}
+
+/* Create a new sctp_ulpevent. */
+static struct sctp_ulpevent *sctp_ulpevent_new(int size, __u16 msg_flags,
+ gfp_t gfp)
+{
+ struct sctp_ulpevent *event;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(size, gfp);
+ if (!skb)
+ goto fail;
+
+ event = sctp_skb2event(skb);
+ sctp_ulpevent_init(event, msg_flags, skb->truesize);
+
+ return event;
+
+fail:
+ return NULL;
+}
+
+/* Is this a MSG_NOTIFICATION? */
+int sctp_ulpevent_is_notification(const struct sctp_ulpevent *event)
+{
+ return MSG_NOTIFICATION == (event->msg_flags & MSG_NOTIFICATION);
+}
+
+/* Hold the association in case the msg_name needs read out of
+ * the association.
+ */
+static inline void sctp_ulpevent_set_owner(struct sctp_ulpevent *event,
+ const struct sctp_association *asoc)
+{
+ struct sctp_chunk *chunk = event->chunk;
+ struct sk_buff *skb;
+
+	/* Cast away the const, as we just want to
+	 * bump the reference count.
+	 */
+ sctp_association_hold((struct sctp_association *)asoc);
+ skb = sctp_event2skb(event);
+ event->asoc = (struct sctp_association *)asoc;
+ atomic_add(event->rmem_len, &event->asoc->rmem_alloc);
+ sctp_skb_set_owner_r(skb, asoc->base.sk);
+ if (chunk && chunk->head_skb && !chunk->head_skb->sk)
+ chunk->head_skb->sk = asoc->base.sk;
+}
+
+/* A simple destructor to give up the reference to the association. */
+static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event)
+{
+ struct sctp_association *asoc = event->asoc;
+
+ atomic_sub(event->rmem_len, &asoc->rmem_alloc);
+ sctp_association_put(asoc);
+}
+
+/* Create and initialize an SCTP_ASSOC_CHANGE event.
+ *
+ * 5.3.1.1 SCTP_ASSOC_CHANGE
+ *
+ * Communication notifications inform the ULP that an SCTP association
+ * has either begun or ended. The identifier for a new association is
+ * provided by this notification.
+ *
+ * Note: There is no field checking here. If a field is unused it will be
+ * zeroed out.
+ */
+struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
+ const struct sctp_association *asoc,
+ __u16 flags, __u16 state, __u16 error, __u16 outbound,
+ __u16 inbound, struct sctp_chunk *chunk, gfp_t gfp)
+{
+ struct sctp_ulpevent *event;
+ struct sctp_assoc_change *sac;
+ struct sk_buff *skb;
+
+ /* If the lower layer passed in the chunk, it will be
+ * an ABORT, so we need to include it in the sac_info.
+ */
+ if (chunk) {
+ /* Copy the chunk data to a new skb and reserve enough
+ * head room to use as notification.
+ */
+ skb = skb_copy_expand(chunk->skb,
+ sizeof(struct sctp_assoc_change), 0, gfp);
+
+ if (!skb)
+ goto fail;
+
+ /* Embed the event fields inside the cloned skb. */
+ event = sctp_skb2event(skb);
+ sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
+
+ /* Include the notification structure */
+ sac = skb_push(skb, sizeof(struct sctp_assoc_change));
+
+ /* Trim the buffer to the right length. */
+ skb_trim(skb, sizeof(struct sctp_assoc_change) +
+ ntohs(chunk->chunk_hdr->length) -
+ sizeof(struct sctp_chunkhdr));
+ } else {
+ event = sctp_ulpevent_new(sizeof(struct sctp_assoc_change),
+ MSG_NOTIFICATION, gfp);
+ if (!event)
+ goto fail;
+
+ skb = sctp_event2skb(event);
+ sac = skb_put(skb, sizeof(struct sctp_assoc_change));
+ }
+
+ /* Socket Extensions for SCTP
+ * 5.3.1.1 SCTP_ASSOC_CHANGE
+ *
+ * sac_type:
+ * It should be SCTP_ASSOC_CHANGE.
+ */
+ sac->sac_type = SCTP_ASSOC_CHANGE;
+
+ /* Socket Extensions for SCTP
+ * 5.3.1.1 SCTP_ASSOC_CHANGE
+ *
+ * sac_state: 32 bits (signed integer)
+ * This field holds one of a number of values that communicate the
+ * event that happened to the association.
+ */
+ sac->sac_state = state;
+
+ /* Socket Extensions for SCTP
+ * 5.3.1.1 SCTP_ASSOC_CHANGE
+ *
+ * sac_flags: 16 bits (unsigned integer)
+ * Currently unused.
+ */
+ sac->sac_flags = 0;
+
+ /* Socket Extensions for SCTP
+ * 5.3.1.1 SCTP_ASSOC_CHANGE
+ *
+ * sac_length: sizeof (__u32)
+ * This field is the total length of the notification data, including
+ * the notification header.
+ */
+ sac->sac_length = skb->len;
+
+ /* Socket Extensions for SCTP
+ * 5.3.1.1 SCTP_ASSOC_CHANGE
+ *
+ * sac_error: 32 bits (signed integer)
+ *
+	 * If the state was reached due to an error condition (e.g.
+ * COMMUNICATION_LOST) any relevant error information is available in
+ * this field. This corresponds to the protocol error codes defined in
+ * [SCTP].
+ */
+ sac->sac_error = error;
+
+ /* Socket Extensions for SCTP
+ * 5.3.1.1 SCTP_ASSOC_CHANGE
+ *
+ * sac_outbound_streams: 16 bits (unsigned integer)
+ * sac_inbound_streams: 16 bits (unsigned integer)
+ *
+	 * The maximum number of streams allowed in each direction is
+	 * available in sac_outbound_streams and sac_inbound_streams.
+ */
+ sac->sac_outbound_streams = outbound;
+ sac->sac_inbound_streams = inbound;
+
+ /* Socket Extensions for SCTP
+ * 5.3.1.1 SCTP_ASSOC_CHANGE
+ *
+ * sac_assoc_id: sizeof (sctp_assoc_t)
+ *
+	 * The association id field holds the identifier for the association.
+	 * All notifications for a given association have the same association
+	 * identifier.  For a TCP-style socket, this field is ignored.
+ */
+ sctp_ulpevent_set_owner(event, asoc);
+ sac->sac_assoc_id = sctp_assoc2id(asoc);
+
+ return event;
+
+fail:
+ return NULL;
+}
+
+/* Create and initialize an SCTP_PEER_ADDR_CHANGE event.
+ *
+ * Socket Extensions for SCTP - draft-01
+ * 5.3.1.2 SCTP_PEER_ADDR_CHANGE
+ *
+ * When a destination address on a multi-homed peer encounters a change,
+ * an interface details event is sent.
+ */
+static struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change(
+ const struct sctp_association *asoc,
+ const struct sockaddr_storage *aaddr,
+ int flags, int state, int error, gfp_t gfp)
+{
+ struct sctp_ulpevent *event;
+ struct sctp_paddr_change *spc;
+ struct sk_buff *skb;
+
+ event = sctp_ulpevent_new(sizeof(struct sctp_paddr_change),
+ MSG_NOTIFICATION, gfp);
+ if (!event)
+ goto fail;
+
+ skb = sctp_event2skb(event);
+ spc = skb_put(skb, sizeof(struct sctp_paddr_change));
+
+ /* Sockets API Extensions for SCTP
+ * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE
+ *
+ * spc_type:
+ *
+ * It should be SCTP_PEER_ADDR_CHANGE.
+ */
+ spc->spc_type = SCTP_PEER_ADDR_CHANGE;
+
+ /* Sockets API Extensions for SCTP
+ * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE
+ *
+ * spc_length: sizeof (__u32)
+ *
+ * This field is the total length of the notification data, including
+ * the notification header.
+ */
+ spc->spc_length = sizeof(struct sctp_paddr_change);
+
+ /* Sockets API Extensions for SCTP
+ * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE
+ *
+ * spc_flags: 16 bits (unsigned integer)
+ * Currently unused.
+ */
+ spc->spc_flags = 0;
+
+ /* Sockets API Extensions for SCTP
+ * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE
+ *
+ * spc_state: 32 bits (signed integer)
+ *
+ * This field holds one of a number of values that communicate the
+ * event that happened to the address.
+ */
+ spc->spc_state = state;
+
+ /* Sockets API Extensions for SCTP
+ * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE
+ *
+ * spc_error: 32 bits (signed integer)
+ *
+ * If the state was reached due to any error condition (e.g.
+ * ADDRESS_UNREACHABLE) any relevant error information is available in
+ * this field.
+ */
+ spc->spc_error = error;
+
+	/* Sockets API Extensions for SCTP
+	 * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE
+ *
+ * spc_assoc_id: sizeof (sctp_assoc_t)
+ *
+	 * The association id field holds the identifier for the association.
+	 * All notifications for a given association have the same association
+	 * identifier.  For a TCP-style socket, this field is ignored.
+ */
+ sctp_ulpevent_set_owner(event, asoc);
+ spc->spc_assoc_id = sctp_assoc2id(asoc);
+
+ /* Sockets API Extensions for SCTP
+ * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE
+ *
+ * spc_aaddr: sizeof (struct sockaddr_storage)
+ *
+ * The affected address field, holds the remote peer's address that is
+ * encountering the change of state.
+ */
+ memcpy(&spc->spc_aaddr, aaddr, sizeof(struct sockaddr_storage));
+
+ /* Map ipv4 address into v4-mapped-on-v6 address. */
+ sctp_get_pf_specific(asoc->base.sk->sk_family)->addr_to_user(
+ sctp_sk(asoc->base.sk),
+ (union sctp_addr *)&spc->spc_aaddr);
+
+ return event;
+
+fail:
+ return NULL;
+}
+
+void sctp_ulpevent_notify_peer_addr_change(struct sctp_transport *transport,
+ int state, int error)
+{
+ struct sctp_association *asoc = transport->asoc;
+ struct sockaddr_storage addr;
+ struct sctp_ulpevent *event;
+
+ if (asoc->state < SCTP_STATE_ESTABLISHED)
+ return;
+
+ memset(&addr, 0, sizeof(struct sockaddr_storage));
+ memcpy(&addr, &transport->ipaddr, transport->af_specific->sockaddr_len);
+
+ event = sctp_ulpevent_make_peer_addr_change(asoc, &addr, 0, state,
+ error, GFP_ATOMIC);
+ if (event)
+ asoc->stream.si->enqueue_event(&asoc->ulpq, event);
+}
+
+/* Create and initialize an SCTP_REMOTE_ERROR notification.
+ *
+ * Note: This assumes that the chunk->skb->data already points to the
+ * operation error payload.
+ *
+ * Socket Extensions for SCTP - draft-01
+ * 5.3.1.3 SCTP_REMOTE_ERROR
+ *
+ * A remote peer may send an Operational Error message to its peer.
+ * This message indicates a variety of error conditions on an
+ * association. The entire error TLV as it appears on the wire is
+ * included in a SCTP_REMOTE_ERROR event. Please refer to the SCTP
+ * specification [SCTP] and any extensions for a list of possible
+ * error formats.
+ */
+struct sctp_ulpevent *
+sctp_ulpevent_make_remote_error(const struct sctp_association *asoc,
+ struct sctp_chunk *chunk, __u16 flags,
+ gfp_t gfp)
+{
+ struct sctp_remote_error *sre;
+ struct sctp_ulpevent *event;
+ struct sctp_errhdr *ch;
+ struct sk_buff *skb;
+ __be16 cause;
+ int elen;
+
+ ch = (struct sctp_errhdr *)(chunk->skb->data);
+ cause = ch->cause;
+ elen = SCTP_PAD4(ntohs(ch->length)) - sizeof(*ch);
+
+ /* Pull off the ERROR header. */
+ skb_pull(chunk->skb, sizeof(*ch));
+
+ /* Copy the skb to a new skb with room for us to prepend
+ * the notification.
+ */
+ skb = skb_copy_expand(chunk->skb, sizeof(*sre), 0, gfp);
+
+ /* Pull off the rest of the cause TLV from the chunk. */
+ skb_pull(chunk->skb, elen);
+ if (!skb)
+ goto fail;
+
+ /* Embed the event fields inside the cloned skb. */
+ event = sctp_skb2event(skb);
+ sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
+
+ sre = skb_push(skb, sizeof(*sre));
+
+ /* Trim the buffer to the right length. */
+ skb_trim(skb, sizeof(*sre) + elen);
+
+ /* RFC6458, Section 6.1.3. SCTP_REMOTE_ERROR */
+ memset(sre, 0, sizeof(*sre));
+ sre->sre_type = SCTP_REMOTE_ERROR;
+ sre->sre_flags = 0;
+ sre->sre_length = skb->len;
+ sre->sre_error = cause;
+ sctp_ulpevent_set_owner(event, asoc);
+ sre->sre_assoc_id = sctp_assoc2id(asoc);
+
+ return event;
+fail:
+ return NULL;
+}
+
+/* Create and initialize a SCTP_SEND_FAILED notification.
+ *
+ * Socket Extensions for SCTP - draft-01
+ * 5.3.1.4 SCTP_SEND_FAILED
+ */
+struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
+ const struct sctp_association *asoc, struct sctp_chunk *chunk,
+ __u16 flags, __u32 error, gfp_t gfp)
+{
+ struct sctp_ulpevent *event;
+ struct sctp_send_failed *ssf;
+ struct sk_buff *skb;
+
+ /* Pull off any padding. */
+ int len = ntohs(chunk->chunk_hdr->length);
+
+ /* Make skb with more room so we can prepend notification. */
+ skb = skb_copy_expand(chunk->skb,
+ sizeof(struct sctp_send_failed), /* headroom */
+ 0, /* tailroom */
+ gfp);
+ if (!skb)
+ goto fail;
+
+ /* Pull off the common chunk header and DATA header. */
+ skb_pull(skb, sctp_datachk_len(&asoc->stream));
+ len -= sctp_datachk_len(&asoc->stream);
+
+ /* Embed the event fields inside the cloned skb. */
+ event = sctp_skb2event(skb);
+ sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
+
+ ssf = skb_push(skb, sizeof(struct sctp_send_failed));
+
+ /* Socket Extensions for SCTP
+ * 5.3.1.4 SCTP_SEND_FAILED
+ *
+ * ssf_type:
+ * It should be SCTP_SEND_FAILED.
+ */
+ ssf->ssf_type = SCTP_SEND_FAILED;
+
+ /* Socket Extensions for SCTP
+ * 5.3.1.4 SCTP_SEND_FAILED
+ *
+ * ssf_flags: 16 bits (unsigned integer)
+ * The flag value will take one of the following values
+ *
+ * SCTP_DATA_UNSENT - Indicates that the data was never put on
+ * the wire.
+ *
+ * SCTP_DATA_SENT - Indicates that the data was put on the wire.
+ * Note that this does not necessarily mean that the
+ * data was (or was not) successfully delivered.
+ */
+ ssf->ssf_flags = flags;
+
+ /* Socket Extensions for SCTP
+ * 5.3.1.4 SCTP_SEND_FAILED
+ *
+ * ssf_length: sizeof (__u32)
+ * This field is the total length of the notification data, including
+ * the notification header.
+ */
+ ssf->ssf_length = sizeof(struct sctp_send_failed) + len;
+ skb_trim(skb, ssf->ssf_length);
+
+ /* Socket Extensions for SCTP
+ * 5.3.1.4 SCTP_SEND_FAILED
+ *
+ * ssf_error: 16 bits (unsigned integer)
+ * This value represents the reason why the send failed, and if set,
+ * will be a SCTP protocol error code as defined in [SCTP] section
+ * 3.3.10.
+ */
+ ssf->ssf_error = error;
+
+ /* Socket Extensions for SCTP
+ * 5.3.1.4 SCTP_SEND_FAILED
+ *
+ * ssf_info: sizeof (struct sctp_sndrcvinfo)
+ * The original send information associated with the undelivered
+ * message.
+ */
+ memcpy(&ssf->ssf_info, &chunk->sinfo, sizeof(struct sctp_sndrcvinfo));
+
+ /* Per TSVWG discussion with Randy. Allow the application to
+ * reassemble a fragmented message.
+ */
+ ssf->ssf_info.sinfo_flags = chunk->chunk_hdr->flags;
+
+ /* Socket Extensions for SCTP
+ * 5.3.1.4 SCTP_SEND_FAILED
+ *
+ * ssf_assoc_id: sizeof (sctp_assoc_t)
+ * The association id field, ssf_assoc_id, holds the identifier for the
+ * association. All notifications for a given association have the
+ * same association identifier. For TCP style socket, this field is
+ * ignored.
+ */
+ sctp_ulpevent_set_owner(event, asoc);
+ ssf->ssf_assoc_id = sctp_assoc2id(asoc);
+ return event;
+
+fail:
+ return NULL;
+}
+
+struct sctp_ulpevent *sctp_ulpevent_make_send_failed_event(
+ const struct sctp_association *asoc, struct sctp_chunk *chunk,
+ __u16 flags, __u32 error, gfp_t gfp)
+{
+ struct sctp_send_failed_event *ssf;
+ struct sctp_ulpevent *event;
+ struct sk_buff *skb;
+ int len;
+
+ skb = skb_copy_expand(chunk->skb, sizeof(*ssf), 0, gfp);
+ if (!skb)
+ return NULL;
+
+ len = ntohs(chunk->chunk_hdr->length);
+ len -= sctp_datachk_len(&asoc->stream);
+
+ skb_pull(skb, sctp_datachk_len(&asoc->stream));
+ event = sctp_skb2event(skb);
+ sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
+
+ ssf = skb_push(skb, sizeof(*ssf));
+ ssf->ssf_type = SCTP_SEND_FAILED_EVENT;
+ ssf->ssf_flags = flags;
+ ssf->ssf_length = sizeof(*ssf) + len;
+ skb_trim(skb, ssf->ssf_length);
+ ssf->ssf_error = error;
+
+ ssf->ssfe_info.snd_sid = chunk->sinfo.sinfo_stream;
+ ssf->ssfe_info.snd_ppid = chunk->sinfo.sinfo_ppid;
+ ssf->ssfe_info.snd_context = chunk->sinfo.sinfo_context;
+ ssf->ssfe_info.snd_assoc_id = chunk->sinfo.sinfo_assoc_id;
+ ssf->ssfe_info.snd_flags = chunk->chunk_hdr->flags;
+
+ sctp_ulpevent_set_owner(event, asoc);
+ ssf->ssf_assoc_id = sctp_assoc2id(asoc);
+
+ return event;
+}
+
+/* Create and initialize a SCTP_SHUTDOWN_EVENT notification.
+ *
+ * Socket Extensions for SCTP - draft-01
+ * 5.3.1.5 SCTP_SHUTDOWN_EVENT
+ */
+struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event(
+ const struct sctp_association *asoc,
+ __u16 flags, gfp_t gfp)
+{
+ struct sctp_ulpevent *event;
+ struct sctp_shutdown_event *sse;
+ struct sk_buff *skb;
+
+ event = sctp_ulpevent_new(sizeof(struct sctp_shutdown_event),
+ MSG_NOTIFICATION, gfp);
+ if (!event)
+ goto fail;
+
+ skb = sctp_event2skb(event);
+ sse = skb_put(skb, sizeof(struct sctp_shutdown_event));
+
+ /* Socket Extensions for SCTP
+ * 5.3.1.5 SCTP_SHUTDOWN_EVENT
+ *
+ * sse_type
+ * It should be SCTP_SHUTDOWN_EVENT
+ */
+ sse->sse_type = SCTP_SHUTDOWN_EVENT;
+
+ /* Socket Extensions for SCTP
+ * 5.3.1.5 SCTP_SHUTDOWN_EVENT
+ *
+ * sse_flags: 16 bits (unsigned integer)
+ * Currently unused.
+ */
+ sse->sse_flags = 0;
+
+ /* Socket Extensions for SCTP
+ * 5.3.1.5 SCTP_SHUTDOWN_EVENT
+ *
+ * sse_length: sizeof (__u32)
+ * This field is the total length of the notification data, including
+ * the notification header.
+ */
+ sse->sse_length = sizeof(struct sctp_shutdown_event);
+
+ /* Socket Extensions for SCTP
+ * 5.3.1.5 SCTP_SHUTDOWN_EVENT
+ *
+ * sse_assoc_id: sizeof (sctp_assoc_t)
+ * The association id field, holds the identifier for the association.
+ * All notifications for a given association have the same association
+ * identifier. For TCP style socket, this field is ignored.
+ */
+ sctp_ulpevent_set_owner(event, asoc);
+ sse->sse_assoc_id = sctp_assoc2id(asoc);
+
+ return event;
+
+fail:
+ return NULL;
+}
+
+/* Create and initialize a SCTP_ADAPTATION_INDICATION notification.
+ *
+ * Socket Extensions for SCTP
+ * 5.3.1.6 SCTP_ADAPTATION_INDICATION
+ */
+struct sctp_ulpevent *sctp_ulpevent_make_adaptation_indication(
+ const struct sctp_association *asoc, gfp_t gfp)
+{
+ struct sctp_ulpevent *event;
+ struct sctp_adaptation_event *sai;
+ struct sk_buff *skb;
+
+ event = sctp_ulpevent_new(sizeof(struct sctp_adaptation_event),
+ MSG_NOTIFICATION, gfp);
+ if (!event)
+ goto fail;
+
+ skb = sctp_event2skb(event);
+ sai = skb_put(skb, sizeof(struct sctp_adaptation_event));
+
+ sai->sai_type = SCTP_ADAPTATION_INDICATION;
+ sai->sai_flags = 0;
+ sai->sai_length = sizeof(struct sctp_adaptation_event);
+ sai->sai_adaptation_ind = asoc->peer.adaptation_ind;
+ sctp_ulpevent_set_owner(event, asoc);
+ sai->sai_assoc_id = sctp_assoc2id(asoc);
+
+ return event;
+
+fail:
+ return NULL;
+}
+
+/* A message has been received. Package this message as a notification
+ * to pass it to the upper layers. Go ahead and calculate the sndrcvinfo
+ * even if filtered out later.
+ *
+ * Socket Extensions for SCTP
+ * 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
+ */
+struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ gfp_t gfp)
+{
+ struct sctp_ulpevent *event = NULL;
+ struct sk_buff *skb = chunk->skb;
+ struct sock *sk = asoc->base.sk;
+ size_t padding, datalen;
+ int rx_count;
+
+ /*
+ * check to see if we need to make space for this
+ * new skb, expand the rcvbuffer if needed, or drop
+ * the frame
+ */
+ if (asoc->ep->rcvbuf_policy)
+ rx_count = atomic_read(&asoc->rmem_alloc);
+ else
+ rx_count = atomic_read(&sk->sk_rmem_alloc);
+
+ datalen = ntohs(chunk->chunk_hdr->length);
+
+ if (rx_count >= sk->sk_rcvbuf || !sk_rmem_schedule(sk, skb, datalen))
+ goto fail;
+
+ /* Clone the original skb, sharing the data. */
+ skb = skb_clone(chunk->skb, gfp);
+ if (!skb)
+ goto fail;
+
+ /* Now that all memory allocations for this chunk succeeded, we
+ * can mark it as received so the tsn_map is updated correctly.
+ */
+ if (sctp_tsnmap_mark(&asoc->peer.tsn_map,
+ ntohl(chunk->subh.data_hdr->tsn),
+ chunk->transport))
+ goto fail_mark;
+
+ /* First calculate the padding, so we don't inadvertently
+ * pass up the wrong length to the user.
+ *
+ * RFC 2960 - Section 3.2 Chunk Field Descriptions
+ *
+ * The total length of a chunk (including Type, Length and Value fields)
+ * MUST be a multiple of 4 bytes. If the length of the chunk is not a
+ * multiple of 4 bytes, the sender MUST pad the chunk with all zero
+ * bytes and this padding is not included in the chunk length field.
+ * The sender should never pad with more than 3 bytes. The receiver
+ * MUST ignore the padding bytes.
+ */
+ padding = SCTP_PAD4(datalen) - datalen;
+
+ /* Fixup the cloned skb with just this chunk's data. */
+ skb_trim(skb, chunk->chunk_end - padding - skb->data);
+
+ /* Embed the event fields inside the cloned skb. */
+ event = sctp_skb2event(skb);
+
+ /* Initialize event with flags 0 and correct length
+ * Since this is a clone of the original skb, only account for
+ * the data of this chunk as other chunks will be accounted separately.
+ */
+ sctp_ulpevent_init(event, 0, skb->len + sizeof(struct sk_buff));
+
+ /* And hold the chunk as we need it for getting the IP headers
+ * later in recvmsg
+ */
+ sctp_chunk_hold(chunk);
+ event->chunk = chunk;
+
+ sctp_ulpevent_receive_data(event, asoc);
+
+ event->stream = ntohs(chunk->subh.data_hdr->stream);
+ if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
+ event->flags |= SCTP_UNORDERED;
+ event->cumtsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
+ }
+ event->tsn = ntohl(chunk->subh.data_hdr->tsn);
+ event->msg_flags |= chunk->chunk_hdr->flags;
+
+ return event;
+
+fail_mark:
+ kfree_skb(skb);
+fail:
+ return NULL;
+}
+
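For reference, the padding rule applied above is plain round-up-to-4 arithmetic.
A small illustrative sketch of the same rule (not the kernel's SCTP_PAD4 macro
itself): a datalen of 13 rounds up to 16, so 3 pad bytes get trimmed.

	/* Round a chunk length up to a multiple of 4, as RFC 2960 section 3.2
	 * requires, and compute how many pad bytes must be removed before the
	 * data is handed to the user.
	 */
	static inline size_t pad4(size_t len)
	{
		return (len + 3) & ~(size_t)3;
	}

	static inline size_t pad_bytes(size_t datalen)
	{
		return pad4(datalen) - datalen;	/* always 0..3 */
	}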
+/* Create a partial delivery related event.
+ *
+ * 5.3.1.7 SCTP_PARTIAL_DELIVERY_EVENT
+ *
+ * When a receiver is engaged in a partial delivery of a
+ * message this notification will be used to indicate
+ * various events.
+ */
+struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
+ const struct sctp_association *asoc,
+ __u32 indication, __u32 sid, __u32 seq,
+ __u32 flags, gfp_t gfp)
+{
+ struct sctp_ulpevent *event;
+ struct sctp_pdapi_event *pd;
+ struct sk_buff *skb;
+
+ event = sctp_ulpevent_new(sizeof(struct sctp_pdapi_event),
+ MSG_NOTIFICATION, gfp);
+ if (!event)
+ goto fail;
+
+ skb = sctp_event2skb(event);
+ pd = skb_put(skb, sizeof(struct sctp_pdapi_event));
+
+ /* pdapi_type
+ * It should be SCTP_PARTIAL_DELIVERY_EVENT
+ *
+ * pdapi_flags: 16 bits (unsigned integer)
+ * Currently unused.
+ */
+ pd->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
+ pd->pdapi_flags = flags;
+ pd->pdapi_stream = sid;
+ pd->pdapi_seq = seq;
+
+ /* pdapi_length: 32 bits (unsigned integer)
+ *
+ * This field is the total length of the notification data, including
+ * the notification header. It will generally be sizeof (struct
+ * sctp_pdapi_event).
+ */
+ pd->pdapi_length = sizeof(struct sctp_pdapi_event);
+
+ /* pdapi_indication: 32 bits (unsigned integer)
+ *
+ * This field holds the indication being sent to the application.
+ */
+ pd->pdapi_indication = indication;
+
+ /* pdapi_assoc_id: sizeof (sctp_assoc_t)
+ *
+ * The association id field, holds the identifier for the association.
+ */
+ sctp_ulpevent_set_owner(event, asoc);
+ pd->pdapi_assoc_id = sctp_assoc2id(asoc);
+
+ return event;
+fail:
+ return NULL;
+}
+
+struct sctp_ulpevent *sctp_ulpevent_make_authkey(
+ const struct sctp_association *asoc, __u16 key_id,
+ __u32 indication, gfp_t gfp)
+{
+ struct sctp_ulpevent *event;
+ struct sctp_authkey_event *ak;
+ struct sk_buff *skb;
+
+ event = sctp_ulpevent_new(sizeof(struct sctp_authkey_event),
+ MSG_NOTIFICATION, gfp);
+ if (!event)
+ goto fail;
+
+ skb = sctp_event2skb(event);
+ ak = skb_put(skb, sizeof(struct sctp_authkey_event));
+
+ ak->auth_type = SCTP_AUTHENTICATION_EVENT;
+ ak->auth_flags = 0;
+ ak->auth_length = sizeof(struct sctp_authkey_event);
+
+ ak->auth_keynumber = key_id;
+ ak->auth_altkeynumber = 0;
+ ak->auth_indication = indication;
+
+ /*
+ * The association id field, holds the identifier for the association.
+ */
+ sctp_ulpevent_set_owner(event, asoc);
+ ak->auth_assoc_id = sctp_assoc2id(asoc);
+
+ return event;
+fail:
+ return NULL;
+}
+
+/*
+ * Socket Extensions for SCTP
+ * 6.3.10. SCTP_SENDER_DRY_EVENT
+ */
+struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event(
+ const struct sctp_association *asoc, gfp_t gfp)
+{
+ struct sctp_ulpevent *event;
+ struct sctp_sender_dry_event *sdry;
+ struct sk_buff *skb;
+
+ event = sctp_ulpevent_new(sizeof(struct sctp_sender_dry_event),
+ MSG_NOTIFICATION, gfp);
+ if (!event)
+ return NULL;
+
+ skb = sctp_event2skb(event);
+ sdry = skb_put(skb, sizeof(struct sctp_sender_dry_event));
+
+ sdry->sender_dry_type = SCTP_SENDER_DRY_EVENT;
+ sdry->sender_dry_flags = 0;
+ sdry->sender_dry_length = sizeof(struct sctp_sender_dry_event);
+ sctp_ulpevent_set_owner(event, asoc);
+ sdry->sender_dry_assoc_id = sctp_assoc2id(asoc);
+
+ return event;
+}
+
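A sender-dry notification tells the application that the association has no
more queued or outstanding user data, which is commonly used to decide when it
is safe to shut down. A minimal userspace sketch for enabling it ('fd' is an
assumed SCTP socket; note that SCTP_EVENTS replaces the entire subscription
set):

	#include <netinet/sctp.h>
	#include <string.h>
	#include <sys/socket.h>

	/* Ask the kernel to post SCTP_SENDER_DRY_EVENT notifications on 'fd'.
	 * Any previously subscribed events not set here are switched off.
	 */
	static int enable_sender_dry(int fd)
	{
		struct sctp_event_subscribe events;

		memset(&events, 0, sizeof(events));
		events.sctp_sender_dry_event = 1;
		return setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS,
				  &events, sizeof(events));
	}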
+struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event(
+ const struct sctp_association *asoc, __u16 flags, __u16 stream_num,
+ __be16 *stream_list, gfp_t gfp)
+{
+ struct sctp_stream_reset_event *sreset;
+ struct sctp_ulpevent *event;
+ struct sk_buff *skb;
+ int length, i;
+
+ length = sizeof(struct sctp_stream_reset_event) + 2 * stream_num;
+ event = sctp_ulpevent_new(length, MSG_NOTIFICATION, gfp);
+ if (!event)
+ return NULL;
+
+ skb = sctp_event2skb(event);
+ sreset = skb_put(skb, length);
+
+ sreset->strreset_type = SCTP_STREAM_RESET_EVENT;
+ sreset->strreset_flags = flags;
+ sreset->strreset_length = length;
+ sctp_ulpevent_set_owner(event, asoc);
+ sreset->strreset_assoc_id = sctp_assoc2id(asoc);
+
+ for (i = 0; i < stream_num; i++)
+ sreset->strreset_stream_list[i] = ntohs(stream_list[i]);
+
+ return event;
+}
+
+struct sctp_ulpevent *sctp_ulpevent_make_assoc_reset_event(
+ const struct sctp_association *asoc, __u16 flags, __u32 local_tsn,
+ __u32 remote_tsn, gfp_t gfp)
+{
+ struct sctp_assoc_reset_event *areset;
+ struct sctp_ulpevent *event;
+ struct sk_buff *skb;
+
+ event = sctp_ulpevent_new(sizeof(struct sctp_assoc_reset_event),
+ MSG_NOTIFICATION, gfp);
+ if (!event)
+ return NULL;
+
+ skb = sctp_event2skb(event);
+ areset = skb_put(skb, sizeof(struct sctp_assoc_reset_event));
+
+ areset->assocreset_type = SCTP_ASSOC_RESET_EVENT;
+ areset->assocreset_flags = flags;
+ areset->assocreset_length = sizeof(struct sctp_assoc_reset_event);
+ sctp_ulpevent_set_owner(event, asoc);
+ areset->assocreset_assoc_id = sctp_assoc2id(asoc);
+ areset->assocreset_local_tsn = local_tsn;
+ areset->assocreset_remote_tsn = remote_tsn;
+
+ return event;
+}
+
+struct sctp_ulpevent *sctp_ulpevent_make_stream_change_event(
+ const struct sctp_association *asoc, __u16 flags,
+ __u32 strchange_instrms, __u32 strchange_outstrms, gfp_t gfp)
+{
+ struct sctp_stream_change_event *schange;
+ struct sctp_ulpevent *event;
+ struct sk_buff *skb;
+
+ event = sctp_ulpevent_new(sizeof(struct sctp_stream_change_event),
+ MSG_NOTIFICATION, gfp);
+ if (!event)
+ return NULL;
+
+ skb = sctp_event2skb(event);
+ schange = skb_put(skb, sizeof(struct sctp_stream_change_event));
+
+ schange->strchange_type = SCTP_STREAM_CHANGE_EVENT;
+ schange->strchange_flags = flags;
+ schange->strchange_length = sizeof(struct sctp_stream_change_event);
+ sctp_ulpevent_set_owner(event, asoc);
+ schange->strchange_assoc_id = sctp_assoc2id(asoc);
+ schange->strchange_instrms = strchange_instrms;
+ schange->strchange_outstrms = strchange_outstrms;
+
+ return event;
+}
+
+/* Return the notification type, assuming this is a notification
+ * event.
+ */
+__u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event)
+{
+ union sctp_notification *notification;
+ struct sk_buff *skb;
+
+ skb = sctp_event2skb(event);
+ notification = (union sctp_notification *) skb->data;
+ return notification->sn_header.sn_type;
+}
+
+/* RFC6458, Section 5.3.2. SCTP Header Information Structure
+ * (SCTP_SNDRCV, DEPRECATED)
+ */
+void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
+ struct msghdr *msghdr)
+{
+ struct sctp_sndrcvinfo sinfo;
+
+ if (sctp_ulpevent_is_notification(event))
+ return;
+
+ memset(&sinfo, 0, sizeof(sinfo));
+ sinfo.sinfo_stream = event->stream;
+ sinfo.sinfo_ssn = event->ssn;
+ sinfo.sinfo_ppid = event->ppid;
+ sinfo.sinfo_flags = event->flags;
+ sinfo.sinfo_tsn = event->tsn;
+ sinfo.sinfo_cumtsn = event->cumtsn;
+ sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc);
+ /* Context value that is set via SCTP_CONTEXT socket option. */
+ sinfo.sinfo_context = event->asoc->default_rcv_context;
+ /* These fields are not used while receiving. */
+ sinfo.sinfo_timetolive = 0;
+
+ put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV,
+ sizeof(sinfo), &sinfo);
+}
+
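On the receiving application's side this shows up as ancillary data of level
IPPROTO_SCTP and type SCTP_SNDRCV, delivered once sctp_data_io_event is
subscribed. A minimal sketch of pulling it out of the control buffer (the
helper name is illustrative):

	#include <netinet/sctp.h>
	#include <stddef.h>
	#include <sys/socket.h>

	/* Extract the per-message receive info that the kernel attached via
	 * put_cmsg(..., SCTP_SNDRCV, ...).  'msg' is the msghdr passed to
	 * recvmsg() with a control buffer large enough for the cmsg.
	 */
	static const struct sctp_sndrcvinfo *get_sndrcvinfo(struct msghdr *msg)
	{
		struct cmsghdr *cmsg;

		for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
		     cmsg = CMSG_NXTHDR(msg, cmsg)) {
			if (cmsg->cmsg_level == IPPROTO_SCTP &&
			    cmsg->cmsg_type == SCTP_SNDRCV)
				return (const struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
		}
		return NULL;
	}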
+/* RFC6458, Section 5.3.5 SCTP Receive Information Structure
+ * (SCTP_RCVINFO)
+ */
+void sctp_ulpevent_read_rcvinfo(const struct sctp_ulpevent *event,
+ struct msghdr *msghdr)
+{
+ struct sctp_rcvinfo rinfo;
+
+ if (sctp_ulpevent_is_notification(event))
+ return;
+
+ memset(&rinfo, 0, sizeof(struct sctp_rcvinfo));
+ rinfo.rcv_sid = event->stream;
+ rinfo.rcv_ssn = event->ssn;
+ rinfo.rcv_ppid = event->ppid;
+ rinfo.rcv_flags = event->flags;
+ rinfo.rcv_tsn = event->tsn;
+ rinfo.rcv_cumtsn = event->cumtsn;
+ rinfo.rcv_assoc_id = sctp_assoc2id(event->asoc);
+ rinfo.rcv_context = event->asoc->default_rcv_context;
+
+ put_cmsg(msghdr, IPPROTO_SCTP, SCTP_RCVINFO,
+ sizeof(rinfo), &rinfo);
+}
+
+/* RFC6458, Section 5.3.6. SCTP Next Receive Information Structure
+ * (SCTP_NXTINFO)
+ */
+static void __sctp_ulpevent_read_nxtinfo(const struct sctp_ulpevent *event,
+ struct msghdr *msghdr,
+ const struct sk_buff *skb)
+{
+ struct sctp_nxtinfo nxtinfo;
+
+ memset(&nxtinfo, 0, sizeof(nxtinfo));
+ nxtinfo.nxt_sid = event->stream;
+ nxtinfo.nxt_ppid = event->ppid;
+ nxtinfo.nxt_flags = event->flags;
+ if (sctp_ulpevent_is_notification(event))
+ nxtinfo.nxt_flags |= SCTP_NOTIFICATION;
+ nxtinfo.nxt_length = skb->len;
+ nxtinfo.nxt_assoc_id = sctp_assoc2id(event->asoc);
+
+ put_cmsg(msghdr, IPPROTO_SCTP, SCTP_NXTINFO,
+ sizeof(nxtinfo), &nxtinfo);
+}
+
+void sctp_ulpevent_read_nxtinfo(const struct sctp_ulpevent *event,
+ struct msghdr *msghdr,
+ struct sock *sk)
+{
+ struct sk_buff *skb;
+ int err;
+
+ skb = sctp_skb_recv_datagram(sk, MSG_PEEK | MSG_DONTWAIT, &err);
+ if (skb != NULL) {
+ __sctp_ulpevent_read_nxtinfo(sctp_skb2event(skb),
+ msghdr, skb);
+ /* Just release refcount here. */
+ kfree_skb(skb);
+ }
+}
+
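Unlike the older SCTP_SNDRCV data, the SCTP_RCVINFO and SCTP_NXTINFO ancillary
types handled above are enabled with dedicated boolean socket options defined
by RFC 6458. A minimal sketch (error handling elided):

	#include <netinet/sctp.h>
	#include <sys/socket.h>

	/* Turn on delivery of SCTP_RCVINFO and SCTP_NXTINFO cmsgs on 'fd'. */
	static void enable_recv_info(int fd)
	{
		int on = 1;

		setsockopt(fd, IPPROTO_SCTP, SCTP_RECVRCVINFO, &on, sizeof(on));
		setsockopt(fd, IPPROTO_SCTP, SCTP_RECVNXTINFO, &on, sizeof(on));
	}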
+/* Do accounting for bytes received and hold a reference to the association
+ * for each skb.
+ */
+static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event,
+ struct sctp_association *asoc)
+{
+ struct sk_buff *skb, *frag;
+
+ skb = sctp_event2skb(event);
+ /* Set the owner and charge rwnd for bytes received. */
+ sctp_ulpevent_set_owner(event, asoc);
+ sctp_assoc_rwnd_decrease(asoc, skb_headlen(skb));
+
+ if (!skb->data_len)
+ return;
+
+ /* Note: Not clearing the entire event struct as this is just a
+ * fragment of the real event. However, we still need to do rwnd
+ * accounting.
+ * In general, the skb passed from IP can have only 1 level of
+ * fragments. But we allow multiple levels of fragments.
+ */
+ skb_walk_frags(skb, frag)
+ sctp_ulpevent_receive_data(sctp_skb2event(frag), asoc);
+}
+
+/* Do accounting for bytes just read by user and release the references to
+ * the association.
+ */
+static void sctp_ulpevent_release_data(struct sctp_ulpevent *event)
+{
+ struct sk_buff *skb, *frag;
+ unsigned int len;
+
+ /* Current stack structures assume that the rcv buffer is
+ * per socket. For UDP style sockets this is not true as
+ * multiple associations may be on a single UDP-style socket.
+ * Use the local private area of the skb to track the owning
+ * association.
+ */
+
+ skb = sctp_event2skb(event);
+ len = skb->len;
+
+ if (!skb->data_len)
+ goto done;
+
+ /* Don't forget the fragments. */
+ skb_walk_frags(skb, frag) {
+ /* NOTE: skb_shinfos are recursive. Although IP returns
+ * skb's with only 1 level of fragments, SCTP reassembly can
+ * increase the levels.
+ */
+ sctp_ulpevent_release_frag_data(sctp_skb2event(frag));
+ }
+
+done:
+ sctp_assoc_rwnd_increase(event->asoc, len);
+ sctp_chunk_put(event->chunk);
+ sctp_ulpevent_release_owner(event);
+}
+
+static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event)
+{
+ struct sk_buff *skb, *frag;
+
+ skb = sctp_event2skb(event);
+
+ if (!skb->data_len)
+ goto done;
+
+ /* Don't forget the fragments. */
+ skb_walk_frags(skb, frag) {
+ /* NOTE: skb_shinfos are recursive. Although IP returns
+ * skb's with only 1 level of fragments, SCTP reassembly can
+ * increase the levels.
+ */
+ sctp_ulpevent_release_frag_data(sctp_skb2event(frag));
+ }
+
+done:
+ sctp_chunk_put(event->chunk);
+ sctp_ulpevent_release_owner(event);
+}
+
+/* Free a ulpevent that has an owner. It includes releasing the reference
+ * to the owner, updating the rwnd in case of a DATA event and freeing the
+ * skb.
+ */
+void sctp_ulpevent_free(struct sctp_ulpevent *event)
+{
+ if (sctp_ulpevent_is_notification(event))
+ sctp_ulpevent_release_owner(event);
+ else
+ sctp_ulpevent_release_data(event);
+
+ kfree_skb(sctp_event2skb(event));
+}
+
+/* Purge the skb lists holding ulpevents. */
+unsigned int sctp_queue_purge_ulpevents(struct sk_buff_head *list)
+{
+ struct sk_buff *skb;
+ unsigned int data_unread = 0;
+
+ while ((skb = skb_dequeue(list)) != NULL) {
+ struct sctp_ulpevent *event = sctp_skb2event(skb);
+
+ if (!sctp_ulpevent_is_notification(event))
+ data_unread += skb->len;
+
+ sctp_ulpevent_free(event);
+ }
+
+ return data_unread;
+}
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
new file mode 100644
index 000000000..0a8510a0c
--- /dev/null
+++ b/net/sctp/ulpqueue.c
@@ -0,0 +1,1132 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001 Intel Corp.
+ * Copyright (c) 2001 Nokia, Inc.
+ * Copyright (c) 2001 La Monte H.P. Yarroll
+ *
+ * This abstraction carries sctp events to the ULP (sockets).
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <net/busy_poll.h>
+#include <net/sctp/structs.h>
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+
+/* Forward declarations for internal helpers. */
+static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *);
+static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
+ struct sctp_ulpevent *);
+static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);
+
+/* 1st Level Abstractions */
+
+/* Initialize a ULP queue from a block of memory. */
+struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
+ struct sctp_association *asoc)
+{
+ memset(ulpq, 0, sizeof(struct sctp_ulpq));
+
+ ulpq->asoc = asoc;
+ skb_queue_head_init(&ulpq->reasm);
+ skb_queue_head_init(&ulpq->reasm_uo);
+ skb_queue_head_init(&ulpq->lobby);
+ ulpq->pd_mode = 0;
+
+ return ulpq;
+}
+
+
+/* Flush the reassembly and ordering queues. */
+void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
+{
+ struct sk_buff *skb;
+ struct sctp_ulpevent *event;
+
+ while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
+ event = sctp_skb2event(skb);
+ sctp_ulpevent_free(event);
+ }
+
+ while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
+ event = sctp_skb2event(skb);
+ sctp_ulpevent_free(event);
+ }
+
+ while ((skb = __skb_dequeue(&ulpq->reasm_uo)) != NULL) {
+ event = sctp_skb2event(skb);
+ sctp_ulpevent_free(event);
+ }
+}
+
+/* Dispose of a ulpqueue. */
+void sctp_ulpq_free(struct sctp_ulpq *ulpq)
+{
+ sctp_ulpq_flush(ulpq);
+}
+
+/* Process an incoming DATA chunk. */
+int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
+ gfp_t gfp)
+{
+ struct sk_buff_head temp;
+ struct sctp_ulpevent *event;
+ int event_eor = 0;
+
+ /* Create an event from the incoming chunk. */
+ event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
+ if (!event)
+ return -ENOMEM;
+
+ event->ssn = ntohs(chunk->subh.data_hdr->ssn);
+ event->ppid = chunk->subh.data_hdr->ppid;
+
+ /* Do reassembly if needed. */
+ event = sctp_ulpq_reasm(ulpq, event);
+
+ /* Do ordering if needed. */
+ if (event) {
+ /* Create a temporary list to collect chunks on. */
+ skb_queue_head_init(&temp);
+ __skb_queue_tail(&temp, sctp_event2skb(event));
+
+ if (event->msg_flags & MSG_EOR)
+ event = sctp_ulpq_order(ulpq, event);
+ }
+
+ /* Send event to the ULP. 'event' is the sctp_ulpevent for
+ * very first SKB on the 'temp' list.
+ */
+ if (event) {
+ event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
+ sctp_ulpq_tail_event(ulpq, &temp);
+ }
+
+ return event_eor;
+}
+
+/* Add a new event for propagation to the ULP. */
+/* Clear the partial delivery mode for this socket. Note: This
+ * assumes that no association is currently in partial delivery mode.
+ */
+int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
+{
+ struct sctp_sock *sp = sctp_sk(sk);
+
+ if (atomic_dec_and_test(&sp->pd_mode)) {
+ /* This means there are no other associations in PD, so
+ * we can go ahead and clear out the lobby in one shot
+ */
+ if (!skb_queue_empty(&sp->pd_lobby)) {
+ skb_queue_splice_tail_init(&sp->pd_lobby,
+ &sk->sk_receive_queue);
+ return 1;
+ }
+ } else {
+ /* There are other associations in PD, so we only need to
+ * pull stuff out of the lobby that belongs to the
+ * association that is exiting PD (all of its notifications
+ * are posted here).
+ */
+ if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
+ struct sk_buff *skb, *tmp;
+ struct sctp_ulpevent *event;
+
+ sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
+ event = sctp_skb2event(skb);
+ if (event->asoc == asoc) {
+ __skb_unlink(skb, &sp->pd_lobby);
+ __skb_queue_tail(&sk->sk_receive_queue,
+ skb);
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* Set the pd_mode on the socket and ulpq */
+static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
+{
+ struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);
+
+ atomic_inc(&sp->pd_mode);
+ ulpq->pd_mode = 1;
+}
+
+/* Clear the pd_mode and restart any pending messages waiting for delivery. */
+static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
+{
+ ulpq->pd_mode = 0;
+ sctp_ulpq_reasm_drain(ulpq);
+ return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
+}
+
+int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sk_buff_head *skb_list)
+{
+ struct sock *sk = ulpq->asoc->base.sk;
+ struct sctp_sock *sp = sctp_sk(sk);
+ struct sctp_ulpevent *event;
+ struct sk_buff_head *queue;
+ struct sk_buff *skb;
+ int clear_pd = 0;
+
+ skb = __skb_peek(skb_list);
+ event = sctp_skb2event(skb);
+
+ /* If the socket is just going to throw this away, do not
+ * even try to deliver it.
+ */
+ if (sk->sk_shutdown & RCV_SHUTDOWN &&
+ (sk->sk_shutdown & SEND_SHUTDOWN ||
+ !sctp_ulpevent_is_notification(event)))
+ goto out_free;
+
+ if (!sctp_ulpevent_is_notification(event)) {
+ sk_mark_napi_id(sk, skb);
+ sk_incoming_cpu_update(sk);
+ }
+ /* Check if the user wishes to receive this event. */
+ if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
+ goto out_free;
+
+ /* If we are in partial delivery mode, post to the lobby until
+ * partial delivery is cleared, unless, of course, _this_ association
+ * is the cause of the partial delivery.
+ */
+
+ if (atomic_read(&sp->pd_mode) == 0) {
+ queue = &sk->sk_receive_queue;
+ } else {
+ if (ulpq->pd_mode) {
+ /* If the association is in partial delivery, we
+ * need to finish delivering the partially processed
+ * packet before passing any other data. This is
+ * because we don't truly support stream interleaving.
+ */
+ if ((event->msg_flags & MSG_NOTIFICATION) ||
+ (SCTP_DATA_NOT_FRAG ==
+ (event->msg_flags & SCTP_DATA_FRAG_MASK)))
+ queue = &sp->pd_lobby;
+ else {
+ clear_pd = event->msg_flags & MSG_EOR;
+ queue = &sk->sk_receive_queue;
+ }
+ } else {
+ /*
+ * If fragment interleave is enabled, we
+ * can queue this to the receive queue instead
+ * of the lobby.
+ */
+ if (sp->frag_interleave)
+ queue = &sk->sk_receive_queue;
+ else
+ queue = &sp->pd_lobby;
+ }
+ }
+
+ skb_queue_splice_tail_init(skb_list, queue);
+
+ /* Did we just complete partial delivery and need to get
+ * rolling again? Move pending data to the receive
+ * queue.
+ */
+ if (clear_pd)
+ sctp_ulpq_clear_pd(ulpq);
+
+ if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
+ if (!sock_owned_by_user(sk))
+ sp->data_ready_signalled = 1;
+ sk->sk_data_ready(sk);
+ }
+ return 1;
+
+out_free:
+ if (skb_list)
+ sctp_queue_purge_ulpevents(skb_list);
+ else
+ sctp_ulpevent_free(event);
+
+ return 0;
+}
+
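Two socket options steer the queue selection above: SCTP_FRAGMENT_INTERLEAVE
controls whether other data may be delivered while a message is being
partially delivered, and SCTP_PARTIAL_DELIVERY_POINT sets how many buffered
bytes trigger partial delivery. A minimal userspace sketch with illustrative
values only:

	#include <netinet/sctp.h>
	#include <stdint.h>
	#include <sys/socket.h>

	/* Allow interleaving and start partial delivery once 64 KiB of a
	 * message has been buffered.  The values are examples, not
	 * recommendations.
	 */
	static void tune_partial_delivery(int fd)
	{
		int interleave = 1;		/* 0, 1 or 2; see RFC 6458 */
		uint32_t pd_point = 64 * 1024;

		setsockopt(fd, IPPROTO_SCTP, SCTP_FRAGMENT_INTERLEAVE,
			   &interleave, sizeof(interleave));
		setsockopt(fd, IPPROTO_SCTP, SCTP_PARTIAL_DELIVERY_POINT,
			   &pd_point, sizeof(pd_point));
	}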
+/* 2nd Level Abstractions */
+
+/* Helper function to store chunks that need to be reassembled. */
+static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sk_buff *pos;
+ struct sctp_ulpevent *cevent;
+ __u32 tsn, ctsn;
+
+ tsn = event->tsn;
+
+ /* See if it belongs at the end. */
+ pos = skb_peek_tail(&ulpq->reasm);
+ if (!pos) {
+ __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
+ return;
+ }
+
+ /* Short circuit just dropping it at the end. */
+ cevent = sctp_skb2event(pos);
+ ctsn = cevent->tsn;
+ if (TSN_lt(ctsn, tsn)) {
+ __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
+ return;
+ }
+
+ /* Find the right place in this list. We store them by TSN. */
+ skb_queue_walk(&ulpq->reasm, pos) {
+ cevent = sctp_skb2event(pos);
+ ctsn = cevent->tsn;
+
+ if (TSN_lt(tsn, ctsn))
+ break;
+ }
+
+ /* Insert before pos. */
+ __skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
+
+}
+
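The TSN ordering above relies on TSN_lt(), a wrap-aware (serial number)
comparison rather than a plain '<'. A minimal standalone sketch of that kind
of comparison, assuming the two TSNs are never more than 2^31 apart:

	#include <stdbool.h>
	#include <stdint.h>

	/* Wrap-aware "a comes before b" for 32-bit TSNs: cast the difference
	 * to signed so that, e.g., tsn_before(0xfffffffe, 1) is true across
	 * the wrap point.
	 */
	static inline bool tsn_before(uint32_t a, uint32_t b)
	{
		return (int32_t)(a - b) < 0;
	}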
+/* Helper function to return an event corresponding to the reassembled
+ * datagram.
+ * This routine creates a re-assembled skb given the first and last skb's
+ * as stored in the reassembly queue. The skb's may be non-linear if the sctp
+ * payload was fragmented on the way and ip had to reassemble them.
+ * We add the rest of skb's to the first skb's fraglist.
+ */
+struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
+ struct sk_buff_head *queue,
+ struct sk_buff *f_frag,
+ struct sk_buff *l_frag)
+{
+ struct sk_buff *pos;
+ struct sk_buff *new = NULL;
+ struct sctp_ulpevent *event;
+ struct sk_buff *pnext, *last;
+ struct sk_buff *list = skb_shinfo(f_frag)->frag_list;
+
+ /* Store the pointer to the 2nd skb */
+ if (f_frag == l_frag)
+ pos = NULL;
+ else
+ pos = f_frag->next;
+
+ /* Get the last skb in the f_frag's frag_list if present. */
+ for (last = list; list; last = list, list = list->next)
+ ;
+
+ /* Add the list of remaining fragments to the first fragments
+ * frag_list.
+ */
+ if (last)
+ last->next = pos;
+ else {
+ if (skb_cloned(f_frag)) {
+ /* This is a cloned skb, we can't just modify
+ * the frag_list. We need a new skb to do that.
+ * Instead of calling skb_unshare(), we'll do it
+ * ourselves since we need to delay the free.
+ */
+ new = skb_copy(f_frag, GFP_ATOMIC);
+ if (!new)
+ return NULL; /* try again later */
+
+ sctp_skb_set_owner_r(new, f_frag->sk);
+
+ skb_shinfo(new)->frag_list = pos;
+ } else
+ skb_shinfo(f_frag)->frag_list = pos;
+ }
+
+ /* Remove the first fragment from the reassembly queue. */
+ __skb_unlink(f_frag, queue);
+
+ /* if we did unshare, then free the old skb and re-assign */
+ if (new) {
+ kfree_skb(f_frag);
+ f_frag = new;
+ }
+
+ while (pos) {
+
+ pnext = pos->next;
+
+ /* Update the len and data_len fields of the first fragment. */
+ f_frag->len += pos->len;
+ f_frag->data_len += pos->len;
+
+ /* Remove the fragment from the reassembly queue. */
+ __skb_unlink(pos, queue);
+
+ /* Break if we have reached the last fragment. */
+ if (pos == l_frag)
+ break;
+ pos->next = pnext;
+ pos = pnext;
+ }
+
+ event = sctp_skb2event(f_frag);
+ SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);
+
+ return event;
+}
+
+
+/* Helper function to check if an incoming chunk has filled up the last
+ * missing fragment in a SCTP datagram and return the corresponding event.
+ */
+static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
+{
+ struct sk_buff *pos;
+ struct sctp_ulpevent *cevent;
+ struct sk_buff *first_frag = NULL;
+ __u32 ctsn, next_tsn;
+ struct sctp_ulpevent *retval = NULL;
+ struct sk_buff *pd_first = NULL;
+ struct sk_buff *pd_last = NULL;
+ size_t pd_len = 0;
+ struct sctp_association *asoc;
+ u32 pd_point;
+
+ /* Initialized to 0 just to avoid compiler warning message. Will
+ * never be used with this value. It is referenced only after it
+ * is set when we find the first fragment of a message.
+ */
+ next_tsn = 0;
+
+ /* The chunks are held in the reasm queue sorted by TSN.
+ * Walk through the queue sequentially and look for a sequence of
+ * fragmented chunks that complete a datagram.
+ * 'first_frag' and next_tsn are reset when we find a chunk which
+ * is the first fragment of a datagram. Once these 2 fields are set
+ * we expect to find the remaining middle fragments and the last
+ * fragment in order. If not, first_frag is reset to NULL and we
+ * start the next pass when we find another first fragment.
+ *
+ * There is a potential to do partial delivery if the user sets the
+ * SCTP_PARTIAL_DELIVERY_POINT option. Let's count some things here
+ * to see if we can do PD.
+ */
+ skb_queue_walk(&ulpq->reasm, pos) {
+ cevent = sctp_skb2event(pos);
+ ctsn = cevent->tsn;
+
+ switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+ case SCTP_DATA_FIRST_FRAG:
+ /* If this "FIRST_FRAG" is the first
+ * element in the queue, then count it towards
+ * possible PD.
+ */
+ if (skb_queue_is_first(&ulpq->reasm, pos)) {
+ pd_first = pos;
+ pd_last = pos;
+ pd_len = pos->len;
+ } else {
+ pd_first = NULL;
+ pd_last = NULL;
+ pd_len = 0;
+ }
+
+ first_frag = pos;
+ next_tsn = ctsn + 1;
+ break;
+
+ case SCTP_DATA_MIDDLE_FRAG:
+ if ((first_frag) && (ctsn == next_tsn)) {
+ next_tsn++;
+ if (pd_first) {
+ pd_last = pos;
+ pd_len += pos->len;
+ }
+ } else
+ first_frag = NULL;
+ break;
+
+ case SCTP_DATA_LAST_FRAG:
+ if (first_frag && (ctsn == next_tsn))
+ goto found;
+ else
+ first_frag = NULL;
+ break;
+ }
+ }
+
+ asoc = ulpq->asoc;
+ if (pd_first) {
+ /* Make sure we can enter partial delivery.
+ * We can trigger partial delivery only if fragment
+ * interleave is set, or the socket is not already
+ * in partial delivery.
+ */
+ if (!sctp_sk(asoc->base.sk)->frag_interleave &&
+ atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
+ goto done;
+
+ cevent = sctp_skb2event(pd_first);
+ pd_point = sctp_sk(asoc->base.sk)->pd_point;
+ if (pd_point && pd_point <= pd_len) {
+ retval = sctp_make_reassembled_event(asoc->base.net,
+ &ulpq->reasm,
+ pd_first, pd_last);
+ if (retval)
+ sctp_ulpq_set_pd(ulpq);
+ }
+ }
+done:
+ return retval;
+found:
+ retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
+ &ulpq->reasm, first_frag, pos);
+ if (retval)
+ retval->msg_flags |= MSG_EOR;
+ goto done;
+}
+
+/* Retrieve the next set of fragments of a partial message. */
+static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
+{
+ struct sk_buff *pos, *last_frag, *first_frag;
+ struct sctp_ulpevent *cevent;
+ __u32 ctsn, next_tsn;
+ int is_last;
+ struct sctp_ulpevent *retval;
+
+ /* The chunks are held in the reasm queue sorted by TSN.
+ * Walk through the queue sequentially and look for the first
+ * sequence of fragmented chunks.
+ */
+
+ if (skb_queue_empty(&ulpq->reasm))
+ return NULL;
+
+ last_frag = first_frag = NULL;
+ retval = NULL;
+ next_tsn = 0;
+ is_last = 0;
+
+ skb_queue_walk(&ulpq->reasm, pos) {
+ cevent = sctp_skb2event(pos);
+ ctsn = cevent->tsn;
+
+ switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+ case SCTP_DATA_FIRST_FRAG:
+ if (!first_frag)
+ return NULL;
+ goto done;
+ case SCTP_DATA_MIDDLE_FRAG:
+ if (!first_frag) {
+ first_frag = pos;
+ next_tsn = ctsn + 1;
+ last_frag = pos;
+ } else if (next_tsn == ctsn) {
+ next_tsn++;
+ last_frag = pos;
+ } else
+ goto done;
+ break;
+ case SCTP_DATA_LAST_FRAG:
+ if (!first_frag)
+ first_frag = pos;
+ else if (ctsn != next_tsn)
+ goto done;
+ last_frag = pos;
+ is_last = 1;
+ goto done;
+ default:
+ return NULL;
+ }
+ }
+
+ /* We have the reassembled event. There is no need to look
+ * further.
+ */
+done:
+ retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
+ first_frag, last_frag);
+ if (retval && is_last)
+ retval->msg_flags |= MSG_EOR;
+
+ return retval;
+}
+
+
+/* Helper function to reassemble chunks. Hold chunks on the reasm queue that
+ * need reassembling.
+ */
+static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sctp_ulpevent *retval = NULL;
+
+ /* Check if this is part of a fragmented message. */
+ if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
+ event->msg_flags |= MSG_EOR;
+ return event;
+ }
+
+ sctp_ulpq_store_reasm(ulpq, event);
+ if (!ulpq->pd_mode)
+ retval = sctp_ulpq_retrieve_reassembled(ulpq);
+ else {
+ __u32 ctsn, ctsnap;
+
+ /* Do not even bother unless this is the next tsn to
+ * be delivered.
+ */
+ ctsn = event->tsn;
+ ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
+ if (TSN_lte(ctsn, ctsnap))
+ retval = sctp_ulpq_retrieve_partial(ulpq);
+ }
+
+ return retval;
+}
+
+/* Retrieve the first part (sequential fragments) for partial delivery. */
+static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
+{
+ struct sk_buff *pos, *last_frag, *first_frag;
+ struct sctp_ulpevent *cevent;
+ __u32 ctsn, next_tsn;
+ struct sctp_ulpevent *retval;
+
+ /* The chunks are held in the reasm queue sorted by TSN.
+ * Walk through the queue sequentially and look for a sequence of
+ * fragmented chunks that start a datagram.
+ */
+
+ if (skb_queue_empty(&ulpq->reasm))
+ return NULL;
+
+ last_frag = first_frag = NULL;
+ retval = NULL;
+ next_tsn = 0;
+
+ skb_queue_walk(&ulpq->reasm, pos) {
+ cevent = sctp_skb2event(pos);
+ ctsn = cevent->tsn;
+
+ switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+ case SCTP_DATA_FIRST_FRAG:
+ if (!first_frag) {
+ first_frag = pos;
+ next_tsn = ctsn + 1;
+ last_frag = pos;
+ } else
+ goto done;
+ break;
+
+ case SCTP_DATA_MIDDLE_FRAG:
+ if (!first_frag)
+ return NULL;
+ if (ctsn == next_tsn) {
+ next_tsn++;
+ last_frag = pos;
+ } else
+ goto done;
+ break;
+
+ case SCTP_DATA_LAST_FRAG:
+ if (!first_frag)
+ return NULL;
+ else
+ goto done;
+ break;
+
+ default:
+ return NULL;
+ }
+ }
+
+ /* We have the reassembled event. There is no need to look
+ * further.
+ */
+done:
+ retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
+ first_frag, last_frag);
+ return retval;
+}
+
+/*
+ * Flush out stale fragments from the reassembly queue when processing
+ * a Forward TSN.
+ *
+ * RFC 3758, Section 3.6
+ *
+ * After receiving and processing a FORWARD TSN, the data receiver MUST
+ * take cautions in updating its re-assembly queue. The receiver MUST
+ * remove any partially reassembled message, which is still missing one
+ * or more TSNs earlier than or equal to the new cumulative TSN point.
+ * In the event that the receiver has invoked the partial delivery API,
+ * a notification SHOULD also be generated to inform the upper layer API
+ * that the message being partially delivered will NOT be completed.
+ */
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
+{
+ struct sk_buff *pos, *tmp;
+ struct sctp_ulpevent *event;
+ __u32 tsn;
+
+ if (skb_queue_empty(&ulpq->reasm))
+ return;
+
+ skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
+ event = sctp_skb2event(pos);
+ tsn = event->tsn;
+
+ /* Since the entire message must be abandoned by the
+ * sender (item A3 in Section 3.5, RFC 3758), we can
+ * free all fragments on the list that are less than
+ * or equal to the ctsn_point.
+ */
+ if (TSN_lte(tsn, fwd_tsn)) {
+ __skb_unlink(pos, &ulpq->reasm);
+ sctp_ulpevent_free(event);
+ } else
+ break;
+ }
+}
+
+/*
+ * Drain the reassembly queue. If we just cleared partial delivery, it
+ * is possible that the reassembly queue will contain already reassembled
+ * messages. Retrieve any such messages and give them to the user.
+ */
+static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
+{
+ struct sctp_ulpevent *event = NULL;
+
+ if (skb_queue_empty(&ulpq->reasm))
+ return;
+
+ while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
+ struct sk_buff_head temp;
+
+ skb_queue_head_init(&temp);
+ __skb_queue_tail(&temp, sctp_event2skb(event));
+
+ /* Do ordering if needed. */
+ if (event->msg_flags & MSG_EOR)
+ event = sctp_ulpq_order(ulpq, event);
+
+ /* Send event to the ULP. 'event' is the
+ * sctp_ulpevent for the very first SKB on the 'temp' list.
+ */
+ if (event)
+ sctp_ulpq_tail_event(ulpq, &temp);
+ }
+}
+
+
+/* Helper function to gather skbs that have possibly become
+ * ordered by an incoming chunk.
+ */
+static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sk_buff_head *event_list;
+ struct sk_buff *pos, *tmp;
+ struct sctp_ulpevent *cevent;
+ struct sctp_stream *stream;
+ __u16 sid, csid, cssn;
+
+ sid = event->stream;
+ stream = &ulpq->asoc->stream;
+
+ event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;
+
+ /* We are holding the chunks by stream, by SSN. */
+ sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
+ cevent = (struct sctp_ulpevent *) pos->cb;
+ csid = cevent->stream;
+ cssn = cevent->ssn;
+
+ /* Have we gone too far? */
+ if (csid > sid)
+ break;
+
+ /* Have we not gone far enough? */
+ if (csid < sid)
+ continue;
+
+ if (cssn != sctp_ssn_peek(stream, in, sid))
+ break;
+
+ /* Found it, so mark in the stream. */
+ sctp_ssn_next(stream, in, sid);
+
+ __skb_unlink(pos, &ulpq->lobby);
+
+ /* Attach all gathered skbs to the event. */
+ __skb_queue_tail(event_list, pos);
+ }
+}
+
+/* Helper function to store chunks needing ordering. */
+static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sk_buff *pos;
+ struct sctp_ulpevent *cevent;
+ __u16 sid, csid;
+ __u16 ssn, cssn;
+
+ pos = skb_peek_tail(&ulpq->lobby);
+ if (!pos) {
+ __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
+ return;
+ }
+
+ sid = event->stream;
+ ssn = event->ssn;
+
+ cevent = (struct sctp_ulpevent *) pos->cb;
+ csid = cevent->stream;
+ cssn = cevent->ssn;
+ if (sid > csid) {
+ __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
+ return;
+ }
+
+ if ((sid == csid) && SSN_lt(cssn, ssn)) {
+ __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
+ return;
+ }
+
+ /* Find the right place in this list. We store them by
+ * stream ID and then by SSN.
+ */
+ skb_queue_walk(&ulpq->lobby, pos) {
+ cevent = (struct sctp_ulpevent *) pos->cb;
+ csid = cevent->stream;
+ cssn = cevent->ssn;
+
+ if (csid > sid)
+ break;
+ if (csid == sid && SSN_lt(ssn, cssn))
+ break;
+ }
+
+
+ /* Insert before pos. */
+ __skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
+}
+
+static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ __u16 sid, ssn;
+ struct sctp_stream *stream;
+
+ /* Check if this message needs ordering. */
+ if (event->msg_flags & SCTP_DATA_UNORDERED)
+ return event;
+
+ /* Note: The stream ID must be verified before this routine. */
+ sid = event->stream;
+ ssn = event->ssn;
+ stream = &ulpq->asoc->stream;
+
+ /* Is this the expected SSN for this stream ID? */
+ if (ssn != sctp_ssn_peek(stream, in, sid)) {
+ /* We've received something out of order, so find where it
+ * needs to be placed. We order by stream and then by SSN.
+ */
+ sctp_ulpq_store_ordered(ulpq, event);
+ return NULL;
+ }
+
+ /* Mark that the next chunk has been found. */
+ sctp_ssn_next(stream, in, sid);
+
+ /* Go find any other chunks that were waiting for
+ * ordering.
+ */
+ sctp_ulpq_retrieve_ordered(ulpq, event);
+
+ return event;
+}
+
+/* Helper function to gather skbs that have possibly become
+ * ordered by forward tsn skipping their dependencies.
+ */
+static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
+{
+ struct sk_buff *pos, *tmp;
+ struct sctp_ulpevent *cevent;
+ struct sctp_ulpevent *event;
+ struct sctp_stream *stream;
+ struct sk_buff_head temp;
+ struct sk_buff_head *lobby = &ulpq->lobby;
+ __u16 csid, cssn;
+
+ stream = &ulpq->asoc->stream;
+
+ /* We are holding the chunks by stream, by SSN. */
+ skb_queue_head_init(&temp);
+ event = NULL;
+ sctp_skb_for_each(pos, lobby, tmp) {
+ cevent = (struct sctp_ulpevent *) pos->cb;
+ csid = cevent->stream;
+ cssn = cevent->ssn;
+
+ /* Have we gone too far? */
+ if (csid > sid)
+ break;
+
+ /* Have we not gone far enough? */
+ if (csid < sid)
+ continue;
+
+ /* see if this ssn has been marked by skipping */
+ if (!SSN_lt(cssn, sctp_ssn_peek(stream, in, csid)))
+ break;
+
+ __skb_unlink(pos, lobby);
+ if (!event)
+ /* Create a temporary list to collect chunks on. */
+ event = sctp_skb2event(pos);
+
+ /* Attach all gathered skbs to the event. */
+ __skb_queue_tail(&temp, pos);
+ }
+
+ /* If we didn't reap any data, see if the next expected SSN
+ * is next on the queue and if so, use that.
+ */
+ if (event == NULL && pos != (struct sk_buff *)lobby) {
+ cevent = (struct sctp_ulpevent *) pos->cb;
+ csid = cevent->stream;
+ cssn = cevent->ssn;
+
+ if (csid == sid && cssn == sctp_ssn_peek(stream, in, csid)) {
+ sctp_ssn_next(stream, in, csid);
+ __skb_unlink(pos, lobby);
+ __skb_queue_tail(&temp, pos);
+ event = sctp_skb2event(pos);
+ }
+ }
+
+ /* Send event to the ULP. 'event' is the sctp_ulpevent for
+ * very first SKB on the 'temp' list.
+ */
+ if (event) {
+ /* see if we have more ordered that we can deliver */
+ sctp_ulpq_retrieve_ordered(ulpq, event);
+ sctp_ulpq_tail_event(ulpq, &temp);
+ }
+}
+
+/* Skip over an SSN. This is used during the processing of
+ * a Forward TSN chunk to skip over the abandoned ordered data.
+ */
+void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
+{
+ struct sctp_stream *stream;
+
+ /* Note: The stream ID must be verified before this routine. */
+ stream = &ulpq->asoc->stream;
+
+ /* Is this an old SSN? If so ignore. */
+ if (SSN_lt(ssn, sctp_ssn_peek(stream, in, sid)))
+ return;
+
+ /* Mark that we are no longer expecting this SSN or lower. */
+ sctp_ssn_skip(stream, in, sid, ssn);
+
+ /* Go find any other chunks that were waiting for
+ * ordering and deliver them if needed.
+ */
+ sctp_ulpq_reap_ordered(ulpq, sid);
+}
+
+__u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, struct sk_buff_head *list,
+ __u16 needed)
+{
+ __u16 freed = 0;
+ __u32 tsn, last_tsn;
+ struct sk_buff *skb, *flist, *last;
+ struct sctp_ulpevent *event;
+ struct sctp_tsnmap *tsnmap;
+
+ tsnmap = &ulpq->asoc->peer.tsn_map;
+
+ while ((skb = skb_peek_tail(list)) != NULL) {
+ event = sctp_skb2event(skb);
+ tsn = event->tsn;
+
+ /* Don't renege below the Cumulative TSN ACK Point. */
+ if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
+ break;
+
+ /* Events in ordering queue may have multiple fragments
+ * corresponding to additional TSNs. Sum the total
+ * freed space; find the last TSN.
+ */
+ freed += skb_headlen(skb);
+ flist = skb_shinfo(skb)->frag_list;
+ for (last = flist; flist; flist = flist->next) {
+ last = flist;
+ freed += skb_headlen(last);
+ }
+ if (last)
+ last_tsn = sctp_skb2event(last)->tsn;
+ else
+ last_tsn = tsn;
+
+ /* Unlink the event, then renege all applicable TSNs. */
+ __skb_unlink(skb, list);
+ sctp_ulpevent_free(event);
+ while (TSN_lte(tsn, last_tsn)) {
+ sctp_tsnmap_renege(tsnmap, tsn);
+ tsn++;
+ }
+ if (freed >= needed)
+ return freed;
+ }
+
+ return freed;
+}
+
+/* Renege 'needed' bytes from the ordering queue. */
+static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
+{
+ return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
+}
+
+/* Renege 'needed' bytes from the reassembly queue. */
+static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
+{
+ return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
+}
+
+/* Partially deliver the first message as there is pressure on rwnd. */
+void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
+ gfp_t gfp)
+{
+ struct sctp_ulpevent *event;
+ struct sctp_association *asoc;
+ struct sctp_sock *sp;
+ __u32 ctsn;
+ struct sk_buff *skb;
+
+ asoc = ulpq->asoc;
+ sp = sctp_sk(asoc->base.sk);
+
+ /* If the association is already in Partial Delivery mode
+ * we have nothing to do.
+ */
+ if (ulpq->pd_mode)
+ return;
+
+ /* Data must be at or below the Cumulative TSN ACK Point to
+ * start partial delivery.
+ */
+ skb = skb_peek(&asoc->ulpq.reasm);
+ if (skb != NULL) {
+ ctsn = sctp_skb2event(skb)->tsn;
+ if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
+ return;
+ }
+
+ /* If the user enabled the fragment interleave socket option,
+ * multiple associations can enter partial delivery.
+ * Otherwise, we can only enter partial delivery if the
+ * socket is not already in partial delivery mode.
+ */
+ if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
+ /* Is partial delivery possible? */
+ event = sctp_ulpq_retrieve_first(ulpq);
+ /* Send event to the ULP. */
+ if (event) {
+ struct sk_buff_head temp;
+
+ skb_queue_head_init(&temp);
+ __skb_queue_tail(&temp, sctp_event2skb(event));
+ sctp_ulpq_tail_event(ulpq, &temp);
+ sctp_ulpq_set_pd(ulpq);
+ return;
+ }
+ }
+}
+
+/* Renege some packets to make room for an incoming chunk. */
+void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
+ gfp_t gfp)
+{
+ struct sctp_association *asoc = ulpq->asoc;
+ __u32 freed = 0;
+ __u16 needed;
+
+ needed = ntohs(chunk->chunk_hdr->length) -
+ sizeof(struct sctp_data_chunk);
+
+ if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
+ freed = sctp_ulpq_renege_order(ulpq, needed);
+ if (freed < needed)
+ freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
+ }
+ /* If able to free enough room, accept this chunk. */
+ if (sk_rmem_schedule(asoc->base.sk, chunk->skb, needed) &&
+ freed >= needed) {
+ int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
+ /*
+ * Enter partial delivery if chunk has not been
+ * delivered; otherwise, drain the reassembly queue.
+ */
+ if (retval <= 0)
+ sctp_ulpq_partial_delivery(ulpq, gfp);
+ else if (retval == 1)
+ sctp_ulpq_reasm_drain(ulpq);
+ }
+}
+
+/* Notify the application if an association is aborted and in
+ * partial delivery mode. Send up any pending received messages.
+ */
+void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
+{
+ struct sctp_ulpevent *ev = NULL;
+ struct sctp_sock *sp;
+ struct sock *sk;
+
+ if (!ulpq->pd_mode)
+ return;
+
+ sk = ulpq->asoc->base.sk;
+ sp = sctp_sk(sk);
+ if (sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
+ SCTP_PARTIAL_DELIVERY_EVENT))
+ ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
+ SCTP_PARTIAL_DELIVERY_ABORTED,
+ 0, 0, 0, gfp);
+ if (ev)
+ __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
+
+ /* If there is data waiting, send it up the socket now. */
+ if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) {
+ sp->data_ready_signalled = 1;
+ sk->sk_data_ready(sk);
+ }
+}