path: root/include/net/sctp
Diffstat (limited to 'include/net/sctp')
-rw-r--r--  include/net/sctp/auth.h                 113
-rw-r--r--  include/net/sctp/checksum.h              66
-rw-r--r--  include/net/sctp/command.h              236
-rw-r--r--  include/net/sctp/constants.h            423
-rw-r--r--  include/net/sctp/sctp.h                 625
-rw-r--r--  include/net/sctp/sm.h                   424
-rw-r--r--  include/net/sctp/stream_interleave.h     46
-rw-r--r--  include/net/sctp/stream_sched.h          64
-rw-r--r--  include/net/sctp/structs.h             2177
-rw-r--r--  include/net/sctp/tsnmap.h               157
-rw-r--r--  include/net/sctp/ulpevent.h             189
-rw-r--r--  include/net/sctp/ulpqueue.h              69
12 files changed, 4589 insertions, 0 deletions
diff --git a/include/net/sctp/auth.h b/include/net/sctp/auth.h
new file mode 100644
index 000000000..d4b3b2dcd
--- /dev/null
+++ b/include/net/sctp/auth.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SCTP kernel implementation
+ * (C) Copyright 2007 Hewlett-Packard Development Company, L.P.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Vlad Yasevich <vladislav.yasevich@hp.com>
+ */
+
+#ifndef __sctp_auth_h__
+#define __sctp_auth_h__
+
+#include <linux/list.h>
+#include <linux/refcount.h>
+
+struct sctp_endpoint;
+struct sctp_association;
+struct sctp_authkey;
+struct sctp_hmacalgo;
+struct crypto_shash;
+
+/*
+ * Define a generic struct that will hold all the info
+ * necessary for an HMAC transform
+ */
+struct sctp_hmac {
+ __u16 hmac_id; /* one of the above ids */
+ char *hmac_name; /* name for loading */
+ __u16 hmac_len; /* length of the signature */
+};
+
+/* This is a generic structure that contains authentication bytes used
+ * as keying material. It's what is referred to as a byte-vector all
+ * over SCTP-AUTH.
+ */
+struct sctp_auth_bytes {
+ refcount_t refcnt;
+ __u32 len;
+ __u8 data[];
+};
+
+/* Definition for a shared key, whether endpoint or association */
+struct sctp_shared_key {
+ struct list_head key_list;
+ struct sctp_auth_bytes *key;
+ refcount_t refcnt;
+ __u16 key_id;
+ __u8 deactivated;
+};
+
+#define key_for_each(__key, __list_head) \
+ list_for_each_entry(__key, __list_head, key_list)
+
+#define key_for_each_safe(__key, __tmp, __list_head) \
+ list_for_each_entry_safe(__key, __tmp, __list_head, key_list)
+
+static inline void sctp_auth_key_hold(struct sctp_auth_bytes *key)
+{
+ if (!key)
+ return;
+
+ refcount_inc(&key->refcnt);
+}
+
+void sctp_auth_key_put(struct sctp_auth_bytes *key);
+struct sctp_shared_key *sctp_auth_shkey_create(__u16 key_id, gfp_t gfp);
+void sctp_auth_destroy_keys(struct list_head *keys);
+int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp);
+struct sctp_shared_key *sctp_auth_get_shkey(
+ const struct sctp_association *asoc,
+ __u16 key_id);
+int sctp_auth_asoc_copy_shkeys(const struct sctp_endpoint *ep,
+ struct sctp_association *asoc,
+ gfp_t gfp);
+int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp);
+void sctp_auth_destroy_hmacs(struct crypto_shash *auth_hmacs[]);
+struct sctp_hmac *sctp_auth_get_hmac(__u16 hmac_id);
+struct sctp_hmac *sctp_auth_asoc_get_hmac(const struct sctp_association *asoc);
+void sctp_auth_asoc_set_default_hmac(struct sctp_association *asoc,
+ struct sctp_hmac_algo_param *hmacs);
+int sctp_auth_asoc_verify_hmac_id(const struct sctp_association *asoc,
+ __be16 hmac_id);
+int sctp_auth_send_cid(enum sctp_cid chunk,
+ const struct sctp_association *asoc);
+int sctp_auth_recv_cid(enum sctp_cid chunk,
+ const struct sctp_association *asoc);
+void sctp_auth_calculate_hmac(const struct sctp_association *asoc,
+ struct sk_buff *skb, struct sctp_auth_chunk *auth,
+ struct sctp_shared_key *ep_key, gfp_t gfp);
+void sctp_auth_shkey_release(struct sctp_shared_key *sh_key);
+void sctp_auth_shkey_hold(struct sctp_shared_key *sh_key);
+
+/* API Helpers */
+int sctp_auth_ep_add_chunkid(struct sctp_endpoint *ep, __u8 chunk_id);
+int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep,
+ struct sctp_hmacalgo *hmacs);
+int sctp_auth_set_key(struct sctp_endpoint *ep, struct sctp_association *asoc,
+ struct sctp_authkey *auth_key);
+int sctp_auth_set_active_key(struct sctp_endpoint *ep,
+ struct sctp_association *asoc, __u16 key_id);
+int sctp_auth_del_key_id(struct sctp_endpoint *ep,
+ struct sctp_association *asoc, __u16 key_id);
+int sctp_auth_deact_key_id(struct sctp_endpoint *ep,
+ struct sctp_association *asoc, __u16 key_id);
+int sctp_auth_init(struct sctp_endpoint *ep, gfp_t gfp);
+void sctp_auth_free(struct sctp_endpoint *ep);
+
+#endif
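
A minimal usage sketch based only on the declarations in auth.h above; the lookup helper and the caller-supplied key list are hypothetical, not part of the patch.

#include <linux/list.h>
#include <net/sctp/auth.h>

/* Find an active shared key by id and pin its byte-vector so it can
 * outlive later changes to the list (hypothetical helper).
 */
static struct sctp_auth_bytes *example_grab_key(struct list_head *shkey_list,
						__u16 wanted_id)
{
	struct sctp_shared_key *sh_key;

	key_for_each(sh_key, shkey_list) {
		if (sh_key->key_id != wanted_id || sh_key->deactivated)
			continue;
		sctp_auth_key_hold(sh_key->key);	/* take a reference */
		return sh_key->key;
	}
	return NULL;
}

/* The caller later balances the reference with sctp_auth_key_put(key). */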
diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
new file mode 100644
index 000000000..5a9bb09f3
--- /dev/null
+++ b/include/net/sctp/checksum.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SCTP kernel reference Implementation
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001-2003 International Business Machines, Corp.
+ *
+ * This file is part of the SCTP kernel reference Implementation
+ *
+ * SCTP Checksum functions
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Dinakaran Joseph
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ *
+ * Rewritten to use libcrc32c by:
+ * Vlad Yasevich <vladislav.yasevich@hp.com>
+ */
+
+#ifndef __sctp_checksum_h__
+#define __sctp_checksum_h__
+
+#include <linux/types.h>
+#include <net/sctp/sctp.h>
+#include <linux/crc32c.h>
+#include <linux/crc32.h>
+
+static inline __wsum sctp_csum_update(const void *buff, int len, __wsum sum)
+{
+ /* This uses the crypto implementation of crc32c, which is either
+ * implemented w/ hardware support or resolves to __crc32c_le().
+ */
+ return (__force __wsum)crc32c((__force __u32)sum, buff, len);
+}
+
+static inline __wsum sctp_csum_combine(__wsum csum, __wsum csum2,
+ int offset, int len)
+{
+ return (__force __wsum)__crc32c_le_combine((__force __u32)csum,
+ (__force __u32)csum2, len);
+}
+
+static const struct skb_checksum_ops sctp_csum_ops = {
+ .update = sctp_csum_update,
+ .combine = sctp_csum_combine,
+};
+
+static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
+ unsigned int offset)
+{
+ struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);
+ __le32 old = sh->checksum;
+ __wsum new;
+
+ sh->checksum = 0;
+ new = ~__skb_checksum(skb, offset, skb->len - offset, ~(__wsum)0,
+ &sctp_csum_ops);
+ sh->checksum = old;
+
+ return cpu_to_le32((__force __u32)new);
+}
+
+#endif /* __sctp_checksum_h__ */
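
A hedged sketch of the intended receive-path use of sctp_compute_cksum(): the stored CRC32c is compared against a freshly computed one. The wrapper name is invented for illustration.

#include <net/sctp/checksum.h>

/* Verify the CRC32c of a received SCTP packet starting at 'offset'
 * inside the skb (illustrative wrapper).
 */
static bool example_csum_ok(struct sk_buff *skb, unsigned int offset)
{
	struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);

	/* sctp_compute_cksum() zeroes the checksum field, computes the
	 * CRC32c over the whole packet via __skb_checksum() and then
	 * restores the original value before returning.
	 */
	return sh->checksum == sctp_compute_cksum(skb, offset);
}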
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
new file mode 100644
index 000000000..e8df72e16
--- /dev/null
+++ b/include/net/sctp/command.h
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SCTP kernel Implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (C) 1999-2001 Cisco, Motorola
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * These are the definitions needed for the command object.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Ardelle Fan <ardelle.fan@intel.com>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ */
+
+#ifndef __net_sctp_command_h__
+#define __net_sctp_command_h__
+
+#include <net/sctp/constants.h>
+#include <net/sctp/structs.h>
+
+
+enum sctp_verb {
+ SCTP_CMD_NOP = 0, /* Do nothing. */
+ SCTP_CMD_NEW_ASOC, /* Register a new association. */
+ SCTP_CMD_DELETE_TCB, /* Delete the current association. */
+ SCTP_CMD_NEW_STATE, /* Enter a new state. */
+ SCTP_CMD_REPORT_TSN, /* Record the arrival of a TSN. */
+ SCTP_CMD_GEN_SACK, /* Send a Selective ACK (maybe). */
+ SCTP_CMD_PROCESS_SACK, /* Process an inbound SACK. */
+ SCTP_CMD_GEN_INIT_ACK, /* Generate an INIT ACK chunk. */
+	SCTP_CMD_PEER_INIT,	/* Process an INIT from the peer.  */
+ SCTP_CMD_GEN_COOKIE_ECHO, /* Generate a COOKIE ECHO chunk. */
+ SCTP_CMD_CHUNK_ULP, /* Send a chunk to the sockets layer. */
+ SCTP_CMD_EVENT_ULP, /* Send a notification to the sockets layer. */
+ SCTP_CMD_REPLY, /* Send a chunk to our peer. */
+ SCTP_CMD_SEND_PKT, /* Send a full packet to our peer. */
+ SCTP_CMD_RETRAN, /* Mark a transport for retransmission. */
+ SCTP_CMD_ECN_CE, /* Do delayed CE processing. */
+ SCTP_CMD_ECN_ECNE, /* Do delayed ECNE processing. */
+ SCTP_CMD_ECN_CWR, /* Do delayed CWR processing. */
+ SCTP_CMD_TIMER_START, /* Start a timer. */
+ SCTP_CMD_TIMER_START_ONCE, /* Start a timer once */
+ SCTP_CMD_TIMER_RESTART, /* Restart a timer. */
+ SCTP_CMD_TIMER_STOP, /* Stop a timer. */
+ SCTP_CMD_INIT_CHOOSE_TRANSPORT, /* Choose transport for an INIT. */
+ SCTP_CMD_INIT_COUNTER_RESET, /* Reset init counter. */
+ SCTP_CMD_INIT_COUNTER_INC, /* Increment init counter. */
+ SCTP_CMD_INIT_RESTART, /* High level, do init timer work. */
+ SCTP_CMD_COOKIEECHO_RESTART, /* High level, do cookie-echo timer work. */
+ SCTP_CMD_INIT_FAILED, /* High level, do init failure work. */
+ SCTP_CMD_REPORT_DUP, /* Report a duplicate TSN. */
+ SCTP_CMD_STRIKE, /* Mark a strike against a transport. */
+ SCTP_CMD_HB_TIMERS_START, /* Start the heartbeat timers. */
+	SCTP_CMD_HB_TIMER_UPDATE, /* Update a heartbeat timer.  */
+ SCTP_CMD_HB_TIMERS_STOP, /* Stop the heartbeat timers. */
+ SCTP_CMD_TRANSPORT_HB_SENT, /* Reset the status of a transport. */
+ SCTP_CMD_TRANSPORT_IDLE, /* Do manipulations on idle transport */
+ SCTP_CMD_TRANSPORT_ON, /* Mark the transport as active. */
+ SCTP_CMD_REPORT_ERROR, /* Pass this error back out of the sm. */
+ SCTP_CMD_REPORT_BAD_TAG, /* Verification tags didn't match. */
+	SCTP_CMD_PROCESS_CTSN,	/* Side effect from shutdown. */
+ SCTP_CMD_ASSOC_FAILED, /* Handle association failure. */
+ SCTP_CMD_DISCARD_PACKET, /* Discard the whole packet. */
+ SCTP_CMD_GEN_SHUTDOWN, /* Generate a SHUTDOWN chunk. */
+ SCTP_CMD_UPDATE_ASSOC, /* Update association information. */
+ SCTP_CMD_PURGE_OUTQUEUE, /* Purge all data waiting to be sent. */
+ SCTP_CMD_SETUP_T2, /* Hi-level, setup T2-shutdown parms. */
+ SCTP_CMD_RTO_PENDING, /* Set transport's rto_pending. */
+ SCTP_CMD_PART_DELIVER, /* Partial data delivery considerations. */
+ SCTP_CMD_RENEGE, /* Renege data on an association. */
+ SCTP_CMD_SETUP_T4, /* ADDIP, setup T4 RTO timer parms. */
+ SCTP_CMD_PROCESS_OPERR, /* Process an ERROR chunk. */
+ SCTP_CMD_REPORT_FWDTSN, /* Report new cumulative TSN Ack. */
+ SCTP_CMD_PROCESS_FWDTSN, /* Skips were reported, so process further. */
+ SCTP_CMD_CLEAR_INIT_TAG, /* Clears association peer's inittag. */
+ SCTP_CMD_DEL_NON_PRIMARY, /* Removes non-primary peer transports. */
+ SCTP_CMD_T3_RTX_TIMERS_STOP, /* Stops T3-rtx pending timers */
+ SCTP_CMD_FORCE_PRIM_RETRAN, /* Forces retrans. over primary path. */
+ SCTP_CMD_SET_SK_ERR, /* Set sk_err */
+ SCTP_CMD_ASSOC_CHANGE, /* generate and send assoc_change event */
+ SCTP_CMD_ADAPTATION_IND, /* generate and send adaptation event */
+ SCTP_CMD_PEER_NO_AUTH, /* generate and send authentication event */
+ SCTP_CMD_ASSOC_SHKEY, /* generate the association shared keys */
+ SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */
+ SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */
+	SCTP_CMD_SEND_MSG,	/* Send the whole user message. */
+ SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues.*/
+ SCTP_CMD_SET_ASOC, /* Restore association context */
+ SCTP_CMD_LAST
+};
+
+/* How many commands can you put in a struct sctp_cmd_seq?
+ * This is a rather arbitrary number, ideally derived from a careful
+ * analysis of the state functions, but in reality just taken from
+ * thin air in the hopes that we don't trigger a kernel panic.
+ */
+#define SCTP_MAX_NUM_COMMANDS 20
+
+union sctp_arg {
+ void *zero_all; /* Set to NULL to clear the entire union */
+ __s32 i32;
+ __u32 u32;
+ __be32 be32;
+ __u16 u16;
+ __u8 u8;
+ int error;
+ __be16 err;
+ enum sctp_state state;
+ enum sctp_event_timeout to;
+ struct sctp_chunk *chunk;
+ struct sctp_association *asoc;
+ struct sctp_transport *transport;
+ struct sctp_bind_addr *bp;
+ struct sctp_init_chunk *init;
+ struct sctp_ulpevent *ulpevent;
+ struct sctp_packet *packet;
+ struct sctp_sackhdr *sackh;
+ struct sctp_datamsg *msg;
+};
+
+/* We are simulating ML type constructors here.
+ *
+ * SCTP_ARG_CONSTRUCTOR(NAME, TYPE, ELT) builds a function called
+ * SCTP_NAME() which takes an argument of type TYPE and returns an
+ * union sctp_arg. It does this by inserting the sole argument into
+ * the ELT union element of a local union sctp_arg.
+ *
+ * E.g., SCTP_ARG_CONSTRUCTOR(I32, __s32, i32) builds SCTP_I32(arg),
+ * which takes an __s32 and returns a union sctp_arg containing the
+ * __s32. So, after foo = SCTP_I32(arg), foo.i32 == arg.
+ */
+
+#define SCTP_ARG_CONSTRUCTOR(name, type, elt) \
+static inline union sctp_arg \
+SCTP_## name (type arg) \
+{ union sctp_arg retval;\
+ retval.zero_all = NULL;\
+ retval.elt = arg;\
+ return retval;\
+}
+
+SCTP_ARG_CONSTRUCTOR(I32, __s32, i32)
+SCTP_ARG_CONSTRUCTOR(U32, __u32, u32)
+SCTP_ARG_CONSTRUCTOR(BE32, __be32, be32)
+SCTP_ARG_CONSTRUCTOR(U16, __u16, u16)
+SCTP_ARG_CONSTRUCTOR(U8, __u8, u8)
+SCTP_ARG_CONSTRUCTOR(ERROR, int, error)
+SCTP_ARG_CONSTRUCTOR(PERR, __be16, err) /* protocol error */
+SCTP_ARG_CONSTRUCTOR(STATE, enum sctp_state, state)
+SCTP_ARG_CONSTRUCTOR(TO, enum sctp_event_timeout, to)
+SCTP_ARG_CONSTRUCTOR(CHUNK, struct sctp_chunk *, chunk)
+SCTP_ARG_CONSTRUCTOR(ASOC, struct sctp_association *, asoc)
+SCTP_ARG_CONSTRUCTOR(TRANSPORT, struct sctp_transport *, transport)
+SCTP_ARG_CONSTRUCTOR(BA, struct sctp_bind_addr *, bp)
+SCTP_ARG_CONSTRUCTOR(PEER_INIT, struct sctp_init_chunk *, init)
+SCTP_ARG_CONSTRUCTOR(ULPEVENT, struct sctp_ulpevent *, ulpevent)
+SCTP_ARG_CONSTRUCTOR(PACKET, struct sctp_packet *, packet)
+SCTP_ARG_CONSTRUCTOR(SACKH, struct sctp_sackhdr *, sackh)
+SCTP_ARG_CONSTRUCTOR(DATAMSG, struct sctp_datamsg *, msg)
+
+static inline union sctp_arg SCTP_FORCE(void)
+{
+ return SCTP_I32(1);
+}
+
+static inline union sctp_arg SCTP_NOFORCE(void)
+{
+ return SCTP_I32(0);
+}
+
+static inline union sctp_arg SCTP_NULL(void)
+{
+ union sctp_arg retval;
+ retval.zero_all = NULL;
+ return retval;
+}
+
+struct sctp_cmd {
+ union sctp_arg obj;
+ enum sctp_verb verb;
+};
+
+struct sctp_cmd_seq {
+ struct sctp_cmd cmds[SCTP_MAX_NUM_COMMANDS];
+ struct sctp_cmd *last_used_slot;
+ struct sctp_cmd *next_cmd;
+};
+
+
+/* Initialize a block of memory as a command sequence.
+ * Return 0 if the initialization fails.
+ */
+static inline int sctp_init_cmd_seq(struct sctp_cmd_seq *seq)
+{
+ /* cmds[] is filled backwards to simplify the overflow BUG() check */
+ seq->last_used_slot = seq->cmds + SCTP_MAX_NUM_COMMANDS;
+ seq->next_cmd = seq->last_used_slot;
+ return 1; /* We always succeed. */
+}
+
+
+/* Add a command to a struct sctp_cmd_seq.
+ *
+ * Use the SCTP_* constructors defined by SCTP_ARG_CONSTRUCTOR() above
+ * to wrap data which goes in the obj argument.
+ */
+static inline void sctp_add_cmd_sf(struct sctp_cmd_seq *seq,
+ enum sctp_verb verb, union sctp_arg obj)
+{
+ struct sctp_cmd *cmd = seq->last_used_slot - 1;
+
+ BUG_ON(cmd < seq->cmds);
+
+ cmd->verb = verb;
+ cmd->obj = obj;
+ seq->last_used_slot = cmd;
+}
+
+/* Return the next command structure in an sctp_cmd_seq.
+ * Return NULL at the end of the sequence.
+ */
+static inline struct sctp_cmd *sctp_next_cmd(struct sctp_cmd_seq *seq)
+{
+ if (seq->next_cmd <= seq->last_used_slot)
+ return NULL;
+
+ return --seq->next_cmd;
+}
+
+#endif /* __net_sctp_command_h__ */
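
A minimal sketch of the command-sequence machinery declared above: a state function queues verbs wrapped in the SCTP_* argument constructors, and the side-effect interpreter later drains them in insertion order. The function and the particular verbs are chosen only for illustration.

#include <linux/errno.h>
#include <net/sctp/command.h>

static void example_queue_and_drain(void)
{
	struct sctp_cmd_seq commands;
	struct sctp_cmd *cmd;

	sctp_init_cmd_seq(&commands);

	/* Queue a few side effects, wrapping each argument with the
	 * matching SCTP_* constructor.
	 */
	sctp_add_cmd_sf(&commands, SCTP_CMD_SET_SK_ERR,
			SCTP_ERROR(ECONNREFUSED));
	sctp_add_cmd_sf(&commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_CLOSED));
	sctp_add_cmd_sf(&commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());

	/* The interpreter walks the commands in the order they were
	 * added; cmds[] being filled backwards is an internal detail.
	 */
	while ((cmd = sctp_next_cmd(&commands)) != NULL) {
		/* dispatch on cmd->verb, using cmd->obj as its argument */
	}
}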
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
new file mode 100644
index 000000000..1ad049ac2
--- /dev/null
+++ b/include/net/sctp/constants.h
@@ -0,0 +1,423 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001 Intel Corp.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Randall Stewart <randall@stewart.chicago.il.us>
+ * Ken Morneau <kmorneau@cisco.com>
+ * Qiaobing Xie <qxie1@motorola.com>
+ * Xingang Guo <xingang.guo@intel.com>
+ * Sridhar Samudrala <samudrala@us.ibm.com>
+ * Daisy Chang <daisyc@us.ibm.com>
+ */
+
+#ifndef __sctp_constants_h__
+#define __sctp_constants_h__
+
+#include <linux/sctp.h>
+#include <linux/ipv6.h> /* For ipv6hdr. */
+#include <net/tcp_states.h> /* For TCP states used in enum sctp_sock_state */
+
+/* Value used for stream negotiation. */
+enum { SCTP_MAX_STREAM = 0xffff };
+enum { SCTP_DEFAULT_OUTSTREAMS = 10 };
+enum { SCTP_DEFAULT_INSTREAMS = SCTP_MAX_STREAM };
+
+/* Since CIDs are sparse, we need all four of the following
+ * symbols. CIDs are dense through SCTP_CID_BASE_MAX.
+ */
+#define SCTP_CID_BASE_MAX SCTP_CID_SHUTDOWN_COMPLETE
+
+#define SCTP_NUM_BASE_CHUNK_TYPES (SCTP_CID_BASE_MAX + 1)
+
+#define SCTP_NUM_ADDIP_CHUNK_TYPES 2
+
+#define SCTP_NUM_PRSCTP_CHUNK_TYPES 1
+
+#define SCTP_NUM_RECONF_CHUNK_TYPES 1
+
+#define SCTP_NUM_AUTH_CHUNK_TYPES 1
+
+#define SCTP_NUM_CHUNK_TYPES (SCTP_NUM_BASE_CHUNK_TYPES + \
+ SCTP_NUM_ADDIP_CHUNK_TYPES +\
+ SCTP_NUM_PRSCTP_CHUNK_TYPES +\
+ SCTP_NUM_RECONF_CHUNK_TYPES +\
+ SCTP_NUM_AUTH_CHUNK_TYPES)
+
+/* These are the different flavours of event. */
+enum sctp_event_type {
+ SCTP_EVENT_T_CHUNK = 1,
+ SCTP_EVENT_T_TIMEOUT,
+ SCTP_EVENT_T_OTHER,
+ SCTP_EVENT_T_PRIMITIVE
+};
+
+/* As a convenience for the state machine, we append SCTP_EVENT_* and
+ * SCTP_ULP_* to the list of possible chunks.
+ */
+
+enum sctp_event_timeout {
+ SCTP_EVENT_TIMEOUT_NONE = 0,
+ SCTP_EVENT_TIMEOUT_T1_COOKIE,
+ SCTP_EVENT_TIMEOUT_T1_INIT,
+ SCTP_EVENT_TIMEOUT_T2_SHUTDOWN,
+ SCTP_EVENT_TIMEOUT_T3_RTX,
+ SCTP_EVENT_TIMEOUT_T4_RTO,
+ SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD,
+ SCTP_EVENT_TIMEOUT_HEARTBEAT,
+ SCTP_EVENT_TIMEOUT_RECONF,
+ SCTP_EVENT_TIMEOUT_SACK,
+ SCTP_EVENT_TIMEOUT_AUTOCLOSE,
+};
+
+#define SCTP_EVENT_TIMEOUT_MAX SCTP_EVENT_TIMEOUT_AUTOCLOSE
+#define SCTP_NUM_TIMEOUT_TYPES (SCTP_EVENT_TIMEOUT_MAX + 1)
+
+enum sctp_event_other {
+ SCTP_EVENT_NO_PENDING_TSN = 0,
+ SCTP_EVENT_ICMP_PROTO_UNREACH,
+};
+
+#define SCTP_EVENT_OTHER_MAX SCTP_EVENT_ICMP_PROTO_UNREACH
+#define SCTP_NUM_OTHER_TYPES (SCTP_EVENT_OTHER_MAX + 1)
+
+/* These are primitive requests from the ULP. */
+enum sctp_event_primitive {
+ SCTP_PRIMITIVE_ASSOCIATE = 0,
+ SCTP_PRIMITIVE_SHUTDOWN,
+ SCTP_PRIMITIVE_ABORT,
+ SCTP_PRIMITIVE_SEND,
+ SCTP_PRIMITIVE_REQUESTHEARTBEAT,
+ SCTP_PRIMITIVE_ASCONF,
+ SCTP_PRIMITIVE_RECONF,
+};
+
+#define SCTP_EVENT_PRIMITIVE_MAX SCTP_PRIMITIVE_RECONF
+#define SCTP_NUM_PRIMITIVE_TYPES (SCTP_EVENT_PRIMITIVE_MAX + 1)
+
+/* We define here a utility type for manipulating subtypes.
+ * The subtype constructors all work like this:
+ *
+ * union sctp_subtype foo = SCTP_ST_CHUNK(SCTP_CID_INIT);
+ */
+
+union sctp_subtype {
+ enum sctp_cid chunk;
+ enum sctp_event_timeout timeout;
+ enum sctp_event_other other;
+ enum sctp_event_primitive primitive;
+};
+
+#define SCTP_SUBTYPE_CONSTRUCTOR(_name, _type, _elt) \
+static inline union sctp_subtype \
+SCTP_ST_## _name (_type _arg) \
+{ union sctp_subtype _retval; _retval._elt = _arg; return _retval; }
+
+SCTP_SUBTYPE_CONSTRUCTOR(CHUNK, enum sctp_cid, chunk)
+SCTP_SUBTYPE_CONSTRUCTOR(TIMEOUT, enum sctp_event_timeout, timeout)
+SCTP_SUBTYPE_CONSTRUCTOR(OTHER, enum sctp_event_other, other)
+SCTP_SUBTYPE_CONSTRUCTOR(PRIMITIVE, enum sctp_event_primitive, primitive)
+
+
+#define sctp_chunk_is_data(a) (a->chunk_hdr->type == SCTP_CID_DATA || \
+ a->chunk_hdr->type == SCTP_CID_I_DATA)
+
+/* Internal error codes */
+enum sctp_ierror {
+ SCTP_IERROR_NO_ERROR = 0,
+ SCTP_IERROR_BASE = 1000,
+ SCTP_IERROR_NO_COOKIE,
+ SCTP_IERROR_BAD_SIG,
+ SCTP_IERROR_STALE_COOKIE,
+ SCTP_IERROR_NOMEM,
+ SCTP_IERROR_MALFORMED,
+ SCTP_IERROR_BAD_TAG,
+ SCTP_IERROR_BIG_GAP,
+ SCTP_IERROR_DUP_TSN,
+ SCTP_IERROR_HIGH_TSN,
+ SCTP_IERROR_IGNORE_TSN,
+ SCTP_IERROR_NO_DATA,
+ SCTP_IERROR_BAD_STREAM,
+ SCTP_IERROR_BAD_PORTS,
+ SCTP_IERROR_AUTH_BAD_HMAC,
+ SCTP_IERROR_AUTH_BAD_KEYID,
+ SCTP_IERROR_PROTO_VIOLATION,
+ SCTP_IERROR_ERROR,
+ SCTP_IERROR_ABORT,
+};
+
+
+
+/* SCTP state defines for internal state machine */
+enum sctp_state {
+
+ SCTP_STATE_CLOSED = 0,
+ SCTP_STATE_COOKIE_WAIT = 1,
+ SCTP_STATE_COOKIE_ECHOED = 2,
+ SCTP_STATE_ESTABLISHED = 3,
+ SCTP_STATE_SHUTDOWN_PENDING = 4,
+ SCTP_STATE_SHUTDOWN_SENT = 5,
+ SCTP_STATE_SHUTDOWN_RECEIVED = 6,
+ SCTP_STATE_SHUTDOWN_ACK_SENT = 7,
+
+};
+
+#define SCTP_STATE_MAX SCTP_STATE_SHUTDOWN_ACK_SENT
+#define SCTP_STATE_NUM_STATES (SCTP_STATE_MAX + 1)
+
+/* These are values for sk->state.
+ * For a UDP-style SCTP socket, the states are defined as follows
+ * - A socket in SCTP_SS_CLOSED state indicates that it is not willing to
+ * accept new associations, but it can initiate the creation of new ones.
+ * - A socket in SCTP_SS_LISTENING state indicates that it is willing to
+ * accept new associations and can initiate the creation of new ones.
+ * - A socket in SCTP_SS_ESTABLISHED state indicates that it is a peeled off
+ * socket with one association.
+ * For a TCP-style SCTP socket, the states are defined as follows
+ * - A socket in SCTP_SS_CLOSED state indicates that it is not willing to
+ * accept new associations, but it can initiate the creation of new ones.
+ * - A socket in SCTP_SS_LISTENING state indicates that it is willing to
+ * accept new associations, but cannot initiate the creation of new ones.
+ * - A socket in SCTP_SS_ESTABLISHED state indicates that it has a single
+ * association.
+ */
+enum sctp_sock_state {
+ SCTP_SS_CLOSED = TCP_CLOSE,
+ SCTP_SS_LISTENING = TCP_LISTEN,
+ SCTP_SS_ESTABLISHING = TCP_SYN_SENT,
+ SCTP_SS_ESTABLISHED = TCP_ESTABLISHED,
+ SCTP_SS_CLOSING = TCP_CLOSE_WAIT,
+};
+
+/* These functions map various type to printable names. */
+const char *sctp_cname(const union sctp_subtype id); /* chunk types */
+const char *sctp_oname(const union sctp_subtype id); /* other events */
+const char *sctp_tname(const union sctp_subtype id); /* timeouts */
+const char *sctp_pname(const union sctp_subtype id); /* primitives */
+
+/* This is a table of printable names of sctp_state_t's. */
+extern const char *const sctp_state_tbl[];
+extern const char *const sctp_evttype_tbl[];
+extern const char *const sctp_status_tbl[];
+
+/* Maximum chunk length considering padding requirements. */
+enum { SCTP_MAX_CHUNK_LEN = ((1<<16) - sizeof(__u32)) };
+
+/* Encourage Cookie-Echo bundling by pre-fragmenting chunks a little
+ * harder (until reaching ESTABLISHED state).
+ */
+enum { SCTP_ARBITRARY_COOKIE_ECHO_LEN = 200 };
+
+/* Guess at how big to make the TSN mapping array.
+ * We guarantee that we can handle at least this big a gap between the
+ * cumulative ACK and the highest TSN. In practice, we can often
+ * handle up to twice this value.
+ *
+ * NEVER make this more than 32767 (2^15-1). The Gap Ack Blocks in a
+ * SACK (see section 3.3.4) are only 16 bits, so 2*SCTP_TSN_MAP_SIZE
+ * must be less than 65535 (2^16 - 1), or we will have overflow
+ * problems creating SACK's.
+ */
+#define SCTP_TSN_MAP_INITIAL BITS_PER_LONG
+#define SCTP_TSN_MAP_INCREMENT SCTP_TSN_MAP_INITIAL
+#define SCTP_TSN_MAP_SIZE 4096
+
+/* We will not record more than this many duplicate TSNs between two
+ * SACKs. The minimum PMTU is 512. Remove all the headers and there
+ * is enough room for 117 duplicate reports. Round down to the
+ * nearest power of 2.
+ */
+enum { SCTP_MAX_DUP_TSNS = 16 };
+enum { SCTP_MAX_GABS = 16 };
+
+/* Heartbeat interval - 30 secs */
+#define SCTP_DEFAULT_TIMEOUT_HEARTBEAT (30*1000)
+
+/* Delayed sack timer - 200ms */
+#define SCTP_DEFAULT_TIMEOUT_SACK (200)
+
+/* RTO.Initial - 3 seconds
+ * RTO.Min - 1 second
+ * RTO.Max - 60 seconds
+ * RTO.Alpha - 1/8
+ * RTO.Beta - 1/4
+ */
+#define SCTP_RTO_INITIAL (3 * 1000)
+#define SCTP_RTO_MIN (1 * 1000)
+#define SCTP_RTO_MAX (60 * 1000)
+
+#define SCTP_RTO_ALPHA 3 /* 1/8 when converted to right shifts. */
+#define SCTP_RTO_BETA 2 /* 1/4 when converted to right shifts. */
+
+/* Maximum number of new data packets that can be sent in a burst. */
+#define SCTP_DEFAULT_MAX_BURST 4
+
+#define SCTP_CLOCK_GRANULARITY 1 /* 1 jiffy */
+
+#define SCTP_DEFAULT_COOKIE_LIFE (60 * 1000) /* 60 seconds */
+
+#define SCTP_DEFAULT_MINWINDOW 1500 /* default minimum rwnd size */
+#define SCTP_DEFAULT_MAXWINDOW 65535 /* default rwnd size */
+#define SCTP_DEFAULT_RWND_SHIFT 4 /* by default, update on 1/16 of
+ * rcvbuf, which is 1/8 of initial
+ * window
+ */
+#define SCTP_DEFAULT_MAXSEGMENT 1500 /* MTU size, this is the limit
+ * to which we will raise the P-MTU.
+ */
+#define SCTP_DEFAULT_MINSEGMENT 512 /* MTU size ... if no mtu disc */
+
+#define SCTP_SECRET_SIZE 32		/* Number of octets in 256 bits. */
+
+#define SCTP_SIGNATURE_SIZE 20	/* size of a SHA-1 signature */
+
+#define SCTP_COOKIE_MULTIPLE 32 /* Pad out our cookie to make our hash
+ * functions simpler to write.
+ */
+
+/* These are the values for pf exposure; UNSET is the default, kept to stay
+ * compatible with old applications.
+ */
+enum {
+ SCTP_PF_EXPOSE_UNSET,
+ SCTP_PF_EXPOSE_DISABLE,
+ SCTP_PF_EXPOSE_ENABLE,
+};
+#define SCTP_PF_EXPOSE_MAX SCTP_PF_EXPOSE_ENABLE
+
+#define SCTP_PS_RETRANS_MAX 0xffff
+
+/* These return values describe the success or failure of a number of
+ * routines which form the lower interface to SCTP_outqueue.
+ */
+enum sctp_xmit {
+ SCTP_XMIT_OK,
+ SCTP_XMIT_PMTU_FULL,
+ SCTP_XMIT_RWND_FULL,
+ SCTP_XMIT_DELAY,
+};
+
+/* These are the commands for manipulating transports. */
+enum sctp_transport_cmd {
+ SCTP_TRANSPORT_UP,
+ SCTP_TRANSPORT_DOWN,
+ SCTP_TRANSPORT_PF,
+};
+
+/* These are the address scopes defined mainly for IPv4 addresses
+ * based on draft of SCTP IPv4 scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>.
+ * These scopes are hopefully generic enough to be used on scoping both
+ * IPv4 and IPv6 addresses in SCTP.
+ * At this point, the IPv6 scopes will be mapped to these internal scopes
+ * as much as possible.
+ */
+enum sctp_scope {
+ SCTP_SCOPE_GLOBAL, /* IPv4 global addresses */
+ SCTP_SCOPE_PRIVATE, /* IPv4 private addresses */
+ SCTP_SCOPE_LINK, /* IPv4 link local address */
+ SCTP_SCOPE_LOOPBACK, /* IPv4 loopback address */
+ SCTP_SCOPE_UNUSABLE, /* IPv4 unusable addresses */
+};
+
+enum {
+ SCTP_SCOPE_POLICY_DISABLE, /* Disable IPv4 address scoping */
+ SCTP_SCOPE_POLICY_ENABLE, /* Enable IPv4 address scoping */
+ SCTP_SCOPE_POLICY_PRIVATE, /* Follow draft but allow IPv4 private addresses */
+ SCTP_SCOPE_POLICY_LINK, /* Follow draft but allow IPv4 link local addresses */
+};
+
+#define SCTP_SCOPE_POLICY_MAX SCTP_SCOPE_POLICY_LINK
+
+/* Based on IPv4 scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>,
+ * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 192.88.99.0/24.
+ * Also, per Section 8.4 of the SCTP RFC, non-unicast addresses are not
+ * considered valid SCTP addresses.
+ */
+#define IS_IPV4_UNUSABLE_ADDRESS(a) \
+ ((htonl(INADDR_BROADCAST) == a) || \
+ ipv4_is_multicast(a) || \
+ ipv4_is_zeronet(a) || \
+ ipv4_is_anycast_6to4(a))
+
+/* Flags used for the bind address copy functions. */
+#define SCTP_ADDR4_ALLOWED 0x00000001 /* IPv4 address is allowed by
+ local sock family */
+#define SCTP_ADDR6_ALLOWED 0x00000002 /* IPv6 address is allowed by
+ local sock family */
+#define SCTP_ADDR4_PEERSUPP 0x00000004 /* IPv4 address is supported by
+ peer */
+#define SCTP_ADDR6_PEERSUPP 0x00000008 /* IPv6 address is supported by
+ peer */
+
+/* Reasons to retransmit. */
+enum sctp_retransmit_reason {
+ SCTP_RTXR_T3_RTX,
+ SCTP_RTXR_FAST_RTX,
+ SCTP_RTXR_PMTUD,
+ SCTP_RTXR_T1_RTX,
+};
+
+/* Reasons to lower cwnd. */
+enum sctp_lower_cwnd {
+ SCTP_LOWER_CWND_T3_RTX,
+ SCTP_LOWER_CWND_FAST_RTX,
+ SCTP_LOWER_CWND_ECNE,
+ SCTP_LOWER_CWND_INACTIVE,
+};
+
+
+/* SCTP-AUTH Necessary constants */
+
+/* SCTP-AUTH, Section 3.3
+ *
+ * The following Table 2 shows the currently defined values for HMAC
+ * identifiers.
+ *
+ * +-----------------+--------------------------+
+ * | HMAC Identifier | Message Digest Algorithm |
+ * +-----------------+--------------------------+
+ * | 0 | Reserved |
+ * | 1 | SHA-1 defined in [8] |
+ * | 2 | Reserved |
+ * | 3 | SHA-256 defined in [8] |
+ * +-----------------+--------------------------+
+ */
+enum {
+ SCTP_AUTH_HMAC_ID_RESERVED_0,
+ SCTP_AUTH_HMAC_ID_SHA1,
+ SCTP_AUTH_HMAC_ID_RESERVED_2,
+#if defined (CONFIG_CRYPTO_SHA256) || defined (CONFIG_CRYPTO_SHA256_MODULE)
+ SCTP_AUTH_HMAC_ID_SHA256,
+#endif
+ __SCTP_AUTH_HMAC_MAX
+};
+
+#define SCTP_AUTH_HMAC_ID_MAX __SCTP_AUTH_HMAC_MAX - 1
+#define SCTP_AUTH_NUM_HMACS __SCTP_AUTH_HMAC_MAX
+#define SCTP_SHA1_SIG_SIZE 20
+#define SCTP_SHA256_SIG_SIZE 32
+
+/* SCTP-AUTH, Section 3.2
+ * The chunk types for INIT, INIT-ACK, SHUTDOWN-COMPLETE and AUTH chunks
+ * MUST NOT be listed in the CHUNKS parameter
+ */
+#define SCTP_NUM_NOAUTH_CHUNKS 4
+#define SCTP_AUTH_MAX_CHUNKS (SCTP_NUM_CHUNK_TYPES - SCTP_NUM_NOAUTH_CHUNKS)
+
+/* SCTP-AUTH Section 6.1
+ * The RANDOM parameter MUST contain a 32 byte random number.
+ */
+#define SCTP_AUTH_RANDOM_LENGTH 32
+
+#endif /* __sctp_constants_h__ */
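
A small sketch showing the subtype constructors above in use; the chosen values are arbitrary and the function exists only for illustration.

#include <net/sctp/constants.h>

static void example_subtypes(void)
{
	/* Each union sctp_subtype carries exactly one of the four
	 * subtype enums; together with an enum sctp_event_type it
	 * selects a row of the state-machine table.
	 */
	union sctp_subtype chunk_ev = SCTP_ST_CHUNK(SCTP_CID_INIT);
	union sctp_subtype timer_ev = SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T1_INIT);

	(void)chunk_ev;
	(void)timer_ev;
}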
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
new file mode 100644
index 000000000..33475d061
--- /dev/null
+++ b/include/net/sctp/sctp.h
@@ -0,0 +1,625 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001-2003 Intel Corp.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * The base lksctp header.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Xingang Guo <xingang.guo@intel.com>
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * Daisy Chang <daisyc@us.ibm.com>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ * Ardelle Fan <ardelle.fan@intel.com>
+ * Ryan Layer <rmlayer@us.ibm.com>
+ * Kevin Gao <kevin.gao@intel.com>
+ */
+
+#ifndef __net_sctp_h__
+#define __net_sctp_h__
+
+/* Header Strategy.
+ * Start getting some control over the header file dependencies:
+ * includes
+ * constants
+ * structs
+ * prototypes
+ * macros, externs, and inlines
+ *
+ * Move test_frame specific items out of the kernel headers
+ * and into the test frame headers. This is not perfect in any sense
+ * and will continue to evolve.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/in.h>
+#include <linux/tty.h>
+#include <linux/proc_fs.h>
+#include <linux/spinlock.h>
+#include <linux/jiffies.h>
+#include <linux/idr.h>
+
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#endif
+
+#include <linux/uaccess.h>
+#include <asm/page.h>
+#include <net/sock.h>
+#include <net/snmp.h>
+#include <net/sctp/structs.h>
+#include <net/sctp/constants.h>
+
+#ifdef CONFIG_IP_SCTP_MODULE
+#define SCTP_PROTOSW_FLAG 0
+#else /* static! */
+#define SCTP_PROTOSW_FLAG INET_PROTOSW_PERMANENT
+#endif
+
+/* Round an int up to the next multiple of 4. */
+#define SCTP_PAD4(s) (((s)+3)&~3)
+/* Truncate to the previous multiple of 4. */
+#define SCTP_TRUNC4(s) ((s)&~3)
+
+/*
+ * Function declarations.
+ */
+
+/*
+ * sctp/protocol.c
+ */
+int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *addr,
+ enum sctp_scope, gfp_t gfp, int flags);
+struct sctp_pf *sctp_get_pf_specific(sa_family_t family);
+int sctp_register_pf(struct sctp_pf *, sa_family_t);
+void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int);
+
+/*
+ * sctp/socket.c
+ */
+int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len, int flags);
+int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
+int sctp_inet_listen(struct socket *sock, int backlog);
+void sctp_write_space(struct sock *sk);
+void sctp_data_ready(struct sock *sk);
+__poll_t sctp_poll(struct file *file, struct socket *sock,
+ poll_table *wait);
+void sctp_sock_rfree(struct sk_buff *skb);
+void sctp_copy_sock(struct sock *newsk, struct sock *sk,
+ struct sctp_association *asoc);
+extern struct percpu_counter sctp_sockets_allocated;
+int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
+struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
+
+typedef int (*sctp_callback_t)(struct sctp_endpoint *, struct sctp_transport *, void *);
+void sctp_transport_walk_start(struct rhashtable_iter *iter);
+void sctp_transport_walk_stop(struct rhashtable_iter *iter);
+struct sctp_transport *sctp_transport_get_next(struct net *net,
+ struct rhashtable_iter *iter);
+struct sctp_transport *sctp_transport_get_idx(struct net *net,
+ struct rhashtable_iter *iter, int pos);
+int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
+ struct net *net,
+ const union sctp_addr *laddr,
+ const union sctp_addr *paddr, void *p);
+int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done,
+ struct net *net, int *pos, void *p);
+int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), void *p);
+int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
+ struct sctp_info *info);
+
+/*
+ * sctp/primitive.c
+ */
+int sctp_primitive_ASSOCIATE(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_SHUTDOWN(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_ABORT(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_SEND(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_REQUESTHEARTBEAT(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_ASCONF(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_RECONF(struct net *net, struct sctp_association *asoc,
+ void *arg);
+
+/*
+ * sctp/input.c
+ */
+int sctp_rcv(struct sk_buff *skb);
+int sctp_v4_err(struct sk_buff *skb, u32 info);
+int sctp_hash_endpoint(struct sctp_endpoint *ep);
+void sctp_unhash_endpoint(struct sctp_endpoint *);
+struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *,
+ struct sctphdr *, struct sctp_association **,
+ struct sctp_transport **);
+void sctp_err_finish(struct sock *, struct sctp_transport *);
+void sctp_icmp_frag_needed(struct sock *, struct sctp_association *,
+ struct sctp_transport *t, __u32 pmtu);
+void sctp_icmp_redirect(struct sock *, struct sctp_transport *,
+ struct sk_buff *);
+void sctp_icmp_proto_unreachable(struct sock *sk,
+ struct sctp_association *asoc,
+ struct sctp_transport *t);
+void sctp_backlog_migrate(struct sctp_association *assoc,
+ struct sock *oldsk, struct sock *newsk);
+int sctp_transport_hashtable_init(void);
+void sctp_transport_hashtable_destroy(void);
+int sctp_hash_transport(struct sctp_transport *t);
+void sctp_unhash_transport(struct sctp_transport *t);
+struct sctp_transport *sctp_addrs_lookup_transport(
+ struct net *net,
+ const union sctp_addr *laddr,
+ const union sctp_addr *paddr);
+struct sctp_transport *sctp_epaddr_lookup_transport(
+ const struct sctp_endpoint *ep,
+ const union sctp_addr *paddr);
+
+/*
+ * sctp/proc.c
+ */
+int __net_init sctp_proc_init(struct net *net);
+
+/*
+ * sctp/offload.c
+ */
+int sctp_offload_init(void);
+
+/*
+ * sctp/stream_sched.c
+ */
+void sctp_sched_ops_init(void);
+
+/*
+ * sctp/stream.c
+ */
+int sctp_send_reset_streams(struct sctp_association *asoc,
+ struct sctp_reset_streams *params);
+int sctp_send_reset_assoc(struct sctp_association *asoc);
+int sctp_send_add_streams(struct sctp_association *asoc,
+ struct sctp_add_streams *params);
+
+/*
+ * Module global variables
+ */
+
+ /*
+ * sctp/protocol.c
+ */
+extern struct kmem_cache *sctp_chunk_cachep __read_mostly;
+extern struct kmem_cache *sctp_bucket_cachep __read_mostly;
+extern long sysctl_sctp_mem[3];
+extern int sysctl_sctp_rmem[3];
+extern int sysctl_sctp_wmem[3];
+
+/*
+ * Section: Macros, externs, and inlines
+ */
+
+/* SCTP SNMP MIB stats handlers */
+#define SCTP_INC_STATS(net, field) SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
+#define __SCTP_INC_STATS(net, field) __SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
+#define SCTP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->sctp.sctp_statistics, field)
+
+/* sctp mib definitions */
+enum {
+ SCTP_MIB_NUM = 0,
+ SCTP_MIB_CURRESTAB, /* CurrEstab */
+ SCTP_MIB_ACTIVEESTABS, /* ActiveEstabs */
+ SCTP_MIB_PASSIVEESTABS, /* PassiveEstabs */
+ SCTP_MIB_ABORTEDS, /* Aborteds */
+ SCTP_MIB_SHUTDOWNS, /* Shutdowns */
+ SCTP_MIB_OUTOFBLUES, /* OutOfBlues */
+ SCTP_MIB_CHECKSUMERRORS, /* ChecksumErrors */
+ SCTP_MIB_OUTCTRLCHUNKS, /* OutCtrlChunks */
+ SCTP_MIB_OUTORDERCHUNKS, /* OutOrderChunks */
+ SCTP_MIB_OUTUNORDERCHUNKS, /* OutUnorderChunks */
+ SCTP_MIB_INCTRLCHUNKS, /* InCtrlChunks */
+ SCTP_MIB_INORDERCHUNKS, /* InOrderChunks */
+ SCTP_MIB_INUNORDERCHUNKS, /* InUnorderChunks */
+ SCTP_MIB_FRAGUSRMSGS, /* FragUsrMsgs */
+ SCTP_MIB_REASMUSRMSGS, /* ReasmUsrMsgs */
+ SCTP_MIB_OUTSCTPPACKS, /* OutSCTPPacks */
+ SCTP_MIB_INSCTPPACKS, /* InSCTPPacks */
+ SCTP_MIB_T1_INIT_EXPIREDS,
+ SCTP_MIB_T1_COOKIE_EXPIREDS,
+ SCTP_MIB_T2_SHUTDOWN_EXPIREDS,
+ SCTP_MIB_T3_RTX_EXPIREDS,
+ SCTP_MIB_T4_RTO_EXPIREDS,
+ SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS,
+ SCTP_MIB_DELAY_SACK_EXPIREDS,
+ SCTP_MIB_AUTOCLOSE_EXPIREDS,
+ SCTP_MIB_T1_RETRANSMITS,
+ SCTP_MIB_T3_RETRANSMITS,
+ SCTP_MIB_PMTUD_RETRANSMITS,
+ SCTP_MIB_FAST_RETRANSMITS,
+ SCTP_MIB_IN_PKT_SOFTIRQ,
+ SCTP_MIB_IN_PKT_BACKLOG,
+ SCTP_MIB_IN_PKT_DISCARDS,
+ SCTP_MIB_IN_DATA_CHUNK_DISCARDS,
+ __SCTP_MIB_MAX
+};
+
+#define SCTP_MIB_MAX __SCTP_MIB_MAX
+struct sctp_mib {
+ unsigned long mibs[SCTP_MIB_MAX];
+};
+
+/* helper function to track stats about max rto and related transport */
+static inline void sctp_max_rto(struct sctp_association *asoc,
+ struct sctp_transport *trans)
+{
+ if (asoc->stats.max_obs_rto < (__u64)trans->rto) {
+ asoc->stats.max_obs_rto = trans->rto;
+ memset(&asoc->stats.obs_rto_ipaddr, 0,
+ sizeof(struct sockaddr_storage));
+ memcpy(&asoc->stats.obs_rto_ipaddr, &trans->ipaddr,
+ trans->af_specific->sockaddr_len);
+ }
+}
+
+/*
+ * Macros for keeping a global reference of object allocations.
+ */
+#ifdef CONFIG_SCTP_DBG_OBJCNT
+
+extern atomic_t sctp_dbg_objcnt_sock;
+extern atomic_t sctp_dbg_objcnt_ep;
+extern atomic_t sctp_dbg_objcnt_assoc;
+extern atomic_t sctp_dbg_objcnt_transport;
+extern atomic_t sctp_dbg_objcnt_chunk;
+extern atomic_t sctp_dbg_objcnt_bind_addr;
+extern atomic_t sctp_dbg_objcnt_bind_bucket;
+extern atomic_t sctp_dbg_objcnt_addr;
+extern atomic_t sctp_dbg_objcnt_datamsg;
+extern atomic_t sctp_dbg_objcnt_keys;
+
+/* Macros to atomically increment/decrement objcnt counters. */
+#define SCTP_DBG_OBJCNT_INC(name) \
+atomic_inc(&sctp_dbg_objcnt_## name)
+#define SCTP_DBG_OBJCNT_DEC(name) \
+atomic_dec(&sctp_dbg_objcnt_## name)
+#define SCTP_DBG_OBJCNT(name) \
+atomic_t sctp_dbg_objcnt_## name = ATOMIC_INIT(0)
+
+/* Macro to help create new entries in the global array of
+ * objcnt counters.
+ */
+#define SCTP_DBG_OBJCNT_ENTRY(name) \
+{.label= #name, .counter= &sctp_dbg_objcnt_## name}
+
+void sctp_dbg_objcnt_init(struct net *);
+
+#else
+
+#define SCTP_DBG_OBJCNT_INC(name)
+#define SCTP_DBG_OBJCNT_DEC(name)
+
+static inline void sctp_dbg_objcnt_init(struct net *net) { return; }
+
+#endif /* CONFIG_SCTP_DBG_OBJCNT */
+
+#if defined CONFIG_SYSCTL
+void sctp_sysctl_register(void);
+void sctp_sysctl_unregister(void);
+int sctp_sysctl_net_register(struct net *net);
+void sctp_sysctl_net_unregister(struct net *net);
+#else
+static inline void sctp_sysctl_register(void) { return; }
+static inline void sctp_sysctl_unregister(void) { return; }
+static inline int sctp_sysctl_net_register(struct net *net) { return 0; }
+static inline void sctp_sysctl_net_unregister(struct net *net) { return; }
+#endif
+
+/* Size of Supported Address Parameter for 'x' address types. */
+#define SCTP_SAT_LEN(x) (sizeof(struct sctp_paramhdr) + (x) * sizeof(__u16))
+
+#if IS_ENABLED(CONFIG_IPV6)
+
+void sctp_v6_pf_init(void);
+void sctp_v6_pf_exit(void);
+int sctp_v6_protosw_init(void);
+void sctp_v6_protosw_exit(void);
+int sctp_v6_add_protocol(void);
+void sctp_v6_del_protocol(void);
+
+#else /* #if IS_ENABLED(CONFIG_IPV6) */
+
+static inline void sctp_v6_pf_init(void) { return; }
+static inline void sctp_v6_pf_exit(void) { return; }
+static inline int sctp_v6_protosw_init(void) { return 0; }
+static inline void sctp_v6_protosw_exit(void) { return; }
+static inline int sctp_v6_add_protocol(void) { return 0; }
+static inline void sctp_v6_del_protocol(void) { return; }
+
+#endif /* #if IS_ENABLED(CONFIG_IPV6) */
+
+
+/* Map an association to an assoc_id. */
+static inline sctp_assoc_t sctp_assoc2id(const struct sctp_association *asoc)
+{
+ return asoc ? asoc->assoc_id : 0;
+}
+
+static inline enum sctp_sstat_state
+sctp_assoc_to_state(const struct sctp_association *asoc)
+{
+ /* SCTP's uapi always had SCTP_EMPTY(=0) as a dummy state, but we
+ * got rid of it in kernel space. Therefore SCTP_CLOSED et al
+ * start at =1 in user space, but actually as =0 in kernel space.
+ * Now that we can not break user space and SCTP_EMPTY is exposed
+ * there, we need to fix it up with an ugly offset not to break
+ * applications. :(
+ */
+ return asoc->state + 1;
+}
+
+/* Look up the association by its id. */
+struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id);
+
+int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp);
+
+/* A macro to walk a list of skbs. */
+#define sctp_skb_for_each(pos, head, tmp) \
+ skb_queue_walk_safe(head, pos, tmp)
+
+/**
+ * sctp_list_dequeue - remove from the head of the queue
+ * @list: list to dequeue from
+ *
+ * Remove the head of the list. The head item is
+ * returned or %NULL if the list is empty.
+ */
+
+static inline struct list_head *sctp_list_dequeue(struct list_head *list)
+{
+ struct list_head *result = NULL;
+
+ if (!list_empty(list)) {
+ result = list->next;
+ list_del_init(result);
+ }
+ return result;
+}
+
+/* SCTP version of skb_set_owner_r. We need this one because
+ * of the way we have to do receive buffer accounting on bundled
+ * chunks.
+ */
+static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
+{
+ struct sctp_ulpevent *event = sctp_skb2event(skb);
+
+ skb_orphan(skb);
+ skb->sk = sk;
+ skb->destructor = sctp_sock_rfree;
+ atomic_add(event->rmem_len, &sk->sk_rmem_alloc);
+ /*
+ * This mimics the behavior of skb_set_owner_r
+ */
+ sk_mem_charge(sk, event->rmem_len);
+}
+
+/* Tests if the list has one and only one entry. */
+static inline int sctp_list_single_entry(struct list_head *head)
+{
+ return list_is_singular(head);
+}
+
+static inline bool sctp_chunk_pending(const struct sctp_chunk *chunk)
+{
+ return !list_empty(&chunk->list);
+}
+
+/* Walk through a list of TLV parameters. Don't trust the
+ * individual parameter lengths and instead depend on
+ * the chunk length to indicate when to stop. Make sure
+ * there is room for a param header too.
+ */
+#define sctp_walk_params(pos, chunk, member)\
+_sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)
+
+#define _sctp_walk_params(pos, chunk, end, member)\
+for (pos.v = chunk->member;\
+ (pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <=\
+ (void *)chunk + end) &&\
+ pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
+ ntohs(pos.p->length) >= sizeof(struct sctp_paramhdr);\
+ pos.v += SCTP_PAD4(ntohs(pos.p->length)))
+
+#define sctp_walk_errors(err, chunk_hdr)\
+_sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length))
+
+#define _sctp_walk_errors(err, chunk_hdr, end)\
+for (err = (struct sctp_errhdr *)((void *)chunk_hdr + \
+ sizeof(struct sctp_chunkhdr));\
+ ((void *)err + offsetof(struct sctp_errhdr, length) + sizeof(err->length) <=\
+ (void *)chunk_hdr + end) &&\
+ (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
+ ntohs(err->length) >= sizeof(struct sctp_errhdr); \
+ err = (struct sctp_errhdr *)((void *)err + SCTP_PAD4(ntohs(err->length))))
+
+#define sctp_walk_fwdtsn(pos, chunk)\
+_sctp_walk_fwdtsn((pos), (chunk), ntohs((chunk)->chunk_hdr->length) - sizeof(struct sctp_fwdtsn_chunk))
+
+#define _sctp_walk_fwdtsn(pos, chunk, end)\
+for (pos = chunk->subh.fwdtsn_hdr->skip;\
+ (void *)pos <= (void *)chunk->subh.fwdtsn_hdr->skip + end - sizeof(struct sctp_fwdtsn_skip);\
+ pos++)
+
+/* External references. */
+
+extern struct proto sctp_prot;
+extern struct proto sctpv6_prot;
+void sctp_put_port(struct sock *sk);
+
+extern struct idr sctp_assocs_id;
+extern spinlock_t sctp_assocs_id_lock;
+
+/* Static inline functions. */
+
+/* Convert from an IP version number to an Address Family symbol. */
+static inline int ipver2af(__u8 ipver)
+{
+ switch (ipver) {
+ case 4:
+ return AF_INET;
+ case 6:
+ return AF_INET6;
+ default:
+ return 0;
+ }
+}
+
+/* Convert from an address parameter type to an address family. */
+static inline int param_type2af(__be16 type)
+{
+ switch (type) {
+ case SCTP_PARAM_IPV4_ADDRESS:
+ return AF_INET;
+ case SCTP_PARAM_IPV6_ADDRESS:
+ return AF_INET6;
+ default:
+ return 0;
+ }
+}
+
+/* Warning: The following hash functions assume a power of two 'size'. */
+/* This is the hash function for the SCTP port hash table. */
+static inline int sctp_phashfn(struct net *net, __u16 lport)
+{
+ return (net_hash_mix(net) + lport) & (sctp_port_hashsize - 1);
+}
+
+/* This is the hash function for the endpoint hash table. */
+static inline int sctp_ep_hashfn(struct net *net, __u16 lport)
+{
+ return (net_hash_mix(net) + lport) & (sctp_ep_hashsize - 1);
+}
+
+#define sctp_for_each_hentry(epb, head) \
+ hlist_for_each_entry(epb, head, node)
+
+/* Is a socket of this style? */
+#define sctp_style(sk, style) __sctp_style((sk), (SCTP_SOCKET_##style))
+static inline int __sctp_style(const struct sock *sk,
+ enum sctp_socket_type style)
+{
+ return sctp_sk(sk)->type == style;
+}
+
+/* Is the association in this state? */
+#define sctp_state(asoc, state) __sctp_state((asoc), (SCTP_STATE_##state))
+static inline int __sctp_state(const struct sctp_association *asoc,
+ enum sctp_state state)
+{
+ return asoc->state == state;
+}
+
+/* Is the socket in this state? */
+#define sctp_sstate(sk, state) __sctp_sstate((sk), (SCTP_SS_##state))
+static inline int __sctp_sstate(const struct sock *sk,
+ enum sctp_sock_state state)
+{
+ return sk->sk_state == state;
+}
+
+/* Map v4-mapped v6 address back to v4 address */
+static inline void sctp_v6_map_v4(union sctp_addr *addr)
+{
+ addr->v4.sin_family = AF_INET;
+ addr->v4.sin_port = addr->v6.sin6_port;
+ addr->v4.sin_addr.s_addr = addr->v6.sin6_addr.s6_addr32[3];
+}
+
+/* Map v4 address to v4-mapped v6 address */
+static inline void sctp_v4_map_v6(union sctp_addr *addr)
+{
+ __be16 port;
+
+ port = addr->v4.sin_port;
+ addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
+ addr->v6.sin6_port = port;
+ addr->v6.sin6_family = AF_INET6;
+ addr->v6.sin6_flowinfo = 0;
+ addr->v6.sin6_scope_id = 0;
+ addr->v6.sin6_addr.s6_addr32[0] = 0;
+ addr->v6.sin6_addr.s6_addr32[1] = 0;
+ addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff);
+}
+
+/* The cookie is always 0 since this is how it's used in the
+ * pmtu code.
+ */
+static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
+{
+ if (t->dst && !dst_check(t->dst, t->dst_cookie))
+ sctp_transport_dst_release(t);
+
+ return t->dst;
+}
+
+/* Calculate max payload size given an MTU, or the total overhead if
+ * the given MTU is zero.
+ */
+static inline __u32 sctp_mtu_payload(const struct sctp_sock *sp,
+ __u32 mtu, __u32 extra)
+{
+ __u32 overhead = sizeof(struct sctphdr) + extra;
+
+ if (sp)
+ overhead += sp->pf->af->net_header_len;
+ else
+ overhead += sizeof(struct ipv6hdr);
+
+ if (WARN_ON_ONCE(mtu && mtu <= overhead))
+ mtu = overhead;
+
+ return mtu ? mtu - overhead : overhead;
+}
+
+static inline __u32 sctp_dst_mtu(const struct dst_entry *dst)
+{
+ return SCTP_TRUNC4(max_t(__u32, dst_mtu(dst),
+ SCTP_DEFAULT_MINSEGMENT));
+}
+
+static inline bool sctp_transport_pmtu_check(struct sctp_transport *t)
+{
+ __u32 pmtu = sctp_dst_mtu(t->dst);
+
+ if (t->pathmtu == pmtu)
+ return true;
+
+ t->pathmtu = pmtu;
+
+ return false;
+}
+
+static inline __u32 sctp_min_frag_point(struct sctp_sock *sp, __u16 datasize)
+{
+ return sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT, datasize);
+}
+
+static inline bool sctp_newsk_ready(const struct sock *sk)
+{
+ return sock_flag(sk, SOCK_DEAD) || sk->sk_socket;
+}
+
+static inline void sctp_sock_set_nodelay(struct sock *sk)
+{
+ lock_sock(sk);
+ sctp_sk(sk)->nodelay = true;
+ release_sock(sk);
+}
+
+#endif /* __net_sctp_h__ */
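
A hedged sketch of the bounds-checked TLV walkers above, using sctp_walk_errors() over the error causes of an ERROR/ABORT chunk; the helper is illustrative and assumes the caller has already validated the chunk header.

#include <linux/sctp.h>
#include <net/sctp/sctp.h>

/* Count the error causes carried by an ERROR or ABORT chunk
 * (illustrative helper).
 */
static unsigned int example_count_causes(struct sctp_chunkhdr *ch)
{
	struct sctp_errhdr *err;
	unsigned int n = 0;

	/* The walker trusts only the chunk length, never the per-cause
	 * lengths, so a malformed cause cannot run past the chunk end.
	 */
	sctp_walk_errors(err, ch)
		n++;

	return n;
}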
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
new file mode 100644
index 000000000..5c491a3bc
--- /dev/null
+++ b/include/net/sctp/sm.h
@@ -0,0 +1,424 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001 Intel Corp.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * These are definitions needed by the state machine.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email addresses:
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Xingang Guo <xingang.guo@intel.com>
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * Dajiang Zhang <dajiang.zhang@nokia.com>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ * Daisy Chang <daisyc@us.ibm.com>
+ * Ardelle Fan <ardelle.fan@intel.com>
+ * Kevin Gao <kevin.gao@intel.com>
+ */
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <linux/in.h>
+#include <net/sctp/command.h>
+#include <net/sctp/sctp.h>
+
+#ifndef __sctp_sm_h__
+#define __sctp_sm_h__
+
+/*
+ * Possible values for the disposition are:
+ */
+enum sctp_disposition {
+ SCTP_DISPOSITION_DISCARD, /* No further processing. */
+ SCTP_DISPOSITION_CONSUME, /* Process return values normally. */
+ SCTP_DISPOSITION_NOMEM, /* We ran out of memory--recover. */
+ SCTP_DISPOSITION_DELETE_TCB, /* Close the association. */
+ SCTP_DISPOSITION_ABORT, /* Close the association NOW. */
+ SCTP_DISPOSITION_VIOLATION, /* The peer is misbehaving. */
+ SCTP_DISPOSITION_NOT_IMPL, /* This entry is not implemented. */
+ SCTP_DISPOSITION_ERROR, /* This is plain old user error. */
+ SCTP_DISPOSITION_BUG, /* This is a bug. */
+};
+
+typedef enum sctp_disposition (sctp_state_fn_t) (
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const union sctp_subtype type,
+ void *arg,
+ struct sctp_cmd_seq *commands);
+typedef void (sctp_timer_event_t) (struct timer_list *);
+struct sctp_sm_table_entry {
+ sctp_state_fn_t *fn;
+ const char *name;
+};
+
+/* A naming convention of "sctp_sf_xxx" applies to all the state functions
+ * currently in use.
+ */
+
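
/* Illustrative skeleton (not from the patch): every sctp_sf_xxx state
 * function below has this shape.  A real handler queues side effects
 * on 'commands' via sctp_add_cmd_sf() and returns one of the
 * dispositions defined above; this stub simply consumes the event.
 */
static enum sctp_disposition sctp_sf_example(struct net *net,
					     const struct sctp_endpoint *ep,
					     const struct sctp_association *asoc,
					     const union sctp_subtype type,
					     void *arg,
					     struct sctp_cmd_seq *commands)
{
	return SCTP_DISPOSITION_CONSUME;
}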
+/* Prototypes for generic state functions. */
+sctp_state_fn_t sctp_sf_not_impl;
+sctp_state_fn_t sctp_sf_bug;
+
+/* Prototypes for generic timer state functions. */
+sctp_state_fn_t sctp_sf_timer_ignore;
+
+/* Prototypes for chunk state functions. */
+sctp_state_fn_t sctp_sf_do_9_1_abort;
+sctp_state_fn_t sctp_sf_cookie_wait_abort;
+sctp_state_fn_t sctp_sf_cookie_echoed_abort;
+sctp_state_fn_t sctp_sf_shutdown_pending_abort;
+sctp_state_fn_t sctp_sf_shutdown_sent_abort;
+sctp_state_fn_t sctp_sf_shutdown_ack_sent_abort;
+sctp_state_fn_t sctp_sf_do_5_1B_init;
+sctp_state_fn_t sctp_sf_do_5_1C_ack;
+sctp_state_fn_t sctp_sf_do_5_1D_ce;
+sctp_state_fn_t sctp_sf_do_5_1E_ca;
+sctp_state_fn_t sctp_sf_do_4_C;
+sctp_state_fn_t sctp_sf_eat_data_6_2;
+sctp_state_fn_t sctp_sf_eat_data_fast_4_4;
+sctp_state_fn_t sctp_sf_eat_sack_6_2;
+sctp_state_fn_t sctp_sf_operr_notify;
+sctp_state_fn_t sctp_sf_t1_init_timer_expire;
+sctp_state_fn_t sctp_sf_t1_cookie_timer_expire;
+sctp_state_fn_t sctp_sf_t2_timer_expire;
+sctp_state_fn_t sctp_sf_t4_timer_expire;
+sctp_state_fn_t sctp_sf_t5_timer_expire;
+sctp_state_fn_t sctp_sf_sendbeat_8_3;
+sctp_state_fn_t sctp_sf_beat_8_3;
+sctp_state_fn_t sctp_sf_backbeat_8_3;
+sctp_state_fn_t sctp_sf_do_9_2_final;
+sctp_state_fn_t sctp_sf_do_9_2_shutdown;
+sctp_state_fn_t sctp_sf_do_9_2_shut_ctsn;
+sctp_state_fn_t sctp_sf_do_ecn_cwr;
+sctp_state_fn_t sctp_sf_do_ecne;
+sctp_state_fn_t sctp_sf_ootb;
+sctp_state_fn_t sctp_sf_pdiscard;
+sctp_state_fn_t sctp_sf_violation;
+sctp_state_fn_t sctp_sf_discard_chunk;
+sctp_state_fn_t sctp_sf_do_5_2_1_siminit;
+sctp_state_fn_t sctp_sf_do_5_2_2_dupinit;
+sctp_state_fn_t sctp_sf_do_5_2_3_initack;
+sctp_state_fn_t sctp_sf_do_5_2_4_dupcook;
+sctp_state_fn_t sctp_sf_unk_chunk;
+sctp_state_fn_t sctp_sf_do_8_5_1_E_sa;
+sctp_state_fn_t sctp_sf_cookie_echoed_err;
+sctp_state_fn_t sctp_sf_do_asconf;
+sctp_state_fn_t sctp_sf_do_asconf_ack;
+sctp_state_fn_t sctp_sf_do_reconf;
+sctp_state_fn_t sctp_sf_do_9_2_reshutack;
+sctp_state_fn_t sctp_sf_eat_fwd_tsn;
+sctp_state_fn_t sctp_sf_eat_fwd_tsn_fast;
+sctp_state_fn_t sctp_sf_eat_auth;
+
+/* Prototypes for primitive event state functions. */
+sctp_state_fn_t sctp_sf_do_prm_asoc;
+sctp_state_fn_t sctp_sf_do_prm_send;
+sctp_state_fn_t sctp_sf_do_9_2_prm_shutdown;
+sctp_state_fn_t sctp_sf_cookie_wait_prm_shutdown;
+sctp_state_fn_t sctp_sf_cookie_echoed_prm_shutdown;
+sctp_state_fn_t sctp_sf_do_9_1_prm_abort;
+sctp_state_fn_t sctp_sf_cookie_wait_prm_abort;
+sctp_state_fn_t sctp_sf_cookie_echoed_prm_abort;
+sctp_state_fn_t sctp_sf_shutdown_pending_prm_abort;
+sctp_state_fn_t sctp_sf_shutdown_sent_prm_abort;
+sctp_state_fn_t sctp_sf_shutdown_ack_sent_prm_abort;
+sctp_state_fn_t sctp_sf_error_closed;
+sctp_state_fn_t sctp_sf_error_shutdown;
+sctp_state_fn_t sctp_sf_ignore_primitive;
+sctp_state_fn_t sctp_sf_do_prm_requestheartbeat;
+sctp_state_fn_t sctp_sf_do_prm_asconf;
+sctp_state_fn_t sctp_sf_do_prm_reconf;
+
+/* Prototypes for other event state functions. */
+sctp_state_fn_t sctp_sf_do_no_pending_tsn;
+sctp_state_fn_t sctp_sf_do_9_2_start_shutdown;
+sctp_state_fn_t sctp_sf_do_9_2_shutdown_ack;
+sctp_state_fn_t sctp_sf_ignore_other;
+sctp_state_fn_t sctp_sf_cookie_wait_icmp_abort;
+
+/* Prototypes for timeout event state functions. */
+sctp_state_fn_t sctp_sf_do_6_3_3_rtx;
+sctp_state_fn_t sctp_sf_send_reconf;
+sctp_state_fn_t sctp_sf_do_6_2_sack;
+sctp_state_fn_t sctp_sf_autoclose_timer_expire;
+
+/* Prototypes for utility support functions. */
+__u8 sctp_get_chunk_type(struct sctp_chunk *chunk);
+const struct sctp_sm_table_entry *sctp_sm_lookup_event(
+ struct net *net,
+ enum sctp_event_type event_type,
+ enum sctp_state state,
+ union sctp_subtype event_subtype);
+int sctp_chunk_iif(const struct sctp_chunk *);
+struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *,
+ struct sctp_chunk *,
+ gfp_t gfp);
+__u32 sctp_generate_verification_tag(void);
+void sctp_populate_tie_tags(__u8 *cookie, __u32 curTag, __u32 hisTag);
+
+/* Prototypes for chunk-building functions. */
+struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
+ const struct sctp_bind_addr *bp,
+ gfp_t gfp, int vparam_len);
+struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk,
+ const gfp_t gfp, const int unkparam_len);
+struct sctp_chunk *sctp_make_cookie_echo(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk);
+struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk);
+struct sctp_chunk *sctp_make_cwr(const struct sctp_association *asoc,
+ const __u32 lowest_tsn,
+ const struct sctp_chunk *chunk);
+struct sctp_chunk *sctp_make_idata(const struct sctp_association *asoc,
+ __u8 flags, int paylen, gfp_t gfp);
+struct sctp_chunk *sctp_make_ifwdtsn(const struct sctp_association *asoc,
+ __u32 new_cum_tsn, size_t nstreams,
+ struct sctp_ifwdtsn_skip *skiplist);
+struct sctp_chunk *sctp_make_datafrag_empty(const struct sctp_association *asoc,
+ const struct sctp_sndrcvinfo *sinfo,
+ int len, __u8 flags, gfp_t gfp);
+struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc,
+ const __u32 lowest_tsn);
+struct sctp_chunk *sctp_make_sack(struct sctp_association *asoc);
+struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk);
+struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk);
+struct sctp_chunk *sctp_make_shutdown_complete(
+ const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk);
+int sctp_init_cause(struct sctp_chunk *chunk, __be16 cause, size_t paylen);
+struct sctp_chunk *sctp_make_abort(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk,
+ const size_t hint);
+struct sctp_chunk *sctp_make_abort_no_data(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk,
+ __u32 tsn);
+struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc,
+ struct msghdr *msg, size_t msg_len);
+struct sctp_chunk *sctp_make_abort_violation(
+ const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk,
+ const __u8 *payload,
+ const size_t paylen);
+struct sctp_chunk *sctp_make_violation_paramlen(
+ const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk,
+ struct sctp_paramhdr *param);
+struct sctp_chunk *sctp_make_violation_max_retrans(
+ const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk);
+struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
+ const struct sctp_transport *transport);
+struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk,
+ const void *payload,
+ const size_t paylen);
+struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk,
+ __be16 cause_code, const void *payload,
+ size_t paylen, size_t reserve_tail);
+
+struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc,
+ union sctp_addr *laddr,
+ struct sockaddr *addrs,
+ int addrcnt, __be16 flags);
+struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc,
+ union sctp_addr *addr);
+bool sctp_verify_asconf(const struct sctp_association *asoc,
+ struct sctp_chunk *chunk, bool addr_param_needed,
+ struct sctp_paramhdr **errp);
+struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
+ struct sctp_chunk *asconf);
+int sctp_process_asconf_ack(struct sctp_association *asoc,
+ struct sctp_chunk *asconf_ack);
+struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
+ __u32 new_cum_tsn, size_t nstreams,
+ struct sctp_fwdtsn_skip *skiplist);
+struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc,
+ __u16 key_id);
+struct sctp_chunk *sctp_make_strreset_req(const struct sctp_association *asoc,
+ __u16 stream_num, __be16 *stream_list,
+ bool out, bool in);
+struct sctp_chunk *sctp_make_strreset_tsnreq(
+ const struct sctp_association *asoc);
+struct sctp_chunk *sctp_make_strreset_addstrm(
+ const struct sctp_association *asoc,
+ __u16 out, __u16 in);
+struct sctp_chunk *sctp_make_strreset_resp(const struct sctp_association *asoc,
+ __u32 result, __u32 sn);
+struct sctp_chunk *sctp_make_strreset_tsnresp(struct sctp_association *asoc,
+ __u32 result, __u32 sn,
+ __u32 sender_tsn,
+ __u32 receiver_tsn);
+bool sctp_verify_reconf(const struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ struct sctp_paramhdr **errp);
+void sctp_chunk_assign_tsn(struct sctp_chunk *chunk);
+void sctp_chunk_assign_ssn(struct sctp_chunk *chunk);
+
+/* Prototypes for stream-processing functions. */
+struct sctp_chunk *sctp_process_strreset_outreq(
+ struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_ulpevent **evp);
+struct sctp_chunk *sctp_process_strreset_inreq(
+ struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_ulpevent **evp);
+struct sctp_chunk *sctp_process_strreset_tsnreq(
+ struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_ulpevent **evp);
+struct sctp_chunk *sctp_process_strreset_addstrm_out(
+ struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_ulpevent **evp);
+struct sctp_chunk *sctp_process_strreset_addstrm_in(
+ struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_ulpevent **evp);
+struct sctp_chunk *sctp_process_strreset_resp(
+ struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_ulpevent **evp);
+
+/* Prototypes for statetable processing. */
+
+int sctp_do_sm(struct net *net, enum sctp_event_type event_type,
+ union sctp_subtype subtype, enum sctp_state state,
+ struct sctp_endpoint *ep, struct sctp_association *asoc,
+ void *event_arg, gfp_t gfp);
+
+/* 2nd level prototypes */
+void sctp_generate_t3_rtx_event(struct timer_list *t);
+void sctp_generate_heartbeat_event(struct timer_list *t);
+void sctp_generate_reconf_event(struct timer_list *t);
+void sctp_generate_proto_unreach_event(struct timer_list *t);
+
+void sctp_ootb_pkt_free(struct sctp_packet *packet);
+
+struct sctp_association *sctp_unpack_cookie(
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ gfp_t gfp, int *err,
+ struct sctp_chunk **err_chk_p);
+
+/* 3rd level prototypes */
+__u32 sctp_generate_tag(const struct sctp_endpoint *ep);
+__u32 sctp_generate_tsn(const struct sctp_endpoint *ep);
+
+/* Extern declarations for major data structures. */
+extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
+
+
+/* Get the size of a DATA chunk payload. */
+static inline __u16 sctp_data_size(struct sctp_chunk *chunk)
+{
+ __u16 size;
+
+ size = ntohs(chunk->chunk_hdr->length);
+ size -= sctp_datachk_len(&chunk->asoc->stream);
+
+ return size;
+}
+
+/* Compare two TSNs */
+#define TSN_lt(a,b) \
+ (typecheck(__u32, a) && \
+ typecheck(__u32, b) && \
+ ((__s32)((a) - (b)) < 0))
+
+#define TSN_lte(a,b) \
+ (typecheck(__u32, a) && \
+ typecheck(__u32, b) && \
+ ((__s32)((a) - (b)) <= 0))
+
+/* Compare two MIDs */
+#define MID_lt(a, b) \
+ (typecheck(__u32, a) && \
+ typecheck(__u32, b) && \
+ ((__s32)((a) - (b)) < 0))
+
+/* Compare two SSNs */
+#define SSN_lt(a,b) \
+ (typecheck(__u16, a) && \
+ typecheck(__u16, b) && \
+ ((__s16)((a) - (b)) < 0))
+
+/* ADDIP 3.1.1 */
+#define ADDIP_SERIAL_gte(a,b) \
+ (typecheck(__u32, a) && \
+ typecheck(__u32, b) && \
+ ((__s32)((b) - (a)) <= 0))
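+
+/* Illustrative sketch (not part of the original header): the comparison
+ * macros above use serial-number arithmetic, so they stay correct across
+ * 32-bit wraparound. For example (the helper name is hypothetical):
+ */
+static inline bool sctp_tsn_wraparound_example(void)
+{
+ __u32 old_tsn = 0xfffffffeU; /* just before the TSN wraps */
+ __u32 new_tsn = 0x00000001U; /* just after the TSN wraps */
+
+ /* True: (__s32)(old_tsn - new_tsn) is negative even though old_tsn > new_tsn. */
+ return TSN_lt(old_tsn, new_tsn);
+}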
+
+/* Check that the VTAG of the packet matches the receiver's own tag. */
+static inline int
+sctp_vtag_verify(const struct sctp_chunk *chunk,
+ const struct sctp_association *asoc)
+{
+ /* RFC 2960 Sec 8.5 When receiving an SCTP packet, the endpoint
+ * MUST ensure that the value in the Verification Tag field of
+ * the received SCTP packet matches its own Tag. If the received
+ * Verification Tag value does not match the receiver's own
+ * tag value, the receiver shall silently discard the packet...
+ */
+ if (ntohl(chunk->sctp_hdr->vtag) == asoc->c.my_vtag)
+ return 1;
+
+ return 0;
+}
+
+/* Check that the VTAG of the packet matches either the receiver's own tag
+ * with the T bit not set, or its peer's tag with the T bit set in the
+ * Chunk Flags.
+ */
+static inline int
+sctp_vtag_verify_either(const struct sctp_chunk *chunk,
+ const struct sctp_association *asoc)
+{
+ /* RFC 2960 Section 8.5.1, sctpimpguide Section 2.41
+ *
+ * B) The receiver of an ABORT MUST accept the packet
+ * if the Verification Tag field of the packet matches its own tag
+ * and the T bit is not set
+ * OR
+ * it is set to its peer's tag and the T bit is set in the Chunk
+ * Flags.
+ * Otherwise, the receiver MUST silently discard the packet
+ * and take no further action.
+ *
+ * C) The receiver of a SHUTDOWN COMPLETE shall accept the packet
+ * if the Verification Tag field of the packet matches its own tag
+ * and the T bit is not set
+ * OR
+ * it is set to its peer's tag and the T bit is set in the Chunk
+ * Flags.
+ * Otherwise, the receiver MUST silently discard the packet
+ * and take no further action. An endpoint MUST ignore the
+ * SHUTDOWN COMPLETE if it is not in the SHUTDOWN-ACK-SENT state.
+ */
+ if ((!sctp_test_T_bit(chunk) &&
+ (ntohl(chunk->sctp_hdr->vtag) == asoc->c.my_vtag)) ||
+ (sctp_test_T_bit(chunk) && asoc->c.peer_vtag &&
+ (ntohl(chunk->sctp_hdr->vtag) == asoc->c.peer_vtag))) {
+ return 1;
+ }
+
+ return 0;
+}
+
+#endif /* __sctp_sm_h__ */
diff --git a/include/net/sctp/stream_interleave.h b/include/net/sctp/stream_interleave.h
new file mode 100644
index 000000000..526358555
--- /dev/null
+++ b/include/net/sctp/stream_interleave.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SCTP kernel implementation
+ * (C) Copyright Red Hat Inc. 2017
+ *
+ * These are definitions used by the stream interleaving (I-DATA) code,
+ * defined in RFC draft ndata
+ * (https://tools.ietf.org/html/draft-ietf-tsvwg-sctp-ndata-11)
+ *
+ * Please send any bug reports or fixes you make to the
+ * email addresses:
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Xin Long <lucien.xin@gmail.com>
+ */
+
+#ifndef __sctp_stream_interleave_h__
+#define __sctp_stream_interleave_h__
+
+struct sctp_stream_interleave {
+ __u16 data_chunk_len;
+ __u16 ftsn_chunk_len;
+ /* (I-)DATA process */
+ struct sctp_chunk *(*make_datafrag)(const struct sctp_association *asoc,
+ const struct sctp_sndrcvinfo *sinfo,
+ int len, __u8 flags, gfp_t gfp);
+ void (*assign_number)(struct sctp_chunk *chunk);
+ bool (*validate_data)(struct sctp_chunk *chunk);
+ int (*ulpevent_data)(struct sctp_ulpq *ulpq,
+ struct sctp_chunk *chunk, gfp_t gfp);
+ int (*enqueue_event)(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event);
+ void (*renege_events)(struct sctp_ulpq *ulpq,
+ struct sctp_chunk *chunk, gfp_t gfp);
+ void (*start_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
+ void (*abort_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
+ /* (I-)FORWARD-TSN process */
+ void (*generate_ftsn)(struct sctp_outq *q, __u32 ctsn);
+ bool (*validate_ftsn)(struct sctp_chunk *chunk);
+ void (*report_ftsn)(struct sctp_ulpq *ulpq, __u32 ftsn);
+ void (*handle_ftsn)(struct sctp_ulpq *ulpq,
+ struct sctp_chunk *chunk);
+};
+
+void sctp_stream_interleave_init(struct sctp_stream *stream);
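+
+/* Illustrative sketch (not part of the original header): once
+ * sctp_stream_interleave_init() has pointed a stream's ->si at either the
+ * DATA or the I-DATA ops table, callers dispatch through it instead of
+ * branching on the negotiated extension, e.g. building a fragment is
+ * roughly:
+ *
+ *	chunk = stream->si->make_datafrag(asoc, sinfo, len, flags, gfp);
+ */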
+
+#endif /* __sctp_stream_interleave_h__ */
diff --git a/include/net/sctp/stream_sched.h b/include/net/sctp/stream_sched.h
new file mode 100644
index 000000000..65058faea
--- /dev/null
+++ b/include/net/sctp/stream_sched.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SCTP kernel implementation
+ * (C) Copyright Red Hat Inc. 2017
+ *
+ * These are definitions used by the stream schedulers, defined in RFC
+ * draft ndata (https://tools.ietf.org/html/draft-ietf-tsvwg-sctp-ndata-11)
+ *
+ * Please send any bug reports or fixes you make to the
+ * email addresses:
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+ */
+
+#ifndef __sctp_stream_sched_h__
+#define __sctp_stream_sched_h__
+
+struct sctp_sched_ops {
+ /* Property handling for a given stream */
+ int (*set)(struct sctp_stream *stream, __u16 sid, __u16 value,
+ gfp_t gfp);
+ int (*get)(struct sctp_stream *stream, __u16 sid, __u16 *value);
+
+ /* Init the specific scheduler */
+ int (*init)(struct sctp_stream *stream);
+ /* Init a stream */
+ int (*init_sid)(struct sctp_stream *stream, __u16 sid, gfp_t gfp);
+ /* free a stream */
+ void (*free_sid)(struct sctp_stream *stream, __u16 sid);
+ /* Frees the entire thing */
+ void (*free)(struct sctp_stream *stream);
+
+ /* Enqueue a chunk */
+ void (*enqueue)(struct sctp_outq *q, struct sctp_datamsg *msg);
+ /* Dequeue a chunk */
+ struct sctp_chunk *(*dequeue)(struct sctp_outq *q);
+ /* Called only if the chunk fit the packet */
+ void (*dequeue_done)(struct sctp_outq *q, struct sctp_chunk *chunk);
+ /* Schedule all chunks already enqueued */
+ void (*sched_all)(struct sctp_stream *stream);
+ /* Unschedule all chunks already enqueued */
+ void (*unsched_all)(struct sctp_stream *stream);
+};
+
+int sctp_sched_set_sched(struct sctp_association *asoc,
+ enum sctp_sched_type sched);
+int sctp_sched_get_sched(struct sctp_association *asoc);
+int sctp_sched_set_value(struct sctp_association *asoc, __u16 sid,
+ __u16 value, gfp_t gfp);
+int sctp_sched_get_value(struct sctp_association *asoc, __u16 sid,
+ __u16 *value);
+void sctp_sched_dequeue_done(struct sctp_outq *q, struct sctp_chunk *ch);
+
+void sctp_sched_dequeue_common(struct sctp_outq *q, struct sctp_chunk *ch);
+int sctp_sched_init_sid(struct sctp_stream *stream, __u16 sid, gfp_t gfp);
+struct sctp_sched_ops *sctp_sched_ops_from_stream(struct sctp_stream *stream);
+
+void sctp_sched_ops_register(enum sctp_sched_type sched,
+ struct sctp_sched_ops *sched_ops);
+void sctp_sched_ops_prio_init(void);
+void sctp_sched_ops_rr_init(void);
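+
+/* Illustrative sketch (not part of the original header): switching an
+ * association to the round-robin scheduler and reading the setting back.
+ * SCTP_SS_RR comes from the uapi scheduler enum; the helper name is
+ * hypothetical.
+ */
+static inline int sctp_sched_example_use_rr(struct sctp_association *asoc)
+{
+ int err = sctp_sched_set_sched(asoc, SCTP_SS_RR);
+
+ return err ?: sctp_sched_get_sched(asoc);
+}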
+
+#endif /* __sctp_stream_sched_h__ */
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
new file mode 100644
index 000000000..be59e8df0
--- /dev/null
+++ b/include/net/sctp/structs.h
@@ -0,0 +1,2177 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001 Intel Corp.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * Please send any bug reports or fixes you make to the
+ * email addresses:
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Randall Stewart <randall@sctp.chicago.il.us>
+ * Ken Morneau <kmorneau@cisco.com>
+ * Qiaobing Xie <qxie1@email.mot.com>
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * Xingang Guo <xingang.guo@intel.com>
+ * Hui Huang <hui.huang@nokia.com>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ * Daisy Chang <daisyc@us.ibm.com>
+ * Dajiang Zhang <dajiang.zhang@nokia.com>
+ * Ardelle Fan <ardelle.fan@intel.com>
+ * Ryan Layer <rmlayer@us.ibm.com>
+ * Anup Pemmaiah <pemmaiah@cc.usu.edu>
+ * Kevin Gao <kevin.gao@intel.com>
+ */
+
+#ifndef __sctp_structs_h__
+#define __sctp_structs_h__
+
+#include <linux/ktime.h>
+#include <linux/generic-radix-tree.h>
+#include <linux/rhashtable-types.h>
+#include <linux/socket.h> /* linux/in.h needs this!! */
+#include <linux/in.h> /* We get struct sockaddr_in. */
+#include <linux/in6.h> /* We get struct in6_addr */
+#include <linux/ipv6.h>
+#include <asm/param.h> /* We get MAXHOSTNAMELEN. */
+#include <linux/atomic.h> /* This gets us atomic counters. */
+#include <linux/skbuff.h> /* We need sk_buff_head. */
+#include <linux/workqueue.h> /* We need tq_struct. */
+#include <linux/sctp.h> /* We need sctp* header structs. */
+#include <net/sctp/auth.h> /* We need auth specific structs */
+#include <net/ip.h> /* For inet_skb_parm */
+
+/* A convenience structure for handling sockaddr structures.
+ * We should wean ourselves off this.
+ */
+union sctp_addr {
+ struct sockaddr_in v4;
+ struct sockaddr_in6 v6;
+ struct sockaddr sa;
+};
+
+/* Forward declarations for data structures. */
+struct sctp_globals;
+struct sctp_endpoint;
+struct sctp_association;
+struct sctp_transport;
+struct sctp_packet;
+struct sctp_chunk;
+struct sctp_inq;
+struct sctp_outq;
+struct sctp_bind_addr;
+struct sctp_ulpq;
+struct sctp_ep_common;
+struct crypto_shash;
+struct sctp_stream;
+
+
+#include <net/sctp/tsnmap.h>
+#include <net/sctp/ulpevent.h>
+#include <net/sctp/ulpqueue.h>
+#include <net/sctp/stream_interleave.h>
+
+/* Structures useful for managing bind/connect. */
+
+struct sctp_bind_bucket {
+ unsigned short port;
+ signed char fastreuse;
+ signed char fastreuseport;
+ kuid_t fastuid;
+ struct hlist_node node;
+ struct hlist_head owner;
+ struct net *net;
+};
+
+struct sctp_bind_hashbucket {
+ spinlock_t lock;
+ struct hlist_head chain;
+};
+
+/* Used for hashing all associations. */
+struct sctp_hashbucket {
+ rwlock_t lock;
+ struct hlist_head chain;
+} __attribute__((__aligned__(8)));
+
+
+/* The SCTP globals structure. */
+extern struct sctp_globals {
+ /* This is a list of groups of functions for each address
+ * family that we support.
+ */
+ struct list_head address_families;
+
+ /* This is the hash of all endpoints. */
+ struct sctp_hashbucket *ep_hashtable;
+ /* This is the sctp port control hash. */
+ struct sctp_bind_hashbucket *port_hashtable;
+ /* This is the hash of all transports. */
+ struct rhltable transport_hashtable;
+
+ /* Sizes of above hashtables. */
+ int ep_hashsize;
+ int port_hashsize;
+
+ /* Default initialization values to be applied to new associations. */
+ __u16 max_instreams;
+ __u16 max_outstreams;
+
+ /* Flag to indicate whether computing and verifying checksum
+ * is disabled. */
+ bool checksum_disable;
+} sctp_globals;
+
+#define sctp_max_instreams (sctp_globals.max_instreams)
+#define sctp_max_outstreams (sctp_globals.max_outstreams)
+#define sctp_address_families (sctp_globals.address_families)
+#define sctp_ep_hashsize (sctp_globals.ep_hashsize)
+#define sctp_ep_hashtable (sctp_globals.ep_hashtable)
+#define sctp_port_hashsize (sctp_globals.port_hashsize)
+#define sctp_port_hashtable (sctp_globals.port_hashtable)
+#define sctp_transport_hashtable (sctp_globals.transport_hashtable)
+#define sctp_checksum_disable (sctp_globals.checksum_disable)
+
+/* SCTP Socket type: UDP or TCP style. */
+enum sctp_socket_type {
+ SCTP_SOCKET_UDP = 0,
+ SCTP_SOCKET_UDP_HIGH_BANDWIDTH,
+ SCTP_SOCKET_TCP
+};
+
+/* Per socket SCTP information. */
+struct sctp_sock {
+ /* inet_sock has to be the first member of sctp_sock */
+ struct inet_sock inet;
+ /* What kind of a socket is this? */
+ enum sctp_socket_type type;
+
+ /* PF_ family specific functions. */
+ struct sctp_pf *pf;
+
+ /* Access to HMAC transform. */
+ struct crypto_shash *hmac;
+ char *sctp_hmac_alg;
+
+ /* What is our base endpoint? */
+ struct sctp_endpoint *ep;
+
+ struct sctp_bind_bucket *bind_hash;
+ /* Various Socket Options. */
+ __u16 default_stream;
+ __u32 default_ppid;
+ __u16 default_flags;
+ __u32 default_context;
+ __u32 default_timetolive;
+ __u32 default_rcv_context;
+ int max_burst;
+
+ /* Heartbeat interval: The endpoint sends out a Heartbeat chunk to
+ * the destination address every heartbeat interval. This value
+ * will be inherited by all new associations.
+ */
+ __u32 hbinterval;
+
+ /* This is the max_retrans value for new associations. */
+ __u16 pathmaxrxt;
+
+ __u32 flowlabel;
+ __u8 dscp;
+
+ __u16 pf_retrans;
+ __u16 ps_retrans;
+
+ /* The initial Path MTU to use for new associations. */
+ __u32 pathmtu;
+
+ /* The default SACK delay timeout for new associations. */
+ __u32 sackdelay;
+ __u32 sackfreq;
+
+ /* Flags controlling Heartbeat, SACK delay, and Path MTU Discovery. */
+ __u32 param_flags;
+
+ __u32 default_ss;
+
+ struct sctp_rtoinfo rtoinfo;
+ struct sctp_paddrparams paddrparam;
+ struct sctp_assocparams assocparams;
+
+ /*
+ * These two structures must be grouped together for the usercopy
+ * whitelist region.
+ */
+ __u16 subscribe;
+ struct sctp_initmsg initmsg;
+
+ int user_frag;
+
+ __u32 autoclose;
+ __u32 adaptation_ind;
+ __u32 pd_point;
+ __u16 nodelay:1,
+ pf_expose:2,
+ reuse:1,
+ disable_fragments:1,
+ v4mapped:1,
+ frag_interleave:1,
+ recvrcvinfo:1,
+ recvnxtinfo:1,
+ data_ready_signalled:1;
+
+ atomic_t pd_mode;
+
+ /* Fields after this point will be skipped on copies, like on accept
+ * and peeloff operations
+ */
+
+ /* Receive to here while partial delivery is in effect. */
+ struct sk_buff_head pd_lobby;
+
+ struct list_head auto_asconf_list;
+ int do_auto_asconf;
+};
+
+static inline struct sctp_sock *sctp_sk(const struct sock *sk)
+{
+ return (struct sctp_sock *)sk;
+}
+
+static inline struct sock *sctp_opt2sk(const struct sctp_sock *sp)
+{
+ return (struct sock *)sp;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+struct sctp6_sock {
+ struct sctp_sock sctp;
+ struct ipv6_pinfo inet6;
+};
+#endif /* CONFIG_IPV6 */
+
+
+/* This is our APPLICATION-SPECIFIC state cookie.
+ * THIS IS NOT DICTATED BY THE SPECIFICATION.
+ */
+/* These are the parts of an association which we send in the cookie.
+ * Most of these are straight out of:
+ * RFC2960 12.2 Parameters necessary per association (i.e. the TCB)
+ *
+ */
+
+struct sctp_cookie {
+
+ /* My : Tag expected in every inbound packet and sent
+ * Verification: in the INIT or INIT ACK chunk.
+ * Tag :
+ */
+ __u32 my_vtag;
+
+ /* Peer's : Tag expected in every outbound packet except
+ * Verification: in the INIT chunk.
+ * Tag :
+ */
+ __u32 peer_vtag;
+
+ /* The rest of these are not from the spec, but really need to
+ * be in the cookie.
+ */
+
+ /* My Tie Tag : Assist in discovering a restarting association. */
+ __u32 my_ttag;
+
+ /* Peer's Tie Tag: Assist in discovering a restarting association. */
+ __u32 peer_ttag;
+
+ /* When does this cookie expire? */
+ ktime_t expiration;
+
+ /* Number of inbound/outbound streams which are set
+ * and negotiated during the INIT process.
+ */
+ __u16 sinit_num_ostreams;
+ __u16 sinit_max_instreams;
+
+ /* This is the first sequence number I used. */
+ __u32 initial_tsn;
+
+ /* This holds the originating address of the INIT packet. */
+ union sctp_addr peer_addr;
+
+ /* IG Section 2.35.3
+ * Include the source port of the INIT-ACK
+ */
+ __u16 my_port;
+
+ __u8 prsctp_capable;
+
+ /* Padding for future use */
+ __u8 padding;
+
+ __u32 adaptation_ind;
+
+ __u8 auth_random[sizeof(struct sctp_paramhdr) +
+ SCTP_AUTH_RANDOM_LENGTH];
+ __u8 auth_hmacs[SCTP_AUTH_NUM_HMACS * sizeof(__u16) + 2];
+ __u8 auth_chunks[sizeof(struct sctp_paramhdr) + SCTP_AUTH_MAX_CHUNKS];
+
+ /* This is a shim for my peer's INIT packet, followed by
+ * a copy of the raw address list of the association.
+ * The length of the raw address list is saved in the
+ * raw_addr_list_len field, which will be used at the time when
+ * the association TCB is re-constructed from the cookie.
+ */
+ __u32 raw_addr_list_len;
+ struct sctp_init_chunk peer_init[];
+};
+
+
+/* The format of our cookie that we send to our peer. */
+struct sctp_signed_cookie {
+ __u8 signature[SCTP_SECRET_SIZE];
+ __u32 __pad; /* force sctp_cookie alignment to 64 bits */
+ struct sctp_cookie c;
+} __packed;
+
+/* This is another convenience type used to allocate memory for an
+ * address param of the maximum size and to pass such structures
+ * around internally.
+ */
+union sctp_addr_param {
+ struct sctp_paramhdr p;
+ struct sctp_ipv4addr_param v4;
+ struct sctp_ipv6addr_param v6;
+};
+
+/* A convenience type to allow walking through the various
+ * parameters and avoid casting all over the place.
+ */
+union sctp_params {
+ void *v;
+ struct sctp_paramhdr *p;
+ struct sctp_cookie_preserve_param *life;
+ struct sctp_hostname_param *dns;
+ struct sctp_cookie_param *cookie;
+ struct sctp_supported_addrs_param *sat;
+ struct sctp_ipv4addr_param *v4;
+ struct sctp_ipv6addr_param *v6;
+ union sctp_addr_param *addr;
+ struct sctp_adaptation_ind_param *aind;
+ struct sctp_supported_ext_param *ext;
+ struct sctp_random_param *random;
+ struct sctp_chunks_param *chunks;
+ struct sctp_hmac_algo_param *hmac_algo;
+ struct sctp_addip_param *addip;
+};
+
+/* RFC 2960. Section 3.3.5 Heartbeat.
+ * Heartbeat Information: variable length
+ * The Sender-specific Heartbeat Info field should normally include
+ * information about the sender's current time when this HEARTBEAT
+ * chunk is sent and the destination transport address to which this
+ * HEARTBEAT is sent (see Section 8.3).
+ */
+struct sctp_sender_hb_info {
+ struct sctp_paramhdr param_hdr;
+ union sctp_addr daddr;
+ unsigned long sent_at;
+ __u64 hb_nonce;
+};
+
+int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
+ gfp_t gfp);
+int sctp_stream_init_ext(struct sctp_stream *stream, __u16 sid);
+void sctp_stream_free(struct sctp_stream *stream);
+void sctp_stream_clear(struct sctp_stream *stream);
+void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new);
+
+/* What is the current SSN number for this stream? */
+#define sctp_ssn_peek(stream, type, sid) \
+ (sctp_stream_##type((stream), (sid))->ssn)
+
+/* Return the next SSN number for this stream. */
+#define sctp_ssn_next(stream, type, sid) \
+ (sctp_stream_##type((stream), (sid))->ssn++)
+
+/* Skip over this ssn and all below. */
+#define sctp_ssn_skip(stream, type, sid, ssn) \
+ (sctp_stream_##type((stream), (sid))->ssn = ssn + 1)
+
+/* What is the current MID number for this stream? */
+#define sctp_mid_peek(stream, type, sid) \
+ (sctp_stream_##type((stream), (sid))->mid)
+
+/* Return the next MID number for this stream. */
+#define sctp_mid_next(stream, type, sid) \
+ (sctp_stream_##type((stream), (sid))->mid++)
+
+/* Skip over this mid and all below. */
+#define sctp_mid_skip(stream, type, sid, mid) \
+ (sctp_stream_##type((stream), (sid))->mid = mid + 1)
+
+/* What is the current MID_uo number for this stream? */
+#define sctp_mid_uo_peek(stream, type, sid) \
+ (sctp_stream_##type((stream), (sid))->mid_uo)
+
+/* Return the next MID_uo number for this stream. */
+#define sctp_mid_uo_next(stream, type, sid) \
+ (sctp_stream_##type((stream), (sid))->mid_uo++)
+
+/*
+ * Pointers to address related SCTP functions.
+ * (i.e. things that depend on the address family.)
+ */
+struct sctp_af {
+ int (*sctp_xmit) (struct sk_buff *skb,
+ struct sctp_transport *);
+ int (*setsockopt) (struct sock *sk,
+ int level,
+ int optname,
+ sockptr_t optval,
+ unsigned int optlen);
+ int (*getsockopt) (struct sock *sk,
+ int level,
+ int optname,
+ char __user *optval,
+ int __user *optlen);
+ void (*get_dst) (struct sctp_transport *t,
+ union sctp_addr *saddr,
+ struct flowi *fl,
+ struct sock *sk);
+ void (*get_saddr) (struct sctp_sock *sk,
+ struct sctp_transport *t,
+ struct flowi *fl);
+ void (*copy_addrlist) (struct list_head *,
+ struct net_device *);
+ int (*cmp_addr) (const union sctp_addr *addr1,
+ const union sctp_addr *addr2);
+ void (*addr_copy) (union sctp_addr *dst,
+ union sctp_addr *src);
+ void (*from_skb) (union sctp_addr *,
+ struct sk_buff *skb,
+ int saddr);
+ void (*from_sk) (union sctp_addr *,
+ struct sock *sk);
+ bool (*from_addr_param) (union sctp_addr *,
+ union sctp_addr_param *,
+ __be16 port, int iif);
+ int (*to_addr_param) (const union sctp_addr *,
+ union sctp_addr_param *);
+ int (*addr_valid) (union sctp_addr *,
+ struct sctp_sock *,
+ const struct sk_buff *);
+ enum sctp_scope (*scope)(union sctp_addr *);
+ void (*inaddr_any) (union sctp_addr *, __be16);
+ int (*is_any) (const union sctp_addr *);
+ int (*available) (union sctp_addr *,
+ struct sctp_sock *);
+ int (*skb_iif) (const struct sk_buff *sk);
+ int (*is_ce) (const struct sk_buff *sk);
+ void (*seq_dump_addr)(struct seq_file *seq,
+ union sctp_addr *addr);
+ void (*ecn_capable)(struct sock *sk);
+ __u16 net_header_len;
+ int sockaddr_len;
+ int (*ip_options_len)(struct sock *sk);
+ sa_family_t sa_family;
+ struct list_head list;
+};
+
+struct sctp_af *sctp_get_af_specific(sa_family_t);
+int sctp_register_af(struct sctp_af *);
+
+/* Protocol family functions. */
+struct sctp_pf {
+ void (*event_msgname)(struct sctp_ulpevent *, char *, int *);
+ void (*skb_msgname) (struct sk_buff *, char *, int *);
+ int (*af_supported) (sa_family_t, struct sctp_sock *);
+ int (*cmp_addr) (const union sctp_addr *,
+ const union sctp_addr *,
+ struct sctp_sock *);
+ int (*bind_verify) (struct sctp_sock *, union sctp_addr *);
+ int (*send_verify) (struct sctp_sock *, union sctp_addr *);
+ int (*supported_addrs)(const struct sctp_sock *, __be16 *);
+ struct sock *(*create_accept_sk) (struct sock *sk,
+ struct sctp_association *asoc,
+ bool kern);
+ int (*addr_to_user)(struct sctp_sock *sk, union sctp_addr *addr);
+ void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
+ void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
+ void (*copy_ip_options)(struct sock *sk, struct sock *newsk);
+ struct sctp_af *af;
+};
+
+
+/* Structure to track chunk fragments that have been acked, but peer
+ * fragments of the same message have not.
+ */
+struct sctp_datamsg {
+ /* Chunks waiting to be submitted to lower layer. */
+ struct list_head chunks;
+ /* Reference counting. */
+ refcount_t refcnt;
+ /* When is this message no longer interesting to the peer? */
+ unsigned long expires_at;
+ /* Did the message fail to send? */
+ int send_error;
+ u8 send_failed:1,
+ can_delay:1, /* should this message be Nagle delayed */
+ abandoned:1; /* should this message be abandoned */
+};
+
+struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *,
+ struct sctp_sndrcvinfo *,
+ struct iov_iter *);
+void sctp_datamsg_free(struct sctp_datamsg *);
+void sctp_datamsg_put(struct sctp_datamsg *);
+void sctp_chunk_fail(struct sctp_chunk *, int error);
+int sctp_chunk_abandoned(struct sctp_chunk *);
+
+/* RFC2960 1.4 Key Terms
+ *
+ * o Chunk: A unit of information within an SCTP packet, consisting of
+ * a chunk header and chunk-specific content.
+ *
+ * As a matter of convenience, we remember the SCTP common header for
+ * each chunk as well as a few other header pointers...
+ */
+struct sctp_chunk {
+ struct list_head list;
+
+ refcount_t refcnt;
+
+ /* How many times this chunk has been sent, for prsctp RTX policy */
+ int sent_count;
+
+ union {
+ /* This is our link to the per-transport transmitted list. */
+ struct list_head transmitted_list;
+ /* List in specific stream outq */
+ struct list_head stream_list;
+ };
+
+ /* This field is used by chunks that hold fragmented data.
+ * For the first fragment this is the list that holds the rest of
+ * fragments. For the remaining fragments, this is the link to the
+ * frag_list maintained in the first fragment.
+ */
+ struct list_head frag_list;
+
+ /* This points to the sk_buff containing the actual data. */
+ struct sk_buff *skb;
+
+ union {
+ /* In case of GSO packets, this will store the head one */
+ struct sk_buff *head_skb;
+ /* In case of auth enabled, this will point to the shkey */
+ struct sctp_shared_key *shkey;
+ };
+
+ /* These are the SCTP headers, in reverse order within a packet.
+ * Note that some of these may happen more than once. In that
+ * case, we point at the "current" one, whatever that means
+ * for that level of header.
+ */
+
+ /* We point this at the FIRST TLV parameter to chunk_hdr. */
+ union sctp_params param_hdr;
+ union {
+ __u8 *v;
+ struct sctp_datahdr *data_hdr;
+ struct sctp_inithdr *init_hdr;
+ struct sctp_sackhdr *sack_hdr;
+ struct sctp_heartbeathdr *hb_hdr;
+ struct sctp_sender_hb_info *hbs_hdr;
+ struct sctp_shutdownhdr *shutdown_hdr;
+ struct sctp_signed_cookie *cookie_hdr;
+ struct sctp_ecnehdr *ecne_hdr;
+ struct sctp_cwrhdr *ecn_cwr_hdr;
+ struct sctp_errhdr *err_hdr;
+ struct sctp_addiphdr *addip_hdr;
+ struct sctp_fwdtsn_hdr *fwdtsn_hdr;
+ struct sctp_authhdr *auth_hdr;
+ struct sctp_idatahdr *idata_hdr;
+ struct sctp_ifwdtsn_hdr *ifwdtsn_hdr;
+ } subh;
+
+ __u8 *chunk_end;
+
+ struct sctp_chunkhdr *chunk_hdr;
+ struct sctphdr *sctp_hdr;
+
+ /* This needs to be recoverable for SCTP_SEND_FAILED events. */
+ struct sctp_sndrcvinfo sinfo;
+
+ /* Which association does this belong to? */
+ struct sctp_association *asoc;
+
+ /* What endpoint received this chunk? */
+ struct sctp_ep_common *rcvr;
+
+ /* We fill this in if we are calculating RTT. */
+ unsigned long sent_at;
+
+ /* What is the origin IP address for this chunk? */
+ union sctp_addr source;
+ /* Destination address for this chunk. */
+ union sctp_addr dest;
+
+ /* For outbound message, track all fragments for SEND_FAILED. */
+ struct sctp_datamsg *msg;
+
+ /* For an inbound chunk, this tells us where it came from.
+ * For an outbound chunk, it tells us where we'd like it to
+ * go. It is NULL if we have no preference.
+ */
+ struct sctp_transport *transport;
+
+ /* SCTP-AUTH: For the special case inbound processing of COOKIE-ECHO
+ * we need to save a pointer to the AUTH chunk, since the SCTP-AUTH
+ * spec violates the principle that all chunks are processed
+ * in order.
+ */
+ struct sk_buff *auth_chunk;
+
+#define SCTP_CAN_FRTX 0x0
+#define SCTP_NEED_FRTX 0x1
+#define SCTP_DONT_FRTX 0x2
+ __u16 rtt_in_progress:1, /* This chunk used for RTT calc? */
+ has_tsn:1, /* Does this chunk have a TSN yet? */
+ has_ssn:1, /* Does this chunk have a SSN yet? */
+#define has_mid has_ssn
+ singleton:1, /* Only chunk in the packet? */
+ end_of_packet:1, /* Last chunk in the packet? */
+ ecn_ce_done:1, /* Have we processed the ECN CE bit? */
+ pdiscard:1, /* Discard the whole packet now? */
+ tsn_gap_acked:1, /* Is this chunk acked by a GAP ACK? */
+ data_accepted:1, /* At least 1 chunk accepted */
+ auth:1, /* IN: was auth'ed | OUT: needs auth */
+ has_asconf:1, /* IN: have seen an asconf before */
+ tsn_missing_report:2, /* Data chunk missing counter. */
+ fast_retransmit:2; /* Is this chunk fast retransmitted? */
+};
+
+#define sctp_chunk_retransmitted(chunk) (chunk->sent_count > 1)
+void sctp_chunk_hold(struct sctp_chunk *);
+void sctp_chunk_put(struct sctp_chunk *);
+int sctp_user_addto_chunk(struct sctp_chunk *chunk, int len,
+ struct iov_iter *from);
+void sctp_chunk_free(struct sctp_chunk *);
+void *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data);
+struct sctp_chunk *sctp_chunkify(struct sk_buff *,
+ const struct sctp_association *,
+ struct sock *, gfp_t gfp);
+void sctp_init_addrs(struct sctp_chunk *, union sctp_addr *,
+ union sctp_addr *);
+const union sctp_addr *sctp_source(const struct sctp_chunk *chunk);
+
+static inline __u16 sctp_chunk_stream_no(struct sctp_chunk *ch)
+{
+ return ntohs(ch->subh.data_hdr->stream);
+}
+
+enum {
+ SCTP_ADDR_NEW, /* new address added to assoc/ep */
+ SCTP_ADDR_SRC, /* address can be used as source */
+ SCTP_ADDR_DEL, /* address about to be deleted */
+};
+
+/* This is a structure for holding either an IPv6 or an IPv4 address. */
+struct sctp_sockaddr_entry {
+ struct list_head list;
+ struct rcu_head rcu;
+ union sctp_addr a;
+ __u8 state;
+ __u8 valid;
+};
+
+#define SCTP_ADDRESS_TICK_DELAY 500
+
+/* This structure holds lists of chunks as we are assembling for
+ * transmission.
+ */
+struct sctp_packet {
+ /* These are the SCTP header values (host order) for the packet. */
+ __u16 source_port;
+ __u16 destination_port;
+ __u32 vtag;
+
+ /* This contains the payload chunks. */
+ struct list_head chunk_list;
+
+ /* This is the overhead of the sctp and ip headers. */
+ size_t overhead;
+ /* This is the total size of all chunks INCLUDING padding. */
+ size_t size;
+ /* This is the maximum size this packet may have */
+ size_t max_size;
+
+ /* The packet is destined for this transport address.
+ * The function we finally use to pass down to the next lower
+ * layer lives in the transport structure.
+ */
+ struct sctp_transport *transport;
+
+ /* pointer to the auth chunk for this packet */
+ struct sctp_chunk *auth;
+
+ u8 has_cookie_echo:1, /* This packet contains a COOKIE-ECHO chunk. */
+ has_sack:1, /* This packet contains a SACK chunk. */
+ has_auth:1, /* This packet contains an AUTH chunk */
+ has_data:1, /* This packet contains at least 1 DATA chunk */
+ ipfragok:1; /* So let ip fragment this packet */
+};
+
+void sctp_packet_init(struct sctp_packet *, struct sctp_transport *,
+ __u16 sport, __u16 dport);
+void sctp_packet_config(struct sctp_packet *, __u32 vtag, int);
+enum sctp_xmit sctp_packet_transmit_chunk(struct sctp_packet *packet,
+ struct sctp_chunk *chunk,
+ int one_packet, gfp_t gfp);
+enum sctp_xmit sctp_packet_append_chunk(struct sctp_packet *packet,
+ struct sctp_chunk *chunk);
+int sctp_packet_transmit(struct sctp_packet *, gfp_t);
+void sctp_packet_free(struct sctp_packet *);
+
+static inline int sctp_packet_empty(struct sctp_packet *packet)
+{
+ return packet->size == packet->overhead;
+}
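+
+/* Illustrative sketch (not part of the original header): the usual life
+ * cycle of an sctp_packet is init -> config -> transmit_chunk -> transmit.
+ * Ports, vtag and the ECN-capable flag come from the caller; error handling
+ * is elided and the helper name is hypothetical.
+ */
+static inline int sctp_packet_example_send_one(struct sctp_packet *pkt,
+ struct sctp_transport *t,
+ struct sctp_chunk *chunk,
+ __u16 sport, __u16 dport,
+ __u32 vtag, gfp_t gfp)
+{
+ sctp_packet_init(pkt, t, sport, dport);
+ sctp_packet_config(pkt, vtag, 0);
+ sctp_packet_transmit_chunk(pkt, chunk, 1, gfp);
+ return sctp_packet_transmit(pkt, gfp);
+}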
+
+/* This represents a remote transport address.
+ * For local transport addresses, we just use union sctp_addr.
+ *
+ * RFC2960 Section 1.4 Key Terms
+ *
+ * o Transport address: A Transport Address is traditionally defined
+ * by Network Layer address, Transport Layer protocol and Transport
+ * Layer port number. In the case of SCTP running over IP, a
+ * transport address is defined by the combination of an IP address
+ * and an SCTP port number (where SCTP is the Transport protocol).
+ *
+ * RFC2960 Section 7.1 SCTP Differences from TCP Congestion control
+ *
+ * o The sender keeps a separate congestion control parameter set for
+ * each of the destination addresses it can send to (not each
+ * source-destination pair but for each destination). The parameters
+ * should decay if the address is not used for a long enough time
+ * period.
+ *
+ */
+struct sctp_transport {
+ /* A list of transports. */
+ struct list_head transports;
+ struct rhlist_head node;
+
+ /* Reference counting. */
+ refcount_t refcnt;
+ /* RTO-Pending : A flag used to track if one of the DATA
+ * chunks sent to this address is currently being
+ * used to compute a RTT. If this flag is 0,
+ * the next DATA chunk sent to this destination
+ * should be used to compute a RTT and this flag
+ * should be set. Every time the RTT
+ * calculation completes (i.e. the DATA chunk
+ * is SACK'd) clear this flag.
+ */
+ __u32 rto_pending:1,
+
+ /*
+ * hb_sent : a flag that signals that we have a pending
+ * heartbeat.
+ */
+ hb_sent:1,
+
+ /* Is the Path MTU update pending on this transport? */
+ pmtu_pending:1,
+
+ dst_pending_confirm:1, /* need to confirm neighbour */
+
+ /* Has this transport moved the ctsn since we last sacked */
+ sack_generation:1;
+ u32 dst_cookie;
+
+ struct flowi fl;
+
+ /* This is the peer's IP address and port. */
+ union sctp_addr ipaddr;
+
+ /* These are the functions we call to handle LLP stuff. */
+ struct sctp_af *af_specific;
+
+ /* Which association do we belong to? */
+ struct sctp_association *asoc;
+
+ /* RFC2960
+ *
+ * 12.3 Per Transport Address Data
+ *
+ * For each destination transport address in the peer's
+ * address list derived from the INIT or INIT ACK chunk, a
+ * number of data elements needs to be maintained including:
+ */
+ /* RTO : The current retransmission timeout value. */
+ unsigned long rto;
+
+ __u32 rtt; /* This is the most recent RTT. */
+
+ /* RTTVAR : The current RTT variation. */
+ __u32 rttvar;
+
+ /* SRTT : The current smoothed round trip time. */
+ __u32 srtt;
+
+ /*
+ * These are the congestion stats.
+ */
+ /* cwnd : The current congestion window. */
+ __u32 cwnd; /* This is the actual cwnd. */
+
+ /* ssthresh : The current slow start threshold value. */
+ __u32 ssthresh;
+
+ /* partial : The tracking method for increase of cwnd when in
+ * bytes acked : congestion avoidance mode (see Section 6.2.2)
+ */
+ __u32 partial_bytes_acked;
+
+ /* Data that has been sent, but not acknowledged. */
+ __u32 flight_size;
+
+ __u32 burst_limited; /* Holds old cwnd when max.burst is applied */
+
+ /* Destination */
+ struct dst_entry *dst;
+ /* Source address. */
+ union sctp_addr saddr;
+
+ /* Heartbeat interval: The endpoint sends out a Heartbeat chunk to
+ * the destination address every heartbeat interval.
+ */
+ unsigned long hbinterval;
+
+ /* SACK delay timeout */
+ unsigned long sackdelay;
+ __u32 sackfreq;
+
+ atomic_t mtu_info;
+
+ /* When was the last time that we heard from this transport? We use
+ * this to pick new active and retran paths.
+ */
+ ktime_t last_time_heard;
+
+ /* When was the last time that we sent a chunk using this
+ * transport? We use this to check for idle transports
+ */
+ unsigned long last_time_sent;
+
+ /* Last time(in jiffies) when cwnd is reduced due to the congestion
+ * indication based on ECNE chunk.
+ */
+ unsigned long last_time_ecne_reduced;
+
+ /* This is the max_retrans value for the transport and will
+ * be initialized from the assocs value. This can be changed
+ * using the SCTP_SET_PEER_ADDR_PARAMS socket option.
+ */
+ __u16 pathmaxrxt;
+
+ __u32 flowlabel;
+ __u8 dscp;
+
+ /* This is the partially failed retrans value for the transport
+ * and will be initialized from the assocs value. This can be changed
+ * using the SCTP_PEER_ADDR_THLDS socket option
+ */
+ __u16 pf_retrans;
+ /* Used for primary path switchover. */
+ __u16 ps_retrans;
+ /* PMTU : The current known path MTU. */
+ __u32 pathmtu;
+
+ /* Flags controlling Heartbeat, SACK delay, and Path MTU Discovery. */
+ __u32 param_flags;
+
+ /* The number of times INIT has been sent on this transport. */
+ int init_sent_count;
+
+ /* state : The current state of this destination,
+ * : i.e. SCTP_ACTIVE, SCTP_INACTIVE, SCTP_UNKNOWN.
+ */
+ int state;
+
+ /* These are the error stats for this destination. */
+
+ /* Error count : The current error count for this destination. */
+ unsigned short error_count;
+
+ /* Per : A timer used by each destination.
+ * Destination :
+ * Timer :
+ *
+ * [Everywhere else in the text this is called T3-rtx. -ed]
+ */
+ struct timer_list T3_rtx_timer;
+
+ /* Heartbeat timer is per destination. */
+ struct timer_list hb_timer;
+
+ /* Timer to handle ICMP proto unreachable events */
+ struct timer_list proto_unreach_timer;
+
+ /* Timer to handle RECONF chunk retransmission */
+ struct timer_list reconf_timer;
+
+ /* Since we're using per-destination retransmission timers
+ * (see above), we're also using per-destination "transmitted"
+ * queues. This probably ought to be a private struct
+ * accessible only within the outqueue, but it's not, yet.
+ */
+ struct list_head transmitted;
+
+ /* We build bundle-able packets for this transport here. */
+ struct sctp_packet packet;
+
+ /* This is the list of transports that have chunks to send. */
+ struct list_head send_ready;
+
+ /* State information saved for SFR_CACC algorithm. The key
+ * idea in SFR_CACC is to maintain state at the sender on a
+ * per-destination basis when a changeover happens.
+ * char changeover_active;
+ * char cycling_changeover;
+ * __u32 next_tsn_at_change;
+ * char cacc_saw_newack;
+ */
+ struct {
+ /* An unsigned integer, which stores the next TSN to be
+ * used by the sender, at the moment of changeover.
+ */
+ __u32 next_tsn_at_change;
+
+ /* A flag which indicates the occurrence of a changeover */
+ char changeover_active;
+
+ /* A flag which indicates whether the change of primary is
+ * the first switch to this destination address during an
+ * active switch.
+ */
+ char cycling_changeover;
+
+ /* A temporary flag, which is used during the processing of
+ * a SACK to estimate the causative TSN(s)'s group.
+ */
+ char cacc_saw_newack;
+ } cacc;
+
+ /* 64-bit random number sent with heartbeat. */
+ __u64 hb_nonce;
+
+ struct rcu_head rcu;
+};
+
+struct sctp_transport *sctp_transport_new(struct net *, const union sctp_addr *,
+ gfp_t);
+void sctp_transport_set_owner(struct sctp_transport *,
+ struct sctp_association *);
+void sctp_transport_route(struct sctp_transport *, union sctp_addr *,
+ struct sctp_sock *);
+void sctp_transport_pmtu(struct sctp_transport *, struct sock *sk);
+void sctp_transport_free(struct sctp_transport *);
+void sctp_transport_reset_t3_rtx(struct sctp_transport *);
+void sctp_transport_reset_hb_timer(struct sctp_transport *);
+void sctp_transport_reset_reconf_timer(struct sctp_transport *transport);
+int sctp_transport_hold(struct sctp_transport *);
+void sctp_transport_put(struct sctp_transport *);
+void sctp_transport_update_rto(struct sctp_transport *, __u32);
+void sctp_transport_raise_cwnd(struct sctp_transport *, __u32, __u32);
+void sctp_transport_lower_cwnd(struct sctp_transport *t,
+ enum sctp_lower_cwnd reason);
+void sctp_transport_burst_limited(struct sctp_transport *);
+void sctp_transport_burst_reset(struct sctp_transport *);
+unsigned long sctp_transport_timeout(struct sctp_transport *);
+void sctp_transport_reset(struct sctp_transport *t);
+bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu);
+void sctp_transport_immediate_rtx(struct sctp_transport *);
+void sctp_transport_dst_release(struct sctp_transport *t);
+void sctp_transport_dst_confirm(struct sctp_transport *t);
+
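+/* Illustrative sketch (not part of the original header): code that stashes
+ * a transport pointer typically pairs sctp_transport_hold(), which fails
+ * once the refcount has already dropped to zero, with sctp_transport_put()
+ * around its use. The helper name is hypothetical.
+ */
+static inline void sctp_transport_example_poke(struct sctp_transport *t)
+{
+ if (!sctp_transport_hold(t))
+ return; /* transport is already being freed */
+ sctp_transport_reset_hb_timer(t);
+ sctp_transport_put(t);
+}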
+
+/* This is the structure we use to queue packets as they come into
+ * SCTP. We write packets to it and read chunks from it.
+ */
+struct sctp_inq {
+ /* This is actually a queue of sctp_chunks, each
+ * containing a partially decoded packet.
+ */
+ struct list_head in_chunk_list;
+ /* This is the packet which is currently off the in queue and is
+ * being worked on through the inbound chunk processing.
+ */
+ struct sctp_chunk *in_progress;
+
+ /* This is the delayed task to finish delivering inbound
+ * messages.
+ */
+ struct work_struct immediate;
+};
+
+void sctp_inq_init(struct sctp_inq *);
+void sctp_inq_free(struct sctp_inq *);
+void sctp_inq_push(struct sctp_inq *, struct sctp_chunk *packet);
+struct sctp_chunk *sctp_inq_pop(struct sctp_inq *);
+struct sctp_chunkhdr *sctp_inq_peek(struct sctp_inq *);
+void sctp_inq_set_th_handler(struct sctp_inq *, work_func_t);
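+
+/* Illustrative sketch (not part of the original header): sctp_inq_peek()
+ * lets a consumer look at the type of the next bundled chunk without
+ * dequeuing it. The helper name is hypothetical.
+ */
+static inline bool sctp_inq_example_next_is_data(struct sctp_inq *inq)
+{
+ struct sctp_chunkhdr *ch = sctp_inq_peek(inq);
+
+ return ch && ch->type == SCTP_CID_DATA;
+}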
+
+/* This is the structure we use to hold outbound chunks. You push
+ * chunks in and they automatically pop out the other end as bundled
+ * packets (it calls (*output_handler)()).
+ *
+ * This structure covers sections 6.3, 6.4, 6.7, 6.8, 6.10, 7., 8.1,
+ * and 8.2 of the v13 draft.
+ *
+ * It handles retransmissions. The connection to the timeout portion
+ * of the state machine is through sctp_..._timeout() and timeout_handler.
+ *
+ * If you feed it SACKs, it will eat them.
+ *
+ * If you give it big chunks, it will fragment them.
+ *
+ * It assigns TSN's to data chunks. This happens at the last possible
+ * instant before transmission.
+ *
+ * When free()'d, it empties itself out via output_handler().
+ */
+struct sctp_outq {
+ struct sctp_association *asoc;
+
+ /* Data pending that has never been transmitted. */
+ struct list_head out_chunk_list;
+
+ /* Stream scheduler being used */
+ struct sctp_sched_ops *sched;
+
+ unsigned int out_qlen; /* Total length of queued data chunks. */
+
+ /* Error from a failed send; may be used in the SCTP_SEND_FAILED event. */
+ unsigned int error;
+
+ /* These are control chunks we want to send. */
+ struct list_head control_chunk_list;
+
+ /* These are chunks that have been sacked but are above the
+ * CTSN, or cumulative tsn ack point.
+ */
+ struct list_head sacked;
+
+ /* Put chunks on this list to schedule them for
+ * retransmission.
+ */
+ struct list_head retransmit;
+
+ /* Put chunks on this list to save them for FWD TSN processing as
+ * they were abandoned.
+ */
+ struct list_head abandoned;
+
+ /* How many unacked bytes do we have in flight? */
+ __u32 outstanding_bytes;
+
+ /* Are we doing fast-rtx on this queue */
+ char fast_rtx;
+
+ /* Corked? */
+ char cork;
+};
+
+void sctp_outq_init(struct sctp_association *, struct sctp_outq *);
+void sctp_outq_teardown(struct sctp_outq *);
+void sctp_outq_free(struct sctp_outq*);
+void sctp_outq_tail(struct sctp_outq *, struct sctp_chunk *chunk, gfp_t);
+int sctp_outq_sack(struct sctp_outq *, struct sctp_chunk *);
+int sctp_outq_is_empty(const struct sctp_outq *);
+void sctp_outq_restart(struct sctp_outq *);
+
+void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
+ enum sctp_retransmit_reason reason);
+void sctp_retransmit_mark(struct sctp_outq *, struct sctp_transport *, __u8);
+void sctp_outq_uncork(struct sctp_outq *, gfp_t gfp);
+void sctp_prsctp_prune(struct sctp_association *asoc,
+ struct sctp_sndrcvinfo *sinfo, int msg_len);
+void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
+/* Cork the outqueue: hold back chunks until sctp_outq_uncork() flushes them. */
+static inline void sctp_outq_cork(struct sctp_outq *q)
+{
+ q->cork = 1;
+}
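+
+/* Illustrative sketch (not part of the original header): callers typically
+ * bracket a burst of sctp_outq_tail() calls with cork/uncork so the chunks
+ * are bundled into as few packets as possible. The helper name is
+ * hypothetical.
+ */
+static inline void sctp_outq_example_send_bundled(struct sctp_outq *q,
+ struct sctp_chunk *chunk,
+ gfp_t gfp)
+{
+ sctp_outq_cork(q);
+ sctp_outq_tail(q, chunk, gfp);
+ sctp_outq_uncork(q, gfp);
+}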
+
+/* SCTP skb control block.
+ * sctp_input_cb is currently used on rx and sock rx queue
+ */
+struct sctp_input_cb {
+ union {
+ struct inet_skb_parm h4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct inet6_skb_parm h6;
+#endif
+ } header;
+ struct sctp_chunk *chunk;
+ struct sctp_af *af;
+};
+#define SCTP_INPUT_CB(__skb) ((struct sctp_input_cb *)&((__skb)->cb[0]))
+
+struct sctp_output_cb {
+ struct sk_buff *last;
+};
+#define SCTP_OUTPUT_CB(__skb) ((struct sctp_output_cb *)&((__skb)->cb[0]))
+
+static inline const struct sk_buff *sctp_gso_headskb(const struct sk_buff *skb)
+{
+ const struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
+
+ return chunk->head_skb ? : skb;
+}
+
+/* These bind address data fields are common to endpoints and associations. */
+struct sctp_bind_addr {
+
+ /* RFC 2960 12.1 Parameters necessary for the SCTP instance
+ *
+ * SCTP Port: The local SCTP port number the endpoint is
+ * bound to.
+ */
+ __u16 port;
+
+ /* RFC 2960 12.1 Parameters necessary for the SCTP instance
+ *
+ * Address List: The list of IP addresses that this instance
+ * has bound. This information is passed to one's
+ * peer(s) in INIT and INIT ACK chunks.
+ */
+ struct list_head address_list;
+};
+
+void sctp_bind_addr_init(struct sctp_bind_addr *, __u16 port);
+void sctp_bind_addr_free(struct sctp_bind_addr *);
+int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest,
+ const struct sctp_bind_addr *src,
+ enum sctp_scope scope, gfp_t gfp,
+ int flags);
+int sctp_bind_addr_dup(struct sctp_bind_addr *dest,
+ const struct sctp_bind_addr *src,
+ gfp_t gfp);
+int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *,
+ int new_size, __u8 addr_state, gfp_t gfp);
+int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *);
+int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *,
+ struct sctp_sock *);
+int sctp_bind_addr_conflict(struct sctp_bind_addr *, const union sctp_addr *,
+ struct sctp_sock *, struct sctp_sock *);
+int sctp_bind_addr_state(const struct sctp_bind_addr *bp,
+ const union sctp_addr *addr);
+int sctp_bind_addrs_check(struct sctp_sock *sp,
+ struct sctp_sock *sp2, int cnt2);
+union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp,
+ const union sctp_addr *addrs,
+ int addrcnt,
+ struct sctp_sock *opt);
+union sctp_params sctp_bind_addrs_to_raw(const struct sctp_bind_addr *bp,
+ int *addrs_len,
+ gfp_t gfp);
+int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw, int len,
+ __u16 port, gfp_t gfp);
+
+enum sctp_scope sctp_scope(const union sctp_addr *addr);
+int sctp_in_scope(struct net *net, const union sctp_addr *addr,
+ const enum sctp_scope scope);
+int sctp_is_any(struct sock *sk, const union sctp_addr *addr);
+int sctp_is_ep_boundall(struct sock *sk);
+
+
+/* What type of endpoint? */
+enum sctp_endpoint_type {
+ SCTP_EP_TYPE_SOCKET,
+ SCTP_EP_TYPE_ASSOCIATION,
+};
+
+/*
+ * A common base class to bridge the implementation view of a
+ * socket (usually listening) endpoint versus an association's
+ * local endpoint.
+ * This common structure is useful for several purposes:
+ * 1) Common interface for lookup routines.
+ * a) Subfunctions work for either endpoint or association
+ * b) Single interface to lookup allows hiding the lookup lock rather
+ * than acquiring it externally.
+ * 2) Common interface for the inbound chunk handling/state machine.
+ * 3) Common object handling routines for reference counting, etc.
+ * 4) Disentangle association lookup from endpoint lookup, where we
+ * do not have to find our endpoint to find our association.
+ *
+ */
+
+struct sctp_ep_common {
+ /* Fields to help us manage our entries in the hash tables. */
+ struct hlist_node node;
+ int hashent;
+
+ /* Runtime type information. What kind of endpoint is this? */
+ enum sctp_endpoint_type type;
+
+ /* Some fields to help us manage this object.
+ * refcnt - Reference count access to this object.
+ * dead - Do not attempt to use this object.
+ */
+ refcount_t refcnt;
+ bool dead;
+
+ /* What socket does this endpoint belong to? */
+ struct sock *sk;
+
+ /* Cache netns and it won't change once set */
+ struct net *net;
+
+ /* This is where we receive inbound chunks. */
+ struct sctp_inq inqueue;
+
+ /* This substructure includes the defining parameters of the
+ * endpoint:
+ * bind_addr.port is our shared port number.
+ * bind_addr.address_list is our set of local IP addresses.
+ */
+ struct sctp_bind_addr bind_addr;
+};
+
+
+/* RFC Section 1.4 Key Terms
+ *
+ * o SCTP endpoint: The logical sender/receiver of SCTP packets. On a
+ * multi-homed host, an SCTP endpoint is represented to its peers as a
+ * combination of a set of eligible destination transport addresses to
+ * which SCTP packets can be sent and a set of eligible source
+ * transport addresses from which SCTP packets can be received.
+ * All transport addresses used by an SCTP endpoint must use the
+ * same port number, but can use multiple IP addresses. A transport
+ * address used by an SCTP endpoint must not be used by another
+ * SCTP endpoint. In other words, a transport address is unique
+ * to an SCTP endpoint.
+ *
+ * From an implementation perspective, each socket has one of these.
+ * A TCP-style socket will have exactly one association on one of
+ * these. A UDP-style socket will have multiple associations hanging
+ * off one of these.
+ */
+
+struct sctp_endpoint {
+ /* Common substructure for endpoint and association. */
+ struct sctp_ep_common base;
+
+ /* Associations: A list of current associations and mappings
+ * to the data consumers for each association. This
+ * may be in the form of a hash table or other
+ * implementation dependent structure. The data
+ * consumers may be process identification
+ * information such as file descriptors, named pipe
+ * pointer, or table pointers dependent on how SCTP
+ * is implemented.
+ */
+ /* This is really a list of struct sctp_association entries. */
+ struct list_head asocs;
+
+ /* Secret Key: A secret key used by this endpoint to compute
+ * the MAC. This SHOULD be a cryptographic quality
+ * random number with a sufficient length.
+ * Discussion in [RFC1750] can be helpful in
+ * selection of the key.
+ */
+ __u8 secret_key[SCTP_SECRET_SIZE];
+
+ /* digest: This is a digest of the sctp cookie. This field is
+ * only used on the receive path when we try to validate
+ * that the cookie has not been tampered with. We put
+ * it here so that we pre-allocate it once and can re-use
+ * it on every receive.
+ */
+ __u8 *digest;
+
+ /* sendbuf acct. policy. */
+ __u32 sndbuf_policy;
+
+ /* rcvbuf acct. policy. */
+ __u32 rcvbuf_policy;
+
+ /* SCTP AUTH: array of the HMACs that will be allocated
+ * we need this per association so that we don't serialize
+ */
+ struct crypto_shash **auth_hmacs;
+
+ /* SCTP-AUTH: hmacs for the endpoint encoded into parameter */
+ struct sctp_hmac_algo_param *auth_hmacs_list;
+
+ /* SCTP-AUTH: chunks to authenticate encoded into parameter */
+ struct sctp_chunks_param *auth_chunk_list;
+
+ /* SCTP-AUTH: endpoint shared keys */
+ struct list_head endpoint_shared_keys;
+ __u16 active_key_id;
+ __u8 ecn_enable:1,
+ auth_enable:1,
+ intl_enable:1,
+ prsctp_enable:1,
+ asconf_enable:1,
+ reconf_enable:1;
+
+ __u8 strreset_enable;
+
+ /* Security identifiers from incoming (INIT). These are set by
+ * security_sctp_assoc_request(). These will only be used by
+ * SCTP TCP type sockets and peeled off connections as they
+ * cause a new socket to be generated. security_sctp_sk_clone()
+ * will then plug these into the new socket.
+ */
+
+ u32 secid;
+ u32 peer_secid;
+ struct rcu_head rcu;
+};
+
+/* Recover the outer endpoint structure. */
+static inline struct sctp_endpoint *sctp_ep(struct sctp_ep_common *base)
+{
+ struct sctp_endpoint *ep;
+
+ ep = container_of(base, struct sctp_endpoint, base);
+ return ep;
+}
+
+/* These are function signatures for manipulating endpoints. */
+struct sctp_endpoint *sctp_endpoint_new(struct sock *, gfp_t);
+void sctp_endpoint_free(struct sctp_endpoint *);
+void sctp_endpoint_put(struct sctp_endpoint *);
+int sctp_endpoint_hold(struct sctp_endpoint *ep);
+void sctp_endpoint_add_asoc(struct sctp_endpoint *, struct sctp_association *);
+struct sctp_association *sctp_endpoint_lookup_assoc(
+ const struct sctp_endpoint *ep,
+ const union sctp_addr *paddr,
+ struct sctp_transport **);
+bool sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
+ const union sctp_addr *paddr);
+struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *,
+ struct net *, const union sctp_addr *);
+bool sctp_has_association(struct net *net, const union sctp_addr *laddr,
+ const union sctp_addr *paddr);
+
+int sctp_verify_init(struct net *net, const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ enum sctp_cid cid, struct sctp_init_chunk *peer_init,
+ struct sctp_chunk *chunk, struct sctp_chunk **err_chunk);
+int sctp_process_init(struct sctp_association *, struct sctp_chunk *chunk,
+ const union sctp_addr *peer,
+ struct sctp_init_chunk *init, gfp_t gfp);
+__u32 sctp_generate_tag(const struct sctp_endpoint *);
+__u32 sctp_generate_tsn(const struct sctp_endpoint *);
+
+struct sctp_inithdr_host {
+ __u32 init_tag;
+ __u32 a_rwnd;
+ __u16 num_outbound_streams;
+ __u16 num_inbound_streams;
+ __u32 initial_tsn;
+};
+
+struct sctp_stream_priorities {
+ /* List of priorities scheduled */
+ struct list_head prio_sched;
+ /* List of streams scheduled */
+ struct list_head active;
+ /* The next stream in line */
+ struct sctp_stream_out_ext *next;
+ __u16 prio;
+ __u16 users;
+};
+
+struct sctp_stream_out_ext {
+ __u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1];
+ __u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1];
+ struct list_head outq; /* chunks enqueued by this stream */
+ union {
+ struct {
+ /* Scheduled streams list */
+ struct list_head prio_list;
+ struct sctp_stream_priorities *prio_head;
+ };
+ /* Fields used by RR scheduler */
+ struct {
+ struct list_head rr_list;
+ };
+ };
+};
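
The prio_list/prio_head and rr_list members above only come into play once a non-default stream scheduler is selected for the association. From user space that is done with the SCTP_STREAM_SCHEDULER and SCTP_STREAM_SCHEDULER_VALUE socket options; a hedged sketch, assuming a kernel with stream-scheduler support (4.15+), the uapi definitions from <linux/sctp.h>, and an already-connected one-to-one socket fd:

#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/sctp.h>   /* SCTP_SS_PRIO, sctp_assoc_value, sctp_stream_value */
#include <stdio.h>

static int use_prio_scheduler(int fd)
{
	struct sctp_assoc_value av = {
		.assoc_id    = 0,             /* current assoc on a 1-to-1 socket */
		.assoc_value = SCTP_SS_PRIO,  /* or SCTP_SS_RR for round robin */
	};
	struct sctp_stream_value sv = {
		.assoc_id     = 0,
		.stream_id    = 1,
		.stream_value = 0,            /* per-stream priority value */
	};

	if (setsockopt(fd, IPPROTO_SCTP, SCTP_STREAM_SCHEDULER, &av, sizeof(av)) < 0) {
		perror("SCTP_STREAM_SCHEDULER");
		return -1;
	}
	if (setsockopt(fd, IPPROTO_SCTP, SCTP_STREAM_SCHEDULER_VALUE, &sv, sizeof(sv)) < 0) {
		perror("SCTP_STREAM_SCHEDULER_VALUE");
		return -1;
	}
	return 0;
}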
+
+struct sctp_stream_out {
+ union {
+ __u32 mid;
+ __u16 ssn;
+ };
+ __u32 mid_uo;
+ struct sctp_stream_out_ext *ext;
+ __u8 state;
+};
+
+struct sctp_stream_in {
+ union {
+ __u32 mid;
+ __u16 ssn;
+ };
+ __u32 mid_uo;
+ __u32 fsn;
+ __u32 fsn_uo;
+ char pd_mode;
+ char pd_mode_uo;
+};
+
+struct sctp_stream {
+ GENRADIX(struct sctp_stream_out) out;
+ GENRADIX(struct sctp_stream_in) in;
+
+ __u16 outcnt;
+ __u16 incnt;
+ /* Current stream being sent, if any */
+ struct sctp_stream_out *out_curr;
+ union {
+ /* Fields used by priority scheduler */
+ struct {
+ /* List of priorities scheduled */
+ struct list_head prio_list;
+ };
+ /* Fields used by RR scheduler */
+ struct {
+ /* List of streams scheduled */
+ struct list_head rr_list;
+ /* The next stream in line */
+ struct sctp_stream_out_ext *rr_next;
+ };
+ };
+ struct sctp_stream_interleave *si;
+};
+
+static inline struct sctp_stream_out *sctp_stream_out(
+ struct sctp_stream *stream,
+ __u16 sid)
+{
+ return genradix_ptr(&stream->out, sid);
+}
+
+static inline struct sctp_stream_in *sctp_stream_in(
+ struct sctp_stream *stream,
+ __u16 sid)
+{
+ return genradix_ptr(&stream->in, sid);
+}
+
+#define SCTP_SO(s, i) sctp_stream_out((s), (i))
+#define SCTP_SI(s, i) sctp_stream_in((s), (i))
+
+#define SCTP_STREAM_CLOSED 0x00
+#define SCTP_STREAM_OPEN 0x01
+
+static inline __u16 sctp_datachk_len(const struct sctp_stream *stream)
+{
+ return stream->si->data_chunk_len;
+}
+
+static inline __u16 sctp_datahdr_len(const struct sctp_stream *stream)
+{
+ return stream->si->data_chunk_len - sizeof(struct sctp_chunkhdr);
+}
+
+static inline __u16 sctp_ftsnchk_len(const struct sctp_stream *stream)
+{
+ return stream->si->ftsn_chunk_len;
+}
+
+static inline __u16 sctp_ftsnhdr_len(const struct sctp_stream *stream)
+{
+ return stream->si->ftsn_chunk_len - sizeof(struct sctp_chunkhdr);
+}
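
With the wire formats from include/linux/sctp.h, the DATA cases above work out to fixed values: a plain DATA chunk header is 4 bytes of chunk header plus 12 bytes of DATA header (16 total, so the data header length is 12), while an I-DATA chunk header is 4 + 16 = 20 bytes. A small stand-alone check of that arithmetic, using mirror structs rather than the kernel headers:

#include <stdint.h>

/* Mirrors of the wire-format headers (assumption: same field layout as
 * include/linux/sctp.h; fields are big-endian on the wire).
 */
struct chunkhdr { uint8_t type, flags; uint16_t length; } __attribute__((packed));

struct datahdr {                 /* DATA, RFC 4960 */
	uint32_t tsn;
	uint16_t stream, ssn;
	uint32_t ppid;
} __attribute__((packed));

struct idatahdr {                /* I-DATA, RFC 8260 */
	uint32_t tsn;
	uint16_t stream, reserved;
	uint32_t mid;
	uint32_t ppid_or_fsn;
} __attribute__((packed));

_Static_assert(sizeof(struct chunkhdr) + sizeof(struct datahdr)  == 16, "DATA chunk header");
_Static_assert(sizeof(struct chunkhdr) + sizeof(struct idatahdr) == 20, "I-DATA chunk header");

int main(void) { return 0; }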
+
+/* SCTP_GET_ASSOC_STATS counters */
+struct sctp_priv_assoc_stats {
+	/* Maximum observed rto in the association during subsequent
+	 * observations. Value is set to 0 if no RTO measurement took place.
+	 * The transport where the max_rto was observed is returned in
+	 * obs_rto_ipaddr.
+	 */
+ struct sockaddr_storage obs_rto_ipaddr;
+ __u64 max_obs_rto;
+ /* Total In and Out SACKs received and sent */
+ __u64 isacks;
+ __u64 osacks;
+ /* Total In and Out packets received and sent */
+ __u64 opackets;
+ __u64 ipackets;
+ /* Total retransmitted chunks */
+ __u64 rtxchunks;
+ /* TSN received > next expected */
+ __u64 outofseqtsns;
+ /* Duplicate Chunks received */
+ __u64 idupchunks;
+ /* Gap Ack Blocks received */
+ __u64 gapcnt;
+ /* Unordered data chunks sent and received */
+ __u64 ouodchunks;
+ __u64 iuodchunks;
+ /* Ordered data chunks sent and received */
+ __u64 oodchunks;
+ __u64 iodchunks;
+ /* Control chunks sent and received */
+ __u64 octrlchunks;
+ __u64 ictrlchunks;
+};
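
These counters surface to user space through the SCTP_GET_ASSOC_STATS socket option as a struct sctp_assoc_stats (sas_maxrto, sas_isacks, sas_rtxchunks, and so on). A minimal sketch, assuming a connected one-to-one SCTP socket fd and the uapi definitions from <linux/sctp.h>:

#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/sctp.h>   /* struct sctp_assoc_stats, SCTP_GET_ASSOC_STATS */
#include <stdio.h>
#include <string.h>

static void dump_assoc_stats(int fd)
{
	struct sctp_assoc_stats st;
	socklen_t len = sizeof(st);

	memset(&st, 0, sizeof(st));
	st.sas_assoc_id = 0;   /* current assoc on a 1-to-1 socket */

	if (getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_STATS, &st, &len) < 0) {
		perror("SCTP_GET_ASSOC_STATS");
		return;
	}
	printf("retransmitted chunks: %llu\n",
	       (unsigned long long)st.sas_rtxchunks);
	printf("SACKs in/out: %llu/%llu, packets in/out: %llu/%llu\n",
	       (unsigned long long)st.sas_isacks,
	       (unsigned long long)st.sas_osacks,
	       (unsigned long long)st.sas_ipackets,
	       (unsigned long long)st.sas_opackets);
}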
+
+/* RFC2960
+ *
+ * 12. Recommended Transmission Control Block (TCB) Parameters
+ *
+ * This section details a recommended set of parameters that should
+ * be contained within the TCB for an implementation. This section is
+ * for illustrative purposes and should not be deemed as requirements
+ * on an implementation or as an exhaustive list of all parameters
+ * inside an SCTP TCB. Each implementation may need its own additional
+ * parameters for optimization.
+ */
+
+
+/* Here we have information about each individual association. */
+struct sctp_association {
+
+ /* A base structure common to endpoint and association.
+	 * In this context, it represents the association's view
+ * of the local endpoint of the association.
+ */
+ struct sctp_ep_common base;
+
+ /* Associations on the same socket. */
+ struct list_head asocs;
+
+ /* association id. */
+ sctp_assoc_t assoc_id;
+
+ /* This is our parent endpoint. */
+ struct sctp_endpoint *ep;
+
+ /* These are those association elements needed in the cookie. */
+ struct sctp_cookie c;
+
+ /* This is all information about our peer. */
+ struct {
+ /* transport_addr_list
+ *
+ * Peer : A list of SCTP transport addresses that the
+ * Transport : peer is bound to. This information is derived
+ * Address : from the INIT or INIT ACK and is used to
+ * List : associate an inbound packet with a given
+ * : association. Normally this information is
+ * : hashed or keyed for quick lookup and access
+ * : of the TCB.
+ * : The list is also initialized with the list
+ * : of addresses passed with the sctp_connectx()
+ * : call.
+ *
+		 * It is a list of struct sctp_transport entries.
+ */
+ struct list_head transport_addr_list;
+
+ /* rwnd
+ *
+ * Peer Rwnd : Current calculated value of the peer's rwnd.
+ */
+ __u32 rwnd;
+
+ /* transport_count
+ *
+ * Peer : A count of the number of peer addresses
+ * Transport : in the Peer Transport Address List.
+ * Address :
+ * Count :
+ */
+ __u16 transport_count;
+
+ /* port
+ * The transport layer port number.
+ */
+ __u16 port;
+
+ /* primary_path
+ *
+ * Primary : This is the current primary destination
+ * Path : transport address of the peer endpoint. It
+ * : may also specify a source transport address
+ * : on this endpoint.
+ *
+ * All of these paths live on transport_addr_list.
+ *
+ * At the bakeoffs, we discovered that the intent of
+ * primaryPath is that it only changes when the ULP
+ * asks to have it changed. We add the activePath to
+ * designate the connection we are currently using to
+ * transmit new data and most control chunks.
+ */
+ struct sctp_transport *primary_path;
+
+		/* Cache the primary path address here, for when we
+		 * need an address for msg_name.
+		 */
+ union sctp_addr primary_addr;
+
+ /* active_path
+ * The path that we are currently using to
+ * transmit new data and most control chunks.
+ */
+ struct sctp_transport *active_path;
+
+ /* retran_path
+ *
+ * RFC2960 6.4 Multi-homed SCTP Endpoints
+ * ...
+ * Furthermore, when its peer is multi-homed, an
+ * endpoint SHOULD try to retransmit a chunk to an
+ * active destination transport address that is
+ * different from the last destination address to
+ * which the DATA chunk was sent.
+ */
+ struct sctp_transport *retran_path;
+
+ /* Pointer to last transport I have sent on. */
+ struct sctp_transport *last_sent_to;
+
+ /* This is the last transport I have received DATA on. */
+ struct sctp_transport *last_data_from;
+
+ /*
+ * Mapping An array of bits or bytes indicating which out of
+ * Array order TSN's have been received (relative to the
+ * Last Rcvd TSN). If no gaps exist, i.e. no out of
+ * order packets have been received, this array
+ * will be set to all zero. This structure may be
+ * in the form of a circular buffer or bit array.
+ *
+ * Last Rcvd : This is the last TSN received in
+ * TSN : sequence. This value is set initially by
+ * : taking the peer's Initial TSN, received in
+ * : the INIT or INIT ACK chunk, and subtracting
+ * : one from it.
+ *
+ * Throughout most of the specification this is called the
+ * "Cumulative TSN ACK Point". In this case, we
+ * ignore the advice in 12.2 in favour of the term
+ * used in the bulk of the text. This value is hidden
+ * in tsn_map--we get it by calling sctp_tsnmap_get_ctsn().
+ */
+ struct sctp_tsnmap tsn_map;
+
+ /* This mask is used to disable sending the ASCONF chunk
+ * with specified parameter to peer.
+ */
+ __be16 addip_disabled_mask;
+
+ /* These are capabilities which our peer advertised. */
+ __u16 ecn_capable:1, /* Can peer do ECN? */
+ ipv4_address:1, /* Peer understands IPv4 addresses? */
+ ipv6_address:1, /* Peer understands IPv6 addresses? */
+ hostname_address:1, /* Peer understands DNS addresses? */
+ asconf_capable:1, /* Does peer support ADDIP? */
+ prsctp_capable:1, /* Can peer do PR-SCTP? */
+ reconf_capable:1, /* Can peer do RE-CONFIG? */
+ intl_capable:1, /* Can peer do INTERLEAVE */
+ auth_capable:1, /* Is peer doing SCTP-AUTH? */
+ /* sack_needed:
+ * This flag indicates if the next received
+ * packet is to be responded to with a
+ * SACK. This is initialized to 0. When a packet
+ * is received sack_cnt is incremented. If this value
+ * reaches 2 or more, a SACK is sent and the
+ * value is reset to 0. Note: This is used only
+ * when no DATA chunks are received out of
+ * order. When DATA chunks are out of order,
+ * SACK's are not delayed (see Section 6).
+ */
+ sack_needed:1, /* Do we need to sack the peer? */
+ sack_generation:1,
+ zero_window_announced:1;
+
+ __u32 sack_cnt;
+
+ __u32 adaptation_ind; /* Adaptation Code point. */
+
+ struct sctp_inithdr_host i;
+ void *cookie;
+ int cookie_len;
+
+ /* ADDIP Section 4.2 Upon reception of an ASCONF Chunk.
+		 * C1) ... 'Peer-Serial-Number'. This value MUST be initialized to the
+		 * Initial TSN Value minus 1.
+ */
+ __u32 addip_serial;
+
+		/* SCTP-AUTH: We need to know the peer's random number, hmac list
+ * and authenticated chunk list. All that is part of the
+ * cookie and these are just pointers to those locations
+ */
+ struct sctp_random_param *peer_random;
+ struct sctp_chunks_param *peer_chunks;
+ struct sctp_hmac_algo_param *peer_hmacs;
+ } peer;
+
+ /* State : A state variable indicating what state the
+ * : association is in, i.e. COOKIE-WAIT,
+ * : COOKIE-ECHOED, ESTABLISHED, SHUTDOWN-PENDING,
+ * : SHUTDOWN-SENT, SHUTDOWN-RECEIVED, SHUTDOWN-ACK-SENT.
+ *
+	 * Note: No "CLOSED" state is illustrated since if an
+ * association is "CLOSED" its TCB SHOULD be removed.
+ *
+ * In this implementation we DO have a CLOSED
+ * state which is used during initiation and shutdown.
+ *
+ * State takes values from SCTP_STATE_*.
+ */
+ enum sctp_state state;
+
+ /* Overall : The overall association error count.
+ * Error Count : [Clear this any time I get something.]
+ */
+ int overall_error_count;
+
+ /* The cookie life I award for any cookie. */
+ ktime_t cookie_life;
+
+ /* These are the association's initial, max, and min RTO values.
+ * These values will be initialized by system defaults, but can
+ * be modified via the SCTP_RTOINFO socket option.
+ */
+ unsigned long rto_initial;
+ unsigned long rto_max;
+ unsigned long rto_min;
+
+ /* Maximum number of new data packets that can be sent in a burst. */
+ int max_burst;
+
+ /* This is the max_retrans value for the association. This value will
+ * be initialized from system defaults, but can be
+ * modified by the SCTP_ASSOCINFO socket option.
+ */
+ int max_retrans;
+
+	/* This is the partially failed retrans threshold for the
+	 * association; new transports inherit this value. It can be
+	 * changed using the SCTP_PEER_ADDR_THLDS socket option.
+	 */
+ __u16 pf_retrans;
+ /* Used for primary path switchover. */
+ __u16 ps_retrans;
+
+ /* Maximum number of times the endpoint will retransmit INIT */
+ __u16 max_init_attempts;
+
+ /* How many times have we resent an INIT? */
+ __u16 init_retries;
+
+ /* The largest timeout or RTO value to use in attempting an INIT */
+ unsigned long max_init_timeo;
+
+ /* Heartbeat interval: The endpoint sends out a Heartbeat chunk to
+ * the destination address every heartbeat interval. This value
+ * will be inherited by all new transports.
+ */
+ unsigned long hbinterval;
+
+ /* This is the max_retrans value for new transports in the
+ * association.
+ */
+ __u16 pathmaxrxt;
+
+ __u32 flowlabel;
+ __u8 dscp;
+
+ /* Flag that path mtu update is pending */
+ __u8 pmtu_pending;
+
+ /* Association : The smallest PMTU discovered for all of the
+ * PMTU : peer's transport addresses.
+ */
+ __u32 pathmtu;
+
+ /* Flags controlling Heartbeat, SACK delay, and Path MTU Discovery. */
+ __u32 param_flags;
+
+ __u32 sackfreq;
+ /* SACK delay timeout */
+ unsigned long sackdelay;
+
+ unsigned long timeouts[SCTP_NUM_TIMEOUT_TYPES];
+ struct timer_list timers[SCTP_NUM_TIMEOUT_TYPES];
+
+ /* Transport to which SHUTDOWN chunk was last sent. */
+ struct sctp_transport *shutdown_last_sent_to;
+
+ /* Transport to which INIT chunk was last sent. */
+ struct sctp_transport *init_last_sent_to;
+
+ /* How many times have we resent a SHUTDOWN */
+ int shutdown_retries;
+
+ /* Next TSN : The next TSN number to be assigned to a new
+ * : DATA chunk. This is sent in the INIT or INIT
+ * : ACK chunk to the peer and incremented each
+ * : time a DATA chunk is assigned a TSN
+ * : (normally just prior to transmit or during
+ * : fragmentation).
+ */
+ __u32 next_tsn;
+
+ /*
+ * Last Rcvd : This is the last TSN received in sequence. This value
+ * TSN : is set initially by taking the peer's Initial TSN,
+ * : received in the INIT or INIT ACK chunk, and
+ * : subtracting one from it.
+ *
+ * Most of RFC 2960 refers to this as the Cumulative TSN Ack Point.
+ */
+
+ __u32 ctsn_ack_point;
+
+ /* PR-SCTP Advanced.Peer.Ack.Point */
+ __u32 adv_peer_ack_point;
+
+ /* Highest TSN that is acknowledged by incoming SACKs. */
+ __u32 highest_sacked;
+
+ /* TSN marking the fast recovery exit point */
+ __u32 fast_recovery_exit;
+
+ /* Flag to track the current fast recovery state */
+ __u8 fast_recovery;
+
+ /* The number of unacknowledged data chunks. Reported through
+ * the SCTP_STATUS sockopt.
+ */
+ __u16 unack_data;
+
+ /* The total number of data chunks that we've had to retransmit
+ * as the result of a T3 timer expiration
+ */
+ __u32 rtx_data_chunks;
+
+ /* This is the association's receive buffer space. This value is used
+ * to set a_rwnd field in an INIT or a SACK chunk.
+ */
+ __u32 rwnd;
+
+ /* This is the last advertised value of rwnd over a SACK chunk. */
+ __u32 a_rwnd;
+
+ /* Number of bytes by which the rwnd has slopped. The rwnd is allowed
+ * to slop over a maximum of the association's frag_point.
+ */
+ __u32 rwnd_over;
+
+	/* Keeps track of rwnd pressure. This happens when we have
+	 * a window, but no receive buffer (i.e. small packets). This one
+	 * is released slowly (1 PMTU at a time).
+	 */
+ __u32 rwnd_press;
+
+	/* This is the amount of send buffer space in use by the
+	 * association. It is accounted against the socket's sndbuf
+	 * limit (sk->sk_sndbuf).
+	 */
+ int sndbuf_used;
+
+ /* This is the amount of memory that this association has allocated
+ * in the receive path at any given time.
+ */
+ atomic_t rmem_alloc;
+
+ /* This is the wait queue head for send requests waiting on
+ * the association sndbuf space.
+ */
+ wait_queue_head_t wait;
+
+ /* The message size at which SCTP fragmentation will occur. */
+ __u32 frag_point;
+ __u32 user_frag;
+
+ /* Counter used to count INIT errors. */
+ int init_err_counter;
+
+ /* Count the number of INIT cycles (for doubling timeout). */
+ int init_cycle;
+
+ /* Default send parameters. */
+ __u16 default_stream;
+ __u16 default_flags;
+ __u32 default_ppid;
+ __u32 default_context;
+ __u32 default_timetolive;
+
+ /* Default receive parameters */
+ __u32 default_rcv_context;
+
+ /* Stream arrays */
+ struct sctp_stream stream;
+
+ /* All outbound chunks go through this structure. */
+ struct sctp_outq outqueue;
+
+ /* A smart pipe that will handle reordering and fragmentation,
+ * as well as handle passing events up to the ULP.
+ */
+ struct sctp_ulpq ulpq;
+
+ /* Last TSN that caused an ECNE Chunk to be sent. */
+ __u32 last_ecne_tsn;
+
+ /* Last TSN that caused a CWR Chunk to be sent. */
+ __u32 last_cwr_tsn;
+
+ /* How many duplicated TSNs have we seen? */
+ int numduptsns;
+
+ /* These are to support
+ * "SCTP Extensions for Dynamic Reconfiguration of IP Addresses
+ * and Enforcement of Flow and Message Limits"
+ * <draft-ietf-tsvwg-addip-sctp-02.txt>
+ * or "ADDIP" for short.
+ */
+
+
+
+ /* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks
+ *
+ * R1) One and only one ASCONF Chunk MAY be in transit and
+ * unacknowledged at any one time. If a sender, after sending
+ * an ASCONF chunk, decides it needs to transfer another
+ * ASCONF Chunk, it MUST wait until the ASCONF-ACK Chunk
+ * returns from the previous ASCONF Chunk before sending a
+ * subsequent ASCONF. Note this restriction binds each side,
+ * so at any time two ASCONF may be in-transit on any given
+ * association (one sent from each endpoint).
+ *
+ * [This is our one-and-only-one ASCONF in flight. If we do
+ * not have an ASCONF in flight, this is NULL.]
+ */
+ struct sctp_chunk *addip_last_asconf;
+
+ /* ADDIP Section 5.2 Upon reception of an ASCONF Chunk.
+ *
+	 * This is needed to implement items E1 - E4 of the updated
+ * spec. Here is the justification:
+ *
+ * Since the peer may bundle multiple ASCONF chunks toward us,
+ * we now need the ability to cache multiple ACKs. The section
+ * describes in detail how they are cached and cleaned up.
+ */
+ struct list_head asconf_ack_list;
+
+ /* These ASCONF chunks are waiting to be sent.
+ *
+	 * These chunks can't be pushed to outqueue until receiving
+ * ASCONF_ACK for the previous ASCONF indicated by
+ * addip_last_asconf, so as to guarantee that only one ASCONF
+ * is in flight at any time.
+ *
+ * ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks
+ *
+ * In defining the ASCONF Chunk transfer procedures, it is
+ * essential that these transfers MUST NOT cause congestion
+ * within the network. To achieve this, we place these
+ * restrictions on the transfer of ASCONF Chunks:
+ *
+ * R1) One and only one ASCONF Chunk MAY be in transit and
+ * unacknowledged at any one time. If a sender, after sending
+ * an ASCONF chunk, decides it needs to transfer another
+ * ASCONF Chunk, it MUST wait until the ASCONF-ACK Chunk
+ * returns from the previous ASCONF Chunk before sending a
+ * subsequent ASCONF. Note this restriction binds each side,
+ * so at any time two ASCONF may be in-transit on any given
+ * association (one sent from each endpoint).
+ *
+ *
+ * [I really think this is EXACTLY the sort of intelligence
+ * which already resides in sctp_outq. Please move this
+ * queue and its supporting logic down there. --piggy]
+ */
+ struct list_head addip_chunk_list;
+
+ /* ADDIP Section 4.1 ASCONF Chunk Procedures
+ *
+ * A2) A serial number should be assigned to the Chunk. The
+ * serial number SHOULD be a monotonically increasing
+ * number. The serial number SHOULD be initialized at
+ * the start of the association to the same value as the
+ * Initial TSN and every time a new ASCONF chunk is created
+ * it is incremented by one after assigning the serial number
+ * to the newly created chunk.
+ *
+ * ADDIP
+ * 3.1.1 Address/Stream Configuration Change Chunk (ASCONF)
+ *
+ * Serial Number : 32 bits (unsigned integer)
+ *
+ * This value represents a Serial Number for the ASCONF
+ * Chunk. The valid range of Serial Number is from 0 to
+ * 4294967295 (2^32 - 1). Serial Numbers wrap back to 0
+ * after reaching 4294967295.
+ */
+ __u32 addip_serial;
+ int src_out_of_asoc_ok;
+ union sctp_addr *asconf_addr_del_pending;
+ struct sctp_transport *new_transport;
+
+ /* SCTP AUTH: list of the endpoint shared keys. These
+	 * keys are provided out of band by the user application
+ * and can't change during the lifetime of the association
+ */
+ struct list_head endpoint_shared_keys;
+
+ /* SCTP AUTH:
+	 * The currently generated association shared key (secret)
+ */
+ struct sctp_auth_bytes *asoc_shared_key;
+ struct sctp_shared_key *shkey;
+
+ /* SCTP AUTH: hmac id of the first peer requested algorithm
+ * that we support.
+ */
+ __u16 default_hmac_id;
+
+ __u16 active_key_id;
+
+ __u8 need_ecne:1, /* Need to send an ECNE Chunk? */
+ temp:1, /* Is it a temporary association? */
+ pf_expose:2, /* Expose pf state? */
+ force_delay:1;
+
+ __u8 strreset_enable;
+ __u8 strreset_outstanding; /* request param count on the fly */
+
+ __u32 strreset_outseq; /* Update after receiving response */
+ __u32 strreset_inseq; /* Update after receiving request */
+ __u32 strreset_result[2]; /* save the results of last 2 responses */
+
+ struct sctp_chunk *strreset_chunk; /* save request chunk */
+
+ struct sctp_priv_assoc_stats stats;
+
+ int sent_cnt_removable;
+
+ __u16 subscribe;
+
+ __u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1];
+ __u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1];
+
+ struct rcu_head rcu;
+};
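
As noted in the comments above, the association's rto_initial, rto_max and rto_min are driven by the SCTP_RTOINFO socket option (values in milliseconds; on set, a value of 0 leaves the corresponding field unchanged). A hedged user-space sketch, assuming an lksctp-tools <netinet/sctp.h> and an SCTP socket fd:

#include <netinet/in.h>
#include <netinet/sctp.h>   /* struct sctp_rtoinfo, SCTP_RTOINFO */
#include <sys/socket.h>
#include <stdio.h>
#include <string.h>

static int tighten_rto(int fd)
{
	struct sctp_rtoinfo ri;
	socklen_t len = sizeof(ri);

	memset(&ri, 0, sizeof(ri));
	ri.srto_assoc_id = 0;   /* endpoint defaults / current 1-to-1 assoc */

	if (getsockopt(fd, IPPROTO_SCTP, SCTP_RTOINFO, &ri, &len) < 0) {
		perror("getsockopt(SCTP_RTOINFO)");
		return -1;
	}
	printf("rto: initial=%u min=%u max=%u (ms)\n",
	       ri.srto_initial, ri.srto_min, ri.srto_max);

	ri.srto_min = 200;      /* ms; 0 would mean "leave unchanged" */
	ri.srto_max = 10000;
	return setsockopt(fd, IPPROTO_SCTP, SCTP_RTOINFO, &ri, sizeof(ri));
}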
+
+
+/* An eyecatcher for determining if we are really looking at an
+ * association data structure.
+ */
+enum {
+ SCTP_ASSOC_EYECATCHER = 0xa550c123,
+};
+
+/* Recover the outer association structure. */
+static inline struct sctp_association *sctp_assoc(struct sctp_ep_common *base)
+{
+ struct sctp_association *asoc;
+
+ asoc = container_of(base, struct sctp_association, base);
+ return asoc;
+}
+
+/* These are function signatures for manipulating associations. */
+
+
+struct sctp_association *
+sctp_association_new(const struct sctp_endpoint *ep, const struct sock *sk,
+ enum sctp_scope scope, gfp_t gfp);
+void sctp_association_free(struct sctp_association *);
+void sctp_association_put(struct sctp_association *);
+void sctp_association_hold(struct sctp_association *);
+
+struct sctp_transport *sctp_assoc_choose_alter_transport(
+ struct sctp_association *, struct sctp_transport *);
+void sctp_assoc_update_retran_path(struct sctp_association *);
+struct sctp_transport *sctp_assoc_lookup_paddr(const struct sctp_association *,
+ const union sctp_addr *);
+int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
+ const union sctp_addr *laddr);
+struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *,
+ const union sctp_addr *address,
+ const gfp_t gfp,
+ const int peer_state);
+void sctp_assoc_del_peer(struct sctp_association *asoc,
+ const union sctp_addr *addr);
+void sctp_assoc_rm_peer(struct sctp_association *asoc,
+ struct sctp_transport *peer);
+void sctp_assoc_control_transport(struct sctp_association *asoc,
+ struct sctp_transport *transport,
+ enum sctp_transport_cmd command,
+ sctp_sn_error_t error);
+struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *, __u32);
+void sctp_assoc_migrate(struct sctp_association *, struct sock *);
+int sctp_assoc_update(struct sctp_association *old,
+ struct sctp_association *new);
+
+__u32 sctp_association_get_next_tsn(struct sctp_association *);
+
+void sctp_assoc_update_frag_point(struct sctp_association *asoc);
+void sctp_assoc_set_pmtu(struct sctp_association *asoc, __u32 pmtu);
+void sctp_assoc_sync_pmtu(struct sctp_association *asoc);
+void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int);
+void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int);
+void sctp_assoc_set_primary(struct sctp_association *,
+ struct sctp_transport *);
+void sctp_assoc_del_nonprimary_peers(struct sctp_association *,
+ struct sctp_transport *);
+int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
+ enum sctp_scope scope, gfp_t gfp);
+int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *,
+ struct sctp_cookie*,
+ gfp_t gfp);
+int sctp_assoc_set_id(struct sctp_association *, gfp_t);
+void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc);
+struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
+ const struct sctp_association *asoc,
+ __be32 serial);
+void sctp_asconf_queue_teardown(struct sctp_association *asoc);
+
+int sctp_cmp_addr_exact(const union sctp_addr *ss1,
+ const union sctp_addr *ss2);
+struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc);
+
+/* A convenience structure to parse out SCTP specific CMSGs. */
+struct sctp_cmsgs {
+ struct sctp_initmsg *init;
+ struct sctp_sndrcvinfo *srinfo;
+ struct sctp_sndinfo *sinfo;
+ struct sctp_prinfo *prinfo;
+ struct sctp_authinfo *authinfo;
+ struct msghdr *addrs_msg;
+};
+
+/* Structure for tracking memory objects */
+struct sctp_dbg_objcnt_entry {
+ char *label;
+ atomic_t *counter;
+};
+
+#endif /* __sctp_structs_h__ */
diff --git a/include/net/sctp/tsnmap.h b/include/net/sctp/tsnmap.h
new file mode 100644
index 000000000..616ae0d49
--- /dev/null
+++ b/include/net/sctp/tsnmap.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001 Intel Corp.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * These are the definitions needed for the tsnmap type. The tsnmap is used
+ * to track out of order TSNs received.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ */
+#include <net/sctp/constants.h>
+
+#ifndef __sctp_tsnmap_h__
+#define __sctp_tsnmap_h__
+
+/* RFC 2960 12.2 Parameters necessary per association (i.e. the TCB)
+ * Mapping An array of bits or bytes indicating which out of
+ * Array order TSN's have been received (relative to the
+ * Last Rcvd TSN). If no gaps exist, i.e. no out of
+ * order packets have been received, this array
+ * will be set to all zero. This structure may be
+ * in the form of a circular buffer or bit array.
+ */
+struct sctp_tsnmap {
+	/* This array counts the number of chunks with each TSN.
+	 * It points at one of the two buffers between which we
+	 * ping-pong.
+	 */
+ unsigned long *tsn_map;
+
+ /* This is the TSN at tsn_map[0]. */
+ __u32 base_tsn;
+
+ /* Last Rcvd : This is the last TSN received in
+ * TSN : sequence. This value is set initially by
+ * : taking the peer's Initial TSN, received in
+ * : the INIT or INIT ACK chunk, and subtracting
+ * : one from it.
+ *
+ * Throughout most of the specification this is called the
+ * "Cumulative TSN ACK Point". In this case, we
+ * ignore the advice in 12.2 in favour of the term
+ * used in the bulk of the text.
+ */
+ __u32 cumulative_tsn_ack_point;
+
+ /* This is the highest TSN we've marked. */
+ __u32 max_tsn_seen;
+
+	/* This is the minimum number of TSNs we can track. This corresponds
+	 * to the size of tsn_map.
+	 */
+ __u16 len;
+
+	/* Data chunks pending receipt. Used by the SCTP_STATUS sockopt. */
+ __u16 pending_data;
+
+ /* Record duplicate TSNs here. We clear this after
+ * every SACK. Store up to SCTP_MAX_DUP_TSNS worth of
+ * information.
+ */
+ __u16 num_dup_tsns;
+ __be32 dup_tsns[SCTP_MAX_DUP_TSNS];
+};
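
A stand-alone illustration of what this map tracks (a simplified model, not the kernel code path, ignoring TSN serial-number wrap): TSNs at or below the cumulative ack point are implicitly seen, everything above is recorded in a bitmap relative to base_tsn, and the cumulative point only advances while the low bits of the bitmap are contiguous.

#include <stdint.h>
#include <stdio.h>

#define MAP_BITS 64

struct tiny_tsnmap {
	uint64_t map;       /* bit i set => TSN base_tsn + i has been seen */
	uint32_t base_tsn;  /* TSN represented by bit 0 */
	uint32_t ctsn;      /* cumulative TSN ack point (base_tsn - 1 initially) */
};

static void mark(struct tiny_tsnmap *m, uint32_t tsn)
{
	uint32_t gap = tsn - m->base_tsn;

	if (tsn <= m->ctsn || gap >= MAP_BITS)
		return;                          /* duplicate or out of range */
	m->map |= (uint64_t)1 << gap;

	/* Slide the window while the low bits are contiguous. */
	while (m->map & 1) {
		m->map >>= 1;
		m->base_tsn++;
		m->ctsn = m->base_tsn - 1;
	}
}

int main(void)
{
	struct tiny_tsnmap m = { .map = 0, .base_tsn = 100, .ctsn = 99 };

	mark(&m, 100);       /* in sequence: ctsn -> 100 */
	mark(&m, 102);       /* 101 is missing, ctsn stays at 100 */
	printf("ctsn=%u gap=%s\n", m.ctsn, m.map ? "yes" : "no");
	mark(&m, 101);       /* fills the hole: ctsn -> 102 */
	printf("ctsn=%u gap=%s\n", m.ctsn, m.map ? "yes" : "no");
	return 0;
}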
+
+struct sctp_tsnmap_iter {
+ __u32 start;
+};
+
+/* Initialize a block of memory as a tsnmap. */
+struct sctp_tsnmap *sctp_tsnmap_init(struct sctp_tsnmap *, __u16 len,
+ __u32 initial_tsn, gfp_t gfp);
+
+void sctp_tsnmap_free(struct sctp_tsnmap *map);
+
+/* Test the tracking state of this TSN.
+ * Returns:
+ * 0 if the TSN has not yet been seen
+ * >0 if the TSN has been seen (duplicate)
+ * <0 if the TSN is invalid (too large to track)
+ */
+int sctp_tsnmap_check(const struct sctp_tsnmap *, __u32 tsn);
+
+/* Mark this TSN as seen. */
+int sctp_tsnmap_mark(struct sctp_tsnmap *, __u32 tsn,
+ struct sctp_transport *trans);
+
+/* Mark this TSN and all lower as seen. */
+void sctp_tsnmap_skip(struct sctp_tsnmap *map, __u32 tsn);
+
+/* Retrieve the Cumulative TSN ACK Point. */
+static inline __u32 sctp_tsnmap_get_ctsn(const struct sctp_tsnmap *map)
+{
+ return map->cumulative_tsn_ack_point;
+}
+
+/* Retrieve the highest TSN we've seen. */
+static inline __u32 sctp_tsnmap_get_max_tsn_seen(const struct sctp_tsnmap *map)
+{
+ return map->max_tsn_seen;
+}
+
+/* How many duplicate TSNs are stored? */
+static inline __u16 sctp_tsnmap_num_dups(struct sctp_tsnmap *map)
+{
+ return map->num_dup_tsns;
+}
+
+/* Return pointer to duplicate tsn array as needed by SACK. */
+static inline __be32 *sctp_tsnmap_get_dups(struct sctp_tsnmap *map)
+{
+ map->num_dup_tsns = 0;
+ return map->dup_tsns;
+}
+
+/* How many gap ack blocks do we have recorded? */
+__u16 sctp_tsnmap_num_gabs(struct sctp_tsnmap *map,
+ struct sctp_gap_ack_block *gabs);
+
+/* Refresh the count on pending data. */
+__u16 sctp_tsnmap_pending(struct sctp_tsnmap *map);
+
+/* Is there a gap in the TSN map? */
+static inline int sctp_tsnmap_has_gap(const struct sctp_tsnmap *map)
+{
+ return map->cumulative_tsn_ack_point != map->max_tsn_seen;
+}
+
+/* Mark a duplicate TSN. Note: limit the storage of duplicate TSN
+ * information.
+ */
+static inline void sctp_tsnmap_mark_dup(struct sctp_tsnmap *map, __u32 tsn)
+{
+ if (map->num_dup_tsns < SCTP_MAX_DUP_TSNS)
+ map->dup_tsns[map->num_dup_tsns++] = htonl(tsn);
+}
+
+/* Renege a TSN that was seen. */
+void sctp_tsnmap_renege(struct sctp_tsnmap *, __u32 tsn);
+
+/* Is there a gap in the TSN map? */
+int sctp_tsnmap_has_gap(const struct sctp_tsnmap *);
+
+#endif /* __sctp_tsnmap_h__ */
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
new file mode 100644
index 000000000..994e984ee
--- /dev/null
+++ b/include/net/sctp/ulpevent.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001 Intel Corp.
+ * Copyright (c) 2001 Nokia, Inc.
+ * Copyright (c) 2001 La Monte H.P. Yarroll
+ *
+ * These are the definitions needed for the sctp_ulpevent type. The
+ * sctp_ulpevent type is used to carry information from the state machine
+ * upwards to the ULP.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ */
+
+#ifndef __sctp_ulpevent_h__
+#define __sctp_ulpevent_h__
+
+/* A structure to carry information to the ULP (e.g. Sockets API) */
+/* Warning: This sits inside the skb->cb[] area. Be very careful of
+ * growing this structure, as it is at the maximum limit now.
+ *
+ * sctp_ulpevent is saved in skb->cb (48 bytes), whose last 4 bytes
+ * are taken by sock_skb_cb, so it has to be 'packed' to make
+ * sctp_ulpevent fit into the remaining 44 bytes.
+ */
+struct sctp_ulpevent {
+ struct sctp_association *asoc;
+ struct sctp_chunk *chunk;
+ unsigned int rmem_len;
+ union {
+ __u32 mid;
+ __u16 ssn;
+ };
+ union {
+ __u32 ppid;
+ __u32 fsn;
+ };
+ __u32 tsn;
+ __u32 cumtsn;
+ __u16 stream;
+ __u16 flags;
+ __u16 msg_flags;
+} __packed;
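
The 44-byte budget mentioned above (a 48-byte skb->cb minus the 4 bytes used by sock_skb_cb) can be sanity-checked with a user-space mirror of this layout; on a 64-bit build the packed structure comes to 42 bytes. A sketch using stand-in field types, not the kernel definitions:

#include <stdint.h>

/* Stand-in mirror of struct sctp_ulpevent: two pointers, five 32-bit
 * words (rmem_len, the two unions, tsn, cumtsn) and three 16-bit words,
 * packed just like the real structure.
 */
struct ulpevent_mirror {
	void *asoc;
	void *chunk;
	uint32_t rmem_len;
	uint32_t mid_or_ssn;
	uint32_t ppid_or_fsn;
	uint32_t tsn;
	uint32_t cumtsn;
	uint16_t stream;
	uint16_t flags;
	uint16_t msg_flags;
} __attribute__((packed));

#define SKB_CB_SIZE      48   /* sizeof(((struct sk_buff *)0)->cb) */
#define SOCK_SKB_CB_SIZE  4   /* sizeof(struct sock_skb_cb) */

_Static_assert(sizeof(struct ulpevent_mirror) <= SKB_CB_SIZE - SOCK_SKB_CB_SIZE,
	       "sctp_ulpevent must fit in the remaining 44 bytes of skb->cb");

int main(void) { return 0; }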
+
+/* Retrieve the skb this event sits inside of. */
+static inline struct sk_buff *sctp_event2skb(const struct sctp_ulpevent *ev)
+{
+ return container_of((void *)ev, struct sk_buff, cb);
+}
+
+/* Retrieve & cast the event sitting inside the skb. */
+static inline struct sctp_ulpevent *sctp_skb2event(struct sk_buff *skb)
+{
+ return (struct sctp_ulpevent *)skb->cb;
+}
+
+void sctp_ulpevent_free(struct sctp_ulpevent *);
+int sctp_ulpevent_is_notification(const struct sctp_ulpevent *);
+unsigned int sctp_queue_purge_ulpevents(struct sk_buff_head *list);
+
+struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
+ const struct sctp_association *asoc,
+ __u16 flags,
+ __u16 state,
+ __u16 error,
+ __u16 outbound,
+ __u16 inbound,
+ struct sctp_chunk *chunk,
+ gfp_t gfp);
+
+void sctp_ulpevent_notify_peer_addr_change(struct sctp_transport *transport,
+ int state, int error);
+
+struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
+ const struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ __u16 flags,
+ gfp_t gfp);
+struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
+ const struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ __u16 flags,
+ __u32 error,
+ gfp_t gfp);
+
+struct sctp_ulpevent *sctp_ulpevent_make_send_failed_event(
+ const struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ __u16 flags,
+ __u32 error,
+ gfp_t gfp);
+
+struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event(
+ const struct sctp_association *asoc,
+ __u16 flags,
+ gfp_t gfp);
+
+struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
+ const struct sctp_association *asoc,
+ __u32 indication, __u32 sid, __u32 seq,
+ __u32 flags, gfp_t gfp);
+
+struct sctp_ulpevent *sctp_ulpevent_make_adaptation_indication(
+ const struct sctp_association *asoc, gfp_t gfp);
+
+struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ gfp_t gfp);
+
+struct sctp_ulpevent *sctp_ulpevent_make_authkey(
+ const struct sctp_association *asoc, __u16 key_id,
+ __u32 indication, gfp_t gfp);
+
+struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event(
+ const struct sctp_association *asoc, gfp_t gfp);
+
+struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event(
+ const struct sctp_association *asoc, __u16 flags,
+ __u16 stream_num, __be16 *stream_list, gfp_t gfp);
+
+struct sctp_ulpevent *sctp_ulpevent_make_assoc_reset_event(
+ const struct sctp_association *asoc, __u16 flags,
+ __u32 local_tsn, __u32 remote_tsn, gfp_t gfp);
+
+struct sctp_ulpevent *sctp_ulpevent_make_stream_change_event(
+ const struct sctp_association *asoc, __u16 flags,
+ __u32 strchange_instrms, __u32 strchange_outstrms, gfp_t gfp);
+
+struct sctp_ulpevent *sctp_make_reassembled_event(
+ struct net *net, struct sk_buff_head *queue,
+ struct sk_buff *f_frag, struct sk_buff *l_frag);
+
+void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
+ struct msghdr *);
+void sctp_ulpevent_read_rcvinfo(const struct sctp_ulpevent *event,
+ struct msghdr *);
+void sctp_ulpevent_read_nxtinfo(const struct sctp_ulpevent *event,
+ struct msghdr *, struct sock *sk);
+
+__u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event);
+
+static inline void sctp_ulpevent_type_set(__u16 *subscribe,
+ __u16 sn_type, __u8 on)
+{
+ if (sn_type > SCTP_SN_TYPE_MAX)
+ return;
+
+ if (on)
+ *subscribe |= (1 << (sn_type - SCTP_SN_TYPE_BASE));
+ else
+ *subscribe &= ~(1 << (sn_type - SCTP_SN_TYPE_BASE));
+}
+
+/* Is this event type enabled? */
+static inline bool sctp_ulpevent_type_enabled(__u16 subscribe, __u16 sn_type)
+{
+ if (sn_type > SCTP_SN_TYPE_MAX)
+ return false;
+
+ return subscribe & (1 << (sn_type - SCTP_SN_TYPE_BASE));
+}
+
+/* Given an event subscription, is this event enabled? */
+static inline bool sctp_ulpevent_is_enabled(const struct sctp_ulpevent *event,
+ __u16 subscribe)
+{
+ __u16 sn_type;
+
+ if (!sctp_ulpevent_is_notification(event))
+ return true;
+
+ sn_type = sctp_ulpevent_get_notification_type(event);
+
+ return sctp_ulpevent_type_enabled(subscribe, sn_type);
+}
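
The subscribe bitmask these helpers test is controlled from user space either with the classic SCTP_EVENTS option (struct sctp_event_subscribe) or, per event type, with the SCTP_EVENT option that was added alongside this per-association bitmask. A hedged sketch using SCTP_EVENT, assuming a recent kernel (5.1+ for SCTP_FUTURE_ASSOC) and the uapi definitions from <linux/sctp.h>:

#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/sctp.h>   /* struct sctp_event, SCTP_EVENT, SCTP_ASSOC_CHANGE */
#include <stdio.h>

/* Ask for SCTP_ASSOC_CHANGE notifications and leave everything else as-is. */
static int subscribe_assoc_change(int fd)
{
	struct sctp_event ev = {
		.se_assoc_id = SCTP_FUTURE_ASSOC,  /* apply to future associations */
		.se_type     = SCTP_ASSOC_CHANGE,
		.se_on       = 1,
	};

	if (setsockopt(fd, IPPROTO_SCTP, SCTP_EVENT, &ev, sizeof(ev)) < 0) {
		perror("setsockopt(SCTP_EVENT)");
		return -1;
	}
	return 0;
}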
+
+#endif /* __sctp_ulpevent_h__ */
diff --git a/include/net/sctp/ulpqueue.h b/include/net/sctp/ulpqueue.h
new file mode 100644
index 000000000..0eaf8650e
--- /dev/null
+++ b/include/net/sctp/ulpqueue.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001 Intel Corp.
+ * Copyright (c) 2001 Nokia, Inc.
+ * Copyright (c) 2001 La Monte H.P. Yarroll
+ *
+ * These are the definitions needed for the sctp_ulpq type. The
+ * sctp_ulpq is the interface between the Upper Layer Protocol, or ULP,
+ * and the core SCTP state machine. This is the component which handles
+ * reassembly and ordering.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email addresses:
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ */
+
+#ifndef __sctp_ulpqueue_h__
+#define __sctp_ulpqueue_h__
+
+/* The queue that reassembles and orders data for delivery to the ULP (Sockets API) */
+struct sctp_ulpq {
+ char pd_mode;
+ struct sctp_association *asoc;
+ struct sk_buff_head reasm;
+ struct sk_buff_head reasm_uo;
+ struct sk_buff_head lobby;
+};
+
+/* Prototypes. */
+struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *,
+ struct sctp_association *);
+void sctp_ulpq_flush(struct sctp_ulpq *ulpq);
+void sctp_ulpq_free(struct sctp_ulpq *);
+
+/* Add a new DATA chunk for processing. */
+int sctp_ulpq_tail_data(struct sctp_ulpq *, struct sctp_chunk *, gfp_t);
+
+/* Add a new event for propagation to the ULP. */
+int sctp_ulpq_tail_event(struct sctp_ulpq *, struct sk_buff_head *skb_list);
+
+/* Renege previously received chunks. */
+void sctp_ulpq_renege(struct sctp_ulpq *, struct sctp_chunk *, gfp_t);
+
+/* Perform partial delivery. */
+void sctp_ulpq_partial_delivery(struct sctp_ulpq *, gfp_t);
+
+/* Abort the partial delivery. */
+void sctp_ulpq_abort_pd(struct sctp_ulpq *, gfp_t);
+
+/* Clear the partial data delivery condition on this socket. */
+int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc);
+
+/* Skip over an SSN. */
+void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn);
+
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *, __u32);
+
+__u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
+ struct sk_buff_head *list, __u16 needed);
+
+#endif /* __sctp_ulpqueue_h__ */