Diffstat (limited to 'include/linux/tcp.h')
-rw-r--r-- | include/linux/tcp.h | 564
1 file changed, 564 insertions, 0 deletions
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
new file mode 100644
index 000000000..9cd289ad3
--- /dev/null
+++ b/include/linux/tcp.h
@@ -0,0 +1,564 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * INET		An implementation of the TCP/IP protocol suite for the LINUX
+ *		operating system. INET is implemented using the BSD Socket
+ *		interface as the means of communication with the user level.
+ *
+ *		Definitions for the TCP protocol.
+ *
+ * Version:	@(#)tcp.h	1.0.2	04/28/93
+ *
+ * Author:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ */
+#ifndef _LINUX_TCP_H
+#define _LINUX_TCP_H
+
+
+#include <linux/skbuff.h>
+#include <linux/win_minmax.h>
+#include <net/sock.h>
+#include <net/inet_connection_sock.h>
+#include <net/inet_timewait_sock.h>
+#include <uapi/linux/tcp.h>
+
+static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
+{
+	return (struct tcphdr *)skb_transport_header(skb);
+}
+
+static inline unsigned int __tcp_hdrlen(const struct tcphdr *th)
+{
+	return th->doff * 4;
+}
+
+static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
+{
+	return __tcp_hdrlen(tcp_hdr(skb));
+}
+
+static inline struct tcphdr *inner_tcp_hdr(const struct sk_buff *skb)
+{
+	return (struct tcphdr *)skb_inner_transport_header(skb);
+}
+
+static inline unsigned int inner_tcp_hdrlen(const struct sk_buff *skb)
+{
+	return inner_tcp_hdr(skb)->doff * 4;
+}
+
+/**
+ * skb_tcp_all_headers - Returns size of all headers for a TCP packet
+ * @skb: buffer
+ *
+ * Used in TX path, for a packet known to be a TCP one.
+ *
+ * if (skb_is_gso(skb)) {
+ *         int hlen = skb_tcp_all_headers(skb);
+ *         ...
+ */
+static inline int skb_tcp_all_headers(const struct sk_buff *skb)
+{
+	return skb_transport_offset(skb) + tcp_hdrlen(skb);
+}
+
+/**
+ * skb_inner_tcp_all_headers - Returns size of all headers for an encap TCP packet
+ * @skb: buffer
+ *
+ * Used in TX path, for a packet known to be a TCP one.
+ *
+ * if (skb_is_gso(skb) && skb->encapsulation) {
+ *         int hlen = skb_inner_tcp_all_headers(skb);
+ *         ...
+ */
+static inline int skb_inner_tcp_all_headers(const struct sk_buff *skb)
+{
+	return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
+}
+
+static inline unsigned int tcp_optlen(const struct sk_buff *skb)
+{
+	return (tcp_hdr(skb)->doff - 5) * 4;
+}
+
+/* TCP Fast Open */
+#define TCP_FASTOPEN_COOKIE_MIN		4	/* Min Fast Open Cookie size in bytes */
+#define TCP_FASTOPEN_COOKIE_MAX		16	/* Max Fast Open Cookie size in bytes */
+#define TCP_FASTOPEN_COOKIE_SIZE	8	/* the size employed by this impl. */
+
+/* TCP Fast Open Cookie as stored in memory */
+struct tcp_fastopen_cookie {
+	__le64	val[DIV_ROUND_UP(TCP_FASTOPEN_COOKIE_MAX, sizeof(u64))];
+	s8	len;
+	bool	exp;	/* In RFC6994 experimental option format */
+};
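The accessors above assume the skb's transport header offset has already been set; doff is counted in 32-bit words, so the header length is doff * 4 and anything beyond the 20-byte base header is options. A minimal sketch, assuming a TX path handling a packet known to carry TCP; example_tcp_header_room() and its pr_debug() call are illustrative and not part of this header:

#include <linux/printk.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>

static unsigned int example_tcp_header_room(const struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);		/* valid once the transport header is set */
	unsigned int opt_bytes = tcp_optlen(skb);	/* option bytes beyond the 20-byte base */

	pr_debug("tcp header %u bytes (%u option bytes), all headers %d bytes\n",
		 __tcp_hdrlen(th), opt_bytes, skb_tcp_all_headers(skb));

	/* GSO paths typically size per-segment headroom from this value. */
	return skb_tcp_all_headers(skb);
}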
+/* This defines a selective acknowledgement block. */
+struct tcp_sack_block_wire {
+	__be32	start_seq;
+	__be32	end_seq;
+};
+
+struct tcp_sack_block {
+	u32	start_seq;
+	u32	end_seq;
+};
+
+/*These are used to set the sack_ok field in struct tcp_options_received */
+#define TCP_SACK_SEEN     (1 << 0)   /*1 = peer is SACK capable, */
+#define TCP_DSACK_SEEN    (1 << 2)   /*1 = DSACK was received from peer*/
+
+struct tcp_options_received {
+/*	PAWS/RTTM data	*/
+	int	ts_recent_stamp;/* Time we stored ts_recent (for aging) */
+	u32	ts_recent;	/* Time stamp to echo next */
+	u32	rcv_tsval;	/* Time stamp value */
+	u32	rcv_tsecr;	/* Time stamp echo reply */
+	u16	saw_tstamp : 1,	/* Saw TIMESTAMP on last packet */
+		tstamp_ok : 1,	/* TIMESTAMP seen on SYN packet */
+		dsack : 1,	/* D-SACK is scheduled */
+		wscale_ok : 1,	/* Wscale seen on SYN packet */
+		sack_ok : 3,	/* SACK seen on SYN packet */
+		smc_ok : 1,	/* SMC seen on SYN packet */
+		snd_wscale : 4,	/* Window scaling received from sender */
+		rcv_wscale : 4;	/* Window scaling to send to receiver */
+	u8	saw_unknown:1,	/* Received unknown option */
+		unused:7;
+	u8	num_sacks;	/* Number of SACK blocks */
+	u16	user_mss;	/* mss requested by user in ioctl */
+	u16	mss_clamp;	/* Maximal mss, negotiated at connection setup */
+};
+
+static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
+{
+	rx_opt->tstamp_ok = rx_opt->sack_ok = 0;
+	rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
+#if IS_ENABLED(CONFIG_SMC)
+	rx_opt->smc_ok = 0;
+#endif
+}
+
+/* This is the max number of SACKS that we'll generate and process. It's safe
+ * to increase this, although since:
+ * size = TCPOLEN_SACK_BASE_ALIGNED (4) + n * TCPOLEN_SACK_PERBLOCK (8)
+ * only four options will fit in a standard TCP header */
+#define TCP_NUM_SACKS 4
+
+struct tcp_request_sock_ops;
+
+struct tcp_request_sock {
+	struct inet_request_sock	req;
+	const struct tcp_request_sock_ops *af_specific;
+	u64				snt_synack; /* first SYNACK sent time */
+	bool				tfo_listener;
+	bool				is_mptcp;
+#if IS_ENABLED(CONFIG_MPTCP)
+	bool				drop_req;
+#endif
+	u32				txhash;
+	u32				rcv_isn;
+	u32				snt_isn;
+	u32				ts_off;
+	u32				last_oow_ack_time; /* last SYNACK */
+	u32				rcv_nxt; /* the ack # by SYNACK. For
+						  * FastOpen it's the seq#
+						  * after data-in-SYN.
+						  */
+	u8				syn_tos;
+};
+
+static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
+{
+	return (struct tcp_request_sock *)req;
+}
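struct tcp_options_received is the scratch area the input path fills while parsing TCP options; sack_ok doubles as a bit field holding the TCP_SACK_SEEN/TCP_DSACK_SEEN bits defined above. A hedged sketch of how those bits and tcp_clear_options() might be used; both helper names below are hypothetical:

/* Illustrative only: receiver-side checks built on the structures above,
 * roughly mirroring what the input path does after parsing a SYN.
 */
static bool example_peer_supports_sack_and_ts(const struct tcp_options_received *rx_opt)
{
	/* sack_ok carries the TCP_SACK_SEEN/TCP_DSACK_SEEN bits; tstamp_ok is
	 * a single flag negotiated on the SYN.
	 */
	return (rx_opt->sack_ok & TCP_SACK_SEEN) && rx_opt->tstamp_ok;
}

static void example_restart_option_parsing(struct tcp_options_received *rx_opt)
{
	tcp_clear_options(rx_opt);	/* clears tstamp/sack/wscale (and smc_ok if enabled) */
	rx_opt->saw_tstamp = 0;
	rx_opt->num_sacks = 0;
}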
+struct tcp_sock {
+	/* inet_connection_sock has to be the first member of tcp_sock */
+	struct inet_connection_sock	inet_conn;
+	u16	tcp_header_len;	/* Bytes of tcp header to send */
+	u16	gso_segs;	/* Max number of segs per GSO packet */
+
+/*
+ *	Header prediction flags
+ *	0x5?10 << 16 + snd_wnd in net byte order
+ */
+	__be32	pred_flags;
+
+/*
+ *	RFC793 variables by their proper names. This means you can
+ *	read the code and the spec side by side (and laugh ...)
+ *	See RFC793 and RFC1122. The RFC writes these in capitals.
+ */
+	u64	bytes_received;	/* RFC4898 tcpEStatsAppHCThruOctetsReceived
+				 * sum(delta(rcv_nxt)), or how many bytes
+				 * were acked.
+				 */
+	u32	segs_in;	/* RFC4898 tcpEStatsPerfSegsIn
+				 * total number of segments in.
+				 */
+	u32	data_segs_in;	/* RFC4898 tcpEStatsPerfDataSegsIn
+				 * total number of data segments in.
+				 */
+	u32	rcv_nxt;	/* What we want to receive next */
+	u32	copied_seq;	/* Head of yet unread data */
+	u32	rcv_wup;	/* rcv_nxt on last window update sent */
+	u32	snd_nxt;	/* Next sequence we send */
+	u32	segs_out;	/* RFC4898 tcpEStatsPerfSegsOut
+				 * The total number of segments sent.
+				 */
+	u32	data_segs_out;	/* RFC4898 tcpEStatsPerfDataSegsOut
+				 * total number of data segments sent.
+				 */
+	u64	bytes_sent;	/* RFC4898 tcpEStatsPerfHCDataOctetsOut
+				 * total number of data bytes sent.
+				 */
+	u64	bytes_acked;	/* RFC4898 tcpEStatsAppHCThruOctetsAcked
+				 * sum(delta(snd_una)), or how many bytes
+				 * were acked.
+				 */
+	u32	dsack_dups;	/* RFC4898 tcpEStatsStackDSACKDups
+				 * total number of DSACK blocks received
+				 */
+	u32	snd_una;	/* First byte we want an ack for */
+	u32	snd_sml;	/* Last byte of the most recently transmitted small packet */
+	u32	rcv_tstamp;	/* timestamp of last received ACK (for keepalives) */
+	u32	lsndtime;	/* timestamp of last sent data packet (for restart window) */
+	u32	last_oow_ack_time;  /* timestamp of last out-of-window ACK */
+	u32	compressed_ack_rcv_nxt;
+
+	u32	tsoffset;	/* timestamp offset */
+
+	struct list_head tsq_node; /* anchor in tsq_tasklet.head list */
+	struct list_head tsorted_sent_queue; /* time-sorted sent but un-SACKed skbs */
+
+	u32	snd_wl1;	/* Sequence for window update */
+	u32	snd_wnd;	/* The window we expect to receive */
+	u32	max_window;	/* Maximal window ever seen from peer */
+	u32	mss_cache;	/* Cached effective mss, not including SACKS */
+
+	u32	window_clamp;	/* Maximal window to advertise */
+	u32	rcv_ssthresh;	/* Current window clamp */
+
+	/* Information of the most recently (s)acked skb */
+	struct tcp_rack {
+		u64 mstamp; /* (Re)sent time of the skb */
+		u32 rtt_us;  /* Associated RTT */
+		u32 end_seq; /* Ending TCP sequence of the skb */
+		u32 last_delivered; /* tp->delivered at last reo_wnd adj */
+		u8 reo_wnd_steps;   /* Allowed reordering window */
+#define TCP_RACK_RECOVERY_THRESH 16
+		u8 reo_wnd_persist:5, /* No. of recovery since last adj */
+		   dsack_seen:1, /* Whether DSACK seen after last adj */
+		   advanced:1;	 /* mstamp advanced since last lost marking */
+	} rack;
+	u16	advmss;		/* Advertised MSS */
+	u8	compressed_ack;
+	u8	dup_ack_counter:2,
+		tlp_retrans:1,	/* TLP is a retransmission */
+		unused:5;
+	u32	chrono_start;	/* Start time in jiffies of a TCP chrono */
+	u32	chrono_stat[3];	/* Time in jiffies for chrono_stat stats */
+	u8	chrono_type:2,	/* current chronograph type */
+		rate_app_limited:1,  /* rate_{delivered,interval_us} limited? */
+		fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */
+		fastopen_no_cookie:1, /* Allow send/recv SYN+data without a cookie */
+		is_sack_reneg:1,    /* in recovery from loss with SACK reneg? */
+		fastopen_client_fail:2; /* reason why fastopen failed */
+	u8	nonagle     : 4,/* Disable Nagle algorithm? */
+		thin_lto    : 1,/* Use linear timeouts for thin streams */
+		recvmsg_inq : 1,/* Indicate # of bytes in queue upon recvmsg */
+		repair      : 1,
+		frto        : 1;/* F-RTO (RFC5682) activated in CA_Loss */
+	u8	repair_queue;
+	u8	save_syn:2,	/* Save headers of SYN packet */
+		syn_data:1,	/* SYN includes data */
+		syn_fastopen:1,	/* SYN includes Fast Open option */
+		syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */
+		syn_fastopen_ch:1, /* Active TFO re-enabling probe */
+		syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
+		is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
+	u32	tlp_high_seq;	/* snd_nxt at the time of TLP */
+
+	u32	tcp_tx_delay;	/* delay (in usec) added to TX packets */
+	u64	tcp_wstamp_ns;	/* departure time for next sent data packet */
+	u64	tcp_clock_cache; /* cache last tcp_clock_ns() (see tcp_mstamp_refresh()) */
+
+/* RTT measurement */
+	u64	tcp_mstamp;	/* most recent packet received/sent */
+	u32	srtt_us;	/* smoothed round trip time << 3 in usecs */
+	u32	mdev_us;	/* medium deviation */
+	u32	mdev_max_us;	/* maximal mdev for the last rtt period */
+	u32	rttvar_us;	/* smoothed mdev_max */
+	u32	rtt_seq;	/* sequence number to update rttvar */
+	struct minmax rtt_min;
+
+	u32	packets_out;	/* Packets which are "in flight" */
+	u32	retrans_out;	/* Retransmitted packets out */
+	u32	max_packets_out;  /* max packets_out in last window */
+	u32	cwnd_usage_seq;  /* right edge of cwnd usage tracking flight */
+
+	u16	urg_data;	/* Saved octet of OOB data and control flags */
+	u8	ecn_flags;	/* ECN status bits. */
+	u8	keepalive_probes; /* num of allowed keep alive probes */
+	u32	reordering;	/* Packet reordering metric. */
+	u32	reord_seen;	/* number of data packet reordering events */
+	u32	snd_up;		/* Urgent pointer */
+
+/*
+ *	Options received (usually on last packet, some only on SYN packets).
+ */
+	struct tcp_options_received rx_opt;
+
+/*
+ *	Slow start and congestion control (see also Nagle, and Karn & Partridge)
+ */
+	u32	snd_ssthresh;	/* Slow start size threshold */
+	u32	snd_cwnd;	/* Sending congestion window */
+	u32	snd_cwnd_cnt;	/* Linear increase counter */
+	u32	snd_cwnd_clamp; /* Do not allow snd_cwnd to grow above this */
+	u32	snd_cwnd_used;
+	u32	snd_cwnd_stamp;
+	u32	prior_cwnd;	/* cwnd right before starting loss recovery */
+	u32	prr_delivered;	/* Number of newly delivered packets to
+				 * receiver in Recovery. */
+	u32	prr_out;	/* Total number of pkts sent during Recovery. */
+	u32	delivered;	/* Total data packets delivered incl. rexmits */
+	u32	delivered_ce;	/* Like the above but only ECE marked packets */
+	u32	lost;		/* Total data packets lost incl. rexmits */
+	u32	app_limited;	/* limited until "delivered" reaches this val */
+	u64	first_tx_mstamp;  /* start of window send phase */
+	u64	delivered_mstamp; /* time we reached "delivered" */
+	u32	rate_delivered;    /* saved rate sample: packets delivered */
+	u32	rate_interval_us;  /* saved rate sample: time elapsed */
+
+	u32	rcv_wnd;	/* Current receiver window */
+	u32	write_seq;	/* Tail(+1) of data held in tcp send buffer */
+	u32	notsent_lowat;	/* TCP_NOTSENT_LOWAT */
+	u32	pushed_seq;	/* Last pushed seq, required to talk to windows */
+	u32	lost_out;	/* Lost packets */
+	u32	sacked_out;	/* SACK'd packets */
+
+	struct hrtimer	pacing_timer;
+	struct hrtimer	compressed_ack_timer;
+
+	/* from STCP, retrans queue hinting */
+	struct sk_buff* lost_skb_hint;
+	struct sk_buff *retransmit_skb_hint;
+
+	/* OOO segments go in this rbtree. Socket lock must be held. */
+	struct rb_root	out_of_order_queue;
+	struct sk_buff	*ooo_last_skb; /* cache rb_last(out_of_order_queue) */
+
+	/* SACKs data, these 2 need to be together (see tcp_options_write) */
+	struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
+	struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/
+
+	struct tcp_sack_block recv_sack_cache[4];
+
+	struct sk_buff *highest_sack;   /* skb just after the highest
+					 * skb with SACKed bit set
+					 * (validity guaranteed only if
+					 * sacked_out > 0)
+					 */
+
+	int	lost_cnt_hint;
+
+	u32	prior_ssthresh; /* ssthresh saved at recovery start */
+	u32	high_seq;	/* snd_nxt at onset of congestion */
+
+	u32	retrans_stamp;	/* Timestamp of the last retransmit,
+				 * also used in SYN-SENT to remember stamp of
+				 * the first SYN. */
+	u32	undo_marker;	/* snd_una upon a new recovery episode. */
+	int	undo_retrans;	/* number of undoable retransmissions. */
+	u64	bytes_retrans;	/* RFC4898 tcpEStatsPerfOctetsRetrans
+				 * Total data bytes retransmitted
+				 */
+	u32	total_retrans;	/* Total retransmits for entire connection */
+
+	u32	urg_seq;	/* Seq of received urgent pointer */
+	unsigned int		keepalive_time;	  /* time before keep alive takes place */
+	unsigned int		keepalive_intvl;  /* time interval between keep alive probes */
+
+	int			linger2;
+
+
+/* Sock_ops bpf program related variables */
+#ifdef CONFIG_BPF
+	u8	bpf_sock_ops_cb_flags;  /* Control calling BPF programs
+					 * values defined in uapi/linux/tcp.h
+					 */
+	u8	bpf_chg_cc_inprogress:1; /* In the middle of
+					  * bpf_setsockopt(TCP_CONGESTION),
+					  * it is to avoid the bpf_tcp_cc->init()
+					  * to recur itself by calling
+					  * bpf_setsockopt(TCP_CONGESTION, "itself").
+					  */
+#define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) (TP->bpf_sock_ops_cb_flags & ARG)
+#else
+#define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) 0
+#endif
+
+	u16	timeout_rehash;	/* Timeout-triggered rehash attempts */
+
+	u32	rcv_ooopack; /* Received out-of-order packets, for tcpinfo */
+
+/* Receiver side RTT estimation */
+	u32	rcv_rtt_last_tsecr;
+	struct {
+		u32	rtt_us;
+		u32	seq;
+		u64	time;
+	} rcv_rtt_est;
+
+/* Receiver queue space */
+	struct {
+		u32	space;
+		u32	seq;
+		u64	time;
+	} rcvq_space;
+
+/* TCP-specific MTU probe information. */
+	struct {
+		u32	probe_seq_start;
+		u32	probe_seq_end;
+	} mtu_probe;
+	u32	mtu_info; /* We received an ICMP_FRAG_NEEDED / ICMPV6_PKT_TOOBIG
+			   * while socket was owned by user.
+			   */
+#if IS_ENABLED(CONFIG_MPTCP)
+	bool	is_mptcp;
+#endif
+#if IS_ENABLED(CONFIG_SMC)
+	bool	(*smc_hs_congested)(const struct sock *sk);
+	bool	syn_smc;	/* SYN includes SMC */
+#endif
+
+#ifdef CONFIG_TCP_MD5SIG
+/* TCP AF-Specific parts; only used by MD5 Signature support so far */
+	const struct tcp_sock_af_ops	*af_specific;
+
+/* TCP MD5 Signature Option information */
+	struct tcp_md5sig_info	__rcu *md5sig_info;
+#endif
+
+/* TCP fastopen related information */
+	struct tcp_fastopen_request *fastopen_req;
+	/* fastopen_rsk points to request_sock that resulted in this big
+	 * socket. Used to retransmit SYNACKs etc.
+	 */
+	struct request_sock __rcu *fastopen_rsk;
+	struct saved_syn *saved_syn;
+};
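struct tcp_sock is the full TCP control block layered on top of inet_connection_sock; most of its fields are only stable while the socket lock is held. As a rough, illustrative sketch (not kernel code), here are a few values in-kernel users commonly derive from it, using the tcp_sk() cast defined further down in this header; the function name and the pr_debug() output are assumptions for the example:

static void example_dump_tcp_state(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 unacked_bytes = tp->write_seq - tp->snd_una;	/* queued but not yet acked */
	u32 srtt_usecs = tp->srtt_us >> 3;	/* srtt_us is stored left-shifted by 3 */

	pr_debug("cwnd=%u ssthresh=%u inflight=%u srtt=%uus unacked=%u bytes\n",
		 tp->snd_cwnd, tp->snd_ssthresh, tp->packets_out,
		 srtt_usecs, unacked_bytes);
}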
+enum tsq_enum {
+	TSQ_THROTTLED,
+	TSQ_QUEUED,
+	TCP_TSQ_DEFERRED,	   /* tcp_tasklet_func() found socket was owned */
+	TCP_WRITE_TIMER_DEFERRED,  /* tcp_write_timer() found socket was owned */
+	TCP_DELACK_TIMER_DEFERRED, /* tcp_delack_timer() found socket was owned */
+	TCP_MTU_REDUCED_DEFERRED,  /* tcp_v{4|6}_err() could not call
+				    * tcp_v{4|6}_mtu_reduced()
+				    */
+};
+
+enum tsq_flags {
+	TSQF_THROTTLED			= (1UL << TSQ_THROTTLED),
+	TSQF_QUEUED			= (1UL << TSQ_QUEUED),
+	TCPF_TSQ_DEFERRED		= (1UL << TCP_TSQ_DEFERRED),
+	TCPF_WRITE_TIMER_DEFERRED	= (1UL << TCP_WRITE_TIMER_DEFERRED),
+	TCPF_DELACK_TIMER_DEFERRED	= (1UL << TCP_DELACK_TIMER_DEFERRED),
+	TCPF_MTU_REDUCED_DEFERRED	= (1UL << TCP_MTU_REDUCED_DEFERRED),
+};
+
+static inline struct tcp_sock *tcp_sk(const struct sock *sk)
+{
+	return (struct tcp_sock *)sk;
+}
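Each tsq_enum value is a bit number and the matching tsq_flags entry is its mask (1UL << bit), so the same deferred-work word can be examined with either the bitops API or plain masking. A small sketch under that assumption; the helper name and the plain unsigned long parameter are illustrative, not an API from this header:

#include <linux/bitops.h>
#include <linux/build_bug.h>

static bool example_tx_work_was_deferred(unsigned long deferred_word)
{
	/* The mask form and the bit-number form must agree. */
	BUILD_BUG_ON(TCPF_TSQ_DEFERRED != BIT(TCP_TSQ_DEFERRED));

	return test_bit(TCP_TSQ_DEFERRED, &deferred_word) ||
	       (deferred_word & TCPF_WRITE_TIMER_DEFERRED);
}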
+struct tcp_timewait_sock {
+	struct inet_timewait_sock tw_sk;
+#define tw_rcv_nxt tw_sk.__tw_common.skc_tw_rcv_nxt
+#define tw_snd_nxt tw_sk.__tw_common.skc_tw_snd_nxt
+	u32			  tw_rcv_wnd;
+	u32			  tw_ts_offset;
+	u32			  tw_ts_recent;
+
+	/* The time we sent the last out-of-window ACK: */
+	u32			  tw_last_oow_ack_time;
+
+	int			  tw_ts_recent_stamp;
+	u32			  tw_tx_delay;
+#ifdef CONFIG_TCP_MD5SIG
+	struct tcp_md5sig_key	  *tw_md5_key;
+#endif
+};
+
+static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
+{
+	return (struct tcp_timewait_sock *)sk;
+}
+
+static inline bool tcp_passive_fastopen(const struct sock *sk)
+{
+	return sk->sk_state == TCP_SYN_RECV &&
+	       rcu_access_pointer(tcp_sk(sk)->fastopen_rsk) != NULL;
+}
+
+static inline void fastopen_queue_tune(struct sock *sk, int backlog)
+{
+	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
+	int somaxconn = READ_ONCE(sock_net(sk)->core.sysctl_somaxconn);
+
+	WRITE_ONCE(queue->fastopenq.max_qlen, min_t(unsigned int, backlog, somaxconn));
+}
+
+static inline void tcp_move_syn(struct tcp_sock *tp,
+				struct request_sock *req)
+{
+	tp->saved_syn = req->saved_syn;
+	req->saved_syn = NULL;
+}
+
+static inline void tcp_saved_syn_free(struct tcp_sock *tp)
+{
+	kfree(tp->saved_syn);
+	tp->saved_syn = NULL;
+}
+
+static inline u32 tcp_saved_syn_len(const struct saved_syn *saved_syn)
+{
+	return saved_syn->mac_hdrlen + saved_syn->network_hdrlen +
+	       saved_syn->tcp_hdrlen;
+}
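The saved-SYN helpers tie together: tcp_move_syn() takes ownership of the headers captured on the request_sock, tcp_saved_syn_len() reports how many bytes were captured, and tcp_saved_syn_free() releases the copy. A minimal, illustrative sequence, assuming TCP_SAVE_SYN was requested on the listener; the function name is made up and locking/error handling are elided:

static u32 example_consume_saved_syn(struct tcp_sock *tp)
{
	u32 len = 0;

	if (tp->saved_syn)
		len = tcp_saved_syn_len(tp->saved_syn);	/* mac + network + tcp header bytes */

	tcp_saved_syn_free(tp);		/* kfree()s the copy and clears the pointer */
	return len;
}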
+struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk,
+					       const struct sk_buff *orig_skb,
+					       const struct sk_buff *ack_skb);
+
+static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss)
+{
+	/* We use READ_ONCE() here because socket might not be locked.
+	 * This happens for listeners.
+	 */
+	u16 user_mss = READ_ONCE(tp->rx_opt.user_mss);
+
+	return (user_mss && user_mss < mss) ? user_mss : mss;
+}
+
+int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, int pcount,
+		  int shiftlen);
+
+void __tcp_sock_set_cork(struct sock *sk, bool on);
+void tcp_sock_set_cork(struct sock *sk, bool on);
+int tcp_sock_set_keepcnt(struct sock *sk, int val);
+int tcp_sock_set_keepidle_locked(struct sock *sk, int val);
+int tcp_sock_set_keepidle(struct sock *sk, int val);
+int tcp_sock_set_keepintvl(struct sock *sk, int val);
+void __tcp_sock_set_nodelay(struct sock *sk, bool on);
+void tcp_sock_set_nodelay(struct sock *sk);
+void tcp_sock_set_quickack(struct sock *sk, int val);
+int tcp_sock_set_syncnt(struct sock *sk, int val);
+void tcp_sock_set_user_timeout(struct sock *sk, u32 val);
+
+#endif	/* _LINUX_TCP_H */
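The tcp_sock_set_*() declarations at the end are the setters in-kernel TCP users call instead of open-coding setsockopt(). A hedged sketch of tuning a kernel socket with them, using only the signatures declared above; the function name, the specific values, and the assumption that sock is an already-created TCP socket are all illustrative:

#include <linux/net.h>
#include <linux/tcp.h>
#include <linux/time64.h>

static int example_tune_kernel_socket(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err;

	tcp_sock_set_nodelay(sk);		/* disable Nagle for latency-sensitive traffic */
	tcp_sock_set_user_timeout(sk, 30 * MSEC_PER_SEC);	/* give up after 30s of unacked data */

	err = tcp_sock_set_keepidle(sk, 60);	/* seconds of idle before the first keepalive probe */
	if (err)
		return err;
	err = tcp_sock_set_keepintvl(sk, 10);	/* seconds between probes */
	if (err)
		return err;
	return tcp_sock_set_keepcnt(sk, 5);	/* probes before the connection is dropped */
}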