author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:17:52 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:17:52 +0000
commit     3afb00d3f86d3d924f88b56fa8285d4e9db85852 (patch)
tree       95a985d3019522cea546b7d8df621369bc44fc6c /net/mptcp
parent     Adding debian version 6.9.12-1. (diff)
download   linux-3afb00d3f86d3d924f88b56fa8285d4e9db85852.tar.xz
           linux-3afb00d3f86d3d924f88b56fa8285d4e9db85852.zip
Merging upstream version 6.10.3.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'net/mptcp')
-rw-r--r--  net/mptcp/ctrl.c          32
-rw-r--r--  net/mptcp/mib.h            2
-rw-r--r--  net/mptcp/options.c        1
-rw-r--r--  net/mptcp/pm_netlink.c     1
-rw-r--r--  net/mptcp/pm_userspace.c   1
-rw-r--r--  net/mptcp/protocol.c      30
-rw-r--r--  net/mptcp/protocol.h      46
-rw-r--r--  net/mptcp/sched.c         22
-rw-r--r--  net/mptcp/sockopt.c       26
-rw-r--r--  net/mptcp/subflow.c       91
10 files changed, 196 insertions, 56 deletions
diff --git a/net/mptcp/ctrl.c b/net/mptcp/ctrl.c
index 2963ba84e2..98b1dd498f 100644
--- a/net/mptcp/ctrl.c
+++ b/net/mptcp/ctrl.c
@@ -92,7 +92,7 @@ static void mptcp_pernet_set_defaults(struct mptcp_pernet *pernet)
 	pernet->allow_join_initial_addr_port = 1;
 	pernet->stale_loss_cnt = 4;
 	pernet->pm_type = MPTCP_PM_TYPE_KERNEL;
-	strcpy(pernet->scheduler, "default");
+	strscpy(pernet->scheduler, "default", sizeof(pernet->scheduler));
 }
 
 #ifdef CONFIG_SYSCTL
@@ -133,6 +133,24 @@ static int proc_scheduler(struct ctl_table *ctl, int write,
 	return ret;
 }
 
+static int proc_available_schedulers(struct ctl_table *ctl,
+				     int write, void *buffer,
+				     size_t *lenp, loff_t *ppos)
+{
+	struct ctl_table tbl = { .maxlen = MPTCP_SCHED_BUF_MAX, };
+	int ret;
+
+	tbl.data = kmalloc(tbl.maxlen, GFP_USER);
+	if (!tbl.data)
+		return -ENOMEM;
+
+	mptcp_get_available_schedulers(tbl.data, MPTCP_SCHED_BUF_MAX);
+	ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
+	kfree(tbl.data);
+
+	return ret;
+}
+
 static struct ctl_table mptcp_sysctl_table[] = {
 	{
 		.procname = "enabled",
@@ -188,12 +206,17 @@ static struct ctl_table mptcp_sysctl_table[] = {
 		.proc_handler = proc_scheduler,
 	},
 	{
+		.procname = "available_schedulers",
+		.maxlen = MPTCP_SCHED_BUF_MAX,
+		.mode = 0644,
+		.proc_handler = proc_available_schedulers,
+	},
+	{
 		.procname = "close_timeout",
 		.maxlen = sizeof(unsigned int),
 		.mode = 0644,
 		.proc_handler = proc_dointvec_jiffies,
 	},
-	{}
 };
 
 static int mptcp_pernet_new_table(struct net *net, struct mptcp_pernet *pernet)
@@ -215,7 +238,8 @@ static int mptcp_pernet_new_table(struct net *net, struct mptcp_pernet *pernet)
 	table[4].data = &pernet->stale_loss_cnt;
 	table[5].data = &pernet->pm_type;
 	table[6].data = &pernet->scheduler;
-	table[7].data = &pernet->close_timeout;
+	/* table[7] is for available_schedulers which is read-only info */
+	table[8].data = &pernet->close_timeout;
 
 	hdr = register_net_sysctl_sz(net, MPTCP_SYSCTL_PATH, table,
 				     ARRAY_SIZE(mptcp_sysctl_table));
@@ -235,7 +259,7 @@ err_alloc:
 
 static void mptcp_pernet_del_table(struct mptcp_pernet *pernet)
 {
-	struct ctl_table *table = pernet->ctl_table_hdr->ctl_table_arg;
+	const struct ctl_table *table = pernet->ctl_table_hdr->ctl_table_arg;
 
 	unregister_net_sysctl_table(pernet->ctl_table_hdr);
diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h
index dd7fd1f246..2704afd0df 100644
--- a/net/mptcp/mib.h
+++ b/net/mptcp/mib.h
@@ -1,5 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-or-later */
 
+#include <net/inet_common.h>
+
 enum linux_mptcp_mib_field {
 	MPTCP_MIB_NUM = 0,
 	MPTCP_MIB_MPCAPABLEPASSIVE,	/* Received SYN with MP_CAPABLE */
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 27ca42c77b..8e8dcfbc29 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -1068,6 +1068,7 @@ static void ack_update_msk(struct mptcp_sock *msk,
 		__mptcp_snd_una_update(msk, new_snd_una);
 		__mptcp_data_acked(sk);
 	}
+	msk->last_ack_recv = tcp_jiffies32;
 	mptcp_data_unlock(sk);
 
 	trace_ack_update_msk(mp_opt->data_ack,
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 8bf7ed6d63..ea9e5817b9 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -14,6 +14,7 @@
 
 #include "protocol.h"
 #include "mib.h"
+#include "mptcp_pm_gen.h"
 
 static int pm_nl_pernet_id;
diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c
index 9f5d422d5e..f0a4590506 100644
--- a/net/mptcp/pm_userspace.c
+++ b/net/mptcp/pm_userspace.c
@@ -6,6 +6,7 @@
 
 #include "protocol.h"
 #include "mib.h"
+#include "mptcp_pm_gen.h"
 
 void mptcp_free_local_addr_list(struct mptcp_sock *msk)
 {
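For context on the ctrl.c hunks above: they expose the list of registered packet schedulers through a new net.mptcp.available_schedulers sysctl, alongside defensive strcpy-to-strscpy conversions. A minimal userspace sketch of reading the new entry, assuming a 6.10+ kernel with CONFIG_MPTCP enabled; the buffer size here is an arbitrary choice, not the kernel's MPTCP_SCHED_BUF_MAX:

    #include <stdio.h>

    int main(void)
    {
        char buf[256];
        FILE *f = fopen("/proc/sys/net/mptcp/available_schedulers", "r");

        if (!f) {
            perror("fopen"); /* kernel too old or MPTCP disabled */
            return 1;
        }
        if (fgets(buf, sizeof(buf), f))
            printf("schedulers: %s", buf); /* e.g. "default" */
        fclose(f);
        return 0;
    }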
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 68e4c08648..bb7dca8aa2 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -20,6 +20,7 @@
 #include <net/transp_v6.h>
 #endif
 #include <net/mptcp.h>
+#include <net/hotdata.h>
 #include <net/xfrm.h>
 #include <asm/ioctls.h>
 #include "protocol.h"
@@ -706,6 +707,8 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
 		}
 	} while (more_data_avail);
 
+	if (moved > 0)
+		msk->last_data_recv = tcp_jiffies32;
 	*bytes += moved;
 	return done;
 }
@@ -1270,7 +1273,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
 
 		i = skb_shinfo(skb)->nr_frags;
 		can_coalesce = skb_can_coalesce(skb, i, dfrag->page, offset);
-		if (!can_coalesce && i >= READ_ONCE(sysctl_max_skb_frags)) {
+		if (!can_coalesce && i >= READ_ONCE(net_hotdata.sysctl_max_skb_frags)) {
 			tcp_mark_push(tcp_sk(ssk), skb);
 			goto alloc_skb;
 		}
@@ -1556,6 +1559,8 @@ static int __subflow_push_pending(struct sock *sk, struct sock *ssk,
 		err = copied;
 
 out:
+	if (err > 0)
+		msk->last_data_sent = tcp_jiffies32;
 	return err;
 }
@@ -2056,7 +2061,7 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
 			ssk = mptcp_subflow_tcp_sock(subflow);
 			slow = lock_sock_fast(ssk);
 			WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf);
-			tcp_sk(ssk)->window_clamp = window_clamp;
+			WRITE_ONCE(tcp_sk(ssk)->window_clamp, window_clamp);
 			tcp_cleanup_rbuf(ssk, 1);
 			unlock_sock_fast(ssk, slow);
 		}
@@ -2565,7 +2570,7 @@ static void mptcp_check_fastclose(struct mptcp_sock *msk)
 		slow = lock_sock_fast(tcp_sk);
 		if (tcp_sk->sk_state != TCP_CLOSE) {
-			tcp_send_active_reset(tcp_sk, GFP_ATOMIC);
+			mptcp_send_active_reset_reason(tcp_sk);
 			tcp_set_state(tcp_sk, TCP_CLOSE);
 		}
 		unlock_sock_fast(tcp_sk, slow);
@@ -2793,6 +2798,9 @@ static void __mptcp_init_sock(struct sock *sk)
 	WRITE_ONCE(msk->allow_infinite_fallback, true);
 	msk->recovery = false;
 	msk->subflow_id = 1;
+	msk->last_data_sent = tcp_jiffies32;
+	msk->last_data_recv = tcp_jiffies32;
+	msk->last_ack_recv = tcp_jiffies32;
 
 	mptcp_pm_data_init(msk);
@@ -2806,7 +2814,8 @@ static void mptcp_ca_reset(struct sock *sk)
 	struct inet_connection_sock *icsk = inet_csk(sk);
 
 	tcp_assign_congestion_control(sk);
-	strcpy(mptcp_sk(sk)->ca_name, icsk->icsk_ca_ops->name);
+	strscpy(mptcp_sk(sk)->ca_name, icsk->icsk_ca_ops->name,
+		sizeof(mptcp_sk(sk)->ca_name));
 
 	/* no need to keep a reference to the ops, the name will suffice */
 	tcp_cleanup_congestion_control(sk);
@@ -3879,11 +3888,10 @@ unlock:
 }
 
 static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
-			       int flags, bool kern)
+			       struct proto_accept_arg *arg)
 {
 	struct mptcp_sock *msk = mptcp_sk(sock->sk);
 	struct sock *ssk, *newsk;
-	int err;
 
 	pr_debug("msk=%p", msk);
 
@@ -3895,9 +3903,9 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
 		return -EINVAL;
 
 	pr_debug("ssk=%p, listener=%p", ssk, mptcp_subflow_ctx(ssk));
-	newsk = inet_csk_accept(ssk, flags, &err, kern);
+	newsk = inet_csk_accept(ssk, arg);
 	if (!newsk)
-		return err;
+		return arg->err;
 
 	pr_debug("newsk=%p, subflow is mptcp=%d", newsk, sk_is_mptcp(newsk));
 	if (sk_is_mptcp(newsk)) {
@@ -3918,7 +3926,7 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
 		newsk = new_mptcp_sock;
 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
 
-		newsk->sk_kern_sock = kern;
+		newsk->sk_kern_sock = arg->kern;
 		lock_sock(newsk);
 		__inet_accept(sock, newsock, newsk);
@@ -3947,7 +3955,7 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
 		}
 	} else {
 tcpfallback:
-		newsk->sk_kern_sock = kern;
+		newsk->sk_kern_sock = arg->kern;
 		lock_sock(newsk);
 		__inet_accept(sock, newsock, newsk);
 		/* we are being invoked after accepting a non-mp-capable
@@ -4167,7 +4175,7 @@ int __init mptcp_proto_v6_init(void)
 	int err;
 
 	mptcp_v6_prot = mptcp_prot;
-	strcpy(mptcp_v6_prot.name, "MPTCPv6");
+	strscpy(mptcp_v6_prot.name, "MPTCPv6", sizeof(mptcp_v6_prot.name));
 	mptcp_v6_prot.slab = NULL;
 	mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);
 	mptcp_v6_prot.ipv6_pinfo_offset = offsetof(struct mptcp6_sock, np);
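The protocol.c hunks above stamp three new timestamps (last_data_sent, last_data_recv, plus last_ack_recv from the options.c hunk) with tcp_jiffies32. A standalone sketch of how such a timestamp becomes a "milliseconds since" value, as mptcp_diag_fill_info() later does with jiffies_to_msecs() in the sockopt.c hunks further down; uint32_t stands in for the kernel's u32, and HZ is assumed to divide 1000 evenly (jiffies_to_msecs() handles the general case):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t age_ms(uint32_t now, uint32_t last, uint32_t hz)
    {
        /* unsigned subtraction stays correct across tcp_jiffies32 wrap-around */
        uint32_t delta = now - last;

        return delta * (1000u / hz);
    }

    int main(void)
    {
        /* e.g. data last received 250 jiffies ago at HZ=250 -> 1000 ms */
        printf("%u ms\n", age_ms(1000250u, 1000000u, 250u));
        return 0;
    }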
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 9d1ee19949..7aa47e2dd5 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -12,8 +12,7 @@
 #include <net/inet_connection_sock.h>
 #include <uapi/linux/mptcp.h>
 #include <net/genetlink.h>
-
-#include "mptcp_pm_gen.h"
+#include <net/rstreason.h>
 
 #define MPTCP_SUPPORTED_VERSION	1
@@ -282,6 +281,9 @@ struct mptcp_sock {
 	u64	bytes_acked;
 	u64	snd_una;
 	u64	wnd_end;
+	u32	last_data_sent;
+	u32	last_data_recv;
+	u32	last_ack_recv;
 	unsigned long	timer_ival;
 	u32	token;
 	int	rmem_released;
@@ -561,7 +563,7 @@ struct mptcp_subflow_context {
 static inline struct mptcp_subflow_context *
 mptcp_subflow_ctx(const struct sock *sk)
 {
-	struct inet_connection_sock *icsk = inet_csk(sk);
+	const struct inet_connection_sock *icsk = inet_csk(sk);
 
 	/* Use RCU on icsk_ulp_data only for sock diag code */
 	return (__force struct mptcp_subflow_context *)icsk->icsk_ulp_data;
@@ -581,6 +583,43 @@ mptcp_subflow_ctx_reset(struct mptcp_subflow_context *subflow)
 	WRITE_ONCE(subflow->local_id, -1);
 }
 
+/* Convert reset reasons in MPTCP to enum sk_rst_reason type */
+static inline enum sk_rst_reason
+sk_rst_convert_mptcp_reason(u32 reason)
+{
+	switch (reason) {
+	case MPTCP_RST_EUNSPEC:
+		return SK_RST_REASON_MPTCP_RST_EUNSPEC;
+	case MPTCP_RST_EMPTCP:
+		return SK_RST_REASON_MPTCP_RST_EMPTCP;
+	case MPTCP_RST_ERESOURCE:
+		return SK_RST_REASON_MPTCP_RST_ERESOURCE;
+	case MPTCP_RST_EPROHIBIT:
+		return SK_RST_REASON_MPTCP_RST_EPROHIBIT;
+	case MPTCP_RST_EWQ2BIG:
+		return SK_RST_REASON_MPTCP_RST_EWQ2BIG;
+	case MPTCP_RST_EBADPERF:
+		return SK_RST_REASON_MPTCP_RST_EBADPERF;
+	case MPTCP_RST_EMIDDLEBOX:
+		return SK_RST_REASON_MPTCP_RST_EMIDDLEBOX;
+	default:
+		/* It should not happen, or else errors may occur
+		 * in MPTCP layer
+		 */
+		return SK_RST_REASON_ERROR;
+	}
+}
+
+static inline void
+mptcp_send_active_reset_reason(struct sock *sk)
+{
+	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+	enum sk_rst_reason reason;
+
+	reason = sk_rst_convert_mptcp_reason(subflow->reset_reason);
+	tcp_send_active_reset(sk, GFP_ATOMIC, reason);
+}
+
 static inline u64
 mptcp_subflow_get_map_offset(const struct mptcp_subflow_context *subflow)
 {
@@ -645,6 +684,7 @@ unsigned int mptcp_stale_loss_cnt(const struct net *net);
 unsigned int mptcp_close_timeout(const struct sock *sk);
 int mptcp_get_pm_type(const struct net *net);
 const char *mptcp_get_scheduler(const struct net *net);
+void mptcp_get_available_schedulers(char *buf, size_t maxlen);
 void __mptcp_subflow_fully_established(struct mptcp_sock *msk,
 				       struct mptcp_subflow_context *subflow,
 				       const struct mptcp_options_received *mp_opt);
diff --git a/net/mptcp/sched.c b/net/mptcp/sched.c
index 4ab0693c06..4a7fd0508a 100644
--- a/net/mptcp/sched.c
+++ b/net/mptcp/sched.c
@@ -51,6 +51,28 @@ struct mptcp_sched_ops *mptcp_sched_find(const char *name)
 	return ret;
 }
 
+/* Build string with list of available scheduler values.
+ * Similar to tcp_get_available_congestion_control()
+ */
+void mptcp_get_available_schedulers(char *buf, size_t maxlen)
+{
+	struct mptcp_sched_ops *sched;
+	size_t offs = 0;
+
+	rcu_read_lock();
+	spin_lock(&mptcp_sched_list_lock);
+	list_for_each_entry_rcu(sched, &mptcp_sched_list, list) {
+		offs += snprintf(buf + offs, maxlen - offs,
+				 "%s%s",
+				 offs == 0 ? "" : " ", sched->name);
+
+		if (WARN_ON_ONCE(offs >= maxlen))
+			break;
+	}
+	spin_unlock(&mptcp_sched_list_lock);
+	rcu_read_unlock();
+}
+
 int mptcp_register_scheduler(struct mptcp_sched_ops *sched)
 {
 	if (!sched->get_subflow)
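mptcp_get_available_schedulers() above uses the common snprintf() accumulation idiom: each call returns the length that would have been written, so the running offset both places the next name and detects truncation. A self-contained userspace sketch of the same pattern; the scheduler names are made-up examples, and the kernel version WARNs on overflow instead of silently stopping:

    #include <stddef.h>
    #include <stdio.h>

    static size_t append_names(char *buf, size_t maxlen,
                               const char *const *names, size_t n)
    {
        size_t offs = 0;

        for (size_t i = 0; i < n && offs < maxlen; i++)
            offs += (size_t)snprintf(buf + offs, maxlen - offs, "%s%s",
                                     offs ? " " : "", names[i]);
        return offs;
    }

    int main(void)
    {
        const char *const names[] = { "default", "roundrobin" };
        char buf[64];

        append_names(buf, sizeof(buf), names, 2);
        puts(buf); /* "default roundrobin" */
        return 0;
    }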
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index 19ee684f9e..f9a4fb17b5 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -616,7 +616,7 @@ static int mptcp_setsockopt_sol_tcp_congestion(struct mptcp_sock *msk, sockptr_t
 	}
 
 	if (ret == 0)
-		strcpy(msk->ca_name, name);
+		strscpy(msk->ca_name, name, sizeof(msk->ca_name));
 
 	release_sock(sk);
 	return ret;
@@ -937,6 +937,7 @@ void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info)
 	struct sock *sk = (struct sock *)msk;
 	u32 flags = 0;
 	bool slow;
+	u32 now;
 
 	memset(info, 0, sizeof(*info));
 
@@ -965,11 +966,6 @@ void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info)
 	if (READ_ONCE(msk->can_ack))
 		flags |= MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED;
 	info->mptcpi_flags = flags;
-	mptcp_data_lock(sk);
-	info->mptcpi_snd_una = msk->snd_una;
-	info->mptcpi_rcv_nxt = msk->ack_seq;
-	info->mptcpi_bytes_acked = msk->bytes_acked;
-	mptcp_data_unlock(sk);
 
 	slow = lock_sock_fast(sk);
 	info->mptcpi_csum_enabled = READ_ONCE(msk->csum_enabled);
@@ -981,7 +977,17 @@ void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info)
 	info->mptcpi_bytes_retrans = msk->bytes_retrans;
 	info->mptcpi_subflows_total = info->mptcpi_subflows +
 		__mptcp_has_initial_subflow(msk);
+	now = tcp_jiffies32;
+	info->mptcpi_last_data_sent = jiffies_to_msecs(now - msk->last_data_sent);
+	info->mptcpi_last_data_recv = jiffies_to_msecs(now - msk->last_data_recv);
 	unlock_sock_fast(sk, slow);
+
+	mptcp_data_lock(sk);
+	info->mptcpi_last_ack_recv = jiffies_to_msecs(now - msk->last_ack_recv);
+	info->mptcpi_snd_una = msk->snd_una;
+	info->mptcpi_rcv_nxt = msk->ack_seq;
+	info->mptcpi_bytes_acked = msk->bytes_acked;
+	mptcp_data_unlock(sk);
 }
 EXPORT_SYMBOL_GPL(mptcp_diag_fill_info);
 
@@ -993,6 +999,10 @@ static int mptcp_getsockopt_info(struct mptcp_sock *msk, char __user *optval, in
 	if (get_user(len, optlen))
 		return -EFAULT;
 
+	/* When used only to check if a fallback to TCP happened. */
+	if (len == 0)
+		return 0;
+
 	len = min_t(unsigned int, len, sizeof(struct mptcp_info));
 
 	mptcp_diag_fill_info(msk, &m_info);
@@ -1395,6 +1405,8 @@ static int mptcp_getsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
 					    READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_keepalive_probes));
 	case TCP_NOTSENT_LOWAT:
 		return mptcp_put_int_option(msk, optval, optlen, msk->notsent_lowat);
+	case TCP_IS_MPTCP:
+		return mptcp_put_int_option(msk, optval, optlen, 1);
 	}
 	return -EOPNOTSUPP;
 }
@@ -1579,7 +1591,7 @@ int mptcp_set_rcvlowat(struct sock *sk, int val)
 
 		slow = lock_sock_fast(ssk);
 		WRITE_ONCE(ssk->sk_rcvbuf, space);
-		tcp_sk(ssk)->window_clamp = val;
+		WRITE_ONCE(tcp_sk(ssk)->window_clamp, val);
 		unlock_sock_fast(ssk, slow);
 	}
 	return 0;
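The TCP_IS_MPTCP hunk above gives applications a direct probe: getsockopt() at the TCP level returns 1 on an MPTCP socket. A hedged userspace sketch; the fallback constants mirror the 6.10 uapi headers (worth verifying) and should be taken from <netinet/tcp.h> or <linux/tcp.h> where available:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    #ifndef IPPROTO_MPTCP
    #define IPPROTO_MPTCP 262
    #endif
    #ifndef TCP_IS_MPTCP
    #define TCP_IS_MPTCP 43   /* value per the 6.10 uapi header */
    #endif

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
        int val = 0;
        socklen_t len = sizeof(val);

        if (fd < 0) {
            perror("socket"); /* kernel without MPTCP support */
            return 1;
        }
        if (getsockopt(fd, IPPROTO_TCP, TCP_IS_MPTCP, &val, &len) == 0)
            printf("TCP_IS_MPTCP: %d\n", val); /* expect 1 */
        return 0;
    }

Relatedly, the len == 0 shortcut added to mptcp_getsockopt_info() above lets applications issue a zero-length MPTCP_INFO call purely as a cheap fallback check, per the comment in the hunk itself.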
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 6042a47da6..612c38570a 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -20,6 +20,7 @@
 #include <net/transp_v6.h>
 #endif
 #include <net/mptcp.h>
+
 #include "protocol.h"
 #include "mib.h"
@@ -150,8 +151,10 @@ static int subflow_check_req(struct request_sock *req,
 	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
 	 * TCP option space.
 	 */
-	if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
+	if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info)) {
+		subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
 		return -EINVAL;
+	}
 #endif
 
 	mptcp_get_options(skb, &mp_opt);
@@ -219,6 +222,7 @@ again:
 			 ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
 		if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
 			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTSYNRX);
+			subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
 			return -EPERM;
 		}
 		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTSYNRX);
@@ -227,10 +231,12 @@ again:
 		subflow_req_create_thmac(subflow_req);
 
 		if (unlikely(req->syncookie)) {
-			if (mptcp_can_accept_new_subflow(subflow_req->msk))
-				subflow_init_req_cookie_join_save(subflow_req, skb);
-			else
+			if (!mptcp_can_accept_new_subflow(subflow_req->msk)) {
+				subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
 				return -EPERM;
+			}
+
+			subflow_init_req_cookie_join_save(subflow_req, skb);
 		}
 
 		pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
@@ -281,10 +287,21 @@ int mptcp_subflow_init_cookie_req(struct request_sock *req,
 }
 EXPORT_SYMBOL_GPL(mptcp_subflow_init_cookie_req);
 
+static enum sk_rst_reason mptcp_get_rst_reason(const struct sk_buff *skb)
+{
+	const struct mptcp_ext *mpext = mptcp_get_ext(skb);
+
+	if (!mpext)
+		return SK_RST_REASON_NOT_SPECIFIED;
+
+	return sk_rst_convert_mptcp_reason(mpext->reset_reason);
+}
+
 static struct dst_entry *subflow_v4_route_req(const struct sock *sk,
 					      struct sk_buff *skb,
 					      struct flowi *fl,
-					      struct request_sock *req)
+					      struct request_sock *req,
+					      u32 tw_isn)
 {
 	struct dst_entry *dst;
 	int err;
@@ -292,7 +309,7 @@ static struct dst_entry *subflow_v4_route_req(const struct sock *sk,
 	tcp_rsk(req)->is_mptcp = 1;
 	subflow_init_req(req, sk);
 
-	dst = tcp_request_sock_ipv4_ops.route_req(sk, skb, fl, req);
+	dst = tcp_request_sock_ipv4_ops.route_req(sk, skb, fl, req, tw_isn);
 	if (!dst)
 		return NULL;
 
@@ -302,7 +319,8 @@ static struct dst_entry *subflow_v4_route_req(const struct sock *sk,
 	dst_release(dst);
 	if (!req->syncookie)
-		tcp_request_sock_ops.send_reset(sk, skb);
+		tcp_request_sock_ops.send_reset(sk, skb,
+						mptcp_get_rst_reason(skb));
 	return NULL;
 }
@@ -351,7 +369,8 @@ static int subflow_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
 
 static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
 					      struct sk_buff *skb,
 					      struct flowi *fl,
-					      struct request_sock *req)
+					      struct request_sock *req,
+					      u32 tw_isn)
 {
 	struct dst_entry *dst;
 	int err;
@@ -359,7 +378,7 @@ static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
 	tcp_rsk(req)->is_mptcp = 1;
 	subflow_init_req(req, sk);
 
-	dst = tcp_request_sock_ipv6_ops.route_req(sk, skb, fl, req);
+	dst = tcp_request_sock_ipv6_ops.route_req(sk, skb, fl, req, tw_isn);
 	if (!dst)
 		return NULL;
 
@@ -369,7 +388,8 @@ static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
 	dst_release(dst);
 	if (!req->syncookie)
-		tcp6_request_sock_ops.send_reset(sk, skb);
+		tcp6_request_sock_ops.send_reset(sk, skb,
+						 mptcp_get_rst_reason(skb));
 	return NULL;
 }
 #endif
@@ -405,7 +425,7 @@ void mptcp_subflow_reset(struct sock *ssk)
 	/* must hold: tcp_done() could drop last reference on parent */
 	sock_hold(sk);
 
-	tcp_send_active_reset(ssk, GFP_ATOMIC);
+	mptcp_send_active_reset_reason(ssk);
 	tcp_done(ssk);
 	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags))
 		mptcp_schedule_work(sk);
@@ -774,6 +794,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
 	struct mptcp_subflow_request_sock *subflow_req;
 	struct mptcp_options_received mp_opt;
 	bool fallback, fallback_is_fatal;
+	enum sk_rst_reason reason;
 	struct mptcp_sock *owner;
 	struct sock *child;
 
@@ -873,13 +894,18 @@ create_child:
 				 ntohs(inet_sk((struct sock *)owner)->inet_sport));
 			if (!mptcp_pm_sport_in_anno_list(owner, sk)) {
 				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTACKRX);
+				subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
 				goto dispose_child;
 			}
 
 			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTACKRX);
 		}
 
-		if (!mptcp_finish_join(child))
+		if (!mptcp_finish_join(child)) {
+			struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(child);
+
+			subflow_add_reset_reason(skb, subflow->reset_reason);
 			goto dispose_child;
+		}
 
 		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
 		tcp_rsk(req)->drop_req = true;
@@ -887,7 +913,7 @@ create_child:
 	}
 
 	/* check for expected invariant - should never trigger, just help
-	 * catching eariler subtle bugs
+	 * catching earlier subtle bugs
 	 */
 	WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
 		     (!mptcp_subflow_ctx(child) ||
@@ -899,7 +925,8 @@ dispose_child:
 	tcp_rsk(req)->drop_req = true;
 	inet_csk_prepare_for_destroy_sock(child);
 	tcp_done(child);
-	req->rsk_ops->send_reset(sk, skb);
+	reason = mptcp_get_rst_reason(skb);
+	req->rsk_ops->send_reset(sk, skb, reason);
 
 	/* The last child reference will be released by the caller */
 	return child;
@@ -1092,6 +1119,8 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
 	}
 
 	if (mpext->data_fin == 1) {
+		u64 data_fin_seq;
+
 		if (data_len == 1) {
 			bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
 								 mpext->dsn64);
@@ -1104,26 +1133,26 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
 				 */
 				skb_ext_del(skb, SKB_EXT_MPTCP);
 				return MAPPING_OK;
-			} else {
-				if (updated)
-					mptcp_schedule_work((struct sock *)msk);
-
-				return MAPPING_DATA_FIN;
 			}
-		} else {
-			u64 data_fin_seq = mpext->data_seq + data_len - 1;
-
-			/* If mpext->data_seq is a 32-bit value, data_fin_seq
-			 * must also be limited to 32 bits.
-			 */
-			if (!mpext->dsn64)
-				data_fin_seq &= GENMASK_ULL(31, 0);
+			if (updated)
+				mptcp_schedule_work((struct sock *)msk);
 
-			mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
-			pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d",
-				 data_fin_seq, mpext->dsn64);
+			return MAPPING_DATA_FIN;
 		}
+		data_fin_seq = mpext->data_seq + data_len - 1;
+
+		/* If mpext->data_seq is a 32-bit value, data_fin_seq must also
+		 * be limited to 32 bits.
+		 */
+		if (!mpext->dsn64)
+			data_fin_seq &= GENMASK_ULL(31, 0);
+
+		mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
+		pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d",
+			 data_fin_seq, mpext->dsn64);
+
 		/* Adjust for DATA_FIN using 1 byte of sequence space */
 		data_len--;
 	}
@@ -1234,7 +1263,7 @@ static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
 	unsigned long fail_tout;
 
-	/* greceful failure can happen only on the MPC subflow */
+	/* graceful failure can happen only on the MPC subflow */
 	if (WARN_ON_ONCE(ssk != READ_ONCE(msk->first)))
 		return;
 
@@ -1336,7 +1365,7 @@ reset:
 		tcp_set_state(ssk, TCP_CLOSE);
 		while ((skb = skb_peek(&ssk->sk_receive_queue)))
 			sk_eat_skb(ssk, skb);
-		tcp_send_active_reset(ssk, GFP_ATOMIC);
+		mptcp_send_active_reset_reason(ssk);
 		WRITE_ONCE(subflow->data_avail, false);
 		return false;
 	}
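One detail worth unpacking from the restructured DATA_FIN handling at the end of subflow.c: when the peer sent 32-bit data sequence numbers (!mpext->dsn64), the computed data_fin_seq must be truncated back into the 32-bit space. A worked standalone example; GENMASK_ULL here is a userspace stand-in producing the same masks as the kernel's <linux/bits.h> macro, and the input values are made up:

    #include <stdint.h>
    #include <stdio.h>

    /* stand-in for the kernel macro; GENMASK_ULL(31, 0) == 0xffffffff */
    #define GENMASK_ULL(h, l) \
            (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

    int main(void)
    {
        uint64_t data_seq = 0x00000001fffffff0ULL; /* example 64-bit expansion */
        uint64_t data_len = 0x20;
        uint64_t data_fin_seq = data_seq + data_len - 1; /* 0x20000000f */
        int dsn64 = 0;                                   /* peer used 32-bit DSNs */

        if (!dsn64)
            data_fin_seq &= GENMASK_ULL(31, 0);

        /* prints f: the DATA_FIN sequence wrapped within the 32-bit space */
        printf("data_fin_seq=%llx\n", (unsigned long long)data_fin_seq);
        return 0;
    }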