author      Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer   Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit      2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree        848558de17fb3008cdf4d861b01ac7781903ce39 /net/unix
parent      Initial commit. (diff)
download    linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.tar.xz
            linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.zip
Adding upstream version 6.1.76. (tag: upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'net/unix')
-rw-r--r--  net/unix/Kconfig             39
-rw-r--r--  net/unix/Makefile            15
-rw-r--r--  net/unix/af_unix.c         3785
-rw-r--r--  net/unix/diag.c             342
-rw-r--r--  net/unix/garbage.c          335
-rw-r--r--  net/unix/scm.c              154
-rw-r--r--  net/unix/scm.h               10
-rw-r--r--  net/unix/sysctl_net_unix.c   60
-rw-r--r--  net/unix/unix_bpf.c         198

9 files changed, 4938 insertions(+), 0 deletions(-)
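For orientation before the diff body: the files below implement the AF_UNIX protocol family that the Kconfig help text describes. A minimal userspace sketch (not part of the patch) exercising the simplest path through this code, a connected stream pair from socketpair(2):

	#include <stdio.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int main(void)
	{
		int fds[2];
		char buf[16];

		/* AF_UNIX stream pair: both ends are already connected */
		if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) < 0) {
			perror("socketpair");
			return 1;
		}
		write(fds[0], "ping", 4);
		ssize_t n = read(fds[1], buf, sizeof(buf));
		printf("got %zd bytes: %.4s\n", n, buf);
		close(fds[0]);
		close(fds[1]);
		return 0;
	}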
diff --git a/net/unix/Kconfig b/net/unix/Kconfig new file mode 100644 index 000000000..b7f811216 --- /dev/null +++ b/net/unix/Kconfig @@ -0,0 +1,39 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Unix Domain Sockets +# + +config UNIX + tristate "Unix domain sockets" + help + If you say Y here, you will include support for Unix domain sockets; + sockets are the standard Unix mechanism for establishing and + accessing network connections. Many commonly used programs such as + the X Window system and syslog use these sockets even if your + machine is not connected to any network. Unless you are working on + an embedded system or something similar, you therefore definitely + want to say Y here. + + To compile this driver as a module, choose M here: the module will be + called unix. Note that several important services won't work + correctly if you say M here and then neglect to load the module. + + Say Y unless you know what you are doing. + +config UNIX_SCM + bool + depends on UNIX + default y + +config AF_UNIX_OOB + bool + depends on UNIX + default y + +config UNIX_DIAG + tristate "UNIX: socket monitoring interface" + depends on UNIX + default n + help + Support for UNIX socket monitoring interface used by the ss tool. + If unsure, say Y. diff --git a/net/unix/Makefile b/net/unix/Makefile new file mode 100644 index 000000000..20491825b --- /dev/null +++ b/net/unix/Makefile @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Linux unix domain socket layer. +# + +obj-$(CONFIG_UNIX) += unix.o + +unix-y := af_unix.o garbage.o +unix-$(CONFIG_SYSCTL) += sysctl_net_unix.o +unix-$(CONFIG_BPF_SYSCALL) += unix_bpf.o + +obj-$(CONFIG_UNIX_DIAG) += unix_diag.o +unix_diag-y := diag.o + +obj-$(CONFIG_UNIX_SCM) += scm.o diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c new file mode 100644 index 000000000..be2ed7b0f --- /dev/null +++ b/net/unix/af_unix.c @@ -0,0 +1,3785 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * NET4: Implementation of BSD Unix domain sockets. + * + * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk> + * + * Fixes: + * Linus Torvalds : Assorted bug cures. + * Niibe Yutaka : async I/O support. + * Carsten Paeth : PF_UNIX check, address fixes. + * Alan Cox : Limit size of allocated blocks. + * Alan Cox : Fixed the stupid socketpair bug. + * Alan Cox : BSD compatibility fine tuning. + * Alan Cox : Fixed a bug in connect when interrupted. + * Alan Cox : Sorted out a proper draft version of + * file descriptor passing hacked up from + * Mike Shaver's work. + * Marty Leisner : Fixes to fd passing + * Nick Nevin : recvmsg bugfix. + * Alan Cox : Started proper garbage collector + * Heiko EiBfeldt : Missing verify_area check + * Alan Cox : Started POSIXisms + * Andreas Schwab : Replace inode by dentry for proper + * reference counting + * Kirk Petersen : Made this a module + * Christoph Rohland : Elegant non-blocking accept/connect algorithm. + * Lots of bug fixes. + * Alexey Kuznetosv : Repaired (I hope) bugs introduces + * by above two patches. + * Andrea Arcangeli : If possible we block in connect(2) + * if the max backlog of the listen socket + * is been reached. This won't break + * old apps and it will avoid huge amount + * of socks hashed (this for unix_gc() + * performances reasons). + * Security fix that limits the max + * number of socks to 2*max_files and + * the number of skb queueable in the + * dgram receiver. + * Artur Skawina : Hash function optimizations + * Alexey Kuznetsov : Full scale SMP. 
Lot of bugs are introduced 8) + * Malcolm Beattie : Set peercred for socketpair + * Michal Ostrowski : Module initialization cleanup. + * Arnaldo C. Melo : Remove MOD_{INC,DEC}_USE_COUNT, + * the core infrastructure is doing that + * for all net proto families now (2.5.69+) + * + * Known differences from reference BSD that was tested: + * + * [TO FIX] + * ECONNREFUSED is not returned from one end of a connected() socket to the + * other the moment one end closes. + * fstat() doesn't return st_dev=0, and give the blksize as high water mark + * and a fake inode identifier (nor the BSD first socket fstat twice bug). + * [NOT TO FIX] + * accept() returns a path name even if the connecting socket has closed + * in the meantime (BSD loses the path and gives up). + * accept() returns 0 length path for an unbound connector. BSD returns 16 + * and a null first byte in the path (but not for gethost/peername - BSD bug ??) + * socketpair(...SOCK_RAW..) doesn't panic the kernel. + * BSD af_unix apparently has connect forgetting to block properly. + * (need to check this with the POSIX spec in detail) + * + * Differences from 2.0.0-11-... (ANK) + * Bug fixes and improvements. + * - client shutdown killed server socket. + * - removed all useless cli/sti pairs. + * + * Semantic changes/extensions. + * - generic control message passing. + * - SCM_CREDENTIALS control message. + * - "Abstract" (not FS based) socket bindings. + * Abstract names are sequences of bytes (not zero terminated) + * started by 0, so that this name space does not intersect + * with BSD names. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/signal.h> +#include <linux/sched/signal.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/stat.h> +#include <linux/dcache.h> +#include <linux/namei.h> +#include <linux/socket.h> +#include <linux/un.h> +#include <linux/fcntl.h> +#include <linux/filter.h> +#include <linux/termios.h> +#include <linux/sockios.h> +#include <linux/net.h> +#include <linux/in.h> +#include <linux/fs.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <net/net_namespace.h> +#include <net/sock.h> +#include <net/tcp_states.h> +#include <net/af_unix.h> +#include <linux/proc_fs.h> +#include <linux/seq_file.h> +#include <net/scm.h> +#include <linux/init.h> +#include <linux/poll.h> +#include <linux/rtnetlink.h> +#include <linux/mount.h> +#include <net/checksum.h> +#include <linux/security.h> +#include <linux/freezer.h> +#include <linux/file.h> +#include <linux/btf_ids.h> + +#include "scm.h" + +static atomic_long_t unix_nr_socks; +static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2]; +static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2]; + +/* SMP locking strategy: + * hash table is protected with spinlock. + * each socket state is protected by separate spinlock. 
+ */ + +static unsigned int unix_unbound_hash(struct sock *sk) +{ + unsigned long hash = (unsigned long)sk; + + hash ^= hash >> 16; + hash ^= hash >> 8; + hash ^= sk->sk_type; + + return hash & UNIX_HASH_MOD; +} + +static unsigned int unix_bsd_hash(struct inode *i) +{ + return i->i_ino & UNIX_HASH_MOD; +} + +static unsigned int unix_abstract_hash(struct sockaddr_un *sunaddr, + int addr_len, int type) +{ + __wsum csum = csum_partial(sunaddr, addr_len, 0); + unsigned int hash; + + hash = (__force unsigned int)csum_fold(csum); + hash ^= hash >> 8; + hash ^= type; + + return UNIX_HASH_MOD + 1 + (hash & UNIX_HASH_MOD); +} + +static void unix_table_double_lock(struct net *net, + unsigned int hash1, unsigned int hash2) +{ + if (hash1 == hash2) { + spin_lock(&net->unx.table.locks[hash1]); + return; + } + + if (hash1 > hash2) + swap(hash1, hash2); + + spin_lock(&net->unx.table.locks[hash1]); + spin_lock_nested(&net->unx.table.locks[hash2], SINGLE_DEPTH_NESTING); +} + +static void unix_table_double_unlock(struct net *net, + unsigned int hash1, unsigned int hash2) +{ + if (hash1 == hash2) { + spin_unlock(&net->unx.table.locks[hash1]); + return; + } + + spin_unlock(&net->unx.table.locks[hash1]); + spin_unlock(&net->unx.table.locks[hash2]); +} + +#ifdef CONFIG_SECURITY_NETWORK +static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb) +{ + UNIXCB(skb).secid = scm->secid; +} + +static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) +{ + scm->secid = UNIXCB(skb).secid; +} + +static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb) +{ + return (scm->secid == UNIXCB(skb).secid); +} +#else +static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb) +{ } + +static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) +{ } + +static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb) +{ + return true; +} +#endif /* CONFIG_SECURITY_NETWORK */ + +static inline int unix_our_peer(struct sock *sk, struct sock *osk) +{ + return unix_peer(osk) == sk; +} + +static inline int unix_may_send(struct sock *sk, struct sock *osk) +{ + return unix_peer(osk) == NULL || unix_our_peer(sk, osk); +} + +static inline int unix_recvq_full(const struct sock *sk) +{ + return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog; +} + +static inline int unix_recvq_full_lockless(const struct sock *sk) +{ + return skb_queue_len_lockless(&sk->sk_receive_queue) > + READ_ONCE(sk->sk_max_ack_backlog); +} + +struct sock *unix_peer_get(struct sock *s) +{ + struct sock *peer; + + unix_state_lock(s); + peer = unix_peer(s); + if (peer) + sock_hold(peer); + unix_state_unlock(s); + return peer; +} +EXPORT_SYMBOL_GPL(unix_peer_get); + +static struct unix_address *unix_create_addr(struct sockaddr_un *sunaddr, + int addr_len) +{ + struct unix_address *addr; + + addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL); + if (!addr) + return NULL; + + refcount_set(&addr->refcnt, 1); + addr->len = addr_len; + memcpy(addr->name, sunaddr, addr_len); + + return addr; +} + +static inline void unix_release_addr(struct unix_address *addr) +{ + if (refcount_dec_and_test(&addr->refcnt)) + kfree(addr); +} + +/* + * Check unix socket name: + * - should be not zero length. + * - if started by not zero, should be NULL terminated (FS object) + * - if started by zero, it is abstract name. 
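unix_table_double_lock() above prevents ABBA deadlock between two hash buckets by always taking the lower-indexed lock first; spin_lock_nested() only tells lockdep the second acquisition is intentional. A userspace sketch of the same ordering idiom, assuming a hypothetical bucket_lock array:

	#include <pthread.h>

	#define NBUCKETS 256
	static pthread_mutex_t bucket_lock[NBUCKETS];

	/* Take two bucket locks in a global (index) order so that two
	 * threads locking the same pair with swapped arguments cannot
	 * deadlock against each other. */
	static void double_lock(unsigned int h1, unsigned int h2)
	{
		if (h1 == h2) {
			pthread_mutex_lock(&bucket_lock[h1]); /* same bucket: once */
			return;
		}
		if (h1 > h2) {
			unsigned int tmp = h1;
			h1 = h2;
			h2 = tmp;
		}
		pthread_mutex_lock(&bucket_lock[h1]);
		pthread_mutex_lock(&bucket_lock[h2]);
	}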
+ */ + +static int unix_validate_addr(struct sockaddr_un *sunaddr, int addr_len) +{ + if (addr_len <= offsetof(struct sockaddr_un, sun_path) || + addr_len > sizeof(*sunaddr)) + return -EINVAL; + + if (sunaddr->sun_family != AF_UNIX) + return -EINVAL; + + return 0; +} + +static void unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len) +{ + /* This may look like an off by one error but it is a bit more + * subtle. 108 is the longest valid AF_UNIX path for a binding. + * sun_path[108] doesn't as such exist. However in kernel space + * we are guaranteed that it is a valid memory location in our + * kernel address buffer because syscall functions always pass + * a pointer of struct sockaddr_storage which has a bigger buffer + * than 108. + */ + ((char *)sunaddr)[addr_len] = 0; +} + +static void __unix_remove_socket(struct sock *sk) +{ + sk_del_node_init(sk); +} + +static void __unix_insert_socket(struct net *net, struct sock *sk) +{ + DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk)); + sk_add_node(sk, &net->unx.table.buckets[sk->sk_hash]); +} + +static void __unix_set_addr_hash(struct net *net, struct sock *sk, + struct unix_address *addr, unsigned int hash) +{ + __unix_remove_socket(sk); + smp_store_release(&unix_sk(sk)->addr, addr); + + sk->sk_hash = hash; + __unix_insert_socket(net, sk); +} + +static void unix_remove_socket(struct net *net, struct sock *sk) +{ + spin_lock(&net->unx.table.locks[sk->sk_hash]); + __unix_remove_socket(sk); + spin_unlock(&net->unx.table.locks[sk->sk_hash]); +} + +static void unix_insert_unbound_socket(struct net *net, struct sock *sk) +{ + spin_lock(&net->unx.table.locks[sk->sk_hash]); + __unix_insert_socket(net, sk); + spin_unlock(&net->unx.table.locks[sk->sk_hash]); +} + +static void unix_insert_bsd_socket(struct sock *sk) +{ + spin_lock(&bsd_socket_locks[sk->sk_hash]); + sk_add_bind_node(sk, &bsd_socket_buckets[sk->sk_hash]); + spin_unlock(&bsd_socket_locks[sk->sk_hash]); +} + +static void unix_remove_bsd_socket(struct sock *sk) +{ + if (!hlist_unhashed(&sk->sk_bind_node)) { + spin_lock(&bsd_socket_locks[sk->sk_hash]); + __sk_del_bind_node(sk); + spin_unlock(&bsd_socket_locks[sk->sk_hash]); + + sk_node_init(&sk->sk_bind_node); + } +} + +static struct sock *__unix_find_socket_byname(struct net *net, + struct sockaddr_un *sunname, + int len, unsigned int hash) +{ + struct sock *s; + + sk_for_each(s, &net->unx.table.buckets[hash]) { + struct unix_sock *u = unix_sk(s); + + if (u->addr->len == len && + !memcmp(u->addr->name, sunname, len)) + return s; + } + return NULL; +} + +static inline struct sock *unix_find_socket_byname(struct net *net, + struct sockaddr_un *sunname, + int len, unsigned int hash) +{ + struct sock *s; + + spin_lock(&net->unx.table.locks[hash]); + s = __unix_find_socket_byname(net, sunname, len, hash); + if (s) + sock_hold(s); + spin_unlock(&net->unx.table.locks[hash]); + return s; +} + +static struct sock *unix_find_socket_byinode(struct inode *i) +{ + unsigned int hash = unix_bsd_hash(i); + struct sock *s; + + spin_lock(&bsd_socket_locks[hash]); + sk_for_each_bound(s, &bsd_socket_buckets[hash]) { + struct dentry *dentry = unix_sk(s)->path.dentry; + + if (dentry && d_backing_inode(dentry) == i) { + sock_hold(s); + spin_unlock(&bsd_socket_locks[hash]); + return s; + } + } + spin_unlock(&bsd_socket_locks[hash]); + return NULL; +} + +/* Support code for asymmetrically connected dgram sockets + * + * If a datagram socket is connected to a socket not itself connected + * to the first socket (eg, /dev/log), clients may only enqueue more + * messages 
if the present receive queue of the server socket is not + * "too large". This means there's a second writeability condition + * poll and sendmsg need to test. The dgram recv code will do a wake + * up on the peer_wait wait queue of a socket upon reception of a + * datagram which needs to be propagated to sleeping would-be writers + * since these might not have sent anything so far. This can't be + * accomplished via poll_wait because the lifetime of the server + * socket might be less than that of its clients if these break their + * association with it or if the server socket is closed while clients + * are still connected to it and there's no way to inform "a polling + * implementation" that it should let go of a certain wait queue + * + * In order to propagate a wake up, a wait_queue_entry_t of the client + * socket is enqueued on the peer_wait queue of the server socket + * whose wake function does a wake_up on the ordinary client socket + * wait queue. This connection is established whenever a write (or + * poll for write) hit the flow control condition and broken when the + * association to the server socket is dissolved or after a wake up + * was relayed. + */ + +static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags, + void *key) +{ + struct unix_sock *u; + wait_queue_head_t *u_sleep; + + u = container_of(q, struct unix_sock, peer_wake); + + __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait, + q); + u->peer_wake.private = NULL; + + /* relaying can only happen while the wq still exists */ + u_sleep = sk_sleep(&u->sk); + if (u_sleep) + wake_up_interruptible_poll(u_sleep, key_to_poll(key)); + + return 0; +} + +static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other) +{ + struct unix_sock *u, *u_other; + int rc; + + u = unix_sk(sk); + u_other = unix_sk(other); + rc = 0; + spin_lock(&u_other->peer_wait.lock); + + if (!u->peer_wake.private) { + u->peer_wake.private = other; + __add_wait_queue(&u_other->peer_wait, &u->peer_wake); + + rc = 1; + } + + spin_unlock(&u_other->peer_wait.lock); + return rc; +} + +static void unix_dgram_peer_wake_disconnect(struct sock *sk, + struct sock *other) +{ + struct unix_sock *u, *u_other; + + u = unix_sk(sk); + u_other = unix_sk(other); + spin_lock(&u_other->peer_wait.lock); + + if (u->peer_wake.private == other) { + __remove_wait_queue(&u_other->peer_wait, &u->peer_wake); + u->peer_wake.private = NULL; + } + + spin_unlock(&u_other->peer_wait.lock); +} + +static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk, + struct sock *other) +{ + unix_dgram_peer_wake_disconnect(sk, other); + wake_up_interruptible_poll(sk_sleep(sk), + EPOLLOUT | + EPOLLWRNORM | + EPOLLWRBAND); +} + +/* preconditions: + * - unix_peer(sk) == other + * - association is stable + */ +static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other) +{ + int connected; + + connected = unix_dgram_peer_wake_connect(sk, other); + + /* If other is SOCK_DEAD, we want to make sure we signal + * POLLOUT, such that a subsequent write() can get a + * -ECONNREFUSED. Otherwise, if we haven't queued any skbs + * to other and its full, we will hang waiting for POLLOUT. 
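The relay works by giving the client's wait_queue_entry_t a custom wake function: when the server's peer_wait queue is woken (a datagram was consumed), the entry forwards the wakeup to the client's own wait queue. A condensed kernel-style sketch of that pattern; struct relay and the function names here are illustrative, not from this file:

	#include <linux/wait.h>
	#include <linux/poll.h>

	struct relay {
		wait_queue_entry_t entry;  /* enqueued on the peer's queue */
		wait_queue_head_t *target; /* queue that should be woken */
	};

	/* Wake callback: instead of waking a task directly, propagate
	 * the event (and its poll mask) to another wait queue. */
	static int relay_wake(wait_queue_entry_t *q, unsigned int mode,
			      int flags, void *key)
	{
		struct relay *r = container_of(q, struct relay, entry);

		wake_up_interruptible_poll(r->target, key_to_poll(key));
		return 0;
	}

	static void relay_attach(struct relay *r, wait_queue_head_t *peer,
				 wait_queue_head_t *target)
	{
		r->target = target;
		init_waitqueue_func_entry(&r->entry, relay_wake);
		add_wait_queue(peer, &r->entry);
	}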
+ */ + if (unix_recvq_full_lockless(other) && !sock_flag(other, SOCK_DEAD)) + return 1; + + if (connected) + unix_dgram_peer_wake_disconnect(sk, other); + + return 0; +} + +static int unix_writable(const struct sock *sk) +{ + return sk->sk_state != TCP_LISTEN && + (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf; +} + +static void unix_write_space(struct sock *sk) +{ + struct socket_wq *wq; + + rcu_read_lock(); + if (unix_writable(sk)) { + wq = rcu_dereference(sk->sk_wq); + if (skwq_has_sleeper(wq)) + wake_up_interruptible_sync_poll(&wq->wait, + EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND); + sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); + } + rcu_read_unlock(); +} + +/* When dgram socket disconnects (or changes its peer), we clear its receive + * queue of packets arrived from previous peer. First, it allows to do + * flow control based only on wmem_alloc; second, sk connected to peer + * may receive messages only from that peer. */ +static void unix_dgram_disconnected(struct sock *sk, struct sock *other) +{ + if (!skb_queue_empty(&sk->sk_receive_queue)) { + skb_queue_purge(&sk->sk_receive_queue); + wake_up_interruptible_all(&unix_sk(sk)->peer_wait); + + /* If one link of bidirectional dgram pipe is disconnected, + * we signal error. Messages are lost. Do not make this, + * when peer was not connected to us. + */ + if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) { + other->sk_err = ECONNRESET; + sk_error_report(other); + } + } + other->sk_state = TCP_CLOSE; +} + +static void unix_sock_destructor(struct sock *sk) +{ + struct unix_sock *u = unix_sk(sk); + + skb_queue_purge(&sk->sk_receive_queue); + + DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc)); + DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk)); + DEBUG_NET_WARN_ON_ONCE(sk->sk_socket); + if (!sock_flag(sk, SOCK_DEAD)) { + pr_info("Attempt to release alive unix socket: %p\n", sk); + return; + } + + if (u->addr) + unix_release_addr(u->addr); + + atomic_long_dec(&unix_nr_socks); + sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); +#ifdef UNIX_REFCNT_DEBUG + pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk, + atomic_long_read(&unix_nr_socks)); +#endif +} + +static void unix_release_sock(struct sock *sk, int embrion) +{ + struct unix_sock *u = unix_sk(sk); + struct sock *skpair; + struct sk_buff *skb; + struct path path; + int state; + + unix_remove_socket(sock_net(sk), sk); + unix_remove_bsd_socket(sk); + + /* Clear state */ + unix_state_lock(sk); + sock_orphan(sk); + WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); + path = u->path; + u->path.dentry = NULL; + u->path.mnt = NULL; + state = sk->sk_state; + sk->sk_state = TCP_CLOSE; + + skpair = unix_peer(sk); + unix_peer(sk) = NULL; + + unix_state_unlock(sk); + +#if IS_ENABLED(CONFIG_AF_UNIX_OOB) + if (u->oob_skb) { + kfree_skb(u->oob_skb); + u->oob_skb = NULL; + } +#endif + + wake_up_interruptible_all(&u->peer_wait); + + if (skpair != NULL) { + if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) { + unix_state_lock(skpair); + /* No more writes */ + WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK); + if (!skb_queue_empty(&sk->sk_receive_queue) || embrion) + skpair->sk_err = ECONNRESET; + unix_state_unlock(skpair); + skpair->sk_state_change(skpair); + sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP); + } + + unix_dgram_peer_wake_disconnect(sk, skpair); + sock_put(skpair); /* It may now die */ + } + + /* Try to flush out this socket. 
Throw out buffers at least */ + + while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { + if (state == TCP_LISTEN) + unix_release_sock(skb->sk, 1); + /* passed fds are erased in the kfree_skb hook */ + UNIXCB(skb).consumed = skb->len; + kfree_skb(skb); + } + + if (path.dentry) + path_put(&path); + + sock_put(sk); + + /* ---- Socket is dead now and most probably destroyed ---- */ + + /* + * Fixme: BSD difference: In BSD all sockets connected to us get + * ECONNRESET and we die on the spot. In Linux we behave + * like files and pipes do and wait for the last + * dereference. + * + * Can't we simply set sock->err? + * + * What the above comment does talk about? --ANK(980817) + */ + + if (READ_ONCE(unix_tot_inflight)) + unix_gc(); /* Garbage collect fds */ +} + +static void init_peercred(struct sock *sk) +{ + const struct cred *old_cred; + struct pid *old_pid; + + spin_lock(&sk->sk_peer_lock); + old_pid = sk->sk_peer_pid; + old_cred = sk->sk_peer_cred; + sk->sk_peer_pid = get_pid(task_tgid(current)); + sk->sk_peer_cred = get_current_cred(); + spin_unlock(&sk->sk_peer_lock); + + put_pid(old_pid); + put_cred(old_cred); +} + +static void copy_peercred(struct sock *sk, struct sock *peersk) +{ + const struct cred *old_cred; + struct pid *old_pid; + + if (sk < peersk) { + spin_lock(&sk->sk_peer_lock); + spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING); + } else { + spin_lock(&peersk->sk_peer_lock); + spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING); + } + old_pid = sk->sk_peer_pid; + old_cred = sk->sk_peer_cred; + sk->sk_peer_pid = get_pid(peersk->sk_peer_pid); + sk->sk_peer_cred = get_cred(peersk->sk_peer_cred); + + spin_unlock(&sk->sk_peer_lock); + spin_unlock(&peersk->sk_peer_lock); + + put_pid(old_pid); + put_cred(old_cred); +} + +static int unix_listen(struct socket *sock, int backlog) +{ + int err; + struct sock *sk = sock->sk; + struct unix_sock *u = unix_sk(sk); + + err = -EOPNOTSUPP; + if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET) + goto out; /* Only stream/seqpacket sockets accept */ + err = -EINVAL; + if (!u->addr) + goto out; /* No listens on an unbound socket */ + unix_state_lock(sk); + if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN) + goto out_unlock; + if (backlog > sk->sk_max_ack_backlog) + wake_up_interruptible_all(&u->peer_wait); + sk->sk_max_ack_backlog = backlog; + sk->sk_state = TCP_LISTEN; + /* set credentials so connect can copy them */ + init_peercred(sk); + err = 0; + +out_unlock: + unix_state_unlock(sk); +out: + return err; +} + +static int unix_release(struct socket *); +static int unix_bind(struct socket *, struct sockaddr *, int); +static int unix_stream_connect(struct socket *, struct sockaddr *, + int addr_len, int flags); +static int unix_socketpair(struct socket *, struct socket *); +static int unix_accept(struct socket *, struct socket *, int, bool); +static int unix_getname(struct socket *, struct sockaddr *, int); +static __poll_t unix_poll(struct file *, struct socket *, poll_table *); +static __poll_t unix_dgram_poll(struct file *, struct socket *, + poll_table *); +static int unix_ioctl(struct socket *, unsigned int, unsigned long); +#ifdef CONFIG_COMPAT +static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); +#endif +static int unix_shutdown(struct socket *, int); +static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t); +static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int); +static ssize_t unix_stream_sendpage(struct socket *, 
struct page *, int offset, + size_t size, int flags); +static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos, + struct pipe_inode_info *, size_t size, + unsigned int flags); +static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t); +static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int); +static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor); +static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor); +static int unix_dgram_connect(struct socket *, struct sockaddr *, + int, int); +static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t); +static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t, + int); + +static int unix_set_peek_off(struct sock *sk, int val) +{ + struct unix_sock *u = unix_sk(sk); + + if (mutex_lock_interruptible(&u->iolock)) + return -EINTR; + + WRITE_ONCE(sk->sk_peek_off, val); + mutex_unlock(&u->iolock); + + return 0; +} + +#ifdef CONFIG_PROC_FS +static int unix_count_nr_fds(struct sock *sk) +{ + struct sk_buff *skb; + struct unix_sock *u; + int nr_fds = 0; + + spin_lock(&sk->sk_receive_queue.lock); + skb = skb_peek(&sk->sk_receive_queue); + while (skb) { + u = unix_sk(skb->sk); + nr_fds += atomic_read(&u->scm_stat.nr_fds); + skb = skb_peek_next(skb, &sk->sk_receive_queue); + } + spin_unlock(&sk->sk_receive_queue.lock); + + return nr_fds; +} + +static void unix_show_fdinfo(struct seq_file *m, struct socket *sock) +{ + struct sock *sk = sock->sk; + struct unix_sock *u; + int nr_fds; + + if (sk) { + u = unix_sk(sk); + if (sock->type == SOCK_DGRAM) { + nr_fds = atomic_read(&u->scm_stat.nr_fds); + goto out_print; + } + + unix_state_lock(sk); + if (sk->sk_state != TCP_LISTEN) + nr_fds = atomic_read(&u->scm_stat.nr_fds); + else + nr_fds = unix_count_nr_fds(sk); + unix_state_unlock(sk); +out_print: + seq_printf(m, "scm_fds: %u\n", nr_fds); + } +} +#else +#define unix_show_fdinfo NULL +#endif + +static const struct proto_ops unix_stream_ops = { + .family = PF_UNIX, + .owner = THIS_MODULE, + .release = unix_release, + .bind = unix_bind, + .connect = unix_stream_connect, + .socketpair = unix_socketpair, + .accept = unix_accept, + .getname = unix_getname, + .poll = unix_poll, + .ioctl = unix_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = unix_compat_ioctl, +#endif + .listen = unix_listen, + .shutdown = unix_shutdown, + .sendmsg = unix_stream_sendmsg, + .recvmsg = unix_stream_recvmsg, + .read_skb = unix_stream_read_skb, + .mmap = sock_no_mmap, + .sendpage = unix_stream_sendpage, + .splice_read = unix_stream_splice_read, + .set_peek_off = unix_set_peek_off, + .show_fdinfo = unix_show_fdinfo, +}; + +static const struct proto_ops unix_dgram_ops = { + .family = PF_UNIX, + .owner = THIS_MODULE, + .release = unix_release, + .bind = unix_bind, + .connect = unix_dgram_connect, + .socketpair = unix_socketpair, + .accept = sock_no_accept, + .getname = unix_getname, + .poll = unix_dgram_poll, + .ioctl = unix_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = unix_compat_ioctl, +#endif + .listen = sock_no_listen, + .shutdown = unix_shutdown, + .sendmsg = unix_dgram_sendmsg, + .read_skb = unix_read_skb, + .recvmsg = unix_dgram_recvmsg, + .mmap = sock_no_mmap, + .sendpage = sock_no_sendpage, + .set_peek_off = unix_set_peek_off, + .show_fdinfo = unix_show_fdinfo, +}; + +static const struct proto_ops unix_seqpacket_ops = { + .family = PF_UNIX, + .owner = THIS_MODULE, + .release = unix_release, + .bind = unix_bind, + .connect = unix_stream_connect, + .socketpair = 
unix_socketpair, + .accept = unix_accept, + .getname = unix_getname, + .poll = unix_dgram_poll, + .ioctl = unix_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = unix_compat_ioctl, +#endif + .listen = unix_listen, + .shutdown = unix_shutdown, + .sendmsg = unix_seqpacket_sendmsg, + .recvmsg = unix_seqpacket_recvmsg, + .mmap = sock_no_mmap, + .sendpage = sock_no_sendpage, + .set_peek_off = unix_set_peek_off, + .show_fdinfo = unix_show_fdinfo, +}; + +static void unix_close(struct sock *sk, long timeout) +{ + /* Nothing to do here, unix socket does not need a ->close(). + * This is merely for sockmap. + */ +} + +static void unix_unhash(struct sock *sk) +{ + /* Nothing to do here, unix socket does not need a ->unhash(). + * This is merely for sockmap. + */ +} + +struct proto unix_dgram_proto = { + .name = "UNIX", + .owner = THIS_MODULE, + .obj_size = sizeof(struct unix_sock), + .close = unix_close, +#ifdef CONFIG_BPF_SYSCALL + .psock_update_sk_prot = unix_dgram_bpf_update_proto, +#endif +}; + +struct proto unix_stream_proto = { + .name = "UNIX-STREAM", + .owner = THIS_MODULE, + .obj_size = sizeof(struct unix_sock), + .close = unix_close, + .unhash = unix_unhash, +#ifdef CONFIG_BPF_SYSCALL + .psock_update_sk_prot = unix_stream_bpf_update_proto, +#endif +}; + +static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type) +{ + struct unix_sock *u; + struct sock *sk; + int err; + + atomic_long_inc(&unix_nr_socks); + if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) { + err = -ENFILE; + goto err; + } + + if (type == SOCK_STREAM) + sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_stream_proto, kern); + else /*dgram and seqpacket */ + sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern); + + if (!sk) { + err = -ENOMEM; + goto err; + } + + sock_init_data(sock, sk); + + sk->sk_hash = unix_unbound_hash(sk); + sk->sk_allocation = GFP_KERNEL_ACCOUNT; + sk->sk_write_space = unix_write_space; + sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen; + sk->sk_destruct = unix_sock_destructor; + u = unix_sk(sk); + u->path.dentry = NULL; + u->path.mnt = NULL; + spin_lock_init(&u->lock); + atomic_long_set(&u->inflight, 0); + INIT_LIST_HEAD(&u->link); + mutex_init(&u->iolock); /* single task reading lock */ + mutex_init(&u->bindlock); /* single task binding lock */ + init_waitqueue_head(&u->peer_wait); + init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay); + memset(&u->scm_stat, 0, sizeof(struct scm_stat)); + unix_insert_unbound_socket(net, sk); + + sock_prot_inuse_add(net, sk->sk_prot, 1); + + return sk; + +err: + atomic_long_dec(&unix_nr_socks); + return ERR_PTR(err); +} + +static int unix_create(struct net *net, struct socket *sock, int protocol, + int kern) +{ + struct sock *sk; + + if (protocol && protocol != PF_UNIX) + return -EPROTONOSUPPORT; + + sock->state = SS_UNCONNECTED; + + switch (sock->type) { + case SOCK_STREAM: + sock->ops = &unix_stream_ops; + break; + /* + * Believe it or not BSD has AF_UNIX, SOCK_RAW though + * nothing uses it. 
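As the comment notes, BSD compatibility demands that SOCK_RAW be accepted; the case below silently coerces it to SOCK_DGRAM. A quick userspace check of that behavior (sketch):

	#include <stdio.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = socket(AF_UNIX, SOCK_RAW, 0); /* accepted, coerced */
		int type;
		socklen_t len = sizeof(type);

		if (fd < 0) {
			perror("socket");
			return 1;
		}
		getsockopt(fd, SOL_SOCKET, SO_TYPE, &type, &len);
		printf("SO_TYPE = %d (SOCK_DGRAM = %d)\n", type, SOCK_DGRAM);
		close(fd);
		return 0;
	}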
+ */ + case SOCK_RAW: + sock->type = SOCK_DGRAM; + fallthrough; + case SOCK_DGRAM: + sock->ops = &unix_dgram_ops; + break; + case SOCK_SEQPACKET: + sock->ops = &unix_seqpacket_ops; + break; + default: + return -ESOCKTNOSUPPORT; + } + + sk = unix_create1(net, sock, kern, sock->type); + if (IS_ERR(sk)) + return PTR_ERR(sk); + + return 0; +} + +static int unix_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + + if (!sk) + return 0; + + sk->sk_prot->close(sk, 0); + unix_release_sock(sk, 0); + sock->sk = NULL; + + return 0; +} + +static struct sock *unix_find_bsd(struct sockaddr_un *sunaddr, int addr_len, + int type) +{ + struct inode *inode; + struct path path; + struct sock *sk; + int err; + + unix_mkname_bsd(sunaddr, addr_len); + err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path); + if (err) + goto fail; + + err = path_permission(&path, MAY_WRITE); + if (err) + goto path_put; + + err = -ECONNREFUSED; + inode = d_backing_inode(path.dentry); + if (!S_ISSOCK(inode->i_mode)) + goto path_put; + + sk = unix_find_socket_byinode(inode); + if (!sk) + goto path_put; + + err = -EPROTOTYPE; + if (sk->sk_type == type) + touch_atime(&path); + else + goto sock_put; + + path_put(&path); + + return sk; + +sock_put: + sock_put(sk); +path_put: + path_put(&path); +fail: + return ERR_PTR(err); +} + +static struct sock *unix_find_abstract(struct net *net, + struct sockaddr_un *sunaddr, + int addr_len, int type) +{ + unsigned int hash = unix_abstract_hash(sunaddr, addr_len, type); + struct dentry *dentry; + struct sock *sk; + + sk = unix_find_socket_byname(net, sunaddr, addr_len, hash); + if (!sk) + return ERR_PTR(-ECONNREFUSED); + + dentry = unix_sk(sk)->path.dentry; + if (dentry) + touch_atime(&unix_sk(sk)->path); + + return sk; +} + +static struct sock *unix_find_other(struct net *net, + struct sockaddr_un *sunaddr, + int addr_len, int type) +{ + struct sock *sk; + + if (sunaddr->sun_path[0]) + sk = unix_find_bsd(sunaddr, addr_len, type); + else + sk = unix_find_abstract(net, sunaddr, addr_len, type); + + return sk; +} + +static int unix_autobind(struct sock *sk) +{ + unsigned int new_hash, old_hash = sk->sk_hash; + struct unix_sock *u = unix_sk(sk); + struct net *net = sock_net(sk); + struct unix_address *addr; + u32 lastnum, ordernum; + int err; + + err = mutex_lock_interruptible(&u->bindlock); + if (err) + return err; + + if (u->addr) + goto out; + + err = -ENOMEM; + addr = kzalloc(sizeof(*addr) + + offsetof(struct sockaddr_un, sun_path) + 16, GFP_KERNEL); + if (!addr) + goto out; + + addr->len = offsetof(struct sockaddr_un, sun_path) + 6; + addr->name->sun_family = AF_UNIX; + refcount_set(&addr->refcnt, 1); + + ordernum = get_random_u32(); + lastnum = ordernum & 0xFFFFF; +retry: + ordernum = (ordernum + 1) & 0xFFFFF; + sprintf(addr->name->sun_path + 1, "%05x", ordernum); + + new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type); + unix_table_double_lock(net, old_hash, new_hash); + + if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash)) { + unix_table_double_unlock(net, old_hash, new_hash); + + /* __unix_find_socket_byname() may take long time if many names + * are already in use. + */ + cond_resched(); + + if (ordernum == lastnum) { + /* Give up if all names seems to be in use. 
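unix_autobind() above manufactures an abstract address: a leading NUL followed by five hex digits taken from the retry counter. From userspace it is triggered by binding with only the address family, and getsockname(2) shows the kernel-chosen name (sketch):

	#include <stdio.h>
	#include <stddef.h>
	#include <sys/socket.h>
	#include <sys/un.h>
	#include <unistd.h>

	int main(void)
	{
		struct sockaddr_un addr = { .sun_family = AF_UNIX };
		socklen_t len = offsetof(struct sockaddr_un, sun_path);
		int fd = socket(AF_UNIX, SOCK_DGRAM, 0);

		/* addr_len == offsetof(..., sun_path) triggers autobind */
		bind(fd, (struct sockaddr *)&addr, len);

		len = sizeof(addr);
		getsockname(fd, (struct sockaddr *)&addr, &len);
		/* sun_path[0] is '\0'; the five hex digits follow it */
		printf("autobound to \"\\0%.5s\"\n", addr.sun_path + 1);
		close(fd);
		return 0;
	}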
*/ + err = -ENOSPC; + unix_release_addr(addr); + goto out; + } + + goto retry; + } + + __unix_set_addr_hash(net, sk, addr, new_hash); + unix_table_double_unlock(net, old_hash, new_hash); + err = 0; + +out: mutex_unlock(&u->bindlock); + return err; +} + +static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr, + int addr_len) +{ + umode_t mode = S_IFSOCK | + (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask()); + unsigned int new_hash, old_hash = sk->sk_hash; + struct unix_sock *u = unix_sk(sk); + struct net *net = sock_net(sk); + struct user_namespace *ns; // barf... + struct unix_address *addr; + struct dentry *dentry; + struct path parent; + int err; + + unix_mkname_bsd(sunaddr, addr_len); + addr_len = strlen(sunaddr->sun_path) + + offsetof(struct sockaddr_un, sun_path) + 1; + + addr = unix_create_addr(sunaddr, addr_len); + if (!addr) + return -ENOMEM; + + /* + * Get the parent directory, calculate the hash for last + * component. + */ + dentry = kern_path_create(AT_FDCWD, addr->name->sun_path, &parent, 0); + if (IS_ERR(dentry)) { + err = PTR_ERR(dentry); + goto out; + } + + /* + * All right, let's create it. + */ + ns = mnt_user_ns(parent.mnt); + err = security_path_mknod(&parent, dentry, mode, 0); + if (!err) + err = vfs_mknod(ns, d_inode(parent.dentry), dentry, mode, 0); + if (err) + goto out_path; + err = mutex_lock_interruptible(&u->bindlock); + if (err) + goto out_unlink; + if (u->addr) + goto out_unlock; + + new_hash = unix_bsd_hash(d_backing_inode(dentry)); + unix_table_double_lock(net, old_hash, new_hash); + u->path.mnt = mntget(parent.mnt); + u->path.dentry = dget(dentry); + __unix_set_addr_hash(net, sk, addr, new_hash); + unix_table_double_unlock(net, old_hash, new_hash); + unix_insert_bsd_socket(sk); + mutex_unlock(&u->bindlock); + done_path_create(&parent, dentry); + return 0; + +out_unlock: + mutex_unlock(&u->bindlock); + err = -EINVAL; +out_unlink: + /* failed after successful mknod? unlink what we'd created... */ + vfs_unlink(ns, d_inode(parent.dentry), dentry, NULL); +out_path: + done_path_create(&parent, dentry); +out: + unix_release_addr(addr); + return err == -EEXIST ? 
-EADDRINUSE : err; +} + +static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr, + int addr_len) +{ + unsigned int new_hash, old_hash = sk->sk_hash; + struct unix_sock *u = unix_sk(sk); + struct net *net = sock_net(sk); + struct unix_address *addr; + int err; + + addr = unix_create_addr(sunaddr, addr_len); + if (!addr) + return -ENOMEM; + + err = mutex_lock_interruptible(&u->bindlock); + if (err) + goto out; + + if (u->addr) { + err = -EINVAL; + goto out_mutex; + } + + new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type); + unix_table_double_lock(net, old_hash, new_hash); + + if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash)) + goto out_spin; + + __unix_set_addr_hash(net, sk, addr, new_hash); + unix_table_double_unlock(net, old_hash, new_hash); + mutex_unlock(&u->bindlock); + return 0; + +out_spin: + unix_table_double_unlock(net, old_hash, new_hash); + err = -EADDRINUSE; +out_mutex: + mutex_unlock(&u->bindlock); +out: + unix_release_addr(addr); + return err; +} + +static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) +{ + struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr; + struct sock *sk = sock->sk; + int err; + + if (addr_len == offsetof(struct sockaddr_un, sun_path) && + sunaddr->sun_family == AF_UNIX) + return unix_autobind(sk); + + err = unix_validate_addr(sunaddr, addr_len); + if (err) + return err; + + if (sunaddr->sun_path[0]) + err = unix_bind_bsd(sk, sunaddr, addr_len); + else + err = unix_bind_abstract(sk, sunaddr, addr_len); + + return err; +} + +static void unix_state_double_lock(struct sock *sk1, struct sock *sk2) +{ + if (unlikely(sk1 == sk2) || !sk2) { + unix_state_lock(sk1); + return; + } + if (sk1 < sk2) { + unix_state_lock(sk1); + unix_state_lock_nested(sk2); + } else { + unix_state_lock(sk2); + unix_state_lock_nested(sk1); + } +} + +static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2) +{ + if (unlikely(sk1 == sk2) || !sk2) { + unix_state_unlock(sk1); + return; + } + unix_state_unlock(sk1); + unix_state_unlock(sk2); +} + +static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr, + int alen, int flags) +{ + struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr; + struct sock *sk = sock->sk; + struct sock *other; + int err; + + err = -EINVAL; + if (alen < offsetofend(struct sockaddr, sa_family)) + goto out; + + if (addr->sa_family != AF_UNSPEC) { + err = unix_validate_addr(sunaddr, alen); + if (err) + goto out; + + if (test_bit(SOCK_PASSCRED, &sock->flags) && + !unix_sk(sk)->addr) { + err = unix_autobind(sk); + if (err) + goto out; + } + +restart: + other = unix_find_other(sock_net(sk), sunaddr, alen, sock->type); + if (IS_ERR(other)) { + err = PTR_ERR(other); + goto out; + } + + unix_state_double_lock(sk, other); + + /* Apparently VFS overslept socket death. Retry. */ + if (sock_flag(other, SOCK_DEAD)) { + unix_state_double_unlock(sk, other); + sock_put(other); + goto restart; + } + + err = -EPERM; + if (!unix_may_send(sk, other)) + goto out_unlock; + + err = security_unix_may_send(sk->sk_socket, other->sk_socket); + if (err) + goto out_unlock; + + sk->sk_state = other->sk_state = TCP_ESTABLISHED; + } else { + /* + * 1003.1g breaking connected state with AF_UNSPEC + */ + other = NULL; + unix_state_double_lock(sk, other); + } + + /* + * If it was connected, reconnect. 
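unix_bind_abstract() above handles names whose first byte is NUL: they live in the abstract namespace, are delimited by the address length rather than a terminator, have no filesystem node, and vanish when the socket is closed. The userspace convention, with a hypothetical name "example":

	#include <stddef.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <sys/un.h>

	int bind_abstract(int fd)
	{
		struct sockaddr_un addr;
		static const char name[] = "example"; /* hypothetical */
		socklen_t len;

		memset(&addr, 0, sizeof(addr));
		addr.sun_family = AF_UNIX;
		/* sun_path[0] stays '\0'; the name follows and is NOT
		 * NUL-terminated -- the address length delimits it */
		memcpy(addr.sun_path + 1, name, sizeof(name) - 1);

		len = offsetof(struct sockaddr_un, sun_path)
		      + 1 + (sizeof(name) - 1);
		return bind(fd, (struct sockaddr *)&addr, len);
	}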
+ */ + if (unix_peer(sk)) { + struct sock *old_peer = unix_peer(sk); + + unix_peer(sk) = other; + if (!other) + sk->sk_state = TCP_CLOSE; + unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer); + + unix_state_double_unlock(sk, other); + + if (other != old_peer) + unix_dgram_disconnected(sk, old_peer); + sock_put(old_peer); + } else { + unix_peer(sk) = other; + unix_state_double_unlock(sk, other); + } + + return 0; + +out_unlock: + unix_state_double_unlock(sk, other); + sock_put(other); +out: + return err; +} + +static long unix_wait_for_peer(struct sock *other, long timeo) + __releases(&unix_sk(other)->lock) +{ + struct unix_sock *u = unix_sk(other); + int sched; + DEFINE_WAIT(wait); + + prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE); + + sched = !sock_flag(other, SOCK_DEAD) && + !(other->sk_shutdown & RCV_SHUTDOWN) && + unix_recvq_full_lockless(other); + + unix_state_unlock(other); + + if (sched) + timeo = schedule_timeout(timeo); + + finish_wait(&u->peer_wait, &wait); + return timeo; +} + +static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, + int addr_len, int flags) +{ + struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr; + struct sock *sk = sock->sk, *newsk = NULL, *other = NULL; + struct unix_sock *u = unix_sk(sk), *newu, *otheru; + struct net *net = sock_net(sk); + struct sk_buff *skb = NULL; + long timeo; + int err; + int st; + + err = unix_validate_addr(sunaddr, addr_len); + if (err) + goto out; + + if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr) { + err = unix_autobind(sk); + if (err) + goto out; + } + + timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); + + /* First of all allocate resources. + If we will make it after state is locked, + we will have to recheck all again in any case. + */ + + /* create new sock for complete connection */ + newsk = unix_create1(net, NULL, 0, sock->type); + if (IS_ERR(newsk)) { + err = PTR_ERR(newsk); + newsk = NULL; + goto out; + } + + err = -ENOMEM; + + /* Allocate skb for sending to listening sock */ + skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL); + if (skb == NULL) + goto out; + +restart: + /* Find listening sock. */ + other = unix_find_other(net, sunaddr, addr_len, sk->sk_type); + if (IS_ERR(other)) { + err = PTR_ERR(other); + other = NULL; + goto out; + } + + /* Latch state of peer */ + unix_state_lock(other); + + /* Apparently VFS overslept socket death. Retry. */ + if (sock_flag(other, SOCK_DEAD)) { + unix_state_unlock(other); + sock_put(other); + goto restart; + } + + err = -ECONNREFUSED; + if (other->sk_state != TCP_LISTEN) + goto out_unlock; + if (other->sk_shutdown & RCV_SHUTDOWN) + goto out_unlock; + + if (unix_recvq_full(other)) { + err = -EAGAIN; + if (!timeo) + goto out_unlock; + + timeo = unix_wait_for_peer(other, timeo); + + err = sock_intr_errno(timeo); + if (signal_pending(current)) + goto out; + sock_put(other); + goto restart; + } + + /* Latch our state. + + It is tricky place. We need to grab our state lock and cannot + drop lock on peer. It is dangerous because deadlock is + possible. Connect to self case and simultaneous + attempt to connect are eliminated by checking socket + state. other is TCP_LISTEN, if sk is TCP_LISTEN we + check this before attempt to grab lock. + + Well, and we have to recheck the state after socket locked. + */ + st = sk->sk_state; + + switch (st) { + case TCP_CLOSE: + /* This is ok... 
continue with connect */ + break; + case TCP_ESTABLISHED: + /* Socket is already connected */ + err = -EISCONN; + goto out_unlock; + default: + err = -EINVAL; + goto out_unlock; + } + + unix_state_lock_nested(sk); + + if (sk->sk_state != st) { + unix_state_unlock(sk); + unix_state_unlock(other); + sock_put(other); + goto restart; + } + + err = security_unix_stream_connect(sk, other, newsk); + if (err) { + unix_state_unlock(sk); + goto out_unlock; + } + + /* The way is open! Fastly set all the necessary fields... */ + + sock_hold(sk); + unix_peer(newsk) = sk; + newsk->sk_state = TCP_ESTABLISHED; + newsk->sk_type = sk->sk_type; + init_peercred(newsk); + newu = unix_sk(newsk); + RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq); + otheru = unix_sk(other); + + /* copy address information from listening to new sock + * + * The contents of *(otheru->addr) and otheru->path + * are seen fully set up here, since we have found + * otheru in hash under its lock. Insertion into the + * hash chain we'd found it in had been done in an + * earlier critical area protected by the chain's lock, + * the same one where we'd set *(otheru->addr) contents, + * as well as otheru->path and otheru->addr itself. + * + * Using smp_store_release() here to set newu->addr + * is enough to make those stores, as well as stores + * to newu->path visible to anyone who gets newu->addr + * by smp_load_acquire(). IOW, the same warranties + * as for unix_sock instances bound in unix_bind() or + * in unix_autobind(). + */ + if (otheru->path.dentry) { + path_get(&otheru->path); + newu->path = otheru->path; + } + refcount_inc(&otheru->addr->refcnt); + smp_store_release(&newu->addr, otheru->addr); + + /* Set credentials */ + copy_peercred(sk, other); + + sock->state = SS_CONNECTED; + sk->sk_state = TCP_ESTABLISHED; + sock_hold(newsk); + + smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */ + unix_peer(sk) = newsk; + + unix_state_unlock(sk); + + /* take ten and send info to listening sock */ + spin_lock(&other->sk_receive_queue.lock); + __skb_queue_tail(&other->sk_receive_queue, skb); + spin_unlock(&other->sk_receive_queue.lock); + unix_state_unlock(other); + other->sk_data_ready(other); + sock_put(other); + return 0; + +out_unlock: + if (other) + unix_state_unlock(other); + +out: + kfree_skb(skb); + if (newsk) + unix_release_sock(newsk, 0); + if (other) + sock_put(other); + return err; +} + +static int unix_socketpair(struct socket *socka, struct socket *sockb) +{ + struct sock *ska = socka->sk, *skb = sockb->sk; + + /* Join our sockets back to back */ + sock_hold(ska); + sock_hold(skb); + unix_peer(ska) = skb; + unix_peer(skb) = ska; + init_peercred(ska); + init_peercred(skb); + + ska->sk_state = TCP_ESTABLISHED; + skb->sk_state = TCP_ESTABLISHED; + socka->state = SS_CONNECTED; + sockb->state = SS_CONNECTED; + return 0; +} + +static void unix_sock_inherit_flags(const struct socket *old, + struct socket *new) +{ + if (test_bit(SOCK_PASSCRED, &old->flags)) + set_bit(SOCK_PASSCRED, &new->flags); + if (test_bit(SOCK_PASSSEC, &old->flags)) + set_bit(SOCK_PASSSEC, &new->flags); +} + +static int unix_accept(struct socket *sock, struct socket *newsock, int flags, + bool kern) +{ + struct sock *sk = sock->sk; + struct sock *tsk; + struct sk_buff *skb; + int err; + + err = -EOPNOTSUPP; + if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET) + goto out; + + err = -EINVAL; + if (sk->sk_state != TCP_LISTEN) + goto out; + + /* If socket state is TCP_LISTEN it cannot change (for now...), + * so that no locks are necessary. 
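The credentials recorded by init_peercred()/copy_peercred() during listen(), connect() and socketpair() are what userspace later reads back with SO_PEERCRED (sketch):

	#define _GNU_SOURCE /* struct ucred */
	#include <stdio.h>
	#include <sys/socket.h>

	void show_peer(int fd)
	{
		struct ucred cred;
		socklen_t len = sizeof(cred);

		/* pid/uid/gid of the peer at connect/listen time */
		if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &cred, &len) == 0)
			printf("peer pid=%d uid=%d gid=%d\n",
			       (int)cred.pid, (int)cred.uid, (int)cred.gid);
	}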
+ */ + + skb = skb_recv_datagram(sk, (flags & O_NONBLOCK) ? MSG_DONTWAIT : 0, + &err); + if (!skb) { + /* This means receive shutdown. */ + if (err == 0) + err = -EINVAL; + goto out; + } + + tsk = skb->sk; + skb_free_datagram(sk, skb); + wake_up_interruptible(&unix_sk(sk)->peer_wait); + + /* attach accepted sock to socket */ + unix_state_lock(tsk); + newsock->state = SS_CONNECTED; + unix_sock_inherit_flags(sock, newsock); + sock_graft(tsk, newsock); + unix_state_unlock(tsk); + return 0; + +out: + return err; +} + + +static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer) +{ + struct sock *sk = sock->sk; + struct unix_address *addr; + DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr); + int err = 0; + + if (peer) { + sk = unix_peer_get(sk); + + err = -ENOTCONN; + if (!sk) + goto out; + err = 0; + } else { + sock_hold(sk); + } + + addr = smp_load_acquire(&unix_sk(sk)->addr); + if (!addr) { + sunaddr->sun_family = AF_UNIX; + sunaddr->sun_path[0] = 0; + err = offsetof(struct sockaddr_un, sun_path); + } else { + err = addr->len; + memcpy(sunaddr, addr->name, addr->len); + } + sock_put(sk); +out: + return err; +} + +static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb) +{ + scm->fp = scm_fp_dup(UNIXCB(skb).fp); + + /* + * Garbage collection of unix sockets starts by selecting a set of + * candidate sockets which have reference only from being in flight + * (total_refs == inflight_refs). This condition is checked once during + * the candidate collection phase, and candidates are marked as such, so + * that non-candidates can later be ignored. While inflight_refs is + * protected by unix_gc_lock, total_refs (file count) is not, hence this + * is an instantaneous decision. + * + * Once a candidate, however, the socket must not be reinstalled into a + * file descriptor while the garbage collection is in progress. + * + * If the above conditions are met, then the directed graph of + * candidates (*) does not change while unix_gc_lock is held. + * + * Any operations that changes the file count through file descriptors + * (dup, close, sendmsg) does not change the graph since candidates are + * not installed in fds. + * + * Dequeing a candidate via recvmsg would install it into an fd, but + * that takes unix_gc_lock to decrement the inflight count, so it's + * serialized with garbage collection. + * + * MSG_PEEK is special in that it does not change the inflight count, + * yet does install the socket into an fd. The following lock/unlock + * pair is to ensure serialization with garbage collection. It must be + * done between incrementing the file count and installing the file into + * an fd. + * + * If garbage collection starts after the barrier provided by the + * lock/unlock, then it will see the elevated refcount and not mark this + * as a candidate. If a garbage collection is already in progress + * before the file count was incremented, then the lock/unlock pair will + * ensure that garbage collection is finished before progressing to + * installing the fd. + * + * (*) A -> B where B is on the queue of A or B is on the queue of C + * which is on the queue of listening socket A. 
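The lock/unlock interlock above exists because descriptors travel between processes inside skbs, attached by sendmsg(2) as SCM_RIGHTS control messages. The sending side from userspace looks roughly like this (sketch):

	#include <string.h>
	#include <sys/socket.h>
	#include <sys/uio.h>

	/* Send one payload byte plus one file descriptor over an
	 * AF_UNIX socket via an SCM_RIGHTS control message. */
	int send_fd(int sock, int fd_to_pass)
	{
		char payload = 'F';
		struct iovec iov = { .iov_base = &payload, .iov_len = 1 };
		union { /* correctly aligned cmsg buffer */
			char buf[CMSG_SPACE(sizeof(int))];
			struct cmsghdr align;
		} u;
		struct msghdr msg = {
			.msg_iov = &iov, .msg_iovlen = 1,
			.msg_control = u.buf, .msg_controllen = sizeof(u.buf),
		};
		struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

		cmsg->cmsg_level = SOL_SOCKET;
		cmsg->cmsg_type = SCM_RIGHTS;
		cmsg->cmsg_len = CMSG_LEN(sizeof(int));
		memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));

		return (int)sendmsg(sock, &msg, 0);
	}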
+ */ + spin_lock(&unix_gc_lock); + spin_unlock(&unix_gc_lock); +} + +static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds) +{ + int err = 0; + + UNIXCB(skb).pid = get_pid(scm->pid); + UNIXCB(skb).uid = scm->creds.uid; + UNIXCB(skb).gid = scm->creds.gid; + UNIXCB(skb).fp = NULL; + unix_get_secdata(scm, skb); + if (scm->fp && send_fds) + err = unix_attach_fds(scm, skb); + + skb->destructor = unix_destruct_scm; + return err; +} + +static bool unix_passcred_enabled(const struct socket *sock, + const struct sock *other) +{ + return test_bit(SOCK_PASSCRED, &sock->flags) || + !other->sk_socket || + test_bit(SOCK_PASSCRED, &other->sk_socket->flags); +} + +/* + * Some apps rely on write() giving SCM_CREDENTIALS + * We include credentials if source or destination socket + * asserted SOCK_PASSCRED. + */ +static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock, + const struct sock *other) +{ + if (UNIXCB(skb).pid) + return; + if (unix_passcred_enabled(sock, other)) { + UNIXCB(skb).pid = get_pid(task_tgid(current)); + current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid); + } +} + +static int maybe_init_creds(struct scm_cookie *scm, + struct socket *socket, + const struct sock *other) +{ + int err; + struct msghdr msg = { .msg_controllen = 0 }; + + err = scm_send(socket, &msg, scm, false); + if (err) + return err; + + if (unix_passcred_enabled(socket, other)) { + scm->pid = get_pid(task_tgid(current)); + current_uid_gid(&scm->creds.uid, &scm->creds.gid); + } + return err; +} + +static bool unix_skb_scm_eq(struct sk_buff *skb, + struct scm_cookie *scm) +{ + return UNIXCB(skb).pid == scm->pid && + uid_eq(UNIXCB(skb).uid, scm->creds.uid) && + gid_eq(UNIXCB(skb).gid, scm->creds.gid) && + unix_secdata_eq(scm, skb); +} + +static void scm_stat_add(struct sock *sk, struct sk_buff *skb) +{ + struct scm_fp_list *fp = UNIXCB(skb).fp; + struct unix_sock *u = unix_sk(sk); + + if (unlikely(fp && fp->count)) + atomic_add(fp->count, &u->scm_stat.nr_fds); +} + +static void scm_stat_del(struct sock *sk, struct sk_buff *skb) +{ + struct scm_fp_list *fp = UNIXCB(skb).fp; + struct unix_sock *u = unix_sk(sk); + + if (unlikely(fp && fp->count)) + atomic_sub(fp->count, &u->scm_stat.nr_fds); +} + +/* + * Send AF_UNIX data. 
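unix_scm_to_skb() and maybe_add_creds() above stamp each queued skb with the sender's pid/uid/gid whenever either endpoint set SOCK_PASSCRED; the receiver then sees them per message as SCM_CREDENTIALS. Receiving-side sketch:

	#define _GNU_SOURCE /* struct ucred, SCM_CREDENTIALS */
	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <sys/uio.h>

	void recv_with_creds(int sock)
	{
		char data[64];
		struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
		union {
			char buf[CMSG_SPACE(sizeof(struct ucred))];
			struct cmsghdr align;
		} u;
		struct msghdr msg = {
			.msg_iov = &iov, .msg_iovlen = 1,
			.msg_control = u.buf, .msg_controllen = sizeof(u.buf),
		};
		struct cmsghdr *c;
		int on = 1;

		/* ask the kernel to attach sender credentials */
		setsockopt(sock, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
		if (recvmsg(sock, &msg, 0) < 0)
			return;

		for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c)) {
			if (c->cmsg_level == SOL_SOCKET &&
			    c->cmsg_type == SCM_CREDENTIALS) {
				struct ucred cr;

				memcpy(&cr, CMSG_DATA(c), sizeof(cr));
				printf("sender pid=%d uid=%d gid=%d\n",
				       (int)cr.pid, (int)cr.uid, (int)cr.gid);
			}
		}
	}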
+ */ + +static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, + size_t len) +{ + DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name); + struct sock *sk = sock->sk, *other = NULL; + struct unix_sock *u = unix_sk(sk); + struct scm_cookie scm; + struct sk_buff *skb; + int data_len = 0; + int sk_locked; + long timeo; + int err; + + wait_for_unix_gc(); + err = scm_send(sock, msg, &scm, false); + if (err < 0) + return err; + + err = -EOPNOTSUPP; + if (msg->msg_flags&MSG_OOB) + goto out; + + if (msg->msg_namelen) { + err = unix_validate_addr(sunaddr, msg->msg_namelen); + if (err) + goto out; + } else { + sunaddr = NULL; + err = -ENOTCONN; + other = unix_peer_get(sk); + if (!other) + goto out; + } + + if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr) { + err = unix_autobind(sk); + if (err) + goto out; + } + + err = -EMSGSIZE; + if (len > sk->sk_sndbuf - 32) + goto out; + + if (len > SKB_MAX_ALLOC) { + data_len = min_t(size_t, + len - SKB_MAX_ALLOC, + MAX_SKB_FRAGS * PAGE_SIZE); + data_len = PAGE_ALIGN(data_len); + + BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE); + } + + skb = sock_alloc_send_pskb(sk, len - data_len, data_len, + msg->msg_flags & MSG_DONTWAIT, &err, + PAGE_ALLOC_COSTLY_ORDER); + if (skb == NULL) + goto out; + + err = unix_scm_to_skb(&scm, skb, true); + if (err < 0) + goto out_free; + + skb_put(skb, len - data_len); + skb->data_len = data_len; + skb->len = len; + err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len); + if (err) + goto out_free; + + timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); + +restart: + if (!other) { + err = -ECONNRESET; + if (sunaddr == NULL) + goto out_free; + + other = unix_find_other(sock_net(sk), sunaddr, msg->msg_namelen, + sk->sk_type); + if (IS_ERR(other)) { + err = PTR_ERR(other); + other = NULL; + goto out_free; + } + } + + if (sk_filter(other, skb) < 0) { + /* Toss the packet but do not return any error to the sender */ + err = len; + goto out_free; + } + + sk_locked = 0; + unix_state_lock(other); +restart_locked: + err = -EPERM; + if (!unix_may_send(sk, other)) + goto out_unlock; + + if (unlikely(sock_flag(other, SOCK_DEAD))) { + /* + * Check with 1003.1g - what should + * datagram error + */ + unix_state_unlock(other); + sock_put(other); + + if (!sk_locked) + unix_state_lock(sk); + + err = 0; + if (sk->sk_type == SOCK_SEQPACKET) { + /* We are here only when racing with unix_release_sock() + * is clearing @other. Never change state to TCP_CLOSE + * unlike SOCK_DGRAM wants. 
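In unix_dgram_sendmsg() below, a sender whose peer is not connected back to it is throttled by the peer's receive-queue backlog (the unix_recvq_full_lockless() test); a nonblocking sender then sees EAGAIN. A sketch that provokes this with a one-way connection (the abstract name "dgram-demo" is hypothetical):

	#include <errno.h>
	#include <fcntl.h>
	#include <stddef.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <sys/un.h>

	int main(void)
	{
		struct sockaddr_un addr = { .sun_family = AF_UNIX };
		socklen_t len = offsetof(struct sockaddr_un, sun_path) + 11;
		int srv = socket(AF_UNIX, SOCK_DGRAM, 0);
		int cli = socket(AF_UNIX, SOCK_DGRAM, 0);
		int n = 0;

		memcpy(addr.sun_path, "\0dgram-demo", 11);
		bind(srv, (struct sockaddr *)&addr, len);

		/* one-way: srv never connects back to cli, so flow
		 * control is the receive-queue check, not sndbuf */
		connect(cli, (struct sockaddr *)&addr, len);
		fcntl(cli, F_SETFL, O_NONBLOCK);

		while (send(cli, "x", 1, 0) == 1)
			n++;
		if (errno == EAGAIN)
			printf("receive queue full after %d datagrams\n", n);
		return 0;
	}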
+ */ + unix_state_unlock(sk); + err = -EPIPE; + } else if (unix_peer(sk) == other) { + unix_peer(sk) = NULL; + unix_dgram_peer_wake_disconnect_wakeup(sk, other); + + sk->sk_state = TCP_CLOSE; + unix_state_unlock(sk); + + unix_dgram_disconnected(sk, other); + sock_put(other); + err = -ECONNREFUSED; + } else { + unix_state_unlock(sk); + } + + other = NULL; + if (err) + goto out_free; + goto restart; + } + + err = -EPIPE; + if (other->sk_shutdown & RCV_SHUTDOWN) + goto out_unlock; + + if (sk->sk_type != SOCK_SEQPACKET) { + err = security_unix_may_send(sk->sk_socket, other->sk_socket); + if (err) + goto out_unlock; + } + + /* other == sk && unix_peer(other) != sk if + * - unix_peer(sk) == NULL, destination address bound to sk + * - unix_peer(sk) == sk by time of get but disconnected before lock + */ + if (other != sk && + unlikely(unix_peer(other) != sk && + unix_recvq_full_lockless(other))) { + if (timeo) { + timeo = unix_wait_for_peer(other, timeo); + + err = sock_intr_errno(timeo); + if (signal_pending(current)) + goto out_free; + + goto restart; + } + + if (!sk_locked) { + unix_state_unlock(other); + unix_state_double_lock(sk, other); + } + + if (unix_peer(sk) != other || + unix_dgram_peer_wake_me(sk, other)) { + err = -EAGAIN; + sk_locked = 1; + goto out_unlock; + } + + if (!sk_locked) { + sk_locked = 1; + goto restart_locked; + } + } + + if (unlikely(sk_locked)) + unix_state_unlock(sk); + + if (sock_flag(other, SOCK_RCVTSTAMP)) + __net_timestamp(skb); + maybe_add_creds(skb, sock, other); + scm_stat_add(other, skb); + skb_queue_tail(&other->sk_receive_queue, skb); + unix_state_unlock(other); + other->sk_data_ready(other); + sock_put(other); + scm_destroy(&scm); + return len; + +out_unlock: + if (sk_locked) + unix_state_unlock(sk); + unix_state_unlock(other); +out_free: + kfree_skb(skb); +out: + if (other) + sock_put(other); + scm_destroy(&scm); + return err; +} + +/* We use paged skbs for stream sockets, and limit occupancy to 32768 + * bytes, and a minimum of a full page. 
+ */ +#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768)) + +#if IS_ENABLED(CONFIG_AF_UNIX_OOB) +static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other, + struct scm_cookie *scm, bool fds_sent) +{ + struct unix_sock *ousk = unix_sk(other); + struct sk_buff *skb; + int err = 0; + + skb = sock_alloc_send_skb(sock->sk, 1, msg->msg_flags & MSG_DONTWAIT, &err); + + if (!skb) + return err; + + err = unix_scm_to_skb(scm, skb, !fds_sent); + if (err < 0) { + kfree_skb(skb); + return err; + } + skb_put(skb, 1); + err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1); + + if (err) { + kfree_skb(skb); + return err; + } + + unix_state_lock(other); + + if (sock_flag(other, SOCK_DEAD) || + (other->sk_shutdown & RCV_SHUTDOWN)) { + unix_state_unlock(other); + kfree_skb(skb); + return -EPIPE; + } + + maybe_add_creds(skb, sock, other); + skb_get(skb); + + if (ousk->oob_skb) + consume_skb(ousk->oob_skb); + + WRITE_ONCE(ousk->oob_skb, skb); + + scm_stat_add(other, skb); + skb_queue_tail(&other->sk_receive_queue, skb); + sk_send_sigurg(other); + unix_state_unlock(other); + other->sk_data_ready(other); + + return err; +} +#endif + +static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg, + size_t len) +{ + struct sock *sk = sock->sk; + struct sock *other = NULL; + int err, size; + struct sk_buff *skb; + int sent = 0; + struct scm_cookie scm; + bool fds_sent = false; + int data_len; + + wait_for_unix_gc(); + err = scm_send(sock, msg, &scm, false); + if (err < 0) + return err; + + err = -EOPNOTSUPP; + if (msg->msg_flags & MSG_OOB) { +#if IS_ENABLED(CONFIG_AF_UNIX_OOB) + if (len) + len--; + else +#endif + goto out_err; + } + + if (msg->msg_namelen) { + err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP; + goto out_err; + } else { + err = -ENOTCONN; + other = unix_peer(sk); + if (!other) + goto out_err; + } + + if (sk->sk_shutdown & SEND_SHUTDOWN) + goto pipe_err; + + while (sent < len) { + size = len - sent; + + /* Keep two messages in the pipe so it schedules better */ + size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64); + + /* allow fallback to order-0 allocations */ + size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ); + + data_len = max_t(int, 0, size - SKB_MAX_HEAD(0)); + + data_len = min_t(size_t, size, PAGE_ALIGN(data_len)); + + skb = sock_alloc_send_pskb(sk, size - data_len, data_len, + msg->msg_flags & MSG_DONTWAIT, &err, + get_order(UNIX_SKB_FRAGS_SZ)); + if (!skb) + goto out_err; + + /* Only send the fds in the first buffer */ + err = unix_scm_to_skb(&scm, skb, !fds_sent); + if (err < 0) { + kfree_skb(skb); + goto out_err; + } + fds_sent = true; + + skb_put(skb, size - data_len); + skb->data_len = data_len; + skb->len = size; + err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size); + if (err) { + kfree_skb(skb); + goto out_err; + } + + unix_state_lock(other); + + if (sock_flag(other, SOCK_DEAD) || + (other->sk_shutdown & RCV_SHUTDOWN)) + goto pipe_err_free; + + maybe_add_creds(skb, sock, other); + scm_stat_add(other, skb); + skb_queue_tail(&other->sk_receive_queue, skb); + unix_state_unlock(other); + other->sk_data_ready(other); + sent += size; + } + +#if IS_ENABLED(CONFIG_AF_UNIX_OOB) + if (msg->msg_flags & MSG_OOB) { + err = queue_oob(sock, msg, other, &scm, fds_sent); + if (err) + goto out_err; + sent++; + } +#endif + + scm_destroy(&scm); + + return sent; + +pipe_err_free: + unix_state_unlock(other); + kfree_skb(skb); +pipe_err: + if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL)) + send_sig(SIGPIPE, current, 0); + 
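+	/* A partial write is reported as a short count below; only a
+	 * send that moved no data at all fails with -EPIPE. */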
err = -EPIPE; +out_err: + scm_destroy(&scm); + return sent ? : err; +} + +static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page, + int offset, size_t size, int flags) +{ + int err; + bool send_sigpipe = false; + bool init_scm = true; + struct scm_cookie scm; + struct sock *other, *sk = socket->sk; + struct sk_buff *skb, *newskb = NULL, *tail = NULL; + + if (flags & MSG_OOB) + return -EOPNOTSUPP; + + other = unix_peer(sk); + if (!other || sk->sk_state != TCP_ESTABLISHED) + return -ENOTCONN; + + if (false) { +alloc_skb: + spin_unlock(&other->sk_receive_queue.lock); + unix_state_unlock(other); + mutex_unlock(&unix_sk(other)->iolock); + newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT, + &err, 0); + if (!newskb) + goto err; + } + + /* we must acquire iolock as we modify already present + * skbs in the sk_receive_queue and mess with skb->len + */ + err = mutex_lock_interruptible(&unix_sk(other)->iolock); + if (err) { + err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS; + goto err; + } + + if (sk->sk_shutdown & SEND_SHUTDOWN) { + err = -EPIPE; + send_sigpipe = true; + goto err_unlock; + } + + unix_state_lock(other); + + if (sock_flag(other, SOCK_DEAD) || + other->sk_shutdown & RCV_SHUTDOWN) { + err = -EPIPE; + send_sigpipe = true; + goto err_state_unlock; + } + + if (init_scm) { + err = maybe_init_creds(&scm, socket, other); + if (err) + goto err_state_unlock; + init_scm = false; + } + + spin_lock(&other->sk_receive_queue.lock); + skb = skb_peek_tail(&other->sk_receive_queue); + if (tail && tail == skb) { + skb = newskb; + } else if (!skb || !unix_skb_scm_eq(skb, &scm)) { + if (newskb) { + skb = newskb; + } else { + tail = skb; + goto alloc_skb; + } + } else if (newskb) { + /* this is fast path, we don't necessarily need to + * call to kfree_skb even though with newskb == NULL + * this - does no harm + */ + consume_skb(newskb); + newskb = NULL; + } + + if (skb_append_pagefrags(skb, page, offset, size)) { + tail = skb; + goto alloc_skb; + } + + skb->len += size; + skb->data_len += size; + skb->truesize += size; + refcount_add(size, &sk->sk_wmem_alloc); + + if (newskb) { + unix_scm_to_skb(&scm, skb, false); + __skb_queue_tail(&other->sk_receive_queue, newskb); + } + + spin_unlock(&other->sk_receive_queue.lock); + unix_state_unlock(other); + mutex_unlock(&unix_sk(other)->iolock); + + other->sk_data_ready(other); + scm_destroy(&scm); + return size; + +err_state_unlock: + unix_state_unlock(other); +err_unlock: + mutex_unlock(&unix_sk(other)->iolock); +err: + kfree_skb(newskb); + if (send_sigpipe && !(flags & MSG_NOSIGNAL)) + send_sig(SIGPIPE, current, 0); + if (!init_scm) + scm_destroy(&scm); + return err; +} + +static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg, + size_t len) +{ + int err; + struct sock *sk = sock->sk; + + err = sock_error(sk); + if (err) + return err; + + if (sk->sk_state != TCP_ESTABLISHED) + return -ENOTCONN; + + if (msg->msg_namelen) + msg->msg_namelen = 0; + + return unix_dgram_sendmsg(sock, msg, len); +} + +static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg, + size_t size, int flags) +{ + struct sock *sk = sock->sk; + + if (sk->sk_state != TCP_ESTABLISHED) + return -ENOTCONN; + + return unix_dgram_recvmsg(sock, msg, size, flags); +} + +static void unix_copy_addr(struct msghdr *msg, struct sock *sk) +{ + struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr); + + if (addr) { + msg->msg_namelen = addr->len; + memcpy(msg->msg_name, addr->name, addr->len); + } +} + +int 
__unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size, + int flags) +{ + struct scm_cookie scm; + struct socket *sock = sk->sk_socket; + struct unix_sock *u = unix_sk(sk); + struct sk_buff *skb, *last; + long timeo; + int skip; + int err; + + err = -EOPNOTSUPP; + if (flags&MSG_OOB) + goto out; + + timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); + + do { + mutex_lock(&u->iolock); + + skip = sk_peek_offset(sk, flags); + skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags, + &skip, &err, &last); + if (skb) { + if (!(flags & MSG_PEEK)) + scm_stat_del(sk, skb); + break; + } + + mutex_unlock(&u->iolock); + + if (err != -EAGAIN) + break; + } while (timeo && + !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue, + &err, &timeo, last)); + + if (!skb) { /* implies iolock unlocked */ + unix_state_lock(sk); + /* Signal EOF on disconnected non-blocking SEQPACKET socket. */ + if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN && + (sk->sk_shutdown & RCV_SHUTDOWN)) + err = 0; + unix_state_unlock(sk); + goto out; + } + + if (wq_has_sleeper(&u->peer_wait)) + wake_up_interruptible_sync_poll(&u->peer_wait, + EPOLLOUT | EPOLLWRNORM | + EPOLLWRBAND); + + if (msg->msg_name) + unix_copy_addr(msg, skb->sk); + + if (size > skb->len - skip) + size = skb->len - skip; + else if (size < skb->len - skip) + msg->msg_flags |= MSG_TRUNC; + + err = skb_copy_datagram_msg(skb, skip, msg, size); + if (err) + goto out_free; + + if (sock_flag(sk, SOCK_RCVTSTAMP)) + __sock_recv_timestamp(msg, sk, skb); + + memset(&scm, 0, sizeof(scm)); + + scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid); + unix_set_secdata(&scm, skb); + + if (!(flags & MSG_PEEK)) { + if (UNIXCB(skb).fp) + unix_detach_fds(&scm, skb); + + sk_peek_offset_bwd(sk, skb->len); + } else { + /* It is questionable: on PEEK we could: + - do not return fds - good, but too simple 8) + - return fds, and do not return them on read (old strategy, + apparently wrong) + - clone fds (I chose it for now, it is the most universal + solution) + + POSIX 1003.1g does not actually define this clearly + at all. POSIX 1003.1g doesn't define a lot of things + clearly however! + + */ + + sk_peek_offset_fwd(sk, size); + + if (UNIXCB(skb).fp) + unix_peek_fds(&scm, skb); + } + err = (flags & MSG_TRUNC) ? skb->len - skip : size; + + scm_recv(sock, msg, &scm, flags); + +out_free: + skb_free_datagram(sk, skb); + mutex_unlock(&u->iolock); +out: + return err; +} + +static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, + int flags) +{ + struct sock *sk = sock->sk; + +#ifdef CONFIG_BPF_SYSCALL + const struct proto *prot = READ_ONCE(sk->sk_prot); + + if (prot != &unix_dgram_proto) + return prot->recvmsg(sk, msg, size, flags, NULL); +#endif + return __unix_dgram_recvmsg(sk, msg, size, flags); +} + +static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor) +{ + struct unix_sock *u = unix_sk(sk); + struct sk_buff *skb; + int err; + + mutex_lock(&u->iolock); + skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err); + mutex_unlock(&u->iolock); + if (!skb) + return err; + + return recv_actor(sk, skb); +} + +/* + * Sleep until more data has arrived. But check for races.. 
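+ * Each round re-checks, under unix_state_lock: did the queue tail
+ * change, was an error or RCV_SHUTDOWN set, did a signal arrive,
+ * did the timeout expire, or did the socket die while we slept.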
+ */ +static long unix_stream_data_wait(struct sock *sk, long timeo, + struct sk_buff *last, unsigned int last_len, + bool freezable) +{ + unsigned int state = TASK_INTERRUPTIBLE | freezable * TASK_FREEZABLE; + struct sk_buff *tail; + DEFINE_WAIT(wait); + + unix_state_lock(sk); + + for (;;) { + prepare_to_wait(sk_sleep(sk), &wait, state); + + tail = skb_peek_tail(&sk->sk_receive_queue); + if (tail != last || + (tail && tail->len != last_len) || + sk->sk_err || + (sk->sk_shutdown & RCV_SHUTDOWN) || + signal_pending(current) || + !timeo) + break; + + sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); + unix_state_unlock(sk); + timeo = schedule_timeout(timeo); + unix_state_lock(sk); + + if (sock_flag(sk, SOCK_DEAD)) + break; + + sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); + } + + finish_wait(sk_sleep(sk), &wait); + unix_state_unlock(sk); + return timeo; +} + +static unsigned int unix_skb_len(const struct sk_buff *skb) +{ + return skb->len - UNIXCB(skb).consumed; +} + +struct unix_stream_read_state { + int (*recv_actor)(struct sk_buff *, int, int, + struct unix_stream_read_state *); + struct socket *socket; + struct msghdr *msg; + struct pipe_inode_info *pipe; + size_t size; + int flags; + unsigned int splice_flags; +}; + +#if IS_ENABLED(CONFIG_AF_UNIX_OOB) +static int unix_stream_recv_urg(struct unix_stream_read_state *state) +{ + struct socket *sock = state->socket; + struct sock *sk = sock->sk; + struct unix_sock *u = unix_sk(sk); + int chunk = 1; + struct sk_buff *oob_skb; + + mutex_lock(&u->iolock); + unix_state_lock(sk); + + if (sock_flag(sk, SOCK_URGINLINE) || !u->oob_skb) { + unix_state_unlock(sk); + mutex_unlock(&u->iolock); + return -EINVAL; + } + + oob_skb = u->oob_skb; + + if (!(state->flags & MSG_PEEK)) + WRITE_ONCE(u->oob_skb, NULL); + else + skb_get(oob_skb); + unix_state_unlock(sk); + + chunk = state->recv_actor(oob_skb, 0, chunk, state); + + if (!(state->flags & MSG_PEEK)) + UNIXCB(oob_skb).consumed += 1; + + consume_skb(oob_skb); + + mutex_unlock(&u->iolock); + + if (chunk < 0) + return -EFAULT; + + state->msg->msg_flags |= MSG_OOB; + return 1; +} + +static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk, + int flags, int copied) +{ + struct unix_sock *u = unix_sk(sk); + + if (!unix_skb_len(skb) && !(flags & MSG_PEEK)) { + skb_unlink(skb, &sk->sk_receive_queue); + consume_skb(skb); + skb = NULL; + } else { + if (skb == u->oob_skb) { + if (copied) { + skb = NULL; + } else if (sock_flag(sk, SOCK_URGINLINE)) { + if (!(flags & MSG_PEEK)) { + WRITE_ONCE(u->oob_skb, NULL); + consume_skb(skb); + } + } else if (!(flags & MSG_PEEK)) { + skb_unlink(skb, &sk->sk_receive_queue); + consume_skb(skb); + skb = skb_peek(&sk->sk_receive_queue); + } + } + } + return skb; +} +#endif + +static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor) +{ + if (unlikely(sk->sk_state != TCP_ESTABLISHED)) + return -ENOTCONN; + + return unix_read_skb(sk, recv_actor); +} + +static int unix_stream_read_generic(struct unix_stream_read_state *state, + bool freezable) +{ + struct scm_cookie scm; + struct socket *sock = state->socket; + struct sock *sk = sock->sk; + struct unix_sock *u = unix_sk(sk); + int copied = 0; + int flags = state->flags; + int noblock = flags & MSG_DONTWAIT; + bool check_creds = false; + int target; + int err = 0; + long timeo; + int skip; + size_t size = state->size; + unsigned int last_len; + + if (unlikely(sk->sk_state != TCP_ESTABLISHED)) { + err = -EINVAL; + goto out; + } + + if (unlikely(flags & MSG_OOB)) { + err = -EOPNOTSUPP; +#if IS_ENABLED(CONFIG_AF_UNIX_OOB) 
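+		/* When OOB support is compiled in, the urgent-data
+		 * handler's result replaces the -EOPNOTSUPP default
+		 * set above. */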
+ err = unix_stream_recv_urg(state); +#endif + goto out; + } + + target = sock_rcvlowat(sk, flags & MSG_WAITALL, size); + timeo = sock_rcvtimeo(sk, noblock); + + memset(&scm, 0, sizeof(scm)); + + /* Lock the socket to prevent queue disordering + * while sleeps in memcpy_tomsg + */ + mutex_lock(&u->iolock); + + skip = max(sk_peek_offset(sk, flags), 0); + + do { + int chunk; + bool drop_skb; + struct sk_buff *skb, *last; + +redo: + unix_state_lock(sk); + if (sock_flag(sk, SOCK_DEAD)) { + err = -ECONNRESET; + goto unlock; + } + last = skb = skb_peek(&sk->sk_receive_queue); + last_len = last ? last->len : 0; + +#if IS_ENABLED(CONFIG_AF_UNIX_OOB) + if (skb) { + skb = manage_oob(skb, sk, flags, copied); + if (!skb) { + unix_state_unlock(sk); + if (copied) + break; + goto redo; + } + } +#endif +again: + if (skb == NULL) { + if (copied >= target) + goto unlock; + + /* + * POSIX 1003.1g mandates this order. + */ + + err = sock_error(sk); + if (err) + goto unlock; + if (sk->sk_shutdown & RCV_SHUTDOWN) + goto unlock; + + unix_state_unlock(sk); + if (!timeo) { + err = -EAGAIN; + break; + } + + mutex_unlock(&u->iolock); + + timeo = unix_stream_data_wait(sk, timeo, last, + last_len, freezable); + + if (signal_pending(current)) { + err = sock_intr_errno(timeo); + scm_destroy(&scm); + goto out; + } + + mutex_lock(&u->iolock); + goto redo; +unlock: + unix_state_unlock(sk); + break; + } + + while (skip >= unix_skb_len(skb)) { + skip -= unix_skb_len(skb); + last = skb; + last_len = skb->len; + skb = skb_peek_next(skb, &sk->sk_receive_queue); + if (!skb) + goto again; + } + + unix_state_unlock(sk); + + if (check_creds) { + /* Never glue messages from different writers */ + if (!unix_skb_scm_eq(skb, &scm)) + break; + } else if (test_bit(SOCK_PASSCRED, &sock->flags)) { + /* Copy credentials */ + scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid); + unix_set_secdata(&scm, skb); + check_creds = true; + } + + /* Copy address just once */ + if (state->msg && state->msg->msg_name) { + DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, + state->msg->msg_name); + unix_copy_addr(state->msg, skb->sk); + sunaddr = NULL; + } + + chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size); + skb_get(skb); + chunk = state->recv_actor(skb, skip, chunk, state); + drop_skb = !unix_skb_len(skb); + /* skb is only safe to use if !drop_skb */ + consume_skb(skb); + if (chunk < 0) { + if (copied == 0) + copied = -EFAULT; + break; + } + copied += chunk; + size -= chunk; + + if (drop_skb) { + /* the skb was touched by a concurrent reader; + * we should not expect anything from this skb + * anymore and assume it invalid - we can be + * sure it was dropped from the socket queue + * + * let's report a short read + */ + err = 0; + break; + } + + /* Mark read part of skb as used */ + if (!(flags & MSG_PEEK)) { + UNIXCB(skb).consumed += chunk; + + sk_peek_offset_bwd(sk, chunk); + + if (UNIXCB(skb).fp) { + scm_stat_del(sk, skb); + unix_detach_fds(&scm, skb); + } + + if (unix_skb_len(skb)) + break; + + skb_unlink(skb, &sk->sk_receive_queue); + consume_skb(skb); + + if (scm.fp) + break; + } else { + /* It is questionable, see note in unix_dgram_recvmsg. 
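+		 * As on the datagram side, fds attached to a PEEKed
+		 * message are cloned for the caller rather than
+		 * detached from the skb.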
+ */ + if (UNIXCB(skb).fp) + unix_peek_fds(&scm, skb); + + sk_peek_offset_fwd(sk, chunk); + + if (UNIXCB(skb).fp) + break; + + skip = 0; + last = skb; + last_len = skb->len; + unix_state_lock(sk); + skb = skb_peek_next(skb, &sk->sk_receive_queue); + if (skb) + goto again; + unix_state_unlock(sk); + break; + } + } while (size); + + mutex_unlock(&u->iolock); + if (state->msg) + scm_recv(sock, state->msg, &scm, flags); + else + scm_destroy(&scm); +out: + return copied ? : err; +} + +static int unix_stream_read_actor(struct sk_buff *skb, + int skip, int chunk, + struct unix_stream_read_state *state) +{ + int ret; + + ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip, + state->msg, chunk); + return ret ?: chunk; +} + +int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg, + size_t size, int flags) +{ + struct unix_stream_read_state state = { + .recv_actor = unix_stream_read_actor, + .socket = sk->sk_socket, + .msg = msg, + .size = size, + .flags = flags + }; + + return unix_stream_read_generic(&state, true); +} + +static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg, + size_t size, int flags) +{ + struct unix_stream_read_state state = { + .recv_actor = unix_stream_read_actor, + .socket = sock, + .msg = msg, + .size = size, + .flags = flags + }; + +#ifdef CONFIG_BPF_SYSCALL + struct sock *sk = sock->sk; + const struct proto *prot = READ_ONCE(sk->sk_prot); + + if (prot != &unix_stream_proto) + return prot->recvmsg(sk, msg, size, flags, NULL); +#endif + return unix_stream_read_generic(&state, true); +} + +static int unix_stream_splice_actor(struct sk_buff *skb, + int skip, int chunk, + struct unix_stream_read_state *state) +{ + return skb_splice_bits(skb, state->socket->sk, + UNIXCB(skb).consumed + skip, + state->pipe, chunk, state->splice_flags); +} + +static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos, + struct pipe_inode_info *pipe, + size_t size, unsigned int flags) +{ + struct unix_stream_read_state state = { + .recv_actor = unix_stream_splice_actor, + .socket = sock, + .pipe = pipe, + .size = size, + .splice_flags = flags, + }; + + if (unlikely(*ppos)) + return -ESPIPE; + + if (sock->file->f_flags & O_NONBLOCK || + flags & SPLICE_F_NONBLOCK) + state.flags = MSG_DONTWAIT; + + return unix_stream_read_generic(&state, false); +} + +static int unix_shutdown(struct socket *sock, int mode) +{ + struct sock *sk = sock->sk; + struct sock *other; + + if (mode < SHUT_RD || mode > SHUT_RDWR) + return -EINVAL; + /* This maps: + * SHUT_RD (0) -> RCV_SHUTDOWN (1) + * SHUT_WR (1) -> SEND_SHUTDOWN (2) + * SHUT_RDWR (2) -> SHUTDOWN_MASK (3) + */ + ++mode; + + unix_state_lock(sk); + WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | mode); + other = unix_peer(sk); + if (other) + sock_hold(other); + unix_state_unlock(sk); + sk->sk_state_change(sk); + + if (other && + (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) { + + int peer_mode = 0; + const struct proto *prot = READ_ONCE(other->sk_prot); + + if (prot->unhash) + prot->unhash(other); + if (mode&RCV_SHUTDOWN) + peer_mode |= SEND_SHUTDOWN; + if (mode&SEND_SHUTDOWN) + peer_mode |= RCV_SHUTDOWN; + unix_state_lock(other); + WRITE_ONCE(other->sk_shutdown, other->sk_shutdown | peer_mode); + unix_state_unlock(other); + other->sk_state_change(other); + if (peer_mode == SHUTDOWN_MASK) + sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP); + else if (peer_mode & RCV_SHUTDOWN) + sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN); + } + if (other) + sock_put(other); + + return 0; +} + +long unix_inq_len(struct 
sock *sk) +{ + struct sk_buff *skb; + long amount = 0; + + if (sk->sk_state == TCP_LISTEN) + return -EINVAL; + + spin_lock(&sk->sk_receive_queue.lock); + if (sk->sk_type == SOCK_STREAM || + sk->sk_type == SOCK_SEQPACKET) { + skb_queue_walk(&sk->sk_receive_queue, skb) + amount += unix_skb_len(skb); + } else { + skb = skb_peek(&sk->sk_receive_queue); + if (skb) + amount = skb->len; + } + spin_unlock(&sk->sk_receive_queue.lock); + + return amount; +} +EXPORT_SYMBOL_GPL(unix_inq_len); + +long unix_outq_len(struct sock *sk) +{ + return sk_wmem_alloc_get(sk); +} +EXPORT_SYMBOL_GPL(unix_outq_len); + +static int unix_open_file(struct sock *sk) +{ + struct path path; + struct file *f; + int fd; + + if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) + return -EPERM; + + if (!smp_load_acquire(&unix_sk(sk)->addr)) + return -ENOENT; + + path = unix_sk(sk)->path; + if (!path.dentry) + return -ENOENT; + + path_get(&path); + + fd = get_unused_fd_flags(O_CLOEXEC); + if (fd < 0) + goto out; + + f = dentry_open(&path, O_PATH, current_cred()); + if (IS_ERR(f)) { + put_unused_fd(fd); + fd = PTR_ERR(f); + goto out; + } + + fd_install(fd, f); +out: + path_put(&path); + + return fd; +} + +static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) +{ + struct sock *sk = sock->sk; + long amount = 0; + int err; + + switch (cmd) { + case SIOCOUTQ: + amount = unix_outq_len(sk); + err = put_user(amount, (int __user *)arg); + break; + case SIOCINQ: + amount = unix_inq_len(sk); + if (amount < 0) + err = amount; + else + err = put_user(amount, (int __user *)arg); + break; + case SIOCUNIXFILE: + err = unix_open_file(sk); + break; +#if IS_ENABLED(CONFIG_AF_UNIX_OOB) + case SIOCATMARK: + { + struct sk_buff *skb; + int answ = 0; + + skb = skb_peek(&sk->sk_receive_queue); + if (skb && skb == READ_ONCE(unix_sk(sk)->oob_skb)) + answ = 1; + err = put_user(answ, (int __user *)arg); + } + break; +#endif + default: + err = -ENOIOCTLCMD; + break; + } + return err; +} + +#ifdef CONFIG_COMPAT +static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) +{ + return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg)); +} +#endif + +static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait) +{ + struct sock *sk = sock->sk; + __poll_t mask; + u8 shutdown; + + sock_poll_wait(file, sock, wait); + mask = 0; + shutdown = READ_ONCE(sk->sk_shutdown); + + /* exceptional events? */ + if (sk->sk_err) + mask |= EPOLLERR; + if (shutdown == SHUTDOWN_MASK) + mask |= EPOLLHUP; + if (shutdown & RCV_SHUTDOWN) + mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; + + /* readable? */ + if (!skb_queue_empty_lockless(&sk->sk_receive_queue)) + mask |= EPOLLIN | EPOLLRDNORM; + if (sk_is_readable(sk)) + mask |= EPOLLIN | EPOLLRDNORM; +#if IS_ENABLED(CONFIG_AF_UNIX_OOB) + if (READ_ONCE(unix_sk(sk)->oob_skb)) + mask |= EPOLLPRI; +#endif + + /* Connection-based need to check for termination and startup */ + if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) && + sk->sk_state == TCP_CLOSE) + mask |= EPOLLHUP; + + /* + * we set writable also when the other side has shut down the + * connection. This prevents stuck sockets. 
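+	 * A writer polling such a socket wakes up, and its next send
+	 * fails with EPIPE rather than blocking indefinitely.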
+ */ + if (unix_writable(sk)) + mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; + + return mask; +} + +static __poll_t unix_dgram_poll(struct file *file, struct socket *sock, + poll_table *wait) +{ + struct sock *sk = sock->sk, *other; + unsigned int writable; + __poll_t mask; + u8 shutdown; + + sock_poll_wait(file, sock, wait); + mask = 0; + shutdown = READ_ONCE(sk->sk_shutdown); + + /* exceptional events? */ + if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue)) + mask |= EPOLLERR | + (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0); + + if (shutdown & RCV_SHUTDOWN) + mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; + if (shutdown == SHUTDOWN_MASK) + mask |= EPOLLHUP; + + /* readable? */ + if (!skb_queue_empty_lockless(&sk->sk_receive_queue)) + mask |= EPOLLIN | EPOLLRDNORM; + if (sk_is_readable(sk)) + mask |= EPOLLIN | EPOLLRDNORM; + + /* Connection-based need to check for termination and startup */ + if (sk->sk_type == SOCK_SEQPACKET) { + if (sk->sk_state == TCP_CLOSE) + mask |= EPOLLHUP; + /* connection hasn't started yet? */ + if (sk->sk_state == TCP_SYN_SENT) + return mask; + } + + /* No write status requested, avoid expensive OUT tests. */ + if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT))) + return mask; + + writable = unix_writable(sk); + if (writable) { + unix_state_lock(sk); + + other = unix_peer(sk); + if (other && unix_peer(other) != sk && + unix_recvq_full_lockless(other) && + unix_dgram_peer_wake_me(sk, other)) + writable = 0; + + unix_state_unlock(sk); + } + + if (writable) + mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; + else + sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); + + return mask; +} + +#ifdef CONFIG_PROC_FS + +#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1) + +#define get_bucket(x) ((x) >> BUCKET_SPACE) +#define get_offset(x) ((x) & ((1UL << BUCKET_SPACE) - 1)) +#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o)) + +static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos) +{ + unsigned long offset = get_offset(*pos); + unsigned long bucket = get_bucket(*pos); + unsigned long count = 0; + struct sock *sk; + + for (sk = sk_head(&seq_file_net(seq)->unx.table.buckets[bucket]); + sk; sk = sk_next(sk)) { + if (++count == offset) + break; + } + + return sk; +} + +static struct sock *unix_get_first(struct seq_file *seq, loff_t *pos) +{ + unsigned long bucket = get_bucket(*pos); + struct net *net = seq_file_net(seq); + struct sock *sk; + + while (bucket < UNIX_HASH_SIZE) { + spin_lock(&net->unx.table.locks[bucket]); + + sk = unix_from_bucket(seq, pos); + if (sk) + return sk; + + spin_unlock(&net->unx.table.locks[bucket]); + + *pos = set_bucket_offset(++bucket, 1); + } + + return NULL; +} + +static struct sock *unix_get_next(struct seq_file *seq, struct sock *sk, + loff_t *pos) +{ + unsigned long bucket = get_bucket(*pos); + + sk = sk_next(sk); + if (sk) + return sk; + + + spin_unlock(&seq_file_net(seq)->unx.table.locks[bucket]); + + *pos = set_bucket_offset(++bucket, 1); + + return unix_get_first(seq, pos); +} + +static void *unix_seq_start(struct seq_file *seq, loff_t *pos) +{ + if (!*pos) + return SEQ_START_TOKEN; + + return unix_get_first(seq, pos); +} + +static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + ++*pos; + + if (v == SEQ_START_TOKEN) + return unix_get_first(seq, pos); + + return unix_get_next(seq, v, pos); +} + +static void unix_seq_stop(struct seq_file *seq, void *v) +{ + struct sock *sk = v; + + if (sk) + spin_unlock(&seq_file_net(seq)->unx.table.locks[sk->sk_hash]); 
+} + +static int unix_seq_show(struct seq_file *seq, void *v) +{ + + if (v == SEQ_START_TOKEN) + seq_puts(seq, "Num RefCount Protocol Flags Type St " + "Inode Path\n"); + else { + struct sock *s = v; + struct unix_sock *u = unix_sk(s); + unix_state_lock(s); + + seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu", + s, + refcount_read(&s->sk_refcnt), + 0, + s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0, + s->sk_type, + s->sk_socket ? + (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) : + (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING), + sock_i_ino(s)); + + if (u->addr) { // under a hash table lock here + int i, len; + seq_putc(seq, ' '); + + i = 0; + len = u->addr->len - + offsetof(struct sockaddr_un, sun_path); + if (u->addr->name->sun_path[0]) { + len--; + } else { + seq_putc(seq, '@'); + i++; + } + for ( ; i < len; i++) + seq_putc(seq, u->addr->name->sun_path[i] ?: + '@'); + } + unix_state_unlock(s); + seq_putc(seq, '\n'); + } + + return 0; +} + +static const struct seq_operations unix_seq_ops = { + .start = unix_seq_start, + .next = unix_seq_next, + .stop = unix_seq_stop, + .show = unix_seq_show, +}; + +#if IS_BUILTIN(CONFIG_UNIX) && defined(CONFIG_BPF_SYSCALL) +struct bpf_unix_iter_state { + struct seq_net_private p; + unsigned int cur_sk; + unsigned int end_sk; + unsigned int max_sk; + struct sock **batch; + bool st_bucket_done; +}; + +struct bpf_iter__unix { + __bpf_md_ptr(struct bpf_iter_meta *, meta); + __bpf_md_ptr(struct unix_sock *, unix_sk); + uid_t uid __aligned(8); +}; + +static int unix_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta, + struct unix_sock *unix_sk, uid_t uid) +{ + struct bpf_iter__unix ctx; + + meta->seq_num--; /* skip SEQ_START_TOKEN */ + ctx.meta = meta; + ctx.unix_sk = unix_sk; + ctx.uid = uid; + return bpf_iter_run_prog(prog, &ctx); +} + +static int bpf_iter_unix_hold_batch(struct seq_file *seq, struct sock *start_sk) + +{ + struct bpf_unix_iter_state *iter = seq->private; + unsigned int expected = 1; + struct sock *sk; + + sock_hold(start_sk); + iter->batch[iter->end_sk++] = start_sk; + + for (sk = sk_next(start_sk); sk; sk = sk_next(sk)) { + if (iter->end_sk < iter->max_sk) { + sock_hold(sk); + iter->batch[iter->end_sk++] = sk; + } + + expected++; + } + + spin_unlock(&seq_file_net(seq)->unx.table.locks[start_sk->sk_hash]); + + return expected; +} + +static void bpf_iter_unix_put_batch(struct bpf_unix_iter_state *iter) +{ + while (iter->cur_sk < iter->end_sk) + sock_put(iter->batch[iter->cur_sk++]); +} + +static int bpf_iter_unix_realloc_batch(struct bpf_unix_iter_state *iter, + unsigned int new_batch_sz) +{ + struct sock **new_batch; + + new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz, + GFP_USER | __GFP_NOWARN); + if (!new_batch) + return -ENOMEM; + + bpf_iter_unix_put_batch(iter); + kvfree(iter->batch); + iter->batch = new_batch; + iter->max_sk = new_batch_sz; + + return 0; +} + +static struct sock *bpf_iter_unix_batch(struct seq_file *seq, + loff_t *pos) +{ + struct bpf_unix_iter_state *iter = seq->private; + unsigned int expected; + bool resized = false; + struct sock *sk; + + if (iter->st_bucket_done) + *pos = set_bucket_offset(get_bucket(*pos) + 1, 1); + +again: + /* Get a new batch */ + iter->cur_sk = 0; + iter->end_sk = 0; + + sk = unix_get_first(seq, pos); + if (!sk) + return NULL; /* Done */ + + expected = bpf_iter_unix_hold_batch(seq, sk); + + if (iter->end_sk == expected) { + iter->st_bucket_done = true; + return sk; + } + + if (!resized && !bpf_iter_unix_realloc_batch(iter, 
expected * 3 / 2)) { + resized = true; + goto again; + } + + return sk; +} + +static void *bpf_iter_unix_seq_start(struct seq_file *seq, loff_t *pos) +{ + if (!*pos) + return SEQ_START_TOKEN; + + /* bpf iter does not support lseek, so it always + * continue from where it was stop()-ped. + */ + return bpf_iter_unix_batch(seq, pos); +} + +static void *bpf_iter_unix_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + struct bpf_unix_iter_state *iter = seq->private; + struct sock *sk; + + /* Whenever seq_next() is called, the iter->cur_sk is + * done with seq_show(), so advance to the next sk in + * the batch. + */ + if (iter->cur_sk < iter->end_sk) + sock_put(iter->batch[iter->cur_sk++]); + + ++*pos; + + if (iter->cur_sk < iter->end_sk) + sk = iter->batch[iter->cur_sk]; + else + sk = bpf_iter_unix_batch(seq, pos); + + return sk; +} + +static int bpf_iter_unix_seq_show(struct seq_file *seq, void *v) +{ + struct bpf_iter_meta meta; + struct bpf_prog *prog; + struct sock *sk = v; + uid_t uid; + bool slow; + int ret; + + if (v == SEQ_START_TOKEN) + return 0; + + slow = lock_sock_fast(sk); + + if (unlikely(sk_unhashed(sk))) { + ret = SEQ_SKIP; + goto unlock; + } + + uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)); + meta.seq = seq; + prog = bpf_iter_get_info(&meta, false); + ret = unix_prog_seq_show(prog, &meta, v, uid); +unlock: + unlock_sock_fast(sk, slow); + return ret; +} + +static void bpf_iter_unix_seq_stop(struct seq_file *seq, void *v) +{ + struct bpf_unix_iter_state *iter = seq->private; + struct bpf_iter_meta meta; + struct bpf_prog *prog; + + if (!v) { + meta.seq = seq; + prog = bpf_iter_get_info(&meta, true); + if (prog) + (void)unix_prog_seq_show(prog, &meta, v, 0); + } + + if (iter->cur_sk < iter->end_sk) + bpf_iter_unix_put_batch(iter); +} + +static const struct seq_operations bpf_iter_unix_seq_ops = { + .start = bpf_iter_unix_seq_start, + .next = bpf_iter_unix_seq_next, + .stop = bpf_iter_unix_seq_stop, + .show = bpf_iter_unix_seq_show, +}; +#endif +#endif + +static const struct net_proto_family unix_family_ops = { + .family = PF_UNIX, + .create = unix_create, + .owner = THIS_MODULE, +}; + + +static int __net_init unix_net_init(struct net *net) +{ + int i; + + net->unx.sysctl_max_dgram_qlen = 10; + if (unix_sysctl_register(net)) + goto out; + +#ifdef CONFIG_PROC_FS + if (!proc_create_net("unix", 0, net->proc_net, &unix_seq_ops, + sizeof(struct seq_net_private))) + goto err_sysctl; +#endif + + net->unx.table.locks = kvmalloc_array(UNIX_HASH_SIZE, + sizeof(spinlock_t), GFP_KERNEL); + if (!net->unx.table.locks) + goto err_proc; + + net->unx.table.buckets = kvmalloc_array(UNIX_HASH_SIZE, + sizeof(struct hlist_head), + GFP_KERNEL); + if (!net->unx.table.buckets) + goto free_locks; + + for (i = 0; i < UNIX_HASH_SIZE; i++) { + spin_lock_init(&net->unx.table.locks[i]); + INIT_HLIST_HEAD(&net->unx.table.buckets[i]); + } + + return 0; + +free_locks: + kvfree(net->unx.table.locks); +err_proc: +#ifdef CONFIG_PROC_FS + remove_proc_entry("unix", net->proc_net); +err_sysctl: +#endif + unix_sysctl_unregister(net); +out: + return -ENOMEM; +} + +static void __net_exit unix_net_exit(struct net *net) +{ + kvfree(net->unx.table.buckets); + kvfree(net->unx.table.locks); + unix_sysctl_unregister(net); + remove_proc_entry("unix", net->proc_net); +} + +static struct pernet_operations unix_net_ops = { + .init = unix_net_init, + .exit = unix_net_exit, +}; + +#if IS_BUILTIN(CONFIG_UNIX) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS) +DEFINE_BPF_ITER_FUNC(unix, struct bpf_iter_meta 
*meta, + struct unix_sock *unix_sk, uid_t uid) + +#define INIT_BATCH_SZ 16 + +static int bpf_iter_init_unix(void *priv_data, struct bpf_iter_aux_info *aux) +{ + struct bpf_unix_iter_state *iter = priv_data; + int err; + + err = bpf_iter_init_seq_net(priv_data, aux); + if (err) + return err; + + err = bpf_iter_unix_realloc_batch(iter, INIT_BATCH_SZ); + if (err) { + bpf_iter_fini_seq_net(priv_data); + return err; + } + + return 0; +} + +static void bpf_iter_fini_unix(void *priv_data) +{ + struct bpf_unix_iter_state *iter = priv_data; + + bpf_iter_fini_seq_net(priv_data); + kvfree(iter->batch); +} + +static const struct bpf_iter_seq_info unix_seq_info = { + .seq_ops = &bpf_iter_unix_seq_ops, + .init_seq_private = bpf_iter_init_unix, + .fini_seq_private = bpf_iter_fini_unix, + .seq_priv_size = sizeof(struct bpf_unix_iter_state), +}; + +static const struct bpf_func_proto * +bpf_iter_unix_get_func_proto(enum bpf_func_id func_id, + const struct bpf_prog *prog) +{ + switch (func_id) { + case BPF_FUNC_setsockopt: + return &bpf_sk_setsockopt_proto; + case BPF_FUNC_getsockopt: + return &bpf_sk_getsockopt_proto; + default: + return NULL; + } +} + +static struct bpf_iter_reg unix_reg_info = { + .target = "unix", + .ctx_arg_info_size = 1, + .ctx_arg_info = { + { offsetof(struct bpf_iter__unix, unix_sk), + PTR_TO_BTF_ID_OR_NULL }, + }, + .get_func_proto = bpf_iter_unix_get_func_proto, + .seq_info = &unix_seq_info, +}; + +static void __init bpf_iter_register(void) +{ + unix_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UNIX]; + if (bpf_iter_reg_target(&unix_reg_info)) + pr_warn("Warning: could not register bpf iterator unix\n"); +} +#endif + +static int __init af_unix_init(void) +{ + int i, rc = -1; + + BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb)); + + for (i = 0; i < UNIX_HASH_SIZE / 2; i++) { + spin_lock_init(&bsd_socket_locks[i]); + INIT_HLIST_HEAD(&bsd_socket_buckets[i]); + } + + rc = proto_register(&unix_dgram_proto, 1); + if (rc != 0) { + pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__); + goto out; + } + + rc = proto_register(&unix_stream_proto, 1); + if (rc != 0) { + pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__); + proto_unregister(&unix_dgram_proto); + goto out; + } + + sock_register(&unix_family_ops); + register_pernet_subsys(&unix_net_ops); + unix_bpf_build_proto(); + +#if IS_BUILTIN(CONFIG_UNIX) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS) + bpf_iter_register(); +#endif + +out: + return rc; +} + +static void __exit af_unix_exit(void) +{ + sock_unregister(PF_UNIX); + proto_unregister(&unix_dgram_proto); + proto_unregister(&unix_stream_proto); + unregister_pernet_subsys(&unix_net_ops); +} + +/* Earlier than device_initcall() so that other drivers invoking + request_module() don't end up in a loop when modprobe tries + to use a UNIX socket. 
But later than subsys_initcall() because + we depend on stuff initialised there */ +fs_initcall(af_unix_init); +module_exit(af_unix_exit); + +MODULE_LICENSE("GPL"); +MODULE_ALIAS_NETPROTO(PF_UNIX); diff --git a/net/unix/diag.c b/net/unix/diag.c new file mode 100644 index 000000000..616b55c5b --- /dev/null +++ b/net/unix/diag.c @@ -0,0 +1,342 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <linux/types.h> +#include <linux/spinlock.h> +#include <linux/sock_diag.h> +#include <linux/unix_diag.h> +#include <linux/skbuff.h> +#include <linux/module.h> +#include <linux/uidgid.h> +#include <net/netlink.h> +#include <net/af_unix.h> +#include <net/tcp_states.h> +#include <net/sock.h> + +static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb) +{ + /* might or might not have a hash table lock */ + struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr); + + if (!addr) + return 0; + + return nla_put(nlskb, UNIX_DIAG_NAME, + addr->len - offsetof(struct sockaddr_un, sun_path), + addr->name->sun_path); +} + +static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb) +{ + struct dentry *dentry = unix_sk(sk)->path.dentry; + + if (dentry) { + struct unix_diag_vfs uv = { + .udiag_vfs_ino = d_backing_inode(dentry)->i_ino, + .udiag_vfs_dev = dentry->d_sb->s_dev, + }; + + return nla_put(nlskb, UNIX_DIAG_VFS, sizeof(uv), &uv); + } + + return 0; +} + +static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb) +{ + struct sock *peer; + int ino; + + peer = unix_peer_get(sk); + if (peer) { + unix_state_lock(peer); + ino = sock_i_ino(peer); + unix_state_unlock(peer); + sock_put(peer); + + return nla_put_u32(nlskb, UNIX_DIAG_PEER, ino); + } + + return 0; +} + +static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb) +{ + struct sk_buff *skb; + struct nlattr *attr; + u32 *buf; + int i; + + if (sk->sk_state == TCP_LISTEN) { + spin_lock(&sk->sk_receive_queue.lock); + + attr = nla_reserve(nlskb, UNIX_DIAG_ICONS, + sk->sk_receive_queue.qlen * sizeof(u32)); + if (!attr) + goto errout; + + buf = nla_data(attr); + i = 0; + skb_queue_walk(&sk->sk_receive_queue, skb) { + struct sock *req, *peer; + + req = skb->sk; + /* + * The state lock is outer for the same sk's + * queue lock. With the other's queue locked it's + * OK to lock the state. + */ + unix_state_lock_nested(req); + peer = unix_sk(req)->peer; + buf[i++] = (peer ? 
sock_i_ino(peer) : 0); + unix_state_unlock(req); + } + spin_unlock(&sk->sk_receive_queue.lock); + } + + return 0; + +errout: + spin_unlock(&sk->sk_receive_queue.lock); + return -EMSGSIZE; +} + +static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb) +{ + struct unix_diag_rqlen rql; + + if (sk->sk_state == TCP_LISTEN) { + rql.udiag_rqueue = sk->sk_receive_queue.qlen; + rql.udiag_wqueue = sk->sk_max_ack_backlog; + } else { + rql.udiag_rqueue = (u32) unix_inq_len(sk); + rql.udiag_wqueue = (u32) unix_outq_len(sk); + } + + return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql); +} + +static int sk_diag_dump_uid(struct sock *sk, struct sk_buff *nlskb, + struct user_namespace *user_ns) +{ + uid_t uid = from_kuid_munged(user_ns, sock_i_uid(sk)); + return nla_put(nlskb, UNIX_DIAG_UID, sizeof(uid_t), &uid); +} + +static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req, + struct user_namespace *user_ns, + u32 portid, u32 seq, u32 flags, int sk_ino) +{ + struct nlmsghdr *nlh; + struct unix_diag_msg *rep; + + nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep), + flags); + if (!nlh) + return -EMSGSIZE; + + rep = nlmsg_data(nlh); + rep->udiag_family = AF_UNIX; + rep->udiag_type = sk->sk_type; + rep->udiag_state = sk->sk_state; + rep->pad = 0; + rep->udiag_ino = sk_ino; + sock_diag_save_cookie(sk, rep->udiag_cookie); + + if ((req->udiag_show & UDIAG_SHOW_NAME) && + sk_diag_dump_name(sk, skb)) + goto out_nlmsg_trim; + + if ((req->udiag_show & UDIAG_SHOW_VFS) && + sk_diag_dump_vfs(sk, skb)) + goto out_nlmsg_trim; + + if ((req->udiag_show & UDIAG_SHOW_PEER) && + sk_diag_dump_peer(sk, skb)) + goto out_nlmsg_trim; + + if ((req->udiag_show & UDIAG_SHOW_ICONS) && + sk_diag_dump_icons(sk, skb)) + goto out_nlmsg_trim; + + if ((req->udiag_show & UDIAG_SHOW_RQLEN) && + sk_diag_show_rqlen(sk, skb)) + goto out_nlmsg_trim; + + if ((req->udiag_show & UDIAG_SHOW_MEMINFO) && + sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO)) + goto out_nlmsg_trim; + + if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown)) + goto out_nlmsg_trim; + + if ((req->udiag_show & UDIAG_SHOW_UID) && + sk_diag_dump_uid(sk, skb, user_ns)) + goto out_nlmsg_trim; + + nlmsg_end(skb, nlh); + return 0; + +out_nlmsg_trim: + nlmsg_cancel(skb, nlh); + return -EMSGSIZE; +} + +static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req, + struct user_namespace *user_ns, + u32 portid, u32 seq, u32 flags) +{ + int sk_ino; + + unix_state_lock(sk); + sk_ino = sock_i_ino(sk); + unix_state_unlock(sk); + + if (!sk_ino) + return 0; + + return sk_diag_fill(sk, skb, req, user_ns, portid, seq, flags, sk_ino); +} + +static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) +{ + struct net *net = sock_net(skb->sk); + int num, s_num, slot, s_slot; + struct unix_diag_req *req; + + req = nlmsg_data(cb->nlh); + + s_slot = cb->args[0]; + num = s_num = cb->args[1]; + + for (slot = s_slot; slot < UNIX_HASH_SIZE; s_num = 0, slot++) { + struct sock *sk; + + num = 0; + spin_lock(&net->unx.table.locks[slot]); + sk_for_each(sk, &net->unx.table.buckets[slot]) { + if (num < s_num) + goto next; + if (!(req->udiag_states & (1 << sk->sk_state))) + goto next; + if (sk_diag_dump(sk, skb, req, sk_user_ns(skb->sk), + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + NLM_F_MULTI) < 0) { + spin_unlock(&net->unx.table.locks[slot]); + goto done; + } +next: + num++; + } + spin_unlock(&net->unx.table.locks[slot]); + } +done: + cb->args[0] = slot; + cb->args[1] = num; + + return 
skb->len; +} + +static struct sock *unix_lookup_by_ino(struct net *net, unsigned int ino) +{ + struct sock *sk; + int i; + + for (i = 0; i < UNIX_HASH_SIZE; i++) { + spin_lock(&net->unx.table.locks[i]); + sk_for_each(sk, &net->unx.table.buckets[i]) { + if (ino == sock_i_ino(sk)) { + sock_hold(sk); + spin_unlock(&net->unx.table.locks[i]); + return sk; + } + } + spin_unlock(&net->unx.table.locks[i]); + } + return NULL; +} + +static int unix_diag_get_exact(struct sk_buff *in_skb, + const struct nlmsghdr *nlh, + struct unix_diag_req *req) +{ + struct net *net = sock_net(in_skb->sk); + unsigned int extra_len; + struct sk_buff *rep; + struct sock *sk; + int err; + + err = -EINVAL; + if (req->udiag_ino == 0) + goto out_nosk; + + sk = unix_lookup_by_ino(net, req->udiag_ino); + err = -ENOENT; + if (sk == NULL) + goto out_nosk; + + err = sock_diag_check_cookie(sk, req->udiag_cookie); + if (err) + goto out; + + extra_len = 256; +again: + err = -ENOMEM; + rep = nlmsg_new(sizeof(struct unix_diag_msg) + extra_len, GFP_KERNEL); + if (!rep) + goto out; + + err = sk_diag_fill(sk, rep, req, sk_user_ns(NETLINK_CB(in_skb).sk), + NETLINK_CB(in_skb).portid, + nlh->nlmsg_seq, 0, req->udiag_ino); + if (err < 0) { + nlmsg_free(rep); + extra_len += 256; + if (extra_len >= PAGE_SIZE) + goto out; + + goto again; + } + err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid); + +out: + if (sk) + sock_put(sk); +out_nosk: + return err; +} + +static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h) +{ + int hdrlen = sizeof(struct unix_diag_req); + + if (nlmsg_len(h) < hdrlen) + return -EINVAL; + + if (h->nlmsg_flags & NLM_F_DUMP) { + struct netlink_dump_control c = { + .dump = unix_diag_dump, + }; + return netlink_dump_start(sock_net(skb->sk)->diag_nlsk, skb, h, &c); + } else + return unix_diag_get_exact(skb, h, nlmsg_data(h)); +} + +static const struct sock_diag_handler unix_diag_handler = { + .family = AF_UNIX, + .dump = unix_diag_handler_dump, +}; + +static int __init unix_diag_init(void) +{ + return sock_diag_register(&unix_diag_handler); +} + +static void __exit unix_diag_exit(void) +{ + sock_diag_unregister(&unix_diag_handler); +} + +module_init(unix_diag_init); +module_exit(unix_diag_exit); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 1 /* AF_LOCAL */); diff --git a/net/unix/garbage.c b/net/unix/garbage.c new file mode 100644 index 000000000..dc2763540 --- /dev/null +++ b/net/unix/garbage.c @@ -0,0 +1,335 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * NET3: Garbage Collector For AF_UNIX sockets + * + * Garbage Collector: + * Copyright (C) Barak A. Pearlmutter. + * + * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem. + * If it doesn't work blame me, it worked when Barak sent it. + * + * Assumptions: + * + * - object w/ a bit + * - free list + * + * Current optimizations: + * + * - explicit stack instead of recursion + * - tail recurse on first born instead of immediate push/pop + * - we gather the stuff that should not be killed into tree + * and stack is just a path from root to the current pointer. + * + * Future optimizations: + * + * - don't just push entire root set; process in place + * + * Fixes: + * Alan Cox 07 Sept 1997 Vmalloc internal stack as needed. + * Cope with changing max_files. + * Al Viro 11 Oct 1998 + * Graph may have cycles. That is, we can send the descriptor + * of foo to bar and vice versa. Current code chokes on that. 
+ * Fix: move SCM_RIGHTS ones into the separate list and then + * skb_free() them all instead of doing explicit fput's. + * Another problem: since fput() may block somebody may + * create a new unix_socket when we are in the middle of sweep + * phase. Fix: revert the logic wrt MARKED. Mark everything + * upon the beginning and unmark non-junk ones. + * + * [12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS + * sent to connect()'ed but still not accept()'ed sockets. + * Fixed. Old code had slightly different problem here: + * extra fput() in situation when we passed the descriptor via + * such socket and closed it (descriptor). That would happen on + * each unix_gc() until the accept(). Since the struct file in + * question would go to the free list and might be reused... + * That might be the reason of random oopses on filp_close() + * in unrelated processes. + * + * AV 28 Feb 1999 + * Kill the explicit allocation of stack. Now we keep the tree + * with root in dummy + pointer (gc_current) to one of the nodes. + * Stack is represented as path from gc_current to dummy. Unmark + * now means "add to tree". Push == "make it a son of gc_current". + * Pop == "move gc_current to parent". We keep only pointers to + * parents (->gc_tree). + * AV 1 Mar 1999 + * Damn. Added missing check for ->dead in listen queues scanning. + * + * Miklos Szeredi 25 Jun 2007 + * Reimplement with a cycle collecting algorithm. This should + * solve several problems with the previous code, like being racy + * wrt receive and holding up unrelated socket operations. + */ + +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/socket.h> +#include <linux/un.h> +#include <linux/net.h> +#include <linux/fs.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/file.h> +#include <linux/proc_fs.h> +#include <linux/mutex.h> +#include <linux/wait.h> + +#include <net/sock.h> +#include <net/af_unix.h> +#include <net/scm.h> +#include <net/tcp_states.h> + +#include "scm.h" + +/* Internal data structures and random procedures: */ + +static LIST_HEAD(gc_candidates); +static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait); + +static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *), + struct sk_buff_head *hitlist) +{ + struct sk_buff *skb; + struct sk_buff *next; + + spin_lock(&x->sk_receive_queue.lock); + skb_queue_walk_safe(&x->sk_receive_queue, skb, next) { + /* Do we have file descriptors ? */ + if (UNIXCB(skb).fp) { + bool hit = false; + /* Process the descriptors of this socket */ + int nfd = UNIXCB(skb).fp->count; + struct file **fp = UNIXCB(skb).fp->fp; + + while (nfd--) { + /* Get the socket the fd matches if it indeed does so */ + struct sock *sk = unix_get_socket(*fp++); + + if (sk) { + struct unix_sock *u = unix_sk(sk); + + /* Ignore non-candidates, they could + * have been added to the queues after + * starting the garbage collection + */ + if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) { + hit = true; + + func(u); + } + } + } + if (hit && hitlist != NULL) { + __skb_unlink(skb, &x->sk_receive_queue); + __skb_queue_tail(hitlist, skb); + } + } + } + spin_unlock(&x->sk_receive_queue.lock); +} + +static void scan_children(struct sock *x, void (*func)(struct unix_sock *), + struct sk_buff_head *hitlist) +{ + if (x->sk_state != TCP_LISTEN) { + scan_inflight(x, func, hitlist); + } else { + struct sk_buff *skb; + struct sk_buff *next; + struct unix_sock *u; + LIST_HEAD(embryos); + + /* For a listening socket collect the queued embryos + * and perform a scan on them as well. 
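+		 * ("Embryos" are sockets created by connect() that have
+		 * not been accept()ed yet; they sit on the listener's
+		 * receive queue and can already hold in-flight fds.)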
+ */ + spin_lock(&x->sk_receive_queue.lock); + skb_queue_walk_safe(&x->sk_receive_queue, skb, next) { + u = unix_sk(skb->sk); + + /* An embryo cannot be in-flight, so it's safe + * to use the list link. + */ + BUG_ON(!list_empty(&u->link)); + list_add_tail(&u->link, &embryos); + } + spin_unlock(&x->sk_receive_queue.lock); + + while (!list_empty(&embryos)) { + u = list_entry(embryos.next, struct unix_sock, link); + scan_inflight(&u->sk, func, hitlist); + list_del_init(&u->link); + } + } +} + +static void dec_inflight(struct unix_sock *usk) +{ + atomic_long_dec(&usk->inflight); +} + +static void inc_inflight(struct unix_sock *usk) +{ + atomic_long_inc(&usk->inflight); +} + +static void inc_inflight_move_tail(struct unix_sock *u) +{ + atomic_long_inc(&u->inflight); + /* If this still might be part of a cycle, move it to the end + * of the list, so that it's checked even if it was already + * passed over + */ + if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags)) + list_move_tail(&u->link, &gc_candidates); +} + +static bool gc_in_progress; +#define UNIX_INFLIGHT_TRIGGER_GC 16000 + +void wait_for_unix_gc(void) +{ + /* If number of inflight sockets is insane, + * force a garbage collect right now. + * Paired with the WRITE_ONCE() in unix_inflight(), + * unix_notinflight() and gc_in_progress(). + */ + if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC && + !READ_ONCE(gc_in_progress)) + unix_gc(); + wait_event(unix_gc_wait, gc_in_progress == false); +} + +/* The external entry point: unix_gc() */ +void unix_gc(void) +{ + struct sk_buff *next_skb, *skb; + struct unix_sock *u; + struct unix_sock *next; + struct sk_buff_head hitlist; + struct list_head cursor; + LIST_HEAD(not_cycle_list); + + spin_lock(&unix_gc_lock); + + /* Avoid a recursive GC. */ + if (gc_in_progress) + goto out; + + /* Paired with READ_ONCE() in wait_for_unix_gc(). */ + WRITE_ONCE(gc_in_progress, true); + + /* First, select candidates for garbage collection. Only + * in-flight sockets are considered, and from those only ones + * which don't have any external reference. + * + * Holding unix_gc_lock will protect these candidates from + * being detached, and hence from gaining an external + * reference. Since there are no possible receivers, all + * buffers currently on the candidates' queues stay there + * during the garbage collection. + * + * We also know that no new candidate can be added onto the + * receive queues. Other, non candidate sockets _can_ be + * added to queue, so we must make sure only to touch + * candidates. + */ + list_for_each_entry_safe(u, next, &gc_inflight_list, link) { + long total_refs; + long inflight_refs; + + total_refs = file_count(u->sk.sk_socket->file); + inflight_refs = atomic_long_read(&u->inflight); + + BUG_ON(inflight_refs < 1); + BUG_ON(total_refs < inflight_refs); + if (total_refs == inflight_refs) { + list_move_tail(&u->link, &gc_candidates); + __set_bit(UNIX_GC_CANDIDATE, &u->gc_flags); + __set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags); + } + } + + /* Now remove all internal in-flight reference to children of + * the candidates. + */ + list_for_each_entry(u, &gc_candidates, link) + scan_children(&u->sk, dec_inflight, NULL); + + /* Restore the references for children of all candidates, + * which have remaining references. Do this recursively, so + * only those remain, which form cyclic references. + * + * Use a "cursor" link, to make the list traversal safe, even + * though elements might be moved about. 
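+	 * (The cursor is a dummy list_head, never a candidate itself;
+	 * it just marks where to resume after an entry is moved or
+	 * removed.)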
+ */ + list_add(&cursor, &gc_candidates); + while (cursor.next != &gc_candidates) { + u = list_entry(cursor.next, struct unix_sock, link); + + /* Move cursor to after the current position. */ + list_move(&cursor, &u->link); + + if (atomic_long_read(&u->inflight) > 0) { + list_move_tail(&u->link, ¬_cycle_list); + __clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags); + scan_children(&u->sk, inc_inflight_move_tail, NULL); + } + } + list_del(&cursor); + + /* Now gc_candidates contains only garbage. Restore original + * inflight counters for these as well, and remove the skbuffs + * which are creating the cycle(s). + */ + skb_queue_head_init(&hitlist); + list_for_each_entry(u, &gc_candidates, link) + scan_children(&u->sk, inc_inflight, &hitlist); + + /* not_cycle_list contains those sockets which do not make up a + * cycle. Restore these to the inflight list. + */ + while (!list_empty(¬_cycle_list)) { + u = list_entry(not_cycle_list.next, struct unix_sock, link); + __clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags); + list_move_tail(&u->link, &gc_inflight_list); + } + + spin_unlock(&unix_gc_lock); + + /* We need io_uring to clean its registered files, ignore all io_uring + * originated skbs. It's fine as io_uring doesn't keep references to + * other io_uring instances and so killing all other files in the cycle + * will put all io_uring references forcing it to go through normal + * release.path eventually putting registered files. + */ + skb_queue_walk_safe(&hitlist, skb, next_skb) { + if (skb->scm_io_uring) { + __skb_unlink(skb, &hitlist); + skb_queue_tail(&skb->sk->sk_receive_queue, skb); + } + } + + /* Here we are. Hitlist is filled. Die. */ + __skb_queue_purge(&hitlist); + + spin_lock(&unix_gc_lock); + + /* There could be io_uring registered files, just push them back to + * the inflight list + */ + list_for_each_entry_safe(u, next, &gc_candidates, link) + list_move_tail(&u->link, &gc_inflight_list); + + /* All candidates should have been detached by now. */ + BUG_ON(!list_empty(&gc_candidates)); + + /* Paired with READ_ONCE() in wait_for_unix_gc(). */ + WRITE_ONCE(gc_in_progress, false); + + wake_up(&unix_gc_wait); + + out: + spin_unlock(&unix_gc_lock); +} diff --git a/net/unix/scm.c b/net/unix/scm.c new file mode 100644 index 000000000..e8e2a00bb --- /dev/null +++ b/net/unix/scm.c @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/socket.h> +#include <linux/net.h> +#include <linux/fs.h> +#include <net/af_unix.h> +#include <net/scm.h> +#include <linux/init.h> +#include <linux/io_uring.h> + +#include "scm.h" + +unsigned int unix_tot_inflight; +EXPORT_SYMBOL(unix_tot_inflight); + +LIST_HEAD(gc_inflight_list); +EXPORT_SYMBOL(gc_inflight_list); + +DEFINE_SPINLOCK(unix_gc_lock); +EXPORT_SYMBOL(unix_gc_lock); + +struct sock *unix_get_socket(struct file *filp) +{ + struct sock *u_sock = NULL; + struct inode *inode = file_inode(filp); + + /* Socket ? */ + if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) { + struct socket *sock = SOCKET_I(inode); + struct sock *s = sock->sk; + + /* PF_UNIX ? */ + if (s && sock->ops && sock->ops->family == PF_UNIX) + u_sock = s; + } else { + /* Could be an io_uring instance */ + u_sock = io_uring_get_socket(filp); + } + return u_sock; +} +EXPORT_SYMBOL(unix_get_socket); + +/* Keep the number of times in flight count for the file + * descriptor if it is for an AF_UNIX socket. 
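+ * Each SCM_RIGHTS transfer bumps u->inflight; the first reference
+ * also links the socket onto gc_inflight_list so the garbage
+ * collector can find it.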
+ */
+void unix_inflight(struct user_struct *user, struct file *fp)
+{
+	struct sock *s = unix_get_socket(fp);
+
+	spin_lock(&unix_gc_lock);
+
+	if (s) {
+		struct unix_sock *u = unix_sk(s);
+
+		if (atomic_long_inc_return(&u->inflight) == 1) {
+			BUG_ON(!list_empty(&u->link));
+			list_add_tail(&u->link, &gc_inflight_list);
+		} else {
+			BUG_ON(list_empty(&u->link));
+		}
+		/* Paired with READ_ONCE() in wait_for_unix_gc() */
+		WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1);
+	}
+	WRITE_ONCE(user->unix_inflight, user->unix_inflight + 1);
+	spin_unlock(&unix_gc_lock);
+}
+
+void unix_notinflight(struct user_struct *user, struct file *fp)
+{
+	struct sock *s = unix_get_socket(fp);
+
+	spin_lock(&unix_gc_lock);
+
+	if (s) {
+		struct unix_sock *u = unix_sk(s);
+
+		BUG_ON(!atomic_long_read(&u->inflight));
+		BUG_ON(list_empty(&u->link));
+
+		if (atomic_long_dec_and_test(&u->inflight))
+			list_del_init(&u->link);
+		/* Paired with READ_ONCE() in wait_for_unix_gc() */
+		WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1);
+	}
+	WRITE_ONCE(user->unix_inflight, user->unix_inflight - 1);
+	spin_unlock(&unix_gc_lock);
+}
+
+/*
+ * The "user->unix_inflight" variable is protected by the garbage
+ * collection lock, and we just read it locklessly here. If you go
+ * over the limit, there might be a tiny race in actually noticing
+ * it across threads. Tough.
+ */
+static inline bool too_many_unix_fds(struct task_struct *p)
+{
+	struct user_struct *user = current_user();
+
+	if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE)))
+		return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
+	return false;
+}
+
+int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+{
+	int i;
+
+	if (too_many_unix_fds(current))
+		return -ETOOMANYREFS;
+
+	/*
+	 * Need to duplicate file references for the sake of garbage
+	 * collection. Otherwise a socket in the fp array might become
+	 * a candidate for GC while the skb is not yet queued.
+	 */
+	UNIXCB(skb).fp = scm_fp_dup(scm->fp);
+	if (!UNIXCB(skb).fp)
+		return -ENOMEM;
+
+	for (i = scm->fp->count - 1; i >= 0; i--)
+		unix_inflight(scm->fp->user, scm->fp->fp[i]);
+	return 0;
+}
+EXPORT_SYMBOL(unix_attach_fds);
+
+void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+{
+	int i;
+
+	scm->fp = UNIXCB(skb).fp;
+	UNIXCB(skb).fp = NULL;
+
+	for (i = scm->fp->count - 1; i >= 0; i--)
+		unix_notinflight(scm->fp->user, scm->fp->fp[i]);
+}
+EXPORT_SYMBOL(unix_detach_fds);
+
+void unix_destruct_scm(struct sk_buff *skb)
+{
+	struct scm_cookie scm;
+
+	memset(&scm, 0, sizeof(scm));
+	scm.pid = UNIXCB(skb).pid;
+	if (UNIXCB(skb).fp)
+		unix_detach_fds(&scm, skb);
+
+	/* Alas, it calls VFS */
+	/* So fscking what? fput() has been SMP-safe since last summer */
+	scm_destroy(&scm);
+	sock_wfree(skb);
+}
+EXPORT_SYMBOL(unix_destruct_scm);
diff --git a/net/unix/scm.h b/net/unix/scm.h
new file mode 100644
index 000000000..5a255a477
--- /dev/null
+++ b/net/unix/scm.h
@@ -0,0 +1,10 @@
+#ifndef NET_UNIX_SCM_H
+#define NET_UNIX_SCM_H
+
+extern struct list_head gc_inflight_list;
+extern spinlock_t unix_gc_lock;
+
+int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb);
+void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb);
+
+#endif
diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c
new file mode 100644
index 000000000..500129aa7
--- /dev/null
+++ b/net/unix/sysctl_net_unix.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * NET4:	Sysctl interface to net af_unix subsystem.
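+ *
+ * The single knob registered below, net.unix.max_dgram_qlen, is also
+ * visible as /proc/sys/net/unix/max_dgram_qlen. Illustrative reader
+ * (editorial sketch, not part of the original file):
+ *
+ *	FILE *f = fopen("/proc/sys/net/unix/max_dgram_qlen", "r");
+ *	int qlen;
+ *
+ *	if (f && fscanf(f, "%d", &qlen) == 1)
+ *		printf("max_dgram_qlen = %d\n", qlen);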
+ *
+ * Authors:	Mike Shaver.
+ */
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/sysctl.h>
+
+#include <net/af_unix.h>
+
+static struct ctl_table unix_table[] = {
+	{
+		.procname	= "max_dgram_qlen",
+		.data		= &init_net.unx.sysctl_max_dgram_qlen,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec
+	},
+	{ }
+};
+
+int __net_init unix_sysctl_register(struct net *net)
+{
+	struct ctl_table *table;
+
+	if (net_eq(net, &init_net)) {
+		table = unix_table;
+	} else {
+		table = kmemdup(unix_table, sizeof(unix_table), GFP_KERNEL);
+		if (!table)
+			goto err_alloc;
+
+		table[0].data = &net->unx.sysctl_max_dgram_qlen;
+	}
+
+	net->unx.ctl = register_net_sysctl(net, "net/unix", table);
+	if (net->unx.ctl == NULL)
+		goto err_reg;
+
+	return 0;
+
+err_reg:
+	if (!net_eq(net, &init_net))
+		kfree(table);
+err_alloc:
+	return -ENOMEM;
+}
+
+void unix_sysctl_unregister(struct net *net)
+{
+	struct ctl_table *table;
+
+	table = net->unx.ctl->ctl_table_arg;
+	unregister_net_sysctl_table(net->unx.ctl);
+	if (!net_eq(net, &init_net))
+		kfree(table);
+}
diff --git a/net/unix/unix_bpf.c b/net/unix/unix_bpf.c
new file mode 100644
index 000000000..bd84785bf
--- /dev/null
+++ b/net/unix/unix_bpf.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Cong Wang <cong.wang@bytedance.com> */
+
+#include <linux/skmsg.h>
+#include <linux/bpf.h>
+#include <net/sock.h>
+#include <net/af_unix.h>
+
+#define unix_sk_has_data(__sk, __psock)					\
+		({	!skb_queue_empty(&__sk->sk_receive_queue) ||	\
+			!skb_queue_empty(&__psock->ingress_skb) ||	\
+			!list_empty(&__psock->ingress_msg);		\
+		})
+
+static int unix_msg_wait_data(struct sock *sk, struct sk_psock *psock,
+			      long timeo)
+{
+	DEFINE_WAIT_FUNC(wait, woken_wake_function);
+	struct unix_sock *u = unix_sk(sk);
+	int ret = 0;
+
+	if (sk->sk_shutdown & RCV_SHUTDOWN)
+		return 1;
+
+	if (!timeo)
+		return ret;
+
+	add_wait_queue(sk_sleep(sk), &wait);
+	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+	if (!unix_sk_has_data(sk, psock)) {
+		mutex_unlock(&u->iolock);
+		wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
+		mutex_lock(&u->iolock);
+		ret = unix_sk_has_data(sk, psock);
+	}
+	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+	remove_wait_queue(sk_sleep(sk), &wait);
+	return ret;
+}
+
+static int __unix_recvmsg(struct sock *sk, struct msghdr *msg,
+			  size_t len, int flags)
+{
+	if (sk->sk_type == SOCK_DGRAM)
+		return __unix_dgram_recvmsg(sk, msg, len, flags);
+	else
+		return __unix_stream_recvmsg(sk, msg, len, flags);
+}
+
+static int unix_bpf_recvmsg(struct sock *sk, struct msghdr *msg,
+			    size_t len, int flags, int *addr_len)
+{
+	struct unix_sock *u = unix_sk(sk);
+	struct sk_psock *psock;
+	int copied;
+
+	if (!len)
+		return 0;
+
+	psock = sk_psock_get(sk);
+	if (unlikely(!psock))
+		return __unix_recvmsg(sk, msg, len, flags);
+
+	mutex_lock(&u->iolock);
+	if (!skb_queue_empty(&sk->sk_receive_queue) &&
+	    sk_psock_queue_empty(psock)) {
+		mutex_unlock(&u->iolock);
+		sk_psock_put(sk, psock);
+		return __unix_recvmsg(sk, msg, len, flags);
+	}
+
+msg_bytes_ready:
+	copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
+	if (!copied) {
+		long timeo;
+		int data;
+
+		timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+		data = unix_msg_wait_data(sk, psock, timeo);
+		if (data) {
+			if (!sk_psock_queue_empty(psock))
+				goto msg_bytes_ready;
+			mutex_unlock(&u->iolock);
+			sk_psock_put(sk, psock);
+			return __unix_recvmsg(sk, msg, len, flags);
+		}
+		copied = -EAGAIN;
+	}
+	mutex_unlock(&u->iolock);
+	sk_psock_put(sk, psock);
+	return copied;
+}
+
+static struct proto *unix_dgram_prot_saved __read_mostly;
+static DEFINE_SPINLOCK(unix_dgram_prot_lock);
+static struct proto unix_dgram_bpf_prot;
+
+static struct proto *unix_stream_prot_saved __read_mostly;
+static DEFINE_SPINLOCK(unix_stream_prot_lock);
+static struct proto unix_stream_bpf_prot;
+
+static void unix_dgram_bpf_rebuild_protos(struct proto *prot, const struct proto *base)
+{
+	*prot = *base;
+	prot->close = sock_map_close;
+	prot->recvmsg = unix_bpf_recvmsg;
+	prot->sock_is_readable = sk_msg_is_readable;
+}
+
+static void unix_stream_bpf_rebuild_protos(struct proto *prot,
+					   const struct proto *base)
+{
+	*prot = *base;
+	prot->close = sock_map_close;
+	prot->recvmsg = unix_bpf_recvmsg;
+	prot->sock_is_readable = sk_msg_is_readable;
+	prot->unhash = sock_map_unhash;
+}
+
+static void unix_dgram_bpf_check_needs_rebuild(struct proto *ops)
+{
+	if (unlikely(ops != smp_load_acquire(&unix_dgram_prot_saved))) {
+		spin_lock_bh(&unix_dgram_prot_lock);
+		if (likely(ops != unix_dgram_prot_saved)) {
+			unix_dgram_bpf_rebuild_protos(&unix_dgram_bpf_prot, ops);
+			smp_store_release(&unix_dgram_prot_saved, ops);
+		}
+		spin_unlock_bh(&unix_dgram_prot_lock);
+	}
+}
+
+static void unix_stream_bpf_check_needs_rebuild(struct proto *ops)
+{
+	if (unlikely(ops != smp_load_acquire(&unix_stream_prot_saved))) {
+		spin_lock_bh(&unix_stream_prot_lock);
+		if (likely(ops != unix_stream_prot_saved)) {
+			unix_stream_bpf_rebuild_protos(&unix_stream_bpf_prot, ops);
+			smp_store_release(&unix_stream_prot_saved, ops);
+		}
+		spin_unlock_bh(&unix_stream_prot_lock);
+	}
+}
+
+int unix_dgram_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
+{
+	if (sk->sk_type != SOCK_DGRAM)
+		return -EOPNOTSUPP;
+
+	if (restore) {
+		sk->sk_write_space = psock->saved_write_space;
+		sock_replace_proto(sk, psock->sk_proto);
+		return 0;
+	}
+
+	unix_dgram_bpf_check_needs_rebuild(psock->sk_proto);
+	sock_replace_proto(sk, &unix_dgram_bpf_prot);
+	return 0;
+}
+
+int unix_stream_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
+{
+	struct sock *sk_pair;
+
+	/* Restore does not decrement the sk_pair reference yet because we must
+	 * keep a reference to the socket until after an RCU grace period
+	 * and any pending sends have completed.
+	 */
+	if (restore) {
+		sk->sk_write_space = psock->saved_write_space;
+		sock_replace_proto(sk, psock->sk_proto);
+		return 0;
+	}
+
+	/* psock_update_sk_prot can be called multiple times if psock is
+	 * added to multiple maps and/or slots in the same map. There is
+	 * also an edge case where replacing a psock with itself can trigger
+	 * an extra psock_update_sk_prot during the insert process. So it
+	 * must be safe to do multiple calls. Here we need to ensure we don't
+	 * increment the refcnt through sock_hold many times. There will only
+	 * be a single matching destroy operation.
+	 */
+	if (!psock->sk_pair) {
+		sk_pair = unix_peer(sk);
+		sock_hold(sk_pair);
+		psock->sk_pair = sk_pair;
+	}
+
+	unix_stream_bpf_check_needs_rebuild(psock->sk_proto);
+	sock_replace_proto(sk, &unix_stream_bpf_prot);
+	return 0;
+}
+
+void __init unix_bpf_build_proto(void)
+{
+	unix_dgram_bpf_rebuild_protos(&unix_dgram_bpf_prot, &unix_dgram_proto);
+	unix_stream_bpf_rebuild_protos(&unix_stream_bpf_prot, &unix_stream_proto);
+}
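
A minimal userspace sketch of how the proto swap above gets triggered
(illustrative editorial addition, not part of the patch; assumes libbpf >= 0.7
for bpf_map_create() and omits error cleanup): inserting a connected AF_UNIX
stream socket into a BPF_MAP_TYPE_SOCKMAP makes the sockmap code set up a
psock and call unix_stream_bpf_update_proto(), after which recvmsg() on that
socket goes through unix_bpf_recvmsg().

	#include <sys/socket.h>
	#include <bpf/bpf.h>

	int attach_unix_to_sockmap(void)
	{
		int sv[2], map_fd, key = 0;

		/* A connected AF_UNIX stream pair. */
		if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv))
			return -1;

		map_fd = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, NULL,
					sizeof(int), sizeof(int), 2, NULL);
		if (map_fd < 0)
			return -1;

		/* Kernel side: sockmap update -> psock setup ->
		 * unix_stream_bpf_update_proto(sk, psock, false),
		 * which swaps in unix_stream_bpf_prot.
		 */
		return bpf_map_update_elem(map_fd, &key, &sv[0], BPF_ANY);
	}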