// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET4:	Implementation of BSD Unix domain sockets.
 *
 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 *		Linus Torvalds	:	Assorted bug cures.
 *		Niibe Yutaka	:	async I/O support.
 *		Carsten Paeth	:	PF_UNIX check, address fixes.
 *		Alan Cox	:	Limit size of allocated blocks.
 *		Alan Cox	:	Fixed the stupid socketpair bug.
 *		Alan Cox	:	BSD compatibility fine tuning.
 *		Alan Cox	:	Fixed a bug in connect when interrupted.
 *		Alan Cox	:	Sorted out a proper draft version of
 *					file descriptor passing hacked up from
 *					BSD.
 *		Marty Leisner	:	Fixes to fd passing
 *		Nick Nevin	:	recvmsg bugfix.
 *		Alan Cox	:	Started proper garbage collector
 *		Heiko EiBfeldt	:	Missing verify_area check
 *		Alan Cox	:	Started POSIXisms
 *		Andreas Schwab	:	Replace inode by dentry for proper
 *					vfs locking
 *		Kirk Petersen	:	Made this a module
 *	    Christoph Rohland	:	Elegant non-blocking accept/connect algorithm.
 *					Lots of bug fixes.
 *	     Alexey Kuznetsov	:	Repaired (I hope) bugs introduced
 *					by the above two patches.
 *	     Andrea Arcangeli	:	If possible we block in connect(2)
 *					if the max backlog of the listen socket
 *					has been reached. This won't break
 *					old apps and it will avoid the huge amount
 *					of socks hashed (this for unix_gc()
 *					performance reasons).
 *					Security fix that limits the max
 *					number of socks to 2*max_files and
 *					the number of skbs queueable in the
 *					dgram receiver.
 *		Artur Skawina	:	Hash function optimizations
 *	     Alexey Kuznetsov	:	Full scale SMP. Lots of bugs are introduced 8)
 *	      Malcolm Beattie	:	Set peercred for socketpair
 *	     Michal Ostrowski	:	Module initialization cleanup.
 *	     Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
 *					the core infrastructure is doing that
 *					for all net proto families now (2.5.69+)
 *
 * Known differences from reference BSD that was tested:
 *
 *	[TO FIX] ECONNREFUSED is not returned from one end of a connected() socket to the
 *		other the moment one end closes.
 *	fstat() doesn't return st_dev=0, and gives the blksize as high water mark
 *		and a fake inode identifier (nor the BSD first socket fstat twice bug).
 *	[NOT TO FIX] accept() returns a path name even if the connecting socket has closed
 *		in the meantime (BSD loses the path and gives up).
 *	accept() returns 0 length path for an unbound connector. BSD returns 16
 *		and a null first byte in the path (but not for gethost/peername - BSD bug ??)
 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
 *	BSD af_unix apparently has connect forgetting to block properly.
 *		(need to check this with the POSIX spec in detail)
 *
 * Differences from 2.0.0-11-... (ANK)
 *	Bug fixes and improvements.
 *		- client shutdown killed server socket.
 *		- removed all useless cli/sti pairs.
 *
 *	Semantic changes/extensions.
 *		- generic control message passing.
 *		- SCM_CREDENTIALS control message.
 *		- "Abstract" (not FS based) socket bindings.
 *		  Abstract names are sequences of bytes (not zero terminated)
 *		  started by 0, so that this name space does not intersect
 *		  with BSD names.
 */
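/* Illustrative userspace sketch (editorial, not part of this file): binding
 * an abstract-namespace socket. The name is NOT a C string; it begins with
 * a NUL byte and its length is conveyed solely by addrlen.
 *
 *	struct sockaddr_un sun = { .sun_family = AF_UNIX };
 *	int fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *
 *	memcpy(sun.sun_path, "\0example", 8);	// leading NUL = abstract name
 *	bind(fd, (struct sockaddr *)&sun,
 *	     offsetof(struct sockaddr_un, sun_path) + 8);
 */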
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/filter.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/af_unix.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>
#include <linux/freezer.h>
#include <linux/file.h>
#include <linux/btf_ids.h>

spinlock_t unix_table_locks[2 * UNIX_HASH_SIZE];
EXPORT_SYMBOL_GPL(unix_table_locks);
struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
EXPORT_SYMBOL_GPL(unix_socket_table);
static atomic_long_t unix_nr_socks;
/* SMP locking strategy:
 *    hash table is protected with spinlock unix_table_locks
 *    each socket state is protected by separate spin lock.
 */
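/* Editorial sketch of the resulting table layout (derived from the hash
 * helpers below, not new mechanism):
 *
 *	[0 .. UNIX_HASH_SIZE - 1]		  bound sockets
 *						  (pathname or abstract names)
 *	[UNIX_HASH_SIZE .. 2 * UNIX_HASH_SIZE - 1] unbound sockets
 *
 * Keeping the two ranges disjoint means an old-hash/new-hash pair handed
 * to unix_table_double_lock() can never alias the same bucket.
 */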
static unsigned int unix_unbound_hash(struct sock *sk)
{
	unsigned long hash = (unsigned long)sk;

	hash ^= hash >> 16;
	hash ^= hash >> 8;
	hash ^= sk->sk_type;

	return UNIX_HASH_SIZE + (hash & (UNIX_HASH_SIZE - 1));
}

static unsigned int unix_bsd_hash(struct inode *i)
{
	return i->i_ino & (UNIX_HASH_SIZE - 1);
}

static unsigned int unix_abstract_hash(struct sockaddr_un *sunaddr,
				       int addr_len, int type)
{
	__wsum csum = csum_partial(sunaddr, addr_len, 0);
	unsigned int hash;

	hash = (__force unsigned int)csum_fold(csum);
	hash ^= hash >> 8;
	hash ^= type;

	return hash & (UNIX_HASH_SIZE - 1);
}

static void unix_table_double_lock(unsigned int hash1, unsigned int hash2)
{
	/* hash1 and hash2 are never the same because
	 * one is between 0 and UNIX_HASH_SIZE - 1, and
	 * another is between UNIX_HASH_SIZE and UNIX_HASH_SIZE * 2.
	 */
	if (hash1 > hash2)
		swap(hash1, hash2);

	spin_lock(&unix_table_locks[hash1]);
	spin_lock_nested(&unix_table_locks[hash2], SINGLE_DEPTH_NESTING);
}

static void unix_table_double_unlock(unsigned int hash1, unsigned int hash2)
{
	spin_unlock(&unix_table_locks[hash1]);
	spin_unlock(&unix_table_locks[hash2]);
}

#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	UNIXCB(skb).secid = scm->secid;
}

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->secid = UNIXCB(skb).secid;
}

static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
	return (scm->secid == UNIXCB(skb).secid);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
}

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
}

static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
	return true;
}
#endif /* CONFIG_SECURITY_NETWORK */
#define unix_peer(sk) (unix_sk(sk)->peer)

static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == sk;
}

static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}

static inline int unix_recvq_full(const struct sock *sk)
{
	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}

static inline int unix_recvq_full_lockless(const struct sock *sk)
{
	return skb_queue_len_lockless(&sk->sk_receive_queue) >
		READ_ONCE(sk->sk_max_ack_backlog);
}

struct sock *unix_peer_get(struct sock *s)
{
	struct sock *peer;

	unix_state_lock(s);
	peer = unix_peer(s);
	if (peer)
		sock_hold(peer);
	unix_state_unlock(s);
	return peer;
}
EXPORT_SYMBOL_GPL(unix_peer_get);

static struct unix_address *unix_create_addr(struct sockaddr_un *sunaddr,
					     int addr_len)
{
	struct unix_address *addr;

	addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL);
	if (!addr)
		return NULL;

	refcount_set(&addr->refcnt, 1);
	addr->len = addr_len;
	memcpy(addr->name, sunaddr, addr_len);

	return addr;
}

static inline void unix_release_addr(struct unix_address *addr)
{
	if (refcount_dec_and_test(&addr->refcnt))
		kfree(addr);
}
/*
 *	Check unix socket name:
 *		- it should not be zero length.
 *		- if it starts with a non-zero byte, it should be NUL-terminated (FS object)
 *		- if it starts with zero, it is an abstract name.
 */

static int unix_validate_addr(struct sockaddr_un *sunaddr, int addr_len)
{
	if (addr_len <= offsetof(struct sockaddr_un, sun_path) ||
	    addr_len > sizeof(*sunaddr))
		return -EINVAL;

	if (sunaddr->sun_family != AF_UNIX)
		return -EINVAL;

	return 0;
}

static void unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len)
{
	/* This may look like an off-by-one error but it is a bit more
	 * subtle. 108 is the longest valid AF_UNIX path for a binding.
	 * sun_path[108] doesn't as such exist. However in kernel space
	 * we are guaranteed that it is a valid memory location in our
	 * kernel address buffer because syscall functions always pass
	 * a pointer of struct sockaddr_storage, which has a bigger buffer.
	 */
	((char *)sunaddr)[addr_len] = 0;
}
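/* Editorial note on why the write above is in bounds (sizes are the usual
 * ABI values, stated here as an assumption):
 *
 *	sizeof(struct sockaddr_un)	= 110 (2 family + 108 path)
 *	sizeof(struct sockaddr_storage)	= 128
 *
 * addr_len was validated to be at most sizeof(struct sockaddr_un), so the
 * worst case touches byte 110 of a 128-byte kernel-side buffer.
 */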
static void __unix_remove_socket(struct sock *sk)
{
	sk_del_node_init(sk);
}

static void __unix_insert_socket(struct sock *sk)
{
	WARN_ON(!sk_unhashed(sk));
	sk_add_node(sk, &unix_socket_table[sk->sk_hash]);
}

static void __unix_set_addr_hash(struct sock *sk, struct unix_address *addr,
				 unsigned int hash)
{
	__unix_remove_socket(sk);
	smp_store_release(&unix_sk(sk)->addr, addr);

	sk->sk_hash = hash;
	__unix_insert_socket(sk);
}

static void unix_remove_socket(struct sock *sk)
{
	spin_lock(&unix_table_locks[sk->sk_hash]);
	__unix_remove_socket(sk);
	spin_unlock(&unix_table_locks[sk->sk_hash]);
}

static void unix_insert_unbound_socket(struct sock *sk)
{
	spin_lock(&unix_table_locks[sk->sk_hash]);
	__unix_insert_socket(sk);
	spin_unlock(&unix_table_locks[sk->sk_hash]);
}

static struct sock *__unix_find_socket_byname(struct net *net,
					      struct sockaddr_un *sunname,
					      int len, unsigned int hash)
{
	struct sock *s;

	sk_for_each(s, &unix_socket_table[hash]) {
		struct unix_sock *u = unix_sk(s);

		if (!net_eq(sock_net(s), net))
			continue;

		if (u->addr->len == len &&
		    !memcmp(u->addr->name, sunname, len))
			return s;
	}
	return NULL;
}

static inline struct sock *unix_find_socket_byname(struct net *net,
						   struct sockaddr_un *sunname,
						   int len, unsigned int hash)
{
	struct sock *s;

	spin_lock(&unix_table_locks[hash]);
	s = __unix_find_socket_byname(net, sunname, len, hash);
	if (s)
		sock_hold(s);
	spin_unlock(&unix_table_locks[hash]);
	return s;
}

static struct sock *unix_find_socket_byinode(struct inode *i)
{
	unsigned int hash = unix_bsd_hash(i);
	struct sock *s;

	spin_lock(&unix_table_locks[hash]);
	sk_for_each(s, &unix_socket_table[hash]) {
		struct dentry *dentry = unix_sk(s)->path.dentry;

		if (dentry && d_backing_inode(dentry) == i) {
			sock_hold(s);
			spin_unlock(&unix_table_locks[hash]);
			return s;
		}
	}
	spin_unlock(&unix_table_locks[hash]);
	return NULL;
}
/* Support code for asymmetrically connected dgram sockets
 *
 * If a datagram socket is connected to a socket not itself connected
 * to the first socket (eg, /dev/log), clients may only enqueue more
 * messages if the present receive queue of the server socket is not
 * "too large". This means there's a second writeability condition
 * poll and sendmsg need to test. The dgram recv code will do a wake
 * up on the peer_wait wait queue of a socket upon reception of a
 * datagram which needs to be propagated to sleeping would-be writers
 * since these might not have sent anything so far. This can't be
 * accomplished via poll_wait because the lifetime of the server
 * socket might be less than that of its clients if these break their
 * association with it or if the server socket is closed while clients
 * are still connected to it and there's no way to inform "a polling
 * implementation" that it should let go of a certain wait queue.
 *
 * In order to propagate a wake up, a wait_queue_entry_t of the client
 * socket is enqueued on the peer_wait queue of the server socket
 * whose wake function does a wake_up on the ordinary client socket
 * wait queue. This connection is established whenever a write (or
 * poll for write) hits the flow control condition and broken when the
 * association to the server socket is dissolved or after a wake up
 * was relayed.
 */
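/* Illustrative userspace view of the scenario above (/dev/log is just the
 * classic example; the snippet is an editorial sketch):
 *
 *	int c = socket(AF_UNIX, SOCK_DGRAM, 0);
 *	connect(c, ...);	// the peer does NOT connect back to us
 *
 *	poll(&(struct pollfd){ .fd = c, .events = POLLOUT }, 1, -1);
 *
 * While the peer's receive queue is over its backlog, POLLOUT must stay
 * clear; the peer_wake entry managed below is what relays the "writable
 * again" event back to the client's waiters once a reader drains it.
 */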
static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
				      void *key)
{
	struct unix_sock *u;
	wait_queue_head_t *u_sleep;

	u = container_of(q, struct unix_sock, peer_wake);

	__remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
			    q);
	u->peer_wake.private = NULL;

	/* relaying can only happen while the wq still exists */
	u_sleep = sk_sleep(&u->sk);
	if (u_sleep)
		wake_up_interruptible_poll(u_sleep, key_to_poll(key));

	return 0;
}

static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
{
	struct unix_sock *u, *u_other;
	int rc;

	u = unix_sk(sk);
	u_other = unix_sk(other);
	rc = 0;
	spin_lock(&u_other->peer_wait.lock);

	if (!u->peer_wake.private) {
		u->peer_wake.private = other;
		__add_wait_queue(&u_other->peer_wait, &u->peer_wake);
		rc = 1;
	}

	spin_unlock(&u_other->peer_wait.lock);
	return rc;
}

static void unix_dgram_peer_wake_disconnect(struct sock *sk,
					    struct sock *other)
{
	struct unix_sock *u, *u_other;

	u = unix_sk(sk);
	u_other = unix_sk(other);
	spin_lock(&u_other->peer_wait.lock);

	if (u->peer_wake.private == other) {
		__remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
		u->peer_wake.private = NULL;
	}

	spin_unlock(&u_other->peer_wait.lock);
}

static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
						   struct sock *other)
{
	unix_dgram_peer_wake_disconnect(sk, other);
	wake_up_interruptible_poll(sk_sleep(sk),
				   EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
}

/* preconditions:
 *	- unix_peer(sk) == other
 *	- association is stable
 */
static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
{
	int connected;

	connected = unix_dgram_peer_wake_connect(sk, other);

	/* If other is SOCK_DEAD, we want to make sure we signal
	 * POLLOUT, such that a subsequent write() can get a
	 * -ECONNREFUSED. Otherwise, if we haven't queued any skbs
	 * to other and it's full, we will hang waiting for POLLOUT.
	 */
	if (unix_recvq_full(other) && !sock_flag(other, SOCK_DEAD))
		return 1;

	if (connected)
		unix_dgram_peer_wake_disconnect(sk, other);

	return 0;
}
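/* Note on the threshold in unix_writable() below: a socket counts as
 * writable while the bytes charged to its send buffer stay under one
 * quarter of sk_sndbuf (wmem_alloc << 2 <= sndbuf); listeners are never
 * writable.
 */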
static int unix_writable(const struct sock *sk)
{
	return sk->sk_state != TCP_LISTEN &&
	       (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
}

static void unix_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	if (unix_writable(sk)) {
		wq = rcu_dereference(sk->sk_wq);
		if (skwq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait,
				EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}
	rcu_read_unlock();
}

/* When dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets arrived from the previous peer. First, it allows us to do
 * flow control based only on wmem_alloc; second, sk connected to peer
 * may receive messages only from that peer.
 */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
	if (!skb_queue_empty(&sk->sk_receive_queue)) {
		skb_queue_purge(&sk->sk_receive_queue);
		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

		/* If one link of bidirectional dgram pipe is disconnected,
		 * we signal error. Messages are lost. Do not do this
		 * when the peer was not connected to us.
		 */
		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
			other->sk_err = ECONNRESET;
			sk_error_report(other);
		}
	}
	other->sk_state = TCP_CLOSE;
}
static void unix_sock_destructor(struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	skb_queue_purge(&sk->sk_receive_queue);

#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
	if (u->oob_skb) {
		kfree_skb(u->oob_skb);
		u->oob_skb = NULL;
	}
#endif
	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
	WARN_ON(!sk_unhashed(sk));
	WARN_ON(sk->sk_socket);
	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_info("Attempt to release alive unix socket: %p\n", sk);
		return;
	}

	if (u->addr)
		unix_release_addr(u->addr);

	atomic_long_dec(&unix_nr_socks);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
#ifdef UNIX_REFCNT_DEBUG
	pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
		atomic_long_read(&unix_nr_socks));
#endif
}

static void unix_release_sock(struct sock *sk, int embrion)
{
	struct unix_sock *u = unix_sk(sk);
	struct sock *skpair;
	struct sk_buff *skb;
	struct path path;
	int state;

	unix_remove_socket(sk);

	/* Clear state */
	unix_state_lock(sk);
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	path = u->path;
	u->path.dentry = NULL;
	u->path.mnt = NULL;
	state = sk->sk_state;
	sk->sk_state = TCP_CLOSE;

	skpair = unix_peer(sk);
	unix_peer(sk) = NULL;

	unix_state_unlock(sk);

	wake_up_interruptible_all(&u->peer_wait);

	if (skpair != NULL) {
		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
			unix_state_lock(skpair);
			/* No more writes */
			skpair->sk_shutdown = SHUTDOWN_MASK;
			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
				skpair->sk_err = ECONNRESET;
			unix_state_unlock(skpair);
			skpair->sk_state_change(skpair);
			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
		}

		unix_dgram_peer_wake_disconnect(sk, skpair);
		sock_put(skpair); /* It may now die */
	}

	/* Try to flush out this socket. Throw out buffers at least */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (state == TCP_LISTEN)
			unix_release_sock(skb->sk, 1);
		/* passed fds are erased in the kfree_skb hook */
		UNIXCB(skb).consumed = skb->len;
		kfree_skb(skb);
	}

	if (path.dentry)
		path_put(&path);

	sock_put(sk);

	/* ---- Socket is dead now and most probably destroyed ---- */

	/*
	 * Fixme: BSD difference: In BSD all sockets connected to us get
	 *	  ECONNRESET and we die on the spot. In Linux we behave
	 *	  like files and pipes do and wait for the last
	 *	  dereference.
	 *
	 * Can't we simply set sock->err?
	 *
	 *	  What does the above comment talk about? --ANK(980817)
	 */

	if (unix_tot_inflight)
		unix_gc();		/* Garbage collect fds */
}
static void init_peercred(struct sock *sk)
{
	const struct cred *old_cred;
	struct pid *old_pid;

	spin_lock(&sk->sk_peer_lock);
	old_pid = sk->sk_peer_pid;
	old_cred = sk->sk_peer_cred;
	sk->sk_peer_pid  = get_pid(task_tgid(current));
	sk->sk_peer_cred = get_current_cred();
	spin_unlock(&sk->sk_peer_lock);

	put_pid(old_pid);
	put_cred(old_cred);
}

static void copy_peercred(struct sock *sk, struct sock *peersk)
{
	const struct cred *old_cred;
	struct pid *old_pid;

	if (sk < peersk) {
		spin_lock(&sk->sk_peer_lock);
		spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock(&peersk->sk_peer_lock);
		spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
	}

	old_pid = sk->sk_peer_pid;
	old_cred = sk->sk_peer_cred;
	sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);

	spin_unlock(&sk->sk_peer_lock);
	spin_unlock(&peersk->sk_peer_lock);

	put_pid(old_pid);
	put_cred(old_cred);
}
static int unix_listen(struct socket *sock, int backlog)
{
	int err;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;	/* Only stream/seqpacket sockets accept */
	err = -EINVAL;
	if (!u->addr)
		goto out;	/* No listens on an unbound socket */
	unix_state_lock(sk);
	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (backlog > sk->sk_max_ack_backlog)
		wake_up_interruptible_all(&u->peer_wait);
	sk->sk_max_ack_backlog	= backlog;
	sk->sk_state		= TCP_LISTEN;
	/* set credentials so connect can copy them */
	init_peercred(sk);
	err = 0;

out_unlock:
	unix_state_unlock(sk);
out:
	return err;
}
static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
			       int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int, bool);
static int unix_getname(struct socket *, struct sockaddr *, int);
static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
static __poll_t unix_dgram_poll(struct file *, struct socket *,
				poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
#endif
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
static ssize_t unix_stream_sendpage(struct socket *, struct page *, int offset,
				    size_t size, int flags);
static ssize_t unix_stream_splice_read(struct socket *,  loff_t *ppos,
				       struct pipe_inode_info *, size_t size,
				       unsigned int flags);
static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
static int unix_read_sock(struct sock *sk, read_descriptor_t *desc,
			  sk_read_actor_t recv_actor);
static int unix_stream_read_sock(struct sock *sk, read_descriptor_t *desc,
				 sk_read_actor_t recv_actor);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
			      int, int);
static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
				  int);
static int unix_set_peek_off(struct sock *sk, int val)
{
	struct unix_sock *u = unix_sk(sk);

	if (mutex_lock_interruptible(&u->iolock))
		return -EINTR;

	sk->sk_peek_off = val;
	mutex_unlock(&u->iolock);

	return 0;
}
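/* Illustrative userspace use of the hook above (SO_PEEK_OFF is generic
 * socket API, shown here only for context):
 *
 *	int off = 0;
 *	setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
 *	recv(fd, buf, len, MSG_PEEK);	// peeks at offset 0, then the
 *					// offset advances by the bytes read
 */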
#ifdef CONFIG_PROC_FS
static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct unix_sock *u;

	if (sk) {
		u = unix_sk(sock->sk);
		seq_printf(m, "scm_fds: %u\n",
			   atomic_read(&u->scm_stat.nr_fds));
	}
}
#else
#define unix_show_fdinfo NULL
#endif
static const struct proto_ops unix_stream_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_poll,
	.ioctl =	unix_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	unix_compat_ioctl,
#endif
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.sendmsg =	unix_stream_sendmsg,
	.recvmsg =	unix_stream_recvmsg,
	.read_sock =	unix_stream_read_sock,
	.mmap =		sock_no_mmap,
	.sendpage =	unix_stream_sendpage,
	.splice_read =	unix_stream_splice_read,
	.set_peek_off =	unix_set_peek_off,
	.show_fdinfo =	unix_show_fdinfo,
};

static const struct proto_ops unix_dgram_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_dgram_connect,
	.socketpair =	unix_socketpair,
	.accept =	sock_no_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	unix_compat_ioctl,
#endif
	.listen =	sock_no_listen,
	.shutdown =	unix_shutdown,
	.sendmsg =	unix_dgram_sendmsg,
	.read_sock =	unix_read_sock,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,
	.show_fdinfo =	unix_show_fdinfo,
};

static const struct proto_ops unix_seqpacket_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	unix_compat_ioctl,
#endif
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.sendmsg =	unix_seqpacket_sendmsg,
	.recvmsg =	unix_seqpacket_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,
	.show_fdinfo =	unix_show_fdinfo,
};
static void unix_close(struct sock *sk, long timeout)
{
	/* Nothing to do here, unix socket does not need a ->close().
	 * This is merely for sockmap.
	 */
}

static void unix_unhash(struct sock *sk)
{
	/* Nothing to do here, unix socket does not need a ->unhash().
	 * This is merely for sockmap.
	 */
}

struct proto unix_dgram_proto = {
	.name			= "UNIX",
	.owner			= THIS_MODULE,
	.obj_size		= sizeof(struct unix_sock),
	.close			= unix_close,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot	= unix_dgram_bpf_update_proto,
#endif
};

struct proto unix_stream_proto = {
	.name			= "UNIX-STREAM",
	.owner			= THIS_MODULE,
	.obj_size		= sizeof(struct unix_sock),
	.close			= unix_close,
	.unhash			= unix_unhash,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot	= unix_stream_bpf_update_proto,
#endif
};
static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type)
{
	struct unix_sock *u;
	struct sock *sk;
	int err;

	atomic_long_inc(&unix_nr_socks);
	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) {
		err = -ENFILE;
		goto err;
	}

	if (type == SOCK_STREAM)
		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_stream_proto, kern);
	else /*dgram and seqpacket */
		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern);

	if (!sk) {
		err = -ENOMEM;
		goto err;
	}

	sock_init_data(sock, sk);

	sk->sk_hash		= unix_unbound_hash(sk);
	sk->sk_allocation	= GFP_KERNEL_ACCOUNT;
	sk->sk_write_space	= unix_write_space;
	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
	sk->sk_destruct		= unix_sock_destructor;
	u = unix_sk(sk);
	u->path.dentry = NULL;
	u->path.mnt = NULL;
	spin_lock_init(&u->lock);
	atomic_long_set(&u->inflight, 0);
	INIT_LIST_HEAD(&u->link);
	mutex_init(&u->iolock); /* single task reading lock */
	mutex_init(&u->bindlock); /* single task binding lock */
	init_waitqueue_head(&u->peer_wait);
	init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
	memset(&u->scm_stat, 0, sizeof(struct scm_stat));
	unix_insert_unbound_socket(sk);

	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

	return sk;

err:
	atomic_long_dec(&unix_nr_socks);
	return ERR_PTR(err);
}

static int unix_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	struct sock *sk;

	if (protocol && protocol != PF_UNIX)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &unix_stream_ops;
		break;
		/*
		 *	Believe it or not BSD has AF_UNIX, SOCK_RAW though
		 *	nothing uses it.
		 */
	case SOCK_RAW:
		sock->type = SOCK_DGRAM;
		fallthrough;
	case SOCK_DGRAM:
		sock->ops = &unix_dgram_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &unix_seqpacket_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = unix_create1(net, sock, kern, sock->type);
	if (IS_ERR(sk))
		return PTR_ERR(sk);

	return 0;
}
static int unix_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	sk->sk_prot->close(sk, 0);
	unix_release_sock(sk, 0);
	sock->sk = NULL;

	return 0;
}

static struct sock *unix_find_bsd(struct net *net, struct sockaddr_un *sunaddr,
				  int addr_len, int type)
{
	struct inode *inode;
	struct path path;
	struct sock *sk;
	int err;

	unix_mkname_bsd(sunaddr, addr_len);
	err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path);
	if (err)
		goto fail;

	err = path_permission(&path, MAY_WRITE);
	if (err)
		goto path_put;

	err = -ECONNREFUSED;
	inode = d_backing_inode(path.dentry);
	if (!S_ISSOCK(inode->i_mode))
		goto path_put;

	sk = unix_find_socket_byinode(inode);
	if (!sk)
		goto path_put;

	err = -EPROTOTYPE;
	if (sk->sk_type == type)
		touch_atime(&path);
	else
		goto sock_put;

	path_put(&path);

	return sk;

sock_put:
	sock_put(sk);
path_put:
	path_put(&path);
fail:
	return ERR_PTR(err);
}
static struct sock *unix_find_abstract(struct net *net,
				       struct sockaddr_un *sunaddr,
				       int addr_len, int type)
{
	unsigned int hash = unix_abstract_hash(sunaddr, addr_len, type);
	struct dentry *dentry;
	struct sock *sk;

	sk = unix_find_socket_byname(net, sunaddr, addr_len, hash);
	if (!sk)
		return ERR_PTR(-ECONNREFUSED);

	dentry = unix_sk(sk)->path.dentry;
	if (dentry)
		touch_atime(&unix_sk(sk)->path);

	return sk;
}

static struct sock *unix_find_other(struct net *net,
				    struct sockaddr_un *sunaddr,
				    int addr_len, int type)
{
	struct sock *sk;

	if (sunaddr->sun_path[0])
		sk = unix_find_bsd(net, sunaddr, addr_len, type);
	else
		sk = unix_find_abstract(net, sunaddr, addr_len, type);

	return sk;
}
static int unix_autobind(struct sock *sk)
{
	unsigned int new_hash, old_hash = sk->sk_hash;
	struct unix_sock *u = unix_sk(sk);
	struct unix_address *addr;
	u32 lastnum, ordernum;
	int err;

	err = mutex_lock_interruptible(&u->bindlock);
	if (err)
		return err;

	if (u->addr)
		goto out;

	err = -ENOMEM;
	addr = kzalloc(sizeof(*addr) +
		       offsetof(struct sockaddr_un, sun_path) + 16, GFP_KERNEL);
	if (!addr)
		goto out;

	addr->len = offsetof(struct sockaddr_un, sun_path) + 6;
	addr->name->sun_family = AF_UNIX;
	refcount_set(&addr->refcnt, 1);

	ordernum = prandom_u32();
	lastnum = ordernum & 0xFFFFF;
retry:
	ordernum = (ordernum + 1) & 0xFFFFF;
	sprintf(addr->name->sun_path + 1, "%05x", ordernum);

	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
	unix_table_double_lock(old_hash, new_hash);

	if (__unix_find_socket_byname(sock_net(sk), addr->name, addr->len,
				      new_hash)) {
		unix_table_double_unlock(old_hash, new_hash);

		/* __unix_find_socket_byname() may take long time if many names
		 * are already in use.
		 */
		cond_resched();

		if (ordernum == lastnum) {
			/* Give up if all names seem to be in use. */
			err = -ENOSPC;
			unix_release_addr(addr);
			goto out;
		}

		goto retry;
	}

	__unix_set_addr_hash(sk, addr, new_hash);
	unix_table_double_unlock(old_hash, new_hash);
	err = 0;

out:	mutex_unlock(&u->bindlock);
	return err;
}
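/* Editorial note: an autobound socket ends up with a 6-byte abstract name,
 * a leading NUL followed by five lowercase hex digits, e.g. "\0" "003f1"
 * (digits are an example), which is what getsockname() then reports.
 */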
static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
			 int addr_len)
{
	umode_t mode = S_IFSOCK |
	       (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
	unsigned int new_hash, old_hash = sk->sk_hash;
	struct unix_sock *u = unix_sk(sk);
	struct user_namespace *ns; // barf...
	struct unix_address *addr;
	struct dentry *dentry;
	struct path parent;
	int err;

	unix_mkname_bsd(sunaddr, addr_len);
	addr_len = strlen(sunaddr->sun_path) +
		offsetof(struct sockaddr_un, sun_path) + 1;

	addr = unix_create_addr(sunaddr, addr_len);
	if (!addr)
		return -ENOMEM;

	/*
	 * Get the parent directory, calculate the hash for last
	 * component.
	 */
	dentry = kern_path_create(AT_FDCWD, addr->name->sun_path, &parent, 0);
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out;
	}

	/*
	 * All right, let's create it.
	 */
	ns = mnt_user_ns(parent.mnt);
	err = security_path_mknod(&parent, dentry, mode, 0);
	if (!err)
		err = vfs_mknod(ns, d_inode(parent.dentry), dentry, mode, 0);
	if (err)
		goto out_path;
	err = mutex_lock_interruptible(&u->bindlock);
	if (err)
		goto out_unlink;
	if (u->addr)
		goto out_unlock;

	new_hash = unix_bsd_hash(d_backing_inode(dentry));
	unix_table_double_lock(old_hash, new_hash);
	u->path.mnt = mntget(parent.mnt);
	u->path.dentry = dget(dentry);
	__unix_set_addr_hash(sk, addr, new_hash);
	unix_table_double_unlock(old_hash, new_hash);
	mutex_unlock(&u->bindlock);
	done_path_create(&parent, dentry);
	return 0;

out_unlock:
	mutex_unlock(&u->bindlock);
	err = -EINVAL;
out_unlink:
	/* failed after successful mknod?  unlink what we'd created... */
	vfs_unlink(ns, d_inode(parent.dentry), dentry, NULL);
out_path:
	done_path_create(&parent, dentry);
out:
	unix_release_addr(addr);
	return err == -EEXIST ? -EADDRINUSE : err;
}
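/* Illustrative userspace counterpart (hypothetical path): a filesystem bind
 * creates the socket inode via the mknod above, so a second bind to the
 * same path hits the EEXIST -> EADDRINUSE translation at the end of
 * unix_bind_bsd():
 *
 *	struct sockaddr_un sun = { .sun_family = AF_UNIX };
 *	strcpy(sun.sun_path, "/tmp/demo.sock");
 *	bind(fd, (struct sockaddr *)&sun, sizeof(sun));	// creates the inode
 *	bind(fd2, (struct sockaddr *)&sun, sizeof(sun));// fails, EADDRINUSE
 */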
static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
			      int addr_len)
{
	unsigned int new_hash, old_hash = sk->sk_hash;
	struct unix_sock *u = unix_sk(sk);
	struct unix_address *addr;
	int err;

	addr = unix_create_addr(sunaddr, addr_len);
	if (!addr)
		return -ENOMEM;

	err = mutex_lock_interruptible(&u->bindlock);
	if (err)
		goto out;

	if (u->addr) {
		err = -EINVAL;
		goto out_mutex;
	}

	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
	unix_table_double_lock(old_hash, new_hash);

	if (__unix_find_socket_byname(sock_net(sk), addr->name, addr->len,
				      new_hash))
		goto out_spin;

	__unix_set_addr_hash(sk, addr, new_hash);
	unix_table_double_unlock(old_hash, new_hash);
	mutex_unlock(&u->bindlock);
	return 0;

out_spin:
	unix_table_double_unlock(old_hash, new_hash);
	err = -EADDRINUSE;
out_mutex:
	mutex_unlock(&u->bindlock);
out:
	unix_release_addr(addr);
	return err;
}

static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk;
	int err;

	if (addr_len == offsetof(struct sockaddr_un, sun_path) &&
	    sunaddr->sun_family == AF_UNIX)
		return unix_autobind(sk);

	err = unix_validate_addr(sunaddr, addr_len);
	if (err)
		return err;

	if (sunaddr->sun_path[0])
		err = unix_bind_bsd(sk, sunaddr, addr_len);
	else
		err = unix_bind_abstract(sk, sunaddr, addr_len);

	return err;
}
static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_lock(sk1);
		return;
	}
	if (sk1 < sk2) {
		unix_state_lock(sk1);
		unix_state_lock_nested(sk2);
	} else {
		unix_state_lock(sk2);
		unix_state_lock_nested(sk1);
	}
}

static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_unlock(sk1);
		return;
	}
	unix_state_unlock(sk1);
	unix_state_unlock(sk2);
}
static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
			      int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
	struct sock *other;
	int err;

	err = -EINVAL;
	if (alen < offsetofend(struct sockaddr, sa_family))
		goto out;

	if (addr->sa_family != AF_UNSPEC) {
		err = unix_validate_addr(sunaddr, alen);
		if (err)
			goto out;

		if (test_bit(SOCK_PASSCRED, &sock->flags) &&
		    !unix_sk(sk)->addr) {
			err = unix_autobind(sk);
			if (err)
				goto out;
		}

restart:
		other = unix_find_other(net, sunaddr, alen, sock->type);
		if (IS_ERR(other)) {
			err = PTR_ERR(other);
			goto out;
		}

		unix_state_double_lock(sk, other);

		/* Apparently VFS overslept socket death. Retry. */
		if (sock_flag(other, SOCK_DEAD)) {
			unix_state_double_unlock(sk, other);
			sock_put(other);
			goto restart;
		}

		err = -EPERM;
		if (!unix_may_send(sk, other))
			goto out_unlock;

		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;

		sk->sk_state = other->sk_state = TCP_ESTABLISHED;
	} else {
		/*
		 *	1003.1g breaking connected state with AF_UNSPEC
		 */
		other = NULL;
		unix_state_double_lock(sk, other);
	}

	/*
	 * If it was connected, reconnect.
	 */
	if (unix_peer(sk)) {
		struct sock *old_peer = unix_peer(sk);

		unix_peer(sk) = other;
		if (!other)
			sk->sk_state = TCP_CLOSE;
		unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);

		unix_state_double_unlock(sk, other);

		if (other != old_peer)
			unix_dgram_disconnected(sk, old_peer);
		sock_put(old_peer);
	} else {
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);
	}

	return 0;

out_unlock:
	unix_state_double_unlock(sk, other);
	sock_put(other);
out:
	return err;
}
static long unix_wait_for_peer(struct sock *other, long timeo)
	__releases(&unix_sk(other)->lock)
{
	struct unix_sock *u = unix_sk(other);
	int sched;
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);

	sched = !sock_flag(other, SOCK_DEAD) &&
		!(other->sk_shutdown & RCV_SHUTDOWN) &&
		unix_recvq_full(other);

	unix_state_unlock(other);

	if (sched)
		timeo = schedule_timeout(timeo);

	finish_wait(&u->peer_wait, &wait);
	return timeo;
}
static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			       int addr_len, int flags)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
	struct sock *newsk = NULL;
	struct sock *other = NULL;
	struct sk_buff *skb = NULL;
	int st;
	int err;
	long timeo;

	err = unix_validate_addr(sunaddr, addr_len);
	if (err)
		goto out;

	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr) {
		err = unix_autobind(sk);
		if (err)
			goto out;
	}

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	/* First of all allocate resources.
	 * If we will make it after state is locked,
	 * we will have to recheck all again in any case.
	 */

	/* create new sock for complete connection */
	newsk = unix_create1(sock_net(sk), NULL, 0, sock->type);
	if (IS_ERR(newsk)) {
		err = PTR_ERR(newsk);
		newsk = NULL;
		goto out;
	}

	err = -ENOMEM;

	/* Allocate skb for sending to listening sock */
	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
	if (skb == NULL)
		goto out;

restart:
	/*  Find listening sock. */
	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type);
	if (IS_ERR(other)) {
		err = PTR_ERR(other);
		other = NULL;
		goto out;
	}

	/* Latch state of peer */
	unix_state_lock(other);

	/* Apparently VFS overslept socket death. Retry. */
	if (sock_flag(other, SOCK_DEAD)) {
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = -ECONNREFUSED;
	if (other->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (unix_recvq_full(other)) {
		err = -EAGAIN;
		if (!timeo)
			goto out_unlock;

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out;
		sock_put(other);
		goto restart;
	}

	/* Latch our state.
	 *
	 * This is a tricky place. We need to grab our state lock and cannot
	 * drop the lock on the peer. It is dangerous because deadlock is
	 * possible. Connect to self case and simultaneous
	 * attempt to connect are eliminated by checking socket
	 * state. other is TCP_LISTEN, if sk is TCP_LISTEN we
	 * check this before attempt to grab lock.
	 *
	 * Well, and we have to recheck the state after socket locked.
	 */
	st = sk->sk_state;

	switch (st) {
	case TCP_CLOSE:
		/* This is ok... continue with connect */
		break;
	case TCP_ESTABLISHED:
		/* Socket is already connected */
		err = -EISCONN;
		goto out_unlock;
	default:
		err = -EINVAL;
		goto out_unlock;
	}

	unix_state_lock_nested(sk);

	if (sk->sk_state != st) {
		unix_state_unlock(sk);
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = security_unix_stream_connect(sk, other, newsk);
	if (err) {
		unix_state_unlock(sk);
		goto out_unlock;
	}

	/* The way is open! Fastly set all the necessary fields... */

	sock_hold(sk);
	unix_peer(newsk)	= sk;
	newsk->sk_state		= TCP_ESTABLISHED;
	newsk->sk_type		= sk->sk_type;
	init_peercred(newsk);
	newu = unix_sk(newsk);
	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
	otheru = unix_sk(other);

	/* copy address information from listening to new sock
	 *
	 * The contents of *(otheru->addr) and otheru->path
	 * are seen fully set up here, since we have found
	 * otheru in hash under unix_table_locks.  Insertion
	 * into the hash chain we'd found it in had been done
	 * in an earlier critical area protected by unix_table_locks,
	 * the same one where we'd set *(otheru->addr) contents,
	 * as well as otheru->path and otheru->addr itself.
	 *
	 * Using smp_store_release() here to set newu->addr
	 * is enough to make those stores, as well as stores
	 * to newu->path visible to anyone who gets newu->addr
	 * by smp_load_acquire().  IOW, the same guarantees
	 * as for unix_sock instances bound in unix_bind() or
	 * in unix_autobind().
	 */
	if (otheru->path.dentry) {
		path_get(&otheru->path);
		newu->path = otheru->path;
	}
	refcount_inc(&otheru->addr->refcnt);
	smp_store_release(&newu->addr, otheru->addr);

	/* Set credentials */
	copy_peercred(sk, other);

	sock->state	= SS_CONNECTED;
	sk->sk_state	= TCP_ESTABLISHED;
	sock_hold(newsk);

	smp_mb__after_atomic();	/* sock_hold() does an atomic_inc() */
	unix_peer(sk)	= newsk;

	unix_state_unlock(sk);

	/* take ten and send info to listening sock */
	spin_lock(&other->sk_receive_queue.lock);
	__skb_queue_tail(&other->sk_receive_queue, skb);
	spin_unlock(&other->sk_receive_queue.lock);
	unix_state_unlock(other);
	other->sk_data_ready(other);
	sock_put(other);
	return 0;

out_unlock:
	if (other)
		unix_state_unlock(other);

out:
	kfree_skb(skb);
	if (newsk)
		unix_release_sock(newsk, 0);
	if (other)
		sock_put(other);
	return err;
}
static int unix_socketpair(struct socket *socka, struct socket *sockb)
{
	struct sock *ska = socka->sk, *skb = sockb->sk;

	/* Join our sockets back to back */
	sock_hold(ska);
	sock_hold(skb);
	unix_peer(ska) = skb;
	unix_peer(skb) = ska;
	init_peercred(ska);
	init_peercred(skb);

	ska->sk_state = TCP_ESTABLISHED;
	skb->sk_state = TCP_ESTABLISHED;
	socka->state  = SS_CONNECTED;
	sockb->state  = SS_CONNECTED;
	return 0;
}

static void unix_sock_inherit_flags(const struct socket *old,
				    struct socket *new)
{
	if (test_bit(SOCK_PASSCRED, &old->flags))
		set_bit(SOCK_PASSCRED, &new->flags);
	if (test_bit(SOCK_PASSSEC, &old->flags))
		set_bit(SOCK_PASSSEC, &new->flags);
}

static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
		       bool kern)
{
	struct sock *sk = sock->sk;
	struct sock *tsk;
	struct sk_buff *skb;
	int err;

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;

	err = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out;

	/* If socket state is TCP_LISTEN it cannot change (for now...),
	 * so that no locks are necessary.
	 */

	skb = skb_recv_datagram(sk, (flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
				&err);
	if (!skb) {
		/* This means receive shutdown. */
		if (err == 0)
			err = -EINVAL;
		goto out;
	}

	tsk = skb->sk;
	skb_free_datagram(sk, skb);
	wake_up_interruptible(&unix_sk(sk)->peer_wait);

	/* attach accepted sock to socket */
	unix_state_lock(tsk);
	newsock->state = SS_CONNECTED;
	unix_sock_inherit_flags(sock, newsock);
	sock_graft(tsk, newsock);
	unix_state_unlock(tsk);
	return 0;

out:
	return err;
}
static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
{
	struct sock *sk = sock->sk;
	struct unix_address *addr;
	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
	int err = 0;

	if (peer) {
		sk = unix_peer_get(sk);

		err = -ENOTCONN;
		if (!sk)
			goto out;
		err = 0;
	} else {
		sock_hold(sk);
	}

	addr = smp_load_acquire(&unix_sk(sk)->addr);
	if (!addr) {
		sunaddr->sun_family = AF_UNIX;
		sunaddr->sun_path[0] = 0;
		err = offsetof(struct sockaddr_un, sun_path);
	} else {
		err = addr->len;
		memcpy(sunaddr, addr->name, addr->len);
	}
	sock_put(sk);
out:
	return err;
}
static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->fp = scm_fp_dup(UNIXCB(skb).fp);

	/*
	 * Garbage collection of unix sockets starts by selecting a set of
	 * candidate sockets which have reference only from being in flight
	 * (total_refs == inflight_refs).  This condition is checked once during
	 * the candidate collection phase, and candidates are marked as such, so
	 * that non-candidates can later be ignored.  While inflight_refs is
	 * protected by unix_gc_lock, total_refs (file count) is not, hence this
	 * is an instantaneous decision.
	 *
	 * Once a candidate, however, the socket must not be reinstalled into a
	 * file descriptor while the garbage collection is in progress.
	 *
	 * If the above conditions are met, then the directed graph of
	 * candidates (*) does not change while unix_gc_lock is held.
	 *
	 * Any operations that change the file count through file descriptors
	 * (dup, close, sendmsg) do not change the graph since candidates are
	 * not installed in fds.
	 *
	 * Dequeuing a candidate via recvmsg would install it into an fd, but
	 * that takes unix_gc_lock to decrement the inflight count, so it's
	 * serialized with garbage collection.
	 *
	 * MSG_PEEK is special in that it does not change the inflight count,
	 * yet does install the socket into an fd.  The following lock/unlock
	 * pair is to ensure serialization with garbage collection.  It must be
	 * done between incrementing the file count and installing the file into
	 * an fd.
	 *
	 * If garbage collection starts after the barrier provided by the
	 * lock/unlock, then it will see the elevated refcount and not mark this
	 * as a candidate.  If a garbage collection is already in progress
	 * before the file count was incremented, then the lock/unlock pair will
	 * ensure that garbage collection is finished before progressing to
	 * installing the fd.
	 *
	 * (*) A -> B where B is on the queue of A or B is on the queue of C
	 * which is on the queue of listening socket A.
	 */
	spin_lock(&unix_gc_lock);
	spin_unlock(&unix_gc_lock);
}
static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
{
	int err = 0;

	UNIXCB(skb).pid  = get_pid(scm->pid);
	UNIXCB(skb).uid = scm->creds.uid;
	UNIXCB(skb).gid = scm->creds.gid;
	UNIXCB(skb).fp = NULL;
	unix_get_secdata(scm, skb);
	if (scm->fp && send_fds)
		err = unix_attach_fds(scm, skb);

	skb->destructor = unix_destruct_scm;
	return err;
}

static bool unix_passcred_enabled(const struct socket *sock,
				  const struct sock *other)
{
	return test_bit(SOCK_PASSCRED, &sock->flags) ||
	       !other->sk_socket ||
	       test_bit(SOCK_PASSCRED, &other->sk_socket->flags);
}

/*
 * Some apps rely on write() giving SCM_CREDENTIALS
 * We include credentials if the source or destination socket
 * asserted SOCK_PASSCRED.
 */
static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
			    const struct sock *other)
{
	if (UNIXCB(skb).pid)
		return;
	if (unix_passcred_enabled(sock, other)) {
		UNIXCB(skb).pid  = get_pid(task_tgid(current));
		current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
	}
}

static int maybe_init_creds(struct scm_cookie *scm,
			    struct socket *socket,
			    const struct sock *other)
{
	int err;
	struct msghdr msg = { .msg_controllen = 0 };

	err = scm_send(socket, &msg, scm, false);
	if (err)
		return err;

	if (unix_passcred_enabled(socket, other)) {
		scm->pid = get_pid(task_tgid(current));
		current_uid_gid(&scm->creds.uid, &scm->creds.gid);
	}
	return err;
}

static bool unix_skb_scm_eq(struct sk_buff *skb,
			    struct scm_cookie *scm)
{
	return UNIXCB(skb).pid == scm->pid &&
	       uid_eq(UNIXCB(skb).uid, scm->creds.uid) &&
	       gid_eq(UNIXCB(skb).gid, scm->creds.gid) &&
	       unix_secdata_eq(scm, skb);
}

static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
{
	struct scm_fp_list *fp = UNIXCB(skb).fp;
	struct unix_sock *u = unix_sk(sk);

	if (unlikely(fp && fp->count))
		atomic_add(fp->count, &u->scm_stat.nr_fds);
}

static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
{
	struct scm_fp_list *fp = UNIXCB(skb).fp;
	struct unix_sock *u = unix_sk(sk);

	if (unlikely(fp && fp->count))
		atomic_sub(fp->count, &u->scm_stat.nr_fds);
}
/*
 *	Send AF_UNIX data.
 */

static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
			      size_t len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
	struct sock *other = NULL;
	int err;
	struct sk_buff *skb;
	long timeo;
	struct scm_cookie scm;
	int data_len = 0;
	int sk_locked;

	wait_for_unix_gc();
	err = scm_send(sock, msg, &scm, false);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto out;

	if (msg->msg_namelen) {
		err = unix_validate_addr(sunaddr, msg->msg_namelen);
		if (err)
			goto out;
	} else {
		sunaddr = NULL;
		err = -ENOTCONN;
		other = unix_peer_get(sk);
		if (!other)
			goto out;
	}

	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr) {
		err = unix_autobind(sk);
		if (err)
			goto out;
	}

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;

	if (len > SKB_MAX_ALLOC) {
		data_len = min_t(size_t,
				 len - SKB_MAX_ALLOC,
				 MAX_SKB_FRAGS * PAGE_SIZE);
		data_len = PAGE_ALIGN(data_len);

		BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
	}

	skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
				   msg->msg_flags & MSG_DONTWAIT, &err,
				   PAGE_ALLOC_COSTLY_ORDER);
	if (skb == NULL)
		goto out;

	err = unix_scm_to_skb(&scm, skb, true);
	if (err < 0)
		goto out_free;

	skb_put(skb, len - data_len);
	skb->data_len = data_len;
	skb->len = len;
	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
	if (err)
		goto out_free;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

restart:
	if (!other) {
		err = -ECONNRESET;
		if (sunaddr == NULL)
			goto out_free;

		other = unix_find_other(net, sunaddr, msg->msg_namelen,
					sk->sk_type);
		if (IS_ERR(other)) {
			err = PTR_ERR(other);
			other = NULL;
			goto out_free;
		}
	}

	if (sk_filter(other, skb) < 0) {
		/* Toss the packet but do not return any error to the sender */
		err = len;
		goto out_free;
	}

	sk_locked = 0;
	unix_state_lock(other);
restart_locked:
	err = -EPERM;
	if (!unix_may_send(sk, other))
		goto out_unlock;

	if (unlikely(sock_flag(other, SOCK_DEAD))) {
		/*
		 *	Check with 1003.1g - what should
		 *	datagram error
		 */
		unix_state_unlock(other);
		sock_put(other);

		if (!sk_locked)
			unix_state_lock(sk);

		err = 0;
		if (unix_peer(sk) == other) {
			unix_peer(sk) = NULL;
			unix_dgram_peer_wake_disconnect_wakeup(sk, other);

			unix_state_unlock(sk);

			sk->sk_state = TCP_CLOSE;
			unix_dgram_disconnected(sk, other);
			sock_put(other);
			err = -ECONNREFUSED;
		} else {
			unix_state_unlock(sk);
		}

		other = NULL;
		if (err)
			goto out_free;
		goto restart;
	}

	err = -EPIPE;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (sk->sk_type != SOCK_SEQPACKET) {
		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;
	}

	/* other == sk && unix_peer(other) != sk if
	 * - unix_peer(sk) == NULL, destination address bound to sk
	 * - unix_peer(sk) == sk by time of get but disconnected before lock
	 */
	if (other != sk &&
	    unlikely(unix_peer(other) != sk &&
	    unix_recvq_full_lockless(other))) {
		if (timeo) {
			timeo = unix_wait_for_peer(other, timeo);

			err = sock_intr_errno(timeo);
			if (signal_pending(current))
				goto out_free;

			goto restart;
		}

		if (!sk_locked) {
			unix_state_unlock(other);
			unix_state_double_lock(sk, other);
		}

		if (unix_peer(sk) != other ||
		    unix_dgram_peer_wake_me(sk, other)) {
			err = -EAGAIN;
			sk_locked = 1;
			goto out_unlock;
		}

		if (!sk_locked) {
			sk_locked = 1;
			goto restart_locked;
		}
	}

	if (unlikely(sk_locked))
		unix_state_unlock(sk);

	if (sock_flag(other, SOCK_RCVTSTAMP))
		__net_timestamp(skb);
	maybe_add_creds(skb, sock, other);
	scm_stat_add(other, skb);
	skb_queue_tail(&other->sk_receive_queue, skb);
	unix_state_unlock(other);
	other->sk_data_ready(other);
	sock_put(other);
	scm_destroy(&scm);
	return len;

out_unlock:
	if (sk_locked)
		unix_state_unlock(sk);
	unix_state_unlock(other);
out_free:
	kfree_skb(skb);
out:
	if (other)
		sock_put(other);
	scm_destroy(&scm);
	return err;
}
/* We use paged skbs for stream sockets, and limit occupancy to 32768
 * bytes, and a minimum of a full page.
 */
#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
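/* For example, with 4 KiB pages get_order(32768) is 3, so the macro
 * evaluates to exactly 32768; with 64 KiB pages get_order(32768) is 0 and
 * it degenerates to a single page (the "minimum of a full page" case).
 */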
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other)
{
	struct unix_sock *ousk = unix_sk(other);
	struct sk_buff *skb;
	int err = 0;

	skb = sock_alloc_send_skb(sock->sk, 1, msg->msg_flags & MSG_DONTWAIT, &err);

	if (!skb)
		return err;

	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	skb_put(skb, 1);
	unix_state_lock(other);

	if (sock_flag(other, SOCK_DEAD) ||
	    (other->sk_shutdown & RCV_SHUTDOWN)) {
		unix_state_unlock(other);
		kfree_skb(skb);
		return -EPIPE;
	}

	maybe_add_creds(skb, sock, other);
	skb_get(skb);

	if (ousk->oob_skb)
		consume_skb(ousk->oob_skb);

	WRITE_ONCE(ousk->oob_skb, skb);

	scm_stat_add(other, skb);
	skb_queue_tail(&other->sk_receive_queue, skb);
	sk_send_sigurg(other);
	unix_state_unlock(other);
	other->sk_data_ready(other);

	return err;
}
#endif
static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
			       size_t len)
{
	struct sock *sk = sock->sk;
	struct sock *other = NULL;
	int err, size;
	struct sk_buff *skb;
	int sent = 0;
	struct scm_cookie scm;
	bool fds_sent = false;
	int data_len;

	wait_for_unix_gc();
	err = scm_send(sock, msg, &scm, false);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags & MSG_OOB) {
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
		if (len)
			len--;
		else
#endif
			goto out_err;
	}

	if (msg->msg_namelen) {
		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
		goto out_err;
	} else {
		err = -ENOTCONN;
		other = unix_peer(sk);
		if (!other)
			goto out_err;
	}

	if (sk->sk_shutdown & SEND_SHUTDOWN)
		goto pipe_err;

	while (sent < len) {
		size = len - sent;

		/* Keep two messages in the pipe so it schedules better */
		size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);

		/* allow fallback to order-0 allocations */
		size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);

		data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));

		data_len = min_t(size_t, size, PAGE_ALIGN(data_len));

		skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
					   msg->msg_flags & MSG_DONTWAIT, &err,
					   get_order(UNIX_SKB_FRAGS_SZ));
		if (!skb)
			goto out_err;

		/* Only send the fds in the first buffer */
		err = unix_scm_to_skb(&scm, skb, !fds_sent);
		if (err < 0) {
			kfree_skb(skb);
			goto out_err;
		}
		fds_sent = true;

		skb_put(skb, size - data_len);
		skb->data_len = data_len;
		skb->len = size;
		err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
		if (err) {
			kfree_skb(skb);
			goto out_err;
		}

		unix_state_lock(other);

		if (sock_flag(other, SOCK_DEAD) ||
		    (other->sk_shutdown & RCV_SHUTDOWN))
			goto pipe_err_free;

		maybe_add_creds(skb, sock, other);
		scm_stat_add(other, skb);
		skb_queue_tail(&other->sk_receive_queue, skb);
		unix_state_unlock(other);
		other->sk_data_ready(other);
		sent += size;
	}

#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
	if (msg->msg_flags & MSG_OOB) {
		err = queue_oob(sock, msg, other);
		if (err)
			goto out_err;
		sent++;
	}
#endif

	scm_destroy(&scm);

	return sent;

pipe_err_free:
	unix_state_unlock(other);
	kfree_skb(skb);
pipe_err:
	if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	err = -EPIPE;
out_err:
	scm_destroy(&scm);
	return sent ? : err;
}
static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
				    int offset, size_t size, int flags)
{
	int err;
	bool send_sigpipe = false;
	bool init_scm = true;
	struct scm_cookie scm;
	struct sock *other, *sk = socket->sk;
	struct sk_buff *skb, *newskb = NULL, *tail = NULL;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	other = unix_peer(sk);
	if (!other || sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	if (false) {
alloc_skb:
		unix_state_unlock(other);
		mutex_unlock(&unix_sk(other)->iolock);
		newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
					      &err, 0);
		if (!newskb)
			goto err;
	}

	/* we must acquire iolock as we modify already present
	 * skbs in the sk_receive_queue and mess with skb->len
	 */
	err = mutex_lock_interruptible(&unix_sk(other)->iolock);
	if (err) {
		err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
		goto err;
	}

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		send_sigpipe = true;
		goto err_unlock;
	}

	unix_state_lock(other);

	if (sock_flag(other, SOCK_DEAD) ||
	    other->sk_shutdown & RCV_SHUTDOWN) {
		err = -EPIPE;
		send_sigpipe = true;
		goto err_state_unlock;
	}

	if (init_scm) {
		err = maybe_init_creds(&scm, socket, other);
		if (err)
			goto err_state_unlock;
		init_scm = false;
	}

	skb = skb_peek_tail(&other->sk_receive_queue);
	if (tail && tail == skb) {
		skb = newskb;
	} else if (!skb || !unix_skb_scm_eq(skb, &scm)) {
		if (newskb) {
			skb = newskb;
		} else {
			tail = skb;
			goto alloc_skb;
		}
	} else if (newskb) {
		/* this is fast path, we don't necessarily need to
		 * call to kfree_skb even though with newskb == NULL
		 * this - does no harm
		 */
		consume_skb(newskb);
		newskb = NULL;
	}

	if (skb_append_pagefrags(skb, page, offset, size)) {
		tail = skb;
		goto alloc_skb;
	}

	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
	refcount_add(size, &sk->sk_wmem_alloc);

	if (newskb) {
		err = unix_scm_to_skb(&scm, skb, false);
		if (err)
			goto err_state_unlock;
		spin_lock(&other->sk_receive_queue.lock);
		__skb_queue_tail(&other->sk_receive_queue, newskb);
		spin_unlock(&other->sk_receive_queue.lock);
	}

	unix_state_unlock(other);
	mutex_unlock(&unix_sk(other)->iolock);

	other->sk_data_ready(other);
	scm_destroy(&scm);
	return size;

err_state_unlock:
	unix_state_unlock(other);
err_unlock:
	mutex_unlock(&unix_sk(other)->iolock);
err:
	kfree_skb(newskb);
	if (send_sigpipe && !(flags & MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	if (!init_scm)
		scm_destroy(&scm);
	return err;
}
static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
				  size_t len)
{
	int err;
	struct sock *sk = sock->sk;

	err = sock_error(sk);
	if (err)
		return err;

	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	if (msg->msg_namelen)
		msg->msg_namelen = 0;

	return unix_dgram_sendmsg(sock, msg, len);
}

static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
				  size_t size, int flags)
{
	struct sock *sk = sock->sk;

	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	return unix_dgram_recvmsg(sock, msg, size, flags);
}
static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
{
	struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);

	if (addr) {
		msg->msg_namelen = addr->len;
		memcpy(msg->msg_name, addr->name, addr->len);
	}
}
int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
			 int flags)
{
	struct scm_cookie scm;
	struct socket *sock = sk->sk_socket;
	struct unix_sock *u = unix_sk(sk);
	struct sk_buff *skb, *last;
	long timeo;
	int skip;
	int err;

	err = -EOPNOTSUPP;
	if (flags & MSG_OOB)
		goto out;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		mutex_lock(&u->iolock);

		skip = sk_peek_offset(sk, flags);
		skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags,
					      &skip, &err, &last);
		if (skb) {
			if (!(flags & MSG_PEEK))
				scm_stat_del(sk, skb);
			break;
		}

		mutex_unlock(&u->iolock);

		if (err != -EAGAIN)
			break;
	} while (timeo &&
		 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
					      &err, &timeo, last));

	if (!skb) { /* implies iolock unlocked */
		unix_state_lock(sk);
		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
		    (sk->sk_shutdown & RCV_SHUTDOWN))
			err = 0;
		unix_state_unlock(sk);
		goto out;
	}

	if (wq_has_sleeper(&u->peer_wait))
		wake_up_interruptible_sync_poll(&u->peer_wait,
						EPOLLOUT | EPOLLWRNORM |
						EPOLLWRBAND);

	if (msg->msg_name)
		unix_copy_addr(msg, skb->sk);

	if (size > skb->len - skip)
		size = skb->len - skip;
	else if (size < skb->len - skip)
		msg->msg_flags |= MSG_TRUNC;

	err = skb_copy_datagram_msg(skb, skip, msg, size);
	if (err)
		goto out_free;

	if (sock_flag(sk, SOCK_RCVTSTAMP))
		__sock_recv_timestamp(msg, sk, skb);

	memset(&scm, 0, sizeof(scm));

	scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
	unix_set_secdata(&scm, skb);

	if (!(flags & MSG_PEEK)) {
		if (UNIXCB(skb).fp)
			unix_detach_fds(&scm, skb);

		sk_peek_offset_bwd(sk, skb->len);
	} else {
		/* It is questionable: on PEEK we could:
		   - do not return fds - good, but too simple 8)
		   - return fds, and do not return them on read (old strategy,
		     apparently wrong)
		   - clone fds (I chose it for now, it is the most universal
		     solution)

		   POSIX 1003.1g does not actually define this clearly
		   at all. POSIX 1003.1g doesn't define a lot of things
		   clearly however!
		*/

		sk_peek_offset_fwd(sk, size);

		if (UNIXCB(skb).fp)
			unix_peek_fds(&scm, skb);
	}
	err = (flags & MSG_TRUNC) ? skb->len - skip : size;

	scm_recv(sock, msg, &scm, flags);

out_free:
	skb_free_datagram(sk, skb);
	mutex_unlock(&u->iolock);
out:
	return err;
}
static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
			      int flags)
{
	struct sock *sk = sock->sk;

#ifdef CONFIG_BPF_SYSCALL
	const struct proto *prot = READ_ONCE(sk->sk_prot);

	if (prot != &unix_dgram_proto)
		return prot->recvmsg(sk, msg, size, flags, NULL);
#endif
	return __unix_dgram_recvmsg(sk, msg, size, flags);
}
static int unix_read_sock(struct sock *sk, read_descriptor_t *desc,
			  sk_read_actor_t recv_actor)
{
	int copied = 0;

	while (1) {
		struct unix_sock *u = unix_sk(sk);
		struct sk_buff *skb;
		int used, err;

		mutex_lock(&u->iolock);
		skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
		mutex_unlock(&u->iolock);
		if (!skb)
			return err;

		used = recv_actor(desc, skb, 0, skb->len);
		if (used <= 0) {
			if (!copied)
				copied = used;
			kfree_skb(skb);
			break;
		} else if (used <= skb->len) {
			copied += used;
		}

		kfree_skb(skb);
		break;
	}

	return copied;
}
/*
 *	Sleep until more data has arrived. But check for races..
 */
static long unix_stream_data_wait(struct sock *sk, long timeo,
				  struct sk_buff *last, unsigned int last_len,
				  bool freezable)
{
	struct sk_buff *tail;
	DEFINE_WAIT(wait);

	unix_state_lock(sk);

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

		tail = skb_peek_tail(&sk->sk_receive_queue);
		if (tail != last ||
		    (tail && tail->len != last_len) ||
		    sk->sk_err ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current) ||
		    !timeo)
			break;

		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		unix_state_unlock(sk);
		if (freezable)
			timeo = freezable_schedule_timeout(timeo);
		else
			timeo = schedule_timeout(timeo);
		unix_state_lock(sk);

		if (sock_flag(sk, SOCK_DEAD))
			break;

		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	}

	finish_wait(sk_sleep(sk), &wait);
	unix_state_unlock(sk);
	return timeo;
}
static unsigned int unix_skb_len(const struct sk_buff *skb)
{
	return skb->len - UNIXCB(skb).consumed;
}

struct unix_stream_read_state {
	int (*recv_actor)(struct sk_buff *, int, int,
			  struct unix_stream_read_state *);
	struct socket *socket;
	struct msghdr *msg;
	struct pipe_inode_info *pipe;
	size_t size;
	int flags;
	unsigned int splice_flags;
};
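/* Editorial note: this read_state indirection is what lets recvmsg() and
 * splice_read() share unix_stream_read_generic(); each caller supplies its
 * own recv_actor (e.g. unix_stream_read_actor() below copies into
 * state->msg, while a splice actor would feed state->pipe instead).
 */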
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
static int unix_stream_recv_urg(struct unix_stream_read_state *state)
{
	struct socket *sock = state->socket;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	int chunk = 1;
	struct sk_buff *oob_skb;

	mutex_lock(&u->iolock);
	unix_state_lock(sk);

	if (sock_flag(sk, SOCK_URGINLINE) || !u->oob_skb) {
		unix_state_unlock(sk);
		mutex_unlock(&u->iolock);
		return -EINVAL;
	}

	oob_skb = u->oob_skb;

	if (!(state->flags & MSG_PEEK))
		WRITE_ONCE(u->oob_skb, NULL);

	unix_state_unlock(sk);

	chunk = state->recv_actor(oob_skb, 0, chunk, state);

	if (!(state->flags & MSG_PEEK)) {
		UNIXCB(oob_skb).consumed += 1;
		kfree_skb(oob_skb);
	}

	mutex_unlock(&u->iolock);

	if (chunk < 0)
		return -EFAULT;

	state->msg->msg_flags |= MSG_OOB;
	return 1;
}
static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
				  int flags, int copied)
{
	struct unix_sock *u = unix_sk(sk);

	if (!unix_skb_len(skb) && !(flags & MSG_PEEK)) {
		skb_unlink(skb, &sk->sk_receive_queue);
		consume_skb(skb);
		skb = NULL;
	} else {
		if (skb == u->oob_skb) {
			if (copied) {
				skb = NULL;
			} else if (sock_flag(sk, SOCK_URGINLINE)) {
				if (!(flags & MSG_PEEK)) {
					WRITE_ONCE(u->oob_skb, NULL);
					consume_skb(skb);
				}
			} else if (!(flags & MSG_PEEK)) {
				skb_unlink(skb, &sk->sk_receive_queue);
				consume_skb(skb);
				skb = skb_peek(&sk->sk_receive_queue);
			}
		}
	}
	return skb;
}
#endif
static int unix_stream_read_sock(struct sock *sk, read_descriptor_t *desc,
				 sk_read_actor_t recv_actor)
{
	if (unlikely(sk->sk_state != TCP_ESTABLISHED))
		return -ENOTCONN;

	return unix_read_sock(sk, desc, recv_actor);
}

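/* Core of the stream receive path.  Walks the receive queue under
 * u->iolock, glues consecutive skbs from the same writer into one read,
 * stops at SCM boundaries (credentials, fds), honours MSG_PEEK and the
 * peek offset, and sleeps in unix_stream_data_wait() when the queue is
 * empty and the caller may block.
 */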
static int unix_stream_read_generic(struct unix_stream_read_state *state,
				    bool freezable)
{
	struct scm_cookie scm;
	struct socket *sock = state->socket;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	int copied = 0;
	int flags = state->flags;
	int noblock = flags & MSG_DONTWAIT;
	bool check_creds = false;
	int target;
	int err = 0;
	long timeo;
	int skip;
	size_t size = state->size;
	unsigned int last_len;

	if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
		err = -EINVAL;
		goto out;
	}

	if (unlikely(flags & MSG_OOB)) {
		err = -EOPNOTSUPP;
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
		err = unix_stream_recv_urg(state);
#endif
		goto out;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, noblock);

	memset(&scm, 0, sizeof(scm));

	/* Lock the socket to prevent queue disordering
	 * while sleeps in memcpy_to_msg
	 */
	mutex_lock(&u->iolock);

	skip = max(sk_peek_offset(sk, flags), 0);

	do {
		int chunk;
		bool drop_skb;
		struct sk_buff *skb, *last;

redo:
		unix_state_lock(sk);
		if (sock_flag(sk, SOCK_DEAD)) {
			err = -ECONNRESET;
			goto unlock;
		}
		last = skb = skb_peek(&sk->sk_receive_queue);
		last_len = last ? last->len : 0;

#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
		if (skb) {
			skb = manage_oob(skb, sk, flags, copied);
			if (!skb) {
				unix_state_unlock(sk);
				if (copied)
					break;
				goto redo;
			}
		}
#endif
again:
		if (skb == NULL) {
			if (copied >= target)
				goto unlock;

			/*
			 *	POSIX 1003.1g mandates this order.
			 */
			err = sock_error(sk);
			if (err)
				goto unlock;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				goto unlock;

			unix_state_unlock(sk);
			if (!timeo) {
				err = -EAGAIN;
				break;
			}

			mutex_unlock(&u->iolock);

			timeo = unix_stream_data_wait(sk, timeo, last,
						      last_len, freezable);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				scm_destroy(&scm);
				goto out;
			}

			mutex_lock(&u->iolock);
			goto redo;
unlock:
			unix_state_unlock(sk);
			break;
		}

		while (skip >= unix_skb_len(skb)) {
			skip -= unix_skb_len(skb);
			last = skb;
			last_len = skb->len;
			skb = skb_peek_next(skb, &sk->sk_receive_queue);
			if (!skb)
				goto again;
		}

		unix_state_unlock(sk);

		if (check_creds) {
			/* Never glue messages from different writers */
			if (!unix_skb_scm_eq(skb, &scm))
				break;
		} else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
			/* Copy credentials */
			scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
			unix_set_secdata(&scm, skb);
			check_creds = true;
		}

		/* Copy address just once */
		if (state->msg && state->msg->msg_name) {
			DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
					 state->msg->msg_name);
			unix_copy_addr(state->msg, skb->sk);
			sunaddr = NULL;
		}

		chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
		skb_get(skb);
		chunk = state->recv_actor(skb, skip, chunk, state);
		drop_skb = !unix_skb_len(skb);
		/* skb is only safe to use if !drop_skb */
		consume_skb(skb);
		if (chunk < 0) {
			if (copied == 0)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;

		if (drop_skb) {
			/* the skb was touched by a concurrent reader;
			 * we should not expect anything from this skb
			 * anymore and assume it invalid - we can be
			 * sure it was dropped from the socket queue
			 *
			 * let's report a short read
			 */
			err = 0;
			break;
		}

		/* Mark read part of skb as used */
		if (!(flags & MSG_PEEK)) {
			UNIXCB(skb).consumed += chunk;

			sk_peek_offset_bwd(sk, chunk);

			if (UNIXCB(skb).fp) {
				scm_stat_del(sk, skb);
				unix_detach_fds(&scm, skb);
			}

			if (unix_skb_len(skb))
				break;

			skb_unlink(skb, &sk->sk_receive_queue);
			consume_skb(skb);

			if (scm.fp)
				break;
		} else {
			/* It is questionable, see note in unix_dgram_recvmsg.
			 */
			if (UNIXCB(skb).fp)
				unix_peek_fds(&scm, skb);

			sk_peek_offset_fwd(sk, chunk);

			if (scm.fp)
				break;

			skip = 0;
			last = skb;
			last_len = skb->len;
			unix_state_lock(sk);
			skb = skb_peek_next(skb, &sk->sk_receive_queue);
			if (skb)
				goto again;
			unix_state_unlock(sk);
			break;
		}
	} while (size);

	mutex_unlock(&u->iolock);
	if (state->msg)
		scm_recv(sock, state->msg, &scm, flags);
	else
		scm_destroy(&scm);
out:
	return copied ? : err;
}

static int unix_stream_read_actor(struct sk_buff *skb,
				  int skip, int chunk,
				  struct unix_stream_read_state *state)
{
	int ret;

	ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
				    state->msg, chunk);
	return ret ?: chunk;
}

int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg,
			  size_t size, int flags)
{
	struct unix_stream_read_state state = {
		.recv_actor = unix_stream_read_actor,
		.socket = sk->sk_socket,
		.msg = msg,
		.size = size,
		.flags = flags
	};

	return unix_stream_read_generic(&state, true);
}

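/* recvmsg() entry point for SOCK_STREAM sockets.  As in the datagram
 * case, a socket owned by a BPF sockmap has its proto replaced, and the
 * call is forwarded to the sockmap implementation.
 */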
static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
			       size_t size, int flags)
{
	struct unix_stream_read_state state = {
		.recv_actor = unix_stream_read_actor,
		.socket = sock,
		.msg = msg,
		.size = size,
		.flags = flags
	};

#ifdef CONFIG_BPF_SYSCALL
	struct sock *sk = sock->sk;
	const struct proto *prot = READ_ONCE(sk->sk_prot);

	if (prot != &unix_stream_proto)
		return prot->recvmsg(sk, msg, size, flags, NULL);
#endif
	return unix_stream_read_generic(&state, true);
}

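/* splice(2) support: the same generic reader, but the actor moves data
 * straight into a pipe instead of copying it to user memory.
 */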
static int unix_stream_splice_actor(struct sk_buff *skb,
				    int skip, int chunk,
				    struct unix_stream_read_state *state)
{
	return skb_splice_bits(skb, state->socket->sk,
			       UNIXCB(skb).consumed + skip,
			       state->pipe, chunk, state->splice_flags);
}

static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos,
				       struct pipe_inode_info *pipe,
				       size_t size, unsigned int flags)
{
	struct unix_stream_read_state state = {
		.recv_actor = unix_stream_splice_actor,
		.socket = sock,
		.pipe = pipe,
		.size = size,
		.splice_flags = flags,
	};

	if (unlikely(*ppos))
		return -ESPIPE;

	if (sock->file->f_flags & O_NONBLOCK ||
	    flags & SPLICE_F_NONBLOCK)
		state.flags = MSG_DONTWAIT;

	return unix_stream_read_generic(&state, false);
}

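/* shutdown() for AF_UNIX.  The userspace SHUT_* values are off by one
 * from the kernel's shutdown bits (see the mapping below), and for
 * connected stream/seqpacket sockets the mirrored bits are propagated to
 * the peer so that its sleeping readers and writers are woken up.
 */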
static int unix_shutdown(struct socket *sock, int mode)
{
	struct sock *sk = sock->sk;
	struct sock *other;

	if (mode < SHUT_RD || mode > SHUT_RDWR)
		return -EINVAL;
	/* This maps:
	 * SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
	 * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
	 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
	 */
	++mode;

	unix_state_lock(sk);
	sk->sk_shutdown |= mode;
	other = unix_peer(sk);
	if (other)
		sock_hold(other);
	unix_state_unlock(sk);
	sk->sk_state_change(sk);

	if (other &&
	    (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
		int peer_mode = 0;
		const struct proto *prot = READ_ONCE(other->sk_prot);

		if (prot->unhash)
			prot->unhash(other);
		if (mode & RCV_SHUTDOWN)
			peer_mode |= SEND_SHUTDOWN;
		if (mode & SEND_SHUTDOWN)
			peer_mode |= RCV_SHUTDOWN;
		unix_state_lock(other);
		other->sk_shutdown |= peer_mode;
		unix_state_unlock(other);
		other->sk_state_change(other);
		if (peer_mode == SHUTDOWN_MASK)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
		else if (peer_mode & RCV_SHUTDOWN)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
	}
	if (other)
		sock_put(other);

	return 0;
}

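/* Byte counts backing the SIOCINQ/SIOCOUTQ ioctls.  For stream and
 * seqpacket sockets SIOCINQ reports the unconsumed bytes across the
 * whole receive queue; for datagram sockets it reports the size of the
 * first queued packet.
 */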
long unix_inq_len(struct sock *sk)
{
	struct sk_buff *skb;
	long amount = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -EINVAL;

	spin_lock(&sk->sk_receive_queue.lock);
	if (sk->sk_type == SOCK_STREAM ||
	    sk->sk_type == SOCK_SEQPACKET) {
		skb_queue_walk(&sk->sk_receive_queue, skb)
			amount += unix_skb_len(skb);
	} else {
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
	}
	spin_unlock(&sk->sk_receive_queue.lock);
	return amount;
}
EXPORT_SYMBOL_GPL(unix_inq_len);

long unix_outq_len(struct sock *sk)
{
	return sk_wmem_alloc_get(sk);
}
EXPORT_SYMBOL_GPL(unix_outq_len);

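/* SIOCUNIXFILE: open the filesystem object a socket is bound to as an
 * O_PATH file descriptor.  Requires CAP_NET_ADMIN in the user namespace
 * owning the socket's network namespace.
 */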
static int unix_open_file(struct sock *sk)
{
	struct path path;
	struct file *f;
	int fd;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	if (!smp_load_acquire(&unix_sk(sk)->addr))
		return -ENOENT;

	path = unix_sk(sk)->path;
	if (!path.dentry)
		return -ENOENT;

	path_get(&path);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out;

	f = dentry_open(&path, O_PATH, current_cred());
	if (IS_ERR(f)) {
		put_unused_fd(fd);
		fd = PTR_ERR(f);
		goto out;
	}

	fd_install(fd, f);
out:
	path_put(&path);
	return fd;
}

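/* ioctl() multiplexer: SIOCOUTQ/SIOCINQ report queued byte counts,
 * SIOCUNIXFILE opens the bound inode, and SIOCATMARK tells whether the
 * next byte to read is the out-of-band byte.
 */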
static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	long amount = 0;
	int err;

	switch (cmd) {
	case SIOCOUTQ:
		amount = unix_outq_len(sk);
		err = put_user(amount, (int __user *)arg);
		break;
	case SIOCINQ:
		amount = unix_inq_len(sk);
		if (amount < 0)
			err = amount;
		else
			err = put_user(amount, (int __user *)arg);
		break;
	case SIOCUNIXFILE:
		err = unix_open_file(sk);
		break;
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
	case SIOCATMARK:
		{
			struct sk_buff *skb;
			int answ = 0;

			skb = skb_peek(&sk->sk_receive_queue);
			if (skb && skb == READ_ONCE(unix_sk(sk)->oob_skb))
				answ = 1;
			err = put_user(answ, (int __user *)arg);
		}
		break;
#endif
	default:
		err = -ENOIOCTLCMD;
		break;
	}
	return err;
}

#ifdef CONFIG_COMPAT
static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask;

	sock_poll_wait(file, sock, wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err)
		mask |= EPOLLERR;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;

	/* readable? */
	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (sk_is_readable(sk))
		mask |= EPOLLIN | EPOLLRDNORM;
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
	if (READ_ONCE(unix_sk(sk)->oob_skb))
		mask |= EPOLLPRI;
#endif

	/* Connection-based need to check for termination and startup */
	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
	    sk->sk_state == TCP_CLOSE)
		mask |= EPOLLHUP;

	/*
	 * we set writable also when the other side has shut down the
	 * connection. This prevents stuck sockets.
	 */
	if (unix_writable(sk))
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;

	return mask;
}

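/* Datagram poll differs from the generic poll above in that writability
 * also depends on the peer: if the peer's receive queue is full, the
 * socket is not reported writable and a wakeup from the peer is armed
 * via unix_dgram_peer_wake_me() instead.
 */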
static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
				poll_table *wait)
{
	struct sock *sk = sock->sk, *other;
	unsigned int writable;
	__poll_t mask;

	sock_poll_wait(file, sock, wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
		mask |= EPOLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;

	/* readable? */
	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (sk_is_readable(sk))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= EPOLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* No write status requested, avoid expensive OUT tests. */
	if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
		return mask;

	writable = unix_writable(sk);
	if (writable) {
		unix_state_lock(sk);

		other = unix_peer(sk);
		if (other && unix_peer(other) != sk &&
		    unix_recvq_full_lockless(other) &&
		    unix_dgram_peer_wake_me(sk, other))
			writable = 0;

		unix_state_unlock(sk);
	}

	if (writable)
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}

#ifdef CONFIG_PROC_FS

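/* /proc/net/unix iteration.  The seq_file position encodes both the hash
 * bucket and the offset within that bucket, so iteration can resume in
 * the right bucket after the bucket lock is dropped between reads.
 */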
#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)

#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1UL << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))

static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
{
	unsigned long offset = get_offset(*pos);
	unsigned long bucket = get_bucket(*pos);
	unsigned long count = 0;
	struct sock *sk;

	for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
		if (sock_net(sk) != seq_file_net(seq))
			continue;
		if (++count == offset)
			break;
	}

	return sk;
}

static struct sock *unix_get_first(struct seq_file *seq, loff_t *pos)
{
	unsigned long bucket = get_bucket(*pos);
	struct sock *sk;

	while (bucket < ARRAY_SIZE(unix_socket_table)) {
		spin_lock(&unix_table_locks[bucket]);

		sk = unix_from_bucket(seq, pos);
		if (sk)
			return sk;

		spin_unlock(&unix_table_locks[bucket]);
		*pos = set_bucket_offset(++bucket, 1);
	}

	return NULL;
}

static struct sock *unix_get_next(struct seq_file *seq, struct sock *sk,
				  loff_t *pos)
{
	unsigned long bucket = get_bucket(*pos);

	for (sk = sk_next(sk); sk; sk = sk_next(sk))
		if (sock_net(sk) == seq_file_net(seq))
			return sk;

	spin_unlock(&unix_table_locks[bucket]);
	*pos = set_bucket_offset(++bucket, 1);
	return unix_get_first(seq, pos);
}

static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (!*pos)
		return SEQ_START_TOKEN;
	return unix_get_first(seq, pos);
}

static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	if (v == SEQ_START_TOKEN)
		return unix_get_first(seq, pos);
	return unix_get_next(seq, v, pos);
}

static void unix_seq_stop(struct seq_file *seq, void *v)
{
	struct sock *sk = v;

	if (sk)
		spin_unlock(&unix_table_locks[sk->sk_hash]);
}

static int unix_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
			 "Inode Path\n");
	else {
		struct sock *s = v;
		struct unix_sock *u = unix_sk(s);

		unix_state_lock(s);
		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
			s,
			refcount_read(&s->sk_refcnt),
			0,
			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
			s->sk_type,
			s->sk_socket ?
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
			sock_i_ino(s));

		if (u->addr) {	/* under unix_table_locks here */
			int i, len;

			seq_putc(seq, ' ');
			i = 0;
			len = u->addr->len -
				offsetof(struct sockaddr_un, sun_path);
			if (u->addr->name->sun_path[0]) {
				len--;
			} else {
				seq_putc(seq, '@');
				i++;
			}
			for ( ; i < len; i++)
				seq_putc(seq, u->addr->name->sun_path[i] ?:
					 '@');
		}
		unix_state_unlock(s);
		seq_putc(seq, '\n');
	}
	return 0;
}

static const struct seq_operations unix_seq_ops = {
	.start = unix_seq_start,
	.next  = unix_seq_next,
	.stop  = unix_seq_stop,
	.show  = unix_seq_show,
};

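/* BPF iterator for unix sockets (bpf_iter).  Rather than holding a bucket
 * lock while the BPF program runs, sockets are batched: one bucket's
 * sockets are grabbed with refcounts held, the lock is dropped, and the
 * program is then invoked on each batched socket in turn.
 */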
#if IS_BUILTIN(CONFIG_UNIX) && defined(CONFIG_BPF_SYSCALL)
struct bpf_unix_iter_state {
	struct seq_net_private p;
	unsigned int cur_sk;
	unsigned int end_sk;
	unsigned int max_sk;
	struct sock **batch;
	bool st_bucket_done;
};

struct bpf_iter__unix {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct unix_sock *, unix_sk);
	uid_t uid __aligned(8);
};

static int unix_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
			      struct unix_sock *unix_sk, uid_t uid)
{
	struct bpf_iter__unix ctx;

	meta->seq_num--;  /* skip SEQ_START_TOKEN */
	ctx.meta = meta;
	ctx.unix_sk = unix_sk;
	ctx.uid = uid;
	return bpf_iter_run_prog(prog, &ctx);
}

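/* Pin up to max_sk sockets from the current bucket and count how many the
 * bucket actually holds; the caller compares the two and, on overflow,
 * grows the batch array and retries.  Drops the bucket lock on return.
 */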
static int bpf_iter_unix_hold_batch(struct seq_file *seq, struct sock *start_sk)
{
	struct bpf_unix_iter_state *iter = seq->private;
	unsigned int expected = 1;
	struct sock *sk;

	sock_hold(start_sk);
	iter->batch[iter->end_sk++] = start_sk;

	for (sk = sk_next(start_sk); sk; sk = sk_next(sk)) {
		if (sock_net(sk) != seq_file_net(seq))
			continue;
		if (iter->end_sk < iter->max_sk) {
			sock_hold(sk);
			iter->batch[iter->end_sk++] = sk;
		}
		expected++;
	}

	spin_unlock(&unix_table_locks[start_sk->sk_hash]);
	return expected;
}

static void bpf_iter_unix_put_batch(struct bpf_unix_iter_state *iter)
{
	while (iter->cur_sk < iter->end_sk)
		sock_put(iter->batch[iter->cur_sk++]);
}

static int bpf_iter_unix_realloc_batch(struct bpf_unix_iter_state *iter,
				       unsigned int new_batch_sz)
{
	struct sock **new_batch;

	new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
			     GFP_USER | __GFP_NOWARN);
	if (!new_batch)
		return -ENOMEM;

	bpf_iter_unix_put_batch(iter);
	kvfree(iter->batch);
	iter->batch = new_batch;
	iter->max_sk = new_batch_sz;
	return 0;
}

static struct sock *bpf_iter_unix_batch(struct seq_file *seq,
					loff_t *pos)
{
	struct bpf_unix_iter_state *iter = seq->private;
	unsigned int expected;
	bool resized = false;
	struct sock *sk;

	if (iter->st_bucket_done)
		*pos = set_bucket_offset(get_bucket(*pos) + 1, 1);

again:
	/* Get a new batch */
	iter->cur_sk = 0;
	iter->end_sk = 0;

	sk = unix_get_first(seq, pos);
	if (!sk)
		return NULL; /* Done */

	expected = bpf_iter_unix_hold_batch(seq, sk);
	if (iter->end_sk == expected) {
		iter->st_bucket_done = true;
		return sk;
	}

	if (!resized && !bpf_iter_unix_realloc_batch(iter, expected * 3 / 2)) {
		resized = true;
		goto again;
	}

	return sk;
}

static void *bpf_iter_unix_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (!*pos)
		return SEQ_START_TOKEN;

	/* bpf iter does not support lseek, so it always
	 * continues from where it was stop()-ped.
	 */
	return bpf_iter_unix_batch(seq, pos);
}

static void *bpf_iter_unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_unix_iter_state *iter = seq->private;
	struct sock *sk;

	/* Whenever seq_next() is called, the iter->cur_sk is
	 * done with seq_show(), so advance to the next sk in
	 * the batch.
	 */
	if (iter->cur_sk < iter->end_sk)
		sock_put(iter->batch[iter->cur_sk++]);

	++*pos;
	if (iter->cur_sk < iter->end_sk)
		sk = iter->batch[iter->cur_sk];
	else
		sk = bpf_iter_unix_batch(seq, pos);
	return sk;
}

static int bpf_iter_unix_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	struct sock *sk = v;
	uid_t uid;
	bool slow;
	int ret;

	if (v == SEQ_START_TOKEN)
		return 0;

	slow = lock_sock_fast(sk);
	if (unlikely(sk_unhashed(sk))) {
		ret = SEQ_SKIP;
		goto unlock;
	}

	uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, false);
	ret = unix_prog_seq_show(prog, &meta, v, uid);
unlock:
	unlock_sock_fast(sk, slow);
	return ret;
}

static void bpf_iter_unix_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_unix_iter_state *iter = seq->private;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	if (!v) {
		meta.seq = seq;
		prog = bpf_iter_get_info(&meta, true);
		if (prog)
			(void)unix_prog_seq_show(prog, &meta, v, 0);
	}

	if (iter->cur_sk < iter->end_sk)
		bpf_iter_unix_put_batch(iter);
}

static const struct seq_operations bpf_iter_unix_seq_ops = {
	.start = bpf_iter_unix_seq_start,
	.next  = bpf_iter_unix_seq_next,
	.stop  = bpf_iter_unix_seq_stop,
	.show  = bpf_iter_unix_seq_show,
};
#endif

static const struct net_proto_family unix_family_ops = {
	.family = PF_UNIX,
	.create = unix_create,
	.owner = THIS_MODULE,
};

static int __net_init unix_net_init(struct net *net)
{
	int error = -ENOMEM;

	net->unx.sysctl_max_dgram_qlen = 10;
	if (unix_sysctl_register(net))
		goto out;

#ifdef CONFIG_PROC_FS
	if (!proc_create_net("unix", 0, net->proc_net, &unix_seq_ops,
			     sizeof(struct seq_net_private))) {
		unix_sysctl_unregister(net);
		goto out;
	}
#endif
	error = 0;
out:
	return error;
}

static void __net_exit unix_net_exit(struct net *net)
{
	unix_sysctl_unregister(net);
	remove_proc_entry("unix", net->proc_net);
}

static struct pernet_operations unix_net_ops = {
	.init = unix_net_init,
	.exit = unix_net_exit,
};

#if IS_BUILTIN(CONFIG_UNIX) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
DEFINE_BPF_ITER_FUNC(unix, struct bpf_iter_meta *meta,
		     struct unix_sock *unix_sk, uid_t uid)

#define INIT_BATCH_SZ 16

static int bpf_iter_init_unix(void *priv_data, struct bpf_iter_aux_info *aux)
{
	struct bpf_unix_iter_state *iter = priv_data;
	int err;

	err = bpf_iter_init_seq_net(priv_data, aux);
	if (err)
		return err;

	err = bpf_iter_unix_realloc_batch(iter, INIT_BATCH_SZ);
	if (err) {
		bpf_iter_fini_seq_net(priv_data);
		return err;
	}

	return 0;
}

static void bpf_iter_fini_unix(void *priv_data)
{
	struct bpf_unix_iter_state *iter = priv_data;

	bpf_iter_fini_seq_net(priv_data);
	kvfree(iter->batch);
}

static const struct bpf_iter_seq_info unix_seq_info = {
	.seq_ops = &bpf_iter_unix_seq_ops,
	.init_seq_private = bpf_iter_init_unix,
	.fini_seq_private = bpf_iter_fini_unix,
	.seq_priv_size = sizeof(struct bpf_unix_iter_state),
};

static const struct bpf_func_proto *
bpf_iter_unix_get_func_proto(enum bpf_func_id func_id,
			     const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_setsockopt:
		return &bpf_sk_setsockopt_proto;
	case BPF_FUNC_getsockopt:
		return &bpf_sk_getsockopt_proto;
	default:
		return NULL;
	}
}

static struct bpf_iter_reg unix_reg_info = {
	.target = "unix",
	.ctx_arg_info_size = 1,
	.ctx_arg_info = {
		{ offsetof(struct bpf_iter__unix, unix_sk),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.get_func_proto = bpf_iter_unix_get_func_proto,
	.seq_info = &unix_seq_info,
};

static void __init bpf_iter_register(void)
{
	unix_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UNIX];
	if (bpf_iter_reg_target(&unix_reg_info))
		pr_warn("Warning: could not register bpf iterator unix\n");
}
#endif

static int __init af_unix_init(void)
{
	int i, rc = -1;

	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb));

	for (i = 0; i < 2 * UNIX_HASH_SIZE; i++)
		spin_lock_init(&unix_table_locks[i]);

	rc = proto_register(&unix_dgram_proto, 1);
	if (rc != 0) {
		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
		goto out;
	}

	rc = proto_register(&unix_stream_proto, 1);
	if (rc != 0) {
		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
		proto_unregister(&unix_dgram_proto);
		goto out;
	}

	sock_register(&unix_family_ops);
	register_pernet_subsys(&unix_net_ops);
	unix_bpf_build_proto();

#if IS_BUILTIN(CONFIG_UNIX) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
	bpf_iter_register();
#endif
out:
	return rc;
}

static void __exit af_unix_exit(void)
{
	sock_unregister(PF_UNIX);
	proto_unregister(&unix_dgram_proto);
	proto_unregister(&unix_stream_proto);
	unregister_pernet_subsys(&unix_net_ops);
}

/* Earlier than device_initcall() so that other drivers invoking
 * request_module() don't end up in a loop when modprobe tries
 * to use a UNIX socket. But later than subsys_initcall() because
 * we depend on stuff initialised there.
 */
fs_initcall(af_unix_init);
module_exit(af_unix_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_UNIX);