1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * NET4: Implementation of BSD Unix domain sockets.
5 * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk>
8 * Linus Torvalds : Assorted bug cures.
9 * Niibe Yutaka : async I/O support.
10 * Carsten Paeth : PF_UNIX check, address fixes.
11 * Alan Cox : Limit size of allocated blocks.
12 * Alan Cox : Fixed the stupid socketpair bug.
13 * Alan Cox : BSD compatibility fine tuning.
14 * Alan Cox : Fixed a bug in connect when interrupted.
15 * Alan Cox : Sorted out a proper draft version of
16 * file descriptor passing hacked up from
18 * Marty Leisner : Fixes to fd passing
19 * Nick Nevin : recvmsg bugfix.
20 * Alan Cox : Started proper garbage collector
21 * Heiko EiBfeldt : Missing verify_area check
22 * Alan Cox : Started POSIXisms
23 * Andreas Schwab : Replace inode by dentry for proper
25 * Kirk Petersen : Made this a module
26 * Christoph Rohland : Elegant non-blocking accept/connect algorithm.
 * Alexey Kuznetsov : Repaired (I hope) bugs introduced
 * by the above two patches.
 * Andrea Arcangeli : If possible we block in connect(2)
 * if the max backlog of the listening socket
 * has been reached. This won't break
 * old apps and it will avoid a huge amount
 * of sockets being hashed (this is for unix_gc()
 * performance reasons).
 * Security fix that limits the max
 * number of socks to 2*max_files and
 * the number of skbs queueable in the
 * dgram receiver.
40 * Artur Skawina : Hash function optimizations
41 * Alexey Kuznetsov : Full scale SMP. Lot of bugs are introduced 8)
42 * Malcolm Beattie : Set peercred for socketpair
43 * Michal Ostrowski : Module initialization cleanup.
44 * Arnaldo C. Melo : Remove MOD_{INC,DEC}_USE_COUNT,
45 * the core infrastructure is doing that
46 * for all net proto families now (2.5.69+)
48 * Known differences from reference BSD that was tested:
51 * ECONNREFUSED is not returned from one end of a connected() socket to the
52 * other the moment one end closes.
 * fstat() doesn't return st_dev=0; it gives the blksize as a high water mark
 * and a fake inode identifier (nor does it have the BSD first-socket fstat-twice bug).
56 * accept() returns a path name even if the connecting socket has closed
57 * in the meantime (BSD loses the path and gives up).
58 * accept() returns 0 length path for an unbound connector. BSD returns 16
59 * and a null first byte in the path (but not for gethost/peername - BSD bug ??)
60 * socketpair(...SOCK_RAW..) doesn't panic the kernel.
61 * BSD af_unix apparently has connect forgetting to block properly.
62 * (need to check this with the POSIX spec in detail)
64 * Differences from 2.0.0-11-... (ANK)
65 * Bug fixes and improvements.
66 * - client shutdown killed server socket.
67 * - removed all useless cli/sti pairs.
69 * Semantic changes/extensions.
70 * - generic control message passing.
71 * - SCM_CREDENTIALS control message.
72 * - "Abstract" (not FS based) socket bindings.
 * Abstract names are sequences of bytes (not zero terminated)
 * starting with a 0 byte, so that this name space does not intersect
 * with BSD names.
 */
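/* Illustrative only: a minimal userspace sketch of binding to an abstract
 * address (standard socket API, not part of this file). The address length
 * counts the leading NUL plus the name bytes; there is no trailing NUL:
 *
 *	struct sockaddr_un sun = { .sun_family = AF_UNIX };
 *	int fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *
 *	sun.sun_path[0] = '\0';				// abstract marker
 *	memcpy(sun.sun_path + 1, "example", 7);		// name is "\0example"
 *	bind(fd, (struct sockaddr *)&sun,
 *	     offsetof(struct sockaddr_un, sun_path) + 1 + 7);
 */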
78 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
80 #include <linux/module.h>
81 #include <linux/kernel.h>
82 #include <linux/signal.h>
83 #include <linux/sched/signal.h>
84 #include <linux/errno.h>
85 #include <linux/string.h>
86 #include <linux/stat.h>
87 #include <linux/dcache.h>
88 #include <linux/namei.h>
89 #include <linux/socket.h>
91 #include <linux/fcntl.h>
92 #include <linux/filter.h>
93 #include <linux/termios.h>
94 #include <linux/sockios.h>
95 #include <linux/net.h>
98 #include <linux/slab.h>
99 #include <linux/uaccess.h>
100 #include <linux/skbuff.h>
101 #include <linux/netdevice.h>
102 #include <net/net_namespace.h>
103 #include <net/sock.h>
104 #include <net/tcp_states.h>
105 #include <net/af_unix.h>
106 #include <linux/proc_fs.h>
107 #include <linux/seq_file.h>
109 #include <linux/init.h>
110 #include <linux/poll.h>
111 #include <linux/rtnetlink.h>
112 #include <linux/mount.h>
113 #include <net/checksum.h>
114 #include <linux/security.h>
115 #include <linux/splice.h>
116 #include <linux/freezer.h>
117 #include <linux/file.h>
118 #include <linux/btf_ids.h>
122 static atomic_long_t unix_nr_socks;
123 static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2];
124 static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2];
/* SMP locking strategy:
 *    the hash table is protected with a spinlock.
 *    each socket's state is protected by a separate spinlock.
 */
131 static unsigned int unix_unbound_hash(struct sock *sk)
133 unsigned long hash = (unsigned long)sk;
139 return hash & UNIX_HASH_MOD;
142 static unsigned int unix_bsd_hash(struct inode *i)
144 return i->i_ino & UNIX_HASH_MOD;
147 static unsigned int unix_abstract_hash(struct sockaddr_un *sunaddr,
148 int addr_len, int type)
150 __wsum csum = csum_partial(sunaddr, addr_len, 0);
153 hash = (__force unsigned int)csum_fold(csum);
157 return UNIX_HASH_MOD + 1 + (hash & UNIX_HASH_MOD);
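/* Worked example of how the hash helpers above partition the per-netns
 * table (assuming UNIX_HASH_MOD == UNIX_HASH_SIZE / 2 - 1 as in
 * <net/af_unix.h>; illustrative only):
 *
 *	unix_unbound_hash()	-> [0, UNIX_HASH_MOD]		(sock pointer)
 *	unix_bsd_hash()		-> [0, UNIX_HASH_MOD]		(inode number)
 *	unix_abstract_hash()	-> [UNIX_HASH_MOD + 1, UNIX_HASH_SIZE - 1]
 *
 * Abstract sockets therefore never share a bucket with unbound or
 * pathname-bound ones, and pathname-bound sockets are additionally linked
 * into bsd_socket_buckets[] for the inode lookup in unix_find_socket_byinode().
 */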
160 static void unix_table_double_lock(struct net *net,
161 unsigned int hash1, unsigned int hash2)
163 if (hash1 == hash2) {
164 spin_lock(&net->unx.table.locks[hash1]);
171 spin_lock(&net->unx.table.locks[hash1]);
172 spin_lock_nested(&net->unx.table.locks[hash2], SINGLE_DEPTH_NESTING);
175 static void unix_table_double_unlock(struct net *net,
176 unsigned int hash1, unsigned int hash2)
178 if (hash1 == hash2) {
179 spin_unlock(&net->unx.table.locks[hash1]);
183 spin_unlock(&net->unx.table.locks[hash1]);
184 spin_unlock(&net->unx.table.locks[hash2]);
187 #ifdef CONFIG_SECURITY_NETWORK
188 static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
190 UNIXCB(skb).secid = scm->secid;
193 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
195 scm->secid = UNIXCB(skb).secid;
198 static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
200 return (scm->secid == UNIXCB(skb).secid);
203 static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
206 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
209 static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
213 #endif /* CONFIG_SECURITY_NETWORK */
215 static inline int unix_our_peer(struct sock *sk, struct sock *osk)
217 return unix_peer(osk) == sk;
220 static inline int unix_may_send(struct sock *sk, struct sock *osk)
222 return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
225 static inline int unix_recvq_full(const struct sock *sk)
227 return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
230 static inline int unix_recvq_full_lockless(const struct sock *sk)
232 return skb_queue_len_lockless(&sk->sk_receive_queue) >
233 READ_ONCE(sk->sk_max_ack_backlog);
236 struct sock *unix_peer_get(struct sock *s)
244 unix_state_unlock(s);
247 EXPORT_SYMBOL_GPL(unix_peer_get);
249 static struct unix_address *unix_create_addr(struct sockaddr_un *sunaddr,
252 struct unix_address *addr;
254 addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL);
258 refcount_set(&addr->refcnt, 1);
259 addr->len = addr_len;
260 memcpy(addr->name, sunaddr, addr_len);
265 static inline void unix_release_addr(struct unix_address *addr)
267 if (refcount_dec_and_test(&addr->refcnt))
/*
 * Check unix socket name:
 * - should not be zero length.
 * - if it does not start with a zero byte, it should be NUL terminated (FS object)
 * - if it starts with a zero byte, it is an abstract name.
 */
278 static int unix_validate_addr(struct sockaddr_un *sunaddr, int addr_len)
280 if (addr_len <= offsetof(struct sockaddr_un, sun_path) ||
281 addr_len > sizeof(*sunaddr))
284 if (sunaddr->sun_family != AF_UNIX)
290 static int unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len)
292 struct sockaddr_storage *addr = (struct sockaddr_storage *)sunaddr;
293 short offset = offsetof(struct sockaddr_storage, __data);
295 BUILD_BUG_ON(offset != offsetof(struct sockaddr_un, sun_path));
297 /* This may look like an off by one error but it is a bit more
298 * subtle. 108 is the longest valid AF_UNIX path for a binding.
299 * sun_path[108] doesn't as such exist. However in kernel space
300 * we are guaranteed that it is a valid memory location in our
301 * kernel address buffer because syscall functions always pass
302 * a pointer of struct sockaddr_storage which has a bigger buffer
303 * than 108. Also, we must terminate sun_path for strlen() in
306 addr->__data[addr_len - offset] = 0;
308 /* Don't pass sunaddr->sun_path to strlen(). Otherwise, 108 will
309 * cause panic if CONFIG_FORTIFY_SOURCE=y. Let __fortify_strlen()
310 * know the actual buffer.
312 return strlen(addr->__data) + offset + 1;
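/* Worked example (illustrative): with offset == 2, a caller binding to
 * "/tmp/sock" without a trailing NUL passes addr_len == 2 + 9. The store
 * above writes a NUL at __data[9], strlen() then returns 9, and the
 * function reports 9 + 2 + 1 == 12, i.e. the length including the
 * terminator. For a maximal 108-byte path, addr_len == 110 and the NUL
 * lands in __data[108], one byte past sun_path[] but still inside the
 * caller's struct sockaddr_storage, as the comment above relies on.
 */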
315 static void __unix_remove_socket(struct sock *sk)
317 sk_del_node_init(sk);
320 static void __unix_insert_socket(struct net *net, struct sock *sk)
322 DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
323 sk_add_node(sk, &net->unx.table.buckets[sk->sk_hash]);
326 static void __unix_set_addr_hash(struct net *net, struct sock *sk,
327 struct unix_address *addr, unsigned int hash)
329 __unix_remove_socket(sk);
330 smp_store_release(&unix_sk(sk)->addr, addr);
333 __unix_insert_socket(net, sk);
336 static void unix_remove_socket(struct net *net, struct sock *sk)
338 spin_lock(&net->unx.table.locks[sk->sk_hash]);
339 __unix_remove_socket(sk);
340 spin_unlock(&net->unx.table.locks[sk->sk_hash]);
343 static void unix_insert_unbound_socket(struct net *net, struct sock *sk)
345 spin_lock(&net->unx.table.locks[sk->sk_hash]);
346 __unix_insert_socket(net, sk);
347 spin_unlock(&net->unx.table.locks[sk->sk_hash]);
350 static void unix_insert_bsd_socket(struct sock *sk)
352 spin_lock(&bsd_socket_locks[sk->sk_hash]);
353 sk_add_bind_node(sk, &bsd_socket_buckets[sk->sk_hash]);
354 spin_unlock(&bsd_socket_locks[sk->sk_hash]);
357 static void unix_remove_bsd_socket(struct sock *sk)
359 if (!hlist_unhashed(&sk->sk_bind_node)) {
360 spin_lock(&bsd_socket_locks[sk->sk_hash]);
361 __sk_del_bind_node(sk);
362 spin_unlock(&bsd_socket_locks[sk->sk_hash]);
364 sk_node_init(&sk->sk_bind_node);
368 static struct sock *__unix_find_socket_byname(struct net *net,
369 struct sockaddr_un *sunname,
370 int len, unsigned int hash)
374 sk_for_each(s, &net->unx.table.buckets[hash]) {
375 struct unix_sock *u = unix_sk(s);
377 if (u->addr->len == len &&
378 !memcmp(u->addr->name, sunname, len))
384 static inline struct sock *unix_find_socket_byname(struct net *net,
385 struct sockaddr_un *sunname,
386 int len, unsigned int hash)
390 spin_lock(&net->unx.table.locks[hash]);
391 s = __unix_find_socket_byname(net, sunname, len, hash);
394 spin_unlock(&net->unx.table.locks[hash]);
398 static struct sock *unix_find_socket_byinode(struct inode *i)
400 unsigned int hash = unix_bsd_hash(i);
403 spin_lock(&bsd_socket_locks[hash]);
404 sk_for_each_bound(s, &bsd_socket_buckets[hash]) {
405 struct dentry *dentry = unix_sk(s)->path.dentry;
407 if (dentry && d_backing_inode(dentry) == i) {
409 spin_unlock(&bsd_socket_locks[hash]);
413 spin_unlock(&bsd_socket_locks[hash]);
/* Support code for asymmetrically connected dgram sockets
 *
 * If a datagram socket is connected to a socket that is not itself connected
 * to the first socket (e.g. /dev/log), clients may only enqueue more
 * messages if the present receive queue of the server socket is not
 * "too large". This means there's a second writability condition
 * that poll and sendmsg need to test. The dgram recv code will do a wake
 * up on the peer_wait wait queue of a socket upon reception of a
 * datagram, which needs to be propagated to sleeping would-be writers
 * since these might not have sent anything so far. This can't be
 * accomplished via poll_wait because the lifetime of the server
 * socket might be less than that of its clients if these break their
 * association with it or if the server socket is closed while clients
 * are still connected to it, and there's no way to inform "a polling
 * implementation" that it should let go of a certain wait queue.
 *
 * In order to propagate a wake up, a wait_queue_entry_t of the client
 * socket is enqueued on the peer_wait queue of the server socket
 * whose wake function does a wake_up on the ordinary client socket
 * wait queue. This connection is established whenever a write (or
 * poll for write) hits the flow control condition and is broken when the
 * association to the server socket is dissolved or after a wake up
 * was relayed.
 */
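/* A rough sketch of the relay path described above (control flow
 * simplified, names as used in this file):
 *
 *	writer hits the flow-control condition
 *	  unix_dgram_peer_wake_me(sk, other)
 *	    unix_dgram_peer_wake_connect()	hooks sk's peer_wake entry
 *						into other->peer_wait
 *
 *	reader dequeues a datagram from other
 *	  wake_up_interruptible_sync_poll(&u->peer_wait, EPOLLOUT | ...)
 *	    unix_dgram_peer_wake_relay()	runs as the wake function,
 *						unhooks the entry and wakes
 *						sk_sleep() of the writer
 */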
442 static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
446 wait_queue_head_t *u_sleep;
448 u = container_of(q, struct unix_sock, peer_wake);
450 __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
452 u->peer_wake.private = NULL;
454 /* relaying can only happen while the wq still exists */
455 u_sleep = sk_sleep(&u->sk);
457 wake_up_interruptible_poll(u_sleep, key_to_poll(key));
462 static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
464 struct unix_sock *u, *u_other;
468 u_other = unix_sk(other);
470 spin_lock(&u_other->peer_wait.lock);
472 if (!u->peer_wake.private) {
473 u->peer_wake.private = other;
474 __add_wait_queue(&u_other->peer_wait, &u->peer_wake);
479 spin_unlock(&u_other->peer_wait.lock);
483 static void unix_dgram_peer_wake_disconnect(struct sock *sk,
486 struct unix_sock *u, *u_other;
489 u_other = unix_sk(other);
490 spin_lock(&u_other->peer_wait.lock);
492 if (u->peer_wake.private == other) {
493 __remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
494 u->peer_wake.private = NULL;
497 spin_unlock(&u_other->peer_wait.lock);
500 static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
503 unix_dgram_peer_wake_disconnect(sk, other);
504 wake_up_interruptible_poll(sk_sleep(sk),
/* preconditions:
 *	- unix_peer(sk) == other
 *	- association is stable
 */
514 static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
518 connected = unix_dgram_peer_wake_connect(sk, other);
520 /* If other is SOCK_DEAD, we want to make sure we signal
521 * POLLOUT, such that a subsequent write() can get a
 * -ECONNREFUSED. Otherwise, if we haven't queued any skbs
 * to other and it's full, we will hang waiting for POLLOUT.
 */
525 if (unix_recvq_full_lockless(other) && !sock_flag(other, SOCK_DEAD))
529 unix_dgram_peer_wake_disconnect(sk, other);
534 static int unix_writable(const struct sock *sk)
536 return sk->sk_state != TCP_LISTEN &&
537 (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
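/* Illustrative numbers: with a default sk_sndbuf of roughly 208 KiB, the
 * "<< 2" above means the socket stops being writable once about a quarter
 * of sk_sndbuf is held in queued skbs; e.g. wmem_alloc == 64 KiB shifts to
 * 256 KiB, which exceeds sk_sndbuf, so poll() stops reporting EPOLLOUT.
 */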
540 static void unix_write_space(struct sock *sk)
542 struct socket_wq *wq;
545 if (unix_writable(sk)) {
546 wq = rcu_dereference(sk->sk_wq);
547 if (skwq_has_sleeper(wq))
548 wake_up_interruptible_sync_poll(&wq->wait,
549 EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
550 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
/* When a dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets that arrived from the previous peer. First, this allows us to
 * do flow control based only on wmem_alloc; second, a sk connected to a peer
 * may receive messages only from that peer. */
559 static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
561 if (!skb_queue_empty(&sk->sk_receive_queue)) {
562 skb_queue_purge(&sk->sk_receive_queue);
563 wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
/* If one link of a bidirectional dgram pipe is disconnected,
 * we signal an error. Messages are lost. Do not do this
 * when the peer was not connected to us.
 */
569 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
570 WRITE_ONCE(other->sk_err, ECONNRESET);
571 sk_error_report(other);
574 other->sk_state = TCP_CLOSE;
577 static void unix_sock_destructor(struct sock *sk)
579 struct unix_sock *u = unix_sk(sk);
581 skb_queue_purge(&sk->sk_receive_queue);
583 DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
584 DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
585 DEBUG_NET_WARN_ON_ONCE(sk->sk_socket);
586 if (!sock_flag(sk, SOCK_DEAD)) {
587 pr_info("Attempt to release alive unix socket: %p\n", sk);
592 unix_release_addr(u->addr);
594 atomic_long_dec(&unix_nr_socks);
595 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
596 #ifdef UNIX_REFCNT_DEBUG
597 pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
598 atomic_long_read(&unix_nr_socks));
602 static void unix_release_sock(struct sock *sk, int embrion)
604 struct unix_sock *u = unix_sk(sk);
610 unix_remove_socket(sock_net(sk), sk);
611 unix_remove_bsd_socket(sk);
616 WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
618 u->path.dentry = NULL;
620 state = sk->sk_state;
621 sk->sk_state = TCP_CLOSE;
623 skpair = unix_peer(sk);
624 unix_peer(sk) = NULL;
626 unix_state_unlock(sk);
628 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
630 kfree_skb(u->oob_skb);
635 wake_up_interruptible_all(&u->peer_wait);
637 if (skpair != NULL) {
638 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
639 unix_state_lock(skpair);
641 WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
642 if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
643 WRITE_ONCE(skpair->sk_err, ECONNRESET);
644 unix_state_unlock(skpair);
645 skpair->sk_state_change(skpair);
646 sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
649 unix_dgram_peer_wake_disconnect(sk, skpair);
650 sock_put(skpair); /* It may now die */
653 /* Try to flush out this socket. Throw out buffers at least */
655 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
656 if (state == TCP_LISTEN)
657 unix_release_sock(skb->sk, 1);
658 /* passed fds are erased in the kfree_skb hook */
659 UNIXCB(skb).consumed = skb->len;
668 /* ---- Socket is dead now and most probably destroyed ---- */
/*
 * Fixme: BSD difference: In BSD all sockets connected to us get
 *	  ECONNRESET and we die on the spot. In Linux we behave
 *	  like files and pipes do and wait for the last
 *	  dereference.
 *
 * Can't we simply set sock->err?
 *
 *	  What does the above comment talk about? --ANK(980817)
 */
681 if (READ_ONCE(unix_tot_inflight))
682 unix_gc(); /* Garbage collect fds */
685 static void init_peercred(struct sock *sk)
687 const struct cred *old_cred;
690 spin_lock(&sk->sk_peer_lock);
691 old_pid = sk->sk_peer_pid;
692 old_cred = sk->sk_peer_cred;
693 sk->sk_peer_pid = get_pid(task_tgid(current));
694 sk->sk_peer_cred = get_current_cred();
695 spin_unlock(&sk->sk_peer_lock);
701 static void copy_peercred(struct sock *sk, struct sock *peersk)
703 const struct cred *old_cred;
707 spin_lock(&sk->sk_peer_lock);
708 spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
710 spin_lock(&peersk->sk_peer_lock);
711 spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
713 old_pid = sk->sk_peer_pid;
714 old_cred = sk->sk_peer_cred;
715 sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
716 sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
718 spin_unlock(&sk->sk_peer_lock);
719 spin_unlock(&peersk->sk_peer_lock);
725 static int unix_listen(struct socket *sock, int backlog)
728 struct sock *sk = sock->sk;
729 struct unix_sock *u = unix_sk(sk);
732 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
733 goto out; /* Only stream/seqpacket sockets accept */
736 goto out; /* No listens on an unbound socket */
738 if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
740 if (backlog > sk->sk_max_ack_backlog)
741 wake_up_interruptible_all(&u->peer_wait);
742 sk->sk_max_ack_backlog = backlog;
743 sk->sk_state = TCP_LISTEN;
744 /* set credentials so connect can copy them */
749 unix_state_unlock(sk);
754 static int unix_release(struct socket *);
755 static int unix_bind(struct socket *, struct sockaddr *, int);
756 static int unix_stream_connect(struct socket *, struct sockaddr *,
757 int addr_len, int flags);
758 static int unix_socketpair(struct socket *, struct socket *);
759 static int unix_accept(struct socket *, struct socket *, int, bool);
760 static int unix_getname(struct socket *, struct sockaddr *, int);
761 static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
762 static __poll_t unix_dgram_poll(struct file *, struct socket *,
764 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
766 static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
768 static int unix_shutdown(struct socket *, int);
769 static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
770 static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
771 static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos,
772 struct pipe_inode_info *, size_t size,
774 static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
775 static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
776 static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
777 static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
778 static int unix_dgram_connect(struct socket *, struct sockaddr *,
780 static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
781 static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
784 static int unix_set_peek_off(struct sock *sk, int val)
786 struct unix_sock *u = unix_sk(sk);
788 if (mutex_lock_interruptible(&u->iolock))
791 WRITE_ONCE(sk->sk_peek_off, val);
792 mutex_unlock(&u->iolock);
797 #ifdef CONFIG_PROC_FS
798 static int unix_count_nr_fds(struct sock *sk)
804 spin_lock(&sk->sk_receive_queue.lock);
805 skb = skb_peek(&sk->sk_receive_queue);
807 u = unix_sk(skb->sk);
808 nr_fds += atomic_read(&u->scm_stat.nr_fds);
809 skb = skb_peek_next(skb, &sk->sk_receive_queue);
811 spin_unlock(&sk->sk_receive_queue.lock);
816 static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
818 struct sock *sk = sock->sk;
819 unsigned char s_state;
824 s_state = READ_ONCE(sk->sk_state);
827 /* SOCK_STREAM and SOCK_SEQPACKET sockets never change their
828 * sk_state after switching to TCP_ESTABLISHED or TCP_LISTEN.
829 * SOCK_DGRAM is ordinary. So, no lock is needed.
831 if (sock->type == SOCK_DGRAM || s_state == TCP_ESTABLISHED)
832 nr_fds = atomic_read(&u->scm_stat.nr_fds);
833 else if (s_state == TCP_LISTEN)
834 nr_fds = unix_count_nr_fds(sk);
836 seq_printf(m, "scm_fds: %u\n", nr_fds);
840 #define unix_show_fdinfo NULL
843 static const struct proto_ops unix_stream_ops = {
845 .owner = THIS_MODULE,
846 .release = unix_release,
848 .connect = unix_stream_connect,
849 .socketpair = unix_socketpair,
850 .accept = unix_accept,
851 .getname = unix_getname,
855 .compat_ioctl = unix_compat_ioctl,
857 .listen = unix_listen,
858 .shutdown = unix_shutdown,
859 .sendmsg = unix_stream_sendmsg,
860 .recvmsg = unix_stream_recvmsg,
861 .read_skb = unix_stream_read_skb,
862 .mmap = sock_no_mmap,
863 .splice_read = unix_stream_splice_read,
864 .set_peek_off = unix_set_peek_off,
865 .show_fdinfo = unix_show_fdinfo,
868 static const struct proto_ops unix_dgram_ops = {
870 .owner = THIS_MODULE,
871 .release = unix_release,
873 .connect = unix_dgram_connect,
874 .socketpair = unix_socketpair,
875 .accept = sock_no_accept,
876 .getname = unix_getname,
877 .poll = unix_dgram_poll,
880 .compat_ioctl = unix_compat_ioctl,
882 .listen = sock_no_listen,
883 .shutdown = unix_shutdown,
884 .sendmsg = unix_dgram_sendmsg,
885 .read_skb = unix_read_skb,
886 .recvmsg = unix_dgram_recvmsg,
887 .mmap = sock_no_mmap,
888 .set_peek_off = unix_set_peek_off,
889 .show_fdinfo = unix_show_fdinfo,
892 static const struct proto_ops unix_seqpacket_ops = {
894 .owner = THIS_MODULE,
895 .release = unix_release,
897 .connect = unix_stream_connect,
898 .socketpair = unix_socketpair,
899 .accept = unix_accept,
900 .getname = unix_getname,
901 .poll = unix_dgram_poll,
904 .compat_ioctl = unix_compat_ioctl,
906 .listen = unix_listen,
907 .shutdown = unix_shutdown,
908 .sendmsg = unix_seqpacket_sendmsg,
909 .recvmsg = unix_seqpacket_recvmsg,
910 .mmap = sock_no_mmap,
911 .set_peek_off = unix_set_peek_off,
912 .show_fdinfo = unix_show_fdinfo,
915 static void unix_close(struct sock *sk, long timeout)
917 /* Nothing to do here, unix socket does not need a ->close().
918 * This is merely for sockmap.
922 static void unix_unhash(struct sock *sk)
924 /* Nothing to do here, unix socket does not need a ->unhash().
925 * This is merely for sockmap.
929 static bool unix_bpf_bypass_getsockopt(int level, int optname)
931 if (level == SOL_SOCKET) {
943 struct proto unix_dgram_proto = {
945 .owner = THIS_MODULE,
946 .obj_size = sizeof(struct unix_sock),
948 .bpf_bypass_getsockopt = unix_bpf_bypass_getsockopt,
949 #ifdef CONFIG_BPF_SYSCALL
950 .psock_update_sk_prot = unix_dgram_bpf_update_proto,
954 struct proto unix_stream_proto = {
955 .name = "UNIX-STREAM",
956 .owner = THIS_MODULE,
957 .obj_size = sizeof(struct unix_sock),
959 .unhash = unix_unhash,
960 .bpf_bypass_getsockopt = unix_bpf_bypass_getsockopt,
961 #ifdef CONFIG_BPF_SYSCALL
962 .psock_update_sk_prot = unix_stream_bpf_update_proto,
966 static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type)
972 atomic_long_inc(&unix_nr_socks);
973 if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) {
978 if (type == SOCK_STREAM)
979 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_stream_proto, kern);
980 else /*dgram and seqpacket */
981 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern);
988 sock_init_data(sock, sk);
990 sk->sk_hash = unix_unbound_hash(sk);
991 sk->sk_allocation = GFP_KERNEL_ACCOUNT;
992 sk->sk_write_space = unix_write_space;
993 sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
994 sk->sk_destruct = unix_sock_destructor;
996 u->path.dentry = NULL;
998 spin_lock_init(&u->lock);
999 atomic_long_set(&u->inflight, 0);
1000 INIT_LIST_HEAD(&u->link);
1001 mutex_init(&u->iolock); /* single task reading lock */
1002 mutex_init(&u->bindlock); /* single task binding lock */
1003 init_waitqueue_head(&u->peer_wait);
1004 init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
1005 memset(&u->scm_stat, 0, sizeof(struct scm_stat));
1006 unix_insert_unbound_socket(net, sk);
1008 sock_prot_inuse_add(net, sk->sk_prot, 1);
1013 atomic_long_dec(&unix_nr_socks);
1014 return ERR_PTR(err);
1017 static int unix_create(struct net *net, struct socket *sock, int protocol,
1022 if (protocol && protocol != PF_UNIX)
1023 return -EPROTONOSUPPORT;
1025 sock->state = SS_UNCONNECTED;
1027 switch (sock->type) {
1029 sock->ops = &unix_stream_ops;
1032 * Believe it or not BSD has AF_UNIX, SOCK_RAW though
1036 sock->type = SOCK_DGRAM;
1039 sock->ops = &unix_dgram_ops;
1041 case SOCK_SEQPACKET:
1042 sock->ops = &unix_seqpacket_ops;
1045 return -ESOCKTNOSUPPORT;
1048 sk = unix_create1(net, sock, kern, sock->type);
1055 static int unix_release(struct socket *sock)
1057 struct sock *sk = sock->sk;
1062 sk->sk_prot->close(sk, 0);
1063 unix_release_sock(sk, 0);
1069 static struct sock *unix_find_bsd(struct sockaddr_un *sunaddr, int addr_len,
1072 struct inode *inode;
1077 unix_mkname_bsd(sunaddr, addr_len);
1078 err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path);
1082 err = path_permission(&path, MAY_WRITE);
1086 err = -ECONNREFUSED;
1087 inode = d_backing_inode(path.dentry);
1088 if (!S_ISSOCK(inode->i_mode))
1091 sk = unix_find_socket_byinode(inode);
1096 if (sk->sk_type == type)
1110 return ERR_PTR(err);
1113 static struct sock *unix_find_abstract(struct net *net,
1114 struct sockaddr_un *sunaddr,
1115 int addr_len, int type)
1117 unsigned int hash = unix_abstract_hash(sunaddr, addr_len, type);
1118 struct dentry *dentry;
1121 sk = unix_find_socket_byname(net, sunaddr, addr_len, hash);
1123 return ERR_PTR(-ECONNREFUSED);
1125 dentry = unix_sk(sk)->path.dentry;
1127 touch_atime(&unix_sk(sk)->path);
1132 static struct sock *unix_find_other(struct net *net,
1133 struct sockaddr_un *sunaddr,
1134 int addr_len, int type)
1138 if (sunaddr->sun_path[0])
1139 sk = unix_find_bsd(sunaddr, addr_len, type);
1141 sk = unix_find_abstract(net, sunaddr, addr_len, type);
1146 static int unix_autobind(struct sock *sk)
1148 unsigned int new_hash, old_hash = sk->sk_hash;
1149 struct unix_sock *u = unix_sk(sk);
1150 struct net *net = sock_net(sk);
1151 struct unix_address *addr;
1152 u32 lastnum, ordernum;
1155 err = mutex_lock_interruptible(&u->bindlock);
1163 addr = kzalloc(sizeof(*addr) +
1164 offsetof(struct sockaddr_un, sun_path) + 16, GFP_KERNEL);
1168 addr->len = offsetof(struct sockaddr_un, sun_path) + 6;
1169 addr->name->sun_family = AF_UNIX;
1170 refcount_set(&addr->refcnt, 1);
1172 ordernum = get_random_u32();
1173 lastnum = ordernum & 0xFFFFF;
1175 ordernum = (ordernum + 1) & 0xFFFFF;
1176 sprintf(addr->name->sun_path + 1, "%05x", ordernum);
1178 new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
1179 unix_table_double_lock(net, old_hash, new_hash);
1181 if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash)) {
1182 unix_table_double_unlock(net, old_hash, new_hash);
1184 /* __unix_find_socket_byname() may take long time if many names
1185 * are already in use.
1189 if (ordernum == lastnum) {
1190 /* Give up if all names seems to be in use. */
1192 unix_release_addr(addr);
1199 __unix_set_addr_hash(net, sk, addr, new_hash);
1200 unix_table_double_unlock(net, old_hash, new_hash);
1203 out: mutex_unlock(&u->bindlock);
1207 static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
1210 umode_t mode = S_IFSOCK |
1211 (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
1212 unsigned int new_hash, old_hash = sk->sk_hash;
1213 struct unix_sock *u = unix_sk(sk);
1214 struct net *net = sock_net(sk);
1215 struct mnt_idmap *idmap;
1216 struct unix_address *addr;
1217 struct dentry *dentry;
1221 addr_len = unix_mkname_bsd(sunaddr, addr_len);
1222 addr = unix_create_addr(sunaddr, addr_len);
1227 * Get the parent directory, calculate the hash for last
1230 dentry = kern_path_create(AT_FDCWD, addr->name->sun_path, &parent, 0);
1231 if (IS_ERR(dentry)) {
1232 err = PTR_ERR(dentry);
1237 * All right, let's create it.
1239 idmap = mnt_idmap(parent.mnt);
1240 err = security_path_mknod(&parent, dentry, mode, 0);
1242 err = vfs_mknod(idmap, d_inode(parent.dentry), dentry, mode, 0);
1245 err = mutex_lock_interruptible(&u->bindlock);
1251 new_hash = unix_bsd_hash(d_backing_inode(dentry));
1252 unix_table_double_lock(net, old_hash, new_hash);
1253 u->path.mnt = mntget(parent.mnt);
1254 u->path.dentry = dget(dentry);
1255 __unix_set_addr_hash(net, sk, addr, new_hash);
1256 unix_table_double_unlock(net, old_hash, new_hash);
1257 unix_insert_bsd_socket(sk);
1258 mutex_unlock(&u->bindlock);
1259 done_path_create(&parent, dentry);
1263 mutex_unlock(&u->bindlock);
1266 /* failed after successful mknod? unlink what we'd created... */
1267 vfs_unlink(idmap, d_inode(parent.dentry), dentry, NULL);
1269 done_path_create(&parent, dentry);
1271 unix_release_addr(addr);
1272 return err == -EEXIST ? -EADDRINUSE : err;
1275 static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
1278 unsigned int new_hash, old_hash = sk->sk_hash;
1279 struct unix_sock *u = unix_sk(sk);
1280 struct net *net = sock_net(sk);
1281 struct unix_address *addr;
1284 addr = unix_create_addr(sunaddr, addr_len);
1288 err = mutex_lock_interruptible(&u->bindlock);
1297 new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
1298 unix_table_double_lock(net, old_hash, new_hash);
1300 if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash))
1303 __unix_set_addr_hash(net, sk, addr, new_hash);
1304 unix_table_double_unlock(net, old_hash, new_hash);
1305 mutex_unlock(&u->bindlock);
1309 unix_table_double_unlock(net, old_hash, new_hash);
1312 mutex_unlock(&u->bindlock);
1314 unix_release_addr(addr);
1318 static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1320 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1321 struct sock *sk = sock->sk;
1324 if (addr_len == offsetof(struct sockaddr_un, sun_path) &&
1325 sunaddr->sun_family == AF_UNIX)
1326 return unix_autobind(sk);
1328 err = unix_validate_addr(sunaddr, addr_len);
1332 if (sunaddr->sun_path[0])
1333 err = unix_bind_bsd(sk, sunaddr, addr_len);
1335 err = unix_bind_abstract(sk, sunaddr, addr_len);
1340 static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
1342 if (unlikely(sk1 == sk2) || !sk2) {
1343 unix_state_lock(sk1);
1347 unix_state_lock(sk1);
1348 unix_state_lock_nested(sk2);
1350 unix_state_lock(sk2);
1351 unix_state_lock_nested(sk1);
1355 static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
1357 if (unlikely(sk1 == sk2) || !sk2) {
1358 unix_state_unlock(sk1);
1361 unix_state_unlock(sk1);
1362 unix_state_unlock(sk2);
1365 static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
1366 int alen, int flags)
1368 struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
1369 struct sock *sk = sock->sk;
1374 if (alen < offsetofend(struct sockaddr, sa_family))
1377 if (addr->sa_family != AF_UNSPEC) {
1378 err = unix_validate_addr(sunaddr, alen);
1382 if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
1383 test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
1384 !unix_sk(sk)->addr) {
1385 err = unix_autobind(sk);
1391 other = unix_find_other(sock_net(sk), sunaddr, alen, sock->type);
1392 if (IS_ERR(other)) {
1393 err = PTR_ERR(other);
1397 unix_state_double_lock(sk, other);
1399 /* Apparently VFS overslept socket death. Retry. */
1400 if (sock_flag(other, SOCK_DEAD)) {
1401 unix_state_double_unlock(sk, other);
1407 if (!unix_may_send(sk, other))
1410 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1414 sk->sk_state = other->sk_state = TCP_ESTABLISHED;
1417 * 1003.1g breaking connected state with AF_UNSPEC
1420 unix_state_double_lock(sk, other);
1424 * If it was connected, reconnect.
1426 if (unix_peer(sk)) {
1427 struct sock *old_peer = unix_peer(sk);
1429 unix_peer(sk) = other;
1431 sk->sk_state = TCP_CLOSE;
1432 unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
1434 unix_state_double_unlock(sk, other);
1436 if (other != old_peer)
1437 unix_dgram_disconnected(sk, old_peer);
1440 unix_peer(sk) = other;
1441 unix_state_double_unlock(sk, other);
1447 unix_state_double_unlock(sk, other);
1453 static long unix_wait_for_peer(struct sock *other, long timeo)
1454 __releases(&unix_sk(other)->lock)
1456 struct unix_sock *u = unix_sk(other);
1460 prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1462 sched = !sock_flag(other, SOCK_DEAD) &&
1463 !(other->sk_shutdown & RCV_SHUTDOWN) &&
1464 unix_recvq_full_lockless(other);
1466 unix_state_unlock(other);
1469 timeo = schedule_timeout(timeo);
1471 finish_wait(&u->peer_wait, &wait);
1475 static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1476 int addr_len, int flags)
1478 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1479 struct sock *sk = sock->sk, *newsk = NULL, *other = NULL;
1480 struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1481 struct net *net = sock_net(sk);
1482 struct sk_buff *skb = NULL;
1487 err = unix_validate_addr(sunaddr, addr_len);
1491 if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
1492 test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) {
1493 err = unix_autobind(sk);
1498 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
/* First of all allocate resources.
 * If we were to do it after the state is locked,
 * we would have to recheck everything again in any case.
 */
1505 /* create new sock for complete connection */
1506 newsk = unix_create1(net, NULL, 0, sock->type);
1507 if (IS_ERR(newsk)) {
1508 err = PTR_ERR(newsk);
1515 /* Allocate skb for sending to listening sock */
1516 skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1521 /* Find listening sock. */
1522 other = unix_find_other(net, sunaddr, addr_len, sk->sk_type);
1523 if (IS_ERR(other)) {
1524 err = PTR_ERR(other);
1529 /* Latch state of peer */
1530 unix_state_lock(other);
1532 /* Apparently VFS overslept socket death. Retry. */
1533 if (sock_flag(other, SOCK_DEAD)) {
1534 unix_state_unlock(other);
1539 err = -ECONNREFUSED;
1540 if (other->sk_state != TCP_LISTEN)
1542 if (other->sk_shutdown & RCV_SHUTDOWN)
1545 if (unix_recvq_full(other)) {
1550 timeo = unix_wait_for_peer(other, timeo);
1552 err = sock_intr_errno(timeo);
1553 if (signal_pending(current))
/* This is a tricky place. We need to grab our state lock and cannot
 * drop the lock on the peer. It is dangerous because a deadlock is
 * possible. The connect-to-self case and simultaneous
 * connect attempts are eliminated by checking the socket
 * state: other is TCP_LISTEN, and if sk were TCP_LISTEN we
 * would have checked this before attempting to grab the lock.
 *
 * Well, and we have to recheck the state after the socket is locked.
 */
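/* Sketch of the lock ordering used below (illustrative):
 *
 *	unix_state_lock(other)		other verified to be TCP_LISTEN
 *	  st = sk->sk_state		only st == TCP_CLOSE proceeds
 *	  unix_state_lock_nested(sk)	same lock class, hence _nested
 *	    recheck sk->sk_state == st, otherwise unlock and restart
 *
 * A listening sk therefore bails out before the nested lock is taken, so
 * the two state locks can never be acquired in the opposite order.
 */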
1574 /* This is ok... continue with connect */
1576 case TCP_ESTABLISHED:
1577 /* Socket is already connected */
1585 unix_state_lock_nested(sk);
1587 if (sk->sk_state != st) {
1588 unix_state_unlock(sk);
1589 unix_state_unlock(other);
1594 err = security_unix_stream_connect(sk, other, newsk);
1596 unix_state_unlock(sk);
/* The way is open! Quickly set all the necessary fields... */
1603 unix_peer(newsk) = sk;
1604 newsk->sk_state = TCP_ESTABLISHED;
1605 newsk->sk_type = sk->sk_type;
1606 init_peercred(newsk);
1607 newu = unix_sk(newsk);
1608 RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1609 otheru = unix_sk(other);
1611 /* copy address information from listening to new sock
1613 * The contents of *(otheru->addr) and otheru->path
1614 * are seen fully set up here, since we have found
1615 * otheru in hash under its lock. Insertion into the
1616 * hash chain we'd found it in had been done in an
1617 * earlier critical area protected by the chain's lock,
1618 * the same one where we'd set *(otheru->addr) contents,
1619 * as well as otheru->path and otheru->addr itself.
1621 * Using smp_store_release() here to set newu->addr
1622 * is enough to make those stores, as well as stores
1623 * to newu->path visible to anyone who gets newu->addr
 * by smp_load_acquire(). IOW, the same guarantees
1625 * as for unix_sock instances bound in unix_bind() or
1626 * in unix_autobind().
1628 if (otheru->path.dentry) {
1629 path_get(&otheru->path);
1630 newu->path = otheru->path;
1632 refcount_inc(&otheru->addr->refcnt);
1633 smp_store_release(&newu->addr, otheru->addr);
1635 /* Set credentials */
1636 copy_peercred(sk, other);
1638 sock->state = SS_CONNECTED;
1639 sk->sk_state = TCP_ESTABLISHED;
1642 smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
1643 unix_peer(sk) = newsk;
1645 unix_state_unlock(sk);
1647 /* take ten and send info to listening sock */
1648 spin_lock(&other->sk_receive_queue.lock);
1649 __skb_queue_tail(&other->sk_receive_queue, skb);
1650 spin_unlock(&other->sk_receive_queue.lock);
1651 unix_state_unlock(other);
1652 other->sk_data_ready(other);
1658 unix_state_unlock(other);
1663 unix_release_sock(newsk, 0);
1669 static int unix_socketpair(struct socket *socka, struct socket *sockb)
1671 struct sock *ska = socka->sk, *skb = sockb->sk;
1673 /* Join our sockets back to back */
1676 unix_peer(ska) = skb;
1677 unix_peer(skb) = ska;
1681 ska->sk_state = TCP_ESTABLISHED;
1682 skb->sk_state = TCP_ESTABLISHED;
1683 socka->state = SS_CONNECTED;
1684 sockb->state = SS_CONNECTED;
1688 static void unix_sock_inherit_flags(const struct socket *old,
1691 if (test_bit(SOCK_PASSCRED, &old->flags))
1692 set_bit(SOCK_PASSCRED, &new->flags);
1693 if (test_bit(SOCK_PASSPIDFD, &old->flags))
1694 set_bit(SOCK_PASSPIDFD, &new->flags);
1695 if (test_bit(SOCK_PASSSEC, &old->flags))
1696 set_bit(SOCK_PASSSEC, &new->flags);
1699 static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
1702 struct sock *sk = sock->sk;
1704 struct sk_buff *skb;
1708 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1712 if (sk->sk_state != TCP_LISTEN)
1715 /* If socket state is TCP_LISTEN it cannot change (for now...),
1716 * so that no locks are necessary.
1719 skb = skb_recv_datagram(sk, (flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
1722 /* This means receive shutdown. */
1729 skb_free_datagram(sk, skb);
1730 wake_up_interruptible(&unix_sk(sk)->peer_wait);
1732 /* attach accepted sock to socket */
1733 unix_state_lock(tsk);
1734 newsock->state = SS_CONNECTED;
1735 unix_sock_inherit_flags(sock, newsock);
1736 sock_graft(tsk, newsock);
1737 unix_state_unlock(tsk);
1745 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
1747 struct sock *sk = sock->sk;
1748 struct unix_address *addr;
1749 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1753 sk = unix_peer_get(sk);
1763 addr = smp_load_acquire(&unix_sk(sk)->addr);
1765 sunaddr->sun_family = AF_UNIX;
1766 sunaddr->sun_path[0] = 0;
1767 err = offsetof(struct sockaddr_un, sun_path);
1770 memcpy(sunaddr, addr->name, addr->len);
1777 static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
1779 scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1782 * Garbage collection of unix sockets starts by selecting a set of
1783 * candidate sockets which have reference only from being in flight
1784 * (total_refs == inflight_refs). This condition is checked once during
1785 * the candidate collection phase, and candidates are marked as such, so
1786 * that non-candidates can later be ignored. While inflight_refs is
1787 * protected by unix_gc_lock, total_refs (file count) is not, hence this
1788 * is an instantaneous decision.
1790 * Once a candidate, however, the socket must not be reinstalled into a
1791 * file descriptor while the garbage collection is in progress.
1793 * If the above conditions are met, then the directed graph of
1794 * candidates (*) does not change while unix_gc_lock is held.
 * Any operation that changes the file count through file descriptors
 * (dup, close, sendmsg) does not change the graph since candidates are
 * not installed in fds.
 *
 * Dequeuing a candidate via recvmsg would install it into an fd, but
 * that takes unix_gc_lock to decrement the inflight count, so it's
 * serialized with garbage collection.
1804 * MSG_PEEK is special in that it does not change the inflight count,
1805 * yet does install the socket into an fd. The following lock/unlock
1806 * pair is to ensure serialization with garbage collection. It must be
1807 * done between incrementing the file count and installing the file into
1810 * If garbage collection starts after the barrier provided by the
1811 * lock/unlock, then it will see the elevated refcount and not mark this
1812 * as a candidate. If a garbage collection is already in progress
1813 * before the file count was incremented, then the lock/unlock pair will
1814 * ensure that garbage collection is finished before progressing to
1815 * installing the fd.
1817 * (*) A -> B where B is on the queue of A or B is on the queue of C
1818 * which is on the queue of listening socket A.
1820 spin_lock(&unix_gc_lock);
1821 spin_unlock(&unix_gc_lock);
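/* Concrete example of the race the lock/unlock pair above closes
 * (illustrative): task A has just bumped the file count of a peeked socket S
 * and is about to install it into its fd table, while a GC pass that sampled
 * S's refcounts before the bump is still running and could classify S as
 * garbage. Taking and releasing unix_gc_lock here makes A wait until that
 * pass finishes, so S is either installed after the pass or seen with its
 * elevated file count by the next one.
 */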
1824 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1828 UNIXCB(skb).pid = get_pid(scm->pid);
1829 UNIXCB(skb).uid = scm->creds.uid;
1830 UNIXCB(skb).gid = scm->creds.gid;
1831 UNIXCB(skb).fp = NULL;
1832 unix_get_secdata(scm, skb);
1833 if (scm->fp && send_fds)
1834 err = unix_attach_fds(scm, skb);
1836 skb->destructor = unix_destruct_scm;
1840 static bool unix_passcred_enabled(const struct socket *sock,
1841 const struct sock *other)
1843 return test_bit(SOCK_PASSCRED, &sock->flags) ||
1844 test_bit(SOCK_PASSPIDFD, &sock->flags) ||
1845 !other->sk_socket ||
1846 test_bit(SOCK_PASSCRED, &other->sk_socket->flags) ||
1847 test_bit(SOCK_PASSPIDFD, &other->sk_socket->flags);
/*
 * Some apps rely on write() giving SCM_CREDENTIALS.
 * We include credentials if the source or destination socket
 * asserted SOCK_PASSCRED.
 */
1855 static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1856 const struct sock *other)
1858 if (UNIXCB(skb).pid)
1860 if (unix_passcred_enabled(sock, other)) {
1861 UNIXCB(skb).pid = get_pid(task_tgid(current));
1862 current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
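/* Illustrative only: the receiving side opts in to these credentials from
 * userspace with the standard socket API (not part of this file), e.g.
 *
 *	int on = 1;
 *	setsockopt(fd, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
 *
 * after which recvmsg() carries an SCM_CREDENTIALS cmsg with the pid/uid/gid
 * filled in above.
 */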
1866 static bool unix_skb_scm_eq(struct sk_buff *skb,
1867 struct scm_cookie *scm)
1869 return UNIXCB(skb).pid == scm->pid &&
1870 uid_eq(UNIXCB(skb).uid, scm->creds.uid) &&
1871 gid_eq(UNIXCB(skb).gid, scm->creds.gid) &&
1872 unix_secdata_eq(scm, skb);
1875 static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
1877 struct scm_fp_list *fp = UNIXCB(skb).fp;
1878 struct unix_sock *u = unix_sk(sk);
1880 if (unlikely(fp && fp->count))
1881 atomic_add(fp->count, &u->scm_stat.nr_fds);
1884 static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
1886 struct scm_fp_list *fp = UNIXCB(skb).fp;
1887 struct unix_sock *u = unix_sk(sk);
1889 if (unlikely(fp && fp->count))
1890 atomic_sub(fp->count, &u->scm_stat.nr_fds);
1894 * Send AF_UNIX data.
1897 static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
1900 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
1901 struct sock *sk = sock->sk, *other = NULL;
1902 struct unix_sock *u = unix_sk(sk);
1903 struct scm_cookie scm;
1904 struct sk_buff *skb;
1911 err = scm_send(sock, msg, &scm, false);
1916 if (msg->msg_flags&MSG_OOB)
1919 if (msg->msg_namelen) {
1920 err = unix_validate_addr(sunaddr, msg->msg_namelen);
1926 other = unix_peer_get(sk);
1931 if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
1932 test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) {
1933 err = unix_autobind(sk);
1939 if (len > sk->sk_sndbuf - 32)
1942 if (len > SKB_MAX_ALLOC) {
1943 data_len = min_t(size_t,
1944 len - SKB_MAX_ALLOC,
1945 MAX_SKB_FRAGS * PAGE_SIZE);
1946 data_len = PAGE_ALIGN(data_len);
1948 BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
1951 skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
1952 msg->msg_flags & MSG_DONTWAIT, &err,
1953 PAGE_ALLOC_COSTLY_ORDER);
1957 err = unix_scm_to_skb(&scm, skb, true);
1961 skb_put(skb, len - data_len);
1962 skb->data_len = data_len;
1964 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
1968 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1973 if (sunaddr == NULL)
1976 other = unix_find_other(sock_net(sk), sunaddr, msg->msg_namelen,
1978 if (IS_ERR(other)) {
1979 err = PTR_ERR(other);
1985 if (sk_filter(other, skb) < 0) {
1986 /* Toss the packet but do not return any error to the sender */
1992 unix_state_lock(other);
1995 if (!unix_may_send(sk, other))
1998 if (unlikely(sock_flag(other, SOCK_DEAD))) {
2000 * Check with 1003.1g - what should
2003 unix_state_unlock(other);
2007 unix_state_lock(sk);
2010 if (sk->sk_type == SOCK_SEQPACKET) {
/* We are here only when racing with unix_release_sock(),
 * which is clearing @other. Never change the state to TCP_CLOSE,
 * unlike what SOCK_DGRAM wants.
 */
2015 unix_state_unlock(sk);
2017 } else if (unix_peer(sk) == other) {
2018 unix_peer(sk) = NULL;
2019 unix_dgram_peer_wake_disconnect_wakeup(sk, other);
2021 sk->sk_state = TCP_CLOSE;
2022 unix_state_unlock(sk);
2024 unix_dgram_disconnected(sk, other);
2026 err = -ECONNREFUSED;
2028 unix_state_unlock(sk);
2038 if (other->sk_shutdown & RCV_SHUTDOWN)
2041 if (sk->sk_type != SOCK_SEQPACKET) {
2042 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
2047 /* other == sk && unix_peer(other) != sk if
2048 * - unix_peer(sk) == NULL, destination address bound to sk
2049 * - unix_peer(sk) == sk by time of get but disconnected before lock
2052 unlikely(unix_peer(other) != sk &&
2053 unix_recvq_full_lockless(other))) {
2055 timeo = unix_wait_for_peer(other, timeo);
2057 err = sock_intr_errno(timeo);
2058 if (signal_pending(current))
2065 unix_state_unlock(other);
2066 unix_state_double_lock(sk, other);
2069 if (unix_peer(sk) != other ||
2070 unix_dgram_peer_wake_me(sk, other)) {
2078 goto restart_locked;
2082 if (unlikely(sk_locked))
2083 unix_state_unlock(sk);
2085 if (sock_flag(other, SOCK_RCVTSTAMP))
2086 __net_timestamp(skb);
2087 maybe_add_creds(skb, sock, other);
2088 scm_stat_add(other, skb);
2089 skb_queue_tail(&other->sk_receive_queue, skb);
2090 unix_state_unlock(other);
2091 other->sk_data_ready(other);
2098 unix_state_unlock(sk);
2099 unix_state_unlock(other);
/* We use paged skbs for stream sockets, and limit occupancy to 32768
 * bytes, and a minimum of a full page.
 */
2112 #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
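/* Worked example (illustrative): with 4 KiB pages, get_order(32768) == 3, so
 * UNIX_SKB_FRAGS_SZ == PAGE_SIZE << 3 == 32 KiB; a single stream skb is then
 * built from at most 32 KiB of page fragments on top of the linear head
 * allowed by SKB_MAX_HEAD(0) in unix_stream_sendmsg() below.
 */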
2114 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2115 static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other,
2116 struct scm_cookie *scm, bool fds_sent)
2118 struct unix_sock *ousk = unix_sk(other);
2119 struct sk_buff *skb;
2122 skb = sock_alloc_send_skb(sock->sk, 1, msg->msg_flags & MSG_DONTWAIT, &err);
2127 err = unix_scm_to_skb(scm, skb, !fds_sent);
2133 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1);
2140 unix_state_lock(other);
2142 if (sock_flag(other, SOCK_DEAD) ||
2143 (other->sk_shutdown & RCV_SHUTDOWN)) {
2144 unix_state_unlock(other);
2149 maybe_add_creds(skb, sock, other);
2153 consume_skb(ousk->oob_skb);
2155 WRITE_ONCE(ousk->oob_skb, skb);
2157 scm_stat_add(other, skb);
2158 skb_queue_tail(&other->sk_receive_queue, skb);
2159 sk_send_sigurg(other);
2160 unix_state_unlock(other);
2161 other->sk_data_ready(other);
2167 static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
2170 struct sock *sk = sock->sk;
2171 struct sock *other = NULL;
2173 struct sk_buff *skb;
2175 struct scm_cookie scm;
2176 bool fds_sent = false;
2180 err = scm_send(sock, msg, &scm, false);
2185 if (msg->msg_flags & MSG_OOB) {
2186 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2194 if (msg->msg_namelen) {
2195 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
2199 other = unix_peer(sk);
2204 if (sk->sk_shutdown & SEND_SHUTDOWN)
2207 while (sent < len) {
2210 if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
2211 skb = sock_alloc_send_pskb(sk, 0, 0,
2212 msg->msg_flags & MSG_DONTWAIT,
2215 /* Keep two messages in the pipe so it schedules better */
2216 size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
2218 /* allow fallback to order-0 allocations */
2219 size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
2221 data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
2223 data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
2225 skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
2226 msg->msg_flags & MSG_DONTWAIT, &err,
2227 get_order(UNIX_SKB_FRAGS_SZ));
2232 /* Only send the fds in the first buffer */
2233 err = unix_scm_to_skb(&scm, skb, !fds_sent);
2240 if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
2241 err = skb_splice_from_iter(skb, &msg->msg_iter, size,
2248 refcount_add(size, &sk->sk_wmem_alloc);
2250 skb_put(skb, size - data_len);
2251 skb->data_len = data_len;
2253 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
2260 unix_state_lock(other);
2262 if (sock_flag(other, SOCK_DEAD) ||
2263 (other->sk_shutdown & RCV_SHUTDOWN))
2266 maybe_add_creds(skb, sock, other);
2267 scm_stat_add(other, skb);
2268 skb_queue_tail(&other->sk_receive_queue, skb);
2269 unix_state_unlock(other);
2270 other->sk_data_ready(other);
2274 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2275 if (msg->msg_flags & MSG_OOB) {
2276 err = queue_oob(sock, msg, other, &scm, fds_sent);
2288 unix_state_unlock(other);
2291 if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
2292 send_sig(SIGPIPE, current, 0);
2296 return sent ? : err;
2299 static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
2303 struct sock *sk = sock->sk;
2305 err = sock_error(sk);
2309 if (sk->sk_state != TCP_ESTABLISHED)
2312 if (msg->msg_namelen)
2313 msg->msg_namelen = 0;
2315 return unix_dgram_sendmsg(sock, msg, len);
2318 static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
2319 size_t size, int flags)
2321 struct sock *sk = sock->sk;
2323 if (sk->sk_state != TCP_ESTABLISHED)
2326 return unix_dgram_recvmsg(sock, msg, size, flags);
2329 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
2331 struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
2334 msg->msg_namelen = addr->len;
2335 memcpy(msg->msg_name, addr->name, addr->len);
2339 int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
2342 struct scm_cookie scm;
2343 struct socket *sock = sk->sk_socket;
2344 struct unix_sock *u = unix_sk(sk);
2345 struct sk_buff *skb, *last;
2354 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2357 mutex_lock(&u->iolock);
2359 skip = sk_peek_offset(sk, flags);
2360 skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags,
2361 &skip, &err, &last);
2363 if (!(flags & MSG_PEEK))
2364 scm_stat_del(sk, skb);
2368 mutex_unlock(&u->iolock);
2373 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
2374 &err, &timeo, last));
2376 if (!skb) { /* implies iolock unlocked */
2377 unix_state_lock(sk);
2378 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
2379 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
2380 (sk->sk_shutdown & RCV_SHUTDOWN))
2382 unix_state_unlock(sk);
2386 if (wq_has_sleeper(&u->peer_wait))
2387 wake_up_interruptible_sync_poll(&u->peer_wait,
2388 EPOLLOUT | EPOLLWRNORM |
2392 unix_copy_addr(msg, skb->sk);
2394 if (size > skb->len - skip)
2395 size = skb->len - skip;
2396 else if (size < skb->len - skip)
2397 msg->msg_flags |= MSG_TRUNC;
2399 err = skb_copy_datagram_msg(skb, skip, msg, size);
2403 if (sock_flag(sk, SOCK_RCVTSTAMP))
2404 __sock_recv_timestamp(msg, sk, skb);
2406 memset(&scm, 0, sizeof(scm));
2408 scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2409 unix_set_secdata(&scm, skb);
2411 if (!(flags & MSG_PEEK)) {
2413 unix_detach_fds(&scm, skb);
2415 sk_peek_offset_bwd(sk, skb->len);
/* It is questionable: on PEEK we could:
 *  - not return fds - good, but too simple 8)
 *  - return fds, and not return them on read (old strategy,
 *    apparently wrong)
 *  - clone fds (I chose it for now, it is the most universal
 *    solution)
 *
 * POSIX 1003.1g does not actually define this clearly
 * at all. POSIX 1003.1g doesn't define a lot of things
 * at all. 8)
 */
2430 sk_peek_offset_fwd(sk, size);
2433 unix_peek_fds(&scm, skb);
2435 err = (flags & MSG_TRUNC) ? skb->len - skip : size;
2437 scm_recv_unix(sock, msg, &scm, flags);
2440 skb_free_datagram(sk, skb);
2441 mutex_unlock(&u->iolock);
2446 static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
2449 struct sock *sk = sock->sk;
2451 #ifdef CONFIG_BPF_SYSCALL
2452 const struct proto *prot = READ_ONCE(sk->sk_prot);
2454 if (prot != &unix_dgram_proto)
2455 return prot->recvmsg(sk, msg, size, flags, NULL);
2457 return __unix_dgram_recvmsg(sk, msg, size, flags);
2460 static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
2462 struct unix_sock *u = unix_sk(sk);
2463 struct sk_buff *skb;
2466 mutex_lock(&u->iolock);
2467 skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
2468 mutex_unlock(&u->iolock);
2472 return recv_actor(sk, skb);
/*
 * Sleep until more data has arrived. But check for races.
 */
2478 static long unix_stream_data_wait(struct sock *sk, long timeo,
2479 struct sk_buff *last, unsigned int last_len,
2482 unsigned int state = TASK_INTERRUPTIBLE | freezable * TASK_FREEZABLE;
2483 struct sk_buff *tail;
2486 unix_state_lock(sk);
2489 prepare_to_wait(sk_sleep(sk), &wait, state);
2491 tail = skb_peek_tail(&sk->sk_receive_queue);
2493 (tail && tail->len != last_len) ||
2495 (sk->sk_shutdown & RCV_SHUTDOWN) ||
2496 signal_pending(current) ||
2500 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2501 unix_state_unlock(sk);
2502 timeo = schedule_timeout(timeo);
2503 unix_state_lock(sk);
2505 if (sock_flag(sk, SOCK_DEAD))
2508 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2511 finish_wait(sk_sleep(sk), &wait);
2512 unix_state_unlock(sk);
2516 static unsigned int unix_skb_len(const struct sk_buff *skb)
2518 return skb->len - UNIXCB(skb).consumed;
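/* Worked example (illustrative): a 100-byte skb of which 40 bytes were
 * already consumed by a previous partial read reports 60 readable bytes
 * here; the stream read path below advances UNIXCB(skb).consumed instead of
 * trimming the skb, so a partially read skb stays on the queue until it is
 * fully consumed.
 */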
2521 struct unix_stream_read_state {
2522 int (*recv_actor)(struct sk_buff *, int, int,
2523 struct unix_stream_read_state *);
2524 struct socket *socket;
2526 struct pipe_inode_info *pipe;
2529 unsigned int splice_flags;
2532 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2533 static int unix_stream_recv_urg(struct unix_stream_read_state *state)
2535 struct socket *sock = state->socket;
2536 struct sock *sk = sock->sk;
2537 struct unix_sock *u = unix_sk(sk);
2539 struct sk_buff *oob_skb;
2541 mutex_lock(&u->iolock);
2542 unix_state_lock(sk);
2544 if (sock_flag(sk, SOCK_URGINLINE) || !u->oob_skb) {
2545 unix_state_unlock(sk);
2546 mutex_unlock(&u->iolock);
2550 oob_skb = u->oob_skb;
2552 if (!(state->flags & MSG_PEEK))
2553 WRITE_ONCE(u->oob_skb, NULL);
2556 unix_state_unlock(sk);
2558 chunk = state->recv_actor(oob_skb, 0, chunk, state);
2560 if (!(state->flags & MSG_PEEK))
2561 UNIXCB(oob_skb).consumed += 1;
2563 consume_skb(oob_skb);
2565 mutex_unlock(&u->iolock);
2570 state->msg->msg_flags |= MSG_OOB;
2574 static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
2575 int flags, int copied)
2577 struct unix_sock *u = unix_sk(sk);
2579 if (!unix_skb_len(skb) && !(flags & MSG_PEEK)) {
2580 skb_unlink(skb, &sk->sk_receive_queue);
2584 if (skb == u->oob_skb) {
2587 } else if (sock_flag(sk, SOCK_URGINLINE)) {
2588 if (!(flags & MSG_PEEK)) {
2589 WRITE_ONCE(u->oob_skb, NULL);
2592 } else if (!(flags & MSG_PEEK)) {
2593 skb_unlink(skb, &sk->sk_receive_queue);
2595 skb = skb_peek(&sk->sk_receive_queue);
2603 static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
if (unlikely(sk->sk_state != TCP_ESTABLISHED))
	return -ENOTCONN;
2608 return unix_read_skb(sk, recv_actor);
2611 static int unix_stream_read_generic(struct unix_stream_read_state *state,
2614 struct scm_cookie scm;
2615 struct socket *sock = state->socket;
2616 struct sock *sk = sock->sk;
2617 struct unix_sock *u = unix_sk(sk);
2619 int flags = state->flags;
2620 int noblock = flags & MSG_DONTWAIT;
2621 bool check_creds = false;
2626 size_t size = state->size;
2627 unsigned int last_len;
2629 if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
2634 if (unlikely(flags & MSG_OOB)) {
2636 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2637 err = unix_stream_recv_urg(state);
2642 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
2643 timeo = sock_rcvtimeo(sk, noblock);
2645 memset(&scm, 0, sizeof(scm));
/* Lock the socket to prevent queue disordering
 * while sleeps in memcpy_to_msg()
 */
2650 mutex_lock(&u->iolock);
2652 skip = max(sk_peek_offset(sk, flags), 0);
2657 struct sk_buff *skb, *last;
2660 unix_state_lock(sk);
2661 if (sock_flag(sk, SOCK_DEAD)) {
2665 last = skb = skb_peek(&sk->sk_receive_queue);
2666 last_len = last ? last->len : 0;
2668 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
2670 skb = manage_oob(skb, sk, flags, copied);
2672 unix_state_unlock(sk);
2681 if (copied >= target)
2685 * POSIX 1003.1g mandates this order.
2688 err = sock_error(sk);
2691 if (sk->sk_shutdown & RCV_SHUTDOWN)
2694 unix_state_unlock(sk);
2700 mutex_unlock(&u->iolock);
2702 timeo = unix_stream_data_wait(sk, timeo, last,
2703 last_len, freezable);
2705 if (signal_pending(current)) {
2706 err = sock_intr_errno(timeo);
2711 mutex_lock(&u->iolock);
2714 unix_state_unlock(sk);
2718 while (skip >= unix_skb_len(skb)) {
2719 skip -= unix_skb_len(skb);
2721 last_len = skb->len;
2722 skb = skb_peek_next(skb, &sk->sk_receive_queue);
2727 unix_state_unlock(sk);
2730 /* Never glue messages from different writers */
2731 if (!unix_skb_scm_eq(skb, &scm))
2733 } else if (test_bit(SOCK_PASSCRED, &sock->flags) ||
2734 test_bit(SOCK_PASSPIDFD, &sock->flags)) {
2735 /* Copy credentials */
2736 scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2737 unix_set_secdata(&scm, skb);
2741 /* Copy address just once */
2742 if (state->msg && state->msg->msg_name) {
2743 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
2744 state->msg->msg_name);
2745 unix_copy_addr(state->msg, skb->sk);
2749 chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
2751 chunk = state->recv_actor(skb, skip, chunk, state);
2752 drop_skb = !unix_skb_len(skb);
2753 /* skb is only safe to use if !drop_skb */
/* the skb was touched by a concurrent reader;
 * we should not expect anything from this skb
 * anymore and assume it invalid - we can be
 * sure it was dropped from the socket queue
 *
 * let's report a short read
 */
2775 /* Mark read part of skb as used */
2776 if (!(flags & MSG_PEEK)) {
2777 UNIXCB(skb).consumed += chunk;
2779 sk_peek_offset_bwd(sk, chunk);
2781 if (UNIXCB(skb).fp) {
2782 scm_stat_del(sk, skb);
2783 unix_detach_fds(&scm, skb);
2786 if (unix_skb_len(skb))
2789 skb_unlink(skb, &sk->sk_receive_queue);
/* It is questionable, see note in unix_dgram_recvmsg.
 */
2798 unix_peek_fds(&scm, skb);
2800 sk_peek_offset_fwd(sk, chunk);
2807 last_len = skb->len;
2808 unix_state_lock(sk);
2809 skb = skb_peek_next(skb, &sk->sk_receive_queue);
2812 unix_state_unlock(sk);
2817 mutex_unlock(&u->iolock);
2819 scm_recv_unix(sock, state->msg, &scm, flags);
2823 return copied ? : err;
2826 static int unix_stream_read_actor(struct sk_buff *skb,
2827 int skip, int chunk,
2828 struct unix_stream_read_state *state)
2832 ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
2834 return ret ?: chunk;
2837 int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg,
2838 size_t size, int flags)
2840 struct unix_stream_read_state state = {
2841 .recv_actor = unix_stream_read_actor,
2842 .socket = sk->sk_socket,
2848 return unix_stream_read_generic(&state, true);
2851 static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
2852 size_t size, int flags)
2854 struct unix_stream_read_state state = {
2855 .recv_actor = unix_stream_read_actor,
2862 #ifdef CONFIG_BPF_SYSCALL
2863 struct sock *sk = sock->sk;
2864 const struct proto *prot = READ_ONCE(sk->sk_prot);
2866 if (prot != &unix_stream_proto)
2867 return prot->recvmsg(sk, msg, size, flags, NULL);
2869 return unix_stream_read_generic(&state, true);
2872 static int unix_stream_splice_actor(struct sk_buff *skb,
2873 int skip, int chunk,
2874 struct unix_stream_read_state *state)
2876 return skb_splice_bits(skb, state->socket->sk,
2877 UNIXCB(skb).consumed + skip,
2878 state->pipe, chunk, state->splice_flags);
2881 static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos,
2882 struct pipe_inode_info *pipe,
2883 size_t size, unsigned int flags)
2885 struct unix_stream_read_state state = {
2886 .recv_actor = unix_stream_splice_actor,
2890 .splice_flags = flags,
2893 if (unlikely(*ppos))
2896 if (sock->file->f_flags & O_NONBLOCK ||
2897 flags & SPLICE_F_NONBLOCK)
2898 state.flags = MSG_DONTWAIT;
2900 return unix_stream_read_generic(&state, false);
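/* Illustrative only: a userspace sketch of the splice path served by
 * unix_stream_splice_read() above, moving queued bytes from a connected
 * AF_UNIX stream socket into a pipe without a round trip through user memory.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/socket.h>
#include <unistd.h>

static ssize_t sock_to_pipe(int sock, size_t len)
{
	int p[2];
	ssize_t n;

	if (pipe(p) < 0)
		return -1;
	/* SPLICE_F_NONBLOCK ends up as MSG_DONTWAIT in state.flags above */
	n = splice(sock, NULL, p[1], NULL, len, SPLICE_F_NONBLOCK);
	close(p[0]);
	close(p[1]);
	return n;
}
#endif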
2903 static int unix_shutdown(struct socket *sock, int mode)
2905 struct sock *sk = sock->sk;
if (mode < SHUT_RD || mode > SHUT_RDWR)
	return -EINVAL;
/* This maps:
 * SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
 * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
 */
++mode;
2917 unix_state_lock(sk);
2918 WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | mode);
2919 other = unix_peer(sk);
2922 unix_state_unlock(sk);
2923 sk->sk_state_change(sk);
2926 (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
2929 const struct proto *prot = READ_ONCE(other->sk_prot);
2932 prot->unhash(other);
2933 if (mode&RCV_SHUTDOWN)
2934 peer_mode |= SEND_SHUTDOWN;
2935 if (mode&SEND_SHUTDOWN)
2936 peer_mode |= RCV_SHUTDOWN;
2937 unix_state_lock(other);
2938 WRITE_ONCE(other->sk_shutdown, other->sk_shutdown | peer_mode);
2939 unix_state_unlock(other);
2940 other->sk_state_change(other);
2941 if (peer_mode == SHUTDOWN_MASK)
2942 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
2943 else if (peer_mode & RCV_SHUTDOWN)
2944 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
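/* Illustrative only: a userspace sketch of the peer notification performed
 * just above. shutdown(SHUT_WR) on one end of a connected pair is reflected as
 * RCV_SHUTDOWN on the peer, so the peer still drains the data already queued
 * and then sees end-of-file instead of blocking.
 */
#if 0
#include <sys/socket.h>
#include <unistd.h>

static void half_close_demo(void)
{
	int sv[2];
	char buf[4];

	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
	write(sv[0], "hi", 2);
	shutdown(sv[0], SHUT_WR);	/* sv[0] will send no more data */

	read(sv[1], buf, 2);		/* still returns the queued "hi" */
	read(sv[1], buf, sizeof(buf));	/* returns 0: the shutdown was propagated */

	close(sv[0]);
	close(sv[1]);
}
#endif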
2952 long unix_inq_len(struct sock *sk)
2954 struct sk_buff *skb;
2957 if (sk->sk_state == TCP_LISTEN)
2960 spin_lock(&sk->sk_receive_queue.lock);
2961 if (sk->sk_type == SOCK_STREAM ||
2962 sk->sk_type == SOCK_SEQPACKET) {
2963 skb_queue_walk(&sk->sk_receive_queue, skb)
2964 amount += unix_skb_len(skb);
2966 skb = skb_peek(&sk->sk_receive_queue);
2970 spin_unlock(&sk->sk_receive_queue.lock);
2974 EXPORT_SYMBOL_GPL(unix_inq_len);
2976 long unix_outq_len(struct sock *sk)
2978 return sk_wmem_alloc_get(sk);
2980 EXPORT_SYMBOL_GPL(unix_outq_len);
2982 static int unix_open_file(struct sock *sk)
2988 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2991 if (!smp_load_acquire(&unix_sk(sk)->addr))
2994 path = unix_sk(sk)->path;
3000 fd = get_unused_fd_flags(O_CLOEXEC);
3004 f = dentry_open(&path, O_PATH, current_cred());
3018 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3020 struct sock *sk = sock->sk;
3026 amount = unix_outq_len(sk);
3027 err = put_user(amount, (int __user *)arg);
3030 amount = unix_inq_len(sk);
3034 err = put_user(amount, (int __user *)arg);
3037 err = unix_open_file(sk);
3039 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
3042 struct sk_buff *skb;
3045 skb = skb_peek(&sk->sk_receive_queue);
3046 if (skb && skb == READ_ONCE(unix_sk(sk)->oob_skb))
3048 err = put_user(answ, (int __user *)arg);
3059 #ifdef CONFIG_COMPAT
3060 static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3062 return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
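/* Illustrative only: a userspace sketch of the queue-length ioctls dispatched
 * above. SIOCINQ reports what unix_inq_len() computes (unread payload bytes,
 * or -EINVAL on a listening socket) and SIOCOUTQ what unix_outq_len() returns
 * (send-buffer charge still held by this socket, in skb truesize units, so it
 * is usually larger than the raw payload).
 */
#if 0
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <unistd.h>

static void queue_lengths(void)
{
	int sv[2], inq = 0, outq = 0;

	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
	write(sv[0], "abc", 3);

	ioctl(sv[1], SIOCINQ, &inq);	/* 3: payload queued at the receiver */
	ioctl(sv[0], SIOCOUTQ, &outq);	/* nonzero until the peer consumes it */

	close(sv[0]);
	close(sv[1]);
}
#endif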
3066 static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
3068 struct sock *sk = sock->sk;
3072 sock_poll_wait(file, sock, wait);
3074 shutdown = READ_ONCE(sk->sk_shutdown);
/* exceptional events? */
if (READ_ONCE(sk->sk_err))
	mask |= EPOLLERR;
if (shutdown == SHUTDOWN_MASK)
	mask |= EPOLLHUP;
if (shutdown & RCV_SHUTDOWN)
	mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
3085 if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
3086 mask |= EPOLLIN | EPOLLRDNORM;
3087 if (sk_is_readable(sk))
3088 mask |= EPOLLIN | EPOLLRDNORM;
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
if (READ_ONCE(unix_sk(sk)->oob_skb))
	mask |= EPOLLPRI;
#endif
/* Connection-based sockets need to check for termination and startup */
if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
    sk->sk_state == TCP_CLOSE)
	mask |= EPOLLHUP;
3100 * we set writable also when the other side has shut down the
3101 * connection. This prevents stuck sockets.
3103 if (unix_writable(sk))
3104 mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
				poll_table *wait)
3112 struct sock *sk = sock->sk, *other;
3113 unsigned int writable;
3117 sock_poll_wait(file, sock, wait);
3119 shutdown = READ_ONCE(sk->sk_shutdown);
3121 /* exceptional events? */
if (READ_ONCE(sk->sk_err) ||
    !skb_queue_empty_lockless(&sk->sk_error_queue))
	mask |= EPOLLERR |
		(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

if (shutdown & RCV_SHUTDOWN)
	mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
if (shutdown == SHUTDOWN_MASK)
	mask |= EPOLLHUP;
3133 if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
3134 mask |= EPOLLIN | EPOLLRDNORM;
3135 if (sk_is_readable(sk))
3136 mask |= EPOLLIN | EPOLLRDNORM;
/* Connection-based sockets need to check for termination and startup */
if (sk->sk_type == SOCK_SEQPACKET) {
	if (sk->sk_state == TCP_CLOSE)
		mask |= EPOLLHUP;
	/* connection hasn't started yet? */
	if (sk->sk_state == TCP_SYN_SENT)
		return mask;
}
3147 /* No write status requested, avoid expensive OUT tests. */
3148 if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
3151 writable = unix_writable(sk);
3153 unix_state_lock(sk);
3155 other = unix_peer(sk);
3156 if (other && unix_peer(other) != sk &&
3157 unix_recvq_full_lockless(other) &&
3158 unix_dgram_peer_wake_me(sk, other))
3161 unix_state_unlock(sk);
3165 mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
3167 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
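/* Illustrative only: a userspace sketch of the readiness bits computed by the
 * two poll callbacks above. Once the peer shuts down its write side, the
 * reader is reported readable together with (E)POLLRDHUP even though no new
 * data will arrive.
 */
#if 0
#define _GNU_SOURCE		/* for POLLRDHUP */
#include <poll.h>
#include <sys/socket.h>
#include <unistd.h>

static short poll_after_peer_shutdown(void)
{
	int sv[2];
	struct pollfd pfd;

	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
	shutdown(sv[0], SHUT_WR);

	pfd.fd = sv[1];
	pfd.events = POLLIN | POLLRDHUP;
	poll(&pfd, 1, 0);		/* returns immediately */

	close(sv[0]);
	close(sv[1]);
	return pfd.revents;		/* POLLIN | POLLRDHUP expected */
}
#endif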
3172 #ifdef CONFIG_PROC_FS
3174 #define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
3176 #define get_bucket(x) ((x) >> BUCKET_SPACE)
3177 #define get_offset(x) ((x) & ((1UL << BUCKET_SPACE) - 1))
3178 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
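/* The seq_file position for the /proc/net/unix iterator packs both coordinates
 * into one loff_t: the upper bits select the hash bucket, the low BUCKET_SPACE
 * bits hold the 1-based offset of the socket inside that bucket, so
 * pos == set_bucket_offset(get_bucket(pos), get_offset(pos)).
 */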
3180 static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
3182 unsigned long offset = get_offset(*pos);
3183 unsigned long bucket = get_bucket(*pos);
3184 unsigned long count = 0;
3187 for (sk = sk_head(&seq_file_net(seq)->unx.table.buckets[bucket]);
3188 sk; sk = sk_next(sk)) {
3189 if (++count == offset)
3196 static struct sock *unix_get_first(struct seq_file *seq, loff_t *pos)
3198 unsigned long bucket = get_bucket(*pos);
3199 struct net *net = seq_file_net(seq);
3202 while (bucket < UNIX_HASH_SIZE) {
3203 spin_lock(&net->unx.table.locks[bucket]);
3205 sk = unix_from_bucket(seq, pos);
3209 spin_unlock(&net->unx.table.locks[bucket]);
3211 *pos = set_bucket_offset(++bucket, 1);
3217 static struct sock *unix_get_next(struct seq_file *seq, struct sock *sk,
3220 unsigned long bucket = get_bucket(*pos);
3227 spin_unlock(&seq_file_net(seq)->unx.table.locks[bucket]);
3229 *pos = set_bucket_offset(++bucket, 1);
3231 return unix_get_first(seq, pos);
3234 static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
3237 return SEQ_START_TOKEN;
3239 return unix_get_first(seq, pos);
3242 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3246 if (v == SEQ_START_TOKEN)
3247 return unix_get_first(seq, pos);
3249 return unix_get_next(seq, v, pos);
3252 static void unix_seq_stop(struct seq_file *seq, void *v)
3254 struct sock *sk = v;
3257 spin_unlock(&seq_file_net(seq)->unx.table.locks[sk->sk_hash]);
3260 static int unix_seq_show(struct seq_file *seq, void *v)
3263 if (v == SEQ_START_TOKEN)
seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
	 "Inode Path\n");
3268 struct unix_sock *u = unix_sk(s);
3271 seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
3273 refcount_read(&s->sk_refcnt),
3275 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
3278 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
3279 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
if (u->addr) {	/* under a hash table lock here */
3287 len = u->addr->len -
3288 offsetof(struct sockaddr_un, sun_path);
3289 if (u->addr->name->sun_path[0]) {
3295 for ( ; i < len; i++)
seq_putc(seq, u->addr->name->sun_path[i] ?:
	 '@');
3299 unix_state_unlock(s);
3300 seq_putc(seq, '\n');
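/* The table rendered above is what userspace sees in /proc/net/unix (for
 * example via "cat /proc/net/unix"): one line per socket with refcount, flags,
 * type, state and inode, followed by the bound pathname or the abstract name
 * with NUL bytes shown as '@'.
 */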
3306 static const struct seq_operations unix_seq_ops = {
3307 .start = unix_seq_start,
3308 .next = unix_seq_next,
3309 .stop = unix_seq_stop,
3310 .show = unix_seq_show,
3313 #if IS_BUILTIN(CONFIG_UNIX) && defined(CONFIG_BPF_SYSCALL)
3314 struct bpf_unix_iter_state {
3315 struct seq_net_private p;
3316 unsigned int cur_sk;
3317 unsigned int end_sk;
3318 unsigned int max_sk;
3319 struct sock **batch;
3320 bool st_bucket_done;
3323 struct bpf_iter__unix {
3324 __bpf_md_ptr(struct bpf_iter_meta *, meta);
3325 __bpf_md_ptr(struct unix_sock *, unix_sk);
3326 uid_t uid __aligned(8);
3329 static int unix_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
3330 struct unix_sock *unix_sk, uid_t uid)
3332 struct bpf_iter__unix ctx;
3334 meta->seq_num--; /* skip SEQ_START_TOKEN */
3336 ctx.unix_sk = unix_sk;
3338 return bpf_iter_run_prog(prog, &ctx);
3341 static int bpf_iter_unix_hold_batch(struct seq_file *seq, struct sock *start_sk)
3344 struct bpf_unix_iter_state *iter = seq->private;
3345 unsigned int expected = 1;
3348 sock_hold(start_sk);
3349 iter->batch[iter->end_sk++] = start_sk;
3351 for (sk = sk_next(start_sk); sk; sk = sk_next(sk)) {
3352 if (iter->end_sk < iter->max_sk) {
3354 iter->batch[iter->end_sk++] = sk;
3360 spin_unlock(&seq_file_net(seq)->unx.table.locks[start_sk->sk_hash]);
3365 static void bpf_iter_unix_put_batch(struct bpf_unix_iter_state *iter)
3367 while (iter->cur_sk < iter->end_sk)
3368 sock_put(iter->batch[iter->cur_sk++]);
3371 static int bpf_iter_unix_realloc_batch(struct bpf_unix_iter_state *iter,
3372 unsigned int new_batch_sz)
3374 struct sock **new_batch;
3376 new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
3377 GFP_USER | __GFP_NOWARN);
3381 bpf_iter_unix_put_batch(iter);
3382 kvfree(iter->batch);
3383 iter->batch = new_batch;
3384 iter->max_sk = new_batch_sz;
3389 static struct sock *bpf_iter_unix_batch(struct seq_file *seq,
3392 struct bpf_unix_iter_state *iter = seq->private;
3393 unsigned int expected;
3394 bool resized = false;
3397 if (iter->st_bucket_done)
3398 *pos = set_bucket_offset(get_bucket(*pos) + 1, 1);
3401 /* Get a new batch */
3405 sk = unix_get_first(seq, pos);
3407 return NULL; /* Done */
3409 expected = bpf_iter_unix_hold_batch(seq, sk);
3411 if (iter->end_sk == expected) {
3412 iter->st_bucket_done = true;
3416 if (!resized && !bpf_iter_unix_realloc_batch(iter, expected * 3 / 2)) {
3424 static void *bpf_iter_unix_seq_start(struct seq_file *seq, loff_t *pos)
3427 return SEQ_START_TOKEN;
/* bpf iter does not support lseek, so it always
 * continues from where it was stop()-ped.
 */
3432 return bpf_iter_unix_batch(seq, pos);
3435 static void *bpf_iter_unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3437 struct bpf_unix_iter_state *iter = seq->private;
/* Whenever seq_next() is called, the iter->cur_sk is
 * done with seq_show(), so advance to the next sk in
 * the batch.
 */
3444 if (iter->cur_sk < iter->end_sk)
3445 sock_put(iter->batch[iter->cur_sk++]);
3449 if (iter->cur_sk < iter->end_sk)
3450 sk = iter->batch[iter->cur_sk];
3452 sk = bpf_iter_unix_batch(seq, pos);
3457 static int bpf_iter_unix_seq_show(struct seq_file *seq, void *v)
3459 struct bpf_iter_meta meta;
3460 struct bpf_prog *prog;
3461 struct sock *sk = v;
3466 if (v == SEQ_START_TOKEN)
3469 slow = lock_sock_fast(sk);
3471 if (unlikely(sk_unhashed(sk))) {
3476 uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
3478 prog = bpf_iter_get_info(&meta, false);
3479 ret = unix_prog_seq_show(prog, &meta, v, uid);
3481 unlock_sock_fast(sk, slow);
3485 static void bpf_iter_unix_seq_stop(struct seq_file *seq, void *v)
3487 struct bpf_unix_iter_state *iter = seq->private;
3488 struct bpf_iter_meta meta;
3489 struct bpf_prog *prog;
3493 prog = bpf_iter_get_info(&meta, true);
3495 (void)unix_prog_seq_show(prog, &meta, v, 0);
3498 if (iter->cur_sk < iter->end_sk)
3499 bpf_iter_unix_put_batch(iter);
3502 static const struct seq_operations bpf_iter_unix_seq_ops = {
3503 .start = bpf_iter_unix_seq_start,
3504 .next = bpf_iter_unix_seq_next,
3505 .stop = bpf_iter_unix_seq_stop,
3506 .show = bpf_iter_unix_seq_show,
3511 static const struct net_proto_family unix_family_ops = {
3513 .create = unix_create,
3514 .owner = THIS_MODULE,
3518 static int __net_init unix_net_init(struct net *net)
3522 net->unx.sysctl_max_dgram_qlen = 10;
3523 if (unix_sysctl_register(net))
3526 #ifdef CONFIG_PROC_FS
3527 if (!proc_create_net("unix", 0, net->proc_net, &unix_seq_ops,
3528 sizeof(struct seq_net_private)))
3532 net->unx.table.locks = kvmalloc_array(UNIX_HASH_SIZE,
3533 sizeof(spinlock_t), GFP_KERNEL);
3534 if (!net->unx.table.locks)
3537 net->unx.table.buckets = kvmalloc_array(UNIX_HASH_SIZE,
3538 sizeof(struct hlist_head),
3540 if (!net->unx.table.buckets)
3543 for (i = 0; i < UNIX_HASH_SIZE; i++) {
3544 spin_lock_init(&net->unx.table.locks[i]);
3545 INIT_HLIST_HEAD(&net->unx.table.buckets[i]);
3551 kvfree(net->unx.table.locks);
3553 #ifdef CONFIG_PROC_FS
3554 remove_proc_entry("unix", net->proc_net);
3557 unix_sysctl_unregister(net);
3562 static void __net_exit unix_net_exit(struct net *net)
3564 kvfree(net->unx.table.buckets);
3565 kvfree(net->unx.table.locks);
3566 unix_sysctl_unregister(net);
3567 remove_proc_entry("unix", net->proc_net);
3570 static struct pernet_operations unix_net_ops = {
3571 .init = unix_net_init,
3572 .exit = unix_net_exit,
3575 #if IS_BUILTIN(CONFIG_UNIX) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3576 DEFINE_BPF_ITER_FUNC(unix, struct bpf_iter_meta *meta,
3577 struct unix_sock *unix_sk, uid_t uid)
3579 #define INIT_BATCH_SZ 16
3581 static int bpf_iter_init_unix(void *priv_data, struct bpf_iter_aux_info *aux)
3583 struct bpf_unix_iter_state *iter = priv_data;
3586 err = bpf_iter_init_seq_net(priv_data, aux);
3590 err = bpf_iter_unix_realloc_batch(iter, INIT_BATCH_SZ);
3592 bpf_iter_fini_seq_net(priv_data);
3599 static void bpf_iter_fini_unix(void *priv_data)
3601 struct bpf_unix_iter_state *iter = priv_data;
3603 bpf_iter_fini_seq_net(priv_data);
3604 kvfree(iter->batch);
3607 static const struct bpf_iter_seq_info unix_seq_info = {
3608 .seq_ops = &bpf_iter_unix_seq_ops,
3609 .init_seq_private = bpf_iter_init_unix,
3610 .fini_seq_private = bpf_iter_fini_unix,
3611 .seq_priv_size = sizeof(struct bpf_unix_iter_state),
3614 static const struct bpf_func_proto *
3615 bpf_iter_unix_get_func_proto(enum bpf_func_id func_id,
3616 const struct bpf_prog *prog)
3619 case BPF_FUNC_setsockopt:
3620 return &bpf_sk_setsockopt_proto;
3621 case BPF_FUNC_getsockopt:
3622 return &bpf_sk_getsockopt_proto;
3628 static struct bpf_iter_reg unix_reg_info = {
3630 .ctx_arg_info_size = 1,
3632 { offsetof(struct bpf_iter__unix, unix_sk),
3633 PTR_TO_BTF_ID_OR_NULL },
3635 .get_func_proto = bpf_iter_unix_get_func_proto,
3636 .seq_info = &unix_seq_info,
3639 static void __init bpf_iter_register(void)
3641 unix_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UNIX];
3642 if (bpf_iter_reg_target(&unix_reg_info))
3643 pr_warn("Warning: could not register bpf iterator unix\n");
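/* Illustrative note: with this target registered, a BPF iterator program
 * (conventionally declared with SEC("iter/unix") in libbpf) can walk every
 * unix_sock, is handed the owning uid, and may call bpf_setsockopt() /
 * bpf_getsockopt() on each socket via the func protos exposed above.
 */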
3647 static int __init af_unix_init(void)
3651 BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb));
3653 for (i = 0; i < UNIX_HASH_SIZE / 2; i++) {
3654 spin_lock_init(&bsd_socket_locks[i]);
3655 INIT_HLIST_HEAD(&bsd_socket_buckets[i]);
3658 rc = proto_register(&unix_dgram_proto, 1);
3660 pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
3664 rc = proto_register(&unix_stream_proto, 1);
3666 pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
3667 proto_unregister(&unix_dgram_proto);
3671 sock_register(&unix_family_ops);
3672 register_pernet_subsys(&unix_net_ops);
3673 unix_bpf_build_proto();
3675 #if IS_BUILTIN(CONFIG_UNIX) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3676 bpf_iter_register();
3683 static void __exit af_unix_exit(void)
3685 sock_unregister(PF_UNIX);
3686 proto_unregister(&unix_dgram_proto);
3687 proto_unregister(&unix_stream_proto);
3688 unregister_pernet_subsys(&unix_net_ops);
3691 /* Earlier than device_initcall() so that other drivers invoking
3692 request_module() don't end up in a loop when modprobe tries
3693 to use a UNIX socket. But later than subsys_initcall() because
3694 we depend on stuff initialised there */
3695 fs_initcall(af_unix_init);
3696 module_exit(af_unix_exit);
3698 MODULE_LICENSE("GPL");
3699 MODULE_ALIAS_NETPROTO(PF_UNIX);