// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PF_INET protocol family socket handler.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Changes (see also sock.c)
 *
 *		Karl Knutson	:	Socket protocol table
 *		A.N.Kuznetsov	:	Socket death error in accept().
 *		John Richardson :	Fix non blocking error in connect()
 *					so sockets that fail to connect
 *					don't return -EINPROGRESS.
 *		Alan Cox	:	Asynchronous I/O support
 *		Alan Cox	:	Keep correct socket pointer on sock
 *					structures when accept()ed
 *		Alan Cox	:	Semantics of SO_LINGER aren't state
 *					moved to close when you look carefully.
 *					With this fixed and the accept bug fixed
 *					some RPC stuff seems happier.
 *		Niibe Yutaka	:	4.4BSD style write async I/O
 *		Tony Gale	:	Fixed reuse semantics.
 *		Alan Cox	:	bind() shouldn't abort existing but dead
 *					sockets. Stops FTP netin:.. I hope.
 *		Alan Cox	:	bind() works correctly for RAW sockets.
 *					Note that FreeBSD at least was broken
 *					in this respect so be careful with
 *					compatibility tests...
 *		Alan Cox	:	routing cache support
 *		Alan Cox	:	memzero the socket structure for
 *					compactness.
 *		Matt Day	:	nonblock connect error handler
 *		Alan Cox	:	Allow large numbers of pending sockets
 *					(eg for big web sites), but only if
 *					specifically application requested.
 *		Alan Cox	:	New buffering throughout IP. Used
 *					dumbly.
 *		Alan Cox	:	New buffering now used smartly.
 *		Alan Cox	:	BSD rather than common sense
 *					interpretation of listen.
 *		Germano Caronni	:	Assorted small races.
 *		Alan Cox	:	sendmsg/recvmsg basic support.
 *		Alan Cox	:	Only sendmsg/recvmsg now supported.
 *		Alan Cox	:	Locked down bind (see security list).
 *		Alan Cox	:	Loosened bind a little.
 *		Mike McLagan	:	ADD/DEL DLCI Ioctls
 *	Willy Konynenberg	:	Transparent proxying support.
 *		David S. Miller	:	New socket lookup architecture.
 *					Some other random speedups.
 *		Cyrus Durgin	:	Cleaned up file for kmod hacks.
 *		Andi Kleen	:	Fix inet_stream_connect TCP race.
 */
#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/slab.h>

#include <linux/uaccess.h>

#include <linux/inet.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/ip_fib.h>
#include <net/inet_connection_sock.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/udplite.h>
#include <net/ping.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/raw.h>
#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/ip_tunnels.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/secure_seq.h>
#ifdef CONFIG_IP_MROUTE
#include <linux/mroute.h>
#endif
#include <net/l3mdev.h>
#include <net/compat.h>

#include <trace/events/sock.h>
/* The inetsw table contains everything that inet_create needs to
 * build a new socket.
 */
static struct list_head inetsw[SOCK_MAX];
static DEFINE_SPINLOCK(inetsw_lock);
/* New destruction routine */

void inet_sock_destruct(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);

	__skb_queue_purge(&sk->sk_receive_queue);
	__skb_queue_purge(&sk->sk_error_queue);

	sk_mem_reclaim_final(sk);

	if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) {
		pr_err("Attempt to release TCP socket in state %d %p\n",
		       sk->sk_state, sk);
		return;
	}
	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive inet socket %p\n", sk);
		return;
	}

	WARN_ON_ONCE(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
	WARN_ON_ONCE(sk->sk_wmem_queued);
	WARN_ON_ONCE(sk_forward_alloc_get(sk));

	kfree(rcu_dereference_protected(inet->inet_opt, 1));
	dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
	dst_release(rcu_dereference_protected(sk->sk_rx_dst, 1));
}
EXPORT_SYMBOL(inet_sock_destruct);
/*
 *	The routines beyond this point handle the behaviour of an AF_INET
 *	socket object. Mostly it punts to the subprotocols of IP to do
 *	the work.
 */

/*
 *	Automatically bind an unbound socket.
 */

static int inet_autobind(struct sock *sk)
{
	struct inet_sock *inet;
	/* We may need to bind the socket. */
	lock_sock(sk);
	inet = inet_sk(sk);
	if (!inet->inet_num) {
		if (sk->sk_prot->get_port(sk, 0)) {
			release_sock(sk);
			return -EAGAIN;
		}
		inet->inet_sport = htons(inet->inet_num);
	}
	release_sock(sk);
	return 0;
}
/*
 *	Move a socket into listening state.
 */
int inet_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	unsigned char old_state;
	int err, tcp_fastopen;

	lock_sock(sk);

	err = -EINVAL;
	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM)
		goto out;

	old_state = sk->sk_state;
	if (!((1 << old_state) & (TCPF_CLOSE | TCPF_LISTEN)))
		goto out;

	WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
	/* Really, if the socket is already in listen state
	 * we can only allow the backlog to be adjusted.
	 */
	if (old_state != TCP_LISTEN) {
		/* Enable TFO w/o requiring TCP_FASTOPEN socket option.
		 * Note that only TCP sockets (SOCK_STREAM) will reach here.
		 * Also the fastopen backlog may already have been set via the
		 * option because the socket was in TCP_LISTEN state previously
		 * but was shutdown() rather than close().
		 */
		tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen);
		if ((tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) &&
		    (tcp_fastopen & TFO_SERVER_ENABLE) &&
		    !inet_csk(sk)->icsk_accept_queue.fastopenq.max_qlen) {
			fastopen_queue_tune(sk, backlog);
			tcp_fastopen_init_key_once(sock_net(sk));
		}

		err = inet_csk_listen_start(sk);
		if (err)
			goto out;

		tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_LISTEN_CB, 0, NULL);
	}
	err = 0;

out:
	release_sock(sk);
	return err;
}
EXPORT_SYMBOL(inet_listen);
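
/*
 * Illustrative sketch (not part of this file): how the TFO branch above is
 * reached from user space. With the net.ipv4.tcp_fastopen sysctl carrying
 * both TFO_SERVER_ENABLE and TFO_SERVER_WO_SOCKOPT1 (the exact value is an
 * assumption for this example; see the TFO_* flags in include/net/tcp.h),
 * a plain listen() enables server-side Fast Open without TCP_FASTOPEN:
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in a = {
 *		.sin_family      = AF_INET,
 *		.sin_port        = htons(8080),
 *		.sin_addr.s_addr = htonl(INADDR_ANY),
 *	};
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
 *	listen(fd, 128);	the backlog also tunes the TFO queue above
 *
 * Error handling elided for brevity.
 */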
/*
 *	Create an inet socket.
 */

static int inet_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	struct sock *sk;
	struct inet_protosw *answer;
	struct inet_sock *inet;
	struct proto *answer_prot;
	unsigned char answer_flags;
	int try_loading_module = 0;
	int err;

	if (protocol < 0 || protocol >= IPPROTO_MAX)
		return -EINVAL;

	sock->state = SS_UNCONNECTED;

	/* Look for the requested type/protocol pair. */
lookup_protocol:
	err = -ESOCKTNOSUPPORT;
	rcu_read_lock();
	list_for_each_entry_rcu(answer, &inetsw[sock->type], list) {
		err = 0;
		/* Check the non-wild match. */
		if (protocol == answer->protocol) {
			if (protocol != IPPROTO_IP)
				break;
		} else {
			/* Check for the two wild cases. */
			if (IPPROTO_IP == protocol) {
				protocol = answer->protocol;
				break;
			}
			if (IPPROTO_IP == answer->protocol)
				break;
		}
		err = -EPROTONOSUPPORT;
	}

	if (unlikely(err)) {
		if (try_loading_module < 2) {
			rcu_read_unlock();
			/*
			 * Be more specific, e.g. net-pf-2-proto-132-type-1
			 * (net-pf-PF_INET-proto-IPPROTO_SCTP-type-SOCK_STREAM)
			 */
			if (++try_loading_module == 1)
				request_module("net-pf-%d-proto-%d-type-%d",
					       PF_INET, protocol, sock->type);
			/*
			 * Fall back to generic, e.g. net-pf-2-proto-132
			 * (net-pf-PF_INET-proto-IPPROTO_SCTP)
			 */
			else
				request_module("net-pf-%d-proto-%d",
					       PF_INET, protocol);
			goto lookup_protocol;
		} else
			goto out_rcu_unlock;
	}

	err = -EPERM;
	if (sock->type == SOCK_RAW && !kern &&
	    !ns_capable(net->user_ns, CAP_NET_RAW))
		goto out_rcu_unlock;

	sock->ops = answer->ops;
	answer_prot = answer->prot;
	answer_flags = answer->flags;
	rcu_read_unlock();

	WARN_ON(!answer_prot->slab);

	err = -ENOMEM;
	sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot, kern);
	if (!sk)
		goto out;

	err = 0;
	if (INET_PROTOSW_REUSE & answer_flags)
		sk->sk_reuse = SK_CAN_REUSE;

	inet = inet_sk(sk);
	inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0;

	if (SOCK_RAW == sock->type) {
		inet->inet_num = protocol;
		if (IPPROTO_RAW == protocol)
			inet->hdrincl = 1;
	}

	if (READ_ONCE(net->ipv4.sysctl_ip_no_pmtu_disc))
		inet->pmtudisc = IP_PMTUDISC_DONT;
	else
		inet->pmtudisc = IP_PMTUDISC_WANT;

	sock_init_data(sock, sk);

	sk->sk_destruct	   = inet_sock_destruct;
	sk->sk_protocol	   = protocol;
	sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
	sk->sk_txrehash = READ_ONCE(net->core.sysctl_txrehash);

	inet->uc_ttl	= -1;
	inet->mc_loop	= 1;
	inet->mc_ttl	= 1;
	inet->mc_all	= 1;
	inet->mc_index	= 0;
	inet->mc_list	= NULL;
	inet->rcv_tos	= 0;

	if (inet->inet_num) {
		/* It assumes that any protocol which allows
		 * the user to assign a number at socket
		 * creation time automatically
		 * shares.
		 */
		inet->inet_sport = htons(inet->inet_num);
		/* Add to protocol hash chains. */
		err = sk->sk_prot->hash(sk);
		if (err) {
			sk_common_release(sk);
			goto out;
		}
	}

	if (sk->sk_prot->init) {
		err = sk->sk_prot->init(sk);
		if (err) {
			sk_common_release(sk);
			goto out;
		}
	}

	if (!kern) {
		err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk);
		if (err) {
			sk_common_release(sk);
			goto out;
		}
	}
out:
	return err;
out_rcu_unlock:
	rcu_read_unlock();
	goto out;
}
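
/*
 * Illustrative sketch (not part of this file): the wildcard matching above
 * is what lets user space omit the protocol number. Both calls below end up
 * on the same SOCK_STREAM entry, because IPPROTO_IP (0) matches the first
 * stream entry in inetsw and is rewritten to answer->protocol:
 *
 *	int a = socket(AF_INET, SOCK_STREAM, 0);
 *	int b = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
 *
 * A protocol with no entry triggers the request_module() calls above, e.g.
 * socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP) first tries the module alias
 * "net-pf-2-proto-132-type-1" and then falls back to "net-pf-2-proto-132".
 */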
/*
 *	The peer socket should always be NULL (or else). When we call this
 *	function we are destroying the object and from then on nobody
 *	should refer to it.
 */

int inet_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (sk) {
		long timeout;

		if (!sk->sk_kern_sock)
			BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk);

		/* Applications forget to leave groups before exiting */
		ip_mc_drop_socket(sk);

		/* If linger is set, we don't return until the close
		 * is complete.  Otherwise we return immediately. The
		 * actual closing is done the same either way.
		 *
		 * If the close is due to the process exiting, we never
		 * linger..
		 */
		timeout = 0;
		if (sock_flag(sk, SOCK_LINGER) &&
		    !(current->flags & PF_EXITING))
			timeout = sk->sk_lingertime;
		sk->sk_prot->close(sk, timeout);
		sock->sk = NULL;
	}
	return 0;
}
EXPORT_SYMBOL(inet_release);
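
/*
 * Illustrative sketch (not part of this file): the lingering close described
 * above is driven by SO_LINGER from user space. With the option set, close()
 * may block for up to l_linger seconds while queued data drains, unless the
 * process is exiting:
 *
 *	struct linger lg = { .l_onoff = 1, .l_linger = 5 };
 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
 *	close(fd);	may block up to 5 seconds
 */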
int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	u32 flags = BIND_WITH_LOCK;
	int err;

	/* If the socket has its own bind function then use it. (RAW) */
	if (sk->sk_prot->bind)
		return sk->sk_prot->bind(sk, uaddr, addr_len);

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	/* BPF prog is run before any checks are done so that if the prog
	 * changes context in a wrong way it will be caught.
	 */
	err = BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr,
						 CGROUP_INET4_BIND, &flags);
	if (err)
		return err;

	return __inet_bind(sk, uaddr, addr_len, flags);
}
EXPORT_SYMBOL(inet_bind);
int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
		u32 flags)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	unsigned short snum;
	int chk_addr_ret;
	u32 tb_id = RT_TABLE_LOCAL;
	int err;

	if (addr->sin_family != AF_INET) {
		/* Compatibility games : accept AF_UNSPEC (mapped to AF_INET)
		 * only if s_addr is INADDR_ANY.
		 */
		err = -EAFNOSUPPORT;
		if (addr->sin_family != AF_UNSPEC ||
		    addr->sin_addr.s_addr != htonl(INADDR_ANY))
			goto out;
	}

	tb_id = l3mdev_fib_table_by_index(net, sk->sk_bound_dev_if) ? : tb_id;
	chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id);

	/* Not specified by any standard per-se, however it breaks too
	 * many applications when removed.  It is unfortunate since
	 * allowing applications to make a non-local bind solves
	 * several problems with systems using dynamic addressing.
	 * (ie. your servers still start up even if your ISDN link
	 *  is temporarily down)
	 */
	err = -EADDRNOTAVAIL;
	if (!inet_addr_valid_or_nonlocal(net, inet, addr->sin_addr.s_addr,
					 chk_addr_ret))
		goto out;

	snum = ntohs(addr->sin_port);
	err = -EACCES;
	if (!(flags & BIND_NO_CAP_NET_BIND_SERVICE) &&
	    snum && inet_port_requires_bind_service(net, snum) &&
	    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
		goto out;

	/* We keep a pair of addresses. rcv_saddr is the one
	 * used by hash lookups, and saddr is used for transmit.
	 *
	 * In the BSD API these are the same except where it
	 * would be illegal to use them (multicast/broadcast) in
	 * which case the sending device address is used.
	 */
	if (flags & BIND_WITH_LOCK)
		lock_sock(sk);

	/* Check these errors (active socket, double bind). */
	err = -EINVAL;
	if (sk->sk_state != TCP_CLOSE || inet->inet_num)
		goto out_release_sock;

	inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr;
	if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
		inet->inet_saddr = 0;  /* Use device */

	/* Make sure we are allowed to bind here. */
	if (snum || !(inet->bind_address_no_port ||
		      (flags & BIND_FORCE_ADDRESS_NO_PORT))) {
		err = sk->sk_prot->get_port(sk, snum);
		if (err) {
			inet->inet_saddr = inet->inet_rcv_saddr = 0;
			goto out_release_sock;
		}
		if (!(flags & BIND_FROM_BPF)) {
			err = BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk);
			if (err) {
				inet->inet_saddr = inet->inet_rcv_saddr = 0;
				if (sk->sk_prot->put_port)
					sk->sk_prot->put_port(sk);
				goto out_release_sock;
			}
		}
	}

	if (inet->inet_rcv_saddr)
		sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
	if (snum)
		sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
	inet->inet_sport = htons(inet->inet_num);
	inet->inet_daddr = 0;
	inet->inet_dport = 0;
	sk_dst_reset(sk);
	err = 0;
out_release_sock:
	if (flags & BIND_WITH_LOCK)
		release_sock(sk);
out:
	return err;
}
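
/*
 * Illustrative sketch (not part of this file): the bind_address_no_port
 * branch above corresponds to the IP_BIND_ADDRESS_NO_PORT socket option.
 * It lets many sockets bind the same source address without each consuming
 * an ephemeral port until connect() time:
 *
 *	int one = 1;
 *	setsockopt(fd, IPPROTO_IP, IP_BIND_ADDRESS_NO_PORT, &one, sizeof(one));
 *	struct sockaddr_in a = { .sin_family = AF_INET, .sin_port = 0 };
 *	inet_pton(AF_INET, "192.0.2.1", &a.sin_addr);
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
 *	connect(fd, ...);	the local port is allocated here, not at bind()
 */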
int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
		       int addr_len, int flags)
{
	struct sock *sk = sock->sk;
	const struct proto *prot;
	int err;

	if (addr_len < sizeof(uaddr->sa_family))
		return -EINVAL;

	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
	prot = READ_ONCE(sk->sk_prot);

	if (uaddr->sa_family == AF_UNSPEC)
		return prot->disconnect(sk, flags);

	if (BPF_CGROUP_PRE_CONNECT_ENABLED(sk)) {
		err = prot->pre_connect(sk, uaddr, addr_len);
		if (err)
			return err;
	}

	if (data_race(!inet_sk(sk)->inet_num) && inet_autobind(sk))
		return -EAGAIN;
	return prot->connect(sk, uaddr, addr_len);
}
EXPORT_SYMBOL(inet_dgram_connect);
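
/*
 * Illustrative sketch (not part of this file): the AF_UNSPEC branch above
 * implements the POSIX idiom for dissolving a datagram socket's association;
 * afterwards the socket can again receive from any peer:
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *	connect(fd, &sa, sizeof(sa));	ends up in prot->disconnect()
 */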
static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(sk_sleep(sk), &wait);
	sk->sk_write_pending += writebias;
	sk->sk_wait_pending++;

	/* Basic assumption: if someone sets sk->sk_err, he _must_
	 * change state of the socket from TCP_SYN_*.
	 * Connect() does not allow to get error notifications
	 * without closing the socket.
	 */
	while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		release_sock(sk);
		timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
		lock_sock(sk);
		if (signal_pending(current) || !timeo)
			break;
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	sk->sk_write_pending -= writebias;
	sk->sk_wait_pending--;
	return timeo;
}
/*
 *	Connect to a remote host. There is regrettably still a little
 *	TCP 'magic' in here.
 */
int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			  int addr_len, int flags, int is_sendmsg)
{
	struct sock *sk = sock->sk;
	int err;
	long timeo;

	/*
	 * uaddr can be NULL and addr_len can be 0 if:
	 * sk is a TCP fastopen active socket and
	 * TCP_FASTOPEN_CONNECT sockopt is set and
	 * we already have a valid cookie for this socket.
	 * In this case, user can call write() after connect().
	 * write() will invoke tcp_sendmsg_fastopen() which calls
	 * __inet_stream_connect().
	 */
	if (uaddr) {
		if (addr_len < sizeof(uaddr->sa_family))
			return -EINVAL;

		if (uaddr->sa_family == AF_UNSPEC) {
			err = sk->sk_prot->disconnect(sk, flags);
			sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
			goto out;
		}
	}

	switch (sock->state) {
	default:
		err = -EINVAL;
		goto out;
	case SS_CONNECTED:
		err = -EISCONN;
		goto out;
	case SS_CONNECTING:
		if (inet_sk(sk)->defer_connect)
			err = is_sendmsg ? -EINPROGRESS : -EISCONN;
		else
			err = -EALREADY;
		/* Fall out of switch with err, set for this state */
		break;
	case SS_UNCONNECTED:
		err = -EISCONN;
		if (sk->sk_state != TCP_CLOSE)
			goto out;

		if (BPF_CGROUP_PRE_CONNECT_ENABLED(sk)) {
			err = sk->sk_prot->pre_connect(sk, uaddr, addr_len);
			if (err)
				goto out;
		}

		err = sk->sk_prot->connect(sk, uaddr, addr_len);
		if (err < 0)
			goto out;

		sock->state = SS_CONNECTING;

		if (!err && inet_sk(sk)->defer_connect)
			goto out;

		/* Just entered SS_CONNECTING state; the only
		 * difference is that return value in non-blocking
		 * case is EINPROGRESS, rather than EALREADY.
		 */
		err = -EINPROGRESS;
		break;
	}

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		int writebias = (sk->sk_protocol == IPPROTO_TCP) &&
				tcp_sk(sk)->fastopen_req &&
				tcp_sk(sk)->fastopen_req->data ? 1 : 0;

		/* Error code is set above */
		if (!timeo || !inet_wait_for_connect(sk, timeo, writebias))
			goto out;

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out;
	}

	/* Connection was closed by RST, timeout, ICMP error
	 * or another process disconnected us.
	 */
	if (sk->sk_state == TCP_CLOSE)
		goto sock_error;

	/* sk->sk_err may be not zero now, if RECVERR was ordered by user
	 * and error was received after socket entered established state.
	 * Hence, it is handled normally after connect() return successfully.
	 */
	sock->state = SS_CONNECTED;
	err = 0;
out:
	return err;

sock_error:
	err = sock_error(sk) ? : -ECONNABORTED;
	sock->state = SS_UNCONNECTED;
	if (sk->sk_prot->disconnect(sk, flags))
		sock->state = SS_DISCONNECTING;
	goto out;
}
EXPORT_SYMBOL(__inet_stream_connect);
int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			int addr_len, int flags)
{
	int err;

	lock_sock(sock->sk);
	err = __inet_stream_connect(sock, uaddr, addr_len, flags, 0);
	release_sock(sock->sk);
	return err;
}
EXPORT_SYMBOL(inet_stream_connect);
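
/*
 * Illustrative sketch (not part of this file): the EINPROGRESS path above
 * is what a classic non-blocking connect loop in user space relies on:
 *
 *	fcntl(fd, F_SETFL, O_NONBLOCK);
 *	if (connect(fd, (struct sockaddr *)&a, sizeof(a)) < 0 &&
 *	    errno == EINPROGRESS) {
 *		struct pollfd p = { .fd = fd, .events = POLLOUT };
 *		int err;
 *		socklen_t l = sizeof(err);
 *
 *		poll(&p, 1, timeout_ms);
 *		getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &l);
 *		err == 0 means the handshake completed
 *	}
 */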
void __inet_accept(struct socket *sock, struct socket *newsock, struct sock *newsk)
{
	sock_rps_record_flow(newsk);
	WARN_ON(!((1 << newsk->sk_state) &
		  (TCPF_ESTABLISHED | TCPF_SYN_RECV |
		   TCPF_CLOSE_WAIT | TCPF_CLOSE)));

	if (test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		set_bit(SOCK_SUPPORT_ZC, &newsock->flags);
	sock_graft(newsk, newsock);

	newsock->state = SS_CONNECTED;
}
/*
 *	Accept a pending connection. The TCP layer now gives BSD semantics.
 */

int inet_accept(struct socket *sock, struct socket *newsock, int flags,
		bool kern)
{
	struct sock *sk1 = sock->sk, *sk2;
	int err = -EINVAL;

	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
	sk2 = READ_ONCE(sk1->sk_prot)->accept(sk1, flags, &err, kern);
	if (!sk2)
		return err;

	lock_sock(sk2);
	__inet_accept(sock, newsock, sk2);
	release_sock(sk2);
	return 0;
}
EXPORT_SYMBOL(inet_accept);
/*
 *	This does both peername and sockname.
 */
int inet_getname(struct socket *sock, struct sockaddr *uaddr,
		 int peer)
{
	struct sock *sk		= sock->sk;
	struct inet_sock *inet	= inet_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, uaddr);

	sin->sin_family = AF_INET;
	lock_sock(sk);
	if (peer) {
		if (!inet->inet_dport ||
		    (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) &&
		     peer == 1)) {
			release_sock(sk);
			return -ENOTCONN;
		}
		sin->sin_port = inet->inet_dport;
		sin->sin_addr.s_addr = inet->inet_daddr;
		BPF_CGROUP_RUN_SA_PROG(sk, (struct sockaddr *)sin,
				       CGROUP_INET4_GETPEERNAME);
	} else {
		__be32 addr = inet->inet_rcv_saddr;
		if (!addr)
			addr = inet->inet_saddr;
		sin->sin_port = inet->inet_sport;
		sin->sin_addr.s_addr = addr;
		BPF_CGROUP_RUN_SA_PROG(sk, (struct sockaddr *)sin,
				       CGROUP_INET4_GETSOCKNAME);
	}
	release_sock(sk);
	memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
	return sizeof(*sin);
}
EXPORT_SYMBOL(inet_getname);
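
/*
 * Illustrative sketch (not part of this file): both getsockname() and
 * getpeername() land here, distinguished by the peer argument:
 *
 *	struct sockaddr_in local, remote;
 *	socklen_t len = sizeof(local);
 *
 *	getsockname(fd, (struct sockaddr *)&local, &len);	peer == 0
 *	len = sizeof(remote);
 *	getpeername(fd, (struct sockaddr *)&remote, &len);	peer != 0
 */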
int inet_send_prepare(struct sock *sk)
{
	sock_rps_record_flow(sk);

	/* We may need to bind the socket. */
	if (data_race(!inet_sk(sk)->inet_num) && !sk->sk_prot->no_autobind &&
	    inet_autobind(sk))
		return -EAGAIN;

	return 0;
}
EXPORT_SYMBOL_GPL(inet_send_prepare);
int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;

	if (unlikely(inet_send_prepare(sk)))
		return -EAGAIN;

	return INDIRECT_CALL_2(sk->sk_prot->sendmsg, tcp_sendmsg, udp_sendmsg,
			       sk, msg, size);
}
EXPORT_SYMBOL(inet_sendmsg);
void inet_splice_eof(struct socket *sock)
{
	const struct proto *prot;
	struct sock *sk = sock->sk;

	if (unlikely(inet_send_prepare(sk)))
		return;

	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
	prot = READ_ONCE(sk->sk_prot);
	if (prot->splice_eof)
		prot->splice_eof(sock);
}
EXPORT_SYMBOL_GPL(inet_splice_eof);
ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
		      size_t size, int flags)
{
	struct sock *sk = sock->sk;
	const struct proto *prot;

	if (unlikely(inet_send_prepare(sk)))
		return -EAGAIN;

	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
	prot = READ_ONCE(sk->sk_prot);
	if (prot->sendpage)
		return prot->sendpage(sk, page, offset, size, flags);
	return sock_no_sendpage(sock, page, offset, size, flags);
}
EXPORT_SYMBOL(inet_sendpage);
INDIRECT_CALLABLE_DECLARE(int udp_recvmsg(struct sock *, struct msghdr *,
					  size_t, int, int *));
int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		 int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	if (likely(!(flags & MSG_ERRQUEUE)))
		sock_rps_record_flow(sk);

	err = INDIRECT_CALL_2(sk->sk_prot->recvmsg, tcp_recvmsg, udp_recvmsg,
			      sk, msg, size, flags, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}
EXPORT_SYMBOL(inet_recvmsg);
int inet_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	/* This should really check to make sure
	 * the socket is a TCP socket. (WHY AC...)
	 */
	how++; /* maps 0->1 has the advantage of making bit 1 rcvs and
		       1->2 bit 2 snds.
		       2->3 */
	if ((how & ~SHUTDOWN_MASK) || !how)	/* MAXINT->0 */
		return -EINVAL;

	lock_sock(sk);
	if (sock->state == SS_CONNECTING) {
		if ((1 << sk->sk_state) &
		    (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
			sock->state = SS_DISCONNECTING;
		else
			sock->state = SS_CONNECTED;
	}

	switch (sk->sk_state) {
	case TCP_CLOSE:
		err = -ENOTCONN;
		/* Hack to wake up other listeners, who can poll for
		   EPOLLHUP, even on eg. unconnected UDP sockets -- RR */
		fallthrough;
	default:
		WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | how);
		if (sk->sk_prot->shutdown)
			sk->sk_prot->shutdown(sk, how);
		break;

	/* Remaining two branches are temporary solution for missing
	 * close() in multithreaded environment. It is _not_ a good idea,
	 * but we have no choice until close() is repaired at VFS level.
	 */
	case TCP_LISTEN:
		if (!(how & RCV_SHUTDOWN))
			break;
		fallthrough;
	case TCP_SYN_SENT:
		err = sk->sk_prot->disconnect(sk, O_NONBLOCK);
		sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
		break;
	}

	/* Wake up anyone sleeping in poll. */
	sk->sk_state_change(sk);
	release_sock(sk);
	return err;
}
EXPORT_SYMBOL(inet_shutdown);
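
/*
 * Worked example of the "how++" mapping above: user space passes SHUT_RD (0),
 * SHUT_WR (1) or SHUT_RDWR (2); incrementing turns these into the kernel's
 * shutdown bit masks:
 *
 *	SHUT_RD   (0) -> 1 == RCV_SHUTDOWN
 *	SHUT_WR   (1) -> 2 == SEND_SHUTDOWN
 *	SHUT_RDWR (2) -> 3 == RCV_SHUTDOWN | SEND_SHUTDOWN
 *
 * Anything that sets bits outside SHUTDOWN_MASK (3), or that wraps around to
 * zero (e.g. passing ~0, the "MAXINT->0" note above), fails with -EINVAL.
 */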
/*
 *	ioctl() calls you can issue on an INET socket. Most of these are
 *	device configuration and stuff and very rarely used. Some ioctls
 *	pass on to the socket itself.
 *
 *	NOTE: I like the idea of a module for the config stuff. ie ifconfig
 *	loads the devconfigure module does its configuring and unloads it.
 *	There's a good 20K of config code hanging around the kernel.
 */

int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	int err = 0;
	struct net *net = sock_net(sk);
	void __user *p = (void __user *)arg;
	struct ifreq ifr;
	struct rtentry rt;

	switch (cmd) {
	case SIOCADDRT:
	case SIOCDELRT:
		if (copy_from_user(&rt, p, sizeof(struct rtentry)))
			return -EFAULT;
		err = ip_rt_ioctl(net, cmd, &rt);
		break;
	case SIOCRTMSG:
		err = -EINVAL;
		break;
	case SIOCDARP:
	case SIOCGARP:
	case SIOCSARP:
		err = arp_ioctl(net, cmd, (void __user *)arg);
		break;
	case SIOCGIFADDR:
	case SIOCGIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCGIFDSTADDR:
	case SIOCGIFPFLAGS:
		if (get_user_ifreq(&ifr, NULL, p))
			return -EFAULT;
		err = devinet_ioctl(net, cmd, &ifr);
		if (!err && put_user_ifreq(&ifr, p))
			err = -EFAULT;
		break;
	case SIOCSIFADDR:
	case SIOCSIFBRDADDR:
	case SIOCSIFNETMASK:
	case SIOCSIFDSTADDR:
	case SIOCSIFPFLAGS:
	case SIOCSIFFLAGS:
		if (get_user_ifreq(&ifr, NULL, p))
			return -EFAULT;
		err = devinet_ioctl(net, cmd, &ifr);
		break;
	default:
		if (sk->sk_prot->ioctl)
			err = sk->sk_prot->ioctl(sk, cmd, arg);
		else
			err = -ENOIOCTLCMD;
		break;
	}
	return err;
}
EXPORT_SYMBOL(inet_ioctl);
#ifdef CONFIG_COMPAT
static int inet_compat_routing_ioctl(struct sock *sk, unsigned int cmd,
		struct compat_rtentry __user *ur)
{
	compat_uptr_t rtdev;
	struct rtentry rt;

	if (copy_from_user(&rt.rt_dst, &ur->rt_dst,
			3 * sizeof(struct sockaddr)) ||
	    get_user(rt.rt_flags, &ur->rt_flags) ||
	    get_user(rt.rt_metric, &ur->rt_metric) ||
	    get_user(rt.rt_mtu, &ur->rt_mtu) ||
	    get_user(rt.rt_window, &ur->rt_window) ||
	    get_user(rt.rt_irtt, &ur->rt_irtt) ||
	    get_user(rtdev, &ur->rt_dev))
		return -EFAULT;

	rt.rt_dev = compat_ptr(rtdev);
	return ip_rt_ioctl(sock_net(sk), cmd, &rt);
}

static int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	void __user *argp = compat_ptr(arg);
	struct sock *sk = sock->sk;

	switch (cmd) {
	case SIOCADDRT:
	case SIOCDELRT:
		return inet_compat_routing_ioctl(sk, cmd, argp);
	default:
		if (!sk->sk_prot->compat_ioctl)
			return -ENOIOCTLCMD;
		return sk->sk_prot->compat_ioctl(sk, cmd, arg);
	}
}
#endif /* CONFIG_COMPAT */
const struct proto_ops inet_stream_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.release	   = inet_release,
	.bind		   = inet_bind,
	.connect	   = inet_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = inet_accept,
	.getname	   = inet_getname,
	.poll		   = tcp_poll,
	.ioctl		   = inet_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = inet_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = inet_recvmsg,
#ifdef CONFIG_MMU
	.mmap		   = tcp_mmap,
#endif
	.splice_eof	   = inet_splice_eof,
	.sendpage	   = inet_sendpage,
	.splice_read	   = tcp_splice_read,
	.read_sock	   = tcp_read_sock,
	.read_skb	   = tcp_read_skb,
	.sendmsg_locked    = tcp_sendmsg_locked,
	.sendpage_locked   = tcp_sendpage_locked,
	.peek_len	   = tcp_peek_len,
#ifdef CONFIG_COMPAT
	.compat_ioctl	   = inet_compat_ioctl,
#endif
	.set_rcvlowat	   = tcp_set_rcvlowat,
};
EXPORT_SYMBOL(inet_stream_ops);
const struct proto_ops inet_dgram_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.release	   = inet_release,
	.bind		   = inet_bind,
	.connect	   = inet_dgram_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = sock_no_accept,
	.getname	   = inet_getname,
	.poll		   = udp_poll,
	.ioctl		   = inet_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = sock_no_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.read_skb	   = udp_read_skb,
	.recvmsg	   = inet_recvmsg,
	.mmap		   = sock_no_mmap,
	.splice_eof	   = inet_splice_eof,
	.sendpage	   = inet_sendpage,
	.set_peek_off	   = sk_set_peek_off,
#ifdef CONFIG_COMPAT
	.compat_ioctl	   = inet_compat_ioctl,
#endif
};
EXPORT_SYMBOL(inet_dgram_ops);
/*
 * For SOCK_RAW sockets; should be the same as inet_dgram_ops but without
 * udp_poll
 */
static const struct proto_ops inet_sockraw_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.release	   = inet_release,
	.bind		   = inet_bind,
	.connect	   = inet_dgram_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = sock_no_accept,
	.getname	   = inet_getname,
	.poll		   = datagram_poll,
	.ioctl		   = inet_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = sock_no_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = inet_recvmsg,
	.mmap		   = sock_no_mmap,
	.splice_eof	   = inet_splice_eof,
	.sendpage	   = inet_sendpage,
#ifdef CONFIG_COMPAT
	.compat_ioctl	   = inet_compat_ioctl,
#endif
};
static const struct net_proto_family inet_family_ops = {
	.family = PF_INET,
	.create = inet_create,
	.owner	= THIS_MODULE,
};
/* Upon startup we insert all the elements in inetsw_array[] into
 * the linked list inetsw.
 */
static struct inet_protosw inetsw_array[] =
{
	{
		.type =       SOCK_STREAM,
		.protocol =   IPPROTO_TCP,
		.prot =       &tcp_prot,
		.ops =        &inet_stream_ops,
		.flags =      INET_PROTOSW_PERMANENT |
			      INET_PROTOSW_ICSK,
	},

	{
		.type =       SOCK_DGRAM,
		.protocol =   IPPROTO_UDP,
		.prot =       &udp_prot,
		.ops =        &inet_dgram_ops,
		.flags =      INET_PROTOSW_PERMANENT,
	},

	{
		.type =       SOCK_DGRAM,
		.protocol =   IPPROTO_ICMP,
		.prot =       &ping_prot,
		.ops =        &inet_sockraw_ops,
		.flags =      INET_PROTOSW_REUSE,
	},

	{
		.type =       SOCK_RAW,
		.protocol =   IPPROTO_IP,	/* wild card */
		.prot =       &raw_prot,
		.ops =        &inet_sockraw_ops,
		.flags =      INET_PROTOSW_REUSE,
	}
};

#define INETSW_ARRAY_LEN ARRAY_SIZE(inetsw_array)
void inet_register_protosw(struct inet_protosw *p)
{
	struct list_head *lh;
	struct inet_protosw *answer;
	int protocol = p->protocol;
	struct list_head *last_perm;

	spin_lock_bh(&inetsw_lock);

	if (p->type >= SOCK_MAX)
		goto out_illegal;

	/* If we are trying to override a permanent protocol, bail. */
	last_perm = &inetsw[p->type];
	list_for_each(lh, &inetsw[p->type]) {
		answer = list_entry(lh, struct inet_protosw, list);
		/* Check only the non-wild match. */
		if ((INET_PROTOSW_PERMANENT & answer->flags) == 0)
			break;
		if (protocol == answer->protocol)
			goto out_permanent;
		last_perm = lh;
	}

	/* Add the new entry after the last permanent entry if any, so that
	 * the new entry does not override a permanent entry when matched with
	 * a wild-card protocol. But it is allowed to override any existing
	 * non-permanent entry.  This means that when we remove this entry, the
	 * system automatically returns to the old behavior.
	 */
	list_add_rcu(&p->list, last_perm);
out:
	spin_unlock_bh(&inetsw_lock);

	return;

out_permanent:
	pr_err("Attempt to override permanent protocol %d\n", protocol);
	goto out;

out_illegal:
	pr_err("Ignoring attempt to register invalid socket type %d\n",
	       p->type);
	goto out;
}
EXPORT_SYMBOL(inet_register_protosw);
void inet_unregister_protosw(struct inet_protosw *p)
{
	if (INET_PROTOSW_PERMANENT & p->flags) {
		pr_err("Attempt to unregister permanent protocol %d\n",
		       p->protocol);
	} else {
		spin_lock_bh(&inetsw_lock);
		list_del_rcu(&p->list);
		spin_unlock_bh(&inetsw_lock);

		synchronize_net();
	}
}
EXPORT_SYMBOL(inet_unregister_protosw);
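
/*
 * Illustrative sketch (not part of this file): how another in-kernel
 * protocol would hook into the inetsw table above, roughly the way SCTP or
 * UDP-Lite do. The struct names and field values below are assumptions
 * for the example only:
 *
 *	static struct inet_protosw my_protosw = {
 *		.type     = SOCK_SEQPACKET,
 *		.protocol = IPPROTO_SCTP,
 *		.prot     = &my_proto,
 *		.ops      = &my_proto_ops,
 *		.flags    = 0,
 *	};
 *	inet_register_protosw(&my_protosw);
 *	...
 *	inet_unregister_protosw(&my_protosw);
 *
 * Entries registered with INET_PROTOSW_PERMANENT can never be overridden
 * or unregistered, as the checks above enforce.
 */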
static int inet_sk_reselect_saddr(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	__be32 old_saddr = inet->inet_saddr;
	__be32 daddr = inet->inet_daddr;
	struct flowi4 *fl4;
	struct rtable *rt;
	__be32 new_saddr;
	struct ip_options_rcu *inet_opt;
	int err;

	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     lockdep_sock_is_held(sk));
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;

	/* Query new route. */
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, daddr, 0, sk->sk_bound_dev_if,
			      sk->sk_protocol, inet->inet_sport,
			      inet->inet_dport, sk);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	new_saddr = fl4->saddr;

	if (new_saddr == old_saddr) {
		sk_setup_caps(sk, &rt->dst);
		return 0;
	}

	err = inet_bhash2_update_saddr(sk, &new_saddr, AF_INET);
	if (err) {
		ip_rt_put(rt);
		return err;
	}

	sk_setup_caps(sk, &rt->dst);

	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_dynaddr) > 1) {
		pr_info("%s(): shifting inet->saddr from %pI4 to %pI4\n",
			__func__, &old_saddr, &new_saddr);
	}

	/*
	 * XXX The only one ugly spot where we need to
	 * XXX really change the sockets identity after
	 * XXX it has entered the hashes. -DaveM
	 *
	 * Besides that, it does not check for connection
	 * uniqueness. Wait for troubles.
	 */
	return __sk_prot_rehash(sk);
}
int inet_sk_rebuild_header(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
	__be32 daddr;
	struct ip_options_rcu *inet_opt;
	struct flowi4 *fl4;
	int err;

	/* Route is OK, nothing to do. */
	if (rt)
		return 0;

	/* Reroute. */
	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	daddr = inet->inet_daddr;
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	rcu_read_unlock();
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr, inet->inet_saddr,
				   inet->inet_dport, inet->inet_sport,
				   sk->sk_protocol, RT_CONN_FLAGS(sk),
				   sk->sk_bound_dev_if);
	if (!IS_ERR(rt)) {
		err = 0;
		sk_setup_caps(sk, &rt->dst);
	} else {
		err = PTR_ERR(rt);

		/* Routing failed... */
		sk->sk_route_caps = 0;
		/*
		 * Other protocols have to map its equivalent state to TCP_SYN_SENT.
		 * DCCP maps its DCCP_REQUESTING state to TCP_SYN_SENT. -acme
		 */
		if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_dynaddr) ||
		    sk->sk_state != TCP_SYN_SENT ||
		    (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
		    (err = inet_sk_reselect_saddr(sk)) != 0)
			WRITE_ONCE(sk->sk_err_soft, -err);
	}

	return err;
}
EXPORT_SYMBOL(inet_sk_rebuild_header);
void inet_sk_set_state(struct sock *sk, int state)
{
	trace_inet_sock_set_state(sk, sk->sk_state, state);
	sk->sk_state = state;
}
EXPORT_SYMBOL(inet_sk_set_state);

void inet_sk_state_store(struct sock *sk, int newstate)
{
	trace_inet_sock_set_state(sk, sk->sk_state, newstate);
	smp_store_release(&sk->sk_state, newstate);
}
struct sk_buff *inet_gso_segment(struct sk_buff *skb,
				 netdev_features_t features)
{
	bool udpfrag = false, fixedid = false, gso_partial, encap;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	const struct net_offload *ops;
	unsigned int offset = 0;
	struct iphdr *iph;
	int proto, tot_len;
	int nhoff;
	int ihl;
	int id;

	skb_reset_network_header(skb);
	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
		goto out;

	iph = ip_hdr(skb);
	ihl = iph->ihl * 4;
	if (ihl < sizeof(*iph))
		goto out;

	id = ntohs(iph->id);
	proto = iph->protocol;

	/* Warning: after this point, iph might be no longer valid */
	if (unlikely(!pskb_may_pull(skb, ihl)))
		goto out;
	__skb_pull(skb, ihl);

	encap = SKB_GSO_CB(skb)->encap_level > 0;
	if (encap)
		features &= skb->dev->hw_enc_features;
	SKB_GSO_CB(skb)->encap_level += ihl;

	skb_reset_transport_header(skb);

	segs = ERR_PTR(-EPROTONOSUPPORT);

	if (!skb->encapsulation || encap) {
		udpfrag = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
		fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID);

		/* fixed ID is invalid if DF bit is not set */
		if (fixedid && !(ip_hdr(skb)->frag_off & htons(IP_DF)))
			goto out;
	}

	ops = rcu_dereference(inet_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment)) {
		segs = ops->callbacks.gso_segment(skb, features);
		if (!segs)
			skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
	}

	if (IS_ERR_OR_NULL(segs))
		goto out;

	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

	skb = segs;
	do {
		iph = (struct iphdr *)(skb_mac_header(skb) + nhoff);
		if (udpfrag) {
			iph->frag_off = htons(offset >> 3);
			if (skb->next)
				iph->frag_off |= htons(IP_MF);
			offset += skb->len - nhoff - ihl;
			tot_len = skb->len - nhoff;
		} else if (skb_is_gso(skb)) {
			if (!fixedid) {
				iph->id = htons(id);
				id += skb_shinfo(skb)->gso_segs;
			}

			if (gso_partial)
				tot_len = skb_shinfo(skb)->gso_size +
					  SKB_GSO_CB(skb)->data_offset +
					  skb->head - (unsigned char *)iph;
			else
				tot_len = skb->len - nhoff;
		} else {
			if (!fixedid)
				iph->id = htons(id++);
			tot_len = skb->len - nhoff;
		}
		iph->tot_len = htons(tot_len);
		ip_send_check(iph);
		if (encap)
			skb_reset_inner_headers(skb);
		skb->network_header = (u8 *)iph - skb->head;
		skb_reset_mac_len(skb);
	} while ((skb = skb->next));

out:
	return segs;
}
static struct sk_buff *ipip_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP4))
		return ERR_PTR(-EINVAL);

	return inet_gso_segment(skb, features);
}
struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	const struct net_offload *ops;
	struct sk_buff *pp = NULL;
	const struct iphdr *iph;
	struct sk_buff *p;
	unsigned int hlen;
	unsigned int off;
	unsigned int id;
	int flush = 1;
	int proto;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*iph);
	iph = skb_gro_header(skb, hlen, off);
	if (unlikely(!iph))
		goto out;

	proto = iph->protocol;

	ops = rcu_dereference(inet_offloads[proto]);
	if (!ops || !ops->callbacks.gro_receive)
		goto out;

	if (*(u8 *)iph != 0x45)
		goto out;

	if (ip_is_fragment(iph))
		goto out;

	if (unlikely(ip_fast_csum((u8 *)iph, 5)))
		goto out;

	NAPI_GRO_CB(skb)->proto = proto;
	id = ntohl(*(__be32 *)&iph->id);
	flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id & ~IP_DF));
	id >>= 16;

	list_for_each_entry(p, head, list) {
		struct iphdr *iph2;
		u16 flush_id;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		iph2 = (struct iphdr *)(p->data + off);
		/* The above works because, with the exception of the top
		 * (inner most) layer, we only aggregate pkts with the same
		 * hdr length so all the hdrs we'll need to verify will start
		 * at the same offset.
		 */
		if ((iph->protocol ^ iph2->protocol) |
		    ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |
		    ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		/* All fields must match except length and checksum. */
		NAPI_GRO_CB(p)->flush |=
			(iph->ttl ^ iph2->ttl) |
			(iph->tos ^ iph2->tos) |
			((iph->frag_off ^ iph2->frag_off) & htons(IP_DF));

		NAPI_GRO_CB(p)->flush |= flush;

		/* We need to store the IP ID check to be included later
		 * when we can verify that this packet does in fact belong
		 * to a given flow.
		 */
		flush_id = (u16)(id - ntohs(iph2->id));

		/* This bit of code makes it much easier for us to identify
		 * the cases where we are doing atomic vs non-atomic IP ID
		 * checks.  Specifically an atomic check can return IP ID
		 * values 0 - 0xFFFF, while a non-atomic check can only
		 * return 0 or 0xFFFF.
		 */
		if (!NAPI_GRO_CB(p)->is_atomic ||
		    !(iph->frag_off & htons(IP_DF))) {
			flush_id ^= NAPI_GRO_CB(p)->count;
			flush_id = flush_id ? 0xFFFF : 0;
		}

		/* If the previous IP ID value was based on an atomic
		 * datagram we can overwrite the value and ignore it.
		 */
		if (NAPI_GRO_CB(skb)->is_atomic)
			NAPI_GRO_CB(p)->flush_id = flush_id;
		else
			NAPI_GRO_CB(p)->flush_id |= flush_id;
	}

	NAPI_GRO_CB(skb)->is_atomic = !!(iph->frag_off & htons(IP_DF));
	NAPI_GRO_CB(skb)->flush |= flush;
	skb_set_network_header(skb, off);
	/* The above will be needed by the transport layer if there is one
	 * immediately following this IP hdr.
	 */

	/* Note : No need to call skb_gro_postpull_rcsum() here,
	 * as we already checked checksum over ipv4 header was 0
	 */
	skb_gro_pull(skb, sizeof(*iph));
	skb_set_transport_header(skb, skb_gro_offset(skb));

	pp = indirect_call_gro_receive(tcp4_gro_receive, udp4_gro_receive,
				       ops->callbacks.gro_receive, head, skb);

out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}
static struct sk_buff *ipip_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	if (NAPI_GRO_CB(skb)->encap_mark) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	NAPI_GRO_CB(skb)->encap_mark = 1;

	return inet_gro_receive(head, skb);
}
#define SECONDS_PER_DAY	86400

/* inet_current_timestamp - Return IP network timestamp
 *
 * Return milliseconds since midnight in network byte order.
 */
__be32 inet_current_timestamp(void)
{
	u32 secs;
	u32 msecs;
	struct timespec64 ts;

	ktime_get_real_ts64(&ts);

	/* Get secs since midnight. */
	(void)div_u64_rem(ts.tv_sec, SECONDS_PER_DAY, &secs);
	/* Convert to msecs. */
	msecs = secs * MSEC_PER_SEC;
	/* Convert nsec to msec. */
	msecs += (u32)ts.tv_nsec / NSEC_PER_MSEC;

	/* Convert to network byte order. */
	return htonl(msecs);
}
EXPORT_SYMBOL(inet_current_timestamp);
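
/*
 * Worked example for the arithmetic above: at 01:02:03.004 UTC,
 * div_u64_rem() leaves secs = 1*3600 + 2*60 + 3 = 3723, so
 * msecs = 3723 * 1000 + 4 = 3723004, returned as htonl(3723004).
 * This is the "milliseconds since midnight UT" timestamp format of
 * RFC 791/792, used by the ICMP timestamp and IP timestamp options.
 */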
int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
{
	if (sk->sk_family == AF_INET)
		return ip_recv_error(sk, msg, len, addr_len);
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
#endif
	return -EINVAL;
}
int inet_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct iphdr *iph = (struct iphdr *)(skb->data + nhoff);
	const struct net_offload *ops;
	__be16 totlen = iph->tot_len;
	int proto = iph->protocol;
	int err = -ENOSYS;

	if (skb->encapsulation) {
		skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP));
		skb_set_inner_network_header(skb, nhoff);
	}

	iph_set_totlen(iph, skb->len - nhoff);
	csum_replace2(&iph->check, totlen, iph->tot_len);

	ops = rcu_dereference(inet_offloads[proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out;

	/* Only need to add sizeof(*iph) to get to the next hdr below
	 * because any hdr with option will have been flushed in
	 * inet_gro_receive().
	 */
	err = INDIRECT_CALL_2(ops->callbacks.gro_complete,
			      tcp4_gro_complete, udp4_gro_complete,
			      skb, nhoff + sizeof(*iph));

out:
	return err;
}
static int ipip_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;
	return inet_gro_complete(skb, nhoff);
}
int inet_ctl_sock_create(struct sock **sk, unsigned short family,
			 unsigned short type, unsigned char protocol,
			 struct net *net)
{
	struct socket *sock;
	int rc = sock_create_kern(net, family, type, protocol, &sock);

	if (rc == 0) {
		*sk = sock->sk;
		(*sk)->sk_allocation = GFP_ATOMIC;
		(*sk)->sk_use_task_frag = false;
		/*
		 * Unhash it so that IP input processing does not even see it,
		 * we do not wish this socket to see incoming packets.
		 */
		(*sk)->sk_prot->unhash(*sk);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
unsigned long snmp_fold_field(void __percpu *mib, int offt)
{
	unsigned long res = 0;
	int i;

	for_each_possible_cpu(i)
		res += snmp_get_cpu_field(mib, i, offt);
	return res;
}
EXPORT_SYMBOL_GPL(snmp_fold_field);
#if BITS_PER_LONG==32

u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offt,
			 size_t syncp_offset)
{
	void *bhptr;
	struct u64_stats_sync *syncp;
	u64 v;
	unsigned int start;

	bhptr = per_cpu_ptr(mib, cpu);
	syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
	do {
		start = u64_stats_fetch_begin(syncp);
		v = *(((u64 *)bhptr) + offt);
	} while (u64_stats_fetch_retry(syncp, start));

	return v;
}
EXPORT_SYMBOL_GPL(snmp_get_cpu_field64);

u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_offset)
{
	u64 res = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		res += snmp_get_cpu_field64(mib, cpu, offt, syncp_offset);
	}
	return res;
}
EXPORT_SYMBOL_GPL(snmp_fold_field64);
#endif
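
/*
 * Note on the 32-bit path above: a 64-bit counter cannot be loaded
 * atomically when BITS_PER_LONG==32, so each per-cpu value is sampled
 * inside a u64_stats_fetch_begin()/u64_stats_fetch_retry() seqcount loop
 * and re-read if a writer updated it mid-read. On 64-bit kernels plain
 * loads are atomic and snmp_fold_field() is used instead.
 */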
#ifdef CONFIG_IP_MULTICAST
static const struct net_protocol igmp_protocol = {
	.handler =	igmp_rcv,
};
#endif

static const struct net_protocol tcp_protocol = {
	.handler	=	tcp_v4_rcv,
	.err_handler	=	tcp_v4_err,
	.no_policy	=	1,
	.icmp_strict_tag_validation = 1,
};

static const struct net_protocol udp_protocol = {
	.handler =	udp_rcv,
	.err_handler =	udp_err,
	.no_policy =	1,
};

static const struct net_protocol icmp_protocol = {
	.handler =	icmp_rcv,
	.err_handler =	icmp_err,
	.no_policy =	1,
};
static __net_init int ipv4_mib_init_net(struct net *net)
{
	int i;

	net->mib.tcp_statistics = alloc_percpu(struct tcp_mib);
	if (!net->mib.tcp_statistics)
		goto err_tcp_mib;
	net->mib.ip_statistics = alloc_percpu(struct ipstats_mib);
	if (!net->mib.ip_statistics)
		goto err_ip_mib;

	for_each_possible_cpu(i) {
		struct ipstats_mib *af_inet_stats;
		af_inet_stats = per_cpu_ptr(net->mib.ip_statistics, i);
		u64_stats_init(&af_inet_stats->syncp);
	}

	net->mib.net_statistics = alloc_percpu(struct linux_mib);
	if (!net->mib.net_statistics)
		goto err_net_mib;
	net->mib.udp_statistics = alloc_percpu(struct udp_mib);
	if (!net->mib.udp_statistics)
		goto err_udp_mib;
	net->mib.udplite_statistics = alloc_percpu(struct udp_mib);
	if (!net->mib.udplite_statistics)
		goto err_udplite_mib;
	net->mib.icmp_statistics = alloc_percpu(struct icmp_mib);
	if (!net->mib.icmp_statistics)
		goto err_icmp_mib;
	net->mib.icmpmsg_statistics = kzalloc(sizeof(struct icmpmsg_mib),
					      GFP_KERNEL);
	if (!net->mib.icmpmsg_statistics)
		goto err_icmpmsg_mib;

	tcp_mib_init(net);
	return 0;

err_icmpmsg_mib:
	free_percpu(net->mib.icmp_statistics);
err_icmp_mib:
	free_percpu(net->mib.udplite_statistics);
err_udplite_mib:
	free_percpu(net->mib.udp_statistics);
err_udp_mib:
	free_percpu(net->mib.net_statistics);
err_net_mib:
	free_percpu(net->mib.ip_statistics);
err_ip_mib:
	free_percpu(net->mib.tcp_statistics);
err_tcp_mib:
	return -ENOMEM;
}
static __net_exit void ipv4_mib_exit_net(struct net *net)
{
	kfree(net->mib.icmpmsg_statistics);
	free_percpu(net->mib.icmp_statistics);
	free_percpu(net->mib.udplite_statistics);
	free_percpu(net->mib.udp_statistics);
	free_percpu(net->mib.net_statistics);
	free_percpu(net->mib.ip_statistics);
	free_percpu(net->mib.tcp_statistics);
#ifdef CONFIG_MPTCP
	/* allocated on demand, see mptcp_init_sock() */
	free_percpu(net->mib.mptcp_statistics);
#endif
}

static __net_initdata struct pernet_operations ipv4_mib_ops = {
	.init = ipv4_mib_init_net,
	.exit = ipv4_mib_exit_net,
};

static int __init init_ipv4_mibs(void)
{
	return register_pernet_subsys(&ipv4_mib_ops);
}
static __net_init int inet_init_net(struct net *net)
{
	/*
	 * Set defaults for local port range
	 */
	seqlock_init(&net->ipv4.ip_local_ports.lock);
	net->ipv4.ip_local_ports.range[0] =  32768;
	net->ipv4.ip_local_ports.range[1] =  60999;

	seqlock_init(&net->ipv4.ping_group_range.lock);
	/*
	 * Sane defaults - nobody may create ping sockets.
	 * Boot scripts should set this to distro-specific group.
	 */
	net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1);
	net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0);

	/* Default values for sysctl-controlled parameters.
	 * We set them here, in case sysctl is not compiled.
	 */
	net->ipv4.sysctl_ip_default_ttl = IPDEFTTL;
	net->ipv4.sysctl_ip_fwd_update_priority = 1;
	net->ipv4.sysctl_ip_dynaddr = 0;
	net->ipv4.sysctl_ip_early_demux = 1;
	net->ipv4.sysctl_udp_early_demux = 1;
	net->ipv4.sysctl_tcp_early_demux = 1;
	net->ipv4.sysctl_nexthop_compat_mode = 1;
#ifdef CONFIG_SYSCTL
	net->ipv4.sysctl_ip_prot_sock = PROT_SOCK;
#endif

	/* Some igmp sysctl, whose values are always used */
	net->ipv4.sysctl_igmp_max_memberships = 20;
	net->ipv4.sysctl_igmp_max_msf = 10;
	/* IGMP reports for link-local multicast groups are enabled by default */
	net->ipv4.sysctl_igmp_llm_reports = 1;
	net->ipv4.sysctl_igmp_qrv = 2;

	net->ipv4.sysctl_fib_notify_on_flag_change = 0;

	return 0;
}
static __net_initdata struct pernet_operations af_inet_ops = {
	.init = inet_init_net,
};

static int __init init_inet_pernet_ops(void)
{
	return register_pernet_subsys(&af_inet_ops);
}

static int ipv4_proc_init(void);

/*
 *	IP protocol layer initialiser
 */

static struct packet_offload ip_packet_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_IP),
	.callbacks = {
		.gso_segment = inet_gso_segment,
		.gro_receive = inet_gro_receive,
		.gro_complete = inet_gro_complete,
	},
};

static const struct net_offload ipip_offload = {
	.callbacks = {
		.gso_segment	= ipip_gso_segment,
		.gro_receive	= ipip_gro_receive,
		.gro_complete	= ipip_gro_complete,
	},
};

static int __init ipip_offload_init(void)
{
	return inet_add_offload(&ipip_offload, IPPROTO_IPIP);
}

static int __init ipv4_offload_init(void)
{
	/*
	 * Add offloads
	 */
	if (udpv4_offload_init() < 0)
		pr_crit("%s: Cannot add UDP protocol offload\n", __func__);
	if (tcpv4_offload_init() < 0)
		pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
	if (ipip_offload_init() < 0)
		pr_crit("%s: Cannot add IPIP protocol offload\n", __func__);

	dev_add_offload(&ip_packet_offload);
	return 0;
}

fs_initcall(ipv4_offload_init);
static struct packet_type ip_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_IP),
	.func = ip_rcv,
	.list_func = ip_list_rcv,
};
static int __init inet_init(void)
{
	struct inet_protosw *q;
	struct list_head *r;
	int rc;

	sock_skb_cb_check_size(sizeof(struct inet_skb_parm));

	raw_hashinfo_init(&raw_v4_hashinfo);

	rc = proto_register(&tcp_prot, 1);
	if (rc)
		goto out;

	rc = proto_register(&udp_prot, 1);
	if (rc)
		goto out_unregister_tcp_proto;

	rc = proto_register(&raw_prot, 1);
	if (rc)
		goto out_unregister_udp_proto;

	rc = proto_register(&ping_prot, 1);
	if (rc)
		goto out_unregister_raw_proto;

	/*
	 *	Tell SOCKET that we are alive...
	 */

	(void)sock_register(&inet_family_ops);

#ifdef CONFIG_SYSCTL
	ip_static_sysctl_init();
#endif

	/*
	 *	Add all the base protocols.
	 */

	if (inet_add_protocol(&icmp_protocol, IPPROTO_ICMP) < 0)
		pr_crit("%s: Cannot add ICMP protocol\n", __func__);
	if (inet_add_protocol(&udp_protocol, IPPROTO_UDP) < 0)
		pr_crit("%s: Cannot add UDP protocol\n", __func__);
	if (inet_add_protocol(&tcp_protocol, IPPROTO_TCP) < 0)
		pr_crit("%s: Cannot add TCP protocol\n", __func__);
#ifdef CONFIG_IP_MULTICAST
	if (inet_add_protocol(&igmp_protocol, IPPROTO_IGMP) < 0)
		pr_crit("%s: Cannot add IGMP protocol\n", __func__);
#endif

	/* Register the socket-side information for inet_create. */
	for (r = &inetsw[0]; r < &inetsw[SOCK_MAX]; ++r)
		INIT_LIST_HEAD(r);

	for (q = inetsw_array; q < &inetsw_array[INETSW_ARRAY_LEN]; ++q)
		inet_register_protosw(q);

	/*
	 *	Set the ARP module up
	 */

	arp_init();

	/*
	 *	Set the IP module up
	 */

	ip_init();

	/* Initialise per-cpu ipv4 mibs */
	if (init_ipv4_mibs())
		panic("%s: Cannot init ipv4 mibs\n", __func__);

	/* Setup TCP slab cache for open requests. */
	tcp_init();

	/* Setup UDP memory threshold */
	udp_init();

	/* Add UDP-Lite (RFC 3828) */
	udplite4_register();

	raw_init();

	ping_init();

	/*
	 *	Set the ICMP layer up
	 */

	if (icmp_init() < 0)
		panic("Failed to create the ICMP control socket.\n");

	/*
	 *	Initialise the multicast router
	 */
#if defined(CONFIG_IP_MROUTE)
	if (ip_mr_init())
		pr_crit("%s: Cannot init ipv4 mroute\n", __func__);
#endif

	if (init_inet_pernet_ops())
		pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__);

	ipv4_proc_init();

	ipfrag_init();

	dev_add_pack(&ip_packet_type);

	ip_tunnel_core_init();

	rc = 0;
out:
	return rc;
out_unregister_raw_proto:
	proto_unregister(&raw_prot);
out_unregister_udp_proto:
	proto_unregister(&udp_prot);
out_unregister_tcp_proto:
	proto_unregister(&tcp_prot);
	goto out;
}

fs_initcall(inet_init);
/* ------------------------------------------------------------------------ */

#ifdef CONFIG_PROC_FS
static int __init ipv4_proc_init(void)
{
	int rc = 0;

	if (raw_proc_init())
		goto out_raw;
	if (tcp4_proc_init())
		goto out_tcp;
	if (udp4_proc_init())
		goto out_udp;
	if (ping_proc_init())
		goto out_ping;
	if (ip_misc_proc_init())
		goto out_misc;
out:
	return rc;
out_misc:
	ping_proc_exit();
out_ping:
	udp4_proc_exit();
out_udp:
	tcp4_proc_exit();
out_tcp:
	raw_proc_exit();
out_raw:
	rc = -ENOMEM;
	goto out;
}

#else /* CONFIG_PROC_FS */
static int __init ipv4_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */