// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	UDP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/udp.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Kazunori MIYAZAWA @USAGI:	change process style to use ip6_append_data
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
 */

#include <linux/bpf-cgroup.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/indirect_call_wrapper.h>

#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/seg6.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/ip6_tunnel.h>
#include <trace/events/udp.h>
#include <net/xfrm.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/busy_poll.h>
#include <net/sock_reuseport.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <trace/events/skb.h>
#include "udp_impl.h"

static void udpv6_destruct_sock(struct sock *sk)
{
	udp_destruct_common(sk);
	inet6_sock_destruct(sk);
}

int udpv6_init_sock(struct sock *sk)
{
	udp_lib_init_sock(sk);
	sk->sk_destruct = udpv6_destruct_sock;
	set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
	return 0;
}

INDIRECT_CALLABLE_SCOPE
u32 udp6_ehashfn(const struct net *net,
		 const struct in6_addr *laddr,
		 const u16 lport,
		 const struct in6_addr *faddr,
		 const __be16 fport)
{
	static u32 udp6_ehash_secret __read_mostly;
	static u32 udp_ipv6_hash_secret __read_mostly;

	u32 lhash, fhash;

	net_get_random_once(&udp6_ehash_secret,
			    sizeof(udp6_ehash_secret));
	net_get_random_once(&udp_ipv6_hash_secret,
			    sizeof(udp_ipv6_hash_secret));

	lhash = (__force u32)laddr->s6_addr32[3];
	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);

	return __inet6_ehashfn(lhash, lport, fhash, fport,
			       udp6_ehash_secret + net_hash_mix(net));
}
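
/* Hash design note: only the last 32 bits of the local address feed
 * lhash (for a v4-mapped address this is exactly the embedded IPv4
 * address), while the foreign address is fully mixed by
 * __ipv6_addr_jhash(). The boot-time random secrets make the resulting
 * chain placement unpredictable to remote senders.
 */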

int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
	unsigned int hash2_partial =
		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}
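
/* Note: the partial hash is computed with port 0 on purpose;
 * udp_lib_get_port() folds the chosen port into udp_portaddr_hash once
 * binding succeeds, so only the port part of the hash ever changes.
 */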

void udp_v6_rehash(struct sock *sk)
{
	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
					  &sk->sk_v6_rcv_saddr,
					  inet_sk(sk)->inet_num);

	udp_lib_rehash(sk, new_hash);
}

static int compute_score(struct sock *sk, struct net *net,
			 const struct in6_addr *saddr, __be16 sport,
			 const struct in6_addr *daddr, unsigned short hnum,
			 int dif, int sdif)
{
	int bound_dev_if, score;
	struct inet_sock *inet;
	bool dev_match;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6)
		return -1;

	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
		return -1;

	score = 0;
	inet = inet_sk(sk);

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score++;
	}

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
			return -1;
		score++;
	}

	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
	dev_match = udp_sk_bound_dev_eq(net, bound_dev_if, dif, sdif);
	if (!dev_match)
		return -1;
	if (bound_dev_if)
		score++;

	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
		score++;

	return score;
}

/* called with rcu_read_lock() */
static struct sock *udp6_lib_lookup2(struct net *net,
		const struct in6_addr *saddr, __be16 sport,
		const struct in6_addr *daddr, unsigned int hnum,
		int dif, int sdif, struct udp_hslot *hslot2,
		struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness;

	result = NULL;
	badness = -1;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, sdif);
		if (score > badness) {
			badness = score;

			if (sk->sk_state == TCP_ESTABLISHED) {
				result = sk;
				continue;
			}

			result = inet6_lookup_reuseport(net, sk, skb, sizeof(struct udphdr),
							saddr, sport, daddr, hnum, udp6_ehashfn);
			if (!result) {
				result = sk;
				continue;
			}

			/* Fall back to scoring if group has connections */
			if (!reuseport_has_conns(sk))
				return result;

			/* Reuseport logic returned an error, keep original score. */
			if (IS_ERR(result))
				continue;

			badness = compute_score(sk, net, saddr, sport,
						daddr, hnum, dif, sdif);
		}
	}
	return result;
}

/* rcu_read_lock() must be held */
struct sock *__udp6_lib_lookup(struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *udptable,
			       struct sk_buff *skb)
{
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2;
	struct udp_hslot *hslot2;
	struct sock *result, *sk;

	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	/* Lookup connected or non-wildcard sockets */
	result = udp6_lib_lookup2(net, saddr, sport,
				  daddr, hnum, dif, sdif,
				  hslot2, skb);
	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
		goto done;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled) &&
	    udptable == net->ipv4.udp_table) {
		sk = inet6_lookup_run_sk_lookup(net, IPPROTO_UDP, skb, sizeof(struct udphdr),
						saddr, sport, daddr, hnum, dif,
						udp6_ehashfn);
		if (sk) {
			result = sk;
			goto done;
		}
	}

	/* Got non-wildcard socket or error on first lookup */
	if (result)
		goto done;

	/* Lookup wildcard sockets */
	hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	result = udp6_lib_lookup2(net, saddr, sport,
				  &in6addr_any, hnum, dif, sdif,
				  hslot2, skb);
done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__udp6_lib_lookup);

static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
					  __be16 sport, __be16 dport,
					  struct udp_table *udptable)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), udptable, skb);
}

struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	int iif, sdif;

	inet6_get_iif_sdif(skb, &iif, &sdif);

	return __udp6_lib_lookup(net, &iph->saddr, sport,
				 &iph->daddr, dport, iif,
				 sdif, net->ipv4.udp_table, NULL);
}

/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, net->ipv4.udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup);
#endif
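
/* Illustrative caller pattern (sketch, not from this file): the lookup
 * itself requires RCU, and a returned socket carries a reference that
 * the caller must drop:
 *
 *	rcu_read_lock();
 *	sk = udp6_lib_lookup(net, saddr, sport, daddr, dport, dif);
 *	rcu_read_unlock();
 *	if (sk) {
 *		...
 *		sock_put(sk);
 *	}
 */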

/* do not use the scratch area len for jumbograms: their length exceeds the
 * scratch area space; note that the IP6CB flags are still in the first
 * cacheline, so checking for jumbograms is cheap
 */
static int udp6_skb_len(struct sk_buff *skb)
{
	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
}

/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */

int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		  int flags, int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int off, err, peeking = flags & MSG_PEEK;
	int is_udplite = IS_UDPLITE(sk);
	struct udp_mib __percpu *mib;
	bool checksum_valid = false;
	int is_udp4;

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len, addr_len);

	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);

try_again:
	off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, &off, &err);
	if (!skb)
		return err;

	ulen = udp6_skb_len(skb);
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	is_udp4 = (skb->protocol == htons(ETH_P_IP));
	mib = __UDPX_MIB(sk, is_udp4);

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				!__udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (unlikely(err)) {
		if (!peeking) {
			atomic_inc(&sk->sk_drops);
			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
		}
		kfree_skb(skb);
		return err;
	}
	if (!peeking)
		SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);

	sock_recv_cmsgs(msg, sk, skb);

	/* Copy the address. */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = udp_hdr(skb)->source;
		sin6->sin6_flowinfo = 0;

		if (is_udp4) {
			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
					       &sin6->sin6_addr);
			sin6->sin6_scope_id = 0;
		} else {
			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
			sin6->sin6_scope_id =
				ipv6_iface_scope_id(&sin6->sin6_addr,
						    inet6_iif(skb));
		}
		*addr_len = sizeof(*sin6);

		BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
						      (struct sockaddr *)sin6);
	}

	if (udp_sk(sk)->gro_enabled)
		udp_cmsg_recv(msg, sk, skb);

	if (np->rxopt.all)
		ip6_datagram_recv_common_ctl(sk, msg, skb);

	if (is_udp4) {
		if (inet_cmsg_flags(inet))
			ip_cmsg_recv_offset(msg, sk, skb,
					    sizeof(struct udphdr), off);
	} else {
		if (np->rxopt.all)
			ip6_datagram_recv_specific_ctl(sk, msg, skb);
	}

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
		SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
		SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
	}
	kfree_skb(skb);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}
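
/* Userspace-visible detail (illustrative): since err is replaced by the
 * full datagram length when MSG_TRUNC is set, an application can size
 * its buffer with recv(fd, NULL, 0, MSG_PEEK | MSG_TRUNC) and then
 * re-read the same datagram for real.
 */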

DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void)
{
	static_branch_inc(&udpv6_encap_needed_key);
}
EXPORT_SYMBOL(udpv6_encap_enable);

/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 * through error handlers in encapsulations looking for a match.
 */
static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
				      struct inet6_skb_parm *opt,
				      u8 type, u8 code, int offset, __be32 info)
{
	int i;

	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
			       u8 type, u8 code, int offset, __be32 info);
		const struct ip6_tnl_encap_ops *encap;

		encap = rcu_dereference(ip6tun_encaps[i]);
		if (!encap)
			continue;
		handler = encap->err_handler;
		if (handler && !handler(skb, opt, type, code, offset, info))
			return 0;
	}

	return -ENOENT;
}
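
/* Tunnels with arbitrary destination ports (e.g. fou6) plug their
 * err_handler into ip6tun_encaps[] via ip6_tnl_encap_add_ops(); a
 * handler returning 0 means it recognised the inner packet and ends
 * the probe loop above.
 */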

/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 * reversing source and destination port: this will match tunnels that force the
 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 * lwtunnels might actually break this assumption by being configured with
 * different destination ports on endpoints, in this case we won't be able to
 * trace ICMP messages back to them.
 *
 * If this doesn't match any socket, probe tunnels with arbitrary destination
 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 * we've sent packets to won't necessarily match the local destination port.
 *
 * Then ask the tunnel implementation to match the error against a valid
 * association.
 *
 * Return an error if we can't find a match, the socket if we need further
 * processing, zero otherwise.
 */
static struct sock *__udp6_lib_err_encap(struct net *net,
					 const struct ipv6hdr *hdr, int offset,
					 struct udphdr *uh,
					 struct udp_table *udptable,
					 struct sock *sk,
					 struct sk_buff *skb,
					 struct inet6_skb_parm *opt,
					 u8 type, u8 code, __be32 info)
{
	int (*lookup)(struct sock *sk, struct sk_buff *skb);
	int network_offset, transport_offset;
	struct udp_sock *up;

	network_offset = skb_network_offset(skb);
	transport_offset = skb_transport_offset(skb);

	/* Network header needs to point to the outer IPv6 header inside ICMP */
	skb_reset_network_header(skb);

	/* Transport header needs to point to the UDP header */
	skb_set_transport_header(skb, offset);

	if (sk) {
		up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (lookup && lookup(sk, skb))
			sk = NULL;

		goto out;
	}

	sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
			       &hdr->saddr, uh->dest,
			       inet6_iif(skb), 0, udptable, skb);
	if (sk) {
		up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (!lookup || lookup(sk, skb))
			sk = NULL;
	}

out:
	if (!sk)
		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
							offset, info));

	skb_set_transport_header(skb, transport_offset);
	skb_set_network_header(skb, network_offset);

	return sk;
}

int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		   u8 type, u8 code, int offset, __be32 info,
		   struct udp_table *udptable)
{
	struct ipv6_pinfo *np;
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct in6_addr *saddr = &hdr->saddr;
	const struct in6_addr *daddr = seg6_get_daddr(skb, opt) ? : &hdr->daddr;
	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
	bool tunnel = false;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
			       inet6_iif(skb), inet6_sdif(skb), udptable, NULL);

	if (!sk || udp_sk(sk)->encap_type) {
		/* No socket for error: try tunnels before discarding */
		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
						  udptable, sk, skb,
						  opt, type, code, info);
			if (!sk)
				return 0;
		} else {
			sk = ERR_PTR(-ENOENT);
		}

		if (IS_ERR(sk)) {
			__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
					  ICMP6_MIB_INERRORS);
			return PTR_ERR(sk);
		}

		tunnel = true;
	}

	harderr = icmpv6_err_convert(type, code, &err);
	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		if (!ip6_sk_accept_pmtu(sk))
			goto out;
		ip6_sk_update_pmtu(skb, sk, info);
		if (np->pmtudisc != IPV6_PMTUDISC_DONT)
			harderr = 1;
	}
	if (type == NDISC_REDIRECT) {
		if (tunnel) {
			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
				     READ_ONCE(sk->sk_mark), sk->sk_uid);
		} else {
			ip6_sk_redirect(skb, sk);
		}
		goto out;
	}

	/* Tunnels don't have an application socket: don't pass errors back */
	if (tunnel) {
		if (udp_sk(sk)->encap_err_rcv)
			udp_sk(sk)->encap_err_rcv(sk, skb, err, uh->dest,
						  ntohl(info), (u8 *)(uh+1));
		goto out;
	}

	if (!np->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else {
		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
	}

	sk->sk_err = err;
	sk_error_report(sk);
out:
	return 0;
}

static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);
		enum skb_drop_reason drop_reason;

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM) {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_RCVBUFERRORS, is_udplite);
			drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
		} else {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_MEMERRORS, is_udplite);
			drop_reason = SKB_DROP_REASON_PROTO_MEM;
		}
		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb_reason(skb, drop_reason);
		trace_udp_fail_queue_rcv_skb(rc, sk);
		return -1;
	}

	return 0;
}

static __inline__ int udpv6_err(struct sk_buff *skb,
				struct inet6_skb_parm *opt, u8 type,
				u8 code, int offset, __be32 info)
{
	return __udp6_lib_err(skb, opt, type, code, offset, info,
			      dev_net(skb->dev)->ipv4.udp_table);
}

static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
{
	enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
		goto drop;
	}
	nf_reset_ct(skb);

	if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = READ_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP6_INC_STATS(sock_net(sk),
						 UDP_MIB_INDATAGRAMS,
						 is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
	 */
	if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
		if (up->pcrlen == 0) {          /* full coverage was set  */
			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) {
		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
		goto drop;
	}

	udp_csum_pull_header(skb);

	skb_dst_drop(skb);

	return __udpv6_queue_rcv_skb(sk, skb);

csum_error:
	drop_reason = SKB_DROP_REASON_UDP_CSUM;
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb_reason(skb, drop_reason);
	return -1;
}

static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *next, *segs;
	int ret;

	if (likely(!udp_unexpected_gso(sk, skb)))
		return udpv6_queue_rcv_one_skb(sk, skb);

	__skb_push(skb, -skb_mac_offset(skb));
	segs = udp_rcv_segment(sk, skb, false);
	skb_list_walk_safe(segs, skb, next) {
		__skb_pull(skb, skb_transport_offset(skb));

		udp_post_segment_fix_csum(skb);
		ret = udpv6_queue_rcv_one_skb(sk, skb);
		if (ret > 0)
			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
						 true);
	}
	return 0;
}

static bool __udp_v6_is_mcast_sock(struct net *net, const struct sock *sk,
				   __be16 loc_port, const struct in6_addr *loc_addr,
				   __be16 rmt_port, const struct in6_addr *rmt_addr,
				   int dif, int sdif, unsigned short hnum)
{
	const struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net))
		return false;

	if (udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6 ||
	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
	     !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
	    !udp_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif, sdif) ||
	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
	     !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
		return false;
	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
		return false;
	return true;
}

static void udp6_csum_zero_error(struct sk_buff *skb)
{
	/* RFC 2460 section 8.1 says that we SHOULD log
	 * this error. Well, it is reasonable.
	 */
	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
}
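
/* Background: RFC 2460 forbids a zero UDP checksum over IPv6, while
 * RFC 6935/6936 later allowed it for tunnel encapsulations; that is
 * what the per-socket no_check6_rx/no_check6_tx switches used in the
 * receive and transmit paths implement.
 */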

/*
 * Note: called only from the BH handler context,
 * so we don't need to lock the hashes.
 */
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
		const struct in6_addr *saddr, const struct in6_addr *daddr,
		struct udp_table *udptable, int proto)
{
	struct sock *sk, *first = NULL;
	const struct udphdr *uh = udp_hdr(skb);
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	int dif = inet6_iif(skb);
	int sdif = inet6_sdif(skb);
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
			    udptable->mask;
		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
					    uh->source, saddr, dif, sdif,
					    hnum))
			continue;
		/* If zero checksum and no_check is not on for
		 * the socket then skip it.
		 */
		if (!uh->check && !udp_sk(sk)->no_check6_rx)
			continue;
		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					 IS_UDPLITE(sk));
			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
					 IS_UDPLITE(sk));
			continue;
		}

		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udpv6_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				 proto == IPPROTO_UDPLITE);
	}
	return 0;
}
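
/* Delivery cost note: every matching group member after the first gets
 * an skb_clone() of the datagram, while the original skb is handed to
 * the first match at the end, so exactly one copy is always saved.
 */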

static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	if (udp_sk_rx_dst_set(sk, dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst_cookie = rt6_get_cookie(rt);
	}
}

/* wrapper for udpv6_queue_rcv_skb taking care of csum conversion and
 * return code conversion for ip layer consumption
 */
static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
				struct udphdr *uh)
{
	int ret;

	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
		skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);

	ret = udpv6_queue_rcv_skb(sk, skb);

	/* a return value > 0 means to resubmit the input */
	if (ret > 0)
		return ret;
	return 0;
}

int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
	const struct in6_addr *saddr, *daddr;
	struct net *net = dev_net(skb->dev);
	struct udphdr *uh;
	struct sock *sk;
	bool refcounted;
	u32 ulen = 0;

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto discard;

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;
	uh = udp_hdr(skb);

	ulen = ntohs(uh->len);
	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */

		/* Check for jumbo payload */
		if (ulen == 0)
			ulen = skb->len;

		if (ulen < sizeof(*uh))
			goto short_packet;

		if (ulen < skb->len) {
			if (pskb_trim_rcsum(skb, ulen))
				goto short_packet;
			saddr = &ipv6_hdr(skb)->saddr;
			daddr = &ipv6_hdr(skb)->daddr;
			uh = udp_hdr(skb);
		}
	}

	if (udp6_csum_init(skb, uh, proto))
		goto csum_error;

	/* Check if the socket is already available, e.g. due to early demux */
	sk = inet6_steal_sock(net, skb, sizeof(struct udphdr), saddr, uh->source, daddr, uh->dest,
			      &refcounted, udp6_ehashfn);
	if (IS_ERR(sk))
		goto no_sk;

	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
			udp6_sk_rx_dst_set(sk, dst);

		if (!uh->check && !udp_sk(sk)->no_check6_rx) {
			if (refcounted)
				sock_put(sk);
			goto report_csum_error;
		}

		ret = udp6_unicast_rcv_skb(sk, skb, uh);
		if (refcounted)
			sock_put(sk);
		return ret;
	}

	/*
	 *	Multicast receive code
	 */
	if (ipv6_addr_is_multicast(daddr))
		return __udp6_lib_mcast_deliver(net, skb,
				saddr, daddr, udptable, proto);

	/* Unicast */
	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
		if (!uh->check && !udp_sk(sk)->no_check6_rx)
			goto report_csum_error;
		return udp6_unicast_rcv_skb(sk, skb, uh);
	}
no_sk:
	reason = SKB_DROP_REASON_NO_SOCKET;

	if (!uh->check)
		goto report_csum_error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard;
	nf_reset_ct(skb);

	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

	kfree_skb_reason(skb, reason);
	return 0;

short_packet:
	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
		reason = SKB_DROP_REASON_PKT_TOO_SMALL;
	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
			    saddr, ntohs(uh->source),
			    ulen, skb->len,
			    daddr, ntohs(uh->dest));
	goto discard;

report_csum_error:
	udp6_csum_zero_error(skb);
csum_error:
	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
		reason = SKB_DROP_REASON_UDP_CSUM;
	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb_reason(skb, reason);
	return 0;
}

static struct sock *__udp6_lib_demux_lookup(struct net *net,
			__be16 loc_port, const struct in6_addr *loc_addr,
			__be16 rmt_port, const struct in6_addr *rmt_addr,
			int dif, int sdif)
{
	struct udp_table *udptable = net->ipv4.udp_table;
	unsigned short hnum = ntohs(loc_port);
	unsigned int hash2, slot2;
	struct udp_hslot *hslot2;
	__portpair ports;
	struct sock *sk;

	hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];
	ports = INET_COMBINED_PORTS(rmt_port, hnum);

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (sk->sk_state == TCP_ESTABLISHED &&
		    inet6_match(net, sk, rmt_addr, loc_addr, ports, dif, sdif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}
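
/* Early demux is best effort: only an established 4-tuple match on the
 * first socket of the chain is accepted here; everything else simply
 * falls back to the full lookup in __udp6_lib_rcv().
 */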

void udp_v6_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct udphdr *uh;
	struct sock *sk;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int sdif = inet6_sdif(skb);

	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
	    sizeof(struct udphdr)))
		return;

	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_HOST)
		sk = __udp6_lib_demux_lookup(net, uh->dest,
					     &ipv6_hdr(skb)->daddr,
					     uh->source, &ipv6_hdr(skb)->saddr,
					     dif, sdif);
	else
		return;

	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
		return;

	skb->sk = sk;
	skb->destructor = sock_efree;
	dst = rcu_dereference(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, sk->sk_rx_dst_cookie);
	if (dst) {
		/* set noref for now.
		 * any place which wants to hold dst has to call
		 * dst_hold_safe()
		 */
		skb_dst_set_noref(skb, dst);
	}
}

INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
{
	return __udp6_lib_rcv(skb, dev_net(skb->dev)->ipv4.udp_table, IPPROTO_UDP);
}

/*
 *	Throw away all pending data and cancel the corking. Socket is locked.
 */
static void udp_v6_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending == AF_INET)
		udp_flush_pending_frames(sk);
	else if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip6_flush_pending_frames(sk);
	}
}

static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			     int addr_len)
{
	if (addr_len < offsetofend(struct sockaddr, sa_family))
		return -EINVAL;
	/* The following checks are replicated from __ip6_datagram_connect()
	 * and intended to prevent BPF program called below from accessing
	 * bytes that are out of the bound specified by user in addr_len.
	 */
	if (uaddr->sa_family == AF_INET) {
		if (ipv6_only_sock(sk))
			return -EAFNOSUPPORT;
		return udp_pre_connect(sk, uaddr, addr_len);
	}

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr);
}

/**
 *	udp6_hwcsum_outgoing - handle outgoing HW checksumming
 *	@sk:	socket we are sending on
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 *	@saddr: source address
 *	@daddr: destination address
 *	@len:	length of packet
 */
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
				 const struct in6_addr *saddr,
				 const struct in6_addr *daddr, int len)
{
	unsigned int offset;
	struct udphdr *uh = udp_hdr(skb);
	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
	__wsum csum = 0;

	if (!frags) {
		/* Only one fragment on the socket.  */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
	} else {
		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		offset = skb_transport_offset(skb);
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		csum = skb->csum;

		skb->ip_summed = CHECKSUM_NONE;

		do {
			csum = csum_add(csum, frags->csum);
		} while ((frags = frags->next));

		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
					    csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
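
/* A computed checksum of zero is sent as all-ones (CSUM_MANGLED_0):
 * on the wire a literal zero means "no checksum was generated", which
 * IPv6 does not permit for ordinary datagrams.
 */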

/*
 *	Sending
 */
static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
			   struct inet_cork *cork)
{
	struct sock *sk = skb->sk;
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	__wsum csum = 0;
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int datalen = len - sizeof(*uh);

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = fl6->fl6_sport;
	uh->dest = fl6->fl6_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (cork->gso_size) {
		const int hlen = skb_network_header_len(skb) +
				 sizeof(struct udphdr);

		if (hlen + cork->gso_size > cork->fragsize) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (udp_sk(sk)->no_check6_tx) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
		    dst_xfrm(skb_dst(skb))) {
			kfree_skb(skb);
			return -EIO;
		}

		if (datalen > cork->gso_size) {
			skb_shinfo(skb)->gso_size = cork->gso_size;
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
								 cork->gso_size);
		}
		goto csum_partial;
	}

	if (is_udplite)
		csum = udplite_csum(skb);
	else if (udp_sk(sk)->no_check6_tx) {   /* UDP csum disabled */
		skb->ip_summed = CHECKSUM_NONE;
		goto send;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
csum_partial:
		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
		goto send;
	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
				    len, fl6->flowi6_proto, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip6_send_skb(skb);
	if (err) {
		if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_OUTDATAGRAMS, is_udplite);
	}
	return err;
}

static int udp_v6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;
	struct udp_sock  *up = udp_sk(sk);
	int err = 0;

	if (up->pending == AF_INET)
		return udp_push_pending_frames(sk);

	skb = ip6_finish_skb(sk);
	if (!skb)
		goto out;

	err = udp_v6_send_skb(skb, &inet_sk(sk)->cork.fl.u.ip6,
			      &inet_sk(sk)->cork.base);
out:
	up->len = 0;
	up->pending = 0;
	return err;
}

int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct ipv6_txoptions opt_space;
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
	struct in6_addr *daddr, *final_p, final;
	struct ipv6_txoptions *opt = NULL;
	struct ipv6_txoptions *opt_to_free = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct inet_cork_full cork;
	struct flowi6 *fl6 = &cork.fl.u.ip6;
	struct dst_entry *dst;
	struct ipcm6_cookie ipc6;
	int addr_len = msg->msg_namelen;
	bool connected = false;
	int ulen = len;
	int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);

	ipcm6_init(&ipc6);
	ipc6.gso_size = READ_ONCE(up->gso_size);
	ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags);
	ipc6.sockc.mark = READ_ONCE(sk->sk_mark);

	/* destination address check */
	if (sin6) {
		if (addr_len < offsetof(struct sockaddr, sa_data))
			return -EINVAL;

		switch (sin6->sin6_family) {
		case AF_INET6:
			if (addr_len < SIN6_LEN_RFC2133)
				return -EINVAL;
			daddr = &sin6->sin6_addr;
			if (ipv6_addr_any(daddr) &&
			    ipv6_addr_v4mapped(&np->saddr))
				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
						       daddr);
			break;
		case AF_INET:
			goto do_udp_sendmsg;
		case AF_UNSPEC:
			msg->msg_name = sin6 = NULL;
			msg->msg_namelen = addr_len = 0;
			daddr = NULL;
			break;
		default:
			return -EINVAL;
		}
	} else if (!up->pending) {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = &sk->sk_v6_daddr;
	} else
		daddr = NULL;

	if (daddr) {
		if (ipv6_addr_v4mapped(daddr)) {
			struct sockaddr_in sin;
			sin.sin_family = AF_INET;
			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
			sin.sin_addr.s_addr = daddr->s6_addr32[3];
			msg->msg_name = &sin;
			msg->msg_namelen = sizeof(sin);
do_udp_sendmsg:
			err = ipv6_only_sock(sk) ?
				-ENETUNREACH : udp_sendmsg(sk, msg, len);
			msg->msg_name = sin6;
			msg->msg_namelen = addr_len;
			return err;
		}
	}
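
	/* Dual-stack note: a v4-mapped destination on an AF_INET6 socket
	 * is re-dispatched to udp_sendmsg() with a temporary sockaddr_in,
	 * unless IPV6_V6ONLY is set, in which case IPv4 destinations are
	 * rejected with -ENETUNREACH as seen above.
	 */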

	/* Rough check on arithmetic overflow,
	   better check is made in ip6_append_data().
	   */
	if (len > INT_MAX - sizeof(struct udphdr))
		return -EMSGSIZE;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
	if (up->pending) {
		if (up->pending == AF_INET)
			return udp_sendmsg(sk, msg, len);
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET6)) {
				release_sock(sk);
				return -EAFNOSUPPORT;
			}
			dst = NULL;
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	memset(fl6, 0, sizeof(*fl6));

	if (sin6) {
		if (sin6->sin6_port == 0)
			return -EINVAL;

		fl6->fl6_dport = sin6->sin6_port;
		daddr = &sin6->sin6_addr;

		if (np->sndflow) {
			fl6->flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
			if (fl6->flowlabel & IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
				if (IS_ERR(flowlabel))
					return -EINVAL;
			}
		}

		/*
		 * Otherwise it will be difficult to maintain
		 * sk->sk_dst_cache.
		 */
		if (sk->sk_state == TCP_ESTABLISHED &&
		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
			daddr = &sk->sk_v6_daddr;

		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    sin6->sin6_scope_id &&
		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
			fl6->flowi6_oif = sin6->sin6_scope_id;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;

		fl6->fl6_dport = inet->inet_dport;
		daddr = &sk->sk_v6_daddr;
		fl6->flowlabel = np->flow_label;
		connected = true;
	}

	if (!fl6->flowi6_oif)
		fl6->flowi6_oif = READ_ONCE(sk->sk_bound_dev_if);

	if (!fl6->flowi6_oif)
		fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;

	fl6->flowi6_uid = sk->sk_uid;

	if (msg->msg_controllen) {
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));
		opt->tot_len = sizeof(*opt);
		ipc6.opt = opt;

		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
		if (err > 0)
			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, fl6,
						    &ipc6);
		if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl6->flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
			if (IS_ERR(flowlabel))
				return -EINVAL;
		}
		if (!(opt->opt_nflen|opt->opt_flen))
			opt = NULL;
		connected = false;
	}
	if (!opt) {
		opt = txopt_get(np);
		opt_to_free = opt;
	}
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);
	opt = ipv6_fixup_options(&opt_space, opt);
	ipc6.opt = opt;

	fl6->flowi6_proto = sk->sk_protocol;
	fl6->flowi6_mark = ipc6.sockc.mark;
	fl6->daddr = *daddr;
	if (ipv6_addr_any(&fl6->saddr) && !ipv6_addr_any(&np->saddr))
		fl6->saddr = np->saddr;
	fl6->fl6_sport = inet->inet_sport;

	if (cgroup_bpf_enabled(CGROUP_UDP6_SENDMSG) && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
					   (struct sockaddr *)sin6,
					   &fl6->saddr);
		if (err)
			goto out_no_dst;
		if (sin6) {
			if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
				/* BPF program rewrote IPv6-only by IPv4-mapped
				 * IPv6. It's currently unsupported.
				 */
				err = -ENOTSUPP;
				goto out_no_dst;
			}
			if (sin6->sin6_port == 0) {
				/* BPF program set invalid port. Reject it. */
				err = -EINVAL;
				goto out_no_dst;
			}
			fl6->fl6_dport = sin6->sin6_port;
			fl6->daddr = sin6->sin6_addr;
		}
	}

	if (ipv6_addr_any(&fl6->daddr))
		fl6->daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */

	final_p = fl6_update_dst(fl6, opt, &final);
	if (final_p)
		connected = false;

	if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr)) {
		fl6->flowi6_oif = np->mcast_oif;
		connected = false;
	} else if (!fl6->flowi6_oif)
		fl6->flowi6_oif = np->ucast_oif;

	security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));

	if (ipc6.tclass < 0)
		ipc6.tclass = np->tclass;

	fl6->flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6->flowlabel);

	dst = ip6_sk_dst_lookup_flow(sk, fl6, final_p, connected);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto out;
	}

	if (ipc6.hlimit < 0)
		ipc6.hlimit = ip6_sk_dst_hoplimit(np, fl6, dst);

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	/* Lockless fast path for the non-corking case */
	if (!corkreq) {
		struct sk_buff *skb;

		skb = ip6_make_skb(sk, getfrag, msg, ulen,
				   sizeof(struct udphdr), &ipc6,
				   (struct rt6_info *)dst,
				   msg->msg_flags, &cork);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_v6_send_skb(skb, fl6, &cork.base);
		/* ip6_make_skb steals dst reference */
		goto out_no_dst;
	}
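
	/* Fast-path note: in the non-corking case above the whole datagram
	 * is built by ip6_make_skb() and sent without taking the socket
	 * lock; only the MSG_MORE/UDP_CORK path below needs lock_sock()
	 * to protect the pending frame list.
	 */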

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("udp cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}

	up->pending = AF_INET6;

do_append_data:
	if (ipc6.dontfrag < 0)
		ipc6.dontfrag = np->dontfrag;
	up->len += ulen;
	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
			      &ipc6, fl6, (struct rt6_info *)dst,
			      corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_v6_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_v6_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;

	if (err > 0)
		err = np->recverr ? net_xmit_errno(err) : 0;
	release_sock(sk);

out:
	dst_release(dst);
out_no_dst:
	fl6_sock_release(flowlabel);
	txopt_put(opt_to_free);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(dst, &fl6->daddr);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
EXPORT_SYMBOL(udpv6_sendmsg);
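
/* MSG_CONFIRM note: the do_confirm label above only confirms the
 * neighbour entry via dst_confirm_neigh() when MSG_PROBE is set; a
 * probe without payload returns without transmitting anything.
 */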

static void udpv6_splice_eof(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct udp_sock *up = udp_sk(sk);

	if (!up->pending || READ_ONCE(up->corkflag))
		return;

	lock_sock(sk);
	if (up->pending && !READ_ONCE(up->corkflag))
		udp_v6_push_pending_frames(sk);
	release_sock(sk);
}

void udpv6_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	lock_sock(sk);

	/* protects from races with udp_abort() */
	sock_set_flag(sk, SOCK_DEAD);
	udp_v6_flush_pending_frames(sk);
	release_sock(sk);

	if (static_branch_unlikely(&udpv6_encap_needed_key)) {
		if (up->encap_type) {
			void (*encap_destroy)(struct sock *sk);
			encap_destroy = READ_ONCE(up->encap_destroy);
			if (encap_destroy)
				encap_destroy(sk);
		}
		if (up->encap_enabled) {
			static_branch_dec(&udpv6_encap_needed_key);
			udp_encap_disable();
		}
	}
}

/*
 *	Socket option code for UDP
 */
int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		     unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE || level == SOL_SOCKET)
		return udp_lib_setsockopt(sk, level, optname,
					  optval, optlen,
					  udp_v6_push_pending_frames);
	return ipv6_setsockopt(sk, level, optname, optval, optlen);
}

int udpv6_getsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ipv6_getsockopt(sk, level, optname, optval, optlen);
}

static const struct inet6_protocol udpv6_protocol = {
	.handler	= udpv6_rcv,
	.err_handler	= udpv6_err,
	.flags		= INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
int udp6_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
	} else {
		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
		const struct inet_sock *inet = inet_sk((const struct sock *)v);
		__u16 srcp = ntohs(inet->inet_sport);
		__u16 destp = ntohs(inet->inet_dport);
		__ip6_dgram_sock_seq_show(seq, v, srcp, destp,
					  udp_rqueue_get(v), bucket);
	}
	return 0;
}

const struct seq_operations udp6_seq_ops = {
	.start		= udp_seq_start,
	.next		= udp_seq_next,
	.stop		= udp_seq_stop,
	.show		= udp6_seq_show,
};
EXPORT_SYMBOL(udp6_seq_ops);

static struct udp_seq_afinfo udp6_seq_afinfo = {
	.family		= AF_INET6,
	.udp_table	= NULL,
};

int __net_init udp6_proc_init(struct net *net)
{
	if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
			sizeof(struct udp_iter_state), &udp6_seq_afinfo))
		return -ENOMEM;
	return 0;
}

void udp6_proc_exit(struct net *net)
{
	remove_proc_entry("udp6", net->proc_net);
}
#endif /* CONFIG_PROC_FS */

/* ------------------------------------------------------------------------ */

struct proto udpv6_prot = {
	.name			= "UDPv6",
	.owner			= THIS_MODULE,
	.close			= udp_lib_close,
	.pre_connect		= udpv6_pre_connect,
	.connect		= ip6_datagram_connect,
	.disconnect		= udp_disconnect,
	.ioctl			= udp_ioctl,
	.init			= udpv6_init_sock,
	.destroy		= udpv6_destroy_sock,
	.setsockopt		= udpv6_setsockopt,
	.getsockopt		= udpv6_getsockopt,
	.sendmsg		= udpv6_sendmsg,
	.recvmsg		= udpv6_recvmsg,
	.splice_eof		= udpv6_splice_eof,
	.release_cb		= ip6_datagram_release_cb,
	.hash			= udp_lib_hash,
	.unhash			= udp_lib_unhash,
	.rehash			= udp_v6_rehash,
	.get_port		= udp_v6_get_port,
	.put_port		= udp_lib_unhash,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot	= udp_bpf_update_proto,
#endif

	.memory_allocated	= &udp_memory_allocated,
	.per_cpu_fw_alloc	= &udp_memory_per_cpu_fw_alloc,

	.sysctl_mem		= sysctl_udp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_udp_wmem_min),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_udp_rmem_min),
	.obj_size		= sizeof(struct udp6_sock),
	.ipv6_pinfo_offset	= offsetof(struct udp6_sock, inet6),
	.h.udp_table		= NULL,
	.diag_destroy		= udp_abort,
};

static struct inet_protosw udpv6_protosw = {
	.type =      SOCK_DGRAM,
	.protocol =  IPPROTO_UDP,
	.prot =      &udpv6_prot,
	.ops =       &inet6_dgram_ops,
	.flags =     INET_PROTOSW_PERMANENT,
};

int __init udpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
	if (ret)
		goto out;

	ret = inet6_register_protosw(&udpv6_protosw);
	if (ret)
		goto out_udpv6_protocol;
out:
	return ret;

out_udpv6_protocol:
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
	goto out;
}

void udpv6_exit(void)
{
	inet6_unregister_protosw(&udpv6_protosw);
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
}