1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Peer event handling, typically ICMP messages.
4 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
5 * Written by David Howells (dhowells@redhat.com)
8 #include <linux/module.h>
10 #include <linux/skbuff.h>
11 #include <linux/errqueue.h>
12 #include <linux/udp.h>
14 #include <linux/in6.h>
15 #include <linux/icmp.h>
17 #include <net/af_rxrpc.h>
20 #include "ar-internal.h"
/* Forward declarations for static helpers defined later in this file. */
22 static void rxrpc_adjust_mtu(struct rxrpc_peer *, unsigned int);
23 static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *);
24 static void rxrpc_distribute_error(struct rxrpc_peer *, int,
25 enum rxrpc_call_completion);
/*
 * Find the peer record matching the rxrpc packet quoted inside an ICMPv4
 * error, and (for Fragmentation-Needed) report the next-hop MTU via *info.
 *
 * NOTE(review): the embedded original-line numbers below jump (e.g. 48 -> 54),
 * so this listing is missing interleaved lines (the switch opener, case
 * labels, break statements and braces).  Code bytes are left untouched;
 * comments only.
 */
28 * Find the peer associated with an ICMPv4 packet.
30 static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
32 unsigned int udp_offset,
34 struct sockaddr_rxrpc *srx)
/* ip0/icmp: headers of the ICMP packet itself; ip is set later to the
 * quoted inner header.
 */
36 struct iphdr *ip, *ip0 = ip_hdr(skb);
37 struct icmphdr *icmp = icmp_hdr(skb);
/* UDP header of the quoted original packet, at a caller-supplied offset
 * into the skb data.
 */
38 struct udphdr *udp = (struct udphdr *)(skb->data + udp_offset);
40 _enter("%u,%u,%u", ip0->protocol, icmp->type, icmp->code);
/* For Dest-Unreachable, hand the Frag-Needed next-hop MTU back to the
 * caller (valid for the Frag-Needed code; other codes presumably leave
 * junk here — TODO confirm against the full file).
 */
43 case ICMP_DEST_UNREACH:
44 *info = ntohs(icmp->un.frag.mtu);
46 case ICMP_TIME_EXCEEDED:
47 case ICMP_PARAMETERPROB:
/* The quoted inner IP header starts 8 bytes past the ICMP header. */
48 ip = (struct iphdr *)((void *)icmp + 8);
/* Build the peer address from the local endpoint's transport identity
 * plus the destination of the quoted packet.
 */
54 memset(srx, 0, sizeof(*srx));
55 srx->transport_type = local->srx.transport_type;
56 srx->transport_len = local->srx.transport_len;
57 srx->transport.family = local->srx.transport.family;
59 /* Can we see an ICMP4 packet on an ICMP6 listening socket? and vice
62 switch (srx->transport.family) {
/* AF_INET listening socket (case label not visible in this listing). */
64 srx->transport_len = sizeof(srx->transport.sin);
65 srx->transport.family = AF_INET;
66 srx->transport.sin.sin_port = udp->dest;
67 memcpy(&srx->transport.sin.sin_addr, &ip->daddr,
68 sizeof(struct in_addr));
71 #ifdef CONFIG_AF_RXRPC_IPV6
/* Presumably the AF_INET6 case: an ICMPv4 error on a v6 socket is
 * recorded as an AF_INET transport address — the case label itself is
 * not visible here; TODO confirm against the full file.
 */
73 srx->transport_len = sizeof(srx->transport.sin);
74 srx->transport.family = AF_INET;
75 srx->transport.sin.sin_port = udp->dest;
76 memcpy(&srx->transport.sin.sin_addr, &ip->daddr,
77 sizeof(struct in_addr));
/* Look the constructed address up under RCU; caller is presumably in an
 * RCU read-side section (function name suffix _rcu).
 */
86 _net("ICMP {%pISp}", &srx->transport);
87 return rxrpc_lookup_peer_rcu(local, srx);
90 #ifdef CONFIG_AF_RXRPC_IPV6
/*
 * ICMPv6 counterpart of rxrpc_lookup_peer_icmp_rcu(): find the peer for the
 * rxrpc packet quoted inside an ICMPv6 error and report the MTU via *info.
 *
 * NOTE(review): embedded original-line numbers jump (113 -> 119 etc.), so
 * interleaved lines (breaks, braces, a case label) are missing from this
 * listing.  Code bytes are untouched; comments only.
 */
92 * Find the peer associated with an ICMPv6 packet.
94 static struct rxrpc_peer *rxrpc_lookup_peer_icmp6_rcu(struct rxrpc_local *local,
96 unsigned int udp_offset,
98 struct sockaddr_rxrpc *srx)
100 struct icmp6hdr *icmp = icmp6_hdr(skb);
101 struct ipv6hdr *ip, *ip0 = ipv6_hdr(skb);
/* UDP header of the quoted original packet at the caller-supplied offset. */
102 struct udphdr *udp = (struct udphdr *)(skb->data + udp_offset);
104 _enter("%u,%u,%u", ip0->nexthdr, icmp->icmp6_type, icmp->icmp6_code);
106 switch (icmp->icmp6_type) {
/* Pass the reported MTU out through *info (meaningful for Pkt-Too-Big;
 * for Dest-Unreach this field presumably reads as 0/unused — TODO confirm).
 */
107 case ICMPV6_DEST_UNREACH:
108 *info = ntohl(icmp->icmp6_mtu);
110 case ICMPV6_PKT_TOOBIG:
111 case ICMPV6_TIME_EXCEED:
112 case ICMPV6_PARAMPROB:
/* Quoted inner IPv6 header starts 8 bytes past the ICMPv6 header. */
113 ip = (struct ipv6hdr *)((void *)icmp + 8);
/* Build the peer address from the local endpoint's transport identity. */
119 memset(srx, 0, sizeof(*srx));
120 srx->transport_type = local->srx.transport_type;
121 srx->transport_len = local->srx.transport_len;
122 srx->transport.family = local->srx.transport.family;
124 /* Can we see an ICMP4 packet on an ICMP6 listening socket? and vice
127 switch (srx->transport.family) {
/* AF_INET socket receiving an ICMPv6 error: take the low 32 bits of the
 * v6 destination (presumably a v4-mapped address — TODO confirm).
 */
129 _net("Rx ICMP6 on v4 sock");
130 srx->transport_len = sizeof(srx->transport.sin);
131 srx->transport.family = AF_INET;
132 srx->transport.sin.sin_port = udp->dest;
133 memcpy(&srx->transport.sin.sin_addr,
134 &ip->daddr.s6_addr32[3], sizeof(struct in_addr));
/* Presumably the AF_INET6 case (label not visible).  NOTE(review): the
 * port is written through .sin.sin_port while the address goes to
 * .sin6.sin6_addr — sin_port and sin6_port share the same offset in the
 * transport union so this likely still works, but confirm and consider
 * sin6.sin6_port for clarity.
 */
138 srx->transport.sin.sin_port = udp->dest;
139 memcpy(&srx->transport.sin6.sin6_addr, &ip->daddr,
140 sizeof(struct in6_addr));
147 _net("ICMP {%pISp}", &srx->transport);
148 return rxrpc_lookup_peer_rcu(local, srx);
150 #endif /* CONFIG_AF_RXRPC_IPV6 */
/*
 * Entry point invoked for an ICMP/ICMPv6 error delivered to the UDP socket
 * that rxrpc uses as an encapsulation tunnel: identify the peer, adjust the
 * MTU for Frag-Needed/Pkt-Too-Big, otherwise translate the error and
 * distribute it to the peer's calls.
 *
 * NOTE(review): embedded original-line numbers jump throughout, so this
 * listing is missing many interleaved lines (case labels, braces, error-
 * translation statements).  Code bytes are untouched; comments only.
 */
153 * Handle an error received on the local endpoint as a tunnel.
155 void rxrpc_encap_err_rcv(struct sock *sk, struct sk_buff *skb,
156 unsigned int udp_offset)
158 struct sock_extended_err ee;
159 struct sockaddr_rxrpc srx;
160 struct rxrpc_local *local;
161 struct rxrpc_peer *peer;
162 unsigned int info = 0;
164 u8 version = ip_hdr(skb)->version;
/* NOTE(review): type/code are read via icmp_hdr() before the version is
 * checked; for a v6 packet this reads the same offsets as icmp6_type/
 * icmp6_code — presumably intentional aliasing, but confirm.
 */
165 u8 type = icmp_hdr(skb)->type;
166 u8 code = icmp_hdr(skb)->code;
/* The rxrpc local endpoint is stashed in sk_user_data; bail if the socket
 * is no longer attached.
 */
169 local = rcu_dereference_sk_user_data(sk);
170 if (unlikely(!local)) {
175 rxrpc_new_skb(skb, rxrpc_skb_received);
/* Dispatch on IP version to find the peer the error refers to. */
177 switch (ip_hdr(skb)->version) {
179 peer = rxrpc_lookup_peer_icmp_rcu(local, skb, udp_offset,
182 #ifdef CONFIG_AF_RXRPC_IPV6
184 peer = rxrpc_lookup_peer_icmp6_rcu(local, skb, udp_offset,
/* Take a ref on the peer unless it is already being torn down. */
193 if (peer && !rxrpc_get_peer_maybe(peer))
200 memset(&ee, 0, sizeof(ee));
/* ICMPv4 handling (outer switch on type not visible in this listing). */
205 case ICMP_DEST_UNREACH:
/* Frag-Needed: just shrink the peer MTU using the value extracted by the
 * lookup helper, drop our ref and return — no error is distributed.
 */
207 case ICMP_FRAG_NEEDED:
208 rxrpc_adjust_mtu(peer, info);
210 rxrpc_put_peer(peer);
/* Other unreachable codes: map to an errno via the kernel's standard
 * ICMP conversion table.
 */
217 if (code <= NR_ICMP_UNREACH) {
218 /* Might want to do something different with
221 //harderr = icmp_err_convert[code].fatal;
222 err = icmp_err_convert[code].errno;
226 case ICMP_TIME_EXCEEDED:
234 ee.ee_origin = SO_EE_ORIGIN_ICMP;
240 #ifdef CONFIG_AF_RXRPC_IPV6
/* ICMPv6 handling: Pkt-Too-Big adjusts the MTU and returns; everything
 * else is translated with icmpv6_err_convert().
 */
243 case ICMPV6_PKT_TOOBIG:
244 rxrpc_adjust_mtu(peer, info);
246 rxrpc_put_peer(peer);
250 icmpv6_err_convert(type, code, &err);
255 ee.ee_origin = SO_EE_ORIGIN_ICMP6;
/* Trace, fan the network error out to the peer's calls, then drop the
 * reference taken above.
 */
263 trace_rxrpc_rx_icmp(peer, &ee, &srx);
265 rxrpc_distribute_error(peer, err, RXRPC_CALL_NETWORK_ERROR);
267 rxrpc_put_peer(peer);
/*
 * Build a peer address from a socket error-queue report (sock_exterr_skb)
 * and look the peer up under RCU.  Handles v4-on-v6 and v6-on-v4 crossover
 * by translating the reported address into the socket's family.
 *
 * NOTE(review): embedded original-line numbers jump, so case labels, breaks
 * and braces are missing from this listing.  Code bytes untouched.
 */
271 * Find the peer associated with a local error.
273 static struct rxrpc_peer *rxrpc_lookup_peer_local_rcu(struct rxrpc_local *local,
274 const struct sk_buff *skb,
275 struct sockaddr_rxrpc *srx)
277 struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
/* Start from the local endpoint's transport identity. */
281 memset(srx, 0, sizeof(*srx));
282 srx->transport_type = local->srx.transport_type;
283 srx->transport_len = local->srx.transport_len;
284 srx->transport.family = local->srx.transport.family;
286 switch (srx->transport.family) {
/* AF_INET socket (case label not visible). */
288 srx->transport_len = sizeof(srx->transport.sin);
289 srx->transport.family = AF_INET;
290 srx->transport.sin.sin_port = serr->port;
291 switch (serr->ee.ee_origin) {
/* ICMP origin: the offending address sits at addr_offset into the
 * network header.
 */
292 case SO_EE_ORIGIN_ICMP:
294 memcpy(&srx->transport.sin.sin_addr,
295 skb_network_header(skb) + serr->addr_offset,
296 sizeof(struct in_addr));
/* ICMP6 report on a v4 socket: skip the first 12 bytes of the v6
 * address to reach the embedded v4 address (presumably v4-mapped —
 * TODO confirm).
 */
298 case SO_EE_ORIGIN_ICMP6:
299 _net("Rx ICMP6 on v4 sock");
300 memcpy(&srx->transport.sin.sin_addr,
301 skb_network_header(skb) + serr->addr_offset + 12,
302 sizeof(struct in_addr));
/* Other origins (default branch, label not visible): fall back to the
 * packet's own source address.
 */
305 memcpy(&srx->transport.sin.sin_addr, &ip_hdr(skb)->saddr,
306 sizeof(struct in_addr));
311 #ifdef CONFIG_AF_RXRPC_IPV6
/* AF_INET6 socket (case label not visible). */
313 switch (serr->ee.ee_origin) {
314 case SO_EE_ORIGIN_ICMP6:
316 srx->transport.sin6.sin6_port = serr->port;
317 memcpy(&srx->transport.sin6.sin6_addr,
318 skb_network_header(skb) + serr->addr_offset,
319 sizeof(struct in6_addr));
/* ICMP (v4) report on a v6 socket: shrink the address to AF_INET. */
321 case SO_EE_ORIGIN_ICMP:
322 _net("Rx ICMP on v6 sock");
323 srx->transport_len = sizeof(srx->transport.sin);
324 srx->transport.family = AF_INET;
325 srx->transport.sin.sin_port = serr->port;
326 memcpy(&srx->transport.sin.sin_addr,
327 skb_network_header(skb) + serr->addr_offset,
328 sizeof(struct in_addr));
/* Default: use the packet's own v6 source address. */
331 memcpy(&srx->transport.sin6.sin6_addr,
332 &ipv6_hdr(skb)->saddr,
333 sizeof(struct in6_addr));
343 return rxrpc_lookup_peer_rcu(local, srx);
/*
 * Reduce the peer's recorded MTU/maxdata in response to a Fragmentation-
 * Needed / Packet-Too-Big report.  mtu == 0 means no size was supplied and
 * one must be estimated.
 *
 * NOTE(review): embedded original-line numbers jump (356 -> 360, 369 -> 373),
 * so estimation/clamping statements are missing from this listing.
 */
347 * Handle an MTU/fragmentation problem.
349 static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, unsigned int mtu)
351 _net("Rx ICMP Fragmentation Needed (%d)", mtu);
353 /* wind down the local interface MTU */
/* Only lower if_mtu from its initial 65535 sentinel; the mtu < if_mtu
 * test is then trivially true but kept for safety.
 */
354 if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu) {
356 _net("I/F MTU %u", mtu);
360 /* they didn't give us a size, estimate one */
/* Never let the usable MTU drop below the protocol header overhead. */
368 if (mtu < peer->hdrsize)
369 mtu = peer->hdrsize + 4;
/* Shrink (never grow) the recorded MTU under the peer lock and derive
 * the payload capacity from it.
 */
373 if (mtu < peer->mtu) {
374 spin_lock_bh(&peer->lock);
376 peer->maxdata = peer->mtu - peer->hdrsize;
377 spin_unlock_bh(&peer->lock);
378 _net("Net MTU %u (maxdata %u)",
379 peer->mtu, peer->maxdata);
/*
 * sk_error_report hook for the transport socket: drain one error report
 * from the socket's error queue, map it onto a peer and record it.
 *
 * NOTE(review): embedded original-line numbers jump, so early-return
 * branches and braces are missing from this listing.  Code bytes untouched.
 */
384 * Handle an error received on the local endpoint.
386 void rxrpc_error_report(struct sock *sk)
388 struct sock_exterr_skb *serr;
389 struct sockaddr_rxrpc srx;
390 struct rxrpc_local *local;
391 struct rxrpc_peer *peer = NULL;
/* Socket may already be detached from its rxrpc endpoint. */
395 local = rcu_dereference_sk_user_data(sk);
396 if (unlikely(!local)) {
400 _enter("%p{%d}", sk, local->debug_id);
402 /* Clear the outstanding error value on the socket so that it doesn't
403 * cause kernel_sendmsg() to return it later.
/* Pull the next queued error skb; nothing to do if the queue is empty. */
407 skb = sock_dequeue_err_skb(sk);
410 _leave("UDP socket errqueue empty");
413 rxrpc_new_skb(skb, rxrpc_skb_received);
414 serr = SKB_EXT_ERR(skb);
/* Locally-originated reports identify the peer by the report's address;
 * take a ref unless the peer is being destroyed.
 */
416 if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL) {
417 peer = rxrpc_lookup_peer_local_rcu(local, skb, &srx);
418 if (peer && !rxrpc_get_peer_maybe(peer))
421 trace_rxrpc_rx_icmp(peer, &serr->ee, &srx);
422 rxrpc_store_error(peer, serr);
/* Release the skb and the peer ref taken above (put is NULL-safe). */
427 rxrpc_free_skb(skb, rxrpc_skb_freed);
428 rxrpc_put_peer(peer);
/*
 * Translate a socket error report into a call-completion mode and an error
 * code, then distribute it to the peer's calls.  ICMP-originated reports
 * become network errors; locally-originated ones become local errors.
 *
 * NOTE(review): embedded original-line numbers jump, so breaks, braces and
 * some statements (e.g. where ee/err are initialised) are missing from this
 * listing.  Code bytes untouched; comments only.
 */
433 * Map an error report to error codes on the peer record.
435 static void rxrpc_store_error(struct rxrpc_peer *peer,
436 struct sock_exterr_skb *serr)
438 enum rxrpc_call_completion compl = RXRPC_CALL_NETWORK_ERROR;
439 struct sock_extended_err *ee;
/* Branch on where the report came from (ee presumably points at
 * &serr->ee — the assignment is not visible in this listing).
 */
448 switch (ee->ee_origin) {
449 case SO_EE_ORIGIN_ICMP:
450 switch (ee->ee_type) {
/* Dest-Unreachable: log the specific code; the errno itself is
 * presumably set outside the visible lines.
 */
451 case ICMP_DEST_UNREACH:
452 switch (ee->ee_code) {
453 case ICMP_NET_UNREACH:
454 _net("Rx Received ICMP Network Unreachable");
456 case ICMP_HOST_UNREACH:
457 _net("Rx Received ICMP Host Unreachable");
459 case ICMP_PORT_UNREACH:
460 _net("Rx Received ICMP Port Unreachable");
462 case ICMP_NET_UNKNOWN:
463 _net("Rx Received ICMP Unknown Network");
465 case ICMP_HOST_UNKNOWN:
466 _net("Rx Received ICMP Unknown Host");
469 _net("Rx Received ICMP DestUnreach code=%u",
475 case ICMP_TIME_EXCEEDED:
476 _net("Rx Received ICMP TTL Exceeded");
480 _proto("Rx Received ICMP error { type=%u code=%u }",
481 ee->ee_type, ee->ee_code);
/* Local/no-origin reports complete calls with a local error instead. */
486 case SO_EE_ORIGIN_NONE:
487 case SO_EE_ORIGIN_LOCAL:
488 _proto("Rx Received local error { error=%d }", err);
489 compl = RXRPC_CALL_LOCAL_ERROR;
492 case SO_EE_ORIGIN_ICMP6:
497 _proto("Rx Received error report { orig=%u }", ee->ee_origin);
/* Fan the chosen completion/error out to every call on this peer. */
501 rxrpc_distribute_error(peer, err, compl);
/*
 * Complete every call attached to the peer's error_targets list with the
 * given completion mode and (negated) error code.  Iterates under RCU;
 * rxrpc_see_call() marks the call for tracing before completion.
 */
505 * Distribute an error that occurred on a peer.
507 static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error,
508 enum rxrpc_call_completion compl)
510 struct rxrpc_call *call;
512 hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) {
513 rxrpc_see_call(call);
/* error is positive on entry; calls store negative errnos. */
514 rxrpc_set_call_completion(call, compl, 0, -error);
/*
 * Walk the collector list of peers due for keepalive: send a VERSION ping
 * to each peer that is actually due, then re-bucket it into the keepalive
 * wheel slot matching its next deadline.
 *
 * NOTE(review): embedded original-line numbers jump (538 -> 541, 552 -> 555),
 * so continue/else branches and braces are missing from this listing.
 */
519 * Perform keep-alive pings.
521 static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
522 struct list_head *collector,
526 struct rxrpc_peer *peer;
/* Wheel size is a power of two, so "& mask" wraps slot indices. */
527 const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
528 time64_t keepalive_at;
531 spin_lock_bh(&rxnet->peer_hash_lock);
533 while (!list_empty(collector)) {
534 peer = list_entry(collector->next,
535 struct rxrpc_peer, keepalive_link);
537 list_del_init(&peer->keepalive_link);
/* Skip peers already on their way out (refcount hit zero). */
538 if (!rxrpc_get_peer_maybe(peer))
/* Only ping while the local endpoint is still usable; drop the hash
 * lock while transmitting.
 */
541 if (__rxrpc_use_local(peer->local)) {
542 spin_unlock_bh(&rxnet->peer_hash_lock);
/* Next deadline is RXRPC_KEEPALIVE_TIME after the last transmission. */
544 keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
545 slot = keepalive_at - base;
546 _debug("%02x peer %u t=%d {%pISp}",
547 cursor, peer->debug_id, slot, &peer->srx.transport);
/* Deadline already passed (or implausibly far out): ping now and
 * reschedule a full period ahead.
 */
549 if (keepalive_at <= base ||
550 keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
551 rxrpc_send_keepalive(peer);
552 slot = RXRPC_KEEPALIVE_TIME;
555 /* A transmission to this peer occurred since last we
556 * examined it so put it into the appropriate future
/* Re-take the lock and queue the peer into its future wheel bucket. */
561 spin_lock_bh(&rxnet->peer_hash_lock);
562 list_add_tail(&peer->keepalive_link,
563 &rxnet->peer_keepalive[slot & mask]);
564 rxrpc_unuse_local(peer->local);
/* _locked variant: we still hold peer_hash_lock here. */
566 rxrpc_put_peer_locked(peer);
569 spin_unlock_bh(&rxnet->peer_hash_lock);
/*
 * Keepalive work item: sweep all expired wheel buckets (plus newly-added
 * peers) into a collector list, dispatch pings for them, then re-arm the
 * keepalive timer for the next occupied slot.
 *
 * NOTE(review): embedded original-line numbers jump (604 -> 611, 622 -> 627),
 * so cursor/base advancement and the delay computation are partly missing
 * from this listing.  Code bytes untouched; comments only.
 */
573 * Perform keep-alive pings with VERSION packets to keep any NAT alive.
575 void rxrpc_peer_keepalive_worker(struct work_struct *work)
577 struct rxrpc_net *rxnet =
578 container_of(work, struct rxrpc_net, peer_keepalive_work);
579 const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
580 time64_t base, now, delay;
582 LIST_HEAD(collector);
/* base/cursor track which one-second bucket was processed last. */
584 now = ktime_get_seconds();
585 base = rxnet->peer_keepalive_base;
586 cursor = rxnet->peer_keepalive_cursor;
587 _enter("%lld,%u", base - now, cursor);
592 /* Remove to a temporary list all the peers that are currently lodged
593 * in expired buckets plus all new peers.
595 * Everything in the bucket at the cursor is processed this
596 * second; the bucket at cursor + 1 goes at now + 1s and so
599 spin_lock_bh(&rxnet->peer_hash_lock);
600 list_splice_init(&rxnet->peer_keepalive_new, &collector);
/* Sweep at most one full wheel revolution; (s8) subtraction handles
 * the wrapping 8-bit cursor.
 */
602 stop = cursor + ARRAY_SIZE(rxnet->peer_keepalive);
603 while (base <= now && (s8)(cursor - stop) < 0) {
604 list_splice_tail_init(&rxnet->peer_keepalive[cursor & mask],
611 spin_unlock_bh(&rxnet->peer_hash_lock);
/* Persist the advanced position, then service the collected peers. */
613 rxnet->peer_keepalive_base = base;
614 rxnet->peer_keepalive_cursor = cursor;
615 rxrpc_peer_keepalive_dispatch(rxnet, &collector, base, cursor);
616 ASSERT(list_empty(&collector));
618 /* Schedule the timer for the next occupied timeslot. */
619 cursor = rxnet->peer_keepalive_cursor;
620 stop = cursor + RXRPC_KEEPALIVE_TIME - 1;
/* Scan forward for the first non-empty bucket (wrapping compare). */
621 for (; (s8)(cursor - stop) < 0; cursor++) {
622 if (!list_empty(&rxnet->peer_keepalive[cursor & mask]))
/* timer_reduce() only shortens an already-pending timer. */
627 now = ktime_get_seconds();
633 timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);