// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * xfrm_output.c - Common IPsec encapsulation code.
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_route.h>
#include <net/ipv6_stubs.h>
#endif

#include "xfrm_inout.h"

static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb);
static int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);

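/* Make sure the skb has enough head room for the outer headers this
 * dst stack will prepend (dst->header_len plus the link-layer reserve
 * of the output device) and enough tail room for what the device
 * declares via needed_tailroom (e.g. room for an ESP trailer).
 * The skb head is reallocated if either is lacking.
 */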
static int xfrm_skb_check_space(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	int nhead = dst->header_len + LL_RESERVED_SPACE(dst->dev)
		- skb_headroom(skb);
	int ntail = dst->dev->needed_tailroom - skb_tailroom(skb);

	if (nhead <= 0) {
		if (ntail <= 0)
			return 0;
		nhead = 0;
	} else if (ntail < 0)
		ntail = 0;

	return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC);
}

/* Children define the path of the packet through the
 * Linux networking stack.  Thus, destinations are stackable.
 */

static struct dst_entry *skb_dst_pop(struct sk_buff *skb)
{
	struct dst_entry *child = dst_clone(xfrm_dst_child(skb_dst(skb)));

	skb_dst_drop(skb);
	return child;
}

/* Add encapsulation header.
 *
 * The IP header will be moved forward to make space for the encapsulation
 * header.
 */
static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	int ihl = iph->ihl * 4;

	skb_set_inner_transport_header(skb, skb_transport_offset(skb));

	skb_set_network_header(skb, -x->props.header_len);
	skb->mac_header = skb->network_header +
			  offsetof(struct iphdr, protocol);
	skb->transport_header = skb->network_header + ihl;
	__skb_pull(skb, ihl);
	memmove(skb_network_header(skb), iph, ihl);
	return 0;
}

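/* Mobile IPv6 (MIP6) transforms insert a destination-options or routing
 * header rather than ESP/AH.  mip6_rthdr_offset() walks the extension
 * header chain to find the offset at which that header must be placed,
 * returning the offset on success or -EINVAL on a malformed chain.
 */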
#if IS_ENABLED(CONFIG_IPV6_MIP6)
static int mip6_rthdr_offset(struct sk_buff *skb, u8 **nexthdr, int type)
{
	const unsigned char *nh = skb_network_header(skb);
	unsigned int offset = sizeof(struct ipv6hdr);
	unsigned int packet_len;
	int found_rhdr = 0;

	packet_len = skb_tail_pointer(skb) - nh;
	*nexthdr = &ipv6_hdr(skb)->nexthdr;

	while (offset <= packet_len) {
		struct ipv6_opt_hdr *exthdr;

		switch (**nexthdr) {
		case NEXTHDR_HOP:
			break;
		case NEXTHDR_ROUTING:
			if (type == IPPROTO_ROUTING && offset + 3 <= packet_len) {
				struct ipv6_rt_hdr *rt;

				rt = (struct ipv6_rt_hdr *)(nh + offset);
				if (rt->type != 0)
					return offset;
			}
			found_rhdr = 1;
			break;
		case NEXTHDR_DEST:
			/* HAO MUST NOT appear more than once.
			 * XXX: It is better to try to find by the end of
			 * XXX: packet if HAO exists.
			 */
			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0) {
				net_dbg_ratelimited("mip6: hao exists already, override\n");
				return offset;
			}

			if (found_rhdr)
				return offset;

			break;
		default:
			return offset;
		}

		if (offset + sizeof(struct ipv6_opt_hdr) > packet_len)
			return -EINVAL;

		exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
						 offset);
		offset += ipv6_optlen(exthdr);
		if (offset > IPV6_MAXPLEN)
			return -EINVAL;
		*nexthdr = &exthdr->nexthdr;
	}

	return -EINVAL;
}
#endif

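/* Return the offset at which the encapsulating header should be
 * inserted: MIP6 transforms use the routing/destination-option rules
 * above, everything else goes in front of the first extension header
 * that may be fragmented (ip6_find_1stfragopt()).
 */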
#if IS_ENABLED(CONFIG_IPV6)
static int xfrm6_hdr_offset(struct xfrm_state *x, struct sk_buff *skb, u8 **prevhdr)
{
	switch (x->type->proto) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	case IPPROTO_DSTOPTS:
	case IPPROTO_ROUTING:
		return mip6_rthdr_offset(skb, prevhdr, x->type->proto);
#endif
	default:
		break;
	}

	return ip6_find_1stfragopt(skb, prevhdr);
}
#endif

/* Add encapsulation header.
 *
 * The IP header and mutable extension headers will be moved forward to make
 * space for the encapsulation header.
 */
static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct ipv6hdr *iph;
	u8 *prevhdr;
	int hdr_len;

	iph = ipv6_hdr(skb);
	skb_set_inner_transport_header(skb, skb_transport_offset(skb));

	hdr_len = xfrm6_hdr_offset(x, skb, &prevhdr);
	if (hdr_len < 0)
		return hdr_len;
	skb_set_mac_header(skb,
			   (prevhdr - x->props.header_len) - skb->data);
	skb_set_network_header(skb, -x->props.header_len);
	skb->transport_header = skb->network_header + hdr_len;
	__skb_pull(skb, hdr_len);
	memmove(ipv6_hdr(skb), iph, hdr_len);
	return 0;
#else
	WARN_ON_ONCE(1);
	return -EAFNOSUPPORT;
#endif
}

/* Add route optimization header space.
 *
 * The IP header and mutable extension headers will be moved forward to make
 * space for the route optimization header.
 */
static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct ipv6hdr *iph;
	u8 *prevhdr;
	int hdr_len;

	iph = ipv6_hdr(skb);

	hdr_len = xfrm6_hdr_offset(x, skb, &prevhdr);
	if (hdr_len < 0)
		return hdr_len;
	skb_set_mac_header(skb,
			   (prevhdr - x->props.header_len) - skb->data);
	skb_set_network_header(skb, -x->props.header_len);
	skb->transport_header = skb->network_header + hdr_len;
	__skb_pull(skb, hdr_len);
	memmove(ipv6_hdr(skb), iph, hdr_len);

	x->lastused = ktime_get_real_seconds();

	return 0;
#else
	WARN_ON_ONCE(1);
	return -EAFNOSUPPORT;
#endif
}

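/* BEET mode (Bound End-to-End Tunnel) gives tunnel-mode semantics with
 * transport-mode overhead: the inner addresses are fixed by the SA
 * (x->props.saddr / x->id.daddr) instead of being carried in the packet,
 * so no inner IP header is transmitted.  Inner IPv4 options are the
 * exception; they travel in the pseudo header built below.
 */
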
/* Add encapsulation header.
 *
 * The top IP header will be constructed per draft-nikander-esp-beet-mode-06.txt.
 */
static int xfrm4_beet_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_beet_phdr *ph;
	struct iphdr *top_iph;
	int hdrlen, optlen;

	hdrlen = 0;
	optlen = XFRM_MODE_SKB_CB(skb)->optlen;
	if (unlikely(optlen))
		hdrlen += IPV4_BEET_PHMAXLEN - (optlen & 4);

	skb_set_network_header(skb, -x->props.header_len - hdrlen +
			       (XFRM_MODE_SKB_CB(skb)->ihl - sizeof(*top_iph)));
	if (x->sel.family != AF_INET6)
		skb->network_header += IPV4_BEET_PHMAXLEN;
	skb->mac_header = skb->network_header +
			  offsetof(struct iphdr, protocol);
	skb->transport_header = skb->network_header + sizeof(*top_iph);

	xfrm4_beet_make_header(skb);

	ph = __skb_pull(skb, XFRM_MODE_SKB_CB(skb)->ihl - hdrlen);

	top_iph = ip_hdr(skb);

	if (unlikely(optlen)) {
		if (WARN_ON(optlen < 0))
			return -EINVAL;

		ph->padlen = 4 - (optlen & 4);
		ph->hdrlen = optlen / 8;
		ph->nexthdr = top_iph->protocol;
		if (ph->padlen)
			memset(ph + 1, IPOPT_NOP, ph->padlen);

		top_iph->protocol = IPPROTO_BEETPH;
		top_iph->ihl = sizeof(struct iphdr) / 4;
	}

	top_iph->saddr = x->props.saddr.a4;
	top_iph->daddr = x->id.daddr.a4;

	return 0;
}

/* Add encapsulation header.
 *
 * The top IP header will be constructed per RFC 2401.
 */
static int xfrm4_tunnel_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct iphdr *top_iph;
	int flags;

	skb_set_inner_network_header(skb, skb_network_offset(skb));
	skb_set_inner_transport_header(skb, skb_transport_offset(skb));

	skb_set_network_header(skb, -x->props.header_len);
	skb->mac_header = skb->network_header +
			  offsetof(struct iphdr, protocol);
	skb->transport_header = skb->network_header + sizeof(*top_iph);
	top_iph = ip_hdr(skb);

	top_iph->ihl = 5;
	top_iph->version = 4;

	top_iph->protocol = xfrm_af2proto(skb_dst(skb)->ops->family);

	/* DS disclosure depends on XFRM_SA_XFLAG_DONT_ENCAP_DSCP */
	if (x->props.extra_flags & XFRM_SA_XFLAG_DONT_ENCAP_DSCP)
		top_iph->tos = 0;
	else
		top_iph->tos = XFRM_MODE_SKB_CB(skb)->tos;
	top_iph->tos = INET_ECN_encapsulate(top_iph->tos,
					    XFRM_MODE_SKB_CB(skb)->tos);

	flags = x->props.flags;
	if (flags & XFRM_STATE_NOECN)
		IP_ECN_clear(top_iph);

	top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ?
		0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));

	top_iph->ttl = ip4_dst_hoplimit(xfrm_dst_child(dst));

	top_iph->saddr = x->props.saddr.a4;
	top_iph->daddr = x->id.daddr.a4;
	ip_select_ident(dev_net(dst->dev), skb, NULL);

	return 0;
}

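/* Note on the DSCP/ECN handling above: unless XFRM_SA_XFLAG_DONT_ENCAP_DSCP
 * is set, the inner TOS is copied to the outer header, and
 * INET_ECN_encapsulate() then derives the outer ECN bits from the inner
 * ones (in the spirit of RFC 6040); XFRM_STATE_NOECN clears them
 * entirely.  The IPv6 variant below follows the same pattern.
 */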
#if IS_ENABLED(CONFIG_IPV6)
static int xfrm6_tunnel_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *top_iph;
	int dsfield;

	skb_set_inner_network_header(skb, skb_network_offset(skb));
	skb_set_inner_transport_header(skb, skb_transport_offset(skb));

	skb_set_network_header(skb, -x->props.header_len);
	skb->mac_header = skb->network_header +
			  offsetof(struct ipv6hdr, nexthdr);
	skb->transport_header = skb->network_header + sizeof(*top_iph);
	top_iph = ipv6_hdr(skb);

	top_iph->version = 6;

	memcpy(top_iph->flow_lbl, XFRM_MODE_SKB_CB(skb)->flow_lbl,
	       sizeof(top_iph->flow_lbl));
	top_iph->nexthdr = xfrm_af2proto(skb_dst(skb)->ops->family);

	if (x->props.extra_flags & XFRM_SA_XFLAG_DONT_ENCAP_DSCP)
		dsfield = 0;
	else
		dsfield = XFRM_MODE_SKB_CB(skb)->tos;
	dsfield = INET_ECN_encapsulate(dsfield, XFRM_MODE_SKB_CB(skb)->tos);
	if (x->props.flags & XFRM_STATE_NOECN)
		dsfield &= ~INET_ECN_MASK;
	ipv6_change_dsfield(top_iph, 0, dsfield);
	top_iph->hop_limit = ip6_dst_hoplimit(xfrm_dst_child(dst));
	top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
	top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
	return 0;
}

static int xfrm6_beet_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipv6hdr *top_iph;
	struct ip_beet_phdr *ph;
	int optlen, hdr_len;

	hdr_len = 0;
	optlen = XFRM_MODE_SKB_CB(skb)->optlen;
	if (unlikely(optlen))
		hdr_len += IPV4_BEET_PHMAXLEN - (optlen & 4);

	skb_set_network_header(skb, -x->props.header_len - hdr_len);
	if (x->sel.family != AF_INET6)
		skb->network_header += IPV4_BEET_PHMAXLEN;
	skb->mac_header = skb->network_header +
			  offsetof(struct ipv6hdr, nexthdr);
	skb->transport_header = skb->network_header + sizeof(*top_iph);
	ph = __skb_pull(skb, XFRM_MODE_SKB_CB(skb)->ihl - hdr_len);

	xfrm6_beet_make_header(skb);

	top_iph = ipv6_hdr(skb);
	if (unlikely(optlen)) {
		if (WARN_ON(optlen < 0))
			return -EINVAL;

		ph->padlen = 4 - (optlen & 4);
		ph->hdrlen = optlen / 8;
		ph->nexthdr = top_iph->nexthdr;
		if (ph->padlen)
			memset(ph + 1, IPOPT_NOP, ph->padlen);

		top_iph->nexthdr = IPPROTO_BEETPH;
	}

	top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
	top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
	return 0;
}
#endif

/* Add encapsulation header.
 *
 * On exit, the transport header will be set to the start of the
 * encapsulation header to be filled in by x->type->output and the mac
 * header will be set to the nextheader (protocol for IPv4) field of the
 * extension header directly preceding the encapsulation header, or in
 * its absence, that of the top IP header.
 * The value of the network header will always point to the top IP header
 * while skb->data will point to the payload.
 */
static int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;

	err = xfrm_inner_extract_output(x, skb);
	if (err)
		return err;

	IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
	skb->protocol = htons(ETH_P_IP);

	switch (x->outer_mode.encap) {
	case XFRM_MODE_BEET:
		return xfrm4_beet_encap_add(x, skb);
	case XFRM_MODE_TUNNEL:
		return xfrm4_tunnel_encap_add(x, skb);
	}

	WARN_ON_ONCE(1);
	return -EOPNOTSUPP;
}

static int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	int err;

	err = xfrm_inner_extract_output(x, skb);
	if (err)
		return err;

	skb->ignore_df = 1;
	skb->protocol = htons(ETH_P_IPV6);

	switch (x->outer_mode.encap) {
	case XFRM_MODE_BEET:
		return xfrm6_beet_encap_add(x, skb);
	case XFRM_MODE_TUNNEL:
		return xfrm6_tunnel_encap_add(x, skb);
	default:
		WARN_ON_ONCE(1);
		return -EOPNOTSUPP;
	}
#endif
	WARN_ON_ONCE(1);
	return -EAFNOSUPPORT;
}

static int xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_BEET:
	case XFRM_MODE_TUNNEL:
		if (x->outer_mode.family == AF_INET)
			return xfrm4_prepare_output(x, skb);
		if (x->outer_mode.family == AF_INET6)
			return xfrm6_prepare_output(x, skb);
		break;
	case XFRM_MODE_TRANSPORT:
		if (x->outer_mode.family == AF_INET)
			return xfrm4_transport_output(x, skb);
		if (x->outer_mode.family == AF_INET6)
			return xfrm6_transport_output(x, skb);
		break;
	case XFRM_MODE_ROUTEOPTIMIZATION:
		if (x->outer_mode.family == AF_INET6)
			return xfrm6_ro_output(x, skb);
		WARN_ON_ONCE(1);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return -EOPNOTSUPP;
}

#if IS_ENABLED(CONFIG_NET_PKTGEN)
int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb)
{
	return xfrm_outer_mode_output(x, skb);
}
EXPORT_SYMBOL_GPL(pktgen_xfrm_outer_mode_output);
#endif

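/* Apply one round of transformations to the skb.  The stacked dst chain
 * built by the policy layer is walked state by state: each iteration
 * reserves head/tail room, adds the mode-specific encapsulation, checks
 * state validity, lifetime and replay counters under x->lock, and then
 * runs the protocol transform (e.g. ESP) via x->type->output().
 * Processing stops after a tunnel-mode state, whose output must pass
 * through the netfilter hooks again (see xfrm_output_resume()).
 */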
static int xfrm_output_one(struct sk_buff *skb, int err)
{
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;
	struct net *net = xs_net(x);

	if (err <= 0)
		goto resume;

	do {
		err = xfrm_skb_check_space(skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			goto error_nolock;
		}

		skb->mark = xfrm_smark_get(skb->mark, x);

		err = xfrm_outer_mode_output(x, skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR);
			goto error_nolock;
		}

		spin_lock_bh(&x->lock);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEINVALID);
			err = -EINVAL;
			goto error;
		}

		err = xfrm_state_check_expire(x);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEEXPIRED);
			goto error;
		}

		err = xfrm_replay_overflow(x, skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATESEQERROR);
			goto error;
		}

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock_bh(&x->lock);

		skb_dst_force(skb);
		if (!skb_dst(skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			err = -EHOSTUNREACH;
			goto error_nolock;
		}

		if (xfrm_offload(skb)) {
			x->type_offload->encap(x, skb);
		} else {
			/* Inner headers are invalid now. */
			skb->encapsulation = 0;

			err = x->type->output(x, skb);
			if (err == -EINPROGRESS)
				goto out;
		}

resume:
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			goto error_nolock;
		}

		dst = skb_dst_pop(skb);
		if (!dst) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			err = -EHOSTUNREACH;
			goto error_nolock;
		}
		skb_dst_set(skb, dst);
		x = dst->xfrm;
	} while (x && !(x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL));

	return 0;

error:
	spin_unlock_bh(&x->lock);
error_nolock:
	kfree_skb(skb);
out:
	return err;
}

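/* Entry point for resuming after asynchronous crypto as well: err holds
 * the async completion status, or 1 on the first pass.  Each successful
 * xfrm_output_one() is followed by the af-specific local_out hook; once
 * no xfrm dst remains the packet leaves through dst_output(), otherwise
 * it re-enters POST_ROUTING and continues with the remaining states.
 */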
int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err)
{
	struct net *net = xs_net(skb_dst(skb)->xfrm);

	while (likely((err = xfrm_output_one(skb, err)) == 0)) {
		nf_reset_ct(skb);

		err = skb_dst(skb)->ops->local_out(net, sk, skb);
		if (unlikely(err != 1))
			goto out;

		if (!skb_dst(skb)->xfrm)
			return dst_output(net, sk, skb);

		err = nf_hook(skb_dst(skb)->ops->family,
			      NF_INET_POST_ROUTING, net, sk, skb,
			      NULL, skb_dst(skb)->dev, xfrm_output2);
		if (unlikely(err != 1))
			goto out;
	}

	if (err == -EINPROGRESS)
		err = 0;

out:
	return err;
}
EXPORT_SYMBOL_GPL(xfrm_output_resume);

static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return xfrm_output_resume(sk, skb, 1);
}

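/* Software fallback for GSO packets: segment the skb here and push each
 * resulting segment through xfrm_output2() on its own, since the
 * transforms operate on individual packets.  The BUILD_BUG_ONs ensure
 * IPCB/IP6CB fit below the GSO control block inside skb->cb.
 */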
static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;

	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_GSO_CB_OFFSET);
	BUILD_BUG_ON(sizeof(*IP6CB(skb)) > SKB_GSO_CB_OFFSET);
	segs = skb_gso_segment(skb, 0);
	kfree_skb(skb);
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (segs == NULL)
		return -EINVAL;

	skb_list_walk_safe(segs, segs, nskb) {
		int err;

		skb_mark_not_on_list(segs);
		err = xfrm_output2(net, sk, segs);

		if (unlikely(err)) {
			kfree_skb_list(nskb);
			return err;
		}
	}

	return 0;
}

/* For partial checksum offload, the outer header checksum is calculated
 * by software and the inner header checksum is calculated by hardware.
 * This requires hardware to know the inner packet type to calculate
 * the inner header checksum. Save inner ip protocol here to avoid
 * traversing the packet in the vendor's xmit code.
 * If the encap type is IPIP, just save skb->inner_ipproto. Otherwise,
 * get the ip protocol from the IP header.
 */
static void xfrm_get_inner_ipproto(struct sk_buff *skb)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	const struct ethhdr *eth;

	if (!xo)
		return;

	if (skb->inner_protocol_type == ENCAP_TYPE_IPPROTO) {
		xo->inner_ipproto = skb->inner_ipproto;
		return;
	}

	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER)
		return;

	eth = (struct ethhdr *)skb_inner_mac_header(skb);

	switch (ntohs(eth->h_proto)) {
	case ETH_P_IPV6:
		xo->inner_ipproto = inner_ipv6_hdr(skb)->nexthdr;
		break;
	case ETH_P_IP:
		xo->inner_ipproto = inner_ip_hdr(skb)->protocol;
		break;
	}
}

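/* Main IPsec output entry point, reached once a dst with an attached
 * xfrm state has been selected for the packet.  Decides between the
 * hardware offload path (secpath bookkeeping, GSO type tagging) and the
 * software path, resolving any pending partial checksum before the
 * packet is handed to xfrm_output2().
 */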
int xfrm_output(struct sock *sk, struct sk_buff *skb)
{
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct xfrm_state *x = skb_dst(skb)->xfrm;
	int err;

	switch (x->outer_mode.family) {
	case AF_INET:
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
		IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
		break;
	case AF_INET6:
		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));

		IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
		break;
	}

	secpath_reset(skb);

	if (xfrm_dev_offload_ok(skb, x)) {
		struct sec_path *sp;

		sp = secpath_set(skb);
		if (!sp) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			kfree_skb(skb);
			return -ENOMEM;
		}

		sp->olen++;
		sp->xvec[sp->len++] = x;
		xfrm_state_hold(x);

		if (skb->encapsulation)
			xfrm_get_inner_ipproto(skb);
		skb->encapsulation = 1;

		if (skb_is_gso(skb)) {
			if (skb->inner_protocol)
				return xfrm_output_gso(net, sk, skb);

			skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;
			goto out;
		}

		if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)
			goto out;
	} else {
		if (skb_is_gso(skb))
			return xfrm_output_gso(net, sk, skb);
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		err = skb_checksum_help(skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			kfree_skb(skb);
			return err;
		}
		skb->ip_summed = CHECKSUM_NONE;
	}

out:
	return xfrm_output2(net, sk, skb);
}
EXPORT_SYMBOL_GPL(xfrm_output);

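/* Tunnel-mode PMTU enforcement: if the inner packet has DF set and does
 * not fit the path MTU of the tunnel dst, report the error (to the local
 * socket via xfrm_local_error(), or back to a remote sender via ICMP
 * FRAG_NEEDED) and fail with -EMSGSIZE.
 */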
static int xfrm4_tunnel_check_size(struct sk_buff *skb)
{
	int mtu, ret = 0;

	if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
		goto out;

	if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->ignore_df)
		goto out;

	mtu = dst_mtu(skb_dst(skb));
	if ((!skb_is_gso(skb) && skb->len > mtu) ||
	    (skb_is_gso(skb) &&
	     !skb_gso_validate_network_len(skb, ip_skb_dst_mtu(skb->sk, skb)))) {
		skb->protocol = htons(ETH_P_IP);

		if (skb->sk)
			xfrm_local_error(skb, mtu);
		else
			icmp_send(skb, ICMP_DEST_UNREACH,
				  ICMP_FRAG_NEEDED, htonl(mtu));
		ret = -EMSGSIZE;
	}
out:
	return ret;
}

static int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;

	if (x->outer_mode.encap == XFRM_MODE_BEET &&
	    ip_is_fragment(ip_hdr(skb))) {
		net_warn_ratelimited("BEET mode doesn't support inner IPv4 fragments\n");
		return -EAFNOSUPPORT;
	}

	err = xfrm4_tunnel_check_size(skb);
	if (err)
		return err;

	XFRM_MODE_SKB_CB(skb)->protocol = ip_hdr(skb)->protocol;

	xfrm4_extract_header(skb);
	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
static int xfrm6_tunnel_check_size(struct sk_buff *skb)
{
	int mtu, ret = 0;
	struct dst_entry *dst = skb_dst(skb);

	if (skb->ignore_df)
		goto out;

	mtu = dst_mtu(dst);
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;

	if ((!skb_is_gso(skb) && skb->len > mtu) ||
	    (skb_is_gso(skb) &&
	     !skb_gso_validate_network_len(skb, ip6_skb_dst_mtu(skb)))) {
		skb->dev = dst->dev;
		skb->protocol = htons(ETH_P_IPV6);

		if (xfrm6_local_dontfrag(skb->sk))
			ipv6_stub->xfrm6_local_rxpmtu(skb, mtu);
		else if (skb->sk)
			xfrm_local_error(skb, mtu);
		else
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		ret = -EMSGSIZE;
	}
out:
	return ret;
}
#endif

static int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	int err;

	err = xfrm6_tunnel_check_size(skb);
	if (err)
		return err;

	XFRM_MODE_SKB_CB(skb)->protocol = ipv6_hdr(skb)->nexthdr;

	xfrm6_extract_header(skb);
	return 0;
#else
	WARN_ON_ONCE(1);
	return -EAFNOSUPPORT;
#endif
}

static int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
	const struct xfrm_mode *inner_mode;

	if (x->sel.family == AF_UNSPEC)
		inner_mode = xfrm_ip2inner_mode(x,
				xfrm_af2proto(skb_dst(skb)->ops->family));
	else
		inner_mode = &x->inner_mode;

	if (inner_mode == NULL)
		return -EAFNOSUPPORT;

	switch (inner_mode->family) {
	case AF_INET:
		return xfrm4_extract_output(x, skb);
	case AF_INET6:
		return xfrm6_extract_output(x, skb);
	}

	return -EAFNOSUPPORT;
}

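/* Deliver an MTU error to the local socket that generated the packet.
 * The af-specific handler behind afinfo->local_error reports the error
 * on the originating socket, typically surfacing EMSGSIZE together with
 * the reduced MTU to userspace.
 */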
void xfrm_local_error(struct sk_buff *skb, int mtu)
{
	unsigned int proto;
	struct xfrm_state_afinfo *afinfo;

	if (skb->protocol == htons(ETH_P_IP))
		proto = AF_INET;
	else if (skb->protocol == htons(ETH_P_IPV6) &&
		 skb->sk->sk_family == AF_INET6)
		proto = AF_INET6;
	else
		return;

	afinfo = xfrm_state_get_afinfo(proto);
	if (afinfo)
		afinfo->local_error(skb, mtu);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xfrm_local_error);