// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 */
#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/gro.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
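
/* GRO receive handler for ESP: parse the SPI and sequence number, attach
 * the matching state to the skb's sec_path when the packet has not already
 * been decrypted in hardware, and feed the packet into xfrm_input().
 * Returning ERR_PTR(-EINPROGRESS) tells the GRO layer that the skb has
 * been consumed by the xfrm stack.
 */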
static struct sk_buff *esp4_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	__be32 seq;
	__be32 spi;

	if (!pskb_pull(skb, offset))
		return NULL;

	if (xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq) != 0)
		goto out;

	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		struct sec_path *sp = secpath_set(skb);

		if (!sp)
			goto out;

		if (sp->len == XFRM_MAX_DEPTH)
			goto out_reset;

		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
				      (xfrm_address_t *)&ip_hdr(skb)->daddr,
				      spi, IPPROTO_ESP, AF_INET);
		if (!x)
			goto out_reset;

		skb->mark = xfrm_smark_get(skb->mark, x);

		sp->xvec[sp->len++] = x;
		sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo)
			goto out_reset;
	}

	xo->flags |= XFRM_GRO;

	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input, it does all
	 * the error handling and frees the resources on error. */
	xfrm_input(skb, IPPROTO_ESP, spi, -2);

	return ERR_PTR(-EINPROGRESS);
out_reset:
	secpath_reset(skb);
out:
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}
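
/* GSO encapsulation hook: prepend a plain ESP header (SPI plus the low 32
 * bits of the output sequence number) in front of the payload and remember
 * the inner protocol in the offload context so segmentation can restore it.
 */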
static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct iphdr *iph = ip_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	int proto = iph->protocol;

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	xo->proto = proto;
}
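
/* Tunnel mode: the inner packet is a complete IPv4 or IPv6 datagram, so
 * choose the ethertype from the inner mode family and let the generic
 * ethernet GSO helper do the segmentation.
 */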
static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
						struct sk_buff *skb,
						netdev_features_t features)
{
	__be16 type = x->inner_mode.family == AF_INET6 ? htons(ETH_P_IPV6)
						       : htons(ETH_P_IP);

	return skb_eth_gso_segment(skb, features, type);
}
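
/* Transport mode: advance the transport header past the ESP header so the
 * inner protocol's offload handler (e.g. TCP) sees its own header, then
 * delegate segmentation to that handler.
 */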
static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
						   struct sk_buff *skb,
						   netdev_features_t features)
{
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb->transport_header += x->props.header_len;
	ops = rcu_dereference(inet_offloads[xo->proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}
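
/* BEET mode: recover the inner protocol and the real transport header
 * offset, accounting for an optional pseudo header (IPPROTO_BEETPH) and,
 * for IPv6 selectors, any extension headers, before handing the skb to the
 * inner protocol's gso_segment callback.
 */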
static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x,
					      struct sk_buff *skb,
					      netdev_features_t features)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	const struct net_offload *ops;
	u8 proto = xo->proto;

	skb->transport_header += x->props.header_len;

	if (x->sel.family != AF_INET6) {
		if (proto == IPPROTO_BEETPH) {
			struct ip_beet_phdr *ph =
				(struct ip_beet_phdr *)skb->data;

			skb->transport_header += ph->hdrlen * 8;
			proto = ph->nexthdr;
		} else {
			skb->transport_header -= IPV4_BEET_PHMAXLEN;
		}
	} else {
		__be16 frag;

		skb->transport_header +=
			ipv6_skip_exthdr(skb, 0, &proto, &frag);
		if (proto == IPPROTO_TCP)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
	}

	if (proto == IPPROTO_IPV6)
		skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;

	__skb_pull(skb, skb_transport_offset(skb));
	ops = rcu_dereference(inet_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}
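
/* Dispatch segmentation according to the outer encapsulation mode of the
 * xfrm state.
 */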
static struct sk_buff *xfrm4_outer_mode_gso_segment(struct xfrm_state *x,
						    struct sk_buff *skb,
						    netdev_features_t features)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		return xfrm4_tunnel_gso_segment(x, skb, features);
	case XFRM_MODE_TRANSPORT:
		return xfrm4_transport_gso_segment(x, skb, features);
	case XFRM_MODE_BEET:
		return xfrm4_beet_gso_segment(x, skb, features);
	}

	return ERR_PTR(-EOPNOTSUPP);
}
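
/* Main GSO entry point for ESP: validate the SPI against the state in the
 * sec_path, strip the ESP header and IV, and mask out features the device
 * cannot provide for this packet before segmenting by outer mode.
 */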
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo)
		return ERR_PTR(-EINVAL);

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
		return ERR_PTR(-EINVAL);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	if (esph->spi != x->id.spi)
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

	if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
	     !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);
	else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
		 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
		esp_features = features & ~(NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);

	xo->flags |= XFRM_GSO_SEGMENT;

	return xfrm4_outer_mode_gso_segment(x, skb, esp_features);
}
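
/* Finish receive processing for a packet whose crypto was handled by the
 * device; esp_input_done2() strips the ESP trailer and padding.
 */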
static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
		return -EINVAL;

	if (!(xo->flags & CRYPTO_DONE))
		skb->ip_summed = CHECKSUM_NONE;

	return esp_input_done2(skb, 0);
}
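
/* Transmit path: compute the ESP padding and trailer sizes and build the
 * outer header. If the device can encrypt this packet (NETIF_F_HW_ESP on a
 * matching offload device), leave the crypto to the NIC; otherwise mark
 * CRYPTO_FALLBACK and run the software ESP output tail.
 */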
static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;
	__u32 seq;

	esp.inplace = true;

	xo = xfrm_offload(skb);
	if (!xo)
		return -EINVAL;

	if ((!(features & NETIF_F_HW_ESP) &&
	     !(skb->dev->gso_partial_features & NETIF_F_HW_ESP)) ||
	    x->xso.dev != skb->dev) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */
	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	if (!hw_offload || !skb_is_gso(skb)) {
		esp.nfrags = esp_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	seq = xo->seq.low;

	esph = esp.esph;
	esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esph->seq_no = htonl(seq);

		if (!skb_is_gso(skb))
			xo->seq.low++;
		else
			xo->seq.low += skb_shinfo(skb)->gso_segs;
	}

	if (xo->seq.low < seq)
		xo->seq.hi++;

	esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

	ip_hdr(skb)->tot_len = htons(skb->len);
	ip_send_check(ip_hdr(skb));

	if (hw_offload) {
		if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
			return -ENOMEM;

		xo = xfrm_offload(skb);
		if (!xo)
			return -EINVAL;

		xo->flags |= XFRM_XMIT;
		return 0;
	}

	err = esp_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	if (skb_needs_linearize(skb, skb->dev->features) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	return 0;
}
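
/* GRO/GSO callbacks registered with the inet offload table and the type
 * offload hooks used by the xfrm layer for IPPROTO_ESP.
 */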
static const struct net_offload esp4_offload = {
	.callbacks = {
		.gro_receive = esp4_gro_receive,
		.gso_segment = esp4_gso_segment,
	},
};

static const struct xfrm_type_offload esp_type_offload = {
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.input_tail	= esp_input_tail,
	.xmit		= esp_xmit,
	.encap		= esp4_gso_encap,
};
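
/* Register the ESP type offload with the xfrm layer and the GRO/GSO
 * callbacks with the inet offload table; module teardown reverses both.
 */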
static int __init esp4_offload_init(void)
{
	if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type offload\n", __func__);
		return -EAGAIN;
	}

	return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

static void __exit esp4_offload_exit(void)
{
	xfrm_unregister_type_offload(&esp_type_offload, AF_INET);
	inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
MODULE_DESCRIPTION("IPV4 GSO/GRO offload support");