// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * ESP GRO support
 */
#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/gro.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/udp.h>
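
/* GRO receive handler for ESP: parse the SPI/sequence number, look up the
 * xfrm state for the packet unless crypto offload has already handled it,
 * and feed the packet to xfrm_input() for decapsulation. Returning
 * ERR_PTR(-EINPROGRESS) tells the GRO layer the skb has been consumed.
 */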
static struct sk_buff *esp4_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	__be32 seq;
	__be32 spi;

	if (!pskb_pull(skb, offset))
		return NULL;

	if (xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq) != 0)
		goto out;

	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		struct sec_path *sp = secpath_set(skb);

		if (!sp)
			goto out;
		if (sp->len == XFRM_MAX_DEPTH)
			goto out_reset;

		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
				      (xfrm_address_t *)&ip_hdr(skb)->daddr,
				      spi, IPPROTO_ESP, AF_INET);
		if (!x)
			goto out_reset;

		skb->mark = xfrm_smark_get(skb->mark, x);

		sp->xvec[sp->len++] = x;
		sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo)
			goto out_reset;
	}

	xo->flags |= XFRM_GRO;

	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input, it does all
	 * the error handling and frees the resources on error. */
	xfrm_input(skb, IPPROTO_ESP, spi, -2);

	return ERR_PTR(-EINPROGRESS);
out_reset:
	secpath_reset(skb);
out:
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}
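
/* Prepend the outer ESP header for GSO: record the inner protocol in the
 * offload state, mark the packet as ESP at the mac header, and fill in the
 * SPI and low-order sequence number from the xfrm state.
 */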
static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct iphdr *iph = ip_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	int proto = iph->protocol;

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	xo->proto = proto;
}
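
/* Tunnel mode: the inner packet is a complete IP packet, so it can be
 * segmented as plain IP payload.
 */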
static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
						struct sk_buff *skb,
						netdev_features_t features)
{
	return skb_eth_gso_segment(skb, features, htons(ETH_P_IP));
}
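
/* Transport mode: step over the ESP header space accounted for in
 * props.header_len and hand the inner transport protocol to its own
 * gso_segment callback.
 */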
static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
						   struct sk_buff *skb,
						   netdev_features_t features)
{
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb->transport_header += x->props.header_len;
	ops = rcu_dereference(inet_offloads[xo->proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}
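
/* BEET mode: locate the inner transport header, which may sit behind a
 * BEET pseudo header (IPPROTO_BEETPH) or, for an IPv6 selector, behind
 * extension headers, then segment with the inner protocol's offload
 * callbacks.
 */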
static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x,
					      struct sk_buff *skb,
					      netdev_features_t features)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	const struct net_offload *ops;
	u8 proto = xo->proto;

	skb->transport_header += x->props.header_len;

	if (x->sel.family != AF_INET6) {
		if (proto == IPPROTO_BEETPH) {
			struct ip_beet_phdr *ph =
				(struct ip_beet_phdr *)skb->data;

			skb->transport_header += ph->hdrlen * 8;
			proto = ph->nexthdr;
		} else {
			skb->transport_header -= IPV4_BEET_PHMAXLEN;
		}
	} else {
		__be16 frag;

		skb->transport_header +=
			ipv6_skip_exthdr(skb, 0, &proto, &frag);
		if (proto == IPPROTO_TCP)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
	}

	if (proto == IPPROTO_IPV6)
		skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;

	__skb_pull(skb, skb_transport_offset(skb));
	ops = rcu_dereference(inet_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}
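
/* Dispatch GSO segmentation according to the outer mode of the state. */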
static struct sk_buff *xfrm4_outer_mode_gso_segment(struct xfrm_state *x,
						    struct sk_buff *skb,
						    netdev_features_t features)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		return xfrm4_tunnel_gso_segment(x, skb, features);
	case XFRM_MODE_TRANSPORT:
		return xfrm4_transport_gso_segment(x, skb, features);
	case XFRM_MODE_BEET:
		return xfrm4_beet_gso_segment(x, skb, features);
	}

	return ERR_PTR(-EOPNOTSUPP);
}
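
/* GSO segmentation entry point for ESP. Validates that the skb really
 * belongs to the state found in the secpath, strips the ESP header and IV,
 * and masks out features the device cannot provide for this ESP packet
 * (e.g. checksum offload without NETIF_F_HW_ESP_TX_CSUM) before running
 * the mode-specific segmentation.
 */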
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo)
		return ERR_PTR(-EINVAL);

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
		return ERR_PTR(-EINVAL);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	if (esph->spi != x->id.spi)
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

	if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
	     !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);
	else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
		 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
		esp_features = features & ~(NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);

	xo->flags |= XFRM_GSO_SEGMENT;

	return xfrm4_outer_mode_gso_segment(x, skb, esp_features);
}
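
/* Input tail called from the xfrm layer once crypto is done; finishes
 * ESP reception via esp_input_done2().
 */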
static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
		return -EINVAL;

	if (!(xo->flags & CRYPTO_DONE))
		skb->ip_summed = CHECKSUM_NONE;

	return esp_input_done2(skb, 0);
}
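
/* Transmit side of ESP offload. If the device cannot do hardware ESP for
 * this state, fall back to software crypto: build the ESP trailer via
 * esp_output_head() and encrypt via esp_output_tail(). With hardware
 * offload, only the ESP header and sequence numbers are prepared here.
 */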
static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;
	__u32 seq;

	esp.inplace = true;

	xo = xfrm_offload(skb);
	if (!xo)
		return -EINVAL;

	if ((!(features & NETIF_F_HW_ESP) &&
	     !(skb->dev->gso_partial_features & NETIF_F_HW_ESP)) ||
	    x->xso.dev != skb->dev) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */
	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	if (!hw_offload || !skb_is_gso(skb)) {
		esp.nfrags = esp_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	seq = xo->seq.low;

	esph = esp.esph;
	esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esph->seq_no = htonl(seq);

		if (!skb_is_gso(skb))
			xo->seq.low++;
		else
			xo->seq.low += skb_shinfo(skb)->gso_segs;
	}

	esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

	ip_hdr(skb)->tot_len = htons(skb->len);
	ip_send_check(ip_hdr(skb));

	if (hw_offload) {
		if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
			return -ENOMEM;

		xo = xfrm_offload(skb);
		if (!xo)
			return -EINVAL;

		xo->flags |= XFRM_XMIT;
		return 0;
	}

	err = esp_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	return 0;
}
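
/* Registration glue: esp4_offload hooks ESP into the inet GRO/GSO
 * machinery, esp_type_offload into the xfrm layer.
 */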
static const struct net_offload esp4_offload = {
	.callbacks = {
		.gro_receive = esp4_gro_receive,
		.gso_segment = esp4_gso_segment,
	},
};

static const struct xfrm_type_offload esp_type_offload = {
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.input_tail	= esp_input_tail,
	.xmit		= esp_xmit,
	.encap		= esp4_gso_encap,
};
static int __init esp4_offload_init(void)
{
	if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type offload\n", __func__);
		return -EAGAIN;
	}

	return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

static void __exit esp4_offload_exit(void)
{
	xfrm_unregister_type_offload(&esp_type_offload, AF_INET);
	inet_del_offload(&esp4_offload, IPPROTO_ESP);
}
module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
MODULE_DESCRIPTION("IPV4 GSO/GRO offload support");