/*
 * Copyright (C)2002 USAGI/WIDE Project
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * Authors
 *
 *	Mitsuru KANDA @USAGI       : IPv6 Support
 *	Kazunori MIYAZAWA @USAGI   :
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 *	This file is derived from net/ipv4/esp.c
 */
#define pr_fmt(fmt) "IPv6: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <linux/icmpv6.h>
struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
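/*
 * The crypto layer may complete requests asynchronously, so the pointer to
 * the scratch buffer allocated by esp_alloc_tmp() is stashed in the skb
 * control block; the completion callbacks free it via ESP_SKB_CB(skb)->tmp.
 */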
static u32 esp6_get_mtu(struct xfrm_state *x, int mtu);
/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the upper 32 bits of the sequence number are
 * placed at the front, if present, followed by the IV, the request and
 * finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
{
	unsigned int len;

	len = seqihlen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}
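/*
 * Rough layout of the buffer returned by esp_alloc_tmp() (a sketch; the
 * exact offsets depend on the alignment rules applied above):
 *
 *   +-------+----+--------------------------+---------------------+
 *   | seqhi | IV | aead request + tfm ctx   | nfrags scatterlists |
 *   +-------+----+--------------------------+---------------------+
 *
 * The esp_tmp_*() helpers below recompute these offsets on demand rather
 * than storing them anywhere.
 */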
static inline __be32 *esp_tmp_seqhi(void *tmp)
{
	return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
}
static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + seqhilen,
			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}
static inline struct aead_givcrypt_request *esp_tmp_givreq(
	struct crypto_aead *aead, u8 *iv)
{
	struct aead_givcrypt_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_givcrypt_set_tfm(req, aead);
	return req;
}
static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}
static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}
static inline struct scatterlist *esp_givreq_sg(
	struct crypto_aead *aead, struct aead_givcrypt_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}
static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	kfree(ESP_SKB_CB(skb)->tmp);
	xfrm_output_resume(skb, err);
}
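/*
 * On output the packet ends up laid out roughly as in RFC 4303:
 *
 *   [ ESP header (SPI, seq) | IV | payload | TFC pad | pad | pad length |
 *     next header | ICV ]
 *
 * esp6_output() below reserves the trailer, fills in the padding and hands
 * the whole buffer to the AEAD transform, which generates the IV and
 * appends the ICV.
 */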
static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_givcrypt_request *req;
	struct scatterlist *sg;
	struct scatterlist *asg;
	struct sk_buff *trailer;
	void *tmp;
	int blksize;
	int clen;
	int alen;
	int plen;
	int tfclen;
	int nfrags;
	int assoclen;
	int sglists;
	int seqhilen;
	u8 *iv;
	u8 *tail;
	__be32 *seqhi;
	struct esp_data *esp = x->data;

	/* skb is pure payload to encrypt */
	aead = esp->aead;
	alen = crypto_aead_authsize(aead);
	tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	clen = ALIGN(skb->len + 2 + tfclen, blksize);
	plen = clen - skb->len - tfclen;

	err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
	if (err < 0)
		goto error;
	nfrags = err;
	assoclen = sizeof(*esph);
	sglists = 1;
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		sglists += 2;
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
	if (!tmp) {
		err = -ENOMEM;
		goto error;
	}
	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_givreq(aead, iv);
	asg = esp_givreq_sg(aead, req);
	sg = asg + sglists;
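	/*
	 * RFC 4303 trailer: TFC padding is zero-filled, the self-describing
	 * ESP pad bytes count up 1, 2, 3, ..., then come the pad-length byte
	 * and the next-header value that was saved in the mac header slot.
	 */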
	/* Fill padding... */
	tail = skb_tail_pointer(trailer);
	if (tfclen) {
		memset(tail, 0, tfclen);
		tail += tfclen;
	}
	do {
		int i;
		for (i = 0; i < plen - 2; i++)
			tail[i] = i + 1;
	} while (0);
	tail[plen - 2] = plen - 2;
	tail[plen - 1] = *skb_mac_header(skb);
	pskb_put(skb, trailer, clen - skb->len + alen);
	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg,
		     esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
		     clen + alen);
	if (x->props.flags & XFRM_STATE_ESN) {
		sg_init_table(asg, 3);
		sg_set_buf(asg, &esph->spi, sizeof(__be32));
		*seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
		sg_set_buf(asg + 1, seqhi, seqhilen);
		sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
	} else
		sg_init_one(asg, esph, sizeof(*esph));
	aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
	aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
	aead_givcrypt_set_assoc(req, asg, assoclen);
	aead_givcrypt_set_giv(req, esph->enc_data,
			      XFRM_SKB_CB(skb)->seq.output.low);

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_givencrypt(req);
	if (err == -EINPROGRESS)
		goto error;

	if (err == -EBUSY)
		err = NET_XMIT_DROP;

	kfree(tmp);

error:
	return err;
}
static int esp_input_done2(struct sk_buff *skb, int err)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct esp_data *esp = x->data;
	struct crypto_aead *aead = esp->aead;
	int alen = crypto_aead_authsize(aead);
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int elen = skb->len - hlen;
	int hdr_len = skb_network_header_len(skb);
	int padlen;
	u8 nexthdr[2];

	kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;
	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	err = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage padlen=%d, elen=%d\n",
			       padlen + 2, elen - alen);
		goto out;
	}

	/* ... check padding bits here. Silly. :-) */

	pskb_trim(skb, skb->len - alen - padlen - 2);
	__skb_pull(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);

	err = nexthdr[1];

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp_input_done2(skb, err));
}
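/*
 * Inbound processing: esp6_input() queues the AEAD decryption, and
 * esp_input_done()/esp_input_done2() then strip the ESP header, padding
 * and ICV, whether the crypto request completed synchronously or not.
 */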
static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct esp_data *esp = x->data;
	struct crypto_aead *aead = esp->aead;
	struct aead_request *req;
	struct sk_buff *trailer;
	int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
	int nfrags;
	int assoclen;
	int sglists;
	int seqhilen;
	int ret = 0;
	void *tmp;
	__be32 *seqhi;
	u8 *iv;
	struct scatterlist *sg;
	struct scatterlist *asg;

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead))) {
		ret = -EINVAL;
		goto out;
	}

	if (elen <= 0) {
		ret = -EINVAL;
		goto out;
	}
	if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = -ENOMEM;

	assoclen = sizeof(*esph);
	sglists = 1;
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		sglists += 2;
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
	if (!tmp)
		goto out;
	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	asg = esp_req_sg(aead, req);
	sg = asg + sglists;

	skb->ip_summed = CHECKSUM_NONE;

	esph = (struct ip_esp_hdr *)skb->data;

	/* Get ivec. This may be wrong; check against other implementations. */
	iv = esph->enc_data;

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);
	if (x->props.flags & XFRM_STATE_ESN) {
		sg_init_table(asg, 3);
		sg_set_buf(asg, &esph->spi, sizeof(__be32));
		*seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
		sg_set_buf(asg + 1, seqhi, seqhilen);
		sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
	} else
		sg_init_one(asg, esph, sizeof(*esph));
	aead_request_set_callback(req, 0, esp_input_done, skb);
	aead_request_set_crypt(req, sg, sg, elen, iv);
	aead_request_set_assoc(req, asg, assoclen);

	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS)
		goto out;

	ret = esp_input_done2(skb, ret);

out:
	return ret;
}
static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
{
	struct esp_data *esp = x->data;
	u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
	unsigned int net_adj;

	if (x->props.mode != XFRM_MODE_TUNNEL)
		net_adj = sizeof(struct ipv6hdr);
	else
		net_adj = 0;

	return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
		 net_adj) & ~(blksize - 1)) + net_adj - 2;
}
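/*
 * Worked example (illustrative figures): transport mode with
 * authenc(hmac(sha1),cbc(aes)) gives header_len = 8 (ESP) + 16 (IV) = 24,
 * a 12-byte ICV and blksize = 16.  For mtu = 1500:
 *
 *	((1500 - 24 - 12 - 40) & ~15) + 40 - 2 = 1424 + 38 = 1462
 *
 * i.e. at most 1462 bytes of inner payload fit without fragmentation.
 */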
static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		     u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
	struct xfrm_state *x;

	if (type != ICMPV6_DEST_UNREACH &&
	    type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return;

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET6);
	if (!x)
		return;

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0);
	else
		ip6_update_pmtu(skb, net, info, 0, 0);
	xfrm_state_put(x);
}
static void esp6_destroy(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;

	if (!esp)
		return;

	crypto_free_aead(esp->aead);
	kfree(esp);
}
static int esp_init_aead(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;
	struct crypto_aead *aead;
	int err;

	aead = crypto_alloc_aead(x->aead->alg_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	esp->aead = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}
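/*
 * Non-AEAD configurations are glued together from a cipher and a hash by
 * the authenc()/authencesn() crypto templates, e.g. (illustrative)
 * "authenc(hmac(sha1),cbc(aes))".  esp_init_authenc() builds that template
 * name and packs both keys into a single blob for crypto_aead_setkey().
 */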
static int esp_init_authenc(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (x->ealg == NULL)
		goto error;

	err = -ENAMETOOLONG;

	if (x->props.flags & XFRM_STATE_ESN) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "authencesn(%s,%s)",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "authenc(%s,%s)",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	esp->aead = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));
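	/*
	 * The key blob handed to crypto_aead_setkey() is laid out as
	 * (a sketch of the authenc key format):
	 *
	 *	+---------------------------+----------+---------+
	 *	| rtattr: param (enckeylen) | auth key | enc key |
	 *	+---------------------------+----------+---------+
	 */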
	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
				 x->aalg->alg_name,
				 crypto_aead_authsize(aead),
				 aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}
static int esp6_init_state(struct xfrm_state *x)
{
	struct esp_data *esp;
	struct crypto_aead *aead;
	u32 align;
	int err;

	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
	if (esp == NULL)
		return -ENOMEM;

	x->data = esp;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);
	if (err)
		goto error;

	aead = esp->aead;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		if (x->sel.family != AF_INET6)
			x->props.header_len += IPV4_BEET_PHMAXLEN +
					       (sizeof(struct ipv6hdr) -
						sizeof(struct iphdr));
		break;
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct ipv6hdr);
		break;
	default:
		goto error;
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead);

error:
	return err;
}
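/*
 * Example trailer reservation (illustrative): for cbc(aes) (16-byte blocks,
 * so align = 16) with a 12-byte ICV, trailer_len = 16 + 1 + 12 = 29 bytes,
 * which covers worst-case padding plus the pad-length and next-header
 * bytes, and the ICV.
 */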
static const struct xfrm_type esp6_type = {
	.description	= "ESP6",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp6_init_state,
	.destructor	= esp6_destroy,
	.get_mtu	= esp6_get_mtu,
	.input		= esp6_input,
	.output		= esp6_output,
	.hdr_offset	= xfrm6_find_1stfragopt,
};
static const struct inet6_protocol esp6_protocol = {
	.handler	= xfrm6_rcv,
	.err_handler	= esp6_err,
	.flags		= INET6_PROTO_NOPOLICY,
};
static int __init esp6_init(void)
{
	if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}

	if (inet6_add_protocol(&esp6_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp6_type, AF_INET6);
		return -EAGAIN;
	}

	return 0;
}
static void __exit esp6_fini(void)
{
	if (inet6_del_protocol(&esp6_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0)
		pr_info("%s: can't remove xfrm type\n", __func__);
}
module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);