#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ah.h>
#include <linux/crypto.h>
#include <linux/pfkeyv2.h>
#include <linux/scatterlist.h>
#include <net/icmp.h>
#include <net/protocol.h>

/* Per-skb control-block state kept while an async hash request is in flight. */
struct ah_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))

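/* Allocate one contiguous per-packet scratch buffer sized to hold the saved
 * header data (size bytes), the computed ICV, the ahash request and the
 * scatterlist for nfrags fragments, with each region suitably aligned. */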
static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
			  unsigned int size)
{
	unsigned int len;

	len = size + crypto_ahash_digestsize(ahash) +
	      (crypto_ahash_alignmask(ahash) &
	       ~(crypto_tfm_ctx_alignment() - 1));

	len = ALIGN(len, crypto_tfm_ctx_alignment());

	len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

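/* Helpers that carve the buffer from ah_alloc_tmp() into its aligned
 * sub-regions: saved header/authenticator data, ICV, ahash request and
 * scatterlist. */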
static inline u8 *ah_tmp_auth(void *tmp, unsigned int offset)
{
	return tmp + offset;
}

static inline u8 *ah_tmp_icv(struct crypto_ahash *ahash, void *tmp,
			     unsigned int offset)
{
	return PTR_ALIGN((u8 *)tmp + offset, crypto_ahash_alignmask(ahash) + 1);
}

static inline struct ahash_request *ah_tmp_req(struct crypto_ahash *ahash,
					       u8 *icv)
{
	struct ahash_request *req;

	req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash),
				crypto_tfm_ctx_alignment());

	ahash_request_set_tfm(req, ahash);

	return req;
}

static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
					    struct ahash_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_ahash_reqsize(ahash),
			     __alignof__(struct scatterlist));
}

/* Clear mutable options and find final destination to substitute
 * into IP header for icv calculation. Options are already checked
 * for validity, so paranoia is not required. */

static int ip_clear_mutable_options(struct iphdr *iph, __be32 *daddr)
{
	unsigned char *optptr = (unsigned char *)(iph+1);
	int l = iph->ihl*4 - sizeof(struct iphdr);
	int optlen;

	while (l > 0) {
		switch (*optptr) {
		case IPOPT_END:
			return 0;
		case IPOPT_NOOP:
			l--;
			optptr++;
			continue;
		}
		optlen = optptr[1];
		if (optlen < 2 || optlen > l)
			return -EINVAL;
		switch (*optptr) {
		case IPOPT_SEC:
		case 0x85:	/* Some "Extended Security" crap. */
		case IPOPT_CIPSO:
		case IPOPT_RA:
		case 0x80|21:	/* RFC1770 */
			break;
		case IPOPT_LSRR:
		case IPOPT_SSRR:
			if (optlen < 6)
				return -EINVAL;
			memcpy(daddr, optptr+optlen-4, 4);
			/* Fall through */
		default:
			memset(optptr, 0, optlen);
		}
		l -= optlen;
		optptr += optlen;
	}
	return 0;
}

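/* Completion callback for asynchronous ICV computation on output: copy the
 * digest into the AH header, restore the mutable IP header fields from the
 * saved copy, free the scratch buffer and resume xfrm output processing. */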
static void ah_output_done(struct crypto_async_request *base, int err)
{
	u8 *icv;
	struct iphdr *iph;
	struct sk_buff *skb = base->data;
	struct xfrm_state *x = skb_dst(skb)->xfrm;
	struct ah_data *ahp = x->data;
	struct iphdr *top_iph = ip_hdr(skb);
	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
	int ihl = ip_hdrlen(skb);

	iph = AH_SKB_CB(skb)->tmp;
	icv = ah_tmp_icv(ahp->ahash, iph, ihl);
	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);

	top_iph->tos = iph->tos;
	top_iph->ttl = iph->ttl;
	top_iph->frag_off = iph->frag_off;
	if (top_iph->ihl != 5) {
		top_iph->daddr = iph->daddr;
		memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
	}

	kfree(AH_SKB_CB(skb)->tmp);
	xfrm_output_resume(skb, err);
}

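/* Transmit path: save and zero the mutable IP header fields, build the AH
 * header, hash the whole packet through the ahash interface and, unless the
 * request completes asynchronously, write the truncated ICV and restore the
 * saved header fields inline. */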
static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	int nfrags;
	int ihl;
	u8 *icv;
	struct sk_buff *trailer;
	struct crypto_ahash *ahash;
	struct ahash_request *req;
	struct scatterlist *sg;
	struct iphdr *iph, *top_iph;
	struct ip_auth_hdr *ah;
	struct ah_data *ahp;

	ahp = x->data;
	ahash = ahp->ahash;

	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
		goto out;
	nfrags = err;

	skb_push(skb, -skb_network_offset(skb));
	ah = ip_auth_hdr(skb);
	ihl = ip_hdrlen(skb);

	err = -ENOMEM;
	iph = ah_alloc_tmp(ahash, nfrags, ihl);
	if (!iph)
		goto out;

	icv = ah_tmp_icv(ahash, iph, ihl);
	req = ah_tmp_req(ahash, icv);
	sg = ah_req_sg(ahash, req);

	memset(ah->auth_data, 0, ahp->icv_trunc_len);

	top_iph = ip_hdr(skb);

	iph->tos = top_iph->tos;
	iph->ttl = top_iph->ttl;
	iph->frag_off = top_iph->frag_off;

	if (top_iph->ihl != 5) {
		iph->daddr = top_iph->daddr;
		memcpy(iph+1, top_iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
		err = ip_clear_mutable_options(top_iph, &top_iph->daddr);
		if (err)
			goto out_free;
	}

	ah->nexthdr = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_AH;

	top_iph->tos = 0;
	top_iph->tot_len = htons(skb->len);
	top_iph->frag_off = 0;
	top_iph->ttl = 0;
	top_iph->check = 0;

	ah->hdrlen = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;

	ah->reserved = 0;
	ah->spi = x->id.spi;
	ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output);

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, 0, skb->len);

	ahash_request_set_crypt(req, sg, icv, skb->len);
	ahash_request_set_callback(req, 0, ah_output_done, skb);

	AH_SKB_CB(skb)->tmp = iph;

	err = crypto_ahash_digest(req);
	if (err) {
		if (err == -EINPROGRESS)
			goto out;

		if (err == -EBUSY)
			err = NET_XMIT_DROP;
		goto out_free;
	}

	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);

	top_iph->tos = iph->tos;
	top_iph->ttl = iph->ttl;
	top_iph->frag_off = iph->frag_off;
	if (top_iph->ihl != 5) {
		top_iph->daddr = iph->daddr;
		memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
	}

out_free:
	kfree(iph);
out:
	return err;
}

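/* Completion callback for asynchronous ICV computation on input: compare the
 * computed ICV with the received one, strip the AH header on success, then
 * free the scratch buffer and resume xfrm input processing. */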
static void ah_input_done(struct crypto_async_request *base, int err)
{
	u8 *auth_data;
	u8 *icv;
	struct iphdr *work_iph;
	struct sk_buff *skb = base->data;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ah_data *ahp = x->data;
	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
	int ihl = ip_hdrlen(skb);
	int ah_hlen = (ah->hdrlen + 2) << 2;

	work_iph = AH_SKB_CB(skb)->tmp;
	auth_data = ah_tmp_auth(work_iph, ihl);
	icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len);

	err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG: 0;
	if (err)
		goto out;

	skb->network_header += ah_hlen;
	memcpy(skb_network_header(skb), work_iph, ihl);
	__skb_pull(skb, ah_hlen + ihl);
	skb_set_transport_header(skb, -ihl);

	err = ah->nexthdr;
out:
	kfree(AH_SKB_CB(skb)->tmp);
	xfrm_input_resume(skb, err);
}

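/* Receive path: validate the AH header length, save the received ICV and the
 * mutable IP header fields, zero them, hash the packet and verify the ICV,
 * then remove the AH header so the inner protocol follows the IP header. */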
static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
{
	int ah_hlen;
	int ihl;
	int nexthdr;
	int nfrags;
	u8 *auth_data;
	u8 *icv;
	struct sk_buff *trailer;
	struct crypto_ahash *ahash;
	struct ahash_request *req;
	struct scatterlist *sg;
	struct iphdr *iph, *work_iph;
	struct ip_auth_hdr *ah;
	struct ah_data *ahp;
	int err = -ENOMEM;

	if (!pskb_may_pull(skb, sizeof(*ah)))
		goto out;

	ah = (struct ip_auth_hdr *)skb->data;
	ahp = x->data;
	ahash = ahp->ahash;

	nexthdr = ah->nexthdr;
	ah_hlen = (ah->hdrlen + 2) << 2;

	if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
	    ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
		goto out;

	if (!pskb_may_pull(skb, ah_hlen))
		goto out;

	/* We are going to _remove_ AH header to keep sockets happy,
	 * so... Later this can change. */
	if (skb_cloned(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	ah = (struct ip_auth_hdr *)skb->data;
	iph = ip_hdr(skb);
	ihl = ip_hdrlen(skb);

	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
		goto out;
	nfrags = err;

	work_iph = ah_alloc_tmp(ahash, nfrags, ihl + ahp->icv_trunc_len);
	if (!work_iph)
		goto out;

	auth_data = ah_tmp_auth(work_iph, ihl);
	icv = ah_tmp_icv(ahash, auth_data, ahp->icv_trunc_len);
	req = ah_tmp_req(ahash, icv);
	sg = ah_req_sg(ahash, req);

	memcpy(work_iph, iph, ihl);
	memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
	memset(ah->auth_data, 0, ahp->icv_trunc_len);

	iph->ttl = 0;
	iph->tos = 0;
	iph->frag_off = 0;
	iph->check = 0;
	if (ihl > sizeof(*iph)) {
		__be32 dummy;
		err = ip_clear_mutable_options(iph, &dummy);
		if (err)
			goto out_free;
	}

	skb_push(skb, ihl);

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, 0, skb->len);

	ahash_request_set_crypt(req, sg, icv, skb->len);
	ahash_request_set_callback(req, 0, ah_input_done, skb);

	AH_SKB_CB(skb)->tmp = work_iph;

	err = crypto_ahash_digest(req);
	if (err) {
		if (err == -EINPROGRESS)
			goto out;

		goto out_free;
	}

	err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG: 0;
	if (err)
		goto out_free;

	skb->network_header += ah_hlen;
	memcpy(skb_network_header(skb), work_iph, ihl);
	__skb_pull(skb, ah_hlen + ihl);
	skb_set_transport_header(skb, -ihl);

	err = nexthdr;

out_free:
	kfree(work_iph);
out:
	return err;
}

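/* ICMP error handler: on a fragmentation-needed error for a packet that
 * carried AH, look up the SA by destination address and SPI and report that
 * path MTU discovery hit this SA. */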
static void ah4_err(struct sk_buff *skb, u32 info)
{
	struct net *net = dev_net(skb->dev);
	struct iphdr *iph = (struct iphdr *)skb->data;
	struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
	struct xfrm_state *x;

	if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH ||
	    icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
		return;

	x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr,
			      ah->spi, IPPROTO_AH, AF_INET);
	if (!x)
		return;
	printk(KERN_DEBUG "pmtu discovery on SA AH/%08x/%08x\n",
	       ntohl(ah->spi), ntohl(iph->daddr));
	xfrm_state_put(x);
}

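/* Initialize AH state: allocate the ahash transform named by the xfrm
 * authentication algorithm, set its key, validate the digest size against
 * the xfrm algorithm description and record the full and truncated ICV
 * lengths used on the wire. */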
static int ah_init_state(struct xfrm_state *x)
{
	struct ah_data *ahp = NULL;
	struct xfrm_algo_desc *aalg_desc;
	struct crypto_ahash *ahash;

	if (!x->aalg)
		goto error;

	if (x->encap)
		goto error;

	ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
	if (!ahp)
		return -ENOMEM;

	ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0);
	if (IS_ERR(ahash))
		goto error;

	ahp->ahash = ahash;
	if (crypto_ahash_setkey(ahash, x->aalg->alg_key,
				(x->aalg->alg_key_len + 7) / 8))
		goto error;

	/*
	 * Lookup the algorithm description maintained by xfrm_algo,
	 * verify crypto transform properties, and store information
	 * we need for AH processing. This lookup cannot fail here
	 * after a successful crypto_alloc_ahash().
	 */
	aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
	BUG_ON(!aalg_desc);

	if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
	    crypto_ahash_digestsize(ahash)) {
		printk(KERN_INFO "AH: %s digestsize %u != %hu\n",
		       x->aalg->alg_name, crypto_ahash_digestsize(ahash),
		       aalg_desc->uinfo.auth.icv_fullbits/8);
		goto error;
	}

	ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
	ahp->icv_trunc_len = x->aalg->alg_trunc_len/8;

	BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN);

	x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
					  ahp->icv_trunc_len);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	x->data = ahp;

	return 0;

error:
	if (ahp) {
		crypto_free_ahash(ahp->ahash);
		kfree(ahp);
	}
	return -EINVAL;
}

static void ah_destroy(struct xfrm_state *x)
{
	struct ah_data *ahp = x->data;

	if (!ahp)
		return;

	crypto_free_ahash(ahp->ahash);
	kfree(ahp);
}

static const struct xfrm_type ah_type =
{
	.description	= "AH4",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_AH,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= ah_init_state,
	.destructor	= ah_destroy,
	.input		= ah_input,
	.output		= ah_output
};

static const struct net_protocol ah4_protocol = {
	.handler	= xfrm4_rcv,
	.err_handler	= ah4_err,
	.no_policy	= 1,
	.netns_ok	= 1,
};

static int __init ah4_init(void)
{
	if (xfrm_register_type(&ah_type, AF_INET) < 0) {
		printk(KERN_INFO "ip ah init: can't add xfrm type\n");
		return -EAGAIN;
	}
	if (inet_add_protocol(&ah4_protocol, IPPROTO_AH) < 0) {
		printk(KERN_INFO "ip ah init: can't add protocol\n");
		xfrm_unregister_type(&ah_type, AF_INET);
		return -EAGAIN;
	}
	return 0;
}

static void __exit ah4_fini(void)
{
	if (inet_del_protocol(&ah4_protocol, IPPROTO_AH) < 0)
		printk(KERN_INFO "ip ah close: can't remove protocol\n");
	if (xfrm_unregister_type(&ah_type, AF_INET) < 0)
		printk(KERN_INFO "ip ah close: can't remove xfrm type\n");
}

module_init(ah4_init);
module_exit(ah4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_AH);