/*
 * Checksum updating actions
 *
 * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include <linux/netlink.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>

#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/igmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>

#include <net/act_api.h>

#include <linux/tc_act/tc_csum.h>
#include <net/tc_act/tc_csum.h>

#define CSUM_TAB_MASK 15
static struct tcf_hashinfo csum_hash_info;

static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
	[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};

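/*
 * Parse the netlink attributes and either create a new csum action
 * instance or update the existing one found by index. The parameters
 * are only applied under the action lock once validation has passed.
 */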
static int tcf_csum_init(struct net *n, struct nlattr *nla, struct nlattr *est,
			 struct tc_action *a, int ovr, int bind)
{
	struct nlattr *tb[TCA_CSUM_MAX + 1];
	struct tc_csum *parm;
	struct tcf_common *pc;
	struct tcf_csum *p;
	int ret = 0, err;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy);
	if (err < 0)
		return err;
	if (tb[TCA_CSUM_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_CSUM_PARMS]);

	pc = tcf_hash_check(parm->index, a, bind);
	if (!pc) {
		pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
		if (IS_ERR(pc))
			return PTR_ERR(pc);
		ret = ACT_P_CREATED;
	} else {
		if (bind)	/* don't override defaults */
			return 0;
		tcf_hash_release(pc, bind, a->ops->hinfo);
		if (!ovr)
			return -EEXIST;
	}

	p = to_tcf_csum(pc);
	spin_lock_bh(&p->tcf_lock);
	p->tcf_action = parm->action;
	p->update_flags = parm->update_flags;
	spin_unlock_bh(&p->tcf_lock);

	if (ret == ACT_P_CREATED)
		tcf_hash_insert(pc, a->ops->hinfo);
	return ret;
}

static int tcf_csum_cleanup(struct tc_action *a, int bind)
{
	struct tcf_csum *p = a->priv;
	return tcf_hash_release(&p->common, bind, &csum_hash_info);
}

/**
 * tcf_csum_skb_nextlayer - Get next layer pointer
 * @skb: sk_buff to use
 * @ihl: previous summed headers length
 * @ipl: complete packet length
 * @jhl: next header length
 *
 * Check that the expected next layer is available in the specified sk_buff.
 * Return a pointer to the next layer if it is, NULL otherwise.
 */
static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
				    unsigned int ihl, unsigned int ipl,
				    unsigned int jhl)
{
	int ntkoff = skb_network_offset(skb);
	int hl = ihl + jhl;

	if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
	    (skb_cloned(skb) &&
	     !skb_clone_writable(skb, hl + ntkoff) &&
	     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
		return NULL;
	else
		return (void *)(skb_network_header(skb) + ihl);
}

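/*
 * The per-protocol helpers below all follow the same pattern: locate the
 * transport header via tcf_csum_skb_nextlayer(), zero the stored checksum,
 * recompute it with csum_partial() over the payload (folding in a
 * pseudo-header where the protocol requires one), and finally clear any
 * stale offload state by setting skb->ip_summed to CHECKSUM_NONE.
 */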
static int tcf_csum_ipv4_icmp(struct sk_buff *skb,
			      unsigned int ihl, unsigned int ipl)
{
	struct icmphdr *icmph;

	icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
	if (icmph == NULL)
		return 0;

	icmph->checksum = 0;
	skb->csum = csum_partial(icmph, ipl - ihl, 0);
	icmph->checksum = csum_fold(skb->csum);
	skb->ip_summed = CHECKSUM_NONE;
	return 1;
}

static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
			      unsigned int ihl, unsigned int ipl)
{
	struct igmphdr *igmph;

	igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
	if (igmph == NULL)
		return 0;

	igmph->csum = 0;
	skb->csum = csum_partial(igmph, ipl - ihl, 0);
	igmph->csum = csum_fold(skb->csum);
	skb->ip_summed = CHECKSUM_NONE;
	return 1;
}

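/*
 * Note: the ICMPv4 and IGMP checksums above cover only the message itself,
 * so a plain fold is enough. The ICMPv6, TCP and UDP helpers that follow
 * must also mix in a pseudo-header taken from the surrounding IP header.
 */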
static int tcf_csum_ipv6_icmp(struct sk_buff *skb,
			      unsigned int ihl, unsigned int ipl)
{
	struct icmp6hdr *icmp6h;
	const struct ipv6hdr *ip6h;

	icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
	if (icmp6h == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	icmp6h->icmp6_cksum = 0;
	skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					      ipl - ihl, IPPROTO_ICMPV6,
					      skb->csum);
	skb->ip_summed = CHECKSUM_NONE;
	return 1;
}

static int tcf_csum_ipv4_tcp(struct sk_buff *skb,
			     unsigned int ihl, unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct iphdr *iph;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	iph = ip_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = tcp_v4_check(ipl - ihl,
				   iph->saddr, iph->daddr, skb->csum);
	skb->ip_summed = CHECKSUM_NONE;
	return 1;
}

static int tcf_csum_ipv6_tcp(struct sk_buff *skb,
			     unsigned int ihl, unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct ipv6hdr *ip6h;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				      ipl - ihl, IPPROTO_TCP,
				      skb->csum);
	skb->ip_summed = CHECKSUM_NONE;
	return 1;
}

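/*
 * UDP and UDP-Lite share the two helpers below. They cannot blindly trust
 * udph->len: for UDP-Lite that field carries the checksum coverage rather
 * than the datagram length, so the real length is always derived from the
 * IP header (ipl), and udph->len is used only after being validated
 * against it.
 */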
static int tcf_csum_ipv4_udp(struct sk_buff *skb,
			     unsigned int ihl, unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct iphdr *iph;
	u16 ul;

	/*
	 * Support both UDP and UDPLITE checksum algorithms. Don't use
	 * udph->len to get the real length without any protocol check;
	 * UDPLITE uses udph->len for another thing.
	 * Use iph->tot_len, or just ipl.
	 */
	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	iph = ip_hdr(skb);
	ul = ntohs(udph->len);

	if (udplite || udph->check) {
		udph->check = 0;

		if (udplite) {
			if (ul == 0)
				skb->csum = csum_partial(udph, ipl - ihl, 0);
			else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
				skb->csum = csum_partial(udph, ul, 0);
			else
				goto ignore_obscure_skb;
		} else {
			if (ul != ipl - ihl)
				goto ignore_obscure_skb;
			skb->csum = csum_partial(udph, ul, 0);
		}

		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						ul, iph->protocol,
						skb->csum);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

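/*
 * A freshly computed UDP checksum of zero is folded to all-ones
 * (CSUM_MANGLED_0) in both UDP helpers: an on-the-wire zero means
 * "no checksum was generated" for UDP over IPv4, and a zero checksum
 * is simply invalid for UDP-Lite and for UDP over IPv6.
 */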
static int tcf_csum_ipv6_udp(struct sk_buff *skb,
			     unsigned int ihl, unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct ipv6hdr *ip6h;
	u16 ul;

	/*
	 * Support both UDP and UDPLITE checksum algorithms. Don't use
	 * udph->len to get the real length without any protocol check;
	 * UDPLITE uses udph->len for another thing.
	 * Use ip6h->payload_len + sizeof(*ip6h) ... , or just ipl.
	 */
	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	ul = ntohs(udph->len);

	udph->check = 0;

	if (udplite) {
		if (ul == 0)
			skb->csum = csum_partial(udph, ipl - ihl, 0);
		else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
			skb->csum = csum_partial(udph, ul, 0);
		else
			goto ignore_obscure_skb;
	} else {
		if (ul != ipl - ihl)
			goto ignore_obscure_skb;
		skb->csum = csum_partial(udph, ul, 0);
	}

	udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
				      udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
				      skb->csum);
	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

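/*
 * IPv4 dispatcher: non-first fragments carry no transport header, so the
 * switch below deliberately collapses them onto protocol 0 and leaves the
 * payload untouched. The IP header checksum itself can still be refreshed
 * afterwards when TCA_CSUM_UPDATE_FLAG_IPV4HDR is set.
 */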
static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
	const struct iphdr *iph;
	int ntkoff;

	ntkoff = skb_network_offset(skb);

	if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
		goto fail;

	iph = ip_hdr(skb);

	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_ICMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
			if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_IGMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
			if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_TCP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
			if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_UDP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 0))
				goto fail;
		break;
	case IPPROTO_UDPLITE:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 1))
				goto fail;
		break;
	}

	if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
		if (skb_cloned(skb) &&
		    !skb_clone_writable(skb, sizeof(*iph) + ntkoff) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto fail;
		ip_send_check(ip_hdr(skb));
	}

	return 1;

fail:
	return 0;
}

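/*
 * Hop-by-hop option walk used by tcf_csum_ipv6(). Its main job is to spot
 * an RFC 2675 jumbo payload option and, when the option is sane, replace
 * *pl with the 32-bit jumbogram length it carries.
 */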
static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh,
				 unsigned int ixhl, unsigned int *pl)
{
	int off, len, optlen;
	unsigned char *xh = (void *)ip6xh;

	off = sizeof(*ip6xh);
	len = ixhl - off;

	while (len > 1) {
		switch (xh[off]) {
		case IPV6_TLV_PAD1:
			optlen = 1;
			break;
		case IPV6_TLV_JUMBO:
			optlen = xh[off + 1] + 2;
			if (optlen != 6 || len < 6 || (off & 3) != 2)
				/* wrong jumbo option length/alignment */
				return 0;
			*pl = ntohl(*(__be32 *)(xh + off + 2));
			goto done;
		default:
			optlen = xh[off + 1] + 2;
			if (optlen > len)
				/* ignore obscure options */
				goto done;
			break;
		}
		off += optlen;
		len -= optlen;
	}

done:
	return 1;
}

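/*
 * Walk the IPv6 extension header chain until a transport protocol we know
 * how to checksum is found. Fragments and unknown next headers are ignored
 * rather than dropped, since nothing useful can be recomputed for them.
 */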
static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
{
	struct ipv6hdr *ip6h;
	struct ipv6_opt_hdr *ip6xh;
	unsigned int hl, ixhl;
	unsigned int pl;
	int ntkoff;
	u8 nexthdr;

	ntkoff = skb_network_offset(skb);
	hl = sizeof(*ip6h);

	if (!pskb_may_pull(skb, hl + ntkoff))
		goto fail;

	ip6h = ipv6_hdr(skb);
	pl = ntohs(ip6h->payload_len);
	nexthdr = ip6h->nexthdr;

	do {
		switch (nexthdr) {
		case NEXTHDR_FRAGMENT:
			goto ignore_skb;
		case NEXTHDR_ROUTING:
		case NEXTHDR_HOP:
		case NEXTHDR_DEST:
			if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			ixhl = ipv6_optlen(ip6xh);
			if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			if ((nexthdr == NEXTHDR_HOP) &&
			    !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
				goto fail;
			nexthdr = ip6xh->nexthdr;
			hl += ixhl;
			break;
		case IPPROTO_ICMPV6:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
				if (!tcf_csum_ipv6_icmp(skb,
							hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_TCP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
				if (!tcf_csum_ipv6_tcp(skb,
						       hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_UDP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 0))
					goto fail;
			goto done;
		case IPPROTO_UDPLITE:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 1))
					goto fail;
			goto done;
		default:
			goto ignore_skb;
		}
	} while (pskb_may_pull(skb, hl + 1 + ntkoff));

done:
ignore_skb:
	return 1;

fail:
	return 0;
}

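/*
 * The action entry point. The parameters are snapshotted under the action
 * lock, so the packet path never holds the lock across the checksum
 * rewrite itself.
 */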
static int tcf_csum(struct sk_buff *skb,
		    const struct tc_action *a, struct tcf_result *res)
{
	struct tcf_csum *p = a->priv;
	int action;
	u32 update_flags;

	spin_lock(&p->tcf_lock);
	p->tcf_tm.lastuse = jiffies;
	bstats_update(&p->tcf_bstats, skb);
	action = p->tcf_action;
	update_flags = p->update_flags;
	spin_unlock(&p->tcf_lock);

	if (unlikely(action == TC_ACT_SHOT))
		goto drop;

	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):
		if (!tcf_csum_ipv4(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		if (!tcf_csum_ipv6(skb, update_flags))
			goto drop;
		break;
	}

	return action;

drop:
	spin_lock(&p->tcf_lock);
	p->tcf_qstats.drops++;
	spin_unlock(&p->tcf_lock);
	return TC_ACT_SHOT;
}

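/*
 * Dump the action parameters and timestamps back to user space, using the
 * same TCA_CSUM_* attribute layout that tcf_csum_init() accepts.
 */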
static int tcf_csum_dump(struct sk_buff *skb,
			 struct tc_action *a, int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_csum *p = a->priv;
	struct tc_csum opt = {
		.update_flags = p->update_flags,
		.index = p->tcf_index,
		.action = p->tcf_action,
		.refcnt = p->tcf_refcnt - ref,
		.bindcnt = p->tcf_bindcnt - bind,
	};
	struct tcf_t t;

	if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
	t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
	t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
	if (nla_put(skb, TCA_CSUM_TM, sizeof(t), &t))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tc_action_ops act_csum_ops = {
	.kind = "csum",
	.hinfo = &csum_hash_info,
	.type = TCA_ACT_CSUM,
	.owner = THIS_MODULE,
	.act = tcf_csum,
	.dump = tcf_csum_dump,
	.cleanup = tcf_csum_cleanup,
	.init = tcf_csum_init,
};

MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");

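/*
 * Example usage (a sketch; the exact syntax depends on the installed
 * iproute2, see tc-csum(8)):
 *
 *   tc filter add dev eth0 parent 1: protocol ip prio 10 \
 *      u32 match u32 0 0 \
 *      action csum ip4h and udp
 *
 * i.e. recompute the IPv4 header and UDP checksums, typically after an
 * earlier action such as pedit has rewritten header fields.
 */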
static int __init csum_init_module(void)
{
	int err = tcf_hashinfo_init(&csum_hash_info, CSUM_TAB_MASK);
	if (err)
		return err;

	return tcf_register_action(&act_csum_ops);
}

static void __exit csum_cleanup_module(void)
{
	tcf_unregister_action(&act_csum_ops);
}

module_init(csum_init_module);
module_exit(csum_cleanup_module);