net/sched/act_csum.c
/*
 * Checksum updating actions
 *
 * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include <linux/netlink.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>

#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/igmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>

#include <net/act_api.h>

#include <linux/tc_act/tc_csum.h>
#include <net/tc_act/tc_csum.h>

#define CSUM_TAB_MASK 15
static struct tcf_hashinfo csum_hash_info;

static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
        [TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};

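/*
 * tcf_csum_init() parses the TCA_CSUM_PARMS netlink attribute and either
 * creates a new csum action instance or updates an existing one (looked up
 * by parm->index in csum_hash_info), copying the verdict and the requested
 * update_flags under the action's lock.
 */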
static int tcf_csum_init(struct net *n, struct nlattr *nla, struct nlattr *est,
                         struct tc_action *a, int ovr, int bind)
{
        struct nlattr *tb[TCA_CSUM_MAX + 1];
        struct tc_csum *parm;
        struct tcf_common *pc;
        struct tcf_csum *p;
        int ret = 0, err;

        if (nla == NULL)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy);
        if (err < 0)
                return err;

        if (tb[TCA_CSUM_PARMS] == NULL)
                return -EINVAL;
        parm = nla_data(tb[TCA_CSUM_PARMS]);

        pc = tcf_hash_check(parm->index, a, bind, &csum_hash_info);
        if (!pc) {
                pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind,
                                     &csum_hash_info);
                if (IS_ERR(pc))
                        return PTR_ERR(pc);
                ret = ACT_P_CREATED;
        } else {
                if (bind) /* don't override defaults */
                        return 0;
                tcf_hash_release(pc, bind, &csum_hash_info);
                if (!ovr)
                        return -EEXIST;
        }

        p = to_tcf_csum(pc);
        spin_lock_bh(&p->tcf_lock);
        p->tcf_action = parm->action;
        p->update_flags = parm->update_flags;
        spin_unlock_bh(&p->tcf_lock);

        if (ret == ACT_P_CREATED)
                tcf_hash_insert(pc, &csum_hash_info);

        return ret;
}

static int tcf_csum_cleanup(struct tc_action *a, int bind)
{
        struct tcf_csum *p = a->priv;
        return tcf_hash_release(&p->common, bind, &csum_hash_info);
}

/**
 * tcf_csum_skb_nextlayer - Get next layer pointer
 * @skb: sk_buff to use
 * @ihl: previous summed headers length
 * @ipl: complete packet length
 * @jhl: next header length
 *
 * Check that the expected next layer is available in the specified sk_buff.
 * Return a pointer to the next layer if the check passes, NULL otherwise.
 */
static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
                                    unsigned int ihl, unsigned int ipl,
                                    unsigned int jhl)
{
        int ntkoff = skb_network_offset(skb);
        int hl = ihl + jhl;

        if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
            (skb_cloned(skb) &&
             !skb_clone_writable(skb, hl + ntkoff) &&
             pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
                return NULL;
        else
                return (void *)(skb_network_header(skb) + ihl);
}

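/*
 * The helpers below each recompute one transport-layer checksum: the checksum
 * field is zeroed, the sum is taken over the remaining header and payload
 * (plus the pseudo-header where the protocol requires one), and skb->ip_summed
 * is set back to CHECKSUM_NONE since the checksum carried in the packet is
 * now valid.
 */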
static int tcf_csum_ipv4_icmp(struct sk_buff *skb,
                              unsigned int ihl, unsigned int ipl)
{
        struct icmphdr *icmph;

        icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
        if (icmph == NULL)
                return 0;

        icmph->checksum = 0;
        skb->csum = csum_partial(icmph, ipl - ihl, 0);
        icmph->checksum = csum_fold(skb->csum);

        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}

static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
                              unsigned int ihl, unsigned int ipl)
{
        struct igmphdr *igmph;

        igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
        if (igmph == NULL)
                return 0;

        igmph->csum = 0;
        skb->csum = csum_partial(igmph, ipl - ihl, 0);
        igmph->csum = csum_fold(skb->csum);

        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}

static int tcf_csum_ipv6_icmp(struct sk_buff *skb,
                              unsigned int ihl, unsigned int ipl)
{
        struct icmp6hdr *icmp6h;
        const struct ipv6hdr *ip6h;

        icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
        if (icmp6h == NULL)
                return 0;

        ip6h = ipv6_hdr(skb);
        icmp6h->icmp6_cksum = 0;
        skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
        icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                                              ipl - ihl, IPPROTO_ICMPV6,
                                              skb->csum);

        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}

static int tcf_csum_ipv4_tcp(struct sk_buff *skb,
                             unsigned int ihl, unsigned int ipl)
{
        struct tcphdr *tcph;
        const struct iphdr *iph;

        tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
        if (tcph == NULL)
                return 0;

        iph = ip_hdr(skb);
        tcph->check = 0;
        skb->csum = csum_partial(tcph, ipl - ihl, 0);
        tcph->check = tcp_v4_check(ipl - ihl,
                                   iph->saddr, iph->daddr, skb->csum);

        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}

static int tcf_csum_ipv6_tcp(struct sk_buff *skb,
                             unsigned int ihl, unsigned int ipl)
{
        struct tcphdr *tcph;
        const struct ipv6hdr *ip6h;

        tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
        if (tcph == NULL)
                return 0;

        ip6h = ipv6_hdr(skb);
        tcph->check = 0;
        skb->csum = csum_partial(tcph, ipl - ihl, 0);
        tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                                      ipl - ihl, IPPROTO_TCP,
                                      skb->csum);

        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}

static int tcf_csum_ipv4_udp(struct sk_buff *skb,
                             unsigned int ihl, unsigned int ipl, int udplite)
{
        struct udphdr *udph;
        const struct iphdr *iph;
        u16 ul;

        /*
         * Support both UDP and UDP-Lite checksum algorithms. Don't use
         * udph->len to get the real payload length without a protocol check:
         * UDP-Lite uses udph->len for its checksum coverage instead.
         * Use iph->tot_len, or just ipl.
         */

        udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
        if (udph == NULL)
                return 0;

        iph = ip_hdr(skb);
        ul = ntohs(udph->len);

        if (udplite || udph->check) {

                udph->check = 0;

                if (udplite) {
                        if (ul == 0)
                                skb->csum = csum_partial(udph, ipl - ihl, 0);
                        else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
                                skb->csum = csum_partial(udph, ul, 0);
                        else
                                goto ignore_obscure_skb;
                } else {
                        if (ul != ipl - ihl)
                                goto ignore_obscure_skb;

                        skb->csum = csum_partial(udph, ul, 0);
                }

                udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                ul, iph->protocol,
                                                skb->csum);

                if (!udph->check)
                        udph->check = CSUM_MANGLED_0;
        }

        skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
        return 1;
}

static int tcf_csum_ipv6_udp(struct sk_buff *skb,
                             unsigned int ihl, unsigned int ipl, int udplite)
{
        struct udphdr *udph;
        const struct ipv6hdr *ip6h;
        u16 ul;

        /*
         * Support both UDP and UDP-Lite checksum algorithms. Don't use
         * udph->len to get the real payload length without a protocol check:
         * UDP-Lite uses udph->len for its checksum coverage instead.
         * Use ip6h->payload_len + sizeof(*ip6h) ... , or just ipl.
         */

        udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
        if (udph == NULL)
                return 0;

        ip6h = ipv6_hdr(skb);
        ul = ntohs(udph->len);

        udph->check = 0;

        if (udplite) {
                if (ul == 0)
                        skb->csum = csum_partial(udph, ipl - ihl, 0);

                else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
                        skb->csum = csum_partial(udph, ul, 0);

                else
                        goto ignore_obscure_skb;
        } else {
                if (ul != ipl - ihl)
                        goto ignore_obscure_skb;

                skb->csum = csum_partial(udph, ul, 0);
        }

        udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
                                      udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
                                      skb->csum);

        if (!udph->check)
                udph->check = CSUM_MANGLED_0;

        skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
        return 1;
}

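/*
 * tcf_csum_ipv4() dispatches on the IPv4 protocol field (skipping non-first
 * fragments, whose offset is non-zero) and recomputes each checksum requested
 * in update_flags; it can also refresh the IPv4 header checksum itself via
 * ip_send_check().
 */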
static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
        const struct iphdr *iph;
        int ntkoff;

        ntkoff = skb_network_offset(skb);

        if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
                goto fail;

        iph = ip_hdr(skb);

        switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
        case IPPROTO_ICMP:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
                        if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
                                                ntohs(iph->tot_len)))
                                goto fail;
                break;
        case IPPROTO_IGMP:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
                        if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
                                                ntohs(iph->tot_len)))
                                goto fail;
                break;
        case IPPROTO_TCP:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
                        if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4,
                                               ntohs(iph->tot_len)))
                                goto fail;
                break;
        case IPPROTO_UDP:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
                        if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
                                               ntohs(iph->tot_len), 0))
                                goto fail;
                break;
        case IPPROTO_UDPLITE:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
                        if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
                                               ntohs(iph->tot_len), 1))
                                goto fail;
                break;
        }

        if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
                if (skb_cloned(skb) &&
                    !skb_clone_writable(skb, sizeof(*iph) + ntkoff) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
                        goto fail;

                ip_send_check(ip_hdr(skb));
        }

        return 1;

fail:
        return 0;
}

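/*
 * tcf_csum_ipv6_hopopts() walks the TLV options inside a hop-by-hop header
 * looking for a jumbogram payload-length option; when a valid one is found,
 * *pl is replaced with the jumbo payload length so the caller sums the right
 * span. Malformed jumbo options make the function fail.
 */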
static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh,
                                 unsigned int ixhl, unsigned int *pl)
{
        int off, len, optlen;
        unsigned char *xh = (void *)ip6xh;

        off = sizeof(*ip6xh);
        len = ixhl - off;

        while (len > 1) {
                switch (xh[off]) {
                case IPV6_TLV_PAD1:
                        optlen = 1;
                        break;
                case IPV6_TLV_JUMBO:
                        optlen = xh[off + 1] + 2;
                        if (optlen != 6 || len < 6 || (off & 3) != 2)
                                /* wrong jumbo option length/alignment */
                                return 0;
                        *pl = ntohl(*(__be32 *)(xh + off + 2));
                        goto done;
                default:
                        optlen = xh[off + 1] + 2;
                        if (optlen > len)
                                /* ignore obscure options */
                                goto done;
                        break;
                }
                off += optlen;
                len -= optlen;
        }

done:
        return 1;
}

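/*
 * tcf_csum_ipv6() steps through the IPv6 extension-header chain (giving up on
 * fragments and unknown headers) until it reaches a transport protocol, then
 * updates that protocol's checksum if the matching update_flags bit is set.
 */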
static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
{
        struct ipv6hdr *ip6h;
        struct ipv6_opt_hdr *ip6xh;
        unsigned int hl, ixhl;
        unsigned int pl;
        int ntkoff;
        u8 nexthdr;

        ntkoff = skb_network_offset(skb);

        hl = sizeof(*ip6h);

        if (!pskb_may_pull(skb, hl + ntkoff))
                goto fail;

        ip6h = ipv6_hdr(skb);

        pl = ntohs(ip6h->payload_len);
        nexthdr = ip6h->nexthdr;

        do {
                switch (nexthdr) {
                case NEXTHDR_FRAGMENT:
                        goto ignore_skb;
                case NEXTHDR_ROUTING:
                case NEXTHDR_HOP:
                case NEXTHDR_DEST:
                        if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
                                goto fail;
                        ip6xh = (void *)(skb_network_header(skb) + hl);
                        ixhl = ipv6_optlen(ip6xh);
                        if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
                                goto fail;
                        ip6xh = (void *)(skb_network_header(skb) + hl);
                        if ((nexthdr == NEXTHDR_HOP) &&
                            !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
                                goto fail;
                        nexthdr = ip6xh->nexthdr;
                        hl += ixhl;
                        break;
                case IPPROTO_ICMPV6:
                        if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
                                if (!tcf_csum_ipv6_icmp(skb,
                                                        hl, pl + sizeof(*ip6h)))
                                        goto fail;
                        goto done;
                case IPPROTO_TCP:
                        if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
                                if (!tcf_csum_ipv6_tcp(skb,
                                                       hl, pl + sizeof(*ip6h)))
                                        goto fail;
                        goto done;
                case IPPROTO_UDP:
                        if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
                                if (!tcf_csum_ipv6_udp(skb, hl,
                                                       pl + sizeof(*ip6h), 0))
                                        goto fail;
                        goto done;
                case IPPROTO_UDPLITE:
                        if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
                                if (!tcf_csum_ipv6_udp(skb, hl,
                                                       pl + sizeof(*ip6h), 1))
                                        goto fail;
                        goto done;
                default:
                        goto ignore_skb;
                }
        } while (pskb_may_pull(skb, hl + 1 + ntkoff));

done:
ignore_skb:
        return 1;

fail:
        return 0;
}

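/*
 * tcf_csum() is the per-packet action handler: it snapshots the configured
 * verdict and update_flags under the action lock, updates the checksums of
 * IPv4 or IPv6 packets accordingly, and returns TC_ACT_SHOT (dropping the
 * packet) if the update fails.
 */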
static int tcf_csum(struct sk_buff *skb,
                    const struct tc_action *a, struct tcf_result *res)
{
        struct tcf_csum *p = a->priv;
        int action;
        u32 update_flags;

        spin_lock(&p->tcf_lock);
        p->tcf_tm.lastuse = jiffies;
        bstats_update(&p->tcf_bstats, skb);
        action = p->tcf_action;
        update_flags = p->update_flags;
        spin_unlock(&p->tcf_lock);

        if (unlikely(action == TC_ACT_SHOT))
                goto drop;

        switch (skb->protocol) {
        case cpu_to_be16(ETH_P_IP):
                if (!tcf_csum_ipv4(skb, update_flags))
                        goto drop;
                break;
        case cpu_to_be16(ETH_P_IPV6):
                if (!tcf_csum_ipv6(skb, update_flags))
                        goto drop;
                break;
        }

        return action;

drop:
        spin_lock(&p->tcf_lock);
        p->tcf_qstats.drops++;
        spin_unlock(&p->tcf_lock);
        return TC_ACT_SHOT;
}

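/*
 * tcf_csum_dump() fills in a struct tc_csum plus timing info and appends them
 * as TCA_CSUM_PARMS / TCA_CSUM_TM netlink attributes when the action is
 * dumped to user space.
 */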
static int tcf_csum_dump(struct sk_buff *skb,
                         struct tc_action *a, int bind, int ref)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_csum *p = a->priv;
        struct tc_csum opt = {
                .update_flags = p->update_flags,
                .index   = p->tcf_index,
                .action  = p->tcf_action,
                .refcnt  = p->tcf_refcnt - ref,
                .bindcnt = p->tcf_bindcnt - bind,
        };
        struct tcf_t t;

        if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;
        t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
        t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
        t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
        if (nla_put(skb, TCA_CSUM_TM, sizeof(t), &t))
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

static struct tc_action_ops act_csum_ops = {
        .kind           = "csum",
        .hinfo          = &csum_hash_info,
        .type           = TCA_ACT_CSUM,
        .capab          = TCA_CAP_NONE,
        .owner          = THIS_MODULE,
        .act            = tcf_csum,
        .dump           = tcf_csum_dump,
        .cleanup        = tcf_csum_cleanup,
        .init           = tcf_csum_init,
};

MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");

static int __init csum_init_module(void)
{
        int err = tcf_hashinfo_init(&csum_hash_info, CSUM_TAB_MASK);
        if (err)
                return err;

        return tcf_register_action(&act_csum_ops);
}

static void __exit csum_cleanup_module(void)
{
        tcf_unregister_action(&act_csum_ops);
}

module_init(csum_init_module);
module_exit(csum_cleanup_module);