net/sched/act_csum.c
/*
 * Checksum updating actions
 *
 * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include <linux/netlink.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>

#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/igmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>

#include <net/act_api.h>

#include <linux/tc_act/tc_csum.h>
#include <net/tc_act/tc_csum.h>

#define CSUM_TAB_MASK 15
static struct tcf_hashinfo csum_hash_info;

static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
        [TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};

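/*
 * Illustrative userspace usage (assumed iproute2 syntax; the exact tc(8)
 * spelling may differ between versions): the action is attached to a
 * classifier and told which checksums to refresh, e.g.
 *
 *     tc filter add dev eth0 parent 1: protocol ip prio 1 u32 \
 *             match u32 0 0 action csum udp
 *
 * The selected protocols arrive here as TCA_CSUM_UPDATE_FLAG_* bits in
 * parm->update_flags.
 */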
static int tcf_csum_init(struct net *n, struct nlattr *nla, struct nlattr *est,
                         struct tc_action *a, int ovr, int bind)
{
        struct nlattr *tb[TCA_CSUM_MAX + 1];
        struct tc_csum *parm;
        struct tcf_common *pc;
        struct tcf_csum *p;
        int ret = 0, err;

        if (nla == NULL)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy);
        if (err < 0)
                return err;

        if (tb[TCA_CSUM_PARMS] == NULL)
                return -EINVAL;
        parm = nla_data(tb[TCA_CSUM_PARMS]);

        pc = tcf_hash_check(parm->index, a, bind);
        if (!pc) {
                pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
                if (IS_ERR(pc))
                        return PTR_ERR(pc);
                ret = ACT_P_CREATED;
        } else {
                if (bind) /* don't override defaults */
                        return 0;
                tcf_hash_release(pc, bind, a->ops->hinfo);
                if (!ovr)
                        return -EEXIST;
        }

        p = to_tcf_csum(pc);
        spin_lock_bh(&p->tcf_lock);
        p->tcf_action = parm->action;
        p->update_flags = parm->update_flags;
        spin_unlock_bh(&p->tcf_lock);

        if (ret == ACT_P_CREATED)
                tcf_hash_insert(pc, a->ops->hinfo);

        return ret;
}

static int tcf_csum_cleanup(struct tc_action *a, int bind)
{
        struct tcf_csum *p = a->priv;
        return tcf_hash_release(&p->common, bind, &csum_hash_info);
}

/**
 * tcf_csum_skb_nextlayer - Get next layer pointer
 * @skb: sk_buff to use
 * @ihl: previous summed headers length
 * @ipl: complete packet length
 * @jhl: next header length
 *
 * Check the expected next layer availability in the specified sk_buff.
 * Return the next layer pointer if it is available, NULL otherwise.
 */
static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
                                    unsigned int ihl, unsigned int ipl,
                                    unsigned int jhl)
{
        int ntkoff = skb_network_offset(skb);
        int hl = ihl + jhl;

        if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
            (skb_cloned(skb) &&
             !skb_clone_writable(skb, hl + ntkoff) &&
             pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
                return NULL;
        else
                return (void *)(skb_network_header(skb) + ihl);
}

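/*
 * The helpers below each recompute the checksum of one transport-layer
 * protocol. They return 1 on success and 0 only when the expected header
 * cannot be pulled into linear, writable skb data.
 */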
static int tcf_csum_ipv4_icmp(struct sk_buff *skb,
                              unsigned int ihl, unsigned int ipl)
{
        struct icmphdr *icmph;

        icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
        if (icmph == NULL)
                return 0;

        icmph->checksum = 0;
        skb->csum = csum_partial(icmph, ipl - ihl, 0);
        icmph->checksum = csum_fold(skb->csum);

        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}

static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
                              unsigned int ihl, unsigned int ipl)
{
        struct igmphdr *igmph;

        igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
        if (igmph == NULL)
                return 0;

        igmph->csum = 0;
        skb->csum = csum_partial(igmph, ipl - ihl, 0);
        igmph->csum = csum_fold(skb->csum);

        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}

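/*
 * Unlike ICMP over IPv4, the ICMPv6 checksum covers an IPv6 pseudo-header,
 * which is why csum_ipv6_magic() is folded in below.
 */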
static int tcf_csum_ipv6_icmp(struct sk_buff *skb,
                              unsigned int ihl, unsigned int ipl)
{
        struct icmp6hdr *icmp6h;
        const struct ipv6hdr *ip6h;

        icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
        if (icmp6h == NULL)
                return 0;

        ip6h = ipv6_hdr(skb);
        icmp6h->icmp6_cksum = 0;
        skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
        icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                                              ipl - ihl, IPPROTO_ICMPV6,
                                              skb->csum);

        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}

static int tcf_csum_ipv4_tcp(struct sk_buff *skb,
                             unsigned int ihl, unsigned int ipl)
{
        struct tcphdr *tcph;
        const struct iphdr *iph;

        tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
        if (tcph == NULL)
                return 0;

        iph = ip_hdr(skb);
        tcph->check = 0;
        skb->csum = csum_partial(tcph, ipl - ihl, 0);
        tcph->check = tcp_v4_check(ipl - ihl,
                                   iph->saddr, iph->daddr, skb->csum);

        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}

static int tcf_csum_ipv6_tcp(struct sk_buff *skb,
                             unsigned int ihl, unsigned int ipl)
{
        struct tcphdr *tcph;
        const struct ipv6hdr *ip6h;

        tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
        if (tcph == NULL)
                return 0;

        ip6h = ipv6_hdr(skb);
        tcph->check = 0;
        skb->csum = csum_partial(tcph, ipl - ihl, 0);
        tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                                      ipl - ihl, IPPROTO_TCP,
                                      skb->csum);

        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}

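/*
 * For plain UDP over IPv4 a checksum of zero means "no checksum", so a zero
 * field is left alone unless this is UDPLITE, and a recomputed value of zero
 * is transmitted as CSUM_MANGLED_0 (all ones) instead.
 */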
static int tcf_csum_ipv4_udp(struct sk_buff *skb,
                             unsigned int ihl, unsigned int ipl, int udplite)
{
        struct udphdr *udph;
        const struct iphdr *iph;
        u16 ul;

        /*
         * Support both UDP and UDPLITE checksum algorithms.  Don't use
         * udph->len to get the real length without a protocol check;
         * UDPLITE uses udph->len for another purpose (the checksum
         * coverage).  Use iph->tot_len, or just ipl.
         */

        udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
        if (udph == NULL)
                return 0;

        iph = ip_hdr(skb);
        ul = ntohs(udph->len);

        if (udplite || udph->check) {

                udph->check = 0;

                if (udplite) {
                        if (ul == 0)
                                skb->csum = csum_partial(udph, ipl - ihl, 0);
                        else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
                                skb->csum = csum_partial(udph, ul, 0);
                        else
                                goto ignore_obscure_skb;
                } else {
                        if (ul != ipl - ihl)
                                goto ignore_obscure_skb;

                        skb->csum = csum_partial(udph, ul, 0);
                }

                udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                ul, iph->protocol,
                                                skb->csum);

                if (!udph->check)
                        udph->check = CSUM_MANGLED_0;
        }

        skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
        return 1;
}

static int tcf_csum_ipv6_udp(struct sk_buff *skb,
                             unsigned int ihl, unsigned int ipl, int udplite)
{
        struct udphdr *udph;
        const struct ipv6hdr *ip6h;
        u16 ul;

        /*
         * Support both UDP and UDPLITE checksum algorithms.  Don't use
         * udph->len to get the real length without a protocol check;
         * UDPLITE uses udph->len for another purpose (the checksum
         * coverage).  Use ip6h->payload_len + sizeof(*ip6h) ..., or just ipl.
         */

        udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
        if (udph == NULL)
                return 0;

        ip6h = ipv6_hdr(skb);
        ul = ntohs(udph->len);

        udph->check = 0;

        if (udplite) {
                if (ul == 0)
                        skb->csum = csum_partial(udph, ipl - ihl, 0);

                else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
                        skb->csum = csum_partial(udph, ul, 0);

                else
                        goto ignore_obscure_skb;
        } else {
                if (ul != ipl - ihl)
                        goto ignore_obscure_skb;

                skb->csum = csum_partial(udph, ul, 0);
        }

        udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
                                      udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
                                      skb->csum);

        if (!udph->check)
                udph->check = CSUM_MANGLED_0;

        skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
        return 1;
}

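/*
 * tcf_csum_ipv4() dispatches on iph->protocol.  Non-first fragments (any
 * packet with a non-zero fragment offset) carry no transport header, so the
 * switch matches nothing for them and only the IPv4 header checksum can be
 * refreshed.
 */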
static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
        const struct iphdr *iph;
        int ntkoff;

        ntkoff = skb_network_offset(skb);

        if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
                goto fail;

        iph = ip_hdr(skb);

        switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
        case IPPROTO_ICMP:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
                        if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
                                                ntohs(iph->tot_len)))
                                goto fail;
                break;
        case IPPROTO_IGMP:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
                        if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
                                                ntohs(iph->tot_len)))
                                goto fail;
                break;
        case IPPROTO_TCP:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
                        if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4,
                                               ntohs(iph->tot_len)))
                                goto fail;
                break;
        case IPPROTO_UDP:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
                        if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
                                               ntohs(iph->tot_len), 0))
                                goto fail;
                break;
        case IPPROTO_UDPLITE:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
                        if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
                                               ntohs(iph->tot_len), 1))
                                goto fail;
                break;
        }

        if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
                if (skb_cloned(skb) &&
                    !skb_clone_writable(skb, sizeof(*iph) + ntkoff) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
                        goto fail;

                ip_send_check(ip_hdr(skb));
        }

        return 1;

fail:
        return 0;
}

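/*
 * Scan a Hop-by-Hop options header for the Jumbo Payload option; when it is
 * found, the true payload length is taken from the option rather than from
 * the (zero) payload_len field of the IPv6 header.
 */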
static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh,
                                 unsigned int ixhl, unsigned int *pl)
{
        int off, len, optlen;
        unsigned char *xh = (void *)ip6xh;

        off = sizeof(*ip6xh);
        len = ixhl - off;

        while (len > 1) {
                switch (xh[off]) {
                case IPV6_TLV_PAD1:
                        optlen = 1;
                        break;
                case IPV6_TLV_JUMBO:
                        optlen = xh[off + 1] + 2;
                        if (optlen != 6 || len < 6 || (off & 3) != 2)
                                /* wrong jumbo option length/alignment */
                                return 0;
                        *pl = ntohl(*(__be32 *)(xh + off + 2));
                        goto done;
                default:
                        optlen = xh[off + 1] + 2;
                        if (optlen > len)
                                /* ignore obscure options */
                                goto done;
                        break;
                }
                off += optlen;
                len -= optlen;
        }

done:
        return 1;
}

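/*
 * tcf_csum_ipv6() walks the extension header chain until it reaches a
 * transport header it can handle; fragments and unknown next headers are
 * passed through untouched, since there is no IPv6 header checksum to
 * refresh.
 */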
static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
{
        struct ipv6hdr *ip6h;
        struct ipv6_opt_hdr *ip6xh;
        unsigned int hl, ixhl;
        unsigned int pl;
        int ntkoff;
        u8 nexthdr;

        ntkoff = skb_network_offset(skb);

        hl = sizeof(*ip6h);

        if (!pskb_may_pull(skb, hl + ntkoff))
                goto fail;

        ip6h = ipv6_hdr(skb);

        pl = ntohs(ip6h->payload_len);
        nexthdr = ip6h->nexthdr;

        do {
                switch (nexthdr) {
                case NEXTHDR_FRAGMENT:
                        goto ignore_skb;
                case NEXTHDR_ROUTING:
                case NEXTHDR_HOP:
                case NEXTHDR_DEST:
                        if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
                                goto fail;
                        ip6xh = (void *)(skb_network_header(skb) + hl);
                        ixhl = ipv6_optlen(ip6xh);
                        if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
                                goto fail;
                        ip6xh = (void *)(skb_network_header(skb) + hl);
                        if ((nexthdr == NEXTHDR_HOP) &&
                            !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
                                goto fail;
                        nexthdr = ip6xh->nexthdr;
                        hl += ixhl;
                        break;
                case IPPROTO_ICMPV6:
                        if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
                                if (!tcf_csum_ipv6_icmp(skb,
                                                        hl, pl + sizeof(*ip6h)))
                                        goto fail;
                        goto done;
                case IPPROTO_TCP:
                        if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
                                if (!tcf_csum_ipv6_tcp(skb,
                                                       hl, pl + sizeof(*ip6h)))
                                        goto fail;
                        goto done;
                case IPPROTO_UDP:
                        if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
                                if (!tcf_csum_ipv6_udp(skb, hl,
                                                       pl + sizeof(*ip6h), 0))
                                        goto fail;
                        goto done;
                case IPPROTO_UDPLITE:
                        if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
                                if (!tcf_csum_ipv6_udp(skb, hl,
                                                       pl + sizeof(*ip6h), 1))
                                        goto fail;
                        goto done;
                default:
                        goto ignore_skb;
                }
        } while (pskb_may_pull(skb, hl + 1 + ntkoff));

done:
ignore_skb:
        return 1;

fail:
        return 0;
}

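/*
 * tcf_csum() is the per-packet entry point invoked by the action framework.
 * It snapshots the configured verdict and update flags under the instance
 * spinlock, then rewrites checksums according to the packet's L3 protocol.
 */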
static int tcf_csum(struct sk_buff *skb,
                    const struct tc_action *a, struct tcf_result *res)
{
        struct tcf_csum *p = a->priv;
        int action;
        u32 update_flags;

        spin_lock(&p->tcf_lock);
        p->tcf_tm.lastuse = jiffies;
        bstats_update(&p->tcf_bstats, skb);
        action = p->tcf_action;
        update_flags = p->update_flags;
        spin_unlock(&p->tcf_lock);

        if (unlikely(action == TC_ACT_SHOT))
                goto drop;

        switch (skb->protocol) {
        case cpu_to_be16(ETH_P_IP):
                if (!tcf_csum_ipv4(skb, update_flags))
                        goto drop;
                break;
        case cpu_to_be16(ETH_P_IPV6):
                if (!tcf_csum_ipv6(skb, update_flags))
                        goto drop;
                break;
        }

        return action;

drop:
        spin_lock(&p->tcf_lock);
        p->tcf_qstats.drops++;
        spin_unlock(&p->tcf_lock);
        return TC_ACT_SHOT;
}

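/*
 * tcf_csum_dump() serializes the current configuration (struct tc_csum) and
 * timing counters back to userspace as netlink attributes.
 */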
static int tcf_csum_dump(struct sk_buff *skb,
                         struct tc_action *a, int bind, int ref)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_csum *p = a->priv;
        struct tc_csum opt = {
                .update_flags = p->update_flags,
                .index   = p->tcf_index,
                .action  = p->tcf_action,
                .refcnt  = p->tcf_refcnt - ref,
                .bindcnt = p->tcf_bindcnt - bind,
        };
        struct tcf_t t;

        if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;
        t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
        t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
        t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
        if (nla_put(skb, TCA_CSUM_TM, sizeof(t), &t))
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

static struct tc_action_ops act_csum_ops = {
        .kind           = "csum",
        .hinfo          = &csum_hash_info,
        .type           = TCA_ACT_CSUM,
        .owner          = THIS_MODULE,
        .act            = tcf_csum,
        .dump           = tcf_csum_dump,
        .cleanup        = tcf_csum_cleanup,
        .init           = tcf_csum_init,
};

MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");

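/*
 * Module init registers the csum action with the net/sched action API after
 * initializing its shared hash table; module exit unregisters it.
 */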
static int __init csum_init_module(void)
{
        int err = tcf_hashinfo_init(&csum_hash_info, CSUM_TAB_MASK);
        if (err)
                return err;

        return tcf_register_action(&act_csum_ops);
}

static void __exit csum_cleanup_module(void)
{
        tcf_unregister_action(&act_csum_ops);
}

module_init(csum_init_module);
module_exit(csum_cleanup_module);