/*
 *	IPv6 fragment reassembly
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on: net/ipv4/ip_fragment.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	Fixes:
 *	Andi Kleen	Make it work with multiple hosts.
 *			More RFC compliance.
 *
 *	Horst von Brand	Add missing #include <linux/string.h>
 *	Alexey Kuznetsov	SMP races, threading, cleanup.
 *	Patrick McHardy		LRU queue of frag heads for evictor.
 *	Mitsuru KANDA @USAGI	Register inet6_protocol{}.
 *	David Stevens and
 *	YOSHIFUJI,H. @USAGI	Always remove fragment header to
 *				calculate ICV correctly.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/inet_frag.h>

struct ip6frag_skb_cb {
	struct inet6_skb_parm	h;
	int			offset;
};

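/* Per-fragment state is stashed in the skb control buffer; the inet6
 * parameter block must stay first so IP6CB() keeps working on the
 * same skb.
 */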
#define FRAG6_CB(skb)	((struct ip6frag_skb_cb *)((skb)->cb))


/*
 *	Equivalent of ipv4 struct ipq
 */

struct frag_queue {
	struct inet_frag_queue	q;

	__be32			id;		/* fragment id		*/
	u32			user;
	struct in6_addr		saddr;
	struct in6_addr		daddr;

	int			iif;
	unsigned int		csum;
	__u16			nhoffset;
};

static struct inet_frags ip6_frags;

int ip6_frag_nqueues(struct net *net)
{
	return net->ipv6.frags.nqueues;
}

int ip6_frag_mem(struct net *net)
{
	return atomic_read(&net->ipv6.frags.mem);
}

static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev);

/*
 * Callers should be careful not to use the hash value outside the
 * ipfrag_lock, as doing so could race with ipfrag_hash_rnd being
 * recalculated.
 */
unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
			     const struct in6_addr *daddr, u32 rnd)
{
	u32 c;

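	/* Mix all nine 32-bit words -- the fragment id plus both 128-bit
	 * addresses -- into the hash in three jhash_3words() rounds, each
	 * seeded with the result of the previous one.
	 */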
	c = jhash_3words((__force u32)saddr->s6_addr32[0],
			 (__force u32)saddr->s6_addr32[1],
			 (__force u32)saddr->s6_addr32[2],
			 rnd);

	c = jhash_3words((__force u32)saddr->s6_addr32[3],
			 (__force u32)daddr->s6_addr32[0],
			 (__force u32)daddr->s6_addr32[1],
			 c);

	c = jhash_3words((__force u32)daddr->s6_addr32[2],
			 (__force u32)daddr->s6_addr32[3],
			 (__force u32)id,
			 c);

	return c & (INETFRAGS_HASHSZ - 1);
}
EXPORT_SYMBOL_GPL(inet6_hash_frag);

static unsigned int ip6_hashfn(struct inet_frag_queue *q)
{
	struct frag_queue *fq;

	fq = container_of(q, struct frag_queue, q);
	return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd);
}

int ip6_frag_match(struct inet_frag_queue *q, void *a)
{
	struct frag_queue *fq;
	struct ip6_create_arg *arg = a;

	fq = container_of(q, struct frag_queue, q);
	return (fq->id == arg->id && fq->user == arg->user &&
			ipv6_addr_equal(&fq->saddr, arg->src) &&
			ipv6_addr_equal(&fq->daddr, arg->dst));
}
EXPORT_SYMBOL(ip6_frag_match);

void ip6_frag_init(struct inet_frag_queue *q, void *a)
{
	struct frag_queue *fq = container_of(q, struct frag_queue, q);
	struct ip6_create_arg *arg = a;

	fq->id = arg->id;
	fq->user = arg->user;
	ipv6_addr_copy(&fq->saddr, arg->src);
	ipv6_addr_copy(&fq->daddr, arg->dst);
}
EXPORT_SYMBOL(ip6_frag_init);

/* Destruction primitives. */

static __inline__ void fq_put(struct frag_queue *fq)
{
	inet_frag_put(&fq->q, &ip6_frags);
}

/* Kill fq entry. It is not destroyed immediately,
 * because the caller (and possibly others) still holds a reference.
 */
static __inline__ void fq_kill(struct frag_queue *fq)
{
	inet_frag_kill(&fq->q, &ip6_frags);
}

static void ip6_evictor(struct net *net, struct inet6_dev *idev)
{
	int evicted;

	evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags);
	if (evicted)
		IP6_ADD_STATS_BH(net, idev, IPSTATS_MIB_REASMFAILS, evicted);
}

static void ip6_frag_expire(unsigned long data)
{
	struct frag_queue *fq;
	struct net_device *dev = NULL;
	struct net *net;

	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);

	spin_lock(&fq->q.lock);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto out;

	fq_kill(fq);

	net = container_of(fq->q.net, struct net, ipv6.frags);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, fq->iif);
	if (!dev)
		goto out_rcu_unlock;

	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);

	/* Don't send error if the first segment did not arrive. */
	if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments)
		goto out_rcu_unlock;

	/* But use as the source the device on which the last fragment
	 * arrived, and do not use the fq->dev pointer directly; the
	 * device might already have disappeared.
	 */
	fq->q.fragments->dev = dev;
	icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
out_rcu_unlock:
	rcu_read_unlock();
out:
	spin_unlock(&fq->q.lock);
	fq_put(fq);
}

static __inline__ struct frag_queue *
fq_find(struct net *net, __be32 id, const struct in6_addr *src, const struct in6_addr *dst)
{
	struct inet_frag_queue *q;
	struct ip6_create_arg arg;
	unsigned int hash;

	arg.id = id;
	arg.user = IP6_DEFRAG_LOCAL_DELIVER;
	arg.src = src;
	arg.dst = dst;

	read_lock(&ip6_frags.lock);
	hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);

	q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
	if (q == NULL)
		return NULL;

	return container_of(q, struct frag_queue, q);
}

static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
			  struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int offset, end;
	struct net *net = dev_net(skb_dst(skb)->dev);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto err;

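	/* The fragment offset occupies the upper 13 bits of frag_off in
	 * 8-octet units, so masking off the low three flag bits yields
	 * the byte offset directly.  'end' is the offset just past this
	 * fragment's payload: payload_len covers everything after the
	 * fixed IPv6 header, so subtract the extension headers up to and
	 * including the fragment header itself.
	 */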
	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((u8 *)&fhdr->frag_off -
				   skb_network_header(skb)));
		return -1;
	}

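	/* The headers in front of the payload are about to be pulled off,
	 * so their contribution must be subtracted from a driver-supplied
	 * full-packet checksum to keep skb->csum valid.
	 */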
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have a different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.last_in |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_INHDRERRORS);
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
					  offsetof(struct ipv6hdr, payload_len));
			return -1;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *)(fhdr + 1) - skb->data))
		goto err;

	if (pskb_trim_rcsum(skb, end - offset))
		goto err;

	/* Find out which fragments are in front of and behind us in the
	 * chain of fragments so far; we must know where to put this
	 * fragment.
	 */
	prev = fq->q.fragments_tail;
	if (!prev || FRAG6_CB(prev)->offset < offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (FRAG6_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* RFC5722, Section 4:
	 *   When reassembling an IPv6 datagram, if one or more of its
	 *   constituent fragments is determined to be an overlapping
	 *   fragment, the entire datagram (and any constituent fragments,
	 *   including those not yet received) MUST be silently discarded.
	 */

	/* Check for overlap with preceding fragment. */
	if (prev &&
	    (FRAG6_CB(prev)->offset + prev->len) > offset)
		goto discard_fq;

	/* Look for overlap with succeeding segment. */
	if (next && FRAG6_CB(next)->offset < end)
		goto discard_fq;

	FRAG6_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		fq->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		fq->iif = dev->ifindex;
		skb->dev = NULL;
	}
	fq->q.stamp = skb->tstamp;
	fq->q.meat += skb->len;
	atomic_add(skb->truesize, &fq->q.net->mem);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.last_in |= INET_FRAG_FIRST_IN;
	}

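	/* The datagram is complete once both the first and the last
	 * fragment have been seen and the accumulated payload (meat)
	 * matches the total length learned from the final fragment.
	 */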
	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len)
		return ip6_frag_reasm(fq, prev, dev);

	write_lock(&ip6_frags.lock);
	list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
	write_unlock(&ip6_frags.lock);
	return -1;

discard_fq:
	fq_kill(fq);
err:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;
}

/*
 *	Check if this packet is complete.
 *
 *	Returns -1 on failure for any reason, and 1 when the fragments
 *	have been successfully glued back into a single datagram.
 *
 *	It is called with the fq locked, and the caller must check that
 *	the queue is eligible for reassembly, i.e. it is not COMPLETE and
 *	the last and the first frames have arrived and all the bits are here.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev)
{
	struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
	struct sk_buff *fp, *head = fq->q.fragments;
	int payload_len;
	unsigned int nhoff;

	fq_kill(fq);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		if (!fp->next)
			fq->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		kfree_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG6_CB(head)->offset != 0);

	/* Unfragmented part is taken from the first segment. */
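	/* payload_len of the reassembled packet: the extension headers
	 * kept in front of the data in the head skb, plus the total
	 * fragment data length (fq->q.len), minus the fragment header
	 * that is about to be stripped.
	 */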
	payload_len = ((head->data - skb_network_header(head)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it into two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (clone == NULL)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &fq->q.net->mem);
	}

	/* We have to remove the fragment header from the datagram and
	 * relocate the remaining headers in order to calculate the ICV
	 * correctly.
	 */
	nhoff = fq->nhoffset;
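	/* Patch the next-header byte of the header that preceded the
	 * fragment header (at nhoff) with the value the fragment header
	 * carried, then slide everything before the payload forward by
	 * sizeof(struct frag_hdr) to overwrite the fragment header.
	 */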
	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head,
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac_header += sizeof(struct frag_hdr);
	head->network_header += sizeof(struct frag_hdr);

	skb_shinfo(head)->frag_list = head->next;
	skb_reset_transport_header(head);
	skb_push(head, head->data - skb_network_header(head));

	for (fp = head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
	}
	atomic_sub(head->truesize, &fq->q.net->mem);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;
	ipv6_hdr(head)->payload_len = htons(payload_len);
	IP6CB(head)->nhoff = nhoff;

	/* Yes, and fold the redundant checksum back. 8) */
	if (head->ip_summed == CHECKSUM_COMPLETE)
		head->csum = csum_partial(skb_network_header(head),
					  skb_network_header_len(head),
					  head->csum);

	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
	rcu_read_unlock();
	fq->q.fragments = NULL;
	fq->q.fragments_tail = NULL;
	return 1;

out_oversize:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();
	return -1;
}

static int ipv6_frag_rcv(struct sk_buff *skb)
{
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	const struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct net *net = dev_net(skb_dst(skb)->dev);

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);

	/* A jumbo payload (payload_len == 0) inhibits the fragment header. */
	if (hdr->payload_len == 0)
		goto fail_hdr;

	if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
				 sizeof(struct frag_hdr))))
		goto fail_hdr;

	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);

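	/* 0xFFF9 (in network order) covers the 13-bit fragment offset
	 * (0xFFF8) plus the M (more fragments) flag (0x0001).  If all of
	 * those bits are clear, this is a lone "atomic" fragment and can
	 * be handed up as a normal, unfragmented packet.
	 */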
	if (!(fhdr->frag_off & htons(0xFFF9))) {
		/* It is not a fragmented frame */
		skb->transport_header += sizeof(struct frag_hdr);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);

		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
		return 1;
	}

	if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh)
		ip6_evictor(net, ip6_dst_idev(skb_dst(skb)));

	fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr);
	if (fq != NULL) {
		int ret;

		spin_lock(&fq->q.lock);

		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);

		spin_unlock(&fq->q.lock);
		fq_put(fq);
		return ret;
	}

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;

fail_hdr:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
	return -1;
}

static const struct inet6_protocol frag_protocol = {
	.handler	=	ipv6_frag_rcv,
	.flags		=	INET6_PROTO_NOPOLICY,
};

#ifdef CONFIG_SYSCTL
static struct ctl_table ip6_frags_ns_ctl_table[] = {
	{
		.procname	= "ip6frag_high_thresh",
		.data		= &init_net.ipv6.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ip6frag_low_thresh",
		.data		= &init_net.ipv6.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ip6frag_time",
		.data		= &init_net.ipv6.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static struct ctl_table ip6_frags_ctl_table[] = {
	{
		.procname	= "ip6frag_secret_interval",
		.data		= &ip6_frags.secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip6_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &net->ipv6.frags.high_thresh;
		table[1].data = &net->ipv6.frags.low_thresh;
		table[2].data = &net->ipv6.frags.timeout;
	}

	hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table);
	if (hdr == NULL)
		goto err_reg;

	net->ipv6.sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit ip6_frags_ns_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}

static struct ctl_table_header *ip6_ctl_header;

static int ip6_frags_sysctl_register(void)
{
	ip6_ctl_header = register_net_sysctl_rotable(net_ipv6_ctl_path,
			ip6_frags_ctl_table);
	return ip6_ctl_header == NULL ? -ENOMEM : 0;
}

static void ip6_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(ip6_ctl_header);
}
#else
static inline int ip6_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static inline void ip6_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int ip6_frags_sysctl_register(void)
{
	return 0;
}

static inline void ip6_frags_sysctl_unregister(void)
{
}
#endif

static int __net_init ipv6_frags_init_net(struct net *net)
{
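	/* Default per-netns limits: once reassembly memory climbs above
	 * high_thresh the evictor drops the oldest queues until usage
	 * falls back under low_thresh, and individual queues expire
	 * after 'timeout' jiffies.
	 */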
	net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
	net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
	net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;

	inet_frags_init_net(&net->ipv6.frags);

	return ip6_frags_ns_sysctl_register(net);
}

static void __net_exit ipv6_frags_exit_net(struct net *net)
{
	ip6_frags_ns_sysctl_unregister(net);
	inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
}

static struct pernet_operations ip6_frags_ops = {
	.init = ipv6_frags_init_net,
	.exit = ipv6_frags_exit_net,
};

int __init ipv6_frag_init(void)
{
	int ret;

	ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	if (ret)
		goto out;

	ret = ip6_frags_sysctl_register();
	if (ret)
		goto err_sysctl;

	ret = register_pernet_subsys(&ip6_frags_ops);
	if (ret)
		goto err_pernet;

	ip6_frags.hashfn = ip6_hashfn;
	ip6_frags.constructor = ip6_frag_init;
	ip6_frags.destructor = NULL;
	ip6_frags.skb_free = NULL;
	ip6_frags.qsize = sizeof(struct frag_queue);
	ip6_frags.match = ip6_frag_match;
	ip6_frags.frag_expire = ip6_frag_expire;
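	/* Reseed the hash secret every ten minutes so an attacker cannot
	 * keep forcing fragment queues into a single hash bucket.
	 */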
	ip6_frags.secret_interval = 10 * 60 * HZ;
	inet_frags_init(&ip6_frags);
out:
	return ret;

err_pernet:
	ip6_frags_sysctl_unregister();
err_sysctl:
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	goto out;
}

void ipv6_frag_exit(void)
{
	inet_frags_fini(&ip6_frags);
	ip6_frags_sysctl_unregister();
	unregister_pernet_subsys(&ip6_frags_ops);
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
}