/*
 *      ip6_flowlabel.c         IPv6 flowlabel manager.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *      Authors:        Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/pid_namespace.h>

#include <net/net_namespace.h>
#include <net/sock.h>

#include <net/ipv6.h>
#include <net/rawv6.h>
#include <net/transp_v6.h>

#include <asm/uaccess.h>

#define FL_MIN_LINGER   6       /* Minimal linger. It is set to the 6 sec
                                   specified in the old IPv6 RFC. Well, it
                                   was a reasonable value.
                                 */
#define FL_MAX_LINGER   150     /* Maximal linger timeout */

39 /* FL hash table */
40
41 #define FL_MAX_PER_SOCK 32
42 #define FL_MAX_SIZE     4096
43 #define FL_HASH_MASK    255
44 #define FL_HASH(l)      (ntohl(l)&FL_HASH_MASK)
45
static atomic_t fl_size = ATOMIC_INIT(0);
static struct ip6_flowlabel __rcu *fl_ht[FL_HASH_MASK+1];

static void ip6_fl_gc(unsigned long dummy);
static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc, 0, 0);

/* FL hash table lock: it protects only the GC */

static DEFINE_SPINLOCK(ip6_fl_lock);

/* Per-socket flowlabel list lock */

static DEFINE_SPINLOCK(ip6_sk_fl_lock);

#define for_each_fl_rcu(hash, fl)                               \
        for (fl = rcu_dereference_bh(fl_ht[(hash)]);            \
             fl != NULL;                                        \
             fl = rcu_dereference_bh(fl->next))
#define for_each_fl_continue_rcu(fl)                            \
        for (fl = rcu_dereference_bh(fl->next);                 \
             fl != NULL;                                        \
             fl = rcu_dereference_bh(fl->next))

#define for_each_sk_fl_rcu(np, sfl)                             \
        for (sfl = rcu_dereference_bh(np->ipv6_fl_list);        \
             sfl != NULL;                                       \
             sfl = rcu_dereference_bh(sfl->next))

static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label)
{
        struct ip6_flowlabel *fl;

        for_each_fl_rcu(FL_HASH(label), fl) {
                if (fl->label == label && net_eq(fl->fl_net, net))
                        return fl;
        }
        return NULL;
}

static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
{
        struct ip6_flowlabel *fl;

        rcu_read_lock_bh();
        fl = __fl_lookup(net, label);
        if (fl && !atomic_inc_not_zero(&fl->users))
                fl = NULL;
        rcu_read_unlock_bh();
        return fl;
}

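/*
 * Final teardown of a label: drop the pid reference held by
 * process-shared labels, free any attached options and RCU-free
 * the entry itself.
 */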
static void fl_free(struct ip6_flowlabel *fl)
{
        if (fl) {
                if (fl->share == IPV6_FL_S_PROCESS)
                        put_pid(fl->owner.pid);
                kfree(fl->opt);
                kfree_rcu(fl, rcu);
        }
}

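/*
 * Drop one user reference. When the last reference goes away, push the
 * expiry out by the linger interval and make sure the GC timer fires no
 * later than that deadline.
 */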
static void fl_release(struct ip6_flowlabel *fl)
{
        spin_lock_bh(&ip6_fl_lock);

        fl->lastuse = jiffies;
        if (atomic_dec_and_test(&fl->users)) {
                unsigned long ttd = fl->lastuse + fl->linger;
                if (time_after(ttd, fl->expires))
                        fl->expires = ttd;
                ttd = fl->expires;
                if (fl->opt && fl->share == IPV6_FL_S_EXCL) {
                        struct ipv6_txoptions *opt = fl->opt;
                        fl->opt = NULL;
                        kfree(opt);
                }
                if (!timer_pending(&ip6_fl_gc_timer) ||
                    time_after(ip6_fl_gc_timer.expires, ttd))
                        mod_timer(&ip6_fl_gc_timer, ttd);
        }
        spin_unlock_bh(&ip6_fl_lock);
}

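/*
 * Timer-driven garbage collector: walk every hash bucket, unlink and free
 * labels that have no users and are past their deadline, then re-arm the
 * timer for the earliest remaining expiry.
 */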
static void ip6_fl_gc(unsigned long dummy)
{
        int i;
        unsigned long now = jiffies;
        unsigned long sched = 0;

        spin_lock(&ip6_fl_lock);

        for (i = 0; i <= FL_HASH_MASK; i++) {
                struct ip6_flowlabel *fl;
                struct ip6_flowlabel __rcu **flp;

                flp = &fl_ht[i];
                while ((fl = rcu_dereference_protected(*flp,
                                                       lockdep_is_held(&ip6_fl_lock))) != NULL) {
                        if (atomic_read(&fl->users) == 0) {
                                unsigned long ttd = fl->lastuse + fl->linger;
                                if (time_after(ttd, fl->expires))
                                        fl->expires = ttd;
                                ttd = fl->expires;
                                if (time_after_eq(now, ttd)) {
                                        *flp = fl->next;
                                        fl_free(fl);
                                        atomic_dec(&fl_size);
                                        continue;
                                }
                                if (!sched || time_before(ttd, sched))
                                        sched = ttd;
                        }
                        flp = &fl->next;
                }
        }
        if (!sched && atomic_read(&fl_size))
                sched = now + FL_MAX_LINGER;
        if (sched)
                mod_timer(&ip6_fl_gc_timer, sched);
        spin_unlock(&ip6_fl_lock);
}

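/*
 * Network-namespace teardown: unlink and free every unreferenced label
 * that belongs to the dying namespace.
 */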
static void __net_exit ip6_fl_purge(struct net *net)
{
        int i;

        spin_lock_bh(&ip6_fl_lock);
        for (i = 0; i <= FL_HASH_MASK; i++) {
                struct ip6_flowlabel *fl;
                struct ip6_flowlabel __rcu **flp;

                flp = &fl_ht[i];
                while ((fl = rcu_dereference_protected(*flp,
                                                       lockdep_is_held(&ip6_fl_lock))) != NULL) {
                        if (net_eq(fl->fl_net, net) &&
                            atomic_read(&fl->users) == 0) {
                                *flp = fl->next;
                                fl_free(fl);
                                atomic_dec(&fl_size);
                                continue;
                        }
                        flp = &fl->next;
                }
        }
        spin_unlock_bh(&ip6_fl_lock);
}

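/*
 * Insert @fl into the global hash under @label; a zero label asks the
 * kernel to pick an unused random one. If the label is already taken,
 * return the existing entry with a reference held so the caller can
 * redo its permission checks; return NULL on successful insertion.
 */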
static struct ip6_flowlabel *fl_intern(struct net *net,
                                       struct ip6_flowlabel *fl, __be32 label)
{
        struct ip6_flowlabel *lfl;

        fl->label = label & IPV6_FLOWLABEL_MASK;

        spin_lock_bh(&ip6_fl_lock);
        if (label == 0) {
                for (;;) {
                        fl->label = htonl(prandom_u32())&IPV6_FLOWLABEL_MASK;
                        if (fl->label) {
                                lfl = __fl_lookup(net, fl->label);
                                if (!lfl)
                                        break;
                        }
                }
        } else {
                /*
                 * we dropped the ip6_fl_lock, so this entry could reappear
                 * and we need to recheck with it.
                 *
                 * OTOH no need to search the active socket first, like it is
                 * done in ipv6_flowlabel_opt - sock is locked, so a new entry
                 * with the same label can only appear on another sock
                 */
                lfl = __fl_lookup(net, fl->label);
                if (lfl) {
                        atomic_inc(&lfl->users);
                        spin_unlock_bh(&ip6_fl_lock);
                        return lfl;
                }
        }

        fl->lastuse = jiffies;
        fl->next = fl_ht[FL_HASH(fl->label)];
        rcu_assign_pointer(fl_ht[FL_HASH(fl->label)], fl);
        atomic_inc(&fl_size);
        spin_unlock_bh(&ip6_fl_lock);
        return NULL;
}


/* Socket flowlabel lists */

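/*
 * Find @label on @sk's own list, refresh its lastuse stamp and take a
 * reference on behalf of the caller.
 */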
struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label)
{
        struct ipv6_fl_socklist *sfl;
        struct ipv6_pinfo *np = inet6_sk(sk);

        label &= IPV6_FLOWLABEL_MASK;

        rcu_read_lock_bh();
        for_each_sk_fl_rcu(np, sfl) {
                struct ip6_flowlabel *fl = sfl->fl;
                if (fl->label == label) {
                        fl->lastuse = jiffies;
                        atomic_inc(&fl->users);
                        rcu_read_unlock_bh();
                        return fl;
                }
        }
        rcu_read_unlock_bh();
        return NULL;
}
EXPORT_SYMBOL_GPL(fl6_sock_lookup);

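/*
 * Called when the socket is freed: tear down the whole per-socket list,
 * releasing one user reference per attached label.
 */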
void fl6_free_socklist(struct sock *sk)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct ipv6_fl_socklist *sfl;

        if (!rcu_access_pointer(np->ipv6_fl_list))
                return;

        spin_lock_bh(&ip6_sk_fl_lock);
        while ((sfl = rcu_dereference_protected(np->ipv6_fl_list,
                                                lockdep_is_held(&ip6_sk_fl_lock))) != NULL) {
                np->ipv6_fl_list = sfl->next;
                spin_unlock_bh(&ip6_sk_fl_lock);

                fl_release(sfl->fl);
                kfree_rcu(sfl, rcu);

                spin_lock_bh(&ip6_sk_fl_lock);
        }
        spin_unlock_bh(&ip6_sk_fl_lock);
}

/* Service routines */


/*
   This is the only difficult place: a flow label enforces identical
   headers up to and including the routing header, but the user may still
   supply options following the rthdr.
 */

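/*
 * Hence the merge rule below: the hop-by-hop, destination-0 and routing
 * headers always come from the label (even when the label carries none),
 * and only dst1opt and opt_flen may be taken from the caller's options.
 */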
struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
                                         struct ip6_flowlabel *fl,
                                         struct ipv6_txoptions *fopt)
{
        struct ipv6_txoptions *fl_opt = fl->opt;

        if (!fopt || fopt->opt_flen == 0)
                return fl_opt;

        if (fl_opt) {
                opt_space->hopopt = fl_opt->hopopt;
                opt_space->dst0opt = fl_opt->dst0opt;
                opt_space->srcrt = fl_opt->srcrt;
                opt_space->opt_nflen = fl_opt->opt_nflen;
        } else {
                if (fopt->opt_nflen == 0)
                        return fopt;
                opt_space->hopopt = NULL;
                opt_space->dst0opt = NULL;
                opt_space->srcrt = NULL;
                opt_space->opt_nflen = 0;
        }
        opt_space->dst1opt = fopt->dst1opt;
        opt_space->opt_flen = fopt->opt_flen;
        return opt_space;
}
EXPORT_SYMBOL_GPL(fl6_merge_options);

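/*
 * Convert a user-supplied timeout in seconds to jiffies, raising it to at
 * least FL_MIN_LINGER; values above FL_MAX_LINGER require CAP_NET_ADMIN
 * and are rejected (0) otherwise.
 */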
static unsigned long check_linger(unsigned long ttl)
{
        if (ttl < FL_MIN_LINGER)
                return FL_MIN_LINGER*HZ;
        if (ttl > FL_MAX_LINGER && !capable(CAP_NET_ADMIN))
                return 0;
        return ttl*HZ;
}

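/*
 * Extend a label's linger and expiry times from user-supplied values in
 * seconds; existing deadlines are never shortened.
 */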
static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned long expires)
{
        linger = check_linger(linger);
        if (!linger)
                return -EPERM;
        expires = check_linger(expires);
        if (!expires)
                return -EPERM;

        spin_lock_bh(&ip6_fl_lock);
        fl->lastuse = jiffies;
        if (time_before(fl->linger, linger))
                fl->linger = linger;
        if (time_before(expires, fl->linger))
                expires = fl->linger;
        if (time_before(fl->expires, fl->lastuse + expires))
                fl->expires = fl->lastuse + expires;
        spin_unlock_bh(&ip6_fl_lock);

        return 0;
}

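/*
 * Build a new label from an IPV6_FL_A_GET request: copy the cmsg-formatted
 * options that follow the request structure, parse them with
 * ip6_datagram_send_ctl(), validate the timeouts and the destination
 * address, and record ownership according to the requested share mode.
 */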
static struct ip6_flowlabel *
fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
          char __user *optval, int optlen, int *err_p)
{
        struct ip6_flowlabel *fl = NULL;
        int olen;
        int addr_type;
        int err;

        olen = optlen - CMSG_ALIGN(sizeof(*freq));
        err = -EINVAL;
        if (olen > 64 * 1024)
                goto done;

        err = -ENOMEM;
        fl = kzalloc(sizeof(*fl), GFP_KERNEL);
        if (!fl)
                goto done;

        if (olen > 0) {
                struct msghdr msg;
                struct flowi6 flowi6;
                int junk;

                err = -ENOMEM;
                fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
                if (!fl->opt)
                        goto done;

                memset(fl->opt, 0, sizeof(*fl->opt));
                fl->opt->tot_len = sizeof(*fl->opt) + olen;
                err = -EFAULT;
                if (copy_from_user(fl->opt+1, optval+CMSG_ALIGN(sizeof(*freq)), olen))
                        goto done;

                msg.msg_controllen = olen;
                msg.msg_control = (void *)(fl->opt+1);
                memset(&flowi6, 0, sizeof(flowi6));

                err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt,
                                            &junk, &junk, &junk);
                if (err)
                        goto done;
                err = -EINVAL;
                if (fl->opt->opt_flen)
                        goto done;
                if (fl->opt->opt_nflen == 0) {
                        kfree(fl->opt);
                        fl->opt = NULL;
                }
        }

        fl->fl_net = net;
        fl->expires = jiffies;
        err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
        if (err)
                goto done;
        fl->share = freq->flr_share;
        addr_type = ipv6_addr_type(&freq->flr_dst);
        if ((addr_type & IPV6_ADDR_MAPPED) ||
            addr_type == IPV6_ADDR_ANY) {
                err = -EINVAL;
                goto done;
        }
        fl->dst = freq->flr_dst;
        atomic_set(&fl->users, 1);
        switch (fl->share) {
        case IPV6_FL_S_EXCL:
        case IPV6_FL_S_ANY:
                break;
        case IPV6_FL_S_PROCESS:
                fl->owner.pid = get_task_pid(current, PIDTYPE_PID);
                break;
        case IPV6_FL_S_USER:
                fl->owner.uid = current_euid();
                break;
        default:
                err = -EINVAL;
                goto done;
        }
        return fl;

done:
        fl_free(fl);
        *err_p = err;
        return NULL;
}

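/*
 * Admission control for new labels: unprivileged sockets are refused when
 * the global table is nearly full or when they already hold
 * FL_MAX_PER_SOCK labels.
 */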
static int mem_check(struct sock *sk)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct ipv6_fl_socklist *sfl;
        int room = FL_MAX_SIZE - atomic_read(&fl_size);
        int count = 0;

        if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
                return 0;

        rcu_read_lock_bh();
        for_each_sk_fl_rcu(np, sfl)
                count++;
        rcu_read_unlock_bh();

        if (room <= 0 ||
            ((count >= FL_MAX_PER_SOCK ||
              (count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4) &&
             !capable(CAP_NET_ADMIN)))
                return -ENOBUFS;

        return 0;
}

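/*
 * Publish @sfl, pointing at @fl, at the head of the socket's
 * RCU-protected flowlabel list.
 */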
static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl,
                struct ip6_flowlabel *fl)
{
        spin_lock_bh(&ip6_sk_fl_lock);
        sfl->fl = fl;
        sfl->next = np->ipv6_fl_list;
        rcu_assign_pointer(np->ipv6_fl_list, sfl);
        spin_unlock_bh(&ip6_sk_fl_lock);
}

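/*
 * getsockopt(IPV6_FLOWLABEL_MGR): report the label currently attached to
 * the socket, either the reflected/received one or the managed entry
 * matching np->flow_label.
 */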
int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
                           int flags)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct ipv6_fl_socklist *sfl;

        if (flags & IPV6_FL_F_REMOTE) {
                freq->flr_label = np->rcv_flowinfo & IPV6_FLOWLABEL_MASK;
                return 0;
        }

        if (np->repflow) {
                freq->flr_label = np->flow_label;
                return 0;
        }

        rcu_read_lock_bh();

        for_each_sk_fl_rcu(np, sfl) {
                if (sfl->fl->label == (np->flow_label & IPV6_FLOWLABEL_MASK)) {
                        spin_lock_bh(&ip6_fl_lock);
                        freq->flr_label = sfl->fl->label;
                        freq->flr_dst = sfl->fl->dst;
                        freq->flr_share = sfl->fl->share;
                        freq->flr_expires = (sfl->fl->expires - jiffies) / HZ;
                        freq->flr_linger = sfl->fl->linger / HZ;

                        spin_unlock_bh(&ip6_fl_lock);
                        rcu_read_unlock_bh();
                        return 0;
                }
        }
        rcu_read_unlock_bh();

        return -ENOENT;
}

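/*
 * setsockopt(IPV6_FLOWLABEL_MGR): dispatch on flr_action. IPV6_FL_A_PUT
 * detaches a label from the socket, IPV6_FL_A_RENEW extends its lifetime,
 * and IPV6_FL_A_GET attaches an existing label or creates a new one.
 *
 * Userspace sketch (illustrative only) for acquiring a fresh label:
 *
 *      struct in6_flowlabel_req freq = {
 *              .flr_dst    = dst,              // destination address
 *              .flr_label  = 0,                // 0: let the kernel choose
 *              .flr_action = IPV6_FL_A_GET,
 *              .flr_share  = IPV6_FL_S_EXCL,
 *              .flr_flags  = IPV6_FL_F_CREATE,
 *      };
 *      setsockopt(fd, IPPROTO_IPV6, IPV6_FLOWLABEL_MGR, &freq, sizeof(freq));
 *      // on success the kernel has written the chosen label back into
 *      // freq.flr_label
 */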
int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
{
        int uninitialized_var(err);
        struct net *net = sock_net(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct in6_flowlabel_req freq;
        struct ipv6_fl_socklist *sfl1 = NULL;
        struct ipv6_fl_socklist *sfl;
        struct ipv6_fl_socklist __rcu **sflp;
        struct ip6_flowlabel *fl, *fl1 = NULL;

        if (optlen < sizeof(freq))
                return -EINVAL;

        if (copy_from_user(&freq, optval, sizeof(freq)))
                return -EFAULT;

        switch (freq.flr_action) {
        case IPV6_FL_A_PUT:
                if (freq.flr_flags & IPV6_FL_F_REFLECT) {
                        if (sk->sk_protocol != IPPROTO_TCP)
                                return -ENOPROTOOPT;
                        if (!np->repflow)
                                return -ESRCH;
                        np->flow_label = 0;
                        np->repflow = 0;
                        return 0;
                }
                spin_lock_bh(&ip6_sk_fl_lock);
                for (sflp = &np->ipv6_fl_list;
                     (sfl = rcu_dereference_protected(*sflp,
                                                      lockdep_is_held(&ip6_sk_fl_lock))) != NULL;
                     sflp = &sfl->next) {
                        if (sfl->fl->label == freq.flr_label) {
                                if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
                                        np->flow_label &= ~IPV6_FLOWLABEL_MASK;
                                *sflp = sfl->next;
                                spin_unlock_bh(&ip6_sk_fl_lock);
                                fl_release(sfl->fl);
                                kfree_rcu(sfl, rcu);
                                return 0;
                        }
                }
                spin_unlock_bh(&ip6_sk_fl_lock);
                return -ESRCH;

        case IPV6_FL_A_RENEW:
                rcu_read_lock_bh();
                for_each_sk_fl_rcu(np, sfl) {
                        if (sfl->fl->label == freq.flr_label) {
                                err = fl6_renew(sfl->fl, freq.flr_linger, freq.flr_expires);
                                rcu_read_unlock_bh();
                                return err;
                        }
                }
                rcu_read_unlock_bh();

                if (freq.flr_share == IPV6_FL_S_NONE &&
                    ns_capable(net->user_ns, CAP_NET_ADMIN)) {
                        fl = fl_lookup(net, freq.flr_label);
                        if (fl) {
                                err = fl6_renew(fl, freq.flr_linger, freq.flr_expires);
                                fl_release(fl);
                                return err;
                        }
                }
                return -ESRCH;

        case IPV6_FL_A_GET:
                if (freq.flr_flags & IPV6_FL_F_REFLECT) {
                        if (net->ipv6.sysctl.flowlabel_consistency) {
                                net_info_ratelimited("Cannot set IPV6_FL_F_REFLECT if the flowlabel_consistency sysctl is enabled\n");
                                return -EPERM;
                        }

                        if (sk->sk_protocol != IPPROTO_TCP)
                                return -ENOPROTOOPT;

                        np->repflow = 1;
                        return 0;
                }

                if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
                        return -EINVAL;

                if (net->ipv6.sysctl.flowlabel_state_ranges &&
                    (freq.flr_label & IPV6_FLOWLABEL_STATELESS_FLAG))
                        return -ERANGE;

                fl = fl_create(net, sk, &freq, optval, optlen, &err);
                if (!fl)
                        return err;
                sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);

                if (freq.flr_label) {
                        err = -EEXIST;
                        rcu_read_lock_bh();
                        for_each_sk_fl_rcu(np, sfl) {
                                if (sfl->fl->label == freq.flr_label) {
                                        if (freq.flr_flags&IPV6_FL_F_EXCL) {
                                                rcu_read_unlock_bh();
                                                goto done;
                                        }
                                        fl1 = sfl->fl;
                                        atomic_inc(&fl1->users);
                                        break;
                                }
                        }
                        rcu_read_unlock_bh();

                        if (!fl1)
                                fl1 = fl_lookup(net, freq.flr_label);
                        if (fl1) {
recheck:
                                err = -EEXIST;
                                if (freq.flr_flags&IPV6_FL_F_EXCL)
                                        goto release;
                                err = -EPERM;
                                /* Reuse is allowed only when the share mode
                                 * matches and, for process- or user-scoped
                                 * labels, the caller is the same owner.
                                 */
                                if (fl1->share == IPV6_FL_S_EXCL ||
                                    fl1->share != fl->share ||
                                    ((fl1->share == IPV6_FL_S_PROCESS) &&
                                     (fl1->owner.pid != fl->owner.pid)) ||
                                    ((fl1->share == IPV6_FL_S_USER) &&
                                     !uid_eq(fl1->owner.uid, fl->owner.uid)))
                                        goto release;

                                err = -ENOMEM;
                                if (!sfl1)
                                        goto release;
                                if (fl->linger > fl1->linger)
                                        fl1->linger = fl->linger;
                                if ((long)(fl->expires - fl1->expires) > 0)
                                        fl1->expires = fl->expires;
                                fl_link(np, sfl1, fl1);
                                fl_free(fl);
                                return 0;

release:
                                fl_release(fl1);
                                goto done;
                        }
                }
                err = -ENOENT;
                if (!(freq.flr_flags&IPV6_FL_F_CREATE))
                        goto done;

                err = -ENOMEM;
                if (!sfl1)
                        goto done;

                err = mem_check(sk);
                if (err != 0)
                        goto done;

                fl1 = fl_intern(net, fl, freq.flr_label);
                if (fl1)
                        goto recheck;

                if (!freq.flr_label) {
                        if (copy_to_user(&((struct in6_flowlabel_req __user *) optval)->flr_label,
                                         &fl->label, sizeof(fl->label))) {
                                /* Intentionally ignore fault. */
                        }
                }

                fl_link(np, sfl1, fl);
                return 0;

        default:
                return -EINVAL;
        }

done:
        fl_free(fl);
        kfree(sfl1);
        return err;
}

#ifdef CONFIG_PROC_FS

struct ip6fl_iter_state {
        struct seq_net_private p;
        struct pid_namespace *pid_ns;
        int bucket;
};

#define ip6fl_seq_private(seq)  ((struct ip6fl_iter_state *)(seq)->private)

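/*
 * /proc/net/ip6_flowlabel: iterate over the hash buckets in order under
 * rcu_read_lock_bh(), showing only labels that belong to the reader's
 * network namespace.
 */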
static struct ip6_flowlabel *ip6fl_get_first(struct seq_file *seq)
{
        struct ip6_flowlabel *fl = NULL;
        struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
        struct net *net = seq_file_net(seq);

        for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {
                for_each_fl_rcu(state->bucket, fl) {
                        if (net_eq(fl->fl_net, net))
                                goto out;
                }
        }
        fl = NULL;
out:
        return fl;
}

static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flowlabel *fl)
{
        struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
        struct net *net = seq_file_net(seq);

        for_each_fl_continue_rcu(fl) {
                if (net_eq(fl->fl_net, net))
                        goto out;
        }

try_again:
        if (++state->bucket <= FL_HASH_MASK) {
                for_each_fl_rcu(state->bucket, fl) {
                        if (net_eq(fl->fl_net, net))
                                goto out;
                }
                goto try_again;
        }
        fl = NULL;

out:
        return fl;
}

static struct ip6_flowlabel *ip6fl_get_idx(struct seq_file *seq, loff_t pos)
{
        struct ip6_flowlabel *fl = ip6fl_get_first(seq);
        if (fl)
                while (pos && (fl = ip6fl_get_next(seq, fl)) != NULL)
                        --pos;
        return pos ? NULL : fl;
}

static void *ip6fl_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock_bh();
        return *pos ? ip6fl_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *ip6fl_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct ip6_flowlabel *fl;

        if (v == SEQ_START_TOKEN)
                fl = ip6fl_get_first(seq);
        else
                fl = ip6fl_get_next(seq, v);
        ++*pos;
        return fl;
}

static void ip6fl_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        rcu_read_unlock_bh();
}

static int ip6fl_seq_show(struct seq_file *seq, void *v)
{
        struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
        if (v == SEQ_START_TOKEN) {
                seq_puts(seq, "Label S Owner  Users  Linger Expires  Dst                              Opt\n");
        } else {
                struct ip6_flowlabel *fl = v;
                seq_printf(seq,
                           "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
                           (unsigned int)ntohl(fl->label),
                           fl->share,
                           ((fl->share == IPV6_FL_S_PROCESS) ?
                            pid_nr_ns(fl->owner.pid, state->pid_ns) :
                            ((fl->share == IPV6_FL_S_USER) ?
                             from_kuid_munged(seq_user_ns(seq), fl->owner.uid) :
                             0)),
                           atomic_read(&fl->users),
                           fl->linger/HZ,
                           (long)(fl->expires - jiffies)/HZ,
                           &fl->dst,
                           fl->opt ? fl->opt->opt_nflen : 0);
        }
        return 0;
}

static const struct seq_operations ip6fl_seq_ops = {
        .start  =       ip6fl_seq_start,
        .next   =       ip6fl_seq_next,
        .stop   =       ip6fl_seq_stop,
        .show   =       ip6fl_seq_show,
};

static int ip6fl_seq_open(struct inode *inode, struct file *file)
{
        struct seq_file *seq;
        struct ip6fl_iter_state *state;
        int err;

        err = seq_open_net(inode, file, &ip6fl_seq_ops,
                           sizeof(struct ip6fl_iter_state));

        if (!err) {
                seq = file->private_data;
                state = ip6fl_seq_private(seq);
                rcu_read_lock();
                state->pid_ns = get_pid_ns(task_active_pid_ns(current));
                rcu_read_unlock();
        }
        return err;
}

static int ip6fl_seq_release(struct inode *inode, struct file *file)
{
        struct seq_file *seq = file->private_data;
        struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
        put_pid_ns(state->pid_ns);
        return seq_release_net(inode, file);
}

static const struct file_operations ip6fl_seq_fops = {
        .owner          =       THIS_MODULE,
        .open           =       ip6fl_seq_open,
        .read           =       seq_read,
        .llseek         =       seq_lseek,
        .release        =       ip6fl_seq_release,
};

static int __net_init ip6_flowlabel_proc_init(struct net *net)
{
        if (!proc_create("ip6_flowlabel", S_IRUGO, net->proc_net,
                         &ip6fl_seq_fops))
                return -ENOMEM;
        return 0;
}

static void __net_exit ip6_flowlabel_proc_fini(struct net *net)
{
        remove_proc_entry("ip6_flowlabel", net->proc_net);
}
#else
static inline int ip6_flowlabel_proc_init(struct net *net)
{
        return 0;
}
static inline void ip6_flowlabel_proc_fini(struct net *net)
{
}
#endif

static void __net_exit ip6_flowlabel_net_exit(struct net *net)
{
        ip6_fl_purge(net);
        ip6_flowlabel_proc_fini(net);
}

static struct pernet_operations ip6_flowlabel_net_ops = {
        .init = ip6_flowlabel_proc_init,
        .exit = ip6_flowlabel_net_exit,
};

int ip6_flowlabel_init(void)
{
        return register_pernet_subsys(&ip6_flowlabel_net_ops);
}

void ip6_flowlabel_cleanup(void)
{
        del_timer(&ip6_fl_gc_timer);
        unregister_pernet_subsys(&ip6_flowlabel_net_ops);
}