1 /*
2  *      Linux INET6 implementation
3  *      FIB front-end.
4  *
5  *      Authors:
6  *      Pedro Roque             <roque@di.fc.ul.pt>
7  *
8  *      This program is free software; you can redistribute it and/or
9  *      modify it under the terms of the GNU General Public License
10  *      as published by the Free Software Foundation; either version
11  *      2 of the License, or (at your option) any later version.
12  */
13
14 /*      Changes:
15  *
16  *      YOSHIFUJI Hideaki @USAGI
17  *              reworked default router selection.
18  *              - respect outgoing interface
19  *              - select from (probably) reachable routers (i.e.
20  *              routers in REACHABLE, STALE, DELAY or PROBE states).
21  *              - always select the same router if it is (probably)
22  *              reachable.  otherwise, round-robin the list.
23  *      Ville Nuorvala
24  *              Fixed routing subtrees.
25  */
26
27 #define pr_fmt(fmt) "IPv6: " fmt
28
29 #include <linux/capability.h>
30 #include <linux/errno.h>
31 #include <linux/export.h>
32 #include <linux/types.h>
33 #include <linux/times.h>
34 #include <linux/socket.h>
35 #include <linux/sockios.h>
36 #include <linux/net.h>
37 #include <linux/route.h>
38 #include <linux/netdevice.h>
39 #include <linux/in6.h>
40 #include <linux/mroute6.h>
41 #include <linux/init.h>
42 #include <linux/if_arp.h>
43 #include <linux/proc_fs.h>
44 #include <linux/seq_file.h>
45 #include <linux/nsproxy.h>
46 #include <linux/slab.h>
47 #include <net/net_namespace.h>
48 #include <net/snmp.h>
49 #include <net/ipv6.h>
50 #include <net/ip6_fib.h>
51 #include <net/ip6_route.h>
52 #include <net/ndisc.h>
53 #include <net/addrconf.h>
54 #include <net/tcp.h>
55 #include <linux/rtnetlink.h>
56 #include <net/dst.h>
57 #include <net/dst_metadata.h>
58 #include <net/xfrm.h>
59 #include <net/netevent.h>
60 #include <net/netlink.h>
61 #include <net/nexthop.h>
62 #include <net/lwtunnel.h>
63 #include <net/ip_tunnels.h>
64 #include <net/l3mdev.h>
65 #include <trace/events/fib6.h>
66
67 #include <linux/uaccess.h>
68
69 #ifdef CONFIG_SYSCTL
70 #include <linux/sysctl.h>
71 #endif
72
73 enum rt6_nud_state {
74         RT6_NUD_FAIL_HARD = -3,
75         RT6_NUD_FAIL_PROBE = -2,
76         RT6_NUD_FAIL_DO_RR = -1,
77         RT6_NUD_SUCCEED = 1
78 };
79
80 static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort);
81 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
82 static unsigned int      ip6_default_advmss(const struct dst_entry *dst);
83 static unsigned int      ip6_mtu(const struct dst_entry *dst);
84 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
85 static void             ip6_dst_destroy(struct dst_entry *);
86 static void             ip6_dst_ifdown(struct dst_entry *,
87                                        struct net_device *dev, int how);
88 static int               ip6_dst_gc(struct dst_ops *ops);
89
90 static int              ip6_pkt_discard(struct sk_buff *skb);
91 static int              ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
92 static int              ip6_pkt_prohibit(struct sk_buff *skb);
93 static int              ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
94 static void             ip6_link_failure(struct sk_buff *skb);
95 static void             ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
96                                            struct sk_buff *skb, u32 mtu);
97 static void             rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
98                                         struct sk_buff *skb);
99 static void             rt6_dst_from_metrics_check(struct rt6_info *rt);
100 static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
101 static size_t rt6_nlmsg_size(struct rt6_info *rt);
102 static int rt6_fill_node(struct net *net,
103                          struct sk_buff *skb, struct rt6_info *rt,
104                          struct in6_addr *dst, struct in6_addr *src,
105                          int iif, int type, u32 portid, u32 seq,
106                          unsigned int flags);
107
108 #ifdef CONFIG_IPV6_ROUTE_INFO
109 static struct rt6_info *rt6_add_route_info(struct net *net,
110                                            const struct in6_addr *prefix, int prefixlen,
111                                            const struct in6_addr *gwaddr,
112                                            struct net_device *dev,
113                                            unsigned int pref);
114 static struct rt6_info *rt6_get_route_info(struct net *net,
115                                            const struct in6_addr *prefix, int prefixlen,
116                                            const struct in6_addr *gwaddr,
117                                            struct net_device *dev);
118 #endif
119
120 struct uncached_list {
121         spinlock_t              lock;
122         struct list_head        head;
123 };
124
125 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
126
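/* Uncached routes (e.g. the RTF_CACHE clones created in ip6_pol_route())
 * are not owned by the fib6 tree.  They are kept on a per-cpu list so that
 * rt6_uncached_list_flush_dev() can re-point them at the loopback device
 * when their output device is unregistered.
 */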
127 static void rt6_uncached_list_add(struct rt6_info *rt)
128 {
129         struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
130
131         rt->rt6i_uncached_list = ul;
132
133         spin_lock_bh(&ul->lock);
134         list_add_tail(&rt->rt6i_uncached, &ul->head);
135         spin_unlock_bh(&ul->lock);
136 }
137
138 static void rt6_uncached_list_del(struct rt6_info *rt)
139 {
140         if (!list_empty(&rt->rt6i_uncached)) {
141                 struct uncached_list *ul = rt->rt6i_uncached_list;
142
143                 spin_lock_bh(&ul->lock);
144                 list_del(&rt->rt6i_uncached);
145                 spin_unlock_bh(&ul->lock);
146         }
147 }
148
149 static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
150 {
151         struct net_device *loopback_dev = net->loopback_dev;
152         int cpu;
153
154         if (dev == loopback_dev)
155                 return;
156
157         for_each_possible_cpu(cpu) {
158                 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
159                 struct rt6_info *rt;
160
161                 spin_lock_bh(&ul->lock);
162                 list_for_each_entry(rt, &ul->head, rt6i_uncached) {
163                         struct inet6_dev *rt_idev = rt->rt6i_idev;
164                         struct net_device *rt_dev = rt->dst.dev;
165
166                         if (rt_idev->dev == dev) {
167                                 rt->rt6i_idev = in6_dev_get(loopback_dev);
168                                 in6_dev_put(rt_idev);
169                         }
170
171                         if (rt_dev == dev) {
172                                 rt->dst.dev = loopback_dev;
173                                 dev_hold(rt->dst.dev);
174                                 dev_put(rt_dev);
175                         }
176                 }
177                 spin_unlock_bh(&ul->lock);
178         }
179 }
180
181 static u32 *rt6_pcpu_cow_metrics(struct rt6_info *rt)
182 {
183         return dst_metrics_write_ptr(rt->dst.from);
184 }
185
186 static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
187 {
188         struct rt6_info *rt = (struct rt6_info *)dst;
189
190         if (rt->rt6i_flags & RTF_PCPU)
191                 return rt6_pcpu_cow_metrics(rt);
192         else if (rt->rt6i_flags & RTF_CACHE)
193                 return NULL;
194         else
195                 return dst_cow_metrics_generic(dst, old);
196 }
197
198 static inline const void *choose_neigh_daddr(struct rt6_info *rt,
199                                              struct sk_buff *skb,
200                                              const void *daddr)
201 {
202         struct in6_addr *p = &rt->rt6i_gateway;
203
204         if (!ipv6_addr_any(p))
205                 return (const void *) p;
206         else if (skb)
207                 return &ipv6_hdr(skb)->daddr;
208         return daddr;
209 }
210
211 static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
212                                           struct sk_buff *skb,
213                                           const void *daddr)
214 {
215         struct rt6_info *rt = (struct rt6_info *) dst;
216         struct neighbour *n;
217
218         daddr = choose_neigh_daddr(rt, skb, daddr);
219         n = __ipv6_neigh_lookup(dst->dev, daddr);
220         if (n)
221                 return n;
222         return neigh_create(&nd_tbl, daddr, dst->dev);
223 }
224
225 static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
226 {
227         struct net_device *dev = dst->dev;
228         struct rt6_info *rt = (struct rt6_info *)dst;
229
230         daddr = choose_neigh_daddr(rt, NULL, daddr);
231         if (!daddr)
232                 return;
233         if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
234                 return;
235         if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
236                 return;
237         __ipv6_confirm_neigh(dev, daddr);
238 }
239
240 static struct dst_ops ip6_dst_ops_template = {
241         .family                 =       AF_INET6,
242         .gc                     =       ip6_dst_gc,
243         .gc_thresh              =       1024,
244         .check                  =       ip6_dst_check,
245         .default_advmss         =       ip6_default_advmss,
246         .mtu                    =       ip6_mtu,
247         .cow_metrics            =       ipv6_cow_metrics,
248         .destroy                =       ip6_dst_destroy,
249         .ifdown                 =       ip6_dst_ifdown,
250         .negative_advice        =       ip6_negative_advice,
251         .link_failure           =       ip6_link_failure,
252         .update_pmtu            =       ip6_rt_update_pmtu,
253         .redirect               =       rt6_do_redirect,
254         .local_out              =       __ip6_local_out,
255         .neigh_lookup           =       ip6_neigh_lookup,
256         .confirm_neigh          =       ip6_confirm_neigh,
257 };
258
259 static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
260 {
261         unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
262
263         return mtu ? : dst->dev->mtu;
264 }
265
266 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
267                                          struct sk_buff *skb, u32 mtu)
268 {
269 }
270
271 static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
272                                       struct sk_buff *skb)
273 {
274 }
275
276 static struct dst_ops ip6_dst_blackhole_ops = {
277         .family                 =       AF_INET6,
278         .destroy                =       ip6_dst_destroy,
279         .check                  =       ip6_dst_check,
280         .mtu                    =       ip6_blackhole_mtu,
281         .default_advmss         =       ip6_default_advmss,
282         .update_pmtu            =       ip6_rt_blackhole_update_pmtu,
283         .redirect               =       ip6_rt_blackhole_redirect,
284         .cow_metrics            =       dst_cow_metrics_generic,
285         .neigh_lookup           =       ip6_neigh_lookup,
286 };
287
288 static const u32 ip6_template_metrics[RTAX_MAX] = {
289         [RTAX_HOPLIMIT - 1] = 0,
290 };
291
292 static const struct rt6_info ip6_null_entry_template = {
293         .dst = {
294                 .__refcnt       = ATOMIC_INIT(1),
295                 .__use          = 1,
296                 .obsolete       = DST_OBSOLETE_FORCE_CHK,
297                 .error          = -ENETUNREACH,
298                 .input          = ip6_pkt_discard,
299                 .output         = ip6_pkt_discard_out,
300         },
301         .rt6i_flags     = (RTF_REJECT | RTF_NONEXTHOP),
302         .rt6i_protocol  = RTPROT_KERNEL,
303         .rt6i_metric    = ~(u32) 0,
304         .rt6i_ref       = ATOMIC_INIT(1),
305 };
306
307 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
308
309 static const struct rt6_info ip6_prohibit_entry_template = {
310         .dst = {
311                 .__refcnt       = ATOMIC_INIT(1),
312                 .__use          = 1,
313                 .obsolete       = DST_OBSOLETE_FORCE_CHK,
314                 .error          = -EACCES,
315                 .input          = ip6_pkt_prohibit,
316                 .output         = ip6_pkt_prohibit_out,
317         },
318         .rt6i_flags     = (RTF_REJECT | RTF_NONEXTHOP),
319         .rt6i_protocol  = RTPROT_KERNEL,
320         .rt6i_metric    = ~(u32) 0,
321         .rt6i_ref       = ATOMIC_INIT(1),
322 };
323
324 static const struct rt6_info ip6_blk_hole_entry_template = {
325         .dst = {
326                 .__refcnt       = ATOMIC_INIT(1),
327                 .__use          = 1,
328                 .obsolete       = DST_OBSOLETE_FORCE_CHK,
329                 .error          = -EINVAL,
330                 .input          = dst_discard,
331                 .output         = dst_discard_out,
332         },
333         .rt6i_flags     = (RTF_REJECT | RTF_NONEXTHOP),
334         .rt6i_protocol  = RTPROT_KERNEL,
335         .rt6i_metric    = ~(u32) 0,
336         .rt6i_ref       = ATOMIC_INIT(1),
337 };
338
339 #endif
340
341 static void rt6_info_init(struct rt6_info *rt)
342 {
343         struct dst_entry *dst = &rt->dst;
344
345         memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
346         INIT_LIST_HEAD(&rt->rt6i_siblings);
347         INIT_LIST_HEAD(&rt->rt6i_uncached);
348 }
349
350 /* allocate dst with ip6_dst_ops */
351 static struct rt6_info *__ip6_dst_alloc(struct net *net,
352                                         struct net_device *dev,
353                                         int flags)
354 {
355         struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
356                                         1, DST_OBSOLETE_FORCE_CHK, flags);
357
358         if (rt)
359                 rt6_info_init(rt);
360
361         return rt;
362 }
363
364 struct rt6_info *ip6_dst_alloc(struct net *net,
365                                struct net_device *dev,
366                                int flags)
367 {
368         struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
369
370         if (rt) {
371                 rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);
372                 if (rt->rt6i_pcpu) {
373                         int cpu;
374
375                         for_each_possible_cpu(cpu) {
376                                 struct rt6_info **p;
377
378                                 p = per_cpu_ptr(rt->rt6i_pcpu, cpu);
379                                 /* no one shares rt */
380                                 *p =  NULL;
381                         }
382                 } else {
383                         dst_release_immediate(&rt->dst);
384                         return NULL;
385                 }
386         }
387
388         return rt;
389 }
390 EXPORT_SYMBOL(ip6_dst_alloc);
391
392 static void ip6_dst_destroy(struct dst_entry *dst)
393 {
394         struct rt6_info *rt = (struct rt6_info *)dst;
395         struct dst_entry *from = dst->from;
396         struct inet6_dev *idev;
397
398         dst_destroy_metrics_generic(dst);
399         free_percpu(rt->rt6i_pcpu);
400         rt6_uncached_list_del(rt);
401
402         idev = rt->rt6i_idev;
403         if (idev) {
404                 rt->rt6i_idev = NULL;
405                 in6_dev_put(idev);
406         }
407
408         dst->from = NULL;
409         dst_release(from);
410 }
411
412 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
413                            int how)
414 {
415         struct rt6_info *rt = (struct rt6_info *)dst;
416         struct inet6_dev *idev = rt->rt6i_idev;
417         struct net_device *loopback_dev =
418                 dev_net(dev)->loopback_dev;
419
420         if (dev != loopback_dev) {
421                 if (idev && idev->dev == dev) {
422                         struct inet6_dev *loopback_idev =
423                                 in6_dev_get(loopback_dev);
424                         if (loopback_idev) {
425                                 rt->rt6i_idev = loopback_idev;
426                                 in6_dev_put(idev);
427                         }
428                 }
429         }
430 }
431
432 static bool __rt6_check_expired(const struct rt6_info *rt)
433 {
434         if (rt->rt6i_flags & RTF_EXPIRES)
435                 return time_after(jiffies, rt->dst.expires);
436         else
437                 return false;
438 }
439
440 static bool rt6_check_expired(const struct rt6_info *rt)
441 {
442         if (rt->rt6i_flags & RTF_EXPIRES) {
443                 if (time_after(jiffies, rt->dst.expires))
444                         return true;
445         } else if (rt->dst.from) {
446                 return rt6_check_expired((struct rt6_info *) rt->dst.from);
447         }
448         return false;
449 }
450
451 /* Multipath route selection:
452  *   Hash-based function using the packet header and flow label.
453  * Adapted from fib_info_hashfn()
454  */
455 static int rt6_info_hash_nhsfn(unsigned int candidate_count,
456                                const struct flowi6 *fl6)
457 {
458         return get_hash_from_flowi6(fl6) % candidate_count;
459 }
460
461 static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
462                                              struct flowi6 *fl6, int oif,
463                                              int strict)
464 {
465         struct rt6_info *sibling, *next_sibling;
466         int route_choosen;
467
468         route_choosen = rt6_info_hash_nhsfn(match->rt6i_nsiblings + 1, fl6);
469         /* Don't change the route if route_choosen == 0
470          * (the sibling list does not include this route itself).
471          */
472         if (route_choosen)
473                 list_for_each_entry_safe(sibling, next_sibling,
474                                 &match->rt6i_siblings, rt6i_siblings) {
475                         route_choosen--;
476                         if (route_choosen == 0) {
477                                 if (rt6_score_route(sibling, oif, strict) < 0)
478                                         break;
479                                 match = sibling;
480                                 break;
481                         }
482                 }
483         return match;
484 }
485
486 /*
487  *      Route lookup. Any table->tb6_lock is implied.
488  */
489
490 static inline struct rt6_info *rt6_device_match(struct net *net,
491                                                     struct rt6_info *rt,
492                                                     const struct in6_addr *saddr,
493                                                     int oif,
494                                                     int flags)
495 {
496         struct rt6_info *local = NULL;
497         struct rt6_info *sprt;
498
499         if (!oif && ipv6_addr_any(saddr))
500                 goto out;
501
502         for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) {
503                 struct net_device *dev = sprt->dst.dev;
504
505                 if (oif) {
506                         if (dev->ifindex == oif)
507                                 return sprt;
508                         if (dev->flags & IFF_LOOPBACK) {
509                                 if (!sprt->rt6i_idev ||
510                                     sprt->rt6i_idev->dev->ifindex != oif) {
511                                         if (flags & RT6_LOOKUP_F_IFACE)
512                                                 continue;
513                                         if (local &&
514                                             local->rt6i_idev->dev->ifindex == oif)
515                                                 continue;
516                                 }
517                                 local = sprt;
518                         }
519                 } else {
520                         if (ipv6_chk_addr(net, saddr, dev,
521                                           flags & RT6_LOOKUP_F_IFACE))
522                                 return sprt;
523                 }
524         }
525
526         if (oif) {
527                 if (local)
528                         return local;
529
530                 if (flags & RT6_LOOKUP_F_IFACE)
531                         return net->ipv6.ip6_null_entry;
532         }
533 out:
534         return rt;
535 }
536
537 #ifdef CONFIG_IPV6_ROUTER_PREF
538 struct __rt6_probe_work {
539         struct work_struct work;
540         struct in6_addr target;
541         struct net_device *dev;
542 };
543
544 static void rt6_probe_deferred(struct work_struct *w)
545 {
546         struct in6_addr mcaddr;
547         struct __rt6_probe_work *work =
548                 container_of(w, struct __rt6_probe_work, work);
549
550         addrconf_addr_solict_mult(&work->target, &mcaddr);
551         ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
552         dev_put(work->dev);
553         kfree(work);
554 }
555
556 static void rt6_probe(struct rt6_info *rt)
557 {
558         struct __rt6_probe_work *work;
559         struct neighbour *neigh;
560         /*
561          * Okay, this does not seem to be appropriate
562          * for now; however, we need to check if it
563          * is really so; aka Router Reachability Probing.
564          *
565          * A Router Reachability Probe MUST be rate-limited
566          * to no more than one per minute.
567          */
568         if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
569                 return;
570         rcu_read_lock_bh();
571         neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
572         if (neigh) {
573                 if (neigh->nud_state & NUD_VALID)
574                         goto out;
575
576                 work = NULL;
577                 write_lock(&neigh->lock);
578                 if (!(neigh->nud_state & NUD_VALID) &&
579                     time_after(jiffies,
580                                neigh->updated +
581                                rt->rt6i_idev->cnf.rtr_probe_interval)) {
582                         work = kmalloc(sizeof(*work), GFP_ATOMIC);
583                         if (work)
584                                 __neigh_set_probe_once(neigh);
585                 }
586                 write_unlock(&neigh->lock);
587         } else {
588                 work = kmalloc(sizeof(*work), GFP_ATOMIC);
589         }
590
591         if (work) {
592                 INIT_WORK(&work->work, rt6_probe_deferred);
593                 work->target = rt->rt6i_gateway;
594                 dev_hold(rt->dst.dev);
595                 work->dev = rt->dst.dev;
596                 schedule_work(&work->work);
597         }
598
599 out:
600         rcu_read_unlock_bh();
601 }
602 #else
603 static inline void rt6_probe(struct rt6_info *rt)
604 {
605 }
606 #endif
607
608 /*
609  * Default Router Selection (RFC 2461 6.3.6)
610  */
611 static inline int rt6_check_dev(struct rt6_info *rt, int oif)
612 {
613         struct net_device *dev = rt->dst.dev;
614         if (!oif || dev->ifindex == oif)
615                 return 2;
616         if ((dev->flags & IFF_LOOPBACK) &&
617             rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
618                 return 1;
619         return 0;
620 }
621
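/* Grade the reachability of the route's nexthop for rt6_score_route().
 * Routes without a gateway nexthop always succeed; for the rest the
 * result depends on the neighbour entry's NUD state and on whether
 * CONFIG_IPV6_ROUTER_PREF is enabled.
 */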
622 static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt)
623 {
624         struct neighbour *neigh;
625         enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
626
627         if (rt->rt6i_flags & RTF_NONEXTHOP ||
628             !(rt->rt6i_flags & RTF_GATEWAY))
629                 return RT6_NUD_SUCCEED;
630
631         rcu_read_lock_bh();
632         neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
633         if (neigh) {
634                 read_lock(&neigh->lock);
635                 if (neigh->nud_state & NUD_VALID)
636                         ret = RT6_NUD_SUCCEED;
637 #ifdef CONFIG_IPV6_ROUTER_PREF
638                 else if (!(neigh->nud_state & NUD_FAILED))
639                         ret = RT6_NUD_SUCCEED;
640                 else
641                         ret = RT6_NUD_FAIL_PROBE;
642 #endif
643                 read_unlock(&neigh->lock);
644         } else {
645                 ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
646                       RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
647         }
648         rcu_read_unlock_bh();
649
650         return ret;
651 }
652
653 static int rt6_score_route(struct rt6_info *rt, int oif,
654                            int strict)
655 {
656         int m;
657
658         m = rt6_check_dev(rt, oif);
659         if (!m && (strict & RT6_LOOKUP_F_IFACE))
660                 return RT6_NUD_FAIL_HARD;
661 #ifdef CONFIG_IPV6_ROUTER_PREF
662         m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
663 #endif
664         if (strict & RT6_LOOKUP_F_REACHABLE) {
665                 int n = rt6_check_neigh(rt);
666                 if (n < 0)
667                         return n;
668         }
669         return m;
670 }
671
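/* Score rt and, if it beats the current best score in *mpri, make it the
 * new match.  Link-down, expired and hard-failing routes are skipped; an
 * RT6_NUD_FAIL_DO_RR score is treated as the lowest valid score and, if
 * that route ends up selected, *do_rr tells the caller to advance the
 * round-robin pointer.
 */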
672 static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
673                                    int *mpri, struct rt6_info *match,
674                                    bool *do_rr)
675 {
676         int m;
677         bool match_do_rr = false;
678         struct inet6_dev *idev = rt->rt6i_idev;
679         struct net_device *dev = rt->dst.dev;
680
681         if (dev && !netif_carrier_ok(dev) &&
682             idev->cnf.ignore_routes_with_linkdown &&
683             !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
684                 goto out;
685
686         if (rt6_check_expired(rt))
687                 goto out;
688
689         m = rt6_score_route(rt, oif, strict);
690         if (m == RT6_NUD_FAIL_DO_RR) {
691                 match_do_rr = true;
692                 m = 0; /* lowest valid score */
693         } else if (m == RT6_NUD_FAIL_HARD) {
694                 goto out;
695         }
696
697         if (strict & RT6_LOOKUP_F_REACHABLE)
698                 rt6_probe(rt);
699
700         /* note that m can be RT6_NUD_FAIL_PROBE at this point */
701         if (m > *mpri) {
702                 *do_rr = match_do_rr;
703                 *mpri = m;
704                 match = rt;
705         }
706 out:
707         return match;
708 }
709
710 static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
711                                      struct rt6_info *rr_head,
712                                      u32 metric, int oif, int strict,
713                                      bool *do_rr)
714 {
715         struct rt6_info *rt, *match, *cont;
716         int mpri = -1;
717
718         match = NULL;
719         cont = NULL;
720         for (rt = rr_head; rt; rt = rt->dst.rt6_next) {
721                 if (rt->rt6i_metric != metric) {
722                         cont = rt;
723                         break;
724                 }
725
726                 match = find_match(rt, oif, strict, &mpri, match, do_rr);
727         }
728
729         for (rt = fn->leaf; rt && rt != rr_head; rt = rt->dst.rt6_next) {
730                 if (rt->rt6i_metric != metric) {
731                         cont = rt;
732                         break;
733                 }
734
735                 match = find_match(rt, oif, strict, &mpri, match, do_rr);
736         }
737
738         if (match || !cont)
739                 return match;
740
741         for (rt = cont; rt; rt = rt->dst.rt6_next)
742                 match = find_match(rt, oif, strict, &mpri, match, do_rr);
743
744         return match;
745 }
746
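/* Pick the best route for this fib6_node, starting the scan at the
 * round-robin pointer (fn->rr_ptr).  When the winning score asked for
 * round-robin, advance rr_ptr to the next route of the same metric so
 * that equally preferable routers take turns.
 */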
747 static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
748 {
749         struct rt6_info *match, *rt0;
750         struct net *net;
751         bool do_rr = false;
752
753         rt0 = fn->rr_ptr;
754         if (!rt0)
755                 fn->rr_ptr = rt0 = fn->leaf;
756
757         match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict,
758                              &do_rr);
759
760         if (do_rr) {
761                 struct rt6_info *next = rt0->dst.rt6_next;
762
763                 /* no entries matched; do round-robin */
764                 if (!next || next->rt6i_metric != rt0->rt6i_metric)
765                         next = fn->leaf;
766
767                 if (next != rt0)
768                         fn->rr_ptr = next;
769         }
770
771         net = dev_net(rt0->dst.dev);
772         return match ? match : net->ipv6.ip6_null_entry;
773 }
774
775 static bool rt6_is_gw_or_nonexthop(const struct rt6_info *rt)
776 {
777         return (rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY));
778 }
779
780 #ifdef CONFIG_IPV6_ROUTE_INFO
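/* Process a Route Information option received in a Router Advertisement
 * (RFC 4191): validate the prefix and preference, then add, refresh or
 * delete the corresponding RTF_ROUTEINFO route for the advertising router.
 */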
781 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
782                   const struct in6_addr *gwaddr)
783 {
784         struct net *net = dev_net(dev);
785         struct route_info *rinfo = (struct route_info *) opt;
786         struct in6_addr prefix_buf, *prefix;
787         unsigned int pref;
788         unsigned long lifetime;
789         struct rt6_info *rt;
790
791         if (len < sizeof(struct route_info)) {
792                 return -EINVAL;
793         }
794
795         /* Sanity check for prefix_len and length */
796         if (rinfo->length > 3) {
797                 return -EINVAL;
798         } else if (rinfo->prefix_len > 128) {
799                 return -EINVAL;
800         } else if (rinfo->prefix_len > 64) {
801                 if (rinfo->length < 2) {
802                         return -EINVAL;
803                 }
804         } else if (rinfo->prefix_len > 0) {
805                 if (rinfo->length < 1) {
806                         return -EINVAL;
807                 }
808         }
809
810         pref = rinfo->route_pref;
811         if (pref == ICMPV6_ROUTER_PREF_INVALID)
812                 return -EINVAL;
813
814         lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
815
816         if (rinfo->length == 3)
817                 prefix = (struct in6_addr *)rinfo->prefix;
818         else {
819                 /* this function is safe */
820                 ipv6_addr_prefix(&prefix_buf,
821                                  (struct in6_addr *)rinfo->prefix,
822                                  rinfo->prefix_len);
823                 prefix = &prefix_buf;
824         }
825
826         if (rinfo->prefix_len == 0)
827                 rt = rt6_get_dflt_router(gwaddr, dev);
828         else
829                 rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
830                                         gwaddr, dev);
831
832         if (rt && !lifetime) {
833                 ip6_del_rt(rt);
834                 rt = NULL;
835         }
836
837         if (!rt && lifetime)
838                 rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
839                                         dev, pref);
840         else if (rt)
841                 rt->rt6i_flags = RTF_ROUTEINFO |
842                                  (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
843
844         if (rt) {
845                 if (!addrconf_finite_timeout(lifetime))
846                         rt6_clean_expires(rt);
847                 else
848                         rt6_set_expires(rt, jiffies + HZ * lifetime);
849
850                 ip6_rt_put(rt);
851         }
852         return 0;
853 }
854 #endif
855
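/* Walk back up the fib6 tree from fn, re-doing the source address
 * (subtree) lookup where needed, until a node that carries routes
 * (RTN_RTINFO) is found.  Returns NULL once the tree root is reached.
 */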
856 static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
857                                         struct in6_addr *saddr)
858 {
859         struct fib6_node *pn;
860         while (1) {
861                 if (fn->fn_flags & RTN_TL_ROOT)
862                         return NULL;
863                 pn = fn->parent;
864                 if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn)
865                         fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr);
866                 else
867                         fn = pn;
868                 if (fn->fn_flags & RTN_RTINFO)
869                         return fn;
870         }
871 }
872
873 static struct rt6_info *ip6_pol_route_lookup(struct net *net,
874                                              struct fib6_table *table,
875                                              struct flowi6 *fl6, int flags)
876 {
877         struct fib6_node *fn;
878         struct rt6_info *rt;
879
880         read_lock_bh(&table->tb6_lock);
881         fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
882 restart:
883         rt = fn->leaf;
884         rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags);
885         if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0)
886                 rt = rt6_multipath_select(rt, fl6, fl6->flowi6_oif, flags);
887         if (rt == net->ipv6.ip6_null_entry) {
888                 fn = fib6_backtrack(fn, &fl6->saddr);
889                 if (fn)
890                         goto restart;
891         }
892         dst_use(&rt->dst, jiffies);
893         read_unlock_bh(&table->tb6_lock);
894
895         trace_fib6_table_lookup(net, rt, table->tb6_id, fl6);
896
897         return rt;
898
899 }
900
901 struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
902                                     int flags)
903 {
904         return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup);
905 }
906 EXPORT_SYMBOL_GPL(ip6_route_lookup);
907
908 struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
909                             const struct in6_addr *saddr, int oif, int strict)
910 {
911         struct flowi6 fl6 = {
912                 .flowi6_oif = oif,
913                 .daddr = *daddr,
914         };
915         struct dst_entry *dst;
916         int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
917
918         if (saddr) {
919                 memcpy(&fl6.saddr, saddr, sizeof(*saddr));
920                 flags |= RT6_LOOKUP_F_HAS_SADDR;
921         }
922
923         dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup);
924         if (dst->error == 0)
925                 return (struct rt6_info *) dst;
926
927         dst_release(dst);
928
929         return NULL;
930 }
931 EXPORT_SYMBOL(rt6_lookup);
932
933 /* ip6_ins_rt is called with FREE table->tb6_lock.
934  * It takes a new route entry; if the addition fails for any reason,
935  * the route is released.
936  * The caller must hold dst before calling it.
937  */
938
939 static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info,
940                         struct mx6_config *mxc,
941                         struct netlink_ext_ack *extack)
942 {
943         int err;
944         struct fib6_table *table;
945
946         table = rt->rt6i_table;
947         write_lock_bh(&table->tb6_lock);
948         err = fib6_add(&table->tb6_root, rt, info, mxc, extack);
949         write_unlock_bh(&table->tb6_lock);
950
951         return err;
952 }
953
954 int ip6_ins_rt(struct rt6_info *rt)
955 {
956         struct nl_info info = { .nl_net = dev_net(rt->dst.dev), };
957         struct mx6_config mxc = { .mx = NULL, };
958
959         /* Hold dst to account for the reference from the fib6 tree */
960         dst_hold(&rt->dst);
961         return __ip6_ins_rt(rt, &info, &mxc, NULL);
962 }
963
964 static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
965                                            const struct in6_addr *daddr,
966                                            const struct in6_addr *saddr)
967 {
968         struct rt6_info *rt;
969
970         /*
971          *      Clone the route.
972          */
973
974         if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
975                 ort = (struct rt6_info *)ort->dst.from;
976
977         rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev, 0);
978
979         if (!rt)
980                 return NULL;
981
982         ip6_rt_copy_init(rt, ort);
983         rt->rt6i_flags |= RTF_CACHE;
984         rt->rt6i_metric = 0;
985         rt->dst.flags |= DST_HOST;
986         rt->rt6i_dst.addr = *daddr;
987         rt->rt6i_dst.plen = 128;
988
989         if (!rt6_is_gw_or_nonexthop(ort)) {
990                 if (ort->rt6i_dst.plen != 128 &&
991                     ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
992                         rt->rt6i_flags |= RTF_ANYCAST;
993 #ifdef CONFIG_IPV6_SUBTREES
994                 if (rt->rt6i_src.plen && saddr) {
995                         rt->rt6i_src.addr = *saddr;
996                         rt->rt6i_src.plen = 128;
997                 }
998 #endif
999         }
1000
1001         return rt;
1002 }
1003
1004 static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
1005 {
1006         struct rt6_info *pcpu_rt;
1007
1008         pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev),
1009                                   rt->dst.dev, rt->dst.flags);
1010
1011         if (!pcpu_rt)
1012                 return NULL;
1013         ip6_rt_copy_init(pcpu_rt, rt);
1014         pcpu_rt->rt6i_protocol = rt->rt6i_protocol;
1015         pcpu_rt->rt6i_flags |= RTF_PCPU;
1016         return pcpu_rt;
1017 }
1018
1019 /* It should be called with read_lock_bh(&tb6_lock) acquired */
1020 static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
1021 {
1022         struct rt6_info *pcpu_rt, **p;
1023
1024         p = this_cpu_ptr(rt->rt6i_pcpu);
1025         pcpu_rt = *p;
1026
1027         if (pcpu_rt) {
1028                 dst_hold(&pcpu_rt->dst);
1029                 rt6_dst_from_metrics_check(pcpu_rt);
1030         }
1031         return pcpu_rt;
1032 }
1033
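/* Allocate a per-cpu copy of rt and publish it with cmpxchg().  If someone
 * else installed a copy first, use that one instead; if rt has already been
 * removed from the fib6 tree, fall back to rt itself and let the next
 * dst_check() trigger a re-lookup.
 */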
1034 static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
1035 {
1036         struct fib6_table *table = rt->rt6i_table;
1037         struct rt6_info *pcpu_rt, *prev, **p;
1038
1039         pcpu_rt = ip6_rt_pcpu_alloc(rt);
1040         if (!pcpu_rt) {
1041                 struct net *net = dev_net(rt->dst.dev);
1042
1043                 dst_hold(&net->ipv6.ip6_null_entry->dst);
1044                 return net->ipv6.ip6_null_entry;
1045         }
1046
1047         read_lock_bh(&table->tb6_lock);
1048         if (rt->rt6i_pcpu) {
1049                 p = this_cpu_ptr(rt->rt6i_pcpu);
1050                 prev = cmpxchg(p, NULL, pcpu_rt);
1051                 if (prev) {
1052                         /* If someone did it before us, return prev instead */
1053                         dst_release_immediate(&pcpu_rt->dst);
1054                         pcpu_rt = prev;
1055                 }
1056         } else {
1057                 /* rt has been removed from the fib6 tree
1058                  * before we have a chance to acquire the read_lock.
1059                  * In this case, don't bother to create a pcpu rt
1060                  * since rt is going away anyway.  The next
1061                  * dst_check() will trigger a re-lookup.
1062                  */
1063                 dst_release_immediate(&pcpu_rt->dst);
1064                 pcpu_rt = rt;
1065         }
1066         dst_hold(&pcpu_rt->dst);
1067         rt6_dst_from_metrics_check(pcpu_rt);
1068         read_unlock_bh(&table->tb6_lock);
1069         return pcpu_rt;
1070 }
1071
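/* Core policy routing lookup.  The selected fib6 entry is returned in one
 * of three forms: as-is for the null entry and RTF_CACHE routes, as a
 * throw-away RTF_CACHE clone on the uncached list when the caller set
 * FLOWI_FLAG_KNOWN_NH on a gatewayless route, or otherwise as a per-cpu
 * copy of the entry.
 */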
1072 struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
1073                                int oif, struct flowi6 *fl6, int flags)
1074 {
1075         struct fib6_node *fn, *saved_fn;
1076         struct rt6_info *rt;
1077         int strict = 0;
1078
1079         strict |= flags & RT6_LOOKUP_F_IFACE;
1080         strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
1081         if (net->ipv6.devconf_all->forwarding == 0)
1082                 strict |= RT6_LOOKUP_F_REACHABLE;
1083
1084         read_lock_bh(&table->tb6_lock);
1085
1086         fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1087         saved_fn = fn;
1088
1089         if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
1090                 oif = 0;
1091
1092 redo_rt6_select:
1093         rt = rt6_select(fn, oif, strict);
1094         if (rt->rt6i_nsiblings)
1095                 rt = rt6_multipath_select(rt, fl6, oif, strict);
1096         if (rt == net->ipv6.ip6_null_entry) {
1097                 fn = fib6_backtrack(fn, &fl6->saddr);
1098                 if (fn)
1099                         goto redo_rt6_select;
1100                 else if (strict & RT6_LOOKUP_F_REACHABLE) {
1101                         /* also consider unreachable route */
1102                         strict &= ~RT6_LOOKUP_F_REACHABLE;
1103                         fn = saved_fn;
1104                         goto redo_rt6_select;
1105                 }
1106         }
1107
1108
1109         if (rt == net->ipv6.ip6_null_entry || (rt->rt6i_flags & RTF_CACHE)) {
1110                 dst_use(&rt->dst, jiffies);
1111                 read_unlock_bh(&table->tb6_lock);
1112
1113                 rt6_dst_from_metrics_check(rt);
1114
1115                 trace_fib6_table_lookup(net, rt, table->tb6_id, fl6);
1116                 return rt;
1117         } else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
1118                             !(rt->rt6i_flags & RTF_GATEWAY))) {
1119                 /* Create an RTF_CACHE clone which will not be
1120                  * owned by the fib6 tree.  It is for the special case where
1121                  * the daddr in the skb during the neighbor look-up is different
1122                  * from the fl6->daddr used to look up the route here.
1123                  */
1124
1125                 struct rt6_info *uncached_rt;
1126
1127                 dst_use(&rt->dst, jiffies);
1128                 read_unlock_bh(&table->tb6_lock);
1129
1130                 uncached_rt = ip6_rt_cache_alloc(rt, &fl6->daddr, NULL);
1131                 dst_release(&rt->dst);
1132
1133                 if (uncached_rt) {
1134                         /* Uncached_rt's refcnt is taken during ip6_rt_cache_alloc();
1135                          * no need for another dst_hold().
1136                          */
1137                         rt6_uncached_list_add(uncached_rt);
1138                 } else {
1139                         uncached_rt = net->ipv6.ip6_null_entry;
1140                         dst_hold(&uncached_rt->dst);
1141                 }
1142
1143                 trace_fib6_table_lookup(net, uncached_rt, table->tb6_id, fl6);
1144                 return uncached_rt;
1145
1146         } else {
1147                 /* Get a percpu copy */
1148
1149                 struct rt6_info *pcpu_rt;
1150
1151                 rt->dst.lastuse = jiffies;
1152                 rt->dst.__use++;
1153                 pcpu_rt = rt6_get_pcpu_route(rt);
1154
1155                 if (pcpu_rt) {
1156                         read_unlock_bh(&table->tb6_lock);
1157                 } else {
1158                         /* We have to do the read_unlock first
1159                          * because rt6_make_pcpu_route() may trigger
1160                          * ip6_dst_gc() which will take the write_lock.
1161                          */
1162                         dst_hold(&rt->dst);
1163                         read_unlock_bh(&table->tb6_lock);
1164                         pcpu_rt = rt6_make_pcpu_route(rt);
1165                         dst_release(&rt->dst);
1166                 }
1167
1168                 trace_fib6_table_lookup(net, pcpu_rt, table->tb6_id, fl6);
1169                 return pcpu_rt;
1170
1171         }
1172 }
1173 EXPORT_SYMBOL_GPL(ip6_pol_route);
1174
1175 static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
1176                                             struct flowi6 *fl6, int flags)
1177 {
1178         return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
1179 }
1180
1181 struct dst_entry *ip6_route_input_lookup(struct net *net,
1182                                          struct net_device *dev,
1183                                          struct flowi6 *fl6, int flags)
1184 {
1185         if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
1186                 flags |= RT6_LOOKUP_F_IFACE;
1187
1188         return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input);
1189 }
1190 EXPORT_SYMBOL_GPL(ip6_route_input_lookup);
1191
1192 void ip6_route_input(struct sk_buff *skb)
1193 {
1194         const struct ipv6hdr *iph = ipv6_hdr(skb);
1195         struct net *net = dev_net(skb->dev);
1196         int flags = RT6_LOOKUP_F_HAS_SADDR;
1197         struct ip_tunnel_info *tun_info;
1198         struct flowi6 fl6 = {
1199                 .flowi6_iif = skb->dev->ifindex,
1200                 .daddr = iph->daddr,
1201                 .saddr = iph->saddr,
1202                 .flowlabel = ip6_flowinfo(iph),
1203                 .flowi6_mark = skb->mark,
1204                 .flowi6_proto = iph->nexthdr,
1205         };
1206
1207         tun_info = skb_tunnel_info(skb);
1208         if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
1209                 fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
1210         skb_dst_drop(skb);
1211         skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
1212 }
1213
1214 static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
1215                                              struct flowi6 *fl6, int flags)
1216 {
1217         return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
1218 }
1219
1220 struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
1221                                          struct flowi6 *fl6, int flags)
1222 {
1223         bool any_src;
1224
1225         if (rt6_need_strict(&fl6->daddr)) {
1226                 struct dst_entry *dst;
1227
1228                 dst = l3mdev_link_scope_lookup(net, fl6);
1229                 if (dst)
1230                         return dst;
1231         }
1232
1233         fl6->flowi6_iif = LOOPBACK_IFINDEX;
1234
1235         any_src = ipv6_addr_any(&fl6->saddr);
1236         if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
1237             (fl6->flowi6_oif && any_src))
1238                 flags |= RT6_LOOKUP_F_IFACE;
1239
1240         if (!any_src)
1241                 flags |= RT6_LOOKUP_F_HAS_SADDR;
1242         else if (sk)
1243                 flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
1244
1245         return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
1246 }
1247 EXPORT_SYMBOL_GPL(ip6_route_output_flags);
1248
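/* Replace dst_orig with a blackhole dst that silently discards all traffic
 * while preserving the original route's metrics, gateway and routing keys.
 * The reference on dst_orig is dropped.
 */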
1249 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
1250 {
1251         struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
1252         struct net_device *loopback_dev = net->loopback_dev;
1253         struct dst_entry *new = NULL;
1254
1255         rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
1256                        DST_OBSOLETE_NONE, 0);
1257         if (rt) {
1258                 rt6_info_init(rt);
1259
1260                 new = &rt->dst;
1261                 new->__use = 1;
1262                 new->input = dst_discard;
1263                 new->output = dst_discard_out;
1264
1265                 dst_copy_metrics(new, &ort->dst);
1266
1267                 rt->rt6i_idev = in6_dev_get(loopback_dev);
1268                 rt->rt6i_gateway = ort->rt6i_gateway;
1269                 rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
1270                 rt->rt6i_metric = 0;
1271
1272                 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
1273 #ifdef CONFIG_IPV6_SUBTREES
1274                 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1275 #endif
1276         }
1277
1278         dst_release(dst_orig);
1279         return new ? new : ERR_PTR(-ENOMEM);
1280 }
1281
1282 /*
1283  *      Destination cache support functions
1284  */
1285
1286 static void rt6_dst_from_metrics_check(struct rt6_info *rt)
1287 {
1288         if (rt->dst.from &&
1289             dst_metrics_ptr(&rt->dst) != dst_metrics_ptr(rt->dst.from))
1290                 dst_init_metrics(&rt->dst, dst_metrics_ptr(rt->dst.from), true);
1291 }
1292
1293 static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
1294 {
1295         if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
1296                 return NULL;
1297
1298         if (rt6_check_expired(rt))
1299                 return NULL;
1300
1301         return &rt->dst;
1302 }
1303
1304 static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie)
1305 {
1306         if (!__rt6_check_expired(rt) &&
1307             rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
1308             rt6_check((struct rt6_info *)(rt->dst.from), cookie))
1309                 return &rt->dst;
1310         else
1311                 return NULL;
1312 }
1313
1314 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
1315 {
1316         struct rt6_info *rt;
1317
1318         rt = (struct rt6_info *) dst;
1319
1320         /* All IPv6 dsts are created with ->obsolete set to the value
1321          * DST_OBSOLETE_FORCE_CHK, which forces validation calls down
1322          * into this function always.
1323          */
1324
1325         rt6_dst_from_metrics_check(rt);
1326
1327         if (rt->rt6i_flags & RTF_PCPU ||
1328             (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->dst.from))
1329                 return rt6_dst_from_check(rt, cookie);
1330         else
1331                 return rt6_check(rt, cookie);
1332 }
1333
1334 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
1335 {
1336         struct rt6_info *rt = (struct rt6_info *) dst;
1337
1338         if (rt) {
1339                 if (rt->rt6i_flags & RTF_CACHE) {
1340                         if (rt6_check_expired(rt)) {
1341                                 ip6_del_rt(rt);
1342                                 dst = NULL;
1343                         }
1344                 } else {
1345                         dst_release(dst);
1346                         dst = NULL;
1347                 }
1348         }
1349         return dst;
1350 }
1351
1352 static void ip6_link_failure(struct sk_buff *skb)
1353 {
1354         struct rt6_info *rt;
1355
1356         icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
1357
1358         rt = (struct rt6_info *) skb_dst(skb);
1359         if (rt) {
1360                 if (rt->rt6i_flags & RTF_CACHE) {
1361                         if (dst_hold_safe(&rt->dst))
1362                                 ip6_del_rt(rt);
1363                 } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
1364                         rt->rt6i_node->fn_sernum = -1;
1365                 }
1366         }
1367 }
1368
1369 static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
1370 {
1371         struct net *net = dev_net(rt->dst.dev);
1372
1373         rt->rt6i_flags |= RTF_MODIFIED;
1374         rt->rt6i_pmtu = mtu;
1375         rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
1376 }
1377
1378 static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
1379 {
1380         return !(rt->rt6i_flags & RTF_CACHE) &&
1381                 (rt->rt6i_flags & RTF_PCPU || rt->rt6i_node);
1382 }
1383
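/* Record a reduced path MTU on the destination cache: update the route in
 * place when it is already an RTF_CACHE entry (or cannot be cloned),
 * otherwise insert a new RTF_CACHE clone carrying the new MTU.
 */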
1384 static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
1385                                  const struct ipv6hdr *iph, u32 mtu)
1386 {
1387         const struct in6_addr *daddr, *saddr;
1388         struct rt6_info *rt6 = (struct rt6_info *)dst;
1389
1390         if (rt6->rt6i_flags & RTF_LOCAL)
1391                 return;
1392
1393         if (dst_metric_locked(dst, RTAX_MTU))
1394                 return;
1395
1396         if (iph) {
1397                 daddr = &iph->daddr;
1398                 saddr = &iph->saddr;
1399         } else if (sk) {
1400                 daddr = &sk->sk_v6_daddr;
1401                 saddr = &inet6_sk(sk)->saddr;
1402         } else {
1403                 daddr = NULL;
1404                 saddr = NULL;
1405         }
1406         dst_confirm_neigh(dst, daddr);
1407         mtu = max_t(u32, mtu, IPV6_MIN_MTU);
1408         if (mtu >= dst_mtu(dst))
1409                 return;
1410
1411         if (!rt6_cache_allowed_for_pmtu(rt6)) {
1412                 rt6_do_update_pmtu(rt6, mtu);
1413         } else if (daddr) {
1414                 struct rt6_info *nrt6;
1415
1416                 nrt6 = ip6_rt_cache_alloc(rt6, daddr, saddr);
1417                 if (nrt6) {
1418                         rt6_do_update_pmtu(nrt6, mtu);
1419
1420                         /* ip6_ins_rt(nrt6) will bump the
1421                          * rt6->rt6i_node->fn_sernum
1422                          * which will fail the next rt6_check() and
1423                          * invalidate the sk->sk_dst_cache.
1424                          */
1425                         ip6_ins_rt(nrt6);
1426                         /* Release the reference taken in
1427                          * ip6_rt_cache_alloc()
1428                          */
1429                         dst_release(&nrt6->dst);
1430                 }
1431         }
1432 }
1433
1434 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
1435                                struct sk_buff *skb, u32 mtu)
1436 {
1437         __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
1438 }
1439
1440 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
1441                      int oif, u32 mark, kuid_t uid)
1442 {
1443         const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
1444         struct dst_entry *dst;
1445         struct flowi6 fl6;
1446
1447         memset(&fl6, 0, sizeof(fl6));
1448         fl6.flowi6_oif = oif;
1449         fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
1450         fl6.daddr = iph->daddr;
1451         fl6.saddr = iph->saddr;
1452         fl6.flowlabel = ip6_flowinfo(iph);
1453         fl6.flowi6_uid = uid;
1454
1455         dst = ip6_route_output(net, NULL, &fl6);
1456         if (!dst->error)
1457                 __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
1458         dst_release(dst);
1459 }
1460 EXPORT_SYMBOL_GPL(ip6_update_pmtu);
1461
1462 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
1463 {
1464         struct dst_entry *dst;
1465
1466         ip6_update_pmtu(skb, sock_net(sk), mtu,
1467                         sk->sk_bound_dev_if, sk->sk_mark, sk->sk_uid);
1468
1469         dst = __sk_dst_get(sk);
1470         if (!dst || !dst->obsolete ||
1471             dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
1472                 return;
1473
1474         bh_lock_sock(sk);
1475         if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
1476                 ip6_datagram_dst_update(sk, false);
1477         bh_unlock_sock(sk);
1478 }
1479 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
1480
1481 /* Handle redirects */
1482 struct ip6rd_flowi {
1483         struct flowi6 fl6;
1484         struct in6_addr gateway;
1485 };
1486
1487 static struct rt6_info *__ip6_route_redirect(struct net *net,
1488                                              struct fib6_table *table,
1489                                              struct flowi6 *fl6,
1490                                              int flags)
1491 {
1492         struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
1493         struct rt6_info *rt;
1494         struct fib6_node *fn;
1495
1496         /* Get the "current" route for this destination and
1497          * check if the redirect has come from an appropriate router.
1498          *
1499          * RFC 4861 specifies that redirects should only be
1500          * accepted if they come from the nexthop to the target.
1501          * Due to the way the routes are chosen, this notion
1502          * is a bit fuzzy and one might need to check all possible
1503          * routes.
1504          */
1505
1506         read_lock_bh(&table->tb6_lock);
1507         fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1508 restart:
1509         for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1510                 if (rt6_check_expired(rt))
1511                         continue;
1512                 if (rt->dst.error)
1513                         break;
1514                 if (!(rt->rt6i_flags & RTF_GATEWAY))
1515                         continue;
1516                 if (fl6->flowi6_oif != rt->dst.dev->ifindex)
1517                         continue;
1518                 if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
1519                         continue;
1520                 break;
1521         }
1522
1523         if (!rt)
1524                 rt = net->ipv6.ip6_null_entry;
1525         else if (rt->dst.error) {
1526                 rt = net->ipv6.ip6_null_entry;
1527                 goto out;
1528         }
1529
1530         if (rt == net->ipv6.ip6_null_entry) {
1531                 fn = fib6_backtrack(fn, &fl6->saddr);
1532                 if (fn)
1533                         goto restart;
1534         }
1535
1536 out:
1537         dst_hold(&rt->dst);
1538
1539         read_unlock_bh(&table->tb6_lock);
1540
1541         trace_fib6_table_lookup(net, rt, table->tb6_id, fl6);
1542         return rt;
1543 }
1544
1545 static struct dst_entry *ip6_route_redirect(struct net *net,
1546                                         const struct flowi6 *fl6,
1547                                         const struct in6_addr *gateway)
1548 {
1549         int flags = RT6_LOOKUP_F_HAS_SADDR;
1550         struct ip6rd_flowi rdfl;
1551
1552         rdfl.fl6 = *fl6;
1553         rdfl.gateway = *gateway;
1554
1555         return fib6_rule_lookup(net, &rdfl.fl6,
1556                                 flags, __ip6_route_redirect);
1557 }
1558
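/* Handle an ICMPv6 Redirect for the flow described by skb's IPv6 header:
 * look up the route currently used for the destination, constrained to
 * the advertising router as gateway, and apply the redirect to it.
 */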
1559 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
1560                   kuid_t uid)
1561 {
1562         const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
1563         struct dst_entry *dst;
1564         struct flowi6 fl6;
1565
1566         memset(&fl6, 0, sizeof(fl6));
1567         fl6.flowi6_iif = LOOPBACK_IFINDEX;
1568         fl6.flowi6_oif = oif;
1569         fl6.flowi6_mark = mark;
1570         fl6.daddr = iph->daddr;
1571         fl6.saddr = iph->saddr;
1572         fl6.flowlabel = ip6_flowinfo(iph);
1573         fl6.flowi6_uid = uid;
1574
1575         dst = ip6_route_redirect(net, &fl6, &ipv6_hdr(skb)->saddr);
1576         rt6_do_redirect(dst, NULL, skb);
1577         dst_release(dst);
1578 }
1579 EXPORT_SYMBOL_GPL(ip6_redirect);
1580
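/* Variant of ip6_redirect() for when the redirected packet's header is
 * not available: the destination comes from the rd_msg and the gateway
 * from the redirect's IPv6 source address.
 */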
1581 void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
1582                             u32 mark)
1583 {
1584         const struct ipv6hdr *iph = ipv6_hdr(skb);
1585         const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
1586         struct dst_entry *dst;
1587         struct flowi6 fl6;
1588
1589         memset(&fl6, 0, sizeof(fl6));
1590         fl6.flowi6_iif = LOOPBACK_IFINDEX;
1591         fl6.flowi6_oif = oif;
1592         fl6.flowi6_mark = mark;
1593         fl6.daddr = msg->dest;
1594         fl6.saddr = iph->daddr;
1595         fl6.flowi6_uid = sock_net_uid(net, NULL);
1596
1597         dst = ip6_route_redirect(net, &fl6, &iph->saddr);
1598         rt6_do_redirect(dst, NULL, skb);
1599         dst_release(dst);
1600 }
1601
1602 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
1603 {
1604         ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
1605                      sk->sk_uid);
1606 }
1607 EXPORT_SYMBOL_GPL(ip6_sk_redirect);
1608
1609 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
1610 {
1611         struct net_device *dev = dst->dev;
1612         unsigned int mtu = dst_mtu(dst);
1613         struct net *net = dev_net(dev);
1614
1615         mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
1616
1617         if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
1618                 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
1619
1620         /*
1621          * The maximal non-jumbo IPv6 payload is IPV6_MAXPLEN, and the
1622          * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
1623          * IPV6_MAXPLEN is also valid and means "any MSS,
1624          * rely only on PMTU discovery".
1625          */
1626         if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
1627                 mtu = IPV6_MAXPLEN;
1628         return mtu;
1629 }
1630
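/* Effective MTU for this dst: prefer the cached PMTU, then the RTAX_MTU
 * metric, then the device's IPv6 MTU, clamped to IP6_MAX_MTU and reduced
 * by any lwtunnel encapsulation headroom.
 */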
1631 static unsigned int ip6_mtu(const struct dst_entry *dst)
1632 {
1633         const struct rt6_info *rt = (const struct rt6_info *)dst;
1634         unsigned int mtu = rt->rt6i_pmtu;
1635         struct inet6_dev *idev;
1636
1637         if (mtu)
1638                 goto out;
1639
1640         mtu = dst_metric_raw(dst, RTAX_MTU);
1641         if (mtu)
1642                 goto out;
1643
1644         mtu = IPV6_MIN_MTU;
1645
1646         rcu_read_lock();
1647         idev = __in6_dev_get(dst->dev);
1648         if (idev)
1649                 mtu = idev->cnf.mtu6;
1650         rcu_read_unlock();
1651
1652 out:
1653         mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
1654
1655         return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
1656 }
1657
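/* Allocate a route for ICMPv6 output towards fl6->daddr on dev.  The
 * entry is kept on the uncached list and passed through xfrm_lookup()
 * before being returned.
 */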
1658 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1659                                   struct flowi6 *fl6)
1660 {
1661         struct dst_entry *dst;
1662         struct rt6_info *rt;
1663         struct inet6_dev *idev = in6_dev_get(dev);
1664         struct net *net = dev_net(dev);
1665
1666         if (unlikely(!idev))
1667                 return ERR_PTR(-ENODEV);
1668
1669         rt = ip6_dst_alloc(net, dev, 0);
1670         if (unlikely(!rt)) {
1671                 in6_dev_put(idev);
1672                 dst = ERR_PTR(-ENOMEM);
1673                 goto out;
1674         }
1675
1676         rt->dst.flags |= DST_HOST;
1677         rt->dst.output  = ip6_output;
1678         rt->rt6i_gateway  = fl6->daddr;
1679         rt->rt6i_dst.addr = fl6->daddr;
1680         rt->rt6i_dst.plen = 128;
1681         rt->rt6i_idev     = idev;
1682         dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
1683
1684         /* Add this dst into uncached_list so that rt6_ifdown() can
1685          * properly release the net_device
1686          */
1687         rt6_uncached_list_add(rt);
1688
1689         dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
1690
1691 out:
1692         return dst;
1693 }
1694
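/* dst_ops garbage-collection callback: run the fib6 GC unless the minimum
 * GC interval has not yet elapsed and the entry count is within
 * ip6_rt_max_size; return whether the table is still over that limit.
 */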
1695 static int ip6_dst_gc(struct dst_ops *ops)
1696 {
1697         struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
1698         int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
1699         int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
1700         int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
1701         int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
1702         unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
1703         int entries;
1704
1705         entries = dst_entries_get_fast(ops);
1706         if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
1707             entries <= rt_max_size)
1708                 goto out;
1709
1710         net->ipv6.ip6_rt_gc_expire++;
1711         fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
1712         entries = dst_entries_get_slow(ops);
1713         if (entries < ops->gc_thresh)
1714                 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
1715 out:
1716         net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
1717         return entries > rt_max_size;
1718 }
1719
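/* Convert the RTAX_* metrics carried in cfg->fc_mx (an RTA_METRICS nest)
 * into the metrics array expected by route insertion, validating each
 * attribute on the way.
 */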
1720 static int ip6_convert_metrics(struct mx6_config *mxc,
1721                                const struct fib6_config *cfg)
1722 {
1723         bool ecn_ca = false;
1724         struct nlattr *nla;
1725         int remaining;
1726         u32 *mp;
1727
1728         if (!cfg->fc_mx)
1729                 return 0;
1730
1731         mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
1732         if (unlikely(!mp))
1733                 return -ENOMEM;
1734
1735         nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
1736                 int type = nla_type(nla);
1737                 u32 val;
1738
1739                 if (!type)
1740                         continue;
1741                 if (unlikely(type > RTAX_MAX))
1742                         goto err;
1743
1744                 if (type == RTAX_CC_ALGO) {
1745                         char tmp[TCP_CA_NAME_MAX];
1746
1747                         nla_strlcpy(tmp, nla, sizeof(tmp));
1748                         val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
1749                         if (val == TCP_CA_UNSPEC)
1750                                 goto err;
1751                 } else {
1752                         val = nla_get_u32(nla);
1753                 }
1754                 if (type == RTAX_HOPLIMIT && val > 255)
1755                         val = 255;
1756                 if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
1757                         goto err;
1758
1759                 mp[type - 1] = val;
1760                 __set_bit(type - 1, mxc->mx_valid);
1761         }
1762
1763         if (ecn_ca) {
1764                 __set_bit(RTAX_FEATURES - 1, mxc->mx_valid);
1765                 mp[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
1766         }
1767
1768         mxc->mx = mp;
1769         return 0;
1770  err:
1771         kfree(mp);
1772         return -EINVAL;
1773 }
1774
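/* Look up the nexthop gateway in the table given by cfg->fc_table.
 * Returns NULL if the table does not exist or the lookup only hits the
 * null entry, so the caller can fall back to a full lookup.
 */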
1775 static struct rt6_info *ip6_nh_lookup_table(struct net *net,
1776                                             struct fib6_config *cfg,
1777                                             const struct in6_addr *gw_addr)
1778 {
1779         struct flowi6 fl6 = {
1780                 .flowi6_oif = cfg->fc_ifindex,
1781                 .daddr = *gw_addr,
1782                 .saddr = cfg->fc_prefsrc,
1783         };
1784         struct fib6_table *table;
1785         struct rt6_info *rt;
1786         int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_IGNORE_LINKSTATE;
1787
1788         table = fib6_get_table(net, cfg->fc_table);
1789         if (!table)
1790                 return NULL;
1791
1792         if (!ipv6_addr_any(&cfg->fc_prefsrc))
1793                 flags |= RT6_LOOKUP_F_HAS_SADDR;
1794
1795         rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, flags);
1796
1797         /* if table lookup failed, fall back to full lookup */
1798         if (rt == net->ipv6.ip6_null_entry) {
1799                 ip6_rt_put(rt);
1800                 rt = NULL;
1801         }
1802
1803         return rt;
1804 }
1805
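/* Build an rt6_info from a fib6_config: validate the flags, prefix
 * lengths, device, gateway and preferred source address, but do not
 * insert the route into the FIB.  Returns an ERR_PTR() on failure.
 */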
1806 static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg,
1807                                               struct netlink_ext_ack *extack)
1808 {
1809         struct net *net = cfg->fc_nlinfo.nl_net;
1810         struct rt6_info *rt = NULL;
1811         struct net_device *dev = NULL;
1812         struct inet6_dev *idev = NULL;
1813         struct fib6_table *table;
1814         int addr_type;
1815         int err = -EINVAL;
1816
1817         /* RTF_PCPU is an internal flag; can not be set by userspace */
1818         if (cfg->fc_flags & RTF_PCPU) {
1819                 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
1820                 goto out;
1821         }
1822
1823         if (cfg->fc_flags & RTF_OFFLOAD) {
1824                 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_OFFLOAD");
1825                 goto out;
1826         }
1827
1828         if (cfg->fc_dst_len > 128) {
1829                 NL_SET_ERR_MSG(extack, "Invalid prefix length");
1830                 goto out;
1831         }
1832         if (cfg->fc_src_len > 128) {
1833                 NL_SET_ERR_MSG(extack, "Invalid source address length");
1834                 goto out;
1835         }
1836 #ifndef CONFIG_IPV6_SUBTREES
1837         if (cfg->fc_src_len) {
1838                 NL_SET_ERR_MSG(extack,
1839                                "Specifying source address requires IPV6_SUBTREES to be enabled");
1840                 goto out;
1841         }
1842 #endif
1843         if (cfg->fc_ifindex) {
1844                 err = -ENODEV;
1845                 dev = dev_get_by_index(net, cfg->fc_ifindex);
1846                 if (!dev)
1847                         goto out;
1848                 idev = in6_dev_get(dev);
1849                 if (!idev)
1850                         goto out;
1851         }
1852
1853         if (cfg->fc_metric == 0)
1854                 cfg->fc_metric = IP6_RT_PRIO_USER;
1855
1856         err = -ENOBUFS;
1857         if (cfg->fc_nlinfo.nlh &&
1858             !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
1859                 table = fib6_get_table(net, cfg->fc_table);
1860                 if (!table) {
1861                         pr_warn("NLM_F_CREATE should be specified when creating new route\n");
1862                         table = fib6_new_table(net, cfg->fc_table);
1863                 }
1864         } else {
1865                 table = fib6_new_table(net, cfg->fc_table);
1866         }
1867
1868         if (!table)
1869                 goto out;
1870
1871         rt = ip6_dst_alloc(net, NULL,
1872                            (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);
1873
1874         if (!rt) {
1875                 err = -ENOMEM;
1876                 goto out;
1877         }
1878
1879         if (cfg->fc_flags & RTF_EXPIRES)
1880                 rt6_set_expires(rt, jiffies +
1881                                 clock_t_to_jiffies(cfg->fc_expires));
1882         else
1883                 rt6_clean_expires(rt);
1884
1885         if (cfg->fc_protocol == RTPROT_UNSPEC)
1886                 cfg->fc_protocol = RTPROT_BOOT;
1887         rt->rt6i_protocol = cfg->fc_protocol;
1888
1889         addr_type = ipv6_addr_type(&cfg->fc_dst);
1890
1891         if (addr_type & IPV6_ADDR_MULTICAST)
1892                 rt->dst.input = ip6_mc_input;
1893         else if (cfg->fc_flags & RTF_LOCAL)
1894                 rt->dst.input = ip6_input;
1895         else
1896                 rt->dst.input = ip6_forward;
1897
1898         rt->dst.output = ip6_output;
1899
1900         if (cfg->fc_encap) {
1901                 struct lwtunnel_state *lwtstate;
1902
1903                 err = lwtunnel_build_state(cfg->fc_encap_type,
1904                                            cfg->fc_encap, AF_INET6, cfg,
1905                                            &lwtstate, extack);
1906                 if (err)
1907                         goto out;
1908                 rt->dst.lwtstate = lwtstate_get(lwtstate);
1909                 if (lwtunnel_output_redirect(rt->dst.lwtstate)) {
1910                         rt->dst.lwtstate->orig_output = rt->dst.output;
1911                         rt->dst.output = lwtunnel_output;
1912                 }
1913                 if (lwtunnel_input_redirect(rt->dst.lwtstate)) {
1914                         rt->dst.lwtstate->orig_input = rt->dst.input;
1915                         rt->dst.input = lwtunnel_input;
1916                 }
1917         }
1918
1919         ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
1920         rt->rt6i_dst.plen = cfg->fc_dst_len;
1921         if (rt->rt6i_dst.plen == 128)
1922                 rt->dst.flags |= DST_HOST;
1923
1924 #ifdef CONFIG_IPV6_SUBTREES
1925         ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
1926         rt->rt6i_src.plen = cfg->fc_src_len;
1927 #endif
1928
1929         rt->rt6i_metric = cfg->fc_metric;
1930
1931         /* We cannot add true routes via loopback here;
1932            they would result in kernel looping. Promote them to reject routes.
1933          */
1934         if ((cfg->fc_flags & RTF_REJECT) ||
1935             (dev && (dev->flags & IFF_LOOPBACK) &&
1936              !(addr_type & IPV6_ADDR_LOOPBACK) &&
1937              !(cfg->fc_flags & RTF_LOCAL))) {
1938                 /* hold loopback dev/idev if we haven't done so. */
1939                 if (dev != net->loopback_dev) {
1940                         if (dev) {
1941                                 dev_put(dev);
1942                                 in6_dev_put(idev);
1943                         }
1944                         dev = net->loopback_dev;
1945                         dev_hold(dev);
1946                         idev = in6_dev_get(dev);
1947                         if (!idev) {
1948                                 err = -ENODEV;
1949                                 goto out;
1950                         }
1951                 }
1952                 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
1953                 switch (cfg->fc_type) {
1954                 case RTN_BLACKHOLE:
1955                         rt->dst.error = -EINVAL;
1956                         rt->dst.output = dst_discard_out;
1957                         rt->dst.input = dst_discard;
1958                         break;
1959                 case RTN_PROHIBIT:
1960                         rt->dst.error = -EACCES;
1961                         rt->dst.output = ip6_pkt_prohibit_out;
1962                         rt->dst.input = ip6_pkt_prohibit;
1963                         break;
1964                 case RTN_THROW:
1965                 case RTN_UNREACHABLE:
1966                 default:
1967                         rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
1968                                         : (cfg->fc_type == RTN_UNREACHABLE)
1969                                         ? -EHOSTUNREACH : -ENETUNREACH;
1970                         rt->dst.output = ip6_pkt_discard_out;
1971                         rt->dst.input = ip6_pkt_discard;
1972                         break;
1973                 }
1974                 goto install_route;
1975         }
1976
1977         if (cfg->fc_flags & RTF_GATEWAY) {
1978                 const struct in6_addr *gw_addr;
1979                 int gwa_type;
1980
1981                 gw_addr = &cfg->fc_gateway;
1982                 gwa_type = ipv6_addr_type(gw_addr);
1983
1984                 /* if gw_addr is local, we will fail to detect this when the
1985                  * address is still TENTATIVE (DAD in progress). rt6_lookup()
1986                  * will return the already-added prefix route via the interface
1987                  * that the prefix route was assigned to, which might be non-loopback.
1988                  */
1989                 err = -EINVAL;
1990                 if (ipv6_chk_addr_and_flags(net, gw_addr,
1991                                             gwa_type & IPV6_ADDR_LINKLOCAL ?
1992                                             dev : NULL, 0, 0)) {
1993                         NL_SET_ERR_MSG(extack, "Invalid gateway address");
1994                         goto out;
1995                 }
1996                 rt->rt6i_gateway = *gw_addr;
1997
1998                 if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
1999                         struct rt6_info *grt = NULL;
2000
2001                         /* IPv6 strictly prohibits using non-link-local
2002                            addresses as nexthop addresses.
2003                            Otherwise, the router would not be able to send redirects.
2004                            That is generally good, but in some (rare!) circumstances
2005                            (SIT, PtP, NBMA NOARP links) it is handy to allow
2006                            some exceptions. --ANK
2007                            We allow IPv4-mapped nexthops to support RFC 4798-style
2008                            addressing.
2009                          */
2010                         if (!(gwa_type & (IPV6_ADDR_UNICAST |
2011                                           IPV6_ADDR_MAPPED))) {
2012                                 NL_SET_ERR_MSG(extack,
2013                                                "Invalid gateway address");
2014                                 goto out;
2015                         }
2016
2017                         if (cfg->fc_table) {
2018                                 grt = ip6_nh_lookup_table(net, cfg, gw_addr);
2019
2020                                 if (grt) {
2021                                         if (grt->rt6i_flags & RTF_GATEWAY ||
2022                                             (dev && dev != grt->dst.dev)) {
2023                                                 ip6_rt_put(grt);
2024                                                 grt = NULL;
2025                                         }
2026                                 }
2027                         }
2028
2029                         if (!grt)
2030                                 grt = rt6_lookup(net, gw_addr, NULL,
2031                                                  cfg->fc_ifindex, 1);
2032
2033                         err = -EHOSTUNREACH;
2034                         if (!grt)
2035                                 goto out;
2036                         if (dev) {
2037                                 if (dev != grt->dst.dev) {
2038                                         ip6_rt_put(grt);
2039                                         goto out;
2040                                 }
2041                         } else {
2042                                 dev = grt->dst.dev;
2043                                 idev = grt->rt6i_idev;
2044                                 dev_hold(dev);
2045                                 in6_dev_hold(grt->rt6i_idev);
2046                         }
2047                         if (!(grt->rt6i_flags & RTF_GATEWAY))
2048                                 err = 0;
2049                         ip6_rt_put(grt);
2050
2051                         if (err)
2052                                 goto out;
2053                 }
2054                 err = -EINVAL;
2055                 if (!dev) {
2056                         NL_SET_ERR_MSG(extack, "Egress device not specified");
2057                         goto out;
2058                 } else if (dev->flags & IFF_LOOPBACK) {
2059                         NL_SET_ERR_MSG(extack,
2060                                        "Egress device can not be loopback device for this route");
2061                         goto out;
2062                 }
2063         }
2064
2065         err = -ENODEV;
2066         if (!dev)
2067                 goto out;
2068
2069         if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
2070                 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
2071                         NL_SET_ERR_MSG(extack, "Invalid source address");
2072                         err = -EINVAL;
2073                         goto out;
2074                 }
2075                 rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
2076                 rt->rt6i_prefsrc.plen = 128;
2077         } else
2078                 rt->rt6i_prefsrc.plen = 0;
2079
2080         rt->rt6i_flags = cfg->fc_flags;
2081
2082 install_route:
2083         rt->dst.dev = dev;
2084         rt->rt6i_idev = idev;
2085         rt->rt6i_table = table;
2086
2087         cfg->fc_nlinfo.nl_net = dev_net(dev);
2088
2089         return rt;
2090 out:
2091         if (dev)
2092                 dev_put(dev);
2093         if (idev)
2094                 in6_dev_put(idev);
2095         if (rt)
2096                 dst_release_immediate(&rt->dst);
2097
2098         return ERR_PTR(err);
2099 }
2100
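/* Create a route from cfg and insert it into the FIB, along with any
 * metrics supplied via the netlink RTA_METRICS attribute.
 */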
2101 int ip6_route_add(struct fib6_config *cfg,
2102                   struct netlink_ext_ack *extack)
2103 {
2104         struct mx6_config mxc = { .mx = NULL, };
2105         struct rt6_info *rt;
2106         int err;
2107
2108         rt = ip6_route_info_create(cfg, extack);
2109         if (IS_ERR(rt)) {
2110                 err = PTR_ERR(rt);
2111                 rt = NULL;
2112                 goto out;
2113         }
2114
2115         err = ip6_convert_metrics(&mxc, cfg);
2116         if (err)
2117                 goto out;
2118
2119         err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc, extack);
2120
2121         kfree(mxc.mx);
2122
2123         return err;
2124 out:
2125         if (rt)
2126                 dst_release_immediate(&rt->dst);
2127
2128         return err;
2129 }
2130
2131 static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
2132 {
2133         int err;
2134         struct fib6_table *table;
2135         struct net *net = dev_net(rt->dst.dev);
2136
2137         if (rt == net->ipv6.ip6_null_entry) {
2138                 err = -ENOENT;
2139                 goto out;
2140         }
2141
2142         table = rt->rt6i_table;
2143         write_lock_bh(&table->tb6_lock);
2144         err = fib6_del(rt, info);
2145         write_unlock_bh(&table->tb6_lock);
2146
2147 out:
2148         ip6_rt_put(rt);
2149         return err;
2150 }
2151
2152 int ip6_del_rt(struct rt6_info *rt)
2153 {
2154         struct nl_info info = {
2155                 .nl_net = dev_net(rt->dst.dev),
2156         };
2157         return __ip6_del_rt(rt, &info);
2158 }
2159
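/* Delete rt and, when fc_delete_all_nh is set, its multipath siblings,
 * preferring a single RTM_DELROUTE notification that covers all hops.
 */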
2160 static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg)
2161 {
2162         struct nl_info *info = &cfg->fc_nlinfo;
2163         struct net *net = info->nl_net;
2164         struct sk_buff *skb = NULL;
2165         struct fib6_table *table;
2166         int err = -ENOENT;
2167
2168         if (rt == net->ipv6.ip6_null_entry)
2169                 goto out_put;
2170         table = rt->rt6i_table;
2171         write_lock_bh(&table->tb6_lock);
2172
2173         if (rt->rt6i_nsiblings && cfg->fc_delete_all_nh) {
2174                 struct rt6_info *sibling, *next_sibling;
2175
2176                 /* prefer to send a single notification with all hops */
2177                 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
2178                 if (skb) {
2179                         u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
2180
2181                         if (rt6_fill_node(net, skb, rt,
2182                                           NULL, NULL, 0, RTM_DELROUTE,
2183                                           info->portid, seq, 0) < 0) {
2184                                 kfree_skb(skb);
2185                                 skb = NULL;
2186                         } else
2187                                 info->skip_notify = 1;
2188                 }
2189
2190                 list_for_each_entry_safe(sibling, next_sibling,
2191                                          &rt->rt6i_siblings,
2192                                          rt6i_siblings) {
2193                         err = fib6_del(sibling, info);
2194                         if (err)
2195                                 goto out_unlock;
2196                 }
2197         }
2198
2199         err = fib6_del(rt, info);
2200 out_unlock:
2201         write_unlock_bh(&table->tb6_lock);
2202 out_put:
2203         ip6_rt_put(rt);
2204
2205         if (skb) {
2206                 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
2207                             info->nlh, gfp_any());
2208         }
2209         return err;
2210 }
2211
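/* Delete a route matching cfg.  When a gateway is given, only that one
 * hop is removed; otherwise the deletion goes through
 * __ip6_del_rt_siblings(), which may remove multipath siblings as well.
 */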
2212 static int ip6_route_del(struct fib6_config *cfg,
2213                          struct netlink_ext_ack *extack)
2214 {
2215         struct fib6_table *table;
2216         struct fib6_node *fn;
2217         struct rt6_info *rt;
2218         int err = -ESRCH;
2219
2220         table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
2221         if (!table) {
2222                 NL_SET_ERR_MSG(extack, "FIB table does not exist");
2223                 return err;
2224         }
2225
2226         read_lock_bh(&table->tb6_lock);
2227
2228         fn = fib6_locate(&table->tb6_root,
2229                          &cfg->fc_dst, cfg->fc_dst_len,
2230                          &cfg->fc_src, cfg->fc_src_len);
2231
2232         if (fn) {
2233                 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
2234                         if ((rt->rt6i_flags & RTF_CACHE) &&
2235                             !(cfg->fc_flags & RTF_CACHE))
2236                                 continue;
2237                         if (cfg->fc_ifindex &&
2238                             (!rt->dst.dev ||
2239                              rt->dst.dev->ifindex != cfg->fc_ifindex))
2240                                 continue;
2241                         if (cfg->fc_flags & RTF_GATEWAY &&
2242                             !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
2243                                 continue;
2244                         if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
2245                                 continue;
2246                         if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol)
2247                                 continue;
2248                         dst_hold(&rt->dst);
2249                         read_unlock_bh(&table->tb6_lock);
2250
2251                         /* if a gateway was specified, only delete that one hop */
2252                         if (cfg->fc_flags & RTF_GATEWAY)
2253                                 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
2254
2255                         return __ip6_del_rt_siblings(rt, cfg);
2256                 }
2257         }
2258         read_unlock_bh(&table->tb6_lock);
2259
2260         return err;
2261 }
2262
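/* Core ICMPv6 Redirect handler: validate the redirect message and its ND
 * options, update the neighbour entry for the new first hop and install
 * an RTF_CACHE route pointing at it.
 */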
2263 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
2264 {
2265         struct netevent_redirect netevent;
2266         struct rt6_info *rt, *nrt = NULL;
2267         struct ndisc_options ndopts;
2268         struct inet6_dev *in6_dev;
2269         struct neighbour *neigh;
2270         struct rd_msg *msg;
2271         int optlen, on_link;
2272         u8 *lladdr;
2273
2274         optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
2275         optlen -= sizeof(*msg);
2276
2277         if (optlen < 0) {
2278                 net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
2279                 return;
2280         }
2281
2282         msg = (struct rd_msg *)icmp6_hdr(skb);
2283
2284         if (ipv6_addr_is_multicast(&msg->dest)) {
2285                 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
2286                 return;
2287         }
2288
2289         on_link = 0;
2290         if (ipv6_addr_equal(&msg->dest, &msg->target)) {
2291                 on_link = 1;
2292         } else if (ipv6_addr_type(&msg->target) !=
2293                    (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
2294                 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
2295                 return;
2296         }
2297
2298         in6_dev = __in6_dev_get(skb->dev);
2299         if (!in6_dev)
2300                 return;
2301         if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
2302                 return;
2303
2304         /* RFC2461 8.1:
2305          *      The IP source address of the Redirect MUST be the same as the current
2306          *      first-hop router for the specified ICMP Destination Address.
2307          */
2308
2309         if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
2310                 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
2311                 return;
2312         }
2313
2314         lladdr = NULL;
2315         if (ndopts.nd_opts_tgt_lladdr) {
2316                 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
2317                                              skb->dev);
2318                 if (!lladdr) {
2319                         net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
2320                         return;
2321                 }
2322         }
2323
2324         rt = (struct rt6_info *) dst;
2325         if (rt->rt6i_flags & RTF_REJECT) {
2326                 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
2327                 return;
2328         }
2329
2330         /* Redirect received -> path was valid.
2331          * Redirects are sent only in response to data packets,
2332          * so this nexthop is apparently reachable. --ANK
2333          */
2334         dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);
2335
2336         neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
2337         if (!neigh)
2338                 return;
2339
2340         /*
2341          *      We have finally decided to accept it.
2342          */
2343
2344         ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
2345                      NEIGH_UPDATE_F_WEAK_OVERRIDE|
2346                      NEIGH_UPDATE_F_OVERRIDE|
2347                      (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
2348                                      NEIGH_UPDATE_F_ISROUTER)),
2349                      NDISC_REDIRECT, &ndopts);
2350
2351         nrt = ip6_rt_cache_alloc(rt, &msg->dest, NULL);
2352         if (!nrt)
2353                 goto out;
2354
2355         nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
2356         if (on_link)
2357                 nrt->rt6i_flags &= ~RTF_GATEWAY;
2358
2359         nrt->rt6i_protocol = RTPROT_REDIRECT;
2360         nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
2361
2362         if (ip6_ins_rt(nrt))
2363                 goto out_release;
2364
2365         netevent.old = &rt->dst;
2366         netevent.new = &nrt->dst;
2367         netevent.daddr = &msg->dest;
2368         netevent.neigh = neigh;
2369         call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
2370
2371         if (rt->rt6i_flags & RTF_CACHE) {
2372                 rt = (struct rt6_info *) dst_clone(&rt->dst);
2373                 ip6_del_rt(rt);
2374         }
2375
2376 out_release:
2377         /* Release the reference taken in
2378          * ip6_rt_cache_alloc()
2379          */
2380         dst_release(&nrt->dst);
2381
2382 out:
2383         neigh_release(neigh);
2384 }
2385
2386 /*
2387  *      Misc support functions
2388  */
2389
2390 static void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
2391 {
2392         BUG_ON(from->dst.from);
2393
2394         rt->rt6i_flags &= ~RTF_EXPIRES;
2395         dst_hold(&from->dst);
2396         rt->dst.from = &from->dst;
2397         dst_init_metrics(&rt->dst, dst_metrics_ptr(&from->dst), true);
2398 }
2399
2400 static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
2401 {
2402         rt->dst.input = ort->dst.input;
2403         rt->dst.output = ort->dst.output;
2404         rt->rt6i_dst = ort->rt6i_dst;
2405         rt->dst.error = ort->dst.error;
2406         rt->rt6i_idev = ort->rt6i_idev;
2407         if (rt->rt6i_idev)
2408                 in6_dev_hold(rt->rt6i_idev);
2409         rt->dst.lastuse = jiffies;
2410         rt->rt6i_gateway = ort->rt6i_gateway;
2411         rt->rt6i_flags = ort->rt6i_flags;
2412         rt6_set_from(rt, ort);
2413         rt->rt6i_metric = ort->rt6i_metric;
2414 #ifdef CONFIG_IPV6_SUBTREES
2415         rt->rt6i_src = ort->rt6i_src;
2416 #endif
2417         rt->rt6i_prefsrc = ort->rt6i_prefsrc;
2418         rt->rt6i_table = ort->rt6i_table;
2419         rt->dst.lwtstate = lwtstate_get(ort->dst.lwtstate);
2420 }
2421
2422 #ifdef CONFIG_IPV6_ROUTE_INFO
2423 static struct rt6_info *rt6_get_route_info(struct net *net,
2424                                            const struct in6_addr *prefix, int prefixlen,
2425                                            const struct in6_addr *gwaddr,
2426                                            struct net_device *dev)
2427 {
2428         u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
2429         int ifindex = dev->ifindex;
2430         struct fib6_node *fn;
2431         struct rt6_info *rt = NULL;
2432         struct fib6_table *table;
2433
2434         table = fib6_get_table(net, tb_id);
2435         if (!table)
2436                 return NULL;
2437
2438         read_lock_bh(&table->tb6_lock);
2439         fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0);
2440         if (!fn)
2441                 goto out;
2442
2443         for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
2444                 if (rt->dst.dev->ifindex != ifindex)
2445                         continue;
2446                 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
2447                         continue;
2448                 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
2449                         continue;
2450                 dst_hold(&rt->dst);
2451                 break;
2452         }
2453 out:
2454         read_unlock_bh(&table->tb6_lock);
2455         return rt;
2456 }
2457
2458 static struct rt6_info *rt6_add_route_info(struct net *net,
2459                                            const struct in6_addr *prefix, int prefixlen,
2460                                            const struct in6_addr *gwaddr,
2461                                            struct net_device *dev,
2462                                            unsigned int pref)
2463 {
2464         struct fib6_config cfg = {
2465                 .fc_metric      = IP6_RT_PRIO_USER,
2466                 .fc_ifindex     = dev->ifindex,
2467                 .fc_dst_len     = prefixlen,
2468                 .fc_flags       = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
2469                                   RTF_UP | RTF_PREF(pref),
2470                 .fc_protocol = RTPROT_RA,
2471                 .fc_nlinfo.portid = 0,
2472                 .fc_nlinfo.nlh = NULL,
2473                 .fc_nlinfo.nl_net = net,
2474         };
2475
2476         cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
2477         cfg.fc_dst = *prefix;
2478         cfg.fc_gateway = *gwaddr;
2479
2480         /* We should treat it as a default route if the prefix length is 0. */
2481         if (!prefixlen)
2482                 cfg.fc_flags |= RTF_DEFAULT;
2483
2484         ip6_route_add(&cfg, NULL);
2485
2486         return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
2487 }
2488 #endif
2489
2490 struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
2491 {
2492         u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
2493         struct rt6_info *rt;
2494         struct fib6_table *table;
2495
2496         table = fib6_get_table(dev_net(dev), tb_id);
2497         if (!table)
2498                 return NULL;
2499
2500         read_lock_bh(&table->tb6_lock);
2501         for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
2502                 if (dev == rt->dst.dev &&
2503                     ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
2504                     ipv6_addr_equal(&rt->rt6i_gateway, addr))
2505                         break;
2506         }
2507         if (rt)
2508                 dst_hold(&rt->dst);
2509         read_unlock_bh(&table->tb6_lock);
2510         return rt;
2511 }
2512
2513 struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
2514                                      struct net_device *dev,
2515                                      unsigned int pref)
2516 {
2517         struct fib6_config cfg = {
2518                 .fc_table       = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
2519                 .fc_metric      = IP6_RT_PRIO_USER,
2520                 .fc_ifindex     = dev->ifindex,
2521                 .fc_flags       = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
2522                                   RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
2523                 .fc_protocol = RTPROT_RA,
2524                 .fc_nlinfo.portid = 0,
2525                 .fc_nlinfo.nlh = NULL,
2526                 .fc_nlinfo.nl_net = dev_net(dev),
2527         };
2528
2529         cfg.fc_gateway = *gwaddr;
2530
2531         if (!ip6_route_add(&cfg, NULL)) {
2532                 struct fib6_table *table;
2533
2534                 table = fib6_get_table(dev_net(dev), cfg.fc_table);
2535                 if (table)
2536                         table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
2537         }
2538
2539         return rt6_get_dflt_router(gwaddr, dev);
2540 }
2541
2542 static void __rt6_purge_dflt_routers(struct fib6_table *table)
2543 {
2544         struct rt6_info *rt;
2545
2546 restart:
2547         read_lock_bh(&table->tb6_lock);
2548         for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
2549                 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
2550                     (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
2551                         dst_hold(&rt->dst);
2552                         read_unlock_bh(&table->tb6_lock);
2553                         ip6_del_rt(rt);
2554                         goto restart;
2555                 }
2556         }
2557         read_unlock_bh(&table->tb6_lock);
2558
2559         table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
2560 }
2561
2562 void rt6_purge_dflt_routers(struct net *net)
2563 {
2564         struct fib6_table *table;
2565         struct hlist_head *head;
2566         unsigned int h;
2567
2568         rcu_read_lock();
2569
2570         for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
2571                 head = &net->ipv6.fib_table_hash[h];
2572                 hlist_for_each_entry_rcu(table, head, tb6_hlist) {
2573                         if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
2574                                 __rt6_purge_dflt_routers(table);
2575                 }
2576         }
2577
2578         rcu_read_unlock();
2579 }
2580
2581 static void rtmsg_to_fib6_config(struct net *net,
2582                                  struct in6_rtmsg *rtmsg,
2583                                  struct fib6_config *cfg)
2584 {
2585         memset(cfg, 0, sizeof(*cfg));
2586
2587         cfg->fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
2588                          : RT6_TABLE_MAIN;
2589         cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
2590         cfg->fc_metric = rtmsg->rtmsg_metric;
2591         cfg->fc_expires = rtmsg->rtmsg_info;
2592         cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
2593         cfg->fc_src_len = rtmsg->rtmsg_src_len;
2594         cfg->fc_flags = rtmsg->rtmsg_flags;
2595
2596         cfg->fc_nlinfo.nl_net = net;
2597
2598         cfg->fc_dst = rtmsg->rtmsg_dst;
2599         cfg->fc_src = rtmsg->rtmsg_src;
2600         cfg->fc_gateway = rtmsg->rtmsg_gateway;
2601 }
2602
2603 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
2604 {
2605         struct fib6_config cfg;
2606         struct in6_rtmsg rtmsg;
2607         int err;
2608
2609         switch (cmd) {
2610         case SIOCADDRT:         /* Add a route */
2611         case SIOCDELRT:         /* Delete a route */
2612                 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2613                         return -EPERM;
2614                 err = copy_from_user(&rtmsg, arg,
2615                                      sizeof(struct in6_rtmsg));
2616                 if (err)
2617                         return -EFAULT;
2618
2619                 rtmsg_to_fib6_config(net, &rtmsg, &cfg);
2620
2621                 rtnl_lock();
2622                 switch (cmd) {
2623                 case SIOCADDRT:
2624                         err = ip6_route_add(&cfg, NULL);
2625                         break;
2626                 case SIOCDELRT:
2627                         err = ip6_route_del(&cfg, NULL);
2628                         break;
2629                 default:
2630                         err = -EINVAL;
2631                 }
2632                 rtnl_unlock();
2633
2634                 return err;
2635         }
2636
2637         return -EINVAL;
2638 }
2639
2640 /*
2641  *      Drop the packet on the floor
2642  */
2643
2644 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
2645 {
2646         int type;
2647         struct dst_entry *dst = skb_dst(skb);
2648         switch (ipstats_mib_noroutes) {
2649         case IPSTATS_MIB_INNOROUTES:
2650                 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
2651                 if (type == IPV6_ADDR_ANY) {
2652                         IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2653                                       IPSTATS_MIB_INADDRERRORS);
2654                         break;
2655                 }
2656                 /* FALLTHROUGH */
2657         case IPSTATS_MIB_OUTNOROUTES:
2658                 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2659                               ipstats_mib_noroutes);
2660                 break;
2661         }
2662         icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
2663         kfree_skb(skb);
2664         return 0;
2665 }
2666
2667 static int ip6_pkt_discard(struct sk_buff *skb)
2668 {
2669         return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
2670 }
2671
2672 static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
2673 {
2674         skb->dev = skb_dst(skb)->dev;
2675         return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
2676 }
2677
2678 static int ip6_pkt_prohibit(struct sk_buff *skb)
2679 {
2680         return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
2681 }
2682
2683 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
2684 {
2685         skb->dev = skb_dst(skb)->dev;
2686         return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
2687 }
2688
2689 /*
2690  *      Allocate a dst for local (unicast / anycast) address.
2691  */
2692
2693 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2694                                     const struct in6_addr *addr,
2695                                     bool anycast)
2696 {
2697         u32 tb_id;
2698         struct net *net = dev_net(idev->dev);
2699         struct net_device *dev = net->loopback_dev;
2700         struct rt6_info *rt;
2701
2702         /* use the L3 master device as the loopback device for host routes
2703          * if the device is enslaved and the address is not link-local or multicast
2704          */
2705         if (!rt6_need_strict(addr))
2706                 dev = l3mdev_master_dev_rcu(idev->dev) ? : dev;
2707
2708         rt = ip6_dst_alloc(net, dev, DST_NOCOUNT);
2709         if (!rt)
2710                 return ERR_PTR(-ENOMEM);
2711
2712         in6_dev_hold(idev);
2713
2714         rt->dst.flags |= DST_HOST;
2715         rt->dst.input = ip6_input;
2716         rt->dst.output = ip6_output;
2717         rt->rt6i_idev = idev;
2718
2719         rt->rt6i_protocol = RTPROT_KERNEL;
2720         rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
2721         if (anycast)
2722                 rt->rt6i_flags |= RTF_ANYCAST;
2723         else
2724                 rt->rt6i_flags |= RTF_LOCAL;
2725
2726         rt->rt6i_gateway  = *addr;
2727         rt->rt6i_dst.addr = *addr;
2728         rt->rt6i_dst.plen = 128;
2729         tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
2730         rt->rt6i_table = fib6_get_table(net, tb_id);
2731
2732         return rt;
2733 }
2734
2735 /* remove the deleted IP address from prefsrc entries */
2736 struct arg_dev_net_ip {
2737         struct net_device *dev;
2738         struct net *net;
2739         struct in6_addr *addr;
2740 };
2741
2742 static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
2743 {
2744         struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
2745         struct net *net = ((struct arg_dev_net_ip *)arg)->net;
2746         struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
2747
2748         if (((void *)rt->dst.dev == dev || !dev) &&
2749             rt != net->ipv6.ip6_null_entry &&
2750             ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
2751                 /* remove prefsrc entry */
2752                 rt->rt6i_prefsrc.plen = 0;
2753         }
2754         return 0;
2755 }
2756
2757 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
2758 {
2759         struct net *net = dev_net(ifp->idev->dev);
2760         struct arg_dev_net_ip adni = {
2761                 .dev = ifp->idev->dev,
2762                 .net = net,
2763                 .addr = &ifp->addr,
2764         };
2765         fib6_clean_all(net, fib6_remove_prefsrc, &adni);
2766 }
2767
2768 #define RTF_RA_ROUTER           (RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
2769 #define RTF_CACHE_GATEWAY       (RTF_GATEWAY | RTF_CACHE)
2770
2771 /* Remove routers and update dst entries when a gateway turns into a host. */
2772 static int fib6_clean_tohost(struct rt6_info *rt, void *arg)
2773 {
2774         struct in6_addr *gateway = (struct in6_addr *)arg;
2775
2776         if ((((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) ||
2777              ((rt->rt6i_flags & RTF_CACHE_GATEWAY) == RTF_CACHE_GATEWAY)) &&
2778              ipv6_addr_equal(gateway, &rt->rt6i_gateway)) {
2779                 return -1;
2780         }
2781         return 0;
2782 }
2783
2784 void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
2785 {
2786         fib6_clean_all(net, fib6_clean_tohost, gateway);
2787 }
2788
2789 struct arg_dev_net {
2790         struct net_device *dev;
2791         struct net *net;
2792 };
2793
2794 /* called with the write lock held for the table containing rt */
2795 static int fib6_ifdown(struct rt6_info *rt, void *arg)
2796 {
2797         const struct arg_dev_net *adn = arg;
2798         const struct net_device *dev = adn->dev;
2799
2800         if ((rt->dst.dev == dev || !dev) &&
2801             rt != adn->net->ipv6.ip6_null_entry &&
2802             (rt->rt6i_nsiblings == 0 ||
2803              (dev && netdev_unregistering(dev)) ||
2804              !rt->rt6i_idev->cnf.ignore_routes_with_linkdown))
2805                 return -1;
2806
2807         return 0;
2808 }
2809
2810 void rt6_ifdown(struct net *net, struct net_device *dev)
2811 {
2812         struct arg_dev_net adn = {
2813                 .dev = dev,
2814                 .net = net,
2815         };
2816
2817         fib6_clean_all(net, fib6_ifdown, &adn);
2818         if (dev)
2819                 rt6_uncached_list_flush_dev(net, dev);
2820 }
2821
2822 struct rt6_mtu_change_arg {
2823         struct net_device *dev;
2824         unsigned int mtu;
2825 };
2826
2827 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
2828 {
2829         struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
2830         struct inet6_dev *idev;
2831
2832         /* In IPv6, PMTU discovery is not optional,
2833            so the RTAX_MTU lock cannot disable it.
2834            We still use this lock to block changes
2835            caused by addrconf/ndisc.
2836         */
2837
2838         idev = __in6_dev_get(arg->dev);
2839         if (!idev)
2840                 return 0;
2841
2842         /* For an administrative MTU increase, there is no way to discover
2843            an IPv6 PMTU increase, so the PMTU increase must be handled here.
2844            Since RFC 1981 doesn't cover administrative MTU increases,
2845            updating the PMTU on increase is a MUST (e.g. for jumbo frames).
2846          */
2847         /*
2848            If the new MTU is less than the route PMTU, the new MTU will be
2849            the lowest MTU in the path; update the route PMTU to reflect the
2850            decrease.  If the new MTU is greater than the route PMTU, and the
2851            old MTU was the lowest MTU in the path, update the route PMTU to
2852            reflect the increase.  In that case, if other nodes' MTUs are also
2853            the lowest in the path, a Packet Too Big message will still trigger
2854            PMTU discovery.
2855          */
2856         if (rt->dst.dev == arg->dev &&
2857             dst_metric_raw(&rt->dst, RTAX_MTU) &&
2858             !dst_metric_locked(&rt->dst, RTAX_MTU)) {
2859                 if (rt->rt6i_flags & RTF_CACHE) {
2860                         /* For RTF_CACHE with rt6i_pmtu == 0
2861                          * (i.e. a redirected route),
2862                          * the metrics of its rt->dst.from have already
2863                          * been updated.
2864                          */
2865                         if (rt->rt6i_pmtu && rt->rt6i_pmtu > arg->mtu)
2866                                 rt->rt6i_pmtu = arg->mtu;
2867                 } else if (dst_mtu(&rt->dst) >= arg->mtu ||
2868                            (dst_mtu(&rt->dst) < arg->mtu &&
2869                             dst_mtu(&rt->dst) == idev->cnf.mtu6)) {
2870                         dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
2871                 }
2872         }
2873         return 0;
2874 }
2875
2876 void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
2877 {
2878         struct rt6_mtu_change_arg arg = {
2879                 .dev = dev,
2880                 .mtu = mtu,
2881         };
2882
2883         fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
2884 }
2885
2886 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
2887         [RTA_GATEWAY]           = { .len = sizeof(struct in6_addr) },
2888         [RTA_OIF]               = { .type = NLA_U32 },
2889         [RTA_IIF]               = { .type = NLA_U32 },
2890         [RTA_PRIORITY]          = { .type = NLA_U32 },
2891         [RTA_METRICS]           = { .type = NLA_NESTED },
2892         [RTA_MULTIPATH]         = { .len = sizeof(struct rtnexthop) },
2893         [RTA_PREF]              = { .type = NLA_U8 },
2894         [RTA_ENCAP_TYPE]        = { .type = NLA_U16 },
2895         [RTA_ENCAP]             = { .type = NLA_NESTED },
2896         [RTA_EXPIRES]           = { .type = NLA_U32 },
2897         [RTA_UID]               = { .type = NLA_U32 },
2898         [RTA_MARK]              = { .type = NLA_U32 },
2899 };
2900
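/* Translate an RTM_NEWROUTE/RTM_DELROUTE netlink message into a
 * fib6_config, parsing its attributes against rtm_ipv6_policy.
 */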
2901 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2902                               struct fib6_config *cfg,
2903                               struct netlink_ext_ack *extack)
2904 {
2905         struct rtmsg *rtm;
2906         struct nlattr *tb[RTA_MAX+1];
2907         unsigned int pref;
2908         int err;
2909
2910         err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
2911                           NULL);
2912         if (err < 0)
2913                 goto errout;
2914
2915         err = -EINVAL;
2916         rtm = nlmsg_data(nlh);
2917         memset(cfg, 0, sizeof(*cfg));
2918
2919         cfg->fc_table = rtm->rtm_table;
2920         cfg->fc_dst_len = rtm->rtm_dst_len;
2921         cfg->fc_src_len = rtm->rtm_src_len;
2922         cfg->fc_flags = RTF_UP;
2923         cfg->fc_protocol = rtm->rtm_protocol;
2924         cfg->fc_type = rtm->rtm_type;
2925
2926         if (rtm->rtm_type == RTN_UNREACHABLE ||
2927             rtm->rtm_type == RTN_BLACKHOLE ||
2928             rtm->rtm_type == RTN_PROHIBIT ||
2929             rtm->rtm_type == RTN_THROW)
2930                 cfg->fc_flags |= RTF_REJECT;
2931
2932         if (rtm->rtm_type == RTN_LOCAL)
2933                 cfg->fc_flags |= RTF_LOCAL;
2934
2935         if (rtm->rtm_flags & RTM_F_CLONED)
2936                 cfg->fc_flags |= RTF_CACHE;
2937
2938         cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
2939         cfg->fc_nlinfo.nlh = nlh;
2940         cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
2941
2942         if (tb[RTA_GATEWAY]) {
2943                 cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
2944                 cfg->fc_flags |= RTF_GATEWAY;
2945         }
2946
2947         if (tb[RTA_DST]) {
2948                 int plen = (rtm->rtm_dst_len + 7) >> 3;
2949
2950                 if (nla_len(tb[RTA_DST]) < plen)
2951                         goto errout;
2952
2953                 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
2954         }
2955
2956         if (tb[RTA_SRC]) {
2957                 int plen = (rtm->rtm_src_len + 7) >> 3;
2958
2959                 if (nla_len(tb[RTA_SRC]) < plen)
2960                         goto errout;
2961
2962                 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
2963         }
2964
2965         if (tb[RTA_PREFSRC])
2966                 cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);
2967
2968         if (tb[RTA_OIF])
2969                 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
2970
2971         if (tb[RTA_PRIORITY])
2972                 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
2973
2974         if (tb[RTA_METRICS]) {
2975                 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
2976                 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
2977         }
2978
2979         if (tb[RTA_TABLE])
2980                 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
2981
2982         if (tb[RTA_MULTIPATH]) {
2983                 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
2984                 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
2985
2986                 err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
2987                                                      cfg->fc_mp_len, extack);
2988                 if (err < 0)
2989                         goto errout;
2990         }
2991
2992         if (tb[RTA_PREF]) {
2993                 pref = nla_get_u8(tb[RTA_PREF]);
2994                 if (pref != ICMPV6_ROUTER_PREF_LOW &&
2995                     pref != ICMPV6_ROUTER_PREF_HIGH)
2996                         pref = ICMPV6_ROUTER_PREF_MEDIUM;
2997                 cfg->fc_flags |= RTF_PREF(pref);
2998         }
2999
3000         if (tb[RTA_ENCAP])
3001                 cfg->fc_encap = tb[RTA_ENCAP];
3002
3003         if (tb[RTA_ENCAP_TYPE]) {
3004                 cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
3005
3006                 err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
3007                 if (err < 0)
3008                         goto errout;
3009         }
3010
3011         if (tb[RTA_EXPIRES]) {
3012                 unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
3013
3014                 if (addrconf_finite_timeout(timeout)) {
3015                         cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
3016                         cfg->fc_flags |= RTF_EXPIRES;
3017                 }
3018         }
3019
3020         err = 0;
3021 errout:
3022         return err;
3023 }
3024
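     /* Staging entry used while building a multipath route: one rt6_nh is
      * queued on rt6_nh_list per nexthop parsed from RTA_MULTIPATH, holding
      * the not-yet-inserted rt6_info plus its per-nexthop config and metrics.
      */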
3025 struct rt6_nh {
3026         struct rt6_info *rt6_info;
3027         struct fib6_config r_cfg;
3028         struct mx6_config mxc;
3029         struct list_head next;
3030 };
3031
3032 static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
3033 {
3034         struct rt6_nh *nh;
3035
3036         list_for_each_entry(nh, rt6_nh_list, next) {
3037                 pr_warn("multipath route replace failed (check consistency of installed routes): %pI6c nexthop %pI6c ifi %d\n",
3038                         &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
3039                         nh->r_cfg.fc_ifindex);
3040         }
3041 }
3042
3043 static int ip6_route_info_append(struct list_head *rt6_nh_list,
3044                                  struct rt6_info *rt, struct fib6_config *r_cfg)
3045 {
3046         struct rt6_nh *nh;
3047         int err = -EEXIST;
3048
3049         list_for_each_entry(nh, rt6_nh_list, next) {
3050                 /* check if rt6_info already exists */
3051                 if (rt6_duplicate_nexthop(nh->rt6_info, rt))
3052                         return err;
3053         }
3054
3055         nh = kzalloc(sizeof(*nh), GFP_KERNEL);
3056         if (!nh)
3057                 return -ENOMEM;
3058         nh->rt6_info = rt;
3059         err = ip6_convert_metrics(&nh->mxc, r_cfg);
3060         if (err) {
3061                 kfree(nh);
3062                 return err;
3063         }
3064         memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
3065         list_add_tail(&nh->next, rt6_nh_list);
3066
3067         return 0;
3068 }
3069
3070 static void ip6_route_mpath_notify(struct rt6_info *rt,
3071                                    struct rt6_info *rt_last,
3072                                    struct nl_info *info,
3073                                    __u16 nlflags)
3074 {
3075         /* If this is an APPEND route, rt points to the first route
3076          * inserted and rt_last to the last one. Userspace wants a
3077          * consistent dump of the route that starts at the first nexthop,
3078          * and since sibling routes are always added at the end of the
3079          * list, find the first sibling of the last route appended.
3080          */
3081         if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->rt6i_nsiblings) {
3082                 rt = list_first_entry(&rt_last->rt6i_siblings,
3083                                       struct rt6_info,
3084                                       rt6i_siblings);
3085         }
3086
3087         if (rt)
3088                 inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
3089 }
3090
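     /* Add a multipath route: each rtnexthop in cfg->fc_mp is turned into an
      * rt6_info via ip6_route_info_create() and staged on rt6_nh_list, then
      * the entries are inserted one by one and a single netlink notification
      * is sent once all of them are in place.  If an insertion fails, the
      * nexthops that were already added are deleted again.
      */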
3091 static int ip6_route_multipath_add(struct fib6_config *cfg,
3092                                    struct netlink_ext_ack *extack)
3093 {
3094         struct rt6_info *rt_notif = NULL, *rt_last = NULL;
3095         struct nl_info *info = &cfg->fc_nlinfo;
3096         struct fib6_config r_cfg;
3097         struct rtnexthop *rtnh;
3098         struct rt6_info *rt;
3099         struct rt6_nh *err_nh;
3100         struct rt6_nh *nh, *nh_safe;
3101         __u16 nlflags;
3102         int remaining;
3103         int attrlen;
3104         int err = 1;
3105         int nhn = 0;
3106         int replace = (cfg->fc_nlinfo.nlh &&
3107                        (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
3108         LIST_HEAD(rt6_nh_list);
3109
3110         nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
3111         if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
3112                 nlflags |= NLM_F_APPEND;
3113
3114         remaining = cfg->fc_mp_len;
3115         rtnh = (struct rtnexthop *)cfg->fc_mp;
3116
3117         /* Parse the RTA_MULTIPATH attribute and build a list (rt6_nh_list)
3118          * with one rt6_info per nexthop.
3119          */
3120         while (rtnh_ok(rtnh, remaining)) {
3121                 memcpy(&r_cfg, cfg, sizeof(*cfg));
3122                 if (rtnh->rtnh_ifindex)
3123                         r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
3124
3125                 attrlen = rtnh_attrlen(rtnh);
3126                 if (attrlen > 0) {
3127                         struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
3128
3129                         nla = nla_find(attrs, attrlen, RTA_GATEWAY);
3130                         if (nla) {
3131                                 r_cfg.fc_gateway = nla_get_in6_addr(nla);
3132                                 r_cfg.fc_flags |= RTF_GATEWAY;
3133                         }
3134                         r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
3135                         nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
3136                         if (nla)
3137                                 r_cfg.fc_encap_type = nla_get_u16(nla);
3138                 }
3139
3140                 rt = ip6_route_info_create(&r_cfg, extack);
3141                 if (IS_ERR(rt)) {
3142                         err = PTR_ERR(rt);
3143                         rt = NULL;
3144                         goto cleanup;
3145                 }
3146
3147                 err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg);
3148                 if (err) {
3149                         dst_release_immediate(&rt->dst);
3150                         goto cleanup;
3151                 }
3152
3153                 rtnh = rtnh_next(rtnh, &remaining);
3154         }
3155
3156         /* for add and replace send one notification with all nexthops.
3157          * Skip the notification in fib6_add_rt2node and send one with
3158          * the full route when done
3159          */
3160         info->skip_notify = 1;
3161
3162         err_nh = NULL;
3163         list_for_each_entry(nh, &rt6_nh_list, next) {
3164                 rt_last = nh->rt6_info;
3165                 err = __ip6_ins_rt(nh->rt6_info, info, &nh->mxc, extack);
3166                 /* save reference to first route for notification */
3167                 if (!rt_notif && !err)
3168                         rt_notif = nh->rt6_info;
3169
3170                 /* nh->rt6_info is used or freed at this point, reset to NULL */
3171                 nh->rt6_info = NULL;
3172                 if (err) {
3173                         if (replace && nhn)
3174                                 ip6_print_replace_route_err(&rt6_nh_list);
3175                         err_nh = nh;
3176                         goto add_errout;
3177                 }
3178
3179                 /* Each nexthop is inserted as if it were a standalone route,
3180                  * so drop NLM_F_EXCL and NLM_F_REPLACE after the first one:
3181                  * if there is a collision, adding the first nexthop has
3182                  * already failed because fib6_add_rt2node() rejected it;
3183                  * when replacing, the old nexthops were replaced by the
3184                  * first new one, and the rest should be appended to it.
3185                  */
3186                 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
3187                                                      NLM_F_REPLACE);
3188                 nhn++;
3189         }
3190
3191         /* success ... tell user about new route */
3192         ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
3193         goto cleanup;
3194
3195 add_errout:
3196         /* send notification for routes that were added so that
3197          * the delete notifications sent by ip6_route_del are
3198          * coherent
3199          */
3200         if (rt_notif)
3201                 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
3202
3203         /* Delete routes that were already added */
3204         list_for_each_entry(nh, &rt6_nh_list, next) {
3205                 if (err_nh == nh)
3206                         break;
3207                 ip6_route_del(&nh->r_cfg, extack);
3208         }
3209
3210 cleanup:
3211         list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
3212                 if (nh->rt6_info)
3213                         dst_release_immediate(&nh->rt6_info->dst);
3214                 kfree(nh->mxc.mx);
3215                 list_del(&nh->next);
3216                 kfree(nh);
3217         }
3218
3219         return err;
3220 }
3221
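     /* Delete a multipath route nexthop by nexthop; the last non-zero error
      * returned by ip6_route_del() is propagated to the caller.
      */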
3222 static int ip6_route_multipath_del(struct fib6_config *cfg,
3223                                    struct netlink_ext_ack *extack)
3224 {
3225         struct fib6_config r_cfg;
3226         struct rtnexthop *rtnh;
3227         int remaining;
3228         int attrlen;
3229         int err = 1, last_err = 0;
3230
3231         remaining = cfg->fc_mp_len;
3232         rtnh = (struct rtnexthop *)cfg->fc_mp;
3233
3234         /* Parse a Multipath Entry */
3235         while (rtnh_ok(rtnh, remaining)) {
3236                 memcpy(&r_cfg, cfg, sizeof(*cfg));
3237                 if (rtnh->rtnh_ifindex)
3238                         r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
3239
3240                 attrlen = rtnh_attrlen(rtnh);
3241                 if (attrlen > 0) {
3242                         struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
3243
3244                         nla = nla_find(attrs, attrlen, RTA_GATEWAY);
3245                         if (nla) {
3246                                 nla_memcpy(&r_cfg.fc_gateway, nla, 16);
3247                                 r_cfg.fc_flags |= RTF_GATEWAY;
3248                         }
3249                 }
3250                 err = ip6_route_del(&r_cfg, extack);
3251                 if (err)
3252                         last_err = err;
3253
3254                 rtnh = rtnh_next(rtnh, &remaining);
3255         }
3256
3257         return last_err;
3258 }
3259
3260 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
3261                               struct netlink_ext_ack *extack)
3262 {
3263         struct fib6_config cfg;
3264         int err;
3265
3266         err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
3267         if (err < 0)
3268                 return err;
3269
3270         if (cfg.fc_mp)
3271                 return ip6_route_multipath_del(&cfg, extack);
3272         else {
3273                 cfg.fc_delete_all_nh = 1;
3274                 return ip6_route_del(&cfg, extack);
3275         }
3276 }
3277
3278 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
3279                               struct netlink_ext_ack *extack)
3280 {
3281         struct fib6_config cfg;
3282         int err;
3283
3284         err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
3285         if (err < 0)
3286                 return err;
3287
3288         if (cfg.fc_mp)
3289                 return ip6_route_multipath_add(&cfg, extack);
3290         else
3291                 return ip6_route_add(&cfg, extack);
3292 }
3293
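     /* Worst-case netlink message size for a notification or dump of @rt,
      * including one RTA_MULTIPATH nexthop entry per sibling route.
      */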
3294 static size_t rt6_nlmsg_size(struct rt6_info *rt)
3295 {
3296         int nexthop_len = 0;
3297
3298         if (rt->rt6i_nsiblings) {
3299                 nexthop_len = nla_total_size(0)  /* RTA_MULTIPATH */
3300                             + NLA_ALIGN(sizeof(struct rtnexthop))
3301                             + nla_total_size(16) /* RTA_GATEWAY */
3302                             + lwtunnel_get_encap_size(rt->dst.lwtstate);
3303
3304                 nexthop_len *= rt->rt6i_nsiblings;
3305         }
3306
3307         return NLMSG_ALIGN(sizeof(struct rtmsg))
3308                + nla_total_size(16) /* RTA_SRC */
3309                + nla_total_size(16) /* RTA_DST */
3310                + nla_total_size(16) /* RTA_GATEWAY */
3311                + nla_total_size(16) /* RTA_PREFSRC */
3312                + nla_total_size(4) /* RTA_TABLE */
3313                + nla_total_size(4) /* RTA_IIF */
3314                + nla_total_size(4) /* RTA_OIF */
3315                + nla_total_size(4) /* RTA_PRIORITY */
3316                + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
3317                + nla_total_size(sizeof(struct rta_cacheinfo))
3318                + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
3319                + nla_total_size(1) /* RTA_PREF */
3320                + lwtunnel_get_encap_size(rt->dst.lwtstate)
3321                + nexthop_len;
3322 }
3323
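     /* Fill the nexthop part of a route message: gateway, nexthop flags
      * (linkdown/dead/offload), outgoing interface and lwtunnel encap.
      * @skip_oif is set for multipath encoding, where the ifindex already
      * lives in the enclosing rtnexthop.
      */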
3324 static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt,
3325                             unsigned int *flags, bool skip_oif)
3326 {
3327         if (!netif_running(rt->dst.dev) || !netif_carrier_ok(rt->dst.dev)) {
3328                 *flags |= RTNH_F_LINKDOWN;
3329                 if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown)
3330                         *flags |= RTNH_F_DEAD;
3331         }
3332
3333         if (rt->rt6i_flags & RTF_GATEWAY) {
3334                 if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->rt6i_gateway) < 0)
3335                         goto nla_put_failure;
3336         }
3337
3338         if (rt->rt6i_flags & RTF_OFFLOAD)
3339                 *flags |= RTNH_F_OFFLOAD;
3340
3341         /* not needed for multipath encoding because it has a rtnexthop struct */
3342         if (!skip_oif && rt->dst.dev &&
3343             nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
3344                 goto nla_put_failure;
3345
3346         if (rt->dst.lwtstate &&
3347             lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0)
3348                 goto nla_put_failure;
3349
3350         return 0;
3351
3352 nla_put_failure:
3353         return -EMSGSIZE;
3354 }
3355
3356 /* add multipath next hop */
3357 static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt)
3358 {
3359         struct rtnexthop *rtnh;
3360         unsigned int flags = 0;
3361
3362         rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
3363         if (!rtnh)
3364                 goto nla_put_failure;
3365
3366         rtnh->rtnh_hops = 0;
3367         rtnh->rtnh_ifindex = rt->dst.dev ? rt->dst.dev->ifindex : 0;
3368
3369         if (rt6_nexthop_info(skb, rt, &flags, true) < 0)
3370                 goto nla_put_failure;
3371
3372         rtnh->rtnh_flags = flags;
3373
3374         /* length of rtnetlink header + attributes */
3375         rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
3376
3377         return 0;
3378
3379 nla_put_failure:
3380         return -EMSGSIZE;
3381 }
3382
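     /* Build a route netlink message (RTM_NEWROUTE/RTM_DELROUTE) for @rt.
      * When @dst/@src are given they override the route's own prefixes
      * (used for route-get replies), and sibling routes are emitted as an
      * RTA_MULTIPATH attribute.
      */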
3383 static int rt6_fill_node(struct net *net,
3384                          struct sk_buff *skb, struct rt6_info *rt,
3385                          struct in6_addr *dst, struct in6_addr *src,
3386                          int iif, int type, u32 portid, u32 seq,
3387                          unsigned int flags)
3388 {
3389         u32 metrics[RTAX_MAX];
3390         struct rtmsg *rtm;
3391         struct nlmsghdr *nlh;
3392         long expires;
3393         u32 table;
3394
3395         nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
3396         if (!nlh)
3397                 return -EMSGSIZE;
3398
3399         rtm = nlmsg_data(nlh);
3400         rtm->rtm_family = AF_INET6;
3401         rtm->rtm_dst_len = rt->rt6i_dst.plen;
3402         rtm->rtm_src_len = rt->rt6i_src.plen;
3403         rtm->rtm_tos = 0;
3404         if (rt->rt6i_table)
3405                 table = rt->rt6i_table->tb6_id;
3406         else
3407                 table = RT6_TABLE_UNSPEC;
3408         rtm->rtm_table = table;
3409         if (nla_put_u32(skb, RTA_TABLE, table))
3410                 goto nla_put_failure;
3411         if (rt->rt6i_flags & RTF_REJECT) {
3412                 switch (rt->dst.error) {
3413                 case -EINVAL:
3414                         rtm->rtm_type = RTN_BLACKHOLE;
3415                         break;
3416                 case -EACCES:
3417                         rtm->rtm_type = RTN_PROHIBIT;
3418                         break;
3419                 case -EAGAIN:
3420                         rtm->rtm_type = RTN_THROW;
3421                         break;
3422                 default:
3423                         rtm->rtm_type = RTN_UNREACHABLE;
3424                         break;
3425                 }
3426         }
3427         else if (rt->rt6i_flags & RTF_LOCAL)
3428                 rtm->rtm_type = RTN_LOCAL;
3429         else if (rt->rt6i_flags & RTF_ANYCAST)
3430                 rtm->rtm_type = RTN_ANYCAST;
3431         else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
3432                 rtm->rtm_type = RTN_LOCAL;
3433         else
3434                 rtm->rtm_type = RTN_UNICAST;
3435         rtm->rtm_flags = 0;
3436         rtm->rtm_scope = RT_SCOPE_UNIVERSE;
3437         rtm->rtm_protocol = rt->rt6i_protocol;
3438
3439         if (rt->rt6i_flags & RTF_CACHE)
3440                 rtm->rtm_flags |= RTM_F_CLONED;
3441
3442         if (dst) {
3443                 if (nla_put_in6_addr(skb, RTA_DST, dst))
3444                         goto nla_put_failure;
3445                 rtm->rtm_dst_len = 128;
3446         } else if (rtm->rtm_dst_len)
3447                 if (nla_put_in6_addr(skb, RTA_DST, &rt->rt6i_dst.addr))
3448                         goto nla_put_failure;
3449 #ifdef CONFIG_IPV6_SUBTREES
3450         if (src) {
3451                 if (nla_put_in6_addr(skb, RTA_SRC, src))
3452                         goto nla_put_failure;
3453                 rtm->rtm_src_len = 128;
3454         } else if (rtm->rtm_src_len &&
3455                    nla_put_in6_addr(skb, RTA_SRC, &rt->rt6i_src.addr))
3456                 goto nla_put_failure;
3457 #endif
3458         if (iif) {
3459 #ifdef CONFIG_IPV6_MROUTE
3460                 if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
3461                         int err = ip6mr_get_route(net, skb, rtm, portid);
3462
3463                         if (err == 0)
3464                                 return 0;
3465                         if (err < 0)
3466                                 goto nla_put_failure;
3467                 } else
3468 #endif
3469                         if (nla_put_u32(skb, RTA_IIF, iif))
3470                                 goto nla_put_failure;
3471         } else if (dst) {
3472                 struct in6_addr saddr_buf;
3473                 if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
3474                     nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
3475                         goto nla_put_failure;
3476         }
3477
3478         if (rt->rt6i_prefsrc.plen) {
3479                 struct in6_addr saddr_buf;
3480                 saddr_buf = rt->rt6i_prefsrc.addr;
3481                 if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
3482                         goto nla_put_failure;
3483         }
3484
3485         memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
3486         if (rt->rt6i_pmtu)
3487                 metrics[RTAX_MTU - 1] = rt->rt6i_pmtu;
3488         if (rtnetlink_put_metrics(skb, metrics) < 0)
3489                 goto nla_put_failure;
3490
3491         if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
3492                 goto nla_put_failure;
3493
3494         /* For multipath routes, walk the siblings list and add
3495          * each as a nexthop within RTA_MULTIPATH.
3496          */
3497         if (rt->rt6i_nsiblings) {
3498                 struct rt6_info *sibling, *next_sibling;
3499                 struct nlattr *mp;
3500
3501                 mp = nla_nest_start(skb, RTA_MULTIPATH);
3502                 if (!mp)
3503                         goto nla_put_failure;
3504
3505                 if (rt6_add_nexthop(skb, rt) < 0)
3506                         goto nla_put_failure;
3507
3508                 list_for_each_entry_safe(sibling, next_sibling,
3509                                          &rt->rt6i_siblings, rt6i_siblings) {
3510                         if (rt6_add_nexthop(skb, sibling) < 0)
3511                                 goto nla_put_failure;
3512                 }
3513
3514                 nla_nest_end(skb, mp);
3515         } else {
3516                 if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags, false) < 0)
3517                         goto nla_put_failure;
3518         }
3519
3520         expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0;
3521
3522         if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
3523                 goto nla_put_failure;
3524
3525         if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
3526                 goto nla_put_failure;
3527
3528
3529         nlmsg_end(skb, nlh);
3530         return 0;
3531
3532 nla_put_failure:
3533         nlmsg_cancel(skb, nlh);
3534         return -EMSGSIZE;
3535 }
3536
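     /* Per-route callback for RTM_GETROUTE dumps: skip the null entry and,
      * when the dump requested RTM_F_PREFIX, skip non-prefix routes.
      */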
3537 int rt6_dump_route(struct rt6_info *rt, void *p_arg)
3538 {
3539         struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
3540         struct net *net = arg->net;
3541
3542         if (rt == net->ipv6.ip6_null_entry)
3543                 return 0;
3544
3545         if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
3546                 struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
3547
3548                 /* user wants prefix routes only */
3549                 if (rtm->rtm_flags & RTM_F_PREFIX &&
3550                     !(rt->rt6i_flags & RTF_PREFIX_RT)) {
3551                         /* success since this is not a prefix route */
3552                         return 1;
3553                 }
3554         }
3555
3556         return rt6_fill_node(net,
3557                      arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
3558                      NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq,
3559                      NLM_F_MULTI);
3560 }
3561
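     /* RTM_GETROUTE handler: build a flowi6 from the request attributes,
      * perform an input or output route lookup (or a plain FIB lookup when
      * RTM_F_FIB_MATCH is set) and unicast the resulting route back to the
      * requester.
      */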
3562 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3563                               struct netlink_ext_ack *extack)
3564 {
3565         struct net *net = sock_net(in_skb->sk);
3566         struct nlattr *tb[RTA_MAX+1];
3567         int err, iif = 0, oif = 0;
3568         struct dst_entry *dst;
3569         struct rt6_info *rt;
3570         struct sk_buff *skb;
3571         struct rtmsg *rtm;
3572         struct flowi6 fl6;
3573         bool fibmatch;
3574
3575         err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
3576                           extack);
3577         if (err < 0)
3578                 goto errout;
3579
3580         err = -EINVAL;
3581         memset(&fl6, 0, sizeof(fl6));
3582         rtm = nlmsg_data(nlh);
3583         fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
3584         fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);
3585
3586         if (tb[RTA_SRC]) {
3587                 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
3588                         goto errout;
3589
3590                 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
3591         }
3592
3593         if (tb[RTA_DST]) {
3594                 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
3595                         goto errout;
3596
3597                 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
3598         }
3599
3600         if (tb[RTA_IIF])
3601                 iif = nla_get_u32(tb[RTA_IIF]);
3602
3603         if (tb[RTA_OIF])
3604                 oif = nla_get_u32(tb[RTA_OIF]);
3605
3606         if (tb[RTA_MARK])
3607                 fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
3608
3609         if (tb[RTA_UID])
3610                 fl6.flowi6_uid = make_kuid(current_user_ns(),
3611                                            nla_get_u32(tb[RTA_UID]));
3612         else
3613                 fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
3614
3615         if (iif) {
3616                 struct net_device *dev;
3617                 int flags = 0;
3618
3619                 dev = __dev_get_by_index(net, iif);
3620                 if (!dev) {
3621                         err = -ENODEV;
3622                         goto errout;
3623                 }
3624
3625                 fl6.flowi6_iif = iif;
3626
3627                 if (!ipv6_addr_any(&fl6.saddr))
3628                         flags |= RT6_LOOKUP_F_HAS_SADDR;
3629
3630                 if (!fibmatch)
3631                         dst = ip6_route_input_lookup(net, dev, &fl6, flags);
3632         } else {
3633                 fl6.flowi6_oif = oif;
3634
3635                 if (!fibmatch)
3636                         dst = ip6_route_output(net, NULL, &fl6);
3637         }
3638
3639         if (fibmatch)
3640                 dst = ip6_route_lookup(net, &fl6, 0);
3641
3642         rt = container_of(dst, struct rt6_info, dst);
3643         if (rt->dst.error) {
3644                 err = rt->dst.error;
3645                 ip6_rt_put(rt);
3646                 goto errout;
3647         }
3648
3649         if (rt == net->ipv6.ip6_null_entry) {
3650                 err = rt->dst.error;
3651                 ip6_rt_put(rt);
3652                 goto errout;
3653         }
3654
3655         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3656         if (!skb) {
3657                 ip6_rt_put(rt);
3658                 err = -ENOBUFS;
3659                 goto errout;
3660         }
3661
3662         skb_dst_set(skb, &rt->dst);
3663         if (fibmatch)
3664                 err = rt6_fill_node(net, skb, rt, NULL, NULL, iif,
3665                                     RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
3666                                     nlh->nlmsg_seq, 0);
3667         else
3668                 err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
3669                                     RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
3670                                     nlh->nlmsg_seq, 0);
3671         if (err < 0) {
3672                 kfree_skb(skb);
3673                 goto errout;
3674         }
3675
3676         err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
3677 errout:
3678         return err;
3679 }
3680
3681 void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info,
3682                      unsigned int nlm_flags)
3683 {
3684         struct sk_buff *skb;
3685         struct net *net = info->nl_net;
3686         u32 seq;
3687         int err;
3688
3689         err = -ENOBUFS;
3690         seq = info->nlh ? info->nlh->nlmsg_seq : 0;
3691
3692         skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
3693         if (!skb)
3694                 goto errout;
3695
3696         err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
3697                                 event, info->portid, seq, nlm_flags);
3698         if (err < 0) {
3699                 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
3700                 WARN_ON(err == -EMSGSIZE);
3701                 kfree_skb(skb);
3702                 goto errout;
3703         }
3704         rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
3705                     info->nlh, gfp_any());
3706         return;
3707 errout:
3708         if (err < 0)
3709                 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
3710 }
3711
3712 static int ip6_route_dev_notify(struct notifier_block *this,
3713                                 unsigned long event, void *ptr)
3714 {
3715         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3716         struct net *net = dev_net(dev);
3717
3718         if (!(dev->flags & IFF_LOOPBACK))
3719                 return NOTIFY_OK;
3720
3721         if (event == NETDEV_REGISTER) {
3722                 net->ipv6.ip6_null_entry->dst.dev = dev;
3723                 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
3724 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3725                 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
3726                 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
3727                 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
3728                 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
3729 #endif
3730         } else if (event == NETDEV_UNREGISTER &&
3731                     dev->reg_state != NETREG_UNREGISTERED) {
3732                 /* NETDEV_UNREGISTER may be fired multiple times by
3733                  * netdev_wait_allrefs(). Make sure we only call this once.
3734                  */
3735                 in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev);
3736 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3737                 in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev);
3738                 in6_dev_put(net->ipv6.ip6_blk_hole_entry->rt6i_idev);
3739 #endif
3740         }
3741
3742         return NOTIFY_OK;
3743 }
3744
3745 /*
3746  *      /proc
3747  */
3748
3749 #ifdef CONFIG_PROC_FS
3750
3751 static const struct file_operations ipv6_route_proc_fops = {
3752         .owner          = THIS_MODULE,
3753         .open           = ipv6_route_open,
3754         .read           = seq_read,
3755         .llseek         = seq_lseek,
3756         .release        = seq_release_net,
3757 };
3758
3759 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
3760 {
3761         struct net *net = (struct net *)seq->private;
3762         seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
3763                    net->ipv6.rt6_stats->fib_nodes,
3764                    net->ipv6.rt6_stats->fib_route_nodes,
3765                    net->ipv6.rt6_stats->fib_rt_alloc,
3766                    net->ipv6.rt6_stats->fib_rt_entries,
3767                    net->ipv6.rt6_stats->fib_rt_cache,
3768                    dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
3769                    net->ipv6.rt6_stats->fib_discarded_routes);
3770
3771         return 0;
3772 }
3773
3774 static int rt6_stats_seq_open(struct inode *inode, struct file *file)
3775 {
3776         return single_open_net(inode, file, rt6_stats_seq_show);
3777 }
3778
3779 static const struct file_operations rt6_stats_seq_fops = {
3780         .owner   = THIS_MODULE,
3781         .open    = rt6_stats_seq_open,
3782         .read    = seq_read,
3783         .llseek  = seq_lseek,
3784         .release = single_release_net,
3785 };
3786 #endif  /* CONFIG_PROC_FS */
3787
3788 #ifdef CONFIG_SYSCTL
3789
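     /* Handler for the write-only "flush" sysctl: a write triggers an
      * immediate fib6_run_gc() pass for this namespace.
      */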
3790 static
3791 int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
3792                               void __user *buffer, size_t *lenp, loff_t *ppos)
3793 {
3794         struct net *net;
3795         int delay;
3796         if (!write)
3797                 return -EINVAL;
3798
3799         net = (struct net *)ctl->extra1;
3800         delay = net->ipv6.sysctl.flush_delay;
3801         proc_dointvec(ctl, write, buffer, lenp, ppos);
3802         fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
3803         return 0;
3804 }
3805
3806 struct ctl_table ipv6_route_table_template[] = {
3807         {
3808                 .procname       =       "flush",
3809                 .data           =       &init_net.ipv6.sysctl.flush_delay,
3810                 .maxlen         =       sizeof(int),
3811                 .mode           =       0200,
3812                 .proc_handler   =       ipv6_sysctl_rtcache_flush
3813         },
3814         {
3815                 .procname       =       "gc_thresh",
3816                 .data           =       &ip6_dst_ops_template.gc_thresh,
3817                 .maxlen         =       sizeof(int),
3818                 .mode           =       0644,
3819                 .proc_handler   =       proc_dointvec,
3820         },
3821         {
3822                 .procname       =       "max_size",
3823                 .data           =       &init_net.ipv6.sysctl.ip6_rt_max_size,
3824                 .maxlen         =       sizeof(int),
3825                 .mode           =       0644,
3826                 .proc_handler   =       proc_dointvec,
3827         },
3828         {
3829                 .procname       =       "gc_min_interval",
3830                 .data           =       &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
3831                 .maxlen         =       sizeof(int),
3832                 .mode           =       0644,
3833                 .proc_handler   =       proc_dointvec_jiffies,
3834         },
3835         {
3836                 .procname       =       "gc_timeout",
3837                 .data           =       &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
3838                 .maxlen         =       sizeof(int),
3839                 .mode           =       0644,
3840                 .proc_handler   =       proc_dointvec_jiffies,
3841         },
3842         {
3843                 .procname       =       "gc_interval",
3844                 .data           =       &init_net.ipv6.sysctl.ip6_rt_gc_interval,
3845                 .maxlen         =       sizeof(int),
3846                 .mode           =       0644,
3847                 .proc_handler   =       proc_dointvec_jiffies,
3848         },
3849         {
3850                 .procname       =       "gc_elasticity",
3851                 .data           =       &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
3852                 .maxlen         =       sizeof(int),
3853                 .mode           =       0644,
3854                 .proc_handler   =       proc_dointvec,
3855         },
3856         {
3857                 .procname       =       "mtu_expires",
3858                 .data           =       &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
3859                 .maxlen         =       sizeof(int),
3860                 .mode           =       0644,
3861                 .proc_handler   =       proc_dointvec_jiffies,
3862         },
3863         {
3864                 .procname       =       "min_adv_mss",
3865                 .data           =       &init_net.ipv6.sysctl.ip6_rt_min_advmss,
3866                 .maxlen         =       sizeof(int),
3867                 .mode           =       0644,
3868                 .proc_handler   =       proc_dointvec,
3869         },
3870         {
3871                 .procname       =       "gc_min_interval_ms",
3872                 .data           =       &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
3873                 .maxlen         =       sizeof(int),
3874                 .mode           =       0644,
3875                 .proc_handler   =       proc_dointvec_ms_jiffies,
3876         },
3877         { }
3878 };
3879
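     /* Duplicate the sysctl template for a new namespace and point each
      * entry at the per-namespace data; the table[] indices below must
      * match the order of ipv6_route_table_template.  For namespaces not
      * owned by the initial user namespace the table is truncated at its
      * first entry, so no route sysctls are exported there.
      */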
3880 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
3881 {
3882         struct ctl_table *table;
3883
3884         table = kmemdup(ipv6_route_table_template,
3885                         sizeof(ipv6_route_table_template),
3886                         GFP_KERNEL);
3887
3888         if (table) {
3889                 table[0].data = &net->ipv6.sysctl.flush_delay;
3890                 table[0].extra1 = net;
3891                 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
3892                 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
3893                 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
3894                 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
3895                 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
3896                 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
3897                 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
3898                 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
3899                 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
3900
3901                 /* Don't export sysctls to unprivileged users */
3902                 if (net->user_ns != &init_user_ns)
3903                         table[0].procname = NULL;
3904         }
3905
3906         return table;
3907 }
3908 #endif
3909
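     /* Per-namespace init: copy the dst_ops template, allocate the null
      * (and, with CONFIG_IPV6_MULTIPLE_TABLES, the prohibit and blackhole)
      * template routes and seed the routing sysctl defaults.
      */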
3910 static int __net_init ip6_route_net_init(struct net *net)
3911 {
3912         int ret = -ENOMEM;
3913
3914         memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
3915                sizeof(net->ipv6.ip6_dst_ops));
3916
3917         if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
3918                 goto out_ip6_dst_ops;
3919
3920         net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
3921                                            sizeof(*net->ipv6.ip6_null_entry),
3922                                            GFP_KERNEL);
3923         if (!net->ipv6.ip6_null_entry)
3924                 goto out_ip6_dst_entries;
3925         net->ipv6.ip6_null_entry->dst.path =
3926                 (struct dst_entry *)net->ipv6.ip6_null_entry;
3927         net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3928         dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
3929                          ip6_template_metrics, true);
3930
3931 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3932         net->ipv6.fib6_has_custom_rules = false;
3933         net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
3934                                                sizeof(*net->ipv6.ip6_prohibit_entry),
3935                                                GFP_KERNEL);
3936         if (!net->ipv6.ip6_prohibit_entry)
3937                 goto out_ip6_null_entry;
3938         net->ipv6.ip6_prohibit_entry->dst.path =
3939                 (struct dst_entry *)net->ipv6.ip6_prohibit_entry;
3940         net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3941         dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
3942                          ip6_template_metrics, true);
3943
3944         net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
3945                                                sizeof(*net->ipv6.ip6_blk_hole_entry),
3946                                                GFP_KERNEL);
3947         if (!net->ipv6.ip6_blk_hole_entry)
3948                 goto out_ip6_prohibit_entry;
3949         net->ipv6.ip6_blk_hole_entry->dst.path =
3950                 (struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
3951         net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3952         dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
3953                          ip6_template_metrics, true);
3954 #endif
3955
3956         net->ipv6.sysctl.flush_delay = 0;
3957         net->ipv6.sysctl.ip6_rt_max_size = 4096;
3958         net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
3959         net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
3960         net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
3961         net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
3962         net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
3963         net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
3964
3965         net->ipv6.ip6_rt_gc_expire = 30*HZ;
3966
3967         ret = 0;
3968 out:
3969         return ret;
3970
3971 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3972 out_ip6_prohibit_entry:
3973         kfree(net->ipv6.ip6_prohibit_entry);
3974 out_ip6_null_entry:
3975         kfree(net->ipv6.ip6_null_entry);
3976 #endif
3977 out_ip6_dst_entries:
3978         dst_entries_destroy(&net->ipv6.ip6_dst_ops);
3979 out_ip6_dst_ops:
3980         goto out;
3981 }
3982
3983 static void __net_exit ip6_route_net_exit(struct net *net)
3984 {
3985         kfree(net->ipv6.ip6_null_entry);
3986 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3987         kfree(net->ipv6.ip6_prohibit_entry);
3988         kfree(net->ipv6.ip6_blk_hole_entry);
3989 #endif
3990         dst_entries_destroy(&net->ipv6.ip6_dst_ops);
3991 }
3992
3993 static int __net_init ip6_route_net_init_late(struct net *net)
3994 {
3995 #ifdef CONFIG_PROC_FS
3996         proc_create("ipv6_route", 0, net->proc_net, &ipv6_route_proc_fops);
3997         proc_create("rt6_stats", S_IRUGO, net->proc_net, &rt6_stats_seq_fops);
3998 #endif
3999         return 0;
4000 }
4001
4002 static void __net_exit ip6_route_net_exit_late(struct net *net)
4003 {
4004 #ifdef CONFIG_PROC_FS
4005         remove_proc_entry("ipv6_route", net->proc_net);
4006         remove_proc_entry("rt6_stats", net->proc_net);
4007 #endif
4008 }
4009
4010 static struct pernet_operations ip6_route_net_ops = {
4011         .init = ip6_route_net_init,
4012         .exit = ip6_route_net_exit,
4013 };
4014
4015 static int __net_init ipv6_inetpeer_init(struct net *net)
4016 {
4017         struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
4018
4019         if (!bp)
4020                 return -ENOMEM;
4021         inet_peer_base_init(bp);
4022         net->ipv6.peers = bp;
4023         return 0;
4024 }
4025
4026 static void __net_exit ipv6_inetpeer_exit(struct net *net)
4027 {
4028         struct inet_peer_base *bp = net->ipv6.peers;
4029
4030         net->ipv6.peers = NULL;
4031         inetpeer_invalidate_tree(bp);
4032         kfree(bp);
4033 }
4034
4035 static struct pernet_operations ipv6_inetpeer_ops = {
4036         .init   =       ipv6_inetpeer_init,
4037         .exit   =       ipv6_inetpeer_exit,
4038 };
4039
4040 static struct pernet_operations ip6_route_net_late_ops = {
4041         .init = ip6_route_net_init_late,
4042         .exit = ip6_route_net_exit_late,
4043 };
4044
4045 static struct notifier_block ip6_route_dev_notifier = {
4046         .notifier_call = ip6_route_dev_notify,
4047         .priority = ADDRCONF_NOTIFY_PRIORITY - 10,
4048 };
4049
4050 void __init ip6_route_init_special_entries(void)
4051 {
4052         /* The loopback device is registered before this code runs, so the
4053          * loopback reference in rt6_info is not taken automatically; take
4054          * it manually for init_net. */
4055         init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
4056         init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
4057 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
4058         init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
4059         init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
4060         init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
4061         init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
4062 #endif
4063 }
4064
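     /* Subsystem init: create the rt6_info slab cache, register the pernet
      * operations, bring up fib6, xfrm6 and fib6 rules, hook up the
      * RTM_NEWROUTE/DELROUTE/GETROUTE handlers and the netdevice notifier,
      * and initialize the per-cpu uncached route lists.
      */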
4065 int __init ip6_route_init(void)
4066 {
4067         int ret;
4068         int cpu;
4069
4070         ret = -ENOMEM;
4071         ip6_dst_ops_template.kmem_cachep =
4072                 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
4073                                   SLAB_HWCACHE_ALIGN, NULL);
4074         if (!ip6_dst_ops_template.kmem_cachep)
4075                 goto out;
4076
4077         ret = dst_entries_init(&ip6_dst_blackhole_ops);
4078         if (ret)
4079                 goto out_kmem_cache;
4080
4081         ret = register_pernet_subsys(&ipv6_inetpeer_ops);
4082         if (ret)
4083                 goto out_dst_entries;
4084
4085         ret = register_pernet_subsys(&ip6_route_net_ops);
4086         if (ret)
4087                 goto out_register_inetpeer;
4088
4089         ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
4090
4091         ret = fib6_init();
4092         if (ret)
4093                 goto out_register_subsys;
4094
4095         ret = xfrm6_init();
4096         if (ret)
4097                 goto out_fib6_init;
4098
4099         ret = fib6_rules_init();
4100         if (ret)
4101                 goto xfrm6_init;
4102
4103         ret = register_pernet_subsys(&ip6_route_net_late_ops);
4104         if (ret)
4105                 goto fib6_rules_init;
4106
4107         ret = -ENOBUFS;
4108         if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
4109             __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
4110             __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
4111                 goto out_register_late_subsys;
4112
4113         ret = register_netdevice_notifier(&ip6_route_dev_notifier);
4114         if (ret)
4115                 goto out_register_late_subsys;
4116
4117         for_each_possible_cpu(cpu) {
4118                 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
4119
4120                 INIT_LIST_HEAD(&ul->head);
4121                 spin_lock_init(&ul->lock);
4122         }
4123
4124 out:
4125         return ret;
4126
4127 out_register_late_subsys:
4128         unregister_pernet_subsys(&ip6_route_net_late_ops);
4129 fib6_rules_init:
4130         fib6_rules_cleanup();
4131 xfrm6_init:
4132         xfrm6_fini();
4133 out_fib6_init:
4134         fib6_gc_cleanup();
4135 out_register_subsys:
4136         unregister_pernet_subsys(&ip6_route_net_ops);
4137 out_register_inetpeer:
4138         unregister_pernet_subsys(&ipv6_inetpeer_ops);
4139 out_dst_entries:
4140         dst_entries_destroy(&ip6_dst_blackhole_ops);
4141 out_kmem_cache:
4142         kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
4143         goto out;
4144 }
4145
4146 void ip6_route_cleanup(void)
4147 {
4148         unregister_netdevice_notifier(&ip6_route_dev_notifier);
4149         unregister_pernet_subsys(&ip6_route_net_late_ops);
4150         fib6_rules_cleanup();
4151         xfrm6_fini();
4152         fib6_gc_cleanup();
4153         unregister_pernet_subsys(&ipv6_inetpeer_ops);
4154         unregister_pernet_subsys(&ip6_route_net_ops);
4155         dst_entries_destroy(&ip6_dst_blackhole_ops);
4156         kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
4157 }