/*
 *	Linux INET6 implementation
 *
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	YOSHIFUJI Hideaki @USAGI
 *		reworked default router selection.
 *		- respect outgoing interface
 *		- select from (probably) reachable routers (i.e.
 *		  routers in REACHABLE, STALE, DELAY or PROBE states).
 *		- always select the same router if it is (probably)
 *		  reachable.  otherwise, round-robin the list.
 *		Fixed routing subtrees.
 */
#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/times.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/route.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/mroute6.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/net_namespace.h>
#include <net/snmp.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/tcp.h>
#include <linux/rtnetlink.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <net/nexthop.h>
#include <net/lwtunnel.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <trace/events/fib6.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
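/* NUD (neighbour unreachability detection) outcome for a candidate route.
 * The negative values are distinct failure modes consumed by
 * rt6_score_route() and find_match(); RT6_NUD_FAIL_DO_RR asks the caller
 * to fall back to round-robin among routes of the same metric.
 */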
enum rt6_nud_state {
        RT6_NUD_FAIL_HARD = -3,
        RT6_NUD_FAIL_PROBE = -2,
        RT6_NUD_FAIL_DO_RR = -1,
        RT6_NUD_SUCCEED = 1
};
static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort);
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ip6_default_advmss(const struct dst_entry *dst);
static unsigned int ip6_mtu(const struct dst_entry *dst);
static struct dst_entry *ip6_negative_advice(struct dst_entry *);
static void ip6_dst_destroy(struct dst_entry *);
static void ip6_dst_ifdown(struct dst_entry *,
                           struct net_device *dev, int how);
static int ip6_dst_gc(struct dst_ops *ops);

static int ip6_pkt_discard(struct sk_buff *skb);
static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static int ip6_pkt_prohibit(struct sk_buff *skb);
static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static void ip6_link_failure(struct sk_buff *skb);
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
                               struct sk_buff *skb, u32 mtu);
static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
                            struct sk_buff *skb);
static void rt6_dst_from_metrics_check(struct rt6_info *rt);
static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
static size_t rt6_nlmsg_size(struct rt6_info *rt);
static int rt6_fill_node(struct net *net,
                         struct sk_buff *skb, struct rt6_info *rt,
                         struct in6_addr *dst, struct in6_addr *src,
                         int iif, int type, u32 portid, u32 seq,
                         unsigned int flags);
static struct rt6_info *rt6_find_cached_rt(struct rt6_info *rt,
                                           struct in6_addr *daddr,
                                           struct in6_addr *saddr);

#ifdef CONFIG_IPV6_ROUTE_INFO
static struct rt6_info *rt6_add_route_info(struct net *net,
                                           const struct in6_addr *prefix, int prefixlen,
                                           const struct in6_addr *gwaddr,
                                           struct net_device *dev,
                                           unsigned int pref);
static struct rt6_info *rt6_get_route_info(struct net *net,
                                           const struct in6_addr *prefix, int prefixlen,
                                           const struct in6_addr *gwaddr,
                                           struct net_device *dev);
#endif
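/* Routes that are not owned by the fib6 tree (RTF_CACHE clones handed out
 * as uncached dsts, and ICMPv6 dsts) are kept on per-CPU lists so that
 * rt6_uncached_list_flush_dev() can retarget them to the loopback device
 * when the device they reference goes away.
 */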
struct uncached_list {
        spinlock_t              lock;
        struct list_head        head;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
static void rt6_uncached_list_add(struct rt6_info *rt)
{
        struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);

        rt->rt6i_uncached_list = ul;

        spin_lock_bh(&ul->lock);
        list_add_tail(&rt->rt6i_uncached, &ul->head);
        spin_unlock_bh(&ul->lock);
}

static void rt6_uncached_list_del(struct rt6_info *rt)
{
        if (!list_empty(&rt->rt6i_uncached)) {
                struct uncached_list *ul = rt->rt6i_uncached_list;
                struct net *net = dev_net(rt->dst.dev);

                spin_lock_bh(&ul->lock);
                list_del(&rt->rt6i_uncached);
                atomic_dec(&net->ipv6.rt6_stats->fib_rt_uncache);
                spin_unlock_bh(&ul->lock);
        }
}

static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
{
        struct net_device *loopback_dev = net->loopback_dev;
        int cpu;

        if (dev == loopback_dev)
                return;

        for_each_possible_cpu(cpu) {
                struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
                struct rt6_info *rt;

                spin_lock_bh(&ul->lock);
                list_for_each_entry(rt, &ul->head, rt6i_uncached) {
                        struct inet6_dev *rt_idev = rt->rt6i_idev;
                        struct net_device *rt_dev = rt->dst.dev;

                        if (rt_idev->dev == dev) {
                                rt->rt6i_idev = in6_dev_get(loopback_dev);
                                in6_dev_put(rt_idev);
                        }

                        if (rt_dev == dev) {
                                rt->dst.dev = loopback_dev;
                                dev_hold(rt->dst.dev);
                                dev_put(rt_dev);
                        }
                }
                spin_unlock_bh(&ul->lock);
        }
}
static u32 *rt6_pcpu_cow_metrics(struct rt6_info *rt)
{
        return dst_metrics_write_ptr(&rt->from->dst);
}

static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
{
        struct rt6_info *rt = (struct rt6_info *)dst;

        if (rt->rt6i_flags & RTF_PCPU)
                return rt6_pcpu_cow_metrics(rt);
        else if (rt->rt6i_flags & RTF_CACHE)
                return NULL;
        else
                return dst_cow_metrics_generic(dst, old);
}

static inline const void *choose_neigh_daddr(struct rt6_info *rt,
                                             struct sk_buff *skb,
                                             const void *daddr)
{
        struct in6_addr *p = &rt->rt6i_gateway;

        if (!ipv6_addr_any(p))
                return (const void *) p;
        else if (skb)
                return &ipv6_hdr(skb)->daddr;
        return daddr;
}

static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
                                          struct sk_buff *skb,
                                          const void *daddr)
{
        struct rt6_info *rt = (struct rt6_info *) dst;
        struct neighbour *n;

        daddr = choose_neigh_daddr(rt, skb, daddr);
        n = __ipv6_neigh_lookup(dst->dev, daddr);
        if (n)
                return n;
        return neigh_create(&nd_tbl, daddr, dst->dev);
}

static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
        struct net_device *dev = dst->dev;
        struct rt6_info *rt = (struct rt6_info *)dst;

        daddr = choose_neigh_daddr(rt, NULL, daddr);
        if (!daddr)
                return;
        if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
                return;
        if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
                return;
        __ipv6_confirm_neigh(dev, daddr);
}
static struct dst_ops ip6_dst_ops_template = {
        .family                 =       AF_INET6,
        .gc                     =       ip6_dst_gc,
        .gc_thresh              =       1024,
        .check                  =       ip6_dst_check,
        .default_advmss         =       ip6_default_advmss,
        .mtu                    =       ip6_mtu,
        .cow_metrics            =       ipv6_cow_metrics,
        .destroy                =       ip6_dst_destroy,
        .ifdown                 =       ip6_dst_ifdown,
        .negative_advice        =       ip6_negative_advice,
        .link_failure           =       ip6_link_failure,
        .update_pmtu            =       ip6_rt_update_pmtu,
        .redirect               =       rt6_do_redirect,
        .local_out              =       __ip6_local_out,
        .neigh_lookup           =       ip6_neigh_lookup,
        .confirm_neigh          =       ip6_confirm_neigh,
};

static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
{
        unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

        return mtu ? : dst->dev->mtu;
}

static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
                                         struct sk_buff *skb, u32 mtu)
{
}

static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
                                      struct sk_buff *skb)
{
}

static struct dst_ops ip6_dst_blackhole_ops = {
        .family                 =       AF_INET6,
        .destroy                =       ip6_dst_destroy,
        .check                  =       ip6_dst_check,
        .mtu                    =       ip6_blackhole_mtu,
        .default_advmss         =       ip6_default_advmss,
        .update_pmtu            =       ip6_rt_blackhole_update_pmtu,
        .redirect               =       ip6_rt_blackhole_redirect,
        .cow_metrics            =       dst_cow_metrics_generic,
        .neigh_lookup           =       ip6_neigh_lookup,
};
static const u32 ip6_template_metrics[RTAX_MAX] = {
        [RTAX_HOPLIMIT - 1] = 0,
};

static const struct rt6_info ip6_null_entry_template = {
        .dst = {
                .__refcnt       = ATOMIC_INIT(1),
                .__use          = 1,
                .obsolete       = DST_OBSOLETE_FORCE_CHK,
                .error          = -ENETUNREACH,
                .input          = ip6_pkt_discard,
                .output         = ip6_pkt_discard_out,
        },
        .rt6i_flags     = (RTF_REJECT | RTF_NONEXTHOP),
        .rt6i_protocol  = RTPROT_KERNEL,
        .rt6i_metric    = ~(u32) 0,
        .rt6i_ref       = ATOMIC_INIT(1),
};

#ifdef CONFIG_IPV6_MULTIPLE_TABLES

static const struct rt6_info ip6_prohibit_entry_template = {
        .dst = {
                .__refcnt       = ATOMIC_INIT(1),
                .__use          = 1,
                .obsolete       = DST_OBSOLETE_FORCE_CHK,
                .error          = -EACCES,
                .input          = ip6_pkt_prohibit,
                .output         = ip6_pkt_prohibit_out,
        },
        .rt6i_flags     = (RTF_REJECT | RTF_NONEXTHOP),
        .rt6i_protocol  = RTPROT_KERNEL,
        .rt6i_metric    = ~(u32) 0,
        .rt6i_ref       = ATOMIC_INIT(1),
};

static const struct rt6_info ip6_blk_hole_entry_template = {
        .dst = {
                .__refcnt       = ATOMIC_INIT(1),
                .__use          = 1,
                .obsolete       = DST_OBSOLETE_FORCE_CHK,
                .error          = -EINVAL,
                .input          = dst_discard,
                .output         = dst_discard_out,
        },
        .rt6i_flags     = (RTF_REJECT | RTF_NONEXTHOP),
        .rt6i_protocol  = RTPROT_KERNEL,
        .rt6i_metric    = ~(u32) 0,
        .rt6i_ref       = ATOMIC_INIT(1),
};

#endif
static void rt6_info_init(struct rt6_info *rt)
{
        struct dst_entry *dst = &rt->dst;

        memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
        INIT_LIST_HEAD(&rt->rt6i_siblings);
        INIT_LIST_HEAD(&rt->rt6i_uncached);
}

/* allocate dst with ip6_dst_ops */
static struct rt6_info *__ip6_dst_alloc(struct net *net,
                                        struct net_device *dev,
                                        int flags)
{
        struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
                                        1, DST_OBSOLETE_FORCE_CHK, flags);

        if (rt) {
                rt6_info_init(rt);
                atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
        }

        return rt;
}

struct rt6_info *ip6_dst_alloc(struct net *net,
                               struct net_device *dev,
                               int flags)
{
        struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);

        if (rt) {
                rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);
                if (!rt->rt6i_pcpu) {
                        dst_release_immediate(&rt->dst);
                        return NULL;
                }
        }

        return rt;
}
EXPORT_SYMBOL(ip6_dst_alloc);
static void ip6_dst_destroy(struct dst_entry *dst)
{
        struct rt6_info *rt = (struct rt6_info *)dst;
        struct rt6_exception_bucket *bucket;
        struct rt6_info *from = rt->from;
        struct inet6_dev *idev;

        dst_destroy_metrics_generic(dst);
        free_percpu(rt->rt6i_pcpu);
        rt6_uncached_list_del(rt);

        idev = rt->rt6i_idev;
        if (idev) {
                rt->rt6i_idev = NULL;
                in6_dev_put(idev);
        }
        bucket = rcu_dereference_protected(rt->rt6i_exception_bucket, 1);
        if (bucket) {
                rt->rt6i_exception_bucket = NULL;
                kfree(bucket);
        }

        rt->from = NULL;
        dst_release(&from->dst);
}

static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
                           int how)
{
        struct rt6_info *rt = (struct rt6_info *)dst;
        struct inet6_dev *idev = rt->rt6i_idev;
        struct net_device *loopback_dev =
                dev_net(dev)->loopback_dev;

        if (idev && idev->dev != loopback_dev) {
                struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev);
                if (loopback_idev) {
                        rt->rt6i_idev = loopback_idev;
                        in6_dev_put(idev);
                }
        }
}
static bool __rt6_check_expired(const struct rt6_info *rt)
{
        if (rt->rt6i_flags & RTF_EXPIRES)
                return time_after(jiffies, rt->dst.expires);
        else
                return false;
}

static bool rt6_check_expired(const struct rt6_info *rt)
{
        if (rt->rt6i_flags & RTF_EXPIRES) {
                if (time_after(jiffies, rt->dst.expires))
                        return true;
        } else if (rt->from) {
                return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
                        rt6_check_expired(rt->from);
        }
        return false;
}
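/* Pick one route out of a set of ECMP siblings.  fl6->mp_hash selects the
 * sibling; a sibling is skipped when its nexthop is dead or link-down, or
 * when it scores negatively for this lookup.
 */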
static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
                                             struct flowi6 *fl6, int oif,
                                             int strict)
{
        struct rt6_info *sibling, *next_sibling;
        int route_choosen;

        /* We might have already computed the hash for ICMPv6 errors. In such
         * case it will always be non-zero. Otherwise now is the time to do it.
         */
        if (!fl6->mp_hash)
                fl6->mp_hash = rt6_multipath_hash(fl6, NULL);

        route_choosen = fl6->mp_hash % (match->rt6i_nsiblings + 1);
        /* Don't change the route, if route_choosen == 0
         * (siblings does not include ourself)
         */
        if (route_choosen)
                list_for_each_entry_safe(sibling, next_sibling,
                                &match->rt6i_siblings, rt6i_siblings) {
                        route_choosen--;
                        if (route_choosen == 0) {
                                struct inet6_dev *idev = sibling->rt6i_idev;

                                if (sibling->rt6i_nh_flags & RTNH_F_DEAD)
                                        break;
                                if (sibling->rt6i_nh_flags & RTNH_F_LINKDOWN &&
                                    idev->cnf.ignore_routes_with_linkdown)
                                        break;
                                if (rt6_score_route(sibling, oif, strict) < 0)
                                        break;
                                match = sibling;
                                break;
                        }
                }
        return match;
}
/*
 *	Route lookup. rcu_read_lock() should be held.
 */

static inline struct rt6_info *rt6_device_match(struct net *net,
                                                struct rt6_info *rt,
                                                const struct in6_addr *saddr,
                                                int oif,
                                                int flags)
{
        struct rt6_info *local = NULL;
        struct rt6_info *sprt;

        if (!oif && ipv6_addr_any(saddr) && !(rt->rt6i_nh_flags & RTNH_F_DEAD))
                return rt;

        for (sprt = rt; sprt; sprt = rcu_dereference(sprt->rt6_next)) {
                struct net_device *dev = sprt->dst.dev;

                if (sprt->rt6i_nh_flags & RTNH_F_DEAD)
                        continue;

                if (oif) {
                        if (dev->ifindex == oif)
                                return sprt;
                        if (dev->flags & IFF_LOOPBACK) {
                                if (!sprt->rt6i_idev ||
                                    sprt->rt6i_idev->dev->ifindex != oif) {
                                        if (flags & RT6_LOOKUP_F_IFACE)
                                                continue;
                                        if (local &&
                                            local->rt6i_idev->dev->ifindex == oif)
                                                continue;
                                }
                                local = sprt;
                        }
                } else {
                        if (ipv6_chk_addr(net, saddr, dev,
                                          flags & RT6_LOOKUP_F_IFACE))
                                return sprt;
                }
        }

        if (oif) {
                if (local)
                        return local;

                if (flags & RT6_LOOKUP_F_IFACE)
                        return net->ipv6.ip6_null_entry;
        }

        return rt->rt6i_nh_flags & RTNH_F_DEAD ? net->ipv6.ip6_null_entry : rt;
}
#ifdef CONFIG_IPV6_ROUTER_PREF
struct __rt6_probe_work {
        struct work_struct work;
        struct in6_addr target;
        struct net_device *dev;
};

static void rt6_probe_deferred(struct work_struct *w)
{
        struct in6_addr mcaddr;
        struct __rt6_probe_work *work =
                container_of(w, struct __rt6_probe_work, work);

        addrconf_addr_solict_mult(&work->target, &mcaddr);
        ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
        dev_put(work->dev);
        kfree(work);
}

static void rt6_probe(struct rt6_info *rt)
{
        struct __rt6_probe_work *work;
        struct neighbour *neigh;
        /*
         * Okay, this does not seem to be appropriate
         * for now, however, we need to check if it
         * is really so; aka Router Reachability Probing.
         *
         * Router Reachability Probe MUST be rate-limited
         * to no more than one per minute.
         */
        if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
                return;
        rcu_read_lock_bh();
        neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
        if (neigh) {
                if (neigh->nud_state & NUD_VALID)
                        goto out;

                work = NULL;
                write_lock(&neigh->lock);
                if (!(neigh->nud_state & NUD_VALID) &&
                    time_after(jiffies,
                               neigh->updated +
                               rt->rt6i_idev->cnf.rtr_probe_interval)) {
                        work = kmalloc(sizeof(*work), GFP_ATOMIC);
                        if (work)
                                __neigh_set_probe_once(neigh);
                }
                write_unlock(&neigh->lock);
        } else {
                work = kmalloc(sizeof(*work), GFP_ATOMIC);
        }

        if (work) {
                INIT_WORK(&work->work, rt6_probe_deferred);
                work->target = rt->rt6i_gateway;
                dev_hold(rt->dst.dev);
                work->dev = rt->dst.dev;
                schedule_work(&work->work);
        }

out:
        rcu_read_unlock_bh();
}
#else
static inline void rt6_probe(struct rt6_info *rt)
{
}
#endif
/*
 *	Default Router Selection (RFC 2461 6.3.6)
 */
static inline int rt6_check_dev(struct rt6_info *rt, int oif)
{
        struct net_device *dev = rt->dst.dev;
        if (!oif || dev->ifindex == oif)
                return 2;
        if ((dev->flags & IFF_LOOPBACK) &&
            rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
                return 1;
        return 0;
}

static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt)
{
        struct neighbour *neigh;
        enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;

        if (rt->rt6i_flags & RTF_NONEXTHOP ||
            !(rt->rt6i_flags & RTF_GATEWAY))
                return RT6_NUD_SUCCEED;

        rcu_read_lock_bh();
        neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
        if (neigh) {
                read_lock(&neigh->lock);
                if (neigh->nud_state & NUD_VALID)
                        ret = RT6_NUD_SUCCEED;
#ifdef CONFIG_IPV6_ROUTER_PREF
                else if (!(neigh->nud_state & NUD_FAILED))
                        ret = RT6_NUD_SUCCEED;
                else
                        ret = RT6_NUD_FAIL_PROBE;
#endif
                read_unlock(&neigh->lock);
        } else {
                ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
                      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
        }
        rcu_read_unlock_bh();

        return ret;
}

static int rt6_score_route(struct rt6_info *rt, int oif,
                           int strict)
{
        int m;

        m = rt6_check_dev(rt, oif);
        if (!m && (strict & RT6_LOOKUP_F_IFACE))
                return RT6_NUD_FAIL_HARD;
#ifdef CONFIG_IPV6_ROUTER_PREF
        m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
#endif
        if (strict & RT6_LOOKUP_F_REACHABLE) {
                int n = rt6_check_neigh(rt);
                if (n < 0)
                        return n;
        }
        return m;
}
static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
                                   int *mpri, struct rt6_info *match,
                                   bool *do_rr)
{
        int m;
        bool match_do_rr = false;
        struct inet6_dev *idev = rt->rt6i_idev;

        if (rt->rt6i_nh_flags & RTNH_F_DEAD)
                goto out;

        if (idev->cnf.ignore_routes_with_linkdown &&
            rt->rt6i_nh_flags & RTNH_F_LINKDOWN &&
            !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
                goto out;

        if (rt6_check_expired(rt))
                goto out;

        m = rt6_score_route(rt, oif, strict);
        if (m == RT6_NUD_FAIL_DO_RR) {
                match_do_rr = true;
                m = 0; /* lowest valid score */
        } else if (m == RT6_NUD_FAIL_HARD) {
                goto out;
        }

        if (strict & RT6_LOOKUP_F_REACHABLE)
                rt6_probe(rt);

        /* note that m can be RT6_NUD_FAIL_PROBE at this point */
        if (m > *mpri) {
                *do_rr = match_do_rr;
                *mpri = m;
                match = rt;
        }
out:
        return match;
}
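/* Scan the routes that share rr_head's metric, starting the walk at
 * rr_head for round-robin fairness and wrapping around via the leaf.
 * Routes with a different metric (remembered in 'cont') are only
 * considered if nothing in the current metric group matched.
 */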
static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
                                     struct rt6_info *leaf,
                                     struct rt6_info *rr_head,
                                     u32 metric, int oif, int strict,
                                     bool *do_rr)
{
        struct rt6_info *rt, *match, *cont;
        int mpri = -1;

        match = NULL;
        cont = NULL;
        for (rt = rr_head; rt; rt = rcu_dereference(rt->rt6_next)) {
                if (rt->rt6i_metric != metric) {
                        cont = rt;
                        break;
                }

                match = find_match(rt, oif, strict, &mpri, match, do_rr);
        }

        for (rt = leaf; rt && rt != rr_head;
             rt = rcu_dereference(rt->rt6_next)) {
                if (rt->rt6i_metric != metric) {
                        cont = rt;
                        break;
                }

                match = find_match(rt, oif, strict, &mpri, match, do_rr);
        }

        if (match || !cont)
                return match;

        for (rt = cont; rt; rt = rcu_dereference(rt->rt6_next))
                match = find_match(rt, oif, strict, &mpri, match, do_rr);

        return match;
}
static struct rt6_info *rt6_select(struct net *net, struct fib6_node *fn,
                                   int oif, int strict)
{
        struct rt6_info *leaf = rcu_dereference(fn->leaf);
        struct rt6_info *match, *rt0;
        bool do_rr = false;
        int key_plen;

        if (!leaf || leaf == net->ipv6.ip6_null_entry)
                return net->ipv6.ip6_null_entry;

        rt0 = rcu_dereference(fn->rr_ptr);
        if (!rt0)
                rt0 = leaf;

        /* Double check to make sure fn is not an intermediate node
         * and fn->leaf does not point to its child's leaf
         * (This might happen if all routes under fn are deleted from
         * the tree and fib6_repair_tree() is called on the node.)
         */
        key_plen = rt0->rt6i_dst.plen;
#ifdef CONFIG_IPV6_SUBTREES
        if (rt0->rt6i_src.plen)
                key_plen = rt0->rt6i_src.plen;
#endif
        if (fn->fn_bit != key_plen)
                return net->ipv6.ip6_null_entry;

        match = find_rr_leaf(fn, leaf, rt0, rt0->rt6i_metric, oif, strict,
                             &do_rr);

        if (do_rr) {
                struct rt6_info *next = rcu_dereference(rt0->rt6_next);

                /* no entries matched; do round-robin */
                if (!next || next->rt6i_metric != rt0->rt6i_metric)
                        next = leaf;

                if (next != rt0) {
                        spin_lock_bh(&leaf->rt6i_table->tb6_lock);
                        /* make sure next is not being deleted from the tree */
                        if (next->rt6i_node)
                                rcu_assign_pointer(fn->rr_ptr, next);
                        spin_unlock_bh(&leaf->rt6i_table->tb6_lock);
                }
        }

        return match ? match : net->ipv6.ip6_null_entry;
}
static bool rt6_is_gw_or_nonexthop(const struct rt6_info *rt)
{
        return (rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY));
}

#ifdef CONFIG_IPV6_ROUTE_INFO
int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
                  const struct in6_addr *gwaddr)
{
        struct net *net = dev_net(dev);
        struct route_info *rinfo = (struct route_info *) opt;
        struct in6_addr prefix_buf, *prefix;
        unsigned int pref;
        unsigned long lifetime;
        struct rt6_info *rt;

        if (len < sizeof(struct route_info)) {
                return -EINVAL;
        }

        /* Sanity check for prefix_len and length */
        if (rinfo->length > 3) {
                return -EINVAL;
        } else if (rinfo->prefix_len > 128) {
                return -EINVAL;
        } else if (rinfo->prefix_len > 64) {
                if (rinfo->length < 2) {
                        return -EINVAL;
                }
        } else if (rinfo->prefix_len > 0) {
                if (rinfo->length < 1) {
                        return -EINVAL;
                }
        }

        pref = rinfo->route_pref;
        if (pref == ICMPV6_ROUTER_PREF_INVALID)
                return -EINVAL;

        lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);

        if (rinfo->length == 3)
                prefix = (struct in6_addr *)rinfo->prefix;
        else {
                /* this function is safe */
                ipv6_addr_prefix(&prefix_buf,
                                 (struct in6_addr *)rinfo->prefix,
                                 rinfo->prefix_len);
                prefix = &prefix_buf;
        }

        if (rinfo->prefix_len == 0)
                rt = rt6_get_dflt_router(gwaddr, dev);
        else
                rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
                                        gwaddr, dev);

        if (rt && !lifetime) {
                ip6_del_rt(rt);
                rt = NULL;
        }

        if (!rt && lifetime)
                rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
                                        dev, pref);
        else if (rt)
                rt->rt6i_flags = RTF_ROUTEINFO |
                                 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);

        if (rt) {
                if (!addrconf_finite_timeout(lifetime))
                        rt6_clean_expires(rt);
                else
                        rt6_set_expires(rt, jiffies + HZ * lifetime);

                ip6_rt_put(rt);
        }
        return 0;
}
#endif
static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
                                        struct in6_addr *saddr)
{
        struct fib6_node *pn, *sn;
        while (1) {
                if (fn->fn_flags & RTN_TL_ROOT)
                        return NULL;
                pn = rcu_dereference(fn->parent);
                sn = FIB6_SUBTREE(pn);
                if (sn && sn != fn)
                        fn = fib6_lookup(sn, NULL, saddr);
                else
                        fn = pn;
                if (fn->fn_flags & RTN_RTINFO)
                        return fn;
        }
}
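/* Try to take a reference on the route.  If its refcount already dropped
 * to zero (the dst is being destroyed), optionally fall back to a held
 * ip6_null_entry so the caller always gets a usable route.
 */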
static bool ip6_hold_safe(struct net *net, struct rt6_info **prt,
                          bool null_fallback)
{
        struct rt6_info *rt = *prt;

        if (dst_hold_safe(&rt->dst))
                return true;
        if (net && null_fallback) {
                rt = net->ipv6.ip6_null_entry;
                dst_hold(&rt->dst);
        } else {
                rt = NULL;
        }
        *prt = rt;
        return false;
}
static struct rt6_info *ip6_pol_route_lookup(struct net *net,
                                             struct fib6_table *table,
                                             struct flowi6 *fl6, int flags)
{
        struct rt6_info *rt, *rt_cache;
        struct fib6_node *fn;

        rcu_read_lock();
        fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
        rt = rcu_dereference(fn->leaf);
        if (!rt) {
                rt = net->ipv6.ip6_null_entry;
        } else {
                rt = rt6_device_match(net, rt, &fl6->saddr,
                                      fl6->flowi6_oif, flags);
                if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0)
                        rt = rt6_multipath_select(rt, fl6,
                                                  fl6->flowi6_oif, flags);
        }
        if (rt == net->ipv6.ip6_null_entry) {
                fn = fib6_backtrack(fn, &fl6->saddr);
                if (fn)
                        goto restart;
        }
        /* Search through exception table */
        rt_cache = rt6_find_cached_rt(rt, &fl6->daddr, &fl6->saddr);
        if (rt_cache)
                rt = rt_cache;

        if (ip6_hold_safe(net, &rt, true))
                dst_use_noref(&rt->dst, jiffies);

        rcu_read_unlock();

        trace_fib6_table_lookup(net, rt, table, fl6);

        return rt;
}

struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
                                   int flags)
{
        return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup);
}
EXPORT_SYMBOL_GPL(ip6_route_lookup);

struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
                            const struct in6_addr *saddr, int oif, int strict)
{
        struct flowi6 fl6 = {
                .flowi6_oif = oif,
                .daddr = *daddr,
        };
        struct dst_entry *dst;
        int flags = strict ? RT6_LOOKUP_F_IFACE : 0;

        if (saddr) {
                memcpy(&fl6.saddr, saddr, sizeof(*saddr));
                flags |= RT6_LOOKUP_F_HAS_SADDR;
        }

        dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup);
        if (dst->error == 0)
                return (struct rt6_info *) dst;

        dst_release(dst);

        return NULL;
}
EXPORT_SYMBOL(rt6_lookup);
/* ip6_ins_rt is called with FREE table->tb6_lock.
 * It takes a new route entry; if the addition fails for any reason, the
 * route is released.
 * Caller must hold dst before calling it.
 */

static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info,
                        struct mx6_config *mxc,
                        struct netlink_ext_ack *extack)
{
        int err;
        struct fib6_table *table;

        table = rt->rt6i_table;
        spin_lock_bh(&table->tb6_lock);
        err = fib6_add(&table->tb6_root, rt, info, mxc, extack);
        spin_unlock_bh(&table->tb6_lock);

        return err;
}

int ip6_ins_rt(struct rt6_info *rt)
{
        struct nl_info info = { .nl_net = dev_net(rt->dst.dev), };
        struct mx6_config mxc = { .mx = NULL, };

        /* Hold dst to account for the reference from the fib6 tree */
        dst_hold(&rt->dst);
        return __ip6_ins_rt(rt, &info, &mxc, NULL);
}
/* called with rcu_lock held */
static struct net_device *ip6_rt_get_dev_rcu(struct rt6_info *rt)
{
        struct net_device *dev = rt->dst.dev;

        if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) {
                /* for copies of local routes, dst->dev needs to be the
                 * device if it is a master device, the master device if
                 * device is enslaved, and the loopback as the default
                 */
                if (netif_is_l3_slave(dev) &&
                    !rt6_need_strict(&rt->rt6i_dst.addr))
                        dev = l3mdev_master_dev_rcu(dev);
                else if (!netif_is_l3_master(dev))
                        dev = dev_net(dev)->loopback_dev;
                /* last case is netif_is_l3_master(dev) is true in which
                 * case we want dev returned to be dev
                 */
        }

        return dev;
}
static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
                                           const struct in6_addr *daddr,
                                           const struct in6_addr *saddr)
{
        struct net_device *dev;
        struct rt6_info *rt;

        /*
         *	Clone the route.
         */

        if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
                ort = ort->from;

        rcu_read_lock();
        dev = ip6_rt_get_dev_rcu(ort);
        rt = __ip6_dst_alloc(dev_net(dev), dev, 0);
        rcu_read_unlock();
        if (!rt)
                return NULL;

        ip6_rt_copy_init(rt, ort);
        rt->rt6i_flags |= RTF_CACHE;
        rt->rt6i_metric = 0;
        rt->dst.flags |= DST_HOST;
        rt->rt6i_dst.addr = *daddr;
        rt->rt6i_dst.plen = 128;

        if (!rt6_is_gw_or_nonexthop(ort)) {
                if (ort->rt6i_dst.plen != 128 &&
                    ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
                        rt->rt6i_flags |= RTF_ANYCAST;
#ifdef CONFIG_IPV6_SUBTREES
                if (rt->rt6i_src.plen && saddr) {
                        rt->rt6i_src.addr = *saddr;
                        rt->rt6i_src.plen = 128;
                }
#endif
        }

        return rt;
}

static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
{
        struct net_device *dev;
        struct rt6_info *pcpu_rt;

        rcu_read_lock();
        dev = ip6_rt_get_dev_rcu(rt);
        pcpu_rt = __ip6_dst_alloc(dev_net(dev), dev, rt->dst.flags);
        rcu_read_unlock();
        if (!pcpu_rt)
                return NULL;
        ip6_rt_copy_init(pcpu_rt, rt);
        pcpu_rt->rt6i_protocol = rt->rt6i_protocol;
        pcpu_rt->rt6i_flags |= RTF_PCPU;
        return pcpu_rt;
}
/* It should be called with rcu_read_lock() acquired */
static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
{
        struct rt6_info *pcpu_rt, **p;

        p = this_cpu_ptr(rt->rt6i_pcpu);
        pcpu_rt = *p;

        if (pcpu_rt && ip6_hold_safe(NULL, &pcpu_rt, false))
                rt6_dst_from_metrics_check(pcpu_rt);

        return pcpu_rt;
}

static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
{
        struct rt6_info *pcpu_rt, *prev, **p;

        pcpu_rt = ip6_rt_pcpu_alloc(rt);
        if (!pcpu_rt) {
                struct net *net = dev_net(rt->dst.dev);

                dst_hold(&net->ipv6.ip6_null_entry->dst);
                return net->ipv6.ip6_null_entry;
        }

        dst_hold(&pcpu_rt->dst);
        p = this_cpu_ptr(rt->rt6i_pcpu);
        prev = cmpxchg(p, NULL, pcpu_rt);
        BUG_ON(prev);

        rt6_dst_from_metrics_check(pcpu_rt);
        return pcpu_rt;
}
/* exception hash table implementation
 */
static DEFINE_SPINLOCK(rt6_exception_lock);
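/* Each fib6 route may own an array of FIB6_EXCEPTION_BUCKET_SIZE hash
 * chains, indexed by rt6_exception_hash() over the destination (and, with
 * CONFIG_IPV6_SUBTREES, the source) address.  The chains hold RTF_CACHE
 * clones created for redirects and path-MTU exceptions.  Insertion and
 * removal are serialized by rt6_exception_lock; lookups run under
 * rcu_read_lock().
 */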
/* Remove rt6_ex from hash table and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
                                 struct rt6_exception *rt6_ex)
{
        struct net *net;

        if (!bucket || !rt6_ex)
                return;

        net = dev_net(rt6_ex->rt6i->dst.dev);
        rt6_ex->rt6i->rt6i_node = NULL;
        hlist_del_rcu(&rt6_ex->hlist);
        rt6_release(rt6_ex->rt6i);
        kfree_rcu(rt6_ex, rcu);
        WARN_ON_ONCE(!bucket->depth);
        bucket->depth--;
        net->ipv6.rt6_stats->fib_rt_cache--;
}

/* Remove oldest rt6_ex in bucket and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
{
        struct rt6_exception *rt6_ex, *oldest = NULL;

        if (!bucket)
                return;

        hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
                if (!oldest || time_before(rt6_ex->stamp, oldest->stamp))
                        oldest = rt6_ex;
        }
        rt6_remove_exception(bucket, oldest);
}

static u32 rt6_exception_hash(const struct in6_addr *dst,
                              const struct in6_addr *src)
{
        static u32 seed __read_mostly;
        u32 val;

        net_get_random_once(&seed, sizeof(seed));
        val = jhash(dst, sizeof(*dst), seed);

#ifdef CONFIG_IPV6_SUBTREES
        if (src)
                val = jhash(src, sizeof(*src), val);
#endif
        return hash_32(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
}
/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rt6_exception_lock
 */
static struct rt6_exception *
__rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,
                              const struct in6_addr *daddr,
                              const struct in6_addr *saddr)
{
        struct rt6_exception *rt6_ex;
        u32 hval;

        if (!(*bucket) || !daddr)
                return NULL;

        hval = rt6_exception_hash(daddr, saddr);
        *bucket += hval;

        hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) {
                struct rt6_info *rt6 = rt6_ex->rt6i;
                bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
                if (matched && saddr)
                        matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
                if (matched)
                        return rt6_ex;
        }
        return NULL;
}

/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rcu_read_lock()
 */
static struct rt6_exception *
__rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
                         const struct in6_addr *daddr,
                         const struct in6_addr *saddr)
{
        struct rt6_exception *rt6_ex;
        u32 hval;

        WARN_ON_ONCE(!rcu_read_lock_held());

        if (!(*bucket) || !daddr)
                return NULL;

        hval = rt6_exception_hash(daddr, saddr);
        *bucket += hval;

        hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) {
                struct rt6_info *rt6 = rt6_ex->rt6i;
                bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
                if (matched && saddr)
                        matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
                if (matched)
                        return rt6_ex;
        }
        return NULL;
}
static int rt6_insert_exception(struct rt6_info *nrt,
                                struct rt6_info *ort)
{
        struct net *net = dev_net(ort->dst.dev);
        struct rt6_exception_bucket *bucket;
        struct in6_addr *src_key = NULL;
        struct rt6_exception *rt6_ex;
        int err = 0;

        /* ort can't be a cache or pcpu route */
        if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
                ort = ort->from;
        WARN_ON_ONCE(ort->rt6i_flags & (RTF_CACHE | RTF_PCPU));

        spin_lock_bh(&rt6_exception_lock);

        if (ort->exception_bucket_flushed) {
                err = -EINVAL;
                goto out;
        }

        bucket = rcu_dereference_protected(ort->rt6i_exception_bucket,
                                        lockdep_is_held(&rt6_exception_lock));
        if (!bucket) {
                bucket = kcalloc(FIB6_EXCEPTION_BUCKET_SIZE, sizeof(*bucket),
                                 GFP_ATOMIC);
                if (!bucket) {
                        err = -ENOMEM;
                        goto out;
                }
                rcu_assign_pointer(ort->rt6i_exception_bucket, bucket);
        }

#ifdef CONFIG_IPV6_SUBTREES
        /* rt6i_src.plen != 0 indicates ort is in subtree
         * and exception table is indexed by a hash of
         * both rt6i_dst and rt6i_src.
         * Otherwise, the exception table is indexed by
         * a hash of only rt6i_dst.
         */
        if (ort->rt6i_src.plen)
                src_key = &nrt->rt6i_src.addr;
#endif

        /* Update rt6i_prefsrc as it could be changed
         * in rt6_remove_prefsrc()
         */
        nrt->rt6i_prefsrc = ort->rt6i_prefsrc;
        /* rt6_mtu_change() might lower mtu on ort.
         * Only insert this exception route if its mtu
         * is less than ort's mtu value.
         */
        if (nrt->rt6i_pmtu >= dst_mtu(&ort->dst)) {
                err = -EINVAL;
                goto out;
        }

        rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr,
                                               src_key);
        if (rt6_ex)
                rt6_remove_exception(bucket, rt6_ex);

        rt6_ex = kzalloc(sizeof(*rt6_ex), GFP_ATOMIC);
        if (!rt6_ex) {
                err = -ENOMEM;
                goto out;
        }
        rt6_ex->rt6i = nrt;
        rt6_ex->stamp = jiffies;
        atomic_inc(&nrt->rt6i_ref);
        nrt->rt6i_node = ort->rt6i_node;
        hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain);
        bucket->depth++;
        net->ipv6.rt6_stats->fib_rt_cache++;

        if (bucket->depth > FIB6_MAX_DEPTH)
                rt6_exception_remove_oldest(bucket);

out:
        spin_unlock_bh(&rt6_exception_lock);

        /* Update fn->fn_sernum to invalidate all cached dst */
        if (!err) {
                spin_lock_bh(&ort->rt6i_table->tb6_lock);
                fib6_update_sernum(ort);
                spin_unlock_bh(&ort->rt6i_table->tb6_lock);
                fib6_force_start_gc(net);
        }

        return err;
}
void rt6_flush_exceptions(struct rt6_info *rt)
{
        struct rt6_exception_bucket *bucket;
        struct rt6_exception *rt6_ex;
        struct hlist_node *tmp;
        int i;

        spin_lock_bh(&rt6_exception_lock);
        /* Prevent rt6_insert_exception() from recreating the bucket list */
        rt->exception_bucket_flushed = 1;

        bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
                                    lockdep_is_held(&rt6_exception_lock));
        if (!bucket)
                goto out;

        for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
                hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist)
                        rt6_remove_exception(bucket, rt6_ex);
                WARN_ON_ONCE(bucket->depth);
                bucket++;
        }

out:
        spin_unlock_bh(&rt6_exception_lock);
}
/* Find cached rt in the hash table inside passed in rt
 * Caller has to hold rcu_read_lock()
 */
static struct rt6_info *rt6_find_cached_rt(struct rt6_info *rt,
                                           struct in6_addr *daddr,
                                           struct in6_addr *saddr)
{
        struct rt6_exception_bucket *bucket;
        struct in6_addr *src_key = NULL;
        struct rt6_exception *rt6_ex;
        struct rt6_info *res = NULL;

        bucket = rcu_dereference(rt->rt6i_exception_bucket);

#ifdef CONFIG_IPV6_SUBTREES
        /* rt6i_src.plen != 0 indicates rt is in subtree
         * and exception table is indexed by a hash of
         * both rt6i_dst and rt6i_src.
         * Otherwise, the exception table is indexed by
         * a hash of only rt6i_dst.
         */
        if (rt->rt6i_src.plen)
                src_key = saddr;
#endif
        rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);

        if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
                res = rt6_ex->rt6i;

        return res;
}

/* Remove the passed in cached rt from the hash table that contains it */
int rt6_remove_exception_rt(struct rt6_info *rt)
{
        struct rt6_exception_bucket *bucket;
        struct rt6_info *from = rt->from;
        struct in6_addr *src_key = NULL;
        struct rt6_exception *rt6_ex;
        int err;

        if (!from ||
            !(rt->rt6i_flags & RTF_CACHE))
                return -EINVAL;

        if (!rcu_access_pointer(from->rt6i_exception_bucket))
                return -ENOENT;

        spin_lock_bh(&rt6_exception_lock);
        bucket = rcu_dereference_protected(from->rt6i_exception_bucket,
                                    lockdep_is_held(&rt6_exception_lock));
#ifdef CONFIG_IPV6_SUBTREES
        /* rt6i_src.plen != 0 indicates 'from' is in subtree
         * and exception table is indexed by a hash of
         * both rt6i_dst and rt6i_src.
         * Otherwise, the exception table is indexed by
         * a hash of only rt6i_dst.
         */
        if (from->rt6i_src.plen)
                src_key = &rt->rt6i_src.addr;
#endif
        rt6_ex = __rt6_find_exception_spinlock(&bucket,
                                               &rt->rt6i_dst.addr,
                                               src_key);
        if (rt6_ex) {
                rt6_remove_exception(bucket, rt6_ex);
                err = 0;
        } else {
                err = -ENOENT;
        }

        spin_unlock_bh(&rt6_exception_lock);
        return err;
}
/* Find rt6_ex which contains the passed in rt cache and
 * refresh its stamp
 */
static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
{
        struct rt6_exception_bucket *bucket;
        struct rt6_info *from = rt->from;
        struct in6_addr *src_key = NULL;
        struct rt6_exception *rt6_ex;

        if (!from ||
            !(rt->rt6i_flags & RTF_CACHE))
                return;

        rcu_read_lock();
        bucket = rcu_dereference(from->rt6i_exception_bucket);

#ifdef CONFIG_IPV6_SUBTREES
        /* rt6i_src.plen != 0 indicates 'from' is in subtree
         * and exception table is indexed by a hash of
         * both rt6i_dst and rt6i_src.
         * Otherwise, the exception table is indexed by
         * a hash of only rt6i_dst.
         */
        if (from->rt6i_src.plen)
                src_key = &rt->rt6i_src.addr;
#endif
        rt6_ex = __rt6_find_exception_rcu(&bucket,
                                          &rt->rt6i_dst.addr,
                                          src_key);
        if (rt6_ex)
                rt6_ex->stamp = jiffies;

        rcu_read_unlock();
}

static void rt6_exceptions_remove_prefsrc(struct rt6_info *rt)
{
        struct rt6_exception_bucket *bucket;
        struct rt6_exception *rt6_ex;
        int i;

        bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
                                        lockdep_is_held(&rt6_exception_lock));

        if (bucket) {
                for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
                        hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
                                rt6_ex->rt6i->rt6i_prefsrc.plen = 0;
                        }
                        bucket++;
                }
        }
}
static void rt6_exceptions_update_pmtu(struct rt6_info *rt, int mtu)
{
        struct rt6_exception_bucket *bucket;
        struct rt6_exception *rt6_ex;
        int i;

        bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
                                        lockdep_is_held(&rt6_exception_lock));

        if (bucket) {
                for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
                        hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
                                struct rt6_info *entry = rt6_ex->rt6i;
                                /* For RTF_CACHE with rt6i_pmtu == 0
                                 * (i.e. a redirected route),
                                 * the metrics of its rt->dst.from has already
                                 * been updated.
                                 */
                                if (entry->rt6i_pmtu && entry->rt6i_pmtu > mtu)
                                        entry->rt6i_pmtu = mtu;
                        }
                        bucket++;
                }
        }
}

#define RTF_CACHE_GATEWAY	(RTF_GATEWAY | RTF_CACHE)

static void rt6_exceptions_clean_tohost(struct rt6_info *rt,
                                        struct in6_addr *gateway)
{
        struct rt6_exception_bucket *bucket;
        struct rt6_exception *rt6_ex;
        struct hlist_node *tmp;
        int i;

        if (!rcu_access_pointer(rt->rt6i_exception_bucket))
                return;

        spin_lock_bh(&rt6_exception_lock);
        bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
                                     lockdep_is_held(&rt6_exception_lock));

        if (bucket) {
                for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
                        hlist_for_each_entry_safe(rt6_ex, tmp,
                                                  &bucket->chain, hlist) {
                                struct rt6_info *entry = rt6_ex->rt6i;

                                if ((entry->rt6i_flags & RTF_CACHE_GATEWAY) ==
                                    RTF_CACHE_GATEWAY &&
                                    ipv6_addr_equal(gateway,
                                                    &entry->rt6i_gateway)) {
                                        rt6_remove_exception(bucket, rt6_ex);
                                }
                        }
                        bucket++;
                }
        }

        spin_unlock_bh(&rt6_exception_lock);
}
static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
                                      struct rt6_exception *rt6_ex,
                                      struct fib6_gc_args *gc_args,
                                      unsigned long now)
{
        struct rt6_info *rt = rt6_ex->rt6i;

        /* we are pruning and obsoleting aged-out and non gateway exceptions
         * even if others still have references to them, so that on next
         * dst_check() such references can be dropped.
         * EXPIRES exceptions - e.g. pmtu-generated ones are pruned when
         * expired, independently from their aging, as per RFC 8201 section 4
         */
        if (!(rt->rt6i_flags & RTF_EXPIRES) &&
            time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
                RT6_TRACE("aging clone %p\n", rt);
                rt6_remove_exception(bucket, rt6_ex);
                return;
        } else if (rt->rt6i_flags & RTF_GATEWAY) {
                struct neighbour *neigh;
                __u8 neigh_flags = 0;

                neigh = dst_neigh_lookup(&rt->dst, &rt->rt6i_gateway);
                if (neigh) {
                        neigh_flags = neigh->flags;
                        neigh_release(neigh);
                }
                if (!(neigh_flags & NTF_ROUTER)) {
                        RT6_TRACE("purging route %p via non-router but gateway\n",
                                  rt);
                        rt6_remove_exception(bucket, rt6_ex);
                        return;
                }
        } else if (__rt6_check_expired(rt)) {
                RT6_TRACE("purging expired route %p\n", rt);
                rt6_remove_exception(bucket, rt6_ex);
                return;
        }
        gc_args->more++;
}

void rt6_age_exceptions(struct rt6_info *rt,
                        struct fib6_gc_args *gc_args,
                        unsigned long now)
{
        struct rt6_exception_bucket *bucket;
        struct rt6_exception *rt6_ex;
        struct hlist_node *tmp;
        int i;

        if (!rcu_access_pointer(rt->rt6i_exception_bucket))
                return;

        spin_lock_bh(&rt6_exception_lock);
        bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
                                    lockdep_is_held(&rt6_exception_lock));

        if (bucket) {
                for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
                        hlist_for_each_entry_safe(rt6_ex, tmp,
                                                  &bucket->chain, hlist) {
                                rt6_age_examine_exception(bucket, rt6_ex,
                                                          gc_args, now);
                        }
                        bucket++;
                }
        }
        spin_unlock_bh(&rt6_exception_lock);
}
struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
                               int oif, struct flowi6 *fl6, int flags)
{
        struct fib6_node *fn, *saved_fn;
        struct rt6_info *rt, *rt_cache;
        int strict = 0;

        strict |= flags & RT6_LOOKUP_F_IFACE;
        strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
        if (net->ipv6.devconf_all->forwarding == 0)
                strict |= RT6_LOOKUP_F_REACHABLE;

        rcu_read_lock();

        fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
        saved_fn = fn;

        if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
                oif = 0;

redo_rt6_select:
        rt = rt6_select(net, fn, oif, strict);
        if (rt->rt6i_nsiblings)
                rt = rt6_multipath_select(rt, fl6, oif, strict);
        if (rt == net->ipv6.ip6_null_entry) {
                fn = fib6_backtrack(fn, &fl6->saddr);
                if (fn)
                        goto redo_rt6_select;
                else if (strict & RT6_LOOKUP_F_REACHABLE) {
                        /* also consider unreachable route */
                        strict &= ~RT6_LOOKUP_F_REACHABLE;
                        fn = saved_fn;
                        goto redo_rt6_select;
                }
        }

        /* Search through exception table */
        rt_cache = rt6_find_cached_rt(rt, &fl6->daddr, &fl6->saddr);
        if (rt_cache)
                rt = rt_cache;

        if (rt == net->ipv6.ip6_null_entry) {
                rcu_read_unlock();
                dst_hold(&rt->dst);
                trace_fib6_table_lookup(net, rt, table, fl6);
                return rt;
        } else if (rt->rt6i_flags & RTF_CACHE) {
                if (ip6_hold_safe(net, &rt, true)) {
                        dst_use_noref(&rt->dst, jiffies);
                        rt6_dst_from_metrics_check(rt);
                }
                rcu_read_unlock();
                trace_fib6_table_lookup(net, rt, table, fl6);
                return rt;
        } else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
                            !(rt->rt6i_flags & RTF_GATEWAY))) {
                /* Create a RTF_CACHE clone which will not be
                 * owned by the fib6 tree.  It is for the special case where
                 * the daddr in the skb during the neighbor look-up is different
                 * from the fl6->daddr used to look-up route here.
                 */

                struct rt6_info *uncached_rt;

                if (ip6_hold_safe(net, &rt, true)) {
                        dst_use_noref(&rt->dst, jiffies);
                } else {
                        rcu_read_unlock();
                        uncached_rt = rt;
                        goto uncached_rt_out;
                }
                rcu_read_unlock();

                uncached_rt = ip6_rt_cache_alloc(rt, &fl6->daddr, NULL);
                dst_release(&rt->dst);

                if (uncached_rt) {
                        /* Uncached_rt's refcnt is taken during ip6_rt_cache_alloc()
                         * No need for another dst_hold()
                         */
                        rt6_uncached_list_add(uncached_rt);
                        atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
                } else {
                        uncached_rt = net->ipv6.ip6_null_entry;
                        dst_hold(&uncached_rt->dst);
                }

uncached_rt_out:
                trace_fib6_table_lookup(net, uncached_rt, table, fl6);
                return uncached_rt;

        } else {
                /* Get a percpu copy */

                struct rt6_info *pcpu_rt;

                dst_use_noref(&rt->dst, jiffies);
                local_bh_disable();
                pcpu_rt = rt6_get_pcpu_route(rt);

                if (!pcpu_rt) {
                        /* atomic_inc_not_zero() is needed when using rcu */
                        if (atomic_inc_not_zero(&rt->rt6i_ref)) {
                                /* No dst_hold() on rt is needed because grabbing
                                 * rt->rt6i_ref makes sure rt can't be released.
                                 */
                                pcpu_rt = rt6_make_pcpu_route(rt);
                                rt6_release(rt);
                        } else {
                                /* rt is already removed from tree */
                                pcpu_rt = net->ipv6.ip6_null_entry;
                                dst_hold(&pcpu_rt->dst);
                        }
                }
                local_bh_enable();
                rcu_read_unlock();
                trace_fib6_table_lookup(net, pcpu_rt, table, fl6);
                return pcpu_rt;
        }
}
EXPORT_SYMBOL_GPL(ip6_pol_route);
static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
                                            struct flowi6 *fl6, int flags)
{
        return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
}

struct dst_entry *ip6_route_input_lookup(struct net *net,
                                         struct net_device *dev,
                                         struct flowi6 *fl6, int flags)
{
        if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
                flags |= RT6_LOOKUP_F_IFACE;

        return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input);
}
EXPORT_SYMBOL_GPL(ip6_route_input_lookup);
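/* For ICMPv6 errors, hash on the addresses of the offending (inner)
 * packet when one can be pulled from the payload, so that the error is
 * steered onto the same ECMP path as the flow that triggered it.
 */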
static void ip6_multipath_l3_keys(const struct sk_buff *skb,
                                  struct flow_keys *keys)
{
        const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
        const struct ipv6hdr *key_iph = outer_iph;
        const struct ipv6hdr *inner_iph;
        const struct icmp6hdr *icmph;
        struct ipv6hdr _inner_iph;

        if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
                goto out;

        icmph = icmp6_hdr(skb);
        if (icmph->icmp6_type != ICMPV6_DEST_UNREACH &&
            icmph->icmp6_type != ICMPV6_PKT_TOOBIG &&
            icmph->icmp6_type != ICMPV6_TIME_EXCEED &&
            icmph->icmp6_type != ICMPV6_PARAMPROB)
                goto out;

        inner_iph = skb_header_pointer(skb,
                                       skb_transport_offset(skb) + sizeof(*icmph),
                                       sizeof(_inner_iph), &_inner_iph);
        if (!inner_iph)
                goto out;

        key_iph = inner_iph;
out:
        memset(keys, 0, sizeof(*keys));
        keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
        keys->addrs.v6addrs.src = key_iph->saddr;
        keys->addrs.v6addrs.dst = key_iph->daddr;
        keys->tags.flow_label = ip6_flowinfo(key_iph);
        keys->basic.ip_proto = key_iph->nexthdr;
}

/* if skb is set it will be used and fl6 can be NULL */
u32 rt6_multipath_hash(const struct flowi6 *fl6, const struct sk_buff *skb)
{
        struct flow_keys hash_keys;

        if (skb) {
                ip6_multipath_l3_keys(skb, &hash_keys);
                return flow_hash_from_keys(&hash_keys) >> 1;
        }

        return get_hash_from_flowi6(fl6) >> 1;
}
void ip6_route_input(struct sk_buff *skb)
{
        const struct ipv6hdr *iph = ipv6_hdr(skb);
        struct net *net = dev_net(skb->dev);
        int flags = RT6_LOOKUP_F_HAS_SADDR;
        struct ip_tunnel_info *tun_info;
        struct flowi6 fl6 = {
                .flowi6_iif = skb->dev->ifindex,
                .daddr = iph->daddr,
                .saddr = iph->saddr,
                .flowlabel = ip6_flowinfo(iph),
                .flowi6_mark = skb->mark,
                .flowi6_proto = iph->nexthdr,
        };

        tun_info = skb_tunnel_info(skb);
        if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
                fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
        if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6))
                fl6.mp_hash = rt6_multipath_hash(&fl6, skb);
        skb_dst_drop(skb);
        skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
}
static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
                                             struct flowi6 *fl6, int flags)
{
        return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
}

struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
                                         struct flowi6 *fl6, int flags)
{
        bool any_src;

        if (rt6_need_strict(&fl6->daddr)) {
                struct dst_entry *dst;

                dst = l3mdev_link_scope_lookup(net, fl6);
                if (dst)
                        return dst;
        }

        fl6->flowi6_iif = LOOPBACK_IFINDEX;

        any_src = ipv6_addr_any(&fl6->saddr);
        if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
            (fl6->flowi6_oif && any_src))
                flags |= RT6_LOOKUP_F_IFACE;

        if (!any_src)
                flags |= RT6_LOOKUP_F_HAS_SADDR;
        else if (sk)
                flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);

        return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
}
EXPORT_SYMBOL_GPL(ip6_route_output_flags);
struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
        struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
        struct net_device *loopback_dev = net->loopback_dev;
        struct dst_entry *new = NULL;

        rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
                       DST_OBSOLETE_DEAD, 0);
        if (rt) {
                rt6_info_init(rt);
                atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);

                new = &rt->dst;
                new->__use = 1;
                new->input = dst_discard;
                new->output = dst_discard_out;

                dst_copy_metrics(new, &ort->dst);

                rt->rt6i_idev = in6_dev_get(loopback_dev);
                rt->rt6i_gateway = ort->rt6i_gateway;
                rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
                rt->rt6i_metric = 0;

                memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
#ifdef CONFIG_IPV6_SUBTREES
                memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
#endif
        }

        dst_release(dst_orig);
        return new ? new : ERR_PTR(-ENOMEM);
}
/*
 *	Destination cache support functions
 */

static void rt6_dst_from_metrics_check(struct rt6_info *rt)
{
        if (rt->from &&
            dst_metrics_ptr(&rt->dst) != dst_metrics_ptr(&rt->from->dst))
                dst_init_metrics(&rt->dst, dst_metrics_ptr(&rt->from->dst), true);
}

static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
{
        u32 rt_cookie = 0;

        if (!rt6_get_cookie_safe(rt, &rt_cookie) || rt_cookie != cookie)
                return NULL;

        if (rt6_check_expired(rt))
                return NULL;

        return &rt->dst;
}

static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie)
{
        if (!__rt6_check_expired(rt) &&
            rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
            rt6_check(rt->from, cookie))
                return &rt->dst;
        else
                return NULL;
}

static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
{
        struct rt6_info *rt;

        rt = (struct rt6_info *) dst;

        /* All IPV6 dsts are created with ->obsolete set to the value
         * DST_OBSOLETE_FORCE_CHK which forces validation calls down
         * into this function always.
         */

        rt6_dst_from_metrics_check(rt);

        if (rt->rt6i_flags & RTF_PCPU ||
            (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->from))
                return rt6_dst_from_check(rt, cookie);
        else
                return rt6_check(rt, cookie);
}
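/* Called through dst_negative_advice() when a socket gives up on this
 * dst: drop expired RTF_CACHE exceptions, otherwise just release the
 * reference.
 */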
static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
{
        struct rt6_info *rt = (struct rt6_info *) dst;

        if (rt) {
                if (rt->rt6i_flags & RTF_CACHE) {
                        if (rt6_check_expired(rt)) {
                                rt6_remove_exception_rt(rt);
                                dst = NULL;
                        }
                } else {
                        dst_release(dst);
                        dst = NULL;
                }
        }
        return dst;
}

static void ip6_link_failure(struct sk_buff *skb)
{
        struct rt6_info *rt;

        icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);

        rt = (struct rt6_info *) skb_dst(skb);
        if (rt) {
                if (rt->rt6i_flags & RTF_CACHE) {
                        if (dst_hold_safe(&rt->dst))
                                rt6_remove_exception_rt(rt);
                } else {
                        struct fib6_node *fn;

                        rcu_read_lock();
                        fn = rcu_dereference(rt->rt6i_node);
                        if (fn && (rt->rt6i_flags & RTF_DEFAULT))
                                fn->fn_sernum = -1;
                        rcu_read_unlock();
                }
        }
}
static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
{
        struct net *net = dev_net(rt->dst.dev);

        rt->rt6i_flags |= RTF_MODIFIED;
        rt->rt6i_pmtu = mtu;
        rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
}

static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
{
        return !(rt->rt6i_flags & RTF_CACHE) &&
                (rt->rt6i_flags & RTF_PCPU ||
                 rcu_access_pointer(rt->rt6i_node));
}

static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
                                 const struct ipv6hdr *iph, u32 mtu)
{
        const struct in6_addr *daddr, *saddr;
        struct rt6_info *rt6 = (struct rt6_info *)dst;

        if (rt6->rt6i_flags & RTF_LOCAL)
                return;

        if (dst_metric_locked(dst, RTAX_MTU))
                return;

        if (iph) {
                daddr = &iph->daddr;
                saddr = &iph->saddr;
        } else if (sk) {
                daddr = &sk->sk_v6_daddr;
                saddr = &inet6_sk(sk)->saddr;
        } else {
                daddr = NULL;
                saddr = NULL;
        }
        dst_confirm_neigh(dst, daddr);
        mtu = max_t(u32, mtu, IPV6_MIN_MTU);
        if (mtu >= dst_mtu(dst))
                return;

        if (!rt6_cache_allowed_for_pmtu(rt6)) {
                rt6_do_update_pmtu(rt6, mtu);
                /* update rt6_ex->stamp for cache */
                if (rt6->rt6i_flags & RTF_CACHE)
                        rt6_update_exception_stamp_rt(rt6);
        } else if (daddr) {
                struct rt6_info *nrt6;

                nrt6 = ip6_rt_cache_alloc(rt6, daddr, saddr);
                if (nrt6) {
                        rt6_do_update_pmtu(nrt6, mtu);
                        if (rt6_insert_exception(nrt6, rt6))
                                dst_release_immediate(&nrt6->dst);
                }
        }
}

static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
                               struct sk_buff *skb, u32 mtu)
{
        __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
}
void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
                     int oif, u32 mark, kuid_t uid)
{
        const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
        struct dst_entry *dst;
        struct flowi6 fl6;

        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_oif = oif;
        fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
        fl6.daddr = iph->daddr;
        fl6.saddr = iph->saddr;
        fl6.flowlabel = ip6_flowinfo(iph);
        fl6.flowi6_uid = uid;

        dst = ip6_route_output(net, NULL, &fl6);
        if (!dst->error)
                __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
        dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_update_pmtu);

void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
{
        struct dst_entry *dst;

        ip6_update_pmtu(skb, sock_net(sk), mtu,
                        sk->sk_bound_dev_if, sk->sk_mark, sk->sk_uid);

        dst = __sk_dst_get(sk);
        if (!dst || !dst->obsolete ||
            dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
                return;

        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
                ip6_datagram_dst_update(sk, false);
        bh_unlock_sock(sk);
}
EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
/* Handle redirects */
struct ip6rd_flowi {
        struct flowi6 fl6;
        struct in6_addr gateway;
};

static struct rt6_info *__ip6_route_redirect(struct net *net,
                                             struct fib6_table *table,
                                             struct flowi6 *fl6,
                                             int flags)
{
        struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
        struct rt6_info *rt, *rt_cache;
        struct fib6_node *fn;

        /* Get the "current" route for this destination and
         * check if the redirect has come from appropriate router.
         *
         * RFC 4861 specifies that redirects should only be
         * accepted if they come from the nexthop to the target.
         * Due to the way the routes are chosen, this notion
         * is a bit fuzzy and one might need to check all possible
         * routes.
         */

        rcu_read_lock();
        fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
        for_each_fib6_node_rt_rcu(fn) {
                if (rt->rt6i_nh_flags & RTNH_F_DEAD)
                        continue;
                if (rt6_check_expired(rt))
                        continue;
                if (rt->dst.error)
                        break;
                if (!(rt->rt6i_flags & RTF_GATEWAY))
                        continue;
                if (fl6->flowi6_oif != rt->dst.dev->ifindex)
                        continue;
                /* rt_cache's gateway might be different from its 'parent'
                 * in the case of an ip redirect.
                 * So we keep searching in the exception table if the gateway
                 * is different.
                 */
                if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway)) {
                        rt_cache = rt6_find_cached_rt(rt,
                                                      &fl6->daddr,
                                                      &fl6->saddr);
                        if (rt_cache &&
                            ipv6_addr_equal(&rdfl->gateway,
                                            &rt_cache->rt6i_gateway)) {
                                rt = rt_cache;
                                break;
                        }
                        continue;
                }
                break;
        }

        if (!rt)
                rt = net->ipv6.ip6_null_entry;
        else if (rt->dst.error) {
                rt = net->ipv6.ip6_null_entry;
                goto out;
        }

        if (rt == net->ipv6.ip6_null_entry) {
                fn = fib6_backtrack(fn, &fl6->saddr);
                if (fn)
                        goto restart;
        }

out:
        ip6_hold_safe(net, &rt, true);

        rcu_read_unlock();

        trace_fib6_table_lookup(net, rt, table, fl6);
        return rt;
}
static struct dst_entry *ip6_route_redirect(struct net *net,
                                            const struct flowi6 *fl6,
                                            const struct in6_addr *gateway)
{
        int flags = RT6_LOOKUP_F_HAS_SADDR;
        struct ip6rd_flowi rdfl;

        rdfl.fl6 = *fl6;
        rdfl.gateway = *gateway;

        return fib6_rule_lookup(net, &rdfl.fl6,
                                flags, __ip6_route_redirect);
}

void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
                  kuid_t uid)
{
        const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
        struct dst_entry *dst;
        struct flowi6 fl6;

        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_iif = LOOPBACK_IFINDEX;
        fl6.flowi6_oif = oif;
        fl6.flowi6_mark = mark;
        fl6.daddr = iph->daddr;
        fl6.saddr = iph->saddr;
        fl6.flowlabel = ip6_flowinfo(iph);
        fl6.flowi6_uid = uid;

        dst = ip6_route_redirect(net, &fl6, &ipv6_hdr(skb)->saddr);
        rt6_do_redirect(dst, NULL, skb);
        dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_redirect);

void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
                            u32 mark)
{
        const struct ipv6hdr *iph = ipv6_hdr(skb);
        const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
        struct dst_entry *dst;
        struct flowi6 fl6;

        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_iif = LOOPBACK_IFINDEX;
        fl6.flowi6_oif = oif;
        fl6.flowi6_mark = mark;
        fl6.daddr = msg->dest;
        fl6.saddr = iph->daddr;
        fl6.flowi6_uid = sock_net_uid(net, NULL);

        dst = ip6_route_redirect(net, &fl6, &iph->saddr);
        rt6_do_redirect(dst, NULL, skb);
        dst_release(dst);
}

void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
        ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
                     sk->sk_uid);
}
EXPORT_SYMBOL_GPL(ip6_sk_redirect);
static unsigned int ip6_default_advmss(const struct dst_entry *dst)
{
        struct net_device *dev = dst->dev;
        unsigned int mtu = dst_mtu(dst);
        struct net *net = dev_net(dev);

        mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);

        if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
                mtu = net->ipv6.sysctl.ip6_rt_min_advmss;

        /*
         * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
         * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
         * IPV6_MAXPLEN is also valid and means: "any MSS,
         * rely only on pmtu discovery"
         */
        if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
                mtu = IPV6_MAXPLEN;
        return mtu;
}

static unsigned int ip6_mtu(const struct dst_entry *dst)
{
        const struct rt6_info *rt = (const struct rt6_info *)dst;
        unsigned int mtu = rt->rt6i_pmtu;
        struct inet6_dev *idev;

        if (mtu)
                goto out;

        mtu = dst_metric_raw(dst, RTAX_MTU);
        if (mtu)
                goto out;

        mtu = IPV6_MIN_MTU;

        rcu_read_lock();
        idev = __in6_dev_get(dst->dev);
        if (idev)
                mtu = idev->cnf.mtu6;
        rcu_read_unlock();

out:
        mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);

        return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}
struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
                                  struct flowi6 *fl6)
{
        struct dst_entry *dst;
        struct rt6_info *rt;
        struct inet6_dev *idev = in6_dev_get(dev);
        struct net *net = dev_net(dev);

        if (unlikely(!idev))
                return ERR_PTR(-ENODEV);

        rt = ip6_dst_alloc(net, dev, 0);
        if (unlikely(!rt)) {
                in6_dev_put(idev);
                dst = ERR_PTR(-ENOMEM);
                goto out;
        }

        rt->dst.flags |= DST_HOST;
        rt->dst.input = ip6_input;
        rt->dst.output = ip6_output;
        rt->rt6i_gateway = fl6->daddr;
        rt->rt6i_dst.addr = fl6->daddr;
        rt->rt6i_dst.plen = 128;
        rt->rt6i_idev = idev;
        dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);

        /* Add this dst into uncached_list so that rt6_disable_ip() can
         * do proper release of the net_device
         */
        rt6_uncached_list_add(rt);
        atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);

        dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);

out:
        return dst;
}
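/* dst garbage collector, invoked via ip6_dst_ops_template.gc when the
 * number of dst entries grows past gc_thresh.  Collection is rate-limited
 * by ip6_rt_gc_min_interval unless we are already above ip6_rt_max_size;
 * ip6_rt_gc_expire grows on every pass and decays by the configured
 * elasticity, making GC progressively more aggressive under sustained
 * pressure.
 */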
static int ip6_dst_gc(struct dst_ops *ops)
{
        struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
        int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
        int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
        int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
        int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
        unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
        int entries;

        entries = dst_entries_get_fast(ops);
        if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
            entries <= rt_max_size)
                goto out;

        net->ipv6.ip6_rt_gc_expire++;
        fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
        entries = dst_entries_get_slow(ops);
        if (entries < ops->gc_thresh)
                net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
out:
        net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
        return entries > rt_max_size;
}
static int ip6_convert_metrics(struct mx6_config *mxc,
                               const struct fib6_config *cfg)
{
        struct net *net = cfg->fc_nlinfo.nl_net;
        bool ecn_ca = false;
        struct nlattr *nla;
        int remaining;
        u32 *mp;

        if (!cfg->fc_mx)
                return 0;

        mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
        if (unlikely(!mp))
                return -ENOMEM;

        nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
                int type = nla_type(nla);
                u32 val;

                if (!type)
                        continue;
                if (unlikely(type > RTAX_MAX))
                        goto err;

                if (type == RTAX_CC_ALGO) {
                        char tmp[TCP_CA_NAME_MAX];

                        nla_strlcpy(tmp, nla, sizeof(tmp));
                        val = tcp_ca_get_key_by_name(net, tmp, &ecn_ca);
                        if (val == TCP_CA_UNSPEC)
                                goto err;
                } else {
                        val = nla_get_u32(nla);
                }
                if (type == RTAX_HOPLIMIT && val > 255)
                        val = 255;
                if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
                        goto err;

                mp[type - 1] = val;
                __set_bit(type - 1, mxc->mx_valid);
        }

        if (ecn_ca) {
                __set_bit(RTAX_FEATURES - 1, mxc->mx_valid);
                mp[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
        }

        mxc->mx = mp;
        return 0;
err:
        kfree(mp);
        return -EINVAL;
}
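/* Resolve a gateway via one specific routing table.  Returns NULL when
 * the table lookup yields the null entry, so the caller can fall back to
 * a full (all-tables) lookup.
 */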
static struct rt6_info *ip6_nh_lookup_table(struct net *net,
                                            struct fib6_config *cfg,
                                            const struct in6_addr *gw_addr)
{
        struct flowi6 fl6 = {
                .flowi6_oif = cfg->fc_ifindex,
                .daddr = *gw_addr,
                .saddr = cfg->fc_prefsrc,
        };
        struct fib6_table *table;
        struct rt6_info *rt;
        int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_IGNORE_LINKSTATE;

        table = fib6_get_table(net, cfg->fc_table);
        if (!table)
                return NULL;

        if (!ipv6_addr_any(&cfg->fc_prefsrc))
                flags |= RT6_LOOKUP_F_HAS_SADDR;

        rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, flags);

        /* if table lookup failed, fall back to full lookup */
        if (rt == net->ipv6.ip6_null_entry) {
                ip6_rt_put(rt);
                rt = NULL;
        }

        return rt;
}
static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg,
					      struct netlink_ext_ack *extack)
{
	struct net *net = cfg->fc_nlinfo.nl_net;
	struct rt6_info *rt = NULL;
	struct net_device *dev = NULL;
	struct inet6_dev *idev = NULL;
	struct fib6_table *table;
	int addr_type;
	int err = -EINVAL;

	/* RTF_PCPU is an internal flag; can not be set by userspace */
	if (cfg->fc_flags & RTF_PCPU) {
		NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
		goto out;
	}

	/* RTF_CACHE is an internal flag; can not be set by userspace */
	if (cfg->fc_flags & RTF_CACHE) {
		NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE");
		goto out;
	}

	if (cfg->fc_dst_len > 128) {
		NL_SET_ERR_MSG(extack, "Invalid prefix length");
		goto out;
	}
	if (cfg->fc_src_len > 128) {
		NL_SET_ERR_MSG(extack, "Invalid source address length");
		goto out;
	}
#ifndef CONFIG_IPV6_SUBTREES
	if (cfg->fc_src_len) {
		NL_SET_ERR_MSG(extack,
			       "Specifying source address requires IPV6_SUBTREES to be enabled");
		goto out;
	}
#endif
	if (cfg->fc_ifindex) {
		err = -ENODEV;
		dev = dev_get_by_index(net, cfg->fc_ifindex);
		if (!dev)
			goto out;
		idev = in6_dev_get(dev);
		if (!idev)
			goto out;
	}

	if (cfg->fc_metric == 0)
		cfg->fc_metric = IP6_RT_PRIO_USER;

	err = -ENOBUFS;
	if (cfg->fc_nlinfo.nlh &&
	    !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
		table = fib6_get_table(net, cfg->fc_table);
		if (!table) {
			pr_warn("NLM_F_CREATE should be specified when creating new route\n");
			table = fib6_new_table(net, cfg->fc_table);
		}
	} else {
		table = fib6_new_table(net, cfg->fc_table);
	}

	if (!table)
		goto out;

	err = -ENOMEM;
	rt = ip6_dst_alloc(net, NULL,
			   (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);
	if (!rt)
		goto out;
	if (cfg->fc_flags & RTF_EXPIRES)
		rt6_set_expires(rt, jiffies +
				clock_t_to_jiffies(cfg->fc_expires));
	else
		rt6_clean_expires(rt);

	if (cfg->fc_protocol == RTPROT_UNSPEC)
		cfg->fc_protocol = RTPROT_BOOT;
	rt->rt6i_protocol = cfg->fc_protocol;

	addr_type = ipv6_addr_type(&cfg->fc_dst);

	if (addr_type & IPV6_ADDR_MULTICAST)
		rt->dst.input = ip6_mc_input;
	else if (cfg->fc_flags & RTF_LOCAL)
		rt->dst.input = ip6_input;
	else
		rt->dst.input = ip6_forward;

	rt->dst.output = ip6_output;

	if (cfg->fc_encap) {
		struct lwtunnel_state *lwtstate;

		err = lwtunnel_build_state(cfg->fc_encap_type,
					   cfg->fc_encap, AF_INET6, cfg,
					   &lwtstate, extack);
		if (err)
			goto out;
		rt->dst.lwtstate = lwtstate_get(lwtstate);
		if (lwtunnel_output_redirect(rt->dst.lwtstate)) {
			rt->dst.lwtstate->orig_output = rt->dst.output;
			rt->dst.output = lwtunnel_output;
		}
		if (lwtunnel_input_redirect(rt->dst.lwtstate)) {
			rt->dst.lwtstate->orig_input = rt->dst.input;
			rt->dst.input = lwtunnel_input;
		}
	}

	ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
	rt->rt6i_dst.plen = cfg->fc_dst_len;
	if (rt->rt6i_dst.plen == 128)
		rt->dst.flags |= DST_HOST;

#ifdef CONFIG_IPV6_SUBTREES
	ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
	rt->rt6i_src.plen = cfg->fc_src_len;
#endif

	rt->rt6i_metric = cfg->fc_metric;
	/* We cannot add true routes via loopback here; they would
	 * result in kernel looping. Promote them to reject routes.
	 */
	if ((cfg->fc_flags & RTF_REJECT) ||
	    (dev && (dev->flags & IFF_LOOPBACK) &&
	     !(addr_type & IPV6_ADDR_LOOPBACK) &&
	     !(cfg->fc_flags & RTF_LOCAL))) {
		/* hold loopback dev/idev if we haven't done so. */
		if (dev != net->loopback_dev) {
			if (dev) {
				dev_put(dev);
				in6_dev_put(idev);
			}
			dev = net->loopback_dev;
			dev_hold(dev);
			idev = in6_dev_get(dev);
			if (!idev) {
				err = -ENODEV;
				goto out;
			}
		}
		rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
		switch (cfg->fc_type) {
		case RTN_BLACKHOLE:
			rt->dst.error = -EINVAL;
			rt->dst.output = dst_discard_out;
			rt->dst.input = dst_discard;
			break;
		case RTN_PROHIBIT:
			rt->dst.error = -EACCES;
			rt->dst.output = ip6_pkt_prohibit_out;
			rt->dst.input = ip6_pkt_prohibit;
			break;
		case RTN_THROW:
		case RTN_UNREACHABLE:
		default:
			rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
					: (cfg->fc_type == RTN_UNREACHABLE)
					? -EHOSTUNREACH : -ENETUNREACH;
			rt->dst.output = ip6_pkt_discard_out;
			rt->dst.input = ip6_pkt_discard;
			break;
		}
		goto install_route;
	}
	if (cfg->fc_flags & RTF_GATEWAY) {
		const struct in6_addr *gw_addr;
		int gwa_type;

		gw_addr = &cfg->fc_gateway;
		gwa_type = ipv6_addr_type(gw_addr);

		/* if gw_addr is local we will fail to detect this in case
		 * address is still TENTATIVE (DAD in progress). rt6_lookup()
		 * will return already-added prefix route via interface that
		 * prefix route was assigned to, which might be non-loopback.
		 */
		err = -EINVAL;
		if (ipv6_chk_addr_and_flags(net, gw_addr,
					    gwa_type & IPV6_ADDR_LINKLOCAL ?
					    dev : NULL, 0, 0)) {
			NL_SET_ERR_MSG(extack, "Invalid gateway address");
			goto out;
		}
		rt->rt6i_gateway = *gw_addr;

		if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
			struct rt6_info *grt = NULL;

			/* IPv6 strictly forbids using non-link-local
			 * addresses as nexthop addresses; otherwise the
			 * router will not be able to send redirects.
			 * That is very good, but in some (rare!)
			 * circumstances (SIT, PtP, NBMA NOARP links) it is
			 * handy to allow some exceptions. --ANK
			 * We allow IPv4-mapped nexthops to support
			 * RFC4798-type addressing.
			 */
			if (!(gwa_type & (IPV6_ADDR_UNICAST |
					  IPV6_ADDR_MAPPED))) {
				NL_SET_ERR_MSG(extack,
					       "Invalid gateway address");
				goto out;
			}

			if (cfg->fc_table) {
				grt = ip6_nh_lookup_table(net, cfg, gw_addr);

				if (grt) {
					if (grt->rt6i_flags & RTF_GATEWAY ||
					    (dev && dev != grt->dst.dev)) {
						ip6_rt_put(grt);
						grt = NULL;
					}
				}
			}

			if (!grt)
				grt = rt6_lookup(net, gw_addr, NULL,
						 cfg->fc_ifindex, 1);

			err = -EHOSTUNREACH;
			if (!grt)
				goto out;
			if (dev) {
				if (dev != grt->dst.dev) {
					ip6_rt_put(grt);
					goto out;
				}
			} else {
				dev = grt->dst.dev;
				idev = grt->rt6i_idev;
				dev_hold(dev);
				in6_dev_hold(grt->rt6i_idev);
			}
			if (!(grt->rt6i_flags & RTF_GATEWAY))
				err = 0;
			ip6_rt_put(grt);

			if (err)
				goto out;
		}
		err = -EINVAL;
		if (!dev) {
			NL_SET_ERR_MSG(extack, "Egress device not specified");
			goto out;
		} else if (dev->flags & IFF_LOOPBACK) {
			NL_SET_ERR_MSG(extack,
				       "Egress device can not be loopback device for this route");
			goto out;
		}
	}

	err = -ENODEV;
	if (!dev)
		goto out;

	if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
		if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
			NL_SET_ERR_MSG(extack, "Invalid source address");
			err = -EINVAL;
			goto out;
		}
		rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
		rt->rt6i_prefsrc.plen = 128;
	} else
		rt->rt6i_prefsrc.plen = 0;

	rt->rt6i_flags = cfg->fc_flags;

install_route:
	if (!(rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
	    !netif_carrier_ok(dev))
		rt->rt6i_nh_flags |= RTNH_F_LINKDOWN;
	rt->dst.dev = dev;
	rt->rt6i_idev = idev;
	rt->rt6i_table = table;

	cfg->fc_nlinfo.nl_net = dev_net(dev);

	return rt;
out:
	if (dev)
		dev_put(dev);
	if (idev)
		in6_dev_put(idev);
	if (rt)
		dst_release_immediate(&rt->dst);

	return ERR_PTR(err);
}
int ip6_route_add(struct fib6_config *cfg,
		  struct netlink_ext_ack *extack)
{
	struct mx6_config mxc = { .mx = NULL, };
	struct rt6_info *rt;
	int err;

	rt = ip6_route_info_create(cfg, extack);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto out;
	}

	err = ip6_convert_metrics(&mxc, cfg);
	if (err)
		goto out;

	err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc, extack);

	kfree(mxc.mx);

	return err;
out:
	if (rt)
		dst_release_immediate(&rt->dst);

	return err;
}
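/* Sketch (hypothetical caller, error handling elided): adding
 * 2001:db8::/64 via a known gateway from inside the kernel looks much
 * like the RA-driven helpers further below:
 *
 *	struct fib6_config cfg = {
 *		.fc_table	= RT6_TABLE_MAIN,
 *		.fc_metric	= IP6_RT_PRIO_USER,
 *		.fc_ifindex	= dev->ifindex,	// assumed egress device
 *		.fc_dst_len	= 64,
 *		.fc_flags	= RTF_UP | RTF_GATEWAY,
 *		.fc_nlinfo.nl_net = net,
 *	};
 *	cfg.fc_dst = prefix;			// 2001:db8::
 *	cfg.fc_gateway = gw;			// on-link router address
 *	err = ip6_route_add(&cfg, NULL);
 */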
static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
{
	int err;
	struct fib6_table *table;
	struct net *net = dev_net(rt->dst.dev);

	if (rt == net->ipv6.ip6_null_entry) {
		err = -ENOENT;
		goto out;
	}

	table = rt->rt6i_table;
	spin_lock_bh(&table->tb6_lock);
	err = fib6_del(rt, info);
	spin_unlock_bh(&table->tb6_lock);

out:
	ip6_rt_put(rt);
	return err;
}

int ip6_del_rt(struct rt6_info *rt)
{
	struct nl_info info = {
		.nl_net = dev_net(rt->dst.dev),
	};

	return __ip6_del_rt(rt, &info);
}
static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg)
{
	struct nl_info *info = &cfg->fc_nlinfo;
	struct net *net = info->nl_net;
	struct sk_buff *skb = NULL;
	struct fib6_table *table;
	int err = -ENOENT;

	if (rt == net->ipv6.ip6_null_entry)
		goto out_put;
	table = rt->rt6i_table;
	spin_lock_bh(&table->tb6_lock);

	if (rt->rt6i_nsiblings && cfg->fc_delete_all_nh) {
		struct rt6_info *sibling, *next_sibling;

		/* prefer to send a single notification with all hops */
		skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
		if (skb) {
			u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;

			if (rt6_fill_node(net, skb, rt,
					  NULL, NULL, 0, RTM_DELROUTE,
					  info->portid, seq, 0) < 0) {
				kfree_skb(skb);
				skb = NULL;
			} else
				info->skip_notify = 1;
		}

		list_for_each_entry_safe(sibling, next_sibling,
					 &rt->rt6i_siblings,
					 rt6i_siblings) {
			err = fib6_del(sibling, info);
			if (err)
				goto out_unlock;
		}
	}

	err = fib6_del(rt, info);
out_unlock:
	spin_unlock_bh(&table->tb6_lock);
out_put:
	ip6_rt_put(rt);

	if (skb) {
		rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
			    info->nlh, gfp_any());
	}
	return err;
}
static int ip6_route_del(struct fib6_config *cfg,
			 struct netlink_ext_ack *extack)
{
	struct rt6_info *rt, *rt_cache;
	struct fib6_table *table;
	struct fib6_node *fn;
	int err = -ESRCH;

	table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
	if (!table) {
		NL_SET_ERR_MSG(extack, "FIB table does not exist");
		return err;
	}

	rcu_read_lock();

	fn = fib6_locate(&table->tb6_root,
			 &cfg->fc_dst, cfg->fc_dst_len,
			 &cfg->fc_src, cfg->fc_src_len,
			 !(cfg->fc_flags & RTF_CACHE));

	if (fn) {
		for_each_fib6_node_rt_rcu(fn) {
			if (cfg->fc_flags & RTF_CACHE) {
				rt_cache = rt6_find_cached_rt(rt, &cfg->fc_dst,
							      &cfg->fc_src);
				if (!rt_cache)
					continue;
				rt = rt_cache;
			}
			if (cfg->fc_ifindex &&
			    (!rt->dst.dev ||
			     rt->dst.dev->ifindex != cfg->fc_ifindex))
				continue;
			if (cfg->fc_flags & RTF_GATEWAY &&
			    !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
				continue;
			if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
				continue;
			if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol)
				continue;
			if (!dst_hold_safe(&rt->dst))
				break;
			rcu_read_unlock();

			/* if gateway was specified only delete the one hop */
			if (cfg->fc_flags & RTF_GATEWAY)
				return __ip6_del_rt(rt, &cfg->fc_nlinfo);

			return __ip6_del_rt_siblings(rt, cfg);
		}
	}
	rcu_read_unlock();

	return err;
}
static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
	struct netevent_redirect netevent;
	struct rt6_info *rt, *nrt = NULL;
	struct ndisc_options ndopts;
	struct inet6_dev *in6_dev;
	struct neighbour *neigh;
	struct rd_msg *msg;
	int optlen, on_link;
	u8 *lladdr;

	optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
	optlen -= sizeof(*msg);

	if (optlen < 0) {
		net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
		return;
	}

	msg = (struct rd_msg *)icmp6_hdr(skb);

	if (ipv6_addr_is_multicast(&msg->dest)) {
		net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
		return;
	}

	on_link = 0;
	if (ipv6_addr_equal(&msg->dest, &msg->target)) {
		on_link = 1;
	} else if (ipv6_addr_type(&msg->target) !=
		   (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
		net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
		return;
	}

	in6_dev = __in6_dev_get(skb->dev);
	if (!in6_dev)
		return;
	if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
		return;

	/* RFC2461 8.1:
	 *	The IP source address of the Redirect MUST be the same as the current
	 *	first-hop router for the specified ICMP Destination Address.
	 */

	if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
		net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
		return;
	}

	lladdr = NULL;
	if (ndopts.nd_opts_tgt_lladdr) {
		lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
					     skb->dev);
		if (!lladdr) {
			net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
			return;
		}
	}

	rt = (struct rt6_info *) dst;
	if (rt->rt6i_flags & RTF_REJECT) {
		net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
		return;
	}

	/* Redirect received -> path was valid.
	 * Look, redirects are sent only in response to data packets,
	 * so that this nexthop apparently is reachable. --ANK
	 */
	dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);

	neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
	if (!neigh)
		return;

	/*
	 *	We have finally decided to accept it.
	 */

	ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
		     NEIGH_UPDATE_F_WEAK_OVERRIDE|
		     NEIGH_UPDATE_F_OVERRIDE|
		     (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
				     NEIGH_UPDATE_F_ISROUTER)),
		     NDISC_REDIRECT, &ndopts);

	nrt = ip6_rt_cache_alloc(rt, &msg->dest, NULL);
	if (!nrt)
		goto out;

	nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
	if (on_link)
		nrt->rt6i_flags &= ~RTF_GATEWAY;

	nrt->rt6i_protocol = RTPROT_REDIRECT;
	nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;

	/* No need to remove rt from the exception table if rt is
	 * a cached route because rt6_insert_exception() will
	 * take care of it
	 */
	if (rt6_insert_exception(nrt, rt)) {
		dst_release_immediate(&nrt->dst);
		goto out;
	}

	netevent.old = &rt->dst;
	netevent.new = &nrt->dst;
	netevent.daddr = &msg->dest;
	netevent.neigh = neigh;
	call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);

out:
	neigh_release(neigh);
}
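/* Illustrative flow: a host sending to 2001:db8::5 via router A receives an
 * ND Redirect from A naming router B's link-local address as the better
 * first hop (or naming the destination itself for an on-link redirect).
 * The code above validates the message per RFC 4861 section 8, updates the
 * neighbour cache, and installs an RTF_CACHE exception route to the new hop.
 */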
/*
 *	Misc support functions
 */

static void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
{
	BUG_ON(from->from);

	rt->rt6i_flags &= ~RTF_EXPIRES;
	dst_hold(&from->dst);
	rt->from = from;
	dst_init_metrics(&rt->dst, dst_metrics_ptr(&from->dst), true);
}

static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
{
	rt->dst.input = ort->dst.input;
	rt->dst.output = ort->dst.output;
	rt->rt6i_dst = ort->rt6i_dst;
	rt->dst.error = ort->dst.error;
	rt->rt6i_idev = ort->rt6i_idev;
	if (rt->rt6i_idev)
		in6_dev_hold(rt->rt6i_idev);
	rt->dst.lastuse = jiffies;
	rt->rt6i_gateway = ort->rt6i_gateway;
	rt->rt6i_flags = ort->rt6i_flags;
	rt6_set_from(rt, ort);
	rt->rt6i_metric = ort->rt6i_metric;
#ifdef CONFIG_IPV6_SUBTREES
	rt->rt6i_src = ort->rt6i_src;
#endif
	rt->rt6i_prefsrc = ort->rt6i_prefsrc;
	rt->rt6i_table = ort->rt6i_table;
	rt->dst.lwtstate = lwtstate_get(ort->dst.lwtstate);
}
#ifdef CONFIG_IPV6_ROUTE_INFO
static struct rt6_info *rt6_get_route_info(struct net *net,
					   const struct in6_addr *prefix, int prefixlen,
					   const struct in6_addr *gwaddr,
					   struct net_device *dev)
{
	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
	int ifindex = dev->ifindex;
	struct fib6_node *fn;
	struct rt6_info *rt = NULL;
	struct fib6_table *table;

	table = fib6_get_table(net, tb_id);
	if (!table)
		return NULL;

	rcu_read_lock();
	fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true);
	if (!fn)
		goto out;

	for_each_fib6_node_rt_rcu(fn) {
		if (rt->dst.dev->ifindex != ifindex)
			continue;
		if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
			continue;
		if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
			continue;
		ip6_hold_safe(NULL, &rt, false);
		break;
	}
out:
	rcu_read_unlock();
	return rt;
}
static struct rt6_info *rt6_add_route_info(struct net *net,
					   const struct in6_addr *prefix, int prefixlen,
					   const struct in6_addr *gwaddr,
					   struct net_device *dev,
					   unsigned int pref)
{
	struct fib6_config cfg = {
		.fc_metric	= IP6_RT_PRIO_USER,
		.fc_ifindex	= dev->ifindex,
		.fc_dst_len	= prefixlen,
		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
				  RTF_UP | RTF_PREF(pref),
		.fc_protocol = RTPROT_RA,
		.fc_nlinfo.portid = 0,
		.fc_nlinfo.nlh = NULL,
		.fc_nlinfo.nl_net = net,
	};

	cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
	cfg.fc_dst = *prefix;
	cfg.fc_gateway = *gwaddr;

	/* We should treat it as a default route if prefix length is 0. */
	if (!prefixlen)
		cfg.fc_flags |= RTF_DEFAULT;

	ip6_route_add(&cfg, NULL);

	return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
}
#endif
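/* Context note (illustrative): this path is driven by RFC 4191 Route
 * Information Options in Router Advertisements. An RA advertising
 * 2001:db8:1::/48 with medium preference ends up here roughly as
 * rt6_add_route_info(net, &prefix, 48, &ra_saddr, dev,
 *		      ICMPV6_ROUTER_PREF_MEDIUM).
 */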
struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
{
	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
	struct rt6_info *rt;
	struct fib6_table *table;

	table = fib6_get_table(dev_net(dev), tb_id);
	if (!table)
		return NULL;

	rcu_read_lock();
	for_each_fib6_node_rt_rcu(&table->tb6_root) {
		if (dev == rt->dst.dev &&
		    ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
		    ipv6_addr_equal(&rt->rt6i_gateway, addr))
			break;
	}
	if (rt)
		ip6_hold_safe(NULL, &rt, false);
	rcu_read_unlock();
	return rt;
}
struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
				     struct net_device *dev,
				     unsigned int pref)
{
	struct fib6_config cfg = {
		.fc_table	= l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
		.fc_metric	= IP6_RT_PRIO_USER,
		.fc_ifindex	= dev->ifindex,
		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
				  RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
		.fc_protocol = RTPROT_RA,
		.fc_nlinfo.portid = 0,
		.fc_nlinfo.nlh = NULL,
		.fc_nlinfo.nl_net = dev_net(dev),
	};

	cfg.fc_gateway = *gwaddr;

	if (!ip6_route_add(&cfg, NULL)) {
		struct fib6_table *table;

		table = fib6_get_table(dev_net(dev), cfg.fc_table);
		if (table)
			table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
	}

	return rt6_get_dflt_router(gwaddr, dev);
}
static void __rt6_purge_dflt_routers(struct fib6_table *table)
{
	struct rt6_info *rt;

restart:
	rcu_read_lock();
	for_each_fib6_node_rt_rcu(&table->tb6_root) {
		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
		    (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
			if (dst_hold_safe(&rt->dst)) {
				rcu_read_unlock();
				ip6_del_rt(rt);
			} else {
				rcu_read_unlock();
			}
			goto restart;
		}
	}
	rcu_read_unlock();

	table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
}

void rt6_purge_dflt_routers(struct net *net)
{
	struct fib6_table *table;
	struct hlist_head *head;
	unsigned int h;

	rcu_read_lock();

	for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
		head = &net->ipv6.fib_table_hash[h];
		hlist_for_each_entry_rcu(table, head, tb6_hlist) {
			if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
				__rt6_purge_dflt_routers(table);
		}
	}

	rcu_read_unlock();
}
static void rtmsg_to_fib6_config(struct net *net,
				 struct in6_rtmsg *rtmsg,
				 struct fib6_config *cfg)
{
	memset(cfg, 0, sizeof(*cfg));

	cfg->fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
			 : RT6_TABLE_MAIN;
	cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
	cfg->fc_metric = rtmsg->rtmsg_metric;
	cfg->fc_expires = rtmsg->rtmsg_info;
	cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
	cfg->fc_src_len = rtmsg->rtmsg_src_len;
	cfg->fc_flags = rtmsg->rtmsg_flags;

	cfg->fc_nlinfo.portid = 0;
	cfg->fc_nlinfo.nlh = NULL;
	cfg->fc_nlinfo.nl_net = net;

	cfg->fc_dst = rtmsg->rtmsg_dst;
	cfg->fc_src = rtmsg->rtmsg_src;
	cfg->fc_gateway = rtmsg->rtmsg_gateway;
}
int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
	struct fib6_config cfg;
	struct in6_rtmsg rtmsg;
	int err;

	switch (cmd) {
	case SIOCADDRT:		/* Add a route */
	case SIOCDELRT:		/* Delete a route */
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		err = copy_from_user(&rtmsg, arg,
				     sizeof(struct in6_rtmsg));
		if (err)
			return -EFAULT;

		rtmsg_to_fib6_config(net, &rtmsg, &cfg);

		rtnl_lock();
		switch (cmd) {
		case SIOCADDRT:
			err = ip6_route_add(&cfg, NULL);
			break;
		case SIOCDELRT:
			err = ip6_route_del(&cfg, NULL);
			break;
		default:
			err = -EINVAL;
		}
		rtnl_unlock();

		return err;
	}

	return -EINVAL;
}
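/* Userspace sketch (illustrative only, error handling elided): this legacy
 * ioctl path is what "route -A inet6 add" style tools use:
 *
 *	struct in6_rtmsg rm = { 0 };
 *	rm.rtmsg_dst_len = 64;
 *	rm.rtmsg_flags = RTF_UP;
 *	rm.rtmsg_ifindex = ifindex;	// assumed egress device
 *	inet_pton(AF_INET6, "2001:db8::", &rm.rtmsg_dst);
 *	ioctl(fd, SIOCADDRT, &rm);	// fd: an AF_INET6 socket
 */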
/*
 *	Drop the packet on the floor
 */

static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
{
	int type;
	struct dst_entry *dst = skb_dst(skb);
	switch (ipstats_mib_noroutes) {
	case IPSTATS_MIB_INNOROUTES:
		type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
		if (type == IPV6_ADDR_ANY) {
			IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
				      IPSTATS_MIB_INADDRERRORS);
			break;
		}
		/* FALLTHROUGH */
	case IPSTATS_MIB_OUTNOROUTES:
		IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
			      ipstats_mib_noroutes);
		break;
	}
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
	kfree_skb(skb);
	return 0;
}

static int ip6_pkt_discard(struct sk_buff *skb)
{
	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
}

static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb->dev = skb_dst(skb)->dev;
	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
}

static int ip6_pkt_prohibit(struct sk_buff *skb)
{
	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
}

static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb->dev = skb_dst(skb)->dev;
	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
}
/*
 *	Allocate a dst for local (unicast / anycast) address.
 */

struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
				    const struct in6_addr *addr,
				    bool anycast)
{
	u32 tb_id;
	struct net *net = dev_net(idev->dev);
	struct net_device *dev = idev->dev;
	struct rt6_info *rt;

	rt = ip6_dst_alloc(net, dev, DST_NOCOUNT);
	if (!rt)
		return ERR_PTR(-ENOMEM);

	in6_dev_hold(idev);

	rt->dst.flags |= DST_HOST;
	rt->dst.input = ip6_input;
	rt->dst.output = ip6_output;
	rt->rt6i_idev = idev;

	rt->rt6i_protocol = RTPROT_KERNEL;
	rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
	if (anycast)
		rt->rt6i_flags |= RTF_ANYCAST;
	else
		rt->rt6i_flags |= RTF_LOCAL;

	rt->rt6i_gateway  = *addr;
	rt->rt6i_dst.addr = *addr;
	rt->rt6i_dst.plen = 128;
	tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
	rt->rt6i_table = fib6_get_table(net, tb_id);

	return rt;
}
/* remove deleted ip from prefsrc entries */
struct arg_dev_net_ip {
	struct net_device *dev;
	struct net *net;
	struct in6_addr *addr;
};

static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
{
	struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
	struct net *net = ((struct arg_dev_net_ip *)arg)->net;
	struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;

	if (((void *)rt->dst.dev == dev || !dev) &&
	    rt != net->ipv6.ip6_null_entry &&
	    ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
		spin_lock_bh(&rt6_exception_lock);
		/* remove prefsrc entry */
		rt->rt6i_prefsrc.plen = 0;
		/* need to update cache as well */
		rt6_exceptions_remove_prefsrc(rt);
		spin_unlock_bh(&rt6_exception_lock);
	}
	return 0;
}

void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
{
	struct net *net = dev_net(ifp->idev->dev);
	struct arg_dev_net_ip adni = {
		.dev = ifp->idev->dev,
		.net = net,
		.addr = &ifp->addr,
	};
	fib6_clean_all(net, fib6_remove_prefsrc, &adni);
}
#define RTF_RA_ROUTER		(RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)

/* Remove routers and update dst entries when a gateway turns into a host. */
static int fib6_clean_tohost(struct rt6_info *rt, void *arg)
{
	struct in6_addr *gateway = (struct in6_addr *)arg;

	if (((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
	    ipv6_addr_equal(gateway, &rt->rt6i_gateway)) {
		return -1;
	}

	/* Further clean up cached routes in exception table.
	 * This is needed because cached route may have a different
	 * gateway than its 'parent' in the case of an ip redirect.
	 */
	rt6_exceptions_clean_tohost(rt, gateway);

	return 0;
}

void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
{
	fib6_clean_all(net, fib6_clean_tohost, gateway);
}
struct arg_netdev_event {
	const struct net_device *dev;
	union {
		unsigned int nh_flags;
		unsigned long event;
	};
};

static struct rt6_info *rt6_multipath_first_sibling(const struct rt6_info *rt)
{
	struct rt6_info *iter;
	struct fib6_node *fn;

	fn = rcu_dereference_protected(rt->rt6i_node,
			lockdep_is_held(&rt->rt6i_table->tb6_lock));
	iter = rcu_dereference_protected(fn->leaf,
			lockdep_is_held(&rt->rt6i_table->tb6_lock));
	while (iter) {
		if (iter->rt6i_metric == rt->rt6i_metric &&
		    rt6_qualify_for_ecmp(iter))
			return iter;
		iter = rcu_dereference_protected(iter->rt6_next,
				lockdep_is_held(&rt->rt6i_table->tb6_lock));
	}

	return NULL;
}
static bool rt6_is_dead(const struct rt6_info *rt)
{
	if (rt->rt6i_nh_flags & RTNH_F_DEAD ||
	    (rt->rt6i_nh_flags & RTNH_F_LINKDOWN &&
	     rt->rt6i_idev->cnf.ignore_routes_with_linkdown))
		return true;

	return false;
}

static int rt6_multipath_total_weight(const struct rt6_info *rt)
{
	struct rt6_info *iter;
	int total = 0;

	if (!rt6_is_dead(rt))
		total += rt->rt6i_nh_weight;

	list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings) {
		if (!rt6_is_dead(iter))
			total += iter->rt6i_nh_weight;
	}

	return total;
}

static void rt6_upper_bound_set(struct rt6_info *rt, int *weight, int total)
{
	int upper_bound = -1;

	if (!rt6_is_dead(rt)) {
		*weight += rt->rt6i_nh_weight;
		upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31,
						    total) - 1;
	}

	atomic_set(&rt->rt6i_nh_upper_bound, upper_bound);
}

static void rt6_multipath_upper_bound_set(struct rt6_info *rt, int total)
{
	struct rt6_info *iter;
	int weight = 0;

	rt6_upper_bound_set(rt, &weight, total);

	list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
		rt6_upper_bound_set(iter, &weight, total);
}
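/* Worked example (illustrative): three live nexthops with weights 1, 2 and 1
 * give total = 4, and the cumulative upper bounds become
 *
 *	round(1 * 2^31 / 4) - 1 = 0x1fffffff
 *	round(3 * 2^31 / 4) - 1 = 0x5fffffff
 *	round(4 * 2^31 / 4) - 1 = 0x7fffffff
 *
 * so a 31-bit flow hash picks the nexthops in a 1:2:1 ratio (hash-threshold
 * selection). Dead nexthops keep the sentinel bound of -1 and are skipped.
 */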
void rt6_multipath_rebalance(struct rt6_info *rt)
{
	struct rt6_info *first;
	int total;

	/* In case the entire multipath route was marked for flushing,
	 * then there is no need to rebalance upon the removal of every
	 * sibling route.
	 */
	if (!rt->rt6i_nsiblings || rt->should_flush)
		return;

	/* During lookup routes are evaluated in order, so we need to
	 * make sure upper bounds are assigned from the first sibling
	 * onwards.
	 */
	first = rt6_multipath_first_sibling(rt);
	if (WARN_ON_ONCE(!first))
		return;

	total = rt6_multipath_total_weight(first);
	rt6_multipath_upper_bound_set(first, total);
}
static int fib6_ifup(struct rt6_info *rt, void *p_arg)
{
	const struct arg_netdev_event *arg = p_arg;
	const struct net *net = dev_net(arg->dev);

	if (rt != net->ipv6.ip6_null_entry && rt->dst.dev == arg->dev) {
		rt->rt6i_nh_flags &= ~arg->nh_flags;
		fib6_update_sernum_upto_root(dev_net(rt->dst.dev), rt);
		rt6_multipath_rebalance(rt);
	}

	return 0;
}

void rt6_sync_up(struct net_device *dev, unsigned int nh_flags)
{
	struct arg_netdev_event arg = {
		.dev = dev,
		{
			.nh_flags = nh_flags,
		},
	};

	if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev))
		arg.nh_flags |= RTNH_F_LINKDOWN;

	fib6_clean_all(dev_net(dev), fib6_ifup, &arg);
}
static bool rt6_multipath_uses_dev(const struct rt6_info *rt,
				   const struct net_device *dev)
{
	struct rt6_info *iter;

	if (rt->dst.dev == dev)
		return true;
	list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
		if (iter->dst.dev == dev)
			return true;

	return false;
}

static void rt6_multipath_flush(struct rt6_info *rt)
{
	struct rt6_info *iter;

	rt->should_flush = 1;
	list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
		iter->should_flush = 1;
}

static unsigned int rt6_multipath_dead_count(const struct rt6_info *rt,
					     const struct net_device *down_dev)
{
	struct rt6_info *iter;
	unsigned int dead = 0;

	if (rt->dst.dev == down_dev || rt->rt6i_nh_flags & RTNH_F_DEAD)
		dead++;
	list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
		if (iter->dst.dev == down_dev ||
		    iter->rt6i_nh_flags & RTNH_F_DEAD)
			dead++;

	return dead;
}

static void rt6_multipath_nh_flags_set(struct rt6_info *rt,
				       const struct net_device *dev,
				       unsigned int nh_flags)
{
	struct rt6_info *iter;

	if (rt->dst.dev == dev)
		rt->rt6i_nh_flags |= nh_flags;
	list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
		if (iter->dst.dev == dev)
			iter->rt6i_nh_flags |= nh_flags;
}
/* called with write lock held for table with rt */
static int fib6_ifdown(struct rt6_info *rt, void *p_arg)
{
	const struct arg_netdev_event *arg = p_arg;
	const struct net_device *dev = arg->dev;
	const struct net *net = dev_net(dev);

	if (rt == net->ipv6.ip6_null_entry)
		return 0;

	switch (arg->event) {
	case NETDEV_UNREGISTER:
		return rt->dst.dev == dev ? -1 : 0;
	case NETDEV_DOWN:
		if (rt->should_flush)
			return -1;
		if (!rt->rt6i_nsiblings)
			return rt->dst.dev == dev ? -1 : 0;
		if (rt6_multipath_uses_dev(rt, dev)) {
			unsigned int count;

			count = rt6_multipath_dead_count(rt, dev);
			if (rt->rt6i_nsiblings + 1 == count) {
				rt6_multipath_flush(rt);
				return -1;
			}
			rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD |
						   RTNH_F_LINKDOWN);
			fib6_update_sernum(rt);
			rt6_multipath_rebalance(rt);
		}
		return -2;
	case NETDEV_CHANGE:
		if (rt->dst.dev != dev ||
		    rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST))
			break;
		rt->rt6i_nh_flags |= RTNH_F_LINKDOWN;
		rt6_multipath_rebalance(rt);
		break;
	}

	return 0;
}

void rt6_sync_down_dev(struct net_device *dev, unsigned long event)
{
	struct arg_netdev_event arg = {
		.dev = dev,
		{
			.event = event,
		},
	};

	fib6_clean_all(dev_net(dev), fib6_ifdown, &arg);
}

void rt6_disable_ip(struct net_device *dev, unsigned long event)
{
	rt6_sync_down_dev(dev, event);
	rt6_uncached_list_flush_dev(dev_net(dev), dev);
	neigh_ifdown(&nd_tbl, dev);
}
struct rt6_mtu_change_arg {
	struct net_device *dev;
	unsigned int mtu;
};

static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
{
	struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
	struct inet6_dev *idev;

	/* In IPv6 pmtu discovery is not optional,
	   so that RTAX_MTU lock cannot disable it.
	   We still use this lock to block changes
	   caused by addrconf/ndisc.
	*/

	idev = __in6_dev_get(arg->dev);
	if (!idev)
		return 0;

	/* For an administrative MTU increase, there is no way to discover
	   an IPv6 PMTU increase, so the PMTU must be updated here.
	   Since RFC 1981 doesn't cover administrative MTU increases,
	   updating the PMTU on increase is a MUST (e.g. for jumbo frames).
	 */
	/*
	   If the new MTU is less than the route PMTU, the new MTU will be
	   the lowest MTU in the path; update the route PMTU to reflect the
	   decrease. If the new MTU is greater than the route PMTU, and the
	   old MTU is the lowest MTU in the path, update the route PMTU to
	   reflect the increase. In that case, if the other nodes' MTU is
	   also the lowest in the path, a TOO BIG message will still trigger
	   PMTU discovery.
	 */
	if (rt->dst.dev == arg->dev &&
	    dst_metric_raw(&rt->dst, RTAX_MTU) &&
	    !dst_metric_locked(&rt->dst, RTAX_MTU)) {
		spin_lock_bh(&rt6_exception_lock);
		if (dst_mtu(&rt->dst) >= arg->mtu ||
		    (dst_mtu(&rt->dst) < arg->mtu &&
		     dst_mtu(&rt->dst) == idev->cnf.mtu6)) {
			dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
		}
		rt6_exceptions_update_pmtu(rt, arg->mtu);
		spin_unlock_bh(&rt6_exception_lock);
	}
	return 0;
}

void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
{
	struct rt6_mtu_change_arg arg = {
		.dev = dev,
		.mtu = mtu,
	};

	fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
}
static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
	[RTA_GATEWAY]		= { .len = sizeof(struct in6_addr) },
	[RTA_OIF]		= { .type = NLA_U32 },
	[RTA_IIF]		= { .type = NLA_U32 },
	[RTA_PRIORITY]		= { .type = NLA_U32 },
	[RTA_METRICS]		= { .type = NLA_NESTED },
	[RTA_MULTIPATH]		= { .len = sizeof(struct rtnexthop) },
	[RTA_PREF]		= { .type = NLA_U8 },
	[RTA_ENCAP_TYPE]	= { .type = NLA_U16 },
	[RTA_ENCAP]		= { .type = NLA_NESTED },
	[RTA_EXPIRES]		= { .type = NLA_U32 },
	[RTA_UID]		= { .type = NLA_U32 },
	[RTA_MARK]		= { .type = NLA_U32 },
};
static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct fib6_config *cfg,
			      struct netlink_ext_ack *extack)
{
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	unsigned int pref;
	int err;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
			  extack);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	rtm = nlmsg_data(nlh);
	memset(cfg, 0, sizeof(*cfg));

	cfg->fc_table = rtm->rtm_table;
	cfg->fc_dst_len = rtm->rtm_dst_len;
	cfg->fc_src_len = rtm->rtm_src_len;
	cfg->fc_flags = RTF_UP;
	cfg->fc_protocol = rtm->rtm_protocol;
	cfg->fc_type = rtm->rtm_type;

	if (rtm->rtm_type == RTN_UNREACHABLE ||
	    rtm->rtm_type == RTN_BLACKHOLE ||
	    rtm->rtm_type == RTN_PROHIBIT ||
	    rtm->rtm_type == RTN_THROW)
		cfg->fc_flags |= RTF_REJECT;

	if (rtm->rtm_type == RTN_LOCAL)
		cfg->fc_flags |= RTF_LOCAL;

	if (rtm->rtm_flags & RTM_F_CLONED)
		cfg->fc_flags |= RTF_CACHE;

	cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
	cfg->fc_nlinfo.nlh = nlh;
	cfg->fc_nlinfo.nl_net = sock_net(skb->sk);

	if (tb[RTA_GATEWAY]) {
		cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
		cfg->fc_flags |= RTF_GATEWAY;
	}

	if (tb[RTA_DST]) {
		int plen = (rtm->rtm_dst_len + 7) >> 3;

		if (nla_len(tb[RTA_DST]) < plen)
			goto errout;

		nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
	}

	if (tb[RTA_SRC]) {
		int plen = (rtm->rtm_src_len + 7) >> 3;

		if (nla_len(tb[RTA_SRC]) < plen)
			goto errout;

		nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
	}

	if (tb[RTA_PREFSRC])
		cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);

	if (tb[RTA_OIF])
		cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);

	if (tb[RTA_PRIORITY])
		cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);

	if (tb[RTA_METRICS]) {
		cfg->fc_mx = nla_data(tb[RTA_METRICS]);
		cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
	}

	if (tb[RTA_TABLE])
		cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);

	if (tb[RTA_MULTIPATH]) {
		cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
		cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);

		err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
						     cfg->fc_mp_len, extack);
		if (err < 0)
			goto errout;
	}

	if (tb[RTA_PREF]) {
		pref = nla_get_u8(tb[RTA_PREF]);
		if (pref != ICMPV6_ROUTER_PREF_LOW &&
		    pref != ICMPV6_ROUTER_PREF_HIGH)
			pref = ICMPV6_ROUTER_PREF_MEDIUM;
		cfg->fc_flags |= RTF_PREF(pref);
	}

	if (tb[RTA_ENCAP])
		cfg->fc_encap = tb[RTA_ENCAP];

	if (tb[RTA_ENCAP_TYPE]) {
		cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);

		err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
		if (err < 0)
			goto errout;
	}

	if (tb[RTA_EXPIRES]) {
		unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);

		if (addrconf_finite_timeout(timeout)) {
			cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
			cfg->fc_flags |= RTF_EXPIRES;
		}
	}

	err = 0;
errout:
	return err;
}
struct rt6_nh {
	struct rt6_info *rt6_info;
	struct fib6_config r_cfg;
	struct mx6_config mxc;
	struct list_head next;
};

static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
{
	struct rt6_nh *nh;

	list_for_each_entry(nh, rt6_nh_list, next) {
		pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6c nexthop %pI6c ifi %d\n",
			&nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
			nh->r_cfg.fc_ifindex);
	}
}
static int ip6_route_info_append(struct list_head *rt6_nh_list,
				 struct rt6_info *rt, struct fib6_config *r_cfg)
{
	struct rt6_nh *nh;
	int err = -EEXIST;

	list_for_each_entry(nh, rt6_nh_list, next) {
		/* check if rt6_info already exists */
		if (rt6_duplicate_nexthop(nh->rt6_info, rt))
			return err;
	}

	nh = kzalloc(sizeof(*nh), GFP_KERNEL);
	if (!nh)
		return -ENOMEM;
	nh->rt6_info = rt;
	err = ip6_convert_metrics(&nh->mxc, r_cfg);
	if (err) {
		kfree(nh);
		return err;
	}
	memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
	list_add_tail(&nh->next, rt6_nh_list);

	return 0;
}

static void ip6_route_mpath_notify(struct rt6_info *rt,
				   struct rt6_info *rt_last,
				   struct nl_info *info,
				   __u16 nlflags)
{
	/* if this is an APPEND route, then rt points to the first route
	 * inserted and rt_last points to last route inserted. Userspace
	 * wants a consistent dump of the route which starts at the first
	 * nexthop. Since sibling routes are always added at the end of
	 * the list, find the first sibling of the last route appended
	 */
	if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->rt6i_nsiblings) {
		rt = list_first_entry(&rt_last->rt6i_siblings,
				      struct rt6_info,
				      rt6i_siblings);
	}

	if (rt)
		inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
}
static int ip6_route_multipath_add(struct fib6_config *cfg,
				   struct netlink_ext_ack *extack)
{
	struct rt6_info *rt_notif = NULL, *rt_last = NULL;
	struct nl_info *info = &cfg->fc_nlinfo;
	struct fib6_config r_cfg;
	struct rtnexthop *rtnh;
	struct rt6_info *rt;
	struct rt6_nh *err_nh;
	struct rt6_nh *nh, *nh_safe;
	__u16 nlflags;
	int remaining;
	int attrlen;
	int err = 1;
	int nhn = 0;
	int replace = (cfg->fc_nlinfo.nlh &&
		       (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
	LIST_HEAD(rt6_nh_list);

	nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
	if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
		nlflags |= NLM_F_APPEND;

	remaining = cfg->fc_mp_len;
	rtnh = (struct rtnexthop *)cfg->fc_mp;

	/* Parse a Multipath Entry and build a list (rt6_nh_list) of
	 * rt6_info structs per nexthop
	 */
	while (rtnh_ok(rtnh, remaining)) {
		memcpy(&r_cfg, cfg, sizeof(*cfg));
		if (rtnh->rtnh_ifindex)
			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla) {
				r_cfg.fc_gateway = nla_get_in6_addr(nla);
				r_cfg.fc_flags |= RTF_GATEWAY;
			}
			r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
			nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
			if (nla)
				r_cfg.fc_encap_type = nla_get_u16(nla);
		}

		rt = ip6_route_info_create(&r_cfg, extack);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			rt = NULL;
			goto cleanup;
		}

		err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg);
		if (err) {
			dst_release_immediate(&rt->dst);
			goto cleanup;
		}

		rtnh = rtnh_next(rtnh, &remaining);
	}

	/* for add and replace send one notification with all nexthops.
	 * Skip the notification in fib6_add_rt2node and send one with
	 * the full route when done
	 */
	info->skip_notify = 1;

	err_nh = NULL;
	list_for_each_entry(nh, &rt6_nh_list, next) {
		rt_last = nh->rt6_info;
		err = __ip6_ins_rt(nh->rt6_info, info, &nh->mxc, extack);
		/* save reference to first route for notification */
		if (!rt_notif && !err)
			rt_notif = nh->rt6_info;

		/* nh->rt6_info is used or freed at this point, reset to NULL*/
		nh->rt6_info = NULL;
		if (err) {
			if (replace && nhn)
				ip6_print_replace_route_err(&rt6_nh_list);
			err_nh = nh;
			goto add_errout;
		}

		/* Because each route is added like a single route we remove
		 * these flags after the first nexthop: if there is a collision,
		 * we have already failed to add the first nexthop:
		 * fib6_add_rt2node() has rejected it; when replacing, old
		 * nexthops have been replaced by first new, the rest should
		 * be added to it.
		 */
		cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
						     NLM_F_REPLACE);
		nhn++;
	}

	/* success ... tell user about new route */
	ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
	goto cleanup;

add_errout:
	/* send notification for routes that were added so that
	 * the delete notifications sent by ip6_route_del are
	 * coherent
	 */
	if (rt_notif)
		ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);

	/* Delete routes that were already added */
	list_for_each_entry(nh, &rt6_nh_list, next) {
		if (err_nh == nh)
			break;
		ip6_route_del(&nh->r_cfg, extack);
	}

cleanup:
	list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
		if (nh->rt6_info)
			dst_release_immediate(&nh->rt6_info->dst);
		kfree(nh->mxc.mx);
		list_del(&nh->next);
		kfree(nh);
	}

	return err;
}
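/* Mapping note (illustrative): "ip -6 route add 2001:db8::/64
 *	nexthop via fe80::1 dev eth0 nexthop via fe80::2 dev eth1"
 * arrives as one RTM_NEWROUTE carrying an RTA_MULTIPATH attribute; the
 * parser above walks its rtnexthop entries, builds one rt6_info per hop,
 * then inserts them as siblings so they share a single notification.
 */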
static int ip6_route_multipath_del(struct fib6_config *cfg,
				   struct netlink_ext_ack *extack)
{
	struct fib6_config r_cfg;
	struct rtnexthop *rtnh;
	int remaining;
	int attrlen;
	int err = 1, last_err = 0;

	remaining = cfg->fc_mp_len;
	rtnh = (struct rtnexthop *)cfg->fc_mp;

	/* Parse a Multipath Entry */
	while (rtnh_ok(rtnh, remaining)) {
		memcpy(&r_cfg, cfg, sizeof(*cfg));
		if (rtnh->rtnh_ifindex)
			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla) {
				nla_memcpy(&r_cfg.fc_gateway, nla, 16);
				r_cfg.fc_flags |= RTF_GATEWAY;
			}
		}
		err = ip6_route_del(&r_cfg, extack);
		if (err)
			last_err = err;

		rtnh = rtnh_next(rtnh, &remaining);
	}

	return last_err;
}
static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	struct fib6_config cfg;
	int err;

	err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
	if (err < 0)
		return err;

	if (cfg.fc_mp)
		return ip6_route_multipath_del(&cfg, extack);
	else {
		cfg.fc_delete_all_nh = 1;
		return ip6_route_del(&cfg, extack);
	}
}

static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	struct fib6_config cfg;
	int err;

	err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
	if (err < 0)
		return err;

	if (cfg.fc_mp)
		return ip6_route_multipath_add(&cfg, extack);
	else
		return ip6_route_add(&cfg, extack);
}
static size_t rt6_nlmsg_size(struct rt6_info *rt)
{
	int nexthop_len = 0;

	if (rt->rt6i_nsiblings) {
		nexthop_len = nla_total_size(0)	 /* RTA_MULTIPATH */
			    + NLA_ALIGN(sizeof(struct rtnexthop))
			    + nla_total_size(16) /* RTA_GATEWAY */
			    + lwtunnel_get_encap_size(rt->dst.lwtstate);

		nexthop_len *= rt->rt6i_nsiblings;
	}

	return NLMSG_ALIGN(sizeof(struct rtmsg))
	       + nla_total_size(16) /* RTA_SRC */
	       + nla_total_size(16) /* RTA_DST */
	       + nla_total_size(16) /* RTA_GATEWAY */
	       + nla_total_size(16) /* RTA_PREFSRC */
	       + nla_total_size(4) /* RTA_TABLE */
	       + nla_total_size(4) /* RTA_IIF */
	       + nla_total_size(4) /* RTA_OIF */
	       + nla_total_size(4) /* RTA_PRIORITY */
	       + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
	       + nla_total_size(sizeof(struct rta_cacheinfo))
	       + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
	       + nla_total_size(1) /* RTA_PREF */
	       + lwtunnel_get_encap_size(rt->dst.lwtstate)
	       + nexthop_len;
}
static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt,
			    unsigned int *flags, bool skip_oif)
{
	if (rt->rt6i_nh_flags & RTNH_F_DEAD)
		*flags |= RTNH_F_DEAD;

	if (rt->rt6i_nh_flags & RTNH_F_LINKDOWN) {
		*flags |= RTNH_F_LINKDOWN;
		if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown)
			*flags |= RTNH_F_DEAD;
	}

	if (rt->rt6i_flags & RTF_GATEWAY) {
		if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->rt6i_gateway) < 0)
			goto nla_put_failure;
	}

	if (rt->rt6i_nh_flags & RTNH_F_OFFLOAD)
		*flags |= RTNH_F_OFFLOAD;

	/* not needed for multipath encoding b/c it has a rtnexthop struct */
	if (!skip_oif && rt->dst.dev &&
	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
		goto nla_put_failure;

	if (rt->dst.lwtstate &&
	    lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0)
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

/* add multipath next hop */
static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt)
{
	struct rtnexthop *rtnh;
	unsigned int flags = 0;

	rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
	if (!rtnh)
		goto nla_put_failure;

	rtnh->rtnh_hops = 0;
	rtnh->rtnh_ifindex = rt->dst.dev ? rt->dst.dev->ifindex : 0;

	if (rt6_nexthop_info(skb, rt, &flags, true) < 0)
		goto nla_put_failure;

	rtnh->rtnh_flags = flags;

	/* length of rtnetlink header + attributes */
	rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
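/* Resulting attribute layout (illustrative) for a two-nexthop route:
 *
 *	RTA_MULTIPATH
 *	  struct rtnexthop { rtnh_ifindex = 2, rtnh_hops = 0, rtnh_flags }
 *	    RTA_GATEWAY fe80::1
 *	  struct rtnexthop { rtnh_ifindex = 3, ... }
 *	    RTA_GATEWAY fe80::2
 *
 * rtnh_len is patched afterwards to cover the rtnexthop header plus the
 * nested attributes, as computed above from nlmsg_get_pos().
 */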
static int rt6_fill_node(struct net *net,
			 struct sk_buff *skb, struct rt6_info *rt,
			 struct in6_addr *dst, struct in6_addr *src,
			 int iif, int type, u32 portid, u32 seq,
			 unsigned int flags)
{
	u32 metrics[RTAX_MAX];
	struct rtmsg *rtm;
	struct nlmsghdr *nlh;
	long expires;
	u32 table;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_INET6;
	rtm->rtm_dst_len = rt->rt6i_dst.plen;
	rtm->rtm_src_len = rt->rt6i_src.plen;
	rtm->rtm_tos = 0;
	if (rt->rt6i_table)
		table = rt->rt6i_table->tb6_id;
	else
		table = RT6_TABLE_UNSPEC;
	rtm->rtm_table = table;
	if (nla_put_u32(skb, RTA_TABLE, table))
		goto nla_put_failure;
	if (rt->rt6i_flags & RTF_REJECT) {
		switch (rt->dst.error) {
		case -EINVAL:
			rtm->rtm_type = RTN_BLACKHOLE;
			break;
		case -EACCES:
			rtm->rtm_type = RTN_PROHIBIT;
			break;
		case -EAGAIN:
			rtm->rtm_type = RTN_THROW;
			break;
		default:
			rtm->rtm_type = RTN_UNREACHABLE;
			break;
		}
	}
	else if (rt->rt6i_flags & RTF_LOCAL)
		rtm->rtm_type = RTN_LOCAL;
	else if (rt->rt6i_flags & RTF_ANYCAST)
		rtm->rtm_type = RTN_ANYCAST;
	else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
		rtm->rtm_type = RTN_LOCAL;
	else
		rtm->rtm_type = RTN_UNICAST;
	rtm->rtm_flags = 0;
	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
	rtm->rtm_protocol = rt->rt6i_protocol;

	if (rt->rt6i_flags & RTF_CACHE)
		rtm->rtm_flags |= RTM_F_CLONED;

	if (dst) {
		if (nla_put_in6_addr(skb, RTA_DST, dst))
			goto nla_put_failure;
		rtm->rtm_dst_len = 128;
	} else if (rtm->rtm_dst_len)
		if (nla_put_in6_addr(skb, RTA_DST, &rt->rt6i_dst.addr))
			goto nla_put_failure;
#ifdef CONFIG_IPV6_SUBTREES
	if (src) {
		if (nla_put_in6_addr(skb, RTA_SRC, src))
			goto nla_put_failure;
		rtm->rtm_src_len = 128;
	} else if (rtm->rtm_src_len &&
		   nla_put_in6_addr(skb, RTA_SRC, &rt->rt6i_src.addr))
		goto nla_put_failure;
#endif
	if (iif) {
#ifdef CONFIG_IPV6_MROUTE
		if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
			int err = ip6mr_get_route(net, skb, rtm, portid);

			if (err == 0)
				return 0;
			if (err < 0)
				goto nla_put_failure;
		} else
#endif
			if (nla_put_u32(skb, RTA_IIF, iif))
				goto nla_put_failure;
	} else if (dst) {
		struct in6_addr saddr_buf;
		if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
		    nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
			goto nla_put_failure;
	}

	if (rt->rt6i_prefsrc.plen) {
		struct in6_addr saddr_buf;
		saddr_buf = rt->rt6i_prefsrc.addr;
		if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
			goto nla_put_failure;
	}

	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
	if (rt->rt6i_pmtu)
		metrics[RTAX_MTU - 1] = rt->rt6i_pmtu;
	if (rtnetlink_put_metrics(skb, metrics) < 0)
		goto nla_put_failure;

	if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
		goto nla_put_failure;

	/* For multipath routes, walk the siblings list and add
	 * each as a nexthop within RTA_MULTIPATH.
	 */
	if (rt->rt6i_nsiblings) {
		struct rt6_info *sibling, *next_sibling;
		struct nlattr *mp;

		mp = nla_nest_start(skb, RTA_MULTIPATH);
		if (!mp)
			goto nla_put_failure;

		if (rt6_add_nexthop(skb, rt) < 0)
			goto nla_put_failure;

		list_for_each_entry_safe(sibling, next_sibling,
					 &rt->rt6i_siblings, rt6i_siblings) {
			if (rt6_add_nexthop(skb, sibling) < 0)
				goto nla_put_failure;
		}

		nla_nest_end(skb, mp);
	} else {
		if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags, false) < 0)
			goto nla_put_failure;
	}

	expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0;

	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
		goto nla_put_failure;

	if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
int rt6_dump_route(struct rt6_info *rt, void *p_arg)
{
	struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
	struct net *net = arg->net;

	if (rt == net->ipv6.ip6_null_entry)
		return 0;

	if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
		struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);

		/* user wants prefix routes only */
		if (rtm->rtm_flags & RTM_F_PREFIX &&
		    !(rt->rt6i_flags & RTF_PREFIX_RT)) {
			/* success since this is not a prefix route */
			return 1;
		}
	}

	return rt6_fill_node(net,
		     arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
		     NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq,
		     NLM_F_MULTI);
}
static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[RTA_MAX+1];
	int err, iif = 0, oif = 0;
	struct dst_entry *dst;
	struct rt6_info *rt;
	struct sk_buff *skb;
	struct rtmsg *rtm;
	struct flowi6 fl6;
	bool fibmatch;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
			  extack);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	memset(&fl6, 0, sizeof(fl6));
	rtm = nlmsg_data(nlh);
	fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
	fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);

	if (tb[RTA_SRC]) {
		if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
			goto errout;

		fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
	}

	if (tb[RTA_DST]) {
		if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
			goto errout;

		fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
	}

	if (tb[RTA_IIF])
		iif = nla_get_u32(tb[RTA_IIF]);

	if (tb[RTA_OIF])
		oif = nla_get_u32(tb[RTA_OIF]);

	if (tb[RTA_MARK])
		fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);

	if (tb[RTA_UID])
		fl6.flowi6_uid = make_kuid(current_user_ns(),
					   nla_get_u32(tb[RTA_UID]));
	else
		fl6.flowi6_uid = iif ? INVALID_UID : current_uid();

	if (iif) {
		struct net_device *dev;
		int flags = 0;

		rcu_read_lock();

		dev = dev_get_by_index_rcu(net, iif);
		if (!dev) {
			rcu_read_unlock();
			err = -ENODEV;
			goto errout;
		}

		fl6.flowi6_iif = iif;

		if (!ipv6_addr_any(&fl6.saddr))
			flags |= RT6_LOOKUP_F_HAS_SADDR;

		dst = ip6_route_input_lookup(net, dev, &fl6, flags);

		rcu_read_unlock();
	} else {
		fl6.flowi6_oif = oif;

		dst = ip6_route_output(net, NULL, &fl6);
	}

	rt = container_of(dst, struct rt6_info, dst);
	if (rt->dst.error) {
		err = rt->dst.error;
		ip6_rt_put(rt);
		goto errout;
	}

	if (rt == net->ipv6.ip6_null_entry) {
		err = rt->dst.error;
		ip6_rt_put(rt);
		goto errout;
	}

	if (fibmatch && rt->from) {
		struct rt6_info *ort = rt->from;

		dst_hold(&ort->dst);
		ip6_rt_put(rt);
		rt = ort;
	}

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb) {
		ip6_rt_put(rt);
		err = -ENOBUFS;
		goto errout;
	}

	skb_dst_set(skb, &rt->dst);
	if (fibmatch)
		err = rt6_fill_node(net, skb, rt, NULL, NULL, iif,
				    RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
				    nlh->nlmsg_seq, 0);
	else
		err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
				    RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
				    nlh->nlmsg_seq, 0);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
	return err;
}
void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info,
		     unsigned int nlm_flags)
{
	struct sk_buff *skb;
	struct net *net = info->nl_net;
	u32 seq;
	int err;

	err = -ENOBUFS;
	seq = info->nlh ? info->nlh->nlmsg_seq : 0;

	skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
	if (!skb)
		goto errout;

	err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
			    event, info->portid, seq, nlm_flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
		    info->nlh, gfp_any());
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
}
static int ip6_route_dev_notify(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);

	if (!(dev->flags & IFF_LOOPBACK))
		return NOTIFY_OK;

	if (event == NETDEV_REGISTER) {
		net->ipv6.ip6_null_entry->dst.dev = dev;
		net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
		net->ipv6.ip6_prohibit_entry->dst.dev = dev;
		net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
#endif
	} else if (event == NETDEV_UNREGISTER &&
		   dev->reg_state != NETREG_UNREGISTERED) {
		/* NETDEV_UNREGISTER could be fired for multiple times by
		 * netdev_wait_allrefs(). Make sure we only call this once.
		 */
		in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
		in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
		in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
#endif
	}

	return NOTIFY_OK;
}
#ifdef CONFIG_PROC_FS

static const struct file_operations ipv6_route_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= ipv6_route_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

static int rt6_stats_seq_show(struct seq_file *seq, void *v)
{
	struct net *net = (struct net *)seq->private;
	seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
		   net->ipv6.rt6_stats->fib_nodes,
		   net->ipv6.rt6_stats->fib_route_nodes,
		   atomic_read(&net->ipv6.rt6_stats->fib_rt_alloc),
		   net->ipv6.rt6_stats->fib_rt_entries,
		   net->ipv6.rt6_stats->fib_rt_cache,
		   dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
		   net->ipv6.rt6_stats->fib_discarded_routes);

	return 0;
}

static int rt6_stats_seq_open(struct inode *inode, struct file *file)
{
	return single_open_net(inode, file, rt6_stats_seq_show);
}

static const struct file_operations rt6_stats_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt6_stats_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release_net,
};
#endif	/* CONFIG_PROC_FS */
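/* Example (illustrative values): "cat /proc/net/rt6_stats" prints the seven
 * fields in the order used above, all in hex:
 *
 *	0045 0123 0000 0000 0012 0003 0000
 *
 * i.e. fib nodes, route nodes, rt alloc, rt entries, rt cache, slow-path
 * dst entry count, discarded routes.
 */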
#ifdef CONFIG_SYSCTL

static
int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
			      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct net *net;
	int delay;

	if (!write)
		return -EINVAL;

	net = (struct net *)ctl->extra1;
	delay = net->ipv6.sysctl.flush_delay;
	proc_dointvec(ctl, write, buffer, lenp, ppos);
	fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
	return 0;
}
struct ctl_table ipv6_route_table_template[] = {
	{
		.procname	= "flush",
		.data		= &init_net.ipv6.sysctl.flush_delay,
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= ipv6_sysctl_rtcache_flush
	},
	{
		.procname	= "gc_thresh",
		.data		= &ip6_dst_ops_template.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "max_size",
		.data		= &init_net.ipv6.sysctl.ip6_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_min_interval",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_timeout",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_interval",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_elasticity",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "mtu_expires",
		.data		= &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "min_adv_mss",
		.data		= &init_net.ipv6.sysctl.ip6_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_min_interval_ms",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{ }
};
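/* These knobs surface under /proc/sys/net/ipv6/route/ (illustrative use):
 *
 *	echo 8192 > /proc/sys/net/ipv6/route/max_size
 *	echo 1 > /proc/sys/net/ipv6/route/flush
 *
 * The per-netns copies are wired up by ipv6_route_sysctl_init() below.
 */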
struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
{
	struct ctl_table *table;

	table = kmemdup(ipv6_route_table_template,
			sizeof(ipv6_route_table_template),
			GFP_KERNEL);

	if (table) {
		table[0].data = &net->ipv6.sysctl.flush_delay;
		table[0].extra1 = net;
		table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
		table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
		table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
		table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
		table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
		table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
		table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
		table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
		table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	return table;
}
#endif
static int __net_init ip6_route_net_init(struct net *net)
{
	int ret = -ENOMEM;

	memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
	       sizeof(net->ipv6.ip6_dst_ops));

	if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
		goto out_ip6_dst_ops;

	net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
					   sizeof(*net->ipv6.ip6_null_entry),
					   GFP_KERNEL);
	if (!net->ipv6.ip6_null_entry)
		goto out_ip6_dst_entries;
	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
			 ip6_template_metrics, true);

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	net->ipv6.fib6_has_custom_rules = false;
	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
					       sizeof(*net->ipv6.ip6_prohibit_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_prohibit_entry)
		goto out_ip6_null_entry;
	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
			 ip6_template_metrics, true);

	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
					       sizeof(*net->ipv6.ip6_blk_hole_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_blk_hole_entry)
		goto out_ip6_prohibit_entry;
	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
			 ip6_template_metrics, true);
#endif

	net->ipv6.sysctl.flush_delay = 0;
	net->ipv6.sysctl.ip6_rt_max_size = 4096;
	net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
	net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
	net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
	net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
	net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
	net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;

	net->ipv6.ip6_rt_gc_expire = 30*HZ;

	ret = 0;
out:
	return ret;

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
out_ip6_prohibit_entry:
	kfree(net->ipv6.ip6_prohibit_entry);
out_ip6_null_entry:
	kfree(net->ipv6.ip6_null_entry);
#endif
out_ip6_dst_entries:
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
out_ip6_dst_ops:
	goto out;
}
static void __net_exit ip6_route_net_exit(struct net *net)
{
	kfree(net->ipv6.ip6_null_entry);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	kfree(net->ipv6.ip6_prohibit_entry);
	kfree(net->ipv6.ip6_blk_hole_entry);
#endif
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
}

static int __net_init ip6_route_net_init_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_create("ipv6_route", 0, net->proc_net, &ipv6_route_proc_fops);
	proc_create("rt6_stats", S_IRUGO, net->proc_net, &rt6_stats_seq_fops);
#endif
	return 0;
}

static void __net_exit ip6_route_net_exit_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ipv6_route", net->proc_net);
	remove_proc_entry("rt6_stats", net->proc_net);
#endif
}
static struct pernet_operations ip6_route_net_ops = {
	.init = ip6_route_net_init,
	.exit = ip6_route_net_exit,
};

static int __net_init ipv6_inetpeer_init(struct net *net)
{
	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);

	if (!bp)
		return -ENOMEM;
	inet_peer_base_init(bp);
	net->ipv6.peers = bp;
	return 0;
}

static void __net_exit ipv6_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv6.peers;

	net->ipv6.peers = NULL;
	inetpeer_invalidate_tree(bp);
	kfree(bp);
}

static struct pernet_operations ipv6_inetpeer_ops = {
	.init	= ipv6_inetpeer_init,
	.exit	= ipv6_inetpeer_exit,
};

static struct pernet_operations ip6_route_net_late_ops = {
	.init = ip6_route_net_init_late,
	.exit = ip6_route_net_exit_late,
};

static struct notifier_block ip6_route_dev_notifier = {
	.notifier_call = ip6_route_dev_notify,
	.priority = ADDRCONF_NOTIFY_PRIORITY - 10,
};
void __init ip6_route_init_special_entries(void)
{
	/* Registering of the loopback is done before this portion of code,
	 * the loopback reference in rt6_info will not be taken, do it
	 * manually for init_net */
	init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#endif
}
int __init ip6_route_init(void)
{
	int ret;
	int cpu;

	ret = -ENOMEM;
	ip6_dst_ops_template.kmem_cachep =
		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!ip6_dst_ops_template.kmem_cachep)
		goto out;

	ret = dst_entries_init(&ip6_dst_blackhole_ops);
	if (ret)
		goto out_kmem_cache;

	ret = register_pernet_subsys(&ipv6_inetpeer_ops);
	if (ret)
		goto out_dst_entries;

	ret = register_pernet_subsys(&ip6_route_net_ops);
	if (ret)
		goto out_register_inetpeer;

	ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;

	ret = fib6_init();
	if (ret)
		goto out_register_subsys;

	ret = xfrm6_init();
	if (ret)
		goto out_fib6_init;

	ret = fib6_rules_init();
	if (ret)
		goto xfrm6_init;

	ret = register_pernet_subsys(&ip6_route_net_late_ops);
	if (ret)
		goto fib6_rules_init;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWROUTE,
				   inet6_rtm_newroute, NULL, 0);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELROUTE,
				   inet6_rtm_delroute, NULL, 0);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE,
				   inet6_rtm_getroute, NULL,
				   RTNL_FLAG_DOIT_UNLOCKED);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
	if (ret)
		goto out_register_late_subsys;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		spin_lock_init(&ul->lock);
	}

out:
	return ret;

out_register_late_subsys:
	rtnl_unregister_all(PF_INET6);
	unregister_pernet_subsys(&ip6_route_net_late_ops);
fib6_rules_init:
	fib6_rules_cleanup();
xfrm6_init:
	xfrm6_fini();
out_fib6_init:
	fib6_gc_cleanup();
out_register_subsys:
	unregister_pernet_subsys(&ip6_route_net_ops);
out_register_inetpeer:
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
out_dst_entries:
	dst_entries_destroy(&ip6_dst_blackhole_ops);
out_kmem_cache:
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
	goto out;
}
void ip6_route_cleanup(void)
{
	unregister_netdevice_notifier(&ip6_route_dev_notifier);
	unregister_pernet_subsys(&ip6_route_net_late_ops);
	fib6_rules_cleanup();
	xfrm6_fini();
	fib6_gc_cleanup();
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
	unregister_pernet_subsys(&ip6_route_net_ops);
	dst_entries_destroy(&ip6_dst_blackhole_ops);
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
}