2 * Linux INET6 implementation
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
16 * YOSHIFUJI Hideaki @USAGI
17 * reworked default router selection.
18 * - respect outgoing interface
19 * - select from (probably) reachable routers (i.e.
20 * routers in REACHABLE, STALE, DELAY or PROBE states).
21 * - always select the same router if it is (probably)
22 * reachable. otherwise, round-robin the list.
24 * Fixed routing subtrees.
27 #define pr_fmt(fmt) "IPv6: " fmt
29 #include <linux/capability.h>
30 #include <linux/errno.h>
31 #include <linux/export.h>
32 #include <linux/types.h>
33 #include <linux/times.h>
34 #include <linux/socket.h>
35 #include <linux/sockios.h>
36 #include <linux/net.h>
37 #include <linux/route.h>
38 #include <linux/netdevice.h>
39 #include <linux/in6.h>
40 #include <linux/mroute6.h>
41 #include <linux/init.h>
42 #include <linux/if_arp.h>
43 #include <linux/proc_fs.h>
44 #include <linux/seq_file.h>
45 #include <linux/nsproxy.h>
46 #include <linux/slab.h>
47 #include <net/net_namespace.h>
50 #include <net/ip6_fib.h>
51 #include <net/ip6_route.h>
52 #include <net/ndisc.h>
53 #include <net/addrconf.h>
55 #include <linux/rtnetlink.h>
57 #include <net/dst_metadata.h>
59 #include <net/netevent.h>
60 #include <net/netlink.h>
61 #include <net/nexthop.h>
62 #include <net/lwtunnel.h>
63 #include <net/ip_tunnels.h>
64 #include <net/l3mdev.h>
65 #include <trace/events/fib6.h>
67 #include <linux/uaccess.h>
70 #include <linux/sysctl.h>
74 RT6_NUD_FAIL_HARD = -3,
75 RT6_NUD_FAIL_PROBE = -2,
76 RT6_NUD_FAIL_DO_RR = -1,
80 static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort);
81 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
82 static unsigned int ip6_default_advmss(const struct dst_entry *dst);
83 static unsigned int ip6_mtu(const struct dst_entry *dst);
84 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
85 static void ip6_dst_destroy(struct dst_entry *);
86 static void ip6_dst_ifdown(struct dst_entry *,
87 struct net_device *dev, int how);
88 static int ip6_dst_gc(struct dst_ops *ops);
90 static int ip6_pkt_discard(struct sk_buff *skb);
91 static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
92 static int ip6_pkt_prohibit(struct sk_buff *skb);
93 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
94 static void ip6_link_failure(struct sk_buff *skb);
95 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
96 struct sk_buff *skb, u32 mtu);
97 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
99 static void rt6_dst_from_metrics_check(struct rt6_info *rt);
100 static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
101 static size_t rt6_nlmsg_size(struct rt6_info *rt);
102 static int rt6_fill_node(struct net *net,
103 struct sk_buff *skb, struct rt6_info *rt,
104 struct in6_addr *dst, struct in6_addr *src,
105 int iif, int type, u32 portid, u32 seq,
108 #ifdef CONFIG_IPV6_ROUTE_INFO
109 static struct rt6_info *rt6_add_route_info(struct net *net,
110 const struct in6_addr *prefix, int prefixlen,
111 const struct in6_addr *gwaddr,
112 struct net_device *dev,
114 static struct rt6_info *rt6_get_route_info(struct net *net,
115 const struct in6_addr *prefix, int prefixlen,
116 const struct in6_addr *gwaddr,
117 struct net_device *dev);
/* Per-CPU list of "uncached" rt6_info entries, i.e. RTF_CACHE clones that
 * are not owned by the fib6 tree and must be tracked separately so they can
 * be re-pointed when their device goes away.
 * NOTE(review): this excerpt is missing interior lines; the original struct
 * also carries a spinlock protecting @head -- confirm against full source.
 */
120 struct uncached_list {
122 struct list_head head;
125 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
/* Link @rt onto the current CPU's uncached list (under the list lock) and
 * remember which list it went on so rt6_uncached_list_del() can find it.
 */
127 static void rt6_uncached_list_add(struct rt6_info *rt)
129 struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
131 rt->rt6i_uncached_list = ul;
133 spin_lock_bh(&ul->lock);
134 list_add_tail(&rt->rt6i_uncached, &ul->head);
135 spin_unlock_bh(&ul->lock);
/* Unlink @rt from the per-CPU uncached list it was added to, if any.
 * Safe to call on routes that were never added (list_empty() check).
 */
138 static void rt6_uncached_list_del(struct rt6_info *rt)
140 if (!list_empty(&rt->rt6i_uncached)) {
141 struct uncached_list *ul = rt->rt6i_uncached_list;
143 spin_lock_bh(&ul->lock);
144 list_del(&rt->rt6i_uncached);
145 spin_unlock_bh(&ul->lock);
/* Called on device teardown: walk every CPU's uncached list and re-point
 * any route still referencing @dev (both rt6i_idev and dst.dev) at the
 * loopback device, transferring the idev/dev references accordingly.
 * Nothing to do when the loopback device itself is going away.
 */
149 static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
151 struct net_device *loopback_dev = net->loopback_dev;
154 if (dev == loopback_dev)
157 for_each_possible_cpu(cpu) {
158 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
161 spin_lock_bh(&ul->lock);
162 list_for_each_entry(rt, &ul->head, rt6i_uncached) {
163 struct inet6_dev *rt_idev = rt->rt6i_idev;
164 struct net_device *rt_dev = rt->dst.dev;
166 if (rt_idev->dev == dev) {
167 rt->rt6i_idev = in6_dev_get(loopback_dev);
168 in6_dev_put(rt_idev);
172 rt->dst.dev = loopback_dev;
173 dev_hold(rt->dst.dev);
177 spin_unlock_bh(&ul->lock);
/* For a per-cpu (RTF_PCPU) clone, metrics live on the parent route the
 * clone was made from (dst.from); return a writable pointer to those.
 */
181 static u32 *rt6_pcpu_cow_metrics(struct rt6_info *rt)
183 return dst_metrics_write_ptr(rt->dst.from);
/* dst_ops->cow_metrics hook: choose where copy-on-write metrics come from
 * depending on the route flavor (per-cpu clone, cache clone, or plain
 * fib6-owned route).  The RTF_CACHE branch body is elided in this excerpt.
 */
186 static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
188 struct rt6_info *rt = (struct rt6_info *)dst;
190 if (rt->rt6i_flags & RTF_PCPU)
191 return rt6_pcpu_cow_metrics(rt);
192 else if (rt->rt6i_flags & RTF_CACHE)
195 return dst_cow_metrics_generic(dst, old);
/* Pick the address to resolve a neighbour for: the route's gateway when one
 * is set, otherwise fall back to the packet's destination address (the
 * skb-based fallback path; a daddr fallback likely exists in elided lines).
 */
198 static inline const void *choose_neigh_daddr(struct rt6_info *rt,
202 struct in6_addr *p = &rt->rt6i_gateway;
204 if (!ipv6_addr_any(p))
205 return (const void *) p;
207 return &ipv6_hdr(skb)->daddr;
/* dst_ops->neigh_lookup hook: find (or create in nd_tbl) the neighbour
 * entry for the next hop chosen by choose_neigh_daddr().
 */
211 static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
215 struct rt6_info *rt = (struct rt6_info *) dst;
218 daddr = choose_neigh_daddr(rt, skb, daddr);
219 n = __ipv6_neigh_lookup(dst->dev, daddr);
222 return neigh_create(&nd_tbl, daddr, dst->dev);
/* dst_ops->confirm_neigh hook: mark the next-hop neighbour as recently
 * confirmed.  Skipped for devices that do no neighbour resolution
 * (IFF_NOARP/loopback) and for multicast destinations.
 */
225 static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
227 struct net_device *dev = dst->dev;
228 struct rt6_info *rt = (struct rt6_info *)dst;
230 daddr = choose_neigh_daddr(rt, NULL, daddr);
233 if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
235 if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
237 __ipv6_confirm_neigh(dev, daddr);
/* dst_ops for ordinary IPv6 routes; copied per-netns into
 * net->ipv6.ip6_dst_ops.  All callbacks are defined in this file except
 * __ip6_local_out.
 */
240 static struct dst_ops ip6_dst_ops_template = {
244 .check = ip6_dst_check,
245 .default_advmss = ip6_default_advmss,
247 .cow_metrics = ipv6_cow_metrics,
248 .destroy = ip6_dst_destroy,
249 .ifdown = ip6_dst_ifdown,
250 .negative_advice = ip6_negative_advice,
251 .link_failure = ip6_link_failure,
252 .update_pmtu = ip6_rt_update_pmtu,
253 .redirect = rt6_do_redirect,
254 .local_out = __ip6_local_out,
255 .neigh_lookup = ip6_neigh_lookup,
256 .confirm_neigh = ip6_confirm_neigh,
/* MTU for a blackhole dst: the raw RTAX_MTU metric if set, else the
 * device MTU.
 */
259 static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
261 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
263 return mtu ? : dst->dev->mtu;
/* Intentionally empty: blackhole routes ignore PMTU updates. */
266 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
267 struct sk_buff *skb, u32 mtu)
271 static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
/* dst_ops for blackhole routes created by ip6_blackhole_route(): PMTU and
 * redirect events are deliberately no-ops.
 */
276 static struct dst_ops ip6_dst_blackhole_ops = {
278 .destroy = ip6_dst_destroy,
279 .check = ip6_dst_check,
280 .mtu = ip6_blackhole_mtu,
281 .default_advmss = ip6_default_advmss,
282 .update_pmtu = ip6_rt_blackhole_update_pmtu,
283 .redirect = ip6_rt_blackhole_redirect,
284 .cow_metrics = dst_cow_metrics_generic,
285 .neigh_lookup = ip6_neigh_lookup,
/* Metrics shared by the static route templates below (hop limit 0 means
 * "use the default").
 */
288 static const u32 ip6_template_metrics[RTAX_MAX] = {
289 [RTAX_HOPLIMIT - 1] = 0,
/* Template for the per-netns null route: matched packets are discarded
 * with -ENETUNREACH.  Highest possible metric so it only matches when
 * nothing else does.
 */
292 static const struct rt6_info ip6_null_entry_template = {
294 .__refcnt = ATOMIC_INIT(1),
296 .obsolete = DST_OBSOLETE_FORCE_CHK,
297 .error = -ENETUNREACH,
298 .input = ip6_pkt_discard,
299 .output = ip6_pkt_discard_out,
301 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
302 .rt6i_protocol = RTPROT_KERNEL,
303 .rt6i_metric = ~(u32) 0,
304 .rt6i_ref = ATOMIC_INIT(1),
307 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
/* Template for the "prohibit" route (multiple-tables only): packets are
 * rejected administratively via ip6_pkt_prohibit{,_out}.
 */
309 static const struct rt6_info ip6_prohibit_entry_template = {
311 .__refcnt = ATOMIC_INIT(1),
313 .obsolete = DST_OBSOLETE_FORCE_CHK,
315 .input = ip6_pkt_prohibit,
316 .output = ip6_pkt_prohibit_out,
318 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
319 .rt6i_protocol = RTPROT_KERNEL,
320 .rt6i_metric = ~(u32) 0,
321 .rt6i_ref = ATOMIC_INIT(1),
/* Template for the "blackhole" route (multiple-tables only): packets are
 * silently dropped via dst_discard{,_out}.
 */
324 static const struct rt6_info ip6_blk_hole_entry_template = {
326 .__refcnt = ATOMIC_INIT(1),
328 .obsolete = DST_OBSOLETE_FORCE_CHK,
330 .input = dst_discard,
331 .output = dst_discard_out,
333 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
334 .rt6i_protocol = RTPROT_KERNEL,
335 .rt6i_metric = ~(u32) 0,
336 .rt6i_ref = ATOMIC_INIT(1),
/* Zero everything in the rt6_info that follows the embedded dst_entry,
 * then initialise the sibling and uncached list heads.
 */
341 static void rt6_info_init(struct rt6_info *rt)
343 struct dst_entry *dst = &rt->dst;
345 memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
346 INIT_LIST_HEAD(&rt->rt6i_siblings);
347 INIT_LIST_HEAD(&rt->rt6i_uncached);
350 /* allocate dst with ip6_dst_ops */
/* Low-level allocator: dst_alloc() with initial refcnt 1 and obsolete set
 * to DST_OBSOLETE_FORCE_CHK; rt6_info_init() call elided in this excerpt.
 */
351 static struct rt6_info *__ip6_dst_alloc(struct net *net,
352 struct net_device *dev,
355 struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
356 1, DST_OBSOLETE_FORCE_CHK, flags);
/* Public allocator: __ip6_dst_alloc() plus a per-cpu array of clone slots
 * (rt6i_pcpu).  On percpu allocation failure the route is released
 * immediately (dst_release_immediate on the error path).
 */
364 struct rt6_info *ip6_dst_alloc(struct net *net,
365 struct net_device *dev,
368 struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
371 rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);
375 for_each_possible_cpu(cpu) {
378 p = per_cpu_ptr(rt->rt6i_pcpu, cpu);
379 /* no one shares rt */
383 dst_release_immediate(&rt->dst);
390 EXPORT_SYMBOL(ip6_dst_alloc);
/* dst_ops->destroy hook: drop metrics, the percpu clone array, the
 * uncached-list membership and the idev reference.  The dst.from release
 * (for @from) is in lines elided from this excerpt.
 */
392 static void ip6_dst_destroy(struct dst_entry *dst)
394 struct rt6_info *rt = (struct rt6_info *)dst;
395 struct dst_entry *from = dst->from;
396 struct inet6_dev *idev;
398 dst_destroy_metrics_generic(dst);
399 free_percpu(rt->rt6i_pcpu);
400 rt6_uncached_list_del(rt);
402 idev = rt->rt6i_idev;
404 rt->rt6i_idev = NULL;
/* dst_ops->ifdown hook: when @dev goes down (and it is not loopback),
 * migrate the route's idev reference to the loopback device of its netns.
 */
412 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
415 struct rt6_info *rt = (struct rt6_info *)dst;
416 struct inet6_dev *idev = rt->rt6i_idev;
417 struct net_device *loopback_dev =
418 dev_net(dev)->loopback_dev;
420 if (dev != loopback_dev) {
421 if (idev && idev->dev == dev) {
422 struct inet6_dev *loopback_idev =
423 in6_dev_get(loopback_dev);
425 rt->rt6i_idev = loopback_idev;
/* Non-recursive expiry check: true iff the route itself carries
 * RTF_EXPIRES and its dst.expires deadline has passed.
 */
432 static bool __rt6_check_expired(const struct rt6_info *rt)
434 if (rt->rt6i_flags & RTF_EXPIRES)
435 return time_after(jiffies, rt->dst.expires)
/* Recursive expiry check: an unexpiring clone is considered expired when
 * the route it was cloned from (dst.from) has expired.
 */
440 static bool rt6_check_expired(const struct rt6_info *rt)
442 if (rt->rt6i_flags & RTF_EXPIRES) {
443 if (time_after(jiffies, rt->dst.expires))
445 } else if (rt->dst.from) {
446 return rt6_check_expired((struct rt6_info *) rt->dst.from);
451 /* Multipath route selection:
452 * Hash based function using packet header and flowlabel.
453 * Adapted from fib_info_hashfn()
/* Returns a nexthop index in [0, candidate_count) derived from the flow
 * hash, so a given flow consistently maps to the same sibling.
 */
455 static int rt6_info_hash_nhsfn(unsigned int candidate_count,
456 const struct flowi6 *fl6)
458 return get_hash_from_flowi6(fl6) % candidate_count;
/* Pick one route among @match and its ECMP siblings using the flow hash.
 * Index 0 keeps @match itself (the sibling list excludes it); otherwise
 * walk the sibling list, skipping entries whose score is negative.
 */
461 static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
462 struct flowi6 *fl6, int oif,
465 struct rt6_info *sibling, *next_sibling;
468 route_choosen = rt6_info_hash_nhsfn(match->rt6i_nsiblings + 1, fl6);
469 /* Don't change the route, if route_choosen == 0
470 * (siblings does not include ourself)
473 list_for_each_entry_safe(sibling, next_sibling,
474 &match->rt6i_siblings, rt6i_siblings) {
476 if (route_choosen == 0) {
477 if (rt6_score_route(sibling, oif, strict) < 0)
487 * Route lookup. Any table->tb6_lock is implied.
/* Walk the leaf chain starting at @rt and return the first entry whose
 * device matches @oif (or whose device owns @saddr when no oif is given).
 * With RT6_LOOKUP_F_IFACE set and no match found, the null route is
 * returned instead of a fallback.  Several branches are elided here.
 */
490 static inline struct rt6_info *rt6_device_match(struct net *net,
492 const struct in6_addr *saddr,
496 struct rt6_info *local = NULL;
497 struct rt6_info *sprt;
499 if (!oif && ipv6_addr_any(saddr))
502 for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) {
503 struct net_device *dev = sprt->dst.dev;
506 if (dev->ifindex == oif)
508 if (dev->flags & IFF_LOOPBACK) {
509 if (!sprt->rt6i_idev ||
510 sprt->rt6i_idev->dev->ifindex != oif) {
511 if (flags & RT6_LOOKUP_F_IFACE)
514 local->rt6i_idev->dev->ifindex == oif)
520 if (ipv6_chk_addr(net, saddr, dev,
521 flags & RT6_LOOKUP_F_IFACE))
530 if (flags & RT6_LOOKUP_F_IFACE)
531 return net->ipv6.ip6_null_entry;
537 #ifdef CONFIG_IPV6_ROUTER_PREF
/* Deferred-work context for router reachability probing (CONFIG_IPV6_ROUTER_PREF):
 * the target router address and the device to probe it on.
 */
538 struct __rt6_probe_work {
539 struct work_struct work;
540 struct in6_addr target;
541 struct net_device *dev;
/* Workqueue callback: send a Neighbour Solicitation to the target's
 * solicited-node multicast address to probe router reachability.
 */
544 static void rt6_probe_deferred(struct work_struct *w)
546 struct in6_addr mcaddr;
547 struct __rt6_probe_work *work =
548 container_of(w, struct __rt6_probe_work, work);
550 addrconf_addr_solict_mult(&work->target, &mcaddr);
551 ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
/* Router Reachability Probing (RFC 4191-style): if the gateway's neighbour
 * entry is not NUD_VALID and the per-idev probe interval has elapsed,
 * schedule rt6_probe_deferred() to solicit it.  Rate-limited via
 * __neigh_set_probe_once().  Only gateway routes are probed.
 */
556 static void rt6_probe(struct rt6_info *rt)
558 struct __rt6_probe_work *work;
559 struct neighbour *neigh;
561 * Okay, this does not seem to be appropriate
562 * for now, however, we need to check if it
563 * is really so; aka Router Reachability Probing.
565 * Router Reachability Probe MUST be rate-limited
566 * to no more than one per minute.
568 if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
571 neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
573 if (neigh->nud_state & NUD_VALID)
577 write_lock(&neigh->lock);
578 if (!(neigh->nud_state & NUD_VALID) &&
581 rt->rt6i_idev->cnf.rtr_probe_interval)) {
582 work = kmalloc(sizeof(*work), GFP_ATOMIC);
584 __neigh_set_probe_once(neigh);
586 write_unlock(&neigh->lock);
588 work = kmalloc(sizeof(*work), GFP_ATOMIC);
592 INIT_WORK(&work->work, rt6_probe_deferred);
593 work->target = rt->rt6i_gateway;
594 dev_hold(rt->dst.dev);
595 work->dev = rt->dst.dev;
596 schedule_work(&work->work);
600 rcu_read_unlock_bh();
603 static inline void rt6_probe(struct rt6_info *rt)
609 * Default Router Selection (RFC 2461 6.3.6)
/* Score the route's device against @oif: non-zero when it matches (or no
 * oif constraint), including the loopback-with-matching-idev case.
 */
611 static inline int rt6_check_dev(struct rt6_info *rt, int oif)
613 struct net_device *dev = rt->dst.dev;
614 if (!oif || dev->ifindex == oif)
616 if ((dev->flags & IFF_LOOPBACK) &&
617 rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
/* Classify the route's next-hop reachability for router selection:
 * - non-gateway / RTF_NONEXTHOP routes always succeed;
 * - a NUD_VALID neighbour succeeds;
 * - with ROUTER_PREF, a not-yet-FAILED neighbour still succeeds, otherwise
 *   the caller is told to probe (RT6_NUD_FAIL_PROBE);
 * - no neighbour entry: succeed under ROUTER_PREF, else request round-robin.
 */
622 static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt)
624 struct neighbour *neigh;
625 enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
627 if (rt->rt6i_flags & RTF_NONEXTHOP ||
628 !(rt->rt6i_flags & RTF_GATEWAY))
629 return RT6_NUD_SUCCEED;
632 neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
634 read_lock(&neigh->lock);
635 if (neigh->nud_state & NUD_VALID)
636 ret = RT6_NUD_SUCCEED;
637 #ifdef CONFIG_IPV6_ROUTER_PREF
638 else if (!(neigh->nud_state & NUD_FAILED))
639 ret = RT6_NUD_SUCCEED;
641 ret = RT6_NUD_FAIL_PROBE;
643 read_unlock(&neigh->lock);
645 ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
646 RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
648 rcu_read_unlock_bh();
/* Combined route score: device match (bit 0 area), router preference
 * (shifted by 2 under ROUTER_PREF), and -- when REACHABLE is requested --
 * the neighbour state from rt6_check_neigh().  Negative values are the
 * RT6_NUD_FAIL_* codes.
 */
653 static int rt6_score_route(struct rt6_info *rt, int oif,
658 m = rt6_check_dev(rt, oif);
659 if (!m && (strict & RT6_LOOKUP_F_IFACE))
660 return RT6_NUD_FAIL_HARD;
661 #ifdef CONFIG_IPV6_ROUTER_PREF
662 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
664 if (strict & RT6_LOOKUP_F_REACHABLE) {
665 int n = rt6_check_neigh(rt);
/* Compare @rt against the current best (@match/@mpri): skip link-down
 * devices (when the idev policy says so), expired routes, and hard
 * failures; remember when a RT6_NUD_FAIL_DO_RR candidate wins so the
 * caller can advance the round-robin pointer.
 */
672 static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
673 int *mpri, struct rt6_info *match,
677 bool match_do_rr = false;
678 struct inet6_dev *idev = rt->rt6i_idev;
679 struct net_device *dev = rt->dst.dev;
681 if (dev && !netif_carrier_ok(dev) &&
682 idev->cnf.ignore_routes_with_linkdown &&
683 !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
686 if (rt6_check_expired(rt))
689 m = rt6_score_route(rt, oif, strict);
690 if (m == RT6_NUD_FAIL_DO_RR) {
692 m = 0; /* lowest valid score */
693 } else if (m == RT6_NUD_FAIL_HARD) {
697 if (strict & RT6_LOOKUP_F_REACHABLE)
700 /* note that m can be RT6_NUD_FAIL_PROBE at this point */
702 *do_rr = match_do_rr;
/* Scan the leaf chain at the round-robin head, then wrap around from the
 * node's leaf up to @rr_head, scoring only entries with the given @metric;
 * leftover (@cont) entries of a different metric are scanned last.
 */
710 static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
711 struct rt6_info *rr_head,
712 u32 metric, int oif, int strict,
715 struct rt6_info *rt, *match, *cont;
720 for (rt = rr_head; rt; rt = rt->dst.rt6_next) {
721 if (rt->rt6i_metric != metric) {
726 match = find_match(rt, oif, strict, &mpri, match, do_rr);
729 for (rt = fn->leaf; rt && rt != rr_head; rt = rt->dst.rt6_next) {
730 if (rt->rt6i_metric != metric) {
735 match = find_match(rt, oif, strict, &mpri, match, do_rr);
741 for (rt = cont; rt; rt = rt->dst.rt6_next)
742 match = find_match(rt, oif, strict, &mpri, match, do_rr);
/* Default-router selection for node @fn: score candidates via
 * find_rr_leaf() starting at fn->rr_ptr, and when round-robin was
 * requested advance rr_ptr to the next same-metric entry.  Falls back to
 * the null route when nothing matches.
 */
747 static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
749 struct rt6_info *match, *rt0;
755 fn->rr_ptr = rt0 = fn->leaf;
757 match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict,
761 struct rt6_info *next = rt0->dst.rt6_next;
763 /* no entries matched; do round-robin */
764 if (!next || next->rt6i_metric != rt0->rt6i_metric)
771 net = dev_net(rt0->dst.dev);
772 return match ? match : net->ipv6.ip6_null_entry;
/* True when the route has a gateway or is marked RTF_NONEXTHOP. */
775 static bool rt6_is_gw_or_nonexthop(const struct rt6_info *rt)
777 return (rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY));
780 #ifdef CONFIG_IPV6_ROUTE_INFO
/* Process a Route Information Option from a Router Advertisement
 * (RFC 4191): validate option length against prefix_len, decode the
 * router preference and lifetime, then add/update/remove the
 * corresponding RTF_ROUTEINFO route (or default router for prefix_len 0).
 * Several validation/exit branches are elided in this excerpt.
 */
781 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
782 const struct in6_addr *gwaddr)
784 struct net *net = dev_net(dev);
785 struct route_info *rinfo = (struct route_info *) opt;
786 struct in6_addr prefix_buf, *prefix;
788 unsigned long lifetime;
791 if (len < sizeof(struct route_info)) {
795 /* Sanity check for prefix_len and length */
796 if (rinfo->length > 3) {
798 } else if (rinfo->prefix_len > 128) {
800 } else if (rinfo->prefix_len > 64) {
801 if (rinfo->length < 2) {
804 } else if (rinfo->prefix_len > 0) {
805 if (rinfo->length < 1) {
810 pref = rinfo->route_pref;
811 if (pref == ICMPV6_ROUTER_PREF_INVALID)
814 lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
816 if (rinfo->length == 3)
817 prefix = (struct in6_addr *)rinfo->prefix;
819 /* this function is safe */
820 ipv6_addr_prefix(&prefix_buf,
821 (struct in6_addr *)rinfo->prefix,
823 prefix = &prefix_buf;
826 if (rinfo->prefix_len == 0)
827 rt = rt6_get_dflt_router(gwaddr, dev);
829 rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
832 if (rt && !lifetime) {
838 rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
841 rt->rt6i_flags = RTF_ROUTEINFO |
842 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
845 if (!addrconf_finite_timeout(lifetime))
846 rt6_clean_expires(rt);
848 rt6_set_expires(rt, jiffies + HZ * lifetime);
/* Walk back up the fib6 tree from @fn looking for the next node carrying
 * route info (RTN_RTINFO), descending into source-address subtrees
 * (FIB6_SUBTREE) where present.  Stops at the tree root (RTN_TL_ROOT).
 */
856 static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
857 struct in6_addr *saddr)
859 struct fib6_node *pn;
861 if (fn->fn_flags & RTN_TL_ROOT)
864 if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn)
865 fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr);
868 if (fn->fn_flags & RTN_RTINFO)
/* Simple (no clone) table lookup used by ip6_route_lookup()/rt6_lookup():
 * find the node, filter by device, apply multipath selection, backtrack on
 * a null-route result, then take a use-reference under tb6_lock.
 */
873 static struct rt6_info *ip6_pol_route_lookup(struct net *net,
874 struct fib6_table *table,
875 struct flowi6 *fl6, int flags)
877 struct fib6_node *fn;
880 read_lock_bh(&table->tb6_lock);
881 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
884 rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags);
885 if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0)
886 rt = rt6_multipath_select(rt, fl6, fl6->flowi6_oif, flags);
887 if (rt == net->ipv6.ip6_null_entry) {
888 fn = fib6_backtrack(fn, &fl6->saddr);
892 dst_use(&rt->dst, jiffies);
893 read_unlock_bh(&table->tb6_lock);
895 trace_fib6_table_lookup(net, rt, table->tb6_id, fl6);
/* Policy-routing wrapper around ip6_pol_route_lookup(). */
901 struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
904 return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup);
906 EXPORT_SYMBOL_GPL(ip6_route_lookup);
/* Convenience lookup by daddr/saddr/oif: builds a flowi6, sets
 * RT6_LOOKUP_F_IFACE when @strict, RT6_LOOKUP_F_HAS_SADDR when a source
 * address was given, and returns the resulting rt6_info.
 */
908 struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
909 const struct in6_addr *saddr, int oif, int strict)
911 struct flowi6 fl6 = {
915 struct dst_entry *dst;
916 int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
919 memcpy(&fl6.saddr, saddr, sizeof(*saddr));
920 flags |= RT6_LOOKUP_F_HAS_SADDR;
923 dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup);
925 return (struct rt6_info *) dst;
931 EXPORT_SYMBOL(rt6_lookup);
933 /* ip6_ins_rt is called with FREE table->tb6_lock.
934 * It takes new route entry, the addition fails by any reason the
936 * Caller must hold dst before calling it.
/* Insert @rt into its fib6 table under the table write lock. */
939 static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info,
940 struct mx6_config *mxc,
941 struct netlink_ext_ack *extack)
944 struct fib6_table *table;
946 table = rt->rt6i_table;
947 write_lock_bh(&table->tb6_lock);
948 err = fib6_add(&table->tb6_root, rt, info, mxc, extack);
949 write_unlock_bh(&table->tb6_lock);
/* Public insert helper: default nl_info / empty metrics, no extack.
 * The fib6 tree takes its own reference on @rt (caller holds dst first).
 */
954 int ip6_ins_rt(struct rt6_info *rt)
956 struct nl_info info = { .nl_net = dev_net(rt->dst.dev), };
957 struct mx6_config mxc = { .mx = NULL, };
959 /* Hold dst to account for the reference from the fib6 tree */
961 return __ip6_ins_rt(rt, &info, &mxc, NULL);
/* Create an RTF_CACHE clone of @ort for a specific daddr (and, with
 * subtrees, saddr): host route (/128), DST_HOST, copied init state.
 * If @ort is itself a cache/pcpu clone, clone from its parent instead.
 * RTF_ANYCAST is set when the clone target equals a non-host prefix's
 * destination address.
 */
964 static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
965 const struct in6_addr *daddr,
966 const struct in6_addr *saddr)
974 if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
975 ort = (struct rt6_info *)ort->dst.from;
977 rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev, 0);
982 ip6_rt_copy_init(rt, ort);
983 rt->rt6i_flags |= RTF_CACHE;
985 rt->dst.flags |= DST_HOST;
986 rt->rt6i_dst.addr = *daddr;
987 rt->rt6i_dst.plen = 128;
989 if (!rt6_is_gw_or_nonexthop(ort)) {
990 if (ort->rt6i_dst.plen != 128 &&
991 ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
992 rt->rt6i_flags |= RTF_ANYCAST;
993 #ifdef CONFIG_IPV6_SUBTREES
994 if (rt->rt6i_src.plen && saddr) {
995 rt->rt6i_src.addr = *saddr;
996 rt->rt6i_src.plen = 128;
/* Allocate a per-cpu (RTF_PCPU) clone of @rt, copying its init state and
 * protocol.
 */
1004 static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
1006 struct rt6_info *pcpu_rt;
1008 pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev),
1009 rt->dst.dev, rt->dst.flags);
1013 ip6_rt_copy_init(pcpu_rt, rt);
1014 pcpu_rt->rt6i_protocol = rt->rt6i_protocol;
1015 pcpu_rt->rt6i_flags |= RTF_PCPU;
1019 /* It should be called with read_lock_bh(&tb6_lock) acquired */
/* Fetch this CPU's existing pcpu clone of @rt, taking a dst reference and
 * refreshing inherited metrics; the clone is created lazily elsewhere
 * (rt6_make_pcpu_route) when the slot is empty.
 */
1020 static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
1022 struct rt6_info *pcpu_rt, **p;
1024 p = this_cpu_ptr(rt->rt6i_pcpu);
1028 dst_hold(&pcpu_rt->dst);
1029 rt6_dst_from_metrics_check(pcpu_rt);
/* Create and install this CPU's pcpu clone of @rt.  Allocation failure
 * yields the (held) null entry.  Installation uses cmpxchg so a concurrent
 * creator wins cleanly; if @rt was already unlinked from the tree
 * (rt6i_pcpu gone) the fresh clone is dropped and the next dst_check()
 * triggers a re-lookup.  tb6_lock is retaken here because the caller
 * released it before allocating (see ip6_pol_route()).
 */
1034 static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
1036 struct fib6_table *table = rt->rt6i_table;
1037 struct rt6_info *pcpu_rt, *prev, **p;
1039 pcpu_rt = ip6_rt_pcpu_alloc(rt);
1041 struct net *net = dev_net(rt->dst.dev);
1043 dst_hold(&net->ipv6.ip6_null_entry->dst);
1044 return net->ipv6.ip6_null_entry;
1047 read_lock_bh(&table->tb6_lock);
1048 if (rt->rt6i_pcpu) {
1049 p = this_cpu_ptr(rt->rt6i_pcpu);
1050 prev = cmpxchg(p, NULL, pcpu_rt);
1052 /* If someone did it before us, return prev instead */
1053 dst_release_immediate(&pcpu_rt->dst);
1057 /* rt has been removed from the fib6 tree
1058 * before we have a chance to acquire the read_lock.
1059 * In this case, don't bother to create a pcpu rt
1060 * since rt is going away anyway. The next
1061 * dst_check() will trigger a re-lookup.
1063 dst_release_immediate(&pcpu_rt->dst);
1066 dst_hold(&pcpu_rt->dst);
1067 rt6_dst_from_metrics_check(pcpu_rt);
1068 read_unlock_bh(&table->tb6_lock);
/* Core IPv6 route lookup.  Selects a route in @table (with REACHABLE
 * strictness when forwarding is off, relaxed on retry), backtracking on
 * null results.  Result handling, in order:
 *  1. null entry or RTF_CACHE hit  -> take a use-ref and return as-is;
 *  2. FLOWI_FLAG_KNOWN_NH without a gateway -> make an uncached RTF_CACHE
 *     clone keyed on the real daddr and track it on the uncached list;
 *  3. otherwise -> return the per-cpu clone (creating it outside tb6_lock
 *     because clone allocation can trigger ip6_dst_gc()).
 */
1072 struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
1073 int oif, struct flowi6 *fl6, int flags)
1075 struct fib6_node *fn, *saved_fn;
1076 struct rt6_info *rt;
1079 strict |= flags & RT6_LOOKUP_F_IFACE;
1080 strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
1081 if (net->ipv6.devconf_all->forwarding == 0)
1082 strict |= RT6_LOOKUP_F_REACHABLE;
1084 read_lock_bh(&table->tb6_lock);
1086 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1089 if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
1093 rt = rt6_select(fn, oif, strict);
1094 if (rt->rt6i_nsiblings)
1095 rt = rt6_multipath_select(rt, fl6, oif, strict);
1096 if (rt == net->ipv6.ip6_null_entry) {
1097 fn = fib6_backtrack(fn, &fl6->saddr);
1099 goto redo_rt6_select;
1100 else if (strict & RT6_LOOKUP_F_REACHABLE) {
1101 /* also consider unreachable route */
1102 strict &= ~RT6_LOOKUP_F_REACHABLE;
1104 goto redo_rt6_select;
1109 if (rt == net->ipv6.ip6_null_entry || (rt->rt6i_flags & RTF_CACHE)) {
1110 dst_use(&rt->dst, jiffies);
1111 read_unlock_bh(&table->tb6_lock);
1113 rt6_dst_from_metrics_check(rt);
1115 trace_fib6_table_lookup(net, rt, table->tb6_id, fl6);
1117 } else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
1118 !(rt->rt6i_flags & RTF_GATEWAY))) {
1119 /* Create a RTF_CACHE clone which will not be
1120 * owned by the fib6 tree. It is for the special case where
1121 * the daddr in the skb during the neighbor look-up is different
1122 * from the fl6->daddr used to look-up route here.
1125 struct rt6_info *uncached_rt;
1127 dst_use(&rt->dst, jiffies);
1128 read_unlock_bh(&table->tb6_lock);
1130 uncached_rt = ip6_rt_cache_alloc(rt, &fl6->daddr, NULL);
1131 dst_release(&rt->dst);
1134 /* Uncached_rt's refcnt is taken during ip6_rt_cache_alloc()
1135 * No need for another dst_hold()
1137 rt6_uncached_list_add(uncached_rt);
1139 uncached_rt = net->ipv6.ip6_null_entry;
1140 dst_hold(&uncached_rt->dst);
1143 trace_fib6_table_lookup(net, uncached_rt, table->tb6_id, fl6);
1147 /* Get a percpu copy */
1149 struct rt6_info *pcpu_rt;
1151 rt->dst.lastuse = jiffies;
1153 pcpu_rt = rt6_get_pcpu_route(rt);
1156 read_unlock_bh(&table->tb6_lock);
1158 /* We have to do the read_unlock first
1159 * because rt6_make_pcpu_route() may trigger
1160 * ip6_dst_gc() which will take the write_lock.
1163 read_unlock_bh(&table->tb6_lock);
1164 pcpu_rt = rt6_make_pcpu_route(rt);
1165 dst_release(&rt->dst);
1168 trace_fib6_table_lookup(net, pcpu_rt, table->tb6_id, fl6);
1173 EXPORT_SYMBOL_GPL(ip6_pol_route);
/* Input-path wrapper: ip6_pol_route() keyed on the incoming interface. */
1175 static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
1176 struct flowi6 *fl6, int flags)
1178 return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
/* Input lookup entry point: force strict interface matching for
 * link-local/strict destinations (except PIM register devices), then go
 * through the policy-routing rules.
 */
1181 struct dst_entry *ip6_route_input_lookup(struct net *net,
1182 struct net_device *dev,
1183 struct flowi6 *fl6, int flags)
1185 if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
1186 flags |= RT6_LOOKUP_F_IFACE;
1188 return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input);
1190 EXPORT_SYMBOL_GPL(ip6_route_input_lookup);
/* Route an incoming skb: build a flowi6 from the IPv6 header (plus tunnel
 * key for RX-side collect-metadata tunnels) and attach the resulting dst
 * to the skb.
 */
1192 void ip6_route_input(struct sk_buff *skb)
1194 const struct ipv6hdr *iph = ipv6_hdr(skb);
1195 struct net *net = dev_net(skb->dev);
1196 int flags = RT6_LOOKUP_F_HAS_SADDR;
1197 struct ip_tunnel_info *tun_info;
1198 struct flowi6 fl6 = {
1199 .flowi6_iif = skb->dev->ifindex,
1200 .daddr = iph->daddr,
1201 .saddr = iph->saddr,
1202 .flowlabel = ip6_flowinfo(iph),
1203 .flowi6_mark = skb->mark,
1204 .flowi6_proto = iph->nexthdr,
1207 tun_info = skb_tunnel_info(skb);
1208 if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
1209 fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
1211 skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
/* Output-path wrapper: ip6_pol_route() keyed on the outgoing interface. */
1214 static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
1215 struct flowi6 *fl6, int flags)
1217 return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
/* Output route lookup: link-scope destinations go through the l3mdev
 * lookup; otherwise derive IFACE/HAS_SADDR strictness from the socket
 * binding, oif and source address, and honor the socket's source-address
 * preferences.
 */
1220 struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
1221 struct flowi6 *fl6, int flags)
1225 if (rt6_need_strict(&fl6->daddr)) {
1226 struct dst_entry *dst;
1228 dst = l3mdev_link_scope_lookup(net, fl6);
1233 fl6->flowi6_iif = LOOPBACK_IFINDEX;
1235 any_src = ipv6_addr_any(&fl6->saddr);
1236 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
1237 (fl6->flowi6_oif && any_src))
1238 flags |= RT6_LOOKUP_F_IFACE;
1241 flags |= RT6_LOOKUP_F_HAS_SADDR;
1243 flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
1245 return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
1247 EXPORT_SYMBOL_GPL(ip6_route_output_flags);
/* Convert @dst_orig into a blackhole dst (used e.g. by xfrm): allocate a
 * loopback-backed dst with ip6_dst_blackhole_ops, copy metrics and key
 * fields from the original, discard all traffic, and release the original.
 * Returns -ENOMEM as an ERR_PTR when allocation failed.
 */
1249 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
1251 struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
1252 struct net_device *loopback_dev = net->loopback_dev;
1253 struct dst_entry *new = NULL;
1255 rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
1256 DST_OBSOLETE_NONE, 0);
1262 new->input = dst_discard;
1263 new->output = dst_discard_out;
1265 dst_copy_metrics(new, &ort->dst);
1267 rt->rt6i_idev = in6_dev_get(loopback_dev);
1268 rt->rt6i_gateway = ort->rt6i_gateway;
1269 rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
1270 rt->rt6i_metric = 0;
1272 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
1273 #ifdef CONFIG_IPV6_SUBTREES
1274 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1278 dst_release(dst_orig);
1279 return new ? new : ERR_PTR(-ENOMEM);
1283 * Destination cache support functions
/* Re-sync a clone's metrics pointer with its parent (dst.from) when the
 * parent's metrics have been replaced.
 */
1286 static void rt6_dst_from_metrics_check(struct rt6_info *rt)
1289 dst_metrics_ptr(&rt->dst) != dst_metrics_ptr(rt->dst.from))
1290 dst_init_metrics(&rt->dst, dst_metrics_ptr(rt->dst.from), true);
/* Validate a tree-owned route: it must still be linked into a fib6 node,
 * the node's serial number must match the caller's @cookie, and it must
 * not have expired.
 */
1293 static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
1295 if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
1298 if (rt6_check_expired(rt))
/* Validate a clone via its parent: the clone itself must not be expired
 * and the parent (dst.from) must pass rt6_check() against @cookie.
 */
1304 static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie)
1306 if (!__rt6_check_expired(rt) &&
1307 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
1308 rt6_check((struct rt6_info *)(rt->dst.from), cookie))
/* dst_ops->check hook: refresh inherited metrics, then validate either via
 * the parent (for pcpu clones and linked uncached clones) or directly.
 * Returns NULL when the cached dst must be discarded and re-looked-up.
 */
1314 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
1316 struct rt6_info *rt;
1318 rt = (struct rt6_info *) dst;
1320 /* All IPV6 dsts are created with ->obsolete set to the value
1321 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
1322 * into this function always.
1325 rt6_dst_from_metrics_check(rt);
1327 if (rt->rt6i_flags & RTF_PCPU ||
1328 (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->dst.from))
1329 return rt6_dst_from_check(rt, cookie);
1331 return rt6_check(rt, cookie);
/* dst_ops->negative_advice hook: for cache clones that have expired,
 * drop them (the removal/release logic is elided in this excerpt).
 */
1334 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
1336 struct rt6_info *rt = (struct rt6_info *) dst;
1339 if (rt->rt6i_flags & RTF_CACHE) {
1340 if (rt6_check_expired(rt)) {
/* dst_ops->link_failure hook: report unreachability via ICMPv6, then
 * either drop an expiring cache clone or invalidate the fib6 node's
 * serial number for default routes so cached dsts fail their next check.
 */
1352 static void ip6_link_failure(struct sk_buff *skb)
1354 struct rt6_info *rt;
1356 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
1358 rt = (struct rt6_info *) skb_dst(skb);
1360 if (rt->rt6i_flags & RTF_CACHE) {
1361 if (dst_hold_safe(&rt->dst))
1363 } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
1364 rt->rt6i_node->fn_sernum = -1;
/* Record a new path MTU on @rt and arm the PMTU expiry timer
 * (net.ipv6.route.mtu_expires).
 */
1369 static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
1371 struct net *net = dev_net(rt->dst.dev);
1373 rt->rt6i_flags |= RTF_MODIFIED;
1374 rt->rt6i_pmtu = mtu;
1375 rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
/* True when a PMTU update on this route should be stored in a separate
 * RTF_CACHE clone: it is not already a cache entry, and it is either a
 * pcpu clone or still linked into the fib6 tree.
 */
1378 static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
1380 return !(rt->rt6i_flags & RTF_CACHE) &&
1381 (rt->rt6i_flags & RTF_PCPU || rt->rt6i_node);
/* Apply a PMTU update to @dst.  Addresses come from the triggering header
 * when present, else from the socket.  Ignored for local routes, locked
 * MTU metrics, and MTUs not below the current one (after clamping to
 * IPV6_MIN_MTU).  Cache-eligible routes get a dedicated RTF_CACHE clone
 * carrying the new MTU; inserting it bumps the node's serial number, which
 * invalidates stale socket dst caches.
 */
1384 static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
1385 const struct ipv6hdr *iph, u32 mtu)
1387 const struct in6_addr *daddr, *saddr;
1388 struct rt6_info *rt6 = (struct rt6_info *)dst;
1390 if (rt6->rt6i_flags & RTF_LOCAL)
1393 if (dst_metric_locked(dst, RTAX_MTU))
1397 daddr = &iph->daddr;
1398 saddr = &iph->saddr;
1400 daddr = &sk->sk_v6_daddr;
1401 saddr = &inet6_sk(sk)->saddr;
1406 dst_confirm_neigh(dst, daddr);
1407 mtu = max_t(u32, mtu, IPV6_MIN_MTU);
1408 if (mtu >= dst_mtu(dst))
1411 if (!rt6_cache_allowed_for_pmtu(rt6)) {
1412 rt6_do_update_pmtu(rt6, mtu);
1414 struct rt6_info *nrt6;
1416 nrt6 = ip6_rt_cache_alloc(rt6, daddr, saddr);
1418 rt6_do_update_pmtu(nrt6, mtu);
1420 /* ip6_ins_rt(nrt6) will bump the
1421 * rt6->rt6i_node->fn_sernum
1422 * which will fail the next rt6_check() and
1423 * invalidate the sk->sk_dst_cache.
1426 /* Release the reference taken in
1427 * ip6_rt_cache_alloc()
1429 dst_release(&nrt6->dst);
/* dst_ops->update_pmtu hook: delegate to __ip6_rt_update_pmtu() with the
 * skb's IPv6 header when one is available.
 */
1434 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
1435 struct sk_buff *skb, u32 mtu)
1437 __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
/* Update the PMTU toward the destination of the ICMPv6-embedded packet in
 * @skb: rebuild its flow, look up the output route, and apply the new MTU.
 * A zero @mark falls back to the netns reply-mark policy.
 */
1440 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
1441 int oif, u32 mark, kuid_t uid)
1443 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
1444 struct dst_entry *dst;
1447 memset(&fl6, 0, sizeof(fl6));
1448 fl6.flowi6_oif = oif;
1449 fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
1450 fl6.daddr = iph->daddr;
1451 fl6.saddr = iph->saddr;
1452 fl6.flowlabel = ip6_flowinfo(iph);
1453 fl6.flowi6_uid = uid;
1455 dst = ip6_route_output(net, NULL, &fl6);
1457 __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
1460 EXPORT_SYMBOL_GPL(ip6_update_pmtu);
/* Socket-scoped PMTU update: apply ip6_update_pmtu() with the socket's
 * bound device, mark and uid, then — if the socket's cached dst is now
 * obsolete and fails its ->check() — refresh the connected datagram
 * route (skipped for v4-mapped destinations, which are IPv4's business).
 */
1462 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
1464 struct dst_entry *dst;
1466 ip6_update_pmtu(skb, sock_net(sk), mtu,
1467 sk->sk_bound_dev_if, sk->sk_mark, sk->sk_uid);
1469 dst = __sk_dst_get(sk);
1470 if (!dst || !dst->obsolete ||
1471 dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
/* only safe to rebuild the route when the socket is not owned by user */
1475 if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
1476 ip6_datagram_dst_update(sk, false);
1479 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
1481 /* Handle redirects */
/* flowi6 extended with the redirecting router's address, so the
 * route-lookup callback can validate the redirect source.
 */
1482 struct ip6rd_flowi {
1484 struct in6_addr gateway;
/* Table-lookup callback used when processing an ICMPv6 redirect: find the
 * route currently used for the destination and check that the redirect
 * came from that route's next hop (RFC 4861). On a miss or error route,
 * falls back to ip6_null_entry / fib6_backtrack().
 * NOTE(review): several lines (loop exits, dst_hold, return) appear
 * dropped by extraction — verify against upstream before relying on
 * control flow.
 */
1487 static struct rt6_info *__ip6_route_redirect(struct net *net,
1488 struct fib6_table *table,
1492 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
1493 struct rt6_info *rt;
1494 struct fib6_node *fn;
1496 /* Get the "current" route for this destination and
1497 * check if the redirect has come from appropriate router.
1499 * RFC 4861 specifies that redirects should only be
1500 * accepted if they come from the nexthop to the target.
1501 * Due to the way the routes are chosen, this notion
1502 * is a bit fuzzy and one might need to check all possible
1506 read_lock_bh(&table->tb6_lock);
1507 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1509 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1510 if (rt6_check_expired(rt))
1514 if (!(rt->rt6i_flags & RTF_GATEWAY))
1516 if (fl6->flowi6_oif != rt->dst.dev->ifindex)
/* redirect must come from the current next hop */
1518 if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
1524 rt = net->ipv6.ip6_null_entry;
1525 else if (rt->dst.error) {
1526 rt = net->ipv6.ip6_null_entry;
1530 if (rt == net->ipv6.ip6_null_entry) {
1531 fn = fib6_backtrack(fn, &fl6->saddr);
1539 read_unlock_bh(&table->tb6_lock);
1541 trace_fib6_table_lookup(net, rt, table->tb6_id, fl6);
/* Wrap @fl6 and @gateway into an ip6rd_flowi and run a policy-aware
 * lookup through __ip6_route_redirect().
 */
1545 static struct dst_entry *ip6_route_redirect(struct net *net,
1546 const struct flowi6 *fl6,
1547 const struct in6_addr *gateway)
1549 int flags = RT6_LOOKUP_F_HAS_SADDR;
1550 struct ip6rd_flowi rdfl;
1553 rdfl.gateway = *gateway;
1555 return fib6_rule_lookup(net, &rdfl.fl6,
1556 flags, __ip6_route_redirect);
/* Apply an ICMPv6 redirect carried in @skb to the route for the flow
 * described by the packet's IPv6 header: look the route up via
 * ip6_route_redirect() and install the new next hop with
 * rt6_do_redirect(). The gateway checked is the redirect's source
 * address (ipv6_hdr(skb)->saddr).
 */
1559 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
1562 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
1563 struct dst_entry *dst;
1566 memset(&fl6, 0, sizeof(fl6));
1567 fl6.flowi6_iif = LOOPBACK_IFINDEX;
1568 fl6.flowi6_oif = oif;
1569 fl6.flowi6_mark = mark;
1570 fl6.daddr = iph->daddr;
1571 fl6.saddr = iph->saddr;
1572 fl6.flowlabel = ip6_flowinfo(iph);
1573 fl6.flowi6_uid = uid;
1575 dst = ip6_route_redirect(net, &fl6, &ipv6_hdr(skb)->saddr);
1576 rt6_do_redirect(dst, NULL, skb);
1579 EXPORT_SYMBOL_GPL(ip6_redirect);
/* Variant of ip6_redirect() for redirects whose offending-packet header
 * is unavailable: the flow is rebuilt from the rd_msg itself (msg->dest
 * as destination, the redirect's own IPv6 daddr as source).
 */
1581 void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
1584 const struct ipv6hdr *iph = ipv6_hdr(skb);
1585 const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
1586 struct dst_entry *dst;
1589 memset(&fl6, 0, sizeof(fl6));
1590 fl6.flowi6_iif = LOOPBACK_IFINDEX;
1591 fl6.flowi6_oif = oif;
1592 fl6.flowi6_mark = mark;
1593 fl6.daddr = msg->dest;
1594 fl6.saddr = iph->daddr;
1595 fl6.flowi6_uid = sock_net_uid(net, NULL);
1597 dst = ip6_route_redirect(net, &fl6, &iph->saddr);
1598 rt6_do_redirect(dst, NULL, skb);
/* Socket convenience wrapper: ip6_redirect() with the socket's netns,
 * bound device and mark (remaining arguments dropped by extraction).
 */
1602 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
1604 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
1607 EXPORT_SYMBOL_GPL(ip6_sk_redirect);
/* dst_ops .default_advmss hook: derive the advertised TCP MSS from the
 * route MTU minus IPv6 + TCP headers, bounded below by the
 * ip6_rt_min_advmss sysctl and above by IPV6_MAXPLEN semantics.
 */
1609 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
1611 struct net_device *dev = dst->dev;
1612 unsigned int mtu = dst_mtu(dst);
1613 struct net *net = dev_net(dev);
1615 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
1617 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
1618 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
1621 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
1622 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
1623 * IPV6_MAXPLEN is also valid and means: "any MSS,
1624 * rely only on pmtu discovery"
1626 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
/* dst_ops .mtu hook: effective MTU for the route. Preference order
 * (per the surviving lines): cached rt6i_pmtu, then the raw RTAX_MTU
 * metric, then the device's inet6 mtu6; clamped to IP6_MAX_MTU and
 * reduced by any lwtunnel encapsulation headroom.
 */
1631 static unsigned int ip6_mtu(const struct dst_entry *dst)
1633 const struct rt6_info *rt = (const struct rt6_info *)dst;
1634 unsigned int mtu = rt->rt6i_pmtu;
1635 struct inet6_dev *idev;
1640 mtu = dst_metric_raw(dst, RTAX_MTU);
1647 idev = __in6_dev_get(dst->dev);
1649 mtu = idev->cnf.mtu6;
1653 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
1655 return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
/* Allocate a standalone (not fib-inserted) host route used for sending
 * ICMPv6 messages. The dst is added to the uncached list so rt6_ifdown()
 * can release the netdev reference, and is finally passed through
 * xfrm_lookup() for IPsec policy.
 * NOTE(review): the idev error path / out label appears dropped by
 * extraction — verify the in6_dev_put() balancing against upstream.
 */
1658 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1661 struct dst_entry *dst;
1662 struct rt6_info *rt;
1663 struct inet6_dev *idev = in6_dev_get(dev);
1664 struct net *net = dev_net(dev);
1666 if (unlikely(!idev))
1667 return ERR_PTR(-ENODEV);
1669 rt = ip6_dst_alloc(net, dev, 0);
1670 if (unlikely(!rt)) {
1672 dst = ERR_PTR(-ENOMEM);
/* /128 host route toward the flow's destination */
1676 rt->dst.flags |= DST_HOST;
1677 rt->dst.output = ip6_output;
1678 rt->rt6i_gateway = fl6->daddr;
1679 rt->rt6i_dst.addr = fl6->daddr;
1680 rt->rt6i_dst.plen = 128;
1681 rt->rt6i_idev = idev;
1682 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
1684 /* Add this dst into uncached_list so that rt6_ifdown() can
1685 * do proper release of the net_device
1687 rt6_uncached_list_add(rt);
1689 dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
/* dst_ops garbage-collection hook. Skips work if the last GC ran less
 * than ip6_rt_gc_min_interval ago and the entry count is within
 * ip6_rt_max_size; otherwise runs fib6_run_gc() with an expire value
 * that grows on pressure and decays by the elasticity sysctl.
 * Returns nonzero when the table is still over the size limit.
 */
1695 static int ip6_dst_gc(struct dst_ops *ops)
1697 struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
1698 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
1699 int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
1700 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
1701 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
1702 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
1705 entries = dst_entries_get_fast(ops);
1706 if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
1707 entries <= rt_max_size)
/* under pressure: GC more aggressively each round */
1710 net->ipv6.ip6_rt_gc_expire++;
1711 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
1712 entries = dst_entries_get_slow(ops);
1713 if (entries < ops->gc_thresh)
1714 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
1716 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
1717 return entries > rt_max_size;
/* Convert netlink RTAX_* attributes from @cfg into an mx6_config metric
 * array: allocates an RTAX_MAX-sized u32 array, translates RTAX_CC_ALGO
 * names into congestion-control keys, and validates RTAX_HOPLIMIT and
 * RTAX_FEATURES ranges. Sets DST_FEATURE_ECN_CA when the chosen CC
 * algorithm requires ECN.
 * NOTE(review): the kzalloc-failure and invalid-attribute error paths
 * were dropped by extraction — verify cleanup against upstream.
 */
1720 static int ip6_convert_metrics(struct mx6_config *mxc,
1721 const struct fib6_config *cfg)
1723 bool ecn_ca = false;
1731 mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
1735 nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
1736 int type = nla_type(nla);
1741 if (unlikely(type > RTAX_MAX))
1744 if (type == RTAX_CC_ALGO) {
1745 char tmp[TCP_CA_NAME_MAX];
1747 nla_strlcpy(tmp, nla, sizeof(tmp));
1748 val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
1749 if (val == TCP_CA_UNSPEC)
1752 val = nla_get_u32(nla);
1754 if (type == RTAX_HOPLIMIT && val > 255)
1756 if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
1760 __set_bit(type - 1, mxc->mx_valid);
1764 __set_bit(RTAX_FEATURES - 1, mxc->mx_valid);
1765 mp[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
/* Resolve a route's gateway within the table named by @cfg->fc_table.
 * Device match is enforced (RT6_LOOKUP_F_IFACE) and link state ignored;
 * a null-entry result means the caller should fall back to a full
 * (all-table) lookup.
 */
1775 static struct rt6_info *ip6_nh_lookup_table(struct net *net,
1776 struct fib6_config *cfg,
1777 const struct in6_addr *gw_addr)
1779 struct flowi6 fl6 = {
1780 .flowi6_oif = cfg->fc_ifindex,
1782 .saddr = cfg->fc_prefsrc,
1784 struct fib6_table *table;
1785 struct rt6_info *rt;
1786 int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_IGNORE_LINKSTATE;
1788 table = fib6_get_table(net, cfg->fc_table);
1792 if (!ipv6_addr_any(&cfg->fc_prefsrc))
1793 flags |= RT6_LOOKUP_F_HAS_SADDR;
1795 rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, flags);
1797 /* if table lookup failed, fall back to full lookup */
1798 if (rt == net->ipv6.ip6_null_entry) {
/* Build (but do not insert) an rt6_info from a fib6_config:
 *  - validates flags and prefix lengths, rejecting internal-only flags
 *    (RTF_PCPU, RTF_OFFLOAD) from userspace;
 *  - resolves the egress device/idev and owning fib6 table;
 *  - programs input/output handlers (multicast/local/forward, lwtunnel);
 *  - promotes loopback non-local routes to reject routes with a
 *    type-specific dst.error;
 *  - validates and resolves the gateway (link-local vs. recursive
 *    next-hop lookup per RFC, with the ANK exceptions noted inline);
 *  - records prefsrc, flags, idev and table on the route.
 * Returns the new route or ERR_PTR(err); on failure the partially built
 * dst is released. Many error-path lines (goto out, -ENODEV/-ENOMEM
 * assignments, dev/idev put) were dropped by extraction — verify
 * reference balancing against upstream before editing.
 */
1806 static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg,
1807 struct netlink_ext_ack *extack)
1809 struct net *net = cfg->fc_nlinfo.nl_net;
1810 struct rt6_info *rt = NULL;
1811 struct net_device *dev = NULL;
1812 struct inet6_dev *idev = NULL;
1813 struct fib6_table *table;
1817 /* RTF_PCPU is an internal flag; can not be set by userspace */
1818 if (cfg->fc_flags & RTF_PCPU) {
1819 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
1823 if (cfg->fc_flags & RTF_OFFLOAD) {
1824 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_OFFLOAD");
1828 if (cfg->fc_dst_len > 128) {
1829 NL_SET_ERR_MSG(extack, "Invalid prefix length");
1832 if (cfg->fc_src_len > 128) {
1833 NL_SET_ERR_MSG(extack, "Invalid source address length");
1836 #ifndef CONFIG_IPV6_SUBTREES
1837 if (cfg->fc_src_len) {
1838 NL_SET_ERR_MSG(extack,
1839 "Specifying source address requires IPV6_SUBTREES to be enabled");
1843 if (cfg->fc_ifindex) {
1845 dev = dev_get_by_index(net, cfg->fc_ifindex);
1848 idev = in6_dev_get(dev);
1853 if (cfg->fc_metric == 0)
1854 cfg->fc_metric = IP6_RT_PRIO_USER;
/* pick the owning table; warn-and-create when NLM_F_CREATE is absent */
1857 if (cfg->fc_nlinfo.nlh &&
1858 !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
1859 table = fib6_get_table(net, cfg->fc_table);
1861 pr_warn("NLM_F_CREATE should be specified when creating new route\n");
1862 table = fib6_new_table(net, cfg->fc_table);
1865 table = fib6_new_table(net, cfg->fc_table);
1871 rt = ip6_dst_alloc(net, NULL,
1872 (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);
1879 if (cfg->fc_flags & RTF_EXPIRES)
1880 rt6_set_expires(rt, jiffies +
1881 clock_t_to_jiffies(cfg->fc_expires));
1883 rt6_clean_expires(rt);
1885 if (cfg->fc_protocol == RTPROT_UNSPEC)
1886 cfg->fc_protocol = RTPROT_BOOT;
1887 rt->rt6i_protocol = cfg->fc_protocol;
1889 addr_type = ipv6_addr_type(&cfg->fc_dst);
/* choose the input handler by destination type */
1891 if (addr_type & IPV6_ADDR_MULTICAST)
1892 rt->dst.input = ip6_mc_input;
1893 else if (cfg->fc_flags & RTF_LOCAL)
1894 rt->dst.input = ip6_input;
1896 rt->dst.input = ip6_forward;
1898 rt->dst.output = ip6_output;
/* optional lightweight-tunnel encapsulation state */
1900 if (cfg->fc_encap) {
1901 struct lwtunnel_state *lwtstate;
1903 err = lwtunnel_build_state(cfg->fc_encap_type,
1904 cfg->fc_encap, AF_INET6, cfg,
1908 rt->dst.lwtstate = lwtstate_get(lwtstate);
1909 if (lwtunnel_output_redirect(rt->dst.lwtstate)) {
1910 rt->dst.lwtstate->orig_output = rt->dst.output;
1911 rt->dst.output = lwtunnel_output;
1913 if (lwtunnel_input_redirect(rt->dst.lwtstate)) {
1914 rt->dst.lwtstate->orig_input = rt->dst.input;
1915 rt->dst.input = lwtunnel_input;
1919 ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
1920 rt->rt6i_dst.plen = cfg->fc_dst_len;
1921 if (rt->rt6i_dst.plen == 128)
1922 rt->dst.flags |= DST_HOST;
1924 #ifdef CONFIG_IPV6_SUBTREES
1925 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
1926 rt->rt6i_src.plen = cfg->fc_src_len;
1929 rt->rt6i_metric = cfg->fc_metric;
1931 /* We cannot add true routes via loopback here,
1932 they would result in kernel looping; promote them to reject routes
1934 if ((cfg->fc_flags & RTF_REJECT) ||
1935 (dev && (dev->flags & IFF_LOOPBACK) &&
1936 !(addr_type & IPV6_ADDR_LOOPBACK) &&
1937 !(cfg->fc_flags & RTF_LOCAL))) {
1938 /* hold loopback dev/idev if we haven't done so. */
1939 if (dev != net->loopback_dev) {
1944 dev = net->loopback_dev;
1946 idev = in6_dev_get(dev);
1952 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
/* map the route type onto a per-type error and drop handlers */
1953 switch (cfg->fc_type) {
1955 rt->dst.error = -EINVAL;
1956 rt->dst.output = dst_discard_out;
1957 rt->dst.input = dst_discard;
1960 rt->dst.error = -EACCES;
1961 rt->dst.output = ip6_pkt_prohibit_out;
1962 rt->dst.input = ip6_pkt_prohibit;
1965 case RTN_UNREACHABLE:
1967 rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
1968 : (cfg->fc_type == RTN_UNREACHABLE)
1969 ? -EHOSTUNREACH : -ENETUNREACH;
1970 rt->dst.output = ip6_pkt_discard_out;
1971 rt->dst.input = ip6_pkt_discard;
1977 if (cfg->fc_flags & RTF_GATEWAY) {
1978 const struct in6_addr *gw_addr;
1981 gw_addr = &cfg->fc_gateway;
1982 gwa_type = ipv6_addr_type(gw_addr);
1984 /* if gw_addr is local we will fail to detect this in case
1985 * address is still TENTATIVE (DAD in progress). rt6_lookup()
1986 * will return already-added prefix route via interface that
1987 * prefix route was assigned to, which might be non-loopback.
1990 if (ipv6_chk_addr_and_flags(net, gw_addr,
1991 gwa_type & IPV6_ADDR_LINKLOCAL ?
1992 dev : NULL, 0, 0)) {
1993 NL_SET_ERR_MSG(extack, "Invalid gateway address");
1996 rt->rt6i_gateway = *gw_addr;
1998 if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
1999 struct rt6_info *grt = NULL;
2001 /* IPv6 strictly inhibits using not link-local
2002 addresses as nexthop address.
2003 Otherwise, router will not able to send redirects.
2004 It is very good, but in some (rare!) circumstances
2005 (SIT, PtP, NBMA NOARP links) it is handy to allow
2006 some exceptions. --ANK
2007 We allow IPv4-mapped nexthops to support RFC4798-type
2010 if (!(gwa_type & (IPV6_ADDR_UNICAST |
2011 IPV6_ADDR_MAPPED))) {
2012 NL_SET_ERR_MSG(extack,
2013 "Invalid gateway address");
/* prefer the configured table for next-hop resolution ... */
2017 if (cfg->fc_table) {
2018 grt = ip6_nh_lookup_table(net, cfg, gw_addr);
2021 if (grt->rt6i_flags & RTF_GATEWAY ||
2022 (dev && dev != grt->dst.dev)) {
/* ... falling back to a full rt6_lookup() */
2030 grt = rt6_lookup(net, gw_addr, NULL,
2031 cfg->fc_ifindex, 1);
2033 err = -EHOSTUNREACH;
2037 if (dev != grt->dst.dev) {
2043 idev = grt->rt6i_idev;
2045 in6_dev_hold(grt->rt6i_idev);
2047 if (!(grt->rt6i_flags & RTF_GATEWAY))
2056 NL_SET_ERR_MSG(extack, "Egress device not specified");
2058 } else if (dev->flags & IFF_LOOPBACK) {
2059 NL_SET_ERR_MSG(extack,
2060 "Egress device can not be loopback device for this route");
2069 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
2070 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
2071 NL_SET_ERR_MSG(extack, "Invalid source address");
2075 rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
2076 rt->rt6i_prefsrc.plen = 128;
2078 rt->rt6i_prefsrc.plen = 0;
2080 rt->rt6i_flags = cfg->fc_flags;
2084 rt->rt6i_idev = idev;
2085 rt->rt6i_table = table;
2087 cfg->fc_nlinfo.nl_net = dev_net(dev);
2096 dst_release_immediate(&rt->dst);
2098 return ERR_PTR(err);
/* Create a route from @cfg (ip6_route_info_create), convert its netlink
 * metrics, and insert it into the FIB via __ip6_ins_rt(). On failure the
 * freshly built route is released. Returns 0 or a negative errno.
 */
2101 int ip6_route_add(struct fib6_config *cfg,
2102 struct netlink_ext_ack *extack)
2104 struct mx6_config mxc = { .mx = NULL, };
2105 struct rt6_info *rt;
2108 rt = ip6_route_info_create(cfg, extack);
2115 err = ip6_convert_metrics(&mxc, cfg);
2119 err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc, extack);
2126 dst_release_immediate(&rt->dst);
/* Delete a single route from its fib6 table under the table write lock.
 * Refuses to delete the per-netns null entry.
 */
2131 static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
2134 struct fib6_table *table;
2135 struct net *net = dev_net(rt->dst.dev);
2137 if (rt == net->ipv6.ip6_null_entry) {
2142 table = rt->rt6i_table;
2143 write_lock_bh(&table->tb6_lock);
2144 err = fib6_del(rt, info);
2145 write_unlock_bh(&table->tb6_lock);
/* Public single-route delete: __ip6_del_rt() with a minimal nl_info
 * carrying only the route's netns.
 */
2152 int ip6_del_rt(struct rt6_info *rt)
2154 struct nl_info info = {
2155 .nl_net = dev_net(rt->dst.dev),
2157 return __ip6_del_rt(rt, &info);
/* Delete a multipath route and, when fc_delete_all_nh is set, all of its
 * sibling next hops in one pass. A single RTM_DELROUTE notification
 * covering every hop is pre-built (and per-hop notifications suppressed
 * via info->skip_notify) so userspace sees one coherent event.
 */
2160 static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg)
2162 struct nl_info *info = &cfg->fc_nlinfo;
2163 struct net *net = info->nl_net;
2164 struct sk_buff *skb = NULL;
2165 struct fib6_table *table;
2168 if (rt == net->ipv6.ip6_null_entry)
2170 table = rt->rt6i_table;
2171 write_lock_bh(&table->tb6_lock);
2173 if (rt->rt6i_nsiblings && cfg->fc_delete_all_nh) {
2174 struct rt6_info *sibling, *next_sibling;
2176 /* prefer to send a single notification with all hops */
2177 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
2179 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
2181 if (rt6_fill_node(net, skb, rt,
2182 NULL, NULL, 0, RTM_DELROUTE,
2183 info->portid, seq, 0) < 0) {
2187 info->skip_notify = 1;
2190 list_for_each_entry_safe(sibling, next_sibling,
2193 err = fib6_del(sibling, info);
2199 err = fib6_del(rt, info);
2201 write_unlock_bh(&table->tb6_lock);
2206 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
2207 info->nlh, gfp_any());
/* Delete the route matching @cfg: locate the fib6 node for the
 * dst/src prefix and scan its leaves for a route matching the
 * configured device, gateway, metric and protocol. With RTF_GATEWAY set
 * only that single hop is removed; otherwise all siblings may go via
 * __ip6_del_rt_siblings(). RTF_CACHE clones are skipped unless
 * explicitly requested.
 * NOTE(review): the dst_hold() before dropping the read lock appears
 * dropped by extraction — verify against upstream.
 */
2212 static int ip6_route_del(struct fib6_config *cfg,
2213 struct netlink_ext_ack *extack)
2215 struct fib6_table *table;
2216 struct fib6_node *fn;
2217 struct rt6_info *rt;
2220 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
2222 NL_SET_ERR_MSG(extack, "FIB table does not exist");
2226 read_lock_bh(&table->tb6_lock);
2228 fn = fib6_locate(&table->tb6_root,
2229 &cfg->fc_dst, cfg->fc_dst_len,
2230 &cfg->fc_src, cfg->fc_src_len);
2233 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
2234 if ((rt->rt6i_flags & RTF_CACHE) &&
2235 !(cfg->fc_flags & RTF_CACHE))
2237 if (cfg->fc_ifindex &&
2239 rt->dst.dev->ifindex != cfg->fc_ifindex))
2241 if (cfg->fc_flags & RTF_GATEWAY &&
2242 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
2244 if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
2246 if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol)
2249 read_unlock_bh(&table->tb6_lock);
2251 /* if gateway was specified only delete the one hop */
2252 if (cfg->fc_flags & RTF_GATEWAY)
2253 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
2255 return __ip6_del_rt_siblings(rt, cfg);
2258 read_unlock_bh(&table->tb6_lock);
/* Process a validated ICMPv6 redirect against the route in @dst:
 *  1. sanity-check the rd_msg (length, non-multicast dest, link-local
 *     unicast target unless dest==target, i.e. an on-link redirect);
 *  2. ensure the receiving interface accepts redirects and is not
 *     forwarding;
 *  3. parse ND options for a target link-layer address;
 *  4. update the neighbour cache (ndisc_update) and confirm the old
 *     path's neighbour;
 *  5. clone the route toward msg->dest (ip6_rt_cache_alloc), point it at
 *     the new gateway, insert it, fire the NETEVENT_REDIRECT notifier,
 *     and delete the superseded RTF_CACHE entry if any.
 * Several goto-out / release lines were dropped by extraction — verify
 * reference balancing against upstream before editing.
 */
2263 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
2265 struct netevent_redirect netevent;
2266 struct rt6_info *rt, *nrt = NULL;
2267 struct ndisc_options ndopts;
2268 struct inet6_dev *in6_dev;
2269 struct neighbour *neigh;
2271 int optlen, on_link;
2274 optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
2275 optlen -= sizeof(*msg);
2278 net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
2282 msg = (struct rd_msg *)icmp6_hdr(skb);
2284 if (ipv6_addr_is_multicast(&msg->dest)) {
2285 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
/* dest == target means the destination itself is on-link */
2290 if (ipv6_addr_equal(&msg->dest, &msg->target)) {
2292 } else if (ipv6_addr_type(&msg->target) !=
2293 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
2294 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
2298 in6_dev = __in6_dev_get(skb->dev);
2301 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
2305 * The IP source address of the Redirect MUST be the same as the current
2306 * first-hop router for the specified ICMP Destination Address.
2309 if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
2310 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
2315 if (ndopts.nd_opts_tgt_lladdr) {
2316 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
2319 net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
2324 rt = (struct rt6_info *) dst;
2325 if (rt->rt6i_flags & RTF_REJECT) {
2326 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
2330 /* Redirect received -> path was valid.
2331 * Look, redirects are sent only in response to data packets,
2332 * so that this nexthop apparently is reachable. --ANK
2334 dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);
2336 neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
2341 * We have finally decided to accept it.
2344 ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
2345 NEIGH_UPDATE_F_WEAK_OVERRIDE|
2346 NEIGH_UPDATE_F_OVERRIDE|
2347 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
2348 NEIGH_UPDATE_F_ISROUTER)),
2349 NDISC_REDIRECT, &ndopts);
2351 nrt = ip6_rt_cache_alloc(rt, &msg->dest, NULL);
2355 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
2357 nrt->rt6i_flags &= ~RTF_GATEWAY;
2359 nrt->rt6i_protocol = RTPROT_REDIRECT;
2360 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
2362 if (ip6_ins_rt(nrt))
2365 netevent.old = &rt->dst;
2366 netevent.new = &nrt->dst;
2367 netevent.daddr = &msg->dest;
2368 netevent.neigh = neigh;
2369 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
2371 if (rt->rt6i_flags & RTF_CACHE) {
2372 rt = (struct rt6_info *) dst_clone(&rt->dst);
2377 /* Release the reference taken in
2378 * ip6_rt_cache_alloc()
2380 dst_release(&nrt->dst);
2383 neigh_release(neigh);
2387 * Misc support functions
/* Link @rt to its parent route @from: hold a reference on the parent's
 * dst, record it in rt->dst.from and share (read-only) its metrics.
 * BUG_ON guards against chaining through an already-derived route.
 */
2390 static void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
2392 BUG_ON(from->dst.from);
2394 rt->rt6i_flags &= ~RTF_EXPIRES;
2395 dst_hold(&from->dst);
2396 rt->dst.from = &from->dst;
2397 dst_init_metrics(&rt->dst, dst_metrics_ptr(&from->dst), true);
2400 static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
2402 rt->dst.input = ort->dst.input;
2403 rt->dst.output = ort->dst.output;
2404 rt->rt6i_dst = ort->rt6i_dst;
2405 rt->dst.error = ort->dst.error;
2406 rt->rt6i_idev = ort->rt6i_idev;
2408 in6_dev_hold(rt->rt6i_idev);
2409 rt->dst.lastuse = jiffies;
2410 rt->rt6i_gateway = ort->rt6i_gateway;
2411 rt->rt6i_flags = ort->rt6i_flags;
2412 rt6_set_from(rt, ort);
2413 rt->rt6i_metric = ort->rt6i_metric;
2414 #ifdef CONFIG_IPV6_SUBTREES
2415 rt->rt6i_src = ort->rt6i_src;
2417 rt->rt6i_prefsrc = ort->rt6i_prefsrc;
2418 rt->rt6i_table = ort->rt6i_table;
2419 rt->dst.lwtstate = lwtstate_get(ort->dst.lwtstate);
2422 #ifdef CONFIG_IPV6_ROUTE_INFO
/* Find an RA-learned (RTF_ROUTEINFO) gateway route for @prefix/@prefixlen
 * via @gwaddr on @dev, looking in the device's l3mdev table (or
 * RT6_TABLE_INFO). Returns the route or NULL.
 */
2423 static struct rt6_info *rt6_get_route_info(struct net *net,
2424 const struct in6_addr *prefix, int prefixlen,
2425 const struct in6_addr *gwaddr,
2426 struct net_device *dev)
2428 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
2429 int ifindex = dev->ifindex;
2430 struct fib6_node *fn;
2431 struct rt6_info *rt = NULL;
2432 struct fib6_table *table;
2434 table = fib6_get_table(net, tb_id);
2438 read_lock_bh(&table->tb6_lock);
2439 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0);
2443 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
2444 if (rt->dst.dev->ifindex != ifindex)
2446 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
2448 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
2454 read_unlock_bh(&table->tb6_lock);
/* Install a route learned from an RA Route Information option:
 * RTPROT_RA, RTF_ROUTEINFO, with the router preference encoded in the
 * flags; a zero prefix length makes it a default route. Returns the
 * inserted route via rt6_get_route_info().
 */
2458 static struct rt6_info *rt6_add_route_info(struct net *net,
2459 const struct in6_addr *prefix, int prefixlen,
2460 const struct in6_addr *gwaddr,
2461 struct net_device *dev,
2464 struct fib6_config cfg = {
2465 .fc_metric = IP6_RT_PRIO_USER,
2466 .fc_ifindex = dev->ifindex,
2467 .fc_dst_len = prefixlen,
2468 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
2469 RTF_UP | RTF_PREF(pref),
2470 .fc_protocol = RTPROT_RA,
2471 .fc_nlinfo.portid = 0,
2472 .fc_nlinfo.nlh = NULL,
2473 .fc_nlinfo.nl_net = net,
/* NOTE(review): trailing comma (comma operator) below matches upstream;
 * harmless here but a semicolon is intended.
 */
2476 cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO,
2477 cfg.fc_dst = *prefix;
2478 cfg.fc_gateway = *gwaddr;
2480 /* We should treat it as a default route if prefix length is 0. */
2482 cfg.fc_flags |= RTF_DEFAULT;
2484 ip6_route_add(&cfg, NULL);
2486 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
/* Find the RA-installed default route via gateway @addr on @dev in the
 * device's l3mdev table (or RT6_TABLE_DFLT). Returns the route or NULL.
 */
2490 struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
2492 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
2493 struct rt6_info *rt;
2494 struct fib6_table *table;
2496 table = fib6_get_table(dev_net(dev), tb_id);
2500 read_lock_bh(&table->tb6_lock);
2501 for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
2502 if (dev == rt->dst.dev &&
2503 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
2504 ipv6_addr_equal(&rt->rt6i_gateway, addr))
2509 read_unlock_bh(&table->tb6_lock);
/* Install a default router learned from a Router Advertisement
 * (RTPROT_RA, RTF_ADDRCONF|RTF_DEFAULT|RTF_EXPIRES with preference
 * @pref). Marks the owning table as holding a default router and
 * returns the inserted route via rt6_get_dflt_router().
 */
2513 struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
2514 struct net_device *dev,
2517 struct fib6_config cfg = {
2518 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
2519 .fc_metric = IP6_RT_PRIO_USER,
2520 .fc_ifindex = dev->ifindex,
2521 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
2522 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
2523 .fc_protocol = RTPROT_RA,
2524 .fc_nlinfo.portid = 0,
2525 .fc_nlinfo.nlh = NULL,
2526 .fc_nlinfo.nl_net = dev_net(dev),
2529 cfg.fc_gateway = *gwaddr;
2531 if (!ip6_route_add(&cfg, NULL)) {
2532 struct fib6_table *table;
2534 table = fib6_get_table(dev_net(dev), cfg.fc_table);
/* remember this table has an RA default route, for purge-time scans */
2536 table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
2539 return rt6_get_dflt_router(gwaddr, dev);
/* Remove RA-installed default routes from @table, skipping interfaces
 * configured with accept_ra == 2 (accept RA even when forwarding).
 * The read lock is dropped before each deletion (ip6_del_rt presumably
 * follows in a dropped line — TODO confirm) and the scan restarted.
 */
2542 static void __rt6_purge_dflt_routers(struct fib6_table *table)
2544 struct rt6_info *rt;
2547 read_lock_bh(&table->tb6_lock);
2548 for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
2549 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
2550 (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
2552 read_unlock_bh(&table->tb6_lock);
2557 read_unlock_bh(&table->tb6_lock);
2559 table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
/* Purge RA default routers from every fib6 table in @net that is
 * flagged RT6_TABLE_HAS_DFLT_ROUTER, walking the table hash under RCU.
 */
2562 void rt6_purge_dflt_routers(struct net *net)
2564 struct fib6_table *table;
2565 struct hlist_head *head;
2570 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
2571 head = &net->ipv6.fib_table_hash[h];
2572 hlist_for_each_entry_rcu(table, head, tb6_hlist) {
2573 if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
2574 __rt6_purge_dflt_routers(table);
/* Translate a legacy ioctl in6_rtmsg into a fib6_config, choosing the
 * l3mdev table for the given ifindex when one exists.
 */
2581 static void rtmsg_to_fib6_config(struct net *net,
2582 struct in6_rtmsg *rtmsg,
2583 struct fib6_config *cfg)
2585 memset(cfg, 0, sizeof(*cfg));
2587 cfg->fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
2589 cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
2590 cfg->fc_metric = rtmsg->rtmsg_metric;
2591 cfg->fc_expires = rtmsg->rtmsg_info;
2592 cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
2593 cfg->fc_src_len = rtmsg->rtmsg_src_len;
2594 cfg->fc_flags = rtmsg->rtmsg_flags;
2596 cfg->fc_nlinfo.nl_net = net;
2598 cfg->fc_dst = rtmsg->rtmsg_dst;
2599 cfg->fc_src = rtmsg->rtmsg_src;
2600 cfg->fc_gateway = rtmsg->rtmsg_gateway;
/* Legacy SIOCADDRT/SIOCDELRT ioctl entry point: requires CAP_NET_ADMIN
 * in the netns, copies the in6_rtmsg from userspace, converts it and
 * dispatches to ip6_route_add()/ip6_route_del().
 */
2603 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
2605 struct fib6_config cfg;
2606 struct in6_rtmsg rtmsg;
2610 case SIOCADDRT: /* Add a route */
2611 case SIOCDELRT: /* Delete a route */
2612 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2614 err = copy_from_user(&rtmsg, arg,
2615 sizeof(struct in6_rtmsg));
2619 rtmsg_to_fib6_config(net, &rtmsg, &cfg);
2624 err = ip6_route_add(&cfg, NULL);
2627 err = ip6_route_del(&cfg, NULL);
2641 * Drop the packet on the floor
/* Drop @skb, bump the relevant SNMP counter (IN/OUT no-route, or
 * in-addr-errors for an unspecified destination), and send an ICMPv6
 * destination-unreachable with the given @code.
 */
2644 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
2647 struct dst_entry *dst = skb_dst(skb);
2648 switch (ipstats_mib_noroutes) {
2649 case IPSTATS_MIB_INNOROUTES:
2650 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
2651 if (type == IPV6_ADDR_ANY) {
2652 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2653 IPSTATS_MIB_INADDRERRORS);
/* fallthrough to OUTNOROUTES accounting (per upstream) */
2657 case IPSTATS_MIB_OUTNOROUTES:
2658 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2659 ipstats_mib_noroutes);
2662 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
/* Input handler for discard routes: no-route drop with ICMP no-route. */
2667 static int ip6_pkt_discard(struct sk_buff *skb)
2669 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
/* Output handler for discard routes: same drop, counted as OUT no-route. */
2672 static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
2674 skb->dev = skb_dst(skb)->dev;
2675 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
/* Input handler for prohibit routes: drop with admin-prohibited ICMP. */
2678 static int ip6_pkt_prohibit(struct sk_buff *skb)
2680 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
/* Output handler for prohibit routes: admin-prohibited, OUT counter. */
2683 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
2685 skb->dev = skb_dst(skb)->dev;
2686 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
2690 * Allocate a dst for local (unicast / anycast) address.
/* Build the /128 local (or anycast) route installed for each address
 * configured on @idev. The route's device is the loopback device —
 * or the L3 master device when the interface is enslaved and the
 * address is neither link-local nor multicast. Table: the l3mdev table
 * or RT6_TABLE_LOCAL. (The anycast/local selection conditional appears
 * partially dropped by extraction.)
 */
2693 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2694 const struct in6_addr *addr,
2698 struct net *net = dev_net(idev->dev);
2699 struct net_device *dev = net->loopback_dev;
2700 struct rt6_info *rt;
2702 /* use L3 Master device as loopback for host routes if device
2703 * is enslaved and address is not link local or multicast
2705 if (!rt6_need_strict(addr))
2706 dev = l3mdev_master_dev_rcu(idev->dev) ? : dev;
2708 rt = ip6_dst_alloc(net, dev, DST_NOCOUNT);
2710 return ERR_PTR(-ENOMEM);
2714 rt->dst.flags |= DST_HOST;
2715 rt->dst.input = ip6_input;
2716 rt->dst.output = ip6_output;
2717 rt->rt6i_idev = idev;
2719 rt->rt6i_protocol = RTPROT_KERNEL;
2720 rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
2722 rt->rt6i_flags |= RTF_ANYCAST;
2724 rt->rt6i_flags |= RTF_LOCAL;
2726 rt->rt6i_gateway = *addr;
2727 rt->rt6i_dst.addr = *addr;
2728 rt->rt6i_dst.plen = 128;
2729 tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
2730 rt->rt6i_table = fib6_get_table(net, tb_id);
2735 /* remove deleted ip from prefsrc entries */
/* Walker argument: device + netns + the address being removed. */
2736 struct arg_dev_net_ip {
2737 struct net_device *dev;
2739 struct in6_addr *addr;
/* fib6_clean_all() callback: clear the preferred-source entry on routes
 * whose prefsrc matches the deleted address (on the given device, or on
 * any device when arg->dev is NULL). Never touches the null entry.
 */
2742 static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
2744 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
2745 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
2746 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
2748 if (((void *)rt->dst.dev == dev || !dev) &&
2749 rt != net->ipv6.ip6_null_entry &&
2750 ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
2751 /* remove prefsrc entry */
2752 rt->rt6i_prefsrc.plen = 0;
/* Walk all fib6 tables and strip @ifp's address from route prefsrc
 * entries (called when the address is deleted).
 */
2757 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
2759 struct net *net = dev_net(ifp->idev->dev);
2760 struct arg_dev_net_ip adni = {
2761 .dev = ifp->idev->dev,
2765 fib6_clean_all(net, fib6_remove_prefsrc, &adni);
2768 #define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
2769 #define RTF_CACHE_GATEWAY (RTF_GATEWAY | RTF_CACHE)
2771 /* Remove routers and update dst entries when gateway turn into host. */
/* fib6_clean_all() callback: match RA router routes and cached gateway
 * routes whose gateway equals the address that just became local.
 */
2772 static int fib6_clean_tohost(struct rt6_info *rt, void *arg)
2774 struct in6_addr *gateway = (struct in6_addr *)arg;
2776 if ((((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) ||
2777 ((rt->rt6i_flags & RTF_CACHE_GATEWAY) == RTF_CACHE_GATEWAY)) &&
2778 ipv6_addr_equal(gateway, &rt->rt6i_gateway)) {
/* Public wrapper: clean routes via @gateway once it becomes a local
 * address on this host.
 */
2784 void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
2786 fib6_clean_all(net, fib6_clean_tohost, gateway);
/* Walker argument for fib6_ifdown(): the going-down device (+ netns,
 * field dropped by extraction).
 */
2789 struct arg_dev_net {
2790 struct net_device *dev;
2794 /* called with write lock held for table with rt */
/* fib6_clean_all() callback for device shutdown: select routes on the
 * dying device (or all devices when adn->dev is NULL) for deletion,
 * keeping multipath siblings alive when ignore_routes_with_linkdown is
 * set and the device is merely down, not unregistering.
 */
2795 static int fib6_ifdown(struct rt6_info *rt, void *arg)
2797 const struct arg_dev_net *adn = arg;
2798 const struct net_device *dev = adn->dev;
2800 if ((rt->dst.dev == dev || !dev) &&
2801 rt != adn->net->ipv6.ip6_null_entry &&
2802 (rt->rt6i_nsiblings == 0 ||
2803 (dev && netdev_unregistering(dev)) ||
2804 !rt->rt6i_idev->cnf.ignore_routes_with_linkdown))
/* Remove all routes through @dev (fib6_ifdown walker) and flush any
 * uncached dsts still holding a reference on the device.
 */
2810 void rt6_ifdown(struct net *net, struct net_device *dev)
2812 struct arg_dev_net adn = {
2817 fib6_clean_all(net, fib6_ifdown, &adn);
2819 rt6_uncached_list_flush_dev(net, dev);
/* Walker argument for rt6_mtu_change(): the device whose MTU changed
 * (the new mtu field was dropped by extraction).
 */
2822 struct rt6_mtu_change_arg {
2823 struct net_device *dev;
/* fib6_clean_all() callback applying a device MTU change to each route:
 * for routes on the device with an unlocked raw RTAX_MTU metric,
 * decrease the cached PMTU on RTF_CACHE clones when the new MTU is
 * smaller, and raise the metric when the route's MTU was the old
 * device MTU (per RFC 1981: administrative increases must be
 * propagated, since PMTUD cannot discover them).
 */
2827 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
2829 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
2830 struct inet6_dev *idev;
2832 /* In IPv6 pmtu discovery is not optional,
2833 so that RTAX_MTU lock cannot disable it.
2834 We still use this lock to block changes
2835 caused by addrconf/ndisc.
2838 idev = __in6_dev_get(arg->dev);
2842 /* For administrative MTU increase, there is no way to discover
2843 IPv6 PMTU increase, so PMTU increase should be updated here.
2844 Since RFC 1981 doesn't include administrative MTU increase
2845 update PMTU increase is a MUST. (i.e. jumbo frame)
2848 If new MTU is less than route PMTU, this new MTU will be the
2849 lowest MTU in the path, update the route PMTU to reflect PMTU
2850 decreases; if new MTU is greater than route PMTU, and the
2851 old MTU is the lowest MTU in the path, update the route PMTU
2852 to reflect the increase. In this case if the other nodes' MTU
2853 also have the lowest MTU, TOO BIG MESSAGE will be lead to
2856 if (rt->dst.dev == arg->dev &&
2857 dst_metric_raw(&rt->dst, RTAX_MTU) &&
2858 !dst_metric_locked(&rt->dst, RTAX_MTU)) {
2859 if (rt->rt6i_flags & RTF_CACHE) {
2860 /* For RTF_CACHE with rt6i_pmtu == 0
2861 * (i.e. a redirected route),
2862 * the metrics of its rt->dst.from has already
2865 if (rt->rt6i_pmtu && rt->rt6i_pmtu > arg->mtu)
2866 rt->rt6i_pmtu = arg->mtu;
2867 } else if (dst_mtu(&rt->dst) >= arg->mtu ||
2868 (dst_mtu(&rt->dst) < arg->mtu &&
2869 dst_mtu(&rt->dst) == idev->cnf.mtu6)) {
2870 dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
/* Propagate a device MTU change to all routes via rt6_mtu_change_route. */
2876 void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
2878 struct rt6_mtu_change_arg arg = {
2883 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
/* Netlink attribute validation policy for RTM_{NEW,DEL,GET}ROUTE. */
2886 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
2887 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
2888 [RTA_OIF] = { .type = NLA_U32 },
2889 [RTA_IIF] = { .type = NLA_U32 },
2890 [RTA_PRIORITY] = { .type = NLA_U32 },
2891 [RTA_METRICS] = { .type = NLA_NESTED },
2892 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
2893 [RTA_PREF] = { .type = NLA_U8 },
2894 [RTA_ENCAP_TYPE] = { .type = NLA_U16 },
2895 [RTA_ENCAP] = { .type = NLA_NESTED },
2896 [RTA_EXPIRES] = { .type = NLA_U32 },
2897 [RTA_UID] = { .type = NLA_U32 },
2898 [RTA_MARK] = { .type = NLA_U32 },
/* Translate an RTM_NEWROUTE/RTM_DELROUTE netlink request into a
 * struct fib6_config.
 *
 * @skb:    the netlink request skb (used for portid / net namespace)
 * @nlh:    the netlink message header containing the rtmsg payload
 * @cfg:    output; zeroed then filled from the rtmsg and its attributes
 * @extack: extended ack for validation errors
 *
 * Returns 0 on success or a negative errno (error paths are elided in
 * this view). Note that cfg->fc_mx / cfg->fc_mp point INTO the request
 * message; the caller must not use @cfg after the skb is freed.
 */
2901 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2902 			      struct fib6_config *cfg,
2903 			      struct netlink_ext_ack *extack)
2906 	struct nlattr *tb[RTA_MAX+1];
2910 	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
2916 	rtm = nlmsg_data(nlh);
2917 	memset(cfg, 0, sizeof(*cfg));
2919 	cfg->fc_table = rtm->rtm_table;
2920 	cfg->fc_dst_len = rtm->rtm_dst_len;
2921 	cfg->fc_src_len = rtm->rtm_src_len;
2922 	cfg->fc_flags = RTF_UP;
2923 	cfg->fc_protocol = rtm->rtm_protocol;
2924 	cfg->fc_type = rtm->rtm_type;
	/* Reject-type routes (unreachable/blackhole/prohibit/throw) map to RTF_REJECT. */
2926 	if (rtm->rtm_type == RTN_UNREACHABLE ||
2927 	    rtm->rtm_type == RTN_BLACKHOLE ||
2928 	    rtm->rtm_type == RTN_PROHIBIT ||
2929 	    rtm->rtm_type == RTN_THROW)
2930 		cfg->fc_flags |= RTF_REJECT;
2932 	if (rtm->rtm_type == RTN_LOCAL)
2933 		cfg->fc_flags |= RTF_LOCAL;
2935 	if (rtm->rtm_flags & RTM_F_CLONED)
2936 		cfg->fc_flags |= RTF_CACHE;
2938 	cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
2939 	cfg->fc_nlinfo.nlh = nlh;
2940 	cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
2942 	if (tb[RTA_GATEWAY]) {
2943 		cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
2944 		cfg->fc_flags |= RTF_GATEWAY;
	/* Destination/source prefixes are copied only up to the prefix length
	 * rounded up to whole bytes; shorter attributes are rejected.
	 */
2948 		int plen = (rtm->rtm_dst_len + 7) >> 3;
2950 		if (nla_len(tb[RTA_DST]) < plen)
2953 		nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
2957 		int plen = (rtm->rtm_src_len + 7) >> 3;
2959 		if (nla_len(tb[RTA_SRC]) < plen)
2962 		nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
2965 	if (tb[RTA_PREFSRC])
2966 		cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);
2969 		cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
2971 	if (tb[RTA_PRIORITY])
2972 		cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
	/* fc_mx/fc_mp borrow the attribute payload in place (no copy). */
2974 	if (tb[RTA_METRICS]) {
2975 		cfg->fc_mx = nla_data(tb[RTA_METRICS]);
2976 		cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
2980 		cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
2982 	if (tb[RTA_MULTIPATH]) {
2983 		cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
2984 		cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
2986 		err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
2987 						     cfg->fc_mp_len, extack);
	/* Out-of-range router preference values fall back to MEDIUM. */
2993 		pref = nla_get_u8(tb[RTA_PREF]);
2994 		if (pref != ICMPV6_ROUTER_PREF_LOW &&
2995 		    pref != ICMPV6_ROUTER_PREF_HIGH)
2996 			pref = ICMPV6_ROUTER_PREF_MEDIUM;
2997 		cfg->fc_flags |= RTF_PREF(pref);
3001 		cfg->fc_encap = tb[RTA_ENCAP];
3003 	if (tb[RTA_ENCAP_TYPE]) {
3004 		cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
3006 		err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
3011 	if (tb[RTA_EXPIRES]) {
3012 		unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
3014 		if (addrconf_finite_timeout(timeout)) {
3015 			cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
3016 			cfg->fc_flags |= RTF_EXPIRES;
/* Members of struct rt6_nh: per-nexthop bookkeeping while building a
 * multipath route in ip6_route_multipath_add() (struct declaration line
 * is elided in this view).
 */
3026 	struct rt6_info *rt6_info;	/* route created for this nexthop */
3027 	struct fib6_config r_cfg;	/* per-nexthop copy of the request config */
3028 	struct mx6_config mxc;		/* converted metrics for insertion */
3029 	struct list_head next;		/* link on the local rt6_nh_list */
/* Log one warning per pending nexthop when a multipath route replace
 * failed partway, so the admin can audit what is actually installed.
 */
3032 static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
3036 	list_for_each_entry(nh, rt6_nh_list, next) {
3037 		pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6c nexthop %pI6c ifi %d\n",
3038 			&nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
3039 			nh->r_cfg.fc_ifindex);
/* Append @rt (with its config @r_cfg) to @rt6_nh_list unless an entry
 * with a duplicate nexthop is already queued. Allocates a new rt6_nh,
 * converts the metrics, and stores a copy of the per-nexthop config.
 * Returns 0 on success; error returns (EEXIST/ENOMEM paths) are elided
 * in this view.
 */
3043 static int ip6_route_info_append(struct list_head *rt6_nh_list,
3044 				 struct rt6_info *rt, struct fib6_config *r_cfg)
3049 	list_for_each_entry(nh, rt6_nh_list, next) {
3050 		/* check if rt6_info already exists */
3051 		if (rt6_duplicate_nexthop(nh->rt6_info, rt))
3055 	nh = kzalloc(sizeof(*nh), GFP_KERNEL);
3059 	err = ip6_convert_metrics(&nh->mxc, r_cfg);
3064 	memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
3065 	list_add_tail(&nh->next, rt6_nh_list);
/* Send a single RTM_NEWROUTE notification covering a whole multipath
 * route. For NLM_F_APPEND, rewind from the last inserted route to the
 * first sibling so the dump starts at the first nexthop.
 */
3070 static void ip6_route_mpath_notify(struct rt6_info *rt,
3071 				   struct rt6_info *rt_last,
3072 				   struct nl_info *info,
3075 	/* if this is an APPEND route, then rt points to the first route
3076 	 * inserted and rt_last points to last route inserted. Userspace
3077 	 * wants a consistent dump of the route which starts at the first
3078 	 * nexthop. Since sibling routes are always added at the end of
3079 	 * the list, find the first sibling of the last route appended
3081 	if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->rt6i_nsiblings) {
3082 		rt = list_first_entry(&rt_last->rt6i_siblings,
3088 		inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
/* Install a multipath route described by cfg->fc_mp (an array of
 * rtnexthop entries). Two phases:
 *   1) parse every rtnexthop into an rt6_info + per-nexthop config,
 *      collected on rt6_nh_list;
 *   2) insert them one by one with per-route notifications suppressed,
 *      then send a single notification for the whole route.
 * On a partial failure the already-inserted routes are deleted again.
 * Several error-handling branches are elided in this view.
 */
3091 static int ip6_route_multipath_add(struct fib6_config *cfg,
3092 				   struct netlink_ext_ack *extack)
3094 	struct rt6_info *rt_notif = NULL, *rt_last = NULL;
3095 	struct nl_info *info = &cfg->fc_nlinfo;
3096 	struct fib6_config r_cfg;
3097 	struct rtnexthop *rtnh;
3098 	struct rt6_info *rt;
3099 	struct rt6_nh *err_nh;
3100 	struct rt6_nh *nh, *nh_safe;
3106 	int replace = (cfg->fc_nlinfo.nlh &&
3107 		       (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
3108 	LIST_HEAD(rt6_nh_list);
3110 	nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
3111 	if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
3112 		nlflags |= NLM_F_APPEND;
3114 	remaining = cfg->fc_mp_len;
3115 	rtnh = (struct rtnexthop *)cfg->fc_mp;
3117 	/* Parse a Multipath Entry and build a list (rt6_nh_list) of
3118 	 * rt6_info structs per nexthop
3120 	while (rtnh_ok(rtnh, remaining)) {
		/* Each nexthop starts from the base config, then overrides
		 * ifindex/gateway/encap from its own rtnexthop attributes.
		 */
3121 		memcpy(&r_cfg, cfg, sizeof(*cfg));
3122 		if (rtnh->rtnh_ifindex)
3123 			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
3125 		attrlen = rtnh_attrlen(rtnh);
3127 			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
3129 			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
3131 				r_cfg.fc_gateway = nla_get_in6_addr(nla);
3132 				r_cfg.fc_flags |= RTF_GATEWAY;
3134 			r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
3135 			nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
3137 				r_cfg.fc_encap_type = nla_get_u16(nla);
3140 		rt = ip6_route_info_create(&r_cfg, extack);
3147 		err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg);
3149 			dst_release_immediate(&rt->dst);
3153 		rtnh = rtnh_next(rtnh, &remaining);
3156 	/* for add and replace send one notification with all nexthops.
3157 	 * Skip the notification in fib6_add_rt2node and send one with
3158 	 * the full route when done
3160 	info->skip_notify = 1;
3163 	list_for_each_entry(nh, &rt6_nh_list, next) {
3164 		rt_last = nh->rt6_info;
3165 		err = __ip6_ins_rt(nh->rt6_info, info, &nh->mxc, extack);
3166 		/* save reference to first route for notification */
3167 		if (!rt_notif && !err)
3168 			rt_notif = nh->rt6_info;
3170 		/* nh->rt6_info is used or freed at this point, reset to NULL*/
3171 		nh->rt6_info = NULL;
3174 			ip6_print_replace_route_err(&rt6_nh_list);
3179 		/* Because each route is added like a single route we remove
3180 		 * these flags after the first nexthop: if there is a collision,
3181 		 * we have already failed to add the first nexthop:
3182 		 * fib6_add_rt2node() has rejected it; when replacing, old
3183 		 * nexthops have been replaced by first new, the rest should
3186 		cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
3191 	/* success ... tell user about new route */
3192 	ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
3196 	/* send notification for routes that were added so that
3197 	 * the delete notifications sent by ip6_route_del are
3201 		ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
3203 	/* Delete routes that were already added */
3204 	list_for_each_entry(nh, &rt6_nh_list, next) {
3207 		ip6_route_del(&nh->r_cfg, extack);
	/* Final cleanup: free any routes never handed to the FIB and the
	 * rt6_nh bookkeeping entries themselves.
	 */
3211 	list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
3213 			dst_release_immediate(&nh->rt6_info->dst);
3215 		list_del(&nh->next);
/* Delete each nexthop of a multipath route independently: walk the
 * rtnexthop array and call ip6_route_del() per entry with the entry's
 * ifindex/gateway folded into a copy of the base config. The last error
 * is tracked (err/last_err) so one failing nexthop does not stop the
 * others; the return statement is elided in this view.
 */
3222 static int ip6_route_multipath_del(struct fib6_config *cfg,
3223 				   struct netlink_ext_ack *extack)
3225 	struct fib6_config r_cfg;
3226 	struct rtnexthop *rtnh;
3229 	int err = 1, last_err = 0;
3231 	remaining = cfg->fc_mp_len;
3232 	rtnh = (struct rtnexthop *)cfg->fc_mp;
3234 	/* Parse a Multipath Entry */
3235 	while (rtnh_ok(rtnh, remaining)) {
3236 		memcpy(&r_cfg, cfg, sizeof(*cfg));
3237 		if (rtnh->rtnh_ifindex)
3238 			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
3240 		attrlen = rtnh_attrlen(rtnh);
3242 			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
3244 			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
3246 				nla_memcpy(&r_cfg.fc_gateway, nla, 16);	/* 16 = sizeof(struct in6_addr) */
3247 				r_cfg.fc_flags |= RTF_GATEWAY;
3250 		err = ip6_route_del(&r_cfg, extack);
3254 		rtnh = rtnh_next(rtnh, &remaining);
/* RTM_DELROUTE handler: parse the request into a fib6_config, then
 * dispatch to the multipath or single-route delete path. Setting
 * fc_delete_all_nh requests removal of all nexthops of the route.
 */
3260 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
3261 			      struct netlink_ext_ack *extack)
3263 	struct fib6_config cfg;
3266 	err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
3271 		return ip6_route_multipath_del(&cfg, extack);
3273 		cfg.fc_delete_all_nh = 1;
3274 		return ip6_route_del(&cfg, extack);
/* RTM_NEWROUTE handler: parse the request into a fib6_config, then
 * dispatch to the multipath or single-route add path.
 */
3278 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
3279 			      struct netlink_ext_ack *extack)
3281 	struct fib6_config cfg;
3284 	err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
3289 		return ip6_route_multipath_add(&cfg, extack);
3291 		return ip6_route_add(&cfg, extack);
/* Worst-case netlink message size needed by rt6_fill_node() for @rt,
 * including one RTA_MULTIPATH nexthop slot per sibling route. Used to
 * size the notification skb in inet6_rt_notify().
 */
3294 static size_t rt6_nlmsg_size(struct rt6_info *rt)
3296 	int nexthop_len = 0;
3298 	if (rt->rt6i_nsiblings) {
3299 		nexthop_len = nla_total_size(0)	 /* RTA_MULTIPATH */
3300 			    + NLA_ALIGN(sizeof(struct rtnexthop))
3301 			    + nla_total_size(16) /* RTA_GATEWAY */
3302 			    + lwtunnel_get_encap_size(rt->dst.lwtstate);
3304 		nexthop_len *= rt->rt6i_nsiblings;
3307 	return NLMSG_ALIGN(sizeof(struct rtmsg))
3308 	       + nla_total_size(16) /* RTA_SRC */
3309 	       + nla_total_size(16) /* RTA_DST */
3310 	       + nla_total_size(16) /* RTA_GATEWAY */
3311 	       + nla_total_size(16) /* RTA_PREFSRC */
3312 	       + nla_total_size(4) /* RTA_TABLE */
3313 	       + nla_total_size(4) /* RTA_IIF */
3314 	       + nla_total_size(4) /* RTA_OIF */
3315 	       + nla_total_size(4) /* RTA_PRIORITY */
3316 	       + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
3317 	       + nla_total_size(sizeof(struct rta_cacheinfo))
3318 	       + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
3319 	       + nla_total_size(1) /* RTA_PREF */
3320 	       + lwtunnel_get_encap_size(rt->dst.lwtstate)
/* Emit the nexthop-related attributes (gateway, OIF, lwtunnel encap)
 * for @rt into @skb and accumulate RTNH_F_* state into *@flags.
 * @skip_oif is true when called from the multipath encoder, where the
 * ifindex lives in the rtnexthop struct instead of an RTA_OIF attr.
 * Returns <0 via the (elided) nla_put_failure path on overflow.
 */
3324 static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt,
3325 			    unsigned int *flags, bool skip_oif)
3327 	if (!netif_running(rt->dst.dev) || !netif_carrier_ok(rt->dst.dev)) {
3328 		*flags |= RTNH_F_LINKDOWN;
3329 		if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown)
3330 			*flags |= RTNH_F_DEAD;
3333 	if (rt->rt6i_flags & RTF_GATEWAY) {
3334 		if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->rt6i_gateway) < 0)
3335 			goto nla_put_failure;
3338 	if (rt->rt6i_flags & RTF_OFFLOAD)
3339 		*flags |= RTNH_F_OFFLOAD;
3341 	/* not needed for multipath encoding b/c it has a rtnexthop struct */
3342 	if (!skip_oif && rt->dst.dev &&
3343 	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
3344 		goto nla_put_failure;
3346 	if (rt->dst.lwtstate &&
3347 	    lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0)
3348 		goto nla_put_failure;
3356 /* add multipath next hop */
/* Encode @rt as one rtnexthop entry inside an open RTA_MULTIPATH nest:
 * reserve the rtnexthop header, append the nexthop attributes, then
 * back-patch rtnh_flags and rtnh_len once the payload length is known.
 */
3357 static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt)
3359 	struct rtnexthop *rtnh;
3360 	unsigned int flags = 0;
3362 	rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
3364 		goto nla_put_failure;
3366 	rtnh->rtnh_hops = 0;
3367 	rtnh->rtnh_ifindex = rt->dst.dev ? rt->dst.dev->ifindex : 0;
3369 	if (rt6_nexthop_info(skb, rt, &flags, true) < 0)
3370 		goto nla_put_failure;
3372 	rtnh->rtnh_flags = flags;
3374 	/* length of rtnetlink header + attributes */
3375 	rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
/* Serialize one rt6_info into a netlink RTM message on @skb.
 *
 * @dst/@src: when non-NULL (getroute replies), emit the concrete
 *            looked-up addresses with /128 prefix lengths instead of
 *            the route's own prefixes.
 * @iif:      input interface index to report (getroute input path).
 * Returns 0 on success; on attribute overflow the message is cancelled
 * via nlmsg_cancel() and -EMSGSIZE is returned (return statements are
 * elided in this view).
 */
3383 static int rt6_fill_node(struct net *net,
3384 			 struct sk_buff *skb, struct rt6_info *rt,
3385 			 struct in6_addr *dst, struct in6_addr *src,
3386 			 int iif, int type, u32 portid, u32 seq,
3389 	u32 metrics[RTAX_MAX];
3391 	struct nlmsghdr *nlh;
3395 	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
3399 	rtm = nlmsg_data(nlh);
3400 	rtm->rtm_family = AF_INET6;
3401 	rtm->rtm_dst_len = rt->rt6i_dst.plen;
3402 	rtm->rtm_src_len = rt->rt6i_src.plen;
3405 		table = rt->rt6i_table->tb6_id;
3407 		table = RT6_TABLE_UNSPEC;
3408 	rtm->rtm_table = table;
3409 	if (nla_put_u32(skb, RTA_TABLE, table))
3410 		goto nla_put_failure;
	/* Map the route's flags / dst.error back to the userspace RTN_* type. */
3411 	if (rt->rt6i_flags & RTF_REJECT) {
3412 		switch (rt->dst.error) {
3414 			rtm->rtm_type = RTN_BLACKHOLE;
3417 			rtm->rtm_type = RTN_PROHIBIT;
3420 			rtm->rtm_type = RTN_THROW;
3423 			rtm->rtm_type = RTN_UNREACHABLE;
3427 	else if (rt->rt6i_flags & RTF_LOCAL)
3428 		rtm->rtm_type = RTN_LOCAL;
3429 	else if (rt->rt6i_flags & RTF_ANYCAST)
3430 		rtm->rtm_type = RTN_ANYCAST;
3431 	else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
3432 		rtm->rtm_type = RTN_LOCAL;
3434 		rtm->rtm_type = RTN_UNICAST;
3436 	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
3437 	rtm->rtm_protocol = rt->rt6i_protocol;
3439 	if (rt->rt6i_flags & RTF_CACHE)
3440 		rtm->rtm_flags |= RTM_F_CLONED;
3443 		if (nla_put_in6_addr(skb, RTA_DST, dst))
3444 			goto nla_put_failure;
3445 		rtm->rtm_dst_len = 128;
3446 	} else if (rtm->rtm_dst_len)
3447 		if (nla_put_in6_addr(skb, RTA_DST, &rt->rt6i_dst.addr))
3448 			goto nla_put_failure;
3449 #ifdef CONFIG_IPV6_SUBTREES
3451 		if (nla_put_in6_addr(skb, RTA_SRC, src))
3452 			goto nla_put_failure;
3453 		rtm->rtm_src_len = 128;
3454 	} else if (rtm->rtm_src_len &&
3455 		   nla_put_in6_addr(skb, RTA_SRC, &rt->rt6i_src.addr))
3456 		goto nla_put_failure;
3459 #ifdef CONFIG_IPV6_MROUTE
3460 		if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
3461 			int err = ip6mr_get_route(net, skb, rtm, portid);
3466 					goto nla_put_failure;
3469 			if (nla_put_u32(skb, RTA_IIF, iif))
3470 				goto nla_put_failure;
3472 		struct in6_addr saddr_buf;
3473 		if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
3474 		    nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
3475 			goto nla_put_failure;
3478 	if (rt->rt6i_prefsrc.plen) {
3479 		struct in6_addr saddr_buf;
3480 		saddr_buf = rt->rt6i_prefsrc.addr;
3481 		if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
3482 			goto nla_put_failure;
	/* rt6i_pmtu, when set on a cached route, overrides the dst MTU metric. */
3485 	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
3487 		metrics[RTAX_MTU - 1] = rt->rt6i_pmtu;
3488 	if (rtnetlink_put_metrics(skb, metrics) < 0)
3489 		goto nla_put_failure;
3491 	if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
3492 		goto nla_put_failure;
3494 	/* For multipath routes, walk the siblings list and add
3495 	 * each as a nexthop within RTA_MULTIPATH.
3497 	if (rt->rt6i_nsiblings) {
3498 		struct rt6_info *sibling, *next_sibling;
3501 		mp = nla_nest_start(skb, RTA_MULTIPATH);
3503 			goto nla_put_failure;
3505 		if (rt6_add_nexthop(skb, rt) < 0)
3506 			goto nla_put_failure;
3508 		list_for_each_entry_safe(sibling, next_sibling,
3509 					 &rt->rt6i_siblings, rt6i_siblings) {
3510 			if (rt6_add_nexthop(skb, sibling) < 0)
3511 				goto nla_put_failure;
3514 		nla_nest_end(skb, mp);
3516 		if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags, false) < 0)
3517 			goto nla_put_failure;
3520 	expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0;
3522 	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
3523 		goto nla_put_failure;
3525 	if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
3526 		goto nla_put_failure;
3529 	nlmsg_end(skb, nlh);
3533 	nlmsg_cancel(skb, nlh);
/* fib6 walker callback for RTM_GETROUTE dumps: skip the null entry,
 * honor the RTM_F_PREFIX filter from the request, and serialize the
 * route with rt6_fill_node(). Non-zero return stops the dump.
 */
3537 int rt6_dump_route(struct rt6_info *rt, void *p_arg)
3539 	struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
3540 	struct net *net = arg->net;
3542 	if (rt == net->ipv6.ip6_null_entry)
3545 	if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
3546 		struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
3548 		/* user wants prefix routes only */
3549 		if (rtm->rtm_flags & RTM_F_PREFIX &&
3550 		    !(rt->rt6i_flags & RTF_PREFIX_RT)) {
3551 			/* success since this is not a prefix route */
3556 	return rt6_fill_node(net,
3557 		     arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
3558 		     NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq,
/* RTM_GETROUTE handler: build a flowi6 from the request attributes,
 * perform a route lookup (input path when RTA_IIF is given, output
 * path otherwise; fib-match lookups bypass the dst cache), then reply
 * to the requester with the serialized result. Several error/cleanup
 * branches are elided in this view.
 */
3562 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3563 			      struct netlink_ext_ack *extack)
3565 	struct net *net = sock_net(in_skb->sk);
3566 	struct nlattr *tb[RTA_MAX+1];
3567 	int err, iif = 0, oif = 0;
3568 	struct dst_entry *dst;
3569 	struct rt6_info *rt;
3570 	struct sk_buff *skb;
3575 	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
3581 	memset(&fl6, 0, sizeof(fl6));
3582 	rtm = nlmsg_data(nlh);
3583 	fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
3584 	fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);
3587 		if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
3590 		fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
3594 		if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
3597 		fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
3601 		iif = nla_get_u32(tb[RTA_IIF]);
3604 		oif = nla_get_u32(tb[RTA_OIF]);
3607 		fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
3610 		fl6.flowi6_uid = make_kuid(current_user_ns(),
3611 					   nla_get_u32(tb[RTA_UID]));
3613 		fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
	/* Input-path lookup: resolve the incoming device and simulate
	 * forwarding; output-path lookup uses ip6_route_output().
	 */
3616 		struct net_device *dev;
3619 		dev = __dev_get_by_index(net, iif);
3625 		fl6.flowi6_iif = iif;
3627 		if (!ipv6_addr_any(&fl6.saddr))
3628 			flags |= RT6_LOOKUP_F_HAS_SADDR;
3631 			dst = ip6_route_input_lookup(net, dev, &fl6, flags);
3633 		fl6.flowi6_oif = oif;
3636 			dst = ip6_route_output(net, NULL, &fl6);
3640 		dst = ip6_route_lookup(net, &fl6, 0);
3642 	rt = container_of(dst, struct rt6_info, dst);
3643 	if (rt->dst.error) {
3644 		err = rt->dst.error;
3649 	if (rt == net->ipv6.ip6_null_entry) {
3650 		err = rt->dst.error;
3655 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3662 	skb_dst_set(skb, &rt->dst);
	/* fibmatch replies report the matched FIB entry (no dst/src detail);
	 * regular replies include the resolved addresses.
	 */
3664 		err = rt6_fill_node(net, skb, rt, NULL, NULL, iif,
3665 				    RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
3668 		err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
3669 				    RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
3676 	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
/* Broadcast an RTM event for @rt to RTNLGRP_IPV6_ROUTE listeners.
 * The skb is sized by rt6_nlmsg_size(), so a -EMSGSIZE from
 * rt6_fill_node() indicates a sizing bug and triggers the WARN_ON.
 * On failure, subscribers are told via rtnl_set_sk_err().
 */
3681 void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info,
3682 		     unsigned int nlm_flags)
3684 	struct sk_buff *skb;
3685 	struct net *net = info->nl_net;
3690 	seq = info->nlh ? info->nlh->nlmsg_seq : 0;
3692 	skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
3696 	err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
3697 			    event, info->portid, seq, nlm_flags);
3699 		/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
3700 		WARN_ON(err == -EMSGSIZE);
3704 	rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
3705 		    info->nlh, gfp_any());
3709 	rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
/* Netdevice notifier: the loopback device backs the special routes
 * (null / prohibit / blackhole entries). On its REGISTER, point those
 * entries' dst.dev and rt6i_idev at it; on UNREGISTER, drop the idev
 * references exactly once (UNREGISTER can fire repeatedly via
 * netdev_wait_allrefs()).
 */
3712 static int ip6_route_dev_notify(struct notifier_block *this,
3713 				unsigned long event, void *ptr)
3715 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3716 	struct net *net = dev_net(dev);
3718 	if (!(dev->flags & IFF_LOOPBACK))
3721 	if (event == NETDEV_REGISTER) {
3722 		net->ipv6.ip6_null_entry->dst.dev = dev;
3723 		net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
3724 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3725 		net->ipv6.ip6_prohibit_entry->dst.dev = dev;
3726 		net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
3727 		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
3728 		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
3730 	} else if (event == NETDEV_UNREGISTER &&
3731 		   dev->reg_state != NETREG_UNREGISTERED) {
3732 		/* NETDEV_UNREGISTER could be fired for multiple times by
3733 		 * netdev_wait_allrefs(). Make sure we only call this once.
3735 		in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev);
3736 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3737 		in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev);
3738 		in6_dev_put(net->ipv6.ip6_blk_hole_entry->rt6i_idev);
3749 #ifdef CONFIG_PROC_FS
/* /proc/net/ipv6_route file operations (seq_file based; the .read
 * member line is elided in this view).
 */
3751 static const struct file_operations ipv6_route_proc_fops = {
3752 	.owner		= THIS_MODULE,
3753 	.open		= ipv6_route_open,
3755 	.llseek		= seq_lseek,
3756 	.release	= seq_release_net,
/* /proc/net/rt6_stats: print the per-netns fib6 statistics as seven
 * hex fields on one line.
 */
3759 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
3761 	struct net *net = (struct net *)seq->private;
3762 	seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
3763 		   net->ipv6.rt6_stats->fib_nodes,
3764 		   net->ipv6.rt6_stats->fib_route_nodes,
3765 		   net->ipv6.rt6_stats->fib_rt_alloc,
3766 		   net->ipv6.rt6_stats->fib_rt_entries,
3767 		   net->ipv6.rt6_stats->fib_rt_cache,
3768 		   dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
3769 		   net->ipv6.rt6_stats->fib_discarded_routes);
/* open() for /proc/net/rt6_stats: single-shot, netns-aware seq_file. */
3774 static int rt6_stats_seq_open(struct inode *inode, struct file *file)
3776 	return single_open_net(inode, file, rt6_stats_seq_show);
/* /proc/net/rt6_stats file operations (the .read member line is elided
 * in this view).
 */
3779 static const struct file_operations rt6_stats_seq_fops = {
3780 	.owner	 = THIS_MODULE,
3781 	.open	 = rt6_stats_seq_open,
3783 	.llseek	 = seq_lseek,
3784 	.release = single_release_net,
3786 #endif /* CONFIG_PROC_FS */
3788 #ifdef CONFIG_SYSCTL
/* Handler for net.ipv6.route.flush: on write, read the delay value via
 * proc_dointvec() and kick a fib6 garbage-collection run. A delay <= 0
 * means flush immediately. The net pointer travels in ctl->extra1
 * (set up in ipv6_route_sysctl_init()).
 */
3791 int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
3792 			      void __user *buffer, size_t *lenp, loff_t *ppos)
3799 	net = (struct net *)ctl->extra1;
3800 	delay = net->ipv6.sysctl.flush_delay;
3801 	proc_dointvec(ctl, write, buffer, lenp, ppos);
3802 	fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
/* Template for the net.ipv6.route.* sysctl table; per-netns copies are
 * made in ipv6_route_sysctl_init(), which redirects each .data pointer
 * to the namespace's own fields (the .mode member lines are elided in
 * this view).
 */
3806 struct ctl_table ipv6_route_table_template[] = {
3808 		.procname	=	"flush",
3809 		.data		=	&init_net.ipv6.sysctl.flush_delay,
3810 		.maxlen		=	sizeof(int),
3812 		.proc_handler	=	ipv6_sysctl_rtcache_flush
3815 		.procname	=	"gc_thresh",
3816 		.data		=	&ip6_dst_ops_template.gc_thresh,
3817 		.maxlen		=	sizeof(int),
3819 		.proc_handler	=	proc_dointvec,
3822 		.procname	=	"max_size",
3823 		.data		=	&init_net.ipv6.sysctl.ip6_rt_max_size,
3824 		.maxlen		=	sizeof(int),
3826 		.proc_handler	=	proc_dointvec,
3829 		.procname	=	"gc_min_interval",
3830 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
3831 		.maxlen		=	sizeof(int),
3833 		.proc_handler	=	proc_dointvec_jiffies,
3836 		.procname	=	"gc_timeout",
3837 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_timeout,
3838 		.maxlen		=	sizeof(int),
3840 		.proc_handler	=	proc_dointvec_jiffies,
3843 		.procname	=	"gc_interval",
3844 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_interval,
3845 		.maxlen		=	sizeof(int),
3847 		.proc_handler	=	proc_dointvec_jiffies,
3850 		.procname	=	"gc_elasticity",
3851 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
3852 		.maxlen		=	sizeof(int),
3854 		.proc_handler	=	proc_dointvec,
3857 		.procname	=	"mtu_expires",
3858 		.data		=	&init_net.ipv6.sysctl.ip6_rt_mtu_expires,
3859 		.maxlen		=	sizeof(int),
3861 		.proc_handler	=	proc_dointvec_jiffies,
3864 		.procname	=	"min_adv_mss",
3865 		.data		=	&init_net.ipv6.sysctl.ip6_rt_min_advmss,
3866 		.maxlen		=	sizeof(int),
3868 		.proc_handler	=	proc_dointvec,
3871 		.procname	=	"gc_min_interval_ms",
3872 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
3873 		.maxlen		=	sizeof(int),
3875 		.proc_handler	=	proc_dointvec_ms_jiffies,
/* Duplicate the sysctl template for namespace @net and repoint every
 * .data member at the namespace's own storage. Indices must stay in
 * sync with ipv6_route_table_template[]. Unprivileged user namespaces
 * get the "flush" entry hidden by nulling its procname. Returns the
 * table (return/error lines are elided in this view).
 */
3880 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
3882 	struct ctl_table *table;
3884 	table = kmemdup(ipv6_route_table_template,
3885 			sizeof(ipv6_route_table_template),
3889 		table[0].data = &net->ipv6.sysctl.flush_delay;
3890 		table[0].extra1 = net;	/* consumed by ipv6_sysctl_rtcache_flush() */
3891 		table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
3892 		table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
3893 		table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
3894 		table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
3895 		table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
3896 		table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
3897 		table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
3898 		table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
3899 		table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
3901 		/* Don't export sysctls to unprivileged users */
3902 		if (net->user_ns != &init_user_ns)
3903 			table[0].procname = NULL;
/* Per-netns setup for IPv6 routing: clone the dst_ops template,
 * allocate the special route entries (null, and with multiple tables
 * also prohibit/blackhole), and seed the routing sysctl defaults.
 * Unwinds allocations in reverse order on failure (some labels/returns
 * are elided in this view).
 */
3910 static int __net_init ip6_route_net_init(struct net *net)
3914 	memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
3915 	       sizeof(net->ipv6.ip6_dst_ops));
3917 	if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
3918 		goto out_ip6_dst_ops;
3920 	net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
3921 					   sizeof(*net->ipv6.ip6_null_entry),
3923 	if (!net->ipv6.ip6_null_entry)
3924 		goto out_ip6_dst_entries;
3925 	net->ipv6.ip6_null_entry->dst.path =
3926 		(struct dst_entry *)net->ipv6.ip6_null_entry;
3927 	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3928 	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
3929 			 ip6_template_metrics, true);
3931 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3932 	net->ipv6.fib6_has_custom_rules = false;
3933 	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
3934 					       sizeof(*net->ipv6.ip6_prohibit_entry),
3936 	if (!net->ipv6.ip6_prohibit_entry)
3937 		goto out_ip6_null_entry;
3938 	net->ipv6.ip6_prohibit_entry->dst.path =
3939 		(struct dst_entry *)net->ipv6.ip6_prohibit_entry;
3940 	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3941 	dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
3942 			 ip6_template_metrics, true);
3944 	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
3945 					       sizeof(*net->ipv6.ip6_blk_hole_entry),
3947 	if (!net->ipv6.ip6_blk_hole_entry)
3948 		goto out_ip6_prohibit_entry;
3949 	net->ipv6.ip6_blk_hole_entry->dst.path =
3950 		(struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
3951 	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3952 	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
3953 			 ip6_template_metrics, true);
	/* Default sysctl values for this namespace. */
3956 	net->ipv6.sysctl.flush_delay = 0;
3957 	net->ipv6.sysctl.ip6_rt_max_size = 4096;
3958 	net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
3959 	net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
3960 	net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
3961 	net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
3962 	net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
3963 	net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
3965 	net->ipv6.ip6_rt_gc_expire = 30*HZ;
3971 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3972 out_ip6_prohibit_entry:
3973 	kfree(net->ipv6.ip6_prohibit_entry);
3975 	kfree(net->ipv6.ip6_null_entry);
3977 out_ip6_dst_entries:
3978 	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
/* Per-netns teardown: mirror of ip6_route_net_init() — free the special
 * route entries and the dst entry counters.
 */
3983 static void __net_exit ip6_route_net_exit(struct net *net)
3985 	kfree(net->ipv6.ip6_null_entry);
3986 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3987 	kfree(net->ipv6.ip6_prohibit_entry);
3988 	kfree(net->ipv6.ip6_blk_hole_entry);
3990 	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
/* Late per-netns init: register the /proc/net entries once the routing
 * state they expose exists.
 */
3993 static int __net_init ip6_route_net_init_late(struct net *net)
3995 #ifdef CONFIG_PROC_FS
3996 	proc_create("ipv6_route", 0, net->proc_net, &ipv6_route_proc_fops);
3997 	proc_create("rt6_stats", S_IRUGO, net->proc_net, &rt6_stats_seq_fops);
/* Late per-netns teardown: remove the /proc/net entries created in
 * ip6_route_net_init_late().
 */
4002 static void __net_exit ip6_route_net_exit_late(struct net *net)
4004 #ifdef CONFIG_PROC_FS
4005 	remove_proc_entry("ipv6_route", net->proc_net);
4006 	remove_proc_entry("rt6_stats", net->proc_net);
/* Main per-netns lifecycle hooks for IPv6 routing state. */
4010 static struct pernet_operations ip6_route_net_ops = {
4011 	.init = ip6_route_net_init,
4012 	.exit = ip6_route_net_exit,
/* Allocate and initialize the per-netns IPv6 inetpeer base (ENOMEM
 * path elided in this view).
 */
4015 static int __net_init ipv6_inetpeer_init(struct net *net)
4017 	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
4021 	inet_peer_base_init(bp);
4022 	net->ipv6.peers = bp;
/* Tear down the per-netns inetpeer base: detach it first, then
 * invalidate the tree (the kfree is elided in this view).
 */
4026 static void __net_exit ipv6_inetpeer_exit(struct net *net)
4028 	struct inet_peer_base *bp = net->ipv6.peers;
4030 	net->ipv6.peers = NULL;
4031 	inetpeer_invalidate_tree(bp);
/* Per-netns lifecycle hooks for the IPv6 inetpeer base. */
4035 static struct pernet_operations ipv6_inetpeer_ops = {
4036 	.init	=	ipv6_inetpeer_init,
4037 	.exit	=	ipv6_inetpeer_exit,
/* Late per-netns lifecycle hooks (procfs entries). */
4040 static struct pernet_operations ip6_route_net_late_ops = {
4041 	.init = ip6_route_net_init_late,
4042 	.exit = ip6_route_net_exit_late,
/* Netdevice notifier for the loopback-backed special routes; runs
 * after addrconf's notifier (lower priority than ADDRCONF_NOTIFY_PRIORITY).
 */
4045 static struct notifier_block ip6_route_dev_notifier = {
4046 	.notifier_call = ip6_route_dev_notify,
4047 	.priority = ADDRCONF_NOTIFY_PRIORITY - 10,
/* Wire init_net's special routes to the loopback device. For init_net
 * the loopback registers before ip6_route_dev_notify() is installed,
 * so the REGISTER-event work must be done here by hand.
 */
4050 void __init ip6_route_init_special_entries(void)
4052 	/* Registering of the loopback is done before this portion of code,
4053 	 * the loopback reference in rt6_info will not be taken, do it
4054 	 * manually for init_net */
4055 	init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
4056 	init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
4057 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
4058 	init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
4059 	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
4060 	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
4061 	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
/* Subsystem init for IPv6 routing: create the rt6_info slab, register
 * the pernet subsystems (inetpeer, routing state, late procfs), fib6
 * and fib6-rules, the RTM_{NEW,DEL,GET}ROUTE rtnetlink handlers, the
 * netdevice notifier, and the per-CPU uncached-route lists. Each error
 * label unwinds everything registered before it (some labels are
 * elided in this view).
 */
4065 int __init ip6_route_init(void)
4071 	ip6_dst_ops_template.kmem_cachep =
4072 		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
4073 				  SLAB_HWCACHE_ALIGN, NULL);
4074 	if (!ip6_dst_ops_template.kmem_cachep)
4077 	ret = dst_entries_init(&ip6_dst_blackhole_ops);
4079 		goto out_kmem_cache;
4081 	ret = register_pernet_subsys(&ipv6_inetpeer_ops);
4083 		goto out_dst_entries;
4085 	ret = register_pernet_subsys(&ip6_route_net_ops);
4087 		goto out_register_inetpeer;
4089 	ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
4093 		goto out_register_subsys;
4099 	ret = fib6_rules_init();
4103 	ret = register_pernet_subsys(&ip6_route_net_late_ops);
4105 		goto fib6_rules_init;
4108 	if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
4109 	    __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
4110 	    __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
4111 		goto out_register_late_subsys;
4113 	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
4115 		goto out_register_late_subsys;
4117 	for_each_possible_cpu(cpu) {
4118 		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
4120 		INIT_LIST_HEAD(&ul->head);
4121 		spin_lock_init(&ul->lock);
4127 out_register_late_subsys:
4128 	unregister_pernet_subsys(&ip6_route_net_late_ops);
4130 	fib6_rules_cleanup();
4135 out_register_subsys:
4136 	unregister_pernet_subsys(&ip6_route_net_ops);
4137 out_register_inetpeer:
4138 	unregister_pernet_subsys(&ipv6_inetpeer_ops);
4140 	dst_entries_destroy(&ip6_dst_blackhole_ops);
4142 	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
4146 void ip6_route_cleanup(void)
4148 unregister_netdevice_notifier(&ip6_route_dev_notifier);
4149 unregister_pernet_subsys(&ip6_route_net_late_ops);
4150 fib6_rules_cleanup();
4153 unregister_pernet_subsys(&ipv6_inetpeer_ops);
4154 unregister_pernet_subsys(&ip6_route_net_ops);
4155 dst_entries_destroy(&ip6_dst_blackhole_ops);
4156 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);