// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	Alan Cox		:	Verify area fixes.
 *	Alan Cox		:	cli() protects routing changes
 *	Rui Oliveira		:	ICMP routing table updates
 *	(rco@di.uminho.pt)		Routing table insertion and update
 *	Linus Torvalds		:	Rewrote bits to be sensible
 *	Alan Cox		:	Added BSD route gw semantics
 *	Alan Cox		:	Super /proc >4K
 *	Alan Cox		:	MTU in route table
 *	Alan Cox		:	MSS actually. Also added the window
 *	Sam Lantinga		:	Fixed route matching in rt_del()
 *	Alan Cox		:	Routing cache support.
 *	Alan Cox		:	Removed compatibility cruft.
 *	Alan Cox		:	RTF_REJECT support.
 *	Alan Cox		:	TCP irtt support.
 *	Jonathan Naylor		:	Added Metric support.
 *	Miquel van Smoorenburg	:	BSD API fixes.
 *	Miquel van Smoorenburg	:	Metrics.
 *	Alan Cox		:	Use __u32 properly
 *	Alan Cox		:	Aligned routing errors more closely with BSD
 *					our system is still very different.
 *	Alan Cox		:	Faster /proc handling
 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *	Olaf Erb		:	irtt wasn't being copied right.
 *	Bjorn Ekwall		:	Kerneld route support.
 *	Alan Cox		:	Multicast fixed (I hope)
 *	Pavel Krauz		:	Limited broadcast fixed
 *	Mike McLagan		:	Routing by source
 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *	Andi Kleen		:	Load-limit warning messages.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *	Marc Boucher		:	routing by fwmark
 *	Robert Olsson		:	Added rt_cache statistics
 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
 *	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
 *	Ilia Sotnikov		:	Removed TOS from hash calculations
 */
#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/socket.h>
#include <linux/errno.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/dst_metadata.h>
#include <net/inet_dscp.h>
#include <net/net_namespace.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/ip_fib.h>
#include <net/nexthop.h>
#include <net/lwtunnel.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#include <linux/sysctl.h>
#include <net/secure_seq.h>
#include <net/ip_tunnels.h>

#include "fib_lookup.h"

#define RT_FL_TOS(oldflp4) \
        ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))

#define RT_GC_TIMEOUT (300*HZ)

#define DEFAULT_MIN_PMTU (512 + 20 + 20)
#define DEFAULT_MTU_EXPIRES (10 * 60 * HZ)
#define DEFAULT_MIN_ADVMSS 256
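/* A quick reading of the defaults above: DEFAULT_MIN_PMTU is 552 bytes
 * (512 bytes of payload plus a 20-byte IPv4 header and a 20-byte TCP
 * header), learned PMTU values expire after ten minutes, and the
 * advertised MSS is never pushed below 256 bytes.  These seed the
 * per-netns sysctls under net.ipv4.route (min_pmtu, mtu_expires,
 * min_adv_mss).
 */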
static int ip_rt_max_size;
static int ip_rt_redirect_number __read_mostly = 9;
static int ip_rt_redirect_load __read_mostly = HZ / 50;
static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly = HZ;
static int ip_rt_error_burst __read_mostly = 5 * HZ;

static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;

/*
 *	Interface to generic destination cache.
 */

INDIRECT_CALLABLE_SCOPE
struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
INDIRECT_CALLABLE_SCOPE
unsigned int ipv4_mtu(const struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
                              struct sk_buff *skb, u32 mtu,
static void ip_do_redirect(struct dst_entry *dst, struct sock *sk,
                           struct sk_buff *skb);
static void ipv4_dst_destroy(struct dst_entry *dst);

static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr);

static struct dst_ops ipv4_dst_ops = {
        .check =		ipv4_dst_check,
        .default_advmss =	ipv4_default_advmss,
        .cow_metrics =		ipv4_cow_metrics,
        .destroy =		ipv4_dst_destroy,
        .negative_advice =	ipv4_negative_advice,
        .link_failure =		ipv4_link_failure,
        .update_pmtu =		ip_rt_update_pmtu,
        .redirect =		ip_do_redirect,
        .local_out =		__ip_local_out,
        .neigh_lookup =		ipv4_neigh_lookup,
        .confirm_neigh =	ipv4_confirm_neigh,

#define ECN_OR_COST(class) TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
        ECN_OR_COST(BESTEFFORT),
        ECN_OR_COST(BESTEFFORT),
        ECN_OR_COST(INTERACTIVE),
        ECN_OR_COST(INTERACTIVE),
        TC_PRIO_INTERACTIVE_BULK,
        ECN_OR_COST(INTERACTIVE_BULK),
        TC_PRIO_INTERACTIVE_BULK,
        ECN_OR_COST(INTERACTIVE_BULK)
EXPORT_SYMBOL(ip_tos2prio);
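/* Usage sketch (hedged): the table above is indexed by the four TOS
 * bits shifted down by one, so a packet sent with IPTOS_LOWDELAY
 * (0x10) lands on a TC_PRIO_INTERACTIVE entry.  Callers typically go
 * through the rt_tos2priority() helper from <net/route.h>:
 *
 *	u32 prio = rt_tos2priority(iph->tos);
 *	// equivalent to ip_tos2prio[IPTOS_TOS(iph->tos) >> 1]
 */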
static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)

#ifdef CONFIG_PROC_FS
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
        return SEQ_START_TOKEN;

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)

static void rt_cache_seq_stop(struct seq_file *seq, void *v)

static int rt_cache_seq_show(struct seq_file *seq, void *v)
        if (v == SEQ_START_TOKEN)
                seq_printf(seq, "%-127s\n",
                           "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
                           "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"

static const struct seq_operations rt_cache_seq_ops = {
        .start  = rt_cache_seq_start,
        .next   = rt_cache_seq_next,
        .stop   = rt_cache_seq_stop,
        .show   = rt_cache_seq_show,

static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
                return SEQ_START_TOKEN;
        for (cpu = *pos - 1; cpu < nr_cpu_ids; ++cpu) {
                if (!cpu_possible(cpu))
                return &per_cpu(rt_cache_stat, cpu);

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
        for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
                if (!cpu_possible(cpu))
                return &per_cpu(rt_cache_stat, cpu);

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
        struct rt_cache_stat *st = v;

        if (v == SEQ_START_TOKEN) {
                seq_puts(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");

        seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x "
                   "%08x %08x %08x %08x %08x %08x "
                   "%08x %08x %08x %08x\n",
                   dst_entries_get_slow(&ipv4_dst_ops),
                   0, /* st->gc_total */
                   0, /* st->gc_ignored */
                   0, /* st->gc_goal_miss */
                   0, /* st->gc_dst_overflow */
                   0, /* st->in_hlist_search */
                   0  /* st->out_hlist_search */

static const struct seq_operations rt_cpu_seq_ops = {
        .start  = rt_cpu_seq_start,
        .next   = rt_cpu_seq_next,
        .stop   = rt_cpu_seq_stop,
        .show   = rt_cpu_seq_show,

#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
        struct ip_rt_acct *dst, *src;

        dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
        for_each_possible_cpu(i) {
                src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
                for (j = 0; j < 256; j++) {
                        dst[j].o_bytes   += src[j].o_bytes;
                        dst[j].o_packets += src[j].o_packets;
                        dst[j].i_bytes   += src[j].i_bytes;
                        dst[j].i_packets += src[j].i_packets;

        seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));

static int __net_init ip_rt_do_proc_init(struct net *net)
        struct proc_dir_entry *pde;

        pde = proc_create_seq("rt_cache", 0444, net->proc_net,
        pde = proc_create_seq("rt_cache", 0444, net->proc_net_stat,
#ifdef CONFIG_IP_ROUTE_CLASSID
        pde = proc_create_single("rt_acct", 0, net->proc_net,
#ifdef CONFIG_IP_ROUTE_CLASSID
        remove_proc_entry("rt_cache", net->proc_net_stat);
        remove_proc_entry("rt_cache", net->proc_net);

static void __net_exit ip_rt_do_proc_exit(struct net *net)
        remove_proc_entry("rt_cache", net->proc_net_stat);
        remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
        remove_proc_entry("rt_acct", net->proc_net);

static struct pernet_operations ip_rt_proc_ops __net_initdata = {
        .init = ip_rt_do_proc_init,
        .exit = ip_rt_do_proc_exit,

static int __init ip_rt_proc_init(void)
        return register_pernet_subsys(&ip_rt_proc_ops);

static inline int ip_rt_proc_init(void)
#endif /* CONFIG_PROC_FS */
static inline bool rt_is_expired(const struct rtable *rth)
        return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));

void rt_cache_flush(struct net *net)
        rt_genid_bump_ipv4(net);

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
        const struct rtable *rt = container_of(dst, struct rtable, dst);
        struct net_device *dev = dst->dev;

        if (likely(rt->rt_gw_family == AF_INET)) {
                n = ip_neigh_gw4(dev, rt->rt_gw4);
        } else if (rt->rt_gw_family == AF_INET6) {
                n = ip_neigh_gw6(dev, &rt->rt_gw6);
                pkey = skb ? ip_hdr(skb)->daddr : *((__be32 *) daddr);
                n = ip_neigh_gw4(dev, pkey);

        if (!IS_ERR(n) && !refcount_inc_not_zero(&n->refcnt))
        rcu_read_unlock_bh();

static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
        const struct rtable *rt = container_of(dst, struct rtable, dst);
        struct net_device *dev = dst->dev;
        const __be32 *pkey = daddr;

        if (rt->rt_gw_family == AF_INET) {
                pkey = (const __be32 *)&rt->rt_gw4;
        } else if (rt->rt_gw_family == AF_INET6) {
                return __ipv6_confirm_neigh_stub(dev, &rt->rt_gw6);
                     (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL))) {
        __ipv4_confirm_neigh(dev, *(__force u32 *)pkey);

/* Hash tables of size 2048..262144 depending on RAM size.
 * Each bucket uses 8 bytes.
 */
static u32 ip_idents_mask __read_mostly;
static atomic_t *ip_idents __read_mostly;
static u32 *ip_tstamps __read_mostly;

/* In order to protect privacy, we add a perturbation to identifiers
 * if one generator is seldom used.  This makes it hard for an attacker
 * to infer how many packets were sent between two points in time.
 */
static u32 ip_idents_reserve(u32 hash, int segs)
        u32 bucket, old, now = (u32)jiffies;

        bucket = hash & ip_idents_mask;
        p_tstamp = ip_tstamps + bucket;
        p_id = ip_idents + bucket;
        old = READ_ONCE(*p_tstamp);

        if (old != now && cmpxchg(p_tstamp, old, now) == old)
                delta = prandom_u32_max(now - old);

        /* If UBSAN reports an error there, please make sure your compiler
         * supports -fno-strict-overflow before reporting it; that was a bug
         * in UBSAN, and it has been fixed in GCC-8.
         */
        return atomic_add_return(segs + delta, p_id) - segs;

void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)

        /* Note the following code is not safe, but this is okay. */
        if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
                get_random_bytes(&net->ipv4.ip_id_key,
                                 sizeof(net->ipv4.ip_id_key));

        hash = siphash_3u32((__force u32)iph->daddr,
                            (__force u32)iph->saddr,
                            &net->ipv4.ip_id_key);
        id = ip_idents_reserve(hash, segs);
EXPORT_SYMBOL(__ip_select_ident);
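/* Usage sketch (hedged): callers normally reach this through the
 * ip_select_ident() / ip_select_ident_segs() inlines in <net/ip.h>,
 * which fall back to __ip_select_ident() for non-DF packets:
 *
 *	__ip_select_ident(net, ip_hdr(skb), segs);
 *
 * The siphash of the header fields picks one of the ip_idents buckets,
 * and ip_idents_reserve() hands out "segs" consecutive IDs from it,
 * perturbed by a random delta whenever the bucket has been idle.
 */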
static void ip_rt_fix_tos(struct flowi4 *fl4)
        __u8 tos = RT_FL_TOS(fl4);

        fl4->flowi4_tos = tos & IPTOS_RT_MASK;
        fl4->flowi4_scope = tos & RTO_ONLINK ?
                            RT_SCOPE_LINK : RT_SCOPE_UNIVERSE;

static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
                             const struct sock *sk,
                             const struct iphdr *iph,
                             u8 prot, u32 mark, int flow_flags)
                const struct inet_sock *inet = inet_sk(sk);

                oif = sk->sk_bound_dev_if;
                tos = RT_CONN_FLAGS(sk);
                prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
        flowi4_init_output(fl4, oif, mark, tos,
                           RT_SCOPE_UNIVERSE, prot,
                           iph->daddr, iph->saddr, 0, 0,
                           sock_net_uid(net, sk));

static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
                               const struct sock *sk)
        const struct net *net = dev_net(skb->dev);
        const struct iphdr *iph = ip_hdr(skb);
        int oif = skb->dev->ifindex;
        u8 tos = RT_TOS(iph->tos);
        u8 prot = iph->protocol;
        u32 mark = skb->mark;

        __build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);

static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
        const struct inet_sock *inet = inet_sk(sk);
        const struct ip_options_rcu *inet_opt;
        __be32 daddr = inet->inet_daddr;

        inet_opt = rcu_dereference(inet->inet_opt);
        if (inet_opt && inet_opt->opt.srr)
                daddr = inet_opt->opt.faddr;
        flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
                           inet_sk_flowi_flags(sk),
                           daddr, inet->inet_saddr, 0, 0, sk->sk_uid);

static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
                                 const struct sk_buff *skb)
                build_skb_flow_key(fl4, skb, sk);
                build_sk_flow_key(fl4, sk);

static DEFINE_SPINLOCK(fnhe_lock);

static void fnhe_flush_routes(struct fib_nh_exception *fnhe)

        rt = rcu_dereference(fnhe->fnhe_rth_input);
                RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
                dst_dev_put(&rt->dst);
                dst_release(&rt->dst);
        rt = rcu_dereference(fnhe->fnhe_rth_output);
                RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
                dst_dev_put(&rt->dst);
                dst_release(&rt->dst);

static void fnhe_remove_oldest(struct fnhe_hash_bucket *hash)
        struct fib_nh_exception __rcu **fnhe_p, **oldest_p;
        struct fib_nh_exception *fnhe, *oldest = NULL;

        for (fnhe_p = &hash->chain; ; fnhe_p = &fnhe->fnhe_next) {
                fnhe = rcu_dereference_protected(*fnhe_p,
                                                 lockdep_is_held(&fnhe_lock));
                    time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp)) {

        fnhe_flush_routes(oldest);
        *oldest_p = oldest->fnhe_next;
        kfree_rcu(oldest, rcu);

static u32 fnhe_hashfun(__be32 daddr)
        static siphash_aligned_key_t fnhe_hash_key;

        net_get_random_once(&fnhe_hash_key, sizeof(fnhe_hash_key));
        hval = siphash_1u32((__force u32)daddr, &fnhe_hash_key);
        return hash_64(hval, FNHE_HASH_SHIFT);
static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
        rt->rt_pmtu = fnhe->fnhe_pmtu;
        rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
        rt->dst.expires = fnhe->fnhe_expires;

                rt->rt_flags |= RTCF_REDIRECTED;
                rt->rt_uses_gateway = 1;
                rt->rt_gw_family = AF_INET;
                rt->rt_gw4 = fnhe->fnhe_gw;

static void update_or_create_fnhe(struct fib_nh_common *nhc, __be32 daddr,
                                  __be32 gw, u32 pmtu, bool lock,
                                  unsigned long expires)
        struct fnhe_hash_bucket *hash;
        struct fib_nh_exception *fnhe;

        genid = fnhe_genid(dev_net(nhc->nhc_dev));
        hval = fnhe_hashfun(daddr);

        spin_lock_bh(&fnhe_lock);

        hash = rcu_dereference(nhc->nhc_exceptions);
                hash = kcalloc(FNHE_HASH_SIZE, sizeof(*hash), GFP_ATOMIC);
                rcu_assign_pointer(nhc->nhc_exceptions, hash);

        for (fnhe = rcu_dereference(hash->chain); fnhe;
             fnhe = rcu_dereference(fnhe->fnhe_next)) {
                if (fnhe->fnhe_daddr == daddr)

                if (fnhe->fnhe_genid != genid)
                        fnhe->fnhe_genid = genid;
                        fnhe->fnhe_pmtu = pmtu;
                        fnhe->fnhe_mtu_locked = lock;
                fnhe->fnhe_expires = max(1UL, expires);
                /* Update all cached dsts too */
                rt = rcu_dereference(fnhe->fnhe_rth_input);
                        fill_route_from_fnhe(rt, fnhe);
                rt = rcu_dereference(fnhe->fnhe_rth_output);
                        fill_route_from_fnhe(rt, fnhe);
                /* Randomize max depth to avoid some side-channel attacks. */
                int max_depth = FNHE_RECLAIM_DEPTH +
                                prandom_u32_max(FNHE_RECLAIM_DEPTH);

                while (depth > max_depth) {
                        fnhe_remove_oldest(hash);

                fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
                fnhe->fnhe_next = hash->chain;

                fnhe->fnhe_genid = genid;
                fnhe->fnhe_daddr = daddr;
                fnhe->fnhe_pmtu = pmtu;
                fnhe->fnhe_mtu_locked = lock;
                fnhe->fnhe_expires = max(1UL, expires);

                rcu_assign_pointer(hash->chain, fnhe);

                /* Exception created; mark the cached routes for the nexthop
                 * stale, so anyone caching it rechecks if this exception
                 */
                rt = rcu_dereference(nhc->nhc_rth_input);
                        rt->dst.obsolete = DST_OBSOLETE_KILL;

                for_each_possible_cpu(i) {
                        struct rtable __rcu **prt;

                        prt = per_cpu_ptr(nhc->nhc_pcpu_rth_output, i);
                        rt = rcu_dereference(*prt);
                                rt->dst.obsolete = DST_OBSOLETE_KILL;

        fnhe->fnhe_stamp = jiffies;

        spin_unlock_bh(&fnhe_lock);
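/* Usage sketch (hedged): both learned PMTU values and ICMP redirects
 * funnel through update_or_create_fnhe().  A PMTU update stores an mtu
 * with no gateway, a redirect stores a gateway with no mtu, as in the
 * two call sites later in this file:
 *
 *	update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
 *			      jiffies + net->ipv4.ip_rt_mtu_expires);
 *	update_or_create_fnhe(nhc, fl4->daddr, new_gw, 0, false,
 *			      jiffies + ip_rt_gc_timeout);
 */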
static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
        __be32 new_gw = icmp_hdr(skb)->un.gateway;
        __be32 old_gw = ip_hdr(skb)->saddr;
        struct net_device *dev = skb->dev;
        struct in_device *in_dev;
        struct fib_result res;

        switch (icmp_hdr(skb)->code & 7) {
        case ICMP_REDIR_NETTOS:
        case ICMP_REDIR_HOST:
        case ICMP_REDIR_HOSTTOS:

        if (rt->rt_gw_family != AF_INET || rt->rt_gw4 != old_gw)

        in_dev = __in_dev_get_rcu(dev);

        if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
            ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
            ipv4_is_zeronet(new_gw))
                goto reject_redirect;

        if (!IN_DEV_SHARED_MEDIA(in_dev)) {
                if (!inet_addr_onlink(in_dev, new_gw, old_gw))
                        goto reject_redirect;
                if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
                        goto reject_redirect;
                if (inet_addr_type(net, new_gw) != RTN_UNICAST)
                        goto reject_redirect;

        n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
                n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
                if (!(n->nud_state & NUD_VALID)) {
                        neigh_event_send(n, NULL);
                        if (fib_lookup(net, fl4, &res, 0) == 0) {
                                struct fib_nh_common *nhc;

                                fib_select_path(net, &res, fl4, skb);
                                nhc = FIB_RES_NHC(res);
                                update_or_create_fnhe(nhc, fl4->daddr, new_gw,
                                                      jiffies + ip_rt_gc_timeout);
                                rt->dst.obsolete = DST_OBSOLETE_KILL;
                        call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);

#ifdef CONFIG_IP_ROUTE_VERBOSE
        if (IN_DEV_LOG_MARTIANS(in_dev)) {
                const struct iphdr *iph = (const struct iphdr *)skb->data;
                __be32 daddr = iph->daddr;
                __be32 saddr = iph->saddr;

                net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
                                     "  Advised path = %pI4 -> %pI4\n",
                                     &old_gw, dev->name, &new_gw,

static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
        const struct iphdr *iph = (const struct iphdr *)skb->data;
        struct net *net = dev_net(skb->dev);
        int oif = skb->dev->ifindex;
        u8 tos = RT_TOS(iph->tos);
        u8 prot = iph->protocol;
        u32 mark = skb->mark;

        rt = (struct rtable *)dst;

        __build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
        __ip_do_redirect(rt, skb, &fl4, true);

static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
        struct rtable *rt = (struct rtable *)dst;
        struct dst_entry *ret = dst;

                if (dst->obsolete > 0) {
                } else if ((rt->rt_flags & RTCF_REDIRECTED) ||

/*
 * 1. The first ip_rt_redirect_number redirects are sent
 *    with exponential backoff, then we stop sending them at all,
 *    assuming that the host ignores our redirects.
 * 2. If we did not see packets requiring redirects
 *    during ip_rt_redirect_silence, we assume that the host
 *    forgot the redirected route and start to send redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */
void ip_rt_send_redirect(struct sk_buff *skb)
        struct rtable *rt = skb_rtable(skb);
        struct in_device *in_dev;
        struct inet_peer *peer;

        in_dev = __in_dev_get_rcu(rt->dst.dev);
        if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
        log_martians = IN_DEV_LOG_MARTIANS(in_dev);
        vif = l3mdev_master_ifindex_rcu(rt->dst.dev);

        net = dev_net(rt->dst.dev);
        peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
                icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
                          rt_nexthop(rt, ip_hdr(skb)->daddr));

        /* No redirected packets during ip_rt_redirect_silence;
         * reset the algorithm.
         */
        if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
                peer->rate_tokens = 0;
                peer->n_redirects = 0;

        /* Too many ignored redirects; do not send anything.
         * Set rate_last to the last seen redirected packet.
         */
        if (peer->n_redirects >= ip_rt_redirect_number) {
                peer->rate_last = jiffies;

        /* Check for load limit; set rate_last to the latest sent
         */
        if (peer->n_redirects == 0 ||
                      (ip_rt_redirect_load << peer->n_redirects)))) {
                __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);

                icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
                peer->rate_last = jiffies;
#ifdef CONFIG_IP_ROUTE_VERBOSE
                    peer->n_redirects == ip_rt_redirect_number)
                        net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
                                             &ip_hdr(skb)->saddr, inet_iif(skb),
                                             &ip_hdr(skb)->daddr, &gw);

static int ip_error(struct sk_buff *skb)
        struct rtable *rt = skb_rtable(skb);
        struct net_device *dev = skb->dev;
        struct in_device *in_dev;
        struct inet_peer *peer;

        if (netif_is_l3_master(skb->dev)) {
                dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);

        in_dev = __in_dev_get_rcu(dev);

        /* IP on this device is disabled. */

        net = dev_net(rt->dst.dev);
        if (!IN_DEV_FORWARD(in_dev)) {
                switch (rt->dst.error) {
                        __IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
                        __IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);

        switch (rt->dst.error) {
                code = ICMP_HOST_UNREACH;
                code = ICMP_NET_UNREACH;
                __IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
                code = ICMP_PKT_FILTERED;

        peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
                               l3mdev_master_ifindex(skb->dev), 1);

                peer->rate_tokens += now - peer->rate_last;
                if (peer->rate_tokens > ip_rt_error_burst)
                        peer->rate_tokens = ip_rt_error_burst;
                peer->rate_last = now;
                if (peer->rate_tokens >= ip_rt_error_cost)
                        peer->rate_tokens -= ip_rt_error_cost;

                icmp_send(skb, ICMP_DEST_UNREACH, code, 0);

out:    kfree_skb(skb);
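/* The ICMP error rate limiting above is a classic token bucket: tokens
 * accrue at one jiffy per jiffy of silence, capped at ip_rt_error_burst
 * (5 * HZ), and each ICMP_DEST_UNREACH sent costs ip_rt_error_cost (HZ).
 * With the defaults that allows a burst of five errors to one peer,
 * then a steady one error per second.
 */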
static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
        struct dst_entry *dst = &rt->dst;
        struct net *net = dev_net(dst->dev);
        struct fib_result res;

        if (ip_mtu_locked(dst))

        old_mtu = ipv4_mtu(dst);

        if (mtu < net->ipv4.ip_rt_min_pmtu) {
                mtu = min(old_mtu, net->ipv4.ip_rt_min_pmtu);

        if (rt->rt_pmtu == mtu && !lock &&
            time_before(jiffies, dst->expires - net->ipv4.ip_rt_mtu_expires / 2))

        if (fib_lookup(net, fl4, &res, 0) == 0) {
                struct fib_nh_common *nhc;

                fib_select_path(net, &res, fl4, NULL);
                nhc = FIB_RES_NHC(res);
                update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
                                      jiffies + net->ipv4.ip_rt_mtu_expires);

static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
                              struct sk_buff *skb, u32 mtu,
        struct rtable *rt = (struct rtable *)dst;

        ip_rt_build_flow_key(&fl4, sk, skb);
        ip_rt_fix_tos(&fl4);

        /* Don't make lookup fail for bridged encapsulations */
        if (skb && netif_is_any_bridge_port(skb->dev))

        __ip_rt_update_pmtu(rt, &fl4, mtu);

void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
                      int oif, u8 protocol)
        const struct iphdr *iph = (const struct iphdr *)skb->data;
        u32 mark = IP4_REPLY_MARK(net, skb->mark);

        __build_flow_key(net, &fl4, NULL, iph, oif,
                         RT_TOS(iph->tos), protocol, mark, 0);
        rt = __ip_route_output_key(net, &fl4);
                __ip_rt_update_pmtu(rt, &fl4, mtu);
EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
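/* Usage sketch (hedged): protocol error handlers call this when an
 * ICMP_FRAG_NEEDED arrives for a packet they sent, e.g. a tunnel
 * driver reacting to the ICMP "info" field carrying the new MTU:
 *
 *	ipv4_update_pmtu(skb, dev_net(skb->dev), info, 0, IPPROTO_IPIP);
 *
 * The flow key is rebuilt from the quoted inner header, the route is
 * looked up again, and the learned MTU lands in a nexthop exception.
 */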
static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
        const struct iphdr *iph = (const struct iphdr *)skb->data;

        __build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);

        if (!fl4.flowi4_mark)
                fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);

        rt = __ip_route_output_key(sock_net(sk), &fl4);
                __ip_rt_update_pmtu(rt, &fl4, mtu);

void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
        const struct iphdr *iph = (const struct iphdr *)skb->data;
        struct dst_entry *odst = NULL;
        struct net *net = sock_net(sk);

        if (!ip_sk_accept_pmtu(sk))

        odst = sk_dst_get(sk);

        if (sock_owned_by_user(sk) || !odst) {
                __ipv4_sk_update_pmtu(skb, sk, mtu);

        __build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);

        rt = (struct rtable *)odst;
        if (odst->obsolete && !odst->ops->check(odst, 0)) {
                rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
                ip_rt_fix_tos(&fl4);

        __ip_rt_update_pmtu((struct rtable *)xfrm_dst_path(&rt->dst), &fl4, mtu);

        if (!dst_check(&rt->dst, 0)) {
                        dst_release(&rt->dst);
                rt = ip_route_output_flow(sock_net(sk), &fl4, sk);

                sk_dst_set(sk, &rt->dst);

EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);

void ipv4_redirect(struct sk_buff *skb, struct net *net,
                   int oif, u8 protocol)
        const struct iphdr *iph = (const struct iphdr *)skb->data;

        __build_flow_key(net, &fl4, NULL, iph, oif,
                         RT_TOS(iph->tos), protocol, 0, 0);
        rt = __ip_route_output_key(net, &fl4);
                __ip_do_redirect(rt, skb, &fl4, false);
EXPORT_SYMBOL_GPL(ipv4_redirect);

void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
        const struct iphdr *iph = (const struct iphdr *)skb->data;
        struct net *net = sock_net(sk);

        __build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
        rt = __ip_route_output_key(net, &fl4);
                __ip_do_redirect(rt, skb, &fl4, false);
EXPORT_SYMBOL_GPL(ipv4_sk_redirect);

INDIRECT_CALLABLE_SCOPE struct dst_entry *ipv4_dst_check(struct dst_entry *dst,
        struct rtable *rt = (struct rtable *)dst;

        /* All IPV4 dsts are created with ->obsolete set to the value
         * DST_OBSOLETE_FORCE_CHK which forces validation calls down
         * into this function always.
         *
         * When a PMTU/redirect information update invalidates a route,
         * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
         * DST_OBSOLETE_DEAD.
         */
        if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
EXPORT_INDIRECT_CALLABLE(ipv4_dst_check);

static void ipv4_send_dest_unreach(struct sk_buff *skb)
        struct ip_options opt;

        /* Recompile ip options since IPCB may not be valid anymore.
         * Also check we have a reasonable ipv4 header.
         */
        if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
            ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)

        memset(&opt, 0, sizeof(opt));
        if (ip_hdr(skb)->ihl > 5) {
                if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
                opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);

                res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);

        __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);

static void ipv4_link_failure(struct sk_buff *skb)

        ipv4_send_dest_unreach(skb);

        rt = skb_rtable(skb);
                dst_set_expires(&rt->dst, 0);

static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
        pr_debug("%s: %pI4 -> %pI4, %s\n",
                 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
                 skb->dev ? skb->dev->name : "?");

/*
 * We do not cache the source address of the outgoing interface,
 * because it is used only by IP RR, TS and SRR options,
 * so it is out of the fast path.
 *
 * BTW remember: "addr" is allowed to be unaligned.
 */

void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)

        if (rt_is_output_route(rt))
                src = ip_hdr(skb)->saddr;
                struct fib_result res;
                struct iphdr *iph = ip_hdr(skb);
                struct flowi4 fl4 = {
                        .daddr = iph->daddr,
                        .saddr = iph->saddr,
                        .flowi4_tos = RT_TOS(iph->tos),
                        .flowi4_oif = rt->dst.dev->ifindex,
                        .flowi4_iif = skb->dev->ifindex,
                        .flowi4_mark = skb->mark,

                if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
                        src = fib_result_prefsrc(dev_net(rt->dst.dev), &res);
                        src = inet_select_addr(rt->dst.dev,
                                               rt_nexthop(rt, iph->daddr),

        memcpy(addr, &src, 4);

#ifdef CONFIG_IP_ROUTE_CLASSID
static void set_class_tag(struct rtable *rt, u32 tag)
        if (!(rt->dst.tclassid & 0xFFFF))
                rt->dst.tclassid |= tag & 0xFFFF;
        if (!(rt->dst.tclassid & 0xFFFF0000))
                rt->dst.tclassid |= tag & 0xFFFF0000;

static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
        struct net *net = dev_net(dst->dev);
        unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
        unsigned int advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
                                    net->ipv4.ip_rt_min_advmss);

        return min(advmss, IPV4_MAX_PMTU - header_size);

INDIRECT_CALLABLE_SCOPE unsigned int ipv4_mtu(const struct dst_entry *dst)
        return ip_dst_mtu_maybe_forward(dst, false);
EXPORT_INDIRECT_CALLABLE(ipv4_mtu);
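/* A note on dispatch (hedged): generic code never calls ipv4_mtu()
 * directly; dst_mtu(dst) goes through dst->ops->mtu, and the
 * INDIRECT_CALLABLE machinery lets hot paths devirtualize the call:
 *
 *	mtu = dst_mtu(&rt->dst);	// reaches ipv4_mtu() for IPv4 dsts
 */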
static void ip_del_fnhe(struct fib_nh_common *nhc, __be32 daddr)
        struct fnhe_hash_bucket *hash;
        struct fib_nh_exception *fnhe, __rcu **fnhe_p;
        u32 hval = fnhe_hashfun(daddr);

        spin_lock_bh(&fnhe_lock);

        hash = rcu_dereference_protected(nhc->nhc_exceptions,
                                         lockdep_is_held(&fnhe_lock));

        fnhe_p = &hash->chain;
        fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
                if (fnhe->fnhe_daddr == daddr) {
                        rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
                                fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
                        /* set fnhe_daddr to 0 to ensure it won't bind with
                         * new dsts in rt_bind_exception().
                         */
                        fnhe->fnhe_daddr = 0;
                        fnhe_flush_routes(fnhe);
                        kfree_rcu(fnhe, rcu);
                fnhe_p = &fnhe->fnhe_next;
                fnhe = rcu_dereference_protected(fnhe->fnhe_next,
                                                 lockdep_is_held(&fnhe_lock));

        spin_unlock_bh(&fnhe_lock);

static struct fib_nh_exception *find_exception(struct fib_nh_common *nhc,
        struct fnhe_hash_bucket *hash = rcu_dereference(nhc->nhc_exceptions);
        struct fib_nh_exception *fnhe;

        hval = fnhe_hashfun(daddr);

        for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
             fnhe = rcu_dereference(fnhe->fnhe_next)) {
                if (fnhe->fnhe_daddr == daddr) {
                        if (fnhe->fnhe_expires &&
                            time_after(jiffies, fnhe->fnhe_expires)) {
                                ip_del_fnhe(nhc, daddr);

/* MTU selection:
 * 1. mtu on route is locked - use it
 * 2. mtu from nexthop exception
 * 3. mtu from egress device
 */
u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr)
        struct fib_nh_common *nhc = res->nhc;
        struct net_device *dev = nhc->nhc_dev;
        struct fib_info *fi = res->fi;

        if (dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu ||
            fi->fib_metrics->metrics[RTAX_LOCK - 1] & (1 << RTAX_MTU))
                struct fib_nh_exception *fnhe;

                fnhe = find_exception(nhc, daddr);
                if (fnhe && !time_after_eq(jiffies, fnhe->fnhe_expires))
                        mtu = fnhe->fnhe_pmtu;

                mtu = min(READ_ONCE(dev->mtu), IP_MAX_MTU);

        return mtu - lwtunnel_headroom(nhc->nhc_lwtstate, mtu);
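/* Worked example of the precedence above: for a forwarded packet to
 * daddr, a locked route mtu always wins; otherwise a live nexthop
 * exception (say a learned PMTU of 1400) overrides the egress device
 * mtu of 1500, and whatever is chosen is then shrunk by any
 * lightweight-tunnel encapsulation headroom on the nexthop.
 */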
static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
                              __be32 daddr, const bool do_cache)

        spin_lock_bh(&fnhe_lock);

        if (daddr == fnhe->fnhe_daddr) {
                struct rtable __rcu **porig;
                struct rtable *orig;
                int genid = fnhe_genid(dev_net(rt->dst.dev));

                if (rt_is_input_route(rt))
                        porig = &fnhe->fnhe_rth_input;
                else
                        porig = &fnhe->fnhe_rth_output;
                orig = rcu_dereference(*porig);

                if (fnhe->fnhe_genid != genid) {
                        fnhe->fnhe_genid = genid;
                        fnhe->fnhe_pmtu = 0;
                        fnhe->fnhe_expires = 0;
                        fnhe->fnhe_mtu_locked = false;
                        fnhe_flush_routes(fnhe);

                fill_route_from_fnhe(rt, fnhe);
                        rt->rt_gw_family = AF_INET;

                        rcu_assign_pointer(*porig, rt);
                                dst_dev_put(&orig->dst);
                                dst_release(&orig->dst);

                fnhe->fnhe_stamp = jiffies;

        spin_unlock_bh(&fnhe_lock);

static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt)
        struct rtable *orig, *prev, **p;

        if (rt_is_input_route(rt)) {
                p = (struct rtable **)&nhc->nhc_rth_input;
        } else {
                p = (struct rtable **)raw_cpu_ptr(nhc->nhc_pcpu_rth_output);

        /* hold dst before doing cmpxchg() to avoid race condition
         */
        prev = cmpxchg(p, orig, rt);
                rt_add_uncached_list(orig);
                dst_release(&orig->dst);
                dst_release(&rt->dst);

struct uncached_list {
        struct list_head	head;
        struct list_head	quarantine;

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);

void rt_add_uncached_list(struct rtable *rt)
        struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);

        rt->rt_uncached_list = ul;

        spin_lock_bh(&ul->lock);
        list_add_tail(&rt->rt_uncached, &ul->head);
        spin_unlock_bh(&ul->lock);

void rt_del_uncached_list(struct rtable *rt)
        if (!list_empty(&rt->rt_uncached)) {
                struct uncached_list *ul = rt->rt_uncached_list;

                spin_lock_bh(&ul->lock);
                list_del_init(&rt->rt_uncached);
                spin_unlock_bh(&ul->lock);

static void ipv4_dst_destroy(struct dst_entry *dst)
        struct rtable *rt = (struct rtable *)dst;

        ip_dst_metrics_put(dst);
        rt_del_uncached_list(rt);

void rt_flush_dev(struct net_device *dev)
        struct rtable *rt, *safe;

        for_each_possible_cpu(cpu) {
                struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);

                if (list_empty(&ul->head))

                spin_lock_bh(&ul->lock);
                list_for_each_entry_safe(rt, safe, &ul->head, rt_uncached) {
                        if (rt->dst.dev != dev)
                        rt->dst.dev = blackhole_netdev;
                        dev_replace_track(dev, blackhole_netdev,
                                          &rt->dst.dev_tracker,
                        list_move(&rt->rt_uncached, &ul->quarantine);
                spin_unlock_bh(&ul->lock);
static bool rt_cache_valid(const struct rtable *rt)
               rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&

static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
                           const struct fib_result *res,
                           struct fib_nh_exception *fnhe,
                           struct fib_info *fi, u16 type, u32 itag,
                           const bool do_cache)
        bool cached = false;

                struct fib_nh_common *nhc = FIB_RES_NHC(*res);

                if (nhc->nhc_gw_family && nhc->nhc_scope == RT_SCOPE_LINK) {
                        rt->rt_uses_gateway = 1;
                        rt->rt_gw_family = nhc->nhc_gw_family;
                        /* only INET and INET6 are supported */
                        if (likely(nhc->nhc_gw_family == AF_INET))
                                rt->rt_gw4 = nhc->nhc_gw.ipv4;
                                rt->rt_gw6 = nhc->nhc_gw.ipv6;

                ip_dst_init_metrics(&rt->dst, fi->fib_metrics);

#ifdef CONFIG_IP_ROUTE_CLASSID
                if (nhc->nhc_family == AF_INET) {
                        struct fib_nh *nh;

                        nh = container_of(nhc, struct fib_nh, nh_common);
                        rt->dst.tclassid = nh->nh_tclassid;

                rt->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
                        cached = rt_bind_exception(rt, fnhe, daddr, do_cache);
                        cached = rt_cache_route(nhc, rt);
                if (unlikely(!cached)) {
                        /* Routes we intend to cache in nexthop exception or
                         * FIB nexthop have the DST_NOCACHE bit clear.
                         * However, if we are unsuccessful at storing this
                         * route into the cache we really need to set it.
                         */
                                rt->rt_gw_family = AF_INET;
                        rt_add_uncached_list(rt);
                rt_add_uncached_list(rt);

#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
        set_class_tag(rt, res->tclassid);
        set_class_tag(rt, itag);

struct rtable *rt_dst_alloc(struct net_device *dev,
                            unsigned int flags, u16 type,
                            bool nopolicy, bool noxfrm)

        rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
                       (nopolicy ? DST_NOPOLICY : 0) |
                       (noxfrm ? DST_NOXFRM : 0));
                rt->rt_genid = rt_genid_ipv4(dev_net(dev));
                rt->rt_flags = flags;
                rt->rt_is_input = 0;
                rt->rt_mtu_locked = 0;
                rt->rt_uses_gateway = 0;
                rt->rt_gw_family = 0;
                INIT_LIST_HEAD(&rt->rt_uncached);

                rt->dst.output = ip_output;
                if (flags & RTCF_LOCAL)
                        rt->dst.input = ip_local_deliver;
EXPORT_SYMBOL(rt_dst_alloc);

struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt)
        struct rtable *new_rt;

        new_rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
                new_rt->rt_genid = rt_genid_ipv4(dev_net(dev));
                new_rt->rt_flags = rt->rt_flags;
                new_rt->rt_type = rt->rt_type;
                new_rt->rt_is_input = rt->rt_is_input;
                new_rt->rt_iif = rt->rt_iif;
                new_rt->rt_pmtu = rt->rt_pmtu;
                new_rt->rt_mtu_locked = rt->rt_mtu_locked;
                new_rt->rt_gw_family = rt->rt_gw_family;
                if (rt->rt_gw_family == AF_INET)
                        new_rt->rt_gw4 = rt->rt_gw4;
                else if (rt->rt_gw_family == AF_INET6)
                        new_rt->rt_gw6 = rt->rt_gw6;
                INIT_LIST_HEAD(&new_rt->rt_uncached);

                new_rt->dst.input = rt->dst.input;
                new_rt->dst.output = rt->dst.output;
                new_rt->dst.error = rt->dst.error;
                new_rt->dst.lastuse = jiffies;
                new_rt->dst.lwtstate = lwtstate_get(rt->dst.lwtstate);
EXPORT_SYMBOL(rt_dst_clone);
/* called in rcu_read_lock() section */
int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                          u8 tos, struct net_device *dev,
                          struct in_device *in_dev, u32 *itag)

        /* Primary sanity checks. */

        if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
            skb->protocol != htons(ETH_P_IP))

        if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))

        if (ipv4_is_zeronet(saddr)) {
                if (!ipv4_is_local_multicast(daddr) &&
                    ip_hdr(skb)->protocol != IPPROTO_IGMP)

                err = fib_validate_source(skb, saddr, 0, tos, 0, dev,

/* called in rcu_read_lock() section */
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                             u8 tos, struct net_device *dev, int our)
        struct in_device *in_dev = __in_dev_get_rcu(dev);
        unsigned int flags = RTCF_MULTICAST;

        err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag);

                flags |= RTCF_LOCAL;

        no_policy = IN_DEV_ORCONF(in_dev, NOPOLICY);
                IPCB(skb)->flags |= IPSKB_NOPOLICY;

        rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,

#ifdef CONFIG_IP_ROUTE_CLASSID
        rth->dst.tclassid = itag;
        rth->dst.output = ip_rt_bug;
        rth->rt_is_input = 1;

#ifdef CONFIG_IP_MROUTE
        if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
                rth->dst.input = ip_mr_input;
        RT_CACHE_STAT_INC(in_slow_mc);

        skb_dst_set(skb, &rth->dst);

static void ip_handle_martian_source(struct net_device *dev,
                                     struct in_device *in_dev,
                                     struct sk_buff *skb,

        RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
        if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
                /*
                 *	RFC1812 recommendation: if the source is martian,
                 *	the only hint is the MAC header.
                 */
                pr_warn("martian source %pI4 from %pI4, on dev %s\n",
                        &daddr, &saddr, dev->name);
                if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
                        print_hex_dump(KERN_WARNING, "ll header: ",
                                       DUMP_PREFIX_OFFSET, 16, 1,
                                       skb_mac_header(skb),
                                       dev->hard_header_len, false);

/* called in rcu_read_lock() section */
static int __mkroute_input(struct sk_buff *skb,
                           const struct fib_result *res,
                           struct in_device *in_dev,
                           __be32 daddr, __be32 saddr, u32 tos)
        struct fib_nh_common *nhc = FIB_RES_NHC(*res);
        struct net_device *dev = nhc->nhc_dev;
        struct fib_nh_exception *fnhe;
        struct in_device *out_dev;
        bool do_cache, no_policy;

        /* get a working reference to the output device */
        out_dev = __in_dev_get_rcu(dev);
                net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");

        err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
                                  in_dev->dev, in_dev, &itag);
                ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,

        do_cache = res->fi && !itag;
        if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
            skb->protocol == htons(ETH_P_IP)) {

                gw = nhc->nhc_gw_family == AF_INET ? nhc->nhc_gw.ipv4 : 0;
                if (IN_DEV_SHARED_MEDIA(out_dev) ||
                    inet_addr_onlink(out_dev, saddr, gw))
                        IPCB(skb)->flags |= IPSKB_DOREDIRECT;

        if (skb->protocol != htons(ETH_P_IP)) {
                /* Not IP (i.e. ARP).  Do not create a route if it is
                 * invalid for proxy arp.  DNAT routes are always valid.
                 *
                 * The proxy arp feature has been extended to allow ARP
                 * replies back out the same interface, to support
                 * Private VLAN switch technologies.  See arp.c.
                 */
                if (out_dev == in_dev &&
                    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {

        no_policy = IN_DEV_ORCONF(in_dev, NOPOLICY);
                IPCB(skb)->flags |= IPSKB_NOPOLICY;

        fnhe = find_exception(nhc, daddr);
                rth = rcu_dereference(fnhe->fnhe_rth_input);
                rth = rcu_dereference(nhc->nhc_rth_input);
        if (rt_cache_valid(rth)) {
                skb_dst_set_noref(skb, &rth->dst);

        rth = rt_dst_alloc(out_dev->dev, 0, res->type, no_policy,
                           IN_DEV_ORCONF(out_dev, NOXFRM));

        rth->rt_is_input = 1;
        RT_CACHE_STAT_INC(in_slow_tot);

        rth->dst.input = ip_forward;

        rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag,
        lwtunnel_set_redirect(&rth->dst);
        skb_dst_set(skb, &rth->dst);

#ifdef CONFIG_IP_ROUTE_MULTIPATH
/* To make ICMP packets follow the right flow, the multipath hash is
 * calculated from the inner IP addresses.
 */
static void ip_multipath_l3_keys(const struct sk_buff *skb,
                                 struct flow_keys *hash_keys)
        const struct iphdr *outer_iph = ip_hdr(skb);
        const struct iphdr *key_iph = outer_iph;
        const struct iphdr *inner_iph;
        const struct icmphdr *icmph;
        struct iphdr _inner_iph;
        struct icmphdr _icmph;

        if (likely(outer_iph->protocol != IPPROTO_ICMP))
        if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))

        icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),

        if (!icmp_is_err(icmph->type))

        inner_iph = skb_header_pointer(skb,
                                       outer_iph->ihl * 4 + sizeof(_icmph),
                                       sizeof(_inner_iph), &_inner_iph);

        key_iph = inner_iph;
        hash_keys->addrs.v4addrs.src = key_iph->saddr;
        hash_keys->addrs.v4addrs.dst = key_iph->daddr;

static u32 fib_multipath_custom_hash_outer(const struct net *net,
                                           const struct sk_buff *skb,
        u32 hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields;
        struct flow_keys keys, hash_keys;

        if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))

        memset(&hash_keys, 0, sizeof(hash_keys));
        skb_flow_dissect_flow_keys(skb, &keys, FLOW_DISSECTOR_F_STOP_AT_ENCAP);

        hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
        if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
                hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
        if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
                hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
        if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
                hash_keys.basic.ip_proto = keys.basic.ip_proto;
        if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
                hash_keys.ports.src = keys.ports.src;
        if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
                hash_keys.ports.dst = keys.ports.dst;

        *p_has_inner = !!(keys.control.flags & FLOW_DIS_ENCAPSULATION);
        return flow_hash_from_keys(&hash_keys);

static u32 fib_multipath_custom_hash_inner(const struct net *net,
                                           const struct sk_buff *skb,
        u32 hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields;
        struct flow_keys keys, hash_keys;

        /* We assume the packet carries an encapsulation, but if none was
         * encountered during dissection of the outer flow, then there is no
         * point in calling the flow dissector again.
         */

        if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK))

        memset(&hash_keys, 0, sizeof(hash_keys));
        skb_flow_dissect_flow_keys(skb, &keys, 0);

        if (!(keys.control.flags & FLOW_DIS_ENCAPSULATION))

        if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
                if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
                        hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
                if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
                        hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
        } else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
                hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
                if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
                        hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
                if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
                        hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
                if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
                        hash_keys.tags.flow_label = keys.tags.flow_label;

        if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
                hash_keys.basic.ip_proto = keys.basic.ip_proto;
        if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
                hash_keys.ports.src = keys.ports.src;
        if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
                hash_keys.ports.dst = keys.ports.dst;

        return flow_hash_from_keys(&hash_keys);

static u32 fib_multipath_custom_hash_skb(const struct net *net,
                                         const struct sk_buff *skb)
        u32 mhash, mhash_inner;
        bool has_inner = true;

        mhash = fib_multipath_custom_hash_outer(net, skb, &has_inner);
        mhash_inner = fib_multipath_custom_hash_inner(net, skb, has_inner);

        return jhash_2words(mhash, mhash_inner, 0);

static u32 fib_multipath_custom_hash_fl4(const struct net *net,
                                         const struct flowi4 *fl4)
        u32 hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields;
        struct flow_keys hash_keys;

        if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))

        memset(&hash_keys, 0, sizeof(hash_keys));
        hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
        if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
                hash_keys.addrs.v4addrs.src = fl4->saddr;
        if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
                hash_keys.addrs.v4addrs.dst = fl4->daddr;
        if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
                hash_keys.basic.ip_proto = fl4->flowi4_proto;
        if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
                hash_keys.ports.src = fl4->fl4_sport;
        if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
                hash_keys.ports.dst = fl4->fl4_dport;

        return flow_hash_from_keys(&hash_keys);
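/* Configuration sketch (hedged): the FIB_MULTIPATH_HASH_FIELD_* bits
 * tested above come from the fib_multipath_hash_fields sysctl, used
 * when the hash policy selects custom hashing.  For example, hashing
 * ECMP traffic on src/dst IP plus L4 ports would look roughly like:
 *
 *	sysctl net.ipv4.fib_multipath_hash_policy=3
 *	sysctl net.ipv4.fib_multipath_hash_fields=0x0033
 *
 * (0x0033 assuming SRC_IP/DST_IP are bits 0-1 and the port fields are
 * bits 4-5; see the field definitions in <net/ip_fib.h>.)
 */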
/* if skb is set it will be used and fl4 can be NULL */
int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
                       const struct sk_buff *skb, struct flow_keys *flkeys)
        u32 multipath_hash = fl4 ? fl4->flowi4_multipath_hash : 0;
        struct flow_keys hash_keys;

        switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
                memset(&hash_keys, 0, sizeof(hash_keys));
                hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
                        ip_multipath_l3_keys(skb, &hash_keys);
                        hash_keys.addrs.v4addrs.src = fl4->saddr;
                        hash_keys.addrs.v4addrs.dst = fl4->daddr;
                mhash = flow_hash_from_keys(&hash_keys);

                /* skb is currently provided only when forwarding */
                        unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
                        struct flow_keys keys;

                        /* short-circuit if we already have L4 hash present */
                                return skb_get_hash_raw(skb) >> 1;

                        memset(&hash_keys, 0, sizeof(hash_keys));
                        skb_flow_dissect_flow_keys(skb, &keys, flag);

                        hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
                        hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
                        hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
                        hash_keys.ports.src = flkeys->ports.src;
                        hash_keys.ports.dst = flkeys->ports.dst;
                        hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
                        memset(&hash_keys, 0, sizeof(hash_keys));
                        hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
                        hash_keys.addrs.v4addrs.src = fl4->saddr;
                        hash_keys.addrs.v4addrs.dst = fl4->daddr;
                        hash_keys.ports.src = fl4->fl4_sport;
                        hash_keys.ports.dst = fl4->fl4_dport;
                        hash_keys.basic.ip_proto = fl4->flowi4_proto;
                mhash = flow_hash_from_keys(&hash_keys);

                memset(&hash_keys, 0, sizeof(hash_keys));
                /* skb is currently provided only when forwarding */
                        struct flow_keys keys;

                        skb_flow_dissect_flow_keys(skb, &keys, 0);
                        /* Inner can be v4 or v6 */
                        if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                                hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
                                hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
                                hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
                        } else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
                                hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
                                hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
                                hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
                                hash_keys.tags.flow_label = keys.tags.flow_label;
                                hash_keys.basic.ip_proto = keys.basic.ip_proto;
                                /* Same as case 0 */
                                hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
                                ip_multipath_l3_keys(skb, &hash_keys);
                        /* Same as case 0 */
                        hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
                        hash_keys.addrs.v4addrs.src = fl4->saddr;
                        hash_keys.addrs.v4addrs.dst = fl4->daddr;
                mhash = flow_hash_from_keys(&hash_keys);

                mhash = fib_multipath_custom_hash_skb(net, skb);
                mhash = fib_multipath_custom_hash_fl4(net, fl4);

                mhash = jhash_2words(mhash, multipath_hash, 0);

#endif /* CONFIG_IP_ROUTE_MULTIPATH */

static int ip_mkroute_input(struct sk_buff *skb,
                            struct fib_result *res,
                            struct in_device *in_dev,
                            __be32 daddr, __be32 saddr, u32 tos,
                            struct flow_keys *hkeys)
#ifdef CONFIG_IP_ROUTE_MULTIPATH
        if (res->fi && fib_info_num_path(res->fi) > 1) {
                int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);

                fib_select_multipath(res, h);

        /* create a routing cache entry */
        return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
/* Implements all the saddr-related checks as ip_route_input_slow(),
 * assuming daddr is valid and the destination is not a local broadcast one.
 * Uses the provided hint instead of performing a route lookup.
 */
int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                      u8 tos, struct net_device *dev,
                      const struct sk_buff *hint)
        struct in_device *in_dev = __in_dev_get_rcu(dev);
        struct rtable *rt = skb_rtable(hint);
        struct net *net = dev_net(dev);

        if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
                goto martian_source;

        if (ipv4_is_zeronet(saddr))
                goto martian_source;

        if (ipv4_is_loopback(saddr) && !IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
                goto martian_source;

        if (rt->rt_type != RTN_LOCAL)
                goto skip_validate_source;

        tos &= IPTOS_RT_MASK;
        err = fib_validate_source(skb, saddr, daddr, tos, 0, dev, in_dev, &tag);
                goto martian_source;

skip_validate_source:
        skb_dst_copy(skb, hint);

        ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);

/* get device for dst_alloc with local routes */
static struct net_device *ip_rt_get_dev(struct net *net,
                                        const struct fib_result *res)
        struct fib_nh_common *nhc = res->fi ? res->nhc : NULL;
        struct net_device *dev = NULL;

                dev = l3mdev_master_dev_rcu(nhc->nhc_dev);

        return dev ? : net->loopback_dev;

/*
 * NOTE.  We drop all packets that have a local source address,
 * because every properly looped-back packet must have the correct
 * destination already attached by the output routine.  Changes in
 * the enforced policies must also be applied to ip_route_use_hint().
 *
 * Such an approach solves two big problems:
 * 1. Non-simplex devices are handled properly.
 * 2. IP spoofing attempts are filtered with 100% guarantee.
 *
 * called with rcu_read_lock()
 */

static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                               u8 tos, struct net_device *dev,
                               struct fib_result *res)
        struct in_device *in_dev = __in_dev_get_rcu(dev);
        struct flow_keys *flkeys = NULL, _flkeys;
        struct net *net = dev_net(dev);
        struct ip_tunnel_info *tun_info;
        unsigned int flags = 0;
        bool do_cache = true;

        /* IP on this device is disabled. */

        /* Check for the most weird martians, which cannot be detected

        tun_info = skb_tunnel_info(skb);
        if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
                fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
        else
                fl4.flowi4_tun_key.tun_id = 0;

        if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
                goto martian_source;

        if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))

        /* Accept zero addresses only to limited broadcast;
         * I do not even know whether to fix this or not.  Waiting for complaints :-)
         */
        if (ipv4_is_zeronet(saddr))
                goto martian_source;

        if (ipv4_is_zeronet(daddr))
                goto martian_destination;

        /* The following code tries to avoid calling IN_DEV_NET_ROUTE_LOCALNET()
         * more than once when daddr and/or saddr are loopback addresses.
         */
        if (ipv4_is_loopback(daddr)) {
                if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
                        goto martian_destination;
        } else if (ipv4_is_loopback(saddr)) {
                if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
                        goto martian_source;

        /*
         *	Now we are ready to route packet.
         */
2288 fl4.flowi4_l3mdev = 0;
2290 fl4.flowi4_iif = dev->ifindex;
2291 fl4.flowi4_mark = skb->mark;
2292 fl4.flowi4_tos = tos;
2293 fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
2294 fl4.flowi4_flags = 0;
2297 fl4.flowi4_uid = sock_net_uid(net, NULL);
2298 fl4.flowi4_multipath_hash = 0;
2300 if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) {
2303 fl4.flowi4_proto = 0;
2308 err = fib_lookup(net, &fl4, res, 0);
2310 if (!IN_DEV_FORWARD(in_dev))
2311 err = -EHOSTUNREACH;
2315 if (res->type == RTN_BROADCAST) {
2316 if (IN_DEV_BFORWARD(in_dev))
2318 /* not do cache if bc_forwarding is enabled */
2319 if (IPV4_DEVCONF_ALL(net, BC_FORWARDING))
2324 if (res->type == RTN_LOCAL) {
2325 err = fib_validate_source(skb, saddr, daddr, tos,
2326 0, dev, in_dev, &itag);
2328 goto martian_source;
2332 if (!IN_DEV_FORWARD(in_dev)) {
2333 err = -EHOSTUNREACH;
2336 if (res->type != RTN_UNICAST)
2337 goto martian_destination;
2340 err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos, flkeys);
2344 if (skb->protocol != htons(ETH_P_IP))
2347 if (!ipv4_is_zeronet(saddr)) {
2348 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
2351 goto martian_source;
2353 flags |= RTCF_BROADCAST;
2354 res->type = RTN_BROADCAST;
2355 RT_CACHE_STAT_INC(in_brd);
2358 no_policy = IN_DEV_ORCONF(in_dev, NOPOLICY);
2360 IPCB(skb)->flags |= IPSKB_NOPOLICY;
2362 do_cache &= res->fi && !itag;
2364 struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2366 rth = rcu_dereference(nhc->nhc_rth_input);
2367 if (rt_cache_valid(rth)) {
2368 skb_dst_set_noref(skb, &rth->dst);
2374 rth = rt_dst_alloc(ip_rt_get_dev(net, res),
2375 flags | RTCF_LOCAL, res->type,
2380 rth->dst.output= ip_rt_bug;
2381 #ifdef CONFIG_IP_ROUTE_CLASSID
2382 rth->dst.tclassid = itag;
2384 rth->rt_is_input = 1;
2386 RT_CACHE_STAT_INC(in_slow_tot);
2387 if (res->type == RTN_UNREACHABLE) {
2388 rth->dst.input= ip_error;
2389 rth->dst.error= -err;
2390 rth->rt_flags &= ~RTCF_LOCAL;
2394 struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2396 rth->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
2397 if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
2398 WARN_ON(rth->dst.input == lwtunnel_input);
2399 rth->dst.lwtstate->orig_input = rth->dst.input;
2400 rth->dst.input = lwtunnel_input;
2403 if (unlikely(!rt_cache_route(nhc, rth)))
2404 rt_add_uncached_list(rth);
2406 skb_dst_set(skb, &rth->dst);
2411 RT_CACHE_STAT_INC(in_no_route);
2412 res->type = RTN_UNREACHABLE;
2418 * Do not cache martian addresses: they should be logged (RFC1812)
2420 martian_destination:
2421 RT_CACHE_STAT_INC(in_martian_dst);
2422 #ifdef CONFIG_IP_ROUTE_VERBOSE
2423 if (IN_DEV_LOG_MARTIANS(in_dev))
2424 net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
2425 &daddr, &saddr, dev->name);
2437 ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2441 int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2442 u8 tos, struct net_device *dev)
2444 struct fib_result res;
2447 tos &= IPTOS_RT_MASK;
2449 err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res);
2454 EXPORT_SYMBOL(ip_route_input_noref);
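/* A minimal usage sketch (illustration only, not part of this file): a
 * hypothetical receive path resolving the input route for an skb and then
 * handing it to the attached dst. This is essentially what ip_rcv_finish()
 * does on the normal input path.
 */
#if 0 /* example only */
static int example_route_and_deliver(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	int err;

	err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
				   iph->tos, skb->dev);
	if (err)
		return err;		/* e.g. -EHOSTUNREACH for martians */

	return dst_input(skb);		/* ip_local_deliver() or ip_forward() */
}
#endif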
2456 /* called with rcu_read_lock held */
2457 int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2458 u8 tos, struct net_device *dev, struct fib_result *res)
2460 /* Multicast recognition logic was moved from the route cache to here.
2461 * The problem was that too many Ethernet cards have broken/missing
2462 * hardware multicast filters :-( As a result, a host on a multicast
2463 * network acquires a lot of useless route cache entries, e.g. for
2464 * SDR messages from all over the world. Now we try to get rid of them.
2465 * Really, provided the software IP multicast filter is organized
2466 * reasonably (at least, hashed), it does not result in a slowdown
2467 * compared with route cache reject entries.
2468 * Note that multicast routers are not affected, because a
2469 * route cache entry is created eventually.
2471 if (ipv4_is_multicast(daddr)) {
2472 struct in_device *in_dev = __in_dev_get_rcu(dev);
2478 our = ip_check_mc_rcu(in_dev, daddr, saddr,
2479 ip_hdr(skb)->protocol);
2481 /* check l3 master if no match yet */
2482 if (!our && netif_is_l3_slave(dev)) {
2483 struct in_device *l3_in_dev;
2485 l3_in_dev = __in_dev_get_rcu(skb->dev);
2487 our = ip_check_mc_rcu(l3_in_dev, daddr, saddr,
2488 ip_hdr(skb)->protocol);
2492 #ifdef CONFIG_IP_MROUTE
2494 (!ipv4_is_local_multicast(daddr) &&
2495 IN_DEV_MFORWARD(in_dev))
2498 err = ip_route_input_mc(skb, daddr, saddr,
2504 return ip_route_input_slow(skb, daddr, saddr, tos, dev, res);
2507 /* called with rcu_read_lock() */
2508 static struct rtable *__mkroute_output(const struct fib_result *res,
2509 const struct flowi4 *fl4, int orig_oif,
2510 struct net_device *dev_out,
2513 struct fib_info *fi = res->fi;
2514 struct fib_nh_exception *fnhe;
2515 struct in_device *in_dev;
2516 u16 type = res->type;
2520 in_dev = __in_dev_get_rcu(dev_out);
2522 return ERR_PTR(-EINVAL);
2524 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
2525 if (ipv4_is_loopback(fl4->saddr) &&
2526 !(dev_out->flags & IFF_LOOPBACK) &&
2527 !netif_is_l3_master(dev_out))
2528 return ERR_PTR(-EINVAL);
2530 if (ipv4_is_lbcast(fl4->daddr))
2531 type = RTN_BROADCAST;
2532 else if (ipv4_is_multicast(fl4->daddr))
2533 type = RTN_MULTICAST;
2534 else if (ipv4_is_zeronet(fl4->daddr))
2535 return ERR_PTR(-EINVAL);
2537 if (dev_out->flags & IFF_LOOPBACK)
2538 flags |= RTCF_LOCAL;
2541 if (type == RTN_BROADCAST) {
2542 flags |= RTCF_BROADCAST | RTCF_LOCAL;
2544 } else if (type == RTN_MULTICAST) {
2545 flags |= RTCF_MULTICAST | RTCF_LOCAL;
2546 if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
2548 flags &= ~RTCF_LOCAL;
2551 /* If a multicast route does not exist, use
2552 * the default one, but do not gateway in this case.
2555 if (fi && res->prefixlen < 4)
2557 } else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
2558 (orig_oif != dev_out->ifindex)) {
2559 /* For local routes that require a particular output interface
2560 * we do not want to cache the result. Caching the result
2561 * causes incorrect behaviour when there are multiple source
2562 * addresses on the interface, the end result being that if the
2563 * intended recipient is waiting on that interface for the
2564 * packet, it won't receive it because it will be delivered on
2565 * the loopback interface and the IP_PKTINFO ipi_ifindex will
2566 * be set to the loopback interface as well.
2572 do_cache &= fi != NULL;
2574 struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2575 struct rtable __rcu **prth;
2577 fnhe = find_exception(nhc, fl4->daddr);
2581 prth = &fnhe->fnhe_rth_output;
2583 if (unlikely(fl4->flowi4_flags &
2584 FLOWI_FLAG_KNOWN_NH &&
2585 !(nhc->nhc_gw_family &&
2586 nhc->nhc_scope == RT_SCOPE_LINK))) {
2590 prth = raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
2592 rth = rcu_dereference(*prth);
2593 if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))
2598 rth = rt_dst_alloc(dev_out, flags, type,
2599 IN_DEV_ORCONF(in_dev, NOPOLICY),
2600 IN_DEV_ORCONF(in_dev, NOXFRM));
2602 return ERR_PTR(-ENOBUFS);
2604 rth->rt_iif = orig_oif;
2606 RT_CACHE_STAT_INC(out_slow_tot);
2608 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2609 if (flags & RTCF_LOCAL &&
2610 !(dev_out->flags & IFF_LOOPBACK)) {
2611 rth->dst.output = ip_mc_output;
2612 RT_CACHE_STAT_INC(out_slow_mc);
2614 #ifdef CONFIG_IP_MROUTE
2615 if (type == RTN_MULTICAST) {
2616 if (IN_DEV_MFORWARD(in_dev) &&
2617 !ipv4_is_local_multicast(fl4->daddr)) {
2618 rth->dst.input = ip_mr_input;
2619 rth->dst.output = ip_mc_output;
2625 rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0, do_cache);
2626 lwtunnel_set_redirect(&rth->dst);
2632 * Major route resolver routine.
2635 struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
2636 const struct sk_buff *skb)
2638 struct fib_result res = {
2646 fl4->flowi4_iif = LOOPBACK_IFINDEX;
2650 rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
2655 EXPORT_SYMBOL_GPL(ip_route_output_key_hash);
2657 struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
2658 struct fib_result *res,
2659 const struct sk_buff *skb)
2661 struct net_device *dev_out = NULL;
2662 int orig_oif = fl4->flowi4_oif;
2663 unsigned int flags = 0;
2668 if (ipv4_is_multicast(fl4->saddr) ||
2669 ipv4_is_lbcast(fl4->saddr) ||
2670 ipv4_is_zeronet(fl4->saddr)) {
2671 rth = ERR_PTR(-EINVAL);
2675 rth = ERR_PTR(-ENETUNREACH);
2677 /* I removed the check for oif == dev_out->oif here.
2678 * It was wrong for two reasons:
2679 * 1. ip_dev_find(net, saddr) can return the wrong iface, if saddr
2680 * is assigned to multiple interfaces.
2681 * 2. Moreover, we are allowed to send packets with a saddr
2682 * of another iface. --ANK
2685 if (fl4->flowi4_oif == 0 &&
2686 (ipv4_is_multicast(fl4->daddr) ||
2687 ipv4_is_lbcast(fl4->daddr))) {
2688 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2689 dev_out = __ip_dev_find(net, fl4->saddr, false);
2693 /* Special hack: the user can direct multicasts
2694 * and limited broadcast via the necessary interface
2695 * without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2696 * This hack is not just for fun, it allows
2697 * vic, vat and friends to work.
2698 * They bind a socket to loopback, set ttl to zero
2699 * and expect that it will work.
2700 * From the viewpoint of the routing cache they are broken,
2701 * because we are not allowed to build a multicast path
2702 * with a loopback source addr (look, the routing cache
2703 * cannot know that ttl is zero, so the packet
2704 * will not leave this host and the route is valid).
2705 * Luckily, this hack is a good workaround.
2708 fl4->flowi4_oif = dev_out->ifindex;
2712 if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
2713 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2714 if (!__ip_dev_find(net, fl4->saddr, false))
2720 if (fl4->flowi4_oif) {
2721 dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
2722 rth = ERR_PTR(-ENODEV);
2726 /* RACE: Check return value of inet_select_addr instead. */
2727 if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
2728 rth = ERR_PTR(-ENETUNREACH);
2731 if (ipv4_is_local_multicast(fl4->daddr) ||
2732 ipv4_is_lbcast(fl4->daddr) ||
2733 fl4->flowi4_proto == IPPROTO_IGMP) {
2735 fl4->saddr = inet_select_addr(dev_out, 0,
2740 if (ipv4_is_multicast(fl4->daddr))
2741 fl4->saddr = inet_select_addr(dev_out, 0,
2743 else if (!fl4->daddr)
2744 fl4->saddr = inet_select_addr(dev_out, 0,
2750 fl4->daddr = fl4->saddr;
2752 fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
2753 dev_out = net->loopback_dev;
2754 fl4->flowi4_oif = LOOPBACK_IFINDEX;
2755 res->type = RTN_LOCAL;
2756 flags |= RTCF_LOCAL;
2760 err = fib_lookup(net, fl4, res, 0);
2764 if (fl4->flowi4_oif &&
2765 (ipv4_is_multicast(fl4->daddr) || !fl4->flowi4_l3mdev)) {
2766 /* Apparently, the routing tables are wrong. Assume
2767 * that the destination is on-link.
2770 * Because we are allowed to send to an iface
2771 * even if it has NO routes and NO assigned
2772 * addresses. When oif is specified, the routing
2773 * tables are looked up with only one purpose:
2774 * to catch whether the destination is gatewayed, rather than
2775 * direct. Moreover, if MSG_DONTROUTE is set,
2776 * we send the packet, ignoring both the routing tables
2777 * and the ifaddr state. --ANK
2780 * We could do this even if oif is unknown,
2781 * likely IPv6, but we do not.
2784 if (fl4->saddr == 0)
2785 fl4->saddr = inet_select_addr(dev_out, 0,
2787 res->type = RTN_UNICAST;
2794 if (res->type == RTN_LOCAL) {
2796 if (res->fi->fib_prefsrc)
2797 fl4->saddr = res->fi->fib_prefsrc;
2799 fl4->saddr = fl4->daddr;
2802 /* L3 master device is the loopback for that domain */
2803 dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ? :
2806 /* make sure orig_oif points to the fib result device even
2807 * though packet rx/tx happens over loopback or an l3mdev
2809 orig_oif = FIB_RES_OIF(*res);
2811 fl4->flowi4_oif = dev_out->ifindex;
2812 flags |= RTCF_LOCAL;
2816 fib_select_path(net, res, fl4, skb);
2818 dev_out = FIB_RES_DEV(*res);
2821 rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);
2827 static struct dst_ops ipv4_dst_blackhole_ops = {
2829 .default_advmss = ipv4_default_advmss,
2830 .neigh_lookup = ipv4_neigh_lookup,
2831 .check = dst_blackhole_check,
2832 .cow_metrics = dst_blackhole_cow_metrics,
2833 .update_pmtu = dst_blackhole_update_pmtu,
2834 .redirect = dst_blackhole_redirect,
2835 .mtu = dst_blackhole_mtu,
2838 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2840 struct rtable *ort = (struct rtable *) dst_orig;
2843 rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_DEAD, 0);
2845 struct dst_entry *new = &rt->dst;
2848 new->input = dst_discard;
2849 new->output = dst_discard_out;
2851 new->dev = net->loopback_dev;
2852 dev_hold_track(new->dev, &new->dev_tracker, GFP_ATOMIC);
2854 rt->rt_is_input = ort->rt_is_input;
2855 rt->rt_iif = ort->rt_iif;
2856 rt->rt_pmtu = ort->rt_pmtu;
2857 rt->rt_mtu_locked = ort->rt_mtu_locked;
2859 rt->rt_genid = rt_genid_ipv4(net);
2860 rt->rt_flags = ort->rt_flags;
2861 rt->rt_type = ort->rt_type;
2862 rt->rt_uses_gateway = ort->rt_uses_gateway;
2863 rt->rt_gw_family = ort->rt_gw_family;
2864 if (rt->rt_gw_family == AF_INET)
2865 rt->rt_gw4 = ort->rt_gw4;
2866 else if (rt->rt_gw_family == AF_INET6)
2867 rt->rt_gw6 = ort->rt_gw6;
2869 INIT_LIST_HEAD(&rt->rt_uncached);
2872 dst_release(dst_orig);
2874 return rt ? &rt->dst : ERR_PTR(-ENOMEM);
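/* The blackhole dst built above keeps the flow-relevant route state
 * (input/output role, pmtu, gateway, genid) but stubs ->input and
 * ->output to dst_discard*, so packets using it are silently dropped.
 * It is handed out on the xfrm lookup path while the transforms for a
 * flow cannot be resolved yet (e.g. SAs still being negotiated).
 */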
2877 struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
2878 const struct sock *sk)
2880 struct rtable *rt = __ip_route_output_key(net, flp4);
2885 if (flp4->flowi4_proto) {
2886 flp4->flowi4_oif = rt->dst.dev->ifindex;
2887 rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
2888 flowi4_to_flowi(flp4),
2894 EXPORT_SYMBOL_GPL(ip_route_output_flow);
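/* A minimal usage sketch (illustration only, not part of this file):
 * resolving an output route for a locally generated flow. Callers
 * without a socket or xfrm needs typically go through
 * ip_route_output_key(); the daddr below is a placeholder.
 */
#if 0 /* example only */
static int example_output_route(struct net *net, __be32 daddr)
{
	struct flowi4 fl4;
	struct rtable *rt;

	memset(&fl4, 0, sizeof(fl4));
	fl4.daddr = daddr;
	fl4.flowi4_proto = IPPROTO_UDP;

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		return PTR_ERR(rt);	/* e.g. -ENETUNREACH */

	/* ... use rt->dst.dev and fl4.saddr (now filled in) ... */
	ip_rt_put(rt);			/* drop the reference we hold */
	return 0;
}
#endif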
2896 struct rtable *ip_route_output_tunnel(struct sk_buff *skb,
2897 struct net_device *dev,
2898 struct net *net, __be32 *saddr,
2899 const struct ip_tunnel_info *info,
2900 u8 protocol, bool use_cache)
2902 #ifdef CONFIG_DST_CACHE
2903 struct dst_cache *dst_cache;
2905 struct rtable *rt = NULL;
2909 #ifdef CONFIG_DST_CACHE
2910 dst_cache = (struct dst_cache *)&info->dst_cache;
2912 rt = dst_cache_get_ip4(dst_cache, saddr);
2917 memset(&fl4, 0, sizeof(fl4));
2918 fl4.flowi4_mark = skb->mark;
2919 fl4.flowi4_proto = protocol;
2920 fl4.daddr = info->key.u.ipv4.dst;
2921 fl4.saddr = info->key.u.ipv4.src;
2922 tos = info->key.tos;
2923 fl4.flowi4_tos = RT_TOS(tos);
2925 rt = ip_route_output_key(net, &fl4);
2927 netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr);
2928 return ERR_PTR(-ENETUNREACH);
2930 if (rt->dst.dev == dev) { /* is this necessary? */
2931 netdev_dbg(dev, "circular route to %pI4\n", &fl4.daddr);
2933 return ERR_PTR(-ELOOP);
2935 #ifdef CONFIG_DST_CACHE
2937 dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr);
2942 EXPORT_SYMBOL_GPL(ip_route_output_tunnel);
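/* A minimal usage sketch (illustration only, not part of this file):
 * how a collect-metadata tunnel transmit path might resolve the
 * underlay route. The protocol and use_cache values are placeholders.
 */
#if 0 /* example only */
static int example_tunnel_xmit_route(struct sk_buff *skb,
				     struct net_device *dev, struct net *net,
				     const struct ip_tunnel_info *info)
{
	struct rtable *rt;
	__be32 saddr;

	rt = ip_route_output_tunnel(skb, dev, net, &saddr, info,
				    IPPROTO_UDP, true);
	if (IS_ERR(rt))
		return PTR_ERR(rt);	/* -ENETUNREACH or -ELOOP */

	/* ... build the outer IP header with saddr, then transmit ... */
	ip_rt_put(rt);
	return 0;
}
#endif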
2944 /* called with rcu_read_lock held */
2945 static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
2946 struct rtable *rt, u32 table_id, struct flowi4 *fl4,
2947 struct sk_buff *skb, u32 portid, u32 seq,
2951 struct nlmsghdr *nlh;
2952 unsigned long expires = 0;
2954 u32 metrics[RTAX_MAX];
2956 nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), flags);
2960 r = nlmsg_data(nlh);
2961 r->rtm_family = AF_INET;
2962 r->rtm_dst_len = 32;
2964 r->rtm_tos = fl4 ? fl4->flowi4_tos : 0;
2965 r->rtm_table = table_id < 256 ? table_id : RT_TABLE_COMPAT;
2966 if (nla_put_u32(skb, RTA_TABLE, table_id))
2967 goto nla_put_failure;
2968 r->rtm_type = rt->rt_type;
2969 r->rtm_scope = RT_SCOPE_UNIVERSE;
2970 r->rtm_protocol = RTPROT_UNSPEC;
2971 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2972 if (rt->rt_flags & RTCF_NOTIFY)
2973 r->rtm_flags |= RTM_F_NOTIFY;
2974 if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
2975 r->rtm_flags |= RTCF_DOREDIRECT;
2977 if (nla_put_in_addr(skb, RTA_DST, dst))
2978 goto nla_put_failure;
2980 r->rtm_src_len = 32;
2981 if (nla_put_in_addr(skb, RTA_SRC, src))
2982 goto nla_put_failure;
2985 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2986 goto nla_put_failure;
2987 if (rt->dst.lwtstate &&
2988 lwtunnel_fill_encap(skb, rt->dst.lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
2989 goto nla_put_failure;
2990 #ifdef CONFIG_IP_ROUTE_CLASSID
2991 if (rt->dst.tclassid &&
2992 nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
2993 goto nla_put_failure;
2995 if (fl4 && !rt_is_input_route(rt) &&
2996 fl4->saddr != src) {
2997 if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
2998 goto nla_put_failure;
3000 if (rt->rt_uses_gateway) {
3001 if (rt->rt_gw_family == AF_INET &&
3002 nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) {
3003 goto nla_put_failure;
3004 } else if (rt->rt_gw_family == AF_INET6) {
3005 int alen = sizeof(struct in6_addr);
3009 nla = nla_reserve(skb, RTA_VIA, alen + 2);
3011 goto nla_put_failure;
3013 via = nla_data(nla);
3014 via->rtvia_family = AF_INET6;
3015 memcpy(via->rtvia_addr, &rt->rt_gw6, alen);
3019 expires = rt->dst.expires;
3021 unsigned long now = jiffies;
3023 if (time_before(now, expires))
3029 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
3030 if (rt->rt_pmtu && expires)
3031 metrics[RTAX_MTU - 1] = rt->rt_pmtu;
3032 if (rt->rt_mtu_locked && expires)
3033 metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
3034 if (rtnetlink_put_metrics(skb, metrics) < 0)
3035 goto nla_put_failure;
3038 if (fl4->flowi4_mark &&
3039 nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
3040 goto nla_put_failure;
3042 if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
3043 nla_put_u32(skb, RTA_UID,
3044 from_kuid_munged(current_user_ns(),
3046 goto nla_put_failure;
3048 if (rt_is_input_route(rt)) {
3049 #ifdef CONFIG_IP_MROUTE
3050 if (ipv4_is_multicast(dst) &&
3051 !ipv4_is_local_multicast(dst) &&
3052 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
3053 int err = ipmr_get_route(net, skb,
3054 fl4->saddr, fl4->daddr,
3060 goto nla_put_failure;
3064 if (nla_put_u32(skb, RTA_IIF, fl4->flowi4_iif))
3065 goto nla_put_failure;
3069 error = rt->dst.error;
3071 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
3072 goto nla_put_failure;
3074 nlmsg_end(skb, nlh);
3078 nlmsg_cancel(skb, nlh);
3082 static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
3083 struct netlink_callback *cb, u32 table_id,
3084 struct fnhe_hash_bucket *bucket, int genid,
3085 int *fa_index, int fa_start, unsigned int flags)
3089 for (i = 0; i < FNHE_HASH_SIZE; i++) {
3090 struct fib_nh_exception *fnhe;
3092 for (fnhe = rcu_dereference(bucket[i].chain); fnhe;
3093 fnhe = rcu_dereference(fnhe->fnhe_next)) {
3097 if (*fa_index < fa_start)
3100 if (fnhe->fnhe_genid != genid)
3103 if (fnhe->fnhe_expires &&
3104 time_after(jiffies, fnhe->fnhe_expires))
3107 rt = rcu_dereference(fnhe->fnhe_rth_input);
3109 rt = rcu_dereference(fnhe->fnhe_rth_output);
3113 err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt,
3114 table_id, NULL, skb,
3115 NETLINK_CB(cb->skb).portid,
3116 cb->nlh->nlmsg_seq, flags);
3127 int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
3128 u32 table_id, struct fib_info *fi,
3129 int *fa_index, int fa_start, unsigned int flags)
3131 struct net *net = sock_net(cb->skb->sk);
3132 int nhsel, genid = fnhe_genid(net);
3134 for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) {
3135 struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel);
3136 struct fnhe_hash_bucket *bucket;
3139 if (nhc->nhc_flags & RTNH_F_DEAD)
3143 bucket = rcu_dereference(nhc->nhc_exceptions);
3146 err = fnhe_dump_bucket(net, skb, cb, table_id, bucket,
3147 genid, fa_index, fa_start,
3157 static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
3158 u8 ip_proto, __be16 sport,
3161 struct sk_buff *skb;
3164 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3168 /* Reserve room for dummy headers; this skb can pass
3169 * through a good chunk of the routing engine.
3171 skb_reset_mac_header(skb);
3172 skb_reset_network_header(skb);
3173 skb->protocol = htons(ETH_P_IP);
3174 iph = skb_put(skb, sizeof(struct iphdr));
3175 iph->protocol = ip_proto;
3181 skb_set_transport_header(skb, skb->len);
3183 switch (iph->protocol) {
3185 struct udphdr *udph;
3187 udph = skb_put_zero(skb, sizeof(struct udphdr));
3188 udph->source = sport;
3190 udph->len = htons(sizeof(struct udphdr));
3195 struct tcphdr *tcph;
3197 tcph = skb_put_zero(skb, sizeof(struct tcphdr));
3198 tcph->source = sport;
3200 tcph->doff = sizeof(struct tcphdr) / 4;
3202 tcph->check = ~tcp_v4_check(sizeof(struct tcphdr),
3206 case IPPROTO_ICMP: {
3207 struct icmphdr *icmph;
3209 icmph = skb_put_zero(skb, sizeof(struct icmphdr));
3210 icmph->type = ICMP_ECHO;
3218 static int inet_rtm_valid_getroute_req(struct sk_buff *skb,
3219 const struct nlmsghdr *nlh,
3221 struct netlink_ext_ack *extack)
3226 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
3227 NL_SET_ERR_MSG(extack,
3228 "ipv4: Invalid header for route get request");
3232 if (!netlink_strict_get_check(skb))
3233 return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
3234 rtm_ipv4_policy, extack);
3236 rtm = nlmsg_data(nlh);
3237 if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) ||
3238 (rtm->rtm_dst_len && rtm->rtm_dst_len != 32) ||
3239 rtm->rtm_table || rtm->rtm_protocol ||
3240 rtm->rtm_scope || rtm->rtm_type) {
3241 NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for route get request");
3245 if (rtm->rtm_flags & ~(RTM_F_NOTIFY |
3246 RTM_F_LOOKUP_TABLE |
3248 NL_SET_ERR_MSG(extack, "ipv4: Unsupported rtm_flags for route get request");
3252 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
3253 rtm_ipv4_policy, extack);
3257 if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
3258 (tb[RTA_DST] && !rtm->rtm_dst_len)) {
3259 NL_SET_ERR_MSG(extack, "ipv4: rtm_src_len and rtm_dst_len must be 32 for IPv4");
3263 for (i = 0; i <= RTA_MAX; i++) {
3279 NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in route get request");
3287 static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3288 struct netlink_ext_ack *extack)
3290 struct net *net = sock_net(in_skb->sk);
3291 struct nlattr *tb[RTA_MAX+1];
3292 u32 table_id = RT_TABLE_MAIN;
3293 __be16 sport = 0, dport = 0;
3294 struct fib_result res = {};
3295 u8 ip_proto = IPPROTO_UDP;
3296 struct rtable *rt = NULL;
3297 struct sk_buff *skb;
3299 struct flowi4 fl4 = {};
3307 err = inet_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
3311 rtm = nlmsg_data(nlh);
3312 src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
3313 dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
3314 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
3315 mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
3317 uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
3319 uid = (iif ? INVALID_UID : current_uid());
3321 if (tb[RTA_IP_PROTO]) {
3322 err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
3323 &ip_proto, AF_INET, extack);
3329 sport = nla_get_be16(tb[RTA_SPORT]);
3332 dport = nla_get_be16(tb[RTA_DPORT]);
3334 skb = inet_rtm_getroute_build_skb(src, dst, ip_proto, sport, dport);
3340 fl4.flowi4_tos = rtm->rtm_tos & IPTOS_RT_MASK;
3341 fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
3342 fl4.flowi4_mark = mark;
3343 fl4.flowi4_uid = uid;
3345 fl4.fl4_sport = sport;
3347 fl4.fl4_dport = dport;
3348 fl4.flowi4_proto = ip_proto;
3353 struct net_device *dev;
3355 dev = dev_get_by_index_rcu(net, iif);
3361 fl4.flowi4_iif = iif; /* for rt_fill_info */
3364 err = ip_route_input_rcu(skb, dst, src,
3365 rtm->rtm_tos & IPTOS_RT_MASK, dev,
3368 rt = skb_rtable(skb);
3369 if (err == 0 && rt->dst.error)
3370 err = -rt->dst.error;
3372 fl4.flowi4_iif = LOOPBACK_IFINDEX;
3373 skb->dev = net->loopback_dev;
3374 rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
3379 skb_dst_set(skb, &rt->dst);
3385 if (rtm->rtm_flags & RTM_F_NOTIFY)
3386 rt->rt_flags |= RTCF_NOTIFY;
3388 if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE)
3389 table_id = res.table ? res.table->tb_id : 0;
3391 /* reset skb for netlink reply msg */
3393 skb_reset_network_header(skb);
3394 skb_reset_transport_header(skb);
3395 skb_reset_mac_header(skb);
3397 if (rtm->rtm_flags & RTM_F_FIB_MATCH) {
3398 struct fib_rt_info fri;
3401 err = fib_props[res.type].error;
3403 err = -EHOSTUNREACH;
3407 fri.tb_id = table_id;
3408 fri.dst = res.prefix;
3409 fri.dst_len = res.prefixlen;
3410 fri.tos = fl4.flowi4_tos;
3411 fri.type = rt->rt_type;
3414 fri.offload_failed = 0;
3416 struct fib_alias *fa;
3418 hlist_for_each_entry_rcu(fa, res.fa_head, fa_list) {
3419 u8 slen = 32 - fri.dst_len;
3421 if (fa->fa_slen == slen &&
3422 fa->tb_id == fri.tb_id &&
3423 fa->fa_dscp == inet_dsfield_to_dscp(fri.tos) &&
3424 fa->fa_info == res.fi &&
3425 fa->fa_type == fri.type) {
3426 fri.offload = READ_ONCE(fa->offload);
3427 fri.trap = READ_ONCE(fa->trap);
3432 err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
3433 nlh->nlmsg_seq, RTM_NEWROUTE, &fri, 0);
3435 err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
3436 NETLINK_CB(in_skb).portid,
3444 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
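/* inet_rtm_getroute() above is the RTM_GETROUTE handler, i.e. the kernel
 * side of "ip route get": it synthesizes a dummy skb for the requested
 * flow, resolves it through the input or output path, and unicasts the
 * rt_fill_info()/fib_dump_info() result back to the requester.
 */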
3454 void ip_rt_multicast_event(struct in_device *in_dev)
3456 rt_cache_flush(dev_net(in_dev->dev));
3459 #ifdef CONFIG_SYSCTL
3460 static int ip_rt_gc_interval __read_mostly = 60 * HZ;
3461 static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
3462 static int ip_rt_gc_elasticity __read_mostly = 8;
3463 static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU;
3465 static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
3466 void *buffer, size_t *lenp, loff_t *ppos)
3468 struct net *net = (struct net *)__ctl->extra1;
3471 rt_cache_flush(net);
3472 fnhe_genid_bump(net);
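/* Any write to /proc/sys/net/ipv4/route/flush lands in the handler
 * above. Bumping the genids does not free cached dsts immediately;
 * they are invalidated lazily, at their next rt_cache_valid() check.
 */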
3479 static struct ctl_table ipv4_route_table[] = {
3481 .procname = "gc_thresh",
3482 .data = &ipv4_dst_ops.gc_thresh,
3483 .maxlen = sizeof(int),
3485 .proc_handler = proc_dointvec,
3488 .procname = "max_size",
3489 .data = &ip_rt_max_size,
3490 .maxlen = sizeof(int),
3492 .proc_handler = proc_dointvec,
3495 /* Deprecated. Use gc_min_interval_ms */
3497 .procname = "gc_min_interval",
3498 .data = &ip_rt_gc_min_interval,
3499 .maxlen = sizeof(int),
3501 .proc_handler = proc_dointvec_jiffies,
3504 .procname = "gc_min_interval_ms",
3505 .data = &ip_rt_gc_min_interval,
3506 .maxlen = sizeof(int),
3508 .proc_handler = proc_dointvec_ms_jiffies,
3511 .procname = "gc_timeout",
3512 .data = &ip_rt_gc_timeout,
3513 .maxlen = sizeof(int),
3515 .proc_handler = proc_dointvec_jiffies,
3518 .procname = "gc_interval",
3519 .data = &ip_rt_gc_interval,
3520 .maxlen = sizeof(int),
3522 .proc_handler = proc_dointvec_jiffies,
3525 .procname = "redirect_load",
3526 .data = &ip_rt_redirect_load,
3527 .maxlen = sizeof(int),
3529 .proc_handler = proc_dointvec,
3532 .procname = "redirect_number",
3533 .data = &ip_rt_redirect_number,
3534 .maxlen = sizeof(int),
3536 .proc_handler = proc_dointvec,
3539 .procname = "redirect_silence",
3540 .data = &ip_rt_redirect_silence,
3541 .maxlen = sizeof(int),
3543 .proc_handler = proc_dointvec,
3546 .procname = "error_cost",
3547 .data = &ip_rt_error_cost,
3548 .maxlen = sizeof(int),
3550 .proc_handler = proc_dointvec,
3553 .procname = "error_burst",
3554 .data = &ip_rt_error_burst,
3555 .maxlen = sizeof(int),
3557 .proc_handler = proc_dointvec,
3560 .procname = "gc_elasticity",
3561 .data = &ip_rt_gc_elasticity,
3562 .maxlen = sizeof(int),
3564 .proc_handler = proc_dointvec,
3569 static const char ipv4_route_flush_procname[] = "flush";
3571 static struct ctl_table ipv4_route_netns_table[] = {
3573 .procname = ipv4_route_flush_procname,
3574 .maxlen = sizeof(int),
3576 .proc_handler = ipv4_sysctl_rtcache_flush,
3579 .procname = "min_pmtu",
3580 .data = &init_net.ipv4.ip_rt_min_pmtu,
3581 .maxlen = sizeof(int),
3583 .proc_handler = proc_dointvec_minmax,
3584 .extra1 = &ip_min_valid_pmtu,
3587 .procname = "mtu_expires",
3588 .data = &init_net.ipv4.ip_rt_mtu_expires,
3589 .maxlen = sizeof(int),
3591 .proc_handler = proc_dointvec_jiffies,
3594 .procname = "min_adv_mss",
3595 .data = &init_net.ipv4.ip_rt_min_advmss,
3596 .maxlen = sizeof(int),
3598 .proc_handler = proc_dointvec,
3603 static __net_init int sysctl_route_net_init(struct net *net)
3605 struct ctl_table *tbl;
3607 tbl = ipv4_route_netns_table;
3608 if (!net_eq(net, &init_net)) {
3611 tbl = kmemdup(tbl, sizeof(ipv4_route_netns_table), GFP_KERNEL);
3615 /* Don't export non-whitelisted sysctls to unprivileged users */
3616 if (net->user_ns != &init_user_ns) {
3617 if (tbl[0].procname != ipv4_route_flush_procname)
3618 tbl[0].procname = NULL;
3621 /* Update the variables to point into the current struct net,
3622 * except for the first element, flush
3624 for (i = 1; i < ARRAY_SIZE(ipv4_route_netns_table) - 1; i++)
3625 tbl[i].data += (void *)net - (void *)&init_net;
3627 tbl[0].extra1 = net;
3629 net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
3630 if (!net->ipv4.route_hdr)
3635 if (tbl != ipv4_route_netns_table)
3641 static __net_exit void sysctl_route_net_exit(struct net *net)
3643 struct ctl_table *tbl;
3645 tbl = net->ipv4.route_hdr->ctl_table_arg;
3646 unregister_net_sysctl_table(net->ipv4.route_hdr);
3647 BUG_ON(tbl == ipv4_route_netns_table);
3651 static __net_initdata struct pernet_operations sysctl_route_ops = {
3652 .init = sysctl_route_net_init,
3653 .exit = sysctl_route_net_exit,
3657 static __net_init int netns_ip_rt_init(struct net *net)
3659 /* Set default values for the namespaced sysctls */
3660 net->ipv4.ip_rt_min_pmtu = DEFAULT_MIN_PMTU;
3661 net->ipv4.ip_rt_mtu_expires = DEFAULT_MTU_EXPIRES;
3662 net->ipv4.ip_rt_min_advmss = DEFAULT_MIN_ADVMSS;
3666 static struct pernet_operations __net_initdata ip_rt_ops = {
3667 .init = netns_ip_rt_init,
3670 static __net_init int rt_genid_init(struct net *net)
3672 atomic_set(&net->ipv4.rt_genid, 0);
3673 atomic_set(&net->fnhe_genid, 0);
3674 atomic_set(&net->ipv4.dev_addr_genid, get_random_int());
3678 static __net_initdata struct pernet_operations rt_genid_ops = {
3679 .init = rt_genid_init,
3682 static int __net_init ipv4_inetpeer_init(struct net *net)
3684 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
3688 inet_peer_base_init(bp);
3689 net->ipv4.peers = bp;
3693 static void __net_exit ipv4_inetpeer_exit(struct net *net)
3695 struct inet_peer_base *bp = net->ipv4.peers;
3697 net->ipv4.peers = NULL;
3698 inetpeer_invalidate_tree(bp);
3702 static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
3703 .init = ipv4_inetpeer_init,
3704 .exit = ipv4_inetpeer_exit,
3707 #ifdef CONFIG_IP_ROUTE_CLASSID
3708 struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
3709 #endif /* CONFIG_IP_ROUTE_CLASSID */
3711 int __init ip_rt_init(void)
3716 /* For modern hosts, this will use 2 MB of memory */
3717 idents_hash = alloc_large_system_hash("IP idents",
3718 sizeof(*ip_idents) + sizeof(*ip_tstamps),
3720 16, /* one bucket per 64 KB */
3727 ip_idents = idents_hash;
3729 prandom_bytes(ip_idents, (ip_idents_mask + 1) * sizeof(*ip_idents));
3731 ip_tstamps = idents_hash + (ip_idents_mask + 1) * sizeof(*ip_idents);
3733 for_each_possible_cpu(cpu) {
3734 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
3736 INIT_LIST_HEAD(&ul->head);
3737 INIT_LIST_HEAD(&ul->quarantine);
3738 spin_lock_init(&ul->lock);
3740 #ifdef CONFIG_IP_ROUTE_CLASSID
3741 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
3743 panic("IP: failed to allocate ip_rt_acct\n");
3746 ipv4_dst_ops.kmem_cachep =
3747 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
3748 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3750 ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
3752 if (dst_entries_init(&ipv4_dst_ops) < 0)
3753 panic("IP: failed to allocate ipv4_dst_ops counter\n");
3755 if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
3756 panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
3758 ipv4_dst_ops.gc_thresh = ~0;
3759 ip_rt_max_size = INT_MAX;
3764 if (ip_rt_proc_init())
3765 pr_err("Unable to create route proc files\n");
3770 rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL,
3771 RTNL_FLAG_DOIT_UNLOCKED);
3773 #ifdef CONFIG_SYSCTL
3774 register_pernet_subsys(&sysctl_route_ops);
3776 register_pernet_subsys(&ip_rt_ops);
3777 register_pernet_subsys(&rt_genid_ops);
3778 register_pernet_subsys(&ipv4_inetpeer_ops);
3782 #ifdef CONFIG_SYSCTL
3784 * We really need to sanitize the damn ipv4 init order, then all
3785 * this nonsense will go away.
3787 void __init ip_static_sysctl_init(void)
3789 register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);