1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
7 * ROUTE - implementation of the IP router.
10 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
11 * Alan Cox, <gw4pts@gw4pts.ampr.org>
12 * Linus Torvalds, <Linus.Torvalds@helsinki.fi>
13 * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
16 * Alan Cox : Verify area fixes.
17 * Alan Cox : cli() protects routing changes
18 * Rui Oliveira : ICMP routing table updates
19 * (rco@di.uminho.pt) Routing table insertion and update
20 * Linus Torvalds : Rewrote bits to be sensible
21 * Alan Cox : Added BSD route gw semantics
22 * Alan Cox : Super /proc >4K
23 * Alan Cox : MTU in route table
24 * Alan Cox : MSS actually. Also added the window
26 * Sam Lantinga : Fixed route matching in rt_del()
27 * Alan Cox : Routing cache support.
28 * Alan Cox : Removed compatibility cruft.
29 * Alan Cox : RTF_REJECT support.
30 * Alan Cox : TCP irtt support.
31 * Jonathan Naylor : Added Metric support.
32 * Miquel van Smoorenburg : BSD API fixes.
33 * Miquel van Smoorenburg : Metrics.
34 * Alan Cox : Use __u32 properly
35 * Alan Cox : Aligned routing errors more closely with BSD
36 * our system is still very different.
37 * Alan Cox : Faster /proc handling
38 * Alexey Kuznetsov : Massive rework to support tree based routing,
39 * routing caches and better behaviour.
41 * Olaf Erb : irtt wasn't being copied right.
42 * Bjorn Ekwall : Kerneld route support.
43 * Alan Cox : Multicast fixed (I hope)
44 * Pavel Krauz : Limited broadcast fixed
45 * Mike McLagan : Routing by source
46 * Alexey Kuznetsov : End of old history. Split to fib.c and
47 * route.c and rewritten from scratch.
48 * Andi Kleen : Load-limit warning messages.
49 * Vitaly E. Lavrov : Transparent proxy revived after year coma.
50 * Vitaly E. Lavrov : Race condition in ip_route_input_slow.
51 * Tobias Ringstrom : Uninitialized res.type in ip_route_output_slow.
52 * Vladimir V. Ivanov : IP rule info (flowid) is really useful.
53 * Marc Boucher : routing by fwmark
54 * Robert Olsson : Added rt_cache statistics
55 * Arnaldo C. Melo : Convert proc stuff to seq_file
56 * Eric Dumazet : hashed spinlocks and rt_check_expire() fixes.
57 * Ilia Sotnikov : Ignore TOS on PMTUD and Redirect
58 * Ilia Sotnikov : Removed TOS from hash calculations
61 #define pr_fmt(fmt) "IPv4: " fmt
63 #include <linux/module.h>
64 #include <linux/uaccess.h>
65 #include <linux/bitops.h>
66 #include <linux/types.h>
67 #include <linux/kernel.h>
69 #include <linux/string.h>
70 #include <linux/socket.h>
71 #include <linux/sockios.h>
72 #include <linux/errno.h>
74 #include <linux/inet.h>
75 #include <linux/netdevice.h>
76 #include <linux/proc_fs.h>
77 #include <linux/init.h>
78 #include <linux/skbuff.h>
79 #include <linux/inetdevice.h>
80 #include <linux/igmp.h>
81 #include <linux/pkt_sched.h>
82 #include <linux/mroute.h>
83 #include <linux/netfilter_ipv4.h>
84 #include <linux/random.h>
85 #include <linux/rcupdate.h>
86 #include <linux/times.h>
87 #include <linux/slab.h>
88 #include <linux/jhash.h>
90 #include <net/dst_metadata.h>
91 #include <net/net_namespace.h>
92 #include <net/protocol.h>
94 #include <net/route.h>
95 #include <net/inetpeer.h>
97 #include <net/ip_fib.h>
98 #include <net/nexthop.h>
101 #include <net/icmp.h>
102 #include <net/xfrm.h>
103 #include <net/lwtunnel.h>
104 #include <net/netevent.h>
105 #include <net/rtnetlink.h>
107 #include <linux/sysctl.h>
109 #include <net/secure_seq.h>
110 #include <net/ip_tunnels.h>
111 #include <net/l3mdev.h>
113 #include "fib_lookup.h"
115 #define RT_FL_TOS(oldflp4) \
116 ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
118 #define RT_GC_TIMEOUT (300*HZ)
120 static int ip_rt_max_size;
121 static int ip_rt_redirect_number __read_mostly = 9;
122 static int ip_rt_redirect_load __read_mostly = HZ / 50;
123 static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
124 static int ip_rt_error_cost __read_mostly = HZ;
125 static int ip_rt_error_burst __read_mostly = 5 * HZ;
126 static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
127 static u32 ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
128 static int ip_rt_min_advmss __read_mostly = 256;
130 static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
133 * Interface to generic destination cache.
136 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
137 static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
138 static unsigned int ipv4_mtu(const struct dst_entry *dst);
139 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
140 static void ipv4_link_failure(struct sk_buff *skb);
141 static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
142 struct sk_buff *skb, u32 mtu);
143 static void ip_do_redirect(struct dst_entry *dst, struct sock *sk,
144 struct sk_buff *skb);
145 static void ipv4_dst_destroy(struct dst_entry *dst);
147 static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
153 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
156 static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr);
158 static struct dst_ops ipv4_dst_ops = {
160 .check = ipv4_dst_check,
161 .default_advmss = ipv4_default_advmss,
163 .cow_metrics = ipv4_cow_metrics,
164 .destroy = ipv4_dst_destroy,
165 .negative_advice = ipv4_negative_advice,
166 .link_failure = ipv4_link_failure,
167 .update_pmtu = ip_rt_update_pmtu,
168 .redirect = ip_do_redirect,
169 .local_out = __ip_local_out,
170 .neigh_lookup = ipv4_neigh_lookup,
171 .confirm_neigh = ipv4_confirm_neigh,
174 #define ECN_OR_COST(class) TC_PRIO_##class
176 const __u8 ip_tos2prio[16] = {
178 ECN_OR_COST(BESTEFFORT),
180 ECN_OR_COST(BESTEFFORT),
186 ECN_OR_COST(INTERACTIVE),
188 ECN_OR_COST(INTERACTIVE),
189 TC_PRIO_INTERACTIVE_BULK,
190 ECN_OR_COST(INTERACTIVE_BULK),
191 TC_PRIO_INTERACTIVE_BULK,
192 ECN_OR_COST(INTERACTIVE_BULK)
194 EXPORT_SYMBOL(ip_tos2prio);
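/*
 * Illustrative sketch (userspace, not part of the build): how the
 * table above is indexed.  The kernel derives a queueing priority
 * from the legacy IPv4 TOS byte via rt_tos2priority(), which masks
 * off the low (MBZ/ECN) bit and uses TOS bits 1-4 as the index.
 */
#if 0
#include <stdio.h>

#define IPTOS_TOS_MASK	0x1E
#define IPTOS_TOS(tos)	((tos) & IPTOS_TOS_MASK)

static unsigned int tos2prio_index(unsigned char tos)
{
	return IPTOS_TOS(tos) >> 1;	/* 0..15, indexes ip_tos2prio[] */
}

int main(void)
{
	/* IPTOS_LOWDELAY (0x10) lands in an INTERACTIVE slot */
	printf("index for tos 0x10: %u\n", tos2prio_index(0x10));
	return 0;
}
#endif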
196 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
197 #define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)
199 #ifdef CONFIG_PROC_FS
200 static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
204 return SEQ_START_TOKEN;
207 static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
213 static void rt_cache_seq_stop(struct seq_file *seq, void *v)
217 static int rt_cache_seq_show(struct seq_file *seq, void *v)
219 if (v == SEQ_START_TOKEN)
220 seq_printf(seq, "%-127s\n",
221 "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
222 "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
227 static const struct seq_operations rt_cache_seq_ops = {
228 .start = rt_cache_seq_start,
229 .next = rt_cache_seq_next,
230 .stop = rt_cache_seq_stop,
231 .show = rt_cache_seq_show,
234 static int rt_cache_seq_open(struct inode *inode, struct file *file)
236 return seq_open(file, &rt_cache_seq_ops);
239 static const struct file_operations rt_cache_seq_fops = {
240 .open = rt_cache_seq_open,
243 .release = seq_release,
247 static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
252 return SEQ_START_TOKEN;
254 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
255 if (!cpu_possible(cpu))
258 return &per_cpu(rt_cache_stat, cpu);
263 static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
267 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
268 if (!cpu_possible(cpu))
271 return &per_cpu(rt_cache_stat, cpu);
277 static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
282 static int rt_cpu_seq_show(struct seq_file *seq, void *v)
284 struct rt_cache_stat *st = v;
286 if (v == SEQ_START_TOKEN) {
287 seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
291 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x "
292 " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
293 dst_entries_get_slow(&ipv4_dst_ops),
306 0, /* st->gc_total */
307 0, /* st->gc_ignored */
308 0, /* st->gc_goal_miss */
309 0, /* st->gc_dst_overflow */
310 0, /* st->in_hlist_search */
311 0 /* st->out_hlist_search */
316 static const struct seq_operations rt_cpu_seq_ops = {
317 .start = rt_cpu_seq_start,
318 .next = rt_cpu_seq_next,
319 .stop = rt_cpu_seq_stop,
320 .show = rt_cpu_seq_show,
324 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
326 return seq_open(file, &rt_cpu_seq_ops);
329 static const struct file_operations rt_cpu_seq_fops = {
330 .open = rt_cpu_seq_open,
333 .release = seq_release,
336 #ifdef CONFIG_IP_ROUTE_CLASSID
337 static int rt_acct_proc_show(struct seq_file *m, void *v)
339 struct ip_rt_acct *dst, *src;
342 dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
346 for_each_possible_cpu(i) {
347 src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
348 for (j = 0; j < 256; j++) {
349 dst[j].o_bytes += src[j].o_bytes;
350 dst[j].o_packets += src[j].o_packets;
351 dst[j].i_bytes += src[j].i_bytes;
352 dst[j].i_packets += src[j].i_packets;
356 seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
362 static int __net_init ip_rt_do_proc_init(struct net *net)
364 struct proc_dir_entry *pde;
366 pde = proc_create("rt_cache", 0444, net->proc_net,
371 pde = proc_create("rt_cache", 0444,
372 net->proc_net_stat, &rt_cpu_seq_fops);
376 #ifdef CONFIG_IP_ROUTE_CLASSID
377 pde = proc_create_single("rt_acct", 0, net->proc_net,
384 #ifdef CONFIG_IP_ROUTE_CLASSID
386 remove_proc_entry("rt_cache", net->proc_net_stat);
389 remove_proc_entry("rt_cache", net->proc_net);
394 static void __net_exit ip_rt_do_proc_exit(struct net *net)
396 remove_proc_entry("rt_cache", net->proc_net_stat);
397 remove_proc_entry("rt_cache", net->proc_net);
398 #ifdef CONFIG_IP_ROUTE_CLASSID
399 remove_proc_entry("rt_acct", net->proc_net);
403 static struct pernet_operations ip_rt_proc_ops __net_initdata = {
404 .init = ip_rt_do_proc_init,
405 .exit = ip_rt_do_proc_exit,
408 static int __init ip_rt_proc_init(void)
410 return register_pernet_subsys(&ip_rt_proc_ops);
414 static inline int ip_rt_proc_init(void)
418 #endif /* CONFIG_PROC_FS */
420 static inline bool rt_is_expired(const struct rtable *rth)
422 return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
425 void rt_cache_flush(struct net *net)
427 rt_genid_bump_ipv4(net);
430 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
434 const struct rtable *rt = container_of(dst, struct rtable, dst);
435 struct net_device *dev = dst->dev;
440 if (likely(rt->rt_gw_family == AF_INET)) {
441 n = ip_neigh_gw4(dev, rt->rt_gw4);
442 } else if (rt->rt_gw_family == AF_INET6) {
443 n = ip_neigh_gw6(dev, &rt->rt_gw6);
447 pkey = skb ? ip_hdr(skb)->daddr : *((__be32 *) daddr);
448 n = ip_neigh_gw4(dev, pkey);
451 if (!IS_ERR(n) && !refcount_inc_not_zero(&n->refcnt))
454 rcu_read_unlock_bh();
459 static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
461 const struct rtable *rt = container_of(dst, struct rtable, dst);
462 struct net_device *dev = dst->dev;
463 const __be32 *pkey = daddr;
465 if (rt->rt_gw_family == AF_INET) {
466 pkey = (const __be32 *)&rt->rt_gw4;
467 } else if (rt->rt_gw_family == AF_INET6) {
468 return __ipv6_confirm_neigh_stub(dev, &rt->rt_gw6);
471 (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL))) {
474 __ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
477 #define IP_IDENTS_SZ 2048u
479 static atomic_t *ip_idents __read_mostly;
480 static u32 *ip_tstamps __read_mostly;
482 /* In order to protect privacy, we add a perturbation to identifiers
483 * if one generator is seldom used. This makes it hard for an attacker
484 * to infer how many packets were sent between two points in time.
486 u32 ip_idents_reserve(u32 hash, int segs)
488 u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
489 atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
490 u32 old = READ_ONCE(*p_tstamp);
491 u32 now = (u32)jiffies;
494 if (old != now && cmpxchg(p_tstamp, old, now) == old)
495 delta = prandom_u32_max(now - old);
497 /* Do not use atomic_add_return() as it makes UBSAN unhappy */
499 old = (u32)atomic_read(p_id);
500 new = old + delta + segs;
501 } while (atomic_cmpxchg(p_id, old, new) != old);
505 EXPORT_SYMBOL(ip_idents_reserve);
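/*
 * Minimal userspace model (not part of the build) of the reservation
 * scheme above: when a generator has been idle, a random delta bounded
 * by the idle time is folded into the counter, so an observer cannot
 * count packets sent between two probes.  prandom_u32_max() is modelled
 * here with rand(); function names are illustrative.
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

static uint32_t reserve_ids(atomic_uint *id, atomic_uint *tstamp,
			    uint32_t now, int segs)
{
	uint32_t old = atomic_load(tstamp);
	uint32_t delta = 0;
	uint32_t prev, next;

	/* generator was idle: perturb by a random amount <= idle time */
	if (old != now &&
	    atomic_compare_exchange_strong(tstamp, &old, now))
		delta = (uint32_t)rand() % (now - old);

	/* reserve 'segs' identifiers, as the cmpxchg loop above does */
	do {
		prev = atomic_load(id);
		next = prev + delta + segs;
	} while (!atomic_compare_exchange_weak(id, &prev, next));

	return next - segs;
}
#endif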
507 void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
511 /* Note the following code is not safe, but this is okay. */
512 if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
513 get_random_bytes(&net->ipv4.ip_id_key,
514 sizeof(net->ipv4.ip_id_key));
516 hash = siphash_3u32((__force u32)iph->daddr,
517 (__force u32)iph->saddr,
519 &net->ipv4.ip_id_key);
520 id = ip_idents_reserve(hash, segs);
523 EXPORT_SYMBOL(__ip_select_ident);
525 static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
526 const struct sock *sk,
527 const struct iphdr *iph,
529 u8 prot, u32 mark, int flow_flags)
532 const struct inet_sock *inet = inet_sk(sk);
534 oif = sk->sk_bound_dev_if;
536 tos = RT_CONN_FLAGS(sk);
537 prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
539 flowi4_init_output(fl4, oif, mark, tos,
540 RT_SCOPE_UNIVERSE, prot,
542 iph->daddr, iph->saddr, 0, 0,
543 sock_net_uid(net, sk));
546 static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
547 const struct sock *sk)
549 const struct net *net = dev_net(skb->dev);
550 const struct iphdr *iph = ip_hdr(skb);
551 int oif = skb->dev->ifindex;
552 u8 tos = RT_TOS(iph->tos);
553 u8 prot = iph->protocol;
554 u32 mark = skb->mark;
556 __build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
559 static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
561 const struct inet_sock *inet = inet_sk(sk);
562 const struct ip_options_rcu *inet_opt;
563 __be32 daddr = inet->inet_daddr;
566 inet_opt = rcu_dereference(inet->inet_opt);
567 if (inet_opt && inet_opt->opt.srr)
568 daddr = inet_opt->opt.faddr;
569 flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
570 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
571 inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
572 inet_sk_flowi_flags(sk),
573 daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
577 static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
578 const struct sk_buff *skb)
581 build_skb_flow_key(fl4, skb, sk);
583 build_sk_flow_key(fl4, sk);
586 static DEFINE_SPINLOCK(fnhe_lock);
588 static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
592 rt = rcu_dereference(fnhe->fnhe_rth_input);
594 RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
595 dst_dev_put(&rt->dst);
596 dst_release(&rt->dst);
598 rt = rcu_dereference(fnhe->fnhe_rth_output);
600 RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
601 dst_dev_put(&rt->dst);
602 dst_release(&rt->dst);
606 static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
608 struct fib_nh_exception *fnhe, *oldest;
610 oldest = rcu_dereference(hash->chain);
611 for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
612 fnhe = rcu_dereference(fnhe->fnhe_next)) {
613 if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
616 fnhe_flush_routes(oldest);
620 static inline u32 fnhe_hashfun(__be32 daddr)
622 static u32 fnhe_hashrnd __read_mostly;
625 net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd));
626 hval = jhash_1word((__force u32) daddr, fnhe_hashrnd);
627 return hash_32(hval, FNHE_HASH_SHIFT);
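/*
 * Userspace sketch (not part of the build) of the bucket selection in
 * fnhe_hashfun() above: a keyed 32-bit mix followed by the kernel's
 * multiplicative hash_32(), which keeps the well-mixed high bits.
 * jhash_1word() is stubbed with an XOR purely for illustration.
 */
#if 0
#include <stdint.h>

#define GOLDEN_RATIO_32	0x61C88647u
#define HASH_SHIFT	11	/* FNHE_HASH_SHIFT */

static uint32_t hash_32_model(uint32_t val, unsigned int bits)
{
	return (val * GOLDEN_RATIO_32) >> (32 - bits);
}

static uint32_t fnhe_bucket(uint32_t daddr, uint32_t rnd)
{
	uint32_t hval = daddr ^ rnd;	/* stand-in for jhash_1word() */

	return hash_32_model(hval, HASH_SHIFT);	/* 0..2047 */
}
#endif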
630 static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
632 rt->rt_pmtu = fnhe->fnhe_pmtu;
633 rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
634 rt->dst.expires = fnhe->fnhe_expires;
637 rt->rt_flags |= RTCF_REDIRECTED;
638 rt->rt_gw_family = AF_INET;
639 rt->rt_gw4 = fnhe->fnhe_gw;
643 static void update_or_create_fnhe(struct fib_nh_common *nhc, __be32 daddr,
644 __be32 gw, u32 pmtu, bool lock,
645 unsigned long expires)
647 struct fnhe_hash_bucket *hash;
648 struct fib_nh_exception *fnhe;
654 genid = fnhe_genid(dev_net(nhc->nhc_dev));
655 hval = fnhe_hashfun(daddr);
657 spin_lock_bh(&fnhe_lock);
659 hash = rcu_dereference(nhc->nhc_exceptions);
661 hash = kcalloc(FNHE_HASH_SIZE, sizeof(*hash), GFP_ATOMIC);
664 rcu_assign_pointer(nhc->nhc_exceptions, hash);
670 for (fnhe = rcu_dereference(hash->chain); fnhe;
671 fnhe = rcu_dereference(fnhe->fnhe_next)) {
672 if (fnhe->fnhe_daddr == daddr)
678 if (fnhe->fnhe_genid != genid)
679 fnhe->fnhe_genid = genid;
683 fnhe->fnhe_pmtu = pmtu;
684 fnhe->fnhe_mtu_locked = lock;
686 fnhe->fnhe_expires = max(1UL, expires);
687 /* Update all cached dsts too */
688 rt = rcu_dereference(fnhe->fnhe_rth_input);
690 fill_route_from_fnhe(rt, fnhe);
691 rt = rcu_dereference(fnhe->fnhe_rth_output);
693 fill_route_from_fnhe(rt, fnhe);
695 if (depth > FNHE_RECLAIM_DEPTH)
696 fnhe = fnhe_oldest(hash);
698 fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
702 fnhe->fnhe_next = hash->chain;
703 rcu_assign_pointer(hash->chain, fnhe);
705 fnhe->fnhe_genid = genid;
706 fnhe->fnhe_daddr = daddr;
708 fnhe->fnhe_pmtu = pmtu;
709 fnhe->fnhe_mtu_locked = lock;
710 fnhe->fnhe_expires = max(1UL, expires);
712 /* Exception created; mark the cached routes for the nexthop
713 * stale, so anyone caching it rechecks if this exception applies to them.
716 rt = rcu_dereference(nhc->nhc_rth_input);
718 rt->dst.obsolete = DST_OBSOLETE_KILL;
720 for_each_possible_cpu(i) {
721 struct rtable __rcu **prt;
722 prt = per_cpu_ptr(nhc->nhc_pcpu_rth_output, i);
723 rt = rcu_dereference(*prt);
725 rt->dst.obsolete = DST_OBSOLETE_KILL;
729 fnhe->fnhe_stamp = jiffies;
732 spin_unlock_bh(&fnhe_lock);
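/*
 * Userspace sketch (not part of the build) of the bucket discipline in
 * update_or_create_fnhe() above: an exact match is updated in place;
 * otherwise a new entry is pushed at the head, unless the chain already
 * exceeds the reclaim depth, in which case the stalest entry is recycled
 * (as fnhe_oldest() does) instead of growing the chain.  Names here are
 * illustrative, not the kernel's.
 */
#if 0
#include <stdlib.h>

#define RECLAIM_DEPTH	5	/* stand-in for FNHE_RECLAIM_DEPTH */

struct exc {
	unsigned int daddr;
	unsigned long stamp;
	struct exc *next;
};

static struct exc *lookup_or_create(struct exc **head, unsigned int daddr,
				    unsigned long now)
{
	struct exc *e, *oldest = *head;
	int depth = 0;

	for (e = *head; e; e = e->next, depth++) {
		if (e->daddr == daddr)
			goto out;
		if (e->stamp < oldest->stamp)
			oldest = e;
	}

	if (depth > RECLAIM_DEPTH) {
		e = oldest;		/* recycle the stalest entry */
	} else {
		e = calloc(1, sizeof(*e));
		if (!e)
			return NULL;
		e->next = *head;
		*head = e;
	}
	e->daddr = daddr;
out:
	e->stamp = now;
	return e;
}
#endif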
735 static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
738 __be32 new_gw = icmp_hdr(skb)->un.gateway;
739 __be32 old_gw = ip_hdr(skb)->saddr;
740 struct net_device *dev = skb->dev;
741 struct in_device *in_dev;
742 struct fib_result res;
746 switch (icmp_hdr(skb)->code & 7) {
748 case ICMP_REDIR_NETTOS:
749 case ICMP_REDIR_HOST:
750 case ICMP_REDIR_HOSTTOS:
757 if (rt->rt_gw_family != AF_INET || rt->rt_gw4 != old_gw)
760 in_dev = __in_dev_get_rcu(dev);
765 if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
766 ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
767 ipv4_is_zeronet(new_gw))
768 goto reject_redirect;
770 if (!IN_DEV_SHARED_MEDIA(in_dev)) {
771 if (!inet_addr_onlink(in_dev, new_gw, old_gw))
772 goto reject_redirect;
773 if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
774 goto reject_redirect;
776 if (inet_addr_type(net, new_gw) != RTN_UNICAST)
777 goto reject_redirect;
780 n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
782 n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
784 if (!(n->nud_state & NUD_VALID)) {
785 neigh_event_send(n, NULL);
787 if (fib_lookup(net, fl4, &res, 0) == 0) {
788 struct fib_nh_common *nhc = FIB_RES_NHC(res);
790 update_or_create_fnhe(nhc, fl4->daddr, new_gw,
792 jiffies + ip_rt_gc_timeout);
795 rt->dst.obsolete = DST_OBSOLETE_KILL;
796 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
803 #ifdef CONFIG_IP_ROUTE_VERBOSE
804 if (IN_DEV_LOG_MARTIANS(in_dev)) {
805 const struct iphdr *iph = (const struct iphdr *) skb->data;
806 __be32 daddr = iph->daddr;
807 __be32 saddr = iph->saddr;
809 net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
810 " Advised path = %pI4 -> %pI4\n",
811 &old_gw, dev->name, &new_gw,
818 static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
822 const struct iphdr *iph = (const struct iphdr *) skb->data;
823 struct net *net = dev_net(skb->dev);
824 int oif = skb->dev->ifindex;
825 u8 tos = RT_TOS(iph->tos);
826 u8 prot = iph->protocol;
827 u32 mark = skb->mark;
829 rt = (struct rtable *) dst;
831 __build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
832 __ip_do_redirect(rt, skb, &fl4, true);
835 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
837 struct rtable *rt = (struct rtable *)dst;
838 struct dst_entry *ret = dst;
841 if (dst->obsolete > 0) {
844 } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
855 * 1. The first ip_rt_redirect_number redirects are sent
856 * with exponential backoff, then we stop sending them at all,
857 * assuming that the host ignores our redirects.
858 * 2. If we did not see packets requiring redirects
859 * during ip_rt_redirect_silence, we assume that the host
860 * forgot the redirected route and start sending redirects again.
862 * This algorithm is much cheaper and more intelligent than dumb load limiting
865 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
866 * and "frag. need" (breaks PMTU discovery) in icmp.c.
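/*
 * Userspace model (not part of the build) of the two rules above.  A
 * single counter stands in for the kernel's rate_tokens/n_redirects
 * pair: send while fewer than 'max_redirects' redirects have been
 * ignored, spacing them exponentially, and reset once the peer has
 * been quiet for 'silence' jiffies.
 */
#if 0
#include <stdbool.h>

struct peer_state {
	unsigned long rate_last;	/* time of last redirect sent */
	int n_redirects;		/* redirects sent and ignored */
};

static bool should_send_redirect(struct peer_state *p, unsigned long now,
				 unsigned long load, unsigned long silence,
				 int max_redirects)
{
	if (now - p->rate_last > silence)
		p->n_redirects = 0;	/* host forgot the redirected route */

	if (p->n_redirects >= max_redirects)
		return false;		/* host ignores us: stop entirely */

	/* exponential backoff: 1x, 2x, 4x, ... the base load interval */
	if (p->n_redirects == 0 ||
	    now - p->rate_last > (load << p->n_redirects)) {
		p->rate_last = now;
		p->n_redirects++;
		return true;
	}
	return false;
}
#endif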
869 void ip_rt_send_redirect(struct sk_buff *skb)
871 struct rtable *rt = skb_rtable(skb);
872 struct in_device *in_dev;
873 struct inet_peer *peer;
879 in_dev = __in_dev_get_rcu(rt->dst.dev);
880 if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
884 log_martians = IN_DEV_LOG_MARTIANS(in_dev);
885 vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
888 net = dev_net(rt->dst.dev);
889 peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
891 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
892 rt_nexthop(rt, ip_hdr(skb)->daddr));
896 /* No redirected packets during ip_rt_redirect_silence;
897 * reset the algorithm.
899 if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
900 peer->rate_tokens = 0;
901 peer->n_redirects = 0;
904 /* Too many ignored redirects; do not send anything,
905 * but update peer->rate_last to the last seen redirected packet.
907 if (peer->n_redirects >= ip_rt_redirect_number) {
908 peer->rate_last = jiffies;
912 /* Check for load limit; set rate_last to the latest sent redirect.
915 if (peer->rate_tokens == 0 ||
918 (ip_rt_redirect_load << peer->rate_tokens)))) {
919 __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
921 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
922 peer->rate_last = jiffies;
925 #ifdef CONFIG_IP_ROUTE_VERBOSE
927 peer->rate_tokens == ip_rt_redirect_number)
928 net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
929 &ip_hdr(skb)->saddr, inet_iif(skb),
930 &ip_hdr(skb)->daddr, &gw);
937 static int ip_error(struct sk_buff *skb)
939 struct rtable *rt = skb_rtable(skb);
940 struct net_device *dev = skb->dev;
941 struct in_device *in_dev;
942 struct inet_peer *peer;
948 if (netif_is_l3_master(skb->dev)) {
949 dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
954 in_dev = __in_dev_get_rcu(dev);
956 /* IP on this device is disabled. */
960 net = dev_net(rt->dst.dev);
961 if (!IN_DEV_FORWARD(in_dev)) {
962 switch (rt->dst.error) {
964 __IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
968 __IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
974 switch (rt->dst.error) {
979 code = ICMP_HOST_UNREACH;
982 code = ICMP_NET_UNREACH;
983 __IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
986 code = ICMP_PKT_FILTERED;
990 peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
991 l3mdev_master_ifindex(skb->dev), 1);
996 peer->rate_tokens += now - peer->rate_last;
997 if (peer->rate_tokens > ip_rt_error_burst)
998 peer->rate_tokens = ip_rt_error_burst;
999 peer->rate_last = now;
1000 if (peer->rate_tokens >= ip_rt_error_cost)
1001 peer->rate_tokens -= ip_rt_error_cost;
1007 icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
1009 out: kfree_skb(skb);
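/*
 * Userspace model (not part of the build) of the ICMP error rate limit
 * in ip_error() above: a token bucket that accrues one token per jiffy,
 * is capped at ip_rt_error_burst, and charges ip_rt_error_cost per
 * error actually sent.
 */
#if 0
#include <stdbool.h>

struct err_bucket {
	unsigned long tokens;
	unsigned long last;	/* jiffies at the last refill */
};

static bool may_send_icmp_error(struct err_bucket *b, unsigned long now,
				unsigned long burst, unsigned long cost)
{
	b->tokens += now - b->last;
	if (b->tokens > burst)
		b->tokens = burst;
	b->last = now;

	if (b->tokens >= cost) {
		b->tokens -= cost;
		return true;
	}
	return false;
}
#endif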
1013 static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
1015 struct dst_entry *dst = &rt->dst;
1016 u32 old_mtu = ipv4_mtu(dst);
1017 struct fib_result res;
1020 if (ip_mtu_locked(dst))
1026 if (mtu < ip_rt_min_pmtu) {
1028 mtu = min(old_mtu, ip_rt_min_pmtu);
1031 if (rt->rt_pmtu == mtu && !lock &&
1032 time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
1036 if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
1037 struct fib_nh_common *nhc = FIB_RES_NHC(res);
1039 update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
1040 jiffies + ip_rt_mtu_expires);
1045 static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
1046 struct sk_buff *skb, u32 mtu)
1048 struct rtable *rt = (struct rtable *) dst;
1051 ip_rt_build_flow_key(&fl4, sk, skb);
1052 __ip_rt_update_pmtu(rt, &fl4, mtu);
1055 void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
1056 int oif, u8 protocol)
1058 const struct iphdr *iph = (const struct iphdr *) skb->data;
1061 u32 mark = IP4_REPLY_MARK(net, skb->mark);
1063 __build_flow_key(net, &fl4, NULL, iph, oif,
1064 RT_TOS(iph->tos), protocol, mark, 0);
1065 rt = __ip_route_output_key(net, &fl4);
1067 __ip_rt_update_pmtu(rt, &fl4, mtu);
1071 EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
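/*
 * Sketch (not part of the build) of a typical caller, modelled on the
 * tunnel ICMP error handlers: on a "fragmentation needed" error the
 * reported MTU is pushed into the routing layer, which records a
 * nexthop exception for the path.  example_err_handler() is
 * hypothetical; the protocol argument is illustrative.
 */
#if 0
static void example_err_handler(struct sk_buff *skb, u32 info)
{
	struct net *net = dev_net(skb->dev);

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH &&
	    icmp_hdr(skb)->code == ICMP_FRAG_NEEDED)
		ipv4_update_pmtu(skb, net, info, 0, IPPROTO_IPIP);
}
#endif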
1073 static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1075 const struct iphdr *iph = (const struct iphdr *) skb->data;
1079 __build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);
1081 if (!fl4.flowi4_mark)
1082 fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);
1084 rt = __ip_route_output_key(sock_net(sk), &fl4);
1086 __ip_rt_update_pmtu(rt, &fl4, mtu);
1091 void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1093 const struct iphdr *iph = (const struct iphdr *) skb->data;
1096 struct dst_entry *odst = NULL;
1098 struct net *net = sock_net(sk);
1102 if (!ip_sk_accept_pmtu(sk))
1105 odst = sk_dst_get(sk);
1107 if (sock_owned_by_user(sk) || !odst) {
1108 __ipv4_sk_update_pmtu(skb, sk, mtu);
1112 __build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
1114 rt = (struct rtable *)odst;
1115 if (odst->obsolete && !odst->ops->check(odst, 0)) {
1116 rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1123 __ip_rt_update_pmtu((struct rtable *) xfrm_dst_path(&rt->dst), &fl4, mtu);
1125 if (!dst_check(&rt->dst, 0)) {
1127 dst_release(&rt->dst);
1129 rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1137 sk_dst_set(sk, &rt->dst);
1143 EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
1145 void ipv4_redirect(struct sk_buff *skb, struct net *net,
1146 int oif, u8 protocol)
1148 const struct iphdr *iph = (const struct iphdr *) skb->data;
1152 __build_flow_key(net, &fl4, NULL, iph, oif,
1153 RT_TOS(iph->tos), protocol, 0, 0);
1154 rt = __ip_route_output_key(net, &fl4);
1156 __ip_do_redirect(rt, skb, &fl4, false);
1160 EXPORT_SYMBOL_GPL(ipv4_redirect);
1162 void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
1164 const struct iphdr *iph = (const struct iphdr *) skb->data;
1167 struct net *net = sock_net(sk);
1169 __build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
1170 rt = __ip_route_output_key(net, &fl4);
1172 __ip_do_redirect(rt, skb, &fl4, false);
1176 EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
1178 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1180 struct rtable *rt = (struct rtable *) dst;
1182 /* All IPV4 dsts are created with ->obsolete set to the value
1183 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
1184 * into this function always.
1186 * When a PMTU/redirect information update invalidates a route,
1187 * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
1188 * DST_OBSOLETE_DEAD.
1190 if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
1195 static void ipv4_send_dest_unreach(struct sk_buff *skb)
1197 struct ip_options opt;
1200 /* Recompile ip options since IPCB may not be valid anymore.
1201 * Also check we have a reasonable ipv4 header.
1203 if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
1204 ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
1207 memset(&opt, 0, sizeof(opt));
1208 if (ip_hdr(skb)->ihl > 5) {
1209 if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
1211 opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
1214 res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
1220 __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
1223 static void ipv4_link_failure(struct sk_buff *skb)
1227 ipv4_send_dest_unreach(skb);
1229 rt = skb_rtable(skb);
1231 dst_set_expires(&rt->dst, 0);
1234 static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
1236 pr_debug("%s: %pI4 -> %pI4, %s\n",
1237 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1238 skb->dev ? skb->dev->name : "?");
1245 We do not cache the source address of the outgoing interface,
1246 because it is used only by the IP RR, TS and SRR options,
1247 so it is out of the fast path.
1249 BTW remember: "addr" is allowed to be unaligned
1253 void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
1257 if (rt_is_output_route(rt))
1258 src = ip_hdr(skb)->saddr;
1260 struct fib_result res;
1261 struct iphdr *iph = ip_hdr(skb);
1262 struct flowi4 fl4 = {
1263 .daddr = iph->daddr,
1264 .saddr = iph->saddr,
1265 .flowi4_tos = RT_TOS(iph->tos),
1266 .flowi4_oif = rt->dst.dev->ifindex,
1267 .flowi4_iif = skb->dev->ifindex,
1268 .flowi4_mark = skb->mark,
1272 if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
1273 src = fib_result_prefsrc(dev_net(rt->dst.dev), &res);
1275 src = inet_select_addr(rt->dst.dev,
1276 rt_nexthop(rt, iph->daddr),
1280 memcpy(addr, &src, 4);
1283 #ifdef CONFIG_IP_ROUTE_CLASSID
1284 static void set_class_tag(struct rtable *rt, u32 tag)
1286 if (!(rt->dst.tclassid & 0xFFFF))
1287 rt->dst.tclassid |= tag & 0xFFFF;
1288 if (!(rt->dst.tclassid & 0xFFFF0000))
1289 rt->dst.tclassid |= tag & 0xFFFF0000;
1293 static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
1295 unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
1296 unsigned int advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
1299 return min(advmss, IPV4_MAX_PMTU - header_size);
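/*
 * Worked example (not part of the build) of the computation above:
 * advertised MSS is the path MTU minus the 20-byte IPv4 and 20-byte
 * TCP headers, floored at ip_rt_min_advmss (256 by default) and capped
 * at IPV4_MAX_PMTU minus the same headers.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int header_size = 20 + 20;	/* iphdr + tcphdr */
	unsigned int min_advmss = 256;
	unsigned int mtu = 1500;
	unsigned int advmss = mtu - header_size;

	if (advmss < min_advmss)
		advmss = min_advmss;

	printf("advmss for mtu %u: %u\n", mtu, advmss);	/* 1460 */
	return 0;
}
#endif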
1302 static unsigned int ipv4_mtu(const struct dst_entry *dst)
1304 const struct rtable *rt = (const struct rtable *) dst;
1305 unsigned int mtu = rt->rt_pmtu;
1307 if (!mtu || time_after_eq(jiffies, rt->dst.expires))
1308 mtu = dst_metric_raw(dst, RTAX_MTU);
1313 mtu = READ_ONCE(dst->dev->mtu);
1315 if (unlikely(ip_mtu_locked(dst))) {
1316 if (rt->rt_gw_family && mtu > 576)
1320 mtu = min_t(unsigned int, mtu, IP_MAX_MTU);
1322 return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
1325 static void ip_del_fnhe(struct fib_nh_common *nhc, __be32 daddr)
1327 struct fnhe_hash_bucket *hash;
1328 struct fib_nh_exception *fnhe, __rcu **fnhe_p;
1329 u32 hval = fnhe_hashfun(daddr);
1331 spin_lock_bh(&fnhe_lock);
1333 hash = rcu_dereference_protected(nhc->nhc_exceptions,
1334 lockdep_is_held(&fnhe_lock));
1337 fnhe_p = &hash->chain;
1338 fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
1340 if (fnhe->fnhe_daddr == daddr) {
1341 rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
1342 fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
1343 /* set fnhe_daddr to 0 to ensure it won't bind with
1344 * new dsts in rt_bind_exception().
1346 fnhe->fnhe_daddr = 0;
1347 fnhe_flush_routes(fnhe);
1348 kfree_rcu(fnhe, rcu);
1351 fnhe_p = &fnhe->fnhe_next;
1352 fnhe = rcu_dereference_protected(fnhe->fnhe_next,
1353 lockdep_is_held(&fnhe_lock));
1356 spin_unlock_bh(&fnhe_lock);
1359 static struct fib_nh_exception *find_exception(struct fib_nh_common *nhc,
1362 struct fnhe_hash_bucket *hash = rcu_dereference(nhc->nhc_exceptions);
1363 struct fib_nh_exception *fnhe;
1369 hval = fnhe_hashfun(daddr);
1371 for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
1372 fnhe = rcu_dereference(fnhe->fnhe_next)) {
1373 if (fnhe->fnhe_daddr == daddr) {
1374 if (fnhe->fnhe_expires &&
1375 time_after(jiffies, fnhe->fnhe_expires)) {
1376 ip_del_fnhe(nhc, daddr);
1386 * 1. mtu on route is locked - use it
1387 * 2. mtu from nexthop exception
1388 * 3. mtu from egress device
1391 u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr)
1393 struct fib_nh_common *nhc = res->nhc;
1394 struct net_device *dev = nhc->nhc_dev;
1395 struct fib_info *fi = res->fi;
1398 if (dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu ||
1399 fi->fib_metrics->metrics[RTAX_LOCK - 1] & (1 << RTAX_MTU))
1403 struct fib_nh_exception *fnhe;
1405 fnhe = find_exception(nhc, daddr);
1406 if (fnhe && !time_after_eq(jiffies, fnhe->fnhe_expires))
1407 mtu = fnhe->fnhe_pmtu;
1411 mtu = min(READ_ONCE(dev->mtu), IP_MAX_MTU);
1413 return mtu - lwtunnel_headroom(nhc->nhc_lwtstate, mtu);
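/*
 * Sketch (not part of the build) of the precedence spelled out above
 * for forwarded packets: a locked route MTU wins, then an unexpired
 * nexthop exception, then the egress device MTU, all reduced by any
 * lwtunnel encap headroom.  The struct is illustrative.
 */
#if 0
#include <stdint.h>

struct mtu_inputs {
	uint32_t locked_mtu;	/* nonzero if RTAX_MTU is locked */
	uint32_t fnhe_mtu;	/* nonzero if an unexpired exception exists */
	uint32_t dev_mtu;	/* egress device MTU */
	uint32_t lwt_headroom;	/* lwtunnel encap overhead, if any */
};

static uint32_t effective_fib_mtu(const struct mtu_inputs *in)
{
	uint32_t mtu = in->locked_mtu;

	if (!mtu)
		mtu = in->fnhe_mtu;
	if (!mtu)
		mtu = in->dev_mtu;

	return mtu - in->lwt_headroom;
}
#endif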
1416 static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
1417 __be32 daddr, const bool do_cache)
1421 spin_lock_bh(&fnhe_lock);
1423 if (daddr == fnhe->fnhe_daddr) {
1424 struct rtable __rcu **porig;
1425 struct rtable *orig;
1426 int genid = fnhe_genid(dev_net(rt->dst.dev));
1428 if (rt_is_input_route(rt))
1429 porig = &fnhe->fnhe_rth_input;
1431 porig = &fnhe->fnhe_rth_output;
1432 orig = rcu_dereference(*porig);
1434 if (fnhe->fnhe_genid != genid) {
1435 fnhe->fnhe_genid = genid;
1437 fnhe->fnhe_pmtu = 0;
1438 fnhe->fnhe_expires = 0;
1439 fnhe->fnhe_mtu_locked = false;
1440 fnhe_flush_routes(fnhe);
1443 fill_route_from_fnhe(rt, fnhe);
1446 rt->rt_gw_family = AF_INET;
1451 rcu_assign_pointer(*porig, rt);
1453 dst_dev_put(&orig->dst);
1454 dst_release(&orig->dst);
1459 fnhe->fnhe_stamp = jiffies;
1461 spin_unlock_bh(&fnhe_lock);
1466 static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt)
1468 struct rtable *orig, *prev, **p;
1471 if (rt_is_input_route(rt)) {
1472 p = (struct rtable **)&nhc->nhc_rth_input;
1474 p = (struct rtable **)raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
1478 /* hold dst before doing cmpxchg() to avoid a race condition on this dst
1482 prev = cmpxchg(p, orig, rt);
1485 dst_dev_put(&orig->dst);
1486 dst_release(&orig->dst);
1489 dst_release(&rt->dst);
1496 struct uncached_list {
1498 struct list_head head;
1501 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
1503 void rt_add_uncached_list(struct rtable *rt)
1505 struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);
1507 rt->rt_uncached_list = ul;
1509 spin_lock_bh(&ul->lock);
1510 list_add_tail(&rt->rt_uncached, &ul->head);
1511 spin_unlock_bh(&ul->lock);
1514 void rt_del_uncached_list(struct rtable *rt)
1516 if (!list_empty(&rt->rt_uncached)) {
1517 struct uncached_list *ul = rt->rt_uncached_list;
1519 spin_lock_bh(&ul->lock);
1520 list_del(&rt->rt_uncached);
1521 spin_unlock_bh(&ul->lock);
1525 static void ipv4_dst_destroy(struct dst_entry *dst)
1527 struct rtable *rt = (struct rtable *)dst;
1529 ip_dst_metrics_put(dst);
1530 rt_del_uncached_list(rt);
1533 void rt_flush_dev(struct net_device *dev)
1538 for_each_possible_cpu(cpu) {
1539 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
1541 spin_lock_bh(&ul->lock);
1542 list_for_each_entry(rt, &ul->head, rt_uncached) {
1543 if (rt->dst.dev != dev)
1545 rt->dst.dev = blackhole_netdev;
1546 dev_hold(rt->dst.dev);
1549 spin_unlock_bh(&ul->lock);
1553 static bool rt_cache_valid(const struct rtable *rt)
1556 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
1560 static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
1561 const struct fib_result *res,
1562 struct fib_nh_exception *fnhe,
1563 struct fib_info *fi, u16 type, u32 itag,
1564 const bool do_cache)
1566 bool cached = false;
1569 struct fib_nh_common *nhc = FIB_RES_NHC(*res);
1571 if (nhc->nhc_gw_family && nhc->nhc_scope == RT_SCOPE_LINK) {
1572 rt->rt_gw_family = nhc->nhc_gw_family;
1573 /* only INET and INET6 are supported */
1574 if (likely(nhc->nhc_gw_family == AF_INET))
1575 rt->rt_gw4 = nhc->nhc_gw.ipv4;
1577 rt->rt_gw6 = nhc->nhc_gw.ipv6;
1580 ip_dst_init_metrics(&rt->dst, fi->fib_metrics);
1582 #ifdef CONFIG_IP_ROUTE_CLASSID
1583 if (nhc->nhc_family == AF_INET) {
1586 nh = container_of(nhc, struct fib_nh, nh_common);
1587 rt->dst.tclassid = nh->nh_tclassid;
1590 rt->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
1592 cached = rt_bind_exception(rt, fnhe, daddr, do_cache);
1594 cached = rt_cache_route(nhc, rt);
1595 if (unlikely(!cached)) {
1596 /* Routes we intend to cache in nexthop exception or
1597 * FIB nexthop have the DST_NOCACHE bit clear.
1598 * However, if we are unsuccessful at storing this
1599 * route into the cache we really need to set it.
1602 rt->rt_gw_family = AF_INET;
1605 rt_add_uncached_list(rt);
1608 rt_add_uncached_list(rt);
1610 #ifdef CONFIG_IP_ROUTE_CLASSID
1611 #ifdef CONFIG_IP_MULTIPLE_TABLES
1612 set_class_tag(rt, res->tclassid);
1614 set_class_tag(rt, itag);
1618 struct rtable *rt_dst_alloc(struct net_device *dev,
1619 unsigned int flags, u16 type,
1620 bool nopolicy, bool noxfrm, bool will_cache)
1624 rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
1625 (will_cache ? 0 : DST_HOST) |
1626 (nopolicy ? DST_NOPOLICY : 0) |
1627 (noxfrm ? DST_NOXFRM : 0));
1630 rt->rt_genid = rt_genid_ipv4(dev_net(dev));
1631 rt->rt_flags = flags;
1633 rt->rt_is_input = 0;
1636 rt->rt_mtu_locked = 0;
1637 rt->rt_gw_family = 0;
1639 INIT_LIST_HEAD(&rt->rt_uncached);
1641 rt->dst.output = ip_output;
1642 if (flags & RTCF_LOCAL)
1643 rt->dst.input = ip_local_deliver;
1648 EXPORT_SYMBOL(rt_dst_alloc);
1650 struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt)
1652 struct rtable *new_rt;
1654 new_rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
1658 new_rt->rt_genid = rt_genid_ipv4(dev_net(dev));
1659 new_rt->rt_flags = rt->rt_flags;
1660 new_rt->rt_type = rt->rt_type;
1661 new_rt->rt_is_input = rt->rt_is_input;
1662 new_rt->rt_iif = rt->rt_iif;
1663 new_rt->rt_pmtu = rt->rt_pmtu;
1664 new_rt->rt_mtu_locked = rt->rt_mtu_locked;
1665 new_rt->rt_gw_family = rt->rt_gw_family;
1666 if (rt->rt_gw_family == AF_INET)
1667 new_rt->rt_gw4 = rt->rt_gw4;
1668 else if (rt->rt_gw_family == AF_INET6)
1669 new_rt->rt_gw6 = rt->rt_gw6;
1670 INIT_LIST_HEAD(&new_rt->rt_uncached);
1672 new_rt->dst.flags |= DST_HOST;
1673 new_rt->dst.input = rt->dst.input;
1674 new_rt->dst.output = rt->dst.output;
1675 new_rt->dst.error = rt->dst.error;
1676 new_rt->dst.lastuse = jiffies;
1677 new_rt->dst.lwtstate = lwtstate_get(rt->dst.lwtstate);
1681 EXPORT_SYMBOL(rt_dst_clone);
1683 /* called in rcu_read_lock() section */
1684 int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1685 u8 tos, struct net_device *dev,
1686 struct in_device *in_dev, u32 *itag)
1690 /* Primary sanity checks. */
1694 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1695 skb->protocol != htons(ETH_P_IP))
1698 if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
1701 if (ipv4_is_zeronet(saddr)) {
1702 if (!ipv4_is_local_multicast(daddr) &&
1703 ip_hdr(skb)->protocol != IPPROTO_IGMP)
1706 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
1714 /* called in rcu_read_lock() section */
1715 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1716 u8 tos, struct net_device *dev, int our)
1718 struct in_device *in_dev = __in_dev_get_rcu(dev);
1719 unsigned int flags = RTCF_MULTICAST;
1724 err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag);
1729 flags |= RTCF_LOCAL;
1731 rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
1732 IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
1736 #ifdef CONFIG_IP_ROUTE_CLASSID
1737 rth->dst.tclassid = itag;
1739 rth->dst.output = ip_rt_bug;
1740 rth->rt_is_input = 1;
1742 #ifdef CONFIG_IP_MROUTE
1743 if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
1744 rth->dst.input = ip_mr_input;
1746 RT_CACHE_STAT_INC(in_slow_mc);
1748 skb_dst_set(skb, &rth->dst);
1753 static void ip_handle_martian_source(struct net_device *dev,
1754 struct in_device *in_dev,
1755 struct sk_buff *skb,
1759 RT_CACHE_STAT_INC(in_martian_src);
1760 #ifdef CONFIG_IP_ROUTE_VERBOSE
1761 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
1763 * RFC1812 recommendation, if source is martian,
1764 * the only hint is MAC header.
1766 pr_warn("martian source %pI4 from %pI4, on dev %s\n",
1767 &daddr, &saddr, dev->name);
1768 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1769 print_hex_dump(KERN_WARNING, "ll header: ",
1770 DUMP_PREFIX_OFFSET, 16, 1,
1771 skb_mac_header(skb),
1772 dev->hard_header_len, false);
1778 /* called in rcu_read_lock() section */
1779 static int __mkroute_input(struct sk_buff *skb,
1780 const struct fib_result *res,
1781 struct in_device *in_dev,
1782 __be32 daddr, __be32 saddr, u32 tos)
1784 struct fib_nh_common *nhc = FIB_RES_NHC(*res);
1785 struct net_device *dev = nhc->nhc_dev;
1786 struct fib_nh_exception *fnhe;
1789 struct in_device *out_dev;
1793 /* get a working reference to the output device */
1794 out_dev = __in_dev_get_rcu(dev);
1796 net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
1800 err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
1801 in_dev->dev, in_dev, &itag);
1803 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1809 do_cache = res->fi && !itag;
1810 if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
1811 skb->protocol == htons(ETH_P_IP)) {
1814 gw = nhc->nhc_gw_family == AF_INET ? nhc->nhc_gw.ipv4 : 0;
1815 if (IN_DEV_SHARED_MEDIA(out_dev) ||
1816 inet_addr_onlink(out_dev, saddr, gw))
1817 IPCB(skb)->flags |= IPSKB_DOREDIRECT;
1820 if (skb->protocol != htons(ETH_P_IP)) {
1821 /* Not IP (i.e. ARP). Do not create a route if it is
1822 * invalid for proxy arp. DNAT routes are always valid.
1824 * The proxy arp feature has been extended to allow ARP
1825 * replies back out the same interface, to support
1826 * Private VLAN switch technologies. See arp.c.
1828 if (out_dev == in_dev &&
1829 IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
1835 fnhe = find_exception(nhc, daddr);
1838 rth = rcu_dereference(fnhe->fnhe_rth_input);
1840 rth = rcu_dereference(nhc->nhc_rth_input);
1841 if (rt_cache_valid(rth)) {
1842 skb_dst_set_noref(skb, &rth->dst);
1847 rth = rt_dst_alloc(out_dev->dev, 0, res->type,
1848 IN_DEV_CONF_GET(in_dev, NOPOLICY),
1849 IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
1855 rth->rt_is_input = 1;
1856 RT_CACHE_STAT_INC(in_slow_tot);
1858 rth->dst.input = ip_forward;
1860 rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag,
1862 lwtunnel_set_redirect(&rth->dst);
1863 skb_dst_set(skb, &rth->dst);
1870 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1871 /* To make ICMP packets follow the right flow, the multipath hash is
1872 * calculated from the inner IP addresses.
1874 static void ip_multipath_l3_keys(const struct sk_buff *skb,
1875 struct flow_keys *hash_keys)
1877 const struct iphdr *outer_iph = ip_hdr(skb);
1878 const struct iphdr *key_iph = outer_iph;
1879 const struct iphdr *inner_iph;
1880 const struct icmphdr *icmph;
1881 struct iphdr _inner_iph;
1882 struct icmphdr _icmph;
1884 if (likely(outer_iph->protocol != IPPROTO_ICMP))
1887 if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
1890 icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
1895 if (icmph->type != ICMP_DEST_UNREACH &&
1896 icmph->type != ICMP_REDIRECT &&
1897 icmph->type != ICMP_TIME_EXCEEDED &&
1898 icmph->type != ICMP_PARAMETERPROB)
1901 inner_iph = skb_header_pointer(skb,
1902 outer_iph->ihl * 4 + sizeof(_icmph),
1903 sizeof(_inner_iph), &_inner_iph);
1907 key_iph = inner_iph;
1909 hash_keys->addrs.v4addrs.src = key_iph->saddr;
1910 hash_keys->addrs.v4addrs.dst = key_iph->daddr;
1913 /* if skb is set it will be used and fl4 can be NULL */
1914 int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
1915 const struct sk_buff *skb, struct flow_keys *flkeys)
1917 u32 multipath_hash = fl4 ? fl4->flowi4_multipath_hash : 0;
1918 struct flow_keys hash_keys;
1921 switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
1923 memset(&hash_keys, 0, sizeof(hash_keys));
1924 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1926 ip_multipath_l3_keys(skb, &hash_keys);
1928 hash_keys.addrs.v4addrs.src = fl4->saddr;
1929 hash_keys.addrs.v4addrs.dst = fl4->daddr;
1933 /* skb is currently provided only when forwarding */
1935 unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
1936 struct flow_keys keys;
1938 /* short-circuit if we already have L4 hash present */
1940 return skb_get_hash_raw(skb) >> 1;
1942 memset(&hash_keys, 0, sizeof(hash_keys));
1945 skb_flow_dissect_flow_keys(skb, &keys, flag);
1949 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1950 hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
1951 hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
1952 hash_keys.ports.src = flkeys->ports.src;
1953 hash_keys.ports.dst = flkeys->ports.dst;
1954 hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
1956 memset(&hash_keys, 0, sizeof(hash_keys));
1957 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1958 hash_keys.addrs.v4addrs.src = fl4->saddr;
1959 hash_keys.addrs.v4addrs.dst = fl4->daddr;
1960 hash_keys.ports.src = fl4->fl4_sport;
1961 hash_keys.ports.dst = fl4->fl4_dport;
1962 hash_keys.basic.ip_proto = fl4->flowi4_proto;
1966 memset(&hash_keys, 0, sizeof(hash_keys));
1967 /* skb is currently provided only when forwarding */
1969 struct flow_keys keys;
1971 skb_flow_dissect_flow_keys(skb, &keys, 0);
1972 /* Inner can be v4 or v6 */
1973 if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
1974 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1975 hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
1976 hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
1977 } else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
1978 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1979 hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
1980 hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
1981 hash_keys.tags.flow_label = keys.tags.flow_label;
1982 hash_keys.basic.ip_proto = keys.basic.ip_proto;
1984 /* Same as case 0 */
1985 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1986 ip_multipath_l3_keys(skb, &hash_keys);
1989 /* Same as case 0 */
1990 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1991 hash_keys.addrs.v4addrs.src = fl4->saddr;
1992 hash_keys.addrs.v4addrs.dst = fl4->daddr;
1996 mhash = flow_hash_from_keys(&hash_keys);
1999 mhash = jhash_2words(mhash, multipath_hash, 0);
2003 #endif /* CONFIG_IP_ROUTE_MULTIPATH */
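/*
 * Userspace sketch (not part of the build) of how a hash computed by
 * fib_multipath_hash() above selects a nexthop: hash-threshold
 * selection (RFC 2992), where each path owns a slice of the hash space
 * proportional to its weight.  Field names are illustrative, not the
 * kernel's.
 */
#if 0
#include <stdint.h>

struct mp_path {
	int upper_bound;	/* precomputed per-path threshold */
};

static int select_path(const struct mp_path *paths, int n, uint32_t hash)
{
	int i;

	/* pick the first path whose threshold covers the hash */
	for (i = 0; i < n; i++)
		if ((int)hash <= paths[i].upper_bound)
			return i;
	return n - 1;
}
#endif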
2005 static int ip_mkroute_input(struct sk_buff *skb,
2006 struct fib_result *res,
2007 struct in_device *in_dev,
2008 __be32 daddr, __be32 saddr, u32 tos,
2009 struct flow_keys *hkeys)
2011 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2012 if (res->fi && fib_info_num_path(res->fi) > 1) {
2013 int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);
2015 fib_select_multipath(res, h);
2019 /* create a routing cache entry */
2020 return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
2024 * NOTE. We drop all packets that have a local source
2025 * address, because every properly looped-back packet
2026 * must already have the correct destination attached by the output routine.
2028 * This approach solves two big problems:
2029 * 1. Non-simplex devices are handled properly.
2030 * 2. IP spoofing attempts are filtered with a 100% guarantee.
2031 * called with rcu_read_lock()
2034 static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2035 u8 tos, struct net_device *dev,
2036 struct fib_result *res)
2038 struct in_device *in_dev = __in_dev_get_rcu(dev);
2039 struct flow_keys *flkeys = NULL, _flkeys;
2040 struct net *net = dev_net(dev);
2041 struct ip_tunnel_info *tun_info;
2043 unsigned int flags = 0;
2047 bool do_cache = true;
2049 /* IP on this device is disabled. */
2054 /* Check for the weirdest martians, which cannot be detected by fib_lookup.
2058 tun_info = skb_tunnel_info(skb);
2059 if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
2060 fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
2062 fl4.flowi4_tun_key.tun_id = 0;
2065 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
2066 goto martian_source;
2070 if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
2073 /* Accept zero addresses only for limited broadcast;
2074 * I do not even know whether to fix this or not. Waiting for complaints :-)
2076 if (ipv4_is_zeronet(saddr))
2077 goto martian_source;
2079 if (ipv4_is_zeronet(daddr))
2080 goto martian_destination;
2082 /* The following code tries to avoid calling IN_DEV_NET_ROUTE_LOCALNET(),
2083 * calling it at most once if daddr and/or saddr are loopback addresses.
2085 if (ipv4_is_loopback(daddr)) {
2086 if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
2087 goto martian_destination;
2088 } else if (ipv4_is_loopback(saddr)) {
2089 if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
2090 goto martian_source;
2094 * Now we are ready to route packet.
2097 fl4.flowi4_iif = dev->ifindex;
2098 fl4.flowi4_mark = skb->mark;
2099 fl4.flowi4_tos = tos;
2100 fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
2101 fl4.flowi4_flags = 0;
2104 fl4.flowi4_uid = sock_net_uid(net, NULL);
2106 if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) {
2109 fl4.flowi4_proto = 0;
2114 err = fib_lookup(net, &fl4, res, 0);
2116 if (!IN_DEV_FORWARD(in_dev))
2117 err = -EHOSTUNREACH;
2121 if (res->type == RTN_BROADCAST) {
2122 if (IN_DEV_BFORWARD(in_dev))
2124 /* do not cache if bc_forwarding is enabled */
2125 if (IPV4_DEVCONF_ALL(net, BC_FORWARDING))
2130 if (res->type == RTN_LOCAL) {
2131 err = fib_validate_source(skb, saddr, daddr, tos,
2132 0, dev, in_dev, &itag);
2134 goto martian_source;
2138 if (!IN_DEV_FORWARD(in_dev)) {
2139 err = -EHOSTUNREACH;
2142 if (res->type != RTN_UNICAST)
2143 goto martian_destination;
2146 err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos, flkeys);
2150 if (skb->protocol != htons(ETH_P_IP))
2153 if (!ipv4_is_zeronet(saddr)) {
2154 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
2157 goto martian_source;
2159 flags |= RTCF_BROADCAST;
2160 res->type = RTN_BROADCAST;
2161 RT_CACHE_STAT_INC(in_brd);
2164 do_cache &= res->fi && !itag;
2166 struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2168 rth = rcu_dereference(nhc->nhc_rth_input);
2169 if (rt_cache_valid(rth)) {
2170 skb_dst_set_noref(skb, &rth->dst);
2176 rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
2177 flags | RTCF_LOCAL, res->type,
2178 IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
2182 rth->dst.output = ip_rt_bug;
2183 #ifdef CONFIG_IP_ROUTE_CLASSID
2184 rth->dst.tclassid = itag;
2186 rth->rt_is_input = 1;
2188 RT_CACHE_STAT_INC(in_slow_tot);
2189 if (res->type == RTN_UNREACHABLE) {
2190 rth->dst.input = ip_error;
2191 rth->dst.error = -err;
2192 rth->rt_flags &= ~RTCF_LOCAL;
2196 struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2198 rth->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
2199 if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
2200 WARN_ON(rth->dst.input == lwtunnel_input);
2201 rth->dst.lwtstate->orig_input = rth->dst.input;
2202 rth->dst.input = lwtunnel_input;
2205 if (unlikely(!rt_cache_route(nhc, rth)))
2206 rt_add_uncached_list(rth);
2208 skb_dst_set(skb, &rth->dst);
2213 RT_CACHE_STAT_INC(in_no_route);
2214 res->type = RTN_UNREACHABLE;
2220 * Do not cache martian addresses: they should be logged (RFC1812)
2222 martian_destination:
2223 RT_CACHE_STAT_INC(in_martian_dst);
2224 #ifdef CONFIG_IP_ROUTE_VERBOSE
2225 if (IN_DEV_LOG_MARTIANS(in_dev))
2226 net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
2227 &daddr, &saddr, dev->name);
2239 ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2243 int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2244 u8 tos, struct net_device *dev)
2246 struct fib_result res;
2249 tos &= IPTOS_RT_MASK;
2251 err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res);
2256 EXPORT_SYMBOL(ip_route_input_noref);
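/*
 * Sketch (not part of the build) of the typical caller, modelled on the
 * receive path in ip_input.c: once the IPv4 header has been validated,
 * the route is resolved and attached to the skb as a noref dst under
 * RCU.  example_rcv_finish() is hypothetical.
 */
#if 0
static int example_rcv_finish(struct sk_buff *skb, struct net_device *dev)
{
	const struct iphdr *iph = ip_hdr(skb);
	int err;

	err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
				   iph->tos, dev);
	if (err)
		kfree_skb(skb);	/* illustrative error handling */
	return err;
}
#endif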
2258 /* called with rcu_read_lock held */
2259 int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2260 u8 tos, struct net_device *dev, struct fib_result *res)
2262 /* Multicast recognition logic was moved from the route cache to here.
2263 The problem was that too many Ethernet cards have broken/missing
2264 hardware multicast filters :-( As a result, a host on a multicast
2265 network acquires a lot of useless route cache entries, a sort of
2266 SDR messages from all over the world. Now we try to get rid of them.
2267 Really, provided the software IP multicast filter is organized
2268 reasonably (at least, hashed), it does not result in a slowdown
2269 compared with route cache reject entries.
2270 Note that multicast routers are not affected, because
2271 a route cache entry is created eventually.
2273 if (ipv4_is_multicast(daddr)) {
2274 struct in_device *in_dev = __in_dev_get_rcu(dev);
2280 our = ip_check_mc_rcu(in_dev, daddr, saddr,
2281 ip_hdr(skb)->protocol);
2283 /* check l3 master if no match yet */
2284 if (!our && netif_is_l3_slave(dev)) {
2285 struct in_device *l3_in_dev;
2287 l3_in_dev = __in_dev_get_rcu(skb->dev);
2289 our = ip_check_mc_rcu(l3_in_dev, daddr, saddr,
2290 ip_hdr(skb)->protocol);
2294 #ifdef CONFIG_IP_MROUTE
2296 (!ipv4_is_local_multicast(daddr) &&
2297 IN_DEV_MFORWARD(in_dev))
2300 err = ip_route_input_mc(skb, daddr, saddr,
2306 return ip_route_input_slow(skb, daddr, saddr, tos, dev, res);
2309 /* called with rcu_read_lock() */
2310 static struct rtable *__mkroute_output(const struct fib_result *res,
2311 const struct flowi4 *fl4, int orig_oif,
2312 struct net_device *dev_out,
2315 struct fib_info *fi = res->fi;
2316 struct fib_nh_exception *fnhe;
2317 struct in_device *in_dev;
2318 u16 type = res->type;
2322 in_dev = __in_dev_get_rcu(dev_out);
2324 return ERR_PTR(-EINVAL);
2326 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
2327 if (ipv4_is_loopback(fl4->saddr) &&
2328 !(dev_out->flags & IFF_LOOPBACK) &&
2329 !netif_is_l3_master(dev_out))
2330 return ERR_PTR(-EINVAL);
2332 if (ipv4_is_lbcast(fl4->daddr))
2333 type = RTN_BROADCAST;
2334 else if (ipv4_is_multicast(fl4->daddr))
2335 type = RTN_MULTICAST;
2336 else if (ipv4_is_zeronet(fl4->daddr))
2337 return ERR_PTR(-EINVAL);
2339 if (dev_out->flags & IFF_LOOPBACK)
2340 flags |= RTCF_LOCAL;
2343 if (type == RTN_BROADCAST) {
2344 flags |= RTCF_BROADCAST | RTCF_LOCAL;
2346 } else if (type == RTN_MULTICAST) {
2347 flags |= RTCF_MULTICAST | RTCF_LOCAL;
2348 if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
2350 flags &= ~RTCF_LOCAL;
2353 /* If the multicast route does not exist, use the
2354 * default one, but do not use a gateway in this case.
2357 if (fi && res->prefixlen < 4)
2359 } else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
2360 (orig_oif != dev_out->ifindex)) {
2361 /* For local routes that require a particular output interface
2362 * we do not want to cache the result. Caching the result
2363 * causes incorrect behaviour when there are multiple source
2364 * addresses on the interface, the end result being that if the
2365 * intended recipient is waiting on that interface for the
2366 * packet he won't receive it because it will be delivered on
2367 * the loopback interface and the IP_PKTINFO ipi_ifindex will
2368 * be set to the loopback interface as well.
2374 do_cache &= fi != NULL;
2376 struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2377 struct rtable __rcu **prth;
2379 fnhe = find_exception(nhc, fl4->daddr);
2383 prth = &fnhe->fnhe_rth_output;
2385 if (unlikely(fl4->flowi4_flags &
2386 FLOWI_FLAG_KNOWN_NH &&
2387 !(nhc->nhc_gw_family &&
2388 nhc->nhc_scope == RT_SCOPE_LINK))) {
2392 prth = raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
2394 rth = rcu_dereference(*prth);
2395 if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))
2400 rth = rt_dst_alloc(dev_out, flags, type,
2401 IN_DEV_CONF_GET(in_dev, NOPOLICY),
2402 IN_DEV_CONF_GET(in_dev, NOXFRM),
2405 return ERR_PTR(-ENOBUFS);
2407 rth->rt_iif = orig_oif;
2409 RT_CACHE_STAT_INC(out_slow_tot);
2411 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2412 if (flags & RTCF_LOCAL &&
2413 !(dev_out->flags & IFF_LOOPBACK)) {
2414 rth->dst.output = ip_mc_output;
2415 RT_CACHE_STAT_INC(out_slow_mc);
2417 #ifdef CONFIG_IP_MROUTE
2418 if (type == RTN_MULTICAST) {
2419 if (IN_DEV_MFORWARD(in_dev) &&
2420 !ipv4_is_local_multicast(fl4->daddr)) {
2421 rth->dst.input = ip_mr_input;
2422 rth->dst.output = ip_mc_output;
2428 rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0, do_cache);
2429 lwtunnel_set_redirect(&rth->dst);
2435 * Major route resolver routine.
2438 struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
2439 const struct sk_buff *skb)
2441 __u8 tos = RT_FL_TOS(fl4);
2442 struct fib_result res = {
2450 fl4->flowi4_iif = LOOPBACK_IFINDEX;
2451 fl4->flowi4_tos = tos & IPTOS_RT_MASK;
2452 fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
2453 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
2456 rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
2461 EXPORT_SYMBOL_GPL(ip_route_output_key_hash);
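/*
 * Sketch (not part of the build) of the conventional caller pattern for
 * the resolver above, via the ip_route_output_key() wrapper from
 * <net/route.h>: fill a flow key, look up, check IS_ERR(), and release
 * the route with ip_rt_put() when done.  example_output_lookup() is
 * hypothetical.
 */
#if 0
static int example_output_lookup(struct net *net, __be32 daddr)
{
	struct flowi4 fl4 = {
		.daddr = daddr,
		.flowi4_proto = IPPROTO_UDP,
	};
	struct rtable *rt;

	rt = ip_route_output_key(net, &fl4);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	/* ... use rt->dst ... */
	ip_rt_put(rt);
	return 0;
}
#endif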
2463 struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
2464 struct fib_result *res,
2465 const struct sk_buff *skb)
2467 struct net_device *dev_out = NULL;
2468 int orig_oif = fl4->flowi4_oif;
2469 unsigned int flags = 0;
2471 int err = -ENETUNREACH;
2474 rth = ERR_PTR(-EINVAL);
2475 if (ipv4_is_multicast(fl4->saddr) ||
2476 ipv4_is_lbcast(fl4->saddr) ||
2477 ipv4_is_zeronet(fl4->saddr))
2480 /* I removed check for oif == dev_out->oif here.
2481 It was wrong for two reasons:
2482 1. ip_dev_find(net, saddr) can return the wrong iface if saddr
2483 is assigned to multiple interfaces.
2484 2. Moreover, we are allowed to send packets with saddr
2485 of another iface. --ANK
2488 if (fl4->flowi4_oif == 0 &&
2489 (ipv4_is_multicast(fl4->daddr) ||
2490 ipv4_is_lbcast(fl4->daddr))) {
2491 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
		dev_out = __ip_dev_find(net, fl4->saddr, false);
		if (!dev_out)
			goto out;

		/* Special hack: the user can direct multicasts
		   and limited broadcast via the necessary interface
		   without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
		   This hack is not just for fun; it allows
		   vic, vat and friends to work.
		   They bind the socket to loopback, set the ttl to zero
		   and expect that it will work.
		   From the viewpoint of the routing cache they are broken,
		   because we are not allowed to build a multicast path
		   with a loopback source addr (the routing cache
		   cannot know that the ttl is zero, so the packet
		   will not leave this host and the route looks valid).
		   Luckily, this hack is a good workaround.
		 */

		fl4->flowi4_oif = dev_out->ifindex;
		goto make_route;
	}
2515 if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
2516 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
		if (!__ip_dev_find(net, fl4->saddr, false))
			goto out;
2523 if (fl4->flowi4_oif) {
2524 dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
		rth = ERR_PTR(-ENODEV);
		if (!dev_out)
			goto out;
2529 /* RACE: Check return value of inet_select_addr instead. */
2530 if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
			rth = ERR_PTR(-ENETUNREACH);
			goto out;
		}
		if (ipv4_is_local_multicast(fl4->daddr) ||
		    ipv4_is_lbcast(fl4->daddr) ||
		    fl4->flowi4_proto == IPPROTO_IGMP) {
			if (!fl4->saddr)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			goto make_route;
		}
		if (!fl4->saddr) {
			if (ipv4_is_multicast(fl4->daddr))
				fl4->saddr = inet_select_addr(dev_out, 0,
							      fl4->flowi4_scope);
			else if (!fl4->daddr)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_HOST);
		}
	}
	if (!fl4->daddr) {
		fl4->daddr = fl4->saddr;
		if (!fl4->daddr)
			fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
2556 dev_out = net->loopback_dev;
2557 fl4->flowi4_oif = LOOPBACK_IFINDEX;
2558 res->type = RTN_LOCAL;
2559 flags |= RTCF_LOCAL;
	err = fib_lookup(net, fl4, res, 0);
	if (err) {
		res->fi = NULL;
		res->table = NULL;
2567 if (fl4->flowi4_oif &&
2568 (ipv4_is_multicast(fl4->daddr) ||
2569 !netif_index_is_l3_master(net, fl4->flowi4_oif))) {
			/* Apparently, routing tables are wrong. Assume
			   that the destination is on-link.

			   WHY? DW.
			   Because we are allowed to send to an iface
			   even if it has NO routes and NO assigned
			   addresses. When oif is specified, routing
			   tables are looked up with only one purpose:
			   to catch if the destination is gatewayed rather
			   than direct. Moreover, if MSG_DONTROUTE is set,
			   we send the packet, ignoring both routing tables
			   and ifaddr state. --ANK

			   We could make this work even if oif is unknown
			   (IPv6 likely does), but we do not.
			 */
			if (fl4->saddr == 0)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			res->type = RTN_UNICAST;
			goto make_route;
		}
		rth = ERR_PTR(err);
		goto out;
	}
2598 if (res->type == RTN_LOCAL) {
		if (!fl4->saddr) {
			if (res->fi->fib_prefsrc)
				fl4->saddr = res->fi->fib_prefsrc;
			else
				fl4->saddr = fl4->daddr;
		}
2606 /* L3 master device is the loopback for that domain */
		dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ? :
			net->loopback_dev;
		/* make sure orig_oif points to fib result device even
		 * though packet rx/tx happens over loopback or l3mdev
		 */
2613 orig_oif = FIB_RES_OIF(*res);
2615 fl4->flowi4_oif = dev_out->ifindex;
2616 flags |= RTCF_LOCAL;
2620 fib_select_path(net, res, fl4, skb);
	dev_out = FIB_RES_DEV(*res);
	fl4->flowi4_oif = dev_out->ifindex;

make_route:
	rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);

out:
	return rth;
}
static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
{
	return NULL;
}
static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;
}
static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
					  struct sk_buff *skb, u32 mtu)
{
}

static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
				       struct sk_buff *skb)
{
}

static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
					  unsigned long old)
{
	return NULL;
}
static struct dst_ops ipv4_dst_blackhole_ops = {
	.family			= AF_INET,
2663 .check = ipv4_blackhole_dst_check,
2664 .mtu = ipv4_blackhole_mtu,
2665 .default_advmss = ipv4_default_advmss,
2666 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
2667 .redirect = ipv4_rt_blackhole_redirect,
2668 .cow_metrics = ipv4_rt_blackhole_cow_metrics,
2669 .neigh_lookup = ipv4_neigh_lookup,
2672 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2674 struct rtable *ort = (struct rtable *) dst_orig;
	rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_DEAD, 0);
	if (rt) {
		struct dst_entry *new = &rt->dst;
2682 new->input = dst_discard;
2683 new->output = dst_discard_out;
2685 new->dev = net->loopback_dev;
2689 rt->rt_is_input = ort->rt_is_input;
2690 rt->rt_iif = ort->rt_iif;
2691 rt->rt_pmtu = ort->rt_pmtu;
2692 rt->rt_mtu_locked = ort->rt_mtu_locked;
2694 rt->rt_genid = rt_genid_ipv4(net);
2695 rt->rt_flags = ort->rt_flags;
2696 rt->rt_type = ort->rt_type;
2697 rt->rt_gw_family = ort->rt_gw_family;
2698 if (rt->rt_gw_family == AF_INET)
2699 rt->rt_gw4 = ort->rt_gw4;
2700 else if (rt->rt_gw_family == AF_INET6)
2701 rt->rt_gw6 = ort->rt_gw6;
2703 INIT_LIST_HEAD(&rt->rt_uncached);
2706 dst_release(dst_orig);
	return rt ? &rt->dst : ERR_PTR(-ENOMEM);
}
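/* Note (not part of the original file): a blackhole entry keeps the
 * identity fields of the original route but discards all traffic and
 * ignores PMTU/redirect updates.  As far as this file is concerned it
 * exists for the xfrm code, which (e.g. via its blackhole_route hook)
 * hands callers a safe dst while IPsec state resolution is pending.
 */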
2711 struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
2712 const struct sock *sk)
	struct rtable *rt = __ip_route_output_key(net, flp4);

	if (IS_ERR(rt))
		return rt;

	if (flp4->flowi4_proto)
		rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
							flowi4_to_flowi(flp4),
							sk, 0);

	return rt;
}
2726 EXPORT_SYMBOL_GPL(ip_route_output_flow);
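/* Illustrative sketch (not part of the original file): how a socket
 * caller might build the flow before calling ip_route_output_flow().
 * The sk/oif/daddr/saddr/port variables are assumptions of the
 * example; flowi4_init_output() is the stock helper from <net/flow.h>.
 *
 *	struct flowi4 fl4;
 *	struct rtable *rt;
 *
 *	flowi4_init_output(&fl4, oif, sk->sk_mark, RT_CONN_FLAGS(sk),
 *			   RT_SCOPE_UNIVERSE, IPPROTO_UDP, 0,
 *			   daddr, saddr, dport, sport, sk->sk_uid);
 *	rt = ip_route_output_flow(net, &fl4, sk);
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 */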
2728 /* called with rcu_read_lock held */
2729 static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
2730 struct rtable *rt, u32 table_id, struct flowi4 *fl4,
2731 struct sk_buff *skb, u32 portid, u32 seq)
2734 struct nlmsghdr *nlh;
2735 unsigned long expires = 0;
2737 u32 metrics[RTAX_MAX];
	nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), 0);
	if (!nlh)
		return -EMSGSIZE;
2743 r = nlmsg_data(nlh);
2744 r->rtm_family = AF_INET;
2745 r->rtm_dst_len = 32;
2747 r->rtm_tos = fl4 ? fl4->flowi4_tos : 0;
2748 r->rtm_table = table_id < 256 ? table_id : RT_TABLE_COMPAT;
2749 if (nla_put_u32(skb, RTA_TABLE, table_id))
2750 goto nla_put_failure;
2751 r->rtm_type = rt->rt_type;
2752 r->rtm_scope = RT_SCOPE_UNIVERSE;
2753 r->rtm_protocol = RTPROT_UNSPEC;
2754 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2755 if (rt->rt_flags & RTCF_NOTIFY)
2756 r->rtm_flags |= RTM_F_NOTIFY;
2757 if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
2758 r->rtm_flags |= RTCF_DOREDIRECT;
2760 if (nla_put_in_addr(skb, RTA_DST, dst))
2761 goto nla_put_failure;
2763 r->rtm_src_len = 32;
2764 if (nla_put_in_addr(skb, RTA_SRC, src))
2765 goto nla_put_failure;
	if (rt->dst.dev &&
	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
		goto nla_put_failure;
2770 #ifdef CONFIG_IP_ROUTE_CLASSID
2771 if (rt->dst.tclassid &&
2772 nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
2773 goto nla_put_failure;
2775 if (fl4 && !rt_is_input_route(rt) &&
2776 fl4->saddr != src) {
2777 if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
2778 goto nla_put_failure;
2780 if (rt->rt_gw_family == AF_INET &&
2781 nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) {
2782 goto nla_put_failure;
2783 } else if (rt->rt_gw_family == AF_INET6) {
		int alen = sizeof(struct in6_addr);
		struct nlattr *nla;
		struct rtvia *via;
		nla = nla_reserve(skb, RTA_VIA, alen + 2);
		if (!nla)
			goto nla_put_failure;
2792 via = nla_data(nla);
2793 via->rtvia_family = AF_INET6;
2794 memcpy(via->rtvia_addr, &rt->rt_gw6, alen);
	expires = rt->dst.expires;
	if (expires) {
		unsigned long now = jiffies;

		if (time_before(now, expires))
			expires -= now;
		else
			expires = 0;
	}
2807 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
2808 if (rt->rt_pmtu && expires)
2809 metrics[RTAX_MTU - 1] = rt->rt_pmtu;
2810 if (rt->rt_mtu_locked && expires)
2811 metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
2812 if (rtnetlink_put_metrics(skb, metrics) < 0)
2813 goto nla_put_failure;
2816 if (fl4->flowi4_mark &&
2817 nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
2818 goto nla_put_failure;
	if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
	    nla_put_u32(skb, RTA_UID,
			from_kuid_munged(current_user_ns(),
					 fl4->flowi4_uid)))
		goto nla_put_failure;
2826 if (rt_is_input_route(rt)) {
2827 #ifdef CONFIG_IP_MROUTE
2828 if (ipv4_is_multicast(dst) &&
2829 !ipv4_is_local_multicast(dst) &&
2830 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
			int err = ipmr_get_route(net, skb,
						 fl4->saddr, fl4->daddr,
						 rt, 0);

			if (err <= 0) {
				if (err == 0)
					return 0;
				goto nla_put_failure;
			}
		} else
#endif
2842 if (nla_put_u32(skb, RTA_IIF, fl4->flowi4_iif))
2843 goto nla_put_failure;
2847 error = rt->dst.error;
2849 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
2850 goto nla_put_failure;
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
2860 static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
2861 struct netlink_callback *cb, u32 table_id,
2862 struct fnhe_hash_bucket *bucket, int genid,
2863 int *fa_index, int fa_start)
2867 for (i = 0; i < FNHE_HASH_SIZE; i++) {
2868 struct fib_nh_exception *fnhe;
2870 for (fnhe = rcu_dereference(bucket[i].chain); fnhe;
2871 fnhe = rcu_dereference(fnhe->fnhe_next)) {
			struct rtable *rt;
			int err;

			if (*fa_index < fa_start)
				goto next;

			if (fnhe->fnhe_genid != genid)
				goto next;

			if (fnhe->fnhe_expires &&
			    time_after(jiffies, fnhe->fnhe_expires))
				goto next;

			rt = rcu_dereference(fnhe->fnhe_rth_input);
			if (!rt)
				rt = rcu_dereference(fnhe->fnhe_rth_output);
			if (!rt)
				goto next;

			err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt,
					   table_id, NULL, skb,
					   NETLINK_CB(cb->skb).portid,
					   cb->nlh->nlmsg_seq);
			if (err)
				return err;
next:
			(*fa_index)++;
		}
	}

	return 0;
}
2905 int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
2906 u32 table_id, struct fib_info *fi,
2907 int *fa_index, int fa_start)
2909 struct net *net = sock_net(cb->skb->sk);
2910 int nhsel, genid = fnhe_genid(net);
2912 for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) {
2913 struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel);
		struct fnhe_hash_bucket *bucket;
		int err;

		if (nhc->nhc_flags & RTNH_F_DEAD)
			continue;

		rcu_read_lock();
		bucket = rcu_dereference(nhc->nhc_exceptions);
		err = 0;
		if (bucket)
			err = fnhe_dump_bucket(net, skb, cb, table_id, bucket,
					       genid, fa_index, fa_start);
		rcu_read_unlock();
		if (err)
			return err;
	}

	return 0;
}
static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
						   u8 ip_proto, __be16 sport,
						   __be16 dport)
{
	struct sk_buff *skb;
	struct iphdr *iph;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return NULL;
	/* Reserve room for dummy headers; this skb can pass
	 * through a good chunk of the routing engine.
	 */
2948 skb_reset_mac_header(skb);
2949 skb_reset_network_header(skb);
2950 skb->protocol = htons(ETH_P_IP);
2951 iph = skb_put(skb, sizeof(struct iphdr));
	iph->protocol = ip_proto;
	iph->saddr = src;
	iph->daddr = dst;
	iph->version = 0x4;
	iph->frag_off = htons(IP_DF);
	iph->ihl = 0x5;

	skb_set_transport_header(skb, skb->len);
	switch (iph->protocol) {
	case IPPROTO_UDP: {
		struct udphdr *udph;
		udph = skb_put_zero(skb, sizeof(struct udphdr));
		udph->source = sport;
		udph->dest = dport;
		udph->len = sizeof(struct udphdr);
		break;
	}
	case IPPROTO_TCP: {
		struct tcphdr *tcph;
2974 tcph = skb_put_zero(skb, sizeof(struct tcphdr));
		tcph->source	= sport;
		tcph->dest	= dport;
		tcph->doff	= sizeof(struct tcphdr) / 4;
		tcph->rst = 1;
		tcph->check = ~tcp_v4_check(sizeof(struct tcphdr),
					    src, dst, 0);
		break;
	}
2983 case IPPROTO_ICMP: {
2984 struct icmphdr *icmph;
2986 icmph = skb_put_zero(skb, sizeof(struct icmphdr));
		icmph->type = ICMP_ECHO;
		icmph->code = 0;
	}
	}

	return skb;
}
static int inet_rtm_valid_getroute_req(struct sk_buff *skb,
				       const struct nlmsghdr *nlh,
				       struct nlattr **tb,
				       struct netlink_ext_ack *extack)
{
	struct rtmsg *rtm;
	int i, err;
3003 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
		NL_SET_ERR_MSG(extack,
			       "ipv4: Invalid header for route get request");
		return -EINVAL;
	}
3009 if (!netlink_strict_get_check(skb))
3010 return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
3011 rtm_ipv4_policy, extack);
3013 rtm = nlmsg_data(nlh);
3014 if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) ||
3015 (rtm->rtm_dst_len && rtm->rtm_dst_len != 32) ||
3016 rtm->rtm_table || rtm->rtm_protocol ||
3017 rtm->rtm_scope || rtm->rtm_type) {
		NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for route get request");
		return -EINVAL;
	}
	if (rtm->rtm_flags & ~(RTM_F_NOTIFY |
			       RTM_F_LOOKUP_TABLE |
			       RTM_F_FIB_MATCH)) {
		NL_SET_ERR_MSG(extack, "ipv4: Unsupported rtm_flags for route get request");
		return -EINVAL;
	}
3029 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
					    rtm_ipv4_policy, extack);
	if (err)
		return err;
3034 if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
3035 (tb[RTA_DST] && !rtm->rtm_dst_len)) {
		NL_SET_ERR_MSG(extack, "ipv4: rtm_src_len and rtm_dst_len must be 32 for IPv4");
		return -EINVAL;
	}
	for (i = 0; i <= RTA_MAX; i++) {
		if (!tb[i])
			continue;

		switch (i) {
		case RTA_IIF:
		case RTA_OIF:
		case RTA_SRC:
		case RTA_DST:
		case RTA_IP_PROTO:
		case RTA_SPORT:
		case RTA_DPORT:
		case RTA_MARK:
		case RTA_UID:
			break;
		default:
			NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in route get request");
			return -EINVAL;
		}
	}

	return 0;
}
3064 static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3065 struct netlink_ext_ack *extack)
3067 struct net *net = sock_net(in_skb->sk);
3068 struct nlattr *tb[RTA_MAX+1];
3069 u32 table_id = RT_TABLE_MAIN;
3070 __be16 sport = 0, dport = 0;
3071 struct fib_result res = {};
3072 u8 ip_proto = IPPROTO_UDP;
3073 struct rtable *rt = NULL;
	struct sk_buff *skb;
	struct rtmsg *rtm;
	struct flowi4 fl4 = {};
	__be32 dst = 0;
	__be32 src = 0;
	kuid_t uid;
	u32 iif;
	int err;
	int mark;
	err = inet_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
	if (err < 0)
		return err;
3088 rtm = nlmsg_data(nlh);
3089 src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
3090 dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
3091 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
	if (tb[RTA_UID])
		uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
	else
		uid = (iif ? INVALID_UID : current_uid());
3098 if (tb[RTA_IP_PROTO]) {
3099 err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
3100 &ip_proto, AF_INET, extack);
		if (err)
			return err;
	}

	if (tb[RTA_SPORT])
		sport = nla_get_be16(tb[RTA_SPORT]);

	if (tb[RTA_DPORT])
		dport = nla_get_be16(tb[RTA_DPORT]);
	skb = inet_rtm_getroute_build_skb(src, dst, ip_proto, sport, dport);
	if (!skb)
		return -ENOBUFS;

	fl4.daddr = dst;
	fl4.saddr = src;
	fl4.flowi4_tos = rtm->rtm_tos;
3118 fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
3119 fl4.flowi4_mark = mark;
3120 fl4.flowi4_uid = uid;
3122 fl4.fl4_sport = sport;
3124 fl4.fl4_dport = dport;
3125 fl4.flowi4_proto = ip_proto;
	rcu_read_lock();

	if (iif) {
		struct net_device *dev;

		dev = dev_get_by_index_rcu(net, iif);
		if (!dev) {
			err = -ENODEV;
			goto errout_rcu;
		}

		fl4.flowi4_iif = iif; /* for rt_fill_info */
		skb->dev = dev;
		skb->mark = mark;
		err = ip_route_input_rcu(skb, dst, src, rtm->rtm_tos,
					 dev, &res);

		rt = skb_rtable(skb);
		if (err == 0 && rt->dst.error)
			err = -rt->dst.error;
	} else {
		fl4.flowi4_iif = LOOPBACK_IFINDEX;
		skb->dev = net->loopback_dev;
		rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
		err = 0;
		if (IS_ERR(rt))
			err = PTR_ERR(rt);
		else
			skb_dst_set(skb, &rt->dst);
	}

	if (err)
		goto errout_rcu;
3161 if (rtm->rtm_flags & RTM_F_NOTIFY)
3162 rt->rt_flags |= RTCF_NOTIFY;
3164 if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE)
3165 table_id = res.table ? res.table->tb_id : 0;
3167 /* reset skb for netlink reply msg */
3169 skb_reset_network_header(skb);
3170 skb_reset_transport_header(skb);
3171 skb_reset_mac_header(skb);
	if (rtm->rtm_flags & RTM_F_FIB_MATCH) {
		if (!res.fi) {
			err = fib_props[res.type].error;
			if (!err)
				err = -EHOSTUNREACH;
			goto errout_rcu;
		}
		err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
				    nlh->nlmsg_seq, RTM_NEWROUTE, table_id,
				    rt->rt_type, res.prefix, res.prefixlen,
				    fl4.flowi4_tos, res.fi, 0);
	} else {
		err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
				   NETLINK_CB(in_skb).portid, nlh->nlmsg_seq);
	}
	if (err < 0)
		goto errout_rcu;
	rcu_read_unlock();

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);

errout_free:
	return err;
errout_rcu:
	rcu_read_unlock();
	kfree_skb(skb);
	goto errout_free;
}
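/* Illustrative note (not part of the original file): this doit handler
 * is what userspace exercises with RTM_GETROUTE requests, e.g.:
 *
 *	$ ip route get 192.0.2.1
 *	$ ip route get fibmatch 192.0.2.1	# sets RTM_F_FIB_MATCH
 *
 * The address above is a documentation example, not a default.
 */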
void ip_rt_multicast_event(struct in_device *in_dev)
{
	rt_cache_flush(dev_net(in_dev->dev));
}
3208 #ifdef CONFIG_SYSCTL
3209 static int ip_rt_gc_interval __read_mostly = 60 * HZ;
3210 static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
3211 static int ip_rt_gc_elasticity __read_mostly = 8;
3212 static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU;
3214 static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
3215 void __user *buffer,
3216 size_t *lenp, loff_t *ppos)
	struct net *net = (struct net *)__ctl->extra1;

	if (write) {
		rt_cache_flush(net);
		fnhe_genid_bump(net);
		return 0;
	}

	return -EINVAL;
}
3229 static struct ctl_table ipv4_route_table[] = {
3231 .procname = "gc_thresh",
3232 .data = &ipv4_dst_ops.gc_thresh,
3233 .maxlen = sizeof(int),
3235 .proc_handler = proc_dointvec,
3238 .procname = "max_size",
3239 .data = &ip_rt_max_size,
3240 .maxlen = sizeof(int),
3242 .proc_handler = proc_dointvec,
3245 /* Deprecated. Use gc_min_interval_ms */
3247 .procname = "gc_min_interval",
3248 .data = &ip_rt_gc_min_interval,
3249 .maxlen = sizeof(int),
3251 .proc_handler = proc_dointvec_jiffies,
3254 .procname = "gc_min_interval_ms",
3255 .data = &ip_rt_gc_min_interval,
3256 .maxlen = sizeof(int),
3258 .proc_handler = proc_dointvec_ms_jiffies,
3261 .procname = "gc_timeout",
3262 .data = &ip_rt_gc_timeout,
3263 .maxlen = sizeof(int),
3265 .proc_handler = proc_dointvec_jiffies,
3268 .procname = "gc_interval",
3269 .data = &ip_rt_gc_interval,
3270 .maxlen = sizeof(int),
3272 .proc_handler = proc_dointvec_jiffies,
3275 .procname = "redirect_load",
3276 .data = &ip_rt_redirect_load,
3277 .maxlen = sizeof(int),
3279 .proc_handler = proc_dointvec,
3282 .procname = "redirect_number",
3283 .data = &ip_rt_redirect_number,
3284 .maxlen = sizeof(int),
3286 .proc_handler = proc_dointvec,
3289 .procname = "redirect_silence",
3290 .data = &ip_rt_redirect_silence,
3291 .maxlen = sizeof(int),
3293 .proc_handler = proc_dointvec,
3296 .procname = "error_cost",
3297 .data = &ip_rt_error_cost,
3298 .maxlen = sizeof(int),
3300 .proc_handler = proc_dointvec,
3303 .procname = "error_burst",
3304 .data = &ip_rt_error_burst,
3305 .maxlen = sizeof(int),
3307 .proc_handler = proc_dointvec,
3310 .procname = "gc_elasticity",
3311 .data = &ip_rt_gc_elasticity,
3312 .maxlen = sizeof(int),
3314 .proc_handler = proc_dointvec,
3317 .procname = "mtu_expires",
3318 .data = &ip_rt_mtu_expires,
3319 .maxlen = sizeof(int),
3321 .proc_handler = proc_dointvec_jiffies,
3324 .procname = "min_pmtu",
3325 .data = &ip_rt_min_pmtu,
3326 .maxlen = sizeof(int),
3328 .proc_handler = proc_dointvec_minmax,
3329 .extra1 = &ip_min_valid_pmtu,
3332 .procname = "min_adv_mss",
3333 .data = &ip_rt_min_advmss,
3334 .maxlen = sizeof(int),
3336 .proc_handler = proc_dointvec,
3341 static const char ipv4_route_flush_procname[] = "flush";
3343 static struct ctl_table ipv4_route_flush_table[] = {
3345 .procname = ipv4_route_flush_procname,
3346 .maxlen = sizeof(int),
3348 .proc_handler = ipv4_sysctl_rtcache_flush,
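/* Illustrative note (not part of the original file): these knobs land
 * under /proc/sys/net/ipv4/route/, e.g.:
 *
 *	# sysctl net.ipv4.route.min_pmtu
 *	# echo 1 > /proc/sys/net/ipv4/route/flush
 *
 * Writing to "flush" invokes ipv4_sysctl_rtcache_flush() above, which
 * bumps the generation counters rather than walking any table.
 */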
3353 static __net_init int sysctl_route_net_init(struct net *net)
3355 struct ctl_table *tbl;
3357 tbl = ipv4_route_flush_table;
3358 if (!net_eq(net, &init_net)) {
		tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
		if (!tbl)
			goto err_dup;

		/* Don't export non-whitelisted sysctls to unprivileged users */
3364 if (net->user_ns != &init_user_ns) {
3365 if (tbl[0].procname != ipv4_route_flush_procname)
3366 tbl[0].procname = NULL;
3369 tbl[0].extra1 = net;
3371 net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
	if (!net->ipv4.route_hdr)
		goto err_reg;
	return 0;

err_reg:
	if (tbl != ipv4_route_flush_table)
		kfree(tbl);
err_dup:
	return -ENOMEM;
}
3383 static __net_exit void sysctl_route_net_exit(struct net *net)
3385 struct ctl_table *tbl;
3387 tbl = net->ipv4.route_hdr->ctl_table_arg;
3388 unregister_net_sysctl_table(net->ipv4.route_hdr);
	BUG_ON(tbl == ipv4_route_flush_table);
	kfree(tbl);
}
3393 static __net_initdata struct pernet_operations sysctl_route_ops = {
3394 .init = sysctl_route_net_init,
3395 .exit = sysctl_route_net_exit,
static __net_init int rt_genid_init(struct net *net)
{
	atomic_set(&net->ipv4.rt_genid, 0);
	atomic_set(&net->fnhe_genid, 0);
	atomic_set(&net->ipv4.dev_addr_genid, get_random_int());
	return 0;
}
3407 static __net_initdata struct pernet_operations rt_genid_ops = {
3408 .init = rt_genid_init,
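/* Note (not part of the original file): the per-netns rt_genid is the
 * cheap cache-invalidation mechanism in this file.  rt_cache_flush()
 * just bumps it; cached rtables whose recorded rt_genid no longer
 * matches are treated as expired on next use instead of being torn
 * down synchronously.
 */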
3411 static int __net_init ipv4_inetpeer_init(struct net *net)
	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);

	if (!bp)
		return -ENOMEM;
	inet_peer_base_init(bp);
	net->ipv4.peers = bp;
	return 0;
}
static void __net_exit ipv4_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv4.peers;

	net->ipv4.peers = NULL;
	inetpeer_invalidate_tree(bp);
	kfree(bp);
}
3431 static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
3432 .init = ipv4_inetpeer_init,
3433 .exit = ipv4_inetpeer_exit,
3436 #ifdef CONFIG_IP_ROUTE_CLASSID
3437 struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
3438 #endif /* CONFIG_IP_ROUTE_CLASSID */
3440 int __init ip_rt_init(void)
	ip_idents = kmalloc_array(IP_IDENTS_SZ, sizeof(*ip_idents),
				  GFP_KERNEL);
	if (!ip_idents)
		panic("IP: failed to allocate ip_idents\n");

	prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));

	ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
	if (!ip_tstamps)
		panic("IP: failed to allocate ip_tstamps\n");
3455 for_each_possible_cpu(cpu) {
3456 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
3458 INIT_LIST_HEAD(&ul->head);
3459 spin_lock_init(&ul->lock);
3461 #ifdef CONFIG_IP_ROUTE_CLASSID
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct),
				    __alignof__(struct ip_rt_acct));
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
3467 ipv4_dst_ops.kmem_cachep =
3468 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
3469 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3471 ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
3473 if (dst_entries_init(&ipv4_dst_ops) < 0)
3474 panic("IP: failed to allocate ipv4_dst_ops counter\n");
3476 if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
3477 panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
3479 ipv4_dst_ops.gc_thresh = ~0;
3480 ip_rt_max_size = INT_MAX;
3485 if (ip_rt_proc_init())
3486 pr_err("Unable to create route proc files\n");
3491 rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL,
3492 RTNL_FLAG_DOIT_UNLOCKED);
3494 #ifdef CONFIG_SYSCTL
3495 register_pernet_subsys(&sysctl_route_ops);
3497 register_pernet_subsys(&rt_genid_ops);
3498 register_pernet_subsys(&ipv4_inetpeer_ops);
3502 #ifdef CONFIG_SYSCTL
/*
 * We really need to sanitize the damn ipv4 init order, then all
 * this nonsense will go away.
 */
void __init ip_static_sysctl_init(void)
{
	register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
}