// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	Alan Cox		:	Verify area fixes.
 *	Alan Cox		:	cli() protects routing changes
 *	Rui Oliveira		:	ICMP routing table updates
 *	(rco@di.uminho.pt)		Routing table insertion and update
 *	Linus Torvalds		:	Rewrote bits to be sensible
 *	Alan Cox		:	Added BSD route gw semantics
 *	Alan Cox		:	Super /proc >4K
 *	Alan Cox		:	MTU in route table
 *	Alan Cox		:	MSS actually. Also added the window clamper.
 *	Sam Lantinga		:	Fixed route matching in rt_del()
 *	Alan Cox		:	Routing cache support.
 *	Alan Cox		:	Removed compatibility cruft.
 *	Alan Cox		:	RTF_REJECT support.
 *	Alan Cox		:	TCP irtt support.
 *	Jonathan Naylor		:	Added Metric support.
 *	Miquel van Smoorenburg	:	BSD API fixes.
 *	Miquel van Smoorenburg	:	Metrics.
 *	Alan Cox		:	Use __u32 properly
 *	Alan Cox		:	Aligned routing errors more closely with BSD;
 *					our system is still very different.
 *	Alan Cox		:	Faster /proc handling
 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *	Olaf Erb		:	irtt wasn't being copied right.
 *	Bjorn Ekwall		:	Kerneld route support.
 *	Alan Cox		:	Multicast fixed (I hope)
 *	Pavel Krauz		:	Limited broadcast fixed
 *	Mike McLagan		:	Routing by source
 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *	Andi Kleen		:	Load-limit warning messages.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *	Marc Boucher		:	routing by fwmark
 *	Robert Olsson		:	Added rt_cache statistics
 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
 *	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
 *	Ilia Sotnikov		:	Removed TOS from hash calculations
 */
#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/dst_metadata.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/ip_fib.h>
#include <net/nexthop.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/lwtunnel.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#include <linux/sysctl.h>
#include <net/secure_seq.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>

#include "fib_lookup.h"
#define RT_FL_TOS(oldflp4) \
	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
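
/* RTO_ONLINK borrows a bit outside IPTOS_RT_MASK in the TOS field of the
 * flow key to request a link-scoped lookup: ip_route_output_key_hash()
 * below maps it to RT_SCOPE_LINK before masking the TOS back down with
 * IPTOS_RT_MASK.
 */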
#define RT_GC_TIMEOUT (300*HZ)

static int ip_rt_max_size;
static int ip_rt_redirect_number __read_mostly	= 9;
static int ip_rt_redirect_load __read_mostly	= HZ / 50;
static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly	= HZ;
static int ip_rt_error_burst __read_mostly	= 5 * HZ;
static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
static u32 ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly	= 256;

static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
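
/* These defaults are runtime-tunable through the sysctl table this file
 * registers under /proc/sys/net/ipv4/route/ (min_pmtu, mtu_expires,
 * redirect_number, redirect_load, error_cost, ...).  For example,
 * ip_rt_min_pmtu defaults to 552 = 512 bytes of payload plus a 20-byte
 * IP header and a 20-byte TCP header.
 */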
/*
 *	Interface to generic destination cache.
 */
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int	 ipv4_default_advmss(const struct dst_entry *dst);
static unsigned int	 ipv4_mtu(const struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void		 ipv4_link_failure(struct sk_buff *skb);
static void		 ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
					   struct sk_buff *skb, u32 mtu);
static void		 ip_do_redirect(struct dst_entry *dst, struct sock *sk,
					struct sk_buff *skb);
static void		 ipv4_dst_destroy(struct dst_entry *dst);

static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	WARN_ON(1);
	return NULL;
}

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr);
static void		 ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr);
static struct dst_ops ipv4_dst_ops = {
	.family =		AF_INET,
	.check =		ipv4_dst_check,
	.default_advmss =	ipv4_default_advmss,
	.mtu =			ipv4_mtu,
	.cow_metrics =		ipv4_cow_metrics,
	.destroy =		ipv4_dst_destroy,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.redirect =		ip_do_redirect,
	.local_out =		__ip_local_out,
	.neigh_lookup =		ipv4_neigh_lookup,
	.confirm_neigh =	ipv4_confirm_neigh,
};
#define ECN_OR_COST(class)	TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};
EXPORT_SYMBOL(ip_tos2prio);
static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)
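
/* A raw (non-preemption-safe) per-cpu increment is sufficient here: these
 * counters are best-effort statistics exported via /proc/net/stat/rt_cache,
 * so an occasional lost increment is acceptable.
 */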
#ifdef CONFIG_PROC_FS
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos)
		return NULL;
	return SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
			   "HHUptod\tSpecDst");
	return 0;
}

static const struct seq_operations rt_cache_seq_ops = {
	.start  = rt_cache_seq_start,
	.next   = rt_cache_seq_next,
	.stop   = rt_cache_seq_stop,
	.show   = rt_cache_seq_show,
};

static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cache_seq_ops);
}

static const struct file_operations rt_cache_seq_fops = {
	.open	 = rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	(*pos)++;
	return NULL;
}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}

	seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
		   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
		   dst_entries_get_slow(&ipv4_dst_ops),
		   0, /* st->in_hit */
		   st->in_slow_tot,
		   st->in_slow_mc,
		   st->in_no_route,
		   st->in_brd,
		   st->in_martian_dst,
		   st->in_martian_src,
		   0, /* st->out_hit */
		   st->out_slow_tot,
		   st->out_slow_mc,
		   0, /* st->gc_total */
		   0, /* st->gc_ignored */
		   0, /* st->gc_goal_miss */
		   0, /* st->gc_dst_overflow */
		   0, /* st->in_hlist_search */
		   0  /* st->out_hlist_search */
		);
	return 0;
}

static const struct seq_operations rt_cpu_seq_ops = {
	.start  = rt_cpu_seq_start,
	.next   = rt_cpu_seq_next,
	.stop   = rt_cpu_seq_stop,
	.show   = rt_cpu_seq_show,
};

static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cpu_seq_ops);
}

static const struct file_operations rt_cpu_seq_fops = {
	.open	 = rt_cpu_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
	struct ip_rt_acct *dst, *src;
	unsigned int i, j;

	dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
	if (!dst)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
		for (j = 0; j < 256; j++) {
			dst[j].o_bytes   += src[j].o_bytes;
			dst[j].o_packets += src[j].o_packets;
			dst[j].i_bytes   += src[j].i_bytes;
			dst[j].i_packets += src[j].i_packets;
		}
	}

	seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
	kfree(dst);
	return 0;
}
#endif
static int __net_init ip_rt_do_proc_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_create("rt_cache", 0444, net->proc_net,
			  &rt_cache_seq_fops);
	if (!pde)
		goto err1;

	pde = proc_create("rt_cache", 0444,
			  net->proc_net_stat, &rt_cpu_seq_fops);
	if (!pde)
		goto err2;

#ifdef CONFIG_IP_ROUTE_CLASSID
	pde = proc_create_single("rt_acct", 0, net->proc_net,
				 rt_acct_proc_show);
	if (!pde)
		goto err3;
#endif
	return 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
	remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
	remove_proc_entry("rt_cache", net->proc_net);
err1:
	return -ENOMEM;
}

static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
	remove_proc_entry("rt_cache", net->proc_net_stat);
	remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
	remove_proc_entry("rt_acct", net->proc_net);
#endif
}

static struct pernet_operations ip_rt_proc_ops __net_initdata = {
	.init = ip_rt_do_proc_init,
	.exit = ip_rt_do_proc_exit,
};

static int __init ip_rt_proc_init(void)
{
	return register_pernet_subsys(&ip_rt_proc_ops);
}

#else
static inline int ip_rt_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */
static inline bool rt_is_expired(const struct rtable *rth)
{
	return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
}

void rt_cache_flush(struct net *net)
{
	rt_genid_bump_ipv4(net);
}
static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr)
{
	const struct rtable *rt = container_of(dst, struct rtable, dst);
	struct net_device *dev = dst->dev;
	struct neighbour *n;

	rcu_read_lock_bh();

	if (likely(rt->rt_gw_family == AF_INET)) {
		n = ip_neigh_gw4(dev, rt->rt_gw4);
	} else if (rt->rt_gw_family == AF_INET6) {
		n = ip_neigh_gw6(dev, &rt->rt_gw6);
	} else {
		__be32 pkey;

		pkey = skb ? ip_hdr(skb)->daddr : *((__be32 *) daddr);
		n = ip_neigh_gw4(dev, pkey);
	}

	if (n && !refcount_inc_not_zero(&n->refcnt))
		n = NULL;

	rcu_read_unlock_bh();

	return n;
}
static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	const struct rtable *rt = container_of(dst, struct rtable, dst);
	struct net_device *dev = dst->dev;
	const __be32 *pkey = daddr;

	if (rt->rt_gw_family == AF_INET) {
		pkey = (const __be32 *)&rt->rt_gw4;
	} else if (rt->rt_gw_family == AF_INET6) {
		return __ipv6_confirm_neigh_stub(dev, &rt->rt_gw6);
	} else if (!daddr ||
		   (rt->rt_flags &
		    (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL))) {
		return;
	}
	__ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
}
#define IP_IDENTS_SZ 2048u

static atomic_t *ip_idents __read_mostly;
static u32 *ip_tstamps __read_mostly;

/* In order to protect privacy, we add a perturbation to identifiers
 * if one generator is seldom used. This makes it hard for an attacker
 * to infer how many packets were sent between two points in time.
 */
u32 ip_idents_reserve(u32 hash, int segs)
{
	u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
	atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
	u32 old = READ_ONCE(*p_tstamp);
	u32 now = (u32)jiffies;
	u32 new, delta = 0;

	if (old != now && cmpxchg(p_tstamp, old, now) == old)
		delta = prandom_u32_max(now - old);

	/* Do not use atomic_add_return() as it makes UBSAN unhappy */
	do {
		old = (u32)atomic_read(p_id);
		new = old + delta + segs;
	} while (atomic_cmpxchg(p_id, old, new) != old);

	return new - segs;
}
EXPORT_SYMBOL(ip_idents_reserve);
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
{
	u32 hash, id;

	/* Note the following code is not safe, but this is okay. */
	if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
		get_random_bytes(&net->ipv4.ip_id_key,
				 sizeof(net->ipv4.ip_id_key));

	hash = siphash_3u32((__force u32)iph->daddr,
			    (__force u32)iph->saddr,
			    iph->protocol,
			    &net->ipv4.ip_id_key);
	id = ip_idents_reserve(hash, segs);
	iph->id = htons(id);
}
EXPORT_SYMBOL(__ip_select_ident);
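
/* Caller-side sketch (not from this file): for a GSO super-packet that will
 * be split into N segments, passing segs == N reserves N consecutive IDs in
 * one atomic round-trip; iph->id gets the first and the segmentation code
 * can stamp id+1 .. id+N-1 on the remaining segments.
 */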
static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
			     const struct sock *sk,
			     const struct iphdr *iph,
			     int oif, u8 tos,
			     u8 prot, u32 mark, int flow_flags)
{
	if (sk) {
		const struct inet_sock *inet = inet_sk(sk);

		oif = sk->sk_bound_dev_if;
		mark = sk->sk_mark;
		tos = RT_CONN_FLAGS(sk);
		prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
	}
	flowi4_init_output(fl4, oif, mark, tos,
			   RT_SCOPE_UNIVERSE, prot,
			   flow_flags,
			   iph->daddr, iph->saddr, 0, 0,
			   sock_net_uid(net, sk));
}
static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
			       const struct sock *sk)
{
	const struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = ip_hdr(skb);
	int oif = skb->dev->ifindex;
	u8 tos = RT_TOS(iph->tos);
	u8 prot = iph->protocol;
	u32 mark = skb->mark;

	__build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
}
static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
			   inet_sk_flowi_flags(sk),
			   daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
	rcu_read_unlock();
}
static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
				 const struct sk_buff *skb)
{
	if (skb)
		build_skb_flow_key(fl4, skb, sk);
	else
		build_sk_flow_key(fl4, sk);
}
static DEFINE_SPINLOCK(fnhe_lock);

static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
{
	struct rtable *rt;

	rt = rcu_dereference(fnhe->fnhe_rth_input);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
		dst_dev_put(&rt->dst);
		dst_release(&rt->dst);
	}
	rt = rcu_dereference(fnhe->fnhe_rth_output);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
		dst_dev_put(&rt->dst);
		dst_release(&rt->dst);
	}
}
static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
{
	struct fib_nh_exception *fnhe, *oldest;

	oldest = rcu_dereference(hash->chain);
	for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
			oldest = fnhe;
	}
	fnhe_flush_routes(oldest);
	return oldest;
}
static inline u32 fnhe_hashfun(__be32 daddr)
{
	static u32 fnhe_hashrnd __read_mostly;
	u32 hval;

	net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd));
	hval = jhash_1word((__force u32) daddr, fnhe_hashrnd);
	return hash_32(hval, FNHE_HASH_SHIFT);
}
static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
{
	rt->rt_pmtu = fnhe->fnhe_pmtu;
	rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
	rt->dst.expires = fnhe->fnhe_expires;

	if (fnhe->fnhe_gw) {
		rt->rt_flags |= RTCF_REDIRECTED;
		rt->rt_gw_family = AF_INET;
		rt->rt_gw4 = fnhe->fnhe_gw;
	}
}
static void update_or_create_fnhe(struct fib_nh_common *nhc, __be32 daddr,
				  __be32 gw, u32 pmtu, bool lock,
				  unsigned long expires)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe;
	struct rtable *rt;
	u32 genid, hval;
	unsigned int i;
	int depth;

	genid = fnhe_genid(dev_net(nhc->nhc_dev));
	hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	hash = rcu_dereference(nhc->nhc_exceptions);
	if (!hash) {
		hash = kcalloc(FNHE_HASH_SIZE, sizeof(*hash), GFP_ATOMIC);
		if (!hash)
			goto out_unlock;
		rcu_assign_pointer(nhc->nhc_exceptions, hash);
	}

	hash += hval;

	depth = 0;
	for (fnhe = rcu_dereference(hash->chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr)
			break;
		depth++;
	}

	if (fnhe) {
		if (fnhe->fnhe_genid != genid)
			fnhe->fnhe_genid = genid;
		if (gw)
			fnhe->fnhe_gw = gw;
		if (pmtu) {
			fnhe->fnhe_pmtu = pmtu;
			fnhe->fnhe_mtu_locked = lock;
		}
		fnhe->fnhe_expires = max(1UL, expires);
		/* Update all cached dsts too */
		rt = rcu_dereference(fnhe->fnhe_rth_input);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
		rt = rcu_dereference(fnhe->fnhe_rth_output);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
	} else {
		if (depth > FNHE_RECLAIM_DEPTH)
			fnhe = fnhe_oldest(hash);
		else {
			fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
			if (!fnhe)
				goto out_unlock;

			fnhe->fnhe_next = hash->chain;
			rcu_assign_pointer(hash->chain, fnhe);
		}
		fnhe->fnhe_genid = genid;
		fnhe->fnhe_daddr = daddr;
		fnhe->fnhe_gw = gw;
		fnhe->fnhe_pmtu = pmtu;
		fnhe->fnhe_mtu_locked = lock;
		fnhe->fnhe_expires = max(1UL, expires);

		/* Exception created; mark the cached routes for the nexthop
		 * stale, so anyone caching it rechecks if this exception
		 * applies to it.
		 */
		rt = rcu_dereference(nhc->nhc_rth_input);
		if (rt)
			rt->dst.obsolete = DST_OBSOLETE_KILL;

		for_each_possible_cpu(i) {
			struct rtable __rcu **prt;
			prt = per_cpu_ptr(nhc->nhc_pcpu_rth_output, i);
			rt = rcu_dereference(*prt);
			if (rt)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
		}
	}

	fnhe->fnhe_stamp = jiffies;

out_unlock:
	spin_unlock_bh(&fnhe_lock);
}
static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
			     bool kill_route)
{
	__be32 new_gw = icmp_hdr(skb)->un.gateway;
	__be32 old_gw = ip_hdr(skb)->saddr;
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct fib_result res;
	struct neighbour *n;
	struct net *net;

	switch (icmp_hdr(skb)->code & 7) {
	case ICMP_REDIR_NET:
	case ICMP_REDIR_NETTOS:
	case ICMP_REDIR_HOST:
	case ICMP_REDIR_HOSTTOS:
		break;

	default:
		return;
	}

	if (rt->rt_gw_family != AF_INET || rt->rt_gw4 != old_gw)
		return;

	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev)
		return;

	net = dev_net(dev);
	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
	    ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
	    ipv4_is_zeronet(new_gw))
		goto reject_redirect;

	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
			goto reject_redirect;
		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
			goto reject_redirect;
	} else {
		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
			goto reject_redirect;
	}

	n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
	if (!n)
		n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
	if (!IS_ERR(n)) {
		if (!(n->nud_state & NUD_VALID)) {
			neigh_event_send(n, NULL);
		} else {
			if (fib_lookup(net, fl4, &res, 0) == 0) {
				struct fib_nh_common *nhc = FIB_RES_NHC(res);

				update_or_create_fnhe(nhc, fl4->daddr, new_gw,
						      0, false,
						      jiffies + ip_rt_gc_timeout);
			}
			if (kill_route)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
			call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
		}
		neigh_release(n);
	}
	return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev)) {
		const struct iphdr *iph = (const struct iphdr *) skb->data;
		__be32 daddr = iph->daddr;
		__be32 saddr = iph->saddr;

		net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
				     "  Advised path = %pI4 -> %pI4\n",
				     &old_gw, dev->name, &new_gw,
				     &saddr, &daddr);
	}
#endif
	;
}
static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
	struct rtable *rt;
	struct flowi4 fl4;
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct net *net = dev_net(skb->dev);
	int oif = skb->dev->ifindex;
	u8 tos = RT_TOS(iph->tos);
	u8 prot = iph->protocol;
	u32 mark = skb->mark;

	rt = (struct rtable *) dst;

	__build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
	__ip_do_redirect(rt, skb, &fl4, true);
}
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;
	struct dst_entry *ret = dst;

	if (rt) {
		if (dst->obsolete > 0) {
			ip_rt_put(rt);
			ret = NULL;
		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
			   rt->dst.expires) {
			ip_rt_put(rt);
			ret = NULL;
		}
	}
	return ret;
}
/*
 * Algorithm:
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we did not see packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   forgot the redirected route and start to send redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */
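
/* Worked example with the defaults above: a redirect is sent when the time
 * since rate_last exceeds ip_rt_redirect_load << rate_tokens, so the gaps
 * grow roughly 20ms, 40ms, 80ms, ... (HZ/50, doubling per ignored redirect).
 * After ip_rt_redirect_number (9) ignored redirects we stay silent until
 * ip_rt_redirect_silence (HZ/50 << 10, about 20s) passes without
 * redirect-worthy traffic, then the counters reset and the cycle restarts.
 */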
void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct in_device *in_dev;
	struct inet_peer *peer;
	struct net *net;
	int log_martians;
	int vif;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(rt->dst.dev);
	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
		rcu_read_unlock();
		return;
	}
	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
	vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
	rcu_read_unlock();

	net = dev_net(rt->dst.dev);
	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
	if (!peer) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
			  rt_nexthop(rt, ip_hdr(skb)->daddr));
		return;
	}

	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
		peer->rate_tokens = 0;
		peer->n_redirects = 0;
	}

	/* Too many ignored redirects; do not send anything;
	 * set dst.rate_last to the last seen redirected packet.
	 */
	if (peer->n_redirects >= ip_rt_redirect_number) {
		peer->rate_last = jiffies;
		goto out_put_peer;
	}

	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
	if (peer->rate_tokens == 0 ||
	    time_after(jiffies,
		       (peer->rate_last +
			(ip_rt_redirect_load << peer->rate_tokens)))) {
		__be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);

		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
		peer->rate_last = jiffies;
		++peer->rate_tokens;
		++peer->n_redirects;
#ifdef CONFIG_IP_ROUTE_VERBOSE
		if (log_martians &&
		    peer->rate_tokens == ip_rt_redirect_number)
			net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
					     &ip_hdr(skb)->saddr, inet_iif(skb),
					     &ip_hdr(skb)->daddr, &gw);
#endif
	}
out_put_peer:
	inet_putpeer(peer);
}
static int ip_error(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct inet_peer *peer;
	unsigned long now;
	struct net *net;
	bool send;
	int code;

	if (netif_is_l3_master(skb->dev)) {
		dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
		if (!dev)
			goto out;
	}

	in_dev = __in_dev_get_rcu(dev);

	/* IP on this device is disabled. */
	if (!in_dev)
		goto out;

	net = dev_net(rt->dst.dev);
	if (!IN_DEV_FORWARD(in_dev)) {
		switch (rt->dst.error) {
		case EHOSTUNREACH:
			__IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
			break;

		case ENETUNREACH:
			__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
			break;
		}
		goto out;
	}

	switch (rt->dst.error) {
	case EINVAL:
	default:
		goto out;
	case EHOSTUNREACH:
		code = ICMP_HOST_UNREACH;
		break;
	case ENETUNREACH:
		code = ICMP_NET_UNREACH;
		__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
		break;
	case EACCES:
		code = ICMP_PKT_FILTERED;
		break;
	}

	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
			       l3mdev_master_ifindex(skb->dev), 1);

	send = true;
	if (peer) {
		now = jiffies;
		peer->rate_tokens += now - peer->rate_last;
		if (peer->rate_tokens > ip_rt_error_burst)
			peer->rate_tokens = ip_rt_error_burst;
		peer->rate_last = now;
		if (peer->rate_tokens >= ip_rt_error_cost)
			peer->rate_tokens -= ip_rt_error_cost;
		else
			send = false;
		inet_putpeer(peer);
	}
	if (send)
		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);

out:	kfree_skb(skb);
	return 0;
}
static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
	struct dst_entry *dst = &rt->dst;
	u32 old_mtu = ipv4_mtu(dst);
	struct fib_result res;
	bool lock = false;

	if (ip_mtu_locked(dst))
		return;

	if (old_mtu < mtu)
		return;

	if (mtu < ip_rt_min_pmtu) {
		lock = true;
		mtu = min(old_mtu, ip_rt_min_pmtu);
	}

	if (rt->rt_pmtu == mtu && !lock &&
	    time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
		return;

	rcu_read_lock();
	if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
		struct fib_nh_common *nhc = FIB_RES_NHC(res);

		update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
				      jiffies + ip_rt_mtu_expires);
	}
	rcu_read_unlock();
}
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			      struct sk_buff *skb, u32 mtu)
{
	struct rtable *rt = (struct rtable *) dst;
	struct flowi4 fl4;

	ip_rt_build_flow_key(&fl4, sk, skb);
	__ip_rt_update_pmtu(rt, &fl4, mtu);
}

void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
		      int oif, u8 protocol)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	u32 mark = IP4_REPLY_MARK(net, skb->mark);

	__build_flow_key(net, &fl4, NULL, iph, oif,
			 RT_TOS(iph->tos), protocol, mark, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);

	if (!fl4.flowi4_mark)
		fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);

	rt = __ip_route_output_key(sock_net(sk), &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}

void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct dst_entry *odst = NULL;
	bool new = false;
	struct net *net = sock_net(sk);

	bh_lock_sock(sk);

	if (!ip_sk_accept_pmtu(sk))
		goto out;

	odst = sk_dst_get(sk);

	if (sock_owned_by_user(sk) || !odst) {
		__ipv4_sk_update_pmtu(skb, sk, mtu);
		goto out;
	}

	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);

	rt = (struct rtable *)odst;
	if (odst->obsolete && !odst->ops->check(odst, 0)) {
		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	__ip_rt_update_pmtu((struct rtable *) xfrm_dst_path(&rt->dst), &fl4, mtu);

	if (!dst_check(&rt->dst, 0)) {
		if (new)
			dst_release(&rt->dst);

		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	if (new)
		sk_dst_set(sk, &rt->dst);

out:
	bh_unlock_sock(sk);
	dst_release(odst);
}
EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
void ipv4_redirect(struct sk_buff *skb, struct net *net,
		   int oif, u8 protocol)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(net, &fl4, NULL, iph, oif,
			 RT_TOS(iph->tos), protocol, 0, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_redirect);

void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct net *net = sock_net(sk);

	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
	struct rtable *rt = (struct rtable *) dst;

	/* All IPV4 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 *
	 * When a PMTU/redirect information update invalidates a route,
	 * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
	 * DST_OBSOLETE_DEAD.
	 */
	if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
		return NULL;
	return dst;
}
static void ipv4_send_dest_unreach(struct sk_buff *skb)
{
	struct ip_options opt;
	int res;

	/* Recompile ip options since IPCB may not be valid anymore.
	 * Also check we have a reasonable ipv4 header.
	 */
	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
	    ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
		return;

	memset(&opt, 0, sizeof(opt));
	if (ip_hdr(skb)->ihl > 5) {
		if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
			return;
		opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);

		rcu_read_lock();
		res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
		rcu_read_unlock();

		if (res)
			return;
	}
	__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
}
static void ipv4_link_failure(struct sk_buff *skb)
{
	struct rtable *rt;

	ipv4_send_dest_unreach(skb);

	rt = skb_rtable(skb);
	if (rt)
		dst_set_expires(&rt->dst, 0);
}

static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	pr_debug("%s: %pI4 -> %pI4, %s\n",
		 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
		 skb->dev ? skb->dev->name : "?");
	kfree_skb(skb);
	WARN_ON(1);
	return 0;
}
/*
 * We do not cache the source address of the outgoing interface,
 * because it is used only by IP RR, TS and SRR options,
 * so it is out of the fast path.
 *
 * BTW remember: "addr" is allowed to be unaligned in IP options!
 */

void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
{
	__be32 src;

	if (rt_is_output_route(rt))
		src = ip_hdr(skb)->saddr;
	else {
		struct fib_result res;
		struct iphdr *iph = ip_hdr(skb);
		struct flowi4 fl4 = {
			.daddr = iph->daddr,
			.saddr = iph->saddr,
			.flowi4_tos = RT_TOS(iph->tos),
			.flowi4_oif = rt->dst.dev->ifindex,
			.flowi4_iif = skb->dev->ifindex,
			.flowi4_mark = skb->mark,
		};

		rcu_read_lock();
		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
			src = fib_result_prefsrc(dev_net(rt->dst.dev), &res);
		else
			src = inet_select_addr(rt->dst.dev,
					       rt_nexthop(rt, iph->daddr),
					       RT_SCOPE_UNIVERSE);
		rcu_read_unlock();
	}
	memcpy(addr, &src, 4);
}
#ifdef CONFIG_IP_ROUTE_CLASSID
static void set_class_tag(struct rtable *rt, u32 tag)
{
	if (!(rt->dst.tclassid & 0xFFFF))
		rt->dst.tclassid |= tag & 0xFFFF;
	if (!(rt->dst.tclassid & 0xFFFF0000))
		rt->dst.tclassid |= tag & 0xFFFF0000;
}
#endif
static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
{
	unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
	unsigned int advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
				    ip_rt_min_advmss);

	return min(advmss, IPV4_MAX_PMTU - header_size);
}
static unsigned int ipv4_mtu(const struct dst_entry *dst)
{
	const struct rtable *rt = (const struct rtable *) dst;
	unsigned int mtu = rt->rt_pmtu;

	if (!mtu || time_after_eq(jiffies, rt->dst.expires))
		mtu = dst_metric_raw(dst, RTAX_MTU);

	if (mtu)
		return mtu;

	mtu = READ_ONCE(dst->dev->mtu);

	if (unlikely(ip_mtu_locked(dst))) {
		if (rt->rt_gw_family && mtu > 576)
			mtu = 576;
	}

	mtu = min_t(unsigned int, mtu, IP_MAX_MTU);

	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}
static void ip_del_fnhe(struct fib_nh_common *nhc, __be32 daddr)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
	u32 hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	hash = rcu_dereference_protected(nhc->nhc_exceptions,
					 lockdep_is_held(&fnhe_lock));
	hash += hval;

	fnhe_p = &hash->chain;
	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
	while (fnhe) {
		if (fnhe->fnhe_daddr == daddr) {
			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
			/* set fnhe_daddr to 0 to ensure it won't bind with
			 * new dsts in rt_bind_exception().
			 */
			fnhe->fnhe_daddr = 0;
			fnhe_flush_routes(fnhe);
			kfree_rcu(fnhe, rcu);
			break;
		}
		fnhe_p = &fnhe->fnhe_next;
		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
						 lockdep_is_held(&fnhe_lock));
	}

	spin_unlock_bh(&fnhe_lock);
}
static struct fib_nh_exception *find_exception(struct fib_nh_common *nhc,
					       __be32 daddr)
{
	struct fnhe_hash_bucket *hash = rcu_dereference(nhc->nhc_exceptions);
	struct fib_nh_exception *fnhe;
	u32 hval;

	if (!hash)
		return NULL;

	hval = fnhe_hashfun(daddr);

	for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr) {
			if (fnhe->fnhe_expires &&
			    time_after(jiffies, fnhe->fnhe_expires)) {
				ip_del_fnhe(nhc, daddr);
				break;
			}
			return fnhe;
		}
	}
	return NULL;
}
/* MTU selection:
 * 1. mtu on route is locked - use it
 * 2. mtu from nexthop exception
 * 3. mtu from egress device
 */
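
/* Example: with a locked MTU metric on the route, that value wins outright;
 * otherwise a still-valid nexthop exception recorded by PMTU discovery (say
 * 1400) is used until fnhe_expires; failing both, the egress device MTU
 * (say 1500) applies, capped at IP_MAX_MTU and reduced by any lwtunnel
 * encapsulation headroom.
 */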
u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr)
{
	struct fib_nh_common *nhc = res->nhc;
	struct net_device *dev = nhc->nhc_dev;
	struct fib_info *fi = res->fi;
	u32 mtu = 0;

	if (dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu ||
	    fi->fib_metrics->metrics[RTAX_LOCK - 1] & (1 << RTAX_MTU))
		mtu = fi->fib_mtu;

	if (likely(!mtu)) {
		struct fib_nh_exception *fnhe;

		fnhe = find_exception(nhc, daddr);
		if (fnhe && !time_after_eq(jiffies, fnhe->fnhe_expires))
			mtu = fnhe->fnhe_pmtu;
	}

	if (likely(!mtu))
		mtu = min(READ_ONCE(dev->mtu), IP_MAX_MTU);

	return mtu - lwtunnel_headroom(nhc->nhc_lwtstate, mtu);
}
static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
			      __be32 daddr, const bool do_cache)
{
	bool ret = false;

	spin_lock_bh(&fnhe_lock);

	if (daddr == fnhe->fnhe_daddr) {
		struct rtable __rcu **porig;
		struct rtable *orig;
		int genid = fnhe_genid(dev_net(rt->dst.dev));

		if (rt_is_input_route(rt))
			porig = &fnhe->fnhe_rth_input;
		else
			porig = &fnhe->fnhe_rth_output;
		orig = rcu_dereference(*porig);

		if (fnhe->fnhe_genid != genid) {
			fnhe->fnhe_genid = genid;
			fnhe->fnhe_gw = 0;
			fnhe->fnhe_pmtu = 0;
			fnhe->fnhe_expires = 0;
			fnhe->fnhe_mtu_locked = false;
			fnhe_flush_routes(fnhe);
			orig = NULL;
		}
		fill_route_from_fnhe(rt, fnhe);
		if (!rt->rt_gw4) {
			rt->rt_gw4 = daddr;
			rt->rt_gw_family = AF_INET;
		}

		if (do_cache) {
			dst_hold(&rt->dst);
			rcu_assign_pointer(*porig, rt);
			if (orig) {
				dst_dev_put(&orig->dst);
				dst_release(&orig->dst);
			}
			ret = true;
		}

		fnhe->fnhe_stamp = jiffies;
	}
	spin_unlock_bh(&fnhe_lock);

	return ret;
}
static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt)
{
	struct rtable *orig, *prev, **p;
	bool ret = true;

	if (rt_is_input_route(rt)) {
		p = (struct rtable **)&nhc->nhc_rth_input;
	} else {
		p = (struct rtable **)raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
	}
	orig = *p;

	/* hold dst before doing cmpxchg() to avoid race condition
	 * on this dst
	 */
	dst_hold(&rt->dst);
	prev = cmpxchg(p, orig, rt);
	if (prev == orig) {
		if (orig) {
			dst_dev_put(&orig->dst);
			dst_release(&orig->dst);
		}
	} else {
		dst_release(&rt->dst);
		ret = false;
	}

	return ret;
}
struct uncached_list {
	spinlock_t		lock;
	struct list_head	head;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);

void rt_add_uncached_list(struct rtable *rt)
{
	struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);

	rt->rt_uncached_list = ul;

	spin_lock_bh(&ul->lock);
	list_add_tail(&rt->rt_uncached, &ul->head);
	spin_unlock_bh(&ul->lock);
}

void rt_del_uncached_list(struct rtable *rt)
{
	if (!list_empty(&rt->rt_uncached)) {
		struct uncached_list *ul = rt->rt_uncached_list;

		spin_lock_bh(&ul->lock);
		list_del(&rt->rt_uncached);
		spin_unlock_bh(&ul->lock);
	}
}
static void ipv4_dst_destroy(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;

	ip_dst_metrics_put(dst);
	rt_del_uncached_list(rt);
}
void rt_flush_dev(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct rtable *rt;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);

		spin_lock_bh(&ul->lock);
		list_for_each_entry(rt, &ul->head, rt_uncached) {
			if (rt->dst.dev != dev)
				continue;
			rt->dst.dev = net->loopback_dev;
			dev_hold(rt->dst.dev);
			dev_put(dev);
		}
		spin_unlock_bh(&ul->lock);
	}
}
static bool rt_cache_valid(const struct rtable *rt)
{
	return	rt &&
		rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
		!rt_is_expired(rt);
}
static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
			   const struct fib_result *res,
			   struct fib_nh_exception *fnhe,
			   struct fib_info *fi, u16 type, u32 itag,
			   const bool do_cache)
{
	bool cached = false;

	if (fi) {
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);

		if (nhc->nhc_gw_family && nhc->nhc_scope == RT_SCOPE_LINK) {
			rt->rt_gw_family = nhc->nhc_gw_family;
			/* only INET and INET6 are supported */
			if (likely(nhc->nhc_gw_family == AF_INET))
				rt->rt_gw4 = nhc->nhc_gw.ipv4;
			else
				rt->rt_gw6 = nhc->nhc_gw.ipv6;
		}

		ip_dst_init_metrics(&rt->dst, fi->fib_metrics);

#ifdef CONFIG_IP_ROUTE_CLASSID
		if (nhc->nhc_family == AF_INET) {
			struct fib_nh *nh;

			nh = container_of(nhc, struct fib_nh, nh_common);
			rt->dst.tclassid = nh->nh_tclassid;
		}
#endif
		rt->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
		if (unlikely(fnhe))
			cached = rt_bind_exception(rt, fnhe, daddr, do_cache);
		else if (do_cache)
			cached = rt_cache_route(nhc, rt);
		if (unlikely(!cached)) {
			/* Routes we intend to cache in nexthop exception or
			 * FIB nexthop have the DST_NOCACHE bit clear.
			 * However, if we are unsuccessful at storing this
			 * route into the cache we really need to set it.
			 */
			if (!rt->rt_gw4) {
				rt->rt_gw_family = AF_INET;
				rt->rt_gw4 = daddr;
			}
			rt_add_uncached_list(rt);
		}
	} else
		rt_add_uncached_list(rt);

#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
	set_class_tag(rt, res->tclassid);
#endif
	set_class_tag(rt, itag);
#endif
}
struct rtable *rt_dst_alloc(struct net_device *dev,
			    unsigned int flags, u16 type,
			    bool nopolicy, bool noxfrm, bool will_cache)
{
	struct rtable *rt;

	rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
		       (will_cache ? 0 : DST_HOST) |
		       (nopolicy ? DST_NOPOLICY : 0) |
		       (noxfrm ? DST_NOXFRM : 0));

	if (rt) {
		rt->rt_genid = rt_genid_ipv4(dev_net(dev));
		rt->rt_flags = flags;
		rt->rt_type = type;
		rt->rt_is_input = 0;
		rt->rt_iif = 0;
		rt->rt_pmtu = 0;
		rt->rt_mtu_locked = 0;
		rt->rt_gw_family = 0;
		rt->rt_gw4 = 0;
		INIT_LIST_HEAD(&rt->rt_uncached);

		rt->dst.output = ip_output;
		if (flags & RTCF_LOCAL)
			rt->dst.input = ip_local_deliver;
	}

	return rt;
}
EXPORT_SYMBOL(rt_dst_alloc);
/* called in rcu_read_lock() section */
int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			  u8 tos, struct net_device *dev,
			  struct in_device *in_dev, u32 *itag)
{
	int err;

	/* Primary sanity checks. */
	if (!in_dev)
		return -EINVAL;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    skb->protocol != htons(ETH_P_IP))
		return -EINVAL;

	if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
		return -EINVAL;

	if (ipv4_is_zeronet(saddr)) {
		if (!ipv4_is_local_multicast(daddr) &&
		    ip_hdr(skb)->protocol != IPPROTO_IGMP)
			return -EINVAL;
	} else {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
					  in_dev, itag);
		if (err < 0)
			return err;
	}
	return 0;
}
/* called in rcu_read_lock() section */
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			     u8 tos, struct net_device *dev, int our)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	unsigned int flags = RTCF_MULTICAST;
	struct rtable *rth;
	u32 itag = 0;
	int err;

	err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag);
	if (err)
		return err;

	if (our)
		flags |= RTCF_LOCAL;

	rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
	if (!rth)
		return -ENOBUFS;

#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	rth->dst.output = ip_rt_bug;
	rth->rt_is_input = 1;

#ifdef CONFIG_IP_MROUTE
	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
		rth->dst.input = ip_mr_input;
#endif
	RT_CACHE_STAT_INC(in_slow_mc);

	skb_dst_set(skb, &rth->dst);
	return 0;
}
static void ip_handle_martian_source(struct net_device *dev,
				     struct in_device *in_dev,
				     struct sk_buff *skb,
				     __be32 daddr,
				     __be32 saddr)
{
	RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
		/*
		 *	RFC1812 recommendation: if the source is martian,
		 *	the only hint we have is the MAC header.
		 */
		pr_warn("martian source %pI4 from %pI4, on dev %s\n",
			&daddr, &saddr, dev->name);
		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
			print_hex_dump(KERN_WARNING, "ll header: ",
				       DUMP_PREFIX_OFFSET, 16, 1,
				       skb_mac_header(skb),
				       dev->hard_header_len, false);
		}
	}
#endif
}
/* called in rcu_read_lock() section */
static int __mkroute_input(struct sk_buff *skb,
			   const struct fib_result *res,
			   struct in_device *in_dev,
			   __be32 daddr, __be32 saddr, u32 tos)
{
	struct fib_nh_common *nhc = FIB_RES_NHC(*res);
	struct net_device *dev = nhc->nhc_dev;
	struct fib_nh_exception *fnhe;
	struct rtable *rth;
	int err;
	struct in_device *out_dev;
	bool do_cache;
	u32 itag = 0;

	/* get a working reference to the output device */
	out_dev = __in_dev_get_rcu(dev);
	if (!out_dev) {
		net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
		return -EINVAL;
	}

	err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
				  in_dev->dev, in_dev, &itag);
	if (err < 0) {
		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
					 saddr);

		goto cleanup;
	}

	do_cache = res->fi && !itag;
	if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
	    skb->protocol == htons(ETH_P_IP)) {
		__be32 gw;

		gw = nhc->nhc_gw_family == AF_INET ? nhc->nhc_gw.ipv4 : 0;
		if (IN_DEV_SHARED_MEDIA(out_dev) ||
		    inet_addr_onlink(out_dev, saddr, gw))
			IPCB(skb)->flags |= IPSKB_DOREDIRECT;
	}

	if (skb->protocol != htons(ETH_P_IP)) {
		/* Not IP (i.e. ARP). Do not create a route, if it is
		 * invalid for proxy arp. DNAT routes are always valid.
		 *
		 * The proxy ARP feature has been extended to allow ARP
		 * replies back to the same interface, to support
		 * Private VLAN switch technologies. See arp.c.
		 */
		if (out_dev == in_dev &&
		    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
			err = -EINVAL;
			goto cleanup;
		}
	}

	fnhe = find_exception(nhc, daddr);
	if (do_cache) {
		if (fnhe)
			rth = rcu_dereference(fnhe->fnhe_rth_input);
		else
			rth = rcu_dereference(nhc->nhc_rth_input);
		if (rt_cache_valid(rth)) {
			skb_dst_set_noref(skb, &rth->dst);
			goto out;
		}
	}

	rth = rt_dst_alloc(out_dev->dev, 0, res->type,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
			   IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
	if (!rth) {
		err = -ENOBUFS;
		goto cleanup;
	}

	rth->rt_is_input = 1;
	RT_CACHE_STAT_INC(in_slow_tot);

	rth->dst.input = ip_forward;

	rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag,
		       do_cache);
	lwtunnel_set_redirect(&rth->dst);
	skb_dst_set(skb, &rth->dst);
out:
	err = 0;
 cleanup:
	return err;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
/* To make ICMP packets follow the right flow, the multipath hash is
 * calculated from the inner IP addresses.
 */
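/* Rationale: an ICMP error embeds the header of the packet that triggered
 * it.  Hashing on those inner addresses gives the error the same ECMP
 * nexthop choice as the flow it refers to, rather than one derived from
 * the (router-sourced) outer addresses.
 */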
static void ip_multipath_l3_keys(const struct sk_buff *skb,
				 struct flow_keys *hash_keys)
{
	const struct iphdr *outer_iph = ip_hdr(skb);
	const struct iphdr *key_iph = outer_iph;
	const struct iphdr *inner_iph;
	const struct icmphdr *icmph;
	struct iphdr _inner_iph;
	struct icmphdr _icmph;

	if (likely(outer_iph->protocol != IPPROTO_ICMP))
		goto out;

	if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
		goto out;

	icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
				   &_icmph);
	if (!icmph)
		goto out;

	if (icmph->type != ICMP_DEST_UNREACH &&
	    icmph->type != ICMP_REDIRECT &&
	    icmph->type != ICMP_TIME_EXCEEDED &&
	    icmph->type != ICMP_PARAMETERPROB)
		goto out;

	inner_iph = skb_header_pointer(skb,
				       outer_iph->ihl * 4 + sizeof(_icmph),
				       sizeof(_inner_iph), &_inner_iph);
	if (!inner_iph)
		goto out;

	key_iph = inner_iph;
out:
	hash_keys->addrs.v4addrs.src = key_iph->saddr;
	hash_keys->addrs.v4addrs.dst = key_iph->daddr;
}
/* if skb is set it will be used and fl4 can be NULL */
int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
		       const struct sk_buff *skb, struct flow_keys *flkeys)
{
	u32 multipath_hash = fl4 ? fl4->flowi4_multipath_hash : 0;
	struct flow_keys hash_keys;
	u32 mhash;

	switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
	case 0:
		memset(&hash_keys, 0, sizeof(hash_keys));
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		if (skb) {
			ip_multipath_l3_keys(skb, &hash_keys);
		} else {
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
		}
		break;
	case 1:
		/* skb is currently provided only when forwarding */
		if (skb) {
			unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
			struct flow_keys keys;

			/* short-circuit if we already have L4 hash present */
			if (skb->l4_hash)
				return skb_get_hash_raw(skb) >> 1;

			memset(&hash_keys, 0, sizeof(hash_keys));

			if (!flkeys) {
				skb_flow_dissect_flow_keys(skb, &keys, flag);
				flkeys = &keys;
			}

			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
			hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
			hash_keys.ports.src = flkeys->ports.src;
			hash_keys.ports.dst = flkeys->ports.dst;
			hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
		} else {
			memset(&hash_keys, 0, sizeof(hash_keys));
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
			hash_keys.ports.src = fl4->fl4_sport;
			hash_keys.ports.dst = fl4->fl4_dport;
			hash_keys.basic.ip_proto = fl4->flowi4_proto;
		}
		break;
	}
	mhash = flow_hash_from_keys(&hash_keys);

	if (multipath_hash)
		mhash = jhash_2words(mhash, multipath_hash, 0);

	return mhash >> 1;
}
#endif /* CONFIG_IP_ROUTE_MULTIPATH */
static int ip_mkroute_input(struct sk_buff *skb,
			    struct fib_result *res,
			    struct in_device *in_dev,
			    __be32 daddr, __be32 saddr, u32 tos,
			    struct flow_keys *hkeys)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res->fi && fib_info_num_path(res->fi) > 1) {
		int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);

		fib_select_multipath(res, h);
	}
#endif

	/* create a routing cache entry */
	return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
}
/*
 *	NOTE. We drop all packets that have a local source
 *	address, because every properly looped back packet
 *	must have the correct destination already attached by the output routine.
 *
 *	Such an approach solves two big problems:
 *	1. Non-simplex devices are handled properly.
 *	2. IP spoofing attempts are filtered with a 100% guarantee.
 *
 *	called with rcu_read_lock()
 */
static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			       u8 tos, struct net_device *dev,
			       struct fib_result *res)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	struct flow_keys *flkeys = NULL, _flkeys;
	struct net    *net = dev_net(dev);
	struct ip_tunnel_info *tun_info;
	int		err = -EINVAL;
	unsigned int	flags = 0;
	u32		itag = 0;
	struct rtable	*rth;
	struct flowi4	fl4;
	bool do_cache = true;

	/* IP on this device is disabled. */

	if (!in_dev)
		goto out;

	/* Check for the most weird martians, which cannot be detected
	 * by fib_lookup.
	 */

	tun_info = skb_tunnel_info(skb);
	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
		fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
	else
		fl4.flowi4_tun_key.tun_id = 0;
	skb_dst_drop(skb);

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
		goto martian_source;

	res->fi = NULL;
	res->table = NULL;
	if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
		goto brd_input;

	/* Accept zero addresses only to limited broadcast;
	 * I do not even know whether to fix it or not. Waiting for complaints :-)
	 */
	if (ipv4_is_zeronet(saddr))
		goto martian_source;

	if (ipv4_is_zeronet(daddr))
		goto martian_destination;

	/* The following code tries to avoid calling IN_DEV_NET_ROUTE_LOCALNET(),
	 * calling it only once if daddr and/or saddr are loopback addresses.
	 */
	if (ipv4_is_loopback(daddr)) {
		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
			goto martian_destination;
	} else if (ipv4_is_loopback(saddr)) {
		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
			goto martian_source;
	}

	/*
	 *	Now we are ready to route the packet.
	 */
	fl4.flowi4_oif = 0;
	fl4.flowi4_iif = dev->ifindex;
	fl4.flowi4_mark = skb->mark;
	fl4.flowi4_tos = tos;
	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
	fl4.flowi4_flags = 0;
	fl4.daddr = daddr;
	fl4.saddr = saddr;
	fl4.flowi4_uid = sock_net_uid(net, NULL);

	if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) {
		flkeys = &_flkeys;
	} else {
		fl4.flowi4_proto = 0;
		fl4.fl4_sport = 0;
		fl4.fl4_dport = 0;
	}

	err = fib_lookup(net, &fl4, res, 0);
	if (err != 0) {
		if (!IN_DEV_FORWARD(in_dev))
			err = -EHOSTUNREACH;
		goto no_route;
	}

	if (res->type == RTN_BROADCAST) {
		if (IN_DEV_BFORWARD(in_dev))
			goto make_route;
		/* do not cache if bc_forwarding is enabled */
		if (IPV4_DEVCONF_ALL(net, BC_FORWARDING))
			do_cache = false;
		goto brd_input;
	}

	if (res->type == RTN_LOCAL) {
		err = fib_validate_source(skb, saddr, daddr, tos,
					  0, dev, in_dev, &itag);
		if (err < 0)
			goto martian_source;
		goto local_input;
	}

	if (!IN_DEV_FORWARD(in_dev)) {
		err = -EHOSTUNREACH;
		goto no_route;
	}
	if (res->type != RTN_UNICAST)
		goto martian_destination;

make_route:
	err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos, flkeys);
out:	return err;

brd_input:
	if (skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (!ipv4_is_zeronet(saddr)) {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
					  in_dev, &itag);
		if (err < 0)
			goto martian_source;
	}
	flags |= RTCF_BROADCAST;
	res->type = RTN_BROADCAST;
	RT_CACHE_STAT_INC(in_brd);

local_input:
	do_cache &= res->fi && !itag;
	if (do_cache) {
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);

		rth = rcu_dereference(nhc->nhc_rth_input);
		if (rt_cache_valid(rth)) {
			skb_dst_set_noref(skb, &rth->dst);
			err = 0;
			goto out;
		}
	}

	rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
			   flags | RTCF_LOCAL, res->type,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
	if (!rth)
		goto e_nobufs;

	rth->dst.output = ip_rt_bug;
#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	rth->rt_is_input = 1;

	RT_CACHE_STAT_INC(in_slow_tot);
	if (res->type == RTN_UNREACHABLE) {
		rth->dst.input = ip_error;
		rth->dst.error = -err;
		rth->rt_flags &= ~RTCF_LOCAL;
	}

	if (do_cache) {
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);

		rth->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
		if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
			WARN_ON(rth->dst.input == lwtunnel_input);
			rth->dst.lwtstate->orig_input = rth->dst.input;
			rth->dst.input = lwtunnel_input;
		}

		if (unlikely(!rt_cache_route(nhc, rth)))
			rt_add_uncached_list(rth);
	}
	skb_dst_set(skb, &rth->dst);
	err = 0;
	goto out;

no_route:
	RT_CACHE_STAT_INC(in_no_route);
	res->type = RTN_UNREACHABLE;
	res->fi = NULL;
	res->table = NULL;
	goto local_input;

	/*
	 *	Do not cache martian addresses: they should be logged (RFC1812)
	 */
martian_destination:
	RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev))
		net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
				     &daddr, &saddr, dev->name);
#endif

e_inval:
	err = -EINVAL;
	goto out;

e_nobufs:
	err = -ENOBUFS;
	goto out;

martian_source:
	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
	goto out;
}
int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			 u8 tos, struct net_device *dev)
{
	struct fib_result res;
	int err;

	tos &= IPTOS_RT_MASK;
	rcu_read_lock();
	err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res);
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(ip_route_input_noref);
/* called with rcu_read_lock held */
int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
		       u8 tos, struct net_device *dev, struct fib_result *res)
{
	/* Multicast recognition logic is moved from route cache to here.
	 * The problem was that too many Ethernet cards have broken/missing
	 * hardware multicast filters :-( As a result the host on a multicasting
	 * network acquires a lot of useless route cache entries, sort of
	 * SDR messages from all the world. Now we try to get rid of them.
	 * Really, provided software IP multicast filter is organized
	 * reasonably (at least, hashed), it does not result in a slowdown
	 * compared with route cache reject entries.
	 * Note that multicast routers are not affected, because
	 * a route cache entry is created eventually.
	 */
	if (ipv4_is_multicast(daddr)) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);
		int our = 0;
		int err = -EINVAL;

		if (!in_dev)
			return err;
		our = ip_check_mc_rcu(in_dev, daddr, saddr,
				      ip_hdr(skb)->protocol);

		/* check l3 master if no match yet */
		if (!our && netif_is_l3_slave(dev)) {
			struct in_device *l3_in_dev;

			l3_in_dev = __in_dev_get_rcu(skb->dev);
			if (l3_in_dev)
				our = ip_check_mc_rcu(l3_in_dev, daddr, saddr,
						      ip_hdr(skb)->protocol);
		}

		if (our
#ifdef CONFIG_IP_MROUTE
			||
		    (!ipv4_is_local_multicast(daddr) &&
		     IN_DEV_MFORWARD(in_dev))
#endif
		   ) {
			err = ip_route_input_mc(skb, daddr, saddr,
						tos, dev, our);
		}
		return err;
	}

	return ip_route_input_slow(skb, daddr, saddr, tos, dev, res);
}
/* called with rcu_read_lock() */
static struct rtable *__mkroute_output(const struct fib_result *res,
				       const struct flowi4 *fl4, int orig_oif,
				       struct net_device *dev_out,
				       unsigned int flags)
{
	struct fib_info *fi = res->fi;
	struct fib_nh_exception *fnhe;
	struct in_device *in_dev;
	u16 type = res->type;
	struct rtable *rth;
	bool do_cache;

	in_dev = __in_dev_get_rcu(dev_out);
	if (!in_dev)
		return ERR_PTR(-EINVAL);

	if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
		if (ipv4_is_loopback(fl4->saddr) &&
		    !(dev_out->flags & IFF_LOOPBACK) &&
		    !netif_is_l3_master(dev_out))
			return ERR_PTR(-EINVAL);

	if (ipv4_is_lbcast(fl4->daddr))
		type = RTN_BROADCAST;
	else if (ipv4_is_multicast(fl4->daddr))
		type = RTN_MULTICAST;
	else if (ipv4_is_zeronet(fl4->daddr))
		return ERR_PTR(-EINVAL);

	if (dev_out->flags & IFF_LOOPBACK)
		flags |= RTCF_LOCAL;

	do_cache = true;
	if (type == RTN_BROADCAST) {
		flags |= RTCF_BROADCAST | RTCF_LOCAL;
		fi = NULL;
	} else if (type == RTN_MULTICAST) {
		flags |= RTCF_MULTICAST | RTCF_LOCAL;
		if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
				     fl4->flowi4_proto))
			flags &= ~RTCF_LOCAL;
		else
			do_cache = false;
		/* If a multicast route does not exist, use the
		 * default one, but do not gateway in this case.
		 * Yes, it is a hack.
		 */
		if (fi && res->prefixlen < 4)
			fi = NULL;
	} else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
		   (orig_oif != dev_out->ifindex)) {
		/* For local routes that require a particular output interface
		 * we do not want to cache the result.  Caching the result
		 * causes incorrect behaviour when there are multiple source
		 * addresses on the interface, the end result being that if the
		 * intended recipient is waiting on that interface for the
		 * packet he won't receive it because it will be delivered on
		 * the loopback interface and the IP_PKTINFO ipi_ifindex will
		 * be set to the loopback interface as well.
		 */
		do_cache = false;
	}

	fnhe = NULL;
	do_cache &= fi != NULL;
	if (fi) {
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);
		struct rtable __rcu **prth;

		fnhe = find_exception(nhc, fl4->daddr);
		if (!do_cache)
			goto add;
		if (fnhe) {
			prth = &fnhe->fnhe_rth_output;
		} else {
			if (unlikely(fl4->flowi4_flags &
				     FLOWI_FLAG_KNOWN_NH &&
				     !(nhc->nhc_gw_family &&
				       nhc->nhc_scope == RT_SCOPE_LINK))) {
				do_cache = false;
				goto add;
			}
			prth = raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
		}
		rth = rcu_dereference(*prth);
		if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))
			return rth;
	}

add:
	rth = rt_dst_alloc(dev_out, flags, type,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
			   IN_DEV_CONF_GET(in_dev, NOXFRM),
			   do_cache);
	if (!rth)
		return ERR_PTR(-ENOBUFS);

	rth->rt_iif = orig_oif;

	RT_CACHE_STAT_INC(out_slow_tot);

	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
		if (flags & RTCF_LOCAL &&
		    !(dev_out->flags & IFF_LOOPBACK)) {
			rth->dst.output = ip_mc_output;
			RT_CACHE_STAT_INC(out_slow_mc);
		}
#ifdef CONFIG_IP_MROUTE
		if (type == RTN_MULTICAST) {
			if (IN_DEV_MFORWARD(in_dev) &&
			    !ipv4_is_local_multicast(fl4->daddr)) {
				rth->dst.input = ip_mr_input;
				rth->dst.output = ip_mc_output;
			}
		}
#endif
	}

	rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0, do_cache);
	lwtunnel_set_redirect(&rth->dst);

	return rth;
}
/*
 * Major route resolver routine.
 */

struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
					const struct sk_buff *skb)
{
	__u8 tos = RT_FL_TOS(fl4);
	struct fib_result res = {
		.type		= RTN_UNSPEC,
		.fi		= NULL,
		.table		= NULL,
		.tclassid	= 0,
	};
	struct rtable *rth;

	fl4->flowi4_iif = LOOPBACK_IFINDEX;
	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
	fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
			     RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);

	rcu_read_lock();
	rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
	rcu_read_unlock();

	return rth;
}
EXPORT_SYMBOL_GPL(ip_route_output_key_hash);
2401 struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
2402 struct fib_result *res,
2403 const struct sk_buff *skb)
2405 struct net_device *dev_out = NULL;
2406 int orig_oif = fl4->flowi4_oif;
2407 unsigned int flags = 0;
2409 int err = -ENETUNREACH;
2412 rth = ERR_PTR(-EINVAL);
2413 if (ipv4_is_multicast(fl4->saddr) ||
2414 ipv4_is_lbcast(fl4->saddr) ||
2415 ipv4_is_zeronet(fl4->saddr))
2418 /* I removed check for oif == dev_out->oif here.
2419 It was wrong for two reasons:
2420 1. ip_dev_find(net, saddr) can return wrong iface, if saddr
2421 is assigned to multiple interfaces.
2422 2. Moreover, we are allowed to send packets with saddr
2423 of another iface. --ANK
2426 if (fl4->flowi4_oif == 0 &&
2427 (ipv4_is_multicast(fl4->daddr) ||
2428 ipv4_is_lbcast(fl4->daddr))) {
2429 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2430 dev_out = __ip_dev_find(net, fl4->saddr, false);
2434 /* Special hack: user can direct multicasts
2435 and limited broadcast via necessary interface
2436 without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2437 This hack is not just for fun, it allows
2438 vic,vat and friends to work.
2439 They bind socket to loopback, set ttl to zero
2440 and expect that it will work.
2441 From the viewpoint of routing cache they are broken,
2442 because we are not allowed to build multicast path
2443 with loopback source addr (look, routing cache
2444 cannot know, that ttl is zero, so that packet
2445 will not leave this host and route is valid).
2446 Luckily, this hack is good workaround.
2449 fl4->flowi4_oif = dev_out->ifindex;
2453 if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
2454 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2455 if (!__ip_dev_find(net, fl4->saddr, false))
2461 if (fl4->flowi4_oif) {
2462 dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
2463 rth = ERR_PTR(-ENODEV);
2467 /* RACE: Check return value of inet_select_addr instead. */
2468 if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
2469 rth = ERR_PTR(-ENETUNREACH);
2472 if (ipv4_is_local_multicast(fl4->daddr) ||
2473 ipv4_is_lbcast(fl4->daddr) ||
2474 fl4->flowi4_proto == IPPROTO_IGMP) {
2476 fl4->saddr = inet_select_addr(dev_out, 0,
2481 if (ipv4_is_multicast(fl4->daddr))
2482 fl4->saddr = inet_select_addr(dev_out, 0,
2484 else if (!fl4->daddr)
2485 fl4->saddr = inet_select_addr(dev_out, 0,
2491 fl4->daddr = fl4->saddr;
2493 fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
2494 dev_out = net->loopback_dev;
2495 fl4->flowi4_oif = LOOPBACK_IFINDEX;
2496 res->type = RTN_LOCAL;
2497 flags |= RTCF_LOCAL;
2501 err = fib_lookup(net, fl4, res, 0);
2505 if (fl4->flowi4_oif &&
2506 (ipv4_is_multicast(fl4->daddr) ||
2507 !netif_index_is_l3_master(net, fl4->flowi4_oif))) {
			/* Apparently, routing tables are wrong. Assume,
			   that the destination is on link.

			   WHY? DW.
			   Because we are allowed to send to iface
			   even if it has NO routes and NO assigned
			   addresses. When oif is specified, routing
			   tables are looked up with only one purpose:
			   to catch if destination is gatewayed, rather than
			   direct. Moreover, if MSG_DONTROUTE is set,
			   we send packet, ignoring both routing tables
			   and ifaddr state. --ANK

			   We could make it even if oif is unknown,
			   likely IPv6, but we do not.
			 */

			if (fl4->saddr == 0)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			res->type = RTN_UNICAST;
			goto make_route;
		}
		rth = ERR_PTR(err);
		goto out;
	}

	if (res->type == RTN_LOCAL) {
		if (!fl4->saddr) {
			if (res->fi->fib_prefsrc)
				fl4->saddr = res->fi->fib_prefsrc;
			else
				fl4->saddr = fl4->daddr;
		}

		/* L3 master device is the loopback for that domain */
		dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ? :
			net->loopback_dev;

		/* make sure orig_oif points to fib result device even
		 * though packet rx/tx happens over loopback or l3mdev
		 */
		orig_oif = FIB_RES_OIF(*res);

		fl4->flowi4_oif = dev_out->ifindex;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

	fib_select_path(net, res, fl4, skb);

	dev_out = FIB_RES_DEV(*res);
	fl4->flowi4_oif = dev_out->ifindex;

make_route:
	rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);

out:
	return rth;
}
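
/* Caller's-eye sketch (illustrative, not code from this file): a typical
 * output-route lookup goes through the historical wrapper
 * ip_route_output_key(), which funnels into the hash variants above:
 *
 *	struct flowi4 fl4 = {
 *		.daddr = dst_ip,		// placeholder destination
 *		.flowi4_proto = IPPROTO_UDP,
 *	};
 *	struct rtable *rt = ip_route_output_key(net, &fl4);
 *
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *	...
 *	ip_rt_put(rt);	// drop the dst reference when done
 */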

static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
{
	return NULL;
}

static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;
}

static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
					  struct sk_buff *skb, u32 mtu)
{
}

static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
				       struct sk_buff *skb)
{
}

static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
					  unsigned long old)
{
	return NULL;
}

static struct dst_ops ipv4_dst_blackhole_ops = {
	.family			= AF_INET,
	.check			= ipv4_blackhole_dst_check,
	.mtu			= ipv4_blackhole_mtu,
	.default_advmss		= ipv4_default_advmss,
	.update_pmtu		= ipv4_rt_blackhole_update_pmtu,
	.redirect		= ipv4_rt_blackhole_redirect,
	.cow_metrics		= ipv4_rt_blackhole_cow_metrics,
	.neigh_lookup		= ipv4_neigh_lookup,
};
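
/* A blackhole dst silently eats whatever is sent through it: ->input and
 * ->output are dst_discard*(), PMTU updates and redirects are ignored and
 * metrics are never COWed. The best-known user (an observation, not a
 * guarantee) is the xfrm code, which parks flows on a blackhole route
 * while IPsec SAs are still being negotiated.
 */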

struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
	struct rtable *ort = (struct rtable *) dst_orig;
	struct rtable *rt;

	rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_DEAD, 0);
	if (rt) {
		struct dst_entry *new = &rt->dst;

		new->__use = 1;
		new->input = dst_discard;
		new->output = dst_discard_out;

		new->dev = net->loopback_dev;
		if (new->dev)
			dev_hold(new->dev);

		rt->rt_is_input = ort->rt_is_input;
		rt->rt_iif = ort->rt_iif;
		rt->rt_pmtu = ort->rt_pmtu;
		rt->rt_mtu_locked = ort->rt_mtu_locked;

		rt->rt_genid = rt_genid_ipv4(net);
		rt->rt_flags = ort->rt_flags;
		rt->rt_type = ort->rt_type;
		rt->rt_gw_family = ort->rt_gw_family;
		if (rt->rt_gw_family == AF_INET)
			rt->rt_gw4 = ort->rt_gw4;
		else if (rt->rt_gw_family == AF_INET6)
			rt->rt_gw6 = ort->rt_gw6;

		INIT_LIST_HEAD(&rt->rt_uncached);
	}

	dst_release(dst_orig);

	return rt ? &rt->dst : ERR_PTR(-ENOMEM);
}

struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
				    const struct sock *sk)
{
	struct rtable *rt = __ip_route_output_key(net, flp4);

	if (IS_ERR(rt))
		return rt;

	if (flp4->flowi4_proto)
		rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
							flowi4_to_flowi(flp4),
							sk, 0);

	return rt;
}
EXPORT_SYMBOL_GPL(ip_route_output_flow);
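
/* Note that the xfrm detour above is taken only when flowi4_proto is set;
 * a flow with a zero protocol skips the IPsec policy lookup and returns
 * the plain route unchanged.
 */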

/* called with rcu_read_lock held */
static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
			struct rtable *rt, u32 table_id, struct flowi4 *fl4,
			struct sk_buff *skb, u32 portid, u32 seq)
{
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	unsigned long expires = 0;
	u32 error;
	u32 metrics[RTAX_MAX];

	nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), 0);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	r->rtm_family	 = AF_INET;
	r->rtm_dst_len	= 32;
	r->rtm_src_len	= 0;
	r->rtm_tos	= fl4->flowi4_tos;
	r->rtm_table	= table_id < 256 ? table_id : RT_TABLE_COMPAT;
	if (nla_put_u32(skb, RTA_TABLE, table_id))
		goto nla_put_failure;
	r->rtm_type	= rt->rt_type;
	r->rtm_scope	= RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	r->rtm_flags	= (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;
	if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
		r->rtm_flags |= RTCF_DOREDIRECT;

	if (nla_put_in_addr(skb, RTA_DST, dst))
		goto nla_put_failure;
	if (src) {
		r->rtm_src_len = 32;
		if (nla_put_in_addr(skb, RTA_SRC, src))
			goto nla_put_failure;
	}
	if (rt->dst.dev &&
	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
		goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (rt->dst.tclassid &&
	    nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
		goto nla_put_failure;
#endif

	if (!rt_is_input_route(rt) &&
	    fl4->saddr != src) {
		if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
			goto nla_put_failure;
	}

	if (rt->rt_gw_family == AF_INET &&
	    nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) {
		goto nla_put_failure;
	} else if (rt->rt_gw_family == AF_INET6) {
		int alen = sizeof(struct in6_addr);
		struct nlattr *nla;
		struct rtvia *via;

		nla = nla_reserve(skb, RTA_VIA, alen + 2);
		if (!nla)
			goto nla_put_failure;

		via = nla_data(nla);
		via->rtvia_family = AF_INET6;
		memcpy(via->rtvia_addr, &rt->rt_gw6, alen);
	}

	expires = rt->dst.expires;
	if (expires) {
		unsigned long now = jiffies;

		if (time_before(now, expires))
			expires -= now;
		else
			expires = 0;
	}

	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
	if (rt->rt_pmtu && expires)
		metrics[RTAX_MTU - 1] = rt->rt_pmtu;
	if (rt->rt_mtu_locked && expires)
		metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
	if (rtnetlink_put_metrics(skb, metrics) < 0)
		goto nla_put_failure;

	if (fl4->flowi4_mark &&
	    nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
		goto nla_put_failure;

	if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
	    nla_put_u32(skb, RTA_UID,
			from_kuid_munged(current_user_ns(), fl4->flowi4_uid)))
		goto nla_put_failure;

	error = rt->dst.error;

	if (rt_is_input_route(rt)) {
#ifdef CONFIG_IP_MROUTE
		if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
		    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
			int err = ipmr_get_route(net, skb,
						 fl4->saddr, fl4->daddr,
						 r, portid);

			if (err <= 0) {
				if (err == 0)
					return 0;
				goto nla_put_failure;
			}
		} else
#endif
			if (nla_put_u32(skb, RTA_IIF, fl4->flowi4_iif))
				goto nla_put_failure;
	}

	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
						   u8 ip_proto, __be16 sport,
						   __be16 dport)
{
	struct sk_buff *skb;
	struct iphdr *iph;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return NULL;

	/* Reserve room for dummy headers, this skb can pass
	 * through good chunk of routing engine.
	 */
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	iph = skb_put(skb, sizeof(struct iphdr));
	iph->protocol = ip_proto;
	iph->saddr = src;
	iph->daddr = dst;
	iph->version = 0x4;
	iph->frag_off = 0;
	iph->ihl = 0x5;
	skb_set_transport_header(skb, skb->len);

	switch (iph->protocol) {
	case IPPROTO_UDP: {
		struct udphdr *udph;

		udph = skb_put_zero(skb, sizeof(struct udphdr));
		udph->source = sport;
		udph->dest = dport;
		/* len is an on-the-wire field, so keep it big-endian */
		udph->len = htons(sizeof(struct udphdr));
		udph->check = 0;
		break;
	}
	case IPPROTO_TCP: {
		struct tcphdr *tcph;

		tcph = skb_put_zero(skb, sizeof(struct tcphdr));
		tcph->source	= sport;
		tcph->dest	= dport;
		tcph->doff	= sizeof(struct tcphdr) / 4;
		tcph->rst = 1;
		tcph->check = ~tcp_v4_check(sizeof(struct tcphdr),
					    src, dst, 0);
		break;
	}
	case IPPROTO_ICMP: {
		struct icmphdr *icmph;

		icmph = skb_put_zero(skb, sizeof(struct icmphdr));
		icmph->type = ICMP_ECHO;
		icmph->code = 0;
	}
	}

	return skb;
}
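
/* The skb built above is a minimal dummy: genuine IP and transport
 * headers, no payload. It exists so that fib rules and classifiers that
 * peek at header fields see something sensible during an RTM_GETROUTE
 * query; it is never transmitted.
 */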

static int inet_rtm_valid_getroute_req(struct sk_buff *skb,
				       const struct nlmsghdr *nlh,
				       struct nlattr **tb,
				       struct netlink_ext_ack *extack)
{
	struct rtmsg *rtm;
	int i, err;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
		NL_SET_ERR_MSG(extack,
			       "ipv4: Invalid header for route get request");
		return -EINVAL;
	}

	if (!netlink_strict_get_check(skb))
		return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
					      rtm_ipv4_policy, extack);

	rtm = nlmsg_data(nlh);
	if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) ||
	    (rtm->rtm_dst_len && rtm->rtm_dst_len != 32) ||
	    rtm->rtm_table || rtm->rtm_protocol ||
	    rtm->rtm_scope || rtm->rtm_type) {
		NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for route get request");
		return -EINVAL;
	}

	if (rtm->rtm_flags & ~(RTM_F_NOTIFY |
			       RTM_F_LOOKUP_TABLE |
			       RTM_F_FIB_MATCH)) {
		NL_SET_ERR_MSG(extack, "ipv4: Unsupported rtm_flags for route get request");
		return -EINVAL;
	}

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
					    rtm_ipv4_policy, extack);
	if (err)
		return err;

	if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
	    (tb[RTA_DST] && !rtm->rtm_dst_len)) {
		NL_SET_ERR_MSG(extack, "ipv4: rtm_src_len and rtm_dst_len must be 32 for IPv4");
		return -EINVAL;
	}

	for (i = 0; i <= RTA_MAX; i++) {
		if (!tb[i])
			continue;

		switch (i) {
		case RTA_IIF:
		case RTA_OIF:
		case RTA_SRC:
		case RTA_DST:
		case RTA_IP_PROTO:
		case RTA_SPORT:
		case RTA_DPORT:
		case RTA_MARK:
		case RTA_UID:
			break;
		default:
			NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in route get request");
			return -EINVAL;
		}
	}

	return 0;
}
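
/* In practice this validator sits behind "ip route get", e.g.
 *
 *	$ ip route get 192.0.2.1 mark 7
 *
 * which sends RTM_GETROUTE with RTA_DST and RTA_MARK (the address is a
 * documentation-range example). For strict-checking callers, any
 * attribute outside the switch above is rejected.
 */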

static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[RTA_MAX + 1];
	u32 table_id = RT_TABLE_MAIN;
	__be16 sport = 0, dport = 0;
	struct fib_result res = {};
	u8 ip_proto = IPPROTO_UDP;
	struct rtable *rt = NULL;
	struct sk_buff *skb;
	struct rtmsg *rtm;
	struct flowi4 fl4 = {};
	__be32 dst = 0;
	__be32 src = 0;
	kuid_t uid;
	u32 iif;
	int err;
	int mark;

	err = inet_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
	if (err < 0)
		return err;

	rtm = nlmsg_data(nlh);
	src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
	dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
	if (tb[RTA_UID])
		uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
	else
		uid = (iif ? INVALID_UID : current_uid());

	if (tb[RTA_IP_PROTO]) {
		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
						  &ip_proto, AF_INET, extack);
		if (err)
			return err;
	}

	if (tb[RTA_SPORT])
		sport = nla_get_be16(tb[RTA_SPORT]);

	if (tb[RTA_DPORT])
		dport = nla_get_be16(tb[RTA_DPORT]);

	skb = inet_rtm_getroute_build_skb(src, dst, ip_proto, sport, dport);
	if (!skb)
		return -ENOBUFS;

	fl4.daddr = dst;
	fl4.saddr = src;
	fl4.flowi4_tos = rtm->rtm_tos;
	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
	fl4.flowi4_mark = mark;
	fl4.flowi4_uid = uid;
	if (sport)
		fl4.fl4_sport = sport;
	if (dport)
		fl4.fl4_dport = dport;
	fl4.flowi4_proto = ip_proto;

	rcu_read_lock();

	if (iif) {
		struct net_device *dev;

		dev = dev_get_by_index_rcu(net, iif);
		if (!dev) {
			err = -ENODEV;
			goto errout_rcu;
		}

		fl4.flowi4_iif = iif; /* for rt_fill_info */
		skb->dev	= dev;
		skb->mark	= mark;
		err = ip_route_input_rcu(skb, dst, src, rtm->rtm_tos,
					 dev, &res);

		rt = skb_rtable(skb);
		if (err == 0 && rt->dst.error)
			err = -rt->dst.error;
	} else {
		fl4.flowi4_iif = LOOPBACK_IFINDEX;
		skb->dev = net->loopback_dev;
		rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
		err = 0;
		if (IS_ERR(rt))
			err = PTR_ERR(rt);
		else
			skb_dst_set(skb, &rt->dst);
	}

	if (err)
		goto errout_rcu;

	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE)
		table_id = res.table ? res.table->tb_id : 0;

	/* reset skb for netlink reply msg */
	skb_trim(skb, 0);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_mac_header(skb);

	if (rtm->rtm_flags & RTM_F_FIB_MATCH) {
		if (!res.fi) {
			err = fib_props[res.type].error;
			if (!err)
				err = -EHOSTUNREACH;
			goto errout_rcu;
		}
		err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
				    nlh->nlmsg_seq, RTM_NEWROUTE, table_id,
				    rt->rt_type, res.prefix, res.prefixlen,
				    fl4.flowi4_tos, res.fi, 0);
	} else {
		err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
				   NETLINK_CB(in_skb).portid, nlh->nlmsg_seq);
	}
	if (err < 0)
		goto errout_rcu;

	rcu_read_unlock();

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);

errout_free:
	return err;

errout_rcu:
	rcu_read_unlock();
	kfree_skb(skb);
	goto errout_free;
}

void ip_rt_multicast_event(struct in_device *in_dev)
{
	rt_cache_flush(dev_net(in_dev->dev));
}

#ifdef CONFIG_SYSCTL
static int ip_rt_gc_interval __read_mostly	= 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
static int ip_rt_gc_elasticity __read_mostly	= 8;
static int ip_min_valid_pmtu __read_mostly	= IPV4_MIN_MTU;

static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
				     void __user *buffer,
				     size_t *lenp, loff_t *ppos)
{
	struct net *net = (struct net *)__ctl->extra1;

	if (write) {
		rt_cache_flush(net);
		fnhe_genid_bump(net);
		return 0;
	}

	return -EINVAL;
}
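
/* Usage sketch (an assumption about the admin interface, not code from
 * this file): since the handler above only accepts writes, the cache is
 * flushed with e.g.
 *
 *	# sysctl -w net.ipv4.route.flush=1
 *
 * while reading the file back simply returns EINVAL.
 */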

static struct ctl_table ipv4_route_table[] = {
	{
		.procname	= "gc_thresh",
		.data		= &ipv4_dst_ops.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "max_size",
		.data		= &ip_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		/* Deprecated. Use gc_min_interval_ms */
		.procname	= "gc_min_interval",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_min_interval_ms",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{
		.procname	= "gc_timeout",
		.data		= &ip_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_interval",
		.data		= &ip_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "redirect_load",
		.data		= &ip_rt_redirect_load,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_number",
		.data		= &ip_rt_redirect_number,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_silence",
		.data		= &ip_rt_redirect_silence,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_cost",
		.data		= &ip_rt_error_cost,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_burst",
		.data		= &ip_rt_error_burst,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_elasticity",
		.data		= &ip_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "mtu_expires",
		.data		= &ip_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "min_pmtu",
		.data		= &ip_rt_min_pmtu,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &ip_min_valid_pmtu,
	},
	{
		.procname	= "min_adv_mss",
		.data		= &ip_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_table ipv4_route_flush_table[] = {
	{
		.procname	= "flush",
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= ipv4_sysctl_rtcache_flush,
	},
	{ },
};

static __net_init int sysctl_route_net_init(struct net *net)
{
	struct ctl_table *tbl;

	tbl = ipv4_route_flush_table;
	if (!net_eq(net, &init_net)) {
		tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
		if (!tbl)
			goto err_dup;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			tbl[0].procname = NULL;
	}
	tbl[0].extra1 = net;

	net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
	if (!net->ipv4.route_hdr)
		goto err_reg;

	return 0;

err_reg:
	if (tbl != ipv4_route_flush_table)
		kfree(tbl);
err_dup:
	return -ENOMEM;
}

static __net_exit void sysctl_route_net_exit(struct net *net)
{
	struct ctl_table *tbl;

	tbl = net->ipv4.route_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.route_hdr);
	BUG_ON(tbl == ipv4_route_flush_table);
	kfree(tbl);
}

static __net_initdata struct pernet_operations sysctl_route_ops = {
	.init = sysctl_route_net_init,
	.exit = sysctl_route_net_exit,
};
#endif

static __net_init int rt_genid_init(struct net *net)
{
	atomic_set(&net->ipv4.rt_genid, 0);
	atomic_set(&net->fnhe_genid, 0);
	atomic_set(&net->ipv4.dev_addr_genid, get_random_int());
	return 0;
}

static __net_initdata struct pernet_operations rt_genid_ops = {
	.init = rt_genid_init,
};

static int __net_init ipv4_inetpeer_init(struct net *net)
{
	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);

	if (!bp)
		return -ENOMEM;
	inet_peer_base_init(bp);
	net->ipv4.peers = bp;
	return 0;
}

static void __net_exit ipv4_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv4.peers;

	net->ipv4.peers = NULL;
	inetpeer_invalidate_tree(bp);
	kfree(bp);
}

static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
	.init	= ipv4_inetpeer_init,
	.exit	= ipv4_inetpeer_exit,
};

#ifdef CONFIG_IP_ROUTE_CLASSID
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
#endif /* CONFIG_IP_ROUTE_CLASSID */

int __init ip_rt_init(void)
{
	int cpu;

	ip_idents = kmalloc_array(IP_IDENTS_SZ, sizeof(*ip_idents),
				  GFP_KERNEL);
	if (!ip_idents)
		panic("IP: failed to allocate ip_idents\n");

	prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));

	ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
	if (!ip_tstamps)
		panic("IP: failed to allocate ip_tstamps\n");

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		spin_lock_init(&ul->lock);
	}
#ifdef CONFIG_IP_ROUTE_CLASSID
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct),
				    __alignof__(struct ip_rt_acct));
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
#endif

	ipv4_dst_ops.kmem_cachep =
		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

	if (dst_entries_init(&ipv4_dst_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_ops counter\n");

	if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");

	ipv4_dst_ops.gc_thresh = ~0;
	ip_rt_max_size = INT_MAX;

	devinet_init();
	ip_fib_init();

	if (ip_rt_proc_init())
		pr_err("Unable to create route proc files\n");
#ifdef CONFIG_XFRM
	xfrm_init();
	xfrm4_init();
#endif
	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);

#ifdef CONFIG_SYSCTL
	register_pernet_subsys(&sysctl_route_ops);
#endif
	register_pernet_subsys(&rt_genid_ops);
	register_pernet_subsys(&ipv4_inetpeer_ops);
	return 0;
}

#ifdef CONFIG_SYSCTL
/*
 * We really need to sanitize the damn ipv4 init order, then all
 * this nonsense will go away.
 */
void __init ip_static_sysctl_init(void)
{
	register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
}
#endif