/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 Forwarding Information Base: semantics.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/slab.h>

#include <net/protocol.h>
#include <net/route.h>
#include <net/ip_fib.h>
#include <net/netlink.h>
#include <net/nexthop.h>

#include "fib_lookup.h"
static DEFINE_SPINLOCK(fib_info_lock);
static struct hlist_head *fib_info_hash;
static struct hlist_head *fib_info_laddrhash;
static unsigned int fib_info_hash_size;
static unsigned int fib_info_cnt;

#define DEVINDEX_HASHBITS 8
#define DEVINDEX_HASHSIZE (1U << DEVINDEX_HASHBITS)
static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];
#ifdef CONFIG_IP_ROUTE_MULTIPATH

static DEFINE_SPINLOCK(fib_multipath_lock);

#define for_nexthops(fi) {						\
	int nhsel; const struct fib_nh *nh;				\
	for (nhsel = 0, nh = (fi)->fib_nh;				\
	     nhsel < (fi)->fib_nhs;					\
	     nh++, nhsel++)

#define change_nexthops(fi) {						\
	int nhsel; struct fib_nh *nexthop_nh;				\
	for (nhsel = 0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh);	\
	     nhsel < (fi)->fib_nhs;					\
	     nexthop_nh++, nhsel++)

#else /* CONFIG_IP_ROUTE_MULTIPATH */

/* Hope that gcc will optimize it to get rid of the dummy loop */

#define for_nexthops(fi) {						\
	int nhsel; const struct fib_nh *nh = (fi)->fib_nh;		\
	for (nhsel = 0; nhsel < 1; nhsel++)

#define change_nexthops(fi) {						\
	int nhsel;							\
	struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh);	\
	for (nhsel = 0; nhsel < 1; nhsel++)

#endif /* CONFIG_IP_ROUTE_MULTIPATH */

#define endfor_nexthops(fi) }
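/* Usage pattern, as seen throughout this file: the macro opens a block
 * that iterates over the nexthops of a fib_info, and endfor_nexthops()
 * closes it, e.g.:
 *
 *	change_nexthops(fi) {
 *		if (nexthop_nh->nh_dev)
 *			dev_put(nexthop_nh->nh_dev);
 *	} endfor_nexthops(fi);
 *
 * Without CONFIG_IP_ROUTE_MULTIPATH the "loop" degenerates into a single
 * pass over fib_nh[0].
 */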
const struct fib_prop fib_props[RTN_MAX + 1] = {
	[RTN_UNSPEC]	  = { .error = 0,		.scope = RT_SCOPE_NOWHERE },
	[RTN_UNICAST]	  = { .error = 0,		.scope = RT_SCOPE_UNIVERSE },
	[RTN_LOCAL]	  = { .error = 0,		.scope = RT_SCOPE_HOST },
	[RTN_BROADCAST]	  = { .error = 0,		.scope = RT_SCOPE_LINK },
	[RTN_ANYCAST]	  = { .error = 0,		.scope = RT_SCOPE_LINK },
	[RTN_MULTICAST]	  = { .error = 0,		.scope = RT_SCOPE_UNIVERSE },
	[RTN_BLACKHOLE]	  = { .error = -EINVAL,		.scope = RT_SCOPE_UNIVERSE },
	[RTN_UNREACHABLE] = { .error = -EHOSTUNREACH,	.scope = RT_SCOPE_UNIVERSE },
	[RTN_PROHIBIT]	  = { .error = -EACCES,		.scope = RT_SCOPE_UNIVERSE },
	[RTN_THROW]	  = { .error = -EAGAIN,		.scope = RT_SCOPE_UNIVERSE },
	[RTN_NAT]	  = { .error = -EINVAL,		.scope = RT_SCOPE_NOWHERE },
	[RTN_XRESOLVE]	  = { .error = -EINVAL,		.scope = RT_SCOPE_NOWHERE },
};
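/* For each route type, fib_props records the error such routes signal
 * (0 for types that actually forward or deliver) and the widest scope a
 * route of that type may carry; fib_create_info() below uses both fields
 * to reject configurations that are too wide in scope or that attach
 * nexthops to an error-only type.
 */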
static void free_nh_exceptions(struct fib_nh *nh)
{
	struct fnhe_hash_bucket *hash = nh->nh_exceptions;
	int i;

	for (i = 0; i < FNHE_HASH_SIZE; i++) {
		struct fib_nh_exception *fnhe;

		fnhe = rcu_dereference_protected(hash[i].chain, 1);
		while (fnhe) {
			struct fib_nh_exception *next;

			next = rcu_dereference_protected(fnhe->fnhe_next, 1);
static void rt_nexthop_free(struct rtable __rcu **rtp)
{
	struct rtable *rt = rcu_dereference_protected(*rtp, 1);

	/* Not even needed : RCU_INIT_POINTER(*rtp, NULL);
	 * because we waited an RCU grace period before calling
	 * free_fib_info_rcu()
	 */

static void rt_nexthop_free_cpus(struct rtable __rcu * __percpu *rtp)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct rtable *rt;

		rt = rcu_dereference_protected(*per_cpu_ptr(rtp, cpu), 1);
/* Release a nexthop info record */
static void free_fib_info_rcu(struct rcu_head *head)
{
	struct fib_info *fi = container_of(head, struct fib_info, rcu);

	change_nexthops(fi) {
		if (nexthop_nh->nh_dev)
			dev_put(nexthop_nh->nh_dev);
		if (nexthop_nh->nh_exceptions)
			free_nh_exceptions(nexthop_nh);
		rt_nexthop_free_cpus(nexthop_nh->nh_pcpu_rth_output);
		rt_nexthop_free(&nexthop_nh->nh_rth_input);
	} endfor_nexthops(fi);

	release_net(fi->fib_net);
	if (fi->fib_metrics != (u32 *) dst_default_metrics)
		kfree(fi->fib_metrics);
	kfree(fi);
}
void free_fib_info(struct fib_info *fi)
{
	if (fi->fib_dead == 0) {
		pr_warn("Freeing alive fib_info %p\n", fi);
		return;
	}
	fib_info_cnt--;
#ifdef CONFIG_IP_ROUTE_CLASSID
	change_nexthops(fi) {
		if (nexthop_nh->nh_tclassid)
			fi->fib_net->ipv4.fib_num_tclassid_users--;
	} endfor_nexthops(fi);
#endif
	call_rcu(&fi->rcu, free_fib_info_rcu);
}
void fib_release_info(struct fib_info *fi)
{
	spin_lock_bh(&fib_info_lock);
	if (fi && --fi->fib_treeref == 0) {
		hlist_del(&fi->fib_hash);
		if (fi->fib_prefsrc)
			hlist_del(&fi->fib_lhash);
		change_nexthops(fi) {
			if (!nexthop_nh->nh_dev)
				continue;
			hlist_del(&nexthop_nh->nh_hash);
		} endfor_nexthops(fi)
		fi->fib_dead = 1;
		fib_info_put(fi);
	}
	spin_unlock_bh(&fib_info_lock);
}
static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
{
	const struct fib_nh *onh = ofi->fib_nh;

	for_nexthops(fi) {
		if (nh->nh_oif != onh->nh_oif ||
		    nh->nh_gw  != onh->nh_gw ||
		    nh->nh_scope != onh->nh_scope ||
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		    nh->nh_weight != onh->nh_weight ||
#endif
#ifdef CONFIG_IP_ROUTE_CLASSID
		    nh->nh_tclassid != onh->nh_tclassid ||
#endif
		    ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_F_DEAD))
			return -1;
		onh++;
	} endfor_nexthops(fi);
	return 0;
}
static inline unsigned int fib_devindex_hashfn(unsigned int val)
{
	unsigned int mask = DEVINDEX_HASHSIZE - 1;

	return (val ^
		(val >> DEVINDEX_HASHBITS) ^
		(val >> (DEVINDEX_HASHBITS * 2))) & mask;
}
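/* Illustrative example: with DEVINDEX_HASHSIZE == 256, an ifindex of
 * 0x12345 hashes to (0x012345 ^ 0x000123 ^ 0x000001) & 0xff == 0x67,
 * so every nexthop bound to that device lands in fib_info_devhash[0x67].
 */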
static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
{
	unsigned int mask = (fib_info_hash_size - 1);
	unsigned int val = fi->fib_nhs;

	val ^= (fi->fib_protocol << 8) | fi->fib_scope;
	val ^= (__force u32)fi->fib_prefsrc;
	val ^= fi->fib_priority;
	for_nexthops(fi) {
		val ^= fib_devindex_hashfn(nh->nh_oif);
	} endfor_nexthops(fi)

	return (val ^ (val >> 7) ^ (val >> 12)) & mask;
}
static struct fib_info *fib_find_info(const struct fib_info *nfi)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct fib_info *fi;
	unsigned int hash;

	hash = fib_info_hashfn(nfi);
	head = &fib_info_hash[hash];

	hlist_for_each_entry(fi, node, head, fib_hash) {
		if (!net_eq(fi->fib_net, nfi->fib_net))
			continue;
		if (fi->fib_nhs != nfi->fib_nhs)
			continue;
		if (nfi->fib_protocol == fi->fib_protocol &&
		    nfi->fib_scope == fi->fib_scope &&
		    nfi->fib_prefsrc == fi->fib_prefsrc &&
		    nfi->fib_priority == fi->fib_priority &&
		    memcmp(nfi->fib_metrics, fi->fib_metrics,
			   sizeof(u32) * RTAX_MAX) == 0 &&
		    ((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_F_DEAD) == 0 &&
		    (nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0))
			return fi;
	}

	return NULL;
}
/* Check that the gateway is already configured.
 * Used only by the redirect accept routine.
 */
int ip_fib_check_default(__be32 gw, struct net_device *dev)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct fib_nh *nh;
	unsigned int hash;

	spin_lock(&fib_info_lock);

	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];
	hlist_for_each_entry(nh, node, head, nh_hash) {
		if (nh->nh_dev == dev &&
		    nh->nh_gw == gw &&
		    !(nh->nh_flags & RTNH_F_DEAD)) {
			spin_unlock(&fib_info_lock);
			return 0;
		}
	}
	spin_unlock(&fib_info_lock);
	return -1;
}
static inline size_t fib_nlmsg_size(struct fib_info *fi)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
			 + nla_total_size(4) /* RTA_TABLE */
			 + nla_total_size(4) /* RTA_DST */
			 + nla_total_size(4) /* RTA_PRIORITY */
			 + nla_total_size(4); /* RTA_PREFSRC */

	/* space for nested metrics */
	payload += nla_total_size((RTAX_MAX * nla_total_size(4)));

	if (fi->fib_nhs) {
		/* Also handles the special case fib_nhs == 1 */

		/* each nexthop is packed in an attribute */
		size_t nhsize = nla_total_size(sizeof(struct rtnexthop));

		/* may contain flow and gateway attribute */
		nhsize += 2 * nla_total_size(4);

		/* all nexthops are packed in a nested attribute */
		payload += nla_total_size(fi->fib_nhs * nhsize);
	}

	return payload;
}
void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
	       int dst_len, u32 tb_id, struct nl_info *info,
	       unsigned int nlm_flags)
{
	struct sk_buff *skb;
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	int err = -ENOBUFS;
	skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL);
	if (skb == NULL)
		goto errout;
	err = fib_dump_info(skb, info->pid, seq, event, tb_id,
			    fa->fa_type, key, dst_len,
			    fa->fa_tos, fa->fa_info, nlm_flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, info->nl_net, info->pid, RTNLGRP_IPV4_ROUTE,
		    info->nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err);
}
/* Return the first fib alias matching TOS with
 * priority less than or equal to PRIO.
 */
struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio)
{
	struct fib_alias *fa;

	list_for_each_entry(fa, fah, fa_list) {
		if (fa->fa_tos > tos)
			continue;
		if (fa->fa_info->fib_priority >= prio ||
		    fa->fa_tos < tos)
			return fa;
	}
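/* The lookup above relies on the alias list being kept ordered by TOS
 * (descending) and, within one TOS value, by priority (ascending); the
 * insertion paths use this helper both to find matches and to locate
 * insertion points.
 */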
int fib_detect_death(struct fib_info *fi, int order,
		     struct fib_info **last_resort, int *last_idx, int dflt)
{
	struct neighbour *n;
	int state = NUD_NONE;

	n = neigh_lookup(&arp_tbl, &fi->fib_nh[0].nh_gw, fi->fib_dev);
	if (n) {
		state = n->nud_state;
		neigh_release(n);
	}
	if (state == NUD_REACHABLE)
		return 0;
	if ((state & NUD_VALID) && order != dflt)
		return 0;
	if ((state & NUD_VALID) ||
	    (*last_idx < 0 && order > dflt)) {
#ifdef CONFIG_IP_ROUTE_MULTIPATH

static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining)
{
	int nhs = 0;

	while (rtnh_ok(rtnh, remaining)) {
		nhs++;
		rtnh = rtnh_next(rtnh, &remaining);
	}
	/* leftover implies invalid nexthop configuration, discard it */
	return remaining > 0 ? 0 : nhs;
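/* cfg->fc_mp points at the payload of an RTA_MULTIPATH attribute: a
 * sequence of struct rtnexthop headers, each optionally followed by its
 * own nested attributes such as RTA_GATEWAY (and RTA_FLOW when route
 * classids are enabled).  This is the layout a multipath request such as
 * "ip route add ... nexthop via A nexthop via B" produces.
 */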
static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
		       int remaining, struct fib_config *cfg)
{
	change_nexthops(fi) {
		int attrlen;

		if (!rtnh_ok(rtnh, remaining))
			return -EINVAL;

		nexthop_nh->nh_flags =
			(cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
		nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
		nexthop_nh->nh_weight = rtnh->rtnh_hops + 1;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			nexthop_nh->nh_gw = nla ? nla_get_be32(nla) : 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
			nla = nla_find(attrs, attrlen, RTA_FLOW);
			nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
			if (nexthop_nh->nh_tclassid)
				fi->fib_net->ipv4.fib_num_tclassid_users++;
#endif
		}

		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);

	return 0;
}
int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	struct rtnexthop *rtnh;
	int remaining;
#endif
	if (cfg->fc_priority && cfg->fc_priority != fi->fib_priority)
		return 1;
	if (cfg->fc_oif || cfg->fc_gw) {
		if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
		    (!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw))
			return 0;
		return 1;
	}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (cfg->fc_mp == NULL)
		return 0;
	rtnh = cfg->fc_mp;
	remaining = cfg->fc_mp_len;
	for_nexthops(fi) {
		int attrlen;

		if (!rtnh_ok(rtnh, remaining))
			return -EINVAL;
		if (rtnh->rtnh_ifindex && rtnh->rtnh_ifindex != nh->nh_oif)
			return 1;
		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla && nla_get_be32(nla) != nh->nh_gw)
				return 1;
#ifdef CONFIG_IP_ROUTE_CLASSID
			nla = nla_find(attrs, attrlen, RTA_FLOW);
			if (nla && nla_get_u32(nla) != nh->nh_tclassid)
				return 1;
#endif
		}
		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);
#endif
	return 0;
}
/*
 * The semantics of nexthops are messy for historical reasons.
 * We have to take into account that:
 * a) the gateway can actually be a local interface address,
 *    so that a gatewayed route is direct.
 * b) the gateway must be an on-link address, possibly
 *    described not by an ifaddr, but by a direct route.
 * c) if both gateway and interface are specified, they should not
 *    contradict each other.
 * d) if we use tunnel routes, the gateway may not be on-link.
 *
 * Attempting to reconcile all of these (alas, self-contradictory)
 * conditions results in pretty ugly and hairy code with obscure logic.
 *
 * I chose to generalize it instead, so that the size of the code does
 * not increase practically, but it becomes much more general.
 *
 * Every prefix is assigned a "scope" value: "host" is a local address,
 * "link" is a direct route,
 * [ ... "site" ... "interior" ... ]
 * and "universe" is a true gateway route with global meaning.
 *
 * Every prefix refers to a set of "nexthop"s (gw, oif),
 * where gw must have a narrower scope. This recursion stops
 * when gw has LOCAL scope or if the "nexthop" is declared ONLINK,
 * which means that gw is forced to be on-link.
 *
 * The code is still hairy, but now it is apparently logically
 * consistent and very flexible. E.g. as a by-product it allows
 * independent exterior and interior routing protocols to coexist
 * in peace.
 *
 * Normally it looks like the following.
 *
 * {universe prefix}  -> (gw, oif) [scope link]
 *		  |
 *		  |-> {link prefix} -> (gw, oif) [scope local]
 *					|
 *					|-> {local prefix} (terminal node)
 */
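/* Illustrative example: "ip route add default via 192.0.2.1 dev eth0"
 * creates a universe-scope prefix whose nexthop 192.0.2.1 must itself be
 * reachable through a narrower-scope route, typically the link-scope
 * 192.0.2.0/24 prefix configured on eth0; fib_check_nh() below performs
 * exactly that recursive lookup, clamping the scope to at least
 * RT_SCOPE_LINK.
 */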
static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
			struct fib_nh *nh)
{
	int err;
	struct net *net;
	struct net_device *dev;

	net = cfg->fc_nlinfo.nl_net;
	if (nh->nh_gw) {
		struct fib_result res;

		if (nh->nh_flags & RTNH_F_ONLINK) {

			if (cfg->fc_scope >= RT_SCOPE_LINK)
				return -EINVAL;
			if (inet_addr_type(net, nh->nh_gw) != RTN_UNICAST)
				return -EINVAL;
			dev = __dev_get_by_index(net, nh->nh_oif);
			if (!dev)
				return -ENODEV;
			if (!(dev->flags & IFF_UP))
				return -ENETDOWN;
			nh->nh_dev = dev;
			dev_hold(dev);
			nh->nh_scope = RT_SCOPE_LINK;
			return 0;
		}
		{
			struct flowi4 fl4 = {
				.daddr = nh->nh_gw,
				.flowi4_scope = cfg->fc_scope + 1,
				.flowi4_oif = nh->nh_oif,
			};

			/* It is not necessary, but requires a bit of thinking */
			if (fl4.flowi4_scope < RT_SCOPE_LINK)
				fl4.flowi4_scope = RT_SCOPE_LINK;
			err = fib_lookup(net, &fl4, &res);
		}
		if (res.type != RTN_UNICAST && res.type != RTN_LOCAL)
			goto out;
		nh->nh_scope = res.scope;
		nh->nh_oif = FIB_RES_OIF(res);
		nh->nh_dev = dev = FIB_RES_DEV(res);
		if (!dev)
			goto out;
		dev_hold(dev);
		err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN;
	} else {
		struct in_device *in_dev;

		if (nh->nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK))
			return -EINVAL;

		in_dev = inetdev_by_index(net, nh->nh_oif);
		if (!(in_dev->dev->flags & IFF_UP))
			goto out;
		nh->nh_dev = in_dev->dev;
		dev_hold(nh->nh_dev);
		nh->nh_scope = RT_SCOPE_HOST;
static inline unsigned int fib_laddr_hashfn(__be32 val)
{
	unsigned int mask = (fib_info_hash_size - 1);

	return ((__force u32)val ^
		((__force u32)val >> 7) ^
		((__force u32)val >> 14)) & mask;
}
static struct hlist_head *fib_info_hash_alloc(int bytes)
{
	if (bytes <= PAGE_SIZE)
		return kzalloc(bytes, GFP_KERNEL);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 get_order(bytes));
}

static void fib_info_hash_free(struct hlist_head *hash, int bytes)
{
	if (bytes <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long) hash, get_order(bytes));
}
static void fib_info_hash_move(struct hlist_head *new_info_hash,
			       struct hlist_head *new_laddrhash,
			       unsigned int new_size)
{
	struct hlist_head *old_info_hash, *old_laddrhash;
	unsigned int old_size = fib_info_hash_size;
	unsigned int i, bytes;

	spin_lock_bh(&fib_info_lock);
	old_info_hash = fib_info_hash;
	old_laddrhash = fib_info_laddrhash;
	fib_info_hash_size = new_size;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *head = &fib_info_hash[i];
		struct hlist_node *node, *n;
		struct fib_info *fi;

		hlist_for_each_entry_safe(fi, node, n, head, fib_hash) {
			struct hlist_head *dest;
			unsigned int new_hash;

			hlist_del(&fi->fib_hash);

			new_hash = fib_info_hashfn(fi);
			dest = &new_info_hash[new_hash];
			hlist_add_head(&fi->fib_hash, dest);
		}
	}
	fib_info_hash = new_info_hash;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *lhead = &fib_info_laddrhash[i];
		struct hlist_node *node, *n;
		struct fib_info *fi;

		hlist_for_each_entry_safe(fi, node, n, lhead, fib_lhash) {
			struct hlist_head *ldest;
			unsigned int new_hash;

			hlist_del(&fi->fib_lhash);

			new_hash = fib_laddr_hashfn(fi->fib_prefsrc);
			ldest = &new_laddrhash[new_hash];
			hlist_add_head(&fi->fib_lhash, ldest);
		}
	}
	fib_info_laddrhash = new_laddrhash;

	spin_unlock_bh(&fib_info_lock);

	bytes = old_size * sizeof(struct hlist_head *);
	fib_info_hash_free(old_info_hash, bytes);
	fib_info_hash_free(old_laddrhash, bytes);
}
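/* The move is triggered from fib_create_info() whenever fib_info_cnt
 * reaches fib_info_hash_size: both tables are doubled and every fib_info
 * is rehashed into the new arrays while fib_info_lock is held.
 */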
__be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
{
	nh->nh_saddr = inet_select_addr(nh->nh_dev,
					nh->nh_gw,
					nh->nh_parent->fib_scope);
	nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);

	return nh->nh_saddr;
}
struct fib_info *fib_create_info(struct fib_config *cfg)
{
	int err;
	struct fib_info *fi = NULL;
	struct fib_info *ofi;
	int nhs = 1;
	struct net *net = cfg->fc_nlinfo.nl_net;

	if (cfg->fc_type > RTN_MAX)
		goto err_inval;

	/* Fast check to catch the most weird cases */
	if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
		goto err_inval;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (cfg->fc_mp) {
		nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len);
		if (nhs == 0)
			goto err_inval;
	}
#endif

	if (fib_info_cnt >= fib_info_hash_size) {
		unsigned int new_size = fib_info_hash_size << 1;
		struct hlist_head *new_info_hash;
		struct hlist_head *new_laddrhash;
		unsigned int bytes;

		bytes = new_size * sizeof(struct hlist_head *);
		new_info_hash = fib_info_hash_alloc(bytes);
		new_laddrhash = fib_info_hash_alloc(bytes);
		if (!new_info_hash || !new_laddrhash) {
			fib_info_hash_free(new_info_hash, bytes);
			fib_info_hash_free(new_laddrhash, bytes);
		} else
			fib_info_hash_move(new_info_hash, new_laddrhash, new_size);
		if (!fib_info_hash_size)
			goto failure;
	}

	fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
	if (fi == NULL)
		goto failure;
	if (cfg->fc_mx) {
		fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
		if (!fi->fib_metrics)
			goto failure;
	} else
		fi->fib_metrics = (u32 *) dst_default_metrics;
	fib_info_cnt++;

	fi->fib_net = hold_net(net);
	fi->fib_protocol = cfg->fc_protocol;
	fi->fib_scope = cfg->fc_scope;
	fi->fib_flags = cfg->fc_flags;
	fi->fib_priority = cfg->fc_priority;
	fi->fib_prefsrc = cfg->fc_prefsrc;
	fi->fib_nhs = nhs;
	change_nexthops(fi) {
		nexthop_nh->nh_parent = fi;
		nexthop_nh->nh_pcpu_rth_output = alloc_percpu(struct rtable __rcu *);
	} endfor_nexthops(fi)

	if (cfg->fc_mx) {
		struct nlattr *nla;
		int remaining;

		nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
			int type = nla_type(nla);

			if (type) {
				u32 val;

				if (type > RTAX_MAX)
					goto err_inval;
				val = nla_get_u32(nla);
				if (type == RTAX_ADVMSS && val > 65535 - 40)
					val = 65535 - 40;
				if (type == RTAX_MTU && val > 65535 - 15)
					val = 65535 - 15;
				fi->fib_metrics[type - 1] = val;
			}
		}
	}

	if (cfg->fc_mp) {
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg);
		if (err != 0)
			goto failure;
		if (cfg->fc_oif && fi->fib_nh->nh_oif != cfg->fc_oif)
			goto err_inval;
		if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw)
			goto err_inval;
#ifdef CONFIG_IP_ROUTE_CLASSID
		if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow)
			goto err_inval;
#endif
#else
		goto err_inval;
#endif
	} else {
		struct fib_nh *nh = fi->fib_nh;

		nh->nh_oif = cfg->fc_oif;
		nh->nh_gw = cfg->fc_gw;
		nh->nh_flags = cfg->fc_flags;
#ifdef CONFIG_IP_ROUTE_CLASSID
		nh->nh_tclassid = cfg->fc_flow;
		if (nh->nh_tclassid)
			fi->fib_net->ipv4.fib_num_tclassid_users++;
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		nh->nh_weight = 1;
#endif
	}

	if (fib_props[cfg->fc_type].error) {
		if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp)
			goto err_inval;
		goto link_it;
	} else {
		switch (cfg->fc_type) {
		case RTN_UNICAST:
		case RTN_LOCAL:
		case RTN_BROADCAST:
		case RTN_ANYCAST:
		case RTN_MULTICAST:
			break;
		default:
			goto err_inval;
		}
	}

	if (cfg->fc_scope > RT_SCOPE_HOST)
		goto err_inval;

	if (cfg->fc_scope == RT_SCOPE_HOST) {
		struct fib_nh *nh = fi->fib_nh;

		/* Local address is added. */
		if (nhs != 1 || nh->nh_gw)
			goto err_inval;
		nh->nh_scope = RT_SCOPE_NOWHERE;
		nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif);
		if (nh->nh_dev == NULL)
			goto failure;
	} else {
		change_nexthops(fi) {
			err = fib_check_nh(cfg, fi, nexthop_nh);
			if (err != 0)
				goto failure;
		} endfor_nexthops(fi)
	}

	if (fi->fib_prefsrc) {
		if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
		    fi->fib_prefsrc != cfg->fc_dst)
			if (inet_addr_type(net, fi->fib_prefsrc) != RTN_LOCAL)
				goto err_inval;
	}

	change_nexthops(fi) {
		fib_info_update_nh_saddr(net, nexthop_nh);
	} endfor_nexthops(fi)

link_it:
	ofi = fib_find_info(fi);
	if (ofi) {
		fi->fib_dead = 1;
		free_fib_info(fi);
		ofi->fib_treeref++;
		return ofi;
	}

	fi->fib_treeref++;
	atomic_inc(&fi->fib_clntref);
	spin_lock_bh(&fib_info_lock);
	hlist_add_head(&fi->fib_hash,
		       &fib_info_hash[fib_info_hashfn(fi)]);
	if (fi->fib_prefsrc) {
		struct hlist_head *head;

		head = &fib_info_laddrhash[fib_laddr_hashfn(fi->fib_prefsrc)];
		hlist_add_head(&fi->fib_lhash, head);
	}
	change_nexthops(fi) {
		struct hlist_head *head;
		unsigned int hash;

		if (!nexthop_nh->nh_dev)
			continue;
		hash = fib_devindex_hashfn(nexthop_nh->nh_dev->ifindex);
		head = &fib_info_devhash[hash];
		hlist_add_head(&nexthop_nh->nh_hash, head);
	} endfor_nexthops(fi)
	spin_unlock_bh(&fib_info_lock);
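/* Two reference counts protect a fib_info: fib_treeref counts the FIB
 * tree aliases pointing at it and is dropped via fib_release_info(),
 * while fib_clntref (taken just above) is the lookup-side refcount
 * released through fib_info_put(), which frees the structure once it
 * reaches zero.
 */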
int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
		  u32 tb_id, u8 type, __be32 dst, int dst_len, u8 tos,
		  struct fib_info *fi, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*rtm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_INET;
	rtm->rtm_dst_len = dst_len;
	rtm->rtm_src_len = 0;
	rtm->rtm_tos = tos;
	if (tb_id < 256)
		rtm->rtm_table = tb_id;
	else
		rtm->rtm_table = RT_TABLE_COMPAT;
	if (nla_put_u32(skb, RTA_TABLE, tb_id))
		goto nla_put_failure;
	rtm->rtm_type = type;
	rtm->rtm_flags = fi->fib_flags;
	rtm->rtm_scope = fi->fib_scope;
	rtm->rtm_protocol = fi->fib_protocol;

	if (rtm->rtm_dst_len &&
	    nla_put_be32(skb, RTA_DST, dst))
		goto nla_put_failure;
	if (fi->fib_priority &&
	    nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
		goto nla_put_failure;
	if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
		goto nla_put_failure;

	if (fi->fib_prefsrc &&
	    nla_put_be32(skb, RTA_PREFSRC, fi->fib_prefsrc))
		goto nla_put_failure;
	if (fi->fib_nhs == 1) {
		if (fi->fib_nh->nh_gw &&
		    nla_put_be32(skb, RTA_GATEWAY, fi->fib_nh->nh_gw))
			goto nla_put_failure;
		if (fi->fib_nh->nh_oif &&
		    nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif))
			goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
		if (fi->fib_nh[0].nh_tclassid &&
		    nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
			goto nla_put_failure;
#endif
	}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (fi->fib_nhs > 1) {
		struct rtnexthop *rtnh;
		struct nlattr *mp;

		mp = nla_nest_start(skb, RTA_MULTIPATH);
		if (mp == NULL)
			goto nla_put_failure;

		for_nexthops(fi) {
			rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
			if (rtnh == NULL)
				goto nla_put_failure;

			rtnh->rtnh_flags = nh->nh_flags & 0xFF;
			rtnh->rtnh_hops = nh->nh_weight - 1;
			rtnh->rtnh_ifindex = nh->nh_oif;

			if (nh->nh_gw &&
			    nla_put_be32(skb, RTA_GATEWAY, nh->nh_gw))
				goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
			if (nh->nh_tclassid &&
			    nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
				goto nla_put_failure;
#endif
			/* length of rtnetlink header + attributes */
			rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
		} endfor_nexthops(fi);

		nla_nest_end(skb, mp);
	}
#endif
	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
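/* The RTM_NEWROUTE/RTM_DELROUTE messages built here are what rtmsg_fib()
 * above multicasts to RTNLGRP_IPV4_ROUTE listeners and what the netlink
 * dump path emits; userspace tools such as iproute2's "ip route show"
 * and "ip monitor route" parse exactly these attributes.
 */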
/*
 * - local address disappeared -> we must delete all the entries
 *   referring to it.
 * - device went down -> we must shutdown all nexthops going via it.
 */
int fib_sync_down_addr(struct net *net, __be32 local)
{
	int ret = 0;
	unsigned int hash = fib_laddr_hashfn(local);
	struct hlist_head *head = &fib_info_laddrhash[hash];
	struct hlist_node *node;
	struct fib_info *fi;

	if (fib_info_laddrhash == NULL || local == 0)
		return 0;

	hlist_for_each_entry(fi, node, head, fib_lhash) {
		if (!net_eq(fi->fib_net, net))
			continue;
		if (fi->fib_prefsrc == local) {
			fi->fib_flags |= RTNH_F_DEAD;
			ret++;
		}
	}
	return ret;
}
int fib_sync_down_dev(struct net_device *dev, int force)
{
	int ret = 0;
	int scope = RT_SCOPE_NOWHERE;
	struct fib_info *prev_fi = NULL;
	unsigned int hash = fib_devindex_hashfn(dev->ifindex);
	struct hlist_head *head = &fib_info_devhash[hash];
	struct hlist_node *node;
	struct fib_nh *nh;

	if (force)
		scope = -1;

	hlist_for_each_entry(nh, node, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;
		int dead;

		BUG_ON(!fi->fib_nhs);
		if (nh->nh_dev != dev || fi == prev_fi)
			continue;
		prev_fi = fi;
		dead = 0;
		change_nexthops(fi) {
			if (nexthop_nh->nh_flags & RTNH_F_DEAD)
				dead++;
			else if (nexthop_nh->nh_dev == dev &&
				 nexthop_nh->nh_scope != scope) {
				nexthop_nh->nh_flags |= RTNH_F_DEAD;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
				spin_lock_bh(&fib_multipath_lock);
				fi->fib_power -= nexthop_nh->nh_power;
				nexthop_nh->nh_power = 0;
				spin_unlock_bh(&fib_multipath_lock);
#endif
				dead++;
			}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
			if (force > 1 && nexthop_nh->nh_dev == dev) {
				dead = fi->fib_nhs;
				break;
			}
#endif
		} endfor_nexthops(fi)
		if (dead == fi->fib_nhs) {
			fi->fib_flags |= RTNH_F_DEAD;
			ret++;
		}
	}
	return ret;
}
/* Must be invoked inside of an RCU protected region. */
void fib_select_default(struct fib_result *res)
{
	struct fib_info *fi = NULL, *last_resort = NULL;
	struct list_head *fa_head = res->fa_head;
	struct fib_table *tb = res->table;
	int order = -1, last_idx = -1;
	struct fib_alias *fa;

	list_for_each_entry_rcu(fa, fa_head, fa_list) {
		struct fib_info *next_fi = fa->fa_info;

		if (next_fi->fib_scope != res->scope ||
		    fa->fa_type != RTN_UNICAST)
			continue;
		if (next_fi->fib_priority > res->fi->fib_priority)
			break;
		if (!next_fi->fib_nh[0].nh_gw ||
		    next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
			continue;

		fib_alias_accessed(fa);

		if (fi == NULL) {
			if (next_fi != res->fi)
				break;
		} else if (!fib_detect_death(fi, order, &last_resort,
					     &last_idx, tb->tb_default)) {
			fib_result_assign(res, fi);
			tb->tb_default = order;
			goto out;
		}
		fi = next_fi;
		order++;
	}

	if (order <= 0 || fi == NULL) {
		tb->tb_default = -1;
		goto out;
	}

	if (!fib_detect_death(fi, order, &last_resort, &last_idx,
			      tb->tb_default)) {
		fib_result_assign(res, fi);
		tb->tb_default = order;
		goto out;
	}

	if (last_idx >= 0)
		fib_result_assign(res, last_resort);
	tb->tb_default = last_idx;
out:
	return;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH

/*
 * Dead device goes up. We wake up dead nexthops.
 * It makes sense only on multipath routes.
 */
int fib_sync_up(struct net_device *dev)
{
	struct fib_info *prev_fi;
	unsigned int hash;
	struct hlist_head *head;
	struct hlist_node *node;
	struct fib_nh *nh;
	int ret;

	if (!(dev->flags & IFF_UP))
		return 0;

	prev_fi = NULL;
	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];
	ret = 0;

	hlist_for_each_entry(nh, node, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;
		int alive;

		BUG_ON(!fi->fib_nhs);
		if (nh->nh_dev != dev || fi == prev_fi)
			continue;
		prev_fi = fi;
		alive = 0;
		change_nexthops(fi) {
			if (!(nexthop_nh->nh_flags & RTNH_F_DEAD)) {
				alive++;
				continue;
			}
			if (nexthop_nh->nh_dev == NULL ||
			    !(nexthop_nh->nh_dev->flags & IFF_UP))
				continue;
			if (nexthop_nh->nh_dev != dev ||
			    !__in_dev_get_rtnl(dev))
				continue;
			alive++;
			spin_lock_bh(&fib_multipath_lock);
			nexthop_nh->nh_power = 0;
			nexthop_nh->nh_flags &= ~RTNH_F_DEAD;
			spin_unlock_bh(&fib_multipath_lock);
		} endfor_nexthops(fi)
		if (alive > 0) {
			fi->fib_flags &= ~RTNH_F_DEAD;
			ret++;
		}
	}
	return ret;
}
/*
 * The algorithm is suboptimal, but it provides really
 * fair weighted route distribution.
 */
void fib_select_multipath(struct fib_result *res)
{
	struct fib_info *fi = res->fi;
	int w;

	spin_lock_bh(&fib_multipath_lock);
	if (fi->fib_power <= 0) {
		int power = 0;
		change_nexthops(fi) {
			if (!(nexthop_nh->nh_flags & RTNH_F_DEAD)) {
				power += nexthop_nh->nh_weight;
				nexthop_nh->nh_power = nexthop_nh->nh_weight;
			}
		} endfor_nexthops(fi);
		fi->fib_power = power;
		if (power <= 0) {
			spin_unlock_bh(&fib_multipath_lock);
			/* Race condition: route has just become dead. */
			res->nh_sel = 0;
			return;
		}
	}

	/* w should be a random number in [0..fi->fib_power-1];
	 * jiffies is a pretty bad approximation.
	 */
	w = jiffies % fi->fib_power;

	change_nexthops(fi) {
		if (!(nexthop_nh->nh_flags & RTNH_F_DEAD) &&
		    nexthop_nh->nh_power) {
			w -= nexthop_nh->nh_power;
			if (w <= 0) {
				nexthop_nh->nh_power--;
				fi->fib_power--;
				res->nh_sel = nhsel;
				spin_unlock_bh(&fib_multipath_lock);
				return;
			}
		}
	} endfor_nexthops(fi);

	/* Race condition: route has just become dead. */
	res->nh_sel = 0;
	spin_unlock_bh(&fib_multipath_lock);
}
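/* Worked example: with two live nexthops of weight 3 and 1, the refill
 * above sets nh_power to {3, 1} and fib_power to 4.  Each selection draws
 * w = jiffies % fib_power, walks the nexthops subtracting nh_power until
 * w drops to zero or below, then charges that nexthop by decrementing its
 * nh_power and fib_power; across the next four selections the first
 * nexthop is therefore chosen three times and the second once, matching
 * the 3:1 weights before the counters are refilled again.
 */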