/*
 * DECnet       An implementation of the DECnet protocol suite for the LINUX
 *              operating system.  DECnet is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              DECnet Routing Forwarding Information Base (Routing Tables)
 *
 * Author:      Steve Whitehouse <SteveW@ACM.org>
 *              Mostly copied from the IPv4 routing code
 *
 */
#include <linux/string.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/slab.h>
#include <linux/sockios.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <asm/uaccess.h>
#include <linux/route.h> /* RTF_xxx */
#include <net/neighbour.h>
#include <net/netlink.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/fib_rules.h>
#include <net/dn.h>
#include <net/dn_route.h>
#include <net/dn_fib.h>
#include <net/dn_neigh.h>
#include <net/dn_dev.h>
struct dn_zone
{
	struct dn_zone		*dz_next;
	struct dn_fib_node	**dz_hash;
	int			dz_nent;
	int			dz_divisor;
	u32			dz_hashmask;
#define DZ_HASHMASK(dz) ((dz)->dz_hashmask)
	int			dz_order;
	__le16			dz_mask;
#define DZ_MASK(dz) ((dz)->dz_mask)
};

struct dn_hash
{
	struct dn_zone	*dh_zones[17];
	struct dn_zone	*dh_zone_list;
};
#define dz_key_0(key)		((key).datum = 0)

#define for_nexthops(fi) { int nhsel; const struct dn_fib_nh *nh;\
	for(nhsel = 0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++)

#define endfor_nexthops(fi) }

#define DN_MAX_DIVISOR 1024
#define DN_S_ZOMBIE 1
#define DN_S_ACCESSED 2

#define DN_FIB_SCAN(f, fp) \
for( ; ((f) = *(fp)) != NULL; (fp) = &(f)->fn_next)

#define DN_FIB_SCAN_KEY(f, fp, key) \
for( ; ((f) = *(fp)) != NULL && dn_key_eq((f)->fn_key, (key)); (fp) = &(f)->fn_next)

#define RT_TABLE_MIN 1
#define DN_FIB_TABLE_HASHSZ 256
static struct hlist_head dn_fib_table_hash[DN_FIB_TABLE_HASHSZ];
static DEFINE_RWLOCK(dn_fib_tables_lock);

static struct kmem_cache *dn_hash_kmem __read_mostly;
static int dn_fib_hash_zombies;
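/*
 * Map a route key to a bucket in a zone's hash table. Only the prefix
 * bits of the key select the bucket, and the result is masked with the
 * zone's current hashmask.
 */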
static inline dn_fib_idx_t dn_hash(dn_fib_key_t key, struct dn_zone *dz)
{
	u16 h = le16_to_cpu(key.datum)>>(16 - dz->dz_order);
	h ^= (h >> 10);
	h ^= (h >> 6);
	h &= DZ_HASHMASK(dz);
	return *(dn_fib_idx_t *)&h;
}
static inline dn_fib_key_t dz_key(__le16 dst, struct dn_zone *dz)
{
	dn_fib_key_t k;
	k.datum = dst & DZ_MASK(dz);
	return k;
}
static inline struct dn_fib_node **dn_chain_p(dn_fib_key_t key, struct dn_zone *dz)
{
	return &dz->dz_hash[dn_hash(key, dz).datum];
}

static inline struct dn_fib_node *dz_chain(dn_fib_key_t key, struct dn_zone *dz)
{
	return dz->dz_hash[dn_hash(key, dz).datum];
}

static inline int dn_key_eq(dn_fib_key_t a, dn_fib_key_t b)
{
	return a.datum == b.datum;
}

static inline int dn_key_leq(dn_fib_key_t a, dn_fib_key_t b)
{
	return a.datum <= b.datum;
}
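/*
 * When a zone's chains grow too long the zone is rehashed: the bucket
 * array is enlarged (16 -> 256 -> 1024 entries) and every node is
 * reinserted into the new table, keeping each chain ordered by key.
 */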
static inline void dn_rebuild_zone(struct dn_zone *dz,
				   struct dn_fib_node **old_ht,
				   int old_divisor)
{
	struct dn_fib_node *f, **fp, *next;
	int i;

	for(i = 0; i < old_divisor; i++) {
		for(f = old_ht[i]; f; f = next) {
			next = f->fn_next;
			for(fp = dn_chain_p(f->fn_key, dz);
			    *fp && dn_key_leq((*fp)->fn_key, f->fn_key);
			    fp = &(*fp)->fn_next)
				/* NOTHING */;
			f->fn_next = *fp;
			*fp = f;
		}
	}
}
static void dn_rehash_zone(struct dn_zone *dz)
{
	struct dn_fib_node **ht, **old_ht;
	int old_divisor, new_divisor;
	u32 new_hashmask;

	old_divisor = dz->dz_divisor;

	switch (old_divisor) {
	case 16:
		new_divisor = 256;
		new_hashmask = 0xFF;
		break;
	default:
		printk(KERN_DEBUG "DECnet: dn_rehash_zone: BUG! %d\n",
		       old_divisor);
	case 256:
		new_divisor = 1024;
		new_hashmask = 0x3FF;
		break;
	}

	ht = kcalloc(new_divisor, sizeof(struct dn_fib_node*), GFP_KERNEL);
	if (ht == NULL)
		return;

	write_lock_bh(&dn_fib_tables_lock);
	old_ht = dz->dz_hash;
	dz->dz_hash = ht;
	dz->dz_hashmask = new_hashmask;
	dz->dz_divisor = new_divisor;
	dn_rebuild_zone(dz, old_ht, old_divisor);
	write_unlock_bh(&dn_fib_tables_lock);

	kfree(old_ht);
}
static void dn_free_node(struct dn_fib_node *f)
{
	dn_fib_release_info(DN_FIB_INFO(f));
	kmem_cache_free(dn_hash_kmem, f);
}
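/*
 * Allocate and initialise the zone for prefix length z and link it into
 * the table's zone list, which is kept sorted by descending prefix
 * length so that lookups try the most specific zone first.
 */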
static struct dn_zone *dn_new_zone(struct dn_hash *table, int z)
{
	int i;
	struct dn_zone *dz = kzalloc(sizeof(struct dn_zone), GFP_KERNEL);
	if (!dz)
		return NULL;

	if (z) {
		dz->dz_divisor = 16;
		dz->dz_hashmask = 0x0F;
	} else {
		dz->dz_divisor = 1;
		dz->dz_hashmask = 0;
	}

	dz->dz_hash = kcalloc(dz->dz_divisor, sizeof(struct dn_fib_node *), GFP_KERNEL);
	if (!dz->dz_hash) {
		kfree(dz);
		return NULL;
	}

	dz->dz_order = z;
	dz->dz_mask = dnet_make_mask(z);

	for(i = z + 1; i <= 16; i++)
		if (table->dh_zones[i])
			break;

	write_lock_bh(&dn_fib_tables_lock);
	if (i > 16) {
		dz->dz_next = table->dh_zone_list;
		table->dh_zone_list = dz;
	} else {
		dz->dz_next = table->dh_zones[i]->dz_next;
		table->dh_zones[i]->dz_next = dz;
	}
	table->dh_zones[z] = dz;
	write_unlock_bh(&dn_fib_tables_lock);

	return dz;
}
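/*
 * Compare the nexthop(s) described by a netlink route request with an
 * existing dn_fib_info. Returns 0 on a match, non-zero otherwise.
 */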
static int dn_fib_nh_match(struct rtmsg *r, struct nlmsghdr *nlh, struct nlattr *attrs[], struct dn_fib_info *fi)
{
	struct rtnexthop *nhp;
	int nhlen;

	if (attrs[RTA_PRIORITY] &&
	    nla_get_u32(attrs[RTA_PRIORITY]) != fi->fib_priority)
		return 1;

	if (attrs[RTA_OIF] || attrs[RTA_GATEWAY]) {
		if ((!attrs[RTA_OIF] || nla_get_u32(attrs[RTA_OIF]) == fi->fib_nh->nh_oif) &&
		    (!attrs[RTA_GATEWAY] || nla_get_le16(attrs[RTA_GATEWAY]) != fi->fib_nh->nh_gw))
			return 0;
		return 1;
	}

	if (!attrs[RTA_MULTIPATH])
		return 0;

	nhp = nla_data(attrs[RTA_MULTIPATH]);
	nhlen = nla_len(attrs[RTA_MULTIPATH]);

	for_nexthops(fi) {
		int attrlen = nhlen - sizeof(struct rtnexthop);
		__le16 gw;

		if (attrlen < 0 || (nhlen -= nhp->rtnh_len) < 0)
			return -EINVAL;
		if (nhp->rtnh_ifindex && nhp->rtnh_ifindex != nh->nh_oif)
			return 1;
		if (attrlen) {
			struct nlattr *gw_attr;

			gw_attr = nla_find((struct nlattr *) (nhp + 1), attrlen, RTA_GATEWAY);
			gw = gw_attr ? nla_get_le16(gw_attr) : 0;

			if (gw && gw != nh->nh_gw)
				return 1;
		}
		nhp = RTNH_NEXT(nhp);
	} endfor_nexthops(fi);

	return 0;
}
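/* Estimate the netlink message size needed to describe a route using fi. */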
static inline size_t dn_fib_nlmsg_size(struct dn_fib_info *fi)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
			 + nla_total_size(4) /* RTA_TABLE */
			 + nla_total_size(2) /* RTA_DST */
			 + nla_total_size(4); /* RTA_PRIORITY */

	/* space for nested metrics */
	payload += nla_total_size((RTAX_MAX * nla_total_size(4)));

	if (fi->fib_nhs) {
		/* Also handles the special case fib_nhs == 1 */

		/* each nexthop is packed in an attribute */
		size_t nhsize = nla_total_size(sizeof(struct rtnexthop));

		/* may contain a gateway attribute */
		nhsize += nla_total_size(4);

		/* all nexthops are packed in a nested attribute */
		payload += nla_total_size(fi->fib_nhs * nhsize);
	}

	return payload;
}
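/* Fill one RTM_NEWROUTE/RTM_DELROUTE message describing a single FIB entry. */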
static int dn_fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
			u32 tb_id, u8 type, u8 scope, void *dst, int dst_len,
			struct dn_fib_info *fi, unsigned int flags)
{
	struct rtmsg *rtm;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_DECnet;
	rtm->rtm_dst_len = dst_len;
	rtm->rtm_src_len = 0;
	rtm->rtm_tos = 0;
	rtm->rtm_table = tb_id;
	rtm->rtm_flags = fi->fib_flags;
	rtm->rtm_scope = scope;
	rtm->rtm_type = type;
	rtm->rtm_protocol = fi->fib_protocol;

	if (nla_put_u32(skb, RTA_TABLE, tb_id) < 0)
		goto errout;

	if (rtm->rtm_dst_len &&
	    nla_put(skb, RTA_DST, 2, dst) < 0)
		goto errout;

	if (fi->fib_priority &&
	    nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority) < 0)
		goto errout;

	if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
		goto errout;

	if (fi->fib_nhs == 1) {
		if (fi->fib_nh->nh_gw &&
		    nla_put_le16(skb, RTA_GATEWAY, fi->fib_nh->nh_gw) < 0)
			goto errout;

		if (fi->fib_nh->nh_oif &&
		    nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif) < 0)
			goto errout;
	}

	if (fi->fib_nhs > 1) {
		struct rtnexthop *nhp;
		struct nlattr *mp_head;

		if (!(mp_head = nla_nest_start(skb, RTA_MULTIPATH)))
			goto errout;

		for_nexthops(fi) {
			if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp))))
				goto errout;

			nhp->rtnh_flags = nh->nh_flags & 0xFF;
			nhp->rtnh_hops = nh->nh_weight - 1;
			nhp->rtnh_ifindex = nh->nh_oif;

			if (nh->nh_gw &&
			    nla_put_le16(skb, RTA_GATEWAY, nh->nh_gw) < 0)
				goto errout;

			nhp->rtnh_len = skb_tail_pointer(skb) - (unsigned char *)nhp;
		} endfor_nexthops(fi);

		nla_nest_end(skb, mp_head);
	}

	return nlmsg_end(skb, nlh);

errout:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
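/* Send a routing change notification to the RTNLGRP_DECnet_ROUTE group. */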
static void dn_rtmsg_fib(int event, struct dn_fib_node *f, int z, u32 tb_id,
			struct nlmsghdr *nlh, struct netlink_skb_parms *req)
{
	struct sk_buff *skb;
	u32 portid = req ? req->portid : 0;
	int err = -ENOBUFS;

	skb = nlmsg_new(dn_fib_nlmsg_size(DN_FIB_INFO(f)), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = dn_fib_dump_info(skb, portid, nlh->nlmsg_seq, event, tb_id,
			       f->fn_type, f->fn_scope, &f->fn_key, z,
			       DN_FIB_INFO(f), 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in dn_fib_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, &init_net, portid, RTNLGRP_DECnet_ROUTE, nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(&init_net, RTNLGRP_DECnet_ROUTE, err);
}
static __inline__ int dn_hash_dump_bucket(struct sk_buff *skb,
				struct netlink_callback *cb,
				struct dn_fib_table *tb,
				struct dn_zone *dz,
				struct dn_fib_node *f)
{
	int i, s_i;

	s_i = cb->args[4];
	for(i = 0; f; i++, f = f->fn_next) {
		if (i < s_i)
			continue;
		if (f->fn_state & DN_S_ZOMBIE)
			continue;
		if (dn_fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq,
				RTM_NEWROUTE,
				tb->n,
				(f->fn_state & DN_S_ZOMBIE) ? 0 : f->fn_type,
				f->fn_scope, &f->fn_key, dz->dz_order,
				f->fn_info, NLM_F_MULTI) < 0) {
			cb->args[4] = i;
			return -1;
		}
	}
	cb->args[4] = i;
	return skb->len;
}
static __inline__ int dn_hash_dump_zone(struct sk_buff *skb,
				struct netlink_callback *cb,
				struct dn_fib_table *tb,
				struct dn_zone *dz)
{
	int h, s_h;

	s_h = cb->args[3];
	for(h = 0; h < dz->dz_divisor; h++) {
		if (h < s_h)
			continue;
		if (h > s_h)
			memset(&cb->args[4], 0, sizeof(cb->args) - 4*sizeof(cb->args[0]));
		if (dz->dz_hash == NULL || dz->dz_hash[h] == NULL)
			continue;
		if (dn_hash_dump_bucket(skb, cb, tb, dz, dz->dz_hash[h]) < 0) {
			cb->args[3] = h;
			return -1;
		}
	}
	cb->args[3] = h;
	return skb->len;
}
static int dn_fib_table_dump(struct dn_fib_table *tb, struct sk_buff *skb,
				struct netlink_callback *cb)
{
	int m, s_m;
	struct dn_zone *dz;
	struct dn_hash *table = (struct dn_hash *)tb->data;

	s_m = cb->args[2];
	read_lock(&dn_fib_tables_lock);
	for(dz = table->dh_zone_list, m = 0; dz; dz = dz->dz_next, m++) {
		if (m < s_m)
			continue;
		if (m > s_m)
			memset(&cb->args[3], 0, sizeof(cb->args) - 3*sizeof(cb->args[0]));

		if (dn_hash_dump_zone(skb, cb, tb, dz) < 0) {
			cb->args[2] = m;
			read_unlock(&dn_fib_tables_lock);
			return -1;
		}
	}
	read_unlock(&dn_fib_tables_lock);
	cb->args[2] = m;

	return skb->len;
}
int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int h, s_h;
	unsigned int e = 0, s_e;
	struct dn_fib_table *tb;
	int dumped = 0;

	if (!net_eq(net, &init_net))
		return 0;

	if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) &&
		((struct rtmsg *)nlmsg_data(cb->nlh))->rtm_flags&RTM_F_CLONED)
			return dn_cache_dump(skb, cb);

	s_h = cb->args[0];
	s_e = cb->args[1];

	for (h = s_h; h < DN_FIB_TABLE_HASHSZ; h++, s_h = 0) {
		e = 0;
		hlist_for_each_entry(tb, &dn_fib_table_hash[h], hlist) {
			if (e < s_e)
				goto next;
			if (dumped)
				memset(&cb->args[2], 0, sizeof(cb->args) -
						 2 * sizeof(cb->args[0]));
			if (tb->dump(tb, skb, cb) < 0)
				goto out;
			dumped = 1;
next:
			e++;
		}
	}
out:
	cb->args[1] = e;
	cb->args[0] = h;

	return skb->len;
}
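/*
 * Insert a route into the hash table for its prefix length, honouring
 * the NLM_F_EXCL/REPLACE/APPEND/CREATE semantics of the netlink request
 * and replacing a matching zombie entry if one exists.
 */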
static int dn_fib_table_insert(struct dn_fib_table *tb, struct rtmsg *r, struct nlattr *attrs[],
			       struct nlmsghdr *n, struct netlink_skb_parms *req)
{
	struct dn_hash *table = (struct dn_hash *)tb->data;
	struct dn_fib_node *new_f, *f, **fp, **del_fp;
	struct dn_zone *dz;
	struct dn_fib_info *fi;
	int z = r->rtm_dst_len;
	int type = r->rtm_type;
	dn_fib_key_t key;
	int err;

	if (z > 16)
		return -EINVAL;

	dz = table->dh_zones[z];
	if (!dz && !(dz = dn_new_zone(table, z)))
		return -ENOBUFS;

	dz_key_0(key);
	if (attrs[RTA_DST]) {
		__le16 dst = nla_get_le16(attrs[RTA_DST]);
		if (dst & ~DZ_MASK(dz))
			return -EINVAL;
		key = dz_key(dst, dz);
	}

	if ((fi = dn_fib_create_info(r, attrs, n, &err)) == NULL)
		return err;

	if (dz->dz_nent > (dz->dz_divisor << 2) &&
			dz->dz_divisor > DN_MAX_DIVISOR &&
			(z==16 || (1<<z) > dz->dz_divisor))
		dn_rehash_zone(dz);

	fp = dn_chain_p(key, dz);

	DN_FIB_SCAN(f, fp) {
		if (dn_key_leq(key, f->fn_key))
			break;
	}

	del_fp = NULL;

	if (f && (f->fn_state & DN_S_ZOMBIE) &&
			dn_key_eq(f->fn_key, key)) {
		del_fp = fp;
		fp = &f->fn_next;
		f = *fp;
		goto create;
	}

	DN_FIB_SCAN_KEY(f, fp, key) {
		if (fi->fib_priority <= DN_FIB_INFO(f)->fib_priority)
			break;
	}

	if (f && dn_key_eq(f->fn_key, key) &&
			fi->fib_priority == DN_FIB_INFO(f)->fib_priority) {
		struct dn_fib_node **ins_fp;

		err = -EEXIST;
		if (n->nlmsg_flags & NLM_F_EXCL)
			goto out;

		if (n->nlmsg_flags & NLM_F_REPLACE) {
			del_fp = fp;
			fp = &f->fn_next;
			f = *fp;
			goto replace;
		}

		ins_fp = fp;
		err = -EEXIST;

		DN_FIB_SCAN_KEY(f, fp, key) {
			if (fi->fib_priority != DN_FIB_INFO(f)->fib_priority)
				break;
			if (f->fn_type == type &&
			    f->fn_scope == r->rtm_scope &&
			    DN_FIB_INFO(f) == fi)
				goto out;
		}

		if (!(n->nlmsg_flags & NLM_F_APPEND)) {
			fp = ins_fp;
			f = *fp;
		}
	}

create:
	err = -ENOENT;
	if (!(n->nlmsg_flags & NLM_F_CREATE))
		goto out;

replace:
	err = -ENOBUFS;
	new_f = kmem_cache_zalloc(dn_hash_kmem, GFP_KERNEL);
	if (new_f == NULL)
		goto out;

	new_f->fn_key = key;
	new_f->fn_type = type;
	new_f->fn_scope = r->rtm_scope;
	DN_FIB_INFO(new_f) = fi;

	new_f->fn_next = f;
	write_lock_bh(&dn_fib_tables_lock);
	*fp = new_f;
	write_unlock_bh(&dn_fib_tables_lock);
	dz->dz_nent++;

	if (del_fp) {
		f = *del_fp;
		write_lock_bh(&dn_fib_tables_lock);
		*del_fp = f->fn_next;
		write_unlock_bh(&dn_fib_tables_lock);

		if (!(f->fn_state & DN_S_ZOMBIE))
			dn_rtmsg_fib(RTM_DELROUTE, f, z, tb->n, n, req);
		if (f->fn_state & DN_S_ACCESSED)
			dn_rt_cache_flush(-1);
		dn_free_node(f);
		dz->dz_nent--;
	} else {
		dn_rt_cache_flush(-1);
	}

	dn_rtmsg_fib(RTM_NEWROUTE, new_f, z, tb->n, n, req);

	return 0;
out:
	dn_fib_release_info(fi);
	return err;
}
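/*
 * Delete the route matching the request. If it is the only entry for
 * its key it is turned into a zombie (kept in place but skipped by
 * lookups) rather than unlinked immediately; accumulated zombies are
 * reaped once more than 128 of them exist.
 */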
static int dn_fib_table_delete(struct dn_fib_table *tb, struct rtmsg *r, struct nlattr *attrs[],
			       struct nlmsghdr *n, struct netlink_skb_parms *req)
{
	struct dn_hash *table = (struct dn_hash*)tb->data;
	struct dn_fib_node **fp, **del_fp, *f;
	int z = r->rtm_dst_len;
	struct dn_zone *dz;
	dn_fib_key_t key;
	int matched;

	if (z > 16)
		return -EINVAL;

	if ((dz = table->dh_zones[z]) == NULL)
		return -ESRCH;

	dz_key_0(key);
	if (attrs[RTA_DST]) {
		__le16 dst = nla_get_le16(attrs[RTA_DST]);
		if (dst & ~DZ_MASK(dz))
			return -EINVAL;
		key = dz_key(dst, dz);
	}

	fp = dn_chain_p(key, dz);

	DN_FIB_SCAN(f, fp) {
		if (dn_key_eq(f->fn_key, key))
			break;
		if (dn_key_leq(key, f->fn_key))
			return -ESRCH;
	}

	matched = 0;
	del_fp = NULL;
	DN_FIB_SCAN_KEY(f, fp, key) {
		struct dn_fib_info *fi = DN_FIB_INFO(f);

		if (f->fn_state & DN_S_ZOMBIE)
			return -ESRCH;

		matched++;

		if (del_fp == NULL &&
				(!r->rtm_type || f->fn_type == r->rtm_type) &&
				(r->rtm_scope == RT_SCOPE_NOWHERE || f->fn_scope == r->rtm_scope) &&
				(!r->rtm_protocol ||
					fi->fib_protocol == r->rtm_protocol) &&
				dn_fib_nh_match(r, n, attrs, fi) == 0)
			del_fp = fp;
	}

	if (del_fp) {
		f = *del_fp;
		dn_rtmsg_fib(RTM_DELROUTE, f, z, tb->n, n, req);

		if (matched != 1) {
			write_lock_bh(&dn_fib_tables_lock);
			*del_fp = f->fn_next;
			write_unlock_bh(&dn_fib_tables_lock);

			if (f->fn_state & DN_S_ACCESSED)
				dn_rt_cache_flush(-1);
			dn_free_node(f);
			dz->dz_nent--;
		} else {
			f->fn_state |= DN_S_ZOMBIE;
			if (f->fn_state & DN_S_ACCESSED) {
				f->fn_state &= ~DN_S_ACCESSED;
				dn_rt_cache_flush(-1);
			}
			if (++dn_fib_hash_zombies > 128)
				dn_fib_flush();
		}

		return 0;
	}

	return -ESRCH;
}
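/*
 * Remove zombie entries and routes whose nexthop info is marked dead
 * from every chain of every zone; the return value is the number of
 * entries removed.
 */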
static inline int dn_flush_list(struct dn_fib_node **fp, int z, struct dn_hash *table)
{
	int found = 0;
	struct dn_fib_node *f;

	while((f = *fp) != NULL) {
		struct dn_fib_info *fi = DN_FIB_INFO(f);

		if (fi && ((f->fn_state & DN_S_ZOMBIE) || (fi->fib_flags & RTNH_F_DEAD))) {
			write_lock_bh(&dn_fib_tables_lock);
			*fp = f->fn_next;
			write_unlock_bh(&dn_fib_tables_lock);

			dn_free_node(f);
			found++;
			continue;
		}
		fp = &f->fn_next;
	}

	return found;
}
static int dn_fib_table_flush(struct dn_fib_table *tb)
{
	struct dn_hash *table = (struct dn_hash *)tb->data;
	struct dn_zone *dz;
	int found = 0;

	dn_fib_hash_zombies = 0;
	for(dz = table->dh_zone_list; dz; dz = dz->dz_next) {
		int i;
		int tmp = 0;
		for(i = dz->dz_divisor-1; i >= 0; i--)
			tmp += dn_flush_list(&dz->dz_hash[i], dz->dz_order, table);
		dz->dz_nent -= tmp;
		found += tmp;
	}

	return found;
}
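/*
 * Longest prefix match: walk the zone list from the most specific zone
 * to the least specific one and return the first semantically matching
 * entry, recording its type, scope and prefix length in res.
 */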
static int dn_fib_table_lookup(struct dn_fib_table *tb, const struct flowidn *flp, struct dn_fib_res *res)
{
	int err;
	struct dn_zone *dz;
	struct dn_hash *t = (struct dn_hash *)tb->data;

	read_lock(&dn_fib_tables_lock);
	for(dz = t->dh_zone_list; dz; dz = dz->dz_next) {
		struct dn_fib_node *f;
		dn_fib_key_t k = dz_key(flp->daddr, dz);

		for(f = dz_chain(k, dz); f; f = f->fn_next) {
			if (!dn_key_eq(k, f->fn_key)) {
				if (dn_key_leq(k, f->fn_key))
					break;
				else
					continue;
			}

			f->fn_state |= DN_S_ACCESSED;

			if (f->fn_state&DN_S_ZOMBIE)
				continue;

			if (f->fn_scope < flp->flowidn_scope)
				continue;

			err = dn_fib_semantic_match(f->fn_type, DN_FIB_INFO(f), flp, res);

			if (err == 0) {
				res->type = f->fn_type;
				res->scope = f->fn_scope;
				res->prefixlen = dz->dz_order;
				goto out;
			}
			if (err < 0)
				goto out;
		}
	}
	err = 1;
out:
	read_unlock(&dn_fib_tables_lock);

	return err;
}
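/*
 * Look up the routing table with the given id, optionally creating it
 * (and wiring up its operations) if it does not exist yet.
 */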
struct dn_fib_table *dn_fib_get_table(u32 n, int create)
{
	struct dn_fib_table *t;
	unsigned int h;

	if (n < RT_TABLE_MIN)
		return NULL;

	if (n > RT_TABLE_MAX)
		return NULL;

	h = n & (DN_FIB_TABLE_HASHSZ - 1);
	rcu_read_lock();
	hlist_for_each_entry_rcu(t, &dn_fib_table_hash[h], hlist) {
		if (t->n == n) {
			rcu_read_unlock();
			return t;
		}
	}
	rcu_read_unlock();

	if (!create)
		return NULL;

	if (in_interrupt()) {
		net_dbg_ratelimited("DECnet: BUG! Attempt to create routing table from interrupt\n");
		return NULL;
	}

	t = kzalloc(sizeof(struct dn_fib_table) + sizeof(struct dn_hash),
		    GFP_KERNEL);
	if (t == NULL)
		return NULL;

	t->n = n;
	t->insert = dn_fib_table_insert;
	t->delete = dn_fib_table_delete;
	t->lookup = dn_fib_table_lookup;
	t->flush = dn_fib_table_flush;
	t->dump = dn_fib_table_dump;
	hlist_add_head_rcu(&t->hlist, &dn_fib_table_hash[h]);

	return t;
}
struct dn_fib_table *dn_fib_empty_table(void)
{
	u32 id;

	for(id = RT_TABLE_MIN; id <= RT_TABLE_MAX; id++)
		if (dn_fib_get_table(id, 0) == NULL)
			return dn_fib_get_table(id, 1);
	return NULL;
}
void dn_fib_flush(void)
{
	int flushed = 0;
	struct dn_fib_table *tb;
	unsigned int h;

	for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) {
		hlist_for_each_entry(tb, &dn_fib_table_hash[h], hlist)
			flushed += tb->flush(tb);
	}

	if (flushed)
		dn_rt_cache_flush(-1);
}
void __init dn_fib_table_init(void)
{
	dn_hash_kmem = kmem_cache_create("dn_fib_info_cache",
					sizeof(struct dn_fib_info),
					0, SLAB_HWCACHE_ALIGN,
					NULL);
}
void __exit dn_fib_table_cleanup(void)
{
	struct dn_fib_table *t;
	struct hlist_node *next;
	unsigned int h;

	write_lock(&dn_fib_tables_lock);
	for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) {
		hlist_for_each_entry_safe(t, next, &dn_fib_table_hash[h],
					  hlist) {
			hlist_del(&t->hlist);
			kfree(t);
		}
	}
	write_unlock(&dn_fib_tables_lock);
}