/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright Tomi Manninen OH2BNS (oh2bns@sral.fi)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/arp.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h>	/* For TIOCINQ/OUTQ */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/netfilter.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <net/netrom.h>
#include <linux/seq_file.h>
#include <linux/export.h>
static unsigned int nr_neigh_no = 1;

static HLIST_HEAD(nr_node_list);
static DEFINE_SPINLOCK(nr_node_list_lock);
static HLIST_HEAD(nr_neigh_list);
static DEFINE_SPINLOCK(nr_neigh_list_lock);
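
/*
 * The routing state lives in two global lists: nr_node_list holds every
 * known destination node (each with up to three candidate routes, best
 * first, with ->which indexing the route currently in use) and
 * nr_neigh_list holds the directly reachable AX.25 neighbours.  Both lists
 * are protected by the BH spinlocks above and the entries themselves are
 * reference counted via nr_node_hold/put and nr_neigh_hold/put.
 */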
static struct nr_node *nr_node_get(ax25_address *callsign)
{
	struct nr_node *found = NULL;
	struct nr_node *nr_node;
	struct hlist_node *node;

	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each(nr_node, node, &nr_node_list)
		if (ax25cmp(callsign, &nr_node->callsign) == 0) {
			nr_node_hold(nr_node);
			found = nr_node;
			break;
		}
	spin_unlock_bh(&nr_node_list_lock);
	return found;
}
static struct nr_neigh *nr_neigh_get_dev(ax25_address *callsign,
					 struct net_device *dev)
{
	struct nr_neigh *found = NULL;
	struct nr_neigh *nr_neigh;
	struct hlist_node *node;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each(nr_neigh, node, &nr_neigh_list)
		if (ax25cmp(callsign, &nr_neigh->callsign) == 0 &&
		    nr_neigh->dev == dev) {
			nr_neigh_hold(nr_neigh);
			found = nr_neigh;
			break;
		}
	spin_unlock_bh(&nr_neigh_list_lock);
	return found;
}
static void nr_remove_neigh(struct nr_neigh *);

/*
 * Add a new route to a node, and in the process add the node and the
 * neighbour if it is new.
 */
static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
	ax25_address *ax25, ax25_digi *ax25_digi, struct net_device *dev,
	int quality, int obs_count)
{
	struct nr_node  *nr_node;
	struct nr_neigh *nr_neigh;
	struct nr_route nr_route;
	int i, found;
	struct net_device *odev;
	if ((odev=nr_dev_get(nr)) != NULL) { /* Can't add routes to ourself */
		dev_put(odev);
		return -EINVAL;
	}

	nr_node = nr_node_get(nr);

	nr_neigh = nr_neigh_get_dev(ax25, dev);
	/*
	 * The L2 link to a neighbour has failed in the past
	 * and now a frame comes from this neighbour. We assume
	 * it was a temporary trouble with the link and reset the
	 * routes now (and not wait for a node broadcast).
	 */
	if (nr_neigh != NULL && nr_neigh->failed != 0 && quality == 0) {
		struct nr_node *nr_nodet;
		struct hlist_node *node;

		spin_lock_bh(&nr_node_list_lock);
		nr_node_for_each(nr_nodet, node, &nr_node_list) {
			nr_node_lock(nr_nodet);
			for (i = 0; i < nr_nodet->count; i++)
				if (nr_nodet->routes[i].neighbour == nr_neigh)
					if (i < nr_nodet->which)
						nr_nodet->which = i;
			nr_node_unlock(nr_nodet);
		}
		spin_unlock_bh(&nr_node_list_lock);
	}
	if (nr_neigh != NULL)
		nr_neigh->failed = 0;

	if (quality == 0 && nr_neigh != NULL && nr_node != NULL) {
		nr_neigh_put(nr_neigh);
		nr_node_put(nr_node);
		return 0;
	}
	if (nr_neigh == NULL) {
		if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL) {
			if (nr_node)
				nr_node_put(nr_node);
			return -ENOMEM;
		}

		nr_neigh->callsign = *ax25;
		nr_neigh->digipeat = NULL;
		nr_neigh->ax25     = NULL;
		nr_neigh->dev      = dev;
		nr_neigh->quality  = sysctl_netrom_default_path_quality;
		nr_neigh->locked   = 0;
		nr_neigh->count    = 0;
		nr_neigh->number   = nr_neigh_no++;
		nr_neigh->failed   = 0;
		atomic_set(&nr_neigh->refcount, 1);

		if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
			nr_neigh->digipeat = kmemdup(ax25_digi,
						     sizeof(*ax25_digi),
						     GFP_ATOMIC);
			if (nr_neigh->digipeat == NULL) {
				kfree(nr_neigh);
				if (nr_node)
					nr_node_put(nr_node);
				return -ENOMEM;
			}
		}

		spin_lock_bh(&nr_neigh_list_lock);
		hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
		nr_neigh_hold(nr_neigh);
		spin_unlock_bh(&nr_neigh_list_lock);
	}
	if (quality != 0 && ax25cmp(nr, ax25) == 0 && !nr_neigh->locked)
		nr_neigh->quality = quality;
	if (nr_node == NULL) {
		if ((nr_node = kmalloc(sizeof(*nr_node), GFP_ATOMIC)) == NULL) {
			if (nr_neigh)
				nr_neigh_put(nr_neigh);
			return -ENOMEM;
		}

		nr_node->callsign = *nr;
		strcpy(nr_node->mnemonic, mnemonic);

		nr_node->which = 0;
		nr_node->count = 1;
		atomic_set(&nr_node->refcount, 1);
		spin_lock_init(&nr_node->node_lock);

		nr_node->routes[0].quality   = quality;
		nr_node->routes[0].obs_count = obs_count;
		nr_node->routes[0].neighbour = nr_neigh;

		nr_neigh_hold(nr_neigh);
		nr_neigh->count++;

		spin_lock_bh(&nr_node_list_lock);
		hlist_add_head(&nr_node->node_node, &nr_node_list);
		/* refcount initialized at 1 */
		spin_unlock_bh(&nr_node_list_lock);

		nr_neigh_put(nr_neigh);
		return 0;
	}
	nr_node_lock(nr_node);

	if (quality != 0)
		strcpy(nr_node->mnemonic, mnemonic);

	for (found = 0, i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			nr_node->routes[i].quality   = quality;
			nr_node->routes[i].obs_count = obs_count;
			found = 1;
			break;
		}
	}
	if (!found) {
		/* We have space at the bottom, slot it in */
		if (nr_node->count < 3) {
			nr_node->routes[2] = nr_node->routes[1];
			nr_node->routes[1] = nr_node->routes[0];

			nr_node->routes[0].quality   = quality;
			nr_node->routes[0].obs_count = obs_count;
			nr_node->routes[0].neighbour = nr_neigh;

			nr_node->which++;
			nr_node->count++;
			nr_neigh_hold(nr_neigh);
			nr_neigh->count++;
		} else {
			/* It must be better than the worst */
			if (quality > nr_node->routes[2].quality) {
				nr_node->routes[2].neighbour->count--;
				nr_neigh_put(nr_node->routes[2].neighbour);

				if (nr_node->routes[2].neighbour->count == 0 && !nr_node->routes[2].neighbour->locked)
					nr_remove_neigh(nr_node->routes[2].neighbour);

				nr_node->routes[2].quality   = quality;
				nr_node->routes[2].obs_count = obs_count;
				nr_node->routes[2].neighbour = nr_neigh;

				nr_neigh_hold(nr_neigh);
				nr_neigh->count++;
			}
		}
	}
	/* Now re-sort the routes in quality order */
	switch (nr_node->count) {
	case 3:
		if (nr_node->routes[1].quality > nr_node->routes[0].quality) {
			switch (nr_node->which) {
			case 0: nr_node->which = 1;
				break;
			case 1: nr_node->which = 0;
				break;
			}
			nr_route           = nr_node->routes[0];
			nr_node->routes[0] = nr_node->routes[1];
			nr_node->routes[1] = nr_route;
		}
		if (nr_node->routes[2].quality > nr_node->routes[1].quality) {
			switch (nr_node->which) {
			case 1: nr_node->which = 2;
				break;
			case 2: nr_node->which = 1;
				break;
			}
			nr_route           = nr_node->routes[1];
			nr_node->routes[1] = nr_node->routes[2];
			nr_node->routes[2] = nr_route;
		}
	case 2:
		if (nr_node->routes[1].quality > nr_node->routes[0].quality) {
			switch (nr_node->which) {
			case 0: nr_node->which = 1;
				break;
			case 1: nr_node->which = 0;
				break;
			}
			nr_route           = nr_node->routes[0];
			nr_node->routes[0] = nr_node->routes[1];
			nr_node->routes[1] = nr_route;
		}
	case 1:
		break;
	}

	for (i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			if (i < nr_node->which)
				nr_node->which = i;
			break;
		}
	}

	nr_neigh_put(nr_neigh);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);

	return 0;
}
static inline void __nr_remove_node(struct nr_node *nr_node)
{
	hlist_del_init(&nr_node->node_node);
	nr_node_put(nr_node);
}

#define nr_remove_node_locked(__node) \
	__nr_remove_node(__node)

static void nr_remove_node(struct nr_node *nr_node)
{
	spin_lock_bh(&nr_node_list_lock);
	__nr_remove_node(nr_node);
	spin_unlock_bh(&nr_node_list_lock);
}

static inline void __nr_remove_neigh(struct nr_neigh *nr_neigh)
{
	hlist_del_init(&nr_neigh->neigh_node);
	nr_neigh_put(nr_neigh);
}

#define nr_remove_neigh_locked(__neigh) \
	__nr_remove_neigh(__neigh)

static void nr_remove_neigh(struct nr_neigh *nr_neigh)
{
	spin_lock_bh(&nr_neigh_list_lock);
	__nr_remove_neigh(nr_neigh);
	spin_unlock_bh(&nr_neigh_list_lock);
}
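
/*
 * Note: the *_locked() variants above are for callers that already hold the
 * corresponding list lock (see nr_dec_obs(), nr_rt_device_down() and
 * nr_rt_free() below); the plain versions take the lock themselves.
 */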
352 * "Delete" a node. Strictly speaking remove a route to a node. The node
353 * is only deleted if no routes are left to it.
static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct net_device *dev)
{
	struct nr_node  *nr_node;
	struct nr_neigh *nr_neigh;
	int i;

	nr_node = nr_node_get(callsign);
	if (nr_node == NULL)
		return -EINVAL;

	nr_neigh = nr_neigh_get_dev(neighbour, dev);

	if (nr_neigh == NULL) {
		nr_node_put(nr_node);
		return -EINVAL;
	}

	nr_node_lock(nr_node);
	for (i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			nr_neigh->count--;
			nr_neigh_put(nr_neigh);

			if (nr_neigh->count == 0 && !nr_neigh->locked)
				nr_remove_neigh(nr_neigh);
			nr_neigh_put(nr_neigh);

			nr_node->count--;
			if (nr_node->count == 0) {
				nr_remove_node(nr_node);
			} else {
				switch (i) {
				case 0: nr_node->routes[0] = nr_node->routes[1];
				case 1: nr_node->routes[1] = nr_node->routes[2];
				case 2: break;
				}
				nr_node_put(nr_node);
			}
			nr_node_unlock(nr_node);
			return 0;
		}
	}
	nr_neigh_put(nr_neigh);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);

	return -EINVAL;
}
/*
 * Lock a neighbour with a quality.
 */
static int __must_check nr_add_neigh(ax25_address *callsign,
	ax25_digi *ax25_digi, struct net_device *dev, unsigned int quality)
{
	struct nr_neigh *nr_neigh;

	nr_neigh = nr_neigh_get_dev(callsign, dev);
	if (nr_neigh) {
		nr_neigh->quality = quality;
		nr_neigh->locked  = 1;
		nr_neigh_put(nr_neigh);
		return 0;
	}

	if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL)
		return -ENOMEM;

	nr_neigh->callsign = *callsign;
	nr_neigh->digipeat = NULL;
	nr_neigh->ax25     = NULL;
	nr_neigh->dev      = dev;
	nr_neigh->quality  = quality;
	nr_neigh->locked   = 1;
	nr_neigh->count    = 0;
	nr_neigh->number   = nr_neigh_no++;
	nr_neigh->failed   = 0;
	atomic_set(&nr_neigh->refcount, 1);

	if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
		nr_neigh->digipeat = kmemdup(ax25_digi, sizeof(*ax25_digi),
					     GFP_ATOMIC);
		if (nr_neigh->digipeat == NULL) {
			kfree(nr_neigh);
			return -ENOMEM;
		}
	}

	spin_lock_bh(&nr_neigh_list_lock);
	hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
	/* refcount is initialized at 1 */
	spin_unlock_bh(&nr_neigh_list_lock);

	return 0;
}
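
/*
 * A neighbour added this way is "locked": its quality is operator assigned
 * (nr_add_node() will not overwrite it) and the entry is kept even while no
 * node routes through it.  nr_del_neigh() below clears the lock again.
 */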
458 * "Delete" a neighbour. The neighbour is only removed if the number
459 * of nodes that may use it is zero.
static int nr_del_neigh(ax25_address *callsign, struct net_device *dev, unsigned int quality)
{
	struct nr_neigh *nr_neigh;

	nr_neigh = nr_neigh_get_dev(callsign, dev);

	if (nr_neigh == NULL) return -EINVAL;

	nr_neigh->quality = quality;
	nr_neigh->locked  = 0;

	if (nr_neigh->count == 0)
		nr_remove_neigh(nr_neigh);
	nr_neigh_put(nr_neigh);

	return 0;
}
/*
 * Decrement the obsolescence count by one. If a route is reduced to a
 * count of zero, remove it. Also remove any unlocked neighbours with
 * zero nodes routing via them.
 */
static int nr_dec_obs(void)
{
	struct nr_neigh *nr_neigh;
	struct nr_node  *s;
	struct hlist_node *node, *nodet;
	int i;

	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each_safe(s, node, nodet, &nr_node_list) {
		nr_node_lock(s);
		for (i = 0; i < s->count; i++) {
			switch (s->routes[i].obs_count) {
			case 0:		/* A locked entry */
				break;
			case 1:		/* From 1 -> 0 */
				nr_neigh = s->routes[i].neighbour;
				nr_neigh->count--;
				nr_neigh_put(nr_neigh);

				if (nr_neigh->count == 0 && !nr_neigh->locked)
					nr_remove_neigh(nr_neigh);

				s->count--;
				switch (i) {
				case 0: s->routes[0] = s->routes[1];
				case 1: s->routes[1] = s->routes[2];
				case 2: break;
				}
				break;
			default:
				s->routes[i].obs_count--;
				break;
			}
		}

		if (s->count <= 0)
			nr_remove_node_locked(s);
		nr_node_unlock(s);
	}
	spin_unlock_bh(&nr_node_list_lock);

	return 0;
}
/*
 * A device has been removed. Remove its routes and neighbours.
 */
void nr_rt_device_down(struct net_device *dev)
{
	struct nr_neigh *s;
	struct nr_node  *t;
	struct hlist_node *node, *nodet, *node2, *node2t;
	int i;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each_safe(s, node, nodet, &nr_neigh_list) {
		if (s->dev == dev) {
			spin_lock_bh(&nr_node_list_lock);
			nr_node_for_each_safe(t, node2, node2t, &nr_node_list) {
				nr_node_lock(t);
				for (i = 0; i < t->count; i++) {
					if (t->routes[i].neighbour == s) {
						t->count--;
						switch (i) {
						case 0: t->routes[0] = t->routes[1];
						case 1: t->routes[1] = t->routes[2];
						case 2: break;
						}
					}
				}
				if (t->count <= 0)
					nr_remove_node_locked(t);
				nr_node_unlock(t);
			}
			spin_unlock_bh(&nr_node_list_lock);
			nr_remove_neigh_locked(s);
		}
	}
	spin_unlock_bh(&nr_neigh_list_lock);
}
/*
 * Check that the device given is a valid AX.25 interface that is "up".
 * Or a valid ethernet interface with an AX.25 callsign binding.
 */
static struct net_device *nr_ax25_dev_get(char *devname)
{
	struct net_device *dev;

	if ((dev = dev_get_by_name(&init_net, devname)) == NULL)
		return NULL;

	if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25)
		return dev;

	dev_put(dev);
	return NULL;
}
/*
 * Find the first active NET/ROM device, usually "nr0".
 */
struct net_device *nr_dev_first(void)
{
	struct net_device *dev, *first = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM)
			if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
				first = dev;
	}
	if (first)
		dev_hold(first);
	rcu_read_unlock();

	return first;
}
/*
 * Find the NET/ROM device for the given callsign.
 */
struct net_device *nr_dev_get(ax25_address *addr)
{
	struct net_device *dev;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM &&
		    ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0) {
			dev_hold(dev);
			goto out;
		}
	}
	dev = NULL;
out:
	rcu_read_unlock();
	return dev;
}
static ax25_digi *nr_call_to_digi(ax25_digi *digi, int ndigis,
	ax25_address *digipeaters)
{
	int i;

	if (ndigis == 0)
		return NULL;

	for (i = 0; i < ndigis; i++) {
		digi->calls[i]    = digipeaters[i];
		digi->repeated[i] = 0;
	}

	digi->ndigi      = ndigis;
	digi->lastrepeat = -1;

	return digi;
}
/*
 * Handle the ioctls that control the routing functions.
 */
int nr_rt_ioctl(unsigned int cmd, void __user *arg)
{
	struct nr_route_struct nr_route;
	struct net_device *dev;
	ax25_digi digi;
	int ret;

	switch (cmd) {
	case SIOCADDRT:
		if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
			return -EFAULT;
		if (nr_route.ndigis > AX25_MAX_DIGIS)
			return -EINVAL;
		if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
			return -EINVAL;
		switch (nr_route.type) {
		case NETROM_NODE:
			if (strnlen(nr_route.mnemonic, 7) == 7) {
				ret = -EINVAL;
				break;
			}

			ret = nr_add_node(&nr_route.callsign,
				nr_route.mnemonic,
				&nr_route.neighbour,
				nr_call_to_digi(&digi, nr_route.ndigis,
						nr_route.digipeaters),
				dev, nr_route.quality,
				nr_route.obs_count);
			break;
		case NETROM_NEIGH:
			ret = nr_add_neigh(&nr_route.callsign,
				nr_call_to_digi(&digi, nr_route.ndigis,
						nr_route.digipeaters),
				dev, nr_route.quality);
			break;
		default:
			ret = -EINVAL;
		}
		dev_put(dev);
		return ret;

	case SIOCDELRT:
		if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
			return -EFAULT;
		if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
			return -EINVAL;
		switch (nr_route.type) {
		case NETROM_NODE:
			ret = nr_del_node(&nr_route.callsign,
				&nr_route.neighbour, dev);
			break;
		case NETROM_NEIGH:
			ret = nr_del_neigh(&nr_route.callsign,
				dev, nr_route.quality);
			break;
		default:
			ret = -EINVAL;
		}
		dev_put(dev);
		return ret;

	case SIOCNRDECOBS:
		return nr_dec_obs();

	default:
		return -EINVAL;
	}

	return 0;
}
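
/*
 * User space reaches the add/delete cases above via SIOCADDRT/SIOCDELRT
 * ioctls on an AF_NETROM socket.  A rough, illustrative-only sketch of an
 * add request (field names follow struct nr_route_struct in
 * <linux/netrom.h>; the device name and the quality/obsolescence numbers
 * are just example values):
 *
 *	struct nr_route_struct nr_route;
 *
 *	memset(&nr_route, 0, sizeof(nr_route));
 *	nr_route.type      = NETROM_NODE;
 *	nr_route.callsign  = destination_node_call;
 *	nr_route.neighbour = next_hop_call;
 *	strcpy(nr_route.device, "ax0");
 *	strcpy(nr_route.mnemonic, "NODE");
 *	nr_route.quality   = 192;
 *	nr_route.obs_count = 6;
 *	ioctl(fd, SIOCADDRT, &nr_route);
 */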
/*
 * A level 2 link has timed out, so the link appears to be poor; don't use
 * that neighbour until it is reset.
 */
void nr_link_failed(ax25_cb *ax25, int reason)
{
	struct nr_neigh *s, *nr_neigh = NULL;
	struct hlist_node *node;
	struct nr_node  *nr_node = NULL;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each(s, node, &nr_neigh_list) {
		if (s->ax25 == ax25) {
			nr_neigh_hold(s);
			nr_neigh = s;
			break;
		}
	}
	spin_unlock_bh(&nr_neigh_list_lock);

	if (nr_neigh == NULL)
		return;

	nr_neigh->ax25 = NULL;
	ax25_cb_put(ax25);

	if (++nr_neigh->failed < sysctl_netrom_link_fails_count) {
		nr_neigh_put(nr_neigh);
		return;
	}
	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each(nr_node, node, &nr_node_list) {
		nr_node_lock(nr_node);
		if (nr_node->which < nr_node->count &&
		    nr_node->routes[nr_node->which].neighbour == nr_neigh)
			nr_node->which++;
		nr_node_unlock(nr_node);
	}
	spin_unlock_bh(&nr_node_list_lock);
	nr_neigh_put(nr_neigh);
}
/*
 * Route a frame to an appropriate AX.25 connection. A NULL ax25_cb
 * indicates an internally generated frame.
 */
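/*
 * On entry skb->data points at the NET/ROM network header: the source
 * callsign at offset 0, the destination callsign at offset 7 and the
 * time-to-live byte at offset 14, which is what the offsets used below
 * refer to.
 */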
int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
{
	ax25_address *nr_src, *nr_dest;
	struct nr_neigh *nr_neigh;
	struct nr_node  *nr_node;
	struct net_device *dev;
	unsigned char *dptr;
	ax25_cb *ax25s;
	int ret;
	struct sk_buff *skbn;

	nr_src  = (ax25_address *)(skb->data + 0);
	nr_dest = (ax25_address *)(skb->data + 7);

	if (ax25 != NULL) {
		ret = nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat,
				  ax25->ax25_dev->dev, 0,
				  sysctl_netrom_obsolescence_count_initialiser);
		if (ret)
			return ret;
	}

	if ((dev = nr_dev_get(nr_dest)) != NULL) {	/* It's for me */
		if (ax25 == NULL)			/* It's from me */
			ret = nr_loopback_queue(skb);
		else
			ret = nr_rx_frame(skb, dev);
		dev_put(dev);
		return ret;
	}

	if (!sysctl_netrom_routing_control && ax25 != NULL)
		return 0;

	/* Its Time-To-Live has expired */
	if (skb->data[14] == 1) {
		return 0;
	}

	nr_node = nr_node_get(nr_dest);
	if (nr_node == NULL)
		return 0;
	nr_node_lock(nr_node);

	if (nr_node->which >= nr_node->count) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		return 0;
	}

	nr_neigh = nr_node->routes[nr_node->which].neighbour;

	if ((dev = nr_dev_first()) == NULL) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		return 0;
	}

	/* We are going to change the netrom headers so we should get our
	   own skb, we also did not know until now how much header space
	   we had to reserve... - RXQ */
	if ((skbn=skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC)) == NULL) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		dev_put(dev);
		return 0;
	}
	kfree_skb(skb);
	skb = skbn;
	skb->data[14]--;

	dptr  = skb_push(skb, 1);
	*dptr = AX25_P_NETROM;

	ax25s = nr_neigh->ax25;
	nr_neigh->ax25 = ax25_send_frame(skb, 256,
					 (ax25_address *)dev->dev_addr,
					 &nr_neigh->callsign,
					 nr_neigh->digipeat, nr_neigh->dev);
	if (ax25s)
		ax25_cb_put(ax25s);

	dev_put(dev);
	ret = (nr_neigh->ax25 != NULL);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);

	return ret;
}
#ifdef CONFIG_PROC_FS

static void *nr_node_start(struct seq_file *seq, loff_t *pos)
{
	spin_lock_bh(&nr_node_list_lock);
	return seq_hlist_start_head(&nr_node_list, *pos);
}

static void *nr_node_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_hlist_next(v, &nr_node_list, pos);
}

static void nr_node_stop(struct seq_file *seq, void *v)
{
	spin_unlock_bh(&nr_node_list_lock);
}
static int nr_node_show(struct seq_file *seq, void *v)
{
	char buf[11];
	int i;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "callsign  mnemonic w n qual obs neigh qual obs neigh qual obs neigh\n");
	else {
		struct nr_node *nr_node = hlist_entry(v, struct nr_node,
						      node_node);

		nr_node_lock(nr_node);
		seq_printf(seq, "%-9s %-7s  %d %d",
			ax2asc(buf, &nr_node->callsign),
			(nr_node->mnemonic[0] == '\0') ? "*" : nr_node->mnemonic,
			nr_node->which + 1,
			nr_node->count);

		for (i = 0; i < nr_node->count; i++) {
			seq_printf(seq, "  %3d   %d %05d",
				nr_node->routes[i].quality,
				nr_node->routes[i].obs_count,
				nr_node->routes[i].neighbour->number);
		}
		nr_node_unlock(nr_node);

		seq_puts(seq, "\n");
	}
	return 0;
}
static const struct seq_operations nr_node_seqops = {
	.start = nr_node_start,
	.next = nr_node_next,
	.stop = nr_node_stop,
	.show = nr_node_show,
};

static int nr_node_info_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &nr_node_seqops);
}

const struct file_operations nr_nodes_fops = {
	.owner = THIS_MODULE,
	.open = nr_node_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
static void *nr_neigh_start(struct seq_file *seq, loff_t *pos)
{
	spin_lock_bh(&nr_neigh_list_lock);
	return seq_hlist_start_head(&nr_neigh_list, *pos);
}

static void *nr_neigh_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_hlist_next(v, &nr_neigh_list, pos);
}

static void nr_neigh_stop(struct seq_file *seq, void *v)
{
	spin_unlock_bh(&nr_neigh_list_lock);
}
static int nr_neigh_show(struct seq_file *seq, void *v)
{
	char buf[11];
	int i;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "addr  callsign  dev  qual lock count failed digipeaters\n");
	else {
		struct nr_neigh *nr_neigh;

		nr_neigh = hlist_entry(v, struct nr_neigh, neigh_node);
		seq_printf(seq, "%05d %-9s %-4s  %3d    %d   %3d    %3d",
			nr_neigh->number,
			ax2asc(buf, &nr_neigh->callsign),
			nr_neigh->dev ? nr_neigh->dev->name : "???",
			nr_neigh->quality,
			nr_neigh->locked,
			nr_neigh->count,
			nr_neigh->failed);

		if (nr_neigh->digipeat != NULL) {
			for (i = 0; i < nr_neigh->digipeat->ndigi; i++)
				seq_printf(seq, " %s",
					   ax2asc(buf, &nr_neigh->digipeat->calls[i]));
		}

		seq_puts(seq, "\n");
	}
	return 0;
}
static const struct seq_operations nr_neigh_seqops = {
	.start = nr_neigh_start,
	.next = nr_neigh_next,
	.stop = nr_neigh_stop,
	.show = nr_neigh_show,
};

static int nr_neigh_info_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &nr_neigh_seqops);
}

const struct file_operations nr_neigh_fops = {
	.owner = THIS_MODULE,
	.open = nr_neigh_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

#endif
/*
 * Free all memory associated with the nodes and routes lists.
 */
void __exit nr_rt_free(void)
{
	struct nr_neigh *s = NULL;
	struct nr_node  *t = NULL;
	struct hlist_node *node, *nodet;

	spin_lock_bh(&nr_neigh_list_lock);
	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each_safe(t, node, nodet, &nr_node_list) {
		nr_node_lock(t);
		nr_remove_node_locked(t);
		nr_node_unlock(t);
	}
	nr_neigh_for_each_safe(s, node, nodet, &nr_neigh_list) {
		while (s->count) {
			s->count--;
			nr_neigh_put(s);
		}
		nr_remove_neigh_locked(s);
	}
	spin_unlock_bh(&nr_node_list_lock);
	spin_unlock_bh(&nr_neigh_list_lock);
}