1 // SPDX-License-Identifier: GPL-2.0-or-later
4 * Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
5 * Copyright Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
6 * Copyright Tomi Manninen OH2BNS (oh2bns@sral.fi)
8 #include <linux/errno.h>
9 #include <linux/types.h>
10 #include <linux/socket.h>
12 #include <linux/kernel.h>
13 #include <linux/timer.h>
14 #include <linux/string.h>
15 #include <linux/sockios.h>
16 #include <linux/net.h>
17 #include <linux/slab.h>
19 #include <linux/inet.h>
20 #include <linux/netdevice.h>
22 #include <linux/if_arp.h>
23 #include <linux/skbuff.h>
25 #include <linux/uaccess.h>
26 #include <linux/fcntl.h>
27 #include <linux/termios.h> /* For TIOCINQ/OUTQ */
29 #include <linux/interrupt.h>
30 #include <linux/notifier.h>
31 #include <linux/init.h>
32 #include <linux/spinlock.h>
33 #include <net/netrom.h>
34 #include <linux/seq_file.h>
35 #include <linux/export.h>
/* Monotonically increasing id handed to each new neighbour entry. */
37 static unsigned int nr_neigh_no = 1;
/*
 * Global node and neighbour tables. Each hlist is guarded by its own
 * spinlock; code below takes them with spin_lock_bh().
 */
39 static HLIST_HEAD(nr_node_list);
40 static DEFINE_SPINLOCK(nr_node_list_lock);
41 static HLIST_HEAD(nr_neigh_list);
42 static DEFINE_SPINLOCK(nr_neigh_list_lock);
/*
 * Look up a node by callsign in nr_node_list under the list lock,
 * taking a reference (nr_node_hold) on the match before returning it.
 * NOTE(review): this extract is incomplete (gaps in the embedded line
 * numbering; the loop exit and return of 'found' are missing) — do not
 * rewrite without the full source.
 */
44 static struct nr_node *nr_node_get(ax25_address *callsign)
46 struct nr_node *found = NULL;
47 struct nr_node *nr_node;
49 spin_lock_bh(&nr_node_list_lock);
50 nr_node_for_each(nr_node, &nr_node_list)
51 if (ax25cmp(callsign, &nr_node->callsign) == 0) {
/* Pin the entry before the list lock is dropped. */
52 nr_node_hold(nr_node);
56 spin_unlock_bh(&nr_node_list_lock);
/*
 * Look up a neighbour by callsign AND device under the neighbour list
 * lock, taking a reference (nr_neigh_hold) on the match.
 * NOTE(review): fragment — the assignment to 'found' and the final
 * return are among the missing lines.
 */
60 static struct nr_neigh *nr_neigh_get_dev(ax25_address *callsign,
61 struct net_device *dev)
63 struct nr_neigh *found = NULL;
64 struct nr_neigh *nr_neigh;
66 spin_lock_bh(&nr_neigh_list_lock);
67 nr_neigh_for_each(nr_neigh, &nr_neigh_list)
68 if (ax25cmp(callsign, &nr_neigh->callsign) == 0 &&
69 nr_neigh->dev == dev) {
/* Pin the entry before the list lock is dropped. */
70 nr_neigh_hold(nr_neigh);
74 spin_unlock_bh(&nr_neigh_list_lock);
/* Forward declaration: nr_add_node() below needs nr_remove_neigh(). */
78 static void nr_remove_neigh(struct nr_neigh *);
80 /* re-sort the routes in quality order. */
/*
 * Swap routes[x] and routes[y] when y has better quality, keeping the
 * node's 'which' index pointing at the same logical route.
 * NOTE(review): fragment — the 'which' reassignment statements between
 * the if/else-if arms are missing from this extract.
 */
81 static void re_sort_routes(struct nr_node *nr_node, int x, int y)
83 if (nr_node->routes[y].quality > nr_node->routes[x].quality) {
84 if (nr_node->which == x)
86 else if (nr_node->which == y)
89 swap(nr_node->routes[x], nr_node->routes[y]);
94 * Add a new route to a node, and in the process add the node and the
95 * neighbour if it is new.
/*
 * Core route-learning entry point, called from ioctl and frame-routing
 * paths. Creates node and/or neighbour entries on demand (GFP_ATOMIC),
 * inserts the route into the node's fixed 3-slot routes[] table, and
 * keeps that table sorted by quality via re_sort_routes().
 * NOTE(review): this extract is incomplete — error returns, several
 * closing braces and 'which'/'count' updates fall in the dropped lines.
 */
97 static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
98 ax25_address *ax25, ax25_digi *ax25_digi, struct net_device *dev,
99 int quality, int obs_count)
101 struct nr_node *nr_node;
102 struct nr_neigh *nr_neigh;
104 struct net_device *odev;
106 if ((odev=nr_dev_get(nr)) != NULL) { /* Can't add routes to ourself */
111 nr_node = nr_node_get(nr);
113 nr_neigh = nr_neigh_get_dev(ax25, dev);
116 * The L2 link to a neighbour has failed in the past
117 * and now a frame comes from this neighbour. We assume
118 * it was a temporary trouble with the link and reset the
119 * routes now (and not wait for a node broadcast).
121 if (nr_neigh != NULL && nr_neigh->failed != 0 && quality == 0) {
122 struct nr_node *nr_nodet;
124 spin_lock_bh(&nr_node_list_lock);
125 nr_node_for_each(nr_nodet, &nr_node_list) {
126 nr_node_lock(nr_nodet);
127 for (i = 0; i < nr_nodet->count; i++)
128 if (nr_nodet->routes[i].neighbour == nr_neigh)
129 if (i < nr_nodet->which)
131 nr_node_unlock(nr_nodet);
133 spin_unlock_bh(&nr_node_list_lock);
136 if (nr_neigh != NULL)
137 nr_neigh->failed = 0;
/* quality==0 on an already-known route carries no new information. */
139 if (quality == 0 && nr_neigh != NULL && nr_node != NULL) {
140 nr_neigh_put(nr_neigh);
141 nr_node_put(nr_node);
/* Neighbour unknown: allocate and initialise a fresh entry. */
145 if (nr_neigh == NULL) {
146 if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL) {
148 nr_node_put(nr_node);
152 nr_neigh->callsign = *ax25;
153 nr_neigh->digipeat = NULL;
154 nr_neigh->ax25 = NULL;
156 nr_neigh->quality = sysctl_netrom_default_path_quality;
157 nr_neigh->locked = 0;
159 nr_neigh->number = nr_neigh_no++;
160 nr_neigh->failed = 0;
161 refcount_set(&nr_neigh->refcount, 1);
/* Copy the digipeater path, if any, into a private allocation. */
163 if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
164 nr_neigh->digipeat = kmemdup(ax25_digi,
167 if (nr_neigh->digipeat == NULL) {
170 nr_node_put(nr_node);
/* Publish the new neighbour; the hold is for the list's reference. */
175 spin_lock_bh(&nr_neigh_list_lock);
176 hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
177 nr_neigh_hold(nr_neigh);
178 spin_unlock_bh(&nr_neigh_list_lock);
/* Direct route (destination == neighbour): adopt the given quality. */
181 if (quality != 0 && ax25cmp(nr, ax25) == 0 && !nr_neigh->locked)
182 nr_neigh->quality = quality;
/* Node unknown: allocate it with this route as its only entry. */
184 if (nr_node == NULL) {
185 if ((nr_node = kmalloc(sizeof(*nr_node), GFP_ATOMIC)) == NULL) {
187 nr_neigh_put(nr_neigh);
191 nr_node->callsign = *nr;
192 strcpy(nr_node->mnemonic, mnemonic);
196 refcount_set(&nr_node->refcount, 1);
197 spin_lock_init(&nr_node->node_lock);
199 nr_node->routes[0].quality = quality;
200 nr_node->routes[0].obs_count = obs_count;
201 nr_node->routes[0].neighbour = nr_neigh;
203 nr_neigh_hold(nr_neigh);
206 spin_lock_bh(&nr_node_list_lock);
207 hlist_add_head(&nr_node->node_node, &nr_node_list);
208 /* refcount initialized at 1 */
209 spin_unlock_bh(&nr_node_list_lock);
211 nr_neigh_put(nr_neigh);
/* Existing node: update or insert the route under the node lock. */
214 nr_node_lock(nr_node);
217 strcpy(nr_node->mnemonic, mnemonic);
/* Is this neighbour already one of the node's routes? Refresh it. */
219 for (found = 0, i = 0; i < nr_node->count; i++) {
220 if (nr_node->routes[i].neighbour == nr_neigh) {
221 nr_node->routes[i].quality = quality;
222 nr_node->routes[i].obs_count = obs_count;
229 /* We have space at the bottom, slot it in */
230 if (nr_node->count < 3) {
231 nr_node->routes[2] = nr_node->routes[1];
232 nr_node->routes[1] = nr_node->routes[0];
234 nr_node->routes[0].quality = quality;
235 nr_node->routes[0].obs_count = obs_count;
236 nr_node->routes[0].neighbour = nr_neigh;
240 nr_neigh_hold(nr_neigh);
243 /* It must be better than the worst */
244 if (quality > nr_node->routes[2].quality) {
245 nr_node->routes[2].neighbour->count--;
/*
 * NOTE(review): the reference is dropped here but the same
 * pointer is still dereferenced (count/locked) just below —
 * confirm against the full source that another reference
 * keeps the neighbour alive across this window.
 */
246 nr_neigh_put(nr_node->routes[2].neighbour);
248 if (nr_node->routes[2].neighbour->count == 0 && !nr_node->routes[2].neighbour->locked)
249 nr_remove_neigh(nr_node->routes[2].neighbour);
251 nr_node->routes[2].quality = quality;
252 nr_node->routes[2].obs_count = obs_count;
253 nr_node->routes[2].neighbour = nr_neigh;
255 nr_neigh_hold(nr_neigh);
261 /* Now re-sort the routes in quality order */
262 switch (nr_node->count) {
264 re_sort_routes(nr_node, 0, 1);
265 re_sort_routes(nr_node, 1, 2);
268 re_sort_routes(nr_node, 0, 1);
/* Re-locate this neighbour's slot to fix up nr_node->which. */
274 for (i = 0; i < nr_node->count; i++) {
275 if (nr_node->routes[i].neighbour == nr_neigh) {
276 if (i < nr_node->which)
282 nr_neigh_put(nr_neigh);
283 nr_node_unlock(nr_node);
284 nr_node_put(nr_node);
/*
 * Unlink a node from nr_node_list and drop the list's reference.
 * Caller must hold nr_node_list_lock.
 */
288 static inline void __nr_remove_node(struct nr_node *nr_node)
290 hlist_del_init(&nr_node->node_node);
291 nr_node_put(nr_node);
/* Alias used by code paths that already hold nr_node_list_lock. */
294 #define nr_remove_node_locked(__node) \
295 __nr_remove_node(__node)
/* Locking wrapper: remove a node while taking nr_node_list_lock. */
297 static void nr_remove_node(struct nr_node *nr_node)
299 spin_lock_bh(&nr_node_list_lock);
300 __nr_remove_node(nr_node);
301 spin_unlock_bh(&nr_node_list_lock);
/*
 * Unlink a neighbour from nr_neigh_list and drop the list's reference.
 * Caller must hold nr_neigh_list_lock.
 */
304 static inline void __nr_remove_neigh(struct nr_neigh *nr_neigh)
306 hlist_del_init(&nr_neigh->neigh_node);
307 nr_neigh_put(nr_neigh);
/* Alias used by code paths that already hold nr_neigh_list_lock. */
310 #define nr_remove_neigh_locked(__neigh) \
311 __nr_remove_neigh(__neigh)
/* Locking wrapper: remove a neighbour while taking nr_neigh_list_lock. */
313 static void nr_remove_neigh(struct nr_neigh *nr_neigh)
315 spin_lock_bh(&nr_neigh_list_lock);
316 __nr_remove_neigh(nr_neigh);
317 spin_unlock_bh(&nr_neigh_list_lock);
321 * "Delete" a node. Strictly speaking remove a route to a node. The node
322 * is only deleted if no routes are left to it.
/*
 * Remove the route via 'neighbour'/'dev' from 'callsign''s route table,
 * compacting routes[] and freeing node/neighbour entries that become
 * unused. NOTE(review): fragment — early returns, count decrements and
 * closing braces fall in the dropped lines.
 */
324 static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct net_device *dev)
326 struct nr_node *nr_node;
327 struct nr_neigh *nr_neigh;
330 nr_node = nr_node_get(callsign);
335 nr_neigh = nr_neigh_get_dev(neighbour, dev);
337 if (nr_neigh == NULL) {
/* Neighbour unknown: release the node ref taken above and bail. */
338 nr_node_put(nr_node);
342 nr_node_lock(nr_node);
343 for (i = 0; i < nr_node->count; i++) {
344 if (nr_node->routes[i].neighbour == nr_neigh) {
346 nr_neigh_put(nr_neigh);
/* Drop the neighbour entirely once nothing routes through it. */
348 if (nr_neigh->count == 0 && !nr_neigh->locked)
349 nr_remove_neigh(nr_neigh);
350 nr_neigh_put(nr_neigh);
/* Last route gone: the node itself goes away. */
354 if (nr_node->count == 0) {
355 nr_remove_node(nr_node);
/* Otherwise shift the remaining routes up to fill the hole. */
359 nr_node->routes[0] = nr_node->routes[1];
362 nr_node->routes[1] = nr_node->routes[2];
367 nr_node_put(nr_node);
369 nr_node_unlock(nr_node);
/* No matching route found: release both references. */
374 nr_neigh_put(nr_neigh);
375 nr_node_unlock(nr_node);
376 nr_node_put(nr_node);
382 * Lock a neighbour with a quality.
/*
 * Create (or update) a manually "locked" neighbour entry with a fixed
 * quality; locked entries are exempt from automatic quality updates.
 * NOTE(review): fragment — returns and some closing braces are in the
 * dropped lines.
 */
384 static int __must_check nr_add_neigh(ax25_address *callsign,
385 ax25_digi *ax25_digi, struct net_device *dev, unsigned int quality)
387 struct nr_neigh *nr_neigh;
389 nr_neigh = nr_neigh_get_dev(callsign, dev);
/* Already known: just pin the quality and mark it locked. */
391 nr_neigh->quality = quality;
392 nr_neigh->locked = 1;
393 nr_neigh_put(nr_neigh);
397 if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL)
400 nr_neigh->callsign = *callsign;
401 nr_neigh->digipeat = NULL;
402 nr_neigh->ax25 = NULL;
404 nr_neigh->quality = quality;
405 nr_neigh->locked = 1;
407 nr_neigh->number = nr_neigh_no++;
408 nr_neigh->failed = 0;
409 refcount_set(&nr_neigh->refcount, 1);
/* Copy the digipeater path, if any, into a private allocation. */
411 if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
412 nr_neigh->digipeat = kmemdup(ax25_digi, sizeof(*ax25_digi),
414 if (nr_neigh->digipeat == NULL) {
/* Publish the new neighbour on the global list. */
420 spin_lock_bh(&nr_neigh_list_lock);
421 hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
422 /* refcount is initialized at 1 */
423 spin_unlock_bh(&nr_neigh_list_lock);
429 * "Delete" a neighbour. The neighbour is only removed if the number
430 * of nodes that may use it is zero.
/*
 * Unlock a neighbour and record the given quality; the entry is only
 * physically removed when no node routes through it (count == 0).
 */
432 static int nr_del_neigh(ax25_address *callsign, struct net_device *dev, unsigned int quality)
434 struct nr_neigh *nr_neigh;
436 nr_neigh = nr_neigh_get_dev(callsign, dev);
438 if (nr_neigh == NULL) return -EINVAL;
440 nr_neigh->quality = quality;
441 nr_neigh->locked = 0;
443 if (nr_neigh->count == 0)
444 nr_remove_neigh(nr_neigh);
/* Drop the reference taken by nr_neigh_get_dev(). */
445 nr_neigh_put(nr_neigh);
451 * Decrement the obsolescence count by one. If a route is reduced to a
452 * count of zero, remove it. Also remove any unlocked neighbours with
453 * zero nodes routing via it.
/*
 * Periodic ageing pass over every node's route table.
 * obs_count 0 = locked route (never aged); 1 = about to expire.
 * NOTE(review): fragment — per-case break/continue statements, the
 * route-count decrement and several braces are in the dropped lines.
 */
455 static int nr_dec_obs(void)
457 struct nr_neigh *nr_neigh;
459 struct hlist_node *nodet;
462 spin_lock_bh(&nr_node_list_lock);
463 nr_node_for_each_safe(s, nodet, &nr_node_list) {
465 for (i = 0; i < s->count; i++) {
466 switch (s->routes[i].obs_count) {
467 case 0: /* A locked entry */
470 case 1: /* From 1 -> 0 */
471 nr_neigh = s->routes[i].neighbour;
474 nr_neigh_put(nr_neigh);
476 if (nr_neigh->count == 0 && !nr_neigh->locked)
477 nr_remove_neigh(nr_neigh);
/* Close the gap left by the expired route. */
483 s->routes[0] = s->routes[1];
486 s->routes[1] = s->routes[2];
/* Default: still fresh, just age it by one. */
494 s->routes[i].obs_count--;
/* Node lost its last route: remove it (list lock already held). */
501 nr_remove_node_locked(s);
504 spin_unlock_bh(&nr_node_list_lock);
510 * A device has been removed. Remove its routes and neighbours.
/*
 * For every neighbour on the dying device, strip its routes from all
 * nodes (removing nodes left route-less) and then remove the neighbour
 * itself. Both list locks are held across the sweep, node lock nested
 * inside neighbour lock. NOTE(review): fragment — the dev comparison,
 * count updates and loop braces are in the dropped lines.
 */
512 void nr_rt_device_down(struct net_device *dev)
515 struct hlist_node *nodet, *node2t;
519 spin_lock_bh(&nr_neigh_list_lock);
520 nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) {
522 spin_lock_bh(&nr_node_list_lock);
523 nr_node_for_each_safe(t, node2t, &nr_node_list) {
525 for (i = 0; i < t->count; i++) {
526 if (t->routes[i].neighbour == s) {
/* Shift remaining routes up over the removed one. */
531 t->routes[0] = t->routes[1];
534 t->routes[1] = t->routes[2];
543 nr_remove_node_locked(t);
546 spin_unlock_bh(&nr_node_list_lock);
548 nr_remove_neigh_locked(s);
551 spin_unlock_bh(&nr_neigh_list_lock);
555 * Check that the device given is a valid AX.25 interface that is "up".
556 * Or a valid ethernet interface with an AX.25 callsign binding.
/*
 * Resolve 'devname' to a referenced net_device, accepting it only if it
 * is up and of type ARPHRD_AX25. dev_get_by_name() takes a reference
 * the caller must eventually drop. NOTE(review): fragment — the
 * failure-path dev_put and returns are in the dropped lines.
 */
558 static struct net_device *nr_ax25_dev_get(char *devname)
560 struct net_device *dev;
562 if ((dev = dev_get_by_name(&init_net, devname)) == NULL)
565 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25)
573 * Find the first active NET/ROM device, usually "nr0".
/*
 * Scan all netdevs (RCU) for up NET/ROM interfaces and keep the one
 * whose name sorts first by the leading 3 characters.
 * NOTE(review): fragment — the RCU lock/unlock, dev_hold and return of
 * 'first' are in the dropped lines.
 */
575 struct net_device *nr_dev_first(void)
577 struct net_device *dev, *first = NULL;
580 for_each_netdev_rcu(&init_net, dev) {
581 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM)
582 if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
592 * Find the NET/ROM device for the given callsign.
/*
 * Scan all netdevs (RCU) for an up NET/ROM interface whose hardware
 * address equals 'addr'. NOTE(review): fragment — the RCU lock/unlock,
 * dev_hold and returns are in the dropped lines.
 */
594 struct net_device *nr_dev_get(ax25_address *addr)
596 struct net_device *dev;
599 for_each_netdev_rcu(&init_net, dev) {
600 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM &&
601 ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0) {
/*
 * Populate an ax25_digi from an array of digipeater callsigns supplied
 * by userspace; each hop is marked not-yet-repeated.
 * NOTE(review): fragment — the ndigis==0 early-out and the return of
 * 'digi' are in the dropped lines.
 */
612 static ax25_digi *nr_call_to_digi(ax25_digi *digi, int ndigis,
613 ax25_address *digipeaters)
620 for (i = 0; i < ndigis; i++) {
621 digi->calls[i] = digipeaters[i];
622 digi->repeated[i] = 0;
625 digi->ndigi = ndigis;
626 digi->lastrepeat = -1;
632 * Handle the ioctls that control the routing functions.
/*
 * SIOCADDRT/SIOCDELRT-style dispatcher: copies a struct
 * nr_route_struct from userspace, validates it, resolves the device,
 * and forwards to nr_add_node/nr_add_neigh or nr_del_node/nr_del_neigh
 * by nr_route.type. NOTE(review): fragment — the cmd switch, error
 * returns (-EFAULT/-EINVAL), dev_put calls and closing braces are in
 * the dropped lines.
 */
634 int nr_rt_ioctl(unsigned int cmd, void __user *arg)
636 struct nr_route_struct nr_route;
637 struct net_device *dev;
643 if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
/* Bound the digipeater list before building an ax25_digi from it. */
645 if (nr_route.ndigis > AX25_MAX_DIGIS)
647 if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
649 switch (nr_route.type) {
/* Mnemonic must fit with a NUL terminator (7-byte field). */
651 if (strnlen(nr_route.mnemonic, 7) == 7) {
656 ret = nr_add_node(&nr_route.callsign,
659 nr_call_to_digi(&digi, nr_route.ndigis,
660 nr_route.digipeaters),
661 dev, nr_route.quality,
665 ret = nr_add_neigh(&nr_route.callsign,
666 nr_call_to_digi(&digi, nr_route.ndigis,
667 nr_route.digipeaters),
668 dev, nr_route.quality);
/* Deletion variant: re-read the request and dispatch on type. */
677 if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
679 if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
681 switch (nr_route.type) {
683 ret = nr_del_node(&nr_route.callsign,
684 &nr_route.neighbour, dev);
687 ret = nr_del_neigh(&nr_route.callsign,
688 dev, nr_route.quality);
707 * A level 2 link has timed out, therefore it appears to be a poor link,
708 * then don't use that neighbour until it is reset.
/*
 * Called when an AX.25 connection to a neighbour dies: find the
 * neighbour owning that ax25_cb, clear its link pointer, and once its
 * failure count reaches sysctl_netrom_link_fails_count advance every
 * node routed through it to its next-best route.
 * NOTE(review): fragment — the hold/found assignment inside the search
 * loop and the 'which' increment are in the dropped lines.
 */
710 void nr_link_failed(ax25_cb *ax25, int reason)
712 struct nr_neigh *s, *nr_neigh = NULL;
713 struct nr_node *nr_node = NULL;
715 spin_lock_bh(&nr_neigh_list_lock);
716 nr_neigh_for_each(s, &nr_neigh_list) {
717 if (s->ax25 == ax25) {
723 spin_unlock_bh(&nr_neigh_list_lock);
725 if (nr_neigh == NULL)
728 nr_neigh->ax25 = NULL;
/* Below the failure threshold: just record the failure and return. */
731 if (++nr_neigh->failed < sysctl_netrom_link_fails_count) {
732 nr_neigh_put(nr_neigh);
735 spin_lock_bh(&nr_node_list_lock);
736 nr_node_for_each(nr_node, &nr_node_list) {
737 nr_node_lock(nr_node);
738 if (nr_node->which < nr_node->count &&
739 nr_node->routes[nr_node->which].neighbour == nr_neigh)
741 nr_node_unlock(nr_node);
743 spin_unlock_bh(&nr_node_list_lock);
744 nr_neigh_put(nr_neigh);
748 * Route a frame to an appropriate AX.25 connection. A NULL ax25_cb
749 * indicates an internally generated frame.
/*
 * Main NET/ROM forwarding path. Learns a zero-quality route back to
 * the frame's source, delivers locally when the destination is one of
 * our own interfaces, otherwise looks up the destination node and
 * transmits via its current best neighbour over AX.25.
 * NOTE(review): fragment — the ax25==NULL guard before the nr_add_node
 * learning call, TTL decrement, kfree_skb paths, the switch from 'skb'
 * to the copied 'skbn', and several returns are in the dropped lines.
 */
751 int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
753 ax25_address *nr_src, *nr_dest;
754 struct nr_neigh *nr_neigh;
755 struct nr_node *nr_node;
756 struct net_device *dev;
760 struct sk_buff *skbn;
/* Source and destination callsigns live at fixed NET/ROM header offsets. */
763 nr_src = (ax25_address *)(skb->data + 0);
764 nr_dest = (ax25_address *)(skb->data + 7);
/* Passively learn a route back to the sender of this frame. */
767 ret = nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat,
768 ax25->ax25_dev->dev, 0,
769 sysctl_netrom_obsolescence_count_initialiser);
774 if ((dev = nr_dev_get(nr_dest)) != NULL) { /* Its for me */
775 if (ax25 == NULL) /* Its from me */
776 ret = nr_loopback_queue(skb);
778 ret = nr_rx_frame(skb, dev);
/* Routing of third-party frames can be disabled via sysctl. */
783 if (!sysctl_netrom_routing_control && ax25 != NULL)
786 /* Its Time-To-Live has expired */
787 if (skb->data[14] == 1) {
791 nr_node = nr_node_get(nr_dest);
794 nr_node_lock(nr_node);
/* No usable route left for this node. */
796 if (nr_node->which >= nr_node->count) {
797 nr_node_unlock(nr_node);
798 nr_node_put(nr_node);
802 nr_neigh = nr_node->routes[nr_node->which].neighbour;
804 if ((dev = nr_dev_first()) == NULL) {
805 nr_node_unlock(nr_node);
806 nr_node_put(nr_node);
810 /* We are going to change the netrom headers so we should get our
811 own skb, we also did not know until now how much header space
812 we had to reserve... - RXQ */
813 if ((skbn=skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC)) == NULL) {
814 nr_node_unlock(nr_node);
815 nr_node_put(nr_node);
/* Prepend the AX.25 PID byte identifying a NET/ROM payload. */
823 dptr = skb_push(skb, 1);
824 *dptr = AX25_P_NETROM;
/* Reuse an existing AX.25 link to this neighbour when one is cached. */
826 ax25s = nr_neigh->ax25;
827 nr_neigh->ax25 = ax25_send_frame(skb, 256,
828 (ax25_address *)dev->dev_addr,
830 nr_neigh->digipeat, nr_neigh->dev);
/* Success iff the AX.25 layer accepted the frame. */
835 ret = (nr_neigh->ax25 != NULL);
836 nr_node_unlock(nr_node);
837 nr_node_put(nr_node);
842 #ifdef CONFIG_PROC_FS
/*
 * seq_file iterator for /proc NET/ROM node table: the list lock is
 * held from start to stop, spanning the whole traversal.
 */
844 static void *nr_node_start(struct seq_file *seq, loff_t *pos)
845 __acquires(&nr_node_list_lock)
847 spin_lock_bh(&nr_node_list_lock);
848 return seq_hlist_start_head(&nr_node_list, *pos);
851 static void *nr_node_next(struct seq_file *seq, void *v, loff_t *pos)
853 return seq_hlist_next(v, &nr_node_list, pos);
856 static void nr_node_stop(struct seq_file *seq, void *v)
857 __releases(&nr_node_list_lock)
859 spin_unlock_bh(&nr_node_list_lock);
/*
 * Emit one /proc line per node: callsign, mnemonic ("*" when empty),
 * then quality/obsolescence/neighbour-number for each route.
 * NOTE(review): fragment — the which/count arguments of the first
 * seq_printf and the trailing newline/return are in the dropped lines.
 */
862 static int nr_node_show(struct seq_file *seq, void *v)
867 if (v == SEQ_START_TOKEN)
869 "callsign mnemonic w n qual obs neigh qual obs neigh qual obs neigh\n");
871 struct nr_node *nr_node = hlist_entry(v, struct nr_node,
874 nr_node_lock(nr_node);
875 seq_printf(seq, "%-9s %-7s %d %d",
876 ax2asc(buf, &nr_node->callsign),
877 (nr_node->mnemonic[0] == '\0') ? "*" : nr_node->mnemonic,
881 for (i = 0; i < nr_node->count; i++) {
882 seq_printf(seq, " %3d %d %05d",
883 nr_node->routes[i].quality,
884 nr_node->routes[i].obs_count,
885 nr_node->routes[i].neighbour->number);
887 nr_node_unlock(nr_node);
/* seq_file operations backing the /proc NET/ROM nodes listing. */
894 const struct seq_operations nr_node_seqops = {
895 .start = nr_node_start,
896 .next = nr_node_next,
897 .stop = nr_node_stop,
898 .show = nr_node_show,
/*
 * seq_file iterator for /proc NET/ROM neighbour table: the list lock
 * is held from start to stop, spanning the whole traversal.
 */
901 static void *nr_neigh_start(struct seq_file *seq, loff_t *pos)
902 __acquires(&nr_neigh_list_lock)
904 spin_lock_bh(&nr_neigh_list_lock);
905 return seq_hlist_start_head(&nr_neigh_list, *pos);
908 static void *nr_neigh_next(struct seq_file *seq, void *v, loff_t *pos)
910 return seq_hlist_next(v, &nr_neigh_list, pos);
913 static void nr_neigh_stop(struct seq_file *seq, void *v)
914 __releases(&nr_neigh_list_lock)
916 spin_unlock_bh(&nr_neigh_list_lock);
/*
 * Emit one /proc line per neighbour: id, callsign, device name,
 * quality/lock/count/failed, then the digipeater path if present.
 * NOTE(review): fragment — the number/quality/locked/count/failed
 * arguments of the first seq_printf and the trailing newline/return
 * are in the dropped lines.
 */
919 static int nr_neigh_show(struct seq_file *seq, void *v)
924 if (v == SEQ_START_TOKEN)
925 seq_puts(seq, "addr callsign dev qual lock count failed digipeaters\n");
927 struct nr_neigh *nr_neigh;
929 nr_neigh = hlist_entry(v, struct nr_neigh, neigh_node);
930 seq_printf(seq, "%05d %-9s %-4s %3d %d %3d %3d",
932 ax2asc(buf, &nr_neigh->callsign),
933 nr_neigh->dev ? nr_neigh->dev->name : "???",
939 if (nr_neigh->digipeat != NULL) {
940 for (i = 0; i < nr_neigh->digipeat->ndigi; i++)
941 seq_printf(seq, " %s",
942 ax2asc(buf, &nr_neigh->digipeat->calls[i]));
/* seq_file operations backing the /proc NET/ROM neighbours listing. */
950 const struct seq_operations nr_neigh_seqops = {
951 .start = nr_neigh_start,
952 .next = nr_neigh_next,
953 .stop = nr_neigh_stop,
954 .show = nr_neigh_show,
959 * Free all memory associated with the nodes and routes lists.
/*
 * Module-teardown sweep: with both list locks held (neighbour lock
 * taken first, matching nr_rt_device_down), unlink every node and then
 * every neighbour. NOTE(review): fragment — per-entry lock/unlock or
 * reference drops may be among the dropped lines.
 */
961 void nr_rt_free(void)
963 struct nr_neigh *s = NULL;
964 struct nr_node *t = NULL;
965 struct hlist_node *nodet;
967 spin_lock_bh(&nr_neigh_list_lock);
968 spin_lock_bh(&nr_node_list_lock);
969 nr_node_for_each_safe(t, nodet, &nr_node_list) {
971 nr_remove_node_locked(t);
974 nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) {
979 nr_remove_neigh_locked(s);
981 spin_unlock_bh(&nr_node_list_lock);
982 spin_unlock_bh(&nr_neigh_list_lock);