// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *      X.25 Packet Layer release 002
 *
 *      This is ALPHA test software. This code may break your machine,
 *      randomly fail to work with new releases, misbehave and/or generally
 *      screw up. It might even work.
 *
 *      This code REQUIRES 2.1.15 or higher
 *
 *      History
 *      X.25 001        Jonathan Naylor   Started coding.
 *      X.25 002        Jonathan Naylor   New timer architecture.
 *      mar/20/00       Daniela Squassoni Disabling/enabling of facilities
 *                                        negotiation.
 *      2000-09-04      Henner Eisen      dev_hold() / dev_put() for x25_neigh.
 */

#define pr_fmt(fmt) "X25: " fmt

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/uaccess.h>
#include <linux/init.h>
#include <net/x25.h>

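/*
 *      All known X.25 neighbours, one per X.25-capable network device,
 *      protected by x25_neigh_list_lock.  Entries are added by
 *      x25_link_device_up() and removed by x25_link_device_down() and
 *      x25_link_free().
 */
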
LIST_HEAD(x25_neigh_list);
DEFINE_RWLOCK(x25_neigh_list_lock);

static void x25_t20timer_expiry(struct timer_list *);

static void x25_transmit_restart_confirmation(struct x25_neigh *nb);
static void x25_transmit_restart_request(struct x25_neigh *nb);

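/*
 *      T20 is the Restart Request retransmission timer: nb->t20 jiffies,
 *      taken from sysctl_x25_restart_request_timeout.  Its expiry handler
 *      resends the Restart Request and re-arms itself until the restart
 *      exchange completes and the timer is stopped.
 */
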
/*
 *      Linux set/reset timer routines
 */
static inline void x25_start_t20timer(struct x25_neigh *nb)
{
        mod_timer(&nb->t20timer, jiffies + nb->t20);
}

static void x25_t20timer_expiry(struct timer_list *t)
{
        struct x25_neigh *nb = from_timer(nb, t, t20timer);

        x25_transmit_restart_request(nb);
        x25_start_t20timer(nb);
}

static inline void x25_stop_t20timer(struct x25_neigh *nb)
{
        del_timer(&nb->t20timer);
}

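/*
 *      Neighbour (restart) states used below:
 *
 *      X25_LINK_STATE_0 - layer 2 link down
 *      X25_LINK_STATE_1 - layer 2 establishment requested, packets queued
 *      X25_LINK_STATE_2 - Restart Request sent, T20 running
 *      X25_LINK_STATE_3 - link up, packets are transmitted directly
 */
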
/*
 *      This handles all restart and diagnostic frames.
 */
void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb,
                      unsigned short frametype)
{
        struct sk_buff *skbn;

        switch (frametype) {
        case X25_RESTART_REQUEST:
                switch (nb->state) {
                case X25_LINK_STATE_0:
                        /* This can happen when the x25 module just gets loaded
                         * and doesn't know layer 2 has already connected
                         */
                        nb->state = X25_LINK_STATE_3;
                        x25_transmit_restart_confirmation(nb);
                        break;
                case X25_LINK_STATE_2:
                        x25_stop_t20timer(nb);
                        nb->state = X25_LINK_STATE_3;
                        break;
                case X25_LINK_STATE_3:
                        /* clear existing virtual calls */
                        x25_kill_by_neigh(nb);
                        x25_transmit_restart_confirmation(nb);
                        break;
                }
                break;

        case X25_RESTART_CONFIRMATION:
                switch (nb->state) {
                case X25_LINK_STATE_2:
                        x25_stop_t20timer(nb);
                        nb->state = X25_LINK_STATE_3;
                        break;
                case X25_LINK_STATE_3:
                        /* clear existing virtual calls */
                        x25_kill_by_neigh(nb);
                        x25_transmit_restart_request(nb);
                        nb->state = X25_LINK_STATE_2;
                        x25_start_t20timer(nb);
                        break;
                }
                break;

        case X25_DIAGNOSTIC:
                if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 4))
                        break;

                pr_warn("diagnostic #%d - %02X %02X %02X\n",
                        skb->data[3], skb->data[4],
                        skb->data[5], skb->data[6]);
                break;

        default:
                pr_warn("received unknown %02X with LCI 000\n",
                        frametype);
                break;
        }

        /* Flush anything queued while the link was coming up. */
        if (nb->state == X25_LINK_STATE_3)
                while ((skbn = skb_dequeue(&nb->queue)) != NULL)
                        x25_send_frame(skbn, nb);
}

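/*
 *      The transmit helpers below build bare packet-layer frames by hand.
 *      The standard three-octet header is:
 *
 *      octet 1: GFI (standard or extended sequencing) | high nibble of LCI
 *      octet 2: low octet of the LCI (restart packets use LCI 0)
 *      octet 3: packet type identifier
 *
 *      Restart Request and Clear Request carry two further octets,
 *      cause and diagnostic.
 */
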
/*
 *      This routine is called when a Restart Request is needed
 */
static void x25_transmit_restart_request(struct x25_neigh *nb)
{
        unsigned char *dptr;
        int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
        struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

        if (!skb)
                return;

        skb_reserve(skb, X25_MAX_L2_LEN);
        dptr = skb_put(skb, X25_STD_MIN_LEN + 2);

        *dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
        *dptr++ = 0x00;                 /* LCI 0 */
        *dptr++ = X25_RESTART_REQUEST;
        *dptr++ = 0x00;                 /* cause */
        *dptr++ = 0x00;                 /* diagnostic */

        skb->sk = NULL;
        x25_send_frame(skb, nb);
}

/*
 *      This routine is called when a Restart Confirmation is needed
 */
static void x25_transmit_restart_confirmation(struct x25_neigh *nb)
{
        unsigned char *dptr;
        int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN;
        struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

        if (!skb)
                return;

        skb_reserve(skb, X25_MAX_L2_LEN);
        dptr = skb_put(skb, X25_STD_MIN_LEN);

        *dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
        *dptr++ = 0x00;
        *dptr++ = X25_RESTART_CONFIRMATION;

        skb->sk = NULL;
        x25_send_frame(skb, nb);
}

/*
 *      This routine is called when a Clear Request is needed outside of the
 *      context of a connected socket.
 */
void x25_transmit_clear_request(struct x25_neigh *nb, unsigned int lci,
                                unsigned char cause)
{
        unsigned char *dptr;
        int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
        struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

        if (!skb)
                return;

        skb_reserve(skb, X25_MAX_L2_LEN);
        dptr = skb_put(skb, X25_STD_MIN_LEN + 2);

        *dptr++ = ((lci >> 8) & 0x0F) | (nb->extended ?
                                         X25_GFI_EXTSEQ :
                                         X25_GFI_STDSEQ);
        *dptr++ = (lci >> 0) & 0xFF;
        *dptr++ = X25_CLEAR_REQUEST;
        *dptr++ = cause;
        *dptr++ = 0x00;                 /* diagnostic */

        skb->sk = NULL;
        x25_send_frame(skb, nb);
}

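/*
 *      Queue or send a packet according to the neighbour's link state:
 *      kick off layer 2 establishment from state 0, queue while the link
 *      is coming up (states 1 and 2), and send directly in state 3.
 */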
void x25_transmit_link(struct sk_buff *skb, struct x25_neigh *nb)
{
        switch (nb->state) {
        case X25_LINK_STATE_0:
                skb_queue_tail(&nb->queue, skb);
                nb->state = X25_LINK_STATE_1;
                x25_establish_link(nb);
                break;
        case X25_LINK_STATE_1:
        case X25_LINK_STATE_2:
                skb_queue_tail(&nb->queue, skb);
                break;
        case X25_LINK_STATE_3:
                x25_send_frame(skb, nb);
                break;
        }
}

/*
 *      Called when the link layer has become established.
 */
void x25_link_established(struct x25_neigh *nb)
{
        switch (nb->state) {
        case X25_LINK_STATE_0:
        case X25_LINK_STATE_1:
                x25_transmit_restart_request(nb);
                nb->state = X25_LINK_STATE_2;
                x25_start_t20timer(nb);
                break;
        }
}

/*
 *      Called when the link layer has terminated, or an establishment
 *      request has failed.
 */
void x25_link_terminated(struct x25_neigh *nb)
{
        nb->state = X25_LINK_STATE_0;
        skb_queue_purge(&nb->queue);
        x25_stop_t20timer(nb);

        /* Out of order: clear existing virtual calls (X.25 03/93 4.6.3) */
        x25_kill_by_neigh(nb);
}

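/*
 *      Neighbour lifetime: x25_link_device_up() holds a reference on the
 *      device and creates the neighbour with a refcount of one;
 *      x25_link_device_down() and x25_link_free() drop both again through
 *      __x25_remove_neigh() and dev_put().
 */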
/*
 *      Add a new device.
 */
void x25_link_device_up(struct net_device *dev)
{
        struct x25_neigh *nb = kmalloc(sizeof(*nb), GFP_ATOMIC);

        if (!nb)
                return;

        skb_queue_head_init(&nb->queue);
        timer_setup(&nb->t20timer, x25_t20timer_expiry, 0);

        dev_hold(dev);
        nb->dev      = dev;
        nb->state    = X25_LINK_STATE_0;
        nb->extended = 0;
        /*
         * Enables negotiation
         */
        nb->global_facil_mask = X25_MASK_REVERSE |
                                X25_MASK_THROUGHPUT |
                                X25_MASK_PACKET_SIZE |
                                X25_MASK_WINDOW_SIZE;
        nb->t20      = sysctl_x25_restart_request_timeout;
        refcount_set(&nb->refcnt, 1);

        write_lock_bh(&x25_neigh_list_lock);
        list_add(&nb->node, &x25_neigh_list);
        write_unlock_bh(&x25_neigh_list_lock);
}

/**
 *      __x25_remove_neigh - remove neighbour from x25_neigh_list
 *      @nb: neigh to remove
 *
 *      Remove the neighbour from x25_neigh_list if it is still on it.
 *      Caller must hold x25_neigh_list_lock.
 */
static void __x25_remove_neigh(struct x25_neigh *nb)
{
        if (nb->node.next) {
                list_del(&nb->node);
                x25_neigh_put(nb);
        }
}

/*
 *      A device has been removed, remove its links.
 */
void x25_link_device_down(struct net_device *dev)
{
        struct x25_neigh *nb;
        struct list_head *entry, *tmp;

        write_lock_bh(&x25_neigh_list_lock);

        list_for_each_safe(entry, tmp, &x25_neigh_list) {
                nb = list_entry(entry, struct x25_neigh, node);

                if (nb->dev == dev) {
                        __x25_remove_neigh(nb);
                        dev_put(dev);
                }
        }

        write_unlock_bh(&x25_neigh_list_lock);
}

/*
 *      Given a device, return its neighbour (with a reference held), or NULL.
 */
struct x25_neigh *x25_get_neigh(struct net_device *dev)
{
        struct x25_neigh *nb, *use = NULL;

        read_lock_bh(&x25_neigh_list_lock);
        list_for_each_entry(nb, &x25_neigh_list, node) {
                if (nb->dev == dev) {
                        use = nb;
                        break;
                }
        }

        if (use)
                x25_neigh_hold(use);
        read_unlock_bh(&x25_neigh_list_lock);
        return use;
}

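/*
 *      SIOCX25GSUBSCRIP reads, and SIOCX25SSUBSCRIP writes, the neighbour's
 *      extended-sequencing flag and global facilities mask through a
 *      struct x25_subscrip_struct supplied by user space.
 */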
/*
 *      Handle the ioctls that control the subscription functions.
 */
int x25_subscr_ioctl(unsigned int cmd, void __user *arg)
{
        struct x25_subscrip_struct x25_subscr;
        struct x25_neigh *nb;
        struct net_device *dev;
        int rc = -EINVAL;

        if (cmd != SIOCX25GSUBSCRIP && cmd != SIOCX25SSUBSCRIP)
                goto out;

        rc = -EFAULT;
        if (copy_from_user(&x25_subscr, arg, sizeof(x25_subscr)))
                goto out;

        rc = -EINVAL;
        if ((dev = x25_dev_get(x25_subscr.device)) == NULL)
                goto out;

        if ((nb = x25_get_neigh(dev)) == NULL)
                goto out_dev_put;

        dev_put(dev);

        if (cmd == SIOCX25GSUBSCRIP) {
                read_lock_bh(&x25_neigh_list_lock);
                x25_subscr.extended          = nb->extended;
                x25_subscr.global_facil_mask = nb->global_facil_mask;
                read_unlock_bh(&x25_neigh_list_lock);
                rc = copy_to_user(arg, &x25_subscr,
                                  sizeof(x25_subscr)) ? -EFAULT : 0;
        } else {
                rc = -EINVAL;
                if (!(x25_subscr.extended && x25_subscr.extended != 1)) {
                        /* only 0 and 1 are valid values for extended */
                        rc = 0;
                        write_lock_bh(&x25_neigh_list_lock);
                        nb->extended          = x25_subscr.extended;
                        nb->global_facil_mask = x25_subscr.global_facil_mask;
                        write_unlock_bh(&x25_neigh_list_lock);
                }
        }
        x25_neigh_put(nb);
out:
        return rc;
out_dev_put:
        dev_put(dev);
        goto out;
}

/*
 *      Release all memory associated with X.25 neighbour structures.
 */
void __exit x25_link_free(void)
{
        struct x25_neigh *nb;
        struct list_head *entry, *tmp;

        write_lock_bh(&x25_neigh_list_lock);

        list_for_each_safe(entry, tmp, &x25_neigh_list) {
                struct net_device *dev;

                nb = list_entry(entry, struct x25_neigh, node);
                dev = nb->dev;
                __x25_remove_neigh(nb);
                dev_put(dev);
        }
        write_unlock_bh(&x25_neigh_list_lock);
}