1 // SPDX-License-Identifier: GPL-2.0-only
/* Generic HDLC support routines for Linux
 *
 * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
 */
9 #include <linux/errno.h>
10 #include <linux/gfp.h>
11 #include <linux/hdlc.h>
12 #include <linux/if_arp.h>
13 #include <linux/inetdevice.h>
14 #include <linux/init.h>
15 #include <linux/kernel.h>
16 #include <linux/lapb.h>
17 #include <linux/module.h>
18 #include <linux/pkt_sched.h>
19 #include <linux/poll.h>
20 #include <linux/rtnetlink.h>
21 #include <linux/skbuff.h>
22 #include <net/x25device.h>
	x25_hdlc_proto settings;	/* parameters set via ioctl: dce, modulo, window, t1, t2, n2 */
	spinlock_t up_lock; /* Protects "up" */
	struct sk_buff_head rx_queue;	/* skbs queued by LAPB callbacks, drained by rx_tasklet */
	struct tasklet_struct rx_tasklet;	/* delivers queued skbs to the network stack */
/* Forward declaration: ioctl handler referenced by the protocol registration below. */
static int x25_ioctl(struct net_device *dev, struct ifreq *ifr);

/* Accessor for the per-device X.25 protocol state attached to the hdlc_device. */
static struct x25_state *state(hdlc_device *hdlc)
/* Tasklet handler: drain rx_queue, handing each queued skb to the stack via
 * netif_receive_skb_core().  Skbs are queued (rather than delivered inline)
 * by the LAPB callbacks and this tasklet is scheduled to deliver them.
 */
static void x25_rx_queue_kick(struct tasklet_struct *t)
	struct x25_state *x25st = from_tasklet(x25st, t, rx_tasklet);
	struct sk_buff *skb = skb_dequeue(&x25st->rx_queue);

	/* deliver, then fetch the next queued skb (loop framing not visible here) */
	netif_receive_skb_core(skb);
	skb = skb_dequeue(&x25st->rx_queue);
50 /* These functions are callbacks called by LAPB layer */
/* Build a one-byte X.25 interface signalling frame (code is X25_IFACE_CONNECT
 * or X25_IFACE_DISCONNECT) and queue it for delivery to the upper layer.
 * Runs from LAPB callbacks, possibly in atomic context — hence GFP_ATOMIC
 * allocation and delivery via the rx tasklet instead of direct receive.
 */
static void x25_connect_disconnect(struct net_device *dev, int reason, int code)
	struct x25_state *x25st = state(dev_to_hdlc(dev));

	/* 1-byte skb carrying only the pseudo-header code byte */
	skb = __dev_alloc_skb(1, GFP_ATOMIC | __GFP_NOMEMALLOC);

	ptr = skb_put(skb, 1);
	/* NOTE(review): presumably *ptr = code is assigned on a line not shown
	 * in this view — confirm against the complete source.
	 */
	skb->protocol = x25_type_trans(skb, dev);

	skb_queue_tail(&x25st->rx_queue, skb);
	tasklet_schedule(&x25st->rx_tasklet);
/* LAPB callback: connection established — signal CONNECT up to layer 3. */
static void x25_connected(struct net_device *dev, int reason)
	x25_connect_disconnect(dev, reason, X25_IFACE_CONNECT);
/* LAPB callback: connection released — signal DISCONNECT up to layer 3. */
static void x25_disconnected(struct net_device *dev, int reason)
	x25_connect_disconnect(dev, reason, X25_IFACE_DISCONNECT);
/* LAPB callback: an incoming data frame.  Prepend the one-byte
 * X25_IFACE_DATA pseudo-header and queue the skb for tasklet delivery.
 * Returns NET_RX_SUCCESS once queued (the skb_cow failure/drop path is on
 * lines not visible in this view).
 */
static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
	struct x25_state *x25st = state(dev_to_hdlc(dev));

	/* ensure writable headroom for the 1-byte pseudo header */
	if (skb_cow(skb, 1)) {

	*ptr = X25_IFACE_DATA;

	skb->protocol = x25_type_trans(skb, dev);

	skb_queue_tail(&x25st->rx_queue, skb);
	tasklet_schedule(&x25st->rx_tasklet);
	return NET_RX_SUCCESS;
/* LAPB callback: transmit an LAPB-framed skb through the HDLC hardware
 * driver.  Feeds packet taps (e.g. tcpdump) before handing off.
 */
static void x25_data_transmit(struct net_device *dev, struct sk_buff *skb)
	hdlc_device *hdlc = dev_to_hdlc(dev);

	skb_reset_network_header(skb);
	skb->protocol = hdlc_type_trans(skb, dev);

	/* give attached packet taps a copy of the outgoing frame */
	if (dev_nit_active(dev))
		dev_queue_xmit_nit(skb, dev);

	hdlc->xmit(skb, dev); /* Ignore return value :-( */
/* ndo_start_xmit for the X.25-over-HDLC device.  The first byte of skb->data
 * is a pseudo-header from the upper layer selecting the operation: DATA is
 * handed to LAPB for framing, CONNECT/DISCONNECT drive the LAPB link state.
 * up_lock is held across LAPB calls so the link cannot go down mid-request.
 */
static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct x25_state *x25st = state(hdlc);

	/* There should be a pseudo header of 1 byte added by upper layers.
	 * Check to make sure it is there before reading it.
	 */

	spin_lock_bh(&x25st->up_lock);
	/* NOTE(review): a link-down early exit presumably sits between these
	 * lock/unlock lines in the complete source — confirm.
	 */
	spin_unlock_bh(&x25st->up_lock);

	/* dispatch on the pseudo-header byte */
	switch (skb->data[0]) {
	case X25_IFACE_DATA: /* Data to be transmitted */
		result = lapb_data_request(dev, skb);
		if (result != LAPB_OK)
		spin_unlock_bh(&x25st->up_lock);

	case X25_IFACE_CONNECT:
		result = lapb_connect_request(dev);
		if (result != LAPB_OK) {
			if (result == LAPB_CONNECTED)
				/* Send connect confirm. msg to level 3 */
				x25_connected(dev, 0);
			netdev_err(dev, "LAPB connect request failed, error code = %i\n",

	case X25_IFACE_DISCONNECT:
		result = lapb_disconnect_request(dev);
		if (result != LAPB_OK) {
			if (result == LAPB_NOTCONNECTED)
				/* Send disconnect confirm. msg to level 3 */
				x25_disconnected(dev, 0);
			netdev_err(dev, "LAPB disconnect request failed, error code = %i\n",

	default: /* to be defined */

	spin_unlock_bh(&x25st->up_lock);
179 static int x25_open(struct net_device *dev)
181 static const struct lapb_register_struct cb = {
182 .connect_confirmation = x25_connected,
183 .connect_indication = x25_connected,
184 .disconnect_confirmation = x25_disconnected,
185 .disconnect_indication = x25_disconnected,
186 .data_indication = x25_data_indication,
187 .data_transmit = x25_data_transmit,
189 hdlc_device *hdlc = dev_to_hdlc(dev);
190 struct x25_state *x25st = state(hdlc);
191 struct lapb_parms_struct params;
194 result = lapb_register(dev, &cb);
195 if (result != LAPB_OK)
198 result = lapb_getparms(dev, ¶ms);
199 if (result != LAPB_OK)
202 if (state(hdlc)->settings.dce)
203 params.mode = params.mode | LAPB_DCE;
205 if (state(hdlc)->settings.modulo == 128)
206 params.mode = params.mode | LAPB_EXTENDED;
208 params.window = state(hdlc)->settings.window;
209 params.t1 = state(hdlc)->settings.t1;
210 params.t2 = state(hdlc)->settings.t2;
211 params.n2 = state(hdlc)->settings.n2;
213 result = lapb_setparms(dev, ¶ms);
214 if (result != LAPB_OK)
217 spin_lock_bh(&x25st->up_lock);
219 spin_unlock_bh(&x25st->up_lock);
/* ndo_stop: take the X.25 protocol down — mark the link down under up_lock,
 * unregister from LAPB and stop the rx delivery tasklet.
 */
static void x25_close(struct net_device *dev)
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct x25_state *x25st = state(hdlc);

	spin_lock_bh(&x25st->up_lock);
	/* NOTE(review): x25st->up is presumably cleared here on a line not
	 * visible in this view — confirm against the complete source.
	 */
	spin_unlock_bh(&x25st->up_lock);

	lapb_unregister(dev);
	tasklet_kill(&x25st->rx_tasklet);
/* Receive entry point called by the generic HDLC layer for each frame from
 * the hardware driver.  Passes the frame to LAPB when the link is up;
 * otherwise counts it as dropped.  Error/drop returns are on lines not
 * visible in this view.
 */
static int x25_rx(struct sk_buff *skb)
	struct net_device *dev = skb->dev;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct x25_state *x25st = state(hdlc);

	/* get a private copy if the skb is shared */
	skb = skb_share_check(skb, GFP_ATOMIC);
		dev->stats.rx_dropped++;

	spin_lock_bh(&x25st->up_lock);
	/* NOTE(review): link-up test presumably guards this drop path — confirm */
		spin_unlock_bh(&x25st->up_lock);

		dev->stats.rx_dropped++;

	if (lapb_data_received(dev, skb) == LAPB_OK) {
		spin_unlock_bh(&x25st->up_lock);
		return NET_RX_SUCCESS;

	/* LAPB rejected the frame */
	spin_unlock_bh(&x25st->up_lock);
	dev->stats.rx_errors++;
	dev_kfree_skb_any(skb);
/* Registration record hooking this protocol into the generic HDLC layer;
 * op pointers (open/close/ioctl/netif_rx/xmit) are on lines not visible here.
 */
static struct hdlc_proto proto = {
	.module = THIS_MODULE,
/* SIOCWANDEV handler: get or set the X.25/LAPB protocol configuration.
 * The get path copies current settings to userspace; the set path validates
 * the new settings, attaches this protocol to the device and initializes the
 * per-device state.  The switch-case labels and several early returns are on
 * lines not visible in this view.
 */
static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
	x25_hdlc_proto __user *x25_s = ifr->ifr_settings.ifs_ifsu.x25;
	const size_t size = sizeof(x25_hdlc_proto);
	hdlc_device *hdlc = dev_to_hdlc(dev);
	x25_hdlc_proto new_settings;

	switch (ifr->ifr_settings.type) {
		/* get path: only valid if this protocol is attached */
		if (dev_to_hdlc(dev)->proto != &proto)
		ifr->ifr_settings.type = IF_PROTO_X25;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
		if (copy_to_user(x25_s, &state(hdlc)->settings, size))

		/* set path: privileged, and only while the interface is down */
		if (!capable(CAP_NET_ADMIN))

		if (dev->flags & IFF_UP)

		/* backward compatibility */
		if (ifr->ifr_settings.size == 0) {
			new_settings.dce = 0;
			new_settings.modulo = 8;
			new_settings.window = 7;
			new_settings.n2 = 10;

		if (copy_from_user(&new_settings, x25_s, size))

		/* range-check all fields; window limit depends on modulo */
		if ((new_settings.dce != 0 &&
		     new_settings.dce != 1) ||
		    (new_settings.modulo != 8 &&
		     new_settings.modulo != 128) ||
		    new_settings.window < 1 ||
		    (new_settings.modulo == 8 &&
		     new_settings.window > 7) ||
		    (new_settings.modulo == 128 &&
		     new_settings.window > 127) ||
		    new_settings.t1 < 1 ||
		    new_settings.t1 > 255 ||
		    new_settings.t2 < 1 ||
		    new_settings.t2 > 255 ||
		    new_settings.n2 < 1 ||
		    new_settings.n2 > 255)

		/* configure the hardware line coding, then attach the protocol */
		result = hdlc->attach(dev, ENCODING_NRZ,
				      PARITY_CRC16_PR1_CCITT);

		result = attach_hdlc_protocol(dev, &proto,
					      sizeof(struct x25_state));

		/* initialize per-device protocol state */
		memcpy(&state(hdlc)->settings, &new_settings, size);
		state(hdlc)->up = false;
		spin_lock_init(&state(hdlc)->up_lock);
		skb_queue_head_init(&state(hdlc)->rx_queue);
		tasklet_setup(&state(hdlc)->rx_tasklet, x25_rx_queue_kick);

		/* There's no header_ops so hard_header_len should be 0. */
		dev->hard_header_len = 0;
		/* When transmitting data:
		 * first we'll remove a pseudo header of 1 byte,
		 * then we'll prepend an LAPB header of at most 3 bytes.
		 */
		dev->needed_headroom = 3 - 1;

		dev->type = ARPHRD_X25;
		call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
		netif_dormant_off(dev);
/* Module init: register the X.25 protocol with the generic HDLC layer. */
static int __init mod_init(void)
	register_hdlc_protocol(&proto);
/* Module exit: remove the protocol registration. */
static void __exit mod_exit(void)
	unregister_hdlc_protocol(&proto);
379 module_init(mod_init);
380 module_exit(mod_exit);
382 MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
383 MODULE_DESCRIPTION("X.25 protocol support for generic HDLC");
384 MODULE_LICENSE("GPL v2");