1 // SPDX-License-Identifier: GPL-2.0-or-later
4 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
5 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
6 * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
8 #include <linux/errno.h>
9 #include <linux/types.h>
10 #include <linux/socket.h>
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/timer.h>
15 #include <linux/string.h>
16 #include <linux/sockios.h>
17 #include <linux/spinlock.h>
18 #include <linux/net.h>
19 #include <linux/slab.h>
21 #include <linux/inet.h>
22 #include <linux/netdevice.h>
23 #include <linux/skbuff.h>
25 #include <linux/uaccess.h>
26 #include <linux/fcntl.h>
28 #include <linux/interrupt.h>
/* Serializes the clone/allocate step of outgoing fragmentation in
 * ax25_output() (taken with spin_lock_bh() around alloc_skb there). */
30 static DEFINE_SPINLOCK(ax25_frag_lock);
/*
 * ax25_send_frame() - queue @skb for transmission to @dest over @dev.
 *
 * Reuses an existing connection matching (src, dest, digi, dev) when one
 * is found; otherwise allocates and fills in a fresh ax25_cb, copies the
 * digipeater path, starts link establishment according to the device's
 * protocol mode, and queues the frame.  Returns the ax25_cb in use; the
 * visible NULL checks imply NULL is returned on failure (no AX.25 device,
 * no memory) -- the actual "return NULL" lines are elided in this extract.
 *
 * NOTE(review): this extract is elided -- closing braces, break/return
 * statements and some lines between the numbered lines are not visible.
 */
32 ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, const ax25_address *src, ax25_address *dest, ax25_digi *digi, struct net_device *dev)
38 * Take the default packet length for the device if zero is
/* Resolve the AX.25 device behind @dev; without one we cannot send. */
42 if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
/* paclen == 0 selects the per-device default (see comment fragment above). */
45 paclen = ax25_dev->values[AX25_VALUES_PACLEN];
49 * Look for an existing connection.
/* Fast path: a matching control block already exists -- just queue on it. */
51 if ((ax25 = ax25_find_cb(src, dest, digi, dev)) != NULL) {
52 ax25_output(ax25, paclen, skb);
53 return ax25; /* It already existed */
/* NOTE(review): second ax25_dev_ax25dev() lookup duplicates the one at the
 * top of the function; appears redundant but harmless -- confirm upstream. */
56 if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
/* Slow path: build a brand-new control block. */
59 if ((ax25 = ax25_create_cb()) == NULL)
62 ax25_fillin_cb(ax25, ax25_dev);
64 ax25->source_addr = *src;
65 ax25->dest_addr = *dest;
/* Private copy of the caller's digipeater list; GFP_ATOMIC suggests this
 * may run in atomic/BH context -- confirm against callers. */
68 ax25->digipeat = kmemdup(digi, sizeof(*digi), GFP_ATOMIC);
69 if (ax25->digipeat == NULL) {
/* Kick off link establishment per the device's configured protocol. */
75 switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
76 case AX25_PROTO_STD_SIMPLEX:
77 case AX25_PROTO_STD_DUPLEX:
78 ax25_std_establish_data_link(ax25);
81 #ifdef CONFIG_AX25_DAMA_SLAVE
82 case AX25_PROTO_DAMA_SLAVE:
/* Use DAMA establishment only when a DAMA master is active on the device;
 * otherwise fall back to standard AX.25 establishment. */
83 if (ax25_dev->dama.slave)
84 ax25_ds_establish_data_link(ax25);
86 ax25_std_establish_data_link(ax25);
92 * There is one ref for the state machine; a caller needs
93 * one more to put it back, just like with the existing one.
/* Connection-request-sent state; data queued below is transmitted once
 * the link comes up. */
99 ax25->state = AX25_STATE_1;
101 ax25_start_heartbeat(ax25);
103 ax25_output(ax25, paclen, skb);
105 return ax25; /* We had to create it */
108 EXPORT_SYMBOL(ax25_send_frame);
111 * All outgoing AX.25 I frames pass via this routine. Therefore this is
112 * where the fragmentation of frames takes place. If fragment is set to
113 * zero then we are not allowed to do fragmentation, even if the frame
/*
 * ax25_output() - queue an outgoing I-frame payload, fragmenting it into
 * @paclen-sized segments when it is too large, then kick the transmitter
 * unless DAMA slave mode defers to the master's poll.
 *
 * NOTE(review): this extract is elided -- else-branches, breaks and
 * closing braces between the numbered lines are not visible here.
 */
116 void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb)
118 struct sk_buff *skbn;
120 int frontlen, len, fragno, ka9qfrag, first = 1;
/* skb->len - 1 excludes the leading PID byte when sizing against paclen. */
128 if ((skb->len - 1) > paclen) {
/* AX25_P_TEXT payloads use the KA9Q fragmentation scheme (2-byte
 * fragment header); the elided else-branch presumably handles the
 * 1-byte variant -- see the frontlen+1 path below. */
129 if (*skb->data == AX25_P_TEXT) {
130 skb_pull(skb, 1); /* skip PID */
133 paclen -= 2; /* Allow for fragment control info */
/* Number of fragments, counting down; exact multiples need one fewer. */
137 fragno = skb->len / paclen;
138 if (skb->len % paclen == 0) fragno--;
140 frontlen = skb_headroom(skb); /* Address space + CTRL */
142 while (skb->len > 0) {
/* ax25_frag_lock spans the allocation + ownership transfer of each
 * fragment skb. */
143 spin_lock_bh(&ax25_frag_lock);
144 if ((skbn = alloc_skb(paclen + 2 + frontlen, GFP_ATOMIC)) == NULL) {
145 spin_unlock_bh(&ax25_frag_lock);
/* Allocation failure mid-fragmentation: remaining data is dropped
 * (no visible requeue of the partial skb in this extract). */
146 printk(KERN_CRIT "AX.25: ax25_output - out of memory\n");
/* Charge the fragment to the originating socket's write budget. */
151 skb_set_owner_w(skbn, skb->sk);
153 spin_unlock_bh(&ax25_frag_lock);
/* Copy at most paclen bytes; the final fragment may be shorter. */
155 len = (paclen > skb->len) ? skb->len : paclen;
/* KA9Q framing: reserve room for headers, copy payload, then push the
 * 2-byte segment header (PID = AX25_P_SEGMENT + fragment counter). */
158 skb_reserve(skbn, frontlen + 2);
159 skb_set_network_header(skbn,
160 skb_network_offset(skb));
161 skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
162 p = skb_push(skbn, 2);
164 *p++ = AX25_P_SEGMENT;
/* First fragment of the series is flagged so the peer can reassemble. */
168 *p |= AX25_SEG_FIRST;
/* Non-KA9Q framing: only a single pushed byte precedes the payload. */
172 skb_reserve(skbn, frontlen + 1);
173 skb_set_network_header(skbn,
174 skb_network_offset(skb));
175 skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
176 p = skb_push(skbn, 1);
181 skb_queue_tail(&ax25->write_queue, skbn); /* Throw it on the queue */
/* No fragmentation needed: queue the original skb unchanged. */
186 skb_queue_tail(&ax25->write_queue, skb); /* Throw it on the queue */
/* Standard modes transmit immediately; the elided lines presumably call
 * ax25_kick() here -- confirm against the full source. */
189 switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
190 case AX25_PROTO_STD_SIMPLEX:
191 case AX25_PROTO_STD_DUPLEX:
195 #ifdef CONFIG_AX25_DAMA_SLAVE
197 * A DAMA slave is _required_ to work as normal AX.25L2V2
198 * if no DAMA master is available.
200 case AX25_PROTO_DAMA_SLAVE:
201 if (!ax25->ax25_dev->dama.slave) ax25_kick(ax25);
208 * This procedure is passed a buffer descriptor for an iframe. It builds
209 * the rest of the control part of the frame and then writes it out.
/*
 * ax25_send_iframe() - prepend the I-frame control field (1 byte for
 * modulo-8, 2 bytes for modulo-128 extended mode) carrying V(S), V(R)
 * and the poll bit, restart the idle timer, and hand the frame to
 * ax25_transmit_buffer() as a command frame.
 */
211 static void ax25_send_iframe(ax25_cb *ax25, struct sk_buff *skb, int poll_bit)
213 unsigned char *frame;
218 skb_reset_network_header(skb);
/* Modulo-8 (standard) mode: single control byte
 * -- P/F in bit 4, N(R) in bits 5-7, N(S) in bits 1-3. */
220 if (ax25->modulus == AX25_MODULUS) {
221 frame = skb_push(skb, 1);
224 *frame |= (poll_bit) ? AX25_PF : 0;
225 *frame |= (ax25->vr << 5);
226 *frame |= (ax25->vs << 1);
/* Modulo-128 (extended) mode: two control bytes
 * -- byte 0 carries N(S), byte 1 carries EPF and N(R). */
228 frame = skb_push(skb, 2);
231 frame[0] |= (ax25->vs << 1);
232 frame[1] = (poll_bit) ? AX25_EPF : 0;
233 frame[1] |= (ax25->vr << 1);
/* Outgoing traffic resets the idle (link tear-down) timer. */
236 ax25_start_idletimer(ax25);
238 ax25_transmit_buffer(ax25, skb, AX25_COMMAND);
/*
 * ax25_kick() - transmit queued I frames while the send window allows.
 *
 * Clones each frame off the write queue so the original can sit on the
 * ack queue awaiting acknowledgement (and be retransmitted from there).
 * Does nothing unless the link is in a connected state and the peer is
 * not flow-controlled.
 *
 * NOTE(review): this extract is elided -- returns, loop braces and the
 * V(S) update between the numbered lines are not visible here.
 */
241 void ax25_kick(ax25_cb *ax25)
243 struct sk_buff *skb, *skbn;
245 unsigned short start, end, next;
/* Only states 3 (connected) and 4 (timer recovery) may transmit I frames. */
247 if (ax25->state != AX25_STATE_3 && ax25->state != AX25_STATE_4)
/* Peer sent RNR: hold off until it reports ready again. */
250 if (ax25->condition & AX25_COND_PEER_RX_BUSY)
253 if (skb_peek(&ax25->write_queue) == NULL)
/* Window bounds: resume from V(S) if frames are already outstanding,
 * else from V(A); 'end' is the first sequence number outside the window. */
256 start = (skb_peek(&ax25->ack_queue) == NULL) ? ax25->va : ax25->vs;
257 end = (ax25->va + ax25->window) % ax25->modulus;
263 * Transmit data until either we're out of data to send or
264 * the window is full. Send a poll on the final I frame if
265 * the window is filled.
269 * Dequeue the frame and copy it.
270 * Check for race with ax25_clear_queues().
272 skb = skb_dequeue(&ax25->write_queue);
/* Clone failed: put the frame back and try again on the next kick. */
279 if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
280 skb_queue_head(&ax25->write_queue, skb);
/* Clone loses socket ownership; reattach it for accounting. */
285 skb_set_owner_w(skbn, skb->sk);
287 next = (ax25->vs + 1) % ax25->modulus;
288 last = (next == end);
291 * Transmit the frame copy.
292 * bke 960114: do not set the Poll bit on the last frame
295 switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
296 case AX25_PROTO_STD_SIMPLEX:
297 case AX25_PROTO_STD_DUPLEX:
/* Poll on the frame that fills the window to solicit an immediate ack. */
298 ax25_send_iframe(ax25, skbn, (last) ? AX25_POLLON : AX25_POLLOFF);
301 #ifdef CONFIG_AX25_DAMA_SLAVE
/* DAMA slaves never poll; the master schedules transmissions. */
302 case AX25_PROTO_DAMA_SLAVE:
303 ax25_send_iframe(ax25, skbn, AX25_POLLOFF);
311 * Requeue the original data frame.
/* Original goes on the ack queue until the peer acknowledges it. */
313 skb_queue_tail(&ax25->ack_queue, skb);
315 } while (!last && (skb = skb_dequeue(&ax25->write_queue)) != NULL);
317 ax25->condition &= ~AX25_COND_ACK_PENDING;
/* Arm T1 (retransmission) in place of T3 (idle supervision) while
 * unacknowledged frames are outstanding. */
319 if (!ax25_t1timer_running(ax25)) {
320 ax25_stop_t3timer(ax25);
321 ax25_calculate_t1(ax25);
322 ax25_start_t1timer(ax25);
/*
 * ax25_transmit_buffer() - prepend the full AX.25 address field
 * (source, destination, digipeater path) to @skb and hand it to
 * ax25_queue_xmit() on the link's device.  @type selects command vs
 * response addressing.  Disconnects the link if its device vanished.
 */
326 void ax25_transmit_buffer(ax25_cb *ax25, struct sk_buff *skb, int type)
/* Device went away under us: tear the connection down as unreachable. */
331 if (ax25->ax25_dev == NULL) {
332 ax25_disconnect(ax25, ENETUNREACH);
/* Headroom needed for the variable-length address field (digis included). */
336 headroom = ax25_addr_size(ax25->digipeat);
/* Grow the skb head in place when the caller left too little headroom;
 * skb_expand_head() frees the old skb on failure (no leak here). */
338 if (unlikely(skb_headroom(skb) < headroom)) {
339 skb = skb_expand_head(skb, headroom);
341 printk(KERN_CRIT "AX.25: ax25_transmit_buffer - out of memory\n");
346 ptr = skb_push(skb, headroom);
348 ax25_addr_build(ptr, &ax25->source_addr, &ax25->dest_addr, ax25->digipeat, type, ax25->modulus);
350 ax25_queue_xmit(skb, ax25->ax25_dev->dev);
354 * A small shim to dev_queue_xmit to add the KISS control byte, and do
355 * any packet forwarding in operation.
/*
 * ax25_queue_xmit() - final hop before the driver: resolve the forwarding
 * device, prepend the KISS data-frame marker (0x00), and (per the shim
 * comment above) pass the skb to dev_queue_xmit() -- the actual xmit call
 * is elided from this extract.
 */
357 void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev)
/* ax25_fwd_dev() substitutes the forwarding device when one is configured. */
361 skb->protocol = ax25_type_trans(skb, ax25_fwd_dev(dev));
363 ptr = skb_push(skb, 1);
364 *ptr = 0x00; /* KISS */
369 int ax25_check_iframes_acked(ax25_cb *ax25, unsigned short nr)
371 if (ax25->vs == nr) {
372 ax25_frames_acked(ax25, nr);
373 ax25_calculate_rtt(ax25);
374 ax25_stop_t1timer(ax25);
375 ax25_start_t3timer(ax25);
378 if (ax25->va != nr) {
379 ax25_frames_acked(ax25, nr);
380 ax25_calculate_t1(ax25);
381 ax25_start_t1timer(ax25);