// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for high-speed SCC boards (those with DMA support)
 * Copyright (C) 1997-2000 Klaus Kudielka
 *
 * S5SCC/DMA support by Janko Koleznik S52HI
 */

#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <net/ax25.h>
#include "z8530.h"

/* Number of buffers per channel */

#define NUM_TX_BUF      2	/* NUM_TX_BUF >= 1 (min. 2 recommended) */
#define NUM_RX_BUF      6	/* NUM_RX_BUF >= 1 (min. 2 recommended) */
#define BUF_SIZE        1576	/* BUF_SIZE >= mtu + hard_header_len */
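
/*
 * Sizing note (my arithmetic, based on the AX.25 defaults set in dev_setup()
 * below rather than on anything stated here): hard_header_len is
 * AX25_MAX_HEADER_LEN, so BUF_SIZE = 1576 bounds the usable MTU at roughly
 * BUF_SIZE - AX25_MAX_HEADER_LEN bytes, per the rule in the comment above.
 */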

#define HW_PI	  { "Ottawa PI", 0x300, 0x20, 0x10, 8, \
		    0, 8, 1843200, 3686400 }
#define HW_PI2	  { "Ottawa PI2", 0x300, 0x20, 0x10, 8, \
		    0, 8, 3686400, 7372800 }
#define HW_TWIN	  { "Gracilis PackeTwin", 0x200, 0x10, 0x10, 32, \
		    0, 4, 6144000, 6144000 }
#define HW_S5	  { "S5SCC/DMA", 0x200, 0x10, 0x10, 32, \
		    0, 8, 4915200, 9830400 }

#define HARDWARE  { HW_PI, HW_PI2, HW_TWIN, HW_S5 }
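
/*
 * Worked example (derived from the HW_PI entry above and the probe loop in
 * dmascc_init(), not additional configuration): the Ottawa PI line probes
 * num_devs = 8 candidate cards starting at io_region 0x300, stepping
 * io_delta 0x20, i.e. 0x300, 0x320, ... 0x3e0, reserving io_size = 0x10
 * ports at each hit, with an 1843200 Hz timer clock and a 3686400 Hz SCC
 * clock (pclk).
 */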

#define TMR_0_HZ      25600	/* Frequency of timer 0 */

#define MAX_NUM_DEVS  32

/* SCC chips supported */

#define CHIPNAMES     { "Z8530", "Z85C30", "Z85230" }

/* 8530 registers relative to card base */
#define SCCB_CMD      0x00
#define SCCB_DATA     0x01
#define SCCA_CMD      0x02
#define SCCA_DATA     0x03

/* 8253/8254 registers relative to card base */
#define TMR_CNT0      0x00
#define TMR_CNT1      0x01
#define TMR_CNT2      0x02
#define TMR_CTRL      0x03

/* Additional PI/PI2 registers relative to card base */
#define PI_DREQ_MASK  0x04

/* Additional PackeTwin registers relative to card base */
#define TWIN_INT_REG    0x08
#define TWIN_CLR_TMR1   0x09
#define TWIN_CLR_TMR2   0x0a
#define TWIN_SPARE_1    0x0b
#define TWIN_DMA_CFG    0x08
#define TWIN_SERIAL_CFG 0x09
#define TWIN_DMA_CLR_FF 0x0a
#define TWIN_SPARE_2    0x0b

/* PackeTwin I/O register values */

#define TWIN_SCC_MSK       0x01
#define TWIN_TMR1_MSK      0x02
#define TWIN_TMR2_MSK      0x04
#define TWIN_INT_MSK       0x07

#define TWIN_DTRA_ON       0x01
#define TWIN_DTRB_ON       0x02
#define TWIN_EXTCLKA       0x04
#define TWIN_EXTCLKB       0x08
#define TWIN_LOOPA_ON      0x10
#define TWIN_LOOPB_ON      0x20

#define TWIN_DMA_HDX_T1    0x08
#define TWIN_DMA_HDX_R1    0x0a
#define TWIN_DMA_HDX_T3    0x14
#define TWIN_DMA_HDX_R3    0x16
#define TWIN_DMA_FDX_T3R1  0x1b
#define TWIN_DMA_FDX_T1R3  0x1d

#define SIOCGSCCPARAM SIOCDEVPRIVATE
#define SIOCSSCCPARAM (SIOCDEVPRIVATE+1)

struct scc_param {
	int pclk_hz;		/* frequency of BRG input (don't change) */
	int brg_tc;		/* BRG terminal count; BRG disabled if < 0 */
	int nrzi;		/* 0 (nrz), 1 (nrzi) */
	int clocks;		/* see dmascc_cfg documentation */
	int txdelay;		/* [1/TMR_0_HZ] */
	int txtimeout;		/* [1/HZ] */
	int txtail;		/* [1/TMR_0_HZ] */
	int waittime;		/* [1/TMR_0_HZ] */
	int slottime;		/* [1/TMR_0_HZ] */
	int persist;		/* 1 ... 256 */
	int dma;		/* -1 (disable), 0, 1, 3 */
	int txpause;		/* [1/TMR_0_HZ] */
	int rtsoff;		/* [1/TMR_0_HZ] */
	int dcdon;		/* [1/TMR_0_HZ] */
	int dcdoff;		/* [1/TMR_0_HZ] */
};
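
/*
 * Illustrative user-space sketch (an assumption about typical usage, not
 * part of the driver): the two private ioctls above exchange a whole
 * struct scc_param through ifr_data, roughly like
 *
 *	struct ifreq ifr;
 *	struct scc_param param;
 *
 *	strncpy(ifr.ifr_name, "dmascc0", IFNAMSIZ);
 *	ifr.ifr_data = (void *) &param;
 *	ioctl(fd, SIOCGSCCPARAM, &ifr);		read current parameters
 *	param.persist = 128;			example tweak (range 1 ... 256)
 *	ioctl(fd, SIOCSSCCPARAM, &ifr);		write back (only while the
 *						interface is down)
 *
 * where fd is an ordinary socket and "dmascc0" is an example device name.
 */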

struct scc_hardware {
	char *name;
	int io_region;
	int io_delta;
	int io_size;
	int num_devs;
	int scc_offset;
	int tmr_offset;
	int tmr_hz;
	int pclk_hz;
};

struct scc_priv {
	int type;
	int chip;
	struct net_device *dev;
	struct scc_info *info;
	int channel;
	int card_base, scc_cmd, scc_data;
	int tmr_cnt, tmr_ctrl, tmr_mode;
	struct scc_param param;
	char rx_buf[NUM_RX_BUF][BUF_SIZE];
	int rx_len[NUM_RX_BUF];
	int rx_ptr;
	struct work_struct rx_work;
	int rx_head, rx_tail, rx_count;
	int rx_over;
	char tx_buf[NUM_TX_BUF][BUF_SIZE];
	int tx_len[NUM_TX_BUF];
	int tx_ptr;
	int tx_head, tx_tail, tx_count;
	int state;
	unsigned long tx_start;
	int rr0;
	spinlock_t *register_lock;	/* Per scc_info */
	spinlock_t ring_lock;
};

struct scc_info {
	int irq_used;
	int twin_serial_cfg;
	struct net_device *dev[2];
	struct scc_priv priv[2];
	struct scc_info *next;
	spinlock_t register_lock;	/* Per device register lock */
};

/* Function declarations */
static int setup_adapter(int card_base, int type, int n) __init;

static void write_scc(struct scc_priv *priv, int reg, int val);
static void write_scc_data(struct scc_priv *priv, int val, int fast);
static int read_scc(struct scc_priv *priv, int reg);
static int read_scc_data(struct scc_priv *priv);

static int scc_open(struct net_device *dev);
static int scc_close(struct net_device *dev);
static int scc_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
			      void __user *data, int cmd);
static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
static int scc_set_mac_address(struct net_device *dev, void *sa);

static inline void tx_on(struct scc_priv *priv);
static inline void rx_on(struct scc_priv *priv);
static inline void rx_off(struct scc_priv *priv);
static void start_timer(struct scc_priv *priv, int t, int r15);
static inline unsigned char random(void);

static inline void z8530_isr(struct scc_info *info);
static irqreturn_t scc_isr(int irq, void *dev_id);
static void rx_isr(struct scc_priv *priv);
static void special_condition(struct scc_priv *priv, int rc);
static void rx_bh(struct work_struct *);
static void tx_isr(struct scc_priv *priv);
static void es_isr(struct scc_priv *priv);
static void tm_isr(struct scc_priv *priv);

/* Initialization variables */

static int io[MAX_NUM_DEVS] __initdata = { 0, };

/* Beware! hw[] is also used in dmascc_exit(). */
static struct scc_hardware hw[NUM_TYPES] = HARDWARE;

/* Global variables */

static struct scc_info *first;
static unsigned long rand;

MODULE_AUTHOR("Klaus Kudielka");
MODULE_DESCRIPTION("Driver for high-speed SCC boards");
module_param_hw_array(io, int, ioport, NULL, 0);
MODULE_LICENSE("GPL");
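
/*
 * Loading sketch (example addresses only): the io parameter lists the base
 * addresses to probe instead of autoprobing, e.g.
 *
 *	modprobe dmascc io=0x300,0x320
 *
 * With no io= argument, dmascc_init() falls back to the autoprobe it warns
 * about below.
 */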

static void __exit dmascc_exit(void)
{
	int i;
	struct scc_info *info;

	while (first) {
		info = first;

		/* Unregister devices */
		for (i = 0; i < 2; i++)
			unregister_netdev(info->dev[i]);

		if (info->priv[0].type == TYPE_TWIN)
			outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
		write_scc(&info->priv[0], R9, FHWRES);
		release_region(info->dev[0]->base_addr,
			       hw[info->priv[0].type].io_size);

		for (i = 0; i < 2; i++)
			free_netdev(info->dev[i]);

		first = info->next;
		kfree(info);
	}
}

static int __init dmascc_init(void)
{
	int base[MAX_NUM_DEVS], tcmd[MAX_NUM_DEVS], t0[MAX_NUM_DEVS],
	    t1[MAX_NUM_DEVS];
	unsigned long time, start[MAX_NUM_DEVS], delay[MAX_NUM_DEVS],
	    counting[MAX_NUM_DEVS];

	/* Initialize random number generator */
	/* Cards found = 0 */
	/* Warning message */
	printk(KERN_INFO "dmascc: autoprobing (dangerous)\n");

	/* Run autodetection for each card type */
	for (h = 0; h < NUM_TYPES; h++) {

		/* User-specified I/O address regions */
		for (i = 0; i < hw[h].num_devs; i++)
		for (i = 0; i < MAX_NUM_DEVS && io[i]; i++) {
			     hw[h].io_region) / hw[h].io_delta;
			if (j >= 0 && j < hw[h].num_devs &&
			    j * hw[h].io_delta == io[i]) {

		/* Default I/O address regions */
		for (i = 0; i < hw[h].num_devs; i++) {
			    hw[h].io_region + i * hw[h].io_delta;

		/* Check valid I/O address regions */
		for (i = 0; i < hw[h].num_devs; i++)
			    (base[i], hw[h].io_size, "dmascc"))
				    base[i] + hw[h].tmr_offset +
				    base[i] + hw[h].tmr_offset +
				    base[i] + hw[h].tmr_offset +

		for (i = 0; i < hw[h].num_devs; i++)
			/* Timer 0: LSB+MSB, Mode 3, TMR_0_HZ */
			outb((hw[h].tmr_hz / TMR_0_HZ) & 0xFF,
			outb((hw[h].tmr_hz / TMR_0_HZ) >> 8,
			/* Timer 1: LSB+MSB, Mode 0, HZ/10 */
			outb((TMR_0_HZ / HZ * 10) & 0xFF, t1[i]);
			outb((TMR_0_HZ / HZ * 10) >> 8, t1[i]);
			/* Timer 2: LSB+MSB, Mode 0 */

		/* Wait until counter registers are loaded */
		udelay(2000000 / TMR_0_HZ);

		while (jiffies - time < 13) {
			for (i = 0; i < hw[h].num_devs; i++)
				if (base[i] && counting[i]) {
					/* Read back Timer 1: latch; read LSB; read MSB */
					    inb(t1[i]) + (inb(t1[i]) << 8);
					/* Also check whether counter did wrap */
					    t_val > TMR_0_HZ / HZ * 10)
						delay[i] = jiffies - start[i];

		/* Evaluate measurements */
		for (i = 0; i < hw[h].num_devs; i++)
			if ((delay[i] >= 9 && delay[i] <= 11) &&
			    /* Ok, we have found an adapter */
			    (setup_adapter(base[i], h, n) == 0))
			release_region(base[i],
	}

	/* If any adapter was successfully initialized, return ok */

	/* If no adapter found, return error */
	printk(KERN_INFO "dmascc: no adapters found\n");
}
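
/*
 * Probe math, spelled out (a reading aid; the constants come from the code
 * above): timer 0 divides hw[h].tmr_hz down to TMR_0_HZ, and timer 1 is
 * loaded with TMR_0_HZ / HZ * 10 counts, so on real hardware it should take
 * about 10 jiffies to expire.  A measured delay of 9..11 jiffies is
 * therefore accepted as evidence that a card is present at base[i], and
 * setup_adapter() is attempted; anything else releases the I/O region.
 */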

module_init(dmascc_init);
module_exit(dmascc_exit);

static void __init dev_setup(struct net_device *dev)
{
	dev->type = ARPHRD_AX25;
	dev->hard_header_len = AX25_MAX_HEADER_LEN;
	dev->addr_len = AX25_ADDR_LEN;
	dev->tx_queue_len = 64;
	memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
	memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
}

static const struct net_device_ops scc_netdev_ops = {
	.ndo_open = scc_open,
	.ndo_stop = scc_close,
	.ndo_start_xmit = scc_send_packet,
	.ndo_siocdevprivate = scc_siocdevprivate,
	.ndo_set_mac_address = scc_set_mac_address,
};

static int __init setup_adapter(int card_base, int type, int n)
{
	int i, irq, chip, err;
	struct scc_info *info;
	struct net_device *dev;
	struct scc_priv *priv;
	int tmr_base = card_base + hw[type].tmr_offset;
	int scc_base = card_base + hw[type].scc_offset;
	char *chipnames[] = CHIPNAMES;

	/* Initialize what is necessary for write_scc and write_scc_data */
	info = kzalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);

	info->dev[0] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup);
	if (!info->dev[0]) {
		printk(KERN_ERR "dmascc: "
		       "could not allocate memory for %s at %#3x\n",
		       hw[type].name, card_base);
	}

	info->dev[1] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup);
	if (!info->dev[1]) {
		printk(KERN_ERR "dmascc: "
		       "could not allocate memory for %s at %#3x\n",
		       hw[type].name, card_base);
	}
	spin_lock_init(&info->register_lock);

	priv = &info->priv[0];
	priv->card_base = card_base;
	priv->scc_cmd = scc_base + SCCA_CMD;
	priv->scc_data = scc_base + SCCA_DATA;
	priv->register_lock = &info->register_lock;
	write_scc(priv, R9, FHWRES | MIE | NV);

	/* Determine type of chip by enabling SDLC/HDLC enhancements */
	write_scc(priv, R15, SHDLCE);
	if (!read_scc(priv, R15)) {
		/* WR7' not present. This is an ordinary Z8530 SCC. */
		chip = Z8530;
	} else {
		/* Put one character in TX FIFO */
		write_scc_data(priv, 0, 0);
		if (read_scc(priv, R0) & Tx_BUF_EMP) {
			/* TX FIFO not full. This is a Z85230 ESCC with a 4-byte FIFO. */
			chip = Z85230;
		} else {
			/* TX FIFO full. This is a Z85C30 SCC with a 1-byte FIFO. */
			chip = Z85C30;
		}
	}
	write_scc(priv, R15, 0);

	/* Start IRQ auto-detection */
	irqs = probe_irq_on();

	/* Enable interrupts */
	if (type == TYPE_TWIN) {
		outb(0, card_base + TWIN_DMA_CFG);
		inb(card_base + TWIN_CLR_TMR1);
		inb(card_base + TWIN_CLR_TMR2);
		info->twin_serial_cfg = TWIN_EI;
		outb(info->twin_serial_cfg, card_base + TWIN_SERIAL_CFG);
	} else {
		write_scc(priv, R15, CTSIE);
		write_scc(priv, R0, RES_EXT_INT);
		write_scc(priv, R1, EXT_INT_ENAB);
	}

	outb(1, tmr_base + TMR_CNT1);
	outb(0, tmr_base + TMR_CNT1);

	/* Wait and detect IRQ */
	while (jiffies - time < 2 + HZ / TMR_0_HZ);
	irq = probe_irq_off(irqs);

	/* Clear pending interrupt, disable interrupts */
	if (type == TYPE_TWIN) {
		inb(card_base + TWIN_CLR_TMR1);
	} else {
		write_scc(priv, R1, 0);
		write_scc(priv, R15, 0);
		write_scc(priv, R0, RES_EXT_INT);
	}

	if (irq <= 0) {
		printk(KERN_ERR
		       "dmascc: could not find irq of %s at %#3x (irq=%d)\n",
		       hw[type].name, card_base, irq);
	}

	/* Set up data structures */
	for (i = 0; i < 2; i++) {
		dev = info->dev[i];
		priv = &info->priv[i];
		priv->type = type;
		priv->chip = chip;
		priv->dev = dev;
		priv->info = info;
		priv->channel = i;
		spin_lock_init(&priv->ring_lock);
		priv->register_lock = &info->register_lock;
		priv->card_base = card_base;
		priv->scc_cmd = scc_base + (i ? SCCB_CMD : SCCA_CMD);
		priv->scc_data = scc_base + (i ? SCCB_DATA : SCCA_DATA);
		priv->tmr_cnt = tmr_base + (i ? TMR_CNT2 : TMR_CNT1);
		priv->tmr_ctrl = tmr_base + TMR_CTRL;
		priv->tmr_mode = i ? 0xb0 : 0x70;
		priv->param.pclk_hz = hw[type].pclk_hz;
		priv->param.brg_tc = -1;
		priv->param.clocks = TCTRxCP | RCRTxCP;
		priv->param.persist = 256;
		priv->param.dma = -1;
		INIT_WORK(&priv->rx_work, rx_bh);
		dev->ml_priv = priv;
		snprintf(dev->name, sizeof(dev->name), "dmascc%i", 2 * n + i);
		dev->base_addr = card_base;
		dev->irq = irq;
		dev->netdev_ops = &scc_netdev_ops;
		dev->header_ops = &ax25_header_ops;
	}
	if (register_netdev(info->dev[0])) {
		printk(KERN_ERR "dmascc: could not register %s\n",
		       info->dev[0]->name);
	}
	if (register_netdev(info->dev[1])) {
		printk(KERN_ERR "dmascc: could not register %s\n",
		       info->dev[1]->name);
	}

	printk(KERN_INFO "dmascc: found %s (%s) at %#3x, irq %d\n",
	       hw[type].name, chipnames[chip], card_base, irq);
	return 0;

	unregister_netdev(info->dev[0]);

	if (info->priv[0].type == TYPE_TWIN)
		outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
	write_scc(&info->priv[0], R9, FHWRES);
	free_netdev(info->dev[1]);

	free_netdev(info->dev[0]);

/* Driver functions */

static void write_scc(struct scc_priv *priv, int reg, int val)
{
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		outb(reg, priv->scc_cmd);
		outb(val, priv->scc_cmd);
		return;
	case TYPE_TWIN:
		outb_p(reg, priv->scc_cmd);
		outb_p(val, priv->scc_cmd);
		return;
	default:
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		outb_p(reg, priv->scc_cmd);
		outb_p(val, priv->scc_cmd);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
	}
}

static void write_scc_data(struct scc_priv *priv, int val, int fast)
{
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		outb(val, priv->scc_data);
		return;
	case TYPE_TWIN:
		outb_p(val, priv->scc_data);
		return;
	default:
		if (fast)
			outb_p(val, priv->scc_data);
		else {
			spin_lock_irqsave(priv->register_lock, flags);
			outb_p(0, priv->card_base + PI_DREQ_MASK);
			outb_p(val, priv->scc_data);
			outb(1, priv->card_base + PI_DREQ_MASK);
			spin_unlock_irqrestore(priv->register_lock, flags);
		}
		return;
	}
}

static int read_scc(struct scc_priv *priv, int reg)
{
	int rc;
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		outb(reg, priv->scc_cmd);
		return inb(priv->scc_cmd);
	case TYPE_TWIN:
		outb_p(reg, priv->scc_cmd);
		return inb_p(priv->scc_cmd);
	default:
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		outb_p(reg, priv->scc_cmd);
		rc = inb_p(priv->scc_cmd);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return rc;
	}
}

static int read_scc_data(struct scc_priv *priv)
{
	int rc;
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		return inb(priv->scc_data);
	case TYPE_TWIN:
		return inb_p(priv->scc_data);
	default:
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		rc = inb_p(priv->scc_data);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return rc;
	}
}
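
/*
 * Note on the PI/PI2 access pattern above (explanatory, inferred from the
 * code rather than quoted from board documentation): on these cards the DMA
 * request line is gated through PI_DREQ_MASK, so every programmed-I/O access
 * to the SCC first writes 0 to mask DREQ, touches the chip, then writes 1 to
 * unmask it again, all under register_lock so the two channels cannot
 * interleave their register accesses.
 */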

static int scc_open(struct net_device *dev)
{
	struct scc_priv *priv = dev->ml_priv;
	struct scc_info *info = priv->info;
	int card_base = priv->card_base;

	/* Request IRQ if not already used by other channel */
	if (!info->irq_used) {
		if (request_irq(dev->irq, scc_isr, 0, "dmascc", info)) {
			return -EAGAIN;
		}
	}
	info->irq_used++;

	/* Request DMA if required */
	if (priv->param.dma >= 0) {
		if (request_dma(priv->param.dma, "dmascc")) {
			if (--info->irq_used == 0)
				free_irq(dev->irq, info);
			return -EAGAIN;
		} else {
			unsigned long flags = claim_dma_lock();
			clear_dma_ff(priv->param.dma);
			release_dma_lock(flags);
		}
	}

	/* Initialize local variables */
	priv->rx_head = priv->rx_tail = priv->rx_count = 0;
	priv->tx_head = priv->tx_tail = priv->tx_count = 0;
	write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
	/* X1 clock, SDLC mode */
	write_scc(priv, R4, SDLC | X1CLK);
	write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
	/* 8 bit RX char, RX disable */
	write_scc(priv, R3, Rx8);
	/* 8 bit TX char, TX disable */
	write_scc(priv, R5, Tx8);
	/* SDLC address field */
	write_scc(priv, R6, 0);
	write_scc(priv, R7, FLAG);
	switch (priv->chip) {
	case Z85C30:
		write_scc(priv, R15, SHDLCE);
		write_scc(priv, R7, AUTOEOM);
		write_scc(priv, R15, 0);
		break;
	case Z85230:
		write_scc(priv, R15, SHDLCE);
		/* The following bits are set (see 2.5.2.1):
		   - Automatic EOM reset
		   - Interrupt request if RX FIFO is half full
		   This bit should be ignored in DMA mode (according to the
		   documentation), but actually isn't. The receiver doesn't work if
		   it is set. Thus, we have to clear it in DMA mode.
		   - Interrupt/DMA request if TX FIFO is completely empty
		   a) If set, the ESCC behaves as if it had no TX FIFO (Z85C30
		   compatibility).
		   b) If cleared, DMA requests may follow each other very quickly,
		   filling up the TX FIFO.
		   Advantage: TX works even in case of high bus latency.
		   Disadvantage: Edge-triggered DMA request circuitry may miss
		   a request. No more data is delivered, resulting
		   in a TX FIFO underrun.
		   Both PI2 and S5SCC/DMA seem to work fine with TXFIFOE cleared.
		   The PackeTwin doesn't. I don't know about the PI, but let's
		   assume it behaves like the PI2. */
		if (priv->param.dma >= 0) {
			if (priv->type == TYPE_TWIN)
				write_scc(priv, R7, AUTOEOM | TXFIFOE);
			else
				write_scc(priv, R7, AUTOEOM);
		} else {
			write_scc(priv, R7, AUTOEOM | RXFIFOH);
		}
		write_scc(priv, R15, 0);
		break;
	}
	/* Preset CRC, NRZ(I) encoding */
	write_scc(priv, R10, CRCPS | (priv->param.nrzi ? NRZI : NRZ));

	/* Configure baud rate generator */
	if (priv->param.brg_tc >= 0) {
		/* Program BR generator */
		write_scc(priv, R12, priv->param.brg_tc & 0xFF);
		write_scc(priv, R13, (priv->param.brg_tc >> 8) & 0xFF);
		/* BRG source = SYS CLK; enable BRG; DTR REQ function (required by
		   PackeTwin, not connected on the PI2); set DPLL source to BRG */
		write_scc(priv, R14, SSBR | DTRREQ | BRSRC | BRENABL);
		write_scc(priv, R14, SEARCH | DTRREQ | BRSRC | BRENABL);
	} else {
		/* Disable BR generator */
		write_scc(priv, R14, DTRREQ | BRSRC);
	}
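
	/*
	 * Reference note (standard Z8530 behaviour, not a statement from this
	 * file): the baud rate generator divides its input clock by
	 * 2 * (time constant + 2), e.g. a 7372800 Hz input with brg_tc = 382
	 * yields 9600 Hz.  How that clock reaches the receiver/transmitter
	 * depends on priv->param.clocks, written to R11 below.
	 */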

	/* Configure clocks */
	if (priv->type == TYPE_TWIN) {
		/* Disable external TX clock receiver */
		outb((info->twin_serial_cfg &=
		      ~(priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
		     card_base + TWIN_SERIAL_CFG);
	}
	write_scc(priv, R11, priv->param.clocks);
	if ((priv->type == TYPE_TWIN) && !(priv->param.clocks & TRxCOI)) {
		/* Enable external TX clock receiver */
		outb((info->twin_serial_cfg |=
		      (priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
		     card_base + TWIN_SERIAL_CFG);
	}

	/* Configure PackeTwin */
	if (priv->type == TYPE_TWIN) {
		/* Assert DTR, enable interrupts */
		outb((info->twin_serial_cfg |= TWIN_EI |
		      (priv->channel ? TWIN_DTRB_ON : TWIN_DTRA_ON)),
		     card_base + TWIN_SERIAL_CFG);
	}

	/* Read current status */
	priv->rr0 = read_scc(priv, R0);
	/* Enable DCD interrupt */
	write_scc(priv, R15, DCDIE);

	netif_start_queue(dev);

	return 0;
}

static int scc_close(struct net_device *dev)
{
	struct scc_priv *priv = dev->ml_priv;
	struct scc_info *info = priv->info;
	int card_base = priv->card_base;

	netif_stop_queue(dev);

	if (priv->type == TYPE_TWIN) {
		outb((info->twin_serial_cfg &=
		      (priv->channel ? ~TWIN_DTRB_ON : ~TWIN_DTRA_ON)),
		     card_base + TWIN_SERIAL_CFG);
	}

	/* Reset channel, free DMA and IRQ */
	write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
	if (priv->param.dma >= 0) {
		if (priv->type == TYPE_TWIN)
			outb(0, card_base + TWIN_DMA_CFG);
		free_dma(priv->param.dma);
	}
	if (--info->irq_used == 0)
		free_irq(dev->irq, info);

	return 0;
}

static int scc_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd)
{
	struct scc_priv *priv = dev->ml_priv;

	switch (cmd) {
	case SIOCGSCCPARAM:
		if (copy_to_user(data, &priv->param, sizeof(struct scc_param)))
			return -EFAULT;
		return 0;
	case SIOCSSCCPARAM:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (netif_running(dev))
			return -EAGAIN;
		if (copy_from_user(&priv->param, data,
				   sizeof(struct scc_param)))
			return -EFAULT;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct scc_priv *priv = dev->ml_priv;
	unsigned long flags;
	int i;

	if (skb->protocol == htons(ETH_P_IP))
		return ax25_ip_xmit(skb);

	/* Temporarily stop the scheduler feeding us packets */
	netif_stop_queue(dev);

	/* Transfer data to DMA buffer */
	i = priv->tx_head;
	skb_copy_from_linear_data_offset(skb, 1, priv->tx_buf[i], skb->len - 1);
	priv->tx_len[i] = skb->len - 1;

	/* Clear interrupts while we touch our circular buffers */
	spin_lock_irqsave(&priv->ring_lock, flags);
	/* Move the ring buffer's head */
	priv->tx_head = (i + 1) % NUM_TX_BUF;
	priv->tx_count++;

	/* If we just filled up the last buffer, leave queue stopped.
	   The higher layers must wait until we have a DMA buffer
	   to accept the data. */
	if (priv->tx_count < NUM_TX_BUF)
		netif_wake_queue(dev);

	/* Set new TX state */
	if (priv->state == IDLE) {
		/* Assert RTS, start timer */
		priv->state = TX_HEAD;
		priv->tx_start = jiffies;
		write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8);
		write_scc(priv, R15, 0);
		start_timer(priv, priv->param.txdelay, 0);
	}

	/* Turn interrupts back on and free buffer */
	spin_unlock_irqrestore(&priv->ring_lock, flags);
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
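
/*
 * Ring-buffer recap (a reading aid based on the code above and on es_isr()
 * further down, not extra driver logic): scc_send_packet() is the producer;
 * it fills tx_buf[tx_head], advances tx_head modulo NUM_TX_BUF and bumps
 * tx_count under ring_lock, while the transmit side consumes frames from
 * tx_tail.  The queue is only re-woken while tx_count < NUM_TX_BUF, so a
 * frame is never overwritten before it has been handed to the SCC.
 */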

static int scc_set_mac_address(struct net_device *dev, void *sa)
{
	memcpy(dev->dev_addr, ((struct sockaddr *) sa)->sa_data,
	       dev->addr_len);
	return 0;
}

static inline void tx_on(struct scc_priv *priv)
{
	int i, n;
	unsigned long flags;

	if (priv->param.dma >= 0) {
		n = (priv->chip == Z85230) ? 3 : 1;
		/* Program DMA controller */
		flags = claim_dma_lock();
		set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
		set_dma_addr(priv->param.dma,
			     virt_to_bus(priv->tx_buf[priv->tx_tail]) + n);
		set_dma_count(priv->param.dma,
			      priv->tx_len[priv->tx_tail] - n);
		release_dma_lock(flags);
		/* Enable TX underrun interrupt */
		write_scc(priv, R15, TxUIE);
		if (priv->type == TYPE_TWIN)
			outb((priv->param.dma ==
			      1) ? TWIN_DMA_HDX_T1 : TWIN_DMA_HDX_T3,
			     priv->card_base + TWIN_DMA_CFG);
		else
			write_scc(priv, R1,
				  EXT_INT_ENAB | WT_FN_RDYFN |
				  WT_RDY_ENAB);
		/* Write first byte(s) */
		spin_lock_irqsave(priv->register_lock, flags);
		for (i = 0; i < n; i++)
			write_scc_data(priv,
				       priv->tx_buf[priv->tx_tail][i], 1);
		enable_dma(priv->param.dma);
		spin_unlock_irqrestore(priv->register_lock, flags);
	} else {
		write_scc(priv, R15, TxUIE);
		write_scc(priv, R1,
			  EXT_INT_ENAB | WT_FN_RDYFN | TxINT_ENAB);
		tx_isr(priv);
	}
	/* Reset EOM latch if we do not have the AUTOEOM feature */
	if (priv->chip == Z8530)
		write_scc(priv, R0, RES_EOM_L);
}
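
/*
 * Why the first byte(s) go out by programmed I/O (explanatory note based on
 * the code above and on the WR7' comment in scc_open(), not a quote from the
 * hardware manuals): the DMA address and count are offset by n, and those n
 * bytes are pushed into the TX FIFO by hand so the transmitter already has
 * data the moment DMA is enabled.  n is 3 on the Z85230 because of its
 * deeper TX FIFO, and 1 otherwise.
 */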

static inline void rx_on(struct scc_priv *priv)
{
	unsigned long flags;

	while (read_scc(priv, R0) & Rx_CH_AV)
		read_scc_data(priv);

	if (priv->param.dma >= 0) {
		/* Program DMA controller */
		flags = claim_dma_lock();
		set_dma_mode(priv->param.dma, DMA_MODE_READ);
		set_dma_addr(priv->param.dma,
			     virt_to_bus(priv->rx_buf[priv->rx_head]));
		set_dma_count(priv->param.dma, BUF_SIZE);
		release_dma_lock(flags);
		enable_dma(priv->param.dma);
		/* Configure PackeTwin DMA */
		if (priv->type == TYPE_TWIN) {
			outb((priv->param.dma ==
			      1) ? TWIN_DMA_HDX_R1 : TWIN_DMA_HDX_R3,
			     priv->card_base + TWIN_DMA_CFG);
		}
		/* Sp. cond. intr. only, ext int enable, RX DMA enable */
		write_scc(priv, R1, EXT_INT_ENAB | INT_ERR_Rx |
			  WT_RDY_RT | WT_FN_RDYFN | WT_RDY_ENAB);
	} else {
		/* Reset current frame */
		priv->rx_ptr = 0;
		/* Intr. on all Rx characters and Sp. cond., ext int enable */
		write_scc(priv, R1, EXT_INT_ENAB | INT_ALL_Rx | WT_RDY_RT |
			  WT_FN_RDYFN);
	}
	write_scc(priv, R0, ERR_RES);
	write_scc(priv, R3, RxENABLE | Rx8 | RxCRC_ENAB);
}

static inline void rx_off(struct scc_priv *priv)
{
	/* Disable receiver */
	write_scc(priv, R3, Rx8);
	/* Disable DREQ / RX interrupt */
	if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
		outb(0, priv->card_base + TWIN_DMA_CFG);
	else
		write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
	if (priv->param.dma >= 0)
		disable_dma(priv->param.dma);
}

static void start_timer(struct scc_priv *priv, int t, int r15)
{
	outb(priv->tmr_mode, priv->tmr_ctrl);
	outb(t & 0xFF, priv->tmr_cnt);
	outb((t >> 8) & 0xFF, priv->tmr_cnt);
	if (priv->type != TYPE_TWIN) {
		write_scc(priv, R15, r15 | CTSIE);
	}
}

static inline unsigned char random(void)
{
	/* See "Numerical Recipes in C", second edition, p. 284 */
	rand = rand * 1664525L + 1013904223L;
	return (unsigned char) (rand >> 24);
}
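
/*
 * Side note (general background plus arithmetic on the code below, not new
 * driver logic): this is the 32-bit linear congruential generator from
 * Numerical Recipes; its low-order bits are poor, so only the top byte
 * (rand >> 24) is used.  tm_isr() at the end of the file scales its back-off
 * by random() / param.persist * param.slottime timer ticks: with the default
 * persist of 256 the integer division always yields zero slots, while
 * smaller persist values make longer waits likely.
 */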

static inline void z8530_isr(struct scc_info *info)
{
	int is, i = 100;

	while ((is = read_scc(&info->priv[0], R3)) && i--) {
		if (is & CHARxIP) {
			rx_isr(&info->priv[0]);
		} else if (is & CHATxIP) {
			tx_isr(&info->priv[0]);
		} else if (is & CHAEXT) {
			es_isr(&info->priv[0]);
		} else if (is & CHBRxIP) {
			rx_isr(&info->priv[1]);
		} else if (is & CHBTxIP) {
			tx_isr(&info->priv[1]);
		} else {
			es_isr(&info->priv[1]);
		}
		write_scc(&info->priv[0], R0, RES_H_IUS);
	}
	if (i < 0) {
		printk(KERN_ERR "dmascc: stuck in ISR with RR3=0x%02x.\n",
		       is);
	}
	/* Ok, no interrupts pending from this 8530. The INT line should
	   be inactive now. */
}

static irqreturn_t scc_isr(int irq, void *dev_id)
{
	struct scc_info *info = dev_id;

	spin_lock(info->priv[0].register_lock);
	/* At this point interrupts are enabled, and the interrupt under service
	   is already acknowledged, but masked off.

	   Interrupt processing: We loop until we know that the IRQ line is
	   low. If another positive edge occurs afterwards during the ISR,
	   another interrupt will be triggered by the interrupt controller
	   as soon as the IRQ level is enabled again (see asm/irq.h).

	   Bottom-half handlers will be processed after scc_isr(). This is
	   important, since we only have small ringbuffers and want new data
	   to be fetched/delivered immediately. */

	if (info->priv[0].type == TYPE_TWIN) {
		int is, card_base = info->priv[0].card_base;
		while ((is = ~inb(card_base + TWIN_INT_REG)) &
		       TWIN_INT_MSK) {
			if (is & TWIN_SCC_MSK) {
				z8530_isr(info);
			} else if (is & TWIN_TMR1_MSK) {
				inb(card_base + TWIN_CLR_TMR1);
				tm_isr(&info->priv[0]);
			} else {
				inb(card_base + TWIN_CLR_TMR2);
				tm_isr(&info->priv[1]);
			}
		}
	} else
		z8530_isr(info);
	spin_unlock(info->priv[0].register_lock);
	return IRQ_HANDLED;
}

static void rx_isr(struct scc_priv *priv)
{
	if (priv->param.dma >= 0) {
		/* Check special condition and perform error reset. See 2.4.7.5. */
		special_condition(priv, read_scc(priv, R1));
		write_scc(priv, R0, ERR_RES);
	} else {
		/* Check special condition for each character. Error reset not necessary.
		   Same algorithm for SCC and ESCC. See 2.4.7.1 and 2.4.7.4. */
		int rc;
		while (read_scc(priv, R0) & Rx_CH_AV) {
			rc = read_scc(priv, R1);
			if (priv->rx_ptr < BUF_SIZE)
				priv->rx_buf[priv->rx_head][priv->
							    rx_ptr++] =
				    read_scc_data(priv);
			else {
				priv->rx_over = 2;
				read_scc_data(priv);
			}
			special_condition(priv, rc);
		}
	}
}

static void special_condition(struct scc_priv *priv, int rc)
{
	int cb;
	unsigned long flags;

	/* See Figure 2-15. Only overrun and EOF need to be checked. */

	if (rc & Rx_OVR) {
		/* Receiver overrun */
		priv->rx_over = 1;
		if (priv->param.dma < 0)
			write_scc(priv, R0, ERR_RES);
	} else if (rc & END_FR) {
		/* End of frame. Get byte count */
		if (priv->param.dma >= 0) {
			flags = claim_dma_lock();
			cb = BUF_SIZE - get_dma_residue(priv->param.dma) -
			    2;
			release_dma_lock(flags);
		} else {
			cb = priv->rx_ptr - 2;
		}

		if (priv->rx_over) {
			/* We had an overrun */
			priv->dev->stats.rx_errors++;
			if (priv->rx_over == 2)
				priv->dev->stats.rx_length_errors++;
			else
				priv->dev->stats.rx_fifo_errors++;
			priv->rx_over = 0;
		} else if (rc & CRC_ERR) {
			/* Count invalid CRC only if packet length >= minimum */
			if (cb >= 15) {
				priv->dev->stats.rx_errors++;
				priv->dev->stats.rx_crc_errors++;
			}
		} else if (cb >= 15) {
			if (priv->rx_count < NUM_RX_BUF - 1) {
				/* Put good frame in FIFO */
				priv->rx_len[priv->rx_head] = cb;
				priv->rx_head = (priv->rx_head + 1) % NUM_RX_BUF;
				priv->rx_count++;
				schedule_work(&priv->rx_work);
			} else {
				priv->dev->stats.rx_errors++;
				priv->dev->stats.rx_over_errors++;
			}
		}

		/* Get ready for new frame */
		if (priv->param.dma >= 0) {
			flags = claim_dma_lock();
			set_dma_addr(priv->param.dma,
				     virt_to_bus(priv->rx_buf[priv->rx_head]));
			set_dma_count(priv->param.dma, BUF_SIZE);
			release_dma_lock(flags);
		} else {
			priv->rx_ptr = 0;
		}
	}
}

static void rx_bh(struct work_struct *ugli_api)
{
	struct scc_priv *priv = container_of(ugli_api, struct scc_priv, rx_work);
	int i = priv->rx_tail;
	int cb;
	unsigned long flags;
	struct sk_buff *skb;
	unsigned char *data;

	spin_lock_irqsave(&priv->ring_lock, flags);
	while (priv->rx_count) {
		spin_unlock_irqrestore(&priv->ring_lock, flags);
		cb = priv->rx_len[i];
		/* Allocate buffer */
		skb = dev_alloc_skb(cb + 1);
		if (skb == NULL) {
			priv->dev->stats.rx_dropped++;
		} else {
			data = skb_put(skb, cb + 1);
			data[0] = 0;
			memcpy(&data[1], priv->rx_buf[i], cb);
			skb->protocol = ax25_type_trans(skb, priv->dev);
			netif_rx(skb);
			priv->dev->stats.rx_packets++;
			priv->dev->stats.rx_bytes += cb;
		}
		spin_lock_irqsave(&priv->ring_lock, flags);
		priv->rx_tail = i = (i + 1) % NUM_RX_BUF;
		priv->rx_count--;
	}
	spin_unlock_irqrestore(&priv->ring_lock, flags);
}

static void tx_isr(struct scc_priv *priv)
{
	int i = priv->tx_tail, p = priv->tx_ptr;

	/* Suspend TX interrupts if we don't want to send anything. */
	if (p == priv->tx_len[i]) {
		write_scc(priv, R0, RES_Tx_P);
		return;
	}

	/* Write characters */
	while ((read_scc(priv, R0) & Tx_BUF_EMP) && p < priv->tx_len[i]) {
		write_scc_data(priv, priv->tx_buf[i][p++], 0);
	}

	/* Reset EOM latch of Z8530 */
	if (!priv->tx_ptr && p && priv->chip == Z8530)
		write_scc(priv, R0, RES_EOM_L);

	priv->tx_ptr = p;
}

static void es_isr(struct scc_priv *priv)
{
	int i, rr0, drr0, res;
	unsigned long flags;

	/* Read status, reset interrupt bit (open latches) */
	rr0 = read_scc(priv, R0);
	write_scc(priv, R0, RES_EXT_INT);
	drr0 = priv->rr0 ^ rr0;
	priv->rr0 = rr0;

	/* Transmit underrun (2.4.9.6). We can't check the TxEOM flag, since
	   it might have already been cleared again by AUTOEOM. */
	if (priv->state == TX_DATA) {
		/* Get remaining bytes */
		i = priv->tx_tail;
		if (priv->param.dma >= 0) {
			disable_dma(priv->param.dma);
			flags = claim_dma_lock();
			res = get_dma_residue(priv->param.dma);
			release_dma_lock(flags);
		} else {
			res = priv->tx_len[i] - priv->tx_ptr;
		}
		/* Disable DREQ / TX interrupt */
		if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
			outb(0, priv->card_base + TWIN_DMA_CFG);
		else
			write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
		if (res) {
			/* Update packet statistics */
			priv->dev->stats.tx_errors++;
			priv->dev->stats.tx_fifo_errors++;
			/* Other underrun interrupts may already be waiting */
			write_scc(priv, R0, RES_EXT_INT);
			write_scc(priv, R0, RES_EXT_INT);
		} else {
			/* Update packet statistics */
			priv->dev->stats.tx_packets++;
			priv->dev->stats.tx_bytes += priv->tx_len[i];
			/* Remove frame from FIFO */
			priv->tx_tail = (i + 1) % NUM_TX_BUF;
			priv->tx_count--;
			/* Inform upper layers */
			netif_wake_queue(priv->dev);
		}
		write_scc(priv, R15, 0);
		if (priv->tx_count &&
		    (jiffies - priv->tx_start) < priv->param.txtimeout) {
			priv->state = TX_PAUSE;
			start_timer(priv, priv->param.txpause, 0);
		} else {
			priv->state = TX_TAIL;
			start_timer(priv, priv->param.txtail, 0);
		}
	}

	/* DCD transition */
	if (drr0 & DCD) {
		if (rr0 & DCD) {
			switch (priv->state) {
				priv->state = DCD_ON;
				write_scc(priv, R15, 0);
				start_timer(priv, priv->param.dcdon, 0);
			}
		} else {
			switch (priv->state) {
				priv->state = DCD_OFF;
				write_scc(priv, R15, 0);
				start_timer(priv, priv->param.dcdoff, 0);
			}
		}
	}

	/* CTS transition */
	if ((drr0 & CTS) && (~rr0 & CTS) && priv->type != TYPE_TWIN)
		tm_isr(priv);
}

static void tm_isr(struct scc_priv *priv)
{
	switch (priv->state) {
	case TX_HEAD:
	case TX_PAUSE:
		tx_on(priv);
		priv->state = TX_DATA;
		break;
	case TX_TAIL:
		write_scc(priv, R5, TxCRC_ENAB | Tx8);
		priv->state = RTS_OFF;
		if (priv->type != TYPE_TWIN)
			write_scc(priv, R15, 0);
		start_timer(priv, priv->param.rtsoff, 0);
		break;
	case RTS_OFF:
		write_scc(priv, R15, DCDIE);
		priv->rr0 = read_scc(priv, R0);
		if (priv->rr0 & DCD) {
			priv->dev->stats.collisions++;
			rx_on(priv);
			priv->state = RX_ON;
		} else {
			start_timer(priv, priv->param.waittime, DCDIE);
		}
		break;
	case WAIT:
		if (priv->tx_count) {
			priv->state = TX_HEAD;
			priv->tx_start = jiffies;
			write_scc(priv, R5,
				  TxCRC_ENAB | RTS | TxENAB | Tx8);
			write_scc(priv, R15, 0);
			start_timer(priv, priv->param.txdelay, 0);
		} else {
			if (priv->type != TYPE_TWIN)
				write_scc(priv, R15, DCDIE);
		}
		break;
	case DCD_ON:
	case DCD_OFF:
		write_scc(priv, R15, DCDIE);
		priv->rr0 = read_scc(priv, R0);
		if (priv->rr0 & DCD) {
			rx_on(priv);
			priv->state = RX_ON;
		} else {
			start_timer(priv,
				    random() / priv->param.persist *
				    priv->param.slottime, DCDIE);
		}
		break;
	}
}