1 // SPDX-License-Identifier: GPL-2.0-or-later
3 A FORE Systems 200E-series driver for ATM on Linux.
4 Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003.
6 Based on the PCA-200E driver from Uwe Dannowski (Uwe.Dannowski@inf.tu-dresden.de).
8 This driver simultaneously supports PCA-200E and SBA-200E adapters
9 on i386, alpha (untested), powerpc, sparc and sparc64 architectures.
14 #include <linux/kernel.h>
15 #include <linux/slab.h>
16 #include <linux/init.h>
17 #include <linux/capability.h>
18 #include <linux/interrupt.h>
19 #include <linux/bitops.h>
20 #include <linux/pci.h>
21 #include <linux/module.h>
22 #include <linux/atmdev.h>
23 #include <linux/sonet.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/delay.h>
26 #include <linux/firmware.h>
27 #include <linux/pgtable.h>
29 #include <asm/string.h>
33 #include <asm/byteorder.h>
34 #include <linux/uaccess.h>
35 #include <linux/atomic.h>
39 #include <linux/of_device.h>
40 #include <asm/idprom.h>
41 #include <asm/openprom.h>
42 #include <asm/oplib.h>
45 #if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */
46 #define FORE200E_USE_TASKLET
49 #if 0 /* enable the debugging code of the buffer supply queues */
50 #define FORE200E_BSQ_DEBUG
53 #if 1 /* ensure correct handling of 52-byte AAL0 SDUs expected by atmdump-like apps */
54 #define FORE200E_52BYTE_AAL0_SDU
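/* with this option, an AAL0 SDU exchanged with user space is 52 bytes long: the
   4-byte ATM cell header (GFC/VPI/VCI/PTI/CLP, without the HEC byte) followed by
   the 48-byte cell payload. on rx, fore200e_push_rpd() prepends a header rebuilt
   from the rpd; on tx, fore200e_send() strips the user-supplied header before
   handing the payload to the adapter. */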
60 #define FORE200E_VERSION "0.3e"
62 #define FORE200E "fore200e: "
64 #if 0 /* override .config */
65 #define CONFIG_ATM_FORE200E_DEBUG 1
67 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
68 #define DPRINTK(level, format, args...) do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \
69 printk(FORE200E format, ##args); } while (0)
71 #define DPRINTK(level, format, args...) do {} while (0)
75 #define FORE200E_ALIGN(addr, alignment) \
76 ((((unsigned long)(addr) + (alignment - 1)) & ~(alignment - 1)) - (unsigned long)(addr))
78 #define FORE200E_DMA_INDEX(dma_addr, type, index) ((dma_addr) + (index) * sizeof(type))
80 #define FORE200E_INDEX(virt_addr, type, index) (&((type *)(virt_addr))[ index ])
82 #define FORE200E_NEXT_ENTRY(index, modulo) (index = ((index) + 1) % (modulo))
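/* worked examples for the helpers above (illustrative only):
   FORE200E_ALIGN(0x1004, 32) = ((0x1004 + 31) & ~31) - 0x1004 = 0x1020 - 0x1004 = 28,
   i.e. the number of padding bytes needed to reach the next 32-byte boundary;
   FORE200E_NEXT_ENTRY(index, modulo) advances a ring index in place and wraps it
   back to 0 once it reaches 'modulo'. */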
85 #define ASSERT(expr) if (!(expr)) { \
86 printk(FORE200E "assertion failed! %s[%d]: %s\n", \
87 __func__, __LINE__, #expr); \
88 panic(FORE200E "%s", __func__); \
91 #define ASSERT(expr) do {} while (0)
95 static const struct atmdev_ops fore200e_ops;
97 static LIST_HEAD(fore200e_boards);
100 MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen");
101 MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION);
103 static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
104 { BUFFER_S1_NBR, BUFFER_L1_NBR },
105 { BUFFER_S2_NBR, BUFFER_L2_NBR }
108 static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
109 { BUFFER_S1_SIZE, BUFFER_L1_SIZE },
110 { BUFFER_S2_SIZE, BUFFER_L2_SIZE }
114 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
115 static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" };
119 #if 0 /* currently unused */
121 fore200e_fore2atm_aal(enum fore200e_aal aal)
124 case FORE200E_AAL0: return ATM_AAL0;
125 case FORE200E_AAL34: return ATM_AAL34;
126 case FORE200E_AAL5: return ATM_AAL5;
134 static enum fore200e_aal
135 fore200e_atm2fore_aal(int aal)
138 case ATM_AAL0: return FORE200E_AAL0;
139 case ATM_AAL34: return FORE200E_AAL34;
142 case ATM_AAL5: return FORE200E_AAL5;
150 fore200e_irq_itoa(int irq)
153 sprintf(str, "%d", irq);
158 /* allocate and align a chunk of memory intended to hold the data being exchanged
159 between the driver and the adapter (using streaming DVMA) */
162 fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction)
164 unsigned long offset = 0;
166 if (alignment <= sizeof(int))
169 chunk->alloc_size = size + alignment;
170 chunk->direction = direction;
172 chunk->alloc_addr = kzalloc(chunk->alloc_size, GFP_KERNEL);
173 if (chunk->alloc_addr == NULL)
177 offset = FORE200E_ALIGN(chunk->alloc_addr, alignment);
179 chunk->align_addr = chunk->alloc_addr + offset;
181 chunk->dma_addr = dma_map_single(fore200e->dev, chunk->align_addr,
183 if (dma_mapping_error(fore200e->dev, chunk->dma_addr)) {
184 kfree(chunk->alloc_addr);
191 /* free a chunk of memory */
194 fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
196 dma_unmap_single(fore200e->dev, chunk->dma_addr, chunk->dma_size,
198 kfree(chunk->alloc_addr);
202 * Allocate a DMA consistent chunk of memory intended to act as a communication
203 * mechanism (to hold descriptors, status, queues, etc.) shared by the driver
207 fore200e_dma_chunk_alloc(struct fore200e *fore200e, struct chunk *chunk,
208 int size, int nbr, int alignment)
210 /* returned chunks are page-aligned */
211 chunk->alloc_size = size * nbr;
212 chunk->alloc_addr = dma_alloc_coherent(fore200e->dev, chunk->alloc_size,
213 &chunk->dma_addr, GFP_KERNEL);
214 if (!chunk->alloc_addr)
216 chunk->align_addr = chunk->alloc_addr;
221 * Free a DMA consistent chunk of memory.
224 fore200e_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
226 dma_free_coherent(fore200e->dev, chunk->alloc_size, chunk->alloc_addr,
231 fore200e_spin(int msecs)
233 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
234 while (time_before(jiffies, timeout));
239 fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs)
241 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
246 if ((ok = (*addr == val)) || (*addr & STATUS_ERROR))
249 } while (time_before(jiffies, timeout));
253 printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n",
263 fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs)
265 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
269 if ((ok = (fore200e->bus->read(addr) == val)))
272 } while (time_before(jiffies, timeout));
276 printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n",
277 fore200e->bus->read(addr), val);
286 fore200e_free_rx_buf(struct fore200e* fore200e)
288 int scheme, magn, nbr;
289 struct buffer* buffer;
291 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
292 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
294 if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) {
296 for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) {
298 struct chunk* data = &buffer[ nbr ].data;
300 if (data->alloc_addr != NULL)
301 fore200e_chunk_free(fore200e, data);
310 fore200e_uninit_bs_queue(struct fore200e* fore200e)
314 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
315 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
317 struct chunk* status = &fore200e->host_bsq[ scheme ][ magn ].status;
318 struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;
320 if (status->alloc_addr)
321 fore200e_dma_chunk_free(fore200e, status);
323 if (rbd_block->alloc_addr)
324 fore200e_dma_chunk_free(fore200e, rbd_block);
331 fore200e_reset(struct fore200e* fore200e, int diag)
335 fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET;
337 fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat);
339 fore200e->bus->reset(fore200e);
342 ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000);
345 printk(FORE200E "device %s self-test failed\n", fore200e->name);
349 printk(FORE200E "device %s self-test passed\n", fore200e->name);
351 fore200e->state = FORE200E_STATE_RESET;
359 fore200e_shutdown(struct fore200e* fore200e)
361 printk(FORE200E "removing device %s at 0x%lx, IRQ %s\n",
362 fore200e->name, fore200e->phys_base,
363 fore200e_irq_itoa(fore200e->irq));
365 if (fore200e->state > FORE200E_STATE_RESET) {
366 /* first, reset the board to prevent further interrupts or data transfers */
367 fore200e_reset(fore200e, 0);
370 /* then, release all allocated resources */
371 switch(fore200e->state) {
373 case FORE200E_STATE_COMPLETE:
374 kfree(fore200e->stats);
377 case FORE200E_STATE_IRQ:
378 free_irq(fore200e->irq, fore200e->atm_dev);
381 case FORE200E_STATE_ALLOC_BUF:
382 fore200e_free_rx_buf(fore200e);
385 case FORE200E_STATE_INIT_BSQ:
386 fore200e_uninit_bs_queue(fore200e);
389 case FORE200E_STATE_INIT_RXQ:
390 fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.status);
391 fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
394 case FORE200E_STATE_INIT_TXQ:
395 fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.status);
396 fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
399 case FORE200E_STATE_INIT_CMDQ:
400 fore200e_dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
403 case FORE200E_STATE_INITIALIZE:
404 /* nothing to do for that state */
406 case FORE200E_STATE_START_FW:
407 /* nothing to do for that state */
409 case FORE200E_STATE_RESET:
410 /* nothing to do for that state */
412 case FORE200E_STATE_MAP:
413 fore200e->bus->unmap(fore200e);
416 case FORE200E_STATE_CONFIGURE:
417 /* nothing to do for that state */
419 case FORE200E_STATE_REGISTER:
420 /* XXX shouldn't we *start* by deregistering the device? */
421 atm_dev_deregister(fore200e->atm_dev);
424 case FORE200E_STATE_BLANK:
425 /* nothing to do for that state */
433 static u32 fore200e_pca_read(volatile u32 __iomem *addr)
435 /* on big-endian hosts, the board is configured to convert
436 the endianness of slave RAM accesses */
437 return le32_to_cpu(readl(addr));
441 static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
443 /* on big-endian hosts, the board is configured to convert
444 the endianness of slave RAM accesses */
445 writel(cpu_to_le32(val), addr);
449 fore200e_pca_irq_check(struct fore200e* fore200e)
451 /* this is a 1 bit register */
452 int irq_posted = readl(fore200e->regs.pca.psr);
454 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2)
455 if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) {
456 DPRINTK(2,"FIFO OUT full, device %d\n", fore200e->atm_dev->number);
465 fore200e_pca_irq_ack(struct fore200e* fore200e)
467 writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr);
472 fore200e_pca_reset(struct fore200e* fore200e)
474 writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr);
476 writel(0, fore200e->regs.pca.hcr);
480 static int fore200e_pca_map(struct fore200e* fore200e)
482 DPRINTK(2, "device %s being mapped in memory\n", fore200e->name);
484 fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH);
486 if (fore200e->virt_base == NULL) {
487 printk(FORE200E "can't map device %s\n", fore200e->name);
491 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
493 /* gain access to the PCA specific registers */
494 fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET;
495 fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET;
496 fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET;
498 fore200e->state = FORE200E_STATE_MAP;
504 fore200e_pca_unmap(struct fore200e* fore200e)
506 DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name);
508 if (fore200e->virt_base != NULL)
509 iounmap(fore200e->virt_base);
513 static int fore200e_pca_configure(struct fore200e *fore200e)
515 struct pci_dev *pci_dev = to_pci_dev(fore200e->dev);
516 u8 master_ctrl, latency;
518 DPRINTK(2, "device %s being configured\n", fore200e->name);
520 if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) {
521 printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n");
525 pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl);
527 master_ctrl = master_ctrl
528 #if defined(__BIG_ENDIAN)
529 /* request the PCA board to convert the endianness of slave RAM accesses */
530 | PCA200E_CTRL_CONVERT_ENDIAN
533 | PCA200E_CTRL_DIS_CACHE_RD
534 | PCA200E_CTRL_DIS_WRT_INVAL
535 | PCA200E_CTRL_ENA_CONT_REQ_MODE
536 | PCA200E_CTRL_2_CACHE_WRT_INVAL
538 | PCA200E_CTRL_LARGE_PCI_BURSTS;
540 pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl);
542 /* raise latency from 32 (default) to 192, as this seems to prevent NIC
543 lockups (under heavy rx loads) due to a continuous 'FIFO OUT full' condition.
544 this may impact the performance of other PCI devices on the same bus, though */
546 pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);
548 fore200e->state = FORE200E_STATE_CONFIGURE;
554 fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
556 struct host_cmdq* cmdq = &fore200e->host_cmdq;
557 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
558 struct prom_opcode opcode;
562 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
564 opcode.opcode = OPCODE_GET_PROM;
567 prom_dma = dma_map_single(fore200e->dev, prom, sizeof(struct prom_data),
569 if (dma_mapping_error(fore200e->dev, prom_dma))
572 fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr);
574 *entry->status = STATUS_PENDING;
576 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode);
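/* note the command handshake used throughout the driver: the parameters and the
   PENDING status are set up first and the opcode word is written last, presumably
   so that the cp never picks up a half-built command; completion is then detected
   by polling the host-resident status word. the same pattern appears in
   fore200e_getstats() and fore200e_set_oc3() below. */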
578 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
580 *entry->status = STATUS_FREE;
582 dma_unmap_single(fore200e->dev, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);
585 printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name);
589 #if defined(__BIG_ENDIAN)
591 #define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) ))
593 /* MAC address is stored as little-endian */
594 swap_here(&prom->mac_addr[0]);
595 swap_here(&prom->mac_addr[4]);
603 fore200e_pca_proc_read(struct fore200e* fore200e, char *page)
605 struct pci_dev *pci_dev = to_pci_dev(fore200e->dev);
607 return sprintf(page, " PCI bus/slot/function:\t%d/%d/%d\n",
608 pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
611 static const struct fore200e_bus fore200e_pci_ops = {
612 .model_name = "PCA-200E",
613 .proc_name = "pca200e",
614 .descr_alignment = 32,
615 .buffer_alignment = 4,
616 .status_alignment = 32,
617 .read = fore200e_pca_read,
618 .write = fore200e_pca_write,
619 .configure = fore200e_pca_configure,
620 .map = fore200e_pca_map,
621 .reset = fore200e_pca_reset,
622 .prom_read = fore200e_pca_prom_read,
623 .unmap = fore200e_pca_unmap,
624 .irq_check = fore200e_pca_irq_check,
625 .irq_ack = fore200e_pca_irq_ack,
626 .proc_read = fore200e_pca_proc_read,
628 #endif /* CONFIG_PCI */
632 static u32 fore200e_sba_read(volatile u32 __iomem *addr)
634 return sbus_readl(addr);
637 static void fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
639 sbus_writel(val, addr);
642 static void fore200e_sba_irq_enable(struct fore200e *fore200e)
644 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
645 fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr);
648 static int fore200e_sba_irq_check(struct fore200e *fore200e)
650 return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ;
653 static void fore200e_sba_irq_ack(struct fore200e *fore200e)
655 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
656 fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr);
659 static void fore200e_sba_reset(struct fore200e *fore200e)
661 fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr);
663 fore200e->bus->write(0, fore200e->regs.sba.hcr);
666 static int __init fore200e_sba_map(struct fore200e *fore200e)
668 struct platform_device *op = to_platform_device(fore200e->dev);
671 /* gain access to the SBA specific registers */
672 fore200e->regs.sba.hcr = of_ioremap(&op->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR");
673 fore200e->regs.sba.bsr = of_ioremap(&op->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR");
674 fore200e->regs.sba.isr = of_ioremap(&op->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR");
675 fore200e->virt_base = of_ioremap(&op->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM");
677 if (!fore200e->virt_base) {
678 printk(FORE200E "unable to map RAM of device %s\n", fore200e->name);
682 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
684 fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */
686 /* get the supported DVMA burst sizes */
687 bursts = of_getintprop_default(op->dev.of_node->parent, "burst-sizes", 0x00);
689 if (sbus_can_dma_64bit())
690 sbus_set_sbus64(&op->dev, bursts);
692 fore200e->state = FORE200E_STATE_MAP;
696 static void fore200e_sba_unmap(struct fore200e *fore200e)
698 struct platform_device *op = to_platform_device(fore200e->dev);
700 of_iounmap(&op->resource[0], fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH);
701 of_iounmap(&op->resource[1], fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH);
702 of_iounmap(&op->resource[2], fore200e->regs.sba.isr, SBA200E_ISR_LENGTH);
703 of_iounmap(&op->resource[3], fore200e->virt_base, SBA200E_RAM_LENGTH);
706 static int __init fore200e_sba_configure(struct fore200e *fore200e)
708 fore200e->state = FORE200E_STATE_CONFIGURE;
712 static int __init fore200e_sba_prom_read(struct fore200e *fore200e, struct prom_data *prom)
714 struct platform_device *op = to_platform_device(fore200e->dev);
718 prop = of_get_property(op->dev.of_node, "madaddrlo2", &len);
721 memcpy(&prom->mac_addr[4], prop, 4);
723 prop = of_get_property(op->dev.of_node, "madaddrhi4", &len);
726 memcpy(&prom->mac_addr[2], prop, 4);
728 prom->serial_number = of_getintprop_default(op->dev.of_node,
730 prom->hw_revision = of_getintprop_default(op->dev.of_node,
736 static int fore200e_sba_proc_read(struct fore200e *fore200e, char *page)
738 struct platform_device *op = to_platform_device(fore200e->dev);
739 const struct linux_prom_registers *regs;
741 regs = of_get_property(op->dev.of_node, "reg", NULL);
743 return sprintf(page, " SBUS slot/device:\t\t%d/'%pOFn'\n",
744 (regs ? regs->which_io : 0), op->dev.of_node);
747 static const struct fore200e_bus fore200e_sbus_ops = {
748 .model_name = "SBA-200E",
749 .proc_name = "sba200e",
750 .descr_alignment = 32,
751 .buffer_alignment = 64,
752 .status_alignment = 32,
753 .read = fore200e_sba_read,
754 .write = fore200e_sba_write,
755 .configure = fore200e_sba_configure,
756 .map = fore200e_sba_map,
757 .reset = fore200e_sba_reset,
758 .prom_read = fore200e_sba_prom_read,
759 .unmap = fore200e_sba_unmap,
760 .irq_enable = fore200e_sba_irq_enable,
761 .irq_check = fore200e_sba_irq_check,
762 .irq_ack = fore200e_sba_irq_ack,
763 .proc_read = fore200e_sba_proc_read,
765 #endif /* CONFIG_SBUS */
768 fore200e_tx_irq(struct fore200e* fore200e)
770 struct host_txq* txq = &fore200e->host_txq;
771 struct host_txq_entry* entry;
773 struct fore200e_vc_map* vc_map;
775 if (fore200e->host_txq.txing == 0)
780 entry = &txq->host_entry[ txq->tail ];
782 if ((*entry->status & STATUS_COMPLETE) == 0) {
786 DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n",
787 entry, txq->tail, entry->vc_map, entry->skb);
789 /* free copy of misaligned data */
792 /* remove DMA mapping */
793 dma_unmap_single(fore200e->dev, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
796 vc_map = entry->vc_map;
798 /* vcc closed since the time the entry was submitted for tx? */
799 if ((vc_map->vcc == NULL) ||
800 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
802 DPRINTK(1, "no ready vcc found for PDU sent on device %d\n",
803 fore200e->atm_dev->number);
805 dev_kfree_skb_any(entry->skb);
810 /* vcc closed then immediately re-opened? */
811 if (vc_map->incarn != entry->incarn) {
813 /* when a vcc is closed, some PDUs may still be pending in the tx queue.
814 If the same vcc is immediately re-opened, those pending PDUs must
815 not be popped after their transmission completes, as they refer
816 to the prior incarnation of that vcc. Otherwise, sk_atm(vcc)->sk_wmem_alloc
817 would be decremented by the size of the (unrelated) skb, possibly
818 leading to a negative sk->sk_wmem_alloc count, ultimately freezing the vcc.
819 We thus bind the tx entry to the current incarnation of the vcc
820 when the entry is submitted for tx. When the tx later completes,
821 if the incarnation number of the tx entry does not match that
822 of the vcc, the vcc has been closed and then re-opened;
823 we thus just drop the skb here. */
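/* illustrative timeline (hypothetical VC): open() sets incarn = 5, an skb is
   submitted with entry->incarn = 5, the VC is then closed and immediately
   re-opened (incarn becomes 6); when the old skb completes, 5 != 6, so the skb
   is only freed and never popped against the new incarnation's socket
   accounting. */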
825 DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n",
826 fore200e->atm_dev->number);
828 dev_kfree_skb_any(entry->skb);
834 /* notify tx completion */
836 vcc->pop(vcc, entry->skb);
839 dev_kfree_skb_any(entry->skb);
842 /* check error condition */
843 if (*entry->status & STATUS_ERROR)
844 atomic_inc(&vcc->stats->tx_err);
846 atomic_inc(&vcc->stats->tx);
850 *entry->status = STATUS_FREE;
852 fore200e->host_txq.txing--;
854 FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX);
859 #ifdef FORE200E_BSQ_DEBUG
860 int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn)
862 struct buffer* buffer;
865 buffer = bsq->freebuf;
868 if (buffer->supplied) {
869 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n",
870 where, scheme, magn, buffer->index);
873 if (buffer->magn != magn) {
874 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n",
875 where, scheme, magn, buffer->index, buffer->magn);
878 if (buffer->scheme != scheme) {
879 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n",
880 where, scheme, magn, buffer->index, buffer->scheme);
883 if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) {
884 printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n",
885 where, scheme, magn, buffer->index);
889 buffer = buffer->next;
892 if (count != bsq->freebuf_count) {
893 printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n",
894 where, scheme, magn, count, bsq->freebuf_count);
902 fore200e_supply(struct fore200e* fore200e)
906 struct host_bsq* bsq;
907 struct host_bsq_entry* entry;
908 struct buffer* buffer;
910 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
911 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
913 bsq = &fore200e->host_bsq[ scheme ][ magn ];
915 #ifdef FORE200E_BSQ_DEBUG
916 bsq_audit(1, bsq, scheme, magn);
918 while (bsq->freebuf_count >= RBD_BLK_SIZE) {
920 DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n",
921 RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count);
923 entry = &bsq->host_entry[ bsq->head ];
925 for (i = 0; i < RBD_BLK_SIZE; i++) {
927 /* take the first buffer in the free buffer list */
928 buffer = bsq->freebuf;
930 printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n",
931 scheme, magn, bsq->freebuf_count);
934 bsq->freebuf = buffer->next;
936 #ifdef FORE200E_BSQ_DEBUG
937 if (buffer->supplied)
938 printk(FORE200E "queue %d.%d, buffer %lu already supplied\n",
939 scheme, magn, buffer->index);
940 buffer->supplied = 1;
942 entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr;
943 entry->rbd_block->rbd[ i ].handle = FORE200E_BUF2HDL(buffer);
946 FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS);
948 /* decrease the number of free rx buffers accordingly */
949 bsq->freebuf_count -= RBD_BLK_SIZE;
951 *entry->status = STATUS_PENDING;
952 fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr);
960 fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rpd)
963 struct buffer* buffer;
964 struct fore200e_vcc* fore200e_vcc;
966 #ifdef FORE200E_52BYTE_AAL0_SDU
972 fore200e_vcc = FORE200E_VCC(vcc);
973 ASSERT(fore200e_vcc);
975 #ifdef FORE200E_52BYTE_AAL0_SDU
976 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) {
978 cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) |
979 (rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) |
980 (rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) |
981 (rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) |
987 /* compute total PDU length */
988 for (i = 0; i < rpd->nseg; i++)
989 pdu_len += rpd->rsd[ i ].length;
991 skb = alloc_skb(pdu_len, GFP_ATOMIC);
993 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
995 atomic_inc(&vcc->stats->rx_drop);
999 __net_timestamp(skb);
1001 #ifdef FORE200E_52BYTE_AAL0_SDU
1003 *((u32*)skb_put(skb, 4)) = cell_header;
1007 /* reassemble segments */
1008 for (i = 0; i < rpd->nseg; i++) {
1010 /* rebuild rx buffer address from rsd handle */
1011 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1013 /* Make device DMA transfer visible to CPU. */
1014 dma_sync_single_for_cpu(fore200e->dev, buffer->data.dma_addr,
1015 rpd->rsd[i].length, DMA_FROM_DEVICE);
1017 skb_put_data(skb, buffer->data.align_addr, rpd->rsd[i].length);
1019 /* Now let the device get at it again. */
1020 dma_sync_single_for_device(fore200e->dev, buffer->data.dma_addr,
1021 rpd->rsd[i].length, DMA_FROM_DEVICE);
1024 DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize);
1026 if (pdu_len < fore200e_vcc->rx_min_pdu)
1027 fore200e_vcc->rx_min_pdu = pdu_len;
1028 if (pdu_len > fore200e_vcc->rx_max_pdu)
1029 fore200e_vcc->rx_max_pdu = pdu_len;
1030 fore200e_vcc->rx_pdu++;
1033 if (atm_charge(vcc, skb->truesize) == 0) {
1035 DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n",
1036 vcc->itf, vcc->vpi, vcc->vci);
1038 dev_kfree_skb_any(skb);
1040 atomic_inc(&vcc->stats->rx_drop);
1044 vcc->push(vcc, skb);
1045 atomic_inc(&vcc->stats->rx);
1052 fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd)
1054 struct host_bsq* bsq;
1055 struct buffer* buffer;
1058 for (i = 0; i < rpd->nseg; i++) {
1060 /* rebuild rx buffer address from rsd handle */
1061 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1063 bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ];
1065 #ifdef FORE200E_BSQ_DEBUG
1066 bsq_audit(2, bsq, buffer->scheme, buffer->magn);
1068 if (buffer->supplied == 0)
1069 printk(FORE200E "queue %d.%d, buffer %ld was not supplied\n",
1070 buffer->scheme, buffer->magn, buffer->index);
1071 buffer->supplied = 0;
1074 /* re-insert the buffer into the free buffer list */
1075 buffer->next = bsq->freebuf;
1076 bsq->freebuf = buffer;
1078 /* then increment the number of free rx buffers */
1079 bsq->freebuf_count++;
1085 fore200e_rx_irq(struct fore200e* fore200e)
1087 struct host_rxq* rxq = &fore200e->host_rxq;
1088 struct host_rxq_entry* entry;
1089 struct atm_vcc* vcc;
1090 struct fore200e_vc_map* vc_map;
1094 entry = &rxq->host_entry[ rxq->head ];
1096 /* no more received PDUs */
1097 if ((*entry->status & STATUS_COMPLETE) == 0)
1100 vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1102 if ((vc_map->vcc == NULL) ||
1103 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
1105 DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n",
1106 fore200e->atm_dev->number,
1107 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1113 if ((*entry->status & STATUS_ERROR) == 0) {
1115 fore200e_push_rpd(fore200e, vcc, entry->rpd);
1118 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
1119 fore200e->atm_dev->number,
1120 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1121 atomic_inc(&vcc->stats->rx_err);
1125 FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX);
1127 fore200e_collect_rpd(fore200e, entry->rpd);
1129 /* rewrite the rpd address to ack the received PDU */
1130 fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr);
1131 *entry->status = STATUS_FREE;
1133 fore200e_supply(fore200e);
1138 #ifndef FORE200E_USE_TASKLET
1140 fore200e_irq(struct fore200e* fore200e)
1142 unsigned long flags;
1144 spin_lock_irqsave(&fore200e->q_lock, flags);
1145 fore200e_rx_irq(fore200e);
1146 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1148 spin_lock_irqsave(&fore200e->q_lock, flags);
1149 fore200e_tx_irq(fore200e);
1150 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1156 fore200e_interrupt(int irq, void* dev)
1158 struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev);
1160 if (fore200e->bus->irq_check(fore200e) == 0) {
1162 DPRINTK(3, "interrupt NOT triggered by device %d\n", fore200e->atm_dev->number);
1165 DPRINTK(3, "interrupt triggered by device %d\n", fore200e->atm_dev->number);
1167 #ifdef FORE200E_USE_TASKLET
1168 tasklet_schedule(&fore200e->tx_tasklet);
1169 tasklet_schedule(&fore200e->rx_tasklet);
1171 fore200e_irq(fore200e);
1174 fore200e->bus->irq_ack(fore200e);
1179 #ifdef FORE200E_USE_TASKLET
1181 fore200e_tx_tasklet(unsigned long data)
1183 struct fore200e* fore200e = (struct fore200e*) data;
1184 unsigned long flags;
1186 DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1188 spin_lock_irqsave(&fore200e->q_lock, flags);
1189 fore200e_tx_irq(fore200e);
1190 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1195 fore200e_rx_tasklet(unsigned long data)
1197 struct fore200e* fore200e = (struct fore200e*) data;
1198 unsigned long flags;
1200 DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1202 spin_lock_irqsave(&fore200e->q_lock, flags);
1203 fore200e_rx_irq((struct fore200e*) data);
1204 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1210 fore200e_select_scheme(struct atm_vcc* vcc)
1212 /* fairly balance the VCs over (identical) buffer schemes */
1213 int scheme = vcc->vci % 2 ? BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO;
1215 DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n",
1216 vcc->itf, vcc->vpi, vcc->vci, scheme);
1223 fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu)
1225 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1226 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1227 struct activate_opcode activ_opcode;
1228 struct deactivate_opcode deactiv_opcode;
1231 enum fore200e_aal aal = fore200e_atm2fore_aal(vcc->qos.aal);
1233 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1236 FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc);
1238 activ_opcode.opcode = OPCODE_ACTIVATE_VCIN;
1239 activ_opcode.aal = aal;
1240 activ_opcode.scheme = FORE200E_VCC(vcc)->scheme;
1241 activ_opcode.pad = 0;
1244 deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN;
1245 deactiv_opcode.pad = 0;
1248 vpvc.vci = vcc->vci;
1249 vpvc.vpi = vcc->vpi;
1251 *entry->status = STATUS_PENDING;
1255 #ifdef FORE200E_52BYTE_AAL0_SDU
1258 /* the MTU is not used by the cp, except in the case of AAL0 */
1259 fore200e->bus->write(mtu, &entry->cp_entry->cmd.activate_block.mtu);
1260 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc);
1261 fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode);
1264 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc);
1265 fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode);
1268 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1270 *entry->status = STATUS_FREE;
1273 printk(FORE200E "unable to %s VC %d.%d.%d\n",
1274 activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci);
1278 DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci,
1279 activate ? "open" : "clos");
1285 #define FORE200E_MAX_BACK2BACK_CELLS 255 /* XXX depends on CDVT */
1288 fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate)
1290 if (qos->txtp.max_pcr < ATM_OC3_PCR) {
1292 /* compute the data cells to idle cells ratio from the tx PCR */
1293 rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR;
1294 rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells;
1297 /* disable rate control */
1298 rate->data_cells = rate->idle_cells = 0;
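/* worked example (assuming ATM_OC3_PCR is about 353207 cells/s): a CBR VC asking
   for max_pcr = 176603, roughly half the link rate, gets
   data_cells = 176603 * 255 / 353207 = 127 and idle_cells = 255 - 127 = 128,
   i.e. the adapter interleaves about one idle cell per data cell. */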
1304 fore200e_open(struct atm_vcc *vcc)
1306 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1307 struct fore200e_vcc* fore200e_vcc;
1308 struct fore200e_vc_map* vc_map;
1309 unsigned long flags;
1311 short vpi = vcc->vpi;
1313 ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS));
1314 ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS));
1316 spin_lock_irqsave(&fore200e->q_lock, flags);
1318 vc_map = FORE200E_VC_MAP(fore200e, vpi, vci);
1321 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1323 printk(FORE200E "VC %d.%d.%d already in use\n",
1324 fore200e->atm_dev->number, vpi, vci);
1331 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1333 fore200e_vcc = kzalloc(sizeof(struct fore200e_vcc), GFP_ATOMIC);
1334 if (fore200e_vcc == NULL) {
1339 DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1340 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n",
1341 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1342 fore200e_traffic_class[ vcc->qos.txtp.traffic_class ],
1343 vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu,
1344 fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ],
1345 vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu);
1347 /* pseudo-CBR bandwidth requested? */
1348 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1350 mutex_lock(&fore200e->rate_mtx);
1351 if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) {
1352 mutex_unlock(&fore200e->rate_mtx);
1354 kfree(fore200e_vcc);
1359 /* reserve bandwidth */
1360 fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr;
1361 mutex_unlock(&fore200e->rate_mtx);
1364 vcc->itf = vcc->dev->number;
1366 set_bit(ATM_VF_PARTIAL,&vcc->flags);
1367 set_bit(ATM_VF_ADDR, &vcc->flags);
1369 vcc->dev_data = fore200e_vcc;
1371 if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) {
1375 clear_bit(ATM_VF_ADDR, &vcc->flags);
1376 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1378 vcc->dev_data = NULL;
1380 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1382 kfree(fore200e_vcc);
1386 /* compute rate control parameters */
1387 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1389 fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate);
1390 set_bit(ATM_VF_HASQOS, &vcc->flags);
1392 DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n",
1393 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1394 vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr,
1395 fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells);
1398 fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = MAX_PDU_SIZE + 1;
1399 fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0;
1400 fore200e_vcc->tx_pdu = fore200e_vcc->rx_pdu = 0;
1402 /* new incarnation of the vcc */
1403 vc_map->incarn = ++fore200e->incarn_count;
1405 /* VC unusable before this flag is set */
1406 set_bit(ATM_VF_READY, &vcc->flags);
1413 fore200e_close(struct atm_vcc* vcc)
1415 struct fore200e_vcc* fore200e_vcc;
1416 struct fore200e* fore200e;
1417 struct fore200e_vc_map* vc_map;
1418 unsigned long flags;
1421 fore200e = FORE200E_DEV(vcc->dev);
1423 ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS));
1424 ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS));
1426 DPRINTK(2, "closing %d.%d.%d:%d\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal));
1428 clear_bit(ATM_VF_READY, &vcc->flags);
1430 fore200e_activate_vcin(fore200e, 0, vcc, 0);
1432 spin_lock_irqsave(&fore200e->q_lock, flags);
1434 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1436 /* the vc is no longer considered "in use" by fore200e_open() */
1439 vcc->itf = vcc->vci = vcc->vpi = 0;
1441 fore200e_vcc = FORE200E_VCC(vcc);
1442 vcc->dev_data = NULL;
1444 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1446 /* release reserved bandwidth, if any */
1447 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1449 mutex_lock(&fore200e->rate_mtx);
1450 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1451 mutex_unlock(&fore200e->rate_mtx);
1453 clear_bit(ATM_VF_HASQOS, &vcc->flags);
1456 clear_bit(ATM_VF_ADDR, &vcc->flags);
1457 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1459 ASSERT(fore200e_vcc);
1460 kfree(fore200e_vcc);
1465 fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
1467 struct fore200e* fore200e;
1468 struct fore200e_vcc* fore200e_vcc;
1469 struct fore200e_vc_map* vc_map;
1470 struct host_txq* txq;
1471 struct host_txq_entry* entry;
1473 struct tpd_haddr tpd_haddr;
1474 int retry = CONFIG_ATM_FORE200E_TX_RETRY;
1476 int tx_len = skb->len;
1477 u32* cell_header = NULL;
1478 unsigned char* skb_data;
1480 unsigned char* data;
1481 unsigned long flags;
1486 fore200e = FORE200E_DEV(vcc->dev);
1487 fore200e_vcc = FORE200E_VCC(vcc);
1492 txq = &fore200e->host_txq;
1496 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1497 DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vpi);
1498 dev_kfree_skb_any(skb);
1502 #ifdef FORE200E_52BYTE_AAL0_SDU
1503 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) {
1504 cell_header = (u32*) skb->data;
1505 skb_data = skb->data + 4; /* skip 4-byte cell header */
1506 skb_len = tx_len = skb->len - 4;
1508 DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header);
1513 skb_data = skb->data;
1517 if (((unsigned long)skb_data) & 0x3) {
1519 DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name);
1524 if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) {
1526 /* this simply NUKES the PCA board */
1527 DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name);
1529 tx_len = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD;
1533 data = kmalloc(tx_len, GFP_ATOMIC);
1539 dev_kfree_skb_any(skb);
1544 memcpy(data, skb_data, skb_len);
1545 if (skb_len < tx_len)
1546 memset(data + skb_len, 0x00, tx_len - skb_len);
1552 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1553 ASSERT(vc_map->vcc == vcc);
1557 spin_lock_irqsave(&fore200e->q_lock, flags);
1559 entry = &txq->host_entry[ txq->head ];
1561 if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) {
1563 /* try to free completed tx queue entries */
1564 fore200e_tx_irq(fore200e);
1566 if (*entry->status != STATUS_FREE) {
1568 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1570 /* retry once again? */
1576 atomic_inc(&vcc->stats->tx_err);
1579 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
1580 fore200e->name, fore200e->cp_queues->heartbeat);
1585 dev_kfree_skb_any(skb);
1595 entry->incarn = vc_map->incarn;
1596 entry->vc_map = vc_map;
1598 entry->data = tx_copy ? data : NULL;
1601 tpd->tsd[ 0 ].buffer = dma_map_single(fore200e->dev, data, tx_len,
1603 if (dma_mapping_error(fore200e->dev, tpd->tsd[0].buffer)) {
1606 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1609 tpd->tsd[ 0 ].length = tx_len;
1611 FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX);
1614 /* The dma_map call above implies a dma_sync so the device can use it,
1615 * thus no explicit dma_sync call is necessary here.
1618 DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n",
1619 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1620 tpd->tsd[0].length, skb_len);
1622 if (skb_len < fore200e_vcc->tx_min_pdu)
1623 fore200e_vcc->tx_min_pdu = skb_len;
1624 if (skb_len > fore200e_vcc->tx_max_pdu)
1625 fore200e_vcc->tx_max_pdu = skb_len;
1626 fore200e_vcc->tx_pdu++;
1628 /* set tx rate control information */
1629 tpd->rate.data_cells = fore200e_vcc->rate.data_cells;
1630 tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells;
1633 tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP);
1634 tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
1635 tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT;
1636 tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT;
1637 tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT;
1640 /* set the ATM header, common to all cells conveying the PDU */
1641 tpd->atm_header.clp = 0;
1642 tpd->atm_header.plt = 0;
1643 tpd->atm_header.vci = vcc->vci;
1644 tpd->atm_header.vpi = vcc->vpi;
1645 tpd->atm_header.gfc = 0;
1648 tpd->spec.length = tx_len;
1650 tpd->spec.aal = fore200e_atm2fore_aal(vcc->qos.aal);
1653 tpd_haddr.size = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT); /* size is expressed in 32 byte blocks */
1655 tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT; /* shift the address, as we are in a bitfield */
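/* the cp thus receives a single 32-bit word packing both the tpd size (in 32-byte
   units) and its DMA address shifted right by TPD_HADDR_SHIFT; this only works
   because tpds are allocated with descr_alignment = 32, so the address bits
   dropped by the shift are known to be zero (assuming 1 << TPD_HADDR_SHIFT == 32,
   as the size computation above implies). */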
1657 *entry->status = STATUS_PENDING;
1658 fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr);
1660 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1667 fore200e_getstats(struct fore200e* fore200e)
1669 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1670 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1671 struct stats_opcode opcode;
1675 if (fore200e->stats == NULL) {
1676 fore200e->stats = kzalloc(sizeof(struct stats), GFP_KERNEL);
1677 if (fore200e->stats == NULL)
1681 stats_dma_addr = dma_map_single(fore200e->dev, fore200e->stats,
1682 sizeof(struct stats), DMA_FROM_DEVICE);
1683 if (dma_mapping_error(fore200e->dev, stats_dma_addr))
1686 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1688 opcode.opcode = OPCODE_GET_STATS;
1691 fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr);
1693 *entry->status = STATUS_PENDING;
1695 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode);
1697 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1699 *entry->status = STATUS_FREE;
1701 dma_unmap_single(fore200e->dev, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE);
1704 printk(FORE200E "unable to get statistics from device %s\n", fore200e->name);
1711 #if 0 /* currently unused */
1713 fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs)
1715 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1716 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1717 struct oc3_opcode opcode;
1719 u32 oc3_regs_dma_addr;
1721 oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1723 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1725 opcode.opcode = OPCODE_GET_OC3;
1730 fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1732 *entry->status = STATUS_PENDING;
1734 fore200e->bus->write(*(u32*)&opcode, (u32*)&entry->cp_entry->cmd.oc3_block.opcode);
1736 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1738 *entry->status = STATUS_FREE;
1740 fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1743 printk(FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name);
1753 fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask)
1755 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1756 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1757 struct oc3_opcode opcode;
1760 DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask);
1762 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1764 opcode.opcode = OPCODE_SET_OC3;
1766 opcode.value = value;
1769 fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1771 *entry->status = STATUS_PENDING;
1773 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);
1775 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1777 *entry->status = STATUS_FREE;
1780 printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name);
1789 fore200e_setloop(struct fore200e* fore200e, int loop_mode)
1791 u32 mct_value, mct_mask;
1794 if (!capable(CAP_NET_ADMIN))
1797 switch (loop_mode) {
1801 mct_mask = SUNI_MCT_DLE | SUNI_MCT_LLE;
1804 case ATM_LM_LOC_PHY:
1805 mct_value = mct_mask = SUNI_MCT_DLE;
1808 case ATM_LM_RMT_PHY:
1809 mct_value = mct_mask = SUNI_MCT_LLE;
1816 error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask);
1818 fore200e->loop_mode = loop_mode;
1825 fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg)
1827 struct sonet_stats tmp;
1829 if (fore200e_getstats(fore200e) < 0)
1832 tmp.section_bip = be32_to_cpu(fore200e->stats->oc3.section_bip8_errors);
1833 tmp.line_bip = be32_to_cpu(fore200e->stats->oc3.line_bip24_errors);
1834 tmp.path_bip = be32_to_cpu(fore200e->stats->oc3.path_bip8_errors);
1835 tmp.line_febe = be32_to_cpu(fore200e->stats->oc3.line_febe_errors);
1836 tmp.path_febe = be32_to_cpu(fore200e->stats->oc3.path_febe_errors);
1837 tmp.corr_hcs = be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors);
1838 tmp.uncorr_hcs = be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors);
1839 tmp.tx_cells = be32_to_cpu(fore200e->stats->aal0.cells_transmitted) +
1840 be32_to_cpu(fore200e->stats->aal34.cells_transmitted) +
1841 be32_to_cpu(fore200e->stats->aal5.cells_transmitted);
1842 tmp.rx_cells = be32_to_cpu(fore200e->stats->aal0.cells_received) +
1843 be32_to_cpu(fore200e->stats->aal34.cells_received) +
1844 be32_to_cpu(fore200e->stats->aal5.cells_received);
1847 return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0;
1854 fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg)
1856 struct fore200e* fore200e = FORE200E_DEV(dev);
1858 DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg);
1863 return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg);
1866 return put_user(0, (int __user *)arg) ? -EFAULT : 0;
1869 return fore200e_setloop(fore200e, (int)(unsigned long)arg);
1872 return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0;
1875 return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ? -EFAULT : 0;
1878 return -ENOSYS; /* not implemented */
1883 fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags)
1885 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
1886 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1888 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1889 DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vci);
1893 DPRINTK(2, "change_qos %d.%d.%d, "
1894 "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1895 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n"
1896 "available_cell_rate = %u",
1897 vcc->itf, vcc->vpi, vcc->vci,
1898 fore200e_traffic_class[ qos->txtp.traffic_class ],
1899 qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu,
1900 fore200e_traffic_class[ qos->rxtp.traffic_class ],
1901 qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu,
1902 flags, fore200e->available_cell_rate);
1904 if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) {
1906 mutex_lock(&fore200e->rate_mtx);
1907 if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) {
1908 mutex_unlock(&fore200e->rate_mtx);
1912 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1913 fore200e->available_cell_rate -= qos->txtp.max_pcr;
1915 mutex_unlock(&fore200e->rate_mtx);
1917 memcpy(&vcc->qos, qos, sizeof(struct atm_qos));
1919 /* update rate control parameters */
1920 fore200e_rate_ctrl(qos, &fore200e_vcc->rate);
1922 set_bit(ATM_VF_HASQOS, &vcc->flags);
1931 static int fore200e_irq_request(struct fore200e *fore200e)
1933 if (request_irq(fore200e->irq, fore200e_interrupt, IRQF_SHARED, fore200e->name, fore200e->atm_dev) < 0) {
1935 printk(FORE200E "unable to reserve IRQ %s for device %s\n",
1936 fore200e_irq_itoa(fore200e->irq), fore200e->name);
1940 printk(FORE200E "IRQ %s reserved for device %s\n",
1941 fore200e_irq_itoa(fore200e->irq), fore200e->name);
1943 #ifdef FORE200E_USE_TASKLET
1944 tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e);
1945 tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e);
1948 fore200e->state = FORE200E_STATE_IRQ;
1953 static int fore200e_get_esi(struct fore200e *fore200e)
1955 struct prom_data* prom = kzalloc(sizeof(struct prom_data), GFP_KERNEL);
1961 ok = fore200e->bus->prom_read(fore200e, prom);
1967 printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %pM\n",
1969 (prom->hw_revision & 0xFF) + '@', /* probably meaningless with SBA boards */
1970 prom->serial_number & 0xFFFF, &prom->mac_addr[2]);
1972 for (i = 0; i < ESI_LEN; i++) {
1973 fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
1982 static int fore200e_alloc_rx_buf(struct fore200e *fore200e)
1984 int scheme, magn, nbr, size, i;
1986 struct host_bsq* bsq;
1987 struct buffer* buffer;
1989 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
1990 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
1992 bsq = &fore200e->host_bsq[ scheme ][ magn ];
1994 nbr = fore200e_rx_buf_nbr[ scheme ][ magn ];
1995 size = fore200e_rx_buf_size[ scheme ][ magn ];
1997 DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn);
1999 /* allocate the array of receive buffers */
2000 buffer = bsq->buffer = kcalloc(nbr, sizeof(struct buffer),
2006 bsq->freebuf = NULL;
2008 for (i = 0; i < nbr; i++) {
2010 buffer[ i ].scheme = scheme;
2011 buffer[ i ].magn = magn;
2012 #ifdef FORE200E_BSQ_DEBUG
2013 buffer[ i ].index = i;
2014 buffer[ i ].supplied = 0;
2017 /* allocate the receive buffer body */
2018 if (fore200e_chunk_alloc(fore200e,
2019 &buffer[ i ].data, size, fore200e->bus->buffer_alignment,
2020 DMA_FROM_DEVICE) < 0) {
2023 fore200e_chunk_free(fore200e, &buffer[ --i ].data);
2029 /* insert the buffer into the free buffer list */
2030 buffer[ i ].next = bsq->freebuf;
2031 bsq->freebuf = &buffer[ i ];
2033 /* all the buffers are free, initially */
2034 bsq->freebuf_count = nbr;
2036 #ifdef FORE200E_BSQ_DEBUG
2037 bsq_audit(3, bsq, scheme, magn);
2042 fore200e->state = FORE200E_STATE_ALLOC_BUF;
2047 static int fore200e_init_bs_queue(struct fore200e *fore200e)
2049 int scheme, magn, i;
2051 struct host_bsq* bsq;
2052 struct cp_bsq_entry __iomem * cp_entry;
2054 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2055 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2057 DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn);
2059 bsq = &fore200e->host_bsq[ scheme ][ magn ];
2061 /* allocate and align the array of status words */
2062 if (fore200e_dma_chunk_alloc(fore200e,
2064 sizeof(enum status),
2066 fore200e->bus->status_alignment) < 0) {
2070 /* allocate and align the array of receive buffer descriptors */
2071 if (fore200e_dma_chunk_alloc(fore200e,
2073 sizeof(struct rbd_block),
2075 fore200e->bus->descr_alignment) < 0) {
2077 fore200e_dma_chunk_free(fore200e, &bsq->status);
2081 /* get the base address of the cp resident buffer supply queue entries */
2082 cp_entry = fore200e->virt_base +
2083 fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]);
2085 /* fill the host resident and cp resident buffer supply queue entries */
2086 for (i = 0; i < QUEUE_SIZE_BS; i++) {
2088 bsq->host_entry[ i ].status =
2089 FORE200E_INDEX(bsq->status.align_addr, enum status, i);
2090 bsq->host_entry[ i ].rbd_block =
2091 FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i);
2092 bsq->host_entry[ i ].rbd_block_dma =
2093 FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i);
2094 bsq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2096 *bsq->host_entry[ i ].status = STATUS_FREE;
2098 fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i),
2099 &cp_entry[ i ].status_haddr);
2104 fore200e->state = FORE200E_STATE_INIT_BSQ;
2109 static int fore200e_init_rx_queue(struct fore200e *fore200e)
2111 struct host_rxq* rxq = &fore200e->host_rxq;
2112 struct cp_rxq_entry __iomem * cp_entry;
2115 DPRINTK(2, "receive queue is being initialized\n");
2117 /* allocate and align the array of status words */
2118 if (fore200e_dma_chunk_alloc(fore200e,
2120 sizeof(enum status),
2122 fore200e->bus->status_alignment) < 0) {
2126 /* allocate and align the array of receive PDU descriptors */
2127 if (fore200e_dma_chunk_alloc(fore200e,
2131 fore200e->bus->descr_alignment) < 0) {
2133 fore200e_dma_chunk_free(fore200e, &rxq->status);
2137 /* get the base address of the cp resident rx queue entries */
2138 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq);
2140 /* fill the host resident and cp resident rx entries */
2141 for (i=0; i < QUEUE_SIZE_RX; i++) {
2143 rxq->host_entry[ i ].status =
2144 FORE200E_INDEX(rxq->status.align_addr, enum status, i);
2145 rxq->host_entry[ i ].rpd =
2146 FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
2147 rxq->host_entry[ i ].rpd_dma =
2148 FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
2149 rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2151 *rxq->host_entry[ i ].status = STATUS_FREE;
2153 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i),
2154 &cp_entry[ i ].status_haddr);
2156 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
2157 &cp_entry[ i ].rpd_haddr);
2160 /* set the head entry of the queue */
2163 fore200e->state = FORE200E_STATE_INIT_RXQ;
2168 static int fore200e_init_tx_queue(struct fore200e *fore200e)
2170 struct host_txq* txq = &fore200e->host_txq;
2171 struct cp_txq_entry __iomem * cp_entry;
2174 DPRINTK(2, "transmit queue is being initialized\n");
2176 /* allocate and align the array of status words */
2177 if (fore200e_dma_chunk_alloc(fore200e,
2179 sizeof(enum status),
2181 fore200e->bus->status_alignment) < 0) {
2185 /* allocate and align the array of transmit PDU descriptors */
2186 if (fore200e_dma_chunk_alloc(fore200e,
2190 fore200e->bus->descr_alignment) < 0) {
2192 fore200e_dma_chunk_free(fore200e, &txq->status);
2196 /* get the base address of the cp resident tx queue entries */
2197 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_txq);
2199 /* fill the host resident and cp resident tx entries */
2200 for (i=0; i < QUEUE_SIZE_TX; i++) {
2202 txq->host_entry[ i ].status =
2203 FORE200E_INDEX(txq->status.align_addr, enum status, i);
2204 txq->host_entry[ i ].tpd =
2205 FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i);
2206 txq->host_entry[ i ].tpd_dma =
2207 FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i);
2208 txq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2210 *txq->host_entry[ i ].status = STATUS_FREE;
2212 fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i),
2213 &cp_entry[ i ].status_haddr);
2215 /* although there is a one-to-one mapping of tx queue entries and tpds,
2216 we do not write here the DMA (physical) base address of each tpd into
2217 the related cp resident entry, because the cp relies on this write
2218 operation to detect that a new pdu has been submitted for tx */
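/* see fore200e_send(): the write of tpd_haddr into the cp entry, done at
   submission time, is what acts as the "doorbell" for each tx pdu. */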
2221 /* set the head and tail entries of the queue */
2225 fore200e->state = FORE200E_STATE_INIT_TXQ;
2230 static int fore200e_init_cmd_queue(struct fore200e *fore200e)
2232 struct host_cmdq* cmdq = &fore200e->host_cmdq;
2233 struct cp_cmdq_entry __iomem * cp_entry;
2236 DPRINTK(2, "command queue is being initialized\n");
2238 /* allocate and align the array of status words */
2239 if (fore200e_dma_chunk_alloc(fore200e,
2241 sizeof(enum status),
2243 fore200e->bus->status_alignment) < 0) {
2247 /* get the base address of the cp resident cmd queue entries */
2248 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq);
2250 /* fill the host resident and cp resident cmd entries */
2251 for (i=0; i < QUEUE_SIZE_CMD; i++) {
2253 cmdq->host_entry[ i ].status =
2254 FORE200E_INDEX(cmdq->status.align_addr, enum status, i);
2255 cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2257 *cmdq->host_entry[ i ].status = STATUS_FREE;
2259 fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i),
2260 &cp_entry[ i ].status_haddr);
2263 /* set the head entry of the queue */
2266 fore200e->state = FORE200E_STATE_INIT_CMDQ;
2271 static void fore200e_param_bs_queue(struct fore200e *fore200e,
2272 enum buffer_scheme scheme,
2273 enum buffer_magn magn, int queue_length,
2274 int pool_size, int supply_blksize)
2276 struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];
2278 fore200e->bus->write(queue_length, &bs_spec->queue_length);
2279 fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size);
2280 fore200e->bus->write(pool_size, &bs_spec->pool_size);
2281 fore200e->bus->write(supply_blksize, &bs_spec->supply_blksize);
2285 static int fore200e_initialize(struct fore200e *fore200e)
2287 struct cp_queues __iomem * cpq;
2288 int ok, scheme, magn;
2290 DPRINTK(2, "device %s being initialized\n", fore200e->name);
2292 mutex_init(&fore200e->rate_mtx);
2293 spin_lock_init(&fore200e->q_lock);
2295 cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;
2297 /* enable cp to host interrupts */
2298 fore200e->bus->write(1, &cpq->imask);
2300 if (fore200e->bus->irq_enable)
2301 fore200e->bus->irq_enable(fore200e);
2303 fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect);
2305 fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len);
2306 fore200e->bus->write(QUEUE_SIZE_RX, &cpq->init.rx_queue_len);
2307 fore200e->bus->write(QUEUE_SIZE_TX, &cpq->init.tx_queue_len);
2309 fore200e->bus->write(RSD_EXTENSION, &cpq->init.rsd_extension);
2310 fore200e->bus->write(TSD_EXTENSION, &cpq->init.tsd_extension);
2312 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++)
2313 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++)
2314 fore200e_param_bs_queue(fore200e, scheme, magn,
2316 fore200e_rx_buf_nbr[ scheme ][ magn ],
2319 /* issue the initialize command */
2320 fore200e->bus->write(STATUS_PENDING, &cpq->init.status);
2321 fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode);
2323 ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000);
2325 printk(FORE200E "device %s initialization failed\n", fore200e->name);
2329 printk(FORE200E "device %s initialized\n", fore200e->name);
2331 fore200e->state = FORE200E_STATE_INITIALIZE;
2336 static void fore200e_monitor_putc(struct fore200e *fore200e, char c)
2338 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2343 fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send);
2347 static int fore200e_monitor_getc(struct fore200e *fore200e)
2349 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2350 unsigned long timeout = jiffies + msecs_to_jiffies(50);
2353 while (time_before(jiffies, timeout)) {
2355 c = (int) fore200e->bus->read(&monitor->soft_uart.recv);
2357 if (c & FORE200E_CP_MONITOR_UART_AVAIL) {
2359 fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv);
2361 printk("%c", c & 0xFF);
2371 static void fore200e_monitor_puts(struct fore200e *fore200e, char *str)
2375 /* the i960 monitor doesn't accept any new character if it has something to say */
2376 while (fore200e_monitor_getc(fore200e) >= 0);
2378 fore200e_monitor_putc(fore200e, *str++);
2381 while (fore200e_monitor_getc(fore200e) >= 0);
2384 #ifdef __LITTLE_ENDIAN
2385 #define FW_EXT ".bin"
2387 #define FW_EXT "_ecd.bin2"
2390 static int fore200e_load_and_start_fw(struct fore200e *fore200e)
2392 const struct firmware *firmware;
2393 const struct fw_header *fw_header;
2394 const __le32 *fw_data;
2396 u32 __iomem *load_addr;
2400 sprintf(buf, "%s%s", fore200e->bus->proc_name, FW_EXT);
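/* the resulting name depends on the bus and on FW_EXT, e.g. "pca200e.bin" on
   little-endian hosts, or "pca200e_ecd.bin2" / "sba200e_ecd.bin2" otherwise;
   see the MODULE_FIRMWARE tags at the end of this file */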
2401 if ((err = request_firmware(&firmware, buf, fore200e->dev)) < 0) {
2402 printk(FORE200E "problem loading firmware image %s\n", buf);
2406 fw_data = (const __le32 *)firmware->data;
2407 fw_size = firmware->size / sizeof(u32);
2408 fw_header = (const struct fw_header *)firmware->data;
2409 load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset);
2411 DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n",
2412 fore200e->name, load_addr, fw_size);
2414 if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) {
2415 printk(FORE200E "corrupted %s firmware image\n", fore200e->bus->model_name);
2419 for (; fw_size--; fw_data++, load_addr++)
2420 fore200e->bus->write(le32_to_cpu(*fw_data), load_addr);
2422 DPRINTK(2, "device %s firmware being started\n", fore200e->name);
2424 #if defined(__sparc_v9__)
2425 /* reported to be required by SBA cards on some sparc64 hosts */
2429 sprintf(buf, "\rgo %x\r", le32_to_cpu(fw_header->start_offset));
2430 fore200e_monitor_puts(fore200e, buf);
2432 if (fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000) == 0) {
2433 printk(FORE200E "device %s firmware didn't start\n", fore200e->name);
2437 printk(FORE200E "device %s firmware started\n", fore200e->name);
2439 fore200e->state = FORE200E_STATE_START_FW;
2443 release_firmware(firmware);
2448 static int fore200e_register(struct fore200e *fore200e, struct device *parent)
2450 struct atm_dev* atm_dev;
2452 DPRINTK(2, "device %s being registered\n", fore200e->name);
2454 atm_dev = atm_dev_register(fore200e->bus->proc_name, parent, &fore200e_ops,
2456 if (atm_dev == NULL) {
2457 printk(FORE200E "unable to register device %s\n", fore200e->name);
2461 atm_dev->dev_data = fore200e;
2462 fore200e->atm_dev = atm_dev;
2464 atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS;
2465 atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS;
2467 fore200e->available_cell_rate = ATM_OC3_PCR;
2469 fore200e->state = FORE200E_STATE_REGISTER;
2474 static int fore200e_init(struct fore200e *fore200e, struct device *parent)
2476 if (fore200e_register(fore200e, parent) < 0)
2479 if (fore200e->bus->configure(fore200e) < 0)
2482 if (fore200e->bus->map(fore200e) < 0)
2485 if (fore200e_reset(fore200e, 1) < 0)
2488 if (fore200e_load_and_start_fw(fore200e) < 0)
2491 if (fore200e_initialize(fore200e) < 0)
2494 if (fore200e_init_cmd_queue(fore200e) < 0)
2497 if (fore200e_init_tx_queue(fore200e) < 0)
2500 if (fore200e_init_rx_queue(fore200e) < 0)
2503 if (fore200e_init_bs_queue(fore200e) < 0)
2506 if (fore200e_alloc_rx_buf(fore200e) < 0)
2509 if (fore200e_get_esi(fore200e) < 0)
2512 if (fore200e_irq_request(fore200e) < 0)
2515 fore200e_supply(fore200e);
2517 /* all done, board initialization is now complete */
2518 fore200e->state = FORE200E_STATE_COMPLETE;
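/* each step above records its progress in fore200e->state; when one of them
   fails, the probe routines below call fore200e_shutdown(), which is keyed
   off that state so that only the steps actually completed are unwound */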
2523 static const struct of_device_id fore200e_sba_match[];
2524 static int fore200e_sba_probe(struct platform_device *op)
2526 const struct of_device_id *match;
2527 struct fore200e *fore200e;
2528 static int index = 0;
2531 match = of_match_device(fore200e_sba_match, &op->dev);
2535 fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
2539 fore200e->bus = &fore200e_sbus_ops;
2540 fore200e->dev = &op->dev;
2541 fore200e->irq = op->archdata.irqs[0];
2542 fore200e->phys_base = op->resource[0].start;
2544 sprintf(fore200e->name, "SBA-200E-%d", index);
2546 err = fore200e_init(fore200e, &op->dev);
2548 fore200e_shutdown(fore200e);
2554 dev_set_drvdata(&op->dev, fore200e);
2559 static int fore200e_sba_remove(struct platform_device *op)
2561 struct fore200e *fore200e = dev_get_drvdata(&op->dev);
2563 fore200e_shutdown(fore200e);
2569 static const struct of_device_id fore200e_sba_match[] = {
2571 .name = SBA200E_PROM_NAME,
2575 MODULE_DEVICE_TABLE(of, fore200e_sba_match);
2577 static struct platform_driver fore200e_sba_driver = {
2579 .name = "fore_200e",
2580 .of_match_table = fore200e_sba_match,
2582 .probe = fore200e_sba_probe,
2583 .remove = fore200e_sba_remove,
2588 static int fore200e_pca_detect(struct pci_dev *pci_dev,
2589 const struct pci_device_id *pci_ent)
2591 struct fore200e* fore200e;
2593 static int index = 0;
2595 if (pci_enable_device(pci_dev)) {
2600 if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32))) {
2605 fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
2606 if (fore200e == NULL) {
2611 fore200e->bus = &fore200e_pci_ops;
2612 fore200e->dev = &pci_dev->dev;
2613 fore200e->irq = pci_dev->irq;
2614 fore200e->phys_base = pci_resource_start(pci_dev, 0);
2618 pci_set_master(pci_dev);
2620 printk(FORE200E "device PCA-200E found at 0x%lx, IRQ %s\n",
2621 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
2623 sprintf(fore200e->name, "PCA-200E-%d", index);
2625 err = fore200e_init(fore200e, &pci_dev->dev);
2627 fore200e_shutdown(fore200e);
2632 pci_set_drvdata(pci_dev, fore200e);
2640 pci_disable_device(pci_dev);
2645 static void fore200e_pca_remove_one(struct pci_dev *pci_dev)
2647 struct fore200e *fore200e;
2649 fore200e = pci_get_drvdata(pci_dev);
2651 fore200e_shutdown(fore200e);
2653 pci_disable_device(pci_dev);
2657 static const struct pci_device_id fore200e_pca_tbl[] = {
2658 { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID },
2662 MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl);
2664 static struct pci_driver fore200e_pca_driver = {
2665 .name = "fore_200e",
2666 .probe = fore200e_pca_detect,
2667 .remove = fore200e_pca_remove_one,
2668 .id_table = fore200e_pca_tbl,
2672 static int __init fore200e_module_init(void)
2676 printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n");
2679 err = platform_driver_register(&fore200e_sba_driver);
2685 err = pci_register_driver(&fore200e_pca_driver);
2690 platform_driver_unregister(&fore200e_sba_driver);
2696 static void __exit fore200e_module_cleanup(void)
2699 pci_unregister_driver(&fore200e_pca_driver);
2702 platform_driver_unregister(&fore200e_sba_driver);
2707 fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
2709 struct fore200e* fore200e = FORE200E_DEV(dev);
2710 struct fore200e_vcc* fore200e_vcc;
2711 struct atm_vcc* vcc;
2712 int i, len, left = *pos;
2713 unsigned long flags;
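/* the ATM proc layer calls this handler repeatedly with an increasing *pos;
   'left' counts down so that each call emits one of the blocks below, and a
   return value of 0 terminates the output */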
2717 if (fore200e_getstats(fore200e) < 0)
2720 len = sprintf(page,"\n"
2722 " internal name:\t\t%s\n", fore200e->name);
2724 /* print bus-specific information */
2725 if (fore200e->bus->proc_read)
2726 len += fore200e->bus->proc_read(fore200e, page + len);
2728 len += sprintf(page + len,
2729 " interrupt line:\t\t%s\n"
2730 " physical base address:\t0x%p\n"
2731 " virtual base address:\t0x%p\n"
2732 " factory address (ESI):\t%pM\n"
2733 " board serial number:\t\t%d\n\n",
2734 fore200e_irq_itoa(fore200e->irq),
2735 (void*)fore200e->phys_base,
2736 fore200e->virt_base,
2738 fore200e->esi[4] * 256 + fore200e->esi[5]);
2744 return sprintf(page,
2745 " free small bufs, scheme 1:\t%d\n"
2746 " free large bufs, scheme 1:\t%d\n"
2747 " free small bufs, scheme 2:\t%d\n"
2748 " free large bufs, scheme 2:\t%d\n",
2749 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count,
2750 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count,
2751 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count,
2752 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count);
2755 u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat);
2757 len = sprintf(page,"\n\n"
2758 " cell processor:\n"
2759 " heartbeat state:\t\t");
2761 if (hb >> 16 != 0xDEAD)
2762 len += sprintf(page + len, "0x%08x\n", hb);
2764 len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF);
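/* i.e. a healthy cp reports an ordinary heartbeat value here, while a
   firmware fatal condition is flagged by 0xDEAD in the upper half-word with
   an error code in the lower half-word */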
2770 static const char* media_name[] = {
2771 "unshielded twisted pair",
2772 "multimode optical fiber ST",
2773 "multimode optical fiber SC",
2774 "single-mode optical fiber ST",
2775 "single-mode optical fiber SC",
2779 static const char* oc3_mode[] = {
2781 "diagnostic loopback",
2786 u32 fw_release = fore200e->bus->read(&fore200e->cp_queues->fw_release);
2787 u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release);
2788 u32 oc3_revision = fore200e->bus->read(&fore200e->cp_queues->oc3_revision);
2789 u32 media_index = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
2792 if (media_index > 4)
2795 switch (fore200e->loop_mode) {
2796 case ATM_LM_NONE: oc3_index = 0; break;
2798 case ATM_LM_LOC_PHY: oc3_index = 1; break;
2800 case ATM_LM_RMT_PHY: oc3_index = 2; break;
2802 default: oc3_index = 3;
2805 return sprintf(page,
2806 " firmware release:\t\t%d.%d.%d\n"
2807 " monitor release:\t\t%d.%d\n"
2808 " media type:\t\t\t%s\n"
2809 " OC-3 revision:\t\t0x%x\n"
2810 " OC-3 mode:\t\t\t%s",
2811 fw_release >> 16, fw_release << 16 >> 24, fw_release << 24 >> 24,
2812 mon960_release >> 16, mon960_release << 16 >> 16,
2813 media_name[ media_index ],
2815 oc3_mode[ oc3_index ]);
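/* worked example of the shift decoding above: a fw_release word of 0x00040100
   would print as firmware release 4.1.0 (major in bits 31-16, minor in bits
   15-8, micro in bits 7-0); mon960_release splits simply into 16-bit halves */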
2819 struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor;
2821 return sprintf(page,
2824 " version number:\t\t%d\n"
2825 " boot status word:\t\t0x%08x\n",
2826 fore200e->bus->read(&cp_monitor->mon_version),
2827 fore200e->bus->read(&cp_monitor->bstat));
2831 return sprintf(page,
2833 " device statistics:\n"
2835 " crc_header_errors:\t\t%10u\n"
2836 " framing_errors:\t\t%10u\n",
2837 be32_to_cpu(fore200e->stats->phy.crc_header_errors),
2838 be32_to_cpu(fore200e->stats->phy.framing_errors));
2841 return sprintf(page, "\n"
2843 " section_bip8_errors:\t%10u\n"
2844 " path_bip8_errors:\t\t%10u\n"
2845 " line_bip24_errors:\t\t%10u\n"
2846 " line_febe_errors:\t\t%10u\n"
2847 " path_febe_errors:\t\t%10u\n"
2848 " corr_hcs_errors:\t\t%10u\n"
2849 " ucorr_hcs_errors:\t\t%10u\n",
2850 be32_to_cpu(fore200e->stats->oc3.section_bip8_errors),
2851 be32_to_cpu(fore200e->stats->oc3.path_bip8_errors),
2852 be32_to_cpu(fore200e->stats->oc3.line_bip24_errors),
2853 be32_to_cpu(fore200e->stats->oc3.line_febe_errors),
2854 be32_to_cpu(fore200e->stats->oc3.path_febe_errors),
2855 be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors),
2856 be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors));
2859 return sprintf(page,"\n"
2860 " ATM:\t\t\t\t cells\n"
2863 " vpi out of range:\t\t%10u\n"
2864 " vpi no conn:\t\t%10u\n"
2865 " vci out of range:\t\t%10u\n"
2866 " vci no conn:\t\t%10u\n",
2867 be32_to_cpu(fore200e->stats->atm.cells_transmitted),
2868 be32_to_cpu(fore200e->stats->atm.cells_received),
2869 be32_to_cpu(fore200e->stats->atm.vpi_bad_range),
2870 be32_to_cpu(fore200e->stats->atm.vpi_no_conn),
2871 be32_to_cpu(fore200e->stats->atm.vci_bad_range),
2872 be32_to_cpu(fore200e->stats->atm.vci_no_conn));
2875 return sprintf(page,"\n"
2876 " AAL0:\t\t\t cells\n"
2879 " dropped:\t\t\t%10u\n",
2880 be32_to_cpu(fore200e->stats->aal0.cells_transmitted),
2881 be32_to_cpu(fore200e->stats->aal0.cells_received),
2882 be32_to_cpu(fore200e->stats->aal0.cells_dropped));
2885 return sprintf(page,"\n"
2887 " SAR sublayer:\t\t cells\n"
2890 " dropped:\t\t\t%10u\n"
2891 " CRC errors:\t\t%10u\n"
2892 " protocol errors:\t\t%10u\n\n"
2893 " CS sublayer:\t\t PDUs\n"
2896 " dropped:\t\t\t%10u\n"
2897 " protocol errors:\t\t%10u\n",
2898 be32_to_cpu(fore200e->stats->aal34.cells_transmitted),
2899 be32_to_cpu(fore200e->stats->aal34.cells_received),
2900 be32_to_cpu(fore200e->stats->aal34.cells_dropped),
2901 be32_to_cpu(fore200e->stats->aal34.cells_crc_errors),
2902 be32_to_cpu(fore200e->stats->aal34.cells_protocol_errors),
2903 be32_to_cpu(fore200e->stats->aal34.cspdus_transmitted),
2904 be32_to_cpu(fore200e->stats->aal34.cspdus_received),
2905 be32_to_cpu(fore200e->stats->aal34.cspdus_dropped),
2906 be32_to_cpu(fore200e->stats->aal34.cspdus_protocol_errors));
2909 return sprintf(page,"\n"
2911 " SAR sublayer:\t\t cells\n"
2914 " dropped:\t\t\t%10u\n"
2915 " congestions:\t\t%10u\n\n"
2916 " CS sublayer:\t\t PDUs\n"
2919 " dropped:\t\t\t%10u\n"
2920 " CRC errors:\t\t%10u\n"
2921 " protocol errors:\t\t%10u\n",
2922 be32_to_cpu(fore200e->stats->aal5.cells_transmitted),
2923 be32_to_cpu(fore200e->stats->aal5.cells_received),
2924 be32_to_cpu(fore200e->stats->aal5.cells_dropped),
2925 be32_to_cpu(fore200e->stats->aal5.congestion_experienced),
2926 be32_to_cpu(fore200e->stats->aal5.cspdus_transmitted),
2927 be32_to_cpu(fore200e->stats->aal5.cspdus_received),
2928 be32_to_cpu(fore200e->stats->aal5.cspdus_dropped),
2929 be32_to_cpu(fore200e->stats->aal5.cspdus_crc_errors),
2930 be32_to_cpu(fore200e->stats->aal5.cspdus_protocol_errors));
2933 return sprintf(page,"\n"
2934 " AUX:\t\t allocation failures\n"
2935 " small b1:\t\t\t%10u\n"
2936 " large b1:\t\t\t%10u\n"
2937 " small b2:\t\t\t%10u\n"
2938 " large b2:\t\t\t%10u\n"
2939 " RX PDUs:\t\t\t%10u\n"
2940 " TX PDUs:\t\t\t%10lu\n",
2941 be32_to_cpu(fore200e->stats->aux.small_b1_failed),
2942 be32_to_cpu(fore200e->stats->aux.large_b1_failed),
2943 be32_to_cpu(fore200e->stats->aux.small_b2_failed),
2944 be32_to_cpu(fore200e->stats->aux.large_b2_failed),
2945 be32_to_cpu(fore200e->stats->aux.rpd_alloc_failed),
2949 return sprintf(page,"\n"
2950 " receive carrier:\t\t\t%s\n",
2951 fore200e->stats->aux.receive_carrier ? "ON" : "OFF!");
2954 return sprintf(page,"\n"
2955 " VCCs:\n address VPI VCI AAL "
2956 "TX PDUs TX min/max size RX PDUs RX min/max size\n");
2959 for (i = 0; i < NBR_CONNECT; i++) {
2961 vcc = fore200e->vc_map[i].vcc;
2966 spin_lock_irqsave(&fore200e->q_lock, flags);
2968 if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) {
2970 fore200e_vcc = FORE200E_VCC(vcc);
2971 ASSERT(fore200e_vcc);
2974 " %pK %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n",
2976 vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
2977 fore200e_vcc->tx_pdu,
2978 fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
2979 fore200e_vcc->tx_max_pdu,
2980 fore200e_vcc->rx_pdu,
2981 fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu,
2982 fore200e_vcc->rx_max_pdu);
2984 spin_unlock_irqrestore(&fore200e->q_lock, flags);
2988 spin_unlock_irqrestore(&fore200e->q_lock, flags);
2994 module_init(fore200e_module_init);
2995 module_exit(fore200e_module_cleanup);
2998 static const struct atmdev_ops fore200e_ops = {
2999 .open = fore200e_open,
3000 .close = fore200e_close,
3001 .ioctl = fore200e_ioctl,
3002 .send = fore200e_send,
3003 .change_qos = fore200e_change_qos,
3004 .proc_read = fore200e_proc_read,
3005 .owner = THIS_MODULE
3008 MODULE_LICENSE("GPL");
3010 #ifdef __LITTLE_ENDIAN /* keep consistent with the FW_EXT selection above */
3011 MODULE_FIRMWARE("pca200e.bin");
3013 MODULE_FIRMWARE("pca200e_ecd.bin2");
3015 #endif /* CONFIG_PCI */
3017 MODULE_FIRMWARE("sba200e_ecd.bin2");