ForeRunnerHE ATM Adapter driver for ATM on Linux
Copyright (C) 1999-2001  Naval Research Laboratory

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

ForeRunnerHE ATM Adapter driver for ATM on Linux
Copyright (C) 1999-2001  Naval Research Laboratory

Permission to use, copy, modify and distribute this software and its
documentation is hereby granted, provided that both the copyright
notice and this permission notice appear in all copies of the software,
derivative works or modified versions, and any portions thereof, and
that both notices appear in supporting documentation.

NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
RESULTING FROM THE USE OF THIS SOFTWARE.

This driver was written using the "Programmer's Reference Manual for
ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.

chas williams <chas@cmf.nrl.navy.mil>
eric kinzie <ekinzie@cmf.nrl.navy.mil>

4096 supported 'connections'
group 0 is used for all traffic
interrupt queue 0 is used for all interrupts
aal0 support (based on work from ulrich.u.muller@nokia.com)
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/slab.h>

#include <asm/byteorder.h>
#include <asm/uaccess.h>

#include <linux/atmdev.h>
#include <linux/atm.h>
#include <linux/sonet.h>

#undef USE_SCATTERGATHER
#undef USE_CHECKSUM_HW			/* still confused about this */

#include <linux/atm_he.h>
#define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)

#ifdef HE_DEBUG
#define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
#else /* !HE_DEBUG */
#define HPRINTK(fmt,args...)	do { } while (0)
#endif /* HE_DEBUG */
static int he_open(struct atm_vcc *vcc);
static void he_close(struct atm_vcc *vcc);
static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
static irqreturn_t he_irq_handler(int irq, void *dev_id);
static void he_tasklet(unsigned long data);
static int he_proc_read(struct atm_dev *dev, loff_t *pos, char *page);
static int he_start(struct atm_dev *dev);
static void he_stop(struct he_dev *dev);
static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
static unsigned char he_phy_get(struct atm_dev *, unsigned long);

static u8 read_prom_byte(struct he_dev *he_dev, int addr);

static struct he_dev *he_devs;
static bool disable64;
static short nvpibits = -1;
static short nvcibits = -1;
static short rx_skb_reserve = 16;
static bool irq_coalesce = true;
/* Read from EEPROM = 0000 0011b */
static unsigned int readtab[] = {
	CLK_HIGH | SI_HIGH,	/* 1 */
	CLK_HIGH | SI_HIGH	/* 1 */
};

/* Clock to read from/write to the EEPROM */
static unsigned int clocktab[] = {
static struct atmdev_ops he_ops =
{
	.phy_put =	he_phy_put,
	.phy_get =	he_phy_get,
	.proc_read =	he_proc_read,
};
#define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
#define he_readl(dev, reg)		readl((dev)->membase + (reg))

/* section 2.12 connection memory access */

static __inline__ void
he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
		   unsigned flags)
{
	he_writel(he_dev, val, CON_DAT);
	(void) he_readl(he_dev, CON_DAT);	/* flush posted writes */
	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
}
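/*
 * usage sketch (illustrative only): the he_writel_tcm/_rcm/_mbox wrappers
 * below all funnel through the handshake above -- stage the value in
 * CON_DAT, flush it, kick CON_CTL with the target memory space and
 * address, then spin until the adapter clears CON_CTL_BUSY.  for example,
 *
 *	he_writel_tcm(he_dev, 0x0, CONFIG_TSRA | (cid << 3));
 *
 * zeroes transmit state register 0 for a given cid.
 */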
#define he_writel_rcm(dev, val, reg) \
			he_writel_internal(dev, val, reg, CON_CTL_RCM)

#define he_writel_tcm(dev, val, reg) \
			he_writel_internal(dev, val, reg, CON_CTL_TCM)

#define he_writel_mbox(dev, val, reg) \
			he_writel_internal(dev, val, reg, CON_CTL_MBOX)

static unsigned
he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
{
	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
	return he_readl(he_dev, CON_DAT);
}

#define he_readl_rcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_RCM)

#define he_readl_tcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_TCM)

#define he_readl_mbox(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_MBOX)

/* figure 2.2 connection id */

#define he_mkcid(dev, vpi, vci)		(((vpi << (dev)->vcibits) | vci) & 0x1fff)
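/*
 * worked example (values chosen for illustration): with vcibits = 10,
 * vpi = 2 and vci = 5 give cid = ((2 << 10) | 5) & 0x1fff = 0x805; the
 * mask keeps every cid inside the adapter's 8k-entry connection space.
 */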
/* 2.5.1 per connection transmit state registers */

#define he_writel_tsr0(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
#define he_readl_tsr0(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)

#define he_writel_tsr1(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)

#define he_writel_tsr2(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)

#define he_writel_tsr3(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)

#define he_writel_tsr4(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)

/*
 * NOTE While the transmit connection is active, bits 23 through 0
 *	of this register must not be written by the host.  Byte
 *	enables should be used during normal operation when writing
 *	the most significant byte.
 */

#define he_writel_tsr4_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

#define he_readl_tsr4(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)

#define he_writel_tsr5(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)

#define he_writel_tsr6(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)

#define he_writel_tsr7(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)

#define he_writel_tsr8(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)

#define he_writel_tsr9(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)

#define he_writel_tsr10(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)

#define he_writel_tsr11(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)

#define he_writel_tsr12(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)

#define he_writel_tsr13(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)

#define he_writel_tsr14(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRD | cid)

#define he_writel_tsr14_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)
/* 2.7.1 per connection receive state registers */

#define he_writel_rsr0(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
#define he_readl_rsr0(dev, cid) \
		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)

#define he_writel_rsr1(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)

#define he_writel_rsr2(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)

#define he_writel_rsr3(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)

#define he_writel_rsr4(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)

#define he_writel_rsr5(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)

#define he_writel_rsr6(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)

#define he_writel_rsr7(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)
static __inline__ struct atm_vcc*
__find_vcc(struct he_dev *he_dev, unsigned cid)
{
	struct hlist_head *head;
	struct atm_vcc *vcc;
	struct hlist_node *node;
	struct sock *s;
	short vpi;
	int vci;

	vpi = cid >> he_dev->vcibits;
	vci = cid & ((1 << he_dev->vcibits) - 1);
	head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];

	sk_for_each(s, node, head) {
		vcc = atm_sk(s);
		if (vcc->dev == he_dev->atm_dev &&
		    vcc->vci == vci && vcc->vpi == vpi &&
		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
			return vcc;
		}
	}

	return NULL;
}
static int he_init_one(struct pci_dev *pci_dev,
		       const struct pci_device_id *pci_ent)
{
	struct atm_dev *atm_dev = NULL;
	struct he_dev *he_dev = NULL;
	int err = 0;

	printk(KERN_INFO "ATM he driver\n");

	if (pci_enable_device(pci_dev))
		return -EIO;
	if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_WARNING "he: no suitable dma available\n");
		err = -EIO;
		goto init_one_failure;
	}

	atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &he_ops, -1, NULL);
	if (!atm_dev) {
		err = -ENODEV;
		goto init_one_failure;
	}
	pci_set_drvdata(pci_dev, atm_dev);

	he_dev = kzalloc(sizeof(struct he_dev),
			 GFP_KERNEL);
	if (!he_dev) {
		err = -ENOMEM;
		goto init_one_failure;
	}
	he_dev->pci_dev = pci_dev;
	he_dev->atm_dev = atm_dev;
	he_dev->atm_dev->dev_data = he_dev;
	atm_dev->dev_data = he_dev;
	he_dev->number = atm_dev->number;
	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
	spin_lock_init(&he_dev->global_lock);

	if (he_start(atm_dev)) {
		he_stop(he_dev);
		err = -ENODEV;
		goto init_one_failure;
	}

	he_dev->next = he_devs;
	he_devs = he_dev;
	return 0;

init_one_failure:
	if (atm_dev)
		atm_dev_deregister(atm_dev);
	kfree(he_dev);
	pci_disable_device(pci_dev);
	return err;
}
static void he_remove_one(struct pci_dev *pci_dev)
{
	struct atm_dev *atm_dev;
	struct he_dev *he_dev;

	atm_dev = pci_get_drvdata(pci_dev);
	he_dev = HE_DEV(atm_dev);

	/* need to remove from he_devs */

	he_stop(he_dev);
	atm_dev_deregister(atm_dev);
	kfree(he_dev);

	pci_set_drvdata(pci_dev, NULL);
	pci_disable_device(pci_dev);
}
static unsigned
rate_to_atmf(unsigned rate)		/* cps to atm forum format */
{
#define NONZERO (1 << 14)

	unsigned exp = 0;

	if (rate == 0)
		return 0;

	rate <<= 9;
	while (rate > 0x3ff) {
		++exp;
		rate >>= 1;
	}

	return (NONZERO | (exp << 9) | (rate & 0x1ff));
}
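/*
 * worked example: the atm forum format packs a rate as a 5-bit exponent
 * and a 9-bit mantissa with an implied leading one, i.e. approximately
 * (512 + man) << exp >> 9 cps (see the decode in he_init_cs_block_rcm()).
 * encoding ATM_OC3_PCR (353207 cps) yields exp = 18, man = 177, giving
 * NONZERO | (18 << 9) | 177 = 0x64b1 -- the same constant the oc3 path
 * of he_init_cs_block() programs into CS_WCRMAX.
 */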
static void he_init_rx_lbfp0(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;

	lbufd_index = 0;
	lbm_offset = he_readl(he_dev, RCMLBM_BA);

	he_writel(he_dev, lbufd_index, RLBF0_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}
static void he_init_rx_lbfp1(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;

	lbufd_index = he_dev->r0_numbuffs;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, RLBF1_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
}
static void he_init_tx_lbfp(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;

	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, TLBF_H);

	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
		lbufd_index += 1;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 2;
	}

	he_writel(he_dev, lbufd_index - 1, TLBF_T);
}
static int he_init_tpdrq(struct he_dev *he_dev)
{
	he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
	if (he_dev->tpdrq_base == NULL) {
		hprintk("failed to alloc tpdrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tpdrq_base, 0,
	       CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));

	he_dev->tpdrq_tail = he_dev->tpdrq_base;
	he_dev->tpdrq_head = he_dev->tpdrq_base;

	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
	he_writel(he_dev, 0, TPDRQ_T);
	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);

	return 0;
}
static void he_init_cs_block(struct he_dev *he_dev)
{
	unsigned clock, rate, delta;
	int reg;

	/* 5.1.7 cs block initialization */

	for (reg = 0; reg < 0x20; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

	/* rate grid timer reload values */

	clock = he_is622(he_dev) ? 66667000 : 50000000;
	rate = he_dev->atm_dev->link_rate;
	delta = rate / 16 / 2;
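	/*
	 * worked example for the first grid entry on an oc3 card:
	 * clock = 50000000 and rate starts at the link rate (353207 cps),
	 * so the loop below programs period = 50000000 / 353207 ~= 141
	 * clock cycles between cells, then steps rate down by delta.
	 */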
	for (reg = 0; reg < 0x10; ++reg) {
		/* 2.4 internal transmit function
		 *
		 * we initialize the first row in the rate grid.
		 * values are period (in clock cycles) of timer
		 */
		unsigned period = clock / rate;

		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
		rate -= delta;
	}

	if (he_is622(he_dev)) {
		/* table 5.2 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
	} else {
		/* table 5.1 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
	}

	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

	for (reg = 0; reg < 0x8; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);
}
static int he_init_cs_block_rcm(struct he_dev *he_dev)
{
	unsigned (*rategrid)[16][16];
	unsigned rate, delta;
	int i, j, reg;

	unsigned rate_atmf, exp, man;
	unsigned long long rate_cps;
	int mult, buf, buf_limit = 4;

	rategrid = kmalloc( sizeof(unsigned) * 16 * 16, GFP_KERNEL);
	if (!rategrid)
		return -ENOMEM;

	/* initialize rate grid group table */

	for (reg = 0x0; reg < 0xff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize rate controller groups */

	for (reg = 0x100; reg < 0x1ff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize tNrm lookup table */

	/* the manual makes reference to a routine in a sample driver
	   for proper configuration; fortunately, we only need this
	   in order to support abr connection */

	/* initialize rate to group table */

	rate = he_dev->atm_dev->link_rate;
	delta = rate / 32;

	/*
	 * 2.4 transmit internal functions
	 *
	 * we construct a copy of the rate grid used by the scheduler
	 * in order to construct the rate to group table below
	 */

	for (j = 0; j < 16; j++) {
		(*rategrid)[0][j] = rate;
		rate -= delta;
	}

	for (i = 1; i < 16; i++)
		for (j = 0; j < 16; j++)
			if (i > 14)
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
			else
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;

	/*
	 * 2.4 transmit internal function
	 *
	 * this table maps the upper 5 bits of exponent and mantissa
	 * of the atm forum representation of the rate into an index
	 * on the rate grid
	 */

	rate_atmf = 0;
	while (rate_atmf < 0x400) {
		man = (rate_atmf & 0x1f) << 4;
		exp = rate_atmf >> 5;

		/*
		   instead of '/ 512', use '>> 9' to prevent a call
		   to divdu3 on x86 platforms
		*/
		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
		if (rate_cps < 10)
			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */
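		/*
		 * worked decode example: a table index of 0x24b splits into
		 * exp = 0x24b >> 5 = 18 and man = (0x24b & 0x1f) << 4 = 176,
		 * so rate_cps = (1 << 18) * (176 + 512) >> 9 = 352256 cps,
		 * just under the oc3 link rate of 353207 cps.
		 */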
		for (i = 255; i > 0; i--)
			if ((*rategrid)[i/16][i%16] >= rate_cps)
				break;	/* pick nearest rate instead? */

		/*
		 * each table entry is 16 bits: (rate grid index (8 bits)
		 * and a buffer limit (8 bits)
		 * there are two table entries in each 32-bit register
		 */

		buf = rate_cps * he_dev->tx_numbuffs /
				(he_dev->atm_dev->link_rate * 2);

		/* this is pretty, but avoids _divdu3 and is mostly correct */
		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
		if (rate_cps > (272 * mult))
			buf = 4;
		else if (rate_cps > (204 * mult))
			buf = 3;
		else if (rate_cps > (136 * mult))
			buf = 2;
		else if (rate_cps > (68 * mult))
			buf = 1;
		else
			buf = 0;
		if (buf > buf_limit)
			buf = buf_limit;
		reg = (reg << 16) | ((i << 8) | buf);

#define RTGTBL_OFFSET 0x400

		if (rate_atmf & 0x1)
			he_writel_rcm(he_dev, reg,
				      CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));

		++rate_atmf;
	}

	kfree(rategrid);
	return 0;
}
static int he_init_group(struct he_dev *he_dev, int group)
{
	struct he_buff *heb, *next;
	dma_addr_t mapping;
	int i;

	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
		  G0_RBPS_BS + (group * 32));

	he_dev->rbpl_table = kmalloc(BITS_TO_LONGS(RBPL_TABLE_SIZE)
				     * sizeof(unsigned long), GFP_KERNEL);
	if (!he_dev->rbpl_table) {
		hprintk("unable to allocate rbpl bitmap table\n");
		return -ENOMEM;
	}
	bitmap_zero(he_dev->rbpl_table, RBPL_TABLE_SIZE);

	/* rbpl_virt 64-bit pointers */
	he_dev->rbpl_virt = kmalloc(RBPL_TABLE_SIZE
				    * sizeof(struct he_buff *), GFP_KERNEL);
	if (!he_dev->rbpl_virt) {
		hprintk("unable to allocate rbpl virt table\n");
		goto out_free_rbpl_table;
	}

	/* large buffer pool */
	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
					    CONFIG_RBPL_BUFSIZE, 64, 0);
	if (he_dev->rbpl_pool == NULL) {
		hprintk("unable to create rbpl pool\n");
		goto out_free_rbpl_virt;
	}

	he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
	if (he_dev->rbpl_base == NULL) {
		hprintk("failed to alloc rbpl_base\n");
		goto out_destroy_rbpl_pool;
	}
	memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));

	INIT_LIST_HEAD(&he_dev->rbpl_outstanding);

	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {

		heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &mapping);
		if (!heb)
			goto out_free_rbpl;
		heb->mapping = mapping;
		list_add(&heb->entry, &he_dev->rbpl_outstanding);

		set_bit(i, he_dev->rbpl_table);
		he_dev->rbpl_virt[i] = heb;
		he_dev->rbpl_hint = i + 1;
		he_dev->rbpl_base[i].idx = i << RBP_IDX_OFFSET;
		he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data);
	}
	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];
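	/*
	 * note: the idx stored in each rbp above is echoed back by the
	 * adapter in the rbrq entries, which is how he_service_rbrq()
	 * finds the matching he_buff again (RBRQ_ADDR() >> RBP_IDX_OFFSET
	 * indexes rbpl_virt[]).
	 */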
	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
		  G0_RBPL_T + (group * 32));
	he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4,
		  G0_RBPL_BS + (group * 32));
	he_writel(he_dev,
		  RBP_THRESH(CONFIG_RBPL_THRESH) |
		  RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
		  RBP_INT_ENB,
		  G0_RBPL_QI + (group * 32));

	/* rx buffer ready queue */

	he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
	if (he_dev->rbrq_base == NULL) {
		hprintk("failed to allocate rbrq\n");
		goto out_free_rbpl;
	}
	memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));

	he_dev->rbrq_head = he_dev->rbrq_base;
	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
	he_writel(he_dev,
		  RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
		  G0_RBRQ_Q + (group * 16));
	if (irq_coalesce) {
		hprintk("coalescing interrupts\n");
		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
			  G0_RBRQ_I + (group * 16));
	} else
		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
			  G0_RBRQ_I + (group * 16));

	/* tx buffer ready queue */

	he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
	if (he_dev->tbrq_base == NULL) {
		hprintk("failed to allocate tbrq\n");
		goto out_free_rbpq_base;
	}
	memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));

	he_dev->tbrq_head = he_dev->tbrq_base;

	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));

	return 0;

out_free_rbpq_base:
	pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE *
			    sizeof(struct he_rbrq), he_dev->rbrq_base,
			    he_dev->rbrq_phys);
out_free_rbpl:
	list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
		pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);

	pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE *
			    sizeof(struct he_rbp), he_dev->rbpl_base,
			    he_dev->rbpl_phys);
out_destroy_rbpl_pool:
	pci_pool_destroy(he_dev->rbpl_pool);
out_free_rbpl_virt:
	kfree(he_dev->rbpl_virt);
out_free_rbpl_table:
	kfree(he_dev->rbpl_table);

	return -ENOMEM;
}
static int he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* 2.9.3.5  tail offset for each interrupt queue is located after the
		    end of the interrupt queue */

	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
		(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
	if (he_dev->irq_base == NULL) {
		hprintk("failed to allocate irq\n");
		return -ENOMEM;
	}
	he_dev->irq_tailoffset = (unsigned *)
				 &he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;
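	/*
	 * note (cf. 2.9.3.5 above): the adapter dma-writes its current tail
	 * offset into the extra slot reserved past the queue, and
	 * he_irq_handler() rebuilds the tail pointer from it as
	 * irq_base | (*irq_tailoffset << 2).
	 */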
	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev,
		  IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
		  IRQ0_HEAD);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* 2.9.3.2 interrupt queue mapping registers */

	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	if (request_irq(he_dev->pci_dev->irq,
			he_irq_handler, IRQF_SHARED, DEV_LABEL, he_dev)) {
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		return -EINVAL;
	}

	he_dev->irq = he_dev->pci_dev->irq;

	return 0;
}
static int he_start(struct atm_dev *dev)
{
	struct he_dev *he_dev;
	struct pci_dev *pci_dev;
	unsigned long membase;

	u16 command;
	u32 gen_cntl_0, host_cntl, lb_swap;
	u8 cache_size, timer;

	unsigned err;
	unsigned int status, reg;
	int i, group;

	he_dev = HE_DEV(dev);
	pci_dev = he_dev->pci_dev;

	membase = pci_resource_start(pci_dev, 0);
	HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);

	/*
	 * pci bus controller initialization
	 */

	/* 4.3 pci bus controller-specific initialization */
	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
		hprintk("can't read GEN_CNTL_0\n");
		return -EINVAL;
	}
	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
		hprintk("can't write GEN_CNTL_0.\n");
		return -EINVAL;
	}

	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
		hprintk("can't read PCI_COMMAND.\n");
		return -EINVAL;
	}

	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
		hprintk("can't enable memory.\n");
		return -EINVAL;
	}

	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
		hprintk("can't read cache line size?\n");
		return -EINVAL;
	}

	if (cache_size < 16) {
		cache_size = 16;
		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
			hprintk("can't set cache line size to %d\n", cache_size);
	}

	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
		hprintk("can't read latency timer?\n");
		return -EINVAL;
	}
	/*
	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
	 *
	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
	 */
#define LAT_TIMER 209
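	/* i.e. LAT_TIMER = 1 + 16 + 192 = 209 pci clocks */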
	if (timer < LAT_TIMER) {
		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
		timer = LAT_TIMER;
		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
			hprintk("can't set latency timer to %d\n", timer);
	}

	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
		hprintk("can't set up page mapping\n");
		return -EINVAL;
	}

	/* 4.4 card reset */
	he_writel(he_dev, 0x0, RESET_CNTL);
	he_writel(he_dev, 0xff, RESET_CNTL);

	udelay(16*1000);	/* 16 ms */
	status = he_readl(he_dev, RESET_CNTL);
	if ((status & BOARD_RST_STATUS) == 0) {
		hprintk("reset failed\n");
		return -EINVAL;
	}

	/* 4.5 set bus width */
	host_cntl = he_readl(he_dev, HOST_CNTL);
	if (host_cntl & PCI_BUS_SIZE64)
		gen_cntl_0 |= ENBL_64;
	else
		gen_cntl_0 &= ~ENBL_64;

	if (disable64 == 1) {
		hprintk("disabling 64-bit pci bus transfers\n");
		gen_cntl_0 &= ~ENBL_64;
	}

	if (gen_cntl_0 & ENBL_64)
		hprintk("64-bit transfers enabled\n");

	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/* 4.7 read prom contents */
	for (i = 0; i < PROD_ID_LEN; ++i)
		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);

	he_dev->media = read_prom_byte(he_dev, MEDIA);

	for (i = 0; i < 6; ++i)
		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);

	hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
		he_dev->prod_id,
		he_dev->media & 0x40 ? "SM" : "MM",
		dev->esi[0], dev->esi[1], dev->esi[2],
		dev->esi[3], dev->esi[4], dev->esi[5]);
	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
						ATM_OC12_PCR : ATM_OC3_PCR;
	/* 4.6 set host endianness */
	lb_swap = he_readl(he_dev, LB_SWAP);
	if (he_is622(he_dev))
		lb_swap &= ~XFER_SIZE;		/* 4 cells */
	else
		lb_swap |= XFER_SIZE;		/* 8 cells */
#ifdef __BIG_ENDIAN
	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
#else
	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
#endif /* __BIG_ENDIAN */
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.8 sdram controller initialization */
	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);

	/* 4.9 initialize rnum value */
	lb_swap |= SWAP_RNUM_MAX(0xf);
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.10 initialize the interrupt queues */
	if ((err = he_init_irq(he_dev)) != 0)
		return err;

	/* 4.11 enable pci bus controller state machines */
	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
		      QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
	he_writel(he_dev, host_cntl, HOST_CNTL);

	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
	/*
	 * atm network controller initialization
	 */

	/* 5.1.1 generic configuration state */

	/*
	 * local (cell) buffer memory map (diagram abridged)
	 *
	 * oc3, 1024 bytes/row, 2048 rows:	oc12, 2048 bytes/row, 1024 rows:
	 *	rows    0 -    5  utility	rows   0 - 255   rx0
	 *	rows    6 -  517  rx0		rows 256 - 767   tx
	 *	rows  518 - 1535  tx		rows 768 - 1023  rx1
	 *	rows 1536 - 2047  rx1		(utility in a trailing column)
	 */
	/* total 4096 connections */
	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;

	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
		return -ENODEV;
	}

	if (nvpibits != -1) {
		he_dev->vpibits = nvpibits;
		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
	}

	if (nvcibits != -1) {
		he_dev->vcibits = nvcibits;
		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
	}

	if (he_is622(he_dev)) {
		he_dev->cells_per_row = 40;
		he_dev->bytes_per_row = 2048;
		he_dev->r0_numrows = 256;
		he_dev->tx_numrows = 512;
		he_dev->r1_numrows = 256;
		he_dev->r0_startrow = 0;
		he_dev->tx_startrow = 256;
		he_dev->r1_startrow = 768;
	} else {
		he_dev->cells_per_row = 20;
		he_dev->bytes_per_row = 1024;
		he_dev->r0_numrows = 512;
		he_dev->tx_numrows = 1018;
		he_dev->r1_numrows = 512;
		he_dev->r0_startrow = 6;
		he_dev->tx_startrow = 518;
		he_dev->r1_startrow = 1536;
	}

	he_dev->cells_per_lbuf = 4;
	he_dev->buffer_limit = 4;
	he_dev->r0_numbuffs = he_dev->r0_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r0_numbuffs > 2560)
		he_dev->r0_numbuffs = 2560;

	he_dev->r1_numbuffs = he_dev->r1_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r1_numbuffs > 2560)
		he_dev->r1_numbuffs = 2560;

	he_dev->tx_numbuffs = he_dev->tx_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->tx_numbuffs > 5120)
		he_dev->tx_numbuffs = 5120;
	/* 5.1.2 configure hardware dependent registers */

	he_writel(he_dev,
		  SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
		  RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
		  (he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
		  (he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
		  LBARB);

	he_writel(he_dev, BANK_ON |
		  (he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
		  SDRAMCON);

	he_writel(he_dev,
		  (he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
		  RM_RW_WAIT(1), RCMCONFIG);
	he_writel(he_dev,
		  (he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
		  TM_RW_WAIT(1), TCMCONFIG);

	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);

	he_writel(he_dev,
		  (he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
		  (he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
		  RX_VALVP(he_dev->vpibits) |
		  RX_VALVC(he_dev->vcibits), RC_CONFIG);

	he_writel(he_dev, DRF_THRESH(0x20) |
		  (he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
		  TX_VCI_MASK(he_dev->vcibits) |
		  LBFREE_CNT(he_dev->tx_numbuffs), TX_CONFIG);

	he_writel(he_dev, 0x0, TXAAL5_PROTO);

	he_writel(he_dev, PHY_INT_ENB |
		  (he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
		  RH_CONFIG);
	/* 5.1.3 initialize connection memory */

	for (i = 0; i < TCM_MEM_SIZE; ++i)
		he_writel_tcm(he_dev, 0, i);

	for (i = 0; i < RCM_MEM_SIZE; ++i)
		he_writel_rcm(he_dev, 0, i);

	/*
	 * transmit connection memory map (diagram abridged)
	 *
	 *	0x0     - 0x7fff	tsr a
	 *	0x8000  - 0xbfff	tsr b
	 *	0xc000  - 0xdfff	tsr c
	 *	0xe000  - 0xefff	tsr d
	 *	0xf000  - 0xffff	tmabr
	 *	0x10000 - 0x1ffff	tpd
	 */
	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);

	/*
	 * receive connection memory map (diagram abridged)
	 *
	 *	0x0    - 0x7fff		rsr a
	 *	0x8000 - 0xcfff		lbm -- link lists of local buffer memory
	 *	0xd000 - 0xdfff		rate srs / abr (RCMABR_BA below at 0xd800)
	 *	0xe000 - 0xffff		rsr b
	 */

	he_writel(he_dev, 0x08000, RCMLBM_BA);
	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
	he_writel(he_dev, 0x0d800, RCMABR_BA);
	/* 5.1.4 initialize local buffer free pools linked lists */

	he_init_rx_lbfp0(he_dev);
	he_init_rx_lbfp1(he_dev);

	he_writel(he_dev, 0x0, RLBC_H);
	he_writel(he_dev, 0x0, RLBC_T);
	he_writel(he_dev, 0x0, RLBC_H2);

	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
	he_writel(he_dev, 256, LITHRSH);	/* 5% of r0+r1 buffers */

	he_init_tx_lbfp(he_dev);

	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);

	/* 5.1.5 initialize intermediate receive queues */

	if (he_is622(he_dev)) {
		he_writel(he_dev, 0x000f, G0_INMQ_S);
		he_writel(he_dev, 0x200f, G0_INMQ_L);

		he_writel(he_dev, 0x001f, G1_INMQ_S);
		he_writel(he_dev, 0x201f, G1_INMQ_L);

		he_writel(he_dev, 0x002f, G2_INMQ_S);
		he_writel(he_dev, 0x202f, G2_INMQ_L);

		he_writel(he_dev, 0x003f, G3_INMQ_S);
		he_writel(he_dev, 0x203f, G3_INMQ_L);

		he_writel(he_dev, 0x004f, G4_INMQ_S);
		he_writel(he_dev, 0x204f, G4_INMQ_L);

		he_writel(he_dev, 0x005f, G5_INMQ_S);
		he_writel(he_dev, 0x205f, G5_INMQ_L);

		he_writel(he_dev, 0x006f, G6_INMQ_S);
		he_writel(he_dev, 0x206f, G6_INMQ_L);

		he_writel(he_dev, 0x007f, G7_INMQ_S);
		he_writel(he_dev, 0x207f, G7_INMQ_L);
	} else {
		he_writel(he_dev, 0x0000, G0_INMQ_S);
		he_writel(he_dev, 0x0008, G0_INMQ_L);

		he_writel(he_dev, 0x0001, G1_INMQ_S);
		he_writel(he_dev, 0x0009, G1_INMQ_L);

		he_writel(he_dev, 0x0002, G2_INMQ_S);
		he_writel(he_dev, 0x000a, G2_INMQ_L);

		he_writel(he_dev, 0x0003, G3_INMQ_S);
		he_writel(he_dev, 0x000b, G3_INMQ_L);

		he_writel(he_dev, 0x0004, G4_INMQ_S);
		he_writel(he_dev, 0x000c, G4_INMQ_L);

		he_writel(he_dev, 0x0005, G5_INMQ_S);
		he_writel(he_dev, 0x000d, G5_INMQ_L);

		he_writel(he_dev, 0x0006, G6_INMQ_S);
		he_writel(he_dev, 0x000e, G6_INMQ_L);

		he_writel(he_dev, 0x0007, G7_INMQ_S);
		he_writel(he_dev, 0x000f, G7_INMQ_L);
	}
	/* 5.1.6 application tunable parameters */

	he_writel(he_dev, 0x0, MCC);
	he_writel(he_dev, 0x0, OEC);
	he_writel(he_dev, 0x0, DCC);
	he_writel(he_dev, 0x0, CEC);

	/* 5.1.7 cs block initialization */

	he_init_cs_block(he_dev);

	/* 5.1.8 cs block connection memory initialization */

	if (he_init_cs_block_rcm(he_dev) < 0)
		return -ENOMEM;

	/* 5.1.10 initialize host structures */

	he_init_tpdrq(he_dev);

	he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
					   sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
	if (he_dev->tpd_pool == NULL) {
		hprintk("unable to create tpd pci_pool\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&he_dev->outstanding_tpds);

	if (he_init_group(he_dev, 0) != 0)
		return -ENOMEM;
	for (group = 1; group < HE_NUM_GROUPS; ++group) {
		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
			  G0_RBPS_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
			  G0_RBPL_QI + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
			  G0_RBRQ_Q + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));

		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
		he_writel(he_dev, TBRQ_THRESH(0x1),
			  G0_TBRQ_THRESH + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
	}
	/* host status page */

	he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
					   sizeof(struct he_hsp), &he_dev->hsp_phys);
	if (he_dev->hsp == NULL) {
		hprintk("failed to allocate host status page\n");
		return -ENOMEM;
	}
	memset(he_dev->hsp, 0, sizeof(struct he_hsp));
	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);

	/* initialize framer */

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_isMM(he_dev))
		suni_init(he_dev->atm_dev);
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
		he_dev->atm_dev->phy->start(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	/* this really should be in suni.c but for now... */
	{
		int val;

		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
		he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
	}
	/* 5.1.12 enable transmit and receive */

	reg = he_readl_mbox(he_dev, CS_ERCTL0);
	reg |= TX_ENABLE|ER_ENABLE;
	he_writel_mbox(he_dev, reg, CS_ERCTL0);

	reg = he_readl(he_dev, RC_CONFIG);
	reg |= RX_ENABLE;
	he_writel(he_dev, reg, RC_CONFIG);

	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
		he_dev->cs_stper[i].inuse = 0;
		he_dev->cs_stper[i].pcr = -1;
	}
	he_dev->total_bw = 0;

	/* atm linux initialization */

	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;

	he_dev->irq_peak = 0;
	he_dev->rbrq_peak = 0;
	he_dev->rbpl_peak = 0;
	he_dev->tbrq_peak = 0;

	HPRINTK("hell bent for leather!\n");

	return 0;
}
static void
he_stop(struct he_dev *he_dev)
{
	struct he_buff *heb, *next;
	struct pci_dev *pci_dev;
	u32 gen_cntl_0, reg;
	u16 command;

	pci_dev = he_dev->pci_dev;

	/* disable interrupts */

	if (he_dev->membase) {
		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

		tasklet_disable(&he_dev->tasklet);

		/* disable recv and transmit */

		reg = he_readl_mbox(he_dev, CS_ERCTL0);
		reg &= ~(TX_ENABLE|ER_ENABLE);
		he_writel_mbox(he_dev, reg, CS_ERCTL0);

		reg = he_readl(he_dev, RC_CONFIG);
		reg &= ~(RX_ENABLE);
		he_writel(he_dev, reg, RC_CONFIG);
	}

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (he_dev->irq)
		free_irq(he_dev->irq, he_dev);

	if (he_dev->irq_base)
		pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
				    * sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);

	if (he_dev->hsp)
		pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
				    he_dev->hsp, he_dev->hsp_phys);

	if (he_dev->rbpl_base) {
		list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
			pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);

		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
				    * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
	}

	kfree(he_dev->rbpl_virt);
	kfree(he_dev->rbpl_table);

	if (he_dev->rbpl_pool)
		pci_pool_destroy(he_dev->rbpl_pool);

	if (he_dev->rbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
				    he_dev->rbrq_base, he_dev->rbrq_phys);

	if (he_dev->tbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
				    he_dev->tbrq_base, he_dev->tbrq_phys);

	if (he_dev->tpdrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
				    he_dev->tpdrq_base, he_dev->tpdrq_phys);

	if (he_dev->tpd_pool)
		pci_pool_destroy(he_dev->tpd_pool);

	if (he_dev->pci_dev) {
		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
	}

	if (he_dev->membase)
		iounmap(he_dev->membase);
}
static struct he_tpd *
__alloc_tpd(struct he_dev *he_dev)
{
	struct he_tpd *tpd;
	dma_addr_t mapping;

	tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &mapping);
	if (tpd == NULL)
		return NULL;

	tpd->status = TPD_ADDR(mapping);
	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;

	return tpd;
}
#define AAL5_LEN(buf,len)						\
			((((unsigned char *)(buf))[(len)-6] << 8) |	\
				(((unsigned char *)(buf))[(len)-5]))

/*
 * aal5 packets can optionally return the tcp checksum in the lower
 * 16 bits of the crc (RSR0_TCP_CKSUM)
 */

#define TCP_CKSUM(buf,len)						\
			((((unsigned char *)(buf))[(len)-2] << 8) |	\
				(((unsigned char *)(buf))[(len-1)]))
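/*
 * layout note: the last 8 bytes of an aal5 pdu are the trailer --
 * cpcs-uu, cpi, a 16-bit length and a 32-bit crc.  AAL5_LEN() pulls the
 * length field out of bytes len-6/len-5; when RSR0_TCP_CKSUM is enabled
 * the adapter returns the checksum in the low half of the crc field,
 * which TCP_CKSUM() reads from the last two bytes.
 */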
static int
he_service_rbrq(struct he_dev *he_dev, int group)
{
	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
				((unsigned long)he_dev->rbrq_base |
					he_dev->hsp->group[group].rbrq_tail);
	unsigned cid, lastcid = -1;
	struct sk_buff *skb;
	struct atm_vcc *vcc = NULL;
	struct he_vcc *he_vcc;
	struct he_buff *heb, *next;
	int i;
	int pdus_assembled = 0;
	int updated = 0;

	read_lock(&vcc_sklist_lock);
	while (he_dev->rbrq_head != rbrq_tail) {
		++updated;

		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
			he_dev->rbrq_head, group,
			RBRQ_ADDR(he_dev->rbrq_head),
			RBRQ_BUFLEN(he_dev->rbrq_head),
			RBRQ_CID(he_dev->rbrq_head),
			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");

		i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET;
		heb = he_dev->rbpl_virt[i];

		cid = RBRQ_CID(he_dev->rbrq_head);
		if (cid != lastcid)
			vcc = __find_vcc(he_dev, cid);
		lastcid = cid;

		if (vcc == NULL || (he_vcc = HE_VCC(vcc)) == NULL) {
			hprintk("vcc/he_vcc == NULL  (cid 0x%x)\n", cid);
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
				clear_bit(i, he_dev->rbpl_table);
				list_del(&heb->entry);
				pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
			}

			goto next_rbrq_entry;
		}

		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
			atomic_inc(&vcc->stats->rx_drop);
			goto return_host_buffers;
		}

		heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
		clear_bit(i, he_dev->rbpl_table);
		list_move_tail(&heb->entry, &he_vcc->buffers);
		he_vcc->pdu_len += heb->len;

		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
			lastcid = -1;
			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
			wake_up(&he_vcc->rx_waitq);
			goto return_host_buffers;
		}

		if (!RBRQ_END_PDU(he_dev->rbrq_head))
			goto next_rbrq_entry;

		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
			HPRINTK("%s%s (%d.%d)\n",
				RBRQ_CRC_ERR(he_dev->rbrq_head)
							? "CRC_ERR " : "",
				RBRQ_LEN_ERR(he_dev->rbrq_head)
							? "LEN_ERR" : "",
							vcc->vpi, vcc->vci);
			atomic_inc(&vcc->stats->rx_err);
			goto return_host_buffers;
		}

		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
				       GFP_ATOMIC);
		if (!skb) {
			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
			goto return_host_buffers;
		}

		if (rx_skb_reserve > 0)
			skb_reserve(skb, rx_skb_reserve);

		__net_timestamp(skb);

		list_for_each_entry(heb, &he_vcc->buffers, entry)
			memcpy(skb_put(skb, heb->len), &heb->data, heb->len);

		switch (vcc->qos.aal) {
			case ATM_AAL0:
				/* 2.10.1.5 raw cell receive */
				skb->len = ATM_AAL0_SDU;
				skb_set_tail_pointer(skb, skb->len);
				break;
			case ATM_AAL5:
				/* 2.10.1.2 aal5 receive */
				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
				skb_set_tail_pointer(skb, skb->len);
#ifdef USE_CHECKSUM_HW
				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
					skb->ip_summed = CHECKSUM_COMPLETE;
					skb->csum = TCP_CKSUM(skb->data,
							      he_vcc->pdu_len);
				}
#endif
				break;
		}

#ifdef should_never_happen
		if (skb->len > vcc->qos.rxtp.max_sdu)
			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n",
				skb->len, vcc->qos.rxtp.max_sdu, cid);
#endif

		ATM_SKB(skb)->vcc = vcc;

		spin_unlock(&he_dev->global_lock);
		vcc->push(vcc, skb);
		spin_lock(&he_dev->global_lock);

		atomic_inc(&vcc->stats->rx);

return_host_buffers:
		++pdus_assembled;

		list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
			pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
		INIT_LIST_HEAD(&he_vcc->buffers);
		he_vcc->pdu_len = 0;

next_rbrq_entry:
		he_dev->rbrq_head = (struct he_rbrq *)
				((unsigned long) he_dev->rbrq_base |
					RBRQ_MASK(he_dev->rbrq_head + 1));
	}
	read_unlock(&vcc_sklist_lock);

	if (updated) {
		if (updated > he_dev->rbrq_peak)
			he_dev->rbrq_peak = updated;

		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
			  G0_RBRQ_H + (group * 16));
	}

	return pdus_assembled;
}
static void
he_service_tbrq(struct he_dev *he_dev, int group)
{
	struct he_tbrq *tbrq_tail = (struct he_tbrq *)
				((unsigned long)he_dev->tbrq_base |
					he_dev->hsp->group[group].tbrq_tail);
	struct he_tpd *tpd;
	int slot, updated = 0;
	struct he_tpd *__tpd;

	/* 2.1.6 transmit buffer return queue */

	while (he_dev->tbrq_head != tbrq_tail) {
		++updated;

		HPRINTK("tbrq%d 0x%x%s%s\n",
			group,
			TBRQ_TPD(he_dev->tbrq_head),
			TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
			TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");

		tpd = NULL;
		list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
			if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
				tpd = __tpd;
				list_del(&__tpd->entry);
				break;
			}
		}

		if (tpd == NULL) {
			hprintk("unable to locate tpd for dma buffer %x\n",
				TBRQ_TPD(he_dev->tbrq_head));
			goto next_tbrq_entry;
		}

		if (TBRQ_EOS(he_dev->tbrq_head)) {
			HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
				he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
			if (tpd->vcc)
				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);

			goto next_tbrq_entry;
		}

		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
			if (tpd->iovec[slot].addr)
				pci_unmap_single(he_dev->pci_dev,
					tpd->iovec[slot].addr,
					tpd->iovec[slot].len & TPD_LEN_MASK,
					PCI_DMA_TODEVICE);
			if (tpd->iovec[slot].len & TPD_LST)
				break;
		}

		if (tpd->skb) {	/* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
			if (tpd->vcc && tpd->vcc->pop)
				tpd->vcc->pop(tpd->vcc, tpd->skb);
			else
				dev_kfree_skb_any(tpd->skb);
		}

next_tbrq_entry:
		if (tpd)
			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
		he_dev->tbrq_head = (struct he_tbrq *)
				((unsigned long) he_dev->tbrq_base |
					TBRQ_MASK(he_dev->tbrq_head + 1));
	}

	if (updated) {
		if (updated > he_dev->tbrq_peak)
			he_dev->tbrq_peak = updated;

		he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
			  G0_TBRQ_H + (group * 16));
	}
}
static void
he_service_rbpl(struct he_dev *he_dev, int group)
{
	struct he_rbp *new_tail;
	struct he_rbp *rbpl_head;
	struct he_buff *heb;
	dma_addr_t mapping;
	int i;
	int moved = 0;

	rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
					RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));

	for (;;) {
		new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
						RBPL_MASK(he_dev->rbpl_tail+1));

		/* table 3.42 -- rbpl_tail should never be set to rbpl_head */
		if (new_tail == rbpl_head)
			break;

		i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint);
		if (i > (RBPL_TABLE_SIZE - 1)) {
			i = find_first_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE);
			if (i > (RBPL_TABLE_SIZE - 1))
				break;
		}
		he_dev->rbpl_hint = i + 1;
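		/*
		 * the hint makes this a circular scan: search forward from
		 * the last claimed slot, fall back to a scan from bit 0 on
		 * a miss, and remember the slot after this one for next time.
		 */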
		heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC|GFP_DMA, &mapping);
		if (!heb)
			break;
		heb->mapping = mapping;
		list_add(&heb->entry, &he_dev->rbpl_outstanding);
		he_dev->rbpl_virt[i] = heb;
		set_bit(i, he_dev->rbpl_table);
		new_tail->idx = i << RBP_IDX_OFFSET;
		new_tail->phys = mapping + offsetof(struct he_buff, data);

		he_dev->rbpl_tail = new_tail;
		++moved;
	}

	if (moved)
		he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
}
static void
he_tasklet(unsigned long data)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev *) data;
	unsigned updated = 0;
	u32 type, group;

	HPRINTK("tasklet (0x%lx)\n", data);
	spin_lock_irqsave(&he_dev->global_lock, flags);

	while (he_dev->irq_head != he_dev->irq_tail) {
		++updated;

		type = ITYPE_TYPE(he_dev->irq_head->isw);
		group = ITYPE_GROUP(he_dev->irq_head->isw);

		switch (type) {
			case ITYPE_RBRQ_THRESH:
				HPRINTK("rbrq%d threshold\n", group);
				/* fall through */
			case ITYPE_RBRQ_TIMER:
				if (he_service_rbrq(he_dev, group))
					he_service_rbpl(he_dev, group);
				break;
			case ITYPE_TBRQ_THRESH:
				HPRINTK("tbrq%d threshold\n", group);
				/* fall through */
			case ITYPE_TPD_COMPLETE:
				he_service_tbrq(he_dev, group);
				break;
			case ITYPE_RBPL_THRESH:
				he_service_rbpl(he_dev, group);
				break;
			case ITYPE_RBPS_THRESH:
				/* shouldn't happen unless small buffers enabled */
				break;
			case ITYPE_PHY:
				HPRINTK("phy interrupt\n");
#ifdef CONFIG_ATM_HE_USE_SUNI
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
				spin_lock_irqsave(&he_dev->global_lock, flags);
#endif
				break;
			case ITYPE_OTHER:
				switch (type|group) {
					case ITYPE_PARITY:
						hprintk("parity error\n");
						break;
					case ITYPE_ABORT:
						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
						break;
				}
				break;
			case ITYPE_TYPE(ITYPE_INVALID):
				/* see 8.1.1 -- check all queues */

				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);

				he_service_rbrq(he_dev, 0);
				he_service_rbpl(he_dev, 0);
				he_service_tbrq(he_dev, 0);
				break;
			default:
				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
		}

		he_dev->irq_head->isw = ITYPE_INVALID;

		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
	}

	if (updated) {
		if (updated > he_dev->irq_peak)
			he_dev->irq_peak = updated;

		he_writel(he_dev,
			  IRQ_SIZE(CONFIG_IRQ_SIZE) |
			  IRQ_THRESH(CONFIG_IRQ_THRESH) |
			  IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata; flush posted writes */
	}
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
}
static irqreturn_t
he_irq_handler(int irq, void *dev_id)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev * )dev_id;
	int handled = 0;

	if (he_dev == NULL)
		return IRQ_NONE;

	spin_lock_irqsave(&he_dev->global_lock, flags);

	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
						(*he_dev->irq_tailoffset << 2));

	if (he_dev->irq_tail == he_dev->irq_head) {
		HPRINTK("tailoffset not updated?\n");
		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
	}

	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
		hprintk("spurious (or shared) interrupt?\n");

	if (he_dev->irq_head != he_dev->irq_tail) {
		handled = 1;
		tasklet_schedule(&he_dev->tasklet);
		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
	}
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
	return IRQ_RETVAL(handled);
}
static __inline__ void
__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
{
	struct he_tpdrq *new_tail;

	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
		tpd, cid, he_dev->tpdrq_tail);

	/* new_tail = he_dev->tpdrq_tail; */
	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
					TPDRQ_MASK(he_dev->tpdrq_tail+1));

	/*
	 * check to see if we are about to set the tail == head
	 * if true, update the head pointer from the adapter
	 * to see if this is really the case (reading the queue
	 * head for every enqueue would be unnecessarily slow)
	 */

	if (new_tail == he_dev->tpdrq_head) {
		he_dev->tpdrq_head = (struct he_tpdrq *)
			(((unsigned long)he_dev->tpdrq_base) |
				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));

		if (new_tail == he_dev->tpdrq_head) {
			int slot;

			hprintk("tpdrq full (cid 0x%x)\n", cid);
			/*
			 * push tpd onto a transmit backlog queue
			 * after service_tbrq, service the backlog
			 * for now, we just drop the pdu
			 */
			for (slot = 0; slot < TPD_MAXIOV; ++slot) {
				if (tpd->iovec[slot].addr)
					pci_unmap_single(he_dev->pci_dev,
						tpd->iovec[slot].addr,
						tpd->iovec[slot].len & TPD_LEN_MASK,
						PCI_DMA_TODEVICE);
			}
			if (tpd->vcc && tpd->vcc->pop)
				tpd->vcc->pop(tpd->vcc, tpd->skb);
			else
				dev_kfree_skb_any(tpd->skb);
			atomic_inc(&tpd->vcc->stats->tx_err);
			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
			return;
		}
	}

	/* 2.1.5 transmit packet descriptor ready queue */
	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
	he_dev->tpdrq_tail->cid = cid;
	wmb();

	he_dev->tpdrq_tail = new_tail;

	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
	(void) he_readl(he_dev, TPDRQ_T);	/* flush posted writes */
}
static int
he_open(struct atm_vcc *vcc)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_vcc *he_vcc;
	int err = 0;
	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
	short vpi = vcc->vpi;
	int vci = vcc->vci;

	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
		return 0;

	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);

	set_bit(ATM_VF_ADDR, &vcc->flags);

	cid = he_mkcid(he_dev, vpi, vci);

	he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
	if (he_vcc == NULL) {
		hprintk("unable to allocate he_vcc during open\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&he_vcc->buffers);
	he_vcc->pdu_len = 0;
	he_vcc->rc_index = -1;

	init_waitqueue_head(&he_vcc->rx_waitq);
	init_waitqueue_head(&he_vcc->tx_waitq);

	vcc->dev_data = he_vcc;

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		int pcr_goal;

		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
		if (pcr_goal == 0)
			pcr_goal = he_dev->atm_dev->link_rate;
		if (pcr_goal < 0)	/* means round down, technically */
			pcr_goal = -pcr_goal;

		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);

		switch (vcc->qos.aal) {
			case ATM_AAL5:
				tsr0_aal = TSR0_AAL5;
				tsr4 = TSR4_AAL5;
				break;
			case ATM_AAL0:
				tsr0_aal = TSR0_AAL0_SDU;
				tsr4 = TSR4_AAL0_SDU;
				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);
		tsr0 = he_readl_tsr0(he_dev, cid);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		if (TSR0_CONN_STATE(tsr0) != 0) {
			hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
			err = -EBUSY;
			goto open_failed;
		}

		switch (vcc->qos.txtp.traffic_class) {
			case ATM_UBR:
				/* 2.3.3.1 open connection ubr */

				tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
					TSR0_USE_WMIN | TSR0_UPDATE_GER;
				break;

			case ATM_CBR:
				/* 2.3.3.2 open connection cbr */

				/* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
				if ((he_dev->total_bw + pcr_goal)
					> (he_dev->atm_dev->link_rate * 9 / 10))
				{
					err = -EBUSY;
					goto open_failed;
				}

				spin_lock_irqsave(&he_dev->global_lock, flags);	/* also protects he_dev->cs_stper[] */

				/* find an unused cs_stper register */
				for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
					if (he_dev->cs_stper[reg].inuse == 0 ||
					    he_dev->cs_stper[reg].pcr == pcr_goal)
						break;

				if (reg == HE_NUM_CS_STPER) {
					err = -EBUSY;
					spin_unlock_irqrestore(&he_dev->global_lock, flags);
					goto open_failed;
				}

				he_dev->total_bw += pcr_goal;

				he_vcc->rc_index = reg;
				++he_dev->cs_stper[reg].inuse;
				he_dev->cs_stper[reg].pcr = pcr_goal;

				clock = he_is622(he_dev) ? 66667000 : 50000000;
				period = clock / pcr_goal;
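				/*
				 * worked example: on an oc3 card (50MHz
				 * clock) a 100000 cps cbr connection gives
				 * period = 500; rate_to_atmf(period / 2)
				 * below then programs the shaper step timer
				 * with the half-period in atm forum format.
				 */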

			HPRINTK("rc_index = %d period = %d\n",
				reg, period);

			he_writel_mbox(he_dev, rate_to_atmf(period/2),
				       CS_STPER0 + reg);
			spin_unlock_irqrestore(&he_dev->global_lock, flags);

			tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
				TSR0_RC_INDEX(reg);
			break;

		default:
			err = -EINVAL;
			goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);

		he_writel_tsr0(he_dev, tsr0, cid);
		he_writel_tsr4(he_dev, tsr4 | 1, cid);
		he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
			       TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
		he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
		he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);

		he_writel_tsr3(he_dev, 0x0, cid);
		he_writel_tsr5(he_dev, 0x0, cid);
		he_writel_tsr6(he_dev, 0x0, cid);
		he_writel_tsr7(he_dev, 0x0, cid);
		he_writel_tsr8(he_dev, 0x0, cid);
		he_writel_tsr10(he_dev, 0x0, cid);
		he_writel_tsr11(he_dev, 0x0, cid);
		he_writel_tsr12(he_dev, 0x0, cid);
		he_writel_tsr13(he_dev, 0x0, cid);
		he_writel_tsr14(he_dev, 0x0, cid);
		(void) he_readl_tsr0(he_dev, cid);	/* flush posted writes */
		spin_unlock_irqrestore(&he_dev->global_lock, flags);
	}

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		unsigned aal;

		HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
			&HE_VCC(vcc)->rx_waitq);

		switch (vcc->qos.aal) {
		case ATM_AAL0:
			aal = RSR0_RAWCELL;
			break;
		case ATM_AAL5:
			aal = RSR0_AAL5;
			break;
		default:
			err = -EINVAL;
			goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);

		rsr0 = he_readl_rsr0(he_dev, cid);
		if (rsr0 & RSR0_OPEN_CONN) {
			spin_unlock_irqrestore(&he_dev->global_lock, flags);

			hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
			err = -EBUSY;
			goto open_failed;
		}

		rsr1 = RSR1_GROUP(0) | RSR1_RBPL_ONLY;
		rsr4 = RSR4_GROUP(0) | RSR4_RBPL_ONLY;
		rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
			(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
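
		/*
		 * For UBR rx, early and partial packet discard let the
		 * adapter drop the remainder of an AAL5 pdu once one of
		 * its cells has been lost, rather than wasting buffers
		 * on a pdu that will fail reassembly anyway.
		 */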

#ifdef USE_CHECKSUM_HW
		if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
			rsr0 |= RSR0_TCP_CKSUM;
#endif

		he_writel_rsr4(he_dev, rsr4, cid);
		he_writel_rsr1(he_dev, rsr1, cid);
		/* 5.1.11 last parameter initialized should be
		   the open/closed indication in rsr0 */
		he_writel_rsr0(he_dev,
			rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
		(void) he_readl_rsr0(he_dev, cid);	/* flush posted writes */

		spin_unlock_irqrestore(&he_dev->global_lock, flags);
	}

open_failed:

	if (err) {
		kfree(he_vcc);
		clear_bit(ATM_VF_ADDR, &vcc->flags);
	}
	else
		set_bit(ATM_VF_READY, &vcc->flags);

	return err;
}
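
/*
 * he_open() is only reached through the core ATM stack.  A rough
 * userspace sketch that would exercise it for pvc 0.0.100 (AAL5, UBR
 * both directions; error handling omitted, max_sdu value illustrative):
 *
 *	int s = socket(PF_ATMPVC, SOCK_DGRAM, 0);
 *	struct sockaddr_atmpvc pvc = { .sap_family = AF_ATMPVC };
 *	struct atm_qos qos = { .aal = ATM_AAL5 };
 *
 *	pvc.sap_addr.itf = 0;	(this adapter's interface number)
 *	pvc.sap_addr.vpi = 0;
 *	pvc.sap_addr.vci = 100;
 *	qos.txtp.traffic_class = qos.rxtp.traffic_class = ATM_UBR;
 *	qos.txtp.max_sdu = qos.rxtp.max_sdu = 1524;
 *	setsockopt(s, SOL_ATM, SO_ATMQOS, &qos, sizeof(qos));
 *	connect(s, (struct sockaddr *) &pvc, sizeof(pvc));
 */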

static void
he_close(struct atm_vcc *vcc)
{
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_tpd *tpd;
	unsigned cid;
	struct he_vcc *he_vcc = HE_VCC(vcc);
#define MAX_RETRY 30
	int retry = 0, sleep = 1, tx_inuse;

	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);

	clear_bit(ATM_VF_READY, &vcc->flags);
	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		int timeout;

		HPRINTK("close rx cid 0x%x\n", cid);

		/* 2.7.2.2 close receive operation */

		/* wait for previous close (if any) to finish */

		spin_lock_irqsave(&he_dev->global_lock, flags);
		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
			udelay(250);
		}

		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&he_vcc->rx_waitq, &wait);

		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
		(void) he_readl_rsr0(he_dev, cid);	/* flush posted writes */
		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->rx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		if (timeout == 0)
			hprintk("close rx timeout cid 0x%x\n", cid);

		HPRINTK("close rx cid 0x%x complete\n", cid);
	}

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		volatile unsigned tsr4, tsr0;
		int timeout;

		HPRINTK("close tx cid 0x%x\n", cid);

		/* 2.1.2
		 *
		 * ... the host must first stop queueing packets to the TPDRQ
		 * on the connection to be closed, then wait for all outstanding
		 * packets to be transmitted and their buffers returned to the
		 * TBRQ. When the last packet on the connection arrives in the
		 * TBRQ, the host issues the close command to the adapter.
		 */

		while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
		       (retry < MAX_RETRY)) {
			msleep(sleep);
			if (sleep < 250)
				sleep = sleep * 2;
			++retry;
		}

		if (tx_inuse > 1)
			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
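
		/*
		 * The loop above polls sk_wmem_alloc (which holds one
		 * reference for the socket itself, hence "> 1") with a
		 * roughly exponential backoff -- 1, 2, 4, ... capped at
		 * 250 ms per try -- for at most MAX_RETRY iterations
		 * before giving up and flushing the connection anyway.
		 */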

		/* 2.3.1.1 generic close operations with flush */

		spin_lock_irqsave(&he_dev->global_lock, flags);
		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
					/* also clears TSR4_SESSION_ENDED */

		switch (vcc->qos.txtp.traffic_class) {
		case ATM_UBR:
			he_writel_tsr1(he_dev,
				TSR1_MCR(rate_to_atmf(200000))
				| TSR1_PCR(0), cid);
			break;
		case ATM_CBR:
			he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
			break;
		}
		(void) he_readl_tsr4(he_dev, cid);	/* flush posted writes */

		tpd = __alloc_tpd(he_dev);
		if (tpd == NULL) {
			hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}
		tpd->status |= TPD_EOS | TPD_INT;
		tpd->skb = NULL;
		tpd->vcc = vcc;
		wmb();

		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&he_vcc->tx_waitq, &wait);
		__enqueue_tpd(he_dev, tpd, cid);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->tx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		spin_lock_irqsave(&he_dev->global_lock, flags);

		if (timeout == 0) {
			hprintk("close tx timeout cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}

		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
			udelay(250);
		}

		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
			udelay(250);
		}

close_tx_incomplete:

		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
			int reg = he_vcc->rc_index;

			HPRINTK("cs_stper reg = %d\n", reg);

			if (he_dev->cs_stper[reg].inuse == 0)
				hprintk("cs_stper[%d].inuse = 0!\n", reg);
			else
				--he_dev->cs_stper[reg].inuse;

			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
		}
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		HPRINTK("close tx cid 0x%x complete\n", cid);
	}

	kfree(he_vcc);

	clear_bit(ATM_VF_ADDR, &vcc->flags);
}

static int
he_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
	struct he_tpd *tpd;
#ifdef USE_SCATTERGATHER
	int i, slot = 0;
#endif

#define HE_TPD_BUFSIZE 0xffff

	HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);

	if ((skb->len > HE_TPD_BUFSIZE) ||
	    ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
		hprintk("buffer too large (or small) -- %d bytes\n", skb->len);
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		return -EINVAL;
	}

#ifndef USE_SCATTERGATHER
	if (skb_shinfo(skb)->nr_frags) {
		hprintk("no scatter/gather support\n");
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		return -EINVAL;
	}
#endif
	spin_lock_irqsave(&he_dev->global_lock, flags);

	tpd = __alloc_tpd(he_dev);
	if (tpd == NULL) {
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);
		return -ENOMEM;
	}

	if (vcc->qos.aal == ATM_AAL5)
		tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
	else {
		char *pti_clp = (void *) (skb->data + 3);
		int clp, pti;

		pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
		clp = (*pti_clp & ATM_HDR_CLP);
		tpd->status |= TPD_CELLTYPE(pti);
		if (clp)
			tpd->status |= TPD_CLP;

		skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
	}
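
	/*
	 * (For AAL0 the socket layer hands us one whole cell of
	 * ATM_AAL0_SDU = 52 bytes: a 4-byte header without the HEC plus
	 * 48 payload bytes.  The PTI and CLP bits extracted above live in
	 * the fourth header byte, and the skb_pull() strips the
	 * 52 - 48 = 4 header bytes so only the payload is handed to the
	 * adapter.)
	 */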

#ifdef USE_SCATTERGATHER
	tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
					       skb_headlen(skb), PCI_DMA_TODEVICE);
	tpd->iovec[slot].len = skb_headlen(skb);
	++slot;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (slot == TPD_MAXIOV) {	/* queue tpd; start new tpd */
			tpd->vcc = vcc;
			tpd->skb = NULL;	/* not the last fragment
						   so don't ->push() yet */
			wmb();

			__enqueue_tpd(he_dev, tpd, cid);
			tpd = __alloc_tpd(he_dev);
			if (tpd == NULL) {
				if (vcc->pop)
					vcc->pop(vcc, skb);
				else
					dev_kfree_skb_any(skb);
				atomic_inc(&vcc->stats->tx_err);
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				return -ENOMEM;
			}
			tpd->status |= TPD_USERCELL;
			slot = 0;
		}

		tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
			(void *) page_address(frag->page) + frag->page_offset,
			frag->size, PCI_DMA_TODEVICE);
		tpd->iovec[slot].len = frag->size;
		++slot;
	}

	tpd->iovec[slot - 1].len |= TPD_LST;
#else
	tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
	tpd->length0 = skb->len | TPD_LST;
#endif
	tpd->status |= TPD_INT;

	tpd->vcc = vcc;
	tpd->skb = skb;
	wmb();
	ATM_SKB(skb)->vcc = vcc;

	__enqueue_tpd(he_dev, tpd, cid);
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	atomic_inc(&vcc->stats->tx);

	return 0;
}

static int
he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(atm_dev);
	struct he_ioctl_reg reg;
	int err = 0;

	switch (cmd) {
	case HE_GET_REG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (copy_from_user(&reg, arg,
				   sizeof(struct he_ioctl_reg)))
			return -EFAULT;

		spin_lock_irqsave(&he_dev->global_lock, flags);
		switch (reg.type) {
		case HE_REGTYPE_PCI:
			if (reg.addr >= HE_REGMAP_SIZE) {
				err = -EINVAL;
				break;
			}
			reg.val = he_readl(he_dev, reg.addr);
			break;
		case HE_REGTYPE_RCM:
			reg.val = he_readl_rcm(he_dev, reg.addr);
			break;
		case HE_REGTYPE_TCM:
			reg.val = he_readl_tcm(he_dev, reg.addr);
			break;
		case HE_REGTYPE_MBOX:
			reg.val = he_readl_mbox(he_dev, reg.addr);
			break;
		default:
			err = -EINVAL;
			break;
		}
		spin_unlock_irqrestore(&he_dev->global_lock, flags);
		if (err == 0)
			if (copy_to_user(arg, &reg,
					 sizeof(struct he_ioctl_reg)))
				return -EFAULT;
		break;
	default:
#ifdef CONFIG_ATM_HE_USE_SUNI
		if (atm_dev->phy && atm_dev->phy->ioctl)
			err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
#else /* CONFIG_ATM_HE_USE_SUNI */
		err = -EINVAL;
#endif /* CONFIG_ATM_HE_USE_SUNI */
		break;
	}

	return err;
}
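
/*
 * A rough userspace sketch of HE_GET_REG (see <linux/atm_he.h>); as
 * with other ATM driver ioctls, the request is wrapped in an
 * atmif_sioc carrying the interface number.  Error handling omitted:
 *
 *	struct he_ioctl_reg io = { .addr = 0x0, .type = HE_REGTYPE_PCI };
 *	struct atmif_sioc sioc = { .number = 0 };	(interface number)
 *	int s = socket(PF_ATMPVC, SOCK_DGRAM, 0);
 *
 *	sioc.arg = &io;
 *	sioc.length = sizeof(io);
 *	if (ioctl(s, HE_GET_REG, &sioc) == 0)
 *		printf("reg 0x%x = 0x%x\n", io.addr, io.val);
 */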

static void
he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(atm_dev);

	HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);

	spin_lock_irqsave(&he_dev->global_lock, flags);
	he_writel(he_dev, val, FRAMER + (addr*4));
	(void) he_readl(he_dev, FRAMER + (addr*4));	/* flush posted writes */
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
}

static unsigned char
he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(atm_dev);
	unsigned reg;

	spin_lock_irqsave(&he_dev->global_lock, flags);
	reg = he_readl(he_dev, FRAMER + (addr*4));
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	HPRINTK("phy_get(addr 0x%lx) = 0x%x\n", addr, reg);

	return reg;
}

static int
he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(dev);
	int left, i;
#ifdef notdef
	struct he_rbrq *rbrq_tail;
	struct he_tpdrq *tpdrq_head;
	int rbpl_head, rbpl_tail;
	int inuse;
#endif
	static long mcc = 0, oec = 0, dcc = 0, cec = 0;

	left = *pos;
	if (!left--)
		return sprintf(page, "ATM he driver\n");

	if (!left--)
		return sprintf(page, "%s%s\n\n",
			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");

	if (!left--)
		return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");

	spin_lock_irqsave(&he_dev->global_lock, flags);
	mcc += he_readl(he_dev, MCC);
	oec += he_readl(he_dev, OEC);
	dcc += he_readl(he_dev, DCC);
	cec += he_readl(he_dev, CEC);
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	if (!left--)
		return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n",
			       mcc, oec, dcc, cec);

	if (!left--)
		return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
			       CONFIG_IRQ_SIZE, he_dev->irq_peak);

	if (!left--)
		return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
			       CONFIG_TPDRQ_SIZE);

	if (!left--)
		return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
			       CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);

	if (!left--)
		return sprintf(page, "tbrq_size = %d  peak = %d\n",
			       CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);

#ifdef notdef
	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));

	inuse = rbpl_head - rbpl_tail;
	if (inuse < 0)
		inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
	inuse /= sizeof(struct he_rbp);

	if (!left--)
		return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
			       CONFIG_RBPL_SIZE, inuse);
#endif

	if (!left--)
		return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");

	for (i = 0; i < HE_NUM_CS_STPER; ++i)
		if (!left--)
			return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
				       he_dev->cs_stper[i].pcr,
				       he_dev->cs_stper[i].inuse);

	if (!left--)
		return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
			       he_dev->total_bw, he_dev->atm_dev->link_rate * 10 / 9);

	return 0;
}

/* eeprom routines -- see 4.7 */

static u8 read_prom_byte(struct he_dev *he_dev, int addr)
{
	u32 val = 0, tmp_read = 0;
	int i, j = 0;
	u8 byte_read = 0;

	val = readl(he_dev->membase + HOST_CNTL);
	val &= 0xFFFFE0FF;

	/* Turn on write enable */
	val |= 0x800;
	he_writel(he_dev, val, HOST_CNTL);

	/* Send READ instruction */
	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
		he_writel(he_dev, val | readtab[i], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	/* Next, we need to send the byte address to read from */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	j = 0;

	val &= 0xFFFFF7FF;	/* Turn off write enable */
	he_writel(he_dev, val, HOST_CNTL);

	/* Now, we can read data from the EEPROM by clocking it in */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
		tmp_read = he_readl(he_dev, HOST_CNTL);
		byte_read |= (unsigned char)
			((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	he_writel(he_dev, val | ID_CS, HOST_CNTL);
	udelay(EEPROM_DELAY);

	return byte_read;
}
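
/*
 * The PROM is a serial EEPROM bit-banged through HOST_CNTL: readtab[]
 * shifts out the READ opcode, pairs of clocktab[] entries clock each
 * address and data bit, and the data comes back one bit at a time on
 * ID_DOUT.  Callers fetch multi-byte values a byte at a time, e.g. the
 * station address (sketch, assuming the MAC_ADDR_OFFSET used at init
 * time):
 *
 *	for (i = 0; i < 6; ++i)
 *		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR_OFFSET + i);
 */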

MODULE_LICENSE("GPL");
MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
module_param(disable64, bool, 0);
MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
module_param(nvpibits, short, 0);
MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
module_param(nvcibits, short, 0);
MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
module_param(rx_skb_reserve, short, 0);
MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
module_param(irq_coalesce, bool, 0);
MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
module_param(sdh, bool, 0);
MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
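
/*
 * The parameters above are set at module load time in the usual way,
 * for example (values purely illustrative):
 *
 *	modprobe he nvcibits=10 irq_coalesce=0 sdh=1
 */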

static struct pci_device_id he_pci_tbl[] = {
	{ PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, he_pci_tbl);

static struct pci_driver he_driver = {
	.name =		DEV_LABEL,
	.probe =	he_init_one,
	.remove =	he_remove_one,
	.id_table =	he_pci_tbl,
};

static int __init he_init(void)
{
	return pci_register_driver(&he_driver);
}

static void __exit he_cleanup(void)
{
	pci_unregister_driver(&he_driver);
}

module_init(he_init);
module_exit(he_cleanup);