1 /* de2104x.c: A Linux PCI Ethernet driver for Intel/Digital 21040/1 chips. */
3 Copyright 2001,2003 Jeff Garzik <jgarzik@pobox.com>
5 Copyright 1994, 1995 Digital Equipment Corporation. [de4x5.c]
6 Written/copyright 1994-2001 by Donald Becker. [tulip.c]
8 This software may be used and distributed according to the terms of
9 the GNU General Public License (GPL), incorporated herein by reference.
10 Drivers based on or derived from this code fall under the GPL and must
11 retain the authorship, copyright and license notice. This file is not
12 a complete program and may only be used when the entire operating
13 system is licensed under the GPL.
15 See the file COPYING in this distribution for more information.
17 TODO, in rough priority order:
18 * Support forcing media type with a module parameter,
19 like dl2k.c/sundance.c
20 * Constants (module parms?) for Rx work limit
21 * Complete reset on PciErr
22 * Jumbo frames / dev->change_mtu
23 * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
24 * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
25 * Implement Tx software interrupt mitigation via
30 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
32 #define DRV_NAME "de2104x"
33 #define DRV_RELDATE "Mar 17, 2004"
35 #include <linux/module.h>
36 #include <linux/kernel.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/pci.h>
42 #include <linux/delay.h>
43 #include <linux/ethtool.h>
44 #include <linux/compiler.h>
45 #include <linux/rtnetlink.h>
46 #include <linux/crc32.h>
47 #include <linux/slab.h>
51 #include <linux/uaccess.h>
52 #include <asm/unaligned.h>
54 MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
55 MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
56 MODULE_LICENSE("GPL");
58 static int debug = -1;
59 module_param (debug, int, 0);
60 MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number");
62 /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
63 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
64 defined(CONFIG_SPARC) || defined(__ia64__) || \
65 defined(__sh__) || defined(__mips__)
66 static int rx_copybreak = 1518;
68 static int rx_copybreak = 100;
70 module_param (rx_copybreak, int, 0);
71 MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied");
73 #define DE_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
81 /* Descriptor skip length in 32 bit longwords. */
82 #ifndef CONFIG_DE2104X_DSL
85 #define DSL CONFIG_DE2104X_DSL
88 #define DE_RX_RING_SIZE 64
89 #define DE_TX_RING_SIZE 64
90 #define DE_RING_BYTES \
91 ((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \
92 (sizeof(struct de_desc) * DE_TX_RING_SIZE))
93 #define NEXT_TX(N) (((N) + 1) & (DE_TX_RING_SIZE - 1))
94 #define NEXT_RX(N) (((N) + 1) & (DE_RX_RING_SIZE - 1))
95 #define TX_BUFFS_AVAIL(CP) \
96 (((CP)->tx_tail <= (CP)->tx_head) ? \
97 (CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head : \
98 (CP)->tx_tail - (CP)->tx_head - 1)
100 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
103 #define DE_SETUP_SKB ((struct sk_buff *) 1)
104 #define DE_DUMMY_SKB ((struct sk_buff *) 2)
105 #define DE_SETUP_FRAME_WORDS 96
106 #define DE_EEPROM_WORDS 256
107 #define DE_EEPROM_SIZE (DE_EEPROM_WORDS * sizeof(u16))
108 #define DE_MAX_MEDIA 5
110 #define DE_MEDIA_TP_AUTO 0
111 #define DE_MEDIA_BNC 1
112 #define DE_MEDIA_AUI 2
113 #define DE_MEDIA_TP 3
114 #define DE_MEDIA_TP_FD 4
115 #define DE_MEDIA_INVALID DE_MAX_MEDIA
116 #define DE_MEDIA_FIRST 0
117 #define DE_MEDIA_LAST (DE_MAX_MEDIA - 1)
118 #define DE_AUI_BNC (SUPPORTED_AUI | SUPPORTED_BNC)
120 #define DE_TIMER_LINK (60 * HZ)
121 #define DE_TIMER_NO_LINK (5 * HZ)
123 #define DE_NUM_REGS 16
124 #define DE_REGS_SIZE (DE_NUM_REGS * sizeof(u32))
125 #define DE_REGS_VER 1
127 /* Time in jiffies before concluding the transmitter is hung. */
128 #define TX_TIMEOUT (6*HZ)
130 /* This is a mysterious value that can be written to CSR11 in the 21040 (only)
131 to support a pre-NWay full-duplex signaling mechanism using short frames.
132 No one knows what it should be, but if left at its default value some
133 10base2(!) packets trigger a full-duplex-request interrupt. */
134 #define FULL_DUPLEX_MAGIC 0x6969
157 CacheAlign16 = 0x00008000,
158 BurstLen4 = 0x00000400,
159 DescSkipLen = (DSL << 2),
162 NormalTxPoll = (1 << 0),
163 NormalRxPoll = (1 << 0),
165 /* Tx/Rx descriptor status bits */
168 RxErrLong = (1 << 7),
170 RxErrFIFO = (1 << 0),
171 RxErrRunt = (1 << 11),
172 RxErrFrame = (1 << 14),
174 FirstFrag = (1 << 29),
175 LastFrag = (1 << 30),
177 TxFIFOUnder = (1 << 1),
178 TxLinkFail = (1 << 2) | (1 << 10) | (1 << 11),
181 TxJabber = (1 << 14),
182 SetupFrame = (1 << 27),
193 TxState = (1 << 22) | (1 << 21) | (1 << 20),
194 RxState = (1 << 19) | (1 << 18) | (1 << 17),
195 LinkFail = (1 << 12),
197 RxStopped = (1 << 8),
198 TxStopped = (1 << 1),
201 TxEnable = (1 << 13),
203 RxTx = TxEnable | RxEnable,
204 FullDuplex = (1 << 9),
205 AcceptAllMulticast = (1 << 7),
206 AcceptAllPhys = (1 << 6),
208 MacModeClear = (1<<12) | (1<<11) | (1<<10) | (1<<8) | (1<<3) |
209 RxTx | BOCnt | AcceptAllPhys | AcceptAllMulticast,
212 EE_SHIFT_CLK = 0x02, /* EEPROM shift clock. */
213 EE_CS = 0x01, /* EEPROM chip select. */
214 EE_DATA_WRITE = 0x04, /* Data from the Tulip to EEPROM. */
217 EE_DATA_READ = 0x08, /* Data from the EEPROM chip. */
218 EE_ENB = (0x4800 | EE_CS),
220 /* The EEPROM commands include the always-set leading bit. */
224 RxMissedOver = (1 << 16),
225 RxMissedMask = 0xffff,
227 /* SROM-related bits */
229 MediaBlockMask = 0x3f,
230 MediaCustomCSRs = (1 << 6),
233 PM_Sleep = (1 << 31),
234 PM_Snooze = (1 << 30),
235 PM_Mask = PM_Sleep | PM_Snooze,
238 NWayState = (1 << 14) | (1 << 13) | (1 << 12),
239 NWayRestart = (1 << 12),
240 NonselPortActive = (1 << 9),
241 SelPortActive = (1 << 8),
242 LinkFailStatus = (1 << 2),
243 NetCxnErr = (1 << 1),
246 static const u32 de_intr_mask =
247 IntrOK | IntrErr | RxIntr | RxEmpty | TxIntr | TxEmpty |
248 LinkPass | LinkFail | PciErr;
251 * Set the programmable burst length to 4 longwords for all:
252 * DMA errors result without these values. Cache align 16 long.
254 static const u32 de_bus_mode = CacheAlign16 | BurstLen4 | DescSkipLen;
256 struct de_srom_media_block {
263 struct de_srom_info_leaf {
280 u16 type; /* DE_MEDIA_xxx */
297 struct net_device *dev;
300 struct de_desc *rx_ring;
301 struct de_desc *tx_ring;
302 struct ring_info tx_skb[DE_TX_RING_SIZE];
303 struct ring_info rx_skb[DE_RX_RING_SIZE];
309 struct pci_dev *pdev;
311 u16 setup_frame[DE_SETUP_FRAME_WORDS];
316 struct media_info media[DE_MAX_MEDIA];
317 struct timer_list media_timer;
321 unsigned de21040 : 1;
322 unsigned media_lock : 1;
326 static void de_set_rx_mode (struct net_device *dev);
327 static void de_tx (struct de_private *de);
328 static void de_clean_rings (struct de_private *de);
329 static void de_media_interrupt (struct de_private *de, u32 status);
330 static void de21040_media_timer (struct timer_list *t);
331 static void de21041_media_timer (struct timer_list *t);
332 static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);
335 static const struct pci_device_id de_pci_tbl[] = {
336 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
337 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
338 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
339 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
342 MODULE_DEVICE_TABLE(pci, de_pci_tbl);
344 static const char * const media_name[DE_MAX_MEDIA] = {
352 /* 21040 transceiver register settings:
353 * TP AUTO(unused), BNC(unused), AUI, TP, TP FD*/
354 static u16 t21040_csr13[] = { 0, 0, 0x8F09, 0x8F01, 0x8F01, };
355 static u16 t21040_csr14[] = { 0, 0, 0x0705, 0xFFFF, 0xFFFD, };
356 static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };
358 /* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/
359 static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
360 static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
361 /* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */
362 static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
363 static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
366 #define dr32(reg) ioread32(de->regs + (reg))
367 #define dw32(reg, val) iowrite32((val), de->regs + (reg))
/* de_rx_err_acct - account one errored Rx descriptor in dev->stats.
 * Called from de_rx() when the descriptor status word carries error
 * bits; only updates counters and emits debug logging.
 * NOTE(review): several intermediate source lines are not visible in
 * this view, so the exact branch structure should be re-checked
 * against the full file.
 */
370 static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
373 netif_dbg(de, rx_err, de->dev,
374 "rx err, slot %d status 0x%x len %d\n",
375 rx_tail, status, len);
/* A frame that does not have both first- and last-descriptor bits set
 * within one buffer spanned multiple Rx buffers (oversized frame). */
377 if ((status & 0x38000300) != 0x0300) {
378 /* Ignore earlier buffers. */
379 if ((status & 0xffff) != 0x7fff) {
380 netif_warn(de, rx_err, de->dev,
381 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
383 de->dev->stats.rx_length_errors++;
385 } else if (status & RxError) {
386 /* There was a fatal error. */
387 de->dev->stats.rx_errors++; /* end of a packet.*/
/* 0x0890 covers the runt/long-frame status bits. */
388 if (status & 0x0890) de->dev->stats.rx_length_errors++;
389 if (status & RxErrCRC) de->dev->stats.rx_crc_errors++;
390 if (status & RxErrFIFO) de->dev->stats.rx_fifo_errors++;
/* de_rx - service the Rx ring and hand completed frames to the stack.
 * Walks the ring from de->rx_tail until a descriptor still owned by
 * the chip (DescOwn) is reached or the work limit (one full ring) is
 * exhausted.  Frames no longer than rx_copybreak are copied into a
 * freshly allocated skb so the original ring buffer can be reused;
 * larger frames are detached and replaced by a new buffer.
 */
394 static void de_rx (struct de_private *de)
396 unsigned rx_tail = de->rx_tail;
397 unsigned rx_work = DE_RX_RING_SIZE;
404 struct sk_buff *skb, *copy_skb;
405 unsigned copying_skb, buflen;
407 skb = de->rx_skb[rx_tail].skb;
410 status = le32_to_cpu(de->rx_ring[rx_tail].opts1);
411 if (status & DescOwn)
414 /* the length is actually a 15 bit value here according
415 * to Table 4-1 in the DE2104x spec so mask is 0x7fff
/* Subtract 4 to drop the trailing FCS from the reported length. */
417 len = ((status >> 16) & 0x7fff) - 4;
418 mapping = de->rx_skb[rx_tail].mapping;
420 if (unlikely(drop)) {
421 de->dev->stats.rx_dropped++;
/* Error if the first/last-descriptor or error bits disagree with a
 * clean single-buffer frame; hand off to the error accounting path. */
425 if (unlikely((status & 0x38008300) != 0x0300)) {
426 de_rx_err_acct(de, rx_tail, status, len);
430 copying_skb = (len <= rx_copybreak);
432 netif_dbg(de, rx_status, de->dev,
433 "rx slot %d status 0x%x len %d copying? %d\n",
434 rx_tail, status, len, copying_skb);
436 buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
437 copy_skb = netdev_alloc_skb(de->dev, buflen);
438 if (unlikely(!copy_skb)) {
439 de->dev->stats.rx_dropped++;
/* Non-copy path: unmap the full-size buffer and install the new skb
 * as the ring replacement. */
446 pci_unmap_single(de->pdev, mapping,
447 buflen, PCI_DMA_FROMDEVICE);
451 de->rx_skb[rx_tail].mapping =
452 pci_map_single(de->pdev, copy_skb->data,
453 buflen, PCI_DMA_FROMDEVICE);
454 de->rx_skb[rx_tail].skb = copy_skb;
/* Copybreak path: sync, memcpy into the small skb, give the DMA
 * buffer back to the device untouched. */
456 pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
457 skb_reserve(copy_skb, RX_OFFSET);
458 skb_copy_from_linear_data(skb, skb_put(copy_skb, len),
460 pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
462 /* We'll reuse the original ring buffer. */
466 skb->protocol = eth_type_trans (skb, de->dev);
468 de->dev->stats.rx_packets++;
469 de->dev->stats.rx_bytes += skb->len;
471 if (rc == NET_RX_DROP)
/* Re-arm the descriptor; the final ring slot must carry RingEnd. */
475 if (rx_tail == (DE_RX_RING_SIZE - 1))
476 de->rx_ring[rx_tail].opts2 =
477 cpu_to_le32(RingEnd | de->rx_buf_sz);
479 de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz);
480 de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
482 de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
483 rx_tail = NEXT_RX(rx_tail);
487 netdev_warn(de->dev, "rx work limit reached\n");
489 de->rx_tail = rx_tail;
/* de_interrupt - interrupt service routine (shared IRQ capable).
 * Acks the status register, then dispatches: Rx work, Tx reclaim and
 * link events (under de->lock), and PCI bus-error reporting.
 * NOTE(review): the return statements are not visible in this view;
 * the early status check presumably returns IRQ_NONE for foreign
 * interrupts -- confirm against the full file.
 */
492 static irqreturn_t de_interrupt (int irq, void *dev_instance)
494 struct net_device *dev = dev_instance;
495 struct de_private *de = netdev_priv(dev);
498 status = dr32(MacStatus);
/* No interrupt bits for us, or 0xFFFF (card likely removed). */
499 if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF))
502 netif_dbg(de, intr, dev, "intr, status %08x mode %08x desc %u/%u/%u\n",
503 status, dr32(MacMode),
504 de->rx_tail, de->tx_head, de->tx_tail);
/* Write-one-to-clear acknowledgement of the handled bits. */
506 dw32(MacStatus, status);
508 if (status & (RxIntr | RxEmpty)) {
510 if (status & RxEmpty)
511 dw32(RxPoll, NormalRxPoll);
514 spin_lock(&de->lock);
516 if (status & (TxIntr | TxEmpty))
519 if (status & (LinkPass | LinkFail))
520 de_media_interrupt(de, status);
522 spin_unlock(&de->lock);
/* Read-then-write PCI_STATUS clears the latched error bits. */
524 if (status & PciErr) {
527 pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
528 pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
530 "PCI bus error, status=%08x, PCI status=%04x\n",
/* de_tx - reclaim completed Tx descriptors (irq context, lock held by
 * caller).  Unmaps each finished buffer, counts errors/bytes/packets
 * on the last fragment, frees the skb, and wakes the queue once at
 * least a quarter of the ring is free again.
 */
537 static void de_tx (struct de_private *de)
539 unsigned tx_head = de->tx_head;
540 unsigned tx_tail = de->tx_tail;
542 while (tx_tail != tx_head) {
547 status = le32_to_cpu(de->tx_ring[tx_tail].opts1);
/* Chip still owns this descriptor: stop reclaiming. */
548 if (status & DescOwn)
551 skb = de->tx_skb[tx_tail].skb;
/* DE_DUMMY_SKB / DE_SETUP_SKB are sentinel markers, not real skbs
 * (see the errata-workaround and setup-frame paths). */
553 if (unlikely(skb == DE_DUMMY_SKB))
556 if (unlikely(skb == DE_SETUP_SKB)) {
557 pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
558 sizeof(de->setup_frame), PCI_DMA_TODEVICE);
562 pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
563 skb->len, PCI_DMA_TODEVICE);
565 if (status & LastFrag) {
566 if (status & TxError) {
567 netif_dbg(de, tx_err, de->dev,
568 "tx err, status 0x%x\n",
570 de->dev->stats.tx_errors++;
572 de->dev->stats.tx_window_errors++;
573 if (status & TxMaxCol)
574 de->dev->stats.tx_aborted_errors++;
575 if (status & TxLinkFail)
576 de->dev->stats.tx_carrier_errors++;
577 if (status & TxFIFOUnder)
578 de->dev->stats.tx_fifo_errors++;
580 de->dev->stats.tx_packets++;
581 de->dev->stats.tx_bytes += skb->len;
582 netif_dbg(de, tx_done, de->dev,
583 "tx done, slot %d\n", tx_tail);
585 dev_consume_skb_irq(skb);
589 de->tx_skb[tx_tail].skb = NULL;
591 tx_tail = NEXT_TX(tx_tail);
594 de->tx_tail = tx_tail;
596 if (netif_queue_stopped(de->dev) && (TX_BUFFS_AVAIL(de) > (DE_TX_RING_SIZE / 4)))
597 netif_wake_queue(de->dev);
/* de_start_xmit - queue one skb on the Tx ring.
 * Returns NETDEV_TX_BUSY (queue stopped) when no descriptor is free;
 * otherwise maps the buffer, fills in the descriptor, transfers
 * ownership to the chip via DescOwn and kicks TxPoll.
 */
600 static netdev_tx_t de_start_xmit (struct sk_buff *skb,
601 struct net_device *dev)
603 struct de_private *de = netdev_priv(dev);
604 unsigned int entry, tx_free;
605 u32 mapping, len, flags = FirstFrag | LastFrag;
608 spin_lock_irq(&de->lock);
610 tx_free = TX_BUFFS_AVAIL(de);
612 netif_stop_queue(dev);
613 spin_unlock_irq(&de->lock);
614 return NETDEV_TX_BUSY;
620 txd = &de->tx_ring[entry];
623 mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE);
/* Last slot wraps the ring; the half-full condition presumably adds
 * a Tx-interrupt flag for completion mitigation -- the flag set on
 * these branches is not visible here, confirm in the full file. */
624 if (entry == (DE_TX_RING_SIZE - 1))
626 if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2)))
629 txd->opts2 = cpu_to_le32(flags);
630 txd->addr1 = cpu_to_le32(mapping);
632 de->tx_skb[entry].skb = skb;
633 de->tx_skb[entry].mapping = mapping;
/* Ownership transfer must be the last descriptor write. */
636 txd->opts1 = cpu_to_le32(DescOwn);
639 de->tx_head = NEXT_TX(entry);
640 netif_dbg(de, tx_queued, dev, "tx queued, slot %d, skblen %d\n",
644 netif_stop_queue(dev);
646 spin_unlock_irq(&de->lock);
648 /* Trigger an immediate transmit demand. */
649 dw32(TxPoll, NormalTxPoll);
654 /* Set or clear the multicast filter for this adaptor.
655 Note that we only use exclusion around actually queueing the
656 new frame, not around filling de->setup_frame. This is non-deterministic
657 when re-entered but still correct. */
/* build_setup_frame_hash - fill the setup frame in hash-filter mode:
 * a 512-bit multicast hash table followed by our unicast address in
 * the final entry.  Each 16-bit word is written twice, matching the
 * hardware's setup-frame layout.
 */
659 static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
661 struct de_private *de = netdev_priv(dev);
663 struct netdev_hw_addr *ha;
667 memset(hash_table, 0, sizeof(hash_table));
668 __set_bit_le(255, hash_table); /* Broadcast entry */
669 /* This should work on big-endian machines as well. */
670 netdev_for_each_mc_addr(ha, dev) {
/* 9-bit CRC index selects one of the 512 hash bits. */
671 int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
673 __set_bit_le(index, hash_table);
676 for (i = 0; i < 32; i++) {
677 *setup_frm++ = hash_table[i];
678 *setup_frm++ = hash_table[i];
680 setup_frm = &de->setup_frame[13*6];
682 /* Fill the final entry with our physical address. */
683 eaddrs = (u16 *)dev->dev_addr;
684 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
685 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
686 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
/* build_setup_frame_perfect - fill the setup frame in perfect-filter
 * mode.  Used when there are at most 14 multicast addresses: each one
 * occupies a 16-entry slot, unused slots are padded with broadcast
 * (0xff), and the final slot holds our unicast address.  Each 16-bit
 * word is written twice per the hardware layout.
 */
689 static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
691 struct de_private *de = netdev_priv(dev);
692 struct netdev_hw_addr *ha;
695 /* We have <= 14 addresses so we can use the wonderful
696 16 address perfect filtering of the Tulip. */
697 netdev_for_each_mc_addr(ha, dev) {
698 eaddrs = (u16 *) ha->addr;
699 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
700 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
701 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
703 /* Fill the unused entries with the broadcast address. */
704 memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
705 setup_frm = &de->setup_frame[15*6];
707 /* Fill the final entry with our physical address. */
708 eaddrs = (u16 *)dev->dev_addr;
709 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
710 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
711 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
/* __de_set_rx_mode - program the Rx filter; caller holds de->lock.
 * Promiscuous and all-multicast modes are handled purely via MacMode
 * bits.  Otherwise a setup frame (hash filter for >14 addresses,
 * perfect filter for <=14) is queued on the Tx ring, preceded by a
 * dummy descriptor to work around a chip erratum.
 */
715 static void __de_set_rx_mode (struct net_device *dev)
717 struct de_private *de = netdev_priv(dev);
722 struct de_desc *dummy_txd = NULL;
724 macmode = dr32(MacMode) & ~(AcceptAllMulticast | AcceptAllPhys);
726 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
727 macmode |= AcceptAllMulticast | AcceptAllPhys;
731 if ((netdev_mc_count(dev) > 1000) || (dev->flags & IFF_ALLMULTI)) {
732 /* Too many to filter well -- accept all multicasts. */
733 macmode |= AcceptAllMulticast;
737 /* Note that only the low-address shortword of setup_frame is valid!
738 The values are doubled for big-endian architectures. */
739 if (netdev_mc_count(dev) > 14) /* Must use a multicast hash table. */
740 build_setup_frame_hash (de->setup_frame, dev);
742 build_setup_frame_perfect (de->setup_frame, dev);
745 * Now add this frame to the Tx list.
750 /* Avoid a chip errata by prefixing a dummy entry. */
752 de->tx_skb[entry].skb = DE_DUMMY_SKB;
754 dummy_txd = &de->tx_ring[entry];
755 dummy_txd->opts2 = (entry == (DE_TX_RING_SIZE - 1)) ?
756 cpu_to_le32(RingEnd) : 0;
757 dummy_txd->addr1 = 0;
759 /* Must set DescOwned later to avoid race with chip */
761 entry = NEXT_TX(entry);
764 de->tx_skb[entry].skb = DE_SETUP_SKB;
765 de->tx_skb[entry].mapping = mapping =
766 pci_map_single (de->pdev, de->setup_frame,
767 sizeof (de->setup_frame), PCI_DMA_TODEVICE);
769 /* Put the setup frame on the Tx list. */
770 txd = &de->tx_ring[entry];
771 if (entry == (DE_TX_RING_SIZE - 1))
772 txd->opts2 = cpu_to_le32(SetupFrame | RingEnd | sizeof (de->setup_frame));
774 txd->opts2 = cpu_to_le32(SetupFrame | sizeof (de->setup_frame));
775 txd->addr1 = cpu_to_le32(mapping);
/* Hand the setup descriptor to the chip first, then the dummy, so
 * the chip never sees the dummy without a valid follower. */
778 txd->opts1 = cpu_to_le32(DescOwn);
782 dummy_txd->opts1 = cpu_to_le32(DescOwn);
786 de->tx_head = NEXT_TX(entry);
788 if (TX_BUFFS_AVAIL(de) == 0)
789 netif_stop_queue(dev);
791 /* Trigger an immediate transmit demand. */
792 dw32(TxPoll, NormalTxPoll);
795 if (macmode != dr32(MacMode))
796 dw32(MacMode, macmode);
/* de_set_rx_mode - ndo_set_rx_mode hook: locked wrapper around
 * __de_set_rx_mode(). */
799 static void de_set_rx_mode (struct net_device *dev)
802 struct de_private *de = netdev_priv(dev);
804 spin_lock_irqsave (&de->lock, flags);
805 __de_set_rx_mode(dev);
806 spin_unlock_irqrestore (&de->lock, flags);
/* de_rx_missed - fold the CSR8 missed-frame count into dev stats.
 * RxMissedOver set means the 16-bit hardware counter saturated, so
 * the best we can add is the full mask value. */
809 static inline void de_rx_missed(struct de_private *de, u32 rx_missed)
811 if (unlikely(rx_missed & RxMissedOver))
812 de->dev->stats.rx_missed_errors += RxMissedMask;
814 de->dev->stats.rx_missed_errors += (rx_missed & RxMissedMask);
/* __de_get_stats - harvest the self-clearing RxMissed counter into
 * dev->stats; caller must hold de->lock. */
817 static void __de_get_stats(struct de_private *de)
819 u32 tmp = dr32(RxMissed); /* self-clearing */
821 de_rx_missed(de, tmp);
/* de_get_stats - ndo_get_stats hook.  Refreshes the hardware missed-
 * frame counter (only while the device is up and present) under the
 * lock, then returns dev->stats (return statement not visible here).
 */
824 static struct net_device_stats *de_get_stats(struct net_device *dev)
826 struct de_private *de = netdev_priv(dev);
828 /* The chip only need report frame silently dropped. */
829 spin_lock_irq(&de->lock);
830 if (netif_running(dev) && netif_device_present(dev))
832 spin_unlock_irq(&de->lock);
/* de_is_running - nonzero while the Rx or Tx state machine is active. */
837 static inline int de_is_running (struct de_private *de)
839 return (dr32(MacStatus) & (RxState | TxState)) ? 1 : 0;
/* de_stop_rxtx - disable the Rx/Tx engines and wait (up to ~1.3 ms,
 * polled in 100 us steps) for any in-flight frame to complete; warns
 * if the DMA engines do not go idle in time. */
842 static void de_stop_rxtx (struct de_private *de)
845 unsigned int i = 1300/100;
847 macmode = dr32(MacMode);
848 if (macmode & RxTx) {
849 dw32(MacMode, macmode & ~RxTx);
853 /* wait until in-flight frame completes.
854 * Max time @ 10BT: 1500*8b/10Mbps == 1200us (+ 100us margin)
855 * Typically expect this loop to end in < 50 us on 100BT.
858 if (!de_is_running(de))
863 netdev_warn(de->dev, "timeout expired, stopping DMA\n");
/* de_start_rxtx - enable both Rx and Tx engines if not already on. */
866 static inline void de_start_rxtx (struct de_private *de)
870 macmode = dr32(MacMode);
871 if ((macmode & RxTx) != RxTx) {
872 dw32(MacMode, macmode | RxTx);
/* de_stop_hw - quiesce the chip: ack pending status and reset the
 * Tx ring indices (most of the body is not visible in this view). */
877 static void de_stop_hw (struct de_private *de)
885 dw32(MacStatus, dr32(MacStatus));
890 de->tx_head = de->tx_tail = 0;
/* de_link_up - mark carrier on and log the active media, but only on
 * an actual off->on transition. */
893 static void de_link_up(struct de_private *de)
895 if (!netif_carrier_ok(de->dev)) {
896 netif_carrier_on(de->dev);
897 netif_info(de, link, de->dev, "link up, media %s\n",
898 media_name[de->media_type]);
/* de_link_down - mark carrier off and log, only on an on->off
 * transition. */
902 static void de_link_down(struct de_private *de)
904 if (netif_carrier_ok(de->dev)) {
905 netif_carrier_off(de->dev);
906 netif_info(de, link, de->dev, "link down\n");
/* de_set_media - program the SIA (CSR13-15) for de->media_type.
 * Resets the PHY by writing 0 to CSR13 before loading the new CSR14/
 * CSR15/CSR13 values, and toggles the MacMode FullDuplex bit for the
 * TP full-duplex media.  The chip should be stopped by the caller.
 */
910 static void de_set_media (struct de_private *de)
912 unsigned media = de->media_type;
913 u32 macmode = dr32(MacMode);
915 if (de_is_running(de))
916 netdev_warn(de->dev, "chip is running while changing media!\n");
/* CSR11 write applies only to the 21040's pre-NWay full-duplex
 * mechanism (see FULL_DUPLEX_MAGIC above). */
919 dw32(CSR11, FULL_DUPLEX_MAGIC);
920 dw32(CSR13, 0); /* Reset phy */
921 dw32(CSR14, de->media[media].csr14);
922 dw32(CSR15, de->media[media].csr15);
923 dw32(CSR13, de->media[media].csr13);
925 /* must delay 10ms before writing to other registers,
930 if (media == DE_MEDIA_TP_FD)
931 macmode |= FullDuplex;
933 macmode &= ~FullDuplex;
935 netif_info(de, link, de->dev, "set link %s\n", media_name[media]);
936 netif_info(de, hw, de->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n",
937 dr32(MacMode), dr32(SIAStatus),
938 dr32(CSR13), dr32(CSR14), dr32(CSR15));
939 netif_info(de, hw, de->dev, "set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
940 macmode, de->media[media].csr13,
941 de->media[media].csr14, de->media[media].csr15);
942 if (macmode != dr32(MacMode))
943 dw32(MacMode, macmode);
/* de_next_media - select the first entry from the candidate media
 * list that we are allowed to advertise; leaves de->media_type
 * untouched if none qualifies. */
946 static void de_next_media (struct de_private *de, const u32 *media,
947 unsigned int n_media)
951 for (i = 0; i < n_media; i++) {
952 if (de_ok_to_advertise(de, media[i])) {
953 de->media_type = media[i];
/* de21040_media_timer - periodic link poll for the 21040.
 * With carrier: re-arm a long (60 s) timer and report link up.
 * Without carrier: alternate between AUI and TP (the 21040 has no
 * BNC entry in its tables), reset the media under the lock, and
 * re-arm a short (5 s) timer to try again.
 */
959 static void de21040_media_timer (struct timer_list *t)
961 struct de_private *de = from_timer(de, t, media_timer);
962 struct net_device *dev = de->dev;
963 u32 status = dr32(SIAStatus);
964 unsigned int carrier;
967 carrier = (status & NetCxnErr) ? 0 : 1;
/* On TP, LinkFailStatus overrides the carrier indication. */
970 if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus))
973 de->media_timer.expires = jiffies + DE_TIMER_LINK;
974 add_timer(&de->media_timer);
975 if (!netif_carrier_ok(dev))
978 netif_info(de, timer, dev, "%s link ok, status %x\n",
979 media_name[de->media_type], status);
988 if (de->media_type == DE_MEDIA_AUI) {
989 static const u32 next_state = DE_MEDIA_TP;
990 de_next_media(de, &next_state, 1);
992 static const u32 next_state = DE_MEDIA_AUI;
993 de_next_media(de, &next_state, 1);
996 spin_lock_irqsave(&de->lock, flags);
998 spin_unlock_irqrestore(&de->lock, flags);
1003 de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
1004 add_timer(&de->media_timer);
1006 netif_info(de, timer, dev, "no link, trying media %s, status %x\n",
1007 media_name[de->media_type], status);
/* de_ok_to_advertise - check whether new_media is permitted by the
 * current de->media_advertise mask.  Presumably returns nonzero when
 * allowed; the return statements are not visible in this view.
 */
1010 static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
1012 switch (new_media) {
1013 case DE_MEDIA_TP_AUTO:
/* Autoneg requires both the Autoneg flag and at least one TP mode. */
1014 if (!(de->media_advertise & ADVERTISED_Autoneg))
1016 if (!(de->media_advertise & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full)))
1020 if (!(de->media_advertise & ADVERTISED_BNC))
1024 if (!(de->media_advertise & ADVERTISED_AUI))
1028 if (!(de->media_advertise & ADVERTISED_10baseT_Half))
1031 case DE_MEDIA_TP_FD:
1032 if (!(de->media_advertise & ADVERTISED_10baseT_Full))
/* de21041_media_timer - periodic link poll for the 21041.
 * With carrier: re-arm the long timer and report link up.
 * Without carrier (and media not locked): use the SIA port-activity
 * bits as a hint for which port to try next, otherwise rotate through
 * the advertised media list; then reset the media under the lock and
 * re-arm the short timer.
 */
1040 static void de21041_media_timer (struct timer_list *t)
1042 struct de_private *de = from_timer(de, t, media_timer);
1043 struct net_device *dev = de->dev;
1044 u32 status = dr32(SIAStatus);
1045 unsigned int carrier;
1046 unsigned long flags;
1048 /* clear port active bits */
1049 dw32(SIAStatus, NonselPortActive | SelPortActive);
1051 carrier = (status & NetCxnErr) ? 0 : 1;
/* On any TP media, LinkFailStatus overrides the carrier indication. */
1054 if ((de->media_type == DE_MEDIA_TP_AUTO ||
1055 de->media_type == DE_MEDIA_TP ||
1056 de->media_type == DE_MEDIA_TP_FD) &&
1057 (status & LinkFailStatus))
1060 de->media_timer.expires = jiffies + DE_TIMER_LINK;
1061 add_timer(&de->media_timer);
1062 if (!netif_carrier_ok(dev))
1065 netif_info(de, timer, dev,
1066 "%s link ok, mode %x status %x\n",
1067 media_name[de->media_type],
1068 dr32(MacMode), status);
1074 /* if media type locked, don't switch media */
1078 /* if activity detected, use that as hint for new media type */
1079 if (status & NonselPortActive) {
1080 unsigned int have_media = 1;
1082 /* if AUI/BNC selected, then activity is on TP port */
1083 if (de->media_type == DE_MEDIA_AUI ||
1084 de->media_type == DE_MEDIA_BNC) {
1085 if (de_ok_to_advertise(de, DE_MEDIA_TP_AUTO))
1086 de->media_type = DE_MEDIA_TP_AUTO;
1091 /* TP selected. If there is only TP and BNC, then it's BNC */
1092 else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_BNC) &&
1093 de_ok_to_advertise(de, DE_MEDIA_BNC))
1094 de->media_type = DE_MEDIA_BNC;
1096 /* TP selected. If there is only TP and AUI, then it's AUI */
1097 else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_AUI) &&
1098 de_ok_to_advertise(de, DE_MEDIA_AUI))
1099 de->media_type = DE_MEDIA_AUI;
1101 /* otherwise, ignore the hint */
1110 * Absent or ambiguous activity hint, move to next advertised
1111 * media state. If de->media_type is left unchanged, this
1112 * simply resets the PHY and reloads the current media settings.
1114 if (de->media_type == DE_MEDIA_AUI) {
1115 static const u32 next_states[] = {
1116 DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
1118 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1119 } else if (de->media_type == DE_MEDIA_BNC) {
1120 static const u32 next_states[] = {
1121 DE_MEDIA_TP_AUTO, DE_MEDIA_AUI
1123 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1125 static const u32 next_states[] = {
1126 DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
1128 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1132 spin_lock_irqsave(&de->lock, flags);
1134 spin_unlock_irqrestore(&de->lock, flags);
1139 de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
1140 add_timer(&de->media_timer);
1142 netif_info(de, timer, dev, "no link, trying media %s, status %x\n",
1143 media_name[de->media_type], status);
/* de_media_interrupt - handle LinkPass/LinkFail interrupt bits.
 * LinkPass on AUI/BNC is used as a hint to switch back to TP
 * autonegotiation when TP is advertisable; LinkFail marks the link
 * down only for TP media (AUI/BNC have no link-fail indication).
 */
1146 static void de_media_interrupt (struct de_private *de, u32 status)
1148 if (status & LinkPass) {
1149 /* Ignore if current media is AUI or BNC and we can't use TP */
1150 if ((de->media_type == DE_MEDIA_AUI ||
1151 de->media_type == DE_MEDIA_BNC) &&
1153 !de_ok_to_advertise(de, DE_MEDIA_TP_AUTO)))
1155 /* If current media is not TP, change it to TP */
1156 if ((de->media_type == DE_MEDIA_AUI ||
1157 de->media_type == DE_MEDIA_BNC)) {
1158 de->media_type = DE_MEDIA_TP_AUTO;
1164 mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
/* Reaching here, only LinkFail remains as the possible cause. */
1168 BUG_ON(!(status & LinkFail));
1169 /* Mark the link as down only if current media is TP */
1170 if (netif_carrier_ok(de->dev) && de->media_type != DE_MEDIA_AUI &&
1171 de->media_type != DE_MEDIA_BNC) {
1173 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
/* de_reset_mac - software-reset the MAC via the BusMode register and
 * restore de_bus_mode, then poll MacStatus for a sane idle state.
 * A BusMode readback of all-ones means the chip is gone (the error
 * return paths are not visible in this view).
 */
1177 static int de_reset_mac (struct de_private *de)
1182 * Reset MAC. de4x5.c and tulip.c examined for "advice"
1186 if (dr32(BusMode) == 0xffffffff)
1189 /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
1190 dw32 (BusMode, CmdReset);
1193 dw32 (BusMode, de_bus_mode);
1196 for (tmp = 0; tmp < 5; tmp++) {
1203 status = dr32(MacStatus);
1204 if (status & (RxState | TxState))
1206 if (status == 0xffffffff)
/* de_adapter_wake - clear any sleep/snooze power-management state in
 * the PCIPM config register, then delay as de4x5.c does. */
1211 static void de_adapter_wake (struct de_private *de)
1218 pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1219 if (pmctl & PM_Mask) {
1221 pci_write_config_dword(de->pdev, PCIPM, pmctl);
1223 /* de4x5.c delays, so we do too */
/* de_adapter_sleep - reset the PHY and put the adapter into its
 * low-power state via the PCIPM config register. */
1228 static void de_adapter_sleep (struct de_private *de)
1235 dw32(CSR13, 0); /* Reset phy */
1236 pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1238 pci_write_config_dword(de->pdev, PCIPM, pmctl);
/* de_init_hw - full hardware bring-up: wake the adapter, reset the
 * MAC, program the media, install the ring base addresses, enable
 * Rx/Tx, unmask interrupts and load the Rx filter.  Returns the
 * de_reset_mac() error code on failure (error path not fully visible).
 */
1241 static int de_init_hw (struct de_private *de)
1243 struct net_device *dev = de->dev;
1247 de_adapter_wake(de);
1249 macmode = dr32(MacMode) & ~MacModeClear;
1251 rc = de_reset_mac(de);
1255 de_set_media(de); /* reset phy */
1257 dw32(RxRingAddr, de->ring_dma);
/* Tx ring lives immediately after the Rx ring in the DMA block. */
1258 dw32(TxRingAddr, de->ring_dma + (sizeof(struct de_desc) * DE_RX_RING_SIZE));
1260 dw32(MacMode, RxTx | macmode);
1262 dr32(RxMissed); /* self-clearing */
1264 dw32(IntrMask, de_intr_mask);
1266 de_set_rx_mode(dev);
/* de_refill_rx - allocate and DMA-map one skb per Rx ring slot and
 * hand every descriptor to the chip (DescOwn); the last slot carries
 * RingEnd.  The allocation-failure return path is not visible here.
 */
1271 static int de_refill_rx (struct de_private *de)
1275 for (i = 0; i < DE_RX_RING_SIZE; i++) {
1276 struct sk_buff *skb;
1278 skb = netdev_alloc_skb(de->dev, de->rx_buf_sz);
1282 de->rx_skb[i].mapping = pci_map_single(de->pdev,
1283 skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1284 de->rx_skb[i].skb = skb;
1286 de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
1287 if (i == (DE_RX_RING_SIZE - 1))
1288 de->rx_ring[i].opts2 =
1289 cpu_to_le32(RingEnd | de->rx_buf_sz);
1291 de->rx_ring[i].opts2 = cpu_to_le32(de->rx_buf_sz);
1292 de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping);
1293 de->rx_ring[i].addr2 = 0;
/* de_init_rings - zero the Tx ring (with RingEnd in the last slot),
 * reset the Tx indices, and populate the Rx ring via de_refill_rx().
 */
1303 static int de_init_rings (struct de_private *de)
1305 memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1306 de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1309 de->tx_head = de->tx_tail = 0;
1311 return de_refill_rx (de);
/* de_alloc_rings - allocate one coherent DMA block holding both
 * rings; the Tx ring aliases the tail of the Rx allocation. */
1314 static int de_alloc_rings (struct de_private *de)
1316 de->rx_ring = pci_alloc_consistent(de->pdev, DE_RING_BYTES, &de->ring_dma);
1319 de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE];
1320 return de_init_rings(de);
/* de_clean_rings - release every Rx/Tx buffer and reset both rings.
 * Rx skbs are unmapped and freed; Tx slots skip the DE_DUMMY_SKB
 * sentinel, unmap DE_SETUP_SKB with the setup-frame size, and count
 * real pending skbs as tx_dropped.  Finally both shadow arrays are
 * zeroed.
 */
1323 static void de_clean_rings (struct de_private *de)
1327 memset(de->rx_ring, 0, sizeof(struct de_desc) * DE_RX_RING_SIZE);
1328 de->rx_ring[DE_RX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1330 memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1331 de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1334 for (i = 0; i < DE_RX_RING_SIZE; i++) {
1335 if (de->rx_skb[i].skb) {
1336 pci_unmap_single(de->pdev, de->rx_skb[i].mapping,
1337 de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1338 dev_kfree_skb(de->rx_skb[i].skb);
1342 for (i = 0; i < DE_TX_RING_SIZE; i++) {
1343 struct sk_buff *skb = de->tx_skb[i].skb;
1344 if ((skb) && (skb != DE_DUMMY_SKB)) {
1345 if (skb != DE_SETUP_SKB) {
1346 de->dev->stats.tx_dropped++;
1347 pci_unmap_single(de->pdev,
1348 de->tx_skb[i].mapping,
1349 skb->len, PCI_DMA_TODEVICE);
1352 pci_unmap_single(de->pdev,
1353 de->tx_skb[i].mapping,
1354 sizeof(de->setup_frame),
1360 memset(&de->rx_skb, 0, sizeof(struct ring_info) * DE_RX_RING_SIZE);
1361 memset(&de->tx_skb, 0, sizeof(struct ring_info) * DE_TX_RING_SIZE);
/* de_free_rings - return the coherent ring block to the DMA layer
 * (buffers are released separately via de_clean_rings()). */
1364 static void de_free_rings (struct de_private *de)
1367 pci_free_consistent(de->pdev, DE_RING_BYTES, de->rx_ring, de->ring_dma);
/* de_open - ndo_open hook: size the Rx buffers from the MTU, allocate
 * rings, request the (shared) IRQ, initialize the hardware, start the
 * Tx queue and arm the media poll timer.  Error unwinding labels are
 * not fully visible in this view.
 */
1372 static int de_open (struct net_device *dev)
1374 struct de_private *de = netdev_priv(dev);
1375 const int irq = de->pdev->irq;
1378 netif_dbg(de, ifup, dev, "enabling interface\n");
/* Standard MTU uses the fixed PKT_BUF_SZ; larger MTUs add headroom. */
1380 de->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1382 rc = de_alloc_rings(de);
1384 netdev_err(dev, "ring allocation failure, err=%d\n", rc);
1390 rc = request_irq(irq, de_interrupt, IRQF_SHARED, dev->name, dev);
1392 netdev_err(dev, "IRQ %d request failure, err=%d\n", irq, rc);
1396 rc = de_init_hw(de);
1398 netdev_err(dev, "h/w init failure, err=%d\n", rc);
1399 goto err_out_free_irq;
1402 netif_start_queue(dev);
1403 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
/* de_close - ndo_stop hook: kill the media timer, stop the queue and
 * carrier under the lock, free the IRQ and put the adapter to sleep.
 */
1414 static int de_close (struct net_device *dev)
1416 struct de_private *de = netdev_priv(dev);
1417 unsigned long flags;
1419 netif_dbg(de, ifdown, dev, "disabling interface\n");
1421 del_timer_sync(&de->media_timer);
1423 spin_lock_irqsave(&de->lock, flags);
1425 netif_stop_queue(dev);
1426 netif_carrier_off(dev);
1427 spin_unlock_irqrestore(&de->lock, flags);
1429 free_irq(de->pdev->irq, dev);
1432 de_adapter_sleep(de);
/* de_tx_timeout - ndo_tx_timeout hook: dump diagnostic register
 * state, quiesce the device (timer, queue, carrier) and restart it.
 * The reinitialization calls between these visible lines are not
 * shown in this view.
 */
1436 static void de_tx_timeout (struct net_device *dev, unsigned int txqueue)
1438 struct de_private *de = netdev_priv(dev);
1439 const int irq = de->pdev->irq;
1441 netdev_dbg(dev, "NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
1442 dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
1443 de->rx_tail, de->tx_head, de->tx_tail);
1445 del_timer_sync(&de->media_timer);
1448 spin_lock_irq(&de->lock);
1451 netif_stop_queue(dev);
1452 netif_carrier_off(dev);
1454 spin_unlock_irq(&de->lock);
1457 /* Update the error counts. */
/* Ensure any in-flight interrupt handler has finished first. */
1460 synchronize_irq(irq);
1467 netif_wake_queue(dev);
1470 static void __de_get_regs(struct de_private *de, u8 *buf)
1473 u32 *rbuf = (u32 *)buf;
1476 for (i = 0; i < DE_NUM_REGS; i++)
1477 rbuf[i] = dr32(i * 8);
1479 /* handle self-clearing RxMissed counter, CSR8 */
1480 de_rx_missed(de, rbuf[8]);
/* Fill an ethtool_link_ksettings from driver state; caller holds de->lock.
 * Port is derived from de->media_type, duplex from the live MacMode CSR,
 * speed is always 10 Mb/s on these chips.
 * NOTE(review): the switch case labels (DE_MEDIA_AUI/BNC/TP...), their
 * break statements and the media_lock-based autoneg condition are missing
 * from this copy — the bare assignments below belong to those cases.
 */
1483 static void __de_get_link_ksettings(struct de_private *de,
1484 struct ethtool_link_ksettings *cmd)
1486 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1487 de->media_supported);
1488 cmd->base.phy_address = 0;
1489 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1490 de->media_advertise);
1492 switch (de->media_type) {
1494 cmd->base.port = PORT_AUI;
1497 cmd->base.port = PORT_BNC;
1500 cmd->base.port = PORT_TP;
/* 21040/21041 are 10 Mb/s-only parts. */
1504 cmd->base.speed = 10;
1506 if (dr32(MacMode) & FullDuplex)
1507 cmd->base.duplex = DUPLEX_FULL;
1509 cmd->base.duplex = DUPLEX_HALF;
1512 cmd->base.autoneg = AUTONEG_DISABLE;
1514 cmd->base.autoneg = AUTONEG_ENABLE;
1516 /* ignore maxtxpkt, maxrxpkt for now */
/* Validate and apply new link settings; caller holds de->lock.
 * Rejects anything other than 10 Mb/s, half/full duplex, TP/AUI/BNC ports
 * (no BNC on 21040), and requires ADVERTISED_Autoneg when autoneg is on.
 * Maps the (port, duplex, autoneg) triple to a DE_MEDIA_* type, and only
 * touches hardware when something actually changed.
 * NOTE(review): the "return -EINVAL;" lines after each validation check,
 * the switch head on port, several break statements and the trailing
 * "return 0;" are missing from this copy.
 */
1519 static int __de_set_link_ksettings(struct de_private *de,
1520 const struct ethtool_link_ksettings *cmd)
1523 unsigned int media_lock;
1524 u8 duplex = cmd->base.duplex;
1525 u8 port = cmd->base.port;
1526 u8 autoneg = cmd->base.autoneg;
1529 ethtool_convert_link_mode_to_legacy_u32(&advertising,
1530 cmd->link_modes.advertising);
/* Validation: each failed check rejects the request (-EINVAL). */
1532 if (cmd->base.speed != 10)
1534 if (duplex != DUPLEX_HALF && duplex != DUPLEX_FULL)
1536 if (port != PORT_TP && port != PORT_AUI && port != PORT_BNC)
1538 if (de->de21040 && port == PORT_BNC)
1540 if (autoneg != AUTONEG_DISABLE && autoneg != AUTONEG_ENABLE)
1542 if (advertising & ~de->media_supported)
1544 if (autoneg == AUTONEG_ENABLE &&
1545 (!(advertising & ADVERTISED_Autoneg)))
/* Map requested port/duplex/autoneg onto a DE_MEDIA_* selection. */
1550 new_media = DE_MEDIA_AUI;
1551 if (!(advertising & ADVERTISED_AUI))
1555 new_media = DE_MEDIA_BNC;
1556 if (!(advertising & ADVERTISED_BNC))
1560 if (autoneg == AUTONEG_ENABLE)
1561 new_media = DE_MEDIA_TP_AUTO;
1562 else if (duplex == DUPLEX_FULL)
1563 new_media = DE_MEDIA_TP_FD;
1565 new_media = DE_MEDIA_TP;
1566 if (!(advertising & ADVERTISED_TP))
1568 if (!(advertising & (ADVERTISED_10baseT_Full |
1569 ADVERTISED_10baseT_Half)))
/* Manual selection locks the media; autoneg leaves it unlocked. */
1574 media_lock = (autoneg == AUTONEG_ENABLE) ? 0 : 1;
1576 if ((new_media == de->media_type) &&
1577 (media_lock == de->media_lock) &&
1578 (advertising == de->media_advertise))
1579 return 0; /* nothing to change */
1582 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1585 de->media_type = new_media;
1586 de->media_lock = media_lock;
1587 de->media_advertise = advertising;
/* Only reprogram the SIA if the interface is actually up. */
1589 if (netif_running(de->dev))
1595 static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
1597 struct de_private *de = netdev_priv(dev);
1599 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1600 strlcpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
1603 static int de_get_regs_len(struct net_device *dev)
1605 return DE_REGS_SIZE;
1608 static int de_get_link_ksettings(struct net_device *dev,
1609 struct ethtool_link_ksettings *cmd)
1611 struct de_private *de = netdev_priv(dev);
1613 spin_lock_irq(&de->lock);
1614 __de_get_link_ksettings(de, cmd);
1615 spin_unlock_irq(&de->lock);
1620 static int de_set_link_ksettings(struct net_device *dev,
1621 const struct ethtool_link_ksettings *cmd)
1623 struct de_private *de = netdev_priv(dev);
1626 spin_lock_irq(&de->lock);
1627 rc = __de_set_link_ksettings(de, cmd);
1628 spin_unlock_irq(&de->lock);
1633 static u32 de_get_msglevel(struct net_device *dev)
1635 struct de_private *de = netdev_priv(dev);
1637 return de->msg_enable;
1640 static void de_set_msglevel(struct net_device *dev, u32 msglvl)
1642 struct de_private *de = netdev_priv(dev);
1644 de->msg_enable = msglvl;
/* ethtool: copy the cached SROM image (read at probe time into
 * de->ee_data) to userspace. Only a full-image read (offset 0,
 * magic 0, len == DE_EEPROM_SIZE) is accepted.
 * NOTE(review): the "return -EINVAL;" after the check, the final
 * "return 0;" and (per upstream) a NULL check on de->ee_data are
 * missing from this copy.
 */
1647 static int de_get_eeprom(struct net_device *dev,
1648 struct ethtool_eeprom *eeprom, u8 *data)
1650 struct de_private *de = netdev_priv(dev);
1654 if ((eeprom->offset != 0) || (eeprom->magic != 0) ||
1655 (eeprom->len != DE_EEPROM_SIZE))
1657 memcpy(data, de->ee_data, eeprom->len);
/* ethtool: restart NWay autonegotiation.
 * Only valid when the current media is DE_MEDIA_TP_AUTO; clears the
 * NWayState bits in SIAStatus and sets NWayRestart.
 * NOTE(review): the local status declaration, the "return -EINVAL;"
 * branch, the carrier-ok action and the final return are missing from
 * this copy.
 */
1662 static int de_nway_reset(struct net_device *dev)
1664 struct de_private *de = netdev_priv(dev);
1667 if (de->media_type != DE_MEDIA_TP_AUTO)
1669 if (netif_carrier_ok(de->dev))
1672 status = dr32(SIAStatus);
1673 dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
1674 netif_info(de, link, dev, "link nway restart, status %x,%x\n",
1675 status, dr32(SIAStatus));
/* ethtool: dump chip registers. Encodes the register-layout version and
 * the chip flavor (21040 vs 21041) into regs->version, then snapshots the
 * CSRs under de->lock via __de_get_regs().
 * NOTE(review): the signature's continuation line (the void *data
 * parameter) is missing from this copy.
 */
1679 static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1682 struct de_private *de = netdev_priv(dev);
/* low bit carries de->de21040 so userspace can decode the layout */
1684 regs->version = (DE_REGS_VER << 2) | de->de21040;
1686 spin_lock_irq(&de->lock);
1687 __de_get_regs(de, data);
1688 spin_unlock_irq(&de->lock);
/* ethtool operations table: wires the de_* handlers above into the
 * ethtool core. NOTE(review): the closing "};" is missing from this copy. */
1691 static const struct ethtool_ops de_ethtool_ops = {
1692 .get_link = ethtool_op_get_link,
1693 .get_drvinfo = de_get_drvinfo,
1694 .get_regs_len = de_get_regs_len,
1695 .get_msglevel = de_get_msglevel,
1696 .set_msglevel = de_set_msglevel,
1697 .get_eeprom = de_get_eeprom,
1698 .nway_reset = de_nway_reset,
1699 .get_regs = de_get_regs,
1700 .get_link_ksettings = de_get_link_ksettings,
1701 .set_link_ksettings = de_set_link_ksettings,
/* Read the 6-byte MAC address from the 21040's serial ROM, one byte per
 * ROMCmd read, busy-waiting (bounded by boguscnt) for each byte to become
 * non-negative (the chip signals "not ready" with the sign bit).
 * NOTE(review): the do/while loop head around the dr32(ROMCmd) read and
 * the closing braces are missing from this copy.
 */
1704 static void de21040_get_mac_address(struct de_private *de)
1708 dw32 (ROMCmd, 0); /* Reset the pointer with a dummy write. */
1711 for (i = 0; i < 6; i++) {
1712 int value, boguscnt = 100000;
1714 value = dr32(ROMCmd);
1716 } while (value < 0 && --boguscnt > 0);
1717 de->dev->dev_addr[i] = value;
1720 pr_warn("timeout reading 21040 MAC address byte %u\n",
/* Initialize media data for the 21040: default to TP, advertise everything
 * supported, and load the per-media CSR13/14/15 values from the t21040_*
 * tables; media types without table entries are marked invalid.
 * NOTE(review): the switch head and the case labels that select which
 * media indices get table values are missing from this copy.
 */
1725 static void de21040_get_media_info(struct de_private *de)
1729 de->media_type = DE_MEDIA_TP;
1730 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full |
1731 SUPPORTED_10baseT_Half | SUPPORTED_AUI;
1732 de->media_advertise = de->media_supported;
1734 for (i = 0; i < DE_MAX_MEDIA; i++) {
1738 case DE_MEDIA_TP_FD:
/* known media: take SIA CSR values from the 21040 tables */
1739 de->media[i].type = i;
1740 de->media[i].csr13 = t21040_csr13[i];
1741 de->media[i].csr14 = t21040_csr14[i];
1742 de->media[i].csr15 = t21040_csr15[i];
1745 de->media[i].type = DE_MEDIA_INVALID;
1751 /* Note: this routine returns extra data bits for size detection. */
/* Bit-bang one word out of the serial EEPROM behind the ROMCmd register:
 * clock out the read command + address, then clock in 16 data bits,
 * MSB first. addr_len is the EEPROM address width (6 or 8 bits).
 * NOTE(review): the loop-variable declaration and the short delays
 * between clock edges present upstream are missing from this copy.
 */
1752 static unsigned tulip_read_eeprom(void __iomem *regs, int location,
1756 unsigned retval = 0;
1757 void __iomem *ee_addr = regs + ROMCmd;
1758 int read_cmd = location | (EE_READ_CMD << addr_len);
/* select the chip: CS low then high */
1760 writel(EE_ENB & ~EE_CS, ee_addr);
1761 writel(EE_ENB, ee_addr);
1763 /* Shift the read command bits out. */
1764 for (i = 4 + addr_len; i >= 0; i--) {
1765 short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1766 writel(EE_ENB | dataval, ee_addr);
1768 writel(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
/* sample extra bits during the command phase for size detection */
1770 retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
1772 writel(EE_ENB, ee_addr);
/* clock in the 16 data bits, MSB first */
1775 for (i = 16; i > 0; i--) {
1776 writel(EE_ENB | EE_SHIFT_CLK, ee_addr);
1778 retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
1779 writel(EE_ENB, ee_addr);
1783 /* Terminate the EEPROM access. */
1784 writel(EE_ENB & ~EE_CS, ee_addr);
/* Parse the 21041's SROM: download the whole EEPROM image, extract the
 * MAC address, locate the controller-0 info leaf, pick the default media
 * type, walk the media blocks to build de->media[] / de->media_supported,
 * fill default SIA CSR values for media without custom CSRs, and cache
 * the image in de->ee_data for ethtool.
 * NOTE(review): numerous lines are missing from this copy — the
 * "goto bad_srom" branches after the sanity checks, several case labels
 * and breaks in the media-block switch, the end of the ifndef block, and
 * the bad_srom/fill_defaults label structure of the upstream driver.
 */
1788 static void de21041_get_srom_info(struct de_private *de)
1790 unsigned i, sa_offset = 0, ofs;
1791 u8 ee_data[DE_EEPROM_SIZE + 6] = {};
/* probe address width: bit 0x40000 of a wide read distinguishes 8- vs
 * 6-bit-address EEPROM parts */
1792 unsigned ee_addr_size = tulip_read_eeprom(de->regs, 0xff, 8) & 0x40000 ? 8 : 6;
1793 struct de_srom_info_leaf *il;
1796 /* download entire eeprom */
1797 for (i = 0; i < DE_EEPROM_WORDS; i++)
1798 ((__le16 *)ee_data)[i] =
1799 cpu_to_le16(tulip_read_eeprom(de->regs, i, ee_addr_size));
1801 /* DEC now has a specification but early board makers
1802 just put the address in the first EEPROM locations. */
1803 /* This does memcmp(eedata, eedata+16, 8) */
1805 #ifndef CONFIG_MIPS_COBALT
1807 for (i = 0; i < 8; i ++)
1808 if (ee_data[i] != ee_data[16+i])
1813 /* store MAC address */
1814 for (i = 0; i < 6; i ++)
1815 de->dev->dev_addr[i] = ee_data[i + sa_offset];
1817 /* get offset of controller 0 info leaf. ignore 2nd byte. */
1818 ofs = ee_data[SROMC0InfoLeaf];
/* bounds check: leaf plus at least one media block must fit */
1819 if (ofs >= (sizeof(ee_data) - sizeof(struct de_srom_info_leaf) - sizeof(struct de_srom_media_block)))
1822 /* get pointer to info leaf */
1823 il = (struct de_srom_info_leaf *) &ee_data[ofs];
1825 /* paranoia checks */
1826 if (il->n_blocks == 0)
1828 if ((sizeof(ee_data) - ofs) <
1829 (sizeof(struct de_srom_info_leaf) + (sizeof(struct de_srom_media_block) * il->n_blocks)))
1832 /* get default media type */
1833 switch (get_unaligned(&il->default_media)) {
1834 case 0x0001: de->media_type = DE_MEDIA_BNC; break;
1835 case 0x0002: de->media_type = DE_MEDIA_AUI; break;
1836 case 0x0204: de->media_type = DE_MEDIA_TP_FD; break;
1837 default: de->media_type = DE_MEDIA_TP_AUTO; break;
1840 if (netif_msg_probe(de))
1841 pr_info("de%d: SROM leaf offset %u, default media %s\n",
1842 de->board_idx, ofs, media_name[de->media_type]);
1844 /* init SIA register values to defaults */
1845 for (i = 0; i < DE_MAX_MEDIA; i++) {
1846 de->media[i].type = DE_MEDIA_INVALID;
/* 0xffff sentinel: "no custom CSR seen yet" — replaced below */
1847 de->media[i].csr13 = 0xffff;
1848 de->media[i].csr14 = 0xffff;
1849 de->media[i].csr15 = 0xffff;
1852 /* parse media blocks to see what medias are supported,
1853 * and if any custom CSR values are provided
1855 bufp = ((void *)il) + sizeof(*il);
1856 for (i = 0; i < il->n_blocks; i++) {
1857 struct de_srom_media_block *ib = bufp;
1860 /* index based on media type in media block */
1861 switch(ib->opts & MediaBlockMask) {
1862 case 0: /* 10baseT */
1863 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half
1864 | SUPPORTED_Autoneg;
1866 de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
1869 de->media_supported |= SUPPORTED_BNC;
1873 de->media_supported |= SUPPORTED_AUI;
1876 case 4: /* 10baseT-FD */
1877 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full
1878 | SUPPORTED_Autoneg;
1879 idx = DE_MEDIA_TP_FD;
1880 de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
1886 de->media[idx].type = idx;
1888 if (netif_msg_probe(de))
1889 pr_info("de%d: media block #%u: %s",
1891 media_name[de->media[idx].type]);
/* advance past the opts byte; CSR triplet follows only if flagged */
1893 bufp += sizeof (ib->opts);
1895 if (ib->opts & MediaCustomCSRs) {
1896 de->media[idx].csr13 = get_unaligned(&ib->csr13);
1897 de->media[idx].csr14 = get_unaligned(&ib->csr14);
1898 de->media[idx].csr15 = get_unaligned(&ib->csr15);
1899 bufp += sizeof(ib->csr13) + sizeof(ib->csr14) +
1902 if (netif_msg_probe(de))
1903 pr_cont(" (%x,%x,%x)\n",
1904 de->media[idx].csr13,
1905 de->media[idx].csr14,
1906 de->media[idx].csr15);
1909 if (netif_msg_probe(de))
/* stop if the walk would run off the end of the EEPROM image */
1913 if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
1917 de->media_advertise = de->media_supported;
1920 /* fill in defaults, for cases where custom CSRs not used */
1921 for (i = 0; i < DE_MAX_MEDIA; i++) {
1922 if (de->media[i].csr13 == 0xffff)
1923 de->media[i].csr13 = t21041_csr13[i];
1924 if (de->media[i].csr14 == 0xffff) {
1925 /* autonegotiation is broken at least on some chip
1926 revisions - rev. 0x21 works, 0x11 does not */
1927 if (de->pdev->revision < 0x20)
1928 de->media[i].csr14 = t21041_csr14_brk[i];
1930 de->media[i].csr14 = t21041_csr14[i];
1932 if (de->media[i].csr15 == 0xffff)
1933 de->media[i].csr15 = t21041_csr15[i];
/* keep a copy of the raw image for the ethtool get_eeprom hook */
1936 de->ee_data = kmemdup(&ee_data[0], DE_EEPROM_SIZE, GFP_KERNEL);
1941 /* for error cases, it's ok to assume we support all these */
1942 for (i = 0; i < DE_MAX_MEDIA; i++)
1943 de->media[i].type = i;
1944 de->media_supported =
1945 SUPPORTED_10baseT_Half |
1946 SUPPORTED_10baseT_Full |
/* net_device operations table: open/stop, transmit, stats, rx-mode,
 * watchdog and generic MAC-address helpers.
 * NOTE(review): the closing "};" is missing from this copy. */
1954 static const struct net_device_ops de_netdev_ops = {
1955 .ndo_open = de_open,
1956 .ndo_stop = de_close,
1957 .ndo_set_rx_mode = de_set_rx_mode,
1958 .ndo_start_xmit = de_start_xmit,
1959 .ndo_get_stats = de_get_stats,
1960 .ndo_tx_timeout = de_tx_timeout,
1961 .ndo_set_mac_address = eth_mac_addr,
1962 .ndo_validate_addr = eth_validate_addr,
/* PCI probe: allocate the netdev, wake and reset the chip, read the MAC
 * address and media info (21040 via ROM, 21041 via SROM), register the
 * interface, then put the adapter back to sleep until it is opened.
 * NOTE(review): many lines are missing from this copy — allocation-failure
 * checks, the "if (rc) goto ..." error branches, pciaddr validation, the
 * chip-flavor branch around the get-mac/media calls, the success return,
 * and most error-unwind labels (err_out_iomap/err_out_res/err_out_disable/
 * err_out_free in the upstream driver).
 */
1965 static int de_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1967 struct net_device *dev;
1968 struct de_private *de;
1971 unsigned long pciaddr;
/* probe-order index used only for boot-time messages */
1972 static int board_idx = -1;
1976 /* allocate a new ethernet device structure, and fill in defaults */
1977 dev = alloc_etherdev(sizeof(struct de_private));
1981 dev->netdev_ops = &de_netdev_ops;
1982 SET_NETDEV_DEV(dev, &pdev->dev);
1983 dev->ethtool_ops = &de_ethtool_ops;
1984 dev->watchdog_timeo = TX_TIMEOUT;
1986 de = netdev_priv(dev);
/* driver_data 0 in the id table marks the 21040 flavor */
1987 de->de21040 = ent->driver_data == 0 ? 1 : 0;
1990 de->msg_enable = (debug < 0 ? DE_DEF_MSG_ENABLE : debug);
1991 de->board_idx = board_idx;
1992 spin_lock_init (&de->lock);
1993 timer_setup(&de->media_timer,
1994 de->de21040 ? de21040_media_timer : de21041_media_timer,
1997 netif_carrier_off(dev);
1999 /* wake up device, assign resources */
2000 rc = pci_enable_device(pdev);
2004 /* reserve PCI resources to ensure driver atomicity */
2005 rc = pci_request_regions(pdev, DRV_NAME);
2007 goto err_out_disable;
2009 /* check for invalid IRQ value */
2010 if (pdev->irq < 2) {
2012 pr_err("invalid irq (%d) for pci dev %s\n",
2013 pdev->irq, pci_name(pdev));
2017 /* obtain and check validity of PCI I/O address */
2018 pciaddr = pci_resource_start(pdev, 1);
2021 pr_err("no MMIO resource for pci dev %s\n", pci_name(pdev));
2024 if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
2026 pr_err("MMIO resource (%llx) too small on pci dev %s\n",
2027 (unsigned long long)pci_resource_len(pdev, 1),
2032 /* remap CSR registers */
2033 regs = ioremap(pciaddr, DE_REGS_SIZE);
2036 pr_err("Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
2037 (unsigned long long)pci_resource_len(pdev, 1),
2038 pciaddr, pci_name(pdev));
2043 de_adapter_wake(de);
2045 /* make sure hardware is not running */
2046 rc = de_reset_mac(de);
2048 pr_err("Cannot reset MAC, pci dev %s\n", pci_name(pdev));
2052 /* get MAC address, initialize default media type and
2053 * get list of supported media
2056 de21040_get_mac_address(de);
2057 de21040_get_media_info(de);
2059 de21041_get_srom_info(de);
2062 /* register new network interface with kernel */
2063 rc = register_netdev(dev);
2067 /* print info about board and interface just registered */
2068 netdev_info(dev, "%s at %p, %pM, IRQ %d\n",
2069 de->de21040 ? "21040" : "21041",
2070 regs, dev->dev_addr, pdev->irq);
2072 pci_set_drvdata(pdev, dev);
2074 /* enable busmastering */
2075 pci_set_master(pdev);
2077 /* put adapter to sleep */
2078 de_adapter_sleep(de);
/* error unwind: release regions, disable device (labels truncated) */
2086 pci_release_regions(pdev);
2088 pci_disable_device(pdev);
/* PCI remove: unregister the interface and release PCI resources —
 * mirror of de_init_one().
 * NOTE(review): the iounmap/free_netdev calls present upstream between
 * unregister_netdev() and pci_release_regions() are missing from this
 * copy.
 */
2094 static void de_remove_one(struct pci_dev *pdev)
2096 struct net_device *dev = pci_get_drvdata(pdev);
2097 struct de_private *de = netdev_priv(dev);
2100 unregister_netdev(dev);
2103 pci_release_regions(pdev);
2104 pci_disable_device(pdev);
/* PM suspend: if the interface is running, quiesce it (timer, queue,
 * carrier) under de->lock, detach it from the stack, drain any in-flight
 * interrupt and put the adapter to sleep; otherwise just detach.
 * NOTE(review): the hardware-stop and ring-cleanup calls between the
 * visible lines, and the final return, are missing from this copy.
 */
2108 static int __maybe_unused de_suspend(struct device *dev_d)
2110 struct pci_dev *pdev = to_pci_dev(dev_d);
2111 struct net_device *dev = pci_get_drvdata(pdev);
2112 struct de_private *de = netdev_priv(dev);
2115 if (netif_running (dev)) {
2116 const int irq = pdev->irq;
2118 del_timer_sync(&de->media_timer);
2121 spin_lock_irq(&de->lock);
2124 netif_stop_queue(dev);
2125 netif_device_detach(dev);
2126 netif_carrier_off(dev);
2128 spin_unlock_irq(&de->lock);
2131 /* Update the error counts. */
/* wait for a concurrently-running handler to finish before sleeping */
2134 synchronize_irq(irq);
2137 de_adapter_sleep(de);
/* not running: just mark the device absent */
2139 netif_device_detach(dev);
/* PM resume: re-enable busmastering and reattach the interface; skips
 * the hardware bring-up when the device was never detached or the
 * interface is not running.
 * NOTE(review): the hardware re-init path (ring/hw init between
 * pci_set_master and netif_device_attach upstream), the out labels and
 * the final return are missing from this copy.
 */
2145 static int __maybe_unused de_resume(struct device *dev_d)
2147 struct pci_dev *pdev = to_pci_dev(dev_d);
2148 struct net_device *dev = pci_get_drvdata(pdev);
2149 struct de_private *de = netdev_priv(dev);
2152 if (netif_device_present(dev))
2154 if (!netif_running(dev))
2156 pci_set_master(pdev);
2160 netif_device_attach(dev);
2166 static SIMPLE_DEV_PM_OPS(de_pm_ops, de_suspend, de_resume);
/* PCI driver registration record: probe/remove hooks, device id table
 * and PM callbacks.
 * NOTE(review): the ".name = DRV_NAME," initializer and the closing "};"
 * are missing from this copy. */
2168 static struct pci_driver de_driver = {
2170 .id_table = de_pci_tbl,
2171 .probe = de_init_one,
2172 .remove = de_remove_one,
2173 .driver.pm = &de_pm_ops,
2176 static int __init de_init (void)
2178 return pci_register_driver(&de_driver);
2181 static void __exit de_exit (void)
2183 pci_unregister_driver (&de_driver);
2186 module_init(de_init);
2187 module_exit(de_exit);