1 /* b44.c: Broadcom 4400 device driver.
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
5 * Copyright (C) 2006 Broadcom Corporation.
7 * Distribute under GPL.
 */
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/types.h>
14 #include <linux/netdevice.h>
15 #include <linux/ethtool.h>
16 #include <linux/mii.h>
17 #include <linux/if_ether.h>
18 #include <linux/etherdevice.h>
19 #include <linux/pci.h>
20 #include <linux/delay.h>
21 #include <linux/init.h>
22 #include <linux/dma-mapping.h>
24 #include <asm/uaccess.h>
30 #define DRV_MODULE_NAME "b44"
31 #define PFX DRV_MODULE_NAME ": "
32 #define DRV_MODULE_VERSION "1.01"
33 #define DRV_MODULE_RELDATE "Jun 16, 2006"
35 #define B44_DEF_MSG_ENABLE \
45 /* length of time before we decide the hardware is borked,
46 * and dev->tx_timeout() should be called to fix the problem
48 #define B44_TX_TIMEOUT (5 * HZ)
50 /* hardware minimum and maximum for a single frame's data payload */
51 #define B44_MIN_MTU 60
52 #define B44_MAX_MTU 1500
54 #define B44_RX_RING_SIZE 512
55 #define B44_DEF_RX_RING_PENDING 200
56 #define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \
58 #define B44_TX_RING_SIZE 512
59 #define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1)
60 #define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \
63 #define TX_RING_GAP(BP) \
64 (B44_TX_RING_SIZE - (BP)->tx_pending)
65 #define TX_BUFFS_AVAIL(BP) \
66 (((BP)->tx_cons <= (BP)->tx_prod) ? \
67 (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \
68 (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
69 #define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))
71 #define RX_PKT_BUF_SZ (1536 + bp->rx_offset + 64)
72 #define TX_PKT_BUF_SZ (B44_MAX_MTU + ETH_HLEN + 8)
74 /* minimum number of free TX descriptors required to wake up TX process */
75 #define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
77 /* b44 internal pattern match filter info */
78 #define B44_PATTERN_BASE 0x400
79 #define B44_PATTERN_SIZE 0x80
80 #define B44_PMASK_BASE 0x600
81 #define B44_PMASK_SIZE 0x10
82 #define B44_MAX_PATTERNS 16
83 #define B44_ETHIPV6UDP_HLEN 62
84 #define B44_ETHIPV4UDP_HLEN 42
86 static char version[] __devinitdata =
87 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
89 MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
90 MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
91 MODULE_LICENSE("GPL");
92 MODULE_VERSION(DRV_MODULE_VERSION);
94 static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
95 module_param(b44_debug, int, 0);
96 MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
98 static struct pci_device_id b44_pci_tbl[] = {
99 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
100 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
101 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
102 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
103 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
104 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
105 { } /* terminate list with empty entry */
108 MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
110 static void b44_halt(struct b44 *);
111 static void b44_init_rings(struct b44 *);
113 #define B44_FULL_RESET 1
114 #define B44_FULL_RESET_SKIP_PHY 2
115 #define B44_PARTIAL_RESET 3
117 static void b44_init_hw(struct b44 *, int);
119 static int dma_desc_align_mask;
120 static int dma_desc_sync_size;
122 static const char b44_gstrings[][ETH_GSTRING_LEN] = {
123 #define _B44(x...) # x,
128 static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
130 unsigned long offset,
131 enum dma_data_direction dir)
133 dma_sync_single_range_for_device(&pdev->dev, dma_base,
134 offset & dma_desc_align_mask,
135 dma_desc_sync_size, dir);
138 static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
140 unsigned long offset,
141 enum dma_data_direction dir)
143 dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
144 offset & dma_desc_align_mask,
145 dma_desc_sync_size, dir);
/* Read the 32-bit chip register at byte offset @reg via the MMIO mapping
 * in bp->regs.
 */
148 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
150 return readl(bp->regs + reg);
/* Write @val to the 32-bit chip register at byte offset @reg via the MMIO
 * mapping in bp->regs.
 */
153 static inline void bw32(const struct b44 *bp,
154 unsigned long reg, unsigned long val)
156 writel(val, bp->regs + reg);
/* Poll register @reg up to @timeout iterations until @bit is cleared
 * (@clear != 0) or set (@clear == 0).  On timeout a KERN_ERR message is
 * logged naming the bit, register and expected state.
 * NOTE(review): the per-iteration delay and the return statements are not
 * visible in this listing — confirm return semantics against the full file.
 */
159 static int b44_wait_bit(struct b44 *bp, unsigned long reg,
160 u32 bit, unsigned long timeout, const int clear)
164 for (i = 0; i < timeout; i++) {
165 u32 val = br32(bp, reg);
167 if (clear && !(val & bit))
169 if (!clear && (val & bit))
174 printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register "
178 (clear ? "clear" : "set"));
184 /* Sonics SiliconBackplane support routines. ROFL, you should see all the
185 * buzz words used on this company's website :-)
187 * All of these routines must be invoked with bp->lock held and
188 * interrupts disabled.
191 #define SB_PCI_DMA 0x40000000 /* Client Mode PCI memory access space (1 GB) */
192 #define BCM4400_PCI_CORE_ADDR 0x18002000 /* Address of PCI core on BCM4400 cards */
/* Return the revision-code field of the Sonics backplane core
 * (B44_SBIDHIGH masked with SBIDHIGH_RC_MASK).
 */
194 static u32 ssb_get_core_rev(struct b44 *bp)
196 return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
/* Configure the PCI core of the SSB backplane.  Temporarily retargets the
 * BAR0 window at the PCI core (BCM4400_PCI_CORE_ADDR), reads the core
 * revision, programs the interrupt vector and enables prefetch/burst on
 * SSB_PCI_TRANS_2, then restores the original BAR0 window.
 * Caller must hold bp->lock with interrupts disabled (see file comment
 * above the SSB routines).
 */
199 static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
201 u32 bar_orig, pci_rev, val;
203 pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
204 pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
205 pci_rev = ssb_get_core_rev(bp);
207 val = br32(bp, B44_SBINTVEC);
209 bw32(bp, B44_SBINTVEC, val);
/* Enable prefetch and burst for PCI transactions. */
211 val = br32(bp, SSB_PCI_TRANS_2);
212 val |= SSB_PCI_PREF | SSB_PCI_BURST;
213 bw32(bp, SSB_PCI_TRANS_2, val);
/* Restore the original BAR0 window before returning. */
215 pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);
/* Put the SSB core into reset.  If the core is already held in reset
 * (SBTMSLOW_RESET set) the early-out path is taken; otherwise the core is
 * asked to reject new transactions, we wait for REJECT to latch and BUSY
 * to drop, then assert RESET.  The br32() readbacks flush posted writes.
 */
220 static void ssb_core_disable(struct b44 *bp)
222 if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
225 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
226 b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
227 b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
228 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
229 SBTMSLOW_REJECT | SBTMSLOW_RESET));
230 br32(bp, B44_SBTMSLOW);
232 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
233 br32(bp, B44_SBTMSLOW);
/* Full reset sequence for the SSB core: disable it, re-enable the clock
 * with RESET + forced gated clock (FGC) asserted, clear latched SERR and
 * inband-error/timeout state, then release RESET and finally FGC so the
 * core runs with just the clock enabled.  Each br32() readback flushes
 * the preceding posted write.
 */
237 static void ssb_core_reset(struct b44 *bp)
241 ssb_core_disable(bp);
242 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
243 br32(bp, B44_SBTMSLOW);
246 /* Clear SERR if set, this is a hw bug workaround. */
247 if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
248 bw32(bp, B44_SBTMSHIGH, 0);
/* Clear inband-error / timeout flags if latched. */
250 val = br32(bp, B44_SBIMSTATE);
251 if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
252 bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));
/* Release RESET, keep forced gated clock one step longer... */
254 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
255 br32(bp, B44_SBTMSLOW);
/* ...then drop FGC and leave only the core clock enabled. */
258 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
259 br32(bp, B44_SBTMSLOW);
263 static int ssb_core_unit(struct b44 *bp)
266 u32 val = br32(bp, B44_SBADMATCH0);
269 type = val & SBADMATCH0_TYPE_MASK;
272 base = val & SBADMATCH0_BS0_MASK;
276 base = val & SBADMATCH0_BS1_MASK;
281 base = val & SBADMATCH0_BS2_MASK;
288 static int ssb_is_core_up(struct b44 *bp)
290 return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
/* Program one CAM (MAC address filter) entry at @index with the 6-byte
 * Ethernet address in @data: bytes 2..5 go to CAM_DATA_LO, bytes 0..1 plus
 * the VALID flag to CAM_DATA_HI, then a WRITE command is issued and we
 * poll until the BUSY bit clears.  Caller holds bp->lock.
 */
294 static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
298 val = ((u32) data[2]) << 24;
299 val |= ((u32) data[3]) << 16;
300 val |= ((u32) data[4]) << 8;
301 val |= ((u32) data[5]) << 0;
302 bw32(bp, B44_CAM_DATA_LO, val);
303 val = (CAM_DATA_HI_VALID |
304 (((u32) data[0]) << 8) |
305 (((u32) data[1]) << 0));
306 bw32(bp, B44_CAM_DATA_HI, val);
307 bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
308 (index << CAM_CTRL_INDEX_SHIFT)));
309 b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
312 static inline void __b44_disable_ints(struct b44 *bp)
314 bw32(bp, B44_IMASK, 0);
317 static void b44_disable_ints(struct b44 *bp)
319 __b44_disable_ints(bp);
321 /* Flush posted writes. */
325 static void b44_enable_ints(struct b44 *bp)
327 bw32(bp, B44_IMASK, bp->imask);
/* Read MII PHY register @reg (at bp->phy_addr) into *@val.  Clears the
 * MII-done interrupt status, issues an MDIO READ frame, waits for the
 * EMAC_INT_MII bit, then extracts the data field from B44_MDIO_DATA.
 * NOTE(review): the return statement is not visible in this listing;
 * presumably it returns @err from b44_wait_bit — confirm in the full file.
 */
330 static int b44_readphy(struct b44 *bp, int reg, u32 *val)
334 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
335 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
336 (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
337 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
338 (reg << MDIO_DATA_RA_SHIFT) |
339 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
340 err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
341 *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
/* Write @val to MII PHY register @reg at bp->phy_addr.  Mirrors
 * b44_readphy(): clear the MII-done status, issue an MDIO WRITE frame
 * carrying the data, and return the b44_wait_bit() result (0 on
 * completion, non-zero on timeout).
 */
346 static int b44_writephy(struct b44 *bp, int reg, u32 val)
348 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
349 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
350 (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
351 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
352 (reg << MDIO_DATA_RA_SHIFT) |
353 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
354 (val & MDIO_DATA_DATA)));
355 return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
358 /* miilib interface */
359 /* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
360 * due to code existing before miilib use was added to this driver.
361 * Someone should remove this artificial driver limitation in
362 * b44_{read,write}phy. bp->phy_addr itself is fine (and needed).
364 static int b44_mii_read(struct net_device *dev, int phy_id, int location)
367 struct b44 *bp = netdev_priv(dev);
368 int rc = b44_readphy(bp, location, &val);
374 static void b44_mii_write(struct net_device *dev, int phy_id, int location,
377 struct b44 *bp = netdev_priv(dev);
378 b44_writephy(bp, location, val);
381 static int b44_phy_reset(struct b44 *bp)
386 err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
390 err = b44_readphy(bp, MII_BMCR, &val);
392 if (val & BMCR_RESET) {
393 printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
/* Apply the pause configuration in @pause_flags (B44_FLAG_TX_PAUSE /
 * B44_FLAG_RX_PAUSE) to the hardware: toggles RXCONFIG_FLOW for RX pause
 * and MAC_FLOW_PAUSE_ENAB (with a 0xc0 RX high-water mark) for TX pause.
 * Also records the new flags in bp->flags.  Caller holds bp->lock.
 */
402 static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
406 bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
407 bp->flags |= pause_flags;
409 val = br32(bp, B44_RXCONFIG);
410 if (pause_flags & B44_FLAG_RX_PAUSE)
411 val |= RXCONFIG_FLOW;
413 val &= ~RXCONFIG_FLOW;
414 bw32(bp, B44_RXCONFIG, val);
416 val = br32(bp, B44_MAC_FLOW);
417 if (pause_flags & B44_FLAG_TX_PAUSE)
418 val |= (MAC_FLOW_PAUSE_ENAB |
419 (0xc0 & MAC_FLOW_RX_HI_WATER));
421 val &= ~MAC_FLOW_PAUSE_ENAB;
422 bw32(bp, B44_MAC_FLOW, val);
/* Derive the pause configuration from the local and link-partner
 * autonegotiation advertisements (@local / @remote, MII ADVERTISE and LPA
 * register values) and hand it to __b44_set_flow_ctrl().  Only RX pause is
 * ever enabled here by default; TX pause must be turned on via ethtool
 * (see the in-body comment about excessive pause frames).
 */
425 static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
429 /* The driver supports only rx pause by default because
430 the b44 mac tx pause mechanism generates excessive
432 Use ethtool to turn on b44 tx pause if necessary.
434 if ((local & ADVERTISE_PAUSE_CAP) &&
435 (local & ADVERTISE_PAUSE_ASYM)){
436 if ((remote & LPA_PAUSE_ASYM) &&
437 !(remote & LPA_PAUSE_CAP))
438 pause_enab |= B44_FLAG_RX_PAUSE;
441 __b44_set_flow_ctrl(bp, pause_enab);
444 static int b44_setup_phy(struct b44 *bp)
449 if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
451 if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
452 val & MII_ALEDCTRL_ALLMSK)) != 0)
454 if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
456 if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
457 val | MII_TLEDCTRL_ENABLE)) != 0)
460 if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
461 u32 adv = ADVERTISE_CSMA;
463 if (bp->flags & B44_FLAG_ADV_10HALF)
464 adv |= ADVERTISE_10HALF;
465 if (bp->flags & B44_FLAG_ADV_10FULL)
466 adv |= ADVERTISE_10FULL;
467 if (bp->flags & B44_FLAG_ADV_100HALF)
468 adv |= ADVERTISE_100HALF;
469 if (bp->flags & B44_FLAG_ADV_100FULL)
470 adv |= ADVERTISE_100FULL;
472 if (bp->flags & B44_FLAG_PAUSE_AUTO)
473 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
475 if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
477 if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
478 BMCR_ANRESTART))) != 0)
483 if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
485 bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
486 if (bp->flags & B44_FLAG_100_BASE_T)
487 bmcr |= BMCR_SPEED100;
488 if (bp->flags & B44_FLAG_FULL_DUPLEX)
489 bmcr |= BMCR_FULLDPLX;
490 if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
493 /* Since we will not be negotiating there is no safe way
494 * to determine if the link partner supports flow control
495 * or not. So just disable it completely in this case.
497 b44_set_flow_ctrl(bp, 0, 0);
504 static void b44_stats_update(struct b44 *bp)
509 val = &bp->hw_stats.tx_good_octets;
510 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
511 *val++ += br32(bp, reg);
517 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
518 *val++ += br32(bp, reg);
522 static void b44_link_report(struct b44 *bp)
524 if (!netif_carrier_ok(bp->dev)) {
525 printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
527 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
529 (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
530 (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
532 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
535 (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
536 (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
540 static void b44_check_phy(struct b44 *bp)
544 if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
545 !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
547 if (aux & MII_AUXCTRL_SPEED)
548 bp->flags |= B44_FLAG_100_BASE_T;
550 bp->flags &= ~B44_FLAG_100_BASE_T;
551 if (aux & MII_AUXCTRL_DUPLEX)
552 bp->flags |= B44_FLAG_FULL_DUPLEX;
554 bp->flags &= ~B44_FLAG_FULL_DUPLEX;
556 if (!netif_carrier_ok(bp->dev) &&
557 (bmsr & BMSR_LSTATUS)) {
558 u32 val = br32(bp, B44_TX_CTRL);
559 u32 local_adv, remote_adv;
561 if (bp->flags & B44_FLAG_FULL_DUPLEX)
562 val |= TX_CTRL_DUPLEX;
564 val &= ~TX_CTRL_DUPLEX;
565 bw32(bp, B44_TX_CTRL, val);
567 if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
568 !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
569 !b44_readphy(bp, MII_LPA, &remote_adv))
570 b44_set_flow_ctrl(bp, local_adv, remote_adv);
573 netif_carrier_on(bp->dev);
575 } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
577 netif_carrier_off(bp->dev);
581 if (bmsr & BMSR_RFAULT)
582 printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
585 printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
/* Periodic (1 Hz) driver timer.  @__opaque is the struct b44 pointer
 * stored in timer.data at open time.  Updates MIB statistics under
 * bp->lock, then re-arms itself for jiffies + HZ.
 * NOTE(review): a b44_check_phy() call likely sits in the elided lines
 * between the lock and b44_stats_update — confirm against the full file.
 */
590 static void b44_timer(unsigned long __opaque)
592 struct b44 *bp = (struct b44 *) __opaque;
594 spin_lock_irq(&bp->lock);
598 b44_stats_update(bp);
600 spin_unlock_irq(&bp->lock);
602 bp->timer.expires = jiffies + HZ;
603 add_timer(&bp->timer);
/* Reclaim completed TX descriptors.  Reads the hardware's current
 * descriptor index from B44_DMATX_STAT, then walks the ring from
 * bp->tx_cons up to that index, unmapping and freeing each transmitted
 * skb.  Wakes the queue once enough descriptors (B44_TX_WAKEUP_THRESH)
 * are free again.  Runs in interrupt/poll context (dev_kfree_skb_irq).
 */
606 static void b44_tx(struct b44 *bp)
610 cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
611 cur /= sizeof(struct dma_desc);
613 /* XXX needs updating when NETIF_F_SG is supported */
614 for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
615 struct ring_info *rp = &bp->tx_buffers[cons];
616 struct sk_buff *skb = rp->skb;
620 pci_unmap_single(bp->pdev,
621 pci_unmap_addr(rp, mapping),
625 dev_kfree_skb_irq(skb);
629 if (netif_queue_stopped(bp->dev) &&
630 TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
631 netif_wake_queue(bp->dev);
/* Cancel the pending GP timer interrupt source. */
633 bw32(bp, B44_GPTIMER, 0);
636 /* Works like this. This chip writes a 'struct rx_header" 30 bytes
637 * before the DMA address you give it. So we allocate 30 more bytes
638 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
639 * point the chip at 30 bytes past where the rx_header will go.
641 static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
644 struct ring_info *src_map, *map;
645 struct rx_header *rh;
653 src_map = &bp->rx_buffers[src_idx];
654 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
655 map = &bp->rx_buffers[dest_idx];
656 skb = dev_alloc_skb(RX_PKT_BUF_SZ);
660 mapping = pci_map_single(bp->pdev, skb->data,
664 /* Hardware bug work-around, the chip is unable to do PCI DMA
665 to/from anything above 1GB :-( */
666 if (dma_mapping_error(mapping) ||
667 mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
669 if (!dma_mapping_error(mapping))
670 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
671 dev_kfree_skb_any(skb);
672 skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA);
675 mapping = pci_map_single(bp->pdev, skb->data,
678 if (dma_mapping_error(mapping) ||
679 mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
680 if (!dma_mapping_error(mapping))
681 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
682 dev_kfree_skb_any(skb);
688 skb_reserve(skb, bp->rx_offset);
690 rh = (struct rx_header *)
691 (skb->data - bp->rx_offset);
696 pci_unmap_addr_set(map, mapping, mapping);
701 ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
702 if (dest_idx == (B44_RX_RING_SIZE - 1))
703 ctrl |= DESC_CTRL_EOT;
705 dp = &bp->rx_ring[dest_idx];
706 dp->ctrl = cpu_to_le32(ctrl);
707 dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);
709 if (bp->flags & B44_FLAG_RX_RING_HACK)
710 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
711 dest_idx * sizeof(dp),
714 return RX_PKT_BUF_SZ;
717 static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
719 struct dma_desc *src_desc, *dest_desc;
720 struct ring_info *src_map, *dest_map;
721 struct rx_header *rh;
725 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
726 dest_desc = &bp->rx_ring[dest_idx];
727 dest_map = &bp->rx_buffers[dest_idx];
728 src_desc = &bp->rx_ring[src_idx];
729 src_map = &bp->rx_buffers[src_idx];
731 dest_map->skb = src_map->skb;
732 rh = (struct rx_header *) src_map->skb->data;
735 pci_unmap_addr_set(dest_map, mapping,
736 pci_unmap_addr(src_map, mapping));
738 if (bp->flags & B44_FLAG_RX_RING_HACK)
739 b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
740 src_idx * sizeof(src_desc),
743 ctrl = src_desc->ctrl;
744 if (dest_idx == (B44_RX_RING_SIZE - 1))
745 ctrl |= cpu_to_le32(DESC_CTRL_EOT);
747 ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
749 dest_desc->ctrl = ctrl;
750 dest_desc->addr = src_desc->addr;
754 if (bp->flags & B44_FLAG_RX_RING_HACK)
755 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
756 dest_idx * sizeof(dest_desc),
759 pci_dma_sync_single_for_device(bp->pdev, le32_to_cpu(src_desc->addr),
764 static int b44_rx(struct b44 *bp, int budget)
770 prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
771 prod /= sizeof(struct dma_desc);
774 while (cons != prod && budget > 0) {
775 struct ring_info *rp = &bp->rx_buffers[cons];
776 struct sk_buff *skb = rp->skb;
777 dma_addr_t map = pci_unmap_addr(rp, mapping);
778 struct rx_header *rh;
781 pci_dma_sync_single_for_cpu(bp->pdev, map,
784 rh = (struct rx_header *) skb->data;
785 len = le16_to_cpu(rh->len);
786 if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
787 (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
789 b44_recycle_rx(bp, cons, bp->rx_prod);
791 bp->stats.rx_dropped++;
801 len = le16_to_cpu(rh->len);
802 } while (len == 0 && i++ < 5);
810 if (len > RX_COPY_THRESHOLD) {
812 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
815 pci_unmap_single(bp->pdev, map,
816 skb_size, PCI_DMA_FROMDEVICE);
817 /* Leave out rx_header */
818 skb_put(skb, len+bp->rx_offset);
819 skb_pull(skb,bp->rx_offset);
821 struct sk_buff *copy_skb;
823 b44_recycle_rx(bp, cons, bp->rx_prod);
824 copy_skb = dev_alloc_skb(len + 2);
825 if (copy_skb == NULL)
826 goto drop_it_no_recycle;
828 skb_reserve(copy_skb, 2);
829 skb_put(copy_skb, len);
830 /* DMA sync done above, copy just the actual packet */
831 skb_copy_from_linear_data_offset(skb, bp->rx_offset,
832 copy_skb->data, len);
835 skb->ip_summed = CHECKSUM_NONE;
836 skb->protocol = eth_type_trans(skb, bp->dev);
837 netif_receive_skb(skb);
838 bp->dev->last_rx = jiffies;
842 bp->rx_prod = (bp->rx_prod + 1) &
843 (B44_RX_RING_SIZE - 1);
844 cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
848 bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
853 static int b44_poll(struct net_device *netdev, int *budget)
855 struct b44 *bp = netdev_priv(netdev);
858 spin_lock_irq(&bp->lock);
860 if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
861 /* spin_lock(&bp->tx_lock); */
863 /* spin_unlock(&bp->tx_lock); */
865 spin_unlock_irq(&bp->lock);
868 if (bp->istat & ISTAT_RX) {
869 int orig_budget = *budget;
872 if (orig_budget > netdev->quota)
873 orig_budget = netdev->quota;
875 work_done = b44_rx(bp, orig_budget);
877 *budget -= work_done;
878 netdev->quota -= work_done;
880 if (work_done >= orig_budget)
884 if (bp->istat & ISTAT_ERRORS) {
887 spin_lock_irqsave(&bp->lock, flags);
890 b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
891 netif_wake_queue(bp->dev);
892 spin_unlock_irqrestore(&bp->lock, flags);
897 netif_rx_complete(netdev);
901 return (done ? 0 : 1);
904 static irqreturn_t b44_interrupt(int irq, void *dev_id)
906 struct net_device *dev = dev_id;
907 struct b44 *bp = netdev_priv(dev);
911 spin_lock(&bp->lock);
913 istat = br32(bp, B44_ISTAT);
914 imask = br32(bp, B44_IMASK);
916 /* The interrupt mask register controls which interrupt bits
917 * will actually raise an interrupt to the CPU when set by hw/firmware,
918 * but doesn't mask off the bits.
924 if (unlikely(!netif_running(dev))) {
925 printk(KERN_INFO "%s: late interrupt.\n", dev->name);
929 if (netif_rx_schedule_prep(dev)) {
930 /* NOTE: These writes are posted by the readback of
931 * the ISTAT register below.
934 __b44_disable_ints(bp);
935 __netif_rx_schedule(dev);
937 printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
942 bw32(bp, B44_ISTAT, istat);
945 spin_unlock(&bp->lock);
946 return IRQ_RETVAL(handled);
949 static void b44_tx_timeout(struct net_device *dev)
951 struct b44 *bp = netdev_priv(dev);
953 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
956 spin_lock_irq(&bp->lock);
960 b44_init_hw(bp, B44_FULL_RESET);
962 spin_unlock_irq(&bp->lock);
966 netif_wake_queue(dev);
969 static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
971 struct b44 *bp = netdev_priv(dev);
972 struct sk_buff *bounce_skb;
973 int rc = NETDEV_TX_OK;
975 u32 len, entry, ctrl;
978 spin_lock_irq(&bp->lock);
980 /* This is a hard error, log it. */
981 if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
982 netif_stop_queue(dev);
983 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
988 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
989 if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
990 /* Chip can't handle DMA to/from >1GB, use bounce buffer */
991 if (!dma_mapping_error(mapping))
992 pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
994 bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
999 mapping = pci_map_single(bp->pdev, bounce_skb->data,
1000 len, PCI_DMA_TODEVICE);
1001 if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
1002 if (!dma_mapping_error(mapping))
1003 pci_unmap_single(bp->pdev, mapping,
1004 len, PCI_DMA_TODEVICE);
1005 dev_kfree_skb_any(bounce_skb);
1009 skb_copy_from_linear_data(skb, skb_put(bounce_skb, len),
1011 dev_kfree_skb_any(skb);
1015 entry = bp->tx_prod;
1016 bp->tx_buffers[entry].skb = skb;
1017 pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);
1019 ctrl = (len & DESC_CTRL_LEN);
1020 ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
1021 if (entry == (B44_TX_RING_SIZE - 1))
1022 ctrl |= DESC_CTRL_EOT;
1024 bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
1025 bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
1027 if (bp->flags & B44_FLAG_TX_RING_HACK)
1028 b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
1029 entry * sizeof(bp->tx_ring[0]),
1032 entry = NEXT_TX(entry);
1034 bp->tx_prod = entry;
1038 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1039 if (bp->flags & B44_FLAG_BUGGY_TXPTR)
1040 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1041 if (bp->flags & B44_FLAG_REORDER_BUG)
1042 br32(bp, B44_DMATX_PTR);
1044 if (TX_BUFFS_AVAIL(bp) < 1)
1045 netif_stop_queue(dev);
1047 dev->trans_start = jiffies;
1050 spin_unlock_irq(&bp->lock);
1055 rc = NETDEV_TX_BUSY;
1059 static int b44_change_mtu(struct net_device *dev, int new_mtu)
1061 struct b44 *bp = netdev_priv(dev);
1063 if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
1066 if (!netif_running(dev)) {
1067 /* We'll just catch it later when the
1074 spin_lock_irq(&bp->lock);
1078 b44_init_hw(bp, B44_FULL_RESET);
1079 spin_unlock_irq(&bp->lock);
1081 b44_enable_ints(bp);
1086 /* Free up pending packets in all rx/tx rings.
1088 * The chip has been shut down and the driver detached from
1089 * the networking, so no interrupts or new tx packets will
1090 * end up in the driver. bp->lock is not held and we are not
1091 * in an interrupt context and thus may sleep.
1093 static void b44_free_rings(struct b44 *bp)
1095 struct ring_info *rp;
1098 for (i = 0; i < B44_RX_RING_SIZE; i++) {
1099 rp = &bp->rx_buffers[i];
1101 if (rp->skb == NULL)
1103 pci_unmap_single(bp->pdev,
1104 pci_unmap_addr(rp, mapping),
1106 PCI_DMA_FROMDEVICE);
1107 dev_kfree_skb_any(rp->skb);
1111 /* XXX needs changes once NETIF_F_SG is set... */
1112 for (i = 0; i < B44_TX_RING_SIZE; i++) {
1113 rp = &bp->tx_buffers[i];
1115 if (rp->skb == NULL)
1117 pci_unmap_single(bp->pdev,
1118 pci_unmap_addr(rp, mapping),
1121 dev_kfree_skb_any(rp->skb);
1126 /* Initialize tx/rx rings for packet processing.
1128 * The chip has been shut down and the driver detached from
1129 * the networking, so no interrupts or new tx packets will
1130 * end up in the driver.
1132 static void b44_init_rings(struct b44 *bp)
1138 memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1139 memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1141 if (bp->flags & B44_FLAG_RX_RING_HACK)
1142 dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
1144 PCI_DMA_BIDIRECTIONAL);
1146 if (bp->flags & B44_FLAG_TX_RING_HACK)
1147 dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
1151 for (i = 0; i < bp->rx_pending; i++) {
1152 if (b44_alloc_rx_skb(bp, -1, i) < 0)
1158 * Must not be invoked with interrupt sources disabled and
1159 * the hardware shutdown down.
1161 static void b44_free_consistent(struct b44 *bp)
1163 kfree(bp->rx_buffers);
1164 bp->rx_buffers = NULL;
1165 kfree(bp->tx_buffers);
1166 bp->tx_buffers = NULL;
1168 if (bp->flags & B44_FLAG_RX_RING_HACK) {
1169 dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
1174 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1175 bp->rx_ring, bp->rx_ring_dma);
1177 bp->flags &= ~B44_FLAG_RX_RING_HACK;
1180 if (bp->flags & B44_FLAG_TX_RING_HACK) {
1181 dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
1186 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1187 bp->tx_ring, bp->tx_ring_dma);
1189 bp->flags &= ~B44_FLAG_TX_RING_HACK;
1194 * Must not be invoked with interrupt sources disabled and
1195 * the hardware shutdown down. Can sleep.
1197 static int b44_alloc_consistent(struct b44 *bp)
1201 size = B44_RX_RING_SIZE * sizeof(struct ring_info);
1202 bp->rx_buffers = kzalloc(size, GFP_KERNEL);
1203 if (!bp->rx_buffers)
1206 size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1207 bp->tx_buffers = kzalloc(size, GFP_KERNEL);
1208 if (!bp->tx_buffers)
1211 size = DMA_TABLE_BYTES;
1212 bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
1214 /* Allocation may have failed due to pci_alloc_consistent
1215 insisting on use of GFP_DMA, which is more restrictive
1216 than necessary... */
1217 struct dma_desc *rx_ring;
1218 dma_addr_t rx_ring_dma;
1220 rx_ring = kzalloc(size, GFP_KERNEL);
1224 rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
1228 if (dma_mapping_error(rx_ring_dma) ||
1229 rx_ring_dma + size > DMA_30BIT_MASK) {
1234 bp->rx_ring = rx_ring;
1235 bp->rx_ring_dma = rx_ring_dma;
1236 bp->flags |= B44_FLAG_RX_RING_HACK;
1239 bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
1241 /* Allocation may have failed due to pci_alloc_consistent
1242 insisting on use of GFP_DMA, which is more restrictive
1243 than necessary... */
1244 struct dma_desc *tx_ring;
1245 dma_addr_t tx_ring_dma;
1247 tx_ring = kzalloc(size, GFP_KERNEL);
1251 tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
1255 if (dma_mapping_error(tx_ring_dma) ||
1256 tx_ring_dma + size > DMA_30BIT_MASK) {
1261 bp->tx_ring = tx_ring;
1262 bp->tx_ring_dma = tx_ring_dma;
1263 bp->flags |= B44_FLAG_TX_RING_HACK;
1269 b44_free_consistent(bp);
1273 /* bp->lock is held. */
1274 static void b44_clear_stats(struct b44 *bp)
1278 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1279 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1281 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1285 /* bp->lock is held. */
1286 static void b44_chip_reset(struct b44 *bp)
1288 if (ssb_is_core_up(bp)) {
1289 bw32(bp, B44_RCV_LAZY, 0);
1290 bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
1291 b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
1292 bw32(bp, B44_DMATX_CTRL, 0);
1293 bp->tx_prod = bp->tx_cons = 0;
1294 if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
1295 b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
1298 bw32(bp, B44_DMARX_CTRL, 0);
1299 bp->rx_prod = bp->rx_cons = 0;
1301 ssb_pci_setup(bp, (bp->core_unit == 0 ?
1308 b44_clear_stats(bp);
1310 /* Make PHY accessible. */
1311 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1312 (0x0d & MDIO_CTRL_MAXF_MASK)));
1313 br32(bp, B44_MDIO_CTRL);
1315 if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
1316 bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
1317 br32(bp, B44_ENET_CTRL);
1318 bp->flags &= ~B44_FLAG_INTERNAL_PHY;
1320 u32 val = br32(bp, B44_DEVCTRL);
1322 if (val & DEVCTRL_EPR) {
1323 bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
1324 br32(bp, B44_DEVCTRL);
1327 bp->flags |= B44_FLAG_INTERNAL_PHY;
1331 /* bp->lock is held. */
/* Stop the chip: first mask all interrupt sources.
 * NOTE(review): the rest of the shutdown (link-down reporting and
 * b44_chip_reset, per the forward declaration at the top of the file)
 * is in lines not visible in this listing.  bp->lock is held.
 */
1332 static void b44_halt(struct b44 *bp)
1334 b44_disable_ints(bp);
1338 /* bp->lock is held. */
1339 static void __b44_set_mac_addr(struct b44 *bp)
1341 bw32(bp, B44_CAM_CTRL, 0);
1342 if (!(bp->dev->flags & IFF_PROMISC)) {
1345 __b44_cam_write(bp, bp->dev->dev_addr, 0);
1346 val = br32(bp, B44_CAM_CTRL);
1347 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1351 static int b44_set_mac_addr(struct net_device *dev, void *p)
1353 struct b44 *bp = netdev_priv(dev);
1354 struct sockaddr *addr = p;
1356 if (netif_running(dev))
1359 if (!is_valid_ether_addr(addr->sa_data))
1362 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1364 spin_lock_irq(&bp->lock);
1365 __b44_set_mac_addr(bp);
1366 spin_unlock_irq(&bp->lock);
1371 /* Called at device open time to get the chip ready for
1372 * packet processing. Invoked with bp->lock held.
1374 static void __b44_set_rx_mode(struct net_device *);
1375 static void b44_init_hw(struct b44 *bp, int reset_kind)
1380 if (reset_kind == B44_FULL_RESET) {
1385 /* Enable CRC32, set proper LED modes and power on PHY */
1386 bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
1387 bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
1389 /* This sets the MAC address too. */
1390 __b44_set_rx_mode(bp->dev);
1392 /* MTU + eth header + possible VLAN tag + struct rx_header */
1393 bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1394 bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1396 bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
1397 if (reset_kind == B44_PARTIAL_RESET) {
1398 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1399 (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
1401 bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1402 bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1403 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1404 (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
1405 bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1407 bw32(bp, B44_DMARX_PTR, bp->rx_pending);
1408 bp->rx_prod = bp->rx_pending;
1410 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1413 val = br32(bp, B44_ENET_CTRL);
1414 bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
1417 static int b44_open(struct net_device *dev)
1419 struct b44 *bp = netdev_priv(dev);
1422 err = b44_alloc_consistent(bp);
1427 b44_init_hw(bp, B44_FULL_RESET);
1431 err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
1432 if (unlikely(err < 0)) {
1435 b44_free_consistent(bp);
1439 init_timer(&bp->timer);
1440 bp->timer.expires = jiffies + HZ;
1441 bp->timer.data = (unsigned long) bp;
1442 bp->timer.function = b44_timer;
1443 add_timer(&bp->timer);
1445 b44_enable_ints(bp);
1446 netif_start_queue(dev);
/* Debug helper: dump PCI/device state to the console.  Deliberately not
 * static (see the commented-out qualifier) so it can be called from a
 * debugger.  Body is truncated in this listing. */
1452 /*static*/ void b44_dump_state(struct b44 *bp)
1454 u32 val32, val32_2, val32_3, val32_4, val32_5;
1457 pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
1458 printk("DEBUG: PCI status [%04x] \n", val16);
1463 #ifdef CONFIG_NET_POLL_CONTROLLER
1465 * Polling receive - used by netconsole and other diagnostic tools
1466 * to allow network i/o with interrupts disabled.
/* Simulate an interrupt: mask the line, run the handler once, unmask. */
1468 static void b44_poll_controller(struct net_device *dev)
1470 disable_irq(dev->irq);
1471 b44_interrupt(dev->irq, dev);
1472 enable_irq(dev->irq);
/* Write 'bytes' bytes of the pattern at pp into the chip's filter table,
 * one 32-bit word at a time, starting at table_offset.  Each word is
 * written via the FILT_ADDR/FILT_DATA register pair. */
1476 static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
1479 u32 *pattern = (u32 *) pp;
1481 for (i = 0; i < bytes; i += sizeof(u32)) {
1482 bw32(bp, B44_FILT_ADDR, table_offset + i);
1483 bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
/* Build a Wake-on-LAN "magic packet" pattern and its bitmask at 'offset'
 * within ppattern/pmask: a sync block of 0xff bytes followed by the MAC
 * address repeated up to B44_MAX_PATTERNS times (truncated when the
 * pattern buffer runs out).  Returns the resulting length (see callers).
 * NOTE(review): elided listing — the loop's len bookkeeping between the
 * visible lines is not fully shown. */
1487 static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
1490 int k, j, len = offset;
1491 int ethaddr_bytes = ETH_ALEN;
/* Magic-packet sync: magicsync bytes of 0xff, each flagged in the mask. */
1493 memset(ppattern + offset, 0xff, magicsync);
1494 for (j = 0; j < magicsync; j++)
1495 set_bit(len++, (unsigned long *) pmask);
1497 for (j = 0; j < B44_MAX_PATTERNS; j++) {
/* Copy a full MAC address if it fits, otherwise only the remainder. */
1498 if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
1499 ethaddr_bytes = ETH_ALEN;
1501 ethaddr_bytes = B44_PATTERN_SIZE - len;
1502 if (ethaddr_bytes <=0)
1504 for (k = 0; k< ethaddr_bytes; k++) {
1505 ppattern[offset + magicsync +
1506 (j * ETH_ALEN) + k] = macaddr[k];
1508 set_bit(len, (unsigned long *) pmask);
1514 /* Setup magic packet patterns in the b44 WOL
1515 * pattern matching filter.
/* Programs three patterns (IPv4/UDP, raw ethernet II, IPv6/UDP), writes
 * their lengths into WKUP_LEN and enables pattern-match wakeup (PFE).
 * The pattern buffer is heap-allocated; the mask fits on the stack. */
1517 static void b44_setup_pseudo_magicp(struct b44 *bp)
1521 int plen0, plen1, plen2;
1523 u8 pwol_mask[B44_PMASK_SIZE];
1525 pwol_pattern = kmalloc(B44_PATTERN_SIZE, GFP_KERNEL);
1526 if (!pwol_pattern) {
1527 printk(KERN_ERR PFX "Memory not available for WOL\n");
1531 /* Ipv4 magic packet pattern - pattern 0.*/
1532 memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1533 memset(pwol_mask, 0, B44_PMASK_SIZE);
1534 plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1535 B44_ETHIPV4UDP_HLEN);
1537 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1538 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1540 /* Raw ethernet II magic packet pattern - pattern 1 */
1541 memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1542 memset(pwol_mask, 0, B44_PMASK_SIZE);
1543 plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
/* Pattern 1 lives at the second slot in both tables. */
1546 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1547 B44_PATTERN_BASE + B44_PATTERN_SIZE);
1548 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1549 B44_PMASK_BASE + B44_PMASK_SIZE);
1551 /* Ipv6 magic packet pattern - pattern 2 */
1552 memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1553 memset(pwol_mask, 0, B44_PMASK_SIZE);
1554 plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1555 B44_ETHIPV6UDP_HLEN);
/* Pattern 2 lives at the third slot in both tables. */
1557 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1558 B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
1559 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1560 B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
1562 kfree(pwol_pattern);
1564 /* set these pattern's lengths: one less than each real length */
1565 val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
1566 bw32(bp, B44_WKUP_LEN, val);
1568 /* enable wakeup pattern matching */
1569 val = br32(bp, B44_DEVCTRL);
1570 bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
/* Arm the chip for Wake-on-LAN.  B0-and-later silicon supports direct
 * magic-packet matching (MPM) on the station address; older revisions
 * fall back to the pseudo-magic pattern filter.  Finally the core is
 * put into power-enable state via SBTMSLOW and PCI PM config space. */
1574 static void b44_setup_wol(struct b44 *bp)
1579 bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1581 if (bp->flags & B44_FLAG_B0_ANDLATER) {
/* Disable the pattern-length machinery; MPM handles wakeup instead. */
1583 bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
/* Station address: low 4 bytes ... */
1585 val = bp->dev->dev_addr[2] << 24 |
1586 bp->dev->dev_addr[3] << 16 |
1587 bp->dev->dev_addr[4] << 8 |
1588 bp->dev->dev_addr[5];
1589 bw32(bp, B44_ADDR_LO, val);
/* ... and high 2 bytes. */
1591 val = bp->dev->dev_addr[0] << 8 |
1592 bp->dev->dev_addr[1];
1593 bw32(bp, B44_ADDR_HI, val);
1595 val = br32(bp, B44_DEVCTRL);
1596 bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
/* Pre-B0 hardware: program the three software patterns instead. */
1599 b44_setup_pseudo_magicp(bp);
1602 val = br32(bp, B44_SBTMSLOW);
1603 bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE);
/* Mirror the power-enable bit into the SSB PM control/status word. */
1605 pci_read_config_word(bp->pdev, SSB_PMCSR, &pmval);
1606 pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE);
/* net_device stop callback: quiesce TX/polling, kill the link timer,
 * detach under the lock, release the IRQ, and either leave the chip in
 * the WOL partial-reset state or free the DMA rings.
 * NOTE(review): elided listing — statements inside the locked region are
 * not fully shown. */
1610 static int b44_close(struct net_device *dev)
1612 struct b44 *bp = netdev_priv(dev);
1614 netif_stop_queue(dev);
1616 netif_poll_disable(dev);
1618 del_timer_sync(&bp->timer);
1620 spin_lock_irq(&bp->lock);
1627 netif_carrier_off(dev);
1629 spin_unlock_irq(&bp->lock);
1631 free_irq(dev->irq, dev);
1633 netif_poll_enable(dev);
/* With WOL enabled the chip must stay minimally alive: re-init with a
 * partial reset instead of tearing everything down. */
1635 if (bp->flags & B44_FLAG_WOL_ENABLE) {
1636 b44_init_hw(bp, B44_PARTIAL_RESET);
1640 b44_free_consistent(bp);
/* get_stats callback: translate the chip's hardware MIB counters
 * (bp->hw_stats) into the generic net_device_stats structure. */
1645 static struct net_device_stats *b44_get_stats(struct net_device *dev)
1647 struct b44 *bp = netdev_priv(dev);
1648 struct net_device_stats *nstat = &bp->stats;
1649 struct b44_hw_stats *hwstat = &bp->hw_stats;
1651 /* Convert HW stats into netdevice stats. */
1652 nstat->rx_packets = hwstat->rx_pkts;
1653 nstat->tx_packets = hwstat->tx_pkts;
1654 nstat->rx_bytes = hwstat->rx_octets;
1655 nstat->tx_bytes = hwstat->tx_octets;
1656 nstat->tx_errors = (hwstat->tx_jabber_pkts +
1657 hwstat->tx_oversize_pkts +
1658 hwstat->tx_underruns +
1659 hwstat->tx_excessive_cols +
1660 hwstat->tx_late_cols);
/* NOTE(review): 'multicast' (normally received multicasts) is fed from
 * the TX multicast counter here — verify this is intentional for this
 * hardware rather than a typo for an rx_ counter. */
1661 nstat->multicast = hwstat->tx_multicast_pkts;
1662 nstat->collisions = hwstat->tx_total_cols;
1664 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1665 hwstat->rx_undersize);
1666 nstat->rx_over_errors = hwstat->rx_missed_pkts;
1667 nstat->rx_frame_errors = hwstat->rx_align_errs;
1668 nstat->rx_crc_errors = hwstat->rx_crc_errs;
/* rx_errors aggregates every RX-side MIB error counter. */
1669 nstat->rx_errors = (hwstat->rx_jabber_pkts +
1670 hwstat->rx_oversize_pkts +
1671 hwstat->rx_missed_pkts +
1672 hwstat->rx_crc_align_errs +
1673 hwstat->rx_undersize +
1674 hwstat->rx_crc_errs +
1675 hwstat->rx_align_errs +
1676 hwstat->rx_symbol_errs);
1678 nstat->tx_aborted_errors = hwstat->tx_underruns;
1680 /* Carrier lost counter seems to be broken for some devices */
1681 nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
/* Load up to B44_MCAST_TABLE_SIZE multicast addresses from the device's
 * mc_list into the chip's CAM, starting at CAM index 1 (index 0 holds
 * the unicast station address elsewhere).  Returns via the elided tail
 * (see caller, which uses the result as the next free CAM index). */
1687 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1689 struct dev_mc_list *mclist;
1692 num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
1693 mclist = dev->mc_list;
1694 for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
1695 __b44_cam_write(bp, mclist->dmi_addr, i + 1);
/* Program the RX filter (caller holds bp->lock): promiscuous mode short-
 * circuits everything; otherwise the station address is written, multi-
 * cast is handled via ALLMULTI or the CAM, and the CAM is enabled. */
1700 static void __b44_set_rx_mode(struct net_device *dev)
1702 struct b44 *bp = netdev_priv(dev);
1705 val = br32(bp, B44_RXCONFIG);
1706 val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1707 if (dev->flags & IFF_PROMISC) {
1708 val |= RXCONFIG_PROMISC;
1709 bw32(bp, B44_RXCONFIG, val);
/* Non-promiscuous path: 'zero' is used to blank unused CAM slots. */
1711 unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1714 __b44_set_mac_addr(bp);
/* Too many groups (or ALLMULTI requested): accept all multicast. */
1716 if ((dev->flags & IFF_ALLMULTI) ||
1717 (dev->mc_count > B44_MCAST_TABLE_SIZE))
1718 val |= RXCONFIG_ALLMULTI;
/* Otherwise load the groups into the CAM; i is the next free index. */
1720 i = __b44_load_mcast(bp, dev);
1723 __b44_cam_write(bp, zero, i);
1725 bw32(bp, B44_RXCONFIG, val);
1726 val = br32(bp, B44_CAM_CTRL);
1727 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
/* Locked wrapper around __b44_set_rx_mode for the set_multicast_list
 * netdevice callback. */
1731 static void b44_set_rx_mode(struct net_device *dev)
1733 struct b44 *bp = netdev_priv(dev);
1735 spin_lock_irq(&bp->lock);
1736 __b44_set_rx_mode(dev);
1737 spin_unlock_irq(&bp->lock);
/* ethtool get_msglevel: return the driver's message-enable bitmask. */
1740 static u32 b44_get_msglevel(struct net_device *dev)
1742 struct b44 *bp = netdev_priv(dev);
1743 return bp->msg_enable;
/* ethtool set_msglevel: store the new message-enable bitmask. */
1746 static void b44_set_msglevel(struct net_device *dev, u32 value)
1748 struct b44 *bp = netdev_priv(dev);
1749 bp->msg_enable = value;
/* ethtool get_drvinfo: report driver name, version and PCI bus id.
 * NOTE(review): these are unbounded strcpy()s into fixed-size ethtool
 * fields; the constants fit today, but a length-bounded copy (strlcpy)
 * would be safer — confirm against the ethtool_drvinfo field sizes. */
1752 static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1754 struct b44 *bp = netdev_priv(dev);
1755 struct pci_dev *pci_dev = bp->pdev;
1757 strcpy (info->driver, DRV_MODULE_NAME);
1758 strcpy (info->version, DRV_MODULE_VERSION);
1759 strcpy (info->bus_info, pci_name(pci_dev));
/* ethtool nway_reset: restart autonegotiation if the PHY has it enabled.
 * NOTE(review): BMCR is read twice back to back — presumably the first
 * read flushes latched/stale status; confirm against the PHY datasheet
 * before "simplifying". */
1762 static int b44_nway_reset(struct net_device *dev)
1764 struct b44 *bp = netdev_priv(dev);
1768 spin_lock_irq(&bp->lock);
1769 b44_readphy(bp, MII_BMCR, &bmcr);
1770 b44_readphy(bp, MII_BMCR, &bmcr);
1772 if (bmcr & BMCR_ANENABLE) {
1773 b44_writephy(bp, MII_BMCR,
1774 bmcr | BMCR_ANRESTART);
1777 spin_unlock_irq(&bp->lock);
/* ethtool get_settings: report supported/advertised modes, current
 * speed/duplex, PHY address and autoneg state, all derived from
 * bp->flags (no hardware access in the visible lines). */
1782 static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1784 struct b44 *bp = netdev_priv(dev);
1786 cmd->supported = (SUPPORTED_Autoneg);
1787 cmd->supported |= (SUPPORTED_100baseT_Half |
1788 SUPPORTED_100baseT_Full |
1789 SUPPORTED_10baseT_Half |
1790 SUPPORTED_10baseT_Full |
/* Rebuild the advertising mask from the driver's flag bits. */
1793 cmd->advertising = 0;
1794 if (bp->flags & B44_FLAG_ADV_10HALF)
1795 cmd->advertising |= ADVERTISED_10baseT_Half;
1796 if (bp->flags & B44_FLAG_ADV_10FULL)
1797 cmd->advertising |= ADVERTISED_10baseT_Full;
1798 if (bp->flags & B44_FLAG_ADV_100HALF)
1799 cmd->advertising |= ADVERTISED_100baseT_Half;
1800 if (bp->flags & B44_FLAG_ADV_100FULL)
1801 cmd->advertising |= ADVERTISED_100baseT_Full;
1802 cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1803 cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1804 SPEED_100 : SPEED_10;
1805 cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1806 DUPLEX_FULL : DUPLEX_HALF;
1808 cmd->phy_address = bp->phy_addr;
1809 cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1810 XCVR_INTERNAL : XCVR_EXTERNAL;
1811 cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1812 AUTONEG_DISABLE : AUTONEG_ENABLE;
1813 if (cmd->autoneg == AUTONEG_ENABLE)
1814 cmd->advertising |= ADVERTISED_Autoneg;
/* Interface down: speed/duplex are not meaningful (tail elided). */
1815 if (!netif_running(dev)){
/* ethtool set_settings: validate the request (gigabit is rejected, and
 * forced mode must be 10/100 half/full), then translate it into
 * bp->flags under the lock and re-init the link if running. */
1824 static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1826 struct b44 *bp = netdev_priv(dev);
1828 /* We do not support gigabit. */
1829 if (cmd->autoneg == AUTONEG_ENABLE) {
1830 if (cmd->advertising &
1831 (ADVERTISED_1000baseT_Half |
1832 ADVERTISED_1000baseT_Full))
1834 } else if ((cmd->speed != SPEED_100 &&
1835 cmd->speed != SPEED_10) ||
1836 (cmd->duplex != DUPLEX_HALF &&
1837 cmd->duplex != DUPLEX_FULL)) {
1841 spin_lock_irq(&bp->lock);
1843 if (cmd->autoneg == AUTONEG_ENABLE) {
/* Autoneg: clear forced-link state, then set advertise bits. */
1844 bp->flags &= ~(B44_FLAG_FORCE_LINK |
1845 B44_FLAG_100_BASE_T |
1846 B44_FLAG_FULL_DUPLEX |
1847 B44_FLAG_ADV_10HALF |
1848 B44_FLAG_ADV_10FULL |
1849 B44_FLAG_ADV_100HALF |
1850 B44_FLAG_ADV_100FULL);
/* An empty advertising mask means "advertise everything we can". */
1851 if (cmd->advertising == 0) {
1852 bp->flags |= (B44_FLAG_ADV_10HALF |
1853 B44_FLAG_ADV_10FULL |
1854 B44_FLAG_ADV_100HALF |
1855 B44_FLAG_ADV_100FULL);
1857 if (cmd->advertising & ADVERTISED_10baseT_Half)
1858 bp->flags |= B44_FLAG_ADV_10HALF;
1859 if (cmd->advertising & ADVERTISED_10baseT_Full)
1860 bp->flags |= B44_FLAG_ADV_10FULL;
1861 if (cmd->advertising & ADVERTISED_100baseT_Half)
1862 bp->flags |= B44_FLAG_ADV_100HALF;
1863 if (cmd->advertising & ADVERTISED_100baseT_Full)
1864 bp->flags |= B44_FLAG_ADV_100FULL;
/* Forced mode: record speed and duplex explicitly. */
1867 bp->flags |= B44_FLAG_FORCE_LINK;
1868 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1869 if (cmd->speed == SPEED_100)
1870 bp->flags |= B44_FLAG_100_BASE_T;
1871 if (cmd->duplex == DUPLEX_FULL)
1872 bp->flags |= B44_FLAG_FULL_DUPLEX;
/* Apply the new configuration only if the interface is up. */
1875 if (netif_running(dev))
1878 spin_unlock_irq(&bp->lock);
/* ethtool get_ringparam: report RX ring limits and current setting.
 * TX limits are omitted (see the XXX below). */
1883 static void b44_get_ringparam(struct net_device *dev,
1884 struct ethtool_ringparam *ering)
1886 struct b44 *bp = netdev_priv(dev);
1888 ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1889 ering->rx_pending = bp->rx_pending;
1891 /* XXX ethtool lacks a tx_max_pending, oops... */
/* ethtool set_ringparam: validate the requested ring sizes (mini/jumbo
 * rings are unsupported), apply them under the lock, re-init the chip
 * with a full reset, and restart TX + interrupts. */
1894 static int b44_set_ringparam(struct net_device *dev,
1895 struct ethtool_ringparam *ering)
1897 struct b44 *bp = netdev_priv(dev);
1899 if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1900 (ering->rx_mini_pending != 0) ||
1901 (ering->rx_jumbo_pending != 0) ||
1902 (ering->tx_pending > B44_TX_RING_SIZE - 1))
1905 spin_lock_irq(&bp->lock);
1907 bp->rx_pending = ering->rx_pending;
1908 bp->tx_pending = ering->tx_pending;
/* Ring geometry changed: full hardware re-init is required. */
1912 b44_init_hw(bp, B44_FULL_RESET);
1913 netif_wake_queue(bp->dev);
1914 spin_unlock_irq(&bp->lock);
1916 b44_enable_ints(bp);
/* ethtool get_pauseparam: report flow-control state from bp->flags. */
1921 static void b44_get_pauseparam(struct net_device *dev,
1922 struct ethtool_pauseparam *epause)
1924 struct b44 *bp = netdev_priv(dev);
1927 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1929 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1931 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
/* ethtool set_pauseparam: update flow-control flags under the lock;
 * with autoneg pause a full re-init renegotiates, otherwise the pause
 * settings are pushed directly via __b44_set_flow_ctrl. */
1934 static int b44_set_pauseparam(struct net_device *dev,
1935 struct ethtool_pauseparam *epause)
1937 struct b44 *bp = netdev_priv(dev);
1939 spin_lock_irq(&bp->lock);
1940 if (epause->autoneg)
1941 bp->flags |= B44_FLAG_PAUSE_AUTO;
1943 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1944 if (epause->rx_pause)
1945 bp->flags |= B44_FLAG_RX_PAUSE;
1947 bp->flags &= ~B44_FLAG_RX_PAUSE;
1948 if (epause->tx_pause)
1949 bp->flags |= B44_FLAG_TX_PAUSE;
1951 bp->flags &= ~B44_FLAG_TX_PAUSE;
1952 if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1955 b44_init_hw(bp, B44_FULL_RESET);
1957 __b44_set_flow_ctrl(bp, bp->flags);
1959 spin_unlock_irq(&bp->lock);
1961 b44_enable_ints(bp);
/* ethtool get_strings: copy the whole statistics-name table out. */
1966 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1970 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
/* ethtool get_stats_count: number of entries in b44_gstrings. */
1975 static int b44_get_stats_count(struct net_device *dev)
1977 return ARRAY_SIZE(b44_gstrings);
/* ethtool get_ethtool_stats: refresh the MIB counters under the lock
 * and copy them (treated as a flat u32 array starting at
 * tx_good_octets) into the caller's u64 buffer.
 * NOTE(review): relies on struct b44_hw_stats being laid out as a
 * contiguous run of u32 counters matching b44_gstrings order. */
1980 static void b44_get_ethtool_stats(struct net_device *dev,
1981 struct ethtool_stats *stats, u64 *data)
1983 struct b44 *bp = netdev_priv(dev);
1984 u32 *val = &bp->hw_stats.tx_good_octets;
1987 spin_lock_irq(&bp->lock);
1989 b44_stats_update(bp);
1991 for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
1994 spin_unlock_irq(&bp->lock);
/* ethtool get_wol: only magic-packet wake is supported; report whether
 * it is currently enabled and clear the (unused) SecureOn password. */
1997 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1999 struct b44 *bp = netdev_priv(dev);
2001 wol->supported = WAKE_MAGIC;
2002 if (bp->flags & B44_FLAG_WOL_ENABLE)
2003 wol->wolopts = WAKE_MAGIC;
2006 memset(&wol->sopass, 0, sizeof(wol->sopass));
/* ethtool set_wol: record the magic-packet enable flag under the lock;
 * the hardware is actually armed later, at close/suspend time. */
2009 static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2011 struct b44 *bp = netdev_priv(dev);
2013 spin_lock_irq(&bp->lock);
2014 if (wol->wolopts & WAKE_MAGIC)
2015 bp->flags |= B44_FLAG_WOL_ENABLE;
2017 bp->flags &= ~B44_FLAG_WOL_ENABLE;
2018 spin_unlock_irq(&bp->lock);
/* ethtool operations table wiring the callbacks defined above. */
2023 static const struct ethtool_ops b44_ethtool_ops = {
2024 .get_drvinfo = b44_get_drvinfo,
2025 .get_settings = b44_get_settings,
2026 .set_settings = b44_set_settings,
2027 .nway_reset = b44_nway_reset,
2028 .get_link = ethtool_op_get_link,
2029 .get_wol = b44_get_wol,
2030 .set_wol = b44_set_wol,
2031 .get_ringparam = b44_get_ringparam,
2032 .set_ringparam = b44_set_ringparam,
2033 .get_pauseparam = b44_get_pauseparam,
2034 .set_pauseparam = b44_set_pauseparam,
2035 .get_msglevel = b44_get_msglevel,
2036 .set_msglevel = b44_set_msglevel,
2037 .get_strings = b44_get_strings,
2038 .get_stats_count = b44_get_stats_count,
2039 .get_ethtool_stats = b44_get_ethtool_stats,
2040 .get_perm_addr = ethtool_op_get_perm_addr,
/* do_ioctl callback: delegate MII ioctls to the generic MII layer,
 * serialized against the rest of the driver by bp->lock.  Rejected
 * (elided return) when the interface is down. */
2043 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2045 struct mii_ioctl_data *data = if_mii(ifr);
2046 struct b44 *bp = netdev_priv(dev);
2049 if (!netif_running(dev))
2052 spin_lock_irq(&bp->lock);
2053 err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
2054 spin_unlock_irq(&bp->lock);
2059 /* Read 128-bytes of EEPROM. */
/* The EEPROM appears as 16-bit words at register offset 4096; words are
 * stored into 'data' little-endian. */
2060 static int b44_read_eeprom(struct b44 *bp, u8 *data)
2063 __le16 *ptr = (__le16 *) data;
2065 for (i = 0; i < 128; i += 2)
2066 ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
/* Probe-time setup that never changes afterwards: read the EEPROM,
 * extract and validate the MAC address, and initialize PHY address,
 * interrupt mask, SSB core unit, and DMA offset. */
2071 static int __devinit b44_get_invariants(struct b44 *bp)
2076 err = b44_read_eeprom(bp, &eeprom[0]);
/* MAC address bytes are stored byte-swapped within each 16-bit EEPROM
 * word, hence the pairwise-reversed indices. */
2080 bp->dev->dev_addr[0] = eeprom[79];
2081 bp->dev->dev_addr[1] = eeprom[78];
2082 bp->dev->dev_addr[2] = eeprom[81];
2083 bp->dev->dev_addr[3] = eeprom[80];
2084 bp->dev->dev_addr[4] = eeprom[83];
2085 bp->dev->dev_addr[5] = eeprom[82];
2087 if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
2088 printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
2092 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
2094 bp->phy_addr = eeprom[90] & 0x1f;
2096 /* With this, plus the rx_header prepended to the data by the
2097 * hardware, we'll land the ethernet header on a 2-byte boundary.
2101 bp->imask = IMASK_DEF;
2103 bp->core_unit = ssb_core_unit(bp);
2104 bp->dma_offset = SB_PCI_DMA;
2106 /* XXX - really required?
2107 bp->flags |= B44_FLAG_BUGGY_TXPTR;
/* Core revision 7 and later is "B0 or newer" silicon (direct magic-
 * packet WOL support — see b44_setup_wol). */
2110 if (ssb_get_core_rev(bp) >= 7)
2111 bp->flags |= B44_FLAG_B0_ANDLATER;
/* PCI probe: enable the device, claim its MMIO BAR, configure the
 * 30-bit DMA masks this chip requires, allocate and wire up the
 * net_device, fetch invariants, register the netdev, and print the MAC.
 * NOTE(review): elided listing — several error-path labels and cleanup
 * statements between the visible lines are not shown. */
2117 static int __devinit b44_init_one(struct pci_dev *pdev,
2118 const struct pci_device_id *ent)
2120 static int b44_version_printed = 0;
2121 unsigned long b44reg_base, b44reg_len;
2122 struct net_device *dev;
/* Print the driver banner exactly once, on the first probe. */
2126 if (b44_version_printed++ == 0)
2127 printk(KERN_INFO "%s", version);
2129 err = pci_enable_device(pdev);
2131 dev_err(&pdev->dev, "Cannot enable PCI device, "
2136 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2138 "Cannot find proper PCI device "
2139 "base address, aborting.\n");
2141 goto err_out_disable_pdev;
2144 err = pci_request_regions(pdev, DRV_MODULE_NAME);
2147 "Cannot obtain PCI resources, aborting.\n");
2148 goto err_out_disable_pdev;
2151 pci_set_master(pdev);
/* The 4400 can only address 30 bits of DMA space — set both the
 * streaming and the coherent mask accordingly. */
2153 err = pci_set_dma_mask(pdev, (u64) DMA_30BIT_MASK);
2155 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2156 goto err_out_free_res;
2159 err = pci_set_consistent_dma_mask(pdev, (u64) DMA_30BIT_MASK);
2161 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2162 goto err_out_free_res;
2165 b44reg_base = pci_resource_start(pdev, 0);
2166 b44reg_len = pci_resource_len(pdev, 0);
2168 dev = alloc_etherdev(sizeof(*bp));
2170 dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
2172 goto err_out_free_res;
2175 SET_MODULE_OWNER(dev);
2176 SET_NETDEV_DEV(dev,&pdev->dev);
2178 /* No interesting netdevice features in this card... */
2181 bp = netdev_priv(dev);
2185 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2187 spin_lock_init(&bp->lock);
2189 bp->regs = ioremap(b44reg_base, b44reg_len);
2190 if (bp->regs == 0UL) {
2191 dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
2193 goto err_out_free_dev;
2196 bp->rx_pending = B44_DEF_RX_RING_PENDING;
2197 bp->tx_pending = B44_DEF_TX_RING_PENDING;
/* Wire the netdevice callbacks to the handlers in this file. */
2199 dev->open = b44_open;
2200 dev->stop = b44_close;
2201 dev->hard_start_xmit = b44_start_xmit;
2202 dev->get_stats = b44_get_stats;
2203 dev->set_multicast_list = b44_set_rx_mode;
2204 dev->set_mac_address = b44_set_mac_addr;
2205 dev->do_ioctl = b44_ioctl;
2206 dev->tx_timeout = b44_tx_timeout;
2207 dev->poll = b44_poll;
2209 dev->watchdog_timeo = B44_TX_TIMEOUT;
2210 #ifdef CONFIG_NET_POLL_CONTROLLER
2211 dev->poll_controller = b44_poll_controller;
2213 dev->change_mtu = b44_change_mtu;
2214 dev->irq = pdev->irq;
2215 SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2217 netif_carrier_off(dev);
2219 err = b44_get_invariants(bp);
2222 "Problem fetching invariants of chip, aborting.\n");
2223 goto err_out_iounmap;
/* MII library hookup: our PHY read/write helpers, 5-bit id masks. */
2226 bp->mii_if.dev = dev;
2227 bp->mii_if.mdio_read = b44_mii_read;
2228 bp->mii_if.mdio_write = b44_mii_write;
2229 bp->mii_if.phy_id = bp->phy_addr;
2230 bp->mii_if.phy_id_mask = 0x1f;
2231 bp->mii_if.reg_num_mask = 0x1f;
2233 /* By default, advertise all speed/duplex settings. */
2234 bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2235 B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2237 /* By default, auto-negotiate PAUSE. */
2238 bp->flags |= B44_FLAG_PAUSE_AUTO;
2240 err = register_netdev(dev);
2242 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
2243 goto err_out_iounmap;
2246 pci_set_drvdata(pdev, dev);
/* Snapshot config space so resume can restore it. */
2248 pci_save_state(bp->pdev);
2250 /* Chip reset provides power to the b44 MAC & PCI cores, which
2251 * is necessary for MAC register access.
2255 printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
2256 for (i = 0; i < 6; i++)
2257 printk("%2.2x%c", dev->dev_addr[i],
2258 i == 5 ? '\n' : ':');
/* Error unwinding (labels partially elided above). */
2269 pci_release_regions(pdev);
2271 err_out_disable_pdev:
2272 pci_disable_device(pdev);
2273 pci_set_drvdata(pdev, NULL);
/* PCI remove: unregister the netdev and release all PCI resources.
 * (iounmap/free_netdev presumably sit in the elided lines — verify.) */
2277 static void __devexit b44_remove_one(struct pci_dev *pdev)
2279 struct net_device *dev = pci_get_drvdata(pdev);
2280 struct b44 *bp = netdev_priv(dev);
2282 unregister_netdev(dev);
2285 pci_release_regions(pdev);
2286 pci_disable_device(pdev);
2287 pci_set_drvdata(pdev, NULL);
/* PCI suspend: if the interface is up, stop the timer, detach the
 * device under the lock, release the IRQ, arm WOL (partial re-init)
 * when requested, and power the PCI device down. */
2290 static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
2292 struct net_device *dev = pci_get_drvdata(pdev);
2293 struct b44 *bp = netdev_priv(dev);
/* Nothing to quiesce if the interface was never brought up. */
2295 if (!netif_running(dev))
2298 del_timer_sync(&bp->timer);
2300 spin_lock_irq(&bp->lock);
2303 netif_carrier_off(bp->dev);
2304 netif_device_detach(bp->dev);
2307 spin_unlock_irq(&bp->lock);
2309 free_irq(dev->irq, dev);
2310 if (bp->flags & B44_FLAG_WOL_ENABLE) {
2311 b44_init_hw(bp, B44_PARTIAL_RESET);
2314 pci_disable_device(pdev);
/* PCI resume: restore config space and re-enable the device; if the
 * interface was up, reacquire the IRQ, fully re-init the hardware,
 * re-attach, restart the link timer and wake the TX queue. */
2318 static int b44_resume(struct pci_dev *pdev)
2320 struct net_device *dev = pci_get_drvdata(pdev);
2321 struct b44 *bp = netdev_priv(dev);
2324 pci_restore_state(pdev);
2325 rc = pci_enable_device(pdev);
2327 printk(KERN_ERR PFX "%s: pci_enable_device failed\n",
2332 pci_set_master(pdev);
2334 if (!netif_running(dev))
2337 rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
2339 printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
2340 pci_disable_device(pdev);
2344 spin_lock_irq(&bp->lock);
2347 b44_init_hw(bp, B44_FULL_RESET);
2348 netif_device_attach(bp->dev);
2349 spin_unlock_irq(&bp->lock);
/* Re-arm the 1 Hz link timer stopped in b44_suspend. */
2351 bp->timer.expires = jiffies + HZ;
2352 add_timer(&bp->timer);
2354 b44_enable_ints(bp);
2355 netif_wake_queue(dev);
/* PCI driver descriptor tying probe/remove and power management
 * callbacks to the b44 device id table. */
2359 static struct pci_driver b44_driver = {
2360 .name = DRV_MODULE_NAME,
2361 .id_table = b44_pci_tbl,
2362 .probe = b44_init_one,
2363 .remove = __devexit_p(b44_remove_one),
2364 .suspend = b44_suspend,
2365 .resume = b44_resume,
/* Module init: derive the DMA descriptor alignment mask and sync size
 * from the CPU cache line, then register the PCI driver. */
2368 static int __init b44_init(void)
2370 unsigned int dma_desc_align_size = dma_get_cache_alignment();
2372 /* Setup paramaters for syncing RX/TX DMA descriptors */
2373 dma_desc_align_mask = ~(dma_desc_align_size - 1);
2374 dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
2376 return pci_register_driver(&b44_driver);
/* Module exit: unregister the PCI driver (detaches all devices). */
2379 static void __exit b44_cleanup(void)
2381 pci_unregister_driver(&b44_driver);
2384 module_init(b44_init);
2385 module_exit(b44_cleanup);