1 /* b44.c: Broadcom 4400 device driver.
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
5 * Copyright (C) 2006 Broadcom Corporation.
7 * Distribute under GPL.
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/types.h>
14 #include <linux/netdevice.h>
15 #include <linux/ethtool.h>
16 #include <linux/mii.h>
17 #include <linux/if_ether.h>
18 #include <linux/if_vlan.h>
19 #include <linux/etherdevice.h>
20 #include <linux/pci.h>
21 #include <linux/delay.h>
22 #include <linux/init.h>
23 #include <linux/dma-mapping.h>
25 #include <asm/uaccess.h>
/* Driver identification strings used in log messages and module info. */
31 #define DRV_MODULE_NAME "b44"
32 #define PFX DRV_MODULE_NAME ": "
33 #define DRV_MODULE_VERSION "1.01"
34 #define DRV_MODULE_RELDATE "Jun 16, 2006"
36 #define B44_DEF_MSG_ENABLE \
46 /* length of time before we decide the hardware is borked,
47 * and dev->tx_timeout() should be called to fix the problem
49 #define B44_TX_TIMEOUT (5 * HZ)
51 /* hardware minimum and maximum for a single frame's data payload */
52 #define B44_MIN_MTU 60
53 #define B44_MAX_MTU 1500
/* RX/TX descriptor ring geometry.  Ring sizes must remain powers of
 * two: NEXT_TX() below advances an index by masking with (size - 1).
 */
55 #define B44_RX_RING_SIZE 512
56 #define B44_DEF_RX_RING_PENDING 200
57 #define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \
59 #define B44_TX_RING_SIZE 512
60 #define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1)
61 #define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \
64 #define TX_RING_GAP(BP) \
65 (B44_TX_RING_SIZE - (BP)->tx_pending)
66 #define TX_BUFFS_AVAIL(BP) \
67 (((BP)->tx_cons <= (BP)->tx_prod) ? \
68 (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \
69 (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
70 #define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))
/* The chip DMA-writes a struct rx_header in front of each packet;
 * RX_PKT_OFFSET is the gap reserved for it (see b44_alloc_rx_skb).
 */
72 #define RX_PKT_OFFSET 30
73 #define RX_PKT_BUF_SZ (1536 + RX_PKT_OFFSET + 64)
75 /* minimum number of free TX descriptors required to wake up TX process */
76 #define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
78 /* b44 internal pattern match filter info */
79 #define B44_PATTERN_BASE 0x400
80 #define B44_PATTERN_SIZE 0x80
81 #define B44_PMASK_BASE 0x600
82 #define B44_PMASK_SIZE 0x10
83 #define B44_MAX_PATTERNS 16
84 #define B44_ETHIPV6UDP_HLEN 62
85 #define B44_ETHIPV4UDP_HLEN 42
/* Version banner printed once at probe time. */
87 static char version[] __devinitdata =
88 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
90 MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
91 MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
92 MODULE_LICENSE("GPL");
93 MODULE_VERSION(DRV_MODULE_VERSION);
95 static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
96 module_param(b44_debug, int, 0);
97 MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
/* PCI IDs this driver binds to: BCM4401 and its B0/B1 revisions. */
99 static struct pci_device_id b44_pci_tbl[] = {
100 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
101 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
102 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
103 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
104 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
105 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
106 { } /* terminate list with empty entry */
109 MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
/* Forward declarations for routines used before their definitions. */
111 static void b44_halt(struct b44 *);
112 static void b44_init_rings(struct b44 *);
/* Kinds of chip reset accepted by b44_init_hw(). */
114 #define B44_FULL_RESET 1
115 #define B44_FULL_RESET_SKIP_PHY 2
116 #define B44_PARTIAL_RESET 3
118 static void b44_init_hw(struct b44 *, int);
/* Alignment mask and sync length used by the descriptor-sync helpers
 * below (initialized elsewhere in this file).
 */
120 static int dma_desc_align_mask;
121 static int dma_desc_sync_size;
/* ethtool statistics name table; _B44() stringifies each entry
 * (entry list truncated in this view).
 */
123 static const char b44_gstrings[][ETH_GSTRING_LEN] = {
124 #define _B44(x...) # x,
/* Flush CPU writes to one DMA descriptor out to the device.  Used when
 * the ring lives in streaming-mapped kmalloc memory (the RING_HACK
 * fallback); the offset is rounded down via dma_desc_align_mask.
 */
129 static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
131 unsigned long offset,
132 enum dma_data_direction dir)
134 dma_sync_single_range_for_device(&pdev->dev, dma_base,
135 offset & dma_desc_align_mask,
136 dma_desc_sync_size, dir);
/* Counterpart of b44_sync_dma_desc_for_device(): make device-side
 * descriptor updates visible to the CPU before reading them.
 */
139 static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
141 unsigned long offset,
142 enum dma_data_direction dir)
144 dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
145 offset & dma_desc_align_mask,
146 dma_desc_sync_size, dir);
/* 32-bit MMIO read from the chip's register window at offset 'reg'. */
149 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
151 return readl(bp->regs + reg);
/* 32-bit MMIO write of 'val' to the chip register at offset 'reg'. */
154 static inline void bw32(const struct b44 *bp,
155 unsigned long reg, unsigned long val)
157 writel(val, bp->regs + reg);
/* Busy-poll register 'reg' up to 'timeout' iterations, waiting for
 * 'bit' to become set (clear == 0) or cleared (clear != 0).  On
 * timeout a KERN_ERR message is logged; the return statements are not
 * visible in this extract, but callers treat nonzero as failure.
 */
160 static int b44_wait_bit(struct b44 *bp, unsigned long reg,
161 u32 bit, unsigned long timeout, const int clear)
165 for (i = 0; i < timeout; i++) {
166 u32 val = br32(bp, reg);
168 if (clear && !(val & bit))
170 if (!clear && (val & bit))
175 printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register "
179 (clear ? "clear" : "set"));
185 /* Sonics SiliconBackplane support routines. ROFL, you should see all the
186 * buzz words used on this company's website :-)
188 * All of these routines must be invoked with bp->lock held and
189 * interrupts disabled.
192 #define SB_PCI_DMA 0x40000000 /* Client Mode PCI memory access space (1 GB) */
193 #define BCM4400_PCI_CORE_ADDR 0x18002000 /* Address of PCI core on BCM4400 cards */
/* Return the core-revision field of the SBIDHIGH identification register. */
195 static u32 ssb_get_core_rev(struct b44 *bp)
197 return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
/* Configure the SSB PCI bridge core: temporarily retarget BAR0 at the
 * PCI core, route the requested interrupt vectors, enable prefetch and
 * burst on PCI transactions, then restore the original BAR0 window.
 * Returns the PCI core revision read while the window was mapped.
 */
200 static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
202 u32 bar_orig, pci_rev, val;
204 pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
205 pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
206 pci_rev = ssb_get_core_rev(bp);
208 val = br32(bp, B44_SBINTVEC);
210 bw32(bp, B44_SBINTVEC, val);
/* Enable prefetch and burst for PCI memory transactions. */
212 val = br32(bp, SSB_PCI_TRANS_2);
213 val |= SSB_PCI_PREF | SSB_PCI_BURST;
214 bw32(bp, SSB_PCI_TRANS_2, val);
216 pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);
/* Put the SSB core into reset.  If it is already held in reset there
 * is nothing to do; otherwise assert REJECT, wait for it to take and
 * for the core to go idle, then pulse reset with the clock gated.
 */
221 static void ssb_core_disable(struct b44 *bp)
223 if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
226 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
227 b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
228 b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
229 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
230 SBTMSLOW_REJECT | SBTMSLOW_RESET))
231 br32(bp, B44_SBTMSLOW);
233 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
234 br32(bp, B44_SBTMSLOW);
/* Full reset sequence for the SSB core: disable it, re-enable the
 * clock while still in reset, clear latched error state (SERR and
 * inbound-error/timeout bits), then release reset and finally drop
 * the force-gated-clock bit.  Each bw32 is followed by a readback to
 * flush the posted write before the next step.
 */
238 static void ssb_core_reset(struct b44 *bp)
242 ssb_core_disable(bp);
243 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
244 br32(bp, B44_SBTMSLOW);
247 /* Clear SERR if set, this is a hw bug workaround. */
248 if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
249 bw32(bp, B44_SBTMSHIGH, 0);
251 val = br32(bp, B44_SBIMSTATE);
252 if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
253 bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));
255 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
256 br32(bp, B44_SBTMSLOW);
259 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
260 br32(bp, B44_SBTMSLOW);
/* Derive the core unit number from the SBADMATCH0 address-match
 * register: the TYPE field selects which base-address mask applies.
 * The switch/return structure is truncated in this extract.
 */
264 static int ssb_core_unit(struct b44 *bp)
267 u32 val = br32(bp, B44_SBADMATCH0);
270 type = val & SBADMATCH0_TYPE_MASK;
273 base = val & SBADMATCH0_BS0_MASK;
277 base = val & SBADMATCH0_BS1_MASK;
282 base = val & SBADMATCH0_BS2_MASK;
/* True when the core is clocked and neither in reset nor rejecting
 * transactions (the comparison value is truncated in this extract).
 */
289 static int ssb_is_core_up(struct b44 *bp)
291 return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
/* Write one 6-byte MAC address into the chip's CAM at 'index'.
 * Bytes 2-5 go to CAM_DATA_LO, bytes 0-1 plus the VALID flag to
 * CAM_DATA_HI, then a WRITE command is issued and we poll until the
 * CAM controller is no longer busy.  Caller holds bp->lock.
 */
295 static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
299 val = ((u32) data[2]) << 24;
300 val |= ((u32) data[3]) << 16;
301 val |= ((u32) data[4]) << 8;
302 val |= ((u32) data[5]) << 0;
303 bw32(bp, B44_CAM_DATA_LO, val);
304 val = (CAM_DATA_HI_VALID |
305 (((u32) data[0]) << 8) |
306 (((u32) data[1]) << 0));
307 bw32(bp, B44_CAM_DATA_HI, val);
308 bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
309 (index << CAM_CTRL_INDEX_SHIFT)));
310 b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
/* Mask all chip interrupts by clearing the interrupt-mask register. */
313 static inline void __b44_disable_ints(struct b44 *bp)
315 bw32(bp, B44_IMASK, 0);
/* Disable interrupts and flush the posted IMASK write (readback line
 * truncated in this extract).
 */
318 static void b44_disable_ints(struct b44 *bp)
320 __b44_disable_ints(bp);
322 /* Flush posted writes. */
/* Re-enable the interrupt sources recorded in bp->imask. */
326 static void b44_enable_ints(struct b44 *bp)
328 bw32(bp, B44_IMASK, bp->imask);
/* Read MII PHY register 'reg' over the chip's MDIO interface.
 * Clears the MII-done interrupt status, issues a READ frame addressed
 * to bp->phy_addr, waits for completion, and stores the 16-bit result
 * in *val.  Returns the b44_wait_bit() status (0 on success).
 */
331 static int b44_readphy(struct b44 *bp, int reg, u32 *val)
335 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
336 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
337 (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
338 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
339 (reg << MDIO_DATA_RA_SHIFT) |
340 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
341 err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
342 *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
/* Write 'val' to MII PHY register 'reg' via MDIO; mirrors
 * b44_readphy() but with a WRITE opcode and the data in the frame.
 * Returns 0 on completion, nonzero on MDIO timeout.
 */
347 static int b44_writephy(struct b44 *bp, int reg, u32 val)
349 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
350 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
351 (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
352 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
353 (reg << MDIO_DATA_RA_SHIFT) |
354 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
355 (val & MDIO_DATA_DATA)));
356 return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
359 /* miilib interface */
360 /* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
361 * due to code existing before miilib use was added to this driver.
362 * Someone should remove this artificial driver limitation in
363 * b44_{read,write}phy. bp->phy_addr itself is fine (and needed).
/* miilib read hook; delegates to b44_readphy() (return path truncated
 * in this extract).
 */
365 static int b44_mii_read(struct net_device *dev, int phy_id, int location)
368 struct b44 *bp = netdev_priv(dev);
369 int rc = b44_readphy(bp, location, &val);
/* miilib write hook; delegates to b44_writephy(), discarding status. */
375 static void b44_mii_write(struct net_device *dev, int phy_id, int location,
378 struct b44 *bp = netdev_priv(dev);
379 b44_writephy(bp, location, val);
/* Reset the PHY via BMCR and verify the self-clearing RESET bit has
 * dropped; logs an error if the reset never completes (retry/delay
 * lines truncated in this extract).
 */
382 static int b44_phy_reset(struct b44 *bp)
387 err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
391 err = b44_readphy(bp, MII_BMCR, &val);
393 if (val & BMCR_RESET) {
394 printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
/* Program RX/TX pause settings into the MAC according to pause_flags
 * and record them in bp->flags.  TX pause additionally sets the RX
 * high-water mark (0xc0).  Caller holds bp->lock.
 */
403 static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
407 bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
408 bp->flags |= pause_flags;
410 val = br32(bp, B44_RXCONFIG);
411 if (pause_flags & B44_FLAG_RX_PAUSE)
412 val |= RXCONFIG_FLOW;
414 val &= ~RXCONFIG_FLOW;
415 bw32(bp, B44_RXCONFIG, val);
417 val = br32(bp, B44_MAC_FLOW);
418 if (pause_flags & B44_FLAG_TX_PAUSE)
419 val |= (MAC_FLOW_PAUSE_ENAB |
420 (0xc0 & MAC_FLOW_RX_HI_WATER));
422 val &= ~MAC_FLOW_PAUSE_ENAB;
423 bw32(bp, B44_MAC_FLOW, val);
/* Resolve autonegotiated pause capability from our advertisement
 * ('local') and the link partner's ('remote') and apply it.  Only RX
 * pause is enabled by default; see comment below.
 */
426 static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
430 /* The driver supports only rx pause by default because
431 the b44 mac tx pause mechanism generates excessive
433 Use ethtool to turn on b44 tx pause if necessary.
435 if ((local & ADVERTISE_PAUSE_CAP) &&
436 (local & ADVERTISE_PAUSE_ASYM)){
437 if ((remote & LPA_PAUSE_ASYM) &&
438 !(remote & LPA_PAUSE_CAP))
439 pause_enab |= B44_FLAG_RX_PAUSE;
442 __b44_set_flow_ctrl(bp, pause_enab);
/* Configure the PHY after reset: set LED behavior, then either start
 * autonegotiation with an advertisement built from bp->flags, or (for
 * a forced link) program speed/duplex directly into BMCR and disable
 * flow control.  Returns 0 on success or the first PHY-access error.
 */
445 static int b44_setup_phy(struct b44 *bp)
450 if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
452 if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
453 val & MII_ALEDCTRL_ALLMSK)) != 0)
455 if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
457 if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
458 val | MII_TLEDCTRL_ENABLE)) != 0)
461 if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
462 u32 adv = ADVERTISE_CSMA;
/* Build the autoneg advertisement from the configured modes. */
464 if (bp->flags & B44_FLAG_ADV_10HALF)
465 adv |= ADVERTISE_10HALF;
466 if (bp->flags & B44_FLAG_ADV_10FULL)
467 adv |= ADVERTISE_10FULL;
468 if (bp->flags & B44_FLAG_ADV_100HALF)
469 adv |= ADVERTISE_100HALF;
470 if (bp->flags & B44_FLAG_ADV_100FULL)
471 adv |= ADVERTISE_100FULL;
473 if (bp->flags & B44_FLAG_PAUSE_AUTO)
474 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
476 if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
478 if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
479 BMCR_ANRESTART))) != 0)
/* Forced-link path: program speed/duplex directly. */
484 if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
486 bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
487 if (bp->flags & B44_FLAG_100_BASE_T)
488 bmcr |= BMCR_SPEED100;
489 if (bp->flags & B44_FLAG_FULL_DUPLEX)
490 bmcr |= BMCR_FULLDPLX;
491 if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
494 /* Since we will not be negotiating there is no safe way
495 * to determine if the link partner supports flow control
496 * or not. So just disable it completely in this case.
498 b44_set_flow_ctrl(bp, 0, 0);
/* Accumulate the chip's clear-on-read MIB counters into bp->hw_stats.
 * Relies on the counter registers and the hw_stats fields being laid
 * out in the same order, walking both in lockstep.  bp->lock held.
 */
505 static void b44_stats_update(struct b44 *bp)
510 val = &bp->hw_stats.tx_good_octets;
511 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
512 *val++ += br32(bp, reg);
518 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
519 *val++ += br32(bp, reg);
/* Log the current link state: down, or up with the speed, duplex and
 * per-direction flow-control settings derived from bp->flags.
 */
523 static void b44_link_report(struct b44 *bp)
525 if (!netif_carrier_ok(bp->dev)) {
526 printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
528 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
530 (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
531 (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
533 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
536 (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
537 (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
/* Poll the PHY (called from the driver timer): refresh speed/duplex
 * flags from AUXCTRL, handle carrier transitions in either direction
 * (programming MAC duplex and renegotiated flow control on link-up),
 * and warn about remote-fault/jabber conditions reported in BMSR.
 */
541 static void b44_check_phy(struct b44 *bp)
545 if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
546 !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
548 if (aux & MII_AUXCTRL_SPEED)
549 bp->flags |= B44_FLAG_100_BASE_T;
551 bp->flags &= ~B44_FLAG_100_BASE_T;
552 if (aux & MII_AUXCTRL_DUPLEX)
553 bp->flags |= B44_FLAG_FULL_DUPLEX;
555 bp->flags &= ~B44_FLAG_FULL_DUPLEX;
/* Link came up: set MAC duplex to match the PHY, then apply the
 * negotiated flow-control settings (unless the link is forced).
 */
557 if (!netif_carrier_ok(bp->dev) &&
558 (bmsr & BMSR_LSTATUS)) {
559 u32 val = br32(bp, B44_TX_CTRL);
560 u32 local_adv, remote_adv;
562 if (bp->flags & B44_FLAG_FULL_DUPLEX)
563 val |= TX_CTRL_DUPLEX;
565 val &= ~TX_CTRL_DUPLEX;
566 bw32(bp, B44_TX_CTRL, val);
568 if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
569 !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
570 !b44_readphy(bp, MII_LPA, &remote_adv))
571 b44_set_flow_ctrl(bp, local_adv, remote_adv);
574 netif_carrier_on(bp->dev);
576 } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
578 netif_carrier_off(bp->dev);
582 if (bmsr & BMSR_RFAULT)
583 printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
586 printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
/* Periodic (1 Hz) driver timer: checks PHY state and harvests MIB
 * statistics under bp->lock, then re-arms itself on a rounded jiffy.
 */
591 static void b44_timer(unsigned long __opaque)
593 struct b44 *bp = (struct b44 *) __opaque;
595 spin_lock_irq(&bp->lock);
599 b44_stats_update(bp);
601 spin_unlock_irq(&bp->lock);
603 mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
/* Reclaim completed TX descriptors.  Reads the hardware's current
 * descriptor pointer, unmaps and frees every skb between the software
 * consumer index and it, then wakes the queue if enough descriptors
 * freed up.  Called from the poll/interrupt path with bp->lock held.
 */
606 static void b44_tx(struct b44 *bp)
610 cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
611 cur /= sizeof(struct dma_desc);
613 /* XXX needs updating when NETIF_F_SG is supported */
614 for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
615 struct ring_info *rp = &bp->tx_buffers[cons];
616 struct sk_buff *skb = rp->skb;
620 pci_unmap_single(bp->pdev,
621 pci_unmap_addr(rp, mapping),
625 dev_kfree_skb_irq(skb);
629 if (netif_queue_stopped(bp->dev) &&
630 TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
631 netif_wake_queue(bp->dev);
633 bw32(bp, B44_GPTIMER, 0);
636 /* Works like this. This chip writes a 'struct rx_header' 30 bytes
637 * before the DMA address you give it. So we allocate 30 more bytes
638 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
639 * point the chip at 30 bytes past where the rx_header will go.
/* Allocate and map a fresh RX skb for ring slot 'dest_idx_unmasked'.
 * If the first mapping lands above the chip's 1 GB DMA limit (or
 * fails), retry once from GFP_DMA memory.  Fills in the descriptor
 * (addr/ctrl, EOT on the last slot) and returns the buffer size, or a
 * negative value on failure (error-return lines truncated here).
 */
641 static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
644 struct ring_info *src_map, *map;
645 struct rx_header *rh;
653 src_map = &bp->rx_buffers[src_idx];
654 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
655 map = &bp->rx_buffers[dest_idx];
656 skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
660 mapping = pci_map_single(bp->pdev, skb->data,
664 /* Hardware bug work-around, the chip is unable to do PCI DMA
665 to/from anything above 1GB :-( */
666 if (dma_mapping_error(mapping) ||
667 mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
669 if (!dma_mapping_error(mapping))
670 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
671 dev_kfree_skb_any(skb);
672 skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
675 mapping = pci_map_single(bp->pdev, skb->data,
/* Second attempt (GFP_DMA) also out of range: give up. */
678 if (dma_mapping_error(mapping) ||
679 mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
680 if (!dma_mapping_error(mapping))
681 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
682 dev_kfree_skb_any(skb);
/* Reserve room for the rx_header the chip writes in front of the packet. */
687 rh = (struct rx_header *) skb->data;
688 skb_reserve(skb, RX_PKT_OFFSET);
694 pci_unmap_addr_set(map, mapping, mapping);
699 ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - RX_PKT_OFFSET));
700 if (dest_idx == (B44_RX_RING_SIZE - 1))
701 ctrl |= DESC_CTRL_EOT;
703 dp = &bp->rx_ring[dest_idx];
704 dp->ctrl = cpu_to_le32(ctrl);
705 dp->addr = cpu_to_le32((u32) mapping + RX_PKT_OFFSET + bp->dma_offset);
/* NOTE(review): dp is a pointer, so sizeof(dp) is the pointer size,
 * not sizeof(struct dma_desc) — the sync offset below looks wrong;
 * verify against upstream (later kernels use sizeof(*dp) here).
 */
707 if (bp->flags & B44_FLAG_RX_RING_HACK)
708 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
709 dest_idx * sizeof(dp),
712 return RX_PKT_BUF_SZ;
/* Reuse the buffer at 'src_idx' for ring slot 'dest_idx_unmasked'
 * instead of allocating a new skb: copies the skb pointer and DMA
 * mapping to the destination ring_info, rebuilds the destination
 * descriptor (fixing up EOT for the last slot), and hands the buffer
 * back to the device.
 */
715 static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
717 struct dma_desc *src_desc, *dest_desc;
718 struct ring_info *src_map, *dest_map;
719 struct rx_header *rh;
723 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
724 dest_desc = &bp->rx_ring[dest_idx];
725 dest_map = &bp->rx_buffers[dest_idx];
726 src_desc = &bp->rx_ring[src_idx];
727 src_map = &bp->rx_buffers[src_idx];
729 dest_map->skb = src_map->skb;
730 rh = (struct rx_header *) src_map->skb->data;
733 pci_unmap_addr_set(dest_map, mapping,
734 pci_unmap_addr(src_map, mapping));
/* NOTE(review): sizeof(src_desc)/sizeof(dest_desc) below are pointer
 * sizes, not sizeof(struct dma_desc) — the sync offsets look wrong;
 * verify against upstream (later kernels use sizeof(*desc)).
 */
736 if (bp->flags & B44_FLAG_RX_RING_HACK)
737 b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
738 src_idx * sizeof(src_desc),
741 ctrl = src_desc->ctrl;
742 if (dest_idx == (B44_RX_RING_SIZE - 1))
743 ctrl |= cpu_to_le32(DESC_CTRL_EOT);
745 ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
747 dest_desc->ctrl = ctrl;
748 dest_desc->addr = src_desc->addr;
752 if (bp->flags & B44_FLAG_RX_RING_HACK)
753 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
754 dest_idx * sizeof(dest_desc),
757 pci_dma_sync_single_for_device(bp->pdev, le32_to_cpu(src_desc->addr),
/* RX processing loop, up to 'budget' packets.  For each completed
 * descriptor: validate the hardware-written rx_header; drop (and
 * recycle the buffer) on error; for large packets hand the skb up and
 * refill the slot with a new buffer; for small packets copy into a
 * fresh skb and recycle the original buffer.  Returns packets received.
 */
762 static int b44_rx(struct b44 *bp, int budget)
768 prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
769 prod /= sizeof(struct dma_desc);
772 while (cons != prod && budget > 0) {
773 struct ring_info *rp = &bp->rx_buffers[cons];
774 struct sk_buff *skb = rp->skb;
775 dma_addr_t map = pci_unmap_addr(rp, mapping);
776 struct rx_header *rh;
779 pci_dma_sync_single_for_cpu(bp->pdev, map,
782 rh = (struct rx_header *) skb->data;
783 len = le16_to_cpu(rh->len);
784 if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
785 (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
787 b44_recycle_rx(bp, cons, bp->rx_prod);
789 bp->stats.rx_dropped++;
/* The chip may post the descriptor before the length field is
 * written; re-read a few times until it becomes nonzero.
 */
799 len = le16_to_cpu(rh->len);
800 } while (len == 0 && i++ < 5);
/* Large packet: pass the mapped skb up and allocate a replacement. */
808 if (len > RX_COPY_THRESHOLD) {
810 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
813 pci_unmap_single(bp->pdev, map,
814 skb_size, PCI_DMA_FROMDEVICE);
815 /* Leave out rx_header */
816 skb_put(skb, len + RX_PKT_OFFSET);
817 skb_pull(skb, RX_PKT_OFFSET);
819 struct sk_buff *copy_skb;
/* Small packet: copy into a fresh skb, recycle the ring buffer. */
821 b44_recycle_rx(bp, cons, bp->rx_prod);
822 copy_skb = dev_alloc_skb(len + 2);
823 if (copy_skb == NULL)
824 goto drop_it_no_recycle;
826 skb_reserve(copy_skb, 2);
827 skb_put(copy_skb, len);
828 /* DMA sync done above, copy just the actual packet */
829 skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
830 copy_skb->data, len);
833 skb->ip_summed = CHECKSUM_NONE;
834 skb->protocol = eth_type_trans(skb, bp->dev);
835 netif_receive_skb(skb);
836 bp->dev->last_rx = jiffies;
840 bp->rx_prod = (bp->rx_prod + 1) &
841 (B44_RX_RING_SIZE - 1);
842 cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
/* Tell the chip how far we have consumed. */
846 bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
/* Old-style NAPI poll handler (pre-napi_struct API, uses *budget and
 * netdev->quota).  Reaps TX completions, receives up to the budget,
 * recovers from error interrupts with a partial reinit, and returns 0
 * when done (so the core re-enables interrupts) or 1 to be re-polled.
 */
851 static int b44_poll(struct net_device *netdev, int *budget)
853 struct b44 *bp = netdev_priv(netdev);
856 spin_lock_irq(&bp->lock);
858 if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
859 /* spin_lock(&bp->tx_lock); */
861 /* spin_unlock(&bp->tx_lock); */
863 spin_unlock_irq(&bp->lock);
866 if (bp->istat & ISTAT_RX) {
867 int orig_budget = *budget;
870 if (orig_budget > netdev->quota)
871 orig_budget = netdev->quota;
873 work_done = b44_rx(bp, orig_budget);
875 *budget -= work_done;
876 netdev->quota -= work_done;
878 if (work_done >= orig_budget)
/* Error interrupt: reinitialize the hardware (PHY setup skipped). */
882 if (bp->istat & ISTAT_ERRORS) {
885 spin_lock_irqsave(&bp->lock, flags);
888 b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
889 netif_wake_queue(bp->dev);
890 spin_unlock_irqrestore(&bp->lock, flags);
895 netif_rx_complete(netdev);
899 return (done ? 0 : 1);
/* Hard interrupt handler (IRQ may be shared).  Records interrupt
 * status in bp->istat, disables chip interrupts, and schedules the
 * NAPI poll; the ISTAT write-back acknowledges the sources.
 */
902 static irqreturn_t b44_interrupt(int irq, void *dev_id)
904 struct net_device *dev = dev_id;
905 struct b44 *bp = netdev_priv(dev);
909 spin_lock(&bp->lock);
911 istat = br32(bp, B44_ISTAT);
912 imask = br32(bp, B44_IMASK);
914 /* The interrupt mask register controls which interrupt bits
915 * will actually raise an interrupt to the CPU when set by hw/firmware,
916 * but doesn't mask off the bits.
922 if (unlikely(!netif_running(dev))) {
923 printk(KERN_INFO "%s: late interrupt.\n", dev->name);
927 if (netif_rx_schedule_prep(dev)) {
928 /* NOTE: These writes are posted by the readback of
929 * the ISTAT register below.
932 __b44_disable_ints(bp);
933 __netif_rx_schedule(dev);
935 printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
940 bw32(bp, B44_ISTAT, istat);
943 spin_unlock(&bp->lock);
944 return IRQ_RETVAL(handled);
/* netdev tx_timeout hook: the stack decided TX is stuck, so halt the
 * chip, reinitialize it fully under bp->lock, and restart the queue.
 */
947 static void b44_tx_timeout(struct net_device *dev)
949 struct b44 *bp = netdev_priv(dev);
951 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
954 spin_lock_irq(&bp->lock);
958 b44_init_hw(bp, B44_FULL_RESET);
960 spin_unlock_irq(&bp->lock);
964 netif_wake_queue(dev);
/* hard_start_xmit: queue one skb for transmission.  Maps the linear
 * skb data for DMA; if the mapping is above the chip's 1 GB DMA limit
 * (or fails) the data is copied into a GFP_DMA bounce skb.  Fills one
 * descriptor, advances the producer index, and pokes DMATX_PTR (twice
 * for chips with the buggy-TXPTR erratum, with a flushing read for
 * chips with the write-reorder erratum).
 */
967 static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
969 struct b44 *bp = netdev_priv(dev);
970 int rc = NETDEV_TX_OK;
972 u32 len, entry, ctrl;
975 spin_lock_irq(&bp->lock);
977 /* This is a hard error, log it. */
978 if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
979 netif_stop_queue(dev);
980 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
985 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
986 if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
987 struct sk_buff *bounce_skb;
989 /* Chip can't handle DMA to/from >1GB, use bounce buffer */
990 if (!dma_mapping_error(mapping))
991 pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
993 bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
997 mapping = pci_map_single(bp->pdev, bounce_skb->data,
998 len, PCI_DMA_TODEVICE);
999 if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
1000 if (!dma_mapping_error(mapping))
1001 pci_unmap_single(bp->pdev, mapping,
1002 len, PCI_DMA_TODEVICE);
1003 dev_kfree_skb_any(bounce_skb);
/* Copy payload into the bounce skb and transmit that instead. */
1007 skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
1008 dev_kfree_skb_any(skb);
1012 entry = bp->tx_prod;
1013 bp->tx_buffers[entry].skb = skb;
1014 pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);
1016 ctrl = (len & DESC_CTRL_LEN);
1017 ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
1018 if (entry == (B44_TX_RING_SIZE - 1))
1019 ctrl |= DESC_CTRL_EOT;
1021 bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
1022 bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
1024 if (bp->flags & B44_FLAG_TX_RING_HACK)
1025 b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
1026 entry * sizeof(bp->tx_ring[0]),
1029 entry = NEXT_TX(entry);
1031 bp->tx_prod = entry;
/* Kick the DMA engine; see erratum flags above. */
1035 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1036 if (bp->flags & B44_FLAG_BUGGY_TXPTR)
1037 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1038 if (bp->flags & B44_FLAG_REORDER_BUG)
1039 br32(bp, B44_DMATX_PTR);
1041 if (TX_BUFFS_AVAIL(bp) < 1)
1042 netif_stop_queue(dev);
1044 dev->trans_start = jiffies;
1047 spin_unlock_irq(&bp->lock);
1052 rc = NETDEV_TX_BUSY;
/* netdev change_mtu hook.  Rejects out-of-range values; if the device
 * is down the new MTU simply takes effect at the next open, otherwise
 * the hardware is reinitialized under bp->lock with the new size.
 */
1056 static int b44_change_mtu(struct net_device *dev, int new_mtu)
1058 struct b44 *bp = netdev_priv(dev);
1060 if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
1063 if (!netif_running(dev)) {
1064 /* We'll just catch it later when the
1071 spin_lock_irq(&bp->lock);
1075 b44_init_hw(bp, B44_FULL_RESET);
1076 spin_unlock_irq(&bp->lock);
1078 b44_enable_ints(bp);
1083 /* Free up pending packets in all rx/tx rings.
1085 * The chip has been shut down and the driver detached from
1086 * the networking, so no interrupts or new tx packets will
1087 * end up in the driver. bp->lock is not held and we are not
1088 * in an interrupt context and thus may sleep.
1090 static void b44_free_rings(struct b44 *bp)
1092 struct ring_info *rp;
/* Unmap and free every RX buffer still attached to the ring. */
1095 for (i = 0; i < B44_RX_RING_SIZE; i++) {
1096 rp = &bp->rx_buffers[i];
1098 if (rp->skb == NULL)
1100 pci_unmap_single(bp->pdev,
1101 pci_unmap_addr(rp, mapping),
1103 PCI_DMA_FROMDEVICE);
1104 dev_kfree_skb_any(rp->skb);
1108 /* XXX needs changes once NETIF_F_SG is set... */
1109 for (i = 0; i < B44_TX_RING_SIZE; i++) {
1110 rp = &bp->tx_buffers[i];
1112 if (rp->skb == NULL)
1114 pci_unmap_single(bp->pdev,
1115 pci_unmap_addr(rp, mapping),
1118 dev_kfree_skb_any(rp->skb);
1123 /* Initialize tx/rx rings for packet processing.
1125 * The chip has been shut down and the driver detached from
1126 * the networking, so no interrupts or new tx packets will
1127 * end up in the driver.
1129 static void b44_init_rings(struct b44 *bp)
1135 memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1136 memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
/* If a ring lives in streaming-mapped memory (RING_HACK), push the
 * zeroed contents out to the device before refilling.
 */
1138 if (bp->flags & B44_FLAG_RX_RING_HACK)
1139 dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
1141 PCI_DMA_BIDIRECTIONAL);
1143 if (bp->flags & B44_FLAG_TX_RING_HACK)
1144 dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
1148 for (i = 0; i < bp->rx_pending; i++) {
1149 if (b44_alloc_rx_skb(bp, -1, i) < 0)
1155 * Must not be invoked with interrupt sources disabled and
1156 * the hardware shut down.
1158 static void b44_free_consistent(struct b44 *bp)
1160 kfree(bp->rx_buffers);
1161 bp->rx_buffers = NULL;
1162 kfree(bp->tx_buffers);
1163 bp->tx_buffers = NULL;
/* Rings allocated via the kmalloc+dma_map fallback (RING_HACK) must
 * be unmapped and kfree'd; pci_alloc_consistent rings are released
 * with pci_free_consistent.
 */
1165 if (bp->flags & B44_FLAG_RX_RING_HACK) {
1166 dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
1171 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1172 bp->rx_ring, bp->rx_ring_dma);
1174 bp->flags &= ~B44_FLAG_RX_RING_HACK;
1177 if (bp->flags & B44_FLAG_TX_RING_HACK) {
1178 dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
1183 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1184 bp->tx_ring, bp->tx_ring_dma);
1186 bp->flags &= ~B44_FLAG_TX_RING_HACK;
1191 * Must not be invoked with interrupt sources disabled and
1192 * the hardware shut down. Can sleep.
1194 static int b44_alloc_consistent(struct b44 *bp)
1198 size = B44_RX_RING_SIZE * sizeof(struct ring_info);
1199 bp->rx_buffers = kzalloc(size, GFP_KERNEL);
1200 if (!bp->rx_buffers)
1203 size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1204 bp->tx_buffers = kzalloc(size, GFP_KERNEL);
1205 if (!bp->tx_buffers)
1208 size = DMA_TABLE_BYTES;
1209 bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
1211 /* Allocation may have failed due to pci_alloc_consistent
1212 insisting on use of GFP_DMA, which is more restrictive
1213 than necessary... */
1214 struct dma_desc *rx_ring;
1215 dma_addr_t rx_ring_dma;
/* Fallback: kzalloc the ring and streaming-map it, provided the
 * mapping lands inside the chip's 30-bit DMA window; remember the
 * arrangement via B44_FLAG_RX_RING_HACK so teardown matches.
 */
1217 rx_ring = kzalloc(size, GFP_KERNEL);
1221 rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
1225 if (dma_mapping_error(rx_ring_dma) ||
1226 rx_ring_dma + size > DMA_30BIT_MASK) {
1231 bp->rx_ring = rx_ring;
1232 bp->rx_ring_dma = rx_ring_dma;
1233 bp->flags |= B44_FLAG_RX_RING_HACK;
1236 bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
1238 /* Allocation may have failed due to pci_alloc_consistent
1239 insisting on use of GFP_DMA, which is more restrictive
1240 than necessary... */
1241 struct dma_desc *tx_ring;
1242 dma_addr_t tx_ring_dma;
/* Same fallback for the TX ring. */
1244 tx_ring = kzalloc(size, GFP_KERNEL);
1248 tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
1252 if (dma_mapping_error(tx_ring_dma) ||
1253 tx_ring_dma + size > DMA_30BIT_MASK) {
1258 bp->tx_ring = tx_ring;
1259 bp->tx_ring_dma = tx_ring_dma;
1260 bp->flags |= B44_FLAG_TX_RING_HACK;
/* Error path: release everything allocated so far. */
1266 b44_free_consistent(bp);
1270 /* bp->lock is held. */
/* Put the MIB block into clear-on-read mode, then read (and discard)
 * every TX and RX counter to zero the hardware statistics.
 */
1271 static void b44_clear_stats(struct b44 *bp)
1275 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1276 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1278 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1282 /* bp->lock is held. */
/* Bring the MAC core to a known state: if it is running, quiesce the
 * ethernet engine and both DMA engines first; then configure the SSB
 * PCI bridge for this core, clear statistics, enable MDIO access, and
 * select internal vs external PHY based on DEVCTRL_IPP.
 */
1283 static void b44_chip_reset(struct b44 *bp)
1285 if (ssb_is_core_up(bp)) {
1286 bw32(bp, B44_RCV_LAZY, 0);
1287 bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
1288 b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
1289 bw32(bp, B44_DMATX_CTRL, 0);
1290 bp->tx_prod = bp->tx_cons = 0;
1291 if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
1292 b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
1295 bw32(bp, B44_DMARX_CTRL, 0);
1296 bp->rx_prod = bp->rx_cons = 0;
1298 ssb_pci_setup(bp, (bp->core_unit == 0 ?
1305 b44_clear_stats(bp);
1307 /* Make PHY accessible. */
1308 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1309 (0x0d & MDIO_CTRL_MAXF_MASK)));
1310 br32(bp, B44_MDIO_CTRL);
/* DEVCTRL_IPP clear => external PHY: select EPSEL; otherwise make
 * sure the internal PHY is powered (clear EPR if set).
 */
1312 if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
1313 bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
1314 br32(bp, B44_ENET_CTRL);
1315 bp->flags &= ~B44_FLAG_INTERNAL_PHY;
1317 u32 val = br32(bp, B44_DEVCTRL);
1319 if (val & DEVCTRL_EPR) {
1320 bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
1321 br32(bp, B44_DEVCTRL);
1324 bp->flags |= B44_FLAG_INTERNAL_PHY;
1328 /* bp->lock is held. */
/* Stop the device: mask interrupts (further shutdown steps truncated
 * in this extract).
 */
1329 static void b44_halt(struct b44 *bp)
1331 b44_disable_ints(bp);
1335 /* bp->lock is held. */
/* Program the unicast MAC address into CAM slot 0 and enable the CAM,
 * unless the interface is in promiscuous mode (CAM left disabled).
 */
1336 static void __b44_set_mac_addr(struct b44 *bp)
1338 bw32(bp, B44_CAM_CTRL, 0);
1339 if (!(bp->dev->flags & IFF_PROMISC)) {
1342 __b44_cam_write(bp, bp->dev->dev_addr, 0);
1343 val = br32(bp, B44_CAM_CTRL);
1344 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
/* netdev set_mac_address hook: validates the new address, copies it
 * into dev->dev_addr, and pushes it to the CAM under bp->lock.
 */
1348 static int b44_set_mac_addr(struct net_device *dev, void *p)
1350 struct b44 *bp = netdev_priv(dev);
1351 struct sockaddr *addr = p;
1353 if (netif_running(dev))
1356 if (!is_valid_ether_addr(addr->sa_data))
1359 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1361 spin_lock_irq(&bp->lock);
1362 __b44_set_mac_addr(bp);
1363 spin_unlock_irq(&bp->lock);
1368 /* Called at device open time to get the chip ready for
1369 * packet processing. Invoked with bp->lock held.
1371 static void __b44_set_rx_mode(struct net_device *);
/* Program the MAC/DMA registers after a chip reset.  'reset_kind'
 * selects how much to redo: a full reset also runs PHY setup (not
 * visible in this extract); a partial reset only re-enables RX DMA.
 */
1372 static void b44_init_hw(struct b44 *bp, int reset_kind)
1377 if (reset_kind == B44_FULL_RESET) {
1382 /* Enable CRC32, set proper LED modes and power on PHY */
1383 bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
1384 bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
1386 /* This sets the MAC address too. */
1387 __b44_set_rx_mode(bp->dev);
1389 /* MTU + eth header + possible VLAN tag + struct rx_header */
1390 bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1391 bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1393 bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
1394 if (reset_kind == B44_PARTIAL_RESET) {
1395 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1396 (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
/* Full reset: point both DMA engines at the rings and prime RX. */
1398 bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1399 bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1400 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1401 (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
1402 bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1404 bw32(bp, B44_DMARX_PTR, bp->rx_pending);
1405 bp->rx_prod = bp->rx_pending;
1407 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
/* Finally enable the ethernet MAC. */
1410 val = br32(bp, B44_ENET_CTRL);
1411 bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
/* netdev open hook: allocate rings/buffers, bring up the hardware,
 * attach the (shared) IRQ, start the 1 Hz PHY/statistics timer,
 * enable interrupts and start the TX queue.
 */
1414 static int b44_open(struct net_device *dev)
1416 struct b44 *bp = netdev_priv(dev);
1419 err = b44_alloc_consistent(bp);
1424 b44_init_hw(bp, B44_FULL_RESET);
1428 err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
1429 if (unlikely(err < 0)) {
1432 b44_free_consistent(bp);
1436 init_timer(&bp->timer);
1437 bp->timer.expires = jiffies + HZ;
1438 bp->timer.data = (unsigned long) bp;
1439 bp->timer.function = b44_timer;
1440 add_timer(&bp->timer);
1442 b44_enable_ints(bp);
1443 netif_start_queue(dev);
/* Debug helper: dump PCI status and chip registers to the console
 * (remaining dump lines truncated in this extract).
 */
1449 /*static*/ void b44_dump_state(struct b44 *bp)
1451 u32 val32, val32_2, val32_3, val32_4, val32_5;
1454 pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
1455 printk("DEBUG: PCI status [%04x] \n", val16);
1460 #ifdef CONFIG_NET_POLL_CONTROLLER
1462  * Polling receive - used by netconsole and other diagnostic tools
1463  * to allow network i/o with interrupts disabled.
1465 static void b44_poll_controller(struct net_device *dev)
/* Mask the line, run the normal ISR by hand, then unmask. */
1467 	disable_irq(dev->irq);
1468 	b44_interrupt(dev->irq, dev);
1469 	enable_irq(dev->irq);
/* Write 'bytes' bytes of pattern data (as 32-bit words) into the chip's
 * pattern-filter table starting at 'table_offset', one word per
 * FILT_ADDR/FILT_DATA register pair write. */
1473 static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
1476 	u32 *pattern = (u32 *) pp;
1478 	for (i = 0; i < bytes; i += sizeof(u32)) {
1479 		bw32(bp, B44_FILT_ADDR, table_offset + i);
1480 		bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
/* Build a Wake-on-LAN magic-packet pattern/mask pair at 'offset' within
 * ppattern: a sync block of 0xff bytes followed by repetitions of the MAC
 * address, setting the corresponding bits in pmask.
 * NOTE(review): 'magicsync', the final length bookkeeping and the return
 * statement are elided in this listing; presumably it returns the pattern
 * length - confirm against the full source. */
1484 static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
1487 	int k, j, len = offset;
1488 	int ethaddr_bytes = ETH_ALEN;
1490 	memset(ppattern + offset, 0xff, magicsync);
1491 	for (j = 0; j < magicsync; j++)
1492 		set_bit(len++, (unsigned long *) pmask);
1494 	for (j = 0; j < B44_MAX_PATTERNS; j++) {
/* Clamp the last repetition if the pattern buffer runs out of room. */
1495 		if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
1496 			ethaddr_bytes = ETH_ALEN;
1498 			ethaddr_bytes = B44_PATTERN_SIZE - len;
1499 		if (ethaddr_bytes <=0)
1501 		for (k = 0; k< ethaddr_bytes; k++) {
1502 			ppattern[offset + magicsync +
1503 				(j * ETH_ALEN) + k] = macaddr[k];
1505 			set_bit(len, (unsigned long *) pmask);
1511 /* Setup magic packet patterns in the b44 WOL
1512  * pattern matching filter.
1514 static void b44_setup_pseudo_magicp(struct b44 *bp)
1518 	int plen0, plen1, plen2;
1520 	u8 pwol_mask[B44_PMASK_SIZE];
1522 	pwol_pattern = kmalloc(B44_PATTERN_SIZE, GFP_KERNEL);
1523 	if (!pwol_pattern) {
1524 		printk(KERN_ERR PFX "Memory not available for WOL\n");
1528 	/* Ipv4 magic packet pattern - pattern 0.*/
1529 	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1530 	memset(pwol_mask, 0, B44_PMASK_SIZE);
1531 	plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1532 				  B44_ETHIPV4UDP_HLEN);
1534 	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1535 	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1537 	/* Raw ethernet II magic packet pattern - pattern 1 */
1538 	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1539 	memset(pwol_mask, 0, B44_PMASK_SIZE);
1540 	plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
/* Each subsequent pattern/mask pair lives one table slot further in. */
1543 	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1544 		       B44_PATTERN_BASE + B44_PATTERN_SIZE);
1545 	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1546 		       B44_PMASK_BASE + B44_PMASK_SIZE);
1548 	/* Ipv6 magic packet pattern - pattern 2 */
1549 	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1550 	memset(pwol_mask, 0, B44_PMASK_SIZE);
1551 	plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1552 				  B44_ETHIPV6UDP_HLEN);
1554 	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1555 		       B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
1556 	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1557 		       B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
1559 	kfree(pwol_pattern);
1561 	/* set these pattern's lengths: one less than each real length */
1562 	val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
1563 	bw32(bp, B44_WKUP_LEN, val);
1565 	/* enable wakeup pattern matching */
1566 	val = br32(bp, B44_DEVCTRL);
1567 	bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
/* Arm Wake-on-LAN before power-down.  B0-and-later chips support native
 * magic-packet matching (MPM); older revisions fall back to the pseudo
 * pattern filter set up by b44_setup_pseudo_magicp(). */
1571 static void b44_setup_wol(struct b44 *bp)
1576 	bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1578 	if (bp->flags & B44_FLAG_B0_ANDLATER) {
1580 		bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
/* Program the station address the MPM engine matches against:
 * low 4 bytes in ADDR_LO, high 2 bytes in ADDR_HI. */
1582 		val = bp->dev->dev_addr[2] << 24 |
1583 			bp->dev->dev_addr[3] << 16 |
1584 			bp->dev->dev_addr[4] << 8 |
1585 			bp->dev->dev_addr[5];
1586 		bw32(bp, B44_ADDR_LO, val);
1588 		val = bp->dev->dev_addr[0] << 8 |
1589 			bp->dev->dev_addr[1];
1590 		bw32(bp, B44_ADDR_HI, val);
1592 		val = br32(bp, B44_DEVCTRL);
1593 		bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
1596 		b44_setup_pseudo_magicp(bp);
/* Put the SSB core into power-save/PME-enabled state. */
1599 	val = br32(bp, B44_SBTMSLOW);
1600 	bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE);
/* Also set PME enable in the bridge's power-management config space. */
1602 	pci_read_config_word(bp->pdev, SSB_PMCSR, &pmval);
1603 	pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE);
/* net_device->stop: quiesce TX/poll, kill the link timer, halt the chip
 * under the lock, release the IRQ, and either arm WOL (partial re-init)
 * or free the rings.  NOTE(review): the lock-held teardown lines are
 * elided in this listing. */
1607 static int b44_close(struct net_device *dev)
1609 	struct b44 *bp = netdev_priv(dev);
1611 	netif_stop_queue(dev);
1613 	netif_poll_disable(dev);
1615 	del_timer_sync(&bp->timer);
1617 	spin_lock_irq(&bp->lock);
1624 	netif_carrier_off(dev);
1626 	spin_unlock_irq(&bp->lock);
1628 	free_irq(dev->irq, dev);
1630 	netif_poll_enable(dev);
1632 	if (bp->flags & B44_FLAG_WOL_ENABLE) {
/* Partial reset keeps the chip alive enough to match wake patterns. */
1633 		b44_init_hw(bp, B44_PARTIAL_RESET);
1637 	b44_free_consistent(bp);
/* net_device->get_stats: translate the cached hardware MIB counters
 * (bp->hw_stats, refreshed elsewhere by b44_stats_update()) into the
 * generic net_device_stats structure. */
1642 static struct net_device_stats *b44_get_stats(struct net_device *dev)
1644 	struct b44 *bp = netdev_priv(dev);
1645 	struct net_device_stats *nstat = &bp->stats;
1646 	struct b44_hw_stats *hwstat = &bp->hw_stats;
1648 	/* Convert HW stats into netdevice stats. */
1649 	nstat->rx_packets = hwstat->rx_pkts;
1650 	nstat->tx_packets = hwstat->tx_pkts;
1651 	nstat->rx_bytes = hwstat->rx_octets;
1652 	nstat->tx_bytes = hwstat->tx_octets;
1653 	nstat->tx_errors = (hwstat->tx_jabber_pkts +
1654 			    hwstat->tx_oversize_pkts +
1655 			    hwstat->tx_underruns +
1656 			    hwstat->tx_excessive_cols +
1657 			    hwstat->tx_late_cols);
/* NOTE(review): 'multicast' is filled from the TX multicast counter, not an
 * RX one - looks suspicious; confirm against the hardware counter layout. */
1658 	nstat->multicast = hwstat->tx_multicast_pkts;
1659 	nstat->collisions = hwstat->tx_total_cols;
1661 	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1662 				   hwstat->rx_undersize);
1663 	nstat->rx_over_errors = hwstat->rx_missed_pkts;
1664 	nstat->rx_frame_errors = hwstat->rx_align_errs;
1665 	nstat->rx_crc_errors = hwstat->rx_crc_errs;
1666 	nstat->rx_errors = (hwstat->rx_jabber_pkts +
1667 			    hwstat->rx_oversize_pkts +
1668 			    hwstat->rx_missed_pkts +
1669 			    hwstat->rx_crc_align_errs +
1670 			    hwstat->rx_undersize +
1671 			    hwstat->rx_crc_errs +
1672 			    hwstat->rx_align_errs +
1673 			    hwstat->rx_symbol_errs);
1675 	nstat->tx_aborted_errors = hwstat->tx_underruns;
1677 	/* Carrier lost counter seems to be broken for some devices */
1678 	nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
/* Load up to B44_MCAST_TABLE_SIZE multicast addresses from the device's
 * mc_list into the chip's CAM, starting at CAM slot 1 (slot 0 holds the
 * station address).  Returns are elided in this listing; presumably the
 * number of entries written - confirm against the full source. */
1684 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1686 	struct dev_mc_list *mclist;
1689 	num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
1690 	mclist = dev->mc_list;
1691 	for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
1692 		__b44_cam_write(bp, mclist->dmi_addr, i + 1);
/* Program RX filtering according to dev->flags.  Caller holds bp->lock
 * (see b44_set_rx_mode()).  Promiscuous mode bypasses the CAM entirely;
 * otherwise the station address and multicast list are (re)written and
 * unused CAM slots are cleared with the all-zero address. */
1697 static void __b44_set_rx_mode(struct net_device *dev)
1699 	struct b44 *bp = netdev_priv(dev);
1702 	val = br32(bp, B44_RXCONFIG);
1703 	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1704 	if (dev->flags & IFF_PROMISC) {
1705 		val |= RXCONFIG_PROMISC;
1706 		bw32(bp, B44_RXCONFIG, val);
1708 		unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
/* Re-program the unicast station address (CAM slot 0). */
1711 		__b44_set_mac_addr(bp);
1713 		if ((dev->flags & IFF_ALLMULTI) ||
1714 		    (dev->mc_count > B44_MCAST_TABLE_SIZE))
1715 			val |= RXCONFIG_ALLMULTI;
1717 			i = __b44_load_mcast(bp, dev);
/* Zero out the remaining CAM entries past the ones just loaded. */
1720 			__b44_cam_write(bp, zero, i);
1722 		bw32(bp, B44_RXCONFIG, val);
1723 		val = br32(bp, B44_CAM_CTRL);
1724 		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
/* net_device->set_multicast_list: locked wrapper around __b44_set_rx_mode. */
1728 static void b44_set_rx_mode(struct net_device *dev)
1730 	struct b44 *bp = netdev_priv(dev);
1732 	spin_lock_irq(&bp->lock);
1733 	__b44_set_rx_mode(dev);
1734 	spin_unlock_irq(&bp->lock);
/* ethtool get_msglevel: report the driver's message-enable bitmask. */
1737 static u32 b44_get_msglevel(struct net_device *dev)
1739 	struct b44 *bp = netdev_priv(dev);
1740 	return bp->msg_enable;
/* ethtool set_msglevel: store the new message-enable bitmask. */
1743 static void b44_set_msglevel(struct net_device *dev, u32 value)
1745 	struct b44 *bp = netdev_priv(dev);
1746 	bp->msg_enable = value;
/* ethtool get_drvinfo: fill in driver name, version and PCI bus id.
 * NOTE(review): unbounded strcpy into the fixed-size ethtool_drvinfo
 * fields - strlcpy would be safer here. */
1749 static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1751 	struct b44 *bp = netdev_priv(dev);
1752 	struct pci_dev *pci_dev = bp->pdev;
1754 	strcpy (info->driver, DRV_MODULE_NAME);
1755 	strcpy (info->version, DRV_MODULE_VERSION);
1756 	strcpy (info->bus_info, pci_name(pci_dev));
/* ethtool nway_reset: restart autonegotiation if it is enabled. */
1759 static int b44_nway_reset(struct net_device *dev)
1761 	struct b44 *bp = netdev_priv(dev);
1765 	spin_lock_irq(&bp->lock);
/* BMCR is read twice on purpose - presumably to flush a stale/latched
 * value from the first read; confirm against the PHY errata. */
1766 	b44_readphy(bp, MII_BMCR, &bmcr);
1767 	b44_readphy(bp, MII_BMCR, &bmcr);
1769 	if (bmcr & BMCR_ANENABLE) {
1770 		b44_writephy(bp, MII_BMCR,
1771 			     bmcr | BMCR_ANRESTART);
1774 	spin_unlock_irq(&bp->lock);
/* ethtool get_settings: report supported/advertised modes, current
 * speed/duplex, PHY address, transceiver type and autoneg state, all
 * derived from bp->flags. */
1779 static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1781 	struct b44 *bp = netdev_priv(dev);
1783 	cmd->supported = (SUPPORTED_Autoneg);
1784 	cmd->supported |= (SUPPORTED_100baseT_Half |
1785 			  SUPPORTED_100baseT_Full |
1786 			  SUPPORTED_10baseT_Half |
1787 			  SUPPORTED_10baseT_Full |
1790 	cmd->advertising = 0;
1791 	if (bp->flags & B44_FLAG_ADV_10HALF)
1792 		cmd->advertising |= ADVERTISED_10baseT_Half;
1793 	if (bp->flags & B44_FLAG_ADV_10FULL)
1794 		cmd->advertising |= ADVERTISED_10baseT_Full;
1795 	if (bp->flags & B44_FLAG_ADV_100HALF)
1796 		cmd->advertising |= ADVERTISED_100baseT_Half;
1797 	if (bp->flags & B44_FLAG_ADV_100FULL)
1798 		cmd->advertising |= ADVERTISED_100baseT_Full;
1799 	cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1800 	cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1801 		SPEED_100 : SPEED_10;
1802 	cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1803 		DUPLEX_FULL : DUPLEX_HALF;
1805 	cmd->phy_address = bp->phy_addr;
1806 	cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1807 		XCVR_INTERNAL : XCVR_EXTERNAL;
1808 	cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1809 		AUTONEG_DISABLE : AUTONEG_ENABLE;
1810 	if (cmd->autoneg == AUTONEG_ENABLE)
1811 		cmd->advertising |= ADVERTISED_Autoneg;
/* If the interface is down, speed/duplex are not meaningful (the body of
 * this branch is elided in this listing). */
1812 	if (!netif_running(dev)){
/* ethtool set_settings: validate the request (no gigabit modes, only
 * 10/100 half/full when forcing), then update bp->flags accordingly and
 * re-run autoneg setup if the interface is up. */
1821 static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1823 	struct b44 *bp = netdev_priv(dev);
1825 	/* We do not support gigabit. */
1826 	if (cmd->autoneg == AUTONEG_ENABLE) {
1827 		if (cmd->advertising &
1828 		    (ADVERTISED_1000baseT_Half |
1829 		     ADVERTISED_1000baseT_Full))
1831 	} else if ((cmd->speed != SPEED_100 &&
1832 		    cmd->speed != SPEED_10) ||
1833 		   (cmd->duplex != DUPLEX_HALF &&
1834 		    cmd->duplex != DUPLEX_FULL)) {
1838 	spin_lock_irq(&bp->lock);
1840 	if (cmd->autoneg == AUTONEG_ENABLE) {
/* Autoneg: clear any forced-link state and rebuild the advertisement mask. */
1841 		bp->flags &= ~(B44_FLAG_FORCE_LINK |
1842 			       B44_FLAG_100_BASE_T |
1843 			       B44_FLAG_FULL_DUPLEX |
1844 			       B44_FLAG_ADV_10HALF |
1845 			       B44_FLAG_ADV_10FULL |
1846 			       B44_FLAG_ADV_100HALF |
1847 			       B44_FLAG_ADV_100FULL);
1848 		if (cmd->advertising == 0) {
/* Empty advertisement mask means "advertise everything we can do". */
1849 			bp->flags |= (B44_FLAG_ADV_10HALF |
1850 				      B44_FLAG_ADV_10FULL |
1851 				      B44_FLAG_ADV_100HALF |
1852 				      B44_FLAG_ADV_100FULL);
1854 			if (cmd->advertising & ADVERTISED_10baseT_Half)
1855 				bp->flags |= B44_FLAG_ADV_10HALF;
1856 			if (cmd->advertising & ADVERTISED_10baseT_Full)
1857 				bp->flags |= B44_FLAG_ADV_10FULL;
1858 			if (cmd->advertising & ADVERTISED_100baseT_Half)
1859 				bp->flags |= B44_FLAG_ADV_100HALF;
1860 			if (cmd->advertising & ADVERTISED_100baseT_Full)
1861 				bp->flags |= B44_FLAG_ADV_100FULL;
/* Forced link: record the requested speed/duplex. */
1864 		bp->flags |= B44_FLAG_FORCE_LINK;
1865 		bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1866 		if (cmd->speed == SPEED_100)
1867 			bp->flags |= B44_FLAG_100_BASE_T;
1868 		if (cmd->duplex == DUPLEX_FULL)
1869 			bp->flags |= B44_FLAG_FULL_DUPLEX;
1872 	if (netif_running(dev))
1875 	spin_unlock_irq(&bp->lock);
/* ethtool get_ringparam: report the RX ring maximum and current depth. */
1880 static void b44_get_ringparam(struct net_device *dev,
1881 			      struct ethtool_ringparam *ering)
1883 	struct b44 *bp = netdev_priv(dev);
1885 	ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1886 	ering->rx_pending = bp->rx_pending;
1888 	/* XXX ethtool lacks a tx_max_pending, oops... */
/* ethtool set_ringparam: validate the requested ring depths (no mini or
 * jumbo rings), then apply them with a full chip re-init under the lock.
 * NOTE(review): the halt/free/re-alloc steps between setting the fields
 * and b44_init_hw() are elided in this listing. */
1891 static int b44_set_ringparam(struct net_device *dev,
1892 			     struct ethtool_ringparam *ering)
1894 	struct b44 *bp = netdev_priv(dev);
1896 	if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1897 	    (ering->rx_mini_pending != 0) ||
1898 	    (ering->rx_jumbo_pending != 0) ||
1899 	    (ering->tx_pending > B44_TX_RING_SIZE - 1))
1902 	spin_lock_irq(&bp->lock);
1904 	bp->rx_pending = ering->rx_pending;
1905 	bp->tx_pending = ering->tx_pending;
1909 	b44_init_hw(bp, B44_FULL_RESET);
1910 	netif_wake_queue(bp->dev);
1911 	spin_unlock_irq(&bp->lock);
1913 	b44_enable_ints(bp);
/* ethtool get_pauseparam: report pause autoneg and RX/TX pause state
 * from bp->flags. */
1918 static void b44_get_pauseparam(struct net_device *dev,
1919 				struct ethtool_pauseparam *epause)
1921 	struct b44 *bp = netdev_priv(dev);
1924 		(bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1926 		(bp->flags & B44_FLAG_RX_PAUSE) != 0;
1928 		(bp->flags & B44_FLAG_TX_PAUSE) != 0;
/* ethtool set_pauseparam: update the pause-related flags, then either
 * re-run the full init (autoneg pause) or program flow control directly. */
1931 static int b44_set_pauseparam(struct net_device *dev,
1932 				struct ethtool_pauseparam *epause)
1934 	struct b44 *bp = netdev_priv(dev);
1936 	spin_lock_irq(&bp->lock);
1937 	if (epause->autoneg)
1938 		bp->flags |= B44_FLAG_PAUSE_AUTO;
1940 		bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1941 	if (epause->rx_pause)
1942 		bp->flags |= B44_FLAG_RX_PAUSE;
1944 		bp->flags &= ~B44_FLAG_RX_PAUSE;
1945 	if (epause->tx_pause)
1946 		bp->flags |= B44_FLAG_TX_PAUSE;
1948 		bp->flags &= ~B44_FLAG_TX_PAUSE;
1949 	if (bp->flags & B44_FLAG_PAUSE_AUTO) {
/* Auto-negotiated pause requires a chip re-init to take effect. */
1952 		b44_init_hw(bp, B44_FULL_RESET);
/* Manual pause settings can be written straight to the hardware. */
1954 		__b44_set_flow_ctrl(bp, bp->flags);
1956 	spin_unlock_irq(&bp->lock);
1958 	b44_enable_ints(bp);
/* ethtool get_strings: copy the statistics name table verbatim. */
1963 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1967 		memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
/* ethtool get_stats_count: number of entries in the stats name table. */
1972 static int b44_get_stats_count(struct net_device *dev)
1974 	return ARRAY_SIZE(b44_gstrings);
/* ethtool get_ethtool_stats: refresh the hardware counters, then copy
 * them out.  'val' treats bp->hw_stats as a flat u32 array starting at
 * tx_good_octets - presumably the struct layout matches b44_gstrings
 * order; confirm against the b44_hw_stats definition. */
1977 static void b44_get_ethtool_stats(struct net_device *dev,
1978 				  struct ethtool_stats *stats, u64 *data)
1980 	struct b44 *bp = netdev_priv(dev);
1981 	u32 *val = &bp->hw_stats.tx_good_octets;
1984 	spin_lock_irq(&bp->lock);
1986 	b44_stats_update(bp);
1988 	for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
1991 	spin_unlock_irq(&bp->lock);
/* ethtool get_wol: only magic-packet wake is supported; report whether
 * it is currently enabled. */
1994 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1996 	struct b44 *bp = netdev_priv(dev);
1998 	wol->supported = WAKE_MAGIC;
1999 	if (bp->flags & B44_FLAG_WOL_ENABLE)
2000 		wol->wolopts = WAKE_MAGIC;
/* No SecureOn password support. */
2003 	memset(&wol->sopass, 0, sizeof(wol->sopass));
/* ethtool set_wol: record the WOL-enable flag; the hardware is actually
 * armed later at close/suspend time (see b44_setup_wol). */
2006 static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2008 	struct b44 *bp = netdev_priv(dev);
2010 	spin_lock_irq(&bp->lock);
2011 	if (wol->wolopts & WAKE_MAGIC)
2012 		bp->flags |= B44_FLAG_WOL_ENABLE;
2014 		bp->flags &= ~B44_FLAG_WOL_ENABLE;
2015 	spin_unlock_irq(&bp->lock);
/* ethtool operations table wired into the net_device in b44_init_one(). */
2020 static const struct ethtool_ops b44_ethtool_ops = {
2021 	.get_drvinfo		= b44_get_drvinfo,
2022 	.get_settings		= b44_get_settings,
2023 	.set_settings		= b44_set_settings,
2024 	.nway_reset		= b44_nway_reset,
2025 	.get_link		= ethtool_op_get_link,
2026 	.get_wol		= b44_get_wol,
2027 	.set_wol		= b44_set_wol,
2028 	.get_ringparam		= b44_get_ringparam,
2029 	.set_ringparam		= b44_set_ringparam,
2030 	.get_pauseparam		= b44_get_pauseparam,
2031 	.set_pauseparam		= b44_set_pauseparam,
2032 	.get_msglevel		= b44_get_msglevel,
2033 	.set_msglevel		= b44_set_msglevel,
2034 	.get_strings		= b44_get_strings,
2035 	.get_stats_count	= b44_get_stats_count,
2036 	.get_ethtool_stats	= b44_get_ethtool_stats,
2037 	.get_perm_addr		= ethtool_op_get_perm_addr,
/* net_device->do_ioctl: delegate MII ioctls to the generic MII layer,
 * serialized against the rest of the driver by bp->lock.  Only valid
 * while the interface is running. */
2040 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2042 	struct mii_ioctl_data *data = if_mii(ifr);
2043 	struct b44 *bp = netdev_priv(dev);
2046 	if (!netif_running(dev))
2049 	spin_lock_irq(&bp->lock);
2050 	err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
2051 	spin_unlock_irq(&bp->lock);
2056 /* Read 128-bytes of EEPROM. */
/* The EEPROM is memory-mapped at register offset 4096; each 16-bit word
 * is read with readw() and stored little-endian into the caller's buffer. */
2057 static int b44_read_eeprom(struct b44 *bp, u8 *data)
2060 	__le16 *ptr = (__le16 *) data;
2062 	for (i = 0; i < 128; i += 2)
2063 		ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
/* Probe-time setup: pull the MAC address and PHY address out of the
 * EEPROM (note the byte-swapped pair ordering), validate the MAC, and
 * record per-chip invariants (core unit, DMA offset, revision flags). */
2068 static int __devinit b44_get_invariants(struct b44 *bp)
2073 	err = b44_read_eeprom(bp, &eeprom[0]);
/* MAC address bytes are stored pair-swapped in the EEPROM. */
2077 	bp->dev->dev_addr[0] = eeprom[79];
2078 	bp->dev->dev_addr[1] = eeprom[78];
2079 	bp->dev->dev_addr[2] = eeprom[81];
2080 	bp->dev->dev_addr[3] = eeprom[80];
2081 	bp->dev->dev_addr[4] = eeprom[83];
2082 	bp->dev->dev_addr[5] = eeprom[82];
2084 	if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
2085 		printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
2089 	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
2091 	bp->phy_addr = eeprom[90] & 0x1f;
2093 	bp->imask = IMASK_DEF;
2095 	bp->core_unit = ssb_core_unit(bp);
2096 	bp->dma_offset = SB_PCI_DMA;
2098 	/* XXX - really required?
2099 	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
/* Core revision 7 and later ("B0 and later") support native WOL MPM. */
2102 	if (ssb_get_core_rev(bp) >= 7)
2103 		bp->flags |= B44_FLAG_B0_ANDLATER;
/* PCI probe: enable the device, claim its MMIO BAR, set 30-bit DMA masks
 * (hardware limitation), allocate and wire up the net_device, read chip
 * invariants, register the netdev and print the MAC address.
 * NOTE(review): several error-label lines are elided in this listing. */
2109 static int __devinit b44_init_one(struct pci_dev *pdev,
2110 				  const struct pci_device_id *ent)
2112 	static int b44_version_printed = 0;
2113 	unsigned long b44reg_base, b44reg_len;
2114 	struct net_device *dev;
/* Print the version banner only on the first probed device. */
2118 	if (b44_version_printed++ == 0)
2119 		printk(KERN_INFO "%s", version);
2121 	err = pci_enable_device(pdev);
2123 		dev_err(&pdev->dev, "Cannot enable PCI device, "
2128 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2130 			"Cannot find proper PCI device "
2131 			"base address, aborting.\n");
2133 		goto err_out_disable_pdev;
2136 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
2139 			"Cannot obtain PCI resources, aborting.\n");
2140 		goto err_out_disable_pdev;
2143 	pci_set_master(pdev);
/* The DMA engine can only address the low 1GB (30-bit mask). */
2145 	err = pci_set_dma_mask(pdev, (u64) DMA_30BIT_MASK);
2147 		dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2148 		goto err_out_free_res;
2151 	err = pci_set_consistent_dma_mask(pdev, (u64) DMA_30BIT_MASK);
2153 		dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2154 		goto err_out_free_res;
2157 	b44reg_base = pci_resource_start(pdev, 0);
2158 	b44reg_len = pci_resource_len(pdev, 0);
2160 	dev = alloc_etherdev(sizeof(*bp));
2162 		dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
2164 		goto err_out_free_res;
2167 	SET_MODULE_OWNER(dev);
2168 	SET_NETDEV_DEV(dev,&pdev->dev);
2170 	/* No interesting netdevice features in this card... */
2173 	bp = netdev_priv(dev);
2177 	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2179 	spin_lock_init(&bp->lock);
2181 	bp->regs = ioremap(b44reg_base, b44reg_len);
2182 	if (bp->regs == 0UL) {
2183 		dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
2185 		goto err_out_free_dev;
2188 	bp->rx_pending = B44_DEF_RX_RING_PENDING;
2189 	bp->tx_pending = B44_DEF_TX_RING_PENDING;
/* Hook up the net_device operations. */
2191 	dev->open = b44_open;
2192 	dev->stop = b44_close;
2193 	dev->hard_start_xmit = b44_start_xmit;
2194 	dev->get_stats = b44_get_stats;
2195 	dev->set_multicast_list = b44_set_rx_mode;
2196 	dev->set_mac_address = b44_set_mac_addr;
2197 	dev->do_ioctl = b44_ioctl;
2198 	dev->tx_timeout = b44_tx_timeout;
2199 	dev->poll = b44_poll;
2201 	dev->watchdog_timeo = B44_TX_TIMEOUT;
2202 #ifdef CONFIG_NET_POLL_CONTROLLER
2203 	dev->poll_controller = b44_poll_controller;
2205 	dev->change_mtu = b44_change_mtu;
2206 	dev->irq = pdev->irq;
2207 	SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2209 	netif_carrier_off(dev);
2211 	err = b44_get_invariants(bp);
2214 			"Problem fetching invariants of chip, aborting.\n");
2215 		goto err_out_iounmap;
/* Generic MII library glue (used by b44_ioctl). */
2218 	bp->mii_if.dev = dev;
2219 	bp->mii_if.mdio_read = b44_mii_read;
2220 	bp->mii_if.mdio_write = b44_mii_write;
2221 	bp->mii_if.phy_id = bp->phy_addr;
2222 	bp->mii_if.phy_id_mask = 0x1f;
2223 	bp->mii_if.reg_num_mask = 0x1f;
2225 	/* By default, advertise all speed/duplex settings. */
2226 	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2227 		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2229 	/* By default, auto-negotiate PAUSE. */
2230 	bp->flags |= B44_FLAG_PAUSE_AUTO;
2232 	err = register_netdev(dev);
2234 		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
2235 		goto err_out_iounmap;
2238 	pci_set_drvdata(pdev, dev);
2240 	pci_save_state(bp->pdev);
2242 	/* Chip reset provides power to the b44 MAC & PCI cores, which
2243 	 * is necessary for MAC register access.
2247 	printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
2248 	for (i = 0; i < 6; i++)
2249 		printk("%2.2x%c", dev->dev_addr[i],
2250 		       i == 5 ? '\n' : ':');
2261 	pci_release_regions(pdev);
2263 err_out_disable_pdev:
2264 	pci_disable_device(pdev);
2265 	pci_set_drvdata(pdev, NULL);
/* PCI remove: unregister the netdev and release PCI resources.
 * NOTE(review): the iounmap/free_netdev lines are elided in this listing. */
2269 static void __devexit b44_remove_one(struct pci_dev *pdev)
2271 	struct net_device *dev = pci_get_drvdata(pdev);
2272 	struct b44 *bp = netdev_priv(dev);
2274 	unregister_netdev(dev);
2277 	pci_release_regions(pdev);
2278 	pci_disable_device(pdev);
2279 	pci_set_drvdata(pdev, NULL);
/* PM suspend: if the interface is up, stop the timer, detach the netdev
 * under the lock, drop the IRQ, arm WOL if requested, and power down.
 * NOTE(review): the lock-held halt/free steps are elided in this listing. */
2282 static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
2284 	struct net_device *dev = pci_get_drvdata(pdev);
2285 	struct b44 *bp = netdev_priv(dev);
2287 	if (!netif_running(dev))
2290 	del_timer_sync(&bp->timer);
2292 	spin_lock_irq(&bp->lock);
2295 	netif_carrier_off(bp->dev);
2296 	netif_device_detach(bp->dev);
2299 	spin_unlock_irq(&bp->lock);
2301 	free_irq(dev->irq, dev);
2302 	if (bp->flags & B44_FLAG_WOL_ENABLE) {
/* Partial re-init leaves the chip able to match wake patterns. */
2303 		b44_init_hw(bp, B44_PARTIAL_RESET);
2306 	pci_disable_device(pdev);
/* PM resume: restore PCI state, re-enable the device, and - if the
 * interface was up - reacquire the IRQ, fully re-init the chip, reattach
 * the netdev and restart the link timer. */
2310 static int b44_resume(struct pci_dev *pdev)
2312 	struct net_device *dev = pci_get_drvdata(pdev);
2313 	struct b44 *bp = netdev_priv(dev);
2316 	pci_restore_state(pdev);
2317 	rc = pci_enable_device(pdev);
2319 		printk(KERN_ERR PFX "%s: pci_enable_device failed\n",
2324 	pci_set_master(pdev);
2326 	if (!netif_running(dev))
2329 	rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
2331 		printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
2332 		pci_disable_device(pdev);
2336 	spin_lock_irq(&bp->lock);
2339 	b44_init_hw(bp, B44_FULL_RESET);
2340 	netif_device_attach(bp->dev);
2341 	spin_unlock_irq(&bp->lock);
2343 	b44_enable_ints(bp);
2344 	netif_wake_queue(dev);
/* Kick the link-maintenance timer immediately. */
2346 	mod_timer(&bp->timer, jiffies + 1);
/* PCI driver registration table; tied together in b44_init()/b44_cleanup(). */
2351 static struct pci_driver b44_driver = {
2352 	.name		= DRV_MODULE_NAME,
2353 	.id_table	= b44_pci_tbl,
2354 	.probe		= b44_init_one,
2355 	.remove		= __devexit_p(b44_remove_one),
2356 	.suspend	= b44_suspend,
2357 	.resume		= b44_resume,
/* Module init: derive DMA descriptor sync alignment from the CPU cache
 * line size, then register the PCI driver. */
2360 static int __init b44_init(void)
2362 	unsigned int dma_desc_align_size = dma_get_cache_alignment();
2364 	/* Setup paramaters for syncing RX/TX DMA descriptors */
2365 	dma_desc_align_mask = ~(dma_desc_align_size - 1);
2366 	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
2368 	return pci_register_driver(&b44_driver);
/* Module exit: unregister the PCI driver. */
2371 static void __exit b44_cleanup(void)
2373 	pci_unregister_driver(&b44_driver);
2376 module_init(b44_init);
2377 module_exit(b44_cleanup);